From f193bca595c0050474c4f1f4e4540151f67e4250 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Wed, 27 Aug 2025 22:19:07 +0000 Subject: [PATCH 001/332] feat(libstore): warn when kvm is enabled but /dev/kvm isn't available --- src/libstore/unix/build/linux-derivation-builder.cc | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/libstore/unix/build/linux-derivation-builder.cc b/src/libstore/unix/build/linux-derivation-builder.cc index b92d05607..3e34a1a7f 100644 --- a/src/libstore/unix/build/linux-derivation-builder.cc +++ b/src/libstore/unix/build/linux-derivation-builder.cc @@ -3,6 +3,7 @@ # include "nix/store/personality.hh" # include "nix/util/cgroup.hh" # include "nix/util/linux-namespaces.hh" +# include "nix/util/logging.hh" # include "linux/fchmodat2-compat.hh" # include @@ -492,8 +493,16 @@ struct ChrootLinuxDerivationBuilder : ChrootDerivationBuilder, LinuxDerivationBu createDirs(chrootRootDir + "/dev/shm"); createDirs(chrootRootDir + "/dev/pts"); ss.push_back("/dev/full"); - if (systemFeatures.count("kvm") && pathExists("/dev/kvm")) - ss.push_back("/dev/kvm"); + if (systemFeatures.count("kvm")) { + if (pathExists("/dev/kvm")) { + ss.push_back("/dev/kvm"); + } else { + warn( + "KVM is enabled in system-features but /dev/kvm is not available. " + "QEMU builds may fall back to slow emulation. " + "Consider removing 'kvm' from system-features in nix.conf if KVM is not supported on this system."); + } + } ss.push_back("/dev/null"); ss.push_back("/dev/random"); ss.push_back("/dev/tty"); From a408bc3e30e3e5b7ff61596d1072973679761363 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-Fran=C3=A7ois=20Roche?= Date: Fri, 19 Sep 2025 13:54:39 +0200 Subject: [PATCH 002/332] installer: prepend nix paths to shell config files instead of appending Some distribution will stop evaluating the shell config file if they are not running in an interactive shell. As we append the nix paths to the end of the file, they will not be evaluated. We better prepend the nix paths to the shell config files to be sure that once nix is installed, nix path will be available in any shell. 
Note that this is already the case for the detsys installer script for a while: https://github.com/DeterminateSystems/nix-installer/pull/148 Possibly related errors: https://github.com/NixOS/nix/issues/8061 https://github.com/NixOS/nix/pull/6628 https://github.com/NixOS/nix/issues/2587 --- scripts/install-multi-user.sh | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/scripts/install-multi-user.sh b/scripts/install-multi-user.sh index 477eb1fd6..450a773e9 100644 --- a/scripts/install-multi-user.sh +++ b/scripts/install-multi-user.sh @@ -915,9 +915,11 @@ configure_shell_profile() { fi if [ -e "$profile_target" ]; then - shell_source_lines \ - | _sudo "extend your $profile_target with nix-daemon settings" \ - tee -a "$profile_target" + { + shell_source_lines + cat "$profile_target" + } | _sudo "extend your $profile_target with nix-daemon settings" \ + tee "$profile_target" fi done From b63d9fbc8799b62eed0b21e595d22ef660c3924e Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Fri, 19 Sep 2025 18:23:35 +0000 Subject: [PATCH 003/332] test(libstore): additional ParsedS3Url tests Extracted from the work in #13752 --- src/libstore-tests/s3.cc | 43 ++++++++++++++++++++++++++++++++-------- 1 file changed, 35 insertions(+), 8 deletions(-) diff --git a/src/libstore-tests/s3.cc b/src/libstore-tests/s3.cc index 44a31ddc9..799e102fe 100644 --- a/src/libstore-tests/s3.cc +++ b/src/libstore-tests/s3.cc @@ -8,6 +8,10 @@ namespace nix { +// ============================================================================= +// ParsedS3URL Tests +// ============================================================================= + struct ParsedS3URLTestCase { std::string url; @@ -86,18 +90,41 @@ INSTANTIATE_TEST_SUITE_P( }), [](const ::testing::TestParamInfo & info) { return info.param.description; }); -TEST(InvalidParsedS3URLTest, parseS3URLErrors) +// Parameterized test for invalid S3 URLs +struct InvalidS3URLTestCase { - auto invalidBucketMatcher = ::testing::ThrowsMessage( - testing::HasSubstrIgnoreANSIMatcher("error: URI has a missing or invalid bucket name")); + std::string url; + std::string expectedErrorSubstring; + std::string description; +}; - /* Empty bucket (authority) */ - ASSERT_THAT([]() { ParsedS3URL::parse(parseURL("s3:///key")); }, invalidBucketMatcher); - /* Invalid bucket name */ - ASSERT_THAT([]() { ParsedS3URL::parse(parseURL("s3://127.0.0.1")); }, invalidBucketMatcher); +class InvalidParsedS3URLTest : public ::testing::WithParamInterface, public ::testing::Test +{}; + +TEST_P(InvalidParsedS3URLTest, parseS3URLErrors) +{ + const auto & testCase = GetParam(); + + ASSERT_THAT( + [&testCase]() { ParsedS3URL::parse(parseURL(testCase.url)); }, + ::testing::ThrowsMessage(testing::HasSubstrIgnoreANSIMatcher(testCase.expectedErrorSubstring))); } -// Parameterized test for s3ToHttpsUrl conversion +INSTANTIATE_TEST_SUITE_P( + InvalidUrls, + InvalidParsedS3URLTest, + ::testing::Values( + InvalidS3URLTestCase{"s3:///key", "error: URI has a missing or invalid bucket name", "empty_bucket"}, + InvalidS3URLTestCase{"s3://127.0.0.1", "error: URI has a missing or invalid bucket name", "ip_address_bucket"}, + InvalidS3URLTestCase{"s3://bucket with spaces/key", "is not a valid URL", "bucket_with_spaces"}, + InvalidS3URLTestCase{"s3://", "error: URI has a missing or invalid bucket name", "completely_empty"}, + InvalidS3URLTestCase{"s3://bucket", "error: URI has a missing or invalid key", "missing_key"}), + [](const ::testing::TestParamInfo & info) { return info.param.description; }); 
+ +// ============================================================================= +// S3 URL to HTTPS Conversion Tests +// ============================================================================= + struct S3ToHttpsConversionTestCase { ParsedS3URL input; From 0e74b25f626a33554feb7fb192b95ed99f807b00 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Fri, 19 Sep 2025 21:46:53 +0200 Subject: [PATCH 004/332] C API: Fix bounds checking in _byidx functions The docs weren't 100% clear about bounds checking, but suggested that errors would be caught. The bounds checks are cheap compared to the function calls they're in, so we have no reason to omit them. --- src/libexpr-c/nix_api_value.cc | 12 +++++++ src/libexpr-tests/nix_api_value.cc | 55 ++++++++++++++++++++++++++++++ 2 files changed, 67 insertions(+) diff --git a/src/libexpr-c/nix_api_value.cc b/src/libexpr-c/nix_api_value.cc index 3339790f4..3442bf1a1 100644 --- a/src/libexpr-c/nix_api_value.cc +++ b/src/libexpr-c/nix_api_value.cc @@ -326,6 +326,10 @@ nix_value * nix_get_list_byidx(nix_c_context * context, const nix_value * value, try { auto & v = check_value_in(value); assert(v.type() == nix::nList); + if (ix >= v.listSize()) { + nix_set_err_msg(context, NIX_ERR_KEY, "list index out of bounds"); + return nullptr; + } auto * p = v.listView()[ix]; nix_gc_incref(nullptr, p); if (p != nullptr) @@ -389,6 +393,10 @@ nix_get_attr_byidx(nix_c_context * context, nix_value * value, EvalState * state try { auto & v = check_value_in(value); collapse_attrset_layer_chain_if_needed(v, state); + if (i >= v.attrs()->size()) { + nix_set_err_msg(context, NIX_ERR_KEY, "attribute index out of bounds"); + return nullptr; + } const nix::Attr & a = (*v.attrs())[i]; *name = state->state.symbols[a.name].c_str(); nix_gc_incref(nullptr, a.value); @@ -405,6 +413,10 @@ const char * nix_get_attr_name_byidx(nix_c_context * context, nix_value * value, try { auto & v = check_value_in(value); collapse_attrset_layer_chain_if_needed(v, state); + if (i >= v.attrs()->size()) { + nix_set_err_msg(context, NIX_ERR_KEY, "attribute index out of bounds (Nix C API contract violation)"); + return nullptr; + } const nix::Attr & a = (*v.attrs())[i]; return state->state.symbols[a.name].c_str(); } diff --git a/src/libexpr-tests/nix_api_value.cc b/src/libexpr-tests/nix_api_value.cc index af95224de..c74c3258f 100644 --- a/src/libexpr-tests/nix_api_value.cc +++ b/src/libexpr-tests/nix_api_value.cc @@ -162,6 +162,29 @@ TEST_F(nix_api_expr_test, nix_build_and_init_list) nix_gc_decref(ctx, intValue); } +TEST_F(nix_api_expr_test, nix_get_list_byidx_large_indices) +{ + // Create a small list to test extremely large out-of-bounds access + ListBuilder * builder = nix_make_list_builder(ctx, state, 2); + nix_value * intValue = nix_alloc_value(ctx, state); + nix_init_int(ctx, intValue, 42); + nix_list_builder_insert(ctx, builder, 0, intValue); + nix_list_builder_insert(ctx, builder, 1, intValue); + nix_make_list(ctx, builder, value); + nix_list_builder_free(builder); + + // Test extremely large indices that would definitely crash without bounds checking + ASSERT_EQ(nullptr, nix_get_list_byidx(ctx, value, state, 1000000)); + ASSERT_EQ(NIX_ERR_KEY, nix_err_code(ctx)); + ASSERT_EQ(nullptr, nix_get_list_byidx(ctx, value, state, UINT_MAX / 2)); + ASSERT_EQ(NIX_ERR_KEY, nix_err_code(ctx)); + ASSERT_EQ(nullptr, nix_get_list_byidx(ctx, value, state, UINT_MAX / 2 + 1000000)); + ASSERT_EQ(NIX_ERR_KEY, nix_err_code(ctx)); + + // Clean up + nix_gc_decref(ctx, intValue); +} + TEST_F(nix_api_expr_test, 
nix_build_and_init_attr_invalid) { ASSERT_EQ(nullptr, nix_get_attr_byname(ctx, nullptr, state, 0)); @@ -244,6 +267,38 @@ TEST_F(nix_api_expr_test, nix_build_and_init_attr) free(out_name); } +TEST_F(nix_api_expr_test, nix_get_attr_byidx_large_indices) +{ + // Create a small attribute set to test extremely large out-of-bounds access + const char ** out_name = (const char **) malloc(sizeof(char *)); + BindingsBuilder * builder = nix_make_bindings_builder(ctx, state, 2); + nix_value * intValue = nix_alloc_value(ctx, state); + nix_init_int(ctx, intValue, 42); + nix_bindings_builder_insert(ctx, builder, "test", intValue); + nix_make_attrs(ctx, value, builder); + nix_bindings_builder_free(builder); + + // Test extremely large indices that would definitely crash without bounds checking + ASSERT_EQ(nullptr, nix_get_attr_byidx(ctx, value, state, 1000000, out_name)); + ASSERT_EQ(NIX_ERR_KEY, nix_err_code(ctx)); + ASSERT_EQ(nullptr, nix_get_attr_byidx(ctx, value, state, UINT_MAX / 2, out_name)); + ASSERT_EQ(NIX_ERR_KEY, nix_err_code(ctx)); + ASSERT_EQ(nullptr, nix_get_attr_byidx(ctx, value, state, UINT_MAX / 2 + 1000000, out_name)); + ASSERT_EQ(NIX_ERR_KEY, nix_err_code(ctx)); + + // Test nix_get_attr_name_byidx with large indices too + ASSERT_EQ(nullptr, nix_get_attr_name_byidx(ctx, value, state, 1000000)); + ASSERT_EQ(NIX_ERR_KEY, nix_err_code(ctx)); + ASSERT_EQ(nullptr, nix_get_attr_name_byidx(ctx, value, state, UINT_MAX / 2)); + ASSERT_EQ(NIX_ERR_KEY, nix_err_code(ctx)); + ASSERT_EQ(nullptr, nix_get_attr_name_byidx(ctx, value, state, UINT_MAX / 2 + 1000000)); + ASSERT_EQ(NIX_ERR_KEY, nix_err_code(ctx)); + + // Clean up + nix_gc_decref(ctx, intValue); + free(out_name); +} + TEST_F(nix_api_expr_test, nix_value_init) { // Setup From 7c553a30a9e3efcb917752b1d9019ab464aeccdc Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Fri, 19 Sep 2025 22:20:20 +0200 Subject: [PATCH 005/332] C API: Improve nix_get_attr_name_byidx() doc --- src/libexpr-c/nix_api_value.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libexpr-c/nix_api_value.h b/src/libexpr-c/nix_api_value.h index ddff494b7..2c4e35b66 100644 --- a/src/libexpr-c/nix_api_value.h +++ b/src/libexpr-c/nix_api_value.h @@ -302,7 +302,7 @@ nix_get_attr_byidx(nix_c_context * context, nix_value * value, EvalState * state /** @brief Get an attribute name by index in the sorted bindings * - * Useful when you want the name but want to avoid evaluation. + * Returns the attribute name without forcing evaluation of the attribute's value. 
* * Owned by the nix EvalState * @param[out] context Optional, stores error information From 3d777eb37f42cb8b6cd88b6a7c6a846dcb8cbcff Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Fri, 19 Sep 2025 23:06:51 +0200 Subject: [PATCH 006/332] C API: Add lazy attribute value and list item accessors --- src/libexpr-c/nix_api_value.cc | 62 +++++++ src/libexpr-c/nix_api_value.h | 50 +++++- src/libexpr-tests/nix_api_value.cc | 272 +++++++++++++++++++++++++++++ 3 files changed, 382 insertions(+), 2 deletions(-) diff --git a/src/libexpr-c/nix_api_value.cc b/src/libexpr-c/nix_api_value.cc index 3442bf1a1..c58d4fe89 100644 --- a/src/libexpr-c/nix_api_value.cc +++ b/src/libexpr-c/nix_api_value.cc @@ -339,6 +339,26 @@ nix_value * nix_get_list_byidx(nix_c_context * context, const nix_value * value, NIXC_CATCH_ERRS_NULL } +nix_value * +nix_get_list_byidx_lazy(nix_c_context * context, const nix_value * value, EvalState * state, unsigned int ix) +{ + if (context) + context->last_err_code = NIX_OK; + try { + auto & v = check_value_in(value); + assert(v.type() == nix::nList); + if (ix >= v.listSize()) { + nix_set_err_msg(context, NIX_ERR_KEY, "list index out of bounds"); + return nullptr; + } + auto * p = v.listView()[ix]; + nix_gc_incref(nullptr, p); + // Note: intentionally NOT calling forceValue() to keep the element lazy + return as_nix_value_ptr(p); + } + NIXC_CATCH_ERRS_NULL +} + nix_value * nix_get_attr_byname(nix_c_context * context, const nix_value * value, EvalState * state, const char * name) { if (context) @@ -359,6 +379,27 @@ nix_value * nix_get_attr_byname(nix_c_context * context, const nix_value * value NIXC_CATCH_ERRS_NULL } +nix_value * +nix_get_attr_byname_lazy(nix_c_context * context, const nix_value * value, EvalState * state, const char * name) +{ + if (context) + context->last_err_code = NIX_OK; + try { + auto & v = check_value_in(value); + assert(v.type() == nix::nAttrs); + nix::Symbol s = state->state.symbols.create(name); + auto attr = v.attrs()->get(s); + if (attr) { + nix_gc_incref(nullptr, attr->value); + // Note: intentionally NOT calling forceValue() to keep the attribute lazy + return as_nix_value_ptr(attr->value); + } + nix_set_err_msg(context, NIX_ERR_KEY, "missing attribute"); + return nullptr; + } + NIXC_CATCH_ERRS_NULL +} + bool nix_has_attr_byname(nix_c_context * context, const nix_value * value, EvalState * state, const char * name) { if (context) @@ -406,6 +447,27 @@ nix_get_attr_byidx(nix_c_context * context, nix_value * value, EvalState * state NIXC_CATCH_ERRS_NULL } +nix_value * nix_get_attr_byidx_lazy( + nix_c_context * context, nix_value * value, EvalState * state, unsigned int i, const char ** name) +{ + if (context) + context->last_err_code = NIX_OK; + try { + auto & v = check_value_in(value); + collapse_attrset_layer_chain_if_needed(v, state); + if (i >= v.attrs()->size()) { + nix_set_err_msg(context, NIX_ERR_KEY, "attribute index out of bounds (Nix C API contract violation)"); + return nullptr; + } + const nix::Attr & a = (*v.attrs())[i]; + *name = state->state.symbols[a.name].c_str(); + nix_gc_incref(nullptr, a.value); + // Note: intentionally NOT calling forceValue() to keep the attribute lazy + return as_nix_value_ptr(a.value); + } + NIXC_CATCH_ERRS_NULL +} + const char * nix_get_attr_name_byidx(nix_c_context * context, nix_value * value, EvalState * state, unsigned int i) { if (context) diff --git a/src/libexpr-c/nix_api_value.h b/src/libexpr-c/nix_api_value.h index 2c4e35b66..38fede62b 100644 --- a/src/libexpr-c/nix_api_value.h +++ 
b/src/libexpr-c/nix_api_value.h @@ -265,10 +265,25 @@ ExternalValue * nix_get_external(nix_c_context * context, nix_value * value); */ nix_value * nix_get_list_byidx(nix_c_context * context, const nix_value * value, EvalState * state, unsigned int ix); -/** @brief Get an attr by name +/** @brief Get the ix'th element of a list without forcing evaluation of the element + * + * Returns the list element without forcing its evaluation, allowing access to lazy values. + * The list value itself must already be evaluated. * * Owned by the GC. Use nix_gc_decref when you're done with the pointer * @param[out] context Optional, stores error information + * @param[in] value Nix value to inspect (must be an evaluated list) + * @param[in] state nix evaluator state + * @param[in] ix list element to get + * @return value, NULL in case of errors + */ +nix_value * +nix_get_list_byidx_lazy(nix_c_context * context, const nix_value * value, EvalState * state, unsigned int ix); + +/** @brief Get an attr by name + * + * Use nix_gc_decref when you're done with the pointer + * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect * @param[in] state nix evaluator state * @param[in] name attribute name @@ -276,6 +291,21 @@ nix_value * nix_get_list_byidx(nix_c_context * context, const nix_value * value, */ nix_value * nix_get_attr_byname(nix_c_context * context, const nix_value * value, EvalState * state, const char * name); +/** @brief Get an attribute value by attribute name, without forcing evaluation of the attribute's value + * + * Returns the attribute value without forcing its evaluation, allowing access to lazy values. + * The attribute set value itself must already be evaluated. + * + * Use nix_gc_decref when you're done with the pointer + * @param[out] context Optional, stores error information + * @param[in] value Nix value to inspect (must be an evaluated attribute set) + * @param[in] state nix evaluator state + * @param[in] name attribute name + * @return value, NULL in case of errors + */ +nix_value * +nix_get_attr_byname_lazy(nix_c_context * context, const nix_value * value, EvalState * state, const char * name); + /** @brief Check if an attribute name exists on a value * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect @@ -289,7 +319,7 @@ bool nix_has_attr_byname(nix_c_context * context, const nix_value * value, EvalS * * Also gives you the name. * - * Owned by the GC. Use nix_gc_decref when you're done with the pointer + * Use nix_gc_decref when you're done with the pointer * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect * @param[in] state nix evaluator state @@ -300,6 +330,22 @@ bool nix_has_attr_byname(nix_c_context * context, const nix_value * value, EvalS nix_value * nix_get_attr_byidx(nix_c_context * context, nix_value * value, EvalState * state, unsigned int i, const char ** name); +/** @brief Get an attribute by index in the sorted bindings, without forcing evaluation of the attribute's value + * + * Also gives you the name. Returns the attribute value without forcing its evaluation, allowing access to lazy values. + * The attribute set value itself must already be evaluated. 
+ * + * Use nix_gc_decref when you're done with the pointer + * @param[out] context Optional, stores error information + * @param[in] value Nix value to inspect (must be an evaluated attribute set) + * @param[in] state nix evaluator state + * @param[in] i attribute index + * @param[out] name will store a pointer to the attribute name + * @return value, NULL in case of errors + */ +nix_value * nix_get_attr_byidx_lazy( + nix_c_context * context, nix_value * value, EvalState * state, unsigned int i, const char ** name); + /** @brief Get an attribute name by index in the sorted bindings * * Returns the attribute name without forcing evaluation of the attribute's value. diff --git a/src/libexpr-tests/nix_api_value.cc b/src/libexpr-tests/nix_api_value.cc index c74c3258f..830637f3e 100644 --- a/src/libexpr-tests/nix_api_value.cc +++ b/src/libexpr-tests/nix_api_value.cc @@ -185,6 +185,91 @@ TEST_F(nix_api_expr_test, nix_get_list_byidx_large_indices) nix_gc_decref(ctx, intValue); } +TEST_F(nix_api_expr_test, nix_get_list_byidx_lazy) +{ + // Create a list with a throwing lazy element, an already-evaluated int, and a lazy function call + + // 1. Throwing lazy element - create a function application thunk that will throw when forced + nix_value * throwingFn = nix_alloc_value(ctx, state); + nix_value * throwingValue = nix_alloc_value(ctx, state); + + nix_expr_eval_from_string( + ctx, + state, + R"( + _: throw "This should not be evaluated by the lazy accessor" + )", + "", + throwingFn); + assert_ctx_ok(); + + nix_init_apply(ctx, throwingValue, throwingFn, throwingFn); + assert_ctx_ok(); + + // 2. Already evaluated int (not lazy) + nix_value * intValue = nix_alloc_value(ctx, state); + nix_init_int(ctx, intValue, 42); + assert_ctx_ok(); + + // 3. Lazy function application that would compute increment 5 = 6 + nix_value * lazyApply = nix_alloc_value(ctx, state); + nix_value * incrementFn = nix_alloc_value(ctx, state); + nix_value * argFive = nix_alloc_value(ctx, state); + + nix_expr_eval_from_string(ctx, state, "x: x + 1", "", incrementFn); + assert_ctx_ok(); + nix_init_int(ctx, argFive, 5); + + // Create a lazy application: (x: x + 1) 5 + nix_init_apply(ctx, lazyApply, incrementFn, argFive); + assert_ctx_ok(); + + ListBuilder * builder = nix_make_list_builder(ctx, state, 3); + nix_list_builder_insert(ctx, builder, 0, throwingValue); + nix_list_builder_insert(ctx, builder, 1, intValue); + nix_list_builder_insert(ctx, builder, 2, lazyApply); + nix_make_list(ctx, builder, value); + nix_list_builder_free(builder); + + // Test 1: Lazy accessor should return the throwing element without forcing evaluation + nix_value * lazyThrowingElement = nix_get_list_byidx_lazy(ctx, value, state, 0); + assert_ctx_ok(); + ASSERT_NE(nullptr, lazyThrowingElement); + + // Verify the element is still lazy by checking that forcing it throws + nix_value_force(ctx, state, lazyThrowingElement); + assert_ctx_err(); + ASSERT_THAT( + nix_err_msg(nullptr, ctx, nullptr), testing::HasSubstr("This should not be evaluated by the lazy accessor")); + + // Test 2: Lazy accessor should return the already-evaluated int + nix_value * intElement = nix_get_list_byidx_lazy(ctx, value, state, 1); + assert_ctx_ok(); + ASSERT_NE(nullptr, intElement); + ASSERT_EQ(42, nix_get_int(ctx, intElement)); + + // Test 3: Lazy accessor should return the lazy function application without forcing + nix_value * lazyFunctionElement = nix_get_list_byidx_lazy(ctx, value, state, 2); + assert_ctx_ok(); + ASSERT_NE(nullptr, lazyFunctionElement); + + // Force the lazy 
function application - should compute 5 + 1 = 6 + nix_value_force(ctx, state, lazyFunctionElement); + assert_ctx_ok(); + ASSERT_EQ(6, nix_get_int(ctx, lazyFunctionElement)); + + // Clean up + nix_gc_decref(ctx, throwingFn); + nix_gc_decref(ctx, throwingValue); + nix_gc_decref(ctx, intValue); + nix_gc_decref(ctx, lazyApply); + nix_gc_decref(ctx, incrementFn); + nix_gc_decref(ctx, argFive); + nix_gc_decref(ctx, lazyThrowingElement); + nix_gc_decref(ctx, intElement); + nix_gc_decref(ctx, lazyFunctionElement); +} + TEST_F(nix_api_expr_test, nix_build_and_init_attr_invalid) { ASSERT_EQ(nullptr, nix_get_attr_byname(ctx, nullptr, state, 0)); @@ -299,6 +384,193 @@ TEST_F(nix_api_expr_test, nix_get_attr_byidx_large_indices) free(out_name); } +TEST_F(nix_api_expr_test, nix_get_attr_byname_lazy) +{ + // Create an attribute set with a throwing lazy attribute, an already-evaluated int, and a lazy function call + + // 1. Throwing lazy element - create a function application thunk that will throw when forced + nix_value * throwingFn = nix_alloc_value(ctx, state); + nix_value * throwingValue = nix_alloc_value(ctx, state); + + nix_expr_eval_from_string( + ctx, + state, + R"( + _: throw "This should not be evaluated by the lazy accessor" + )", + "", + throwingFn); + assert_ctx_ok(); + + nix_init_apply(ctx, throwingValue, throwingFn, throwingFn); + assert_ctx_ok(); + + // 2. Already evaluated int (not lazy) + nix_value * intValue = nix_alloc_value(ctx, state); + nix_init_int(ctx, intValue, 42); + assert_ctx_ok(); + + // 3. Lazy function application that would compute increment 7 = 8 + nix_value * lazyApply = nix_alloc_value(ctx, state); + nix_value * incrementFn = nix_alloc_value(ctx, state); + nix_value * argSeven = nix_alloc_value(ctx, state); + + nix_expr_eval_from_string(ctx, state, "x: x + 1", "", incrementFn); + assert_ctx_ok(); + nix_init_int(ctx, argSeven, 7); + + // Create a lazy application: (x: x + 1) 7 + nix_init_apply(ctx, lazyApply, incrementFn, argSeven); + assert_ctx_ok(); + + BindingsBuilder * builder = nix_make_bindings_builder(ctx, state, 3); + nix_bindings_builder_insert(ctx, builder, "throwing", throwingValue); + nix_bindings_builder_insert(ctx, builder, "normal", intValue); + nix_bindings_builder_insert(ctx, builder, "lazy", lazyApply); + nix_make_attrs(ctx, value, builder); + nix_bindings_builder_free(builder); + + // Test 1: Lazy accessor should return the throwing attribute without forcing evaluation + nix_value * lazyThrowingAttr = nix_get_attr_byname_lazy(ctx, value, state, "throwing"); + assert_ctx_ok(); + ASSERT_NE(nullptr, lazyThrowingAttr); + + // Verify the attribute is still lazy by checking that forcing it throws + nix_value_force(ctx, state, lazyThrowingAttr); + assert_ctx_err(); + ASSERT_THAT( + nix_err_msg(nullptr, ctx, nullptr), testing::HasSubstr("This should not be evaluated by the lazy accessor")); + + // Test 2: Lazy accessor should return the already-evaluated int + nix_value * intAttr = nix_get_attr_byname_lazy(ctx, value, state, "normal"); + assert_ctx_ok(); + ASSERT_NE(nullptr, intAttr); + ASSERT_EQ(42, nix_get_int(ctx, intAttr)); + + // Test 3: Lazy accessor should return the lazy function application without forcing + nix_value * lazyFunctionAttr = nix_get_attr_byname_lazy(ctx, value, state, "lazy"); + assert_ctx_ok(); + ASSERT_NE(nullptr, lazyFunctionAttr); + + // Force the lazy function application - should compute 7 + 1 = 8 + nix_value_force(ctx, state, lazyFunctionAttr); + assert_ctx_ok(); + ASSERT_EQ(8, nix_get_int(ctx, lazyFunctionAttr)); + + // Test 4: 
Missing attribute should return NULL with NIX_ERR_KEY + nix_value * missingAttr = nix_get_attr_byname_lazy(ctx, value, state, "nonexistent"); + ASSERT_EQ(nullptr, missingAttr); + ASSERT_EQ(NIX_ERR_KEY, nix_err_code(ctx)); + + // Clean up + nix_gc_decref(ctx, throwingFn); + nix_gc_decref(ctx, throwingValue); + nix_gc_decref(ctx, intValue); + nix_gc_decref(ctx, lazyApply); + nix_gc_decref(ctx, incrementFn); + nix_gc_decref(ctx, argSeven); + nix_gc_decref(ctx, lazyThrowingAttr); + nix_gc_decref(ctx, intAttr); + nix_gc_decref(ctx, lazyFunctionAttr); +} + +TEST_F(nix_api_expr_test, nix_get_attr_byidx_lazy) +{ + // Create an attribute set with a throwing lazy attribute, an already-evaluated int, and a lazy function call + + // 1. Throwing lazy element - create a function application thunk that will throw when forced + nix_value * throwingFn = nix_alloc_value(ctx, state); + nix_value * throwingValue = nix_alloc_value(ctx, state); + + nix_expr_eval_from_string( + ctx, + state, + R"( + _: throw "This should not be evaluated by the lazy accessor" + )", + "", + throwingFn); + assert_ctx_ok(); + + nix_init_apply(ctx, throwingValue, throwingFn, throwingFn); + assert_ctx_ok(); + + // 2. Already evaluated int (not lazy) + nix_value * intValue = nix_alloc_value(ctx, state); + nix_init_int(ctx, intValue, 99); + assert_ctx_ok(); + + // 3. Lazy function application that would compute increment 10 = 11 + nix_value * lazyApply = nix_alloc_value(ctx, state); + nix_value * incrementFn = nix_alloc_value(ctx, state); + nix_value * argTen = nix_alloc_value(ctx, state); + + nix_expr_eval_from_string(ctx, state, "x: x + 1", "", incrementFn); + assert_ctx_ok(); + nix_init_int(ctx, argTen, 10); + + // Create a lazy application: (x: x + 1) 10 + nix_init_apply(ctx, lazyApply, incrementFn, argTen); + assert_ctx_ok(); + + BindingsBuilder * builder = nix_make_bindings_builder(ctx, state, 3); + nix_bindings_builder_insert(ctx, builder, "a_throwing", throwingValue); + nix_bindings_builder_insert(ctx, builder, "b_normal", intValue); + nix_bindings_builder_insert(ctx, builder, "c_lazy", lazyApply); + nix_make_attrs(ctx, value, builder); + nix_bindings_builder_free(builder); + + // Proper usage: first get the size and gather all attributes into a map + unsigned int attrCount = nix_get_attrs_size(ctx, value); + assert_ctx_ok(); + ASSERT_EQ(3u, attrCount); + + // Gather all attributes into a map (proper contract usage) + std::map attrMap; + const char * name; + + for (unsigned int i = 0; i < attrCount; i++) { + nix_value * attr = nix_get_attr_byidx_lazy(ctx, value, state, i, &name); + assert_ctx_ok(); + ASSERT_NE(nullptr, attr); + attrMap[std::string(name)] = attr; + } + + // Now test the gathered attributes + ASSERT_EQ(3u, attrMap.size()); + ASSERT_TRUE(attrMap.count("a_throwing")); + ASSERT_TRUE(attrMap.count("b_normal")); + ASSERT_TRUE(attrMap.count("c_lazy")); + + // Test 1: Throwing attribute should be lazy + nix_value * throwingAttr = attrMap["a_throwing"]; + nix_value_force(ctx, state, throwingAttr); + assert_ctx_err(); + ASSERT_THAT( + nix_err_msg(nullptr, ctx, nullptr), testing::HasSubstr("This should not be evaluated by the lazy accessor")); + + // Test 2: Normal attribute should be already evaluated + nix_value * normalAttr = attrMap["b_normal"]; + ASSERT_EQ(99, nix_get_int(ctx, normalAttr)); + + // Test 3: Lazy function should compute when forced + nix_value * lazyAttr = attrMap["c_lazy"]; + nix_value_force(ctx, state, lazyAttr); + assert_ctx_ok(); + ASSERT_EQ(11, nix_get_int(ctx, lazyAttr)); + + // Clean up + 
nix_gc_decref(ctx, throwingFn); + nix_gc_decref(ctx, throwingValue); + nix_gc_decref(ctx, intValue); + nix_gc_decref(ctx, lazyApply); + nix_gc_decref(ctx, incrementFn); + nix_gc_decref(ctx, argTen); + for (auto & pair : attrMap) { + nix_gc_decref(ctx, pair.second); + } +} + TEST_F(nix_api_expr_test, nix_value_init) { // Setup From 2d1b412e5b34aa17b6012a484621e8a6e5e10679 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Fri, 19 Sep 2025 23:35:31 +0200 Subject: [PATCH 007/332] libexpr-tests: Enable when test setup for building succeeds Accidentally disabled by 9bc218ca3fc98889719684abba73b5d8a168cf3c --- src/libexpr-tests/main.cc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/libexpr-tests/main.cc b/src/libexpr-tests/main.cc index d6b0d0ab9..88a9d6684 100644 --- a/src/libexpr-tests/main.cc +++ b/src/libexpr-tests/main.cc @@ -1,15 +1,19 @@ #include #include "nix/store/tests/test-main.hh" +#include "nix/util/config-global.hh" using namespace nix; int main(int argc, char ** argv) { auto res = testMainForBuidingPre(argc, argv); - if (!res) + if (res) return res; + // For pipe operator tests in trivial.cc + experimentalFeatureSettings.set("experimental-features", "pipe-operators"); + ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } From d0b1caf53af6fb648b0c5b3d5d3dbac0a9a1b611 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Sat, 20 Sep 2025 00:13:50 +0200 Subject: [PATCH 008/332] C API: Document and verify NIX_ERR_KEY behavior --- src/libexpr-tests/nix_api_expr.cc | 49 +++++++++++++++++++++++++++++++ src/libutil-c/nix_api_util.h | 19 +++++++++--- 2 files changed, 64 insertions(+), 4 deletions(-) diff --git a/src/libexpr-tests/nix_api_expr.cc b/src/libexpr-tests/nix_api_expr.cc index dce8c6cb9..de508b4e4 100644 --- a/src/libexpr-tests/nix_api_expr.cc +++ b/src/libexpr-tests/nix_api_expr.cc @@ -423,6 +423,55 @@ TEST_F(nix_api_expr_test, nix_expr_primop_bad_return_thunk) ASSERT_THAT(nix_err_msg(nullptr, ctx, nullptr), testing::HasSubstr("badReturnThunk")); } +static void primop_with_nix_err_key( + void * user_data, nix_c_context * context, EvalState * state, nix_value ** args, nix_value * ret) +{ + nix_set_err_msg(context, NIX_ERR_KEY, "Test error from primop"); +} + +TEST_F(nix_api_expr_test, nix_expr_primop_nix_err_key_conversion) +{ + // Test that NIX_ERR_KEY from a custom primop gets converted to a generic EvalError + // + // RATIONALE: NIX_ERR_KEY must not be propagated from custom primops because it would + // create semantic confusion. NIX_ERR_KEY indicates missing keys/indices in C API functions + // (like nix_get_attr_byname, nix_get_list_byidx). If custom primops could return NIX_ERR_KEY, + // an evaluation error would be indistinguishable from an actual missing attribute. + // + // For example, if nix_get_attr_byname returned NIX_ERR_KEY when the attribute is present + // but the value evaluation fails, callers expecting NIX_ERR_KEY to mean "missing attribute" + // would incorrectly handle evaluation failures as missing attributes. In places where + // missing attributes are tolerated (like optional attributes), this would cause the + // program to continue after swallowing the error, leading to silent failures. 
+ PrimOp * primop = nix_alloc_primop( + ctx, primop_with_nix_err_key, 1, "testErrorPrimop", nullptr, "a test primop that sets NIX_ERR_KEY", nullptr); + assert_ctx_ok(); + nix_value * primopValue = nix_alloc_value(ctx, state); + assert_ctx_ok(); + nix_init_primop(ctx, primopValue, primop); + assert_ctx_ok(); + + nix_value * arg = nix_alloc_value(ctx, state); + assert_ctx_ok(); + nix_init_int(ctx, arg, 42); + assert_ctx_ok(); + + nix_value * result = nix_alloc_value(ctx, state); + assert_ctx_ok(); + nix_value_call(ctx, state, primopValue, arg, result); + + // Verify that NIX_ERR_KEY gets converted to NIX_ERR_NIX_ERROR (generic evaluation error) + ASSERT_EQ(nix_err_code(ctx), NIX_ERR_NIX_ERROR); + ASSERT_THAT(nix_err_msg(nullptr, ctx, nullptr), testing::HasSubstr("Error from custom function")); + ASSERT_THAT(nix_err_msg(nullptr, ctx, nullptr), testing::HasSubstr("Test error from primop")); + ASSERT_THAT(nix_err_msg(nullptr, ctx, nullptr), testing::HasSubstr("testErrorPrimop")); + + // Clean up + nix_gc_decref(ctx, primopValue); + nix_gc_decref(ctx, arg); + nix_gc_decref(ctx, result); +} + TEST_F(nix_api_expr_test, nix_value_call_multi_no_args) { nix_value * n = nix_alloc_value(ctx, state); diff --git a/src/libutil-c/nix_api_util.h b/src/libutil-c/nix_api_util.h index 5f42641d4..eaa07c9de 100644 --- a/src/libutil-c/nix_api_util.h +++ b/src/libutil-c/nix_api_util.h @@ -53,7 +53,7 @@ extern "C" { * - NIX_OK: No error occurred (0) * - NIX_ERR_UNKNOWN: An unknown error occurred (-1) * - NIX_ERR_OVERFLOW: An overflow error occurred (-2) - * - NIX_ERR_KEY: A key error occurred (-3) + * - NIX_ERR_KEY: A key/index access error occurred in C API functions (-3) * - NIX_ERR_NIX_ERROR: A generic Nix error occurred (-4) */ enum nix_err { @@ -83,10 +83,21 @@ enum nix_err { NIX_ERR_OVERFLOW = -2, /** - * @brief A key error occurred. + * @brief A key/index access error occurred in C API functions. * - * This error code is returned when a key error occurred during the function - * execution. + * This error code is returned when accessing a key, index, or identifier that + * does not exist in C API functions. Common scenarios include: + * - Setting keys that don't exist (nix_setting_get, nix_setting_set) + * - List indices that are out of bounds (nix_get_list_byidx*) + * - Attribute names that don't exist (nix_get_attr_byname*) + * - Attribute indices that are out of bounds (nix_get_attr_byidx*, nix_get_attr_name_byidx) + * + * This error typically indicates incorrect usage or assumptions about data structure + * contents, rather than internal Nix evaluation errors. + * + * @note This error code should ONLY be returned by C API functions themselves, + * not by underlying Nix evaluation. For example, evaluating `{}.foo` in Nix + * will throw a normal error (NIX_ERR_NIX_ERROR), not NIX_ERR_KEY. */ NIX_ERR_KEY = -3, From e04381edbdc8b344eac36da339ff504060ed1b0d Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sun, 21 Sep 2025 01:12:42 +0300 Subject: [PATCH 009/332] libfetchers/github: Use getFSAccessor for downloadFile result We should use proper abstractions for reading files from the store. E.g. this caused errors when trying to download github flakes into an in-memory store in #14023. 
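For context, the essence of this change (a simplified sketch of the diff below, error handling omitted): reading through the store's FS accessor instead of `store->toRealPath()` works even when the store has no on-disk realisation, such as the in-memory dummy store exercised later in this series.

    // Read a store object through the store's own accessor abstraction,
    // rather than assuming it exists as a file under the local store prefix.
    auto accessor = store->getFSAccessor();
    auto downloadResult = downloadFile(store, *input.settings, url, "source", headers);
    auto json = nlohmann::json::parse(
        accessor->readFile(CanonPath(downloadResult.storePath.to_string())));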
--- src/libfetchers/github.cc | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/libfetchers/github.cc b/src/libfetchers/github.cc index 723c075f2..15a19021d 100644 --- a/src/libfetchers/github.cc +++ b/src/libfetchers/github.cc @@ -398,8 +398,9 @@ struct GitHubInputScheme : GitArchiveInputScheme Headers headers = makeHeadersWithAuthTokens(*input.settings, host, input); - auto json = nlohmann::json::parse( - readFile(store->toRealPath(downloadFile(store, *input.settings, url, "source", headers).storePath))); + auto accessor = store->getFSAccessor(); + auto downloadResult = downloadFile(store, *input.settings, url, "source", headers); + auto json = nlohmann::json::parse(accessor->readFile(CanonPath(downloadResult.storePath.to_string()))); return RefInfo{ .rev = Hash::parseAny(std::string{json["sha"]}, HashAlgorithm::SHA1), @@ -472,8 +473,9 @@ struct GitLabInputScheme : GitArchiveInputScheme Headers headers = makeHeadersWithAuthTokens(*input.settings, host, input); - auto json = nlohmann::json::parse( - readFile(store->toRealPath(downloadFile(store, *input.settings, url, "source", headers).storePath))); + auto accessor = store->getFSAccessor(); + auto downloadResult = downloadFile(store, *input.settings, url, "source", headers); + auto json = nlohmann::json::parse(accessor->readFile(CanonPath(downloadResult.storePath.to_string()))); if (json.is_array() && json.size() >= 1 && json[0]["id"] != nullptr) { return RefInfo{.rev = Hash::parseAny(std::string(json[0]["id"]), HashAlgorithm::SHA1)}; From 4df60e639b7e492ac5f651f2b3aa02055de5549a Mon Sep 17 00:00:00 2001 From: John Ericson Date: Fri, 19 Sep 2025 12:09:46 -0400 Subject: [PATCH 010/332] Use shared pointers in the memory source accessor This allows aliasing, like hard links. --- src/libutil-tests/git.cc | 16 ++++++++-------- .../include/nix/util/memory-source-accessor.hh | 16 ++++++++++++---- src/libutil/memory-source-accessor.cc | 6 +++--- 3 files changed, 23 insertions(+), 15 deletions(-) diff --git a/src/libutil-tests/git.cc b/src/libutil-tests/git.cc index 6180a4cfc..a06c5896d 100644 --- a/src/libutil-tests/git.cc +++ b/src/libutil-tests/git.cc @@ -233,30 +233,30 @@ TEST_F(GitTest, both_roundrip) .contents{ { "foo", - File::Regular{ + make_ref(File::Regular{ .contents = "hello\n\0\n\tworld!", - }, + }), }, { "bar", - File::Directory{ + make_ref(File::Directory{ .contents = { { "baz", - File::Regular{ + make_ref(File::Regular{ .executable = true, .contents = "good day,\n\0\n\tworld!", - }, + }), }, { "quux", - File::Symlink{ + make_ref(File::Symlink{ .target = "/over/there", - }, + }), }, }, - }, + }), }, }, }; diff --git a/src/libutil/include/nix/util/memory-source-accessor.hh b/src/libutil/include/nix/util/memory-source-accessor.hh index 98c193800..be1d17665 100644 --- a/src/libutil/include/nix/util/memory-source-accessor.hh +++ b/src/libutil/include/nix/util/memory-source-accessor.hh @@ -35,7 +35,7 @@ struct MemorySourceAccessor : virtual SourceAccessor { using Name = std::string; - std::map> contents; + std::map, std::less<>> contents; bool operator==(const Directory &) const noexcept; // TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet. 
@@ -89,13 +89,21 @@ struct MemorySourceAccessor : virtual SourceAccessor SourcePath addFile(CanonPath path, std::string && contents); }; -inline bool MemorySourceAccessor::File::Directory::operator==( - const MemorySourceAccessor::File::Directory &) const noexcept = default; +inline bool +MemorySourceAccessor::File::Directory::operator==(const MemorySourceAccessor::File::Directory & other) const noexcept +{ + return std::ranges::equal(contents, other.contents, [](const auto & lhs, const auto & rhs) -> bool { + return lhs.first == rhs.first && *lhs.second == *rhs.second; + }); +}; inline bool MemorySourceAccessor::File::Directory::operator<(const MemorySourceAccessor::File::Directory & other) const noexcept { - return contents < other.contents; + return std::ranges::lexicographical_compare( + contents, other.contents, [](const auto & lhs, const auto & rhs) -> bool { + return lhs.first < rhs.first && *lhs.second < *rhs.second; + }); } inline bool MemorySourceAccessor::File::operator==(const MemorySourceAccessor::File &) const noexcept = default; diff --git a/src/libutil/memory-source-accessor.cc b/src/libutil/memory-source-accessor.cc index 363f52a54..c25079497 100644 --- a/src/libutil/memory-source-accessor.cc +++ b/src/libutil/memory-source-accessor.cc @@ -24,11 +24,11 @@ MemorySourceAccessor::File * MemorySourceAccessor::open(const CanonPath & path, i, { std::string{name}, - File::Directory{}, + make_ref(File::Directory{}), }); } } - cur = &i->second; + cur = &*i->second; } if (newF && create) @@ -92,7 +92,7 @@ MemorySourceAccessor::DirEntries MemorySourceAccessor::readDirectory(const Canon if (auto * d = std::get_if(&f->raw)) { DirEntries res; for (auto & [name, file] : d->contents) - res.insert_or_assign(name, file.lstat().type); + res.insert_or_assign(name, file->lstat().type); return res; } else throw Error("file '%s' is not a directory", path); From 02c9ac445ff527a7b4c5105d20d9ab401117dcee Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sat, 20 Sep 2025 11:25:23 +0300 Subject: [PATCH 011/332] libutil: Improve handling of non-directory root in MemorySourceAccessor --- .../nix/util/memory-source-accessor.hh | 2 +- src/libutil/memory-source-accessor.cc | 21 ++++++++++++++++++- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/src/libutil/include/nix/util/memory-source-accessor.hh b/src/libutil/include/nix/util/memory-source-accessor.hh index be1d17665..53f1b0241 100644 --- a/src/libutil/include/nix/util/memory-source-accessor.hh +++ b/src/libutil/include/nix/util/memory-source-accessor.hh @@ -58,7 +58,7 @@ struct MemorySourceAccessor : virtual SourceAccessor Stat lstat() const; }; - File root{File::Directory{}}; + std::optional root; bool operator==(const MemorySourceAccessor &) const noexcept = default; diff --git a/src/libutil/memory-source-accessor.cc b/src/libutil/memory-source-accessor.cc index c25079497..7d53d6785 100644 --- a/src/libutil/memory-source-accessor.cc +++ b/src/libutil/memory-source-accessor.cc @@ -4,7 +4,22 @@ namespace nix { MemorySourceAccessor::File * MemorySourceAccessor::open(const CanonPath & path, std::optional create) { - File * cur = &root; + bool hasRoot = root.has_value(); + + // Special handling of root directory. + if (path.isRoot() && !hasRoot) { + if (create) { + root = std::move(*create); + return &root.value(); + } + return nullptr; + } + + // Root does not exist. 
+ if (!hasRoot) + return nullptr; + + File * cur = &root.value(); bool newF = false; @@ -112,6 +127,10 @@ std::string MemorySourceAccessor::readLink(const CanonPath & path) SourcePath MemorySourceAccessor::addFile(CanonPath path, std::string && contents) { + // Create root directory automatically if necessary as a convenience. + if (!root && !path.isRoot()) + open(CanonPath::root, File::Directory{}); + auto * f = open(path, File{File::Regular{}}); if (!f) throw Error("file '%s' cannot be made because some parent file is not a directory", path); From 341878ce0fe7d264acc4425d1685f924b17e0b29 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sat, 20 Sep 2025 12:09:45 +0300 Subject: [PATCH 012/332] libstore: Make dummy store also store path info --- src/libstore/dummy-store.cc | 105 ++++++++++++++++++++++++++++++------ 1 file changed, 88 insertions(+), 17 deletions(-) diff --git a/src/libstore/dummy-store.cc b/src/libstore/dummy-store.cc index 1cd1fd08c..12c55472c 100644 --- a/src/libstore/dummy-store.cc +++ b/src/libstore/dummy-store.cc @@ -19,20 +19,43 @@ struct DummyStore : virtual Store ref config; - ref contents; + struct PathInfoAndContents + { + UnkeyedValidPathInfo info; + ref contents; + }; + + /** + * This is map conceptually owns the file system objects for each + * store object. + */ + std::map contents; + + /** + * This view conceptually just borrows the file systems objects of + * each store object from `contents`, and combines them together + * into one store-wide source accessor. + * + * This is needed just in order to implement `Store::getFSAccessor`. + */ + ref wholeStoreView = make_ref(); DummyStore(ref config) : Store{*config} , config(config) - , contents(make_ref()) { - contents->setPathDisplay(config->storeDir); + wholeStoreView->setPathDisplay(config->storeDir); + MemorySink sink{*wholeStoreView}; + sink.createDirectory(CanonPath::root); } void queryPathInfoUncached( const StorePath & path, Callback> callback) noexcept override { - callback(nullptr); + if (auto it = contents.find(path); it != contents.end()) + callback(std::make_shared(StorePath{path}, it->second.info)); + else + callback(nullptr); } /** @@ -50,7 +73,33 @@ struct DummyStore : virtual Store void addToStore(const ValidPathInfo & info, Source & source, RepairFlag repair, CheckSigsFlag checkSigs) override { - unsupported("addToStore"); + if (config->readOnly) + unsupported("addToStore"); + + if (repair) + throw Error("repairing is not supported for '%s' store", config->getHumanReadableURI()); + + if (checkSigs) + throw Error("checking signatures is not supported for '%s' store", config->getHumanReadableURI()); + + auto temp = make_ref(); + MemorySink tempSink{*temp}; + parseDump(tempSink, source); + auto path = info.path; + + auto [it, _] = contents.insert({ + path, + { + std::move(info), + make_ref(std::move(*temp)), + }, + }); + + auto & pathAndContents = it->second; + + bool inserted = wholeStoreView->open(CanonPath(path.to_string()), pathAndContents.contents->root); + if (!inserted) + unreachable(); } StorePath addToStoreFromDump( @@ -65,6 +114,9 @@ struct DummyStore : virtual Store if (config->readOnly) unsupported("addToStoreFromDump"); + if (repair) + throw Error("repairing is not supported for '%s' store", config->getHumanReadableURI()); + auto temp = make_ref(); { @@ -85,22 +137,41 @@ struct DummyStore : virtual Store } auto hash = hashPath({temp, CanonPath::root}, hashMethod.getFileIngestionMethod(), hashAlgo).first; + auto narHash = hashPath({temp, CanonPath::root}, 
FileIngestionMethod::NixArchive, HashAlgorithm::SHA256); - auto desc = ContentAddressWithReferences::fromParts( - hashMethod, - hash, + auto info = ValidPathInfo::makeFromCA( + *this, + name, + ContentAddressWithReferences::fromParts( + hashMethod, + std::move(hash), + { + .others = references, + // caller is not capable of creating a self-reference, because + // this is content-addressed without modulus + .self = false, + }), + std::move(narHash.first)); + + info.narSize = narHash.second.value(); + + auto path = info.path; + + auto [it, _] = contents.insert({ + path, { - .others = references, - // caller is not capable of creating a self-reference, because - // this is content-addressed without modulus - .self = false, - }); + std::move(info), + make_ref(std::move(*temp)), + }, + }); - auto dstPath = makeFixedOutputPathFromCA(name, desc); + auto & pathAndContents = it->second; - contents->open(CanonPath(printStorePath(dstPath)), std::move(temp->root)); + bool inserted = wholeStoreView->open(CanonPath(path.to_string()), pathAndContents.contents->root); + if (!inserted) + unreachable(); - return dstPath; + return path; } void narFromPath(const StorePath & path, Sink & sink) override @@ -116,7 +187,7 @@ struct DummyStore : virtual Store virtual ref getFSAccessor(bool requireValidPath) override { - return this->contents; + return wholeStoreView; } }; From ed9b377928bf94ae80ef6245f4b50583eacd2db6 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sat, 20 Sep 2025 12:10:48 +0300 Subject: [PATCH 013/332] libstore: Disable path info cache for dummy store --- src/libstore/include/nix/store/dummy-store.hh | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/libstore/include/nix/store/dummy-store.hh b/src/libstore/include/nix/store/dummy-store.hh index 0a15667b6..4898e8a5b 100644 --- a/src/libstore/include/nix/store/dummy-store.hh +++ b/src/libstore/include/nix/store/dummy-store.hh @@ -4,10 +4,15 @@ namespace nix { struct DummyStoreConfig : public std::enable_shared_from_this, virtual StoreConfig { - using StoreConfig::StoreConfig; + DummyStoreConfig(const Params & params) + : StoreConfig(params) + { + // Disable caching since this a temporary in-memory store. + pathInfoCacheSize = 0; + } DummyStoreConfig(std::string_view scheme, std::string_view authority, const Params & params) - : StoreConfig(params) + : DummyStoreConfig(params) { if (!authority.empty()) throw UsageError("`%s` store URIs must not contain an authority part %s", scheme, authority); From 3a4c618483342b64c01e8598ea2d09a4b61e98c3 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sat, 20 Sep 2025 12:31:26 +0300 Subject: [PATCH 014/332] libstore: Fix typo in description of dummy store --- src/libstore/dummy-store.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstore/dummy-store.md b/src/libstore/dummy-store.md index 3cbec3b3a..3ba96fecb 100644 --- a/src/libstore/dummy-store.md +++ b/src/libstore/dummy-store.md @@ -4,7 +4,7 @@ R"( This store type represents a store in memory. Store objects can be read and written, but only so long as the store is open. -Once the store is closed, all data will be forgoton. +Once the store is closed, all data will be discarded. It's useful when you want to use the Nix evaluator when no actual Nix store exists, e.g. 
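As a usage note for the in-memory store described above, here is a minimal sketch (not part of these patches; the header name and the exact `openStore` overload are assumptions) of opening it in writable mode from C++, mirroring the `dummy://?read-only=false` URI used by the tests later in this series:

    #include "nix/store/store-api.hh"  // assumed header providing nix::openStore

    void inMemoryStoreExample()
    {
        // Store objects written through this handle live only in memory and
        // are discarded when the store is closed; nothing touches /nix/store.
        auto store = nix::openStore("dummy://?read-only=false");
    }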
From b66c357b5833f8e44ca12de7a766ef8691d6279e Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sat, 20 Sep 2025 17:36:35 +0300 Subject: [PATCH 015/332] libstore: Implement DummyStore::narFromPath --- src/libstore/dummy-store.cc | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/libstore/dummy-store.cc b/src/libstore/dummy-store.cc index 12c55472c..06b518c15 100644 --- a/src/libstore/dummy-store.cc +++ b/src/libstore/dummy-store.cc @@ -176,7 +176,13 @@ struct DummyStore : virtual Store void narFromPath(const StorePath & path, Sink & sink) override { - unsupported("narFromPath"); + auto object = contents.find(path); + if (object == contents.end()) + throw Error("path '%s' is not valid", printStorePath(path)); + + const auto & [info, accessor] = object->second; + SourcePath sourcePath(accessor); + dumpPath(sourcePath, sink, FileSerialisationMethod::NixArchive); } void From a453a49043999fcdb726e5d95634914de9234fb7 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sun, 21 Sep 2025 13:36:31 +0300 Subject: [PATCH 016/332] tests: Tests for writeable dummy in-memory store --- tests/functional/eval-store.sh | 4 ++++ tests/functional/flakes/flakes.sh | 1 + tests/nixos/github-flakes.nix | 1 + 3 files changed, 6 insertions(+) diff --git a/tests/functional/eval-store.sh b/tests/functional/eval-store.sh index 202e7b004..92faa4005 100755 --- a/tests/functional/eval-store.sh +++ b/tests/functional/eval-store.sh @@ -52,3 +52,7 @@ rm -rf "$eval_store" [[ $(nix eval --eval-store "$eval_store?require-sigs=false" --impure --raw --file ./ifd.nix) = hi ]] ls $NIX_STORE_DIR/*dependencies-top/foobar (! ls $eval_store/nix/store/*dependencies-top/foobar) + +# Can't write .drv by default +(! nix-instantiate dependencies.nix --eval-store "dummy://") +nix-instantiate dependencies.nix --eval-store "dummy://?read-only=false" diff --git a/tests/functional/flakes/flakes.sh b/tests/functional/flakes/flakes.sh index 7b5be112e..97d238654 100755 --- a/tests/functional/flakes/flakes.sh +++ b/tests/functional/flakes/flakes.sh @@ -406,6 +406,7 @@ nix flake update flake1 flake2/flake1 --flake "$flake3Dir" # Test 'nix flake metadata --json'. nix flake metadata "$flake3Dir" --json | jq . +nix flake metadata "$flake3Dir" --json --eval-store "dummy://?read-only=false" | jq . # Test flake in store does not evaluate. rm -rf $badFlakeDir diff --git a/tests/nixos/github-flakes.nix b/tests/nixos/github-flakes.nix index 91fd6b062..d14cd9d0c 100644 --- a/tests/nixos/github-flakes.nix +++ b/tests/nixos/github-flakes.nix @@ -219,6 +219,7 @@ in client.succeed("nix registry pin nixpkgs") client.succeed("nix flake metadata nixpkgs --tarball-ttl 0 >&2") + client.succeed("nix eval nixpkgs#hello --eval-store dummy://?read-only=false >&2") # Test fetchTree on a github URL. 
hash = client.succeed(f"nix eval --no-trust-tarballs-from-git-forges --raw --expr '(fetchTree {info['url']}).narHash'") From c121c6564052381c8067ce5c31d5418e968f69e5 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Sat, 20 Sep 2025 00:47:00 +0200 Subject: [PATCH 017/332] C API: Clarify valid use of bindings ordering --- src/libexpr-c/nix_api_value.h | 42 ++++++++++++++++++++++++++++++----- 1 file changed, 37 insertions(+), 5 deletions(-) diff --git a/src/libexpr-c/nix_api_value.h b/src/libexpr-c/nix_api_value.h index 38fede62b..835eaec6e 100644 --- a/src/libexpr-c/nix_api_value.h +++ b/src/libexpr-c/nix_api_value.h @@ -315,10 +315,20 @@ nix_get_attr_byname_lazy(nix_c_context * context, const nix_value * value, EvalS */ bool nix_has_attr_byname(nix_c_context * context, const nix_value * value, EvalState * state, const char * name); -/** @brief Get an attribute by index in the sorted bindings +/** @brief Get an attribute by index * * Also gives you the name. * + * Attributes are returned in an unspecified order which is NOT suitable for + * reproducible operations. In Nix's domain, reproducibility is paramount. The caller + * is responsible for sorting the attributes or storing them in an ordered map to + * ensure deterministic behavior in your application. + * + * @note When Nix does sort attributes, which it does for virtually all intermediate + * operations and outputs, it uses byte-wise lexicographic order (equivalent to + * lexicographic order by Unicode scalar value for valid UTF-8). We recommend + * applying this same ordering for consistency. + * * Use nix_gc_decref when you're done with the pointer * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect @@ -330,10 +340,22 @@ bool nix_has_attr_byname(nix_c_context * context, const nix_value * value, EvalS nix_value * nix_get_attr_byidx(nix_c_context * context, nix_value * value, EvalState * state, unsigned int i, const char ** name); -/** @brief Get an attribute by index in the sorted bindings, without forcing evaluation of the attribute's value +/** @brief Get an attribute by index, without forcing evaluation of the attribute's value * - * Also gives you the name. Returns the attribute value without forcing its evaluation, allowing access to lazy values. - * The attribute set value itself must already be evaluated. + * Also gives you the name. + * + * Returns the attribute value without forcing its evaluation, allowing access to lazy values. + * The attribute set value itself must already have been evaluated. + * + * Attributes are returned in an unspecified order which is NOT suitable for + * reproducible operations. In Nix's domain, reproducibility is paramount. The caller + * is responsible for sorting the attributes or storing them in an ordered map to + * ensure deterministic behavior in your application. + * + * @note When Nix does sort attributes, which it does for virtually all intermediate + * operations and outputs, it uses byte-wise lexicographic order (equivalent to + * lexicographic order by Unicode scalar value for valid UTF-8). We recommend + * applying this same ordering for consistency. 
* * Use nix_gc_decref when you're done with the pointer * @param[out] context Optional, stores error information @@ -346,10 +368,20 @@ nix_get_attr_byidx(nix_c_context * context, nix_value * value, EvalState * state nix_value * nix_get_attr_byidx_lazy( nix_c_context * context, nix_value * value, EvalState * state, unsigned int i, const char ** name); -/** @brief Get an attribute name by index in the sorted bindings +/** @brief Get an attribute name by index * * Returns the attribute name without forcing evaluation of the attribute's value. * + * Attributes are returned in an unspecified order which is NOT suitable for + * reproducible operations. In Nix's domain, reproducibility is paramount. The caller + * is responsible for sorting the attributes or storing them in an ordered map to + * ensure deterministic behavior in your application. + * + * @note When Nix does sort attributes, which it does for virtually all intermediate + * operations and outputs, it uses byte-wise lexicographic order (equivalent to + * lexicographic order by Unicode scalar value for valid UTF-8). We recommend + * applying this same ordering for consistency. + * * Owned by the nix EvalState * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect From 71b27774f0aa7fef1a99256dcb9bce733e61f4b8 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Tue, 13 Aug 2024 21:09:11 +0200 Subject: [PATCH 018/332] libexpr: Document {eval,maybeThunk} methods --- src/libexpr/eval.cc | 4 ---- src/libexpr/include/nix/expr/nixexpr.hh | 9 +++++++++ 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index bf55a9c9c..660f474b8 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -979,10 +979,6 @@ void EvalState::mkSingleDerivedPathString(const SingleDerivedPath & p, Value & v }); } -/* Create a thunk for the delayed computation of the given expression - in the given environment. But if the expression is a variable, - then look it up right away. This significantly reduces the number - of thunks allocated. */ Value * Expr::maybeThunk(EvalState & state, Env & env) { Value * v = state.allocValue(); diff --git a/src/libexpr/include/nix/expr/nixexpr.hh b/src/libexpr/include/nix/expr/nixexpr.hh index 414eb5116..aa62760d8 100644 --- a/src/libexpr/include/nix/expr/nixexpr.hh +++ b/src/libexpr/include/nix/expr/nixexpr.hh @@ -99,7 +99,16 @@ struct Expr virtual ~Expr() {}; virtual void show(const SymbolTable & symbols, std::ostream & str) const; virtual void bindVars(EvalState & es, const std::shared_ptr & env); + + /** Normal evaluation, implemented directly by all subclasses. */ virtual void eval(EvalState & state, Env & env, Value & v); + + /** + * Create a thunk for the delayed computation of the given expression + * in the given environment. But if the expression is a variable, + * then look it up right away. This significantly reduces the number + * of thunks allocated. + */ virtual Value * maybeThunk(EvalState & state, Env & env); virtual void setName(Symbol name); virtual void setDocComment(DocComment docComment) {}; From 5f60602875cc9cf706747efe8f3c68c9096c201f Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 22 Sep 2025 11:48:58 +0200 Subject: [PATCH 019/332] Reapply "Merge pull request #13938 from NixOS/import-thunk" This reverts commit fd034814dc12a3061529f0480932d6e23a89363e. 
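To illustrate the attribute-ordering contract documented in the C API header changes above, a sketch (not part of the patches) of how a consumer can obtain a deterministic traversal; `ctx`, `state` and `value` are assumed to be set up as in the tests earlier in this series, and includes and error handling are omitted:

    // Gather the names via the index-based accessor, sort them byte-wise
    // (matching Nix's own ordering), then look each attribute up by name.
    std::vector<std::string> names;
    unsigned int n = nix_get_attrs_size(ctx, value);
    for (unsigned int i = 0; i < n; i++)
        names.push_back(nix_get_attr_name_byidx(ctx, value, state, i));
    std::sort(names.begin(), names.end());
    for (auto & name : names) {
        nix_value * attr = nix_get_attr_byname_lazy(ctx, value, state, name.c_str());
        // ... use the (possibly still lazy) value, then release it:
        nix_gc_decref(ctx, attr);
    }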
--- src/libexpr/eval.cc | 138 +++++++++++------- src/libexpr/include/nix/expr/eval.hh | 30 ++-- src/libfetchers/filtering-source-accessor.cc | 6 +- src/libfetchers/git-utils.cc | 4 +- .../nix/fetchers/filtering-source-accessor.hh | 2 +- src/libutil/include/nix/util/canon-path.hh | 14 +- src/libutil/include/nix/util/source-path.hh | 14 +- src/libutil/include/nix/util/util.hh | 11 ++ src/libutil/posix-source-accessor.cc | 4 +- 9 files changed, 135 insertions(+), 88 deletions(-) diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index bf55a9c9c..43e4c3643 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -38,6 +38,7 @@ #include #include +#include #include "nix/util/strings-inline.hh" @@ -264,6 +265,9 @@ EvalState::EvalState( , debugRepl(nullptr) , debugStop(false) , trylevel(0) + , srcToStore(make_ref()) + , importResolutionCache(make_ref()) + , fileEvalCache(make_ref()) , regexCache(makeRegexCache()) #if NIX_USE_BOEHMGC , valueAllocCache(std::allocate_shared(traceable_allocator(), nullptr)) @@ -1026,63 +1030,85 @@ Value * ExprPath::maybeThunk(EvalState & state, Env & env) return &v; } +/** + * A helper `Expr` class to lets us parse and evaluate Nix expressions + * from a thunk, ensuring that every file is parsed/evaluated only + * once (via the thunk stored in `EvalState::fileEvalCache`). + */ +struct ExprParseFile : Expr +{ + SourcePath & path; + bool mustBeTrivial; + + ExprParseFile(SourcePath & path, bool mustBeTrivial) + : path(path) + , mustBeTrivial(mustBeTrivial) + { + } + + void eval(EvalState & state, Env & env, Value & v) override + { + printTalkative("evaluating file '%s'", path); + + auto e = state.parseExprFromFile(path); + + try { + auto dts = + state.debugRepl + ? makeDebugTraceStacker( + state, *e, state.baseEnv, e->getPos(), "while evaluating the file '%s':", path.to_string()) + : nullptr; + + // Enforce that 'flake.nix' is a direct attrset, not a + // computation. + if (mustBeTrivial && !(dynamic_cast(e))) + state.error("file '%s' must be an attribute set", path).debugThrow(); + + state.eval(e, v); + } catch (Error & e) { + state.addErrorTrace(e, "while evaluating the file '%s':", path.to_string()); + throw; + } + } +}; + void EvalState::evalFile(const SourcePath & path, Value & v, bool mustBeTrivial) { - FileEvalCache::iterator i; - if ((i = fileEvalCache.find(path)) != fileEvalCache.end()) { - v = i->second; + auto resolvedPath = getConcurrent(*importResolutionCache, path); + + if (!resolvedPath) { + resolvedPath = resolveExprPath(path); + importResolutionCache->emplace(path, *resolvedPath); + } + + if (auto v2 = getConcurrent(*fileEvalCache, *resolvedPath)) { + forceValue(**v2, noPos); + v = **v2; return; } - auto resolvedPath = resolveExprPath(path); - if ((i = fileEvalCache.find(resolvedPath)) != fileEvalCache.end()) { - v = i->second; - return; - } + Value * vExpr; + ExprParseFile expr{*resolvedPath, mustBeTrivial}; - printTalkative("evaluating file '%1%'", resolvedPath); - Expr * e = nullptr; + fileEvalCache->try_emplace_and_cvisit( + *resolvedPath, + nullptr, + [&](auto & i) { + vExpr = allocValue(); + vExpr->mkThunk(&baseEnv, &expr); + i.second = vExpr; + }, + [&](auto & i) { vExpr = i.second; }); - auto j = fileParseCache.find(resolvedPath); - if (j != fileParseCache.end()) - e = j->second; + forceValue(*vExpr, noPos); - if (!e) - e = parseExprFromFile(resolvedPath); - - fileParseCache.emplace(resolvedPath, e); - - try { - auto dts = debugRepl ? 
makeDebugTraceStacker( - *this, - *e, - this->baseEnv, - e->getPos(), - "while evaluating the file '%1%':", - resolvedPath.to_string()) - : nullptr; - - // Enforce that 'flake.nix' is a direct attrset, not a - // computation. - if (mustBeTrivial && !(dynamic_cast(e))) - error("file '%s' must be an attribute set", path).debugThrow(); - eval(e, v); - } catch (Error & e) { - addErrorTrace(e, "while evaluating the file '%1%':", resolvedPath.to_string()); - throw; - } - - fileEvalCache.emplace(resolvedPath, v); - if (path != resolvedPath) - fileEvalCache.emplace(path, v); + v = *vExpr; } void EvalState::resetFileCache() { - fileEvalCache.clear(); - fileEvalCache.rehash(0); - fileParseCache.clear(); - fileParseCache.rehash(0); + importResolutionCache->clear(); + fileEvalCache->clear(); inputCache->clear(); } @@ -2401,9 +2427,10 @@ StorePath EvalState::copyPathToStore(NixStringContext & context, const SourcePat if (nix::isDerivation(path.path.abs())) error("file names are not allowed to end in '%1%'", drvExtension).debugThrow(); - std::optional dstPath; - if (!srcToStore.cvisit(path, [&dstPath](const auto & kv) { dstPath.emplace(kv.second); })) { - dstPath.emplace(fetchToStore( + auto dstPathCached = getConcurrent(*srcToStore, path); + + auto dstPath = dstPathCached ? *dstPathCached : [&]() { + auto dstPath = fetchToStore( fetchSettings, *store, path.resolveSymlinks(SymlinkResolution::Ancestors), @@ -2411,14 +2438,15 @@ StorePath EvalState::copyPathToStore(NixStringContext & context, const SourcePat path.baseName(), ContentAddressMethod::Raw::NixArchive, nullptr, - repair)); - allowPath(*dstPath); - srcToStore.try_emplace(path, *dstPath); - printMsg(lvlChatty, "copied source '%1%' -> '%2%'", path, store->printStorePath(*dstPath)); - } + repair); + allowPath(dstPath); + srcToStore->try_emplace(path, dstPath); + printMsg(lvlChatty, "copied source '%1%' -> '%2%'", path, store->printStorePath(dstPath)); + return dstPath; + }(); - context.insert(NixStringContextElem::Opaque{.path = *dstPath}); - return *dstPath; + context.insert(NixStringContextElem::Opaque{.path = dstPath}); + return dstPath; } SourcePath EvalState::coerceToPath(const PosIdx pos, Value & v, NixStringContext & context, std::string_view errorCtx) diff --git a/src/libexpr/include/nix/expr/eval.hh b/src/libexpr/include/nix/expr/eval.hh index 64f528581..8f7a0ec32 100644 --- a/src/libexpr/include/nix/expr/eval.hh +++ b/src/libexpr/include/nix/expr/eval.hh @@ -20,8 +20,9 @@ // For `NIX_USE_BOEHMGC`, and if that's set, `GC_THREADS` #include "nix/expr/config.hh" -#include #include +#include + #include #include #include @@ -403,37 +404,30 @@ private: /* Cache for calls to addToStore(); maps source paths to the store paths. */ - boost::concurrent_flat_map> srcToStore; + ref> srcToStore; /** - * A cache from path names to parse trees. + * A cache that maps paths to "resolved" paths for importing Nix + * expressions, i.e. `/foo` to `/foo/default.nix`. */ - typedef boost::unordered_flat_map< - SourcePath, - Expr *, - std::hash, - std::equal_to, - traceable_allocator>> - FileParseCache; - FileParseCache fileParseCache; + ref> importResolutionCache; /** - * A cache from path names to values. + * A cache from resolved paths to values. */ - typedef boost::unordered_flat_map< + ref, std::equal_to, - traceable_allocator>> - FileEvalCache; - FileEvalCache fileEvalCache; + traceable_allocator>>> + fileEvalCache; /** * Associate source positions of certain AST nodes with their preceding doc comment, if they have one. * Grouped by file. 
*/ - boost::unordered_flat_map> positionToDocComment; + boost::unordered_flat_map positionToDocComment; LookupPath lookupPath; diff --git a/src/libfetchers/filtering-source-accessor.cc b/src/libfetchers/filtering-source-accessor.cc index d0991ae23..a99ecacef 100644 --- a/src/libfetchers/filtering-source-accessor.cc +++ b/src/libfetchers/filtering-source-accessor.cc @@ -59,12 +59,12 @@ void FilteringSourceAccessor::checkAccess(const CanonPath & path) struct AllowListSourceAccessorImpl : AllowListSourceAccessor { std::set allowedPrefixes; - boost::unordered_flat_set> allowedPaths; + boost::unordered_flat_set allowedPaths; AllowListSourceAccessorImpl( ref next, std::set && allowedPrefixes, - boost::unordered_flat_set> && allowedPaths, + boost::unordered_flat_set && allowedPaths, MakeNotAllowedError && makeNotAllowedError) : AllowListSourceAccessor(SourcePath(next), std::move(makeNotAllowedError)) , allowedPrefixes(std::move(allowedPrefixes)) @@ -86,7 +86,7 @@ struct AllowListSourceAccessorImpl : AllowListSourceAccessor ref AllowListSourceAccessor::create( ref next, std::set && allowedPrefixes, - boost::unordered_flat_set> && allowedPaths, + boost::unordered_flat_set && allowedPaths, MakeNotAllowedError && makeNotAllowedError) { return make_ref( diff --git a/src/libfetchers/git-utils.cc b/src/libfetchers/git-utils.cc index 4ed94a4ed..a3652e522 100644 --- a/src/libfetchers/git-utils.cc +++ b/src/libfetchers/git-utils.cc @@ -817,7 +817,7 @@ struct GitSourceAccessor : SourceAccessor return toHash(*git_tree_entry_id(entry)); } - boost::unordered_flat_map> lookupCache; + boost::unordered_flat_map lookupCache; /* Recursively look up 'path' relative to the root. */ git_tree_entry * lookup(State & state, const CanonPath & path) @@ -1254,7 +1254,7 @@ GitRepoImpl::getAccessor(const WorkdirInfo & wd, bool exportIgnore, MakeNotAllow makeFSSourceAccessor(path), std::set{wd.files}, // Always allow access to the root, but not its children. 
- boost::unordered_flat_set>{CanonPath::root}, + boost::unordered_flat_set{CanonPath::root}, std::move(makeNotAllowedError)) .cast(); if (exportIgnore) diff --git a/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh b/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh index 1d4028be5..f8a57bfb3 100644 --- a/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh +++ b/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh @@ -72,7 +72,7 @@ struct AllowListSourceAccessor : public FilteringSourceAccessor static ref create( ref next, std::set && allowedPrefixes, - boost::unordered_flat_set> && allowedPaths, + boost::unordered_flat_set && allowedPaths, MakeNotAllowedError && makeNotAllowedError); using FilteringSourceAccessor::FilteringSourceAccessor; diff --git a/src/libutil/include/nix/util/canon-path.hh b/src/libutil/include/nix/util/canon-path.hh index 334c9e332..dd07929b4 100644 --- a/src/libutil/include/nix/util/canon-path.hh +++ b/src/libutil/include/nix/util/canon-path.hh @@ -8,6 +8,8 @@ #include #include +#include + namespace nix { /** @@ -258,11 +260,17 @@ public: */ std::string makeRelative(const CanonPath & path) const; - friend struct std::hash; + friend std::size_t hash_value(const CanonPath &); }; std::ostream & operator<<(std::ostream & stream, const CanonPath & path); +inline std::size_t hash_value(const CanonPath & path) +{ + boost::hash hasher; + return hasher(path.path); +} + } // namespace nix template<> @@ -270,8 +278,8 @@ struct std::hash { using is_avalanching = std::true_type; - std::size_t operator()(const nix::CanonPath & s) const noexcept + std::size_t operator()(const nix::CanonPath & path) const noexcept { - return std::hash{}(s.path); + return nix::hash_value(path); } }; diff --git a/src/libutil/include/nix/util/source-path.hh b/src/libutil/include/nix/util/source-path.hh index f7cfc8ef7..08f9fe580 100644 --- a/src/libutil/include/nix/util/source-path.hh +++ b/src/libutil/include/nix/util/source-path.hh @@ -119,15 +119,23 @@ struct SourcePath std::ostream & operator<<(std::ostream & str, const SourcePath & path); +inline std::size_t hash_value(const SourcePath & path) +{ + std::size_t hash = 0; + boost::hash_combine(hash, path.accessor->number); + boost::hash_combine(hash, path.path); + return hash; +} + } // namespace nix template<> struct std::hash { + using is_avalanching = std::true_type; + std::size_t operator()(const nix::SourcePath & s) const noexcept { - std::size_t hash = 0; - hash_combine(hash, s.accessor->number, s.path); - return hash; + return nix::hash_value(s); } }; diff --git a/src/libutil/include/nix/util/util.hh b/src/libutil/include/nix/util/util.hh index 2e78120fc..26f03938a 100644 --- a/src/libutil/include/nix/util/util.hh +++ b/src/libutil/include/nix/util/util.hh @@ -220,6 +220,17 @@ typename T::mapped_type * get(T & map, const K & key) template typename T::mapped_type * get(T && map, const K & key) = delete; +/** + * Look up a value in a `boost::concurrent_flat_map`. + */ +template +std::optional getConcurrent(const T & map, const typename T::key_type & key) +{ + std::optional res; + map.cvisit(key, [&](auto & x) { res = x.second; }); + return res; +} + /** * Get a value for the specified key from an associate container, or a default value if the key isn't present. 
*/ diff --git a/src/libutil/posix-source-accessor.cc b/src/libutil/posix-source-accessor.cc index 877c63331..c524f3e4f 100644 --- a/src/libutil/posix-source-accessor.cc +++ b/src/libutil/posix-source-accessor.cc @@ -95,9 +95,7 @@ std::optional PosixSourceAccessor::cachedLstat(const CanonPath & pa // former is not hashable on libc++. Path absPath = makeAbsPath(path).string(); - std::optional res; - cache.cvisit(absPath, [&](auto & x) { res.emplace(x.second); }); - if (res) + if (auto res = getConcurrent(cache, absPath)) return *res; auto st = nix::maybeLstat(absPath.c_str()); From d32d77f4d4b8ded8b9d4a6520aa4d24f16bdc3da Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 22 Sep 2025 12:06:51 +0200 Subject: [PATCH 020/332] Allocate ExprParseFile on the heap for now https://github.com/NixOS/nix/pull/14013#issuecomment-3308085755 --- src/libexpr/eval.cc | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 43e4c3643..9fe0263c2 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -1035,9 +1035,10 @@ Value * ExprPath::maybeThunk(EvalState & state, Env & env) * from a thunk, ensuring that every file is parsed/evaluated only * once (via the thunk stored in `EvalState::fileEvalCache`). */ -struct ExprParseFile : Expr +struct ExprParseFile : Expr, gc { - SourcePath & path; + // FIXME: make this a reference (see below). + SourcePath path; bool mustBeTrivial; ExprParseFile(SourcePath & path, bool mustBeTrivial) @@ -1088,14 +1089,18 @@ void EvalState::evalFile(const SourcePath & path, Value & v, bool mustBeTrivial) } Value * vExpr; - ExprParseFile expr{*resolvedPath, mustBeTrivial}; + // FIXME: put ExprParseFile on the stack instead of the heap once + // https://github.com/NixOS/nix/pull/13930 is merged. That will ensure + // the post-condition that `expr` is unreachable after + // `forceValue()` returns. 
+ auto expr = new ExprParseFile{*resolvedPath, mustBeTrivial}; fileEvalCache->try_emplace_and_cvisit( *resolvedPath, nullptr, [&](auto & i) { vExpr = allocValue(); - vExpr->mkThunk(&baseEnv, &expr); + vExpr->mkThunk(&baseEnv, expr); i.second = vExpr; }, [&](auto & i) { vExpr = i.second; }); From 32d4ea81402cc40ac0acb686e2bac9f130d368c1 Mon Sep 17 00:00:00 2001 From: David McFarland Date: Mon, 22 Sep 2025 09:09:45 -0300 Subject: [PATCH 021/332] fix cross-build for cygwin --- nix-meson-build-support/common/meson.build | 9 +++++++++ src/libexpr/meson.build | 4 ++++ 2 files changed, 13 insertions(+) diff --git a/nix-meson-build-support/common/meson.build b/nix-meson-build-support/common/meson.build index c76c2971c..8c4e98862 100644 --- a/nix-meson-build-support/common/meson.build +++ b/nix-meson-build-support/common/meson.build @@ -5,6 +5,15 @@ if not (host_machine.system() == 'windows' and cxx.get_id() == 'gcc') deps_private += dependency('threads') endif +if host_machine.system() == 'cygwin' + # -std=gnu on cygwin defines 'unix', which conflicts with the namespace + add_project_arguments( + '-D_POSIX_C_SOURCE=200809L', + '-D_GNU_SOURCE', + language : 'cpp', + ) +endif + add_project_arguments( '-Wdeprecated-copy', '-Werror=suggest-override', diff --git a/src/libexpr/meson.build b/src/libexpr/meson.build index 32a4d511b..d24e7fae3 100644 --- a/src/libexpr/meson.build +++ b/src/libexpr/meson.build @@ -69,6 +69,10 @@ if bdw_gc.found() define_value = cxx.has_function(funcspec).to_int() configdata_priv.set(define_name, define_value) endforeach + if host_machine.system() == 'cygwin' + # undefined reference to `__wrap__Znwm' + configdata_pub.set('GC_NO_INLINE_STD_NEW', 1) + endif endif # Used in public header. Affects ABI! configdata_pub.set('NIX_USE_BOEHMGC', bdw_gc.found().to_int()) From 6389f65d631dd46f82696dd542aee8eba6964688 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Wed, 17 Sep 2025 17:46:49 -0400 Subject: [PATCH 022/332] Rework derivation format release note slightly --- doc/manual/rl-next/derivation-json.md | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/doc/manual/rl-next/derivation-json.md b/doc/manual/rl-next/derivation-json.md index 420395f1d..be7ab1cfe 100644 --- a/doc/manual/rl-next/derivation-json.md +++ b/doc/manual/rl-next/derivation-json.md @@ -4,14 +4,12 @@ prs: [13980] issues: [13570] --- -Experience with many JSON frameworks (e.g. nlohmann/json in C++, Serde -in Rust, and Aeson in Haskell), has show that the use of the store dir -in JSON formats is an impediment to systematic JSON formats, because it -requires the serializer/deserializer to take an extra paramater (the -store dir). +Experience with many JSON frameworks (e.g. nlohmann/json in C++, Serde in Rust, and Aeson in Haskell), has shown that the use of the store dir in JSON formats is an impediment to systematic JSON formats, +because it requires the serializer/deserializer to take an extra paramater (the store dir). -We ultimately want to rectify this issue with all (non-stable, able to -be changed) JSON formats. To start with, we are changing the JSON format -for derivations because the `nix derivation` commands are --- in -addition to being formally unstable --- less widely used than other -unstable commands. +We ultimately want to rectify this issue with all (non-stable, able to be changed) JSON formats. 
+To start with, we are changing the JSON format for derivations because the `nix derivation` commands are
+--- in addition to being formally unstable
+--- less widely used than other unstable commands.
+
+See the documentation on the [JSON format for derivations](@docroot@/protocols/json/derivation.md) for further details.

From 91593a237ff4b3de28edd2fd85bc0905efe4ea8b Mon Sep 17 00:00:00 2001
From: John Ericson
Date: Wed, 17 Sep 2025 18:14:12 -0400
Subject: [PATCH 023/332] Convert Realisation JSON logic to standard style

No behavior is changed, just:

- Declare a canonical `nlohmann::json::adl_serializer`

- Use `json-utils.hh` to shorten code without getting worse error
  messages.

Co-authored-by: Robert Hensing
---
 src/libstore/binary-cache-store.cc            | 12 ++-
 src/libstore/common-protocol.cc               |  9 +-
 src/libstore/include/nix/store/realisation.hh |  5 +-
 src/libstore/nar-info-disk-cache.cc           | 16 ++--
 src/libstore/realisation.cc                   | 88 +++++++++----------
 src/nix/realisation.cc                        |  2 +-
 src/perl/lib/Nix/Store.xs                     |  2 +-
 7 files changed, 73 insertions(+), 61 deletions(-)

diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc
index e08a1449b..d5184b1bf 100644
--- a/src/libstore/binary-cache-store.cc
+++ b/src/libstore/binary-cache-store.cc
@@ -515,8 +515,14 @@ void BinaryCacheStore::queryRealisationUncached(
         if (!data)
             return (*callbackPtr)({});

-        auto realisation = Realisation::fromJSON(nlohmann::json::parse(*data), outputInfoFilePath);
-        return (*callbackPtr)(std::make_shared(realisation));
+        std::shared_ptr realisation;
+        try {
+            realisation = std::make_shared(nlohmann::json::parse(*data));
+        } catch (Error & e) {
+            e.addTrace({}, "while parsing file '%s' as a realisation", outputInfoFilePath);
+            throw;
+        }
+        return (*callbackPtr)(std::move(realisation));
     } catch (...)
{ callbackPtr->rethrow(); } @@ -530,7 +536,7 @@ void BinaryCacheStore::registerDrvOutput(const Realisation & info) if (diskCache) diskCache->upsertRealisation(config.getReference().render(/*FIXME withParams=*/false), info); auto filePath = realisationsPrefix + "/" + info.id.to_string() + ".doi"; - upsertFile(filePath, info.toJSON().dump(), "application/json"); + upsertFile(filePath, static_cast(info).dump(), "application/json"); } ref BinaryCacheStore::getFSAccessor(bool requireValidPath) diff --git a/src/libstore/common-protocol.cc b/src/libstore/common-protocol.cc index d4f3efc9b..b069c9498 100644 --- a/src/libstore/common-protocol.cc +++ b/src/libstore/common-protocol.cc @@ -49,13 +49,18 @@ void CommonProto::Serialise::write( Realisation CommonProto::Serialise::read(const StoreDirConfig & store, CommonProto::ReadConn conn) { std::string rawInput = readString(conn.from); - return Realisation::fromJSON(nlohmann::json::parse(rawInput), "remote-protocol"); + try { + return nlohmann::json::parse(rawInput); + } catch (Error & e) { + e.addTrace({}, "while parsing a realisation object in the remote protocol"); + throw; + } } void CommonProto::Serialise::write( const StoreDirConfig & store, CommonProto::WriteConn conn, const Realisation & realisation) { - conn.to << realisation.toJSON().dump(); + conn.to << static_cast(realisation).dump(); } DrvOutput CommonProto::Serialise::read(const StoreDirConfig & store, CommonProto::ReadConn conn) diff --git a/src/libstore/include/nix/store/realisation.hh b/src/libstore/include/nix/store/realisation.hh index 6eb3eecf3..3424a39c9 100644 --- a/src/libstore/include/nix/store/realisation.hh +++ b/src/libstore/include/nix/store/realisation.hh @@ -64,9 +64,6 @@ struct Realisation */ std::map dependentRealisations; - nlohmann::json toJSON() const; - static Realisation fromJSON(const nlohmann::json & json, const std::string & whence); - std::string fingerprint() const; void sign(const Signer &); bool checkSignature(const PublicKeys & publicKeys, const std::string & sig) const; @@ -169,3 +166,5 @@ public: }; } // namespace nix + +JSON_IMPL(nix::Realisation) diff --git a/src/libstore/nar-info-disk-cache.cc b/src/libstore/nar-info-disk-cache.cc index 69d8d2e14..11608a667 100644 --- a/src/libstore/nar-info-disk-cache.cc +++ b/src/libstore/nar-info-disk-cache.cc @@ -304,10 +304,15 @@ public: if (queryRealisation.isNull(0)) return {oInvalid, 0}; - auto realisation = std::make_shared( - Realisation::fromJSON(nlohmann::json::parse(queryRealisation.getStr(0)), "Local disk cache")); - - return {oValid, realisation}; + try { + return { + oValid, + std::make_shared(nlohmann::json::parse(queryRealisation.getStr(0))), + }; + } catch (Error & e) { + e.addTrace({}, "while parsing the local disk cache"); + throw; + } }); } @@ -349,7 +354,8 @@ public: auto & cache(getCache(*state, uri)); - state->insertRealisation.use()(cache.id)(realisation.id.to_string())(realisation.toJSON().dump())(time(0)) + state->insertRealisation + .use()(cache.id)(realisation.id.to_string())(static_cast(realisation).dump())(time(0)) .exec(); }); } diff --git a/src/libstore/realisation.cc b/src/libstore/realisation.cc index 8c3baa73b..d59f4b0ea 100644 --- a/src/libstore/realisation.cc +++ b/src/libstore/realisation.cc @@ -2,6 +2,7 @@ #include "nix/store/store-api.hh" #include "nix/util/closure.hh" #include "nix/util/signature/local-keys.hh" +#include "nix/util/json-utils.hh" #include namespace nix { @@ -60,54 +61,9 @@ void Realisation::closure(Store & store, const std::set & startOutp }); } -nlohmann::json 
Realisation::toJSON() const -{ - auto jsonDependentRealisations = nlohmann::json::object(); - for (auto & [depId, depOutPath] : dependentRealisations) - jsonDependentRealisations.emplace(depId.to_string(), depOutPath.to_string()); - return nlohmann::json{ - {"id", id.to_string()}, - {"outPath", outPath.to_string()}, - {"signatures", signatures}, - {"dependentRealisations", jsonDependentRealisations}, - }; -} - -Realisation Realisation::fromJSON(const nlohmann::json & json, const std::string & whence) -{ - auto getOptionalField = [&](std::string fieldName) -> std::optional { - auto fieldIterator = json.find(fieldName); - if (fieldIterator == json.end()) - return std::nullopt; - return {*fieldIterator}; - }; - auto getField = [&](std::string fieldName) -> std::string { - if (auto field = getOptionalField(fieldName)) - return *field; - else - throw Error("Drv output info file '%1%' is corrupt, missing field %2%", whence, fieldName); - }; - - StringSet signatures; - if (auto signaturesIterator = json.find("signatures"); signaturesIterator != json.end()) - signatures.insert(signaturesIterator->begin(), signaturesIterator->end()); - - std::map dependentRealisations; - if (auto jsonDependencies = json.find("dependentRealisations"); jsonDependencies != json.end()) - for (auto & [jsonDepId, jsonDepOutPath] : jsonDependencies->get()) - dependentRealisations.insert({DrvOutput::parse(jsonDepId), StorePath(jsonDepOutPath)}); - - return Realisation{ - .id = DrvOutput::parse(getField("id")), - .outPath = StorePath(getField("outPath")), - .signatures = signatures, - .dependentRealisations = dependentRealisations, - }; -} - std::string Realisation::fingerprint() const { - auto serialized = toJSON(); + nlohmann::json serialized = *this; serialized.erase("signatures"); return serialized.dump(); } @@ -183,3 +139,43 @@ RealisedPath::Set RealisedPath::closure(Store & store) const } } // namespace nix + +namespace nlohmann { + +using namespace nix; + +Realisation adl_serializer::from_json(const json & json0) +{ + auto json = getObject(json0); + + StringSet signatures; + if (auto signaturesOpt = optionalValueAt(json, "signatures")) + signatures = *signaturesOpt; + + std::map dependentRealisations; + if (auto jsonDependencies = optionalValueAt(json, "dependentRealisations")) + for (auto & [jsonDepId, jsonDepOutPath] : getObject(*jsonDependencies)) + dependentRealisations.insert({DrvOutput::parse(jsonDepId), jsonDepOutPath}); + + return Realisation{ + .id = DrvOutput::parse(valueAt(json, "id")), + .outPath = valueAt(json, "outPath"), + .signatures = signatures, + .dependentRealisations = dependentRealisations, + }; +} + +void adl_serializer::to_json(json & json, Realisation r) +{ + auto jsonDependentRealisations = nlohmann::json::object(); + for (auto & [depId, depOutPath] : r.dependentRealisations) + jsonDependentRealisations.emplace(depId.to_string(), depOutPath); + json = { + {"id", r.id.to_string()}, + {"outPath", r.outPath}, + {"signatures", r.signatures}, + {"dependentRealisations", jsonDependentRealisations}, + }; +} + +} // namespace nlohmann diff --git a/src/nix/realisation.cc b/src/nix/realisation.cc index a0e400f54..8dd608d23 100644 --- a/src/nix/realisation.cc +++ b/src/nix/realisation.cc @@ -59,7 +59,7 @@ struct CmdRealisationInfo : BuiltPathsCommand, MixJSON for (auto & path : realisations) { nlohmann::json currentPath; if (auto realisation = std::get_if(&path.raw)) - currentPath = realisation->toJSON(); + currentPath = *realisation; else currentPath["opaquePath"] = store->printStorePath(path.path()); 
diff --git a/src/perl/lib/Nix/Store.xs b/src/perl/lib/Nix/Store.xs index 7aa918ba0..93e9f0f95 100644 --- a/src/perl/lib/Nix/Store.xs +++ b/src/perl/lib/Nix/Store.xs @@ -168,7 +168,7 @@ StoreWrapper::queryRawRealisation(char * outputId) try { auto realisation = THIS->store->queryRealisation(DrvOutput::parse(outputId)); if (realisation) - XPUSHs(sv_2mortal(newSVpv(realisation->toJSON().dump().c_str(), 0))); + XPUSHs(sv_2mortal(newSVpv(static_cast(*realisation).dump().c_str(), 0))); else XPUSHs(sv_2mortal(newSVpv("", 0))); } catch (Error & e) { From df23f2b3c1ee47012b271186f62de68b539cddef Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 22 Sep 2025 19:09:35 +0200 Subject: [PATCH 024/332] packaging/dev-shell: Add shellcheck It was already in the closure for the pre-commit hook installation script. --- packaging/dev-shell.nix | 1 + 1 file changed, 1 insertion(+) diff --git a/packaging/dev-shell.nix b/packaging/dev-shell.nix index 949f79752..ccfb9c4ae 100644 --- a/packaging/dev-shell.nix +++ b/packaging/dev-shell.nix @@ -118,6 +118,7 @@ pkgs.nixComponents2.nix-util.overrideAttrs ( modular.pre-commit.settings.package (pkgs.writeScriptBin "pre-commit-hooks-install" modular.pre-commit.settings.installationScript) pkgs.buildPackages.nixfmt-rfc-style + pkgs.buildPackages.shellcheck pkgs.buildPackages.gdb ] ++ lib.optional (stdenv.cc.isClang && stdenv.hostPlatform == stdenv.buildPlatform) ( From c12187b15a95d788be5e5e3f9edfc4b0e2c5d826 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 22 Sep 2025 19:12:33 +0200 Subject: [PATCH 025/332] pre-commit: Drop exclude config/install-sh This file was part of the make-based build, which has been removed. --- maintainers/flake-module.nix | 1 - 1 file changed, 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 4815313dd..86248c883 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^config/install-sh$'' ''^misc/bash/completion\.sh$'' ''^misc/fish/completion\.fish$'' ''^misc/zsh/completion\.zsh$'' From 1878e788cec3a6a9b9b22d158cadf44659276117 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 22 Sep 2025 19:15:44 +0200 Subject: [PATCH 026/332] misc/bash/completion.sh: Fix shellcheck --- maintainers/flake-module.nix | 1 - misc/bash/completion.sh | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 86248c883..cd62b6135 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^misc/bash/completion\.sh$'' ''^misc/fish/completion\.fish$'' ''^misc/zsh/completion\.zsh$'' ''^scripts/create-darwin-volume\.sh$'' diff --git a/misc/bash/completion.sh b/misc/bash/completion.sh index c4ba96cd3..96f98d6c1 100644 --- a/misc/bash/completion.sh +++ b/misc/bash/completion.sh @@ -1,3 +1,4 @@ +# shellcheck shell=bash function _complete_nix { local -a words local cword cur From c71f80b6ebedd481b4e2d360463e5466d392ba19 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Mon, 22 Sep 2025 01:16:52 +0300 Subject: [PATCH 027/332] libstore: Implement boost::hash for StorePath --- src/libstore/include/nix/store/path.hh | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/libstore/include/nix/store/path.hh b/src/libstore/include/nix/store/path.hh index 8124cf580..74ee0422b 100644 --- a/src/libstore/include/nix/store/path.hh 
+++ b/src/libstore/include/nix/store/path.hh @@ -108,4 +108,13 @@ struct hash } // namespace std +namespace nix { + +inline std::size_t hash_value(const StorePath & path) +{ + return std::hash{}(path); +} + +} // namespace nix + JSON_IMPL(nix::StorePath) From 6195dfff3a5e43d24a0436c6109521e95519476f Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 22 Sep 2025 19:17:58 +0200 Subject: [PATCH 028/332] pre-commit: Move fish exclude --- maintainers/flake-module.nix | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index cd62b6135..be924b37b 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^misc/fish/completion\.fish$'' ''^misc/zsh/completion\.zsh$'' ''^scripts/create-darwin-volume\.sh$'' ''^scripts/install-darwin-multi-user\.sh$'' @@ -246,6 +245,9 @@ ''^tests/functional/user-envs\.builder\.sh$'' ''^tests/functional/user-envs\.sh$'' ''^tests/functional/why-depends\.sh$'' + + # Shellcheck doesn't support fish shell syntax + ''^misc/fish/completion\.fish$'' ]; }; }; From 34e9caaf9bbffeef9381fe086060fb333d8904b6 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 22 Sep 2025 19:18:52 +0200 Subject: [PATCH 029/332] pre-commit: Move zsh exclude --- maintainers/flake-module.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index be924b37b..83bdda7f2 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^misc/zsh/completion\.zsh$'' ''^scripts/create-darwin-volume\.sh$'' ''^scripts/install-darwin-multi-user\.sh$'' ''^scripts/install-multi-user\.sh$'' @@ -246,8 +245,9 @@ ''^tests/functional/user-envs\.sh$'' ''^tests/functional/why-depends\.sh$'' - # Shellcheck doesn't support fish shell syntax + # Shellcheck doesn't support fish or zsh shell syntax ''^misc/fish/completion\.fish$'' + ''^misc/zsh/completion\.zsh$'' ]; }; }; From 033f13fb1af00b3d938722e0b885bceb7da37ee3 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 22 Sep 2025 19:19:39 +0200 Subject: [PATCH 030/332] pre-commit: Remove exclude that passes --- maintainers/flake-module.nix | 1 - 1 file changed, 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 83bdda7f2..48de03116 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^scripts/create-darwin-volume\.sh$'' ''^scripts/install-darwin-multi-user\.sh$'' ''^scripts/install-multi-user\.sh$'' ''^scripts/install-systemd-multi-user\.sh$'' From 43ec36cddf6c1e3cca38cd2eb3710c242b0054f2 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 22 Sep 2025 19:21:06 +0200 Subject: [PATCH 031/332] pre-commit: Remove exclude that passes --- maintainers/flake-module.nix | 1 - 1 file changed, 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 48de03116..54284784e 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^scripts/install-darwin-multi-user\.sh$'' ''^scripts/install-multi-user\.sh$'' ''^scripts/install-systemd-multi-user\.sh$'' ''^src/nix/get-env\.sh$'' From c4c92c4c6148199ec07e59b1c90a8584997e3a53 
Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Mon, 22 Sep 2025 01:16:56 +0300 Subject: [PATCH 032/332] libstore: Make writable dummy store thread-safe Tested by building with b_sanitize=thread and running: nix flake prefetch-inputs --store "dummy://?read-only=false" It might make sense to move this utility class out of dummy-store.cc, but it seems fine for now. --- src/libstore/dummy-store.cc | 165 +++++++++++++++++++++++++++--------- 1 file changed, 125 insertions(+), 40 deletions(-) diff --git a/src/libstore/dummy-store.cc b/src/libstore/dummy-store.cc index 06b518c15..367cdb5d2 100644 --- a/src/libstore/dummy-store.cc +++ b/src/libstore/dummy-store.cc @@ -4,6 +4,8 @@ #include "nix/util/memory-source-accessor.hh" #include "nix/store/dummy-store.hh" +#include + namespace nix { std::string DummyStoreConfig::doc() @@ -13,6 +15,99 @@ std::string DummyStoreConfig::doc() ; } +namespace { + +class WholeStoreViewAccessor : public SourceAccessor +{ + using BaseName = std::string; + + /** + * Map from store path basenames to corresponding accessors. + */ + boost::concurrent_flat_map> subdirs; + + /** + * Helper accessor for accessing just the CanonPath::root. + */ + MemorySourceAccessor rootPathAccessor; + + /** + * Helper empty accessor. + */ + MemorySourceAccessor emptyAccessor; + + auto + callWithAccessorForPath(CanonPath path, std::invocable auto callback) + { + if (path.isRoot()) + return callback(rootPathAccessor, path); + + BaseName baseName(*path.begin()); + MemorySourceAccessor * res = nullptr; + + subdirs.cvisit(baseName, [&](const auto & kv) { + path = path.removePrefix(CanonPath{baseName}); + res = &*kv.second; + }); + + if (!res) + res = &emptyAccessor; + + return callback(*res, path); + } + +public: + WholeStoreViewAccessor() + { + MemorySink sink{rootPathAccessor}; + sink.createDirectory(CanonPath::root); + } + + void addObject(std::string_view baseName, ref accessor) + { + subdirs.emplace(baseName, std::move(accessor)); + } + + std::string readFile(const CanonPath & path) override + { + return callWithAccessorForPath( + path, [](SourceAccessor & accessor, const CanonPath & path) { return accessor.readFile(path); }); + } + + void readFile(const CanonPath & path, Sink & sink, std::function sizeCallback) override + { + return callWithAccessorForPath(path, [&](SourceAccessor & accessor, const CanonPath & path) { + return accessor.readFile(path, sink, sizeCallback); + }); + } + + bool pathExists(const CanonPath & path) override + { + return callWithAccessorForPath( + path, [](SourceAccessor & accessor, const CanonPath & path) { return accessor.pathExists(path); }); + } + + std::optional maybeLstat(const CanonPath & path) override + { + return callWithAccessorForPath( + path, [](SourceAccessor & accessor, const CanonPath & path) { return accessor.maybeLstat(path); }); + } + + DirEntries readDirectory(const CanonPath & path) override + { + return callWithAccessorForPath( + path, [](SourceAccessor & accessor, const CanonPath & path) { return accessor.readDirectory(path); }); + } + + std::string readLink(const CanonPath & path) override + { + return callWithAccessorForPath( + path, [](SourceAccessor & accessor, const CanonPath & path) { return accessor.readLink(path); }); + } +}; + +} // namespace + struct DummyStore : virtual Store { using Config = DummyStoreConfig; @@ -29,7 +124,7 @@ struct DummyStore : virtual Store * This is map conceptually owns the file system objects for each * store object. 
*/ - std::map contents; + boost::concurrent_flat_map contents; /** * This view conceptually just borrows the file systems objects of @@ -38,23 +133,23 @@ struct DummyStore : virtual Store * * This is needed just in order to implement `Store::getFSAccessor`. */ - ref wholeStoreView = make_ref(); + ref wholeStoreView = make_ref(); DummyStore(ref config) : Store{*config} , config(config) { wholeStoreView->setPathDisplay(config->storeDir); - MemorySink sink{*wholeStoreView}; - sink.createDirectory(CanonPath::root); } void queryPathInfoUncached( const StorePath & path, Callback> callback) noexcept override { - if (auto it = contents.find(path); it != contents.end()) - callback(std::make_shared(StorePath{path}, it->second.info)); - else + bool visited = contents.cvisit(path, [&](const auto & kv) { + callback(std::make_shared(StorePath{kv.first}, kv.second.info)); + }); + + if (!visited) callback(nullptr); } @@ -87,19 +182,14 @@ struct DummyStore : virtual Store parseDump(tempSink, source); auto path = info.path; - auto [it, _] = contents.insert({ - path, - { - std::move(info), - make_ref(std::move(*temp)), - }, - }); - - auto & pathAndContents = it->second; - - bool inserted = wholeStoreView->open(CanonPath(path.to_string()), pathAndContents.contents->root); - if (!inserted) - unreachable(); + auto accessor = make_ref(std::move(*temp)); + contents.insert( + {path, + PathInfoAndContents{ + std::move(info), + accessor, + }}); + wholeStoreView->addObject(path.to_string(), accessor); } StorePath addToStoreFromDump( @@ -156,33 +246,28 @@ struct DummyStore : virtual Store info.narSize = narHash.second.value(); auto path = info.path; - - auto [it, _] = contents.insert({ - path, - { - std::move(info), - make_ref(std::move(*temp)), - }, - }); - - auto & pathAndContents = it->second; - - bool inserted = wholeStoreView->open(CanonPath(path.to_string()), pathAndContents.contents->root); - if (!inserted) - unreachable(); + auto accessor = make_ref(std::move(*temp)); + contents.insert( + {path, + PathInfoAndContents{ + std::move(info), + accessor, + }}); + wholeStoreView->addObject(path.to_string(), accessor); return path; } void narFromPath(const StorePath & path, Sink & sink) override { - auto object = contents.find(path); - if (object == contents.end()) - throw Error("path '%s' is not valid", printStorePath(path)); + bool visited = contents.cvisit(path, [&](const auto & kv) { + const auto & [info, accessor] = kv.second; + SourcePath sourcePath(accessor); + dumpPath(sourcePath, sink, FileSerialisationMethod::NixArchive); + }); - const auto & [info, accessor] = object->second; - SourcePath sourcePath(accessor); - dumpPath(sourcePath, sink, FileSerialisationMethod::NixArchive); + if (!visited) + throw Error("path '%s' is not valid", printStorePath(path)); } void From 5915fe319011b4be8accb2e3e4a21e9e2000d7db Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Mon, 22 Sep 2025 01:17:00 +0300 Subject: [PATCH 033/332] Revert "Use shared pointers in the memory source accessor" This is no longer necessary. This reverts commit 4df60e639b7e492ac5f651f2b3aa02055de5549a. 
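
For context only (stand-in types, not the actual MemorySourceAccessor classes): with this revert a directory node goes back to owning its entries by value, roughly like the sketch below, instead of holding them behind reference-counted `ref<File>` pointers.

    #include <map>
    #include <string>

    // Illustrative stand-in for a regular-file node; the real File type also
    // covers symlinks and nested directories.
    struct RegularFile
    {
        bool executable = false;
        std::string contents;
    };

    // Entries are held by value (std::map<Name, Node>), as before the reverted
    // change, rather than as std::map<Name, ref<Node>>.
    struct Directory
    {
        std::map<std::string, RegularFile> contents;
    };

    int main()
    {
        Directory dir;
        dir.contents.emplace("foo", RegularFile{.contents = "hello\n"});
        return dir.contents.at("foo").contents == "hello\n" ? 0 : 1;
    }

The previous commit gives the dummy store one accessor per store object, so file nodes no longer need to be shared between trees; that is what makes the extra indirection unnecessary.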
--- src/libutil-tests/git.cc | 16 ++++++++-------- .../include/nix/util/memory-source-accessor.hh | 16 ++++------------ src/libutil/memory-source-accessor.cc | 6 +++--- 3 files changed, 15 insertions(+), 23 deletions(-) diff --git a/src/libutil-tests/git.cc b/src/libutil-tests/git.cc index a06c5896d..6180a4cfc 100644 --- a/src/libutil-tests/git.cc +++ b/src/libutil-tests/git.cc @@ -233,30 +233,30 @@ TEST_F(GitTest, both_roundrip) .contents{ { "foo", - make_ref(File::Regular{ + File::Regular{ .contents = "hello\n\0\n\tworld!", - }), + }, }, { "bar", - make_ref(File::Directory{ + File::Directory{ .contents = { { "baz", - make_ref(File::Regular{ + File::Regular{ .executable = true, .contents = "good day,\n\0\n\tworld!", - }), + }, }, { "quux", - make_ref(File::Symlink{ + File::Symlink{ .target = "/over/there", - }), + }, }, }, - }), + }, }, }, }; diff --git a/src/libutil/include/nix/util/memory-source-accessor.hh b/src/libutil/include/nix/util/memory-source-accessor.hh index 53f1b0241..eba282fe1 100644 --- a/src/libutil/include/nix/util/memory-source-accessor.hh +++ b/src/libutil/include/nix/util/memory-source-accessor.hh @@ -35,7 +35,7 @@ struct MemorySourceAccessor : virtual SourceAccessor { using Name = std::string; - std::map, std::less<>> contents; + std::map> contents; bool operator==(const Directory &) const noexcept; // TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet. @@ -89,21 +89,13 @@ struct MemorySourceAccessor : virtual SourceAccessor SourcePath addFile(CanonPath path, std::string && contents); }; -inline bool -MemorySourceAccessor::File::Directory::operator==(const MemorySourceAccessor::File::Directory & other) const noexcept -{ - return std::ranges::equal(contents, other.contents, [](const auto & lhs, const auto & rhs) -> bool { - return lhs.first == rhs.first && *lhs.second == *rhs.second; - }); -}; +inline bool MemorySourceAccessor::File::Directory::operator==( + const MemorySourceAccessor::File::Directory &) const noexcept = default; inline bool MemorySourceAccessor::File::Directory::operator<(const MemorySourceAccessor::File::Directory & other) const noexcept { - return std::ranges::lexicographical_compare( - contents, other.contents, [](const auto & lhs, const auto & rhs) -> bool { - return lhs.first < rhs.first && *lhs.second < *rhs.second; - }); + return contents < other.contents; } inline bool MemorySourceAccessor::File::operator==(const MemorySourceAccessor::File &) const noexcept = default; diff --git a/src/libutil/memory-source-accessor.cc b/src/libutil/memory-source-accessor.cc index 7d53d6785..caff5b56a 100644 --- a/src/libutil/memory-source-accessor.cc +++ b/src/libutil/memory-source-accessor.cc @@ -39,11 +39,11 @@ MemorySourceAccessor::File * MemorySourceAccessor::open(const CanonPath & path, i, { std::string{name}, - make_ref(File::Directory{}), + File::Directory{}, }); } } - cur = &*i->second; + cur = &i->second; } if (newF && create) @@ -107,7 +107,7 @@ MemorySourceAccessor::DirEntries MemorySourceAccessor::readDirectory(const Canon if (auto * d = std::get_if(&f->raw)) { DirEntries res; for (auto & [name, file] : d->contents) - res.insert_or_assign(name, file->lstat().type); + res.insert_or_assign(name, file.lstat().type); return res; } else throw Error("file '%s' is not a directory", path); From 5af644492ba6cf21acf6ef064ee3d05bcf203a73 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 22 Sep 2025 19:31:22 +0200 Subject: [PATCH 034/332] nix develop: Apply shellcheck --- maintainers/flake-module.nix | 1 - src/nix/get-env.sh 
| 14 ++++++++++---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 54284784e..ac8fb6f76 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -108,7 +108,6 @@ # We haven't linted these files yet ''^scripts/install-multi-user\.sh$'' ''^scripts/install-systemd-multi-user\.sh$'' - ''^src/nix/get-env\.sh$'' ''^tests/functional/ca/build-dry\.sh$'' ''^tests/functional/ca/build-with-garbage-path\.sh$'' ''^tests/functional/ca/common\.sh$'' diff --git a/src/nix/get-env.sh b/src/nix/get-env.sh index 071edf9b9..39fa6f9ac 100644 --- a/src/nix/get-env.sh +++ b/src/nix/get-env.sh @@ -1,11 +1,14 @@ +# shellcheck shell=bash set -e +# shellcheck disable=SC1090 # Dynamic sourcing is intentional if [ -e "$NIX_ATTRS_SH_FILE" ]; then source "$NIX_ATTRS_SH_FILE"; fi export IN_NIX_SHELL=impure export dontAddDisableDepTrack=1 if [[ -n $stdenv ]]; then - source $stdenv/setup + # shellcheck disable=SC1091 # setup file is in nix store + source "$stdenv"/setup fi # Better to use compgen, but stdenv bash doesn't have it. @@ -17,10 +20,10 @@ __dumpEnv() { printf ' "bashFunctions": {\n' local __first=1 - while read __line; do + while read -r __line; do if ! [[ $__line =~ ^declare\ -f\ (.*) ]]; then continue; fi __fun_name="${BASH_REMATCH[1]}" - __fun_body="$(type $__fun_name)" + __fun_body="$(type "$__fun_name")" if [[ $__fun_body =~ \{(.*)\} ]]; then if [[ -z $__first ]]; then printf ',\n'; else __first=; fi __fun_body="${BASH_REMATCH[1]}" @@ -37,7 +40,7 @@ __dumpEnv() { printf ' "variables": {\n' local __first=1 - while read __line; do + while read -r __line; do if ! [[ $__line =~ ^declare\ (-[^ ])\ ([^=]*) ]]; then continue; fi local type="${BASH_REMATCH[1]}" local __var_name="${BASH_REMATCH[2]}" @@ -76,7 +79,9 @@ __dumpEnv() { elif [[ $type == -a ]]; then printf '"type": "array", "value": [' local __first2=1 + # shellcheck disable=SC1087 # Complex array manipulation, syntax is correct __var_name="$__var_name[@]" + # shellcheck disable=SC1087 # Complex array manipulation, syntax is correct for __i in "${!__var_name}"; do if [[ -z $__first2 ]]; then printf ', '; else __first2=; fi __escapeString "$__i" @@ -142,6 +147,7 @@ __dumpEnvToOutput() { # array with a format like `outname => /nix/store/hash-drvname-outname`. # Otherwise it is a space-separated list of output variable names. 
if [ -e "$NIX_ATTRS_SH_FILE" ]; then + # shellcheck disable=SC2154 # outputs is set by sourced file for __output in "${outputs[@]}"; do __dumpEnvToOutput "$__output" done From 8b97d14c08d9d851ff4ba03bfc5851152a01e6c3 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 22 Sep 2025 19:57:06 +0200 Subject: [PATCH 035/332] pre-commit: Give reason for ca test wrappers exclusion --- maintainers/flake-module.nix | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index ac8fb6f76..0c2ffe781 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -108,20 +108,9 @@ # We haven't linted these files yet ''^scripts/install-multi-user\.sh$'' ''^scripts/install-systemd-multi-user\.sh$'' - ''^tests/functional/ca/build-dry\.sh$'' ''^tests/functional/ca/build-with-garbage-path\.sh$'' ''^tests/functional/ca/common\.sh$'' ''^tests/functional/ca/concurrent-builds\.sh$'' - ''^tests/functional/ca/eval-store\.sh$'' - ''^tests/functional/ca/gc\.sh$'' - ''^tests/functional/ca/import-from-derivation\.sh$'' - ''^tests/functional/ca/new-build-cmd\.sh$'' - ''^tests/functional/ca/nix-shell\.sh$'' - ''^tests/functional/ca/post-hook\.sh$'' - ''^tests/functional/ca/recursive\.sh$'' - ''^tests/functional/ca/repl\.sh$'' - ''^tests/functional/ca/selfref-gc\.sh$'' - ''^tests/functional/ca/why-depends\.sh$'' ''^tests/functional/characterisation-test-infra\.sh$'' ''^tests/functional/common/vars-and-functions\.sh$'' ''^tests/functional/completions\.sh$'' @@ -245,6 +234,21 @@ # Shellcheck doesn't support fish or zsh shell syntax ''^misc/fish/completion\.fish$'' ''^misc/zsh/completion\.zsh$'' + + # Content-addressed test files that use recursive-*looking* sourcing + # (cd .. 
&& source ), causing shellcheck to loop + # They're small wrapper scripts with not a lot going on + ''^tests/functional/ca/build-dry\.sh$'' + ''^tests/functional/ca/eval-store\.sh$'' + ''^tests/functional/ca/gc\.sh$'' + ''^tests/functional/ca/import-from-derivation\.sh$'' + ''^tests/functional/ca/new-build-cmd\.sh$'' + ''^tests/functional/ca/nix-shell\.sh$'' + ''^tests/functional/ca/post-hook\.sh$'' + ''^tests/functional/ca/recursive\.sh$'' + ''^tests/functional/ca/repl\.sh$'' + ''^tests/functional/ca/selfref-gc\.sh$'' + ''^tests/functional/ca/why-depends\.sh$'' ]; }; }; From 8c31e07cce68022b52eb252270389eb5c4581545 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 22 Sep 2025 19:58:00 +0200 Subject: [PATCH 036/332] tests/func*/ca/build-with-garbage-path: Fix shellcheck --- maintainers/flake-module.nix | 1 - tests/functional/ca/build-with-garbage-path.sh | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 0c2ffe781..29f8fd1f9 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -108,7 +108,6 @@ # We haven't linted these files yet ''^scripts/install-multi-user\.sh$'' ''^scripts/install-systemd-multi-user\.sh$'' - ''^tests/functional/ca/build-with-garbage-path\.sh$'' ''^tests/functional/ca/common\.sh$'' ''^tests/functional/ca/concurrent-builds\.sh$'' ''^tests/functional/characterisation-test-infra\.sh$'' diff --git a/tests/functional/ca/build-with-garbage-path.sh b/tests/functional/ca/build-with-garbage-path.sh index 884cd2802..298cd469a 100755 --- a/tests/functional/ca/build-with-garbage-path.sh +++ b/tests/functional/ca/build-with-garbage-path.sh @@ -8,6 +8,7 @@ requireDaemonNewerThan "2.4pre20210621" # Get the output path of `rootCA`, and put some garbage instead outPath="$(nix-build ./content-addressed.nix -A rootCA --no-out-link)" +# shellcheck disable=SC2046 # Multiple store paths need to become individual args nix-store --delete $(nix-store -q --referrers-closure "$outPath") touch "$outPath" From 926287d813a1f9d719f54dea041fc62a1ed82b06 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 22 Sep 2025 20:05:37 +0200 Subject: [PATCH 037/332] tests/func*/ca/common: Fix shellcheck --- maintainers/flake-module.nix | 1 - tests/functional/ca/common.sh | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 29f8fd1f9..f0268a69d 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -108,7 +108,6 @@ # We haven't linted these files yet ''^scripts/install-multi-user\.sh$'' ''^scripts/install-systemd-multi-user\.sh$'' - ''^tests/functional/ca/common\.sh$'' ''^tests/functional/ca/concurrent-builds\.sh$'' ''^tests/functional/characterisation-test-infra\.sh$'' ''^tests/functional/common/vars-and-functions\.sh$'' diff --git a/tests/functional/ca/common.sh b/tests/functional/ca/common.sh index 48f1ac46b..dc8e650fd 100644 --- a/tests/functional/ca/common.sh +++ b/tests/functional/ca/common.sh @@ -1,3 +1,4 @@ +# shellcheck shell=bash source ../common.sh enableFeatures "ca-derivations" From 993ea14f528936a915262c0588d46fd7c92f571d Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 22 Sep 2025 20:08:30 +0200 Subject: [PATCH 038/332] pre-commit: Remove exclude that passes --- maintainers/flake-module.nix | 1 - 1 file changed, 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index f0268a69d..3012b6427 100644 --- a/maintainers/flake-module.nix +++ 
b/maintainers/flake-module.nix @@ -108,7 +108,6 @@ # We haven't linted these files yet ''^scripts/install-multi-user\.sh$'' ''^scripts/install-systemd-multi-user\.sh$'' - ''^tests/functional/ca/concurrent-builds\.sh$'' ''^tests/functional/characterisation-test-infra\.sh$'' ''^tests/functional/common/vars-and-functions\.sh$'' ''^tests/functional/completions\.sh$'' From 4183308ee2f7c07b891f1c007f265531e8149bb8 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 22 Sep 2025 20:17:30 +0200 Subject: [PATCH 039/332] tests/func*/characterisation-test-infra: Fix shellcheck --- maintainers/flake-module.nix | 1 - tests/functional/characterisation-test-infra.sh | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 3012b6427..c531c9297 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -108,7 +108,6 @@ # We haven't linted these files yet ''^scripts/install-multi-user\.sh$'' ''^scripts/install-systemd-multi-user\.sh$'' - ''^tests/functional/characterisation-test-infra\.sh$'' ''^tests/functional/common/vars-and-functions\.sh$'' ''^tests/functional/completions\.sh$'' ''^tests/functional/compute-levels\.sh$'' diff --git a/tests/functional/characterisation-test-infra.sh b/tests/functional/characterisation-test-infra.sh index 279454550..fecae29e8 100755 --- a/tests/functional/characterisation-test-infra.sh +++ b/tests/functional/characterisation-test-infra.sh @@ -40,7 +40,7 @@ echo Bye! > "$TEST_ROOT/expected" diffAndAcceptInner test "$TEST_ROOT/got" "$TEST_ROOT/expected" (( "$badDiff" == 1 )) ) -[[ "$(echo Bye! )" == $(< "$TEST_ROOT/expected") ]] +[[ "Bye!" == $(< "$TEST_ROOT/expected") ]] # _NIX_TEST_ACCEPT=1 matches non-empty echo Hi! > "$TEST_ROOT/got" @@ -57,7 +57,7 @@ echo Bye! > "$TEST_ROOT/expected" _NIX_TEST_ACCEPT=1 diffAndAcceptInner test "$TEST_ROOT/got" "$TEST_ROOT/expected" (( "$badDiff" == 1 )) ) -[[ "$(echo Hi! )" == $(< "$TEST_ROOT/expected") ]] +[[ "Hi!" 
== $(< "$TEST_ROOT/expected") ]] # second time succeeds ( diffAndAcceptInner test "$TEST_ROOT/got" "$TEST_ROOT/expected" From 8a9d9bb0e9f7ac414fbe0972266a38372cb54ac2 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 22 Sep 2025 20:21:07 +0200 Subject: [PATCH 040/332] pre-commit: Remove exclusion for removed file --- maintainers/flake-module.nix | 1 - 1 file changed, 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index c531c9297..a54499654 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -108,7 +108,6 @@ # We haven't linted these files yet ''^scripts/install-multi-user\.sh$'' ''^scripts/install-systemd-multi-user\.sh$'' - ''^tests/functional/common/vars-and-functions\.sh$'' ''^tests/functional/completions\.sh$'' ''^tests/functional/compute-levels\.sh$'' ''^tests/functional/config\.sh$'' From 7ea31c6e5674ad0afc329f72c6acfd3f76dfa2fe Mon Sep 17 00:00:00 2001 From: John Ericson Date: Fri, 19 Sep 2025 14:16:35 -0400 Subject: [PATCH 041/332] Run multiple outputs and build-delete test for CA drvs also --- maintainers/flake-module.nix | 2 ++ tests/functional/build-delete.sh | 4 +++ tests/functional/ca/build-delete.sh | 7 ++++ tests/functional/ca/meson.build | 2 ++ tests/functional/ca/multiple-outputs.sh | 7 ++++ tests/functional/multiple-outputs.sh | 48 ++++++++++++++++--------- 6 files changed, 53 insertions(+), 17 deletions(-) create mode 100644 tests/functional/ca/build-delete.sh create mode 100644 tests/functional/ca/multiple-outputs.sh diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index a54499654..a5360675f 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -233,10 +233,12 @@ # Content-addressed test files that use recursive-*looking* sourcing # (cd .. && source ), causing shellcheck to loop # They're small wrapper scripts with not a lot going on + ''^tests/functional/ca/build-delete\.sh$'' ''^tests/functional/ca/build-dry\.sh$'' ''^tests/functional/ca/eval-store\.sh$'' ''^tests/functional/ca/gc\.sh$'' ''^tests/functional/ca/import-from-derivation\.sh$'' + ''^tests/functional/ca/multiple-outputs\.sh$'' ''^tests/functional/ca/new-build-cmd\.sh$'' ''^tests/functional/ca/nix-shell\.sh$'' ''^tests/functional/ca/post-hook\.sh$'' diff --git a/tests/functional/build-delete.sh b/tests/functional/build-delete.sh index 18841509d..66b14fd14 100755 --- a/tests/functional/build-delete.sh +++ b/tests/functional/build-delete.sh @@ -43,6 +43,10 @@ issue_6572_dependent_outputs() { nix-store --delete "$p" # Clean up for next test # Make sure that 'nix build' tracks input-outputs correctly when a single output is already present. + if [[ -n "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then + # Resolved derivations interferre with the deletion + nix-store --delete "${NIX_STORE_DIR}"/*.drv + fi nix-store --delete "$(jq -r <"$TEST_ROOT"/a.json .[0].outputs.second)" p=$(nix build -f multiple-outputs.nix use-a --no-link --print-out-paths) cmp "$p" <&1 | grep 'contains illegal character' -expect 1 nix build -f multiple-outputs.nix invalid-output-name-2 2>&1 | grep 'contains illegal character' +# TODO inspect why this doesn't work with floating content-addressing +# derivations. +if [[ ! 
-n "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then + expect 1 nix build -f multiple-outputs.nix invalid-output-name-1 2>&1 | grep 'contains illegal character' + expect 1 nix build -f multiple-outputs.nix invalid-output-name-2 2>&1 | grep 'contains illegal character' +fi From af71a9dbd96c282ef23096e5a3b71dd220fab3f0 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Tue, 23 Sep 2025 13:05:12 -0400 Subject: [PATCH 042/332] Fix `JSON_IMPL` macro to avoid extraneous copies Should take the thing we're serializing by reference. --- src/libfetchers/fetchers.cc | 2 +- src/libstore/derivation-options.cc | 4 ++-- src/libstore/derivations.cc | 2 +- src/libstore/outputs-spec.cc | 4 ++-- src/libstore/path.cc | 2 +- src/libstore/realisation.cc | 2 +- src/libutil/include/nix/util/json-impls.hh | 18 +++++++++--------- 7 files changed, 17 insertions(+), 17 deletions(-) diff --git a/src/libfetchers/fetchers.cc b/src/libfetchers/fetchers.cc index 54013bf55..a6b5e295a 100644 --- a/src/libfetchers/fetchers.cc +++ b/src/libfetchers/fetchers.cc @@ -509,7 +509,7 @@ fetchers::PublicKey adl_serializer::from_json(const json & return res; } -void adl_serializer::to_json(json & json, fetchers::PublicKey p) +void adl_serializer::to_json(json & json, const fetchers::PublicKey & p) { json["type"] = p.type; json["key"] = p.key; diff --git a/src/libstore/derivation-options.cc b/src/libstore/derivation-options.cc index 630159629..4cb9bf726 100644 --- a/src/libstore/derivation-options.cc +++ b/src/libstore/derivation-options.cc @@ -356,7 +356,7 @@ DerivationOptions adl_serializer::from_json(const json & json }; } -void adl_serializer::to_json(json & json, DerivationOptions o) +void adl_serializer::to_json(json & json, const DerivationOptions & o) { json["outputChecks"] = std::visit( overloaded{ @@ -398,7 +398,7 @@ DerivationOptions::OutputChecks adl_serializer: }; } -void adl_serializer::to_json(json & json, DerivationOptions::OutputChecks c) +void adl_serializer::to_json(json & json, const DerivationOptions::OutputChecks & c) { json["ignoreSelfRefs"] = c.ignoreSelfRefs; json["allowedReferences"] = c.allowedReferences; diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc index 92266b61b..a0c709791 100644 --- a/src/libstore/derivations.cc +++ b/src/libstore/derivations.cc @@ -1494,7 +1494,7 @@ Derivation adl_serializer::from_json(const json & json) return Derivation::fromJSON(json); } -void adl_serializer::to_json(json & json, Derivation c) +void adl_serializer::to_json(json & json, const Derivation & c) { json = c.toJSON(); } diff --git a/src/libstore/outputs-spec.cc b/src/libstore/outputs-spec.cc index 7f73c7d35..aacc964cd 100644 --- a/src/libstore/outputs-spec.cc +++ b/src/libstore/outputs-spec.cc @@ -150,7 +150,7 @@ OutputsSpec adl_serializer::from_json(const json & json) return OutputsSpec::Names{std::move(names)}; } -void adl_serializer::to_json(json & json, OutputsSpec t) +void adl_serializer::to_json(json & json, const OutputsSpec & t) { std::visit( overloaded{ @@ -169,7 +169,7 @@ ExtendedOutputsSpec adl_serializer::from_json(const json & } } -void adl_serializer::to_json(json & json, ExtendedOutputsSpec t) +void adl_serializer::to_json(json & json, const ExtendedOutputsSpec & t) { std::visit( overloaded{ diff --git a/src/libstore/path.cc b/src/libstore/path.cc index 942f97a88..fa430ce94 100644 --- a/src/libstore/path.cc +++ b/src/libstore/path.cc @@ -88,7 +88,7 @@ StorePath adl_serializer::from_json(const json & json) return StorePath{getString(json)}; } -void adl_serializer::to_json(json & json, StorePath 
storePath) +void adl_serializer::to_json(json & json, const StorePath & storePath) { json = storePath.to_string(); } diff --git a/src/libstore/realisation.cc b/src/libstore/realisation.cc index d59f4b0ea..febd67bd2 100644 --- a/src/libstore/realisation.cc +++ b/src/libstore/realisation.cc @@ -165,7 +165,7 @@ Realisation adl_serializer::from_json(const json & json0) }; } -void adl_serializer::to_json(json & json, Realisation r) +void adl_serializer::to_json(json & json, const Realisation & r) { auto jsonDependentRealisations = nlohmann::json::object(); for (auto & [depId, depOutPath] : r.dependentRealisations) diff --git a/src/libutil/include/nix/util/json-impls.hh b/src/libutil/include/nix/util/json-impls.hh index 8a6198313..751fc410f 100644 --- a/src/libutil/include/nix/util/json-impls.hh +++ b/src/libutil/include/nix/util/json-impls.hh @@ -4,13 +4,13 @@ #include // Following https://github.com/nlohmann/json#how-can-i-use-get-for-non-default-constructiblenon-copyable-types -#define JSON_IMPL(TYPE) \ - namespace nlohmann { \ - using namespace nix; \ - template<> \ - struct adl_serializer \ - { \ - static TYPE from_json(const json & json); \ - static void to_json(json & json, TYPE t); \ - }; \ +#define JSON_IMPL(TYPE) \ + namespace nlohmann { \ + using namespace nix; \ + template<> \ + struct adl_serializer \ + { \ + static TYPE from_json(const json & json); \ + static void to_json(json & json, const TYPE & t); \ + }; \ } From 1c71cb4005809c7e238dac296d039f542970b29b Mon Sep 17 00:00:00 2001 From: John Ericson Date: Tue, 23 Sep 2025 13:58:02 -0400 Subject: [PATCH 043/332] Remove some pointless `std::visit` These are not needed, because the `toJSON` methods are already implemented for the variant wrapper too. --- src/nix/build.cc | 30 +++++++++++++----------------- 1 file changed, 13 insertions(+), 17 deletions(-) diff --git a/src/nix/build.cc b/src/nix/build.cc index eb47c3133..ea05251ad 100644 --- a/src/nix/build.cc +++ b/src/nix/build.cc @@ -12,7 +12,7 @@ static nlohmann::json derivedPathsToJSON(const DerivedPaths & paths, Store & sto { auto res = nlohmann::json::array(); for (auto & t : paths) { - std::visit([&](const auto & t) { res.push_back(t.toJSON(store)); }, t.raw()); + res.push_back(t.toJSON(store)); } return res; } @@ -22,22 +22,18 @@ builtPathsWithResultToJSON(const std::vector & buildables, { auto res = nlohmann::json::array(); for (auto & b : buildables) { - std::visit( - [&](const auto & t) { - auto j = t.toJSON(store); - if (b.result) { - if (b.result->startTime) - j["startTime"] = b.result->startTime; - if (b.result->stopTime) - j["stopTime"] = b.result->stopTime; - if (b.result->cpuUser) - j["cpuUser"] = ((double) b.result->cpuUser->count()) / 1000000; - if (b.result->cpuSystem) - j["cpuSystem"] = ((double) b.result->cpuSystem->count()) / 1000000; - } - res.push_back(j); - }, - b.path.raw()); + auto j = b.path.toJSON(store); + if (b.result) { + if (b.result->startTime) + j["startTime"] = b.result->startTime; + if (b.result->stopTime) + j["stopTime"] = b.result->stopTime; + if (b.result->cpuUser) + j["cpuUser"] = ((double) b.result->cpuUser->count()) / 1000000; + if (b.result->cpuSystem) + j["cpuSystem"] = ((double) b.result->cpuSystem->count()) / 1000000; + } + res.push_back(j); } return res; } From f24e00710e805bc1d338b4a2c876b541b25b92e8 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Tue, 23 Sep 2025 14:44:52 -0400 Subject: [PATCH 044/332] Convert `{Extended,}OutputsSpec` JSON tests to characterization tests This brings them in line with the other tests, and furthers 
my goals of separating unit test data from code. Doing this cleanup as part of my #13570 effort, but strictly-speaking, this is separate as these data types' JSON never contained and store paths or store dirs, just simple output name strings. --- src/libstore-tests/data/outputs-spec/all.json | 3 + .../data/outputs-spec/extended/all.json | 3 + .../data/outputs-spec/extended/def.json | 1 + .../data/outputs-spec/extended/name.json | 3 + .../data/outputs-spec/extended/names.json | 4 + .../data/outputs-spec/name.json | 3 + .../data/outputs-spec/names.json | 4 + src/libstore-tests/outputs-spec.cc | 126 ++++++++++++------ 8 files changed, 103 insertions(+), 44 deletions(-) create mode 100644 src/libstore-tests/data/outputs-spec/all.json create mode 100644 src/libstore-tests/data/outputs-spec/extended/all.json create mode 100644 src/libstore-tests/data/outputs-spec/extended/def.json create mode 100644 src/libstore-tests/data/outputs-spec/extended/name.json create mode 100644 src/libstore-tests/data/outputs-spec/extended/names.json create mode 100644 src/libstore-tests/data/outputs-spec/name.json create mode 100644 src/libstore-tests/data/outputs-spec/names.json diff --git a/src/libstore-tests/data/outputs-spec/all.json b/src/libstore-tests/data/outputs-spec/all.json new file mode 100644 index 000000000..1449203e9 --- /dev/null +++ b/src/libstore-tests/data/outputs-spec/all.json @@ -0,0 +1,3 @@ +[ + "*" +] diff --git a/src/libstore-tests/data/outputs-spec/extended/all.json b/src/libstore-tests/data/outputs-spec/extended/all.json new file mode 100644 index 000000000..1449203e9 --- /dev/null +++ b/src/libstore-tests/data/outputs-spec/extended/all.json @@ -0,0 +1,3 @@ +[ + "*" +] diff --git a/src/libstore-tests/data/outputs-spec/extended/def.json b/src/libstore-tests/data/outputs-spec/extended/def.json new file mode 100644 index 000000000..19765bd50 --- /dev/null +++ b/src/libstore-tests/data/outputs-spec/extended/def.json @@ -0,0 +1 @@ +null diff --git a/src/libstore-tests/data/outputs-spec/extended/name.json b/src/libstore-tests/data/outputs-spec/extended/name.json new file mode 100644 index 000000000..0ede90fb4 --- /dev/null +++ b/src/libstore-tests/data/outputs-spec/extended/name.json @@ -0,0 +1,3 @@ +[ + "a" +] diff --git a/src/libstore-tests/data/outputs-spec/extended/names.json b/src/libstore-tests/data/outputs-spec/extended/names.json new file mode 100644 index 000000000..517c9d68e --- /dev/null +++ b/src/libstore-tests/data/outputs-spec/extended/names.json @@ -0,0 +1,4 @@ +[ + "a", + "b" +] diff --git a/src/libstore-tests/data/outputs-spec/name.json b/src/libstore-tests/data/outputs-spec/name.json new file mode 100644 index 000000000..0ede90fb4 --- /dev/null +++ b/src/libstore-tests/data/outputs-spec/name.json @@ -0,0 +1,3 @@ +[ + "a" +] diff --git a/src/libstore-tests/data/outputs-spec/names.json b/src/libstore-tests/data/outputs-spec/names.json new file mode 100644 index 000000000..517c9d68e --- /dev/null +++ b/src/libstore-tests/data/outputs-spec/names.json @@ -0,0 +1,4 @@ +[ + "a", + "b" +] diff --git a/src/libstore-tests/outputs-spec.cc b/src/libstore-tests/outputs-spec.cc index b0b80e7c4..7b3fc8f45 100644 --- a/src/libstore-tests/outputs-spec.cc +++ b/src/libstore-tests/outputs-spec.cc @@ -1,18 +1,44 @@ -#include "nix/store/tests/outputs-spec.hh" - #include #include #include +#include "nix/store/tests/outputs-spec.hh" + +#include "nix/util/tests/characterization.hh" + namespace nix { -TEST(OutputsSpec, no_empty_names) +class OutputsSpecTest : public CharacterizationTest +{ + 
std::filesystem::path unitTestData = getUnitTestData() / "outputs-spec"; + +public: + + std::filesystem::path goldenMaster(std::string_view testStem) const override + { + return unitTestData / testStem; + } +}; + +class ExtendedOutputsSpecTest : public CharacterizationTest +{ + std::filesystem::path unitTestData = getUnitTestData() / "outputs-spec" / "extended"; + +public: + + std::filesystem::path goldenMaster(std::string_view testStem) const override + { + return unitTestData / testStem; + } +}; + +TEST_F(OutputsSpecTest, no_empty_names) { ASSERT_DEATH(OutputsSpec::Names{StringSet{}}, ""); } #define TEST_DONT_PARSE(NAME, STR) \ - TEST(OutputsSpec, bad_##NAME) \ + TEST_F(OutputsSpecTest, bad_##NAME) \ { \ std::optional OutputsSpecOpt = OutputsSpec::parseOpt(STR); \ ASSERT_FALSE(OutputsSpecOpt); \ @@ -26,7 +52,7 @@ TEST_DONT_PARSE(star_second, "foo,*") #undef TEST_DONT_PARSE -TEST(OutputsSpec, all) +TEST_F(OutputsSpecTest, all) { std::string_view str = "*"; OutputsSpec expected = OutputsSpec::All{}; @@ -34,7 +60,7 @@ TEST(OutputsSpec, all) ASSERT_EQ(expected.to_string(), str); } -TEST(OutputsSpec, names_out) +TEST_F(OutputsSpecTest, names_out) { std::string_view str = "out"; OutputsSpec expected = OutputsSpec::Names{"out"}; @@ -42,7 +68,7 @@ TEST(OutputsSpec, names_out) ASSERT_EQ(expected.to_string(), str); } -TEST(OutputsSpec, names_underscore) +TEST_F(OutputsSpecTest, names_underscore) { std::string_view str = "a_b"; OutputsSpec expected = OutputsSpec::Names{"a_b"}; @@ -50,7 +76,7 @@ TEST(OutputsSpec, names_underscore) ASSERT_EQ(expected.to_string(), str); } -TEST(OutputsSpec, names_numeric) +TEST_F(OutputsSpecTest, names_numeric) { std::string_view str = "01"; OutputsSpec expected = OutputsSpec::Names{"01"}; @@ -58,7 +84,7 @@ TEST(OutputsSpec, names_numeric) ASSERT_EQ(expected.to_string(), str); } -TEST(OutputsSpec, names_out_bin) +TEST_F(OutputsSpecTest, names_out_bin) { OutputsSpec expected = OutputsSpec::Names{"out", "bin"}; ASSERT_EQ(OutputsSpec::parse("out,bin"), expected); @@ -68,32 +94,32 @@ TEST(OutputsSpec, names_out_bin) #define TEST_SUBSET(X, THIS, THAT) X((OutputsSpec{THIS}).isSubsetOf(THAT)); -TEST(OutputsSpec, subsets_all_all) +TEST_F(OutputsSpecTest, subsets_all_all) { TEST_SUBSET(ASSERT_TRUE, OutputsSpec::All{}, OutputsSpec::All{}); } -TEST(OutputsSpec, subsets_names_all) +TEST_F(OutputsSpecTest, subsets_names_all) { TEST_SUBSET(ASSERT_TRUE, OutputsSpec::Names{"a"}, OutputsSpec::All{}); } -TEST(OutputsSpec, subsets_names_names_eq) +TEST_F(OutputsSpecTest, subsets_names_names_eq) { TEST_SUBSET(ASSERT_TRUE, OutputsSpec::Names{"a"}, OutputsSpec::Names{"a"}); } -TEST(OutputsSpec, subsets_names_names_noneq) +TEST_F(OutputsSpecTest, subsets_names_names_noneq) { TEST_SUBSET(ASSERT_TRUE, OutputsSpec::Names{"a"}, (OutputsSpec::Names{"a", "b"})); } -TEST(OutputsSpec, not_subsets_all_names) +TEST_F(OutputsSpecTest, not_subsets_all_names) { TEST_SUBSET(ASSERT_FALSE, OutputsSpec::All{}, OutputsSpec::Names{"a"}); } -TEST(OutputsSpec, not_subsets_names_names) +TEST_F(OutputsSpecTest, not_subsets_names_names) { TEST_SUBSET(ASSERT_FALSE, (OutputsSpec::Names{"a", "b"}), (OutputsSpec::Names{"a"})); } @@ -102,22 +128,22 @@ TEST(OutputsSpec, not_subsets_names_names) #define TEST_UNION(RES, THIS, THAT) ASSERT_EQ(OutputsSpec{RES}, (OutputsSpec{THIS}).union_(THAT)); -TEST(OutputsSpec, union_all_all) +TEST_F(OutputsSpecTest, union_all_all) { TEST_UNION(OutputsSpec::All{}, OutputsSpec::All{}, OutputsSpec::All{}); } -TEST(OutputsSpec, union_all_names) +TEST_F(OutputsSpecTest, union_all_names) { 
TEST_UNION(OutputsSpec::All{}, OutputsSpec::All{}, OutputsSpec::Names{"a"}); } -TEST(OutputsSpec, union_names_all) +TEST_F(OutputsSpecTest, union_names_all) { TEST_UNION(OutputsSpec::All{}, OutputsSpec::Names{"a"}, OutputsSpec::All{}); } -TEST(OutputsSpec, union_names_names) +TEST_F(OutputsSpecTest, union_names_names) { TEST_UNION((OutputsSpec::Names{"a", "b"}), OutputsSpec::Names{"a"}, OutputsSpec::Names{"b"}); } @@ -125,7 +151,7 @@ TEST(OutputsSpec, union_names_names) #undef TEST_UNION #define TEST_DONT_PARSE(NAME, STR) \ - TEST(ExtendedOutputsSpec, bad_##NAME) \ + TEST_F(ExtendedOutputsSpecTest, bad_##NAME) \ { \ std::optional extendedOutputsSpecOpt = ExtendedOutputsSpec::parseOpt(STR); \ ASSERT_FALSE(extendedOutputsSpecOpt); \ @@ -140,7 +166,7 @@ TEST_DONT_PARSE(star_second, "^foo,*") #undef TEST_DONT_PARSE -TEST(ExtendedOutputsSpec, default) +TEST_F(ExtendedOutputsSpecTest, default) { std::string_view str = "foo"; auto [prefix, extendedOutputsSpec] = ExtendedOutputsSpec::parse(str); @@ -150,7 +176,7 @@ TEST(ExtendedOutputsSpec, default) ASSERT_EQ(std::string{prefix} + expected.to_string(), str); } -TEST(ExtendedOutputsSpec, all) +TEST_F(ExtendedOutputsSpecTest, all) { std::string_view str = "foo^*"; auto [prefix, extendedOutputsSpec] = ExtendedOutputsSpec::parse(str); @@ -160,7 +186,7 @@ TEST(ExtendedOutputsSpec, all) ASSERT_EQ(std::string{prefix} + expected.to_string(), str); } -TEST(ExtendedOutputsSpec, out) +TEST_F(ExtendedOutputsSpecTest, out) { std::string_view str = "foo^out"; auto [prefix, extendedOutputsSpec] = ExtendedOutputsSpec::parse(str); @@ -170,7 +196,7 @@ TEST(ExtendedOutputsSpec, out) ASSERT_EQ(std::string{prefix} + expected.to_string(), str); } -TEST(ExtendedOutputsSpec, out_bin) +TEST_F(ExtendedOutputsSpecTest, out_bin) { auto [prefix, extendedOutputsSpec] = ExtendedOutputsSpec::parse("foo^out,bin"); ASSERT_EQ(prefix, "foo"); @@ -179,7 +205,7 @@ TEST(ExtendedOutputsSpec, out_bin) ASSERT_EQ(std::string{prefix} + expected.to_string(), "foo^bin,out"); } -TEST(ExtendedOutputsSpec, many_carrot) +TEST_F(ExtendedOutputsSpecTest, many_carrot) { auto [prefix, extendedOutputsSpec] = ExtendedOutputsSpec::parse("foo^bar^out,bin"); ASSERT_EQ(prefix, "foo^bar"); @@ -188,28 +214,40 @@ TEST(ExtendedOutputsSpec, many_carrot) ASSERT_EQ(std::string{prefix} + expected.to_string(), "foo^bar^bin,out"); } -#define TEST_JSON(TYPE, NAME, STR, VAL) \ - \ - TEST(TYPE, NAME##_to_json) \ - { \ - using nlohmann::literals::operator"" _json; \ - ASSERT_EQ(STR##_json, ((nlohmann::json) TYPE{VAL})); \ - } \ - \ - TEST(TYPE, NAME##_from_json) \ - { \ - using nlohmann::literals::operator"" _json; \ - ASSERT_EQ(TYPE{VAL}, (STR##_json).get()); \ +#define TEST_JSON(FIXTURE, TYPE, NAME, VAL) \ + static const TYPE FIXTURE##_##NAME = VAL; \ + \ + TEST_F(FIXTURE, NAME##_from_json) \ + { \ + using namespace nlohmann; \ + \ + readTest(#NAME ".json", [&](const auto & encoded_) { \ + auto encoded = json::parse(encoded_); \ + TYPE got = adl_serializer::from_json(encoded); \ + ASSERT_EQ(got, FIXTURE##_##NAME); \ + }); \ + } \ + \ + TEST_F(FIXTURE, NAME##_to_json) \ + { \ + using namespace nlohmann; \ + \ + writeTest( \ + #NAME ".json", \ + [&]() -> json { return static_cast(FIXTURE##_##NAME); }, \ + [](const auto & file) { return json::parse(readFile(file)); }, \ + [](const auto & file, const auto & got) { return writeFile(file, got.dump(2) + "\n"); }); \ } -TEST_JSON(OutputsSpec, all, R"(["*"])", OutputsSpec::All{}) -TEST_JSON(OutputsSpec, name, R"(["a"])", OutputsSpec::Names{"a"}) -TEST_JSON(OutputsSpec, names, 
R"(["a","b"])", (OutputsSpec::Names{"a", "b"})) +TEST_JSON(OutputsSpecTest, OutputsSpec, all, OutputsSpec::All{}) +TEST_JSON(OutputsSpecTest, OutputsSpec, name, OutputsSpec::Names{"a"}) +TEST_JSON(OutputsSpecTest, OutputsSpec, names, (OutputsSpec::Names{"a", "b"})) -TEST_JSON(ExtendedOutputsSpec, def, R"(null)", ExtendedOutputsSpec::Default{}) -TEST_JSON(ExtendedOutputsSpec, all, R"(["*"])", ExtendedOutputsSpec::Explicit{OutputsSpec::All{}}) -TEST_JSON(ExtendedOutputsSpec, name, R"(["a"])", ExtendedOutputsSpec::Explicit{OutputsSpec::Names{"a"}}) -TEST_JSON(ExtendedOutputsSpec, names, R"(["a","b"])", (ExtendedOutputsSpec::Explicit{OutputsSpec::Names{"a", "b"}})) +TEST_JSON(ExtendedOutputsSpecTest, ExtendedOutputsSpec, def, ExtendedOutputsSpec::Default{}) +TEST_JSON(ExtendedOutputsSpecTest, ExtendedOutputsSpec, all, ExtendedOutputsSpec::Explicit{OutputsSpec::All{}}) +TEST_JSON(ExtendedOutputsSpecTest, ExtendedOutputsSpec, name, ExtendedOutputsSpec::Explicit{OutputsSpec::Names{"a"}}) +TEST_JSON( + ExtendedOutputsSpecTest, ExtendedOutputsSpec, names, (ExtendedOutputsSpec::Explicit{OutputsSpec::Names{"a", "b"}})) #undef TEST_JSON From d23e59bb6bb5b429bc8ca0e303f03cbb385130c3 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Thu, 13 Feb 2025 00:52:05 -0500 Subject: [PATCH 045/332] Modernize and test derived path JSON Old code is now just used for `nix build` --- there is no CLI breaking change. Test the new format, too. The new format is not currently used, but will be used going forward, for example in the C API. Progress on #13570 --- src/libcmd/built-path.cc | 14 +- .../data/derived-path/multi_built_built.json | 10 ++ .../multi_built_built_wildcard.json | 9 ++ .../data/derived-path/multi_opaque.json | 1 + .../data/derived-path/mutli_built.json | 7 + .../data/derived-path/single_built.json | 4 + .../data/derived-path/single_built_built.json | 7 + .../data/derived-path/single_opaque.json | 1 + src/libstore-tests/derived-path.cc | 100 +++++++++++++- src/libstore/derived-path.cc | 130 ++++++++++-------- .../include/nix/store/derived-path.hh | 16 +-- src/nix/build.cc | 70 +++++++++- 12 files changed, 300 insertions(+), 69 deletions(-) create mode 100644 src/libstore-tests/data/derived-path/multi_built_built.json create mode 100644 src/libstore-tests/data/derived-path/multi_built_built_wildcard.json create mode 100644 src/libstore-tests/data/derived-path/multi_opaque.json create mode 100644 src/libstore-tests/data/derived-path/mutli_built.json create mode 100644 src/libstore-tests/data/derived-path/single_built.json create mode 100644 src/libstore-tests/data/derived-path/single_built_built.json create mode 100644 src/libstore-tests/data/derived-path/single_opaque.json diff --git a/src/libcmd/built-path.cc b/src/libcmd/built-path.cc index 80d97dc3e..4d76dd6da 100644 --- a/src/libcmd/built-path.cc +++ b/src/libcmd/built-path.cc @@ -83,12 +83,22 @@ nlohmann::json SingleBuiltPath::Built::toJSON(const StoreDirConfig & store) cons nlohmann::json SingleBuiltPath::toJSON(const StoreDirConfig & store) const { - return std::visit([&](const auto & buildable) { return buildable.toJSON(store); }, raw()); + return std::visit( + overloaded{ + [&](const SingleBuiltPath::Opaque & o) -> nlohmann::json { return store.printStorePath(o.path); }, + [&](const SingleBuiltPath::Built & b) { return b.toJSON(store); }, + }, + raw()); } nlohmann::json BuiltPath::toJSON(const StoreDirConfig & store) const { - return std::visit([&](const auto & buildable) { return buildable.toJSON(store); }, raw()); + return std::visit( + 
overloaded{ + [&](const BuiltPath::Opaque & o) -> nlohmann::json { return store.printStorePath(o.path); }, + [&](const BuiltPath::Built & b) { return b.toJSON(store); }, + }, + raw()); } RealisedPath::Set BuiltPath::toRealisedPaths(Store & store) const diff --git a/src/libstore-tests/data/derived-path/multi_built_built.json b/src/libstore-tests/data/derived-path/multi_built_built.json new file mode 100644 index 000000000..561d04850 --- /dev/null +++ b/src/libstore-tests/data/derived-path/multi_built_built.json @@ -0,0 +1,10 @@ +{ + "drvPath": { + "drvPath": "g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv", + "output": "bar" + }, + "outputs": [ + "baz", + "quux" + ] +} diff --git a/src/libstore-tests/data/derived-path/multi_built_built_wildcard.json b/src/libstore-tests/data/derived-path/multi_built_built_wildcard.json new file mode 100644 index 000000000..da1f9d996 --- /dev/null +++ b/src/libstore-tests/data/derived-path/multi_built_built_wildcard.json @@ -0,0 +1,9 @@ +{ + "drvPath": { + "drvPath": "g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv", + "output": "bar" + }, + "outputs": [ + "*" + ] +} diff --git a/src/libstore-tests/data/derived-path/multi_opaque.json b/src/libstore-tests/data/derived-path/multi_opaque.json new file mode 100644 index 000000000..9bedb882b --- /dev/null +++ b/src/libstore-tests/data/derived-path/multi_opaque.json @@ -0,0 +1 @@ +"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv" diff --git a/src/libstore-tests/data/derived-path/mutli_built.json b/src/libstore-tests/data/derived-path/mutli_built.json new file mode 100644 index 000000000..d7bcff53d --- /dev/null +++ b/src/libstore-tests/data/derived-path/mutli_built.json @@ -0,0 +1,7 @@ +{ + "drvPath": "g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv", + "outputs": [ + "bar", + "baz" + ] +} diff --git a/src/libstore-tests/data/derived-path/single_built.json b/src/libstore-tests/data/derived-path/single_built.json new file mode 100644 index 000000000..64110a364 --- /dev/null +++ b/src/libstore-tests/data/derived-path/single_built.json @@ -0,0 +1,4 @@ +{ + "drvPath": "g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv", + "output": "bar" +} diff --git a/src/libstore-tests/data/derived-path/single_built_built.json b/src/libstore-tests/data/derived-path/single_built_built.json new file mode 100644 index 000000000..66faa668c --- /dev/null +++ b/src/libstore-tests/data/derived-path/single_built_built.json @@ -0,0 +1,7 @@ +{ + "drvPath": { + "drvPath": "g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv", + "output": "bar" + }, + "output": "baz" +} diff --git a/src/libstore-tests/data/derived-path/single_opaque.json b/src/libstore-tests/data/derived-path/single_opaque.json new file mode 100644 index 000000000..9bedb882b --- /dev/null +++ b/src/libstore-tests/data/derived-path/single_opaque.json @@ -0,0 +1 @@ +"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv" diff --git a/src/libstore-tests/derived-path.cc b/src/libstore-tests/derived-path.cc index c7d2c5817..6e7648f25 100644 --- a/src/libstore-tests/derived-path.cc +++ b/src/libstore-tests/derived-path.cc @@ -3,13 +3,23 @@ #include #include +#include "nix/util/tests/characterization.hh" #include "nix/store/tests/derived-path.hh" #include "nix/store/tests/libstore.hh" namespace nix { -class DerivedPathTest : public LibStoreTest -{}; +class DerivedPathTest : public CharacterizationTest, public LibStoreTest +{ + std::filesystem::path unitTestData = getUnitTestData() / "derived-path"; + +public: + + std::filesystem::path goldenMaster(std::string_view testStem) const override + { + return unitTestData / testStem; + } +}; /** * 
Round trip (string <-> data structure) test for @@ -107,4 +117,90 @@ RC_GTEST_FIXTURE_PROP(DerivedPathTest, prop_round_rip, (const DerivedPath & o)) #endif +/* ---------------------------------------------------------------------------- + * JSON + * --------------------------------------------------------------------------*/ + +using nlohmann::json; + +#define TEST_JSON(TYPE, NAME, VAL) \ + static const TYPE NAME = VAL; \ + \ + TEST_F(DerivedPathTest, NAME##_from_json) \ + { \ + readTest(#NAME ".json", [&](const auto & encoded_) { \ + auto encoded = json::parse(encoded_); \ + TYPE got = static_cast(encoded); \ + ASSERT_EQ(got, NAME); \ + }); \ + } \ + \ + TEST_F(DerivedPathTest, NAME##_to_json) \ + { \ + writeTest( \ + #NAME ".json", \ + [&]() -> json { return static_cast(NAME); }, \ + [](const auto & file) { return json::parse(readFile(file)); }, \ + [](const auto & file, const auto & got) { return writeFile(file, got.dump(2) + "\n"); }); \ + } + +TEST_JSON( + SingleDerivedPath, single_opaque, SingleDerivedPath::Opaque{StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"}}); + +TEST_JSON( + SingleDerivedPath, + single_built, + (SingleDerivedPath::Built{ + .drvPath = make_ref(SingleDerivedPath::Opaque{ + StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"}}), + .output = "bar", + })); + +TEST_JSON( + SingleDerivedPath, + single_built_built, + (SingleDerivedPath::Built{ + .drvPath = make_ref(SingleDerivedPath::Built{ + .drvPath = make_ref(SingleDerivedPath::Opaque{ + StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"}}), + .output = "bar", + }), + .output = "baz", + })); + +TEST_JSON(DerivedPath, multi_opaque, DerivedPath::Opaque{StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"}}); + +TEST_JSON( + DerivedPath, + mutli_built, + (DerivedPath::Built{ + .drvPath = make_ref(SingleDerivedPath::Opaque{ + StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"}}), + .outputs = OutputsSpec::Names{"bar", "baz"}, + })); + +TEST_JSON( + DerivedPath, + multi_built_built, + (DerivedPath::Built{ + .drvPath = make_ref(SingleDerivedPath::Built{ + .drvPath = make_ref(SingleDerivedPath::Opaque{ + StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"}}), + .output = "bar", + }), + .outputs = OutputsSpec::Names{"baz", "quux"}, + })); + +TEST_JSON( + DerivedPath, + multi_built_built_wildcard, + (DerivedPath::Built{ + .drvPath = make_ref(SingleDerivedPath::Built{ + .drvPath = make_ref(SingleDerivedPath::Opaque{ + StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"}}), + .output = "bar", + }), + .outputs = OutputsSpec::All{}, + })); + } // namespace nix diff --git a/src/libstore/derived-path.cc b/src/libstore/derived-path.cc index 1fee1ae75..2cf720b82 100644 --- a/src/libstore/derived-path.cc +++ b/src/libstore/derived-path.cc @@ -2,8 +2,7 @@ #include "nix/store/derivations.hh" #include "nix/store/store-api.hh" #include "nix/util/comparator.hh" - -#include +#include "nix/util/json-utils.hh" #include @@ -19,59 +18,6 @@ GENERATE_CMP_EXT(, std::strong_ordering, SingleDerivedPathBuilt, *me->drvPath, m GENERATE_EQUAL(, DerivedPathBuilt ::, DerivedPathBuilt, *me->drvPath, me->outputs); GENERATE_ONE_CMP(, bool, DerivedPathBuilt ::, <, DerivedPathBuilt, *me->drvPath, me->outputs); -nlohmann::json DerivedPath::Opaque::toJSON(const StoreDirConfig & store) const -{ - return store.printStorePath(path); -} - -nlohmann::json SingleDerivedPath::Built::toJSON(Store & store) const -{ - nlohmann::json res; - res["drvPath"] = drvPath->toJSON(store); - // Fallback for the input-addressed derivation case: We expect to always be - 
// able to print the output paths, so let’s do it - // FIXME try-resolve on drvPath - const auto outputMap = store.queryPartialDerivationOutputMap(resolveDerivedPath(store, *drvPath)); - res["output"] = output; - auto outputPathIter = outputMap.find(output); - if (outputPathIter == outputMap.end()) - res["outputPath"] = nullptr; - else if (std::optional p = outputPathIter->second) - res["outputPath"] = store.printStorePath(*p); - else - res["outputPath"] = nullptr; - return res; -} - -nlohmann::json DerivedPath::Built::toJSON(Store & store) const -{ - nlohmann::json res; - res["drvPath"] = drvPath->toJSON(store); - // Fallback for the input-addressed derivation case: We expect to always be - // able to print the output paths, so let’s do it - // FIXME try-resolve on drvPath - const auto outputMap = store.queryPartialDerivationOutputMap(resolveDerivedPath(store, *drvPath)); - for (const auto & [output, outputPathOpt] : outputMap) { - if (!outputs.contains(output)) - continue; - if (outputPathOpt) - res["outputs"][output] = store.printStorePath(*outputPathOpt); - else - res["outputs"][output] = nullptr; - } - return res; -} - -nlohmann::json SingleDerivedPath::toJSON(Store & store) const -{ - return std::visit([&](const auto & buildable) { return buildable.toJSON(store); }, raw()); -} - -nlohmann::json DerivedPath::toJSON(Store & store) const -{ - return std::visit([&](const auto & buildable) { return buildable.toJSON(store); }, raw()); -} - std::string DerivedPath::Opaque::to_string(const StoreDirConfig & store) const { return store.printStorePath(path); @@ -273,3 +219,77 @@ const StorePath & DerivedPath::getBaseStorePath() const } } // namespace nix + +namespace nlohmann { + +void adl_serializer::to_json(json & json, const SingleDerivedPath::Opaque & o) +{ + json = o.path; +} + +SingleDerivedPath::Opaque adl_serializer::from_json(const json & json) +{ + return SingleDerivedPath::Opaque{json}; +} + +void adl_serializer::to_json(json & json, const SingleDerivedPath::Built & sdpb) +{ + json = { + {"drvPath", *sdpb.drvPath}, + {"output", sdpb.output}, + }; +} + +void adl_serializer::to_json(json & json, const DerivedPath::Built & dbp) +{ + json = { + {"drvPath", *dbp.drvPath}, + {"outputs", dbp.outputs}, + }; +} + +SingleDerivedPath::Built adl_serializer::from_json(const json & json0) +{ + auto & json = getObject(json0); + return { + .drvPath = make_ref(static_cast(valueAt(json, "drvPath"))), + .output = getString(valueAt(json, "output")), + }; +} + +DerivedPath::Built adl_serializer::from_json(const json & json0) +{ + auto & json = getObject(json0); + return { + .drvPath = make_ref(static_cast(valueAt(json, "drvPath"))), + .outputs = adl_serializer::from_json(valueAt(json, "outputs")), + }; +} + +void adl_serializer::to_json(json & json, const SingleDerivedPath & sdp) +{ + std::visit([&](const auto & buildable) { json = buildable; }, sdp.raw()); +} + +void adl_serializer::to_json(json & json, const DerivedPath & sdp) +{ + std::visit([&](const auto & buildable) { json = buildable; }, sdp.raw()); +} + +SingleDerivedPath adl_serializer::from_json(const json & json) +{ + if (json.is_string()) + return static_cast(json); + else + return static_cast(json); +} + +DerivedPath adl_serializer::from_json(const json & json) +{ + if (json.is_string()) + return static_cast(json); + else + return static_cast(json); +} + +} // namespace nlohmann diff --git a/src/libstore/include/nix/store/derived-path.hh b/src/libstore/include/nix/store/derived-path.hh index bc89b012e..47b29b2d6 100644 --- 
a/src/libstore/include/nix/store/derived-path.hh +++ b/src/libstore/include/nix/store/derived-path.hh @@ -5,6 +5,7 @@ #include "nix/store/outputs-spec.hh" #include "nix/util/configuration.hh" #include "nix/util/ref.hh" +#include "nix/util/json-impls.hh" #include @@ -14,9 +15,6 @@ namespace nix { struct StoreDirConfig; -// TODO stop needing this, `toJSON` below should be pure -class Store; - /** * An opaque derived path. * @@ -30,7 +28,6 @@ struct DerivedPathOpaque std::string to_string(const StoreDirConfig & store) const; static DerivedPathOpaque parse(const StoreDirConfig & store, std::string_view); - nlohmann::json toJSON(const StoreDirConfig & store) const; bool operator==(const DerivedPathOpaque &) const = default; auto operator<=>(const DerivedPathOpaque &) const = default; @@ -80,7 +77,6 @@ struct SingleDerivedPathBuilt ref drvPath, OutputNameView outputs, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); - nlohmann::json toJSON(Store & store) const; bool operator==(const SingleDerivedPathBuilt &) const noexcept; std::strong_ordering operator<=>(const SingleDerivedPathBuilt &) const noexcept; @@ -153,7 +149,6 @@ struct SingleDerivedPath : _SingleDerivedPathRaw const StoreDirConfig & store, std::string_view, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); - nlohmann::json toJSON(Store & store) const; }; static inline ref makeConstantStorePathRef(StorePath drvPath) @@ -208,7 +203,6 @@ struct DerivedPathBuilt ref, std::string_view, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); - nlohmann::json toJSON(Store & store) const; bool operator==(const DerivedPathBuilt &) const noexcept; // TODO libc++ 16 (used by darwin) missing `std::set::operator <=>`, can't do yet. @@ -287,8 +281,6 @@ struct DerivedPath : _DerivedPathRaw * Convert a `SingleDerivedPath` to a `DerivedPath`. */ static DerivedPath fromSingle(const SingleDerivedPath &); - - nlohmann::json toJSON(Store & store) const; }; typedef std::vector DerivedPaths; @@ -305,3 +297,9 @@ typedef std::vector DerivedPaths; void drvRequireExperiment( const SingleDerivedPath & drv, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); } // namespace nix + +JSON_IMPL(nix::SingleDerivedPath::Opaque) +JSON_IMPL(nix::SingleDerivedPath::Built) +JSON_IMPL(nix::SingleDerivedPath) +JSON_IMPL(nix::DerivedPath::Built) +JSON_IMPL(nix::DerivedPath) diff --git a/src/nix/build.cc b/src/nix/build.cc index ea05251ad..2d4f426a4 100644 --- a/src/nix/build.cc +++ b/src/nix/build.cc @@ -8,11 +8,79 @@ using namespace nix; +/* This serialization code is diferent from the canonical (single) + derived path serialization because: + + - It looks up output paths where possible + + - It includes the store dir in store paths + + We might want to replace it with the canonical format at some point, + but that would be a breaking change (to a still-experimental but + widely-used command, so that isn't being done at this time just yet. 
+ */ + +static nlohmann::json toJSON(Store & store, const SingleDerivedPath::Opaque & o) +{ + return store.printStorePath(o.path); +} + +static nlohmann::json toJSON(Store & store, const SingleDerivedPath & sdp); +static nlohmann::json toJSON(Store & store, const DerivedPath & dp); + +static nlohmann::json toJSON(Store & store, const SingleDerivedPath::Built & sdpb) +{ + nlohmann::json res; + res["drvPath"] = toJSON(store, *sdpb.drvPath); + // Fallback for the input-addressed derivation case: We expect to always be + // able to print the output paths, so let’s do it + // FIXME try-resolve on drvPath + const auto outputMap = store.queryPartialDerivationOutputMap(resolveDerivedPath(store, *sdpb.drvPath)); + res["output"] = sdpb.output; + auto outputPathIter = outputMap.find(sdpb.output); + if (outputPathIter == outputMap.end()) + res["outputPath"] = nullptr; + else if (std::optional p = outputPathIter->second) + res["outputPath"] = store.printStorePath(*p); + else + res["outputPath"] = nullptr; + return res; +} + +static nlohmann::json toJSON(Store & store, const DerivedPath::Built & dpb) +{ + nlohmann::json res; + res["drvPath"] = toJSON(store, *dpb.drvPath); + // Fallback for the input-addressed derivation case: We expect to always be + // able to print the output paths, so let’s do it + // FIXME try-resolve on drvPath + const auto outputMap = store.queryPartialDerivationOutputMap(resolveDerivedPath(store, *dpb.drvPath)); + for (const auto & [output, outputPathOpt] : outputMap) { + if (!dpb.outputs.contains(output)) + continue; + if (outputPathOpt) + res["outputs"][output] = store.printStorePath(*outputPathOpt); + else + res["outputs"][output] = nullptr; + } + return res; +} + +static nlohmann::json toJSON(Store & store, const SingleDerivedPath & sdp) +{ + return std::visit([&](const auto & buildable) { return toJSON(store, buildable); }, sdp.raw()); +} + +static nlohmann::json toJSON(Store & store, const DerivedPath & dp) +{ + return std::visit([&](const auto & buildable) { return toJSON(store, buildable); }, dp.raw()); +} + static nlohmann::json derivedPathsToJSON(const DerivedPaths & paths, Store & store) { auto res = nlohmann::json::array(); for (auto & t : paths) { - res.push_back(t.toJSON(store)); + res.push_back(toJSON(store, t)); } return res; } From 35d8ffe01d28ec6d8936664a631710bda62a8678 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Wed, 24 Sep 2025 00:34:35 +0300 Subject: [PATCH 046/332] ci: Split formatting check into a separate job, gate other jobs This makes the CI fail fast and more explicitly in case the formatting is incorrect and provides a better error messages. This also ensures that we don't burn CI on useless checks for code that wouldn't pass lints anyway. 
--- .github/workflows/ci.yml | 22 ++++++++++++++++++++++ ci/gha/tests/pre-commit-checks | 24 ++++++++++++++++++++++++ 2 files changed, 46 insertions(+) create mode 100755 ci/gha/tests/pre-commit-checks diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e7e103b63..471494f22 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -29,7 +29,28 @@ jobs: github_token: ${{ secrets.GITHUB_TOKEN }} - run: nix flake show --all-systems --json + pre-commit-checks: + name: pre-commit checks + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v5 + - uses: ./.github/actions/install-nix-action + with: + dogfood: ${{ github.event_name == 'workflow_dispatch' && inputs.dogfood || github.event_name != 'workflow_dispatch' }} + extra_nix_config: experimental-features = nix-command flakes + github_token: ${{ secrets.GITHUB_TOKEN }} + - uses: DeterminateSystems/magic-nix-cache-action@main + - run: ./ci/gha/tests/pre-commit-checks + + basic-checks: + name: aggregate basic checks + runs-on: ubuntu-24.04 + needs: [pre-commit-checks, eval] + steps: + - run: ":" # Dummy step + tests: + needs: basic-checks strategy: fail-fast: false matrix: @@ -214,6 +235,7 @@ jobs: docker push $IMAGE_ID:master vm_tests: + needs: basic-checks runs-on: ubuntu-24.04 steps: - uses: actions/checkout@v5 diff --git a/ci/gha/tests/pre-commit-checks b/ci/gha/tests/pre-commit-checks new file mode 100755 index 000000000..8c9f64d6c --- /dev/null +++ b/ci/gha/tests/pre-commit-checks @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -euo pipefail + +system=$(nix eval --raw --impure --expr builtins.currentSystem) + +echo "::group::Running pre-commit checks" + +if nix build ".#checks.$system.pre-commit" -L; then + echo "::endgroup::" + exit 0 +fi + +echo "::error ::Changes do not pass pre-commit checks" + +cat < Date: Wed, 24 Sep 2025 01:04:23 +0300 Subject: [PATCH 047/332] libexpr: Split out `MakeBinOpMembers` from `MakeBinOp` --- src/libexpr/include/nix/expr/nixexpr.hh | 63 +++++++++++++------------ 1 file changed, 33 insertions(+), 30 deletions(-) diff --git a/src/libexpr/include/nix/expr/nixexpr.hh b/src/libexpr/include/nix/expr/nixexpr.hh index aa62760d8..2682e623b 100644 --- a/src/libexpr/include/nix/expr/nixexpr.hh +++ b/src/libexpr/include/nix/expr/nixexpr.hh @@ -574,38 +574,41 @@ struct ExprOpNot : Expr COMMON_METHODS }; -#define MakeBinOp(name, s) \ - struct name : Expr \ - { \ - PosIdx pos; \ - Expr *e1, *e2; \ - name(Expr * e1, Expr * e2) \ - : e1(e1) \ - , e2(e2) {}; \ - name(const PosIdx & pos, Expr * e1, Expr * e2) \ - : pos(pos) \ - , e1(e1) \ - , e2(e2) {}; \ - void show(const SymbolTable & symbols, std::ostream & str) const override \ - { \ - str << "("; \ - e1->show(symbols, str); \ - str << " " s " "; \ - e2->show(symbols, str); \ - str << ")"; \ - } \ - void bindVars(EvalState & es, const std::shared_ptr & env) override \ - { \ - e1->bindVars(es, env); \ - e2->bindVars(es, env); \ - } \ - void eval(EvalState & state, Env & env, Value & v) override; \ - PosIdx getPos() const override \ - { \ - return pos; \ - } \ +#define MakeBinOpMembers(name, s) \ + PosIdx pos; \ + Expr *e1, *e2; \ + name(Expr * e1, Expr * e2) \ + : e1(e1) \ + , e2(e2){}; \ + name(const PosIdx & pos, Expr * e1, Expr * e2) \ + : pos(pos) \ + , e1(e1) \ + , e2(e2){}; \ + void show(const SymbolTable & symbols, std::ostream & str) const override \ + { \ + str << "("; \ + e1->show(symbols, str); \ + str << " " s " "; \ + e2->show(symbols, str); \ + str << ")"; \ + } \ + void bindVars(EvalState & es, const std::shared_ptr & env) 
override \ + { \ + e1->bindVars(es, env); \ + e2->bindVars(es, env); \ + } \ + void eval(EvalState & state, Env & env, Value & v) override; \ + PosIdx getPos() const override \ + { \ + return pos; \ } +#define MakeBinOp(name, s) \ + struct name : Expr \ + { \ + MakeBinOpMembers(name, s) \ + }; + MakeBinOp(ExprOpEq, "=="); MakeBinOp(ExprOpNEq, "!="); MakeBinOp(ExprOpAnd, "&&"); From b7c6cf900f4dba8c4464d5c355ee0559d06afe9b Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Wed, 24 Sep 2025 01:04:26 +0300 Subject: [PATCH 048/332] libexpr: Explicitly define `ExprOpUpdate` --- src/libexpr/include/nix/expr/nixexpr.hh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/libexpr/include/nix/expr/nixexpr.hh b/src/libexpr/include/nix/expr/nixexpr.hh index 2682e623b..e04e4f23c 100644 --- a/src/libexpr/include/nix/expr/nixexpr.hh +++ b/src/libexpr/include/nix/expr/nixexpr.hh @@ -614,9 +614,13 @@ MakeBinOp(ExprOpNEq, "!="); MakeBinOp(ExprOpAnd, "&&"); MakeBinOp(ExprOpOr, "||"); MakeBinOp(ExprOpImpl, "->"); -MakeBinOp(ExprOpUpdate, "//"); MakeBinOp(ExprOpConcatLists, "++"); +struct ExprOpUpdate : Expr +{ + MakeBinOpMembers(ExprOpUpdate, "//") +}; + struct ExprConcatStrings : Expr { PosIdx pos; From 9789019a5042e40ad34f52c007f53364fcbbbe9c Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Wed, 24 Sep 2025 01:04:58 +0300 Subject: [PATCH 049/332] libexpr: Move *StackReservation constants to gc-small-vector.hh There are other places where it's useful to use these constants (notably in eval.hh). --- src/libexpr/include/nix/expr/gc-small-vector.hh | 16 ++++++++++++++++ src/libexpr/include/nix/expr/primops.hh | 16 ---------------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/src/libexpr/include/nix/expr/gc-small-vector.hh b/src/libexpr/include/nix/expr/gc-small-vector.hh index fdd80b2c7..95c028e5a 100644 --- a/src/libexpr/include/nix/expr/gc-small-vector.hh +++ b/src/libexpr/include/nix/expr/gc-small-vector.hh @@ -26,4 +26,20 @@ using SmallValueVector = SmallVector; template using SmallTemporaryValueVector = SmallVector; +/** + * For functions where we do not expect deep recursion, we can use a sizable + * part of the stack a free allocation space. + * + * Note: this is expected to be multiplied by sizeof(Value), or about 24 bytes. + */ +constexpr size_t nonRecursiveStackReservation = 128; + +/** + * Functions that maybe applied to self-similar inputs, such as concatMap on a + * tree, should reserve a smaller part of the stack for allocation. + * + * Note: this is expected to be multiplied by sizeof(Value), or about 24 bytes. + */ +constexpr size_t conservativeStackReservation = 16; + } // namespace nix diff --git a/src/libexpr/include/nix/expr/primops.hh b/src/libexpr/include/nix/expr/primops.hh index 885a53e9a..6407ba84e 100644 --- a/src/libexpr/include/nix/expr/primops.hh +++ b/src/libexpr/include/nix/expr/primops.hh @@ -8,22 +8,6 @@ namespace nix { -/** - * For functions where we do not expect deep recursion, we can use a sizable - * part of the stack a free allocation space. - * - * Note: this is expected to be multiplied by sizeof(Value), or about 24 bytes. - */ -constexpr size_t nonRecursiveStackReservation = 128; - -/** - * Functions that maybe applied to self-similar inputs, such as concatMap on a - * tree, should reserve a smaller part of the stack for allocation. - * - * Note: this is expected to be multiplied by sizeof(Value), or about 24 bytes. 
- */ -constexpr size_t conservativeStackReservation = 16; - struct RegisterPrimOp { typedef std::vector PrimOps; From 00775ad83cb98761af8299deea6d2428da24bd30 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Wed, 24 Sep 2025 13:14:00 +0200 Subject: [PATCH 050/332] Apply suggestion from @getchoo Co-authored-by: Seth Flynn --- .github/workflows/ci.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 471494f22..dcf0814d8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -44,10 +44,14 @@ jobs: basic-checks: name: aggregate basic checks + if: ${{ always() }} runs-on: ubuntu-24.04 needs: [pre-commit-checks, eval] steps: - - run: ":" # Dummy step + - name: Exit with any errors + if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') }} + run: | + exit 1 tests: needs: basic-checks From 97ce7759d07fc44967e7fb3030fe9cbb8ebc2c92 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Wed, 24 Sep 2025 21:47:59 +0300 Subject: [PATCH 051/332] libexpr: Use same naive iterative merging but with `evalForUpdate` --- src/libexpr/eval.cc | 38 ++++++++++++++++--- src/libexpr/include/nix/expr/nixexpr.hh | 22 ++++++++++- .../lang/eval-fail-recursion.err.exp | 4 +- 3 files changed, 55 insertions(+), 9 deletions(-) diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 18212940e..87b1e73a5 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -1866,12 +1866,8 @@ void ExprOpImpl::eval(EvalState & state, Env & env, Value & v) || state.evalBool(env, e2, pos, "in the right operand of the IMPL (->) operator")); } -void ExprOpUpdate::eval(EvalState & state, Env & env, Value & v) +void ExprOpUpdate::eval(EvalState & state, Value & v, Value & v1, Value & v2) { - Value v1, v2; - state.evalAttrs(env, e1, v1, pos, "in the left operand of the update (//) operator"); - state.evalAttrs(env, e2, v2, pos, "in the right operand of the update (//) operator"); - state.nrOpUpdates++; const Bindings & bindings1 = *v1.attrs(); @@ -1945,6 +1941,38 @@ void ExprOpUpdate::eval(EvalState & state, Env & env, Value & v) state.nrOpUpdateValuesCopied += v.attrs()->size(); } +void ExprOpUpdate::eval(EvalState & state, Env & env, Value & v) +{ + UpdateQueue q; + evalForUpdate(state, env, q); + + v.mkAttrs(&Bindings::emptyBindings); + for (auto & rhs : std::views::reverse(q)) { + /* Remember that queue is sorted rightmost attrset first. */ + eval(state, /*v=*/v, /*v1=*/v, /*v2=*/rhs); + } +} + +void Expr::evalForUpdate(EvalState & state, Env & env, UpdateQueue & q, std::string_view errorCtx) +{ + Value v; + state.evalAttrs(env, this, v, getPos(), errorCtx); + q.push_back(v); +} + +void ExprOpUpdate::evalForUpdate(EvalState & state, Env & env, UpdateQueue & q) +{ + /* Output rightmost attrset first to the merge queue as the one + with the most priority. 
*/ + e2->evalForUpdate(state, env, q, "in the right operand of the update (//) operator"); + e1->evalForUpdate(state, env, q, "in the left operand of the update (//) operator"); +} + +void ExprOpUpdate::evalForUpdate(EvalState & state, Env & env, UpdateQueue & q, std::string_view errorCtx) +{ + evalForUpdate(state, env, q); +} + void ExprOpConcatLists::eval(EvalState & state, Env & env, Value & v) { Value v1; diff --git a/src/libexpr/include/nix/expr/nixexpr.hh b/src/libexpr/include/nix/expr/nixexpr.hh index e04e4f23c..7721918c3 100644 --- a/src/libexpr/include/nix/expr/nixexpr.hh +++ b/src/libexpr/include/nix/expr/nixexpr.hh @@ -4,6 +4,7 @@ #include #include +#include "nix/expr/gc-small-vector.hh" #include "nix/expr/value.hh" #include "nix/expr/symbol-table.hh" #include "nix/expr/eval-error.hh" @@ -80,6 +81,8 @@ typedef std::vector AttrPath; std::string showAttrPath(const SymbolTable & symbols, const AttrPath & attrPath); +using UpdateQueue = SmallTemporaryValueVector; + /* Abstract syntax of Nix expressions. */ struct Expr @@ -110,6 +113,14 @@ struct Expr * of thunks allocated. */ virtual Value * maybeThunk(EvalState & state, Env & env); + + /** + * Only called when performing an attrset update: `//` or similar. + * Instead of writing to a Value &, this function writes to an UpdateQueue. + * This allows the expression to perform multiple updates in a delayed manner, gathering up all the updates before + * applying them. + */ + virtual void evalForUpdate(EvalState & state, Env & env, UpdateQueue & q, std::string_view errorCtx); virtual void setName(Symbol name); virtual void setDocComment(DocComment docComment) {}; @@ -607,7 +618,7 @@ struct ExprOpNot : Expr struct name : Expr \ { \ MakeBinOpMembers(name, s) \ - }; + } MakeBinOp(ExprOpEq, "=="); MakeBinOp(ExprOpNEq, "!="); @@ -618,7 +629,14 @@ MakeBinOp(ExprOpConcatLists, "++"); struct ExprOpUpdate : Expr { - MakeBinOpMembers(ExprOpUpdate, "//") +private: + /** Special case for merging of two attrsets. */ + void eval(EvalState & state, Value & v, Value & v1, Value & v2); + void evalForUpdate(EvalState & state, Env & env, UpdateQueue & q); + +public: + MakeBinOpMembers(ExprOpUpdate, "//"); + virtual void evalForUpdate(EvalState & state, Env & env, UpdateQueue & q, std::string_view errorCtx) override; }; struct ExprConcatStrings : Expr diff --git a/tests/functional/lang/eval-fail-recursion.err.exp b/tests/functional/lang/eval-fail-recursion.err.exp index 8bfb4e12e..ee41ff46b 100644 --- a/tests/functional/lang/eval-fail-recursion.err.exp +++ b/tests/functional/lang/eval-fail-recursion.err.exp @@ -1,9 +1,9 @@ error: … in the right operand of the update (//) operator - at /pwd/lang/eval-fail-recursion.nix:2:11: + at /pwd/lang/eval-fail-recursion.nix:2:14: 1| let 2| a = { } // a; - | ^ + | ^ 3| in error: infinite recursion encountered From a97d6d89d8961a94f593b2e3797fa7e3ca583fc9 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Mon, 22 Sep 2025 14:49:29 -0400 Subject: [PATCH 052/332] Create a second `Store::getFSAccessor` for a single store object This is sometimes easier / more performant to implement, and independently it is also a more convenient interface for many callers. The existing store-wide `getFSAccessor` is only used for - `nix why-depends` - the evaluator I hope we can get rid of it for those, too, and then we have the option of getting rid of the store-wide method. 
Co-authored-by: Sergei Zimmerman --- src/libfetchers/fetchers.cc | 4 ++-- src/libfetchers/github.cc | 6 ++---- .../include/nix/fetchers/meson.build | 1 - .../include/nix/fetchers/store-path-accessor.hh | 14 -------------- src/libfetchers/mercurial.cc | 4 ++-- src/libfetchers/meson.build | 1 - src/libfetchers/path.cc | 3 +-- src/libfetchers/store-path-accessor.cc | 11 ----------- src/libfetchers/tarball.cc | 3 +-- src/libstore/binary-cache-store.cc | 12 +++++++++++- src/libstore/dummy-store.cc | 9 ++++++++- .../include/nix/store/binary-cache-store.hh | 8 ++++++++ .../include/nix/store/legacy-ssh-store.hh | 7 ++++++- .../include/nix/store/local-fs-store.hh | 1 + .../include/nix/store/remote-fs-accessor.hh | 5 +++++ src/libstore/include/nix/store/remote-store.hh | 9 +++++++++ src/libstore/include/nix/store/store-api.hh | 12 +++++++++++- .../include/nix/store/uds-remote-store.hh | 5 +++++ src/libstore/local-fs-store.cc | 17 +++++++++++++++++ src/libstore/remote-fs-accessor.cc | 16 +++++++++------- src/libstore/remote-store.cc | 12 +++++++++++- src/libstore/ssh-store.cc | 5 +++++ src/libstore/store-api.cc | 5 ++--- src/nix/cat.cc | 5 ++++- src/nix/ls.cc | 5 ++++- src/nix/nix-store/nix-store.cc | 2 +- 26 files changed, 125 insertions(+), 57 deletions(-) delete mode 100644 src/libfetchers/include/nix/fetchers/store-path-accessor.hh delete mode 100644 src/libfetchers/store-path-accessor.cc diff --git a/src/libfetchers/fetchers.cc b/src/libfetchers/fetchers.cc index a6b5e295a..d40e97aa9 100644 --- a/src/libfetchers/fetchers.cc +++ b/src/libfetchers/fetchers.cc @@ -3,7 +3,6 @@ #include "nix/util/source-path.hh" #include "nix/fetchers/fetch-to-store.hh" #include "nix/util/json-utils.hh" -#include "nix/fetchers/store-path-accessor.hh" #include "nix/fetchers/fetch-settings.hh" #include @@ -332,7 +331,8 @@ std::pair, Input> Input::getAccessorUnchecked(ref sto debug("using substituted/cached input '%s' in '%s'", to_string(), store->printStorePath(storePath)); - auto accessor = makeStorePathAccessor(store, storePath); + // We just ensured the store object was there + auto accessor = ref{store->getFSAccessor(storePath)}; accessor->fingerprint = getFingerprint(store); diff --git a/src/libfetchers/github.cc b/src/libfetchers/github.cc index 15a19021d..3b723d7d8 100644 --- a/src/libfetchers/github.cc +++ b/src/libfetchers/github.cc @@ -398,9 +398,8 @@ struct GitHubInputScheme : GitArchiveInputScheme Headers headers = makeHeadersWithAuthTokens(*input.settings, host, input); - auto accessor = store->getFSAccessor(); auto downloadResult = downloadFile(store, *input.settings, url, "source", headers); - auto json = nlohmann::json::parse(accessor->readFile(CanonPath(downloadResult.storePath.to_string()))); + auto json = nlohmann::json::parse(store->getFSAccessor(downloadResult.storePath)->readFile(CanonPath::root)); return RefInfo{ .rev = Hash::parseAny(std::string{json["sha"]}, HashAlgorithm::SHA1), @@ -473,9 +472,8 @@ struct GitLabInputScheme : GitArchiveInputScheme Headers headers = makeHeadersWithAuthTokens(*input.settings, host, input); - auto accessor = store->getFSAccessor(); auto downloadResult = downloadFile(store, *input.settings, url, "source", headers); - auto json = nlohmann::json::parse(accessor->readFile(CanonPath(downloadResult.storePath.to_string()))); + auto json = nlohmann::json::parse(store->getFSAccessor(downloadResult.storePath)->readFile(CanonPath::root)); if (json.is_array() && json.size() >= 1 && json[0]["id"] != nullptr) { return RefInfo{.rev = Hash::parseAny(std::string(json[0]["id"]), 
HashAlgorithm::SHA1)}; diff --git a/src/libfetchers/include/nix/fetchers/meson.build b/src/libfetchers/include/nix/fetchers/meson.build index fcd446a6d..a313b1e0b 100644 --- a/src/libfetchers/include/nix/fetchers/meson.build +++ b/src/libfetchers/include/nix/fetchers/meson.build @@ -11,6 +11,5 @@ headers = files( 'git-utils.hh', 'input-cache.hh', 'registry.hh', - 'store-path-accessor.hh', 'tarball.hh', ) diff --git a/src/libfetchers/include/nix/fetchers/store-path-accessor.hh b/src/libfetchers/include/nix/fetchers/store-path-accessor.hh deleted file mode 100644 index a107293f8..000000000 --- a/src/libfetchers/include/nix/fetchers/store-path-accessor.hh +++ /dev/null @@ -1,14 +0,0 @@ -#pragma once - -#include "nix/util/source-path.hh" - -namespace nix { - -class StorePath; -class Store; - -ref makeStorePathAccessor(ref store, const StorePath & storePath); - -SourcePath getUnfilteredRootPath(CanonPath path); - -} // namespace nix diff --git a/src/libfetchers/mercurial.cc b/src/libfetchers/mercurial.cc index 641b3d6a8..bf460d9c6 100644 --- a/src/libfetchers/mercurial.cc +++ b/src/libfetchers/mercurial.cc @@ -6,7 +6,6 @@ #include "nix/util/tarfile.hh" #include "nix/store/store-api.hh" #include "nix/util/url-parts.hh" -#include "nix/fetchers/store-path-accessor.hh" #include "nix/fetchers/fetch-settings.hh" #include @@ -331,7 +330,8 @@ struct MercurialInputScheme : InputScheme auto storePath = fetchToStore(store, input); - auto accessor = makeStorePathAccessor(store, storePath); + // We just added it, it should be there. + auto accessor = ref{store->getFSAccessor(storePath)}; accessor->setPathDisplay("«" + input.to_string() + "»"); diff --git a/src/libfetchers/meson.build b/src/libfetchers/meson.build index 070c82b8c..5b53a147b 100644 --- a/src/libfetchers/meson.build +++ b/src/libfetchers/meson.build @@ -50,7 +50,6 @@ sources = files( 'mercurial.cc', 'path.cc', 'registry.cc', - 'store-path-accessor.cc', 'tarball.cc', ) diff --git a/src/libfetchers/path.cc b/src/libfetchers/path.cc index b66459fb9..3c4b9c06d 100644 --- a/src/libfetchers/path.cc +++ b/src/libfetchers/path.cc @@ -1,7 +1,6 @@ #include "nix/fetchers/fetchers.hh" #include "nix/store/store-api.hh" #include "nix/util/archive.hh" -#include "nix/fetchers/store-path-accessor.hh" #include "nix/fetchers/cache.hh" #include "nix/fetchers/fetch-to-store.hh" #include "nix/fetchers/fetch-settings.hh" @@ -153,7 +152,7 @@ struct PathInputScheme : InputScheme if (!input.getLastModified()) input.attrs.insert_or_assign("lastModified", uint64_t(mtime)); - return {makeStorePathAccessor(store, *storePath), std::move(input)}; + return {ref{store->getFSAccessor(*storePath)}, std::move(input)}; } std::optional getFingerprint(ref store, const Input & input) const override diff --git a/src/libfetchers/store-path-accessor.cc b/src/libfetchers/store-path-accessor.cc deleted file mode 100644 index 65160e311..000000000 --- a/src/libfetchers/store-path-accessor.cc +++ /dev/null @@ -1,11 +0,0 @@ -#include "nix/fetchers/store-path-accessor.hh" -#include "nix/store/store-api.hh" - -namespace nix { - -ref makeStorePathAccessor(ref store, const StorePath & storePath) -{ - return projectSubdirSourceAccessor(store->getFSAccessor(), storePath.to_string()); -} - -} // namespace nix diff --git a/src/libfetchers/tarball.cc b/src/libfetchers/tarball.cc index b55837c9e..31d5ab460 100644 --- a/src/libfetchers/tarball.cc +++ b/src/libfetchers/tarball.cc @@ -6,7 +6,6 @@ #include "nix/util/archive.hh" #include "nix/util/tarfile.hh" #include "nix/util/types.hh" -#include 
"nix/fetchers/store-path-accessor.hh" #include "nix/store/store-api.hh" #include "nix/fetchers/git-utils.hh" #include "nix/fetchers/fetch-settings.hh" @@ -354,7 +353,7 @@ struct FileInputScheme : CurlInputScheme auto narHash = store->queryPathInfo(file.storePath)->narHash; input.attrs.insert_or_assign("narHash", narHash.to_string(HashFormat::SRI, true)); - auto accessor = makeStorePathAccessor(store, file.storePath); + auto accessor = ref{store->getFSAccessor(file.storePath)}; accessor->setPathDisplay("«" + input.to_string() + "»"); diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc index d5184b1bf..badfb4b14 100644 --- a/src/libstore/binary-cache-store.cc +++ b/src/libstore/binary-cache-store.cc @@ -539,11 +539,21 @@ void BinaryCacheStore::registerDrvOutput(const Realisation & info) upsertFile(filePath, static_cast(info).dump(), "application/json"); } -ref BinaryCacheStore::getFSAccessor(bool requireValidPath) +ref BinaryCacheStore::getRemoteFSAccessor(bool requireValidPath) { return make_ref(ref(shared_from_this()), requireValidPath, config.localNarCache); } +ref BinaryCacheStore::getFSAccessor(bool requireValidPath) +{ + return getRemoteFSAccessor(requireValidPath); +} + +std::shared_ptr BinaryCacheStore::getFSAccessor(const StorePath & storePath, bool requireValidPath) +{ + return getRemoteFSAccessor(requireValidPath)->accessObject(storePath); +} + void BinaryCacheStore::addSignatures(const StorePath & storePath, const StringSet & sigs) { /* Note: this is inherently racy since there is no locking on diff --git a/src/libstore/dummy-store.cc b/src/libstore/dummy-store.cc index 367cdb5d2..4b485ca66 100644 --- a/src/libstore/dummy-store.cc +++ b/src/libstore/dummy-store.cc @@ -276,7 +276,14 @@ struct DummyStore : virtual Store callback(nullptr); } - virtual ref getFSAccessor(bool requireValidPath) override + std::shared_ptr getFSAccessor(const StorePath & path, bool requireValidPath) override + { + std::shared_ptr res; + contents.cvisit(path, [&](const auto & kv) { res = kv.second.contents.get_ptr(); }); + return res; + } + + ref getFSAccessor(bool requireValidPath) override { return wholeStoreView; } diff --git a/src/libstore/include/nix/store/binary-cache-store.hh b/src/libstore/include/nix/store/binary-cache-store.hh index 908500b42..c316b1199 100644 --- a/src/libstore/include/nix/store/binary-cache-store.hh +++ b/src/libstore/include/nix/store/binary-cache-store.hh @@ -12,6 +12,7 @@ namespace nix { struct NarInfo; +class RemoteFSAccessor; struct BinaryCacheStoreConfig : virtual StoreConfig { @@ -136,6 +137,11 @@ private: CheckSigsFlag checkSigs, std::function mkInfo); + /** + * Same as `getFSAccessor`, but with a more preceise return type. 
+ */ + ref getRemoteFSAccessor(bool requireValidPath = true); + public: bool isValidPathUncached(const StorePath & path) override; @@ -175,6 +181,8 @@ public: ref getFSAccessor(bool requireValidPath = true) override; + std::shared_ptr getFSAccessor(const StorePath &, bool requireValidPath = true) override; + void addSignatures(const StorePath & storePath, const StringSet & sigs) override; std::optional getBuildLogExact(const StorePath & path) override; diff --git a/src/libstore/include/nix/store/legacy-ssh-store.hh b/src/libstore/include/nix/store/legacy-ssh-store.hh index ac31506d0..75751e2d1 100644 --- a/src/libstore/include/nix/store/legacy-ssh-store.hh +++ b/src/libstore/include/nix/store/legacy-ssh-store.hh @@ -142,7 +142,12 @@ public: unsupported("ensurePath"); } - virtual ref getFSAccessor(bool requireValidPath) override + ref getFSAccessor(bool requireValidPath) override + { + unsupported("getFSAccessor"); + } + + std::shared_ptr getFSAccessor(const StorePath & path, bool requireValidPath) override { unsupported("getFSAccessor"); } diff --git a/src/libstore/include/nix/store/local-fs-store.hh b/src/libstore/include/nix/store/local-fs-store.hh index 84777f3d7..f7d6d65b1 100644 --- a/src/libstore/include/nix/store/local-fs-store.hh +++ b/src/libstore/include/nix/store/local-fs-store.hh @@ -68,6 +68,7 @@ struct LocalFSStore : virtual Store, virtual GcStore, virtual LogStore void narFromPath(const StorePath & path, Sink & sink) override; ref getFSAccessor(bool requireValidPath = true) override; + std::shared_ptr getFSAccessor(const StorePath & path, bool requireValidPath = true) override; /** * Creates symlink from the `gcRoot` to the `storePath` and diff --git a/src/libstore/include/nix/store/remote-fs-accessor.hh b/src/libstore/include/nix/store/remote-fs-accessor.hh index fa0555d9b..9e1999cc0 100644 --- a/src/libstore/include/nix/store/remote-fs-accessor.hh +++ b/src/libstore/include/nix/store/remote-fs-accessor.hh @@ -27,6 +27,11 @@ class RemoteFSAccessor : public SourceAccessor public: + /** + * @return nullptr if the store does not contain any object at that path. + */ + std::shared_ptr accessObject(const StorePath & path); + RemoteFSAccessor( ref store, bool requireValidPath = true, const /* FIXME: use std::optional */ Path & cacheDir = ""); diff --git a/src/libstore/include/nix/store/remote-store.hh b/src/libstore/include/nix/store/remote-store.hh index 76591cf93..1aaf29d37 100644 --- a/src/libstore/include/nix/store/remote-store.hh +++ b/src/libstore/include/nix/store/remote-store.hh @@ -16,6 +16,7 @@ struct FdSink; struct FdSource; template class Pool; +class RemoteFSAccessor; struct RemoteStoreConfig : virtual StoreConfig { @@ -176,10 +177,18 @@ protected: virtual ref getFSAccessor(bool requireValidPath = true) override; + virtual std::shared_ptr + getFSAccessor(const StorePath & path, bool requireValidPath = true) override; + virtual void narFromPath(const StorePath & path, Sink & sink) override; private: + /** + * Same as the default implemenation of `RemoteStore::getFSAccessor`, but with a more preceise return type. 
+ */ + ref getRemoteFSAccessor(bool requireValidPath = true); + std::atomic_bool failed{false}; void copyDrvsFromEvalStore(const std::vector & paths, std::shared_ptr evalStore); diff --git a/src/libstore/include/nix/store/store-api.hh b/src/libstore/include/nix/store/store-api.hh index 2519002b3..6d3f6b8d0 100644 --- a/src/libstore/include/nix/store/store-api.hh +++ b/src/libstore/include/nix/store/store-api.hh @@ -717,10 +717,20 @@ public: }; /** - * @return An object to access files in the Nix store. + * @return An object to access files in the Nix store, across all + * store objects. */ virtual ref getFSAccessor(bool requireValidPath = true) = 0; + /** + * @return An object to access files for a specific store object in + * the Nix store. + * + * @return nullptr if the store doesn't contain an object at the + * givine path. + */ + virtual std::shared_ptr getFSAccessor(const StorePath & path, bool requireValidPath = true) = 0; + /** * Repair the contents of the given path by redownloading it using * a substituter (if available). diff --git a/src/libstore/include/nix/store/uds-remote-store.hh b/src/libstore/include/nix/store/uds-remote-store.hh index 37c239796..fe6e486f4 100644 --- a/src/libstore/include/nix/store/uds-remote-store.hh +++ b/src/libstore/include/nix/store/uds-remote-store.hh @@ -61,6 +61,11 @@ struct UDSRemoteStore : virtual IndirectRootStore, virtual RemoteStore return LocalFSStore::getFSAccessor(requireValidPath); } + std::shared_ptr getFSAccessor(const StorePath & path, bool requireValidPath = true) override + { + return LocalFSStore::getFSAccessor(path, requireValidPath); + } + void narFromPath(const StorePath & path, Sink & sink) override { LocalFSStore::narFromPath(path, sink); diff --git a/src/libstore/local-fs-store.cc b/src/libstore/local-fs-store.cc index e0f07b91b..66ae85d89 100644 --- a/src/libstore/local-fs-store.cc +++ b/src/libstore/local-fs-store.cc @@ -91,6 +91,23 @@ ref LocalFSStore::getFSAccessor(bool requireValidPath) ref(std::dynamic_pointer_cast(shared_from_this())), requireValidPath); } +std::shared_ptr LocalFSStore::getFSAccessor(const StorePath & path, bool requireValidPath) +{ + auto absPath = std::filesystem::path{config.realStoreDir.get()} / path.to_string(); + if (requireValidPath) { + /* Only return non-null if the store object is a fully-valid + member of the store. */ + if (!isValidPath(path)) + return nullptr; + } else { + /* Return non-null as long as the some file system data exists, + even if the store object is not fully registered. 
*/ + if (!pathExists(absPath)) + return nullptr; + } + return std::make_shared(std::move(absPath)); +} + void LocalFSStore::narFromPath(const StorePath & path, Sink & sink) { if (!isValidPath(path)) diff --git a/src/libstore/remote-fs-accessor.cc b/src/libstore/remote-fs-accessor.cc index 12c810eca..e6715cbdf 100644 --- a/src/libstore/remote-fs-accessor.cc +++ b/src/libstore/remote-fs-accessor.cc @@ -51,15 +51,17 @@ ref RemoteFSAccessor::addToCache(std::string_view hashPart, std: std::pair, CanonPath> RemoteFSAccessor::fetch(const CanonPath & path) { - auto [storePath, restPath_] = store->toStorePath(store->storeDir + path.abs()); - auto restPath = CanonPath(restPath_); - + auto [storePath, restPath] = store->toStorePath(store->storeDir + path.abs()); if (requireValidPath && !store->isValidPath(storePath)) throw InvalidPath("path '%1%' is not a valid store path", store->printStorePath(storePath)); + return {ref{accessObject(storePath)}, CanonPath{restPath}}; +} +std::shared_ptr RemoteFSAccessor::accessObject(const StorePath & storePath) +{ auto i = nars.find(std::string(storePath.hashPart())); if (i != nars.end()) - return {i->second, restPath}; + return i->second; std::string listing; Path cacheFile; @@ -90,7 +92,7 @@ std::pair, CanonPath> RemoteFSAccessor::fetch(const CanonPat }); nars.emplace(storePath.hashPart(), narAccessor); - return {narAccessor, restPath}; + return narAccessor; } catch (SystemError &) { } @@ -98,14 +100,14 @@ std::pair, CanonPath> RemoteFSAccessor::fetch(const CanonPat try { auto narAccessor = makeNarAccessor(nix::readFile(cacheFile)); nars.emplace(storePath.hashPart(), narAccessor); - return {narAccessor, restPath}; + return narAccessor; } catch (SystemError &) { } } StringSink sink; store->narFromPath(storePath, sink); - return {addToCache(storePath.hashPart(), std::move(sink.s)), restPath}; + return addToCache(storePath.hashPart(), std::move(sink.s)); } std::optional RemoteFSAccessor::maybeLstat(const CanonPath & path) diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index b918871fa..bb7425081 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -794,9 +794,19 @@ void RemoteStore::narFromPath(const StorePath & path, Sink & sink) conn->narFromPath(*this, &conn.daemonException, path, [&](Source & source) { copyNAR(conn->from, sink); }); } +ref RemoteStore::getRemoteFSAccessor(bool requireValidPath) +{ + return make_ref(ref(shared_from_this()), requireValidPath); +} + ref RemoteStore::getFSAccessor(bool requireValidPath) { - return make_ref(ref(shared_from_this())); + return getRemoteFSAccessor(requireValidPath); +} + +std::shared_ptr RemoteStore::getFSAccessor(const StorePath & path, bool requireValidPath) +{ + return getRemoteFSAccessor(requireValidPath)->accessObject(path); } void RemoteStore::ConnectionHandle::withFramedSink(std::function fun) diff --git a/src/libstore/ssh-store.cc b/src/libstore/ssh-store.cc index dafe14fea..a7e28017f 100644 --- a/src/libstore/ssh-store.cc +++ b/src/libstore/ssh-store.cc @@ -151,6 +151,11 @@ struct MountedSSHStore : virtual SSHStore, virtual LocalFSStore return LocalFSStore::getFSAccessor(requireValidPath); } + std::shared_ptr getFSAccessor(const StorePath & path, bool requireValidPath) override + { + return LocalFSStore::getFSAccessor(path, requireValidPath); + } + std::optional getBuildLogExact(const StorePath & path) override { return LocalFSStore::getBuildLogExact(path); diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index 17748ec53..a0b06db54 100644 
--- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -1120,10 +1120,9 @@ Derivation Store::derivationFromPath(const StorePath & drvPath) static Derivation readDerivationCommon(Store & store, const StorePath & drvPath, bool requireValidPath) { - auto accessor = store.getFSAccessor(requireValidPath); + auto accessor = store.getFSAccessor(drvPath, requireValidPath); try { - return parseDerivation( - store, accessor->readFile(CanonPath(drvPath.to_string())), Derivation::nameFromPath(drvPath)); + return parseDerivation(store, accessor->readFile(CanonPath::root), Derivation::nameFromPath(drvPath)); } catch (FormatError & e) { throw Error("error parsing derivation '%s': %s", store.printStorePath(drvPath), e.msg()); } diff --git a/src/nix/cat.cc b/src/nix/cat.cc index 276e01f5d..145336723 100644 --- a/src/nix/cat.cc +++ b/src/nix/cat.cc @@ -41,7 +41,10 @@ struct CmdCatStore : StoreCommand, MixCat void run(ref store) override { auto [storePath, rest] = store->toStorePath(path); - cat(store->getFSAccessor(), CanonPath{storePath.to_string()} / CanonPath{rest}); + auto accessor = store->getFSAccessor(storePath); + if (!accessor) + throw InvalidPath("path '%1%' is not a valid store path", store->printStorePath(storePath)); + cat(ref{std::move(accessor)}, CanonPath{rest}); } }; diff --git a/src/nix/ls.cc b/src/nix/ls.cc index dcc46fa14..4952d5243 100644 --- a/src/nix/ls.cc +++ b/src/nix/ls.cc @@ -115,7 +115,10 @@ struct CmdLsStore : StoreCommand, MixLs void run(ref store) override { auto [storePath, rest] = store->toStorePath(path); - list(store->getFSAccessor(), CanonPath{storePath.to_string()} / CanonPath{rest}); + auto accessor = store->getFSAccessor(storePath); + if (!accessor) + throw InvalidPath("path '%1%' is not a valid store path", store->printStorePath(storePath)); + list(ref{std::move(accessor)}, CanonPath{rest}); } }; diff --git a/src/nix/nix-store/nix-store.cc b/src/nix/nix-store/nix-store.cc index 5f85e06f0..f8078426c 100644 --- a/src/nix/nix-store/nix-store.cc +++ b/src/nix/nix-store/nix-store.cc @@ -603,7 +603,7 @@ static void registerValidity(bool reregister, bool hashGiven, bool canonicalise) #endif if (!hashGiven) { HashResult hash = hashPath( - {store->getFSAccessor(false), CanonPath{info->path.to_string()}}, + {ref{store->getFSAccessor(info->path, false)}}, FileSerialisationMethod::NixArchive, HashAlgorithm::SHA256); info->narHash = hash.hash; From 30691c38c26fce14e378357c068a11749c5a914e Mon Sep 17 00:00:00 2001 From: John Ericson Date: Wed, 24 Sep 2025 13:11:00 -0400 Subject: [PATCH 053/332] Add JSON tests for `Realisation` --- .../data/realisation/simple.json | 6 + .../with-dependent-realisations.json | 8 ++ .../data/realisation/with-signature.json | 8 ++ src/libstore-tests/meson.build | 1 + src/libstore-tests/realisation.cc | 105 ++++++++++++++++++ 5 files changed, 128 insertions(+) create mode 100644 src/libstore-tests/data/realisation/simple.json create mode 100644 src/libstore-tests/data/realisation/with-dependent-realisations.json create mode 100644 src/libstore-tests/data/realisation/with-signature.json create mode 100644 src/libstore-tests/realisation.cc diff --git a/src/libstore-tests/data/realisation/simple.json b/src/libstore-tests/data/realisation/simple.json new file mode 100644 index 000000000..2ccb1e721 --- /dev/null +++ b/src/libstore-tests/data/realisation/simple.json @@ -0,0 +1,6 @@ +{ + "dependentRealisations": {}, + "id": "sha256:ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad!foo", + "outPath": 
"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv", + "signatures": [] +} diff --git a/src/libstore-tests/data/realisation/with-dependent-realisations.json b/src/libstore-tests/data/realisation/with-dependent-realisations.json new file mode 100644 index 000000000..a58e0d7fe --- /dev/null +++ b/src/libstore-tests/data/realisation/with-dependent-realisations.json @@ -0,0 +1,8 @@ +{ + "dependentRealisations": { + "sha256:ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad!foo": "g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv" + }, + "id": "sha256:ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad!foo", + "outPath": "g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv", + "signatures": [] +} diff --git a/src/libstore-tests/data/realisation/with-signature.json b/src/libstore-tests/data/realisation/with-signature.json new file mode 100644 index 000000000..a28848cb0 --- /dev/null +++ b/src/libstore-tests/data/realisation/with-signature.json @@ -0,0 +1,8 @@ +{ + "dependentRealisations": {}, + "id": "sha256:ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad!foo", + "outPath": "g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv", + "signatures": [ + "asdfasdfasdf" + ] +} diff --git a/src/libstore-tests/meson.build b/src/libstore-tests/meson.build index e3984d62f..915c10a38 100644 --- a/src/libstore-tests/meson.build +++ b/src/libstore-tests/meson.build @@ -74,6 +74,7 @@ sources = files( 'outputs-spec.cc', 'path-info.cc', 'path.cc', + 'realisation.cc', 'references.cc', 's3-binary-cache-store.cc', 's3.cc', diff --git a/src/libstore-tests/realisation.cc b/src/libstore-tests/realisation.cc new file mode 100644 index 000000000..2e4d592dc --- /dev/null +++ b/src/libstore-tests/realisation.cc @@ -0,0 +1,105 @@ +#include + +#include +#include +#include + +#include "nix/store/store-api.hh" + +#include "nix/util/tests/characterization.hh" +#include "nix/store/tests/libstore.hh" + +namespace nix { + +class RealisationTest : public CharacterizationTest, public LibStoreTest +{ + std::filesystem::path unitTestData = getUnitTestData() / "realisation"; + +public: + + std::filesystem::path goldenMaster(std::string_view testStem) const override + { + return unitTestData / testStem; + } +}; + +/* ---------------------------------------------------------------------------- + * JSON + * --------------------------------------------------------------------------*/ + +using nlohmann::json; + +struct RealisationJsonTest : RealisationTest, ::testing::WithParamInterface> +{}; + +TEST_P(RealisationJsonTest, from_json) +{ + auto [name, expected] = GetParam(); + readTest(name + ".json", [&](const auto & encoded_) { + auto encoded = json::parse(encoded_); + Realisation got = static_cast(encoded); + ASSERT_EQ(got, expected); + }); +} + +TEST_P(RealisationJsonTest, to_json) +{ + auto [name, value] = GetParam(); + writeTest( + name + ".json", + [&]() -> json { return static_cast(value); }, + [](const auto & file) { return json::parse(readFile(file)); }, + [](const auto & file, const auto & got) { return writeFile(file, got.dump(2) + "\n"); }); +} + +INSTANTIATE_TEST_SUITE_P( + RealisationJSON, + RealisationJsonTest, + ([] { + Realisation simple{ + + .id = + { + .drvHash = Hash::parseExplicitFormatUnprefixed( + "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", + HashAlgorithm::SHA256, + HashFormat::Base16), + .outputName = "foo", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"}, + }; + return ::testing::Values( + std::pair{ + "simple", + simple, + }, + std::pair{ + "with-signature", + 
[&] { + auto r = simple; + // FIXME actually sign properly + r.signatures = {"asdfasdfasdf"}; + return r; + }()}, + std::pair{ + "with-dependent-realisations", + [&] { + auto r = simple; + r.dependentRealisations = {{ + { + .drvHash = Hash::parseExplicitFormatUnprefixed( + "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", + HashAlgorithm::SHA256, + HashFormat::Base16), + .outputName = "foo", + }, + StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"}, + }}; + return r; + }(), + }); + } + + ())); + +} // namespace nix From c77b15a178d3cd792d1607afe40016c4aed2e4a2 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Wed, 24 Sep 2025 18:49:53 -0700 Subject: [PATCH 054/332] shellcheck fix scripts/install-multi-user.sh --- maintainers/flake-module.nix | 1 - scripts/install-multi-user.sh | 24 ++++++++++++++++-------- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index a5360675f..ea120b7f3 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^scripts/install-multi-user\.sh$'' ''^scripts/install-systemd-multi-user\.sh$'' ''^tests/functional/completions\.sh$'' ''^tests/functional/compute-levels\.sh$'' diff --git a/scripts/install-multi-user.sh b/scripts/install-multi-user.sh index 450a773e9..f577e79c8 100644 --- a/scripts/install-multi-user.sh +++ b/scripts/install-multi-user.sh @@ -55,18 +55,22 @@ readonly NIX_INSTALLED_NIX="@nix@" readonly NIX_INSTALLED_CACERT="@cacert@" #readonly NIX_INSTALLED_NIX="/nix/store/j8dbv5w6jl34caywh2ygdy88knx1mdf7-nix-2.3.6" #readonly NIX_INSTALLED_CACERT="/nix/store/7dxhzymvy330i28ii676fl1pqwcahv2f-nss-cacert-3.49.2" -readonly EXTRACTED_NIX_PATH="$(dirname "$0")" +EXTRACTED_NIX_PATH="$(dirname "$0")" +readonly EXTRACTED_NIX_PATH # allow to override identity change command -readonly NIX_BECOME=${NIX_BECOME:-sudo} +NIX_BECOME=${NIX_BECOME:-sudo} +readonly NIX_BECOME -readonly ROOT_HOME=~root +ROOT_HOME=~root +readonly ROOT_HOME if [ -t 0 ] && [ -z "${NIX_INSTALLER_YES:-}" ]; then - readonly IS_HEADLESS='no' + IS_HEADLESS='no' else - readonly IS_HEADLESS='yes' + IS_HEADLESS='yes' fi +readonly IS_HEADLESS headless() { if [ "$IS_HEADLESS" = "yes" ]; then @@ -156,7 +160,7 @@ EOF } nix_user_for_core() { - printf "$NIX_BUILD_USER_NAME_TEMPLATE" "$1" + printf "%s%s" "$NIX_BUILD_USER_NAME_TEMPLATE" "$1" } nix_uid_for_core() { @@ -381,10 +385,12 @@ _sudo() { # Ensure that $TMPDIR exists if defined. if [[ -n "${TMPDIR:-}" ]] && [[ ! -d "${TMPDIR:-}" ]]; then + # shellcheck disable=SC2174 mkdir -m 0700 -p "${TMPDIR:-}" fi -readonly SCRATCH=$(mktemp -d) +SCRATCH=$(mktemp -d) +readonly SCRATCH finish_cleanup() { rm -rf "$SCRATCH" } @@ -677,7 +683,8 @@ create_directories() { # hiding behind || true, and the general state # should be one the user can repair once they # figure out where chown is... 
- local get_chr_own="$(PATH="$(getconf PATH 2>/dev/null)" command -vp chown)" + local get_chr_own + get_chr_own="$(PATH="$(getconf PATH 2>/dev/null)" command -vp chown)" if [[ -z "$get_chr_own" ]]; then get_chr_own="$(command -v chown)" fi @@ -1015,6 +1022,7 @@ main() { # Set profile targets after OS-specific scripts are loaded if command -v poly_configure_default_profile_targets > /dev/null 2>&1; then + # shellcheck disable=SC2207 PROFILE_TARGETS=($(poly_configure_default_profile_targets)) else PROFILE_TARGETS=("/etc/bashrc" "/etc/profile.d/nix.sh" "/etc/zshrc" "/etc/bash.bashrc" "/etc/zsh/zshrc") From 76b956541498a803df6df3be32cbdd3494d0beb5 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Wed, 24 Sep 2025 18:52:46 -0700 Subject: [PATCH 055/332] shellcheck fix scipts/install-systemd-multi-user.sh --- maintainers/flake-module.nix | 1 - scripts/install-systemd-multi-user.sh | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index a5360675f..368ea8cf2 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -107,7 +107,6 @@ excludes = [ # We haven't linted these files yet ''^scripts/install-multi-user\.sh$'' - ''^scripts/install-systemd-multi-user\.sh$'' ''^tests/functional/completions\.sh$'' ''^tests/functional/compute-levels\.sh$'' ''^tests/functional/config\.sh$'' diff --git a/scripts/install-systemd-multi-user.sh b/scripts/install-systemd-multi-user.sh index dc373f4db..8abbb7af4 100755 --- a/scripts/install-systemd-multi-user.sh +++ b/scripts/install-systemd-multi-user.sh @@ -39,7 +39,7 @@ create_systemd_proxy_env() { vars="http_proxy https_proxy ftp_proxy all_proxy no_proxy HTTP_PROXY HTTPS_PROXY FTP_PROXY ALL_PROXY NO_PROXY" for v in $vars; do if [ "x${!v:-}" != "x" ]; then - echo "Environment=${v}=$(escape_systemd_env ${!v})" + echo "Environment=${v}=$(escape_systemd_env "${!v}")" fi done } From 92f8f87dd160b3378f5f8712908e6f596e6cd414 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Wed, 24 Sep 2025 18:56:00 -0700 Subject: [PATCH 056/332] shellcheck fix tests/functional/completions.sh --- maintainers/flake-module.nix | 1 - tests/functional/completions.sh | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index a5360675f..140857dea 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -108,7 +108,6 @@ # We haven't linted these files yet ''^scripts/install-multi-user\.sh$'' ''^scripts/install-systemd-multi-user\.sh$'' - ''^tests/functional/completions\.sh$'' ''^tests/functional/compute-levels\.sh$'' ''^tests/functional/config\.sh$'' ''^tests/functional/db-migration\.sh$'' diff --git a/tests/functional/completions.sh b/tests/functional/completions.sh index 9164c5013..b521d35fb 100755 --- a/tests/functional/completions.sh +++ b/tests/functional/completions.sh @@ -53,7 +53,9 @@ cd .. 
## With multiple input flakes [[ "$(NIX_GET_COMPLETIONS=5 nix build ./foo ./bar --override-input '')" == $'normal\na\t\nb\t' ]] ## With tilde expansion +# shellcheck disable=SC2088 [[ "$(HOME=$PWD NIX_GET_COMPLETIONS=4 nix build '~/foo' --override-input '')" == $'normal\na\t' ]] +# shellcheck disable=SC2088 [[ "$(HOME=$PWD NIX_GET_COMPLETIONS=5 nix flake update --flake '~/foo' '')" == $'normal\na\t' ]] ## Out of order [[ "$(NIX_GET_COMPLETIONS=3 nix build --override-input '' '' ./foo)" == $'normal\na\t' ]] From 2732812524cf64fbdcf293f9e3f50a59e657b182 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Wed, 24 Sep 2025 18:57:30 -0700 Subject: [PATCH 057/332] Enable shellcheck for functional/compute-levels.sh --- maintainers/flake-module.nix | 1 - 1 file changed, 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index a5360675f..a749b1637 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -109,7 +109,6 @@ ''^scripts/install-multi-user\.sh$'' ''^scripts/install-systemd-multi-user\.sh$'' ''^tests/functional/completions\.sh$'' - ''^tests/functional/compute-levels\.sh$'' ''^tests/functional/config\.sh$'' ''^tests/functional/db-migration\.sh$'' ''^tests/functional/debugger\.sh$'' From 832100f543206eab33fa5cd18419e75e7fa5e7f3 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Wed, 24 Sep 2025 18:59:41 -0700 Subject: [PATCH 058/332] shellcheck fix functional/config.sh --- maintainers/flake-module.nix | 1 - tests/functional/config.sh | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index a5360675f..b5c2cfe12 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -110,7 +110,6 @@ ''^scripts/install-systemd-multi-user\.sh$'' ''^tests/functional/completions\.sh$'' ''^tests/functional/compute-levels\.sh$'' - ''^tests/functional/config\.sh$'' ''^tests/functional/db-migration\.sh$'' ''^tests/functional/debugger\.sh$'' ''^tests/functional/dependencies\.builder0\.sh$'' diff --git a/tests/functional/config.sh b/tests/functional/config.sh index 50858eaa4..c1d47454e 100755 --- a/tests/functional/config.sh +++ b/tests/functional/config.sh @@ -62,7 +62,7 @@ prev=$(nix config show | grep '^cores' | cut -d '=' -f 2 | xargs) export NIX_CONFIG="cores = 4242"$'\n'"experimental-features = nix-command flakes" exp_cores=$(nix config show | grep '^cores' | cut -d '=' -f 2 | xargs) exp_features=$(nix config show | grep '^experimental-features' | cut -d '=' -f 2 | xargs) -[[ $prev != $exp_cores ]] +[[ $prev != "$exp_cores" ]] [[ $exp_cores == "4242" ]] # flakes implies fetch-tree [[ $exp_features == "fetch-tree flakes nix-command" ]] @@ -70,7 +70,7 @@ exp_features=$(nix config show | grep '^experimental-features' | cut -d '=' -f 2 # Test that it's possible to retrieve a single setting's value val=$(nix config show | grep '^warn-dirty' | cut -d '=' -f 2 | xargs) val2=$(nix config show warn-dirty) -[[ $val == $val2 ]] +[[ $val == "$val2" ]] # Test unit prefixes. 
[[ $(nix config show --min-free 64K min-free) = 65536 ]] From 67d43f3b1226bba254f26755994c9c4ea5366df5 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Wed, 24 Sep 2025 19:06:23 -0700 Subject: [PATCH 059/332] shellcheck fix: functional/debugger.sh --- maintainers/flake-module.nix | 1 - 1 file changed, 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index a5360675f..894308c64 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -112,7 +112,6 @@ ''^tests/functional/compute-levels\.sh$'' ''^tests/functional/config\.sh$'' ''^tests/functional/db-migration\.sh$'' - ''^tests/functional/debugger\.sh$'' ''^tests/functional/dependencies\.builder0\.sh$'' ''^tests/functional/dependencies\.sh$'' ''^tests/functional/dump-db\.sh$'' From 121a8ab3ec4b161cf6e24ebb0251127060def557 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Wed, 24 Sep 2025 19:09:21 -0700 Subject: [PATCH 060/332] shellcheck fix functional/dependencies.builder0.sh --- maintainers/flake-module.nix | 1 - tests/functional/dependencies.builder0.sh | 16 ++++++++++------ 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index a5360675f..3b2ab6785 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -113,7 +113,6 @@ ''^tests/functional/config\.sh$'' ''^tests/functional/db-migration\.sh$'' ''^tests/functional/debugger\.sh$'' - ''^tests/functional/dependencies\.builder0\.sh$'' ''^tests/functional/dependencies\.sh$'' ''^tests/functional/dump-db\.sh$'' ''^tests/functional/dyn-drv/build-built-drv\.sh$'' diff --git a/tests/functional/dependencies.builder0.sh b/tests/functional/dependencies.builder0.sh index 9b11576e0..6fbe4a07a 100644 --- a/tests/functional/dependencies.builder0.sh +++ b/tests/functional/dependencies.builder0.sh @@ -1,16 +1,20 @@ +# shellcheck shell=bash +# shellcheck disable=SC2154 [ "${input1: -2}" = /. ] +# shellcheck disable=SC2154 [ "${input2: -2}" = /. ] -mkdir $out -echo $(cat $input1/foo)$(cat $input2/bar) > $out/foobar +# shellcheck disable=SC2154 +mkdir "$out" +echo "$(cat "$input1"/foo)$(cat "$input2"/bar)" > "$out"/foobar -ln -s $input2 $out/reference-to-input-2 +ln -s "$input2" "$out"/reference-to-input-2 # Self-reference. -ln -s $out $out/self +ln -s "$out" "$out"/self # Executable. 
-echo program > $out/program -chmod +x $out/program +echo program > "$out"/program +chmod +x "$out"/program echo FOO From c7c74fec674d94bef680ce1e732bcf3ff8ba450c Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Wed, 24 Sep 2025 19:11:00 -0700 Subject: [PATCH 061/332] shellcheck fix functional/dependencies.sh --- maintainers/flake-module.nix | 1 - tests/functional/dependencies.sh | 12 ++++++------ 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index a5360675f..1add49064 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -114,7 +114,6 @@ ''^tests/functional/db-migration\.sh$'' ''^tests/functional/debugger\.sh$'' ''^tests/functional/dependencies\.builder0\.sh$'' - ''^tests/functional/dependencies\.sh$'' ''^tests/functional/dump-db\.sh$'' ''^tests/functional/dyn-drv/build-built-drv\.sh$'' ''^tests/functional/dyn-drv/common\.sh$'' diff --git a/tests/functional/dependencies.sh b/tests/functional/dependencies.sh index 972bc5a9b..68c0d3f2e 100755 --- a/tests/functional/dependencies.sh +++ b/tests/functional/dependencies.sh @@ -11,22 +11,22 @@ echo "derivation is $drvPath" nix-store -q --tree "$drvPath" | grep '───.*builder-dependencies-input-1.sh' # Test Graphviz graph generation. -nix-store -q --graph "$drvPath" > $TEST_ROOT/graph +nix-store -q --graph "$drvPath" > "$TEST_ROOT"/graph if test -n "$dot"; then # Does it parse? - $dot < $TEST_ROOT/graph + $dot < "$TEST_ROOT"/graph fi # Test GraphML graph generation -nix-store -q --graphml "$drvPath" > $TEST_ROOT/graphml +nix-store -q --graphml "$drvPath" > "$TEST_ROOT"/graphml outPath=$(nix-store -rvv "$drvPath") || fail "build failed" # Test Graphviz graph generation. -nix-store -q --graph "$outPath" > $TEST_ROOT/graph +nix-store -q --graph "$outPath" > "$TEST_ROOT"/graph if test -n "$dot"; then # Does it parse? - $dot < $TEST_ROOT/graph + $dot < "$TEST_ROOT"/graph fi nix-store -q --tree "$outPath" | grep '───.*dependencies-input-2' @@ -53,7 +53,7 @@ input2OutPath=$(echo "$deps" | grep "dependencies-input-2") nix-store -q --referrers-closure "$input2OutPath" | grep "$outPath" # Check that the derivers are set properly. 
-test $(nix-store -q --deriver "$outPath") = "$drvPath" +test "$(nix-store -q --deriver "$outPath")" = "$drvPath" nix-store -q --deriver "$input2OutPath" | grepQuiet -- "-input-2.drv" # --valid-derivers returns the currently single valid .drv file From 98f716f78cfb0a43b37fcffbc22a7eeba77c3dc8 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Wed, 24 Sep 2025 19:13:46 -0700 Subject: [PATCH 062/332] Revert change for SC2059 for nix_user_for_core --- scripts/install-multi-user.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/install-multi-user.sh b/scripts/install-multi-user.sh index f577e79c8..b013190f9 100644 --- a/scripts/install-multi-user.sh +++ b/scripts/install-multi-user.sh @@ -160,7 +160,8 @@ EOF } nix_user_for_core() { - printf "%s%s" "$NIX_BUILD_USER_NAME_TEMPLATE" "$1" + # shellcheck disable=SC2059 + printf "$NIX_BUILD_USER_NAME_TEMPLATE" "$1" } nix_uid_for_core() { From 59791082fa2bdf8ccfd23f752980d3ed48767f97 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Wed, 24 Sep 2025 19:17:12 -0700 Subject: [PATCH 063/332] shellcheck fix functional/dyn-drv/build-built-drv.sh --- maintainers/flake-module.nix | 1 - tests/functional/dyn-drv/build-built-drv.sh | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index a5360675f..a492e2fe2 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -116,7 +116,6 @@ ''^tests/functional/dependencies\.builder0\.sh$'' ''^tests/functional/dependencies\.sh$'' ''^tests/functional/dump-db\.sh$'' - ''^tests/functional/dyn-drv/build-built-drv\.sh$'' ''^tests/functional/dyn-drv/common\.sh$'' ''^tests/functional/dyn-drv/dep-built-drv\.sh$'' ''^tests/functional/dyn-drv/eval-outputOf\.sh$'' diff --git a/tests/functional/dyn-drv/build-built-drv.sh b/tests/functional/dyn-drv/build-built-drv.sh index 49d61c6ce..78db41327 100644 --- a/tests/functional/dyn-drv/build-built-drv.sh +++ b/tests/functional/dyn-drv/build-built-drv.sh @@ -23,4 +23,4 @@ requireDaemonNewerThan "2.30pre20250515" out2=$(nix build "${drvDep}^out^out" --no-link) -test $out1 == $out2 +test "$out1" == "$out2" From 614ef6cfb1e5603253cadcb17ac883dcba8d35e2 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Wed, 24 Sep 2025 19:18:30 -0700 Subject: [PATCH 064/332] shellcheck fix functional/dyn-drv/common.sh --- maintainers/flake-module.nix | 1 - tests/functional/dyn-drv/common.sh | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index a5360675f..121675599 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -117,7 +117,6 @@ ''^tests/functional/dependencies\.sh$'' ''^tests/functional/dump-db\.sh$'' ''^tests/functional/dyn-drv/build-built-drv\.sh$'' - ''^tests/functional/dyn-drv/common\.sh$'' ''^tests/functional/dyn-drv/dep-built-drv\.sh$'' ''^tests/functional/dyn-drv/eval-outputOf\.sh$'' ''^tests/functional/dyn-drv/old-daemon-error-hack\.sh$'' diff --git a/tests/functional/dyn-drv/common.sh b/tests/functional/dyn-drv/common.sh index 0d95881b6..ca24498d0 100644 --- a/tests/functional/dyn-drv/common.sh +++ b/tests/functional/dyn-drv/common.sh @@ -1,3 +1,4 @@ +# shellcheck shell=bash source ../common.sh # Need backend to support text-hashing too From d26dee20b2bb0f6d0b5aba6a3f5edaff8d17dec6 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Mon, 22 Sep 2025 16:46:38 -0400 Subject: [PATCH 065/332] Clean up `nix why-depends` store accessor usage, and put back store dir in 
output With this change, the store-wide `getFSAccessor` has only one usage left --- the evaluator. If we get rid of that (as is planned), we can then remove that method altogether, simplifying `Store`. Hurray! I removed the store dir by mistake from the pretty-printed (for humans) output in eb643d034fc1b0586d9547e99ce96ad00a4a6f27. That change was not supposed to change output. --- src/nix/why-depends.cc | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/src/nix/why-depends.cc b/src/nix/why-depends.cc index 7869e33a7..473827a93 100644 --- a/src/nix/why-depends.cc +++ b/src/nix/why-depends.cc @@ -108,8 +108,6 @@ struct CmdWhyDepends : SourceExprCommand, MixOperateOnOptions auto dependencyPath = *optDependencyPath; auto dependencyPathHash = dependencyPath.hashPart(); - auto accessor = store->getFSAccessor(); - auto const inf = std::numeric_limits::max(); struct Node @@ -172,8 +170,6 @@ struct CmdWhyDepends : SourceExprCommand, MixOperateOnOptions {}; printNode = [&](Node & node, const std::string & firstPad, const std::string & tailPad) { - CanonPath pathS(node.path.to_string()); - assert(node.dist != inf); if (precise) { logger->cout( @@ -181,7 +177,7 @@ struct CmdWhyDepends : SourceExprCommand, MixOperateOnOptions firstPad, node.visited ? "\e[38;5;244m" : "", firstPad != "" ? "→ " : "", - pathS.abs()); + store->printStorePath(node.path)); } if (node.path == dependencyPath && !all && packagePath != dependencyPath) @@ -211,13 +207,13 @@ struct CmdWhyDepends : SourceExprCommand, MixOperateOnOptions contain the reference. */ std::map hits; - std::function visitPath; + auto accessor = store->getFSAccessor(node.path); - visitPath = [&](const CanonPath & p) { + auto visitPath = [&](this auto && recur, const CanonPath & p) -> void { auto st = accessor->maybeLstat(p); assert(st); - auto p2 = p == pathS ? "/" : p.abs().substr(pathS.abs().size() + 1); + auto p2 = p.isRoot() ? p.abs() : p.rel(); auto getColour = [&](const std::string & hash) { return hash == dependencyPathHash ? ANSI_GREEN : ANSI_BLUE; @@ -226,7 +222,7 @@ struct CmdWhyDepends : SourceExprCommand, MixOperateOnOptions if (st->type == SourceAccessor::Type::tDirectory) { auto names = accessor->readDirectory(p); for (auto & [name, type] : names) - visitPath(p / name); + recur(p / name); } else if (st->type == SourceAccessor::Type::tRegular) { @@ -264,7 +260,7 @@ struct CmdWhyDepends : SourceExprCommand, MixOperateOnOptions // FIXME: should use scanForReferences(). if (precise) - visitPath(pathS); + visitPath(CanonPath::root); for (auto & ref : refs) { std::string hash(ref.second->path.hashPart()); @@ -280,13 +276,12 @@ struct CmdWhyDepends : SourceExprCommand, MixOperateOnOptions } if (!precise) { - auto pathS = store->printStorePath(ref.second->path); logger->cout( "%s%s%s%s" ANSI_NORMAL, firstPad, ref.second->visited ? "\e[38;5;244m" : "", last ? 
treeLast : treeConn, - pathS); + store->printStorePath(ref.second->path)); node.visited = true; } From bc13130497c9b2514e7dc1864b534645aa03e49d Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Wed, 24 Sep 2025 20:28:16 -0700 Subject: [PATCH 066/332] shellcheck fix tests/functional/dyn-drv/dep-built-drv.sh (#14078) --- maintainers/flake-module.nix | 1 - tests/functional/dyn-drv/dep-built-drv.sh | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 8db3a2933..a975aedb8 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -112,7 +112,6 @@ ''^tests/functional/dependencies\.builder0\.sh$'' ''^tests/functional/dump-db\.sh$'' ''^tests/functional/dyn-drv/build-built-drv\.sh$'' - ''^tests/functional/dyn-drv/dep-built-drv\.sh$'' ''^tests/functional/dyn-drv/eval-outputOf\.sh$'' ''^tests/functional/dyn-drv/old-daemon-error-hack\.sh$'' ''^tests/functional/dyn-drv/recursive-mod-json\.sh$'' diff --git a/tests/functional/dyn-drv/dep-built-drv.sh b/tests/functional/dyn-drv/dep-built-drv.sh index e9a8b6b83..f5be23645 100644 --- a/tests/functional/dyn-drv/dep-built-drv.sh +++ b/tests/functional/dyn-drv/dep-built-drv.sh @@ -11,4 +11,4 @@ clearStore out2=$(nix-build ./text-hashed-output.nix -A wrapper --no-out-link) -diff -r $out1 $out2 +diff -r "$out1" "$out2" From 339338e166d9ba8fd0b56028ed87859e1717973e Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 22 Sep 2025 22:28:12 +0200 Subject: [PATCH 067/332] MountedSourceAccessor: Move into a separate header, add mount method --- src/libexpr/eval.cc | 1 + src/libfetchers/git.cc | 1 + src/libutil/include/nix/util/meson.build | 1 + .../nix/util/mounted-source-accessor.hh | 20 ++++++++++ .../include/nix/util/source-accessor.hh | 2 - src/libutil/mounted-source-accessor.cc | 38 +++++++++++++------ src/nix/env.cc | 1 + 7 files changed, 51 insertions(+), 13 deletions(-) create mode 100644 src/libutil/include/nix/util/mounted-source-accessor.hh diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 87b1e73a5..5b69a2174 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -17,6 +17,7 @@ #include "nix/expr/print.hh" #include "nix/fetchers/filtering-source-accessor.hh" #include "nix/util/memory-source-accessor.hh" +#include "nix/util/mounted-source-accessor.hh" #include "nix/expr/gc-small-vector.hh" #include "nix/util/url.hh" #include "nix/fetchers/fetch-to-store.hh" diff --git a/src/libfetchers/git.cc b/src/libfetchers/git.cc index f750d907d..f6f5c30ee 100644 --- a/src/libfetchers/git.cc +++ b/src/libfetchers/git.cc @@ -15,6 +15,7 @@ #include "nix/fetchers/fetch-settings.hh" #include "nix/util/json-utils.hh" #include "nix/util/archive.hh" +#include "nix/util/mounted-source-accessor.hh" #include #include diff --git a/src/libutil/include/nix/util/meson.build b/src/libutil/include/nix/util/meson.build index 07a4f1d11..dcfaa8e3f 100644 --- a/src/libutil/include/nix/util/meson.build +++ b/src/libutil/include/nix/util/meson.build @@ -47,6 +47,7 @@ headers = files( 'logging.hh', 'lru-cache.hh', 'memory-source-accessor.hh', + 'mounted-source-accessor.hh', 'muxable-pipe.hh', 'os-string.hh', 'pool.hh', diff --git a/src/libutil/include/nix/util/mounted-source-accessor.hh b/src/libutil/include/nix/util/mounted-source-accessor.hh new file mode 100644 index 000000000..518ae4f09 --- /dev/null +++ b/src/libutil/include/nix/util/mounted-source-accessor.hh @@ -0,0 +1,20 @@ +#pragma once + +#include "source-accessor.hh" + +namespace nix { + +struct 
MountedSourceAccessor : SourceAccessor +{ + virtual void mount(CanonPath mountPoint, ref accessor) = 0; + + /** + * Return the accessor mounted on `mountPoint`, or `nullptr` if + * there is no such mount point. + */ + virtual std::shared_ptr getMount(CanonPath mountPoint) = 0; +}; + +ref makeMountedSourceAccessor(std::map> mounts); + +} // namespace nix diff --git a/src/libutil/include/nix/util/source-accessor.hh b/src/libutil/include/nix/util/source-accessor.hh index aa937da48..7419ef392 100644 --- a/src/libutil/include/nix/util/source-accessor.hh +++ b/src/libutil/include/nix/util/source-accessor.hh @@ -214,8 +214,6 @@ ref getFSSourceAccessor(); */ ref makeFSSourceAccessor(std::filesystem::path root); -ref makeMountedSourceAccessor(std::map> mounts); - /** * Construct an accessor that presents a "union" view of a vector of * underlying accessors. Earlier accessors take precedence over later. diff --git a/src/libutil/mounted-source-accessor.cc b/src/libutil/mounted-source-accessor.cc index 4c32147f9..5c0ecc1ff 100644 --- a/src/libutil/mounted-source-accessor.cc +++ b/src/libutil/mounted-source-accessor.cc @@ -1,18 +1,22 @@ -#include "nix/util/source-accessor.hh" +#include "nix/util/mounted-source-accessor.hh" + +#include namespace nix { -struct MountedSourceAccessor : SourceAccessor +struct MountedSourceAccessorImpl : MountedSourceAccessor { - std::map> mounts; + boost::concurrent_flat_map> mounts; - MountedSourceAccessor(std::map> _mounts) - : mounts(std::move(_mounts)) + MountedSourceAccessorImpl(std::map> _mounts) { displayPrefix.clear(); // Currently we require a root filesystem. This could be relaxed. - assert(mounts.contains(CanonPath::root)); + assert(_mounts.contains(CanonPath::root)); + + for (auto & [path, accessor] : _mounts) + mount(path, accessor); // FIXME: return dummy parent directories automatically? } @@ -52,10 +56,9 @@ struct MountedSourceAccessor : SourceAccessor // Find the nearest parent of `path` that is a mount point. std::vector subpath; while (true) { - auto i = mounts.find(path); - if (i != mounts.end()) { + if (auto mount = getMount(path)) { std::reverse(subpath.begin(), subpath.end()); - return {i->second, CanonPath(subpath)}; + return {ref(mount), CanonPath(subpath)}; } assert(!path.isRoot()); @@ -69,11 +72,24 @@ struct MountedSourceAccessor : SourceAccessor auto [accessor, subpath] = resolve(path); return accessor->getPhysicalPath(subpath); } + + void mount(CanonPath mountPoint, ref accessor) override + { + mounts.emplace(std::move(mountPoint), std::move(accessor)); + } + + std::shared_ptr getMount(CanonPath mountPoint) override + { + if (auto res = getConcurrent(mounts, mountPoint)) + return *res; + else + return nullptr; + } }; -ref makeMountedSourceAccessor(std::map> mounts) +ref makeMountedSourceAccessor(std::map> mounts) { - return make_ref(std::move(mounts)); + return make_ref(std::move(mounts)); } } // namespace nix diff --git a/src/nix/env.cc b/src/nix/env.cc index c8fb5bee0..0a211399a 100644 --- a/src/nix/env.cc +++ b/src/nix/env.cc @@ -7,6 +7,7 @@ #include "nix/util/strings.hh" #include "nix/util/executable-path.hh" #include "nix/util/environment-variables.hh" +#include "nix/util/mounted-source-accessor.hh" using namespace nix; From 8ef70ef522a468781bf69ccee6ffe93b75b99f65 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Wed, 24 Sep 2025 23:37:07 -0400 Subject: [PATCH 068/332] Rename one overload to `allowPathLegacy` Makes it easier to tell when it is isued. 
--- src/libexpr/eval.cc | 4 ++-- src/libexpr/include/nix/expr/eval.hh | 5 ++++- src/nix/profile.cc | 4 ++-- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 5b69a2174..1473a7660 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -334,7 +334,7 @@ EvalState::EvalState( EvalState::~EvalState() {} -void EvalState::allowPath(const Path & path) +void EvalState::allowPathLegacy(const Path & path) { if (auto rootFS2 = rootFS.dynamic_pointer_cast()) rootFS2->allowPrefix(CanonPath(path)); @@ -3177,7 +3177,7 @@ std::optional EvalState::resolveLookupPathPath(const LookupPath::Pat /* Allow access to paths in the search path. */ if (initAccessControl) { - allowPath(path.path.abs()); + allowPathLegacy(path.path.abs()); if (store->isInStore(path.path.abs())) { try { allowClosure(store->toStorePath(path.path.abs()).first); diff --git a/src/libexpr/include/nix/expr/eval.hh b/src/libexpr/include/nix/expr/eval.hh index 8f7a0ec32..b841f0bc6 100644 --- a/src/libexpr/include/nix/expr/eval.hh +++ b/src/libexpr/include/nix/expr/eval.hh @@ -488,8 +488,11 @@ public: /** * Allow access to a path. + * + * Only for restrict eval: pure eval just whitelist store paths, + * never arbitrary paths. */ - void allowPath(const Path & path); + void allowPathLegacy(const Path & path); /** * Allow access to a store path. Note that this gets remapped to diff --git a/src/nix/profile.cc b/src/nix/profile.cc index 68005171f..80177cf13 100644 --- a/src/nix/profile.cc +++ b/src/nix/profile.cc @@ -177,8 +177,8 @@ struct ProfileManifest else if (std::filesystem::exists(profile / "manifest.nix")) { // FIXME: needed because of pure mode; ugly. - state.allowPath(state.store->followLinksToStore(profile.string())); - state.allowPath(state.store->followLinksToStore((profile / "manifest.nix").string())); + state.allowPath(state.store->followLinksToStorePath(profile.string())); + state.allowPath(state.store->followLinksToStorePath((profile / "manifest.nix").string())); auto packageInfos = queryInstalled(state, state.store->followLinksToStore(profile.string())); From 35189c0ae0a271b726f0eafed8756a13f37eaae4 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 22 Sep 2025 23:04:58 +0200 Subject: [PATCH 069/332] Expose the fact that `storeFS` is a `MountedSourceAccessor` This will become useful. --- src/libexpr/eval.cc | 2 +- src/libexpr/include/nix/expr/eval.hh | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 1473a7660..049f2e5a2 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -240,7 +240,7 @@ EvalState::EvalState( auto realStoreDir = dirOf(store->toRealPath(StorePath::dummy)); if (settings.pureEval || store->storeDir != realStoreDir) { - accessor = settings.pureEval ? storeFS : makeUnionSourceAccessor({accessor, storeFS}); + accessor = settings.pureEval ? storeFS.cast() : makeUnionSourceAccessor({accessor, storeFS}); } /* Apply access control if needed. */ diff --git a/src/libexpr/include/nix/expr/eval.hh b/src/libexpr/include/nix/expr/eval.hh index b841f0bc6..66ff7d6ea 100644 --- a/src/libexpr/include/nix/expr/eval.hh +++ b/src/libexpr/include/nix/expr/eval.hh @@ -48,6 +48,7 @@ class StorePath; struct SingleDerivedPath; enum RepairFlag : bool; struct MemorySourceAccessor; +struct MountedSourceAccessor; namespace eval_cache { class EvalCache; @@ -319,7 +320,7 @@ public: /** * The accessor corresponding to `store`. 
*/ - const ref storeFS; + const ref storeFS; /** * The accessor for the root filesystem. From 9b2f282af59443627a4fde9320485fb3ca640507 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Wed, 24 Sep 2025 23:54:39 -0400 Subject: [PATCH 070/332] Simplify the definition of `rootFS` It was getting very hard to follow. --- src/libexpr/eval.cc | 35 +++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 049f2e5a2..ad84c6ccb 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -226,22 +226,25 @@ EvalState::EvalState( */ {CanonPath(store->storeDir), store->getFSAccessor(settings.pureEval)}, })) - , rootFS(({ - /* In pure eval mode, we provide a filesystem that only - contains the Nix store. + , rootFS([&] { + auto accessor = [&]() -> decltype(rootFS) { + /* In pure eval mode, we provide a filesystem that only + contains the Nix store. */ + if (settings.pureEval) + return storeFS; - If we have a chroot store and pure eval is not enabled, - use a union accessor to make the chroot store available - at its logical location while still having the - underlying directory available. This is necessary for - instance if we're evaluating a file from the physical - /nix/store while using a chroot store. */ - auto accessor = getFSSourceAccessor(); + /* If we have a chroot store and pure eval is not enabled, + use a union accessor to make the chroot store available + at its logical location while still having the underlying + directory available. This is necessary for instance if + we're evaluating a file from the physical /nix/store + while using a chroot store. */ + auto realStoreDir = dirOf(store->toRealPath(StorePath::dummy)); + if (store->storeDir != realStoreDir) + return makeUnionSourceAccessor({getFSSourceAccessor(), storeFS}); - auto realStoreDir = dirOf(store->toRealPath(StorePath::dummy)); - if (settings.pureEval || store->storeDir != realStoreDir) { - accessor = settings.pureEval ? storeFS.cast() : makeUnionSourceAccessor({accessor, storeFS}); - } + return getFSSourceAccessor(); + }(); /* Apply access control if needed. 
*/ if (settings.restrictEval || settings.pureEval) @@ -252,8 +255,8 @@ EvalState::EvalState( throw RestrictedPathError("access to absolute path '%1%' is forbidden %2%", path, modeInformation); }); - accessor; - })) + return accessor; + }()) , corepkgsFS(make_ref()) , internalFS(make_ref()) , derivationInternal{corepkgsFS->addFile( From e15c44d46b8d5f1388608cb813e35322a084aab4 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Wed, 24 Sep 2025 19:04:53 -0700 Subject: [PATCH 071/332] shellcheck fix functional/db-migration.sh --- maintainers/flake-module.nix | 1 - tests/functional/db-migration.sh | 7 ++++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 38a4fa8ed..81ce862ce 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -108,7 +108,6 @@ # We haven't linted these files yet ''^scripts/install-systemd-multi-user\.sh$'' ''^tests/functional/compute-levels\.sh$'' - ''^tests/functional/db-migration\.sh$'' ''^tests/functional/dependencies\.builder0\.sh$'' ''^tests/functional/dump-db\.sh$'' ''^tests/functional/dyn-drv/eval-outputOf\.sh$'' diff --git a/tests/functional/db-migration.sh b/tests/functional/db-migration.sh index 6feabb90d..bdbdd21fa 100755 --- a/tests/functional/db-migration.sh +++ b/tests/functional/db-migration.sh @@ -19,14 +19,15 @@ PATH_WITH_NEW_NIX="$PATH" export PATH="${NIX_DAEMON_PACKAGE}/bin:$PATH" clearStore nix-build simple.nix --no-out-link -nix-store --generate-binary-cache-key cache1.example.org $TEST_ROOT/sk1 $TEST_ROOT/pk1 +nix-store --generate-binary-cache-key cache1.example.org "$TEST_ROOT/sk1" "$TEST_ROOT/pk1" dependenciesOutPath=$(nix-build dependencies.nix --no-out-link --secret-key-files "$TEST_ROOT/sk1") fixedOutPath=$(IMPURE_VAR1=foo IMPURE_VAR2=bar nix-build fixed.nix -A good.0 --no-out-link) # Migrate to the new schema and ensure that everything's there export PATH="$PATH_WITH_NEW_NIX" -info=$(nix path-info --json $dependenciesOutPath) +info=$(nix path-info --json "$dependenciesOutPath") [[ $info =~ '"ultimate":true' ]] +# shellcheck disable=SC2076 [[ $info =~ 'cache1.example.org' ]] nix verify -r "$fixedOutPath" -nix verify -r "$dependenciesOutPath" --sigs-needed 1 --trusted-public-keys $(cat $TEST_ROOT/pk1) +nix verify -r "$dependenciesOutPath" --sigs-needed 1 --trusted-public-keys "$(cat "$TEST_ROOT/pk1")" From 8d257f5510dbfbab8a74e2b7b0ff60bcd720e141 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 3 Jul 2025 13:21:30 +0200 Subject: [PATCH 072/332] EvalState: Make the counters atomic --- src/libexpr/eval.cc | 30 ++++++++++++++-------------- src/libexpr/include/nix/expr/eval.hh | 26 ++++++++++++------------ 2 files changed, 28 insertions(+), 28 deletions(-) diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 87b1e73a5..a953e20d7 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -892,7 +892,7 @@ Value * EvalState::getBool(bool b) return b ? 
&Value::vTrue : &Value::vFalse; } -unsigned long nrThunks = 0; +static std::atomic nrThunks = 0; static inline void mkThunk(Value & v, Env & env, Expr * expr) { @@ -2940,18 +2940,18 @@ void EvalState::printStatistics() #endif }; topObj["envs"] = { - {"number", nrEnvs}, - {"elements", nrValuesInEnvs}, + {"number", nrEnvs.load()}, + {"elements", nrValuesInEnvs.load()}, {"bytes", bEnvs}, }; topObj["nrExprs"] = Expr::nrExprs; topObj["list"] = { - {"elements", nrListElems}, + {"elements", nrListElems.load()}, {"bytes", bLists}, - {"concats", nrListConcats}, + {"concats", nrListConcats.load()}, }; topObj["values"] = { - {"number", nrValues}, + {"number", nrValues.load()}, {"bytes", bValues}, }; topObj["symbols"] = { @@ -2959,9 +2959,9 @@ void EvalState::printStatistics() {"bytes", symbols.totalSize()}, }; topObj["sets"] = { - {"number", nrAttrsets}, + {"number", nrAttrsets.load()}, {"bytes", bAttrsets}, - {"elements", nrAttrsInAttrsets}, + {"elements", nrAttrsInAttrsets.load()}, }; topObj["sizes"] = { {"Env", sizeof(Env)}, @@ -2969,13 +2969,13 @@ void EvalState::printStatistics() {"Bindings", sizeof(Bindings)}, {"Attr", sizeof(Attr)}, }; - topObj["nrOpUpdates"] = nrOpUpdates; - topObj["nrOpUpdateValuesCopied"] = nrOpUpdateValuesCopied; - topObj["nrThunks"] = nrThunks; - topObj["nrAvoided"] = nrAvoided; - topObj["nrLookups"] = nrLookups; - topObj["nrPrimOpCalls"] = nrPrimOpCalls; - topObj["nrFunctionCalls"] = nrFunctionCalls; + topObj["nrOpUpdates"] = nrOpUpdates.load(); + topObj["nrOpUpdateValuesCopied"] = nrOpUpdateValuesCopied.load(); + topObj["nrThunks"] = nrThunks.load(); + topObj["nrAvoided"] = nrAvoided.load(); + topObj["nrLookups"] = nrLookups.load(); + topObj["nrPrimOpCalls"] = nrPrimOpCalls.load(); + topObj["nrFunctionCalls"] = nrFunctionCalls.load(); #if NIX_USE_BOEHMGC topObj["gc"] = { {"heapSize", heapSize}, diff --git a/src/libexpr/include/nix/expr/eval.hh b/src/libexpr/include/nix/expr/eval.hh index 8f7a0ec32..958b6fbee 100644 --- a/src/libexpr/include/nix/expr/eval.hh +++ b/src/libexpr/include/nix/expr/eval.hh @@ -961,19 +961,19 @@ private: */ std::string mkSingleDerivedPathStringRaw(const SingleDerivedPath & p); - unsigned long nrEnvs = 0; - unsigned long nrValuesInEnvs = 0; - unsigned long nrValues = 0; - unsigned long nrListElems = 0; - unsigned long nrLookups = 0; - unsigned long nrAttrsets = 0; - unsigned long nrAttrsInAttrsets = 0; - unsigned long nrAvoided = 0; - unsigned long nrOpUpdates = 0; - unsigned long nrOpUpdateValuesCopied = 0; - unsigned long nrListConcats = 0; - unsigned long nrPrimOpCalls = 0; - unsigned long nrFunctionCalls = 0; + std::atomic nrEnvs = 0; + std::atomic nrValuesInEnvs = 0; + std::atomic nrValues = 0; + std::atomic nrListElems = 0; + std::atomic nrLookups = 0; + std::atomic nrAttrsets = 0; + std::atomic nrAttrsInAttrsets = 0; + std::atomic nrAvoided = 0; + std::atomic nrOpUpdates = 0; + std::atomic nrOpUpdateValuesCopied = 0; + std::atomic nrListConcats = 0; + std::atomic nrPrimOpCalls = 0; + std::atomic nrFunctionCalls = 0; bool countCalls; From e8f951289fa44bc36ead0d51b283f09ecac9103b Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 25 Aug 2025 20:06:00 +0200 Subject: [PATCH 073/332] EvalState: Don't maintain stats by default These counters are extremely expensive in a multi-threaded program. For instance, disabling them speeds up evaluation of the NixOS/nix/2.21.2 from 32.6s to 17.8s. 
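A minimal sketch of the intended call-site pattern (illustrative only;
`noteThunkAllocated` is a hypothetical helper, not part of this patch,
and the real counters such as `nrThunks` are bumped directly at their
existing call sites):

    #include "nix/expr/counter.hh" // header added by this patch

    using namespace nix;

    // One Counter per statistic; alignas(64) keeps each counter on its
    // own cache line so concurrent updates from evaluator threads do
    // not false-share.
    static Counter nrThunks;

    void noteThunkAllocated()
    {
        // When NIX_SHOW_STATS is unset, Counter::enabled is false and
        // operator++ returns 0 without touching the atomic, so the hot
        // path pays only a predictable branch and no contention.
        nrThunks++;
    }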
--- src/libexpr/eval.cc | 11 ++-- src/libexpr/include/nix/expr/counter.hh | 70 ++++++++++++++++++++++++ src/libexpr/include/nix/expr/eval.hh | 27 ++++----- src/libexpr/include/nix/expr/meson.build | 1 + src/libexpr/include/nix/expr/nixexpr.hh | 3 +- src/libexpr/nixexpr.cc | 2 +- 6 files changed, 94 insertions(+), 20 deletions(-) create mode 100644 src/libexpr/include/nix/expr/counter.hh diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index a953e20d7..0ec819809 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -287,6 +287,7 @@ EvalState::EvalState( assertGCInitialized(); static_assert(sizeof(Env) <= 16, "environment must be <= 16 bytes"); + static_assert(sizeof(Counter) == 64, "counters must be 64 bytes"); /* Construct the Nix expression search path. */ assert(lookupPath.elements.empty()); @@ -892,7 +893,7 @@ Value * EvalState::getBool(bool b) return b ? &Value::vTrue : &Value::vFalse; } -static std::atomic nrThunks = 0; +static Counter nrThunks; static inline void mkThunk(Value & v, Env & env, Expr * expr) { @@ -2891,11 +2892,11 @@ bool EvalState::fullGC() #endif } +bool Counter::enabled = getEnv("NIX_SHOW_STATS").value_or("0") != "0"; + void EvalState::maybePrintStats() { - bool showStats = getEnv("NIX_SHOW_STATS").value_or("0") != "0"; - - if (showStats) { + if (Counter::enabled) { // Make the final heap size more deterministic. #if NIX_USE_BOEHMGC if (!fullGC()) { @@ -2944,7 +2945,7 @@ void EvalState::printStatistics() {"elements", nrValuesInEnvs.load()}, {"bytes", bEnvs}, }; - topObj["nrExprs"] = Expr::nrExprs; + topObj["nrExprs"] = Expr::nrExprs.load(); topObj["list"] = { {"elements", nrListElems.load()}, {"bytes", bLists}, diff --git a/src/libexpr/include/nix/expr/counter.hh b/src/libexpr/include/nix/expr/counter.hh new file mode 100644 index 000000000..efbf23de3 --- /dev/null +++ b/src/libexpr/include/nix/expr/counter.hh @@ -0,0 +1,70 @@ +#pragma once + +#include +#include + +namespace nix { + +/** + * An atomic counter aligned on a cache line to prevent false sharing. + * The counter is only enabled when the `NIX_SHOW_STATS` environment + * variable is set. This is to prevent contention on these counters + * when multi-threaded evaluation is enabled. + */ +struct alignas(64) Counter +{ + using value_type = uint64_t; + + std::atomic inner{0}; + + static bool enabled; + + Counter() {} + + operator value_type() const noexcept + { + return inner; + } + + void operator=(value_type n) noexcept + { + inner = n; + } + + value_type load() const noexcept + { + return inner; + } + + value_type operator++() noexcept + { + return enabled ? ++inner : 0; + } + + value_type operator++(int) noexcept + { + return enabled ? inner++ : 0; + } + + value_type operator--() noexcept + { + return enabled ? --inner : 0; + } + + value_type operator--(int) noexcept + { + return enabled ? inner-- : 0; + } + + value_type operator+=(value_type n) noexcept + { + return enabled ? inner += n : 0; + } + + value_type operator-=(value_type n) noexcept + { + return enabled ? 
inner -= n : 0; + } +}; + +} // namespace nix diff --git a/src/libexpr/include/nix/expr/eval.hh b/src/libexpr/include/nix/expr/eval.hh index 958b6fbee..1c2552991 100644 --- a/src/libexpr/include/nix/expr/eval.hh +++ b/src/libexpr/include/nix/expr/eval.hh @@ -16,6 +16,7 @@ #include "nix/expr/search-path.hh" #include "nix/expr/repl-exit-status.hh" #include "nix/util/ref.hh" +#include "nix/expr/counter.hh" // For `NIX_USE_BOEHMGC`, and if that's set, `GC_THREADS` #include "nix/expr/config.hh" @@ -961,19 +962,19 @@ private: */ std::string mkSingleDerivedPathStringRaw(const SingleDerivedPath & p); - std::atomic nrEnvs = 0; - std::atomic nrValuesInEnvs = 0; - std::atomic nrValues = 0; - std::atomic nrListElems = 0; - std::atomic nrLookups = 0; - std::atomic nrAttrsets = 0; - std::atomic nrAttrsInAttrsets = 0; - std::atomic nrAvoided = 0; - std::atomic nrOpUpdates = 0; - std::atomic nrOpUpdateValuesCopied = 0; - std::atomic nrListConcats = 0; - std::atomic nrPrimOpCalls = 0; - std::atomic nrFunctionCalls = 0; + Counter nrEnvs; + Counter nrValuesInEnvs; + Counter nrValues; + Counter nrListElems; + Counter nrLookups; + Counter nrAttrsets; + Counter nrAttrsInAttrsets; + Counter nrAvoided; + Counter nrOpUpdates; + Counter nrOpUpdateValuesCopied; + Counter nrListConcats; + Counter nrPrimOpCalls; + Counter nrFunctionCalls; bool countCalls; diff --git a/src/libexpr/include/nix/expr/meson.build b/src/libexpr/include/nix/expr/meson.build index 04f8eaf71..44ff171c2 100644 --- a/src/libexpr/include/nix/expr/meson.build +++ b/src/libexpr/include/nix/expr/meson.build @@ -10,6 +10,7 @@ config_pub_h = configure_file( headers = [ config_pub_h ] + files( 'attr-path.hh', 'attr-set.hh', + 'counter.hh', 'eval-cache.hh', 'eval-error.hh', 'eval-gc.hh', diff --git a/src/libexpr/include/nix/expr/nixexpr.hh b/src/libexpr/include/nix/expr/nixexpr.hh index 7721918c3..e0203c732 100644 --- a/src/libexpr/include/nix/expr/nixexpr.hh +++ b/src/libexpr/include/nix/expr/nixexpr.hh @@ -9,6 +9,7 @@ #include "nix/expr/symbol-table.hh" #include "nix/expr/eval-error.hh" #include "nix/util/pos-idx.hh" +#include "nix/expr/counter.hh" namespace nix { @@ -92,7 +93,7 @@ struct Expr Symbol sub, lessThan, mul, div, or_, findFile, nixPath, body; }; - static unsigned long nrExprs; + static Counter nrExprs; Expr() { diff --git a/src/libexpr/nixexpr.cc b/src/libexpr/nixexpr.cc index c0a25d1d4..43e85cb16 100644 --- a/src/libexpr/nixexpr.cc +++ b/src/libexpr/nixexpr.cc @@ -11,7 +11,7 @@ namespace nix { -unsigned long Expr::nrExprs = 0; +Counter Expr::nrExprs; ExprBlackHole eBlackHole; From a08ae1d024ab32e014e847ae7f70a661e7e380a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Thu, 25 Sep 2025 08:45:27 +0200 Subject: [PATCH 074/332] doc: Add release notes for C API lazy accessors --- doc/manual/rl-next/c-api-lazy-accessors.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 doc/manual/rl-next/c-api-lazy-accessors.md diff --git a/doc/manual/rl-next/c-api-lazy-accessors.md b/doc/manual/rl-next/c-api-lazy-accessors.md new file mode 100644 index 000000000..bd0604f0d --- /dev/null +++ b/doc/manual/rl-next/c-api-lazy-accessors.md @@ -0,0 +1,16 @@ +--- +synopsis: "C API: Add lazy attribute and list item accessors" +prs: [14030] +--- + +The C API now includes lazy accessor functions for retrieving values from lists and attribute sets without forcing evaluation: + +- `nix_get_list_byidx_lazy()` - Get a list element without forcing its evaluation +- `nix_get_attr_byname_lazy()` - Get an attribute value by name 
without forcing evaluation +- `nix_get_attr_byidx_lazy()` - Get an attribute by index without forcing evaluation + +These functions are useful when forwarding unevaluated sub-values to other lists, attribute sets, or function calls. They allow more efficient handling of Nix values by deferring evaluation until actually needed. + +Additionally, bounds checking has been improved for all `_byidx` functions to properly validate indices before access, preventing potential out-of-bounds errors. + +The documentation for `NIX_ERR_KEY` error handling has also been clarified to specify when this error code is returned. \ No newline at end of file From 6e2c11e296a6b52c33c8d276796c568460889ef8 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Wed, 24 Sep 2025 19:15:53 -0700 Subject: [PATCH 075/332] shellcheck fix functional/dump-db.sh Add back the path variable --- tests/functional/dump-db.sh | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/tests/functional/dump-db.sh b/tests/functional/dump-db.sh index 14181b4b6..70d79e9fb 100755 --- a/tests/functional/dump-db.sh +++ b/tests/functional/dump-db.sh @@ -8,19 +8,18 @@ needLocalStore "--dump-db requires a local store" clearStore -path=$(nix-build dependencies.nix -o $TEST_ROOT/result) +nix-build dependencies.nix -o "$TEST_ROOT"/result +deps="$(nix-store -qR "$TEST_ROOT"/result)" -deps="$(nix-store -qR $TEST_ROOT/result)" +nix-store --dump-db > "$TEST_ROOT"/dump -nix-store --dump-db > $TEST_ROOT/dump +rm -rf "$NIX_STATE_DIR"/db -rm -rf $NIX_STATE_DIR/db +nix-store --load-db < "$TEST_ROOT"/dump -nix-store --load-db < $TEST_ROOT/dump - -deps2="$(nix-store -qR $TEST_ROOT/result)" +deps2="$(nix-store -qR "$TEST_ROOT"/result)" [ "$deps" = "$deps2" ]; -nix-store --dump-db > $TEST_ROOT/dump2 -cmp $TEST_ROOT/dump $TEST_ROOT/dump2 +nix-store --dump-db > "$TEST_ROOT"/dump2 +cmp "$TEST_ROOT"/dump "$TEST_ROOT"/dump2 From 74305d52606d21baeb4e07946b6e84fac87a6c52 Mon Sep 17 00:00:00 2001 From: Seth Flynn Date: Mon, 22 Sep 2025 05:59:11 -0400 Subject: [PATCH 076/332] libfetchers: avoid re-copying substituted inputs Previously, Nix would not create a cache entry for substituted/cached inputs This led to severe slowdowns in some scenarios where a large input (like Nixpkgs) had already been unpacked to the store but didn't exist in a users cache, as described in https://github.com/NixOS/nix/issues/11228 Using the same method as https://github.com/NixOS/nix/pull/12911, we can create a cache entry for the fingerprint of substituted/cached inputs and avoid this problem entirely --- doc/manual/rl-next/cached-substituted-inputs.md | 10 ++++++++++ src/libfetchers/fetchers.cc | 10 ++++++++++ 2 files changed, 20 insertions(+) create mode 100644 doc/manual/rl-next/cached-substituted-inputs.md diff --git a/doc/manual/rl-next/cached-substituted-inputs.md b/doc/manual/rl-next/cached-substituted-inputs.md new file mode 100644 index 000000000..b0b53a213 --- /dev/null +++ b/doc/manual/rl-next/cached-substituted-inputs.md @@ -0,0 +1,10 @@ +--- +synopsis: "Substituted flake inputs are no longer re-copied to the store" +prs: [14041] +--- + +Since 2.25, Nix would fail to store a cache entry for substituted flake inputs, +which in turn would cause them to be re-copied to the store on initial +evaluation. 
Caching these inputs results in a near doubling of a performance in +some cases — especially on I/O-bound machines and when using commands that +fetch many inputs, like `nix flake archive/prefetch-inputs` diff --git a/src/libfetchers/fetchers.cc b/src/libfetchers/fetchers.cc index 54013bf55..b056c137d 100644 --- a/src/libfetchers/fetchers.cc +++ b/src/libfetchers/fetchers.cc @@ -5,6 +5,7 @@ #include "nix/util/json-utils.hh" #include "nix/fetchers/store-path-accessor.hh" #include "nix/fetchers/fetch-settings.hh" +#include "nix/fetchers/fetch-to-store.hh" #include @@ -336,6 +337,15 @@ std::pair, Input> Input::getAccessorUnchecked(ref sto accessor->fingerprint = getFingerprint(store); + // Store a cache entry for the substituted tree so later fetches + // can reuse the existing nar instead of copying the unpacked + // input back into the store on every evaluation. + if (accessor->fingerprint) { + ContentAddressMethod method = ContentAddressMethod::Raw::NixArchive; + auto cacheKey = makeFetchToStoreCacheKey(getName(), *accessor->fingerprint, method, "/"); + settings->getCache()->upsert(cacheKey, *store, {}, storePath); + } + accessor->setPathDisplay("«" + to_string() + "»"); return {accessor, *this}; From 55c7ef9d40f1c473034701810ac43b398a9492eb Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 22 Sep 2025 22:43:12 +0200 Subject: [PATCH 077/332] SourceAccessor: Make lstat() virtual With FilteringSourceAccessor, lstat() needs to throw a different exception if the path is inaccessible than if it doesn't exist. --- src/libexpr/eval.cc | 10 ++++++++++ src/libfetchers/filtering-source-accessor.cc | 13 ++++++++++++- .../nix/fetchers/filtering-source-accessor.hh | 4 ++++ src/libutil/include/nix/util/source-accessor.hh | 2 +- src/libutil/mounted-source-accessor.cc | 6 ++++++ 5 files changed, 33 insertions(+), 2 deletions(-) diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index f82fd93b5..4db598871 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -3127,6 +3127,11 @@ SourcePath EvalState::findFile(const LookupPath & lookupPath, const std::string_ auto res = (r / CanonPath(suffix)).resolveSymlinks(); if (res.pathExists()) return res; + + // Backward compatibility hack: throw an exception if access + // to this path is not allowed. + if (auto accessor = res.accessor.dynamic_pointer_cast()) + accessor->checkAccess(res.path); } if (hasPrefix(path, "nix/")) @@ -3193,6 +3198,11 @@ std::optional EvalState::resolveLookupPathPath(const LookupPath::Pat if (path.resolveSymlinks().pathExists()) return finish(std::move(path)); else { + // Backward compatibility hack: throw an exception if access + // to this path is not allowed. 
+ if (auto accessor = path.accessor.dynamic_pointer_cast()) + accessor->checkAccess(path.path); + logWarning({.msg = HintFmt("Nix search path entry '%1%' does not exist, ignoring", value)}); } } diff --git a/src/libfetchers/filtering-source-accessor.cc b/src/libfetchers/filtering-source-accessor.cc index a99ecacef..5a3a0f07b 100644 --- a/src/libfetchers/filtering-source-accessor.cc +++ b/src/libfetchers/filtering-source-accessor.cc @@ -16,15 +16,26 @@ std::string FilteringSourceAccessor::readFile(const CanonPath & path) return next->readFile(prefix / path); } +void FilteringSourceAccessor::readFile(const CanonPath & path, Sink & sink, std::function sizeCallback) +{ + checkAccess(path); + return next->readFile(prefix / path, sink, sizeCallback); +} + bool FilteringSourceAccessor::pathExists(const CanonPath & path) { return isAllowed(path) && next->pathExists(prefix / path); } std::optional FilteringSourceAccessor::maybeLstat(const CanonPath & path) +{ + return isAllowed(path) ? next->maybeLstat(prefix / path) : std::nullopt; +} + +SourceAccessor::Stat FilteringSourceAccessor::lstat(const CanonPath & path) { checkAccess(path); - return next->maybeLstat(prefix / path); + return next->lstat(prefix / path); } SourceAccessor::DirEntries FilteringSourceAccessor::readDirectory(const CanonPath & path) diff --git a/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh b/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh index f8a57bfb3..1c2fd60b0 100644 --- a/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh +++ b/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh @@ -36,8 +36,12 @@ struct FilteringSourceAccessor : SourceAccessor std::string readFile(const CanonPath & path) override; + void readFile(const CanonPath & path, Sink & sink, std::function sizeCallback) override; + bool pathExists(const CanonPath & path) override; + Stat lstat(const CanonPath & path) override; + std::optional maybeLstat(const CanonPath & path) override; DirEntries readDirectory(const CanonPath & path) override; diff --git a/src/libutil/include/nix/util/source-accessor.hh b/src/libutil/include/nix/util/source-accessor.hh index 7419ef392..e57b85411 100644 --- a/src/libutil/include/nix/util/source-accessor.hh +++ b/src/libutil/include/nix/util/source-accessor.hh @@ -121,7 +121,7 @@ struct SourceAccessor : std::enable_shared_from_this std::string typeString(); }; - Stat lstat(const CanonPath & path); + virtual Stat lstat(const CanonPath & path); virtual std::optional maybeLstat(const CanonPath & path) = 0; diff --git a/src/libutil/mounted-source-accessor.cc b/src/libutil/mounted-source-accessor.cc index 5c0ecc1ff..cd7e3d496 100644 --- a/src/libutil/mounted-source-accessor.cc +++ b/src/libutil/mounted-source-accessor.cc @@ -27,6 +27,12 @@ struct MountedSourceAccessorImpl : MountedSourceAccessor return accessor->readFile(subpath); } + Stat lstat(const CanonPath & path) override + { + auto [accessor, subpath] = resolve(path); + return accessor->lstat(subpath); + } + std::optional maybeLstat(const CanonPath & path) override { auto [accessor, subpath] = resolve(path); From 28d11c5bcc9930ec20293d672c90585d1dbc1557 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 22 Sep 2025 22:52:18 +0200 Subject: [PATCH 078/332] Add SourceAccessor::getFingerprint() This returns the fingerprint for a specific subpath. This is intended for "composite" accessors like MountedSourceAccessor, where different subdirectories can have different fingerprints. 
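Since a standalone caller only appears a few patches later in this series (the fetchToStore() change), here is a deliberately simplified, self-contained sketch of the delegation behaviour described above. `Accessor` and `Mounted` are toy stand-ins, not the real `SourceAccessor`/`MountedSourceAccessor` classes:

    #include <iostream>
    #include <map>
    #include <memory>
    #include <optional>
    #include <string>
    #include <utility>

    // Toy stand-in for SourceAccessor: by default the accessor-wide
    // fingerprint (if any) applies to every path.
    struct Accessor
    {
        std::optional<std::string> fingerprint;

        virtual std::pair<std::string, std::optional<std::string>>
        getFingerprint(const std::string & path)
        {
            return {path, fingerprint};
        }

        virtual ~Accessor() = default;
    };

    // Toy composite accessor: it has no fingerprint of its own, but an
    // accessor mounted on a prefix might, so delegate and re-root the path.
    struct Mounted : Accessor
    {
        std::map<std::string, std::shared_ptr<Accessor>> mounts; // prefix -> accessor

        std::pair<std::string, std::optional<std::string>>
        getFingerprint(const std::string & path) override
        {
            for (auto & [prefix, inner] : mounts)
                if (path.rfind(prefix, 0) == 0) // path starts with prefix
                    return inner->getFingerprint(path.substr(prefix.size()));
            return {path, std::nullopt};
        }
    };

    int main()
    {
        auto inner = std::make_shared<Accessor>();
        inner->fingerprint = "git:abc123";

        Mounted root;
        root.mounts["/nix/store/foo"] = inner;

        auto [subpath, fp] = root.getFingerprint("/nix/store/foo/bar");
        std::cout << subpath << " -> " << fp.value_or("(no fingerprint)") << "\n";
        // prints "/bar -> git:abc123", matching the example in the doc comment below
    }
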
--- src/libfetchers/filtering-source-accessor.cc | 7 +++++++ .../nix/fetchers/filtering-source-accessor.hh | 2 ++ .../include/nix/util/source-accessor.hh | 21 +++++++++++++++++++ src/libutil/mounted-source-accessor.cc | 8 +++++++ src/libutil/union-source-accessor.cc | 12 +++++++++++ 5 files changed, 50 insertions(+) diff --git a/src/libfetchers/filtering-source-accessor.cc b/src/libfetchers/filtering-source-accessor.cc index 5a3a0f07b..8f1b50eb9 100644 --- a/src/libfetchers/filtering-source-accessor.cc +++ b/src/libfetchers/filtering-source-accessor.cc @@ -60,6 +60,13 @@ std::string FilteringSourceAccessor::showPath(const CanonPath & path) return displayPrefix + next->showPath(prefix / path) + displaySuffix; } +std::pair> FilteringSourceAccessor::getFingerprint(const CanonPath & path) +{ + if (fingerprint) + return {path, fingerprint}; + return next->getFingerprint(prefix / path); +} + void FilteringSourceAccessor::checkAccess(const CanonPath & path) { if (!isAllowed(path)) diff --git a/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh b/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh index 1c2fd60b0..5e98caa58 100644 --- a/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh +++ b/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh @@ -50,6 +50,8 @@ struct FilteringSourceAccessor : SourceAccessor std::string showPath(const CanonPath & path) override; + std::pair> getFingerprint(const CanonPath & path) override; + /** * Call `makeNotAllowedError` to throw a `RestrictedPathError` * exception if `isAllowed()` returns `false` for `path`. diff --git a/src/libutil/include/nix/util/source-accessor.hh b/src/libutil/include/nix/util/source-accessor.hh index e57b85411..671444e6f 100644 --- a/src/libutil/include/nix/util/source-accessor.hh +++ b/src/libutil/include/nix/util/source-accessor.hh @@ -180,6 +180,27 @@ struct SourceAccessor : std::enable_shared_from_this */ std::optional fingerprint; + /** + * Return the fingerprint for `path`. This is usually the + * fingerprint of the current accessor, but for composite + * accessors (like `MountedSourceAccessor`), we want to return the + * fingerprint of the "inner" accessor if the current one lacks a + * fingerprint. + * + * So this method is intended to return the most-outer accessor + * that has a fingerprint for `path`. It also returns the path that `path` + * corresponds to in that accessor. + * + * For example: in a `MountedSourceAccessor` that has + * `/nix/store/foo` mounted, + * `getFingerprint("/nix/store/foo/bar")` will return the path + * `/bar` and the fingerprint of the `/nix/store/foo` accessor. + */ + virtual std::pair> getFingerprint(const CanonPath & path) + { + return {path, fingerprint}; + } + /** * Return the maximum last-modified time of the files in this * tree, if available. 
diff --git a/src/libutil/mounted-source-accessor.cc b/src/libutil/mounted-source-accessor.cc index cd7e3d496..d9398045c 100644 --- a/src/libutil/mounted-source-accessor.cc +++ b/src/libutil/mounted-source-accessor.cc @@ -91,6 +91,14 @@ struct MountedSourceAccessorImpl : MountedSourceAccessor else return nullptr; } + + std::pair> getFingerprint(const CanonPath & path) override + { + if (fingerprint) + return {path, fingerprint}; + auto [accessor, subpath] = resolve(path); + return accessor->getFingerprint(subpath); + } }; ref makeMountedSourceAccessor(std::map> mounts) diff --git a/src/libutil/union-source-accessor.cc b/src/libutil/union-source-accessor.cc index 96b6a643a..e3b39f14e 100644 --- a/src/libutil/union-source-accessor.cc +++ b/src/libutil/union-source-accessor.cc @@ -72,6 +72,18 @@ struct UnionSourceAccessor : SourceAccessor } return std::nullopt; } + + std::pair> getFingerprint(const CanonPath & path) override + { + if (fingerprint) + return {path, fingerprint}; + for (auto & accessor : accessors) { + auto [subpath, fingerprint] = accessor->getFingerprint(path); + if (fingerprint) + return {subpath, fingerprint}; + } + return {path, std::nullopt}; + } }; ref makeUnionSourceAccessor(std::vector> && accessors) From 3450a72ba02ccd5311cfc75b0e02c4d773013794 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 16 Jun 2025 18:16:30 +0200 Subject: [PATCH 079/332] Git fetcher: Make dirty repos with no commits cacheable --- src/libfetchers/git.cc | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/libfetchers/git.cc b/src/libfetchers/git.cc index f6f5c30ee..7c1630167 100644 --- a/src/libfetchers/git.cc +++ b/src/libfetchers/git.cc @@ -893,8 +893,7 @@ struct GitInputScheme : InputScheme return makeFingerprint(*rev); else { auto repoInfo = getRepoInfo(input); - if (auto repoPath = repoInfo.getPath(); - repoPath && repoInfo.workdirInfo.headRev && repoInfo.workdirInfo.submodules.empty()) { + if (auto repoPath = repoInfo.getPath(); repoPath && repoInfo.workdirInfo.submodules.empty()) { /* Calculate a fingerprint that takes into account the deleted and modified/added files. */ HashSink hashSink{HashAlgorithm::SHA512}; @@ -907,7 +906,7 @@ struct GitInputScheme : InputScheme writeString("deleted:", hashSink); writeString(file.abs(), hashSink); } - return makeFingerprint(*repoInfo.workdirInfo.headRev) + return makeFingerprint(repoInfo.workdirInfo.headRev.value_or(nullRev)) + ";d=" + hashSink.finish().hash.to_string(HashFormat::Base16, false); } return std::nullopt; From ec6d5c7de3b3701a74cbc16515813cab7c7ef580 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 22 Sep 2025 23:02:02 +0200 Subject: [PATCH 080/332] Path fetcher: Simplify fingerprint computation --- src/libfetchers/path.cc | 42 +++++++++++++++-------------------------- 1 file changed, 15 insertions(+), 27 deletions(-) diff --git a/src/libfetchers/path.cc b/src/libfetchers/path.cc index 3c4b9c06d..aa0411ff9 100644 --- a/src/libfetchers/path.cc +++ b/src/libfetchers/path.cc @@ -123,8 +123,6 @@ struct PathInputScheme : InputScheme auto absPath = getAbsPath(input); - Activity act(*logger, lvlTalkative, actUnknown, fmt("copying %s to the store", absPath)); - // FIXME: check whether access to 'path' is allowed. 
auto storePath = store->maybeParseStorePath(absPath.string()); @@ -133,43 +131,33 @@ struct PathInputScheme : InputScheme time_t mtime = 0; if (!storePath || storePath->name() != "source" || !store->isValidPath(*storePath)) { + Activity act(*logger, lvlTalkative, actUnknown, fmt("copying %s to the store", absPath)); // FIXME: try to substitute storePath. auto src = sinkToSource( [&](Sink & sink) { mtime = dumpPathAndGetMtime(absPath.string(), sink, defaultPathFilter); }); storePath = store->addToStoreFromDump(*src, "source"); } - // To avoid copying the path again to the /nix/store, we need to add a cache entry. - ContentAddressMethod method = ContentAddressMethod::Raw::NixArchive; - auto fp = getFingerprint(store, input); - if (fp) { - auto cacheKey = makeFetchToStoreCacheKey(input.getName(), *fp, method, "/"); - input.settings->getCache()->upsert(cacheKey, *store, {}, *storePath); - } + auto accessor = ref{store->getFSAccessor(*storePath)}; + + // To prevent `fetchToStore()` copying the path again to Nix + // store, pre-create an entry in the fetcher cache. + auto info = store->queryPathInfo(*storePath); + accessor->fingerprint = + fmt("path:%s", store->queryPathInfo(*storePath)->narHash.to_string(HashFormat::SRI, true)); + input.settings->getCache()->upsert( + makeFetchToStoreCacheKey( + input.getName(), *accessor->fingerprint, ContentAddressMethod::Raw::NixArchive, "/"), + *store, + {}, + *storePath); /* Trust the lastModified value supplied by the user, if any. It's not a "secure" attribute so we don't care. */ if (!input.getLastModified()) input.attrs.insert_or_assign("lastModified", uint64_t(mtime)); - return {ref{store->getFSAccessor(*storePath)}, std::move(input)}; - } - - std::optional getFingerprint(ref store, const Input & input) const override - { - if (isRelative(input)) - return std::nullopt; - - /* If this path is in the Nix store, use the hash of the - store object and the subpath. */ - auto path = getAbsPath(input); - try { - auto [storePath, subPath] = store->toStorePath(path.string()); - auto info = store->queryPathInfo(storePath); - return fmt("path:%s:%s", info->narHash.to_string(HashFormat::Base16, false), subPath); - } catch (Error &) { - return std::nullopt; - } + return {accessor, std::move(input)}; } std::optional experimentalFeature() const override From 1d130492d743345715107d24f0204fda19896db1 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 22 Sep 2025 23:04:58 +0200 Subject: [PATCH 081/332] Mount inputs on storeFS to restore fetchToStore() caching fetchToStore() caching was broken because it uses the fingerprint of the accessor, but now that the accessor (typically storeFS) is a composite (like MountedSourceAccessor or AllowListSourceAccessor), there was no fingerprint anymore. So fetchToStore now uses the new getFingerprint() method to get the specific fingerprint for the subpath. 
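The other half of the change is the new `EvalState::mountInput()` in the diff below: once the input has been copied to (or found in) the store, its accessor is mounted on `storeFS` at that store path, so later accesses to the path resolve through the original accessor. A toy, self-contained model of that idea follows; `MountedFS` and the store path are deliberately simplified assumptions, not the real `MountedSourceAccessor`:

    #include <functional>
    #include <iostream>
    #include <map>
    #include <stdexcept>
    #include <string>

    // Each mount point maps to a function that reads a file relative to it,
    // standing in for a full source accessor.
    using ReadFile = std::function<std::string(const std::string &)>;

    struct MountedFS
    {
        std::map<std::string, ReadFile> mounts; // mount point -> backing accessor

        std::string readFile(const std::string & path) const
        {
            for (auto & [prefix, read] : mounts)
                if (path.rfind(prefix, 0) == 0)
                    return read(path.substr(prefix.size())); // delegate with a re-rooted path
            throw std::runtime_error("no mount for " + path);
        }
    };

    int main()
    {
        MountedFS storeFS;

        // Pretend fetchToStore() just returned /nix/store/abc-source for some
        // input (an invented path); mounting the input's accessor there means
        // subsequent reads of that store path are served by the accessor itself.
        storeFS.mounts["/nix/store/abc-source"] = [](const std::string & sub) {
            return "contents of " + sub + ", served by the input's accessor\n";
        };

        std::cout << storeFS.readFile("/nix/store/abc-source/flake.nix");
    }
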
--- src/libexpr/eval.cc | 27 ++++++-------- src/libexpr/include/nix/expr/eval.hh | 6 ++++ src/libexpr/paths.cc | 25 +++++++++++++ src/libexpr/primops/fetchTree.cc | 7 ++-- src/libfetchers/fetch-to-store.cc | 11 ++++-- src/libfetchers/fetchers.cc | 6 ++-- src/libflake/flake.cc | 35 ++++++------------- .../lang/eval-fail-hashfile-missing.err.exp | 2 +- 8 files changed, 69 insertions(+), 50 deletions(-) diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 4db598871..98219fb17 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -227,24 +227,17 @@ EvalState::EvalState( {CanonPath(store->storeDir), store->getFSAccessor(settings.pureEval)}, })) , rootFS([&] { - auto accessor = [&]() -> decltype(rootFS) { - /* In pure eval mode, we provide a filesystem that only - contains the Nix store. */ - if (settings.pureEval) - return storeFS; + /* In pure eval mode, we provide a filesystem that only + contains the Nix store. - /* If we have a chroot store and pure eval is not enabled, - use a union accessor to make the chroot store available - at its logical location while still having the underlying - directory available. This is necessary for instance if - we're evaluating a file from the physical /nix/store - while using a chroot store. */ - auto realStoreDir = dirOf(store->toRealPath(StorePath::dummy)); - if (store->storeDir != realStoreDir) - return makeUnionSourceAccessor({getFSSourceAccessor(), storeFS}); - - return getFSSourceAccessor(); - }(); + Otherwise, use a union accessor to make the augmented store + available at its logical location while still having the + underlying directory available. This is necessary for + instance if we're evaluating a file from the physical + /nix/store while using a chroot store, and also for lazy + mounted fetchTree. */ + auto accessor = settings.pureEval ? storeFS.cast() + : makeUnionSourceAccessor({getFSSourceAccessor(), storeFS}); /* Apply access control if needed. */ if (settings.restrictEval || settings.pureEval) diff --git a/src/libexpr/include/nix/expr/eval.hh b/src/libexpr/include/nix/expr/eval.hh index e5b87cc97..c56836076 100644 --- a/src/libexpr/include/nix/expr/eval.hh +++ b/src/libexpr/include/nix/expr/eval.hh @@ -42,6 +42,7 @@ class Store; namespace fetchers { struct Settings; struct InputCache; +struct Input; } // namespace fetchers struct EvalSettings; class EvalState; @@ -514,6 +515,11 @@ public: void checkURI(const std::string & uri); + /** + * Mount an input on the Nix store. + */ + StorePath mountInput(fetchers::Input & input, const fetchers::Input & originalInput, ref accessor); + /** * Parse a Nix expression from the specified file. 
*/ diff --git a/src/libexpr/paths.cc b/src/libexpr/paths.cc index f90bc37df..8622ab208 100644 --- a/src/libexpr/paths.cc +++ b/src/libexpr/paths.cc @@ -1,5 +1,7 @@ #include "nix/store/store-api.hh" #include "nix/expr/eval.hh" +#include "nix/util/mounted-source-accessor.hh" +#include "nix/fetchers/fetch-to-store.hh" namespace nix { @@ -18,4 +20,27 @@ SourcePath EvalState::storePath(const StorePath & path) return {rootFS, CanonPath{store->printStorePath(path)}}; } +StorePath +EvalState::mountInput(fetchers::Input & input, const fetchers::Input & originalInput, ref accessor) +{ + auto storePath = fetchToStore(fetchSettings, *store, accessor, FetchMode::Copy, input.getName()); + + allowPath(storePath); // FIXME: should just whitelist the entire virtual store + + storeFS->mount(CanonPath(store->printStorePath(storePath)), accessor); + + auto narHash = store->queryPathInfo(storePath)->narHash; + input.attrs.insert_or_assign("narHash", narHash.to_string(HashFormat::SRI, true)); + + if (originalInput.getNarHash() && narHash != *originalInput.getNarHash()) + throw Error( + (unsigned int) 102, + "NAR hash mismatch in input '%s', expected '%s' but got '%s'", + originalInput.to_string(), + narHash.to_string(HashFormat::SRI, true), + originalInput.getNarHash()->to_string(HashFormat::SRI, true)); + + return storePath; +} + } // namespace nix diff --git a/src/libexpr/primops/fetchTree.cc b/src/libexpr/primops/fetchTree.cc index e673e55a0..e76e39f7d 100644 --- a/src/libexpr/primops/fetchTree.cc +++ b/src/libexpr/primops/fetchTree.cc @@ -10,6 +10,7 @@ #include "nix/util/url.hh" #include "nix/expr/value-to-json.hh" #include "nix/fetchers/fetch-to-store.hh" +#include "nix/fetchers/input-cache.hh" #include @@ -218,11 +219,11 @@ static void fetchTree( throw Error("input '%s' is not allowed to use the '__final' attribute", input.to_string()); } - auto [storePath, input2] = input.fetchToStore(state.store); + auto cachedInput = state.inputCache->getAccessor(state.store, input, fetchers::UseRegistries::No); - state.allowPath(storePath); + auto storePath = state.mountInput(cachedInput.lockedInput, input, cachedInput.accessor); - emitTreeAttrs(state, storePath, input2, v, params.emptyRevFallback, false); + emitTreeAttrs(state, storePath, cachedInput.lockedInput, v, params.emptyRevFallback, false); } static void prim_fetchTree(EvalState & state, const PosIdx pos, Value ** args, Value & v) diff --git a/src/libfetchers/fetch-to-store.cc b/src/libfetchers/fetch-to-store.cc index 6ce78e115..5961379ee 100644 --- a/src/libfetchers/fetch-to-store.cc +++ b/src/libfetchers/fetch-to-store.cc @@ -27,14 +27,19 @@ StorePath fetchToStore( std::optional cacheKey; - if (!filter && path.accessor->fingerprint) { - cacheKey = makeFetchToStoreCacheKey(std::string{name}, *path.accessor->fingerprint, method, path.path.abs()); + auto [subpath, fingerprint] = filter ? std::pair>{path.path, std::nullopt} + : path.accessor->getFingerprint(path.path); + + if (fingerprint) { + cacheKey = makeFetchToStoreCacheKey(std::string{name}, *fingerprint, method, subpath.abs()); if (auto res = settings.getCache()->lookupStorePath(*cacheKey, store)) { debug("store path cache hit for '%s'", path); return res->storePath; } - } else + } else { + // FIXME: could still provide in-memory caching keyed on `SourcePath`. 
debug("source path '%s' is uncacheable", path); + } Activity act( *logger, diff --git a/src/libfetchers/fetchers.cc b/src/libfetchers/fetchers.cc index 045aafdcb..f697ec6f5 100644 --- a/src/libfetchers/fetchers.cc +++ b/src/libfetchers/fetchers.cc @@ -356,8 +356,10 @@ std::pair, Input> Input::getAccessorUnchecked(ref sto auto [accessor, result] = scheme->getAccessor(store, *this); - assert(!accessor->fingerprint); - accessor->fingerprint = result.getFingerprint(store); + if (!accessor->fingerprint) + accessor->fingerprint = result.getFingerprint(store); + else + result.cachedFingerprint = accessor->fingerprint; return {accessor, std::move(result)}; } diff --git a/src/libflake/flake.cc b/src/libflake/flake.cc index 3acf589a5..486118963 100644 --- a/src/libflake/flake.cc +++ b/src/libflake/flake.cc @@ -24,21 +24,6 @@ using namespace flake; namespace flake { -static StorePath copyInputToStore( - EvalState & state, fetchers::Input & input, const fetchers::Input & originalInput, ref accessor) -{ - auto storePath = fetchToStore(*input.settings, *state.store, accessor, FetchMode::Copy, input.getName()); - - state.allowPath(storePath); - - auto narHash = state.store->queryPathInfo(storePath)->narHash; - input.attrs.insert_or_assign("narHash", narHash.to_string(HashFormat::SRI, true)); - - assert(!originalInput.getNarHash() || storePath == originalInput.computeStorePath(*state.store)); - - return storePath; -} - static void forceTrivialValue(EvalState & state, Value & value, const PosIdx pos) { if (value.isThunk() && value.isTrivial()) @@ -360,11 +345,14 @@ static Flake getFlake( lockedRef = FlakeRef(std::move(cachedInput2.lockedInput), newLockedRef.subdir); } - // Copy the tree to the store. - auto storePath = copyInputToStore(state, lockedRef.input, originalRef.input, cachedInput.accessor); - // Re-parse flake.nix from the store. - return readFlake(state, originalRef, resolvedRef, lockedRef, state.storePath(storePath), lockRootAttrPath); + return readFlake( + state, + originalRef, + resolvedRef, + lockedRef, + state.storePath(state.mountInput(lockedRef.input, originalRef.input, cachedInput.accessor)), + lockRootAttrPath); } Flake getFlake(EvalState & state, const FlakeRef & originalRef, fetchers::UseRegistries useRegistries) @@ -721,11 +709,10 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, auto lockedRef = FlakeRef(std::move(cachedInput.lockedInput), input.ref->subdir); - // FIXME: allow input to be lazy. 
- auto storePath = copyInputToStore( - state, lockedRef.input, input.ref->input, cachedInput.accessor); - - return {state.storePath(storePath), lockedRef}; + return { + state.storePath( + state.mountInput(lockedRef.input, input.ref->input, cachedInput.accessor)), + lockedRef}; } }(); diff --git a/tests/functional/lang/eval-fail-hashfile-missing.err.exp b/tests/functional/lang/eval-fail-hashfile-missing.err.exp index 0d3747a6d..901dea2b5 100644 --- a/tests/functional/lang/eval-fail-hashfile-missing.err.exp +++ b/tests/functional/lang/eval-fail-hashfile-missing.err.exp @@ -10,4 +10,4 @@ error: … while calling the 'hashFile' builtin - error: opening file '/pwd/lang/this-file-is-definitely-not-there-7392097': No such file or directory + error: path '/pwd/lang/this-file-is-definitely-not-there-7392097' does not exist From 4b9735b761047d6cb606229919fc3d71468fb241 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 22 Sep 2025 23:09:47 +0200 Subject: [PATCH 082/332] Test against uncacheable paths This is to test the non-functional property that most paths should be cacheable. We've had frequent cases where caching broken but we didn't notice. --- src/libfetchers/fetch-to-store.cc | 4 ++++ tests/functional/flakes/common.sh | 2 ++ tests/functional/flakes/flake-in-submodule.sh | 6 +++--- tests/functional/flakes/follow-paths.sh | 2 +- tests/functional/flakes/mercurial.sh | 4 ++-- tests/functional/flakes/non-flake-inputs.sh | 9 +++++---- tests/functional/flakes/relative-paths-lockfile.sh | 2 ++ 7 files changed, 19 insertions(+), 10 deletions(-) diff --git a/src/libfetchers/fetch-to-store.cc b/src/libfetchers/fetch-to-store.cc index 5961379ee..b1e8b9d72 100644 --- a/src/libfetchers/fetch-to-store.cc +++ b/src/libfetchers/fetch-to-store.cc @@ -1,6 +1,7 @@ #include "nix/fetchers/fetch-to-store.hh" #include "nix/fetchers/fetchers.hh" #include "nix/fetchers/fetch-settings.hh" +#include "nix/util/environment-variables.hh" namespace nix { @@ -37,6 +38,9 @@ StorePath fetchToStore( return res->storePath; } } else { + static auto barf = getEnv("_NIX_TEST_BARF_ON_UNCACHEABLE").value_or("") == "1"; + if (barf && !filter) + throw Error("source path '%s' is uncacheable (filter=%d)", path, (bool) filter); // FIXME: could still provide in-memory caching keyed on `SourcePath`. debug("source path '%s' is uncacheable", path); } diff --git a/tests/functional/flakes/common.sh b/tests/functional/flakes/common.sh index 422cab96c..77bc03060 100644 --- a/tests/functional/flakes/common.sh +++ b/tests/functional/flakes/common.sh @@ -2,6 +2,8 @@ source ../common.sh +export _NIX_TEST_BARF_ON_UNCACHEABLE=1 + # shellcheck disable=SC2034 # this variable is used by tests that source this file registry=$TEST_ROOT/registry.json diff --git a/tests/functional/flakes/flake-in-submodule.sh b/tests/functional/flakes/flake-in-submodule.sh index fe5acf26d..a7d86698d 100755 --- a/tests/functional/flakes/flake-in-submodule.sh +++ b/tests/functional/flakes/flake-in-submodule.sh @@ -62,8 +62,8 @@ flakeref=git+file://$rootRepo\?submodules=1\&dir=submodule # Check that dirtying a submodule makes the entire thing dirty. 
[[ $(nix flake metadata --json "$flakeref" | jq -r .locked.rev) != null ]] echo '"foo"' > "$rootRepo"/submodule/sub.nix -[[ $(nix eval --json "$flakeref#sub" ) = '"foo"' ]] -[[ $(nix flake metadata --json "$flakeref" | jq -r .locked.rev) = null ]] +[[ $(_NIX_TEST_BARF_ON_UNCACHEABLE='' nix eval --json "$flakeref#sub" ) = '"foo"' ]] +[[ $(_NIX_TEST_BARF_ON_UNCACHEABLE='' nix flake metadata --json "$flakeref" | jq -r .locked.rev) = null ]] # Test that `nix flake metadata` parses `submodule` correctly. cat > "$rootRepo"/flake.nix <&1 | grep '/flakeB.*is forbidden in pure evaluation mode' -expect 1 nix flake lock --impure $flakeFollowsA 2>&1 | grep '/flakeB.*does not exist' +expect 1 nix flake lock --impure $flakeFollowsA 2>&1 | grep "'flakeB' is too short to be a valid store path" # Test relative non-flake inputs. cat > $flakeFollowsA/flake.nix < Date: Thu, 25 Sep 2025 10:29:27 -0700 Subject: [PATCH 083/332] shellcheck fix: functional/dyn-drv/eval-outputOf.sh --- maintainers/flake-module.nix | 1 - 1 file changed, 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 5f8a80a23..54f53fb6a 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -107,7 +107,6 @@ excludes = [ # We haven't linted these files yet ''^tests/functional/dump-db\.sh$'' - ''^tests/functional/dyn-drv/eval-outputOf\.sh$'' ''^tests/functional/dyn-drv/old-daemon-error-hack\.sh$'' ''^tests/functional/dyn-drv/recursive-mod-json\.sh$'' ''^tests/functional/eval-store\.sh$'' From 119489f2535cab51b65c8a7908b013487e5fe3cb Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Thu, 25 Sep 2025 10:30:03 -0700 Subject: [PATCH 084/332] shellcheck fix: tests/functional/dyn-drv/old-daemon-error-hack.sh --- maintainers/flake-module.nix | 1 - tests/functional/dyn-drv/old-daemon-error-hack.sh | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 54f53fb6a..671591ab4 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -107,7 +107,6 @@ excludes = [ # We haven't linted these files yet ''^tests/functional/dump-db\.sh$'' - ''^tests/functional/dyn-drv/old-daemon-error-hack\.sh$'' ''^tests/functional/dyn-drv/recursive-mod-json\.sh$'' ''^tests/functional/eval-store\.sh$'' ''^tests/functional/export-graph\.sh$'' diff --git a/tests/functional/dyn-drv/old-daemon-error-hack.sh b/tests/functional/dyn-drv/old-daemon-error-hack.sh index 43b049973..02129bd73 100644 --- a/tests/functional/dyn-drv/old-daemon-error-hack.sh +++ b/tests/functional/dyn-drv/old-daemon-error-hack.sh @@ -1,3 +1,4 @@ +# shellcheck shell=bash # Purposely bypassing our usual common for this subgroup source ../common.sh From dc69e2e5205ee062e67795f37740ab1d179a2c95 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Thu, 25 Sep 2025 10:31:06 -0700 Subject: [PATCH 085/332] shellcheck fix: tests/functional/dyn-drv/recursive-mod-json.sh --- maintainers/flake-module.nix | 1 - tests/functional/dyn-drv/recursive-mod-json.sh | 11 ++++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 671591ab4..505424633 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -107,7 +107,6 @@ excludes = [ # We haven't linted these files yet ''^tests/functional/dump-db\.sh$'' - ''^tests/functional/dyn-drv/recursive-mod-json\.sh$'' ''^tests/functional/eval-store\.sh$'' ''^tests/functional/export-graph\.sh$'' ''^tests/functional/export\.sh$'' 
diff --git a/tests/functional/dyn-drv/recursive-mod-json.sh b/tests/functional/dyn-drv/recursive-mod-json.sh index 0698b81bd..01e8f16e9 100644 --- a/tests/functional/dyn-drv/recursive-mod-json.sh +++ b/tests/functional/dyn-drv/recursive-mod-json.sh @@ -1,3 +1,4 @@ +# shellcheck shell=bash source common.sh # FIXME @@ -10,18 +11,18 @@ restartDaemon clearStore -rm -f $TEST_ROOT/result +rm -f "$TEST_ROOT"/result -EXTRA_PATH=$(dirname $(type -p nix)):$(dirname $(type -p jq)) +EXTRA_PATH=$(dirname "$(type -p nix)"):$(dirname "$(type -p jq)") export EXTRA_PATH # Will produce a drv metaDrv=$(nix-instantiate ./recursive-mod-json.nix) # computed "dynamic" derivation -drv=$(nix-store -r $metaDrv) +drv=$(nix-store -r "$metaDrv") # build that dyn drv -res=$(nix-store -r $drv) +res=$(nix-store -r "$drv") -grep 'I am alive!' $res/hello +grep 'I am alive!' "$res"/hello From b8c24cdaef3a7439b381eae2cc050ffff9dcb68a Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Thu, 25 Sep 2025 10:33:40 -0700 Subject: [PATCH 086/332] shellcheck fix: tests/functional/eval-store.sh --- maintainers/flake-module.nix | 1 - tests/functional/eval-store.sh | 21 +++++++++++---------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 505424633..85a4d90f1 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -107,7 +107,6 @@ excludes = [ # We haven't linted these files yet ''^tests/functional/dump-db\.sh$'' - ''^tests/functional/eval-store\.sh$'' ''^tests/functional/export-graph\.sh$'' ''^tests/functional/export\.sh$'' ''^tests/functional/extra-sandbox-profile\.sh$'' diff --git a/tests/functional/eval-store.sh b/tests/functional/eval-store.sh index 92faa4005..9f4b3b036 100755 --- a/tests/functional/eval-store.sh +++ b/tests/functional/eval-store.sh @@ -6,6 +6,7 @@ TODO_NixOS # Using `--eval-store` with the daemon will eventually copy everything # to the build store, invalidating most of the tests here +# shellcheck disable=SC1111 needLocalStore "“--eval-store” doesn't achieve much with the daemon" eval_store=$TEST_ROOT/eval-store @@ -15,7 +16,7 @@ rm -rf "$eval_store" nix build -f dependencies.nix --eval-store "$eval_store" -o "$TEST_ROOT/result" [[ -e $TEST_ROOT/result/foobar ]] -if [[ ! -n "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then +if [[ -z "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then # Resolved CA derivations are written to store for building # # TODO when we something more systematic @@ -23,35 +24,35 @@ if [[ ! -n "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then # between scratch storage for building and the final destination # store, we'll be able to make this unconditional again -- resolved # derivations should only appear in the scratch store. - (! ls $NIX_STORE_DIR/*.drv) + (! ls "$NIX_STORE_DIR"/*.drv) fi -ls $eval_store/nix/store/*.drv +ls "$eval_store"/nix/store/*.drv clearStore rm -rf "$eval_store" nix-instantiate dependencies.nix --eval-store "$eval_store" -(! ls $NIX_STORE_DIR/*.drv) -ls $eval_store/nix/store/*.drv +(! ls "$NIX_STORE_DIR"/*.drv) +ls "$eval_store"/nix/store/*.drv clearStore rm -rf "$eval_store" nix-build dependencies.nix --eval-store "$eval_store" -o "$TEST_ROOT/result" [[ -e $TEST_ROOT/result/foobar ]] -if [[ ! -n "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then +if [[ -z "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then # See above - (! ls $NIX_STORE_DIR/*.drv) + (! 
ls "$NIX_STORE_DIR"/*.drv) fi -ls $eval_store/nix/store/*.drv +ls "$eval_store"/nix/store/*.drv clearStore rm -rf "$eval_store" # Confirm that import-from-derivation builds on the build store [[ $(nix eval --eval-store "$eval_store?require-sigs=false" --impure --raw --file ./ifd.nix) = hi ]] -ls $NIX_STORE_DIR/*dependencies-top/foobar -(! ls $eval_store/nix/store/*dependencies-top/foobar) +ls "$NIX_STORE_DIR"/*dependencies-top/foobar +(! ls "$eval_store"/nix/store/*dependencies-top/foobar) # Can't write .drv by default (! nix-instantiate dependencies.nix --eval-store "dummy://") From a209748ec04cc7142bf3c01edffc058fa4846661 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Thu, 25 Sep 2025 10:35:01 -0700 Subject: [PATCH 087/332] shellcheck fix: tests/functional/export-graph.sh --- maintainers/flake-module.nix | 1 - tests/functional/export-graph.sh | 14 ++++++++------ 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 85a4d90f1..8277788c6 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -107,7 +107,6 @@ excludes = [ # We haven't linted these files yet ''^tests/functional/dump-db\.sh$'' - ''^tests/functional/export-graph\.sh$'' ''^tests/functional/export\.sh$'' ''^tests/functional/extra-sandbox-profile\.sh$'' ''^tests/functional/fetchClosure\.sh$'' diff --git a/tests/functional/export-graph.sh b/tests/functional/export-graph.sh index b507b6d3a..0490b580d 100755 --- a/tests/functional/export-graph.sh +++ b/tests/functional/export-graph.sh @@ -8,27 +8,29 @@ clearStore clearProfiles checkRef() { - nix-store -q --references $TEST_ROOT/result | grepQuiet "$1"'$' || fail "missing reference $1" + nix-store -q --references "$TEST_ROOT"/result | grepQuiet "$1"'$' || fail "missing reference $1" } # Test the export of the runtime dependency graph. -outPath=$(nix-build ./export-graph.nix -A 'foo."bar.runtimeGraph"' -o $TEST_ROOT/result) +outPath=$(nix-build ./export-graph.nix -A 'foo."bar.runtimeGraph"' -o "$TEST_ROOT"/result) -test $(nix-store -q --references $TEST_ROOT/result | wc -l) = 3 || fail "bad nr of references" +test "$(nix-store -q --references "$TEST_ROOT"/result | wc -l)" = 3 || fail "bad nr of references" checkRef input-2 -for i in $(cat $outPath); do checkRef $i; done +# shellcheck disable=SC2013 +for i in $(cat "$outPath"); do checkRef "$i"; done # Test the export of the build-time dependency graph. 
nix-store --gc # should force rebuild of input-1 -outPath=$(nix-build ./export-graph.nix -A 'foo."bar.buildGraph"' -o $TEST_ROOT/result) +outPath=$(nix-build ./export-graph.nix -A 'foo."bar.buildGraph"' -o "$TEST_ROOT"/result) checkRef input-1 checkRef input-1.drv checkRef input-2 checkRef input-2.drv -for i in $(cat $outPath); do checkRef $i; done +# shellcheck disable=SC2013 +for i in $(cat "$outPath"); do checkRef "$i"; done From 9e3c5025218ede84c26ac1c86fcf2711b9b83567 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Thu, 25 Sep 2025 10:35:26 -0700 Subject: [PATCH 088/332] shellcheck fix: tests/functional/extra-sandbox-profile.sh --- maintainers/flake-module.nix | 1 - 1 file changed, 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 8277788c6..7d593d26e 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -108,7 +108,6 @@ # We haven't linted these files yet ''^tests/functional/dump-db\.sh$'' ''^tests/functional/export\.sh$'' - ''^tests/functional/extra-sandbox-profile\.sh$'' ''^tests/functional/fetchClosure\.sh$'' ''^tests/functional/fetchGit\.sh$'' ''^tests/functional/fetchGitRefs\.sh$'' From cf595b81d53a4fdeb30694a271bf1cfe1bd55c34 Mon Sep 17 00:00:00 2001 From: Tristan Ross Date: Tue, 23 Sep 2025 12:04:06 -0700 Subject: [PATCH 089/332] libmain-c: add nix_set_log_format function --- src/libmain-c/nix_api_main.cc | 13 +++++++++++++ src/libmain-c/nix_api_main.h | 8 ++++++++ 2 files changed, 21 insertions(+) diff --git a/src/libmain-c/nix_api_main.cc b/src/libmain-c/nix_api_main.cc index 2d4f588a8..0ee965dc8 100644 --- a/src/libmain-c/nix_api_main.cc +++ b/src/libmain-c/nix_api_main.cc @@ -4,6 +4,7 @@ #include "nix_api_util_internal.h" #include "nix/main/plugin.hh" +#include "nix/main/loggers.hh" extern "C" { @@ -17,4 +18,16 @@ nix_err nix_init_plugins(nix_c_context * context) NIXC_CATCH_ERRS } +nix_err nix_set_log_format(nix_c_context * context, const char * format) +{ + if (context) + context->last_err_code = NIX_OK; + if (format == nullptr) + return nix_set_err_msg(context, NIX_ERR_UNKNOWN, "Log format is null"); + try { + nix::setLogFormat(format); + } + NIXC_CATCH_ERRS +} + } // extern "C" diff --git a/src/libmain-c/nix_api_main.h b/src/libmain-c/nix_api_main.h index 3957b992f..3d5d12c15 100644 --- a/src/libmain-c/nix_api_main.h +++ b/src/libmain-c/nix_api_main.h @@ -30,6 +30,14 @@ extern "C" { */ nix_err nix_init_plugins(nix_c_context * context); +/** + * @brief Sets the log format + * + * @param[out] context Optional, stores error information + * @param[in] format The string name of the format. 
+ */ +nix_err nix_set_log_format(nix_c_context * context, const char * format); + // cffi end #ifdef __cplusplus } From 230da1cbe73800524d3fd3373b3eecb988d9b435 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Thu, 25 Sep 2025 10:36:12 -0700 Subject: [PATCH 090/332] shellcheck fix: tests/functional/export.sh --- maintainers/flake-module.nix | 1 - tests/functional/export.sh | 16 +++++++++------- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 7d593d26e..0a46dc57f 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -107,7 +107,6 @@ excludes = [ # We haven't linted these files yet ''^tests/functional/dump-db\.sh$'' - ''^tests/functional/export\.sh$'' ''^tests/functional/fetchClosure\.sh$'' ''^tests/functional/fetchGit\.sh$'' ''^tests/functional/fetchGitRefs\.sh$'' diff --git a/tests/functional/export.sh b/tests/functional/export.sh index 3e895a540..53bbdd9ac 100755 --- a/tests/functional/export.sh +++ b/tests/functional/export.sh @@ -8,11 +8,12 @@ clearStore outPath=$(nix-build dependencies.nix --no-out-link) -nix-store --export $outPath > $TEST_ROOT/exp +nix-store --export "$outPath" > "$TEST_ROOT"/exp -nix-store --export $(nix-store -qR $outPath) > $TEST_ROOT/exp_all +# shellcheck disable=SC2046 +nix-store --export $(nix-store -qR "$outPath") > "$TEST_ROOT"/exp_all -if nix-store --export $outPath >/dev/full ; then +if nix-store --export "$outPath" >/dev/full ; then echo "exporting to a bad file descriptor should fail" exit 1 fi @@ -20,7 +21,7 @@ fi clearStore -if nix-store --import < $TEST_ROOT/exp; then +if nix-store --import < "$TEST_ROOT"/exp; then echo "importing a non-closure should fail" exit 1 fi @@ -28,13 +29,14 @@ fi clearStore -nix-store --import < $TEST_ROOT/exp_all +nix-store --import < "$TEST_ROOT"/exp_all -nix-store --export $(nix-store -qR $outPath) > $TEST_ROOT/exp_all2 +# shellcheck disable=SC2046 +nix-store --export $(nix-store -qR "$outPath") > "$TEST_ROOT"/exp_all2 clearStore # Regression test: the derivers in exp_all2 are empty, which shouldn't # cause a failure. -nix-store --import < $TEST_ROOT/exp_all2 +nix-store --import < "$TEST_ROOT"/exp_all2 From d07dd92db30d9e560d01a6cf9013a07d530dce24 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Thu, 25 Sep 2025 10:38:51 -0700 Subject: [PATCH 091/332] shellcheck fix: tests/functional/fetchClosure.sh --- maintainers/flake-module.nix | 1 - tests/functional/fetchClosure.sh | 48 +++++++++++++++++--------------- 2 files changed, 25 insertions(+), 24 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 0a46dc57f..5ad6e05eb 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -107,7 +107,6 @@ excludes = [ # We haven't linted these files yet ''^tests/functional/dump-db\.sh$'' - ''^tests/functional/fetchClosure\.sh$'' ''^tests/functional/fetchGit\.sh$'' ''^tests/functional/fetchGitRefs\.sh$'' ''^tests/functional/fetchGitSubmodules\.sh$'' diff --git a/tests/functional/fetchClosure.sh b/tests/functional/fetchClosure.sh index 7ef635d36..9b79ab396 100755 --- a/tests/functional/fetchClosure.sh +++ b/tests/functional/fetchClosure.sh @@ -17,14 +17,14 @@ requireDaemonNewerThan "2.16.0pre20230524" # Initialize binary cache. nonCaPath=$(nix build --json --file ./dependencies.nix --no-link | jq -r .[].outputs.out) -caPath=$(nix store make-content-addressed --json $nonCaPath | jq -r '.rewrites | map(.) 
| .[]') -nix copy --to file://$cacheDir $nonCaPath +caPath=$(nix store make-content-addressed --json "$nonCaPath" | jq -r '.rewrites | map(.) | .[]') +nix copy --to file://"$cacheDir" "$nonCaPath" # Test basic fetchClosure rewriting from non-CA to CA. clearStore -[ ! -e $nonCaPath ] -[ ! -e $caPath ] +[ ! -e "$nonCaPath" ] +[ ! -e "$caPath" ] [[ $(nix eval -v --raw --expr " builtins.fetchClosure { @@ -32,10 +32,10 @@ clearStore fromPath = $nonCaPath; toPath = $caPath; } -") = $caPath ]] +") = "$caPath" ]] -[ ! -e $nonCaPath ] -[ -e $caPath ] +[ ! -e "$nonCaPath" ] +[ -e "$caPath" ] clearStore @@ -55,7 +55,7 @@ if [[ "$NIX_REMOTE" != "daemon" ]]; then # TODO: Should the closure be rejected, despite single user mode? # [ ! -e $nonCaPath ] - [ ! -e $caPath ] + [ ! -e "$caPath" ] # We can use non-CA paths when we ask explicitly. [[ $(nix eval --raw --no-require-sigs --expr " @@ -64,15 +64,15 @@ if [[ "$NIX_REMOTE" != "daemon" ]]; then fromPath = $nonCaPath; inputAddressed = true; } - ") = $nonCaPath ]] + ") = "$nonCaPath" ]] - [ -e $nonCaPath ] - [ ! -e $caPath ] + [ -e "$nonCaPath" ] + [ ! -e "$caPath" ] fi -[ ! -e $caPath ] +[ ! -e "$caPath" ] # 'toPath' set to empty string should fail but print the expected path. expectStderr 1 nix eval -v --json --expr " @@ -84,39 +84,41 @@ expectStderr 1 nix eval -v --json --expr " " | grep "error: rewriting.*$nonCaPath.*yielded.*$caPath" # If fromPath is CA, then toPath isn't needed. -nix copy --to file://$cacheDir $caPath +nix copy --to file://"$cacheDir" "$caPath" clearStore -[ ! -e $caPath ] +[ ! -e "$caPath" ] [[ $(nix eval -v --raw --expr " builtins.fetchClosure { fromStore = \"file://$cacheDir\"; fromPath = $caPath; } -") = $caPath ]] +") = "$caPath" ]] -[ -e $caPath ] +[ -e "$caPath" ] # Check that URL query parameters aren't allowed. clearStore narCache=$TEST_ROOT/nar-cache -rm -rf $narCache +rm -rf "$narCache" (! nix eval -v --raw --expr " builtins.fetchClosure { fromStore = \"file://$cacheDir?local-nar-cache=$narCache\"; fromPath = $caPath; } ") -(! [ -e $narCache ]) +# shellcheck disable=SC2235 +(! [ -e "$narCache" ]) # If toPath is specified but wrong, we check it (only) when the path is missing. clearStore -badPath=$(echo $caPath | sed -e 's!/store/................................-!/store/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-!') +# shellcheck disable=SC2001 +badPath=$(echo "$caPath" | sed -e 's!/store/................................-!/store/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-!') -[ ! -e $badPath ] +[ ! -e "$badPath" ] expectStderr 1 nix eval -v --raw --expr " builtins.fetchClosure { @@ -126,11 +128,11 @@ expectStderr 1 nix eval -v --raw --expr " } " | grep "error: rewriting.*$nonCaPath.*yielded.*$caPath.*while.*$badPath.*was expected" -[ ! -e $badPath ] +[ ! -e "$badPath" ] # We only check it when missing, as a performance optimization similar to what we do for fixed output derivations. So if it's already there, we don't check it. # It would be nice for this to fail, but checking it would be too(?) slow. 
-[ -e $caPath ] +[ -e "$caPath" ] [[ $(nix eval -v --raw --expr " builtins.fetchClosure { @@ -138,7 +140,7 @@ expectStderr 1 nix eval -v --raw --expr " fromPath = $badPath; toPath = $caPath; } -") = $caPath ]] +") = "$caPath" ]] # However, if the output address is unexpected, we can report it From 32e1b5209bc0d6925fb33e593b61569c7b06b86d Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Thu, 25 Sep 2025 10:44:29 -0700 Subject: [PATCH 092/332] shellcheck fix: tests/functional/fetchGit.sh --- maintainers/flake-module.nix | 1 - tests/functional/fetchGit.sh | 192 ++++++++++++++++++----------------- 2 files changed, 98 insertions(+), 95 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 5ad6e05eb..54d02d9f1 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -107,7 +107,6 @@ excludes = [ # We haven't linted these files yet ''^tests/functional/dump-db\.sh$'' - ''^tests/functional/fetchGit\.sh$'' ''^tests/functional/fetchGitRefs\.sh$'' ''^tests/functional/fetchGitSubmodules\.sh$'' ''^tests/functional/fetchGitVerification\.sh$'' diff --git a/tests/functional/fetchGit.sh b/tests/functional/fetchGit.sh index e7c9c77a5..be8b5cb34 100755 --- a/tests/functional/fetchGit.sh +++ b/tests/functional/fetchGit.sh @@ -12,25 +12,25 @@ repo=$TEST_ROOT/./git export _NIX_FORCE_HTTP=1 -rm -rf $repo ${repo}-tmp $TEST_HOME/.cache/nix $TEST_ROOT/worktree $TEST_ROOT/minimal +rm -rf "$repo" "${repo}"-tmp "$TEST_HOME"/.cache/nix "$TEST_ROOT"/worktree "$TEST_ROOT"/minimal -git init $repo -git -C $repo config user.email "foobar@example.com" -git -C $repo config user.name "Foobar" +git init "$repo" +git -C "$repo" config user.email "foobar@example.com" +git -C "$repo" config user.name "Foobar" -echo utrecht > $repo/hello -touch $repo/.gitignore -git -C $repo add hello .gitignore -git -C $repo commit -m 'Bla1' -rev1=$(git -C $repo rev-parse HEAD) -git -C $repo tag -a tag1 -m tag1 +echo utrecht > "$repo"/hello +touch "$repo"/.gitignore +git -C "$repo" add hello .gitignore +git -C "$repo" commit -m 'Bla1' +rev1=$(git -C "$repo" rev-parse HEAD) +git -C "$repo" tag -a tag1 -m tag1 -echo world > $repo/hello -git -C $repo commit -m 'Bla2' -a -git -C $repo worktree add $TEST_ROOT/worktree -echo hello >> $TEST_ROOT/worktree/hello -rev2=$(git -C $repo rev-parse HEAD) -git -C $repo tag -a tag2 -m tag2 +echo world > "$repo"/hello +git -C "$repo" commit -m 'Bla2' -a +git -C "$repo" worktree add "$TEST_ROOT"/worktree +echo hello >> "$TEST_ROOT"/worktree/hello +rev2=$(git -C "$repo" rev-parse HEAD) +git -C "$repo" tag -a tag2 -m tag2 # Check whether fetching in read-only mode works. 
nix-instantiate --eval -E "builtins.readFile ((builtins.fetchGit file://$TEST_ROOT/worktree) + \"/hello\") == \"utrecht\\n\"" @@ -40,52 +40,52 @@ unset _NIX_FORCE_HTTP expectStderr 0 nix eval -vvvv --impure --raw --expr "(builtins.fetchGit file://$TEST_ROOT/worktree).outPath" | grepQuiet "copying '$TEST_ROOT/worktree/' to the store" path0=$(nix eval --impure --raw --expr "(builtins.fetchGit file://$TEST_ROOT/worktree).outPath") path0_=$(nix eval --impure --raw --expr "(builtins.fetchTree { type = \"git\"; url = file://$TEST_ROOT/worktree; }).outPath") -[[ $path0 = $path0_ ]] +[[ $path0 = "$path0_" ]] path0_=$(nix eval --impure --raw --expr "(builtins.fetchTree git+file://$TEST_ROOT/worktree).outPath") -[[ $path0 = $path0_ ]] +[[ $path0 = "$path0_" ]] export _NIX_FORCE_HTTP=1 -[[ $(tail -n 1 $path0/hello) = "hello" ]] +[[ $(tail -n 1 "$path0"/hello) = "hello" ]] # Nuke the cache -rm -rf $TEST_HOME/.cache/nix +rm -rf "$TEST_HOME"/.cache/nix # Fetch the default branch. path=$(nix eval --impure --raw --expr "(builtins.fetchGit file://$repo).outPath") -[[ $(cat $path/hello) = world ]] +[[ $(cat "$path"/hello) = world ]] # Fetch again. This should be cached. # NOTE: This has to be done before the test case below which tries to pack-refs # the reason being that the lookup on the cache uses the ref-file `/refs/heads/master` # which does not exist after packing. -mv $repo ${repo}-tmp +mv "$repo" "${repo}"-tmp path2=$(nix eval --impure --raw --expr "(builtins.fetchGit file://$repo).outPath") -[[ $path = $path2 ]] +[[ $path = "$path2" ]] [[ $(nix eval --impure --expr "(builtins.fetchGit file://$repo).revCount") = 2 ]] -[[ $(nix eval --impure --raw --expr "(builtins.fetchGit file://$repo).rev") = $rev2 ]] -[[ $(nix eval --impure --raw --expr "(builtins.fetchGit file://$repo).shortRev") = ${rev2:0:7} ]] +[[ $(nix eval --impure --raw --expr "(builtins.fetchGit file://$repo).rev") = "$rev2" ]] +[[ $(nix eval --impure --raw --expr "(builtins.fetchGit file://$repo).shortRev") = "${rev2:0:7}" ]] # Fetching with a explicit hash should succeed. 
path2=$(nix eval --refresh --raw --expr "(builtins.fetchGit { url = file://$repo; rev = \"$rev2\"; }).outPath") -[[ $path = $path2 ]] +[[ $path = "$path2" ]] path2=$(nix eval --refresh --raw --expr "(builtins.fetchGit { url = file://$repo; rev = \"$rev1\"; }).outPath") -[[ $(cat $path2/hello) = utrecht ]] +[[ $(cat "$path2"/hello) = utrecht ]] -mv ${repo}-tmp $repo +mv "${repo}"-tmp "$repo" # Fetch when the cache has packed-refs # Regression test of #8822 -git -C $TEST_HOME/.cache/nix/gitv3/*/ pack-refs --all +git -C "$TEST_HOME"/.cache/nix/gitv3/*/ pack-refs --all path=$(nix eval --impure --raw --expr "(builtins.fetchGit file://$repo).outPath") # Fetch a rev from another branch -git -C $repo checkout -b devtest -echo "different file" >> $TEST_ROOT/git/differentbranch -git -C $repo add differentbranch -git -C $repo commit -m 'Test2' -git -C $repo checkout master -devrev=$(git -C $repo rev-parse devtest) +git -C "$repo" checkout -b devtest +echo "different file" >> "$TEST_ROOT"/git/differentbranch +git -C "$repo" add differentbranch +git -C "$repo" commit -m 'Test2' +git -C "$repo" checkout master +devrev=$(git -C "$repo" rev-parse devtest) nix eval --raw --expr "builtins.fetchGit { url = file://$repo; rev = \"$devrev\"; }" [[ $(nix eval --raw --expr "builtins.readFile (builtins.fetchGit { url = file://$repo; rev = \"$devrev\"; allRefs = true; } + \"/differentbranch\")") = 'different file' ]] @@ -96,7 +96,7 @@ nix eval --raw --expr "builtins.fetchGit { url = file://$repo; rev = \"$devrev\" # Fetch using an explicit revision hash. path2=$(nix eval --raw --expr "(builtins.fetchGit { url = file://$repo; rev = \"$rev2\"; }).outPath") -[[ $path = $path2 ]] +[[ $path = "$path2" ]] # In pure eval mode, fetchGit with a revision should succeed. [[ $(nix eval --raw --expr "builtins.readFile (fetchGit { url = file://$repo; rev = \"$rev2\"; } + \"/hello\")") = world ]] @@ -106,23 +106,23 @@ expectStderr 1 nix eval --expr 'builtins.fetchGit "file:///foo"' | grepQuiet "'f # Using a clean working tree should produce the same result. path2=$(nix eval --impure --raw --expr "(builtins.fetchGit $repo).outPath") -[[ $path = $path2 ]] +[[ $path = "$path2" ]] # Using an unclean tree should yield the tracked but uncommitted changes. -mkdir $repo/dir1 $repo/dir2 -echo foo > $repo/dir1/foo -echo bar > $repo/bar -echo bar > $repo/dir2/bar -git -C $repo add dir1/foo -git -C $repo rm hello +mkdir "$repo"/dir1 "$repo"/dir2 +echo foo > "$repo"/dir1/foo +echo bar > "$repo"/bar +echo bar > "$repo"/dir2/bar +git -C "$repo" add dir1/foo +git -C "$repo" rm hello unset _NIX_FORCE_HTTP path2=$(nix eval --impure --raw --expr "(builtins.fetchGit $repo).outPath") -[ ! -e $path2/hello ] -[ ! -e $path2/bar ] -[ ! -e $path2/dir2/bar ] -[ ! -e $path2/.git ] -[[ $(cat $path2/dir1/foo) = foo ]] +[ ! -e "$path2"/hello ] +[ ! -e "$path2"/bar ] +[ ! -e "$path2"/dir2/bar ] +[ ! -e "$path2"/.git ] +[[ $(cat "$path2"/dir1/foo) = foo ]] [[ $(nix eval --impure --raw --expr "(builtins.fetchGit $repo).rev") = 0000000000000000000000000000000000000000 ]] [[ $(nix eval --impure --raw --expr "(builtins.fetchGit $repo).dirtyRev") = "${rev2}-dirty" ]] @@ -130,16 +130,16 @@ path2=$(nix eval --impure --raw --expr "(builtins.fetchGit $repo).outPath") # ... unless we're using an explicit ref or rev. 
path3=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = $repo; ref = \"master\"; }).outPath") -[[ $path = $path3 ]] +[[ $path = "$path3" ]] path3=$(nix eval --raw --expr "(builtins.fetchGit { url = $repo; rev = \"$rev2\"; }).outPath") -[[ $path = $path3 ]] +[[ $path = "$path3" ]] # Committing should not affect the store path. -git -C $repo commit -m 'Bla3' -a +git -C "$repo" commit -m 'Bla3' -a path4=$(nix eval --impure --refresh --raw --expr "(builtins.fetchGit file://$repo).outPath") -[[ $path2 = $path4 ]] +[[ $path2 = "$path4" ]] [[ $(nix eval --impure --expr "builtins.hasAttr \"rev\" (builtins.fetchGit $repo)") == "true" ]] [[ $(nix eval --impure --expr "builtins.hasAttr \"dirtyRev\" (builtins.fetchGit $repo)") == "false" ]] @@ -148,7 +148,7 @@ path4=$(nix eval --impure --refresh --raw --expr "(builtins.fetchGit file://$rep expect 102 nix eval --raw --expr "(builtins.fetchGit { url = $repo; rev = \"$rev2\"; narHash = \"sha256-B5yIPHhEm0eysJKEsO7nqxprh9vcblFxpJG11gXJus1=\"; }).outPath" path5=$(nix eval --raw --expr "(builtins.fetchGit { url = $repo; rev = \"$rev2\"; narHash = \"sha256-Hr8g6AqANb3xqX28eu1XnjK/3ab8Gv6TJSnkb1LezG9=\"; }).outPath") -[[ $path = $path5 ]] +[[ $path = "$path5" ]] # Ensure that NAR hashes are checked. expectStderr 102 nix eval --raw --expr "(builtins.fetchGit { url = $repo; rev = \"$rev2\"; narHash = \"sha256-Hr8g6AqANb4xqX28eu1XnjK/3ab8Gv6TJSnkb1LezG9=\"; }).outPath" | grepQuiet "error: NAR hash mismatch" @@ -157,22 +157,22 @@ expectStderr 102 nix eval --raw --expr "(builtins.fetchGit { url = $repo; rev = expectStderr 0 nix eval --raw --expr "(builtins.fetchGit { url = $repo; ref = \"tag2\"; narHash = \"sha256-Hr8g6AqANb3xqX28eu1XnjK/3ab8Gv6TJSnkb1LezG9=\"; }).outPath" | grepQuiet "warning: Input .* is unlocked" # tarball-ttl should be ignored if we specify a rev -echo delft > $repo/hello -git -C $repo add hello -git -C $repo commit -m 'Bla4' -rev3=$(git -C $repo rev-parse HEAD) +echo delft > "$repo"/hello +git -C "$repo" add hello +git -C "$repo" commit -m 'Bla4' +rev3=$(git -C "$repo" rev-parse HEAD) nix eval --tarball-ttl 3600 --expr "builtins.fetchGit { url = $repo; rev = \"$rev3\"; }" >/dev/null # Update 'path' to reflect latest master path=$(nix eval --impure --raw --expr "(builtins.fetchGit file://$repo).outPath") # Check behavior when non-master branch is used -git -C $repo checkout $rev2 -b dev -echo dev > $repo/hello +git -C "$repo" checkout "$rev2" -b dev +echo dev > "$repo"/hello # File URI uses dirty tree unless specified otherwise path2=$(nix eval --impure --raw --expr "(builtins.fetchGit file://$repo).outPath") -[ $(cat $path2/hello) = dev ] +[ "$(cat "$path2"/hello)" = dev ] # Using local path with branch other than 'master' should work when clean or dirty path3=$(nix eval --impure --raw --expr "(builtins.fetchGit $repo).outPath") @@ -181,53 +181,53 @@ path3=$(nix eval --impure --raw --expr "(builtins.fetchGit $repo).outPath") [[ $(nix eval --impure --raw --expr "(builtins.fetchGit $repo).shortRev") = 0000000 ]] # Making a dirty tree clean again and fetching it should # record correct revision information. 
See: #4140 -echo world > $repo/hello -[[ $(nix eval --impure --raw --expr "(builtins.fetchGit $repo).rev") = $rev2 ]] +echo world > "$repo"/hello +[[ $(nix eval --impure --raw --expr "(builtins.fetchGit $repo).rev") = "$rev2" ]] # Committing shouldn't change store path, or switch to using 'master' -echo dev > $repo/hello -git -C $repo commit -m 'Bla5' -a +echo dev > "$repo"/hello +git -C "$repo" commit -m 'Bla5' -a path4=$(nix eval --impure --raw --expr "(builtins.fetchGit $repo).outPath") -[[ $(cat $path4/hello) = dev ]] -[[ $path3 = $path4 ]] +[[ $(cat "$path4"/hello) = dev ]] +[[ $path3 = "$path4" ]] # Using remote path with branch other than 'master' should fetch the HEAD revision. # (--tarball-ttl 0 to prevent using the cached repo above) export _NIX_FORCE_HTTP=1 path4=$(nix eval --tarball-ttl 0 --impure --raw --expr "(builtins.fetchGit $repo).outPath") -[[ $(cat $path4/hello) = dev ]] -[[ $path3 = $path4 ]] +[[ $(cat "$path4"/hello) = dev ]] +[[ $path3 = "$path4" ]] unset _NIX_FORCE_HTTP # Confirm same as 'dev' branch path5=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = $repo; ref = \"dev\"; }).outPath") -[[ $path3 = $path5 ]] +[[ $path3 = "$path5" ]] # Nuke the cache -rm -rf $TEST_HOME/.cache/nix +rm -rf "$TEST_HOME"/.cache/nix # Try again. This should work. path5=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = $repo; ref = \"dev\"; }).outPath") -[[ $path3 = $path5 ]] +[[ $path3 = "$path5" ]] # Fetching from a repo with only a specific revision and no branches should # not fall back to copying files and record correct revision information. See: #5302 -mkdir $TEST_ROOT/minimal -git -C $TEST_ROOT/minimal init -git -C $TEST_ROOT/minimal fetch $repo $rev2 -git -C $TEST_ROOT/minimal checkout $rev2 -[[ $(nix eval --impure --raw --expr "(builtins.fetchGit { url = $TEST_ROOT/minimal; }).rev") = $rev2 ]] +mkdir "$TEST_ROOT"/minimal +git -C "$TEST_ROOT"/minimal init +git -C "$TEST_ROOT"/minimal fetch "$repo" "$rev2" +git -C "$TEST_ROOT"/minimal checkout "$rev2" +[[ $(nix eval --impure --raw --expr "(builtins.fetchGit { url = $TEST_ROOT/minimal; }).rev") = "$rev2" ]] # Explicit ref = "HEAD" should work, and produce the same outPath as without ref path7=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = \"file://$repo\"; ref = \"HEAD\"; }).outPath") path8=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = \"file://$repo\"; }).outPath") -[[ $path7 = $path8 ]] +[[ $path7 = "$path8" ]] # ref = "HEAD" should fetch the HEAD revision -rev4=$(git -C $repo rev-parse HEAD) +rev4=$(git -C "$repo" rev-parse HEAD) rev4_nix=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = \"file://$repo\"; ref = \"HEAD\"; }).rev") -[[ $rev4 = $rev4_nix ]] +[[ $rev4 = "$rev4_nix" ]] # The name argument should be handled path9=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = \"file://$repo\"; ref = \"HEAD\"; name = \"foo\"; }).outPath") @@ -236,33 +236,36 @@ path9=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = \"file://$rep # Specifying a ref without a rev shouldn't pick a cached rev for a different ref export _NIX_FORCE_HTTP=1 rev_tag1_nix=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = \"file://$repo\"; ref = \"refs/tags/tag1\"; }).rev") -rev_tag1=$(git -C $repo rev-parse refs/tags/tag1^{commit}) -[[ $rev_tag1_nix = $rev_tag1 ]] +# shellcheck disable=SC1083 +rev_tag1=$(git -C "$repo" rev-parse refs/tags/tag1^{commit}) +[[ $rev_tag1_nix = "$rev_tag1" ]] rev_tag2_nix=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = 
\"file://$repo\"; ref = \"refs/tags/tag2\"; }).rev") -rev_tag2=$(git -C $repo rev-parse refs/tags/tag2^{commit}) -[[ $rev_tag2_nix = $rev_tag2 ]] +# shellcheck disable=SC1083 +rev_tag2=$(git -C "$repo" rev-parse refs/tags/tag2^{commit}) +[[ $rev_tag2_nix = "$rev_tag2" ]] unset _NIX_FORCE_HTTP # Ensure .gitattributes is respected -touch $repo/not-exported-file -touch $repo/exported-wonky -echo "/not-exported-file export-ignore" >> $repo/.gitattributes -echo "/exported-wonky export-ignore=wonk" >> $repo/.gitattributes -git -C $repo add not-exported-file exported-wonky .gitattributes -git -C $repo commit -m 'Bla6' -rev5=$(git -C $repo rev-parse HEAD) +touch "$repo"/not-exported-file +touch "$repo"/exported-wonky +echo "/not-exported-file export-ignore" >> "$repo"/.gitattributes +echo "/exported-wonky export-ignore=wonk" >> "$repo"/.gitattributes +git -C "$repo" add not-exported-file exported-wonky .gitattributes +git -C "$repo" commit -m 'Bla6' +rev5=$(git -C "$repo" rev-parse HEAD) path12=$(nix eval --raw --expr "(builtins.fetchGit { url = file://$repo; rev = \"$rev5\"; }).outPath") [[ ! -e $path12/not-exported-file ]] [[ -e $path12/exported-wonky ]] # should fail if there is no repo -rm -rf $repo/.git -rm -rf $TEST_HOME/.cache/nix +rm -rf "$repo"/.git +rm -rf "$TEST_HOME"/.cache/nix (! nix eval --impure --raw --expr "(builtins.fetchGit \"file://$repo\").outPath") # should succeed for a repo without commits -git init $repo -git -C $repo add hello # need to add at least one file to cause the root of the repo to be visible +git init "$repo" +git -C "$repo" add hello # need to add at least one file to cause the root of the repo to be visible +# shellcheck disable=SC2034 path10=$(nix eval --impure --raw --expr "(builtins.fetchGit \"file://$repo\").outPath") # should succeed for a path with a space @@ -277,6 +280,7 @@ touch "$repo/.gitignore" git -C "$repo" add hello .gitignore git -C "$repo" commit -m 'Bla1' cd "$repo" +# shellcheck disable=SC2034 path11=$(nix eval --impure --raw --expr "(builtins.fetchGit ./.).outPath") # Test a workdir with no commits. From 1619409bf2f655248128cdb8bd1effd0f646dbae Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Thu, 25 Sep 2025 10:46:34 -0700 Subject: [PATCH 093/332] shellcheck fix: tests/functional/fetchGitRefs.sh --- maintainers/flake-module.nix | 1 - tests/functional/fetchGitRefs.sh | 7 ++++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 54d02d9f1..50574c129 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -107,7 +107,6 @@ excludes = [ # We haven't linted these files yet ''^tests/functional/dump-db\.sh$'' - ''^tests/functional/fetchGitRefs\.sh$'' ''^tests/functional/fetchGitSubmodules\.sh$'' ''^tests/functional/fetchGitVerification\.sh$'' ''^tests/functional/fetchMercurial\.sh$'' diff --git a/tests/functional/fetchGitRefs.sh b/tests/functional/fetchGitRefs.sh index 258a65525..288b26591 100755 --- a/tests/functional/fetchGitRefs.sh +++ b/tests/functional/fetchGitRefs.sh @@ -38,16 +38,16 @@ path=$(nix eval --raw --impure --expr "(builtins.fetchGit { url = $repo; ref = \ # 10. They cannot contain a \. 
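The ref-name restrictions listed above are the ones `git check-ref-format` enforces, and the valid_ref/invalid_ref helpers below simply delegate to it. As a rough standalone illustration of how the rules surface on the command line (expected exit codes are taken from git's documented behaviour, not from this test):

# Accepted: slashes and '@' are fine, and this exact name is used as a valid_ref case below.
git check-ref-format --branch 'heads/foo@bar'
# Rejected: consecutive dots are forbidden.
git check-ref-format --branch 'foo..bar' || echo "invalid ref name"
# Rejected: a component may not end in '.lock' (note that 'foo.locke' below is fine).
git check-ref-format --branch 'foo.lock' || echo "invalid ref name"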
valid_ref() { - { set +x; printf >&2 '\n>>>>>>>>>> valid_ref %s\b <<<<<<<<<<\n' $(printf %s "$1" | sed -n -e l); set -x; } + { set +x; printf >&2 '\n>>>>>>>>>> valid_ref %s\b <<<<<<<<<<\n' "$(printf %s "$1" | sed -n -e l)"; set -x; } git check-ref-format --branch "$1" >/dev/null git -C "$repo" branch "$1" master >/dev/null path1=$(nix eval --raw --impure --expr "(builtins.fetchGit { url = $repo; ref = ''$1''; }).outPath") - [[ $path1 = $path ]] + [[ $path1 = "$path" ]] git -C "$repo" branch -D "$1" >/dev/null } invalid_ref() { - { set +x; printf >&2 '\n>>>>>>>>>> invalid_ref %s\b <<<<<<<<<<\n' $(printf %s "$1" | sed -n -e l); set -x; } + { set +x; printf >&2 '\n>>>>>>>>>> invalid_ref %s\b <<<<<<<<<<\n' "$(printf %s "$1" | sed -n -e l)"; set -x; } # special case for a sole @: # --branch @ will try to interpret @ as a branch reference and not fail. Thus we need --allow-onelevel if [ "$1" = "@" ]; then @@ -68,6 +68,7 @@ valid_ref 'heads/foo@bar' valid_ref "$(printf 'heads/fu\303\237')" valid_ref 'foo-bar-baz' valid_ref 'branch#' +# shellcheck disable=SC2016 valid_ref '$1' valid_ref 'foo.locke' From ea035ae16599861b3cd05c9f86c694ddf527d279 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Thu, 25 Sep 2025 13:07:41 -0700 Subject: [PATCH 094/332] shellcheck fix: tests/tests/functional/dump-db.sh --- maintainers/flake-module.nix | 1 - 1 file changed, 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 50574c129..b2acc36d9 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/dump-db\.sh$'' ''^tests/functional/fetchGitSubmodules\.sh$'' ''^tests/functional/fetchGitVerification\.sh$'' ''^tests/functional/fetchMercurial\.sh$'' From 53ad2433b4263bd844c43d67fc3681983acc6fd6 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Thu, 25 Sep 2025 13:09:36 -0700 Subject: [PATCH 095/332] shellcheck fix: tests/functional/fetchGitSubmodules.sh --- maintainers/flake-module.nix | 1 - tests/functional/fetchGitSubmodules.sh | 153 +++++++++++++------------ 2 files changed, 77 insertions(+), 77 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index b2acc36d9..ba38633bc 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/fetchGitSubmodules\.sh$'' ''^tests/functional/fetchGitVerification\.sh$'' ''^tests/functional/fetchMercurial\.sh$'' ''^tests/functional/fixed\.builder1\.sh$'' diff --git a/tests/functional/fetchGitSubmodules.sh b/tests/functional/fetchGitSubmodules.sh index cd3b51674..2a25245be 100755 --- a/tests/functional/fetchGitSubmodules.sh +++ b/tests/functional/fetchGitSubmodules.sh @@ -11,7 +11,7 @@ clearStoreIfPossible rootRepo=$TEST_ROOT/gitSubmodulesRoot subRepo=$TEST_ROOT/gitSubmodulesSub -rm -rf ${rootRepo} ${subRepo} $TEST_HOME/.cache/nix +rm -rf "${rootRepo}" "${subRepo}" "$TEST_HOME"/.cache/nix # Submodules can't be fetched locally by default, which can cause # information leakage vulnerabilities, but for these tests our @@ -23,35 +23,35 @@ export XDG_CONFIG_HOME=$TEST_HOME/.config git config --global protocol.file.allow always initGitRepo() { - git init $1 - git -C $1 config user.email "foobar@example.com" - git -C $1 config user.name "Foobar" + git init "$1" + git -C "$1" config user.email "foobar@example.com" + git -C "$1" config user.name "Foobar" } addGitContent() { 
- echo "lorem ipsum" > $1/content - git -C $1 add content - git -C $1 commit -m "Initial commit" + echo "lorem ipsum" > "$1"/content + git -C "$1" add content + git -C "$1" commit -m "Initial commit" } -initGitRepo $subRepo -addGitContent $subRepo +initGitRepo "$subRepo" +addGitContent "$subRepo" -initGitRepo $rootRepo +initGitRepo "$rootRepo" -git -C $rootRepo submodule init -git -C $rootRepo submodule add $subRepo sub -git -C $rootRepo add sub -git -C $rootRepo commit -m "Add submodule" +git -C "$rootRepo" submodule init +git -C "$rootRepo" submodule add "$subRepo" sub +git -C "$rootRepo" add sub +git -C "$rootRepo" commit -m "Add submodule" -rev=$(git -C $rootRepo rev-parse HEAD) +rev=$(git -C "$rootRepo" rev-parse HEAD) r1=$(nix eval --raw --expr "(builtins.fetchGit { url = file://$rootRepo; rev = \"$rev\"; }).outPath") r2=$(nix eval --raw --expr "(builtins.fetchGit { url = file://$rootRepo; rev = \"$rev\"; submodules = false; }).outPath") r3=$(nix eval --raw --expr "(builtins.fetchGit { url = file://$rootRepo; rev = \"$rev\"; submodules = true; }).outPath") -[[ $r1 == $r2 ]] -[[ $r2 != $r3 ]] +[[ $r1 == "$r2" ]] +[[ $r2 != "$r3" ]] r4=$(nix eval --raw --expr "(builtins.fetchGit { url = file://$rootRepo; ref = \"master\"; rev = \"$rev\"; }).outPath") r5=$(nix eval --raw --expr "(builtins.fetchGit { url = file://$rootRepo; ref = \"master\"; rev = \"$rev\"; submodules = false; }).outPath") @@ -59,11 +59,11 @@ r6=$(nix eval --raw --expr "(builtins.fetchGit { url = file://$rootRepo; ref = \ r7=$(nix eval --raw --expr "(builtins.fetchGit { url = $rootRepo; ref = \"master\"; rev = \"$rev\"; submodules = true; }).outPath") r8=$(nix eval --raw --expr "(builtins.fetchGit { url = $rootRepo; rev = \"$rev\"; submodules = true; }).outPath") -[[ $r1 == $r4 ]] -[[ $r4 == $r5 ]] -[[ $r3 == $r6 ]] -[[ $r6 == $r7 ]] -[[ $r7 == $r8 ]] +[[ $r1 == "$r4" ]] +[[ $r4 == "$r5" ]] +[[ $r3 == "$r6" ]] +[[ $r6 == "$r7" ]] +[[ $r7 == "$r8" ]] have_submodules=$(nix eval --expr "(builtins.fetchGit { url = $rootRepo; rev = \"$rev\"; }).submodules") [[ $have_submodules == false ]] @@ -80,13 +80,13 @@ pathWithSubmodulesAgain=$(nix eval --raw --expr "(builtins.fetchGit { url = file pathWithSubmodulesAgainWithRef=$(nix eval --raw --expr "(builtins.fetchGit { url = file://$rootRepo; ref = \"master\"; rev = \"$rev\"; submodules = true; }).outPath") # The resulting store path cannot be the same. -[[ $pathWithoutSubmodules != $pathWithSubmodules ]] +[[ $pathWithoutSubmodules != "$pathWithSubmodules" ]] # Checking out the same repo with submodules returns in the same store path. -[[ $pathWithSubmodules == $pathWithSubmodulesAgain ]] +[[ $pathWithSubmodules == "$pathWithSubmodulesAgain" ]] # Checking out the same repo with submodules returns in the same store path. -[[ $pathWithSubmodulesAgain == $pathWithSubmodulesAgainWithRef ]] +[[ $pathWithSubmodulesAgain == "$pathWithSubmodulesAgainWithRef" ]] # The submodules flag is actually honored. [[ ! -e $pathWithoutSubmodules/sub/content ]] @@ -98,14 +98,14 @@ pathWithSubmodulesAgainWithRef=$(nix eval --raw --expr "(builtins.fetchGit { url test "$(find "$pathWithSubmodules" -name .git)" = "" # Git repos without submodules can be fetched with submodules = true. 
-subRev=$(git -C $subRepo rev-parse HEAD) +subRev=$(git -C "$subRepo" rev-parse HEAD) noSubmoduleRepoBaseline=$(nix eval --raw --expr "(builtins.fetchGit { url = file://$subRepo; rev = \"$subRev\"; }).outPath") noSubmoduleRepo=$(nix eval --raw --expr "(builtins.fetchGit { url = file://$subRepo; rev = \"$subRev\"; submodules = true; }).outPath") -[[ $noSubmoduleRepoBaseline == $noSubmoduleRepo ]] +[[ $noSubmoduleRepoBaseline == "$noSubmoduleRepo" ]] # Test .gitmodules with entries that refer to non-existent objects or objects that are not submodules. -cat >> $rootRepo/.gitmodules <> "$rootRepo"/.gitmodules <> $rootRepo/.gitmodules < $rootRepo/file -git -C $rootRepo add file -git -C $rootRepo commit -a -m "Add bad submodules" +echo foo > "$rootRepo"/file +git -C "$rootRepo" add file +git -C "$rootRepo" commit -a -m "Add bad submodules" -rev=$(git -C $rootRepo rev-parse HEAD) +rev=$(git -C "$rootRepo" rev-parse HEAD) r=$(nix eval --raw --expr "builtins.fetchGit { url = file://$rootRepo; rev = \"$rev\"; submodules = true; }") @@ -126,44 +126,44 @@ r=$(nix eval --raw --expr "builtins.fetchGit { url = file://$rootRepo; rev = \"$ [[ ! -e $r/missing ]] # Test relative submodule URLs. -rm $TEST_HOME/.cache/nix/fetcher-cache* -rm -rf $rootRepo/.git $rootRepo/.gitmodules $rootRepo/sub -initGitRepo $rootRepo -git -C $rootRepo submodule add ../gitSubmodulesSub sub -git -C $rootRepo commit -m "Add submodule" -rev2=$(git -C $rootRepo rev-parse HEAD) +rm "$TEST_HOME"/.cache/nix/fetcher-cache* +rm -rf "$rootRepo"/.git "$rootRepo"/.gitmodules "$rootRepo"/sub +initGitRepo "$rootRepo" +git -C "$rootRepo" submodule add ../gitSubmodulesSub sub +git -C "$rootRepo" commit -m "Add submodule" +rev2=$(git -C "$rootRepo" rev-parse HEAD) pathWithRelative=$(nix eval --raw --expr "(builtins.fetchGit { url = file://$rootRepo; rev = \"$rev2\"; submodules = true; }).outPath") -diff -r -x .gitmodules $pathWithSubmodules $pathWithRelative +diff -r -x .gitmodules "$pathWithSubmodules" "$pathWithRelative" # Test clones that have an upstream with relative submodule URLs. 
-rm $TEST_HOME/.cache/nix/fetcher-cache* +rm "$TEST_HOME"/.cache/nix/fetcher-cache* cloneRepo=$TEST_ROOT/a/b/gitSubmodulesClone # NB /a/b to make the relative path not work relative to $cloneRepo -git clone $rootRepo $cloneRepo +git clone "$rootRepo" "$cloneRepo" pathIndirect=$(nix eval --raw --expr "(builtins.fetchGit { url = file://$cloneRepo; rev = \"$rev2\"; submodules = true; }).outPath") -[[ $pathIndirect = $pathWithRelative ]] +[[ $pathIndirect = "$pathWithRelative" ]] # Test submodule export-ignore interaction -git -C $rootRepo/sub config user.email "foobar@example.com" -git -C $rootRepo/sub config user.name "Foobar" +git -C "$rootRepo"/sub config user.email "foobar@example.com" +git -C "$rootRepo"/sub config user.name "Foobar" -echo "/exclude-from-root export-ignore" >> $rootRepo/.gitattributes +echo "/exclude-from-root export-ignore" >> "$rootRepo"/.gitattributes # TBD possible semantics for submodules + exportIgnore # echo "/sub/exclude-deep export-ignore" >> $rootRepo/.gitattributes -echo nope > $rootRepo/exclude-from-root -git -C $rootRepo add .gitattributes exclude-from-root -git -C $rootRepo commit -m "Add export-ignore" +echo nope > "$rootRepo"/exclude-from-root +git -C "$rootRepo" add .gitattributes exclude-from-root +git -C "$rootRepo" commit -m "Add export-ignore" -echo "/exclude-from-sub export-ignore" >> $rootRepo/sub/.gitattributes -echo nope > $rootRepo/sub/exclude-from-sub +echo "/exclude-from-sub export-ignore" >> "$rootRepo"/sub/.gitattributes +echo nope > "$rootRepo"/sub/exclude-from-sub # TBD possible semantics for submodules + exportIgnore # echo aye > $rootRepo/sub/exclude-from-root -git -C $rootRepo/sub add .gitattributes exclude-from-sub -git -C $rootRepo/sub commit -m "Add export-ignore (sub)" +git -C "$rootRepo"/sub add .gitattributes exclude-from-sub +git -C "$rootRepo"/sub commit -m "Add export-ignore (sub)" -git -C $rootRepo add sub -git -C $rootRepo commit -m "Update submodule" +git -C "$rootRepo" add sub +git -C "$rootRepo" commit -m "Update submodule" -git -C $rootRepo status +git -C "$rootRepo" status # # TBD: not supported yet, because semantics are undecided and current implementation leaks rules from the root to submodules # # exportIgnore can be used with submodules @@ -199,39 +199,40 @@ test_submodule_nested() { local repoB=$TEST_ROOT/submodule_nested/b local repoC=$TEST_ROOT/submodule_nested/c - rm -rf $repoA $repoB $repoC $TEST_HOME/.cache/nix + rm -rf "$repoA" "$repoB" "$repoC" "$TEST_HOME"/.cache/nix - initGitRepo $repoC - touch $repoC/inside-c - git -C $repoC add inside-c - addGitContent $repoC + initGitRepo "$repoC" + touch "$repoC"/inside-c + git -C "$repoC" add inside-c + addGitContent "$repoC" - initGitRepo $repoB - git -C $repoB submodule add $repoC c - git -C $repoB add c - addGitContent $repoB + initGitRepo "$repoB" + git -C "$repoB" submodule add "$repoC" c + git -C "$repoB" add c + addGitContent "$repoB" - initGitRepo $repoA - git -C $repoA submodule add $repoB b - git -C $repoA add b - addGitContent $repoA + initGitRepo "$repoA" + git -C "$repoA" submodule add "$repoB" b + git -C "$repoA" add b + addGitContent "$repoA" # Check non-worktree fetch - local rev=$(git -C $repoA rev-parse HEAD) + local rev + rev=$(git -C "$repoA" rev-parse HEAD) out=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = \"file://$repoA\"; rev = \"$rev\"; submodules = true; }).outPath") - test -e $out/b/c/inside-c - test -e $out/content - test -e $out/b/content - test -e $out/b/c/content + test -e "$out"/b/c/inside-c + test -e "$out"/content + 
test -e "$out"/b/content + test -e "$out"/b/c/content local nonWorktree=$out # Check worktree based fetch # TODO: make it work without git submodule update - git -C $repoA submodule update --init --recursive + git -C "$repoA" submodule update --init --recursive out=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = \"file://$repoA\"; submodules = true; }).outPath") - find $out - [[ $out == $nonWorktree ]] || { find $out; false; } + find "$out" + [[ $out == "$nonWorktree" ]] || { find "$out"; false; } } test_submodule_nested From 7b3c193bd3586eb8ab9f8d7a26c1fdc5e213f514 Mon Sep 17 00:00:00 2001 From: Taeer Bar-Yam Date: Mon, 22 Sep 2025 14:38:55 -0400 Subject: [PATCH 096/332] libexpr: move eval memory allocation to own struct Co-authored-by: eldritch horrors Co-authored-by: Sergei Zimmerman See original commit on lix: https://git.lix.systems/lix-project/lix/commit/f5754dc90ae9b1207656d0e29ad2704d3ef1e554 --- src/libcmd/repl.cc | 2 +- src/libexpr-c/nix_api_value.cc | 2 +- src/libexpr/attr-set.cc | 10 +-- src/libexpr/eval.cc | 52 ++++++----- src/libexpr/include/nix/expr/attr-set.hh | 14 +-- src/libexpr/include/nix/expr/eval-inline.hh | 10 +-- src/libexpr/include/nix/expr/eval.hh | 98 +++++++++++++++------ src/libexpr/include/nix/expr/value.hh | 2 +- src/libexpr/primops.cc | 4 +- 9 files changed, 122 insertions(+), 72 deletions(-) diff --git a/src/libcmd/repl.cc b/src/libcmd/repl.cc index 01d786deb..5c6dd7ffb 100644 --- a/src/libcmd/repl.cc +++ b/src/libcmd/repl.cc @@ -760,7 +760,7 @@ void NixRepl::loadFlake(const std::string & flakeRefS) void NixRepl::initEnv() { - env = &state->allocEnv(envSize); + env = &state->mem.allocEnv(envSize); env->up = &state->baseEnv; displ = 0; staticEnv->vars.clear(); diff --git a/src/libexpr-c/nix_api_value.cc b/src/libexpr-c/nix_api_value.cc index c58d4fe89..3b8c7dd04 100644 --- a/src/libexpr-c/nix_api_value.cc +++ b/src/libexpr-c/nix_api_value.cc @@ -679,7 +679,7 @@ nix_err nix_bindings_builder_insert(nix_c_context * context, BindingsBuilder * b context->last_err_code = NIX_OK; try { auto & v = check_value_not_null(value); - nix::Symbol s = bb->builder.state.get().symbols.create(name); + nix::Symbol s = bb->builder.symbols.get().create(name); bb->builder.insert(s, &v); } NIXC_CATCH_ERRS diff --git a/src/libexpr/attr-set.cc b/src/libexpr/attr-set.cc index a1b646120..92b67f6ad 100644 --- a/src/libexpr/attr-set.cc +++ b/src/libexpr/attr-set.cc @@ -10,27 +10,27 @@ Bindings Bindings::emptyBindings; /* Allocate a new array of attributes for an attribute set with a specific capacity. The space is implicitly reserved after the Bindings structure. 
*/ -Bindings * EvalState::allocBindings(size_t capacity) +Bindings * EvalMemory::allocBindings(size_t capacity) { if (capacity == 0) return &Bindings::emptyBindings; if (capacity > std::numeric_limits::max()) throw Error("attribute set of size %d is too big", capacity); - nrAttrsets++; - nrAttrsInAttrsets += capacity; + stats.nrAttrsets++; + stats.nrAttrsInAttrsets += capacity; return new (allocBytes(sizeof(Bindings) + sizeof(Attr) * capacity)) Bindings(); } Value & BindingsBuilder::alloc(Symbol name, PosIdx pos) { - auto value = state.get().allocValue(); + auto value = mem.get().allocValue(); bindings->push_back(Attr(name, value, pos)); return *value; } Value & BindingsBuilder::alloc(std::string_view name, PosIdx pos) { - return alloc(state.get().symbols.create(name), pos); + return alloc(symbols.get().create(name), pos); } void Bindings::sort() diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index f82fd93b5..6cf902e35 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -194,6 +194,15 @@ static Symbol getName(const AttrName & name, EvalState & state, Env & env) static constexpr size_t BASE_ENV_SIZE = 128; +EvalMemory::EvalMemory() +#if NIX_USE_BOEHMGC + : valueAllocCache(std::allocate_shared(traceable_allocator(), nullptr)) + , env1AllocCache(std::allocate_shared(traceable_allocator(), nullptr)) +#endif +{ + assertGCInitialized(); +} + EvalState::EvalState( const LookupPath & lookupPathFromArguments, ref store, @@ -274,12 +283,10 @@ EvalState::EvalState( , fileEvalCache(make_ref()) , regexCache(makeRegexCache()) #if NIX_USE_BOEHMGC - , valueAllocCache(std::allocate_shared(traceable_allocator(), nullptr)) - , env1AllocCache(std::allocate_shared(traceable_allocator(), nullptr)) - , baseEnvP(std::allocate_shared(traceable_allocator(), &allocEnv(BASE_ENV_SIZE))) + , baseEnvP(std::allocate_shared(traceable_allocator(), &mem.allocEnv(BASE_ENV_SIZE))) , baseEnv(**baseEnvP) #else - , baseEnv(allocEnv(BASE_ENV_SIZE)) + , baseEnv(mem.allocEnv(BASE_ENV_SIZE)) #endif , staticBaseEnv{std::make_shared(nullptr, nullptr)} { @@ -288,8 +295,6 @@ EvalState::EvalState( countCalls = getEnv("NIX_COUNT_CALLS").value_or("0") != "0"; - assertGCInitialized(); - static_assert(sizeof(Env) <= 16, "environment must be <= 16 bytes"); static_assert(sizeof(Counter) == 64, "counters must be 64 bytes"); @@ -885,11 +890,10 @@ inline Value * EvalState::lookupVar(Env * env, const ExprVar & var, bool noEval) } } -ListBuilder::ListBuilder(EvalState & state, size_t size) +ListBuilder::ListBuilder(size_t size) : size(size) , elems(size <= 2 ? inlineElems : (Value **) allocBytes(size * sizeof(Value *))) { - state.nrListElems += size; } Value * EvalState::getBool(bool b) @@ -1183,7 +1187,7 @@ void ExprPath::eval(EvalState & state, Env & env, Value & v) Env * ExprAttrs::buildInheritFromEnv(EvalState & state, Env & up) { - Env & inheritEnv = state.allocEnv(inheritFromExprs->size()); + Env & inheritEnv = state.mem.allocEnv(inheritFromExprs->size()); inheritEnv.up = &up; Displacement displ = 0; @@ -1202,7 +1206,7 @@ void ExprAttrs::eval(EvalState & state, Env & env, Value & v) if (recursive) { /* Create a new environment that contains the attributes in this `rec'. */ - Env & env2(state.allocEnv(attrs.size())); + Env & env2(state.mem.allocEnv(attrs.size())); env2.up = &env; dynamicEnv = &env2; Env * inheritEnv = inheritFromExprs ? 
buildInheritFromEnv(state, env2) : nullptr; @@ -1294,7 +1298,7 @@ void ExprLet::eval(EvalState & state, Env & env, Value & v) { /* Create a new environment that contains the attributes in this `let'. */ - Env & env2(state.allocEnv(attrs->attrs.size())); + Env & env2(state.mem.allocEnv(attrs->attrs.size())); env2.up = &env; Env * inheritEnv = attrs->inheritFromExprs ? attrs->buildInheritFromEnv(state, env2) : nullptr; @@ -1500,7 +1504,7 @@ void EvalState::callFunction(Value & fun, std::span args, Value & vRes, ExprLambda & lambda(*vCur.lambda().fun); auto size = (!lambda.arg ? 0 : 1) + (lambda.hasFormals() ? lambda.formals->formals.size() : 0); - Env & env2(allocEnv(size)); + Env & env2(mem.allocEnv(size)); env2.up = vCur.lambda().env; Displacement displ = 0; @@ -1789,7 +1793,7 @@ https://nix.dev/manual/nix/stable/language/syntax.html#functions.)", void ExprWith::eval(EvalState & state, Env & env, Value & v) { - Env & env2(state.allocEnv(1)); + Env & env2(state.mem.allocEnv(1)); env2.up = &env; env2.values[0] = attrs->maybeThunk(state, env); @@ -2916,10 +2920,12 @@ void EvalState::printStatistics() std::chrono::microseconds cpuTimeDuration = getCpuUserTime(); float cpuTime = std::chrono::duration_cast>(cpuTimeDuration).count(); - uint64_t bEnvs = nrEnvs * sizeof(Env) + nrValuesInEnvs * sizeof(Value *); - uint64_t bLists = nrListElems * sizeof(Value *); - uint64_t bValues = nrValues * sizeof(Value); - uint64_t bAttrsets = nrAttrsets * sizeof(Bindings) + nrAttrsInAttrsets * sizeof(Attr); + auto & memstats = mem.getStats(); + + uint64_t bEnvs = memstats.nrEnvs * sizeof(Env) + memstats.nrValuesInEnvs * sizeof(Value *); + uint64_t bLists = memstats.nrListElems * sizeof(Value *); + uint64_t bValues = memstats.nrValues * sizeof(Value); + uint64_t bAttrsets = memstats.nrAttrsets * sizeof(Bindings) + memstats.nrAttrsInAttrsets * sizeof(Attr); #if NIX_USE_BOEHMGC GC_word heapSize, totalBytes; @@ -2945,18 +2951,18 @@ void EvalState::printStatistics() #endif }; topObj["envs"] = { - {"number", nrEnvs.load()}, - {"elements", nrValuesInEnvs.load()}, + {"number", memstats.nrEnvs.load()}, + {"elements", memstats.nrValuesInEnvs.load()}, {"bytes", bEnvs}, }; topObj["nrExprs"] = Expr::nrExprs.load(); topObj["list"] = { - {"elements", nrListElems.load()}, + {"elements", memstats.nrListElems.load()}, {"bytes", bLists}, {"concats", nrListConcats.load()}, }; topObj["values"] = { - {"number", nrValues.load()}, + {"number", memstats.nrValues.load()}, {"bytes", bValues}, }; topObj["symbols"] = { @@ -2964,9 +2970,9 @@ void EvalState::printStatistics() {"bytes", symbols.totalSize()}, }; topObj["sets"] = { - {"number", nrAttrsets.load()}, + {"number", memstats.nrAttrsets.load()}, {"bytes", bAttrsets}, - {"elements", nrAttrsInAttrsets.load()}, + {"elements", memstats.nrAttrsInAttrsets.load()}, }; topObj["sizes"] = { {"Env", sizeof(Env)}, diff --git a/src/libexpr/include/nix/expr/attr-set.hh b/src/libexpr/include/nix/expr/attr-set.hh index 52ce958ce..46eecd9bd 100644 --- a/src/libexpr/include/nix/expr/attr-set.hh +++ b/src/libexpr/include/nix/expr/attr-set.hh @@ -13,7 +13,7 @@ namespace nix { -class EvalState; +class EvalMemory; struct Value; /** @@ -426,7 +426,7 @@ public: return res; } - friend class EvalState; + friend class EvalMemory; }; static_assert(std::forward_iterator); @@ -448,12 +448,13 @@ private: Bindings * bindings; Bindings::size_type capacity_; - friend class EvalState; + friend class EvalMemory; - BindingsBuilder(EvalState & state, Bindings * bindings, size_type capacity) + BindingsBuilder(EvalMemory & 
mem, SymbolTable & symbols, Bindings * bindings, size_type capacity) : bindings(bindings) , capacity_(capacity) - , state(state) + , mem(mem) + , symbols(symbols) { } @@ -471,7 +472,8 @@ private: } public: - std::reference_wrapper state; + std::reference_wrapper mem; + std::reference_wrapper symbols; void insert(Symbol name, Value * value, PosIdx pos = noPos) { diff --git a/src/libexpr/include/nix/expr/eval-inline.hh b/src/libexpr/include/nix/expr/eval-inline.hh index 749e51537..1320da914 100644 --- a/src/libexpr/include/nix/expr/eval-inline.hh +++ b/src/libexpr/include/nix/expr/eval-inline.hh @@ -26,7 +26,7 @@ inline void * allocBytes(size_t n) } [[gnu::always_inline]] -Value * EvalState::allocValue() +Value * EvalMemory::allocValue() { #if NIX_USE_BOEHMGC /* We use the boehm batch allocator to speed up allocations of Values (of which there are many). @@ -48,15 +48,15 @@ Value * EvalState::allocValue() void * p = allocBytes(sizeof(Value)); #endif - nrValues++; + stats.nrValues++; return (Value *) p; } [[gnu::always_inline]] -Env & EvalState::allocEnv(size_t size) +Env & EvalMemory::allocEnv(size_t size) { - nrEnvs++; - nrValuesInEnvs += size; + stats.nrEnvs++; + stats.nrValuesInEnvs += size; Env * env; diff --git a/src/libexpr/include/nix/expr/eval.hh b/src/libexpr/include/nix/expr/eval.hh index e5b87cc97..f61dab3a8 100644 --- a/src/libexpr/include/nix/expr/eval.hh +++ b/src/libexpr/include/nix/expr/eval.hh @@ -302,6 +302,63 @@ struct StaticEvalSymbols } }; +class EvalMemory +{ +#if NIX_USE_BOEHMGC + /** + * Allocation cache for GC'd Value objects. + */ + std::shared_ptr valueAllocCache; + + /** + * Allocation cache for size-1 Env objects. + */ + std::shared_ptr env1AllocCache; +#endif + +public: + struct Statistics + { + Counter nrEnvs; + Counter nrValuesInEnvs; + Counter nrValues; + Counter nrAttrsets; + Counter nrAttrsInAttrsets; + Counter nrListElems; + }; + + EvalMemory(); + + EvalMemory(const EvalMemory &) = delete; + EvalMemory(EvalMemory &&) = delete; + EvalMemory & operator=(const EvalMemory &) = delete; + EvalMemory & operator=(EvalMemory &&) = delete; + + inline Value * allocValue(); + inline Env & allocEnv(size_t size); + + Bindings * allocBindings(size_t capacity); + + BindingsBuilder buildBindings(SymbolTable & symbols, size_t capacity) + { + return BindingsBuilder(*this, symbols, allocBindings(capacity), capacity); + } + + ListBuilder buildList(size_t size) + { + stats.nrListElems += size; + return ListBuilder(size); + } + + const Statistics & getStats() const & + { + return stats; + } + +private: + Statistics stats; +}; + class EvalState : public std::enable_shared_from_this { public: @@ -312,6 +369,8 @@ public: SymbolTable symbols; PosTable positions; + EvalMemory mem; + /** * If set, force copying files to the Nix store even if they * already exist there. @@ -441,18 +500,6 @@ private: */ std::shared_ptr regexCache; -#if NIX_USE_BOEHMGC - /** - * Allocation cache for GC'd Value objects. - */ - std::shared_ptr valueAllocCache; - - /** - * Allocation cache for size-1 Env objects. - */ - std::shared_ptr env1AllocCache; -#endif - public: EvalState( @@ -463,6 +510,15 @@ public: std::shared_ptr buildStore = nullptr); ~EvalState(); + /** + * A wrapper around EvalMemory::allocValue() to avoid code churn when it + * was introduced. 
+ */ + inline Value * allocValue() + { + return mem.allocValue(); + } + LookupPath getLookupPath() { return lookupPath; @@ -834,22 +890,14 @@ public: */ void autoCallFunction(const Bindings & args, Value & fun, Value & res); - /** - * Allocation primitives. - */ - inline Value * allocValue(); - inline Env & allocEnv(size_t size); - - Bindings * allocBindings(size_t capacity); - BindingsBuilder buildBindings(size_t capacity) { - return BindingsBuilder(*this, allocBindings(capacity), capacity); + return mem.buildBindings(symbols, capacity); } ListBuilder buildList(size_t size) { - return ListBuilder(*this, size); + return mem.buildList(size); } /** @@ -966,13 +1014,7 @@ private: */ std::string mkSingleDerivedPathStringRaw(const SingleDerivedPath & p); - Counter nrEnvs; - Counter nrValuesInEnvs; - Counter nrValues; - Counter nrListElems; Counter nrLookups; - Counter nrAttrsets; - Counter nrAttrsInAttrsets; Counter nrAvoided; Counter nrOpUpdates; Counter nrOpUpdateValuesCopied; diff --git a/src/libexpr/include/nix/expr/value.hh b/src/libexpr/include/nix/expr/value.hh index e526fcde0..22d85dc99 100644 --- a/src/libexpr/include/nix/expr/value.hh +++ b/src/libexpr/include/nix/expr/value.hh @@ -155,7 +155,7 @@ class ListBuilder Value * inlineElems[2] = {nullptr, nullptr}; public: Value ** elems; - ListBuilder(EvalState & state, size_t size); + ListBuilder(size_t size); // NOTE: Can be noexcept because we are just copying integral values and // raw pointers. diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index a046a2c28..a8ac8d159 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -262,7 +262,7 @@ static void scopedImport(EvalState & state, const PosIdx pos, SourcePath & path, { state.forceAttrs(*vScope, pos, "while evaluating the first argument passed to builtins.scopedImport"); - Env * env = &state.allocEnv(vScope->attrs()->size()); + Env * env = &state.mem.allocEnv(vScope->attrs()->size()); env->up = &state.baseEnv; auto staticEnv = std::make_shared(nullptr, state.staticBaseEnv, vScope->attrs()->size()); @@ -3161,7 +3161,7 @@ static void prim_listToAttrs(EvalState & state, const PosIdx pos, Value ** args, // Step 1. Sort the name-value attrsets in place using the memory we allocate for the result auto listView = args[0]->listView(); size_t listSize = listView.size(); - auto & bindings = *state.allocBindings(listSize); + auto & bindings = *state.mem.allocBindings(listSize); using ElemPtr = decltype(&bindings[0].value); for (const auto & [n, v2] : enumerate(listView)) { From ff82de86da4308b3a79b1c1d1bcb5f33edef066d Mon Sep 17 00:00:00 2001 From: Seth Flynn Date: Fri, 26 Sep 2025 02:03:50 -0400 Subject: [PATCH 097/332] nix-cli: use pure/restricted eval for help pages This avoids any complications that can arise from the environment affecting evaluation of the help pages (which don't need to be calling out to anything external anyways) A recent example of one of these problems is https://github.com/NixOS/nix/issues/14085, which would break help pages by causing them to make invalid calls to the dummy store they're evaluated with Fixes: https://github.com/NixOS/nix/issues/14062 Co-authored-by: Sergei Zimmerman --- src/nix/main.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/nix/main.cc b/src/nix/main.cc index a6077f5e9..ed889a189 100644 --- a/src/nix/main.cc +++ b/src/nix/main.cc @@ -226,8 +226,8 @@ static void showHelp(std::vector subcommand, NixArgs & toplevel) auto mdName = subcommand.empty() ? 
"nix" : fmt("nix3-%s", concatStringsSep("-", subcommand)); - evalSettings.restrictEval = false; - evalSettings.pureEval = false; + evalSettings.restrictEval = true; + evalSettings.pureEval = true; EvalState state({}, openStore("dummy://"), fetchSettings, evalSettings); auto vGenerateManpage = state.allocValue(); From eec4dece33c23a2d8ae66e051ec0de2387a8b059 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 10 Sep 2025 17:40:54 +0200 Subject: [PATCH 098/332] Args::Flag: Add required attribute --- src/libutil/args.cc | 22 +++++++++++++++++++++- src/libutil/include/nix/util/args.hh | 8 ++++++++ src/nix/sigs.cc | 8 +++----- 3 files changed, 32 insertions(+), 6 deletions(-) diff --git a/src/libutil/args.cc b/src/libutil/args.cc index f4309473b..05b5a25c7 100644 --- a/src/libutil/args.cc +++ b/src/libutil/args.cc @@ -318,6 +318,7 @@ void RootArgs::parseCmdline(const Strings & _cmdline, bool allowShebang) } catch (SystemError &) { } } + for (auto pos = cmdline.begin(); pos != cmdline.end();) { auto arg = *pos; @@ -354,6 +355,9 @@ void RootArgs::parseCmdline(const Strings & _cmdline, bool allowShebang) processArgs(pendingArgs, true); + if (!completions) + checkArgs(); + initialFlagsProcessed(); /* Now that we are done parsing, make sure that any experimental @@ -384,7 +388,7 @@ bool Args::processFlag(Strings::iterator & pos, Strings::iterator end) auto & rootArgs = getRoot(); - auto process = [&](const std::string & name, const Flag & flag) -> bool { + auto process = [&](const std::string & name, Flag & flag) -> bool { ++pos; if (auto & f = flag.experimentalFeature) @@ -413,6 +417,7 @@ bool Args::processFlag(Strings::iterator & pos, Strings::iterator end) } if (!anyCompleted) flag.handler.fun(std::move(args)); + flag.timesUsed++; return true; }; @@ -504,6 +509,14 @@ bool Args::processArgs(const Strings & args, bool finish) return res; } +void Args::checkArgs() +{ + for (auto & [name, flag] : longFlags) { + if (flag->required && flag->timesUsed == 0) + throw UsageError("required argument '--%s' is missing", name); + } +} + nlohmann::json Args::toJSON() { auto flags = nlohmann::json::object(); @@ -643,6 +656,13 @@ bool MultiCommand::processArgs(const Strings & args, bool finish) return Args::processArgs(args, finish); } +void MultiCommand::checkArgs() +{ + Args::checkArgs(); + if (command) + command->second->checkArgs(); +} + nlohmann::json MultiCommand::toJSON() { auto cmds = nlohmann::json::object(); diff --git a/src/libutil/include/nix/util/args.hh b/src/libutil/include/nix/util/args.hh index 443db445f..99f6e23e8 100644 --- a/src/libutil/include/nix/util/args.hh +++ b/src/libutil/include/nix/util/args.hh @@ -202,8 +202,12 @@ public: Strings labels; Handler handler; CompleterClosure completer; + bool required = false; std::optional experimentalFeature; + + // FIXME: this should be private, but that breaks designated initializers. + size_t timesUsed = 0; }; protected: @@ -283,6 +287,8 @@ protected: StringSet hiddenCategories; + virtual void checkArgs(); + /** * Called after all command line flags before the first non-flag * argument (if any) have been processed. 
@@ -428,6 +434,8 @@ public: protected: std::string commandName = ""; bool aliasUsed = false; + + void checkArgs() override; }; Strings argvToStrings(int argc, char ** argv); diff --git a/src/nix/sigs.cc b/src/nix/sigs.cc index 92bb00500..470cd3951 100644 --- a/src/nix/sigs.cc +++ b/src/nix/sigs.cc @@ -144,7 +144,7 @@ static auto rCmdSign = registerCommand2({"store", "sign"}); struct CmdKeyGenerateSecret : Command { - std::optional keyName; + std::string keyName; CmdKeyGenerateSecret() { @@ -153,6 +153,7 @@ struct CmdKeyGenerateSecret : Command .description = "Identifier of the key (e.g. `cache.example.org-1`).", .labels = {"name"}, .handler = {&keyName}, + .required = true, }); } @@ -170,11 +171,8 @@ struct CmdKeyGenerateSecret : Command void run() override { - if (!keyName) - throw UsageError("required argument '--key-name' is missing"); - logger->stop(); - writeFull(getStandardOutput(), SecretKey::generate(*keyName).to_string()); + writeFull(getStandardOutput(), SecretKey::generate(keyName).to_string()); } }; From bb6a4dccdf2c828fda29ce34e55757b9a59b11f2 Mon Sep 17 00:00:00 2001 From: Tristan Ross Date: Tue, 23 Sep 2025 12:53:43 -0700 Subject: [PATCH 099/332] libutil-c: add nix_set_verbosity function --- src/libutil-c/nix_api_util.cc | 12 ++++++++++++ src/libutil-c/nix_api_util.h | 26 ++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/src/libutil-c/nix_api_util.cc b/src/libutil-c/nix_api_util.cc index a43e7103b..3903823aa 100644 --- a/src/libutil-c/nix_api_util.cc +++ b/src/libutil-c/nix_api_util.cc @@ -159,4 +159,16 @@ nix_err call_nix_get_string_callback(const std::string str, nix_get_string_callb return NIX_OK; } +nix_err nix_set_verbosity(nix_c_context * context, nix_verbosity level) +{ + if (context) + context->last_err_code = NIX_OK; + if (level > NIX_LVL_VOMIT || level < NIX_LVL_ERROR) + return nix_set_err_msg(context, NIX_ERR_UNKNOWN, "Invalid verbosity level"); + try { + nix::verbosity = static_cast(level); + } + NIXC_CATCH_ERRS +} + } // extern "C" diff --git a/src/libutil-c/nix_api_util.h b/src/libutil-c/nix_api_util.h index 5f42641d4..608b463c0 100644 --- a/src/libutil-c/nix_api_util.h +++ b/src/libutil-c/nix_api_util.h @@ -102,6 +102,24 @@ enum nix_err { typedef enum nix_err nix_err; +/** + * @brief Verbosity level + * + * @note This should be kept in sync with the C++ implementation (nix::Verbosity) + */ +enum nix_verbosity { + NIX_LVL_ERROR = 0, + NIX_LVL_WARN, + NIX_LVL_NOTICE, + NIX_LVL_INFO, + NIX_LVL_TALKATIVE, + NIX_LVL_CHATTY, + NIX_LVL_DEBUG, + NIX_LVL_VOMIT, +}; + +typedef enum nix_verbosity nix_verbosity; + /** * @brief This object stores error state. * @struct nix_c_context @@ -316,6 +334,14 @@ nix_err nix_set_err_msg(nix_c_context * context, nix_err err, const char * msg); */ void nix_clear_err(nix_c_context * context); +/** + * @brief Sets the verbosity level + * + * @param[out] context Optional, additional error context. 
+ * @param[in] level Verbosity level + */ +nix_err nix_set_verbosity(nix_c_context * context, nix_verbosity level); + /** * @} */ From c4c35243180c7ed55c115bb9258aeb412572cb61 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Thu, 25 Sep 2025 13:11:07 -0700 Subject: [PATCH 100/332] shellcheck fix: tests/functional/fetchGitVerification.sh --- maintainers/flake-module.nix | 1 - tests/functional/fetchGitVerification.sh | 25 ++++++++++++------------ 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index ba38633bc..d303cb213 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/fetchGitVerification\.sh$'' ''^tests/functional/fetchMercurial\.sh$'' ''^tests/functional/fixed\.builder1\.sh$'' ''^tests/functional/fixed\.builder2\.sh$'' diff --git a/tests/functional/fetchGitVerification.sh b/tests/functional/fetchGitVerification.sh index 4012d8229..79c78d0c9 100755 --- a/tests/functional/fetchGitVerification.sh +++ b/tests/functional/fetchGitVerification.sh @@ -21,29 +21,29 @@ ssh-keygen -f "$keysDir/testkey2" -t rsa -P "" -C "test key 2" key2File="$keysDir/testkey2.pub" publicKey2=$(awk '{print $2}' "$key2File") -git init $repo -git -C $repo config user.email "foobar@example.com" -git -C $repo config user.name "Foobar" -git -C $repo config gpg.format ssh +git init "$repo" +git -C "$repo" config user.email "foobar@example.com" +git -C "$repo" config user.name "Foobar" +git -C "$repo" config gpg.format ssh -echo 'hello' > $repo/text -git -C $repo add text -git -C $repo -c "user.signingkey=$key1File" commit -S -m 'initial commit' +echo 'hello' > "$repo"/text +git -C "$repo" add text +git -C "$repo" -c "user.signingkey=$key1File" commit -S -m 'initial commit' out=$(nix eval --impure --raw --expr "builtins.fetchGit { url = \"file://$repo\"; keytype = \"ssh-rsa\"; publicKey = \"$publicKey2\"; }" 2>&1) || status=$? [[ $status == 1 ]] -[[ $out =~ 'No principal matched.' ]] +[[ $out == *'No principal matched.'* ]] [[ $(nix eval --impure --raw --expr "builtins.readFile (builtins.fetchGit { url = \"file://$repo\"; publicKey = \"$publicKey1\"; } + \"/text\")") = 'hello' ]] -echo 'hello world' > $repo/text +echo 'hello world' > "$repo"/text # Verification on a dirty repo should fail. out=$(nix eval --impure --raw --expr "builtins.fetchGit { url = \"file://$repo\"; keytype = \"ssh-rsa\"; publicKey = \"$publicKey2\"; }" 2>&1) || status=$? [[ $status == 1 ]] [[ $out =~ 'dirty' ]] -git -C $repo add text -git -C $repo -c "user.signingkey=$key2File" commit -S -m 'second commit' +git -C "$repo" add text +git -C "$repo" -c "user.signingkey=$key2File" commit -S -m 'second commit' [[ $(nix eval --impure --raw --expr "builtins.readFile (builtins.fetchGit { url = \"file://$repo\"; publicKeys = [{key = \"$publicKey1\";} {type = \"ssh-rsa\"; key = \"$publicKey2\";}]; } + \"/text\")") = 'hello world' ]] @@ -80,5 +80,6 @@ cat > "$flakeDir/flake.nix" <&1) || status=$? + [[ $status == 1 ]] -[[ $out =~ 'No principal matched.' 
]] +[[ $out == *'No principal matched.'* ]] From 4cec876319d84e6033a184ef6fb81d9d6fac265e Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Thu, 25 Sep 2025 13:13:38 -0700 Subject: [PATCH 101/332] shellcheck fix: tests/functional/fetchMercurial.sh --- maintainers/flake-module.nix | 1 - tests/functional/fetchMercurial.sh | 85 +++++++++++++++--------------- 2 files changed, 43 insertions(+), 43 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index d303cb213..3470f853f 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/fetchMercurial\.sh$'' ''^tests/functional/fixed\.builder1\.sh$'' ''^tests/functional/fixed\.builder2\.sh$'' ''^tests/functional/fixed\.sh$'' diff --git a/tests/functional/fetchMercurial.sh b/tests/functional/fetchMercurial.sh index 6de192865..6293fb76a 100755 --- a/tests/functional/fetchMercurial.sh +++ b/tests/functional/fetchMercurial.sh @@ -12,34 +12,35 @@ clearStore # See https://github.com/NixOS/nix/issues/6195 repo=$TEST_ROOT/./hg -rm -rf $repo ${repo}-tmp $TEST_HOME/.cache/nix +rm -rf "$repo" "${repo}"-tmp "$TEST_HOME"/.cache/nix -hg init $repo -echo '[ui]' >> $repo/.hg/hgrc -echo 'username = Foobar ' >> $repo/.hg/hgrc +hg init "$repo" +{ + echo '[ui]' + echo 'username = Foobar ' + # Set ui.tweakdefaults to ensure HGPLAIN is being set. + echo 'tweakdefaults = True' +} >> "$repo"/.hg/hgrc -# Set ui.tweakdefaults to ensure HGPLAIN is being set. -echo 'tweakdefaults = True' >> $repo/.hg/hgrc +echo utrecht > "$repo"/hello +touch "$repo"/.hgignore +hg add --cwd "$repo" hello .hgignore +hg commit --cwd "$repo" -m 'Bla1' +rev1=$(hg log --cwd "$repo" -r tip --template '{node}') -echo utrecht > $repo/hello -touch $repo/.hgignore -hg add --cwd $repo hello .hgignore -hg commit --cwd $repo -m 'Bla1' -rev1=$(hg log --cwd $repo -r tip --template '{node}') - -echo world > $repo/hello -hg commit --cwd $repo -m 'Bla2' -rev2=$(hg log --cwd $repo -r tip --template '{node}') +echo world > "$repo"/hello +hg commit --cwd "$repo" -m 'Bla2' +rev2=$(hg log --cwd "$repo" -r tip --template '{node}') # Fetch an unclean branch. -echo unclean > $repo/hello +echo unclean > "$repo"/hello path=$(nix eval --impure --raw --expr "(builtins.fetchMercurial file://$repo).outPath") -[[ $(cat $path/hello) = unclean ]] -hg revert --cwd $repo --all +[[ $(cat "$path"/hello) = unclean ]] +hg revert --cwd "$repo" --all # Fetch the default branch. path=$(nix eval --impure --raw --expr "(builtins.fetchMercurial file://$repo).outPath") -[[ $(cat $path/hello) = world ]] +[[ $(cat "$path"/hello) = world ]] # In pure eval mode, fetchGit without a revision should fail. [[ $(nix eval --impure --raw --expr "(builtins.readFile (fetchMercurial file://$repo + \"/hello\"))") = world ]] @@ -47,64 +48,64 @@ path=$(nix eval --impure --raw --expr "(builtins.fetchMercurial file://$repo).ou # Fetch using an explicit revision hash. path2=$(nix eval --impure --raw --expr "(builtins.fetchMercurial { url = file://$repo; rev = \"$rev2\"; }).outPath") -[[ $path = $path2 ]] +[[ $path = "$path2" ]] # In pure eval mode, fetchGit with a revision should succeed. [[ $(nix eval --raw --expr "builtins.readFile (fetchMercurial { url = file://$repo; rev = \"$rev2\"; } + \"/hello\")") = world ]] # Fetch again. This should be cached. 
-mv $repo ${repo}-tmp +mv "$repo" "${repo}"-tmp path2=$(nix eval --impure --raw --expr "(builtins.fetchMercurial file://$repo).outPath") -[[ $path = $path2 ]] +[[ $path = "$path2" ]] [[ $(nix eval --impure --raw --expr "(builtins.fetchMercurial file://$repo).branch") = default ]] [[ $(nix eval --impure --expr "(builtins.fetchMercurial file://$repo).revCount") = 1 ]] -[[ $(nix eval --impure --raw --expr "(builtins.fetchMercurial file://$repo).rev") = $rev2 ]] +[[ $(nix eval --impure --raw --expr "(builtins.fetchMercurial file://$repo).rev") = "$rev2" ]] # But with TTL 0, it should fail. (! nix eval --impure --refresh --expr "builtins.fetchMercurial file://$repo") # Fetching with a explicit hash should succeed. path2=$(nix eval --refresh --raw --expr "(builtins.fetchMercurial { url = file://$repo; rev = \"$rev2\"; }).outPath") -[[ $path = $path2 ]] +[[ $path = "$path2" ]] path2=$(nix eval --refresh --raw --expr "(builtins.fetchMercurial { url = file://$repo; rev = \"$rev1\"; }).outPath") -[[ $(cat $path2/hello) = utrecht ]] +[[ $(cat "$path2"/hello) = utrecht ]] -mv ${repo}-tmp $repo +mv "${repo}"-tmp "$repo" # Using a clean working tree should produce the same result. path2=$(nix eval --impure --raw --expr "(builtins.fetchMercurial $repo).outPath") -[[ $path = $path2 ]] +[[ $path = "$path2" ]] # Using an unclean tree should yield the tracked but uncommitted changes. -mkdir $repo/dir1 $repo/dir2 -echo foo > $repo/dir1/foo -echo bar > $repo/bar -echo bar > $repo/dir2/bar -hg add --cwd $repo dir1/foo -hg rm --cwd $repo hello +mkdir "$repo"/dir1 "$repo"/dir2 +echo foo > "$repo"/dir1/foo +echo bar > "$repo"/bar +echo bar > "$repo"/dir2/bar +hg add --cwd "$repo" dir1/foo +hg rm --cwd "$repo" hello path2=$(nix eval --impure --raw --expr "(builtins.fetchMercurial $repo).outPath") -[ ! -e $path2/hello ] -[ ! -e $path2/bar ] -[ ! -e $path2/dir2/bar ] -[ ! -e $path2/.hg ] -[[ $(cat $path2/dir1/foo) = foo ]] +[ ! -e "$path2"/hello ] +[ ! -e "$path2"/bar ] +[ ! -e "$path2"/dir2/bar ] +[ ! -e "$path2"/.hg ] +[[ $(cat "$path2"/dir1/foo) = foo ]] [[ $(nix eval --impure --raw --expr "(builtins.fetchMercurial $repo).rev") = 0000000000000000000000000000000000000000 ]] # ... unless we're using an explicit ref. path3=$(nix eval --impure --raw --expr "(builtins.fetchMercurial { url = $repo; rev = \"default\"; }).outPath") -[[ $path = $path3 ]] +[[ $path = "$path3" ]] # Committing should not affect the store path. 
-hg commit --cwd $repo -m 'Bla3' +hg commit --cwd "$repo" -m 'Bla3' path4=$(nix eval --impure --refresh --raw --expr "(builtins.fetchMercurial file://$repo).outPath") -[[ $path2 = $path4 ]] +[[ $path2 = "$path4" ]] -echo paris > $repo/hello +echo paris > "$repo"/hello # Passing a `name` argument should be reflected in the output path path5=$(nix eval -vvvvv --impure --refresh --raw --expr "(builtins.fetchMercurial { url = \"file://$repo\"; name = \"foo\"; } ).outPath") From f8e351cd945f4ae444b57c3917b3648af3b0c709 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Thu, 25 Sep 2025 13:16:47 -0700 Subject: [PATCH 102/332] shellcheck fix: tests/functional/fixed --- maintainers/flake-module.nix | 3 --- tests/functional/fixed.builder1.sh | 4 +++- tests/functional/fixed.builder2.sh | 13 ++++++++----- tests/functional/fixed.sh | 20 ++++++++++---------- 4 files changed, 21 insertions(+), 19 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 3470f853f..0742d2a6a 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,9 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/fixed\.builder1\.sh$'' - ''^tests/functional/fixed\.builder2\.sh$'' - ''^tests/functional/fixed\.sh$'' ''^tests/functional/flakes/absolute-paths\.sh$'' ''^tests/functional/flakes/check\.sh$'' ''^tests/functional/flakes/config\.sh$'' diff --git a/tests/functional/fixed.builder1.sh b/tests/functional/fixed.builder1.sh index c41bb2b9a..172f65e6b 100644 --- a/tests/functional/fixed.builder1.sh +++ b/tests/functional/fixed.builder1.sh @@ -1,3 +1,5 @@ +# shellcheck shell=bash if test "$IMPURE_VAR1" != "foo"; then exit 1; fi if test "$IMPURE_VAR2" != "bar"; then exit 1; fi -echo "Hello World!" > $out +# shellcheck disable=SC2154 +echo "Hello World!" > "$out" diff --git a/tests/functional/fixed.builder2.sh b/tests/functional/fixed.builder2.sh index 31ea1579a..9fbcf022e 100644 --- a/tests/functional/fixed.builder2.sh +++ b/tests/functional/fixed.builder2.sh @@ -1,6 +1,9 @@ -echo dummy: $dummy +# shellcheck shell=bash +# shellcheck disable=SC2154 +echo dummy: "$dummy" if test -n "$dummy"; then sleep 2; fi -mkdir $out -mkdir $out/bla -echo "Hello World!" > $out/foo -ln -s foo $out/bar +# shellcheck disable=SC2154 +mkdir "$out" +mkdir "$out"/bla +echo "Hello World!" > "$out"/foo +ln -s foo "$out"/bar diff --git a/tests/functional/fixed.sh b/tests/functional/fixed.sh index d98769e64..edf6f88d4 100755 --- a/tests/functional/fixed.sh +++ b/tests/functional/fixed.sh @@ -6,7 +6,7 @@ TODO_NixOS clearStore -path=$(nix-store -q $(nix-instantiate fixed.nix -A good.0)) +path=$(nix-store -q "$(nix-instantiate fixed.nix -A good.0)") echo 'testing bad...' nix-build fixed.nix -A bad --no-out-link && fail "should fail" @@ -14,7 +14,7 @@ nix-build fixed.nix -A bad --no-out-link && fail "should fail" # Building with the bad hash should produce the "good" output path as # a side-effect. [[ -e $path ]] -nix path-info --json $path | grep fixed:md5:2qk15sxzzjlnpjk9brn7j8ppcd +nix path-info --json "$path" | grep fixed:md5:2qk15sxzzjlnpjk9brn7j8ppcd echo 'testing good...' nix-build fixed.nix -A good --no-out-link @@ -37,7 +37,7 @@ fi # While we're at it, check attribute selection a bit more. echo 'testing attribute selection...' -test $(nix-instantiate fixed.nix -A good.1 | wc -l) = 1 +test "$(nix-instantiate fixed.nix -A good.1 | wc -l)" = 1 # Test parallel builds of derivations that produce the same output. # Only one should run at the same time. 
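The sameAsAdd checks in the hunk that follows rely on a fixed-output path being a pure function of the name, hash algorithm, content hash, and recursive (NAR) flag, so the path can be predicted without building anything. A rough sketch of that relationship, assuming nix-hash's default recursive/NAR mode; the hash shown is the one hard-coded a few lines below, not re-derived here:

# Hash the fixture directory the way a recursive fixed-output derivation would.
hash=$(nix-hash --type sha256 --base32 "$TEST_ROOT/fixed")
# The store path is then fully determined by (recursive, sha256, $hash, "fixed"):
nix-store --print-fixed-path --recursive sha256 "$hash" fixed
# ...and should match what `nix-store --add-fixed --recursive sha256` returns for the same tree.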
@@ -51,16 +51,16 @@ echo 'testing sameAsAdd...' out=$(nix-build fixed.nix -A sameAsAdd --no-out-link) # This is what fixed.builder2 produces... -rm -rf $TEST_ROOT/fixed -mkdir $TEST_ROOT/fixed -mkdir $TEST_ROOT/fixed/bla -echo "Hello World!" > $TEST_ROOT/fixed/foo -ln -s foo $TEST_ROOT/fixed/bar +rm -rf "$TEST_ROOT"/fixed +mkdir "$TEST_ROOT"/fixed +mkdir "$TEST_ROOT"/fixed/bla +echo "Hello World!" > "$TEST_ROOT"/fixed/foo +ln -s foo "$TEST_ROOT"/fixed/bar -out2=$(nix-store --add $TEST_ROOT/fixed) +out2=$(nix-store --add "$TEST_ROOT"/fixed) [ "$out" = "$out2" ] -out3=$(nix-store --add-fixed --recursive sha256 $TEST_ROOT/fixed) +out3=$(nix-store --add-fixed --recursive sha256 "$TEST_ROOT"/fixed) [ "$out" = "$out3" ] out4=$(nix-store --print-fixed-path --recursive sha256 "1ixr6yd3297ciyp9im522dfxpqbkhcw0pylkb2aab915278fqaik" fixed) From 8839bab84d97acc8531bf44399b5ca1269dd6f21 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Thu, 25 Sep 2025 13:20:00 -0700 Subject: [PATCH 103/332] shellcheck fix: completion files --- maintainers/flake-module.nix | 4 ---- misc/fish/completion.fish | 1 + misc/zsh/completion.zsh | 1 + 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 0742d2a6a..3de348703 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -197,10 +197,6 @@ ''^tests/functional/user-envs\.sh$'' ''^tests/functional/why-depends\.sh$'' - # Shellcheck doesn't support fish or zsh shell syntax - ''^misc/fish/completion\.fish$'' - ''^misc/zsh/completion\.zsh$'' - # Content-addressed test files that use recursive-*looking* sourcing # (cd .. && source ), causing shellcheck to loop # They're small wrapper scripts with not a lot going on diff --git a/misc/fish/completion.fish b/misc/fish/completion.fish index c6b8ef16a..b6584963b 100644 --- a/misc/fish/completion.fish +++ b/misc/fish/completion.fish @@ -1,3 +1,4 @@ +# shellcheck disable=all function _nix_complete # Get the current command up to a cursor. # - Behaves correctly even with pipes and nested in commands like env. 
diff --git a/misc/zsh/completion.zsh b/misc/zsh/completion.zsh index f9b3dca74..eb26a16cb 100644 --- a/misc/zsh/completion.zsh +++ b/misc/zsh/completion.zsh @@ -1,3 +1,4 @@ +# shellcheck disable=all #compdef nix function _nix() { From 9bf8e7b73019615ffa14a73086205ab424a328a8 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Thu, 25 Sep 2025 13:20:51 -0700 Subject: [PATCH 104/332] shellcheck fix: tests/functional/flakes/absolute-paths.sh --- maintainers/flake-module.nix | 1 - tests/functional/flakes/absolute-paths.sh | 10 +++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 3de348703..84f444480 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/flakes/absolute-paths\.sh$'' ''^tests/functional/flakes/check\.sh$'' ''^tests/functional/flakes/config\.sh$'' ''^tests/functional/flakes/flakes\.sh$'' diff --git a/tests/functional/flakes/absolute-paths.sh b/tests/functional/flakes/absolute-paths.sh index a355a7a1c..6565857cb 100755 --- a/tests/functional/flakes/absolute-paths.sh +++ b/tests/functional/flakes/absolute-paths.sh @@ -7,13 +7,13 @@ requireGit flake1Dir=$TEST_ROOT/flake1 flake2Dir=$TEST_ROOT/flake2 -createGitRepo $flake1Dir -cat > $flake1Dir/flake.nix < "$flake1Dir"/flake.nix < Date: Thu, 25 Sep 2025 13:23:02 -0700 Subject: [PATCH 105/332] shellcheck fix: tests/functional/flakes/check.sh --- maintainers/flake-module.nix | 1 - tests/functional/flakes/check.sh | 61 +++++++++++++++++--------------- 2 files changed, 33 insertions(+), 29 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 84f444480..6f9985e0b 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/flakes/check\.sh$'' ''^tests/functional/flakes/config\.sh$'' ''^tests/functional/flakes/flakes\.sh$'' ''^tests/functional/flakes/follow-paths\.sh$'' diff --git a/tests/functional/flakes/check.sh b/tests/functional/flakes/check.sh index 9a356c2ed..55cd3805f 100755 --- a/tests/functional/flakes/check.sh +++ b/tests/functional/flakes/check.sh @@ -3,9 +3,9 @@ source common.sh flakeDir=$TEST_ROOT/flake3 -mkdir -p $flakeDir +mkdir -p "$flakeDir" -cat > $flakeDir/flake.nix < "$flakeDir"/flake.nix < $flakeDir/flake.nix < $flakeDir/flake.nix < "$flakeDir"/flake.nix < $flakeDir/flake.nix < $flakeDir/flake.nix < "$flakeDir"/flake.nix < $flakeDir/flake.nix <&1 && fail "nix flake check --all-systems should have failed" || true) +# shellcheck disable=SC2015 +checkRes=$(nix flake check "$flakeDir" 2>&1 && fail "nix flake check --all-systems should have failed" || true) echo "$checkRes" | grepQuiet "error: overlay is not a function, but a set instead" -cat > $flakeDir/flake.nix < "$flakeDir"/flake.nix < $flakeDir/flake.nix < $flakeDir/flake.nix < "$flakeDir"/flake.nix < $flakeDir/flake.nix < $flakeDir/flake.nix < "$flakeDir"/flake.nix < $flakeDir/flake.nix < $flakeDir/flake.nix < "$flakeDir"/flake.nix < $flakeDir/flake.nix <&1 && fail "nix flake check --all-systems should have failed" || true) +# shellcheck disable=SC2015 +checkRes=$(nix flake check --all-systems --keep-going "$flakeDir" 2>&1 && fail "nix flake check --all-systems should have failed" || true) echo "$checkRes" | grepQuiet "packages.system-1.default" echo "$checkRes" | grepQuiet "packages.system-2.default" 
-cat > $flakeDir/flake.nix < "$flakeDir"/flake.nix < $flakeDir/flake.nix < $flakeDir/flake.nix < "$flakeDir"/flake.nix < $flakeDir/flake.nix <&1 && fail "nix flake check --all-systems should have failed" || true) +# shellcheck disable=SC2015 +checkRes=$(nix flake check --all-systems "$flakeDir" 2>&1 && fail "nix flake check --all-systems should have failed" || true) echo "$checkRes" | grepQuiet "unknown-attr" -cat > $flakeDir/flake.nix < "$flakeDir"/flake.nix < $flakeDir/flake.nix <&1 && fail "nix flake check --all-systems should have failed" || true) +# shellcheck disable=SC2015 +checkRes=$(nix flake check --all-systems "$flakeDir" 2>&1 && fail "nix flake check --all-systems should have failed" || true) echo "$checkRes" | grepQuiet "formatter.system-1" # Test whether `nix flake check` builds checks. -cat > $flakeDir/flake.nix < "$flakeDir"/flake.nix < $flakeDir/flake.nix < "$flakeDir"/flake.nix < $flakeDir/flake.nix < $flakeDir/flake.nix < "$flakeDir"/flake.nix <&1 && fail "nix flake check should have failed" || true) +# shellcheck disable=SC2015 +checkRes=$(nix flake check "$flakeDir" 2>&1 && fail "nix flake check should have failed" || true) echo "$checkRes" | grepQuiet -E "builder( for .*)? failed with exit code 1" From ac5615dd91e042711177f25e6e01d778697b55f1 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Thu, 25 Sep 2025 13:24:30 -0700 Subject: [PATCH 106/332] shellcheck fix: tests/functional/flakes/config.sh --- maintainers/flake-module.nix | 1 - tests/functional/flakes/config.sh | 5 +++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 6f9985e0b..024565116 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/flakes/config\.sh$'' ''^tests/functional/flakes/flakes\.sh$'' ''^tests/functional/flakes/follow-paths\.sh$'' ''^tests/functional/flakes/prefetch\.sh$'' diff --git a/tests/functional/flakes/config.sh b/tests/functional/flakes/config.sh index ab2d9f47c..87714b5db 100755 --- a/tests/functional/flakes/config.sh +++ b/tests/functional/flakes/config.sh @@ -2,9 +2,9 @@ source common.sh -cp ../simple.nix ../simple.builder.sh "${config_nix}" $TEST_HOME +cp ../simple.nix ../simple.builder.sh "${config_nix}" "$TEST_HOME" -cd $TEST_HOME +cd "$TEST_HOME" rm -f post-hook-ran cat < echoing-post-hook.sh @@ -37,6 +37,7 @@ if type -p script >/dev/null && script -q -c true /dev/null; then else echo "script is not available or not GNU-like, so we skip testing with an added tty" fi +# shellcheck disable=SC2235 (! 
[[ -f post-hook-ran ]]) TODO_NixOS clearStore From 6fc8f04ecb3241c39a10988d358cdcd226de5ac9 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Thu, 25 Sep 2025 13:26:53 -0700 Subject: [PATCH 107/332] shellcheck fix: tests/functional/flakes/flakes.sh --- maintainers/flake-module.nix | 1 - tests/functional/flakes/flakes.sh | 90 ++++++++++++++++--------------- 2 files changed, 46 insertions(+), 45 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 024565116..50601e06f 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/flakes/flakes\.sh$'' ''^tests/functional/flakes/follow-paths\.sh$'' ''^tests/functional/flakes/prefetch\.sh$'' ''^tests/functional/flakes/run\.sh$'' diff --git a/tests/functional/flakes/flakes.sh b/tests/functional/flakes/flakes.sh index 97d238654..5b1da0f02 100755 --- a/tests/functional/flakes/flakes.sh +++ b/tests/functional/flakes/flakes.sh @@ -7,7 +7,7 @@ TODO_NixOS requireGit clearStore -rm -rf $TEST_HOME/.cache $TEST_HOME/.config +rm -rf "$TEST_HOME"/.cache "$TEST_HOME"/.config createFlake1 createFlake2 @@ -59,7 +59,7 @@ nix flake metadata flake1 nix flake metadata flake1 | grepQuiet 'Locked URL:.*flake1.*' # Test 'nix flake metadata' on a chroot store. -nix flake metadata --store $TEST_ROOT/chroot-store flake1 +nix flake metadata --store "$TEST_ROOT"/chroot-store flake1 # Test 'nix flake metadata' on a local flake. (cd "$flake1Dir" && nix flake metadata) | grepQuiet 'URL:.*flake1.*' @@ -75,17 +75,18 @@ hash1=$(echo "$json" | jq -r .revision) [[ -n $(echo "$json" | jq -r .fingerprint) ]] echo foo > "$flake1Dir/foo" -git -C "$flake1Dir" add $flake1Dir/foo +git -C "$flake1Dir" add "$flake1Dir"/foo [[ $(nix flake metadata flake1 --json --refresh | jq -r .dirtyRevision) == "$hash1-dirty" ]] [[ "$(nix flake metadata flake1 --json | jq -r .fingerprint)" != null ]] echo -n '# foo' >> "$flake1Dir/flake.nix" flake1OriginalCommit=$(git -C "$flake1Dir" rev-parse HEAD) git -C "$flake1Dir" commit -a -m 'Foo' +# shellcheck disable=SC2034 flake1NewCommit=$(git -C "$flake1Dir" rev-parse HEAD) hash2=$(nix flake metadata flake1 --json --refresh | jq -r .revision) [[ $(nix flake metadata flake1 --json --refresh | jq -r .dirtyRevision) == "null" ]] -[[ $hash1 != $hash2 ]] +[[ $hash1 != "$hash2" ]] # Test 'nix build' on a flake. nix build -o "$TEST_ROOT/result" flake1#foo @@ -204,8 +205,8 @@ git -C "$flake3Dir" add flake.nix git -C "$flake3Dir" commit -m 'Update flake.nix' # Check whether `nix build` works with an incomplete lockfile -nix build -o $TEST_ROOT/result "$flake3Dir#sth sth" -nix build -o $TEST_ROOT/result "$flake3Dir#sth%20sth" +nix build -o "$TEST_ROOT"/result "$flake3Dir#sth sth" +nix build -o "$TEST_ROOT"/result "$flake3Dir#sth%20sth" # Check whether it saved the lockfile [[ -n $(git -C "$flake3Dir" diff master) ]] @@ -249,7 +250,7 @@ nix flake lock "$flake3Dir" [[ -z $(git -C "$flake3Dir" diff master || echo failed) ]] nix flake update --flake "$flake3Dir" --override-flake flake2 nixpkgs -[[ ! -z $(git -C "$flake3Dir" diff master || echo failed) ]] +[[ -n $(git -C "$flake3Dir" diff master || echo failed) ]] # Testing the nix CLI nix registry add flake1 flake3 @@ -262,7 +263,7 @@ nix registry remove flake1 [[ $(nix registry list | wc -l) == 4 ]] # Test 'nix registry list' with a disabled global registry. 
-nix registry add user-flake1 git+file://$flake1Dir +nix registry add user-flake1 git+file://"$flake1Dir" nix registry add user-flake2 "git+file://$percentEncodedFlake2Dir" [[ $(nix --flake-registry "" registry list | wc -l) == 2 ]] nix --flake-registry "" registry list | grepQuietInverse '^global' # nothing in global registry @@ -273,9 +274,9 @@ nix registry remove user-flake2 [[ $(nix registry list | wc -l) == 4 ]] # Test 'nix flake clone'. -rm -rf $TEST_ROOT/flake1-v2 -nix flake clone flake1 --dest $TEST_ROOT/flake1-v2 -[ -e $TEST_ROOT/flake1-v2/flake.nix ] +rm -rf "$TEST_ROOT"/flake1-v2 +nix flake clone flake1 --dest "$TEST_ROOT"/flake1-v2 +[ -e "$TEST_ROOT"/flake1-v2/flake.nix ] # Test 'follows' inputs. cat > "$flake3Dir/flake.nix" < "$flake3Dir/flake.nix" < "$flake3Dir/flake.nix" < $badFlakeDir/flake.nix -nix store delete $(nix store add-path $badFlakeDir) +rm -rf "$badFlakeDir" +mkdir "$badFlakeDir" +echo INVALID > "$badFlakeDir"/flake.nix +nix store delete "$(nix store add-path "$badFlakeDir")" -[[ $(nix path-info $(nix store add-path $flake1Dir)) =~ flake1 ]] -[[ $(nix path-info path:$(nix store add-path $flake1Dir)) =~ simple ]] +[[ $(nix path-info "$(nix store add-path "$flake1Dir")") =~ flake1 ]] +[[ $(nix path-info path:"$(nix store add-path "$flake1Dir")") =~ simple ]] # Test fetching flakerefs in the legacy CLI. [[ $(nix-instantiate --eval flake:flake3 -A x) = 123 ]] @@ -424,15 +426,15 @@ nix store delete $(nix store add-path $badFlakeDir) [[ $(NIX_PATH=flake3=flake:flake3 nix-instantiate --eval '' -A x) = 123 ]] # Test alternate lockfile paths. -nix flake lock "$flake2Dir" --output-lock-file $TEST_ROOT/flake2.lock -cmp "$flake2Dir/flake.lock" $TEST_ROOT/flake2.lock >/dev/null # lockfiles should be identical, since we're referencing flake2's original one +nix flake lock "$flake2Dir" --output-lock-file "$TEST_ROOT"/flake2.lock +cmp "$flake2Dir/flake.lock" "$TEST_ROOT"/flake2.lock >/dev/null # lockfiles should be identical, since we're referencing flake2's original one -nix flake lock "$flake2Dir" --output-lock-file $TEST_ROOT/flake2-overridden.lock --override-input flake1 git+file://$flake1Dir?rev=$flake1OriginalCommit -expectStderr 1 cmp "$flake2Dir/flake.lock" $TEST_ROOT/flake2-overridden.lock -nix flake metadata "$flake2Dir" --reference-lock-file $TEST_ROOT/flake2-overridden.lock | grepQuiet $flake1OriginalCommit +nix flake lock "$flake2Dir" --output-lock-file "$TEST_ROOT"/flake2-overridden.lock --override-input flake1 git+file://"$flake1Dir"?rev="$flake1OriginalCommit" +expectStderr 1 cmp "$flake2Dir/flake.lock" "$TEST_ROOT"/flake2-overridden.lock +nix flake metadata "$flake2Dir" --reference-lock-file "$TEST_ROOT"/flake2-overridden.lock | grepQuiet "$flake1OriginalCommit" # reference-lock-file can only be used if allow-dirty is set. -expectStderr 1 nix flake metadata "$flake2Dir" --no-allow-dirty --reference-lock-file $TEST_ROOT/flake2-overridden.lock +expectStderr 1 nix flake metadata "$flake2Dir" --no-allow-dirty --reference-lock-file "$TEST_ROOT"/flake2-overridden.lock # After changing an input (flake2 from newFlake2Rev to prevFlake2Rev), we should have the transitive inputs locked by revision $prevFlake2Rev of flake2. 
prevFlake1Rev=$(nix flake metadata --json "$flake1Dir" | jq -r .revision) @@ -459,7 +461,7 @@ git -C "$flake3Dir" commit flake.nix -m 'bla' rm "$flake3Dir/flake.lock" nix flake lock "$flake3Dir" -[[ "$(nix flake metadata --json "$flake3Dir" | jq -r .locks.nodes.flake1.locked.rev)" = $newFlake1Rev ]] +[[ "$(nix flake metadata --json "$flake3Dir" | jq -r .locks.nodes.flake1.locked.rev)" = "$newFlake1Rev" ]] cat > "$flake3Dir/flake.nix" < "$flake3Dir/flake.nix" < Date: Thu, 25 Sep 2025 13:27:34 -0700 Subject: [PATCH 108/332] shellcheck fix: tests/functional/flakes/follow-paths.sh --- maintainers/flake-module.nix | 1 - tests/functional/flakes/follow-paths.sh | 97 +++++++++++++------------ 2 files changed, 49 insertions(+), 49 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 50601e06f..0e9363408 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/flakes/follow-paths\.sh$'' ''^tests/functional/flakes/prefetch\.sh$'' ''^tests/functional/flakes/run\.sh$'' ''^tests/functional/flakes/show\.sh$'' diff --git a/tests/functional/flakes/follow-paths.sh b/tests/functional/flakes/follow-paths.sh index cf27681cb..1a6661be5 100755 --- a/tests/functional/flakes/follow-paths.sh +++ b/tests/functional/flakes/follow-paths.sh @@ -11,13 +11,13 @@ flakeFollowsD=$TEST_ROOT/follows/flakeA/flakeD flakeFollowsE=$TEST_ROOT/follows/flakeA/flakeE # Test following path flakerefs. -createGitRepo $flakeFollowsA -mkdir -p $flakeFollowsB -mkdir -p $flakeFollowsC -mkdir -p $flakeFollowsD -mkdir -p $flakeFollowsE +createGitRepo "$flakeFollowsA" +mkdir -p "$flakeFollowsB" +mkdir -p "$flakeFollowsC" +mkdir -p "$flakeFollowsD" +mkdir -p "$flakeFollowsE" -cat > $flakeFollowsA/flake.nix < "$flakeFollowsA"/flake.nix < $flakeFollowsA/flake.nix < $flakeFollowsB/flake.nix < "$flakeFollowsB"/flake.nix < $flakeFollowsB/flake.nix < $flakeFollowsC/flake.nix < "$flakeFollowsC"/flake.nix < $flakeFollowsC/flake.nix < $flakeFollowsD/flake.nix < "$flakeFollowsD"/flake.nix < $flakeFollowsD/flake.nix < $flakeFollowsE/flake.nix < "$flakeFollowsE"/flake.nix < $flakeFollowsE/flake.nix < $flakeFollowsA/flake.nix < "$flakeFollowsA"/flake.nix < $flakeFollowsA/flake.nix < $flakeFollowsA/flake.nix < "$flakeFollowsA"/flake.nix < $flakeFollowsA/flake.nix <&1 | grep '/flakeB.*is forbidden in pure evaluation mode' -expect 1 nix flake lock --impure $flakeFollowsA 2>&1 | grep '/flakeB.*does not exist' +expect 1 nix flake lock "$flakeFollowsA" 2>&1 | grep '/flakeB.*is forbidden in pure evaluation mode' +expect 1 nix flake lock --impure "$flakeFollowsA" 2>&1 | grep '/flakeB.*does not exist' # Test relative non-flake inputs. -cat > $flakeFollowsA/flake.nix < "$flakeFollowsA"/flake.nix < $flakeFollowsA/flake.nix < $flakeFollowsA/foo.nix +echo 123 > "$flakeFollowsA"/foo.nix -git -C $flakeFollowsA add flake.nix foo.nix +git -C "$flakeFollowsA" add flake.nix foo.nix -nix flake lock $flakeFollowsA +nix flake lock "$flakeFollowsA" -[[ $(nix eval --json $flakeFollowsA#e) = 123 ]] +[[ $(nix eval --json "$flakeFollowsA"#e) = 123 ]] # Non-existant follows should print a warning. 
-cat >$flakeFollowsA/flake.nix <"$flakeFollowsA"/flake.nix <$flakeFollowsA/flake.nix <&1 | grep "warning: input 'B' has an override for a non-existent input 'invalid'" nix flake lock "$flakeFollowsA" 2>&1 | grep "warning: input 'B' has an override for a non-existent input 'invalid2'" @@ -269,7 +269,7 @@ flakeFollowCycle="$TEST_ROOT/follows/followCycle" # Test following path flakerefs. mkdir -p "$flakeFollowCycle" -cat > $flakeFollowCycle/flake.nix < "$flakeFollowCycle"/flake.nix < $flakeFollowCycle/flake.nix <&1 && fail "nix flake lock should have failed." || true) -echo $checkRes | grep -F "error: follow cycle detected: [baz -> foo -> bar -> baz]" +echo "$checkRes" | grep -F "error: follow cycle detected: [baz -> foo -> bar -> baz]" # Test transitive input url locking @@ -362,22 +363,22 @@ echo "$json" | jq .locks.nodes.C.original # Test deep overrides, e.g. `inputs.B.inputs.C.inputs.D.follows = ...`. -cat < $flakeFollowsD/flake.nix +cat < "$flakeFollowsD"/flake.nix { outputs = _: {}; } EOF -cat < $flakeFollowsC/flake.nix +cat < "$flakeFollowsC"/flake.nix { inputs.D.url = "path:nosuchflake"; outputs = _: {}; } EOF -cat < $flakeFollowsB/flake.nix +cat < "$flakeFollowsB"/flake.nix { inputs.C.url = "path:$flakeFollowsC"; outputs = _: {}; } EOF -cat < $flakeFollowsA/flake.nix +cat < "$flakeFollowsA"/flake.nix { inputs.B.url = "path:$flakeFollowsB"; inputs.D.url = "path:$flakeFollowsD"; @@ -386,26 +387,26 @@ cat < $flakeFollowsA/flake.nix } EOF -nix flake lock $flakeFollowsA +nix flake lock "$flakeFollowsA" -[[ $(jq -c .nodes.C.inputs.D $flakeFollowsA/flake.lock) = '["D"]' ]] +[[ $(jq -c .nodes.C.inputs.D "$flakeFollowsA"/flake.lock) = '["D"]' ]] # Test overlapping flake follows: B has D follow C/D, while A has B/C follow C -cat < $flakeFollowsC/flake.nix +cat < "$flakeFollowsC"/flake.nix { inputs.D.url = "path:$flakeFollowsD"; outputs = _: {}; } EOF -cat < $flakeFollowsB/flake.nix +cat < "$flakeFollowsB"/flake.nix { inputs.C.url = "path:nosuchflake"; inputs.D.follows = "C/D"; outputs = _: {}; } EOF -cat < $flakeFollowsA/flake.nix +cat < "$flakeFollowsA"/flake.nix { inputs.B.url = "path:$flakeFollowsB"; inputs.C.url = "path:$flakeFollowsC"; @@ -415,12 +416,12 @@ cat < $flakeFollowsA/flake.nix EOF # bug was not triggered without recreating the lockfile -nix flake lock $flakeFollowsA --recreate-lock-file +nix flake lock "$flakeFollowsA" --recreate-lock-file -[[ $(jq -c .nodes.B.inputs.D $flakeFollowsA/flake.lock) = '["B","C","D"]' ]] +[[ $(jq -c .nodes.B.inputs.D "$flakeFollowsA"/flake.lock) = '["B","C","D"]' ]] # Check that you can't have both a flakeref and a follows attribute on an input. -cat < $flakeFollowsB/flake.nix +cat < "$flakeFollowsB"/flake.nix { inputs.C.url = "path:nosuchflake"; inputs.D.url = "path:nosuchflake"; @@ -429,4 +430,4 @@ cat < $flakeFollowsB/flake.nix } EOF -expectStderr 1 nix flake lock $flakeFollowsA --recreate-lock-file | grepQuiet "flake input has both a flake reference and a follows attribute" +expectStderr 1 nix flake lock "$flakeFollowsA" --recreate-lock-file | grepQuiet "flake input has both a flake reference and a follows attribute" From 7bd67cd8dcf0efacd91d5984ed4bf25eda5e4784 Mon Sep 17 00:00:00 2001 From: rszyma Date: Fri, 26 Sep 2025 19:49:36 +0200 Subject: [PATCH 109/332] doc: Fix invalid devshell attrpath `native-clangStdenvPackages` devshell attrpath was being mentioned in development docs, but doesn't work anymore (since 69fde530). 
--- doc/manual/source/development/building.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/manual/source/development/building.md b/doc/manual/source/development/building.md index a07232a5f..889d81d80 100644 --- a/doc/manual/source/development/building.md +++ b/doc/manual/source/development/building.md @@ -23,7 +23,7 @@ $ nix-shell To get a shell with one of the other [supported compilation environments](#compilation-environments): ```console -$ nix-shell --attr devShells.x86_64-linux.native-clangStdenvPackages +$ nix-shell --attr devShells.x86_64-linux.native-clangStdenv ``` > **Note** From 866c9179a04d2ac758e75877fa97248327305fec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Fri, 26 Sep 2025 23:29:24 +0200 Subject: [PATCH 110/332] document thread-unsafe mutation in PosixSourceAccessor --- src/libutil/posix-source-accessor.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/libutil/posix-source-accessor.cc b/src/libutil/posix-source-accessor.cc index c524f3e4f..fe3bcb1c1 100644 --- a/src/libutil/posix-source-accessor.cc +++ b/src/libutil/posix-source-accessor.cc @@ -114,6 +114,8 @@ std::optional PosixSourceAccessor::maybeLstat(const CanonP auto st = cachedLstat(path); if (!st) return std::nullopt; + // This makes the accessor thread-unsafe, but we only seem to use the actual value in a single threaded context in + // `src/libfetchers/path.cc`. mtime = std::max(mtime, st->st_mtime); return Stat{ .type = S_ISREG(st->st_mode) ? tRegular From 0a3eb22360bdb5e948ef8e7cb8f41958c541b54b Mon Sep 17 00:00:00 2001 From: Yaroslav Bolyukin Date: Sat, 19 Jul 2025 23:52:32 +0200 Subject: [PATCH 111/332] fix: wait on incomplete assignment in REPL Fixes: https://github.com/NixOS/nix/issues/13507 --- src/libcmd/repl.cc | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/src/libcmd/repl.cc b/src/libcmd/repl.cc index 5c6dd7ffb..38d06336b 100644 --- a/src/libcmd/repl.cc +++ b/src/libcmd/repl.cc @@ -869,14 +869,8 @@ void NixRepl::addVarToScope(const Symbol name, Value & v) Expr * NixRepl::parseString(std::string s) { - return state->parseExprFromString(std::move(s), state->rootPath("."), staticEnv); -} - -void NixRepl::evalString(std::string s, Value & v) -{ - Expr * e; try { - e = parseString(s); + return state->parseExprFromString(std::move(s), state->rootPath("."), staticEnv); } catch (ParseError & e) { if (e.msg().find("unexpected end of file") != std::string::npos) // For parse errors on incomplete input, we continue waiting for the next line of @@ -885,6 +879,11 @@ void NixRepl::evalString(std::string s, Value & v) else throw; } +} + +void NixRepl::evalString(std::string s, Value & v) +{ + Expr * e = parseString(s); e->eval(*state, *env, v); state->forceValue(v, v.determinePos(noPos)); } From 3c610df550be35d9696efe9dd3217a6e1ec100f2 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Fri, 26 Sep 2025 00:22:54 -0400 Subject: [PATCH 112/332] Delete scratch data for CA derivation that produced already-extant output In the case where the store object doesn't exist, we do correctly move (rather than copy) the scratch data into place. In this case, the destination store object already exists, but we still want to clean up after ourselves. 
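A minimal sketch of the control flow this patch fixes may be clearer than the two-line diff on its own. It uses std::filesystem in place of Nix's internal deletePath/rename helpers, and the names (finalizeScratchOutput, scratchPath, destPath) are hypothetical, purely for illustration:

    // Simplified illustration of the cleanup logic described above.
    // std::filesystem stands in for Nix's own path helpers; names here
    // are hypothetical and do not mirror the real code.
    #include <filesystem>
    #include <iostream>
    #include <stdexcept>

    namespace fs = std::filesystem;

    // Move a freshly built (scratch) output into its final store location,
    // or discard it if an equivalent content-addressed object already exists.
    void finalizeScratchOutput(const fs::path & scratchPath, const fs::path & destPath, bool isContentAddressed)
    {
        if (fs::exists(destPath)) {
            // The destination store object was already produced by something
            // else; that is only legitimate for content-addressed outputs.
            if (!isContentAddressed)
                throw std::runtime_error("destination unexpectedly exists for non-CA output");
            // Previously the scratch copy was simply left behind at this point;
            // the fix is to delete it so no garbage accumulates.
            fs::remove_all(scratchPath);
        } else {
            // Normal case: move (not copy) the scratch data into place.
            fs::rename(scratchPath, destPath);
        }
    }

    int main()
    {
        fs::create_directories("scratch/out");
        fs::create_directories("store/abc-out"); // pretend another builder got there first
        finalizeScratchOutput("scratch/out", "store/abc-out", /*isContentAddressed=*/true);
        std::cout << "scratch left behind? " << std::boolalpha << fs::exists("scratch/out") << "\n";
    }

Running the sketch prints "false": once the already-extant destination is accepted, the scratch copy is gone, which is exactly the behaviour the added deletePath(actualPath) call provides in the real builder.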
--- src/libstore/unix/build/derivation-builder.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/libstore/unix/build/derivation-builder.cc b/src/libstore/unix/build/derivation-builder.cc index 770bdad4d..3a6f71555 100644 --- a/src/libstore/unix/build/derivation-builder.cc +++ b/src/libstore/unix/build/derivation-builder.cc @@ -1712,6 +1712,8 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() /* Path already exists because CA path produced by something else. No moving needed. */ assert(newInfo.ca); + /* Can delete our scratch copy now. */ + deletePath(actualPath); } else { auto destPath = store.toRealPath(finalDestPath); deletePath(destPath); From 43550e8edb81e423619c2bc6d18018e095c5c468 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Tue, 23 Sep 2025 16:21:56 -0400 Subject: [PATCH 113/332] Lock down `BuildResult::Status` enum values This allows refactoring without changing wire protocol by mistake. --- .../include/nix/store/build-result.hh | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/src/libstore/include/nix/store/build-result.hh b/src/libstore/include/nix/store/build-result.hh index d7249d420..1911fef39 100644 --- a/src/libstore/include/nix/store/build-result.hh +++ b/src/libstore/include/nix/store/build-result.hh @@ -20,26 +20,26 @@ struct BuildResult */ enum Status { Built = 0, - Substituted, - AlreadyValid, - PermanentFailure, - InputRejected, - OutputRejected, + Substituted = 1, + AlreadyValid = 2, + PermanentFailure = 3, + InputRejected = 4, + OutputRejected = 5, /// possibly transient - TransientFailure, + TransientFailure = 6, /// no longer used - CachedFailure, - TimedOut, - MiscFailure, - DependencyFailed, - LogLimitExceeded, - NotDeterministic, - ResolvesToAlreadyValid, - NoSubstituters, + CachedFailure = 7, + TimedOut = 8, + MiscFailure = 9, + DependencyFailed = 10, + LogLimitExceeded = 11, + NotDeterministic = 12, + ResolvesToAlreadyValid = 13, + NoSubstituters = 14, /// A certain type of `OutputRejected`. The protocols do not yet /// know about this one, so change it back to `OutputRejected` /// before serialization. - HashMismatch, + HashMismatch = 15, } status = MiscFailure; /** From e731c43eae9c08b8649708dcc5a76e8a99eda929 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Tue, 23 Sep 2025 18:09:56 -0400 Subject: [PATCH 114/332] Use `std::variant` to enforce `BuildResult` invariants There is now a clean separation between successful and failing build results. 
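Before the full diff, a reduced, compilable sketch of the shape this refactor gives BuildResult may help: Success and Failure become separate structs, a std::variant member replaces the single flat status/error/outputs bundle, and callers either probe with tryGetSuccess/tryGetFailure or dispatch with std::visit. The field names below match the patch, but the type is heavily trimmed (no timings, timesBuilt, or the real status enums) and is an illustration only, not the actual header:

    // Reduced sketch of the variant-based BuildResult described above.
    #include <iostream>
    #include <map>
    #include <string>
    #include <type_traits>
    #include <variant>

    struct BuildResultSketch
    {
        struct Success
        {
            std::map<std::string, std::string> builtOutputs; // output name -> store path
        };

        struct Failure
        {
            std::string errorMsg;
        };

        // The invariant "a failure never carries built outputs, a success never
        // carries an error message" is now enforced by the type itself.
        std::variant<Success, Failure> inner;

        const Success * tryGetSuccess() const { return std::get_if<Success>(&inner); }
        const Failure * tryGetFailure() const { return std::get_if<Failure>(&inner); }
    };

    void report(const BuildResultSketch & res)
    {
        std::visit(
            [](auto && r) {
                using T = std::decay_t<decltype(r)>;
                if constexpr (std::is_same_v<T, BuildResultSketch::Success>) {
                    for (auto & [name, path] : r.builtOutputs)
                        std::cout << name << " -> " << path << "\n";
                } else {
                    std::cout << "build failed: " << r.errorMsg << "\n";
                }
            },
            res.inner);
    }

    int main()
    {
        report({.inner = BuildResultSketch::Success{.builtOutputs = {{"out", "/nix/store/...-hello"}}}});
        report({.inner = BuildResultSketch::Failure{.errorMsg = "no idea why"}});
    }

The payoff, visible throughout the diff below, is that invalid states such as a failed build carrying builtOutputs can no longer be constructed, and every consumer is forced to handle both alternatives explicitly.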
--- src/libcmd/installables.cc | 18 +- src/libstore-c/nix_api_store.cc | 8 +- src/libstore-tests/serve-protocol.cc | 108 +++++----- src/libstore-tests/worker-protocol.cc | 194 +++++++++--------- src/libstore/build-result.cc | 6 + .../build/derivation-building-goal.cc | 87 ++++---- src/libstore/build/derivation-check.cc | 12 +- src/libstore/build/derivation-goal.cc | 72 ++++--- .../build/derivation-trampoline-goal.cc | 9 +- src/libstore/build/entry-points.cc | 6 +- src/libstore/build/substitution-goal.cc | 32 +-- src/libstore/derivation-options.cc | 4 +- .../include/nix/store/build-result.hh | 174 ++++++++++------ .../nix/store/build/derivation-builder.hh | 2 +- .../store/build/derivation-building-goal.hh | 2 +- .../nix/store/build/derivation-goal.hh | 2 +- .../nix/store/build/substitution-goal.hh | 4 +- src/libstore/legacy-ssh-store.cc | 13 +- src/libstore/local-store.cc | 2 +- src/libstore/misc.cc | 2 +- src/libstore/posix-fs-canonicalise.cc | 2 +- src/libstore/remote-store.cc | 21 +- src/libstore/restricted-store.cc | 12 +- src/libstore/serve-protocol.cc | 60 ++++-- src/libstore/store-api.cc | 2 +- src/libstore/unix/build/derivation-builder.cc | 20 +- src/libstore/worker-protocol.cc | 68 ++++-- src/nix/build-remote/build-remote.cc | 17 +- .../functional/test-libstoreconsumer/main.cc | 6 +- 29 files changed, 568 insertions(+), 397 deletions(-) diff --git a/src/libcmd/installables.cc b/src/libcmd/installables.cc index 96ff06ad3..91ad74308 100644 --- a/src/libcmd/installables.cc +++ b/src/libcmd/installables.cc @@ -604,28 +604,28 @@ std::vector Installable::build( static void throwBuildErrors(std::vector & buildResults, const Store & store) { - std::vector failed; + std::vector> failed; for (auto & buildResult : buildResults) { - if (!buildResult.success()) { - failed.push_back(buildResult); + if (auto * failure = buildResult.tryGetFailure()) { + failed.push_back({&buildResult, failure}); } } auto failedResult = failed.begin(); if (failedResult != failed.end()) { if (failed.size() == 1) { - failedResult->rethrow(); + failedResult->second->rethrow(); } else { StringSet failedPaths; for (; failedResult != failed.end(); failedResult++) { - if (!failedResult->errorMsg.empty()) { + if (!failedResult->second->errorMsg.empty()) { logError( ErrorInfo{ .level = lvlError, - .msg = failedResult->errorMsg, + .msg = failedResult->second->errorMsg, }); } - failedPaths.insert(failedResult->path.to_string(store)); + failedPaths.insert(failedResult->first->path.to_string(store)); } throw Error("build of %s failed", concatStringsSep(", ", quoteStrings(failedPaths))); } @@ -695,12 +695,14 @@ std::vector, BuiltPathWithResult>> Installable::build auto buildResults = store->buildPathsWithResults(pathsToBuild, bMode, evalStore); throwBuildErrors(buildResults, *store); for (auto & buildResult : buildResults) { + // If we didn't throw, they must all be sucesses + auto & success = std::get(buildResult.inner); for (auto & aux : backmap[buildResult.path]) { std::visit( overloaded{ [&](const DerivedPath::Built & bfd) { std::map outputs; - for (auto & [outputName, realisation] : buildResult.builtOutputs) + for (auto & [outputName, realisation] : success.builtOutputs) outputs.emplace(outputName, realisation.outPath); res.push_back( {aux.installable, diff --git a/src/libstore-c/nix_api_store.cc b/src/libstore-c/nix_api_store.cc index c4c17f127..68b642d86 100644 --- a/src/libstore-c/nix_api_store.cc +++ b/src/libstore-c/nix_api_store.cc @@ -145,9 +145,11 @@ nix_err nix_store_realise( if (callback) { for (const auto & 
result : results) { - for (const auto & [outputName, realisation] : result.builtOutputs) { - StorePath p{realisation.outPath}; - callback(userdata, outputName.c_str(), &p); + if (auto * success = result.tryGetSuccess()) { + for (const auto & [outputName, realisation] : success->builtOutputs) { + StorePath p{realisation.outPath}; + callback(userdata, outputName.c_str(), &p); + } } } } diff --git a/src/libstore-tests/serve-protocol.cc b/src/libstore-tests/serve-protocol.cc index b513e1365..a63201164 100644 --- a/src/libstore-tests/serve-protocol.cc +++ b/src/libstore-tests/serve-protocol.cc @@ -127,17 +127,17 @@ VERSIONED_CHARACTERIZATION_TEST( VERSIONED_CHARACTERIZATION_TEST(ServeProtoTest, buildResult_2_2, "build-result-2.2", 2 << 8 | 2, ({ using namespace std::literals::chrono_literals; std::tuple t{ - BuildResult{ - .status = BuildResult::OutputRejected, + BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::OutputRejected, .errorMsg = "no idea why", - }, - BuildResult{ - .status = BuildResult::NotDeterministic, + }}}, + BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::NotDeterministic, .errorMsg = "no idea why", - }, - BuildResult{ - .status = BuildResult::Built, - }, + }}}, + BuildResult{.inner{BuildResult::Success{ + .status = BuildResult::Success::Built, + }}}, }; t; })) @@ -145,20 +145,24 @@ VERSIONED_CHARACTERIZATION_TEST(ServeProtoTest, buildResult_2_2, "build-result-2 VERSIONED_CHARACTERIZATION_TEST(ServeProtoTest, buildResult_2_3, "build-result-2.3", 2 << 8 | 3, ({ using namespace std::literals::chrono_literals; std::tuple t{ - BuildResult{ - .status = BuildResult::OutputRejected, + BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::OutputRejected, .errorMsg = "no idea why", - }, + }}}, BuildResult{ - .status = BuildResult::NotDeterministic, - .errorMsg = "no idea why", + .inner{BuildResult::Failure{ + .status = BuildResult::Failure::NotDeterministic, + .errorMsg = "no idea why", + .isNonDeterministic = true, + }}, .timesBuilt = 3, - .isNonDeterministic = true, .startTime = 30, .stopTime = 50, }, BuildResult{ - .status = BuildResult::Built, + .inner{BuildResult::Success{ + .status = BuildResult::Success::Built, + }}, .startTime = 30, .stopTime = 50, }, @@ -170,48 +174,52 @@ VERSIONED_CHARACTERIZATION_TEST( ServeProtoTest, buildResult_2_6, "build-result-2.6", 2 << 8 | 6, ({ using namespace std::literals::chrono_literals; std::tuple t{ - BuildResult{ - .status = BuildResult::OutputRejected, + BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::OutputRejected, .errorMsg = "no idea why", - }, + }}}, BuildResult{ - .status = BuildResult::NotDeterministic, - .errorMsg = "no idea why", + .inner{BuildResult::Failure{ + .status = BuildResult::Failure::NotDeterministic, + .errorMsg = "no idea why", + .isNonDeterministic = true, + }}, .timesBuilt = 3, - .isNonDeterministic = true, .startTime = 30, .stopTime = 50, }, BuildResult{ - .status = BuildResult::Built, + .inner{BuildResult::Success{ + .status = BuildResult::Success::Built, + .builtOutputs = + { + { + "foo", + { + .id = + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "foo", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + }, + }, + { + "bar", + { + .id = + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "bar", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + }, + }, 
+ }, + }}, .timesBuilt = 1, - .builtOutputs = - { - { - "foo", - { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "foo", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - }, - }, - { - "bar", - { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "bar", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, - }, - }, - }, .startTime = 30, .stopTime = 50, #if 0 diff --git a/src/libstore-tests/worker-protocol.cc b/src/libstore-tests/worker-protocol.cc index 823d8d85a..489151c8c 100644 --- a/src/libstore-tests/worker-protocol.cc +++ b/src/libstore-tests/worker-protocol.cc @@ -180,17 +180,17 @@ VERSIONED_CHARACTERIZATION_TEST( VERSIONED_CHARACTERIZATION_TEST(WorkerProtoTest, buildResult_1_27, "build-result-1.27", 1 << 8 | 27, ({ using namespace std::literals::chrono_literals; std::tuple t{ - BuildResult{ - .status = BuildResult::OutputRejected, + BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::OutputRejected, .errorMsg = "no idea why", - }, - BuildResult{ - .status = BuildResult::NotDeterministic, + }}}, + BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::NotDeterministic, .errorMsg = "no idea why", - }, - BuildResult{ - .status = BuildResult::Built, - }, + }}}, + BuildResult{.inner{BuildResult::Success{ + .status = BuildResult::Success::Built, + }}}, }; t; })) @@ -199,16 +199,16 @@ VERSIONED_CHARACTERIZATION_TEST( WorkerProtoTest, buildResult_1_28, "build-result-1.28", 1 << 8 | 28, ({ using namespace std::literals::chrono_literals; std::tuple t{ - BuildResult{ - .status = BuildResult::OutputRejected, + BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::OutputRejected, .errorMsg = "no idea why", - }, - BuildResult{ - .status = BuildResult::NotDeterministic, + }}}, + BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::NotDeterministic, .errorMsg = "no idea why", - }, - BuildResult{ - .status = BuildResult::Built, + }}}, + BuildResult{.inner{BuildResult::Success{ + .status = BuildResult::Success::Built, .builtOutputs = { { @@ -236,7 +236,7 @@ VERSIONED_CHARACTERIZATION_TEST( }, }, }, - }, + }}}, }; t; })) @@ -245,48 +245,52 @@ VERSIONED_CHARACTERIZATION_TEST( WorkerProtoTest, buildResult_1_29, "build-result-1.29", 1 << 8 | 29, ({ using namespace std::literals::chrono_literals; std::tuple t{ - BuildResult{ - .status = BuildResult::OutputRejected, + BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::OutputRejected, .errorMsg = "no idea why", - }, + }}}, BuildResult{ - .status = BuildResult::NotDeterministic, - .errorMsg = "no idea why", + .inner{BuildResult::Failure{ + .status = BuildResult::Failure::NotDeterministic, + .errorMsg = "no idea why", + .isNonDeterministic = true, + }}, .timesBuilt = 3, - .isNonDeterministic = true, .startTime = 30, .stopTime = 50, }, BuildResult{ - .status = BuildResult::Built, + .inner{BuildResult::Success{ + .status = BuildResult::Success::Built, + .builtOutputs = + { + { + "foo", + { + .id = + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "foo", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + }, + }, + { + "bar", + { + .id = + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "bar", + }, + .outPath = 
StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + }, + }, + }, + }}, .timesBuilt = 1, - .builtOutputs = - { - { - "foo", - { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "foo", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - }, - }, - { - "bar", - { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "bar", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, - }, - }, - }, .startTime = 30, .stopTime = 50, }, @@ -298,48 +302,52 @@ VERSIONED_CHARACTERIZATION_TEST( WorkerProtoTest, buildResult_1_37, "build-result-1.37", 1 << 8 | 37, ({ using namespace std::literals::chrono_literals; std::tuple t{ - BuildResult{ - .status = BuildResult::OutputRejected, + BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::OutputRejected, .errorMsg = "no idea why", - }, + }}}, BuildResult{ - .status = BuildResult::NotDeterministic, - .errorMsg = "no idea why", + .inner{BuildResult::Failure{ + .status = BuildResult::Failure::NotDeterministic, + .errorMsg = "no idea why", + .isNonDeterministic = true, + }}, .timesBuilt = 3, - .isNonDeterministic = true, .startTime = 30, .stopTime = 50, }, BuildResult{ - .status = BuildResult::Built, + .inner{BuildResult::Success{ + .status = BuildResult::Success::Built, + .builtOutputs = + { + { + "foo", + { + .id = + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "foo", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + }, + }, + { + "bar", + { + .id = + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "bar", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + }, + }, + }, + }}, .timesBuilt = 1, - .builtOutputs = - { - { - "foo", - { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "foo", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - }, - }, - { - "bar", - { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "bar", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, - }, - }, - }, .startTime = 30, .stopTime = 50, .cpuUser = std::chrono::microseconds(500s), @@ -353,10 +361,10 @@ VERSIONED_CHARACTERIZATION_TEST(WorkerProtoTest, keyedBuildResult_1_29, "keyed-b using namespace std::literals::chrono_literals; std::tuple t{ KeyedBuildResult{ - { - .status = KeyedBuildResult::OutputRejected, + {.inner{BuildResult::Failure{ + .status = KeyedBuildResult::Failure::OutputRejected, .errorMsg = "no idea why", - }, + }}}, /* .path = */ DerivedPath::Opaque{ StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-xxx"}, @@ -364,10 +372,12 @@ VERSIONED_CHARACTERIZATION_TEST(WorkerProtoTest, keyedBuildResult_1_29, "keyed-b }, KeyedBuildResult{ { - .status = KeyedBuildResult::NotDeterministic, - .errorMsg = "no idea why", + .inner{BuildResult::Failure{ + .status = KeyedBuildResult::Failure::NotDeterministic, + .errorMsg = "no idea why", + .isNonDeterministic = true, + }}, .timesBuilt = 3, - .isNonDeterministic = true, .startTime = 30, .stopTime = 50, }, diff --git a/src/libstore/build-result.cc b/src/libstore/build-result.cc index 43c7adb11..ecbd27b49 100644 --- a/src/libstore/build-result.cc +++ b/src/libstore/build-result.cc @@ -5,4 +5,10 @@ 
namespace nix { bool BuildResult::operator==(const BuildResult &) const noexcept = default; std::strong_ordering BuildResult::operator<=>(const BuildResult &) const noexcept = default; +bool BuildResult::Success::operator==(const BuildResult::Success &) const noexcept = default; +std::strong_ordering BuildResult::Success::operator<=>(const BuildResult::Success &) const noexcept = default; + +bool BuildResult::Failure::operator==(const BuildResult::Failure &) const noexcept = default; +std::strong_ordering BuildResult::Failure::operator<=>(const BuildResult::Failure &) const noexcept = default; + } // namespace nix diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index ebef2a375..001816ca0 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -90,7 +90,7 @@ void DerivationBuildingGoal::timedOut(Error && ex) killChild(); // We're not inside a coroutine, hence we can't use co_return here. // Thus we ignore the return value. - [[maybe_unused]] Done _ = doneFailure({BuildResult::TimedOut, std::move(ex)}); + [[maybe_unused]] Done _ = doneFailure({BuildResult::Failure::TimedOut, std::move(ex)}); } /** @@ -205,7 +205,7 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() nrFailed, nrFailed == 1 ? "dependency" : "dependencies"); msg += showKnownOutputs(worker.store, *drv); - co_return doneFailure(BuildError(BuildResult::DependencyFailed, msg)); + co_return doneFailure(BuildError(BuildResult::Failure::DependencyFailed, msg)); } /* Gather information necessary for computing the closure and/or @@ -256,14 +256,18 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() return std::nullopt; auto & buildResult = (*mEntry)->buildResult; - if (!buildResult.success()) - return std::nullopt; + return std::visit( + overloaded{ + [](const BuildResult::Failure &) -> std::optional { return std::nullopt; }, + [&](const BuildResult::Success & success) -> std::optional { + auto i = get(success.builtOutputs, outputName); + if (!i) + return std::nullopt; - auto i = get(buildResult.builtOutputs, outputName); - if (!i) - return std::nullopt; - - return i->outPath; + return i->outPath; + }, + }, + buildResult.inner); }); if (!attempt) { /* TODO (impure derivations-induced tech debt) (see below): @@ -306,7 +310,9 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() auto resolvedResult = resolvedDrvGoal->buildResult; - if (resolvedResult.success()) { + // No `std::visit` for coroutines yet + if (auto * successP = resolvedResult.tryGetSuccess()) { + auto & success = *successP; SingleDrvOutputs builtOutputs; auto outputHashes = staticOutputHashes(worker.evalStore, *drv); @@ -324,7 +330,7 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() outputName); auto realisation = [&] { - auto take1 = get(resolvedResult.builtOutputs, outputName); + auto take1 = get(success.builtOutputs, outputName); if (take1) return *take1; @@ -360,18 +366,19 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() runPostBuildHook(worker.store, *logger, drvPath, outputPaths); - auto status = resolvedResult.status; - if (status == BuildResult::AlreadyValid) - status = BuildResult::ResolvesToAlreadyValid; + auto status = success.status; + if (status == BuildResult::Success::AlreadyValid) + status = BuildResult::Success::ResolvesToAlreadyValid; - co_return doneSuccess(status, std::move(builtOutputs)); - } else { + co_return doneSuccess(success.status, std::move(builtOutputs)); + } else if 
(resolvedResult.tryGetFailure()) { co_return doneFailure({ - BuildResult::DependencyFailed, + BuildResult::Failure::DependencyFailed, "build of resolved derivation '%s' failed", worker.store.printStorePath(pathResolved), }); - } + } else + assert(false); } /* If we get this far, we know no dynamic drvs inputs */ @@ -536,7 +543,7 @@ Goal::Co DerivationBuildingGoal::tryToBuild() debug("skipping build of derivation '%s', someone beat us to it", worker.store.printStorePath(drvPath)); outputLocks.setDeletion(true); outputLocks.unlock(); - co_return doneSuccess(BuildResult::AlreadyValid, std::move(validOutputs)); + co_return doneSuccess(BuildResult::Success::AlreadyValid, std::move(validOutputs)); } /* If any of the outputs already exist but are not valid, delete @@ -628,7 +635,7 @@ Goal::Co DerivationBuildingGoal::tryToBuild() /* Check the exit status. */ if (!statusOk(status)) { - auto e = fixupBuilderFailureErrorMessage({BuildResult::MiscFailure, status, ""}); + auto e = fixupBuilderFailureErrorMessage({BuildResult::Failure::MiscFailure, status, ""}); outputLocks.unlock(); @@ -669,7 +676,7 @@ Goal::Co DerivationBuildingGoal::tryToBuild() outputLocks.setDeletion(true); outputLocks.unlock(); - co_return doneSuccess(BuildResult::Built, std::move(builtOutputs)); + co_return doneSuccess(BuildResult::Success::Built, std::move(builtOutputs)); } co_await yield(); @@ -832,15 +839,15 @@ Goal::Co DerivationBuildingGoal::tryToBuild() # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wswitch-enum" switch (e.status) { - case BuildResult::HashMismatch: + case BuildResult::Failure::HashMismatch: worker.hashMismatch = true; /* See header, the protocols don't know about `HashMismatch` yet, so change it to `OutputRejected`, which they expect for this case (hash mismatch is a type of output rejection). */ - e.status = BuildResult::OutputRejected; + e.status = BuildResult::Failure::OutputRejected; break; - case BuildResult::NotDeterministic: + case BuildResult::Failure::NotDeterministic: worker.checkMismatch = true; break; default: @@ -866,7 +873,7 @@ Goal::Co DerivationBuildingGoal::tryToBuild() (unlinked) lock files. */ outputLocks.setDeletion(true); outputLocks.unlock(); - co_return doneSuccess(BuildResult::Built, std::move(builtOutputs)); + co_return doneSuccess(BuildResult::Success::Built, std::move(builtOutputs)); } #endif } @@ -1149,7 +1156,7 @@ void DerivationBuildingGoal::handleChildOutput(Descriptor fd, std::string_view d // We're not inside a coroutine, hence we can't use co_return here. // Thus we ignore the return value. 
[[maybe_unused]] Done _ = doneFailure(BuildError( - BuildResult::LogLimitExceeded, + BuildResult::Failure::LogLimitExceeded, "%s killed after writing more than %d bytes of log output", getName(), settings.maxLogSize)); @@ -1306,16 +1313,16 @@ DerivationBuildingGoal::checkPathValidity(std::map & return {allValid, validOutputs}; } -Goal::Done DerivationBuildingGoal::doneSuccess(BuildResult::Status status, SingleDrvOutputs builtOutputs) +Goal::Done DerivationBuildingGoal::doneSuccess(BuildResult::Success::Status status, SingleDrvOutputs builtOutputs) { - buildResult.status = status; - - assert(buildResult.success()); + buildResult.inner = BuildResult::Success{ + .status = status, + .builtOutputs = std::move(builtOutputs), + }; mcRunningBuilds.reset(); - buildResult.builtOutputs = std::move(builtOutputs); - if (status == BuildResult::Built) + if (status == BuildResult::Success::Built) worker.doneBuilds++; worker.updateProgress(); @@ -1325,16 +1332,18 @@ Goal::Done DerivationBuildingGoal::doneSuccess(BuildResult::Status status, Singl Goal::Done DerivationBuildingGoal::doneFailure(BuildError ex) { - buildResult.status = ex.status; - buildResult.errorMsg = fmt("%s", Uncolored(ex.info().msg)); - if (buildResult.status == BuildResult::TimedOut) - worker.timedOut = true; - if (buildResult.status == BuildResult::PermanentFailure) - worker.permanentFailure = true; + buildResult.inner = BuildResult::Failure{ + .status = ex.status, + .errorMsg = fmt("%s", Uncolored(ex.info().msg)), + }; mcRunningBuilds.reset(); - if (ex.status != BuildResult::DependencyFailed) + if (ex.status == BuildResult::Failure::TimedOut) + worker.timedOut = true; + if (ex.status == BuildResult::Failure::PermanentFailure) + worker.permanentFailure = true; + if (ex.status != BuildResult::Failure::DependencyFailed) worker.failedBuilds++; worker.updateProgress(); diff --git a/src/libstore/build/derivation-check.cc b/src/libstore/build/derivation-check.cc index 82e92e1f3..db3ec7c3d 100644 --- a/src/libstore/build/derivation-check.cc +++ b/src/libstore/build/derivation-check.cc @@ -33,7 +33,7 @@ void checkOutputs( /* Throw an error after registering the path as valid. */ throw BuildError( - BuildResult::HashMismatch, + BuildResult::Failure::HashMismatch, "hash mismatch in fixed-output derivation '%s':\n specified: %s\n got: %s", store.printStorePath(drvPath), wanted.to_string(HashFormat::SRI, true), @@ -42,7 +42,7 @@ void checkOutputs( if (!info.references.empty()) { auto numViolations = info.references.size(); throw BuildError( - BuildResult::HashMismatch, + BuildResult::Failure::HashMismatch, "fixed-output derivations must not reference store paths: '%s' references %d distinct paths, e.g. 
'%s'", store.printStorePath(drvPath), numViolations, @@ -84,7 +84,7 @@ void checkOutputs( auto applyChecks = [&](const DerivationOptions::OutputChecks & checks) { if (checks.maxSize && info.narSize > *checks.maxSize) throw BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "path '%s' is too large at %d bytes; limit is %d bytes", store.printStorePath(info.path), info.narSize, @@ -94,7 +94,7 @@ void checkOutputs( uint64_t closureSize = getClosure(info.path).second; if (closureSize > *checks.maxClosureSize) throw BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "closure of path '%s' is too large at %d bytes; limit is %d bytes", store.printStorePath(info.path), closureSize, @@ -115,7 +115,7 @@ void checkOutputs( std::string outputsListing = concatMapStringsSep(", ", outputs, [](auto & o) { return o.first; }); throw BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "derivation '%s' output check for '%s' contains an illegal reference specifier '%s'," " expected store path or output name (one of [%s])", store.printStorePath(drvPath), @@ -148,7 +148,7 @@ void checkOutputs( badPathsStr += store.printStorePath(i); } throw BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "output '%s' is not allowed to refer to the following paths:%s", store.printStorePath(info.path), badPathsStr); diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index b9046744a..5dfc334a8 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -94,7 +94,7 @@ Goal::Co DerivationGoal::haveDerivation() /* If they are all valid, then we're done. */ if (checkResult && checkResult->second == PathStatus::Valid && buildMode == bmNormal) { - co_return doneSuccess(BuildResult::AlreadyValid, checkResult->first); + co_return doneSuccess(BuildResult::Success::AlreadyValid, checkResult->first); } Goals waitees; @@ -123,7 +123,7 @@ Goal::Co DerivationGoal::haveDerivation() if (nrFailed > 0 && nrFailed > nrNoSubstituters && !settings.tryFallback) { co_return doneFailure(BuildError( - BuildResult::TransientFailure, + BuildResult::Failure::TransientFailure, "some substitutes for the outputs of derivation '%s' failed (usually happens due to networking issues); try '--fallback' to build derivation from source ", worker.store.printStorePath(drvPath))); } @@ -135,7 +135,7 @@ Goal::Co DerivationGoal::haveDerivation() bool allValid = checkResult && checkResult->second == PathStatus::Valid; if (buildMode == bmNormal && allValid) { - co_return doneSuccess(BuildResult::Substituted, checkResult->first); + co_return doneSuccess(BuildResult::Success::Substituted, checkResult->first); } if (buildMode == bmRepair && allValid) { co_return repairClosure(); @@ -163,25 +163,27 @@ Goal::Co DerivationGoal::haveDerivation() buildResult = g->buildResult; - if (buildMode == bmCheck) { - /* In checking mode, the builder will not register any outputs. - So we want to make sure the ones that we wanted to check are - properly there. */ - buildResult.builtOutputs = {{wantedOutput, assertPathValidity()}}; - } else { - /* Otherwise the builder will give us info for out output, but - also for other outputs. Filter down to just our output so as - not to leak info on unrelated things. 
*/ - for (auto it = buildResult.builtOutputs.begin(); it != buildResult.builtOutputs.end();) { - if (it->first != wantedOutput) { - it = buildResult.builtOutputs.erase(it); - } else { - ++it; + if (auto * successP = buildResult.tryGetSuccess()) { + auto & success = *successP; + if (buildMode == bmCheck) { + /* In checking mode, the builder will not register any outputs. + So we want to make sure the ones that we wanted to check are + properly there. */ + success.builtOutputs = {{wantedOutput, assertPathValidity()}}; + } else { + /* Otherwise the builder will give us info for out output, but + also for other outputs. Filter down to just our output so as + not to leak info on unrelated things. */ + for (auto it = success.builtOutputs.begin(); it != success.builtOutputs.end();) { + if (it->first != wantedOutput) { + it = success.builtOutputs.erase(it); + } else { + ++it; + } } - } - if (buildResult.success()) - assert(buildResult.builtOutputs.count(wantedOutput) > 0); + assert(success.builtOutputs.count(wantedOutput) > 0); + } } co_return amDone(g->exitCode, g->ex); @@ -279,7 +281,7 @@ Goal::Co DerivationGoal::repairClosure() "some paths in the output closure of derivation '%s' could not be repaired", worker.store.printStorePath(drvPath)); } - co_return doneSuccess(BuildResult::AlreadyValid, assertPathValidity()); + co_return doneSuccess(BuildResult::Success::AlreadyValid, assertPathValidity()); } std::optional> DerivationGoal::checkPathValidity() @@ -337,16 +339,16 @@ Realisation DerivationGoal::assertPathValidity() return checkResult->first; } -Goal::Done DerivationGoal::doneSuccess(BuildResult::Status status, Realisation builtOutput) +Goal::Done DerivationGoal::doneSuccess(BuildResult::Success::Status status, Realisation builtOutput) { - buildResult.status = status; - - assert(buildResult.success()); + buildResult.inner = BuildResult::Success{ + .status = status, + .builtOutputs = {{wantedOutput, std::move(builtOutput)}}, + }; mcExpectedBuilds.reset(); - buildResult.builtOutputs = {{wantedOutput, std::move(builtOutput)}}; - if (status == BuildResult::Built) + if (status == BuildResult::Success::Built) worker.doneBuilds++; worker.updateProgress(); @@ -356,16 +358,18 @@ Goal::Done DerivationGoal::doneSuccess(BuildResult::Status status, Realisation b Goal::Done DerivationGoal::doneFailure(BuildError ex) { - buildResult.status = ex.status; - buildResult.errorMsg = fmt("%s", Uncolored(ex.info().msg)); - if (buildResult.status == BuildResult::TimedOut) - worker.timedOut = true; - if (buildResult.status == BuildResult::PermanentFailure) - worker.permanentFailure = true; + buildResult.inner = BuildResult::Failure{ + .status = ex.status, + .errorMsg = fmt("%s", Uncolored(ex.info().msg)), + }; mcExpectedBuilds.reset(); - if (ex.status != BuildResult::DependencyFailed) + if (ex.status == BuildResult::Failure::TimedOut) + worker.timedOut = true; + if (ex.status == BuildResult::Failure::PermanentFailure) + worker.permanentFailure = true; + if (ex.status != BuildResult::Failure::DependencyFailed) worker.failedBuilds++; worker.updateProgress(); diff --git a/src/libstore/build/derivation-trampoline-goal.cc b/src/libstore/build/derivation-trampoline-goal.cc index 5038a4ea0..205f5c427 100644 --- a/src/libstore/build/derivation-trampoline-goal.cc +++ b/src/libstore/build/derivation-trampoline-goal.cc @@ -164,10 +164,11 @@ Goal::Co DerivationTrampolineGoal::haveDerivation(StorePath drvPath, Derivation auto & g = *concreteDrvGoals.begin(); buildResult = g->buildResult; - for (auto & g2 : concreteDrvGoals) { - 
for (auto && [x, y] : g2->buildResult.builtOutputs) - buildResult.builtOutputs.insert_or_assign(x, y); - } + if (auto * successP = buildResult.tryGetSuccess()) + for (auto & g2 : concreteDrvGoals) + if (auto * successP2 = g2->buildResult.tryGetSuccess()) + for (auto && [x, y] : successP2->builtOutputs) + successP->builtOutputs.insert_or_assign(x, y); co_return amDone(g->exitCode, g->ex); } diff --git a/src/libstore/build/entry-points.cc b/src/libstore/build/entry-points.cc index 1dd540265..4bbd4c8f0 100644 --- a/src/libstore/build/entry-points.cc +++ b/src/libstore/build/entry-points.cc @@ -82,10 +82,10 @@ BuildResult Store::buildDerivation(const StorePath & drvPath, const BasicDerivat worker.run(Goals{goal}); return goal->buildResult; } catch (Error & e) { - return BuildResult{ - .status = BuildResult::MiscFailure, + return BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::MiscFailure, .errorMsg = e.msg(), - }; + }}}; }; } diff --git a/src/libstore/build/substitution-goal.cc b/src/libstore/build/substitution-goal.cc index d219834f2..d16e530a4 100644 --- a/src/libstore/build/substitution-goal.cc +++ b/src/libstore/build/substitution-goal.cc @@ -27,13 +27,21 @@ PathSubstitutionGoal::~PathSubstitutionGoal() cleanup(); } -Goal::Done PathSubstitutionGoal::done(ExitCode result, BuildResult::Status status, std::optional errorMsg) +Goal::Done PathSubstitutionGoal::doneSuccess(BuildResult::Success::Status status) { - buildResult.status = status; - if (errorMsg) { - debug(*errorMsg); - buildResult.errorMsg = *errorMsg; - } + buildResult.inner = BuildResult::Success{ + .status = status, + }; + return amDone(ecSuccess); +} + +Goal::Done PathSubstitutionGoal::doneFailure(ExitCode result, BuildResult::Failure::Status status, std::string errorMsg) +{ + debug(errorMsg); + buildResult.inner = BuildResult::Failure{ + .status = status, + .errorMsg = std::move(errorMsg), + }; return amDone(result); } @@ -45,7 +53,7 @@ Goal::Co PathSubstitutionGoal::init() /* If the path already exists we're done. */ if (!repair && worker.store.isValidPath(storePath)) { - co_return done(ecSuccess, BuildResult::AlreadyValid); + co_return doneSuccess(BuildResult::Success::AlreadyValid); } if (settings.readOnlyMode) @@ -165,9 +173,9 @@ Goal::Co PathSubstitutionGoal::init() /* Hack: don't indicate failure if there were no substituters. In that case the calling derivation should just do a build. */ - co_return done( + co_return doneFailure( substituterFailed ? ecFailed : ecNoSubstituters, - BuildResult::NoSubstituters, + BuildResult::Failure::NoSubstituters, fmt("path '%s' is required, but there is no substituter that can build it", worker.store.printStorePath(storePath))); } @@ -178,9 +186,9 @@ Goal::Co PathSubstitutionGoal::tryToRun( trace("all references realised"); if (nrFailed > 0) { - co_return done( + co_return doneFailure( nrNoSubstituters > 0 ? 
ecNoSubstituters : ecFailed, - BuildResult::DependencyFailed, + BuildResult::Failure::DependencyFailed, fmt("some references of path '%s' could not be realised", worker.store.printStorePath(storePath))); } @@ -297,7 +305,7 @@ Goal::Co PathSubstitutionGoal::tryToRun( worker.updateProgress(); - co_return done(ecSuccess, BuildResult::Substituted); + co_return doneSuccess(BuildResult::Success::Substituted); } void PathSubstitutionGoal::handleEOF(Descriptor fd) diff --git a/src/libstore/derivation-options.cc b/src/libstore/derivation-options.cc index 4cb9bf726..844bce840 100644 --- a/src/libstore/derivation-options.cc +++ b/src/libstore/derivation-options.cc @@ -266,7 +266,9 @@ DerivationOptions::getParsedExportReferencesGraph(const StoreDirConfig & store) for (auto & storePathS : ss) { if (!store.isInStore(storePathS)) throw BuildError( - BuildResult::InputRejected, "'exportReferencesGraph' contains a non-store path '%1%'", storePathS); + BuildResult::Failure::InputRejected, + "'exportReferencesGraph' contains a non-store path '%1%'", + storePathS); storePaths.insert(store.toStorePath(storePathS).first); } res.insert_or_assign(fileName, storePaths); diff --git a/src/libstore/include/nix/store/build-result.hh b/src/libstore/include/nix/store/build-result.hh index 1911fef39..0446c4038 100644 --- a/src/libstore/include/nix/store/build-result.hh +++ b/src/libstore/include/nix/store/build-result.hh @@ -12,63 +12,121 @@ namespace nix { struct BuildResult { - /** - * @note This is directly used in the nix-store --serve protocol. - * That means we need to worry about compatibility across versions. - * Therefore, don't remove status codes, and only add new status - * codes at the end of the list. - */ - enum Status { - Built = 0, - Substituted = 1, - AlreadyValid = 2, - PermanentFailure = 3, - InputRejected = 4, - OutputRejected = 5, - /// possibly transient - TransientFailure = 6, - /// no longer used - CachedFailure = 7, - TimedOut = 8, - MiscFailure = 9, - DependencyFailed = 10, - LogLimitExceeded = 11, - NotDeterministic = 12, - ResolvesToAlreadyValid = 13, - NoSubstituters = 14, - /// A certain type of `OutputRejected`. The protocols do not yet - /// know about this one, so change it back to `OutputRejected` - /// before serialization. - HashMismatch = 15, - } status = MiscFailure; + struct Success + { + /** + * @note This is directly used in the nix-store --serve protocol. + * That means we need to worry about compatibility across versions. + * Therefore, don't remove status codes, and only add new status + * codes at the end of the list. + * + * Must be disjoint with `Failure::Status`. + */ + enum Status : uint8_t { + Built = 0, + Substituted = 1, + AlreadyValid = 2, + ResolvesToAlreadyValid = 13, + } status; + + /** + * For derivations, a mapping from the names of the wanted outputs + * to actual paths. + */ + SingleDrvOutputs builtOutputs; + + bool operator==(const BuildResult::Success &) const noexcept; + std::strong_ordering operator<=>(const BuildResult::Success &) const noexcept; + + static bool statusIs(uint8_t status) + { + return status == Built || status == Substituted || status == AlreadyValid + || status == ResolvesToAlreadyValid; + } + }; + + struct Failure + { + /** + * @note This is directly used in the nix-store --serve protocol. + * That means we need to worry about compatibility across versions. + * Therefore, don't remove status codes, and only add new status + * codes at the end of the list. + * + * Must be disjoint with `Success::Status`. 
+ */ + enum Status : uint8_t { + PermanentFailure = 3, + InputRejected = 4, + OutputRejected = 5, + /// possibly transient + TransientFailure = 6, + /// no longer used + CachedFailure = 7, + TimedOut = 8, + MiscFailure = 9, + DependencyFailed = 10, + LogLimitExceeded = 11, + NotDeterministic = 12, + NoSubstituters = 14, + /// A certain type of `OutputRejected`. The protocols do not yet + /// know about this one, so change it back to `OutputRejected` + /// before serialization. + HashMismatch = 15, + } status = MiscFailure; + + /** + * Information about the error if the build failed. + * + * @todo This should be an entire ErrorInfo object, not just a + * string, for richer information. + */ + std::string errorMsg; + + /** + * If timesBuilt > 1, whether some builds did not produce the same + * result. (Note that 'isNonDeterministic = false' does not mean + * the build is deterministic, just that we don't have evidence of + * non-determinism.) + */ + bool isNonDeterministic = false; + + bool operator==(const BuildResult::Failure &) const noexcept; + std::strong_ordering operator<=>(const BuildResult::Failure &) const noexcept; + + [[noreturn]] void rethrow() const + { + throw Error("%s", errorMsg); + } + }; + + std::variant inner = Failure{}; /** - * Information about the error if the build failed. - * - * @todo This should be an entire ErrorInfo object, not just a - * string, for richer information. + * Convenience wrapper to avoid a longer `std::get_if` usage by the + * caller (which will have to add more `BuildResult::` than we do + * below also, do note.) */ - std::string errorMsg; + auto * tryGetSuccess(this auto & self) + { + return std::get_if(&self.inner); + } + + /** + * Convenience wrapper to avoid a longer `std::get_if` usage by the + * caller (which will have to add more `BuildResult::` than we do + * below also, do note.) + */ + auto * tryGetFailure(this auto & self) + { + return std::get_if(&self.inner); + } /** * How many times this build was performed. */ unsigned int timesBuilt = 0; - /** - * If timesBuilt > 1, whether some builds did not produce the same - * result. (Note that 'isNonDeterministic = false' does not mean - * the build is deterministic, just that we don't have evidence of - * non-determinism.) - */ - bool isNonDeterministic = false; - - /** - * For derivations, a mapping from the names of the wanted outputs - * to actual paths. - */ - SingleDrvOutputs builtOutputs; - /** * The start/stop times of the build (or one of the rounds, if it * was repeated). @@ -82,16 +140,6 @@ struct BuildResult bool operator==(const BuildResult &) const noexcept; std::strong_ordering operator<=>(const BuildResult &) const noexcept; - - bool success() - { - return status == Built || status == Substituted || status == AlreadyValid || status == ResolvesToAlreadyValid; - } - - void rethrow() - { - throw Error("%s", errorMsg); - } }; /** @@ -99,15 +147,9 @@ struct BuildResult */ struct BuildError : public Error { - BuildResult::Status status; + BuildResult::Failure::Status status; - BuildError(BuildResult::Status status, BuildError && error) - : Error{std::move(error)} - , status{status} - { - } - - BuildError(BuildResult::Status status, auto &&... args) + BuildError(BuildResult::Failure::Status status, auto &&... 
args) : Error{args...} , status{status} { diff --git a/src/libstore/include/nix/store/build/derivation-builder.hh b/src/libstore/include/nix/store/build/derivation-builder.hh index 7fad2837a..63ef2b665 100644 --- a/src/libstore/include/nix/store/build/derivation-builder.hh +++ b/src/libstore/include/nix/store/build/derivation-builder.hh @@ -22,7 +22,7 @@ struct BuilderFailureError : BuildError std::string extraMsgAfter; - BuilderFailureError(BuildResult::Status status, int builderStatus, std::string extraMsgAfter) + BuilderFailureError(BuildResult::Failure::Status status, int builderStatus, std::string extraMsgAfter) : BuildError{ status, /* No message for now, because the caller will make for diff --git a/src/libstore/include/nix/store/build/derivation-building-goal.hh b/src/libstore/include/nix/store/build/derivation-building-goal.hh index d394eb3c9..edb496024 100644 --- a/src/libstore/include/nix/store/build/derivation-building-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-building-goal.hh @@ -147,7 +147,7 @@ private: */ void killChild(); - Done doneSuccess(BuildResult::Status status, SingleDrvOutputs builtOutputs); + Done doneSuccess(BuildResult::Success::Status status, SingleDrvOutputs builtOutputs); Done doneFailure(BuildError ex); diff --git a/src/libstore/include/nix/store/build/derivation-goal.hh b/src/libstore/include/nix/store/build/derivation-goal.hh index 85b471e28..e05bf1c0b 100644 --- a/src/libstore/include/nix/store/build/derivation-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-goal.hh @@ -99,7 +99,7 @@ private: Co repairClosure(); - Done doneSuccess(BuildResult::Status status, Realisation builtOutput); + Done doneSuccess(BuildResult::Success::Status status, Realisation builtOutput); Done doneFailure(BuildError ex); }; diff --git a/src/libstore/include/nix/store/build/substitution-goal.hh b/src/libstore/include/nix/store/build/substitution-goal.hh index 9fc6450b1..5f6cb6a18 100644 --- a/src/libstore/include/nix/store/build/substitution-goal.hh +++ b/src/libstore/include/nix/store/build/substitution-goal.hh @@ -41,7 +41,9 @@ struct PathSubstitutionGoal : public Goal */ std::optional ca; - Done done(ExitCode result, BuildResult::Status status, std::optional errorMsg = {}); + Done doneSuccess(BuildResult::Success::Status status); + + Done doneFailure(ExitCode result, BuildResult::Failure::Status status, std::string errorMsg); public: PathSubstitutionGoal( diff --git a/src/libstore/legacy-ssh-store.cc b/src/libstore/legacy-ssh-store.cc index f935de206..3b466c9bb 100644 --- a/src/libstore/legacy-ssh-store.cc +++ b/src/libstore/legacy-ssh-store.cc @@ -241,12 +241,13 @@ void LegacySSHStore::buildPaths( conn->to.flush(); - BuildResult result; - result.status = (BuildResult::Status) readInt(conn->from); - - if (!result.success()) { - conn->from >> result.errorMsg; - throw Error(result.status, result.errorMsg); + auto status = readInt(conn->from); + if (!BuildResult::Success::statusIs(status)) { + BuildResult::Failure failure{ + .status = (BuildResult::Failure::Status) status, + }; + conn->from >> failure.errorMsg; + throw Error(failure.status, std::move(failure.errorMsg)); } } diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index 4cadf5282..ebc987ee0 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -997,7 +997,7 @@ void LocalStore::registerValidPaths(const ValidPathInfos & infos) }}, {[&](const StorePath & path, const StorePath & parent) { return BuildError( - BuildResult::OutputRejected, + 
BuildResult::Failure::OutputRejected, "cycle detected in the references of '%s' from '%s'", printStorePath(path), printStorePath(parent)); diff --git a/src/libstore/misc.cc b/src/libstore/misc.cc index c5e1747c1..7efaa4f86 100644 --- a/src/libstore/misc.cc +++ b/src/libstore/misc.cc @@ -322,7 +322,7 @@ StorePaths Store::topoSortPaths(const StorePathSet & paths) }}, {[&](const StorePath & path, const StorePath & parent) { return BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "cycle detected in the references of '%s' from '%s'", printStorePath(path), printStorePath(parent)); diff --git a/src/libstore/posix-fs-canonicalise.cc b/src/libstore/posix-fs-canonicalise.cc index b6a64e65b..a274468c3 100644 --- a/src/libstore/posix-fs-canonicalise.cc +++ b/src/libstore/posix-fs-canonicalise.cc @@ -98,7 +98,7 @@ static void canonicalisePathMetaData_( (i.e. "touch $out/foo; ln $out/foo $out/bar"). */ if (uidRange && (st.st_uid < uidRange->first || st.st_uid > uidRange->second)) { if (S_ISDIR(st.st_mode) || !inodesSeen.count(Inode(st.st_dev, st.st_ino))) - throw BuildError(BuildResult::OutputRejected, "invalid ownership on file '%1%'", path); + throw BuildError(BuildResult::Failure::OutputRejected, "invalid ownership on file '%1%'", path); mode_t mode = st.st_mode & ~S_IFMT; assert( S_ISLNK(st.st_mode) diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index bb7425081..a6994f844 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -598,16 +598,15 @@ std::vector RemoteStore::buildPathsWithResults( [&](const DerivedPath::Opaque & bo) { results.push_back( KeyedBuildResult{ - { - .status = BuildResult::Substituted, - }, + {.inner{BuildResult::Success{ + .status = BuildResult::Success::Substituted, + }}}, /* .path = */ bo, }); }, [&](const DerivedPath::Built & bfd) { - KeyedBuildResult res{ - {.status = BuildResult::Built}, - /* .path = */ bfd, + BuildResult::Success success{ + .status = BuildResult::Success::Built, }; OutputPathMap outputs; @@ -627,9 +626,9 @@ std::vector RemoteStore::buildPathsWithResults( auto realisation = queryRealisation(outputId); if (!realisation) throw MissingRealisation(outputId); - res.builtOutputs.emplace(output, *realisation); + success.builtOutputs.emplace(output, *realisation); } else { - res.builtOutputs.emplace( + success.builtOutputs.emplace( output, Realisation{ .id = outputId, @@ -638,7 +637,11 @@ std::vector RemoteStore::buildPathsWithResults( } } - results.push_back(res); + results.push_back( + KeyedBuildResult{ + {.inner = std::move(success)}, + /* .path = */ bfd, + }); }}, path.raw()); } diff --git a/src/libstore/restricted-store.cc b/src/libstore/restricted-store.cc index e0f43ab6c..a1cb41606 100644 --- a/src/libstore/restricted-store.cc +++ b/src/libstore/restricted-store.cc @@ -257,8 +257,8 @@ void RestrictedStore::buildPaths( const std::vector & paths, BuildMode buildMode, std::shared_ptr evalStore) { for (auto & result : buildPathsWithResults(paths, buildMode, evalStore)) - if (!result.success()) - result.rethrow(); + if (auto * failureP = result.tryGetFailure()) + failureP->rethrow(); } std::vector RestrictedStore::buildPathsWithResults( @@ -280,9 +280,11 @@ std::vector RestrictedStore::buildPathsWithResults( auto results = next->buildPathsWithResults(paths, buildMode); for (auto & result : results) { - for (auto & [outputName, output] : result.builtOutputs) { - newPaths.insert(output.outPath); - newRealisations.insert(output); + if (auto * successP = result.tryGetSuccess()) { + 
for (auto & [outputName, output] : successP->builtOutputs) { + newPaths.insert(output.outPath); + newRealisations.insert(output); + } } } diff --git a/src/libstore/serve-protocol.cc b/src/libstore/serve-protocol.cc index 7cf5e6997..51b575fcd 100644 --- a/src/libstore/serve-protocol.cc +++ b/src/libstore/serve-protocol.cc @@ -16,32 +16,62 @@ namespace nix { BuildResult ServeProto::Serialise::read(const StoreDirConfig & store, ServeProto::ReadConn conn) { BuildResult status; - status.status = (BuildResult::Status) readInt(conn.from); - conn.from >> status.errorMsg; + BuildResult::Success success; + BuildResult::Failure failure; + + auto rawStatus = readInt(conn.from); + conn.from >> failure.errorMsg; if (GET_PROTOCOL_MINOR(conn.version) >= 3) - conn.from >> status.timesBuilt >> status.isNonDeterministic >> status.startTime >> status.stopTime; + conn.from >> status.timesBuilt >> failure.isNonDeterministic >> status.startTime >> status.stopTime; if (GET_PROTOCOL_MINOR(conn.version) >= 6) { auto builtOutputs = ServeProto::Serialise::read(store, conn); for (auto && [output, realisation] : builtOutputs) - status.builtOutputs.insert_or_assign(std::move(output.outputName), std::move(realisation)); + success.builtOutputs.insert_or_assign(std::move(output.outputName), std::move(realisation)); } + + if (BuildResult::Success::statusIs(rawStatus)) { + success.status = static_cast(rawStatus); + status.inner = std::move(success); + } else { + failure.status = static_cast(rawStatus); + status.inner = std::move(failure); + } + return status; } void ServeProto::Serialise::write( - const StoreDirConfig & store, ServeProto::WriteConn conn, const BuildResult & status) + const StoreDirConfig & store, ServeProto::WriteConn conn, const BuildResult & res) { - conn.to << status.status << status.errorMsg; - - if (GET_PROTOCOL_MINOR(conn.version) >= 3) - conn.to << status.timesBuilt << status.isNonDeterministic << status.startTime << status.stopTime; - if (GET_PROTOCOL_MINOR(conn.version) >= 6) { - DrvOutputs builtOutputs; - for (auto & [output, realisation] : status.builtOutputs) - builtOutputs.insert_or_assign(realisation.id, realisation); - ServeProto::write(store, conn, builtOutputs); - } + /* The protocol predates the use of sum types (std::variant) to + separate the success or failure cases. As such, it transits some + success- or failure-only fields in both cases. This helper + function helps support this: in each case, we just pass the old + default value for the fields that don't exist in that case. 
*/ + auto common = [&](std::string_view errorMsg, bool isNonDeterministic, const auto & builtOutputs) { + conn.to << errorMsg; + if (GET_PROTOCOL_MINOR(conn.version) >= 3) + conn.to << res.timesBuilt << isNonDeterministic << res.startTime << res.stopTime; + if (GET_PROTOCOL_MINOR(conn.version) >= 6) { + DrvOutputs builtOutputsFullKey; + for (auto & [output, realisation] : builtOutputs) + builtOutputsFullKey.insert_or_assign(realisation.id, realisation); + ServeProto::write(store, conn, builtOutputsFullKey); + } + }; + std::visit( + overloaded{ + [&](const BuildResult::Failure & failure) { + conn.to << failure.status; + common(failure.errorMsg, failure.isNonDeterministic, decltype(BuildResult::Success::builtOutputs){}); + }, + [&](const BuildResult::Success & success) { + conn.to << success.status; + common(/*errorMsg=*/"", /*isNonDeterministic=*/false, success.builtOutputs); + }, + }, + res.inner); } UnkeyedValidPathInfo ServeProto::Serialise::read(const StoreDirConfig & store, ReadConn conn) diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index a0b06db54..56dffe19d 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -764,7 +764,7 @@ StorePathSet Store::exportReferences(const StorePathSet & storePaths, const Stor for (auto & storePath : storePaths) { if (!inputPaths.count(storePath)) throw BuildError( - BuildResult::InputRejected, + BuildResult::Failure::InputRejected, "cannot export references of path '%s' because it is not in the input closure of the derivation", printStorePath(storePath)); diff --git a/src/libstore/unix/build/derivation-builder.cc b/src/libstore/unix/build/derivation-builder.cc index 770bdad4d..d765de562 100644 --- a/src/libstore/unix/build/derivation-builder.cc +++ b/src/libstore/unix/build/derivation-builder.cc @@ -50,7 +50,7 @@ namespace nix { struct NotDeterministic : BuildError { NotDeterministic(auto &&... args) - : BuildError(BuildResult::NotDeterministic, args...) + : BuildError(BuildResult::Failure::NotDeterministic, args...) { } }; @@ -518,7 +518,8 @@ SingleDrvOutputs DerivationBuilderImpl::unprepareBuild() cleanupBuild(false); throw BuilderFailureError{ - !derivationType.isSandboxed() || diskFull ? BuildResult::TransientFailure : BuildResult::PermanentFailure, + !derivationType.isSandboxed() || diskFull ? BuildResult::Failure::TransientFailure + : BuildResult::Failure::PermanentFailure, status, diskFull ? 
"\nnote: build failure may have been caused by lack of free disk space" : "", }; @@ -700,7 +701,7 @@ std::optional DerivationBuilderImpl::startBuild() fmt("\nNote: run `%s` to run programs for x86_64-darwin", Magenta("/usr/sbin/softwareupdate --install-rosetta && launchctl stop org.nixos.nix-daemon")); - throw BuildError(BuildResult::InputRejected, msg); + throw BuildError(BuildResult::Failure::InputRejected, msg); } auto buildDir = store.config->getBuildDir(); @@ -1389,7 +1390,7 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() auto optSt = maybeLstat(actualPath.c_str()); if (!optSt) throw BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "builder for '%s' failed to produce output path for output '%s' at '%s'", store.printStorePath(drvPath), outputName, @@ -1404,7 +1405,7 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() if ((!S_ISLNK(st.st_mode) && (st.st_mode & (S_IWGRP | S_IWOTH))) || (buildUser && st.st_uid != buildUser->getUID())) throw BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "suspicious ownership or permission on '%s' for output '%s'; rejecting this build output", actualPath, outputName); @@ -1442,7 +1443,7 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() auto orifu = get(outputReferencesIfUnregistered, name); if (!orifu) throw BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "no output reference for '%s' in build of '%s'", name, store.printStorePath(drvPath)); @@ -1467,7 +1468,7 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() {[&](const std::string & path, const std::string & parent) { // TODO with more -vvvv also show the temporary paths for manual inspection. return BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "cycle detected in build of '%s' in the references of output '%s' from output '%s'", store.printStorePath(drvPath), path, @@ -1561,12 +1562,13 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() auto newInfoFromCA = [&](const DerivationOutput::CAFloating outputHash) -> ValidPathInfo { auto st = get(outputStats, outputName); if (!st) - throw BuildError(BuildResult::OutputRejected, "output path %1% without valid stats info", actualPath); + throw BuildError( + BuildResult::Failure::OutputRejected, "output path %1% without valid stats info", actualPath); if (outputHash.method.getFileIngestionMethod() == FileIngestionMethod::Flat) { /* The output path should be a regular file without execute permission. 
*/ if (!S_ISREG(st->st_mode) || (st->st_mode & S_IXUSR) != 0) throw BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "output path '%1%' should be a non-executable regular file " "since recursive hashing is not enabled (one of outputHashMode={flat,text} is true)", actualPath); diff --git a/src/libstore/worker-protocol.cc b/src/libstore/worker-protocol.cc index 1bbff64a2..4f7c28409 100644 --- a/src/libstore/worker-protocol.cc +++ b/src/libstore/worker-protocol.cc @@ -165,10 +165,14 @@ void WorkerProto::Serialise::write( BuildResult WorkerProto::Serialise::read(const StoreDirConfig & store, WorkerProto::ReadConn conn) { BuildResult res; - res.status = static_cast(readInt(conn.from)); - conn.from >> res.errorMsg; + BuildResult::Success success; + BuildResult::Failure failure; + + auto rawStatus = readInt(conn.from); + conn.from >> failure.errorMsg; + if (GET_PROTOCOL_MINOR(conn.version) >= 29) { - conn.from >> res.timesBuilt >> res.isNonDeterministic >> res.startTime >> res.stopTime; + conn.from >> res.timesBuilt >> failure.isNonDeterministic >> res.startTime >> res.stopTime; } if (GET_PROTOCOL_MINOR(conn.version) >= 37) { res.cpuUser = WorkerProto::Serialise>::read(store, conn); @@ -177,28 +181,56 @@ BuildResult WorkerProto::Serialise::read(const StoreDirConfig & sto if (GET_PROTOCOL_MINOR(conn.version) >= 28) { auto builtOutputs = WorkerProto::Serialise::read(store, conn); for (auto && [output, realisation] : builtOutputs) - res.builtOutputs.insert_or_assign(std::move(output.outputName), std::move(realisation)); + success.builtOutputs.insert_or_assign(std::move(output.outputName), std::move(realisation)); } + + if (BuildResult::Success::statusIs(rawStatus)) { + success.status = static_cast(rawStatus); + res.inner = std::move(success); + } else { + failure.status = static_cast(rawStatus); + res.inner = std::move(failure); + } + return res; } void WorkerProto::Serialise::write( const StoreDirConfig & store, WorkerProto::WriteConn conn, const BuildResult & res) { - conn.to << res.status << res.errorMsg; - if (GET_PROTOCOL_MINOR(conn.version) >= 29) { - conn.to << res.timesBuilt << res.isNonDeterministic << res.startTime << res.stopTime; - } - if (GET_PROTOCOL_MINOR(conn.version) >= 37) { - WorkerProto::write(store, conn, res.cpuUser); - WorkerProto::write(store, conn, res.cpuSystem); - } - if (GET_PROTOCOL_MINOR(conn.version) >= 28) { - DrvOutputs builtOutputs; - for (auto & [output, realisation] : res.builtOutputs) - builtOutputs.insert_or_assign(realisation.id, realisation); - WorkerProto::write(store, conn, builtOutputs); - } + /* The protocol predates the use of sum types (std::variant) to + separate the success or failure cases. As such, it transits some + success- or failure-only fields in both cases. This helper + function helps support this: in each case, we just pass the old + default value for the fields that don't exist in that case. 
*/ + auto common = [&](std::string_view errorMsg, bool isNonDeterministic, const auto & builtOutputs) { + conn.to << errorMsg; + if (GET_PROTOCOL_MINOR(conn.version) >= 29) { + conn.to << res.timesBuilt << isNonDeterministic << res.startTime << res.stopTime; + } + if (GET_PROTOCOL_MINOR(conn.version) >= 37) { + WorkerProto::write(store, conn, res.cpuUser); + WorkerProto::write(store, conn, res.cpuSystem); + } + if (GET_PROTOCOL_MINOR(conn.version) >= 28) { + DrvOutputs builtOutputsFullKey; + for (auto & [output, realisation] : builtOutputs) + builtOutputsFullKey.insert_or_assign(realisation.id, realisation); + WorkerProto::write(store, conn, builtOutputsFullKey); + } + }; + std::visit( + overloaded{ + [&](const BuildResult::Failure & failure) { + conn.to << failure.status; + common(failure.errorMsg, failure.isNonDeterministic, decltype(BuildResult::Success::builtOutputs){}); + }, + [&](const BuildResult::Success & success) { + conn.to << success.status; + common(/*errorMsg=*/"", /*isNonDeterministic=*/false, success.builtOutputs); + }, + }, + res.inner); } ValidPathInfo WorkerProto::Serialise::read(const StoreDirConfig & store, ReadConn conn) diff --git a/src/nix/build-remote/build-remote.cc b/src/nix/build-remote/build-remote.cc index 11df8cc5e..ffb77ddf1 100644 --- a/src/nix/build-remote/build-remote.cc +++ b/src/nix/build-remote/build-remote.cc @@ -324,7 +324,7 @@ static int main_build_remote(int argc, char ** argv) drv.inputSrcs = store->parseStorePathSet(inputs); optResult = sshStore->buildDerivation(*drvPath, (const BasicDerivation &) drv); auto & result = *optResult; - if (!result.success()) { + if (auto * failureP = result.tryGetFailure()) { if (settings.keepFailed) { warn( "The failed build directory was kept on the remote builder due to `--keep-failed`.%s", @@ -333,7 +333,7 @@ static int main_build_remote(int argc, char ** argv) : ""); } throw Error( - "build of '%s' on '%s' failed: %s", store->printStorePath(*drvPath), storeUri, result.errorMsg); + "build of '%s' on '%s' failed: %s", store->printStorePath(*drvPath), storeUri, failureP->errorMsg); } } else { copyClosure(*store, *sshStore, StorePathSet{*drvPath}, NoRepair, NoCheckSigs, substitute); @@ -357,11 +357,14 @@ static int main_build_remote(int argc, char ** argv) debug("missing output %s", outputName); assert(optResult); auto & result = *optResult; - auto i = result.builtOutputs.find(outputName); - assert(i != result.builtOutputs.end()); - auto & newRealisation = i->second; - missingRealisations.insert(newRealisation); - missingPaths.insert(newRealisation.outPath); + if (auto * successP = result.tryGetSuccess()) { + auto & success = *successP; + auto i = success.builtOutputs.find(outputName); + assert(i != success.builtOutputs.end()); + auto & newRealisation = i->second; + missingRealisations.insert(newRealisation); + missingPaths.insert(newRealisation.outPath); + } } } } else { diff --git a/tests/functional/test-libstoreconsumer/main.cc b/tests/functional/test-libstoreconsumer/main.cc index d8db67a4d..5b0132934 100644 --- a/tests/functional/test-libstoreconsumer/main.cc +++ b/tests/functional/test-libstoreconsumer/main.cc @@ -34,8 +34,10 @@ int main(int argc, char ** argv) const auto results = store->buildPathsWithResults(paths, bmNormal, store); for (const auto & result : results) { - for (const auto & [outputName, realisation] : result.builtOutputs) { - std::cout << store->printStorePath(realisation.outPath) << "\n"; + if (auto * successP = result.tryGetSuccess()) { + for (const auto & [outputName, realisation] : 
successP->builtOutputs) { + std::cout << store->printStorePath(realisation.outPath) << "\n"; + } } } From e35abb110264c692b1d442f1433f691e4d0efbc2 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Fri, 26 Sep 2025 00:21:41 -0400 Subject: [PATCH 115/332] Create test for issue 13247 This test ends up being skipped, since the bug has not yet been fixed. A future commit will fix the bug. Progress on #13247, naturally. --- tests/functional/ca/issue-13247.nix | 46 +++++++++++++++++++ tests/functional/ca/issue-13247.sh | 71 +++++++++++++++++++++++++++++ tests/functional/ca/meson.build | 1 + 3 files changed, 118 insertions(+) create mode 100644 tests/functional/ca/issue-13247.nix create mode 100755 tests/functional/ca/issue-13247.sh diff --git a/tests/functional/ca/issue-13247.nix b/tests/functional/ca/issue-13247.nix new file mode 100644 index 000000000..78c622ed9 --- /dev/null +++ b/tests/functional/ca/issue-13247.nix @@ -0,0 +1,46 @@ +with import ./config.nix; + +rec { + + a = mkDerivation { + name = "issue-13247-a"; + builder = builtins.toFile "builder.sh" '' + mkdir $out + test -z $all + echo "output" > $out/file + ''; + }; + + # Same output, different drv + a-prime = mkDerivation { + name = "issue-13247-a"; + builder = builtins.toFile "builder.sh" '' + echo 'will make the same stuff as `a`, but different drv hash' + + mkdir $out + test -z $all + echo "output" > $out/file + ''; + }; + + # Multiple outputs in a derivation that depends on other derivations + f = + dep: + mkDerivation { + name = "use-a-more-outputs"; + outputs = [ + "first" + "second" + ]; + inherit dep; + builder = builtins.toFile "builder.sh" '' + ln -s $dep/file $first + ln -s $first $second + ''; + }; + + use-a-more-outputs = f a; + + use-a-prime-more-outputs = f a-prime; + +} diff --git a/tests/functional/ca/issue-13247.sh b/tests/functional/ca/issue-13247.sh new file mode 100755 index 000000000..686d90ced --- /dev/null +++ b/tests/functional/ca/issue-13247.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash + +# https://github.com/NixOS/nix/issues/13247 + +export NIX_TESTS_CA_BY_DEFAULT=1 + +source common.sh + +clearStoreIfPossible + +set -x + +# Build derivation (both outputs) +nix build -f issue-13247.nix --json a a-prime use-a-more-outputs --no-link > "$TEST_ROOT"/a.json + +cache="file://$TEST_ROOT/cache" + +# Copy all outputs and realisations to cache +declare -a drvs +for d in "$NIX_STORE_DIR"/*-issue-13247-a.drv "$NIX_STORE_DIR"/*-use-a-more-outputs.drv; do + drvs+=("$d" "$d"^*) +done +nix copy --to "$cache" "${drvs[@]}" + +function delete () { + # Delete local copy + # shellcheck disable=SC2046 + nix-store --delete \ + $(jq -r <"$TEST_ROOT"/a.json '.[] | .drvPath, .outputs.[]') \ + "$NIX_STORE_DIR"/*-issue-13247-a.drv \ + "$NIX_STORE_DIR"/*-use-a-more-outputs.drv + + [[ ! -e "$(jq -r <"$TEST_ROOT"/a.json '.[0].outputs.out')" ]] + [[ ! -e "$(jq -r <"$TEST_ROOT"/a.json '.[1].outputs.out')" ]] + [[ ! -e "$(jq -r <"$TEST_ROOT"/a.json '.[2].outputs.first')" ]] + [[ ! -e "$(jq -r <"$TEST_ROOT"/a.json '.[2].outputs.second')" ]] +} + +delete + +buildViaSubstitute () { + nix build -f issue-13247.nix "$1" --no-link --max-jobs 0 --substituters "$cache" --no-require-sigs --offline --substitute +} + +# Substitue just the first output +buildViaSubstitute use-a-more-outputs^first + +# Should only fetch the output we asked for +[[ -d "$(jq -r <"$TEST_ROOT"/a.json '.[0].outputs.out')" ]] +[[ -f "$(jq -r <"$TEST_ROOT"/a.json '.[2].outputs.first')" ]] +[[ ! 
-e "$(jq -r <"$TEST_ROOT"/a.json '.[2].outputs.second')" ]] + +delete + +# Failure with 2.28 encountered in CI +requireDaemonNewerThan "2.29" + +# Substitue just the first output +# +# This derivation is the same after normalization, so we should get +# early cut-off, and thus a chance to download just the output we want +# rather than building more +buildViaSubstitute use-a-prime-more-outputs^first + +# Should only fetch the output we asked for +[[ -d "$(jq -r <"$TEST_ROOT"/a.json '.[0].outputs.out')" ]] +[[ -f "$(jq -r <"$TEST_ROOT"/a.json '.[2].outputs.first')" ]] + +# Output should *not* be here, this is the bug +[[ -e "$(jq -r <"$TEST_ROOT"/a.json '.[2].outputs.second')" ]] +skipTest "bug is not yet fixed" diff --git a/tests/functional/ca/meson.build b/tests/functional/ca/meson.build index 06aa19b22..b1912fd86 100644 --- a/tests/functional/ca/meson.build +++ b/tests/functional/ca/meson.build @@ -19,6 +19,7 @@ suites += { 'eval-store.sh', 'gc.sh', 'import-from-derivation.sh', + 'issue-13247.sh', 'multiple-outputs.sh', 'new-build-cmd.sh', 'nix-copy.sh', From 426a72c9cf0ae513a1254943dc3efd9d71ebb549 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sun, 28 Sep 2025 16:29:12 +0300 Subject: [PATCH 116/332] libstore: Make all StoreConfig::getReference implementations return store parameters These stragglers have been accidentally left out when implementing the StoreConfig::getReference. Also HttpBinaryCacheStore::getReference now returns the actual store parameters, not the cacheUri parameters. --- src/libstore-tests/http-binary-cache-store.cc | 16 +++++++++++++++ src/libstore-tests/uds-remote-store.cc | 20 +++++++++++++++++++ src/libstore/http-binary-cache-store.cc | 2 +- src/libstore/include/nix/store/dummy-store.hh | 1 + src/libstore/s3-binary-cache-store.cc | 1 + src/libstore/uds-remote-store.cc | 6 +++++- 6 files changed, 44 insertions(+), 2 deletions(-) diff --git a/src/libstore-tests/http-binary-cache-store.cc b/src/libstore-tests/http-binary-cache-store.cc index 0e3be4ced..4b3754a1f 100644 --- a/src/libstore-tests/http-binary-cache-store.cc +++ b/src/libstore-tests/http-binary-cache-store.cc @@ -18,4 +18,20 @@ TEST(HttpBinaryCacheStore, constructConfigNoTrailingSlash) EXPECT_EQ(config.cacheUri.to_string(), "https://foo.bar.baz/a/b"); } +TEST(HttpBinaryCacheStore, constructConfigWithParams) +{ + StoreConfig::Params params{{"compression", "xz"}}; + HttpBinaryCacheStoreConfig config{"https", "foo.bar.baz/a/b/", params}; + EXPECT_EQ(config.cacheUri.to_string(), "https://foo.bar.baz/a/b"); + EXPECT_EQ(config.getReference().params, params); +} + +TEST(HttpBinaryCacheStore, constructConfigWithParamsAndUrlWithParams) +{ + StoreConfig::Params params{{"compression", "xz"}}; + HttpBinaryCacheStoreConfig config{"https", "foo.bar.baz/a/b?some-param=some-value", params}; + EXPECT_EQ(config.cacheUri.to_string(), "https://foo.bar.baz/a/b?some-param=some-value"); + EXPECT_EQ(config.getReference().params, params); +} + } // namespace nix diff --git a/src/libstore-tests/uds-remote-store.cc b/src/libstore-tests/uds-remote-store.cc index 11e6b04a3..415dfc4ac 100644 --- a/src/libstore-tests/uds-remote-store.cc +++ b/src/libstore-tests/uds-remote-store.cc @@ -22,4 +22,24 @@ TEST(UDSRemoteStore, constructConfig_to_string) EXPECT_EQ(config.getReference().to_string(), "daemon"); } +TEST(UDSRemoteStore, constructConfigWithParams) +{ + StoreConfig::Params params{{"max-connections", "1"}}; + UDSRemoteStoreConfig config{"unix", "/tmp/socket", params}; + auto storeReference = config.getReference(); + 
EXPECT_EQ(storeReference.to_string(), "unix:///tmp/socket?max-connections=1"); + EXPECT_EQ(storeReference.render(/*withParams=*/false), "unix:///tmp/socket"); + EXPECT_EQ(storeReference.params, params); +} + +TEST(UDSRemoteStore, constructConfigWithParamsNoPath) +{ + StoreConfig::Params params{{"max-connections", "1"}}; + UDSRemoteStoreConfig config{"unix", "", params}; + auto storeReference = config.getReference(); + EXPECT_EQ(storeReference.to_string(), "daemon?max-connections=1"); + EXPECT_EQ(storeReference.render(/*withParams=*/false), "daemon"); + EXPECT_EQ(storeReference.params, params); +} + } // namespace nix diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc index 7737389a3..6922c0f69 100644 --- a/src/libstore/http-binary-cache-store.cc +++ b/src/libstore/http-binary-cache-store.cc @@ -39,7 +39,7 @@ StoreReference HttpBinaryCacheStoreConfig::getReference() const .scheme = cacheUri.scheme, .authority = cacheUri.renderAuthorityAndPath(), }, - .params = cacheUri.query, + .params = getQueryParams(), }; } diff --git a/src/libstore/include/nix/store/dummy-store.hh b/src/libstore/include/nix/store/dummy-store.hh index 4898e8a5b..47e3375cd 100644 --- a/src/libstore/include/nix/store/dummy-store.hh +++ b/src/libstore/include/nix/store/dummy-store.hh @@ -48,6 +48,7 @@ struct DummyStoreConfig : public std::enable_shared_from_this, StoreReference::Specified{ .scheme = *uriSchemes().begin(), }, + .params = getQueryParams(), }; } }; diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc index 4ad09aff2..b70f04be7 100644 --- a/src/libstore/s3-binary-cache-store.cc +++ b/src/libstore/s3-binary-cache-store.cc @@ -262,6 +262,7 @@ StoreReference S3BinaryCacheStoreConfig::getReference() const .scheme = *uriSchemes().begin(), .authority = bucketName, }, + .params = getQueryParams(), }; } diff --git a/src/libstore/uds-remote-store.cc b/src/libstore/uds-remote-store.cc index 9725fe8a0..6106a99ce 100644 --- a/src/libstore/uds-remote-store.cc +++ b/src/libstore/uds-remote-store.cc @@ -61,13 +61,17 @@ StoreReference UDSRemoteStoreConfig::getReference() const * to be more compatible with older versions of nix. Some tooling out there * tries hard to parse store references and it might not be able to handle "unix://". */ if (path == settings.nixDaemonSocketFile) - return {.variant = StoreReference::Daemon{}}; + return { + .variant = StoreReference::Daemon{}, + .params = getQueryParams(), + }; return { .variant = StoreReference::Specified{ .scheme = *uriSchemes().begin(), .authority = path, }, + .params = getQueryParams(), }; } From 01b2037bc077d3a9567a8e6911ac53985cb280ad Mon Sep 17 00:00:00 2001 From: John Ericson Date: Wed, 24 Sep 2025 11:55:21 -0400 Subject: [PATCH 117/332] Minimize the use of C Macros for characterization tests Fewer macros is better! Introduce a new `JsonChacterizationTest` mixin class to help with this. Also, avoid some needless copies with `GetParam`. Part of my effort shoring up the JSON formats with #13570. 
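For illustration, a converted fixture ends up looking roughly like the
following sketch (sketch only; the diff below has the precise types — the
mixin's template parameter and the std::pair param type are inferred from
the call sites):

    struct RealisationJsonTest : RealisationTest,
                                 JsonCharacterizationTest<Realisation>,
                                 ::testing::WithParamInterface<std::pair<std::string_view, Realisation>>
    {};

    TEST_P(RealisationJsonTest, from_json)
    {
        // readJsonTest() parses the <name>.json golden file and compares
        // the decoded value against `expected`
        const auto & [name, expected] = GetParam();
        readJsonTest(name, expected);
    }

    TEST_P(RealisationJsonTest, to_json)
    {
        // writeJsonTest() serialises `value` and characterizes the result
        // against the same <name>.json golden file
        const auto & [name, value] = GetParam();
        writeJsonTest(name, value);
    }

Each test case is then registered as a plain (name, value) pair via
INSTANTIATE_TEST_SUITE_P, instead of one macro invocation per case.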
--- src/libexpr-tests/primops.cc | 6 +- src/libfetchers-tests/public-key.cc | 55 ++-- ...ivationDeps.drv => dyn-dep-derivation.drv} | 0 ...ationDeps.json => dyn-dep-derivation.json} | 0 .../{simple.drv => simple-derivation.drv} | 0 .../{simple.json => simple-derivation.json} | 0 src/libstore-tests/derivation.cc | 299 ++++++++++-------- src/libstore-tests/outputs-spec.cc | 78 +++-- src/libstore-tests/path.cc | 49 +-- src/libstore-tests/realisation.cc | 20 +- src/libstore/derivations.cc | 32 +- src/libstore/include/nix/store/derivations.hh | 10 +- .../nix/util/tests/json-characterization.hh | 54 ++++ .../include/nix/util/tests/meson.build | 1 + src/libutil-tests/sort.cc | 12 +- 15 files changed, 364 insertions(+), 252 deletions(-) rename src/libstore-tests/data/derivation/{dynDerivationDeps.drv => dyn-dep-derivation.drv} (100%) rename src/libstore-tests/data/derivation/{dynDerivationDeps.json => dyn-dep-derivation.json} (100%) rename src/libstore-tests/data/derivation/{simple.drv => simple-derivation.drv} (100%) rename src/libstore-tests/data/derivation/{simple.json => simple-derivation.json} (100%) create mode 100644 src/libutil-test-support/include/nix/util/tests/json-characterization.hh diff --git a/src/libexpr-tests/primops.cc b/src/libexpr-tests/primops.cc index aa4ef5e21..74d676844 100644 --- a/src/libexpr-tests/primops.cc +++ b/src/libexpr-tests/primops.cc @@ -642,7 +642,7 @@ class ToStringPrimOpTest : public PrimOpTest, TEST_P(ToStringPrimOpTest, toString) { - const auto [input, output] = GetParam(); + const auto & [input, output] = GetParam(); auto v = eval(input); ASSERT_THAT(v, IsStringEq(output)); } @@ -798,7 +798,7 @@ class CompareVersionsPrimOpTest : public PrimOpTest, TEST_P(CompareVersionsPrimOpTest, compareVersions) { - auto [expression, expectation] = GetParam(); + const auto & [expression, expectation] = GetParam(); auto v = eval(expression); ASSERT_THAT(v, IsIntEq(expectation)); } @@ -834,7 +834,7 @@ class ParseDrvNamePrimOpTest TEST_P(ParseDrvNamePrimOpTest, parseDrvName) { - auto [input, expectedName, expectedVersion] = GetParam(); + const auto & [input, expectedName, expectedVersion] = GetParam(); const auto expr = fmt("builtins.parseDrvName \"%1%\"", input); auto v = eval(expr); ASSERT_THAT(v, IsAttrsOfSize(2)); diff --git a/src/libfetchers-tests/public-key.cc b/src/libfetchers-tests/public-key.cc index 97a232447..2991223f6 100644 --- a/src/libfetchers-tests/public-key.cc +++ b/src/libfetchers-tests/public-key.cc @@ -1,14 +1,14 @@ #include #include "nix/fetchers/fetchers.hh" #include "nix/util/json-utils.hh" -#include -#include "nix/util/tests/characterization.hh" +#include "nix/util/tests/json-characterization.hh" namespace nix { using nlohmann::json; -class PublicKeyTest : public CharacterizationTest +class PublicKeyTest : public JsonCharacterizationTest, + public ::testing::WithParamInterface> { std::filesystem::path unitTestData = getUnitTestData() / "public-key"; @@ -19,30 +19,35 @@ public: } }; -#define TEST_JSON(FIXTURE, NAME, VAL) \ - TEST_F(FIXTURE, PublicKey_##NAME##_from_json) \ - { \ - readTest(#NAME ".json", [&](const auto & encoded_) { \ - fetchers::PublicKey expected{VAL}; \ - fetchers::PublicKey got = nlohmann::json::parse(encoded_); \ - ASSERT_EQ(got, expected); \ - }); \ - } \ - \ - TEST_F(FIXTURE, PublicKey_##NAME##_to_json) \ - { \ - writeTest( \ - #NAME ".json", \ - [&]() -> json { return nlohmann::json(fetchers::PublicKey{VAL}); }, \ - [](const auto & file) { return json::parse(readFile(file)); }, \ - [](const auto & file, const auto & got) { 
return writeFile(file, got.dump(2) + "\n"); }); \ - } +TEST_P(PublicKeyTest, from_json) +{ + const auto & [name, expected] = GetParam(); + readJsonTest(name, expected); +} -TEST_JSON(PublicKeyTest, simple, (fetchers::PublicKey{.type = "ssh-rsa", .key = "ABCDE"})) +TEST_P(PublicKeyTest, to_json) +{ + const auto & [name, value] = GetParam(); + writeJsonTest(name, value); +} -TEST_JSON(PublicKeyTest, defaultType, fetchers::PublicKey{.key = "ABCDE"}) - -#undef TEST_JSON +INSTANTIATE_TEST_SUITE_P( + PublicKeyJSON, + PublicKeyTest, + ::testing::Values( + std::pair{ + "simple", + fetchers::PublicKey{ + .type = "ssh-rsa", + .key = "ABCDE", + }, + }, + std::pair{ + "defaultType", + fetchers::PublicKey{ + .key = "ABCDE", + }, + })); TEST_F(PublicKeyTest, PublicKey_noRoundTrip_from_json) { diff --git a/src/libstore-tests/data/derivation/dynDerivationDeps.drv b/src/libstore-tests/data/derivation/dyn-dep-derivation.drv similarity index 100% rename from src/libstore-tests/data/derivation/dynDerivationDeps.drv rename to src/libstore-tests/data/derivation/dyn-dep-derivation.drv diff --git a/src/libstore-tests/data/derivation/dynDerivationDeps.json b/src/libstore-tests/data/derivation/dyn-dep-derivation.json similarity index 100% rename from src/libstore-tests/data/derivation/dynDerivationDeps.json rename to src/libstore-tests/data/derivation/dyn-dep-derivation.json diff --git a/src/libstore-tests/data/derivation/simple.drv b/src/libstore-tests/data/derivation/simple-derivation.drv similarity index 100% rename from src/libstore-tests/data/derivation/simple.drv rename to src/libstore-tests/data/derivation/simple-derivation.drv diff --git a/src/libstore-tests/data/derivation/simple.json b/src/libstore-tests/data/derivation/simple-derivation.json similarity index 100% rename from src/libstore-tests/data/derivation/simple.json rename to src/libstore-tests/data/derivation/simple-derivation.json diff --git a/src/libstore-tests/derivation.cc b/src/libstore-tests/derivation.cc index 35992c5ec..65a5d011d 100644 --- a/src/libstore-tests/derivation.cc +++ b/src/libstore-tests/derivation.cc @@ -5,13 +5,13 @@ #include "nix/store/derivations.hh" #include "nix/store/tests/libstore.hh" -#include "nix/util/tests/characterization.hh" +#include "nix/util/tests/json-characterization.hh" namespace nix { using nlohmann::json; -class DerivationTest : public CharacterizationTest, public LibStoreTest +class DerivationTest : public virtual CharacterizationTest, public LibStoreTest { std::filesystem::path unitTestData = getUnitTestData() / "derivation"; @@ -66,146 +66,183 @@ TEST_F(DynDerivationTest, BadATerm_oldVersionDynDeps) FormatError); } -#define TEST_JSON(FIXTURE, NAME, VAL, DRV_NAME, OUTPUT_NAME) \ - TEST_F(FIXTURE, DerivationOutput_##NAME##_from_json) \ - { \ - readTest("output-" #NAME ".json", [&](const auto & encoded_) { \ - auto encoded = json::parse(encoded_); \ - DerivationOutput got = DerivationOutput::fromJSON(DRV_NAME, OUTPUT_NAME, encoded, mockXpSettings); \ - DerivationOutput expected{VAL}; \ - ASSERT_EQ(got, expected); \ - }); \ - } \ - \ - TEST_F(FIXTURE, DerivationOutput_##NAME##_to_json) \ - { \ - writeTest( \ - "output-" #NAME ".json", \ - [&]() -> json { return DerivationOutput{(VAL)}.toJSON((DRV_NAME), (OUTPUT_NAME)); }, \ - [](const auto & file) { return json::parse(readFile(file)); }, \ - [](const auto & file, const auto & got) { return writeFile(file, got.dump(2) + "\n"); }); \ +#define MAKE_OUTPUT_JSON_TEST_P(FIXTURE) \ + TEST_P(FIXTURE, from_json) \ + { \ + const auto & [name, expected] = GetParam(); \ 
+ /* Don't use readJsonTest because we want to check experimental \ + features. */ \ + readTest(Path{"output-"} + name + ".json", [&](const auto & encoded_) { \ + json j = json::parse(encoded_); \ + DerivationOutput got = DerivationOutput::fromJSON(j, mockXpSettings); \ + ASSERT_EQ(got, expected); \ + }); \ + } \ + \ + TEST_P(FIXTURE, to_json) \ + { \ + const auto & [name, value] = GetParam(); \ + writeJsonTest("output-" + name, value); \ } -TEST_JSON( - DerivationTest, - inputAddressed, - (DerivationOutput::InputAddressed{ - .path = store->parseStorePath("/nix/store/c015dhfh5l0lp6wxyvdn7bmwhbbr6hr9-drv-name-output-name"), - }), - "drv-name", - "output-name") +struct DerivationOutputJsonTest : DerivationTest, + JsonCharacterizationTest, + ::testing::WithParamInterface> +{}; -TEST_JSON( - DerivationTest, - caFixedFlat, - (DerivationOutput::CAFixed{ - .ca = - { - .method = ContentAddressMethod::Raw::Flat, - .hash = Hash::parseAnyPrefixed("sha256-iUUXyRY8iW7DGirb0zwGgf1fRbLA7wimTJKgP7l/OQ8="), - }, - }), - "drv-name", - "output-name") +MAKE_OUTPUT_JSON_TEST_P(DerivationOutputJsonTest) -TEST_JSON( - DerivationTest, - caFixedNAR, - (DerivationOutput::CAFixed{ - .ca = - { +INSTANTIATE_TEST_SUITE_P( + DerivationOutputJSON, + DerivationOutputJsonTest, + ::testing::Values( + std::pair{ + "inputAddressed", + DerivationOutput{DerivationOutput::InputAddressed{ + .path = StorePath{"c015dhfh5l0lp6wxyvdn7bmwhbbr6hr9-drv-name-output-name"}, + }}, + }, + std::pair{ + "caFixedFlat", + DerivationOutput{DerivationOutput::CAFixed{ + .ca = + { + .method = ContentAddressMethod::Raw::Flat, + .hash = Hash::parseAnyPrefixed("sha256-iUUXyRY8iW7DGirb0zwGgf1fRbLA7wimTJKgP7l/OQ8="), + }, + }}, + }, + std::pair{ + "caFixedNAR", + DerivationOutput{DerivationOutput::CAFixed{ + .ca = + { + .method = ContentAddressMethod::Raw::NixArchive, + .hash = Hash::parseAnyPrefixed("sha256-iUUXyRY8iW7DGirb0zwGgf1fRbLA7wimTJKgP7l/OQ8="), + }, + }}, + }, + std::pair{ + "deferred", + DerivationOutput{DerivationOutput::Deferred{}}, + })); + +struct DynDerivationOutputJsonTest : DynDerivationTest, + JsonCharacterizationTest, + ::testing::WithParamInterface> +{}; + +MAKE_OUTPUT_JSON_TEST_P(DynDerivationOutputJsonTest); + +INSTANTIATE_TEST_SUITE_P( + DynDerivationOutputJSON, + DynDerivationOutputJsonTest, + ::testing::Values( + std::pair{ + "caFixedText", + DerivationOutput{DerivationOutput::CAFixed{ + .ca = + { + .method = ContentAddressMethod::Raw::Text, + .hash = Hash::parseAnyPrefixed("sha256-iUUXyRY8iW7DGirb0zwGgf1fRbLA7wimTJKgP7l/OQ8="), + }, + }}, + })); + +struct CaDerivationOutputJsonTest : CaDerivationTest, + JsonCharacterizationTest, + ::testing::WithParamInterface> +{}; + +MAKE_OUTPUT_JSON_TEST_P(CaDerivationOutputJsonTest); + +INSTANTIATE_TEST_SUITE_P( + CaDerivationOutputJSON, + CaDerivationOutputJsonTest, + ::testing::Values( + std::pair{ + "caFloating", + DerivationOutput{DerivationOutput::CAFloating{ .method = ContentAddressMethod::Raw::NixArchive, - .hash = Hash::parseAnyPrefixed("sha256-iUUXyRY8iW7DGirb0zwGgf1fRbLA7wimTJKgP7l/OQ8="), - }, - }), - "drv-name", - "output-name") + .hashAlgo = HashAlgorithm::SHA256, + }}, + })); -TEST_JSON( - DynDerivationTest, - caFixedText, - (DerivationOutput::CAFixed{ - .ca = - { - .method = ContentAddressMethod::Raw::Text, - .hash = Hash::parseAnyPrefixed("sha256-iUUXyRY8iW7DGirb0zwGgf1fRbLA7wimTJKgP7l/OQ8="), - }, - }), - "drv-name", - "output-name") +struct ImpureDerivationOutputJsonTest : ImpureDerivationTest, + JsonCharacterizationTest, + ::testing::WithParamInterface> +{}; -TEST_JSON( 
- CaDerivationTest, - caFloating, - (DerivationOutput::CAFloating{ - .method = ContentAddressMethod::Raw::NixArchive, - .hashAlgo = HashAlgorithm::SHA256, - }), - "drv-name", - "output-name") +MAKE_OUTPUT_JSON_TEST_P(ImpureDerivationOutputJsonTest); -TEST_JSON(DerivationTest, deferred, DerivationOutput::Deferred{}, "drv-name", "output-name") +INSTANTIATE_TEST_SUITE_P( + ImpureDerivationOutputJSON, + ImpureDerivationOutputJsonTest, + ::testing::Values( + std::pair{ + "impure", + DerivationOutput{DerivationOutput::Impure{ + .method = ContentAddressMethod::Raw::NixArchive, + .hashAlgo = HashAlgorithm::SHA256, + }}, + })); -TEST_JSON( - ImpureDerivationTest, - impure, - (DerivationOutput::Impure{ - .method = ContentAddressMethod::Raw::NixArchive, - .hashAlgo = HashAlgorithm::SHA256, - }), - "drv-name", - "output-name") +#undef MAKE_OUTPUT_JSON_TEST_P -#undef TEST_JSON - -#define TEST_JSON(FIXTURE, NAME, VAL) \ - TEST_F(FIXTURE, Derivation_##NAME##_from_json) \ - { \ - readTest(#NAME ".json", [&](const auto & encoded_) { \ - auto encoded = json::parse(encoded_); \ - Derivation expected{VAL}; \ - Derivation got = Derivation::fromJSON(encoded, mockXpSettings); \ - ASSERT_EQ(got, expected); \ - }); \ - } \ - \ - TEST_F(FIXTURE, Derivation_##NAME##_to_json) \ - { \ - writeTest( \ - #NAME ".json", \ - [&]() -> json { return Derivation{VAL}.toJSON(); }, \ - [](const auto & file) { return json::parse(readFile(file)); }, \ - [](const auto & file, const auto & got) { return writeFile(file, got.dump(2) + "\n"); }); \ +#define MAKE_TEST_P(FIXTURE) \ + TEST_P(FIXTURE, from_json) \ + { \ + const auto & drv = GetParam(); \ + /* Don't use readJsonTest because we want to check experimental \ + features. */ \ + readTest(drv.name + ".json", [&](const auto & encoded_) { \ + auto encoded = json::parse(encoded_); \ + Derivation got = Derivation::fromJSON(encoded, mockXpSettings); \ + ASSERT_EQ(got, drv); \ + }); \ + } \ + \ + TEST_P(FIXTURE, to_json) \ + { \ + const auto & drv = GetParam(); \ + writeJsonTest(drv.name, drv); \ + } \ + \ + TEST_P(FIXTURE, from_aterm) \ + { \ + const auto & drv = GetParam(); \ + readTest(drv.name + ".drv", [&](auto encoded) { \ + auto got = parseDerivation(*store, std::move(encoded), drv.name, mockXpSettings); \ + ASSERT_EQ(got.toJSON(), drv.toJSON()); \ + ASSERT_EQ(got, drv); \ + }); \ + } \ + \ + TEST_P(FIXTURE, to_aterm) \ + { \ + const auto & drv = GetParam(); \ + writeTest(drv.name + ".drv", [&]() -> std::string { return drv.unparse(*store, false); }); \ } -#define TEST_ATERM(FIXTURE, NAME, VAL, DRV_NAME) \ - TEST_F(FIXTURE, Derivation_##NAME##_from_aterm) \ - { \ - readTest(#NAME ".drv", [&](auto encoded) { \ - Derivation expected{VAL}; \ - auto got = parseDerivation(*store, std::move(encoded), DRV_NAME, mockXpSettings); \ - ASSERT_EQ(got.toJSON(), expected.toJSON()); \ - ASSERT_EQ(got, expected); \ - }); \ - } \ - \ - TEST_F(FIXTURE, Derivation_##NAME##_to_aterm) \ - { \ - writeTest(#NAME ".drv", [&]() -> std::string { return (VAL).unparse(*store, false); }); \ - } +struct DerivationJsonAtermTest : DerivationTest, + JsonCharacterizationTest, + ::testing::WithParamInterface +{}; -Derivation makeSimpleDrv(const Store & store) +MAKE_TEST_P(DerivationJsonAtermTest); + +Derivation makeSimpleDrv() { Derivation drv; drv.name = "simple-derivation"; drv.inputSrcs = { - store.parseStorePath("/nix/store/c015dhfh5l0lp6wxyvdn7bmwhbbr6hr9-dep1"), + StorePath("c015dhfh5l0lp6wxyvdn7bmwhbbr6hr9-dep1"), }; drv.inputDrvs = { .map = { { - 
store.parseStorePath("/nix/store/c015dhfh5l0lp6wxyvdn7bmwhbbr6hr9-dep2.drv"), + StorePath("c015dhfh5l0lp6wxyvdn7bmwhbbr6hr9-dep2.drv"), { .value = { @@ -231,22 +268,27 @@ Derivation makeSimpleDrv(const Store & store) return drv; } -TEST_JSON(DerivationTest, simple, makeSimpleDrv(*store)) +INSTANTIATE_TEST_SUITE_P(DerivationJSONATerm, DerivationJsonAtermTest, ::testing::Values(makeSimpleDrv())); -TEST_ATERM(DerivationTest, simple, makeSimpleDrv(*store), "simple-derivation") +struct DynDerivationJsonAtermTest : DynDerivationTest, + JsonCharacterizationTest, + ::testing::WithParamInterface +{}; -Derivation makeDynDepDerivation(const Store & store) +MAKE_TEST_P(DynDerivationJsonAtermTest); + +Derivation makeDynDepDerivation() { Derivation drv; drv.name = "dyn-dep-derivation"; drv.inputSrcs = { - store.parseStorePath("/nix/store/c015dhfh5l0lp6wxyvdn7bmwhbbr6hr9-dep1"), + StorePath{"c015dhfh5l0lp6wxyvdn7bmwhbbr6hr9-dep1"}, }; drv.inputDrvs = { .map = { { - store.parseStorePath("/nix/store/c015dhfh5l0lp6wxyvdn7bmwhbbr6hr9-dep2.drv"), + StorePath{"c015dhfh5l0lp6wxyvdn7bmwhbbr6hr9-dep2.drv"}, DerivedPathMap::ChildNode{ .value = { @@ -293,11 +335,8 @@ Derivation makeDynDepDerivation(const Store & store) return drv; } -TEST_JSON(DynDerivationTest, dynDerivationDeps, makeDynDepDerivation(*store)) +INSTANTIATE_TEST_SUITE_P(DynDerivationJSONATerm, DynDerivationJsonAtermTest, ::testing::Values(makeDynDepDerivation())); -TEST_ATERM(DynDerivationTest, dynDerivationDeps, makeDynDepDerivation(*store), "dyn-dep-derivation") - -#undef TEST_JSON -#undef TEST_ATERM +#undef MAKE_TEST_P } // namespace nix diff --git a/src/libstore-tests/outputs-spec.cc b/src/libstore-tests/outputs-spec.cc index 7b3fc8f45..1fac222fc 100644 --- a/src/libstore-tests/outputs-spec.cc +++ b/src/libstore-tests/outputs-spec.cc @@ -3,12 +3,11 @@ #include #include "nix/store/tests/outputs-spec.hh" - -#include "nix/util/tests/characterization.hh" +#include "nix/util/tests/json-characterization.hh" namespace nix { -class OutputsSpecTest : public CharacterizationTest +class OutputsSpecTest : public virtual CharacterizationTest { std::filesystem::path unitTestData = getUnitTestData() / "outputs-spec"; @@ -20,7 +19,7 @@ public: } }; -class ExtendedOutputsSpecTest : public CharacterizationTest +class ExtendedOutputsSpecTest : public virtual CharacterizationTest { std::filesystem::path unitTestData = getUnitTestData() / "outputs-spec" / "extended"; @@ -214,40 +213,49 @@ TEST_F(ExtendedOutputsSpecTest, many_carrot) ASSERT_EQ(std::string{prefix} + expected.to_string(), "foo^bar^bin,out"); } -#define TEST_JSON(FIXTURE, TYPE, NAME, VAL) \ - static const TYPE FIXTURE##_##NAME = VAL; \ - \ - TEST_F(FIXTURE, NAME##_from_json) \ - { \ - using namespace nlohmann; \ - \ - readTest(#NAME ".json", [&](const auto & encoded_) { \ - auto encoded = json::parse(encoded_); \ - TYPE got = adl_serializer::from_json(encoded); \ - ASSERT_EQ(got, FIXTURE##_##NAME); \ - }); \ - } \ - \ - TEST_F(FIXTURE, NAME##_to_json) \ - { \ - using namespace nlohmann; \ - \ - writeTest( \ - #NAME ".json", \ - [&]() -> json { return static_cast(FIXTURE##_##NAME); }, \ - [](const auto & file) { return json::parse(readFile(file)); }, \ - [](const auto & file, const auto & got) { return writeFile(file, got.dump(2) + "\n"); }); \ +#define MAKE_TEST_P(FIXTURE, TYPE) \ + TEST_P(FIXTURE, from_json) \ + { \ + const auto & [name, value] = GetParam(); \ + readJsonTest(name, value); \ + } \ + \ + TEST_P(FIXTURE, to_json) \ + { \ + const auto & [name, value] = GetParam(); \ + writeJsonTest(name, 
value); \ } -TEST_JSON(OutputsSpecTest, OutputsSpec, all, OutputsSpec::All{}) -TEST_JSON(OutputsSpecTest, OutputsSpec, name, OutputsSpec::Names{"a"}) -TEST_JSON(OutputsSpecTest, OutputsSpec, names, (OutputsSpec::Names{"a", "b"})) +struct OutputsSpecJsonTest : OutputsSpecTest, + JsonCharacterizationTest, + ::testing::WithParamInterface> +{}; -TEST_JSON(ExtendedOutputsSpecTest, ExtendedOutputsSpec, def, ExtendedOutputsSpec::Default{}) -TEST_JSON(ExtendedOutputsSpecTest, ExtendedOutputsSpec, all, ExtendedOutputsSpec::Explicit{OutputsSpec::All{}}) -TEST_JSON(ExtendedOutputsSpecTest, ExtendedOutputsSpec, name, ExtendedOutputsSpec::Explicit{OutputsSpec::Names{"a"}}) -TEST_JSON( - ExtendedOutputsSpecTest, ExtendedOutputsSpec, names, (ExtendedOutputsSpec::Explicit{OutputsSpec::Names{"a", "b"}})) +MAKE_TEST_P(OutputsSpecJsonTest, OutputsSpec); + +INSTANTIATE_TEST_SUITE_P( + OutputsSpecJSON, + OutputsSpecJsonTest, + ::testing::Values( + std::pair{"all", OutputsSpec{OutputsSpec::All{}}}, + std::pair{"name", OutputsSpec{OutputsSpec::Names{"a"}}}, + std::pair{"names", OutputsSpec{OutputsSpec::Names{"a", "b"}}})); + +struct ExtendedOutputsSpecJsonTest : ExtendedOutputsSpecTest, + JsonCharacterizationTest, + ::testing::WithParamInterface> +{}; + +MAKE_TEST_P(ExtendedOutputsSpecJsonTest, ExtendedOutputsSpec); + +INSTANTIATE_TEST_SUITE_P( + ExtendedOutputsSpecJSON, + ExtendedOutputsSpecJsonTest, + ::testing::Values( + std::pair{"def", ExtendedOutputsSpec{ExtendedOutputsSpec::Default{}}}, + std::pair{"all", ExtendedOutputsSpec{ExtendedOutputsSpec::Explicit{OutputsSpec::All{}}}}, + std::pair{"name", ExtendedOutputsSpec{ExtendedOutputsSpec::Explicit{OutputsSpec::Names{"a"}}}}, + std::pair{"names", ExtendedOutputsSpec{ExtendedOutputsSpec::Explicit{OutputsSpec::Names{"a", "b"}}}})); #undef TEST_JSON diff --git a/src/libstore-tests/path.cc b/src/libstore-tests/path.cc index b6a1a541f..eb860a34d 100644 --- a/src/libstore-tests/path.cc +++ b/src/libstore-tests/path.cc @@ -7,7 +7,7 @@ #include "nix/store/path-regex.hh" #include "nix/store/store-api.hh" -#include "nix/util/tests/characterization.hh" +#include "nix/util/tests/json-characterization.hh" #include "nix/store/tests/libstore.hh" #include "nix/store/tests/path.hh" @@ -16,7 +16,7 @@ namespace nix { #define STORE_DIR "/nix/store/" #define HASH_PART "g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q" -class StorePathTest : public CharacterizationTest, public LibStoreTest +class StorePathTest : public virtual CharacterizationTest, public LibStoreTest { std::filesystem::path unitTestData = getUnitTestData() / "store-path"; @@ -149,27 +149,30 @@ RC_GTEST_FIXTURE_PROP(StorePathTest, prop_check_regex_eq_parse, ()) using nlohmann::json; -#define TEST_JSON(FIXTURE, NAME, VAL) \ - static const StorePath NAME = VAL; \ - \ - TEST_F(FIXTURE, NAME##_from_json) \ - { \ - readTest(#NAME ".json", [&](const auto & encoded_) { \ - auto encoded = json::parse(encoded_); \ - StorePath got = static_cast(encoded); \ - ASSERT_EQ(got, NAME); \ - }); \ - } \ - \ - TEST_F(FIXTURE, NAME##_to_json) \ - { \ - writeTest( \ - #NAME ".json", \ - [&]() -> json { return static_cast(NAME); }, \ - [](const auto & file) { return json::parse(readFile(file)); }, \ - [](const auto & file, const auto & got) { return writeFile(file, got.dump(2) + "\n"); }); \ - } +struct StorePathJsonTest : StorePathTest, + JsonCharacterizationTest, + ::testing::WithParamInterface> +{}; -TEST_JSON(StorePathTest, simple, StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"}); +TEST_P(StorePathJsonTest, from_json) +{ + auto & [name, 
expected] = GetParam(); + readJsonTest(name, expected); +} + +TEST_P(StorePathJsonTest, to_json) +{ + auto & [name, value] = GetParam(); + writeJsonTest(name, value); +} + +INSTANTIATE_TEST_SUITE_P( + StorePathJSON, + StorePathJsonTest, + ::testing::Values( + std::pair{ + "simple", + StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"}, + })); } // namespace nix diff --git a/src/libstore-tests/realisation.cc b/src/libstore-tests/realisation.cc index 2e4d592dc..a5a5bee50 100644 --- a/src/libstore-tests/realisation.cc +++ b/src/libstore-tests/realisation.cc @@ -6,12 +6,12 @@ #include "nix/store/store-api.hh" -#include "nix/util/tests/characterization.hh" +#include "nix/util/tests/json-characterization.hh" #include "nix/store/tests/libstore.hh" namespace nix { -class RealisationTest : public CharacterizationTest, public LibStoreTest +class RealisationTest : public JsonCharacterizationTest, public LibStoreTest { std::filesystem::path unitTestData = getUnitTestData() / "realisation"; @@ -34,22 +34,14 @@ struct RealisationJsonTest : RealisationTest, ::testing::WithParamInterface(encoded); - ASSERT_EQ(got, expected); - }); + const auto & [name, expected] = GetParam(); + readJsonTest(name, expected); } TEST_P(RealisationJsonTest, to_json) { - auto [name, value] = GetParam(); - writeTest( - name + ".json", - [&]() -> json { return static_cast(value); }, - [](const auto & file) { return json::parse(readFile(file)); }, - [](const auto & file, const auto & got) { return writeFile(file, got.dump(2) + "\n"); }); + const auto & [name, value] = GetParam(); + writeJsonTest(name, value); } INSTANTIATE_TEST_SUITE_P( diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc index a0c709791..6d7dbc99c 100644 --- a/src/libstore/derivations.cc +++ b/src/libstore/derivations.cc @@ -1257,14 +1257,18 @@ void Derivation::checkInvariants(Store & store, const StorePath & drvPath) const const Hash impureOutputHash = hashString(HashAlgorithm::SHA256, "impure"); -nlohmann::json DerivationOutput::toJSON(std::string_view drvName, OutputNameView outputName) const +nlohmann::json DerivationOutput::toJSON() const { nlohmann::json res = nlohmann::json::object(); std::visit( overloaded{ [&](const DerivationOutput::InputAddressed & doi) { res["path"] = doi.path; }, [&](const DerivationOutput::CAFixed & dof) { - // res["path"] = dof.path(store, drvName, outputName); + /* it would be nice to output the path for user convenience, but + this would require us to know the store dir. */ +#if 0 + res["path"] = dof.path(store, drvName, outputName); +#endif res["method"] = std::string{dof.ca.method.render()}; res["hashAlgo"] = printHashAlgo(dof.ca.hash.algo); res["hash"] = dof.ca.hash.to_string(HashFormat::Base16, false); @@ -1285,11 +1289,8 @@ nlohmann::json DerivationOutput::toJSON(std::string_view drvName, OutputNameView return res; } -DerivationOutput DerivationOutput::fromJSON( - std::string_view drvName, - OutputNameView outputName, - const nlohmann::json & _json, - const ExperimentalFeatureSettings & xpSettings) +DerivationOutput +DerivationOutput::fromJSON(const nlohmann::json & _json, const ExperimentalFeatureSettings & xpSettings) { std::set keys; auto & json = getObject(_json); @@ -1321,6 +1322,8 @@ DerivationOutput DerivationOutput::fromJSON( .hash = Hash::parseNonSRIUnprefixed(getString(valueAt(json, "hash")), hashAlgo), }, }; + /* We no longer produce this (denormalized) field (for the + reasons described above), so we don't need to check it. 
*/ #if 0 if (dof.path(store, drvName, outputName) != static_cast(valueAt(json, "path"))) throw Error("Path doesn't match derivation output"); @@ -1367,7 +1370,7 @@ nlohmann::json Derivation::toJSON() const nlohmann::json & outputsObj = res["outputs"]; outputsObj = nlohmann::json::object(); for (auto & [outputName, output] : outputs) { - outputsObj[outputName] = output.toJSON(name, outputName); + outputsObj[outputName] = output; } } @@ -1427,8 +1430,7 @@ Derivation Derivation::fromJSON(const nlohmann::json & _json, const Experimental try { auto outputs = getObject(valueAt(json, "outputs")); for (auto & [outputName, output] : outputs) { - res.outputs.insert_or_assign( - outputName, DerivationOutput::fromJSON(res.name, outputName, output, xpSettings)); + res.outputs.insert_or_assign(outputName, DerivationOutput::fromJSON(output, xpSettings)); } } catch (Error & e) { e.addTrace({}, "while reading key 'outputs'"); @@ -1489,6 +1491,16 @@ namespace nlohmann { using namespace nix; +DerivationOutput adl_serializer::from_json(const json & json) +{ + return DerivationOutput::fromJSON(json); +} + +void adl_serializer::to_json(json & json, const DerivationOutput & c) +{ + json = c.toJSON(); +} + Derivation adl_serializer::from_json(const json & json) { return Derivation::fromJSON(json); diff --git a/src/libstore/include/nix/store/derivations.hh b/src/libstore/include/nix/store/derivations.hh index d66bcef2e..0dfb80347 100644 --- a/src/libstore/include/nix/store/derivations.hh +++ b/src/libstore/include/nix/store/derivations.hh @@ -135,15 +135,12 @@ struct DerivationOutput std::optional path(const StoreDirConfig & store, std::string_view drvName, OutputNameView outputName) const; - nlohmann::json toJSON(std::string_view drvName, OutputNameView outputName) const; + nlohmann::json toJSON() const; /** * @param xpSettings Stop-gap to avoid globals during unit tests. */ - static DerivationOutput fromJSON( - std::string_view drvName, - OutputNameView outputName, - const nlohmann::json & json, - const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); + static DerivationOutput + fromJSON(const nlohmann::json & json, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); }; typedef std::map DerivationOutputs; @@ -540,4 +537,5 @@ std::string hashPlaceholder(const OutputNameView outputName); } // namespace nix +JSON_IMPL(nix::DerivationOutput) JSON_IMPL(nix::Derivation) diff --git a/src/libutil-test-support/include/nix/util/tests/json-characterization.hh b/src/libutil-test-support/include/nix/util/tests/json-characterization.hh new file mode 100644 index 000000000..5a38b8e2c --- /dev/null +++ b/src/libutil-test-support/include/nix/util/tests/json-characterization.hh @@ -0,0 +1,54 @@ +#pragma once +///@file + +#include +#include + +#include "nix/util/types.hh" +#include "nix/util/file-system.hh" + +#include "nix/util/tests/characterization.hh" + +namespace nix { + +/** + * Mixin class for writing characterization tests for `nlohmann::json` + * conversions for a given type. 
+ */ +template +struct JsonCharacterizationTest : virtual CharacterizationTest +{ + /** + * Golden test for reading + * + * @param test hook that takes the contents of the file and does the + * actual work + */ + void readJsonTest(PathView testStem, const T & expected) + { + using namespace nlohmann; + readTest(Path{testStem} + ".json", [&](const auto & encodedRaw) { + auto encoded = json::parse(encodedRaw); + T decoded = adl_serializer::from_json(encoded); + ASSERT_EQ(decoded, expected); + }); + } + + /** + * Golden test for writing + * + * @param test hook that produces contents of the file and does the + * actual work + */ + void writeJsonTest(PathView testStem, const T & value) + { + using namespace nlohmann; + writeTest( + Path{testStem} + ".json", + [&]() -> json { return static_cast(value); }, + [](const auto & file) { return json::parse(readFile(file)); }, + [](const auto & file, const auto & got) { return writeFile(file, got.dump(2) + "\n"); }); + } +}; + +} // namespace nix diff --git a/src/libutil-test-support/include/nix/util/tests/meson.build b/src/libutil-test-support/include/nix/util/tests/meson.build index ab143757c..3be085892 100644 --- a/src/libutil-test-support/include/nix/util/tests/meson.build +++ b/src/libutil-test-support/include/nix/util/tests/meson.build @@ -7,6 +7,7 @@ headers = files( 'gmock-matchers.hh', 'gtest-with-params.hh', 'hash.hh', + 'json-characterization.hh', 'nix_api_util.hh', 'string_callback.hh', ) diff --git a/src/libutil-tests/sort.cc b/src/libutil-tests/sort.cc index 8eee961c8..11d8e5938 100644 --- a/src/libutil-tests/sort.cc +++ b/src/libutil-tests/sort.cc @@ -102,14 +102,14 @@ struct RandomPeekSort : public ::testing::TestWithParam< void SetUp() override { - auto [maxSize, min, max, iterations] = GetParam(); + const auto & [maxSize, min, max, iterations] = GetParam(); urng_ = std::mt19937(GTEST_FLAG_GET(random_seed)); distribution_ = std::uniform_int_distribution(min, max); } auto regenerate() { - auto [maxSize, min, max, iterations] = GetParam(); + const auto & [maxSize, min, max, iterations] = GetParam(); std::size_t dataSize = std::uniform_int_distribution(0, maxSize)(urng_); data_.resize(dataSize); std::generate(data_.begin(), data_.end(), [&]() { return distribution_(urng_); }); @@ -118,7 +118,7 @@ struct RandomPeekSort : public ::testing::TestWithParam< TEST_P(RandomPeekSort, defaultComparator) { - auto [maxSize, min, max, iterations] = GetParam(); + const auto & [maxSize, min, max, iterations] = GetParam(); for (std::size_t i = 0; i < iterations; ++i) { regenerate(); @@ -132,7 +132,7 @@ TEST_P(RandomPeekSort, defaultComparator) TEST_P(RandomPeekSort, greater) { - auto [maxSize, min, max, iterations] = GetParam(); + const auto & [maxSize, min, max, iterations] = GetParam(); for (std::size_t i = 0; i < iterations; ++i) { regenerate(); @@ -146,7 +146,7 @@ TEST_P(RandomPeekSort, greater) TEST_P(RandomPeekSort, brokenComparator) { - auto [maxSize, min, max, iterations] = GetParam(); + const auto & [maxSize, min, max, iterations] = GetParam(); /* This is a pretty nice way of modeling a worst-case scenario for a broken comparator. 
If the sorting algorithm doesn't break in such case, then surely all deterministic @@ -170,7 +170,7 @@ TEST_P(RandomPeekSort, brokenComparator) TEST_P(RandomPeekSort, stability) { - auto [maxSize, min, max, iterations] = GetParam(); + const auto & [maxSize, min, max, iterations] = GetParam(); for (std::size_t i = 0; i < iterations; ++i) { regenerate(); From 3a64d3c0da2b169383256fd3198cf7d18f8ab163 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sun, 28 Sep 2025 17:42:19 +0300 Subject: [PATCH 118/332] libstore: Call canonPath for constructing LocalFSStoreConfig::rootDir This mirrors what OptionalPathSetting does. Otherwise we run into an assertion failure for relative paths specified as the authority + path: nix build nixpkgs#hello --store "local://a/b" nix: ../posix-source-accessor.cc:13: nix::PosixSourceAccessor::PosixSourceAccessor(std::filesystem::__cxx11::path&&): Assertion `root.empty() || root.is_absolute()' failed. This is now diagnosed properly: error: not an absolute path: 'a/b' Just as you'd specify the root via a query parameter: nix build nixpkgs#hello --store "local?root=a/b" --- src/libstore/local-fs-store.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstore/local-fs-store.cc b/src/libstore/local-fs-store.cc index 66ae85d89..b16fc86e9 100644 --- a/src/libstore/local-fs-store.cc +++ b/src/libstore/local-fs-store.cc @@ -24,7 +24,7 @@ LocalFSStoreConfig::LocalFSStoreConfig(PathView rootDir, const Params & params) // FIXME don't duplicate description once we don't have root setting , rootDir{ this, - !rootDir.empty() && params.count("root") == 0 ? (std::optional{rootDir}) : std::nullopt, + !rootDir.empty() && params.count("root") == 0 ? (std::optional{canonPath(rootDir)}) : std::nullopt, "root", "Directory prefixed to all other paths."} { From 0866ba0b4ad14ddc1aa7ad7d3211ab4a981b9c5d Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sun, 28 Sep 2025 18:38:57 +0300 Subject: [PATCH 119/332] libstore: Deduplicate LocalFSStoreConfig::rootDir initializers Co-authored-by: John Ericson --- .../include/nix/store/local-fs-store.hh | 14 +++++++++++++- src/libstore/local-fs-store.cc | 18 +++++++++++------- 2 files changed, 24 insertions(+), 8 deletions(-) diff --git a/src/libstore/include/nix/store/local-fs-store.hh b/src/libstore/include/nix/store/local-fs-store.hh index f7d6d65b1..08f8e1656 100644 --- a/src/libstore/include/nix/store/local-fs-store.hh +++ b/src/libstore/include/nix/store/local-fs-store.hh @@ -9,6 +9,18 @@ namespace nix { struct LocalFSStoreConfig : virtual StoreConfig { +private: + static OptionalPathSetting makeRootDirSetting(LocalFSStoreConfig & self, std::optional defaultValue) + { + return { + &self, + std::move(defaultValue), + "root", + "Directory prefixed to all other paths.", + }; + } + +public: using StoreConfig::StoreConfig; /** @@ -20,7 +32,7 @@ struct LocalFSStoreConfig : virtual StoreConfig */ LocalFSStoreConfig(PathView path, const Params & params); - OptionalPathSetting rootDir{this, std::nullopt, "root", "Directory prefixed to all other paths."}; + OptionalPathSetting rootDir = makeRootDirSetting(*this, std::nullopt); private: diff --git a/src/libstore/local-fs-store.cc b/src/libstore/local-fs-store.cc index b16fc86e9..28069dcaf 100644 --- a/src/libstore/local-fs-store.cc +++ b/src/libstore/local-fs-store.cc @@ -20,13 +20,17 @@ Path LocalFSStoreConfig::getDefaultLogDir() LocalFSStoreConfig::LocalFSStoreConfig(PathView rootDir, const Params & params) : StoreConfig(params) - // Default `?root` from `rootDir` if non set 
- // FIXME don't duplicate description once we don't have root setting
-    , rootDir{
-          this,
-          !rootDir.empty() && params.count("root") == 0 ? (std::optional{canonPath(rootDir)}) : std::nullopt,
-          "root",
-          "Directory prefixed to all other paths."}
+    /* Default `?root` from `rootDir` if not set
+     * NOTE: We would like to just do rootDir.set(...), which would take care of
+     * all normalization and error checking for us. Unfortunately we cannot do
+     * that because of the complicated initialization order of other fields with
+     * the virtual class hierarchy of nix store configs, and the design of the
+     * settings system. As such, we have no choice but to redefine the field and
+     * manually repeat the same normalization logic.
+     */
+    , rootDir{makeRootDirSetting(
+          *this,
+          !rootDir.empty() && params.count("root") == 0 ? std::optional{canonPath(rootDir)} : std::nullopt)}
 {
 }

From 582d3ee6115c58a5816f15504d231b20c6aad86f Mon Sep 17 00:00:00 2001
From: John Ericson
Date: Sun, 28 Sep 2025 12:12:24 -0400
Subject: [PATCH 120/332] Add `#pragma once` to `dummy-store.hh`

We should have a lint for this.

In later (yet to be merged at this time) commits, this started causing
problems that only the sanitizer caught.
---
 src/libstore/include/nix/store/dummy-store.hh | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/libstore/include/nix/store/dummy-store.hh b/src/libstore/include/nix/store/dummy-store.hh
index 47e3375cd..e93aad366 100644
--- a/src/libstore/include/nix/store/dummy-store.hh
+++ b/src/libstore/include/nix/store/dummy-store.hh
@@ -1,3 +1,6 @@
+#pragma once
+///@file
+
 #include "nix/store/store-api.hh"

 namespace nix {

From eab467ecfb829182548276df7d56a4d1c525057a Mon Sep 17 00:00:00 2001
From: Taeer Bar-Yam
Date: Fri, 19 Sep 2025 14:06:15 -0400
Subject: [PATCH 121/332] libexpr: introduce arena to hold ExprString strings

1. Saves 24-32 bytes per string (size of std::string)
2. Saves additional bytes by not over-allocating strings (in total we
   save ~1% memory)
3. Sets us up to perform a similar transformation on the other Expr
   subclasses
4. Makes ExprString trivially moveable (previously the string data might
   move, causing the Value's pointer to become invalid). This is
   important so we can put ExprStrings in an std::vector and refer to
   them by index

We have introduced a string copy in ParserState::stripIndentation().
This could be removed by pre-allocating a string of the right size in the
arena, but this adds complexity and doesn't seem to improve
performance, so for now we've left the copy in.
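
As a rough illustration of the allocation pattern being introduced (a
standalone sketch, not code from this patch; the `Arena` and `intern`
names are made up for the example), a std::pmr::monotonic_buffer_resource
behind a polymorphic_allocator<char> hands out NUL-terminated copies
that all stay valid until the arena itself is destroyed:

    // Sketch only; loosely mirrors what the new Exprs/ExprString code does.
    #include <memory_resource>
    #include <string_view>
    #include <cstdio>

    struct Arena
    {
        std::pmr::monotonic_buffer_resource buffer;             // chunks are only freed when Arena dies
        std::pmr::polymorphic_allocator<char> alloc{&buffer};

        // Copy `sv` into the arena, returning a stable, NUL-terminated pointer.
        const char * intern(std::string_view sv)
        {
            char * s = alloc.allocate(sv.size() + 1);
            sv.copy(s, sv.size());
            s[sv.size()] = '\0';
            return s;
        }
    };

    int main()
    {
        Arena arena;
        const char * s = arena.intern("hello");
        std::printf("%s\n", s);    // pointer stays valid; no per-string free needed
    }

Because deallocation is a no-op on a monotonic resource, nothing is
freed individually, which is what lets the owning object be moved
around without invalidating the pointer it handed out.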
--- src/libexpr/eval.cc | 3 +- src/libexpr/include/nix/expr/eval.hh | 5 ++ src/libexpr/include/nix/expr/nixexpr.hh | 31 ++++++-- src/libexpr/include/nix/expr/parser-state.hh | 3 +- src/libexpr/nixexpr.cc | 2 +- src/libexpr/parser.y | 74 +++++++++++--------- 6 files changed, 79 insertions(+), 39 deletions(-) diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 6cf902e35..2df373520 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -3217,7 +3217,8 @@ Expr * EvalState::parse( docComments = &it->second; } - auto result = parseExprFromBuf(text, length, origin, basePath, symbols, settings, positions, *docComments, rootFS); + auto result = parseExprFromBuf( + text, length, origin, basePath, mem.exprs.alloc, symbols, settings, positions, *docComments, rootFS); result->bindVars(*this, staticEnv); diff --git a/src/libexpr/include/nix/expr/eval.hh b/src/libexpr/include/nix/expr/eval.hh index f61dab3a8..2601d8de8 100644 --- a/src/libexpr/include/nix/expr/eval.hh +++ b/src/libexpr/include/nix/expr/eval.hh @@ -355,6 +355,11 @@ public: return stats; } + /** + * Storage for the AST nodes + */ + Exprs exprs; + private: Statistics stats; }; diff --git a/src/libexpr/include/nix/expr/nixexpr.hh b/src/libexpr/include/nix/expr/nixexpr.hh index e0203c732..747a8e4b2 100644 --- a/src/libexpr/include/nix/expr/nixexpr.hh +++ b/src/libexpr/include/nix/expr/nixexpr.hh @@ -3,6 +3,7 @@ #include #include +#include #include "nix/expr/gc-small-vector.hh" #include "nix/expr/value.hh" @@ -84,6 +85,13 @@ std::string showAttrPath(const SymbolTable & symbols, const AttrPath & attrPath) using UpdateQueue = SmallTemporaryValueVector; +class Exprs +{ + std::pmr::monotonic_buffer_resource buffer; +public: + std::pmr::polymorphic_allocator alloc{&buffer}; +}; + /* Abstract syntax of Nix expressions. */ struct Expr @@ -173,13 +181,28 @@ struct ExprFloat : Expr struct ExprString : Expr { - std::string s; Value v; - ExprString(std::string && s) - : s(std::move(s)) + /** + * This is only for strings already allocated in our polymorphic allocator, + * or that live at least that long (e.g. 
c++ string literals) + */ + ExprString(const char * s) { - v.mkStringNoCopy(this->s.data()); + v.mkStringNoCopy(s); + }; + + ExprString(std::pmr::polymorphic_allocator & alloc, std::string_view sv) + { + auto len = sv.length(); + if (len == 0) { + v.mkStringNoCopy(""); + return; + } + char * s = alloc.allocate(len + 1); + sv.copy(s, len); + s[len] = '\0'; + v.mkStringNoCopy(s); }; Value * maybeThunk(EvalState & state, Env & env) override; diff --git a/src/libexpr/include/nix/expr/parser-state.hh b/src/libexpr/include/nix/expr/parser-state.hh index e689678de..758bedd97 100644 --- a/src/libexpr/include/nix/expr/parser-state.hh +++ b/src/libexpr/include/nix/expr/parser-state.hh @@ -82,6 +82,7 @@ struct LexerState struct ParserState { const LexerState & lexerState; + std::pmr::polymorphic_allocator & alloc; SymbolTable & symbols; PosTable & positions; Expr * result; @@ -327,7 +328,7 @@ ParserState::stripIndentation(const PosIdx pos, std::vectoremplace_back(i->first, new ExprString(std::move(s2))); + es2->emplace_back(i->first, new ExprString(alloc, s2)); } }; for (; i != es.end(); ++i, --n) { diff --git a/src/libexpr/nixexpr.cc b/src/libexpr/nixexpr.cc index 43e85cb16..a2980af6b 100644 --- a/src/libexpr/nixexpr.cc +++ b/src/libexpr/nixexpr.cc @@ -40,7 +40,7 @@ void ExprFloat::show(const SymbolTable & symbols, std::ostream & str) const void ExprString::show(const SymbolTable & symbols, std::ostream & str) const { - printLiteralString(str, s); + printLiteralString(str, v.string_view()); } void ExprPath::show(const SymbolTable & symbols, std::ostream & str) const diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y index 89da001ef..515e08e62 100644 --- a/src/libexpr/parser.y +++ b/src/libexpr/parser.y @@ -64,6 +64,7 @@ Expr * parseExprFromBuf( size_t length, Pos::Origin origin, const SourcePath & basePath, + std::pmr::polymorphic_allocator & alloc, SymbolTable & symbols, const EvalSettings & settings, PosTable & positions, @@ -134,6 +135,7 @@ static Expr * makeCall(PosIdx pos, Expr * fn, Expr * arg) { std::vector * attrNames; std::vector> * inheritAttrs; std::vector> * string_parts; + std::variant * to_be_string; std::vector>> * ind_string_parts; } @@ -148,7 +150,8 @@ static Expr * makeCall(PosIdx pos, Expr * fn, Expr * arg) { %type attrs %type string_parts_interpolated %type ind_string_parts -%type path_start string_parts string_attr +%type path_start +%type string_parts string_attr %type attr %token ID %token STR IND_STR @@ -303,7 +306,13 @@ expr_simple } | INT_LIT { $$ = new ExprInt($1); } | FLOAT_LIT { $$ = new ExprFloat($1); } - | '"' string_parts '"' { $$ = $2; } + | '"' string_parts '"' { + std::visit(overloaded{ + [&](std::string_view str) { $$ = new ExprString(state->alloc, str); }, + [&](Expr * expr) { $$ = expr; }}, + *$2); + delete $2; + } | IND_STRING_OPEN ind_string_parts IND_STRING_CLOSE { $$ = state->stripIndentation(CUR_POS, std::move(*$2)); delete $2; @@ -314,11 +323,11 @@ expr_simple $$ = new ExprConcatStrings(CUR_POS, false, $2); } | SPATH { - std::string path($1.p + 1, $1.l - 2); + std::string_view path($1.p + 1, $1.l - 2); $$ = new ExprCall(CUR_POS, new ExprVar(state->s.findFile), {new ExprVar(state->s.nixPath), - new ExprString(std::move(path))}); + new ExprString(state->alloc, path)}); } | URI { static bool noURLLiterals = experimentalFeatureSettings.isEnabled(Xp::NoUrlLiterals); @@ -327,7 +336,7 @@ expr_simple .msg = HintFmt("URL literals are disabled"), .pos = state->positions[CUR_POS] }); - $$ = new ExprString(std::string($1)); + $$ = new ExprString(state->alloc, 
$1); } | '(' expr ')' { $$ = $2; } /* Let expressions `let {..., body = ...}' are just desugared @@ -344,19 +353,19 @@ expr_simple ; string_parts - : STR { $$ = new ExprString(std::string($1)); } - | string_parts_interpolated { $$ = new ExprConcatStrings(CUR_POS, true, $1); } - | { $$ = new ExprString(""); } + : STR { $$ = new std::variant($1); } + | string_parts_interpolated { $$ = new std::variant(new ExprConcatStrings(CUR_POS, true, $1)); } + | { $$ = new std::variant(std::string_view()); } ; string_parts_interpolated : string_parts_interpolated STR - { $$ = $1; $1->emplace_back(state->at(@2), new ExprString(std::string($2))); } + { $$ = $1; $1->emplace_back(state->at(@2), new ExprString(state->alloc, $2)); } | string_parts_interpolated DOLLAR_CURLY expr '}' { $$ = $1; $1->emplace_back(state->at(@2), $3); } | DOLLAR_CURLY expr '}' { $$ = new std::vector>; $$->emplace_back(state->at(@1), $2); } | STR DOLLAR_CURLY expr '}' { $$ = new std::vector>; - $$->emplace_back(state->at(@1), new ExprString(std::string($1))); + $$->emplace_back(state->at(@1), new ExprString(state->alloc, $1)); $$->emplace_back(state->at(@2), $3); } ; @@ -454,15 +463,16 @@ attrs : attrs attr { $$ = $1; $1->emplace_back(AttrName(state->symbols.create($2)), state->at(@2)); } | attrs string_attr { $$ = $1; - ExprString * str = dynamic_cast($2); - if (str) { - $$->emplace_back(AttrName(state->symbols.create(str->s)), state->at(@2)); - delete str; - } else - throw ParseError({ - .msg = HintFmt("dynamic attributes not allowed in inherit"), - .pos = state->positions[state->at(@2)] - }); + std::visit(overloaded { + [&](std::string_view str) { $$->emplace_back(AttrName(state->symbols.create(str)), state->at(@2)); }, + [&](Expr * expr) { + throw ParseError({ + .msg = HintFmt("dynamic attributes not allowed in inherit"), + .pos = state->positions[state->at(@2)] + }); + } + }, *$2); + delete $2; } | { $$ = new std::vector>; } ; @@ -471,22 +481,20 @@ attrpath : attrpath '.' attr { $$ = $1; $1->push_back(AttrName(state->symbols.create($3))); } | attrpath '.' 
string_attr { $$ = $1; - ExprString * str = dynamic_cast($3); - if (str) { - $$->push_back(AttrName(state->symbols.create(str->s))); - delete str; - } else - $$->push_back(AttrName($3)); + std::visit(overloaded { + [&](std::string_view str) { $$->push_back(AttrName(state->symbols.create(str))); }, + [&](Expr * expr) { $$->push_back(AttrName(expr)); } + }, *$3); + delete $3; } | attr { $$ = new std::vector; $$->push_back(AttrName(state->symbols.create($1))); } | string_attr { $$ = new std::vector; - ExprString *str = dynamic_cast($1); - if (str) { - $$->push_back(AttrName(state->symbols.create(str->s))); - delete str; - } else - $$->push_back(AttrName($1)); + std::visit(overloaded { + [&](std::string_view str) { $$->push_back(AttrName(state->symbols.create(str))); }, + [&](Expr * expr) { $$->push_back(AttrName(expr)); } + }, *$1); + delete $1; } ; @@ -497,7 +505,7 @@ attr string_attr : '"' string_parts '"' { $$ = $2; } - | DOLLAR_CURLY expr '}' { $$ = $2; } + | DOLLAR_CURLY expr '}' { $$ = new std::variant($2); } ; expr_list @@ -537,6 +545,7 @@ Expr * parseExprFromBuf( size_t length, Pos::Origin origin, const SourcePath & basePath, + std::pmr::polymorphic_allocator & alloc, SymbolTable & symbols, const EvalSettings & settings, PosTable & positions, @@ -551,6 +560,7 @@ Expr * parseExprFromBuf( }; ParserState state { .lexerState = lexerState, + .alloc = alloc, .symbols = symbols, .positions = positions, .basePath = basePath, From 0f08feaa58819c6e03568f2bbb147f3b1a8fef16 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sun, 28 Sep 2025 22:57:11 +0300 Subject: [PATCH 122/332] libexpr: Remove unused members from ParserLocation --- src/libexpr/include/nix/expr/parser-state.hh | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/libexpr/include/nix/expr/parser-state.hh b/src/libexpr/include/nix/expr/parser-state.hh index e689678de..193d955c2 100644 --- a/src/libexpr/include/nix/expr/parser-state.hh +++ b/src/libexpr/include/nix/expr/parser-state.hh @@ -44,9 +44,6 @@ struct ParserLocation beginOffset = stashedBeginOffset; endOffset = stashedEndOffset; } - - /** Latest doc comment position, or 0. */ - int doc_comment_first_column, doc_comment_last_column; }; struct LexerState From a8715a2d6e66eb3add6e98b56a40931056cef7d3 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sun, 28 Sep 2025 21:44:54 +0300 Subject: [PATCH 123/332] libexpr: Switch parser.y to %skeleton lalr1.cc Since the parser is now LALR we can easily switch over to the less ugly sketelon than the default C one. This would allow us to switch from %union to %define api.value.type variant in the future to avoid the need for triviall POD types. --- src/libexpr/include/nix/expr/parser-state.hh | 1 - src/libexpr/lexer-helpers.cc | 4 +- src/libexpr/lexer-helpers.hh | 10 ++-- src/libexpr/lexer.l | 4 ++ src/libexpr/parser-scanner-decls.hh | 17 +++++++ src/libexpr/parser.y | 48 ++++++++++---------- 6 files changed, 51 insertions(+), 33 deletions(-) create mode 100644 src/libexpr/parser-scanner-decls.hh diff --git a/src/libexpr/include/nix/expr/parser-state.hh b/src/libexpr/include/nix/expr/parser-state.hh index 193d955c2..32e9f5db0 100644 --- a/src/libexpr/include/nix/expr/parser-state.hh +++ b/src/libexpr/include/nix/expr/parser-state.hh @@ -24,7 +24,6 @@ struct StringToken } }; -// This type must be trivially copyable; see YYLTYPE_IS_TRIVIAL in parser.y. 
struct ParserLocation { int beginOffset; diff --git a/src/libexpr/lexer-helpers.cc b/src/libexpr/lexer-helpers.cc index 927e3cc73..59f6f6f70 100644 --- a/src/libexpr/lexer-helpers.cc +++ b/src/libexpr/lexer-helpers.cc @@ -1,11 +1,11 @@ #include "lexer-helpers.hh" -void nix::lexer::internal::initLoc(YYLTYPE * loc) +void nix::lexer::internal::initLoc(Parser::location_type * loc) { loc->beginOffset = loc->endOffset = 0; } -void nix::lexer::internal::adjustLoc(yyscan_t yyscanner, YYLTYPE * loc, const char * s, size_t len) +void nix::lexer::internal::adjustLoc(yyscan_t yyscanner, Parser::location_type * loc, const char * s, size_t len) { loc->stash(); diff --git a/src/libexpr/lexer-helpers.hh b/src/libexpr/lexer-helpers.hh index 49865f794..b60fb9e7d 100644 --- a/src/libexpr/lexer-helpers.hh +++ b/src/libexpr/lexer-helpers.hh @@ -2,16 +2,12 @@ #include -// including the generated headers twice leads to errors -#ifndef BISON_HEADER -# include "lexer-tab.hh" -# include "parser-tab.hh" -#endif +#include "parser-scanner-decls.hh" namespace nix::lexer::internal { -void initLoc(YYLTYPE * loc); +void initLoc(Parser::location_type * loc); -void adjustLoc(yyscan_t yyscanner, YYLTYPE * loc, const char * s, size_t len); +void adjustLoc(yyscan_t yyscanner, Parser::location_type * loc, const char * s, size_t len); } // namespace nix::lexer::internal diff --git a/src/libexpr/lexer.l b/src/libexpr/lexer.l index 1005f9f7e..f420fc13f 100644 --- a/src/libexpr/lexer.l +++ b/src/libexpr/lexer.l @@ -82,6 +82,10 @@ static void requireExperimentalFeature(const ExperimentalFeature & feature, cons } +using enum nix::Parser::token::token_kind_type; +using YYSTYPE = nix::Parser::value_type; +using YYLTYPE = nix::Parser::location_type; + // yacc generates code that uses unannotated fallthrough. #pragma GCC diagnostic ignored "-Wimplicit-fallthrough" diff --git a/src/libexpr/parser-scanner-decls.hh b/src/libexpr/parser-scanner-decls.hh new file mode 100644 index 000000000..e4e061883 --- /dev/null +++ b/src/libexpr/parser-scanner-decls.hh @@ -0,0 +1,17 @@ +#pragma once + +#ifndef BISON_HEADER +# include "parser-tab.hh" +using YYSTYPE = nix::parser::BisonParser::value_type; +using YYLTYPE = nix::parser::BisonParser::location_type; +# include "lexer-tab.hh" // IWYU pragma: export +#endif + +namespace nix { + +class Parser : public parser::BisonParser +{ + using BisonParser::BisonParser; +}; + +} // namespace nix diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y index 89da001ef..8f77b4b0a 100644 --- a/src/libexpr/parser.y +++ b/src/libexpr/parser.y @@ -1,5 +1,7 @@ +%skeleton "lalr1.cc" %define api.location.type { ::nix::ParserLocation } -%define api.pure +%define api.namespace { ::nix::parser } +%define api.parser.class { BisonParser } %locations %define parse.error verbose %defines @@ -26,19 +28,12 @@ #include "nix/expr/eval-settings.hh" #include "nix/expr/parser-state.hh" -// Bison seems to have difficulty growing the parser stack when using C++ with -// a custom location type. This undocumented macro tells Bison that our -// location type is "trivially copyable" in C++-ese, so it is safe to use the -// same memcpy macro it uses to grow the stack that it uses with its own -// default location type. Without this, we get "error: memory exhausted" when -// parsing some large Nix files. 
Our other options are to increase the initial -// stack size (200 by default) to be as large as we ever want to support (so -// that growing the stack is unnecessary), or redefine the stack-relocation -// macro ourselves (which is also undocumented). -#define YYLTYPE_IS_TRIVIAL 1 - -#define YY_DECL int yylex \ - (YYSTYPE * yylval_param, YYLTYPE * yylloc_param, yyscan_t yyscanner, nix::ParserState * state) +#define YY_DECL \ + int yylex( \ + nix::Parser::value_type * yylval_param, \ + nix::Parser::location_type * yylloc_param, \ + yyscan_t yyscanner, \ + nix::ParserState * state) // For efficiency, we only track offsets; not line,column coordinates # define YYLLOC_DEFAULT(Current, Rhs, N) \ @@ -78,24 +73,30 @@ Expr * parseExprFromBuf( %{ -#include "parser-tab.hh" -#include "lexer-tab.hh" +/* The parser is very performance sensitive and loses out on a lot + of performance even with basic stdlib assertions. Since those don't + affect ABI we can disable those just for this file. */ +#if defined(_GLIBCXX_ASSERTIONS) && !defined(_GLIBCXX_DEBUG) +#undef _GLIBCXX_ASSERTIONS +#endif + +#include "parser-scanner-decls.hh" YY_DECL; using namespace nix; -#define CUR_POS state->at(yyloc) +#define CUR_POS state->at(yylhs.location) - -void yyerror(YYLTYPE * loc, yyscan_t scanner, ParserState * state, const char * error) +void parser::BisonParser::error(const location_type &loc_, const std::string &error) { + auto loc = loc_; if (std::string_view(error).starts_with("syntax error, unexpected end of file")) { - loc->beginOffset = loc->endOffset; + loc.beginOffset = loc.endOffset; } throw ParseError({ .msg = HintFmt(error), - .pos = state->positions[state->at(*loc)] + .pos = state->positions[state->at(loc)] }); } @@ -182,7 +183,7 @@ start: expr { state->result = $1; // This parser does not use yynerrs; suppress the warning. - (void) yynerrs; + (void) yynerrs_; }; expr: expr_function; @@ -563,7 +564,8 @@ Expr * parseExprFromBuf( Finally _destroy([&] { yylex_destroy(scanner); }); yy_scan_buffer(text, length, scanner); - yyparse(scanner, &state); + Parser parser(scanner, &state); + parser.parse(); return state.result; } From c1f805b8569d1f66aed813a3b49820936618c9d5 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Mon, 29 Sep 2025 01:46:40 +0300 Subject: [PATCH 124/332] packaging: Build without symbolic interposition on GCC This turns out to be a big problem for performance of Bison generated code, that for whatever reason cannot be made internal to the shared library. This causes GCC to make a bunch of function calls go through PLT. Ideally these hot functions (like move/copy ctor) could become inline in upstream Bison. That will make sure that GCC can do interprocedular optimizations without -fno-semantic-interposition [^]. Considering that LLVM already does inlining and whatnot is a good motivation for this change. I don't know of any case where Nix relies on LD_PRELOAD tricks for the shared libraries in production use-cases. 
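
Roughly, the effect can be seen on any exported function; the snippet
below is an illustrative C++ sketch, not part of this patch:

    // Compile as position-independent code, e.g.:
    //   g++ -O2 -fPIC -c example.cc
    // and compare the assembly with and without -fno-semantic-interposition.
    int hot(int x)              // exported, default visibility
    {
        return x + 1;
    }

    int caller(int x)
    {
        // With plain -fPIC, GCC must assume another DSO (or LD_PRELOAD) could
        // interpose its own hot() at load time, so this remains a call through
        // the PLT and is not inlined. With -fno-semantic-interposition GCC is
        // allowed to bind the call locally and inline it; -Wl,-Bsymbolic-functions
        // additionally resolves remaining intra-library calls at link time
        // instead of through the PLT.
        return hot(x);
    }
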
[^]: https://maskray.me/blog/2021-05-09-fno-semantic-interposition --- packaging/components.nix | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/packaging/components.nix b/packaging/components.nix index b5fad4043..2be4fa61d 100644 --- a/packaging/components.nix +++ b/packaging/components.nix @@ -164,6 +164,24 @@ let }; mesonLibraryLayer = finalAttrs: prevAttrs: { + preConfigure = + let + interpositionFlags = [ + "-fno-semantic-interposition" + "-Wl,-Bsymbolic-functions" + ]; + in + # NOTE: By default GCC disables interprocedular optimizations (in particular inlining) for + # position-independent code and thus shared libraries. + # Since LD_PRELOAD tricks aren't worth losing out on optimizations, we disable it for good. + # This is not the case for Clang, where inlining is done by default even without -fno-semantic-interposition. + # https://reviews.llvm.org/D102453 + # https://fedoraproject.org/wiki/Changes/PythonNoSemanticInterpositionSpeedup + prevAttrs.preConfigure or "" + + lib.optionalString stdenv.cc.isGNU '' + export CFLAGS="''${CFLAGS:-} ${toString interpositionFlags}" + export CXXFLAGS="''${CXXFLAGS:-} ${toString interpositionFlags}" + ''; outputs = prevAttrs.outputs or [ "out" ] ++ [ "dev" ]; }; From 76c9d3885ca353755b9b7331f63a30ca805739d7 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Sun, 28 Sep 2025 20:22:55 -0700 Subject: [PATCH 125/332] shellcheck fix: tests/functional/local-overlay-store/verify.sh --- maintainers/flake-module.nix | 1 - tests/functional/local-overlay-store/verify.sh | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 0e9363408..2d18cf9e2 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -144,7 +144,6 @@ ''^tests/functional/local-overlay-store/stale-file-handle-inner\.sh$'' ''^tests/functional/local-overlay-store/stale-file-handle\.sh$'' ''^tests/functional/local-overlay-store/verify-inner\.sh$'' - ''^tests/functional/local-overlay-store/verify\.sh$'' ''^tests/functional/logging\.sh$'' ''^tests/functional/misc\.sh$'' ''^tests/functional/multiple-outputs\.sh$'' diff --git a/tests/functional/local-overlay-store/verify.sh b/tests/functional/local-overlay-store/verify.sh index d73d1a57d..f5242fadc 100755 --- a/tests/functional/local-overlay-store/verify.sh +++ b/tests/functional/local-overlay-store/verify.sh @@ -1,3 +1,4 @@ +# shellcheck shell=bash source common.sh source ../common/init.sh From 3a1ba8e41efb02d2627b7aec9eec1acfa62f18a6 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Sun, 28 Sep 2025 20:23:19 -0700 Subject: [PATCH 126/332] shellcheck fix: tests/functional/local-overlay-store/verify-inner.sh --- maintainers/flake-module.nix | 1 - tests/functional/local-overlay-store/verify-inner.sh | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 2d18cf9e2..ef8d81782 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -143,7 +143,6 @@ ''^tests/functional/local-overlay-store/remount\.sh$'' ''^tests/functional/local-overlay-store/stale-file-handle-inner\.sh$'' ''^tests/functional/local-overlay-store/stale-file-handle\.sh$'' - ''^tests/functional/local-overlay-store/verify-inner\.sh$'' ''^tests/functional/logging\.sh$'' ''^tests/functional/misc\.sh$'' ''^tests/functional/multiple-outputs\.sh$'' diff --git a/tests/functional/local-overlay-store/verify-inner.sh b/tests/functional/local-overlay-store/verify-inner.sh index 
659f2ae50..1edc11cc7 100755 --- a/tests/functional/local-overlay-store/verify-inner.sh +++ b/tests/functional/local-overlay-store/verify-inner.sh @@ -20,7 +20,7 @@ mountOverlayfs ## Initialise stores for test # Realise a derivation from the lower store to propagate paths to overlay DB -nix-store --store "$storeB" --realise $drvPath +nix-store --store "$storeB" --realise "$drvPath" # Also ensure dummy file exists in overlay DB dummyPath=$(nix-store --store "$storeB" --add ../dummy) From e896bf1cb12e0c53faebfcb63c157b55a96b622e Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Sun, 28 Sep 2025 20:24:09 -0700 Subject: [PATCH 127/332] shellcheck fix: tests/functional/local-overlay-store/stale-file-handle.sh --- maintainers/flake-module.nix | 1 - tests/functional/local-overlay-store/stale-file-handle.sh | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index ef8d81782..149913bd1 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -142,7 +142,6 @@ ''^tests/functional/local-overlay-store/redundant-add\.sh$'' ''^tests/functional/local-overlay-store/remount\.sh$'' ''^tests/functional/local-overlay-store/stale-file-handle-inner\.sh$'' - ''^tests/functional/local-overlay-store/stale-file-handle\.sh$'' ''^tests/functional/logging\.sh$'' ''^tests/functional/misc\.sh$'' ''^tests/functional/multiple-outputs\.sh$'' diff --git a/tests/functional/local-overlay-store/stale-file-handle.sh b/tests/functional/local-overlay-store/stale-file-handle.sh index 684b8ce23..fbc4c6497 100755 --- a/tests/functional/local-overlay-store/stale-file-handle.sh +++ b/tests/functional/local-overlay-store/stale-file-handle.sh @@ -1,3 +1,4 @@ +# shellcheck shell=bash source common.sh source ../common/init.sh From c4c95f3d39b00024a217b571fd5f7cdd3610eb31 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Sun, 28 Sep 2025 20:24:38 -0700 Subject: [PATCH 128/332] shellcheck fix: tests/functional/local-overlay-store/stale-file-handle-inner.sh --- maintainers/flake-module.nix | 1 - 1 file changed, 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 149913bd1..3b4a1c42e 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -141,7 +141,6 @@ ''^tests/functional/local-overlay-store/redundant-add-inner\.sh$'' ''^tests/functional/local-overlay-store/redundant-add\.sh$'' ''^tests/functional/local-overlay-store/remount\.sh$'' - ''^tests/functional/local-overlay-store/stale-file-handle-inner\.sh$'' ''^tests/functional/logging\.sh$'' ''^tests/functional/misc\.sh$'' ''^tests/functional/multiple-outputs\.sh$'' From 4ef4e967883506c04c051e5eaa239fd4f9f1489a Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Sun, 28 Sep 2025 20:25:09 -0700 Subject: [PATCH 129/332] shellcheck fix: tests/functional/local-overlay-store/remount.sh --- maintainers/flake-module.nix | 1 - 1 file changed, 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 3b4a1c42e..5c783ec10 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -140,7 +140,6 @@ ''^tests/functional/local-overlay-store/optimise\.sh$'' ''^tests/functional/local-overlay-store/redundant-add-inner\.sh$'' ''^tests/functional/local-overlay-store/redundant-add\.sh$'' - ''^tests/functional/local-overlay-store/remount\.sh$'' ''^tests/functional/logging\.sh$'' ''^tests/functional/misc\.sh$'' ''^tests/functional/multiple-outputs\.sh$'' From 1bee4d098835c9c941dacfcaae1383db58c578e1 Mon 
Sep 17 00:00:00 2001 From: Farid Zakaria Date: Sun, 28 Sep 2025 20:25:42 -0700 Subject: [PATCH 130/332] shellcheck fix: tests/functional/local-overlay-store/redundant-add.sh --- maintainers/flake-module.nix | 1 - 1 file changed, 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 5c783ec10..a48f55322 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -139,7 +139,6 @@ ''^tests/functional/local-overlay-store/optimise-inner\.sh$'' ''^tests/functional/local-overlay-store/optimise\.sh$'' ''^tests/functional/local-overlay-store/redundant-add-inner\.sh$'' - ''^tests/functional/local-overlay-store/redundant-add\.sh$'' ''^tests/functional/logging\.sh$'' ''^tests/functional/misc\.sh$'' ''^tests/functional/multiple-outputs\.sh$'' From 0c50d5b25ab90e50c88065f574a03fc8bbd4be0e Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Sun, 28 Sep 2025 20:27:21 -0700 Subject: [PATCH 131/332] shellcheck fix: tests/functional/local-overlay-store/redundant-add-inner.sh --- maintainers/flake-module.nix | 1 - .../functional/local-overlay-store/redundant-add-inner.sh | 8 ++++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index a48f55322..c509eb85b 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -138,7 +138,6 @@ ''^tests/functional/local-overlay-store/gc\.sh$'' ''^tests/functional/local-overlay-store/optimise-inner\.sh$'' ''^tests/functional/local-overlay-store/optimise\.sh$'' - ''^tests/functional/local-overlay-store/redundant-add-inner\.sh$'' ''^tests/functional/logging\.sh$'' ''^tests/functional/misc\.sh$'' ''^tests/functional/multiple-outputs\.sh$'' diff --git a/tests/functional/local-overlay-store/redundant-add-inner.sh b/tests/functional/local-overlay-store/redundant-add-inner.sh index e37ef90e5..2be122fc6 100755 --- a/tests/functional/local-overlay-store/redundant-add-inner.sh +++ b/tests/functional/local-overlay-store/redundant-add-inner.sh @@ -22,14 +22,14 @@ mountOverlayfs pathInLowerStore=$(nix-store --store "$storeA" --add ../dummy) # upper layer should not have it -expect 1 stat $(toRealPath "$storeBTop/nix/store" "$pathInLowerStore") +expect 1 stat "$(toRealPath "$storeBTop/nix/store" "$pathInLowerStore")" pathFromB=$(nix-store --store "$storeB" --add ../dummy) -[[ $pathInLowerStore == $pathFromB ]] +[[ $pathInLowerStore == "$pathFromB" ]] # lower store should have it from before -stat $(toRealPath "$storeA/nix/store" "$pathInLowerStore") +stat "$(toRealPath "$storeA/nix/store" "$pathInLowerStore")" # upper layer should still not have it (no redundant copy) -expect 1 stat $(toRealPath "$storeBTop" "$pathInLowerStore") +expect 1 stat "$(toRealPath "$storeBTop" "$pathInLowerStore")" From dbb53de9d3db672bc09df33c3e4534b51aa12e87 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Sun, 28 Sep 2025 20:28:18 -0700 Subject: [PATCH 132/332] shellcheck fix: tests/functional/local-overlay-store/redundant-add.sh --- maintainers/flake-module.nix | 1 - tests/functional/local-overlay-store/redundant-add.sh | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index c509eb85b..e5b38a931 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -137,7 +137,6 @@ ''^tests/functional/local-overlay-store/gc-inner\.sh$'' ''^tests/functional/local-overlay-store/gc\.sh$'' ''^tests/functional/local-overlay-store/optimise-inner\.sh$'' - 
''^tests/functional/local-overlay-store/optimise\.sh$'' ''^tests/functional/logging\.sh$'' ''^tests/functional/misc\.sh$'' ''^tests/functional/multiple-outputs\.sh$'' diff --git a/tests/functional/local-overlay-store/redundant-add.sh b/tests/functional/local-overlay-store/redundant-add.sh index b4f04b2e1..898c3ffeb 100755 --- a/tests/functional/local-overlay-store/redundant-add.sh +++ b/tests/functional/local-overlay-store/redundant-add.sh @@ -1,3 +1,4 @@ +# shellcheck shell=bash source common.sh source ../common/init.sh From 283a9c4c5aefc4d55b6643a483b64bfea8ae597b Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Sun, 28 Sep 2025 20:29:35 -0700 Subject: [PATCH 133/332] shellcheck fix: tests/functional/local-overlay-store/optimise-inner.sh --- maintainers/flake-module.nix | 1 - tests/functional/local-overlay-store/optimise-inner.sh | 4 ++-- tests/functional/local-overlay-store/optimise.sh | 1 + 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index e5b38a931..388a9755c 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -136,7 +136,6 @@ ''^tests/functional/local-overlay-store/delete-refs\.sh$'' ''^tests/functional/local-overlay-store/gc-inner\.sh$'' ''^tests/functional/local-overlay-store/gc\.sh$'' - ''^tests/functional/local-overlay-store/optimise-inner\.sh$'' ''^tests/functional/logging\.sh$'' ''^tests/functional/misc\.sh$'' ''^tests/functional/multiple-outputs\.sh$'' diff --git a/tests/functional/local-overlay-store/optimise-inner.sh b/tests/functional/local-overlay-store/optimise-inner.sh index eafbc77f7..40cd1c531 100755 --- a/tests/functional/local-overlay-store/optimise-inner.sh +++ b/tests/functional/local-overlay-store/optimise-inner.sh @@ -38,8 +38,8 @@ overlayPath="$storeBRoot/nix/store/$dupFilename" lowerInode=$(stat -c %i "$lowerPath") upperInode=$(stat -c %i "$upperPath") overlayInode=$(stat -c %i "$overlayPath") -[[ $upperInode == $overlayInode ]] -[[ $upperInode != $lowerInode ]] +[[ $upperInode == "$overlayInode" ]] +[[ $upperInode != "$lowerInode" ]] # Run optimise to deduplicate store paths nix-store --store "$storeB" --optimise diff --git a/tests/functional/local-overlay-store/optimise.sh b/tests/functional/local-overlay-store/optimise.sh index a524a675e..a11c450d7 100755 --- a/tests/functional/local-overlay-store/optimise.sh +++ b/tests/functional/local-overlay-store/optimise.sh @@ -1,3 +1,4 @@ +# shellcheck shell=bash source common.sh source ../common/init.sh From 675179a51008d45723c18ca5eac5c911c76c0d18 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Sun, 28 Sep 2025 20:30:02 -0700 Subject: [PATCH 134/332] shellcheck fix: tests/functional/local-overlay-store/gc.sh --- maintainers/flake-module.nix | 1 - tests/functional/local-overlay-store/gc.sh | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 388a9755c..3f38ffc7d 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -135,7 +135,6 @@ ''^tests/functional/local-overlay-store/delete-refs-inner\.sh$'' ''^tests/functional/local-overlay-store/delete-refs\.sh$'' ''^tests/functional/local-overlay-store/gc-inner\.sh$'' - ''^tests/functional/local-overlay-store/gc\.sh$'' ''^tests/functional/logging\.sh$'' ''^tests/functional/misc\.sh$'' ''^tests/functional/multiple-outputs\.sh$'' diff --git a/tests/functional/local-overlay-store/gc.sh b/tests/functional/local-overlay-store/gc.sh index f3420d0b8..5b6720fd3 100755 --- 
a/tests/functional/local-overlay-store/gc.sh +++ b/tests/functional/local-overlay-store/gc.sh @@ -1,3 +1,4 @@ +# shellcheck shell=bash source common.sh source ../common/init.sh From 62b36eba1183dcca45fc5f59be681958855d6fa6 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Sun, 28 Sep 2025 20:31:09 -0700 Subject: [PATCH 135/332] shellcheck fix: tests/functional/local-overlay-store/gc-inner.sh --- maintainers/flake-module.nix | 1 - .../local-overlay-store/gc-inner.sh | 21 ++++++++++--------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 3f38ffc7d..522e7cdb8 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -134,7 +134,6 @@ ''^tests/functional/local-overlay-store/delete-duplicate\.sh$'' ''^tests/functional/local-overlay-store/delete-refs-inner\.sh$'' ''^tests/functional/local-overlay-store/delete-refs\.sh$'' - ''^tests/functional/local-overlay-store/gc-inner\.sh$'' ''^tests/functional/logging\.sh$'' ''^tests/functional/misc\.sh$'' ''^tests/functional/multiple-outputs\.sh$'' diff --git a/tests/functional/local-overlay-store/gc-inner.sh b/tests/functional/local-overlay-store/gc-inner.sh index 687fed897..3e63c9398 100644 --- a/tests/functional/local-overlay-store/gc-inner.sh +++ b/tests/functional/local-overlay-store/gc-inner.sh @@ -21,24 +21,24 @@ outPath=$(nix-build ../hermetic.nix --no-out-link --arg busybox "$busybox" --arg # Set a GC root. mkdir -p "$stateB" rm -f "$stateB/gcroots/foo" -ln -sf $outPath "$stateB/gcroots/foo" +ln -sf "$outPath" "$stateB/gcroots/foo" -[ "$(nix-store -q --roots $outPath)" = "$stateB/gcroots/foo -> $outPath" ] +[ "$(nix-store -q --roots "$outPath")" = "$stateB/gcroots/foo -> $outPath" ] -nix-store --gc --print-roots | grep $outPath -nix-store --gc --print-live | grep $outPath -if nix-store --gc --print-dead | grep -E $outPath$; then false; fi +nix-store --gc --print-roots | grep "$outPath" +nix-store --gc --print-live | grep "$outPath" +if nix-store --gc --print-dead | grep -E "$outPath"$; then false; fi nix-store --gc --print-dead -expect 1 nix-store --delete $outPath +expect 1 nix-store --delete "$outPath" test -e "$storeBRoot/$outPath" shopt -s nullglob -for i in $storeBRoot/*; do +for i in "$storeBRoot"/*; do if [[ $i =~ /trash ]]; then continue; fi # compat with old daemon - touch $i.lock - touch $i.chroot + touch "$i".lock + touch "$i".chroot done nix-collect-garbage @@ -51,7 +51,8 @@ rm "$stateB/gcroots/foo" nix-collect-garbage # Check that the output has been GC'd. -test ! -e $outPath +test ! -e "$outPath" # Check that the store is empty. 
+# shellcheck disable=SC2012 [ "$(ls -1 "$storeBTop" | wc -l)" = "0" ] From 326d626ad7f1708ed107bf0dd704e89bbed31720 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Sun, 28 Sep 2025 20:32:12 -0700 Subject: [PATCH 136/332] shellcheck fix: tests/functional/local-overlay-store/delete-refs-inner.sh --- maintainers/flake-module.nix | 2 -- .../local-overlay-store/delete-refs-inner.sh | 17 +++++++++-------- .../local-overlay-store/delete-refs.sh | 1 + 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 522e7cdb8..f527a6d39 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -132,8 +132,6 @@ ''^tests/functional/local-overlay-store/common\.sh$'' ''^tests/functional/local-overlay-store/delete-duplicate-inner\.sh$'' ''^tests/functional/local-overlay-store/delete-duplicate\.sh$'' - ''^tests/functional/local-overlay-store/delete-refs-inner\.sh$'' - ''^tests/functional/local-overlay-store/delete-refs\.sh$'' ''^tests/functional/logging\.sh$'' ''^tests/functional/misc\.sh$'' ''^tests/functional/multiple-outputs\.sh$'' diff --git a/tests/functional/local-overlay-store/delete-refs-inner.sh b/tests/functional/local-overlay-store/delete-refs-inner.sh index 385eeadc9..f54ef2bb6 100644 --- a/tests/functional/local-overlay-store/delete-refs-inner.sh +++ b/tests/functional/local-overlay-store/delete-refs-inner.sh @@ -15,6 +15,7 @@ initLowerStore mountOverlayfs export NIX_REMOTE="$storeB" +# shellcheck disable=SC2034 stateB="$storeBRoot/nix/var/nix" hermetic=$(nix-build ../hermetic.nix --no-out-link --arg busybox "$busybox" --arg withFinalRefs true --arg seed 2) input1=$(nix-build ../hermetic.nix --no-out-link --arg busybox "$busybox" --arg withFinalRefs true --arg seed 2 -A passthru.input1 -j0) @@ -22,18 +23,18 @@ input2=$(nix-build ../hermetic.nix --no-out-link --arg busybox "$busybox" --arg input3=$(nix-build ../hermetic.nix --no-out-link --arg busybox "$busybox" --arg withFinalRefs true --arg seed 2 -A passthru.input3 -j0) # Can't delete because referenced -expectStderr 1 nix-store --delete $input1 | grepQuiet "Cannot delete path" -expectStderr 1 nix-store --delete $input2 | grepQuiet "Cannot delete path" -expectStderr 1 nix-store --delete $input3 | grepQuiet "Cannot delete path" +expectStderr 1 nix-store --delete "$input1" | grepQuiet "Cannot delete path" +expectStderr 1 nix-store --delete "$input2" | grepQuiet "Cannot delete path" +expectStderr 1 nix-store --delete "$input3" | grepQuiet "Cannot delete path" # These same paths are referenced in the lower layer (by the seed 1 # build done in `initLowerStore`). 
-expectStderr 1 nix-store --store "$storeA" --delete $input2 | grepQuiet "Cannot delete path" -expectStderr 1 nix-store --store "$storeA" --delete $input3 | grepQuiet "Cannot delete path" +expectStderr 1 nix-store --store "$storeA" --delete "$input2" | grepQuiet "Cannot delete path" +expectStderr 1 nix-store --store "$storeA" --delete "$input3" | grepQuiet "Cannot delete path" # Can delete -nix-store --delete $hermetic +nix-store --delete "$hermetic" # Now unreferenced in upper layer, can delete -nix-store --delete $input3 -nix-store --delete $input2 +nix-store --delete "$input3" +nix-store --delete "$input2" diff --git a/tests/functional/local-overlay-store/delete-refs.sh b/tests/functional/local-overlay-store/delete-refs.sh index 62295aaa1..4fe08a077 100755 --- a/tests/functional/local-overlay-store/delete-refs.sh +++ b/tests/functional/local-overlay-store/delete-refs.sh @@ -1,3 +1,4 @@ +# shellcheck shell=bash source common.sh source ../common/init.sh From 20665e1c3d4e9b775a6da18337cb0d0e7eacc43d Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Sun, 28 Sep 2025 20:32:49 -0700 Subject: [PATCH 137/332] shellcheck fix: tests/functional/local-overlay-store/delete-duplicate-inner.sh --- maintainers/flake-module.nix | 2 -- tests/functional/local-overlay-store/delete-duplicate.sh | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index f527a6d39..f83ad2a7c 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -130,8 +130,6 @@ ''^tests/functional/local-overlay-store/check-post-init-inner\.sh$'' ''^tests/functional/local-overlay-store/check-post-init\.sh$'' ''^tests/functional/local-overlay-store/common\.sh$'' - ''^tests/functional/local-overlay-store/delete-duplicate-inner\.sh$'' - ''^tests/functional/local-overlay-store/delete-duplicate\.sh$'' ''^tests/functional/logging\.sh$'' ''^tests/functional/misc\.sh$'' ''^tests/functional/multiple-outputs\.sh$'' diff --git a/tests/functional/local-overlay-store/delete-duplicate.sh b/tests/functional/local-overlay-store/delete-duplicate.sh index e3b94e1cb..8a11350dc 100644 --- a/tests/functional/local-overlay-store/delete-duplicate.sh +++ b/tests/functional/local-overlay-store/delete-duplicate.sh @@ -1,3 +1,4 @@ +# shellcheck shell=bash source common.sh source ../common/init.sh From bb97f4b07aec9842d0e5663b186d674d34b16981 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Sun, 28 Sep 2025 20:35:47 -0700 Subject: [PATCH 138/332] shellcheck fix: tests/functional/local-overlay-store/common.sh --- maintainers/flake-module.nix | 1 - tests/functional/local-overlay-store/common.sh | 14 ++++++++++---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index f83ad2a7c..4d235f0fa 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -129,7 +129,6 @@ ''^tests/functional/local-overlay-store/build\.sh$'' ''^tests/functional/local-overlay-store/check-post-init-inner\.sh$'' ''^tests/functional/local-overlay-store/check-post-init\.sh$'' - ''^tests/functional/local-overlay-store/common\.sh$'' ''^tests/functional/logging\.sh$'' ''^tests/functional/misc\.sh$'' ''^tests/functional/multiple-outputs\.sh$'' diff --git a/tests/functional/local-overlay-store/common.sh b/tests/functional/local-overlay-store/common.sh index ba9b2805b..39ffa6e5a 100644 --- a/tests/functional/local-overlay-store/common.sh +++ b/tests/functional/local-overlay-store/common.sh @@ -1,3 +1,4 @@ +# 
shellcheck shell=bash source ../common/vars.sh source ../common/functions.sh @@ -54,6 +55,7 @@ setupStoreDirs () { storeA="$storeVolume/store-a" storeBTop="$storeVolume/store-b" storeBRoot="$storeVolume/merged-store" + # shellcheck disable=SC2034 storeB="local-overlay://?root=$storeBRoot&lower-store=$storeA&upper-layer=$storeBTop" # Creating testing directories mkdir -p "$storeVolume"/{store-a/nix/store,store-b,merged-store/nix/store,workdir} @@ -69,8 +71,10 @@ mountOverlayfs () { || skipTest "overlayfs is not supported" cleanupOverlay () { + # shellcheck disable=2317 umount -n "$storeBRoot/nix/store" - rm -r $storeVolume/workdir + # shellcheck disable=2317 + rm -r "$storeVolume"/workdir } trap cleanupOverlay EXIT } @@ -82,7 +86,8 @@ remountOverlayfs () { toRealPath () { storeDir=$1; shift storePath=$1; shift - echo $storeDir$(echo $storePath | sed "s^${NIX_STORE_DIR:-/nix/store}^^") + # shellcheck disable=SC2001 + echo "$storeDir""$(echo "$storePath" | sed "s^${NIX_STORE_DIR:-/nix/store}^^")" } initLowerStore () { @@ -90,8 +95,9 @@ initLowerStore () { nix-store --store "$storeA" --add ../dummy # Build something in lower store - drvPath=$(nix-instantiate --store $storeA ../hermetic.nix --arg withFinalRefs true --arg busybox "$busybox" --arg seed 1) - pathInLowerStore=$(nix-store --store "$storeA" --realise $drvPath) + drvPath=$(nix-instantiate --store "$storeA" ../hermetic.nix --arg withFinalRefs true --arg busybox "$busybox" --arg seed 1) + # shellcheck disable=SC2034 + pathInLowerStore=$(nix-store --store "$storeA" --realise "$drvPath") } addTextToStore() { From 6cae8da29dc1a12c8defbf0105c420e75507bee0 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Sun, 28 Sep 2025 20:37:23 -0700 Subject: [PATCH 139/332] shellcheck fix: tests/functional/local-overlay-store/check-post-init.sh --- maintainers/flake-module.nix | 2 -- .../check-post-init-inner.sh | 30 +++++++++---------- .../local-overlay-store/check-post-init.sh | 1 + 3 files changed, 16 insertions(+), 17 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 4d235f0fa..e78766669 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -127,8 +127,6 @@ ''^tests/functional/local-overlay-store/bad-uris\.sh$'' ''^tests/functional/local-overlay-store/build-inner\.sh$'' ''^tests/functional/local-overlay-store/build\.sh$'' - ''^tests/functional/local-overlay-store/check-post-init-inner\.sh$'' - ''^tests/functional/local-overlay-store/check-post-init\.sh$'' ''^tests/functional/logging\.sh$'' ''^tests/functional/misc\.sh$'' ''^tests/functional/multiple-outputs\.sh$'' diff --git a/tests/functional/local-overlay-store/check-post-init-inner.sh b/tests/functional/local-overlay-store/check-post-init-inner.sh index ac2499002..5f8050f89 100755 --- a/tests/functional/local-overlay-store/check-post-init-inner.sh +++ b/tests/functional/local-overlay-store/check-post-init-inner.sh @@ -19,41 +19,41 @@ mountOverlayfs ### Check status # Checking for path in lower layer -stat $(toRealPath "$storeA/nix/store" "$pathInLowerStore") +stat "$(toRealPath "$storeA/nix/store" "$pathInLowerStore")" # Checking for path in upper layer (should fail) -expect 1 stat $(toRealPath "$storeBTop" "$pathInLowerStore") +expect 1 stat "$(toRealPath "$storeBTop" "$pathInLowerStore")" # Checking for path in overlay store matching lower layer -diff $(toRealPath "$storeA/nix/store" "$pathInLowerStore") $(toRealPath "$storeBRoot/nix/store" "$pathInLowerStore") +diff "$(toRealPath "$storeA/nix/store" "$pathInLowerStore")" 
"$(toRealPath "$storeBRoot/nix/store" "$pathInLowerStore")" # Checking requisites query agreement [[ \ - $(nix-store --store $storeA --query --requisites $drvPath) \ + $(nix-store --store "$storeA" --query --requisites "$drvPath") \ == \ - $(nix-store --store $storeB --query --requisites $drvPath) \ + $(nix-store --store "$storeB" --query --requisites "$drvPath") \ ]] # Checking referrers query agreement -busyboxStore=$(nix store --store $storeA add-path $busybox) +busyboxStore=$(nix store --store "$storeA" add-path "$busybox") [[ \ - $(nix-store --store $storeA --query --referrers $busyboxStore) \ + $(nix-store --store "$storeA" --query --referrers "$busyboxStore") \ == \ - $(nix-store --store $storeB --query --referrers $busyboxStore) \ + $(nix-store --store "$storeB" --query --referrers "$busyboxStore") \ ]] # Checking derivers query agreement [[ \ - $(nix-store --store $storeA --query --deriver $pathInLowerStore) \ + $(nix-store --store "$storeA" --query --deriver "$pathInLowerStore") \ == \ - $(nix-store --store $storeB --query --deriver $pathInLowerStore) \ + $(nix-store --store "$storeB" --query --deriver "$pathInLowerStore") \ ]] # Checking outputs query agreement [[ \ - $(nix-store --store $storeA --query --outputs $drvPath) \ + $(nix-store --store "$storeA" --query --outputs "$drvPath") \ == \ - $(nix-store --store $storeB --query --outputs $drvPath) \ + $(nix-store --store "$storeB" --query --outputs "$drvPath") \ ]] # Verifying path in lower layer @@ -62,10 +62,10 @@ nix-store --verify-path --store "$storeA" "$pathInLowerStore" # Verifying path in merged-store nix-store --verify-path --store "$storeB" "$pathInLowerStore" -hashPart=$(echo $pathInLowerStore | sed "s^${NIX_STORE_DIR:-/nix/store}/^^" | sed 's/-.*//') +hashPart=$(echo "$pathInLowerStore" | sed "s^${NIX_STORE_DIR:-/nix/store}/^^" | sed 's/-.*//') # Lower store can find from hash part -[[ $(nix store --store $storeA path-from-hash-part $hashPart) == $pathInLowerStore ]] +[[ $(nix store --store "$storeA" path-from-hash-part "$hashPart") == "$pathInLowerStore" ]] # merged store can find from hash part -[[ $(nix store --store $storeB path-from-hash-part $hashPart) == $pathInLowerStore ]] +[[ $(nix store --store "$storeB" path-from-hash-part "$hashPart") == "$pathInLowerStore" ]] diff --git a/tests/functional/local-overlay-store/check-post-init.sh b/tests/functional/local-overlay-store/check-post-init.sh index e0c260276..323b9e489 100755 --- a/tests/functional/local-overlay-store/check-post-init.sh +++ b/tests/functional/local-overlay-store/check-post-init.sh @@ -1,3 +1,4 @@ +# shellcheck shell=bash source common.sh source ../common/init.sh From 8f1430153360dee20c62ce2a2d01f3be467b5450 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Sun, 28 Sep 2025 20:38:46 -0700 Subject: [PATCH 140/332] shellcheck fix: tests/functional/local-overlay-store/build-inner.sh --- maintainers/flake-module.nix | 2 -- tests/functional/local-overlay-store/build-inner.sh | 6 +++--- tests/functional/local-overlay-store/build.sh | 1 + 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index e78766669..feb69ada0 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -125,8 +125,6 @@ ''^tests/functional/local-overlay-store/add-lower-inner\.sh$'' ''^tests/functional/local-overlay-store/add-lower\.sh$'' ''^tests/functional/local-overlay-store/bad-uris\.sh$'' - ''^tests/functional/local-overlay-store/build-inner\.sh$'' - 
''^tests/functional/local-overlay-store/build\.sh$'' ''^tests/functional/logging\.sh$'' ''^tests/functional/misc\.sh$'' ''^tests/functional/multiple-outputs\.sh$'' diff --git a/tests/functional/local-overlay-store/build-inner.sh b/tests/functional/local-overlay-store/build-inner.sh index 1f3ddded7..2463e4467 100755 --- a/tests/functional/local-overlay-store/build-inner.sh +++ b/tests/functional/local-overlay-store/build-inner.sh @@ -18,13 +18,13 @@ mountOverlayfs ### Do a build in overlay store -path=$(nix-build ../hermetic.nix --arg busybox $busybox --arg seed 2 --store "$storeB" --no-out-link) +path=$(nix-build ../hermetic.nix --arg busybox "$busybox" --arg seed 2 --store "$storeB" --no-out-link) # Checking for path in lower layer (should fail) -expect 1 stat $(toRealPath "$storeA/nix/store" "$path") +expect 1 stat "$(toRealPath "$storeA/nix/store" "$path")" # Checking for path in upper layer -stat $(toRealPath "$storeBTop" "$path") +stat "$(toRealPath "$storeBTop" "$path")" # Verifying path in overlay store nix-store --verify-path --store "$storeB" "$path" diff --git a/tests/functional/local-overlay-store/build.sh b/tests/functional/local-overlay-store/build.sh index 2251be7e7..d4a29d6c2 100755 --- a/tests/functional/local-overlay-store/build.sh +++ b/tests/functional/local-overlay-store/build.sh @@ -1,3 +1,4 @@ +# shellcheck shell=bash source common.sh source ../common/init.sh From 8f0d9412baf0690713fac92b708c6d3bd6239905 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Sun, 28 Sep 2025 20:39:40 -0700 Subject: [PATCH 141/332] shellcheck fix: tests/functional/local-overlay-store/bad-uris.sh --- maintainers/flake-module.nix | 1 - tests/functional/local-overlay-store/bad-uris.sh | 6 ++++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index feb69ada0..500a05c92 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -124,7 +124,6 @@ ''^tests/functional/linux-sandbox\.sh$'' ''^tests/functional/local-overlay-store/add-lower-inner\.sh$'' ''^tests/functional/local-overlay-store/add-lower\.sh$'' - ''^tests/functional/local-overlay-store/bad-uris\.sh$'' ''^tests/functional/logging\.sh$'' ''^tests/functional/misc\.sh$'' ''^tests/functional/multiple-outputs\.sh$'' diff --git a/tests/functional/local-overlay-store/bad-uris.sh b/tests/functional/local-overlay-store/bad-uris.sh index f0c6a151c..1b5b7fc54 100644 --- a/tests/functional/local-overlay-store/bad-uris.sh +++ b/tests/functional/local-overlay-store/bad-uris.sh @@ -1,3 +1,4 @@ +# shellcheck shell=bash source common.sh source ../common/init.sh @@ -5,7 +6,7 @@ requireEnvironment setupConfig setupStoreDirs -mkdir -p $TEST_ROOT/bad_test +mkdir -p "$TEST_ROOT"/bad_test badTestRoot=$TEST_ROOT/bad_test storeBadRoot="local-overlay://?root=$badTestRoot&lower-store=$storeA&upper-layer=$storeBTop" storeBadLower="local-overlay://?root=$storeBRoot&lower-store=$badTestRoot&upper-layer=$storeBTop" @@ -18,7 +19,8 @@ declare -a storesBad=( TODO_NixOS for i in "${storesBad[@]}"; do - echo $i + echo "$i" + # shellcheck disable=SC2119 execUnshare < Date: Sun, 28 Sep 2025 20:40:08 -0700 Subject: [PATCH 142/332] shellcheck fix: tests/functional/local-overlay-store/add-lower-inner.sh --- maintainers/flake-module.nix | 2 -- tests/functional/local-overlay-store/add-lower.sh | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 500a05c92..8c84d0517 100644 --- a/maintainers/flake-module.nix 
+++ b/maintainers/flake-module.nix @@ -122,8 +122,6 @@ ''^tests/functional/install-darwin\.sh$'' ''^tests/functional/legacy-ssh-store\.sh$'' ''^tests/functional/linux-sandbox\.sh$'' - ''^tests/functional/local-overlay-store/add-lower-inner\.sh$'' - ''^tests/functional/local-overlay-store/add-lower\.sh$'' ''^tests/functional/logging\.sh$'' ''^tests/functional/misc\.sh$'' ''^tests/functional/multiple-outputs\.sh$'' diff --git a/tests/functional/local-overlay-store/add-lower.sh b/tests/functional/local-overlay-store/add-lower.sh index 33bf20ebd..87cdb4f59 100755 --- a/tests/functional/local-overlay-store/add-lower.sh +++ b/tests/functional/local-overlay-store/add-lower.sh @@ -1,3 +1,4 @@ +# shellcheck shell=bash source common.sh source ../common/init.sh From 69eae7770a9be67a7ff253cb7bef844a9fb80821 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Mon, 29 Sep 2025 10:28:52 +0200 Subject: [PATCH 143/332] fix mingw build --- src/libstore/gc.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc index fdbc670df..86c4e37a6 100644 --- a/src/libstore/gc.cc +++ b/src/libstore/gc.cc @@ -330,7 +330,7 @@ static void readProcLink(const std::filesystem::path & file, UncheckedRoots & ro throw; } if (buf.is_absolute()) - roots[buf].emplace(file.string()); + roots[buf.string()].emplace(file.string()); } static std::string quoteRegexChars(const std::string & raw) @@ -343,7 +343,7 @@ static std::string quoteRegexChars(const std::string & raw) static void readFileRoots(const std::filesystem::path & path, UncheckedRoots & roots) { try { - roots[readFile(path)].emplace(path); + roots[readFile(path)].emplace(path.string()); } catch (SysError & e) { if (e.errNo != ENOENT && e.errNo != EACCES) throw; From a9ffa42ddab1edb5e99fff517751c7906dafb224 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Mon, 29 Sep 2025 12:01:45 +0200 Subject: [PATCH 144/332] Fix thread-safety issue with ptsname() usage Replace non-thread-safe ptsname() calls with a new getPtsName() helper function that: - Uses thread-safe ptsname_r() on Linux/BSD platforms - Uses mutex-protected ptsname() on macOS (which lacks ptsname_r()) --- src/libstore/unix/build/derivation-builder.cc | 6 ++-- src/libutil/include/nix/util/terminal.hh | 8 ++++++ src/libutil/terminal.cc | 28 +++++++++++++++++++ 3 files changed, 39 insertions(+), 3 deletions(-) diff --git a/src/libstore/unix/build/derivation-builder.cc b/src/libstore/unix/build/derivation-builder.cc index 3a6f71555..04e8cb176 100644 --- a/src/libstore/unix/build/derivation-builder.cc +++ b/src/libstore/unix/build/derivation-builder.cc @@ -18,6 +18,7 @@ #include "nix/store/user-lock.hh" #include "nix/store/globals.hh" #include "nix/store/build/derivation-env-desugar.hh" +#include "nix/util/terminal.hh" #include @@ -808,8 +809,7 @@ std::optional DerivationBuilderImpl::startBuild() if (!builderOut) throw SysError("opening pseudoterminal master"); - // FIXME: not thread-safe, use ptsname_r - std::string slaveName = ptsname(builderOut.get()); + std::string slaveName = getPtsName(builderOut.get()); if (buildUser) { if (chmod(slaveName.c_str(), 0600)) @@ -923,7 +923,7 @@ void DerivationBuilderImpl::prepareSandbox() void DerivationBuilderImpl::openSlave() { - std::string slaveName = ptsname(builderOut.get()); + std::string slaveName = getPtsName(builderOut.get()); AutoCloseFD builderOut = open(slaveName.c_str(), O_RDWR | O_NOCTTY); if (!builderOut) diff --git a/src/libutil/include/nix/util/terminal.hh 
b/src/libutil/include/nix/util/terminal.hh index f19de268c..fa71e074e 100644 --- a/src/libutil/include/nix/util/terminal.hh +++ b/src/libutil/include/nix/util/terminal.hh @@ -36,4 +36,12 @@ void updateWindowSize(); */ std::pair getWindowSize(); +/** + * Get the slave name of a pseudoterminal in a thread-safe manner. + * + * @param fd The file descriptor of the pseudoterminal master + * @return The slave device name as a string + */ +std::string getPtsName(int fd); + } // namespace nix diff --git a/src/libutil/terminal.cc b/src/libutil/terminal.cc index b5765487c..656847487 100644 --- a/src/libutil/terminal.cc +++ b/src/libutil/terminal.cc @@ -1,6 +1,7 @@ #include "nix/util/terminal.hh" #include "nix/util/environment-variables.hh" #include "nix/util/sync.hh" +#include "nix/util/error.hh" #ifdef _WIN32 # include @@ -12,6 +13,8 @@ #endif #include #include +#include +#include // for ptsname and ptsname_r namespace { @@ -176,4 +179,29 @@ std::pair getWindowSize() return *windowSize.lock(); } +std::string getPtsName(int fd) +{ +#ifdef __APPLE__ + static std::mutex ptsnameMutex; + // macOS doesn't have ptsname_r, use mutex-protected ptsname + std::lock_guard lock(ptsnameMutex); + const char * name = ptsname(fd); + if (!name) { + throw SysError("getting pseudoterminal slave name"); + } + return name; +#else + // Use thread-safe ptsname_r on platforms that support it + // PTY names are typically short: + // - Linux: /dev/pts/N (where N is usually < 1000) + // - FreeBSD: /dev/pts/N + // 64 bytes is more than sufficient for any Unix PTY name + char buf[64]; + if (ptsname_r(fd, buf, sizeof(buf)) != 0) { + throw SysError("getting pseudoterminal slave name"); + } + return buf; +#endif +} + } // namespace nix From 5ec91381795ffd4df4a12ba3ca6febb37129f66e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Mon, 29 Sep 2025 12:21:57 +0200 Subject: [PATCH 145/332] Prevent infinite symlink loop in followLinksToStore() The followLinksToStore() function could hang indefinitely when encountering symlink cycles outside the Nix store, causing 100% CPU usage and blocking any operations that use this function. This affects multiple commands including nix-store --query, --delete, --verify, nix-env, and nix-copy-closure when given paths with symlink cycles. The fix adds a maximum limit of 1024 symlink follows (matching the limit used by canonPath) and throws an error when exceeded, preventing the infinite loop while preserving the original semantics of stopping at the first path inside the store. 
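
For illustration only — the directory and paths below are hypothetical, and
the exact output depends on the invocation — a minimal reproduction sketch of
the previous hang and the new behaviour:

    $ mkdir -p /tmp/cycle
    $ ln -s b /tmp/cycle/a
    $ ln -s a /tmp/cycle/b
    $ nix-store --query --references /tmp/cycle/a
    # before: spins indefinitely at 100% CPU
    # after:  error: too many symbolic links encountered while resolving '/tmp/cycle/a'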
--- src/libstore/store-api.cc | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index a0b06db54..c26c7d826 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -58,12 +58,22 @@ std::pair StoreDirConfig::toStorePath(PathView path) const Path Store::followLinksToStore(std::string_view _path) const { Path path = absPath(std::string(_path)); + + // Limit symlink follows to prevent infinite loops + unsigned int followCount = 0; + const unsigned int maxFollow = 1024; + while (!isInStore(path)) { if (!std::filesystem::is_symlink(path)) break; + + if (++followCount >= maxFollow) + throw Error("too many symbolic links encountered while resolving '%s'", _path); + auto target = readLink(path); path = absPath(target, dirOf(path)); } + if (!isInStore(path)) throw BadStorePath("path '%1%' is not in the Nix store", path); return path; From 121dda0f1f5fbb861ca38d7225b8923ee53337b5 Mon Sep 17 00:00:00 2001 From: Ephraim Siegfried Date: Mon, 29 Sep 2025 14:07:26 +0200 Subject: [PATCH 146/332] docs: fix build command in make-content-addressed.md --- src/nix/make-content-addressed.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/nix/make-content-addressed.md b/src/nix/make-content-addressed.md index b1f7da525..e6a51c83a 100644 --- a/src/nix/make-content-addressed.md +++ b/src/nix/make-content-addressed.md @@ -51,7 +51,7 @@ be verified without any additional information such as signatures. This means that a command like ```console -# nix store build /nix/store/5skmmcb9svys5lj3kbsrjg7vf2irid63-hello-2.10 \ +# nix build /nix/store/5skmmcb9svys5lj3kbsrjg7vf2irid63-hello-2.10 \ --substituters https://my-cache.example.org ``` From 020f67a653fc6cf67bc16585d2969af624bd694a Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:14:41 -0700 Subject: [PATCH 147/332] shellcheck fix: tests/functional/flakes/prefetch.sh --- maintainers/flake-module.nix | 1 - tests/functional/flakes/prefetch.sh | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 8c84d0517..a3e126d3f 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/flakes/prefetch\.sh$'' ''^tests/functional/flakes/run\.sh$'' ''^tests/functional/flakes/show\.sh$'' ''^tests/functional/formatter\.sh$'' diff --git a/tests/functional/flakes/prefetch.sh b/tests/functional/flakes/prefetch.sh index a451b7120..999270c1e 100755 --- a/tests/functional/flakes/prefetch.sh +++ b/tests/functional/flakes/prefetch.sh @@ -3,6 +3,6 @@ source common.sh # Test symlinks in zip files (#10649). 
-path=$(nix flake prefetch --json file://$(pwd)/tree.zip | jq -r .storePath) -[[ $(cat $path/foo) = foo ]] -[[ $(readlink $path/bar) = foo ]] +path=$(nix flake prefetch --json file://"$(pwd)"/tree.zip | jq -r .storePath) +[[ $(cat "$path"/foo) = foo ]] +[[ $(readlink "$path"/bar) = foo ]] From cb22518754b553d0d830e48a7caea26c48cb345a Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:15:11 -0700 Subject: [PATCH 148/332] shellcheck fix: tests/functional/flakes/run.sh --- maintainers/flake-module.nix | 1 - tests/functional/flakes/run.sh | 20 ++++++++++---------- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index a3e126d3f..f5ac5c489 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/flakes/run\.sh$'' ''^tests/functional/flakes/show\.sh$'' ''^tests/functional/formatter\.sh$'' ''^tests/functional/formatter\.simple\.sh$'' diff --git a/tests/functional/flakes/run.sh b/tests/functional/flakes/run.sh index 0a2947825..107b3dfb8 100755 --- a/tests/functional/flakes/run.sh +++ b/tests/functional/flakes/run.sh @@ -5,10 +5,10 @@ source ../common.sh TODO_NixOS clearStore -rm -rf $TEST_HOME/.cache $TEST_HOME/.config $TEST_HOME/.local +rm -rf "$TEST_HOME"/.cache "$TEST_HOME"/.config "$TEST_HOME"/.local -cp ../shell-hello.nix "${config_nix}" $TEST_HOME -cd $TEST_HOME +cp ../shell-hello.nix "${config_nix}" "$TEST_HOME" +cd "$TEST_HOME" cat < flake.nix { @@ -34,8 +34,8 @@ nix run --no-write-lock-file .#pkgAsPkg # For instance, we might set an environment variable temporarily to affect some # initialization or whatnot, but this must not leak into the environment of the # command being run. -env > $TEST_ROOT/expected-env -nix run -f shell-hello.nix env > $TEST_ROOT/actual-env +env > "$TEST_ROOT"/expected-env +nix run -f shell-hello.nix env > "$TEST_ROOT"/actual-env # Remove/reset variables we expect to be different. # - PATH is modified by nix shell # - we unset TMPDIR on macOS if it contains /var/folders. bad. https://github.com/NixOS/nix/issues/7731 @@ -48,12 +48,12 @@ sed -i \ -e '/^TMPDIR=\/var\/folders\/.*/d' \ -e '/^__CF_USER_TEXT_ENCODING=.*$/d' \ -e '/^__LLVM_PROFILE_RT_INIT_ONCE=.*$/d' \ - $TEST_ROOT/expected-env $TEST_ROOT/actual-env -sort $TEST_ROOT/expected-env | uniq > $TEST_ROOT/expected-env.sorted + "$TEST_ROOT"/expected-env "$TEST_ROOT"/actual-env +sort "$TEST_ROOT"/expected-env | uniq > "$TEST_ROOT"/expected-env.sorted # nix run appears to clear _. I don't understand why. Is this ok? -echo "_=..." >> $TEST_ROOT/actual-env -sort $TEST_ROOT/actual-env | uniq > $TEST_ROOT/actual-env.sorted -diff $TEST_ROOT/expected-env.sorted $TEST_ROOT/actual-env.sorted +echo "_=..." 
>> "$TEST_ROOT"/actual-env +sort "$TEST_ROOT"/actual-env | uniq > "$TEST_ROOT"/actual-env.sorted +diff "$TEST_ROOT"/expected-env.sorted "$TEST_ROOT"/actual-env.sorted clearStore From f596c9b8c392e2a67d9fe5a6701ccaec5df18a24 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:16:29 -0700 Subject: [PATCH 149/332] shellcheck fix: tests/functional/flakes/show.sh --- maintainers/flake-module.nix | 1 - tests/functional/flakes/show.sh | 9 +++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index f5ac5c489..8350fea5c 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/flakes/show\.sh$'' ''^tests/functional/formatter\.sh$'' ''^tests/functional/formatter\.simple\.sh$'' ''^tests/functional/gc-auto\.sh$'' diff --git a/tests/functional/flakes/show.sh b/tests/functional/flakes/show.sh index 7fcc6aca9..a08db115a 100755 --- a/tests/functional/flakes/show.sh +++ b/tests/functional/flakes/show.sh @@ -12,6 +12,7 @@ pushd "$flakeDir" # By default: Only show the packages content for the current system and no # legacyPackages at all nix flake show --json > show-output.json +# shellcheck disable=SC2016 nix eval --impure --expr ' let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); in @@ -23,6 +24,7 @@ true # With `--all-systems`, show the packages for all systems nix flake show --json --all-systems > show-output.json +# shellcheck disable=SC2016 nix eval --impure --expr ' let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); in @@ -33,6 +35,7 @@ true # With `--legacy`, show the legacy packages nix flake show --json --legacy > show-output.json +# shellcheck disable=SC2016 nix eval --impure --expr ' let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); in @@ -80,6 +83,7 @@ cat >flake.nix < show-output.json +# shellcheck disable=SC2016 nix eval --impure --expr ' let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); in @@ -91,11 +95,12 @@ true # Test that nix flake show doesn't fail if one of the outputs contains # an IFD popd -writeIfdFlake $flakeDir -pushd $flakeDir +writeIfdFlake "$flakeDir" +pushd "$flakeDir" nix flake show --json > show-output.json +# shellcheck disable=SC2016 nix eval --impure --expr ' let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); in From 08a82f46821e7c875dc6d39a75bec82c633043db Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:17:24 -0700 Subject: [PATCH 150/332] shellcheck fix: tests/functional/formatter.simple.sh --- maintainers/flake-module.nix | 2 -- tests/functional/formatter.sh | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 8350fea5c..12732bf90 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,8 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/formatter\.sh$'' - ''^tests/functional/formatter\.simple\.sh$'' ''^tests/functional/gc-auto\.sh$'' ''^tests/functional/gc-concurrent\.builder\.sh$'' ''^tests/functional/gc-concurrent\.sh$'' diff --git a/tests/functional/formatter.sh b/tests/functional/formatter.sh index 6631dd6b8..03b31708d 100755 --- a/tests/functional/formatter.sh +++ b/tests/functional/formatter.sh @@ -16,6 +16,7 @@ nix fmt --help | grep "reformat 
your code" nix fmt run --help | grep "reformat your code" nix fmt build --help | grep "build" +# shellcheck disable=SC2154 cat << EOF > flake.nix { outputs = _: { From 4192ca9131ce93ac51cde4110dfc4b1bf251e243 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:18:50 -0700 Subject: [PATCH 151/332] shellcheck fix: tests/functional/gc-auto.sh --- maintainers/flake-module.nix | 1 - tests/functional/gc-auto.sh | 22 +++++++++++++--------- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 12732bf90..51ac3a629 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/gc-auto\.sh$'' ''^tests/functional/gc-concurrent\.builder\.sh$'' ''^tests/functional/gc-concurrent\.sh$'' ''^tests/functional/gc-concurrent2\.builder\.sh$'' diff --git a/tests/functional/gc-auto.sh b/tests/functional/gc-auto.sh index efe3e4b2b..ea877f27f 100755 --- a/tests/functional/gc-auto.sh +++ b/tests/functional/gc-auto.sh @@ -2,22 +2,26 @@ source common.sh +# shellcheck disable=SC1111 needLocalStore "“min-free” and “max-free” are daemon options" TODO_NixOS clearStore +# shellcheck disable=SC2034 garbage1=$(nix store add-path --name garbage1 ./nar-access.sh) +# shellcheck disable=SC2034 garbage2=$(nix store add-path --name garbage2 ./nar-access.sh) +# shellcheck disable=SC2034 garbage3=$(nix store add-path --name garbage3 ./nar-access.sh) -ls -l $garbage3 -POSIXLY_CORRECT=1 du $garbage3 +ls -l "$garbage3" +POSIXLY_CORRECT=1 du "$garbage3" fake_free=$TEST_ROOT/fake-free export _NIX_TEST_FREE_SPACE_FILE=$fake_free -echo 1100 > $fake_free +echo 1100 > "$fake_free" fifoLock=$TEST_ROOT/fifoLock mkfifo "$fifoLock" @@ -65,11 +69,11 @@ with import ${config_nix}; mkDerivation { EOF ) -nix build --impure -v -o $TEST_ROOT/result-A -L --expr "$expr" \ +nix build --impure -v -o "$TEST_ROOT"/result-A -L --expr "$expr" \ --min-free 1K --max-free 2K --min-free-check-interval 1 & pid1=$! -nix build --impure -v -o $TEST_ROOT/result-B -L --expr "$expr2" \ +nix build --impure -v -o "$TEST_ROOT"/result-B -L --expr "$expr2" \ --min-free 1K --max-free 2K --min-free-check-interval 1 & pid2=$! @@ -77,9 +81,9 @@ pid2=$! # If the first build fails, we need to postpone the failure to still allow # the second one to finish wait "$pid1" || FIRSTBUILDSTATUS=$? 
-echo "unlock" > $fifoLock -( exit ${FIRSTBUILDSTATUS:-0} ) +echo "unlock" > "$fifoLock" +( exit "${FIRSTBUILDSTATUS:-0}" ) wait "$pid2" -[[ foo = $(cat $TEST_ROOT/result-A/bar) ]] -[[ foo = $(cat $TEST_ROOT/result-B/bar) ]] +[[ foo = $(cat "$TEST_ROOT"/result-A/bar) ]] +[[ foo = $(cat "$TEST_ROOT"/result-B/bar) ]] From 613bd67574c1455577b70ba435bcbfcc8329e13b Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:20:02 -0700 Subject: [PATCH 152/332] shellcheck fix: tests/functional/gc-concurrent.builder.sh --- maintainers/flake-module.nix | 1 - tests/functional/gc-concurrent.builder.sh | 11 +++++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 51ac3a629..65c94c415 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/gc-concurrent\.builder\.sh$'' ''^tests/functional/gc-concurrent\.sh$'' ''^tests/functional/gc-concurrent2\.builder\.sh$'' ''^tests/functional/gc-non-blocking\.sh$'' diff --git a/tests/functional/gc-concurrent.builder.sh b/tests/functional/gc-concurrent.builder.sh index bb6dcd4cf..b3c7abeb1 100644 --- a/tests/functional/gc-concurrent.builder.sh +++ b/tests/functional/gc-concurrent.builder.sh @@ -1,16 +1,19 @@ +# shellcheck shell=bash +# shellcheck disable=SC2154 echo "Build started" > "$lockFifo" -mkdir $out -echo $(cat $input1/foo)$(cat $input2/bar) > $out/foobar +# shellcheck disable=SC2154 +mkdir "$out" +echo "$(cat "$input1"/foo)""$(cat "$input2"/bar)" > "$out"/foobar # Wait for someone to write on the fifo cat "$lockFifo" # $out should not have been GC'ed while we were sleeping, but just in # case... -mkdir -p $out +mkdir -p "$out" # Check that the GC hasn't deleted the lock on our output. test -e "$out.lock" -ln -s $input2 $out/input-2 +ln -s "$input2" "$out"/input-2 From 75df03204b2505e1132fa67a45ae589239ccdaec Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:21:47 -0700 Subject: [PATCH 153/332] shellcheck fix: tests/functional/gc-concurrent.sh --- maintainers/flake-module.nix | 1 - tests/functional/gc-concurrent.sh | 34 +++++++++++++++++-------------- 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 65c94c415..a2edadebb 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/gc-concurrent\.sh$'' ''^tests/functional/gc-concurrent2\.builder\.sh$'' ''^tests/functional/gc-non-blocking\.sh$'' ''^tests/functional/hash-convert\.sh$'' diff --git a/tests/functional/gc-concurrent.sh b/tests/functional/gc-concurrent.sh index df180b14f..dcfcea3e9 100755 --- a/tests/functional/gc-concurrent.sh +++ b/tests/functional/gc-concurrent.sh @@ -10,54 +10,58 @@ lockFifo1=$TEST_ROOT/test1.fifo mkfifo "$lockFifo1" drvPath1=$(nix-instantiate gc-concurrent.nix -A test1 --argstr lockFifo "$lockFifo1") -outPath1=$(nix-store -q $drvPath1) +outPath1=$(nix-store -q "$drvPath1") drvPath2=$(nix-instantiate gc-concurrent.nix -A test2) -outPath2=$(nix-store -q $drvPath2) +outPath2=$(nix-store -q "$drvPath2") drvPath3=$(nix-instantiate simple.nix) -outPath3=$(nix-store -r $drvPath3) +outPath3=$(nix-store -r "$drvPath3") -(! test -e $outPath3.lock) -touch $outPath3.lock +# shellcheck disable=SC2235 +(! 
test -e "$outPath3".lock) +touch "$outPath3".lock rm -f "$NIX_STATE_DIR"/gcroots/foo* -ln -s $drvPath2 "$NIX_STATE_DIR/gcroots/foo" -ln -s $outPath3 "$NIX_STATE_DIR/gcroots/foo2" +ln -s "$drvPath2" "$NIX_STATE_DIR/gcroots/foo" +ln -s "$outPath3" "$NIX_STATE_DIR/gcroots/foo2" # Start build #1 in the background. It starts immediately. nix-store -rvv "$drvPath1" & pid1=$! # Wait for the build of $drvPath1 to start -cat $lockFifo1 +cat "$lockFifo1" # Run the garbage collector while the build is running. nix-collect-garbage # Unlock the build of $drvPath1 -echo "" > $lockFifo1 +echo "" > "$lockFifo1" echo waiting for pid $pid1 to finish... wait $pid1 # Check that the root of build #1 and its dependencies haven't been # deleted. The should not be deleted by the GC because they were # being built during the GC. -cat $outPath1/foobar -cat $outPath1/input-2/bar +cat "$outPath1"/foobar +cat "$outPath1"/input-2/bar # Check that the build build $drvPath2 succeeds. # It should succeed because the derivation is a GC root. nix-store -rvv "$drvPath2" -cat $outPath2/foobar +cat "$outPath2"/foobar rm -f "$NIX_STATE_DIR"/gcroots/foo* # The collector should have deleted lock files for paths that have # been built previously. -(! test -e $outPath3.lock) +# shellcheck disable=SC2235 +(! test -e "$outPath3".lock) # If we run the collector now, it should delete outPath1/2. nix-collect-garbage -(! test -e $outPath1) -(! test -e $outPath2) +# shellcheck disable=SC2235 +(! test -e "$outPath1") +# shellcheck disable=SC2235 +(! test -e "$outPath2") From 2e5952fb6aed7015af50f09a1c60f94cd0649f22 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:22:45 -0700 Subject: [PATCH 154/332] shellcheck fix: tests/functional/gc-concurrent2.builder.sh --- maintainers/flake-module.nix | 1 - tests/functional/gc-concurrent2.builder.sh | 7 +++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index a2edadebb..dd7d1d338 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/gc-concurrent2\.builder\.sh$'' ''^tests/functional/gc-non-blocking\.sh$'' ''^tests/functional/hash-convert\.sh$'' ''^tests/functional/impure-derivations\.sh$'' diff --git a/tests/functional/gc-concurrent2.builder.sh b/tests/functional/gc-concurrent2.builder.sh index 4f6c58b96..4b1ad6f5e 100644 --- a/tests/functional/gc-concurrent2.builder.sh +++ b/tests/functional/gc-concurrent2.builder.sh @@ -1,5 +1,8 @@ -mkdir $out -echo $(cat $input1/foo)$(cat $input2/bar)xyzzy > $out/foobar +# shellcheck shell=bash +# shellcheck disable=SC2154 +mkdir "$out" +# shellcheck disable=SC2154 +echo "$(cat "$input1"/foo)""$(cat "$input2"/bar)"xyzzy > "$out"/foobar # Check that the GC hasn't deleted the lock on our output. 
test -e "$out.lock" From 52b9fb38e0dfc0af226a25d21197b40fa44e6c78 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:23:41 -0700 Subject: [PATCH 155/332] shellcheck fix: tests/functional/gc-non-blocking.sh --- maintainers/flake-module.nix | 1 - tests/functional/gc-non-blocking.sh | 14 ++++++++------ 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index dd7d1d338..b080683ff 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/gc-non-blocking\.sh$'' ''^tests/functional/hash-convert\.sh$'' ''^tests/functional/impure-derivations\.sh$'' ''^tests/functional/impure-eval\.sh$'' diff --git a/tests/functional/gc-non-blocking.sh b/tests/functional/gc-non-blocking.sh index 9cd5c0e1c..a85b8e5db 100755 --- a/tests/functional/gc-non-blocking.sh +++ b/tests/functional/gc-non-blocking.sh @@ -23,17 +23,17 @@ mkfifo "$fifo2" dummy=$(nix store add-path ./simple.nix) running=$TEST_ROOT/running -touch $running +touch "$running" # Start GC. -(_NIX_TEST_GC_SYNC_1=$fifo1 _NIX_TEST_GC_SYNC_2=$fifo2 nix-store --gc -vvvvv; rm $running) & +(_NIX_TEST_GC_SYNC_1=$fifo1 _NIX_TEST_GC_SYNC_2=$fifo2 nix-store --gc -vvvvv; rm "$running") & pid=$! sleep 2 # Delay the start of the root server to check that the build below # correctly handles ENOENT when connecting to the root server. -(sleep 1; echo > $fifo1) & +(sleep 1; echo > "$fifo1") & pid2=$! # Start a build. This should not be blocked by the GC in progress. @@ -47,6 +47,8 @@ outPath=$(nix-build --max-silent-time 60 -o "$TEST_ROOT/result" -E " wait $pid wait $pid2 -(! test -e $running) -(! test -e $dummy) -test -e $outPath +# shellcheck disable=SC2235 +(! test -e "$running") +# shellcheck disable=SC2235 +(! test -e "$dummy") +test -e "$outPath" From 745d1f95191c90f46032c607bb07037ef2d614cb Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 10:11:29 -0700 Subject: [PATCH 156/332] shellcheck fix: tests/functional/ca/build-delete.sh --- maintainers/flake-module.nix | 1 - tests/functional/ca/build-delete.sh | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 8c84d0517..5ba8aa505 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -172,7 +172,6 @@ # Content-addressed test files that use recursive-*looking* sourcing # (cd .. && source ), causing shellcheck to loop # They're small wrapper scripts with not a lot going on - ''^tests/functional/ca/build-delete\.sh$'' ''^tests/functional/ca/build-dry\.sh$'' ''^tests/functional/ca/eval-store\.sh$'' ''^tests/functional/ca/gc\.sh$'' diff --git a/tests/functional/ca/build-delete.sh b/tests/functional/ca/build-delete.sh index 3ad3d0a80..173cfb224 100644 --- a/tests/functional/ca/build-delete.sh +++ b/tests/functional/ca/build-delete.sh @@ -4,4 +4,5 @@ source common.sh export NIX_TESTS_CA_BY_DEFAULT=1 cd .. 
+# shellcheck source=/dev/null source ./build-delete.sh From 5846d9d4dcdbe7604c34c046c075344a9859abc7 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 10:12:04 -0700 Subject: [PATCH 157/332] shellcheck fix: tests/functional/ca/build-dry.sh --- maintainers/flake-module.nix | 1 - tests/functional/ca/build-dry.sh | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 5ba8aa505..5a92e624f 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -172,7 +172,6 @@ # Content-addressed test files that use recursive-*looking* sourcing # (cd .. && source ), causing shellcheck to loop # They're small wrapper scripts with not a lot going on - ''^tests/functional/ca/build-dry\.sh$'' ''^tests/functional/ca/eval-store\.sh$'' ''^tests/functional/ca/gc\.sh$'' ''^tests/functional/ca/import-from-derivation\.sh$'' diff --git a/tests/functional/ca/build-dry.sh b/tests/functional/ca/build-dry.sh index 9a72075ec..0b8b959ea 100644 --- a/tests/functional/ca/build-dry.sh +++ b/tests/functional/ca/build-dry.sh @@ -2,5 +2,6 @@ source common.sh export NIX_TESTS_CA_BY_DEFAULT=1 +# shellcheck source=/dev/null cd .. && source build-dry.sh From 4232cb045afba8f5dfba2231525a638ec0c0ae67 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 10:13:58 -0700 Subject: [PATCH 158/332] Remaining functional/ca tests for shellcheck --- maintainers/flake-module.nix | 15 --------------- tests/functional/ca/build-dry.sh | 1 + tests/functional/ca/eval-store.sh | 1 + tests/functional/ca/gc.sh | 1 + tests/functional/ca/import-from-derivation.sh | 2 +- tests/functional/ca/multiple-outputs.sh | 1 + tests/functional/ca/new-build-cmd.sh | 1 + tests/functional/ca/nix-shell.sh | 2 ++ tests/functional/ca/post-hook.sh | 1 + tests/functional/ca/recursive.sh | 1 + tests/functional/ca/repl.sh | 2 +- tests/functional/ca/selfref-gc.sh | 1 + tests/functional/ca/why-depends.sh | 2 +- 13 files changed, 13 insertions(+), 18 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 5a92e624f..7752ee2ce 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -168,21 +168,6 @@ ''^tests/functional/user-envs\.builder\.sh$'' ''^tests/functional/user-envs\.sh$'' ''^tests/functional/why-depends\.sh$'' - - # Content-addressed test files that use recursive-*looking* sourcing - # (cd .. 
&& source ), causing shellcheck to loop - # They're small wrapper scripts with not a lot going on - ''^tests/functional/ca/eval-store\.sh$'' - ''^tests/functional/ca/gc\.sh$'' - ''^tests/functional/ca/import-from-derivation\.sh$'' - ''^tests/functional/ca/multiple-outputs\.sh$'' - ''^tests/functional/ca/new-build-cmd\.sh$'' - ''^tests/functional/ca/nix-shell\.sh$'' - ''^tests/functional/ca/post-hook\.sh$'' - ''^tests/functional/ca/recursive\.sh$'' - ''^tests/functional/ca/repl\.sh$'' - ''^tests/functional/ca/selfref-gc\.sh$'' - ''^tests/functional/ca/why-depends\.sh$'' ]; }; }; diff --git a/tests/functional/ca/build-dry.sh b/tests/functional/ca/build-dry.sh index 0b8b959ea..44bd7202b 100644 --- a/tests/functional/ca/build-dry.sh +++ b/tests/functional/ca/build-dry.sh @@ -1,3 +1,4 @@ +# shellcheck shell=bash source common.sh export NIX_TESTS_CA_BY_DEFAULT=1 diff --git a/tests/functional/ca/eval-store.sh b/tests/functional/ca/eval-store.sh index 9cc499606..0ffdef839 100644 --- a/tests/functional/ca/eval-store.sh +++ b/tests/functional/ca/eval-store.sh @@ -7,4 +7,5 @@ source common.sh export NIX_TESTS_CA_BY_DEFAULT=1 cd .. +# shellcheck source=/dev/null source eval-store.sh diff --git a/tests/functional/ca/gc.sh b/tests/functional/ca/gc.sh index e9b6c5ab5..26b037f64 100755 --- a/tests/functional/ca/gc.sh +++ b/tests/functional/ca/gc.sh @@ -7,4 +7,5 @@ source common.sh export NIX_TESTS_CA_BY_DEFAULT=1 cd .. +# shellcheck source=/dev/null source gc.sh diff --git a/tests/functional/ca/import-from-derivation.sh b/tests/functional/ca/import-from-derivation.sh index 708d2fc78..a3101cc3f 100644 --- a/tests/functional/ca/import-from-derivation.sh +++ b/tests/functional/ca/import-from-derivation.sh @@ -3,6 +3,6 @@ source common.sh export NIX_TESTS_CA_BY_DEFAULT=1 - +# shellcheck source=/dev/null cd .. && source import-from-derivation.sh diff --git a/tests/functional/ca/multiple-outputs.sh b/tests/functional/ca/multiple-outputs.sh index 63b7d3197..e4e05b5f5 100644 --- a/tests/functional/ca/multiple-outputs.sh +++ b/tests/functional/ca/multiple-outputs.sh @@ -4,4 +4,5 @@ source common.sh export NIX_TESTS_CA_BY_DEFAULT=1 cd .. +# shellcheck source=/dev/null source ./multiple-outputs.sh diff --git a/tests/functional/ca/new-build-cmd.sh b/tests/functional/ca/new-build-cmd.sh index 408bfb0f6..e5cb644d1 100644 --- a/tests/functional/ca/new-build-cmd.sh +++ b/tests/functional/ca/new-build-cmd.sh @@ -4,4 +4,5 @@ source common.sh export NIX_TESTS_CA_BY_DEFAULT=1 cd .. +# shellcheck source=/dev/null source ./build.sh diff --git a/tests/functional/ca/nix-shell.sh b/tests/functional/ca/nix-shell.sh index 7b30b2ac8..05115c126 100755 --- a/tests/functional/ca/nix-shell.sh +++ b/tests/functional/ca/nix-shell.sh @@ -2,6 +2,8 @@ source common.sh +# shellcheck disable=SC2034 NIX_TESTS_CA_BY_DEFAULT=true cd .. +# shellcheck source=/dev/null source ./nix-shell.sh diff --git a/tests/functional/ca/post-hook.sh b/tests/functional/ca/post-hook.sh index 705bde9d4..e1adffc47 100755 --- a/tests/functional/ca/post-hook.sh +++ b/tests/functional/ca/post-hook.sh @@ -6,6 +6,7 @@ requireDaemonNewerThan "2.4pre20210626" export NIX_TESTS_CA_BY_DEFAULT=1 cd .. +# shellcheck source=/dev/null source ./post-hook.sh diff --git a/tests/functional/ca/recursive.sh b/tests/functional/ca/recursive.sh index cd6736b24..e3fb98ab2 100755 --- a/tests/functional/ca/recursive.sh +++ b/tests/functional/ca/recursive.sh @@ -6,4 +6,5 @@ requireDaemonNewerThan "2.4pre20210623" export NIX_TESTS_CA_BY_DEFAULT=1 cd .. 
+# shellcheck source=/dev/null source ./recursive.sh diff --git a/tests/functional/ca/repl.sh b/tests/functional/ca/repl.sh index 0bbbebd85..f96ecfcf2 100644 --- a/tests/functional/ca/repl.sh +++ b/tests/functional/ca/repl.sh @@ -3,5 +3,5 @@ source common.sh export NIX_TESTS_CA_BY_DEFAULT=1 - +# shellcheck source=/dev/null cd .. && source repl.sh diff --git a/tests/functional/ca/selfref-gc.sh b/tests/functional/ca/selfref-gc.sh index 248778894..7ac9ec9f7 100755 --- a/tests/functional/ca/selfref-gc.sh +++ b/tests/functional/ca/selfref-gc.sh @@ -8,4 +8,5 @@ enableFeatures "ca-derivations nix-command flakes" export NIX_TESTS_CA_BY_DEFAULT=1 cd .. +# shellcheck source=/dev/null source ./selfref-gc.sh diff --git a/tests/functional/ca/why-depends.sh b/tests/functional/ca/why-depends.sh index 0af8a5440..2a3c7d083 100644 --- a/tests/functional/ca/why-depends.sh +++ b/tests/functional/ca/why-depends.sh @@ -3,5 +3,5 @@ source common.sh export NIX_TESTS_CA_BY_DEFAULT=1 - +# shellcheck source=/dev/null cd .. && source why-depends.sh From f3a2876c3a830bfc073ebd11f725657e03e98935 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:24:43 -0700 Subject: [PATCH 159/332] shellcheck fix: tests/functional/hash-convert.sh --- maintainers/flake-module.nix | 1 - tests/functional/hash-convert.sh | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index b080683ff..3bf41bc14 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/hash-convert\.sh$'' ''^tests/functional/impure-derivations\.sh$'' ''^tests/functional/impure-eval\.sh$'' ''^tests/functional/install-darwin\.sh$'' diff --git a/tests/functional/hash-convert.sh b/tests/functional/hash-convert.sh index c40cb469c..9ef4c189d 100755 --- a/tests/functional/hash-convert.sh +++ b/tests/functional/hash-convert.sh @@ -99,7 +99,7 @@ try3() { expectStderr 1 nix hash convert --hash-algo "$1" --from nix32 "$4" | grepQuiet "input hash" # Base-16 hashes can be in uppercase. 
- nix hash convert --hash-algo "$1" --from base16 "$(echo $2 | tr [a-z] [A-Z])" + nix hash convert --hash-algo "$1" --from base16 "$(echo "$2" | tr '[:lower:]' '[:upper:]')" } try3 sha1 "800d59cfcd3c05e900cb4e214be48f6b886a08df" "vw46m23bizj4n8afrc0fj19wrp7mj3c0" "gA1Zz808BekAy04hS+SPa4hqCN8=" From 1cd96f22c045ce3aa16e7fc40f4f9d56f069bf6e Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:25:07 -0700 Subject: [PATCH 160/332] shellcheck fix: tests/functional/impure-derivations.sh --- maintainers/flake-module.nix | 1 - tests/functional/impure-derivations.sh | 46 +++++++++++++------------- 2 files changed, 23 insertions(+), 24 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 3bf41bc14..43c84d5ae 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/impure-derivations\.sh$'' ''^tests/functional/impure-eval\.sh$'' ''^tests/functional/install-darwin\.sh$'' ''^tests/functional/legacy-ssh-store\.sh$'' diff --git a/tests/functional/impure-derivations.sh b/tests/functional/impure-derivations.sh index 9e483d376..e0b7c3eea 100755 --- a/tests/functional/impure-derivations.sh +++ b/tests/functional/impure-derivations.sh @@ -12,62 +12,62 @@ restartDaemon clearStoreIfPossible # Basic test of impure derivations: building one a second time should not use the previous result. -printf 0 > $TEST_ROOT/counter +printf 0 > "$TEST_ROOT"/counter # `nix derivation add` with impure derivations work drvPath=$(nix-instantiate ./impure-derivations.nix -A impure) -nix derivation show $drvPath | jq .[] > $TEST_HOME/impure-drv.json -drvPath2=$(nix derivation add < $TEST_HOME/impure-drv.json) +nix derivation show "$drvPath" | jq .[] > "$TEST_HOME"/impure-drv.json +drvPath2=$(nix derivation add < "$TEST_HOME"/impure-drv.json) [[ "$drvPath" = "$drvPath2" ]] # But only with the experimental feature! -expectStderr 1 nix derivation add < $TEST_HOME/impure-drv.json --experimental-features nix-command | grepQuiet "experimental Nix feature 'impure-derivations' is disabled" +expectStderr 1 nix derivation add < "$TEST_HOME"/impure-drv.json --experimental-features nix-command | grepQuiet "experimental Nix feature 'impure-derivations' is disabled" nix build --dry-run --json --file ./impure-derivations.nix impure.all json=$(nix build -L --no-link --json --file ./impure-derivations.nix impure.all) -path1=$(echo $json | jq -r .[].outputs.out) -path1_stuff=$(echo $json | jq -r .[].outputs.stuff) -[[ $(< $path1/n) = 0 ]] -[[ $(< $path1_stuff/bla) = 0 ]] +path1=$(echo "$json" | jq -r .[].outputs.out) +path1_stuff=$(echo "$json" | jq -r .[].outputs.stuff) +[[ $(< "$path1"/n) = 0 ]] +[[ $(< "$path1_stuff"/bla) = 0 ]] -[[ $(nix path-info --json $path1 | jq .[].ca) =~ fixed:r:sha256: ]] +[[ $(nix path-info --json "$path1" | jq .[].ca) =~ fixed:r:sha256: ]] path2=$(nix build -L --no-link --json --file ./impure-derivations.nix impure | jq -r .[].outputs.out) -[[ $(< $path2/n) = 1 ]] +[[ $(< "$path2"/n) = 1 ]] # Test impure derivations that depend on impure derivations. path3=$(nix build -L --no-link --json --file ./impure-derivations.nix impureOnImpure | jq -r .[].outputs.out) -[[ $(< $path3/n) = X2 ]] +[[ $(< "$path3"/n) = X2 ]] path4=$(nix build -L --no-link --json --file ./impure-derivations.nix impureOnImpure | jq -r .[].outputs.out) -[[ $(< $path4/n) = X3 ]] +[[ $(< "$path4"/n) = X3 ]] # Test that (self-)references work. 
-[[ $(< $path4/symlink/bla) = 3 ]] -[[ $(< $path4/self/n) = X3 ]] +[[ $(< "$path4"/symlink/bla) = 3 ]] +[[ $(< "$path4"/self/n) = X3 ]] # Input-addressed derivations cannot depend on impure derivations directly. (! nix build -L --no-link --json --file ./impure-derivations.nix inputAddressed 2>&1) | grep 'depends on impure derivation' drvPath=$(nix eval --json --file ./impure-derivations.nix impure.drvPath | jq -r .) -[[ $(nix derivation show $drvPath | jq ".[\"$(basename "$drvPath")\"].outputs.out.impure") = true ]] -[[ $(nix derivation show $drvPath | jq ".[\"$(basename "$drvPath")\"].outputs.stuff.impure") = true ]] +[[ $(nix derivation show "$drvPath" | jq ".[\"$(basename "$drvPath")\"].outputs.out.impure") = true ]] +[[ $(nix derivation show "$drvPath" | jq ".[\"$(basename "$drvPath")\"].outputs.stuff.impure") = true ]] # Fixed-output derivations *can* depend on impure derivations. path5=$(nix build -L --no-link --json --file ./impure-derivations.nix contentAddressed | jq -r .[].outputs.out) -[[ $(< $path5) = X ]] -[[ $(< $TEST_ROOT/counter) = 5 ]] +[[ $(< "$path5") = X ]] +[[ $(< "$TEST_ROOT"/counter) = 5 ]] # And they should not be rebuilt. path5=$(nix build -L --no-link --json --file ./impure-derivations.nix contentAddressed | jq -r .[].outputs.out) -[[ $(< $path5) = X ]] -[[ $(< $TEST_ROOT/counter) = 5 ]] +[[ $(< "$path5") = X ]] +[[ $(< "$TEST_ROOT"/counter) = 5 ]] # Input-addressed derivations can depend on fixed-output derivations that depend on impure derivations. path6=$(nix build -L --no-link --json --file ./impure-derivations.nix inputAddressedAfterCA | jq -r .[].outputs.out) -[[ $(< $path6) = X ]] -[[ $(< $TEST_ROOT/counter) = 5 ]] +[[ $(< "$path6") = X ]] +[[ $(< "$TEST_ROOT"/counter) = 5 ]] # Test nix/fetchurl.nix. path7=$(nix build -L --no-link --print-out-paths --expr "import { impure = true; url = file://$PWD/impure-derivations.sh; }") -cmp $path7 $PWD/impure-derivations.sh +cmp "$path7" "$PWD"/impure-derivations.sh From 78d9a8d92b7033ffa673767183fe6936d8f3d0d0 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:25:29 -0700 Subject: [PATCH 161/332] shellcheck fix: tests/functional/impure-eval.sh --- maintainers/flake-module.nix | 1 - 1 file changed, 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 43c84d5ae..eac332920 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/impure-eval\.sh$'' ''^tests/functional/install-darwin\.sh$'' ''^tests/functional/legacy-ssh-store\.sh$'' ''^tests/functional/linux-sandbox\.sh$'' From f702101224eba2bd322d99efa7dafc09f6e47569 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:27:09 -0700 Subject: [PATCH 162/332] shellcheck fix: tests/functional/install-darwin.sh --- maintainers/flake-module.nix | 1 - tests/functional/install-darwin.sh | 9 +++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index eac332920..2d10cc870 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/install-darwin\.sh$'' ''^tests/functional/legacy-ssh-store\.sh$'' ''^tests/functional/linux-sandbox\.sh$'' ''^tests/functional/logging\.sh$'' diff --git a/tests/functional/install-darwin.sh b/tests/functional/install-darwin.sh index ea2b75323..0070e9dce 
100755 --- a/tests/functional/install-darwin.sh +++ b/tests/functional/install-darwin.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash set -eux @@ -21,12 +21,13 @@ cleanup() { for file in ~/.bash_profile ~/.bash_login ~/.profile ~/.zshenv ~/.zprofile ~/.zshrc ~/.zlogin; do if [ -e "$file" ]; then + # shellcheck disable=SC2002 cat "$file" | grep -v nix-profile > "$file.next" mv "$file.next" "$file" fi done - for i in $(seq 1 $(sysctl -n hw.ncpu)); do + for i in $(seq 1 "$(sysctl -n hw.ncpu)"); do sudo /usr/bin/dscl . -delete "/Users/nixbld$i" || true done sudo /usr/bin/dscl . -delete "/Groups/nixbld" || true @@ -65,11 +66,11 @@ verify echo nix-build ./release.nix -A binaryTarball.x86_64-darwin ) | bash -l set -e - cp ./result/nix-*.tar.bz2 $scratch/nix.tar.bz2 + cp ./result/nix-*.tar.bz2 "$scratch"/nix.tar.bz2 ) ( - cd $scratch + cd "$scratch" tar -xf ./nix.tar.bz2 cd nix-* From 5341d82428744f1c2afa3f4298abb106d4261faf Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:27:30 -0700 Subject: [PATCH 163/332] shellcheck fix: tests/functional/legacy-ssh-store.sh --- maintainers/flake-module.nix | 1 - 1 file changed, 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 2d10cc870..8ef74498d 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/legacy-ssh-store\.sh$'' ''^tests/functional/linux-sandbox\.sh$'' ''^tests/functional/logging\.sh$'' ''^tests/functional/misc\.sh$'' From c4da98c8f480e90fe35df3edce95635fd60fb8e7 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:27:55 -0700 Subject: [PATCH 164/332] shellcheck fix: tests/functional/linux-sandbox.sh --- maintainers/flake-module.nix | 1 - tests/functional/linux-sandbox.sh | 26 +++++++++++++------------- 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 8ef74498d..baa240a04 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/linux-sandbox\.sh$'' ''^tests/functional/logging\.sh$'' ''^tests/functional/misc\.sh$'' ''^tests/functional/multiple-outputs\.sh$'' diff --git a/tests/functional/linux-sandbox.sh b/tests/functional/linux-sandbox.sh index abb635f11..c3ddf6ce6 100755 --- a/tests/functional/linux-sandbox.sh +++ b/tests/functional/linux-sandbox.sh @@ -19,8 +19,8 @@ if [[ ! $SHELL =~ /nix/store ]]; then skipTest "Shell is not from Nix store"; fi # An alias to automatically bind-mount the $SHELL on nix-build invocations nix-sandbox-build () { nix-build --no-out-link --sandbox-paths /nix/store "$@"; } -chmod -R u+w $TEST_ROOT/store0 || true -rm -rf $TEST_ROOT/store0 +chmod -R u+w "$TEST_ROOT"/store0 || true +rm -rf "$TEST_ROOT"/store0 export NIX_STORE_DIR=/my/store export NIX_REMOTE=$TEST_ROOT/store0 @@ -29,11 +29,11 @@ outPath=$(nix-sandbox-build dependencies.nix) [[ $outPath =~ /my/store/.*-dependencies ]] -nix path-info -r $outPath | grep input-2 +nix path-info -r "$outPath" | grep input-2 -nix store ls -R -l $outPath | grep foobar +nix store ls -R -l "$outPath" | grep foobar -nix store cat $outPath/foobar | grep FOOBAR +nix store cat "$outPath"/foobar | grep FOOBAR # Test --check without hash rewriting. 
nix-sandbox-build dependencies.nix --check @@ -42,9 +42,9 @@ nix-sandbox-build dependencies.nix --check nix-sandbox-build check.nix -A nondeterministic # `100 + 4` means non-determinstic, see doc/manual/source/command-ref/status-build-failure.md -expectStderr 104 nix-sandbox-build check.nix -A nondeterministic --check -K > $TEST_ROOT/log -grepQuietInverse 'error: renaming' $TEST_ROOT/log -grepQuiet 'may not be deterministic' $TEST_ROOT/log +expectStderr 104 nix-sandbox-build check.nix -A nondeterministic --check -K > "$TEST_ROOT"/log +grepQuietInverse 'error: renaming' "$TEST_ROOT"/log +grepQuiet 'may not be deterministic' "$TEST_ROOT"/log # Test that sandboxed builds cannot write to /etc easily # `100` means build failure without extra info, see doc/manual/source/command-ref/status-build-failure.md @@ -59,7 +59,7 @@ testCert () { certFile=$3 # a string that can be the path to a cert file # `100` means build failure without extra info, see doc/manual/source/command-ref/status-build-failure.md [ "$mode" == fixed-output ] && ret=1 || ret=100 - expectStderr $ret nix-sandbox-build linux-sandbox-cert-test.nix --argstr mode "$mode" --option ssl-cert-file "$certFile" | + expectStderr "$ret" nix-sandbox-build linux-sandbox-cert-test.nix --argstr mode "$mode" --option ssl-cert-file "$certFile" | grepQuiet "CERT_${expectation}_IN_SANDBOX" } @@ -68,10 +68,10 @@ cert=$TEST_ROOT/some-cert-file.pem symlinkcert=$TEST_ROOT/symlink-cert-file.pem transitivesymlinkcert=$TEST_ROOT/transitive-symlink-cert-file.pem symlinkDir=$TEST_ROOT/symlink-dir -echo -n "CERT_CONTENT" > $cert -ln -s $cert $symlinkcert -ln -s $symlinkcert $transitivesymlinkcert -ln -s $TEST_ROOT $symlinkDir +echo -n "CERT_CONTENT" > "$cert" +ln -s "$cert" "$symlinkcert" +ln -s "$symlinkcert" "$transitivesymlinkcert" +ln -s "$TEST_ROOT" "$symlinkDir" # No cert in sandbox when not a fixed-output derivation testCert missing normal "$cert" From 5a13f9fc91f993f936f4582ba12f7d30328ce15c Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:28:39 -0700 Subject: [PATCH 165/332] shellcheck fix: tests/functional/logging.sh --- maintainers/flake-module.nix | 1 - tests/functional/logging.sh | 10 +++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index baa240a04..5f2a837f9 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/logging\.sh$'' ''^tests/functional/misc\.sh$'' ''^tests/functional/multiple-outputs\.sh$'' ''^tests/functional/nested-sandboxing\.sh$'' diff --git a/tests/functional/logging.sh b/tests/functional/logging.sh index 83df9a45d..600fce43e 100755 --- a/tests/functional/logging.sh +++ b/tests/functional/logging.sh @@ -9,14 +9,14 @@ clearStore path=$(nix-build dependencies.nix --no-out-link) # Test nix-store -l. -[ "$(nix-store -l $path)" = FOO ] +[ "$(nix-store -l "$path")" = FOO ] # Test compressed logs. clearStore -rm -rf $NIX_LOG_DIR -(! nix-store -l $path) +rm -rf "$NIX_LOG_DIR" +(! nix-store -l "$path") nix-build dependencies.nix --no-out-link --compress-build-log -[ "$(nix-store -l $path)" = FOO ] +[ "$(nix-store -l "$path")" = FOO ] # test whether empty logs work fine with `nix log`. 
builder="$(realpath "$(mktemp)")" @@ -40,5 +40,5 @@ if [[ "$NIX_REMOTE" != "daemon" ]]; then nix build -vv --file dependencies.nix --no-link --json-log-path "$TEST_ROOT/log.json" 2>&1 | grepQuiet 'building.*dependencies-top.drv' jq < "$TEST_ROOT/log.json" grep '{"action":"start","fields":\[".*-dependencies-top.drv","",1,1\],"id":.*,"level":3,"parent":0' "$TEST_ROOT/log.json" >&2 - (( $(grep '{"action":"msg","level":5,"msg":"executing builder .*"}' "$TEST_ROOT/log.json" | wc -l) == 5 )) + (( $(grep -c '{"action":"msg","level":5,"msg":"executing builder .*"}' "$TEST_ROOT/log.json" ) == 5 )) fi From f2eef5b0a49bef1beb5fbc7c4451676828d1c8c8 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:29:35 -0700 Subject: [PATCH 166/332] shellcheck fix: tests/functional/misc.sh --- maintainers/flake-module.nix | 1 - tests/functional/misc.sh | 13 +++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 5f2a837f9..ee306a4ee 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/misc\.sh$'' ''^tests/functional/multiple-outputs\.sh$'' ''^tests/functional/nested-sandboxing\.sh$'' ''^tests/functional/nested-sandboxing/command\.sh$'' diff --git a/tests/functional/misc.sh b/tests/functional/misc.sh index b94a5fc57..131b63323 100755 --- a/tests/functional/misc.sh +++ b/tests/functional/misc.sh @@ -14,6 +14,7 @@ source common.sh nix-env --version | grep -F "${_NIX_TEST_CLIENT_VERSION:-$version}" nix_env=$(type -P nix-env) +# shellcheck disable=SC2123 (PATH=""; ! $nix_env --help 2>&1 ) | grepQuiet -F "The 'man' command was not found, but it is needed for 'nix-env' and some other 'nix-*' commands' help text. Perhaps you could install the 'man' command?" # Usage errors. @@ -22,12 +23,12 @@ expect 1 nix-env -q --foo 2>&1 | grep "unknown flag" # Eval Errors. eval_arg_res=$(nix-instantiate --eval -E 'let a = {} // a; in a.foo' 2>&1 || true) -echo $eval_arg_res | grep "at «string»:1:15:" -echo $eval_arg_res | grep "infinite recursion encountered" +echo "$eval_arg_res" | grep "at «string»:1:15:" +echo "$eval_arg_res" | grep "infinite recursion encountered" eval_stdin_res=$(echo 'let a = {} // a; in a.foo' | nix-instantiate --eval -E - 2>&1 || true) -echo $eval_stdin_res | grep "at «stdin»:1:15:" -echo $eval_stdin_res | grep "infinite recursion encountered" +echo "$eval_stdin_res" | grep "at «stdin»:1:15:" +echo "$eval_stdin_res" | grep "infinite recursion encountered" # Attribute path errors expectStderr 1 nix-instantiate --eval -E '{}' -A '"x' | grepQuiet "missing closing quote in selection path" @@ -40,10 +41,10 @@ expectStderr 1 nix-instantiate --eval -E '[]' -A '1' | grepQuiet "out of range" # NOTE(cole-h): behavior is different depending on the order, which is why we test an unknown option # before and after the `'{}'`! 
out="$(expectStderr 0 nix-instantiate --option foobar baz --expr '{}')" -[[ "$(echo "$out" | grep foobar | wc -l)" = 1 ]] +[[ "$(echo "$out" | grep -c foobar )" = 1 ]] out="$(expectStderr 0 nix-instantiate '{}' --option foobar baz --expr )" -[[ "$(echo "$out" | grep foobar | wc -l)" = 1 ]] +[[ "$(echo "$out" | grep -c foobar )" = 1 ]] if [[ $(uname) = Linux && $(uname -m) = i686 ]]; then [[ $(nix config show system) = i686-linux ]] From e26b0c66b0ca2e44f2fcf1c389d4e27d5008ddc4 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:34:26 -0700 Subject: [PATCH 167/332] shellcheck fix: tests/functional/multiple-outputs.sh --- maintainers/flake-module.nix | 1 - tests/functional/multiple-outputs.sh | 68 +++++++++++++++------------- 2 files changed, 37 insertions(+), 32 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index ee306a4ee..742a9d313 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/multiple-outputs\.sh$'' ''^tests/functional/nested-sandboxing\.sh$'' ''^tests/functional/nested-sandboxing/command\.sh$'' ''^tests/functional/nix-build\.sh$'' diff --git a/tests/functional/multiple-outputs.sh b/tests/functional/multiple-outputs.sh index c4e0be15e..f703fb02b 100755 --- a/tests/functional/multiple-outputs.sh +++ b/tests/functional/multiple-outputs.sh @@ -6,15 +6,17 @@ TODO_NixOS clearStoreIfPossible -rm -f $TEST_ROOT/result* +rm -f "$TEST_ROOT"/result* # Placeholder strings are opaque, so cannot do this check for floating # content-addressing derivations. -if [[ ! -n "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then +if [[ -z "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then # Test whether the output names match our expectations outPath=$(nix-instantiate multiple-outputs.nix --eval -A nameCheck.out.outPath) + # shellcheck disable=SC2016 [ "$(echo "$outPath" | sed -E 's_^".*/[^-/]*-([^/]*)"$_\1_')" = "multiple-outputs-a" ] outPath=$(nix-instantiate multiple-outputs.nix --eval -A nameCheck.dev.outPath) + # shellcheck disable=SC2016 [ "$(echo "$outPath" | sed -E 's_^".*/[^-/]*-([^/]*)"$_\1_')" = "multiple-outputs-a-dev" ] fi @@ -27,16 +29,17 @@ echo "evaluating c..." # outputs. drvPath=$(nix-instantiate multiple-outputs.nix -A c) #[ "$drvPath" = "$drvPath2" ] -grepQuiet 'multiple-outputs-a.drv",\["first","second"\]' $drvPath -grepQuiet 'multiple-outputs-b.drv",\["out"\]' $drvPath +grepQuiet 'multiple-outputs-a.drv",\["first","second"\]' "$drvPath" +grepQuiet 'multiple-outputs-b.drv",\["out"\]' "$drvPath" # While we're at it, test the ‘unsafeDiscardOutputDependency’ primop. outPath=$(nix-build multiple-outputs.nix -A d --no-out-link) -drvPath=$(cat $outPath/drv) +drvPath=$(cat "$outPath"/drv) if [[ -n "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then - expectStderr 1 nix-store -q $drvPath | grepQuiet "Cannot use output path of floating content-addressing derivation until we know what it is (e.g. by building it)" + expectStderr 1 nix-store -q "$drvPath" | grepQuiet "Cannot use output path of floating content-addressing derivation until we know what it is (e.g. by building it)" else - outPath=$(nix-store -q $drvPath) + outPath=$(nix-store -q "$drvPath") + # shellcheck disable=SC2233 (! [ -e "$outPath" ]) fi @@ -48,34 +51,37 @@ echo "output path is $outPath" [ "$(cat "$outPath/file")" = "success" ] # Test nix-build on a derivation with multiple outputs. 
-outPath1=$(nix-build multiple-outputs.nix -A a -o $TEST_ROOT/result) -[ -e $TEST_ROOT/result-first ] -(! [ -e $TEST_ROOT/result-second ]) -nix-build multiple-outputs.nix -A a.all -o $TEST_ROOT/result -[ "$(cat $TEST_ROOT/result-first/file)" = "first" ] -[ "$(cat $TEST_ROOT/result-second/file)" = "second" ] -[ "$(cat $TEST_ROOT/result-second/link/file)" = "first" ] -hash1=$(nix-store -q --hash $TEST_ROOT/result-second) +outPath1=$(nix-build multiple-outputs.nix -A a -o "$TEST_ROOT"/result) +[ -e "$TEST_ROOT"/result-first ] +# shellcheck disable=SC2235 +(! [ -e "$TEST_ROOT"/result-second ]) +nix-build multiple-outputs.nix -A a.all -o "$TEST_ROOT"/result +[ "$(cat "$TEST_ROOT"/result-first/file)" = "first" ] +[ "$(cat "$TEST_ROOT"/result-second/file)" = "second" ] +[ "$(cat "$TEST_ROOT"/result-second/link/file)" = "first" ] +hash1=$(nix-store -q --hash "$TEST_ROOT"/result-second) -outPath2=$(nix-build $(nix-instantiate multiple-outputs.nix -A a) --no-out-link) -[[ $outPath1 = $outPath2 ]] +outPath2=$(nix-build "$(nix-instantiate multiple-outputs.nix -A a)" --no-out-link) +[[ $outPath1 = "$outPath2" ]] -outPath2=$(nix-build $(nix-instantiate multiple-outputs.nix -A a.first) --no-out-link) -[[ $outPath1 = $outPath2 ]] +outPath2=$(nix-build "$(nix-instantiate multiple-outputs.nix -A a.first)" --no-out-link) +[[ $outPath1 = "$outPath2" ]] -outPath2=$(nix-build $(nix-instantiate multiple-outputs.nix -A a.second) --no-out-link) -[[ $(cat $outPath2/file) = second ]] +outPath2=$(nix-build "$(nix-instantiate multiple-outputs.nix -A a.second)" --no-out-link) +[[ $(cat "$outPath2"/file) = second ]] +# FIXME: Fixing this shellcheck causes the test to fail. +# shellcheck disable=SC2046 [[ $(nix-build $(nix-instantiate multiple-outputs.nix -A a.all) --no-out-link | wc -l) -eq 2 ]] -if [[ ! -n "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then +if [[ -z "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then # Delete one of the outputs and rebuild it. This will cause a hash # rewrite. - env -u NIX_REMOTE nix store delete $TEST_ROOT/result-second --ignore-liveness - nix-build multiple-outputs.nix -A a.all -o $TEST_ROOT/result - [ "$(cat $TEST_ROOT/result-second/file)" = "second" ] - [ "$(cat $TEST_ROOT/result-second/link/file)" = "first" ] - hash2=$(nix-store -q --hash $TEST_ROOT/result-second) + env -u NIX_REMOTE nix store delete "$TEST_ROOT"/result-second --ignore-liveness + nix-build multiple-outputs.nix -A a.all -o "$TEST_ROOT"/result + [ "$(cat "$TEST_ROOT"/result-second/file)" = "second" ] + [ "$(cat "$TEST_ROOT"/result-second/link/file)" = "first" ] + hash2=$(nix-store -q --hash "$TEST_ROOT"/result-second) [ "$hash1" = "$hash2" ] fi @@ -92,15 +98,15 @@ fi # Do a GC. This should leave an empty store. echo "collecting garbage..." -rm $TEST_ROOT/result* +rm "$TEST_ROOT"/result* nix-store --gc --keep-derivations --keep-outputs nix-store --gc --print-roots -rm -rf $NIX_STORE_DIR/.links -rmdir $NIX_STORE_DIR +rm -rf "$NIX_STORE_DIR"/.links +rmdir "$NIX_STORE_DIR" # TODO inspect why this doesn't work with floating content-addressing # derivations. -if [[ ! 
-n "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then +if [[ -z "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then expect 1 nix build -f multiple-outputs.nix invalid-output-name-1 2>&1 | grep 'contains illegal character' expect 1 nix build -f multiple-outputs.nix invalid-output-name-2 2>&1 | grep 'contains illegal character' fi From 1a71c1ef9fa1cf925e053c56c953e698f0af4dfa Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:43:38 -0700 Subject: [PATCH 168/332] shellcheck fix: tests/functional/nested-sandboxing.sh --- maintainers/flake-module.nix | 1 - tests/functional/nested-sandboxing.sh | 3 ++- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 742a9d313..ffb55b767 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/nested-sandboxing\.sh$'' ''^tests/functional/nested-sandboxing/command\.sh$'' ''^tests/functional/nix-build\.sh$'' ''^tests/functional/nix-channel\.sh$'' diff --git a/tests/functional/nested-sandboxing.sh b/tests/functional/nested-sandboxing.sh index 4d4cf125e..8788c7d90 100755 --- a/tests/functional/nested-sandboxing.sh +++ b/tests/functional/nested-sandboxing.sh @@ -11,7 +11,7 @@ requiresUnprivilegedUserNamespaces start="$TEST_ROOT/start" mkdir -p "$start" -cp -r common common.sh ${config_nix} ./nested-sandboxing "$start" +cp -r common common.sh "${config_nix}" ./nested-sandboxing "$start" cp "${_NIX_TEST_BUILD_DIR}/common/subst-vars.sh" "$start/common" # N.B. redefine _NIX_TEST_SOURCE_DIR="$start" @@ -20,6 +20,7 @@ cd "$start" source ./nested-sandboxing/command.sh +# shellcheck disable=SC2016 expectStderr 100 runNixBuild badStoreUrl 2 | grepQuiet '`sandbox-build-dir` must not contain' runNixBuild goodStoreUrl 5 From 794723142ba1ac70577c58fba37f0a0200945a54 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:44:52 -0700 Subject: [PATCH 169/332] shellcheck fix: tests/functional/nested-sandboxing/command.sh --- maintainers/flake-module.nix | 1 - tests/functional/nested-sandboxing/command.sh | 11 +++++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index ffb55b767..cf13e1e80 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/nested-sandboxing/command\.sh$'' ''^tests/functional/nix-build\.sh$'' ''^tests/functional/nix-channel\.sh$'' ''^tests/functional/nix-collect-garbage-d\.sh$'' diff --git a/tests/functional/nested-sandboxing/command.sh b/tests/functional/nested-sandboxing/command.sh index 7c04e82f5..c01133d93 100644 --- a/tests/functional/nested-sandboxing/command.sh +++ b/tests/functional/nested-sandboxing/command.sh @@ -1,17 +1,20 @@ +# shellcheck shell=bash set -eu -o pipefail -export NIX_BIN_DIR=$(dirname $(type -p nix)) +NIX_BIN_DIR=$(dirname "$(type -p nix)") +export NIX_BIN_DIR # TODO Get Nix and its closure more flexibly -export EXTRA_SANDBOX="/nix/store $(dirname $NIX_BIN_DIR)" +EXTRA_SANDBOX="/nix/store $(dirname "$NIX_BIN_DIR")" +export EXTRA_SANDBOX badStoreUrl () { local altitude=$1 - echo $TEST_ROOT/store-$altitude + echo "$TEST_ROOT"/store-"$altitude" } goodStoreUrl () { local altitude=$1 - echo $("badStoreUrl" "$altitude")?store=/foo-$altitude + echo "$("badStoreUrl" "$altitude")"?store=/foo-"$altitude" } # The non-standard 
sandbox-build-dir helps ensure that we get the same behavior From 2bfc9019fad4cd1521bb42aa2244eb9cf6d15578 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:46:43 -0700 Subject: [PATCH 170/332] shellcheck fix: tests/functional/nix-build.sh --- maintainers/flake-module.nix | 1 - tests/functional/nix-build.sh | 28 +++++++++++++++------------- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index cf13e1e80..a21fb214c 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/nix-build\.sh$'' ''^tests/functional/nix-channel\.sh$'' ''^tests/functional/nix-collect-garbage-d\.sh$'' ''^tests/functional/nix-copy-ssh-common\.sh$'' diff --git a/tests/functional/nix-build.sh b/tests/functional/nix-build.sh index 091e429e0..33973c628 100755 --- a/tests/functional/nix-build.sh +++ b/tests/functional/nix-build.sh @@ -6,30 +6,30 @@ TODO_NixOS clearStoreIfPossible -outPath=$(nix-build dependencies.nix -o $TEST_ROOT/result) -test "$(cat $TEST_ROOT/result/foobar)" = FOOBAR +outPath=$(nix-build dependencies.nix -o "$TEST_ROOT"/result) +test "$(cat "$TEST_ROOT"/result/foobar)" = FOOBAR # The result should be retained by a GC. echo A -target=$(readLink $TEST_ROOT/result) +target=$(readLink "$TEST_ROOT"/result) echo B -echo target is $target +echo target is "$target" nix-store --gc -test -e $target/foobar +test -e "$target"/foobar # But now it should be gone. -rm $TEST_ROOT/result +rm "$TEST_ROOT"/result nix-store --gc -if test -e $target/foobar; then false; fi +if test -e "$target"/foobar; then false; fi -outPath2=$(nix-build $(nix-instantiate dependencies.nix) --no-out-link) -[[ $outPath = $outPath2 ]] +outPath2=$(nix-build "$(nix-instantiate dependencies.nix)" --no-out-link) +[[ $outPath = "$outPath2" ]] -outPath2=$(nix-build $(nix-instantiate dependencies.nix)!out --no-out-link) -[[ $outPath = $outPath2 ]] +outPath2=$(nix-build "$(nix-instantiate dependencies.nix)"!out --no-out-link) +[[ $outPath = "$outPath2" ]] -outPath2=$(nix-store -r $(nix-instantiate --add-root $TEST_ROOT/indirect dependencies.nix)!out) -[[ $outPath = $outPath2 ]] +outPath2=$(nix-store -r "$(nix-instantiate --add-root "$TEST_ROOT"/indirect dependencies.nix)"!out) +[[ $outPath = "$outPath2" ]] # The order of the paths on stdout must correspond to the -A options # https://github.com/NixOS/nix/issues/4197 @@ -39,9 +39,11 @@ input1="$(nix-build nix-build-examples.nix -A input1 --no-out-link)" input2="$(nix-build nix-build-examples.nix -A input2 --no-out-link)" body="$(nix-build nix-build-examples.nix -A body --no-out-link)" +# shellcheck disable=SC2046,SC2005 outPathsA="$(echo $(nix-build nix-build-examples.nix -A input0 -A input1 -A input2 -A body --no-out-link))" [[ "$outPathsA" = "$input0 $input1 $input2 $body" ]] # test a different ordering to make sure it fails, not just in 23 out of 24 permutations +# shellcheck disable=SC2046,SC2005 outPathsB="$(echo $(nix-build nix-build-examples.nix -A body -A input1 -A input2 -A input0 --no-out-link))" [[ "$outPathsB" = "$body $input1 $input2 $input0" ]] From 2b1a0963f9771238d5cb985f4c91b9e3c39c3e0d Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:51:46 -0700 Subject: [PATCH 171/332] shellcheck fix: tests/functional/nix-channel.sh --- maintainers/flake-module.nix | 1 - tests/functional/nix-channel.sh | 52 ++++++++++++++++----------------- 2 files changed, 26 
insertions(+), 27 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index a21fb214c..6b41b291d 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/nix-channel\.sh$'' ''^tests/functional/nix-collect-garbage-d\.sh$'' ''^tests/functional/nix-copy-ssh-common\.sh$'' ''^tests/functional/nix-copy-ssh-ng\.sh$'' diff --git a/tests/functional/nix-channel.sh b/tests/functional/nix-channel.sh index d0b772850..f23d4bbde 100755 --- a/tests/functional/nix-channel.sh +++ b/tests/functional/nix-channel.sh @@ -4,7 +4,7 @@ source common.sh clearProfiles -rm -f $TEST_HOME/.nix-channels $TEST_HOME/.nix-profile +rm -f "$TEST_HOME"/.nix-channels "$TEST_HOME"/.nix-profile # Test add/list/remove. nix-channel --add http://foo/bar xyzzy @@ -12,8 +12,8 @@ nix-channel --list | grepQuiet http://foo/bar nix-channel --remove xyzzy [[ $(nix-channel --list-generations | wc -l) == 1 ]] -[ -e $TEST_HOME/.nix-channels ] -[ "$(cat $TEST_HOME/.nix-channels)" = '' ] +[ -e "$TEST_HOME"/.nix-channels ] +[ "$(cat "$TEST_HOME"/.nix-channels)" = '' ] # Test the XDG Base Directories support @@ -25,47 +25,47 @@ nix-channel --remove xyzzy unset NIX_CONFIG -[ -e $TEST_HOME/.local/state/nix/channels ] -[ "$(cat $TEST_HOME/.local/state/nix/channels)" = '' ] +[ -e "$TEST_HOME"/.local/state/nix/channels ] +[ "$(cat "$TEST_HOME"/.local/state/nix/channels)" = '' ] # Create a channel. -rm -rf $TEST_ROOT/foo -mkdir -p $TEST_ROOT/foo +rm -rf "$TEST_ROOT"/foo +mkdir -p "$TEST_ROOT"/foo drvPath=$(nix-instantiate dependencies.nix) -nix copy --to file://$TEST_ROOT/foo?compression="bzip2" $(nix-store -r "$drvPath") -rm -rf $TEST_ROOT/nixexprs -mkdir -p $TEST_ROOT/nixexprs -cp "${config_nix}" dependencies.nix dependencies.builder*.sh $TEST_ROOT/nixexprs/ -ln -s dependencies.nix $TEST_ROOT/nixexprs/default.nix -(cd $TEST_ROOT && tar cvf - nixexprs) | bzip2 > $TEST_ROOT/foo/nixexprs.tar.bz2 +nix copy --to file://"$TEST_ROOT"/foo?compression="bzip2" "$(nix-store -r "$drvPath")" +rm -rf "$TEST_ROOT"/nixexprs +mkdir -p "$TEST_ROOT"/nixexprs +cp "${config_nix}" dependencies.nix dependencies.builder*.sh "$TEST_ROOT"/nixexprs/ +ln -s dependencies.nix "$TEST_ROOT"/nixexprs/default.nix +(cd "$TEST_ROOT" && tar cvf - nixexprs) | bzip2 > "$TEST_ROOT"/foo/nixexprs.tar.bz2 # Test the update action. -nix-channel --add file://$TEST_ROOT/foo +nix-channel --add file://"$TEST_ROOT"/foo nix-channel --update [[ $(nix-channel --list-generations | wc -l) == 2 ]] # Do a query. -nix-env -qa \* --meta --xml --out-path > $TEST_ROOT/meta.xml -grepQuiet 'meta.*description.*Random test package' $TEST_ROOT/meta.xml -grepQuiet 'item.*attrPath="foo".*name="dependencies-top"' $TEST_ROOT/meta.xml +nix-env -qa \* --meta --xml --out-path > "$TEST_ROOT"/meta.xml +grepQuiet 'meta.*description.*Random test package' "$TEST_ROOT"/meta.xml +grepQuiet 'item.*attrPath="foo".*name="dependencies-top"' "$TEST_ROOT"/meta.xml # Do an install. nix-env -i dependencies-top -[ -e $TEST_HOME/.nix-profile/foobar ] +[ -e "$TEST_HOME"/.nix-profile/foobar ] # Test updating from a tarball -nix-channel --add file://$TEST_ROOT/foo/nixexprs.tar.bz2 bar +nix-channel --add file://"$TEST_ROOT"/foo/nixexprs.tar.bz2 bar nix-channel --update # Do a query. 
-nix-env -qa \* --meta --xml --out-path > $TEST_ROOT/meta.xml -grepQuiet 'meta.*description.*Random test package' $TEST_ROOT/meta.xml -grepQuiet 'item.*attrPath="bar".*name="dependencies-top"' $TEST_ROOT/meta.xml -grepQuiet 'item.*attrPath="foo".*name="dependencies-top"' $TEST_ROOT/meta.xml +nix-env -qa \* --meta --xml --out-path > "$TEST_ROOT"/meta.xml +grepQuiet 'meta.*description.*Random test package' "$TEST_ROOT"/meta.xml +grepQuiet 'item.*attrPath="bar".*name="dependencies-top"' "$TEST_ROOT"/meta.xml +grepQuiet 'item.*attrPath="foo".*name="dependencies-top"' "$TEST_ROOT"/meta.xml # Do an install. nix-env -i dependencies-top -[ -e $TEST_HOME/.nix-profile/foobar ] +[ -e "$TEST_HOME"/.nix-profile/foobar ] # Test evaluation through a channel symlink (#9882). drvPath=$(nix-instantiate '') @@ -73,9 +73,9 @@ drvPath=$(nix-instantiate '') # Add a test for the special case behaviour of 'nixpkgs' in the # channels for root (see EvalSettings::getDefaultNixPath()). if ! isTestOnNixOS; then - nix-channel --add file://$TEST_ROOT/foo nixpkgs + nix-channel --add file://"$TEST_ROOT"/foo nixpkgs nix-channel --update - mv $TEST_HOME/.local/state/nix/profiles $TEST_ROOT/var/nix/profiles/per-user/root + mv "$TEST_HOME"/.local/state/nix/profiles "$TEST_ROOT"/var/nix/profiles/per-user/root drvPath2=$(nix-instantiate '') [[ "$drvPath" = "$drvPath2" ]] fi From 83e203fe453f1a3448b24dbb0630de1338d5e1e6 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:52:08 -0700 Subject: [PATCH 172/332] shellcheck fix: tests/functional/nix-collect-garbage-d.sh --- maintainers/flake-module.nix | 1 - tests/functional/nix-collect-garbage-d.sh | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 6b41b291d..492c85bb0 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/nix-collect-garbage-d\.sh$'' ''^tests/functional/nix-copy-ssh-common\.sh$'' ''^tests/functional/nix-copy-ssh-ng\.sh$'' ''^tests/functional/nix-copy-ssh\.sh$'' diff --git a/tests/functional/nix-collect-garbage-d.sh b/tests/functional/nix-collect-garbage-d.sh index 119efe629..44de90711 100755 --- a/tests/functional/nix-collect-garbage-d.sh +++ b/tests/functional/nix-collect-garbage-d.sh @@ -29,7 +29,7 @@ testCollectGarbageD # Run the same test, but forcing the profiles an arbitrary location. 
rm ~/.nix-profile -ln -s $TEST_ROOT/blah ~/.nix-profile +ln -s "$TEST_ROOT"/blah ~/.nix-profile testCollectGarbageD # Run the same test, but forcing the profiles at their legacy location under From c9fd721be95eb34516e78910bc7e49396c28e830 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:52:31 -0700 Subject: [PATCH 173/332] shellcheck fix: tests/functional/nix-copy-ssh-common.sh --- maintainers/flake-module.nix | 1 - tests/functional/nix-copy-ssh-common.sh | 15 ++++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 492c85bb0..a2c6801e9 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/nix-copy-ssh-common\.sh$'' ''^tests/functional/nix-copy-ssh-ng\.sh$'' ''^tests/functional/nix-copy-ssh\.sh$'' ''^tests/functional/nix-daemon-untrusting\.sh$'' diff --git a/tests/functional/nix-copy-ssh-common.sh b/tests/functional/nix-copy-ssh-common.sh index 5eea9612d..8154585af 100644 --- a/tests/functional/nix-copy-ssh-common.sh +++ b/tests/functional/nix-copy-ssh-common.sh @@ -1,3 +1,4 @@ +# shellcheck shell=bash proto=$1 shift (( $# == 0 )) @@ -7,7 +8,7 @@ TODO_NixOS clearStore clearCache -mkdir -p $TEST_ROOT/stores +mkdir -p "$TEST_ROOT"/stores # Create path to copy back and forth outPath=$(nix-build --no-out-link dependencies.nix) @@ -37,17 +38,17 @@ if [[ "$proto" == "ssh-ng" ]]; then args+=(--no-check-sigs) fi -[ ! -f ${remoteRoot}${outPath}/foobar ] -nix copy "${args[@]}" --to "$remoteStore" $outPath -[ -f ${remoteRoot}${outPath}/foobar ] +[ ! -f "${remoteRoot}""${outPath}"/foobar ] +nix copy "${args[@]}" --to "$remoteStore" "$outPath" +[ -f "${remoteRoot}""${outPath}"/foobar ] # Copy back from store clearStore -[ ! -f $outPath/foobar ] -nix copy --no-check-sigs --from "$remoteStore" $outPath -[ -f $outPath/foobar ] +[ ! 
-f "$outPath"/foobar ] +nix copy --no-check-sigs --from "$remoteStore" "$outPath" +[ -f "$outPath"/foobar ] # Check --substitute-on-destination, avoid corrupted store From ca7414cd18985f50486c42451c4f5fa1839c9695 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:52:52 -0700 Subject: [PATCH 174/332] shellcheck fix: tests/functional/nix-copy-ssh-ng.sh --- maintainers/flake-module.nix | 1 - tests/functional/nix-copy-ssh-ng.sh | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index a2c6801e9..81f384e57 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/nix-copy-ssh-ng\.sh$'' ''^tests/functional/nix-copy-ssh\.sh$'' ''^tests/functional/nix-daemon-untrusting\.sh$'' ''^tests/functional/nix-profile\.sh$'' diff --git a/tests/functional/nix-copy-ssh-ng.sh b/tests/functional/nix-copy-ssh-ng.sh index 41958c2c3..f74f3bb86 100755 --- a/tests/functional/nix-copy-ssh-ng.sh +++ b/tests/functional/nix-copy-ssh-ng.sh @@ -14,5 +14,5 @@ outPath=$(nix-build --no-out-link dependencies.nix) nix store info --store "$remoteStore" # Regression test for https://github.com/NixOS/nix/issues/6253 -nix copy --to "$remoteStore" $outPath --no-check-sigs & -nix copy --to "$remoteStore" $outPath --no-check-sigs +nix copy --to "$remoteStore" "$outPath" --no-check-sigs & +nix copy --to "$remoteStore" "$outPath" --no-check-sigs From 8c2664ed15ab12fe49d4a8c8126c79a401106880 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:53:17 -0700 Subject: [PATCH 175/332] shellcheck fix: tests/functional/nix-copy-ssh.sh --- maintainers/flake-module.nix | 1 - 1 file changed, 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 81f384e57..2741ff143 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/nix-copy-ssh\.sh$'' ''^tests/functional/nix-daemon-untrusting\.sh$'' ''^tests/functional/nix-profile\.sh$'' ''^tests/functional/nix-shell\.sh$'' From cf206ef61e25a7727e0ee493c01240f3ae29c376 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:53:41 -0700 Subject: [PATCH 176/332] shellcheck fix: tests/functional/nix-daemon-untrusting.sh --- maintainers/flake-module.nix | 1 - 1 file changed, 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 2741ff143..64d22d2ac 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/nix-daemon-untrusting\.sh$'' ''^tests/functional/nix-profile\.sh$'' ''^tests/functional/nix-shell\.sh$'' ''^tests/functional/nix_path\.sh$'' From 78833ca8d091d90b81979974679558fa3f667241 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 09:57:45 -0700 Subject: [PATCH 177/332] shellcheck fix: tests/functional/nix-profile.sh --- maintainers/flake-module.nix | 1 - tests/functional/nix-profile.sh | 145 +++++++++++++++++--------------- 2 files changed, 76 insertions(+), 70 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 64d22d2ac..5c373cdb9 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't 
linted these files yet - ''^tests/functional/nix-profile\.sh$'' ''^tests/functional/nix-shell\.sh$'' ''^tests/functional/nix_path\.sh$'' ''^tests/functional/optimise-store\.sh$'' diff --git a/tests/functional/nix-profile.sh b/tests/functional/nix-profile.sh index b1cfef6b0..922162d4b 100755 --- a/tests/functional/nix-profile.sh +++ b/tests/functional/nix-profile.sh @@ -12,9 +12,10 @@ restartDaemon # Make a flake. flake1Dir=$TEST_ROOT/flake1 -mkdir -p $flake1Dir +mkdir -p "$flake1Dir" -cat > $flake1Dir/flake.nix < "$flake1Dir"/flake.nix < $flake1Dir/flake.nix < $flake1Dir/who -printf 1.0 > $flake1Dir/version -printf false > $flake1Dir/ca.nix +printf World > "$flake1Dir"/who +printf 1.0 > "$flake1Dir"/version +printf false > "$flake1Dir"/ca.nix -cp "${config_nix}" $flake1Dir/ +cp "${config_nix}" "$flake1Dir"/ # Test upgrading from nix-env. nix-env -f ./user-envs.nix -i foo-1.0 nix profile list | grep -A2 'Name:.*foo' | grep 'Store paths:.*foo-1.0' -nix profile add $flake1Dir -L +nix profile add "$flake1Dir" -L nix profile list | grep -A4 'Name:.*flake1' | grep 'Locked flake URL:.*narHash' -[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello World" ]] -[ -e $TEST_HOME/.nix-profile/share/man ] -(! [ -e $TEST_HOME/.nix-profile/include ]) +[[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello World" ]] +[ -e "$TEST_HOME"/.nix-profile/share/man ] +# shellcheck disable=SC2235 +(! [ -e "$TEST_HOME"/.nix-profile/include ]) nix profile history nix profile history | grep "packages.$system.default: ∅ -> 1.0" nix profile diff-closures | grep 'env-manifest.nix: ε → ∅' @@ -64,32 +66,32 @@ nix profile diff-closures | grep 'env-manifest.nix: ε → ∅' # Test XDG Base Directories support export NIX_CONFIG="use-xdg-base-directories = true" nix profile remove flake1 2>&1 | grep 'removed 1 packages' -nix profile add $flake1Dir -[[ $($TEST_HOME/.local/state/nix/profile/bin/hello) = "Hello World" ]] +nix profile add "$flake1Dir" +[[ $("$TEST_HOME"/.local/state/nix/profile/bin/hello) = "Hello World" ]] unset NIX_CONFIG # Test conflicting package add. -nix profile add $flake1Dir 2>&1 | grep "warning: 'flake1' is already added" +nix profile add "$flake1Dir" 2>&1 | grep "warning: 'flake1' is already added" # Test upgrading a package. -printf NixOS > $flake1Dir/who -printf 2.0 > $flake1Dir/version +printf NixOS > "$flake1Dir"/who +printf 2.0 > "$flake1Dir"/version nix profile upgrade flake1 -[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello NixOS" ]] +[[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello NixOS" ]] nix profile history | grep "packages.$system.default: 1.0, 1.0-man -> 2.0, 2.0-man" # Test upgrading package using regular expression. -printf 2.1 > $flake1Dir/version +printf 2.1 > "$flake1Dir"/version nix profile upgrade --regex '.*' -[[ $(readlink $TEST_HOME/.nix-profile/bin/hello) =~ .*-profile-test-2\.1/bin/hello ]] +[[ $(readlink "$TEST_HOME"/.nix-profile/bin/hello) =~ .*-profile-test-2\.1/bin/hello ]] nix profile rollback # Test upgrading all packages -printf 2.2 > $flake1Dir/version +printf 2.2 > "$flake1Dir"/version nix profile upgrade --all -[[ $(readlink $TEST_HOME/.nix-profile/bin/hello) =~ .*-profile-test-2\.2/bin/hello ]] +[[ $(readlink "$TEST_HOME"/.nix-profile/bin/hello) =~ .*-profile-test-2\.2/bin/hello ]] nix profile rollback -printf 1.0 > $flake1Dir/version +printf 1.0 > "$flake1Dir"/version # Test --all exclusivity. assertStderr nix --offline profile upgrade --all foo << EOF @@ -117,98 +119,102 @@ nix profile rollback nix profile diff-closures # Test rollback. 
-printf World > $flake1Dir/who +printf World > "$flake1Dir"/who nix profile upgrade flake1 -printf NixOS > $flake1Dir/who +printf NixOS > "$flake1Dir"/who nix profile upgrade flake1 nix profile rollback -[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello World" ]] +[[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello World" ]] # Test uninstall. -[ -e $TEST_HOME/.nix-profile/bin/foo ] +[ -e "$TEST_HOME"/.nix-profile/bin/foo ] +# shellcheck disable=SC2235 nix profile remove foo 2>&1 | grep 'removed 1 packages' -(! [ -e $TEST_HOME/.nix-profile/bin/foo ]) +# shellcheck disable=SC2235 +(! [ -e "$TEST_HOME"/.nix-profile/bin/foo ]) nix profile history | grep 'foo: 1.0 -> ∅' nix profile diff-closures | grep 'Version 3 -> 4' # Test installing a non-flake package. nix profile add --file ./simple.nix '' -[[ $(cat $TEST_HOME/.nix-profile/hello) = "Hello World!" ]] +[[ $(cat "$TEST_HOME"/.nix-profile/hello) = "Hello World!" ]] nix profile remove simple 2>&1 | grep 'removed 1 packages' -nix profile add $(nix-build --no-out-link ./simple.nix) -[[ $(cat $TEST_HOME/.nix-profile/hello) = "Hello World!" ]] +nix profile add "$(nix-build --no-out-link ./simple.nix)" +[[ $(cat "$TEST_HOME"/.nix-profile/hello) = "Hello World!" ]] # Test packages with same name from different sources -mkdir $TEST_ROOT/simple-too -cp ./simple.nix "${config_nix}" simple.builder.sh $TEST_ROOT/simple-too -nix profile add --file $TEST_ROOT/simple-too/simple.nix '' +mkdir "$TEST_ROOT"/simple-too +cp ./simple.nix "${config_nix}" simple.builder.sh "$TEST_ROOT"/simple-too +nix profile add --file "$TEST_ROOT"/simple-too/simple.nix '' nix profile list | grep -A4 'Name:.*simple' | grep 'Name:.*simple-1' nix profile remove simple 2>&1 | grep 'removed 1 packages' nix profile remove simple-1 2>&1 | grep 'removed 1 packages' # Test wipe-history. nix profile wipe-history -[[ $(nix profile history | grep Version | wc -l) -eq 1 ]] +[[ $(nix profile history | grep -c Version) -eq 1 ]] # Test upgrade to CA package. -printf true > $flake1Dir/ca.nix -printf 3.0 > $flake1Dir/version +printf true > "$flake1Dir"/ca.nix +printf 3.0 > "$flake1Dir"/version nix profile upgrade flake1 nix profile history | grep "packages.$system.default: 1.0, 1.0-man -> 3.0, 3.0-man" # Test new install of CA package. nix profile remove flake1 2>&1 | grep 'removed 1 packages' -printf 4.0 > $flake1Dir/version -printf Utrecht > $flake1Dir/who -nix profile add $flake1Dir -[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello Utrecht" ]] -[[ $(nix path-info --json $(realpath $TEST_HOME/.nix-profile/bin/hello) | jq -r .[].ca) =~ fixed:r:sha256: ]] +printf 4.0 > "$flake1Dir"/version +printf Utrecht > "$flake1Dir"/who +nix profile add "$flake1Dir" +[[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello Utrecht" ]] +[[ $(nix path-info --json "$(realpath "$TEST_HOME"/.nix-profile/bin/hello)" | jq -r .[].ca) =~ fixed:r:sha256: ]] # Override the outputs. 
nix profile remove simple flake1 nix profile add "$flake1Dir^*" -[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello Utrecht" ]] -[ -e $TEST_HOME/.nix-profile/share/man ] -[ -e $TEST_HOME/.nix-profile/include ] +[[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello Utrecht" ]] +[ -e "$TEST_HOME"/.nix-profile/share/man ] +[ -e "$TEST_HOME"/.nix-profile/include ] -printf Nix > $flake1Dir/who +printf Nix > "$flake1Dir"/who nix profile list nix profile upgrade flake1 -[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello Nix" ]] -[ -e $TEST_HOME/.nix-profile/share/man ] -[ -e $TEST_HOME/.nix-profile/include ] +[[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello Nix" ]] +[ -e "$TEST_HOME"/.nix-profile/share/man ] +[ -e "$TEST_HOME"/.nix-profile/include ] nix profile remove flake1 2>&1 | grep 'removed 1 packages' nix profile add "$flake1Dir^man" -(! [ -e $TEST_HOME/.nix-profile/bin/hello ]) -[ -e $TEST_HOME/.nix-profile/share/man ] -(! [ -e $TEST_HOME/.nix-profile/include ]) +# shellcheck disable=SC2235 +(! [ -e "$TEST_HOME"/.nix-profile/bin/hello ]) +[ -e "$TEST_HOME"/.nix-profile/share/man ] +# shellcheck disable=SC2235 +(! [ -e "$TEST_HOME"/.nix-profile/include ]) # test priority nix profile remove flake1 2>&1 | grep 'removed 1 packages' # Make another flake. flake2Dir=$TEST_ROOT/flake2 -printf World > $flake1Dir/who -cp -r $flake1Dir $flake2Dir -printf World2 > $flake2Dir/who +printf World > "$flake1Dir"/who +cp -r "$flake1Dir" "$flake2Dir" +printf World2 > "$flake2Dir"/who -nix profile add $flake1Dir -[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello World" ]] -expect 1 nix profile add $flake2Dir +nix profile add "$flake1Dir" +[[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello World" ]] +expect 1 nix profile add "$flake2Dir" diff -u <( - nix --offline profile install $flake2Dir 2>&1 1> /dev/null \ + nix --offline profile install "$flake2Dir" 2>&1 1> /dev/null \ | grep -vE "^warning: " \ | grep -vE "^error \(ignored\): " \ || true ) <(cat << EOF error: An existing package already provides the following file: - $(nix build --no-link --print-out-paths ${flake1Dir}"#default.out")/bin/hello + $(nix build --no-link --print-out-paths "${flake1Dir}""#default.out")/bin/hello This is the conflicting file from the new package: - $(nix build --no-link --print-out-paths ${flake2Dir}"#default.out")/bin/hello + $(nix build --no-link --print-out-paths "${flake2Dir}""#default.out")/bin/hello To remove the existing package: @@ -225,11 +231,11 @@ error: An existing package already provides the following file: nix profile add path:${flake2Dir}#packages.${system}.default --priority 6 EOF ) -[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello World" ]] -nix profile add $flake2Dir --priority 100 -[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello World" ]] -nix profile add $flake2Dir --priority 0 -[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello World2" ]] +[[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello World" ]] +nix profile add "$flake2Dir" --priority 100 +[[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello World" ]] +nix profile add "$flake2Dir" --priority 0 +[[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello World2" ]] # nix profile add $flake1Dir --priority 100 # [[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello World" ]] @@ -237,14 +243,15 @@ nix profile add $flake2Dir --priority 0 # flake references. 
# Regression test for https://github.com/NixOS/nix/issues/8284 clearProfiles -nix profile add $(nix build $flake1Dir --no-link --print-out-paths) +# shellcheck disable=SC2046 +nix profile add $(nix build "$flake1Dir" --no-link --print-out-paths) expect 1 nix profile add --impure --expr "(builtins.getFlake ''$flake2Dir'').packages.$system.default" # Test upgrading from profile version 2. clearProfiles -mkdir -p $TEST_ROOT/import-profile -outPath=$(nix build --no-link --print-out-paths $flake1Dir/flake.nix^out) -printf '{ "version": 2, "elements": [ { "active": true, "attrPath": "legacyPackages.x86_64-linux.hello", "originalUrl": "flake:nixpkgs", "outputs": null, "priority": 5, "storePaths": [ "%s" ], "url": "github:NixOS/nixpkgs/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" } ] }' "$outPath" > $TEST_ROOT/import-profile/manifest.json -nix build --profile $TEST_HOME/.nix-profile $(nix store add-path $TEST_ROOT/import-profile) --no-link +mkdir -p "$TEST_ROOT"/import-profile +outPath=$(nix build --no-link --print-out-paths "$flake1Dir"/flake.nix^out) +printf '{ "version": 2, "elements": [ { "active": true, "attrPath": "legacyPackages.x86_64-linux.hello", "originalUrl": "flake:nixpkgs", "outputs": null, "priority": 5, "storePaths": [ "%s" ], "url": "github:NixOS/nixpkgs/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" } ] }' "$outPath" > "$TEST_ROOT"/import-profile/manifest.json +nix build --profile "$TEST_HOME"/.nix-profile "$(nix store add-path "$TEST_ROOT"/import-profile)" --no-link nix profile list | grep -A4 'Name:.*hello' | grep "Store paths:.*$outPath" nix profile remove hello 2>&1 | grep 'removed 1 packages, kept 0 packages' From fe4e476d1339cf30aa910a954c8d0d05cd4c1c2c Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 10:02:36 -0700 Subject: [PATCH 178/332] shellcheck fix: tests/functional/nix-shell.sh --- maintainers/flake-module.nix | 1 - tests/functional/nix-shell.sh | 147 +++++++++++++++++++--------------- 2 files changed, 82 insertions(+), 66 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 5c373cdb9..24eedaa9b 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/nix-shell\.sh$'' ''^tests/functional/nix_path\.sh$'' ''^tests/functional/optimise-store\.sh$'' ''^tests/functional/output-normalization\.sh$'' diff --git a/tests/functional/nix-shell.sh b/tests/functional/nix-shell.sh index bc49333b5..cf650e2c3 100755 --- a/tests/functional/nix-shell.sh +++ b/tests/functional/nix-shell.sh @@ -16,16 +16,19 @@ export NIX_PATH=nixpkgs="$shellDotNix" export IMPURE_VAR=foo export SELECTED_IMPURE_VAR=baz +# shellcheck disable=SC2016 output=$(nix-shell --pure "$shellDotNix" -A shellDrv --run \ 'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX - $TEST_inNixShell"') [ "$output" = " - foo - bar - true" ] +# shellcheck disable=SC2016 output=$(nix-shell --pure "$shellDotNix" -A shellDrv --option nix-shell-always-looks-for-shell-nix false --run \ 'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX - $TEST_inNixShell"') [ "$output" = " - foo - bar - true" ] # Test --keep +# shellcheck disable=SC2016 output=$(nix-shell --pure --keep SELECTED_IMPURE_VAR "$shellDotNix" -A shellDrv --run \ 'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX - $SELECTED_IMPURE_VAR"') @@ -34,6 +37,7 @@ output=$(nix-shell --pure --keep SELECTED_IMPURE_VAR "$shellDotNix" -A shellDrv # test NIX_BUILD_TOP 
testTmpDir=$(pwd)/nix-shell mkdir -p "$testTmpDir" +# shellcheck disable=SC2016 output=$(TMPDIR="$testTmpDir" nix-shell --pure "$shellDotNix" -A shellDrv --run 'echo $NIX_BUILD_TOP') [[ "$output" =~ ${testTmpDir}.* ]] || { echo "expected $output =~ ${testTmpDir}.*" >&2 @@ -41,105 +45,111 @@ output=$(TMPDIR="$testTmpDir" nix-shell --pure "$shellDotNix" -A shellDrv --run } # Test nix-shell on a .drv -[[ $(nix-shell --pure $(nix-instantiate "$shellDotNix" -A shellDrv) --run \ +# shellcheck disable=SC2016 +[[ $(nix-shell --pure "$(nix-instantiate "$shellDotNix" -A shellDrv)" --run \ 'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX - $TEST_inNixShell"') = " - foo - bar - false" ]] - -[[ $(nix-shell --pure $(nix-instantiate "$shellDotNix" -A shellDrv) --run \ +# shellcheck disable=SC2016 +[[ $(nix-shell --pure "$(nix-instantiate "$shellDotNix" -A shellDrv)" --run \ 'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX - $TEST_inNixShell"') = " - foo - bar - false" ]] # Test nix-shell on a .drv symlink # Legacy: absolute path and .drv extension required -nix-instantiate "$shellDotNix" -A shellDrv --add-root $TEST_ROOT/shell.drv -[[ $(nix-shell --pure $TEST_ROOT/shell.drv --run \ +nix-instantiate "$shellDotNix" -A shellDrv --add-root "$TEST_ROOT"/shell.drv +# shellcheck disable=SC2016 +[[ $(nix-shell --pure "$TEST_ROOT"/shell.drv --run \ 'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX"') = " - foo - bar" ]] # New behaviour: just needs to resolve to a derivation in the store -nix-instantiate "$shellDotNix" -A shellDrv --add-root $TEST_ROOT/shell -[[ $(nix-shell --pure $TEST_ROOT/shell --run \ +nix-instantiate "$shellDotNix" -A shellDrv --add-root "$TEST_ROOT"/shell +# shellcheck disable=SC2016 +[[ $(nix-shell --pure "$TEST_ROOT"/shell --run \ 'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX"') = " - foo - bar" ]] # Test nix-shell -p +# shellcheck disable=SC2016 output=$(NIX_PATH=nixpkgs="$shellDotNix" nix-shell --pure -p foo bar --run 'echo "$(foo) $(bar)"') [ "$output" = "foo bar" ] # Test nix-shell -p --arg x y +# shellcheck disable=SC2016 output=$(NIX_PATH=nixpkgs="$shellDotNix" nix-shell --pure -p foo --argstr fooContents baz --run 'echo "$(foo)"') [ "$output" = "baz" ] # Test nix-shell shebang mode -sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.sh > $TEST_ROOT/shell.shebang.sh -chmod a+rx $TEST_ROOT/shell.shebang.sh +sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.sh > "$TEST_ROOT"/shell.shebang.sh +chmod a+rx "$TEST_ROOT"/shell.shebang.sh -output=$($TEST_ROOT/shell.shebang.sh abc def) +output=$("$TEST_ROOT"/shell.shebang.sh abc def) [ "$output" = "foo bar abc def" ] # Test nix-shell shebang mode with an alternate working directory -sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.expr > $TEST_ROOT/shell.shebang.expr -chmod a+rx $TEST_ROOT/shell.shebang.expr +sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.expr > "$TEST_ROOT"/shell.shebang.expr +chmod a+rx "$TEST_ROOT"/shell.shebang.expr # Should fail due to expressions using relative path -! 
$TEST_ROOT/shell.shebang.expr bar -cp shell.nix "${config_nix}" $TEST_ROOT + "$TEST_ROOT"/shell.shebang.expr bar && exit 1 +cp shell.nix "${config_nix}" "$TEST_ROOT" # Should succeed echo "cwd: $PWD" -output=$($TEST_ROOT/shell.shebang.expr bar) +output=$("$TEST_ROOT"/shell.shebang.expr bar) [ "$output" = foo ] # Test nix-shell shebang mode with an alternate working directory -sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.legacy.expr > $TEST_ROOT/shell.shebang.legacy.expr -chmod a+rx $TEST_ROOT/shell.shebang.legacy.expr +sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.legacy.expr > "$TEST_ROOT"/shell.shebang.legacy.expr +chmod a+rx "$TEST_ROOT"/shell.shebang.legacy.expr # Should fail due to expressions using relative path mkdir -p "$TEST_ROOT/somewhere-unrelated" -output="$(cd "$TEST_ROOT/somewhere-unrelated"; $TEST_ROOT/shell.shebang.legacy.expr bar;)" +output="$(cd "$TEST_ROOT/somewhere-unrelated"; "$TEST_ROOT"/shell.shebang.legacy.expr bar;)" [[ $(realpath "$output") = $(realpath "$TEST_ROOT/somewhere-unrelated") ]] # Test nix-shell shebang mode again with metacharacters in the filename. # First word of filename is chosen to not match any file in the test root. -sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.sh > $TEST_ROOT/spaced\ \\\'\"shell.shebang.sh -chmod a+rx $TEST_ROOT/spaced\ \\\'\"shell.shebang.sh +sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.sh > "$TEST_ROOT"/spaced\ \\\'\"shell.shebang.sh +chmod a+rx "$TEST_ROOT"/spaced\ \\\'\"shell.shebang.sh -output=$($TEST_ROOT/spaced\ \\\'\"shell.shebang.sh abc def) +output=$("$TEST_ROOT"/spaced\ \\\'\"shell.shebang.sh abc def) [ "$output" = "foo bar abc def" ] # Test nix-shell shebang mode for ruby # This uses a fake interpreter that returns the arguments passed # This, in turn, verifies the `rc` script is valid and the `load()` script (given using `-e`) is as expected. -sed -e "s|@SHELL_PROG@|$(type -P nix-shell)|" shell.shebang.rb > $TEST_ROOT/shell.shebang.rb -chmod a+rx $TEST_ROOT/shell.shebang.rb +sed -e "s|@SHELL_PROG@|$(type -P nix-shell)|" shell.shebang.rb > "$TEST_ROOT"/shell.shebang.rb +chmod a+rx "$TEST_ROOT"/shell.shebang.rb -output=$($TEST_ROOT/shell.shebang.rb abc ruby) +output=$("$TEST_ROOT"/shell.shebang.rb abc ruby) [ "$output" = '-e load(ARGV.shift) -- '"$TEST_ROOT"'/shell.shebang.rb abc ruby' ] # Test nix-shell shebang mode for ruby again with metacharacters in the filename. # Note: fake interpreter only space-separates args without adding escapes to its output. 
-sed -e "s|@SHELL_PROG@|$(type -P nix-shell)|" shell.shebang.rb > $TEST_ROOT/spaced\ \\\'\"shell.shebang.rb -chmod a+rx $TEST_ROOT/spaced\ \\\'\"shell.shebang.rb +sed -e "s|@SHELL_PROG@|$(type -P nix-shell)|" shell.shebang.rb > "$TEST_ROOT"/spaced\ \\\'\"shell.shebang.rb +chmod a+rx "$TEST_ROOT"/spaced\ \\\'\"shell.shebang.rb -output=$($TEST_ROOT/spaced\ \\\'\"shell.shebang.rb abc ruby) +output=$("$TEST_ROOT"/spaced\ \\\'\"shell.shebang.rb abc ruby) +# shellcheck disable=SC1003 [ "$output" = '-e load(ARGV.shift) -- '"$TEST_ROOT"'/spaced \'\''"shell.shebang.rb abc ruby' ] # Test nix-shell shebang quoting -sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.nix > $TEST_ROOT/shell.shebang.nix -chmod a+rx $TEST_ROOT/shell.shebang.nix -$TEST_ROOT/shell.shebang.nix +sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.nix > "$TEST_ROOT"/shell.shebang.nix +chmod a+rx "$TEST_ROOT"/shell.shebang.nix +"$TEST_ROOT"/shell.shebang.nix -mkdir $TEST_ROOT/lookup-test $TEST_ROOT/empty +mkdir "$TEST_ROOT"/lookup-test "$TEST_ROOT"/empty -echo "import $shellDotNix" > $TEST_ROOT/lookup-test/shell.nix -cp "${config_nix}" $TEST_ROOT/lookup-test/ -echo 'abort "do not load default.nix!"' > $TEST_ROOT/lookup-test/default.nix +echo "import $shellDotNix" > "$TEST_ROOT"/lookup-test/shell.nix +cp "${config_nix}" "$TEST_ROOT"/lookup-test/ +echo 'abort "do not load default.nix!"' > "$TEST_ROOT"/lookup-test/default.nix -nix-shell $TEST_ROOT/lookup-test -A shellDrv --run 'echo "it works"' | grepQuiet "it works" +nix-shell "$TEST_ROOT"/lookup-test -A shellDrv --run 'echo "it works"' | grepQuiet "it works" # https://github.com/NixOS/nix/issues/4529 nix-shell -I "testRoot=$TEST_ROOT" '' -A shellDrv --run 'echo "it works"' | grepQuiet "it works" -expectStderr 1 nix-shell $TEST_ROOT/lookup-test -A shellDrv --run 'echo "it works"' --option nix-shell-always-looks-for-shell-nix false \ +expectStderr 1 nix-shell "$TEST_ROOT"/lookup-test -A shellDrv --run 'echo "it works"' --option nix-shell-always-looks-for-shell-nix false \ | grepQuiet -F "do not load default.nix!" # we did, because we chose to enable legacy behavior -expectStderr 1 nix-shell $TEST_ROOT/lookup-test -A shellDrv --run 'echo "it works"' --option nix-shell-always-looks-for-shell-nix false \ +expectStderr 1 nix-shell "$TEST_ROOT"/lookup-test -A shellDrv --run 'echo "it works"' --option nix-shell-always-looks-for-shell-nix false \ | grepQuiet "Skipping .*lookup-test/shell\.nix.*, because the setting .*nix-shell-always-looks-for-shell-nix.* is disabled. This is a deprecated behavior\. 
Consider enabling .*nix-shell-always-looks-for-shell-nix.*" ( - cd $TEST_ROOT/empty; + cd "$TEST_ROOT"/empty; expectStderr 1 nix-shell | \ grepQuiet "error.*no argument specified and no .*shell\.nix.* or .*default\.nix.* file found in the working directory" ) @@ -147,29 +157,29 @@ expectStderr 1 nix-shell $TEST_ROOT/lookup-test -A shellDrv --run 'echo "it work expectStderr 1 nix-shell -I "testRoot=$TEST_ROOT" '' | grepQuiet "error.*neither .*shell\.nix.* nor .*default\.nix.* found in .*/empty" -cat >$TEST_ROOT/lookup-test/shebangscript <"$TEST_ROOT"/lookup-test/shebangscript < $TEST_ROOT/marco/shell.nix -cat >$TEST_ROOT/marco/polo/default.nix < "$TEST_ROOT"/marco/shell.nix +cat >"$TEST_ROOT"/marco/polo/default.nix <$TEST_ROOT/issue-11892/shebangscript <"$TEST_ROOT"/issue-11892/shebangscript <$TEST_ROOT/issue-11892/shebangscript <$TEST_ROOT/issue-11892/my_package.nix <"$TEST_ROOT"/issue-11892/my_package.nix < $TEST_ROOT/dev-env.sh -nix print-dev-env -f "$shellDotNix" shellDrv --json > $TEST_ROOT/dev-env.json +nix print-dev-env -f "$shellDotNix" shellDrv > "$TEST_ROOT"/dev-env.sh +nix print-dev-env -f "$shellDotNix" shellDrv --json > "$TEST_ROOT"/dev-env.json # Test with raw drv shellDrv=$(nix-instantiate "$shellDotNix" -A shellDrv.out) -nix develop $shellDrv -c bash -c '[[ -n $stdenv ]]' +# shellcheck disable=SC2016 +nix develop "$shellDrv" -c bash -c '[[ -n $stdenv ]]' -nix print-dev-env $shellDrv > $TEST_ROOT/dev-env2.sh -nix print-dev-env $shellDrv --json > $TEST_ROOT/dev-env2.json +nix print-dev-env "$shellDrv" > "$TEST_ROOT"/dev-env2.sh +nix print-dev-env "$shellDrv" --json > "$TEST_ROOT"/dev-env2.json -diff $TEST_ROOT/dev-env{,2}.sh -diff $TEST_ROOT/dev-env{,2}.json +diff "$TEST_ROOT"/dev-env{,2}.sh +diff "$TEST_ROOT"/dev-env{,2}.json # Ensure `nix print-dev-env --json` contains variable assignments. -[[ $(jq -r .variables.arr1.value[2] $TEST_ROOT/dev-env.json) = '3 4' ]] +[[ $(jq -r .variables.arr1.value[2] "$TEST_ROOT"/dev-env.json) = '3 4' ]] # Run tests involving `source <(nix print-dev-env)` in subshells to avoid modifying the current # environment. @@ -238,27 +250,32 @@ set -u # Ensure `source <(nix print-dev-env)` modifies the environment. ( path=$PATH - source $TEST_ROOT/dev-env.sh + # shellcheck disable=SC1091 + source "$TEST_ROOT"/dev-env.sh [[ -n $stdenv ]] + # shellcheck disable=SC2154 [[ ${arr1[2]} = "3 4" ]] + # shellcheck disable=SC2154 [[ ${arr2[1]} = $'\n' ]] [[ ${arr2[2]} = $'x\ny' ]] [[ $(fun) = blabla ]] - [[ $PATH = $(jq -r .variables.PATH.value $TEST_ROOT/dev-env.json):$path ]] + [[ $PATH = $(jq -r .variables.PATH.value "$TEST_ROOT"/dev-env.json):$path ]] ) # Ensure `source <(nix print-dev-env)` handles the case when PATH is empty. 
( path=$PATH + # shellcheck disable=SC2123 PATH= - source $TEST_ROOT/dev-env.sh - [[ $PATH = $(PATH=$path jq -r .variables.PATH.value $TEST_ROOT/dev-env.json) ]] + # shellcheck disable=SC1091 + source "$TEST_ROOT"/dev-env.sh + [[ $PATH = $(PATH=$path jq -r .variables.PATH.value "$TEST_ROOT"/dev-env.json) ]] ) # Test nix-shell with ellipsis and no `inNixShell` argument (for backwards compat with old nixpkgs) -cat >$TEST_ROOT/shell-ellipsis.nix <"$TEST_ROOT"/shell-ellipsis.nix < Date: Mon, 29 Sep 2025 10:03:10 -0700 Subject: [PATCH 179/332] shellcheck fix: tests/functional/nix_path.sh --- maintainers/flake-module.nix | 1 - tests/functional/nix_path.sh | 35 ++++++++++++++++++----------------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 24eedaa9b..f783f0261 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/nix_path\.sh$'' ''^tests/functional/optimise-store\.sh$'' ''^tests/functional/output-normalization\.sh$'' ''^tests/functional/parallel\.builder\.sh$'' diff --git a/tests/functional/nix_path.sh b/tests/functional/nix_path.sh index 90cba1f0c..24ddcdd01 100755 --- a/tests/functional/nix_path.sh +++ b/tests/functional/nix_path.sh @@ -34,12 +34,13 @@ nix-instantiate --eval -E '' --restrict-eval unset NIX_PATH -mkdir -p $TEST_ROOT/{from-nix-path-file,from-NIX_PATH,from-nix-path,from-extra-nix-path,from-I} +mkdir -p "$TEST_ROOT"/{from-nix-path-file,from-NIX_PATH,from-nix-path,from-extra-nix-path,from-I} for i in from-nix-path-file from-NIX_PATH from-nix-path from-extra-nix-path from-I; do - touch $TEST_ROOT/$i/only-$i.nix + touch "$TEST_ROOT"/$i/only-$i.nix done # finding something that's not in any of the default paths fails +# shellcheck disable=SC2091 ( ! $(nix-instantiate --find-file test) ) echo "nix-path = test=$TEST_ROOT/from-nix-path-file" >> "$test_nix_conf" @@ -53,36 +54,36 @@ echo "nix-path = test=$TEST_ROOT/from-nix-path-file" >> "$test_nix_conf" (! 
NIX_PATH=test=$TEST_ROOT nix-instantiate --find-file test/only-from-nix-path-file.nix) # -I extends nix.conf -[[ $(nix-instantiate -I test=$TEST_ROOT/from-I --find-file test/only-from-I.nix) = $TEST_ROOT/from-I/only-from-I.nix ]] +[[ $(nix-instantiate -I test="$TEST_ROOT"/from-I --find-file test/only-from-I.nix) = $TEST_ROOT/from-I/only-from-I.nix ]] # if -I does not have the desired entry, the value from nix.conf is used -[[ $(nix-instantiate -I test=$TEST_ROOT/from-I --find-file test/only-from-nix-path-file.nix) = $TEST_ROOT/from-nix-path-file/only-from-nix-path-file.nix ]] +[[ $(nix-instantiate -I test="$TEST_ROOT"/from-I --find-file test/only-from-nix-path-file.nix) = $TEST_ROOT/from-nix-path-file/only-from-nix-path-file.nix ]] # -I extends NIX_PATH -[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate -I test=$TEST_ROOT/from-I --find-file test/only-from-I.nix) = $TEST_ROOT/from-I/only-from-I.nix ]] +[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate -I test="$TEST_ROOT"/from-I --find-file test/only-from-I.nix) = $TEST_ROOT/from-I/only-from-I.nix ]] # -I takes precedence over NIX_PATH -[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate -I test=$TEST_ROOT/from-I --find-file test) = $TEST_ROOT/from-I ]] +[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate -I test="$TEST_ROOT"/from-I --find-file test) = $TEST_ROOT/from-I ]] # if -I does not have the desired entry, the value from NIX_PATH is used -[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate -I test=$TEST_ROOT/from-I --find-file test/only-from-NIX_PATH.nix) = $TEST_ROOT/from-NIX_PATH/only-from-NIX_PATH.nix ]] +[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate -I test="$TEST_ROOT"/from-I --find-file test/only-from-NIX_PATH.nix) = $TEST_ROOT/from-NIX_PATH/only-from-NIX_PATH.nix ]] # --extra-nix-path extends NIX_PATH -[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate --extra-nix-path test=$TEST_ROOT/from-extra-nix-path --find-file test/only-from-extra-nix-path.nix) = $TEST_ROOT/from-extra-nix-path/only-from-extra-nix-path.nix ]] +[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate --extra-nix-path test="$TEST_ROOT"/from-extra-nix-path --find-file test/only-from-extra-nix-path.nix) = $TEST_ROOT/from-extra-nix-path/only-from-extra-nix-path.nix ]] # if --extra-nix-path does not have the desired entry, the value from NIX_PATH is used -[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate --extra-nix-path test=$TEST_ROOT/from-extra-nix-path --find-file test/only-from-NIX_PATH.nix) = $TEST_ROOT/from-NIX_PATH/only-from-NIX_PATH.nix ]] +[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate --extra-nix-path test="$TEST_ROOT"/from-extra-nix-path --find-file test/only-from-NIX_PATH.nix) = $TEST_ROOT/from-NIX_PATH/only-from-NIX_PATH.nix ]] # --nix-path overrides NIX_PATH -[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate --nix-path test=$TEST_ROOT/from-nix-path --find-file test) = $TEST_ROOT/from-nix-path ]] +[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate --nix-path test="$TEST_ROOT"/from-nix-path --find-file test) = $TEST_ROOT/from-nix-path ]] # if --nix-path does not have the desired entry, it fails -(! NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate --nix-path test=$TEST_ROOT/from-nix-path --find-file test/only-from-NIX_PATH.nix) +(! 
NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate --nix-path test="$TEST_ROOT"/from-nix-path --find-file test/only-from-NIX_PATH.nix) # --nix-path overrides nix.conf -[[ $(nix-instantiate --nix-path test=$TEST_ROOT/from-nix-path --find-file test) = $TEST_ROOT/from-nix-path ]] -(! nix-instantiate --nix-path test=$TEST_ROOT/from-nix-path --find-file test/only-from-nix-path-file.nix) +[[ $(nix-instantiate --nix-path test="$TEST_ROOT"/from-nix-path --find-file test) = $TEST_ROOT/from-nix-path ]] +(! nix-instantiate --nix-path test="$TEST_ROOT"/from-nix-path --find-file test/only-from-nix-path-file.nix) # --extra-nix-path extends nix.conf -[[ $(nix-instantiate --extra-nix-path test=$TEST_ROOT/from-extra-nix-path --find-file test/only-from-extra-nix-path.nix) = $TEST_ROOT/from-extra-nix-path/only-from-extra-nix-path.nix ]] +[[ $(nix-instantiate --extra-nix-path test="$TEST_ROOT"/from-extra-nix-path --find-file test/only-from-extra-nix-path.nix) = $TEST_ROOT/from-extra-nix-path/only-from-extra-nix-path.nix ]] # if --extra-nix-path does not have the desired entry, it is taken from nix.conf -[[ $(nix-instantiate --extra-nix-path test=$TEST_ROOT/from-extra-nix-path --find-file test) = $TEST_ROOT/from-nix-path-file ]] +[[ $(nix-instantiate --extra-nix-path test="$TEST_ROOT"/from-extra-nix-path --find-file test) = $TEST_ROOT/from-nix-path-file ]] # -I extends --nix-path -[[ $(nix-instantiate --nix-path test=$TEST_ROOT/from-nix-path -I test=$TEST_ROOT/from-I --find-file test/only-from-I.nix) = $TEST_ROOT/from-I/only-from-I.nix ]] -[[ $(nix-instantiate --nix-path test=$TEST_ROOT/from-nix-path -I test=$TEST_ROOT/from-I --find-file test/only-from-nix-path.nix) = $TEST_ROOT/from-nix-path/only-from-nix-path.nix ]] +[[ $(nix-instantiate --nix-path test="$TEST_ROOT"/from-nix-path -I test="$TEST_ROOT"/from-I --find-file test/only-from-I.nix) = $TEST_ROOT/from-I/only-from-I.nix ]] +[[ $(nix-instantiate --nix-path test="$TEST_ROOT"/from-nix-path -I test="$TEST_ROOT"/from-I --find-file test/only-from-nix-path.nix) = $TEST_ROOT/from-nix-path/only-from-nix-path.nix ]] From 32818483a52750cac727e2f5b53ae16f46fc14d2 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 10:03:50 -0700 Subject: [PATCH 180/332] shellcheck fix: tests/functional/optimise-store.sh --- maintainers/flake-module.nix | 1 - tests/functional/optimise-store.sh | 17 ++++++++++------- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index f783f0261..5b743e61d 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/optimise-store\.sh$'' ''^tests/functional/output-normalization\.sh$'' ''^tests/functional/parallel\.builder\.sh$'' ''^tests/functional/parallel\.sh$'' diff --git a/tests/functional/optimise-store.sh b/tests/functional/optimise-store.sh index 05c4c41e4..332a308c2 100755 --- a/tests/functional/optimise-store.sh +++ b/tests/functional/optimise-store.sh @@ -4,28 +4,31 @@ source common.sh clearStoreIfPossible +# shellcheck disable=SC2016 outPath1=$(echo 'with import '"${config_nix}"'; mkDerivation { name = "foo1"; builder = builtins.toFile "builder" "mkdir $out; echo hello > $out/foo"; }' | nix-build - --no-out-link --auto-optimise-store) +# shellcheck disable=SC2016 outPath2=$(echo 'with import '"${config_nix}"'; mkDerivation { name = "foo2"; builder = builtins.toFile "builder" "mkdir $out; echo hello > $out/foo"; }' | 
nix-build - --no-out-link --auto-optimise-store) TODO_NixOS # ignoring the client-specified setting 'auto-optimise-store', because it is a restricted setting and you are not a trusted user # TODO: only continue when trusted user or root -inode1="$(stat --format=%i $outPath1/foo)" -inode2="$(stat --format=%i $outPath2/foo)" +inode1="$(stat --format=%i "$outPath1"/foo)" +inode2="$(stat --format=%i "$outPath2"/foo)" if [ "$inode1" != "$inode2" ]; then echo "inodes do not match" exit 1 fi -nlink="$(stat --format=%h $outPath1/foo)" +nlink="$(stat --format=%h "$outPath1"/foo)" if [ "$nlink" != 3 ]; then echo "link count incorrect" exit 1 fi +# shellcheck disable=SC2016 outPath3=$(echo 'with import '"${config_nix}"'; mkDerivation { name = "foo3"; builder = builtins.toFile "builder" "mkdir $out; echo hello > $out/foo"; }' | nix-build - --no-out-link) -inode3="$(stat --format=%i $outPath3/foo)" +inode3="$(stat --format=%i "$outPath3"/foo)" if [ "$inode1" = "$inode3" ]; then echo "inodes match unexpectedly" exit 1 @@ -34,8 +37,8 @@ fi # XXX: This should work through the daemon too NIX_REMOTE="" nix-store --optimise -inode1="$(stat --format=%i $outPath1/foo)" -inode3="$(stat --format=%i $outPath3/foo)" +inode1="$(stat --format=%i "$outPath1"/foo)" +inode3="$(stat --format=%i "$outPath3"/foo)" if [ "$inode1" != "$inode3" ]; then echo "inodes do not match" exit 1 @@ -43,7 +46,7 @@ fi nix-store --gc -if [ -n "$(ls $NIX_STORE_DIR/.links)" ]; then +if [ -n "$(ls "$NIX_STORE_DIR"/.links)" ]; then echo ".links directory not empty after GC" exit 1 fi From c09cf33a3ac25291a4e4c095ee3e898f57187445 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 10:04:14 -0700 Subject: [PATCH 181/332] shellcheck fix: tests/functional/output-normalization.sh --- maintainers/flake-module.nix | 1 - tests/functional/output-normalization.sh | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 5b743e61d..db232f179 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/output-normalization\.sh$'' ''^tests/functional/parallel\.builder\.sh$'' ''^tests/functional/parallel\.sh$'' ''^tests/functional/pass-as-file\.sh$'' diff --git a/tests/functional/output-normalization.sh b/tests/functional/output-normalization.sh index c55f1b1d1..bd1668db9 100755 --- a/tests/functional/output-normalization.sh +++ b/tests/functional/output-normalization.sh @@ -6,7 +6,7 @@ testNormalization () { TODO_NixOS clearStore outPath=$(nix-build ./simple.nix --no-out-link) - test "$(stat -c %Y $outPath)" -eq 1 + test "$(stat -c %Y "$outPath")" -eq 1 } testNormalization From 4dc5dbaba270e6122b94986f4dc82d028e448c1f Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 10:05:33 -0700 Subject: [PATCH 182/332] shellcheck fix: tests/functional/parallel.builder.sh --- maintainers/flake-module.nix | 1 - tests/functional/parallel.builder.sh | 30 +++++++++++++++------------- 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index db232f179..59adb8fdb 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/parallel\.builder\.sh$'' ''^tests/functional/parallel\.sh$'' ''^tests/functional/pass-as-file\.sh$'' 
''^tests/functional/path-from-hash-part\.sh$'' diff --git a/tests/functional/parallel.builder.sh b/tests/functional/parallel.builder.sh index d092bc5a6..436246571 100644 --- a/tests/functional/parallel.builder.sh +++ b/tests/functional/parallel.builder.sh @@ -1,29 +1,31 @@ +# shellcheck shell=bash +# shellcheck disable=SC2154 echo "DOING $text" # increase counter -while ! ln -s x $shared.lock 2> /dev/null; do +while ! ln -s x "$shared".lock 2> /dev/null; do sleep 1 done -test -f $shared.cur || echo 0 > $shared.cur -test -f $shared.max || echo 0 > $shared.max -new=$(($(cat $shared.cur) + 1)) -if test $new -gt $(cat $shared.max); then - echo $new > $shared.max +test -f "$shared".cur || echo 0 > "$shared".cur +test -f "$shared".max || echo 0 > "$shared".max +new=$(($(cat "$shared".cur) + 1)) +if test $new -gt "$(cat "$shared".max)"; then + echo $new > "$shared".max fi -echo $new > $shared.cur -rm $shared.lock +echo $new > "$shared".cur +rm "$shared".lock -echo -n $(cat $inputs)$text > $out +echo -n "$(cat "$inputs")""$text" > "$out" -sleep $sleepTime +sleep "$sleepTime" # decrease counter -while ! ln -s x $shared.lock 2> /dev/null; do +while ! ln -s x "$shared".lock 2> /dev/null; do sleep 1 done -test -f $shared.cur || echo 0 > $shared.cur -echo $(($(cat $shared.cur) - 1)) > $shared.cur -rm $shared.lock +test -f "$shared".cur || echo 0 > "$shared".cur +echo $(($(cat "$shared".cur) - 1)) > "$shared".cur +rm "$shared".lock From ef17baf50d262c40a0761b39f1da6d24e0add375 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Mon, 29 Sep 2025 10:05:59 -0700 Subject: [PATCH 183/332] shellcheck fix: tests/functional/parallel.sh --- maintainers/flake-module.nix | 1 - tests/functional/parallel.sh | 13 +++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 59adb8fdb..0a15c2362 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/parallel\.sh$'' ''^tests/functional/pass-as-file\.sh$'' ''^tests/functional/path-from-hash-part\.sh$'' ''^tests/functional/path-info\.sh$'' diff --git a/tests/functional/parallel.sh b/tests/functional/parallel.sh index 7e420688d..4d0bf0f1b 100644 --- a/tests/functional/parallel.sh +++ b/tests/functional/parallel.sh @@ -1,3 +1,4 @@ +# shellcheck shell=bash source common.sh @@ -8,7 +9,7 @@ TODO_NixOS clearStore -rm -f $_NIX_TEST_SHARED.cur $_NIX_TEST_SHARED.max +rm -f "$_NIX_TEST_SHARED".cur "$_NIX_TEST_SHARED".max outPath=$(nix-build -j10000 parallel.nix --no-out-link) @@ -17,8 +18,8 @@ echo "output path is $outPath" text=$(cat "$outPath") if test "$text" != "abacade"; then exit 1; fi -if test "$(cat $_NIX_TEST_SHARED.cur)" != 0; then fail "wrong current process count"; fi -if test "$(cat $_NIX_TEST_SHARED.max)" != 3; then fail "not enough parallelism"; fi +if test "$(cat "$_NIX_TEST_SHARED".cur)" != 0; then fail "wrong current process count"; fi +if test "$(cat "$_NIX_TEST_SHARED".max)" != 3; then fail "not enough parallelism"; fi # Second, test that parallel invocations of nix-build perform builds @@ -27,7 +28,7 @@ echo "testing multiple nix-build -j1..." clearStore -rm -f $_NIX_TEST_SHARED.cur $_NIX_TEST_SHARED.max +rm -f "$_NIX_TEST_SHARED".cur "$_NIX_TEST_SHARED".max drvPath=$(nix-instantiate parallel.nix --argstr sleepTime 15) @@ -54,5 +55,5 @@ wait $pid2 || fail "instance 2 failed: $?" wait $pid3 || fail "instance 3 failed: $?" 
wait $pid4 || fail "instance 4 failed: $?" -if test "$(cat $_NIX_TEST_SHARED.cur)" != 0; then fail "wrong current process count"; fi -if test "$(cat $_NIX_TEST_SHARED.max)" != 3; then fail "not enough parallelism"; fi +if test "$(cat "$_NIX_TEST_SHARED".cur)" != 0; then fail "wrong current process count"; fi +if test "$(cat "$_NIX_TEST_SHARED".max)" != 3; then fail "not enough parallelism"; fi From 1830f5f967c1726d07104fb9b65e8ae84aac287c Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Mon, 29 Sep 2025 23:16:28 +0300 Subject: [PATCH 184/332] libutil: Create empty directory at the root for makeEmptySourceAccessor This is my SNAFU. Accidentally broken in 02c9ac445ff527a7b4c5105d20d9ab401117dcee. There's very dubious behavior for 'builtins.readDir /.': { outputs = { ... }: { lib.a = builtins.readDir /.; }; } nix eval /tmp/test-flake#lib.a Starting from 2.27 this now returns an empty set. This really isn't supposed to happen, but this change in the semantics of makeEmptySourceAccessor accidentally changed the behavior of this. --- src/libutil/memory-source-accessor.cc | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/src/libutil/memory-source-accessor.cc b/src/libutil/memory-source-accessor.cc index caff5b56a..a9ffb7746 100644 --- a/src/libutil/memory-source-accessor.cc +++ b/src/libutil/memory-source-accessor.cc @@ -208,11 +208,16 @@ void MemorySink::createSymlink(const CanonPath & path, const std::string & targe ref makeEmptySourceAccessor() { - static auto empty = make_ref().cast(); - /* Don't forget to clear the display prefix, as the default constructed - SourceAccessor has the «unknown» prefix. Since this accessor is supposed - to mimic an empty root directory the prefix needs to be empty. */ - empty->setPathDisplay(""); + static auto empty = []() { + auto empty = make_ref(); + MemorySink sink{*empty}; + sink.createDirectory(CanonPath::root); + /* Don't forget to clear the display prefix, as the default constructed + SourceAccessor has the «unknown» prefix. Since this accessor is supposed + to mimic an empty root directory the prefix needs to be empty. 
*/ + empty->setPathDisplay(""); + return empty.cast(); + }(); return empty; } From f70b0b599c75e05c42c2be4f85167fd8f4805e0e Mon Sep 17 00:00:00 2001 From: Taeer Bar-Yam Date: Sun, 28 Sep 2025 11:02:54 -0400 Subject: [PATCH 185/332] libexpr: allocate ExprPath strings in the allocator --- src/libexpr/include/nix/expr/nixexpr.hh | 10 ++++++---- src/libexpr/nixexpr.cc | 2 +- src/libexpr/parser.y | 6 +++--- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/src/libexpr/include/nix/expr/nixexpr.hh b/src/libexpr/include/nix/expr/nixexpr.hh index 747a8e4b2..2af6039cd 100644 --- a/src/libexpr/include/nix/expr/nixexpr.hh +++ b/src/libexpr/include/nix/expr/nixexpr.hh @@ -212,14 +212,16 @@ struct ExprString : Expr struct ExprPath : Expr { ref accessor; - std::string s; Value v; - ExprPath(ref accessor, std::string s) + ExprPath(std::pmr::polymorphic_allocator & alloc, ref accessor, std::string_view sv) : accessor(accessor) - , s(std::move(s)) { - v.mkPath(&*accessor, this->s.c_str()); + auto len = sv.length(); + char * s = alloc.allocate(len + 1); + sv.copy(s, len); + s[len] = '\0'; + v.mkPath(&*accessor, s); } Value * maybeThunk(EvalState & state, Env & env) override; diff --git a/src/libexpr/nixexpr.cc b/src/libexpr/nixexpr.cc index a2980af6b..014b85f20 100644 --- a/src/libexpr/nixexpr.cc +++ b/src/libexpr/nixexpr.cc @@ -45,7 +45,7 @@ void ExprString::show(const SymbolTable & symbols, std::ostream & str) const void ExprPath::show(const SymbolTable & symbols, std::ostream & str) const { - str << s; + str << v.pathStr(); } void ExprVar::show(const SymbolTable & symbols, std::ostream & str) const diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y index 7dabd6b56..bc1eb056e 100644 --- a/src/libexpr/parser.y +++ b/src/libexpr/parser.y @@ -392,8 +392,8 @@ path_start root filesystem accessor, rather than the accessor of the current Nix expression. */ literal.front() == '/' - ? new ExprPath(state->rootFS, std::move(path)) - : new ExprPath(state->basePath.accessor, std::move(path)); + ? 
new ExprPath(state->alloc, state->rootFS, path) + : new ExprPath(state->alloc, state->basePath.accessor, path); } | HPATH { if (state->settings.pureEval) { @@ -403,7 +403,7 @@ path_start ); } Path path(getHome() + std::string($1.p + 1, $1.l - 1)); - $$ = new ExprPath(ref(state->rootFS), std::move(path)); + $$ = new ExprPath(state->alloc, ref(state->rootFS), path); } ; From 689fa81dc9fb3a8368a4f1b7b8d18f5b1ce8526b Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Mon, 29 Sep 2025 21:31:46 +0000 Subject: [PATCH 186/332] feat(libstore/http-binary-cache-store): narinfo/ls/log compression --- src/libstore/http-binary-cache-store.cc | 22 ++++++++++++++++++- .../include/nix/store/binary-cache-store.hh | 15 +++++++++++++ 2 files changed, 36 insertions(+), 1 deletion(-) diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc index 6922c0f69..5d4fba163 100644 --- a/src/libstore/http-binary-cache-store.cc +++ b/src/libstore/http-binary-cache-store.cc @@ -4,6 +4,7 @@ #include "nix/store/nar-info-disk-cache.hh" #include "nix/util/callback.hh" #include "nix/store/store-registration.hh" +#include "nix/util/compression.hh" namespace nix { @@ -142,8 +143,27 @@ protected: const std::string & mimeType) override { auto req = makeRequest(path); - req.data = StreamToSourceAdapter(istream).drain(); + + auto data = StreamToSourceAdapter(istream).drain(); + + // Determine compression method based on file type + std::string compressionMethod; + if (hasSuffix(path, ".narinfo")) + compressionMethod = config->narinfoCompression; + else if (hasSuffix(path, ".ls")) + compressionMethod = config->lsCompression; + else if (hasPrefix(path, "log/")) + compressionMethod = config->logCompression; + + // Apply compression if configured + if (!compressionMethod.empty()) { + data = compress(compressionMethod, data); + req.headers.emplace_back("Content-Encoding", compressionMethod); + } + + req.data = std::move(data); req.mimeType = mimeType; + try { getFileTransfer()->upload(req); } catch (FileTransferError & e) { diff --git a/src/libstore/include/nix/store/binary-cache-store.hh b/src/libstore/include/nix/store/binary-cache-store.hh index c316b1199..3a2c90022 100644 --- a/src/libstore/include/nix/store/binary-cache-store.hh +++ b/src/libstore/include/nix/store/binary-cache-store.hh @@ -59,6 +59,21 @@ struct BinaryCacheStoreConfig : virtual StoreConfig The meaning and accepted values depend on the compression method selected. `-1` specifies that the default compression level should be used. )"}; + + const Setting narinfoCompression{ + this, "", "narinfo-compression", "Compression method for `.narinfo` files."}; + + const Setting lsCompression{this, "", "ls-compression", "Compression method for `.ls` files."}; + + const Setting logCompression{ + this, + "", + "log-compression", + R"( + Compression method for `log/*` files. It is recommended to + use a compression method supported by most web browsers + (e.g. `brotli`). + )"}; }; /** From d5402b8527a87a887b516d5cdf630acb54ecbcb5 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Mon, 29 Sep 2025 16:35:59 -0400 Subject: [PATCH 187/332] Encapsulate `curlFileTransfer::State:quit` It is allowed to read it, and to set it to `false`, but not to set it to `true`. 
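A minimal self-contained sketch of the encapsulated state (the names `quitting`, `quit()` and `isQuitting()` follow the diff below; the `Sync`-wrapped state is reduced to a plain `std::mutex` here purely for illustration, so this is not the real `curlFileTransfer` code):

    #include <iostream>
    #include <mutex>

    // Stand-in for curlFileTransfer::State: the flag starts out false, is
    // observed via isQuitting(), raised via quit(), and nothing resets it.
    struct State
    {
    private:
        bool quitting = false;

    public:
        void quit()
        {
            quitting = true;
        }

        bool isQuitting()
        {
            return quitting;
        }
    };

    int main()
    {
        std::mutex stateLock; // stands in for the Sync-wrapped member
        State state;

        {
            std::lock_guard<std::mutex> lock(stateLock);
            state.quit(); // e.g. from the destructor or the worker thread
        }

        std::lock_guard<std::mutex> lock(stateLock);
        std::cout << std::boolalpha << state.isQuitting() << std::endl; // prints "true"
        return 0;
    }

Because the only mutator is quit(), no caller can accidentally flip the flag back once shutdown has started.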
--- src/libstore/filetransfer.cc | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index a162df1ad..72153dfdd 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -594,10 +594,21 @@ struct curlFileTransfer : public FileTransfer } }; - bool quit = false; std:: priority_queue, std::vector>, EmbargoComparator> incoming; + private: + bool quitting = false; + public: + void quit() + { + quitting = true; + } + + bool isQuitting() + { + return quitting; + } }; Sync state_; @@ -649,7 +660,7 @@ struct curlFileTransfer : public FileTransfer /* Signal the worker thread to exit. */ { auto state(state_.lock()); - state->quit = true; + state->quit(); } #ifndef _WIN32 // TODO need graceful async exit support on Windows? writeFull(wakeupPipe.writeSide.get(), " ", false); @@ -750,7 +761,7 @@ struct curlFileTransfer : public FileTransfer break; } } - quit = state->quit; + quit = state->isQuitting(); } for (auto & item : incoming) { @@ -778,7 +789,7 @@ struct curlFileTransfer : public FileTransfer auto state(state_.lock()); while (!state->incoming.empty()) state->incoming.pop(); - state->quit = true; + state->quit(); } } @@ -789,7 +800,7 @@ struct curlFileTransfer : public FileTransfer { auto state(state_.lock()); - if (state->quit) + if (state->isQuitting()) throw nix::Error("cannot enqueue download request because the download thread is shutting down"); state->incoming.push(item); } @@ -845,7 +856,7 @@ ref getFileTransfer() { static ref fileTransfer = makeCurlFileTransfer(); - if (fileTransfer->state_.lock()->quit) + if (fileTransfer->state_.lock()->isQuitting()) fileTransfer = makeCurlFileTransfer(); return fileTransfer; From 1f65b08d947d9ab7eb397eebe49609963e003641 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Mon, 29 Sep 2025 16:37:12 -0400 Subject: [PATCH 188/332] `curlFileTransfer::State:quit` emptys the queue Whoever first calls `quit` now empties the queue, instead of waiting for the worker thread to do it. (Note that in the unwinding case, the worker thread is still the first to call `quit`, though.) --- src/libstore/filetransfer.cc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index 72153dfdd..f8f5b48e0 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -603,6 +603,9 @@ struct curlFileTransfer : public FileTransfer void quit() { quitting = true; + /* We wil not be processing any more incomming requests */ + while (!incoming.empty()) + incoming.pop(); } bool isQuitting() @@ -787,8 +790,6 @@ struct curlFileTransfer : public FileTransfer { auto state(state_.lock()); - while (!state->incoming.empty()) - state->incoming.pop(); state->quit(); } } From 86fb5b24a9cb528d87cb02efb89483353a4b6c44 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Mon, 29 Sep 2025 16:43:45 -0400 Subject: [PATCH 189/332] `curlFileTransfer::workerThreadEntry` Only call `quit` if we need to. --- src/libstore/filetransfer.cc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index f8f5b48e0..59fc75ed0 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -781,14 +781,18 @@ struct curlFileTransfer : public FileTransfer void workerThreadEntry() { + // Unwinding or because someone called `quit`. 
+ bool normalExit = true; try { workerThreadMain(); } catch (nix::Interrupted & e) { + normalExit = false; } catch (std::exception & e) { printError("unexpected error in download thread: %s", e.what()); + normalExit = false; } - { + if (!normalExit) { auto state(state_.lock()); state->quit(); } From a8670e8a7da337e230ecd31bc81a040af208f9d0 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Tue, 30 Sep 2025 03:16:35 +0300 Subject: [PATCH 190/332] libexpr-tests: Add unit tests for broken readDir /. for pure eval A very unfortunate interaction of current filtering with pure eval is that the following actually leads to `lib.a = {}`. This just adds a unit test for this broken behavior. This is really good to be done as a unit test via the in-memory store. { outputs = { ... }: { lib.a = builtins.readDir /.; }; } --- .../include/nix/expr/tests/libexpr.hh | 13 ++++++- src/libexpr-tests/eval.cc | 38 +++++++++++++++++++ .../include/nix/store/tests/libstore.hh | 13 +++---- 3 files changed, 55 insertions(+), 9 deletions(-) diff --git a/src/libexpr-test-support/include/nix/expr/tests/libexpr.hh b/src/libexpr-test-support/include/nix/expr/tests/libexpr.hh index 4cf985e15..a1320e14a 100644 --- a/src/libexpr-test-support/include/nix/expr/tests/libexpr.hh +++ b/src/libexpr-test-support/include/nix/expr/tests/libexpr.hh @@ -26,11 +26,20 @@ public: } protected: - LibExprTest() + LibExprTest(ref store, auto && makeEvalSettings) : LibStoreTest() + , evalSettings(makeEvalSettings(readOnlyMode)) , state({}, store, fetchSettings, evalSettings, nullptr) { - evalSettings.nixPath = {}; + } + + LibExprTest() + : LibExprTest(openStore("dummy://"), [](bool & readOnlyMode) { + EvalSettings settings{readOnlyMode}; + settings.nixPath = {}; + return settings; + }) + { } Value eval(std::string input, bool forceValue = true) diff --git a/src/libexpr-tests/eval.cc b/src/libexpr-tests/eval.cc index ad70ea5b8..7562a9da2 100644 --- a/src/libexpr-tests/eval.cc +++ b/src/libexpr-tests/eval.cc @@ -3,6 +3,7 @@ #include "nix/expr/eval.hh" #include "nix/expr/tests/libexpr.hh" +#include "nix/util/memory-source-accessor.hh" namespace nix { @@ -174,4 +175,41 @@ TEST_F(EvalStateTest, getBuiltin_fail) ASSERT_THROW(state.getBuiltin("nonexistent"), EvalError); } +class PureEvalTest : public LibExprTest +{ +public: + PureEvalTest() + : LibExprTest(openStore("dummy://", {{"read-only", "false"}}), [](bool & readOnlyMode) { + EvalSettings settings{readOnlyMode}; + settings.pureEval = true; + settings.restrictEval = true; + return settings; + }) + { + } +}; + +TEST_F(PureEvalTest, pathExists) +{ + ASSERT_THAT(eval("builtins.pathExists /."), IsFalse()); + ASSERT_THAT(eval("builtins.pathExists /nix"), IsFalse()); + ASSERT_THAT(eval("builtins.pathExists /nix/store"), IsFalse()); + + { + std::string contents = "Lorem ipsum"; + + StringSource s{contents}; + auto path = state.store->addToStoreFromDump( + s, "source", FileSerialisationMethod::Flat, ContentAddressMethod::Raw::Text, HashAlgorithm::SHA256); + auto printed = store->printStorePath(path); + + ASSERT_THROW(eval(fmt("builtins.readFile %s", printed)), RestrictedPathError); + ASSERT_THAT(eval(fmt("builtins.pathExists %s", printed)), IsFalse()); + + ASSERT_THROW(eval("builtins.readDir /."), RestrictedPathError); + state.allowPath(path); // FIXME: This shouldn't behave this way. 
+ ASSERT_THAT(eval("builtins.readDir /."), IsAttrsOfSize(0)); + } +} + } // namespace nix diff --git a/src/libstore-test-support/include/nix/store/tests/libstore.hh b/src/libstore-test-support/include/nix/store/tests/libstore.hh index 28b29fa31..d79b55312 100644 --- a/src/libstore-test-support/include/nix/store/tests/libstore.hh +++ b/src/libstore-test-support/include/nix/store/tests/libstore.hh @@ -19,14 +19,13 @@ public: } protected: + LibStoreTest(ref store) + : store(std::move(store)) + { + } + LibStoreTest() - : store(openStore({ - .variant = - StoreReference::Specified{ - .scheme = "dummy", - }, - .params = {}, - })) + : LibStoreTest(openStore("dummy://")) { } From 3fcd33079cc8100d44d9252307c3390b0765db69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Tue, 30 Sep 2025 10:32:33 +0200 Subject: [PATCH 191/332] add http binary cache test for compression options --- tests/nixos/content-encoding.nix | 190 ++++++++++++++++++++++++++ tests/nixos/default.nix | 2 +- tests/nixos/gzip-content-encoding.nix | 74 ---------- 3 files changed, 191 insertions(+), 75 deletions(-) create mode 100644 tests/nixos/content-encoding.nix delete mode 100644 tests/nixos/gzip-content-encoding.nix diff --git a/tests/nixos/content-encoding.nix b/tests/nixos/content-encoding.nix new file mode 100644 index 000000000..debee377b --- /dev/null +++ b/tests/nixos/content-encoding.nix @@ -0,0 +1,190 @@ +# Test content encoding support in Nix: +# 1. Fetching compressed files from servers with Content-Encoding headers +# (e.g., fetching a zstd archive from a server using gzip Content-Encoding +# should preserve the zstd format, not double-decompress) +# 2. HTTP binary cache store upload/download with compression support + +{ lib, config, ... }: + +let + pkgs = config.nodes.machine.nixpkgs.pkgs; + + ztdCompressedFile = pkgs.stdenv.mkDerivation { + name = "dummy-zstd-compressed-archive"; + dontUnpack = true; + nativeBuildInputs = with pkgs; [ zstd ]; + buildPhase = '' + mkdir archive + for _ in {1..100}; do echo "lorem" > archive/file1; done + for _ in {1..100}; do echo "ipsum" > archive/file2; done + tar --zstd -cf archive.tar.zst archive + ''; + installPhase = '' + install -Dm 644 -T archive.tar.zst $out/share/archive + ''; + }; + + # Bare derivation for testing binary cache with logs + testDrv = builtins.toFile "test.nix" '' + derivation { + name = "test-package"; + builder = "/bin/sh"; + args = [ "-c" "echo 'Building test package...' >&2; echo 'hello from test package' > $out; echo 'Build complete!' >&2" ]; + system = builtins.currentSystem; + } + ''; +in + +{ + name = "content-encoding"; + + nodes = { + machine = + { pkgs, ... }: + { + networking.firewall.allowedTCPPorts = [ 80 ]; + + services.nginx.enable = true; + services.nginx.virtualHosts."localhost" = { + root = "${ztdCompressedFile}/share/"; + # Make sure that nginx really tries to compress the + # file on the fly with no regard to size/mime. 
+ # http://nginx.org/en/docs/http/ngx_http_gzip_module.html + extraConfig = '' + gzip on; + gzip_types *; + gzip_proxied any; + gzip_min_length 0; + ''; + + # Upload endpoint with WebDAV + locations."/cache-upload" = { + root = "/var/lib/nginx-cache"; + extraConfig = '' + client_body_temp_path /var/lib/nginx-cache/tmp; + create_full_put_path on; + dav_methods PUT DELETE; + dav_access user:rw group:rw all:r; + + # Don't try to compress already compressed files + gzip off; + + # Rewrite to remove -upload suffix when writing files + rewrite ^/cache-upload/(.*)$ /cache/$1 break; + ''; + }; + + # Download endpoint with Content-Encoding headers + locations."/cache" = { + root = "/var/lib/nginx-cache"; + extraConfig = '' + gzip off; + + # Serve .narinfo files with gzip encoding + location ~ \.narinfo$ { + add_header Content-Encoding gzip; + default_type "text/x-nix-narinfo"; + } + + # Serve .ls files with gzip encoding + location ~ \.ls$ { + add_header Content-Encoding gzip; + default_type "application/json"; + } + + # Serve log files with brotli encoding + location ~ ^/cache/log/ { + add_header Content-Encoding br; + default_type "text/plain"; + } + ''; + }; + }; + + systemd.services.nginx = { + serviceConfig = { + StateDirectory = "nginx-cache"; + StateDirectoryMode = "0755"; + }; + }; + + environment.systemPackages = with pkgs; [ + file + gzip + brotli + curl + ]; + + virtualisation.writableStore = true; + nix.settings.substituters = lib.mkForce [ ]; + nix.settings.experimental-features = [ + "nix-command" + "flakes" + ]; + }; + }; + + # Check that when nix-prefetch-url is used with a zst tarball it does not get decompressed. + # Also test HTTP binary cache store with compression support. + testScript = '' + # fmt: off + start_all() + + machine.wait_for_unit("nginx.service") + + # Original test: zstd archive with gzip content-encoding + # Make sure that the file is properly compressed as the test would be meaningless otherwise + curl_output = machine.succeed("curl --compressed -v http://localhost/archive 2>&1") + assert "content-encoding: gzip" in curl_output.lower(), f"Expected 'content-encoding: gzip' in curl output, but got: {curl_output}" + + archive_path = machine.succeed("nix-prefetch-url http://localhost/archive --print-path | tail -n1").strip() + mime_type = machine.succeed(f"file --brief --mime-type {archive_path}").strip() + assert mime_type == "application/zstd", f"Expected archive to be 'application/zstd', but got: {mime_type}" + machine.succeed(f"tar --zstd -xf {archive_path}") + + # Test HTTP binary cache store with compression + outPath = machine.succeed(""" + nix build --store /var/lib/build-store -f ${testDrv} --print-out-paths --print-build-logs + """).strip() + + drvPath = machine.succeed(f""" + nix path-info --store /var/lib/build-store --derivation {outPath} + """).strip() + + # Upload to cache with compression (use cache-upload endpoint) + machine.succeed(f""" + nix copy --store /var/lib/build-store --to 'http://localhost/cache-upload?narinfo-compression=gzip&ls-compression=gzip&write-nar-listing=1' {outPath} -vvvvv 2>&1 | tail -100 + """) + machine.succeed(f""" + nix store copy-log --store /var/lib/build-store --to 'http://localhost/cache-upload?log-compression=br' {drvPath} -vvvvv 2>&1 | tail -100 + """) + + # List cache contents + print(machine.succeed("find /var/lib/nginx-cache -type f")) + + narinfoHash = outPath.split('/')[3].split('-')[0] + drvName = drvPath.split('/')[3] + + # Verify compression + machine.succeed(f"gzip -t 
/var/lib/nginx-cache/cache/{narinfoHash}.narinfo") + machine.succeed(f"gzip -t /var/lib/nginx-cache/cache/{narinfoHash}.ls") + machine.succeed(f"brotli -t /var/lib/nginx-cache/cache/log/{drvName}") + + # Check Content-Encoding headers on the download endpoint + narinfo_headers = machine.succeed(f"curl -I http://localhost/cache/{narinfoHash}.narinfo 2>&1") + assert "content-encoding: gzip" in narinfo_headers.lower(), f"Expected 'content-encoding: gzip' for .narinfo file, but headers were: {narinfo_headers}" + + ls_headers = machine.succeed(f"curl -I http://localhost/cache/{narinfoHash}.ls 2>&1") + assert "content-encoding: gzip" in ls_headers.lower(), f"Expected 'content-encoding: gzip' for .ls file, but headers were: {ls_headers}" + + log_headers = machine.succeed(f"curl -I http://localhost/cache/log/{drvName} 2>&1") + assert "content-encoding: br" in log_headers.lower(), f"Expected 'content-encoding: br' for log file, but headers were: {log_headers}" + + # Test fetching from cache + machine.succeed(f"nix copy --from 'http://localhost/cache' --no-check-sigs {outPath}") + + # Test log retrieval + log_output = machine.succeed(f"nix log --store 'http://localhost/cache' {drvPath} 2>&1") + assert "Building test package" in log_output, f"Expected 'Building test package' in log output, but got: {log_output}" + ''; +} diff --git a/tests/nixos/default.nix b/tests/nixos/default.nix index 2031e02a4..5a1e08528 100644 --- a/tests/nixos/default.nix +++ b/tests/nixos/default.nix @@ -187,7 +187,7 @@ in ca-fd-leak = runNixOSTest ./ca-fd-leak; - gzip-content-encoding = runNixOSTest ./gzip-content-encoding.nix; + content-encoding = runNixOSTest ./content-encoding.nix; functional_user = runNixOSTest ./functional/as-user.nix; diff --git a/tests/nixos/gzip-content-encoding.nix b/tests/nixos/gzip-content-encoding.nix deleted file mode 100644 index 22d196c61..000000000 --- a/tests/nixos/gzip-content-encoding.nix +++ /dev/null @@ -1,74 +0,0 @@ -# Test that compressed files fetched from server with compressed responses -# do not get excessively decompressed. -# E.g. fetching a zstd compressed tarball from a server, -# which compresses the response with `Content-Encoding: gzip`. -# The expected result is that the fetched file is a zstd archive. - -{ lib, config, ... }: - -let - pkgs = config.nodes.machine.nixpkgs.pkgs; - - ztdCompressedFile = pkgs.stdenv.mkDerivation { - name = "dummy-zstd-compressed-archive"; - dontUnpack = true; - nativeBuildInputs = with pkgs; [ zstd ]; - buildPhase = '' - mkdir archive - for _ in {1..100}; do echo "lorem" > archive/file1; done - for _ in {1..100}; do echo "ipsum" > archive/file2; done - tar --zstd -cf archive.tar.zst archive - ''; - installPhase = '' - install -Dm 644 -T archive.tar.zst $out/share/archive - ''; - }; - - fileCmd = "${pkgs.file}/bin/file"; -in - -{ - name = "gzip-content-encoding"; - - nodes = { - machine = - { config, pkgs, ... }: - { - networking.firewall.allowedTCPPorts = [ 80 ]; - - services.nginx.enable = true; - services.nginx.virtualHosts."localhost" = { - root = "${ztdCompressedFile}/share/"; - # Make sure that nginx really tries to compress the - # file on the fly with no regard to size/mime. 
- # http://nginx.org/en/docs/http/ngx_http_gzip_module.html - extraConfig = '' - gzip on; - gzip_types *; - gzip_proxied any; - gzip_min_length 0; - ''; - }; - virtualisation.writableStore = true; - virtualisation.additionalPaths = with pkgs; [ file ]; - nix.settings.substituters = lib.mkForce [ ]; - }; - }; - - # Check that when nix-prefetch-url is used with a zst tarball it does not get decompressed. - testScript = - { nodes }: - '' - # fmt: off - start_all() - - machine.wait_for_unit("nginx.service") - machine.succeed(""" - # Make sure that the file is properly compressed as the test would be meaningless otherwise - curl --compressed -v http://localhost/archive |& tr -s ' ' |& grep --ignore-case 'content-encoding: gzip' - archive_path=$(nix-prefetch-url http://localhost/archive --print-path | tail -n1) - [[ $(${fileCmd} --brief --mime-type $archive_path) == "application/zstd" ]] - tar --zstd -xf $archive_path - """) - ''; -} From 6e6f88ac4557109fddab5d46a225199ca763f226 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Tue, 30 Sep 2025 11:05:20 +0200 Subject: [PATCH 192/332] add changelog for http binary cache compression --- .../rl-next/http-binary-cache-compression.md | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 doc/manual/rl-next/http-binary-cache-compression.md diff --git a/doc/manual/rl-next/http-binary-cache-compression.md b/doc/manual/rl-next/http-binary-cache-compression.md new file mode 100644 index 000000000..88f1de6d9 --- /dev/null +++ b/doc/manual/rl-next/http-binary-cache-compression.md @@ -0,0 +1,19 @@ +--- +synopsis: "HTTP binary caches now support transparent compression for metadata" +prs: [] +--- + +HTTP binary cache stores can now compress `.narinfo`, `.ls`, and build log files before uploading them, +reducing bandwidth usage and storage requirements. The compression is applied transparently using the +`Content-Encoding` header, allowing compatible clients to automatically decompress the files. + +Three new configuration options control this behavior: +- `narinfo-compression`: Compression method for `.narinfo` files +- `ls-compression`: Compression method for `.ls` files +- `log-compression`: Compression method for build logs in `log/` directory + +Example usage: +``` +nix copy --to 'http://cache.example.com?narinfo-compression=gzip&ls-compression=gzip' /nix/store/... +nix store copy-log --to 'http://cache.example.com?log-compression=br' /nix/store/... +``` From 8f4a739d0fa05e44589d578f1860b45b8a48f1cc Mon Sep 17 00:00:00 2001 From: John Ericson Date: Thu, 18 Sep 2025 15:54:43 -0400 Subject: [PATCH 193/332] Split out `DerivationResolutionGoal` This prepares the way for fixing a few issues. 
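As a rough sketch of what the split means for callers (all types below are simplified stand-ins, not the real Nix classes; the real declarations live in the new derivation-resolution-goal.hh added further down): the resolution goal runs first and either hands back a resolved derivation or leaves `resolvedDrv` empty, and the caller only switches over when a resolution actually happened.

    #include <iostream>
    #include <memory>
    #include <string>
    #include <utility>

    // Simplified stand-ins; the real StorePath/Derivation live in libstore.
    struct StorePath
    {
        std::string name;
    };

    struct Derivation
    {
        bool needsResolution = false;
    };

    struct DerivationResolutionGoal
    {
        // Mirrors the member in the new header: null means "use the original drv".
        std::unique_ptr<std::pair<StorePath, Derivation>> resolvedDrv;

        void run(const StorePath & drvPath, const Derivation & drv)
        {
            if (drv.needsResolution)
                resolvedDrv = std::make_unique<std::pair<StorePath, Derivation>>(
                    StorePath{drvPath.name + "-resolved"}, Derivation{});
        }
    };

    int main()
    {
        StorePath drvPath{"foo.drv"};
        Derivation drv{true};

        DerivationResolutionGoal goal;
        goal.run(drvPath, drv);

        // The calling goal only switches over if resolution actually happened.
        if (goal.resolvedDrv)
            std::cout << "building " << goal.resolvedDrv->first.name << std::endl;
        else
            std::cout << "building " << drvPath.name << std::endl;
        return 0;
    }

In the real code the decision is driven by Derivation::tryResolve and the dependency build results rather than a boolean, but the check on resolvedDrv mirrors the one added to gaveUpOnSubstitution() below.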
--- .../build/derivation-building-goal.cc | 135 ++--------- .../build/derivation-resolution-goal.cc | 210 ++++++++++++++++++ src/libstore/build/worker.cc | 9 + .../store/build/derivation-building-goal.hh | 2 +- .../store/build/derivation-resolution-goal.hh | 82 +++++++ .../include/nix/store/build/worker.hh | 10 +- src/libstore/include/nix/store/meson.build | 1 + src/libstore/meson.build | 1 + tests/functional/build.sh | 9 +- 9 files changed, 334 insertions(+), 125 deletions(-) create mode 100644 src/libstore/build/derivation-resolution-goal.cc create mode 100644 src/libstore/include/nix/store/build/derivation-resolution-goal.hh diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index 001816ca0..bf7f332c7 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -1,4 +1,5 @@ #include "nix/store/build/derivation-building-goal.hh" +#include "nix/store/build/derivation-resolution-goal.hh" #include "nix/store/build/derivation-env-desugar.hh" #include "nix/store/build/derivation-trampoline-goal.hh" #ifndef _WIN32 // TODO enable build hook on Windows @@ -129,46 +130,6 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() { Goals waitees; - std::map, GoalPtr, value_comparison> inputGoals; - - { - std::function, const DerivedPathMap::ChildNode &)> - addWaiteeDerivedPath; - - addWaiteeDerivedPath = [&](ref inputDrv, - const DerivedPathMap::ChildNode & inputNode) { - if (!inputNode.value.empty()) { - auto g = worker.makeGoal( - DerivedPath::Built{ - .drvPath = inputDrv, - .outputs = inputNode.value, - }, - buildMode == bmRepair ? bmRepair : bmNormal); - inputGoals.insert_or_assign(inputDrv, g); - waitees.insert(std::move(g)); - } - for (const auto & [outputName, childNode] : inputNode.childMap) - addWaiteeDerivedPath( - make_ref(SingleDerivedPath::Built{inputDrv, outputName}), childNode); - }; - - for (const auto & [inputDrvPath, inputNode] : drv->inputDrvs.map) { - /* Ensure that pure, non-fixed-output derivations don't - depend on impure derivations. */ - if (experimentalFeatureSettings.isEnabled(Xp::ImpureDerivations) && !drv->type().isImpure() - && !drv->type().isFixed()) { - auto inputDrv = worker.evalStore.readDerivation(inputDrvPath); - if (inputDrv.type().isImpure()) - throw Error( - "pure derivation '%s' depends on impure derivation '%s'", - worker.store.printStorePath(drvPath), - worker.store.printStorePath(inputDrvPath)); - } - - addWaiteeDerivedPath(makeConstantStorePathRef(inputDrvPath), inputNode); - } - } - /* Copy the input sources from the eval store to the build store. @@ -213,88 +174,22 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() /* Determine the full set of input paths. */ - /* First, the input derivations. */ { - auto & fullDrv = *drv; + auto resolutionGoal = worker.makeDerivationResolutionGoal(drvPath, *drv, buildMode); + { + Goals waitees{resolutionGoal}; + co_await await(std::move(waitees)); + } + if (nrFailed != 0) { + co_return doneFailure({BuildResult::Failure::DependencyFailed, "resolution failed"}); + } - auto drvType = fullDrv.type(); - bool resolveDrv = - std::visit( - overloaded{ - [&](const DerivationType::InputAddressed & ia) { - /* must resolve if deferred. */ - return ia.deferred; - }, - [&](const DerivationType::ContentAddressed & ca) { - return !fullDrv.inputDrvs.map.empty() - && (ca.fixed - /* Can optionally resolve if fixed, which is good - for avoiding unnecessary rebuilds. */ - ? 
experimentalFeatureSettings.isEnabled(Xp::CaDerivations) - /* Must resolve if floating and there are any inputs - drvs. */ - : true); - }, - [&](const DerivationType::Impure &) { return true; }}, - drvType.raw) - /* no inputs are outputs of dynamic derivations */ - || std::ranges::any_of(fullDrv.inputDrvs.map.begin(), fullDrv.inputDrvs.map.end(), [](auto & pair) { - return !pair.second.childMap.empty(); - }); + if (resolutionGoal->resolvedDrv) { + auto & [pathResolved, drvResolved] = *resolutionGoal->resolvedDrv; - if (resolveDrv && !fullDrv.inputDrvs.map.empty()) { - experimentalFeatureSettings.require(Xp::CaDerivations); - - /* We are be able to resolve this derivation based on the - now-known results of dependencies. If so, we become a - stub goal aliasing that resolved derivation goal. */ - std::optional attempt = fullDrv.tryResolve( - worker.store, - [&](ref drvPath, const std::string & outputName) -> std::optional { - auto mEntry = get(inputGoals, drvPath); - if (!mEntry) - return std::nullopt; - - auto & buildResult = (*mEntry)->buildResult; - return std::visit( - overloaded{ - [](const BuildResult::Failure &) -> std::optional { return std::nullopt; }, - [&](const BuildResult::Success & success) -> std::optional { - auto i = get(success.builtOutputs, outputName); - if (!i) - return std::nullopt; - - return i->outPath; - }, - }, - buildResult.inner); - }); - if (!attempt) { - /* TODO (impure derivations-induced tech debt) (see below): - The above attempt should have found it, but because we manage - inputDrvOutputs statefully, sometimes it gets out of sync with - the real source of truth (store). So we query the store - directly if there's a problem. */ - attempt = fullDrv.tryResolve(worker.store, &worker.evalStore); - } - assert(attempt); - Derivation drvResolved{std::move(*attempt)}; - - auto pathResolved = writeDerivation(worker.store, drvResolved); - - auto msg = - fmt("resolved derivation: '%s' -> '%s'", - worker.store.printStorePath(drvPath), - worker.store.printStorePath(pathResolved)); - act = std::make_unique( - *logger, - lvlInfo, - actBuildWaiting, - msg, - Logger::Fields{ - worker.store.printStorePath(drvPath), - worker.store.printStorePath(pathResolved), - }); + /* Store the resolved derivation, as part of the record of + what we're actually building */ + writeDerivation(worker.store, drvResolved); /* TODO https://github.com/NixOS/nix/issues/13247 we should let the calling goal do this, so it has a change to pass @@ -383,7 +278,7 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() /* If we get this far, we know no dynamic drvs inputs */ - for (auto & [depDrvPath, depNode] : fullDrv.inputDrvs.map) { + for (auto & [depDrvPath, depNode] : drv->inputDrvs.map) { for (auto & outputName : depNode.value) { /* Don't need to worry about `inputGoals`, because impure derivations are always resolved above. 
Can diff --git a/src/libstore/build/derivation-resolution-goal.cc b/src/libstore/build/derivation-resolution-goal.cc new file mode 100644 index 000000000..584169ef3 --- /dev/null +++ b/src/libstore/build/derivation-resolution-goal.cc @@ -0,0 +1,210 @@ +#include "nix/store/build/derivation-resolution-goal.hh" +#include "nix/store/build/derivation-env-desugar.hh" +#include "nix/store/build/worker.hh" +#include "nix/util/util.hh" +#include "nix/store/common-protocol.hh" +#include "nix/store/globals.hh" + +#include +#include +#include + +#include + +namespace nix { + +DerivationResolutionGoal::DerivationResolutionGoal( + const StorePath & drvPath, const Derivation & drv_, Worker & worker, BuildMode buildMode) + : Goal(worker, resolveDerivation()) + , drvPath(drvPath) +{ + drv = std::make_unique(drv_); + + name = fmt("building of '%s' from in-memory derivation", worker.store.printStorePath(drvPath)); + trace("created"); + + /* Prevent the .chroot directory from being + garbage-collected. (See isActiveTempFile() in gc.cc.) */ + worker.store.addTempRoot(this->drvPath); +} + +void DerivationResolutionGoal::timedOut(Error && ex) {} + +std::string DerivationResolutionGoal::key() +{ + /* Ensure that derivations get built in order of their name, + i.e. a derivation named "aardvark" always comes before + "baboon". And substitution goals always happen before + derivation goals (due to "bd$"). */ + return "rd$" + std::string(drvPath.name()) + "$" + worker.store.printStorePath(drvPath); +} + +/** + * Used for `inputGoals` local variable below + */ +struct value_comparison +{ + template + bool operator()(const ref & lhs, const ref & rhs) const + { + return *lhs < *rhs; + } +}; + +/* At least one of the output paths could not be + produced using a substitute. So we have to build instead. */ +Goal::Co DerivationResolutionGoal::resolveDerivation() +{ + Goals waitees; + + std::map, GoalPtr, value_comparison> inputGoals; + + { + std::function, const DerivedPathMap::ChildNode &)> + addWaiteeDerivedPath; + + addWaiteeDerivedPath = [&](ref inputDrv, + const DerivedPathMap::ChildNode & inputNode) { + if (!inputNode.value.empty()) { + auto g = worker.makeGoal( + DerivedPath::Built{ + .drvPath = inputDrv, + .outputs = inputNode.value, + }, + buildMode == bmRepair ? bmRepair : bmNormal); + inputGoals.insert_or_assign(inputDrv, g); + waitees.insert(std::move(g)); + } + for (const auto & [outputName, childNode] : inputNode.childMap) + addWaiteeDerivedPath( + make_ref(SingleDerivedPath::Built{inputDrv, outputName}), childNode); + }; + + for (const auto & [inputDrvPath, inputNode] : drv->inputDrvs.map) { + /* Ensure that pure, non-fixed-output derivations don't + depend on impure derivations. */ + if (experimentalFeatureSettings.isEnabled(Xp::ImpureDerivations) && !drv->type().isImpure() + && !drv->type().isFixed()) { + auto inputDrv = worker.evalStore.readDerivation(inputDrvPath); + if (inputDrv.type().isImpure()) + throw Error( + "pure derivation '%s' depends on impure derivation '%s'", + worker.store.printStorePath(drvPath), + worker.store.printStorePath(inputDrvPath)); + } + + addWaiteeDerivedPath(makeConstantStorePathRef(inputDrvPath), inputNode); + } + } + + co_await await(std::move(waitees)); + + trace("all inputs realised"); + + if (nrFailed != 0) { + auto msg = + fmt("Cannot build '%s'.\n" + "Reason: " ANSI_RED "%d %s failed" ANSI_NORMAL ".", + Magenta(worker.store.printStorePath(drvPath)), + nrFailed, + nrFailed == 1 ? 
"dependency" : "dependencies"); + msg += showKnownOutputs(worker.store, *drv); + co_return amDone(ecFailed, {BuildError(BuildResult::Failure::DependencyFailed, msg)}); + } + + /* Gather information necessary for computing the closure and/or + running the build hook. */ + + /* Determine the full set of input paths. */ + + /* First, the input derivations. */ + { + auto & fullDrv = *drv; + + auto drvType = fullDrv.type(); + bool resolveDrv = + std::visit( + overloaded{ + [&](const DerivationType::InputAddressed & ia) { + /* must resolve if deferred. */ + return ia.deferred; + }, + [&](const DerivationType::ContentAddressed & ca) { + return !fullDrv.inputDrvs.map.empty() + && (ca.fixed + /* Can optionally resolve if fixed, which is good + for avoiding unnecessary rebuilds. */ + ? experimentalFeatureSettings.isEnabled(Xp::CaDerivations) + /* Must resolve if floating and there are any inputs + drvs. */ + : true); + }, + [&](const DerivationType::Impure &) { return true; }}, + drvType.raw) + /* no inputs are outputs of dynamic derivations */ + || std::ranges::any_of(fullDrv.inputDrvs.map.begin(), fullDrv.inputDrvs.map.end(), [](auto & pair) { + return !pair.second.childMap.empty(); + }); + + if (resolveDrv && !fullDrv.inputDrvs.map.empty()) { + experimentalFeatureSettings.require(Xp::CaDerivations); + + /* We are be able to resolve this derivation based on the + now-known results of dependencies. If so, we become a + stub goal aliasing that resolved derivation goal. */ + std::optional attempt = fullDrv.tryResolve( + worker.store, + [&](ref drvPath, const std::string & outputName) -> std::optional { + auto mEntry = get(inputGoals, drvPath); + if (!mEntry) + return std::nullopt; + + auto & buildResult = (*mEntry)->buildResult; + return std::visit( + overloaded{ + [](const BuildResult::Failure &) -> std::optional { return std::nullopt; }, + [&](const BuildResult::Success & success) -> std::optional { + auto i = get(success.builtOutputs, outputName); + if (!i) + return std::nullopt; + + return i->outPath; + }, + }, + buildResult.inner); + }); + if (!attempt) { + /* TODO (impure derivations-induced tech debt) (see below): + The above attempt should have found it, but because we manage + inputDrvOutputs statefully, sometimes it gets out of sync with + the real source of truth (store). So we query the store + directly if there's a problem. 
*/ + attempt = fullDrv.tryResolve(worker.store, &worker.evalStore); + } + assert(attempt); + + auto pathResolved = writeDerivation(worker.store, *attempt, NoRepair, /*readOnly =*/true); + + auto msg = + fmt("resolved derivation: '%s' -> '%s'", + worker.store.printStorePath(drvPath), + worker.store.printStorePath(pathResolved)); + act = std::make_unique( + *logger, + lvlInfo, + actBuildWaiting, + msg, + Logger::Fields{ + worker.store.printStorePath(drvPath), + worker.store.printStorePath(pathResolved), + }); + + resolvedDrv = + std::make_unique>(std::move(pathResolved), *std::move(attempt)); + } + } + + co_return amDone(ecSuccess, std::nullopt); +} + +} // namespace nix diff --git a/src/libstore/build/worker.cc b/src/libstore/build/worker.cc index 3e6e0bef0..f597abb63 100644 --- a/src/libstore/build/worker.cc +++ b/src/libstore/build/worker.cc @@ -4,6 +4,7 @@ #include "nix/store/build/substitution-goal.hh" #include "nix/store/build/drv-output-substitution-goal.hh" #include "nix/store/build/derivation-goal.hh" +#include "nix/store/build/derivation-resolution-goal.hh" #include "nix/store/build/derivation-building-goal.hh" #include "nix/store/build/derivation-trampoline-goal.hh" #ifndef _WIN32 // TODO Enable building on Windows @@ -80,6 +81,12 @@ std::shared_ptr Worker::makeDerivationGoal( return initGoalIfNeeded(derivationGoals[drvPath][wantedOutput], drvPath, drv, wantedOutput, *this, buildMode); } +std::shared_ptr +Worker::makeDerivationResolutionGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode) +{ + return initGoalIfNeeded(derivationResolutionGoals[drvPath], drvPath, drv, *this, buildMode); +} + std::shared_ptr Worker::makeDerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode) { @@ -158,6 +165,8 @@ void Worker::removeGoal(GoalPtr goal) nix::removeGoal(drvGoal, derivationTrampolineGoals.map); else if (auto drvGoal = std::dynamic_pointer_cast(goal)) nix::removeGoal(drvGoal, derivationGoals); + else if (auto drvResolutionGoal = std::dynamic_pointer_cast(goal)) + nix::removeGoal(drvResolutionGoal, derivationResolutionGoals); else if (auto drvBuildingGoal = std::dynamic_pointer_cast(goal)) nix::removeGoal(drvBuildingGoal, derivationBuildingGoals); else if (auto subGoal = std::dynamic_pointer_cast(goal)) diff --git a/src/libstore/include/nix/store/build/derivation-building-goal.hh b/src/libstore/include/nix/store/build/derivation-building-goal.hh index edb496024..8192dc778 100644 --- a/src/libstore/include/nix/store/build/derivation-building-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-building-goal.hh @@ -155,7 +155,7 @@ private: JobCategory jobCategory() const override { - return JobCategory::Build; + return JobCategory::Administration; }; }; diff --git a/src/libstore/include/nix/store/build/derivation-resolution-goal.hh b/src/libstore/include/nix/store/build/derivation-resolution-goal.hh new file mode 100644 index 000000000..ebaab4f06 --- /dev/null +++ b/src/libstore/include/nix/store/build/derivation-resolution-goal.hh @@ -0,0 +1,82 @@ +#pragma once +///@file + +#include "nix/store/derivations.hh" +#include "nix/store/derivation-options.hh" +#include "nix/store/build/derivation-building-misc.hh" +#include "nix/store/store-api.hh" +#include "nix/store/build/goal.hh" + +namespace nix { + +struct BuilderFailureError; + +/** + * A goal for resolving a derivation. Resolving a derivation (@see + * `Derivation::tryResolve`) simplifies its inputs, replacing + * `inputDrvs` with `inputSrcs. 
+ * + * Conceptually, we resolve all derivations. For input-addressed + * derivations (that don't transtively depend on content-addressed + * derivations), however, we don't actually use the resolved derivation, + * because the output paths would appear invalid (if we tried to verify + * them), since they are computed from the original, unresolved inputs. + * + * That said, if we ever made the new flavor of input-addressing as described + * in issue #9259, then the input-addressing would be based on the resolved + * inputs, and we like the CA case *would* use the output of this goal. + * + * (The point of this discussion is not to randomly stuff information on + * a yet-unimplemented feature (issue #9259) in the codebase, but + * rather, to illustrate that there is no inherent tension between + * explicit derivation resolution and input-addressing in general. That + * tension only exists with the type of input-addressing we've + * historically used.) + */ +struct DerivationResolutionGoal : public Goal +{ + DerivationResolutionGoal( + const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode = bmNormal); + + /** + * If the derivation needed to be resolved, this is resulting + * resolved derivations and its path. + */ + std::unique_ptr> resolvedDrv; + + void timedOut(Error && ex) override; + +private: + + /** + * The path of the derivation. + */ + StorePath drvPath; + + /** + * The derivation stored at drvPath. + */ + std::unique_ptr drv; + + /** + * The remainder is state held during the build. + */ + + BuildMode buildMode; + + std::unique_ptr act; + + std::string key() override; + + /** + * The states. + */ + Co resolveDerivation(); + + JobCategory jobCategory() const override + { + return JobCategory::Administration; + }; +}; + +} // namespace nix diff --git a/src/libstore/include/nix/store/build/worker.hh b/src/libstore/include/nix/store/build/worker.hh index a6de780c1..9660d66b2 100644 --- a/src/libstore/include/nix/store/build/worker.hh +++ b/src/libstore/include/nix/store/build/worker.hh @@ -16,6 +16,7 @@ namespace nix { /* Forward definition. 
*/ struct DerivationTrampolineGoal; struct DerivationGoal; +struct DerivationResolutionGoal; struct DerivationBuildingGoal; struct PathSubstitutionGoal; class DrvOutputSubstitutionGoal; @@ -111,6 +112,7 @@ private: DerivedPathMap>> derivationTrampolineGoals; std::map>> derivationGoals; + std::map> derivationResolutionGoals; std::map> derivationBuildingGoals; std::map> substitutionGoals; std::map> drvOutputSubstitutionGoals; @@ -224,7 +226,13 @@ public: BuildMode buildMode = bmNormal); /** - * @ref DerivationBuildingGoal "derivation goal" + * @ref DerivationResolutionGoal "derivation resolution goal" + */ + std::shared_ptr + makeDerivationResolutionGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode = bmNormal); + + /** + * @ref DerivationBuildingGoal "derivation building goal" */ std::shared_ptr makeDerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode = bmNormal); diff --git a/src/libstore/include/nix/store/meson.build b/src/libstore/include/nix/store/meson.build index 428ef00f3..3e115fc08 100644 --- a/src/libstore/include/nix/store/meson.build +++ b/src/libstore/include/nix/store/meson.build @@ -17,6 +17,7 @@ headers = [ config_pub_h ] + files( 'build/derivation-building-misc.hh', 'build/derivation-env-desugar.hh', 'build/derivation-goal.hh', + 'build/derivation-resolution-goal.hh', 'build/derivation-trampoline-goal.hh', 'build/drv-output-substitution-goal.hh', 'build/goal.hh', diff --git a/src/libstore/meson.build b/src/libstore/meson.build index e3004ebf5..f5eb858ef 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -274,6 +274,7 @@ sources = files( 'build/derivation-check.cc', 'build/derivation-env-desugar.cc', 'build/derivation-goal.cc', + 'build/derivation-resolution-goal.cc', 'build/derivation-trampoline-goal.cc', 'build/drv-output-substitution-goal.cc', 'build/entry-points.cc', diff --git a/tests/functional/build.sh b/tests/functional/build.sh index 0a19ff7da..c9a39438d 100755 --- a/tests/functional/build.sh +++ b/tests/functional/build.sh @@ -178,7 +178,8 @@ test "$(<<<"$out" grep -cE '^error:')" = 4 out="$(nix build -f fod-failing.nix -L x4 2>&1)" && status=0 || status=$? test "$status" = 1 -test "$(<<<"$out" grep -cE '^error:')" = 2 +# Precise number of errors depends on daemon version / goal refactorings +(( "$(<<<"$out" grep -cE '^error:')" >= 2 )) if isDaemonNewer "2.29pre"; then <<<"$out" grepQuiet -E "error: Cannot build '.*-x4\\.drv'" @@ -186,11 +187,13 @@ if isDaemonNewer "2.29pre"; then else <<<"$out" grepQuiet -E "error: 1 dependencies of derivation '.*-x4\\.drv' failed to build" fi -<<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x2\\.drv'" +# Either x2 or x3 could have failed, x4 depends on both symmetrically +<<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x[23]\\.drv'" out="$(nix build -f fod-failing.nix -L x4 --keep-going 2>&1)" && status=0 || status=$? test "$status" = 1 -test "$(<<<"$out" grep -cE '^error:')" = 3 +# Precise number of errors depends on daemon version / goal refactorings +(( "$(<<<"$out" grep -cE '^error:')" >= 3 )) if isDaemonNewer "2.29pre"; then <<<"$out" grepQuiet -E "error: Cannot build '.*-x4\\.drv'" <<<"$out" grepQuiet -E "Reason: 2 dependencies failed." 
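One detail worth spelling out from the new derivation-resolution-goal.cc above: the inputGoals map is keyed by references to derived paths but ordered with the value_comparison functor, so two distinct references to equal paths collapse into a single entry. A minimal standalone illustration of that comparator pattern (using std::shared_ptr in place of Nix's ref type, so this is only an analogy, not the real goal code):

    #include <iostream>
    #include <map>
    #include <memory>
    #include <string>

    // Same idea as the value_comparison struct in the patch: order map keys by
    // the pointed-to value rather than by pointer identity.
    struct value_comparison
    {
        template<typename T>
        bool operator()(const std::shared_ptr<T> & lhs, const std::shared_ptr<T> & rhs) const
        {
            return *lhs < *rhs;
        }
    };

    int main()
    {
        std::map<std::shared_ptr<std::string>, int, value_comparison> inputGoals;

        inputGoals.insert_or_assign(std::make_shared<std::string>("b.drv"), 2);
        inputGoals.insert_or_assign(std::make_shared<std::string>("a.drv"), 1);
        // A different allocation holding an equal value replaces the existing
        // entry instead of creating a duplicate keyed by address:
        inputGoals.insert_or_assign(std::make_shared<std::string>("a.drv"), 3);

        for (const auto & [key, value] : inputGoals)
            std::cout << *key << " -> " << value << std::endl; // a.drv -> 3, b.drv -> 2
        return 0;
    }

With plain pointer keys under the default std::less ordering, the third insert_or_assign would have added a second "a.drv" entry rather than updating the first.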
From 39f6fd9b464298f37a08cfe7485271b9294fd278 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Fri, 26 Sep 2025 01:13:22 -0400 Subject: [PATCH 194/332] Fix #13247 Resolve the derivation before creating a building goal, in a context where we know what output(s) we want. That way we have a chance just to download the outputs we want. Fix #13247 --- .../build/derivation-building-goal.cc | 103 ------------------ src/libstore/build/derivation-goal.cc | 91 ++++++++++++++++ tests/functional/ca/issue-13247.sh | 5 +- 3 files changed, 92 insertions(+), 107 deletions(-) diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index bf7f332c7..98b80862d 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -1,7 +1,5 @@ #include "nix/store/build/derivation-building-goal.hh" -#include "nix/store/build/derivation-resolution-goal.hh" #include "nix/store/build/derivation-env-desugar.hh" -#include "nix/store/build/derivation-trampoline-goal.hh" #ifndef _WIN32 // TODO enable build hook on Windows # include "nix/store/build/hook-instance.hh" # include "nix/store/build/derivation-builder.hh" @@ -175,107 +173,6 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() /* Determine the full set of input paths. */ { - auto resolutionGoal = worker.makeDerivationResolutionGoal(drvPath, *drv, buildMode); - { - Goals waitees{resolutionGoal}; - co_await await(std::move(waitees)); - } - if (nrFailed != 0) { - co_return doneFailure({BuildResult::Failure::DependencyFailed, "resolution failed"}); - } - - if (resolutionGoal->resolvedDrv) { - auto & [pathResolved, drvResolved] = *resolutionGoal->resolvedDrv; - - /* Store the resolved derivation, as part of the record of - what we're actually building */ - writeDerivation(worker.store, drvResolved); - - /* TODO https://github.com/NixOS/nix/issues/13247 we should - let the calling goal do this, so it has a change to pass - just the output(s) it cares about. */ - auto resolvedDrvGoal = - worker.makeDerivationTrampolineGoal(pathResolved, OutputsSpec::All{}, drvResolved, buildMode); - { - Goals waitees{resolvedDrvGoal}; - co_await await(std::move(waitees)); - } - - trace("resolved derivation finished"); - - auto resolvedResult = resolvedDrvGoal->buildResult; - - // No `std::visit` for coroutines yet - if (auto * successP = resolvedResult.tryGetSuccess()) { - auto & success = *successP; - SingleDrvOutputs builtOutputs; - - auto outputHashes = staticOutputHashes(worker.evalStore, *drv); - auto resolvedHashes = staticOutputHashes(worker.store, drvResolved); - - StorePathSet outputPaths; - - for (auto & outputName : drvResolved.outputNames()) { - auto outputHash = get(outputHashes, outputName); - auto resolvedHash = get(resolvedHashes, outputName); - if ((!outputHash) || (!resolvedHash)) - throw Error( - "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolve)", - worker.store.printStorePath(drvPath), - outputName); - - auto realisation = [&] { - auto take1 = get(success.builtOutputs, outputName); - if (take1) - return *take1; - - /* The above `get` should work. But stateful tracking of - outputs in resolvedResult, this can get out of sync with the - store, which is our actual source of truth. For now we just - check the store directly if it fails. 
*/ - auto take2 = worker.evalStore.queryRealisation(DrvOutput{*resolvedHash, outputName}); - if (take2) - return *take2; - - throw Error( - "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/realisation)", - worker.store.printStorePath(pathResolved), - outputName); - }(); - - if (!drv->type().isImpure()) { - auto newRealisation = realisation; - newRealisation.id = DrvOutput{*outputHash, outputName}; - newRealisation.signatures.clear(); - if (!drv->type().isFixed()) { - auto & drvStore = worker.evalStore.isValidPath(drvPath) ? worker.evalStore : worker.store; - newRealisation.dependentRealisations = - drvOutputReferences(worker.store, *drv, realisation.outPath, &drvStore); - } - worker.store.signRealisation(newRealisation); - worker.store.registerDrvOutput(newRealisation); - } - outputPaths.insert(realisation.outPath); - builtOutputs.emplace(outputName, realisation); - } - - runPostBuildHook(worker.store, *logger, drvPath, outputPaths); - - auto status = success.status; - if (status == BuildResult::Success::AlreadyValid) - status = BuildResult::Success::ResolvesToAlreadyValid; - - co_return doneSuccess(success.status, std::move(builtOutputs)); - } else if (resolvedResult.tryGetFailure()) { - co_return doneFailure({ - BuildResult::Failure::DependencyFailed, - "build of resolved derivation '%s' failed", - worker.store.printStorePath(pathResolved), - }); - } else - assert(false); - } - /* If we get this far, we know no dynamic drvs inputs */ for (auto & [depDrvPath, depNode] : drv->inputDrvs.map) { diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index 5dfc334a8..8e924fd4a 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -1,5 +1,6 @@ #include "nix/store/build/derivation-goal.hh" #include "nix/store/build/derivation-building-goal.hh" +#include "nix/store/build/derivation-resolution-goal.hh" #ifndef _WIN32 // TODO enable build hook on Windows # include "nix/store/build/hook-instance.hh" # include "nix/store/build/derivation-builder.hh" @@ -146,6 +147,96 @@ Goal::Co DerivationGoal::haveDerivation() worker.store.printStorePath(drvPath)); } + auto resolutionGoal = worker.makeDerivationResolutionGoal(drvPath, *drv, buildMode); + { + Goals waitees{resolutionGoal}; + co_await await(std::move(waitees)); + } + if (nrFailed != 0) { + co_return doneFailure({BuildResult::Failure::DependencyFailed, "resolution failed"}); + } + + if (resolutionGoal->resolvedDrv) { + auto & [pathResolved, drvResolved] = *resolutionGoal->resolvedDrv; + + /* Store the resolved derivation, as part of the record of + what we're actually building */ + writeDerivation(worker.store, drvResolved); + + auto resolvedDrvGoal = worker.makeDerivationGoal(pathResolved, drvResolved, wantedOutput, buildMode); + { + Goals waitees{resolvedDrvGoal}; + co_await await(std::move(waitees)); + } + + trace("resolved derivation finished"); + + auto resolvedResult = resolvedDrvGoal->buildResult; + + // No `std::visit` for coroutines yet + if (auto * successP = resolvedResult.tryGetSuccess()) { + auto & success = *successP; + auto outputHashes = staticOutputHashes(worker.evalStore, *drv); + auto resolvedHashes = staticOutputHashes(worker.store, drvResolved); + + StorePathSet outputPaths; + + auto outputHash = get(outputHashes, wantedOutput); + auto resolvedHash = get(resolvedHashes, wantedOutput); + if ((!outputHash) || (!resolvedHash)) + throw Error( + "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolve)", + 
worker.store.printStorePath(drvPath), + wantedOutput); + + auto realisation = [&] { + auto take1 = get(success.builtOutputs, wantedOutput); + if (take1) + return *take1; + + /* The above `get` should work. But stateful tracking of + outputs in resolvedResult, this can get out of sync with the + store, which is our actual source of truth. For now we just + check the store directly if it fails. */ + auto take2 = worker.evalStore.queryRealisation(DrvOutput{*resolvedHash, wantedOutput}); + if (take2) + return *take2; + + throw Error( + "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/realisation)", + worker.store.printStorePath(pathResolved), + wantedOutput); + }(); + + if (!drv->type().isImpure()) { + auto newRealisation = realisation; + newRealisation.id = DrvOutput{*outputHash, wantedOutput}; + newRealisation.signatures.clear(); + if (!drv->type().isFixed()) { + auto & drvStore = worker.evalStore.isValidPath(drvPath) ? worker.evalStore : worker.store; + newRealisation.dependentRealisations = + drvOutputReferences(worker.store, *drv, realisation.outPath, &drvStore); + } + worker.store.signRealisation(newRealisation); + worker.store.registerDrvOutput(newRealisation); + } + outputPaths.insert(realisation.outPath); + + auto status = success.status; + if (status == BuildResult::Success::AlreadyValid) + status = BuildResult::Success::ResolvesToAlreadyValid; + + co_return doneSuccess(status, std::move(realisation)); + } else if (resolvedResult.tryGetFailure()) { + co_return doneFailure({ + BuildResult::Failure::DependencyFailed, + "build of resolved derivation '%s' failed", + worker.store.printStorePath(pathResolved), + }); + } else + assert(false); + } + /* Give up on substitution for the output we want, actually build this derivation */ auto g = worker.makeDerivationBuildingGoal(drvPath, *drv, buildMode); diff --git a/tests/functional/ca/issue-13247.sh b/tests/functional/ca/issue-13247.sh index 686d90ced..705919513 100755 --- a/tests/functional/ca/issue-13247.sh +++ b/tests/functional/ca/issue-13247.sh @@ -65,7 +65,4 @@ buildViaSubstitute use-a-prime-more-outputs^first # Should only fetch the output we asked for [[ -d "$(jq -r <"$TEST_ROOT"/a.json '.[0].outputs.out')" ]] [[ -f "$(jq -r <"$TEST_ROOT"/a.json '.[2].outputs.first')" ]] - -# Output should *not* be here, this is the bug -[[ -e "$(jq -r <"$TEST_ROOT"/a.json '.[2].outputs.second')" ]] -skipTest "bug is not yet fixed" +[[ ! -e "$(jq -r <"$TEST_ROOT"/a.json '.[2].outputs.second')" ]] From c97b050a6c212d0b748303080b5604309b7abdce Mon Sep 17 00:00:00 2001 From: John Ericson Date: Fri, 26 Sep 2025 01:40:00 -0400 Subject: [PATCH 195/332] Fix `ca/eval-store.sh` test The refactor in the last commit fixed the bug it was supposed to fix, but introduced a new bug in that sometimes we tried to write a resolved derivation to a store before all its `inputSrcs` were in that store. The solution is to defer writing the derivation until inside `DerivationBuildingGoal`, just before we do an actual build. At this point, we are sure that all inputs in are the store. This does have the side effect of meaning we don't write down the resolved derivation in the substituting case, only the building case, but I think that is actually fine. The store that actually does the building should make a record of what it built by storing the resolved derivation. Other stores that just substitute from that store don't necessary want that derivation however. 
They can trust the substituter to keep the record around, or baring that, they can attempt to re resolve everything, if they need to be audited. --- src/libstore/build/derivation-building-goal.cc | 13 ++++++++++--- src/libstore/build/derivation-goal.cc | 16 +++++++--------- src/libstore/build/worker.cc | 15 ++++++++++----- .../nix/store/build/derivation-building-goal.hh | 17 +++++++++++++++-- .../include/nix/store/build/derivation-goal.hh | 8 ++++++-- src/libstore/include/nix/store/build/worker.hh | 10 +++++++--- 6 files changed, 55 insertions(+), 24 deletions(-) diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index 98b80862d..fa819c96b 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -26,8 +26,8 @@ namespace nix { DerivationBuildingGoal::DerivationBuildingGoal( - const StorePath & drvPath, const Derivation & drv_, Worker & worker, BuildMode buildMode) - : Goal(worker, gaveUpOnSubstitution()) + const StorePath & drvPath, const Derivation & drv_, Worker & worker, BuildMode buildMode, bool storeDerivation) + : Goal(worker, gaveUpOnSubstitution(storeDerivation)) , drvPath(drvPath) , buildMode(buildMode) { @@ -124,7 +124,7 @@ static void runPostBuildHook( /* At least one of the output paths could not be produced using a substitute. So we have to build instead. */ -Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() +Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution(bool storeDerivation) { Goals waitees; @@ -172,6 +172,13 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() /* Determine the full set of input paths. */ + if (storeDerivation) { + assert(drv->inputDrvs.map.empty()); + /* Store the resolved derivation, as part of the record of + what we're actually building */ + writeDerivation(worker.store, *drv); + } + { /* If we get this far, we know no dynamic drvs inputs */ diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index 8e924fd4a..cc3ba2b7b 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -30,8 +30,9 @@ DerivationGoal::DerivationGoal( const Derivation & drv, const OutputName & wantedOutput, Worker & worker, - BuildMode buildMode) - : Goal(worker, haveDerivation()) + BuildMode buildMode, + bool storeDerivation) + : Goal(worker, haveDerivation(storeDerivation)) , drvPath(drvPath) , wantedOutput(wantedOutput) , outputHash{[&] { @@ -65,7 +66,7 @@ std::string DerivationGoal::key() }.to_string(worker.store); } -Goal::Co DerivationGoal::haveDerivation() +Goal::Co DerivationGoal::haveDerivation(bool storeDerivation) { trace("have derivation"); @@ -159,11 +160,8 @@ Goal::Co DerivationGoal::haveDerivation() if (resolutionGoal->resolvedDrv) { auto & [pathResolved, drvResolved] = *resolutionGoal->resolvedDrv; - /* Store the resolved derivation, as part of the record of - what we're actually building */ - writeDerivation(worker.store, drvResolved); - - auto resolvedDrvGoal = worker.makeDerivationGoal(pathResolved, drvResolved, wantedOutput, buildMode); + auto resolvedDrvGoal = + worker.makeDerivationGoal(pathResolved, drvResolved, wantedOutput, buildMode, /*storeDerivation=*/true); { Goals waitees{resolvedDrvGoal}; co_await await(std::move(waitees)); @@ -239,7 +237,7 @@ Goal::Co DerivationGoal::haveDerivation() /* Give up on substitution for the output we want, actually build this derivation */ - auto g = worker.makeDerivationBuildingGoal(drvPath, *drv, buildMode); + 
auto g = worker.makeDerivationBuildingGoal(drvPath, *drv, buildMode, storeDerivation); /* We will finish with it ourselves, as if we were the derivational goal. */ g->preserveException = true; diff --git a/src/libstore/build/worker.cc b/src/libstore/build/worker.cc index f597abb63..53175a8c4 100644 --- a/src/libstore/build/worker.cc +++ b/src/libstore/build/worker.cc @@ -76,9 +76,14 @@ std::shared_ptr Worker::makeDerivationTrampolineGoal( } std::shared_ptr Worker::makeDerivationGoal( - const StorePath & drvPath, const Derivation & drv, const OutputName & wantedOutput, BuildMode buildMode) + const StorePath & drvPath, + const Derivation & drv, + const OutputName & wantedOutput, + BuildMode buildMode, + bool storeDerivation) { - return initGoalIfNeeded(derivationGoals[drvPath][wantedOutput], drvPath, drv, wantedOutput, *this, buildMode); + return initGoalIfNeeded( + derivationGoals[drvPath][wantedOutput], drvPath, drv, wantedOutput, *this, buildMode, storeDerivation); } std::shared_ptr @@ -87,10 +92,10 @@ Worker::makeDerivationResolutionGoal(const StorePath & drvPath, const Derivation return initGoalIfNeeded(derivationResolutionGoals[drvPath], drvPath, drv, *this, buildMode); } -std::shared_ptr -Worker::makeDerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode) +std::shared_ptr Worker::makeDerivationBuildingGoal( + const StorePath & drvPath, const Derivation & drv, BuildMode buildMode, bool storeDerivation) { - return initGoalIfNeeded(derivationBuildingGoals[drvPath], drvPath, drv, *this, buildMode); + return initGoalIfNeeded(derivationBuildingGoals[drvPath], drvPath, drv, *this, buildMode, storeDerivation); } std::shared_ptr diff --git a/src/libstore/include/nix/store/build/derivation-building-goal.hh b/src/libstore/include/nix/store/build/derivation-building-goal.hh index 8192dc778..ab063ff3f 100644 --- a/src/libstore/include/nix/store/build/derivation-building-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-building-goal.hh @@ -29,8 +29,21 @@ typedef enum { rpAccept, rpDecline, rpPostpone } HookReply; */ struct DerivationBuildingGoal : public Goal { + /** + * @param storeDerivation Whether to store the derivation in + * `worker.store`. This is useful for newly-resolved derivations. In this + * case, the derivation was not created a priori, e.g. purely (or close + * enough) from evaluation of the Nix language, but also depends on the + * exact content produced by upstream builds. It is strongly advised to + * have a permanent record of such a resolved derivation in order to + * faithfully reconstruct the build history. + */ DerivationBuildingGoal( - const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode = bmNormal); + const StorePath & drvPath, + const Derivation & drv, + Worker & worker, + BuildMode buildMode = bmNormal, + bool storeDerivation = false); ~DerivationBuildingGoal(); private: @@ -100,7 +113,7 @@ private: /** * The states. */ - Co gaveUpOnSubstitution(); + Co gaveUpOnSubstitution(bool storeDerivation); Co tryToBuild(); /** diff --git a/src/libstore/include/nix/store/build/derivation-goal.hh b/src/libstore/include/nix/store/build/derivation-goal.hh index e05bf1c0b..353e7c489 100644 --- a/src/libstore/include/nix/store/build/derivation-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-goal.hh @@ -40,12 +40,16 @@ struct DerivationGoal : public Goal */ OutputName wantedOutput; + /** + * @param storeDerivation See `DerivationBuildingGoal`. This is just passed along. 
+ */ DerivationGoal( const StorePath & drvPath, const Derivation & drv, const OutputName & wantedOutput, Worker & worker, - BuildMode buildMode = bmNormal); + BuildMode buildMode = bmNormal, + bool storeDerivation = false); ~DerivationGoal() = default; void timedOut(Error && ex) override @@ -80,7 +84,7 @@ private: /** * The states. */ - Co haveDerivation(); + Co haveDerivation(bool storeDerivation); /** * Return `std::nullopt` if the output is unknown, e.g. un unbuilt diff --git a/src/libstore/include/nix/store/build/worker.hh b/src/libstore/include/nix/store/build/worker.hh index 9660d66b2..9767590ac 100644 --- a/src/libstore/include/nix/store/build/worker.hh +++ b/src/libstore/include/nix/store/build/worker.hh @@ -223,7 +223,8 @@ public: const StorePath & drvPath, const Derivation & drv, const OutputName & wantedOutput, - BuildMode buildMode = bmNormal); + BuildMode buildMode = bmNormal, + bool storeDerivation = false); /** * @ref DerivationResolutionGoal "derivation resolution goal" @@ -234,8 +235,11 @@ public: /** * @ref DerivationBuildingGoal "derivation building goal" */ - std::shared_ptr - makeDerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode = bmNormal); + std::shared_ptr makeDerivationBuildingGoal( + const StorePath & drvPath, + const Derivation & drv, + BuildMode buildMode = bmNormal, + bool storeDerivation = false); /** * @ref PathSubstitutionGoal "substitution goal" From 88bd0c25f2f0fda6502653f40e88c6d377bc4617 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Sat, 27 Sep 2025 16:03:43 -0400 Subject: [PATCH 196/332] `Store::registerDrvOutput` make pure virtual It should be the responsibility of implementations that don't implement it to say so. See also PR #9799, and issue #5729 --- src/libstore/dummy-store.cc | 5 +++++ src/libstore/include/nix/store/legacy-ssh-store.hh | 7 ++++++- src/libstore/include/nix/store/store-api.hh | 5 +---- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/src/libstore/dummy-store.cc b/src/libstore/dummy-store.cc index 4b485ca66..43c575263 100644 --- a/src/libstore/dummy-store.cc +++ b/src/libstore/dummy-store.cc @@ -258,6 +258,11 @@ struct DummyStore : virtual Store return path; } + void registerDrvOutput(const Realisation & output) override + { + unsupported("registerDrvOutput"); + } + void narFromPath(const StorePath & path, Sink & sink) override { bool visited = contents.cvisit(path, [&](const auto & kv) { diff --git a/src/libstore/include/nix/store/legacy-ssh-store.hh b/src/libstore/include/nix/store/legacy-ssh-store.hh index 75751e2d1..c91f88a84 100644 --- a/src/libstore/include/nix/store/legacy-ssh-store.hh +++ b/src/libstore/include/nix/store/legacy-ssh-store.hh @@ -109,7 +109,7 @@ struct LegacySSHStore : public virtual Store unsupported("addToStore"); } - virtual StorePath addToStoreFromDump( + StorePath addToStoreFromDump( Source & dump, std::string_view name, FileSerialisationMethod dumpMethod = FileSerialisationMethod::NixArchive, @@ -121,6 +121,11 @@ struct LegacySSHStore : public virtual Store unsupported("addToStore"); } + void registerDrvOutput(const Realisation & output) override + { + unsupported("registerDrvOutput"); + } + public: BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv, BuildMode buildMode) override; diff --git a/src/libstore/include/nix/store/store-api.hh b/src/libstore/include/nix/store/store-api.hh index 6d3f6b8d0..1131ec975 100644 --- a/src/libstore/include/nix/store/store-api.hh +++ b/src/libstore/include/nix/store/store-api.hh 
@@ -598,10 +598,7 @@ public: * floating-ca derivations and their dependencies as there's no way to * retrieve this information otherwise. */ - virtual void registerDrvOutput(const Realisation & output) - { - unsupported("registerDrvOutput"); - } + virtual void registerDrvOutput(const Realisation & output) = 0; virtual void registerDrvOutput(const Realisation & output, CheckSigsFlag checkSigs) { From 9ac306c4dfb1ff94b85656c32ff55c55a8d1d7f7 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Sat, 27 Sep 2025 16:52:36 -0400 Subject: [PATCH 197/332] Expose some core implementation details and write a basic unit test for the dummy store This test currently doesn't use the new-exposed functionality, but with future changes the tests will be expanded and they will be used. --- src/libstore-tests/dummy-store.cc | 27 +++++++++++++ src/libstore-tests/meson.build | 1 + src/libstore/dummy-store.cc | 31 +++++--------- .../include/nix/store/dummy-store-impl.hh | 40 +++++++++++++++++++ src/libstore/include/nix/store/dummy-store.hh | 7 ++++ src/libstore/include/nix/store/meson.build | 1 + 6 files changed, 87 insertions(+), 20 deletions(-) create mode 100644 src/libstore-tests/dummy-store.cc create mode 100644 src/libstore/include/nix/store/dummy-store-impl.hh diff --git a/src/libstore-tests/dummy-store.cc b/src/libstore-tests/dummy-store.cc new file mode 100644 index 000000000..b841d7890 --- /dev/null +++ b/src/libstore-tests/dummy-store.cc @@ -0,0 +1,27 @@ +#include + +#include "nix/store/dummy-store.hh" +#include "nix/store/globals.hh" +#include "nix/store/realisation.hh" + +namespace nix { + +TEST(DummyStore, realisation_read) +{ + initLibStore(/*loadConfig=*/false); + + auto store = [] { + auto cfg = make_ref(StoreReference::Params{}); + cfg->readOnly = false; + return cfg->openStore(); + }(); + + auto drvHash = Hash::parseExplicitFormatUnprefixed( + "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", HashAlgorithm::SHA256, HashFormat::Base16); + + auto outputName = "foo"; + + EXPECT_EQ(store->queryRealisation({drvHash, outputName}), nullptr); +} + +} // namespace nix diff --git a/src/libstore-tests/meson.build b/src/libstore-tests/meson.build index 915c10a38..dd817de32 100644 --- a/src/libstore-tests/meson.build +++ b/src/libstore-tests/meson.build @@ -61,6 +61,7 @@ sources = files( 'derivation.cc', 'derived-path.cc', 'downstream-placeholder.cc', + 'dummy-store.cc', 'http-binary-cache-store.cc', 'legacy-ssh-store.cc', 'local-binary-cache-store.cc', diff --git a/src/libstore/dummy-store.cc b/src/libstore/dummy-store.cc index 4b485ca66..f60a72df4 100644 --- a/src/libstore/dummy-store.cc +++ b/src/libstore/dummy-store.cc @@ -2,7 +2,7 @@ #include "nix/util/archive.hh" #include "nix/util/callback.hh" #include "nix/util/memory-source-accessor.hh" -#include "nix/store/dummy-store.hh" +#include "nix/store/dummy-store-impl.hh" #include @@ -108,24 +108,15 @@ public: } // namespace -struct DummyStore : virtual Store +ref DummyStoreConfig::openStore() const +{ + return openDummyStore(); +} + +struct DummyStoreImpl : DummyStore { using Config = DummyStoreConfig; - ref config; - - struct PathInfoAndContents - { - UnkeyedValidPathInfo info; - ref contents; - }; - - /** - * This is map conceptually owns the file system objects for each - * store object. 
- */ - boost::concurrent_flat_map contents; - /** * This view conceptually just borrows the file systems objects of * each store object from `contents`, and combines them together @@ -135,9 +126,9 @@ struct DummyStore : virtual Store */ ref wholeStoreView = make_ref(); - DummyStore(ref config) + DummyStoreImpl(ref config) : Store{*config} - , config(config) + , DummyStore{config} { wholeStoreView->setPathDisplay(config->storeDir); } @@ -289,9 +280,9 @@ struct DummyStore : virtual Store } }; -ref DummyStore::Config::openStore() const +ref DummyStore::Config::openDummyStore() const { - return make_ref(ref{shared_from_this()}); + return make_ref(ref{shared_from_this()}); } static RegisterStoreImplementation regDummyStore; diff --git a/src/libstore/include/nix/store/dummy-store-impl.hh b/src/libstore/include/nix/store/dummy-store-impl.hh new file mode 100644 index 000000000..e05bb94ff --- /dev/null +++ b/src/libstore/include/nix/store/dummy-store-impl.hh @@ -0,0 +1,40 @@ +#pragma once +///@file + +#include "nix/store/dummy-store.hh" + +#include + +namespace nix { + +struct MemorySourceAccessor; + +/** + * Enough of the Dummy Store exposed for sake of writing unit tests + */ +struct DummyStore : virtual Store +{ + using Config = DummyStoreConfig; + + ref config; + + struct PathInfoAndContents + { + UnkeyedValidPathInfo info; + ref contents; + }; + + /** + * This is map conceptually owns the file system objects for each + * store object. + */ + boost::concurrent_flat_map contents; + + DummyStore(ref config) + : Store{*config} + , config(config) + { + } +}; + +} // namespace nix diff --git a/src/libstore/include/nix/store/dummy-store.hh b/src/libstore/include/nix/store/dummy-store.hh index e93aad366..95c09078c 100644 --- a/src/libstore/include/nix/store/dummy-store.hh +++ b/src/libstore/include/nix/store/dummy-store.hh @@ -5,6 +5,8 @@ namespace nix { +struct DummyStore; + struct DummyStoreConfig : public std::enable_shared_from_this, virtual StoreConfig { DummyStoreConfig(const Params & params) @@ -42,6 +44,11 @@ struct DummyStoreConfig : public std::enable_shared_from_this, return {"dummy"}; } + /** + * Same as `openStore`, just with a more precise return type. 
+ */ + ref openDummyStore() const; + ref openStore() const override; StoreReference getReference() const override diff --git a/src/libstore/include/nix/store/meson.build b/src/libstore/include/nix/store/meson.build index 428ef00f3..ac72f04e2 100644 --- a/src/libstore/include/nix/store/meson.build +++ b/src/libstore/include/nix/store/meson.build @@ -34,6 +34,7 @@ headers = [ config_pub_h ] + files( 'derived-path-map.hh', 'derived-path.hh', 'downstream-placeholder.hh', + 'dummy-store-impl.hh', 'dummy-store.hh', 'export-import.hh', 'filetransfer.hh', From 32cbf5f55af9eb9d10493f06d42f723ef0657064 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 19:52:44 -0700 Subject: [PATCH 198/332] shellcheck fix: tests/functional/pass-as-file.sh --- maintainers/flake-module.nix | 1 - tests/functional/pass-as-file.sh | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index e9a820d72..ef345bbe4 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/pass-as-file\.sh$'' ''^tests/functional/path-from-hash-part\.sh$'' ''^tests/functional/path-info\.sh$'' ''^tests/functional/placeholders\.sh$'' diff --git a/tests/functional/pass-as-file.sh b/tests/functional/pass-as-file.sh index 66a8e588e..68f68b8cf 100755 --- a/tests/functional/pass-as-file.sh +++ b/tests/functional/pass-as-file.sh @@ -4,6 +4,7 @@ source common.sh clearStoreIfPossible +# shellcheck disable=SC2034 outPath=$(nix-build --no-out-link -E " with import ${config_nix}; From 112c9d8f547446e28df5d01d91be3a17d8f12bc6 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 19:53:33 -0700 Subject: [PATCH 199/332] shellcheck fix: tests/functional/path-from-hash-part.sh --- maintainers/flake-module.nix | 1 - tests/functional/path-from-hash-part.sh | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index ef345bbe4..06915c2ed 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/path-from-hash-part\.sh$'' ''^tests/functional/path-info\.sh$'' ''^tests/functional/placeholders\.sh$'' ''^tests/functional/post-hook\.sh$'' diff --git a/tests/functional/path-from-hash-part.sh b/tests/functional/path-from-hash-part.sh index 41d1b7410..0b258a6ea 100755 --- a/tests/functional/path-from-hash-part.sh +++ b/tests/functional/path-from-hash-part.sh @@ -4,9 +4,9 @@ source common.sh path=$(nix build --no-link --print-out-paths -f simple.nix) -hash_part=$(basename $path) +hash_part=$(basename "$path") hash_part=${hash_part:0:32} -path2=$(nix store path-from-hash-part $hash_part) +path2=$(nix store path-from-hash-part "$hash_part") -[[ $path = $path2 ]] +[[ $path = "$path2" ]] From c82aa04a3d80b9d42d71f3d075119b30184da321 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 19:53:54 -0700 Subject: [PATCH 200/332] shellcheck fix: tests/functional/path-info.sh --- maintainers/flake-module.nix | 1 - tests/functional/path-info.sh | 12 ++++++------ 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 06915c2ed..3c37f58f6 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't 
linted these files yet - ''^tests/functional/path-info\.sh$'' ''^tests/functional/placeholders\.sh$'' ''^tests/functional/post-hook\.sh$'' ''^tests/functional/pure-eval\.sh$'' diff --git a/tests/functional/path-info.sh b/tests/functional/path-info.sh index 8597de683..463ac6214 100755 --- a/tests/functional/path-info.sh +++ b/tests/functional/path-info.sh @@ -2,14 +2,14 @@ source common.sh -echo foo > $TEST_ROOT/foo -foo=$(nix store add-file $TEST_ROOT/foo) +echo foo > "$TEST_ROOT"/foo +foo=$(nix store add-file "$TEST_ROOT"/foo) -echo bar > $TEST_ROOT/bar -bar=$(nix store add-file $TEST_ROOT/bar) +echo bar > "$TEST_ROOT"/bar +bar=$(nix store add-file "$TEST_ROOT"/bar) -echo baz > $TEST_ROOT/baz -baz=$(nix store add-file $TEST_ROOT/baz) +echo baz > "$TEST_ROOT"/baz +baz=$(nix store add-file "$TEST_ROOT"/baz) nix-store --delete "$baz" diff --unified --color=always \ From 1aaa3dafeee303062fbcf3c7c266fde9101f2db2 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 19:54:29 -0700 Subject: [PATCH 201/332] shellcheck fix: tests/functional/placeholders.sh --- maintainers/flake-module.nix | 1 - tests/functional/placeholders.sh | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 3c37f58f6..f7cf94e54 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/placeholders\.sh$'' ''^tests/functional/post-hook\.sh$'' ''^tests/functional/pure-eval\.sh$'' ''^tests/functional/push-to-store-old\.sh$'' diff --git a/tests/functional/placeholders.sh b/tests/functional/placeholders.sh index 374203af8..5791d8006 100755 --- a/tests/functional/placeholders.sh +++ b/tests/functional/placeholders.sh @@ -4,6 +4,7 @@ source common.sh clearStoreIfPossible +# shellcheck disable=SC2016 nix-build --no-out-link -E ' with import '"${config_nix}"'; From bcd8311ec6b9893697e42eb44f3f205a121673ed Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 19:55:03 -0700 Subject: [PATCH 202/332] shellcheck fix: tests/functional/post-hook.sh --- maintainers/flake-module.nix | 1 - tests/functional/post-hook.sh | 13 ++++++++----- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index f7cf94e54..0caa97b23 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/post-hook\.sh$'' ''^tests/functional/pure-eval\.sh$'' ''^tests/functional/push-to-store-old\.sh$'' ''^tests/functional/push-to-store\.sh$'' diff --git a/tests/functional/post-hook.sh b/tests/functional/post-hook.sh index 94a6d0d69..67bb46377 100755 --- a/tests/functional/post-hook.sh +++ b/tests/functional/post-hook.sh @@ -6,10 +6,10 @@ TODO_NixOS clearStore -rm -f $TEST_ROOT/result +rm -f "$TEST_ROOT"/result export REMOTE_STORE=file:$TEST_ROOT/remote_store -echo 'require-sigs = false' >> $test_nix_conf +echo 'require-sigs = false' >> "$test_nix_conf" restartDaemon @@ -20,11 +20,14 @@ else fi # Build the dependencies and push them to the remote store. 
-nix-build -o $TEST_ROOT/result dependencies.nix --post-build-hook "$pushToStore" +nix-build -o "$TEST_ROOT"/result dependencies.nix --post-build-hook "$pushToStore" # See if all outputs are passed to the post-build hook by only specifying one # We're not able to test CA tests this way -export BUILD_HOOK_ONLY_OUT_PATHS=$([ ! $NIX_TESTS_CA_BY_DEFAULT ]) -nix-build -o $TEST_ROOT/result-mult multiple-outputs.nix -A a.first --post-build-hook "$pushToStore" +# +# FIXME: This export is hiding error condition +# shellcheck disable=SC2155 +export BUILD_HOOK_ONLY_OUT_PATHS=$([ ! "$NIX_TESTS_CA_BY_DEFAULT" ]) +nix-build -o "$TEST_ROOT"/result-mult multiple-outputs.nix -A a.first --post-build-hook "$pushToStore" clearStore From b951e6e1ed555719157e982f0493faf97f504322 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 19:56:20 -0700 Subject: [PATCH 203/332] shellcheck fix: tests/functional/pure-eval.sh --- tests/functional/pure-eval.sh | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/tests/functional/pure-eval.sh b/tests/functional/pure-eval.sh index 45a65f9ab..b769b2150 100755 --- a/tests/functional/pure-eval.sh +++ b/tests/functional/pure-eval.sh @@ -10,6 +10,7 @@ nix eval --expr 'assert 1 + 2 == 3; true' missingImpureErrorMsg=$(! nix eval --expr 'builtins.readFile ./pure-eval.sh' 2>&1) +# shellcheck disable=SC1111 echo "$missingImpureErrorMsg" | grepQuiet -- --impure || \ fail "The error message should mention the “--impure” flag to unblock users" @@ -25,14 +26,15 @@ echo "$missingImpureErrorMsg" | grepQuiet -- --impure || \ (! nix eval --expr "(import (builtins.fetchurl { url = file://$(pwd)/pure-eval.nix; })).x") nix eval --expr "(import (builtins.fetchurl { url = file://$(pwd)/pure-eval.nix; sha256 = \"$(nix hash file pure-eval.nix --type sha256)\"; })).x" -rm -rf $TEST_ROOT/eval-out -nix eval --store dummy:// --write-to $TEST_ROOT/eval-out --expr '{ x = "foo" + "bar"; y = { z = "bla"; }; }' -[[ $(cat $TEST_ROOT/eval-out/x) = foobar ]] -[[ $(cat $TEST_ROOT/eval-out/y/z) = bla ]] +rm -rf "$TEST_ROOT"/eval-out +nix eval --store dummy:// --write-to "$TEST_ROOT"/eval-out --expr '{ x = "foo" + "bar"; y = { z = "bla"; }; }' +[[ $(cat "$TEST_ROOT"/eval-out/x) = foobar ]] +[[ $(cat "$TEST_ROOT"/eval-out/y/z) = bla ]] -rm -rf $TEST_ROOT/eval-out -(! nix eval --store dummy:// --write-to $TEST_ROOT/eval-out --expr '{ "." = "bla"; }') +rm -rf "$TEST_ROOT"/eval-out +(! nix eval --store dummy:// --write-to "$TEST_ROOT"/eval-out --expr '{ "." = "bla"; }') +# shellcheck disable=SC2088 (! 
nix eval --expr '~/foo') expectStderr 0 nix eval --expr "/some/absolute/path" \ From a11195d6cefbbc3cf5140f1024fd69c54b30b6d9 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 19:56:51 -0700 Subject: [PATCH 204/332] shellcheck fix: tests/functional/push-to-store-old.sh --- maintainers/flake-module.nix | 2 -- 1 file changed, 2 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 0caa97b23..285a76f59 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,8 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/pure-eval\.sh$'' - ''^tests/functional/push-to-store-old\.sh$'' ''^tests/functional/push-to-store\.sh$'' ''^tests/functional/read-only-store\.sh$'' ''^tests/functional/readfile-context\.sh$'' From 1492c1bc5dd1eb39326bae5e3bcae67813d17b7c Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 19:57:07 -0700 Subject: [PATCH 205/332] shellcheck fix: tests/functional/push-to-store.sh --- maintainers/flake-module.nix | 1 - 1 file changed, 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 285a76f59..392ba4387 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/push-to-store\.sh$'' ''^tests/functional/read-only-store\.sh$'' ''^tests/functional/readfile-context\.sh$'' ''^tests/functional/recursive\.sh$'' From c8a77196148f9027caaa885ee96d0c45b9ec5a7e Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 19:57:29 -0700 Subject: [PATCH 206/332] shellcheck fix: tests/functional/read-only-store.sh --- maintainers/flake-module.nix | 1 - tests/functional/read-only-store.sh | 8 ++++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 392ba4387..24d2e08d4 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/read-only-store\.sh$'' ''^tests/functional/readfile-context\.sh$'' ''^tests/functional/recursive\.sh$'' ''^tests/functional/referrers\.sh$'' diff --git a/tests/functional/read-only-store.sh b/tests/functional/read-only-store.sh index ea96bba41..8ccca2192 100755 --- a/tests/functional/read-only-store.sh +++ b/tests/functional/read-only-store.sh @@ -12,10 +12,10 @@ clearStore happy () { # We can do a read-only query just fine with a read-only store - nix --store local?read-only=true path-info $dummyPath + nix --store local?read-only=true path-info "$dummyPath" # `local://` also works. 
- nix --store local://?read-only=true path-info $dummyPath + nix --store local://?read-only=true path-info "$dummyPath" # We can "write" an already-present store-path a read-only store, because no IO is actually required nix-store --store local?read-only=true --add dummy @@ -37,8 +37,8 @@ happy ## Testing read-only mode with an underlying store that is actually read-only # Ensure store is actually read-only -chmod -R -w $TEST_ROOT/store -chmod -R -w $TEST_ROOT/var +chmod -R -w "$TEST_ROOT"/store +chmod -R -w "$TEST_ROOT"/var # Make sure we fail on add operations on the read-only store # This is only for adding files that are not *already* in the store From 8a36cf4422a094ba1b60a5ad8afaf632ac8236ae Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 19:58:11 -0700 Subject: [PATCH 207/332] shellcheck fix: tests/functional/readfile-context.sh --- maintainers/flake-module.nix | 1 - tests/functional/readfile-context.sh | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 24d2e08d4..57e0f9997 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/readfile-context\.sh$'' ''^tests/functional/recursive\.sh$'' ''^tests/functional/referrers\.sh$'' ''^tests/functional/remote-store\.sh$'' diff --git a/tests/functional/readfile-context.sh b/tests/functional/readfile-context.sh index cb9ef6234..effe483dc 100755 --- a/tests/functional/readfile-context.sh +++ b/tests/functional/readfile-context.sh @@ -9,12 +9,12 @@ clearStore outPath=$(nix-build --no-out-link readfile-context.nix) # Set a GC root. -ln -s $outPath "$NIX_STATE_DIR/gcroots/foo" +ln -s "$outPath" "$NIX_STATE_DIR/gcroots/foo" # Check that file exists. -[ "$(cat $(cat $outPath))" = "Hello World!" ] +[ "$(cat "$(cat "$outPath")")" = "Hello World!" ] nix-collect-garbage # Check that file still exists. -[ "$(cat $(cat $outPath))" = "Hello World!" ] +[ "$(cat "$(cat "$outPath")")" = "Hello World!" 
] From 5d1333bf4bf6277f1a10643a3b82d9f15ebcb7ea Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 19:59:03 -0700 Subject: [PATCH 208/332] shellcheck fix: tests/functional/recursive.sh --- maintainers/flake-module.nix | 1 - tests/functional/recursive.sh | 15 ++++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 57e0f9997..5bafcd640 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/recursive\.sh$'' ''^tests/functional/referrers\.sh$'' ''^tests/functional/remote-store\.sh$'' ''^tests/functional/repair\.sh$'' diff --git a/tests/functional/recursive.sh b/tests/functional/recursive.sh index 640fb92d2..9115aa775 100755 --- a/tests/functional/recursive.sh +++ b/tests/functional/recursive.sh @@ -9,15 +9,16 @@ restartDaemon clearStore -rm -f $TEST_ROOT/result +rm -f "$TEST_ROOT"/result -export unreachable=$(nix store add-path ./recursive.sh) +unreachable=$(nix store add-path ./recursive.sh) +export unreachable -NIX_BIN_DIR=$(dirname $(type -p nix)) nix --extra-experimental-features 'nix-command recursive-nix' build -o $TEST_ROOT/result -L --impure --file ./recursive.nix +NIX_BIN_DIR=$(dirname "$(type -p nix)") nix --extra-experimental-features 'nix-command recursive-nix' build -o "$TEST_ROOT"/result -L --impure --file ./recursive.nix -[[ $(cat $TEST_ROOT/result/inner1) =~ blaat ]] +[[ $(cat "$TEST_ROOT"/result/inner1) =~ blaat ]] # Make sure the recursively created paths are in the closure. -nix path-info -r $TEST_ROOT/result | grep foobar -nix path-info -r $TEST_ROOT/result | grep fnord -nix path-info -r $TEST_ROOT/result | grep inner1 +nix path-info -r "$TEST_ROOT"/result | grep foobar +nix path-info -r "$TEST_ROOT"/result | grep fnord +nix path-info -r "$TEST_ROOT"/result | grep inner1 From 7ed40119906e60ff2548c3ac3bc0265b158e02c7 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 20:00:38 -0700 Subject: [PATCH 209/332] shellcheck fix: tests/functional/referrers.sh --- maintainers/flake-module.nix | 1 - tests/functional/referrers.sh | 18 ++++++++++-------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 5bafcd640..3f27668c8 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/referrers\.sh$'' ''^tests/functional/remote-store\.sh$'' ''^tests/functional/repair\.sh$'' ''^tests/functional/restricted\.sh$'' diff --git a/tests/functional/referrers.sh b/tests/functional/referrers.sh index 411cdb7c1..ae6b39ae1 100755 --- a/tests/functional/referrers.sh +++ b/tests/functional/referrers.sh @@ -11,32 +11,34 @@ clearStore max=500 reference=$NIX_STORE_DIR/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-bla -touch $reference -(echo $reference && echo && echo 0) | nix-store --register-validity +touch "$reference" +(echo "$reference" && echo && echo 0) | nix-store --register-validity echo "making registration..." 
set +x +# shellcheck disable=SC2004 for ((n = 0; n < $max; n++)); do storePath=$NIX_STORE_DIR/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-$n - echo -n > $storePath + echo -n > "$storePath" ref2=$NIX_STORE_DIR/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-$((n+1)) if test $((n+1)) = $max; then ref2=$reference fi - echo $storePath; echo; echo 2; echo $reference; echo $ref2 -done > $TEST_ROOT/reg_info + echo "$storePath"; echo; echo 2; echo "$reference"; echo "$ref2" +done > "$TEST_ROOT"/reg_info set -x echo "registering..." -nix-store --register-validity < $TEST_ROOT/reg_info +nix-store --register-validity < "$TEST_ROOT"/reg_info echo "collecting garbage..." -ln -sfn $reference "$NIX_STATE_DIR/gcroots/ref" +ln -sfn "$reference" "$NIX_STATE_DIR/gcroots/ref" nix-store --gc -if [ -n "$(type -p sqlite3)" -a "$(sqlite3 $NIX_STATE_DIR/db/db.sqlite 'select count(*) from Refs')" -ne 0 ]; then +# shellcheck disable=SC2166 +if [ -n "$(type -p sqlite3)" -a "$(sqlite3 "$NIX_STATE_DIR"/db/db.sqlite 'select count(*) from Refs')" -ne 0 ]; then echo "referrers not cleaned up" exit 1 fi From 06f21e101f9180926027bb1c1c2043d9fc904b61 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 20:02:04 -0700 Subject: [PATCH 210/332] shellcheck fix: tests/functional/remote-store.sh --- tests/functional/remote-store.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/functional/remote-store.sh b/tests/functional/remote-store.sh index 841b6b27a..f125ae137 100755 --- a/tests/functional/remote-store.sh +++ b/tests/functional/remote-store.sh @@ -7,10 +7,10 @@ TODO_NixOS clearStore # Ensure "fake ssh" remote store works just as legacy fake ssh would. -nix --store ssh-ng://localhost?remote-store=$TEST_ROOT/other-store doctor +nix --store ssh-ng://localhost?remote-store="$TEST_ROOT"/other-store doctor # Ensure that store info trusted works with ssh-ng:// -nix --store ssh-ng://localhost?remote-store=$TEST_ROOT/other-store store info --json | jq -e '.trusted' +nix --store ssh-ng://localhost?remote-store="$TEST_ROOT"/other-store store info --json | jq -e '.trusted' startDaemon @@ -31,8 +31,8 @@ NIX_REMOTE_=$NIX_REMOTE $SHELL ./user-envs-test-case.sh nix-store --gc --max-freed 1K -nix-store --dump-db > $TEST_ROOT/d1 -NIX_REMOTE= nix-store --dump-db > $TEST_ROOT/d2 -cmp $TEST_ROOT/d1 $TEST_ROOT/d2 +nix-store --dump-db > "$TEST_ROOT"/d1 +NIX_REMOTE='' nix-store --dump-db > "$TEST_ROOT"/d2 +cmp "$TEST_ROOT"/d1 "$TEST_ROOT"/d2 killDaemon From d35d86da89b14b19eb0855a357fa5e945d2ce4f2 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 20:04:26 -0700 Subject: [PATCH 211/332] shellcheck fix: tests/functional/repair.sh --- maintainers/flake-module.nix | 2 - tests/functional/repair.sh | 84 ++++++++++++++++++++---------------- 2 files changed, 46 insertions(+), 40 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 3f27668c8..12bb8375e 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,8 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/remote-store\.sh$'' - ''^tests/functional/repair\.sh$'' ''^tests/functional/restricted\.sh$'' ''^tests/functional/search\.sh$'' ''^tests/functional/secure-drv-outputs\.sh$'' diff --git a/tests/functional/repair.sh b/tests/functional/repair.sh index 1f6004b2c..a90bdcfd5 100755 --- a/tests/functional/repair.sh +++ b/tests/functional/repair.sh @@ -8,39 +8,43 @@ TODO_NixOS clearStore -path=$(nix-build dependencies.nix -o $TEST_ROOT/result) 
-path2=$(nix-store -qR $path | grep input-2) +path=$(nix-build dependencies.nix -o "$TEST_ROOT"/result) +path2=$(nix-store -qR "$path" | grep input-2) nix-store --verify --check-contents -v -hash=$(nix-hash $path2) +hash=$(nix-hash "$path2") # Corrupt a path and check whether nix-build --repair can fix it. -chmod u+w $path2 -touch $path2/bad +chmod u+w "$path2" +touch "$path2"/bad (! nix-store --verify --check-contents -v) # The path can be repaired by rebuilding the derivation. nix-store --verify --check-contents --repair -(! [ -e $path2/bad ]) -(! [ -w $path2 ]) +# shellcheck disable=SC2235 +(! [ -e "$path2"/bad ]) +# shellcheck disable=SC2235 +(! [ -w "$path2" ]) -nix-store --verify-path $path2 +nix-store --verify-path "$path2" # Re-corrupt and delete the deriver. Now --verify --repair should # not work. -chmod u+w $path2 -touch $path2/bad +chmod u+w "$path2" +touch "$path2"/bad -nix-store --delete $(nix-store -q --referrers-closure $(nix-store -qd $path2)) +# shellcheck disable=SC2046 +nix-store --delete $(nix-store -q --referrers-closure "$(nix-store -qd "$path2")") (! nix-store --verify --check-contents --repair) -nix-build dependencies.nix -o $TEST_ROOT/result --repair +nix-build dependencies.nix -o "$TEST_ROOT"/result --repair -if [ "$(nix-hash $path2)" != "$hash" -o -e $path2/bad ]; then +# shellcheck disable=SC2166 +if [ "$(nix-hash "$path2")" != "$hash" -o -e "$path2"/bad ]; then echo "path not repaired properly" >&2 exit 1 fi @@ -49,79 +53,83 @@ fi # --verify can fix it. clearCache -nix copy --to file://$cacheDir $path +nix copy --to file://"$cacheDir" "$path" -chmod u+w $path2 -rm -rf $path2 +chmod u+w "$path2" +rm -rf "$path2" nix-store --verify --check-contents --repair --substituters "file://$cacheDir" --no-require-sigs -if [ "$(nix-hash $path2)" != "$hash" -o -e $path2/bad ]; then +# shellcheck disable=SC2166 +if [ "$(nix-hash "$path2")" != "$hash" -o -e "$path2"/bad ]; then echo "path not repaired properly" >&2 exit 1 fi # Check --verify-path and --repair-path. -nix-store --verify-path $path2 +nix-store --verify-path "$path2" -chmod u+w $path2 -rm -rf $path2 +chmod u+w "$path2" +rm -rf "$path2" -if nix-store --verify-path $path2; then +if nix-store --verify-path "$path2"; then echo "nix-store --verify-path succeeded unexpectedly" >&2 exit 1 fi -nix-store --repair-path $path2 --substituters "file://$cacheDir" --no-require-sigs +nix-store --repair-path "$path2" --substituters "file://$cacheDir" --no-require-sigs -if [ "$(nix-hash $path2)" != "$hash" -o -e $path2/bad ]; then +# shellcheck disable=SC2166 +if [ "$(nix-hash "$path2")" != "$hash" -o -e "$path2"/bad ]; then echo "path not repaired properly" >&2 exit 1 fi # Check that --repair-path also checks content of optimised symlinks (1/2) -nix-store --verify-path $path2 +nix-store --verify-path "$path2" if (! 
nix-store --optimize); then echo "nix-store --optimize failed to optimize the store" >&2 exit 1 fi -chmod u+w $path2/bar -echo 'rabrab' > $path2/bar # different length +chmod u+w "$path2"/bar +echo 'rabrab' > "$path2"/bar # different length -if nix-store --verify-path $path2; then +if nix-store --verify-path "$path2"; then echo "nix-store --verify-path did not detect .links file corruption" >&2 exit 1 fi -nix-store --repair-path $path2 --option auto-optimise-store true +nix-store --repair-path "$path2" --option auto-optimise-store true -if [ "$(nix-hash $path2)" != "$hash" -o "BAR" != "$(< $path2/bar)" ]; then +# shellcheck disable=SC2166 +if [ "$(nix-hash "$path2")" != "$hash" -o "BAR" != "$(< "$path2"/bar)" ]; then echo "path not repaired properly" >&2 exit 1 fi # Check that --repair-path also checks content of optimised symlinks (2/2) -nix-store --verify-path $path2 +nix-store --verify-path "$path2" if (! nix-store --optimize); then echo "nix-store --optimize failed to optimize the store" >&2 exit 1 fi -chmod u+w $path2 -chmod u+w $path2/bar -sed -e 's/./X/g' < $path2/bar > $path2/tmp # same length, different content. -cp $path2/tmp $path2/bar -rm $path2/tmp +chmod u+w "$path2" +chmod u+w "$path2"/bar +sed -e 's/./X/g' < "$path2"/bar > "$path2"/tmp # same length, different content. +cp "$path2"/tmp "$path2"/bar +rm "$path2"/tmp -if nix-store --verify-path $path2; then +if nix-store --verify-path "$path2"; then echo "nix-store --verify-path did not detect .links file corruption" >&2 exit 1 fi -nix-store --repair-path $path2 --substituters "file://$cacheDir" --no-require-sigs --option auto-optimise-store true +nix-store --repair-path "$path2" --substituters "file://$cacheDir" --no-require-sigs --option auto-optimise-store true -if [ "$(nix-hash $path2)" != "$hash" -o "BAR" != "$(< $path2/bar)" ]; then +# shellcheck disable=SC2166 +if [ "$(nix-hash "$path2")" != "$hash" -o "BAR" != "$(< "$path2"/bar)" ]; then echo "path not repaired properly" >&2 exit 1 fi From b42ed6a74d281763e32285ae8e96900294cb4173 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 20:05:29 -0700 Subject: [PATCH 212/332] shellcheck fix: tests/functional/restricted.sh --- maintainers/flake-module.nix | 1 - tests/functional/restricted.sh | 22 ++++++++++++---------- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 12bb8375e..c56599785 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/restricted\.sh$'' ''^tests/functional/search\.sh$'' ''^tests/functional/secure-drv-outputs\.sh$'' ''^tests/functional/selfref-gc\.sh$'' diff --git a/tests/functional/restricted.sh b/tests/functional/restricted.sh index 00ee4ddc8..2f65f15fe 100755 --- a/tests/functional/restricted.sh +++ b/tests/functional/restricted.sh @@ -40,30 +40,32 @@ nix eval --raw --expr "builtins.fetchurl file://${_NIX_TEST_SOURCE_DIR}/restrict (! nix eval --raw --expr "fetchGit git://github.com/NixOS/patchelf.git" --impure --restrict-eval) ln -sfn "${_NIX_TEST_SOURCE_DIR}/restricted.nix" "$TEST_ROOT/restricted.nix" -[[ $(nix-instantiate --eval $TEST_ROOT/restricted.nix) == 3 ]] -(! nix-instantiate --eval --restrict-eval $TEST_ROOT/restricted.nix) -(! nix-instantiate --eval --restrict-eval $TEST_ROOT/restricted.nix -I $TEST_ROOT) -(! nix-instantiate --eval --restrict-eval $TEST_ROOT/restricted.nix -I .) 
+[[ $(nix-instantiate --eval "$TEST_ROOT"/restricted.nix) == 3 ]] +(! nix-instantiate --eval --restrict-eval "$TEST_ROOT"/restricted.nix) +(! nix-instantiate --eval --restrict-eval "$TEST_ROOT"/restricted.nix -I "$TEST_ROOT") +(! nix-instantiate --eval --restrict-eval "$TEST_ROOT"/restricted.nix -I .) nix-instantiate --eval --restrict-eval "$TEST_ROOT/restricted.nix" -I "$TEST_ROOT" -I "${_NIX_TEST_SOURCE_DIR}" +# shellcheck disable=SC2016 [[ $(nix eval --raw --impure --restrict-eval -I . --expr 'builtins.readFile "${import ./simple.nix}/hello"') == 'Hello World!' ]] # Check that we can't follow a symlink outside of the allowed paths. -mkdir -p $TEST_ROOT/tunnel.d $TEST_ROOT/foo2 -ln -sfn .. $TEST_ROOT/tunnel.d/tunnel -echo foo > $TEST_ROOT/bar +mkdir -p "$TEST_ROOT"/tunnel.d "$TEST_ROOT"/foo2 +ln -sfn .. "$TEST_ROOT"/tunnel.d/tunnel +echo foo > "$TEST_ROOT"/bar -expectStderr 1 nix-instantiate --restrict-eval --eval -E "let __nixPath = [ { prefix = \"foo\"; path = $TEST_ROOT/tunnel.d; } ]; in builtins.readFile " -I $TEST_ROOT/tunnel.d | grepQuiet "forbidden in restricted mode" +expectStderr 1 nix-instantiate --restrict-eval --eval -E "let __nixPath = [ { prefix = \"foo\"; path = $TEST_ROOT/tunnel.d; } ]; in builtins.readFile " -I "$TEST_ROOT"/tunnel.d | grepQuiet "forbidden in restricted mode" -expectStderr 1 nix-instantiate --restrict-eval --eval -E "let __nixPath = [ { prefix = \"foo\"; path = $TEST_ROOT/tunnel.d; } ]; in builtins.readDir " -I $TEST_ROOT/tunnel.d | grepQuiet "forbidden in restricted mode" +expectStderr 1 nix-instantiate --restrict-eval --eval -E "let __nixPath = [ { prefix = \"foo\"; path = $TEST_ROOT/tunnel.d; } ]; in builtins.readDir " -I "$TEST_ROOT"/tunnel.d | grepQuiet "forbidden in restricted mode" # Reading the parents of allowed paths should show only the ancestors of the allowed paths. -[[ $(nix-instantiate --restrict-eval --eval -E "let __nixPath = [ { prefix = \"foo\"; path = $TEST_ROOT/tunnel.d; } ]; in builtins.readDir " -I $TEST_ROOT/tunnel.d) == '{ "tunnel.d" = "directory"; }' ]] +[[ $(nix-instantiate --restrict-eval --eval -E "let __nixPath = [ { prefix = \"foo\"; path = $TEST_ROOT/tunnel.d; } ]; in builtins.readDir " -I "$TEST_ROOT"/tunnel.d) == '{ "tunnel.d" = "directory"; }' ]] # Check whether we can leak symlink information through directory traversal. 
traverseDir="${_NIX_TEST_SOURCE_DIR}/restricted-traverse-me" ln -sfn "${_NIX_TEST_SOURCE_DIR}/restricted-secret" "${_NIX_TEST_SOURCE_DIR}/restricted-innocent" mkdir -p "$traverseDir" +# shellcheck disable=SC2001 goUp="..$(echo "$traverseDir" | sed -e 's,[^/]\+,..,g')" output="$(nix eval --raw --restrict-eval -I "$traverseDir" \ --expr "builtins.readFile \"$traverseDir/$goUp${_NIX_TEST_SOURCE_DIR}/restricted-innocent\"" \ From 64d828b8c417b94eb168b3a6e0b296329f42ef2d Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 20:05:56 -0700 Subject: [PATCH 213/332] shellcheck fix: tests/functional/search.sh --- maintainers/flake-module.nix | 1 - 1 file changed, 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index c56599785..21dcf9c2e 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/search\.sh$'' ''^tests/functional/secure-drv-outputs\.sh$'' ''^tests/functional/selfref-gc\.sh$'' ''^tests/functional/shell\.shebang\.sh$'' From 1a5ccbeafc4ee7074283e1b0d095969f52793252 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 20:06:37 -0700 Subject: [PATCH 214/332] shellcheck fix: tests/functional/secure-drv-outputs.sh --- maintainers/flake-module.nix | 1 - tests/functional/secure-drv-outputs.sh | 10 +++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 21dcf9c2e..711b31ee4 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/secure-drv-outputs\.sh$'' ''^tests/functional/selfref-gc\.sh$'' ''^tests/functional/shell\.shebang\.sh$'' ''^tests/functional/simple\.builder\.sh$'' diff --git a/tests/functional/secure-drv-outputs.sh b/tests/functional/secure-drv-outputs.sh index 5cc4af435..876d3c817 100755 --- a/tests/functional/secure-drv-outputs.sh +++ b/tests/functional/secure-drv-outputs.sh @@ -13,20 +13,20 @@ clearStore startDaemon # Determine the output path of the "good" derivation. -goodOut=$(nix-store -q $(nix-instantiate ./secure-drv-outputs.nix -A good)) +goodOut=$(nix-store -q "$(nix-instantiate ./secure-drv-outputs.nix -A good)") # Instantiate the "bad" derivation. badDrv=$(nix-instantiate ./secure-drv-outputs.nix -A bad) -badOut=$(nix-store -q $badDrv) +badOut=$(nix-store -q "$badDrv") # Rewrite the bad derivation to produce the output path of the good # derivation. -rm -f $TEST_ROOT/bad.drv -sed -e "s|$badOut|$goodOut|g" < $badDrv > $TEST_ROOT/bad.drv +rm -f "$TEST_ROOT"/bad.drv +sed -e "s|$badOut|$goodOut|g" < "$badDrv" > "$TEST_ROOT"/bad.drv # Add the manipulated derivation to the store and build it. This # should fail. 
-if badDrv2=$(nix-store --add $TEST_ROOT/bad.drv); then +if badDrv2=$(nix-store --add "$TEST_ROOT"/bad.drv); then nix-store -r "$badDrv2" fi From b8f1a8a0c170e133c1390027d3341b11dae2fdbf Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 20:07:09 -0700 Subject: [PATCH 215/332] shellcheck fix: tests/functional/selfref-gc.sh --- maintainers/flake-module.nix | 1 - tests/functional/selfref-gc.sh | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 711b31ee4..458aaa777 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/selfref-gc\.sh$'' ''^tests/functional/shell\.shebang\.sh$'' ''^tests/functional/simple\.builder\.sh$'' ''^tests/functional/supplementary-groups\.sh$'' diff --git a/tests/functional/selfref-gc.sh b/tests/functional/selfref-gc.sh index dc4f14cc1..de202a09d 100755 --- a/tests/functional/selfref-gc.sh +++ b/tests/functional/selfref-gc.sh @@ -6,6 +6,7 @@ requireDaemonNewerThan "2.6.0pre20211215" clearStoreIfPossible +# shellcheck disable=SC2016 nix-build --no-out-link -E ' with import '"${config_nix}"'; From 7266a514124444379358ae4f60e975e208981feb Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 20:08:27 -0700 Subject: [PATCH 216/332] shellcheck fix: tests/functional/selfref-gc.sh --- maintainers/flake-module.nix | 1 - tests/functional/shell.shebang.sh | 3 ++- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 458aaa777..c52201229 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/shell\.shebang\.sh$'' ''^tests/functional/simple\.builder\.sh$'' ''^tests/functional/supplementary-groups\.sh$'' ''^tests/functional/toString-path\.sh$'' diff --git a/tests/functional/shell.shebang.sh b/tests/functional/shell.shebang.sh index f7132043d..b6e4ee286 100755 --- a/tests/functional/shell.shebang.sh +++ b/tests/functional/shell.shebang.sh @@ -1,4 +1,5 @@ #! @ENV_PROG@ nix-shell #! nix-shell -I nixpkgs=shell.nix --no-substitute #! nix-shell --pure -i bash -p foo bar -echo "$(foo) $(bar) $@" +# shellcheck shell=bash +echo "$(foo) $(bar)" "$@" From 8c9bfb6e1249453ec984afb16a62d6d78b5f646b Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 20:18:48 -0700 Subject: [PATCH 217/332] shellcheck fix: tests/functional/simple.builder.sh --- maintainers/flake-module.nix | 1 - tests/functional/simple.builder.sh | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index c52201229..806444df4 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/simple\.builder\.sh$'' ''^tests/functional/supplementary-groups\.sh$'' ''^tests/functional/toString-path\.sh$'' ''^tests/functional/user-envs-migration\.sh$'' diff --git a/tests/functional/simple.builder.sh b/tests/functional/simple.builder.sh index 97abf0676..27cdfe684 100644 --- a/tests/functional/simple.builder.sh +++ b/tests/functional/simple.builder.sh @@ -6,7 +6,9 @@ echo "PATH=$PATH" if mkdir foo 2> /dev/null; then exit 1; fi # Set a PATH (!!! impure). 
+# shellcheck disable=SC2154 export PATH=$goodPath +# shellcheck disable=SC2154 mkdir "$out" echo "Hello World!" > "$out"/hello From b349783830d1d82c3cc43c19e402977bdbf29ddd Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 20:19:27 -0700 Subject: [PATCH 218/332] shellcheck fix: tests/functional/supplementary-groups.sh --- maintainers/flake-module.nix | 1 - tests/functional/supplementary-groups.sh | 5 +++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 806444df4..829cc5c0f 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/supplementary-groups\.sh$'' ''^tests/functional/toString-path\.sh$'' ''^tests/functional/user-envs-migration\.sh$'' ''^tests/functional/user-envs-test-case\.sh$'' diff --git a/tests/functional/supplementary-groups.sh b/tests/functional/supplementary-groups.sh index a667d3e99..0f614a130 100755 --- a/tests/functional/supplementary-groups.sh +++ b/tests/functional/supplementary-groups.sh @@ -9,6 +9,7 @@ needLocalStore "The test uses --store always so we would just be bypassing the d TODO_NixOS +# shellcheck disable=SC2119 execUnshare < Date: Tue, 30 Sep 2025 20:19:47 -0700 Subject: [PATCH 219/332] shellcheck fix: tests/functional/toString-path.sh --- maintainers/flake-module.nix | 1 - tests/functional/toString-path.sh | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 829cc5c0f..913957519 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/toString-path\.sh$'' ''^tests/functional/user-envs-migration\.sh$'' ''^tests/functional/user-envs-test-case\.sh$'' ''^tests/functional/user-envs\.builder\.sh$'' diff --git a/tests/functional/toString-path.sh b/tests/functional/toString-path.sh index d790109f4..c425b61be 100755 --- a/tests/functional/toString-path.sh +++ b/tests/functional/toString-path.sh @@ -2,8 +2,8 @@ source common.sh -mkdir -p $TEST_ROOT/foo -echo bla > $TEST_ROOT/foo/bar +mkdir -p "$TEST_ROOT"/foo +echo bla > "$TEST_ROOT"/foo/bar [[ $(nix eval --raw --impure --expr "builtins.readFile (builtins.toString (builtins.fetchTree { type = \"path\"; path = \"$TEST_ROOT/foo\"; } + \"/bar\"))") = bla ]] From 359e73a6db92179478a4298c4a5bc9c083897499 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 20:20:13 -0700 Subject: [PATCH 220/332] shellcheck fix: tests/functional/user-envs-migration.sh --- maintainers/flake-module.nix | 1 - tests/functional/user-envs-migration.sh | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 913957519..2d1a1bb10 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/user-envs-migration\.sh$'' ''^tests/functional/user-envs-test-case\.sh$'' ''^tests/functional/user-envs\.builder\.sh$'' ''^tests/functional/user-envs\.sh$'' diff --git a/tests/functional/user-envs-migration.sh b/tests/functional/user-envs-migration.sh index 0f33074e1..46337cdda 100755 --- a/tests/functional/user-envs-migration.sh +++ b/tests/functional/user-envs-migration.sh @@ -29,6 +29,7 @@ nix-env -f user-envs.nix -i 
bar-0.1 # Migrate to the new profile dir, and ensure that everything’s there export PATH="$PATH_WITH_NEW_NIX" nix-env -q # Trigger the migration +# shellcheck disable=SC2235 ( [[ -L ~/.nix-profile ]] && \ [[ $(readlink ~/.nix-profile) == ~/.local/share/nix/profiles/profile ]] ) || \ fail "The nix profile should point to the new location" From 049c4c7546e1bb87796b8dafcbe76bc818eb8129 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 20:22:11 -0700 Subject: [PATCH 221/332] shellcheck fix: tests/functional/user-envs-test-case.sh --- maintainers/flake-module.nix | 1 - tests/functional/user-envs-test-case.sh | 72 +++++++++++++------------ 2 files changed, 39 insertions(+), 34 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 2d1a1bb10..c13578ec9 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/user-envs-test-case\.sh$'' ''^tests/functional/user-envs\.builder\.sh$'' ''^tests/functional/user-envs\.sh$'' ''^tests/functional/why-depends\.sh$'' diff --git a/tests/functional/user-envs-test-case.sh b/tests/functional/user-envs-test-case.sh index 3483a4600..9f4450161 100644 --- a/tests/functional/user-envs-test-case.sh +++ b/tests/functional/user-envs-test-case.sh @@ -1,14 +1,17 @@ +# shellcheck shell=bash clearProfiles # Query installed: should be empty. -test "$(nix-env -p $profiles/test -q '*' | wc -l)" -eq 0 +# shellcheck disable=SC2154 +test "$(nix-env -p "$profiles"/test -q '*' | wc -l)" -eq 0 -nix-env --switch-profile $profiles/test +nix-env --switch-profile "$profiles"/test # Query available: should contain several. test "$(nix-env -f ./user-envs.nix -qa '*' | wc -l)" -eq 6 outPath10=$(nix-env -f ./user-envs.nix -qa --out-path --no-name '*' | grep foo-1.0) drvPath10=$(nix-env -f ./user-envs.nix -qa --drv-path --no-name '*' | grep foo-1.0) +# shellcheck disable=SC2166 [ -n "$outPath10" -a -n "$drvPath10" ] TODO_NixOS @@ -20,18 +23,19 @@ nix-env -f ./user-envs.nix -qa --json | jq -e '.[] | select(.name == "bar-0.1") ] | all' nix-env -f ./user-envs.nix -qa --json --out-path | jq -e '.[] | select(.name == "bar-0.1") | [ .outputName == "out", - (.outputs.out | test("'$NIX_STORE_DIR'.*-0\\.1")) + (.outputs.out | test("'"$NIX_STORE_DIR"'.*-0\\.1")) ] | all' -nix-env -f ./user-envs.nix -qa --json --drv-path | jq -e '.[] | select(.name == "bar-0.1") | (.drvPath | test("'$NIX_STORE_DIR'.*-0\\.1\\.drv"))' +nix-env -f ./user-envs.nix -qa --json --drv-path | jq -e '.[] | select(.name == "bar-0.1") | (.drvPath | test("'"$NIX_STORE_DIR"'.*-0\\.1\\.drv"))' # Query descriptions. nix-env -f ./user-envs.nix -qa '*' --description | grepQuiet silly -rm -rf $HOME/.nix-defexpr -ln -s $(pwd)/user-envs.nix $HOME/.nix-defexpr +rm -rf "$HOME"/.nix-defexpr +ln -s "$(pwd)"/user-envs.nix "$HOME"/.nix-defexpr nix-env -qa '*' --description | grepQuiet silly # Query the system. -nix-env -qa '*' --system | grepQuiet $system +# shellcheck disable=SC2154 +nix-env -qa '*' --system | grepQuiet "$system" # Install "foo-1.0". nix-env -i foo-1.0 @@ -40,7 +44,7 @@ nix-env -i foo-1.0 # executable). test "$(nix-env -q '*' | wc -l)" -eq 1 nix-env -q '*' | grepQuiet foo-1.0 -test "$($profiles/test/bin/foo)" = "foo-1.0" +test "$("$profiles"/test/bin/foo)" = "foo-1.0" # Test nix-env -qc to compare installed against available packages, and vice versa. 
nix-env -qc '*' | grepQuiet '< 2.0' @@ -55,6 +59,7 @@ nix-env -qas | grepQuiet -- '--- bar-0.1' # Disable foo. nix-env --set-flag active false foo +# shellcheck disable=SC2235 (! [ -e "$profiles/test/bin/foo" ]) # Enable foo. @@ -72,7 +77,7 @@ nix-env -i foo-2.0pre1 # Query installed: should contain foo-2.0pre1 now. test "$(nix-env -q '*' | wc -l)" -eq 1 nix-env -q '*' | grepQuiet foo-2.0pre1 -test "$($profiles/test/bin/foo)" = "foo-2.0pre1" +test "$("$profiles"/test/bin/foo)" = "foo-2.0pre1" # Upgrade "foo": should install foo-2.0. NIX_PATH=nixpkgs=./user-envs.nix:${NIX_PATH-} nix-env -f '' -u foo @@ -80,7 +85,7 @@ NIX_PATH=nixpkgs=./user-envs.nix:${NIX_PATH-} nix-env -f '' -u foo # Query installed: should contain foo-2.0 now. test "$(nix-env -q '*' | wc -l)" -eq 1 nix-env -q '*' | grepQuiet foo-2.0 -test "$($profiles/test/bin/foo)" = "foo-2.0" +test "$("$profiles"/test/bin/foo)" = "foo-2.0" # Store the path of foo-2.0. outPath20=$(nix-env -q --out-path --no-name '*' | grep foo-2.0) @@ -95,9 +100,9 @@ if nix-env -q '*' | grepQuiet foo; then false; fi nix-env -q '*' | grepQuiet bar # Rollback: should bring "foo" back. -oldGen="$(nix-store -q --resolve $profiles/test)" +oldGen="$(nix-store -q --resolve "$profiles"/test)" nix-env --rollback -[ "$(nix-store -q --resolve $profiles/test)" != "$oldGen" ] +[ "$(nix-store -q --resolve "$profiles"/test)" != "$oldGen" ] nix-env -q '*' | grepQuiet foo-2.0 nix-env -q '*' | grepQuiet bar @@ -122,23 +127,23 @@ test "$(nix-env --list-generations | wc -l)" -eq 8 # Switch to a specified generation. nix-env --switch-generation 7 -[ "$(nix-store -q --resolve $profiles/test)" = "$oldGen" ] +[ "$(nix-store -q --resolve "$profiles"/test)" = "$oldGen" ] # Install foo-1.0, now using its store path. nix-env -i "$outPath10" nix-env -q '*' | grepQuiet foo-1.0 -nix-store -qR $profiles/test | grep "$outPath10" -nix-store -q --referrers-closure $profiles/test | grep "$(nix-store -q --resolve $profiles/test)" -[ "$(nix-store -q --deriver "$outPath10")" = $drvPath10 ] +nix-store -qR "$profiles"/test | grep "$outPath10" +nix-store -q --referrers-closure "$profiles"/test | grep "$(nix-store -q --resolve "$profiles"/test)" +[ "$(nix-store -q --deriver "$outPath10")" = "$drvPath10" ] # Uninstall foo-1.0, using a symlink to its store path. -ln -sfn $outPath10/bin/foo $TEST_ROOT/symlink -nix-env -e $TEST_ROOT/symlink +ln -sfn "$outPath10"/bin/foo "$TEST_ROOT"/symlink +nix-env -e "$TEST_ROOT"/symlink if nix-env -q '*' | grepQuiet foo; then false; fi -nix-store -qR $profiles/test | grepInverse "$outPath10" +nix-store -qR "$profiles"/test | grepInverse "$outPath10" # Install foo-1.0, now using a symlink to its store path. -nix-env -i $TEST_ROOT/symlink +nix-env -i "$TEST_ROOT"/symlink nix-env -q '*' | grepQuiet foo # Delete all old generations. @@ -148,15 +153,16 @@ nix-env --delete-generations old # foo-1.0. nix-collect-garbage test -e "$outPath10" +# shellcheck disable=SC2235 (! [ -e "$outPath20" ]) # Uninstall everything nix-env -e '*' -test "$(nix-env -q '*' | wc -l)" -eq 0 +test "$(nix-env -q '*' -c)" -eq 0 # Installing "foo" should only install the newest foo. nix-env -i foo -test "$(nix-env -q '*' | grep foo- | wc -l)" -eq 1 +test "$(nix-env -q '*' | grep foo- -c)" -eq 1 nix-env -q '*' | grepQuiet foo-2.0 # On the other hand, this should install both (and should fail due to @@ -177,25 +183,25 @@ nix-env -q '*' | grepQuiet bar-0.1.1 # declared priorities. 
nix-env -e '*' nix-env -i foo-0.1 foo-1.0 -[ "$($profiles/test/bin/foo)" = "foo-1.0" ] +[ "$("$profiles"/test/bin/foo)" = "foo-1.0" ] nix-env --set-flag priority 1 foo-0.1 -[ "$($profiles/test/bin/foo)" = "foo-0.1" ] +[ "$("$profiles"/test/bin/foo)" = "foo-0.1" ] # Priorities can be overridden with the --priority flag nix-env -e '*' nix-env -i foo-1.0 -[ "$($profiles/test/bin/foo)" = "foo-1.0" ] +[ "$("$profiles"/test/bin/foo)" = "foo-1.0" ] nix-env -i --priority 1 foo-0.1 -[ "$($profiles/test/bin/foo)" = "foo-0.1" ] +[ "$("$profiles"/test/bin/foo)" = "foo-0.1" ] # Test nix-env --set. -nix-env --set $outPath10 -[ "$(nix-store -q --resolve $profiles/test)" = $outPath10 ] -nix-env --set $drvPath10 -[ "$(nix-store -q --resolve $profiles/test)" = $outPath10 ] +nix-env --set "$outPath10" +[ "$(nix-store -q --resolve "$profiles"/test)" = "$outPath10" ] +nix-env --set "$drvPath10" +[ "$(nix-store -q --resolve "$profiles"/test)" = "$outPath10" ] # Test the case where $HOME contains a symlink. -mkdir -p $TEST_ROOT/real-home/alice/.nix-defexpr/channels -ln -sfn $TEST_ROOT/real-home $TEST_ROOT/home -ln -sfn $(pwd)/user-envs.nix $TEST_ROOT/home/alice/.nix-defexpr/channels/foo +mkdir -p "$TEST_ROOT"/real-home/alice/.nix-defexpr/channels +ln -sfn "$TEST_ROOT"/real-home "$TEST_ROOT"/home +ln -sfn "$(pwd)"/user-envs.nix "$TEST_ROOT"/home/alice/.nix-defexpr/channels/foo HOME=$TEST_ROOT/home/alice nix-env -i foo-0.1 From 13eac5295d1b15f7708ad193e164ece615d1dc44 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 20:23:02 -0700 Subject: [PATCH 222/332] shellcheck fix: tests/functional/user-envs.builder.sh --- maintainers/flake-module.nix | 1 - tests/functional/user-envs.builder.sh | 13 ++++++++----- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index c13578ec9..e1c89f71a 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/user-envs\.builder\.sh$'' ''^tests/functional/user-envs\.sh$'' ''^tests/functional/why-depends\.sh$'' ]; diff --git a/tests/functional/user-envs.builder.sh b/tests/functional/user-envs.builder.sh index 5fafa797f..e875c2fe5 100644 --- a/tests/functional/user-envs.builder.sh +++ b/tests/functional/user-envs.builder.sh @@ -1,5 +1,8 @@ -mkdir $out -mkdir $out/bin -echo "#! $shell" > $out/bin/$progName -echo "echo $name" >> $out/bin/$progName -chmod +x $out/bin/$progName +# shellcheck shell=bash +# shellcheck disable=SC2154 +mkdir "$out" +mkdir "$out"/bin +echo "#! 
$shell" > "$out"/bin/"$progName" +# shellcheck disable=SC2154 +echo "echo $name" >> "$out"/bin/"$progName" +chmod +x "$out"/bin/"$progName" From c8ef6dfa5a9c9a869b0bfd08a2cd9b2bb35a6ce6 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 20:23:17 -0700 Subject: [PATCH 223/332] shellcheck fix: tests/functional/user-envs.sh --- maintainers/flake-module.nix | 1 - tests/functional/user-envs-test-case.sh | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index e1c89f71a..83891daa2 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -106,7 +106,6 @@ enable = true; excludes = [ # We haven't linted these files yet - ''^tests/functional/user-envs\.sh$'' ''^tests/functional/why-depends\.sh$'' ]; }; diff --git a/tests/functional/user-envs-test-case.sh b/tests/functional/user-envs-test-case.sh index 9f4450161..f6a8ab8c6 100644 --- a/tests/functional/user-envs-test-case.sh +++ b/tests/functional/user-envs-test-case.sh @@ -158,7 +158,7 @@ test -e "$outPath10" # Uninstall everything nix-env -e '*' -test "$(nix-env -q '*' -c)" -eq 0 +test "$(nix-env -q '*' | wc -l)" -eq 0 # Installing "foo" should only install the newest foo. nix-env -i foo From 015b639cea34a4fa4f3d716fe3cbfe5a26e85ee6 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 30 Sep 2025 20:23:39 -0700 Subject: [PATCH 224/332] shellcheck fix: tests/functional/why-depends.sh --- maintainers/flake-module.nix | 4 ---- tests/functional/why-depends.sh | 4 ++-- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 83891daa2..8dcff9c63 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -104,10 +104,6 @@ }; shellcheck = { enable = true; - excludes = [ - # We haven't linted these files yet - ''^tests/functional/why-depends\.sh$'' - ]; }; }; }; diff --git a/tests/functional/why-depends.sh b/tests/functional/why-depends.sh index 45d1f2f0b..fe9ff9a62 100755 --- a/tests/functional/why-depends.sh +++ b/tests/functional/why-depends.sh @@ -4,9 +4,9 @@ source common.sh clearStoreIfPossible -cp ./dependencies.nix ./dependencies.builder0.sh "${config_nix}" $TEST_HOME +cp ./dependencies.nix ./dependencies.builder0.sh "${config_nix}" "$TEST_HOME" -cd $TEST_HOME +cd "$TEST_HOME" nix why-depends --derivation --file ./dependencies.nix input2_drv input1_drv nix why-depends --file ./dependencies.nix input2_drv input1_drv From b72898b2aa4f5d7fe32fee009539daf066251dbf Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Wed, 1 Oct 2025 16:01:28 +0000 Subject: [PATCH 225/332] refactor(libstore): extract S3 URL parsing into separate files Move ParsedS3URL from s3.cc/.hh into dedicated s3-url.cc/.hh files. This separates URL parsing utilities (which are protocol-agnostic) from the AWS SDK-specific S3Helper implementation, making the code cleaner and enabling reuse by future curl-based S3 implementation. 
--- src/libstore-tests/meson.build | 2 +- src/libstore-tests/{s3.cc => s3-url.cc} | 2 +- src/libstore/include/nix/store/meson.build | 1 + src/libstore/include/nix/store/s3-url.hh | 60 ++++++++++++++++++++++ src/libstore/include/nix/store/s3.hh | 46 +---------------- src/libstore/meson.build | 2 +- src/libstore/{s3.cc => s3-url.cc} | 22 ++++---- 7 files changed, 76 insertions(+), 59 deletions(-) rename src/libstore-tests/{s3.cc => s3-url.cc} (99%) create mode 100644 src/libstore/include/nix/store/s3-url.hh rename src/libstore/{s3.cc => s3-url.cc} (95%) diff --git a/src/libstore-tests/meson.build b/src/libstore-tests/meson.build index 915c10a38..1908e5cbc 100644 --- a/src/libstore-tests/meson.build +++ b/src/libstore-tests/meson.build @@ -77,7 +77,7 @@ sources = files( 'realisation.cc', 'references.cc', 's3-binary-cache-store.cc', - 's3.cc', + 's3-url.cc', 'serve-protocol.cc', 'ssh-store.cc', 'store-reference.cc', diff --git a/src/libstore-tests/s3.cc b/src/libstore-tests/s3-url.cc similarity index 99% rename from src/libstore-tests/s3.cc rename to src/libstore-tests/s3-url.cc index 799e102fe..56ec4e40e 100644 --- a/src/libstore-tests/s3.cc +++ b/src/libstore-tests/s3-url.cc @@ -1,4 +1,4 @@ -#include "nix/store/s3.hh" +#include "nix/store/s3-url.hh" #include "nix/util/tests/gmock-matchers.hh" #if NIX_WITH_S3_SUPPORT diff --git a/src/libstore/include/nix/store/meson.build b/src/libstore/include/nix/store/meson.build index 428ef00f3..f945f25ad 100644 --- a/src/libstore/include/nix/store/meson.build +++ b/src/libstore/include/nix/store/meson.build @@ -72,6 +72,7 @@ headers = [ config_pub_h ] + files( 'remote-store.hh', 'restricted-store.hh', 's3-binary-cache-store.hh', + 's3-url.hh', 's3.hh', 'serve-protocol-connection.hh', 'serve-protocol-impl.hh', diff --git a/src/libstore/include/nix/store/s3-url.hh b/src/libstore/include/nix/store/s3-url.hh new file mode 100644 index 000000000..4f0a7b0c2 --- /dev/null +++ b/src/libstore/include/nix/store/s3-url.hh @@ -0,0 +1,60 @@ +#pragma once +///@file +#include "nix/store/config.hh" + +#if NIX_WITH_S3_SUPPORT + +# include "nix/util/url.hh" +# include "nix/util/util.hh" + +# include +# include +# include +# include + +namespace nix { + +/** + * Parsed S3 URL. + */ +struct ParsedS3URL +{ + std::string bucket; + /** + * @see ParsedURL::path. This is a vector for the same reason. + * Unlike ParsedURL::path this doesn't include the leading empty segment, + * since the bucket name is necessary. + */ + std::vector key; + std::optional profile; + std::optional region; + std::optional scheme; + /** + * The endpoint can be either missing, be an absolute URI (with a scheme like `http:`) + * or an authority (so an IP address or a registered name). 
+ */ + std::variant endpoint; + + std::optional getEncodedEndpoint() const + { + return std::visit( + overloaded{ + [](std::monostate) -> std::optional { return std::nullopt; }, + [](const auto & authorityOrUrl) -> std::optional { return authorityOrUrl.to_string(); }, + }, + endpoint); + } + + static ParsedS3URL parse(const ParsedURL & uri); + + /** + * Convert this ParsedS3URL to HTTPS ParsedURL for use with curl's AWS SigV4 authentication + */ + ParsedURL toHttpsUrl() const; + + auto operator<=>(const ParsedS3URL & other) const = default; +}; + +} // namespace nix + +#endif diff --git a/src/libstore/include/nix/store/s3.hh b/src/libstore/include/nix/store/s3.hh index 0270eeda6..ba3adbc2a 100644 --- a/src/libstore/include/nix/store/s3.hh +++ b/src/libstore/include/nix/store/s3.hh @@ -4,12 +4,9 @@ #if NIX_WITH_S3_SUPPORT # include "nix/util/ref.hh" -# include "nix/util/url.hh" -# include "nix/util/util.hh" +# include "nix/store/s3-url.hh" -# include # include -# include namespace Aws { namespace Client { @@ -48,47 +45,6 @@ struct S3Helper FileTransferResult getObject(const std::string & bucketName, const std::string & key); }; -/** - * Parsed S3 URL. - */ -struct ParsedS3URL -{ - std::string bucket; - /** - * @see ParsedURL::path. This is a vector for the same reason. - * Unlike ParsedURL::path this doesn't include the leading empty segment, - * since the bucket name is necessary. - */ - std::vector key; - std::optional profile; - std::optional region; - std::optional scheme; - /** - * The endpoint can be either missing, be an absolute URI (with a scheme like `http:`) - * or an authority (so an IP address or a registered name). - */ - std::variant endpoint; - - std::optional getEncodedEndpoint() const - { - return std::visit( - overloaded{ - [](std::monostate) -> std::optional { return std::nullopt; }, - [](const auto & authorityOrUrl) -> std::optional { return authorityOrUrl.to_string(); }, - }, - endpoint); - } - - static ParsedS3URL parse(const ParsedURL & uri); - - /** - * Convert this ParsedS3URL to HTTPS ParsedURL for use with curl's AWS SigV4 authentication - */ - ParsedURL toHttpsUrl() const; - - auto operator<=>(const ParsedS3URL & other) const = default; -}; - } // namespace nix #endif diff --git a/src/libstore/meson.build b/src/libstore/meson.build index e3004ebf5..80c234bd5 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -329,7 +329,7 @@ sources = files( 'remote-store.cc', 'restricted-store.cc', 's3-binary-cache-store.cc', - 's3.cc', + 's3-url.cc', 'serve-protocol-connection.cc', 'serve-protocol.cc', 'sqlite.cc', diff --git a/src/libstore/s3.cc b/src/libstore/s3-url.cc similarity index 95% rename from src/libstore/s3.cc rename to src/libstore/s3-url.cc index 5396f43b9..947de60b0 100644 --- a/src/libstore/s3.cc +++ b/src/libstore/s3-url.cc @@ -1,17 +1,17 @@ -#include "nix/store/s3.hh" -#include "nix/util/split.hh" -#include "nix/util/url.hh" -#include "nix/util/util.hh" -#include "nix/util/canon-path.hh" -#include "nix/util/strings-inline.hh" +#include "nix/store/s3-url.hh" -#include +#if NIX_WITH_S3_SUPPORT -namespace nix { +# include "nix/util/error.hh" +# include "nix/util/split.hh" +# include "nix/util/strings-inline.hh" + +# include +# include using namespace std::string_view_literals; -#if NIX_WITH_S3_SUPPORT +namespace nix { ParsedS3URL ParsedS3URL::parse(const ParsedURL & parsed) try { @@ -116,6 +116,6 @@ ParsedURL ParsedS3URL::toHttpsUrl() const endpoint); } -#endif - } // namespace nix + +#endif From 140b08ae3e8a766fc04e70b7a281abb746f06241 Mon 
Sep 17 00:00:00 2001 From: Jami Kettunen Date: Wed, 1 Oct 2025 22:19:08 +0300 Subject: [PATCH 226/332] libstore: Include missing header to fix compile with libc++ 20 https://en.cppreference.com/w/cpp/thread.html src/libstore/gc.cc:121:39: error: no member named 'sleep_for' in namespace 'std::this_thread' 121 | std::this_thread::sleep_for(std::chrono::milliseconds(100)); | ~~~~~~~~~~~~~~~~~~^ --- src/libstore/gc.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc index 86c4e37a6..47f40ab8e 100644 --- a/src/libstore/gc.cc +++ b/src/libstore/gc.cc @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include From 2a0fddc7d5c44845253267e28c2dedc5c56bf4ac Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Wed, 1 Oct 2025 23:13:11 +0300 Subject: [PATCH 227/332] libexpr: Move derivation-internal.nix from corepkgsFS to internalFS Best I can tell this was never supposed to be exposed to the user and has been this way since 2.19. 2.18 did not expose this file to the user: nix run nix/2.18-maintenance -- eval --expr "import " error: getting status of '/__corepkgs__/derivation-internal.nix': No such file or directory --- src/libexpr/eval.cc | 2 +- tests/functional/lang/eval-fail-derivation-name.err.exp | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 2df373520..20ebe026a 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -268,7 +268,7 @@ EvalState::EvalState( }()) , corepkgsFS(make_ref()) , internalFS(make_ref()) - , derivationInternal{corepkgsFS->addFile( + , derivationInternal{internalFS->addFile( CanonPath("derivation-internal.nix"), #include "primops/derivation.nix.gen.hh" )} diff --git a/tests/functional/lang/eval-fail-derivation-name.err.exp b/tests/functional/lang/eval-fail-derivation-name.err.exp index 017326c34..ba5ff2d00 100644 --- a/tests/functional/lang/eval-fail-derivation-name.err.exp +++ b/tests/functional/lang/eval-fail-derivation-name.err.exp @@ -1,20 +1,20 @@ error: … while evaluating the attribute 'outPath' - at ::: + at «nix-internal»/derivation-internal.nix::: | value = commonAttrs // { | outPath = builtins.getAttr outputName strict; | ^ | drvPath = strict.drvPath; … while calling the 'getAttr' builtin - at ::: + at «nix-internal»/derivation-internal.nix::: | value = commonAttrs // { | outPath = builtins.getAttr outputName strict; | ^ | drvPath = strict.drvPath; … while calling the 'derivationStrict' builtin - at ::: + at «nix-internal»/derivation-internal.nix::: | | strict = derivationStrict drvAttrs; | ^ From 85d6c8af4da6a1405563b81f3afb0dbe79e5ef7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Wed, 1 Oct 2025 22:23:10 +0200 Subject: [PATCH 228/332] link to jitsi meeting in the PR docs --- .github/PULL_REQUEST_TEMPLATE.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index c6843d86f..c155bf8bf 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -15,6 +15,10 @@ so you understand the process and the expectations. - volunteering contributions effectively - how to get help and our review process. +PR stuck in review? 
We have two Nix team meetings per week online that are open for everyone in a jitsi conference: + +- https://calendar.google.com/calendar/u/0/embed?src=b9o52fobqjak8oq8lfkhg3t0qg@group.calendar.google.com + --> ## Motivation From e06968ec2586a9ccd18e58d1796de6d9ac628bc6 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Wed, 24 Sep 2025 00:06:47 -0400 Subject: [PATCH 229/332] Split out `UnkeyedRealisation` from `Realisation` Realisations are conceptually key-value pairs, mapping `DrvOutputs` (the key) to information about that derivation output. This separate the value type, which will be useful in maps, etc., where we don't want to denormalize by including the key twice. This matches similar changes for existing types: | keyed | unkeyed | |--------------------|------------------------| | `ValidPathInfo` | `UnkeyedValidPathInfo` | | `KeyedBuildResult` | `BuildResult` | | `Realisation` | `UnkeyedRealisation` | --- src/libcmd/built-path.cc | 5 +- src/libstore-tests/common-protocol.cc | 44 +++--- src/libstore-tests/realisation.cc | 20 +-- src/libstore-tests/serve-protocol.cc | 74 +++++----- src/libstore-tests/worker-protocol.cc | 132 +++++++++--------- src/libstore/binary-cache-store.cc | 21 +-- .../build/derivation-building-goal.cc | 13 +- src/libstore/build/derivation-goal.cc | 59 ++++++-- .../build/drv-output-substitution-goal.cc | 10 +- src/libstore/daemon.cc | 4 +- src/libstore/dummy-store.cc | 4 +- .../include/nix/store/binary-cache-store.hh | 17 ++- .../nix/store/build/derivation-goal.hh | 6 +- .../build/drv-output-substitution-goal.hh | 3 +- .../include/nix/store/legacy-ssh-store.hh | 4 +- .../include/nix/store/local-overlay-store.hh | 2 +- src/libstore/include/nix/store/local-store.hh | 6 +- src/libstore/include/nix/store/realisation.hh | 50 ++++--- .../include/nix/store/remote-store.hh | 2 +- src/libstore/include/nix/store/store-api.hh | 9 +- src/libstore/local-overlay-store.cc | 8 +- src/libstore/local-store.cc | 17 ++- src/libstore/misc.cc | 5 +- src/libstore/realisation.cc | 50 ++++--- src/libstore/remote-store.cc | 18 +-- src/libstore/restricted-store.cc | 4 +- src/libstore/store-api.cc | 20 +-- src/libstore/unix/build/derivation-builder.cc | 7 +- 28 files changed, 363 insertions(+), 251 deletions(-) diff --git a/src/libcmd/built-path.cc b/src/libcmd/built-path.cc index 4d76dd6da..fc7f18493 100644 --- a/src/libcmd/built-path.cc +++ b/src/libcmd/built-path.cc @@ -117,10 +117,11 @@ RealisedPath::Set BuiltPath::toRealisedPaths(Store & store) const "the derivation '%s' has unrealised output '%s' (derived-path.cc/toRealisedPaths)", store.printStorePath(p.drvPath->outPath()), outputName); - auto thisRealisation = store.queryRealisation(DrvOutput{*drvOutput, outputName}); + DrvOutput key{*drvOutput, outputName}; + auto thisRealisation = store.queryRealisation(key); assert(thisRealisation); // We’ve built it, so we must // have the realisation - res.insert(*thisRealisation); + res.insert(Realisation{*thisRealisation, std::move(key)}); } else { res.insert(outputPath); } diff --git a/src/libstore-tests/common-protocol.cc b/src/libstore-tests/common-protocol.cc index 35fca165d..2c001957b 100644 --- a/src/libstore-tests/common-protocol.cc +++ b/src/libstore-tests/common-protocol.cc @@ -112,32 +112,34 @@ CHARACTERIZATION_TEST( "realisation", (std::tuple{ Realisation{ - .id = - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", 
"qwer"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, + }, + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, }, Realisation{ - .id = - { - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, - .dependentRealisations = - { + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, + .dependentRealisations = { - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "quux", + { + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "quux", + }, + StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, - StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, - }, + }, + { + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, }, })) diff --git a/src/libstore-tests/realisation.cc b/src/libstore-tests/realisation.cc index a5a5bee50..d16049bc5 100644 --- a/src/libstore-tests/realisation.cc +++ b/src/libstore-tests/realisation.cc @@ -49,16 +49,16 @@ INSTANTIATE_TEST_SUITE_P( RealisationJsonTest, ([] { Realisation simple{ - - .id = - { - .drvHash = Hash::parseExplicitFormatUnprefixed( - "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", - HashAlgorithm::SHA256, - HashFormat::Base16), - .outputName = "foo", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"}, + }, + { + .drvHash = Hash::parseExplicitFormatUnprefixed( + "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", + HashAlgorithm::SHA256, + HashFormat::Base16), + .outputName = "foo", + }, }; return ::testing::Values( std::pair{ diff --git a/src/libstore-tests/serve-protocol.cc b/src/libstore-tests/serve-protocol.cc index a63201164..10aa21e9d 100644 --- a/src/libstore-tests/serve-protocol.cc +++ b/src/libstore-tests/serve-protocol.cc @@ -95,32 +95,34 @@ VERSIONED_CHARACTERIZATION_TEST( defaultVersion, (std::tuple{ Realisation{ - .id = - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, + }, + { + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, }, Realisation{ - .id = - { - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, - .dependentRealisations = - { + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, + .dependentRealisations = { - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "quux", + { + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "quux", + }, + StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, - StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, - }, + }, + 
{ + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, }, })) @@ -196,25 +198,27 @@ VERSIONED_CHARACTERIZATION_TEST( { "foo", { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "foo", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + }, + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "foo", + }, }, }, { "bar", { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "bar", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + }, + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "bar", + }, }, }, }, diff --git a/src/libstore-tests/worker-protocol.cc b/src/libstore-tests/worker-protocol.cc index 489151c8c..c4afde3bd 100644 --- a/src/libstore-tests/worker-protocol.cc +++ b/src/libstore-tests/worker-protocol.cc @@ -148,32 +148,34 @@ VERSIONED_CHARACTERIZATION_TEST( defaultVersion, (std::tuple{ Realisation{ - .id = - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, + }, + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, }, Realisation{ - .id = - { - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, - .dependentRealisations = - { + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, + .dependentRealisations = { - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "quux", + { + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "quux", + }, + StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, - StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, - }, + }, + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, }, })) @@ -214,25 +216,25 @@ VERSIONED_CHARACTERIZATION_TEST( { "foo", { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "foo", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + }, + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "foo", + }, }, }, { "bar", { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "bar", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + }, + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = 
"bar", + }, }, }, }, @@ -267,25 +269,27 @@ VERSIONED_CHARACTERIZATION_TEST( { "foo", { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "foo", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + }, + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "foo", + }, }, }, { "bar", { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "bar", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + }, + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "bar", + }, }, }, }, @@ -324,25 +328,27 @@ VERSIONED_CHARACTERIZATION_TEST( { "foo", { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "foo", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + }, + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "foo", + }, }, }, { "bar", { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "bar", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + }, + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "bar", + }, }, }, }, diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc index badfb4b14..3705f3d4d 100644 --- a/src/libstore/binary-cache-store.cc +++ b/src/libstore/binary-cache-store.cc @@ -502,10 +502,15 @@ StorePath BinaryCacheStore::addToStore( ->path; } -void BinaryCacheStore::queryRealisationUncached( - const DrvOutput & id, Callback> callback) noexcept +std::string BinaryCacheStore::makeRealisationPath(const DrvOutput & id) { - auto outputInfoFilePath = realisationsPrefix + "/" + id.to_string() + ".doi"; + return realisationsPrefix + "/" + id.to_string() + ".doi"; +} + +void BinaryCacheStore::queryRealisationUncached( + const DrvOutput & id, Callback> callback) noexcept +{ + auto outputInfoFilePath = makeRealisationPath(id); auto callbackPtr = std::make_shared(std::move(callback)); @@ -515,11 +520,12 @@ void BinaryCacheStore::queryRealisationUncached( if (!data) return (*callbackPtr)({}); - std::shared_ptr realisation; + std::shared_ptr realisation; try { - realisation = std::make_shared(nlohmann::json::parse(*data)); + realisation = std::make_shared(nlohmann::json::parse(*data)); } catch (Error & e) { - e.addTrace({}, "while parsing file '%s' as a realisation", outputInfoFilePath); + e.addTrace( + {}, "while parsing file '%s' as a realisation for key '%s'", outputInfoFilePath, id.to_string()); throw; } return (*callbackPtr)(std::move(realisation)); @@ -535,8 +541,7 @@ void BinaryCacheStore::registerDrvOutput(const Realisation & info) { if (diskCache) diskCache->upsertRealisation(config.getReference().render(/*FIXME withParams=*/false), info); - auto filePath = realisationsPrefix + "/" + info.id.to_string() + ".doi"; - upsertFile(filePath, static_cast(info).dump(), "application/json"); + 
upsertFile(makeRealisationPath(info.id), static_cast(info).dump(), "application/json"); } ref BinaryCacheStore::getRemoteFSAccessor(bool requireValidPath) diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index fa819c96b..c39fd8c1c 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -1092,13 +1092,22 @@ DerivationBuildingGoal::checkPathValidity(std::map & // without the `ca-derivations` experimental flag). worker.store.registerDrvOutput( Realisation{ + { + .outPath = info.known->path, + }, drvOutput, - info.known->path, }); } } if (info.known && info.known->isValid()) - validOutputs.emplace(i.first, Realisation{drvOutput, info.known->path}); + validOutputs.emplace( + i.first, + Realisation{ + { + .outPath = info.known->path, + }, + drvOutput, + }); } bool allValid = true; diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index cc3ba2b7b..81f4e6654 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -190,13 +190,17 @@ Goal::Co DerivationGoal::haveDerivation(bool storeDerivation) auto realisation = [&] { auto take1 = get(success.builtOutputs, wantedOutput); if (take1) - return *take1; + return static_cast(*take1); /* The above `get` should work. But stateful tracking of outputs in resolvedResult, this can get out of sync with the store, which is our actual source of truth. For now we just check the store directly if it fails. */ - auto take2 = worker.evalStore.queryRealisation(DrvOutput{*resolvedHash, wantedOutput}); + auto take2 = worker.evalStore.queryRealisation( + DrvOutput{ + .drvHash = *resolvedHash, + .outputName = wantedOutput, + }); if (take2) return *take2; @@ -207,8 +211,12 @@ Goal::Co DerivationGoal::haveDerivation(bool storeDerivation) }(); if (!drv->type().isImpure()) { - auto newRealisation = realisation; - newRealisation.id = DrvOutput{*outputHash, wantedOutput}; + Realisation newRealisation{ + realisation, + { + .drvHash = *outputHash, + .outputName = wantedOutput, + }}; newRealisation.signatures.clear(); if (!drv->type().isFixed()) { auto & drvStore = worker.evalStore.isValidPath(drvPath) ? worker.evalStore : worker.store; @@ -258,7 +266,16 @@ Goal::Co DerivationGoal::haveDerivation(bool storeDerivation) /* In checking mode, the builder will not register any outputs. So we want to make sure the ones that we wanted to check are properly there. */ - success.builtOutputs = {{wantedOutput, assertPathValidity()}}; + success.builtOutputs = {{ + wantedOutput, + { + assertPathValidity(), + { + .drvHash = outputHash, + .outputName = wantedOutput, + }, + }, + }}; } else { /* Otherwise the builder will give us info for out output, but also for other outputs. 
Filter down to just our output so as @@ -373,18 +390,20 @@ Goal::Co DerivationGoal::repairClosure() co_return doneSuccess(BuildResult::Success::AlreadyValid, assertPathValidity()); } -std::optional> DerivationGoal::checkPathValidity() +std::optional> DerivationGoal::checkPathValidity() { if (drv->type().isImpure()) return std::nullopt; auto drvOutput = DrvOutput{outputHash, wantedOutput}; - std::optional mRealisation; + std::optional mRealisation; if (auto * mOutput = get(drv->outputs, wantedOutput)) { if (auto mPath = mOutput->path(worker.store, drv->name, wantedOutput)) { - mRealisation = Realisation{drvOutput, std::move(*mPath)}; + mRealisation = UnkeyedRealisation{ + .outPath = std::move(*mPath), + }; } } else { throw Error( @@ -412,7 +431,14 @@ std::optional> DerivationGoal::checkPathValid // derivation, and the output path is valid, but we don't have // its realisation stored (probably because it has been built // without the `ca-derivations` experimental flag). - worker.store.registerDrvOutput(*mRealisation); + worker.store.registerDrvOutput( + Realisation{ + *mRealisation, + { + .drvHash = outputHash, + .outputName = wantedOutput, + }, + }); } return {{*mRealisation, status}}; @@ -420,7 +446,7 @@ std::optional> DerivationGoal::checkPathValid return std::nullopt; } -Realisation DerivationGoal::assertPathValidity() +UnkeyedRealisation DerivationGoal::assertPathValidity() { auto checkResult = checkPathValidity(); if (!(checkResult && checkResult->second == PathStatus::Valid)) @@ -428,11 +454,20 @@ Realisation DerivationGoal::assertPathValidity() return checkResult->first; } -Goal::Done DerivationGoal::doneSuccess(BuildResult::Success::Status status, Realisation builtOutput) +Goal::Done DerivationGoal::doneSuccess(BuildResult::Success::Status status, UnkeyedRealisation builtOutput) { buildResult.inner = BuildResult::Success{ .status = status, - .builtOutputs = {{wantedOutput, std::move(builtOutput)}}, + .builtOutputs = {{ + wantedOutput, + { + std::move(builtOutput), + DrvOutput{ + .drvHash = outputHash, + .outputName = wantedOutput, + }, + }, + }}, }; mcExpectedBuilds.reset(); diff --git a/src/libstore/build/drv-output-substitution-goal.cc b/src/libstore/build/drv-output-substitution-goal.cc index b6ace4784..a969b905b 100644 --- a/src/libstore/build/drv-output-substitution-goal.cc +++ b/src/libstore/build/drv-output-substitution-goal.cc @@ -43,10 +43,10 @@ Goal::Co DrvOutputSubstitutionGoal::init() outPipe->createAsyncPipe(worker.ioport.get()); #endif - auto promise = std::make_shared>>(); + auto promise = std::make_shared>>(); sub->queryRealisation( - id, {[outPipe(outPipe), promise(promise)](std::future> res) { + id, {[outPipe(outPipe), promise(promise)](std::future> res) { try { Finally updateStats([&]() { outPipe->writeSide.close(); }); promise->set_value(res.get()); @@ -75,7 +75,7 @@ Goal::Co DrvOutputSubstitutionGoal::init() * The realisation corresponding to the given output id. * Will be filled once we can get it. */ - std::shared_ptr outputInfo; + std::shared_ptr outputInfo; try { outputInfo = promise->get_future().get(); @@ -132,7 +132,7 @@ Goal::Co DrvOutputSubstitutionGoal::init() } Goal::Co DrvOutputSubstitutionGoal::realisationFetched( - Goals waitees, std::shared_ptr outputInfo, nix::ref sub) + Goals waitees, std::shared_ptr outputInfo, nix::ref sub) { waitees.insert(worker.makePathSubstitutionGoal(outputInfo->outPath)); @@ -145,7 +145,7 @@ Goal::Co DrvOutputSubstitutionGoal::realisationFetched( co_return amDone(nrNoSubstituters > 0 ? 
ecNoSubstituters : ecFailed); } - worker.store.registerDrvOutput(*outputInfo); + worker.store.registerDrvOutput({*outputInfo, id}); trace("finished"); co_return amDone(ecSuccess); diff --git a/src/libstore/daemon.cc b/src/libstore/daemon.cc index 2bd0698a0..2898f113f 100644 --- a/src/libstore/daemon.cc +++ b/src/libstore/daemon.cc @@ -964,7 +964,7 @@ static void performOp( if (GET_PROTOCOL_MINOR(conn.protoVersion) < 31) { auto outputId = DrvOutput::parse(readString(conn.from)); auto outputPath = StorePath(readString(conn.from)); - store->registerDrvOutput(Realisation{.id = outputId, .outPath = outputPath}); + store->registerDrvOutput(Realisation{{.outPath = outputPath}, outputId}); } else { auto realisation = WorkerProto::Serialise::read(*store, rconn); store->registerDrvOutput(realisation); @@ -986,7 +986,7 @@ static void performOp( } else { std::set realisations; if (info) - realisations.insert(*info); + realisations.insert({*info, outputId}); WorkerProto::write(*store, wconn, realisations); } break; diff --git a/src/libstore/dummy-store.cc b/src/libstore/dummy-store.cc index 1eb51fe3e..209be3ce9 100644 --- a/src/libstore/dummy-store.cc +++ b/src/libstore/dummy-store.cc @@ -266,8 +266,8 @@ struct DummyStoreImpl : DummyStore throw Error("path '%s' is not valid", printStorePath(path)); } - void - queryRealisationUncached(const DrvOutput &, Callback> callback) noexcept override + void queryRealisationUncached( + const DrvOutput &, Callback> callback) noexcept override { callback(nullptr); } diff --git a/src/libstore/include/nix/store/binary-cache-store.hh b/src/libstore/include/nix/store/binary-cache-store.hh index 3a2c90022..660dd870a 100644 --- a/src/libstore/include/nix/store/binary-cache-store.hh +++ b/src/libstore/include/nix/store/binary-cache-store.hh @@ -95,13 +95,22 @@ private: protected: - // The prefix under which realisation infos will be stored - const std::string realisationsPrefix = "realisations"; + /** + * The prefix under which realisation infos will be stored + */ + constexpr const static std::string realisationsPrefix = "realisations"; - const std::string cacheInfoFile = "nix-cache-info"; + constexpr const static std::string cacheInfoFile = "nix-cache-info"; BinaryCacheStore(Config &); + /** + * Compute the path to the given realisation + * + * It's `${realisationsPrefix}/${drvOutput}.doi`. + */ + std::string makeRealisationPath(const DrvOutput & id); + public: virtual bool fileExists(const std::string & path) = 0; @@ -190,7 +199,7 @@ public: void registerDrvOutput(const Realisation & info) override; void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept override; + const DrvOutput &, Callback> callback) noexcept override; void narFromPath(const StorePath & path, Sink & sink) override; diff --git a/src/libstore/include/nix/store/build/derivation-goal.hh b/src/libstore/include/nix/store/build/derivation-goal.hh index 353e7c489..c31645fff 100644 --- a/src/libstore/include/nix/store/build/derivation-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-goal.hh @@ -93,17 +93,17 @@ private: * of the wanted output, and a `PathStatus` with the * current status of that output. */ - std::optional> checkPathValidity(); + std::optional> checkPathValidity(); /** * Aborts if any output is not valid or corrupt, and otherwise * returns a 'Realisation' for the wanted output. 
*/ - Realisation assertPathValidity(); + UnkeyedRealisation assertPathValidity(); Co repairClosure(); - Done doneSuccess(BuildResult::Success::Status status, Realisation builtOutput); + Done doneSuccess(BuildResult::Success::Status status, UnkeyedRealisation builtOutput); Done doneFailure(BuildError ex); }; diff --git a/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh b/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh index b42336427..1a5a4ea26 100644 --- a/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh +++ b/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh @@ -39,7 +39,8 @@ public: GoalState state; Co init(); - Co realisationFetched(Goals waitees, std::shared_ptr outputInfo, nix::ref sub); + Co + realisationFetched(Goals waitees, std::shared_ptr outputInfo, nix::ref sub); void timedOut(Error && ex) override { diff --git a/src/libstore/include/nix/store/legacy-ssh-store.hh b/src/libstore/include/nix/store/legacy-ssh-store.hh index c91f88a84..994918f90 100644 --- a/src/libstore/include/nix/store/legacy-ssh-store.hh +++ b/src/libstore/include/nix/store/legacy-ssh-store.hh @@ -208,8 +208,8 @@ public: */ std::optional isTrustedClient() override; - void - queryRealisationUncached(const DrvOutput &, Callback> callback) noexcept override + void queryRealisationUncached( + const DrvOutput &, Callback> callback) noexcept override // TODO: Implement { unsupported("queryRealisation"); diff --git a/src/libstore/include/nix/store/local-overlay-store.hh b/src/libstore/include/nix/store/local-overlay-store.hh index b89d0a1a0..1d69d3417 100644 --- a/src/libstore/include/nix/store/local-overlay-store.hh +++ b/src/libstore/include/nix/store/local-overlay-store.hh @@ -173,7 +173,7 @@ private: * Check lower store if upper DB does not have. */ void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept override; + const DrvOutput &, Callback> callback) noexcept override; /** * Call `remountIfNecessary` after collecting garbage normally. 
diff --git a/src/libstore/include/nix/store/local-store.hh b/src/libstore/include/nix/store/local-store.hh index b871aaee2..ab255fba8 100644 --- a/src/libstore/include/nix/store/local-store.hh +++ b/src/libstore/include/nix/store/local-store.hh @@ -385,10 +385,10 @@ public: void cacheDrvOutputMapping( State & state, const uint64_t deriver, const std::string & outputName, const StorePath & output); - std::optional queryRealisation_(State & state, const DrvOutput & id); - std::optional> queryRealisationCore_(State & state, const DrvOutput & id); + std::optional queryRealisation_(State & state, const DrvOutput & id); + std::optional> queryRealisationCore_(State & state, const DrvOutput & id); void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept override; + const DrvOutput &, Callback> callback) noexcept override; std::optional getVersion() override; diff --git a/src/libstore/include/nix/store/realisation.hh b/src/libstore/include/nix/store/realisation.hh index 3424a39c9..c7e0a4483 100644 --- a/src/libstore/include/nix/store/realisation.hh +++ b/src/libstore/include/nix/store/realisation.hh @@ -46,12 +46,12 @@ struct DrvOutput static DrvOutput parse(const std::string &); - GENERATE_CMP(DrvOutput, me->drvHash, me->outputName); + bool operator==(const DrvOutput &) const = default; + auto operator<=>(const DrvOutput &) const = default; }; -struct Realisation +struct UnkeyedRealisation { - DrvOutput id; StorePath outPath; StringSet signatures; @@ -64,22 +64,35 @@ struct Realisation */ std::map dependentRealisations; - std::string fingerprint() const; - void sign(const Signer &); - bool checkSignature(const PublicKeys & publicKeys, const std::string & sig) const; - size_t checkSignatures(const PublicKeys & publicKeys) const; + std::string fingerprint(const DrvOutput & key) const; - static std::set closure(Store &, const std::set &); - static void closure(Store &, const std::set &, std::set & res); + void sign(const DrvOutput & key, const Signer &); - bool isCompatibleWith(const Realisation & other) const; + bool checkSignature(const DrvOutput & key, const PublicKeys & publicKeys, const std::string & sig) const; - StorePath getPath() const + size_t checkSignatures(const DrvOutput & key, const PublicKeys & publicKeys) const; + + const StorePath & getPath() const { return outPath; } - GENERATE_CMP(Realisation, me->id, me->outPath); + // TODO sketchy that it avoids signatures + GENERATE_CMP(UnkeyedRealisation, me->outPath); +}; + +struct Realisation : UnkeyedRealisation +{ + DrvOutput id; + + bool isCompatibleWith(const UnkeyedRealisation & other) const; + + static std::set closure(Store &, const std::set &); + + static void closure(Store &, const std::set &, std::set & res); + + bool operator==(const Realisation &) const = default; + auto operator<=>(const Realisation &) const = default; }; /** @@ -103,12 +116,13 @@ struct OpaquePath { StorePath path; - StorePath getPath() const + const StorePath & getPath() const { return path; } - GENERATE_CMP(OpaquePath, me->path); + bool operator==(const OpaquePath &) const = default; + auto operator<=>(const OpaquePath &) const = default; }; /** @@ -116,7 +130,7 @@ struct OpaquePath */ struct RealisedPath { - /* + /** * A path is either the result of the realisation of a derivation or * an opaque blob that has been directly added to the store */ @@ -138,13 +152,14 @@ struct RealisedPath /** * Get the raw store path associated to this */ - StorePath path() const; + const StorePath & path() const; void closure(Store & store, Set & ret) 
const; static void closure(Store & store, const Set & startPaths, Set & ret); Set closure(Store & store) const; - GENERATE_CMP(RealisedPath, me->raw); + bool operator==(const RealisedPath &) const = default; + auto operator<=>(const RealisedPath &) const = default; }; class MissingRealisation : public Error @@ -167,4 +182,5 @@ public: } // namespace nix +JSON_IMPL(nix::UnkeyedRealisation) JSON_IMPL(nix::Realisation) diff --git a/src/libstore/include/nix/store/remote-store.hh b/src/libstore/include/nix/store/remote-store.hh index 1aaf29d37..b152e054b 100644 --- a/src/libstore/include/nix/store/remote-store.hh +++ b/src/libstore/include/nix/store/remote-store.hh @@ -102,7 +102,7 @@ struct RemoteStore : public virtual Store, public virtual GcStore, public virtua void registerDrvOutput(const Realisation & info) override; void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept override; + const DrvOutput &, Callback> callback) noexcept override; void buildPaths(const std::vector & paths, BuildMode buildMode, std::shared_ptr evalStore) override; diff --git a/src/libstore/include/nix/store/store-api.hh b/src/libstore/include/nix/store/store-api.hh index 1131ec975..c9fd00513 100644 --- a/src/libstore/include/nix/store/store-api.hh +++ b/src/libstore/include/nix/store/store-api.hh @@ -31,6 +31,7 @@ MakeError(SubstituterDisabled, Error); MakeError(InvalidStoreReference, Error); +struct UnkeyedRealisation; struct Realisation; struct RealisedPath; struct DrvOutput; @@ -398,12 +399,12 @@ public: /** * Query the information about a realisation. */ - std::shared_ptr queryRealisation(const DrvOutput &); + std::shared_ptr queryRealisation(const DrvOutput &); /** * Asynchronous version of queryRealisation(). */ - void queryRealisation(const DrvOutput &, Callback> callback) noexcept; + void queryRealisation(const DrvOutput &, Callback> callback) noexcept; /** * Check whether the given valid path info is sufficiently attested, by @@ -430,8 +431,8 @@ protected: virtual void queryPathInfoUncached(const StorePath & path, Callback> callback) noexcept = 0; - virtual void - queryRealisationUncached(const DrvOutput &, Callback> callback) noexcept = 0; + virtual void queryRealisationUncached( + const DrvOutput &, Callback> callback) noexcept = 0; public: diff --git a/src/libstore/local-overlay-store.cc b/src/libstore/local-overlay-store.cc index 2b000b3db..f23feb8fb 100644 --- a/src/libstore/local-overlay-store.cc +++ b/src/libstore/local-overlay-store.cc @@ -77,7 +77,7 @@ void LocalOverlayStore::registerDrvOutput(const Realisation & info) // First do queryRealisation on lower layer to populate DB auto res = lowerStore->queryRealisation(info.id); if (res) - LocalStore::registerDrvOutput(*res); + LocalStore::registerDrvOutput({*res, info.id}); LocalStore::registerDrvOutput(info); } @@ -108,12 +108,12 @@ void LocalOverlayStore::queryPathInfoUncached( } void LocalOverlayStore::queryRealisationUncached( - const DrvOutput & drvOutput, Callback> callback) noexcept + const DrvOutput & drvOutput, Callback> callback) noexcept { auto callbackPtr = std::make_shared(std::move(callback)); LocalStore::queryRealisationUncached( - drvOutput, {[this, drvOutput, callbackPtr](std::future> fut) { + drvOutput, {[this, drvOutput, callbackPtr](std::future> fut) { try { auto info = fut.get(); if (info) @@ -123,7 +123,7 @@ void LocalOverlayStore::queryRealisationUncached( } // If we don't have it, check lower store lowerStore->queryRealisation( - drvOutput, {[callbackPtr](std::future> fut) { + drvOutput, 
{[callbackPtr](std::future> fut) { try { (*callbackPtr)(fut.get()); } catch (...) { diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index ebc987ee0..6425819c5 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -1036,7 +1036,7 @@ bool LocalStore::pathInfoIsUntrusted(const ValidPathInfo & info) bool LocalStore::realisationIsUntrusted(const Realisation & realisation) { - return config->requireSigs && !realisation.checkSignatures(getPublicKeys()); + return config->requireSigs && !realisation.checkSignatures(realisation.id, getPublicKeys()); } void LocalStore::addToStore(const ValidPathInfo & info, Source & source, RepairFlag repair, CheckSigsFlag checkSigs) @@ -1586,7 +1586,7 @@ void LocalStore::addSignatures(const StorePath & storePath, const StringSet & si }); } -std::optional> +std::optional> LocalStore::queryRealisationCore_(LocalStore::State & state, const DrvOutput & id) { auto useQueryRealisedOutput(state.stmts->QueryRealisedOutput.use()(id.strHash())(id.outputName)); @@ -1598,14 +1598,13 @@ LocalStore::queryRealisationCore_(LocalStore::State & state, const DrvOutput & i return { {realisationDbId, - Realisation{ - .id = id, + UnkeyedRealisation{ .outPath = outputPath, .signatures = signatures, }}}; } -std::optional LocalStore::queryRealisation_(LocalStore::State & state, const DrvOutput & id) +std::optional LocalStore::queryRealisation_(LocalStore::State & state, const DrvOutput & id) { auto maybeCore = queryRealisationCore_(state, id); if (!maybeCore) @@ -1631,13 +1630,13 @@ std::optional LocalStore::queryRealisation_(LocalStore::State } void LocalStore::queryRealisationUncached( - const DrvOutput & id, Callback> callback) noexcept + const DrvOutput & id, Callback> callback) noexcept { try { - auto maybeRealisation = - retrySQLite>([&]() { return queryRealisation_(*_state->lock(), id); }); + auto maybeRealisation = retrySQLite>( + [&]() { return queryRealisation_(*_state->lock(), id); }); if (maybeRealisation) - callback(std::make_shared(maybeRealisation.value())); + callback(std::make_shared(maybeRealisation.value())); else callback(nullptr); diff --git a/src/libstore/misc.cc b/src/libstore/misc.cc index 7efaa4f86..a31d149c2 100644 --- a/src/libstore/misc.cc +++ b/src/libstore/misc.cc @@ -360,11 +360,12 @@ drvOutputReferences(Store & store, const Derivation & drv, const StorePath & out if (!outputHash) throw Error( "output '%s' of derivation '%s' isn't realised", outputName, store.printStorePath(inputDrv)); - auto thisRealisation = store.queryRealisation(DrvOutput{*outputHash, outputName}); + DrvOutput key{*outputHash, outputName}; + auto thisRealisation = store.queryRealisation(key); if (!thisRealisation) throw Error( "output '%s' of derivation '%s' isn’t built", outputName, store.printStorePath(inputDrv)); - inputRealisations.insert(*thisRealisation); + inputRealisations.insert({*thisRealisation, std::move(key)}); } } if (!inputNode.value.empty()) { diff --git a/src/libstore/realisation.cc b/src/libstore/realisation.cc index febd67bd2..e08d5ee8a 100644 --- a/src/libstore/realisation.cc +++ b/src/libstore/realisation.cc @@ -39,7 +39,7 @@ void Realisation::closure(Store & store, const std::set & startOutp std::set res; for (auto & [currentDep, _] : current.dependentRealisations) { if (auto currentRealisation = store.queryRealisation(currentDep)) - res.insert(*currentRealisation); + res.insert({*currentRealisation, currentDep}); else throw Error("Unrealised derivation '%s'", currentDep.to_string()); } @@ -61,24 +61,25 @@ void 
Realisation::closure(Store & store, const std::set & startOutp }); } -std::string Realisation::fingerprint() const +std::string UnkeyedRealisation::fingerprint(const DrvOutput & key) const { - nlohmann::json serialized = *this; + nlohmann::json serialized = Realisation{*this, key}; serialized.erase("signatures"); return serialized.dump(); } -void Realisation::sign(const Signer & signer) +void UnkeyedRealisation::sign(const DrvOutput & key, const Signer & signer) { - signatures.insert(signer.signDetached(fingerprint())); + signatures.insert(signer.signDetached(fingerprint(key))); } -bool Realisation::checkSignature(const PublicKeys & publicKeys, const std::string & sig) const +bool UnkeyedRealisation::checkSignature( + const DrvOutput & key, const PublicKeys & publicKeys, const std::string & sig) const { - return verifyDetached(fingerprint(), sig, publicKeys); + return verifyDetached(fingerprint(key), sig, publicKeys); } -size_t Realisation::checkSignatures(const PublicKeys & publicKeys) const +size_t UnkeyedRealisation::checkSignatures(const DrvOutput & key, const PublicKeys & publicKeys) const { // FIXME: Maybe we should return `maxSigs` if the realisation corresponds to // an input-addressed one − because in that case the drv is enough to check @@ -86,19 +87,18 @@ size_t Realisation::checkSignatures(const PublicKeys & publicKeys) const size_t good = 0; for (auto & sig : signatures) - if (checkSignature(publicKeys, sig)) + if (checkSignature(key, publicKeys, sig)) good++; return good; } -StorePath RealisedPath::path() const +const StorePath & RealisedPath::path() const { - return std::visit([](auto && arg) { return arg.getPath(); }, raw); + return std::visit([](auto && arg) -> auto & { return arg.getPath(); }, raw); } -bool Realisation::isCompatibleWith(const Realisation & other) const +bool Realisation::isCompatibleWith(const UnkeyedRealisation & other) const { - assert(id == other.id); if (outPath == other.outPath) { if (dependentRealisations.empty() != other.dependentRealisations.empty()) { warn( @@ -144,7 +144,7 @@ namespace nlohmann { using namespace nix; -Realisation adl_serializer::from_json(const json & json0) +UnkeyedRealisation adl_serializer::from_json(const json & json0) { auto json = getObject(json0); @@ -157,25 +157,39 @@ Realisation adl_serializer::from_json(const json & json0) for (auto & [jsonDepId, jsonDepOutPath] : getObject(*jsonDependencies)) dependentRealisations.insert({DrvOutput::parse(jsonDepId), jsonDepOutPath}); - return Realisation{ - .id = DrvOutput::parse(valueAt(json, "id")), + return UnkeyedRealisation{ .outPath = valueAt(json, "outPath"), .signatures = signatures, .dependentRealisations = dependentRealisations, }; } -void adl_serializer::to_json(json & json, const Realisation & r) +void adl_serializer::to_json(json & json, const UnkeyedRealisation & r) { auto jsonDependentRealisations = nlohmann::json::object(); for (auto & [depId, depOutPath] : r.dependentRealisations) jsonDependentRealisations.emplace(depId.to_string(), depOutPath); json = { - {"id", r.id.to_string()}, {"outPath", r.outPath}, {"signatures", r.signatures}, {"dependentRealisations", jsonDependentRealisations}, }; } +Realisation adl_serializer::from_json(const json & json0) +{ + auto json = getObject(json0); + + return Realisation{ + static_cast(json0), + DrvOutput::parse(valueAt(json, "id")), + }; +} + +void adl_serializer::to_json(json & json, const Realisation & r) +{ + json = static_cast(r); + json["id"] = r.id.to_string(); +} + } // namespace nlohmann diff --git 
a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index a6994f844..8dd5bc064 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -501,7 +501,7 @@ void RemoteStore::registerDrvOutput(const Realisation & info) } void RemoteStore::queryRealisationUncached( - const DrvOutput & id, Callback> callback) noexcept + const DrvOutput & id, Callback> callback) noexcept { try { auto conn(getConnection()); @@ -515,21 +515,21 @@ void RemoteStore::queryRealisationUncached( conn->to << id.to_string(); conn.processStderr(); - auto real = [&]() -> std::shared_ptr { + auto real = [&]() -> std::shared_ptr { if (GET_PROTOCOL_MINOR(conn->protoVersion) < 31) { auto outPaths = WorkerProto::Serialise>::read(*this, *conn); if (outPaths.empty()) return nullptr; - return std::make_shared(Realisation{.id = id, .outPath = *outPaths.begin()}); + return std::make_shared(UnkeyedRealisation{.outPath = *outPaths.begin()}); } else { auto realisations = WorkerProto::Serialise>::read(*this, *conn); if (realisations.empty()) return nullptr; - return std::make_shared(*realisations.begin()); + return std::make_shared(*realisations.begin()); } }(); - callback(std::shared_ptr(real)); + callback(std::shared_ptr(real)); } catch (...) { return callback.rethrow(); } @@ -626,13 +626,15 @@ std::vector RemoteStore::buildPathsWithResults( auto realisation = queryRealisation(outputId); if (!realisation) throw MissingRealisation(outputId); - success.builtOutputs.emplace(output, *realisation); + success.builtOutputs.emplace(output, Realisation{*realisation, outputId}); } else { success.builtOutputs.emplace( output, Realisation{ - .id = outputId, - .outPath = outputPath, + UnkeyedRealisation{ + .outPath = outputPath, + }, + outputId, }); } } diff --git a/src/libstore/restricted-store.cc b/src/libstore/restricted-store.cc index a1cb41606..5270f7d10 100644 --- a/src/libstore/restricted-store.cc +++ b/src/libstore/restricted-store.cc @@ -107,7 +107,7 @@ struct RestrictedStore : public virtual IndirectRootStore, public virtual GcStor void registerDrvOutput(const Realisation & info) override; void queryRealisationUncached( - const DrvOutput & id, Callback> callback) noexcept override; + const DrvOutput & id, Callback> callback) noexcept override; void buildPaths(const std::vector & paths, BuildMode buildMode, std::shared_ptr evalStore) override; @@ -244,7 +244,7 @@ void RestrictedStore::registerDrvOutput(const Realisation & info) } void RestrictedStore::queryRealisationUncached( - const DrvOutput & id, Callback> callback) noexcept + const DrvOutput & id, Callback> callback) noexcept // XXX: This should probably be allowed if the realisation corresponds to // an allowed derivation { diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index 4ce6b15fa..df00dc179 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -598,7 +598,8 @@ void Store::queryPathInfo(const StorePath & storePath, Callback> callback) noexcept +void Store::queryRealisation( + const DrvOutput & id, Callback> callback) noexcept { try { @@ -624,20 +625,20 @@ void Store::queryRealisation(const DrvOutput & id, Callback(std::move(callback)); - queryRealisationUncached(id, {[this, id, callbackPtr](std::future> fut) { + queryRealisationUncached(id, {[this, id, callbackPtr](std::future> fut) { try { auto info = fut.get(); if (diskCache) { if (info) diskCache->upsertRealisation( - config.getReference().render(/*FIXME withParams=*/false), *info); + config.getReference().render(/*FIXME withParams=*/false), {*info, id}); 
else diskCache->upsertAbsentRealisation( config.getReference().render(/*FIXME withParams=*/false), id); } - (*callbackPtr)(std::shared_ptr(info)); + (*callbackPtr)(std::shared_ptr(info)); } catch (...) { callbackPtr->rethrow(); @@ -645,9 +646,9 @@ void Store::queryRealisation(const DrvOutput & id, Callback Store::queryRealisation(const DrvOutput & id) +std::shared_ptr Store::queryRealisation(const DrvOutput & id) { - using RealPtr = std::shared_ptr; + using RealPtr = std::shared_ptr; std::promise promise; queryRealisation(id, {[&](std::future result) { @@ -910,11 +911,12 @@ std::map copyPaths( std::set toplevelRealisations; for (auto & path : paths) { storePaths.insert(path.path()); - if (auto realisation = std::get_if(&path.raw)) { + if (auto * realisation = std::get_if(&path.raw)) { experimentalFeatureSettings.require(Xp::CaDerivations); toplevelRealisations.insert(*realisation); } } + auto pathsMap = copyPaths(srcStore, dstStore, storePaths, repair, checkSigs, substitute); try { @@ -931,7 +933,7 @@ std::map copyPaths( "dependency of '%s' but isn't registered", drvOutput.to_string(), current.id.to_string()); - children.insert(*currentChild); + children.insert({*currentChild, drvOutput}); } return children; }, @@ -1199,7 +1201,7 @@ void Store::signRealisation(Realisation & realisation) for (auto & secretKeyFile : secretKeyFiles.get()) { SecretKey secretKey(readFile(secretKeyFile)); LocalSigner signer(std::move(secretKey)); - realisation.sign(signer); + realisation.sign(realisation.id, signer); } } diff --git a/src/libstore/unix/build/derivation-builder.cc b/src/libstore/unix/build/derivation-builder.cc index a04056599..7cf72fb84 100644 --- a/src/libstore/unix/build/derivation-builder.cc +++ b/src/libstore/unix/build/derivation-builder.cc @@ -1830,7 +1830,12 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() for (auto & [outputName, newInfo] : infos) { auto oldinfo = get(initialOutputs, outputName); assert(oldinfo); - auto thisRealisation = Realisation{.id = DrvOutput{oldinfo->outputHash, outputName}, .outPath = newInfo.path}; + auto thisRealisation = Realisation{ + { + .outPath = newInfo.path, + }, + DrvOutput{oldinfo->outputHash, outputName}, + }; if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations) && !drv.type().isImpure()) { store.signRealisation(thisRealisation); store.registerDrvOutput(thisRealisation); From 5592bb717beb7afa43a232a13e78d2c62a794fb1 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Sat, 27 Sep 2025 16:30:36 -0400 Subject: [PATCH 230/332] Implement realisation operations on dummy store --- src/libstore-tests/dummy-store.cc | 15 +++++++++++++-- src/libstore/dummy-store.cc | 19 ++++++++++++++++--- .../include/nix/store/dummy-store-impl.hh | 12 ++++++++++++ src/libstore/include/nix/store/dummy-store.hh | 2 ++ src/libutil/include/nix/util/hash.hh | 19 +++++++++++++++++++ 5 files changed, 62 insertions(+), 5 deletions(-) diff --git a/src/libstore-tests/dummy-store.cc b/src/libstore-tests/dummy-store.cc index b841d7890..3dd8137a3 100644 --- a/src/libstore-tests/dummy-store.cc +++ b/src/libstore-tests/dummy-store.cc @@ -1,6 +1,6 @@ #include -#include "nix/store/dummy-store.hh" +#include "nix/store/dummy-store-impl.hh" #include "nix/store/globals.hh" #include "nix/store/realisation.hh" @@ -13,7 +13,7 @@ TEST(DummyStore, realisation_read) auto store = [] { auto cfg = make_ref(StoreReference::Params{}); cfg->readOnly = false; - return cfg->openStore(); + return cfg->openDummyStore(); }(); auto drvHash = Hash::parseExplicitFormatUnprefixed( @@ -22,6 +22,17 @@ 
TEST(DummyStore, realisation_read) auto outputName = "foo"; EXPECT_EQ(store->queryRealisation({drvHash, outputName}), nullptr); + + UnkeyedRealisation value{ + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"}, + }; + + store->buildTrace.insert({drvHash, {{outputName, make_ref(value)}}}); + + auto value2 = store->queryRealisation({drvHash, outputName}); + + ASSERT_TRUE(value2); + EXPECT_EQ(*value2, value); } } // namespace nix diff --git a/src/libstore/dummy-store.cc b/src/libstore/dummy-store.cc index 209be3ce9..509b7a0b1 100644 --- a/src/libstore/dummy-store.cc +++ b/src/libstore/dummy-store.cc @@ -3,6 +3,7 @@ #include "nix/util/callback.hh" #include "nix/util/memory-source-accessor.hh" #include "nix/store/dummy-store-impl.hh" +#include "nix/store/realisation.hh" #include @@ -251,7 +252,10 @@ struct DummyStoreImpl : DummyStore void registerDrvOutput(const Realisation & output) override { - unsupported("registerDrvOutput"); + auto ref = make_ref(output); + buildTrace.insert_or_visit({output.id.drvHash, {{output.id.outputName, ref}}}, [&](auto & kv) { + kv.second.insert_or_assign(output.id.outputName, make_ref(output)); + }); } void narFromPath(const StorePath & path, Sink & sink) override @@ -267,9 +271,18 @@ struct DummyStoreImpl : DummyStore } void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept override + const DrvOutput & drvOutput, Callback> callback) noexcept override { - callback(nullptr); + bool visited = false; + buildTrace.cvisit(drvOutput.drvHash, [&](const auto & kv) { + if (auto it = kv.second.find(drvOutput.outputName); it != kv.second.end()) { + visited = true; + callback(it->second.get_ptr()); + } + }); + + if (!visited) + callback(nullptr); } std::shared_ptr getFSAccessor(const StorePath & path, bool requireValidPath) override diff --git a/src/libstore/include/nix/store/dummy-store-impl.hh b/src/libstore/include/nix/store/dummy-store-impl.hh index e05bb94ff..4c9f54e98 100644 --- a/src/libstore/include/nix/store/dummy-store-impl.hh +++ b/src/libstore/include/nix/store/dummy-store-impl.hh @@ -30,6 +30,18 @@ struct DummyStore : virtual Store */ boost::concurrent_flat_map contents; + + /** + * The build trace maps the pair of a content-addressing (fixed or + * floating) derivation and one of its outputs to a + * (content-addressed) store object. + * + * It is [curried](https://en.wikipedia.org/wiki/Currying): instead of + * having a single map keyed by `DrvOutput`, we have an + * outer map for the derivation, and inner maps for the outputs of a + * given derivation.
+ */ + boost::concurrent_flat_map>> buildTrace; + DummyStore(ref config) : Store{*config} , config(config) diff --git a/src/libstore/include/nix/store/dummy-store.hh b/src/libstore/include/nix/store/dummy-store.hh index 95c09078c..d371c4e51 100644 --- a/src/libstore/include/nix/store/dummy-store.hh +++ b/src/libstore/include/nix/store/dummy-store.hh @@ -3,6 +3,8 @@ #include "nix/store/store-api.hh" +#include + namespace nix { struct DummyStore; diff --git a/src/libutil/include/nix/util/hash.hh b/src/libutil/include/nix/util/hash.hh index 571b6acca..0b16b423c 100644 --- a/src/libutil/include/nix/util/hash.hh +++ b/src/libutil/include/nix/util/hash.hh @@ -222,3 +222,22 @@ public: }; } // namespace nix + +template<> +struct std::hash +{ + std::size_t operator()(const nix::Hash & hash) const noexcept + { + assert(hash.hashSize > sizeof(size_t)); + return *reinterpret_cast(&hash.hash); + } +}; + +namespace nix { + +inline std::size_t hash_value(const Hash & hash) +{ + return std::hash{}(hash); +} + +} // namespace nix From a4e792cba7afc38ac3d4c3f85ae12622c39fd340 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Wed, 1 Oct 2025 19:47:18 +0000 Subject: [PATCH 231/332] feat(libstore): add AWS CRT-based credential infrastructure Add lightweight AWS credential resolution using AWS CRT (Common Runtime) instead of the full AWS SDK. This provides credential management for the upcoming curl-based S3 implementation. --- src/libstore/aws-creds.cc | 178 ++++++++++++++++++++ src/libstore/include/nix/store/aws-creds.hh | 73 ++++++++ src/libstore/include/nix/store/meson.build | 1 + src/libstore/meson.build | 1 + 4 files changed, 253 insertions(+) create mode 100644 src/libstore/aws-creds.cc create mode 100644 src/libstore/include/nix/store/aws-creds.hh diff --git a/src/libstore/aws-creds.cc b/src/libstore/aws-creds.cc new file mode 100644 index 000000000..576f932d5 --- /dev/null +++ b/src/libstore/aws-creds.cc @@ -0,0 +1,178 @@ +#include "nix/store/aws-creds.hh" + +#if NIX_WITH_S3_SUPPORT + +# include +# include "nix/store/s3-url.hh" +# include "nix/util/finally.hh" +# include "nix/util/logging.hh" +# include "nix/util/url.hh" +# include "nix/util/util.hh" + +# include +# include +# include + +# include + +# include +# include +# include +# include + +namespace nix { + +namespace { + +static void initAwsCrt() +{ + struct CrtWrapper + { + Aws::Crt::ApiHandle apiHandle; + + CrtWrapper() + { + apiHandle.InitializeLogging(Aws::Crt::LogLevel::Warn, static_cast(nullptr)); + } + + ~CrtWrapper() + { + try { + // CRITICAL: Clear credential provider cache BEFORE AWS CRT shuts down + // This ensures all providers (which hold references to ClientBootstrap) + // are destroyed while AWS CRT is still valid + clearAwsCredentialsCache(); + // Now it's safe for ApiHandle destructor to run + } catch (...) 
{ + ignoreExceptionInDestructor(); + } + } + }; + + static CrtWrapper crt; +} + +static AwsCredentials getCredentialsFromProvider(std::shared_ptr provider) +{ + if (!provider || !provider->IsValid()) { + throw AwsAuthError("AWS credential provider is invalid"); + } + + auto prom = std::make_shared>(); + auto fut = prom->get_future(); + + provider->GetCredentials([prom](std::shared_ptr credentials, int errorCode) { + if (errorCode != 0 || !credentials) { + prom->set_exception( + std::make_exception_ptr(AwsAuthError("Failed to resolve AWS credentials: error code %d", errorCode))); + } else { + auto accessKeyId = Aws::Crt::ByteCursorToStringView(credentials->GetAccessKeyId()); + auto secretAccessKey = Aws::Crt::ByteCursorToStringView(credentials->GetSecretAccessKey()); + auto sessionToken = Aws::Crt::ByteCursorToStringView(credentials->GetSessionToken()); + + std::optional sessionTokenStr; + if (!sessionToken.empty()) { + sessionTokenStr = std::string(sessionToken.data(), sessionToken.size()); + } + + prom->set_value(AwsCredentials( + std::string(accessKeyId.data(), accessKeyId.size()), + std::string(secretAccessKey.data(), secretAccessKey.size()), + sessionTokenStr)); + } + }); + + // AWS CRT GetCredentials is asynchronous and only guarantees the callback will be + // invoked if the initial call returns success. There's no documented timeout mechanism, + // so we add a timeout to prevent indefinite hanging if the callback is never called. + auto timeout = std::chrono::seconds(30); + if (fut.wait_for(timeout) == std::future_status::timeout) { + throw AwsAuthError( + "Timeout waiting for AWS credentials (%d seconds)", + std::chrono::duration_cast(timeout).count()); + } + + return fut.get(); // This will throw if set_exception was called +} + +// Global credential provider cache using boost's concurrent map +// Key: profile name (empty string for default profile) +using CredentialProviderCache = + boost::concurrent_flat_map>; + +static CredentialProviderCache credentialProviderCache; + +} // anonymous namespace + +AwsCredentials getAwsCredentials(const std::string & profile) +{ + // Get or create credential provider with caching + std::shared_ptr provider; + + // Try to find existing provider + credentialProviderCache.visit(profile, [&](const auto & pair) { provider = pair.second; }); + + if (!provider) { + // Create new provider if not found + debug( + "[pid=%d] creating new AWS credential provider for profile '%s'", + getpid(), + profile.empty() ? "(default)" : profile.c_str()); + + try { + initAwsCrt(); + + if (profile.empty()) { + Aws::Crt::Auth::CredentialsProviderChainDefaultConfig config; + config.Bootstrap = Aws::Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap(); + provider = Aws::Crt::Auth::CredentialsProvider::CreateCredentialsProviderChainDefault(config); + } else { + Aws::Crt::Auth::CredentialsProviderProfileConfig config; + config.Bootstrap = Aws::Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap(); + // This is safe because the underlying C library will copy this string + // c.f. https://github.com/awslabs/aws-c-auth/blob/main/source/credentials_provider_profile.c#L220 + config.ProfileNameOverride = Aws::Crt::ByteCursorFromCString(profile.c_str()); + provider = Aws::Crt::Auth::CredentialsProvider::CreateCredentialsProviderProfile(config); + } + } catch (Error & e) { + e.addTrace( + {}, + "while creating AWS credentials provider for %s", + profile.empty() ? 
"default profile" : fmt("profile '%s'", profile)); + throw; + } + + if (!provider) { + throw AwsAuthError( + "Failed to create AWS credentials provider for %s", + profile.empty() ? "default profile" : fmt("profile '%s'", profile)); + } + + // Insert into cache (try_emplace is thread-safe and won't overwrite if another thread added it) + credentialProviderCache.try_emplace(profile, provider); + } + + return getCredentialsFromProvider(provider); +} + +void invalidateAwsCredentials(const std::string & profile) +{ + credentialProviderCache.erase(profile); +} + +void clearAwsCredentialsCache() +{ + credentialProviderCache.clear(); +} + +AwsCredentials preResolveAwsCredentials(const ParsedS3URL & s3Url) +{ + std::string profile = s3Url.profile.value_or(""); + + // Get credentials (automatically cached) + return getAwsCredentials(profile); +} + +} // namespace nix + +#endif diff --git a/src/libstore/include/nix/store/aws-creds.hh b/src/libstore/include/nix/store/aws-creds.hh new file mode 100644 index 000000000..67ff2e49c --- /dev/null +++ b/src/libstore/include/nix/store/aws-creds.hh @@ -0,0 +1,73 @@ +#pragma once +///@file +#include "nix/store/config.hh" + +#if NIX_WITH_S3_SUPPORT + +# include "nix/store/s3-url.hh" +# include "nix/util/error.hh" + +# include +# include +# include + +namespace nix { + +/** + * AWS credentials obtained from credential providers + */ +struct AwsCredentials +{ + std::string accessKeyId; + std::string secretAccessKey; + std::optional sessionToken; + + AwsCredentials( + const std::string & accessKeyId, + const std::string & secretAccessKey, + const std::optional & sessionToken = std::nullopt) + : accessKeyId(accessKeyId) + , secretAccessKey(secretAccessKey) + , sessionToken(sessionToken) + { + } +}; + +/** + * Exception thrown when AWS authentication fails + */ +MakeError(AwsAuthError, Error); + +/** + * Get AWS credentials for the given profile. + * This function automatically caches credential providers to avoid + * creating multiple providers for the same profile. + * + * @param profile The AWS profile name (empty string for default profile) + * @return AWS credentials + * @throws AwsAuthError if credentials cannot be resolved + */ +AwsCredentials getAwsCredentials(const std::string & profile = ""); + +/** + * Invalidate cached credentials for a profile (e.g., on authentication failure). + * The next request for this profile will create a new provider. + * + * @param profile The AWS profile name to invalidate + */ +void invalidateAwsCredentials(const std::string & profile); + +/** + * Clear all cached credential providers. + * Typically called during application cleanup. + */ +void clearAwsCredentialsCache(); + +/** + * Pre-resolve AWS credentials for S3 URLs. + * Used to cache credentials in parent process before forking. 
+ */ +AwsCredentials preResolveAwsCredentials(const ParsedS3URL & s3Url); + +} // namespace nix +#endif diff --git a/src/libstore/include/nix/store/meson.build b/src/libstore/include/nix/store/meson.build index f945f25ad..1aa32cf2c 100644 --- a/src/libstore/include/nix/store/meson.build +++ b/src/libstore/include/nix/store/meson.build @@ -10,6 +10,7 @@ config_pub_h = configure_file( ) headers = [ config_pub_h ] + files( + 'aws-creds.hh', 'binary-cache-store.hh', 'build-result.hh', 'build/derivation-builder.hh', diff --git a/src/libstore/meson.build b/src/libstore/meson.build index 80c234bd5..713a40382 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -268,6 +268,7 @@ subdir('nix-meson-build-support/common') subdir('nix-meson-build-support/asan-options') sources = files( + 'aws-creds.cc', 'binary-cache-store.cc', 'build-result.cc', 'build/derivation-building-goal.cc', From 7f3f0f2a0b98cf05a04fe6d1c305856afb3370b7 Mon Sep 17 00:00:00 2001 From: osbm Date: Thu, 2 Oct 2025 10:44:30 +0300 Subject: [PATCH 232/332] docs: Update documentation regarding the flake outputs --- src/nix/flake-check.md | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/src/nix/flake-check.md b/src/nix/flake-check.md index c8307f8d8..007640c27 100644 --- a/src/nix/flake-check.md +++ b/src/nix/flake-check.md @@ -31,39 +31,49 @@ at the first error. The following flake output attributes must be derivations: * `checks.`*system*`.`*name* -* `defaultPackage.`*system* -* `devShell.`*system* +* `devShells.`*system*`.default` * `devShells.`*system*`.`*name* * `nixosConfigurations.`*name*`.config.system.build.toplevel` +* `packages.`*system*`.default` * `packages.`*system*`.`*name* The following flake output attributes must be [app definitions](./nix3-run.md): +* `apps.`*system*`.default` * `apps.`*system*`.`*name* -* `defaultApp.`*system* The following flake output attributes must be [template definitions](./nix3-flake-init.md): -* `defaultTemplate` +* `templates.default` * `templates.`*name* The following flake output attributes must be *Nixpkgs overlays*: -* `overlay` +* `overlays.default` * `overlays.`*name* The following flake output attributes must be *NixOS modules*: -* `nixosModule` +* `nixosModules.default` * `nixosModules.`*name* The following flake output attributes must be [bundlers](./nix3-bundle.md): +* `bundlers.default` * `bundlers.`*name* -* `defaultBundler` + +Old default attributes are renamed, they will work but will emit a warning: + +* `defaultPackage.` → `packages.`*system*`.default` +* `defaultApps.` → `apps.`*system*`.default` +* `defaultTemplate` → `templates.default` +* `defaultBundler.` → `bundlers.`*system*`.default` +* `overlay` → `overlays.default` +* `devShell.` → `devShells.`*system*`.default` +* `nixosModule` → `nixosModules.default` In addition, the `hydraJobs` output is evaluated in the same way as Hydra's `hydra-eval-jobs` (i.e. as a arbitrarily deeply nested From 1e92b61750c88783c36372e48ab411d482bb5421 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Thu, 2 Oct 2025 03:51:31 +0000 Subject: [PATCH 233/332] fix(libfetchers): substitute fetchTarball and fetchurl Fixes #4313 by enabling builtins.fetchurl, builtins.fetchTarball to use binary cache substituters before attempting to download from the original URL. 
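In outline, the fixed-output case now asks the store (and thus any configured substituters) for the expected output path before touching the network. The following is a minimal, illustrative sketch of that control flow, not the code in this patch: `fetchWithSubstitution` and the `download` callback are invented for the example, while `Store::ensurePath` is the real substitution entry point used below.

    #include "nix/store/store-api.hh"
    #include "nix/util/error.hh"

    #include <functional>

    // Try to realise the expected fixed-output path via the local store or a
    // configured substituter; only fall back to downloading if that fails.
    static nix::StorePath fetchWithSubstitution(
        nix::Store & store,
        const nix::StorePath & expectedPath,
        std::function<nix::StorePath()> download)
    {
        try {
            store.ensurePath(expectedPath); // substitutes the path if it is not already valid
            return expectedPath;
        } catch (nix::Error &) {
            // No substituter provided the path (or substitution failed): download as before.
            return download();
        }
    }

This shortcut only applies when an expected hash is given, since without it the output path cannot be computed up front.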
--- src/libexpr/primops/fetchTree.cc | 14 ++- tests/nixos/default.nix | 2 + tests/nixos/fetchers-substitute.nix | 176 ++++++++++++++++++++++++++++ 3 files changed, 189 insertions(+), 3 deletions(-) create mode 100644 tests/nixos/fetchers-substitute.nix diff --git a/src/libexpr/primops/fetchTree.cc b/src/libexpr/primops/fetchTree.cc index e673e55a0..ee2ca375a 100644 --- a/src/libexpr/primops/fetchTree.cc +++ b/src/libexpr/primops/fetchTree.cc @@ -561,14 +561,22 @@ static void fetch( .hash = *expectedHash, .references = {}}); - if (state.store->isValidPath(expectedPath)) { + // Try to get the path from the local store or substituters + try { + state.store->ensurePath(expectedPath); + debug("using substituted/cached path '%s' for '%s'", state.store->printStorePath(expectedPath), *url); state.allowAndSetStorePathString(expectedPath, v); return; + } catch (Error & e) { + debug( + "substitution of '%s' failed, will try to download: %s", + state.store->printStorePath(expectedPath), + e.what()); + // Fall through to download } } - // TODO: fetching may fail, yet the path may be substitutable. - // https://github.com/NixOS/nix/issues/4313 + // Download the file/tarball if substitution failed or no hash was provided auto storePath = unpack ? fetchToStore( state.fetchSettings, *state.store, diff --git a/tests/nixos/default.nix b/tests/nixos/default.nix index 5a1e08528..edfa4124f 100644 --- a/tests/nixos/default.nix +++ b/tests/nixos/default.nix @@ -207,5 +207,7 @@ in fetchurl = runNixOSTest ./fetchurl.nix; + fetchersSubstitute = runNixOSTest ./fetchers-substitute.nix; + chrootStore = runNixOSTest ./chroot-store.nix; } diff --git a/tests/nixos/fetchers-substitute.nix b/tests/nixos/fetchers-substitute.nix new file mode 100644 index 000000000..453982677 --- /dev/null +++ b/tests/nixos/fetchers-substitute.nix @@ -0,0 +1,176 @@ +{ + name = "fetchers-substitute"; + + nodes.substituter = + { pkgs, ... }: + { + virtualisation.writableStore = true; + + nix.settings.extra-experimental-features = [ + "nix-command" + "fetch-tree" + ]; + + networking.firewall.allowedTCPPorts = [ 5000 ]; + + services.nix-serve = { + enable = true; + secretKeyFile = + let + key = pkgs.writeTextFile { + name = "secret-key"; + text = '' + substituter:SerxxAca5NEsYY0DwVo+subokk+OoHcD9m6JwuctzHgSQVfGHe6nCc+NReDjV3QdFYPMGix4FMg0+K/TM1B3aA== + ''; + }; + in + "${key}"; + }; + }; + + nodes.importer = + { lib, ... 
}: + { + virtualisation.writableStore = true; + + nix.settings = { + extra-experimental-features = [ + "nix-command" + "fetch-tree" + ]; + substituters = lib.mkForce [ "http://substituter:5000" ]; + trusted-public-keys = lib.mkForce [ "substituter:EkFXxh3upwnPjUXg41d0HRWDzBoseBTINPiv0zNQd2g=" ]; + }; + }; + + testScript = + { nodes }: # python + '' + import json + + start_all() + + substituter.wait_for_unit("multi-user.target") + + ########################################## + # Test 1: builtins.fetchurl with substitution + ########################################## + + missing_file = "/only-on-substituter.txt" + + substituter.succeed(f"echo 'this should only exist on the substituter' > {missing_file}") + + file_hash = substituter.succeed(f"nix hash file {missing_file}").strip() + + file_store_path_json = substituter.succeed(f""" + nix-instantiate --eval --json --read-write-mode --expr ' + builtins.fetchurl {{ + url = "file://{missing_file}"; + sha256 = "{file_hash}"; + }} + ' + """) + + file_store_path = json.loads(file_store_path_json) + + substituter.succeed(f"nix store sign --key-file ${nodes.substituter.services.nix-serve.secretKeyFile} {file_store_path}") + + importer.wait_for_unit("multi-user.target") + + print("Testing fetchurl with substitution...") + importer.succeed(f""" + nix-instantiate -vvvvv --eval --json --read-write-mode --expr ' + builtins.fetchurl {{ + url = "file://{missing_file}"; + sha256 = "{file_hash}"; + }} + ' + """) + print("✓ fetchurl substitution works!") + + ########################################## + # Test 2: builtins.fetchTarball with substitution + ########################################## + + missing_tarball = "/only-on-substituter.tar.gz" + + # Create a directory with some content + substituter.succeed(""" + mkdir -p /tmp/test-tarball + echo 'Hello from tarball!' 
> /tmp/test-tarball/hello.txt + echo 'Another file' > /tmp/test-tarball/file2.txt + """) + + # Create a tarball + substituter.succeed(f"tar czf {missing_tarball} -C /tmp test-tarball") + + # For fetchTarball, we need to first fetch it without hash to get the store path, + # then compute the NAR hash of that path + tarball_store_path_json = substituter.succeed(f""" + nix-instantiate --eval --json --read-write-mode --expr ' + builtins.fetchTarball {{ + url = "file://{missing_tarball}"; + }} + ' + """) + + tarball_store_path = json.loads(tarball_store_path_json) + + # Get the NAR hash of the unpacked tarball in SRI format + path_info_json = substituter.succeed(f"nix path-info --json {tarball_store_path}").strip() + path_info_dict = json.loads(path_info_json) + # nix path-info returns a dict with store paths as keys + tarball_hash_sri = path_info_dict[tarball_store_path]["narHash"] + print(f"Tarball NAR hash (SRI): {tarball_hash_sri}") + + # Also get the old format hash for fetchTarball (which uses sha256 parameter) + tarball_hash = substituter.succeed(f"nix-store --query --hash {tarball_store_path}").strip() + + # Sign the tarball's store path + substituter.succeed(f"nix store sign --recursive --key-file ${nodes.substituter.services.nix-serve.secretKeyFile} {tarball_store_path}") + + # Now try to fetch the same tarball on the importer + # The file doesn't exist locally, so it should be substituted + print("Testing fetchTarball with substitution...") + result = importer.succeed(f""" + nix-instantiate -vvvvv --eval --json --read-write-mode --expr ' + builtins.fetchTarball {{ + url = "file://{missing_tarball}"; + sha256 = "{tarball_hash}"; + }} + ' + """) + + result_path = json.loads(result) + print(f"✓ fetchTarball substitution works! Result: {result_path}") + + # Verify the content is correct + # fetchTarball strips the top-level directory if there's only one + content = importer.succeed(f"cat {result_path}/hello.txt").strip() + assert content == "Hello from tarball!", f"Content mismatch: {content}" + print("✓ fetchTarball content verified!") + + ########################################## + # Test 3: Verify fetchTree does NOT substitute (preserves metadata) + ########################################## + + print("Testing that fetchTree without __final does NOT use substitution...") + + # fetchTree with just narHash (not __final) should try to download, which will fail + # since the file doesn't exist on the importer + exit_code = importer.fail(f""" + nix-instantiate --eval --json --read-write-mode --expr ' + builtins.fetchTree {{ + type = "tarball"; + url = "file:///only-on-substituter.tar.gz"; + narHash = "{tarball_hash_sri}"; + }} + ' 2>&1 + """) + + # Should fail with "does not exist" since it tries to download instead of substituting + assert "does not exist" in exit_code or "Couldn't open file" in exit_code, f"Expected download failure, got: {exit_code}" + print("✓ fetchTree correctly does NOT substitute non-final inputs!") + print(" (This preserves metadata like lastModified from the actual fetch)") + ''; +} From d2017e0e1a687af3b1a297acc43b004cd69a9793 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Thu, 2 Oct 2025 23:11:16 +0300 Subject: [PATCH 234/332] libstore: Move {narinfo,ls,log}-compression settings from BinaryCacheStoreConfig to HttpBinaryCacheStoreConfig These settings are only implemented for the http store and should not be there for the file:// stores. 
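As a rough illustration of where the settings end up (the struct and member names below are simplified stand-ins; the real code declares them with the `Setting` machinery shown in the diff):

    #include <string>

    struct BinaryCacheStoreConfigSketch
    {
        // Generic NAR compression (and its level) stays on the shared base config.
        std::string compression;
        int compressionLevel = -1;
    };

    struct HttpBinaryCacheStoreConfigSketch : BinaryCacheStoreConfigSketch
    {
        // Only the HTTP binary cache implements these, so they now live here.
        std::string narinfoCompression; // compression for `.narinfo` files
        std::string lsCompression;      // compression for `.ls` files
        std::string logCompression;     // compression for `log/*` files (e.g. `brotli`)
    };

The net effect is that `narinfo-compression`, `ls-compression` and `log-compression` are only offered where they are actually honoured.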
--- .../include/nix/store/binary-cache-store.hh | 15 --------------- .../include/nix/store/http-binary-cache-store.hh | 15 +++++++++++++++ 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/libstore/include/nix/store/binary-cache-store.hh b/src/libstore/include/nix/store/binary-cache-store.hh index 660dd870a..3f4de2bd4 100644 --- a/src/libstore/include/nix/store/binary-cache-store.hh +++ b/src/libstore/include/nix/store/binary-cache-store.hh @@ -59,21 +59,6 @@ struct BinaryCacheStoreConfig : virtual StoreConfig The meaning and accepted values depend on the compression method selected. `-1` specifies that the default compression level should be used. )"}; - - const Setting narinfoCompression{ - this, "", "narinfo-compression", "Compression method for `.narinfo` files."}; - - const Setting lsCompression{this, "", "ls-compression", "Compression method for `.ls` files."}; - - const Setting logCompression{ - this, - "", - "log-compression", - R"( - Compression method for `log/*` files. It is recommended to - use a compression method supported by most web browsers - (e.g. `brotli`). - )"}; }; /** diff --git a/src/libstore/include/nix/store/http-binary-cache-store.hh b/src/libstore/include/nix/store/http-binary-cache-store.hh index 4102c858f..e0b7ac1ea 100644 --- a/src/libstore/include/nix/store/http-binary-cache-store.hh +++ b/src/libstore/include/nix/store/http-binary-cache-store.hh @@ -17,6 +17,21 @@ struct HttpBinaryCacheStoreConfig : std::enable_shared_from_this narinfoCompression{ + this, "", "narinfo-compression", "Compression method for `.narinfo` files."}; + + const Setting lsCompression{this, "", "ls-compression", "Compression method for `.ls` files."}; + + const Setting logCompression{ + this, + "", + "log-compression", + R"( + Compression method for `log/*` files. It is recommended to + use a compression method supported by most web browsers + (e.g. `brotli`). + )"}; + static const std::string name() { return "HTTP Binary Cache Store"; From 27f64171281812b403eba40becd5a63d9594179a Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Fri, 3 Oct 2025 00:45:49 +0000 Subject: [PATCH 235/332] build(libstore): add NIX_WITH_CURL_S3 build option Introduce a new build option 'curl-s3-store' for the curl-based S3 implementation, separate from the existing AWS SDK-based 's3-store'. The two options are mutually exclusive to avoid conflicts. 
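For illustration, the split shows up in the sources as preprocessor guards; this is a sketch with placeholder function names, but `NIX_WITH_S3_SUPPORT` and `NIX_WITH_CURL_S3` are the real macros used in the diff:

    #include "nix/store/config.hh"

    // Code shared by both S3 backends (e.g. s3:// URL parsing) builds when
    // either option is enabled.
    #if NIX_WITH_S3_SUPPORT || NIX_WITH_CURL_S3
    void registerSharedS3UrlSupport(); // placeholder
    #endif

    // AWS CRT based credential handling exists only in the curl-based backend.
    #if NIX_WITH_CURL_S3
    void registerCurlS3Credentials(); // placeholder
    #endif

The meson checks then ensure that at most one of the two macros is set to 1.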
Users can enable the new implementation with: -Dcurl-s3-store=enabled -Ds3-store=disabled --- src/libstore-tests/s3-url.cc | 2 +- src/libstore/aws-creds.cc | 2 +- src/libstore/include/nix/store/aws-creds.hh | 2 +- src/libstore/include/nix/store/s3-url.hh | 2 +- src/libstore/meson.build | 27 +++++++++++++++++++++ src/libstore/meson.options | 7 ++++++ src/libstore/package.nix | 7 +++++- src/libstore/s3-url.cc | 2 +- 8 files changed, 45 insertions(+), 6 deletions(-) diff --git a/src/libstore-tests/s3-url.cc b/src/libstore-tests/s3-url.cc index 56ec4e40e..60652dd9c 100644 --- a/src/libstore-tests/s3-url.cc +++ b/src/libstore-tests/s3-url.cc @@ -1,7 +1,7 @@ #include "nix/store/s3-url.hh" #include "nix/util/tests/gmock-matchers.hh" -#if NIX_WITH_S3_SUPPORT +#if NIX_WITH_S3_SUPPORT || NIX_WITH_CURL_S3 # include # include diff --git a/src/libstore/aws-creds.cc b/src/libstore/aws-creds.cc index 576f932d5..dc8584e1b 100644 --- a/src/libstore/aws-creds.cc +++ b/src/libstore/aws-creds.cc @@ -1,6 +1,6 @@ #include "nix/store/aws-creds.hh" -#if NIX_WITH_S3_SUPPORT +#if NIX_WITH_CURL_S3 # include # include "nix/store/s3-url.hh" diff --git a/src/libstore/include/nix/store/aws-creds.hh b/src/libstore/include/nix/store/aws-creds.hh index 67ff2e49c..16643c555 100644 --- a/src/libstore/include/nix/store/aws-creds.hh +++ b/src/libstore/include/nix/store/aws-creds.hh @@ -2,7 +2,7 @@ ///@file #include "nix/store/config.hh" -#if NIX_WITH_S3_SUPPORT +#if NIX_WITH_CURL_S3 # include "nix/store/s3-url.hh" # include "nix/util/error.hh" diff --git a/src/libstore/include/nix/store/s3-url.hh b/src/libstore/include/nix/store/s3-url.hh index 4f0a7b0c2..45c3b2d1c 100644 --- a/src/libstore/include/nix/store/s3-url.hh +++ b/src/libstore/include/nix/store/s3-url.hh @@ -2,7 +2,7 @@ ///@file #include "nix/store/config.hh" -#if NIX_WITH_S3_SUPPORT +#if NIX_WITH_S3_SUPPORT || NIX_WITH_CURL_S3 # include "nix/util/url.hh" # include "nix/util/util.hh" diff --git a/src/libstore/meson.build b/src/libstore/meson.build index 1086df3c2..e220e65cd 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -164,6 +164,33 @@ if aws_s3.found() endif deps_other += aws_s3 +# Curl-based S3 store support (alternative to AWS SDK) +# Check if curl supports AWS SigV4 (requires >= 7.75.0) +curl_supports_aws_sigv4 = curl.version().version_compare('>= 7.75.0') +# AWS CRT C++ for lightweight credential management +aws_crt_cpp = cxx.find_library('aws-crt-cpp', required : false) + +curl_s3_store_opt = get_option('curl-s3-store').require( + curl_supports_aws_sigv4, + error_message : 'curl-based S3 support requires curl >= 7.75.0', +).require( + aws_crt_cpp.found(), + error_message : 'curl-based S3 support requires aws-crt-cpp', +) + +# Make AWS SDK and curl-based S3 mutually exclusive +if aws_s3.found() and curl_s3_store_opt.enabled() + error( + 'Cannot enable both AWS SDK S3 support and curl-based S3 support. 
Please choose one.', + ) +endif + +if curl_s3_store_opt.enabled() + deps_other += aws_crt_cpp +endif + +configdata_pub.set('NIX_WITH_CURL_S3', curl_s3_store_opt.enabled().to_int()) + subdir('nix-meson-build-support/generate-header') generated_headers = [] diff --git a/src/libstore/meson.options b/src/libstore/meson.options index b8414068d..edc43bd45 100644 --- a/src/libstore/meson.options +++ b/src/libstore/meson.options @@ -33,3 +33,10 @@ option( value : '/nix/var/log/nix', description : 'path to store logs in for Nix', ) + +option( + 'curl-s3-store', + type : 'feature', + value : 'disabled', + description : 'Enable curl-based S3 binary cache store support (requires aws-crt-cpp and curl >= 7.75.0)', +) diff --git a/src/libstore/package.nix b/src/libstore/package.nix index d890d2256..1c08e466e 100644 --- a/src/libstore/package.nix +++ b/src/libstore/package.nix @@ -10,6 +10,7 @@ boost, curl, aws-sdk-cpp, + aws-crt-cpp, libseccomp, nlohmann_json, sqlite, @@ -25,6 +26,8 @@ withAWS ? # Default is this way because there have been issues building this dependency stdenv.hostPlatform == stdenv.buildPlatform && (stdenv.isLinux || stdenv.isDarwin), + + withCurlS3 ? false, }: let @@ -64,7 +67,8 @@ mkMesonLibrary (finalAttrs: { sqlite ] ++ lib.optional stdenv.hostPlatform.isLinux libseccomp - ++ lib.optional withAWS aws-sdk-cpp; + ++ lib.optional withAWS aws-sdk-cpp + ++ lib.optional withCurlS3 aws-crt-cpp; propagatedBuildInputs = [ nix-util @@ -74,6 +78,7 @@ mkMesonLibrary (finalAttrs: { mesonFlags = [ (lib.mesonEnable "seccomp-sandboxing" stdenv.hostPlatform.isLinux) (lib.mesonBool "embedded-sandbox-shell" embeddedSandboxShell) + (lib.mesonEnable "curl-s3-store" withCurlS3) ] ++ lib.optionals stdenv.hostPlatform.isLinux [ (lib.mesonOption "sandbox-shell" "${busybox-sandbox-shell}/bin/busybox") diff --git a/src/libstore/s3-url.cc b/src/libstore/s3-url.cc index 947de60b0..baefe5cba 100644 --- a/src/libstore/s3-url.cc +++ b/src/libstore/s3-url.cc @@ -1,6 +1,6 @@ #include "nix/store/s3-url.hh" -#if NIX_WITH_S3_SUPPORT +#if NIX_WITH_S3_SUPPORT || NIX_WITH_CURL_S3 # include "nix/util/error.hh" # include "nix/util/split.hh" From 2cbbb63628adf5e18150c59f49676d3d074e5eff Mon Sep 17 00:00:00 2001 From: Seth Flynn Date: Mon, 15 Sep 2025 22:58:34 -0400 Subject: [PATCH 236/332] ci: enable use of the experimental installer --- .../actions/install-nix-action/action.yaml | 49 +++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/.github/actions/install-nix-action/action.yaml b/.github/actions/install-nix-action/action.yaml index c299b3956..b9861131d 100644 --- a/.github/actions/install-nix-action/action.yaml +++ b/.github/actions/install-nix-action/action.yaml @@ -4,12 +4,18 @@ inputs: dogfood: description: "Whether to use Nix installed from the latest artifact from master branch" required: true # Be explicit about the fact that we are using unreleased artifacts + experimental-installer: + description: "Whether to use the experimental installer to install Nix" + default: false extra_nix_config: description: "Gets appended to `/etc/nix/nix.conf` if passed." 
install_url: description: "URL of the Nix installer" required: false default: "https://releases.nixos.org/nix/nix-2.30.2/install" + tarball_url: + description: "URL of the Nix tarball to use with the experimental installer" + required: false github_token: description: "Github token" required: true @@ -37,14 +43,57 @@ runs: gh run download "$RUN_ID" --repo "$DOGFOOD_REPO" -n "$INSTALLER_ARTIFACT" -D "$INSTALLER_DOWNLOAD_DIR" echo "installer-path=file://$INSTALLER_DOWNLOAD_DIR" >> "$GITHUB_OUTPUT" + TARBALL_PATH="$(find "$INSTALLER_DOWNLOAD_DIR" -name 'nix*.tar.xz' -print | head -n 1)" + echo "tarball-path=file://$TARBALL_PATH" >> "$GITHUB_OUTPUT" echo "::notice ::Dogfooding Nix installer from master (https://github.com/$DOGFOOD_REPO/actions/runs/$RUN_ID)" env: GH_TOKEN: ${{ inputs.github_token }} DOGFOOD_REPO: "NixOS/nix" + - name: "Download experimental installer" + shell: bash + id: download-experimental-nix-installer + if: ${{ inputs.experimental-installer == 'true' }} + run: | + if [ "$RUNNER_OS" == "Linux" ]; then + INSTALLER_OS="linux" + elif [ "$RUNNER_OS" == "macOS" ]; then + INSTALLER_OS="darwin" + else + echo "::error ::Unsupported RUNNER_OS: $RUNNER_OS" + fi + + if [ "$RUNNER_ARCH" == "X64" ]; then + INSTALLER_ARCH="x86_64" + elif [ "$RUNNER_ARCH" == "ARM64" ]; then + INSTALLER_ARCH="aarch64" + else + echo "::error ::Unsupported RUNNER_ARCH: $RUNNER_ARCH" + fi + + EXPERIMENTAL_INSTALLER_ARTIFACT="nix-installer-$INSTALLER_ARCH-$INSTALLER_OS" + EXPERIMENTAL_INSTALLER_PATH="$GITHUB_WORKSPACE/$EXPERIMENTAL_INSTALLER_ARTIFACT" + # TODO: This uses the latest release. It should probably be pinned, or dogfood the experimental repo's default branch - similar to the above + gh release download -R "$EXPERIMENTAL_INSTALLER_REPO" -D "$EXPERIMENTAL_INSTALLER_PATH" -p "nix-installer.sh" -p "$EXPERIMENTAL_INSTALLER_ARTIFACT" + chmod +x "$EXPERIMENTAL_INSTALLER_PATH/$EXPERIMENTAL_INSTALLER_ARTIFACT" + + echo "installer-path=$EXPERIMENTAL_INSTALLER_PATH" >> "$GITHUB_OUTPUT" + + echo "::notice Using experimental installer from $EXPERIMENTAL_INSTALLER_REPO (https://github.com/$EXPERIMENTAL_INSTALLER_REPO)" + env: + GH_TOKEN: ${{ inputs.github_token }} + EXPERIMENTAL_INSTALLER_REPO: "NixOS/experimental-nix-installer" - uses: cachix/install-nix-action@c134e4c9e34bac6cab09cf239815f9339aaaf84e # v31.5.1 + if: ${{ inputs.experimental-installer != 'true' }} with: # Ternary operator in GHA: https://www.github.com/actions/runner/issues/409#issuecomment-752775072 install_url: ${{ inputs.dogfood == 'true' && format('{0}/install', steps.download-nix-installer.outputs.installer-path) || inputs.install_url }} install_options: ${{ inputs.dogfood == 'true' && format('--tarball-url-prefix {0}', steps.download-nix-installer.outputs.installer-path) || '' }} extra_nix_config: ${{ inputs.extra_nix_config }} + - uses: DeterminateSystems/nix-installer-action@786fff0690178f1234e4e1fe9b536e94f5433196 # v20 + if: ${{ inputs.experimental-installer == 'true' }} + with: + diagnostic-endpoint: "" + local-root: ${{ steps.download-experimental-nix-installer.outputs.installer-path }} + nix-package-url: ${{ inputs.dogfood == 'true' && steps.download-nix-installer.outputs.tarball-path || (inputs.tarball_url || '') }} + extra-conf: ${{ inputs.extra_nix_config }} From d2293fb458feb3b75d4ed81b32136b335610218b Mon Sep 17 00:00:00 2001 From: Seth Flynn Date: Tue, 16 Sep 2025 00:47:02 -0400 Subject: [PATCH 237/332] ci: enable experimental installer tests --- .github/workflows/ci.yml | 23 ++++++++++++++++++++++- 1 file changed, 22 
insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index dcf0814d8..145bbe6d9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -135,9 +135,19 @@ jobs: - scenario: on ubuntu runs-on: ubuntu-24.04 os: linux + experimental-installer: false - scenario: on macos runs-on: macos-14 os: darwin + experimental-installer: false + - scenario: on ubuntu (experimental) + runs-on: ubuntu-24.04 + os: linux + experimental-installer: true + - scenario: on macos (experimental) + runs-on: macos-14 + os: darwin + experimental-installer: true name: installer test ${{ matrix.scenario }} runs-on: ${{ matrix.runs-on }} steps: @@ -149,11 +159,22 @@ jobs: path: out - name: Looking up the installer tarball URL id: installer-tarball-url - run: echo "installer-url=file://$GITHUB_WORKSPACE/out" >> "$GITHUB_OUTPUT" + run: | + echo "installer-url=file://$GITHUB_WORKSPACE/out" >> "$GITHUB_OUTPUT" + TARBALL_PATH="$(find "$GITHUB_WORKSPACE/out" -name 'nix*.tar.xz' -print | head -n 1)" + echo "tarball-path=file://$TARBALL_PATH" >> "$GITHUB_OUTPUT" - uses: cachix/install-nix-action@v31 + if: ${{ !matrix.experimental-installer }} with: install_url: ${{ format('{0}/install', steps.installer-tarball-url.outputs.installer-url) }} install_options: ${{ format('--tarball-url-prefix {0}', steps.installer-tarball-url.outputs.installer-url) }} + - uses: ./.github/actions/install-nix-action + if: ${{ matrix.experimental-installer }} + with: + dogfood: false + experimental-installer: true + tarball_url: ${{ steps.installer-tarball-url.outputs.tarball-path }} + github_token: ${{ secrets.GITHUB_TOKEN }} - run: sudo apt install fish zsh if: matrix.os == 'linux' - run: brew install fish From 92d7381826982f7193145e9fa786eb0f0b1420a2 Mon Sep 17 00:00:00 2001 From: Seth Flynn Date: Fri, 3 Oct 2025 02:01:03 -0400 Subject: [PATCH 238/332] ci: allow for using the latest build of the experimental installer Until these repos are potentially merged, this is good for dogfooding alongside the experimental installer. It also uses the more official `artifacts.nixos.org` endpoint to install stable releases now More immediately though, we need a patch for the experimental installer to really work in CI at all, and that hasn't landed in a tag yet. So, this lets us use it right from `main`! --- .../actions/install-nix-action/action.yaml | 49 +++++++++++++------ 1 file changed, 35 insertions(+), 14 deletions(-) diff --git a/.github/actions/install-nix-action/action.yaml b/.github/actions/install-nix-action/action.yaml index b9861131d..46abea179 100644 --- a/.github/actions/install-nix-action/action.yaml +++ b/.github/actions/install-nix-action/action.yaml @@ -7,6 +7,10 @@ inputs: experimental-installer: description: "Whether to use the experimental installer to install Nix" default: false + experimental-installer-version: + description: "Version of the experimental installer to use. If `latest`, the newest artifact from the default branch is used." + # TODO: This should probably be pinned to a release after https://github.com/NixOS/experimental-nix-installer/pull/49 lands in one + default: "latest" extra_nix_config: description: "Gets appended to `/etc/nix/nix.conf` if passed." 
install_url: @@ -50,36 +54,51 @@ runs: env: GH_TOKEN: ${{ inputs.github_token }} DOGFOOD_REPO: "NixOS/nix" - - name: "Download experimental installer" + - name: "Gather system info for experimental installer" shell: bash - id: download-experimental-nix-installer if: ${{ inputs.experimental-installer == 'true' }} run: | + echo "::notice Using experimental installer from $EXPERIMENTAL_INSTALLER_REPO (https://github.com/$EXPERIMENTAL_INSTALLER_REPO)" + if [ "$RUNNER_OS" == "Linux" ]; then - INSTALLER_OS="linux" + EXPERIMENTAL_INSTALLER_SYSTEM="linux" + echo "EXPERIMENTAL_INSTALLER_SYSTEM=$EXPERIMENTAL_INSTALLER_SYSTEM" >> "$GITHUB_ENV" elif [ "$RUNNER_OS" == "macOS" ]; then - INSTALLER_OS="darwin" + EXPERIMENTAL_INSTALLER_SYSTEM="darwin" + echo "EXPERIMENTAL_INSTALLER_SYSTEM=$EXPERIMENTAL_INSTALLER_SYSTEM" >> "$GITHUB_ENV" else echo "::error ::Unsupported RUNNER_OS: $RUNNER_OS" + exit 1 fi if [ "$RUNNER_ARCH" == "X64" ]; then - INSTALLER_ARCH="x86_64" + EXPERIMENTAL_INSTALLER_ARCH=x86_64 + echo "EXPERIMENTAL_INSTALLER_ARCH=$EXPERIMENTAL_INSTALLER_ARCH" >> "$GITHUB_ENV" elif [ "$RUNNER_ARCH" == "ARM64" ]; then - INSTALLER_ARCH="aarch64" + EXPERIMENTAL_INSTALLER_ARCH=aarch64 + echo "EXPERIMENTAL_INSTALLER_ARCH=$EXPERIMENTAL_INSTALLER_ARCH" >> "$GITHUB_ENV" else echo "::error ::Unsupported RUNNER_ARCH: $RUNNER_ARCH" + exit 1 fi - EXPERIMENTAL_INSTALLER_ARTIFACT="nix-installer-$INSTALLER_ARCH-$INSTALLER_OS" - EXPERIMENTAL_INSTALLER_PATH="$GITHUB_WORKSPACE/$EXPERIMENTAL_INSTALLER_ARTIFACT" - # TODO: This uses the latest release. It should probably be pinned, or dogfood the experimental repo's default branch - similar to the above - gh release download -R "$EXPERIMENTAL_INSTALLER_REPO" -D "$EXPERIMENTAL_INSTALLER_PATH" -p "nix-installer.sh" -p "$EXPERIMENTAL_INSTALLER_ARTIFACT" - chmod +x "$EXPERIMENTAL_INSTALLER_PATH/$EXPERIMENTAL_INSTALLER_ARTIFACT" + echo "EXPERIMENTAL_INSTALLER_ARTIFACT=nix-installer-$EXPERIMENTAL_INSTALLER_ARCH-$EXPERIMENTAL_INSTALLER_SYSTEM" >> "$GITHUB_ENV" + env: + EXPERIMENTAL_INSTALLER_REPO: "NixOS/experimental-nix-installer" + - name: "Download latest experimental installer" + shell: bash + id: download-latest-experimental-installer + if: ${{ inputs.experimental-installer == 'true' && inputs.experimental-installer-version == 'latest' }} + run: | + RUN_ID=$(gh run list --repo "$EXPERIMENTAL_INSTALLER_REPO" --workflow ci.yml --branch main --status success --json databaseId --jq ".[0].databaseId") - echo "installer-path=$EXPERIMENTAL_INSTALLER_PATH" >> "$GITHUB_OUTPUT" + EXPERIMENTAL_INSTALLER_DOWNLOAD_DIR="$GITHUB_WORKSPACE/$EXPERIMENTAL_INSTALLER_ARTIFACT" + mkdir -p "$EXPERIMENTAL_INSTALLER_DOWNLOAD_DIR" - echo "::notice Using experimental installer from $EXPERIMENTAL_INSTALLER_REPO (https://github.com/$EXPERIMENTAL_INSTALLER_REPO)" + gh run download "$RUN_ID" --repo "$EXPERIMENTAL_INSTALLER_REPO" -n "$EXPERIMENTAL_INSTALLER_ARTIFACT" -D "$EXPERIMENTAL_INSTALLER_DOWNLOAD_DIR" + # Executable permissions are lost in artifacts + find $EXPERIMENTAL_INSTALLER_DOWNLOAD_DIR -type f -exec chmod +x {} + + echo "installer-path=$EXPERIMENTAL_INSTALLER_DOWNLOAD_DIR" >> "$GITHUB_OUTPUT" env: GH_TOKEN: ${{ inputs.github_token }} EXPERIMENTAL_INSTALLER_REPO: "NixOS/experimental-nix-installer" @@ -94,6 +113,8 @@ runs: if: ${{ inputs.experimental-installer == 'true' }} with: diagnostic-endpoint: "" - local-root: ${{ steps.download-experimental-nix-installer.outputs.installer-path }} + # TODO: It'd be nice to use `artifacts.nixos.org` for both of these, maybe through an 
`/experimental-installer/latest` endpoint? or `/commit/`? + local-root: ${{ inputs.experimental-installer-version == 'latest' && steps.download-latest-experimental-installer.outputs.installer-path || '' }} + source-url: ${{ inputs.experimental-installer-version != 'latest' && 'https://artifacts.nixos.org/experimental-installer/tag/${{ inputs.experimental-installer-version }}/${{ env.EXPERIMENTAL_INSTALLER_ARTIFACT }}' || '' }} nix-package-url: ${{ inputs.dogfood == 'true' && steps.download-nix-installer.outputs.tarball-path || (inputs.tarball_url || '') }} extra-conf: ${{ inputs.extra_nix_config }} From 584ef0ffd30c4a06b6d664219b794e2dedf7e844 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 3 Oct 2025 14:34:13 +0200 Subject: [PATCH 239/332] Add external builders These are helper programs that execute derivations for specified system types (e.g. using QEMU to emulate another system type). To use, set `external-builders`: external-builders = [{"systems": ["aarch64-linux"], "program": "/path/to/external-builder.py"}] The external builder gets one command line argument, the path to a JSON file containing all necessary information about the derivation: { "args": [...], "builder": "/nix/store/kwcyvgdg98n98hqapaz8sw92pc2s78x6-bash-5.2p37/bin/bash", "env": { "HOME": "/homeless-shelter", ... }, "realStoreDir": "/tmp/nix/nix/store", "storeDir": "/nix/store", "tmpDir": "/tmp/nix-shell.dzQ2hE/nix-build-patchelf-0.14.3.drv-46/build", "tmpDirInSandbox": "/build" } Co-authored-by: Cole Helbling --- src/libstore/globals.cc | 31 ++++- src/libstore/include/nix/store/globals.hh | 99 ++++++++++++++++ src/libstore/unix/build/derivation-builder.cc | 43 ++++--- .../unix/build/external-derivation-builder.cc | 110 ++++++++++++++++++ src/libutil/experimental-features.cc | 8 ++ .../include/nix/util/experimental-features.hh | 1 + 6 files changed, 274 insertions(+), 18 deletions(-) create mode 100644 src/libstore/unix/build/external-derivation-builder.cc diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index 612e79ab0..58a649fc5 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -341,10 +341,15 @@ PathsInChroot BaseSetting::parse(const std::string & str) const i.pop_back(); } size_t p = i.find('='); - if (p == std::string::npos) - pathsInChroot[i] = {.source = i, .optional = optional}; - else - pathsInChroot[i.substr(0, p)] = {.source = i.substr(p + 1), .optional = optional}; + std::string inside, outside; + if (p == std::string::npos) { + inside = i; + outside = i; + } else { + inside = i.substr(0, p); + outside = i.substr(p + 1); + } + pathsInChroot[inside] = {.source = outside, .optional = optional}; } return pathsInChroot; } @@ -374,6 +379,24 @@ unsigned int MaxBuildJobsSetting::parse(const std::string & str) const } } +NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(Settings::ExternalBuilder, systems, program, args); + +template<> +Settings::ExternalBuilders BaseSetting::parse(const std::string & str) const +{ + try { + return nlohmann::json::parse(str).template get(); + } catch (std::exception & e) { + throw UsageError("parsing setting '%s': %s", name, e.what()); + } +} + +template<> +std::string BaseSetting::to_string() const +{ + return nlohmann::json(value).dump(); +} + template<> void BaseSetting::appendOrSet(PathsInChroot newValue, bool append) { diff --git a/src/libstore/include/nix/store/globals.hh b/src/libstore/include/nix/store/globals.hh index 2cd92467c..ae8990eab 100644 --- a/src/libstore/include/nix/store/globals.hh +++ b/src/libstore/include/nix/store/globals.hh @@ 
-1372,6 +1372,105 @@ public: Default is 0, which disables the warning. Set it to 1 to warn on all paths. )"}; + + struct ExternalBuilder + { + std::vector systems; + Path program; + std::vector args; + }; + + using ExternalBuilders = std::vector; + + Setting externalBuilders{ + this, + {}, + "external-builders", + R"( + Helper programs that execute derivations. + + The program is passed a JSON document that describes the build environment as the final argument. + The JSON document looks like this: + + { + "args": [ + "-e", + "/nix/store/vj1c3wf9…-source-stdenv.sh", + "/nix/store/shkw4qm9…-default-builder.sh" + ], + "builder": "/nix/store/s1qkj0ph…-bash-5.2p37/bin/bash", + "env": { + "HOME": "/homeless-shelter", + "NIX_BUILD_CORES": "14", + "NIX_BUILD_TOP": "/build", + "NIX_LOG_FD": "2", + "NIX_STORE": "/nix/store", + "PATH": "/path-not-set", + "PWD": "/build", + "TEMP": "/build", + "TEMPDIR": "/build", + "TERM": "xterm-256color", + "TMP": "/build", + "TMPDIR": "/build", + "__structuredAttrs": "", + "buildInputs": "", + "builder": "/nix/store/s1qkj0ph…-bash-5.2p37/bin/bash", + "cmakeFlags": "", + "configureFlags": "", + "depsBuildBuild": "", + "depsBuildBuildPropagated": "", + "depsBuildTarget": "", + "depsBuildTargetPropagated": "", + "depsHostHost": "", + "depsHostHostPropagated": "", + "depsTargetTarget": "", + "depsTargetTargetPropagated": "", + "doCheck": "1", + "doInstallCheck": "1", + "mesonFlags": "", + "name": "hello-2.12.2", + "nativeBuildInputs": "/nix/store/l31j72f1…-version-check-hook", + "out": "/nix/store/2yx2prgx…-hello-2.12.2", + "outputs": "out", + "patches": "", + "pname": "hello", + "postInstallCheck": "stat \"${!outputBin}/bin/hello\"\n", + "propagatedBuildInputs": "", + "propagatedNativeBuildInputs": "", + "src": "/nix/store/dw402azx…-hello-2.12.2.tar.gz", + "stdenv": "/nix/store/i8bw5nqg…-stdenv-linux", + "strictDeps": "", + "system": "aarch64-linux", + "version": "2.12.2" + }, + "realStoreDir": "/nix/store", + "storeDir": "/nix/store", + "system": "aarch64-linux", + "tmpDir": "/private/tmp/nix-build-hello-2.12.2.drv-0/build", + "tmpDirInSandbox": "/build", + "topTmpDir": "/private/tmp/nix-build-hello-2.12.2.drv-0" + } + )", + {}, // aliases + true, // document default + // NOTE(cole-h): even though we can make the experimental feature required here, the errors + // are not as good (it just becomes a warning if you try to use this setting without the + // experimental feature) + // + // With this commented out: + // + // error: experimental Nix feature 'external-builders' is disabled; add '--extra-experimental-features + // external-builders' to enable it + // + // With this uncommented: + // + // warning: Ignoring setting 'external-builders' because experimental feature 'external-builders' is not enabled + // error: Cannot build '/nix/store/vwsp4qd8…-opentofu-1.10.2.drv'. + // Reason: required system or feature not available + // Required system: 'aarch64-linux' with features {} + // Current system: 'aarch64-darwin' with features {apple-virt, benchmark, big-parallel, nixos-test} + // Xp::ExternalBuilders + }; }; // FIXME: don't use a global variable. diff --git a/src/libstore/unix/build/derivation-builder.cc b/src/libstore/unix/build/derivation-builder.cc index 7cf72fb84..e2bcb1b84 100644 --- a/src/libstore/unix/build/derivation-builder.cc +++ b/src/libstore/unix/build/derivation-builder.cc @@ -229,6 +229,12 @@ protected: return acquireUserLock(1, false); } + /** + * Throw an exception if we can't do this derivation because of + * missing system features. 
+ */ + virtual void checkSystem(); + /** * Return the paths that should be made available in the sandbox. * This includes: @@ -666,21 +672,8 @@ static bool checkNotWorldWritable(std::filesystem::path path) return true; } -std::optional DerivationBuilderImpl::startBuild() +void DerivationBuilderImpl::checkSystem() { - if (useBuildUsers()) { - if (!buildUser) - buildUser = getBuildUser(); - - if (!buildUser) - return std::nullopt; - } - - /* Make sure that no other processes are executing under the - sandbox uids. This must be done before any chownToBuilder() - calls. */ - prepareUser(); - /* Right platform? */ if (!drvOptions.canBuildLocally(store, drv)) { auto msg = @@ -704,6 +697,24 @@ std::optional DerivationBuilderImpl::startBuild() throw BuildError(BuildResult::Failure::InputRejected, msg); } +} + +std::optional DerivationBuilderImpl::startBuild() +{ + if (useBuildUsers()) { + if (!buildUser) + buildUser = getBuildUser(); + + if (!buildUser) + return std::nullopt; + } + + checkSystem(); + + /* Make sure that no other processes are executing under the + sandbox uids. This must be done before any chownToBuilder() + calls. */ + prepareUser(); auto buildDir = store.config->getBuildDir(); @@ -1909,12 +1920,16 @@ StorePath DerivationBuilderImpl::makeFallbackPath(const StorePath & path) #include "chroot-derivation-builder.cc" #include "linux-derivation-builder.cc" #include "darwin-derivation-builder.cc" +#include "external-derivation-builder.cc" namespace nix { std::unique_ptr makeDerivationBuilder( LocalStore & store, std::unique_ptr miscMethods, DerivationBuilderParams params) { + if (auto builder = ExternalDerivationBuilder::newIfSupported(store, miscMethods, params)) + return builder; + bool useSandbox = false; /* Are we doing a sandboxed build? */ diff --git a/src/libstore/unix/build/external-derivation-builder.cc b/src/libstore/unix/build/external-derivation-builder.cc new file mode 100644 index 000000000..4d3eba6db --- /dev/null +++ b/src/libstore/unix/build/external-derivation-builder.cc @@ -0,0 +1,110 @@ +namespace nix { + +struct ExternalDerivationBuilder : DerivationBuilderImpl +{ + Settings::ExternalBuilder externalBuilder; + + ExternalDerivationBuilder( + LocalStore & store, + std::unique_ptr miscMethods, + DerivationBuilderParams params, + Settings::ExternalBuilder externalBuilder) + : DerivationBuilderImpl(store, std::move(miscMethods), std::move(params)) + , externalBuilder(std::move(externalBuilder)) + { + experimentalFeatureSettings.require(Xp::ExternalBuilders); + } + + static std::unique_ptr newIfSupported( + LocalStore & store, std::unique_ptr & miscMethods, DerivationBuilderParams & params) + { + for (auto & handler : settings.externalBuilders.get()) { + for (auto & system : handler.systems) + if (params.drv.platform == system) + return std::make_unique( + store, std::move(miscMethods), std::move(params), handler); + } + return {}; + } + + Path tmpDirInSandbox() override + { + /* In a sandbox, for determinism, always use the same temporary + directory. 
*/ + return "/build"; + } + + void setBuildTmpDir() override + { + tmpDir = topTmpDir + "/build"; + createDir(tmpDir, 0700); + } + + void checkSystem() override {} + + void startChild() override + { + if (drvOptions.getRequiredSystemFeatures(drv).count("recursive-nix")) + throw Error("'recursive-nix' is not supported yet by external derivation builders"); + + auto json = nlohmann::json::object(); + + json.emplace("builder", drv.builder); + { + auto l = nlohmann::json::array(); + for (auto & i : drv.args) + l.push_back(rewriteStrings(i, inputRewrites)); + json.emplace("args", std::move(l)); + } + { + auto j = nlohmann::json::object(); + for (auto & [name, value] : env) + j.emplace(name, rewriteStrings(value, inputRewrites)); + json.emplace("env", std::move(j)); + } + json.emplace("topTmpDir", topTmpDir); + json.emplace("tmpDir", tmpDir); + json.emplace("tmpDirInSandbox", tmpDirInSandbox()); + json.emplace("storeDir", store.storeDir); + json.emplace("realStoreDir", store.config->realStoreDir.get()); + json.emplace("system", drv.platform); + + // TODO(cole-h): writing this to stdin is too much effort right now, if we want to revisit + // that, see this comment by Eelco about how to make it not suck: + // https://github.com/DeterminateSystems/nix-src/pull/141#discussion_r2205493257 + auto jsonFile = std::filesystem::path{topTmpDir} / "build.json"; + writeFile(jsonFile, json.dump()); + + pid = startProcess([&]() { + openSlave(); + try { + commonChildInit(); + + Strings args = {externalBuilder.program}; + + if (!externalBuilder.args.empty()) { + args.insert(args.end(), externalBuilder.args.begin(), externalBuilder.args.end()); + } + + args.insert(args.end(), jsonFile); + + if (chdir(tmpDir.c_str()) == -1) + throw SysError("changing into '%1%'", tmpDir); + + chownToBuilder(topTmpDir); + + setUser(); + + debug("executing external builder: %s", concatStringsSep(" ", args)); + execv(externalBuilder.program.c_str(), stringsToCharPtrs(args).data()); + + throw SysError("executing '%s'", externalBuilder.program); + } catch (...) { + handleChildException(true); + _exit(1); + } + }); + } +}; + +} // namespace nix diff --git a/src/libutil/experimental-features.cc b/src/libutil/experimental-features.cc index 60d6bf74d..0edd5a585 100644 --- a/src/libutil/experimental-features.cc +++ b/src/libutil/experimental-features.cc @@ -304,6 +304,14 @@ constexpr std::array xpFeatureDetails )", .trackingUrl = "https://github.com/NixOS/nix/milestone/55", }, + { + .tag = Xp::ExternalBuilders, + .name = "external-builders", + .description = R"( + Enables support for external builders / sandbox providers. 
+ )", + .trackingUrl = "", + }, { .tag = Xp::BLAKE3Hashes, .name = "blake3-hashes", diff --git a/src/libutil/include/nix/util/experimental-features.hh b/src/libutil/include/nix/util/experimental-features.hh index 0a8f15863..73c4eeca4 100644 --- a/src/libutil/include/nix/util/experimental-features.hh +++ b/src/libutil/include/nix/util/experimental-features.hh @@ -37,6 +37,7 @@ enum struct ExperimentalFeature { MountedSSHStore, VerifiedFetches, PipeOperators, + ExternalBuilders, BLAKE3Hashes, }; From 73e4c40e648f6bd3053648df66b1b9c391217b9b Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 3 Oct 2025 15:07:08 +0200 Subject: [PATCH 240/332] Add test for external-builders --- tests/functional/external-builders.sh | 50 +++++++++++++++++++++++++++ tests/functional/meson.build | 1 + 2 files changed, 51 insertions(+) create mode 100644 tests/functional/external-builders.sh diff --git a/tests/functional/external-builders.sh b/tests/functional/external-builders.sh new file mode 100644 index 000000000..4c1d5636a --- /dev/null +++ b/tests/functional/external-builders.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash + +source common.sh + +TODO_NixOS + +needLocalStore "'--external-builders' can’t be used with the daemon" + +expr="$TEST_ROOT/expr.nix" +cat > "$expr" < \$out + ''; +} +EOF + +external_builder="$TEST_ROOT/external-builder.sh" +cat > "$external_builder" <> \$out +EOF +chmod +x "$external_builder" + +nix build -L --file "$expr" --out-link "$TEST_ROOT/result" \ + --extra-experimental-features external-builders \ + --external-builders "[{\"systems\": [\"x68_46-xunil\"], \"args\": [\"bla\"], \"program\": \"$external_builder\"}]" + +[[ $(cat "$TEST_ROOT/result") = foobar ]] diff --git a/tests/functional/meson.build b/tests/functional/meson.build index 368f60452..6f649c836 100644 --- a/tests/functional/meson.build +++ b/tests/functional/meson.build @@ -174,6 +174,7 @@ suites = [ 'extra-sandbox-profile.sh', 'help.sh', 'symlinks.sh', + 'external-builders.sh', ], 'workdir' : meson.current_source_dir(), }, From 7ec1427fc33e2287dd4c1d3f750f9a2ba416a6dc Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Fri, 3 Oct 2025 12:03:25 -0700 Subject: [PATCH 241/332] libstore: fixup fakeSSH check This broke invocations like: NIX_SSHOPTS='-p2222 -oUserKnownHostsFile=/dev/null -oStrictHostKeyChecking=no' nix copy /nix/store/......-foo --to ssh-ng://root@localhost In Nix 2.30.2, fakeSSH was enabled when the "thing I want to connect to" was plain old "localhost". Previously, this check was written as: , fakeSSH(host == "localhost") Given the above invocation, `host` would have been `root@localhost`, and thus `fakeSSH` would be `false` because `root@localhost` != `localhost`. However, since 49ba06175ebc632a4c043e944ac6d9faf6a3ef2a, `authority.host` returned _just_ the host (`localhost`, no user) and erroneously enabled `fakeSSH` in this case, causing `NIX_SSHOPTS` to be ignored (since, when `fakeSSH` is `true`, `SSHMaster::startCommand` doesn't call `addCommonSSHOpts`). `authority.to_string()` accurately returns the expected `root@localhost` format (given the above invocation), fixing this. 
--- src/libstore/ssh.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstore/ssh.cc b/src/libstore/ssh.cc index 0f1dba1e9..1a9908366 100644 --- a/src/libstore/ssh.cc +++ b/src/libstore/ssh.cc @@ -78,7 +78,7 @@ SSHMaster::SSHMaster( oss << authority.host; return std::move(oss).str(); }()) - , fakeSSH(authority.host == "localhost") + , fakeSSH(authority.to_string() == "localhost") , keyFile(keyFile) , sshPublicHostKey(parsePublicHostKey(authority.host, sshPublicHostKey)) , useMaster(useMaster && !fakeSSH) From 76a92985d7c8495ec45aa426c9f85c1cc36ddd6d Mon Sep 17 00:00:00 2001 From: Taeer Bar-Yam Date: Mon, 29 Sep 2025 13:13:15 -0400 Subject: [PATCH 242/332] libexpr: allocate ExprSelect's AttrName vector in Expr::alloc --- src/libexpr/eval.cc | 14 +++++----- src/libexpr/include/nix/expr/nixexpr.hh | 34 ++++++++++++++++++++----- src/libexpr/nixexpr.cc | 6 ++--- src/libexpr/parser.y | 8 +++--- 4 files changed, 42 insertions(+), 20 deletions(-) diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 20ebe026a..8cb647c5f 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -1341,7 +1341,7 @@ void ExprVar::eval(EvalState & state, Env & env, Value & v) v = *v2; } -static std::string showAttrPath(EvalState & state, Env & env, const AttrPath & attrPath) +static std::string showAttrPath(EvalState & state, Env & env, std::span attrPath) { std::ostringstream out; bool first = true; @@ -1377,10 +1377,10 @@ void ExprSelect::eval(EvalState & state, Env & env, Value & v) env, getPos(), "while evaluating the attribute '%1%'", - showAttrPath(state, env, attrPath)) + showAttrPath(state, env, getAttrPath())) : nullptr; - for (auto & i : attrPath) { + for (auto & i : getAttrPath()) { state.nrLookups++; const Attr * j; auto name = getName(i, state, env); @@ -1418,7 +1418,7 @@ void ExprSelect::eval(EvalState & state, Env & env, Value & v) auto origin = std::get_if(&pos2r.origin); if (!(origin && *origin == state.derivationInternal)) state.addErrorTrace( - e, pos2, "while evaluating the attribute '%1%'", showAttrPath(state, env, attrPath)); + e, pos2, "while evaluating the attribute '%1%'", showAttrPath(state, env, getAttrPath())); } throw; } @@ -1429,13 +1429,13 @@ void ExprSelect::eval(EvalState & state, Env & env, Value & v) Symbol ExprSelect::evalExceptFinalSelect(EvalState & state, Env & env, Value & attrs) { Value vTmp; - Symbol name = getName(attrPath[attrPath.size() - 1], state, env); + Symbol name = getName(attrPathStart[nAttrPath - 1], state, env); - if (attrPath.size() == 1) { + if (nAttrPath == 1) { e->eval(state, env, vTmp); } else { ExprSelect init(*this); - init.attrPath.pop_back(); + init.nAttrPath--; init.eval(state, env, vTmp); } attrs = vTmp; diff --git a/src/libexpr/include/nix/expr/nixexpr.hh b/src/libexpr/include/nix/expr/nixexpr.hh index 2af6039cd..512999020 100644 --- a/src/libexpr/include/nix/expr/nixexpr.hh +++ b/src/libexpr/include/nix/expr/nixexpr.hh @@ -2,8 +2,10 @@ ///@file #include +#include #include #include +#include #include "nix/expr/gc-small-vector.hh" #include "nix/expr/value.hh" @@ -79,9 +81,11 @@ struct AttrName : expr(e) {}; }; +static_assert(std::is_trivially_copy_constructible_v); + typedef std::vector AttrPath; -std::string showAttrPath(const SymbolTable & symbols, const AttrPath & attrPath); +std::string showAttrPath(const SymbolTable & symbols, std::span attrPath); using UpdateQueue = SmallTemporaryValueVector; @@ -288,20 +292,33 @@ struct ExprInheritFrom : ExprVar struct ExprSelect : Expr { PosIdx pos; + uint32_t nAttrPath; Expr 
*e, *def; - AttrPath attrPath; - ExprSelect(const PosIdx & pos, Expr * e, AttrPath attrPath, Expr * def) + AttrName * attrPathStart; + + ExprSelect( + std::pmr::polymorphic_allocator & alloc, + const PosIdx & pos, + Expr * e, + std::span attrPath, + Expr * def) : pos(pos) + , nAttrPath(attrPath.size()) , e(e) , def(def) - , attrPath(std::move(attrPath)) {}; + , attrPathStart(alloc.allocate_object(nAttrPath)) + { + std::ranges::copy(attrPath, attrPathStart); + }; - ExprSelect(const PosIdx & pos, Expr * e, Symbol name) + ExprSelect(std::pmr::polymorphic_allocator & alloc, const PosIdx & pos, Expr * e, Symbol name) : pos(pos) + , nAttrPath(1) , e(e) , def(0) + , attrPathStart((alloc.allocate_object())) { - attrPath.push_back(AttrName(name)); + *attrPathStart = AttrName(name); }; PosIdx getPos() const override @@ -309,6 +326,11 @@ struct ExprSelect : Expr return pos; } + std::span getAttrPath() const + { + return {attrPathStart, nAttrPath}; + } + /** * Evaluate the `a.b.c` part of `a.b.c.d`. This exists mostly for the purpose of :doc in the repl. * diff --git a/src/libexpr/nixexpr.cc b/src/libexpr/nixexpr.cc index 014b85f20..5b9d17d49 100644 --- a/src/libexpr/nixexpr.cc +++ b/src/libexpr/nixexpr.cc @@ -57,7 +57,7 @@ void ExprSelect::show(const SymbolTable & symbols, std::ostream & str) const { str << "("; e->show(symbols, str); - str << ")." << showAttrPath(symbols, attrPath); + str << ")." << showAttrPath(symbols, getAttrPath()); if (def) { str << " or ("; def->show(symbols, str); @@ -261,7 +261,7 @@ void ExprPos::show(const SymbolTable & symbols, std::ostream & str) const str << "__curPos"; } -std::string showAttrPath(const SymbolTable & symbols, const AttrPath & attrPath) +std::string showAttrPath(const SymbolTable & symbols, std::span attrPath) { std::ostringstream out; bool first = true; @@ -362,7 +362,7 @@ void ExprSelect::bindVars(EvalState & es, const std::shared_ptr e->bindVars(es, env); if (def) def->bindVars(es, env); - for (auto & i : attrPath) + for (auto & i : getAttrPath()) if (!i.symbol) i.expr->bindVars(es, env); } diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y index bc1eb056e..56e65acfb 100644 --- a/src/libexpr/parser.y +++ b/src/libexpr/parser.y @@ -282,9 +282,9 @@ expr_app expr_select : expr_simple '.' attrpath - { $$ = new ExprSelect(CUR_POS, $1, std::move(*$3), nullptr); delete $3; } + { $$ = new ExprSelect(state->alloc, CUR_POS, $1, std::move(*$3), nullptr); delete $3; } | expr_simple '.' attrpath OR_KW expr_select - { $$ = new ExprSelect(CUR_POS, $1, std::move(*$3), $5); delete $3; $5->warnIfCursedOr(state->symbols, state->positions); } + { $$ = new ExprSelect(state->alloc, CUR_POS, $1, std::move(*$3), $5); delete $3; $5->warnIfCursedOr(state->symbols, state->positions); } | /* Backwards compatibility: because Nixpkgs has a function named ‘or’, allow stuff like ‘map or [...]’. This production is problematic (see https://github.com/NixOS/nix/issues/11118) and will be refactored in the @@ -343,7 +343,7 @@ expr_simple /* Let expressions `let {..., body = ...}' are just desugared into `(rec {..., body = ...}).body'. 
*/ | LET '{' binds '}' - { $3->recursive = true; $3->pos = CUR_POS; $$ = new ExprSelect(noPos, $3, state->s.body); } + { $3->recursive = true; $3->pos = CUR_POS; $$ = new ExprSelect(state->alloc, noPos, $3, state->s.body); } | REC '{' binds '}' { $3->recursive = true; $3->pos = CUR_POS; $$ = $3; } | '{' binds1 '}' @@ -447,7 +447,7 @@ binds1 $accum->attrs.emplace( i.symbol, ExprAttrs::AttrDef( - new ExprSelect(iPos, from, i.symbol), + new ExprSelect(state->alloc, iPos, from, i.symbol), iPos, ExprAttrs::AttrDef::Kind::InheritedFrom)); } From 39109c05be66c7dde854be3021c24183c92bf6bb Mon Sep 17 00:00:00 2001 From: Taeer Bar-Yam Date: Fri, 3 Oct 2025 12:49:55 -0400 Subject: [PATCH 243/332] libexpr: allocate ExprOpHasAttr's AttrPath in Exprs::alloc --- src/libexpr/include/nix/expr/nixexpr.hh | 10 +++++++--- src/libexpr/parser.y | 2 +- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/src/libexpr/include/nix/expr/nixexpr.hh b/src/libexpr/include/nix/expr/nixexpr.hh index 512999020..b66dba4f3 100644 --- a/src/libexpr/include/nix/expr/nixexpr.hh +++ b/src/libexpr/include/nix/expr/nixexpr.hh @@ -348,10 +348,14 @@ struct ExprSelect : Expr struct ExprOpHasAttr : Expr { Expr * e; - AttrPath attrPath; - ExprOpHasAttr(Expr * e, AttrPath attrPath) + std::span attrPath; + + ExprOpHasAttr(std::pmr::polymorphic_allocator alloc, Expr * e, std::vector attrPath) : e(e) - , attrPath(std::move(attrPath)) {}; + , attrPath({alloc.allocate_object(attrPath.size()), attrPath.size()}) + { + std::ranges::copy(attrPath, this->attrPath.begin()); + }; PosIdx getPos() const override { diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y index 56e65acfb..9186fcf4b 100644 --- a/src/libexpr/parser.y +++ b/src/libexpr/parser.y @@ -261,7 +261,7 @@ expr_op | expr_op OR expr_op { $$ = new ExprOpOr(state->at(@2), $1, $3); } | expr_op IMPL expr_op { $$ = new ExprOpImpl(state->at(@2), $1, $3); } | expr_op UPDATE expr_op { $$ = new ExprOpUpdate(state->at(@2), $1, $3); } - | expr_op '?' attrpath { $$ = new ExprOpHasAttr($1, std::move(*$3)); delete $3; } + | expr_op '?' attrpath { $$ = new ExprOpHasAttr(state->alloc, $1, std::move(*$3)); delete $3; } | expr_op '+' expr_op { $$ = new ExprConcatStrings(state->at(@2), false, new std::vector >({{state->at(@1), $1}, {state->at(@3), $3}})); } | expr_op '-' expr_op { $$ = new ExprCall(state->at(@2), new ExprVar(state->s.sub), {$1, $3}); } From dce1a893d0206083cbab19b9211ddb01eaa53f70 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sun, 5 Oct 2025 02:30:21 +0300 Subject: [PATCH 244/332] treewide: Remove toView() because it leads to segfaults when compiled with newer nixpkgs Firstly, this is now available on darwin where the default in llvm 19. Secondly, this leads to very weird segfaults when building with newer nixpkgs for some reason. (It's UB after all). This appears when building with the following: mesonComponentOverrides = finalAttrs: prevAttrs: { mesonBuildType = "debugoptimized"; dontStrip = true; doCheck = false; separateDebugInfo = false; preConfigure = (prevAttrs.preConfigure or "") + '' case "$mesonBuildType" in release|minsize|debugoptimized) appendToVar mesonFlags "-Db_lto=true" ;; *) appendToVar mesonFlags "-Db_lto=false" ;; esac ''; }; And with the following nixpkgs input: nix build ".#nix-cli" -L --override-input nixpkgs "https://releases.nixos.org/nixos/unstable/nixos-25.11pre870157.7df7ff7d8e00/nixexprs.tar.xz" Stacktrace: #0 0x00000000006afdc0 in ?? 
() #1 0x00007ffff71cebb6 in _Unwind_ForcedUnwind_Phase2 () from /nix/store/41ym1jm1b7j3rhglk82gwg9jml26z1km-gcc-14.3.0-lib/lib/libgcc_s.so.1 #2 0x00007ffff71cf5b5 in _Unwind_Resume () from /nix/store/41ym1jm1b7j3rhglk82gwg9jml26z1km-gcc-14.3.0-lib/lib/libgcc_s.so.1 #3 0x00007ffff7eac7d8 in std::basic_ios >::~basic_ios (this=, this=) at /nix/store/82kmz7r96navanrc2fgckh2bamiqrgsw-gcc-14.3.0/include/c++/14.3.0/bits/basic_ios.h:286 #4 std::__cxx11::basic_ostringstream, std::allocator >::basic_ostringstream (this=, this=) at /nix/store/82kmz7r96navanrc2fgckh2bamiqrgsw-gcc-14.3.0/include/c++/14.3.0/sstream:806 #5 nix::SimpleLogger::logEI (this=, ei=...) at ../logging.cc:121 #6 0x00007ffff7515794 in nix::Logger::logEI (this=0x675450, lvl=nix::lvlError, ei=...) at /nix/store/bkshji3nnxmrmgwa4n2kaxadajkwvn65-nix-util-2.32.0pre-dev/include/nix/util/logging.hh:144 #7 nix::handleExceptions (programName=..., fun=...) at ../shared.cc:336 #8 0x000000000047b76b in main (argc=, argv=) at /nix/store/82kmz7r96navanrc2fgckh2bamiqrgsw-gcc-14.3.0/include/c++/14.3.0/bits/new_allocator.h:88 --- src/libcmd/repl.cc | 2 +- src/libexpr/eval.cc | 4 ++-- src/libexpr/primops.cc | 4 ++-- src/libexpr/primops/fromTOML.cc | 2 +- src/libexpr/print.cc | 2 +- src/libmain/progress-bar.cc | 2 +- src/libstore/daemon.cc | 2 +- src/libutil/include/nix/util/strings.hh | 5 ----- src/libutil/logging.cc | 2 +- src/libutil/strings.cc | 17 ----------------- src/nix/config-check.cc | 6 +++--- src/nix/nix-build/nix-build.cc | 4 ++-- src/nix/nix-env/user-env.cc | 2 +- 13 files changed, 16 insertions(+), 38 deletions(-) diff --git a/src/libcmd/repl.cc b/src/libcmd/repl.cc index 38d06336b..a308b731d 100644 --- a/src/libcmd/repl.cc +++ b/src/libcmd/repl.cc @@ -669,7 +669,7 @@ ProcessLineResult NixRepl::processLine(std::string line) ss << "No documentation found.\n\n"; } - auto markdown = toView(ss); + auto markdown = ss.view(); logger->cout(trim(renderMarkdownToTerminal(markdown))); } else diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 8cb647c5f..db17f103b 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -591,7 +591,7 @@ std::optional EvalState::getDoc(Value & v) .name = name, .arity = 0, // FIXME: figure out how deep by syntax only? It's not semantically useful though... 
.args = {}, - .doc = makeImmutableString(toView(s)), // NOTE: memory leak when compiled without GC + .doc = makeImmutableString(s.view()), // NOTE: memory leak when compiled without GC }; } if (isFunctor(v)) { @@ -1811,7 +1811,7 @@ void ExprAssert::eval(EvalState & state, Env & env, Value & v) if (!state.evalBool(env, cond, pos, "in the condition of the assert statement")) { std::ostringstream out; cond->show(state.symbols, out); - auto exprStr = toView(out); + auto exprStr = out.view(); if (auto eq = dynamic_cast(cond)) { try { diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index a8ac8d159..86cb00131 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -2412,7 +2412,7 @@ static void prim_toXML(EvalState & state, const PosIdx pos, Value ** args, Value std::ostringstream out; NixStringContext context; printValueAsXML(state, true, false, *args[0], out, context, pos); - v.mkString(toView(out), context); + v.mkString(out.view(), context); } static RegisterPrimOp primop_toXML({ @@ -2520,7 +2520,7 @@ static void prim_toJSON(EvalState & state, const PosIdx pos, Value ** args, Valu std::ostringstream out; NixStringContext context; printValueAsJSON(state, true, *args[0], pos, out, context); - v.mkString(toView(out), context); + v.mkString(out.view(), context); } static RegisterPrimOp primop_toJSON({ diff --git a/src/libexpr/primops/fromTOML.cc b/src/libexpr/primops/fromTOML.cc index 3ab594905..d2f91a75b 100644 --- a/src/libexpr/primops/fromTOML.cc +++ b/src/libexpr/primops/fromTOML.cc @@ -139,7 +139,7 @@ static void prim_fromTOML(EvalState & state, const PosIdx pos, Value ** args, Va attrs.alloc("_type").mkStringNoCopy("timestamp"); std::ostringstream s; s << t; - auto str = toView(s); + auto str = s.view(); forceNoNullByte(str); attrs.alloc("value").mkString(str); v.mkAttrs(attrs); diff --git a/src/libexpr/print.cc b/src/libexpr/print.cc index 071addc1a..4776be033 100644 --- a/src/libexpr/print.cc +++ b/src/libexpr/print.cc @@ -461,7 +461,7 @@ private: std::ostringstream s; s << state.positions[v.lambda().fun->pos]; - output << " @ " << filterANSIEscapes(toView(s)); + output << " @ " << filterANSIEscapes(s.view()); } } else if (v.isPrimOp()) { if (v.primOp()) diff --git a/src/libmain/progress-bar.cc b/src/libmain/progress-bar.cc index c00f5d86b..edec8460d 100644 --- a/src/libmain/progress-bar.cc +++ b/src/libmain/progress-bar.cc @@ -183,7 +183,7 @@ public: std::ostringstream oss; showErrorInfo(oss, ei, loggerSettings.showTrace.get()); - log(*state, ei.level, toView(oss)); + log(*state, ei.level, oss.view()); } void log(State & state, Verbosity lvl, std::string_view s) diff --git a/src/libstore/daemon.cc b/src/libstore/daemon.cc index 2898f113f..00c0a1fdd 100644 --- a/src/libstore/daemon.cc +++ b/src/libstore/daemon.cc @@ -102,7 +102,7 @@ struct TunnelLogger : public Logger showErrorInfo(oss, ei, false); StringSink buf; - buf << STDERR_NEXT << toView(oss); + buf << STDERR_NEXT << oss.view(); enqueueMsg(buf.s); } diff --git a/src/libutil/include/nix/util/strings.hh b/src/libutil/include/nix/util/strings.hh index b4ef66bfe..ba37ce79f 100644 --- a/src/libutil/include/nix/util/strings.hh +++ b/src/libutil/include/nix/util/strings.hh @@ -12,11 +12,6 @@ namespace nix { -/* - * workaround for unavailable view() method (C++20) of std::ostringstream under MacOS with clang-16 - */ -std::string_view toView(const std::ostringstream & os); - /** * String tokenizer. 
* diff --git a/src/libutil/logging.cc b/src/libutil/logging.cc index 997110617..e2f28f553 100644 --- a/src/libutil/logging.cc +++ b/src/libutil/logging.cc @@ -121,7 +121,7 @@ public: std::ostringstream oss; showErrorInfo(oss, ei, loggerSettings.showTrace.get()); - log(ei.level, toView(oss)); + log(ei.level, oss.view()); } void startActivity( diff --git a/src/libutil/strings.cc b/src/libutil/strings.cc index a95390089..a87567cef 100644 --- a/src/libutil/strings.cc +++ b/src/libutil/strings.cc @@ -8,23 +8,6 @@ namespace nix { -struct view_stringbuf : public std::stringbuf -{ - inline std::string_view toView() - { - auto begin = pbase(); - return {begin, begin + pubseekoff(0, std::ios_base::cur, std::ios_base::out)}; - } -}; - -__attribute__((no_sanitize("undefined"))) std::string_view toView(const std::ostringstream & os) -{ - /* Downcasting like this is very much undefined behavior, so we disable - UBSAN for this function. */ - auto buf = static_cast(os.rdbuf()); - return buf->toView(); -} - template std::list tokenizeString(std::string_view s, std::string_view separators); template StringSet tokenizeString(std::string_view s, std::string_view separators); template std::vector tokenizeString(std::string_view s, std::string_view separators); diff --git a/src/nix/config-check.cc b/src/nix/config-check.cc index c04943eab..e1efb40eb 100644 --- a/src/nix/config-check.cc +++ b/src/nix/config-check.cc @@ -100,7 +100,7 @@ struct CmdConfigCheck : StoreCommand ss << "Multiple versions of nix found in PATH:\n"; for (auto & dir : dirs) ss << " " << dir << "\n"; - return checkFail(toView(ss)); + return checkFail(ss.view()); } return checkPass("PATH contains only one nix version."); @@ -143,7 +143,7 @@ struct CmdConfigCheck : StoreCommand for (auto & dir : dirs) ss << " " << dir << "\n"; ss << "\n"; - return checkFail(toView(ss)); + return checkFail(ss.view()); } return checkPass("All profiles are gcroots."); @@ -162,7 +162,7 @@ struct CmdConfigCheck : StoreCommand << "sync with the daemon.\n\n" << "Client protocol: " << formatProtocol(clientProto) << "\n" << "Store protocol: " << formatProtocol(storeProto) << "\n\n"; - return checkFail(toView(ss)); + return checkFail(ss.view()); } return checkPass("Client protocol matches store protocol."); diff --git a/src/nix/nix-build/nix-build.cc b/src/nix/nix-build/nix-build.cc index d3902f2a6..eef97aa19 100644 --- a/src/nix/nix-build/nix-build.cc +++ b/src/nix/nix-build/nix-build.cc @@ -285,10 +285,10 @@ static void main_nix_build(int argc, char ** argv) execArgs, interpreter, escapeShellArgAlways(script), - toView(joined)); + joined.view()); } else { envCommand = - fmt("exec %1% %2% %3% %4%", execArgs, interpreter, escapeShellArgAlways(script), toView(joined)); + fmt("exec %1% %2% %3% %4%", execArgs, interpreter, escapeShellArgAlways(script), joined.view()); } } diff --git a/src/nix/nix-env/user-env.cc b/src/nix/nix-env/user-env.cc index fbdcb14f8..81e2c4f80 100644 --- a/src/nix/nix-env/user-env.cc +++ b/src/nix/nix-env/user-env.cc @@ -108,7 +108,7 @@ bool createUserEnv( auto manifestFile = ({ std::ostringstream str; printAmbiguous(manifest, state.symbols, str, nullptr, std::numeric_limits::max()); - StringSource source{toView(str)}; + StringSource source{str.view()}; state.store->addToStoreFromDump( source, "env-manifest.nix", From 452ec09fe0d027565defb804c29bde6d62996a95 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sun, 5 Oct 2025 16:55:41 +0300 Subject: [PATCH 245/332] libstore: Fix use-after-move in DerivationGoal::repairClosure --- 
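Note (editorial, not part of the commit): this patch and the next one fix the same class of bug, where an object is still inspected after being handed to std::move(). A minimal sketch of the pattern, using a plain function named await() as a stand-in for the goal machinery:

    #include <cassert>
    #include <set>
    #include <utility>

    // Stand-in that takes ownership of the container, like `co_await await(std::move(waitees))`.
    static void await(std::set<int> goals) {}

    int main()
    {
        std::set<int> waitees{1, 2, 3};

        // Buggy shape: after the move, `waitees` is in a valid but unspecified
        // (in practice empty) state, so a later `!waitees.empty()` check is
        // silently skipped even though there were waitees.
        //
        //     await(std::move(waitees));
        //     if (!waitees.empty()) { /* never reached */ }

        // Fixed shape: capture the fact before giving the container away.
        bool haveWaitees = !waitees.empty();
        await(std::move(waitees));
        if (haveWaitees) {
            // handle "closure repaired" / report any failures
        }

        assert(haveWaitees);
    }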
src/libstore/build/derivation-goal.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index 81f4e6654..3c26a6922 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -378,9 +378,10 @@ Goal::Co DerivationGoal::repairClosure() bmRepair)); } + bool haveWaitees = !waitees.empty(); co_await await(std::move(waitees)); - if (!waitees.empty()) { + if (haveWaitees) { trace("closure repaired"); if (nrFailed > 0) throw Error( From be1ade737391a6656b3ffb872fb9ec7b36c89ca0 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sun, 5 Oct 2025 16:57:13 +0300 Subject: [PATCH 246/332] libexpr: Use use-after-move in SampleStack::saveProfile() --- src/libexpr/eval-profiler.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libexpr/eval-profiler.cc b/src/libexpr/eval-profiler.cc index ba92faf18..e9dc1e021 100644 --- a/src/libexpr/eval-profiler.cc +++ b/src/libexpr/eval-profiler.cc @@ -324,7 +324,7 @@ void SampleStack::saveProfile() std::visit([&](auto && info) { info.symbolize(state, os, posCache); }, pos); } os << " " << count; - writeLine(profileFd.get(), std::move(os).str()); + writeLine(profileFd.get(), os.str()); /* Clear ostringstream. */ os.str(""); os.clear(); From 06a82da6f54bda38355171d061485a1119f36300 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Sun, 5 Oct 2025 11:18:30 -0700 Subject: [PATCH 247/332] clang-tidy fix for src/libstore/build/derivation-check.cc --- src/libstore/build/derivation-check.cc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/libstore/build/derivation-check.cc b/src/libstore/build/derivation-check.cc index db3ec7c3d..181221ba5 100644 --- a/src/libstore/build/derivation-check.cc +++ b/src/libstore/build/derivation-check.cc @@ -18,7 +18,11 @@ void checkOutputs( for (auto & output : outputs) outputsByPath.emplace(store.printStorePath(output.second.path), output.second); - for (auto & [outputName, info] : outputs) { + for (auto & pair : outputs) { + // We can't use auto destructuring here because + // clang-tidy seems to complain about it. + const std::string & outputName = pair.first; + const auto & info = pair.second; auto * outputSpec = get(drvOutputs, outputName); assert(outputSpec); From 7e39ab4dc73dff2cc451e503fc300784f8c67224 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sun, 5 Oct 2025 21:54:32 +0300 Subject: [PATCH 248/332] Revert "Merge pull request #14097 from obsidiansystems/light-realisation-improvements" This reverts commit dc8c1461daa7e8db2a78f14ba0edd25e9df93e60, reversing changes made to 28adcfda3200c7f1f281f80686a1ab40311e0e5d. 
--- src/libcmd/built-path.cc | 5 +- src/libstore-tests/common-protocol.cc | 44 +++--- src/libstore-tests/dummy-store.cc | 15 +- src/libstore-tests/realisation.cc | 20 +-- src/libstore-tests/serve-protocol.cc | 74 +++++----- src/libstore-tests/worker-protocol.cc | 132 +++++++++--------- src/libstore/binary-cache-store.cc | 19 +-- .../build/derivation-building-goal.cc | 13 +- src/libstore/build/derivation-goal.cc | 59 ++------ .../build/drv-output-substitution-goal.cc | 10 +- src/libstore/daemon.cc | 4 +- src/libstore/dummy-store.cc | 21 +-- .../include/nix/store/binary-cache-store.hh | 17 +-- .../nix/store/build/derivation-goal.hh | 6 +- .../build/drv-output-substitution-goal.hh | 3 +- .../include/nix/store/dummy-store-impl.hh | 12 -- src/libstore/include/nix/store/dummy-store.hh | 2 - .../include/nix/store/legacy-ssh-store.hh | 4 +- .../include/nix/store/local-overlay-store.hh | 2 +- src/libstore/include/nix/store/local-store.hh | 6 +- src/libstore/include/nix/store/realisation.hh | 50 +++---- .../include/nix/store/remote-store.hh | 2 +- src/libstore/include/nix/store/store-api.hh | 9 +- src/libstore/local-overlay-store.cc | 8 +- src/libstore/local-store.cc | 17 +-- src/libstore/misc.cc | 5 +- src/libstore/realisation.cc | 50 +++---- src/libstore/remote-store.cc | 18 ++- src/libstore/restricted-store.cc | 4 +- src/libstore/store-api.cc | 20 ++- src/libstore/unix/build/derivation-builder.cc | 7 +- src/libutil/include/nix/util/hash.hh | 19 --- 32 files changed, 254 insertions(+), 423 deletions(-) diff --git a/src/libcmd/built-path.cc b/src/libcmd/built-path.cc index fc7f18493..4d76dd6da 100644 --- a/src/libcmd/built-path.cc +++ b/src/libcmd/built-path.cc @@ -117,11 +117,10 @@ RealisedPath::Set BuiltPath::toRealisedPaths(Store & store) const "the derivation '%s' has unrealised output '%s' (derived-path.cc/toRealisedPaths)", store.printStorePath(p.drvPath->outPath()), outputName); - DrvOutput key{*drvOutput, outputName}; - auto thisRealisation = store.queryRealisation(key); + auto thisRealisation = store.queryRealisation(DrvOutput{*drvOutput, outputName}); assert(thisRealisation); // We’ve built it, so we must // have the realisation - res.insert(Realisation{*thisRealisation, std::move(key)}); + res.insert(*thisRealisation); } else { res.insert(outputPath); } diff --git a/src/libstore-tests/common-protocol.cc b/src/libstore-tests/common-protocol.cc index 2c001957b..35fca165d 100644 --- a/src/libstore-tests/common-protocol.cc +++ b/src/libstore-tests/common-protocol.cc @@ -112,34 +112,32 @@ CHARACTERIZATION_TEST( "realisation", (std::tuple{ Realisation{ - { - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, - }, - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, + .id = + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, }, Realisation{ - { - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, - .dependentRealisations = + .id = + { + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, + .dependentRealisations = + { { - { - DrvOutput{ - .drvHash = 
Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "quux", - }, - StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "quux", }, + StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, - }, - { - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, + }, }, })) diff --git a/src/libstore-tests/dummy-store.cc b/src/libstore-tests/dummy-store.cc index 3dd8137a3..b841d7890 100644 --- a/src/libstore-tests/dummy-store.cc +++ b/src/libstore-tests/dummy-store.cc @@ -1,6 +1,6 @@ #include -#include "nix/store/dummy-store-impl.hh" +#include "nix/store/dummy-store.hh" #include "nix/store/globals.hh" #include "nix/store/realisation.hh" @@ -13,7 +13,7 @@ TEST(DummyStore, realisation_read) auto store = [] { auto cfg = make_ref(StoreReference::Params{}); cfg->readOnly = false; - return cfg->openDummyStore(); + return cfg->openStore(); }(); auto drvHash = Hash::parseExplicitFormatUnprefixed( @@ -22,17 +22,6 @@ TEST(DummyStore, realisation_read) auto outputName = "foo"; EXPECT_EQ(store->queryRealisation({drvHash, outputName}), nullptr); - - UnkeyedRealisation value{ - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"}, - }; - - store->buildTrace.insert({drvHash, {{outputName, make_ref(value)}}}); - - auto value2 = store->queryRealisation({drvHash, outputName}); - - ASSERT_TRUE(value2); - EXPECT_EQ(*value2, value); } } // namespace nix diff --git a/src/libstore-tests/realisation.cc b/src/libstore-tests/realisation.cc index d16049bc5..a5a5bee50 100644 --- a/src/libstore-tests/realisation.cc +++ b/src/libstore-tests/realisation.cc @@ -49,16 +49,16 @@ INSTANTIATE_TEST_SUITE_P( RealisationJsonTest, ([] { Realisation simple{ - { - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"}, - }, - { - .drvHash = Hash::parseExplicitFormatUnprefixed( - "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", - HashAlgorithm::SHA256, - HashFormat::Base16), - .outputName = "foo", - }, + + .id = + { + .drvHash = Hash::parseExplicitFormatUnprefixed( + "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", + HashAlgorithm::SHA256, + HashFormat::Base16), + .outputName = "foo", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"}, }; return ::testing::Values( std::pair{ diff --git a/src/libstore-tests/serve-protocol.cc b/src/libstore-tests/serve-protocol.cc index 10aa21e9d..a63201164 100644 --- a/src/libstore-tests/serve-protocol.cc +++ b/src/libstore-tests/serve-protocol.cc @@ -95,34 +95,32 @@ VERSIONED_CHARACTERIZATION_TEST( defaultVersion, (std::tuple{ Realisation{ - { - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, - }, - { - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, + .id = + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, }, Realisation{ - { - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, - .dependentRealisations = + .id = + { + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = 
{"asdf", "qwer"}, + .dependentRealisations = + { { - { - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "quux", - }, - StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "quux", }, + StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, - }, - { - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, + }, }, })) @@ -198,27 +196,25 @@ VERSIONED_CHARACTERIZATION_TEST( { "foo", { - { - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - }, - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "foo", - }, + .id = + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "foo", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, }, { "bar", { - { - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, - }, - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "bar", - }, + .id = + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "bar", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, }, }, }, diff --git a/src/libstore-tests/worker-protocol.cc b/src/libstore-tests/worker-protocol.cc index c4afde3bd..489151c8c 100644 --- a/src/libstore-tests/worker-protocol.cc +++ b/src/libstore-tests/worker-protocol.cc @@ -148,34 +148,32 @@ VERSIONED_CHARACTERIZATION_TEST( defaultVersion, (std::tuple{ Realisation{ - { - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, - }, - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, + .id = + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, }, Realisation{ - { - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, - .dependentRealisations = + .id = + { + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, + .dependentRealisations = + { { - { - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "quux", - }, - StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "quux", }, + StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, - }, - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, + }, }, })) @@ -216,25 +214,25 @@ VERSIONED_CHARACTERIZATION_TEST( { "foo", { - { - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - }, - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "foo", - }, + .id = + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "foo", + }, + .outPath = 
StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, }, { "bar", { - { - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, - }, - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "bar", - }, + .id = + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "bar", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, }, }, }, @@ -269,27 +267,25 @@ VERSIONED_CHARACTERIZATION_TEST( { "foo", { - { - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - }, - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "foo", - }, + .id = + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "foo", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, }, { "bar", { - { - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, - }, - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "bar", - }, + .id = + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "bar", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, }, }, }, @@ -328,27 +324,25 @@ VERSIONED_CHARACTERIZATION_TEST( { "foo", { - { - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - }, - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "foo", - }, + .id = + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "foo", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, }, { "bar", { - { - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, - }, - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "bar", - }, + .id = + DrvOutput{ + .drvHash = + Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "bar", + }, + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, }, }, }, diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc index 3705f3d4d..badfb4b14 100644 --- a/src/libstore/binary-cache-store.cc +++ b/src/libstore/binary-cache-store.cc @@ -502,15 +502,10 @@ StorePath BinaryCacheStore::addToStore( ->path; } -std::string BinaryCacheStore::makeRealisationPath(const DrvOutput & id) -{ - return realisationsPrefix + "/" + id.to_string() + ".doi"; -} - void BinaryCacheStore::queryRealisationUncached( - const DrvOutput & id, Callback> callback) noexcept + const DrvOutput & id, Callback> callback) noexcept { - auto outputInfoFilePath = makeRealisationPath(id); + auto outputInfoFilePath = realisationsPrefix + "/" + id.to_string() + ".doi"; auto callbackPtr = std::make_shared(std::move(callback)); @@ -520,12 +515,11 @@ void BinaryCacheStore::queryRealisationUncached( if (!data) return (*callbackPtr)({}); - std::shared_ptr realisation; + std::shared_ptr realisation; try { - realisation = std::make_shared(nlohmann::json::parse(*data)); + realisation = std::make_shared(nlohmann::json::parse(*data)); } catch (Error & e) { - e.addTrace( - {}, "while parsing file '%s' as a realisation for key '%s'", outputInfoFilePath, id.to_string()); + e.addTrace({}, "while parsing file '%s' as a realisation", outputInfoFilePath); throw; } 
return (*callbackPtr)(std::move(realisation)); @@ -541,7 +535,8 @@ void BinaryCacheStore::registerDrvOutput(const Realisation & info) { if (diskCache) diskCache->upsertRealisation(config.getReference().render(/*FIXME withParams=*/false), info); - upsertFile(makeRealisationPath(info.id), static_cast(info).dump(), "application/json"); + auto filePath = realisationsPrefix + "/" + info.id.to_string() + ".doi"; + upsertFile(filePath, static_cast(info).dump(), "application/json"); } ref BinaryCacheStore::getRemoteFSAccessor(bool requireValidPath) diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index c39fd8c1c..fa819c96b 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -1092,22 +1092,13 @@ DerivationBuildingGoal::checkPathValidity(std::map & // without the `ca-derivations` experimental flag). worker.store.registerDrvOutput( Realisation{ - { - .outPath = info.known->path, - }, drvOutput, + info.known->path, }); } } if (info.known && info.known->isValid()) - validOutputs.emplace( - i.first, - Realisation{ - { - .outPath = info.known->path, - }, - drvOutput, - }); + validOutputs.emplace(i.first, Realisation{drvOutput, info.known->path}); } bool allValid = true; diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index 81f4e6654..cc3ba2b7b 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -190,17 +190,13 @@ Goal::Co DerivationGoal::haveDerivation(bool storeDerivation) auto realisation = [&] { auto take1 = get(success.builtOutputs, wantedOutput); if (take1) - return static_cast(*take1); + return *take1; /* The above `get` should work. But stateful tracking of outputs in resolvedResult, this can get out of sync with the store, which is our actual source of truth. For now we just check the store directly if it fails. */ - auto take2 = worker.evalStore.queryRealisation( - DrvOutput{ - .drvHash = *resolvedHash, - .outputName = wantedOutput, - }); + auto take2 = worker.evalStore.queryRealisation(DrvOutput{*resolvedHash, wantedOutput}); if (take2) return *take2; @@ -211,12 +207,8 @@ Goal::Co DerivationGoal::haveDerivation(bool storeDerivation) }(); if (!drv->type().isImpure()) { - Realisation newRealisation{ - realisation, - { - .drvHash = *outputHash, - .outputName = wantedOutput, - }}; + auto newRealisation = realisation; + newRealisation.id = DrvOutput{*outputHash, wantedOutput}; newRealisation.signatures.clear(); if (!drv->type().isFixed()) { auto & drvStore = worker.evalStore.isValidPath(drvPath) ? worker.evalStore : worker.store; @@ -266,16 +258,7 @@ Goal::Co DerivationGoal::haveDerivation(bool storeDerivation) /* In checking mode, the builder will not register any outputs. So we want to make sure the ones that we wanted to check are properly there. */ - success.builtOutputs = {{ - wantedOutput, - { - assertPathValidity(), - { - .drvHash = outputHash, - .outputName = wantedOutput, - }, - }, - }}; + success.builtOutputs = {{wantedOutput, assertPathValidity()}}; } else { /* Otherwise the builder will give us info for out output, but also for other outputs. 
Filter down to just our output so as @@ -390,20 +373,18 @@ Goal::Co DerivationGoal::repairClosure() co_return doneSuccess(BuildResult::Success::AlreadyValid, assertPathValidity()); } -std::optional> DerivationGoal::checkPathValidity() +std::optional> DerivationGoal::checkPathValidity() { if (drv->type().isImpure()) return std::nullopt; auto drvOutput = DrvOutput{outputHash, wantedOutput}; - std::optional mRealisation; + std::optional mRealisation; if (auto * mOutput = get(drv->outputs, wantedOutput)) { if (auto mPath = mOutput->path(worker.store, drv->name, wantedOutput)) { - mRealisation = UnkeyedRealisation{ - .outPath = std::move(*mPath), - }; + mRealisation = Realisation{drvOutput, std::move(*mPath)}; } } else { throw Error( @@ -431,14 +412,7 @@ std::optional> DerivationGoal::checkPa // derivation, and the output path is valid, but we don't have // its realisation stored (probably because it has been built // without the `ca-derivations` experimental flag). - worker.store.registerDrvOutput( - Realisation{ - *mRealisation, - { - .drvHash = outputHash, - .outputName = wantedOutput, - }, - }); + worker.store.registerDrvOutput(*mRealisation); } return {{*mRealisation, status}}; @@ -446,7 +420,7 @@ std::optional> DerivationGoal::checkPa return std::nullopt; } -UnkeyedRealisation DerivationGoal::assertPathValidity() +Realisation DerivationGoal::assertPathValidity() { auto checkResult = checkPathValidity(); if (!(checkResult && checkResult->second == PathStatus::Valid)) @@ -454,20 +428,11 @@ UnkeyedRealisation DerivationGoal::assertPathValidity() return checkResult->first; } -Goal::Done DerivationGoal::doneSuccess(BuildResult::Success::Status status, UnkeyedRealisation builtOutput) +Goal::Done DerivationGoal::doneSuccess(BuildResult::Success::Status status, Realisation builtOutput) { buildResult.inner = BuildResult::Success{ .status = status, - .builtOutputs = {{ - wantedOutput, - { - std::move(builtOutput), - DrvOutput{ - .drvHash = outputHash, - .outputName = wantedOutput, - }, - }, - }}, + .builtOutputs = {{wantedOutput, std::move(builtOutput)}}, }; mcExpectedBuilds.reset(); diff --git a/src/libstore/build/drv-output-substitution-goal.cc b/src/libstore/build/drv-output-substitution-goal.cc index a969b905b..b6ace4784 100644 --- a/src/libstore/build/drv-output-substitution-goal.cc +++ b/src/libstore/build/drv-output-substitution-goal.cc @@ -43,10 +43,10 @@ Goal::Co DrvOutputSubstitutionGoal::init() outPipe->createAsyncPipe(worker.ioport.get()); #endif - auto promise = std::make_shared>>(); + auto promise = std::make_shared>>(); sub->queryRealisation( - id, {[outPipe(outPipe), promise(promise)](std::future> res) { + id, {[outPipe(outPipe), promise(promise)](std::future> res) { try { Finally updateStats([&]() { outPipe->writeSide.close(); }); promise->set_value(res.get()); @@ -75,7 +75,7 @@ Goal::Co DrvOutputSubstitutionGoal::init() * The realisation corresponding to the given output id. * Will be filled once we can get it. */ - std::shared_ptr outputInfo; + std::shared_ptr outputInfo; try { outputInfo = promise->get_future().get(); @@ -132,7 +132,7 @@ Goal::Co DrvOutputSubstitutionGoal::init() } Goal::Co DrvOutputSubstitutionGoal::realisationFetched( - Goals waitees, std::shared_ptr outputInfo, nix::ref sub) + Goals waitees, std::shared_ptr outputInfo, nix::ref sub) { waitees.insert(worker.makePathSubstitutionGoal(outputInfo->outPath)); @@ -145,7 +145,7 @@ Goal::Co DrvOutputSubstitutionGoal::realisationFetched( co_return amDone(nrNoSubstituters > 0 ? 
ecNoSubstituters : ecFailed); } - worker.store.registerDrvOutput({*outputInfo, id}); + worker.store.registerDrvOutput(*outputInfo); trace("finished"); co_return amDone(ecSuccess); diff --git a/src/libstore/daemon.cc b/src/libstore/daemon.cc index 00c0a1fdd..1fc568e87 100644 --- a/src/libstore/daemon.cc +++ b/src/libstore/daemon.cc @@ -964,7 +964,7 @@ static void performOp( if (GET_PROTOCOL_MINOR(conn.protoVersion) < 31) { auto outputId = DrvOutput::parse(readString(conn.from)); auto outputPath = StorePath(readString(conn.from)); - store->registerDrvOutput(Realisation{{.outPath = outputPath}, outputId}); + store->registerDrvOutput(Realisation{.id = outputId, .outPath = outputPath}); } else { auto realisation = WorkerProto::Serialise::read(*store, rconn); store->registerDrvOutput(realisation); @@ -986,7 +986,7 @@ static void performOp( } else { std::set realisations; if (info) - realisations.insert({*info, outputId}); + realisations.insert(*info); WorkerProto::write(*store, wconn, realisations); } break; diff --git a/src/libstore/dummy-store.cc b/src/libstore/dummy-store.cc index 509b7a0b1..1eb51fe3e 100644 --- a/src/libstore/dummy-store.cc +++ b/src/libstore/dummy-store.cc @@ -3,7 +3,6 @@ #include "nix/util/callback.hh" #include "nix/util/memory-source-accessor.hh" #include "nix/store/dummy-store-impl.hh" -#include "nix/store/realisation.hh" #include @@ -252,10 +251,7 @@ struct DummyStoreImpl : DummyStore void registerDrvOutput(const Realisation & output) override { - auto ref = make_ref(output); - buildTrace.insert_or_visit({output.id.drvHash, {{output.id.outputName, ref}}}, [&](auto & kv) { - kv.second.insert_or_assign(output.id.outputName, make_ref(output)); - }); + unsupported("registerDrvOutput"); } void narFromPath(const StorePath & path, Sink & sink) override @@ -270,19 +266,10 @@ struct DummyStoreImpl : DummyStore throw Error("path '%s' is not valid", printStorePath(path)); } - void queryRealisationUncached( - const DrvOutput & drvOutput, Callback> callback) noexcept override + void + queryRealisationUncached(const DrvOutput &, Callback> callback) noexcept override { - bool visited = false; - buildTrace.cvisit(drvOutput.drvHash, [&](const auto & kv) { - if (auto it = kv.second.find(drvOutput.outputName); it != kv.second.end()) { - visited = true; - callback(it->second.get_ptr()); - } - }); - - if (!visited) - callback(nullptr); + callback(nullptr); } std::shared_ptr getFSAccessor(const StorePath & path, bool requireValidPath) override diff --git a/src/libstore/include/nix/store/binary-cache-store.hh b/src/libstore/include/nix/store/binary-cache-store.hh index 3f4de2bd4..c316b1199 100644 --- a/src/libstore/include/nix/store/binary-cache-store.hh +++ b/src/libstore/include/nix/store/binary-cache-store.hh @@ -80,22 +80,13 @@ private: protected: - /** - * The prefix under which realisation infos will be stored - */ - constexpr const static std::string realisationsPrefix = "realisations"; + // The prefix under which realisation infos will be stored + const std::string realisationsPrefix = "realisations"; - constexpr const static std::string cacheInfoFile = "nix-cache-info"; + const std::string cacheInfoFile = "nix-cache-info"; BinaryCacheStore(Config &); - /** - * Compute the path to the given realisation - * - * It's `${realisationsPrefix}/${drvOutput}.doi`. 
- */ - std::string makeRealisationPath(const DrvOutput & id); - public: virtual bool fileExists(const std::string & path) = 0; @@ -184,7 +175,7 @@ public: void registerDrvOutput(const Realisation & info) override; void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept override; + const DrvOutput &, Callback> callback) noexcept override; void narFromPath(const StorePath & path, Sink & sink) override; diff --git a/src/libstore/include/nix/store/build/derivation-goal.hh b/src/libstore/include/nix/store/build/derivation-goal.hh index c31645fff..353e7c489 100644 --- a/src/libstore/include/nix/store/build/derivation-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-goal.hh @@ -93,17 +93,17 @@ private: * of the wanted output, and a `PathStatus` with the * current status of that output. */ - std::optional> checkPathValidity(); + std::optional> checkPathValidity(); /** * Aborts if any output is not valid or corrupt, and otherwise * returns a 'Realisation' for the wanted output. */ - UnkeyedRealisation assertPathValidity(); + Realisation assertPathValidity(); Co repairClosure(); - Done doneSuccess(BuildResult::Success::Status status, UnkeyedRealisation builtOutput); + Done doneSuccess(BuildResult::Success::Status status, Realisation builtOutput); Done doneFailure(BuildError ex); }; diff --git a/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh b/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh index 1a5a4ea26..b42336427 100644 --- a/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh +++ b/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh @@ -39,8 +39,7 @@ public: GoalState state; Co init(); - Co - realisationFetched(Goals waitees, std::shared_ptr outputInfo, nix::ref sub); + Co realisationFetched(Goals waitees, std::shared_ptr outputInfo, nix::ref sub); void timedOut(Error && ex) override { diff --git a/src/libstore/include/nix/store/dummy-store-impl.hh b/src/libstore/include/nix/store/dummy-store-impl.hh index 4c9f54e98..e05bb94ff 100644 --- a/src/libstore/include/nix/store/dummy-store-impl.hh +++ b/src/libstore/include/nix/store/dummy-store-impl.hh @@ -30,18 +30,6 @@ struct DummyStore : virtual Store */ boost::concurrent_flat_map contents; - /** - * The build trace maps the pair of a content-addressing (fixed or - * floating) derivations an one of its output to a - * (content-addressed) store object. - * - * It is [curried](https://en.wikipedia.org/wiki/Currying), so we - * instead having a single output with a `DrvOutput` key, we have an - * outer map for the derivation, and inner maps for the outputs of a - * given derivation. 
- */ - boost::concurrent_flat_map>> buildTrace; - DummyStore(ref config) : Store{*config} , config(config) diff --git a/src/libstore/include/nix/store/dummy-store.hh b/src/libstore/include/nix/store/dummy-store.hh index d371c4e51..95c09078c 100644 --- a/src/libstore/include/nix/store/dummy-store.hh +++ b/src/libstore/include/nix/store/dummy-store.hh @@ -3,8 +3,6 @@ #include "nix/store/store-api.hh" -#include - namespace nix { struct DummyStore; diff --git a/src/libstore/include/nix/store/legacy-ssh-store.hh b/src/libstore/include/nix/store/legacy-ssh-store.hh index 994918f90..c91f88a84 100644 --- a/src/libstore/include/nix/store/legacy-ssh-store.hh +++ b/src/libstore/include/nix/store/legacy-ssh-store.hh @@ -208,8 +208,8 @@ public: */ std::optional isTrustedClient() override; - void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept override + void + queryRealisationUncached(const DrvOutput &, Callback> callback) noexcept override // TODO: Implement { unsupported("queryRealisation"); diff --git a/src/libstore/include/nix/store/local-overlay-store.hh b/src/libstore/include/nix/store/local-overlay-store.hh index 1d69d3417..b89d0a1a0 100644 --- a/src/libstore/include/nix/store/local-overlay-store.hh +++ b/src/libstore/include/nix/store/local-overlay-store.hh @@ -173,7 +173,7 @@ private: * Check lower store if upper DB does not have. */ void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept override; + const DrvOutput &, Callback> callback) noexcept override; /** * Call `remountIfNecessary` after collecting garbage normally. diff --git a/src/libstore/include/nix/store/local-store.hh b/src/libstore/include/nix/store/local-store.hh index ab255fba8..b871aaee2 100644 --- a/src/libstore/include/nix/store/local-store.hh +++ b/src/libstore/include/nix/store/local-store.hh @@ -385,10 +385,10 @@ public: void cacheDrvOutputMapping( State & state, const uint64_t deriver, const std::string & outputName, const StorePath & output); - std::optional queryRealisation_(State & state, const DrvOutput & id); - std::optional> queryRealisationCore_(State & state, const DrvOutput & id); + std::optional queryRealisation_(State & state, const DrvOutput & id); + std::optional> queryRealisationCore_(State & state, const DrvOutput & id); void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept override; + const DrvOutput &, Callback> callback) noexcept override; std::optional getVersion() override; diff --git a/src/libstore/include/nix/store/realisation.hh b/src/libstore/include/nix/store/realisation.hh index c7e0a4483..3424a39c9 100644 --- a/src/libstore/include/nix/store/realisation.hh +++ b/src/libstore/include/nix/store/realisation.hh @@ -46,12 +46,12 @@ struct DrvOutput static DrvOutput parse(const std::string &); - bool operator==(const DrvOutput &) const = default; - auto operator<=>(const DrvOutput &) const = default; + GENERATE_CMP(DrvOutput, me->drvHash, me->outputName); }; -struct UnkeyedRealisation +struct Realisation { + DrvOutput id; StorePath outPath; StringSet signatures; @@ -64,35 +64,22 @@ struct UnkeyedRealisation */ std::map dependentRealisations; - std::string fingerprint(const DrvOutput & key) const; + std::string fingerprint() const; + void sign(const Signer &); + bool checkSignature(const PublicKeys & publicKeys, const std::string & sig) const; + size_t checkSignatures(const PublicKeys & publicKeys) const; - void sign(const DrvOutput & key, const Signer &); + static std::set closure(Store &, const std::set &); + static void 
closure(Store &, const std::set &, std::set & res); - bool checkSignature(const DrvOutput & key, const PublicKeys & publicKeys, const std::string & sig) const; + bool isCompatibleWith(const Realisation & other) const; - size_t checkSignatures(const DrvOutput & key, const PublicKeys & publicKeys) const; - - const StorePath & getPath() const + StorePath getPath() const { return outPath; } - // TODO sketchy that it avoids signatures - GENERATE_CMP(UnkeyedRealisation, me->outPath); -}; - -struct Realisation : UnkeyedRealisation -{ - DrvOutput id; - - bool isCompatibleWith(const UnkeyedRealisation & other) const; - - static std::set closure(Store &, const std::set &); - - static void closure(Store &, const std::set &, std::set & res); - - bool operator==(const Realisation &) const = default; - auto operator<=>(const Realisation &) const = default; + GENERATE_CMP(Realisation, me->id, me->outPath); }; /** @@ -116,13 +103,12 @@ struct OpaquePath { StorePath path; - const StorePath & getPath() const + StorePath getPath() const { return path; } - bool operator==(const OpaquePath &) const = default; - auto operator<=>(const OpaquePath &) const = default; + GENERATE_CMP(OpaquePath, me->path); }; /** @@ -130,7 +116,7 @@ struct OpaquePath */ struct RealisedPath { - /** + /* * A path is either the result of the realisation of a derivation or * an opaque blob that has been directly added to the store */ @@ -152,14 +138,13 @@ struct RealisedPath /** * Get the raw store path associated to this */ - const StorePath & path() const; + StorePath path() const; void closure(Store & store, Set & ret) const; static void closure(Store & store, const Set & startPaths, Set & ret); Set closure(Store & store) const; - bool operator==(const RealisedPath &) const = default; - auto operator<=>(const RealisedPath &) const = default; + GENERATE_CMP(RealisedPath, me->raw); }; class MissingRealisation : public Error @@ -182,5 +167,4 @@ public: } // namespace nix -JSON_IMPL(nix::UnkeyedRealisation) JSON_IMPL(nix::Realisation) diff --git a/src/libstore/include/nix/store/remote-store.hh b/src/libstore/include/nix/store/remote-store.hh index b152e054b..1aaf29d37 100644 --- a/src/libstore/include/nix/store/remote-store.hh +++ b/src/libstore/include/nix/store/remote-store.hh @@ -102,7 +102,7 @@ struct RemoteStore : public virtual Store, public virtual GcStore, public virtua void registerDrvOutput(const Realisation & info) override; void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept override; + const DrvOutput &, Callback> callback) noexcept override; void buildPaths(const std::vector & paths, BuildMode buildMode, std::shared_ptr evalStore) override; diff --git a/src/libstore/include/nix/store/store-api.hh b/src/libstore/include/nix/store/store-api.hh index c9fd00513..1131ec975 100644 --- a/src/libstore/include/nix/store/store-api.hh +++ b/src/libstore/include/nix/store/store-api.hh @@ -31,7 +31,6 @@ MakeError(SubstituterDisabled, Error); MakeError(InvalidStoreReference, Error); -struct UnkeyedRealisation; struct Realisation; struct RealisedPath; struct DrvOutput; @@ -399,12 +398,12 @@ public: /** * Query the information about a realisation. */ - std::shared_ptr queryRealisation(const DrvOutput &); + std::shared_ptr queryRealisation(const DrvOutput &); /** * Asynchronous version of queryRealisation(). 
*/ - void queryRealisation(const DrvOutput &, Callback> callback) noexcept; + void queryRealisation(const DrvOutput &, Callback> callback) noexcept; /** * Check whether the given valid path info is sufficiently attested, by @@ -431,8 +430,8 @@ protected: virtual void queryPathInfoUncached(const StorePath & path, Callback> callback) noexcept = 0; - virtual void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept = 0; + virtual void + queryRealisationUncached(const DrvOutput &, Callback> callback) noexcept = 0; public: diff --git a/src/libstore/local-overlay-store.cc b/src/libstore/local-overlay-store.cc index f23feb8fb..2b000b3db 100644 --- a/src/libstore/local-overlay-store.cc +++ b/src/libstore/local-overlay-store.cc @@ -77,7 +77,7 @@ void LocalOverlayStore::registerDrvOutput(const Realisation & info) // First do queryRealisation on lower layer to populate DB auto res = lowerStore->queryRealisation(info.id); if (res) - LocalStore::registerDrvOutput({*res, info.id}); + LocalStore::registerDrvOutput(*res); LocalStore::registerDrvOutput(info); } @@ -108,12 +108,12 @@ void LocalOverlayStore::queryPathInfoUncached( } void LocalOverlayStore::queryRealisationUncached( - const DrvOutput & drvOutput, Callback> callback) noexcept + const DrvOutput & drvOutput, Callback> callback) noexcept { auto callbackPtr = std::make_shared(std::move(callback)); LocalStore::queryRealisationUncached( - drvOutput, {[this, drvOutput, callbackPtr](std::future> fut) { + drvOutput, {[this, drvOutput, callbackPtr](std::future> fut) { try { auto info = fut.get(); if (info) @@ -123,7 +123,7 @@ void LocalOverlayStore::queryRealisationUncached( } // If we don't have it, check lower store lowerStore->queryRealisation( - drvOutput, {[callbackPtr](std::future> fut) { + drvOutput, {[callbackPtr](std::future> fut) { try { (*callbackPtr)(fut.get()); } catch (...) 
{ diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index 6425819c5..ebc987ee0 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -1036,7 +1036,7 @@ bool LocalStore::pathInfoIsUntrusted(const ValidPathInfo & info) bool LocalStore::realisationIsUntrusted(const Realisation & realisation) { - return config->requireSigs && !realisation.checkSignatures(realisation.id, getPublicKeys()); + return config->requireSigs && !realisation.checkSignatures(getPublicKeys()); } void LocalStore::addToStore(const ValidPathInfo & info, Source & source, RepairFlag repair, CheckSigsFlag checkSigs) @@ -1586,7 +1586,7 @@ void LocalStore::addSignatures(const StorePath & storePath, const StringSet & si }); } -std::optional> +std::optional> LocalStore::queryRealisationCore_(LocalStore::State & state, const DrvOutput & id) { auto useQueryRealisedOutput(state.stmts->QueryRealisedOutput.use()(id.strHash())(id.outputName)); @@ -1598,13 +1598,14 @@ LocalStore::queryRealisationCore_(LocalStore::State & state, const DrvOutput & i return { {realisationDbId, - UnkeyedRealisation{ + Realisation{ + .id = id, .outPath = outputPath, .signatures = signatures, }}}; } -std::optional LocalStore::queryRealisation_(LocalStore::State & state, const DrvOutput & id) +std::optional LocalStore::queryRealisation_(LocalStore::State & state, const DrvOutput & id) { auto maybeCore = queryRealisationCore_(state, id); if (!maybeCore) @@ -1630,13 +1631,13 @@ std::optional LocalStore::queryRealisation_(LocalStore } void LocalStore::queryRealisationUncached( - const DrvOutput & id, Callback> callback) noexcept + const DrvOutput & id, Callback> callback) noexcept { try { - auto maybeRealisation = retrySQLite>( - [&]() { return queryRealisation_(*_state->lock(), id); }); + auto maybeRealisation = + retrySQLite>([&]() { return queryRealisation_(*_state->lock(), id); }); if (maybeRealisation) - callback(std::make_shared(maybeRealisation.value())); + callback(std::make_shared(maybeRealisation.value())); else callback(nullptr); diff --git a/src/libstore/misc.cc b/src/libstore/misc.cc index a31d149c2..7efaa4f86 100644 --- a/src/libstore/misc.cc +++ b/src/libstore/misc.cc @@ -360,12 +360,11 @@ drvOutputReferences(Store & store, const Derivation & drv, const StorePath & out if (!outputHash) throw Error( "output '%s' of derivation '%s' isn't realised", outputName, store.printStorePath(inputDrv)); - DrvOutput key{*outputHash, outputName}; - auto thisRealisation = store.queryRealisation(key); + auto thisRealisation = store.queryRealisation(DrvOutput{*outputHash, outputName}); if (!thisRealisation) throw Error( "output '%s' of derivation '%s' isn’t built", outputName, store.printStorePath(inputDrv)); - inputRealisations.insert({*thisRealisation, std::move(key)}); + inputRealisations.insert(*thisRealisation); } } if (!inputNode.value.empty()) { diff --git a/src/libstore/realisation.cc b/src/libstore/realisation.cc index e08d5ee8a..febd67bd2 100644 --- a/src/libstore/realisation.cc +++ b/src/libstore/realisation.cc @@ -39,7 +39,7 @@ void Realisation::closure(Store & store, const std::set & startOutp std::set res; for (auto & [currentDep, _] : current.dependentRealisations) { if (auto currentRealisation = store.queryRealisation(currentDep)) - res.insert({*currentRealisation, currentDep}); + res.insert(*currentRealisation); else throw Error("Unrealised derivation '%s'", currentDep.to_string()); } @@ -61,25 +61,24 @@ void Realisation::closure(Store & store, const std::set & startOutp }); } -std::string 
UnkeyedRealisation::fingerprint(const DrvOutput & key) const +std::string Realisation::fingerprint() const { - nlohmann::json serialized = Realisation{*this, key}; + nlohmann::json serialized = *this; serialized.erase("signatures"); return serialized.dump(); } -void UnkeyedRealisation::sign(const DrvOutput & key, const Signer & signer) +void Realisation::sign(const Signer & signer) { - signatures.insert(signer.signDetached(fingerprint(key))); + signatures.insert(signer.signDetached(fingerprint())); } -bool UnkeyedRealisation::checkSignature( - const DrvOutput & key, const PublicKeys & publicKeys, const std::string & sig) const +bool Realisation::checkSignature(const PublicKeys & publicKeys, const std::string & sig) const { - return verifyDetached(fingerprint(key), sig, publicKeys); + return verifyDetached(fingerprint(), sig, publicKeys); } -size_t UnkeyedRealisation::checkSignatures(const DrvOutput & key, const PublicKeys & publicKeys) const +size_t Realisation::checkSignatures(const PublicKeys & publicKeys) const { // FIXME: Maybe we should return `maxSigs` if the realisation corresponds to // an input-addressed one − because in that case the drv is enough to check @@ -87,18 +86,19 @@ size_t UnkeyedRealisation::checkSignatures(const DrvOutput & key, const PublicKe size_t good = 0; for (auto & sig : signatures) - if (checkSignature(key, publicKeys, sig)) + if (checkSignature(publicKeys, sig)) good++; return good; } -const StorePath & RealisedPath::path() const +StorePath RealisedPath::path() const { - return std::visit([](auto && arg) -> auto & { return arg.getPath(); }, raw); + return std::visit([](auto && arg) { return arg.getPath(); }, raw); } -bool Realisation::isCompatibleWith(const UnkeyedRealisation & other) const +bool Realisation::isCompatibleWith(const Realisation & other) const { + assert(id == other.id); if (outPath == other.outPath) { if (dependentRealisations.empty() != other.dependentRealisations.empty()) { warn( @@ -144,7 +144,7 @@ namespace nlohmann { using namespace nix; -UnkeyedRealisation adl_serializer::from_json(const json & json0) +Realisation adl_serializer::from_json(const json & json0) { auto json = getObject(json0); @@ -157,39 +157,25 @@ UnkeyedRealisation adl_serializer::from_json(const json & js for (auto & [jsonDepId, jsonDepOutPath] : getObject(*jsonDependencies)) dependentRealisations.insert({DrvOutput::parse(jsonDepId), jsonDepOutPath}); - return UnkeyedRealisation{ + return Realisation{ + .id = DrvOutput::parse(valueAt(json, "id")), .outPath = valueAt(json, "outPath"), .signatures = signatures, .dependentRealisations = dependentRealisations, }; } -void adl_serializer::to_json(json & json, const UnkeyedRealisation & r) +void adl_serializer::to_json(json & json, const Realisation & r) { auto jsonDependentRealisations = nlohmann::json::object(); for (auto & [depId, depOutPath] : r.dependentRealisations) jsonDependentRealisations.emplace(depId.to_string(), depOutPath); json = { + {"id", r.id.to_string()}, {"outPath", r.outPath}, {"signatures", r.signatures}, {"dependentRealisations", jsonDependentRealisations}, }; } -Realisation adl_serializer::from_json(const json & json0) -{ - auto json = getObject(json0); - - return Realisation{ - static_cast(json0), - DrvOutput::parse(valueAt(json, "id")), - }; -} - -void adl_serializer::to_json(json & json, const Realisation & r) -{ - json = static_cast(r); - json["id"] = r.id.to_string(); -} - } // namespace nlohmann diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index 8dd5bc064..a6994f844 
100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -501,7 +501,7 @@ void RemoteStore::registerDrvOutput(const Realisation & info) } void RemoteStore::queryRealisationUncached( - const DrvOutput & id, Callback> callback) noexcept + const DrvOutput & id, Callback> callback) noexcept { try { auto conn(getConnection()); @@ -515,21 +515,21 @@ void RemoteStore::queryRealisationUncached( conn->to << id.to_string(); conn.processStderr(); - auto real = [&]() -> std::shared_ptr { + auto real = [&]() -> std::shared_ptr { if (GET_PROTOCOL_MINOR(conn->protoVersion) < 31) { auto outPaths = WorkerProto::Serialise>::read(*this, *conn); if (outPaths.empty()) return nullptr; - return std::make_shared(UnkeyedRealisation{.outPath = *outPaths.begin()}); + return std::make_shared(Realisation{.id = id, .outPath = *outPaths.begin()}); } else { auto realisations = WorkerProto::Serialise>::read(*this, *conn); if (realisations.empty()) return nullptr; - return std::make_shared(*realisations.begin()); + return std::make_shared(*realisations.begin()); } }(); - callback(std::shared_ptr(real)); + callback(std::shared_ptr(real)); } catch (...) { return callback.rethrow(); } @@ -626,15 +626,13 @@ std::vector RemoteStore::buildPathsWithResults( auto realisation = queryRealisation(outputId); if (!realisation) throw MissingRealisation(outputId); - success.builtOutputs.emplace(output, Realisation{*realisation, outputId}); + success.builtOutputs.emplace(output, *realisation); } else { success.builtOutputs.emplace( output, Realisation{ - UnkeyedRealisation{ - .outPath = outputPath, - }, - outputId, + .id = outputId, + .outPath = outputPath, }); } } diff --git a/src/libstore/restricted-store.cc b/src/libstore/restricted-store.cc index 5270f7d10..a1cb41606 100644 --- a/src/libstore/restricted-store.cc +++ b/src/libstore/restricted-store.cc @@ -107,7 +107,7 @@ struct RestrictedStore : public virtual IndirectRootStore, public virtual GcStor void registerDrvOutput(const Realisation & info) override; void queryRealisationUncached( - const DrvOutput & id, Callback> callback) noexcept override; + const DrvOutput & id, Callback> callback) noexcept override; void buildPaths(const std::vector & paths, BuildMode buildMode, std::shared_ptr evalStore) override; @@ -244,7 +244,7 @@ void RestrictedStore::registerDrvOutput(const Realisation & info) } void RestrictedStore::queryRealisationUncached( - const DrvOutput & id, Callback> callback) noexcept + const DrvOutput & id, Callback> callback) noexcept // XXX: This should probably be allowed if the realisation corresponds to // an allowed derivation { diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index df00dc179..4ce6b15fa 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -598,8 +598,7 @@ void Store::queryPathInfo(const StorePath & storePath, Callback> callback) noexcept +void Store::queryRealisation(const DrvOutput & id, Callback> callback) noexcept { try { @@ -625,20 +624,20 @@ void Store::queryRealisation( auto callbackPtr = std::make_shared(std::move(callback)); - queryRealisationUncached(id, {[this, id, callbackPtr](std::future> fut) { + queryRealisationUncached(id, {[this, id, callbackPtr](std::future> fut) { try { auto info = fut.get(); if (diskCache) { if (info) diskCache->upsertRealisation( - config.getReference().render(/*FIXME withParams=*/false), {*info, id}); + config.getReference().render(/*FIXME withParams=*/false), *info); else diskCache->upsertAbsentRealisation( config.getReference().render(/*FIXME 
withParams=*/false), id); } - (*callbackPtr)(std::shared_ptr(info)); + (*callbackPtr)(std::shared_ptr(info)); } catch (...) { callbackPtr->rethrow(); @@ -646,9 +645,9 @@ void Store::queryRealisation( }}); } -std::shared_ptr Store::queryRealisation(const DrvOutput & id) +std::shared_ptr Store::queryRealisation(const DrvOutput & id) { - using RealPtr = std::shared_ptr; + using RealPtr = std::shared_ptr; std::promise promise; queryRealisation(id, {[&](std::future result) { @@ -911,12 +910,11 @@ std::map copyPaths( std::set toplevelRealisations; for (auto & path : paths) { storePaths.insert(path.path()); - if (auto * realisation = std::get_if(&path.raw)) { + if (auto realisation = std::get_if(&path.raw)) { experimentalFeatureSettings.require(Xp::CaDerivations); toplevelRealisations.insert(*realisation); } } - auto pathsMap = copyPaths(srcStore, dstStore, storePaths, repair, checkSigs, substitute); try { @@ -933,7 +931,7 @@ std::map copyPaths( "dependency of '%s' but isn't registered", drvOutput.to_string(), current.id.to_string()); - children.insert({*currentChild, drvOutput}); + children.insert(*currentChild); } return children; }, @@ -1201,7 +1199,7 @@ void Store::signRealisation(Realisation & realisation) for (auto & secretKeyFile : secretKeyFiles.get()) { SecretKey secretKey(readFile(secretKeyFile)); LocalSigner signer(std::move(secretKey)); - realisation.sign(realisation.id, signer); + realisation.sign(signer); } } diff --git a/src/libstore/unix/build/derivation-builder.cc b/src/libstore/unix/build/derivation-builder.cc index 7cf72fb84..a04056599 100644 --- a/src/libstore/unix/build/derivation-builder.cc +++ b/src/libstore/unix/build/derivation-builder.cc @@ -1830,12 +1830,7 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() for (auto & [outputName, newInfo] : infos) { auto oldinfo = get(initialOutputs, outputName); assert(oldinfo); - auto thisRealisation = Realisation{ - { - .outPath = newInfo.path, - }, - DrvOutput{oldinfo->outputHash, outputName}, - }; + auto thisRealisation = Realisation{.id = DrvOutput{oldinfo->outputHash, outputName}, .outPath = newInfo.path}; if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations) && !drv.type().isImpure()) { store.signRealisation(thisRealisation); store.registerDrvOutput(thisRealisation); diff --git a/src/libutil/include/nix/util/hash.hh b/src/libutil/include/nix/util/hash.hh index 0b16b423c..571b6acca 100644 --- a/src/libutil/include/nix/util/hash.hh +++ b/src/libutil/include/nix/util/hash.hh @@ -222,22 +222,3 @@ public: }; } // namespace nix - -template<> -struct std::hash -{ - std::size_t operator()(const nix::Hash & hash) const noexcept - { - assert(hash.hashSize > sizeof(size_t)); - return *reinterpret_cast(&hash.hash); - } -}; - -namespace nix { - -inline std::size_t hash_value(const Hash & hash) -{ - return std::hash{}(hash); -} - -} // namespace nix From ce749454dc3e7685092cafdb4d1e05876a065b07 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sun, 5 Oct 2025 21:54:59 +0300 Subject: [PATCH 249/332] Revert "Merge pull request #14022 from obsidiansystems/derivation-resolution-goal" This reverts commit d02dca099f2f7411489b57fc5c97968013498f9a, reversing changes made to 9bd09155ac7659f07dfefbd47e4e76ec499f38cd. 
--- .../build/derivation-building-goal.cc | 223 +++++++++++++++++- src/libstore/build/derivation-goal.cc | 97 +------- .../build/derivation-resolution-goal.cc | 210 ----------------- src/libstore/build/worker.cc | 24 +- .../store/build/derivation-building-goal.hh | 19 +- .../nix/store/build/derivation-goal.hh | 8 +- .../store/build/derivation-resolution-goal.hh | 82 ------- .../include/nix/store/build/worker.hh | 20 +- src/libstore/include/nix/store/meson.build | 1 - src/libstore/meson.build | 1 - tests/functional/build.sh | 9 +- tests/functional/ca/issue-13247.sh | 5 +- 12 files changed, 237 insertions(+), 462 deletions(-) delete mode 100644 src/libstore/build/derivation-resolution-goal.cc delete mode 100644 src/libstore/include/nix/store/build/derivation-resolution-goal.hh diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index fa819c96b..001816ca0 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -1,5 +1,6 @@ #include "nix/store/build/derivation-building-goal.hh" #include "nix/store/build/derivation-env-desugar.hh" +#include "nix/store/build/derivation-trampoline-goal.hh" #ifndef _WIN32 // TODO enable build hook on Windows # include "nix/store/build/hook-instance.hh" # include "nix/store/build/derivation-builder.hh" @@ -26,8 +27,8 @@ namespace nix { DerivationBuildingGoal::DerivationBuildingGoal( - const StorePath & drvPath, const Derivation & drv_, Worker & worker, BuildMode buildMode, bool storeDerivation) - : Goal(worker, gaveUpOnSubstitution(storeDerivation)) + const StorePath & drvPath, const Derivation & drv_, Worker & worker, BuildMode buildMode) + : Goal(worker, gaveUpOnSubstitution()) , drvPath(drvPath) , buildMode(buildMode) { @@ -124,10 +125,50 @@ static void runPostBuildHook( /* At least one of the output paths could not be produced using a substitute. So we have to build instead. */ -Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution(bool storeDerivation) +Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() { Goals waitees; + std::map, GoalPtr, value_comparison> inputGoals; + + { + std::function, const DerivedPathMap::ChildNode &)> + addWaiteeDerivedPath; + + addWaiteeDerivedPath = [&](ref inputDrv, + const DerivedPathMap::ChildNode & inputNode) { + if (!inputNode.value.empty()) { + auto g = worker.makeGoal( + DerivedPath::Built{ + .drvPath = inputDrv, + .outputs = inputNode.value, + }, + buildMode == bmRepair ? bmRepair : bmNormal); + inputGoals.insert_or_assign(inputDrv, g); + waitees.insert(std::move(g)); + } + for (const auto & [outputName, childNode] : inputNode.childMap) + addWaiteeDerivedPath( + make_ref(SingleDerivedPath::Built{inputDrv, outputName}), childNode); + }; + + for (const auto & [inputDrvPath, inputNode] : drv->inputDrvs.map) { + /* Ensure that pure, non-fixed-output derivations don't + depend on impure derivations. */ + if (experimentalFeatureSettings.isEnabled(Xp::ImpureDerivations) && !drv->type().isImpure() + && !drv->type().isFixed()) { + auto inputDrv = worker.evalStore.readDerivation(inputDrvPath); + if (inputDrv.type().isImpure()) + throw Error( + "pure derivation '%s' depends on impure derivation '%s'", + worker.store.printStorePath(drvPath), + worker.store.printStorePath(inputDrvPath)); + } + + addWaiteeDerivedPath(makeConstantStorePathRef(inputDrvPath), inputNode); + } + } + /* Copy the input sources from the eval store to the build store. 
@@ -172,17 +213,177 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution(bool storeDerivation) /* Determine the full set of input paths. */ - if (storeDerivation) { - assert(drv->inputDrvs.map.empty()); - /* Store the resolved derivation, as part of the record of - what we're actually building */ - writeDerivation(worker.store, *drv); - } - + /* First, the input derivations. */ { + auto & fullDrv = *drv; + + auto drvType = fullDrv.type(); + bool resolveDrv = + std::visit( + overloaded{ + [&](const DerivationType::InputAddressed & ia) { + /* must resolve if deferred. */ + return ia.deferred; + }, + [&](const DerivationType::ContentAddressed & ca) { + return !fullDrv.inputDrvs.map.empty() + && (ca.fixed + /* Can optionally resolve if fixed, which is good + for avoiding unnecessary rebuilds. */ + ? experimentalFeatureSettings.isEnabled(Xp::CaDerivations) + /* Must resolve if floating and there are any inputs + drvs. */ + : true); + }, + [&](const DerivationType::Impure &) { return true; }}, + drvType.raw) + /* no inputs are outputs of dynamic derivations */ + || std::ranges::any_of(fullDrv.inputDrvs.map.begin(), fullDrv.inputDrvs.map.end(), [](auto & pair) { + return !pair.second.childMap.empty(); + }); + + if (resolveDrv && !fullDrv.inputDrvs.map.empty()) { + experimentalFeatureSettings.require(Xp::CaDerivations); + + /* We are be able to resolve this derivation based on the + now-known results of dependencies. If so, we become a + stub goal aliasing that resolved derivation goal. */ + std::optional attempt = fullDrv.tryResolve( + worker.store, + [&](ref drvPath, const std::string & outputName) -> std::optional { + auto mEntry = get(inputGoals, drvPath); + if (!mEntry) + return std::nullopt; + + auto & buildResult = (*mEntry)->buildResult; + return std::visit( + overloaded{ + [](const BuildResult::Failure &) -> std::optional { return std::nullopt; }, + [&](const BuildResult::Success & success) -> std::optional { + auto i = get(success.builtOutputs, outputName); + if (!i) + return std::nullopt; + + return i->outPath; + }, + }, + buildResult.inner); + }); + if (!attempt) { + /* TODO (impure derivations-induced tech debt) (see below): + The above attempt should have found it, but because we manage + inputDrvOutputs statefully, sometimes it gets out of sync with + the real source of truth (store). So we query the store + directly if there's a problem. */ + attempt = fullDrv.tryResolve(worker.store, &worker.evalStore); + } + assert(attempt); + Derivation drvResolved{std::move(*attempt)}; + + auto pathResolved = writeDerivation(worker.store, drvResolved); + + auto msg = + fmt("resolved derivation: '%s' -> '%s'", + worker.store.printStorePath(drvPath), + worker.store.printStorePath(pathResolved)); + act = std::make_unique( + *logger, + lvlInfo, + actBuildWaiting, + msg, + Logger::Fields{ + worker.store.printStorePath(drvPath), + worker.store.printStorePath(pathResolved), + }); + + /* TODO https://github.com/NixOS/nix/issues/13247 we should + let the calling goal do this, so it has a change to pass + just the output(s) it cares about. 
*/ + auto resolvedDrvGoal = + worker.makeDerivationTrampolineGoal(pathResolved, OutputsSpec::All{}, drvResolved, buildMode); + { + Goals waitees{resolvedDrvGoal}; + co_await await(std::move(waitees)); + } + + trace("resolved derivation finished"); + + auto resolvedResult = resolvedDrvGoal->buildResult; + + // No `std::visit` for coroutines yet + if (auto * successP = resolvedResult.tryGetSuccess()) { + auto & success = *successP; + SingleDrvOutputs builtOutputs; + + auto outputHashes = staticOutputHashes(worker.evalStore, *drv); + auto resolvedHashes = staticOutputHashes(worker.store, drvResolved); + + StorePathSet outputPaths; + + for (auto & outputName : drvResolved.outputNames()) { + auto outputHash = get(outputHashes, outputName); + auto resolvedHash = get(resolvedHashes, outputName); + if ((!outputHash) || (!resolvedHash)) + throw Error( + "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolve)", + worker.store.printStorePath(drvPath), + outputName); + + auto realisation = [&] { + auto take1 = get(success.builtOutputs, outputName); + if (take1) + return *take1; + + /* The above `get` should work. But stateful tracking of + outputs in resolvedResult, this can get out of sync with the + store, which is our actual source of truth. For now we just + check the store directly if it fails. */ + auto take2 = worker.evalStore.queryRealisation(DrvOutput{*resolvedHash, outputName}); + if (take2) + return *take2; + + throw Error( + "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/realisation)", + worker.store.printStorePath(pathResolved), + outputName); + }(); + + if (!drv->type().isImpure()) { + auto newRealisation = realisation; + newRealisation.id = DrvOutput{*outputHash, outputName}; + newRealisation.signatures.clear(); + if (!drv->type().isFixed()) { + auto & drvStore = worker.evalStore.isValidPath(drvPath) ? worker.evalStore : worker.store; + newRealisation.dependentRealisations = + drvOutputReferences(worker.store, *drv, realisation.outPath, &drvStore); + } + worker.store.signRealisation(newRealisation); + worker.store.registerDrvOutput(newRealisation); + } + outputPaths.insert(realisation.outPath); + builtOutputs.emplace(outputName, realisation); + } + + runPostBuildHook(worker.store, *logger, drvPath, outputPaths); + + auto status = success.status; + if (status == BuildResult::Success::AlreadyValid) + status = BuildResult::Success::ResolvesToAlreadyValid; + + co_return doneSuccess(success.status, std::move(builtOutputs)); + } else if (resolvedResult.tryGetFailure()) { + co_return doneFailure({ + BuildResult::Failure::DependencyFailed, + "build of resolved derivation '%s' failed", + worker.store.printStorePath(pathResolved), + }); + } else + assert(false); + } + /* If we get this far, we know no dynamic drvs inputs */ - for (auto & [depDrvPath, depNode] : drv->inputDrvs.map) { + for (auto & [depDrvPath, depNode] : fullDrv.inputDrvs.map) { for (auto & outputName : depNode.value) { /* Don't need to worry about `inputGoals`, because impure derivations are always resolved above. 
Can diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index cc3ba2b7b..5dfc334a8 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -1,6 +1,5 @@ #include "nix/store/build/derivation-goal.hh" #include "nix/store/build/derivation-building-goal.hh" -#include "nix/store/build/derivation-resolution-goal.hh" #ifndef _WIN32 // TODO enable build hook on Windows # include "nix/store/build/hook-instance.hh" # include "nix/store/build/derivation-builder.hh" @@ -30,9 +29,8 @@ DerivationGoal::DerivationGoal( const Derivation & drv, const OutputName & wantedOutput, Worker & worker, - BuildMode buildMode, - bool storeDerivation) - : Goal(worker, haveDerivation(storeDerivation)) + BuildMode buildMode) + : Goal(worker, haveDerivation()) , drvPath(drvPath) , wantedOutput(wantedOutput) , outputHash{[&] { @@ -66,7 +64,7 @@ std::string DerivationGoal::key() }.to_string(worker.store); } -Goal::Co DerivationGoal::haveDerivation(bool storeDerivation) +Goal::Co DerivationGoal::haveDerivation() { trace("have derivation"); @@ -148,96 +146,9 @@ Goal::Co DerivationGoal::haveDerivation(bool storeDerivation) worker.store.printStorePath(drvPath)); } - auto resolutionGoal = worker.makeDerivationResolutionGoal(drvPath, *drv, buildMode); - { - Goals waitees{resolutionGoal}; - co_await await(std::move(waitees)); - } - if (nrFailed != 0) { - co_return doneFailure({BuildResult::Failure::DependencyFailed, "resolution failed"}); - } - - if (resolutionGoal->resolvedDrv) { - auto & [pathResolved, drvResolved] = *resolutionGoal->resolvedDrv; - - auto resolvedDrvGoal = - worker.makeDerivationGoal(pathResolved, drvResolved, wantedOutput, buildMode, /*storeDerivation=*/true); - { - Goals waitees{resolvedDrvGoal}; - co_await await(std::move(waitees)); - } - - trace("resolved derivation finished"); - - auto resolvedResult = resolvedDrvGoal->buildResult; - - // No `std::visit` for coroutines yet - if (auto * successP = resolvedResult.tryGetSuccess()) { - auto & success = *successP; - auto outputHashes = staticOutputHashes(worker.evalStore, *drv); - auto resolvedHashes = staticOutputHashes(worker.store, drvResolved); - - StorePathSet outputPaths; - - auto outputHash = get(outputHashes, wantedOutput); - auto resolvedHash = get(resolvedHashes, wantedOutput); - if ((!outputHash) || (!resolvedHash)) - throw Error( - "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolve)", - worker.store.printStorePath(drvPath), - wantedOutput); - - auto realisation = [&] { - auto take1 = get(success.builtOutputs, wantedOutput); - if (take1) - return *take1; - - /* The above `get` should work. But stateful tracking of - outputs in resolvedResult, this can get out of sync with the - store, which is our actual source of truth. For now we just - check the store directly if it fails. */ - auto take2 = worker.evalStore.queryRealisation(DrvOutput{*resolvedHash, wantedOutput}); - if (take2) - return *take2; - - throw Error( - "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/realisation)", - worker.store.printStorePath(pathResolved), - wantedOutput); - }(); - - if (!drv->type().isImpure()) { - auto newRealisation = realisation; - newRealisation.id = DrvOutput{*outputHash, wantedOutput}; - newRealisation.signatures.clear(); - if (!drv->type().isFixed()) { - auto & drvStore = worker.evalStore.isValidPath(drvPath) ? 
worker.evalStore : worker.store; - newRealisation.dependentRealisations = - drvOutputReferences(worker.store, *drv, realisation.outPath, &drvStore); - } - worker.store.signRealisation(newRealisation); - worker.store.registerDrvOutput(newRealisation); - } - outputPaths.insert(realisation.outPath); - - auto status = success.status; - if (status == BuildResult::Success::AlreadyValid) - status = BuildResult::Success::ResolvesToAlreadyValid; - - co_return doneSuccess(status, std::move(realisation)); - } else if (resolvedResult.tryGetFailure()) { - co_return doneFailure({ - BuildResult::Failure::DependencyFailed, - "build of resolved derivation '%s' failed", - worker.store.printStorePath(pathResolved), - }); - } else - assert(false); - } - /* Give up on substitution for the output we want, actually build this derivation */ - auto g = worker.makeDerivationBuildingGoal(drvPath, *drv, buildMode, storeDerivation); + auto g = worker.makeDerivationBuildingGoal(drvPath, *drv, buildMode); /* We will finish with it ourselves, as if we were the derivational goal. */ g->preserveException = true; diff --git a/src/libstore/build/derivation-resolution-goal.cc b/src/libstore/build/derivation-resolution-goal.cc deleted file mode 100644 index 584169ef3..000000000 --- a/src/libstore/build/derivation-resolution-goal.cc +++ /dev/null @@ -1,210 +0,0 @@ -#include "nix/store/build/derivation-resolution-goal.hh" -#include "nix/store/build/derivation-env-desugar.hh" -#include "nix/store/build/worker.hh" -#include "nix/util/util.hh" -#include "nix/store/common-protocol.hh" -#include "nix/store/globals.hh" - -#include -#include -#include - -#include - -namespace nix { - -DerivationResolutionGoal::DerivationResolutionGoal( - const StorePath & drvPath, const Derivation & drv_, Worker & worker, BuildMode buildMode) - : Goal(worker, resolveDerivation()) - , drvPath(drvPath) -{ - drv = std::make_unique(drv_); - - name = fmt("building of '%s' from in-memory derivation", worker.store.printStorePath(drvPath)); - trace("created"); - - /* Prevent the .chroot directory from being - garbage-collected. (See isActiveTempFile() in gc.cc.) */ - worker.store.addTempRoot(this->drvPath); -} - -void DerivationResolutionGoal::timedOut(Error && ex) {} - -std::string DerivationResolutionGoal::key() -{ - /* Ensure that derivations get built in order of their name, - i.e. a derivation named "aardvark" always comes before - "baboon". And substitution goals always happen before - derivation goals (due to "bd$"). */ - return "rd$" + std::string(drvPath.name()) + "$" + worker.store.printStorePath(drvPath); -} - -/** - * Used for `inputGoals` local variable below - */ -struct value_comparison -{ - template - bool operator()(const ref & lhs, const ref & rhs) const - { - return *lhs < *rhs; - } -}; - -/* At least one of the output paths could not be - produced using a substitute. So we have to build instead. */ -Goal::Co DerivationResolutionGoal::resolveDerivation() -{ - Goals waitees; - - std::map, GoalPtr, value_comparison> inputGoals; - - { - std::function, const DerivedPathMap::ChildNode &)> - addWaiteeDerivedPath; - - addWaiteeDerivedPath = [&](ref inputDrv, - const DerivedPathMap::ChildNode & inputNode) { - if (!inputNode.value.empty()) { - auto g = worker.makeGoal( - DerivedPath::Built{ - .drvPath = inputDrv, - .outputs = inputNode.value, - }, - buildMode == bmRepair ? 
bmRepair : bmNormal); - inputGoals.insert_or_assign(inputDrv, g); - waitees.insert(std::move(g)); - } - for (const auto & [outputName, childNode] : inputNode.childMap) - addWaiteeDerivedPath( - make_ref(SingleDerivedPath::Built{inputDrv, outputName}), childNode); - }; - - for (const auto & [inputDrvPath, inputNode] : drv->inputDrvs.map) { - /* Ensure that pure, non-fixed-output derivations don't - depend on impure derivations. */ - if (experimentalFeatureSettings.isEnabled(Xp::ImpureDerivations) && !drv->type().isImpure() - && !drv->type().isFixed()) { - auto inputDrv = worker.evalStore.readDerivation(inputDrvPath); - if (inputDrv.type().isImpure()) - throw Error( - "pure derivation '%s' depends on impure derivation '%s'", - worker.store.printStorePath(drvPath), - worker.store.printStorePath(inputDrvPath)); - } - - addWaiteeDerivedPath(makeConstantStorePathRef(inputDrvPath), inputNode); - } - } - - co_await await(std::move(waitees)); - - trace("all inputs realised"); - - if (nrFailed != 0) { - auto msg = - fmt("Cannot build '%s'.\n" - "Reason: " ANSI_RED "%d %s failed" ANSI_NORMAL ".", - Magenta(worker.store.printStorePath(drvPath)), - nrFailed, - nrFailed == 1 ? "dependency" : "dependencies"); - msg += showKnownOutputs(worker.store, *drv); - co_return amDone(ecFailed, {BuildError(BuildResult::Failure::DependencyFailed, msg)}); - } - - /* Gather information necessary for computing the closure and/or - running the build hook. */ - - /* Determine the full set of input paths. */ - - /* First, the input derivations. */ - { - auto & fullDrv = *drv; - - auto drvType = fullDrv.type(); - bool resolveDrv = - std::visit( - overloaded{ - [&](const DerivationType::InputAddressed & ia) { - /* must resolve if deferred. */ - return ia.deferred; - }, - [&](const DerivationType::ContentAddressed & ca) { - return !fullDrv.inputDrvs.map.empty() - && (ca.fixed - /* Can optionally resolve if fixed, which is good - for avoiding unnecessary rebuilds. */ - ? experimentalFeatureSettings.isEnabled(Xp::CaDerivations) - /* Must resolve if floating and there are any inputs - drvs. */ - : true); - }, - [&](const DerivationType::Impure &) { return true; }}, - drvType.raw) - /* no inputs are outputs of dynamic derivations */ - || std::ranges::any_of(fullDrv.inputDrvs.map.begin(), fullDrv.inputDrvs.map.end(), [](auto & pair) { - return !pair.second.childMap.empty(); - }); - - if (resolveDrv && !fullDrv.inputDrvs.map.empty()) { - experimentalFeatureSettings.require(Xp::CaDerivations); - - /* We are be able to resolve this derivation based on the - now-known results of dependencies. If so, we become a - stub goal aliasing that resolved derivation goal. */ - std::optional attempt = fullDrv.tryResolve( - worker.store, - [&](ref drvPath, const std::string & outputName) -> std::optional { - auto mEntry = get(inputGoals, drvPath); - if (!mEntry) - return std::nullopt; - - auto & buildResult = (*mEntry)->buildResult; - return std::visit( - overloaded{ - [](const BuildResult::Failure &) -> std::optional { return std::nullopt; }, - [&](const BuildResult::Success & success) -> std::optional { - auto i = get(success.builtOutputs, outputName); - if (!i) - return std::nullopt; - - return i->outPath; - }, - }, - buildResult.inner); - }); - if (!attempt) { - /* TODO (impure derivations-induced tech debt) (see below): - The above attempt should have found it, but because we manage - inputDrvOutputs statefully, sometimes it gets out of sync with - the real source of truth (store). So we query the store - directly if there's a problem. 
*/ - attempt = fullDrv.tryResolve(worker.store, &worker.evalStore); - } - assert(attempt); - - auto pathResolved = writeDerivation(worker.store, *attempt, NoRepair, /*readOnly =*/true); - - auto msg = - fmt("resolved derivation: '%s' -> '%s'", - worker.store.printStorePath(drvPath), - worker.store.printStorePath(pathResolved)); - act = std::make_unique( - *logger, - lvlInfo, - actBuildWaiting, - msg, - Logger::Fields{ - worker.store.printStorePath(drvPath), - worker.store.printStorePath(pathResolved), - }); - - resolvedDrv = - std::make_unique>(std::move(pathResolved), *std::move(attempt)); - } - } - - co_return amDone(ecSuccess, std::nullopt); -} - -} // namespace nix diff --git a/src/libstore/build/worker.cc b/src/libstore/build/worker.cc index 53175a8c4..3e6e0bef0 100644 --- a/src/libstore/build/worker.cc +++ b/src/libstore/build/worker.cc @@ -4,7 +4,6 @@ #include "nix/store/build/substitution-goal.hh" #include "nix/store/build/drv-output-substitution-goal.hh" #include "nix/store/build/derivation-goal.hh" -#include "nix/store/build/derivation-resolution-goal.hh" #include "nix/store/build/derivation-building-goal.hh" #include "nix/store/build/derivation-trampoline-goal.hh" #ifndef _WIN32 // TODO Enable building on Windows @@ -76,26 +75,15 @@ std::shared_ptr Worker::makeDerivationTrampolineGoal( } std::shared_ptr Worker::makeDerivationGoal( - const StorePath & drvPath, - const Derivation & drv, - const OutputName & wantedOutput, - BuildMode buildMode, - bool storeDerivation) + const StorePath & drvPath, const Derivation & drv, const OutputName & wantedOutput, BuildMode buildMode) { - return initGoalIfNeeded( - derivationGoals[drvPath][wantedOutput], drvPath, drv, wantedOutput, *this, buildMode, storeDerivation); + return initGoalIfNeeded(derivationGoals[drvPath][wantedOutput], drvPath, drv, wantedOutput, *this, buildMode); } -std::shared_ptr -Worker::makeDerivationResolutionGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode) +std::shared_ptr +Worker::makeDerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode) { - return initGoalIfNeeded(derivationResolutionGoals[drvPath], drvPath, drv, *this, buildMode); -} - -std::shared_ptr Worker::makeDerivationBuildingGoal( - const StorePath & drvPath, const Derivation & drv, BuildMode buildMode, bool storeDerivation) -{ - return initGoalIfNeeded(derivationBuildingGoals[drvPath], drvPath, drv, *this, buildMode, storeDerivation); + return initGoalIfNeeded(derivationBuildingGoals[drvPath], drvPath, drv, *this, buildMode); } std::shared_ptr @@ -170,8 +158,6 @@ void Worker::removeGoal(GoalPtr goal) nix::removeGoal(drvGoal, derivationTrampolineGoals.map); else if (auto drvGoal = std::dynamic_pointer_cast(goal)) nix::removeGoal(drvGoal, derivationGoals); - else if (auto drvResolutionGoal = std::dynamic_pointer_cast(goal)) - nix::removeGoal(drvResolutionGoal, derivationResolutionGoals); else if (auto drvBuildingGoal = std::dynamic_pointer_cast(goal)) nix::removeGoal(drvBuildingGoal, derivationBuildingGoals); else if (auto subGoal = std::dynamic_pointer_cast(goal)) diff --git a/src/libstore/include/nix/store/build/derivation-building-goal.hh b/src/libstore/include/nix/store/build/derivation-building-goal.hh index ab063ff3f..edb496024 100644 --- a/src/libstore/include/nix/store/build/derivation-building-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-building-goal.hh @@ -29,21 +29,8 @@ typedef enum { rpAccept, rpDecline, rpPostpone } HookReply; */ struct DerivationBuildingGoal : 
public Goal { - /** - * @param storeDerivation Whether to store the derivation in - * `worker.store`. This is useful for newly-resolved derivations. In this - * case, the derivation was not created a priori, e.g. purely (or close - * enough) from evaluation of the Nix language, but also depends on the - * exact content produced by upstream builds. It is strongly advised to - * have a permanent record of such a resolved derivation in order to - * faithfully reconstruct the build history. - */ DerivationBuildingGoal( - const StorePath & drvPath, - const Derivation & drv, - Worker & worker, - BuildMode buildMode = bmNormal, - bool storeDerivation = false); + const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode = bmNormal); ~DerivationBuildingGoal(); private: @@ -113,7 +100,7 @@ private: /** * The states. */ - Co gaveUpOnSubstitution(bool storeDerivation); + Co gaveUpOnSubstitution(); Co tryToBuild(); /** @@ -168,7 +155,7 @@ private: JobCategory jobCategory() const override { - return JobCategory::Administration; + return JobCategory::Build; }; }; diff --git a/src/libstore/include/nix/store/build/derivation-goal.hh b/src/libstore/include/nix/store/build/derivation-goal.hh index 353e7c489..e05bf1c0b 100644 --- a/src/libstore/include/nix/store/build/derivation-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-goal.hh @@ -40,16 +40,12 @@ struct DerivationGoal : public Goal */ OutputName wantedOutput; - /** - * @param storeDerivation See `DerivationBuildingGoal`. This is just passed along. - */ DerivationGoal( const StorePath & drvPath, const Derivation & drv, const OutputName & wantedOutput, Worker & worker, - BuildMode buildMode = bmNormal, - bool storeDerivation = false); + BuildMode buildMode = bmNormal); ~DerivationGoal() = default; void timedOut(Error && ex) override @@ -84,7 +80,7 @@ private: /** * The states. */ - Co haveDerivation(bool storeDerivation); + Co haveDerivation(); /** * Return `std::nullopt` if the output is unknown, e.g. un unbuilt diff --git a/src/libstore/include/nix/store/build/derivation-resolution-goal.hh b/src/libstore/include/nix/store/build/derivation-resolution-goal.hh deleted file mode 100644 index ebaab4f06..000000000 --- a/src/libstore/include/nix/store/build/derivation-resolution-goal.hh +++ /dev/null @@ -1,82 +0,0 @@ -#pragma once -///@file - -#include "nix/store/derivations.hh" -#include "nix/store/derivation-options.hh" -#include "nix/store/build/derivation-building-misc.hh" -#include "nix/store/store-api.hh" -#include "nix/store/build/goal.hh" - -namespace nix { - -struct BuilderFailureError; - -/** - * A goal for resolving a derivation. Resolving a derivation (@see - * `Derivation::tryResolve`) simplifies its inputs, replacing - * `inputDrvs` with `inputSrcs. - * - * Conceptually, we resolve all derivations. For input-addressed - * derivations (that don't transtively depend on content-addressed - * derivations), however, we don't actually use the resolved derivation, - * because the output paths would appear invalid (if we tried to verify - * them), since they are computed from the original, unresolved inputs. - * - * That said, if we ever made the new flavor of input-addressing as described - * in issue #9259, then the input-addressing would be based on the resolved - * inputs, and we like the CA case *would* use the output of this goal. 
- * - * (The point of this discussion is not to randomly stuff information on - * a yet-unimplemented feature (issue #9259) in the codebase, but - * rather, to illustrate that there is no inherent tension between - * explicit derivation resolution and input-addressing in general. That - * tension only exists with the type of input-addressing we've - * historically used.) - */ -struct DerivationResolutionGoal : public Goal -{ - DerivationResolutionGoal( - const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode = bmNormal); - - /** - * If the derivation needed to be resolved, this is resulting - * resolved derivations and its path. - */ - std::unique_ptr> resolvedDrv; - - void timedOut(Error && ex) override; - -private: - - /** - * The path of the derivation. - */ - StorePath drvPath; - - /** - * The derivation stored at drvPath. - */ - std::unique_ptr drv; - - /** - * The remainder is state held during the build. - */ - - BuildMode buildMode; - - std::unique_ptr act; - - std::string key() override; - - /** - * The states. - */ - Co resolveDerivation(); - - JobCategory jobCategory() const override - { - return JobCategory::Administration; - }; -}; - -} // namespace nix diff --git a/src/libstore/include/nix/store/build/worker.hh b/src/libstore/include/nix/store/build/worker.hh index 9767590ac..a6de780c1 100644 --- a/src/libstore/include/nix/store/build/worker.hh +++ b/src/libstore/include/nix/store/build/worker.hh @@ -16,7 +16,6 @@ namespace nix { /* Forward definition. */ struct DerivationTrampolineGoal; struct DerivationGoal; -struct DerivationResolutionGoal; struct DerivationBuildingGoal; struct PathSubstitutionGoal; class DrvOutputSubstitutionGoal; @@ -112,7 +111,6 @@ private: DerivedPathMap>> derivationTrampolineGoals; std::map>> derivationGoals; - std::map> derivationResolutionGoals; std::map> derivationBuildingGoals; std::map> substitutionGoals; std::map> drvOutputSubstitutionGoals; @@ -223,23 +221,13 @@ public: const StorePath & drvPath, const Derivation & drv, const OutputName & wantedOutput, - BuildMode buildMode = bmNormal, - bool storeDerivation = false); + BuildMode buildMode = bmNormal); /** - * @ref DerivationResolutionGoal "derivation resolution goal" + * @ref DerivationBuildingGoal "derivation goal" */ - std::shared_ptr - makeDerivationResolutionGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode = bmNormal); - - /** - * @ref DerivationBuildingGoal "derivation building goal" - */ - std::shared_ptr makeDerivationBuildingGoal( - const StorePath & drvPath, - const Derivation & drv, - BuildMode buildMode = bmNormal, - bool storeDerivation = false); + std::shared_ptr + makeDerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode = bmNormal); /** * @ref PathSubstitutionGoal "substitution goal" diff --git a/src/libstore/include/nix/store/meson.build b/src/libstore/include/nix/store/meson.build index 1f04e357a..c9e4c36dd 100644 --- a/src/libstore/include/nix/store/meson.build +++ b/src/libstore/include/nix/store/meson.build @@ -18,7 +18,6 @@ headers = [ config_pub_h ] + files( 'build/derivation-building-misc.hh', 'build/derivation-env-desugar.hh', 'build/derivation-goal.hh', - 'build/derivation-resolution-goal.hh', 'build/derivation-trampoline-goal.hh', 'build/drv-output-substitution-goal.hh', 'build/goal.hh', diff --git a/src/libstore/meson.build b/src/libstore/meson.build index e220e65cd..a3502c2e0 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -302,7 +302,6 @@ 
sources = files( 'build/derivation-check.cc', 'build/derivation-env-desugar.cc', 'build/derivation-goal.cc', - 'build/derivation-resolution-goal.cc', 'build/derivation-trampoline-goal.cc', 'build/drv-output-substitution-goal.cc', 'build/entry-points.cc', diff --git a/tests/functional/build.sh b/tests/functional/build.sh index c9a39438d..0a19ff7da 100755 --- a/tests/functional/build.sh +++ b/tests/functional/build.sh @@ -178,8 +178,7 @@ test "$(<<<"$out" grep -cE '^error:')" = 4 out="$(nix build -f fod-failing.nix -L x4 2>&1)" && status=0 || status=$? test "$status" = 1 -# Precise number of errors depends on daemon version / goal refactorings -(( "$(<<<"$out" grep -cE '^error:')" >= 2 )) +test "$(<<<"$out" grep -cE '^error:')" = 2 if isDaemonNewer "2.29pre"; then <<<"$out" grepQuiet -E "error: Cannot build '.*-x4\\.drv'" @@ -187,13 +186,11 @@ if isDaemonNewer "2.29pre"; then else <<<"$out" grepQuiet -E "error: 1 dependencies of derivation '.*-x4\\.drv' failed to build" fi -# Either x2 or x3 could have failed, x4 depends on both symmetrically -<<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x[23]\\.drv'" +<<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x2\\.drv'" out="$(nix build -f fod-failing.nix -L x4 --keep-going 2>&1)" && status=0 || status=$? test "$status" = 1 -# Precise number of errors depends on daemon version / goal refactorings -(( "$(<<<"$out" grep -cE '^error:')" >= 3 )) +test "$(<<<"$out" grep -cE '^error:')" = 3 if isDaemonNewer "2.29pre"; then <<<"$out" grepQuiet -E "error: Cannot build '.*-x4\\.drv'" <<<"$out" grepQuiet -E "Reason: 2 dependencies failed." diff --git a/tests/functional/ca/issue-13247.sh b/tests/functional/ca/issue-13247.sh index 705919513..686d90ced 100755 --- a/tests/functional/ca/issue-13247.sh +++ b/tests/functional/ca/issue-13247.sh @@ -65,4 +65,7 @@ buildViaSubstitute use-a-prime-more-outputs^first # Should only fetch the output we asked for [[ -d "$(jq -r <"$TEST_ROOT"/a.json '.[0].outputs.out')" ]] [[ -f "$(jq -r <"$TEST_ROOT"/a.json '.[2].outputs.first')" ]] -[[ ! 
-e "$(jq -r <"$TEST_ROOT"/a.json '.[2].outputs.second')" ]] + +# Output should *not* be here, this is the bug +[[ -e "$(jq -r <"$TEST_ROOT"/a.json '.[2].outputs.second')" ]] +skipTest "bug is not yet fixed" From 14b119c948476cc24e83bb08880eeab47ff92986 Mon Sep 17 00:00:00 2001 From: Taeer Bar-Yam Date: Sun, 5 Oct 2025 12:07:10 -0400 Subject: [PATCH 250/332] libexpr: fixup ExprOpHasAttr() to take allocator reference --- src/libexpr/include/nix/expr/nixexpr.hh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libexpr/include/nix/expr/nixexpr.hh b/src/libexpr/include/nix/expr/nixexpr.hh index b66dba4f3..863a1369d 100644 --- a/src/libexpr/include/nix/expr/nixexpr.hh +++ b/src/libexpr/include/nix/expr/nixexpr.hh @@ -350,7 +350,7 @@ struct ExprOpHasAttr : Expr Expr * e; std::span attrPath; - ExprOpHasAttr(std::pmr::polymorphic_allocator alloc, Expr * e, std::vector attrPath) + ExprOpHasAttr(std::pmr::polymorphic_allocator & alloc, Expr * e, std::vector attrPath) : e(e) , attrPath({alloc.allocate_object(attrPath.size()), attrPath.size()}) { From 6c0d67769d99800cbbc294abba722d9ba3b19fcc Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 6 Oct 2025 11:29:15 +0200 Subject: [PATCH 251/332] ExternalDerivationBuilder: Pass inputPaths --- src/libstore/include/nix/store/globals.hh | 45 +++---------------- .../unix/build/external-derivation-builder.cc | 6 +++ 2 files changed, 12 insertions(+), 39 deletions(-) diff --git a/src/libstore/include/nix/store/globals.hh b/src/libstore/include/nix/store/globals.hh index ae8990eab..f97b261f8 100644 --- a/src/libstore/include/nix/store/globals.hh +++ b/src/libstore/include/nix/store/globals.hh @@ -1401,48 +1401,15 @@ public: "builder": "/nix/store/s1qkj0ph…-bash-5.2p37/bin/bash", "env": { "HOME": "/homeless-shelter", - "NIX_BUILD_CORES": "14", - "NIX_BUILD_TOP": "/build", - "NIX_LOG_FD": "2", - "NIX_STORE": "/nix/store", - "PATH": "/path-not-set", - "PWD": "/build", - "TEMP": "/build", - "TEMPDIR": "/build", - "TERM": "xterm-256color", - "TMP": "/build", - "TMPDIR": "/build", - "__structuredAttrs": "", - "buildInputs": "", "builder": "/nix/store/s1qkj0ph…-bash-5.2p37/bin/bash", - "cmakeFlags": "", - "configureFlags": "", - "depsBuildBuild": "", - "depsBuildBuildPropagated": "", - "depsBuildTarget": "", - "depsBuildTargetPropagated": "", - "depsHostHost": "", - "depsHostHostPropagated": "", - "depsTargetTarget": "", - "depsTargetTargetPropagated": "", - "doCheck": "1", - "doInstallCheck": "1", - "mesonFlags": "", - "name": "hello-2.12.2", "nativeBuildInputs": "/nix/store/l31j72f1…-version-check-hook", - "out": "/nix/store/2yx2prgx…-hello-2.12.2", - "outputs": "out", - "patches": "", - "pname": "hello", - "postInstallCheck": "stat \"${!outputBin}/bin/hello\"\n", - "propagatedBuildInputs": "", - "propagatedNativeBuildInputs": "", - "src": "/nix/store/dw402azx…-hello-2.12.2.tar.gz", - "stdenv": "/nix/store/i8bw5nqg…-stdenv-linux", - "strictDeps": "", - "system": "aarch64-linux", - "version": "2.12.2" + … }, + "inputPaths": [ + "/nix/store/14dciax3mm8j70hjy4c0d68mds9ppx2s-glibc-2.32-54-dev", + "/nix/store/1azs5s8zc0z7m6sssvq1np0m7z873zml-gettext-0.21", + … + ], "realStoreDir": "/nix/store", "storeDir": "/nix/store", "system": "aarch64-linux", diff --git a/src/libstore/unix/build/external-derivation-builder.cc b/src/libstore/unix/build/external-derivation-builder.cc index 4d3eba6db..e30a92db7 100644 --- a/src/libstore/unix/build/external-derivation-builder.cc +++ b/src/libstore/unix/build/external-derivation-builder.cc @@ -68,6 +68,12 @@ struct 
ExternalDerivationBuilder : DerivationBuilderImpl json.emplace("storeDir", store.storeDir); json.emplace("realStoreDir", store.config->realStoreDir.get()); json.emplace("system", drv.platform); + { + auto l = nlohmann::json::array(); + for (auto & i : inputPaths) + l.push_back(store.printStorePath(i)); + json.emplace("inputPaths", std::move(l)); + } // TODO(cole-h): writing this to stdin is too much effort right now, if we want to revisit // that, see this comment by Eelco about how to make it not suck: From 68bd2e40f4629f760886e2934f1506c54c795415 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 6 Oct 2025 11:33:29 +0200 Subject: [PATCH 252/332] ExternalDerivationBuilder: Pass the (scratch) outputs --- src/libstore/include/nix/store/globals.hh | 8 ++++++-- src/libstore/unix/build/external-derivation-builder.cc | 6 ++++++ 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/src/libstore/include/nix/store/globals.hh b/src/libstore/include/nix/store/globals.hh index f97b261f8..385f8cd7a 100644 --- a/src/libstore/include/nix/store/globals.hh +++ b/src/libstore/include/nix/store/globals.hh @@ -1403,13 +1403,17 @@ public: "HOME": "/homeless-shelter", "builder": "/nix/store/s1qkj0ph…-bash-5.2p37/bin/bash", "nativeBuildInputs": "/nix/store/l31j72f1…-version-check-hook", + "out": "/nix/store/2yx2prgx…-hello-2.12.2" … }, "inputPaths": [ - "/nix/store/14dciax3mm8j70hjy4c0d68mds9ppx2s-glibc-2.32-54-dev", - "/nix/store/1azs5s8zc0z7m6sssvq1np0m7z873zml-gettext-0.21", + "/nix/store/14dciax3…-glibc-2.32-54-dev", + "/nix/store/1azs5s8z…-gettext-0.21", … ], + "outputs": { + "out": "/nix/store/2yx2prgx…-hello-2.12.2" + }, "realStoreDir": "/nix/store", "storeDir": "/nix/store", "system": "aarch64-linux", diff --git a/src/libstore/unix/build/external-derivation-builder.cc b/src/libstore/unix/build/external-derivation-builder.cc index e30a92db7..12ac77542 100644 --- a/src/libstore/unix/build/external-derivation-builder.cc +++ b/src/libstore/unix/build/external-derivation-builder.cc @@ -74,6 +74,12 @@ struct ExternalDerivationBuilder : DerivationBuilderImpl l.push_back(store.printStorePath(i)); json.emplace("inputPaths", std::move(l)); } + { + auto l = nlohmann::json::object(); + for (auto & i : scratchOutputs) + l.emplace(i.first, store.printStorePath(i.second)); + json.emplace("outputs", std::move(l)); + } // TODO(cole-h): writing this to stdin is too much effort right now, if we want to revisit // that, see this comment by Eelco about how to make it not suck: From e9c5d721d871d5c78c577c5c47edc87c5e1af476 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 6 Oct 2025 11:36:26 +0200 Subject: [PATCH 253/332] ExternalDerivationBuilder: Emit a version field --- src/libstore/include/nix/store/globals.hh | 3 ++- src/libstore/unix/build/external-derivation-builder.cc | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/libstore/include/nix/store/globals.hh b/src/libstore/include/nix/store/globals.hh index 385f8cd7a..1b59bd6fc 100644 --- a/src/libstore/include/nix/store/globals.hh +++ b/src/libstore/include/nix/store/globals.hh @@ -1419,7 +1419,8 @@ public: "system": "aarch64-linux", "tmpDir": "/private/tmp/nix-build-hello-2.12.2.drv-0/build", "tmpDirInSandbox": "/build", - "topTmpDir": "/private/tmp/nix-build-hello-2.12.2.drv-0" + "topTmpDir": "/private/tmp/nix-build-hello-2.12.2.drv-0", + "version": 1 } )", {}, // aliases diff --git a/src/libstore/unix/build/external-derivation-builder.cc b/src/libstore/unix/build/external-derivation-builder.cc index 12ac77542..71cfd1a62 
100644 --- a/src/libstore/unix/build/external-derivation-builder.cc +++ b/src/libstore/unix/build/external-derivation-builder.cc @@ -49,6 +49,7 @@ struct ExternalDerivationBuilder : DerivationBuilderImpl auto json = nlohmann::json::object(); + json.emplace("version", 1); json.emplace("builder", drv.builder); { auto l = nlohmann::json::array(); From 8aa0acb9e8260c2713cabb8407a30ae54f6eebb5 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 6 Oct 2025 13:25:33 +0200 Subject: [PATCH 254/332] Don't build getPtsName() on Windows It's not needed. https://hydra.nixos.org/build/309215536 --- src/libutil/terminal.cc | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/libutil/terminal.cc b/src/libutil/terminal.cc index 656847487..fe22146ab 100644 --- a/src/libutil/terminal.cc +++ b/src/libutil/terminal.cc @@ -179,9 +179,10 @@ std::pair getWindowSize() return *windowSize.lock(); } +#ifndef _WIN32 std::string getPtsName(int fd) { -#ifdef __APPLE__ +# ifdef __APPLE__ static std::mutex ptsnameMutex; // macOS doesn't have ptsname_r, use mutex-protected ptsname std::lock_guard lock(ptsnameMutex); @@ -190,7 +191,7 @@ std::string getPtsName(int fd) throw SysError("getting pseudoterminal slave name"); } return name; -#else +# else // Use thread-safe ptsname_r on platforms that support it // PTY names are typically short: // - Linux: /dev/pts/N (where N is usually < 1000) @@ -201,7 +202,8 @@ std::string getPtsName(int fd) throw SysError("getting pseudoterminal slave name"); } return buf; -#endif +# endif } +#endif } // namespace nix From 9f6ed7042986693eb76f338697ec446d1c69c88c Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 6 Oct 2025 16:04:58 +0200 Subject: [PATCH 255/332] release notes: 2.32.0 --- doc/manual/rl-next/c-api-byidx.md | 7 -- doc/manual/rl-next/c-api-lazy-accessors.md | 16 --- .../rl-next/cached-substituted-inputs.md | 10 -- doc/manual/rl-next/derivation-json.md | 15 --- doc/manual/rl-next/dropped-compat.md | 6 - doc/manual/rl-next/faster-nix-flake-check.md | 9 -- .../rl-next/http-binary-cache-compression.md | 19 --- doc/manual/rl-next/shorter-build-dir-names.md | 6 - doc/manual/source/SUMMARY.md.in | 1 + doc/manual/source/release-notes/rl-2.32.md | 112 ++++++++++++++++++ 10 files changed, 113 insertions(+), 88 deletions(-) delete mode 100644 doc/manual/rl-next/c-api-byidx.md delete mode 100644 doc/manual/rl-next/c-api-lazy-accessors.md delete mode 100644 doc/manual/rl-next/cached-substituted-inputs.md delete mode 100644 doc/manual/rl-next/derivation-json.md delete mode 100644 doc/manual/rl-next/dropped-compat.md delete mode 100644 doc/manual/rl-next/faster-nix-flake-check.md delete mode 100644 doc/manual/rl-next/http-binary-cache-compression.md delete mode 100644 doc/manual/rl-next/shorter-build-dir-names.md create mode 100644 doc/manual/source/release-notes/rl-2.32.md diff --git a/doc/manual/rl-next/c-api-byidx.md b/doc/manual/rl-next/c-api-byidx.md deleted file mode 100644 index 9b5bb3fcb..000000000 --- a/doc/manual/rl-next/c-api-byidx.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -synopsis: "C API: `nix_get_attr_name_byidx`, `nix_get_attr_byidx` take a `nix_value *` instead of `const nix_value *`" -prs: [13987] ---- - -In order to accommodate a more optimized internal representation of attribute set merges these functions require -a mutable `nix_value *` that might be modified on access. This does *not* break the ABI of these functions. 
diff --git a/doc/manual/rl-next/c-api-lazy-accessors.md b/doc/manual/rl-next/c-api-lazy-accessors.md deleted file mode 100644 index bd0604f0d..000000000 --- a/doc/manual/rl-next/c-api-lazy-accessors.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -synopsis: "C API: Add lazy attribute and list item accessors" -prs: [14030] ---- - -The C API now includes lazy accessor functions for retrieving values from lists and attribute sets without forcing evaluation: - -- `nix_get_list_byidx_lazy()` - Get a list element without forcing its evaluation -- `nix_get_attr_byname_lazy()` - Get an attribute value by name without forcing evaluation -- `nix_get_attr_byidx_lazy()` - Get an attribute by index without forcing evaluation - -These functions are useful when forwarding unevaluated sub-values to other lists, attribute sets, or function calls. They allow more efficient handling of Nix values by deferring evaluation until actually needed. - -Additionally, bounds checking has been improved for all `_byidx` functions to properly validate indices before access, preventing potential out-of-bounds errors. - -The documentation for `NIX_ERR_KEY` error handling has also been clarified to specify when this error code is returned. \ No newline at end of file diff --git a/doc/manual/rl-next/cached-substituted-inputs.md b/doc/manual/rl-next/cached-substituted-inputs.md deleted file mode 100644 index b0b53a213..000000000 --- a/doc/manual/rl-next/cached-substituted-inputs.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -synopsis: "Substituted flake inputs are no longer re-copied to the store" -prs: [14041] ---- - -Since 2.25, Nix would fail to store a cache entry for substituted flake inputs, -which in turn would cause them to be re-copied to the store on initial -evaluation. Caching these inputs results in a near doubling of a performance in -some cases — especially on I/O-bound machines and when using commands that -fetch many inputs, like `nix flake archive/prefetch-inputs` diff --git a/doc/manual/rl-next/derivation-json.md b/doc/manual/rl-next/derivation-json.md deleted file mode 100644 index be7ab1cfe..000000000 --- a/doc/manual/rl-next/derivation-json.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -synopsis: Derivation JSON format now uses store path basenames (no store dir) only -prs: [13980] -issues: [13570] ---- - -Experience with many JSON frameworks (e.g. nlohmann/json in C++, Serde in Rust, and Aeson in Haskell), has shown that the use of the store dir in JSON formats is an impediment to systematic JSON formats, -because it requires the serializer/deserializer to take an extra paramater (the store dir). - -We ultimately want to rectify this issue with all (non-stable, able to be changed) JSON formats. -To start with, we are changing the JSON format for derivations because the `nix derivation` commands are ---- in addition to being formally unstable ---- less widely used than other unstable commands. - -See the documentation on the [JSON format for derivations](@docroot@/protocols/json/derivation.md) for further details. diff --git a/doc/manual/rl-next/dropped-compat.md b/doc/manual/rl-next/dropped-compat.md deleted file mode 100644 index d6cc7704a..000000000 --- a/doc/manual/rl-next/dropped-compat.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -synopsis: "Removed support for daemons and clients older than Nix 2.0" -prs: [13951] ---- - -We have dropped support in the daemon worker protocol for daemons and clients that don't speak at least version 18 of the protocol. 
This first Nix release that supports this version is Nix 2.0, released in February 2018. diff --git a/doc/manual/rl-next/faster-nix-flake-check.md b/doc/manual/rl-next/faster-nix-flake-check.md deleted file mode 100644 index c195023c3..000000000 --- a/doc/manual/rl-next/faster-nix-flake-check.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -synopsis: "`nix flake check` now skips derivations that can be substituted" -prs: [13574] ---- - -Previously, `nix flake check` would evaluate and build/substitute all -derivations. Now, it will skip downloading derivations that can be substituted. -This can drastically decrease the time invocations take in environments where -checks may already be cached (like in CI). diff --git a/doc/manual/rl-next/http-binary-cache-compression.md b/doc/manual/rl-next/http-binary-cache-compression.md deleted file mode 100644 index 88f1de6d9..000000000 --- a/doc/manual/rl-next/http-binary-cache-compression.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -synopsis: "HTTP binary caches now support transparent compression for metadata" -prs: [] ---- - -HTTP binary cache stores can now compress `.narinfo`, `.ls`, and build log files before uploading them, -reducing bandwidth usage and storage requirements. The compression is applied transparently using the -`Content-Encoding` header, allowing compatible clients to automatically decompress the files. - -Three new configuration options control this behavior: -- `narinfo-compression`: Compression method for `.narinfo` files -- `ls-compression`: Compression method for `.ls` files -- `log-compression`: Compression method for build logs in `log/` directory - -Example usage: -``` -nix copy --to 'http://cache.example.com?narinfo-compression=gzip&ls-compression=gzip' /nix/store/... -nix store copy-log --to 'http://cache.example.com?log-compression=br' /nix/store/... -``` diff --git a/doc/manual/rl-next/shorter-build-dir-names.md b/doc/manual/rl-next/shorter-build-dir-names.md deleted file mode 100644 index e87fa5d04..000000000 --- a/doc/manual/rl-next/shorter-build-dir-names.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -synopsis: "Temporary build directories no longer include derivation names" -prs: [13839] ---- - -Temporary build directories created during derivation builds no longer include the derivation name in their path to avoid build failures when the derivation name is too long. This change ensures predictable prefix lengths for build directories under `/nix/var/nix/builds`. 
\ No newline at end of file diff --git a/doc/manual/source/SUMMARY.md.in b/doc/manual/source/SUMMARY.md.in index 8fed98c2c..25e68811d 100644 --- a/doc/manual/source/SUMMARY.md.in +++ b/doc/manual/source/SUMMARY.md.in @@ -138,6 +138,7 @@ - [Contributing](development/contributing.md) - [Releases](release-notes/index.md) {{#include ./SUMMARY-rl-next.md}} + - [Release 2.32 (2025-10-06)](release-notes/rl-2.32.md) - [Release 2.31 (2025-08-21)](release-notes/rl-2.31.md) - [Release 2.30 (2025-07-07)](release-notes/rl-2.30.md) - [Release 2.29 (2025-05-14)](release-notes/rl-2.29.md) diff --git a/doc/manual/source/release-notes/rl-2.32.md b/doc/manual/source/release-notes/rl-2.32.md new file mode 100644 index 000000000..5c1c314db --- /dev/null +++ b/doc/manual/source/release-notes/rl-2.32.md @@ -0,0 +1,112 @@ +# Release 2.32.0 (2025-10-06) + +- C API: `nix_get_attr_name_byidx`, `nix_get_attr_byidx` take a `nix_value *` instead of `const nix_value *` [#13987](https://github.com/NixOS/nix/pull/13987) + + In order to accommodate a more optimized internal representation of attribute set merges these functions require + a mutable `nix_value *` that might be modified on access. This does *not* break the ABI of these functions. + +- C API: Add lazy attribute and list item accessors [#14030](https://github.com/NixOS/nix/pull/14030) + + The C API now includes lazy accessor functions for retrieving values from lists and attribute sets without forcing evaluation: + + - `nix_get_list_byidx_lazy()` - Get a list element without forcing its evaluation + - `nix_get_attr_byname_lazy()` - Get an attribute value by name without forcing evaluation + - `nix_get_attr_byidx_lazy()` - Get an attribute by index without forcing evaluation + + These functions are useful when forwarding unevaluated sub-values to other lists, attribute sets, or function calls. They allow more efficient handling of Nix values by deferring evaluation until actually needed. + + Additionally, bounds checking has been improved for all `_byidx` functions to properly validate indices before access, preventing potential out-of-bounds errors. + + The documentation for `NIX_ERR_KEY` error handling has also been clarified to specify when this error code is returned. + +- Substituted flake inputs are no longer re-copied to the store [#14041](https://github.com/NixOS/nix/pull/14041) + + Since 2.25, Nix would fail to store a cache entry for substituted flake inputs, + which in turn would cause them to be re-copied to the store on initial + evaluation. Caching these inputs results in a near doubling of a performance in + some cases — especially on I/O-bound machines and when using commands that + fetch many inputs, like `nix flake archive/prefetch-inputs` + +- Derivation JSON format now uses store path basenames (no store dir) only [#13570](https://github.com/NixOS/nix/issues/13570) [#13980](https://github.com/NixOS/nix/pull/13980) + + Experience with many JSON frameworks (e.g. nlohmann/json in C++, Serde in Rust, and Aeson in Haskell), has shown that the use of the store dir in JSON formats is an impediment to systematic JSON formats, + because it requires the serializer/deserializer to take an extra paramater (the store dir). + + We ultimately want to rectify this issue with all (non-stable, able to be changed) JSON formats. + To start with, we are changing the JSON format for derivations because the `nix derivation` commands are + --- in addition to being formally unstable + --- less widely used than other unstable commands. 
+ + See the documentation on the [JSON format for derivations](@docroot@/protocols/json/derivation.md) for further details. + +- Removed support for daemons and clients older than Nix 2.0 [#13951](https://github.com/NixOS/nix/pull/13951) + + We have dropped support in the daemon worker protocol for daemons and clients that don't speak at least version 18 of the protocol. This first Nix release that supports this version is Nix 2.0, released in February 2018. + +- `nix flake check` now skips derivations that can be substituted [#13574](https://github.com/NixOS/nix/pull/13574) + + Previously, `nix flake check` would evaluate and build/substitute all + derivations. Now, it will skip downloading derivations that can be substituted. + This can drastically decrease the time invocations take in environments where + checks may already be cached (like in CI). + +- HTTP binary caches now support transparent compression for metadata + + HTTP binary cache stores can now compress `.narinfo`, `.ls`, and build log files before uploading them, + reducing bandwidth usage and storage requirements. The compression is applied transparently using the + `Content-Encoding` header, allowing compatible clients to automatically decompress the files. + + Three new configuration options control this behavior: + - `narinfo-compression`: Compression method for `.narinfo` files + - `ls-compression`: Compression method for `.ls` files + - `log-compression`: Compression method for build logs in `log/` directory + + Example usage: + ``` + nix copy --to 'http://cache.example.com?narinfo-compression=gzip&ls-compression=gzip' /nix/store/... + nix store copy-log --to 'http://cache.example.com?log-compression=br' /nix/store/... + ``` + +- Temporary build directories no longer include derivation names [#13839](https://github.com/NixOS/nix/pull/13839) + + Temporary build directories created during derivation builds no longer include the derivation name in their path to avoid build failures when the derivation name is too long. This change ensures predictable prefix lengths for build directories under `/nix/var/nix/builds`. 
+ + +## Contributors + + +This release was made possible by the following 32 contributors: + +- Farid Zakaria [**(@fzakaria)**](https://github.com/fzakaria) +- dram [**(@dramforever)**](https://github.com/dramforever) +- Ephraim Siegfried [**(@EphraimSiegfried)**](https://github.com/EphraimSiegfried) +- Robert Hensing [**(@roberth)**](https://github.com/roberth) +- Taeer Bar-Yam [**(@Radvendii)**](https://github.com/Radvendii) +- Emily [**(@emilazy)**](https://github.com/emilazy) +- Jens Petersen [**(@juhp)**](https://github.com/juhp) +- Bernardo Meurer [**(@lovesegfault)**](https://github.com/lovesegfault) +- Jörg Thalheim [**(@Mic92)**](https://github.com/Mic92) +- Leandro Emmanuel Reina Kiperman [**(@kip93)**](https://github.com/kip93) +- Marie [**(@NyCodeGHG)**](https://github.com/NyCodeGHG) +- Ethan Evans [**(@ethanavatar)**](https://github.com/ethanavatar) +- Yaroslav Bolyukin [**(@CertainLach)**](https://github.com/CertainLach) +- Matej Urbas [**(@urbas)**](https://github.com/urbas) +- Jami Kettunen [**(@JamiKettunen)**](https://github.com/JamiKettunen) +- Clayton [**(@netadr)**](https://github.com/netadr) +- Grégory Marti [**(@gmarti)**](https://github.com/gmarti) +- Eelco Dolstra [**(@edolstra)**](https://github.com/edolstra) +- rszyma [**(@rszyma)**](https://github.com/rszyma) +- Philip Wilk [**(@philipwilk)**](https://github.com/philipwilk) +- John Ericson [**(@Ericson2314)**](https://github.com/Ericson2314) +- Tom Westerhout [**(@twesterhout)**](https://github.com/twesterhout) +- Tristan Ross [**(@RossComputerGuy)**](https://github.com/RossComputerGuy) +- Sergei Zimmerman [**(@xokdvium)**](https://github.com/xokdvium) +- Jean-François Roche [**(@jfroche)**](https://github.com/jfroche) +- Seth Flynn [**(@getchoo)**](https://github.com/getchoo) +- éclairevoyant [**(@eclairevoyant)**](https://github.com/eclairevoyant) +- Glen Huang [**(@hgl)**](https://github.com/hgl) +- osman - オスマン [**(@osbm)**](https://github.com/osbm) +- David McFarland [**(@corngood)**](https://github.com/corngood) +- Cole Helbling [**(@cole-h)**](https://github.com/cole-h) +- Sinan Mohd [**(@sinanmohd)**](https://github.com/sinanmohd) +- Philipp Otterbein From c1761b867b5ba1df81c5c2e87a05131bca9ce459 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 6 Oct 2025 16:11:15 +0200 Subject: [PATCH 256/332] Contributors --- .../data/release-credits-email-to-handle.json | 23 ++++++++++++++++++- .../data/release-credits-handle-to-name.json | 21 ++++++++++++++++- 2 files changed, 42 insertions(+), 2 deletions(-) diff --git a/maintainers/data/release-credits-email-to-handle.json b/maintainers/data/release-credits-email-to-handle.json index ea37afb90..0dbbf8fa6 100644 --- a/maintainers/data/release-credits-email-to-handle.json +++ b/maintainers/data/release-credits-email-to-handle.json @@ -203,5 +203,26 @@ "ConnorBaker01@Gmail.com": "ConnorBaker", "jsoo1@asu.edu": "jsoo1", "hsngrmpf+github@gmail.com": "DavHau", - "matthew@floxdev.com": "mkenigs" + "matthew@floxdev.com": "mkenigs", + "taeer@bar-yam.me": "Radvendii", + "beme@anthropic.com": "lovesegfault", + "osbm@osbm.dev": "osbm", + "jami.kettunen@protonmail.com": "JamiKettunen", + "ephraim.siegfried@hotmail.com": "EphraimSiegfried", + "rszyma.dev@gmail.com": "rszyma", + "tristan.ross@determinate.systems": "RossComputerGuy", + "corngood@gmail.com": "corngood", + "jfroche@pyxel.be": "jfroche", + "848000+eclairevoyant@users.noreply.github.com": "eclairevoyant", + "petersen@redhat.com": "juhp", + "dramforever@live.com": "dramforever", + "me@glenhuang.com": "hgl", + 
"philip.wilk@fivium.co.uk": "philipwilk", + "me@nycode.dev": "NyCodeGHG", + "14264576+twesterhout@users.noreply.github.com": "twesterhout", + "sinan@sinanmohd.com": "sinanmohd", + "42688647+netadr@users.noreply.github.com": "netadr", + "matej.urbas@gmail.com": "urbas", + "ethanalexevans@gmail.com": "ethanavatar", + "greg.marti@gmail.com": "gmarti" } \ No newline at end of file diff --git a/maintainers/data/release-credits-handle-to-name.json b/maintainers/data/release-credits-handle-to-name.json index e2510548d..8abffc65c 100644 --- a/maintainers/data/release-credits-handle-to-name.json +++ b/maintainers/data/release-credits-handle-to-name.json @@ -177,5 +177,24 @@ "avnik": "Alexander V. Nikolaev", "DavHau": null, "aln730": "AGawas", - "vog": "Volker Diels-Grabsch" + "vog": "Volker Diels-Grabsch", + "corngood": "David McFarland", + "twesterhout": "Tom Westerhout", + "JamiKettunen": "Jami Kettunen", + "dramforever": "dram", + "philipwilk": "Philip Wilk", + "netadr": "Clayton", + "NyCodeGHG": "Marie", + "jfroche": "Jean-Fran\u00e7ois Roche", + "urbas": "Matej Urbas", + "osbm": "osman - \u30aa\u30b9\u30de\u30f3", + "rszyma": null, + "eclairevoyant": "\u00e9clairevoyant", + "Radvendii": "Taeer Bar-Yam", + "sinanmohd": "Sinan Mohd", + "ethanavatar": "Ethan Evans", + "gmarti": "Gr\u00e9gory Marti", + "lovesegfault": "Bernardo Meurer", + "EphraimSiegfried": "Ephraim Siegfried", + "hgl": "Glen Huang" } \ No newline at end of file From 0376112a512b7fb8d283e613d6ed6419e741c189 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 6 Oct 2025 16:11:24 +0200 Subject: [PATCH 257/332] Organize release notes --- doc/manual/source/release-notes/rl-2.32.md | 60 ++++++++++------------ 1 file changed, 28 insertions(+), 32 deletions(-) diff --git a/doc/manual/source/release-notes/rl-2.32.md b/doc/manual/source/release-notes/rl-2.32.md index 5c1c314db..885e86631 100644 --- a/doc/manual/source/release-notes/rl-2.32.md +++ b/doc/manual/source/release-notes/rl-2.32.md @@ -1,10 +1,26 @@ # Release 2.32.0 (2025-10-06) +## Incompatible changes + +- Removed support for daemons and clients older than Nix 2.0 [#13951](https://github.com/NixOS/nix/pull/13951) + + We have dropped support in the daemon worker protocol for daemons and clients that don't speak at least version 18 of the protocol. This first Nix release that supports this version is Nix 2.0, released in February 2018. + +- Derivation JSON format now uses store path basenames only [#13570](https://github.com/NixOS/nix/issues/13570) [#13980](https://github.com/NixOS/nix/pull/13980) + + Experience with many JSON frameworks (e.g. nlohmann/json in C++, Serde in Rust, and Aeson in Haskell) has shown that the use of the store directory in JSON formats is an impediment to systematic JSON formats, because it requires the serializer/deserializer to take an extra paramater (the store directory). + + We ultimately want to rectify this issue with all JSON formats to the extent allowed by our stability promises. To start with, we are changing the JSON format for derivations because the `nix derivation` commands are — in addition to being formally unstable — less widely used than other unstable commands. + + See the documentation on the [JSON format for derivations](@docroot@/protocols/json/derivation.md) for further details. 
+ - C API: `nix_get_attr_name_byidx`, `nix_get_attr_byidx` take a `nix_value *` instead of `const nix_value *` [#13987](https://github.com/NixOS/nix/pull/13987) In order to accommodate a more optimized internal representation of attribute set merges these functions require a mutable `nix_value *` that might be modified on access. This does *not* break the ABI of these functions. +## New features + - C API: Add lazy attribute and list item accessors [#14030](https://github.com/NixOS/nix/pull/14030) The C API now includes lazy accessor functions for retrieving values from lists and attribute sets without forcing evaluation: @@ -19,37 +35,6 @@ The documentation for `NIX_ERR_KEY` error handling has also been clarified to specify when this error code is returned. -- Substituted flake inputs are no longer re-copied to the store [#14041](https://github.com/NixOS/nix/pull/14041) - - Since 2.25, Nix would fail to store a cache entry for substituted flake inputs, - which in turn would cause them to be re-copied to the store on initial - evaluation. Caching these inputs results in a near doubling of a performance in - some cases — especially on I/O-bound machines and when using commands that - fetch many inputs, like `nix flake archive/prefetch-inputs` - -- Derivation JSON format now uses store path basenames (no store dir) only [#13570](https://github.com/NixOS/nix/issues/13570) [#13980](https://github.com/NixOS/nix/pull/13980) - - Experience with many JSON frameworks (e.g. nlohmann/json in C++, Serde in Rust, and Aeson in Haskell), has shown that the use of the store dir in JSON formats is an impediment to systematic JSON formats, - because it requires the serializer/deserializer to take an extra paramater (the store dir). - - We ultimately want to rectify this issue with all (non-stable, able to be changed) JSON formats. - To start with, we are changing the JSON format for derivations because the `nix derivation` commands are - --- in addition to being formally unstable - --- less widely used than other unstable commands. - - See the documentation on the [JSON format for derivations](@docroot@/protocols/json/derivation.md) for further details. - -- Removed support for daemons and clients older than Nix 2.0 [#13951](https://github.com/NixOS/nix/pull/13951) - - We have dropped support in the daemon worker protocol for daemons and clients that don't speak at least version 18 of the protocol. This first Nix release that supports this version is Nix 2.0, released in February 2018. - -- `nix flake check` now skips derivations that can be substituted [#13574](https://github.com/NixOS/nix/pull/13574) - - Previously, `nix flake check` would evaluate and build/substitute all - derivations. Now, it will skip downloading derivations that can be substituted. - This can drastically decrease the time invocations take in environments where - checks may already be cached (like in CI). - - HTTP binary caches now support transparent compression for metadata HTTP binary cache stores can now compress `.narinfo`, `.ls`, and build log files before uploading them, @@ -71,10 +56,21 @@ Temporary build directories created during derivation builds no longer include the derivation name in their path to avoid build failures when the derivation name is too long. This change ensures predictable prefix lengths for build directories under `/nix/var/nix/builds`. 
+## Performance improvements + +- Substituted flake inputs are no longer re-copied to the store [#14041](https://github.com/NixOS/nix/pull/14041) + + Since 2.25, Nix would fail to store a cache entry for substituted flake inputs, which in turn would cause them to be re-copied to the store on initial evaluation. Caching these inputs results in a near doubling of performance in some cases — especially on I/O-bound machines and when using commands that fetch many inputs, like `nix flake [archive|prefetch-inputs]`. + +- `nix flake check` now skips derivations that can be substituted [#13574](https://github.com/NixOS/nix/pull/13574) + + Previously, `nix flake check` would evaluate and build/substitute all + derivations. Now, it will skip downloading derivations that can be substituted. + This can drastically decrease the time invocations take in environments where + checks may already be cached (like in CI). ## Contributors - This release was made possible by the following 32 contributors: - Farid Zakaria [**(@fzakaria)**](https://github.com/fzakaria) From f4e44040d4c92d4ca87601c437922962dffae548 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 6 Oct 2025 16:26:29 +0200 Subject: [PATCH 258/332] Release note for external derivation builders --- doc/manual/source/release-notes/rl-2.32.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/doc/manual/source/release-notes/rl-2.32.md b/doc/manual/source/release-notes/rl-2.32.md index 885e86631..c2f0eb27f 100644 --- a/doc/manual/source/release-notes/rl-2.32.md +++ b/doc/manual/source/release-notes/rl-2.32.md @@ -56,6 +56,12 @@ Temporary build directories created during derivation builds no longer include the derivation name in their path to avoid build failures when the derivation name is too long. This change ensures predictable prefix lengths for build directories under `/nix/var/nix/builds`. +- External derivation builders [#14145](https://github.com/NixOS/nix/pull/14145) + + These are helper programs that Nix calls to perform derivations for specified system types, e.g. by using QEMU to emulate a different type of platform. For more information, see the [`external-builders` setting](../command-ref/conf-file.md#conf-external-builders). + + This is currently an experimental feature. 
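  As a rough illustration of what such a helper consumes: the message is the JSON object documented for `external-builders` earlier in this series, with `version`, `builder`, `env`, `inputPaths` and `outputs` fields among others. Below is a minimal, hypothetical sketch of the helper side using nlohmann/json; how Nix hands the JSON to the helper (it is not written to stdin yet, per the TODO in `external-derivation-builder.cc`) and all real error handling are left out.

  ```
  #include <nlohmann/json.hpp>

  #include <iostream>
  #include <stdexcept>
  #include <string>

  // Hypothetical external-builder helper logic. Field names follow the example
  // in globals.hh above; the delivery mechanism is abstracted away as a string.
  void processBuildMessage(const std::string & raw)
  {
      auto msg = nlohmann::json::parse(raw);

      if (msg.value("version", 0) != 1)
          throw std::runtime_error("unsupported external builder message version");

      std::string builder = msg.at("builder");  // program the derivation wants to run
      auto env = msg.at("env");                 // builder environment variables
      auto inputPaths = msg.at("inputPaths");   // store paths that must be visible to the build
      auto outputs = msg.at("outputs");         // output name -> (scratch) store path to produce

      for (auto & [name, path] : outputs.items())
          std::cerr << "must produce '" << name << "' at " << path.get<std::string>() << "\n";
  }
  ```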
+ ## Performance improvements - Substituted flake inputs are no longer re-copied to the store [#14041](https://github.com/NixOS/nix/pull/14041) From 7f22a40e3b515d0a99233a1eb36ef8191628629f Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Fri, 3 Oct 2025 15:58:47 +0000 Subject: [PATCH 259/332] build(libstore): assert withAWS xor withCurlS3 --- src/libstore/package.nix | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/libstore/package.nix b/src/libstore/package.nix index 1c08e466e..0eb8e3687 100644 --- a/src/libstore/package.nix +++ b/src/libstore/package.nix @@ -34,6 +34,9 @@ let inherit (lib) fileset; in +assert lib.assertMsg (!withAWS || !withCurlS3) + "withAWS and withCurlS3 are mutually exclusive - cannot enable both S3 implementations simultaneously"; + mkMesonLibrary (finalAttrs: { pname = "nix-store"; inherit version; From 8c28283876799be6ef21a228e8c6d8168118ed86 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Fri, 3 Oct 2025 15:48:24 +0000 Subject: [PATCH 260/332] ci: test without s3 and with curl-based-s3 --- .github/workflows/ci.yml | 57 ++++++++++++++++++++++++++++++++----- ci/gha/tests/default.nix | 8 ++++++ ci/gha/tests/wrapper.nix | 3 ++ ci/gha/vm-tests/wrapper.nix | 45 +++++++++++++++++++++++++++++ 4 files changed, 106 insertions(+), 7 deletions(-) create mode 100644 ci/gha/vm-tests/wrapper.nix diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index dcf0814d8..00a808951 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -65,18 +65,42 @@ jobs: instrumented: false primary: true stdenv: stdenv + withAWS: true + withCurlS3: false + # TODO: remove once curl-based-s3 fully lands + - scenario: on ubuntu (no s3) + runs-on: ubuntu-24.04 + os: linux + instrumented: false + primary: false + stdenv: stdenv + withAWS: false + withCurlS3: false + # TODO: remove once curl-based-s3 fully lands + - scenario: on ubuntu (curl s3) + runs-on: ubuntu-24.04 + os: linux + instrumented: false + primary: false + stdenv: stdenv + withAWS: false + withCurlS3: true - scenario: on macos runs-on: macos-14 os: darwin instrumented: false primary: true stdenv: stdenv + withAWS: true + withCurlS3: false - scenario: on ubuntu (with sanitizers / coverage) runs-on: ubuntu-24.04 os: linux instrumented: true primary: false stdenv: clangStdenv + withAWS: true + withCurlS3: false name: tests ${{ matrix.scenario }} runs-on: ${{ matrix.runs-on }} timeout-minutes: 60 @@ -99,7 +123,9 @@ jobs: run: | nix build --file ci/gha/tests/wrapper.nix componentTests -L \ --arg withInstrumentation ${{ matrix.instrumented }} \ - --argstr stdenv "${{ matrix.stdenv }}" + --argstr stdenv "${{ matrix.stdenv }}" \ + ${{ format('--arg withAWS {0}', matrix.withAWS) }} \ + ${{ format('--arg withCurlS3 {0}', matrix.withCurlS3) }} - name: Run flake checks and prepare the installer tarball run: | ci/gha/tests/build-checks @@ -110,6 +136,8 @@ jobs: nix build --file ci/gha/tests/wrapper.nix codeCoverage.coverageReports -L \ --arg withInstrumentation ${{ matrix.instrumented }} \ --argstr stdenv "${{ matrix.stdenv }}" \ + ${{ format('--arg withAWS {0}', matrix.withAWS) }} \ + ${{ format('--arg withCurlS3 {0}', matrix.withCurlS3) }} \ --out-link coverage-reports cat coverage-reports/index.txt >> $GITHUB_STEP_SUMMARY if: ${{ matrix.instrumented }} @@ -240,6 +268,18 @@ jobs: vm_tests: needs: basic-checks + strategy: + fail-fast: false + matrix: + include: + # TODO: remove once curl-based-s3 fully lands + - scenario: legacy s3 + withAWS: true + withCurlS3: false + - scenario: curl s3 + 
withAWS: false + withCurlS3: true + name: vm_tests (${{ matrix.scenario }}) runs-on: ubuntu-24.04 steps: - uses: actions/checkout@v5 @@ -250,13 +290,16 @@ jobs: experimental-features = nix-command flakes github_token: ${{ secrets.GITHUB_TOKEN }} - uses: DeterminateSystems/magic-nix-cache-action@main - - run: | + - name: Build VM tests + run: | nix build -L \ - .#hydraJobs.tests.functional_user \ - .#hydraJobs.tests.githubFlakes \ - .#hydraJobs.tests.nix-docker \ - .#hydraJobs.tests.tarballFlakes \ - ; + --file ci/gha/vm-tests/wrapper.nix \ + ${{ format('--arg withAWS {0}', matrix.withAWS) }} \ + ${{ format('--arg withCurlS3 {0}', matrix.withCurlS3) }} \ + functional_user \ + githubFlakes \ + nix-docker \ + tarballFlakes flake_regressions: needs: vm_tests diff --git a/ci/gha/tests/default.nix b/ci/gha/tests/default.nix index b89d51c76..bbcd7e6b7 100644 --- a/ci/gha/tests/default.nix +++ b/ci/gha/tests/default.nix @@ -12,6 +12,8 @@ componentTestsPrefix ? "", withSanitizers ? false, withCoverage ? false, + withAWS ? null, + withCurlS3 ? null, ... }: @@ -65,6 +67,12 @@ rec { # Boehm is incompatible with ASAN. nix-expr = prev.nix-expr.override { enableGC = !withSanitizers; }; + # Override AWS configuration if specified + nix-store = prev.nix-store.override ( + lib.optionalAttrs (withAWS != null) { inherit withAWS; } + // lib.optionalAttrs (withCurlS3 != null) { inherit withCurlS3; } + ); + mesonComponentOverrides = lib.composeManyExtensions componentOverrides; # Unclear how to make Perl bindings work with a dynamically linked ASAN. nix-perl-bindings = if withSanitizers then null else prev.nix-perl-bindings; diff --git a/ci/gha/tests/wrapper.nix b/ci/gha/tests/wrapper.nix index dc280ebbb..c1655f8c0 100644 --- a/ci/gha/tests/wrapper.nix +++ b/ci/gha/tests/wrapper.nix @@ -5,6 +5,8 @@ stdenv ? "stdenv", componentTestsPrefix ? "", withInstrumentation ? false, + withAWS ? null, + withCurlS3 ? null, }@args: import ./. ( args @@ -12,5 +14,6 @@ import ./. ( getStdenv = p: p.${stdenv}; withSanitizers = withInstrumentation; withCoverage = withInstrumentation; + inherit withAWS withCurlS3; } ) diff --git a/ci/gha/vm-tests/wrapper.nix b/ci/gha/vm-tests/wrapper.nix new file mode 100644 index 000000000..2ca80974c --- /dev/null +++ b/ci/gha/vm-tests/wrapper.nix @@ -0,0 +1,45 @@ +{ + nixFlake ? builtins.getFlake ("git+file://" + toString ../../..), + system ? "x86_64-linux", + withAWS ? null, + withCurlS3 ? 
null, +}: + +let + pkgs = nixFlake.inputs.nixpkgs.legacyPackages.${system}; + lib = pkgs.lib; + + # Create base nixComponents using the flake's makeComponents + baseNixComponents = nixFlake.lib.makeComponents { + inherit pkgs; + }; + + # Override nixComponents if AWS parameters are specified + nixComponents = + if (withAWS == null && withCurlS3 == null) then + baseNixComponents + else + baseNixComponents.overrideScope ( + final: prev: { + nix-store = prev.nix-store.override ( + lib.optionalAttrs (withAWS != null) { inherit withAWS; } + // lib.optionalAttrs (withCurlS3 != null) { inherit withCurlS3; } + ); + } + ); + + # Import NixOS tests with the overridden nixComponents + tests = import ../../../tests/nixos { + inherit lib pkgs nixComponents; + nixpkgs = nixFlake.inputs.nixpkgs; + inherit (nixFlake.inputs) nixpkgs-23-11; + }; +in +{ + inherit (tests) + functional_user + githubFlakes + nix-docker + tarballFlakes + ; +} From 776038f842d5b4844f9f3411a698733b1d1c0547 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Mon, 6 Oct 2025 17:09:34 +0000 Subject: [PATCH 261/332] docs(release-notes): note fix for fetchTarball/fetchurl substitution --- doc/manual/source/release-notes/rl-2.32.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/manual/source/release-notes/rl-2.32.md b/doc/manual/source/release-notes/rl-2.32.md index c2f0eb27f..04f06e6b1 100644 --- a/doc/manual/source/release-notes/rl-2.32.md +++ b/doc/manual/source/release-notes/rl-2.32.md @@ -75,6 +75,10 @@ This can drastically decrease the time invocations take in environments where checks may already be cached (like in CI). +- `fetchTarball` and `fetchurl` now correctly substitute (#14138) + + At some point we stopped substituting calls to `fetchTarball` and `fetchurl` with a set `narHash` to avoid incorrectly substituting things in `fetchTree`, even though it would be safe to substitute when calling the legacy `fetch{Tarball,url}`. This fixes that regression where it is safe. + ## Contributors This release was made possible by the following 32 contributors: From 8f71ef7edee5876af20df403d38d5ef7c4d81008 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 6 Oct 2025 19:27:30 +0200 Subject: [PATCH 262/332] Update doc/manual/source/release-notes/rl-2.32.md Co-authored-by: Taeer Bar-Yam --- doc/manual/source/release-notes/rl-2.32.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/manual/source/release-notes/rl-2.32.md b/doc/manual/source/release-notes/rl-2.32.md index 04f06e6b1..d85a4c2ea 100644 --- a/doc/manual/source/release-notes/rl-2.32.md +++ b/doc/manual/source/release-notes/rl-2.32.md @@ -78,7 +78,9 @@ - `fetchTarball` and `fetchurl` now correctly substitute (#14138) At some point we stopped substituting calls to `fetchTarball` and `fetchurl` with a set `narHash` to avoid incorrectly substituting things in `fetchTree`, even though it would be safe to substitute when calling the legacy `fetch{Tarball,url}`. This fixes that regression where it is safe. +- Started moving AST allocations into a bump allocator [#14088](https://github.com/NixOS/nix/issues/14088) + This leaves smaller, immutable structures in the AST. So far this saves about 2% memory on a NixOS config evaluation. 
## Contributors This release was made possible by the following 32 contributors: From 0068ee6ca72b0596b67117823e2c73343bade0c0 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Mon, 6 Oct 2025 22:16:21 +0300 Subject: [PATCH 263/332] Release note for attrset optimization --- doc/manual/source/release-notes/rl-2.32.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/doc/manual/source/release-notes/rl-2.32.md b/doc/manual/source/release-notes/rl-2.32.md index d85a4c2ea..3a925198d 100644 --- a/doc/manual/source/release-notes/rl-2.32.md +++ b/doc/manual/source/release-notes/rl-2.32.md @@ -64,6 +64,16 @@ ## Performance improvements +- Optimize memory usage of attribute set merges [#13987](https://github.com/NixOS/nix/pull/13987) + + [Attribute set update operations](@docroot@/language/operators.md#update) have been optimized to + reduce reallocations in cases when the second operand is small. + + For typical evaluations of nixpkgs this optimization leads to ~20% less memory allocated in total + without significantly affecting evaluation performance. + + See [eval-attrset-update-layer-rhs-threshold](@docroot@/command-ref/conf-file.md#conf-eval-attrset-update-layer-rhs-threshold) + - Substituted flake inputs are no longer re-copied to the store [#14041](https://github.com/NixOS/nix/pull/14041) Since 2.25, Nix would fail to store a cache entry for substituted flake inputs, which in turn would cause them to be re-copied to the store on initial evaluation. Caching these inputs results in a near doubling of performance in some cases — especially on I/O-bound machines and when using commands that fetch many inputs, like `nix flake [archive|prefetch-inputs]`. From 242f3625675cc06069edfd0936ad6f42acb068a8 Mon Sep 17 00:00:00 2001 From: Samuel Connelly <140354451+myclevorname@users.noreply.github.com> Date: Fri, 3 Oct 2025 18:41:01 -0400 Subject: [PATCH 264/332] libutil: Throw if `str("contents")` not found This was broken in 7aa3e7e3a5281acf350eff0fe039656cd4986e2c (since 2.25). 
--- src/libutil-tests/archive.cc | 47 ++++++++++++++++++ .../nars/invalid-tag-instead-of-contents.nar | Bin 0 -> 104 bytes src/libutil-tests/meson.build | 1 + src/libutil/archive.cc | 6 ++- 4 files changed, 52 insertions(+), 2 deletions(-) create mode 100644 src/libutil-tests/archive.cc create mode 100644 src/libutil-tests/data/nars/invalid-tag-instead-of-contents.nar diff --git a/src/libutil-tests/archive.cc b/src/libutil-tests/archive.cc new file mode 100644 index 000000000..386f7b857 --- /dev/null +++ b/src/libutil-tests/archive.cc @@ -0,0 +1,47 @@ +#include "nix/util/archive.hh" +#include "nix/util/tests/characterization.hh" +#include "nix/util/tests/gmock-matchers.hh" + +#include + +namespace nix { + +namespace { + +class NarTest : public CharacterizationTest +{ + std::filesystem::path unitTestData = getUnitTestData() / "nars"; + +public: + std::filesystem::path goldenMaster(std::string_view testStem) const override + { + return unitTestData / (std::string(testStem) + ".nar"); + } +}; + +class InvalidNarTest : public NarTest, public ::testing::WithParamInterface> +{}; + +} // namespace + +TEST_P(InvalidNarTest, throwsErrorMessage) +{ + const auto & [name, message] = GetParam(); + readTest(name, [&](const std::string & narContents) { + ASSERT_THAT( + [&]() { + StringSource source{narContents}; + NullFileSystemObjectSink sink; + parseDump(sink, source); + }, + ::testing::ThrowsMessage(testing::HasSubstrIgnoreANSIMatcher(message))); + }); +} + +INSTANTIATE_TEST_SUITE_P( + NarTest, + InvalidNarTest, + ::testing::Values( + std::pair{"invalid-tag-instead-of-contents", "bad archive: expected tag 'contents', got 'AAAAAAAA'"})); + +} // namespace nix diff --git a/src/libutil-tests/data/nars/invalid-tag-instead-of-contents.nar b/src/libutil-tests/data/nars/invalid-tag-instead-of-contents.nar new file mode 100644 index 0000000000000000000000000000000000000000..80dbf5a12ff8cd03fb1cadcc8a827982d1f9d5aa GIT binary patch literal 104 zcmd;OfPlQr3f;t_ Date: Tue, 7 Oct 2025 17:15:28 +0200 Subject: [PATCH 265/332] Bump version --- .version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.version b/.version index 7cca401c7..3afbaeb2b 100644 --- a/.version +++ b/.version @@ -1 +1 @@ -2.32.0 +2.33.0 From b0f567e18b5bacb0ec2faadad24b321fbb60c08b Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 7 Oct 2025 17:16:57 +0200 Subject: [PATCH 266/332] Update mergify.yml --- .mergify.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/.mergify.yml b/.mergify.yml index 1c220045a..8fdcb05b4 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -172,3 +172,14 @@ pull_request_rules: labels: - automatic backport - merge-queue + + - name: backport patches to 2.32 + conditions: + - label=backport 2.32-maintenance + actions: + backport: + branches: + - "2.32-maintenance" + labels: + - automatic backport + - merge-queue From 63e8b5f94aa6d9a4f3fb68f2b51e3e3a1b1457d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Sun, 5 Oct 2025 07:17:15 +0200 Subject: [PATCH 267/332] ci: Switch away from mergify to backport action We want to use github native queues. 
--- .github/workflows/backport.yml | 37 +++++++ .mergify.yml | 185 --------------------------------- 2 files changed, 37 insertions(+), 185 deletions(-) create mode 100644 .github/workflows/backport.yml delete mode 100644 .mergify.yml diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml new file mode 100644 index 000000000..99b75621e --- /dev/null +++ b/.github/workflows/backport.yml @@ -0,0 +1,37 @@ +name: Backport +on: + pull_request_target: + types: [closed, labeled] +permissions: + contents: read +jobs: + backport: + name: Backport Pull Request + permissions: + # for korthout/backport-action + contents: write + pull-requests: write + if: github.repository_owner == 'NixOS' && github.event.pull_request.merged == true && (github.event_name != 'labeled' || startsWith('backport', github.event.label.name)) + runs-on: ubuntu-24.04-arm + steps: + - name: Generate GitHub App token + id: generate-token + uses: actions/create-github-app-token@v1 + with: + app-id: ${{ vars.CI_APP_ID }} + private-key: ${{ secrets.CI_APP_PRIVATE_KEY }} + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} + # required to find all branches + fetch-depth: 0 + - name: Create backport PRs + uses: korthout/backport-action@d07416681cab29bf2661702f925f020aaa962997 # v3.4.1 + id: backport + with: + # Config README: https://github.com/korthout/backport-action#backport-action + github_token: ${{ steps.generate-token.outputs.token }} + github_workspace: ${{ github.workspace }} + auto_merge_enabled: true + pull_description: |- + Automatic backport to `${target_branch}`, triggered by a label in #${pull_number}. diff --git a/.mergify.yml b/.mergify.yml deleted file mode 100644 index 8fdcb05b4..000000000 --- a/.mergify.yml +++ /dev/null @@ -1,185 +0,0 @@ -queue_rules: - - name: default - # all required tests need to go here - merge_conditions: - - check-success=tests on macos - - check-success=tests on ubuntu - - check-success=installer test on macos - - check-success=installer test on ubuntu - - check-success=vm_tests - batch_size: 5 - -pull_request_rules: - - name: merge using the merge queue - conditions: - - base~=master|.+-maintenance - - label~=merge-queue|dependencies - actions: - queue: {} - -# The rules below will first create backport pull requests and put those in a merge queue. 
- - - name: backport patches to 2.18 - conditions: - - label=backport 2.18-maintenance - actions: - backport: - branches: - - 2.18-maintenance - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.19 - conditions: - - label=backport 2.19-maintenance - actions: - backport: - branches: - - 2.19-maintenance - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.20 - conditions: - - label=backport 2.20-maintenance - actions: - backport: - branches: - - 2.20-maintenance - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.21 - conditions: - - label=backport 2.21-maintenance - actions: - backport: - branches: - - 2.21-maintenance - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.22 - conditions: - - label=backport 2.22-maintenance - actions: - backport: - branches: - - 2.22-maintenance - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.23 - conditions: - - label=backport 2.23-maintenance - actions: - backport: - branches: - - 2.23-maintenance - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.24 - conditions: - - label=backport 2.24-maintenance - actions: - backport: - branches: - - "2.24-maintenance" - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.25 - conditions: - - label=backport 2.25-maintenance - actions: - backport: - branches: - - "2.25-maintenance" - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.26 - conditions: - - label=backport 2.26-maintenance - actions: - backport: - branches: - - "2.26-maintenance" - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.27 - conditions: - - label=backport 2.27-maintenance - actions: - backport: - branches: - - "2.27-maintenance" - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.28 - conditions: - - label=backport 2.28-maintenance - actions: - backport: - branches: - - "2.28-maintenance" - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.29 - conditions: - - label=backport 2.29-maintenance - actions: - backport: - branches: - - "2.29-maintenance" - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.30 - conditions: - - label=backport 2.30-maintenance - actions: - backport: - branches: - - "2.30-maintenance" - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.31 - conditions: - - label=backport 2.31-maintenance - actions: - backport: - branches: - - "2.31-maintenance" - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.32 - conditions: - - label=backport 2.32-maintenance - actions: - backport: - branches: - - "2.32-maintenance" - labels: - - automatic backport - - merge-queue From 75b18a6e47f30381d838d4f0c8a5d9905452fea2 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Wed, 8 Oct 2025 00:51:18 +0300 Subject: [PATCH 268/332] maintainers: Remove mergify note from release-process.md --- maintainers/release-process.md | 1 - 1 file changed, 1 deletion(-) diff --git a/maintainers/release-process.md b/maintainers/release-process.md index 790618b7f..68de3b677 100644 --- a/maintainers/release-process.md +++ b/maintainers/release-process.md @@ -142,7 +142,6 @@ release: $ git pull $ NEW_VERSION=2.13.0 $ echo $NEW_VERSION > .version - $ ... edit .mergify.yml to add the previous version ... 
$ git checkout -b bump-$NEW_VERSION $ git commit -a -m 'Bump version' $ git push --set-upstream origin bump-$NEW_VERSION From c5b88c22fa2033fb10ee16ee2849ca46847806ea Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Tue, 7 Oct 2025 01:35:37 +0300 Subject: [PATCH 269/332] dev-shell: Disable separateDebugInfo This breaks gdb pretty-printers inserted into .debug_gdb_scripts section, because it implies --compress-debug-sections=zlib, -Wa,--compress-debug-sections. This is very unfortunate, because then gdb can't use pretty printers for Boost.Unordered (which are very useful, since boost::unoredred_flat_map is impossible to debug). This seems perfectly fine to disable in the dev-shell for the time being. See [1-3] for further references. With this change I'm able to use boost's pretty-printers out-of-the box: ``` p *importResolutionCache $2 = boost::concurrent_flat_map with 1 elements = {[{accessor = {p = std::shared_ptr (use count 5, weak count 1) = { get() = 0x555555d830a8}}, path = {static root = {static root = , path = "/"}, path = "/derivation-internal.nix"}}] = {accessor = {p = std::shared_ptr (use count 5, weak count 1) = { get() = 0x555555d830a8}}, path = {static root = {static root = , path = "/"}, path = "/derivation-internal.nix"}}} ``` When combined with a simple `add-auto-load-safe-path ~/code` in .gdbinit [1]: https://gerrit.lix.systems/c/lix/+/3880 [2]: https://git.lix.systems/lix-project/lix/issues/1003 [3]: https://sourceware.org/pipermail/gdb-patches/2025-October/221398.html --- packaging/dev-shell.nix | 73 +++++++++++++++++++++++------------------ 1 file changed, 41 insertions(+), 32 deletions(-) diff --git a/packaging/dev-shell.nix b/packaging/dev-shell.nix index ccfb9c4ae..37e92e363 100644 --- a/packaging/dev-shell.nix +++ b/packaging/dev-shell.nix @@ -70,6 +70,9 @@ pkgs.nixComponents2.nix-util.overrideAttrs ( # We use this shell with the local checkout, not unpackPhase. src = null; + # Workaround https://sourceware.org/pipermail/gdb-patches/2025-October/221398.html + # Remove when gdb fix is rolled out everywhere. 
+ separateDebugInfo = false; env = { # For `make format`, to work without installing pre-commit @@ -93,38 +96,44 @@ pkgs.nixComponents2.nix-util.overrideAttrs ( ++ map (transformFlag "libcmd") (ignoreCrossFile pkgs.nixComponents2.nix-cmd.mesonFlags); nativeBuildInputs = - attrs.nativeBuildInputs or [ ] - ++ pkgs.nixComponents2.nix-util.nativeBuildInputs - ++ pkgs.nixComponents2.nix-store.nativeBuildInputs - ++ pkgs.nixComponents2.nix-fetchers.nativeBuildInputs - ++ pkgs.nixComponents2.nix-expr.nativeBuildInputs - ++ lib.optionals havePerl pkgs.nixComponents2.nix-perl-bindings.nativeBuildInputs - ++ lib.optionals buildCanExecuteHost pkgs.nixComponents2.nix-manual.externalNativeBuildInputs - ++ pkgs.nixComponents2.nix-internal-api-docs.nativeBuildInputs - ++ pkgs.nixComponents2.nix-external-api-docs.nativeBuildInputs - ++ pkgs.nixComponents2.nix-functional-tests.externalNativeBuildInputs - ++ lib.optional ( - !buildCanExecuteHost - # Hack around https://github.com/nixos/nixpkgs/commit/bf7ad8cfbfa102a90463433e2c5027573b462479 - && !(stdenv.hostPlatform.isWindows && stdenv.buildPlatform.isDarwin) - && stdenv.hostPlatform.emulatorAvailable pkgs.buildPackages - && lib.meta.availableOn stdenv.buildPlatform (stdenv.hostPlatform.emulator pkgs.buildPackages) - ) pkgs.buildPackages.mesonEmulatorHook - ++ [ - pkgs.buildPackages.cmake - pkgs.buildPackages.gnused - pkgs.buildPackages.shellcheck - pkgs.buildPackages.changelog-d - modular.pre-commit.settings.package - (pkgs.writeScriptBin "pre-commit-hooks-install" modular.pre-commit.settings.installationScript) - pkgs.buildPackages.nixfmt-rfc-style - pkgs.buildPackages.shellcheck - pkgs.buildPackages.gdb - ] - ++ lib.optional (stdenv.cc.isClang && stdenv.hostPlatform == stdenv.buildPlatform) ( - lib.hiPrio pkgs.buildPackages.clang-tools - ) - ++ lib.optional stdenv.hostPlatform.isLinux pkgs.buildPackages.mold-wrapped; + let + inputs = + attrs.nativeBuildInputs or [ ] + ++ pkgs.nixComponents2.nix-util.nativeBuildInputs + ++ pkgs.nixComponents2.nix-store.nativeBuildInputs + ++ pkgs.nixComponents2.nix-fetchers.nativeBuildInputs + ++ pkgs.nixComponents2.nix-expr.nativeBuildInputs + ++ lib.optionals havePerl pkgs.nixComponents2.nix-perl-bindings.nativeBuildInputs + ++ lib.optionals buildCanExecuteHost pkgs.nixComponents2.nix-manual.externalNativeBuildInputs + ++ pkgs.nixComponents2.nix-internal-api-docs.nativeBuildInputs + ++ pkgs.nixComponents2.nix-external-api-docs.nativeBuildInputs + ++ pkgs.nixComponents2.nix-functional-tests.externalNativeBuildInputs + ++ lib.optional ( + !buildCanExecuteHost + # Hack around https://github.com/nixos/nixpkgs/commit/bf7ad8cfbfa102a90463433e2c5027573b462479 + && !(stdenv.hostPlatform.isWindows && stdenv.buildPlatform.isDarwin) + && stdenv.hostPlatform.emulatorAvailable pkgs.buildPackages + && lib.meta.availableOn stdenv.buildPlatform (stdenv.hostPlatform.emulator pkgs.buildPackages) + ) pkgs.buildPackages.mesonEmulatorHook + ++ [ + pkgs.buildPackages.cmake + pkgs.buildPackages.gnused + pkgs.buildPackages.shellcheck + pkgs.buildPackages.changelog-d + modular.pre-commit.settings.package + (pkgs.writeScriptBin "pre-commit-hooks-install" modular.pre-commit.settings.installationScript) + pkgs.buildPackages.nixfmt-rfc-style + pkgs.buildPackages.shellcheck + pkgs.buildPackages.gdb + ] + ++ lib.optional (stdenv.cc.isClang && stdenv.hostPlatform == stdenv.buildPlatform) ( + lib.hiPrio pkgs.buildPackages.clang-tools + ) + ++ lib.optional stdenv.hostPlatform.isLinux pkgs.buildPackages.mold-wrapped; + in + # FIXME: separateDebugInfo = 
false doesn't actually prevent -Wa,--compress-debug-sections + # from making its way into NIX_CFLAGS_COMPILE. + lib.filter (p: !lib.hasInfix "separate-debug-info" p) inputs; buildInputs = [ pkgs.gbenchmark From 0619351326bb7b7aa2a05d7e97a71ea61f8a7bff Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Wed, 8 Oct 2025 01:59:04 +0300 Subject: [PATCH 270/332] tests: Move invalid nar tests from tests/functional to libutil-tests Since 242f3625675cc06069edfd0936ad6f42acb068a8 we have better infrastructure for this kind of tests. --- src/libutil-tests/archive.cc | 16 +++++++++- .../libutil-tests/data/nars}/dot.nar | Bin .../libutil-tests/data/nars}/dotdot.nar | Bin .../libutil-tests/data/nars}/empty.nar | Bin .../data/nars}/executable-after-contents.nar | Bin .../data/nars}/name-after-node.nar | Bin .../data/nars}/nul-character.nar | Bin .../libutil-tests/data/nars}/slash.nar | Bin tests/functional/nars.sh | 28 ------------------ 9 files changed, 15 insertions(+), 29 deletions(-) rename {tests/functional => src/libutil-tests/data/nars}/dot.nar (100%) rename {tests/functional => src/libutil-tests/data/nars}/dotdot.nar (100%) rename {tests/functional => src/libutil-tests/data/nars}/empty.nar (100%) rename {tests/functional => src/libutil-tests/data/nars}/executable-after-contents.nar (100%) rename {tests/functional => src/libutil-tests/data/nars}/name-after-node.nar (100%) rename {tests/functional => src/libutil-tests/data/nars}/nul-character.nar (100%) rename {tests/functional => src/libutil-tests/data/nars}/slash.nar (100%) diff --git a/src/libutil-tests/archive.cc b/src/libutil-tests/archive.cc index 386f7b857..427b29d41 100644 --- a/src/libutil-tests/archive.cc +++ b/src/libutil-tests/archive.cc @@ -42,6 +42,20 @@ INSTANTIATE_TEST_SUITE_P( NarTest, InvalidNarTest, ::testing::Values( - std::pair{"invalid-tag-instead-of-contents", "bad archive: expected tag 'contents', got 'AAAAAAAA'"})); + std::pair{"invalid-tag-instead-of-contents", "bad archive: expected tag 'contents', got 'AAAAAAAA'"}, + // Unpacking a NAR with a NUL character in a file name should fail. + std::pair{"nul-character", "bad archive: NAR contains invalid file name 'f"}, + // Likewise for a '.' filename. + std::pair{"dot", "bad archive: NAR contains invalid file name '.'"}, + // Likewise for a '..' filename. + std::pair{"dotdot", "bad archive: NAR contains invalid file name '..'"}, + // Likewise for a filename containing a slash. + std::pair{"slash", "bad archive: NAR contains invalid file name 'x/y'"}, + // Likewise for an empty filename. + std::pair{"empty", "bad archive: NAR contains invalid file name ''"}, + // Test that the 'executable' field cannot come before the 'contents' field. + std::pair{"executable-after-contents", "bad archive: expected tag ')', got 'executable'"}, + // Test that the 'name' field cannot come before the 'node' field in a directory entry. 
+ std::pair{"name-after-node", "bad archive: expected tag 'name'"})); } // namespace nix diff --git a/tests/functional/dot.nar b/src/libutil-tests/data/nars/dot.nar similarity index 100% rename from tests/functional/dot.nar rename to src/libutil-tests/data/nars/dot.nar diff --git a/tests/functional/dotdot.nar b/src/libutil-tests/data/nars/dotdot.nar similarity index 100% rename from tests/functional/dotdot.nar rename to src/libutil-tests/data/nars/dotdot.nar diff --git a/tests/functional/empty.nar b/src/libutil-tests/data/nars/empty.nar similarity index 100% rename from tests/functional/empty.nar rename to src/libutil-tests/data/nars/empty.nar diff --git a/tests/functional/executable-after-contents.nar b/src/libutil-tests/data/nars/executable-after-contents.nar similarity index 100% rename from tests/functional/executable-after-contents.nar rename to src/libutil-tests/data/nars/executable-after-contents.nar diff --git a/tests/functional/name-after-node.nar b/src/libutil-tests/data/nars/name-after-node.nar similarity index 100% rename from tests/functional/name-after-node.nar rename to src/libutil-tests/data/nars/name-after-node.nar diff --git a/tests/functional/nul-character.nar b/src/libutil-tests/data/nars/nul-character.nar similarity index 100% rename from tests/functional/nul-character.nar rename to src/libutil-tests/data/nars/nul-character.nar diff --git a/tests/functional/slash.nar b/src/libutil-tests/data/nars/slash.nar similarity index 100% rename from tests/functional/slash.nar rename to src/libutil-tests/data/nars/slash.nar diff --git a/tests/functional/nars.sh b/tests/functional/nars.sh index dd90345a6..a52c257bc 100755 --- a/tests/functional/nars.sh +++ b/tests/functional/nars.sh @@ -131,31 +131,3 @@ else fi rm -f "$TEST_ROOT/unicode-*" - -# Unpacking a NAR with a NUL character in a file name should fail. -rm -rf "$TEST_ROOT/out" -expectStderr 1 nix-store --restore "$TEST_ROOT/out" < nul-character.nar | grepQuiet "NAR contains invalid file name 'f" - -# Likewise for a '.' filename. -rm -rf "$TEST_ROOT/out" -expectStderr 1 nix-store --restore "$TEST_ROOT/out" < dot.nar | grepQuiet "NAR contains invalid file name '.'" - -# Likewise for a '..' filename. -rm -rf "$TEST_ROOT/out" -expectStderr 1 nix-store --restore "$TEST_ROOT/out" < dotdot.nar | grepQuiet "NAR contains invalid file name '..'" - -# Likewise for a filename containing a slash. -rm -rf "$TEST_ROOT/out" -expectStderr 1 nix-store --restore "$TEST_ROOT/out" < slash.nar | grepQuiet "NAR contains invalid file name 'x/y'" - -# Likewise for an empty filename. -rm -rf "$TEST_ROOT/out" -expectStderr 1 nix-store --restore "$TEST_ROOT/out" < empty.nar | grepQuiet "NAR contains invalid file name ''" - -# Test that the 'executable' field cannot come before the 'contents' field. -rm -rf "$TEST_ROOT/out" -expectStderr 1 nix-store --restore "$TEST_ROOT/out" < executable-after-contents.nar | grepQuiet "expected tag ')', got 'executable'" - -# Test that the 'name' field cannot come before the 'node' field in a directory entry. -rm -rf "$TEST_ROOT/out" -expectStderr 1 nix-store --restore "$TEST_ROOT/out" < name-after-node.nar | grepQuiet "expected tag 'name'" From a400ea42575470b1f95d0199a3cc87f788577dcb Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Wed, 8 Oct 2025 00:04:37 +0000 Subject: [PATCH 271/332] ci: integrate vm_tests into main tests job This consolidates the separate vm_tests job into the main tests job, simplifying the CI workflow. VM tests now run as part of the regular test matrix. 
--- .github/workflows/ci.yml | 47 ++++++++----------------------------- ci/gha/tests/default.nix | 28 ++++++++++++++++++++++ ci/gha/vm-tests/wrapper.nix | 45 ----------------------------------- 3 files changed, 38 insertions(+), 82 deletions(-) delete mode 100644 ci/gha/vm-tests/wrapper.nix diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 00a808951..e82e59309 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -126,6 +126,14 @@ jobs: --argstr stdenv "${{ matrix.stdenv }}" \ ${{ format('--arg withAWS {0}', matrix.withAWS) }} \ ${{ format('--arg withCurlS3 {0}', matrix.withCurlS3) }} + - name: Run VM tests + run: | + nix build --file ci/gha/tests/wrapper.nix vmTests -L \ + --arg withInstrumentation ${{ matrix.instrumented }} \ + --argstr stdenv "${{ matrix.stdenv }}" \ + ${{ format('--arg withAWS {0}', matrix.withAWS) }} \ + ${{ format('--arg withCurlS3 {0}', matrix.withCurlS3) }} + if: ${{ matrix.os == 'linux' }} - name: Run flake checks and prepare the installer tarball run: | ci/gha/tests/build-checks @@ -213,7 +221,7 @@ jobs: echo "docker=${{ env._DOCKER_SECRETS != '' }}" >> $GITHUB_OUTPUT docker_push_image: - needs: [tests, vm_tests, check_secrets] + needs: [tests, check_secrets] permissions: contents: read packages: write @@ -266,43 +274,8 @@ jobs: docker tag nix:$NIX_VERSION $IMAGE_ID:master docker push $IMAGE_ID:master - vm_tests: - needs: basic-checks - strategy: - fail-fast: false - matrix: - include: - # TODO: remove once curl-based-s3 fully lands - - scenario: legacy s3 - withAWS: true - withCurlS3: false - - scenario: curl s3 - withAWS: false - withCurlS3: true - name: vm_tests (${{ matrix.scenario }}) - runs-on: ubuntu-24.04 - steps: - - uses: actions/checkout@v5 - - uses: ./.github/actions/install-nix-action - with: - dogfood: ${{ github.event_name == 'workflow_dispatch' && inputs.dogfood || github.event_name != 'workflow_dispatch' }} - extra_nix_config: - experimental-features = nix-command flakes - github_token: ${{ secrets.GITHUB_TOKEN }} - - uses: DeterminateSystems/magic-nix-cache-action@main - - name: Build VM tests - run: | - nix build -L \ - --file ci/gha/vm-tests/wrapper.nix \ - ${{ format('--arg withAWS {0}', matrix.withAWS) }} \ - ${{ format('--arg withCurlS3 {0}', matrix.withCurlS3) }} \ - functional_user \ - githubFlakes \ - nix-docker \ - tarballFlakes - flake_regressions: - needs: vm_tests + needs: tests runs-on: ubuntu-24.04 steps: - name: Checkout nix diff --git a/ci/gha/tests/default.nix b/ci/gha/tests/default.nix index bbcd7e6b7..d9115f92c 100644 --- a/ci/gha/tests/default.nix +++ b/ci/gha/tests/default.nix @@ -79,6 +79,14 @@ rec { } ); + # Import NixOS tests using the instrumented components + nixosTests = import ../../../tests/nixos { + inherit lib pkgs; + nixComponents = nixComponentsInstrumented; + nixpkgs = nixFlake.inputs.nixpkgs; + inherit (nixFlake.inputs) nixpkgs-23-11; + }; + /** Top-level tests for the flake outputs, as they would be built by hydra. These tests generally can't be overridden to run with sanitizers. @@ -229,4 +237,24 @@ rec { { inherit coverageProfileDrvs mergedProfdata coverageReports; }; + + vmTests = { + } + # FIXME: when the curlS3 implementation is complete, it should also enable these tests. 
+ // lib.optionalAttrs (withAWS == true) { + # S3 binary cache store test only runs when S3 support is enabled + inherit (nixosTests) s3-binary-cache-store; + } + // lib.optionalAttrs (!withSanitizers && !withCoverage) { + # evalNixpkgs uses non-instrumented components from hydraJobs, so only run it + # when not testing with sanitizers to avoid rebuilding nix + inherit (hydraJobs.tests) evalNixpkgs; + # FIXME: CI times out when building vm tests instrumented + inherit (nixosTests) + functional_user + githubFlakes + nix-docker + tarballFlakes + ; + }; } diff --git a/ci/gha/vm-tests/wrapper.nix b/ci/gha/vm-tests/wrapper.nix deleted file mode 100644 index 2ca80974c..000000000 --- a/ci/gha/vm-tests/wrapper.nix +++ /dev/null @@ -1,45 +0,0 @@ -{ - nixFlake ? builtins.getFlake ("git+file://" + toString ../../..), - system ? "x86_64-linux", - withAWS ? null, - withCurlS3 ? null, -}: - -let - pkgs = nixFlake.inputs.nixpkgs.legacyPackages.${system}; - lib = pkgs.lib; - - # Create base nixComponents using the flake's makeComponents - baseNixComponents = nixFlake.lib.makeComponents { - inherit pkgs; - }; - - # Override nixComponents if AWS parameters are specified - nixComponents = - if (withAWS == null && withCurlS3 == null) then - baseNixComponents - else - baseNixComponents.overrideScope ( - final: prev: { - nix-store = prev.nix-store.override ( - lib.optionalAttrs (withAWS != null) { inherit withAWS; } - // lib.optionalAttrs (withCurlS3 != null) { inherit withCurlS3; } - ); - } - ); - - # Import NixOS tests with the overridden nixComponents - tests = import ../../../tests/nixos { - inherit lib pkgs nixComponents; - nixpkgs = nixFlake.inputs.nixpkgs; - inherit (nixFlake.inputs) nixpkgs-23-11; - }; -in -{ - inherit (tests) - functional_user - githubFlakes - nix-docker - tarballFlakes - ; -} From 1d8dd77e1d71f8cc97e59ee11362e0cb8312bdce Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Wed, 8 Oct 2025 22:05:14 +0300 Subject: [PATCH 272/332] libutil: Fix renderAuthorityAndPath unreachable for path:/ URLs This was mistakenly triggered by path:/ URL, since the `//` would correspond to 3 empty segments. --- src/libutil-tests/url.cc | 13 +++++++++++++ src/libutil/url.cc | 2 +- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/src/libutil-tests/url.cc b/src/libutil-tests/url.cc index 5c7b02248..cd6816096 100644 --- a/src/libutil-tests/url.cc +++ b/src/libutil-tests/url.cc @@ -868,6 +868,12 @@ TEST_P(ParsedURLPathSegmentsTest, segmentsAreCorrect) EXPECT_EQ(encodeUrlPath(segments), testCase.path); } +TEST_P(ParsedURLPathSegmentsTest, to_string) +{ + const auto & testCase = GetParam(); + EXPECT_EQ(testCase.url, parseURL(testCase.url).to_string()); +} + INSTANTIATE_TEST_SUITE_P( ParsedURL, ParsedURLPathSegmentsTest, @@ -886,6 +892,13 @@ INSTANTIATE_TEST_SUITE_P( .skipEmpty = false, .description = "empty_authority_empty_path", }, + ParsedURLPathSegmentsTestCase{ + .url = "path:/", + .segments = {"", ""}, + .path = "/", + .skipEmpty = false, + .description = "empty_authority_root_path", + }, ParsedURLPathSegmentsTestCase{ .url = "scheme:///", .segments = {"", ""}, diff --git a/src/libutil/url.cc b/src/libutil/url.cc index 1c7fd3f0f..a50de0944 100644 --- a/src/libutil/url.cc +++ b/src/libutil/url.cc @@ -350,7 +350,7 @@ std::string ParsedURL::renderAuthorityAndPath() const must either be empty or begin with a slash ("/") character. 
*/ assert(path.empty() || path.front().empty()); res += authority->to_string(); - } else if (std::ranges::equal(std::views::take(path, 2), std::views::repeat("", 2))) { + } else if (std::ranges::equal(std::views::take(path, 3), std::views::repeat("", 3))) { /* If a URI does not contain an authority component, then the path cannot begin with two slash characters ("//") */ unreachable(); From 3c1e2e56ea21b975103e227fabc79574b811da15 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Wed, 8 Oct 2025 18:37:18 +0000 Subject: [PATCH 273/332] feat(libstore/filetransfer): add username/password authentication support Add a `UsernameAuth` struct and optional `usernameAuth` field to `FileTransferRequest` to support programmatic username/password authentication. This uses curl's `CURLOPT_USERNAME`/`CURLOPT_PASSWORD` options, which works with multiple protocols (HTTP, FTP, etc.) and is not specific to any particular authentication scheme. The primary motivation is to enable S3 authentication refactoring where AWS credentials (access key ID and secret access key) can be passed through this general-purpose mechanism, reducing the amount of S3-specific code behind `#if NIX_WITH_CURL_S3` guards. --- src/libstore/filetransfer.cc | 8 ++++++++ src/libstore/include/nix/store/filetransfer.hh | 16 ++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index 59fc75ed0..03bf3cda4 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -426,6 +426,14 @@ struct curlFileTransfer : public FileTransfer curl_easy_setopt(req, CURLOPT_ERRORBUFFER, errbuf); errbuf[0] = 0; + // Set up username/password authentication if provided + if (request.usernameAuth) { + curl_easy_setopt(req, CURLOPT_USERNAME, request.usernameAuth->username.c_str()); + if (request.usernameAuth->password) { + curl_easy_setopt(req, CURLOPT_PASSWORD, request.usernameAuth->password->c_str()); + } + } + result.data.clear(); result.bodySize = 0; } diff --git a/src/libstore/include/nix/store/filetransfer.hh b/src/libstore/include/nix/store/filetransfer.hh index 2f2d59036..abd9ece5b 100644 --- a/src/libstore/include/nix/store/filetransfer.hh +++ b/src/libstore/include/nix/store/filetransfer.hh @@ -77,6 +77,17 @@ extern FileTransferSettings fileTransferSettings; extern const unsigned int RETRY_TIME_MS_DEFAULT; +/** + * Username and optional password for HTTP basic authentication. + * These are used with curl's CURLOPT_USERNAME and CURLOPT_PASSWORD options + * for various protocols including HTTP, FTP, and others. + */ +struct UsernameAuth +{ + std::string username; + std::optional password; +}; + struct FileTransferRequest { ValidURL uri; @@ -92,6 +103,11 @@ struct FileTransferRequest std::optional data; std::string mimeType; std::function dataCallback; + /** + * Optional username and password for HTTP basic authentication. + * When provided, these credentials will be used with curl's CURLOPT_USERNAME/PASSWORD option. + */ + std::optional usernameAuth; FileTransferRequest(ValidURL uri) : uri(std::move(uri)) From 94f410b628ede2ecec6ed06cbb0f62e1f9d9e8cf Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 8 Oct 2025 19:59:04 +0200 Subject: [PATCH 274/332] exportReferencesGraph: Handle heterogeneous arrays This barfed with error: [json.exception.type_error.302] type must be string, but is array on `nix build github:malt3/bazel-env#bazel-env` because it has a `exportReferencesGraph` with a value like `["string",...["string"]]`. 
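As an illustration of the fix, here is a standalone sketch of the flattening now applied to each `exportReferencesGraph` entry (plain `std::set` and `std::runtime_error` stand in for Nix's `StringSet` and `Error`, and the store paths are made up):

```cpp
#include <cassert>
#include <nlohmann/json.hpp>
#include <set>
#include <stdexcept>
#include <string>

// Recursively collect every string in a possibly nested JSON array.
static void flatten(const nlohmann::json & value, std::set<std::string> & res)
{
    if (value.is_array())
        for (auto & v : value)
            flatten(v, res);
    else if (value.is_string())
        res.insert(value.get<std::string>());
    else
        throw std::runtime_error("'exportReferencesGraph' value is not an array or a string");
}

int main()
{
    // The heterogeneous shape that previously failed: strings mixed with nested arrays.
    auto value = nlohmann::json::parse(R"(["/nix/store/aaa-dep", ["/nix/store/aaa-dep", ["/nix/store/bbb-dep"]]])");
    std::set<std::string> refs;
    flatten(value, refs);
    assert(refs == (std::set<std::string>{"/nix/store/aaa-dep", "/nix/store/bbb-dep"}));
}
```

Arrays of any nesting depth collapse into a flat set of references; values that are neither strings nor arrays are still rejected.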
--- src/libstore/derivation-options.cc | 20 ++++++++++++++------ tests/functional/structured-attrs.nix | 4 ++++ tests/functional/structured-attrs.sh | 5 ++--- 3 files changed, 20 insertions(+), 9 deletions(-) diff --git a/src/libstore/derivation-options.cc b/src/libstore/derivation-options.cc index 844bce840..698485c0d 100644 --- a/src/libstore/derivation-options.cc +++ b/src/libstore/derivation-options.cc @@ -99,6 +99,17 @@ DerivationOptions DerivationOptions::fromStructuredAttrs( return fromStructuredAttrs(env, parsed ? &*parsed : nullptr); } +static void flatten(const nlohmann::json & value, StringSet & res) +{ + if (value.is_array()) + for (auto & v : value) + flatten(v, res); + else if (value.is_string()) + res.insert(value); + else + throw Error("'exportReferencesGraph' value is not an array or a string"); +} + DerivationOptions DerivationOptions::fromStructuredAttrs(const StringMap & env, const StructuredAttrs * parsed, bool shouldWarn) { @@ -219,12 +230,9 @@ DerivationOptions::fromStructuredAttrs(const StringMap & env, const StructuredAt if (!e || !e->is_object()) return ret; for (auto & [key, value] : getObject(*e)) { - if (value.is_array()) - ret.insert_or_assign(key, value); - else if (value.is_string()) - ret.insert_or_assign(key, StringSet{value}); - else - throw Error("'exportReferencesGraph' value is not an array or a string"); + StringSet ss; + flatten(value, ss); + ret.insert_or_assign(key, std::move(ss)); } } else { auto s = getOr(env, "exportReferencesGraph", ""); diff --git a/tests/functional/structured-attrs.nix b/tests/functional/structured-attrs.nix index 4e1984517..70ac807ab 100644 --- a/tests/functional/structured-attrs.nix +++ b/tests/functional/structured-attrs.nix @@ -82,4 +82,8 @@ mkDerivation { "foo$" = "BAD"; exportReferencesGraph.refs = [ dep ]; + exportReferencesGraph.refs2 = [ + dep + [ dep ] + ]; # regression test for heterogeneous arrays } diff --git a/tests/functional/structured-attrs.sh b/tests/functional/structured-attrs.sh index dfd5a1412..473a037f9 100755 --- a/tests/functional/structured-attrs.sh +++ b/tests/functional/structured-attrs.sh @@ -2,9 +2,8 @@ source common.sh -# 27ce722638 required some incompatible changes to the nix file, so skip this -# tests for the older versions -requireDaemonNewerThan "2.4pre20210712" +# https://github.com/NixOS/nix/pull/14189 +requireDaemonNewerThan "2.33" clearStoreIfPossible From 00c2a576668cc2eb7f44318c88c1790edfe38438 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Tue, 7 Oct 2025 03:44:46 +0000 Subject: [PATCH 275/332] feat(libstore/filetransfer): add S3 signing support --- src/libstore/aws-creds.cc | 28 ++++---- src/libstore/filetransfer.cc | 71 +++++++++++++++++-- src/libstore/include/nix/store/aws-creds.hh | 6 -- .../include/nix/store/filetransfer.hh | 19 +++++ 4 files changed, 100 insertions(+), 24 deletions(-) diff --git a/src/libstore/aws-creds.cc b/src/libstore/aws-creds.cc index dc8584e1b..cd404a554 100644 --- a/src/libstore/aws-creds.cc +++ b/src/libstore/aws-creds.cc @@ -24,6 +24,22 @@ namespace nix { namespace { +// Global credential provider cache using boost's concurrent map +// Key: profile name (empty string for default profile) +using CredentialProviderCache = + boost::concurrent_flat_map>; + +static CredentialProviderCache credentialProviderCache; + +/** + * Clear all cached credential providers. + * Called automatically by CrtWrapper destructor during static destruction. 
+ */ +static void clearAwsCredentialsCache() +{ + credentialProviderCache.clear(); +} + static void initAwsCrt() { struct CrtWrapper @@ -95,13 +111,6 @@ static AwsCredentials getCredentialsFromProvider(std::shared_ptr>; - -static CredentialProviderCache credentialProviderCache; - } // anonymous namespace AwsCredentials getAwsCredentials(const std::string & profile) @@ -160,11 +169,6 @@ void invalidateAwsCredentials(const std::string & profile) credentialProviderCache.erase(profile); } -void clearAwsCredentialsCache() -{ - credentialProviderCache.clear(); -} - AwsCredentials preResolveAwsCredentials(const ParsedS3URL & s3Url) { std::string profile = s3Url.profile.value_or(""); diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index 03bf3cda4..d6e21f3e6 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -9,9 +9,14 @@ #include "nix/util/signals.hh" #include "store-config-private.hh" +#include #if NIX_WITH_S3_SUPPORT # include #endif +#if NIX_WITH_CURL_S3 +# include "nix/store/aws-creds.hh" +# include "nix/store/s3-url.hh" +#endif #ifdef __linux__ # include "nix/util/linux-namespaces.hh" @@ -434,6 +439,16 @@ struct curlFileTransfer : public FileTransfer } } +#if NIX_WITH_CURL_S3 + // Set up AWS SigV4 signing if this is an S3 request + // Note: AWS SigV4 support guaranteed available (curl >= 7.75.0 checked at build time) + // The username/password (access key ID and secret key) are set via the general + // usernameAuth mechanism above. + if (request.awsSigV4Provider) { + curl_easy_setopt(req, CURLOPT_AWS_SIGV4, request.awsSigV4Provider->c_str()); + } +#endif + result.data.clear(); result.bodySize = 0; } @@ -808,7 +823,11 @@ struct curlFileTransfer : public FileTransfer void enqueueItem(std::shared_ptr item) { - if (item->request.data && item->request.uri.scheme() != "http" && item->request.uri.scheme() != "https") + if (item->request.data && item->request.uri.scheme() != "http" && item->request.uri.scheme() != "https" +#if NIX_WITH_CURL_S3 + && item->request.uri.scheme() != "s3" +#endif + ) throw nix::Error("uploading to '%s' is not supported", item->request.uri.to_string()); { @@ -826,9 +845,15 @@ struct curlFileTransfer : public FileTransfer { /* Ugly hack to support s3:// URIs. */ if (request.uri.scheme() == "s3") { +#if NIX_WITH_CURL_S3 + // New curl-based S3 implementation + auto modifiedRequest = request; + modifiedRequest.setupForS3(); + enqueueItem(std::make_shared(*this, std::move(modifiedRequest), std::move(callback))); +#elif NIX_WITH_S3_SUPPORT + // Old AWS SDK-based implementation // FIXME: do this on a worker thread try { -#if NIX_WITH_S3_SUPPORT auto parsed = ParsedS3URL::parse(request.uri.parsed()); std::string profile = parsed.profile.value_or(""); @@ -846,13 +871,12 @@ struct curlFileTransfer : public FileTransfer res.data = std::move(*s3Res.data); res.urls.push_back(request.uri.to_string()); callback(std::move(res)); -#else - throw nix::Error( - "cannot download '%s' because Nix is not built with S3 support", request.uri.to_string()); -#endif } catch (...) 
{ callback.rethrow(); } +#else + throw nix::Error("cannot download '%s' because Nix is not built with S3 support", request.uri.to_string()); +#endif return; } @@ -880,6 +904,41 @@ ref makeFileTransfer() return makeCurlFileTransfer(); } +#if NIX_WITH_CURL_S3 +void FileTransferRequest::setupForS3() +{ + auto parsedS3 = ParsedS3URL::parse(uri.parsed()); + // Update the request URI to use HTTPS + uri = parsedS3.toHttpsUrl(); + // This gets used later in a curl setopt + awsSigV4Provider = "aws:amz:" + parsedS3.region.value_or("us-east-1") + ":s3"; + // check if the request already has pre-resolved credentials + std::optional sessionToken; + if (usernameAuth) { + debug("Using pre-resolved AWS credentials from parent process"); + sessionToken = preResolvedAwsSessionToken; + } else { + std::string profile = parsedS3.profile.value_or(""); + try { + auto creds = getAwsCredentials(profile); + usernameAuth = UsernameAuth{ + .username = creds.accessKeyId, + .password = creds.secretAccessKey, + }; + sessionToken = creds.sessionToken; + } catch (const AwsAuthError & e) { + warn("AWS authentication failed for S3 request %s: %s", uri, e.what()); + // Invalidate the cached credentials so next request will retry + invalidateAwsCredentials(profile); + // Continue without authentication - might be a public bucket + return; + } + } + if (sessionToken) + headers.emplace_back("x-amz-security-token", *sessionToken); +} +#endif + std::future FileTransfer::enqueueFileTransfer(const FileTransferRequest & request) { auto promise = std::make_shared>(); diff --git a/src/libstore/include/nix/store/aws-creds.hh b/src/libstore/include/nix/store/aws-creds.hh index 16643c555..4930dc9d8 100644 --- a/src/libstore/include/nix/store/aws-creds.hh +++ b/src/libstore/include/nix/store/aws-creds.hh @@ -57,12 +57,6 @@ AwsCredentials getAwsCredentials(const std::string & profile = ""); */ void invalidateAwsCredentials(const std::string & profile); -/** - * Clear all cached credential providers. - * Typically called during application cleanup. - */ -void clearAwsCredentialsCache(); - /** * Pre-resolve AWS credentials for S3 URLs. * Used to cache credentials in parent process before forking. diff --git a/src/libstore/include/nix/store/filetransfer.hh b/src/libstore/include/nix/store/filetransfer.hh index abd9ece5b..942e05a61 100644 --- a/src/libstore/include/nix/store/filetransfer.hh +++ b/src/libstore/include/nix/store/filetransfer.hh @@ -11,6 +11,11 @@ #include "nix/util/serialise.hh" #include "nix/util/url.hh" +#include "nix/store/config.hh" +#if NIX_WITH_CURL_S3 +# include "nix/store/aws-creds.hh" +#endif + namespace nix { struct FileTransferSettings : Config @@ -108,6 +113,13 @@ struct FileTransferRequest * When provided, these credentials will be used with curl's CURLOPT_USERNAME/PASSWORD option. */ std::optional usernameAuth; +#if NIX_WITH_CURL_S3 + /** + * Pre-resolved AWS session token for S3 requests. + * When provided along with usernameAuth, this will be used instead of fetching fresh credentials. + */ + std::optional preResolvedAwsSessionToken; +#endif FileTransferRequest(ValidURL uri) : uri(std::move(uri)) @@ -119,6 +131,13 @@ struct FileTransferRequest { return data ? 
"upload" : "download"; } + +#if NIX_WITH_CURL_S3 +private: + friend struct curlFileTransfer; + void setupForS3(); + std::optional awsSigV4Provider; +#endif }; struct FileTransferResult From 0f016f9bf55eba195e5a47490e370812f4b0d505 Mon Sep 17 00:00:00 2001 From: Seth Flynn Date: Thu, 9 Oct 2025 03:11:56 -0400 Subject: [PATCH 276/332] packaging: only override `toml11` when necessary v4.4.0 hit Nixpkgs in https://github.com/NixOS/nixpkgs/pull/442682. Ideally we'd just use that, but this keeps the fallback behavior until it's more widespread --- packaging/dependencies.nix | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/packaging/dependencies.nix b/packaging/dependencies.nix index 981c1aa48..7f815f128 100644 --- a/packaging/dependencies.nix +++ b/packaging/dependencies.nix @@ -57,15 +57,20 @@ scope: { prevAttrs.postInstall; }); - toml11 = pkgs.toml11.overrideAttrs rec { - version = "4.4.0"; - src = pkgs.fetchFromGitHub { - owner = "ToruNiina"; - repo = "toml11"; - tag = "v${version}"; - hash = "sha256-sgWKYxNT22nw376ttGsTdg0AMzOwp8QH3E8mx0BZJTQ="; - }; - }; + # TODO: Remove this when https://github.com/NixOS/nixpkgs/pull/442682 is included in a stable release + toml11 = + if lib.versionAtLeast pkgs.toml11.version "4.4.0" then + pkgs.toml11 + else + pkgs.toml11.overrideAttrs rec { + version = "4.4.0"; + src = pkgs.fetchFromGitHub { + owner = "ToruNiina"; + repo = "toml11"; + tag = "v${version}"; + hash = "sha256-sgWKYxNT22nw376ttGsTdg0AMzOwp8QH3E8mx0BZJTQ="; + }; + }; # TODO Hack until https://github.com/NixOS/nixpkgs/issues/45462 is fixed. boost = From 118acc84ba029a48e58f92cdfab6c3fda5e7f9a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= Date: Thu, 9 Oct 2025 14:15:33 +0100 Subject: [PATCH 277/332] only build on push to master we have now merge queues for maintainance branches. We still build it for master to have our installer beeing updated. In future this part could go in new workflow instead. --- .github/workflows/ci.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f23a6c6e1..6e08b5a9e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -4,6 +4,8 @@ on: pull_request: merge_group: push: + branches: + - master workflow_dispatch: inputs: dogfood: From 0387b7d6db14a682dd8fd2bd2bd3aa5c04b4c06b Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Thu, 9 Oct 2025 20:40:40 +0200 Subject: [PATCH 278/332] Move openEvalCache to libflake Most of the eval cache logic is flake-independent and libexpr, but the loading part is not. `nix-flake` is the right component for this, as the eval cache isn't exactly specific to the command line. 
--- .../include/nix/cmd/installable-flake.hh | 2 - src/libcmd/installables.cc | 36 ---------------- src/libflake/flake.cc | 42 ++++++++++++++++++- src/libflake/include/nix/flake/flake.hh | 6 +++ 4 files changed, 46 insertions(+), 40 deletions(-) diff --git a/src/libcmd/include/nix/cmd/installable-flake.hh b/src/libcmd/include/nix/cmd/installable-flake.hh index 935ea8779..f3237c915 100644 --- a/src/libcmd/include/nix/cmd/installable-flake.hh +++ b/src/libcmd/include/nix/cmd/installable-flake.hh @@ -87,6 +87,4 @@ static inline FlakeRef defaultNixpkgsFlakeRef() return FlakeRef::fromAttrs(fetchSettings, {{"type", "indirect"}, {"id", "nixpkgs"}}); } -ref openEvalCache(EvalState & state, std::shared_ptr lockedFlake); - } // namespace nix diff --git a/src/libcmd/installables.cc b/src/libcmd/installables.cc index 91ad74308..7d6ec5199 100644 --- a/src/libcmd/installables.cc +++ b/src/libcmd/installables.cc @@ -443,42 +443,6 @@ static StorePath getDeriver(ref store, const Installable & i, const Store return *derivers.begin(); } -ref openEvalCache(EvalState & state, std::shared_ptr lockedFlake) -{ - auto fingerprint = evalSettings.useEvalCache && evalSettings.pureEval - ? lockedFlake->getFingerprint(state.store, state.fetchSettings) - : std::nullopt; - auto rootLoader = [&state, lockedFlake]() { - /* For testing whether the evaluation cache is - complete. */ - if (getEnv("NIX_ALLOW_EVAL").value_or("1") == "0") - throw Error("not everything is cached, but evaluation is not allowed"); - - auto vFlake = state.allocValue(); - flake::callFlake(state, *lockedFlake, *vFlake); - - state.forceAttrs(*vFlake, noPos, "while parsing cached flake data"); - - auto aOutputs = vFlake->attrs()->get(state.symbols.create("outputs")); - assert(aOutputs); - - return aOutputs->value; - }; - - if (fingerprint) { - auto search = state.evalCaches.find(fingerprint.value()); - if (search == state.evalCaches.end()) { - search = - state.evalCaches - .emplace(fingerprint.value(), make_ref(fingerprint, state, rootLoader)) - .first; - } - return search->second; - } else { - return make_ref(std::nullopt, state, rootLoader); - } -} - Installables SourceExprCommand::parseInstallables(ref store, std::vector ss) { Installables result; diff --git a/src/libflake/flake.cc b/src/libflake/flake.cc index 486118963..b9a2388c7 100644 --- a/src/libflake/flake.cc +++ b/src/libflake/flake.cc @@ -1,6 +1,9 @@ #include "nix/util/terminal.hh" +#include "nix/util/ref.hh" +#include "nix/util/environment-variables.hh" #include "nix/flake/flake.hh" #include "nix/expr/eval.hh" +#include "nix/expr/eval-cache.hh" #include "nix/expr/eval-settings.hh" #include "nix/flake/lockfile.hh" #include "nix/expr/primops.hh" @@ -924,8 +927,6 @@ void callFlake(EvalState & state, const LockedFlake & lockedFlake, Value & vRes) state.callFunction(*vCallFlake, args, vRes, noPos); } -} // namespace flake - std::optional LockedFlake::getFingerprint(ref store, const fetchers::Settings & fetchSettings) const { if (lockFile.isUnlocked(fetchSettings)) @@ -953,4 +954,41 @@ std::optional LockedFlake::getFingerprint(ref store, const f Flake::~Flake() {} +ref openEvalCache(EvalState & state, std::shared_ptr lockedFlake) +{ + auto fingerprint = state.settings.useEvalCache && state.settings.pureEval + ? lockedFlake->getFingerprint(state.store, state.fetchSettings) + : std::nullopt; + auto rootLoader = [&state, lockedFlake]() { + /* For testing whether the evaluation cache is + complete. 
*/ + if (getEnv("NIX_ALLOW_EVAL").value_or("1") == "0") + throw Error("not everything is cached, but evaluation is not allowed"); + + auto vFlake = state.allocValue(); + callFlake(state, *lockedFlake, *vFlake); + + state.forceAttrs(*vFlake, noPos, "while parsing cached flake data"); + + auto aOutputs = vFlake->attrs()->get(state.symbols.create("outputs")); + assert(aOutputs); + + return aOutputs->value; + }; + + if (fingerprint) { + auto search = state.evalCaches.find(fingerprint.value()); + if (search == state.evalCaches.end()) { + search = state.evalCaches + .emplace(fingerprint.value(), make_ref(fingerprint, state, rootLoader)) + .first; + } + return search->second; + } else { + return make_ref(std::nullopt, state, rootLoader); + } +} + +} // namespace flake + } // namespace nix diff --git a/src/libflake/include/nix/flake/flake.hh b/src/libflake/include/nix/flake/flake.hh index 13002b47c..ba27bd09e 100644 --- a/src/libflake/include/nix/flake/flake.hh +++ b/src/libflake/include/nix/flake/flake.hh @@ -5,6 +5,7 @@ #include "nix/flake/flakeref.hh" #include "nix/flake/lockfile.hh" #include "nix/expr/value.hh" +#include "nix/expr/eval-cache.hh" namespace nix { @@ -218,6 +219,11 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & flakeRe void callFlake(EvalState & state, const LockedFlake & lockedFlake, Value & v); +/** + * Open an evaluation cache for a flake. + */ +ref openEvalCache(EvalState & state, std::shared_ptr lockedFlake); + } // namespace flake void emitTreeAttrs( From 42c9cbf9ca6edad0c4beabee137d3ce6384c42e2 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Thu, 9 Oct 2025 20:53:48 +0200 Subject: [PATCH 279/332] Use ref where non-null --- src/libcmd/include/nix/cmd/installable-flake.hh | 2 +- src/libcmd/installable-flake.cc | 8 ++++---- src/libcmd/installables.cc | 3 +-- src/libflake/flake.cc | 2 +- src/libflake/include/nix/flake/flake.hh | 2 +- src/nix/flake.cc | 4 ++-- 6 files changed, 10 insertions(+), 11 deletions(-) diff --git a/src/libcmd/include/nix/cmd/installable-flake.hh b/src/libcmd/include/nix/cmd/installable-flake.hh index f3237c915..9f449ad48 100644 --- a/src/libcmd/include/nix/cmd/installable-flake.hh +++ b/src/libcmd/include/nix/cmd/installable-flake.hh @@ -69,7 +69,7 @@ struct InstallableFlake : InstallableValue */ std::vector> getCursors(EvalState & state) override; - std::shared_ptr getLockedFlake() const; + ref getLockedFlake() const; FlakeRef nixpkgsFlakeRef() const; }; diff --git a/src/libcmd/installable-flake.cc b/src/libcmd/installable-flake.cc index 5431100d3..65f48fa2b 100644 --- a/src/libcmd/installable-flake.cc +++ b/src/libcmd/installable-flake.cc @@ -185,16 +185,16 @@ std::vector> InstallableFlake::getCursors(EvalState return res; } -std::shared_ptr InstallableFlake::getLockedFlake() const +ref InstallableFlake::getLockedFlake() const { if (!_lockedFlake) { flake::LockFlags lockFlagsApplyConfig = lockFlags; // FIXME why this side effect? 
lockFlagsApplyConfig.applyNixConfig = true; - _lockedFlake = - std::make_shared(lockFlake(flakeSettings, *state, flakeRef, lockFlagsApplyConfig)); + _lockedFlake = make_ref(lockFlake(flakeSettings, *state, flakeRef, lockFlagsApplyConfig)); } - return _lockedFlake; + // _lockedFlake is now non-null but still just a shared_ptr + return ref(_lockedFlake); } FlakeRef InstallableFlake::nixpkgsFlakeRef() const diff --git a/src/libcmd/installables.cc b/src/libcmd/installables.cc index 7d6ec5199..f0f36378b 100644 --- a/src/libcmd/installables.cc +++ b/src/libcmd/installables.cc @@ -342,8 +342,7 @@ void completeFlakeRefWithFragment( parseFlakeRef(fetchSettings, expandTilde(flakeRefS), std::filesystem::current_path().string()); auto evalCache = openEvalCache( - *evalState, - std::make_shared(lockFlake(flakeSettings, *evalState, flakeRef, lockFlags))); + *evalState, make_ref(lockFlake(flakeSettings, *evalState, flakeRef, lockFlags))); auto root = evalCache->getRoot(); diff --git a/src/libflake/flake.cc b/src/libflake/flake.cc index b9a2388c7..26b3ef2a0 100644 --- a/src/libflake/flake.cc +++ b/src/libflake/flake.cc @@ -954,7 +954,7 @@ std::optional LockedFlake::getFingerprint(ref store, const f Flake::~Flake() {} -ref openEvalCache(EvalState & state, std::shared_ptr lockedFlake) +ref openEvalCache(EvalState & state, ref lockedFlake) { auto fingerprint = state.settings.useEvalCache && state.settings.pureEval ? lockedFlake->getFingerprint(state.store, state.fetchSettings) diff --git a/src/libflake/include/nix/flake/flake.hh b/src/libflake/include/nix/flake/flake.hh index ba27bd09e..b3168144c 100644 --- a/src/libflake/include/nix/flake/flake.hh +++ b/src/libflake/include/nix/flake/flake.hh @@ -222,7 +222,7 @@ void callFlake(EvalState & state, const LockedFlake & lockedFlake, Value & v); /** * Open an evaluation cache for a flake. */ -ref openEvalCache(EvalState & state, std::shared_ptr lockedFlake); +ref openEvalCache(EvalState & state, ref lockedFlake); } // namespace flake diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 18be64bba..cf05f6943 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -1155,7 +1155,7 @@ struct CmdFlakeShow : FlakeCommand, MixJSON evalSettings.enableImportFromDerivation.setDefault(false); auto state = getEvalState(); - auto flake = std::make_shared(lockFlake()); + auto flake = make_ref(lockFlake()); auto localSystem = std::string(settings.thisSystem.get()); std::function & attrPath, const Symbol & attr)> @@ -1443,7 +1443,7 @@ struct CmdFlakeShow : FlakeCommand, MixJSON return j; }; - auto cache = openEvalCache(*state, flake); + auto cache = openEvalCache(*state, ref(flake)); auto j = visit(*cache->getRoot(), {}, fmt(ANSI_BOLD "%s" ANSI_NORMAL, flake->flake.lockedRef), ""); if (json) From abcceafbce41374b70ed090aeb0627ebdc26d3af Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Thu, 9 Oct 2025 21:23:26 +0200 Subject: [PATCH 280/332] Use const for lock in openEvalCache --- src/libflake/flake.cc | 2 +- src/libflake/include/nix/flake/flake.hh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/libflake/flake.cc b/src/libflake/flake.cc index 26b3ef2a0..147bff820 100644 --- a/src/libflake/flake.cc +++ b/src/libflake/flake.cc @@ -954,7 +954,7 @@ std::optional LockedFlake::getFingerprint(ref store, const f Flake::~Flake() {} -ref openEvalCache(EvalState & state, ref lockedFlake) +ref openEvalCache(EvalState & state, ref lockedFlake) { auto fingerprint = state.settings.useEvalCache && state.settings.pureEval ? 
lockedFlake->getFingerprint(state.store, state.fetchSettings) diff --git a/src/libflake/include/nix/flake/flake.hh b/src/libflake/include/nix/flake/flake.hh index b3168144c..79a50f0f7 100644 --- a/src/libflake/include/nix/flake/flake.hh +++ b/src/libflake/include/nix/flake/flake.hh @@ -222,7 +222,7 @@ void callFlake(EvalState & state, const LockedFlake & lockedFlake, Value & v); /** * Open an evaluation cache for a flake. */ -ref openEvalCache(EvalState & state, ref lockedFlake); +ref openEvalCache(EvalState & state, ref lockedFlake); } // namespace flake From c58acff42afd591762746538f0d2226ee63cbef0 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Fri, 10 Oct 2025 00:11:25 +0300 Subject: [PATCH 281/332] libfetchers: Remove toRealPath in SourceHutInputScheme::getRevFromRef This code had several issues: 1. Not going through the SourceAccessor means that we can only work with physical paths. 2. It did not actually check that the file exists. (std::ifstream does not check it by default). --- src/libfetchers/github.cc | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/src/libfetchers/github.cc b/src/libfetchers/github.cc index 3b723d7d8..a905bb384 100644 --- a/src/libfetchers/github.cc +++ b/src/libfetchers/github.cc @@ -548,13 +548,10 @@ struct SourceHutInputScheme : GitArchiveInputScheme std::string refUri; if (ref == "HEAD") { - auto file = store->toRealPath( - downloadFile(store, *input.settings, fmt("%s/HEAD", base_url), "source", headers).storePath); - std::ifstream is(file); - std::string line; - getline(is, line); + auto downloadFileResult = downloadFile(store, *input.settings, fmt("%s/HEAD", base_url), "source", headers); + auto contents = nix::ref(store->getFSAccessor(downloadFileResult.storePath))->readFile(CanonPath::root); - auto remoteLine = git::parseLsRemoteLine(line); + auto remoteLine = git::parseLsRemoteLine(getLine(contents).first); if (!remoteLine) { throw BadURL("in '%d', couldn't resolve HEAD ref '%d'", input.to_string(), ref); } @@ -564,9 +561,10 @@ struct SourceHutInputScheme : GitArchiveInputScheme } std::regex refRegex(refUri); - auto file = store->toRealPath( - downloadFile(store, *input.settings, fmt("%s/info/refs", base_url), "source", headers).storePath); - std::ifstream is(file); + auto downloadFileResult = + downloadFile(store, *input.settings, fmt("%s/info/refs", base_url), "source", headers); + auto contents = nix::ref(store->getFSAccessor(downloadFileResult.storePath))->readFile(CanonPath::root); + std::istringstream is(contents); std::string line; std::optional id; From 0855b715a97a44cbcb23492c94ed91fcf7162c4d Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Thu, 9 Oct 2025 02:42:14 +0000 Subject: [PATCH 282/332] feat(libstore): add curl-based S3 store implementation Add a new S3BinaryCacheStore implementation that inherits from HttpBinaryCacheStore. The implementation is activated with NIX_WITH_CURL_S3, keeping the existing NIX_WITH_S3_SUPPORT (AWS SDK) implementation unchanged. 
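Point 2 is easy to miss, so here is a self-contained illustration (independent of Nix, with a made-up path): constructing an `std::ifstream` on a missing file does not throw unless exceptions are explicitly enabled, so the old code would quietly parse an empty line.

```cpp
#include <fstream>
#include <iostream>
#include <string>

int main()
{
    std::ifstream is("/does/not/exist"); // no exception; the stream is just in a failed state
    std::string line;
    std::getline(is, line);              // quietly does nothing
    std::cout << std::boolalpha
              << "open failed: " << is.fail()  // true
              << ", line: '" << line << "'\n"; // empty
}
```

Going through the SourceAccessor instead works for non-physical store paths and turns a missing file into a real error.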
--- src/libstore-tests/s3-binary-cache-store.cc | 127 ++++++++++++++++++ .../nix/store/s3-binary-cache-store.hh | 75 +++++++++++ src/libstore/s3-binary-cache-store.cc | 46 +++++++ 3 files changed, 248 insertions(+) diff --git a/src/libstore-tests/s3-binary-cache-store.cc b/src/libstore-tests/s3-binary-cache-store.cc index 251e96172..8c58b8408 100644 --- a/src/libstore-tests/s3-binary-cache-store.cc +++ b/src/libstore-tests/s3-binary-cache-store.cc @@ -15,4 +15,131 @@ TEST(S3BinaryCacheStore, constructConfig) } // namespace nix +#elif NIX_WITH_CURL_S3 + +# include "nix/store/http-binary-cache-store.hh" +# include "nix/store/filetransfer.hh" +# include "nix/store/s3-url.hh" + +# include + +namespace nix { + +TEST(S3BinaryCacheStore, constructConfig) +{ + S3BinaryCacheStoreConfig config{"s3", "foobar", {}}; + + // The bucket name is stored as the host part of the authority in cacheUri + EXPECT_EQ( + config.cacheUri, + (ParsedURL{ + .scheme = "s3", + .authority = ParsedURL::Authority{.host = "foobar"}, + })); +} + +TEST(S3BinaryCacheStore, constructConfigWithRegion) +{ + Store::Config::Params params{{"region", "eu-west-1"}}; + S3BinaryCacheStoreConfig config{"s3", "my-bucket", params}; + + EXPECT_EQ( + config.cacheUri, + (ParsedURL{ + .scheme = "s3", + .authority = ParsedURL::Authority{.host = "my-bucket"}, + .query = (StringMap) {{"region", "eu-west-1"}}, + })); + EXPECT_EQ(config.region.get(), "eu-west-1"); +} + +TEST(S3BinaryCacheStore, defaultSettings) +{ + S3BinaryCacheStoreConfig config{"s3", "test-bucket", {}}; + + EXPECT_EQ( + config.cacheUri, + (ParsedURL{ + .scheme = "s3", + .authority = ParsedURL::Authority{.host = "test-bucket"}, + })); + + // Check default values + EXPECT_EQ(config.region.get(), "us-east-1"); + EXPECT_EQ(config.profile.get(), "default"); + EXPECT_EQ(config.scheme.get(), "https"); + EXPECT_EQ(config.endpoint.get(), ""); +} + +/** + * Test that S3BinaryCacheStore properly preserves S3-specific parameters + */ +TEST(S3BinaryCacheStore, s3StoreConfigPreservesParameters) +{ + StringMap params; + params["region"] = "eu-west-1"; + params["endpoint"] = "custom.s3.com"; + + S3BinaryCacheStoreConfig config("s3", "test-bucket", params); + + // The config should preserve S3-specific parameters + EXPECT_EQ( + config.cacheUri, + (ParsedURL{ + .scheme = "s3", + .authority = ParsedURL::Authority{.host = "test-bucket"}, + .query = (StringMap) {{"region", "eu-west-1"}, {"endpoint", "custom.s3.com"}}, + })); +} + +/** + * Test that S3 store scheme is properly registered + */ +TEST(S3BinaryCacheStore, s3SchemeRegistration) +{ + auto schemes = S3BinaryCacheStoreConfig::uriSchemes(); + EXPECT_TRUE(schemes.count("s3") > 0) << "S3 scheme should be supported"; + + // Verify HttpBinaryCacheStoreConfig doesn't directly list S3 + auto httpSchemes = HttpBinaryCacheStoreConfig::uriSchemes(); + EXPECT_FALSE(httpSchemes.count("s3") > 0) << "HTTP store shouldn't directly list S3 scheme"; +} + +/** + * Test that only S3-specific parameters are preserved in cacheUri, + * while non-S3 store parameters are not propagated to the URL + */ +TEST(S3BinaryCacheStore, parameterFiltering) +{ + StringMap params; + params["region"] = "eu-west-1"; + params["endpoint"] = "minio.local"; + params["want-mass-query"] = "true"; // Non-S3 store parameter + params["priority"] = "10"; // Non-S3 store parameter + + S3BinaryCacheStoreConfig config("s3", "test-bucket", params); + + // Only S3-specific params should be in cacheUri.query + EXPECT_EQ( + config.cacheUri, + (ParsedURL{ + .scheme = "s3", + .authority = 
ParsedURL::Authority{.host = "test-bucket"}, + .query = (StringMap) {{"region", "eu-west-1"}, {"endpoint", "minio.local"}}, + })); + + // But the non-S3 params should still be set on the config + EXPECT_EQ(config.wantMassQuery.get(), true); + EXPECT_EQ(config.priority.get(), 10); + + // And all params (S3 and non-S3) should be returned by getReference() + auto ref = config.getReference(); + EXPECT_EQ(ref.params["region"], "eu-west-1"); + EXPECT_EQ(ref.params["endpoint"], "minio.local"); + EXPECT_EQ(ref.params["want-mass-query"], "true"); + EXPECT_EQ(ref.params["priority"], "10"); +} + +} // namespace nix + #endif diff --git a/src/libstore/include/nix/store/s3-binary-cache-store.hh b/src/libstore/include/nix/store/s3-binary-cache-store.hh index 2fe66b0ad..0f8fff030 100644 --- a/src/libstore/include/nix/store/s3-binary-cache-store.hh +++ b/src/libstore/include/nix/store/s3-binary-cache-store.hh @@ -134,4 +134,79 @@ struct S3BinaryCacheStore : virtual BinaryCacheStore } // namespace nix +#elif NIX_WITH_CURL_S3 + +# include "nix/store/http-binary-cache-store.hh" + +namespace nix { + +struct S3BinaryCacheStoreConfig : HttpBinaryCacheStoreConfig +{ + using HttpBinaryCacheStoreConfig::HttpBinaryCacheStoreConfig; + + S3BinaryCacheStoreConfig(std::string_view uriScheme, std::string_view bucketName, const Params & params); + + const Setting profile{ + this, + "default", + "profile", + R"( + The name of the AWS configuration profile to use. By default + Nix uses the `default` profile. + )"}; + +public: + + const Setting region{ + this, + "us-east-1", + "region", + R"( + The region of the S3 bucket. If your bucket is not in + `us-east-1`, you should always explicitly specify the region + parameter. + )"}; + + const Setting scheme{ + this, + "https", + "scheme", + R"( + The scheme used for S3 requests, `https` (default) or `http`. This + option allows you to disable HTTPS for binary caches which don't + support it. + + > **Note** + > + > HTTPS should be used if the cache might contain sensitive + > information. + )"}; + + const Setting endpoint{ + this, + "", + "endpoint", + R"( + The S3 endpoint to use. When empty (default), uses AWS S3 with + region-specific endpoints (e.g., s3.us-east-1.amazonaws.com). + For S3-compatible services such as MinIO, set this to your service's endpoint. + + > **Note** + > + > Custom endpoints must support HTTPS and use path-based + > addressing instead of virtual host based addressing. 
+ )"}; + + static const std::string name() + { + return "S3 Binary Cache Store"; + } + + static StringSet uriSchemes(); + + static std::string doc(); +}; + +} // namespace nix + #endif diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc index b70f04be7..ab0847bb1 100644 --- a/src/libstore/s3-binary-cache-store.cc +++ b/src/libstore/s3-binary-cache-store.cc @@ -589,4 +589,50 @@ static RegisterStoreImplementation regS3BinaryCa } // namespace nix +#elif NIX_WITH_CURL_S3 + +# include + +# include "nix/store/s3-binary-cache-store.hh" +# include "nix/store/http-binary-cache-store.hh" +# include "nix/store/store-registration.hh" + +namespace nix { + +StringSet S3BinaryCacheStoreConfig::uriSchemes() +{ + return {"s3"}; +} + +S3BinaryCacheStoreConfig::S3BinaryCacheStoreConfig( + std::string_view scheme, std::string_view _cacheUri, const Params & params) + : StoreConfig(params) + , HttpBinaryCacheStoreConfig(scheme, _cacheUri, params) +{ + // For S3 stores, preserve S3-specific query parameters as part of the URL + // These are needed for region specification and other S3-specific settings + assert(cacheUri.query.empty()); + + // Only copy S3-specific parameters to the URL query + static const std::set s3Params = {"region", "endpoint", "profile", "scheme"}; + for (const auto & [key, value] : params) { + if (s3Params.contains(key)) { + cacheUri.query[key] = value; + } + } +} + +std::string S3BinaryCacheStoreConfig::doc() +{ + return R"( + **Store URL format**: `s3://bucket-name` + + This store allows reading and writing a binary cache stored in an AWS S3 bucket. + )"; +} + +static RegisterStoreImplementation registerS3BinaryCacheStore; + +} // namespace nix + #endif From b56dd21c311b1ad1e19bfb1180a0b5f94834b85d Mon Sep 17 00:00:00 2001 From: John Ericson Date: Fri, 10 Oct 2025 17:18:40 -0400 Subject: [PATCH 283/332] `Settings::ExternalBuilder::systems` make set Nothing cares about the order, actually. 
--- src/libstore/include/nix/store/globals.hh | 2 +- src/libstore/unix/build/external-derivation-builder.cc | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/src/libstore/include/nix/store/globals.hh b/src/libstore/include/nix/store/globals.hh index 1b59bd6fc..be3561848 100644 --- a/src/libstore/include/nix/store/globals.hh +++ b/src/libstore/include/nix/store/globals.hh @@ -1375,7 +1375,7 @@ public: struct ExternalBuilder { - std::vector systems; + StringSet systems; Path program; std::vector args; }; diff --git a/src/libstore/unix/build/external-derivation-builder.cc b/src/libstore/unix/build/external-derivation-builder.cc index 71cfd1a62..f20badb85 100644 --- a/src/libstore/unix/build/external-derivation-builder.cc +++ b/src/libstore/unix/build/external-derivation-builder.cc @@ -19,10 +19,9 @@ struct ExternalDerivationBuilder : DerivationBuilderImpl LocalStore & store, std::unique_ptr & miscMethods, DerivationBuilderParams & params) { for (auto & handler : settings.externalBuilders.get()) { - for (auto & system : handler.systems) - if (params.drv.platform == system) - return std::make_unique( - store, std::move(miscMethods), std::move(params), handler); + if (handler.systems.contains(params.drv.platform)) + return std::make_unique( + store, std::move(miscMethods), std::move(params), handler); } return {}; } From f30cb8667bab3856f083dde308ec35df7c4adbc3 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Fri, 10 Oct 2025 23:57:36 +0300 Subject: [PATCH 284/332] libstore: Fix double-quoting of paths in logs std::filesystem::path is already quoted by boost::format with double quotes ("). --- src/libstore/local-store.cc | 6 +++--- src/libstore/optimise-store.cc | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index ebc987ee0..cbd3fa6d8 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -1383,7 +1383,7 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair) for (auto & link : DirectoryIterator{linksDir}) { checkInterrupt(); auto name = link.path().filename(); - printMsg(lvlTalkative, "checking contents of '%s'", name); + printMsg(lvlTalkative, "checking contents of %s", name); PosixSourceAccessor accessor; std::string hash = hashPath( PosixSourceAccessor::createAtRoot(link.path()), @@ -1391,10 +1391,10 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair) HashAlgorithm::SHA256) .first.to_string(HashFormat::Nix32, false); if (hash != name.string()) { - printError("link '%s' was modified! expected hash '%s', got '%s'", link.path(), name, hash); + printError("link %s was modified! expected hash %s, got '%s'", link.path(), name, hash); if (repair) { std::filesystem::remove(link.path()); - printInfo("removed link '%s'", link.path()); + printInfo("removed link %s", link.path()); } else { errors = true; } diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc index 1cf28e022..8f2878136 100644 --- a/src/libstore/optimise-store.cc +++ b/src/libstore/optimise-store.cc @@ -202,7 +202,7 @@ void LocalStore::optimisePath_( full. When that happens, it's fine to ignore it: we just effectively disable deduplication of this file. 
*/ - printInfo("cannot link '%s' to '%s': %s", linkPath, path, strerror(errno)); + printInfo("cannot link %s to '%s': %s", linkPath, path, strerror(errno)); return; } @@ -216,11 +216,11 @@ void LocalStore::optimisePath_( auto stLink = lstat(linkPath.string()); if (st.st_ino == stLink.st_ino) { - debug("'%1%' is already linked to '%2%'", path, linkPath); + debug("'%1%' is already linked to %2%", path, linkPath); return; } - printMsg(lvlTalkative, "linking '%1%' to '%2%'", path, linkPath); + printMsg(lvlTalkative, "linking '%1%' to %2%", path, linkPath); /* Make the containing directory writable, but only if it's not the store itself (we don't want or need to mess with its @@ -245,7 +245,7 @@ void LocalStore::optimisePath_( systems). This is likely to happen with empty files. Just shrug and ignore. */ if (st.st_size) - printInfo("'%1%' has maximum number of links", linkPath); + printInfo("%1% has maximum number of links", linkPath); return; } throw; @@ -256,13 +256,13 @@ void LocalStore::optimisePath_( std::filesystem::rename(tempLink, path); } catch (std::filesystem::filesystem_error & e) { std::filesystem::remove(tempLink); - printError("unable to unlink '%1%'", tempLink); + printError("unable to unlink %1%", tempLink); if (e.code() == std::errc::too_many_links) { /* Some filesystems generate too many links on the rename, rather than on the original link. (Probably it temporarily increases the st_nlink field before decreasing it again.) */ - debug("'%s' has reached maximum number of links", linkPath); + debug("%s has reached maximum number of links", linkPath); return; } throw; From 2ff59ec3e0fc093dcd0064bc5df21c5d62ea2445 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Fri, 10 Oct 2025 17:27:41 -0400 Subject: [PATCH 285/332] Use `std::ranges::find_if` for finding external builders Co-authored-by: Sergei Zimmerman --- src/libstore/unix/build/external-derivation-builder.cc | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/libstore/unix/build/external-derivation-builder.cc b/src/libstore/unix/build/external-derivation-builder.cc index f20badb85..ebcaad525 100644 --- a/src/libstore/unix/build/external-derivation-builder.cc +++ b/src/libstore/unix/build/external-derivation-builder.cc @@ -18,10 +18,12 @@ struct ExternalDerivationBuilder : DerivationBuilderImpl static std::unique_ptr newIfSupported( LocalStore & store, std::unique_ptr & miscMethods, DerivationBuilderParams & params) { - for (auto & handler : settings.externalBuilders.get()) { - if (handler.systems.contains(params.drv.platform)) - return std::make_unique( - store, std::move(miscMethods), std::move(params), handler); + if (auto it = std::ranges::find_if( + settings.externalBuilders.get(), + [&](const auto & handler) { return handler.systems.contains(params.drv.platform); }); + it != settings.externalBuilders.get().end()) { + return std::make_unique( + store, std::move(miscMethods), std::move(params), *it); } return {}; } From b57caaa1a273323b596097ab5509797b38e2e272 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Sat, 16 Aug 2025 14:25:28 -0400 Subject: [PATCH 286/332] Consolidate logic choosing where we can/should build a bit I want to separate "policy" from "mechanism". Now the logic to decide how to build (a policy choice, though with some hard constraints) is all in derivation building goal, and all in the same spot. build hook, external builder, or local builder --- the choice between all three is made in the same spot --- pure policy. 
Now, if you want to use the external deriation builder, you simply provide the `ExternalBuilder` you wish to use, and there is no additional checking --- pure mechanism. It is the responsibility of the caller to choose an external builder that works for the derivation in question. Also, `checkSystem()` was the only thing throwing `BuildError` from `startBuilder`. Now that that is gone, we can now remove the `try...catch` around that. --- src/libstore/build/derivation-builder.cc | 27 ++++++ .../build/derivation-building-goal.cc | 94 +++++++++++++------ src/libstore/globals.cc | 11 ++- .../nix/store/build/derivation-builder.hh | 22 +++++ src/libstore/include/nix/store/globals.hh | 13 ++- src/libstore/meson.build | 1 + src/libstore/unix/build/derivation-builder.cc | 38 -------- .../unix/build/external-derivation-builder.cc | 28 +++--- 8 files changed, 140 insertions(+), 94 deletions(-) create mode 100644 src/libstore/build/derivation-builder.cc diff --git a/src/libstore/build/derivation-builder.cc b/src/libstore/build/derivation-builder.cc new file mode 100644 index 000000000..39ac40175 --- /dev/null +++ b/src/libstore/build/derivation-builder.cc @@ -0,0 +1,27 @@ +#include "nix/util/json-utils.hh" +#include "nix/store/build/derivation-builder.hh" + +namespace nlohmann { + +using namespace nix; + +ExternalBuilder adl_serializer::from_json(const json & json) +{ + auto obj = getObject(json); + return { + .systems = valueAt(obj, "systems"), + .program = valueAt(obj, "program"), + .args = valueAt(obj, "args"), + }; +} + +void adl_serializer::to_json(json & json, const ExternalBuilder & eb) +{ + json = { + {"systems", eb.systems}, + {"program", eb.program}, + {"args", eb.args}, + }; +} + +} // namespace nlohmann diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index 001816ca0..e8ee945d9 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -491,6 +491,8 @@ Goal::Co DerivationBuildingGoal::tryToBuild() bool useHook; + const ExternalBuilder * externalBuilder = nullptr; + while (true) { trace("trying to build"); @@ -584,7 +586,42 @@ Goal::Co DerivationBuildingGoal::tryToBuild() co_await waitForAWhile(); continue; case rpDecline: - /* We should do it ourselves. */ + /* We should do it ourselves. + + Now that we've decided we can't / won't do a remote build, check + that we can in fact build locally. First see if there is an + external builder for a "semi-local build". If there is, prefer to + use that. If there is not, then check if we can do a "true" local + build. 
*/ + + externalBuilder = settings.findExternalDerivationBuilderIfSupported(*drv); + + if (!externalBuilder && !drvOptions->canBuildLocally(worker.store, *drv)) { + auto msg = + fmt("Cannot build '%s'.\n" + "Reason: " ANSI_RED "required system or feature not available" ANSI_NORMAL + "\n" + "Required system: '%s' with features {%s}\n" + "Current system: '%s' with features {%s}", + Magenta(worker.store.printStorePath(drvPath)), + Magenta(drv->platform), + concatStringsSep(", ", drvOptions->getRequiredSystemFeatures(*drv)), + Magenta(settings.thisSystem), + concatStringsSep(", ", worker.store.Store::config.systemFeatures)); + + // since aarch64-darwin has Rosetta 2, this user can actually run x86_64-darwin on their hardware - + // we should tell them to run the command to install Darwin 2 + if (drv->platform == "x86_64-darwin" && settings.thisSystem == "aarch64-darwin") + msg += fmt( + "\nNote: run `%s` to run programs for x86_64-darwin", + Magenta( + "/usr/sbin/softwareupdate --install-rosetta && launchctl stop org.nixos.nix-daemon")); + + builder.reset(); + outputLocks.unlock(); + worker.permanentFailure = true; + co_return doneFailure({BuildResult::Failure::InputRejected, std::move(msg)}); + } useHook = false; break; } @@ -771,36 +808,35 @@ Goal::Co DerivationBuildingGoal::tryToBuild() co_return doneFailure(std::move(e)); } + DerivationBuilderParams params{ + .drvPath = drvPath, + .buildResult = buildResult, + .drv = *drv, + .drvOptions = *drvOptions, + .inputPaths = inputPaths, + .initialOutputs = initialOutputs, + .buildMode = buildMode, + .defaultPathsInChroot = std::move(defaultPathsInChroot), + .systemFeatures = worker.store.config.systemFeatures.get(), + .desugaredEnv = std::move(desugaredEnv), + }; + /* If we have to wait and retry (see below), then `builder` will already be created, so we don't need to create it again. */ - builder = makeDerivationBuilder( - *localStoreP, - std::make_unique(*this, builder), - DerivationBuilderParams{ - .drvPath = drvPath, - .buildResult = buildResult, - .drv = *drv, - .drvOptions = *drvOptions, - .inputPaths = inputPaths, - .initialOutputs = initialOutputs, - .buildMode = buildMode, - .defaultPathsInChroot = std::move(defaultPathsInChroot), - .systemFeatures = worker.store.config.systemFeatures.get(), - .desugaredEnv = std::move(desugaredEnv), - }); + builder = externalBuilder ? makeExternalDerivationBuilder( + *localStoreP, + std::make_unique(*this, builder), + std::move(params), + *externalBuilder) + : makeDerivationBuilder( + *localStoreP, + std::make_unique(*this, builder), + std::move(params)); } - std::optional builderOutOpt; - try { - /* Okay, we have to build. 
*/ - builderOutOpt = builder->startBuild(); - } catch (BuildError & e) { - builder.reset(); - outputLocks.unlock(); - worker.permanentFailure = true; - co_return doneFailure(std::move(e)); // InputRejected - } - if (!builderOutOpt) { + if (auto builderOutOpt = builder->startBuild()) { + builderOut = *std::move(builderOutOpt); + } else { if (!actLock) actLock = std::make_unique( *logger, @@ -809,9 +845,7 @@ Goal::Co DerivationBuildingGoal::tryToBuild() fmt("waiting for a free build user ID for '%s'", Magenta(worker.store.printStorePath(drvPath)))); co_await waitForAWhile(); continue; - } else { - builderOut = *std::move(builderOutOpt); - }; + } break; } diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index 58a649fc5..4fdb820a9 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -258,6 +258,15 @@ Path Settings::getDefaultSSLCertFile() return ""; } +const ExternalBuilder * Settings::findExternalDerivationBuilderIfSupported(const Derivation & drv) +{ + if (auto it = std::ranges::find_if( + externalBuilders.get(), [&](const auto & handler) { return handler.systems.contains(drv.platform); }); + it != externalBuilders.get().end()) + return &*it; + return nullptr; +} + std::string nixVersion = PACKAGE_VERSION; NLOHMANN_JSON_SERIALIZE_ENUM( @@ -379,8 +388,6 @@ unsigned int MaxBuildJobsSetting::parse(const std::string & str) const } } -NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(Settings::ExternalBuilder, systems, program, args); - template<> Settings::ExternalBuilders BaseSetting::parse(const std::string & str) const { diff --git a/src/libstore/include/nix/store/build/derivation-builder.hh b/src/libstore/include/nix/store/build/derivation-builder.hh index 63ef2b665..5fad26e83 100644 --- a/src/libstore/include/nix/store/build/derivation-builder.hh +++ b/src/libstore/include/nix/store/build/derivation-builder.hh @@ -1,12 +1,15 @@ #pragma once ///@file +#include + #include "nix/store/build-result.hh" #include "nix/store/derivation-options.hh" #include "nix/store/build/derivation-building-misc.hh" #include "nix/store/derivations.hh" #include "nix/store/parsed-derivations.hh" #include "nix/util/processes.hh" +#include "nix/util/json-impls.hh" #include "nix/store/restricted-store.hh" #include "nix/store/build/derivation-env-desugar.hh" @@ -179,9 +182,28 @@ struct DerivationBuilder : RestrictionContext virtual bool killChild() = 0; }; +struct ExternalBuilder +{ + StringSet systems; + Path program; + std::vector args; +}; + #ifndef _WIN32 // TODO enable `DerivationBuilder` on Windows std::unique_ptr makeDerivationBuilder( LocalStore & store, std::unique_ptr miscMethods, DerivationBuilderParams params); + +/** + * @param handler Must be chosen such that it supports the given + * derivation. + */ +std::unique_ptr makeExternalDerivationBuilder( + LocalStore & store, + std::unique_ptr miscMethods, + DerivationBuilderParams params, + const ExternalBuilder & handler); #endif } // namespace nix + +JSON_IMPL(nix::ExternalBuilder) diff --git a/src/libstore/include/nix/store/globals.hh b/src/libstore/include/nix/store/globals.hh index be3561848..14647c05f 100644 --- a/src/libstore/include/nix/store/globals.hh +++ b/src/libstore/include/nix/store/globals.hh @@ -1373,13 +1373,6 @@ public: Set it to 1 to warn on all paths. 
)"}; - struct ExternalBuilder - { - StringSet systems; - Path program; - std::vector args; - }; - using ExternalBuilders = std::vector; Setting externalBuilders{ @@ -1443,6 +1436,12 @@ public: // Current system: 'aarch64-darwin' with features {apple-virt, benchmark, big-parallel, nixos-test} // Xp::ExternalBuilders }; + + /** + * Finds the first external derivation builder that supports this + * derivation, or else returns a null pointer. + */ + const ExternalBuilder * findExternalDerivationBuilderIfSupported(const Derivation & drv); }; // FIXME: don't use a global variable. diff --git a/src/libstore/meson.build b/src/libstore/meson.build index a3502c2e0..728de2dfd 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -298,6 +298,7 @@ sources = files( 'aws-creds.cc', 'binary-cache-store.cc', 'build-result.cc', + 'build/derivation-builder.cc', 'build/derivation-building-goal.cc', 'build/derivation-check.cc', 'build/derivation-env-desugar.cc', diff --git a/src/libstore/unix/build/derivation-builder.cc b/src/libstore/unix/build/derivation-builder.cc index 5bdd843bd..0158505a5 100644 --- a/src/libstore/unix/build/derivation-builder.cc +++ b/src/libstore/unix/build/derivation-builder.cc @@ -229,12 +229,6 @@ protected: return acquireUserLock(1, false); } - /** - * Throw an exception if we can't do this derivation because of - * missing system features. - */ - virtual void checkSystem(); - /** * Return the paths that should be made available in the sandbox. * This includes: @@ -672,33 +666,6 @@ static bool checkNotWorldWritable(std::filesystem::path path) return true; } -void DerivationBuilderImpl::checkSystem() -{ - /* Right platform? */ - if (!drvOptions.canBuildLocally(store, drv)) { - auto msg = - fmt("Cannot build '%s'.\n" - "Reason: " ANSI_RED "required system or feature not available" ANSI_NORMAL - "\n" - "Required system: '%s' with features {%s}\n" - "Current system: '%s' with features {%s}", - Magenta(store.printStorePath(drvPath)), - Magenta(drv.platform), - concatStringsSep(", ", drvOptions.getRequiredSystemFeatures(drv)), - Magenta(settings.thisSystem), - concatStringsSep(", ", store.Store::config.systemFeatures)); - - // since aarch64-darwin has Rosetta 2, this user can actually run x86_64-darwin on their hardware - we should - // tell them to run the command to install Darwin 2 - if (drv.platform == "x86_64-darwin" && settings.thisSystem == "aarch64-darwin") - msg += - fmt("\nNote: run `%s` to run programs for x86_64-darwin", - Magenta("/usr/sbin/softwareupdate --install-rosetta && launchctl stop org.nixos.nix-daemon")); - - throw BuildError(BuildResult::Failure::InputRejected, msg); - } -} - std::optional DerivationBuilderImpl::startBuild() { if (useBuildUsers()) { @@ -709,8 +676,6 @@ std::optional DerivationBuilderImpl::startBuild() return std::nullopt; } - checkSystem(); - /* Make sure that no other processes are executing under the sandbox uids. This must be done before any chownToBuilder() calls. */ @@ -1922,9 +1887,6 @@ namespace nix { std::unique_ptr makeDerivationBuilder( LocalStore & store, std::unique_ptr miscMethods, DerivationBuilderParams params) { - if (auto builder = ExternalDerivationBuilder::newIfSupported(store, miscMethods, params)) - return builder; - bool useSandbox = false; /* Are we doing a sandboxed build? 
*/ diff --git a/src/libstore/unix/build/external-derivation-builder.cc b/src/libstore/unix/build/external-derivation-builder.cc index ebcaad525..7ddb6e093 100644 --- a/src/libstore/unix/build/external-derivation-builder.cc +++ b/src/libstore/unix/build/external-derivation-builder.cc @@ -2,32 +2,19 @@ namespace nix { struct ExternalDerivationBuilder : DerivationBuilderImpl { - Settings::ExternalBuilder externalBuilder; + ExternalBuilder externalBuilder; ExternalDerivationBuilder( LocalStore & store, std::unique_ptr miscMethods, DerivationBuilderParams params, - Settings::ExternalBuilder externalBuilder) + ExternalBuilder externalBuilder) : DerivationBuilderImpl(store, std::move(miscMethods), std::move(params)) , externalBuilder(std::move(externalBuilder)) { experimentalFeatureSettings.require(Xp::ExternalBuilders); } - static std::unique_ptr newIfSupported( - LocalStore & store, std::unique_ptr & miscMethods, DerivationBuilderParams & params) - { - if (auto it = std::ranges::find_if( - settings.externalBuilders.get(), - [&](const auto & handler) { return handler.systems.contains(params.drv.platform); }); - it != settings.externalBuilders.get().end()) { - return std::make_unique( - store, std::move(miscMethods), std::move(params), *it); - } - return {}; - } - Path tmpDirInSandbox() override { /* In a sandbox, for determinism, always use the same temporary @@ -41,8 +28,6 @@ struct ExternalDerivationBuilder : DerivationBuilderImpl createDir(tmpDir, 0700); } - void checkSystem() override {} - void startChild() override { if (drvOptions.getRequiredSystemFeatures(drv).count("recursive-nix")) @@ -121,4 +106,13 @@ struct ExternalDerivationBuilder : DerivationBuilderImpl } }; +std::unique_ptr makeExternalDerivationBuilder( + LocalStore & store, + std::unique_ptr miscMethods, + DerivationBuilderParams params, + const ExternalBuilder & handler) +{ + return std::make_unique(store, std::move(miscMethods), std::move(params), handler); +} + } // namespace nix From f02218873e846d93e079b96de3a2ba1bb369c12a Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Fri, 10 Oct 2025 19:39:09 +0000 Subject: [PATCH 287/332] fix(libstore): improve http-binary-cache-store S3 compatibility This commit adds two key fixes to http-binary-cache-store.cc to properly support the new curl-based S3 implementation: 1. **Consistent cache key handling**: Use `getReference().render(withParams=false)` for disk cache keys instead of `cacheUri.to_string()`. This ensures cache keys are consistent with the S3 implementation and don't include query parameters, which matches the behavior expected by Store::queryPathInfo() lookups. 2. **S3 query parameter preservation**: When generating file transfer requests for S3 URLs, preserve query parameters from the base URL (region, endpoint, etc.) when the relative path doesn't have its own query parameters. This ensures S3-specific configuration is propagated to all requests. --- src/libstore/http-binary-cache-store.cc | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc index 5d4fba163..8d5f427af 100644 --- a/src/libstore/http-binary-cache-store.cc +++ b/src/libstore/http-binary-cache-store.cc @@ -78,7 +78,11 @@ public: void init() override { // FIXME: do this lazily? 
- if (auto cacheInfo = diskCache->upToDateCacheExists(config->cacheUri.to_string())) { + // For consistent cache key handling, use the reference without parameters + // This matches what's used in Store::queryPathInfo() lookups + auto cacheKey = config->getReference().render(/*withParams=*/false); + + if (auto cacheInfo = diskCache->upToDateCacheExists(cacheKey)) { config->wantMassQuery.setDefault(cacheInfo->wantMassQuery); config->priority.setDefault(cacheInfo->priority); } else { @@ -87,8 +91,7 @@ public: } catch (UploadToHTTP &) { throw Error("'%s' does not appear to be a binary cache", config->cacheUri.to_string()); } - diskCache->createCache( - config->cacheUri.to_string(), config->storeDir, config->wantMassQuery, config->priority); + diskCache->createCache(cacheKey, config->storeDir, config->wantMassQuery, config->priority); } } @@ -184,7 +187,16 @@ protected: field which is `nar/15f99rdaf26k39knmzry4xd0d97wp6yfpnfk1z9avakis7ipb9yg.nar?hash=zphkqn2wg8mnvbkixnl2aadkbn0rcnfj` (note the query param) and that gets passed here. */ - return FileTransferRequest(parseURLRelative(path, cacheUriWithTrailingSlash)); + auto result = parseURLRelative(path, cacheUriWithTrailingSlash); + + /* For S3 URLs, preserve query parameters from the base URL when the + relative path doesn't have its own query parameters. This is needed + to preserve S3-specific parameters like endpoint and region. */ + if (config->cacheUri.scheme == "s3" && result.query.empty()) { + result.query = config->cacheUri.query; + } + + return FileTransferRequest(result); } void getFile(const std::string & path, Sink & sink) override From 46382ade74bdd811ddeab7da33d57effaa76852a Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sat, 11 Oct 2025 01:30:21 +0300 Subject: [PATCH 288/332] libutil: Print stack trace on assertion failure This change overrides __assert_fail on glibc/musl to instead call std::terminate that we have a custom handler for. This ensures that we have more context to diagnose issues encountered by users in the wild. 
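For illustration (not part of the patch): a minimal standalone sketch of the `-Wl,--wrap` mechanism that the meson probe in this patch tests for, assuming a GNU-compatible linker and a libc (glibc/musl) where `assert()` lowers to `__assert_fail`. The linker redirects every reference to `__assert_fail` to `__wrap___assert_fail`, so a failed assertion lands in code we control; the real patch forwards to `nix::panic`, which prints the stack trace, while this sketch just logs and aborts. The file name and message text below are made up for the example.

    // Sketch only. Build with something like:
    //   g++ -std=c++20 wrap-demo.cc -Wl,--wrap=__assert_fail -o wrap-demo
    #include <cassert>
    #include <cstdio>
    #include <cstdlib>

    // With -Wl,--wrap=__assert_fail, the linker resolves references to
    // __assert_fail (what a failing assert() expands to on glibc/musl)
    // to this function instead of the libc implementation.
    extern "C" [[noreturn]] void
    __wrap___assert_fail(const char * assertion, const char * file, unsigned int line, const char * function)
    {
        // The patch calls nix::panic() here so the custom termination path
        // can print a stack trace; this sketch merely logs and aborts.
        std::fprintf(stderr, "Assertion '%s' failed in %s at %s:%u\n", assertion, function, file, line);
        std::abort();
    }

    int main()
    {
        assert(1 + 1 == 3); // routed to __wrap___assert_fail, not libc's handler
    }

The meson check in this patch links a similar probe first and only enables the wrapper when that link succeeds, so toolchains without `--wrap` (or without `__assert_fail`) are simply left alone.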
--- .../common/assert-fail/meson.build | 32 +++++++++++++++++++ .../common/assert-fail/wrap-assert-fail.cc | 17 ++++++++++ nix-meson-build-support/common/meson.build | 2 ++ 3 files changed, 51 insertions(+) create mode 100644 nix-meson-build-support/common/assert-fail/meson.build create mode 100644 nix-meson-build-support/common/assert-fail/wrap-assert-fail.cc diff --git a/nix-meson-build-support/common/assert-fail/meson.build b/nix-meson-build-support/common/assert-fail/meson.build new file mode 100644 index 000000000..7539b3921 --- /dev/null +++ b/nix-meson-build-support/common/assert-fail/meson.build @@ -0,0 +1,32 @@ +can_wrap_assert_fail_test_code = ''' +#include +#include + +int main() +{ + assert(0); +} + +extern "C" void * __real___assert_fail(const char *, const char *, unsigned int, const char *); + +extern "C" void * +__wrap___assert_fail(const char *, const char *, unsigned int, const char *) +{ + return __real___assert_fail(nullptr, nullptr, 0, nullptr); +} +''' + +wrap_assert_fail_args = [ '-Wl,--wrap=__assert_fail' ] + +can_wrap_assert_fail = cxx.links( + can_wrap_assert_fail_test_code, + args : wrap_assert_fail_args, + name : 'linker can wrap __assert_fail', +) + +if can_wrap_assert_fail + deps_other += declare_dependency( + sources : 'wrap-assert-fail.cc', + link_args : wrap_assert_fail_args, + ) +endif diff --git a/nix-meson-build-support/common/assert-fail/wrap-assert-fail.cc b/nix-meson-build-support/common/assert-fail/wrap-assert-fail.cc new file mode 100644 index 000000000..d9e34168b --- /dev/null +++ b/nix-meson-build-support/common/assert-fail/wrap-assert-fail.cc @@ -0,0 +1,17 @@ +#include "nix/util/error.hh" + +#include +#include +#include +#include + +extern "C" [[noreturn]] void __attribute__((weak)) +__wrap___assert_fail(const char * assertion, const char * file, unsigned int line, const char * function) +{ + char buf[512]; + int n = + snprintf(buf, sizeof(buf), "Assertion '%s' failed in %s at %s:%" PRIuLEAST32, assertion, function, file, line); + if (n < 0) + nix::panic("Assertion failed and could not format error message"); + nix::panic(std::string_view(buf, std::min(static_cast(sizeof(buf)), n))); +} diff --git a/nix-meson-build-support/common/meson.build b/nix-meson-build-support/common/meson.build index 8c4e98862..2944a733b 100644 --- a/nix-meson-build-support/common/meson.build +++ b/nix-meson-build-support/common/meson.build @@ -44,3 +44,5 @@ endif # Darwin ld doesn't like "X.Y.Zpre" nix_soversion = meson.project_version().split('pre')[0] + +subdir('assert-fail') From d26a337c09baf6d5d0c8310efd534f6c806afe20 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sat, 11 Oct 2025 16:08:35 +0300 Subject: [PATCH 289/332] meson: Move asan-options to common This way we don't have to duplicate the subdir everywhere. Less copy-pasta is good. 
--- nix-meson-build-support/{ => common}/asan-options/meson.build | 0 nix-meson-build-support/common/meson.build | 1 + src/libcmd/meson.build | 1 - src/libexpr-c/meson.build | 1 - src/libexpr-test-support/meson.build | 1 - src/libexpr-tests/meson.build | 1 - src/libexpr/meson.build | 1 - src/libfetchers-c/meson.build | 1 - src/libfetchers-tests/meson.build | 1 - src/libfetchers/meson.build | 1 - src/libflake-c/meson.build | 1 - src/libflake-tests/meson.build | 1 - src/libflake/meson.build | 1 - src/libmain-c/meson.build | 1 - src/libmain/meson.build | 1 - src/libstore-c/meson.build | 1 - src/libstore-test-support/meson.build | 1 - src/libstore-tests/meson.build | 1 - src/libstore/meson.build | 1 - src/libutil-c/meson.build | 1 - src/libutil-test-support/meson.build | 1 - src/libutil-tests/meson.build | 1 - src/libutil/meson.build | 1 - src/nix/meson.build | 1 - tests/functional/test-libstoreconsumer/meson.build | 2 +- 25 files changed, 2 insertions(+), 23 deletions(-) rename nix-meson-build-support/{ => common}/asan-options/meson.build (100%) diff --git a/nix-meson-build-support/asan-options/meson.build b/nix-meson-build-support/common/asan-options/meson.build similarity index 100% rename from nix-meson-build-support/asan-options/meson.build rename to nix-meson-build-support/common/asan-options/meson.build diff --git a/nix-meson-build-support/common/meson.build b/nix-meson-build-support/common/meson.build index 2944a733b..99bfbd486 100644 --- a/nix-meson-build-support/common/meson.build +++ b/nix-meson-build-support/common/meson.build @@ -46,3 +46,4 @@ endif nix_soversion = meson.project_version().split('pre')[0] subdir('assert-fail') +subdir('asan-options') diff --git a/src/libcmd/meson.build b/src/libcmd/meson.build index 3833d7e0a..f553afa0b 100644 --- a/src/libcmd/meson.build +++ b/src/libcmd/meson.build @@ -67,7 +67,6 @@ config_priv_h = configure_file( ) subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'built-path.cc', diff --git a/src/libexpr-c/meson.build b/src/libexpr-c/meson.build index 03cee41a0..c47704ce4 100644 --- a/src/libexpr-c/meson.build +++ b/src/libexpr-c/meson.build @@ -28,7 +28,6 @@ deps_public_maybe_subproject = [ subdir('nix-meson-build-support/subprojects') subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'nix_api_expr.cc', diff --git a/src/libexpr-test-support/meson.build b/src/libexpr-test-support/meson.build index 01a3f3bcb..df28661b7 100644 --- a/src/libexpr-test-support/meson.build +++ b/src/libexpr-test-support/meson.build @@ -31,7 +31,6 @@ rapidcheck = dependency('rapidcheck') deps_public += rapidcheck subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'tests/value/context.cc', diff --git a/src/libexpr-tests/meson.build b/src/libexpr-tests/meson.build index 7f7c08955..d1700b11d 100644 --- a/src/libexpr-tests/meson.build +++ b/src/libexpr-tests/meson.build @@ -45,7 +45,6 @@ config_priv_h = configure_file( ) subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'derived-path.cc', diff --git a/src/libexpr/meson.build b/src/libexpr/meson.build index d24e7fae3..1314ab65b 100644 --- a/src/libexpr/meson.build +++ b/src/libexpr/meson.build @@ -97,7 +97,6 @@ config_priv_h = configure_file( ) subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') parser_tab = custom_target( input : 'parser.y', diff --git 
a/src/libfetchers-c/meson.build b/src/libfetchers-c/meson.build index 3761b0df2..db415d917 100644 --- a/src/libfetchers-c/meson.build +++ b/src/libfetchers-c/meson.build @@ -32,7 +32,6 @@ add_project_arguments( ) subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'nix_api_fetchers.cc', diff --git a/src/libfetchers-tests/meson.build b/src/libfetchers-tests/meson.build index 858d7f3af..905e06db0 100644 --- a/src/libfetchers-tests/meson.build +++ b/src/libfetchers-tests/meson.build @@ -37,7 +37,6 @@ libgit2 = dependency('libgit2') deps_private += libgit2 subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'access-tokens.cc', diff --git a/src/libfetchers/meson.build b/src/libfetchers/meson.build index 5b53a147b..d34dd4f43 100644 --- a/src/libfetchers/meson.build +++ b/src/libfetchers/meson.build @@ -32,7 +32,6 @@ libgit2 = dependency('libgit2', version : '>= 1.9') deps_private += libgit2 subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'attrs.cc', diff --git a/src/libflake-c/meson.build b/src/libflake-c/meson.build index d0d45cfa8..fddb39bdf 100644 --- a/src/libflake-c/meson.build +++ b/src/libflake-c/meson.build @@ -32,7 +32,6 @@ deps_public_maybe_subproject = [ subdir('nix-meson-build-support/subprojects') subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'nix_api_flake.cc', diff --git a/src/libflake-tests/meson.build b/src/libflake-tests/meson.build index 41ae6cf3d..a75603970 100644 --- a/src/libflake-tests/meson.build +++ b/src/libflake-tests/meson.build @@ -34,7 +34,6 @@ gtest = dependency('gtest', main : true) deps_private += gtest subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'flakeref.cc', diff --git a/src/libflake/meson.build b/src/libflake/meson.build index 3bd04fcf4..58916ecd9 100644 --- a/src/libflake/meson.build +++ b/src/libflake/meson.build @@ -29,7 +29,6 @@ nlohmann_json = dependency('nlohmann_json', version : '>= 3.9') deps_public += nlohmann_json subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') subdir('nix-meson-build-support/generate-header') diff --git a/src/libmain-c/meson.build b/src/libmain-c/meson.build index 2ac2b799b..36332fdb7 100644 --- a/src/libmain-c/meson.build +++ b/src/libmain-c/meson.build @@ -28,7 +28,6 @@ deps_public_maybe_subproject = [ subdir('nix-meson-build-support/subprojects') subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'nix_api_main.cc', diff --git a/src/libmain/meson.build b/src/libmain/meson.build index 21bfbea3e..2ac59924e 100644 --- a/src/libmain/meson.build +++ b/src/libmain/meson.build @@ -53,7 +53,6 @@ config_priv_h = configure_file( ) subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'common-args.cc', diff --git a/src/libstore-c/meson.build b/src/libstore-c/meson.build index a92771efc..c6b6174c7 100644 --- a/src/libstore-c/meson.build +++ b/src/libstore-c/meson.build @@ -26,7 +26,6 @@ deps_public_maybe_subproject = [ subdir('nix-meson-build-support/subprojects') subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'nix_api_store.cc', diff --git a/src/libstore-test-support/meson.build b/src/libstore-test-support/meson.build index e929ae2b4..8617225d7 100644 --- 
a/src/libstore-test-support/meson.build +++ b/src/libstore-test-support/meson.build @@ -29,7 +29,6 @@ rapidcheck = dependency('rapidcheck') deps_public += rapidcheck subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'derived-path.cc', diff --git a/src/libstore-tests/meson.build b/src/libstore-tests/meson.build index 2784b31dc..399e2abd5 100644 --- a/src/libstore-tests/meson.build +++ b/src/libstore-tests/meson.build @@ -52,7 +52,6 @@ gtest = dependency('gmock') deps_private += gtest subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'common-protocol.cc', diff --git a/src/libstore/meson.build b/src/libstore/meson.build index 728de2dfd..8ec39dac1 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -292,7 +292,6 @@ config_priv_h = configure_file( ) subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'aws-creds.cc', diff --git a/src/libutil-c/meson.build b/src/libutil-c/meson.build index 54fd53c74..1806dbb6f 100644 --- a/src/libutil-c/meson.build +++ b/src/libutil-c/meson.build @@ -32,7 +32,6 @@ config_priv_h = configure_file( ) subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'nix_api_util.cc', diff --git a/src/libutil-test-support/meson.build b/src/libutil-test-support/meson.build index 1ca251ce8..64231107e 100644 --- a/src/libutil-test-support/meson.build +++ b/src/libutil-test-support/meson.build @@ -27,7 +27,6 @@ rapidcheck = dependency('rapidcheck') deps_public += rapidcheck subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'hash.cc', diff --git a/src/libutil-tests/meson.build b/src/libutil-tests/meson.build index 83245a73d..87af49933 100644 --- a/src/libutil-tests/meson.build +++ b/src/libutil-tests/meson.build @@ -42,7 +42,6 @@ config_priv_h = configure_file( ) subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'archive.cc', diff --git a/src/libutil/meson.build b/src/libutil/meson.build index 8c9e1f1eb..f4b8dbb61 100644 --- a/src/libutil/meson.build +++ b/src/libutil/meson.build @@ -118,7 +118,6 @@ config_priv_h = configure_file( ) subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = [ config_priv_h ] + files( 'archive.cc', diff --git a/src/nix/meson.build b/src/nix/meson.build index f67a2948f..9bee2d147 100644 --- a/src/nix/meson.build +++ b/src/nix/meson.build @@ -56,7 +56,6 @@ config_priv_h = configure_file( ) subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') subdir('nix-meson-build-support/generate-header') nix_sources = [ config_priv_h ] + files( diff --git a/tests/functional/test-libstoreconsumer/meson.build b/tests/functional/test-libstoreconsumer/meson.build index 7f619d01b..7c95b0c4a 100644 --- a/tests/functional/test-libstoreconsumer/meson.build +++ b/tests/functional/test-libstoreconsumer/meson.build @@ -1,6 +1,6 @@ cxx = meson.get_compiler('cpp') -subdir('nix-meson-build-support/asan-options') +subdir('nix-meson-build-support/common/asan-options') libstoreconsumer_tester = executable( 'test-libstoreconsumer', From 47705139c92bc8bec9dff316d005f2b152258121 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sat, 11 Oct 2025 16:30:55 +0300 Subject: [PATCH 290/332] packaging: Remove no longer necessary libgit2 patches 25.05 already has 1.9.0 and 
we don't support older nixpkgs versions. --- packaging/dependencies.nix | 34 - .../libgit2-mempack-thin-packfile.patch | 282 ------ ...2-packbuilder-callback-interruptible.patch | 930 ------------------ 3 files changed, 1246 deletions(-) delete mode 100644 packaging/patches/libgit2-mempack-thin-packfile.patch delete mode 100644 packaging/patches/libgit2-packbuilder-callback-interruptible.patch diff --git a/packaging/dependencies.nix b/packaging/dependencies.nix index 7f815f128..5581719b5 100644 --- a/packaging/dependencies.nix +++ b/packaging/dependencies.nix @@ -89,38 +89,4 @@ scope: { buildPhase = lib.replaceStrings [ "--without-python" ] [ "" ] old.buildPhase; installPhase = lib.replaceStrings [ "--without-python" ] [ "" ] old.installPhase; }); - - libgit2 = - if lib.versionAtLeast pkgs.libgit2.version "1.9.0" then - pkgs.libgit2 - else - pkgs.libgit2.overrideAttrs (attrs: { - # libgit2: Nixpkgs 24.11 has < 1.9.0, which needs our patches - nativeBuildInputs = - attrs.nativeBuildInputs or [ ] - # gitMinimal does not build on Windows. See packbuilder patch. - ++ lib.optionals (!stdenv.hostPlatform.isWindows) [ - # Needed for `git apply`; see `prePatch` - pkgs.buildPackages.gitMinimal - ]; - # Only `git apply` can handle git binary patches - prePatch = - attrs.prePatch or "" - + lib.optionalString (!stdenv.hostPlatform.isWindows) '' - patch() { - git apply - } - ''; - patches = - attrs.patches or [ ] - ++ [ - ./patches/libgit2-mempack-thin-packfile.patch - ] - # gitMinimal does not build on Windows, but fortunately this patch only - # impacts interruptibility - ++ lib.optionals (!stdenv.hostPlatform.isWindows) [ - # binary patch; see `prePatch` - ./patches/libgit2-packbuilder-callback-interruptible.patch - ]; - }); } diff --git a/packaging/patches/libgit2-mempack-thin-packfile.patch b/packaging/patches/libgit2-mempack-thin-packfile.patch deleted file mode 100644 index fb74b1683..000000000 --- a/packaging/patches/libgit2-mempack-thin-packfile.patch +++ /dev/null @@ -1,282 +0,0 @@ -commit 9bacade4a3ef4b6b26e2c02f549eef0e9eb9eaa2 -Author: Robert Hensing -Date: Sun Aug 18 20:20:36 2024 +0200 - - Add unoptimized git_mempack_write_thin_pack - -diff --git a/include/git2/sys/mempack.h b/include/git2/sys/mempack.h -index 17da590a3..3688bdd50 100644 ---- a/include/git2/sys/mempack.h -+++ b/include/git2/sys/mempack.h -@@ -44,6 +44,29 @@ GIT_BEGIN_DECL - */ - GIT_EXTERN(int) git_mempack_new(git_odb_backend **out); - -+/** -+ * Write a thin packfile with the objects in the memory store. -+ * -+ * A thin packfile is a packfile that does not contain its transitive closure of -+ * references. This is useful for efficiently distributing additions to a -+ * repository over the network, but also finds use in the efficient bulk -+ * addition of objects to a repository, locally. -+ * -+ * This operation performs the (shallow) insert operations into the -+ * `git_packbuilder`, but does not write the packfile to disk; -+ * see `git_packbuilder_write_buf`. -+ * -+ * It also does not reset the memory store; see `git_mempack_reset`. -+ * -+ * @note This function may or may not write trees and blobs that are not -+ * referenced by commits. Currently everything is written, but this -+ * behavior may change in the future as the packer is optimized. -+ * -+ * @param backend The mempack backend -+ * @param pb The packbuilder to use to write the packfile -+ */ -+GIT_EXTERN(int) git_mempack_write_thin_pack(git_odb_backend *backend, git_packbuilder *pb); -+ - /** - * Dump all the queued in-memory writes to a packfile. 
- * -diff --git a/src/libgit2/odb_mempack.c b/src/libgit2/odb_mempack.c -index 6f27f45f8..0b61e2b66 100644 ---- a/src/libgit2/odb_mempack.c -+++ b/src/libgit2/odb_mempack.c -@@ -132,6 +132,35 @@ cleanup: - return err; - } - -+int git_mempack_write_thin_pack(git_odb_backend *backend, git_packbuilder *pb) -+{ -+ struct memory_packer_db *db = (struct memory_packer_db *)backend; -+ const git_oid *oid; -+ size_t iter = 0; -+ int err = -1; -+ -+ /* TODO: Implement the recency heuristics. -+ For this it probably makes sense to only write what's referenced -+ through commits, an option I've carved out for you in the docs. -+ wrt heuristics: ask your favorite LLM to translate https://git-scm.com/docs/pack-heuristics/en -+ to actual normal reference documentation. */ -+ while (true) { -+ err = git_oidmap_iterate(NULL, db->objects, &iter, &oid); -+ if (err == GIT_ITEROVER) { -+ err = 0; -+ break; -+ } -+ if (err != 0) -+ return err; -+ -+ err = git_packbuilder_insert(pb, oid, NULL); -+ if (err != 0) -+ return err; -+ } -+ -+ return 0; -+} -+ - int git_mempack_dump( - git_buf *pack, - git_repository *repo, -diff --git a/tests/libgit2/mempack/thinpack.c b/tests/libgit2/mempack/thinpack.c -new file mode 100644 -index 000000000..604a4dda2 ---- /dev/null -+++ b/tests/libgit2/mempack/thinpack.c -@@ -0,0 +1,196 @@ -+#include "clar_libgit2.h" -+#include "git2/indexer.h" -+#include "git2/odb_backend.h" -+#include "git2/tree.h" -+#include "git2/types.h" -+#include "git2/sys/mempack.h" -+#include "git2/sys/odb_backend.h" -+#include "util.h" -+ -+static git_repository *_repo; -+static git_odb_backend * _mempack_backend; -+ -+void test_mempack_thinpack__initialize(void) -+{ -+ git_odb *odb; -+ -+ _repo = cl_git_sandbox_init_new("mempack_thinpack_repo"); -+ -+ cl_git_pass(git_mempack_new(&_mempack_backend)); -+ cl_git_pass(git_repository_odb(&odb, _repo)); -+ cl_git_pass(git_odb_add_backend(odb, _mempack_backend, 999)); -+ git_odb_free(odb); -+} -+ -+void _mempack_thinpack__cleanup(void) -+{ -+ cl_git_sandbox_cleanup(); -+} -+ -+/* -+ Generating a packfile for an unchanged repo works and produces an empty packfile. -+ Even if we allow this scenario to be detected, it shouldn't misbehave if the -+ application is unaware of it. -+*/ -+void test_mempack_thinpack__empty(void) -+{ -+ git_packbuilder *pb; -+ int version; -+ int n; -+ git_buf buf = GIT_BUF_INIT; -+ -+ git_packbuilder_new(&pb, _repo); -+ -+ cl_git_pass(git_mempack_write_thin_pack(_mempack_backend, pb)); -+ cl_git_pass(git_packbuilder_write_buf(&buf, pb)); -+ cl_assert_in_range(12, buf.size, 1024 /* empty packfile is >0 bytes, but certainly not that big */); -+ cl_assert(buf.ptr[0] == 'P'); -+ cl_assert(buf.ptr[1] == 'A'); -+ cl_assert(buf.ptr[2] == 'C'); -+ cl_assert(buf.ptr[3] == 'K'); -+ version = (buf.ptr[4] << 24) | (buf.ptr[5] << 16) | (buf.ptr[6] << 8) | buf.ptr[7]; -+ /* Subject to change. https://git-scm.com/docs/pack-format: Git currently accepts version number 2 or 3 but generates version 2 only.*/ -+ cl_assert_equal_i(2, version); -+ n = (buf.ptr[8] << 24) | (buf.ptr[9] << 16) | (buf.ptr[10] << 8) | buf.ptr[11]; -+ cl_assert_equal_i(0, n); -+ git_buf_dispose(&buf); -+ -+ git_packbuilder_free(pb); -+} -+ -+#define LIT_LEN(x) x, sizeof(x) - 1 -+ -+/* -+ Check that git_mempack_write_thin_pack produces a thin packfile. -+*/ -+void test_mempack_thinpack__thin(void) -+{ -+ /* Outline: -+ - Create tree 1 -+ - Flush to packfile A -+ - Create tree 2 -+ - Flush to packfile B -+ -+ Tree 2 has a new blob and a reference to a blob from tree 1. 
-+ -+ Expectation: -+ - Packfile B is thin and does not contain the objects from packfile A -+ */ -+ -+ -+ git_oid oid_blob_1; -+ git_oid oid_blob_2; -+ git_oid oid_blob_3; -+ git_oid oid_tree_1; -+ git_oid oid_tree_2; -+ git_treebuilder *tb; -+ -+ git_packbuilder *pb; -+ git_buf buf = GIT_BUF_INIT; -+ git_indexer *indexer; -+ git_indexer_progress stats; -+ char pack_dir_path[1024]; -+ -+ char sbuf[1024]; -+ const char * repo_path; -+ const char * pack_name_1; -+ const char * pack_name_2; -+ git_str pack_path_1 = GIT_STR_INIT; -+ git_str pack_path_2 = GIT_STR_INIT; -+ git_odb_backend * pack_odb_backend_1; -+ git_odb_backend * pack_odb_backend_2; -+ -+ -+ cl_assert_in_range(0, snprintf(pack_dir_path, sizeof(pack_dir_path), "%s/objects/pack", git_repository_path(_repo)), sizeof(pack_dir_path)); -+ -+ /* Create tree 1 */ -+ -+ cl_git_pass(git_blob_create_from_buffer(&oid_blob_1, _repo, LIT_LEN("thinpack blob 1"))); -+ cl_git_pass(git_blob_create_from_buffer(&oid_blob_2, _repo, LIT_LEN("thinpack blob 2"))); -+ -+ -+ cl_git_pass(git_treebuilder_new(&tb, _repo, NULL)); -+ cl_git_pass(git_treebuilder_insert(NULL, tb, "blob1", &oid_blob_1, GIT_FILEMODE_BLOB)); -+ cl_git_pass(git_treebuilder_insert(NULL, tb, "blob2", &oid_blob_2, GIT_FILEMODE_BLOB)); -+ cl_git_pass(git_treebuilder_write(&oid_tree_1, tb)); -+ -+ /* Flush */ -+ -+ cl_git_pass(git_packbuilder_new(&pb, _repo)); -+ cl_git_pass(git_mempack_write_thin_pack(_mempack_backend, pb)); -+ cl_git_pass(git_packbuilder_write_buf(&buf, pb)); -+ cl_git_pass(git_indexer_new(&indexer, pack_dir_path, 0, NULL, NULL)); -+ cl_git_pass(git_indexer_append(indexer, buf.ptr, buf.size, &stats)); -+ cl_git_pass(git_indexer_commit(indexer, &stats)); -+ pack_name_1 = strdup(git_indexer_name(indexer)); -+ cl_assert(pack_name_1); -+ git_buf_dispose(&buf); -+ git_mempack_reset(_mempack_backend); -+ git_indexer_free(indexer); -+ git_packbuilder_free(pb); -+ -+ /* Create tree 2 */ -+ -+ cl_git_pass(git_treebuilder_clear(tb)); -+ /* blob 1 won't be used, but we add it anyway to test that just "declaring" an object doesn't -+ necessarily cause its inclusion in the next thin packfile. It must only be included if new. 
*/ -+ cl_git_pass(git_blob_create_from_buffer(&oid_blob_1, _repo, LIT_LEN("thinpack blob 1"))); -+ cl_git_pass(git_blob_create_from_buffer(&oid_blob_3, _repo, LIT_LEN("thinpack blob 3"))); -+ cl_git_pass(git_treebuilder_insert(NULL, tb, "blob1", &oid_blob_1, GIT_FILEMODE_BLOB)); -+ cl_git_pass(git_treebuilder_insert(NULL, tb, "blob3", &oid_blob_3, GIT_FILEMODE_BLOB)); -+ cl_git_pass(git_treebuilder_write(&oid_tree_2, tb)); -+ -+ /* Flush */ -+ -+ cl_git_pass(git_packbuilder_new(&pb, _repo)); -+ cl_git_pass(git_mempack_write_thin_pack(_mempack_backend, pb)); -+ cl_git_pass(git_packbuilder_write_buf(&buf, pb)); -+ cl_git_pass(git_indexer_new(&indexer, pack_dir_path, 0, NULL, NULL)); -+ cl_git_pass(git_indexer_append(indexer, buf.ptr, buf.size, &stats)); -+ cl_git_pass(git_indexer_commit(indexer, &stats)); -+ pack_name_2 = strdup(git_indexer_name(indexer)); -+ cl_assert(pack_name_2); -+ git_buf_dispose(&buf); -+ git_mempack_reset(_mempack_backend); -+ git_indexer_free(indexer); -+ git_packbuilder_free(pb); -+ git_treebuilder_free(tb); -+ -+ /* Assertions */ -+ -+ assert(pack_name_1); -+ assert(pack_name_2); -+ -+ repo_path = git_repository_path(_repo); -+ -+ snprintf(sbuf, sizeof(sbuf), "objects/pack/pack-%s.pack", pack_name_1); -+ git_str_joinpath(&pack_path_1, repo_path, sbuf); -+ snprintf(sbuf, sizeof(sbuf), "objects/pack/pack-%s.pack", pack_name_2); -+ git_str_joinpath(&pack_path_2, repo_path, sbuf); -+ -+ /* If they're the same, something definitely went wrong. */ -+ cl_assert(strcmp(pack_name_1, pack_name_2) != 0); -+ -+ cl_git_pass(git_odb_backend_one_pack(&pack_odb_backend_1, pack_path_1.ptr)); -+ cl_assert(pack_odb_backend_1->exists(pack_odb_backend_1, &oid_blob_1)); -+ cl_assert(pack_odb_backend_1->exists(pack_odb_backend_1, &oid_blob_2)); -+ cl_assert(!pack_odb_backend_1->exists(pack_odb_backend_1, &oid_blob_3)); -+ cl_assert(pack_odb_backend_1->exists(pack_odb_backend_1, &oid_tree_1)); -+ cl_assert(!pack_odb_backend_1->exists(pack_odb_backend_1, &oid_tree_2)); -+ -+ cl_git_pass(git_odb_backend_one_pack(&pack_odb_backend_2, pack_path_2.ptr)); -+ /* blob 1 is already in the packfile 1, so packfile 2 must not include it, in order to be _thin_. */ -+ cl_assert(!pack_odb_backend_2->exists(pack_odb_backend_2, &oid_blob_1)); -+ cl_assert(!pack_odb_backend_2->exists(pack_odb_backend_2, &oid_blob_2)); -+ cl_assert(pack_odb_backend_2->exists(pack_odb_backend_2, &oid_blob_3)); -+ cl_assert(!pack_odb_backend_2->exists(pack_odb_backend_2, &oid_tree_1)); -+ cl_assert(pack_odb_backend_2->exists(pack_odb_backend_2, &oid_tree_2)); -+ -+ pack_odb_backend_1->free(pack_odb_backend_1); -+ pack_odb_backend_2->free(pack_odb_backend_2); -+ free((void *)pack_name_1); -+ free((void *)pack_name_2); -+ git_str_dispose(&pack_path_1); -+ git_str_dispose(&pack_path_2); -+ -+} diff --git a/packaging/patches/libgit2-packbuilder-callback-interruptible.patch b/packaging/patches/libgit2-packbuilder-callback-interruptible.patch deleted file mode 100644 index c67822ff7..000000000 --- a/packaging/patches/libgit2-packbuilder-callback-interruptible.patch +++ /dev/null @@ -1,930 +0,0 @@ -commit e9823c5da4fa977c46bcb97167fbdd0d70adb5ff -Author: Robert Hensing -Date: Mon Aug 26 20:07:04 2024 +0200 - - Make packbuilder interruptible using progress callback - - Forward errors from packbuilder->progress_cb - - This allows the callback to terminate long-running operations when - the application is interrupted. 
- -diff --git a/include/git2/pack.h b/include/git2/pack.h -index 0f6bd2ab9..bee72a6c0 100644 ---- a/include/git2/pack.h -+++ b/include/git2/pack.h -@@ -247,6 +247,9 @@ typedef int GIT_CALLBACK(git_packbuilder_progress)( - * @param progress_cb Function to call with progress information during - * pack building. Be aware that this is called inline with pack building - * operations, so performance may be affected. -+ * When progress_cb returns an error, the pack building process will be -+ * aborted and the error will be returned from the invoked function. -+ * `pb` must then be freed. - * @param progress_cb_payload Payload for progress callback. - * @return 0 or an error code - */ -diff --git a/src/libgit2/pack-objects.c b/src/libgit2/pack-objects.c -index b2d80cba9..7c331c2d5 100644 ---- a/src/libgit2/pack-objects.c -+++ b/src/libgit2/pack-objects.c -@@ -932,6 +932,9 @@ static int report_delta_progress( - { - int ret; - -+ if (pb->failure) -+ return pb->failure; -+ - if (pb->progress_cb) { - uint64_t current_time = git_time_monotonic(); - uint64_t elapsed = current_time - pb->last_progress_report_time; -@@ -943,8 +946,10 @@ static int report_delta_progress( - GIT_PACKBUILDER_DELTAFICATION, - count, pb->nr_objects, pb->progress_cb_payload); - -- if (ret) -+ if (ret) { -+ pb->failure = ret; - return git_error_set_after_callback(ret); -+ } - } - } - -@@ -976,7 +981,10 @@ static int find_deltas(git_packbuilder *pb, git_pobject **list, - } - - pb->nr_deltified += 1; -- report_delta_progress(pb, pb->nr_deltified, false); -+ if ((error = report_delta_progress(pb, pb->nr_deltified, false)) < 0) { -+ GIT_ASSERT(git_packbuilder__progress_unlock(pb) == 0); -+ goto on_error; -+ } - - po = *list++; - (*list_size)--; -@@ -1124,6 +1132,10 @@ struct thread_params { - size_t depth; - size_t working; - size_t data_ready; -+ -+ /* A pb->progress_cb can stop the packing process by returning an error. -+ When that happens, all threads observe the error and stop voluntarily. 
*/ -+ bool stopped; - }; - - static void *threaded_find_deltas(void *arg) -@@ -1133,7 +1145,12 @@ static void *threaded_find_deltas(void *arg) - while (me->remaining) { - if (find_deltas(me->pb, me->list, &me->remaining, - me->window, me->depth) < 0) { -- ; /* TODO */ -+ me->stopped = true; -+ GIT_ASSERT_WITH_RETVAL(git_packbuilder__progress_lock(me->pb) == 0, NULL); -+ me->working = false; -+ git_cond_signal(&me->pb->progress_cond); -+ GIT_ASSERT_WITH_RETVAL(git_packbuilder__progress_unlock(me->pb) == 0, NULL); -+ return NULL; - } - - GIT_ASSERT_WITH_RETVAL(git_packbuilder__progress_lock(me->pb) == 0, NULL); -@@ -1175,8 +1192,7 @@ static int ll_find_deltas(git_packbuilder *pb, git_pobject **list, - pb->nr_threads = git__online_cpus(); - - if (pb->nr_threads <= 1) { -- find_deltas(pb, list, &list_size, window, depth); -- return 0; -+ return find_deltas(pb, list, &list_size, window, depth); - } - - p = git__mallocarray(pb->nr_threads, sizeof(*p)); -@@ -1195,6 +1211,7 @@ static int ll_find_deltas(git_packbuilder *pb, git_pobject **list, - p[i].depth = depth; - p[i].working = 1; - p[i].data_ready = 0; -+ p[i].stopped = 0; - - /* try to split chunks on "path" boundaries */ - while (sub_size && sub_size < list_size && -@@ -1262,7 +1279,7 @@ static int ll_find_deltas(git_packbuilder *pb, git_pobject **list, - (!victim || victim->remaining < p[i].remaining)) - victim = &p[i]; - -- if (victim) { -+ if (victim && !target->stopped) { - sub_size = victim->remaining / 2; - list = victim->list + victim->list_size - sub_size; - while (sub_size && list[0]->hash && -@@ -1286,7 +1303,7 @@ static int ll_find_deltas(git_packbuilder *pb, git_pobject **list, - } - target->list_size = sub_size; - target->remaining = sub_size; -- target->working = 1; -+ target->working = 1; /* even when target->stopped, so that we don't process this thread again */ - GIT_ASSERT(git_packbuilder__progress_unlock(pb) == 0); - - if (git_mutex_lock(&target->mutex)) { -@@ -1299,7 +1316,7 @@ static int ll_find_deltas(git_packbuilder *pb, git_pobject **list, - git_cond_signal(&target->cond); - git_mutex_unlock(&target->mutex); - -- if (!sub_size) { -+ if (target->stopped || !sub_size) { - git_thread_join(&target->thread, NULL); - git_cond_free(&target->cond); - git_mutex_free(&target->mutex); -@@ -1308,7 +1325,7 @@ static int ll_find_deltas(git_packbuilder *pb, git_pobject **list, - } - - git__free(p); -- return 0; -+ return pb->failure; - } - - #else -@@ -1319,6 +1336,7 @@ int git_packbuilder__prepare(git_packbuilder *pb) - { - git_pobject **delta_list; - size_t i, n = 0; -+ int error; - - if (pb->nr_objects == 0 || pb->done) - return 0; /* nothing to do */ -@@ -1327,8 +1345,10 @@ int git_packbuilder__prepare(git_packbuilder *pb) - * Although we do not report progress during deltafication, we - * at least report that we are in the deltafication stage - */ -- if (pb->progress_cb) -- pb->progress_cb(GIT_PACKBUILDER_DELTAFICATION, 0, pb->nr_objects, pb->progress_cb_payload); -+ if (pb->progress_cb) { -+ if ((error = pb->progress_cb(GIT_PACKBUILDER_DELTAFICATION, 0, pb->nr_objects, pb->progress_cb_payload)) < 0) -+ return git_error_set_after_callback(error); -+ } - - delta_list = git__mallocarray(pb->nr_objects, sizeof(*delta_list)); - GIT_ERROR_CHECK_ALLOC(delta_list); -@@ -1345,31 +1365,33 @@ int git_packbuilder__prepare(git_packbuilder *pb) - - if (n > 1) { - git__tsort((void **)delta_list, n, type_size_sort); -- if (ll_find_deltas(pb, delta_list, n, -+ if ((error = ll_find_deltas(pb, delta_list, n, - GIT_PACK_WINDOW + 1, -- 
GIT_PACK_DEPTH) < 0) { -+ GIT_PACK_DEPTH)) < 0) { - git__free(delta_list); -- return -1; -+ return error; - } - } - -- report_delta_progress(pb, pb->nr_objects, true); -+ error = report_delta_progress(pb, pb->nr_objects, true); - - pb->done = true; - git__free(delta_list); -- return 0; -+ return error; - } - --#define PREPARE_PACK if (git_packbuilder__prepare(pb) < 0) { return -1; } -+#define PREPARE_PACK error = git_packbuilder__prepare(pb); if (error < 0) { return error; } - - int git_packbuilder_foreach(git_packbuilder *pb, int (*cb)(void *buf, size_t size, void *payload), void *payload) - { -+ int error; - PREPARE_PACK; - return write_pack(pb, cb, payload); - } - - int git_packbuilder__write_buf(git_str *buf, git_packbuilder *pb) - { -+ int error; - PREPARE_PACK; - - return write_pack(pb, &write_pack_buf, buf); -diff --git a/src/libgit2/pack-objects.h b/src/libgit2/pack-objects.h -index bbc8b9430..380a28ebe 100644 ---- a/src/libgit2/pack-objects.h -+++ b/src/libgit2/pack-objects.h -@@ -100,6 +100,10 @@ struct git_packbuilder { - uint64_t last_progress_report_time; - - bool done; -+ -+ /* A non-zero error code in failure causes all threads to shut themselves -+ down. Some functions will return this error code. */ -+ volatile int failure; - }; - - int git_packbuilder__write_buf(git_str *buf, git_packbuilder *pb); -diff --git a/tests/libgit2/pack/cancel.c b/tests/libgit2/pack/cancel.c -new file mode 100644 -index 000000000..a0aa9716a ---- /dev/null -+++ b/tests/libgit2/pack/cancel.c -@@ -0,0 +1,240 @@ -+#include "clar_libgit2.h" -+#include "futils.h" -+#include "pack.h" -+#include "hash.h" -+#include "iterator.h" -+#include "vector.h" -+#include "posix.h" -+#include "hash.h" -+#include "pack-objects.h" -+ -+static git_repository *_repo; -+static git_revwalk *_revwalker; -+static git_packbuilder *_packbuilder; -+static git_indexer *_indexer; -+static git_vector _commits; -+static int _commits_is_initialized; -+static git_indexer_progress _stats; -+ -+extern bool git_disable_pack_keep_file_checks; -+ -+static void pack_packbuilder_init(const char *sandbox) { -+ _repo = cl_git_sandbox_init(sandbox); -+ /* cl_git_pass(p_chdir(sandbox)); */ -+ cl_git_pass(git_revwalk_new(&_revwalker, _repo)); -+ cl_git_pass(git_packbuilder_new(&_packbuilder, _repo)); -+ cl_git_pass(git_vector_init(&_commits, 0, NULL)); -+ _commits_is_initialized = 1; -+ memset(&_stats, 0, sizeof(_stats)); -+ p_fsync__cnt = 0; -+} -+ -+void test_pack_cancel__initialize(void) -+{ -+ pack_packbuilder_init("small.git"); -+} -+ -+void test_pack_cancel__cleanup(void) -+{ -+ git_oid *o; -+ unsigned int i; -+ -+ cl_git_pass(git_libgit2_opts(GIT_OPT_ENABLE_FSYNC_GITDIR, 0)); -+ cl_git_pass(git_libgit2_opts(GIT_OPT_DISABLE_PACK_KEEP_FILE_CHECKS, false)); -+ -+ if (_commits_is_initialized) { -+ _commits_is_initialized = 0; -+ git_vector_foreach(&_commits, i, o) { -+ git__free(o); -+ } -+ git_vector_free(&_commits); -+ } -+ -+ git_packbuilder_free(_packbuilder); -+ _packbuilder = NULL; -+ -+ git_revwalk_free(_revwalker); -+ _revwalker = NULL; -+ -+ git_indexer_free(_indexer); -+ _indexer = NULL; -+ -+ /* cl_git_pass(p_chdir("..")); */ -+ cl_git_sandbox_cleanup(); -+ _repo = NULL; -+} -+ -+static int seed_packbuilder(void) -+{ -+ int error; -+ git_oid oid, *o; -+ unsigned int i; -+ -+ git_revwalk_sorting(_revwalker, GIT_SORT_TIME); -+ cl_git_pass(git_revwalk_push_ref(_revwalker, "HEAD")); -+ -+ while (git_revwalk_next(&oid, _revwalker) == 0) { -+ o = git__malloc(sizeof(git_oid)); -+ cl_assert(o != NULL); -+ git_oid_cpy(o, &oid); -+ 
cl_git_pass(git_vector_insert(&_commits, o)); -+ } -+ -+ git_vector_foreach(&_commits, i, o) { -+ if((error = git_packbuilder_insert(_packbuilder, o, NULL)) < 0) -+ return error; -+ } -+ -+ git_vector_foreach(&_commits, i, o) { -+ git_object *obj; -+ cl_git_pass(git_object_lookup(&obj, _repo, o, GIT_OBJECT_COMMIT)); -+ error = git_packbuilder_insert_tree(_packbuilder, -+ git_commit_tree_id((git_commit *)obj)); -+ git_object_free(obj); -+ if (error < 0) -+ return error; -+ } -+ -+ return 0; -+} -+ -+static int fail_stage; -+ -+static int packbuilder_cancel_after_n_calls_cb(int stage, uint32_t current, uint32_t total, void *payload) -+{ -+ -+ /* Force the callback to run again on the next opportunity regardless -+ of how fast we're running. */ -+ _packbuilder->last_progress_report_time = 0; -+ -+ if (stage == fail_stage) { -+ int *calls = (int *)payload; -+ int n = *calls; -+ /* Always decrement, including past zero. This way the error is only -+ triggered once, making sure it is picked up immediately. */ -+ --*calls; -+ if (n == 0) -+ return GIT_EUSER; -+ } -+ -+ return 0; -+} -+ -+static void test_cancel(int n) -+{ -+ -+ int calls_remaining = n; -+ int err; -+ git_buf buf = GIT_BUF_INIT; -+ -+ /* Switch to a small repository, so that `packbuilder_cancel_after_n_calls_cb` -+ can hack the time to call the callback on every opportunity. */ -+ -+ cl_git_pass(git_packbuilder_set_callbacks(_packbuilder, &packbuilder_cancel_after_n_calls_cb, &calls_remaining)); -+ err = seed_packbuilder(); -+ if (!err) -+ err = git_packbuilder_write_buf(&buf, _packbuilder); -+ -+ cl_assert_equal_i(GIT_EUSER, err); -+} -+void test_pack_cancel__cancel_after_add_0(void) -+{ -+ fail_stage = GIT_PACKBUILDER_ADDING_OBJECTS; -+ test_cancel(0); -+} -+ -+void test_pack_cancel__cancel_after_add_1(void) -+{ -+ cl_skip(); -+ fail_stage = GIT_PACKBUILDER_ADDING_OBJECTS; -+ test_cancel(1); -+} -+ -+void test_pack_cancel__cancel_after_delta_0(void) -+{ -+ fail_stage = GIT_PACKBUILDER_DELTAFICATION; -+ test_cancel(0); -+} -+ -+void test_pack_cancel__cancel_after_delta_1(void) -+{ -+ fail_stage = GIT_PACKBUILDER_DELTAFICATION; -+ test_cancel(1); -+} -+ -+void test_pack_cancel__cancel_after_delta_0_threaded(void) -+{ -+#ifdef GIT_THREADS -+ git_packbuilder_set_threads(_packbuilder, 8); -+ fail_stage = GIT_PACKBUILDER_DELTAFICATION; -+ test_cancel(0); -+#else -+ cl_skip(); -+#endif -+} -+ -+void test_pack_cancel__cancel_after_delta_1_threaded(void) -+{ -+#ifdef GIT_THREADS -+ git_packbuilder_set_threads(_packbuilder, 8); -+ fail_stage = GIT_PACKBUILDER_DELTAFICATION; -+ test_cancel(1); -+#else -+ cl_skip(); -+#endif -+} -+ -+static int foreach_cb(void *buf, size_t len, void *payload) -+{ -+ git_indexer *idx = (git_indexer *) payload; -+ cl_git_pass(git_indexer_append(idx, buf, len, &_stats)); -+ return 0; -+} -+ -+void test_pack_cancel__foreach(void) -+{ -+ git_indexer *idx; -+ -+ seed_packbuilder(); -+ -+#ifdef GIT_EXPERIMENTAL_SHA256 -+ cl_git_pass(git_indexer_new(&idx, ".", GIT_OID_SHA1, NULL)); -+#else -+ cl_git_pass(git_indexer_new(&idx, ".", 0, NULL, NULL)); -+#endif -+ -+ cl_git_pass(git_packbuilder_foreach(_packbuilder, foreach_cb, idx)); -+ cl_git_pass(git_indexer_commit(idx, &_stats)); -+ git_indexer_free(idx); -+} -+ -+static int foreach_cancel_cb(void *buf, size_t len, void *payload) -+{ -+ git_indexer *idx = (git_indexer *)payload; -+ cl_git_pass(git_indexer_append(idx, buf, len, &_stats)); -+ return (_stats.total_objects > 2) ? 
-1111 : 0; -+} -+ -+void test_pack_cancel__foreach_with_cancel(void) -+{ -+ git_indexer *idx; -+ -+ seed_packbuilder(); -+ -+#ifdef GIT_EXPERIMENTAL_SHA256 -+ cl_git_pass(git_indexer_new(&idx, ".", GIT_OID_SHA1, NULL)); -+#else -+ cl_git_pass(git_indexer_new(&idx, ".", 0, NULL, NULL)); -+#endif -+ -+ cl_git_fail_with( -+ git_packbuilder_foreach(_packbuilder, foreach_cancel_cb, idx), -1111); -+ git_indexer_free(idx); -+} -+ -+void test_pack_cancel__keep_file_check(void) -+{ -+ assert(!git_disable_pack_keep_file_checks); -+ cl_git_pass(git_libgit2_opts(GIT_OPT_DISABLE_PACK_KEEP_FILE_CHECKS, true)); -+ assert(git_disable_pack_keep_file_checks); -+} -diff --git a/tests/resources/small.git/HEAD b/tests/resources/small.git/HEAD -new file mode 100644 -index 0000000000000000000000000000000000000000..cb089cd89a7d7686d284d8761201649346b5aa1c -GIT binary patch -literal 23 -ecmXR)O|w!cN=+-)&qz&7Db~+TEG|hc;sO9;xClW2 - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/config b/tests/resources/small.git/config -new file mode 100644 -index 0000000000000000000000000000000000000000..07d359d07cf1ed0c0074fdad71ffff5942f0adfa -GIT binary patch -literal 66 -zcmaz}&M!)h<>D+#Eyypk5{uv*03B5png9R* - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/description b/tests/resources/small.git/description -new file mode 100644 -index 0000000000000000000000000000000000000000..498b267a8c7812490d6479839c5577eaaec79d62 -GIT binary patch -literal 73 -zcmWH|%S+5nO;IRHEyyp$t+PQ$;d2LNXyJgRZve!Elw`VEGWs$&r??@ -Q$yWgB0LrH#Y0~2Y0PnOK(EtDd - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/hooks/applypatch-msg.sample b/tests/resources/small.git/hooks/applypatch-msg.sample -new file mode 100755 -index 0000000000000000000000000000000000000000..dcbf8167fa503f96ff6a39c68409007eadc9b1f3 -GIT binary patch -literal 535 -zcmY+AX;Q;542A#a6e8^~FyI8r&I~hf2QJ{GO6(?HuvEG*+#R{4EI%zhfA8r{j%sh$ -zHE~E-UtQd8{bq4@*S%jq3@bmxwQDXGv#o!N`o3AHMw3xD)hy0#>&E&zzl%vRffomqo=v6>_2NRa#TwDdYvTVQyueO*15Nlo%=#DXgC0bhF3vTa`LQGaO9;jeD$OP?~ -za$G4Q{z+Q_{5V?5h;a-noM$P{<>Q~j4o7u%#P6^o^16{y*jU=-K8GYD_dUtdj4FSx -zSC0C!DvAnv%S!4dgk -XB^)11aoGMJPCqWs%IS0YSv(eBT&%T6 - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/hooks/commit-msg.sample b/tests/resources/small.git/hooks/commit-msg.sample -new file mode 100755 -index 0000000000000000000000000000000000000000..f3780f92349638ebe32f6baf24c7c3027675d7c9 -GIT binary patch -literal 953 -zcmaJy@-{3h^^Cx;#d0zEA@DDc$nY4ez&|=%jTg@_HU*ub=!!y$xW09TSjlj -z(`I@QCsM`!9&80$I98wsQ8yK#)Orb<8re8FjkKh630D$QUDwi~(gkX=RunYm$rDjk -zlp%RUSnzA#6yjdG5?T?2DcYKp+v_lts0ljn&bh3J0bD5@N@1UKZ190O6ZeWr-BuZ^ -zWRebCX%(%=Xoj#(xYk1Cjtr!=tyBesf@m6}8zY6Ijbz9i9ziI_jG9MvR -zDH*e>^ga9IR?2wrSrAVm;eButj4Y>7(E2?b~jsu>& -zRKCJ7bp#19sqYh627wD%D9R$8=Ml$TNlumDypl~$jBu*G>5fIR^FB0h0Ex&TGZNr> -zL5hs1_K>taRb!|ThN9ns7^@4MXKP+6aGI_UK)T-M#rcP$;kN(Vcf#P)+5GzWa{l@J -z>-E{`$1iiNVYxq27}j;uo%;)r3kJI2xCFF~Ux;$Q%) -wjbk6JlDCM`jU&P+UVOvg`|iYl<7~9k>HHB4I;pdlQ=I-^$DrHaN$@lH1?P!0U;qFB - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/hooks/fsmonitor-watchman.sample b/tests/resources/small.git/hooks/fsmonitor-watchman.sample -new file mode 100755 -index 0000000000000000000000000000000000000000..41184ebc318c159f51cd1ebe2290559805df89d8 -GIT binary patch -literal 4777 -zcmbtYYi}F368$Xwipg4lq(BeHMvzvH-4;n7DGJBPqq#tw3aed8+IU5-m)yvL>;Cqh -z8FFRGj$`9CA8aoJ?j^$%==FV``-=rhLcPW`McSytRm~mEO7_&_cAVZrf1fFy*ha@8oe%*-aBYE 
-zcjzZg>LOkgxuUr-XJnHyD;zmPnRaSc#!k_P*d_BttRdc+J6G7za5#+^Y1nkc2Oowk`ya47uUR3Feu?B(w;S{(VYzxh}q-=#zP@uxSx{wbyPUMFU;K(06)$o{07&3yI?q{GqMcQ1c_^M<0< -zF4acAV)Il-V(rCTC1(;bsZ*}bl8dmejAk~yb`B}!^0;g^(o9kGUfZfDOvyp@x4OQt -zSgWh6T|3eq;9MFs8-#z+FDM1h(IjRUP|``PxupgJ7CUHOH90gbgl^2~97`?_X{P)) -zB*$r1cDlF-%azKND}?Gv`2K8-9v5e`gQoft=j?T<&a13c^!wY_$D`5z-X1g?ty&6- -zQN50{8?bUk9AI->^W@~~nkOghHIC2YN+AXkLQG_2-{Pq3%{`3KUMeG$iIn%%^6*NYb -zn|_BdV#C)n4565VccX;uT8&z3vSi!HXGbUj2B!R -zdz~&#fk#L-&k$fLwo$4?>12g@AXOKFekuo#6EHB%gmpD?1eyh%N8s{2wGoTu -z*@6cEZ^ZW!FAF_|JL`NkV7k}0ow|-2jHwbgH0;c@Dq*o?@&c*HnGdyx6^su8Qk%2{ -z*ye(dxO*6-&>qn1+zw}tc6;=sOX{4WB=VqjTS^))y1jlX2Q;=e!qMmFA5lC$#;BxC -z=Y%tRpWxb+_uQAvAw7Q{HGV#R$xb&udLCzZ+HN?kTyB};1EJ8UlQ5!>5eGW@)RX0n -zkjj>EF!3=0Gl^8dzv$B^NMGRxJoqN4A`xq-@wCbrx*u2NmIJ1xZ%H -zh;{|4T3(!E9sY#Ni(wUJYs1MmIc9bl)(4Nl3_wD_BWB>i<1S(LX7m*{Q7PU$muMS* -zM!%0EZx-Vw=Zey;erC?SNxF;pY@^A%-krqzfLV2meBp1vWdyArFYn`DD19T)Hw(?n -z)}{NP(Lk(o*?gl#B@pP7^*r|=;PIDT4|F#{2Hzh-AL0Rv$6uT;n|WzE4=slK?on@(fZeGhRgQCu56qB -z{+n81Az96qnQjMY*-*r-KV*7;Z#4QuJRJJV$M^KdldiMhj?ImK6~FvwJ*L5a){QoM=L5TYHkGO1$UrO3`a>{?Opw|b -zG(#59NQ#jFL9v~vgOVkM@^^(^A}onOE))yWEwhIlk&{ZyseZ^O0b=w8&O=BK{k<5B -k^Q-B@eG}LeHrquz%(SVEp_N)VhYZikCW__82JXfD17`J9Qvd(} - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/hooks/pre-applypatch.sample b/tests/resources/small.git/hooks/pre-applypatch.sample -new file mode 100755 -index 0000000000000000000000000000000000000000..625837e25f91421b8809a097f4a3103dd387ef31 -GIT binary patch -literal 481 -zcmY+ATTa6;5Jms9iouO45IBJXEg&Jm9@v1LPHMM_ZR|;#6tQh$71hSXq*MxP;V& -zj0cY7SCL=x4`a46sF)C>94Gk%=3q$W2s;j6iHtB2$R0%gix4oK@&T~=ALd_o*CKxt -I-`Pv{1Bpzc>;M1& - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/hooks/pre-commit.sample b/tests/resources/small.git/hooks/pre-commit.sample -new file mode 100755 -index 0000000000000000000000000000000000000000..10b39b2e26981b8f87ea424e735ef87359066dbb -GIT binary patch -literal 1706 -zcmZuxU2ohr5PY_N#pZ0-F<{-v&v-X^RA+u>k}E$4d&uD7=g_fA8+pNNV=4s0|iD3p<=DTXClTS -zXV23tJ;ECmN@M0j@zUAKEYW@3bv!SeYZ8ZH`YQNTApFVNc;F|9r5p4TqGs=>8E?6y -zi|gY{iM#PG1nL?UE9YCnWTk72kgZPG*Usqw!~Qd3c?~@w2?%eg@~)+VlSs6N5Yf2^ -zz;owF#K#r^&KMq1A`oqVGFpD&-!Pv|Rc -zO3KSqA@h9nSc%bm`0)Amk6*J}@14J*1-219l%%7D!Pl}UK>|lVi0Dfgu2jN3WC!uL -z0ej??b2iSehVgdnWHmZV4kUo*QL#aiIp}U=9x)IXk}JJ7VQ;CI9Rtn5e0VcjbYcVt+`x5D+svCGD;Z5hm*E$jSEQZ%SQ(}oLgslTvrKK@9Qf#b!hajVFnp9@oIix;NcI9Wk -xjnh0ya!AWet{I7YpD;y6HXyzI*lfSvH=o6*7mJZPkuaYpm>vzZ`wyGEBtOQPo|pgt - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/hooks/pre-push.sample b/tests/resources/small.git/hooks/pre-push.sample -new file mode 100755 -index 0000000000000000000000000000000000000000..02cbd80c287f959fe33975bb66c56293e3f5b396 -GIT binary patch -literal 1431 -zcmaJ>U60!~5PUX&#a1@z9B{IIZkjLT0t5kq9#8~D(I5{+8&J~9;#ndUk~-ZT`r|uG -z$#K$$J{TsKs*LP1}9!GoZ@4I4myMMG_di|of -z%?llx{O8TS-#^;(OioEmPy%kwWQBA1OMzV{hsQ8XFzS1k!~YQoLa5 -zhtP1fA$q6VmMbbAC_9)4I628k*O5J$NR19uHe4QYDK<==I~SQk)Nu%xQ~KH -z53w=!ke(FGb_PpnZfd*+hnXDTn;2*`u^~;?+5C~cn?bRka7NR%06%e6O91{MAgN6J -zmlO8{Biw4&wr&&(z4p3eln`E}XR9m9bNYZ7Ibrg(4yZIXrfgD7N*AFD7L3YSM#j}% -zo__rOS5fr;@8UM<6cl+cv_$YB$PQ&9dv($eM*))g!_cu!QcSh-mqE9i#QDZT)=o#` -z?8!RtE?w6p?GkGZ-6yt_p~5~4ecu|Sf^)6096%h*q-eNiEA1;Xwg)p~Q&iGSG7-IQ -z9aII&`ps$WOojFA`*bjGkFk|E@sHHuD}W^d`7YJ3YE^zrQnqR -zGoq?;YGKe)93o|_=^f%3U1KYZGPOXRRxK7w`UUbMMa3<86OmVH!EKP$8RCrn9mWX+ -zC?9yF!fRVLmud3hF<}x;;sR}f(*r}6Gap3fR6zLHR~kbMgD{98N`L+r&?3p~*0+FX -zcAL%j=(SO}xTJUTvA`&Lf`2mv4koPG9&|;2+68$XxiXKL@ma;l5d2^5Ba_rPh_DHI-u1#&_upttZXp;no03$20|NFiM 
-zK#D#xQ>!Z3JkX8T-LDVm!B5j7y_{;JDmmTTef+K1oIiPzeEr+Ai*<2PUgnG4^ZB>p -z_fkAvoR1emuf~ri^K$-px=4#D-vY9w& -z`bCv#2zVn=YnJyeNey(Y -zRh`9vtLw~A+5zsjp|W0Nsa|29Rm!B>OoG5a+vi;ari8O>KkU!KAWg_fa3btK2x*_@ -z0bEc7J;Ubghm}n9bOi(Sv_B66nQ7U)J7f0fO}8Wuf*uorcIgEG -zOHc|-V6+HlRhOP}?Cn?@5iwSl43abmBA^2lyL$+cpabCGVES+v^j^FO_}?FIp%En%Ll?Z*7*}TwrZyg5OSZ9rY-`aU~Mc-jjv{Ll)FLMgtB4ujktfQ`Xhqrka -zT=P!A;9w^;Z?PqpLwOLu=cj3L>TdUKw2;DMu)`oVkj}#bcDx4tYg=j%D`+i{W~fVM -zVmZ>W9VMyin9c-0KzI_;iZ-g|OyzuG`Yq%(%dvl;ifnVr0;jWE&S`z|rQu=!yHBBO -zx`OJ;oOQ(KKM<$(bC38o>pD0%|HA(E0TRw7qj$fJ_pRN+7Nm>dSC(gLg{(`t+5Z=?o+}wXU4tHy+&%F&aRhFebeEhR2R5|$#Ycbp^w@t -zTl%=f1t=w+WpJzF<|CE@?SCNAz)%9?w33lQ8vrHJqPfH9@}qs*QXOG71W=ylx;wOB -zcx!Bj^)Yy6WX$a^vBkBJ5CobqlaDx_B0c<3b+8)f84LCrt;e;qxc+7>VbwVK{skNv!wvBiTa^9Iu -zkwP;VK)jH$WJ{`MRwAA9fal!y0dtV;FWg8PTkWU>CwnqD>1ZX2B@;$DlX%C5MI+}{ -z9xQVnffR*~v2KAUj*hCdgul~`bk#mk`o>zk9)<2Uc8?hUZAEvd!`9em)~$Z)zev>w^8 -zyAgCP_$&Y)7HSQ84`xG}OeTavaEswwF|8Xpi5iZzZa@hCiv(J-%bfFC&)HLlO+Rhw -zG6g?9eL5&A!SuJnQ6}LxG%tU+@vZ`i+!+Rz6iYvsTdhnPo7lW{m-}{hya@viX4)XZ -zngaw+j;gloB#|UwI@8sOmQpc`h+bicQJnQIB5eifIMQNgD2+oai33m!34~xU|0Azj -zhu$8z+T5^;Pxx@d{N)pzOJLSa^e;aDf$W%N5XcOf!mGC9l9j$Ev2h6N+6ZQC+CJzl -zaM7?S!SrFLS2DASjj(h6y1WN3N?|bmqmyzm!&nLoE|`rKBOc_yDF$a#FsUn!IQf(t -zdC&Us(kQz*7mvH^j*^MC@>wTDb}g%~sx*ng#>{@lR=XG-Z5_ -z#<9*Oh0joMzt;nS)ObAp)347`D=}r-;nV!TbIq&xrGRGsF6fZg+!VkfUei@_&l-M& -zPqQ+Dw)RV}+)I8RuqAxa`Pv8e&!_gXS=e2-un>=Ktn}-;%lLZxaVn?Q>yZCb2R3Wk -z77zr%;Rq&h|2ncqyKYmFI0148JVY7Q$V5p=dWj+Qqpu%i|xp2C=WaOb2Wudn^h0EcD%$p9YVU1fnoRV9`(cy(vv6K>FXS!2jY>1GnU--7)4usH&K -zao*&P^@9~YmUe|ZdLW@C>H;!*Vt3>Nw4M*;=?j(TBD#O@XCv0|MEhA;z}kTFRv@`tPHhp=&Yh -zg%Zhg4i7o_k{a5i&f5;tZ==%}^Sn4aD_6%qs_XAuJt&EumdH4Yu`UjT<-+XHTuHss+b -YOmM2;hq8Egm*4=7_P9T{21QBYH*F=mfB*mh - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/hooks/prepare-commit-msg.sample b/tests/resources/small.git/hooks/prepare-commit-msg.sample -new file mode 100755 -index 0000000000000000000000000000000000000000..b1970da1b6d3f42f00069fd17c325de72cda812e -GIT binary patch -literal 1702 -zcmb_cTW{Mo6n>t6#i?x6xmZ$SFLf{QfG*3r0L?Pg?px55l8$UTGO3bO;spKi{V3XX -z))weX0X>M9bNMcZ-6yG%>(n}JI2|25dr}WZBP@ih?JX^+@ -zu#5O48P>yRX(mfDIhYP)doc1&TADZa@ZGpusJ$6G+e$ZMcmC -zoOosDQPS}l{H?YPsq(4;0SGkATa9eeqAaDcjq8n2wALbFwU@2i@FAaRV!=uw-nwx1gKn2SvY -z>Ff>;2sg!+Hxfkwv1lsiii=p6WenF=5)6LZcQaZ=aS_}+-4Y&?!@HWh|<^gJ21!|T@+%On#w6azxPHV}XsRbe*w -zR_TZ2XEsQa1lPK~biYqg@0-RW@5J1@=<87cFzEUABdCoFH2CZo?}l(Z*!OFqUxo>K -z_d`l#4d9|H6;VPT{X?^{VJ>oL|D7K{BJwwqB>`YcPoGk+9hbvHnoQ{EM|kPgD_`wk -zKm4#2xu;-y`RAm!=L_BnLvJ8$AZm8@?)v<%vwvsw8AF2x6!mTT;c72A_~U9nIq0ST -zv)N0!I!^1p=g8-RQfx5)E_Mb_4I2vtQpI30XZ&t-9h5!Hn - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/hooks/push-to-checkout.sample b/tests/resources/small.git/hooks/push-to-checkout.sample -new file mode 100755 -index 0000000000000000000000000000000000000000..a80611e18896f212c390d845e49a3f6d5693b41d -GIT binary patch -literal 2840 -zcmai0U31$u5PXh)#YOS7cE^-rw@uolNhe9&aUS|HtvhX>G$45tVUYj>fRdF?|9kfU -zNR~aG=E)WbEbeyq7JTw}ZuHIE2kUtL<AoeCNptd-NM1aZLhESzC;I`+Ns -zfmNNjdAp^W8#Q*}l>CT7RB9F5(BbI8ly2l~+E};JW|>&d1)=epZ-8vm8ppkbEVn#R -zt30a5A-c(YQR8eM5%;|UAnO>rt!&@x@G@yp+92%w-}%(5P_+P&Wf_zb$f-Qrl5(7z -z2ah(bkE;!DK(&aAMuQ%1TS>ai?wSXCOCSj=_}8x4IbCx^$}9q)whwv)SBt| -zg#MX4;;Oau`m=MI9(^&zPbueY@~>3*ixX%mvR5m_1&nAg@ZKvY1E$O}&EtLiG;mhV -z1xhMIm~fGjmf_#{62f`y;09?I7M1W2tWQvz<}i9lR>OpQyUJi45_&*pQus&EkwY<> -zI|ZAx=*3i9a-)g)hXkvO7>UJ5MNgL(Z+-wpXVcgbSgpmFmbf1~DPA(OVGI&FNLeIE -zNH!_aiH$vsif$_j7=T2{cS(!DOI`~bn@)vSd-0d7xL=DF;UNP|tW}4ih>DvHtu9tY_pbJ6x(6E*hxgC 
-zzNDao%qlr-IE%YGbS4hF!n!on7#W3$bX-_hbZAaws^nHu#)Dx=WzdbJ>AKzAy@T$x -zSWE^x9+|TEHVEPyaPYa0DOChp?AeHSBBDbZNokQpAY{lE!7geZI=jV)G^2@l)&91Zb1+`T+oq9wWF -zRV~kGTGce0O~p^6mj{kT5kL(pv>r;Lvd7VDX*P>A^Th`$3cWO0L81p4Ysdo3ZP1(SrR-peEdTo;-@bkB((G -zPHYQXUL!@Q$e(OQ;R9r%@Afz+50I7>*^^c&&|E*r-jN)LH=pM4AqMwWxSv|nqjddE -Z4{_hwv8!W(T -zYw`X3V>TCdnSD1ru8&`j=2DIPbCT@SnIgUw>$+lEYP}+x8(BMYnr=iT3*ndq)xzaV -z>I+qjv}vC#8_9M+b1p#uNS0M0)q

8!3p_LRQ0MA3M`!2foxzRUjbFY@}O~(ki=S -zqscnq8cU*dY)D$$cqE}n)V0yIk>CNKHCrndOtSP*HbOb;nbwAHSb;R+gs^?^Dve%) -zoW}t(*D}$>O3ab0TS^-;J|u&sb-PkZzo#kn*#xYt(;FGuwzSb^g&RDiGcOz9TB;Hu`nJh)$W=C=XCSm2AY=$w3G3P-V#Oo+N*;#2 -z4ijJ-pBZ=;T(RTgp_HYrD!uW-dTMfkuqY5jwOy)~gM;#=P^i{!l7`pXTS^s(&^{RU -zydaw}OpS#^D1cXM8?FW+fh`t7D(g;yr6|}fdaNtZBx3hlK~IpkTu3!Qq%R+zAo#t}Bs8^3$vHD+-TGT@`F>H1Cc#WAVW;&$S6%fE2d6@kLS0g&ihIM{}0z -z8#XhD>b>3{(BH|Px7}&lJ4%y1v(CihZJx@8MPoGdl*BJGD;usf*iS7%;{Joe; -zNFuBa>*~o&qETDPo~u&~$FxE1xb^x&(CbE`Y3GfsibL2rl+L;>P6j&Y3U>K$mkp*6 -zd`Q{<^+^&;GskGjwD-%!boR&i-TCA9UOR|@=GYb5x#+dhd7fkaVIR^pol`Mv+rUbmZ43dVL6^S7g3{NsPiG$iy$5EDB% -z6KIgnb$H(n&t3e4E6d4V7w^B?JS}JkG)PM6+X3Co`SQs($O*AA+MG~{S7RJ=cy-l& -z>~%3y`tjfx2>uOutB_^s -ziwG=e=ch|FQ0IkN91US7rhdQkXhwwt$gU0WEVDjo=IPb+?6PC=s8}J*ua(Ms))`UL -fi$|vMHn?H_tSE3ettp-hLlsZCxaLX8(nU;bVRB;Ce6@s#eu2|WvLz>- -zvy(&>Gyfp@+BtKnpqWkKi^+v{4jn_pNw_zeuxETifiGO|)w}OANj2n2D^K=o3j6P6uOL70#cbA{uzWXDlk1wr9GV1X(2W{RuTvjXV -zCmd8u -zH%V`94=q3)Dk)PHNrnFC(T1)Om6f{Usj;u1R->&XoCYVK2V3ZlgZuF?N}1+33OER*x -z*9Z=L=zI8CN>A_^jYjt0F$psO$sL=38q5q|SG)qCN6{^>RFh5E&l5GZ$pEahnF&d+ -z5c>64t}uJPkf~_!VUj#&N%nC-gUMj%=@B=!V>&}xtj2%@-mOm#rQUSJ3(ccmc+fza -znZ#uxF>N?QN5UrIEd!5RgHEfW#;(nKYF+D<*rdshJ$X-z2OZ2X;)nn@KSVdVhaA?}@3;6gZxb4v -zozoWSr{{+!h}zGpumG3H`=AvWpm^9kW;J$Jp^Xl*?8ckr`fqN%c|Z;VC0|cM4vSrk -zH_O8Yvh85nvJp^;``wo8=z0f`FWg?`>gO#y1hjX1{}rTlg9rwIKia8eyGexA3GnuR -z`Rg~XZoW;0pA)vI8=p5!+6sIn#C^FCvR>ffv39h6SCNi9v);%WD;WZ`of_MgwyRWy -z-yY%n*Y>X89W-v4`Ff%bx$Vkn}$!Ay}rnY6F$m-Kg*KD_+;Lx#g4|^&N -I02NaX#p`nv=Kufz - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/objects/af/5626b4a114abcb82d63db7c8082c3c4756e51b b/tests/resources/small.git/objects/af/5626b4a114abcb82d63db7c8082c3c4756e51b -new file mode 100644 -index 0000000000000000000000000000000000000000..822bc151862ec3763cf2d3fa2372b93bbd3a4b65 -GIT binary patch -literal 30 -mcmb>0i}&W3IZ_@1U=^!a~EV1casc=c+{&un1qQN*i9hD|0|m(2n|iwp*q%W -z%N;b$hu%cM`$TMo*~EnC1BFP&Pfj~;jZVKXQ96s_PhV<-XAROi+@-v8dBLUa`!;GB -k^iXlEv8$>R)1G>9th&t3j;s7J{?^9n|7U^`%mXoWC24Q^m!3%@{ - -literal 0 -HcmV?d00001 - From ba7bbcd1daf7deb41afeaf1a9d2d1dce3df0044d Mon Sep 17 00:00:00 2001 From: John Ericson Date: Sat, 11 Oct 2025 18:30:27 -0400 Subject: [PATCH 291/332] Cleanup `Derivation*Goal` names --- src/libstore/build/derivation-building-goal.cc | 2 +- src/libstore/build/derivation-goal.cc | 4 +--- src/libstore/build/derivation-trampoline-goal.cc | 2 +- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index e8ee945d9..50315dede 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -42,7 +42,7 @@ DerivationBuildingGoal::DerivationBuildingGoal( throw; } - name = fmt("building of '%s' from in-memory derivation", worker.store.printStorePath(drvPath)); + name = fmt("building derivation '%s'", worker.store.printStorePath(drvPath)); trace("created"); /* Prevent the .chroot directory from being diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index 2e57c1708..e50ce8f79 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -43,9 +43,7 @@ DerivationGoal::DerivationGoal( { this->drv = std::make_unique(drv); - name = - fmt("building of '%s' from in-memory derivation", - DerivedPath::Built{makeConstantStorePathRef(drvPath), drv.outputNames()}.to_string(worker.store)); + name = fmt("getting output '%s' from derivation '%s'", 
wantedOutput, worker.store.printStorePath(drvPath)); trace("created"); mcExpectedBuilds = std::make_unique>(worker.expectedBuilds); diff --git a/src/libstore/build/derivation-trampoline-goal.cc b/src/libstore/build/derivation-trampoline-goal.cc index 205f5c427..01c1de75b 100644 --- a/src/libstore/build/derivation-trampoline-goal.cc +++ b/src/libstore/build/derivation-trampoline-goal.cc @@ -31,7 +31,7 @@ DerivationTrampolineGoal::DerivationTrampolineGoal( void DerivationTrampolineGoal::commonInit() { name = - fmt("outer obtaining drv from '%s' and then building outputs %s", + fmt("obtaining derivation from '%s' and then building outputs %s", drvReq->to_string(worker.store), std::visit( overloaded{ From a629ce3dec9dda5019f7acbe6575dac906564095 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Sat, 11 Oct 2025 18:36:29 -0400 Subject: [PATCH 292/332] Use member initializer list for `Derivation*Goal::drv` --- src/libstore/build/derivation-building-goal.cc | 7 +++---- src/libstore/build/derivation-goal.cc | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index 50315dede..e0412c3dd 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -27,16 +27,15 @@ namespace nix { DerivationBuildingGoal::DerivationBuildingGoal( - const StorePath & drvPath, const Derivation & drv_, Worker & worker, BuildMode buildMode) + const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode) : Goal(worker, gaveUpOnSubstitution()) , drvPath(drvPath) + , drv{std::make_unique(drv)} , buildMode(buildMode) { - drv = std::make_unique(drv_); - try { drvOptions = - std::make_unique(DerivationOptions::fromStructuredAttrs(drv->env, drv->structuredAttrs)); + std::make_unique(DerivationOptions::fromStructuredAttrs(drv.env, drv.structuredAttrs)); } catch (Error & e) { e.addTrace({}, "while parsing derivation '%s'", worker.store.printStorePath(drvPath)); throw; diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index e50ce8f79..1939ddbfe 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -33,6 +33,7 @@ DerivationGoal::DerivationGoal( : Goal(worker, haveDerivation()) , drvPath(drvPath) , wantedOutput(wantedOutput) + , drv{std::make_unique(drv)} , outputHash{[&] { auto outputHashes = staticOutputHashes(worker.evalStore, drv); if (auto * mOutputHash = get(outputHashes, wantedOutput)) @@ -41,7 +42,6 @@ DerivationGoal::DerivationGoal( }()} , buildMode(buildMode) { - this->drv = std::make_unique(drv); name = fmt("getting output '%s' from derivation '%s'", wantedOutput, worker.store.printStorePath(drvPath)); trace("created"); From 07df87652c6883ca6198a1cc6a1202685bb92099 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Sat, 11 Oct 2025 18:38:23 -0400 Subject: [PATCH 293/332] Make keys of `Derivation*Goal` more legible The property that substitution goals come first is still preserved --- src/libstore/build/derivation-building-goal.cc | 6 +----- src/libstore/build/derivation-goal.cc | 6 +----- src/libstore/build/derivation-trampoline-goal.cc | 6 +----- src/libstore/build/drv-output-substitution-goal.cc | 2 -- src/libstore/include/nix/store/build/goal.hh | 12 ++++++++++++ .../include/nix/store/build/substitution-goal.hh | 4 ---- 6 files changed, 15 insertions(+), 21 deletions(-) diff --git a/src/libstore/build/derivation-building-goal.cc 
b/src/libstore/build/derivation-building-goal.cc index e0412c3dd..65500ac2d 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -66,11 +66,7 @@ DerivationBuildingGoal::~DerivationBuildingGoal() std::string DerivationBuildingGoal::key() { - /* Ensure that derivations get built in order of their name, - i.e. a derivation named "aardvark" always comes before - "baboon". And substitution goals always happen before - derivation goals (due to "bd$"). */ - return "bd$" + std::string(drvPath.name()) + "$" + worker.store.printStorePath(drvPath); + return "dd$" + std::string(drvPath.name()) + "$" + worker.store.printStorePath(drvPath); } void DerivationBuildingGoal::killChild() diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index 1939ddbfe..0509d524f 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -52,11 +52,7 @@ DerivationGoal::DerivationGoal( std::string DerivationGoal::key() { - /* Ensure that derivations get built in order of their name, - i.e. a derivation named "aardvark" always comes before - "baboon". And substitution goals always happen before - derivation goals (due to "b$"). */ - return "b$" + std::string(drvPath.name()) + "$" + SingleDerivedPath::Built{ + return "db$" + std::string(drvPath.name()) + "$" + SingleDerivedPath::Built{ .drvPath = makeConstantStorePathRef(drvPath), .output = wantedOutput, }.to_string(worker.store); diff --git a/src/libstore/build/derivation-trampoline-goal.cc b/src/libstore/build/derivation-trampoline-goal.cc index 01c1de75b..83384c589 100644 --- a/src/libstore/build/derivation-trampoline-goal.cc +++ b/src/libstore/build/derivation-trampoline-goal.cc @@ -58,11 +58,7 @@ static StorePath pathPartOfReq(const SingleDerivedPath & req) std::string DerivationTrampolineGoal::key() { - /* Ensure that derivations get built in order of their name, - i.e. a derivation named "aardvark" always comes before "baboon". And - substitution goals, derivation goals, and derivation building goals always happen before - derivation goals (due to "bt$"). */ - return "bt$" + std::string(pathPartOfReq(*drvReq).name()) + "$" + DerivedPath::Built{ + return "da$" + std::string(pathPartOfReq(*drvReq).name()) + "$" + DerivedPath::Built{ .drvPath = drvReq, .outputs = wantedOutputs, }.to_string(worker.store); diff --git a/src/libstore/build/drv-output-substitution-goal.cc b/src/libstore/build/drv-output-substitution-goal.cc index b6ace4784..209d6d542 100644 --- a/src/libstore/build/drv-output-substitution-goal.cc +++ b/src/libstore/build/drv-output-substitution-goal.cc @@ -153,8 +153,6 @@ Goal::Co DrvOutputSubstitutionGoal::realisationFetched( std::string DrvOutputSubstitutionGoal::key() { - /* "a$" ensures substitution goals happen before derivation - goals. */ return "a$" + std::string(id.to_string()); } diff --git a/src/libstore/include/nix/store/build/goal.hh b/src/libstore/include/nix/store/build/goal.hh index 52700d12e..4d57afc0f 100644 --- a/src/libstore/include/nix/store/build/goal.hh +++ b/src/libstore/include/nix/store/build/goal.hh @@ -456,6 +456,18 @@ public: */ virtual void timedOut(Error && ex) = 0; + /** + * Used for comparisons. The order matters a bit for scheduling. We + * want: + * + * 1. Substitution + * 2. Derivation administrativia + * 3. Actual building + * + * Also, ensure that derivations get processed in order of their + * name, i.e. a derivation named "aardvark" always comes before + * "baboon". 
+ */ virtual std::string key() = 0; /** diff --git a/src/libstore/include/nix/store/build/substitution-goal.hh b/src/libstore/include/nix/store/build/substitution-goal.hh index 5f6cb6a18..5f33b9aa5 100644 --- a/src/libstore/include/nix/store/build/substitution-goal.hh +++ b/src/libstore/include/nix/store/build/substitution-goal.hh @@ -58,10 +58,6 @@ public: unreachable(); }; - /** - * We prepend "a$" to the key name to ensure substitution goals - * happen before derivation goals. - */ std::string key() override { return "a$" + std::string(storePath.name()) + "$" + worker.store.printStorePath(storePath); From 0da430be35ea37abc06428359a35f931fbe51ca8 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Thu, 18 Sep 2025 15:54:43 -0400 Subject: [PATCH 294/332] Split out `DerivationResolutionGoal` This prepares the way for fixing a few issues. Take 2: was landed before in 8f4a739d0fa05e44589d578f1860b45b8a48f1cc. --- .../build/derivation-building-goal.cc | 147 ++------------ src/libstore/build/derivation-goal.cc | 12 -- .../build/derivation-resolution-goal.cc | 191 ++++++++++++++++++ .../build/derivation-trampoline-goal.cc | 2 - src/libstore/build/worker.cc | 9 + .../store/build/derivation-resolution-goal.hh | 82 ++++++++ .../store/build/derivation-trampoline-goal.hh | 2 +- .../include/nix/store/build/worker.hh | 10 +- src/libstore/include/nix/store/meson.build | 1 + src/libstore/meson.build | 1 + tests/functional/build.sh | 9 +- 11 files changed, 315 insertions(+), 151 deletions(-) create mode 100644 src/libstore/build/derivation-resolution-goal.cc create mode 100644 src/libstore/include/nix/store/build/derivation-resolution-goal.hh diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index 65500ac2d..6bda17d37 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -1,4 +1,5 @@ #include "nix/store/build/derivation-building-goal.hh" +#include "nix/store/build/derivation-resolution-goal.hh" #include "nix/store/build/derivation-env-desugar.hh" #include "nix/store/build/derivation-trampoline-goal.hh" #ifndef _WIN32 // TODO enable build hook on Windows @@ -88,18 +89,6 @@ void DerivationBuildingGoal::timedOut(Error && ex) [[maybe_unused]] Done _ = doneFailure({BuildResult::Failure::TimedOut, std::move(ex)}); } -/** - * Used for `inputGoals` local variable below - */ -struct value_comparison -{ - template - bool operator()(const ref & lhs, const ref & rhs) const - { - return *lhs < *rhs; - } -}; - std::string showKnownOutputs(const StoreDirConfig & store, const Derivation & drv) { std::string msg; @@ -124,46 +113,6 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() { Goals waitees; - std::map, GoalPtr, value_comparison> inputGoals; - - { - std::function, const DerivedPathMap::ChildNode &)> - addWaiteeDerivedPath; - - addWaiteeDerivedPath = [&](ref inputDrv, - const DerivedPathMap::ChildNode & inputNode) { - if (!inputNode.value.empty()) { - auto g = worker.makeGoal( - DerivedPath::Built{ - .drvPath = inputDrv, - .outputs = inputNode.value, - }, - buildMode == bmRepair ? 
bmRepair : bmNormal); - inputGoals.insert_or_assign(inputDrv, g); - waitees.insert(std::move(g)); - } - for (const auto & [outputName, childNode] : inputNode.childMap) - addWaiteeDerivedPath( - make_ref(SingleDerivedPath::Built{inputDrv, outputName}), childNode); - }; - - for (const auto & [inputDrvPath, inputNode] : drv->inputDrvs.map) { - /* Ensure that pure, non-fixed-output derivations don't - depend on impure derivations. */ - if (experimentalFeatureSettings.isEnabled(Xp::ImpureDerivations) && !drv->type().isImpure() - && !drv->type().isFixed()) { - auto inputDrv = worker.evalStore.readDerivation(inputDrvPath); - if (inputDrv.type().isImpure()) - throw Error( - "pure derivation '%s' depends on impure derivation '%s'", - worker.store.printStorePath(drvPath), - worker.store.printStorePath(inputDrvPath)); - } - - addWaiteeDerivedPath(makeConstantStorePathRef(inputDrvPath), inputNode); - } - } - /* Copy the input sources from the eval store to the build store. @@ -208,88 +157,22 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() /* Determine the full set of input paths. */ - /* First, the input derivations. */ { - auto & fullDrv = *drv; + auto resolutionGoal = worker.makeDerivationResolutionGoal(drvPath, *drv, buildMode); + { + Goals waitees{resolutionGoal}; + co_await await(std::move(waitees)); + } + if (nrFailed != 0) { + co_return doneFailure({BuildResult::Failure::DependencyFailed, "resolution failed"}); + } - auto drvType = fullDrv.type(); - bool resolveDrv = - std::visit( - overloaded{ - [&](const DerivationType::InputAddressed & ia) { - /* must resolve if deferred. */ - return ia.deferred; - }, - [&](const DerivationType::ContentAddressed & ca) { - return !fullDrv.inputDrvs.map.empty() - && (ca.fixed - /* Can optionally resolve if fixed, which is good - for avoiding unnecessary rebuilds. */ - ? experimentalFeatureSettings.isEnabled(Xp::CaDerivations) - /* Must resolve if floating and there are any inputs - drvs. */ - : true); - }, - [&](const DerivationType::Impure &) { return true; }}, - drvType.raw) - /* no inputs are outputs of dynamic derivations */ - || std::ranges::any_of(fullDrv.inputDrvs.map.begin(), fullDrv.inputDrvs.map.end(), [](auto & pair) { - return !pair.second.childMap.empty(); - }); + if (resolutionGoal->resolvedDrv) { + auto & [pathResolved, drvResolved] = *resolutionGoal->resolvedDrv; - if (resolveDrv && !fullDrv.inputDrvs.map.empty()) { - experimentalFeatureSettings.require(Xp::CaDerivations); - - /* We are be able to resolve this derivation based on the - now-known results of dependencies. If so, we become a - stub goal aliasing that resolved derivation goal. */ - std::optional attempt = fullDrv.tryResolve( - worker.store, - [&](ref drvPath, const std::string & outputName) -> std::optional { - auto mEntry = get(inputGoals, drvPath); - if (!mEntry) - return std::nullopt; - - auto & buildResult = (*mEntry)->buildResult; - return std::visit( - overloaded{ - [](const BuildResult::Failure &) -> std::optional { return std::nullopt; }, - [&](const BuildResult::Success & success) -> std::optional { - auto i = get(success.builtOutputs, outputName); - if (!i) - return std::nullopt; - - return i->outPath; - }, - }, - buildResult.inner); - }); - if (!attempt) { - /* TODO (impure derivations-induced tech debt) (see below): - The above attempt should have found it, but because we manage - inputDrvOutputs statefully, sometimes it gets out of sync with - the real source of truth (store). So we query the store - directly if there's a problem. 
*/ - attempt = fullDrv.tryResolve(worker.store, &worker.evalStore); - } - assert(attempt); - Derivation drvResolved{std::move(*attempt)}; - - auto pathResolved = writeDerivation(worker.store, drvResolved); - - auto msg = - fmt("resolved derivation: '%s' -> '%s'", - worker.store.printStorePath(drvPath), - worker.store.printStorePath(pathResolved)); - act = std::make_unique( - *logger, - lvlInfo, - actBuildWaiting, - msg, - Logger::Fields{ - worker.store.printStorePath(drvPath), - worker.store.printStorePath(pathResolved), - }); + /* Store the resolved derivation, as part of the record of + what we're actually building */ + writeDerivation(worker.store, drvResolved); /* TODO https://github.com/NixOS/nix/issues/13247 we should let the calling goal do this, so it has a change to pass @@ -378,7 +261,7 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() /* If we get this far, we know no dynamic drvs inputs */ - for (auto & [depDrvPath, depNode] : fullDrv.inputDrvs.map) { + for (auto & [depDrvPath, depNode] : drv->inputDrvs.map) { for (auto & outputName : depNode.value) { /* Don't need to worry about `inputGoals`, because impure derivations are always resolved above. Can diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index 0509d524f..dc12ab55a 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -183,18 +183,6 @@ Goal::Co DerivationGoal::haveDerivation() co_return amDone(g->exitCode, g->ex); } -/** - * Used for `inputGoals` local variable below - */ -struct value_comparison -{ - template - bool operator()(const ref & lhs, const ref & rhs) const - { - return *lhs < *rhs; - } -}; - Goal::Co DerivationGoal::repairClosure() { assert(!drv->type().isImpure()); diff --git a/src/libstore/build/derivation-resolution-goal.cc b/src/libstore/build/derivation-resolution-goal.cc new file mode 100644 index 000000000..6cb9702f4 --- /dev/null +++ b/src/libstore/build/derivation-resolution-goal.cc @@ -0,0 +1,191 @@ +#include "nix/store/build/derivation-resolution-goal.hh" +#include "nix/store/build/worker.hh" +#include "nix/util/util.hh" + +#include + +namespace nix { + +DerivationResolutionGoal::DerivationResolutionGoal( + const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode) + : Goal(worker, resolveDerivation()) + , drvPath(drvPath) + , drv{std::make_unique(drv)} + , buildMode{buildMode} +{ + name = fmt("resolving derivation '%s'", worker.store.printStorePath(drvPath)); + trace("created"); +} + +std::string DerivationResolutionGoal::key() +{ + return "dc$" + std::string(drvPath.name()) + "$" + worker.store.printStorePath(drvPath); +} + +/** + * Used for `inputGoals` local variable below + */ +struct value_comparison +{ + template + bool operator()(const ref & lhs, const ref & rhs) const + { + return *lhs < *rhs; + } +}; + +Goal::Co DerivationResolutionGoal::resolveDerivation() +{ + Goals waitees; + + std::map, GoalPtr, value_comparison> inputGoals; + + { + std::function, const DerivedPathMap::ChildNode &)> + addWaiteeDerivedPath; + + addWaiteeDerivedPath = [&](ref inputDrv, + const DerivedPathMap::ChildNode & inputNode) { + if (!inputNode.value.empty()) { + auto g = worker.makeGoal( + DerivedPath::Built{ + .drvPath = inputDrv, + .outputs = inputNode.value, + }, + buildMode == bmRepair ? 
bmRepair : bmNormal); + inputGoals.insert_or_assign(inputDrv, g); + waitees.insert(std::move(g)); + } + for (const auto & [outputName, childNode] : inputNode.childMap) + addWaiteeDerivedPath( + make_ref(SingleDerivedPath::Built{inputDrv, outputName}), childNode); + }; + + for (const auto & [inputDrvPath, inputNode] : drv->inputDrvs.map) { + /* Ensure that pure, non-fixed-output derivations don't + depend on impure derivations. */ + if (experimentalFeatureSettings.isEnabled(Xp::ImpureDerivations) && !drv->type().isImpure() + && !drv->type().isFixed()) { + auto inputDrv = worker.evalStore.readDerivation(inputDrvPath); + if (inputDrv.type().isImpure()) + throw Error( + "pure derivation '%s' depends on impure derivation '%s'", + worker.store.printStorePath(drvPath), + worker.store.printStorePath(inputDrvPath)); + } + + addWaiteeDerivedPath(makeConstantStorePathRef(inputDrvPath), inputNode); + } + } + + co_await await(std::move(waitees)); + + trace("all inputs realised"); + + if (nrFailed != 0) { + auto msg = + fmt("Cannot build '%s'.\n" + "Reason: " ANSI_RED "%d %s failed" ANSI_NORMAL ".", + Magenta(worker.store.printStorePath(drvPath)), + nrFailed, + nrFailed == 1 ? "dependency" : "dependencies"); + msg += showKnownOutputs(worker.store, *drv); + co_return amDone(ecFailed, {BuildError(BuildResult::Failure::DependencyFailed, msg)}); + } + + /* Gather information necessary for computing the closure and/or + running the build hook. */ + + /* Determine the full set of input paths. */ + + /* First, the input derivations. */ + { + auto & fullDrv = *drv; + + auto drvType = fullDrv.type(); + bool resolveDrv = + std::visit( + overloaded{ + [&](const DerivationType::InputAddressed & ia) { + /* must resolve if deferred. */ + return ia.deferred; + }, + [&](const DerivationType::ContentAddressed & ca) { + return !fullDrv.inputDrvs.map.empty() + && (ca.fixed + /* Can optionally resolve if fixed, which is good + for avoiding unnecessary rebuilds. */ + ? experimentalFeatureSettings.isEnabled(Xp::CaDerivations) + /* Must resolve if floating and there are any inputs + drvs. */ + : true); + }, + [&](const DerivationType::Impure &) { return true; }}, + drvType.raw) + /* no inputs are outputs of dynamic derivations */ + || std::ranges::any_of(fullDrv.inputDrvs.map.begin(), fullDrv.inputDrvs.map.end(), [](auto & pair) { + return !pair.second.childMap.empty(); + }); + + if (resolveDrv && !fullDrv.inputDrvs.map.empty()) { + experimentalFeatureSettings.require(Xp::CaDerivations); + + /* We are be able to resolve this derivation based on the + now-known results of dependencies. If so, we become a + stub goal aliasing that resolved derivation goal. */ + std::optional attempt = fullDrv.tryResolve( + worker.store, + [&](ref drvPath, const std::string & outputName) -> std::optional { + auto mEntry = get(inputGoals, drvPath); + if (!mEntry) + return std::nullopt; + + auto & buildResult = (*mEntry)->buildResult; + return std::visit( + overloaded{ + [](const BuildResult::Failure &) -> std::optional { return std::nullopt; }, + [&](const BuildResult::Success & success) -> std::optional { + auto i = get(success.builtOutputs, outputName); + if (!i) + return std::nullopt; + + return i->outPath; + }, + }, + buildResult.inner); + }); + if (!attempt) { + /* TODO (impure derivations-induced tech debt) (see below): + The above attempt should have found it, but because we manage + inputDrvOutputs statefully, sometimes it gets out of sync with + the real source of truth (store). So we query the store + directly if there's a problem. 
*/ + attempt = fullDrv.tryResolve(worker.store, &worker.evalStore); + } + assert(attempt); + + auto pathResolved = writeDerivation(worker.store, *attempt, NoRepair, /*readOnly =*/true); + + auto msg = + fmt("resolved derivation: '%s' -> '%s'", + worker.store.printStorePath(drvPath), + worker.store.printStorePath(pathResolved)); + act = std::make_unique( + *logger, + lvlInfo, + actBuildWaiting, + msg, + Logger::Fields{ + worker.store.printStorePath(drvPath), + worker.store.printStorePath(pathResolved), + }); + + resolvedDrv = + std::make_unique>(std::move(pathResolved), *std::move(attempt)); + } + } + + co_return amDone(ecSuccess, std::nullopt); +} + +} // namespace nix diff --git a/src/libstore/build/derivation-trampoline-goal.cc b/src/libstore/build/derivation-trampoline-goal.cc index 83384c589..310d23d70 100644 --- a/src/libstore/build/derivation-trampoline-goal.cc +++ b/src/libstore/build/derivation-trampoline-goal.cc @@ -64,8 +64,6 @@ std::string DerivationTrampolineGoal::key() }.to_string(worker.store); } -void DerivationTrampolineGoal::timedOut(Error && ex) {} - Goal::Co DerivationTrampolineGoal::init() { trace("need to load derivation from file"); diff --git a/src/libstore/build/worker.cc b/src/libstore/build/worker.cc index 3e6e0bef0..f597abb63 100644 --- a/src/libstore/build/worker.cc +++ b/src/libstore/build/worker.cc @@ -4,6 +4,7 @@ #include "nix/store/build/substitution-goal.hh" #include "nix/store/build/drv-output-substitution-goal.hh" #include "nix/store/build/derivation-goal.hh" +#include "nix/store/build/derivation-resolution-goal.hh" #include "nix/store/build/derivation-building-goal.hh" #include "nix/store/build/derivation-trampoline-goal.hh" #ifndef _WIN32 // TODO Enable building on Windows @@ -80,6 +81,12 @@ std::shared_ptr Worker::makeDerivationGoal( return initGoalIfNeeded(derivationGoals[drvPath][wantedOutput], drvPath, drv, wantedOutput, *this, buildMode); } +std::shared_ptr +Worker::makeDerivationResolutionGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode) +{ + return initGoalIfNeeded(derivationResolutionGoals[drvPath], drvPath, drv, *this, buildMode); +} + std::shared_ptr Worker::makeDerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode) { @@ -158,6 +165,8 @@ void Worker::removeGoal(GoalPtr goal) nix::removeGoal(drvGoal, derivationTrampolineGoals.map); else if (auto drvGoal = std::dynamic_pointer_cast(goal)) nix::removeGoal(drvGoal, derivationGoals); + else if (auto drvResolutionGoal = std::dynamic_pointer_cast(goal)) + nix::removeGoal(drvResolutionGoal, derivationResolutionGoals); else if (auto drvBuildingGoal = std::dynamic_pointer_cast(goal)) nix::removeGoal(drvBuildingGoal, derivationBuildingGoals); else if (auto subGoal = std::dynamic_pointer_cast(goal)) diff --git a/src/libstore/include/nix/store/build/derivation-resolution-goal.hh b/src/libstore/include/nix/store/build/derivation-resolution-goal.hh new file mode 100644 index 000000000..a284843f0 --- /dev/null +++ b/src/libstore/include/nix/store/build/derivation-resolution-goal.hh @@ -0,0 +1,82 @@ +#pragma once +///@file + +#include "nix/store/derivations.hh" +#include "nix/store/derivation-options.hh" +#include "nix/store/build/derivation-building-misc.hh" +#include "nix/store/store-api.hh" +#include "nix/store/build/goal.hh" + +namespace nix { + +struct BuilderFailureError; + +/** + * A goal for resolving a derivation. 
Resolving a derivation (@see + * `Derivation::tryResolve`) simplifies its inputs, replacing + * `inputDrvs` with `inputSrcs`. + * + * Conceptually, we resolve all derivations. For input-addressed + * derivations (that don't transtively depend on content-addressed + * derivations), however, we don't actually use the resolved derivation, + * because the output paths would appear invalid (if we tried to verify + * them), since they are computed from the original, unresolved inputs. + * + * That said, if we ever made the new flavor of input-addressing as described + * in issue #9259, then the input-addressing would be based on the resolved + * inputs, and we like the CA case *would* use the output of this goal. + * + * (The point of this discussion is not to randomly stuff information on + * a yet-unimplemented feature (issue #9259) in the codebase, but + * rather, to illustrate that there is no inherent tension between + * explicit derivation resolution and input-addressing in general. That + * tension only exists with the type of input-addressing we've + * historically used.) + */ +struct DerivationResolutionGoal : public Goal +{ + DerivationResolutionGoal( + const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode = bmNormal); + + /** + * If the derivation needed to be resolved, this is resulting + * resolved derivations and its path. + */ + std::unique_ptr> resolvedDrv; + + void timedOut(Error && ex) override {} + +private: + + /** + * The path of the derivation. + */ + StorePath drvPath; + + /** + * The derivation stored at drvPath. + */ + std::unique_ptr drv; + + /** + * The remainder is state held during the build. + */ + + BuildMode buildMode; + + std::unique_ptr act; + + std::string key() override; + + /** + * The states. + */ + Co resolveDerivation(); + + JobCategory jobCategory() const override + { + return JobCategory::Administration; + }; +}; + +} // namespace nix diff --git a/src/libstore/include/nix/store/build/derivation-trampoline-goal.hh b/src/libstore/include/nix/store/build/derivation-trampoline-goal.hh index 79b74f4c1..bfed67f63 100644 --- a/src/libstore/include/nix/store/build/derivation-trampoline-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-trampoline-goal.hh @@ -109,7 +109,7 @@ struct DerivationTrampolineGoal : public Goal virtual ~DerivationTrampolineGoal(); - void timedOut(Error && ex) override; + void timedOut(Error && ex) override {} std::string key() override; diff --git a/src/libstore/include/nix/store/build/worker.hh b/src/libstore/include/nix/store/build/worker.hh index a6de780c1..9660d66b2 100644 --- a/src/libstore/include/nix/store/build/worker.hh +++ b/src/libstore/include/nix/store/build/worker.hh @@ -16,6 +16,7 @@ namespace nix { /* Forward definition. 
*/ struct DerivationTrampolineGoal; struct DerivationGoal; +struct DerivationResolutionGoal; struct DerivationBuildingGoal; struct PathSubstitutionGoal; class DrvOutputSubstitutionGoal; @@ -111,6 +112,7 @@ private: DerivedPathMap>> derivationTrampolineGoals; std::map>> derivationGoals; + std::map> derivationResolutionGoals; std::map> derivationBuildingGoals; std::map> substitutionGoals; std::map> drvOutputSubstitutionGoals; @@ -224,7 +226,13 @@ public: BuildMode buildMode = bmNormal); /** - * @ref DerivationBuildingGoal "derivation goal" + * @ref DerivationResolutionGoal "derivation resolution goal" + */ + std::shared_ptr + makeDerivationResolutionGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode = bmNormal); + + /** + * @ref DerivationBuildingGoal "derivation building goal" */ std::shared_ptr makeDerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode = bmNormal); diff --git a/src/libstore/include/nix/store/meson.build b/src/libstore/include/nix/store/meson.build index c9e4c36dd..1f04e357a 100644 --- a/src/libstore/include/nix/store/meson.build +++ b/src/libstore/include/nix/store/meson.build @@ -18,6 +18,7 @@ headers = [ config_pub_h ] + files( 'build/derivation-building-misc.hh', 'build/derivation-env-desugar.hh', 'build/derivation-goal.hh', + 'build/derivation-resolution-goal.hh', 'build/derivation-trampoline-goal.hh', 'build/drv-output-substitution-goal.hh', 'build/goal.hh', diff --git a/src/libstore/meson.build b/src/libstore/meson.build index 8ec39dac1..a50a3f5fd 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -302,6 +302,7 @@ sources = files( 'build/derivation-check.cc', 'build/derivation-env-desugar.cc', 'build/derivation-goal.cc', + 'build/derivation-resolution-goal.cc', 'build/derivation-trampoline-goal.cc', 'build/drv-output-substitution-goal.cc', 'build/entry-points.cc', diff --git a/tests/functional/build.sh b/tests/functional/build.sh index 0a19ff7da..c9a39438d 100755 --- a/tests/functional/build.sh +++ b/tests/functional/build.sh @@ -178,7 +178,8 @@ test "$(<<<"$out" grep -cE '^error:')" = 4 out="$(nix build -f fod-failing.nix -L x4 2>&1)" && status=0 || status=$? test "$status" = 1 -test "$(<<<"$out" grep -cE '^error:')" = 2 +# Precise number of errors depends on daemon version / goal refactorings +(( "$(<<<"$out" grep -cE '^error:')" >= 2 )) if isDaemonNewer "2.29pre"; then <<<"$out" grepQuiet -E "error: Cannot build '.*-x4\\.drv'" @@ -186,11 +187,13 @@ if isDaemonNewer "2.29pre"; then else <<<"$out" grepQuiet -E "error: 1 dependencies of derivation '.*-x4\\.drv' failed to build" fi -<<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x2\\.drv'" +# Either x2 or x3 could have failed, x4 depends on both symmetrically +<<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x[23]\\.drv'" out="$(nix build -f fod-failing.nix -L x4 --keep-going 2>&1)" && status=0 || status=$? test "$status" = 1 -test "$(<<<"$out" grep -cE '^error:')" = 3 +# Precise number of errors depends on daemon version / goal refactorings +(( "$(<<<"$out" grep -cE '^error:')" >= 3 )) if isDaemonNewer "2.29pre"; then <<<"$out" grepQuiet -E "error: Cannot build '.*-x4\\.drv'" <<<"$out" grepQuiet -E "Reason: 2 dependencies failed." 
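
The relettered key() prefixes in the patch above ("a$" for substitution goals, then "da$", "db$", "dc$" and "dd$" for the trampoline, per-output, resolution and building derivation goals) obtain the scheduling order documented in the new goal.hh comment purely through lexicographic string comparison. The standalone sketch below is illustrative only: the key strings and store paths are hand-written placeholders rather than values produced by Nix, and it merely sorts them to show that substitution sorts first, administrative derivation goals sort before actual building, and names break ties within each prefix class.

// Illustrative sketch only: hand-written goal keys, not generated by Nix.
// Shows that plain lexicographic sorting of the "a$" / "da$" / "db$" /
// "dc$" / "dd$" prefixes yields the scheduling order described in goal.hh.
#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

int main()
{
    std::vector<std::string> keys = {
        "dd$baboon$/nix/store/placeholder-baboon.drv",     // building
        "db$aardvark$/nix/store/placeholder-aardvark.drv", // per-output
        "a$aardvark$/nix/store/placeholder-aardvark",      // substitution
        "dc$aardvark$/nix/store/placeholder-aardvark.drv", // resolution
        "da$baboon$/nix/store/placeholder-baboon.drv",     // trampoline
    };

    std::sort(keys.begin(), keys.end());

    // Prints the substitution key first, then "da$" < "db$" < "dc$" < "dd$";
    // within each prefix class, derivation names sort alphabetically.
    for (const auto & k : keys)
        std::cout << k << '\n';
}
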
From 711e738bf9449c384a065c2b98f6585f3da0da42 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sun, 12 Oct 2025 18:04:57 +0300 Subject: [PATCH 295/332] meson: Simplify asan-options handling even more Instead of specifying env variables all the time we can instead embed the __asan_default_options symbol in all executables / shared objects. This reduces code duplication. --- doc/manual/meson.build | 1 - .../common/asan-options}/asan-options.cc | 2 +- nix-meson-build-support/common/asan-options/meson.build | 7 +++---- src/libexpr-tests/meson.build | 2 +- src/libexpr-tests/package.nix | 1 - src/libfetchers-tests/meson.build | 2 +- src/libfetchers-tests/package.nix | 1 - src/libflake-tests/meson.build | 2 +- src/libflake-tests/package.nix | 1 - src/libstore-tests/meson.build | 4 ++-- src/libstore-tests/package.nix | 1 - src/libutil-tests/meson.build | 2 +- src/libutil-tests/package.nix | 1 - src/nix/meson.build | 1 - tests/functional/test-libstoreconsumer/main.cc | 7 ------- tests/functional/test-libstoreconsumer/meson.build | 3 ++- 16 files changed, 12 insertions(+), 26 deletions(-) rename {src/nix => nix-meson-build-support/common/asan-options}/asan-options.cc (71%) diff --git a/doc/manual/meson.build b/doc/manual/meson.build index a5672f0ad..2e372dedd 100644 --- a/doc/manual/meson.build +++ b/doc/manual/meson.build @@ -15,7 +15,6 @@ pymod = import('python') python = pymod.find_installation('python3') nix_env_for_docs = { - 'ASAN_OPTIONS' : 'abort_on_error=1:print_summary=1:detect_leaks=0', 'HOME' : '/dummy', 'NIX_CONF_DIR' : '/dummy', 'NIX_SSL_CERT_FILE' : '/dummy/no-ca-bundle.crt', diff --git a/src/nix/asan-options.cc b/nix-meson-build-support/common/asan-options/asan-options.cc similarity index 71% rename from src/nix/asan-options.cc rename to nix-meson-build-support/common/asan-options/asan-options.cc index 256f34cbe..651354bac 100644 --- a/src/nix/asan-options.cc +++ b/nix-meson-build-support/common/asan-options/asan-options.cc @@ -1,4 +1,4 @@ -extern "C" [[gnu::retain]] const char * __asan_default_options() +extern "C" [[gnu::retain, gnu::weak]] const char * __asan_default_options() { // We leak a bunch of memory knowingly on purpose. It's not worthwhile to // diagnose that memory being leaked for now. 
diff --git a/nix-meson-build-support/common/asan-options/meson.build b/nix-meson-build-support/common/asan-options/meson.build index 17880b0ed..80527b5a9 100644 --- a/nix-meson-build-support/common/asan-options/meson.build +++ b/nix-meson-build-support/common/asan-options/meson.build @@ -1,7 +1,3 @@ -asan_test_options_env = { - 'ASAN_OPTIONS' : 'abort_on_error=1:print_summary=1:detect_leaks=0', -} - # Clang gets grumpy about missing libasan symbols if -shared-libasan is not # passed when building shared libs, at least on Linux if cxx.get_id() == 'clang' and ('address' in get_option('b_sanitize') or 'undefined' in get_option( @@ -10,3 +6,6 @@ if cxx.get_id() == 'clang' and ('address' in get_option('b_sanitize') or 'undefi add_project_link_arguments('-shared-libasan', language : 'cpp') endif +if 'address' in get_option('b_sanitize') + deps_other += declare_dependency(sources : 'asan-options.cc') +endif diff --git a/src/libexpr-tests/meson.build b/src/libexpr-tests/meson.build index d1700b11d..c5dafe0de 100644 --- a/src/libexpr-tests/meson.build +++ b/src/libexpr-tests/meson.build @@ -82,7 +82,7 @@ this_exe = executable( test( meson.project_name(), this_exe, - env : asan_test_options_env + { + env : { '_NIX_TEST_UNIT_DATA' : meson.current_source_dir() / 'data', }, protocol : 'gtest', diff --git a/src/libexpr-tests/package.nix b/src/libexpr-tests/package.nix index c36aa2dc7..51d52e935 100644 --- a/src/libexpr-tests/package.nix +++ b/src/libexpr-tests/package.nix @@ -62,7 +62,6 @@ mkMesonExecutable (finalAttrs: { mkdir -p "$HOME" '' + '' - export ASAN_OPTIONS=abort_on_error=1:print_summary=1:detect_leaks=0 export _NIX_TEST_UNIT_DATA=${resolvePath ./data} ${stdenv.hostPlatform.emulator buildPackages} ${lib.getExe finalAttrs.finalPackage} touch $out diff --git a/src/libfetchers-tests/meson.build b/src/libfetchers-tests/meson.build index 905e06db0..a18f64d79 100644 --- a/src/libfetchers-tests/meson.build +++ b/src/libfetchers-tests/meson.build @@ -63,7 +63,7 @@ this_exe = executable( test( meson.project_name(), this_exe, - env : asan_test_options_env + { + env : { '_NIX_TEST_UNIT_DATA' : meson.current_source_dir() / 'data', }, protocol : 'gtest', diff --git a/src/libfetchers-tests/package.nix b/src/libfetchers-tests/package.nix index 8e82430d7..780618725 100644 --- a/src/libfetchers-tests/package.nix +++ b/src/libfetchers-tests/package.nix @@ -61,7 +61,6 @@ mkMesonExecutable (finalAttrs: { buildInputs = [ writableTmpDirAsHomeHook ]; } '' - export ASAN_OPTIONS=abort_on_error=1:print_summary=1:detect_leaks=0 export _NIX_TEST_UNIT_DATA=${resolvePath ./data} ${stdenv.hostPlatform.emulator buildPackages} ${lib.getExe finalAttrs.finalPackage} touch $out diff --git a/src/libflake-tests/meson.build b/src/libflake-tests/meson.build index a75603970..59094abe8 100644 --- a/src/libflake-tests/meson.build +++ b/src/libflake-tests/meson.build @@ -58,7 +58,7 @@ this_exe = executable( test( meson.project_name(), this_exe, - env : asan_test_options_env + { + env : { '_NIX_TEST_UNIT_DATA' : meson.current_source_dir() / 'data', 'NIX_CONFIG' : 'extra-experimental-features = flakes', 'HOME' : meson.current_build_dir() / 'test-home', diff --git a/src/libflake-tests/package.nix b/src/libflake-tests/package.nix index 09812a57b..397ef4192 100644 --- a/src/libflake-tests/package.nix +++ b/src/libflake-tests/package.nix @@ -59,7 +59,6 @@ mkMesonExecutable (finalAttrs: { buildInputs = [ writableTmpDirAsHomeHook ]; } ('' - export ASAN_OPTIONS=abort_on_error=1:print_summary=1:detect_leaks=0 export 
_NIX_TEST_UNIT_DATA=${resolvePath ./data} export NIX_CONFIG="extra-experimental-features = flakes" ${stdenv.hostPlatform.emulator buildPackages} ${lib.getExe finalAttrs.finalPackage} diff --git a/src/libstore-tests/meson.build b/src/libstore-tests/meson.build index 399e2abd5..e8e90ad81 100644 --- a/src/libstore-tests/meson.build +++ b/src/libstore-tests/meson.build @@ -104,7 +104,7 @@ this_exe = executable( test( meson.project_name(), this_exe, - env : asan_test_options_env + { + env : { '_NIX_TEST_UNIT_DATA' : meson.current_source_dir() / 'data', 'HOME' : meson.current_build_dir() / 'test-home', 'NIX_REMOTE' : meson.current_build_dir() / 'test-home' / 'store', @@ -138,7 +138,7 @@ if get_option('benchmarks') benchmark( 'nix-store-benchmarks', benchmark_exe, - env : asan_test_options_env + { + env : { '_NIX_TEST_UNIT_DATA' : meson.current_source_dir() / 'data', }, ) diff --git a/src/libstore-tests/package.nix b/src/libstore-tests/package.nix index d5255f4f9..90e6af519 100644 --- a/src/libstore-tests/package.nix +++ b/src/libstore-tests/package.nix @@ -83,7 +83,6 @@ mkMesonExecutable (finalAttrs: { } ( '' - export ASAN_OPTIONS=abort_on_error=1:print_summary=1:detect_leaks=0 export _NIX_TEST_UNIT_DATA=${data + "/src/libstore-tests/data"} export NIX_REMOTE=$HOME/store ${stdenv.hostPlatform.emulator buildPackages} ${lib.getExe finalAttrs.finalPackage} diff --git a/src/libutil-tests/meson.build b/src/libutil-tests/meson.build index 87af49933..d84dbbb68 100644 --- a/src/libutil-tests/meson.build +++ b/src/libutil-tests/meson.build @@ -97,7 +97,7 @@ this_exe = executable( test( meson.project_name(), this_exe, - env : asan_test_options_env + { + env : { '_NIX_TEST_UNIT_DATA' : meson.current_source_dir() / 'data', }, protocol : 'gtest', diff --git a/src/libutil-tests/package.nix b/src/libutil-tests/package.nix index 077d36a4d..c06de6894 100644 --- a/src/libutil-tests/package.nix +++ b/src/libutil-tests/package.nix @@ -61,7 +61,6 @@ mkMesonExecutable (finalAttrs: { mkdir -p "$HOME" '' + '' - export ASAN_OPTIONS=abort_on_error=1:print_summary=1:detect_leaks=0 export _NIX_TEST_UNIT_DATA=${./data} ${stdenv.hostPlatform.emulator buildPackages} ${lib.getExe finalAttrs.finalPackage} touch $out diff --git a/src/nix/meson.build b/src/nix/meson.build index 9bee2d147..e989e8016 100644 --- a/src/nix/meson.build +++ b/src/nix/meson.build @@ -61,7 +61,6 @@ subdir('nix-meson-build-support/generate-header') nix_sources = [ config_priv_h ] + files( 'add-to-store.cc', 'app.cc', - 'asan-options.cc', 'build.cc', 'bundle.cc', 'cat.cc', diff --git a/tests/functional/test-libstoreconsumer/main.cc b/tests/functional/test-libstoreconsumer/main.cc index 5b0132934..6cfe50047 100644 --- a/tests/functional/test-libstoreconsumer/main.cc +++ b/tests/functional/test-libstoreconsumer/main.cc @@ -5,13 +5,6 @@ using namespace nix; -extern "C" [[gnu::retain]] const char * __asan_default_options() -{ - // We leak a bunch of memory knowingly on purpose. It's not worthwhile to - // diagnose that memory being leaked for now. 
- return "abort_on_error=1:print_summary=1:detect_leaks=0"; -} - int main(int argc, char ** argv) { try { diff --git a/tests/functional/test-libstoreconsumer/meson.build b/tests/functional/test-libstoreconsumer/meson.build index 7c95b0c4a..b2f1c1ca3 100644 --- a/tests/functional/test-libstoreconsumer/meson.build +++ b/tests/functional/test-libstoreconsumer/meson.build @@ -1,11 +1,12 @@ cxx = meson.get_compiler('cpp') +deps_other = [] subdir('nix-meson-build-support/common/asan-options') libstoreconsumer_tester = executable( 'test-libstoreconsumer', 'main.cc', - dependencies : [ + dependencies : deps_other + [ dependency('nix-store'), ], build_by_default : false, From 199b6ff3fb91f7d7c81f5bfaaaea0935bd2fcbea Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sun, 12 Oct 2025 17:36:45 +0300 Subject: [PATCH 296/332] Disable detect_odr_violation for ASan There's some unfortunate ODR violations that get dianosed with GCC but not Clang for static inline constexpr variables defined inside the class body: template struct static_const { static JSON_INLINE_VARIABLE constexpr T value{}; }; This can be ignored pretty much. There is the same problem for std::piecewise_construct: http://lists.boost.org/Archives/boost/2007/06/123353.php ==2455704==ERROR: AddressSanitizer: odr-violation (0x7efddc460e20): [1] size=1 'value' /nix/store/235hvgzcbl06fxy53515q8sr6lljvf68-nlohmann_json-3.11.3/include/nlohmann/detail/meta/cpp_future.hpp:156:45 in /nix/store/pkmljfq97a83dbanr0n64zbm8cyhna33-nix-store-2.33.0pre/lib/libnixstore.so.2.33.0 [2] size=1 'value' /nix/store/235hvgzcbl06fxy53515q8sr6lljvf68-nlohmann_json-3.11.3/include/nlohmann/detail/meta/cpp_future.hpp:156:45 in /nix/store/gbjpkjj0g8vk20fzlyrwj491gwp6g1qw-nix-util-2.33.0pre/lib/libnixutil.so.2.33.0 --- nix-meson-build-support/common/asan-options/asan-options.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nix-meson-build-support/common/asan-options/asan-options.cc b/nix-meson-build-support/common/asan-options/asan-options.cc index 651354bac..c9782fea0 100644 --- a/nix-meson-build-support/common/asan-options/asan-options.cc +++ b/nix-meson-build-support/common/asan-options/asan-options.cc @@ -2,5 +2,5 @@ extern "C" [[gnu::retain, gnu::weak]] const char * __asan_default_options() { // We leak a bunch of memory knowingly on purpose. It's not worthwhile to // diagnose that memory being leaked for now. 
- return "abort_on_error=1:print_summary=1:detect_leaks=0"; + return "abort_on_error=1:print_summary=1:detect_leaks=0:detect_odr_violation=0"; } From a491173369c18bd3c079e8180ccb07c6edf49d54 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sun, 12 Oct 2025 17:42:19 +0300 Subject: [PATCH 297/332] packaging: Add withASan,withUBSan options to the scope --- ci/gha/tests/default.nix | 17 ++++------------- packaging/components.nix | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 13 deletions(-) diff --git a/ci/gha/tests/default.nix b/ci/gha/tests/default.nix index d9115f92c..09fb6ec23 100644 --- a/ci/gha/tests/default.nix +++ b/ci/gha/tests/default.nix @@ -23,16 +23,6 @@ let packages' = nixFlake.packages.${system}; stdenv = (getStdenv pkgs); - enableSanitizersLayer = finalAttrs: prevAttrs: { - mesonFlags = - (prevAttrs.mesonFlags or [ ]) - ++ [ (lib.mesonOption "b_sanitize" "address,undefined") ] - ++ (lib.optionals stdenv.cc.isClang [ - # https://www.github.com/mesonbuild/meson/issues/764 - (lib.mesonBool "b_lundef" false) - ]); - }; - collectCoverageLayer = finalAttrs: prevAttrs: { env = let @@ -55,14 +45,15 @@ let ''; }; - componentOverrides = - (lib.optional withSanitizers enableSanitizersLayer) - ++ (lib.optional withCoverage collectCoverageLayer); + componentOverrides = (lib.optional withCoverage collectCoverageLayer); in rec { nixComponentsInstrumented = nixComponents.overrideScope ( final: prev: { + withASan = withSanitizers; + withUBSan = withSanitizers; + nix-store-tests = prev.nix-store-tests.override { withBenchmarks = true; }; # Boehm is incompatible with ASAN. nix-expr = prev.nix-expr.override { enableGC = !withSanitizers; }; diff --git a/packaging/components.nix b/packaging/components.nix index 2be4fa61d..106e96723 100644 --- a/packaging/components.nix +++ b/packaging/components.nix @@ -204,6 +204,25 @@ let mesonFlags = [ (lib.mesonBool "b_asneeded" false) ] ++ prevAttrs.mesonFlags or [ ]; }; + enableSanitizersLayer = + finalAttrs: prevAttrs: + let + sanitizers = lib.optional scope.withASan "address" ++ lib.optional scope.withUBSan "undefined"; + in + { + mesonFlags = + (prevAttrs.mesonFlags or [ ]) + ++ lib.optionals (lib.length sanitizers > 0) ( + [ + (lib.mesonOption "b_sanitize" (lib.concatStringsSep "," sanitizers)) + ] + ++ (lib.optionals stdenv.cc.isClang [ + # https://www.github.com/mesonbuild/meson/issues/764 + (lib.mesonBool "b_lundef" false) + ]) + ); + }; + nixDefaultsLayer = finalAttrs: prevAttrs: { strictDeps = prevAttrs.strictDeps or true; enableParallelBuilding = true; @@ -246,6 +265,16 @@ in inherit filesetToSource; + /** + Whether meson components are built with [AddressSanitizer](https://clang.llvm.org/docs/AddressSanitizer.html). + */ + withASan = false; + + /** + Whether meson components are built with [UndefinedBehaviorSanitizer](https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html). + */ + withUBSan = false; + /** A user-provided extension function to apply to each component derivation. 
*/ @@ -332,6 +361,7 @@ in setVersionLayer mesonLayer fixupStaticLayer + enableSanitizersLayer scope.mesonComponentOverrides ]; mkMesonExecutable = mkPackageBuilder [ @@ -342,6 +372,7 @@ in mesonLayer mesonBuildLayer fixupStaticLayer + enableSanitizersLayer scope.mesonComponentOverrides ]; mkMesonLibrary = mkPackageBuilder [ @@ -353,6 +384,7 @@ in mesonBuildLayer mesonLibraryLayer fixupStaticLayer + enableSanitizersLayer scope.mesonComponentOverrides ]; From de75a180cbe89d40053b8c4c163df4fbc172c5be Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Sun, 12 Oct 2025 19:38:01 +0300 Subject: [PATCH 298/332] packaging: Add buildWithSanitizers to hydraJobs --- packaging/hydra.nix | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/packaging/hydra.nix b/packaging/hydra.nix index 9f9749bde..ae2e6ab98 100644 --- a/packaging/hydra.nix +++ b/packaging/hydra.nix @@ -158,6 +158,27 @@ in in forAllPackages (pkgName: forAllSystems (system: components.${system}.${pkgName})); + buildWithSanitizers = + let + components = forAllSystems ( + system: + let + pkgs = nixpkgsFor.${system}.native; + in + pkgs.nixComponents2.overrideScope ( + self: super: { + # Boost coroutines fail with ASAN on darwin. + withASan = !pkgs.stdenv.buildPlatform.isDarwin; + withUBSan = true; + nix-expr = super.nix-expr.override { enableGC = false; }; + # Unclear how to make Perl bindings work with a dynamically linked ASAN. + nix-perl-bindings = null; + } + ) + ); + in + forAllPackages (pkgName: forAllSystems (system: components.${system}.${pkgName})); + buildNoTests = forAllSystems (system: nixpkgsFor.${system}.native.nixComponents2.nix-cli); # Toggles some settings for better coverage. Windows needs these From 9150ccb89e1da05ac731f34a860022a69a215edd Mon Sep 17 00:00:00 2001 From: John Ericson Date: Sun, 12 Oct 2025 13:16:50 -0400 Subject: [PATCH 299/332] Fix Windows dev shell (mostly) gbenchmark still has too-narrow supported systems, however. That needs to be fixed in Nixpkgs. --- tests/functional/package.nix | 30 +++++++++++------------------- 1 file changed, 11 insertions(+), 19 deletions(-) diff --git a/tests/functional/package.nix b/tests/functional/package.nix index 1f1d10ea8..a36c2e2d3 100644 --- a/tests/functional/package.nix +++ b/tests/functional/package.nix @@ -2,16 +2,7 @@ lib, stdenv, mkMesonDerivation, - - meson, - ninja, - pkg-config, - - jq, - git, - mercurial, - util-linux, - unixtools, + buildPackages, nix-store, nix-expr, @@ -46,16 +37,17 @@ mkMesonDerivation ( ./. ]; - # Hack for sake of the dev shell + # Hack for sake of the dev shell. Need to "manually splice" since + # this isn't a specially-recognized list of dependencies. passthru.externalNativeBuildInputs = [ - meson - ninja - pkg-config + buildPackages.meson + buildPackages.ninja + buildPackages.pkg-config - jq - git - mercurial - unixtools.script + buildPackages.jq + buildPackages.git + buildPackages.mercurial + buildPackages.unixtools.script ] ++ lib.optionals stdenv.hostPlatform.isLinux [ # For various sandboxing tests that needs a statically-linked shell, @@ -64,7 +56,7 @@ mkMesonDerivation ( # For Overlay FS tests need `mount`, `umount`, and `unshare`. # For `script` command (ensuring a TTY) # TODO use `unixtools` to be precise over which executables instead? 
-    util-linux
+    buildPackages.util-linux
   ];
 
   nativeBuildInputs = finalAttrs.passthru.externalNativeBuildInputs ++ [

From 10223fae8623ce2b919f620878bf7af2a95a8680 Mon Sep 17 00:00:00 2001
From: John Ericson
Date: Sun, 12 Oct 2025 13:22:14 -0400
Subject: [PATCH 300/332] Fix windows build

I forgot to add some CPP in b57caaa1a273323b596097ab5509797b38e2e272.

Hopefully, as we rely on RAII more, these explicit resets become unneeded.
---
 src/libstore/build/derivation-building-goal.cc | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc
index 6bda17d37..037401ccb 100644
--- a/src/libstore/build/derivation-building-goal.cc
+++ b/src/libstore/build/derivation-building-goal.cc
@@ -495,7 +495,9 @@ Goal::Co DerivationBuildingGoal::tryToBuild()
                 Magenta(
                     "/usr/sbin/softwareupdate --install-rosetta && launchctl stop org.nixos.nix-daemon"));
 
+#ifndef _WIN32 // TODO enable `DerivationBuilder` on Windows
             builder.reset();
+#endif
             outputLocks.unlock();
             worker.permanentFailure = true;
             co_return doneFailure({BuildResult::Failure::InputRejected, std::move(msg)});

From 89b35ec0dce03b41a01899b04b9b116f7cdf85c5 Mon Sep 17 00:00:00 2001
From: Sergei Zimmerman
Date: Sun, 12 Oct 2025 22:10:35 +0300
Subject: [PATCH 301/332] packaging/hydra: buildNoGC is the same as buildWithSanitizers

This will reduce the load on hydra. It doesn't make sense to build 2
slightly different variations where the difference is only in the
nix-perl-bindings and additional sanitizers.
---
 packaging/hydra.nix | 17 ++++-------------
 1 file changed, 4 insertions(+), 13 deletions(-)

diff --git a/packaging/hydra.nix b/packaging/hydra.nix
index ae2e6ab98..bc75b5dfb 100644
--- a/packaging/hydra.nix
+++ b/packaging/hydra.nix
@@ -73,7 +73,7 @@ let
       ]
     );
 in
-{
+rec {
   /**
     An internal check to make sure our package listing is complete.
   */
@@ -145,18 +145,9 @@ in
         )
       );
 
-  buildNoGc =
-    let
-      components = forAllSystems (
-        system:
-        nixpkgsFor.${system}.native.nixComponents2.overrideScope (
-          self: super: {
-            nix-expr = super.nix-expr.override { enableGC = false; };
-          }
-        )
-      );
-    in
-    forAllPackages (pkgName: forAllSystems (system: components.${system}.${pkgName}));
+  # Builds with sanitizers already have GC disabled, so this buildNoGc can just
+  # point to buildWithSanitizers in order to reduce the load on hydra.
+  buildNoGc = buildWithSanitizers;
 
   buildWithSanitizers =
     let

From f0e1f652607a4423ac10393cdb9250f15fead512 Mon Sep 17 00:00:00 2001
From: Bernardo Meurer Costa
Date: Sun, 12 Oct 2025 02:00:27 +0000
Subject: [PATCH 302/332] fix(libstore): fix race condition in AWS credential provider caching

The previous implementation had a check-then-create race condition where
multiple threads could simultaneously:

1. Check the cache and find no provider (line 122)
2. Create their own providers (lines 126-145)
3. Insert into cache (line 161)

This resulted in multiple credential providers being created when downloading
multiple packages in parallel, as each .narinfo download would trigger
provider creation on its own thread.
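For illustration, the old pattern was roughly equivalent to the following
sketch (simplified; `createProvider` here stands in for the actual
CreateCredentialsProviderChainDefault / CreateCredentialsProviderProfile
calls):

    // two threads may run this concurrently for the same profile
    std::shared_ptr<Aws::Crt::Auth::ICredentialsProvider> provider;
    credentialProviderCache.visit(profile, [&](const auto & pair) { provider = pair.second; }); // both miss
    if (!provider) {
        provider = createProvider(profile);                     // both threads create a provider
        credentialProviderCache.try_emplace(profile, provider); // only one insert wins
    }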
Fix by using boost::concurrent_flat_map's try_emplace_and_cvisit, which provides atomic get-or-create semantics: - f1 callback: Called atomically during insertion, creates the provider - f2 callback: Called if key exists, returns cached provider - Other threads are blocked during f1, so no nullptr is ever visible --- src/libstore/aws-creds.cc | 85 ++++++++++++++++++++++----------------- 1 file changed, 47 insertions(+), 38 deletions(-) diff --git a/src/libstore/aws-creds.cc b/src/libstore/aws-creds.cc index cd404a554..05c11d24a 100644 --- a/src/libstore/aws-creds.cc +++ b/src/libstore/aws-creds.cc @@ -118,48 +118,57 @@ AwsCredentials getAwsCredentials(const std::string & profile) // Get or create credential provider with caching std::shared_ptr provider; - // Try to find existing provider - credentialProviderCache.visit(profile, [&](const auto & pair) { provider = pair.second; }); + // Use try_emplace_and_cvisit for atomic get-or-create + // This prevents race conditions where multiple threads create providers + credentialProviderCache.try_emplace_and_cvisit( + profile, + nullptr, // Placeholder - will be replaced in f1 before any thread can see it + [&](auto & kv) { + // f1: Called atomically during insertion with non-const reference + // Other threads are blocked until we finish, so nullptr is never visible + debug( + "[pid=%d] creating new AWS credential provider for profile '%s'", + getpid(), + profile.empty() ? "(default)" : profile.c_str()); - if (!provider) { - // Create new provider if not found - debug( - "[pid=%d] creating new AWS credential provider for profile '%s'", - getpid(), - profile.empty() ? "(default)" : profile.c_str()); + try { + initAwsCrt(); - try { - initAwsCrt(); + if (profile.empty()) { + Aws::Crt::Auth::CredentialsProviderChainDefaultConfig config; + config.Bootstrap = Aws::Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap(); + kv.second = Aws::Crt::Auth::CredentialsProvider::CreateCredentialsProviderChainDefault(config); + } else { + Aws::Crt::Auth::CredentialsProviderProfileConfig config; + config.Bootstrap = Aws::Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap(); + // This is safe because the underlying C library will copy this string + // c.f. https://github.com/awslabs/aws-c-auth/blob/main/source/credentials_provider_profile.c#L220 + config.ProfileNameOverride = Aws::Crt::ByteCursorFromCString(profile.c_str()); + kv.second = Aws::Crt::Auth::CredentialsProvider::CreateCredentialsProviderProfile(config); + } - if (profile.empty()) { - Aws::Crt::Auth::CredentialsProviderChainDefaultConfig config; - config.Bootstrap = Aws::Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap(); - provider = Aws::Crt::Auth::CredentialsProvider::CreateCredentialsProviderChainDefault(config); - } else { - Aws::Crt::Auth::CredentialsProviderProfileConfig config; - config.Bootstrap = Aws::Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap(); - // This is safe because the underlying C library will copy this string - // c.f. https://github.com/awslabs/aws-c-auth/blob/main/source/credentials_provider_profile.c#L220 - config.ProfileNameOverride = Aws::Crt::ByteCursorFromCString(profile.c_str()); - provider = Aws::Crt::Auth::CredentialsProvider::CreateCredentialsProviderProfile(config); + if (!kv.second) { + throw AwsAuthError( + "Failed to create AWS credentials provider for %s", + profile.empty() ? 
"default profile" : fmt("profile '%s'", profile)); + } + + provider = kv.second; + } catch (Error & e) { + // Exception during creation - remove the entry to allow retry + credentialProviderCache.erase(profile); + e.addTrace({}, "for AWS profile: %s", profile.empty() ? "(default)" : profile); + throw; + } catch (...) { + // Non-Error exception - still need to clean up + credentialProviderCache.erase(profile); + throw; } - } catch (Error & e) { - e.addTrace( - {}, - "while creating AWS credentials provider for %s", - profile.empty() ? "default profile" : fmt("profile '%s'", profile)); - throw; - } - - if (!provider) { - throw AwsAuthError( - "Failed to create AWS credentials provider for %s", - profile.empty() ? "default profile" : fmt("profile '%s'", profile)); - } - - // Insert into cache (try_emplace is thread-safe and won't overwrite if another thread added it) - credentialProviderCache.try_emplace(profile, provider); - } + }, + [&](const auto & kv) { + // f2: Called if key already exists (const reference) + provider = kv.second; + }); return getCredentialsFromProvider(provider); } From 18ec3d1094e821df381dc1b12b13472086bfe021 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Mon, 13 Oct 2025 01:44:40 +0300 Subject: [PATCH 303/332] libstore: Avoid copying derivations to the store if they are already valid MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This avoids the quite costly copying of derivations to the daemon over the wire in case it already exists in the eval store. For a fresh instantiatation (after running nix-collect-garbage) this doesn't significantly slow down eval: taskset -c 2,3 hyperfine --reference "result-old/bin/nix eval -f ../nixpkgs hello --store unix:///tmp/nix_socket" --prepare "nix-collect-garbage --store /tmp/store1111 --no-keep-derivations" "result/bin/nix eval -f ../nixpkgs hello --store unix:///tmp/nix_socket" Benchmark 1: result-old/bin/nix eval -f ../nixpkgs hello --store unix:///tmp/nix_socket Time (mean ± σ): 388.7 ms ± 10.5 ms [User: 157.0 ms, System: 61.3 ms] Range (min … max): 379.4 ms … 415.9 ms 10 runs Benchmark 2: result/bin/nix eval -f ../nixpkgs hello --store unix:///tmp/nix_socket Time (mean ± σ): 389.2 ms ± 4.8 ms [User: 158.5 ms, System: 60.7 ms] Range (min … max): 381.2 ms … 397.6 ms 10 runs But if the derivations are already instantiated this shows a pretty neat speedup: taskset -c 2,3 hyperfine --reference "result-old/bin/nix eval -f ../nixpkgs hello --store unix:///tmp/nix_socket" "result/bin/nix eval -f ../nixpkgs hello --store unix:///tmp/nix_socket" Benchmark 1: result-old/bin/nix eval -f ../nixpkgs hello --store unix:///tmp/nix_socket Time (mean ± σ): 240.4 ms ± 3.1 ms [User: 148.1 ms, System: 57.0 ms] Range (min … max): 233.8 ms … 245.0 ms 12 runs Benchmark 2: result/bin/nix eval -f ../nixpkgs hello --store unix:///tmp/nix_socket Time (mean ± σ): 226.5 ms ± 4.5 ms [User: 147.8 ms, System: 55.2 ms] Range (min … max): 214.0 ms … 231.2 ms 13 runs Co-authored-by: Sergei Zimmerman --- src/libstore-tests/meson.build | 1 + src/libstore-tests/write-derivation.cc | 57 ++++++++++++++++++++++++++ src/libstore/derivations.cc | 36 ++++++++-------- 3 files changed, 77 insertions(+), 17 deletions(-) create mode 100644 src/libstore-tests/write-derivation.cc diff --git a/src/libstore-tests/meson.build b/src/libstore-tests/meson.build index e8e90ad81..4d464ad89 100644 --- a/src/libstore-tests/meson.build +++ b/src/libstore-tests/meson.build @@ -83,6 +83,7 @@ sources = files( 'store-reference.cc', 
'uds-remote-store.cc', 'worker-protocol.cc', + 'write-derivation.cc', ) include_dirs = [ include_directories('.') ] diff --git a/src/libstore-tests/write-derivation.cc b/src/libstore-tests/write-derivation.cc new file mode 100644 index 000000000..3f7de05d3 --- /dev/null +++ b/src/libstore-tests/write-derivation.cc @@ -0,0 +1,57 @@ +#include +#include + +#include "nix/util/tests/gmock-matchers.hh" +#include "nix/store/derivations.hh" +#include "nix/store/dummy-store-impl.hh" +#include "nix/store/tests/libstore.hh" + +namespace nix { +namespace { + +class WriteDerivationTest : public LibStoreTest +{ +protected: + WriteDerivationTest(ref config_) + : LibStoreTest(config_->openDummyStore()) + , config(std::move(config_)) + { + config->readOnly = false; + } + + WriteDerivationTest() + : WriteDerivationTest(make_ref(DummyStoreConfig::Params{})) + { + } + + ref config; +}; + +static Derivation makeSimpleDrv() +{ + Derivation drv; + drv.name = "simple-derivation"; + drv.platform = "system"; + drv.builder = "foo"; + drv.args = {"bar", "baz"}; + drv.env = StringPairs{{"BIG_BAD", "WOLF"}}; + return drv; +} + +} // namespace + +TEST_F(WriteDerivationTest, addToStoreFromDumpCalledOnce) +{ + auto drv = makeSimpleDrv(); + + auto path1 = writeDerivation(*store, drv, NoRepair); + config->readOnly = true; + auto path2 = writeDerivation(*store, drv, NoRepair); + EXPECT_EQ(path1, path2); + EXPECT_THAT( + [&] { writeDerivation(*store, drv, Repair); }, + ::testing::ThrowsMessage(testing::HasSubstrIgnoreANSIMatcher( + "operation 'addToStoreFromDump' is not supported by store 'dummy://'"))); +} + +} // namespace nix diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc index 6d7dbc99c..f634bccfb 100644 --- a/src/libstore/derivations.cc +++ b/src/libstore/derivations.cc @@ -115,23 +115,25 @@ StorePath writeDerivation(Store & store, const Derivation & drv, RepairFlag repa held during a garbage collection). */ auto suffix = std::string(drv.name) + drvExtension; auto contents = drv.unparse(store, false); - return readOnly || settings.readOnlyMode ? store.makeFixedOutputPathFromCA( - suffix, - TextInfo{ - .hash = hashString(HashAlgorithm::SHA256, contents), - .references = std::move(references), - }) - : ({ - StringSource s{contents}; - store.addToStoreFromDump( - s, - suffix, - FileSerialisationMethod::Flat, - ContentAddressMethod::Raw::Text, - HashAlgorithm::SHA256, - references, - repair); - }); + auto hash = hashString(HashAlgorithm::SHA256, contents); + auto ca = TextInfo{.hash = hash, .references = references}; + auto path = store.makeFixedOutputPathFromCA(suffix, ca); + + if (readOnly || settings.readOnlyMode || (store.isValidPath(path) && !repair)) + return path; + + StringSource s{contents}; + auto path2 = store.addToStoreFromDump( + s, + suffix, + FileSerialisationMethod::Flat, + ContentAddressMethod::Raw::Text, + HashAlgorithm::SHA256, + references, + repair); + assert(path2 == path); + + return path; } namespace { From 000e6f628221ae94a1e08a0ba4d5b64544ffeb8d Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Fri, 10 Oct 2025 14:45:06 +0000 Subject: [PATCH 304/332] feat(libstore): add builtin fetchurl S3 credential pre-resolution Add support for pre-resolving AWS credentials in the parent process before forking for builtin:fetchurl. This avoids recreating credential providers in the forked child process. 
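The rough shape of the change is sketched below (simplified; see the diff for
the actual code):

    // parent process, before forking the builder
    RunChildArgs args{
        // only resolves anything for builtin:fetchurl with an s3:// URL
        .awsCredentials = preResolveAwsCredentials(),
    };
    pid = startProcess([this, args = std::move(args)]() {
        // the child reuses the pre-resolved credentials instead of
        // constructing a new credential provider after fork()
        runChild(std::move(args));
    });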
--- src/libstore/builtins/fetchurl.cc | 13 ++++ src/libstore/include/nix/store/builtins.hh | 13 ++++ src/libstore/unix/build/derivation-builder.cc | 68 +++++++++++++++++-- .../unix/build/linux-derivation-builder.cc | 8 ++- 4 files changed, 97 insertions(+), 5 deletions(-) diff --git a/src/libstore/builtins/fetchurl.cc b/src/libstore/builtins/fetchurl.cc index 7abfa4495..3b2d5b866 100644 --- a/src/libstore/builtins/fetchurl.cc +++ b/src/libstore/builtins/fetchurl.cc @@ -33,6 +33,7 @@ static void builtinFetchurl(const BuiltinBuilderContext & ctx) /* Note: have to use a fresh fileTransfer here because we're in a forked process. */ + debug("[pid=%d] builtin:fetchurl creating fresh FileTransfer instance", getpid()); auto fileTransfer = makeFileTransfer(); auto fetch = [&](const std::string & url) { @@ -40,6 +41,18 @@ static void builtinFetchurl(const BuiltinBuilderContext & ctx) FileTransferRequest request(ValidURL{url}); request.decompress = false; +#if NIX_WITH_CURL_S3 + // Use pre-resolved credentials if available + if (ctx.awsCredentials && request.uri.scheme() == "s3") { + debug("[pid=%d] Using pre-resolved AWS credentials from parent process", getpid()); + request.usernameAuth = UsernameAuth{ + .username = ctx.awsCredentials->accessKeyId, + .password = ctx.awsCredentials->secretAccessKey, + }; + request.preResolvedAwsSessionToken = ctx.awsCredentials->sessionToken; + } +#endif + auto decompressor = makeDecompressionSink(unpack && hasSuffix(mainUrl, ".xz") ? "xz" : "none", sink); fileTransfer->download(std::move(request), *decompressor); decompressor->finish(); diff --git a/src/libstore/include/nix/store/builtins.hh b/src/libstore/include/nix/store/builtins.hh index cc164fe82..5c15b2e9b 100644 --- a/src/libstore/include/nix/store/builtins.hh +++ b/src/libstore/include/nix/store/builtins.hh @@ -2,6 +2,11 @@ ///@file #include "nix/store/derivations.hh" +#include "nix/store/config.hh" + +#if NIX_WITH_CURL_S3 +# include "nix/store/aws-creds.hh" +#endif namespace nix { @@ -12,6 +17,14 @@ struct BuiltinBuilderContext std::string netrcData; std::string caFileData; Path tmpDirInSandbox; + +#if NIX_WITH_CURL_S3 + /** + * Pre-resolved AWS credentials for S3 URLs in builtin:fetchurl. + * When present, these should be used instead of creating new credential providers. + */ + std::optional awsCredentials; +#endif }; using BuiltinBuilder = std::function; diff --git a/src/libstore/unix/build/derivation-builder.cc b/src/libstore/unix/build/derivation-builder.cc index 0158505a5..f7bab7057 100644 --- a/src/libstore/unix/build/derivation-builder.cc +++ b/src/libstore/unix/build/derivation-builder.cc @@ -46,6 +46,12 @@ #include "store-config-private.hh" #include "build/derivation-check.hh" +#if NIX_WITH_CURL_S3 +# include "nix/store/aws-creds.hh" +# include "nix/store/s3-url.hh" +# include "nix/util/url.hh" +#endif + namespace nix { struct NotDeterministic : BuildError @@ -290,6 +296,15 @@ protected: */ virtual void startChild(); +#if NIX_WITH_CURL_S3 + /** + * Pre-resolve AWS credentials for S3 URLs in builtin:fetchurl. + * This should be called before forking to ensure credentials are available in child. + * Returns the credentials if successfully resolved, or std::nullopt otherwise. + */ + std::optional preResolveAwsCredentials(); +#endif + private: /** @@ -339,10 +354,20 @@ protected: */ void writeBuilderFile(const std::string & name, std::string_view contents); + /** + * Arguments passed to runChild(). 
+ */ + struct RunChildArgs + { +#if NIX_WITH_CURL_S3 + std::optional awsCredentials; +#endif + }; + /** * Run the builder's process. */ - void runChild(); + void runChild(RunChildArgs args); /** * Move the current process into the chroot, if any. Called early @@ -920,11 +945,43 @@ void DerivationBuilderImpl::openSlave() throw SysError("cannot pipe standard error into log file"); } +#if NIX_WITH_CURL_S3 +std::optional DerivationBuilderImpl::preResolveAwsCredentials() +{ + if (drv.isBuiltin() && drv.builder == "builtin:fetchurl") { + auto url = drv.env.find("url"); + if (url != drv.env.end()) { + try { + auto parsedUrl = parseURL(url->second); + if (parsedUrl.scheme == "s3") { + debug("Pre-resolving AWS credentials for S3 URL in builtin:fetchurl"); + auto s3Url = ParsedS3URL::parse(parsedUrl); + + // Use the preResolveAwsCredentials from aws-creds + auto credentials = nix::preResolveAwsCredentials(s3Url); + debug("Successfully pre-resolved AWS credentials in parent process"); + return credentials; + } + } catch (const std::exception & e) { + debug("Error pre-resolving S3 credentials: %s", e.what()); + } + } + } + return std::nullopt; +} +#endif + void DerivationBuilderImpl::startChild() { - pid = startProcess([&]() { + RunChildArgs args{ +#if NIX_WITH_CURL_S3 + .awsCredentials = preResolveAwsCredentials(), +#endif + }; + + pid = startProcess([this, args = std::move(args)]() { openSlave(); - runChild(); + runChild(std::move(args)); }); } @@ -1181,7 +1238,7 @@ void DerivationBuilderImpl::writeBuilderFile(const std::string & name, std::stri chownToBuilder(fd.get(), path); } -void DerivationBuilderImpl::runChild() +void DerivationBuilderImpl::runChild(RunChildArgs args) { /* Warning: in the child we should absolutely not make any SQLite calls! */ @@ -1198,6 +1255,9 @@ void DerivationBuilderImpl::runChild() BuiltinBuilderContext ctx{ .drv = drv, .tmpDirInSandbox = tmpDirInSandbox(), +#if NIX_WITH_CURL_S3 + .awsCredentials = args.awsCredentials, +#endif }; if (drv.isBuiltin() && drv.builder == "builtin:fetchurl") { diff --git a/src/libstore/unix/build/linux-derivation-builder.cc b/src/libstore/unix/build/linux-derivation-builder.cc index f6e910d08..be064566f 100644 --- a/src/libstore/unix/build/linux-derivation-builder.cc +++ b/src/libstore/unix/build/linux-derivation-builder.cc @@ -276,6 +276,12 @@ struct ChrootLinuxDerivationBuilder : ChrootDerivationBuilder, LinuxDerivationBu void startChild() override { + RunChildArgs args{ +# if NIX_WITH_CURL_S3 + .awsCredentials = preResolveAwsCredentials(), +# endif + }; + /* Set up private namespaces for the build: - The PID namespace causes the build to start as PID 1. @@ -343,7 +349,7 @@ struct ChrootLinuxDerivationBuilder : ChrootDerivationBuilder, LinuxDerivationBu if (usingUserNamespace) options.cloneFlags |= CLONE_NEWUSER; - pid_t child = startProcess([&]() { runChild(); }, options); + pid_t child = startProcess([this, args = std::move(args)]() { runChild(std::move(args)); }, options); writeFull(sendPid.writeSide.get(), fmt("%d\n", child)); _exit(0); From 6db86389ce89ac777d297e463021e549d6838d93 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Fri, 10 Oct 2025 19:08:38 +0200 Subject: [PATCH 305/332] util/error: Document addTrace params ... and rename e -> pos. That was weird. 
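For context, a typical call site passes a (possibly null) position plus a
format string and arguments, e.g. (illustrative, adapted from an existing
call site):

    } catch (Error & e) {
        e.addTrace({}, "while creating AWS credentials provider for %s", profile);
        throw;
    }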
--- src/libutil/include/nix/util/error.hh | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/src/libutil/include/nix/util/error.hh b/src/libutil/include/nix/util/error.hh index e564ca5b9..49dd75991 100644 --- a/src/libutil/include/nix/util/error.hh +++ b/src/libutil/include/nix/util/error.hh @@ -192,13 +192,23 @@ public: err.traces.push_front(trace); } + /** + * @param pos Nullable `shared_ptr` + * @param fs Format string, see `HintFmt` + * @param args... Format string arguments. + */ template - void addTrace(std::shared_ptr && e, std::string_view fs, const Args &... args) + void addTrace(std::shared_ptr && pos, std::string_view fs, const Args &... args) { - addTrace(std::move(e), HintFmt(std::string(fs), args...)); + addTrace(std::move(pos), HintFmt(std::string(fs), args...)); } - void addTrace(std::shared_ptr && e, HintFmt hint, TracePrint print = TracePrint::Default); + /** + * @param pos Nullable `shared_ptr` + * @param hint Formatted error message + * @param print Optional, whether to always print (e.g. `addErrorContext`) + */ + void addTrace(std::shared_ptr && pos, HintFmt hint, TracePrint print = TracePrint::Default); bool hasTrace() const { From 48a5e2dde2625ebb0d7f6aa2e77051e152fb3411 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 13 Oct 2025 13:14:05 +0200 Subject: [PATCH 306/332] EvalState: add doc comment --- src/libexpr/include/nix/expr/eval.hh | 9 ++++++++- src/libutil/include/nix/util/error.hh | 10 +++++++--- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/src/libexpr/include/nix/expr/eval.hh b/src/libexpr/include/nix/expr/eval.hh index b87c45ce3..76ce62b87 100644 --- a/src/libexpr/include/nix/expr/eval.hh +++ b/src/libexpr/include/nix/expr/eval.hh @@ -508,8 +508,15 @@ private: public: + /** + * @param lookupPath Only used during construction. + * @param store The store to use for instantiation + * @param fetchSettings Must outlive the lifetime of this EvalState! + * @param settings Must outlive the lifetime of this EvalState! + * @param buildStore The store to use for builds ("import from derivation", C API `nix_string_realise`) + */ EvalState( - const LookupPath & _lookupPath, + const LookupPath & lookupPath, ref store, const fetchers::Settings & fetchSettings, const EvalSettings & settings, diff --git a/src/libutil/include/nix/util/error.hh b/src/libutil/include/nix/util/error.hh index 49dd75991..cc8460592 100644 --- a/src/libutil/include/nix/util/error.hh +++ b/src/libutil/include/nix/util/error.hh @@ -193,7 +193,9 @@ public: } /** - * @param pos Nullable `shared_ptr` + * Prepends an item to the error trace, as is usual for extra context. + * + * @param pos Nullable source position to put in trace item * @param fs Format string, see `HintFmt` * @param args... Format string arguments. */ @@ -204,9 +206,11 @@ public: } /** - * @param pos Nullable `shared_ptr` + * Prepends an item to the error trace, as is usual for extra context. + * + * @param pos Nullable source position to put in trace item * @param hint Formatted error message - * @param print Optional, whether to always print (e.g. 
`addErrorContext`) + * @param print Optional, whether to always print (used by `addErrorContext`) */ void addTrace(std::shared_ptr && pos, HintFmt hint, TracePrint print = TracePrint::Default); From 5dcfddf9972fadf3a188397757eb1727289ab854 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 13 Oct 2025 13:59:39 +0200 Subject: [PATCH 307/332] strings: Add optionalBracket helper --- src/libutil-tests/strings.cc | 59 +++++++++++++++++++++++++ src/libutil/include/nix/util/strings.hh | 39 ++++++++++++++++ src/libutil/strings.cc | 14 ++++++ 3 files changed, 112 insertions(+) diff --git a/src/libutil-tests/strings.cc b/src/libutil-tests/strings.cc index bd740ce0c..dbbecd514 100644 --- a/src/libutil-tests/strings.cc +++ b/src/libutil-tests/strings.cc @@ -494,4 +494,63 @@ TEST(shellSplitString, testUnbalancedQuotes) ASSERT_THROW(shellSplitString("foo\"bar\\\""), Error); } +/* ---------------------------------------------------------------------------- + * optionalBracket + * --------------------------------------------------------------------------*/ + +TEST(optionalBracket, emptyContent) +{ + ASSERT_EQ(optionalBracket(" (", "", ")"), ""); +} + +TEST(optionalBracket, nonEmptyContent) +{ + ASSERT_EQ(optionalBracket(" (", "foo", ")"), " (foo)"); +} + +TEST(optionalBracket, emptyPrefixAndSuffix) +{ + ASSERT_EQ(optionalBracket("", "foo", ""), "foo"); +} + +TEST(optionalBracket, emptyContentEmptyBrackets) +{ + ASSERT_EQ(optionalBracket("", "", ""), ""); +} + +TEST(optionalBracket, complexBrackets) +{ + ASSERT_EQ(optionalBracket(" [[[", "content", "]]]"), " [[[content]]]"); +} + +TEST(optionalBracket, onlyPrefix) +{ + ASSERT_EQ(optionalBracket("prefix", "content", ""), "prefixcontent"); +} + +TEST(optionalBracket, onlySuffix) +{ + ASSERT_EQ(optionalBracket("", "content", "suffix"), "contentsuffix"); +} + +TEST(optionalBracket, optionalWithValue) +{ + ASSERT_EQ(optionalBracket(" (", std::optional("foo"), ")"), " (foo)"); +} + +TEST(optionalBracket, optionalNullopt) +{ + ASSERT_EQ(optionalBracket(" (", std::optional(std::nullopt), ")"), ""); +} + +TEST(optionalBracket, optionalEmptyString) +{ + ASSERT_EQ(optionalBracket(" (", std::optional(""), ")"), ""); +} + +TEST(optionalBracket, optionalStringViewWithValue) +{ + ASSERT_EQ(optionalBracket(" (", std::optional("bar"), ")"), " (bar)"); +} + } // namespace nix diff --git a/src/libutil/include/nix/util/strings.hh b/src/libutil/include/nix/util/strings.hh index ba37ce79f..da6decc31 100644 --- a/src/libutil/include/nix/util/strings.hh +++ b/src/libutil/include/nix/util/strings.hh @@ -3,6 +3,7 @@ #include "nix/util/types.hh" #include +#include #include #include #include @@ -93,6 +94,44 @@ extern template std::string dropEmptyInitThenConcatStringsSep(std::string_view, */ std::list shellSplitString(std::string_view s); +/** + * Conditionally wrap a string with prefix and suffix brackets. + * + * If `content` is empty, returns an empty string. + * Otherwise, returns `prefix + content + suffix`. + * + * Example: + * optionalBracket(" (", "foo", ")") == " (foo)" + * optionalBracket(" (", "", ")") == "" + * + * Design note: this would have been called `optionalParentheses`, except this + * function is more general and more explicit. Parentheses typically *also* need + * to be prefixed with a space in order to fit nicely in a piece of natural + * language. + */ +std::string optionalBracket(std::string_view prefix, std::string_view content, std::string_view suffix); + +/** + * Overload for optional content. 
+ * + * If `content` is nullopt or contains an empty string, returns an empty string. + * Otherwise, returns `prefix + *content + suffix`. + * + * Example: + * optionalBracket(" (", std::optional("foo"), ")") == " (foo)" + * optionalBracket(" (", std::nullopt, ")") == "" + * optionalBracket(" (", std::optional(""), ")") == "" + */ +template + requires std::convertible_to +std::string optionalBracket(std::string_view prefix, const std::optional & content, std::string_view suffix) +{ + if (!content || std::string_view(*content).empty()) { + return ""; + } + return optionalBracket(prefix, std::string_view(*content), suffix); +} + /** * Hash implementation that can be used for zero-copy heterogenous lookup from * P1690R1[1] in unordered containers. diff --git a/src/libutil/strings.cc b/src/libutil/strings.cc index a87567cef..c0c3d6602 100644 --- a/src/libutil/strings.cc +++ b/src/libutil/strings.cc @@ -138,4 +138,18 @@ std::list shellSplitString(std::string_view s) return result; } + +std::string optionalBracket(std::string_view prefix, std::string_view content, std::string_view suffix) +{ + if (content.empty()) { + return ""; + } + std::string result; + result.reserve(prefix.size() + content.size() + suffix.size()); + result.append(prefix); + result.append(content); + result.append(suffix); + return result; +} + } // namespace nix From 583f5e37fc508e2307fb790188791214fb646b05 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 13 Oct 2025 14:02:59 +0200 Subject: [PATCH 308/332] Refactor: use optionalBracket in nix search --- src/nix/search.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/nix/search.cc b/src/nix/search.cc index 910450e95..20bb4cd5d 100644 --- a/src/nix/search.cc +++ b/src/nix/search.cc @@ -159,7 +159,7 @@ struct CmdSearch : InstallableValueCommand, MixJSON logger->cout( "* %s%s", wrap("\e[0;1m", hiliteMatches(attrPath2, attrPathMatches, ANSI_GREEN, "\e[0;1m")), - name.version != "" ? " (" + name.version + ")" : ""); + optionalBracket(" (", name.version, ")")); if (description != "") logger->cout( " %s", hiliteMatches(description, descriptionMatches, ANSI_GREEN, ANSI_NORMAL)); From 998f93f267832c672511eed259339dd0fd142464 Mon Sep 17 00:00:00 2001 From: Soumyadip Sarkar Date: Mon, 13 Oct 2025 18:15:52 +0530 Subject: [PATCH 309/332] Fix typos --- doc/manual/source/protocols/json/derivation.md | 4 ++-- doc/manual/source/store/derivation/index.md | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/manual/source/protocols/json/derivation.md b/doc/manual/source/protocols/json/derivation.md index 566288962..cc9389f7c 100644 --- a/doc/manual/source/protocols/json/derivation.md +++ b/doc/manual/source/protocols/json/derivation.md @@ -25,7 +25,7 @@ is a JSON object with the following fields: - Version 2: Separate `method` and `hashAlgo` fields in output specs - - Verison 3: Drop store dir from store paths, just include base name. + - Version 3: Drop store dir from store paths, just include base name. Note that while this format is experimental, the maintenance of versions is best-effort, and not promised to identify every change. @@ -116,5 +116,5 @@ is a JSON object with the following fields: The environment passed to the `builder`. * `structuredAttrs`: - [Strucutured Attributes](@docroot@/store/derivation/index.md#structured-attrs), only defined if the derivation contains them. + [Structured Attributes](@docroot@/store/derivation/index.md#structured-attrs), only defined if the derivation contains them. 
   Structured attributes are JSON, and thus embedded as-is.
diff --git a/doc/manual/source/store/derivation/index.md b/doc/manual/source/store/derivation/index.md
index 0e12b4d5e..5b179273d 100644
--- a/doc/manual/source/store/derivation/index.md
+++ b/doc/manual/source/store/derivation/index.md
@@ -106,7 +106,7 @@ The system type on which the [`builder`](#attr-builder) executable is meant to b
 A necessary condition for Nix to schedule a given derivation on some [Nix instance] is for the "system" of that derivation to match that instance's [`system` configuration option] or [`extra-platforms` configuration option].
 
-By putting the `system` in each derivation, Nix allows *heterogenous* build plans, where not all steps can be run on the same machine or same sort of machine.
+By putting the `system` in each derivation, Nix allows *heterogeneous* build plans, where not all steps can be run on the same machine or same sort of machine.
 Nix can schedule builds such that it automatically builds on other platforms by [forwarding build requests](@docroot@/advanced-topics/distributed-builds.md) to other Nix instances.
 
 [`system` configuration option]: @docroot@/command-ref/conf-file.md#conf-system

From 47f427a1723ba36e4f48dc3db6dcdafa206932e6 Mon Sep 17 00:00:00 2001
From: Sergei Zimmerman
Date: Mon, 13 Oct 2025 22:05:46 +0300
Subject: [PATCH 310/332] Remove validation of URLs passed to FileTransferRequest verbatim

CURL is not very strict about validation of URLs passed to it. We should
reflect this in our handling of URLs that we get from the user in
<nix/fetchurl.nix> or builtins.fetchurl. ValidURL was an attempt to rectify
this, but it turned out to be too strict. The only good way to resolve this
is to pass (in some cases) the user-provided string verbatim to CURL. Other
usages in libfetchers still benefit from using structured ParsedURL and
validation though.

nix store prefetch-file --name foo 'https://cdn.skypack.dev/big.js@^5.2.2'
error: 'https://cdn.skypack.dev/big.js@^5.2.2' is not a valid URL: leftover
---
 src/libfetchers/tarball.cc | 12 ++---
 src/libstore/builtins/fetchurl.cc | 2 +-
 .../include/nix/store/filetransfer.hh | 4 +-
 src/libutil/include/nix/util/url.hh | 54 +++++++++++--------
 src/libutil/url.cc | 2 +-
 src/nix/prefetch.cc | 2 +-
 tests/functional/fetchurl.sh | 5 --
 7 files changed, 44 insertions(+), 37 deletions(-)

diff --git a/src/libfetchers/tarball.cc b/src/libfetchers/tarball.cc
index 31d5ab460..863a0d680 100644
--- a/src/libfetchers/tarball.cc
+++ b/src/libfetchers/tarball.cc
@@ -42,7 +42,7 @@ DownloadFileResult downloadFile(
     if (cached && !cached->expired)
         return useCached();
 
-    FileTransferRequest request(ValidURL{url});
+    FileTransferRequest request(VerbatimURL{url});
     request.headers = headers;
     if (cached)
         request.expectedETag = getStrAttr(cached->value, "etag");
@@ -107,13 +107,13 @@ DownloadFileResult downloadFile(
 static DownloadTarballResult downloadTarball_(
     const Settings & settings, const std::string & urlS, const Headers & headers, const std::string & displayPrefix)
 {
-    ValidURL url = urlS;
+    ParsedURL url = parseURL(urlS);
 
     // Some friendly error messages for common mistakes.
     // Namely lets catch when the url is a local file path, but
    // it is not in fact a tarball.
- if (url.scheme() == "file") { - std::filesystem::path localPath = renderUrlPathEnsureLegal(url.path()); + if (url.scheme == "file") { + std::filesystem::path localPath = renderUrlPathEnsureLegal(url.path); if (!exists(localPath)) { throw Error("tarball '%s' does not exist.", localPath); } @@ -164,7 +164,7 @@ static DownloadTarballResult downloadTarball_( /* Note: if the download is cached, `importTarball()` will receive no data, which causes it to import an empty tarball. */ - auto archive = !url.path().empty() && hasSuffix(toLower(url.path().back()), ".zip") ? ({ + auto archive = !url.path.empty() && hasSuffix(toLower(url.path.back()), ".zip") ? ({ /* In streaming mode, libarchive doesn't handle symlinks in zip files correctly (#10649). So write the entire file to disk so libarchive can access it @@ -178,7 +178,7 @@ static DownloadTarballResult downloadTarball_( } TarArchive{path}; }) - : TarArchive{*source}; + : TarArchive{*source}; auto tarballCache = getTarballCache(); auto parseSink = tarballCache->getFileSystemObjectSink(); auto lastModified = unpackTarfileToSink(archive, *parseSink); diff --git a/src/libstore/builtins/fetchurl.cc b/src/libstore/builtins/fetchurl.cc index 7abfa4495..df056954e 100644 --- a/src/libstore/builtins/fetchurl.cc +++ b/src/libstore/builtins/fetchurl.cc @@ -37,7 +37,7 @@ static void builtinFetchurl(const BuiltinBuilderContext & ctx) auto fetch = [&](const std::string & url) { auto source = sinkToSource([&](Sink & sink) { - FileTransferRequest request(ValidURL{url}); + FileTransferRequest request(VerbatimURL{url}); request.decompress = false; auto decompressor = makeDecompressionSink(unpack && hasSuffix(mainUrl, ".xz") ? "xz" : "none", sink); diff --git a/src/libstore/include/nix/store/filetransfer.hh b/src/libstore/include/nix/store/filetransfer.hh index 942e05a61..78ce439ae 100644 --- a/src/libstore/include/nix/store/filetransfer.hh +++ b/src/libstore/include/nix/store/filetransfer.hh @@ -95,7 +95,7 @@ struct UsernameAuth struct FileTransferRequest { - ValidURL uri; + VerbatimURL uri; Headers headers; std::string expectedETag; bool verifyTLS = true; @@ -121,7 +121,7 @@ struct FileTransferRequest std::optional preResolvedAwsSessionToken; #endif - FileTransferRequest(ValidURL uri) + FileTransferRequest(VerbatimURL uri) : uri(std::move(uri)) , parentAct(getCurActivity()) { diff --git a/src/libutil/include/nix/util/url.hh b/src/libutil/include/nix/util/url.hh index f2bd79b08..4ed80feb3 100644 --- a/src/libutil/include/nix/util/url.hh +++ b/src/libutil/include/nix/util/url.hh @@ -6,6 +6,9 @@ #include "nix/util/error.hh" #include "nix/util/canon-path.hh" +#include "nix/util/split.hh" +#include "nix/util/util.hh" +#include "nix/util/variant-wrapper.hh" namespace nix { @@ -342,8 +345,7 @@ ParsedURL fixGitURL(const std::string & url); bool isValidSchemeName(std::string_view scheme); /** - * Either a ParsedURL or a verbatim string, but the string must be a valid - * ParsedURL. This is necessary because in certain cases URI must be passed + * Either a ParsedURL or a verbatim string. This is necessary because in certain cases URI must be passed * verbatim (e.g. in builtin fetchers), since those are specified by the user. * In those cases normalizations performed by the ParsedURL might be surprising * and undesirable, since Nix must be a universal client that has to work with @@ -354,23 +356,23 @@ bool isValidSchemeName(std::string_view scheme); * * Though we perform parsing and validation for internal needs. 
*/ -struct ValidURL : private ParsedURL +struct VerbatimURL { - std::optional encoded; + using Raw = std::variant; + Raw raw; - ValidURL(std::string str) - : ParsedURL(parseURL(str, /*lenient=*/false)) - , encoded(std::move(str)) + VerbatimURL(std::string_view s) + : raw(std::string{s}) { } - ValidURL(std::string_view str) - : ValidURL(std::string{str}) + VerbatimURL(std::string s) + : raw(std::move(s)) { } - ValidURL(ParsedURL parsed) - : ParsedURL{std::move(parsed)} + VerbatimURL(ParsedURL url) + : raw(std::move(url)) { } @@ -379,25 +381,35 @@ struct ValidURL : private ParsedURL */ std::string to_string() const { - return encoded.or_else([&]() -> std::optional { return ParsedURL::to_string(); }).value(); + return std::visit( + overloaded{ + [](const std::string & str) { return str; }, [](const ParsedURL & url) { return url.to_string(); }}, + raw); } - const ParsedURL & parsed() const & + const ParsedURL parsed() const { - return *this; + return std::visit( + overloaded{ + [](const std::string & str) { return parseURL(str); }, [](const ParsedURL & url) { return url; }}, + raw); } std::string_view scheme() const & { - return ParsedURL::scheme; - } - - const auto & path() const & - { - return ParsedURL::path; + return std::visit( + overloaded{ + [](std::string_view str) { + auto scheme = splitPrefixTo(str, ':'); + if (!scheme) + throw BadURL("URL '%s' doesn't have a scheme", str); + return *scheme; + }, + [](const ParsedURL & url) -> std::string_view { return url.scheme; }}, + raw); } }; -std::ostream & operator<<(std::ostream & os, const ValidURL & url); +std::ostream & operator<<(std::ostream & os, const VerbatimURL & url); } // namespace nix diff --git a/src/libutil/url.cc b/src/libutil/url.cc index a50de0944..7410e4062 100644 --- a/src/libutil/url.cc +++ b/src/libutil/url.cc @@ -434,7 +434,7 @@ bool isValidSchemeName(std::string_view s) return std::regex_match(s.begin(), s.end(), regex, std::regex_constants::match_default); } -std::ostream & operator<<(std::ostream & os, const ValidURL & url) +std::ostream & operator<<(std::ostream & os, const VerbatimURL & url) { os << url.to_string(); return os; diff --git a/src/nix/prefetch.cc b/src/nix/prefetch.cc index 26905e34c..18abfa0aa 100644 --- a/src/nix/prefetch.cc +++ b/src/nix/prefetch.cc @@ -105,7 +105,7 @@ std::tuple prefetchFile( FdSink sink(fd.get()); - FileTransferRequest req(ValidURL{url}); + FileTransferRequest req(VerbatimURL{url}); req.decompress = false; getFileTransfer()->download(std::move(req), sink); } diff --git a/tests/functional/fetchurl.sh b/tests/functional/fetchurl.sh index 5bc8ca625..c25ac3216 100755 --- a/tests/functional/fetchurl.sh +++ b/tests/functional/fetchurl.sh @@ -88,8 +88,3 @@ requireDaemonNewerThan "2.20" expected=100 if [[ -v NIX_DAEMON_PACKAGE ]]; then expected=1; fi # work around the daemon not returning a 100 status correctly expectStderr $expected nix-build --expr '{ url }: builtins.derivation { name = "nix-cache-info"; system = "x86_64-linux"; builder = "builtin:fetchurl"; inherit url; outputHashMode = "flat"; }' --argstr url "file://$narxz" 2>&1 | grep 'must be a fixed-output or impure derivation' - -requireDaemonNewerThan "2.32.0pre20250831" - -expect 1 nix-build --expr 'import ' --argstr name 'name' --argstr url "file://authority.not.allowed/fetchurl.sh?a=1&a=2" --no-out-link |& - grepQuiet "error: file:// URL 'file://authority.not.allowed/fetchurl.sh?a=1&a=2' has unexpected authority 'authority.not.allowed'" From 3ba221025f3d5e78e5f5fde22d704b403f2090e9 Mon Sep 17 00:00:00 2001 From: Sergei 
Zimmerman Date: Mon, 13 Oct 2025 23:50:58 +0300 Subject: [PATCH 311/332] libstore/outputs-spec: Drop usage of std::regex std::regex is a really bad tool for parsing things, since it tends to overflow the stack pretty badly. See the build failure under ASan in [^]. [^]: https://hydra.nixos.org/build/310077167/nixlog/5 --- src/libstore/outputs-spec.cc | 38 ++++++++++++++++-------------------- 1 file changed, 17 insertions(+), 21 deletions(-) diff --git a/src/libstore/outputs-spec.cc b/src/libstore/outputs-spec.cc index aacc964cd..622df5fc3 100644 --- a/src/libstore/outputs-spec.cc +++ b/src/libstore/outputs-spec.cc @@ -1,10 +1,10 @@ -#include #include +#include +#include "nix/store/path.hh" +#include "nix/store/store-dir-config.hh" #include "nix/util/util.hh" -#include "nix/util/regex-combinators.hh" #include "nix/store/outputs-spec.hh" -#include "nix/store/path-regex.hh" #include "nix/util/strings-inline.hh" namespace nix { @@ -19,31 +19,27 @@ bool OutputsSpec::contains(const std::string & outputName) const raw); } -static std::string outputSpecRegexStr = regex::either(regex::group(R"(\*)"), regex::group(regex::list(nameRegexStr))); - std::optional OutputsSpec::parseOpt(std::string_view s) { - static std::regex regex(std::string{outputSpecRegexStr}); - - std::cmatch match; - if (!std::regex_match(s.cbegin(), s.cend(), match, regex)) + try { + return parse(s); + } catch (BadStorePathName &) { return std::nullopt; - - if (match[1].matched) - return {OutputsSpec::All{}}; - - if (match[2].matched) - return OutputsSpec::Names{tokenizeString({match[2].first, match[2].second}, ",")}; - - assert(false); + } } OutputsSpec OutputsSpec::parse(std::string_view s) { - std::optional spec = parseOpt(s); - if (!spec) - throw Error("invalid outputs specifier '%s'", s); - return std::move(*spec); + using namespace std::string_view_literals; + + if (s == "*"sv) + return OutputsSpec::All{}; + + auto names = splitString(s, ","); + for (const auto & name : names) + checkName(name); + + return OutputsSpec::Names{std::move(names)}; } std::optional> ExtendedOutputsSpec::parseOpt(std::string_view s) From 0fd890a8d68b128ff4c1e8eefc063589d7910fe1 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 13 Oct 2025 14:09:49 +0200 Subject: [PATCH 312/332] Add reason string support to MissingExperimentalFeature --- src/libutil/configuration.cc | 4 ++-- src/libutil/experimental-features.cc | 8 +++++--- src/libutil/include/nix/util/configuration.hh | 2 +- src/libutil/include/nix/util/experimental-features.hh | 2 +- 4 files changed, 9 insertions(+), 7 deletions(-) diff --git a/src/libutil/configuration.cc b/src/libutil/configuration.cc index dc9d91f63..7a0ed22ea 100644 --- a/src/libutil/configuration.cc +++ b/src/libutil/configuration.cc @@ -500,10 +500,10 @@ bool ExperimentalFeatureSettings::isEnabled(const ExperimentalFeature & feature) return std::find(f.begin(), f.end(), feature) != f.end(); } -void ExperimentalFeatureSettings::require(const ExperimentalFeature & feature) const +void ExperimentalFeatureSettings::require(const ExperimentalFeature & feature, std::string reason) const { if (!isEnabled(feature)) - throw MissingExperimentalFeature(feature); + throw MissingExperimentalFeature(feature, std::move(reason)); } bool ExperimentalFeatureSettings::isEnabled(const std::optional & feature) const diff --git a/src/libutil/experimental-features.cc b/src/libutil/experimental-features.cc index 0edd5a585..11b8ceadf 100644 --- a/src/libutil/experimental-features.cc +++ b/src/libutil/experimental-features.cc @@ -1,5 +1,6 @@ 
#include "nix/util/experimental-features.hh" #include "nix/util/fmt.hh" +#include "nix/util/strings.hh" #include "nix/util/util.hh" #include @@ -376,10 +377,11 @@ std::set parseFeatures(const StringSet & rawFeatures) return res; } -MissingExperimentalFeature::MissingExperimentalFeature(ExperimentalFeature feature) +MissingExperimentalFeature::MissingExperimentalFeature(ExperimentalFeature feature, std::string reason) : Error( - "experimental Nix feature '%1%' is disabled; add '--extra-experimental-features %1%' to enable it", - showExperimentalFeature(feature)) + "experimental Nix feature '%1%' is disabled%2%; add '--extra-experimental-features %1%' to enable it", + showExperimentalFeature(feature), + Uncolored(optionalBracket(" (", reason, ")"))) , missingFeature(feature) { } diff --git a/src/libutil/include/nix/util/configuration.hh b/src/libutil/include/nix/util/configuration.hh index 65391721c..c8d7b7f24 100644 --- a/src/libutil/include/nix/util/configuration.hh +++ b/src/libutil/include/nix/util/configuration.hh @@ -463,7 +463,7 @@ struct ExperimentalFeatureSettings : Config * Require an experimental feature be enabled, throwing an error if it is * not. */ - void require(const ExperimentalFeature &) const; + void require(const ExperimentalFeature &, std::string reason = "") const; /** * `std::nullopt` pointer means no feature, which means there is nothing that could be diff --git a/src/libutil/include/nix/util/experimental-features.hh b/src/libutil/include/nix/util/experimental-features.hh index 73c4eeca4..6ffc0e0c0 100644 --- a/src/libutil/include/nix/util/experimental-features.hh +++ b/src/libutil/include/nix/util/experimental-features.hh @@ -88,7 +88,7 @@ public: */ ExperimentalFeature missingFeature; - MissingExperimentalFeature(ExperimentalFeature missingFeature); + MissingExperimentalFeature(ExperimentalFeature missingFeature, std::string reason = ""); }; /** From 71aa9a479883cdf372ed49e717abd277e58f449e Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 13 Oct 2025 14:20:08 +0200 Subject: [PATCH 313/332] Add reasons to dyndrv xp messages --- src/libexpr/primops.cc | 3 ++- src/libstore/derivations.cc | 8 ++++---- src/libstore/derived-path.cc | 6 +++++- src/libstore/downstream-placeholder.cc | 2 +- 4 files changed, 12 insertions(+), 7 deletions(-) diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index 86cb00131..5f06bf009 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -1420,7 +1420,8 @@ static void derivationStrictInternal(EvalState & state, std::string_view drvName .debugThrow(); } if (ingestionMethod == ContentAddressMethod::Raw::Text) - experimentalFeatureSettings.require(Xp::DynamicDerivations); + experimentalFeatureSettings.require( + Xp::DynamicDerivations, fmt("text-hashed derivation '%s', outputHashMode = \"text\"", drvName)); if (ingestionMethod == ContentAddressMethod::Raw::Git) experimentalFeatureSettings.require(Xp::GitHashing); }; diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc index 6d7dbc99c..b5d8d1a1c 100644 --- a/src/libstore/derivations.cc +++ b/src/libstore/derivations.cc @@ -288,7 +288,7 @@ static DerivationOutput parseDerivationOutput( if (!hashAlgoStr.empty()) { ContentAddressMethod method = ContentAddressMethod::parsePrefix(hashAlgoStr); if (method == ContentAddressMethod::Raw::Text) - xpSettings.require(Xp::DynamicDerivations); + xpSettings.require(Xp::DynamicDerivations, "text-hashed derivation output"); const auto hashAlgo = parseHashAlgo(hashAlgoStr); if (hashS == "impure"sv) { 
xpSettings.require(Xp::ImpureDerivations); @@ -426,7 +426,7 @@ Derivation parseDerivation( if (*versionS == "xp-dyn-drv"sv) { // Only version we have so far version = DerivationATermVersion::DynamicDerivations; - xpSettings.require(Xp::DynamicDerivations); + xpSettings.require(Xp::DynamicDerivations, fmt("derivation '%s', ATerm format version 'xp-dyn-drv'", name)); } else { throw FormatError("Unknown derivation ATerm format version '%s'", *versionS); } @@ -1301,7 +1301,7 @@ DerivationOutput::fromJSON(const nlohmann::json & _json, const ExperimentalFeatu auto methodAlgo = [&]() -> std::pair { ContentAddressMethod method = ContentAddressMethod::parse(getString(valueAt(json, "method"))); if (method == ContentAddressMethod::Raw::Text) - xpSettings.require(Xp::DynamicDerivations); + xpSettings.require(Xp::DynamicDerivations, "text-hashed derivation output in JSON"); auto hashAlgo = parseHashAlgo(getString(valueAt(json, "hashAlgo"))); return {std::move(method), std::move(hashAlgo)}; @@ -1454,7 +1454,7 @@ Derivation Derivation::fromJSON(const nlohmann::json & _json, const Experimental node.value = getStringSet(valueAt(json, "outputs")); auto drvs = getObject(valueAt(json, "dynamicOutputs")); for (auto & [outputId, childNode] : drvs) { - xpSettings.require(Xp::DynamicDerivations); + xpSettings.require(Xp::DynamicDerivations, fmt("dynamic output '%s' in JSON", outputId)); node.childMap[outputId] = doInput(childNode); } return node; diff --git a/src/libstore/derived-path.cc b/src/libstore/derived-path.cc index 2cf720b82..34e591666 100644 --- a/src/libstore/derived-path.cc +++ b/src/libstore/derived-path.cc @@ -85,7 +85,11 @@ void drvRequireExperiment(const SingleDerivedPath & drv, const ExperimentalFeatu [&](const SingleDerivedPath::Opaque &) { // plain drv path; no experimental features required. }, - [&](const SingleDerivedPath::Built &) { xpSettings.require(Xp::DynamicDerivations); }, + [&](const SingleDerivedPath::Built & b) { + xpSettings.require( + Xp::DynamicDerivations, + fmt("building output '%s' of '%s'", b.output, b.drvPath->getBaseStorePath().to_string())); + }, }, drv.raw()); } diff --git a/src/libstore/downstream-placeholder.cc b/src/libstore/downstream-placeholder.cc index b3ac1c8c4..30044501b 100644 --- a/src/libstore/downstream-placeholder.cc +++ b/src/libstore/downstream-placeholder.cc @@ -24,7 +24,7 @@ DownstreamPlaceholder DownstreamPlaceholder::unknownDerivation( OutputNameView outputName, const ExperimentalFeatureSettings & xpSettings) { - xpSettings.require(Xp::DynamicDerivations); + xpSettings.require(Xp::DynamicDerivations, fmt("placeholder for unknown derivation output '%s'", outputName)); auto compressed = compressHash(placeholder.hash, 20); auto clearText = "nix-computed-output:" + compressed.to_string(HashFormat::Nix32, false) + ":" + std::string{outputName}; From 39c46654880d66a1bdfe107f6726630ff831707e Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 13 Oct 2025 23:48:58 +0200 Subject: [PATCH 314/332] Store reason as a field in MissingExperimentalFeature Store the reason string as a field in the exception class rather than only embedding it in the error message. This supports better structured error handling and future JSON error reporting. Suggested by Ericson2314 in PR review. 
--- src/libutil/experimental-features.cc | 1 + src/libutil/include/nix/util/experimental-features.hh | 2 ++ 2 files changed, 3 insertions(+) diff --git a/src/libutil/experimental-features.cc b/src/libutil/experimental-features.cc index 11b8ceadf..198d021bb 100644 --- a/src/libutil/experimental-features.cc +++ b/src/libutil/experimental-features.cc @@ -383,6 +383,7 @@ MissingExperimentalFeature::MissingExperimentalFeature(ExperimentalFeature featu showExperimentalFeature(feature), Uncolored(optionalBracket(" (", reason, ")"))) , missingFeature(feature) + , reason{reason} { } diff --git a/src/libutil/include/nix/util/experimental-features.hh b/src/libutil/include/nix/util/experimental-features.hh index 6ffc0e0c0..aca14bfbb 100644 --- a/src/libutil/include/nix/util/experimental-features.hh +++ b/src/libutil/include/nix/util/experimental-features.hh @@ -88,6 +88,8 @@ public: */ ExperimentalFeature missingFeature; + std::string reason; + MissingExperimentalFeature(ExperimentalFeature missingFeature, std::string reason = ""); }; From 962862e9e00a088b27178985153783b0ff3cceed Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Oct 2025 22:00:55 +0000 Subject: [PATCH 315/332] build(deps): bump actions/create-github-app-token from 1 to 2 Bumps [actions/create-github-app-token](https://github.com/actions/create-github-app-token) from 1 to 2. - [Release notes](https://github.com/actions/create-github-app-token/releases) - [Commits](https://github.com/actions/create-github-app-token/compare/v1...v2) --- updated-dependencies: - dependency-name: actions/create-github-app-token dependency-version: '2' dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/backport.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 99b75621e..7785e53c2 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -16,7 +16,7 @@ jobs: steps: - name: Generate GitHub App token id: generate-token - uses: actions/create-github-app-token@v1 + uses: actions/create-github-app-token@v2 with: app-id: ${{ vars.CI_APP_ID }} private-key: ${{ secrets.CI_APP_PRIVATE_KEY }} From b846f27682d27f8674c586e97a758eced52912da Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Oct 2025 22:00:59 +0000 Subject: [PATCH 316/332] build(deps): bump actions/checkout from 4 to 5 Bumps [actions/checkout](https://github.com/actions/checkout) from 4 to 5. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v4...v5) --- updated-dependencies: - dependency-name: actions/checkout dependency-version: '5' dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/backport.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 99b75621e..b9abc720b 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -20,7 +20,7 @@ jobs: with: app-id: ${{ vars.CI_APP_ID }} private-key: ${{ secrets.CI_APP_PRIVATE_KEY }} - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: ref: ${{ github.event.pull_request.head.sha }} # required to find all branches From 2ee41976c22f1252a439f1940d6190b82830283b Mon Sep 17 00:00:00 2001 From: John Ericson Date: Fri, 26 Sep 2025 01:13:22 -0400 Subject: [PATCH 317/332] Fix #13247 Resolve the derivation before creating a building goal, in a context where we know what output(s) we want. That way we have a chance just to download the outputs we want. Fix #13247 (cherry picked from commit 39f6fd9b464298f37a08cfe7485271b9294fd278) --- .../build/derivation-building-goal.cc | 103 ------------------ src/libstore/build/derivation-goal.cc | 91 ++++++++++++++++ tests/functional/ca/issue-13247.sh | 5 +- 3 files changed, 92 insertions(+), 107 deletions(-) diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index 037401ccb..c00123634 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -1,7 +1,5 @@ #include "nix/store/build/derivation-building-goal.hh" -#include "nix/store/build/derivation-resolution-goal.hh" #include "nix/store/build/derivation-env-desugar.hh" -#include "nix/store/build/derivation-trampoline-goal.hh" #ifndef _WIN32 // TODO enable build hook on Windows # include "nix/store/build/hook-instance.hh" # include "nix/store/build/derivation-builder.hh" @@ -158,107 +156,6 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() /* Determine the full set of input paths. */ { - auto resolutionGoal = worker.makeDerivationResolutionGoal(drvPath, *drv, buildMode); - { - Goals waitees{resolutionGoal}; - co_await await(std::move(waitees)); - } - if (nrFailed != 0) { - co_return doneFailure({BuildResult::Failure::DependencyFailed, "resolution failed"}); - } - - if (resolutionGoal->resolvedDrv) { - auto & [pathResolved, drvResolved] = *resolutionGoal->resolvedDrv; - - /* Store the resolved derivation, as part of the record of - what we're actually building */ - writeDerivation(worker.store, drvResolved); - - /* TODO https://github.com/NixOS/nix/issues/13247 we should - let the calling goal do this, so it has a change to pass - just the output(s) it cares about. 
*/ - auto resolvedDrvGoal = - worker.makeDerivationTrampolineGoal(pathResolved, OutputsSpec::All{}, drvResolved, buildMode); - { - Goals waitees{resolvedDrvGoal}; - co_await await(std::move(waitees)); - } - - trace("resolved derivation finished"); - - auto resolvedResult = resolvedDrvGoal->buildResult; - - // No `std::visit` for coroutines yet - if (auto * successP = resolvedResult.tryGetSuccess()) { - auto & success = *successP; - SingleDrvOutputs builtOutputs; - - auto outputHashes = staticOutputHashes(worker.evalStore, *drv); - auto resolvedHashes = staticOutputHashes(worker.store, drvResolved); - - StorePathSet outputPaths; - - for (auto & outputName : drvResolved.outputNames()) { - auto outputHash = get(outputHashes, outputName); - auto resolvedHash = get(resolvedHashes, outputName); - if ((!outputHash) || (!resolvedHash)) - throw Error( - "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolve)", - worker.store.printStorePath(drvPath), - outputName); - - auto realisation = [&] { - auto take1 = get(success.builtOutputs, outputName); - if (take1) - return *take1; - - /* The above `get` should work. But stateful tracking of - outputs in resolvedResult, this can get out of sync with the - store, which is our actual source of truth. For now we just - check the store directly if it fails. */ - auto take2 = worker.evalStore.queryRealisation(DrvOutput{*resolvedHash, outputName}); - if (take2) - return *take2; - - throw Error( - "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/realisation)", - worker.store.printStorePath(pathResolved), - outputName); - }(); - - if (!drv->type().isImpure()) { - auto newRealisation = realisation; - newRealisation.id = DrvOutput{*outputHash, outputName}; - newRealisation.signatures.clear(); - if (!drv->type().isFixed()) { - auto & drvStore = worker.evalStore.isValidPath(drvPath) ? 
worker.evalStore : worker.store; - newRealisation.dependentRealisations = - drvOutputReferences(worker.store, *drv, realisation.outPath, &drvStore); - } - worker.store.signRealisation(newRealisation); - worker.store.registerDrvOutput(newRealisation); - } - outputPaths.insert(realisation.outPath); - builtOutputs.emplace(outputName, realisation); - } - - runPostBuildHook(worker.store, *logger, drvPath, outputPaths); - - auto status = success.status; - if (status == BuildResult::Success::AlreadyValid) - status = BuildResult::Success::ResolvesToAlreadyValid; - - co_return doneSuccess(success.status, std::move(builtOutputs)); - } else if (resolvedResult.tryGetFailure()) { - co_return doneFailure({ - BuildResult::Failure::DependencyFailed, - "build of resolved derivation '%s' failed", - worker.store.printStorePath(pathResolved), - }); - } else - assert(false); - } - /* If we get this far, we know no dynamic drvs inputs */ for (auto & [depDrvPath, depNode] : drv->inputDrvs.map) { diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index dc12ab55a..1f8eb1262 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -1,5 +1,6 @@ #include "nix/store/build/derivation-goal.hh" #include "nix/store/build/derivation-building-goal.hh" +#include "nix/store/build/derivation-resolution-goal.hh" #ifndef _WIN32 // TODO enable build hook on Windows # include "nix/store/build/hook-instance.hh" # include "nix/store/build/derivation-builder.hh" @@ -140,6 +141,96 @@ Goal::Co DerivationGoal::haveDerivation() worker.store.printStorePath(drvPath)); } + auto resolutionGoal = worker.makeDerivationResolutionGoal(drvPath, *drv, buildMode); + { + Goals waitees{resolutionGoal}; + co_await await(std::move(waitees)); + } + if (nrFailed != 0) { + co_return doneFailure({BuildResult::Failure::DependencyFailed, "resolution failed"}); + } + + if (resolutionGoal->resolvedDrv) { + auto & [pathResolved, drvResolved] = *resolutionGoal->resolvedDrv; + + /* Store the resolved derivation, as part of the record of + what we're actually building */ + writeDerivation(worker.store, drvResolved); + + auto resolvedDrvGoal = worker.makeDerivationGoal(pathResolved, drvResolved, wantedOutput, buildMode); + { + Goals waitees{resolvedDrvGoal}; + co_await await(std::move(waitees)); + } + + trace("resolved derivation finished"); + + auto resolvedResult = resolvedDrvGoal->buildResult; + + // No `std::visit` for coroutines yet + if (auto * successP = resolvedResult.tryGetSuccess()) { + auto & success = *successP; + auto outputHashes = staticOutputHashes(worker.evalStore, *drv); + auto resolvedHashes = staticOutputHashes(worker.store, drvResolved); + + StorePathSet outputPaths; + + auto outputHash = get(outputHashes, wantedOutput); + auto resolvedHash = get(resolvedHashes, wantedOutput); + if ((!outputHash) || (!resolvedHash)) + throw Error( + "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolve)", + worker.store.printStorePath(drvPath), + wantedOutput); + + auto realisation = [&] { + auto take1 = get(success.builtOutputs, wantedOutput); + if (take1) + return *take1; + + /* The above `get` should work. But stateful tracking of + outputs in resolvedResult, this can get out of sync with the + store, which is our actual source of truth. For now we just + check the store directly if it fails. 
*/ + auto take2 = worker.evalStore.queryRealisation(DrvOutput{*resolvedHash, wantedOutput}); + if (take2) + return *take2; + + throw Error( + "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/realisation)", + worker.store.printStorePath(pathResolved), + wantedOutput); + }(); + + if (!drv->type().isImpure()) { + auto newRealisation = realisation; + newRealisation.id = DrvOutput{*outputHash, wantedOutput}; + newRealisation.signatures.clear(); + if (!drv->type().isFixed()) { + auto & drvStore = worker.evalStore.isValidPath(drvPath) ? worker.evalStore : worker.store; + newRealisation.dependentRealisations = + drvOutputReferences(worker.store, *drv, realisation.outPath, &drvStore); + } + worker.store.signRealisation(newRealisation); + worker.store.registerDrvOutput(newRealisation); + } + outputPaths.insert(realisation.outPath); + + auto status = success.status; + if (status == BuildResult::Success::AlreadyValid) + status = BuildResult::Success::ResolvesToAlreadyValid; + + co_return doneSuccess(status, std::move(realisation)); + } else if (resolvedResult.tryGetFailure()) { + co_return doneFailure({ + BuildResult::Failure::DependencyFailed, + "build of resolved derivation '%s' failed", + worker.store.printStorePath(pathResolved), + }); + } else + assert(false); + } + /* Give up on substitution for the output we want, actually build this derivation */ auto g = worker.makeDerivationBuildingGoal(drvPath, *drv, buildMode); diff --git a/tests/functional/ca/issue-13247.sh b/tests/functional/ca/issue-13247.sh index 686d90ced..705919513 100755 --- a/tests/functional/ca/issue-13247.sh +++ b/tests/functional/ca/issue-13247.sh @@ -65,7 +65,4 @@ buildViaSubstitute use-a-prime-more-outputs^first # Should only fetch the output we asked for [[ -d "$(jq -r <"$TEST_ROOT"/a.json '.[0].outputs.out')" ]] [[ -f "$(jq -r <"$TEST_ROOT"/a.json '.[2].outputs.first')" ]] - -# Output should *not* be here, this is the bug -[[ -e "$(jq -r <"$TEST_ROOT"/a.json '.[2].outputs.second')" ]] -skipTest "bug is not yet fixed" +[[ ! -e "$(jq -r <"$TEST_ROOT"/a.json '.[2].outputs.second')" ]] From 06bb1c2f93f73fdfd93c04502fbd59f4489e4378 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Mon, 13 Oct 2025 18:40:10 -0400 Subject: [PATCH 318/332] Remove some `buildMode` default parameters Force the internals to be more explicit. 
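For instance, a call that previously relied on the implicit default now has
to name the mode explicitly (illustrative, using the post-patch
three-argument signature):

    // Before this patch the bmNormal default was implied:
    //     worker.makeDerivationBuildingGoal(drvPath, drv);
    auto g = worker.makeDerivationBuildingGoal(drvPath, drv, bmNormal);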
--- .../nix/store/build/derivation-building-goal.hh | 3 +-- .../include/nix/store/build/derivation-goal.hh | 2 +- .../store/build/derivation-resolution-goal.hh | 3 +-- src/libstore/include/nix/store/build/worker.hh | 16 +++++----------- 4 files changed, 8 insertions(+), 16 deletions(-) diff --git a/src/libstore/include/nix/store/build/derivation-building-goal.hh b/src/libstore/include/nix/store/build/derivation-building-goal.hh index edb496024..1dd11160f 100644 --- a/src/libstore/include/nix/store/build/derivation-building-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-building-goal.hh @@ -29,8 +29,7 @@ typedef enum { rpAccept, rpDecline, rpPostpone } HookReply; */ struct DerivationBuildingGoal : public Goal { - DerivationBuildingGoal( - const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode = bmNormal); + DerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode); ~DerivationBuildingGoal(); private: diff --git a/src/libstore/include/nix/store/build/derivation-goal.hh b/src/libstore/include/nix/store/build/derivation-goal.hh index e05bf1c0b..13369d889 100644 --- a/src/libstore/include/nix/store/build/derivation-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-goal.hh @@ -45,7 +45,7 @@ struct DerivationGoal : public Goal const Derivation & drv, const OutputName & wantedOutput, Worker & worker, - BuildMode buildMode = bmNormal); + BuildMode buildMode); ~DerivationGoal() = default; void timedOut(Error && ex) override diff --git a/src/libstore/include/nix/store/build/derivation-resolution-goal.hh b/src/libstore/include/nix/store/build/derivation-resolution-goal.hh index a284843f0..fb4c2a346 100644 --- a/src/libstore/include/nix/store/build/derivation-resolution-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-resolution-goal.hh @@ -35,8 +35,7 @@ struct BuilderFailureError; */ struct DerivationResolutionGoal : public Goal { - DerivationResolutionGoal( - const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode = bmNormal); + DerivationResolutionGoal(const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode); /** * If the derivation needed to be resolved, this is resulting diff --git a/src/libstore/include/nix/store/build/worker.hh b/src/libstore/include/nix/store/build/worker.hh index 9660d66b2..542e3ff33 100644 --- a/src/libstore/include/nix/store/build/worker.hh +++ b/src/libstore/include/nix/store/build/worker.hh @@ -210,32 +210,26 @@ private: std::shared_ptr initGoalIfNeeded(std::weak_ptr & goal_weak, Args &&... 
args); std::shared_ptr makeDerivationTrampolineGoal( - ref drvReq, const OutputsSpec & wantedOutputs, BuildMode buildMode = bmNormal); + ref drvReq, const OutputsSpec & wantedOutputs, BuildMode buildMode); public: std::shared_ptr makeDerivationTrampolineGoal( - const StorePath & drvPath, - const OutputsSpec & wantedOutputs, - const Derivation & drv, - BuildMode buildMode = bmNormal); + const StorePath & drvPath, const OutputsSpec & wantedOutputs, const Derivation & drv, BuildMode buildMode); std::shared_ptr makeDerivationGoal( - const StorePath & drvPath, - const Derivation & drv, - const OutputName & wantedOutput, - BuildMode buildMode = bmNormal); + const StorePath & drvPath, const Derivation & drv, const OutputName & wantedOutput, BuildMode buildMode); /** * @ref DerivationResolutionGoal "derivation resolution goal" */ std::shared_ptr - makeDerivationResolutionGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode = bmNormal); + makeDerivationResolutionGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode); /** * @ref DerivationBuildingGoal "derivation building goal" */ std::shared_ptr - makeDerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode = bmNormal); + makeDerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode); /** * @ref PathSubstitutionGoal "substitution goal" From ad893acf466ad889fecd459ed0e1554d97c27e97 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Fri, 26 Sep 2025 01:40:00 -0400 Subject: [PATCH 319/332] Fix `ca/eval-store.sh` test The refactor in the last commit fixed the bug it was supposed to fix, but introduced a new bug in that sometimes we tried to write a resolved derivation to a store before all its `inputSrcs` were in that store. The solution is to defer writing the derivation until inside `DerivationBuildingGoal`, just before we do an actual build. At this point, we are sure that all inputs in are the store. This does have the side effect of meaning we don't write down the resolved derivation in the substituting case, only the building case, but I think that is actually fine. The store that actually does the building should make a record of what it built by storing the resolved derivation. Other stores that just substitute from that store don't necessary want that derivation however. They can trust the substituter to keep the record around, or baring that, they can attempt to re resolve everything, if they need to be audited. 
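The heart of the change, excerpted from the diff below: the resolved
derivation is written only when we are definitely about to build it, at
which point it has no inputDrvs left and all of its inputSrcs are already
in the store.

    if (storeDerivation) {
        assert(drv->inputDrvs.map.empty());
        /* Store the resolved derivation, as part of the record of
           what we're actually building */
        writeDerivation(worker.store, *drv);
    }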
(cherry picked from commit c97b050a6c212d0b748303080b5604309b7abdce) --- src/libstore/build/derivation-building-goal.cc | 13 ++++++++++--- src/libstore/build/derivation-goal.cc | 16 +++++++--------- src/libstore/build/derivation-trampoline-goal.cc | 2 +- src/libstore/build/worker.cc | 15 ++++++++++----- .../nix/store/build/derivation-building-goal.hh | 14 ++++++++++++-- .../include/nix/store/build/derivation-goal.hh | 8 ++++++-- src/libstore/include/nix/store/build/worker.hh | 10 +++++++--- 7 files changed, 53 insertions(+), 25 deletions(-) diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index c00123634..4230ed465 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -26,8 +26,8 @@ namespace nix { DerivationBuildingGoal::DerivationBuildingGoal( - const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode) - : Goal(worker, gaveUpOnSubstitution()) + const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode, bool storeDerivation) + : Goal(worker, gaveUpOnSubstitution(storeDerivation)) , drvPath(drvPath) , drv{std::make_unique(drv)} , buildMode(buildMode) @@ -107,7 +107,7 @@ static void runPostBuildHook( /* At least one of the output paths could not be produced using a substitute. So we have to build instead. */ -Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() +Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution(bool storeDerivation) { Goals waitees; @@ -155,6 +155,13 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() /* Determine the full set of input paths. */ + if (storeDerivation) { + assert(drv->inputDrvs.map.empty()); + /* Store the resolved derivation, as part of the record of + what we're actually building */ + writeDerivation(worker.store, *drv); + } + { /* If we get this far, we know no dynamic drvs inputs */ diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index 1f8eb1262..b0081f709 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -30,8 +30,9 @@ DerivationGoal::DerivationGoal( const Derivation & drv, const OutputName & wantedOutput, Worker & worker, - BuildMode buildMode) - : Goal(worker, haveDerivation()) + BuildMode buildMode, + bool storeDerivation) + : Goal(worker, haveDerivation(storeDerivation)) , drvPath(drvPath) , wantedOutput(wantedOutput) , drv{std::make_unique(drv)} @@ -59,7 +60,7 @@ std::string DerivationGoal::key() }.to_string(worker.store); } -Goal::Co DerivationGoal::haveDerivation() +Goal::Co DerivationGoal::haveDerivation(bool storeDerivation) { trace("have derivation"); @@ -153,11 +154,8 @@ Goal::Co DerivationGoal::haveDerivation() if (resolutionGoal->resolvedDrv) { auto & [pathResolved, drvResolved] = *resolutionGoal->resolvedDrv; - /* Store the resolved derivation, as part of the record of - what we're actually building */ - writeDerivation(worker.store, drvResolved); - - auto resolvedDrvGoal = worker.makeDerivationGoal(pathResolved, drvResolved, wantedOutput, buildMode); + auto resolvedDrvGoal = + worker.makeDerivationGoal(pathResolved, drvResolved, wantedOutput, buildMode, /*storeDerivation=*/true); { Goals waitees{resolvedDrvGoal}; co_await await(std::move(waitees)); @@ -233,7 +231,7 @@ Goal::Co DerivationGoal::haveDerivation() /* Give up on substitution for the output we want, actually build this derivation */ - auto g = worker.makeDerivationBuildingGoal(drvPath, *drv, 
buildMode); + auto g = worker.makeDerivationBuildingGoal(drvPath, *drv, buildMode, storeDerivation); /* We will finish with it ourselves, as if we were the derivational goal. */ g->preserveException = true; diff --git a/src/libstore/build/derivation-trampoline-goal.cc b/src/libstore/build/derivation-trampoline-goal.cc index 310d23d70..963156aa5 100644 --- a/src/libstore/build/derivation-trampoline-goal.cc +++ b/src/libstore/build/derivation-trampoline-goal.cc @@ -145,7 +145,7 @@ Goal::Co DerivationTrampolineGoal::haveDerivation(StorePath drvPath, Derivation /* Build this step! */ for (auto & output : resolvedWantedOutputs) { - auto g = upcast_goal(worker.makeDerivationGoal(drvPath, drv, output, buildMode)); + auto g = upcast_goal(worker.makeDerivationGoal(drvPath, drv, output, buildMode, false)); g->preserveException = true; /* We will finish with it ourselves, as if we were the derivational goal. */ concreteDrvGoals.insert(std::move(g)); diff --git a/src/libstore/build/worker.cc b/src/libstore/build/worker.cc index f597abb63..53175a8c4 100644 --- a/src/libstore/build/worker.cc +++ b/src/libstore/build/worker.cc @@ -76,9 +76,14 @@ std::shared_ptr Worker::makeDerivationTrampolineGoal( } std::shared_ptr Worker::makeDerivationGoal( - const StorePath & drvPath, const Derivation & drv, const OutputName & wantedOutput, BuildMode buildMode) + const StorePath & drvPath, + const Derivation & drv, + const OutputName & wantedOutput, + BuildMode buildMode, + bool storeDerivation) { - return initGoalIfNeeded(derivationGoals[drvPath][wantedOutput], drvPath, drv, wantedOutput, *this, buildMode); + return initGoalIfNeeded( + derivationGoals[drvPath][wantedOutput], drvPath, drv, wantedOutput, *this, buildMode, storeDerivation); } std::shared_ptr @@ -87,10 +92,10 @@ Worker::makeDerivationResolutionGoal(const StorePath & drvPath, const Derivation return initGoalIfNeeded(derivationResolutionGoals[drvPath], drvPath, drv, *this, buildMode); } -std::shared_ptr -Worker::makeDerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode) +std::shared_ptr Worker::makeDerivationBuildingGoal( + const StorePath & drvPath, const Derivation & drv, BuildMode buildMode, bool storeDerivation) { - return initGoalIfNeeded(derivationBuildingGoals[drvPath], drvPath, drv, *this, buildMode); + return initGoalIfNeeded(derivationBuildingGoals[drvPath], drvPath, drv, *this, buildMode, storeDerivation); } std::shared_ptr diff --git a/src/libstore/include/nix/store/build/derivation-building-goal.hh b/src/libstore/include/nix/store/build/derivation-building-goal.hh index 1dd11160f..547e533e2 100644 --- a/src/libstore/include/nix/store/build/derivation-building-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-building-goal.hh @@ -29,7 +29,17 @@ typedef enum { rpAccept, rpDecline, rpPostpone } HookReply; */ struct DerivationBuildingGoal : public Goal { - DerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode); + /** + * @param storeDerivation Whether to store the derivation in + * `worker.store`. This is useful for newly-resolved derivations. In this + * case, the derivation was not created a priori, e.g. purely (or close + * enough) from evaluation of the Nix language, but also depends on the + * exact content produced by upstream builds. It is strongly advised to + * have a permanent record of such a resolved derivation in order to + * faithfully reconstruct the build history. 
+ */ + DerivationBuildingGoal( + const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode, bool storeDerivation); ~DerivationBuildingGoal(); private: @@ -99,7 +109,7 @@ private: /** * The states. */ - Co gaveUpOnSubstitution(); + Co gaveUpOnSubstitution(bool storeDerivation); Co tryToBuild(); /** diff --git a/src/libstore/include/nix/store/build/derivation-goal.hh b/src/libstore/include/nix/store/build/derivation-goal.hh index 13369d889..c5eb2fe79 100644 --- a/src/libstore/include/nix/store/build/derivation-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-goal.hh @@ -40,12 +40,16 @@ struct DerivationGoal : public Goal */ OutputName wantedOutput; + /** + * @param storeDerivation See `DerivationBuildingGoal`. This is just passed along. + */ DerivationGoal( const StorePath & drvPath, const Derivation & drv, const OutputName & wantedOutput, Worker & worker, - BuildMode buildMode); + BuildMode buildMode, + bool storeDerivation); ~DerivationGoal() = default; void timedOut(Error && ex) override @@ -80,7 +84,7 @@ private: /** * The states. */ - Co haveDerivation(); + Co haveDerivation(bool storeDerivation); /** * Return `std::nullopt` if the output is unknown, e.g. un unbuilt diff --git a/src/libstore/include/nix/store/build/worker.hh b/src/libstore/include/nix/store/build/worker.hh index 542e3ff33..bb0202dfd 100644 --- a/src/libstore/include/nix/store/build/worker.hh +++ b/src/libstore/include/nix/store/build/worker.hh @@ -217,7 +217,11 @@ public: const StorePath & drvPath, const OutputsSpec & wantedOutputs, const Derivation & drv, BuildMode buildMode); std::shared_ptr makeDerivationGoal( - const StorePath & drvPath, const Derivation & drv, const OutputName & wantedOutput, BuildMode buildMode); + const StorePath & drvPath, + const Derivation & drv, + const OutputName & wantedOutput, + BuildMode buildMode, + bool storeDerivation); /** * @ref DerivationResolutionGoal "derivation resolution goal" @@ -228,8 +232,8 @@ public: /** * @ref DerivationBuildingGoal "derivation building goal" */ - std::shared_ptr - makeDerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode); + std::shared_ptr makeDerivationBuildingGoal( + const StorePath & drvPath, const Derivation & drv, BuildMode buildMode, bool storeDerivation); /** * @ref PathSubstitutionGoal "substitution goal" From edf9163c2259b7267d9b3fe39347a22744ecdb8b Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Tue, 14 Oct 2025 02:24:03 +0300 Subject: [PATCH 320/332] libutil: Make CanonPath::root const By all means CanonPath::root must be immutable. Let's enforce this with in the code. 
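As a quick illustration (not part of the patch), accidental mutation of the
shared root now fails at compile time instead of silently changing global
state:

    CanonPath::root = CanonPath("/tmp");   // no longer compiles: root is const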
--- src/libutil/canon-path.cc | 2 +- src/libutil/include/nix/util/canon-path.hh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/libutil/canon-path.cc b/src/libutil/canon-path.cc index 07a3a6193..3b4777ef7 100644 --- a/src/libutil/canon-path.cc +++ b/src/libutil/canon-path.cc @@ -5,7 +5,7 @@ namespace nix { -CanonPath CanonPath::root = CanonPath("/"); +const CanonPath CanonPath::root = CanonPath("/"); static std::string absPathPure(std::string_view path) { diff --git a/src/libutil/include/nix/util/canon-path.hh b/src/libutil/include/nix/util/canon-path.hh index dd07929b4..a9c173d71 100644 --- a/src/libutil/include/nix/util/canon-path.hh +++ b/src/libutil/include/nix/util/canon-path.hh @@ -69,7 +69,7 @@ public: */ CanonPath(const std::vector & elems); - static CanonPath root; + static const CanonPath root; /** * If `raw` starts with a slash, return From 1633ceaff25535de9419d992dd4753c6cc221796 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Tue, 14 Oct 2025 02:33:38 +0300 Subject: [PATCH 321/332] libutil: Ensure that CanonPath does not contain NUL bytes This, alongside the other invariants of the CanonPath is important to uphold. std::filesystem happily crashes on NUL bytes in the constructor, as we've seen with `path:%00` prior to c436b7a32afaf01d62f828697ddf5c49d4f8678c. Best to stay clear of NUL bytes when we're talking about syscalls, especially on Unix where strings are null terminated. Very nice to have if we decide to switch over to pascal-style strings. --- src/libutil-tests/canon-path.cc | 9 +++++++++ src/libutil/canon-path.cc | 19 +++++++++++++++++++ src/libutil/include/nix/util/canon-path.hh | 10 ++++++---- 3 files changed, 34 insertions(+), 4 deletions(-) diff --git a/src/libutil-tests/canon-path.cc b/src/libutil-tests/canon-path.cc index 971a9cc96..aae9285c4 100644 --- a/src/libutil-tests/canon-path.cc +++ b/src/libutil-tests/canon-path.cc @@ -42,6 +42,15 @@ TEST(CanonPath, basic) } } +TEST(CanonPath, nullBytes) +{ + std::string s = "/hello/world"; + s[8] = '\0'; + ASSERT_THROW(CanonPath("/").push(std::string(1, '\0')), BadCanonPath); + ASSERT_THROW(CanonPath(std::string_view(s)), BadCanonPath); + ASSERT_THROW(CanonPath(s, CanonPath::root), BadCanonPath); +} + TEST(CanonPath, from_existing) { CanonPath p0("foo//bar/"); diff --git a/src/libutil/canon-path.cc b/src/libutil/canon-path.cc index 3b4777ef7..22ca3e066 100644 --- a/src/libutil/canon-path.cc +++ b/src/libutil/canon-path.cc @@ -3,6 +3,8 @@ #include "nix/util/file-path-impl.hh" #include "nix/util/strings-inline.hh" +#include + namespace nix { const CanonPath CanonPath::root = CanonPath("/"); @@ -12,14 +14,30 @@ static std::string absPathPure(std::string_view path) return canonPathInner(path, [](auto &, auto &) {}); } +static void ensureNoNullBytes(std::string_view s) +{ + if (std::memchr(s.data(), '\0', s.size())) [[unlikely]] { + using namespace std::string_view_literals; + auto str = replaceStrings(std::string(s), "\0"sv, "␀"sv); + throw BadCanonPath("path segment '%s' must not contain null (\\0) bytes", str); + } +} + CanonPath::CanonPath(std::string_view raw) : path(absPathPure(concatStrings("/", raw))) +{ + ensureNoNullBytes(raw); +} + +CanonPath::CanonPath(const char * raw) + : path(absPathPure(concatStrings("/", raw))) { } CanonPath::CanonPath(std::string_view raw, const CanonPath & root) : path(absPathPure(raw.size() > 0 && raw[0] == '/' ? 
raw : concatStrings(root.abs(), "/", raw))) { + ensureNoNullBytes(raw); } CanonPath::CanonPath(const std::vector & elems) @@ -80,6 +98,7 @@ void CanonPath::push(std::string_view c) { assert(c.find('/') == c.npos); assert(c != "." && c != ".."); + ensureNoNullBytes(c); if (!isRoot()) path += '/'; path += c; diff --git a/src/libutil/include/nix/util/canon-path.hh b/src/libutil/include/nix/util/canon-path.hh index a9c173d71..b9b2fff25 100644 --- a/src/libutil/include/nix/util/canon-path.hh +++ b/src/libutil/include/nix/util/canon-path.hh @@ -1,6 +1,7 @@ #pragma once ///@file +#include "nix/util/error.hh" #include #include #include @@ -12,6 +13,8 @@ namespace nix { +MakeError(BadCanonPath, Error); + /** * A canonical representation of a path. It ensures the following: * @@ -23,6 +26,8 @@ namespace nix { * * - There are no components equal to '.' or '..'. * + * - It does not contain NUL bytes. + * * `CanonPath` are "virtual" Nix paths for abstract file system objects; * they are always Unix-style paths, regardless of what OS Nix is * running on. The `/` root doesn't denote the ambient host file system @@ -51,10 +56,7 @@ public: */ CanonPath(std::string_view raw); - explicit CanonPath(const char * raw) - : CanonPath(std::string_view(raw)) - { - } + explicit CanonPath(const char * raw); struct unchecked_t {}; From 1b96a704d38b38804d317a7dac3663630ac599e7 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Tue, 14 Oct 2025 16:49:59 +0200 Subject: [PATCH 322/332] Add lazy evaluation for experimental feature reasons Wrap fmt() calls in lambdas to defer string formatting until the feature check fails. This avoids unnecessary string formatting in the common case where the feature is enabled. Addresses performance concern raised by xokdvium in PR review. --- src/libstore/derivations.cc | 7 +++++-- src/libstore/derived-path.cc | 6 +++--- src/libstore/downstream-placeholder.cc | 3 ++- src/libutil/include/nix/util/configuration.hh | 13 +++++++++++++ 4 files changed, 23 insertions(+), 6 deletions(-) diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc index b5d8d1a1c..fa8bc58ac 100644 --- a/src/libstore/derivations.cc +++ b/src/libstore/derivations.cc @@ -426,7 +426,9 @@ Derivation parseDerivation( if (*versionS == "xp-dyn-drv"sv) { // Only version we have so far version = DerivationATermVersion::DynamicDerivations; - xpSettings.require(Xp::DynamicDerivations, fmt("derivation '%s', ATerm format version 'xp-dyn-drv'", name)); + xpSettings.require(Xp::DynamicDerivations, [&] { + return fmt("derivation '%s', ATerm format version 'xp-dyn-drv'", name); + }); } else { throw FormatError("Unknown derivation ATerm format version '%s'", *versionS); } @@ -1454,7 +1456,8 @@ Derivation Derivation::fromJSON(const nlohmann::json & _json, const Experimental node.value = getStringSet(valueAt(json, "outputs")); auto drvs = getObject(valueAt(json, "dynamicOutputs")); for (auto & [outputId, childNode] : drvs) { - xpSettings.require(Xp::DynamicDerivations, fmt("dynamic output '%s' in JSON", outputId)); + xpSettings.require( + Xp::DynamicDerivations, [&] { return fmt("dynamic output '%s' in JSON", outputId); }); node.childMap[outputId] = doInput(childNode); } return node; diff --git a/src/libstore/derived-path.cc b/src/libstore/derived-path.cc index 34e591666..8d606cb41 100644 --- a/src/libstore/derived-path.cc +++ b/src/libstore/derived-path.cc @@ -86,9 +86,9 @@ void drvRequireExperiment(const SingleDerivedPath & drv, const ExperimentalFeatu // plain drv path; no experimental features required. 
}, [&](const SingleDerivedPath::Built & b) { - xpSettings.require( - Xp::DynamicDerivations, - fmt("building output '%s' of '%s'", b.output, b.drvPath->getBaseStorePath().to_string())); + xpSettings.require(Xp::DynamicDerivations, [&] { + return fmt("building output '%s' of '%s'", b.output, b.drvPath->getBaseStorePath().to_string()); + }); }, }, drv.raw()); diff --git a/src/libstore/downstream-placeholder.cc b/src/libstore/downstream-placeholder.cc index 30044501b..780717a62 100644 --- a/src/libstore/downstream-placeholder.cc +++ b/src/libstore/downstream-placeholder.cc @@ -24,7 +24,8 @@ DownstreamPlaceholder DownstreamPlaceholder::unknownDerivation( OutputNameView outputName, const ExperimentalFeatureSettings & xpSettings) { - xpSettings.require(Xp::DynamicDerivations, fmt("placeholder for unknown derivation output '%s'", outputName)); + xpSettings.require( + Xp::DynamicDerivations, [&] { return fmt("placeholder for unknown derivation output '%s'", outputName); }); auto compressed = compressHash(placeholder.hash, 20); auto clearText = "nix-computed-output:" + compressed.to_string(HashFormat::Nix32, false) + ":" + std::string{outputName}; diff --git a/src/libutil/include/nix/util/configuration.hh b/src/libutil/include/nix/util/configuration.hh index c8d7b7f24..541febdb5 100644 --- a/src/libutil/include/nix/util/configuration.hh +++ b/src/libutil/include/nix/util/configuration.hh @@ -465,6 +465,19 @@ struct ExperimentalFeatureSettings : Config */ void require(const ExperimentalFeature &, std::string reason = "") const; + /** + * Require an experimental feature be enabled, throwing an error if it is + * not. The reason is lazily evaluated only if the feature is disabled. + */ + template + requires std::invocable && std::convertible_to, std::string> + void require(const ExperimentalFeature & feature, GetReason && getReason) const + { + if (isEnabled(feature)) + return; + require(feature, getReason()); + } + /** * `std::nullopt` pointer means no feature, which means there is nothing that could be * disabled, and so the function returns true in that case. From d18f959d4fb381ec4e3a489410fb336731cff7d3 Mon Sep 17 00:00:00 2001 From: Bernardo Meurer Costa Date: Sun, 12 Oct 2025 02:07:04 +0000 Subject: [PATCH 323/332] test(nixos): add comprehensive curl-based S3 VM tests Add `curl-s3-binary-cache-store.nix` with comprehensive test coverage for the curl-based S3 implementation. 
Depends-On: #14206, #14222 --- ci/gha/tests/default.nix | 4 + tests/nixos/curl-s3-binary-cache-store.nix | 507 +++++++++++++++++++++ tests/nixos/default.nix | 2 + 3 files changed, 513 insertions(+) create mode 100644 tests/nixos/curl-s3-binary-cache-store.nix diff --git a/ci/gha/tests/default.nix b/ci/gha/tests/default.nix index 09fb6ec23..46310bc36 100644 --- a/ci/gha/tests/default.nix +++ b/ci/gha/tests/default.nix @@ -236,6 +236,10 @@ rec { # S3 binary cache store test only runs when S3 support is enabled inherit (nixosTests) s3-binary-cache-store; } + // lib.optionalAttrs (withCurlS3 == true) { + # S3 binary cache store test using curl implementation + inherit (nixosTests) curl-s3-binary-cache-store; + } // lib.optionalAttrs (!withSanitizers && !withCoverage) { # evalNixpkgs uses non-instrumented components from hydraJobs, so only run it # when not testing with sanitizers to avoid rebuilding nix diff --git a/tests/nixos/curl-s3-binary-cache-store.nix b/tests/nixos/curl-s3-binary-cache-store.nix new file mode 100644 index 000000000..53d79689c --- /dev/null +++ b/tests/nixos/curl-s3-binary-cache-store.nix @@ -0,0 +1,507 @@ +{ + lib, + config, + nixpkgs, + ... +}: + +let + pkgs = config.nodes.client.nixpkgs.pkgs; + + # Test packages - minimal packages for fast copying + pkgA = pkgs.writeText "test-package-a" "test package a"; + pkgB = pkgs.writeText "test-package-b" "test package b"; + pkgC = pkgs.writeText "test-package-c" "test package c"; + + # S3 configuration + accessKey = "BKIKJAA5BMMU2RHO6IBB"; + secretKey = "V7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12"; + +in +{ + name = "curl-s3-binary-cache-store"; + + nodes = { + server = + { + config, + lib, + pkgs, + ... + }: + { + virtualisation.writableStore = true; + virtualisation.cores = 2; + virtualisation.additionalPaths = [ + pkgA + pkgB + pkgC + ]; + environment.systemPackages = [ pkgs.minio-client ]; + nix.extraOptions = '' + experimental-features = nix-command + substituters = + ''; + services.minio = { + enable = true; + region = "eu-west-1"; + rootCredentialsFile = pkgs.writeText "minio-credentials-full" '' + MINIO_ROOT_USER=${accessKey} + MINIO_ROOT_PASSWORD=${secretKey} + ''; + }; + networking.firewall.allowedTCPPorts = [ 9000 ]; + }; + + client = + { config, pkgs, ... 
}: + { + virtualisation.writableStore = true; + virtualisation.cores = 2; + nix.extraOptions = '' + experimental-features = nix-command + substituters = + ''; + }; + }; + + testScript = + { nodes }: + # python + '' + import json + import random + import re + import uuid + + # ============================================================================ + # Configuration + # ============================================================================ + + ACCESS_KEY = '${accessKey}' + SECRET_KEY = '${secretKey}' + ENDPOINT = 'http://server:9000' + REGION = 'eu-west-1' + + PKG_A = '${pkgA}' + PKG_B = '${pkgB}' + PKG_C = '${pkgC}' + + ENV_WITH_CREDS = f"AWS_ACCESS_KEY_ID={ACCESS_KEY} AWS_SECRET_ACCESS_KEY={SECRET_KEY}" + + # ============================================================================ + # Helper Functions + # ============================================================================ + + def make_s3_url(bucket, path="", **params): + """Build S3 URL with optional path and query parameters""" + params.setdefault('endpoint', ENDPOINT) + params.setdefault('region', REGION) + query = '&'.join(f"{k}={v}" for k, v in params.items()) + bucket_and_path = f"{bucket}{path}" if path else bucket + return f"s3://{bucket_and_path}?{query}" + + def make_http_url(path): + """Build HTTP URL for direct S3 access""" + return f"{ENDPOINT}/{path}" + + def get_package_hash(pkg_path): + """Extract store hash from package path""" + return pkg_path.split("/")[-1].split("-")[0] + + def verify_content_encoding(machine, bucket, object_path, expected_encoding): + """Verify S3 object has expected Content-Encoding header""" + stat = machine.succeed(f"mc stat minio/{bucket}/{object_path}") + if "Content-Encoding" not in stat or expected_encoding not in stat: + print(f"mc stat output for {object_path}:") + print(stat) + raise Exception(f"Expected Content-Encoding: {expected_encoding} header on {object_path}") + + def verify_no_compression(machine, bucket, object_path): + """Verify S3 object has no compression headers""" + stat = machine.succeed(f"mc stat minio/{bucket}/{object_path}") + if "Content-Encoding" in stat and ("gzip" in stat or "xz" in stat): + print(f"mc stat output for {object_path}:") + print(stat) + raise Exception(f"Object {object_path} should not have compression Content-Encoding") + + def assert_count(output, pattern, expected, error_msg): + """Assert that pattern appears exactly expected times in output""" + actual = output.count(pattern) + if actual != expected: + print("Debug output:") + print(output) + raise Exception(f"{error_msg}: expected {expected}, got {actual}") + + def with_test_bucket(populate_with=[]): + """ + Decorator that creates/destroys a unique bucket for each test. + Optionally pre-populates bucket with specified packages. 
+ + Args: + populate_with: List of packages to upload before test runs + """ + def decorator(test_func): + def wrapper(): + bucket = str(uuid.uuid4()) + server.succeed(f"mc mb minio/{bucket}") + try: + if populate_with: + store_url = make_s3_url(bucket) + for pkg in populate_with: + server.succeed(f"{ENV_WITH_CREDS} nix copy --to '{store_url}' {pkg}") + test_func(bucket) + finally: + server.succeed(f"mc rb --force minio/{bucket}") + return wrapper + return decorator + + # ============================================================================ + # Test Functions + # ============================================================================ + + @with_test_bucket() + def test_credential_caching(bucket): + """Verify credential providers are cached and reused""" + print("\n=== Testing Credential Caching ===") + + store_url = make_s3_url(bucket) + output = server.succeed( + f"{ENV_WITH_CREDS} nix copy --debug --to '{store_url}' " + f"{PKG_A} {PKG_B} {PKG_C} 2>&1" + ) + + assert_count( + output, + "creating new AWS credential provider", + 1, + "Credential provider caching failed" + ) + + print("✓ Credential provider created once and cached") + + @with_test_bucket(populate_with=[PKG_A]) + def test_fetchurl_basic(bucket): + """Test builtins.fetchurl works with s3:// URLs""" + print("\n=== Testing builtins.fetchurl ===") + + client.wait_for_unit("network-addresses-eth1.service") + + cache_info_url = make_s3_url(bucket, path="/nix-cache-info") + + client.succeed( + f"{ENV_WITH_CREDS} nix eval --impure --expr " + f"'builtins.fetchurl {{ name = \"foo\"; url = \"{cache_info_url}\"; }}'" + ) + + print("✓ builtins.fetchurl works with s3:// URLs") + + @with_test_bucket() + def test_error_message_formatting(bucket): + """Verify error messages display URLs correctly""" + print("\n=== Testing Error Message Formatting ===") + + nonexistent_url = make_s3_url(bucket, path="/foo-that-does-not-exist") + expected_http_url = make_http_url(f"{bucket}/foo-that-does-not-exist") + + error_msg = client.fail( + f"{ENV_WITH_CREDS} nix eval --impure --expr " + f"'builtins.fetchurl {{ name = \"foo\"; url = \"{nonexistent_url}\"; }}' 2>&1" + ) + + if f"unable to download '{expected_http_url}': HTTP error 404" not in error_msg: + print("Actual error message:") + print(error_msg) + raise Exception("Error message formatting failed - should show actual URL, not %s placeholder") + + print("✓ Error messages format URLs correctly") + + @with_test_bucket(populate_with=[PKG_A]) + def test_fork_credential_preresolution(bucket): + """Test credential pre-resolution in forked processes""" + print("\n=== Testing Fork Credential Pre-resolution ===") + + # Get hash of nix-cache-info for fixed-output derivation + cache_info_url = make_s3_url(bucket, path="/nix-cache-info") + + cache_info_path = client.succeed( + f"{ENV_WITH_CREDS} nix eval --impure --raw --expr " + f"'builtins.fetchurl {{ name = \"nix-cache-info\"; url = \"{cache_info_url}\"; }}'" + ).strip() + + cache_info_hash = client.succeed( + f"nix hash file --type sha256 --base32 {cache_info_path}" + ).strip() + + # Build derivation with unique test ID + test_id = random.randint(0, 10000) + test_url = make_s3_url(bucket, path="/nix-cache-info", test_id=test_id) + + fetchurl_expr = """ + import {{ + name = "s3-fork-test-{id}"; + url = "{url}"; + sha256 = "{hash}"; + }} + """.format(id=test_id, url=test_url, hash=cache_info_hash) + + output = client.succeed( + f"{ENV_WITH_CREDS} nix build --debug --impure --expr '{fetchurl_expr}' 2>&1" + ) + + # Verify fork behavior + if 
"builtin:fetchurl creating fresh FileTransfer instance" not in output: + print("Debug output:") + print(output) + raise Exception("Expected to find FileTransfer creation in forked process") + + print(" ✓ Forked process creates fresh FileTransfer") + + # Verify pre-resolution in parent + required_messages = [ + "Pre-resolving AWS credentials for S3 URL in builtin:fetchurl", + "Successfully pre-resolved AWS credentials in parent process", + ] + + for msg in required_messages: + if msg not in output: + print("Debug output:") + print(output) + raise Exception(f"Missing expected message: {msg}") + + print(" ✓ Parent pre-resolves credentials") + + # Verify child uses pre-resolved credentials + if "Using pre-resolved AWS credentials from parent process" not in output: + print("Debug output:") + print(output) + raise Exception("Child should use pre-resolved credentials") + + # Extract child PID and verify it doesn't create new providers + filetransfer_match = re.search( + r'\[pid=(\d+)\] builtin:fetchurl creating fresh FileTransfer instance', + output + ) + + if not filetransfer_match: + raise Exception("Could not extract child PID from debug output") + + child_pid = filetransfer_match.group(1) + child_provider_creation = f"[pid={child_pid}] creating new AWS credential provider" + + if child_provider_creation in output: + print("Debug output:") + print(output) + raise Exception(f"Child process (pid={child_pid}) should NOT create new credential providers") + + print(" ✓ Child uses pre-resolved credentials (no new providers)") + + @with_test_bucket(populate_with=[PKG_A, PKG_B, PKG_C]) + def test_store_operations(bucket): + """Test nix store info and copy operations""" + print("\n=== Testing Store Operations ===") + + store_url = make_s3_url(bucket) + + # Verify store info works + client.succeed(f"{ENV_WITH_CREDS} nix store info --store '{store_url}' >&2") + + # Get and validate store info JSON + info_json = client.succeed(f"{ENV_WITH_CREDS} nix store info --json --store '{store_url}'") + store_info = json.loads(info_json) + + if not store_info.get("url"): + raise Exception("Store should have a URL") + + print(f" ✓ Store URL: {store_info['url']}") + + # Test copy from store + client.fail(f"nix path-info {PKG_A}") + + output = client.succeed( + f"{ENV_WITH_CREDS} nix copy --debug --no-check-sigs " + f"--from '{store_url}' {PKG_A} {PKG_B} {PKG_C} 2>&1" + ) + + assert_count( + output, + "creating new AWS credential provider", + 1, + "Client credential provider caching failed" + ) + + client.succeed(f"nix path-info {PKG_A}") + + print(" ✓ nix copy works") + print(" ✓ Credentials cached on client") + + @with_test_bucket(populate_with=[PKG_A]) + def test_url_format_variations(bucket): + """Test different S3 URL parameter combinations""" + print("\n=== Testing URL Format Variations ===") + + # Test parameter order variation (region before endpoint) + url1 = f"s3://{bucket}?region={REGION}&endpoint={ENDPOINT}" + client.succeed(f"{ENV_WITH_CREDS} nix store info --store '{url1}' >&2") + print(" ✓ Parameter order: region before endpoint works") + + # Test parameter order variation (endpoint before region) + url2 = f"s3://{bucket}?endpoint={ENDPOINT}®ion={REGION}" + client.succeed(f"{ENV_WITH_CREDS} nix store info --store '{url2}' >&2") + print(" ✓ Parameter order: endpoint before region works") + + @with_test_bucket(populate_with=[PKG_A]) + def test_concurrent_fetches(bucket): + """Validate thread safety with concurrent S3 operations""" + print("\n=== Testing Concurrent Fetches ===") + + # Get hash for test 
derivations + cache_info_url = make_s3_url(bucket, path="/nix-cache-info") + + cache_info_path = client.succeed( + f"{ENV_WITH_CREDS} nix eval --impure --raw --expr " + f"'builtins.fetchurl {{ name = \"nix-cache-info\"; url = \"{cache_info_url}\"; }}'" + ).strip() + + cache_info_hash = client.succeed( + f"nix hash file --type sha256 --base32 {cache_info_path}" + ).strip() + + # Create 5 concurrent fetch derivations + # Build base URL for concurrent test (we'll add fetch_id in Nix interpolation) + base_url = make_s3_url(bucket, path="/nix-cache-info") + concurrent_expr = """ + let + mkFetch = i: import {{ + name = "concurrent-s3-fetch-''${{toString i}}"; + url = "{url}&fetch_id=''${{toString i}}"; + sha256 = "{hash}"; + }}; + fetches = builtins.listToAttrs (map (i: {{ + name = "fetch''${{toString i}}"; + value = mkFetch i; + }}) (builtins.genList (i: i) 5)); + in fetches + """.format(url=base_url, hash=cache_info_hash) + + try: + output = client.succeed( + f"{ENV_WITH_CREDS} nix build --debug --impure " + f"--expr '{concurrent_expr}' --max-jobs 5 2>&1" + ) + except: + output = client.fail( + f"{ENV_WITH_CREDS} nix build --debug --impure " + f"--expr '{concurrent_expr}' --max-jobs 5 2>&1" + ) + + if "error:" in output.lower(): + print("Found error during concurrent fetches:") + print(output) + + providers_created = output.count("creating new AWS credential provider") + transfers_created = output.count("builtin:fetchurl creating fresh FileTransfer instance") + + print(f" ✓ {providers_created} credential providers created") + print(f" ✓ {transfers_created} FileTransfer instances created") + + if transfers_created != 5: + print("Debug output:") + print(output) + raise Exception( + f"Expected 5 FileTransfer instances for 5 concurrent fetches, got {transfers_created}" + ) + + @with_test_bucket() + def test_compression_narinfo_gzip(bucket): + """Test narinfo compression with gzip""" + print("\n=== Testing Compression: narinfo (gzip) ===") + + store_url = make_s3_url(bucket, **{'narinfo-compression': 'gzip'}) + server.succeed(f"{ENV_WITH_CREDS} nix copy --to '{store_url}' {PKG_B}") + + pkg_hash = get_package_hash(PKG_B) + verify_content_encoding(server, bucket, f"{pkg_hash}.narinfo", "gzip") + + print(" ✓ .narinfo has Content-Encoding: gzip") + + # Verify client can download and decompress + client.succeed(f"{ENV_WITH_CREDS} nix copy --from '{store_url}' --no-check-sigs {PKG_B}") + client.succeed(f"nix path-info {PKG_B}") + + print(" ✓ Client decompressed .narinfo successfully") + + @with_test_bucket() + def test_compression_mixed(bucket): + """Test mixed compression (narinfo=xz, ls=gzip)""" + print("\n=== Testing Compression: mixed (narinfo=xz, ls=gzip) ===") + + store_url = make_s3_url( + bucket, + **{'narinfo-compression': 'xz', 'write-nar-listing': 'true', 'ls-compression': 'gzip'} + ) + + server.succeed(f"{ENV_WITH_CREDS} nix copy --to '{store_url}' {PKG_C}") + + pkg_hash = get_package_hash(PKG_C) + + # Verify .narinfo has xz compression + verify_content_encoding(server, bucket, f"{pkg_hash}.narinfo", "xz") + print(" ✓ .narinfo has Content-Encoding: xz") + + # Verify .ls has gzip compression + verify_content_encoding(server, bucket, f"{pkg_hash}.ls", "gzip") + print(" ✓ .ls has Content-Encoding: gzip") + + # Verify client can download with mixed compression + client.succeed(f"{ENV_WITH_CREDS} nix copy --from '{store_url}' --no-check-sigs {PKG_C}") + client.succeed(f"nix path-info {PKG_C}") + + print(" ✓ Client downloaded package with mixed compression") + + @with_test_bucket() + def 
test_compression_disabled(bucket): + """Verify no compression by default""" + print("\n=== Testing Compression: disabled (default) ===") + + store_url = make_s3_url(bucket) + server.succeed(f"{ENV_WITH_CREDS} nix copy --to '{store_url}' {PKG_A}") + + pkg_hash = get_package_hash(PKG_A) + verify_no_compression(server, bucket, f"{pkg_hash}.narinfo") + + print(" ✓ No compression applied by default") + + # ============================================================================ + # Main Test Execution + # ============================================================================ + + print("\n" + "="*80) + print("S3 Binary Cache Store Tests") + print("="*80) + + start_all() + + # Initialize MinIO server + server.wait_for_unit("minio") + server.wait_for_unit("network-addresses-eth1.service") + server.wait_for_open_port(9000) + server.succeed(f"mc config host add minio http://localhost:9000 {ACCESS_KEY} {SECRET_KEY} --api s3v4") + + # Run tests (each gets isolated bucket via decorator) + test_credential_caching() + test_fetchurl_basic() + test_error_message_formatting() + test_fork_credential_preresolution() + test_store_operations() + test_url_format_variations() + test_concurrent_fetches() + test_compression_narinfo_gzip() + test_compression_mixed() + test_compression_disabled() + + print("\n" + "="*80) + print("✓ All S3 Binary Cache Store Tests Passed!") + print("="*80) + ''; +} diff --git a/tests/nixos/default.nix b/tests/nixos/default.nix index edfa4124f..ea6a7e914 100644 --- a/tests/nixos/default.nix +++ b/tests/nixos/default.nix @@ -201,6 +201,8 @@ in s3-binary-cache-store = runNixOSTest ./s3-binary-cache-store.nix; + curl-s3-binary-cache-store = runNixOSTest ./curl-s3-binary-cache-store.nix; + fsync = runNixOSTest ./fsync.nix; cgroups = runNixOSTest ./cgroups; From 0c32fb3fa2d66448615744b502d06b6dea21d66e Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Tue, 14 Oct 2025 23:58:18 +0300 Subject: [PATCH 324/332] treewide: Add Store::requireStoreObjectAccessor, simplify uses of getFSAccessor This is a simple wrapper around getFSAccessor that throws an InvalidPath error. This simplifies usage in callsites that only care about getting a non-null accessor. 
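Illustrative usage, mirroring the call sites updated below (a `store` and
`storePath` are assumed to be in scope): callers that only want a usable
accessor no longer need to null-check and wrap the result themselves.

    // Throws InvalidPath if the store object is missing or invalid.
    auto accessor = store->requireStoreObjectAccessor(storePath);
    auto contents = accessor->readFile(CanonPath::root);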
--- src/libfetchers/fetchers.cc | 3 +-- src/libfetchers/github.cc | 10 ++++++---- src/libfetchers/mercurial.cc | 4 +--- src/libfetchers/path.cc | 2 +- src/libstore/include/nix/store/store-api.hh | 20 +++++++++++++++++++- src/libstore/store-api.cc | 2 +- src/nix/cat.cc | 5 +---- src/nix/ls.cc | 5 +---- src/nix/nix-store/nix-store.cc | 2 +- src/nix/why-depends.cc | 2 +- 10 files changed, 33 insertions(+), 22 deletions(-) diff --git a/src/libfetchers/fetchers.cc b/src/libfetchers/fetchers.cc index f697ec6f5..7c741a7a3 100644 --- a/src/libfetchers/fetchers.cc +++ b/src/libfetchers/fetchers.cc @@ -332,8 +332,7 @@ std::pair, Input> Input::getAccessorUnchecked(ref sto debug("using substituted/cached input '%s' in '%s'", to_string(), store->printStorePath(storePath)); - // We just ensured the store object was there - auto accessor = ref{store->getFSAccessor(storePath)}; + auto accessor = store->requireStoreObjectAccessor(storePath); accessor->fingerprint = getFingerprint(store); diff --git a/src/libfetchers/github.cc b/src/libfetchers/github.cc index a905bb384..2479a57d2 100644 --- a/src/libfetchers/github.cc +++ b/src/libfetchers/github.cc @@ -399,7 +399,8 @@ struct GitHubInputScheme : GitArchiveInputScheme Headers headers = makeHeadersWithAuthTokens(*input.settings, host, input); auto downloadResult = downloadFile(store, *input.settings, url, "source", headers); - auto json = nlohmann::json::parse(store->getFSAccessor(downloadResult.storePath)->readFile(CanonPath::root)); + auto json = nlohmann::json::parse( + store->requireStoreObjectAccessor(downloadResult.storePath)->readFile(CanonPath::root)); return RefInfo{ .rev = Hash::parseAny(std::string{json["sha"]}, HashAlgorithm::SHA1), @@ -473,7 +474,8 @@ struct GitLabInputScheme : GitArchiveInputScheme Headers headers = makeHeadersWithAuthTokens(*input.settings, host, input); auto downloadResult = downloadFile(store, *input.settings, url, "source", headers); - auto json = nlohmann::json::parse(store->getFSAccessor(downloadResult.storePath)->readFile(CanonPath::root)); + auto json = nlohmann::json::parse( + store->requireStoreObjectAccessor(downloadResult.storePath)->readFile(CanonPath::root)); if (json.is_array() && json.size() >= 1 && json[0]["id"] != nullptr) { return RefInfo{.rev = Hash::parseAny(std::string(json[0]["id"]), HashAlgorithm::SHA1)}; @@ -549,7 +551,7 @@ struct SourceHutInputScheme : GitArchiveInputScheme std::string refUri; if (ref == "HEAD") { auto downloadFileResult = downloadFile(store, *input.settings, fmt("%s/HEAD", base_url), "source", headers); - auto contents = nix::ref(store->getFSAccessor(downloadFileResult.storePath))->readFile(CanonPath::root); + auto contents = store->requireStoreObjectAccessor(downloadFileResult.storePath)->readFile(CanonPath::root); auto remoteLine = git::parseLsRemoteLine(getLine(contents).first); if (!remoteLine) { @@ -563,7 +565,7 @@ struct SourceHutInputScheme : GitArchiveInputScheme auto downloadFileResult = downloadFile(store, *input.settings, fmt("%s/info/refs", base_url), "source", headers); - auto contents = nix::ref(store->getFSAccessor(downloadFileResult.storePath))->readFile(CanonPath::root); + auto contents = store->requireStoreObjectAccessor(downloadFileResult.storePath)->readFile(CanonPath::root); std::istringstream is(contents); std::string line; diff --git a/src/libfetchers/mercurial.cc b/src/libfetchers/mercurial.cc index bf460d9c6..41bf6e2aa 100644 --- a/src/libfetchers/mercurial.cc +++ b/src/libfetchers/mercurial.cc @@ -329,9 +329,7 @@ struct MercurialInputScheme : InputScheme 
Input input(_input); auto storePath = fetchToStore(store, input); - - // We just added it, it should be there. - auto accessor = ref{store->getFSAccessor(storePath)}; + auto accessor = store->requireStoreObjectAccessor(storePath); accessor->setPathDisplay("«" + input.to_string() + "»"); diff --git a/src/libfetchers/path.cc b/src/libfetchers/path.cc index aa0411ff9..c4b5e2f1e 100644 --- a/src/libfetchers/path.cc +++ b/src/libfetchers/path.cc @@ -138,7 +138,7 @@ struct PathInputScheme : InputScheme storePath = store->addToStoreFromDump(*src, "source"); } - auto accessor = ref{store->getFSAccessor(*storePath)}; + auto accessor = store->requireStoreObjectAccessor(*storePath); // To prevent `fetchToStore()` copying the path again to Nix // store, pre-create an entry in the fetcher cache. diff --git a/src/libstore/include/nix/store/store-api.hh b/src/libstore/include/nix/store/store-api.hh index 1131ec975..5c96c5f80 100644 --- a/src/libstore/include/nix/store/store-api.hh +++ b/src/libstore/include/nix/store/store-api.hh @@ -724,10 +724,28 @@ public: * the Nix store. * * @return nullptr if the store doesn't contain an object at the - * givine path. + * given path. */ virtual std::shared_ptr getFSAccessor(const StorePath & path, bool requireValidPath = true) = 0; + /** + * Get an accessor for the store object or throw an Error if it's invalid or + * doesn't exist. + * + * @throws InvalidPath if the store object doesn't exist or (if requireValidPath = true) is + * invalid. + */ + [[nodiscard]] ref requireStoreObjectAccessor(const StorePath & path, bool requireValidPath = true) + { + auto accessor = getFSAccessor(path, requireValidPath); + if (!accessor) { + throw InvalidPath( + requireValidPath ? "path '%1%' is not a valid store path" : "store path '%1%' does not exist", + printStorePath(path)); + } + return ref{accessor}; + } + /** * Repair the contents of the given path by redownloading it using * a substituter (if available). 
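Concretely (illustrative, assuming a `store` and `path` in scope as in
Worker::pathContentsGood): with a chroot store such as `--store /chroot`,
the object physically lives under /chroot/nix/store/ while printStorePath()
still returns the logical /nix/store/ path, so the old check looked in the
wrong place.

    auto logical = store.printStorePath(path);  // "/nix/store/<hash>-foo" (logical)
    bool wrong = pathExists(logical);           // inspects /nix/store/..., not /chroot/nix/store/...

Asking the store itself for an accessor keeps the check inside the store's
own view of the object.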
diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index 4ce6b15fa..1335eb76a 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -1130,7 +1130,7 @@ Derivation Store::derivationFromPath(const StorePath & drvPath) static Derivation readDerivationCommon(Store & store, const StorePath & drvPath, bool requireValidPath) { - auto accessor = store.getFSAccessor(drvPath, requireValidPath); + auto accessor = store.requireStoreObjectAccessor(drvPath, requireValidPath); try { return parseDerivation(store, accessor->readFile(CanonPath::root), Derivation::nameFromPath(drvPath)); } catch (FormatError & e) { diff --git a/src/nix/cat.cc b/src/nix/cat.cc index 145336723..effe544e6 100644 --- a/src/nix/cat.cc +++ b/src/nix/cat.cc @@ -41,10 +41,7 @@ struct CmdCatStore : StoreCommand, MixCat void run(ref store) override { auto [storePath, rest] = store->toStorePath(path); - auto accessor = store->getFSAccessor(storePath); - if (!accessor) - throw InvalidPath("path '%1%' is not a valid store path", store->printStorePath(storePath)); - cat(ref{std::move(accessor)}, CanonPath{rest}); + cat(store->requireStoreObjectAccessor(storePath), CanonPath{rest}); } }; diff --git a/src/nix/ls.cc b/src/nix/ls.cc index 4952d5243..5cdfc2c0f 100644 --- a/src/nix/ls.cc +++ b/src/nix/ls.cc @@ -115,10 +115,7 @@ struct CmdLsStore : StoreCommand, MixLs void run(ref store) override { auto [storePath, rest] = store->toStorePath(path); - auto accessor = store->getFSAccessor(storePath); - if (!accessor) - throw InvalidPath("path '%1%' is not a valid store path", store->printStorePath(storePath)); - list(ref{std::move(accessor)}, CanonPath{rest}); + list(store->requireStoreObjectAccessor(storePath), CanonPath{rest}); } }; diff --git a/src/nix/nix-store/nix-store.cc b/src/nix/nix-store/nix-store.cc index f8078426c..313a6398c 100644 --- a/src/nix/nix-store/nix-store.cc +++ b/src/nix/nix-store/nix-store.cc @@ -603,7 +603,7 @@ static void registerValidity(bool reregister, bool hashGiven, bool canonicalise) #endif if (!hashGiven) { HashResult hash = hashPath( - {ref{store->getFSAccessor(info->path, false)}}, + {store->requireStoreObjectAccessor(info->path, /*requireValidPath=*/false)}, FileSerialisationMethod::NixArchive, HashAlgorithm::SHA256); info->narHash = hash.hash; diff --git a/src/nix/why-depends.cc b/src/nix/why-depends.cc index 473827a93..dc30fabd7 100644 --- a/src/nix/why-depends.cc +++ b/src/nix/why-depends.cc @@ -207,7 +207,7 @@ struct CmdWhyDepends : SourceExprCommand, MixOperateOnOptions contain the reference. */ std::map hits; - auto accessor = store->getFSAccessor(node.path); + auto accessor = store->requireStoreObjectAccessor(node.path); auto visitPath = [&](this auto && recur, const CanonPath & p) -> void { auto st = accessor->maybeLstat(p); From 69c005e805859364bc98061852602d6ea2dd37c3 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Wed, 15 Oct 2025 00:15:48 +0300 Subject: [PATCH 325/332] libstore: Use getFSAccessor for store object in Worker::pathContentsGood We only care about the accessor for a single store object anyway, but the validity gets ignored. Also `pathExists(store.printStorePath(path))` is definitely incorrect since it confuses the logical location vs physical location in case of a chroot store. 
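For illustration, a minimal sketch of the logical/physical distinction at stake (the store path, the chroot root "/chroot" and the exact header paths are assumptions for the example, not part of this change):

    // Sketch only: with a local store opened as e.g. "local?root=/chroot",
    // printStorePath() yields the logical /nix/store/... path, while the
    // object's bits actually live underneath the chroot root.
    #include "nix/store/store-api.hh"  // Store, StorePath (header layout assumed)
    #include "nix/util/file-system.hh" // pathExists

    void demo(nix::Store & store)
    {
        // Hypothetical, syntactically valid store path used only for the example.
        auto path = store.parseStorePath(
            "/nix/store/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-example");
        auto logical = store.printStorePath(path); // "/nix/store/aaa...-example"
        auto physical = store.toRealPath(path);    // "/chroot/nix/store/aaa...-example"
        // pathExists(logical) inspects the host's /nix/store, so it can report
        // "missing" for an object that is present at `physical`. Asking the store
        // for an accessor avoids second-guessing the on-disk layout altogether.
        bool wrongPlace = nix::pathExists(logical);
        bool actualPlace = nix::pathExists(physical);
        (void) wrongPlace;
        (void) actualPlace;
    }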
--- src/libstore/build/worker.cc | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/src/libstore/build/worker.cc b/src/libstore/build/worker.cc index 53175a8c4..d23c53e77 100644 --- a/src/libstore/build/worker.cc +++ b/src/libstore/build/worker.cc @@ -529,15 +529,9 @@ bool Worker::pathContentsGood(const StorePath & path) return i->second; printInfo("checking path '%s'...", store.printStorePath(path)); auto info = store.queryPathInfo(path); - bool res; - if (!pathExists(store.printStorePath(path))) - res = false; - else { - auto current = hashPath( - {store.getFSAccessor(), CanonPath(path.to_string())}, - FileIngestionMethod::NixArchive, - info->narHash.algo) - .first; + bool res = false; + if (auto accessor = store.getFSAccessor(path, /*requireValidPath=*/false)) { + auto current = hashPath({ref{accessor}}, FileIngestionMethod::NixArchive, info->narHash.algo).first; Hash nullHash(HashAlgorithm::SHA256); res = info->narHash == nullHash || info->narHash == current; } From 918a3cebaa439d2baba46c9ca7d0f1fc6da0db2b Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Wed, 15 Oct 2025 00:25:14 +0300 Subject: [PATCH 326/332] libexpr: Use Store::requireStoreObjectAccessor instead of toRealPath in fetch MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This forces the code to go through proper abstractions instead of the raw filesystem API. This issue is evident from this reproducer: nix eval --expr 'builtins.fetchurl { url = "https://example.com"; sha256 = ""; }' --json --eval-store "dummy://?read-only=false" error: … while calling the 'fetchurl' builtin at «string»:1:1: 1| builtins.fetchurl { url = "https://example.com"; sha256 = ""; } | ^ error: opening file '/nix/store/r4f87yrl98f2m6v9z8ai2rbg4qwlcakq-example.com': No such file or directory --- src/libexpr/primops/fetchTree.cc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/libexpr/primops/fetchTree.cc b/src/libexpr/primops/fetchTree.cc index 48c03f177..ad76af5b5 100644 --- a/src/libexpr/primops/fetchTree.cc +++ b/src/libexpr/primops/fetchTree.cc @@ -588,7 +588,11 @@ static void fetch( if (expectedHash) { auto hash = unpack ?
state.store->queryPathInfo(storePath)->narHash - : hashFile(HashAlgorithm::SHA256, state.store->toRealPath(storePath)); + : hashPath( + {state.store->requireStoreObjectAccessor(storePath)}, + FileSerialisationMethod::Flat, + HashAlgorithm::SHA256) + .hash; if (hash != *expectedHash) { state .error( From 0347958dd2c53763146e0227a1fbf6ffaa3d2c86 Mon Sep 17 00:00:00 2001 From: Sergei Zimmerman Date: Wed, 15 Oct 2025 00:48:39 +0300 Subject: [PATCH 327/332] nix/develop: Remove usage of toRealPath, replace with SourceAccessor --- src/nix/develop.cc | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/nix/develop.cc b/src/nix/develop.cc index f78eee59a..28d0a7080 100644 --- a/src/nix/develop.cc +++ b/src/nix/develop.cc @@ -299,11 +299,9 @@ static StorePath getDerivationEnvironment(ref store, ref evalStore for (auto & [_0, optPath] : evalStore->queryPartialDerivationOutputMap(shellDrvPath)) { assert(optPath); - auto & outPath = *optPath; - assert(store->isValidPath(outPath)); - auto outPathS = store->toRealPath(outPath); - if (lstat(outPathS).st_size) - return outPath; + auto accessor = evalStore->requireStoreObjectAccessor(*optPath); + if (auto st = accessor->maybeLstat(CanonPath::root); st && st->fileSize.value_or(0)) + return *optPath; } throw Error("get-env.sh failed to produce an environment"); @@ -502,7 +500,9 @@ struct Common : InstallableCommand, MixProfile debug("reading environment file '%s'", strPath); - return {BuildEnvironment::parseJSON(readFile(store->toRealPath(shellOutPath))), strPath}; + return { + BuildEnvironment::parseJSON(store->requireStoreObjectAccessor(shellOutPath)->readFile(CanonPath::root)), + strPath}; } }; From 092639709f8cfa6ee2b896bb560ae1b37dfe81cf Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 14 Oct 2025 19:25:06 -0700 Subject: [PATCH 328/332] Remove duplicate shellcheck in dev-shell.nix --- packaging/dev-shell.nix | 1 - 1 file changed, 1 deletion(-) diff --git a/packaging/dev-shell.nix b/packaging/dev-shell.nix index 37e92e363..bfa219d2d 100644 --- a/packaging/dev-shell.nix +++ b/packaging/dev-shell.nix @@ -118,7 +118,6 @@ pkgs.nixComponents2.nix-util.overrideAttrs ( ++ [ pkgs.buildPackages.cmake pkgs.buildPackages.gnused - pkgs.buildPackages.shellcheck pkgs.buildPackages.changelog-d modular.pre-commit.settings.package (pkgs.writeScriptBin "pre-commit-hooks-install" modular.pre-commit.settings.installationScript) From 7bc3d9b9a9ba1f84a5b6b631143276be767234a4 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 14 Oct 2025 22:53:13 -0400 Subject: [PATCH 329/332] First attempt at uwyu for libflake --- packaging/dev-shell.nix | 1 + src/libflake/config.cc | 27 +++++++++- src/libflake/flake-primops.cc | 26 +++++++++ src/libflake/flake.cc | 52 ++++++++++++++++-- src/libflake/flakeref.cc | 20 ++++++- .../include/nix/flake/flake-primops.hh | 6 +++ src/libflake/include/nix/flake/flakeref.hh | 14 +++++ src/libflake/include/nix/flake/settings.hh | 7 ++- src/libflake/include/nix/flake/url-name.hh | 4 ++ src/libflake/lockfile.cc | 54 +++++++++++++++---- src/libflake/settings.cc | 4 ++ src/libflake/url-name.cc | 6 ++- 12 files changed, 201 insertions(+), 20 deletions(-) diff --git a/packaging/dev-shell.nix b/packaging/dev-shell.nix index 37e92e363..7eec45bfb 100644 --- a/packaging/dev-shell.nix +++ b/packaging/dev-shell.nix @@ -124,6 +124,7 @@ pkgs.nixComponents2.nix-util.overrideAttrs ( (pkgs.writeScriptBin "pre-commit-hooks-install" modular.pre-commit.settings.installationScript) pkgs.buildPackages.nixfmt-rfc-style 
pkgs.buildPackages.shellcheck + pkgs.buildPackages.include-what-you-use pkgs.buildPackages.gdb ] ++ lib.optional (stdenv.cc.isClang && stdenv.hostPlatform == stdenv.buildPlatform) ( diff --git a/src/libflake/config.cc b/src/libflake/config.cc index c9071f601..08e6ff038 100644 --- a/src/libflake/config.cc +++ b/src/libflake/config.cc @@ -1,9 +1,32 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + #include "nix/util/users.hh" #include "nix/util/config-global.hh" #include "nix/flake/settings.hh" #include "nix/flake/flake.hh" - -#include +#include "nix/util/ansicolor.hh" +#include "nix/util/configuration.hh" +#include "nix/util/file-system.hh" +#include "nix/util/fmt.hh" +#include "nix/util/logging.hh" +#include "nix/util/strings.hh" +#include "nix/util/types.hh" +#include "nix/util/util.hh" namespace nix::flake { diff --git a/src/libflake/flake-primops.cc b/src/libflake/flake-primops.cc index 7c5ce01b2..eeff9a966 100644 --- a/src/libflake/flake-primops.cc +++ b/src/libflake/flake-primops.cc @@ -1,8 +1,34 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + #include "nix/flake/flake-primops.hh" #include "nix/expr/eval.hh" #include "nix/flake/flake.hh" #include "nix/flake/flakeref.hh" #include "nix/flake/settings.hh" +#include "nix/expr/attr-set.hh" +#include "nix/expr/eval-error.hh" +#include "nix/expr/eval-inline.hh" +#include "nix/expr/eval-settings.hh" +#include "nix/expr/symbol-table.hh" +#include "nix/expr/value.hh" +#include "nix/fetchers/attrs.hh" +#include "nix/fetchers/fetchers.hh" +#include "nix/util/configuration.hh" +#include "nix/util/error.hh" +#include "nix/util/experimental-features.hh" +#include "nix/util/pos-idx.hh" +#include "nix/util/pos-table.hh" +#include "nix/util/source-path.hh" +#include "nix/util/types.hh" +#include "nix/util/util.hh" namespace nix::flake::primops { diff --git a/src/libflake/flake.cc b/src/libflake/flake.cc index 147bff820..ae93f2f39 100644 --- a/src/libflake/flake.cc +++ b/src/libflake/flake.cc @@ -1,3 +1,26 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + #include "nix/util/terminal.hh" #include "nix/util/ref.hh" #include "nix/util/environment-variables.hh" @@ -6,7 +29,6 @@ #include "nix/expr/eval-cache.hh" #include "nix/expr/eval-settings.hh" #include "nix/flake/lockfile.hh" -#include "nix/expr/primops.hh" #include "nix/expr/eval-inline.hh" #include "nix/store/store-api.hh" #include "nix/fetchers/fetchers.hh" @@ -14,14 +36,36 @@ #include "nix/fetchers/fetch-settings.hh" #include "nix/flake/settings.hh" #include "nix/expr/value-to-json.hh" -#include "nix/store/local-fs-store.hh" #include "nix/fetchers/fetch-to-store.hh" #include "nix/util/memory-source-accessor.hh" #include "nix/fetchers/input-cache.hh" - -#include +#include "nix/expr/attr-set.hh" +#include "nix/expr/eval-error.hh" +#include "nix/expr/nixexpr.hh" +#include "nix/expr/symbol-table.hh" +#include "nix/expr/value.hh" +#include "nix/expr/value/context.hh" +#include "nix/fetchers/attrs.hh" +#include "nix/fetchers/registry.hh" +#include "nix/flake/flakeref.hh" +#include "nix/store/path.hh" +#include "nix/util/canon-path.hh" +#include "nix/util/configuration.hh" +#include "nix/util/error.hh" +#include "nix/util/experimental-features.hh" +#include 
"nix/util/file-system.hh" +#include "nix/util/fmt.hh" +#include "nix/util/hash.hh" +#include "nix/util/logging.hh" +#include "nix/util/pos-idx.hh" +#include "nix/util/pos-table.hh" +#include "nix/util/position.hh" +#include "nix/util/source-path.hh" +#include "nix/util/types.hh" +#include "nix/util/util.hh" namespace nix { +struct SourceAccessor; using namespace flake; diff --git a/src/libflake/flakeref.cc b/src/libflake/flakeref.cc index 38979783d..a3448c88d 100644 --- a/src/libflake/flakeref.cc +++ b/src/libflake/flakeref.cc @@ -1,10 +1,28 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + #include "nix/flake/flakeref.hh" -#include "nix/store/store-api.hh" #include "nix/util/url.hh" #include "nix/util/url-parts.hh" #include "nix/fetchers/fetchers.hh" +#include "nix/util/error.hh" +#include "nix/util/file-system.hh" +#include "nix/util/fmt.hh" +#include "nix/util/logging.hh" +#include "nix/util/strings.hh" +#include "nix/util/util.hh" namespace nix { +namespace fetchers { +struct Settings; +} // namespace fetchers #if 0 // 'dir' path elements cannot start with a '.'. We also reject diff --git a/src/libflake/include/nix/flake/flake-primops.hh b/src/libflake/include/nix/flake/flake-primops.hh index 35a7128f4..57a5e3422 100644 --- a/src/libflake/include/nix/flake/flake-primops.hh +++ b/src/libflake/include/nix/flake/flake-primops.hh @@ -3,6 +3,12 @@ #include "nix/expr/eval.hh" #include "nix/flake/settings.hh" +namespace nix { +namespace flake { +struct Settings; +} // namespace flake +} // namespace nix + namespace nix::flake::primops { /** diff --git a/src/libflake/include/nix/flake/flakeref.hh b/src/libflake/include/nix/flake/flakeref.hh index c8c536bce..7a26382a7 100644 --- a/src/libflake/include/nix/flake/flakeref.hh +++ b/src/libflake/include/nix/flake/flakeref.hh @@ -1,16 +1,30 @@ #pragma once ///@file +#include #include +#include +#include +#include +#include +#include +#include +#include #include "nix/util/types.hh" #include "nix/fetchers/fetchers.hh" #include "nix/store/outputs-spec.hh" #include "nix/fetchers/registry.hh" +#include "nix/fetchers/attrs.hh" +#include "nix/util/ref.hh" +#include "nix/util/source-accessor.hh" namespace nix { class Store; +namespace fetchers { +struct Settings; +} // namespace fetchers typedef std::string FlakeId; diff --git a/src/libflake/include/nix/flake/settings.hh b/src/libflake/include/nix/flake/settings.hh index 618ed4d38..7e5d18746 100644 --- a/src/libflake/include/nix/flake/settings.hh +++ b/src/libflake/include/nix/flake/settings.hh @@ -1,9 +1,12 @@ #pragma once ///@file -#include "nix/util/configuration.hh" - #include +#include +#include + +#include "nix/util/configuration.hh" +#include "nix/util/experimental-features.hh" namespace nix { // Forward declarations diff --git a/src/libflake/include/nix/flake/url-name.hh b/src/libflake/include/nix/flake/url-name.hh index b95d2dff6..0c79b74aa 100644 --- a/src/libflake/include/nix/flake/url-name.hh +++ b/src/libflake/include/nix/flake/url-name.hh @@ -1,9 +1,13 @@ +#include +#include + #include "nix/util/url.hh" #include "nix/util/url-parts.hh" #include "nix/util/util.hh" #include "nix/util/split.hh" namespace nix { +struct ParsedURL; /** * Try to extract a reasonably unique and meaningful, human-readable diff --git a/src/libflake/lockfile.cc b/src/libflake/lockfile.cc index f381a57e6..421f872cc 100644 --- a/src/libflake/lockfile.cc +++ b/src/libflake/lockfile.cc @@ -1,15 +1,51 @@ -#include "nix/fetchers/fetch-settings.hh" -#include "nix/flake/settings.hh" 
-#include "nix/flake/lockfile.hh" -#include "nix/store/store-api.hh" -#include "nix/util/strings.hh" - +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include - -#include #include -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "nix/fetchers/fetch-settings.hh" +#include "nix/flake/lockfile.hh" +#include "nix/util/strings.hh" +#include "nix/fetchers/attrs.hh" +#include "nix/fetchers/fetchers.hh" +#include "nix/flake/flakeref.hh" +#include "nix/store/path.hh" +#include "nix/util/ansicolor.hh" +#include "nix/util/configuration.hh" +#include "nix/util/error.hh" +#include "nix/util/fmt.hh" +#include "nix/util/hash.hh" +#include "nix/util/logging.hh" +#include "nix/util/ref.hh" +#include "nix/util/types.hh" +#include "nix/util/util.hh" + +namespace nix { +class Store; +} // namespace nix namespace nix::flake { diff --git a/src/libflake/settings.cc b/src/libflake/settings.cc index e77bded30..52fa1b49d 100644 --- a/src/libflake/settings.cc +++ b/src/libflake/settings.cc @@ -1,5 +1,9 @@ +#include + #include "nix/flake/settings.hh" #include "nix/flake/flake-primops.hh" +#include "nix/expr/eval-settings.hh" +#include "nix/expr/eval.hh" namespace nix::flake { diff --git a/src/libflake/url-name.cc b/src/libflake/url-name.cc index 3bba3692e..a63b107c3 100644 --- a/src/libflake/url-name.cc +++ b/src/libflake/url-name.cc @@ -1,6 +1,8 @@ -#include "nix/flake/url-name.hh" #include -#include + +#include "nix/flake/url-name.hh" +#include "nix/util/strings.hh" +#include "nix/util/url.hh" namespace nix { From 902faf4fe5d0d0b8947f0001c66c4d67e5282e08 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 14 Oct 2025 23:20:35 -0400 Subject: [PATCH 330/332] More fixes for iwyu --- src/libflake/config.cc | 4 +--- src/libflake/flake.cc | 6 ++---- src/libflake/flakeref.cc | 15 +++++++++++++-- src/libflake/lockfile.cc | 2 -- src/libflake/url-name.cc | 2 ++ 5 files changed, 18 insertions(+), 11 deletions(-) diff --git a/src/libflake/config.cc b/src/libflake/config.cc index 08e6ff038..c248ed0a6 100644 --- a/src/libflake/config.cc +++ b/src/libflake/config.cc @@ -1,19 +1,17 @@ #include #include #include -#include -#include #include #include #include #include #include #include -#include #include #include #include #include +#include #include "nix/util/users.hh" #include "nix/util/config-global.hh" diff --git a/src/libflake/flake.cc b/src/libflake/flake.cc index ae93f2f39..8e7e2be26 100644 --- a/src/libflake/flake.cc +++ b/src/libflake/flake.cc @@ -3,8 +3,6 @@ #include #include #include -#include -#include #include #include #include @@ -12,7 +10,6 @@ #include #include #include -#include #include #include #include @@ -20,6 +17,7 @@ #include #include #include +#include #include "nix/util/terminal.hh" #include "nix/util/ref.hh" @@ -909,7 +907,7 @@ static ref makeInternalFS() internalFS->setPathDisplay("«flakes-internal»", ""); internalFS->addFile( CanonPath("call-flake.nix"), -#include "call-flake.nix.gen.hh" +#include "call-flake.nix.gen.hh" // IWYU pragma: keep ); return internalFS; } diff --git a/src/libflake/flakeref.cc b/src/libflake/flakeref.cc index a3448c88d..b4a5c106e 100644 --- a/src/libflake/flakeref.cc +++ b/src/libflake/flakeref.cc @@ -1,12 +1,15 @@ #include #include #include -#include -#include #include #include #include #include +#include +#include +#include +#include +#include #include "nix/flake/flakeref.hh" #include "nix/util/url.hh" 
@@ -18,8 +21,16 @@ #include "nix/util/logging.hh" #include "nix/util/strings.hh" #include "nix/util/util.hh" +#include "nix/fetchers/attrs.hh" +#include "nix/fetchers/registry.hh" +#include "nix/store/outputs-spec.hh" +#include "nix/util/ref.hh" +#include "nix/util/types.hh" namespace nix { +class Store; +struct SourceAccessor; + namespace fetchers { struct Settings; } // namespace fetchers diff --git a/src/libflake/lockfile.cc b/src/libflake/lockfile.cc index 421f872cc..fbf17a383 100644 --- a/src/libflake/lockfile.cc +++ b/src/libflake/lockfile.cc @@ -1,8 +1,6 @@ #include #include #include -#include -#include #include #include #include diff --git a/src/libflake/url-name.cc b/src/libflake/url-name.cc index a63b107c3..f4b5c6a7f 100644 --- a/src/libflake/url-name.cc +++ b/src/libflake/url-name.cc @@ -1,4 +1,6 @@ #include +#include +#include #include "nix/flake/url-name.hh" #include "nix/util/strings.hh" From e8b126fa909e9745cbc0f4cdcc99a2a5d05258d4 Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 14 Oct 2025 23:43:33 -0400 Subject: [PATCH 331/332] Remove unecessary includes --- src/libflake-tests/flakeref.cc | 7 +++++++ src/libflake-tests/nix_api_flake.cc | 12 +++++++----- src/libflake-tests/url-name.cc | 4 +++- src/libflake/include/nix/flake/flake-primops.hh | 1 - src/libflake/include/nix/flake/flakeref.hh | 9 --------- src/libflake/include/nix/flake/settings.hh | 2 -- src/libflake/include/nix/flake/url-name.hh | 5 ----- 7 files changed, 17 insertions(+), 23 deletions(-) diff --git a/src/libflake-tests/flakeref.cc b/src/libflake-tests/flakeref.cc index e2cb91bb8..34d281c52 100644 --- a/src/libflake-tests/flakeref.cc +++ b/src/libflake-tests/flakeref.cc @@ -1,8 +1,15 @@ #include +#include +#include +#include #include "nix/fetchers/fetch-settings.hh" #include "nix/flake/flakeref.hh" #include "nix/fetchers/attrs.hh" +#include "nix/fetchers/fetchers.hh" +#include "nix/util/configuration.hh" +#include "nix/util/error.hh" +#include "nix/util/experimental-features.hh" namespace nix { diff --git a/src/libflake-tests/nix_api_flake.cc b/src/libflake-tests/nix_api_flake.cc index f7e0cb719..da7f01401 100644 --- a/src/libflake-tests/nix_api_flake.cc +++ b/src/libflake-tests/nix_api_flake.cc @@ -1,15 +1,17 @@ +#include +#include +#include + #include "nix/util/file-system.hh" #include "nix_api_store.h" #include "nix_api_util.h" #include "nix_api_expr.h" #include "nix_api_value.h" #include "nix_api_flake.h" - -#include "nix/expr/tests/nix_api_expr.hh" #include "nix/util/tests/string_callback.hh" - -#include -#include +#include "nix/store/tests/nix_api_store.hh" +#include "nix/util/tests/nix_api_util.hh" +#include "nix_api_fetchers.h" namespace nixC { diff --git a/src/libflake-tests/url-name.cc b/src/libflake-tests/url-name.cc index 81ba516c8..64cbe5c9d 100644 --- a/src/libflake-tests/url-name.cc +++ b/src/libflake-tests/url-name.cc @@ -1,6 +1,8 @@ -#include "nix/flake/url-name.hh" #include +#include "nix/flake/url-name.hh" +#include "nix/util/url.hh" + namespace nix { /* ----------- tests for url-name.hh --------------------------------------------------*/ diff --git a/src/libflake/include/nix/flake/flake-primops.hh b/src/libflake/include/nix/flake/flake-primops.hh index 57a5e3422..a2a3d1612 100644 --- a/src/libflake/include/nix/flake/flake-primops.hh +++ b/src/libflake/include/nix/flake/flake-primops.hh @@ -1,7 +1,6 @@ #pragma once #include "nix/expr/eval.hh" -#include "nix/flake/settings.hh" namespace nix { namespace flake { diff --git a/src/libflake/include/nix/flake/flakeref.hh 
b/src/libflake/include/nix/flake/flakeref.hh index 7a26382a7..65a2dfed5 100644 --- a/src/libflake/include/nix/flake/flakeref.hh +++ b/src/libflake/include/nix/flake/flakeref.hh @@ -1,23 +1,14 @@ #pragma once ///@file -#include #include -#include #include -#include #include #include #include -#include -#include "nix/util/types.hh" -#include "nix/fetchers/fetchers.hh" #include "nix/store/outputs-spec.hh" #include "nix/fetchers/registry.hh" -#include "nix/fetchers/attrs.hh" -#include "nix/util/ref.hh" -#include "nix/util/source-accessor.hh" namespace nix { diff --git a/src/libflake/include/nix/flake/settings.hh b/src/libflake/include/nix/flake/settings.hh index 7e5d18746..7187a3294 100644 --- a/src/libflake/include/nix/flake/settings.hh +++ b/src/libflake/include/nix/flake/settings.hh @@ -2,11 +2,9 @@ ///@file #include -#include #include #include "nix/util/configuration.hh" -#include "nix/util/experimental-features.hh" namespace nix { // Forward declarations diff --git a/src/libflake/include/nix/flake/url-name.hh b/src/libflake/include/nix/flake/url-name.hh index 0c79b74aa..d313db33b 100644 --- a/src/libflake/include/nix/flake/url-name.hh +++ b/src/libflake/include/nix/flake/url-name.hh @@ -1,11 +1,6 @@ #include #include -#include "nix/util/url.hh" -#include "nix/util/url-parts.hh" -#include "nix/util/util.hh" -#include "nix/util/split.hh" - namespace nix { struct ParsedURL; From 01a8499d2f7baede36827cc6138468329757551f Mon Sep 17 00:00:00 2001 From: Farid Zakaria Date: Tue, 14 Oct 2025 23:51:40 -0400 Subject: [PATCH 332/332] Format cpp files --- src/libflake/flakeref.cc | 2 +- src/libflake/include/nix/flake/flake-primops.hh | 4 ++-- src/libflake/include/nix/flake/flakeref.hh | 3 ++- src/libflake/lockfile.cc | 2 +- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/src/libflake/flakeref.cc b/src/libflake/flakeref.cc index b4a5c106e..a26f269c3 100644 --- a/src/libflake/flakeref.cc +++ b/src/libflake/flakeref.cc @@ -33,7 +33,7 @@ struct SourceAccessor; namespace fetchers { struct Settings; -} // namespace fetchers +} // namespace fetchers #if 0 // 'dir' path elements cannot start with a '.'. We also reject diff --git a/src/libflake/include/nix/flake/flake-primops.hh b/src/libflake/include/nix/flake/flake-primops.hh index a2a3d1612..b333e33d7 100644 --- a/src/libflake/include/nix/flake/flake-primops.hh +++ b/src/libflake/include/nix/flake/flake-primops.hh @@ -5,8 +5,8 @@ namespace nix { namespace flake { struct Settings; -} // namespace flake -} // namespace nix +} // namespace flake +} // namespace nix namespace nix::flake::primops { diff --git a/src/libflake/include/nix/flake/flakeref.hh b/src/libflake/include/nix/flake/flakeref.hh index 65a2dfed5..1af8c5afd 100644 --- a/src/libflake/include/nix/flake/flakeref.hh +++ b/src/libflake/include/nix/flake/flakeref.hh @@ -13,9 +13,10 @@ namespace nix { class Store; + namespace fetchers { struct Settings; -} // namespace fetchers +} // namespace fetchers typedef std::string FlakeId; diff --git a/src/libflake/lockfile.cc b/src/libflake/lockfile.cc index fbf17a383..d3dac19c5 100644 --- a/src/libflake/lockfile.cc +++ b/src/libflake/lockfile.cc @@ -43,7 +43,7 @@ namespace nix { class Store; -} // namespace nix +} // namespace nix namespace nix::flake {