diff --git a/.coderabbit.yaml b/.coderabbit.yaml
new file mode 100644
index 000000000..00244700a
--- /dev/null
+++ b/.coderabbit.yaml
@@ -0,0 +1,14 @@
+# Disable CodeRabbit auto-review to prevent verbose comments on PRs.
+# When enabled: false, CodeRabbit won't attempt reviews and won't post
+# "Review skipped" or other automated comments.
+reviews:
+  auto_review:
+    enabled: false
+  review_status: false
+  high_level_summary: false
+  poem: false
+  sequence_diagrams: false
+  changed_files_summary: false
+  tools:
+    github-checks:
+      enabled: false
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index c6843d86f..c155bf8bf 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -15,6 +15,10 @@ so you understand the process and the expectations.
 - volunteering contributions effectively
 - how to get help and our review process.
 
+PR stuck in review? The Nix team holds two online meetings per week, open to everyone, in a Jitsi conference:
+
+- https://calendar.google.com/calendar/u/0/embed?src=b9o52fobqjak8oq8lfkhg3t0qg@group.calendar.google.com
+
 -->
 
 ## Motivation
diff --git a/.github/actions/install-nix-action/action.yaml b/.github/actions/install-nix-action/action.yaml
index c299b3956..00d02d6a2 100644
--- a/.github/actions/install-nix-action/action.yaml
+++ b/.github/actions/install-nix-action/action.yaml
@@ -4,15 +4,29 @@ inputs:
   dogfood:
     description: "Whether to use Nix installed from the latest artifact from master branch"
    required: true # Be explicit about the fact that we are using unreleased artifacts
+  experimental-installer:
+    description: "Whether to use the experimental installer to install Nix"
+    default: false
+  experimental-installer-version:
+    description: "Version of the experimental installer to use. If `latest`, the newest artifact from the default branch is used."
+    # TODO: This should probably be pinned to a release after https://github.com/NixOS/experimental-nix-installer/pull/49 lands in one
+    default: "latest"
   extra_nix_config:
     description: "Gets appended to `/etc/nix/nix.conf` if passed."
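+  # `install_url` below is only used by the cachix/install-nix-action step, and
+  # only when `dogfood` is false; the experimental installer takes `tarball_url` instead.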
   install_url:
     description: "URL of the Nix installer"
     required: false
-    default: "https://releases.nixos.org/nix/nix-2.30.2/install"
+    default: "https://releases.nixos.org/nix/nix-2.32.1/install"
+  tarball_url:
+    description: "URL of the Nix tarball to use with the experimental installer"
+    required: false
   github_token:
     description: "Github token"
     required: true
+  use_cache:
+    description: "Whether to set up magic-nix-cache"
+    default: true
+    required: false
 runs:
   using: "composite"
   steps:
@@ -37,14 +51,81 @@ runs:
         gh run download "$RUN_ID" --repo "$DOGFOOD_REPO" -n "$INSTALLER_ARTIFACT" -D "$INSTALLER_DOWNLOAD_DIR"
         echo "installer-path=file://$INSTALLER_DOWNLOAD_DIR" >> "$GITHUB_OUTPUT"
+        TARBALL_PATH="$(find "$INSTALLER_DOWNLOAD_DIR" -name 'nix*.tar.xz' -print | head -n 1)"
+        echo "tarball-path=file://$TARBALL_PATH" >> "$GITHUB_OUTPUT"
         echo "::notice ::Dogfooding Nix installer from master (https://github.com/$DOGFOOD_REPO/actions/runs/$RUN_ID)"
       env:
         GH_TOKEN: ${{ inputs.github_token }}
         DOGFOOD_REPO: "NixOS/nix"
+    - name: "Gather system info for experimental installer"
+      shell: bash
+      if: ${{ inputs.experimental-installer == 'true' }}
+      run: |
+        echo "::notice ::Using experimental installer from $EXPERIMENTAL_INSTALLER_REPO (https://github.com/$EXPERIMENTAL_INSTALLER_REPO)"
+
+        if [ "$RUNNER_OS" == "Linux" ]; then
+          EXPERIMENTAL_INSTALLER_SYSTEM="linux"
+          echo "EXPERIMENTAL_INSTALLER_SYSTEM=$EXPERIMENTAL_INSTALLER_SYSTEM" >> "$GITHUB_ENV"
+        elif [ "$RUNNER_OS" == "macOS" ]; then
+          EXPERIMENTAL_INSTALLER_SYSTEM="darwin"
+          echo "EXPERIMENTAL_INSTALLER_SYSTEM=$EXPERIMENTAL_INSTALLER_SYSTEM" >> "$GITHUB_ENV"
+        else
+          echo "::error ::Unsupported RUNNER_OS: $RUNNER_OS"
+          exit 1
+        fi
+
+        if [ "$RUNNER_ARCH" == "X64" ]; then
+          EXPERIMENTAL_INSTALLER_ARCH=x86_64
+          echo "EXPERIMENTAL_INSTALLER_ARCH=$EXPERIMENTAL_INSTALLER_ARCH" >> "$GITHUB_ENV"
+        elif [ "$RUNNER_ARCH" == "ARM64" ]; then
+          EXPERIMENTAL_INSTALLER_ARCH=aarch64
+          echo "EXPERIMENTAL_INSTALLER_ARCH=$EXPERIMENTAL_INSTALLER_ARCH" >> "$GITHUB_ENV"
+        else
+          echo "::error ::Unsupported RUNNER_ARCH: $RUNNER_ARCH"
+          exit 1
+        fi
+
+        echo "EXPERIMENTAL_INSTALLER_ARTIFACT=nix-installer-$EXPERIMENTAL_INSTALLER_ARCH-$EXPERIMENTAL_INSTALLER_SYSTEM" >> "$GITHUB_ENV"
+      env:
+        EXPERIMENTAL_INSTALLER_REPO: "NixOS/experimental-nix-installer"
+    - name: "Download latest experimental installer"
+      shell: bash
+      id: download-latest-experimental-installer
+      if: ${{ inputs.experimental-installer == 'true' && inputs.experimental-installer-version == 'latest' }}
+      run: |
+        RUN_ID=$(gh run list --repo "$EXPERIMENTAL_INSTALLER_REPO" --workflow ci.yml --branch main --status success --json databaseId --jq ".[0].databaseId")
+
+        EXPERIMENTAL_INSTALLER_DOWNLOAD_DIR="$GITHUB_WORKSPACE/$EXPERIMENTAL_INSTALLER_ARTIFACT"
+        mkdir -p "$EXPERIMENTAL_INSTALLER_DOWNLOAD_DIR"
+
+        gh run download "$RUN_ID" --repo "$EXPERIMENTAL_INSTALLER_REPO" -n "$EXPERIMENTAL_INSTALLER_ARTIFACT" -D "$EXPERIMENTAL_INSTALLER_DOWNLOAD_DIR"
+        # Executable permissions are lost in artifacts
+        find "$EXPERIMENTAL_INSTALLER_DOWNLOAD_DIR" -type f -exec chmod +x {} +
+        echo "installer-path=$EXPERIMENTAL_INSTALLER_DOWNLOAD_DIR" >> "$GITHUB_OUTPUT"
+      env:
+        GH_TOKEN: ${{ inputs.github_token }}
+        EXPERIMENTAL_INSTALLER_REPO: "NixOS/experimental-nix-installer"
     - uses: cachix/install-nix-action@c134e4c9e34bac6cab09cf239815f9339aaaf84e # v31.5.1
+      if: ${{ inputs.experimental-installer != 'true' }}
       with:
         # Ternary operator in GHA: https://www.github.com/actions/runner/issues/409#issuecomment-752775072
         install_url: ${{
 inputs.dogfood == 'true' && format('{0}/install', steps.download-nix-installer.outputs.installer-path) || inputs.install_url }}
         install_options: ${{ inputs.dogfood == 'true' && format('--tarball-url-prefix {0}', steps.download-nix-installer.outputs.installer-path) || '' }}
         extra_nix_config: ${{ inputs.extra_nix_config }}
+    - uses: DeterminateSystems/nix-installer-action@786fff0690178f1234e4e1fe9b536e94f5433196 # v20
+      if: ${{ inputs.experimental-installer == 'true' }}
+      with:
+        diagnostic-endpoint: ""
+        # TODO: It'd be nice to use `artifacts.nixos.org` for both of these, maybe through an `/experimental-installer/latest` endpoint? or `/commit/`?
+        local-root: ${{ inputs.experimental-installer-version == 'latest' && steps.download-latest-experimental-installer.outputs.installer-path || '' }}
+        source-url: ${{ inputs.experimental-installer-version != 'latest' && format('https://artifacts.nixos.org/experimental-installer/tag/{0}/{1}', inputs.experimental-installer-version, env.EXPERIMENTAL_INSTALLER_ARTIFACT) || '' }}
+        nix-package-url: ${{ inputs.dogfood == 'true' && steps.download-nix-installer.outputs.tarball-path || (inputs.tarball_url || '') }}
+        extra-conf: ${{ inputs.extra_nix_config }}
+    - uses: DeterminateSystems/magic-nix-cache-action@565684385bcd71bad329742eefe8d12f2e765b39 # v13
+      if: ${{ inputs.use_cache == 'true' }}
+      with:
+        diagnostic-endpoint: ''
+        use-flakehub: false
+        use-gha-cache: true
+        source-revision: 92d9581367be2233c2d5714a2640e1339f4087d8 # main
diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml
new file mode 100644
index 000000000..1413a203c
--- /dev/null
+++ b/.github/workflows/backport.yml
@@ -0,0 +1,37 @@
+name: Backport
+on:
+  pull_request_target:
+    types: [closed, labeled]
+permissions:
+  contents: read
+jobs:
+  backport:
+    name: Backport Pull Request
+    permissions:
+      # for korthout/backport-action
+      contents: write
+      pull-requests: write
+    if: github.repository_owner == 'NixOS' && github.event.pull_request.merged == true && (github.event_name != 'labeled' || startsWith(github.event.label.name, 'backport'))
+    runs-on: ubuntu-24.04-arm
+    steps:
+      - name: Generate GitHub App token
+        id: generate-token
+        uses: actions/create-github-app-token@v2
+        with:
+          app-id: ${{ vars.CI_APP_ID }}
+          private-key: ${{ secrets.CI_APP_PRIVATE_KEY }}
+      - uses: actions/checkout@v5
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
+          # required to find all branches
+          fetch-depth: 0
+      - name: Create backport PRs
+        uses: korthout/backport-action@d07416681cab29bf2661702f925f020aaa962997 # v3.4.1
+        id: backport
+        with:
+          # Config README: https://github.com/korthout/backport-action#backport-action
+          github_token: ${{ steps.generate-token.outputs.token }}
+          github_workspace: ${{ github.workspace }}
+          auto_merge_enabled: true
+          pull_description: |-
+            Automatic backport to `${target_branch}`, triggered by a label in #${pull_number}.
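+          # Labels are expected to match the action's default `label_pattern`
+          # (`^backport ([^ ]+)$`), i.e. the same `backport <branch>` convention
+          # the old Mergify rules used, e.g. `backport 2.31-maintenance`.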
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index dcf0814d8..67e97b188 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -4,6 +4,8 @@ on: pull_request: merge_group: push: + branches: + - master workflow_dispatch: inputs: dogfood: @@ -12,6 +14,10 @@ on: default: true type: boolean +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + permissions: read-all jobs: @@ -27,6 +33,7 @@ jobs: extra_nix_config: experimental-features = nix-command flakes github_token: ${{ secrets.GITHUB_TOKEN }} + use_cache: false - run: nix flake show --all-systems --json pre-commit-checks: @@ -39,7 +46,6 @@ jobs: dogfood: ${{ github.event_name == 'workflow_dispatch' && inputs.dogfood || github.event_name != 'workflow_dispatch' }} extra_nix_config: experimental-features = nix-command flakes github_token: ${{ secrets.GITHUB_TOKEN }} - - uses: DeterminateSystems/magic-nix-cache-action@main - run: ./ci/gha/tests/pre-commit-checks basic-checks: @@ -90,7 +96,6 @@ jobs: dogfood: ${{ github.event_name == 'workflow_dispatch' && inputs.dogfood || github.event_name != 'workflow_dispatch' }} # The sandbox would otherwise be disabled by default on Darwin extra_nix_config: "sandbox = true" - - uses: DeterminateSystems/magic-nix-cache-action@main # Since ubuntu 22.30, unprivileged usernamespaces are no longer allowed to map to the root user: # https://ubuntu.com/blog/ubuntu-23-10-restricted-unprivileged-user-namespaces - run: sudo sysctl -w kernel.apparmor_restrict_unprivileged_userns=0 @@ -100,6 +105,12 @@ jobs: nix build --file ci/gha/tests/wrapper.nix componentTests -L \ --arg withInstrumentation ${{ matrix.instrumented }} \ --argstr stdenv "${{ matrix.stdenv }}" + - name: Run VM tests + run: | + nix build --file ci/gha/tests/wrapper.nix vmTests -L \ + --arg withInstrumentation ${{ matrix.instrumented }} \ + --argstr stdenv "${{ matrix.stdenv }}" + if: ${{ matrix.os == 'linux' }} - name: Run flake checks and prepare the installer tarball run: | ci/gha/tests/build-checks @@ -135,9 +146,19 @@ jobs: - scenario: on ubuntu runs-on: ubuntu-24.04 os: linux + experimental-installer: false - scenario: on macos runs-on: macos-14 os: darwin + experimental-installer: false + - scenario: on ubuntu (experimental) + runs-on: ubuntu-24.04 + os: linux + experimental-installer: true + - scenario: on macos (experimental) + runs-on: macos-14 + os: darwin + experimental-installer: true name: installer test ${{ matrix.scenario }} runs-on: ${{ matrix.runs-on }} steps: @@ -149,11 +170,22 @@ jobs: path: out - name: Looking up the installer tarball URL id: installer-tarball-url - run: echo "installer-url=file://$GITHUB_WORKSPACE/out" >> "$GITHUB_OUTPUT" - - uses: cachix/install-nix-action@v31 + run: | + echo "installer-url=file://$GITHUB_WORKSPACE/out" >> "$GITHUB_OUTPUT" + TARBALL_PATH="$(find "$GITHUB_WORKSPACE/out" -name 'nix*.tar.xz' -print | head -n 1)" + echo "tarball-path=file://$TARBALL_PATH" >> "$GITHUB_OUTPUT" + - uses: cachix/install-nix-action@c134e4c9e34bac6cab09cf239815f9339aaaf84e # v31.5.1 + if: ${{ !matrix.experimental-installer }} with: install_url: ${{ format('{0}/install', steps.installer-tarball-url.outputs.installer-url) }} install_options: ${{ format('--tarball-url-prefix {0}', steps.installer-tarball-url.outputs.installer-url) }} + - uses: ./.github/actions/install-nix-action + if: ${{ matrix.experimental-installer }} + with: + dogfood: false + experimental-installer: true + tarball_url: ${{ 
steps.installer-tarball-url.outputs.tarball-path }} + github_token: ${{ secrets.GITHUB_TOKEN }} - run: sudo apt install fish zsh if: matrix.os == 'linux' - run: brew install fish @@ -185,7 +217,7 @@ jobs: echo "docker=${{ env._DOCKER_SECRETS != '' }}" >> $GITHUB_OUTPUT docker_push_image: - needs: [tests, vm_tests, check_secrets] + needs: [tests, check_secrets] permissions: contents: read packages: write @@ -198,12 +230,13 @@ jobs: - uses: actions/checkout@v5 with: fetch-depth: 0 - - uses: cachix/install-nix-action@v31 + - uses: ./.github/actions/install-nix-action with: - install_url: https://releases.nixos.org/nix/nix-2.20.3/install - - uses: DeterminateSystems/magic-nix-cache-action@main - - run: echo NIX_VERSION="$(nix --experimental-features 'nix-command flakes' eval .\#nix.version | tr -d \")" >> $GITHUB_ENV - - run: nix --experimental-features 'nix-command flakes' build .#dockerImage -L + dogfood: false + extra_nix_config: | + experimental-features = flakes nix-command + - run: echo NIX_VERSION="$(nix eval .\#nix.version | tr -d \")" >> $GITHUB_ENV + - run: nix build .#dockerImage -L - run: docker load -i ./result/image.tar.gz - run: docker tag nix:$NIX_VERSION ${{ secrets.DOCKERHUB_USERNAME }}/nix:$NIX_VERSION - run: docker tag nix:$NIX_VERSION ${{ secrets.DOCKERHUB_USERNAME }}/nix:master @@ -238,28 +271,8 @@ jobs: docker tag nix:$NIX_VERSION $IMAGE_ID:master docker push $IMAGE_ID:master - vm_tests: - needs: basic-checks - runs-on: ubuntu-24.04 - steps: - - uses: actions/checkout@v5 - - uses: ./.github/actions/install-nix-action - with: - dogfood: ${{ github.event_name == 'workflow_dispatch' && inputs.dogfood || github.event_name != 'workflow_dispatch' }} - extra_nix_config: - experimental-features = nix-command flakes - github_token: ${{ secrets.GITHUB_TOKEN }} - - uses: DeterminateSystems/magic-nix-cache-action@main - - run: | - nix build -L \ - .#hydraJobs.tests.functional_user \ - .#hydraJobs.tests.githubFlakes \ - .#hydraJobs.tests.nix-docker \ - .#hydraJobs.tests.tarballFlakes \ - ; - flake_regressions: - needs: vm_tests + needs: tests runs-on: ubuntu-24.04 steps: - name: Checkout nix @@ -280,7 +293,6 @@ jobs: extra_nix_config: experimental-features = nix-command flakes github_token: ${{ secrets.GITHUB_TOKEN }} - - uses: DeterminateSystems/magic-nix-cache-action@main - run: nix build -L --out-link ./new-nix && PATH=$(pwd)/new-nix/bin:$PATH MAX_FLAKES=25 flake-regressions/eval-all.sh profile_build: @@ -301,7 +313,6 @@ jobs: extra_nix_config: | experimental-features = flakes nix-command ca-derivations impure-derivations max-jobs = 1 - - uses: DeterminateSystems/magic-nix-cache-action@main - run: | nix build -L --file ./ci/gha/profile-build buildTimeReport --out-link build-time-report.md cat build-time-report.md >> $GITHUB_STEP_SUMMARY diff --git a/.mergify.yml b/.mergify.yml deleted file mode 100644 index 1c220045a..000000000 --- a/.mergify.yml +++ /dev/null @@ -1,174 +0,0 @@ -queue_rules: - - name: default - # all required tests need to go here - merge_conditions: - - check-success=tests on macos - - check-success=tests on ubuntu - - check-success=installer test on macos - - check-success=installer test on ubuntu - - check-success=vm_tests - batch_size: 5 - -pull_request_rules: - - name: merge using the merge queue - conditions: - - base~=master|.+-maintenance - - label~=merge-queue|dependencies - actions: - queue: {} - -# The rules below will first create backport pull requests and put those in a merge queue. 
- - - name: backport patches to 2.18 - conditions: - - label=backport 2.18-maintenance - actions: - backport: - branches: - - 2.18-maintenance - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.19 - conditions: - - label=backport 2.19-maintenance - actions: - backport: - branches: - - 2.19-maintenance - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.20 - conditions: - - label=backport 2.20-maintenance - actions: - backport: - branches: - - 2.20-maintenance - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.21 - conditions: - - label=backport 2.21-maintenance - actions: - backport: - branches: - - 2.21-maintenance - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.22 - conditions: - - label=backport 2.22-maintenance - actions: - backport: - branches: - - 2.22-maintenance - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.23 - conditions: - - label=backport 2.23-maintenance - actions: - backport: - branches: - - 2.23-maintenance - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.24 - conditions: - - label=backport 2.24-maintenance - actions: - backport: - branches: - - "2.24-maintenance" - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.25 - conditions: - - label=backport 2.25-maintenance - actions: - backport: - branches: - - "2.25-maintenance" - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.26 - conditions: - - label=backport 2.26-maintenance - actions: - backport: - branches: - - "2.26-maintenance" - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.27 - conditions: - - label=backport 2.27-maintenance - actions: - backport: - branches: - - "2.27-maintenance" - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.28 - conditions: - - label=backport 2.28-maintenance - actions: - backport: - branches: - - "2.28-maintenance" - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.29 - conditions: - - label=backport 2.29-maintenance - actions: - backport: - branches: - - "2.29-maintenance" - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.30 - conditions: - - label=backport 2.30-maintenance - actions: - backport: - branches: - - "2.30-maintenance" - labels: - - automatic backport - - merge-queue - - - name: backport patches to 2.31 - conditions: - - label=backport 2.31-maintenance - actions: - backport: - branches: - - "2.31-maintenance" - labels: - - automatic backport - - merge-queue diff --git a/.version b/.version index 7cca401c7..3afbaeb2b 100644 --- a/.version +++ b/.version @@ -1 +1 @@ -2.32.0 +2.33.0 diff --git a/ci/gha/tests/default.nix b/ci/gha/tests/default.nix index b89d51c76..0c5c103bf 100644 --- a/ci/gha/tests/default.nix +++ b/ci/gha/tests/default.nix @@ -21,16 +21,6 @@ let packages' = nixFlake.packages.${system}; stdenv = (getStdenv pkgs); - enableSanitizersLayer = finalAttrs: prevAttrs: { - mesonFlags = - (prevAttrs.mesonFlags or [ ]) - ++ [ (lib.mesonOption "b_sanitize" "address,undefined") ] - ++ (lib.optionals stdenv.cc.isClang [ - # https://www.github.com/mesonbuild/meson/issues/764 - (lib.mesonBool "b_lundef" false) - ]); - }; - collectCoverageLayer = finalAttrs: prevAttrs: { env = let @@ -53,14 +43,15 @@ let ''; }; - componentOverrides = - (lib.optional withSanitizers enableSanitizersLayer) - ++ 
(lib.optional withCoverage collectCoverageLayer); + componentOverrides = (lib.optional withCoverage collectCoverageLayer); in rec { nixComponentsInstrumented = nixComponents.overrideScope ( final: prev: { + withASan = withSanitizers; + withUBSan = withSanitizers; + nix-store-tests = prev.nix-store-tests.override { withBenchmarks = true; }; # Boehm is incompatible with ASAN. nix-expr = prev.nix-expr.override { enableGC = !withSanitizers; }; @@ -71,6 +62,14 @@ rec { } ); + # Import NixOS tests using the instrumented components + nixosTests = import ../../../tests/nixos { + inherit lib pkgs; + nixComponents = nixComponentsInstrumented; + nixpkgs = nixFlake.inputs.nixpkgs; + inherit (nixFlake.inputs) nixpkgs-23-11; + }; + /** Top-level tests for the flake outputs, as they would be built by hydra. These tests generally can't be overridden to run with sanitizers. @@ -117,6 +116,7 @@ rec { ) nixComponentsInstrumented) // lib.optionalAttrs (pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform) { "${componentTestsPrefix}nix-functional-tests" = nixComponentsInstrumented.nix-functional-tests; + "${componentTestsPrefix}nix-json-schema-checks" = nixComponentsInstrumented.nix-json-schema-checks; }; codeCoverage = @@ -221,4 +221,20 @@ rec { { inherit coverageProfileDrvs mergedProfdata coverageReports; }; + + vmTests = { + inherit (nixosTests) s3-binary-cache-store; + } + // lib.optionalAttrs (!withSanitizers && !withCoverage) { + # evalNixpkgs uses non-instrumented components from hydraJobs, so only run it + # when not testing with sanitizers to avoid rebuilding nix + inherit (hydraJobs.tests) evalNixpkgs; + # FIXME: CI times out when building vm tests instrumented + inherit (nixosTests) + functional_user + githubFlakes + nix-docker + tarballFlakes + ; + }; } diff --git a/doc/manual/meson.build b/doc/manual/meson.build index a5672f0ad..fdea40098 100644 --- a/doc/manual/meson.build +++ b/doc/manual/meson.build @@ -15,7 +15,6 @@ pymod = import('python') python = pymod.find_installation('python3') nix_env_for_docs = { - 'ASAN_OPTIONS' : 'abort_on_error=1:print_summary=1:detect_leaks=0', 'HOME' : '/dummy', 'NIX_CONF_DIR' : '/dummy', 'NIX_SSL_CERT_FILE' : '/dummy/no-ca-bundle.crt', @@ -89,7 +88,7 @@ manual = custom_target( @0@ @INPUT0@ @CURRENT_SOURCE_DIR@ > @DEPFILE@ @0@ @INPUT1@ summary @2@ < @CURRENT_SOURCE_DIR@/source/SUMMARY.md.in > @2@/source/SUMMARY.md sed -e 's|@version@|@3@|g' < @INPUT2@ > @2@/book.toml - @4@ -r --include='*.md' @CURRENT_SOURCE_DIR@/ @2@/ + @4@ -r -L --include='*.md' @CURRENT_SOURCE_DIR@/ @2@/ (cd @2@; RUST_LOG=warn @1@ build -d @2@ 3>&2 2>&1 1>&3) | { grep -Fv "because fragment resolution isn't implemented" || :; } 3>&2 2>&1 1>&3 rm -rf @2@/manual mv @2@/html @2@/manual @@ -116,6 +115,7 @@ manual = custom_target( builtins_md, rl_next_generated, summary_rl_next, + json_schema_generated_files, nix_input, ], output : [ diff --git a/doc/manual/package.nix b/doc/manual/package.nix index 69b7c0e49..7d29df3c3 100644 --- a/doc/manual/package.nix +++ b/doc/manual/package.nix @@ -12,6 +12,7 @@ rsync, nix-cli, changelog-d, + json-schema-for-humans, officialRelease, # Configuration Options @@ -32,6 +33,13 @@ mkMesonDerivation (finalAttrs: { fileset.difference (fileset.unions [ ../../.version + # For example JSON + ../../src/libutil-tests/data/hash + ../../src/libstore-tests/data/content-address + ../../src/libstore-tests/data/store-path + ../../src/libstore-tests/data/derived-path + ../../src/libstore-tests/data/path-info + ../../src/libstore-tests/data/nar-info # Too many different types of 
files to filter for now ../../doc/manual ./. @@ -55,6 +63,7 @@ mkMesonDerivation (finalAttrs: { jq python3 rsync + json-schema-for-humans changelog-d ] ++ lib.optionals (!officialRelease) [ diff --git a/doc/manual/rl-next/c-api-byidx.md b/doc/manual/rl-next/c-api-byidx.md deleted file mode 100644 index 9b5bb3fcb..000000000 --- a/doc/manual/rl-next/c-api-byidx.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -synopsis: "C API: `nix_get_attr_name_byidx`, `nix_get_attr_byidx` take a `nix_value *` instead of `const nix_value *`" -prs: [13987] ---- - -In order to accommodate a more optimized internal representation of attribute set merges these functions require -a mutable `nix_value *` that might be modified on access. This does *not* break the ABI of these functions. diff --git a/doc/manual/rl-next/c-api-lazy-accessors.md b/doc/manual/rl-next/c-api-lazy-accessors.md deleted file mode 100644 index bd0604f0d..000000000 --- a/doc/manual/rl-next/c-api-lazy-accessors.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -synopsis: "C API: Add lazy attribute and list item accessors" -prs: [14030] ---- - -The C API now includes lazy accessor functions for retrieving values from lists and attribute sets without forcing evaluation: - -- `nix_get_list_byidx_lazy()` - Get a list element without forcing its evaluation -- `nix_get_attr_byname_lazy()` - Get an attribute value by name without forcing evaluation -- `nix_get_attr_byidx_lazy()` - Get an attribute by index without forcing evaluation - -These functions are useful when forwarding unevaluated sub-values to other lists, attribute sets, or function calls. They allow more efficient handling of Nix values by deferring evaluation until actually needed. - -Additionally, bounds checking has been improved for all `_byidx` functions to properly validate indices before access, preventing potential out-of-bounds errors. - -The documentation for `NIX_ERR_KEY` error handling has also been clarified to specify when this error code is returned. \ No newline at end of file diff --git a/doc/manual/rl-next/cached-substituted-inputs.md b/doc/manual/rl-next/cached-substituted-inputs.md deleted file mode 100644 index b0b53a213..000000000 --- a/doc/manual/rl-next/cached-substituted-inputs.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -synopsis: "Substituted flake inputs are no longer re-copied to the store" -prs: [14041] ---- - -Since 2.25, Nix would fail to store a cache entry for substituted flake inputs, -which in turn would cause them to be re-copied to the store on initial -evaluation. Caching these inputs results in a near doubling of a performance in -some cases — especially on I/O-bound machines and when using commands that -fetch many inputs, like `nix flake archive/prefetch-inputs` diff --git a/doc/manual/rl-next/derivation-json.md b/doc/manual/rl-next/derivation-json.md deleted file mode 100644 index be7ab1cfe..000000000 --- a/doc/manual/rl-next/derivation-json.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -synopsis: Derivation JSON format now uses store path basenames (no store dir) only -prs: [13980] -issues: [13570] ---- - -Experience with many JSON frameworks (e.g. nlohmann/json in C++, Serde in Rust, and Aeson in Haskell), has shown that the use of the store dir in JSON formats is an impediment to systematic JSON formats, -because it requires the serializer/deserializer to take an extra paramater (the store dir). - -We ultimately want to rectify this issue with all (non-stable, able to be changed) JSON formats. 
-To start with, we are changing the JSON format for derivations because the `nix derivation` commands are
---- in addition to being formally unstable ---- less widely used than other unstable commands.
-
-See the documentation on the [JSON format for derivations](@docroot@/protocols/json/derivation.md) for further details.
diff --git a/doc/manual/rl-next/dropped-compat.md b/doc/manual/rl-next/dropped-compat.md
deleted file mode 100644
index d6cc7704a..000000000
--- a/doc/manual/rl-next/dropped-compat.md
+++ /dev/null
@@ -1,6 +0,0 @@
----
-synopsis: "Removed support for daemons and clients older than Nix 2.0"
-prs: [13951]
----
-
-We have dropped support in the daemon worker protocol for daemons and clients that don't speak at least version 18 of the protocol. This first Nix release that supports this version is Nix 2.0, released in February 2018.
diff --git a/doc/manual/rl-next/faster-nix-flake-check.md b/doc/manual/rl-next/faster-nix-flake-check.md
deleted file mode 100644
index c195023c3..000000000
--- a/doc/manual/rl-next/faster-nix-flake-check.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-synopsis: "`nix flake check` now skips derivations that can be substituted"
-prs: [13574]
----
-
-Previously, `nix flake check` would evaluate and build/substitute all
-derivations. Now, it will skip downloading derivations that can be substituted.
-This can drastically decrease the time invocations take in environments where
-checks may already be cached (like in CI).
diff --git a/doc/manual/rl-next/s3-curl-implementation.md b/doc/manual/rl-next/s3-curl-implementation.md
new file mode 100644
index 000000000..fab010010
--- /dev/null
+++ b/doc/manual/rl-next/s3-curl-implementation.md
@@ -0,0 +1,26 @@
+---
+synopsis: "Improved S3 binary cache support via HTTP"
+prs: [13823, 14026, 14120, 14131, 14135, 14144, 14170, 14190, 14198, 14206, 14209, 14222, 14223, 13752]
+issues: [13084, 12671, 11748, 12403]
+---
+
+S3 binary cache operations now happen via HTTP, leveraging `libcurl`'s native
+AWS SigV4 authentication instead of the AWS C++ SDK, providing significant
+improvements:
+
+- **Reduced memory usage**: Eliminates memory buffering issues that caused
+  segfaults with large files
+- **Fixed upload reliability**: Resolves AWS SDK chunking errors
+  (`InvalidChunkSizeError`)
+- **Lighter dependencies**: Uses the lightweight `aws-crt-cpp` instead of the full
+  `aws-cpp-sdk`, reducing build complexity
+
+The new implementation requires curl >= 7.75.0 and `aws-crt-cpp` for credential
+management.
+
+All existing S3 URL formats and parameters remain supported, with the notable
+exception of multi-part uploads, which are no longer supported.
+
+Note that this change also means Nix now supports S3 binary cache stores even
+when built without `aws-crt-cpp`, though only for public buckets that do not
+require authentication.
diff --git a/doc/manual/rl-next/s3-object-versioning.md b/doc/manual/rl-next/s3-object-versioning.md
new file mode 100644
index 000000000..3b85e0926
--- /dev/null
+++ b/doc/manual/rl-next/s3-object-versioning.md
@@ -0,0 +1,14 @@
+---
+synopsis: "S3 URLs now support object versioning via versionId parameter"
+prs: [14274]
+issues: [13955]
+---
+
+S3 URLs now support a `versionId` query parameter to fetch specific versions
+of objects from S3 buckets with versioning enabled.
This allows pinning to +exact object versions for reproducibility and protection against unexpected +changes: + +``` +s3://bucket/key?region=us-east-1&versionId=abc123def456 +``` diff --git a/doc/manual/rl-next/shorter-build-dir-names.md b/doc/manual/rl-next/shorter-build-dir-names.md deleted file mode 100644 index e87fa5d04..000000000 --- a/doc/manual/rl-next/shorter-build-dir-names.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -synopsis: "Temporary build directories no longer include derivation names" -prs: [13839] ---- - -Temporary build directories created during derivation builds no longer include the derivation name in their path to avoid build failures when the derivation name is too long. This change ensures predictable prefix lengths for build directories under `/nix/var/nix/builds`. \ No newline at end of file diff --git a/doc/manual/source/SUMMARY.md.in b/doc/manual/source/SUMMARY.md.in index 8fed98c2c..7f3b1a103 100644 --- a/doc/manual/source/SUMMARY.md.in +++ b/doc/manual/source/SUMMARY.md.in @@ -117,8 +117,12 @@ - [Architecture and Design](architecture/architecture.md) - [Formats and Protocols](protocols/index.md) - [JSON Formats](protocols/json/index.md) + - [Hash](protocols/json/hash.md) + - [Content Address](protocols/json/content-address.md) + - [Store Path](protocols/json/store-path.md) - [Store Object Info](protocols/json/store-object-info.md) - [Derivation](protocols/json/derivation.md) + - [Deriving Path](protocols/json/deriving-path.md) - [Serving Tarball Flakes](protocols/tarball-fetcher.md) - [Store Path Specification](protocols/store-path.md) - [Nix Archive (NAR) Format](protocols/nix-archive.md) @@ -138,6 +142,7 @@ - [Contributing](development/contributing.md) - [Releases](release-notes/index.md) {{#include ./SUMMARY-rl-next.md}} + - [Release 2.32 (2025-10-06)](release-notes/rl-2.32.md) - [Release 2.31 (2025-08-21)](release-notes/rl-2.31.md) - [Release 2.30 (2025-07-07)](release-notes/rl-2.30.md) - [Release 2.29 (2025-05-14)](release-notes/rl-2.29.md) diff --git a/doc/manual/source/command-ref/nix-shell.md b/doc/manual/source/command-ref/nix-shell.md index f2e2e3593..307f1934a 100644 --- a/doc/manual/source/command-ref/nix-shell.md +++ b/doc/manual/source/command-ref/nix-shell.md @@ -19,7 +19,7 @@ This man page describes the command `nix-shell`, which is distinct from `nix shell`. For documentation on the latter, run `nix shell --help` or see `man -nix3-shell`. +nix3-env-shell`. # Description diff --git a/doc/manual/source/command-ref/nix-store/gc.md b/doc/manual/source/command-ref/nix-store/gc.md index f432e00eb..8ec59d906 100644 --- a/doc/manual/source/command-ref/nix-store/gc.md +++ b/doc/manual/source/command-ref/nix-store/gc.md @@ -48,8 +48,7 @@ The behaviour of the collector is also influenced by the configuration file. By default, the collector prints the total number of freed bytes when it -finishes (or when it is interrupted). With `--print-dead`, it prints the -number of bytes that would be freed. +finishes (or when it is interrupted). {{#include ./opt-common.md}} diff --git a/doc/manual/source/development/documentation.md b/doc/manual/source/development/documentation.md index 30cc8adc4..dd40ef481 100644 --- a/doc/manual/source/development/documentation.md +++ b/doc/manual/source/development/documentation.md @@ -25,20 +25,31 @@ nix build .#nix-manual and open `./result/share/doc/nix/manual/index.html`. 
-To build the manual incrementally, [enter the development shell](./building.md) and run:
+To build the manual incrementally, [enter the development shell](./building.md) and configure with `doc-gen` enabled:
+
+**If using interactive `nix develop`:**
 
 ```console
-make manual-html-open -j $NIX_BUILD_CORES
+$ nix develop
+$ mesonFlags="$mesonFlags -Ddoc-gen=true" mesonConfigurePhase
 ```
 
-In order to reflect changes to the [Makefile for the manual], clear all generated files before re-building:
-
-[Makefile for the manual]: https://github.com/NixOS/nix/blob/master/doc/manual/local.mk
+**If using direnv:**
 
 ```console
-rm $(git ls-files doc/manual/ -o | grep -F '.md') && rmdir doc/manual/source/command-ref/new-cli && make manual-html -j $NIX_BUILD_CORES
+$ direnv allow
+$ bash -c 'source $stdenv/setup && mesonFlags="$mesonFlags -Ddoc-gen=true" mesonConfigurePhase'
 ```
 
+Then build the manual:
+
+```console
+$ cd build
+$ meson compile manual
+```
+
+The HTML manual will be generated at `build/src/nix-manual/manual/index.html`.
+
 ## Style guide
 
 The goal of this style guide is to make it such that
@@ -229,3 +240,9 @@ $ configurePhase
 $ ninja src/external-api-docs/html
 $ xdg-open src/external-api-docs/html/index.html
 ```
+
+If you use direnv, or otherwise want to run `configurePhase` in a transient shell, use:
+
+```bash
+nix-shell -A devShells.x86_64-linux.native-clangStdenv --command 'appendToVar mesonFlags "-Ddoc-gen=true"; mesonConfigurePhase'
+```
diff --git a/doc/manual/source/installation/installing-docker.md b/doc/manual/source/installation/installing-docker.md
index 9354c1a72..ccc75be5a 100644
--- a/doc/manual/source/installation/installing-docker.md
+++ b/doc/manual/source/installation/installing-docker.md
@@ -3,19 +3,21 @@
 To run the latest stable release of Nix with Docker run the following command:
 
 ```console
-$ docker run -ti ghcr.io/nixos/nix
-Unable to find image 'ghcr.io/nixos/nix:latest' locally
-latest: Pulling from ghcr.io/nixos/nix
+$ docker run -ti docker.io/nixos/nix
+Unable to find image 'docker.io/nixos/nix:latest' locally
+latest: Pulling from docker.io/nixos/nix
 5843afab3874: Pull complete
 b52bf13f109c: Pull complete
 1e2415612aa3: Pull complete
 Digest: sha256:27f6e7f60227e959ee7ece361f75d4844a40e1cc6878b6868fe30140420031ff
-Status: Downloaded newer image for ghcr.io/nixos/nix:latest
+Status: Downloaded newer image for docker.io/nixos/nix:latest
 35ca4ada6e96:/# nix --version
 nix (Nix) 2.3.12
 35ca4ada6e96:/# exit
 ```
 
+> If you want the latest pre-release, use `ghcr.io/nixos/nix`; the available versions are listed at https://github.com/nixos/nix/pkgs/container/nix
+
 # What is included in Nix's Docker image?
 The official Docker image is created using `pkgs.dockerTools.buildLayeredImage`
diff --git a/doc/manual/source/meson.build b/doc/manual/source/meson.build
index 949d26526..294d57ad9 100644
--- a/doc/manual/source/meson.build
+++ b/doc/manual/source/meson.build
@@ -1,3 +1,6 @@
+# Process JSON schema documentation
+subdir('protocols')
+
 summary_rl_next = custom_target(
   command : [
     bash,
diff --git a/doc/manual/source/protocols/json/content-address.md b/doc/manual/source/protocols/json/content-address.md
new file mode 100644
index 000000000..2284e30aa
--- /dev/null
+++ b/doc/manual/source/protocols/json/content-address.md
@@ -0,0 +1,21 @@
+{{#include content-address-v1-fixed.md}}
+
+## Examples
+
+### [Text](@docroot@/store/store-object/content-address.html#method-text) method
+
+```json
+{{#include schema/content-address-v1/text.json}}
+```
+
+### [Nix Archive](@docroot@/store/store-object/content-address.html#method-nix-archive) method
+
+```json
+{{#include schema/content-address-v1/nar.json}}
+```
+
+
diff --git a/doc/manual/source/protocols/json/derivation.md b/doc/manual/source/protocols/json/derivation.md
index 566288962..a4a4ea79d 100644
--- a/doc/manual/source/protocols/json/derivation.md
+++ b/doc/manual/source/protocols/json/derivation.md
@@ -1,120 +1,7 @@
-# Derivation JSON Format
+{{#include derivation-v3-fixed.md}}
 
-> **Warning**
->
-> This JSON format is currently
-> [**experimental**](@docroot@/development/experimental-features.md#xp-feature-nix-command)
-> and subject to change.
+
diff --git a/doc/manual/source/protocols/json/deriving-path.md b/doc/manual/source/protocols/json/deriving-path.md
new file mode 100644
index 000000000..9851b371d
--- /dev/null
+++ b/doc/manual/source/protocols/json/deriving-path.md
@@ -0,0 +1,21 @@
+{{#include deriving-path-v1-fixed.md}}
+
+## Examples
+
+### Constant
+
+```json
+{{#include schema/deriving-path-v1/single_opaque.json}}
+```
+
+### Output of static derivation
+
+```json
+{{#include schema/deriving-path-v1/single_built.json}}
+```
+
+### Output of dynamic derivation
+
+```json
+{{#include schema/deriving-path-v1/single_built_built.json}}
+```
diff --git a/doc/manual/source/protocols/json/fixup-json-schema-generated-doc.sed b/doc/manual/source/protocols/json/fixup-json-schema-generated-doc.sed
new file mode 100644
index 000000000..27895d42a
--- /dev/null
+++ b/doc/manual/source/protocols/json/fixup-json-schema-generated-doc.sed
@@ -0,0 +1,17 @@
+# For some reason, backticks in the JSON schema are being escaped rather
+# than being kept as intentional code spans. This removes all backtick
+# escaping, which is an ugly solution, but one that is fine, because we
+# are not using backticks for any other purpose.
+s/\\`/`/g
+
+# The way that semi-external references are rendered (i.e. ones to
+# sibling schema files, as opposed to separate website ones) is not nice
+# for humans. Replace it with a nice relative link within the manual
+# instead.
+#
+# As we have more such relative links, more replacements of this nature
+# should appear below.
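+#
+# For example, the first rule below rewrites the schema reference
+# `./hash-v1.yaml#/$defs/algorithm` into the manual-relative link
+# `[JSON format for `Hash`](./hash.html#algorithm)`.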
+s^\(./hash-v1.yaml\)\?#/$defs/algorithm^[JSON format for `Hash`](./hash.html#algorithm)^g +s^\(./hash-v1.yaml\)^[JSON format for `Hash`](./hash.html)^g +s^\(./content-address-v1.yaml\)\?#/$defs/method^[JSON format for `ContentAddress`](./content-address.html#method)^g +s^\(./content-address-v1.yaml\)^[JSON format for `ContentAddress`](./content-address.html)^g diff --git a/doc/manual/source/protocols/json/hash.md b/doc/manual/source/protocols/json/hash.md new file mode 100644 index 000000000..988c8466b --- /dev/null +++ b/doc/manual/source/protocols/json/hash.md @@ -0,0 +1,33 @@ +{{#include hash-v1-fixed.md}} + +## Examples + +### SHA-256 with Base64 encoding + +```json +{{#include schema/hash-v1/sha256-base64.json}} +``` + +### SHA-256 with Base16 (hexadecimal) encoding + +```json +{{#include schema/hash-v1/sha256-base16.json}} +``` + +### SHA-256 with Nix32 encoding + +```json +{{#include schema/hash-v1/sha256-nix32.json}} +``` + +### BLAKE3 with Base64 encoding + +```json +{{#include schema/hash-v1/blake3-base64.json}} +``` + + diff --git a/doc/manual/source/protocols/json/json-schema-for-humans-config.yaml b/doc/manual/source/protocols/json/json-schema-for-humans-config.yaml new file mode 100644 index 000000000..cad098053 --- /dev/null +++ b/doc/manual/source/protocols/json/json-schema-for-humans-config.yaml @@ -0,0 +1,17 @@ +# Configuration file for json-schema-for-humans +# +# https://github.com/coveooss/json-schema-for-humans/blob/main/docs/examples/examples_md_default/Configuration.md + +template_name: md +show_toc: true +# impure timestamp and distracting +with_footer: false +recursive_detection_depth: 3 +show_breadcrumbs: false +description_is_markdown: true +template_md_options: + properties_table_columns: + - Property + - Type + - Pattern + - Title/Description diff --git a/doc/manual/source/protocols/json/meson.build b/doc/manual/source/protocols/json/meson.build new file mode 100644 index 000000000..7ebcff697 --- /dev/null +++ b/doc/manual/source/protocols/json/meson.build @@ -0,0 +1,78 @@ +# Tests in: ../../../../src/json-schema-checks + +fs = import('fs') + +# Find json-schema-for-humans if available +json_schema_for_humans = find_program('generate-schema-doc', required : false) + +# Configuration for json-schema-for-humans +json_schema_config = files('json-schema-for-humans-config.yaml') + +schemas = [ + 'hash-v1', + 'content-address-v1', + 'store-path-v1', + 'store-object-info-v1', + 'derivation-v3', + 'deriving-path-v1', +] + +schema_files = files() +foreach schema_name : schemas + schema_files += files('schema' / schema_name + '.yaml') +endforeach + + +schema_outputs = [] +foreach schema_name : schemas + schema_outputs += schema_name + '.md' +endforeach + +json_schema_generated_files = [] + +# Generate markdown documentation from JSON schema +# Note: output must be just a filename, not a path +gen_file = custom_target( + schema_name + '-schema-docs.tmp', + command : [ + json_schema_for_humans, + '--config-file', + json_schema_config, + meson.current_source_dir() / 'schema', + meson.current_build_dir(), + ], + input : schema_files + [ + json_schema_config, + ], + output : schema_outputs, + capture : false, + build_by_default : true, +) + +idx = 0 +if json_schema_for_humans.found() + foreach schema_name : schemas + #schema_file = 'schema' / schema_name + '.yaml' + + # There is one so-so hack, and one horrible hack being done here. 
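+    # `gen_file` declares one Markdown output per schema, in the order of the
+    # `schemas` list, so each schema's generated page is picked out by list
+    # index (`gen_file[idx]`) below.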
+ sedded_file = custom_target( + schema_name + '-schema-docs', + command : [ + 'sed', + '-f', + # Out of line to avoid https://github.com/mesonbuild/meson/issues/1564 + files('fixup-json-schema-generated-doc.sed'), + '@INPUT@', + ], + capture : true, + input : gen_file[idx], + output : schema_name + '-fixed.md', + ) + idx += 1 + json_schema_generated_files += [ sedded_file ] + endforeach +else + warning( + 'json-schema-for-humans not found, skipping JSON schema documentation generation', + ) +endif diff --git a/doc/manual/source/protocols/json/schema/content-address-v1 b/doc/manual/source/protocols/json/schema/content-address-v1 new file mode 120000 index 000000000..35a0dd865 --- /dev/null +++ b/doc/manual/source/protocols/json/schema/content-address-v1 @@ -0,0 +1 @@ +../../../../../../src/libstore-tests/data/content-address \ No newline at end of file diff --git a/doc/manual/source/protocols/json/schema/content-address-v1.yaml b/doc/manual/source/protocols/json/schema/content-address-v1.yaml new file mode 100644 index 000000000..d0f759201 --- /dev/null +++ b/doc/manual/source/protocols/json/schema/content-address-v1.yaml @@ -0,0 +1,55 @@ +"$schema": "http://json-schema.org/draft-04/schema" +"$id": "https://nix.dev/manual/nix/latest/protocols/json/schema/content-address-v1.json" +title: Content Address +description: | + This schema describes the JSON representation of Nix's `ContentAddress` type, which conveys information about [content-addressing store objects](@docroot@/store/store-object/content-address.md). + + > **Note** + > + > For current methods of content addressing, this data type is a bit suspicious, because it is neither simply a content address of a file system object (the `method` is richer), nor simply a content address of a store object (the `hash` doesn't account for the references). + > It should thus only be used in contexts where the references are also known / otherwise made tamper-resistant. + + + +type: object +properties: + method: + "$ref": "#/$defs/method" + hash: + title: Content Address + description: | + This would be the content-address itself. + + For all current methods, this is just a content address of the file system object of the store object, [as described in the store chapter](@docroot@/store/file-system-object/content-address.md), and not of the store object as a whole. + In particular, the references of the store object are *not* taken into account with this hash (and currently-supported methods). + "$ref": "./hash-v1.yaml" +required: +- method +- hash +additionalProperties: false +"$defs": + method: + type: string + enum: [flat, nar, text, git] + title: Content-Addressing Method + description: | + A string representing the [method](@docroot@/store/store-object/content-address.md) of content addressing that is chosen. 
+ + Valid method strings are: + + - [`flat`](@docroot@/store/store-object/content-address.md#method-flat) (provided the contents are a single file) + - [`nar`](@docroot@/store/store-object/content-address.md#method-nix-archive) + - [`text`](@docroot@/store/store-object/content-address.md#method-text) + - [`git`](@docroot@/store/store-object/content-address.md#method-git) diff --git a/doc/manual/source/protocols/json/schema/derivation-v3.yaml b/doc/manual/source/protocols/json/schema/derivation-v3.yaml new file mode 100644 index 000000000..3275bcdd9 --- /dev/null +++ b/doc/manual/source/protocols/json/schema/derivation-v3.yaml @@ -0,0 +1,203 @@ +"$schema": "http://json-schema.org/draft-04/schema" +"$id": "https://nix.dev/manual/nix/latest/protocols/json/schema/derivation-v3.json" +title: Derivation +description: | + Experimental JSON representation of a Nix derivation (version 3). + + This schema describes the JSON representation of Nix's `Derivation` type. + + > **Warning** + > + > This JSON format is currently + > [**experimental**](@docroot@/development/experimental-features.md#xp-feature-nix-command) + > and subject to change. + +type: object +required: + - name + - version + - outputs + - inputSrcs + - inputDrvs + - system + - builder + - args + - env +properties: + name: + type: string + title: Derivation name + description: | + The name of the derivation. + Used when calculating store paths for the derivation’s outputs. + + version: + const: 3 + title: Format version (must be 3) + description: | + Must be `3`. + This is a guard that allows us to continue evolving this format. + The choice of `3` is fairly arbitrary, but corresponds to this informal version: + + - Version 0: A-Term format + + - Version 1: Original JSON format, with ugly `"r:sha256"` inherited from A-Term format. + + - Version 2: Separate `method` and `hashAlgo` fields in output specs + + - Version 3: Drop store dir from store paths, just include base name. + + Note that while this format is experimental, the maintenance of versions is best-effort, and not promised to identify every change. + + outputs: + type: object + title: Output specifications + description: | + Information about the output paths of the derivation. + This is a JSON object with one member per output, where the key is the output name and the value is a JSON object as described. + + > **Example** + > + > ```json + > "outputs": { + > "out": { + > "method": "nar", + > "hashAlgo": "sha256", + > "hash": "6fc80dcc62179dbc12fc0b5881275898f93444833d21b89dfe5f7fbcbb1d0d62" + > } + > } + > ``` + additionalProperties: + "$ref": "#/$defs/output" + + inputSrcs: + type: array + title: Input source paths + description: | + List of store paths on which this derivation depends. + + > **Example** + > + > ```json + > "inputSrcs": [ + > "47y241wqdhac3jm5l7nv0x4975mb1975-separate-debug-info.sh", + > "56d0w71pjj9bdr363ym3wj1zkwyqq97j-fix-pop-var-context-error.patch" + > ] + > ``` + items: + $ref: "store-path-v1.yaml" + + inputDrvs: + type: object + title: Input derivations + description: | + Mapping of derivation paths to lists of output names they provide. + + > **Example** + > + > ```json + > "inputDrvs": { + > "6lkh5yi7nlb7l6dr8fljlli5zfd9hq58-curl-7.73.0.drv": ["dev"], + > "fn3kgnfzl5dzym26j8g907gq3kbm8bfh-unzip-6.0.drv": ["out"] + > } + > ``` + > + > specifies that this derivation depends on the `dev` output of `curl`, and the `out` output of `unzip`. 
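+
+      Note that, per format version 3, the keys are store path *basenames* (no store directory prefix), as the pattern below reflects.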
+ patternProperties: + "^[0123456789abcdfghijklmnpqrsvwxyz]{32}-.+\\.drv$": + title: Store Path + description: | + A store path to a derivation, mapped to the outputs of that derivation. + oneOf: + - "$ref": "#/$defs/outputNames" + - "$ref": "#/$defs/dynamicOutputs" + additionalProperties: false + + system: + type: string + title: Build system type + description: | + The system type on which this derivation is to be built + (e.g. `x86_64-linux`). + + builder: + type: string + title: Build program path + description: | + Absolute path of the program used to perform the build. + Typically this is the `bash` shell + (e.g. `/nix/store/r3j288vpmczbl500w6zz89gyfa4nr0b1-bash-4.4-p23/bin/bash`). + + args: + type: array + title: Builder arguments + description: | + Command-line arguments passed to the `builder`. + items: + type: string + + env: + type: object + title: Environment variables + description: | + Environment variables passed to the `builder`. + additionalProperties: + type: string + + structuredAttrs: + title: Structured attributes + description: | + [Structured Attributes](@docroot@/store/derivation/index.md#structured-attrs), only defined if the derivation contains them. + Structured attributes are JSON, and thus embedded as-is. + type: object + additionalProperties: true + +"$defs": + output: + type: object + properties: + path: + $ref: "store-path-v1.yaml" + title: Output path + description: | + The output path, if known in advance. + + method: + "$ref": "./content-address-v1.yaml#/$defs/method" + description: | + For an output which will be [content addressed](@docroot@/store/derivation/outputs/content-address.md), a string representing the [method](@docroot@/store/store-object/content-address.md) of content addressing that is chosen. + See the linked original definition for further details. + hashAlgo: + title: Hash algorithm + "$ref": "./hash-v1.yaml#/$defs/algorithm" + + hash: + type: string + title: Expected hash value + description: | + For fixed-output derivations, the expected content hash in base-16. + + outputName: + type: string + title: Output name + description: Name of the derivation output to depend on + + outputNames: + type: array + title: Output Names + description: Set of names of derivation outputs to depend on + items: + "$ref": "#/$defs/outputName" + + dynamicOutputs: + type: object + title: Dynamic Outputs + description: | + **Experimental feature**: [`dynamic-derivations`](@docroot@/development/experimental-features.md#xp-feature-dynamic-derivations) + + This recursive data type allows for depending on outputs of outputs. 
+    properties:
+      outputs:
+        "$ref": "#/$defs/outputNames"
+      dynamicOutputs:
+        "$ref": "#/$defs/dynamicOutputs"
diff --git a/doc/manual/source/protocols/json/schema/deriving-path-v1 b/doc/manual/source/protocols/json/schema/deriving-path-v1
new file mode 120000
index 000000000..92ec6d01a
--- /dev/null
+++ b/doc/manual/source/protocols/json/schema/deriving-path-v1
@@ -0,0 +1 @@
+../../../../../../src/libstore-tests/data/derived-path
\ No newline at end of file
diff --git a/doc/manual/source/protocols/json/schema/deriving-path-v1.yaml b/doc/manual/source/protocols/json/schema/deriving-path-v1.yaml
new file mode 100644
index 000000000..11a784d06
--- /dev/null
+++ b/doc/manual/source/protocols/json/schema/deriving-path-v1.yaml
@@ -0,0 +1,27 @@
+"$schema": "http://json-schema.org/draft-04/schema"
+"$id": "https://nix.dev/manual/nix/latest/protocols/json/schema/deriving-path-v1.json"
+title: Deriving Path
+description: |
+  This schema describes the JSON representation of Nix's [Deriving Path](@docroot@/store/derivation/index.md#deriving-path).
+oneOf:
+  - title: Constant
+    description: |
+      See [Constant](@docroot@/store/derivation/index.md#deriving-path-constant) deriving path.
+    $ref: "store-path-v1.yaml"
+  - title: Output
+    description: |
+      See [Output](@docroot@/store/derivation/index.md#deriving-path-output) deriving path.
+    type: object
+    properties:
+      drvPath:
+        "$ref": "#"
+        description: |
+          A deriving path to a [Derivation](@docroot@/store/derivation/index.md#store-derivation), whose output is being referred to.
+      output:
+        type: string
+        description: |
+          The name of an output produced by that derivation (e.g. "out", "doc", etc.).
+    required:
+      - drvPath
+      - output
+    additionalProperties: false
diff --git a/doc/manual/source/protocols/json/schema/hash-v1 b/doc/manual/source/protocols/json/schema/hash-v1
new file mode 120000
index 000000000..06937e286
--- /dev/null
+++ b/doc/manual/source/protocols/json/schema/hash-v1
@@ -0,0 +1 @@
+../../../../../../src/libutil-tests/data/hash/
\ No newline at end of file
diff --git a/doc/manual/source/protocols/json/schema/hash-v1.yaml b/doc/manual/source/protocols/json/schema/hash-v1.yaml
new file mode 100644
index 000000000..316fb6d73
--- /dev/null
+++ b/doc/manual/source/protocols/json/schema/hash-v1.yaml
@@ -0,0 +1,54 @@
+"$schema": "http://json-schema.org/draft-04/schema"
+"$id": "https://nix.dev/manual/nix/latest/protocols/json/schema/hash-v1.json"
+title: Hash
+description: |
+  A cryptographic hash value used throughout Nix for content addressing and integrity verification.
+
+  This schema describes the JSON representation of Nix's `Hash` type.
+type: object
+properties:
+  algorithm:
+    "$ref": "#/$defs/algorithm"
+  format:
+    type: string
+    enum:
+      - base64
+      - nix32
+      - base16
+      - sri
+    title: Hash format
+    description: |
+      The encoding format of the hash value.
+
+      - `base64` uses standard Base64 encoding [RFC 4648, section 4](https://datatracker.ietf.org/doc/html/rfc4648#section-4)
+      - `nix32` is Nix-specific base-32 encoding
+      - `base16` is lowercase hexadecimal
+      - `sri` is the [Subresource Integrity format](https://developer.mozilla.org/en-US/docs/Web/Security/Subresource_Integrity).
+  hash:
+    type: string
+    title: Hash
+    description: |
+      The encoded hash value itself.
+
+      It is encoded in the format given by the `format` field, and must have the correct length for the hash algorithm given in the `algorithm` field.
+      The hash value does not include any algorithm prefix.
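+
+      For example, the SHA-256 hash of the empty string, in `base16` format, is
+      `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`.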
+required:
+- algorithm
+- format
+- hash
+additionalProperties: false
+"$defs":
+  algorithm:
+    type: string
+    enum:
+      - blake3
+      - md5
+      - sha1
+      - sha256
+      - sha512
+    title: Hash algorithm
+    description: |
+      The hash algorithm used to compute the hash value.
+
+      `blake3` is currently experimental and requires the [`blake-hashing`](@docroot@/development/experimental-features.md#xp-feature-blake-hashing) experimental feature.
diff --git a/doc/manual/source/protocols/json/schema/nar-info-v1 b/doc/manual/source/protocols/json/schema/nar-info-v1
new file mode 120000
index 000000000..ded866b6f
--- /dev/null
+++ b/doc/manual/source/protocols/json/schema/nar-info-v1
@@ -0,0 +1 @@
+../../../../../../src/libstore-tests/data/nar-info
\ No newline at end of file
diff --git a/doc/manual/source/protocols/json/schema/store-object-info-v1 b/doc/manual/source/protocols/json/schema/store-object-info-v1
new file mode 120000
index 000000000..fc6f5c3f0
--- /dev/null
+++ b/doc/manual/source/protocols/json/schema/store-object-info-v1
@@ -0,0 +1 @@
+../../../../../../src/libstore-tests/data/path-info
\ No newline at end of file
diff --git a/doc/manual/source/protocols/json/schema/store-object-info-v1.yaml b/doc/manual/source/protocols/json/schema/store-object-info-v1.yaml
new file mode 100644
index 000000000..d79f25043
--- /dev/null
+++ b/doc/manual/source/protocols/json/schema/store-object-info-v1.yaml
@@ -0,0 +1,235 @@
+"$schema": "http://json-schema.org/draft-07/schema"
+"$id": "https://nix.dev/manual/nix/latest/protocols/json/schema/store-object-info-v1.json"
+title: Store Object Info
+description: |
+  Information about a [store object](@docroot@/store/store-object.md).
+
+  This schema describes the JSON representation of store object metadata as returned by commands like [`nix path-info --json`](@docroot@/command-ref/new-cli/nix3-path-info.md).
+
+  > **Warning**
+  >
+  > This JSON format is currently
+  > [**experimental**](@docroot@/development/experimental-features.md#xp-feature-nix-command)
+  > and subject to change.
+
+  ### Field Categories
+
+  Store object information can come in a few different variations.
+
+  First, "impure" fields, which contain non-intrinsic information about the store object, may or may not be included.
+
+  Second, binary cache stores have extra non-intrinsic information about the store objects they contain.
+
+  Third, [`nix path-info --json --closure-size`](@docroot@/command-ref/new-cli/nix3-path-info.html#opt-closure-size) can compute some extra information about not just the single store object in question, but the store object and its [closure](@docroot@/glossary.md#gloss-closure).
+
+  The impure and NAR fields are grouped into separate variants below.
+  See their descriptions for additional information.
+  The closure fields, however, are just included as optional fields, to avoid a combinatorial explosion of variants.
+
+oneOf:
+  - $ref: "#/$defs/base"
+
+  - $ref: "#/$defs/impure"
+
+  - $ref: "#/$defs/narInfo"
+
+$defs:
+  base:
+    title: Store Object Info
+    description: |
+      Basic store object metadata containing only intrinsic properties.
+      This is the minimal set of fields that describe what a store object contains.
+    type: object
+    required:
+      - narHash
+      - narSize
+      - references
+      - ca
+    properties:
+      path:
+        type: string
+        title: Store Path
+        description: |
+          [Store path](@docroot@/store/store-path.md) to the given store object.
+
+          Note: This field may not be present in all contexts, such as when the path is used as the key and the store object info is the value in a map.
+ + narHash: + type: string + title: NAR Hash + description: | + Hash of the [file system object](@docroot@/store/file-system-object.md) part of the store object when serialized as a [Nix Archive](@docroot@/store/file-system-object/content-address.md#serial-nix-archive). + + narSize: + type: integer + minimum: 0 + title: NAR Size + description: | + Size of the [file system object](@docroot@/store/file-system-object.md) part of the store object when serialized as a [Nix Archive](@docroot@/store/file-system-object/content-address.md#serial-nix-archive). + + references: + type: array + title: References + description: | + An array of [store paths](@docroot@/store/store-path.md), possibly including this one. + items: + type: string + + ca: + type: ["string", "null"] + title: Content Address + description: | + If the store object is [content-addressed](@docroot@/store/store-object/content-address.md), + this is the content address of this store object's file system object, used to compute its store path. + Otherwise (i.e. if it is [input-addressed](@docroot@/glossary.md#gloss-input-addressed-store-object)), this is `null`. + additionalProperties: false + + impure: + title: Store Object Info with Impure Fields + description: | + Store object metadata including impure fields that are not *intrinsic* properties. + In other words, the same store object in different stores could have different values for these impure fields. + type: object + required: + - narHash + - narSize + - references + - ca + # impure + - deriver + - registrationTime + - ultimate + - signatures + properties: + path: { $ref: "#/$defs/base/properties/path" } + narHash: { $ref: "#/$defs/base/properties/narHash" } + narSize: { $ref: "#/$defs/base/properties/narSize" } + references: { $ref: "#/$defs/base/properties/references" } + ca: { $ref: "#/$defs/base/properties/ca" } + deriver: + type: ["string", "null"] + title: Deriver + description: | + If known, the path to the [store derivation](@docroot@/glossary.md#gloss-store-derivation) from which this store object was produced. + Otherwise `null`. + + > This is an "impure" field that may not be included in certain contexts. + + registrationTime: + type: ["integer", "null"] + title: Registration Time + description: | + If known, when this derivation was added to the store (Unix timestamp). + Otherwise `null`. + + > This is an "impure" field that may not be included in certain contexts. + + ultimate: + type: boolean + title: Ultimate + description: | + Whether this store object is trusted because we built it ourselves, rather than substituted a build product from elsewhere. + + > This is an "impure" field that may not be included in certain contexts. + + signatures: + type: array + title: Signatures + description: | + Signatures claiming that this store object is what it claims to be. + Not relevant for [content-addressed](@docroot@/store/store-object/content-address.md) store objects, + but useful for [input-addressed](@docroot@/glossary.md#gloss-input-addressed-store-object) store objects. + + > This is an "impure" field that may not be included in certain contexts. + items: + type: string + + # Computed closure fields + closureSize: + type: integer + minimum: 0 + title: Closure Size + description: | + The total size of this store object and every other object in its [closure](@docroot@/glossary.md#gloss-closure). + + > This field is not stored at all, but computed by traversing the other fields across all the store objects in a closure. 
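A minimal sketch of the base variant described above, with purely illustrative (not mutually consistent) values; note that `path` may be omitted when the store path already serves as the key in a map, and that `ca` is `null` for an input-addressed store object:

```json
{
  "narHash": "sha256-47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=",
  "narSize": 120,
  "references": [
    "g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"
  ],
  "ca": null
}
```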
+ additionalProperties: false + + narInfo: + title: Store Object Info with Impure Fields and NAR Info + description: | + The store object info in the "binary cache" family of Nix store types contains extra information pertaining to *downloads* of the store object in question. + (This store info is called "NAR info", since the downloads take the form of [Nix Archives](@docroot@/store/file-system-object/content-address.md#serial-nix-archive), and the metadata is served in a file with a `.narinfo` extension.) + + This download information, being specific to how the store object happens to be stored and transferred, is also considered to be non-intrinsic / impure. + type: object + required: + - narHash + - narSize + - references + - ca + # impure + - deriver + - registrationTime + - ultimate + - signatures + # nar + - url + - compression + - downloadHash + - downloadSize + properties: + path: { $ref: "#/$defs/base/properties/path" } + narHash: { $ref: "#/$defs/base/properties/narHash" } + narSize: { $ref: "#/$defs/base/properties/narSize" } + references: { $ref: "#/$defs/base/properties/references" } + ca: { $ref: "#/$defs/base/properties/ca" } + deriver: { $ref: "#/$defs/impure/properties/deriver" } + registrationTime: { $ref: "#/$defs/impure/properties/registrationTime" } + ultimate: { $ref: "#/$defs/impure/properties/ultimate" } + signatures: { $ref: "#/$defs/impure/properties/signatures" } + closureSize: { $ref: "#/$defs/impure/properties/closureSize" } + url: + type: string + title: URL + description: | + Where to download a compressed archive of the file system objects of this store object. + + > This is an impure "`.narinfo`" field that may not be included in certain contexts. + + compression: + type: string + title: Compression + description: | + The compression format that the archive is in. + + > This is an impure "`.narinfo`" field that may not be included in certain contexts. + + downloadHash: + type: string + title: Download Hash + description: | + A digest for the compressed archive itself, as opposed to the data contained within. + + > This is an impure "`.narinfo`" field that may not be included in certain contexts. + + downloadSize: + type: integer + minimum: 0 + title: Download Size + description: | + The size of the compressed archive itself. + + > This is an impure "`.narinfo`" field that may not be included in certain contexts. + + closureDownloadSize: + type: integer + minimum: 0 + title: Closure Download Size + description: | + The total size of the compressed archive itself for this object, and the compressed archive of every object in this object's [closure](@docroot@/glossary.md#gloss-closure). + + > This is an impure "`.narinfo`" field that may not be included in certain contexts. + + > This field is not stored at all, but computed by traversing the other fields across all the store objects in a closure.
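A corresponding sketch of the `narInfo` variant, again with hypothetical values; the impure and download fields are required keys in this variant, so unknown ones are carried as `null` or empty rather than omitted:

```json
{
  "narHash": "sha256-47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=",
  "narSize": 120,
  "references": [],
  "ca": null,
  "deriver": null,
  "registrationTime": null,
  "ultimate": false,
  "signatures": [],
  "url": "nar/1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q.nar.xz",
  "compression": "xz",
  "downloadHash": "sha256-47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=",
  "downloadSize": 64
}
```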
+ additionalProperties: false diff --git a/doc/manual/source/protocols/json/schema/store-path-v1 b/doc/manual/source/protocols/json/schema/store-path-v1 new file mode 120000 index 000000000..31e7a6b2a --- /dev/null +++ b/doc/manual/source/protocols/json/schema/store-path-v1 @@ -0,0 +1 @@ +../../../../../../src/libstore-tests/data/store-path \ No newline at end of file diff --git a/doc/manual/source/protocols/json/schema/store-path-v1.yaml b/doc/manual/source/protocols/json/schema/store-path-v1.yaml new file mode 100644 index 000000000..2012aab99 --- /dev/null +++ b/doc/manual/source/protocols/json/schema/store-path-v1.yaml @@ -0,0 +1,32 @@ +"$schema": "http://json-schema.org/draft-07/schema" +"$id": "https://nix.dev/manual/nix/latest/protocols/json/schema/store-path-v1.json" +title: Store Path +description: | + A [store path](@docroot@/store/store-path.md) identifying a store object. + + This schema describes the JSON representation of store paths as used in various Nix JSON APIs. + + > **Warning** + > + > This JSON format is currently + > [**experimental**](@docroot@/development/experimental-features.md#xp-feature-nix-command) + > and subject to change. + + ## Format + + Store paths in JSON are represented as strings containing just the digest and name portion, without the store directory prefix. + + For example: `"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"` + + (If the store dir is `/nix/store`, then this corresponds to the path `/nix/store/g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv`.) + + ## Structure + + The format follows this pattern: `${digest}-${name}` + + - **digest**: Rendered in a custom variant of [Base32](https://en.wikipedia.org/wiki/Base32) (20 arbitrary bytes become 32 ASCII characters) + - **name**: The package name and optional version/suffix information + +type: string +pattern: "^[0123456789abcdfghijklmnpqrsvwxyz]{32}-.+$" +minLength: 34 diff --git a/doc/manual/source/protocols/json/store-object-info.md b/doc/manual/source/protocols/json/store-object-info.md index b7348538c..4673dd773 100644 --- a/doc/manual/source/protocols/json/store-object-info.md +++ b/doc/manual/source/protocols/json/store-object-info.md @@ -1,102 +1,45 @@ -# Store object info JSON format +{{#include store-object-info-v1-fixed.md}} -> **Warning** -> -> This JSON format is currently -> [**experimental**](@docroot@/development/experimental-features.md#xp-feature-nix-command) -> and subject to change. +## Examples -Info about a [store object]. +### Minimal store object (content-addressed) -* `path`: +```json +{{#include schema/store-object-info-v1/pure.json}} +``` - [Store path][store path] to the given store object. +### Store object with impure fields -* `narHash`: +```json +{{#include schema/store-object-info-v1/impure.json}} +``` - Hash of the [file system object] part of the store object when serialized as a [Nix Archive]. +### Minimal store object (empty) -* `narSize`: +```json +{{#include schema/store-object-info-v1/empty_pure.json}} +``` - Size of the [file system object] part of the store object when serialized as a [Nix Archive]. +### Store object with all impure fields -* `references`: +```json +{{#include schema/store-object-info-v1/empty_impure.json}} +``` - An array of [store paths][store path], possibly including this one. +### NAR info (minimal) -* `ca`: +```json +{{#include schema/nar-info-v1/pure.json}} +``` - If the store object is [content-addressed], - this is the content address of this store object's file system object, used to compute its store path. - Otherwise (i.e.
 if it is [input-addressed]), this is `null`. +### NAR info (with binary cache fields) -[store path]: @docroot@/store/store-path.md -[file system object]: @docroot@/store/file-system-object.md -[Nix Archive]: @docroot@/store/file-system-object/content-address.md#serial-nix-archive +```json +{{#include schema/nar-info-v1/impure.json}} +``` -## Impure fields + diff --git a/doc/manual/source/protocols/json/store-path.md b/doc/manual/source/protocols/json/store-path.md new file mode 100644 index 000000000..cd18f6595 --- /dev/null +++ b/doc/manual/source/protocols/json/store-path.md @@ -0,0 +1,15 @@ +{{#include store-path-v1-fixed.md}} + +## Examples + +### Simple store path + +```json +{{#include schema/store-path-v1/simple.json}} +``` + + diff --git a/doc/manual/source/protocols/meson.build b/doc/manual/source/protocols/meson.build new file mode 100644 index 000000000..5b5eb900d --- /dev/null +++ b/doc/manual/source/protocols/meson.build @@ -0,0 +1,2 @@ +# Process JSON schema documentation +subdir('json') diff --git a/doc/manual/source/release-notes/rl-2.32.md b/doc/manual/source/release-notes/rl-2.32.md new file mode 100644 index 000000000..3a925198d --- /dev/null +++ b/doc/manual/source/release-notes/rl-2.32.md @@ -0,0 +1,130 @@ +# Release 2.32.0 (2025-10-06) + +## Incompatible changes + +- Removed support for daemons and clients older than Nix 2.0 [#13951](https://github.com/NixOS/nix/pull/13951) + + We have dropped support in the daemon worker protocol for daemons and clients that don't speak at least version 18 of the protocol. The first Nix release that supports this version is Nix 2.0, released in February 2018. + +- Derivation JSON format now uses store path basenames only [#13570](https://github.com/NixOS/nix/issues/13570) [#13980](https://github.com/NixOS/nix/pull/13980) + + Experience with many JSON frameworks (e.g. nlohmann/json in C++, Serde in Rust, and Aeson in Haskell) has shown that the use of the store directory in JSON formats is an impediment to systematic JSON formats, because it requires the serializer/deserializer to take an extra parameter (the store directory). + + We ultimately want to rectify this issue with all JSON formats to the extent allowed by our stability promises. To start with, we are changing the JSON format for derivations because the `nix derivation` commands are — in addition to being formally unstable — less widely used than other unstable commands. + + See the documentation on the [JSON format for derivations](@docroot@/protocols/json/derivation.md) for further details. + +- C API: `nix_get_attr_name_byidx`, `nix_get_attr_byidx` take a `nix_value *` instead of `const nix_value *` [#13987](https://github.com/NixOS/nix/pull/13987) + + In order to accommodate a more optimized internal representation of attribute set merges, these functions require + a mutable `nix_value *` that might be modified on access. This does *not* break the ABI of these functions.
+ +## New features + +- C API: Add lazy attribute and list item accessors [#14030](https://github.com/NixOS/nix/pull/14030) + + The C API now includes lazy accessor functions for retrieving values from lists and attribute sets without forcing evaluation: + + - `nix_get_list_byidx_lazy()` - Get a list element without forcing its evaluation + - `nix_get_attr_byname_lazy()` - Get an attribute value by name without forcing evaluation + - `nix_get_attr_byidx_lazy()` - Get an attribute by index without forcing evaluation + + These functions are useful when forwarding unevaluated sub-values to other lists, attribute sets, or function calls. They allow more efficient handling of Nix values by deferring evaluation until actually needed. + + Additionally, bounds checking has been improved for all `_byidx` functions to properly validate indices before access, preventing potential out-of-bounds errors. + + The documentation for `NIX_ERR_KEY` error handling has also been clarified to specify when this error code is returned. + +- HTTP binary caches now support transparent compression for metadata + + HTTP binary cache stores can now compress `.narinfo`, `.ls`, and build log files before uploading them, + reducing bandwidth usage and storage requirements. The compression is applied transparently using the + `Content-Encoding` header, allowing compatible clients to automatically decompress the files. + + Three new configuration options control this behavior: + - `narinfo-compression`: Compression method for `.narinfo` files + - `ls-compression`: Compression method for `.ls` files + - `log-compression`: Compression method for build logs in `log/` directory + + Example usage: + ``` + nix copy --to 'http://cache.example.com?narinfo-compression=gzip&ls-compression=gzip' /nix/store/... + nix store copy-log --to 'http://cache.example.com?log-compression=br' /nix/store/... + ``` + +- Temporary build directories no longer include derivation names [#13839](https://github.com/NixOS/nix/pull/13839) + + Temporary build directories created during derivation builds no longer include the derivation name in their path to avoid build failures when the derivation name is too long. This change ensures predictable prefix lengths for build directories under `/nix/var/nix/builds`. + +- External derivation builders [#14145](https://github.com/NixOS/nix/pull/14145) + + These are helper programs that Nix calls to build derivations for specified system types, e.g. by using QEMU to emulate a different type of platform. For more information, see the [`external-builders` setting](../command-ref/conf-file.md#conf-external-builders). + + This is currently an experimental feature. + +## Performance improvements + +- Optimize memory usage of attribute set merges [#13987](https://github.com/NixOS/nix/pull/13987) + + [Attribute set update operations](@docroot@/language/operators.md#update) have been optimized to + reduce reallocations in cases when the second operand is small. + + For typical evaluations of nixpkgs this optimization leads to ~20% less memory allocated in total + without significantly affecting evaluation performance.
+ + See [eval-attrset-update-layer-rhs-threshold](@docroot@/command-ref/conf-file.md#conf-eval-attrset-update-layer-rhs-threshold) + +- Substituted flake inputs are no longer re-copied to the store [#14041](https://github.com/NixOS/nix/pull/14041) + + Since 2.25, Nix would fail to store a cache entry for substituted flake inputs, which in turn would cause them to be re-copied to the store on initial evaluation. Caching these inputs results in a near doubling of performance in some cases — especially on I/O-bound machines and when using commands that fetch many inputs, like `nix flake [archive|prefetch-inputs]`. + +- `nix flake check` now skips derivations that can be substituted [#13574](https://github.com/NixOS/nix/pull/13574) + + Previously, `nix flake check` would evaluate and build/substitute all + derivations. Now, it will skip downloading derivations that can be substituted. + This can drastically decrease the time invocations take in environments where + checks may already be cached (like in CI). + +- `fetchTarball` and `fetchurl` now correctly substitute [#14138](https://github.com/NixOS/nix/issues/14138) + + At some point we stopped substituting calls to `fetchTarball` and `fetchurl` with a set `narHash` to avoid incorrectly substituting things in `fetchTree`, even though it would be safe to substitute when calling the legacy `fetch{Tarball,url}`. This fixes that regression where it is safe. + +- Started moving AST allocations into a bump allocator [#14088](https://github.com/NixOS/nix/issues/14088) + + This leaves smaller, immutable structures in the AST. So far this saves about 2% memory on a NixOS config evaluation. + +## Contributors + +This release was made possible by the following 32 contributors: + +- Farid Zakaria [**(@fzakaria)**](https://github.com/fzakaria) +- dram [**(@dramforever)**](https://github.com/dramforever) +- Ephraim Siegfried [**(@EphraimSiegfried)**](https://github.com/EphraimSiegfried) +- Robert Hensing [**(@roberth)**](https://github.com/roberth) +- Taeer Bar-Yam [**(@Radvendii)**](https://github.com/Radvendii) +- Emily [**(@emilazy)**](https://github.com/emilazy) +- Jens Petersen [**(@juhp)**](https://github.com/juhp) +- Bernardo Meurer [**(@lovesegfault)**](https://github.com/lovesegfault) +- Jörg Thalheim [**(@Mic92)**](https://github.com/Mic92) +- Leandro Emmanuel Reina Kiperman [**(@kip93)**](https://github.com/kip93) +- Marie [**(@NyCodeGHG)**](https://github.com/NyCodeGHG) +- Ethan Evans [**(@ethanavatar)**](https://github.com/ethanavatar) +- Yaroslav Bolyukin [**(@CertainLach)**](https://github.com/CertainLach) +- Matej Urbas [**(@urbas)**](https://github.com/urbas) +- Jami Kettunen [**(@JamiKettunen)**](https://github.com/JamiKettunen) +- Clayton [**(@netadr)**](https://github.com/netadr) +- Grégory Marti [**(@gmarti)**](https://github.com/gmarti) +- Eelco Dolstra [**(@edolstra)**](https://github.com/edolstra) +- rszyma [**(@rszyma)**](https://github.com/rszyma) +- Philip Wilk [**(@philipwilk)**](https://github.com/philipwilk) +- John Ericson [**(@Ericson2314)**](https://github.com/Ericson2314) +- Tom Westerhout [**(@twesterhout)**](https://github.com/twesterhout) +- Tristan Ross [**(@RossComputerGuy)**](https://github.com/RossComputerGuy) +- Sergei Zimmerman [**(@xokdvium)**](https://github.com/xokdvium) +- Jean-François Roche [**(@jfroche)**](https://github.com/jfroche) +- Seth Flynn [**(@getchoo)**](https://github.com/getchoo) +- éclairevoyant [**(@eclairevoyant)**](https://github.com/eclairevoyant) +- Glen Huang [**(@hgl)**](https://github.com/hgl) +- osman - オスマン 
[**(@osbm)**](https://github.com/osbm) +- David McFarland [**(@corngood)**](https://github.com/corngood) +- Cole Helbling [**(@cole-h)**](https://github.com/cole-h) +- Sinan Mohd [**(@sinanmohd)**](https://github.com/sinanmohd) +- Philipp Otterbein diff --git a/doc/manual/source/store/derivation/index.md b/doc/manual/source/store/derivation/index.md index 0e12b4d5e..5b179273d 100644 --- a/doc/manual/source/store/derivation/index.md +++ b/doc/manual/source/store/derivation/index.md @@ -106,7 +106,7 @@ The system type on which the [`builder`](#attr-builder) executable is meant to b A necessary condition for Nix to schedule a given derivation on some [Nix instance] is for the "system" of that derivation to match that instance's [`system` configuration option] or [`extra-platforms` configuration option]. -By putting the `system` in each derivation, Nix allows *heterogenous* build plans, where not all steps can be run on the same machine or same sort of machine. +By putting the `system` in each derivation, Nix allows *heterogeneous* build plans, where not all steps can be run on the same machine or same sort of machine. Nix can schedule builds such that it automatically builds on other platforms by [forwarding build requests](@docroot@/advanced-topics/distributed-builds.md) to other Nix instances. [`system` configuration option]: @docroot@/command-ref/conf-file.md#conf-system diff --git a/flake.nix b/flake.nix index fd623c807..418f3180f 100644 --- a/flake.nix +++ b/flake.nix @@ -413,6 +413,10 @@ supportsCross = false; }; + "nix-json-schema-checks" = { + supportsCross = false; + }; + "nix-perl-bindings" = { supportsCross = false; }; @@ -467,6 +471,27 @@ } ); + apps = forAllSystems ( + system: + let + pkgs = nixpkgsFor.${system}.native; + opener = if pkgs.stdenv.isDarwin then "open" else "xdg-open"; + in + { + open-manual = { + type = "app"; + program = "${pkgs.writeShellScript "open-nix-manual" '' + manual_path="${self.packages.${system}.nix-manual}/share/doc/nix/manual/index.html" + if ! ${opener} "$manual_path"; then + echo "Failed to open manual with ${opener}. 
Manual is located at:" + echo "$manual_path" + fi + ''}"; + meta.description = "Open the Nix manual in your browser"; + }; + } + ); + devShells = let makeShell = import ./packaging/dev-shell.nix { inherit lib devFlake; }; diff --git a/maintainers/data/release-credits-email-to-handle.json b/maintainers/data/release-credits-email-to-handle.json index ea37afb90..0dbbf8fa6 100644 --- a/maintainers/data/release-credits-email-to-handle.json +++ b/maintainers/data/release-credits-email-to-handle.json @@ -203,5 +203,26 @@ "ConnorBaker01@Gmail.com": "ConnorBaker", "jsoo1@asu.edu": "jsoo1", "hsngrmpf+github@gmail.com": "DavHau", - "matthew@floxdev.com": "mkenigs" + "matthew@floxdev.com": "mkenigs", + "taeer@bar-yam.me": "Radvendii", + "beme@anthropic.com": "lovesegfault", + "osbm@osbm.dev": "osbm", + "jami.kettunen@protonmail.com": "JamiKettunen", + "ephraim.siegfried@hotmail.com": "EphraimSiegfried", + "rszyma.dev@gmail.com": "rszyma", + "tristan.ross@determinate.systems": "RossComputerGuy", + "corngood@gmail.com": "corngood", + "jfroche@pyxel.be": "jfroche", + "848000+eclairevoyant@users.noreply.github.com": "eclairevoyant", + "petersen@redhat.com": "juhp", + "dramforever@live.com": "dramforever", + "me@glenhuang.com": "hgl", + "philip.wilk@fivium.co.uk": "philipwilk", + "me@nycode.dev": "NyCodeGHG", + "14264576+twesterhout@users.noreply.github.com": "twesterhout", + "sinan@sinanmohd.com": "sinanmohd", + "42688647+netadr@users.noreply.github.com": "netadr", + "matej.urbas@gmail.com": "urbas", + "ethanalexevans@gmail.com": "ethanavatar", + "greg.marti@gmail.com": "gmarti" } \ No newline at end of file diff --git a/maintainers/data/release-credits-handle-to-name.json b/maintainers/data/release-credits-handle-to-name.json index e2510548d..8abffc65c 100644 --- a/maintainers/data/release-credits-handle-to-name.json +++ b/maintainers/data/release-credits-handle-to-name.json @@ -177,5 +177,24 @@ "avnik": "Alexander V. 
Nikolaev", "DavHau": null, "aln730": "AGawas", - "vog": "Volker Diels-Grabsch" + "vog": "Volker Diels-Grabsch", + "corngood": "David McFarland", + "twesterhout": "Tom Westerhout", + "JamiKettunen": "Jami Kettunen", + "dramforever": "dram", + "philipwilk": "Philip Wilk", + "netadr": "Clayton", + "NyCodeGHG": "Marie", + "jfroche": "Jean-Fran\u00e7ois Roche", + "urbas": "Matej Urbas", + "osbm": "osman - \u30aa\u30b9\u30de\u30f3", + "rszyma": null, + "eclairevoyant": "\u00e9clairevoyant", + "Radvendii": "Taeer Bar-Yam", + "sinanmohd": "Sinan Mohd", + "ethanavatar": "Ethan Evans", + "gmarti": "Gr\u00e9gory Marti", + "lovesegfault": "Bernardo Meurer", + "EphraimSiegfried": "Ephraim Siegfried", + "hgl": "Glen Huang" } \ No newline at end of file diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 8c84d0517..8dcff9c63 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -104,88 +104,6 @@ }; shellcheck = { enable = true; - excludes = [ - # We haven't linted these files yet - ''^tests/functional/flakes/prefetch\.sh$'' - ''^tests/functional/flakes/run\.sh$'' - ''^tests/functional/flakes/show\.sh$'' - ''^tests/functional/formatter\.sh$'' - ''^tests/functional/formatter\.simple\.sh$'' - ''^tests/functional/gc-auto\.sh$'' - ''^tests/functional/gc-concurrent\.builder\.sh$'' - ''^tests/functional/gc-concurrent\.sh$'' - ''^tests/functional/gc-concurrent2\.builder\.sh$'' - ''^tests/functional/gc-non-blocking\.sh$'' - ''^tests/functional/hash-convert\.sh$'' - ''^tests/functional/impure-derivations\.sh$'' - ''^tests/functional/impure-eval\.sh$'' - ''^tests/functional/install-darwin\.sh$'' - ''^tests/functional/legacy-ssh-store\.sh$'' - ''^tests/functional/linux-sandbox\.sh$'' - ''^tests/functional/logging\.sh$'' - ''^tests/functional/misc\.sh$'' - ''^tests/functional/multiple-outputs\.sh$'' - ''^tests/functional/nested-sandboxing\.sh$'' - ''^tests/functional/nested-sandboxing/command\.sh$'' - ''^tests/functional/nix-build\.sh$'' - ''^tests/functional/nix-channel\.sh$'' - ''^tests/functional/nix-collect-garbage-d\.sh$'' - ''^tests/functional/nix-copy-ssh-common\.sh$'' - ''^tests/functional/nix-copy-ssh-ng\.sh$'' - ''^tests/functional/nix-copy-ssh\.sh$'' - ''^tests/functional/nix-daemon-untrusting\.sh$'' - ''^tests/functional/nix-profile\.sh$'' - ''^tests/functional/nix-shell\.sh$'' - ''^tests/functional/nix_path\.sh$'' - ''^tests/functional/optimise-store\.sh$'' - ''^tests/functional/output-normalization\.sh$'' - ''^tests/functional/parallel\.builder\.sh$'' - ''^tests/functional/parallel\.sh$'' - ''^tests/functional/pass-as-file\.sh$'' - ''^tests/functional/path-from-hash-part\.sh$'' - ''^tests/functional/path-info\.sh$'' - ''^tests/functional/placeholders\.sh$'' - ''^tests/functional/post-hook\.sh$'' - ''^tests/functional/pure-eval\.sh$'' - ''^tests/functional/push-to-store-old\.sh$'' - ''^tests/functional/push-to-store\.sh$'' - ''^tests/functional/read-only-store\.sh$'' - ''^tests/functional/readfile-context\.sh$'' - ''^tests/functional/recursive\.sh$'' - ''^tests/functional/referrers\.sh$'' - ''^tests/functional/remote-store\.sh$'' - ''^tests/functional/repair\.sh$'' - ''^tests/functional/restricted\.sh$'' - ''^tests/functional/search\.sh$'' - ''^tests/functional/secure-drv-outputs\.sh$'' - ''^tests/functional/selfref-gc\.sh$'' - ''^tests/functional/shell\.shebang\.sh$'' - ''^tests/functional/simple\.builder\.sh$'' - ''^tests/functional/supplementary-groups\.sh$'' - ''^tests/functional/toString-path\.sh$'' - ''^tests/functional/user-envs-migration\.sh$'' 
- ''^tests/functional/user-envs-test-case\.sh$'' - ''^tests/functional/user-envs\.builder\.sh$'' - ''^tests/functional/user-envs\.sh$'' - ''^tests/functional/why-depends\.sh$'' - - # Content-addressed test files that use recursive-*looking* sourcing - # (cd .. && source ), causing shellcheck to loop - # They're small wrapper scripts with not a lot going on - ''^tests/functional/ca/build-delete\.sh$'' - ''^tests/functional/ca/build-dry\.sh$'' - ''^tests/functional/ca/eval-store\.sh$'' - ''^tests/functional/ca/gc\.sh$'' - ''^tests/functional/ca/import-from-derivation\.sh$'' - ''^tests/functional/ca/multiple-outputs\.sh$'' - ''^tests/functional/ca/new-build-cmd\.sh$'' - ''^tests/functional/ca/nix-shell\.sh$'' - ''^tests/functional/ca/post-hook\.sh$'' - ''^tests/functional/ca/recursive\.sh$'' - ''^tests/functional/ca/repl\.sh$'' - ''^tests/functional/ca/selfref-gc\.sh$'' - ''^tests/functional/ca/why-depends\.sh$'' - ]; }; }; }; diff --git a/maintainers/release-process.md b/maintainers/release-process.md index 790618b7f..68de3b677 100644 --- a/maintainers/release-process.md +++ b/maintainers/release-process.md @@ -142,7 +142,6 @@ release: $ git pull $ NEW_VERSION=2.13.0 $ echo $NEW_VERSION > .version - $ ... edit .mergify.yml to add the previous version ... $ git checkout -b bump-$NEW_VERSION $ git commit -a -m 'Bump version' $ git push --set-upstream origin bump-$NEW_VERSION diff --git a/meson.build b/meson.build index 736756157..f3158ea6d 100644 --- a/meson.build +++ b/meson.build @@ -60,3 +60,4 @@ if get_option('unit-tests') subproject('libflake-tests') endif subproject('nix-functional-tests') +subproject('json-schema-checks') diff --git a/misc/zsh/completion.zsh b/misc/zsh/completion.zsh index eb26a16cb..6146455fe 100644 --- a/misc/zsh/completion.zsh +++ b/misc/zsh/completion.zsh @@ -1,5 +1,5 @@ -# shellcheck disable=all #compdef nix +# shellcheck disable=all function _nix() { local ifs_bk="$IFS" diff --git a/nix-meson-build-support/common/asan-options/asan-options.cc b/nix-meson-build-support/common/asan-options/asan-options.cc new file mode 100644 index 000000000..c9782fea0 --- /dev/null +++ b/nix-meson-build-support/common/asan-options/asan-options.cc @@ -0,0 +1,6 @@ +extern "C" [[gnu::retain, gnu::weak]] const char * __asan_default_options() +{ + // We leak a bunch of memory knowingly on purpose. It's not worthwhile to + // diagnose that memory being leaked for now. 
+ return "abort_on_error=1:print_summary=1:detect_leaks=0:detect_odr_violation=0"; +} diff --git a/nix-meson-build-support/asan-options/meson.build b/nix-meson-build-support/common/asan-options/meson.build similarity index 74% rename from nix-meson-build-support/asan-options/meson.build rename to nix-meson-build-support/common/asan-options/meson.build index 17880b0ed..80527b5a9 100644 --- a/nix-meson-build-support/asan-options/meson.build +++ b/nix-meson-build-support/common/asan-options/meson.build @@ -1,7 +1,3 @@ -asan_test_options_env = { - 'ASAN_OPTIONS' : 'abort_on_error=1:print_summary=1:detect_leaks=0', -} - # Clang gets grumpy about missing libasan symbols if -shared-libasan is not # passed when building shared libs, at least on Linux if cxx.get_id() == 'clang' and ('address' in get_option('b_sanitize') or 'undefined' in get_option( @@ -10,3 +6,6 @@ if cxx.get_id() == 'clang' and ('address' in get_option('b_sanitize') or 'undefi add_project_link_arguments('-shared-libasan', language : 'cpp') endif +if 'address' in get_option('b_sanitize') + deps_other += declare_dependency(sources : 'asan-options.cc') +endif diff --git a/nix-meson-build-support/common/assert-fail/meson.build b/nix-meson-build-support/common/assert-fail/meson.build new file mode 100644 index 000000000..7539b3921 --- /dev/null +++ b/nix-meson-build-support/common/assert-fail/meson.build @@ -0,0 +1,32 @@ +can_wrap_assert_fail_test_code = ''' +#include +#include + +int main() +{ + assert(0); +} + +extern "C" void * __real___assert_fail(const char *, const char *, unsigned int, const char *); + +extern "C" void * +__wrap___assert_fail(const char *, const char *, unsigned int, const char *) +{ + return __real___assert_fail(nullptr, nullptr, 0, nullptr); +} +''' + +wrap_assert_fail_args = [ '-Wl,--wrap=__assert_fail' ] + +can_wrap_assert_fail = cxx.links( + can_wrap_assert_fail_test_code, + args : wrap_assert_fail_args, + name : 'linker can wrap __assert_fail', +) + +if can_wrap_assert_fail + deps_other += declare_dependency( + sources : 'wrap-assert-fail.cc', + link_args : wrap_assert_fail_args, + ) +endif diff --git a/nix-meson-build-support/common/assert-fail/wrap-assert-fail.cc b/nix-meson-build-support/common/assert-fail/wrap-assert-fail.cc new file mode 100644 index 000000000..d9e34168b --- /dev/null +++ b/nix-meson-build-support/common/assert-fail/wrap-assert-fail.cc @@ -0,0 +1,17 @@ +#include "nix/util/error.hh" + +#include +#include +#include +#include + +extern "C" [[noreturn]] void __attribute__((weak)) +__wrap___assert_fail(const char * assertion, const char * file, unsigned int line, const char * function) +{ + char buf[512]; + int n = + snprintf(buf, sizeof(buf), "Assertion '%s' failed in %s at %s:%" PRIuLEAST32, assertion, function, file, line); + if (n < 0) + nix::panic("Assertion failed and could not format error message"); + nix::panic(std::string_view(buf, std::min(static_cast(sizeof(buf)), n))); +} diff --git a/nix-meson-build-support/common/meson.build b/nix-meson-build-support/common/meson.build index 8c4e98862..1405974d2 100644 --- a/nix-meson-build-support/common/meson.build +++ b/nix-meson-build-support/common/meson.build @@ -42,5 +42,8 @@ if cxx.get_id() == 'clang' add_project_arguments('-fpch-instantiate-templates', language : 'cpp') endif -# Darwin ld doesn't like "X.Y.Zpre" -nix_soversion = meson.project_version().split('pre')[0] +# Darwin ld doesn't like "X.Y.ZpreABCD+W" +nix_soversion = meson.project_version().split('+')[0].split('pre')[0] + +subdir('assert-fail') +subdir('asan-options') 
diff --git a/packaging/components.nix b/packaging/components.nix index 2be4fa61d..f9d7b109a 100644 --- a/packaging/components.nix +++ b/packaging/components.nix @@ -204,6 +204,25 @@ let mesonFlags = [ (lib.mesonBool "b_asneeded" false) ] ++ prevAttrs.mesonFlags or [ ]; }; + enableSanitizersLayer = + finalAttrs: prevAttrs: + let + sanitizers = lib.optional scope.withASan "address" ++ lib.optional scope.withUBSan "undefined"; + in + { + mesonFlags = + (prevAttrs.mesonFlags or [ ]) + ++ lib.optionals (lib.length sanitizers > 0) ( + [ + (lib.mesonOption "b_sanitize" (lib.concatStringsSep "," sanitizers)) + ] + ++ (lib.optionals stdenv.cc.isClang [ + # https://www.github.com/mesonbuild/meson/issues/764 + (lib.mesonBool "b_lundef" false) + ]) + ); + }; + nixDefaultsLayer = finalAttrs: prevAttrs: { strictDeps = prevAttrs.strictDeps or true; enableParallelBuilding = true; @@ -246,6 +265,16 @@ in inherit filesetToSource; + /** + Whether meson components are built with [AddressSanitizer](https://clang.llvm.org/docs/AddressSanitizer.html). + */ + withASan = false; + + /** + Whether meson components are built with [UndefinedBehaviorSanitizer](https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html). + */ + withUBSan = false; + /** A user-provided extension function to apply to each component derivation. */ @@ -332,6 +361,7 @@ in setVersionLayer mesonLayer fixupStaticLayer + enableSanitizersLayer scope.mesonComponentOverrides ]; mkMesonExecutable = mkPackageBuilder [ @@ -342,6 +372,7 @@ in mesonLayer mesonBuildLayer fixupStaticLayer + enableSanitizersLayer scope.mesonComponentOverrides ]; mkMesonLibrary = mkPackageBuilder [ @@ -353,6 +384,7 @@ in mesonBuildLayer mesonLibraryLayer fixupStaticLayer + enableSanitizersLayer scope.mesonComponentOverrides ]; @@ -406,6 +438,11 @@ in */ nix-external-api-docs = callPackage ../src/external-api-docs/package.nix { version = fineVersion; }; + /** + JSON schema validation checks + */ + nix-json-schema-checks = callPackage ../src/json-schema-checks/package.nix { }; + nix-perl-bindings = callPackage ../src/perl/package.nix { }; /** @@ -458,7 +495,7 @@ in Example: ``` - overrideScope (finalScope: prevScope: { aws-sdk-cpp = null; }) + overrideScope (finalScope: prevScope: { aws-crt-cpp = null; }) ``` */ overrideScope = f: (scope.overrideScope f).nix-everything; diff --git a/packaging/dependencies.nix b/packaging/dependencies.nix index 981c1aa48..6b2dafcfa 100644 --- a/packaging/dependencies.nix +++ b/packaging/dependencies.nix @@ -16,21 +16,6 @@ in scope: { inherit stdenv; - aws-sdk-cpp = - (pkgs.aws-sdk-cpp.override { - apis = [ - "identity-management" - "s3" - "transfer" - ]; - customMemoryManagement = false; - }).overrideAttrs - { - # only a stripped down version is built, which takes a lot less resources - # to build, so we don't need a "big-parallel" machine. 
- requiredSystemFeatures = [ ]; - }; - boehmgc = (pkgs.boehmgc.override { enableLargeConfig = true; @@ -57,15 +42,20 @@ scope: { prevAttrs.postInstall; }); - toml11 = pkgs.toml11.overrideAttrs rec { - version = "4.4.0"; - src = pkgs.fetchFromGitHub { - owner = "ToruNiina"; - repo = "toml11"; - tag = "v${version}"; - hash = "sha256-sgWKYxNT22nw376ttGsTdg0AMzOwp8QH3E8mx0BZJTQ="; - }; - }; + # TODO: Remove this when https://github.com/NixOS/nixpkgs/pull/442682 is included in a stable release + toml11 = + if lib.versionAtLeast pkgs.toml11.version "4.4.0" then + pkgs.toml11 + else + pkgs.toml11.overrideAttrs rec { + version = "4.4.0"; + src = pkgs.fetchFromGitHub { + owner = "ToruNiina"; + repo = "toml11"; + tag = "v${version}"; + hash = "sha256-sgWKYxNT22nw376ttGsTdg0AMzOwp8QH3E8mx0BZJTQ="; + }; + }; # TODO Hack until https://github.com/NixOS/nixpkgs/issues/45462 is fixed. boost = @@ -84,38 +74,4 @@ scope: { buildPhase = lib.replaceStrings [ "--without-python" ] [ "" ] old.buildPhase; installPhase = lib.replaceStrings [ "--without-python" ] [ "" ] old.installPhase; }); - - libgit2 = - if lib.versionAtLeast pkgs.libgit2.version "1.9.0" then - pkgs.libgit2 - else - pkgs.libgit2.overrideAttrs (attrs: { - # libgit2: Nixpkgs 24.11 has < 1.9.0, which needs our patches - nativeBuildInputs = - attrs.nativeBuildInputs or [ ] - # gitMinimal does not build on Windows. See packbuilder patch. - ++ lib.optionals (!stdenv.hostPlatform.isWindows) [ - # Needed for `git apply`; see `prePatch` - pkgs.buildPackages.gitMinimal - ]; - # Only `git apply` can handle git binary patches - prePatch = - attrs.prePatch or "" - + lib.optionalString (!stdenv.hostPlatform.isWindows) '' - patch() { - git apply - } - ''; - patches = - attrs.patches or [ ] - ++ [ - ./patches/libgit2-mempack-thin-packfile.patch - ] - # gitMinimal does not build on Windows, but fortunately this patch only - # impacts interruptibility - ++ lib.optionals (!stdenv.hostPlatform.isWindows) [ - # binary patch; see `prePatch` - ./patches/libgit2-packbuilder-callback-interruptible.patch - ]; - }); } diff --git a/packaging/dev-shell.nix b/packaging/dev-shell.nix index ccfb9c4ae..153e7a3eb 100644 --- a/packaging/dev-shell.nix +++ b/packaging/dev-shell.nix @@ -70,6 +70,9 @@ pkgs.nixComponents2.nix-util.overrideAttrs ( # We use this shell with the local checkout, not unpackPhase. src = null; + # Workaround https://sourceware.org/pipermail/gdb-patches/2025-October/221398.html + # Remove when gdb fix is rolled out everywhere. 
+ separateDebugInfo = false; env = { # For `make format`, to work without installing pre-commit @@ -93,38 +96,45 @@ pkgs.nixComponents2.nix-util.overrideAttrs ( ++ map (transformFlag "libcmd") (ignoreCrossFile pkgs.nixComponents2.nix-cmd.mesonFlags); nativeBuildInputs = - attrs.nativeBuildInputs or [ ] - ++ pkgs.nixComponents2.nix-util.nativeBuildInputs - ++ pkgs.nixComponents2.nix-store.nativeBuildInputs - ++ pkgs.nixComponents2.nix-fetchers.nativeBuildInputs - ++ pkgs.nixComponents2.nix-expr.nativeBuildInputs - ++ lib.optionals havePerl pkgs.nixComponents2.nix-perl-bindings.nativeBuildInputs - ++ lib.optionals buildCanExecuteHost pkgs.nixComponents2.nix-manual.externalNativeBuildInputs - ++ pkgs.nixComponents2.nix-internal-api-docs.nativeBuildInputs - ++ pkgs.nixComponents2.nix-external-api-docs.nativeBuildInputs - ++ pkgs.nixComponents2.nix-functional-tests.externalNativeBuildInputs - ++ lib.optional ( - !buildCanExecuteHost - # Hack around https://github.com/nixos/nixpkgs/commit/bf7ad8cfbfa102a90463433e2c5027573b462479 - && !(stdenv.hostPlatform.isWindows && stdenv.buildPlatform.isDarwin) - && stdenv.hostPlatform.emulatorAvailable pkgs.buildPackages - && lib.meta.availableOn stdenv.buildPlatform (stdenv.hostPlatform.emulator pkgs.buildPackages) - ) pkgs.buildPackages.mesonEmulatorHook - ++ [ - pkgs.buildPackages.cmake - pkgs.buildPackages.gnused - pkgs.buildPackages.shellcheck - pkgs.buildPackages.changelog-d - modular.pre-commit.settings.package - (pkgs.writeScriptBin "pre-commit-hooks-install" modular.pre-commit.settings.installationScript) - pkgs.buildPackages.nixfmt-rfc-style - pkgs.buildPackages.shellcheck - pkgs.buildPackages.gdb - ] - ++ lib.optional (stdenv.cc.isClang && stdenv.hostPlatform == stdenv.buildPlatform) ( - lib.hiPrio pkgs.buildPackages.clang-tools - ) - ++ lib.optional stdenv.hostPlatform.isLinux pkgs.buildPackages.mold-wrapped; + let + inputs = + attrs.nativeBuildInputs or [ ] + ++ pkgs.nixComponents2.nix-util.nativeBuildInputs + ++ pkgs.nixComponents2.nix-store.nativeBuildInputs + ++ pkgs.nixComponents2.nix-fetchers.nativeBuildInputs + ++ pkgs.nixComponents2.nix-expr.nativeBuildInputs + ++ lib.optionals havePerl pkgs.nixComponents2.nix-perl-bindings.nativeBuildInputs + ++ lib.optionals buildCanExecuteHost pkgs.nixComponents2.nix-manual.externalNativeBuildInputs + ++ pkgs.nixComponents2.nix-internal-api-docs.nativeBuildInputs + ++ pkgs.nixComponents2.nix-external-api-docs.nativeBuildInputs + ++ pkgs.nixComponents2.nix-functional-tests.externalNativeBuildInputs + ++ pkgs.nixComponents2.nix-json-schema-checks.externalNativeBuildInputs + ++ lib.optional ( + !buildCanExecuteHost + # Hack around https://github.com/nixos/nixpkgs/commit/bf7ad8cfbfa102a90463433e2c5027573b462479 + && !(stdenv.hostPlatform.isWindows && stdenv.buildPlatform.isDarwin) + && stdenv.hostPlatform.emulatorAvailable pkgs.buildPackages + && lib.meta.availableOn stdenv.buildPlatform (stdenv.hostPlatform.emulator pkgs.buildPackages) + ) pkgs.buildPackages.mesonEmulatorHook + ++ [ + pkgs.buildPackages.cmake + pkgs.buildPackages.gnused + pkgs.buildPackages.changelog-d + modular.pre-commit.settings.package + (pkgs.writeScriptBin "pre-commit-hooks-install" modular.pre-commit.settings.installationScript) + pkgs.buildPackages.nixfmt-rfc-style + pkgs.buildPackages.shellcheck + pkgs.buildPackages.include-what-you-use + pkgs.buildPackages.gdb + ] + ++ lib.optional (stdenv.cc.isClang && stdenv.hostPlatform == stdenv.buildPlatform) ( + lib.hiPrio pkgs.buildPackages.clang-tools + ) + ++ lib.optional 
stdenv.hostPlatform.isLinux pkgs.buildPackages.mold-wrapped; + in + # FIXME: separateDebugInfo = false doesn't actually prevent -Wa,--compress-debug-sections + # from making its way into NIX_CFLAGS_COMPILE. + lib.filter (p: !lib.hasInfix "separate-debug-info" p) inputs; buildInputs = [ pkgs.gbenchmark diff --git a/packaging/hydra.nix b/packaging/hydra.nix index 9f9749bde..3bbb6c15b 100644 --- a/packaging/hydra.nix +++ b/packaging/hydra.nix @@ -62,6 +62,7 @@ let "nix-cmd" "nix-cli" "nix-functional-tests" + "nix-json-schema-checks" ] ++ lib.optionals enableBindings [ "nix-perl-bindings" @@ -73,7 +74,7 @@ let ] ); in -{ +rec { /** An internal check to make sure our package listing is complete. */ @@ -145,13 +146,25 @@ in ) ); - buildNoGc = + # Builds with sanitizers already have GC disabled, so this buildNoGc can just + # point to buildWithSanitizers in order to reduce the load on hydra. + buildNoGc = buildWithSanitizers; + + buildWithSanitizers = let components = forAllSystems ( system: - nixpkgsFor.${system}.native.nixComponents2.overrideScope ( + let + pkgs = nixpkgsFor.${system}.native; + in + pkgs.nixComponents2.overrideScope ( self: super: { + # Boost coroutines fail with ASAN on darwin. + withASan = !pkgs.stdenv.buildPlatform.isDarwin; + withUBSan = true; nix-expr = super.nix-expr.override { enableGC = false; }; + # Unclear how to make Perl bindings work with a dynamically linked ASAN. + nix-perl-bindings = null; } ) ); diff --git a/packaging/patches/libgit2-mempack-thin-packfile.patch b/packaging/patches/libgit2-mempack-thin-packfile.patch deleted file mode 100644 index fb74b1683..000000000 --- a/packaging/patches/libgit2-mempack-thin-packfile.patch +++ /dev/null @@ -1,282 +0,0 @@ -commit 9bacade4a3ef4b6b26e2c02f549eef0e9eb9eaa2 -Author: Robert Hensing -Date: Sun Aug 18 20:20:36 2024 +0200 - - Add unoptimized git_mempack_write_thin_pack - -diff --git a/include/git2/sys/mempack.h b/include/git2/sys/mempack.h -index 17da590a3..3688bdd50 100644 ---- a/include/git2/sys/mempack.h -+++ b/include/git2/sys/mempack.h -@@ -44,6 +44,29 @@ GIT_BEGIN_DECL - */ - GIT_EXTERN(int) git_mempack_new(git_odb_backend **out); - -+/** -+ * Write a thin packfile with the objects in the memory store. -+ * -+ * A thin packfile is a packfile that does not contain its transitive closure of -+ * references. This is useful for efficiently distributing additions to a -+ * repository over the network, but also finds use in the efficient bulk -+ * addition of objects to a repository, locally. -+ * -+ * This operation performs the (shallow) insert operations into the -+ * `git_packbuilder`, but does not write the packfile to disk; -+ * see `git_packbuilder_write_buf`. -+ * -+ * It also does not reset the memory store; see `git_mempack_reset`. -+ * -+ * @note This function may or may not write trees and blobs that are not -+ * referenced by commits. Currently everything is written, but this -+ * behavior may change in the future as the packer is optimized. -+ * -+ * @param backend The mempack backend -+ * @param pb The packbuilder to use to write the packfile -+ */ -+GIT_EXTERN(int) git_mempack_write_thin_pack(git_odb_backend *backend, git_packbuilder *pb); -+ - /** - * Dump all the queued in-memory writes to a packfile. 
- * -diff --git a/src/libgit2/odb_mempack.c b/src/libgit2/odb_mempack.c -index 6f27f45f8..0b61e2b66 100644 ---- a/src/libgit2/odb_mempack.c -+++ b/src/libgit2/odb_mempack.c -@@ -132,6 +132,35 @@ cleanup: - return err; - } - -+int git_mempack_write_thin_pack(git_odb_backend *backend, git_packbuilder *pb) -+{ -+ struct memory_packer_db *db = (struct memory_packer_db *)backend; -+ const git_oid *oid; -+ size_t iter = 0; -+ int err = -1; -+ -+ /* TODO: Implement the recency heuristics. -+ For this it probably makes sense to only write what's referenced -+ through commits, an option I've carved out for you in the docs. -+ wrt heuristics: ask your favorite LLM to translate https://git-scm.com/docs/pack-heuristics/en -+ to actual normal reference documentation. */ -+ while (true) { -+ err = git_oidmap_iterate(NULL, db->objects, &iter, &oid); -+ if (err == GIT_ITEROVER) { -+ err = 0; -+ break; -+ } -+ if (err != 0) -+ return err; -+ -+ err = git_packbuilder_insert(pb, oid, NULL); -+ if (err != 0) -+ return err; -+ } -+ -+ return 0; -+} -+ - int git_mempack_dump( - git_buf *pack, - git_repository *repo, -diff --git a/tests/libgit2/mempack/thinpack.c b/tests/libgit2/mempack/thinpack.c -new file mode 100644 -index 000000000..604a4dda2 ---- /dev/null -+++ b/tests/libgit2/mempack/thinpack.c -@@ -0,0 +1,196 @@ -+#include "clar_libgit2.h" -+#include "git2/indexer.h" -+#include "git2/odb_backend.h" -+#include "git2/tree.h" -+#include "git2/types.h" -+#include "git2/sys/mempack.h" -+#include "git2/sys/odb_backend.h" -+#include "util.h" -+ -+static git_repository *_repo; -+static git_odb_backend * _mempack_backend; -+ -+void test_mempack_thinpack__initialize(void) -+{ -+ git_odb *odb; -+ -+ _repo = cl_git_sandbox_init_new("mempack_thinpack_repo"); -+ -+ cl_git_pass(git_mempack_new(&_mempack_backend)); -+ cl_git_pass(git_repository_odb(&odb, _repo)); -+ cl_git_pass(git_odb_add_backend(odb, _mempack_backend, 999)); -+ git_odb_free(odb); -+} -+ -+void _mempack_thinpack__cleanup(void) -+{ -+ cl_git_sandbox_cleanup(); -+} -+ -+/* -+ Generating a packfile for an unchanged repo works and produces an empty packfile. -+ Even if we allow this scenario to be detected, it shouldn't misbehave if the -+ application is unaware of it. -+*/ -+void test_mempack_thinpack__empty(void) -+{ -+ git_packbuilder *pb; -+ int version; -+ int n; -+ git_buf buf = GIT_BUF_INIT; -+ -+ git_packbuilder_new(&pb, _repo); -+ -+ cl_git_pass(git_mempack_write_thin_pack(_mempack_backend, pb)); -+ cl_git_pass(git_packbuilder_write_buf(&buf, pb)); -+ cl_assert_in_range(12, buf.size, 1024 /* empty packfile is >0 bytes, but certainly not that big */); -+ cl_assert(buf.ptr[0] == 'P'); -+ cl_assert(buf.ptr[1] == 'A'); -+ cl_assert(buf.ptr[2] == 'C'); -+ cl_assert(buf.ptr[3] == 'K'); -+ version = (buf.ptr[4] << 24) | (buf.ptr[5] << 16) | (buf.ptr[6] << 8) | buf.ptr[7]; -+ /* Subject to change. https://git-scm.com/docs/pack-format: Git currently accepts version number 2 or 3 but generates version 2 only.*/ -+ cl_assert_equal_i(2, version); -+ n = (buf.ptr[8] << 24) | (buf.ptr[9] << 16) | (buf.ptr[10] << 8) | buf.ptr[11]; -+ cl_assert_equal_i(0, n); -+ git_buf_dispose(&buf); -+ -+ git_packbuilder_free(pb); -+} -+ -+#define LIT_LEN(x) x, sizeof(x) - 1 -+ -+/* -+ Check that git_mempack_write_thin_pack produces a thin packfile. -+*/ -+void test_mempack_thinpack__thin(void) -+{ -+ /* Outline: -+ - Create tree 1 -+ - Flush to packfile A -+ - Create tree 2 -+ - Flush to packfile B -+ -+ Tree 2 has a new blob and a reference to a blob from tree 1. 
-+ -+ Expectation: -+ - Packfile B is thin and does not contain the objects from packfile A -+ */ -+ -+ -+ git_oid oid_blob_1; -+ git_oid oid_blob_2; -+ git_oid oid_blob_3; -+ git_oid oid_tree_1; -+ git_oid oid_tree_2; -+ git_treebuilder *tb; -+ -+ git_packbuilder *pb; -+ git_buf buf = GIT_BUF_INIT; -+ git_indexer *indexer; -+ git_indexer_progress stats; -+ char pack_dir_path[1024]; -+ -+ char sbuf[1024]; -+ const char * repo_path; -+ const char * pack_name_1; -+ const char * pack_name_2; -+ git_str pack_path_1 = GIT_STR_INIT; -+ git_str pack_path_2 = GIT_STR_INIT; -+ git_odb_backend * pack_odb_backend_1; -+ git_odb_backend * pack_odb_backend_2; -+ -+ -+ cl_assert_in_range(0, snprintf(pack_dir_path, sizeof(pack_dir_path), "%s/objects/pack", git_repository_path(_repo)), sizeof(pack_dir_path)); -+ -+ /* Create tree 1 */ -+ -+ cl_git_pass(git_blob_create_from_buffer(&oid_blob_1, _repo, LIT_LEN("thinpack blob 1"))); -+ cl_git_pass(git_blob_create_from_buffer(&oid_blob_2, _repo, LIT_LEN("thinpack blob 2"))); -+ -+ -+ cl_git_pass(git_treebuilder_new(&tb, _repo, NULL)); -+ cl_git_pass(git_treebuilder_insert(NULL, tb, "blob1", &oid_blob_1, GIT_FILEMODE_BLOB)); -+ cl_git_pass(git_treebuilder_insert(NULL, tb, "blob2", &oid_blob_2, GIT_FILEMODE_BLOB)); -+ cl_git_pass(git_treebuilder_write(&oid_tree_1, tb)); -+ -+ /* Flush */ -+ -+ cl_git_pass(git_packbuilder_new(&pb, _repo)); -+ cl_git_pass(git_mempack_write_thin_pack(_mempack_backend, pb)); -+ cl_git_pass(git_packbuilder_write_buf(&buf, pb)); -+ cl_git_pass(git_indexer_new(&indexer, pack_dir_path, 0, NULL, NULL)); -+ cl_git_pass(git_indexer_append(indexer, buf.ptr, buf.size, &stats)); -+ cl_git_pass(git_indexer_commit(indexer, &stats)); -+ pack_name_1 = strdup(git_indexer_name(indexer)); -+ cl_assert(pack_name_1); -+ git_buf_dispose(&buf); -+ git_mempack_reset(_mempack_backend); -+ git_indexer_free(indexer); -+ git_packbuilder_free(pb); -+ -+ /* Create tree 2 */ -+ -+ cl_git_pass(git_treebuilder_clear(tb)); -+ /* blob 1 won't be used, but we add it anyway to test that just "declaring" an object doesn't -+ necessarily cause its inclusion in the next thin packfile. It must only be included if new. 
*/ -+ cl_git_pass(git_blob_create_from_buffer(&oid_blob_1, _repo, LIT_LEN("thinpack blob 1"))); -+ cl_git_pass(git_blob_create_from_buffer(&oid_blob_3, _repo, LIT_LEN("thinpack blob 3"))); -+ cl_git_pass(git_treebuilder_insert(NULL, tb, "blob1", &oid_blob_1, GIT_FILEMODE_BLOB)); -+ cl_git_pass(git_treebuilder_insert(NULL, tb, "blob3", &oid_blob_3, GIT_FILEMODE_BLOB)); -+ cl_git_pass(git_treebuilder_write(&oid_tree_2, tb)); -+ -+ /* Flush */ -+ -+ cl_git_pass(git_packbuilder_new(&pb, _repo)); -+ cl_git_pass(git_mempack_write_thin_pack(_mempack_backend, pb)); -+ cl_git_pass(git_packbuilder_write_buf(&buf, pb)); -+ cl_git_pass(git_indexer_new(&indexer, pack_dir_path, 0, NULL, NULL)); -+ cl_git_pass(git_indexer_append(indexer, buf.ptr, buf.size, &stats)); -+ cl_git_pass(git_indexer_commit(indexer, &stats)); -+ pack_name_2 = strdup(git_indexer_name(indexer)); -+ cl_assert(pack_name_2); -+ git_buf_dispose(&buf); -+ git_mempack_reset(_mempack_backend); -+ git_indexer_free(indexer); -+ git_packbuilder_free(pb); -+ git_treebuilder_free(tb); -+ -+ /* Assertions */ -+ -+ assert(pack_name_1); -+ assert(pack_name_2); -+ -+ repo_path = git_repository_path(_repo); -+ -+ snprintf(sbuf, sizeof(sbuf), "objects/pack/pack-%s.pack", pack_name_1); -+ git_str_joinpath(&pack_path_1, repo_path, sbuf); -+ snprintf(sbuf, sizeof(sbuf), "objects/pack/pack-%s.pack", pack_name_2); -+ git_str_joinpath(&pack_path_2, repo_path, sbuf); -+ -+ /* If they're the same, something definitely went wrong. */ -+ cl_assert(strcmp(pack_name_1, pack_name_2) != 0); -+ -+ cl_git_pass(git_odb_backend_one_pack(&pack_odb_backend_1, pack_path_1.ptr)); -+ cl_assert(pack_odb_backend_1->exists(pack_odb_backend_1, &oid_blob_1)); -+ cl_assert(pack_odb_backend_1->exists(pack_odb_backend_1, &oid_blob_2)); -+ cl_assert(!pack_odb_backend_1->exists(pack_odb_backend_1, &oid_blob_3)); -+ cl_assert(pack_odb_backend_1->exists(pack_odb_backend_1, &oid_tree_1)); -+ cl_assert(!pack_odb_backend_1->exists(pack_odb_backend_1, &oid_tree_2)); -+ -+ cl_git_pass(git_odb_backend_one_pack(&pack_odb_backend_2, pack_path_2.ptr)); -+ /* blob 1 is already in the packfile 1, so packfile 2 must not include it, in order to be _thin_. */ -+ cl_assert(!pack_odb_backend_2->exists(pack_odb_backend_2, &oid_blob_1)); -+ cl_assert(!pack_odb_backend_2->exists(pack_odb_backend_2, &oid_blob_2)); -+ cl_assert(pack_odb_backend_2->exists(pack_odb_backend_2, &oid_blob_3)); -+ cl_assert(!pack_odb_backend_2->exists(pack_odb_backend_2, &oid_tree_1)); -+ cl_assert(pack_odb_backend_2->exists(pack_odb_backend_2, &oid_tree_2)); -+ -+ pack_odb_backend_1->free(pack_odb_backend_1); -+ pack_odb_backend_2->free(pack_odb_backend_2); -+ free((void *)pack_name_1); -+ free((void *)pack_name_2); -+ git_str_dispose(&pack_path_1); -+ git_str_dispose(&pack_path_2); -+ -+} diff --git a/packaging/patches/libgit2-packbuilder-callback-interruptible.patch b/packaging/patches/libgit2-packbuilder-callback-interruptible.patch deleted file mode 100644 index c67822ff7..000000000 --- a/packaging/patches/libgit2-packbuilder-callback-interruptible.patch +++ /dev/null @@ -1,930 +0,0 @@ -commit e9823c5da4fa977c46bcb97167fbdd0d70adb5ff -Author: Robert Hensing -Date: Mon Aug 26 20:07:04 2024 +0200 - - Make packbuilder interruptible using progress callback - - Forward errors from packbuilder->progress_cb - - This allows the callback to terminate long-running operations when - the application is interrupted. 
- -diff --git a/include/git2/pack.h b/include/git2/pack.h -index 0f6bd2ab9..bee72a6c0 100644 ---- a/include/git2/pack.h -+++ b/include/git2/pack.h -@@ -247,6 +247,9 @@ typedef int GIT_CALLBACK(git_packbuilder_progress)( - * @param progress_cb Function to call with progress information during - * pack building. Be aware that this is called inline with pack building - * operations, so performance may be affected. -+ * When progress_cb returns an error, the pack building process will be -+ * aborted and the error will be returned from the invoked function. -+ * `pb` must then be freed. - * @param progress_cb_payload Payload for progress callback. - * @return 0 or an error code - */ -diff --git a/src/libgit2/pack-objects.c b/src/libgit2/pack-objects.c -index b2d80cba9..7c331c2d5 100644 ---- a/src/libgit2/pack-objects.c -+++ b/src/libgit2/pack-objects.c -@@ -932,6 +932,9 @@ static int report_delta_progress( - { - int ret; - -+ if (pb->failure) -+ return pb->failure; -+ - if (pb->progress_cb) { - uint64_t current_time = git_time_monotonic(); - uint64_t elapsed = current_time - pb->last_progress_report_time; -@@ -943,8 +946,10 @@ static int report_delta_progress( - GIT_PACKBUILDER_DELTAFICATION, - count, pb->nr_objects, pb->progress_cb_payload); - -- if (ret) -+ if (ret) { -+ pb->failure = ret; - return git_error_set_after_callback(ret); -+ } - } - } - -@@ -976,7 +981,10 @@ static int find_deltas(git_packbuilder *pb, git_pobject **list, - } - - pb->nr_deltified += 1; -- report_delta_progress(pb, pb->nr_deltified, false); -+ if ((error = report_delta_progress(pb, pb->nr_deltified, false)) < 0) { -+ GIT_ASSERT(git_packbuilder__progress_unlock(pb) == 0); -+ goto on_error; -+ } - - po = *list++; - (*list_size)--; -@@ -1124,6 +1132,10 @@ struct thread_params { - size_t depth; - size_t working; - size_t data_ready; -+ -+ /* A pb->progress_cb can stop the packing process by returning an error. -+ When that happens, all threads observe the error and stop voluntarily. 
*/ -+ bool stopped; - }; - - static void *threaded_find_deltas(void *arg) -@@ -1133,7 +1145,12 @@ static void *threaded_find_deltas(void *arg) - while (me->remaining) { - if (find_deltas(me->pb, me->list, &me->remaining, - me->window, me->depth) < 0) { -- ; /* TODO */ -+ me->stopped = true; -+ GIT_ASSERT_WITH_RETVAL(git_packbuilder__progress_lock(me->pb) == 0, NULL); -+ me->working = false; -+ git_cond_signal(&me->pb->progress_cond); -+ GIT_ASSERT_WITH_RETVAL(git_packbuilder__progress_unlock(me->pb) == 0, NULL); -+ return NULL; - } - - GIT_ASSERT_WITH_RETVAL(git_packbuilder__progress_lock(me->pb) == 0, NULL); -@@ -1175,8 +1192,7 @@ static int ll_find_deltas(git_packbuilder *pb, git_pobject **list, - pb->nr_threads = git__online_cpus(); - - if (pb->nr_threads <= 1) { -- find_deltas(pb, list, &list_size, window, depth); -- return 0; -+ return find_deltas(pb, list, &list_size, window, depth); - } - - p = git__mallocarray(pb->nr_threads, sizeof(*p)); -@@ -1195,6 +1211,7 @@ static int ll_find_deltas(git_packbuilder *pb, git_pobject **list, - p[i].depth = depth; - p[i].working = 1; - p[i].data_ready = 0; -+ p[i].stopped = 0; - - /* try to split chunks on "path" boundaries */ - while (sub_size && sub_size < list_size && -@@ -1262,7 +1279,7 @@ static int ll_find_deltas(git_packbuilder *pb, git_pobject **list, - (!victim || victim->remaining < p[i].remaining)) - victim = &p[i]; - -- if (victim) { -+ if (victim && !target->stopped) { - sub_size = victim->remaining / 2; - list = victim->list + victim->list_size - sub_size; - while (sub_size && list[0]->hash && -@@ -1286,7 +1303,7 @@ static int ll_find_deltas(git_packbuilder *pb, git_pobject **list, - } - target->list_size = sub_size; - target->remaining = sub_size; -- target->working = 1; -+ target->working = 1; /* even when target->stopped, so that we don't process this thread again */ - GIT_ASSERT(git_packbuilder__progress_unlock(pb) == 0); - - if (git_mutex_lock(&target->mutex)) { -@@ -1299,7 +1316,7 @@ static int ll_find_deltas(git_packbuilder *pb, git_pobject **list, - git_cond_signal(&target->cond); - git_mutex_unlock(&target->mutex); - -- if (!sub_size) { -+ if (target->stopped || !sub_size) { - git_thread_join(&target->thread, NULL); - git_cond_free(&target->cond); - git_mutex_free(&target->mutex); -@@ -1308,7 +1325,7 @@ static int ll_find_deltas(git_packbuilder *pb, git_pobject **list, - } - - git__free(p); -- return 0; -+ return pb->failure; - } - - #else -@@ -1319,6 +1336,7 @@ int git_packbuilder__prepare(git_packbuilder *pb) - { - git_pobject **delta_list; - size_t i, n = 0; -+ int error; - - if (pb->nr_objects == 0 || pb->done) - return 0; /* nothing to do */ -@@ -1327,8 +1345,10 @@ int git_packbuilder__prepare(git_packbuilder *pb) - * Although we do not report progress during deltafication, we - * at least report that we are in the deltafication stage - */ -- if (pb->progress_cb) -- pb->progress_cb(GIT_PACKBUILDER_DELTAFICATION, 0, pb->nr_objects, pb->progress_cb_payload); -+ if (pb->progress_cb) { -+ if ((error = pb->progress_cb(GIT_PACKBUILDER_DELTAFICATION, 0, pb->nr_objects, pb->progress_cb_payload)) < 0) -+ return git_error_set_after_callback(error); -+ } - - delta_list = git__mallocarray(pb->nr_objects, sizeof(*delta_list)); - GIT_ERROR_CHECK_ALLOC(delta_list); -@@ -1345,31 +1365,33 @@ int git_packbuilder__prepare(git_packbuilder *pb) - - if (n > 1) { - git__tsort((void **)delta_list, n, type_size_sort); -- if (ll_find_deltas(pb, delta_list, n, -+ if ((error = ll_find_deltas(pb, delta_list, n, - GIT_PACK_WINDOW + 1, -- 
GIT_PACK_DEPTH) < 0) { -+ GIT_PACK_DEPTH)) < 0) { - git__free(delta_list); -- return -1; -+ return error; - } - } - -- report_delta_progress(pb, pb->nr_objects, true); -+ error = report_delta_progress(pb, pb->nr_objects, true); - - pb->done = true; - git__free(delta_list); -- return 0; -+ return error; - } - --#define PREPARE_PACK if (git_packbuilder__prepare(pb) < 0) { return -1; } -+#define PREPARE_PACK error = git_packbuilder__prepare(pb); if (error < 0) { return error; } - - int git_packbuilder_foreach(git_packbuilder *pb, int (*cb)(void *buf, size_t size, void *payload), void *payload) - { -+ int error; - PREPARE_PACK; - return write_pack(pb, cb, payload); - } - - int git_packbuilder__write_buf(git_str *buf, git_packbuilder *pb) - { -+ int error; - PREPARE_PACK; - - return write_pack(pb, &write_pack_buf, buf); -diff --git a/src/libgit2/pack-objects.h b/src/libgit2/pack-objects.h -index bbc8b9430..380a28ebe 100644 ---- a/src/libgit2/pack-objects.h -+++ b/src/libgit2/pack-objects.h -@@ -100,6 +100,10 @@ struct git_packbuilder { - uint64_t last_progress_report_time; - - bool done; -+ -+ /* A non-zero error code in failure causes all threads to shut themselves -+ down. Some functions will return this error code. */ -+ volatile int failure; - }; - - int git_packbuilder__write_buf(git_str *buf, git_packbuilder *pb); -diff --git a/tests/libgit2/pack/cancel.c b/tests/libgit2/pack/cancel.c -new file mode 100644 -index 000000000..a0aa9716a ---- /dev/null -+++ b/tests/libgit2/pack/cancel.c -@@ -0,0 +1,240 @@ -+#include "clar_libgit2.h" -+#include "futils.h" -+#include "pack.h" -+#include "hash.h" -+#include "iterator.h" -+#include "vector.h" -+#include "posix.h" -+#include "hash.h" -+#include "pack-objects.h" -+ -+static git_repository *_repo; -+static git_revwalk *_revwalker; -+static git_packbuilder *_packbuilder; -+static git_indexer *_indexer; -+static git_vector _commits; -+static int _commits_is_initialized; -+static git_indexer_progress _stats; -+ -+extern bool git_disable_pack_keep_file_checks; -+ -+static void pack_packbuilder_init(const char *sandbox) { -+ _repo = cl_git_sandbox_init(sandbox); -+ /* cl_git_pass(p_chdir(sandbox)); */ -+ cl_git_pass(git_revwalk_new(&_revwalker, _repo)); -+ cl_git_pass(git_packbuilder_new(&_packbuilder, _repo)); -+ cl_git_pass(git_vector_init(&_commits, 0, NULL)); -+ _commits_is_initialized = 1; -+ memset(&_stats, 0, sizeof(_stats)); -+ p_fsync__cnt = 0; -+} -+ -+void test_pack_cancel__initialize(void) -+{ -+ pack_packbuilder_init("small.git"); -+} -+ -+void test_pack_cancel__cleanup(void) -+{ -+ git_oid *o; -+ unsigned int i; -+ -+ cl_git_pass(git_libgit2_opts(GIT_OPT_ENABLE_FSYNC_GITDIR, 0)); -+ cl_git_pass(git_libgit2_opts(GIT_OPT_DISABLE_PACK_KEEP_FILE_CHECKS, false)); -+ -+ if (_commits_is_initialized) { -+ _commits_is_initialized = 0; -+ git_vector_foreach(&_commits, i, o) { -+ git__free(o); -+ } -+ git_vector_free(&_commits); -+ } -+ -+ git_packbuilder_free(_packbuilder); -+ _packbuilder = NULL; -+ -+ git_revwalk_free(_revwalker); -+ _revwalker = NULL; -+ -+ git_indexer_free(_indexer); -+ _indexer = NULL; -+ -+ /* cl_git_pass(p_chdir("..")); */ -+ cl_git_sandbox_cleanup(); -+ _repo = NULL; -+} -+ -+static int seed_packbuilder(void) -+{ -+ int error; -+ git_oid oid, *o; -+ unsigned int i; -+ -+ git_revwalk_sorting(_revwalker, GIT_SORT_TIME); -+ cl_git_pass(git_revwalk_push_ref(_revwalker, "HEAD")); -+ -+ while (git_revwalk_next(&oid, _revwalker) == 0) { -+ o = git__malloc(sizeof(git_oid)); -+ cl_assert(o != NULL); -+ git_oid_cpy(o, &oid); -+ 
cl_git_pass(git_vector_insert(&_commits, o)); -+ } -+ -+ git_vector_foreach(&_commits, i, o) { -+ if((error = git_packbuilder_insert(_packbuilder, o, NULL)) < 0) -+ return error; -+ } -+ -+ git_vector_foreach(&_commits, i, o) { -+ git_object *obj; -+ cl_git_pass(git_object_lookup(&obj, _repo, o, GIT_OBJECT_COMMIT)); -+ error = git_packbuilder_insert_tree(_packbuilder, -+ git_commit_tree_id((git_commit *)obj)); -+ git_object_free(obj); -+ if (error < 0) -+ return error; -+ } -+ -+ return 0; -+} -+ -+static int fail_stage; -+ -+static int packbuilder_cancel_after_n_calls_cb(int stage, uint32_t current, uint32_t total, void *payload) -+{ -+ -+ /* Force the callback to run again on the next opportunity regardless -+ of how fast we're running. */ -+ _packbuilder->last_progress_report_time = 0; -+ -+ if (stage == fail_stage) { -+ int *calls = (int *)payload; -+ int n = *calls; -+ /* Always decrement, including past zero. This way the error is only -+ triggered once, making sure it is picked up immediately. */ -+ --*calls; -+ if (n == 0) -+ return GIT_EUSER; -+ } -+ -+ return 0; -+} -+ -+static void test_cancel(int n) -+{ -+ -+ int calls_remaining = n; -+ int err; -+ git_buf buf = GIT_BUF_INIT; -+ -+ /* Switch to a small repository, so that `packbuilder_cancel_after_n_calls_cb` -+ can hack the time to call the callback on every opportunity. */ -+ -+ cl_git_pass(git_packbuilder_set_callbacks(_packbuilder, &packbuilder_cancel_after_n_calls_cb, &calls_remaining)); -+ err = seed_packbuilder(); -+ if (!err) -+ err = git_packbuilder_write_buf(&buf, _packbuilder); -+ -+ cl_assert_equal_i(GIT_EUSER, err); -+} -+void test_pack_cancel__cancel_after_add_0(void) -+{ -+ fail_stage = GIT_PACKBUILDER_ADDING_OBJECTS; -+ test_cancel(0); -+} -+ -+void test_pack_cancel__cancel_after_add_1(void) -+{ -+ cl_skip(); -+ fail_stage = GIT_PACKBUILDER_ADDING_OBJECTS; -+ test_cancel(1); -+} -+ -+void test_pack_cancel__cancel_after_delta_0(void) -+{ -+ fail_stage = GIT_PACKBUILDER_DELTAFICATION; -+ test_cancel(0); -+} -+ -+void test_pack_cancel__cancel_after_delta_1(void) -+{ -+ fail_stage = GIT_PACKBUILDER_DELTAFICATION; -+ test_cancel(1); -+} -+ -+void test_pack_cancel__cancel_after_delta_0_threaded(void) -+{ -+#ifdef GIT_THREADS -+ git_packbuilder_set_threads(_packbuilder, 8); -+ fail_stage = GIT_PACKBUILDER_DELTAFICATION; -+ test_cancel(0); -+#else -+ cl_skip(); -+#endif -+} -+ -+void test_pack_cancel__cancel_after_delta_1_threaded(void) -+{ -+#ifdef GIT_THREADS -+ git_packbuilder_set_threads(_packbuilder, 8); -+ fail_stage = GIT_PACKBUILDER_DELTAFICATION; -+ test_cancel(1); -+#else -+ cl_skip(); -+#endif -+} -+ -+static int foreach_cb(void *buf, size_t len, void *payload) -+{ -+ git_indexer *idx = (git_indexer *) payload; -+ cl_git_pass(git_indexer_append(idx, buf, len, &_stats)); -+ return 0; -+} -+ -+void test_pack_cancel__foreach(void) -+{ -+ git_indexer *idx; -+ -+ seed_packbuilder(); -+ -+#ifdef GIT_EXPERIMENTAL_SHA256 -+ cl_git_pass(git_indexer_new(&idx, ".", GIT_OID_SHA1, NULL)); -+#else -+ cl_git_pass(git_indexer_new(&idx, ".", 0, NULL, NULL)); -+#endif -+ -+ cl_git_pass(git_packbuilder_foreach(_packbuilder, foreach_cb, idx)); -+ cl_git_pass(git_indexer_commit(idx, &_stats)); -+ git_indexer_free(idx); -+} -+ -+static int foreach_cancel_cb(void *buf, size_t len, void *payload) -+{ -+ git_indexer *idx = (git_indexer *)payload; -+ cl_git_pass(git_indexer_append(idx, buf, len, &_stats)); -+ return (_stats.total_objects > 2) ? 
-1111 : 0; -+} -+ -+void test_pack_cancel__foreach_with_cancel(void) -+{ -+ git_indexer *idx; -+ -+ seed_packbuilder(); -+ -+#ifdef GIT_EXPERIMENTAL_SHA256 -+ cl_git_pass(git_indexer_new(&idx, ".", GIT_OID_SHA1, NULL)); -+#else -+ cl_git_pass(git_indexer_new(&idx, ".", 0, NULL, NULL)); -+#endif -+ -+ cl_git_fail_with( -+ git_packbuilder_foreach(_packbuilder, foreach_cancel_cb, idx), -1111); -+ git_indexer_free(idx); -+} -+ -+void test_pack_cancel__keep_file_check(void) -+{ -+ assert(!git_disable_pack_keep_file_checks); -+ cl_git_pass(git_libgit2_opts(GIT_OPT_DISABLE_PACK_KEEP_FILE_CHECKS, true)); -+ assert(git_disable_pack_keep_file_checks); -+} -diff --git a/tests/resources/small.git/HEAD b/tests/resources/small.git/HEAD -new file mode 100644 -index 0000000000000000000000000000000000000000..cb089cd89a7d7686d284d8761201649346b5aa1c -GIT binary patch -literal 23 -ecmXR)O|w!cN=+-)&qz&7Db~+TEG|hc;sO9;xClW2 - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/config b/tests/resources/small.git/config -new file mode 100644 -index 0000000000000000000000000000000000000000..07d359d07cf1ed0c0074fdad71ffff5942f0adfa -GIT binary patch -literal 66 -zcmaz}&M!)h<>D+#Eyypk5{uv*03B5png9R* - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/description b/tests/resources/small.git/description -new file mode 100644 -index 0000000000000000000000000000000000000000..498b267a8c7812490d6479839c5577eaaec79d62 -GIT binary patch -literal 73 -zcmWH|%S+5nO;IRHEyyp$t+PQ$;d2LNXyJgRZve!Elw`VEGWs$&r??@ -Q$yWgB0LrH#Y0~2Y0PnOK(EtDd - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/hooks/applypatch-msg.sample b/tests/resources/small.git/hooks/applypatch-msg.sample -new file mode 100755 -index 0000000000000000000000000000000000000000..dcbf8167fa503f96ff6a39c68409007eadc9b1f3 -GIT binary patch -literal 535 -zcmY+AX;Q;542A#a6e8^~FyI8r&I~hf2QJ{GO6(?HuvEG*+#R{4EI%zhfA8r{j%sh$ -zHE~E-UtQd8{bq4@*S%jq3@bmxwQDXGv#o!N`o3AHMw3xD)hy0#>&E&zzl%vRffomqo=v6>_2NRa#TwDdYvTVQyueO*15Nlo%=#DXgC0bhF3vTa`LQGaO9;jeD$OP?~ -za$G4Q{z+Q_{5V?5h;a-noM$P{<>Q~j4o7u%#P6^o^16{y*jU=-K8GYD_dUtdj4FSx -zSC0C!DvAnv%S!4dgk -XB^)11aoGMJPCqWs%IS0YSv(eBT&%T6 - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/hooks/commit-msg.sample b/tests/resources/small.git/hooks/commit-msg.sample -new file mode 100755 -index 0000000000000000000000000000000000000000..f3780f92349638ebe32f6baf24c7c3027675d7c9 -GIT binary patch -literal 953 -zcmaJy@-{3h^^Cx;#d0zEA@DDc$nY4ez&|=%jTg@_HU*ub=!!y$xW09TSjlj -z(`I@QCsM`!9&80$I98wsQ8yK#)Orb<8re8FjkKh630D$QUDwi~(gkX=RunYm$rDjk -zlp%RUSnzA#6yjdG5?T?2DcYKp+v_lts0ljn&bh3J0bD5@N@1UKZ190O6ZeWr-BuZ^ -zWRebCX%(%=Xoj#(xYk1Cjtr!=tyBesf@m6}8zY6Ijbz9i9ziI_jG9MvR -zDH*e>^ga9IR?2wrSrAVm;eButj4Y>7(E2?b~jsu>& -zRKCJ7bp#19sqYh627wD%D9R$8=Ml$TNlumDypl~$jBu*G>5fIR^FB0h0Ex&TGZNr> -zL5hs1_K>taRb!|ThN9ns7^@4MXKP+6aGI_UK)T-M#rcP$;kN(Vcf#P)+5GzWa{l@J -z>-E{`$1iiNVYxq27}j;uo%;)r3kJI2xCFF~Ux;$Q%) -wjbk6JlDCM`jU&P+UVOvg`|iYl<7~9k>HHB4I;pdlQ=I-^$DrHaN$@lH1?P!0U;qFB - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/hooks/fsmonitor-watchman.sample b/tests/resources/small.git/hooks/fsmonitor-watchman.sample -new file mode 100755 -index 0000000000000000000000000000000000000000..41184ebc318c159f51cd1ebe2290559805df89d8 -GIT binary patch -literal 4777 -zcmbtYYi}F368$Xwipg4lq(BeHMvzvH-4;n7DGJBPqq#tw3aed8+IU5-m)yvL>;Cqh -z8FFRGj$`9CA8aoJ?j^$%==FV``-=rhLcPW`McSytRm~mEO7_&_cAVZrf1fFy*ha@8oe%*-aBYE 
-zcjzZg>LOkgxuUr-XJnHyD;zmPnRaSc#!k_P*d_BttRdc+J6G7za5#+^Y1nkc2Oowk`ya47uUR3Feu?B(w;S{(VYzxh}q-=#zP@uxSx{wbyPUMFU;K(06)$o{07&3yI?q{GqMcQ1c_^M<0< -zF4acAV)Il-V(rCTC1(;bsZ*}bl8dmejAk~yb`B}!^0;g^(o9kGUfZfDOvyp@x4OQt -zSgWh6T|3eq;9MFs8-#z+FDM1h(IjRUP|``PxupgJ7CUHOH90gbgl^2~97`?_X{P)) -zB*$r1cDlF-%azKND}?Gv`2K8-9v5e`gQoft=j?T<&a13c^!wY_$D`5z-X1g?ty&6- -zQN50{8?bUk9AI->^W@~~nkOghHIC2YN+AXkLQG_2-{Pq3%{`3KUMeG$iIn%%^6*NYb -zn|_BdV#C)n4565VccX;uT8&z3vSi!HXGbUj2B!R -zdz~&#fk#L-&k$fLwo$4?>12g@AXOKFekuo#6EHB%gmpD?1eyh%N8s{2wGoTu -z*@6cEZ^ZW!FAF_|JL`NkV7k}0ow|-2jHwbgH0;c@Dq*o?@&c*HnGdyx6^su8Qk%2{ -z*ye(dxO*6-&>qn1+zw}tc6;=sOX{4WB=VqjTS^))y1jlX2Q;=e!qMmFA5lC$#;BxC -z=Y%tRpWxb+_uQAvAw7Q{HGV#R$xb&udLCzZ+HN?kTyB};1EJ8UlQ5!>5eGW@)RX0n -zkjj>EF!3=0Gl^8dzv$B^NMGRxJoqN4A`xq-@wCbrx*u2NmIJ1xZ%H -zh;{|4T3(!E9sY#Ni(wUJYs1MmIc9bl)(4Nl3_wD_BWB>i<1S(LX7m*{Q7PU$muMS* -zM!%0EZx-Vw=Zey;erC?SNxF;pY@^A%-krqzfLV2meBp1vWdyArFYn`DD19T)Hw(?n -z)}{NP(Lk(o*?gl#B@pP7^*r|=;PIDT4|F#{2Hzh-AL0Rv$6uT;n|WzE4=slK?on@(fZeGhRgQCu56qB -z{+n81Az96qnQjMY*-*r-KV*7;Z#4QuJRJJV$M^KdldiMhj?ImK6~FvwJ*L5a){QoM=L5TYHkGO1$UrO3`a>{?Opw|b -zG(#59NQ#jFL9v~vgOVkM@^^(^A}onOE))yWEwhIlk&{ZyseZ^O0b=w8&O=BK{k<5B -k^Q-B@eG}LeHrquz%(SVEp_N)VhYZikCW__82JXfD17`J9Qvd(} - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/hooks/pre-applypatch.sample b/tests/resources/small.git/hooks/pre-applypatch.sample -new file mode 100755 -index 0000000000000000000000000000000000000000..625837e25f91421b8809a097f4a3103dd387ef31 -GIT binary patch -literal 481 -zcmY+ATTa6;5Jms9iouO45IBJXEg&Jm9@v1LPHMM_ZR|;#6tQh$71hSXq*MxP;V& -zj0cY7SCL=x4`a46sF)C>94Gk%=3q$W2s;j6iHtB2$R0%gix4oK@&T~=ALd_o*CKxt -I-`Pv{1Bpzc>;M1& - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/hooks/pre-commit.sample b/tests/resources/small.git/hooks/pre-commit.sample -new file mode 100755 -index 0000000000000000000000000000000000000000..10b39b2e26981b8f87ea424e735ef87359066dbb -GIT binary patch -literal 1706 -zcmZuxU2ohr5PY_N#pZ0-F<{-v&v-X^RA+u>k}E$4d&uD7=g_fA8+pNNV=4s0|iD3p<=DTXClTS -zXV23tJ;ECmN@M0j@zUAKEYW@3bv!SeYZ8ZH`YQNTApFVNc;F|9r5p4TqGs=>8E?6y -zi|gY{iM#PG1nL?UE9YCnWTk72kgZPG*Usqw!~Qd3c?~@w2?%eg@~)+VlSs6N5Yf2^ -zz;owF#K#r^&KMq1A`oqVGFpD&-!Pv|Rc -zO3KSqA@h9nSc%bm`0)Amk6*J}@14J*1-219l%%7D!Pl}UK>|lVi0Dfgu2jN3WC!uL -z0ej??b2iSehVgdnWHmZV4kUo*QL#aiIp}U=9x)IXk}JJ7VQ;CI9Rtn5e0VcjbYcVt+`x5D+svCGD;Z5hm*E$jSEQZ%SQ(}oLgslTvrKK@9Qf#b!hajVFnp9@oIix;NcI9Wk -xjnh0ya!AWet{I7YpD;y6HXyzI*lfSvH=o6*7mJZPkuaYpm>vzZ`wyGEBtOQPo|pgt - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/hooks/pre-push.sample b/tests/resources/small.git/hooks/pre-push.sample -new file mode 100755 -index 0000000000000000000000000000000000000000..02cbd80c287f959fe33975bb66c56293e3f5b396 -GIT binary patch -literal 1431 -zcmaJ>U60!~5PUX&#a1@z9B{IIZkjLT0t5kq9#8~D(I5{+8&J~9;#ndUk~-ZT`r|uG -z$#K$$J{TsKs*LP1}9!GoZ@4I4myMMG_di|of -z%?llx{O8TS-#^;(OioEmPy%kwWQBA1OMzV{hsQ8XFzS1k!~YQoLa5 -zhtP1fA$q6VmMbbAC_9)4I628k*O5J$NR19uHe4QYDK<==I~SQk)Nu%xQ~KH -z53w=!ke(FGb_PpnZfd*+hnXDTn;2*`u^~;?+5C~cn?bRka7NR%06%e6O91{MAgN6J -zmlO8{Biw4&wr&&(z4p3eln`E}XR9m9bNYZ7Ibrg(4yZIXrfgD7N*AFD7L3YSM#j}% -zo__rOS5fr;@8UM<6cl+cv_$YB$PQ&9dv($eM*))g!_cu!QcSh-mqE9i#QDZT)=o#` -z?8!RtE?w6p?GkGZ-6yt_p~5~4ecu|Sf^)6096%h*q-eNiEA1;Xwg)p~Q&iGSG7-IQ -z9aII&`ps$WOojFA`*bjGkFk|E@sHHuD}W^d`7YJ3YE^zrQnqR -zGoq?;YGKe)93o|_=^f%3U1KYZGPOXRRxK7w`UUbMMa3<86OmVH!EKP$8RCrn9mWX+ -zC?9yF!fRVLmud3hF<}x;;sR}f(*r}6Gap3fR6zLHR~kbMgD{98N`L+r&?3p~*0+FX -zcAL%j=(SO}xTJUTvA`&Lf`2mv4koPG9&|;2+68$XxiXKL@ma;l5d2^5Ba_rPh_DHI-u1#&_upttZXp;no03$20|NFiM 
-zK#D#xQ>!Z3JkX8T-LDVm!B5j7y_{;JDmmTTef+K1oIiPzeEr+Ai*<2PUgnG4^ZB>p -z_fkAvoR1emuf~ri^K$-px=4#D-vY9w& -z`bCv#2zVn=YnJyeNey(Y -zRh`9vtLw~A+5zsjp|W0Nsa|29Rm!B>OoG5a+vi;ari8O>KkU!KAWg_fa3btK2x*_@ -z0bEc7J;Ubghm}n9bOi(Sv_B66nQ7U)J7f0fO}8Wuf*uorcIgEG -zOHc|-V6+HlRhOP}?Cn?@5iwSl43abmBA^2lyL$+cpabCGVES+v^j^FO_}?FIp%En%Ll?Z*7*}TwrZyg5OSZ9rY-`aU~Mc-jjv{Ll)FLMgtB4ujktfQ`Xhqrka -zT=P!A;9w^;Z?PqpLwOLu=cj3L>TdUKw2;DMu)`oVkj}#bcDx4tYg=j%D`+i{W~fVM -zVmZ>W9VMyin9c-0KzI_;iZ-g|OyzuG`Yq%(%dvl;ifnVr0;jWE&S`z|rQu=!yHBBO -zx`OJ;oOQ(KKM<$(bC38o>pD0%|HA(E0TRw7qj$fJ_pRN+7Nm>dSC(gLg{(`t+5Z=?o+}wXU4tHy+&%F&aRhFebeEhR2R5|$#Ycbp^w@t -zTl%=f1t=w+WpJzF<|CE@?SCNAz)%9?w33lQ8vrHJqPfH9@}qs*QXOG71W=ylx;wOB -zcx!Bj^)Yy6WX$a^vBkBJ5CobqlaDx_B0c<3b+8)f84LCrt;e;qxc+7>VbwVK{skNv!wvBiTa^9Iu -zkwP;VK)jH$WJ{`MRwAA9fal!y0dtV;FWg8PTkWU>CwnqD>1ZX2B@;$DlX%C5MI+}{ -z9xQVnffR*~v2KAUj*hCdgul~`bk#mk`o>zk9)<2Uc8?hUZAEvd!`9em)~$Z)zev>w^8 -zyAgCP_$&Y)7HSQ84`xG}OeTavaEswwF|8Xpi5iZzZa@hCiv(J-%bfFC&)HLlO+Rhw -zG6g?9eL5&A!SuJnQ6}LxG%tU+@vZ`i+!+Rz6iYvsTdhnPo7lW{m-}{hya@viX4)XZ -zngaw+j;gloB#|UwI@8sOmQpc`h+bicQJnQIB5eifIMQNgD2+oai33m!34~xU|0Azj -zhu$8z+T5^;Pxx@d{N)pzOJLSa^e;aDf$W%N5XcOf!mGC9l9j$Ev2h6N+6ZQC+CJzl -zaM7?S!SrFLS2DASjj(h6y1WN3N?|bmqmyzm!&nLoE|`rKBOc_yDF$a#FsUn!IQf(t -zdC&Us(kQz*7mvH^j*^MC@>wTDb}g%~sx*ng#>{@lR=XG-Z5_ -z#<9*Oh0joMzt;nS)ObAp)347`D=}r-;nV!TbIq&xrGRGsF6fZg+!VkfUei@_&l-M& -zPqQ+Dw)RV}+)I8RuqAxa`Pv8e&!_gXS=e2-un>=Ktn}-;%lLZxaVn?Q>yZCb2R3Wk -z77zr%;Rq&h|2ncqyKYmFI0148JVY7Q$V5p=dWj+Qqpu%i|xp2C=WaOb2Wudn^h0EcD%$p9YVU1fnoRV9`(cy(vv6K>FXS!2jY>1GnU--7)4usH&K -zao*&P^@9~YmUe|ZdLW@C>H;!*Vt3>Nw4M*;=?j(TBD#O@XCv0|MEhA;z}kTFRv@`tPHhp=&Yh -zg%Zhg4i7o_k{a5i&f5;tZ==%}^Sn4aD_6%qs_XAuJt&EumdH4Yu`UjT<-+XHTuHss+b -YOmM2;hq8Egm*4=7_P9T{21QBYH*F=mfB*mh - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/hooks/prepare-commit-msg.sample b/tests/resources/small.git/hooks/prepare-commit-msg.sample -new file mode 100755 -index 0000000000000000000000000000000000000000..b1970da1b6d3f42f00069fd17c325de72cda812e -GIT binary patch -literal 1702 -zcmb_cTW{Mo6n>t6#i?x6xmZ$SFLf{QfG*3r0L?Pg?px55l8$UTGO3bO;spKi{V3XX -z))weX0X>M9bNMcZ-6yG%>(n}JI2|25dr}WZBP@ih?JX^+@ -zu#5O48P>yRX(mfDIhYP)doc1&TADZa@ZGpusJ$6G+e$ZMcmC -zoOosDQPS}l{H?YPsq(4;0SGkATa9eeqAaDcjq8n2wALbFwU@2i@FAaRV!=uw-nwx1gKn2SvY -z>Ff>;2sg!+Hxfkwv1lsiii=p6WenF=5)6LZcQaZ=aS_}+-4Y&?!@HWh|<^gJ21!|T@+%On#w6azxPHV}XsRbe*w -zR_TZ2XEsQa1lPK~biYqg@0-RW@5J1@=<87cFzEUABdCoFH2CZo?}l(Z*!OFqUxo>K -z_d`l#4d9|H6;VPT{X?^{VJ>oL|D7K{BJwwqB>`YcPoGk+9hbvHnoQ{EM|kPgD_`wk -zKm4#2xu;-y`RAm!=L_BnLvJ8$AZm8@?)v<%vwvsw8AF2x6!mTT;c72A_~U9nIq0ST -zv)N0!I!^1p=g8-RQfx5)E_Mb_4I2vtQpI30XZ&t-9h5!Hn - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/hooks/push-to-checkout.sample b/tests/resources/small.git/hooks/push-to-checkout.sample -new file mode 100755 -index 0000000000000000000000000000000000000000..a80611e18896f212c390d845e49a3f6d5693b41d -GIT binary patch -literal 2840 -zcmai0U31$u5PXh)#YOS7cE^-rw@uolNhe9&aUS|HtvhX>G$45tVUYj>fRdF?|9kfU -zNR~aG=E)WbEbeyq7JTw}ZuHIE2kUtL<AoeCNptd-NM1aZLhESzC;I`+Ns -zfmNNjdAp^W8#Q*}l>CT7RB9F5(BbI8ly2l~+E};JW|>&d1)=epZ-8vm8ppkbEVn#R -zt30a5A-c(YQR8eM5%;|UAnO>rt!&@x@G@yp+92%w-}%(5P_+P&Wf_zb$f-Qrl5(7z -z2ah(bkE;!DK(&aAMuQ%1TS>ai?wSXCOCSj=_}8x4IbCx^$}9q)whwv)SBt| -zg#MX4;;Oau`m=MI9(^&zPbueY@~>3*ixX%mvR5m_1&nAg@ZKvY1E$O}&EtLiG;mhV -z1xhMIm~fGjmf_#{62f`y;09?I7M1W2tWQvz<}i9lR>OpQyUJi45_&*pQus&EkwY<> -zI|ZAx=*3i9a-)g)hXkvO7>UJ5MNgL(Z+-wpXVcgbSgpmFmbf1~DPA(OVGI&FNLeIE -zNH!_aiH$vsif$_j7=T2{cS(!DOI`~bn@)vSd-0d7xL=DF;UNP|tW}4ih>DvHtu9tY_pbJ6x(6E*hxgC 
-zzNDao%qlr-IE%YGbS4hF!n!on7#W3$bX-_hbZAaws^nHu#)Dx=WzdbJ>AKzAy@T$x -zSWE^x9+|TEHVEPyaPYa0DOChp?AeHSBBDbZNokQpAY{lE!7geZI=jV)G^2@l)&91Zb1+`T+oq9wWF -zRV~kGTGce0O~p^6mj{kT5kL(pv>r;Lvd7VDX*P>A^Th`$3cWO0L81p4Ysdo3ZP1(SrR-peEdTo;-@bkB((G -zPHYQXUL!@Q$e(OQ;R9r%@Afz+50I7>*^^c&&|E*r-jN)LH=pM4AqMwWxSv|nqjddE -Z4{_hwv8!W(T -zYw`X3V>TCdnSD1ru8&`j=2DIPbCT@SnIgUw>$+lEYP}+x8(BMYnr=iT3*ndq)xzaV -z>I+qjv}vC#8_9M+b1p#uNS0M0)q
8!3p_LRQ0MA3M`!2foxzRUjbFY@}O~(ki=S -zqscnq8cU*dY)D$$cqE}n)V0yIk>CNKHCrndOtSP*HbOb;nbwAHSb;R+gs^?^Dve%) -zoW}t(*D}$>O3ab0TS^-;J|u&sb-PkZzo#kn*#xYt(;FGuwzSb^g&RDiGcOz9TB;Hu`nJh)$W=C=XCSm2AY=$w3G3P-V#Oo+N*;#2 -z4ijJ-pBZ=;T(RTgp_HYrD!uW-dTMfkuqY5jwOy)~gM;#=P^i{!l7`pXTS^s(&^{RU -zydaw}OpS#^D1cXM8?FW+fh`t7D(g;yr6|}fdaNtZBx3hlK~IpkTu3!Qq%R+zAo#t}Bs8^3$vHD+-TGT@`F>H1Cc#WAVW;&$S6%fE2d6@kLS0g&ihIM{}0z -z8#XhD>b>3{(BH|Px7}&lJ4%y1v(CihZJx@8MPoGdl*BJGD;usf*iS7%;{Joe; -zNFuBa>*~o&qETDPo~u&~$FxE1xb^x&(CbE`Y3GfsibL2rl+L;>P6j&Y3U>K$mkp*6 -zd`Q{<^+^&;GskGjwD-%!boR&i-TCA9UOR|@=GYb5x#+dhd7fkaVIR^pol`Mv+rUbmZ43dVL6^S7g3{NsPiG$iy$5EDB% -z6KIgnb$H(n&t3e4E6d4V7w^B?JS}JkG)PM6+X3Co`SQs($O*AA+MG~{S7RJ=cy-l& -z>~%3y`tjfx2>uOutB_^s -ziwG=e=ch|FQ0IkN91US7rhdQkXhwwt$gU0WEVDjo=IPb+?6PC=s8}J*ua(Ms))`UL -fi$|vMHn?H_tSE3ettp-hLlsZCxaLX8(nU;bVRB;Ce6@s#eu2|WvLz>- -zvy(&>Gyfp@+BtKnpqWkKi^+v{4jn_pNw_zeuxETifiGO|)w}OANj2n2D^K=o3j6P6uOL70#cbA{uzWXDlk1wr9GV1X(2W{RuTvjXV -zCmd8u -zH%V`94=q3)Dk)PHNrnFC(T1)Om6f{Usj;u1R->&XoCYVK2V3ZlgZuF?N}1+33OER*x -z*9Z=L=zI8CN>A_^jYjt0F$psO$sL=38q5q|SG)qCN6{^>RFh5E&l5GZ$pEahnF&d+ -z5c>64t}uJPkf~_!VUj#&N%nC-gUMj%=@B=!V>&}xtj2%@-mOm#rQUSJ3(ccmc+fza -znZ#uxF>N?QN5UrIEd!5RgHEfW#;(nKYF+D<*rdshJ$X-z2OZ2X;)nn@KSVdVhaA?}@3;6gZxb4v -zozoWSr{{+!h}zGpumG3H`=AvWpm^9kW;J$Jp^Xl*?8ckr`fqN%c|Z;VC0|cM4vSrk -zH_O8Yvh85nvJp^;``wo8=z0f`FWg?`>gO#y1hjX1{}rTlg9rwIKia8eyGexA3GnuR -z`Rg~XZoW;0pA)vI8=p5!+6sIn#C^FCvR>ffv39h6SCNi9v);%WD;WZ`of_MgwyRWy -z-yY%n*Y>X89W-v4`Ff%bx$Vkn}$!Ay}rnY6F$m-Kg*KD_+;Lx#g4|^&N -I02NaX#p`nv=Kufz - -literal 0 -HcmV?d00001 - -diff --git a/tests/resources/small.git/objects/af/5626b4a114abcb82d63db7c8082c3c4756e51b b/tests/resources/small.git/objects/af/5626b4a114abcb82d63db7c8082c3c4756e51b -new file mode 100644 -index 0000000000000000000000000000000000000000..822bc151862ec3763cf2d3fa2372b93bbd3a4b65 -GIT binary patch -literal 30 -mcmb>0i}&W3IZ_@1U=^!a~EV1casc=c+{&un1qQN*i9hD|0|m(2n|iwp*q%W -z%N;b$hu%cM`$TMo*~EnC1BFP&Pfj~;jZVKXQ96s_PhV<-XAROi+@-v8dBLUa`!;GB -k^iXlEv8$>R)1G>9th&t3j;s7J{?^9n|7U^`%mXoWC24Q^m!3%@{ - -literal 0 -HcmV?d00001 - diff --git a/src/external-api-docs/README.md b/src/external-api-docs/README.md index 8760ac88b..1940cc1c0 100644 --- a/src/external-api-docs/README.md +++ b/src/external-api-docs/README.md @@ -15,7 +15,7 @@ programmatically: 1. Embedding the evaluator 2. Writing language plug-ins -Embedding means you link the Nix C libraries in your program and use them from +Embedding means you link the Nix C API libraries in your program and use them from there. Adding a plug-in means you make a library that gets loaded by the Nix language evaluator, specified through a configuration option. 
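/*
 * To make the embedding route above concrete: a minimal sketch of a program
 * that links the Nix C API libraries and evaluates an expression. It relies
 * on the declarations from nix_api_expr.h shown later in this patch; the
 * context helpers and store functions come from nix_api_util.h and
 * nix_api_store.h, and the "auto" store URI is an assumption of this sketch,
 * not part of this change. Error checking is elided for brevity.
 */
#include <stdio.h>

#include "nix_api_expr.h"
#include "nix_api_store.h"
#include "nix_api_util.h"
#include "nix_api_value.h"

int main(void)
{
    nix_c_context * ctx = nix_c_context_create();
    nix_libexpr_init(ctx);                                  /* must run before the first EvalState */

    Store * store = nix_store_open(ctx, "auto", NULL);      /* default system store */
    EvalState * state = nix_state_create(ctx, NULL, store); /* NULL: default lookup path */

    nix_value * v = nix_alloc_value(ctx, state);
    nix_expr_eval_from_string(ctx, state, "1 + 2", ".", v);
    nix_value_force(ctx, state, v);                         /* evaluation is lazy; force the thunk */
    printf("1 + 2 = %lld\n", (long long) nix_get_int(ctx, v));

    /* Release everything in reverse order of acquisition. */
    nix_value_decref(ctx, v);
    nix_state_free(state);
    nix_store_free(store);
    nix_c_context_free(ctx);
    return 0;
}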
diff --git a/src/json-schema-checks/.version b/src/json-schema-checks/.version new file mode 120000 index 000000000..b7badcd0c --- /dev/null +++ b/src/json-schema-checks/.version @@ -0,0 +1 @@ +../../.version \ No newline at end of file diff --git a/src/json-schema-checks/content-address b/src/json-schema-checks/content-address new file mode 120000 index 000000000..194a265a1 --- /dev/null +++ b/src/json-schema-checks/content-address @@ -0,0 +1 @@ +../../src/libstore-tests/data/content-address \ No newline at end of file diff --git a/src/json-schema-checks/derivation b/src/json-schema-checks/derivation new file mode 120000 index 000000000..3dc1cbe06 --- /dev/null +++ b/src/json-schema-checks/derivation @@ -0,0 +1 @@ +../../src/libstore-tests/data/derivation \ No newline at end of file diff --git a/src/json-schema-checks/deriving-path b/src/json-schema-checks/deriving-path new file mode 120000 index 000000000..4f50b2ee9 --- /dev/null +++ b/src/json-schema-checks/deriving-path @@ -0,0 +1 @@ +../../src/libstore-tests/data/derived-path \ No newline at end of file diff --git a/src/json-schema-checks/hash b/src/json-schema-checks/hash new file mode 120000 index 000000000..d68763879 --- /dev/null +++ b/src/json-schema-checks/hash @@ -0,0 +1 @@ +../../src/libutil-tests/data/hash \ No newline at end of file diff --git a/src/json-schema-checks/meson.build b/src/json-schema-checks/meson.build new file mode 100644 index 000000000..8437ccefc --- /dev/null +++ b/src/json-schema-checks/meson.build @@ -0,0 +1,157 @@ +# Run with: +# meson test --suite json-schema +# Run with: (without shell / configure) +# nix build .#nix-json-schema-checks + +project( + 'nix-json-schema-checks', + version : files('.version'), + meson_version : '>= 1.1', + license : 'LGPL-2.1-or-later', +) + +fs = import('fs') + +# Note: The 'jsonschema' package provides the 'jv' command +jv = find_program('jv', required : true) + +# The schema directory is a committed symlink to the actual schema location +schema_dir = meson.current_source_dir() / 'schema' + +# Get all example files +schemas = [ + { + 'stem' : 'hash', + 'schema' : schema_dir / 'hash-v1.yaml', + 'files' : [ + 'sha256-base64.json', + 'sha256-base16.json', + 'sha256-nix32.json', + 'blake3-base64.json', + ], + }, + { + 'stem' : 'content-address', + 'schema' : schema_dir / 'content-address-v1.yaml', + 'files' : [ + 'text.json', + 'nar.json', + ], + }, + { + 'stem' : 'store-path', + 'schema' : schema_dir / 'store-path-v1.yaml', + 'files' : [ + 'simple.json', + ], + }, + { + 'stem' : 'derivation', + 'schema' : schema_dir / 'derivation-v3.yaml', + 'files' : [ + 'dyn-dep-derivation.json', + 'simple-derivation.json', + ], + }, + { + 'stem' : 'derivation', + 'schema' : schema_dir / 'derivation-v3.yaml#/$defs/output', + 'files' : [ + 'output-caFixedFlat.json', + 'output-caFixedNAR.json', + 'output-caFixedText.json', + 'output-caFloating.json', + 'output-deferred.json', + 'output-impure.json', + 'output-inputAddressed.json', + ], + }, + { + 'stem' : 'deriving-path', + 'schema' : schema_dir / 'deriving-path-v1.yaml', + 'files' : [ + 'single_opaque.json', + 'single_built.json', + 'single_built_built.json', + ], + }, + # Match overall + { + 'stem' : 'store-object-info', + 'schema' : schema_dir / 'store-object-info-v1.yaml', + 'files' : [ + 'pure.json', + 'impure.json', + 'empty_pure.json', + 'empty_impure.json', + ], + }, + { + 'stem' : 'nar-info', + 'schema' : schema_dir / 'store-object-info-v1.yaml', + 'files' : [ + 'pure.json', + 'impure.json', + ], + }, + # Match exact variant + { + 
'stem' : 'store-object-info', + 'schema' : schema_dir / 'store-object-info-v1.yaml#/$defs/base', + 'files' : [ + 'pure.json', + 'empty_pure.json', + ], + }, + { + 'stem' : 'store-object-info', + 'schema' : schema_dir / 'store-object-info-v1.yaml#/$defs/impure', + 'files' : [ + 'impure.json', + 'empty_impure.json', + ], + }, + { + 'stem' : 'nar-info', + 'schema' : schema_dir / 'store-object-info-v1.yaml#/$defs/base', + 'files' : [ + 'pure.json', + ], + }, + { + 'stem' : 'nar-info', + 'schema' : schema_dir / 'store-object-info-v1.yaml#/$defs/narInfo', + 'files' : [ + 'impure.json', + ], + }, +] + +# Validate each example against the schema +foreach schema : schemas + stem = schema['stem'] + schema_file = schema['schema'] + if '#' not in schema_file + # Validate the schema itself against JSON Schema Draft 04 + test( + stem + '-schema-valid', + jv, + args : [ + 'http://json-schema.org/draft-04/schema', + schema_file, + ], + suite : 'json-schema', + ) + endif + foreach example : schema['files'] + test( + stem + '-example-' + fs.stem(example), + jv, + args : [ + schema_file, + files(stem / example), + ], + suite : 'json-schema', + ) + endforeach +endforeach diff --git a/src/json-schema-checks/nar-info b/src/json-schema-checks/nar-info new file mode 120000 index 000000000..0ba4d5870 --- /dev/null +++ b/src/json-schema-checks/nar-info @@ -0,0 +1 @@ +../../src/libstore-tests/data/nar-info \ No newline at end of file diff --git a/src/json-schema-checks/package.nix b/src/json-schema-checks/package.nix new file mode 100644 index 000000000..160db003f --- /dev/null +++ b/src/json-schema-checks/package.nix @@ -0,0 +1,56 @@ +# Run with: nix build .#nix-json-schema-checks +{ + lib, + mkMesonDerivation, + + meson, + ninja, + jsonschema, + + # Configuration Options + + version, +}: + +mkMesonDerivation (finalAttrs: { + pname = "nix-json-schema-checks"; + inherit version; + + workDir = ./.; + fileset = lib.fileset.unions [ + ../../.version + ../../doc/manual/source/protocols/json/schema + ../../src/libutil-tests/data/hash + ../../src/libstore-tests/data/content-address + ../../src/libstore-tests/data/store-path + ../../src/libstore-tests/data/derivation + ../../src/libstore-tests/data/derived-path + ../../src/libstore-tests/data/path-info + ../../src/libstore-tests/data/nar-info + ./. 
+  ];
+
+  outputs = [ "out" ];
+
+  passthru.externalNativeBuildInputs = [
+    jsonschema
+  ];
+
+  nativeBuildInputs = [
+    meson
+    ninja
+  ]
+  ++ finalAttrs.passthru.externalNativeBuildInputs;
+
+  doCheck = true;
+
+  mesonCheckFlags = [ "--print-errorlogs" ];
+
+  postInstall = ''
+    touch $out
+  '';
+
+  meta = {
+    platforms = lib.platforms.all;
+  };
+})
diff --git a/src/json-schema-checks/schema b/src/json-schema-checks/schema
new file mode 120000
index 000000000..473e47b1b
--- /dev/null
+++ b/src/json-schema-checks/schema
@@ -0,0 +1 @@
+../../doc/manual/source/protocols/json/schema
\ No newline at end of file
diff --git a/src/json-schema-checks/store-object-info b/src/json-schema-checks/store-object-info
new file mode 120000
index 000000000..a3c9e07c4
--- /dev/null
+++ b/src/json-schema-checks/store-object-info
@@ -0,0 +1 @@
+../../src/libstore-tests/data/path-info
\ No newline at end of file
diff --git a/src/json-schema-checks/store-path b/src/json-schema-checks/store-path
new file mode 120000
index 000000000..003b1dbbb
--- /dev/null
+++ b/src/json-schema-checks/store-path
@@ -0,0 +1 @@
+../../src/libstore-tests/data/store-path
\ No newline at end of file
diff --git a/src/libcmd/built-path.cc b/src/libcmd/built-path.cc
index 4d76dd6da..fc7f18493 100644
--- a/src/libcmd/built-path.cc
+++ b/src/libcmd/built-path.cc
@@ -117,10 +117,11 @@ RealisedPath::Set BuiltPath::toRealisedPaths(Store & store) const
                         "the derivation '%s' has unrealised output '%s' (derived-path.cc/toRealisedPaths)",
                         store.printStorePath(p.drvPath->outPath()),
                         outputName);
-                auto thisRealisation = store.queryRealisation(DrvOutput{*drvOutput, outputName});
+                DrvOutput key{*drvOutput, outputName};
+                auto thisRealisation = store.queryRealisation(key);
                 assert(thisRealisation); // We’ve built it, so we must
                                          // have the realisation
-                res.insert(*thisRealisation);
+                res.insert(Realisation{*thisRealisation, std::move(key)});
             } else {
                 res.insert(outputPath);
             }
diff --git a/src/libcmd/include/nix/cmd/command.hh b/src/libcmd/include/nix/cmd/command.hh
index 20cd1abc1..2bff11dc1 100644
--- a/src/libcmd/include/nix/cmd/command.hh
+++ b/src/libcmd/include/nix/cmd/command.hh
@@ -350,6 +350,20 @@ struct MixEnvironment : virtual Args
     void setEnviron();
 };
 
+struct MixNoCheckSigs : virtual Args
+{
+    CheckSigsFlag checkSigs = CheckSigs;
+
+    MixNoCheckSigs()
+    {
+        addFlag({
+            .longName = "no-check-sigs",
+            .description = "Do not require that paths are signed by trusted keys.",
+            .handler = {&checkSigs, NoCheckSigs},
+        });
+    }
+};
+
 void completeFlakeInputAttrPath(
     AddCompletions & completions,
     ref<EvalState> evalState,
diff --git a/src/libcmd/include/nix/cmd/installable-flake.hh b/src/libcmd/include/nix/cmd/installable-flake.hh
index 935ea8779..9f449ad48 100644
--- a/src/libcmd/include/nix/cmd/installable-flake.hh
+++ b/src/libcmd/include/nix/cmd/installable-flake.hh
@@ -69,7 +69,7 @@ struct InstallableFlake : InstallableValue
      */
     std::vector<ref<eval_cache::AttrCursor>> getCursors(EvalState & state) override;
 
-    std::shared_ptr<flake::LockedFlake> getLockedFlake() const;
+    ref<flake::LockedFlake> getLockedFlake() const;
 
     FlakeRef nixpkgsFlakeRef() const;
 };
@@ -87,6 +87,4 @@ static inline FlakeRef defaultNixpkgsFlakeRef()
     return FlakeRef::fromAttrs(fetchSettings, {{"type", "indirect"}, {"id", "nixpkgs"}});
 }
 
-ref<eval_cache::EvalCache> openEvalCache(EvalState & state, std::shared_ptr<flake::LockedFlake> lockedFlake);
-
 } // namespace nix
diff --git a/src/libcmd/installable-flake.cc b/src/libcmd/installable-flake.cc
index 5431100d3..65f48fa2b 100644
--- a/src/libcmd/installable-flake.cc
+++ b/src/libcmd/installable-flake.cc
@@ -185,16 +185,16 @@ std::vector<ref<eval_cache::AttrCursor>> InstallableFlake::getCursors(EvalState
     return res;
 }
 
-std::shared_ptr<flake::LockedFlake> InstallableFlake::getLockedFlake() const
+ref<flake::LockedFlake> InstallableFlake::getLockedFlake() const
 {
     if (!_lockedFlake) {
         flake::LockFlags lockFlagsApplyConfig = lockFlags;
         // FIXME why this side effect?
         lockFlagsApplyConfig.applyNixConfig = true;
-        _lockedFlake =
-            std::make_shared<flake::LockedFlake>(lockFlake(flakeSettings, *state, flakeRef, lockFlagsApplyConfig));
+        _lockedFlake = make_ref<flake::LockedFlake>(lockFlake(flakeSettings, *state, flakeRef, lockFlagsApplyConfig));
     }
-    return _lockedFlake;
+    // _lockedFlake is now non-null but still just a shared_ptr
+    return ref<flake::LockedFlake>(_lockedFlake);
 }
 
 FlakeRef InstallableFlake::nixpkgsFlakeRef() const
diff --git a/src/libcmd/installables.cc b/src/libcmd/installables.cc
index 96ff06ad3..f0f36378b 100644
--- a/src/libcmd/installables.cc
+++ b/src/libcmd/installables.cc
@@ -342,8 +342,7 @@ void completeFlakeRefWithFragment(
             parseFlakeRef(fetchSettings, expandTilde(flakeRefS), std::filesystem::current_path().string());
 
         auto evalCache = openEvalCache(
-            *evalState,
-            std::make_shared<flake::LockedFlake>(lockFlake(flakeSettings, *evalState, flakeRef, lockFlags)));
+            *evalState, make_ref<flake::LockedFlake>(lockFlake(flakeSettings, *evalState, flakeRef, lockFlags)));
 
         auto root = evalCache->getRoot();
 
@@ -443,42 +442,6 @@ static StorePath getDeriver(ref<Store> store, const Installable & i, const Store
     return *derivers.begin();
 }
 
-ref<eval_cache::EvalCache> openEvalCache(EvalState & state, std::shared_ptr<flake::LockedFlake> lockedFlake)
-{
-    auto fingerprint = evalSettings.useEvalCache && evalSettings.pureEval
-        ? lockedFlake->getFingerprint(state.store, state.fetchSettings)
-        : std::nullopt;
-    auto rootLoader = [&state, lockedFlake]() {
-        /* For testing whether the evaluation cache is
-           complete. */
-        if (getEnv("NIX_ALLOW_EVAL").value_or("1") == "0")
-            throw Error("not everything is cached, but evaluation is not allowed");
-
-        auto vFlake = state.allocValue();
-        flake::callFlake(state, *lockedFlake, *vFlake);
-
-        state.forceAttrs(*vFlake, noPos, "while parsing cached flake data");
-
-        auto aOutputs = vFlake->attrs()->get(state.symbols.create("outputs"));
-        assert(aOutputs);
-
-        return aOutputs->value;
-    };
-
-    if (fingerprint) {
-        auto search = state.evalCaches.find(fingerprint.value());
-        if (search == state.evalCaches.end()) {
-            search =
-                state.evalCaches
-                    .emplace(fingerprint.value(), make_ref<eval_cache::EvalCache>(fingerprint, state, rootLoader))
-                    .first;
-        }
-        return search->second;
-    } else {
-        return make_ref<eval_cache::EvalCache>(std::nullopt, state, rootLoader);
-    }
-}
-
 Installables SourceExprCommand::parseInstallables(ref<Store> store, std::vector<std::string> ss)
 {
     Installables result;
@@ -604,28 +567,28 @@ std::vector<BuiltPathWithResult> Installable::build(
 
 static void throwBuildErrors(std::vector<KeyedBuildResult> & buildResults, const Store & store)
 {
-    std::vector<KeyedBuildResult> failed;
+    std::vector<std::pair<KeyedBuildResult *, BuildResult::Failure *>> failed;
     for (auto & buildResult : buildResults) {
-        if (!buildResult.success()) {
-            failed.push_back(buildResult);
+        if (auto * failure = buildResult.tryGetFailure()) {
+            failed.push_back({&buildResult, failure});
         }
     }
 
     auto failedResult = failed.begin();
     if (failedResult != failed.end()) {
         if (failed.size() == 1) {
-            failedResult->rethrow();
+            failedResult->second->rethrow();
         } else {
             StringSet failedPaths;
             for (; failedResult != failed.end(); failedResult++) {
-                if (!failedResult->errorMsg.empty()) {
+                if (!failedResult->second->errorMsg.empty()) {
                     logError(
                         ErrorInfo{
                             .level = lvlError,
-                            .msg = failedResult->errorMsg,
+                            .msg = failedResult->second->errorMsg,
                         });
                 }
-                failedPaths.insert(failedResult->path.to_string(store));
+                failedPaths.insert(failedResult->first->path.to_string(store));
            }
             throw Error("build of %s failed", concatStringsSep(", ", quoteStrings(failedPaths)));
         }
@@ -695,12 +658,14 @@ std::vector<std::pair<ref<Installable>, BuiltPathWithResult>> Installable::build
         auto buildResults = store->buildPathsWithResults(pathsToBuild, bMode, evalStore);
         throwBuildErrors(buildResults, *store);
         for (auto & buildResult : buildResults) {
+            // If we didn't throw, they must all be successes
+            auto & success = std::get<BuildResult::Success>(buildResult.inner);
             for (auto & aux : backmap[buildResult.path]) {
                 std::visit(
                     overloaded{
                         [&](const DerivedPath::Built & bfd) {
                             std::map<std::string, StorePath> outputs;
-                            for (auto & [outputName, realisation] : buildResult.builtOutputs)
+                            for (auto & [outputName, realisation] : success.builtOutputs)
                                 outputs.emplace(outputName, realisation.outPath);
                             res.push_back(
                                 {aux.installable,
diff --git a/src/libcmd/meson.build b/src/libcmd/meson.build
index 3833d7e0a..f553afa0b 100644
--- a/src/libcmd/meson.build
+++ b/src/libcmd/meson.build
@@ -67,7 +67,6 @@ config_priv_h = configure_file(
 )
 
 subdir('nix-meson-build-support/common')
-subdir('nix-meson-build-support/asan-options')
 
 sources = files(
   'built-path.cc',
diff --git a/src/libcmd/repl.cc b/src/libcmd/repl.cc
index 38d06336b..a308b731d 100644
--- a/src/libcmd/repl.cc
+++ b/src/libcmd/repl.cc
@@ -669,7 +669,7 @@ ProcessLineResult NixRepl::processLine(std::string line)
             ss << "No documentation found.\n\n";
         }
 
-        auto markdown = toView(ss);
+        auto markdown = ss.view();
         logger->cout(trim(renderMarkdownToTerminal(markdown)));
     } else
diff --git a/src/libexpr-c/meson.build b/src/libexpr-c/meson.build
index 03cee41a0..c47704ce4 100644
--- a/src/libexpr-c/meson.build
+++ b/src/libexpr-c/meson.build
@@ -28,7 +28,6 @@ deps_public_maybe_subproject = [
 subdir('nix-meson-build-support/subprojects')
 
 subdir('nix-meson-build-support/common')
-subdir('nix-meson-build-support/asan-options')
 
 sources = files(
   'nix_api_expr.cc',
diff --git a/src/libexpr-c/nix_api_expr.h b/src/libexpr-c/nix_api_expr.h
index 2be739955..3623ee076 100644
--- a/src/libexpr-c/nix_api_expr.h
+++ b/src/libexpr-c/nix_api_expr.h
@@ -4,11 +4,14 @@
  * @brief Bindings to the Nix language evaluator
  *
  * See *[Embedding the Nix Evaluator](@ref nix_evaluator_example)* for an example.
- * @{
  */
 /** @file
  * @brief Main entry for the libexpr C bindings
  */
+/** @defgroup libexpr_init Initialization
+ * @ingroup libexpr
+ * @{
+ */
 
 #include "nix_api_store.h"
 #include "nix_api_util.h"
@@ -45,7 +48,10 @@ typedef struct nix_eval_state_builder nix_eval_state_builder;
  */
 typedef struct EvalState EvalState; // nix::EvalState
 
+/** @} */
+
 /** @brief A Nix language value, or thunk that may evaluate to a value.
+ * @ingroup value
  *
  * Values are the primary objects manipulated in the Nix language.
  * They are considered to be immutable from a user's perspective, but the process of evaluating a value changes its
  *
  * The evaluator manages its own memory, but your use of the C API must follow the reference counting rules.
  *
- * @see value_manip
+ * @struct nix_value
+ * @see value_create, value_extract
  * @see nix_value_incref, nix_value_decref
  */
 typedef struct nix_value nix_value;
 NIX_DEPRECATED("use nix_value instead") typedef nix_value Value;
 
 // Function prototypes
 /**
  * @brief Initialize the Nix language evaluator.
+ * @ingroup libexpr_init
  *
  * This function must be called at least once,
  * at some point before constructing a EvalState for the first time.
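/*
 * A sketch of the initialization order this header prescribes, using the
 * nix_eval_state_builder API documented in the hunks below. As above,
 * nix_store_open() and the context helpers come from nix_api_store.h and
 * nix_api_util.h rather than from this change, and error checking is elided.
 */
void example_builder_init(void)
{
    nix_c_context * ctx = nix_c_context_create();
    nix_libexpr_init(ctx);                                  /* at least once, before any EvalState */

    Store * store = nix_store_open(ctx, "auto", NULL);
    nix_eval_state_builder * builder = nix_eval_state_builder_new(ctx, store);
    nix_eval_state_builder_load(ctx, builder);              /* read settings from the environment */
    EvalState * state = nix_eval_state_build(ctx, builder); /* consumes the builder's contents */
    nix_eval_state_builder_free(builder);                   /* the spent builder must still be freed */

    /* ... evaluate with `state` ... */

    nix_state_free(state);
    nix_store_free(store);
    nix_c_context_free(ctx);
}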
@@ -77,6 +85,7 @@ nix_err nix_libexpr_init(nix_c_context * context); /** * @brief Parses and evaluates a Nix expression from a string. + * @ingroup value_create * * @param[out] context Optional, stores error information * @param[in] state The state of the evaluation. @@ -93,6 +102,7 @@ nix_err nix_expr_eval_from_string( /** * @brief Calls a Nix function with an argument. + * @ingroup value_create * * @param[out] context Optional, stores error information * @param[in] state The state of the evaluation. @@ -107,6 +117,7 @@ nix_err nix_value_call(nix_c_context * context, EvalState * state, nix_value * f /** * @brief Calls a Nix function with multiple arguments. + * @ingroup value_create * * Technically these are functions that return functions. It is common for Nix * functions to be curried, so this function is useful for calling them. @@ -126,10 +137,12 @@ nix_err nix_value_call_multi( /** * @brief Calls a Nix function with multiple arguments. + * @ingroup value_create * * Technically these are functions that return functions. It is common for Nix * functions to be curried, so this function is useful for calling them. * + * @def NIX_VALUE_CALL * @param[out] context Optional, stores error information * @param[in] state The state of the evaluation. * @param[out] value The result of the function call. @@ -147,6 +160,7 @@ nix_err nix_value_call_multi( /** * @brief Forces the evaluation of a Nix value. + * @ingroup value_create * * The Nix interpreter is lazy, and not-yet-evaluated values can be * of type NIX_TYPE_THUNK instead of their actual value. @@ -180,18 +194,20 @@ nix_err nix_value_force_deep(nix_c_context * context, EvalState * state, nix_val /** * @brief Create a new nix_eval_state_builder + * @ingroup libexpr_init * * The settings are initialized to their default value. * Values can be sourced elsewhere with nix_eval_state_builder_load. * * @param[out] context Optional, stores error information * @param[in] store The Nix store to use. - * @return A new nix_eval_state_builder or NULL on failure. + * @return A new nix_eval_state_builder or NULL on failure. Call nix_eval_state_builder_free() when you're done. */ nix_eval_state_builder * nix_eval_state_builder_new(nix_c_context * context, Store * store); /** * @brief Read settings from the ambient environment + * @ingroup libexpr_init * * Settings are sourced from environment variables and configuration files, * as documented in the Nix manual. @@ -204,6 +220,7 @@ nix_err nix_eval_state_builder_load(nix_c_context * context, nix_eval_state_buil /** * @brief Set the lookup path for `<...>` expressions + * @ingroup libexpr_init * * @param[in] context Optional, stores error information * @param[in] builder The builder to modify. @@ -214,18 +231,21 @@ nix_err nix_eval_state_builder_set_lookup_path( /** * @brief Create a new Nix language evaluator state + * @ingroup libexpr_init * - * Remember to nix_eval_state_builder_free after building the state. + * The builder becomes unusable after this call. Remember to call nix_eval_state_builder_free() + * after building the state. * * @param[out] context Optional, stores error information * @param[in] builder The builder to use and free - * @return A new Nix state or NULL on failure. + * @return A new Nix state or NULL on failure. Call nix_state_free() when you're done. 
* @see nix_eval_state_builder_new, nix_eval_state_builder_free */ EvalState * nix_eval_state_build(nix_c_context * context, nix_eval_state_builder * builder); /** * @brief Free a nix_eval_state_builder + * @ingroup libexpr_init * * Does not fail. * @@ -235,19 +255,21 @@ void nix_eval_state_builder_free(nix_eval_state_builder * builder); /** * @brief Create a new Nix language evaluator state + * @ingroup libexpr_init * * For more control, use nix_eval_state_builder * * @param[out] context Optional, stores error information * @param[in] lookupPath Null-terminated array of strings corresponding to entries in NIX_PATH. * @param[in] store The Nix store to use. - * @return A new Nix state or NULL on failure. + * @return A new Nix state or NULL on failure. Call nix_state_free() when you're done. * @see nix_state_builder_new */ EvalState * nix_state_create(nix_c_context * context, const char ** lookupPath, Store * store); /** * @brief Frees a Nix state. + * @ingroup libexpr_init * * Does not fail. * @@ -256,6 +278,7 @@ EvalState * nix_state_create(nix_c_context * context, const char ** lookupPath, void nix_state_free(EvalState * state); /** @addtogroup GC + * @ingroup libexpr * @brief Reference counting and garbage collector operations * * The Nix language evaluator uses a garbage collector. To ease C interop, we implement @@ -286,6 +309,9 @@ nix_err nix_gc_incref(nix_c_context * context, const void * object); /** * @brief Decrement the garbage collector reference counter for the given object * + * @deprecated We are phasing out the general nix_gc_decref() in favor of type-specified free functions, such as + * nix_value_decref(). + * * We also provide typed `nix_*_decref` functions, which are * - safer to use * - easier to integrate when deriving bindings @@ -314,12 +340,11 @@ void nix_gc_now(); */ void nix_gc_register_finalizer(void * obj, void * cd, void (*finalizer)(void * obj, void * cd)); -/** @} */ +/** @} */ // doxygen group GC + // cffi end #ifdef __cplusplus } #endif -/** @} */ - #endif // NIX_API_EXPR_H diff --git a/src/libexpr-c/nix_api_external.h b/src/libexpr-c/nix_api_external.h index f4a327281..96c479d57 100644 --- a/src/libexpr-c/nix_api_external.h +++ b/src/libexpr-c/nix_api_external.h @@ -2,11 +2,12 @@ #define NIX_API_EXTERNAL_H /** @ingroup libexpr * @addtogroup Externals - * @brief Deal with external values + * @brief Externals let Nix expressions work with foreign values that aren't part of the normal Nix value data model * @{ */ /** @file * @brief libexpr C bindings dealing with external values + * @see Externals */ #include "nix_api_expr.h" @@ -115,7 +116,7 @@ typedef struct NixCExternalValueDesc * @brief Try to compare two external values * * Optional, the default is always false. - * If the other object was not a Nix C external value, this comparison will + * If the other object was not a Nix C API external value, this comparison will * also return false * @param[in] self the void* passed to nix_create_external_value * @param[in] other the void* passed to the other object's @@ -168,7 +169,7 @@ typedef struct NixCExternalValueDesc /** * @brief Create an external value, that can be given to nix_init_external * - * Owned by the GC. Use nix_gc_decref when you're done with the pointer. + * Call nix_gc_decref() when you're done with the pointer. 
* * @param[out] context Optional, stores error information * @param[in] desc a NixCExternalValueDesc, you should keep this alive as long @@ -180,10 +181,11 @@ typedef struct NixCExternalValueDesc ExternalValue * nix_create_external_value(nix_c_context * context, NixCExternalValueDesc * desc, void * v); /** - * @brief Extract the pointer from a nix c external value. + * @brief Extract the pointer from a Nix C API external value. * @param[out] context Optional, stores error information * @param[in] b The external value - * @returns The pointer, or null if the external value was not from nix c. + * @returns The pointer, valid while the external value is valid, or null if the external value was not from the Nix C + * API. * @see nix_get_external */ void * nix_get_external_value_content(nix_c_context * context, ExternalValue * b); diff --git a/src/libexpr-c/nix_api_value.h b/src/libexpr-c/nix_api_value.h index 835eaec6e..5bd45da90 100644 --- a/src/libexpr-c/nix_api_value.h +++ b/src/libexpr-c/nix_api_value.h @@ -1,9 +1,6 @@ #ifndef NIX_API_VALUE_H #define NIX_API_VALUE_H -/** @addtogroup libexpr - * @{ - */ /** @file * @brief libexpr C bindings dealing with values */ @@ -20,18 +17,89 @@ extern "C" { #endif // cffi start +/** @defgroup value Value + * @ingroup libexpr + * @brief nix_value type and core operations for working with Nix values + * @see value_create + * @see value_extract + */ + +/** @defgroup value_create Value Creation + * @ingroup libexpr + * @brief Functions for allocating and initializing Nix values + * + * Values are usually created with `nix_alloc_value` followed by `nix_init_*` functions. + * In primop callbacks, allocation is already done and only initialization is needed. + */ + +/** @defgroup value_extract Value Extraction + * @ingroup libexpr + * @brief Functions for extracting data from Nix values + */ + +/** @defgroup primops PrimOps and Builtins + * @ingroup libexpr + */ + // Type definitions +/** @brief Represents the state of a Nix value + * + * Thunk values (NIX_TYPE_THUNK) change to their final, unchanging type when forced. + * + * @see https://nix.dev/manual/nix/latest/language/evaluation.html + * @enum ValueType + * @ingroup value + */ typedef enum { + /** Unevaluated expression + * + * Thunks often contain an expression and closure, but may contain other + * representations too. + * + * Their state is mutable, unlike that of the other types. + */ NIX_TYPE_THUNK, + /** + * A 64 bit signed integer. + */ NIX_TYPE_INT, + /** @brief IEEE 754 double precision floating point number + * @see https://nix.dev/manual/nix/latest/language/types.html#type-float + */ NIX_TYPE_FLOAT, + /** @brief Boolean true or false value + * @see https://nix.dev/manual/nix/latest/language/types.html#type-bool + */ NIX_TYPE_BOOL, + /** @brief String value with context + * + * String content may contain arbitrary bytes, not necessarily UTF-8. 
+ * @see https://nix.dev/manual/nix/latest/language/types.html#type-string + */ NIX_TYPE_STRING, + /** @brief Filesystem path + * @see https://nix.dev/manual/nix/latest/language/types.html#type-path + */ NIX_TYPE_PATH, + /** @brief Null value + * @see https://nix.dev/manual/nix/latest/language/types.html#type-null + */ NIX_TYPE_NULL, + /** @brief Attribute set (key-value mapping) + * @see https://nix.dev/manual/nix/latest/language/types.html#type-attrs + */ NIX_TYPE_ATTRS, + /** @brief Ordered list of values + * @see https://nix.dev/manual/nix/latest/language/types.html#type-list + */ NIX_TYPE_LIST, + /** @brief Function (lambda or builtin) + * @see https://nix.dev/manual/nix/latest/language/types.html#type-function + */ NIX_TYPE_FUNCTION, + /** @brief External value from C++ plugins or C API + * @see Externals + */ NIX_TYPE_EXTERNAL } ValueType; @@ -39,22 +107,41 @@ typedef enum { typedef struct nix_value nix_value; typedef struct EvalState EvalState; +/** @deprecated Use nix_value instead */ [[deprecated("use nix_value instead")]] typedef nix_value Value; // type defs /** @brief Stores an under-construction set of bindings - * @ingroup value_manip + * @ingroup value_create * - * Do not reuse. + * Each builder can only be used once. After calling nix_make_attrs(), the builder + * becomes invalid and must not be used again. Call nix_bindings_builder_free() to release it. + * + * Typical usage pattern: + * 1. Create with nix_make_bindings_builder() + * 2. Insert attributes with nix_bindings_builder_insert() + * 3. Create final attribute set with nix_make_attrs() + * 4. Free builder with nix_bindings_builder_free() + * + * @struct BindingsBuilder * @see nix_make_bindings_builder, nix_bindings_builder_free, nix_make_attrs * @see nix_bindings_builder_insert */ typedef struct BindingsBuilder BindingsBuilder; /** @brief Stores an under-construction list - * @ingroup value_manip + * @ingroup value_create * - * Do not reuse. + * Each builder can only be used once. After calling nix_make_list(), the builder + * becomes invalid and must not be used again. Call nix_list_builder_free() to release it. + * + * Typical usage pattern: + * 1. Create with nix_make_list_builder() + * 2. Insert elements with nix_list_builder_insert() + * 3. Create final list with nix_make_list() + * 4. Free builder with nix_list_builder_free() + * + * @struct ListBuilder * @see nix_make_list_builder, nix_list_builder_free, nix_make_list * @see nix_list_builder_insert */ @@ -63,25 +150,28 @@ typedef struct ListBuilder ListBuilder; /** @brief PrimOp function * @ingroup primops * - * Owned by the GC - * @see nix_alloc_primop, nix_init_primop + * Can be released with nix_gc_decref() when necessary. + * @struct PrimOp + * @see nix_alloc_primop, nix_init_primop, nix_register_primop */ typedef struct PrimOp PrimOp; /** @brief External Value * @ingroup Externals * - * Owned by the GC + * Can be released with nix_gc_decref() when necessary. + * @struct ExternalValue + * @see nix_create_external_value, nix_init_external, nix_get_external */ typedef struct ExternalValue ExternalValue; /** @brief String without placeholders, and realised store paths + * @struct nix_realised_string + * @see nix_string_realise, nix_realised_string_free */ typedef struct nix_realised_string nix_realised_string; -/** @defgroup primops Adding primops - * @{ - */ /** @brief Function pointer for primops + * @ingroup primops * * When you want to return an error, call nix_set_err_msg(context, NIX_ERR_UNKNOWN, "your error message here"). 
* @@ -97,9 +187,9 @@ typedef void (*PrimOpFun)( void * user_data, nix_c_context * context, EvalState * state, nix_value ** args, nix_value * ret); /** @brief Allocate a PrimOp + * @ingroup primops * - * Owned by the garbage collector. - * Use nix_gc_decref() when you're done with the returned PrimOp. + * Call nix_gc_decref() when you're done with the returned PrimOp. * * @param[out] context Optional, stores error information * @param[in] fun callback @@ -121,35 +211,38 @@ PrimOp * nix_alloc_primop( void * user_data); /** @brief add a primop to the `builtins` attribute set + * @ingroup primops * * Only applies to States created after this call. * - * Moves your PrimOp content into the global evaluator - * registry, meaning your input PrimOp pointer is no longer usable. - * You are free to remove your references to it, - * after which it will be garbage collected. + * Moves your PrimOp content into the global evaluator registry, meaning + * your input PrimOp pointer becomes invalid. The PrimOp must not be used + * with nix_init_primop() before or after this call, as this would cause + * undefined behavior. + * You must call nix_gc_decref() on the original PrimOp pointer + * after this call to release your reference. * * @param[out] context Optional, stores error information - * @return primop, or null in case of errors - * + * @param[in] primOp PrimOp to register + * @return error code, NIX_OK on success */ nix_err nix_register_primop(nix_c_context * context, PrimOp * primOp); -/** @} */ // Function prototypes /** @brief Allocate a Nix value + * @ingroup value_create * - * Owned by the GC. Use nix_gc_decref() when you're done with the pointer + * Call nix_value_decref() when you're done with the pointer * @param[out] context Optional, stores error information * @param[in] state nix evaluator state * @return value, or null in case of errors - * */ nix_value * nix_alloc_value(nix_c_context * context, EvalState * state); /** * @brief Increment the garbage collector reference counter for the given `nix_value`. + * @ingroup value * * The Nix language evaluator C API keeps track of alive objects by reference counting. * When you're done with a refcounted pointer, call nix_value_decref(). @@ -161,21 +254,19 @@ nix_err nix_value_incref(nix_c_context * context, nix_value * value); /** * @brief Decrement the garbage collector reference counter for the given object + * @ingroup value + * + * When the counter reaches zero, the `nix_value` object becomes invalid. + * The data referenced by `nix_value` may not be deallocated until the memory + * garbage collector has run, but deallocation is not guaranteed. * * @param[out] context Optional, stores error information * @param[in] value The object to stop referencing */ nix_err nix_value_decref(nix_c_context * context, nix_value * value); -/** @addtogroup value_manip Manipulating values - * @brief Functions to inspect and change Nix language values, represented by nix_value. 
- * @{ - */ -/** @anchor getters - * @name Getters - */ -/**@{*/ /** @brief Get value type + * @ingroup value_extract * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect * @return type of nix value @@ -183,14 +274,15 @@ nix_err nix_value_decref(nix_c_context * context, nix_value * value); ValueType nix_get_type(nix_c_context * context, const nix_value * value); /** @brief Get type name of value as defined in the evaluator + * @ingroup value_extract * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect - * @return type name, owned string - * @todo way to free the result + * @return type name string, free with free() */ const char * nix_get_typename(nix_c_context * context, const nix_value * value); /** @brief Get boolean value + * @ingroup value_extract * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect * @return true or false, error info via context @@ -198,6 +290,7 @@ const char * nix_get_typename(nix_c_context * context, const nix_value * value); bool nix_get_bool(nix_c_context * context, const nix_value * value); /** @brief Get the raw string + * @ingroup value_extract * * This may contain placeholders. * @@ -205,21 +298,21 @@ bool nix_get_bool(nix_c_context * context, const nix_value * value); * @param[in] value Nix value to inspect * @param[in] callback Called with the string value. * @param[in] user_data optional, arbitrary data, passed to the callback when it's called. - * @return string * @return error code, NIX_OK on success. */ nix_err nix_get_string(nix_c_context * context, const nix_value * value, nix_get_string_callback callback, void * user_data); /** @brief Get path as string + * @ingroup value_extract * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect - * @return string, if the type is NIX_TYPE_PATH - * @return NULL in case of error. 
+ * @return string valid while value is valid, NULL in case of error */ const char * nix_get_path_string(nix_c_context * context, const nix_value * value); /** @brief Get the length of a list + * @ingroup value_extract * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect * @return length of list, error info via context @@ -227,6 +320,7 @@ const char * nix_get_path_string(nix_c_context * context, const nix_value * valu unsigned int nix_get_list_size(nix_c_context * context, const nix_value * value); /** @brief Get the element count of an attrset + * @ingroup value_extract * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect * @return attrset element count, error info via context @@ -234,6 +328,7 @@ unsigned int nix_get_list_size(nix_c_context * context, const nix_value * value) unsigned int nix_get_attrs_size(nix_c_context * context, const nix_value * value); /** @brief Get float value in 64 bits + * @ingroup value_extract * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect * @return float contents, error info via context @@ -241,6 +336,7 @@ unsigned int nix_get_attrs_size(nix_c_context * context, const nix_value * value double nix_get_float(nix_c_context * context, const nix_value * value); /** @brief Get int value + * @ingroup value_extract * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect * @return int contents, error info via context @@ -248,15 +344,18 @@ double nix_get_float(nix_c_context * context, const nix_value * value); int64_t nix_get_int(nix_c_context * context, const nix_value * value); /** @brief Get external reference + * @ingroup value_extract * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect - * @return reference to external, NULL in case of error + * @return reference valid while value is valid. Call nix_gc_incref() if you need it to live longer, then only in that + * case call nix_gc_decref() when done. NULL in case of error */ ExternalValue * nix_get_external(nix_c_context * context, nix_value * value); /** @brief Get the ix'th element of a list + * @ingroup value_extract * - * Owned by the GC. Use nix_gc_decref when you're done with the pointer + * Call nix_value_decref() when you're done with the pointer * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect * @param[in] state nix evaluator state @@ -266,11 +365,12 @@ ExternalValue * nix_get_external(nix_c_context * context, nix_value * value); nix_value * nix_get_list_byidx(nix_c_context * context, const nix_value * value, EvalState * state, unsigned int ix); /** @brief Get the ix'th element of a list without forcing evaluation of the element + * @ingroup value_extract * * Returns the list element without forcing its evaluation, allowing access to lazy values. * The list value itself must already be evaluated. * - * Owned by the GC. 
Use nix_gc_decref when you're done with the pointer + * Call nix_value_decref() when you're done with the pointer * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect (must be an evaluated list) * @param[in] state nix evaluator state @@ -281,8 +381,9 @@ nix_value * nix_get_list_byidx_lazy(nix_c_context * context, const nix_value * value, EvalState * state, unsigned int ix); /** @brief Get an attr by name + * @ingroup value_extract * - * Use nix_gc_decref when you're done with the pointer + * Call nix_value_decref() when you're done with the pointer * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect * @param[in] state nix evaluator state @@ -292,11 +393,12 @@ nix_get_list_byidx_lazy(nix_c_context * context, const nix_value * value, EvalSt nix_value * nix_get_attr_byname(nix_c_context * context, const nix_value * value, EvalState * state, const char * name); /** @brief Get an attribute value by attribute name, without forcing evaluation of the attribute's value + * @ingroup value_extract * * Returns the attribute value without forcing its evaluation, allowing access to lazy values. * The attribute set value itself must already be evaluated. * - * Use nix_gc_decref when you're done with the pointer + * Call nix_value_decref() when you're done with the pointer * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect (must be an evaluated attribute set) * @param[in] state nix evaluator state @@ -307,6 +409,7 @@ nix_value * nix_get_attr_byname_lazy(nix_c_context * context, const nix_value * value, EvalState * state, const char * name); /** @brief Check if an attribute name exists on a value + * @ingroup value_extract * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect * @param[in] state nix evaluator state @@ -316,6 +419,7 @@ nix_get_attr_byname_lazy(nix_c_context * context, const nix_value * value, EvalS bool nix_has_attr_byname(nix_c_context * context, const nix_value * value, EvalState * state, const char * name); /** @brief Get an attribute by index + * @ingroup value_extract * * Also gives you the name. * @@ -329,18 +433,19 @@ bool nix_has_attr_byname(nix_c_context * context, const nix_value * value, EvalS * lexicographic order by Unicode scalar value for valid UTF-8). We recommend * applying this same ordering for consistency. * - * Use nix_gc_decref when you're done with the pointer + * Call nix_value_decref() when you're done with the pointer * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect * @param[in] state nix evaluator state * @param[in] i attribute index - * @param[out] name will store a pointer to the attribute name + * @param[out] name will store a pointer to the attribute name, valid until state is freed * @return value, NULL in case of errors */ nix_value * nix_get_attr_byidx(nix_c_context * context, nix_value * value, EvalState * state, unsigned int i, const char ** name); /** @brief Get an attribute by index, without forcing evaluation of the attribute's value + * @ingroup value_extract * * Also gives you the name. * @@ -357,18 +462,19 @@ nix_get_attr_byidx(nix_c_context * context, nix_value * value, EvalState * state * lexicographic order by Unicode scalar value for valid UTF-8). We recommend * applying this same ordering for consistency. 
* - * Use nix_gc_decref when you're done with the pointer + * Call nix_value_decref() when you're done with the pointer * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect (must be an evaluated attribute set) * @param[in] state nix evaluator state * @param[in] i attribute index - * @param[out] name will store a pointer to the attribute name + * @param[out] name will store a pointer to the attribute name, valid until state is freed * @return value, NULL in case of errors */ nix_value * nix_get_attr_byidx_lazy( nix_c_context * context, nix_value * value, EvalState * state, unsigned int i, const char ** name); /** @brief Get an attribute name by index + * @ingroup value_extract * * Returns the attribute name without forcing evaluation of the attribute's value. * @@ -382,16 +488,14 @@ nix_value * nix_get_attr_byidx_lazy( * lexicographic order by Unicode scalar value for valid UTF-8). We recommend * applying this same ordering for consistency. * - * Owned by the nix EvalState * @param[out] context Optional, stores error information * @param[in] value Nix value to inspect * @param[in] state nix evaluator state * @param[in] i attribute index - * @return name, NULL in case of errors + * @return name string valid until state is freed, NULL in case of errors */ const char * nix_get_attr_name_byidx(nix_c_context * context, nix_value * value, EvalState * state, unsigned int i); -/**@}*/ /** @name Initializers * * Values are typically "returned" by initializing already allocated memory that serves as the return value. @@ -401,6 +505,7 @@ const char * nix_get_attr_name_byidx(nix_c_context * context, nix_value * value, */ /**@{*/ /** @brief Set boolean value + * @ingroup value_create * @param[out] context Optional, stores error information * @param[out] value Nix value to modify * @param[in] b the boolean value @@ -409,6 +514,7 @@ const char * nix_get_attr_name_byidx(nix_c_context * context, nix_value * value, nix_err nix_init_bool(nix_c_context * context, nix_value * value, bool b); /** @brief Set a string + * @ingroup value_create * @param[out] context Optional, stores error information * @param[out] value Nix value to modify * @param[in] str the string, copied @@ -417,6 +523,7 @@ nix_err nix_init_bool(nix_c_context * context, nix_value * value, bool b); nix_err nix_init_string(nix_c_context * context, nix_value * value, const char * str); /** @brief Set a path + * @ingroup value_create * @param[out] context Optional, stores error information * @param[out] value Nix value to modify * @param[in] str the path string, copied @@ -425,6 +532,7 @@ nix_err nix_init_string(nix_c_context * context, nix_value * value, const char * nix_err nix_init_path_string(nix_c_context * context, EvalState * s, nix_value * value, const char * str); /** @brief Set a float + * @ingroup value_create * @param[out] context Optional, stores error information * @param[out] value Nix value to modify * @param[in] d the float, 64-bits @@ -433,6 +541,7 @@ nix_err nix_init_path_string(nix_c_context * context, EvalState * s, nix_value * nix_err nix_init_float(nix_c_context * context, nix_value * value, double d); /** @brief Set an int + * @ingroup value_create * @param[out] context Optional, stores error information * @param[out] value Nix value to modify * @param[in] i the int @@ -441,6 +550,7 @@ nix_err nix_init_float(nix_c_context * context, nix_value * value, double d); nix_err nix_init_int(nix_c_context * context, nix_value * value, int64_t i); /** @brief Set null + * @ingroup 
value_create * @param[out] context Optional, stores error information * @param[out] value Nix value to modify * @return error code, NIX_OK on success. @@ -448,6 +558,7 @@ nix_err nix_init_int(nix_c_context * context, nix_value * value, int64_t i); nix_err nix_init_null(nix_c_context * context, nix_value * value); /** @brief Set the value to a thunk that will perform a function application when needed. + * @ingroup value_create * * Thunks may be put into attribute sets and lists to perform some computation lazily; on demand. * However, note that in some places, a thunk must not be returned, such as in the return value of a PrimOp. @@ -464,6 +575,7 @@ nix_err nix_init_null(nix_c_context * context, nix_value * value); nix_err nix_init_apply(nix_c_context * context, nix_value * value, nix_value * fn, nix_value * arg); /** @brief Set an external value + * @ingroup value_create * @param[out] context Optional, stores error information * @param[out] value Nix value to modify * @param[in] val the external value to set. Will be GC-referenced by the value. @@ -472,18 +584,25 @@ nix_err nix_init_apply(nix_c_context * context, nix_value * value, nix_value * f nix_err nix_init_external(nix_c_context * context, nix_value * value, ExternalValue * val); /** @brief Create a list from a list builder + * @ingroup value_create + * + * After this call, the list builder becomes invalid and cannot be used again. + * The only necessary next step is to free it with nix_list_builder_free(). + * * @param[out] context Optional, stores error information - * @param[in] list_builder list builder to use. Make sure to unref this afterwards. + * @param[in] list_builder list builder to use * @param[out] value Nix value to modify * @return error code, NIX_OK on success. + * @see nix_list_builder_free */ nix_err nix_make_list(nix_c_context * context, ListBuilder * list_builder, nix_value * value); /** @brief Create a list builder + * @ingroup value_create * @param[out] context Optional, stores error information * @param[in] state nix evaluator state * @param[in] capacity how many bindings you'll add. Don't exceed. - * @return owned reference to a list builder. Make sure to unref when you're done. + * @return list builder. Call nix_list_builder_free() when you're done. */ ListBuilder * nix_make_list_builder(nix_c_context * context, EvalState * state, size_t capacity); @@ -505,14 +624,21 @@ nix_list_builder_insert(nix_c_context * context, ListBuilder * list_builder, uns void nix_list_builder_free(ListBuilder * list_builder); /** @brief Create an attribute set from a bindings builder + * @ingroup value_create + * + * After this call, the bindings builder becomes invalid and cannot be used again. + * The only necessary next step is to free it with nix_bindings_builder_free(). + * * @param[out] context Optional, stores error information * @param[out] value Nix value to modify - * @param[in] b bindings builder to use. Make sure to unref this afterwards. + * @param[in] b bindings builder to use * @return error code, NIX_OK on success. 
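+ *
+ * A minimal usage sketch (illustrative, not normative; assumes `ctx`, `state` and an
+ * allocated `value` already exist, and that nix_alloc_value() behaves as elsewhere in
+ * this API; error checking omitted):
+ * @code{.c}
+ * BindingsBuilder * bb = nix_make_bindings_builder(ctx, state, 1);
+ * nix_value * answer = nix_alloc_value(ctx, state);
+ * nix_init_int(ctx, answer, 42);
+ * nix_bindings_builder_insert(ctx, bb, "answer", answer);
+ * nix_make_attrs(ctx, value, bb); // value now holds { answer = 42; }
+ * nix_bindings_builder_free(bb);  // builder is spent; freeing is the only valid next step
+ * @endcode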
+ * @see nix_bindings_builder_free */ nix_err nix_make_attrs(nix_c_context * context, nix_value * value, BindingsBuilder * b); /** @brief Set primop + * @ingroup value_create * @param[out] context Optional, stores error information * @param[out] value Nix value to modify * @param[in] op primop, will be gc-referenced by the value @@ -521,6 +647,7 @@ nix_err nix_make_attrs(nix_c_context * context, nix_value * value, BindingsBuild */ nix_err nix_init_primop(nix_c_context * context, nix_value * value, PrimOp * op); /** @brief Copy from another value + * @ingroup value_create * @param[out] context Optional, stores error information * @param[out] value Nix value to modify * @param[in] source value to copy from @@ -530,12 +657,11 @@ nix_err nix_copy_value(nix_c_context * context, nix_value * value, const nix_val /**@}*/ /** @brief Create a bindings builder -* @param[out] context Optional, stores error information -* @param[in] state nix evaluator state -* @param[in] capacity how many bindings you'll add. Don't exceed. -* @return owned reference to a bindings builder. Make sure to unref when you're -done. -*/ + * @param[out] context Optional, stores error information + * @param[in] state nix evaluator state + * @param[in] capacity how many bindings you'll add. Don't exceed. + * @return bindings builder. Call nix_bindings_builder_free() when you're done. + */ BindingsBuilder * nix_make_bindings_builder(nix_c_context * context, EvalState * state, size_t capacity); /** @brief Insert bindings into a builder @@ -554,7 +680,6 @@ nix_bindings_builder_insert(nix_c_context * context, BindingsBuilder * builder, * @param[in] builder the builder to free */ void nix_bindings_builder_free(BindingsBuilder * builder); -/**@}*/ /** @brief Realise a string context. * @@ -571,13 +696,13 @@ void nix_bindings_builder_free(BindingsBuilder * builder); * @param[in] isIFD If true, disallow derivation outputs if setting `allow-import-from-derivation` is false. You should set this to true when this call is part of a primop. You should set this to false when building for your application's purpose. - * @return NULL if failed, are a new nix_realised_string, which must be freed with nix_realised_string_free + * @return NULL if failed, or a new nix_realised_string, which must be freed with nix_realised_string_free */ nix_realised_string * nix_string_realise(nix_c_context * context, EvalState * state, nix_value * value, bool isIFD); /** @brief Start of the string * @param[in] realised_string - * @return pointer to the start of the string. It may not be null-terminated. + * @return pointer to the start of the string, valid until realised_string is freed. It may not be null-terminated. */ const char * nix_realised_string_get_buffer_start(nix_realised_string * realised_string); @@ -596,7 +721,7 @@ size_t nix_realised_string_get_store_path_count(nix_realised_string * realised_s /** @brief Get a store path. The store paths are stored in an arbitrary order. 
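+ *
+ * Illustrative end-to-end sketch of the realised-string API (assumes `ctx`, `state`
+ * and a string-typed `value`; error checking omitted):
+ * @code{.c}
+ * nix_realised_string * rs = nix_string_realise(ctx, state, value, false);
+ * size_t n = nix_realised_string_get_store_path_count(rs);
+ * for (size_t i = 0; i < n; i++) {
+ *     const StorePath * sp = nix_realised_string_get_store_path(rs, i);
+ *     // use sp here; it is only valid while rs is alive
+ * }
+ * nix_realised_string_free(rs);
+ * @endcode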
* @param[in] realised_string * @param[in] index index of the store path, must be less than the count - * @return store path + * @return store path valid until realised_string is freed */ const StorePath * nix_realised_string_get_store_path(nix_realised_string * realised_string, size_t index); @@ -610,5 +735,4 @@ void nix_realised_string_free(nix_realised_string * realised_string); } #endif -/** @} */ #endif // NIX_API_VALUE_H diff --git a/src/libexpr-test-support/include/nix/expr/tests/libexpr.hh b/src/libexpr-test-support/include/nix/expr/tests/libexpr.hh index 4cf985e15..a1320e14a 100644 --- a/src/libexpr-test-support/include/nix/expr/tests/libexpr.hh +++ b/src/libexpr-test-support/include/nix/expr/tests/libexpr.hh @@ -26,11 +26,20 @@ public: } protected: - LibExprTest() + LibExprTest(ref store, auto && makeEvalSettings) : LibStoreTest() + , evalSettings(makeEvalSettings(readOnlyMode)) , state({}, store, fetchSettings, evalSettings, nullptr) { - evalSettings.nixPath = {}; + } + + LibExprTest() + : LibExprTest(openStore("dummy://"), [](bool & readOnlyMode) { + EvalSettings settings{readOnlyMode}; + settings.nixPath = {}; + return settings; + }) + { } Value eval(std::string input, bool forceValue = true) diff --git a/src/libexpr-test-support/meson.build b/src/libexpr-test-support/meson.build index 01a3f3bcb..df28661b7 100644 --- a/src/libexpr-test-support/meson.build +++ b/src/libexpr-test-support/meson.build @@ -31,7 +31,6 @@ rapidcheck = dependency('rapidcheck') deps_public += rapidcheck subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'tests/value/context.cc', diff --git a/src/libexpr-tests/eval.cc b/src/libexpr-tests/eval.cc index ad70ea5b8..7562a9da2 100644 --- a/src/libexpr-tests/eval.cc +++ b/src/libexpr-tests/eval.cc @@ -3,6 +3,7 @@ #include "nix/expr/eval.hh" #include "nix/expr/tests/libexpr.hh" +#include "nix/util/memory-source-accessor.hh" namespace nix { @@ -174,4 +175,41 @@ TEST_F(EvalStateTest, getBuiltin_fail) ASSERT_THROW(state.getBuiltin("nonexistent"), EvalError); } +class PureEvalTest : public LibExprTest +{ +public: + PureEvalTest() + : LibExprTest(openStore("dummy://", {{"read-only", "false"}}), [](bool & readOnlyMode) { + EvalSettings settings{readOnlyMode}; + settings.pureEval = true; + settings.restrictEval = true; + return settings; + }) + { + } +}; + +TEST_F(PureEvalTest, pathExists) +{ + ASSERT_THAT(eval("builtins.pathExists /."), IsFalse()); + ASSERT_THAT(eval("builtins.pathExists /nix"), IsFalse()); + ASSERT_THAT(eval("builtins.pathExists /nix/store"), IsFalse()); + + { + std::string contents = "Lorem ipsum"; + + StringSource s{contents}; + auto path = state.store->addToStoreFromDump( + s, "source", FileSerialisationMethod::Flat, ContentAddressMethod::Raw::Text, HashAlgorithm::SHA256); + auto printed = store->printStorePath(path); + + ASSERT_THROW(eval(fmt("builtins.readFile %s", printed)), RestrictedPathError); + ASSERT_THAT(eval(fmt("builtins.pathExists %s", printed)), IsFalse()); + + ASSERT_THROW(eval("builtins.readDir /."), RestrictedPathError); + state.allowPath(path); // FIXME: This shouldn't behave this way. 
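+ // Once the path is allowed, restricted evaluation no longer throws for it; as the FIXME above notes, this also (surprisingly) makes the readDir of "/." below succeed, returning an empty attrset on this dummy store.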
+ ASSERT_THAT(eval("builtins.readDir /."), IsAttrsOfSize(0)); + } +} + } // namespace nix diff --git a/src/libexpr-tests/meson.build b/src/libexpr-tests/meson.build index 7f7c08955..c5dafe0de 100644 --- a/src/libexpr-tests/meson.build +++ b/src/libexpr-tests/meson.build @@ -45,7 +45,6 @@ config_priv_h = configure_file( ) subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'derived-path.cc', @@ -83,7 +82,7 @@ this_exe = executable( test( meson.project_name(), this_exe, - env : asan_test_options_env + { + env : { '_NIX_TEST_UNIT_DATA' : meson.current_source_dir() / 'data', }, protocol : 'gtest', diff --git a/src/libexpr-tests/package.nix b/src/libexpr-tests/package.nix index c36aa2dc7..51d52e935 100644 --- a/src/libexpr-tests/package.nix +++ b/src/libexpr-tests/package.nix @@ -62,7 +62,6 @@ mkMesonExecutable (finalAttrs: { mkdir -p "$HOME" '' + '' - export ASAN_OPTIONS=abort_on_error=1:print_summary=1:detect_leaks=0 export _NIX_TEST_UNIT_DATA=${resolvePath ./data} ${stdenv.hostPlatform.emulator buildPackages} ${lib.getExe finalAttrs.finalPackage} touch $out diff --git a/src/libexpr-tests/primops.cc b/src/libexpr-tests/primops.cc index 74d676844..f00b4f475 100644 --- a/src/libexpr-tests/primops.cc +++ b/src/libexpr-tests/primops.cc @@ -771,7 +771,7 @@ TEST_F(PrimOpTest, derivation) ASSERT_EQ(v.type(), nFunction); ASSERT_TRUE(v.isLambda()); ASSERT_NE(v.lambda().fun, nullptr); - ASSERT_TRUE(v.lambda().fun->hasFormals()); + ASSERT_TRUE(v.lambda().fun->getFormals()); } TEST_F(PrimOpTest, currentTime) diff --git a/src/libexpr-tests/trivial.cc b/src/libexpr-tests/trivial.cc index a287ce4d1..d112c269a 100644 --- a/src/libexpr-tests/trivial.cc +++ b/src/libexpr-tests/trivial.cc @@ -1,4 +1,5 @@ #include "nix/expr/tests/libexpr.hh" +#include "nix/util/tests/gmock-matchers.hh" namespace nix { // Testing of trivial expressions @@ -160,7 +161,8 @@ TEST_F(TrivialExpressionTest, assertPassed) ASSERT_THAT(v, IsIntEq(123)); } -class AttrSetMergeTrvialExpressionTest : public TrivialExpressionTest, public testing::WithParamInterface +class AttrSetMergeTrvialExpressionTest : public TrivialExpressionTest, + public ::testing::WithParamInterface {}; TEST_P(AttrSetMergeTrvialExpressionTest, attrsetMergeLazy) @@ -196,7 +198,7 @@ TEST_P(AttrSetMergeTrvialExpressionTest, attrsetMergeLazy) INSTANTIATE_TEST_SUITE_P( attrsetMergeLazy, AttrSetMergeTrvialExpressionTest, - testing::Values("{ a.b = 1; a.c = 2; }", "{ a = { b = 1; }; a = { c = 2; }; }")); + ::testing::Values("{ a.b = 1; a.c = 2; }", "{ a = { b = 1; }; a = { c = 2; }; }")); // The following macros ultimately define 48 tests (16 variations on three // templates). 
Each template tests an expression that can be written in 2^4 @@ -339,4 +341,18 @@ TEST_F(TrivialExpressionTest, orCantBeUsed) { ASSERT_THROW(eval("let or = 1; in or"), Error); } + +TEST_F(TrivialExpressionTest, tooManyFormals) +{ + std::string expr = "let f = { "; + for (uint32_t i = 0; i <= std::numeric_limits<uint16_t>::max(); ++i) { + expr += fmt("arg%d, ", i); + } + expr += " }: 0; in f {}"; + ASSERT_THAT( + [&]() { eval(expr); }, + ::testing::ThrowsMessage<nix::Error>(::nix::testing::HasSubstrIgnoreANSIMatcher( + "too many formal arguments, implementation supports at most 65535"))); +} + } /* namespace nix */ diff --git a/src/libexpr-tests/value/print.cc b/src/libexpr-tests/value/print.cc index 1959fddf2..0006da2ff 100644 --- a/src/libexpr-tests/value/print.cc +++ b/src/libexpr-tests/value/print.cc @@ -10,7 +10,7 @@ using namespace testing; struct ValuePrintingTests : LibExprTest { template<class... A> - void test(Value v, std::string_view expected, A... args) + void test(Value & v, std::string_view expected, A... args) { std::stringstream out; v.print(state, out, args...); @@ -110,9 +110,8 @@ TEST_F(ValuePrintingTests, vLambda) PosTable::Origin origin = state.positions.addOrigin(std::monostate(), 1); auto posIdx = state.positions.add(origin, 0); auto body = ExprInt(0); - auto formals = Formals{}; - ExprLambda eLambda(posIdx, createSymbol("a"), &formals, &body); + ExprLambda eLambda(posIdx, createSymbol("a"), &body); Value vLambda; vLambda.mkLambda(&env, &eLambda); @@ -500,9 +499,8 @@ TEST_F(ValuePrintingTests, ansiColorsLambda) PosTable::Origin origin = state.positions.addOrigin(std::monostate(), 1); auto posIdx = state.positions.add(origin, 0); auto body = ExprInt(0); - auto formals = Formals{}; - ExprLambda eLambda(posIdx, createSymbol("a"), &formals, &body); + ExprLambda eLambda(posIdx, createSymbol("a"), &body); Value vLambda; vLambda.mkLambda(&env, &eLambda); @@ -625,10 +623,11 @@ TEST_F(ValuePrintingTests, ansiColorsAttrsElided) vThree.mkInt(3); builder.insert(state.symbols.create("three"), &vThree); - vAttrs.mkAttrs(builder.finish()); + Value vAttrs2; + vAttrs2.mkAttrs(builder.finish()); test( - vAttrs, + vAttrs2, "{ one = " ANSI_CYAN "1" ANSI_NORMAL "; " ANSI_FAINT "«2 attributes elided»" ANSI_NORMAL " }", PrintOptions{.ansiColors = true, .maxAttrs = 1}); } diff --git a/src/libexpr/eval-profiler.cc b/src/libexpr/eval-profiler.cc index ba92faf18..e9dc1e021 100644 --- a/src/libexpr/eval-profiler.cc +++ b/src/libexpr/eval-profiler.cc @@ -324,7 +324,7 @@ void SampleStack::saveProfile() std::visit([&](auto && info) { info.symbolize(state, os, posCache); }, pos); } os << " " << count; - writeLine(profileFd.get(), std::move(os).str()); + writeLine(profileFd.get(), os.str()); /* Clear ostringstream. */ os.str(""); os.clear(); diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 2df373520..873b88986 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -236,24 +236,17 @@ EvalState::EvalState( {CanonPath(store->storeDir), store->getFSAccessor(settings.pureEval)}, })) , rootFS([&] { - auto accessor = [&]() -> decltype(rootFS) { - /* In pure eval mode, we provide a filesystem that only - contains the Nix store. */ - if (settings.pureEval) - return storeFS; + /* In pure eval mode, we provide a filesystem that only + contains the Nix store. - /* If we have a chroot store and pure eval is not enabled, - use a union accessor to make the chroot store available - at its logical location while still having the underlying - directory available.
This is necessary for instance if - we're evaluating a file from the physical /nix/store - while using a chroot store. */ - auto realStoreDir = dirOf(store->toRealPath(StorePath::dummy)); - if (store->storeDir != realStoreDir) - return makeUnionSourceAccessor({getFSSourceAccessor(), storeFS}); - - return getFSSourceAccessor(); - }(); + Otherwise, use a union accessor to make the augmented store + available at its logical location while still having the + underlying directory available. This is necessary for + instance if we're evaluating a file from the physical + /nix/store while using a chroot store, and also for lazy + mounted fetchTree. */ + auto accessor = settings.pureEval ? storeFS.cast() + : makeUnionSourceAccessor({getFSSourceAccessor(), storeFS}); /* Apply access control if needed. */ if (settings.restrictEval || settings.pureEval) @@ -268,7 +261,7 @@ EvalState::EvalState( }()) , corepkgsFS(make_ref()) , internalFS(make_ref()) - , derivationInternal{corepkgsFS->addFile( + , derivationInternal{internalFS->addFile( CanonPath("derivation-internal.nix"), #include "primops/derivation.nix.gen.hh" )} @@ -591,7 +584,7 @@ std::optional EvalState::getDoc(Value & v) .name = name, .arity = 0, // FIXME: figure out how deep by syntax only? It's not semantically useful though... .args = {}, - .doc = makeImmutableString(toView(s)), // NOTE: memory leak when compiled without GC + .doc = makeImmutableString(s.view()), // NOTE: memory leak when compiled without GC }; } if (isFunctor(v)) { @@ -1341,7 +1334,7 @@ void ExprVar::eval(EvalState & state, Env & env, Value & v) v = *v2; } -static std::string showAttrPath(EvalState & state, Env & env, const AttrPath & attrPath) +static std::string showAttrPath(EvalState & state, Env & env, std::span attrPath) { std::ostringstream out; bool first = true; @@ -1377,10 +1370,10 @@ void ExprSelect::eval(EvalState & state, Env & env, Value & v) env, getPos(), "while evaluating the attribute '%1%'", - showAttrPath(state, env, attrPath)) + showAttrPath(state, env, getAttrPath())) : nullptr; - for (auto & i : attrPath) { + for (auto & i : getAttrPath()) { state.nrLookups++; const Attr * j; auto name = getName(i, state, env); @@ -1418,7 +1411,7 @@ void ExprSelect::eval(EvalState & state, Env & env, Value & v) auto origin = std::get_if(&pos2r.origin); if (!(origin && *origin == state.derivationInternal)) state.addErrorTrace( - e, pos2, "while evaluating the attribute '%1%'", showAttrPath(state, env, attrPath)); + e, pos2, "while evaluating the attribute '%1%'", showAttrPath(state, env, getAttrPath())); } throw; } @@ -1429,13 +1422,13 @@ void ExprSelect::eval(EvalState & state, Env & env, Value & v) Symbol ExprSelect::evalExceptFinalSelect(EvalState & state, Env & env, Value & attrs) { Value vTmp; - Symbol name = getName(attrPath[attrPath.size() - 1], state, env); + Symbol name = getName(attrPathStart[nAttrPath - 1], state, env); - if (attrPath.size() == 1) { + if (nAttrPath == 1) { e->eval(state, env, vTmp); } else { ExprSelect init(*this); - init.attrPath.pop_back(); + init.nAttrPath--; init.eval(state, env, vTmp); } attrs = vTmp; @@ -1503,15 +1496,13 @@ void EvalState::callFunction(Value & fun, std::span args, Value & vRes, ExprLambda & lambda(*vCur.lambda().fun); - auto size = (!lambda.arg ? 0 : 1) + (lambda.hasFormals() ? lambda.formals->formals.size() : 0); + auto size = (!lambda.arg ? 0 : 1) + (lambda.getFormals() ? 
lambda.getFormals()->formals.size() : 0); Env & env2(mem.allocEnv(size)); env2.up = vCur.lambda().env; Displacement displ = 0; - if (!lambda.hasFormals()) - env2.values[displ++] = args[0]; - else { + if (auto formals = lambda.getFormals()) { try { forceAttrs(*args[0], lambda.pos, "while evaluating the value passed for the lambda argument"); } catch (Error & e) { @@ -1527,7 +1518,7 @@ void EvalState::callFunction(Value & fun, std::span args, Value & vRes, there is no matching actual argument but the formal argument has a default, use the default. */ size_t attrsUsed = 0; - for (auto & i : lambda.formals->formals) { + for (auto & i : formals->formals) { auto j = args[0]->attrs()->get(i.name); if (!j) { if (!i.def) { @@ -1549,13 +1540,13 @@ void EvalState::callFunction(Value & fun, std::span args, Value & vRes, /* Check that each actual argument is listed as a formal argument (unless the attribute match specifies a `...'). */ - if (!lambda.formals->ellipsis && attrsUsed != args[0]->attrs()->size()) { + if (!formals->ellipsis && attrsUsed != args[0]->attrs()->size()) { /* Nope, so show the first unexpected argument to the user. */ for (auto & i : *args[0]->attrs()) - if (!lambda.formals->has(i.name)) { + if (!formals->has(i.name)) { StringSet formalNames; - for (auto & formal : lambda.formals->formals) + for (auto & formal : formals->formals) formalNames.insert(std::string(symbols[formal.name])); auto suggestions = Suggestions::bestMatches(formalNames, symbols[i.name]); error( @@ -1570,6 +1561,8 @@ void EvalState::callFunction(Value & fun, std::span args, Value & vRes, } unreachable(); } + } else { + env2.values[displ++] = args[0]; } nrFunctionCalls++; @@ -1754,14 +1747,15 @@ void EvalState::autoCallFunction(const Bindings & args, Value & fun, Value & res } } - if (!fun.isLambda() || !fun.lambda().fun->hasFormals()) { + if (!fun.isLambda() || !fun.lambda().fun->getFormals()) { res = fun; return; } + auto formals = fun.lambda().fun->getFormals(); - auto attrs = buildBindings(std::max(static_cast(fun.lambda().fun->formals->formals.size()), args.size())); + auto attrs = buildBindings(std::max(static_cast(formals->formals.size()), args.size())); - if (fun.lambda().fun->formals->ellipsis) { + if (formals->ellipsis) { // If the formals have an ellipsis (eg the function accepts extra args) pass // all available automatic arguments (which includes arguments specified on // the command line via --arg/--argstr) @@ -1769,7 +1763,7 @@ void EvalState::autoCallFunction(const Bindings & args, Value & fun, Value & res attrs.insert(v); } else { // Otherwise, only pass the arguments that the function accepts - for (auto & i : fun.lambda().fun->formals->formals) { + for (auto & i : formals->formals) { auto j = args.get(i.name); if (j) { attrs.insert(*j); @@ -1811,7 +1805,7 @@ void ExprAssert::eval(EvalState & state, Env & env, Value & v) if (!state.evalBool(env, cond, pos, "in the condition of the assert statement")) { std::ostringstream out; cond->show(state.symbols, out); - auto exprStr = toView(out); + auto exprStr = out.view(); if (auto eq = dynamic_cast(cond)) { try { @@ -2027,7 +2021,7 @@ void EvalState::concatLists( void ExprConcatStrings::eval(EvalState & state, Env & env, Value & v) { NixStringContext context; - std::vector s; + std::vector strings; size_t sSize = 0; NixInt n{0}; NixFloat nf = 0; @@ -2035,32 +2029,11 @@ void ExprConcatStrings::eval(EvalState & state, Env & env, Value & v) bool first = !forceString; ValueType firstType = nString; - const auto str = [&] { - std::string result; - 
result.reserve(sSize); - for (const auto & part : s) - result += *part; - return result; - }; - /* c_str() is not str().c_str() because we want to create a string - Value. allocating a GC'd string directly and moving it into a - Value lets us avoid an allocation and copy. */ - const auto c_str = [&] { - char * result = allocString(sSize + 1); - char * tmp = result; - for (const auto & part : s) { - memcpy(tmp, part->data(), part->size()); - tmp += part->size(); - } - *tmp = 0; - return result; - }; - // List of returned strings. References to these Values must NOT be persisted. - SmallTemporaryValueVector values(es->size()); + SmallTemporaryValueVector values(es.size()); Value * vTmpP = values.data(); - for (auto & [i_pos, i] : *es) { + for (auto & [i_pos, i] : es) { Value & vTmp = *vTmpP++; i->eval(state, env, vTmp); @@ -2103,33 +2076,46 @@ void ExprConcatStrings::eval(EvalState & state, Env & env, Value & v) .withFrame(env, *this) .debugThrow(); } else { - if (s.empty()) - s.reserve(es->size()); + if (strings.empty()) + strings.reserve(es.size()); /* skip canonization of first path, which would only be not canonized in the first place if it's coming from a ./${foo} type path */ auto part = state.coerceToString( i_pos, vTmp, context, "while evaluating a path segment", false, firstType == nString, !first); sSize += part->size(); - s.emplace_back(std::move(part)); + strings.emplace_back(std::move(part)); } first = false; } - if (firstType == nInt) + if (firstType == nInt) { v.mkInt(n); - else if (firstType == nFloat) + } else if (firstType == nFloat) { v.mkFloat(nf); - else if (firstType == nPath) { + } else if (firstType == nPath) { if (!context.empty()) state.error("a string that refers to a store path cannot be appended to a path") .atPos(pos) .withFrame(env, *this) .debugThrow(); - v.mkPath(state.rootPath(CanonPath(str()))); - } else - v.mkStringMove(c_str(), context); + std::string result_str; + result_str.reserve(sSize); + for (const auto & part : strings) { + result_str += *part; + } + v.mkPath(state.rootPath(CanonPath(result_str))); + } else { + char * result_str = allocString(sSize + 1); + char * tmp = result_str; + for (const auto & part : strings) { + memcpy(tmp, part->data(), part->size()); + tmp += part->size(); + } + *tmp = 0; + v.mkStringMove(result_str, context); + } } void ExprPos::eval(EvalState & state, Env & env, Value & v) @@ -2167,30 +2153,28 @@ void EvalState::forceValueDeep(Value & v) { std::set seen; - std::function recurse; - - recurse = [&](Value & v) { + [&, &state(*this)](this const auto & recurse, Value & v) { if (!seen.insert(&v).second) return; - forceValue(v, v.determinePos(noPos)); + state.forceValue(v, v.determinePos(noPos)); if (v.type() == nAttrs) { for (auto & i : *v.attrs()) try { // If the value is a thunk, we're evaling. Otherwise no trace necessary. - auto dts = debugRepl && i.value->isThunk() ? makeDebugTraceStacker( - *this, - *i.value->thunk().expr, - *i.value->thunk().env, - i.pos, - "while evaluating the attribute '%1%'", - symbols[i.name]) - : nullptr; + auto dts = state.debugRepl && i.value->isThunk() ? 
makeDebugTraceStacker( + state, + *i.value->thunk().expr, + *i.value->thunk().env, + i.pos, + "while evaluating the attribute '%1%'", + state.symbols[i.name]) + : nullptr; recurse(*i.value); } catch (Error & e) { - addErrorTrace(e, i.pos, "while evaluating the attribute '%1%'", symbols[i.name]); + state.addErrorTrace(e, i.pos, "while evaluating the attribute '%1%'", state.symbols[i.name]); throw; } } @@ -2199,9 +2183,7 @@ void EvalState::forceValueDeep(Value & v) for (auto v2 : v.listView()) recurse(*v2); } - }; - - recurse(v); + }(v); } NixInt EvalState::forceInt(Value & v, const PosIdx pos, std::string_view errorCtx) @@ -3074,7 +3056,7 @@ Expr * EvalState::parseExprFromFile(const SourcePath & path) return parseExprFromFile(path, staticBaseEnv); } -Expr * EvalState::parseExprFromFile(const SourcePath & path, std::shared_ptr<StaticEnv> & staticEnv) +Expr * EvalState::parseExprFromFile(const SourcePath & path, const std::shared_ptr<StaticEnv> & staticEnv) { auto buffer = path.resolveSymlinks().readFile(); // readFile hopefully has left some extra space for terminators @@ -3082,8 +3064,8 @@ Expr * EvalState::parseExprFromFile(const SourcePath & path, std::shared_ptr<StaticEnv> & staticEnv) +Expr * EvalState::parseExprFromString( + std::string s_, const SourcePath & basePath, const std::shared_ptr<StaticEnv> & staticEnv) { // NOTE this method (and parseStdin) must take care to *fully copy* their input // into their respective Pos::Origin until the parser stops overwriting its input @@ -3133,6 +3115,11 @@ SourcePath EvalState::findFile(const LookupPath & lookupPath, const std::string_ auto res = (r / CanonPath(suffix)).resolveSymlinks(); if (res.pathExists()) return res; + + // Backward compatibility hack: throw an exception if access + // to this path is not allowed. + if (auto accessor = res.accessor.dynamic_pointer_cast<FilteringSourceAccessor>()) + accessor->checkAccess(res.path); } if (hasPrefix(path, "nix/")) @@ -3199,6 +3186,11 @@ std::optional<SourcePath> EvalState::resolveLookupPathPath(const LookupPath::Pat if (path.resolveSymlinks().pathExists()) return finish(std::move(path)); else { + // Backward compatibility hack: throw an exception if access + // to this path is not allowed. + if (auto accessor = path.accessor.dynamic_pointer_cast<FilteringSourceAccessor>()) + accessor->checkAccess(path.path); + logWarning({.msg = HintFmt("Nix search path entry '%1%' does not exist, ignoring", value)}); } } @@ -3207,7 +3199,11 @@ } Expr * EvalState::parse( - char * text, size_t length, Pos::Origin origin, const SourcePath & basePath, std::shared_ptr<StaticEnv> & staticEnv) + char * text, + size_t length, + Pos::Origin origin, + const SourcePath & basePath, + const std::shared_ptr<StaticEnv> & staticEnv) { DocCommentMap tmpDocComments; // Only used when origin is not a SourcePath DocCommentMap * docComments = &tmpDocComments; diff --git a/src/libexpr/include/nix/expr/attr-set.hh b/src/libexpr/include/nix/expr/attr-set.hh index 46eecd9bd..f57302c42 100644 --- a/src/libexpr/include/nix/expr/attr-set.hh +++ b/src/libexpr/include/nix/expr/attr-set.hh @@ -5,6 +5,7 @@ #include "nix/expr/symbol-table.hh" #include +#include #include #include @@ -463,12 +464,48 @@ private: return bindings->baseLayer; } + /** + * If a bindings object gets "layered" on top of another, we need to recalculate + * the number of unique attributes in the chain. + * + * This is done by either iterating over the base "layer" and the newly added + * attributes and counting duplicates.
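+ * For example (illustrative): layering two new attrs {a, b} on a base of
+ * {b, c, d} finds one duplicate (b), so numAttrsInChain = 3 + 2 - 1 = 4.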
If the base "layer" is big this approach + * is inefficient and we fall back to doing per-element binary search in the base + * "layer". + */ void finishSizeIfNecessary() { - if (hasBaseLayer()) - /* NOTE: Do not use std::ranges::distance, since Bindings is a sized - range, but we are calculating this size here. */ - bindings->numAttrsInChain = std::distance(bindings->begin(), bindings->end()); + if (!hasBaseLayer()) + return; + + auto & base = *bindings->baseLayer; + auto attrs = std::span(bindings->attrs, bindings->numAttrs); + + Bindings::size_type duplicates = 0; + + /* If the base bindings object is smaller than the newly added attributes, + iterate using std::set_intersection to run in O(|base| + |attrs|) = + O(|attrs|). Otherwise use an O(|attrs| * log(|base|)) per-attr binary + search to check for duplicates. Note that if we are in this code path then + |attrs| <= bindingsUpdateLayerRhsSizeThreshold, which is 16 by default. We are + optimizing for the case when a small attribute set gets "layered" on top of + a much larger one. When attrsets are already small it's fine to do a linear + scan, but we should avoid expensive iterations over large "base" attrsets. */ + if (attrs.size() > base.size()) { + std::set_intersection( + base.begin(), + base.end(), + attrs.begin(), + attrs.end(), + boost::make_function_output_iterator([&]([[maybe_unused]] auto && _) { ++duplicates; })); + } else { + for (const auto & attr : attrs) { + if (base.get(attr.name)) + ++duplicates; + } + } + + bindings->numAttrsInChain = base.numAttrsInChain + attrs.size() - duplicates; } public: diff --git a/src/libexpr/include/nix/expr/eval.hh b/src/libexpr/include/nix/expr/eval.hh index 2601d8de8..0c7f9cf09 100644 --- a/src/libexpr/include/nix/expr/eval.hh +++ b/src/libexpr/include/nix/expr/eval.hh @@ -42,6 +42,7 @@ class Store; namespace fetchers { struct Settings; struct InputCache; +struct Input; } // namespace fetchers struct EvalSettings; class EvalState; @@ -190,7 +191,7 @@ std::ostream & operator<<(std::ostream & os, const ValueType t); struct RegexCache; -std::shared_ptr<RegexCache> makeRegexCache(); +ref<RegexCache> makeRegexCache(); struct DebugTrace { @@ -371,6 +372,7 @@ public: const fetchers::Settings & fetchSettings; const EvalSettings & settings; + SymbolTable symbols; PosTable positions; @@ -417,7 +419,7 @@ public: RootValue vImportedDrvToDerivation = nullptr; - ref<fetchers::InputCache> inputCache; + const ref<fetchers::InputCache> inputCache; /** * Debugger @@ -470,18 +472,18 @@ private: /* Cache for calls to addToStore(); maps source paths to the store paths. */ - ref<Sync<std::map<SourcePath, StorePath>>> srcToStore; + const ref<Sync<std::map<SourcePath, StorePath>>> srcToStore; /** * A cache that maps paths to "resolved" paths for importing Nix * expressions, i.e. `/foo` to `/foo/default.nix`. */ - ref<Sync<std::unordered_map<SourcePath, SourcePath>>> importResolutionCache; + const ref<Sync<std::unordered_map<SourcePath, SourcePath>>> importResolutionCache; /** * A cache from resolved paths to values. */ - ref, @@ -503,12 +505,19 @@ private: /** * Cache used by prim_match(). */ - std::shared_ptr<RegexCache> regexCache; + const ref<RegexCache> regexCache; public: + /** + * @param lookupPath Only used during construction. + * @param store The store to use for instantiation + * @param fetchSettings Must outlive the lifetime of this EvalState! + * @param settings Must outlive the lifetime of this EvalState!
+ * @param buildStore The store to use for builds ("import from derivation", C API `nix_string_realise`) + */ EvalState( - const LookupPath & _lookupPath, + const LookupPath & lookupPath, ref store, const fetchers::Settings & fetchSettings, const EvalSettings & settings, @@ -575,16 +584,22 @@ public: void checkURI(const std::string & uri); + /** + * Mount an input on the Nix store. + */ + StorePath mountInput(fetchers::Input & input, const fetchers::Input & originalInput, ref accessor); + /** * Parse a Nix expression from the specified file. */ Expr * parseExprFromFile(const SourcePath & path); - Expr * parseExprFromFile(const SourcePath & path, std::shared_ptr & staticEnv); + Expr * parseExprFromFile(const SourcePath & path, const std::shared_ptr & staticEnv); /** * Parse a Nix expression from the specified string. */ - Expr * parseExprFromString(std::string s, const SourcePath & basePath, std::shared_ptr & staticEnv); + Expr * + parseExprFromString(std::string s, const SourcePath & basePath, const std::shared_ptr & staticEnv); Expr * parseExprFromString(std::string s, const SourcePath & basePath); Expr * parseStdin(); @@ -753,7 +768,7 @@ public: #if NIX_USE_BOEHMGC /** A GC root for the baseEnv reference. */ - std::shared_ptr baseEnvP; + const std::shared_ptr baseEnvP; #endif public: @@ -767,7 +782,7 @@ public: /** * The same, but used during parsing to resolve variables. */ - std::shared_ptr staticBaseEnv; // !!! should be private + const std::shared_ptr staticBaseEnv; // !!! should be private /** * Internal primops not exposed to the user. @@ -849,7 +864,7 @@ private: size_t length, Pos::Origin origin, const SourcePath & basePath, - std::shared_ptr & staticEnv); + const std::shared_ptr & staticEnv); /** * Current Nix call stack depth, used with `max-call-depth` setting to throw stack overflow hopefully before we run diff --git a/src/libexpr/include/nix/expr/nixexpr.hh b/src/libexpr/include/nix/expr/nixexpr.hh index 747a8e4b2..fd7ed2a67 100644 --- a/src/libexpr/include/nix/expr/nixexpr.hh +++ b/src/libexpr/include/nix/expr/nixexpr.hh @@ -2,8 +2,10 @@ ///@file #include +#include #include #include +#include #include "nix/expr/gc-small-vector.hh" #include "nix/expr/value.hh" @@ -11,6 +13,8 @@ #include "nix/expr/eval-error.hh" #include "nix/util/pos-idx.hh" #include "nix/expr/counter.hh" +#include "nix/util/pos-table.hh" +#include "nix/util/error.hh" namespace nix { @@ -79,9 +83,11 @@ struct AttrName : expr(e) {}; }; +static_assert(std::is_trivially_copy_constructible_v); + typedef std::vector AttrPath; -std::string showAttrPath(const SymbolTable & symbols, const AttrPath & attrPath); +std::string showAttrPath(const SymbolTable & symbols, std::span attrPath); using UpdateQueue = SmallTemporaryValueVector; @@ -212,14 +218,16 @@ struct ExprString : Expr struct ExprPath : Expr { ref accessor; - std::string s; Value v; - ExprPath(ref accessor, std::string s) + ExprPath(std::pmr::polymorphic_allocator & alloc, ref accessor, std::string_view sv) : accessor(accessor) - , s(std::move(s)) { - v.mkPath(&*accessor, this->s.c_str()); + auto len = sv.length(); + char * s = alloc.allocate(len + 1); + sv.copy(s, len); + s[len] = '\0'; + v.mkPath(&*accessor, s); } Value * maybeThunk(EvalState & state, Env & env) override; @@ -286,20 +294,33 @@ struct ExprInheritFrom : ExprVar struct ExprSelect : Expr { PosIdx pos; + uint32_t nAttrPath; Expr *e, *def; - AttrPath attrPath; - ExprSelect(const PosIdx & pos, Expr * e, AttrPath attrPath, Expr * def) + AttrName * attrPathStart; + + ExprSelect( + 
std::pmr::polymorphic_allocator & alloc, + const PosIdx & pos, + Expr * e, + std::span attrPath, + Expr * def) : pos(pos) + , nAttrPath(attrPath.size()) , e(e) , def(def) - , attrPath(std::move(attrPath)) {}; + , attrPathStart(alloc.allocate_object(nAttrPath)) + { + std::ranges::copy(attrPath, attrPathStart); + }; - ExprSelect(const PosIdx & pos, Expr * e, Symbol name) + ExprSelect(std::pmr::polymorphic_allocator & alloc, const PosIdx & pos, Expr * e, Symbol name) : pos(pos) + , nAttrPath(1) , e(e) , def(0) + , attrPathStart((alloc.allocate_object())) { - attrPath.push_back(AttrName(name)); + *attrPathStart = AttrName(name); }; PosIdx getPos() const override @@ -307,6 +328,11 @@ struct ExprSelect : Expr return pos; } + std::span getAttrPath() const + { + return {attrPathStart, nAttrPath}; + } + /** * Evaluate the `a.b.c` part of `a.b.c.d`. This exists mostly for the purpose of :doc in the repl. * @@ -324,10 +350,14 @@ struct ExprSelect : Expr struct ExprOpHasAttr : Expr { Expr * e; - AttrPath attrPath; - ExprOpHasAttr(Expr * e, AttrPath attrPath) + std::span attrPath; + + ExprOpHasAttr(std::pmr::polymorphic_allocator & alloc, Expr * e, std::vector attrPath) : e(e) - , attrPath(std::move(attrPath)) {}; + , attrPath({alloc.allocate_object(attrPath.size()), attrPath.size()}) + { + std::ranges::copy(attrPath, this->attrPath.begin()); + }; PosIdx getPos() const override { @@ -414,8 +444,14 @@ struct ExprAttrs : Expr struct ExprList : Expr { - std::vector elems; - ExprList() {}; + std::span elems; + + ExprList(std::pmr::polymorphic_allocator & alloc, std::vector exprs) + : elems({alloc.allocate_object(exprs.size()), exprs.size()}) + { + std::ranges::copy(exprs, elems.begin()); + }; + COMMON_METHODS Value * maybeThunk(EvalState & state, Env & env) override; @@ -432,7 +468,7 @@ struct Formal Expr * def; }; -struct Formals +struct FormalsBuilder { typedef std::vector Formals_; /** @@ -447,6 +483,23 @@ struct Formals formals.begin(), formals.end(), arg, [](const Formal & f, const Symbol & sym) { return f.name < sym; }); return it != formals.end() && it->name == arg; } +}; + +struct Formals +{ + std::span formals; + bool ellipsis; + + Formals(std::span formals, bool ellipsis) + : formals(formals) + , ellipsis(ellipsis) {}; + + bool has(Symbol arg) const + { + auto it = std::lower_bound( + formals.begin(), formals.end(), arg, [](const Formal & f, const Symbol & sym) { return f.name < sym; }); + return it != formals.end() && it->name == arg; + } std::vector lexicographicOrder(const SymbolTable & symbols) const { @@ -464,31 +517,71 @@ struct ExprLambda : Expr PosIdx pos; Symbol name; Symbol arg; - Formals * formals; + +private: + bool hasFormals; + bool ellipsis; + uint16_t nFormals; + Formal * formalsStart; +public: + + std::optional getFormals() const + { + if (hasFormals) + return Formals{{formalsStart, nFormals}, ellipsis}; + else + return std::nullopt; + } + Expr * body; DocComment docComment; - ExprLambda(PosIdx pos, Symbol arg, Formals * formals, Expr * body) + ExprLambda( + const PosTable & positions, + std::pmr::polymorphic_allocator & alloc, + PosIdx pos, + Symbol arg, + const FormalsBuilder & formals, + Expr * body) : pos(pos) , arg(arg) - , formals(formals) - , body(body) {}; - - ExprLambda(PosIdx pos, Formals * formals, Expr * body) - : pos(pos) - , formals(formals) + , hasFormals(true) + , ellipsis(formals.ellipsis) + , nFormals(formals.formals.size()) + , formalsStart(alloc.allocate_object(nFormals)) , body(body) { - } + if (formals.formals.size() > nFormals) [[unlikely]] { + auto err = 
Error( + "too many formal arguments, implementation supports at most %1%", + std::numeric_limits<uint16_t>::max()); + if (pos) + err.atPos(positions[pos]); + throw err; + } + std::uninitialized_copy_n(formals.formals.begin(), nFormals, formalsStart); + }; + + ExprLambda(PosIdx pos, Symbol arg, Expr * body) + : pos(pos) + , arg(arg) + , hasFormals(false) + , ellipsis(false) + , nFormals(0) + , formalsStart(nullptr) + , body(body) {}; + + ExprLambda( + const PosTable & positions, + std::pmr::polymorphic_allocator<char> & alloc, + PosIdx pos, + FormalsBuilder formals, + Expr * body) + : ExprLambda(positions, alloc, pos, Symbol(), formals, body) {}; void setName(Symbol name) override; std::string showNamePos(const EvalState & state) const; - inline bool hasFormals() const - { - return formals != nullptr; - } - PosIdx getPos() const override { return pos; @@ -667,11 +760,11 @@ struct ExprConcatStrings : Expr { PosIdx pos; bool forceString; - std::vector<std::pair<PosIdx, Expr *>> * es; - ExprConcatStrings(const PosIdx & pos, bool forceString, std::vector<std::pair<PosIdx, Expr *>> * es) + std::vector<std::pair<PosIdx, Expr *>> es; + ExprConcatStrings(const PosIdx & pos, bool forceString, std::vector<std::pair<PosIdx, Expr *>> && es) : pos(pos) , forceString(forceString) - , es(es) {}; + , es(std::move(es)) {}; PosIdx getPos() const override { return pos; } diff --git a/src/libexpr/include/nix/expr/parser-state.hh b/src/libexpr/include/nix/expr/parser-state.hh index 55dce3047..4cffaa497 100644 --- a/src/libexpr/include/nix/expr/parser-state.hh +++ b/src/libexpr/include/nix/expr/parser-state.hh @@ -93,7 +93,7 @@ struct ParserState void addAttr( ExprAttrs * attrs, AttrPath && attrPath, const ParserLocation & loc, Expr * e, const ParserLocation & exprLoc); void addAttr(ExprAttrs * attrs, AttrPath & attrPath, const Symbol & symbol, ExprAttrs::AttrDef && def); - Formals * validateFormals(Formals * formals, PosIdx pos = noPos, Symbol arg = {}); + void validateFormals(FormalsBuilder & formals, PosIdx pos = noPos, Symbol arg = {}); Expr * stripIndentation(const PosIdx pos, std::vector<std::pair<PosIdx, std::variant<Expr *, StringToken>>> && es); PosIdx at(const ParserLocation & loc); }; @@ -213,17 +213,17 @@ ParserState::addAttr(ExprAttrs * attrs, AttrPath & attrPath, const Symbol & symb } } -inline Formals * ParserState::validateFormals(Formals * formals, PosIdx pos, Symbol arg) +inline void ParserState::validateFormals(FormalsBuilder & formals, PosIdx pos, Symbol arg) { - std::sort(formals->formals.begin(), formals->formals.end(), [](const auto & a, const auto & b) { + std::sort(formals.formals.begin(), formals.formals.end(), [](const auto & a, const auto & b) { return std::tie(a.name, a.pos) < std::tie(b.name, b.pos); }); std::optional<std::pair<Symbol, PosIdx>> duplicate; - for (size_t i = 0; i + 1 < formals->formals.size(); i++) { - if (formals->formals[i].name != formals->formals[i + 1].name) + for (size_t i = 0; i + 1 < formals.formals.size(); i++) { + if (formals.formals[i].name != formals.formals[i + 1].name) continue; - std::pair thisDup{formals->formals[i].name, formals->formals[i + 1].pos}; + std::pair thisDup{formals.formals[i].name, formals.formals[i + 1].pos}; duplicate = std::min(thisDup, duplicate.value_or(thisDup)); } if (duplicate) @@ -231,11 +231,9 @@ inline Formals * ParserState::validateFormals(Formals * formals, PosIdx pos, Sym {.msg = HintFmt("duplicate formal function argument '%1%'", symbols[duplicate->first]), .pos = positions[duplicate->second]}); - if (arg && formals->has(arg)) + if (arg && formals.has(arg)) throw ParseError( {.msg = HintFmt("duplicate formal function argument '%1%'", symbols[arg]), .pos = positions[pos]}); - - return formals; } inline Expr * @@ -282,7 +280,7 @@
ParserState::stripIndentation(const PosIdx pos, std::vector>; + std::vector> es2{}; atStartOfLine = true; size_t curDropped = 0; size_t n = es.size(); @@ -290,7 +288,7 @@ ParserState::stripIndentation(const PosIdx pos, std::vectoremplace_back(i->first, e); + es2.emplace_back(i->first, e); }; const auto trimString = [&](const StringToken & t) { std::string s2; @@ -324,7 +322,7 @@ ParserState::stripIndentation(const PosIdx pos, std::vectoremplace_back(i->first, new ExprString(alloc, s2)); + es2.emplace_back(i->first, new ExprString(alloc, s2)); } }; for (; i != es.end(); ++i, --n) { @@ -333,19 +331,17 @@ ParserState::stripIndentation(const PosIdx pos, std::vectorsize() == 0) { + if (es2.size() == 0) { auto * const result = new ExprString(""); - delete es2; return result; } /* If this is a single string, then don't do a concatenation. */ - if (es2->size() == 1 && dynamic_cast((*es2)[0].second)) { - auto * const result = (*es2)[0].second; - delete es2; + if (es2.size() == 1 && dynamic_cast((es2)[0].second)) { + auto * const result = (es2)[0].second; return result; } - return new ExprConcatStrings(pos, true, es2); + return new ExprConcatStrings(pos, true, std::move(es2)); } inline PosIdx LexerState::at(const ParserLocation & loc) diff --git a/src/libexpr/lexer.l b/src/libexpr/lexer.l index f420fc13f..810503bdc 100644 --- a/src/libexpr/lexer.l +++ b/src/libexpr/lexer.l @@ -142,11 +142,11 @@ or { return OR_KW; } return PIPE_INTO; } -{ID} { yylval->id = {yytext, (size_t) yyleng}; return ID; } +{ID} { yylval->emplace(yytext, (size_t) yyleng); return ID; } {INT} { errno = 0; std::optional numMay = string2Int(yytext); if (numMay.has_value()) { - yylval->n = NixInt{*numMay}; + yylval->emplace(*numMay); } else { throw ParseError(ErrorInfo{ .msg = HintFmt("invalid integer '%1%'", yytext), @@ -156,7 +156,7 @@ or { return OR_KW; } return INT_LIT; } {FLOAT} { errno = 0; - yylval->nf = strtod(yytext, 0); + yylval->emplace(strtod(yytext, 0)); if (errno != 0) throw ParseError(ErrorInfo{ .msg = HintFmt("invalid float '%1%'", yytext), @@ -183,7 +183,7 @@ or { return OR_KW; } /* It is impossible to match strings ending with '$' with one regex because trailing contexts are only valid at the end of a rule. (A sane but undocumented limitation.) */ - yylval->str = unescapeStr(yytext, yyleng, [&]() { return state->positions[CUR_POS]; }); + yylval->emplace(unescapeStr(yytext, yyleng, [&]() { return state->positions[CUR_POS]; })); return STR; } \$\{ { PUSH_STATE(DEFAULT); return DOLLAR_CURLY; } @@ -198,27 +198,27 @@ or { return OR_KW; } \'\'(\ *\n)? 
{ PUSH_STATE(IND_STRING); return IND_STRING_OPEN; } ([^\$\']|\$[^\{\']|\'[^\'\$])+ { - yylval->str = {yytext, (size_t) yyleng, true}; - forceNoNullByte(yylval->str, [&]() { return state->positions[CUR_POS]; }); + yylval->emplace(yytext, (size_t) yyleng, true); + forceNoNullByte(yylval->as(), [&]() { return state->positions[CUR_POS]; }); return IND_STR; } \'\'\$ | \$ { - yylval->str = {"$", 1}; + yylval->emplace("$", 1); return IND_STR; } \'\'\' { - yylval->str = {"''", 2}; + yylval->emplace("''", 2); return IND_STR; } \'\'\\{ANY} { - yylval->str = unescapeStr(yytext + 2, yyleng - 2, [&]() { return state->positions[CUR_POS]; }); + yylval->emplace(unescapeStr(yytext + 2, yyleng - 2, [&]() { return state->positions[CUR_POS]; })); return IND_STR; } \$\{ { PUSH_STATE(DEFAULT); return DOLLAR_CURLY; } \'\' { POP_STATE(); return IND_STRING_CLOSE; } \' { - yylval->str = {"'", 1}; + yylval->emplace("'", 1); return IND_STR; } @@ -232,23 +232,31 @@ or { return OR_KW; } {PATH_SEG} { POP_STATE(); PUSH_STATE(INPATH_SLASH); - yylval->path = {yytext, (size_t) yyleng}; + yylval->emplace(yytext, (size_t) yyleng); return PATH; } {HPATH_START} { POP_STATE(); PUSH_STATE(INPATH_SLASH); - yylval->path = {yytext, (size_t) yyleng}; + yylval->emplace(yytext, (size_t) yyleng); return HPATH; } +{ANY} | +<> { + /* This should be unreachable: PATH_START is only entered after matching + PATH_SEG or HPATH_START, and we rewind to re-parse those same patterns. + This rule exists to satisfy flex's %option nodefault requirement. */ + unreachable(); +} + {PATH} { if (yytext[yyleng-1] == '/') PUSH_STATE(INPATH_SLASH); else PUSH_STATE(INPATH); - yylval->path = {yytext, (size_t) yyleng}; + yylval->emplace(yytext, (size_t) yyleng); return PATH; } {HPATH} { @@ -256,7 +264,7 @@ or { return OR_KW; } PUSH_STATE(INPATH_SLASH); else PUSH_STATE(INPATH); - yylval->path = {yytext, (size_t) yyleng}; + yylval->emplace(yytext, (size_t) yyleng); return HPATH; } @@ -272,7 +280,7 @@ or { return OR_KW; } PUSH_STATE(INPATH_SLASH); else PUSH_STATE(INPATH); - yylval->str = {yytext, (size_t) yyleng}; + yylval->emplace(yytext, (size_t) yyleng); return STR; } {ANY} | @@ -294,8 +302,8 @@ or { return OR_KW; } }); } -{SPATH} { yylval->path = {yytext, (size_t) yyleng}; return SPATH; } -{URI} { yylval->uri = {yytext, (size_t) yyleng}; return URI; } +{SPATH} { yylval->emplace(yytext, (size_t) yyleng); return SPATH; } +{URI} { yylval->emplace(yytext, (size_t) yyleng); return URI; } %{ // Doc comment rule diff --git a/src/libexpr/meson.build b/src/libexpr/meson.build index d24e7fae3..18c4c7fa3 100644 --- a/src/libexpr/meson.build +++ b/src/libexpr/meson.build @@ -97,7 +97,6 @@ config_priv_h = configure_file( ) subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') parser_tab = custom_target( input : 'parser.y', @@ -184,17 +183,62 @@ subdir('primops') subdir('nix-meson-build-support/export-all-symbols') subdir('nix-meson-build-support/windows-version') +# Turns out that Bison/Flex are particularly sensitive to compilers +# failing to inline functions. For that reason we crank up the inlining +# threshold manually for optimized builds. Yes, this can be considered 'ricing' +# the compiler, but it does pay off. +# +# NOTE: missed inlining can be spotted (for Clang) using -Rpass-missed=inline +# and -fdump-ipa-inline-missed (for GCC). 
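+#
+# For example (a local experiment, not part of the build), missed-inlining
+# remarks can be surfaced with Clang by configuring a scratch build dir:
+#   CXXFLAGS='-Rpass-missed=inline' meson setup build-inline-audit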
+parser_library_cpp_args = [] + +if not get_option('debug') + if cxx.get_id() == 'clang' + # The default as of LLVM 21 is 225: + # llc --help-hidden | grep inline-threshold + parser_library_cpp_args += [ + '-mllvm', + '-inline-threshold=5000', + ] + elif cxx.get_id() == 'gcc' + parser_library_cpp_args += [ + '--param=max-inline-insns-single=1000', + '--param=max-inline-insns-auto=1000', + '--param=inline-unit-growth=400', + ] + endif +endif + +# Working around https://github.com/mesonbuild/meson/issues/1367. +parser_library = static_library( + 'nixexpr-parser', + parser_tab, + lexer_tab, + cpp_args : parser_library_cpp_args, + dependencies : deps_public + deps_private + deps_other, + include_directories : include_dirs, + # 1. Stdlib and regular assertions regress parser performance significantly, so build without + # them for this one library when building in a release configuration. + # 2. Disable LTO for GCC because then inlining flags won't apply, since LTO in GCC is done + # by plonking down GIMPLE in the archive. + override_options : [ + 'b_ndebug=@0@'.format(not get_option('debug')), + 'b_lto=@0@'.format(get_option('b_lto') and cxx.get_id() != 'gcc'), + ], +) + this_library = library( 'nixexpr', sources, config_priv_h, - parser_tab, - lexer_tab, + parser_tab[1], + lexer_tab[1], generated_headers, soversion : nix_soversion, dependencies : deps_public + deps_private + deps_other, include_directories : include_dirs, link_args : linker_export_flags, + link_whole : [ parser_library ], prelink : true, # For C++ static initializers install : true, cpp_pch : do_pch ? [ 'pch/precompiled-headers.hh' ] : [], diff --git a/src/libexpr/nixexpr.cc b/src/libexpr/nixexpr.cc index a2980af6b..b183f1bbf 100644 --- a/src/libexpr/nixexpr.cc +++ b/src/libexpr/nixexpr.cc @@ -45,7 +45,7 @@ void ExprString::show(const SymbolTable & symbols, std::ostream & str) const void ExprPath::show(const SymbolTable & symbols, std::ostream & str) const { - str << s; + str << v.pathStr(); } void ExprVar::show(const SymbolTable & symbols, std::ostream & str) const @@ -57,7 +57,7 @@ void ExprSelect::show(const SymbolTable & symbols, std::ostream & str) const { str << "("; e->show(symbols, str); - str << ")." << showAttrPath(symbols, attrPath); + str << ")." 
<< showAttrPath(symbols, getAttrPath()); if (def) { str << " or ("; def->show(symbols, str); @@ -154,7 +154,7 @@ void ExprList::show(const SymbolTable & symbols, std::ostream & str) const void ExprLambda::show(const SymbolTable & symbols, std::ostream & str) const { str << "("; - if (hasFormals()) { + if (auto formals = getFormals()) { str << "{ "; bool first = true; // the natural Symbol ordering is by creation time, which can lead to the @@ -171,7 +171,7 @@ void ExprLambda::show(const SymbolTable & symbols, std::ostream & str) const i.def->show(symbols, str); } } - if (formals->ellipsis) { + if (ellipsis) { if (!first) str << ", "; str << "..."; @@ -246,7 +246,7 @@ void ExprConcatStrings::show(const SymbolTable & symbols, std::ostream & str) co { bool first = true; str << "("; - for (auto & i : *es) { + for (auto & i : es) { if (first) first = false; else @@ -261,7 +261,7 @@ void ExprPos::show(const SymbolTable & symbols, std::ostream & str) const str << "__curPos"; } -std::string showAttrPath(const SymbolTable & symbols, const AttrPath & attrPath) +std::string showAttrPath(const SymbolTable & symbols, std::span attrPath) { std::ostringstream out; bool first = true; @@ -362,7 +362,7 @@ void ExprSelect::bindVars(EvalState & es, const std::shared_ptr e->bindVars(es, env); if (def) def->bindVars(es, env); - for (auto & i : attrPath) + for (auto & i : getAttrPath()) if (!i.symbol) i.expr->bindVars(es, env); } @@ -452,14 +452,14 @@ void ExprLambda::bindVars(EvalState & es, const std::shared_ptr es.exprEnvs.insert(std::make_pair(this, env)); auto newEnv = - std::make_shared(nullptr, env, (hasFormals() ? formals->formals.size() : 0) + (!arg ? 0 : 1)); + std::make_shared(nullptr, env, (getFormals() ? getFormals()->formals.size() : 0) + (!arg ? 0 : 1)); Displacement displ = 0; if (arg) newEnv->vars.emplace_back(arg, displ++); - if (hasFormals()) { + if (auto formals = getFormals()) { for (auto & i : formals->formals) newEnv->vars.emplace_back(i.name, displ++); @@ -564,7 +564,7 @@ void ExprConcatStrings::bindVars(EvalState & es, const std::shared_ptres) + for (auto & i : this->es) i.second->bindVars(es, env); } diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y index 7dabd6b56..29586ed98 100644 --- a/src/libexpr/parser.y +++ b/src/libexpr/parser.y @@ -14,6 +14,10 @@ %code requires { +// bison adds a bunch of switch statements with default: +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wswitch-enum" + #ifndef BISON_HEADER #define BISON_HEADER @@ -120,46 +124,28 @@ static Expr * makeCall(PosIdx pos, Expr * fn, Expr * arg) { %} -%union { - // !!! We're probably leaking stuff here. - nix::Expr * e; - nix::ExprList * list; - nix::ExprAttrs * attrs; - nix::Formals * formals; - nix::Formal * formal; - nix::NixInt n; - nix::NixFloat nf; - nix::StringToken id; // !!! 
-> Symbol - nix::StringToken path; - nix::StringToken uri; - nix::StringToken str; - std::vector * attrNames; - std::vector> * inheritAttrs; - std::vector> * string_parts; - std::variant * to_be_string; - std::vector>> * ind_string_parts; -} +%define api.value.type variant -%type start expr expr_function expr_if expr_op -%type expr_select expr_simple expr_app -%type expr_pipe_from expr_pipe_into -%type expr_list -%type binds binds1 -%type formals formal_set -%type formal -%type attrpath -%type attrs -%type string_parts_interpolated -%type ind_string_parts -%type path_start -%type string_parts string_attr -%type attr -%token ID -%token STR IND_STR -%token INT_LIT -%token FLOAT_LIT -%token PATH HPATH SPATH PATH_END -%token URI +%type start expr expr_function expr_if expr_op +%type expr_select expr_simple expr_app +%type expr_pipe_from expr_pipe_into +%type > list +%type binds binds1 +%type formals formal_set +%type formal +%type > attrpath +%type >> attrs +%type >> string_parts_interpolated +%type >>> ind_string_parts +%type path_start +%type > string_parts string_attr +%type attr +%token ID +%token STR IND_STR +%token INT_LIT +%token FLOAT_LIT +%token PATH HPATH SPATH PATH_END +%token URI %token IF THEN ELSE ASSERT WITH LET IN_KW REC INHERIT EQ NEQ AND OR IMPL OR_KW %token PIPE_FROM PIPE_INTO /* <| and |> */ %token DOLLAR_CURLY /* == ${ */ @@ -193,26 +179,30 @@ expr: expr_function; expr_function : ID ':' expr_function - { auto me = new ExprLambda(CUR_POS, state->symbols.create($1), 0, $3); + { auto me = new ExprLambda(CUR_POS, state->symbols.create($1), $3); $$ = me; SET_DOC_POS(me, @1); } | formal_set ':' expr_function[body] - { auto me = new ExprLambda(CUR_POS, state->validateFormals($formal_set), $body); + { + state->validateFormals($formal_set); + auto me = new ExprLambda(state->positions, state->alloc, CUR_POS, std::move($formal_set), $body); $$ = me; SET_DOC_POS(me, @1); } | formal_set '@' ID ':' expr_function[body] { auto arg = state->symbols.create($ID); - auto me = new ExprLambda(CUR_POS, arg, state->validateFormals($formal_set, CUR_POS, arg), $body); + state->validateFormals($formal_set, CUR_POS, arg); + auto me = new ExprLambda(state->positions, state->alloc, CUR_POS, arg, std::move($formal_set), $body); $$ = me; SET_DOC_POS(me, @1); } | ID '@' formal_set ':' expr_function[body] { auto arg = state->symbols.create($ID); - auto me = new ExprLambda(CUR_POS, arg, state->validateFormals($formal_set, CUR_POS, arg), $body); + state->validateFormals($formal_set, CUR_POS, arg); + auto me = new ExprLambda(state->positions, state->alloc, CUR_POS, arg, std::move($formal_set), $body); $$ = me; SET_DOC_POS(me, @1); } @@ -261,9 +251,9 @@ expr_op | expr_op OR expr_op { $$ = new ExprOpOr(state->at(@2), $1, $3); } | expr_op IMPL expr_op { $$ = new ExprOpImpl(state->at(@2), $1, $3); } | expr_op UPDATE expr_op { $$ = new ExprOpUpdate(state->at(@2), $1, $3); } - | expr_op '?' attrpath { $$ = new ExprOpHasAttr($1, std::move(*$3)); delete $3; } + | expr_op '?' 
attrpath { $$ = new ExprOpHasAttr(state->alloc, $1, std::move($3)); } | expr_op '+' expr_op - { $$ = new ExprConcatStrings(state->at(@2), false, new std::vector >({{state->at(@1), $1}, {state->at(@3), $3}})); } + { $$ = new ExprConcatStrings(state->at(@2), false, {{state->at(@1), $1}, {state->at(@3), $3}}); } | expr_op '-' expr_op { $$ = new ExprCall(state->at(@2), new ExprVar(state->s.sub), {$1, $3}); } | expr_op '*' expr_op { $$ = new ExprCall(state->at(@2), new ExprVar(state->s.mul), {$1, $3}); } | expr_op '/' expr_op { $$ = new ExprCall(state->at(@2), new ExprVar(state->s.div), {$1, $3}); } @@ -282,9 +272,9 @@ expr_app expr_select : expr_simple '.' attrpath - { $$ = new ExprSelect(CUR_POS, $1, std::move(*$3), nullptr); delete $3; } + { $$ = new ExprSelect(state->alloc, CUR_POS, $1, std::move($3), nullptr); } | expr_simple '.' attrpath OR_KW expr_select - { $$ = new ExprSelect(CUR_POS, $1, std::move(*$3), $5); delete $3; $5->warnIfCursedOr(state->symbols, state->positions); } + { $$ = new ExprSelect(state->alloc, CUR_POS, $1, std::move($3), $5); $5->warnIfCursedOr(state->symbols, state->positions); } | /* Backwards compatibility: because Nixpkgs has a function named ‘or’, allow stuff like ‘map or [...]’. This production is problematic (see https://github.com/NixOS/nix/issues/11118) and will be refactored in the @@ -311,17 +301,15 @@ expr_simple std::visit(overloaded{ [&](std::string_view str) { $$ = new ExprString(state->alloc, str); }, [&](Expr * expr) { $$ = expr; }}, - *$2); - delete $2; + $2); } | IND_STRING_OPEN ind_string_parts IND_STRING_CLOSE { - $$ = state->stripIndentation(CUR_POS, std::move(*$2)); - delete $2; + $$ = state->stripIndentation(CUR_POS, std::move($2)); } | path_start PATH_END | path_start string_parts_interpolated PATH_END { - $2->insert($2->begin(), {state->at(@1), $1}); - $$ = new ExprConcatStrings(CUR_POS, false, $2); + $2.insert($2.begin(), {state->at(@1), $1}); + $$ = new ExprConcatStrings(CUR_POS, false, std::move($2)); } | SPATH { std::string_view path($1.p + 1, $1.l - 2); @@ -343,31 +331,30 @@ expr_simple /* Let expressions `let {..., body = ...}' are just desugared into `(rec {..., body = ...}).body'. 
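   A sketch of the construction, mirroring the action below (simplified,
   not the actual action code):

       // `let { x = 1; body = x; }`  ==>  `(rec { x = 1; body = x; }).body`
       attrs->recursive = true;
       Expr * e = new ExprSelect(state->alloc, noPos, attrs, state->s.body);

   i.e. selecting `body` from the now-recursive attrset yields the value.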
*/ | LET '{' binds '}' - { $3->recursive = true; $3->pos = CUR_POS; $$ = new ExprSelect(noPos, $3, state->s.body); } + { $3->recursive = true; $3->pos = CUR_POS; $$ = new ExprSelect(state->alloc, noPos, $3, state->s.body); } | REC '{' binds '}' { $3->recursive = true; $3->pos = CUR_POS; $$ = $3; } | '{' binds1 '}' { $2->pos = CUR_POS; $$ = $2; } | '{' '}' { $$ = new ExprAttrs(CUR_POS); } - | '[' expr_list ']' { $$ = $2; } + | '[' list ']' { $$ = new ExprList(state->alloc, std::move($2)); } ; string_parts - : STR { $$ = new std::variant($1); } - | string_parts_interpolated { $$ = new std::variant(new ExprConcatStrings(CUR_POS, true, $1)); } - | { $$ = new std::variant(std::string_view()); } + : STR { $$ = $1; } + | string_parts_interpolated { $$ = new ExprConcatStrings(CUR_POS, true, std::move($1)); } + | { $$ = std::string_view(); } ; string_parts_interpolated : string_parts_interpolated STR - { $$ = $1; $1->emplace_back(state->at(@2), new ExprString(state->alloc, $2)); } - | string_parts_interpolated DOLLAR_CURLY expr '}' { $$ = $1; $1->emplace_back(state->at(@2), $3); } - | DOLLAR_CURLY expr '}' { $$ = new std::vector>; $$->emplace_back(state->at(@1), $2); } + { $$ = std::move($1); $$.emplace_back(state->at(@2), new ExprString(state->alloc, $2)); } + | string_parts_interpolated DOLLAR_CURLY expr '}' { $$ = std::move($1); $$.emplace_back(state->at(@2), $3); } + | DOLLAR_CURLY expr '}' { $$.emplace_back(state->at(@1), $2); } | STR DOLLAR_CURLY expr '}' { - $$ = new std::vector>; - $$->emplace_back(state->at(@1), new ExprString(state->alloc, $1)); - $$->emplace_back(state->at(@2), $3); + $$.emplace_back(state->at(@1), new ExprString(state->alloc, $1)); + $$.emplace_back(state->at(@2), $3); } ; @@ -392,8 +379,8 @@ path_start root filesystem accessor, rather than the accessor of the current Nix expression. */ literal.front() == '/' - ? new ExprPath(state->rootFS, std::move(path)) - : new ExprPath(state->basePath.accessor, std::move(path)); + ? 
new ExprPath(state->alloc, state->rootFS, path) + : new ExprPath(state->alloc, state->basePath.accessor, path); } | HPATH { if (state->settings.pureEval) { @@ -403,14 +390,14 @@ path_start ); } Path path(getHome() + std::string($1.p + 1, $1.l - 1)); - $$ = new ExprPath(ref(state->rootFS), std::move(path)); + $$ = new ExprPath(state->alloc, ref(state->rootFS), path); } ; ind_string_parts - : ind_string_parts IND_STR { $$ = $1; $1->emplace_back(state->at(@2), $2); } - | ind_string_parts DOLLAR_CURLY expr '}' { $$ = $1; $1->emplace_back(state->at(@2), $3); } - | { $$ = new std::vector>>; } + : ind_string_parts IND_STR { $$ = std::move($1); $$.emplace_back(state->at(@2), $2); } + | ind_string_parts DOLLAR_CURLY expr '}' { $$ = std::move($1); $$.emplace_back(state->at(@2), $3); } + | { } ; binds @@ -421,19 +408,17 @@ binds binds1 : binds1[accum] attrpath '=' expr ';' { $$ = $accum; - state->addAttr($$, std::move(*$attrpath), @attrpath, $expr, @expr); - delete $attrpath; + state->addAttr($$, std::move($attrpath), @attrpath, $expr, @expr); } | binds[accum] INHERIT attrs ';' { $$ = $accum; - for (auto & [i, iPos] : *$attrs) { + for (auto & [i, iPos] : $attrs) { if ($accum->attrs.find(i.symbol) != $accum->attrs.end()) state->dupAttr(i.symbol, iPos, $accum->attrs[i.symbol].pos); $accum->attrs.emplace( i.symbol, ExprAttrs::AttrDef(new ExprVar(iPos, i.symbol), iPos, ExprAttrs::AttrDef::Kind::Inherited)); } - delete $attrs; } | binds[accum] INHERIT '(' expr ')' attrs ';' { $$ = $accum; @@ -441,61 +426,55 @@ binds1 $accum->inheritFromExprs = std::make_unique>(); $accum->inheritFromExprs->push_back($expr); auto from = new nix::ExprInheritFrom(state->at(@expr), $accum->inheritFromExprs->size() - 1); - for (auto & [i, iPos] : *$attrs) { + for (auto & [i, iPos] : $attrs) { if ($accum->attrs.find(i.symbol) != $accum->attrs.end()) state->dupAttr(i.symbol, iPos, $accum->attrs[i.symbol].pos); $accum->attrs.emplace( i.symbol, ExprAttrs::AttrDef( - new ExprSelect(iPos, from, i.symbol), + new ExprSelect(state->alloc, iPos, from, i.symbol), iPos, ExprAttrs::AttrDef::Kind::InheritedFrom)); } - delete $attrs; } | attrpath '=' expr ';' { $$ = new ExprAttrs; - state->addAttr($$, std::move(*$attrpath), @attrpath, $expr, @expr); - delete $attrpath; + state->addAttr($$, std::move($attrpath), @attrpath, $expr, @expr); } ; attrs - : attrs attr { $$ = $1; $1->emplace_back(AttrName(state->symbols.create($2)), state->at(@2)); } + : attrs attr { $$ = std::move($1); $$.emplace_back(state->symbols.create($2), state->at(@2)); } | attrs string_attr - { $$ = $1; + { $$ = std::move($1); std::visit(overloaded { - [&](std::string_view str) { $$->emplace_back(AttrName(state->symbols.create(str)), state->at(@2)); }, + [&](std::string_view str) { $$.emplace_back(state->symbols.create(str), state->at(@2)); }, [&](Expr * expr) { throw ParseError({ .msg = HintFmt("dynamic attributes not allowed in inherit"), .pos = state->positions[state->at(@2)] }); } - }, *$2); - delete $2; + }, $2); } - | { $$ = new std::vector>; } + | { } ; attrpath - : attrpath '.' attr { $$ = $1; $1->push_back(AttrName(state->symbols.create($3))); } + : attrpath '.' attr { $$ = std::move($1); $$.emplace_back(state->symbols.create($3)); } | attrpath '.' 
string_attr - { $$ = $1; + { $$ = std::move($1); std::visit(overloaded { - [&](std::string_view str) { $$->push_back(AttrName(state->symbols.create(str))); }, - [&](Expr * expr) { $$->push_back(AttrName(expr)); } - }, *$3); - delete $3; + [&](std::string_view str) { $$.emplace_back(state->symbols.create(str)); }, + [&](Expr * expr) { $$.emplace_back(expr); } + }, std::move($3)); } - | attr { $$ = new std::vector; $$->push_back(AttrName(state->symbols.create($1))); } + | attr { $$.emplace_back(state->symbols.create($1)); } | string_attr - { $$ = new std::vector; - std::visit(overloaded { - [&](std::string_view str) { $$->push_back(AttrName(state->symbols.create(str))); }, - [&](Expr * expr) { $$->push_back(AttrName(expr)); } - }, *$1); - delete $1; + { std::visit(overloaded { + [&](std::string_view str) { $$.emplace_back(state->symbols.create(str)); }, + [&](Expr * expr) { $$.emplace_back(expr); } + }, std::move($1)); } ; @@ -505,33 +484,33 @@ attr ; string_attr - : '"' string_parts '"' { $$ = $2; } - | DOLLAR_CURLY expr '}' { $$ = new std::variant($2); } + : '"' string_parts '"' { $$ = std::move($2); } + | DOLLAR_CURLY expr '}' { $$ = $2; } ; -expr_list - : expr_list expr_select { $$ = $1; $1->elems.push_back($2); /* !!! dangerous */; $2->warnIfCursedOr(state->symbols, state->positions); } - | { $$ = new ExprList; } +list + : list expr_select { $$ = std::move($1); $$.push_back($2); /* !!! dangerous */; $2->warnIfCursedOr(state->symbols, state->positions); } + | { } ; formal_set - : '{' formals ',' ELLIPSIS '}' { $$ = $formals; $$->ellipsis = true; } - | '{' ELLIPSIS '}' { $$ = new Formals; $$->ellipsis = true; } - | '{' formals ',' '}' { $$ = $formals; $$->ellipsis = false; } - | '{' formals '}' { $$ = $formals; $$->ellipsis = false; } - | '{' '}' { $$ = new Formals; $$->ellipsis = false; } + : '{' formals ',' ELLIPSIS '}' { $$ = std::move($formals); $$.ellipsis = true; } + | '{' ELLIPSIS '}' { $$.ellipsis = true; } + | '{' formals ',' '}' { $$ = std::move($formals); $$.ellipsis = false; } + | '{' formals '}' { $$ = std::move($formals); $$.ellipsis = false; } + | '{' '}' { $$.ellipsis = false; } ; formals : formals[accum] ',' formal - { $$ = $accum; $$->formals.emplace_back(*$formal); delete $formal; } + { $$ = std::move($accum); $$.formals.emplace_back(std::move($formal)); } | formal - { $$ = new Formals; $$->formals.emplace_back(*$formal); delete $formal; } + { $$.formals.emplace_back(std::move($formal)); } ; formal - : ID { $$ = new Formal{CUR_POS, state->symbols.create($1), 0}; } - | ID '?' expr { $$ = new Formal{CUR_POS, state->symbols.create($1), $3}; } + : ID { $$ = Formal{CUR_POS, state->symbols.create($1), 0}; } + | ID '?' 
expr { $$ = Formal{CUR_POS, state->symbols.create($1), $3}; } ; %% @@ -582,3 +561,4 @@ Expr * parseExprFromBuf( } +#pragma GCC diagnostic pop // end ignored "-Wswitch-enum" diff --git a/src/libexpr/paths.cc b/src/libexpr/paths.cc index f90bc37df..8622ab208 100644 --- a/src/libexpr/paths.cc +++ b/src/libexpr/paths.cc @@ -1,5 +1,7 @@ #include "nix/store/store-api.hh" #include "nix/expr/eval.hh" +#include "nix/util/mounted-source-accessor.hh" +#include "nix/fetchers/fetch-to-store.hh" namespace nix { @@ -18,4 +20,27 @@ SourcePath EvalState::storePath(const StorePath & path) return {rootFS, CanonPath{store->printStorePath(path)}}; } +StorePath +EvalState::mountInput(fetchers::Input & input, const fetchers::Input & originalInput, ref accessor) +{ + auto storePath = fetchToStore(fetchSettings, *store, accessor, FetchMode::Copy, input.getName()); + + allowPath(storePath); // FIXME: should just whitelist the entire virtual store + + storeFS->mount(CanonPath(store->printStorePath(storePath)), accessor); + + auto narHash = store->queryPathInfo(storePath)->narHash; + input.attrs.insert_or_assign("narHash", narHash.to_string(HashFormat::SRI, true)); + + if (originalInput.getNarHash() && narHash != *originalInput.getNarHash()) + throw Error( + (unsigned int) 102, + "NAR hash mismatch in input '%s', expected '%s' but got '%s'", + originalInput.to_string(), + narHash.to_string(HashFormat::SRI, true), + originalInput.getNarHash()->to_string(HashFormat::SRI, true)); + + return storePath; +} + } // namespace nix diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index a8ac8d159..04196bc1f 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -1374,7 +1374,7 @@ static void derivationStrictInternal(EvalState & state, std::string_view drvName pos, "while evaluating the `__structuredAttrs` " "attribute passed to builtins.derivationStrict")) - jsonObject = StructuredAttrs{.structuredAttrs = json::object()}; + jsonObject = StructuredAttrs{}; /* Check whether null attributes should be ignored. 
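   Roughly (a hypothetical, simplified loop — not the implementation below):

       for (auto & attr : *args[0]->attrs()) {
           state.forceValue(*attr.value, pos);
           if (ignoreNulls && attr.value->type() == nNull)
               continue; // with `__ignoreNulls = true`, null attrs are dropped
           // ... otherwise the attribute becomes part of the derivation ...
       }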
*/ bool ignoreNulls = false; @@ -1420,7 +1420,8 @@ static void derivationStrictInternal(EvalState & state, std::string_view drvName .debugThrow(); } if (ingestionMethod == ContentAddressMethod::Raw::Text) - experimentalFeatureSettings.require(Xp::DynamicDerivations); + experimentalFeatureSettings.require( + Xp::DynamicDerivations, fmt("text-hashed derivation '%s', outputHashMode = \"text\"", drvName)); if (ingestionMethod == ContentAddressMethod::Raw::Git) experimentalFeatureSettings.require(Xp::GitHashing); }; @@ -2412,7 +2413,7 @@ static void prim_toXML(EvalState & state, const PosIdx pos, Value ** args, Value std::ostringstream out; NixStringContext context; printValueAsXML(state, true, false, *args[0], out, context, pos); - v.mkString(toView(out), context); + v.mkString(out.view(), context); } static RegisterPrimOp primop_toXML({ @@ -2520,7 +2521,7 @@ static void prim_toJSON(EvalState & state, const PosIdx pos, Value ** args, Valu std::ostringstream out; NixStringContext context; printValueAsJSON(state, true, *args[0], pos, out, context); - v.mkString(toView(out), context); + v.mkString(out.view(), context); } static RegisterPrimOp primop_toJSON({ @@ -3362,21 +3363,20 @@ static void prim_functionArgs(EvalState & state, const PosIdx pos, Value ** args if (!args[0]->isLambda()) state.error("'functionArgs' requires a function").atPos(pos).debugThrow(); - if (!args[0]->lambda().fun->hasFormals()) { + if (const auto & formals = args[0]->lambda().fun->getFormals()) { + auto attrs = state.buildBindings(formals->formals.size()); + for (auto & i : formals->formals) + attrs.insert(i.name, state.getBool(i.def), i.pos); + /* Optimization: avoid sorting bindings. `formals` must already be sorted according to + (std::tie(a.name, a.pos) < std::tie(b.name, b.pos)) predicate, so the following assertion + always holds: + assert(std::is_sorted(attrs.alreadySorted()->begin(), attrs.alreadySorted()->end())); + .*/ + v.mkAttrs(attrs.alreadySorted()); + } else { v.mkAttrs(&Bindings::emptyBindings); return; } - - const auto & formals = args[0]->lambda().fun->formals->formals; - auto attrs = state.buildBindings(formals.size()); - for (auto & i : formals) - attrs.insert(i.name, state.getBool(i.def), i.pos); - /* Optimization: avoid sorting bindings. 
`formals` must already be sorted according to - (std::tie(a.name, a.pos) < std::tie(b.name, b.pos)) predicate, so the following assertion - always holds: - assert(std::is_sorted(attrs.alreadySorted()->begin(), attrs.alreadySorted()->end())); - .*/ - v.mkAttrs(attrs.alreadySorted()); } static RegisterPrimOp primop_functionArgs({ @@ -4610,9 +4610,9 @@ struct RegexCache } }; -std::shared_ptr makeRegexCache() +ref makeRegexCache() { - return std::make_shared(); + return make_ref(); } void prim_match(EvalState & state, const PosIdx pos, Value ** args, Value & v) diff --git a/src/libexpr/primops/fetchClosure.cc b/src/libexpr/primops/fetchClosure.cc index 63da53aa9..6e1389814 100644 --- a/src/libexpr/primops/fetchClosure.cc +++ b/src/libexpr/primops/fetchClosure.cc @@ -64,6 +64,8 @@ static void runFetchClosureWithRewrite( .pos = state.positions[pos]}); } + state.allowClosure(toPath); + state.mkStorePathString(toPath, v); } @@ -91,6 +93,8 @@ static void runFetchClosureWithContentAddressedPath( .pos = state.positions[pos]}); } + state.allowClosure(fromPath); + state.mkStorePathString(fromPath, v); } @@ -115,6 +119,8 @@ static void runFetchClosureWithInputAddressedPath( .pos = state.positions[pos]}); } + state.allowClosure(fromPath); + state.mkStorePathString(fromPath, v); } diff --git a/src/libexpr/primops/fetchTree.cc b/src/libexpr/primops/fetchTree.cc index e673e55a0..b49bd02e7 100644 --- a/src/libexpr/primops/fetchTree.cc +++ b/src/libexpr/primops/fetchTree.cc @@ -10,6 +10,7 @@ #include "nix/util/url.hh" #include "nix/expr/value-to-json.hh" #include "nix/fetchers/fetch-to-store.hh" +#include "nix/fetchers/input-cache.hh" #include @@ -198,8 +199,8 @@ static void fetchTree( if (state.settings.pureEval && !input.isLocked()) { if (input.getNarHash()) warn( - "Input '%s' is unlocked (e.g. lacks a Git revision) but does have a NAR hash. " - "This is deprecated since such inputs are verifiable but may not be reproducible.", + "Input '%s' is unlocked (e.g. lacks a Git revision) but is checked by NAR hash. " + "This is not reproducible and will break after garbage collection or when shared.", input.to_string()); else state @@ -218,11 +219,11 @@ static void fetchTree( throw Error("input '%s' is not allowed to use the '__final' attribute", input.to_string()); } - auto [storePath, input2] = input.fetchToStore(state.store); + auto cachedInput = state.inputCache->getAccessor(state.store, input, fetchers::UseRegistries::No); - state.allowPath(storePath); + auto storePath = state.mountInput(cachedInput.lockedInput, input, cachedInput.accessor); - emitTreeAttrs(state, storePath, input2, v, params.emptyRevFallback, false); + emitTreeAttrs(state, storePath, cachedInput.lockedInput, v, params.emptyRevFallback, false); } static void prim_fetchTree(EvalState & state, const PosIdx pos, Value ** args, Value & v) @@ -561,14 +562,22 @@ static void fetch( .hash = *expectedHash, .references = {}}); - if (state.store->isValidPath(expectedPath)) { + // Try to get the path from the local store or substituters + try { + state.store->ensurePath(expectedPath); + debug("using substituted/cached path '%s' for '%s'", state.store->printStorePath(expectedPath), *url); state.allowAndSetStorePathString(expectedPath, v); return; + } catch (Error & e) { + debug( + "substitution of '%s' failed, will try to download: %s", + state.store->printStorePath(expectedPath), + e.what()); + // Fall through to download } } - // TODO: fetching may fail, yet the path may be substitutable. 
- // https://github.com/NixOS/nix/issues/4313 + // Download the file/tarball if substitution failed or no hash was provided auto storePath = unpack ? fetchToStore( state.fetchSettings, *state.store, @@ -579,7 +588,11 @@ static void fetch( if (expectedHash) { auto hash = unpack ? state.store->queryPathInfo(storePath)->narHash - : hashFile(HashAlgorithm::SHA256, state.store->toRealPath(storePath)); + : hashPath( + {state.store->requireStoreObjectAccessor(storePath)}, + FileSerialisationMethod::Flat, + HashAlgorithm::SHA256) + .hash; if (hash != *expectedHash) { state .error( diff --git a/src/libexpr/primops/fromTOML.cc b/src/libexpr/primops/fromTOML.cc index 3ab594905..0d165f5c3 100644 --- a/src/libexpr/primops/fromTOML.cc +++ b/src/libexpr/primops/fromTOML.cc @@ -92,7 +92,7 @@ static void prim_fromTOML(EvalState & state, const PosIdx pos, Value ** args, Va std::istringstream tomlStream(std::string{toml}); - auto visit = [&](auto & self, Value & v, toml::value t) -> void { + auto visit = [&](this auto & self, Value & v, toml::value t) -> void { switch (t.type()) { case toml::value_t::table: { auto table = toml::get(t); @@ -100,7 +100,7 @@ static void prim_fromTOML(EvalState & state, const PosIdx pos, Value ** args, Va for (auto & elem : table) { forceNoNullByte(elem.first); - self(self, attrs.alloc(elem.first), elem.second); + self(attrs.alloc(elem.first), elem.second); } v.mkAttrs(attrs); @@ -110,7 +110,7 @@ static void prim_fromTOML(EvalState & state, const PosIdx pos, Value ** args, Va auto list = state.buildList(array.size()); for (const auto & [n, v] : enumerate(list)) - self(self, *(v = state.allocValue()), array[n]); + self(*(v = state.allocValue()), array[n]); v.mkList(list); } break; case toml::value_t::boolean: @@ -139,7 +139,7 @@ static void prim_fromTOML(EvalState & state, const PosIdx pos, Value ** args, Va attrs.alloc("_type").mkStringNoCopy("timestamp"); std::ostringstream s; s << t; - auto str = toView(s); + auto str = s.view(); forceNoNullByte(str); attrs.alloc("value").mkString(str); v.mkAttrs(attrs); @@ -155,7 +155,6 @@ static void prim_fromTOML(EvalState & state, const PosIdx pos, Value ** args, Va try { visit( - visit, val, toml::parse( tomlStream, diff --git a/src/libexpr/print.cc b/src/libexpr/print.cc index 071addc1a..4776be033 100644 --- a/src/libexpr/print.cc +++ b/src/libexpr/print.cc @@ -461,7 +461,7 @@ private: std::ostringstream s; s << state.positions[v.lambda().fun->pos]; - output << " @ " << filterANSIEscapes(toView(s)); + output << " @ " << filterANSIEscapes(s.view()); } } else if (v.isPrimOp()) { if (v.primOp()) diff --git a/src/libexpr/value-to-xml.cc b/src/libexpr/value-to-xml.cc index 31400e439..d5959e894 100644 --- a/src/libexpr/value-to-xml.cc +++ b/src/libexpr/value-to-xml.cc @@ -145,14 +145,14 @@ static void printValueAsXML( posToXML(state, xmlAttrs, state.positions[v.lambda().fun->pos]); XMLOpenElement _(doc, "function", xmlAttrs); - if (v.lambda().fun->hasFormals()) { + if (auto formals = v.lambda().fun->getFormals()) { XMLAttrs attrs; if (v.lambda().fun->arg) attrs["name"] = state.symbols[v.lambda().fun->arg]; - if (v.lambda().fun->formals->ellipsis) + if (formals->ellipsis) attrs["ellipsis"] = "1"; XMLOpenElement _(doc, "attrspat", attrs); - for (auto & i : v.lambda().fun->formals->lexicographicOrder(state.symbols)) + for (auto & i : formals->lexicographicOrder(state.symbols)) doc.writeEmptyElement("attr", singletonAttrs("name", state.symbols[i.name])); } else doc.writeEmptyElement("varpat", singletonAttrs("name", 
state.symbols[v.lambda().fun->arg])); diff --git a/src/libexpr/value/context.cc b/src/libexpr/value/context.cc index 6eb313211..dcc577f05 100644 --- a/src/libexpr/value/context.cc +++ b/src/libexpr/value/context.cc @@ -9,8 +9,7 @@ NixStringContextElem NixStringContextElem::parse(std::string_view s0, const Expe { std::string_view s = s0; - std::function parseRest; - parseRest = [&]() -> SingleDerivedPath { + auto parseRest = [&](this auto & parseRest) -> SingleDerivedPath { // Case on whether there is a '!' size_t index = s.find("!"); if (index == std::string_view::npos) { diff --git a/src/libfetchers-c/meson.build b/src/libfetchers-c/meson.build index 3761b0df2..db415d917 100644 --- a/src/libfetchers-c/meson.build +++ b/src/libfetchers-c/meson.build @@ -32,7 +32,6 @@ add_project_arguments( ) subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'nix_api_fetchers.cc', diff --git a/src/libfetchers-tests/git-utils.cc b/src/libfetchers-tests/git-utils.cc index f9fae23da..774934d26 100644 --- a/src/libfetchers-tests/git-utils.cc +++ b/src/libfetchers-tests/git-utils.cc @@ -175,6 +175,12 @@ TEST_F(GitUtilsTest, peel_reference) TEST(GitUtils, isLegalRefName) { + ASSERT_TRUE(isLegalRefName("A/b")); + ASSERT_TRUE(isLegalRefName("AaA/b")); + ASSERT_TRUE(isLegalRefName("FOO/BAR/BAZ")); + ASSERT_TRUE(isLegalRefName("HEAD")); + ASSERT_TRUE(isLegalRefName("refs/tags/1.2.3")); + ASSERT_TRUE(isLegalRefName("refs/heads/master")); ASSERT_TRUE(isLegalRefName("foox")); ASSERT_TRUE(isLegalRefName("1337")); ASSERT_TRUE(isLegalRefName("foo.baz")); diff --git a/src/libfetchers-tests/meson.build b/src/libfetchers-tests/meson.build index 858d7f3af..a18f64d79 100644 --- a/src/libfetchers-tests/meson.build +++ b/src/libfetchers-tests/meson.build @@ -37,7 +37,6 @@ libgit2 = dependency('libgit2') deps_private += libgit2 subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'access-tokens.cc', @@ -64,7 +63,7 @@ this_exe = executable( test( meson.project_name(), this_exe, - env : asan_test_options_env + { + env : { '_NIX_TEST_UNIT_DATA' : meson.current_source_dir() / 'data', }, protocol : 'gtest', diff --git a/src/libfetchers-tests/package.nix b/src/libfetchers-tests/package.nix index 8e82430d7..780618725 100644 --- a/src/libfetchers-tests/package.nix +++ b/src/libfetchers-tests/package.nix @@ -61,7 +61,6 @@ mkMesonExecutable (finalAttrs: { buildInputs = [ writableTmpDirAsHomeHook ]; } '' - export ASAN_OPTIONS=abort_on_error=1:print_summary=1:detect_leaks=0 export _NIX_TEST_UNIT_DATA=${resolvePath ./data} ${stdenv.hostPlatform.emulator buildPackages} ${lib.getExe finalAttrs.finalPackage} touch $out diff --git a/src/libfetchers/fetch-to-store.cc b/src/libfetchers/fetch-to-store.cc index 6ce78e115..b1e8b9d72 100644 --- a/src/libfetchers/fetch-to-store.cc +++ b/src/libfetchers/fetch-to-store.cc @@ -1,6 +1,7 @@ #include "nix/fetchers/fetch-to-store.hh" #include "nix/fetchers/fetchers.hh" #include "nix/fetchers/fetch-settings.hh" +#include "nix/util/environment-variables.hh" namespace nix { @@ -27,14 +28,22 @@ StorePath fetchToStore( std::optional cacheKey; - if (!filter && path.accessor->fingerprint) { - cacheKey = makeFetchToStoreCacheKey(std::string{name}, *path.accessor->fingerprint, method, path.path.abs()); + auto [subpath, fingerprint] = filter ? 
std::pair>{path.path, std::nullopt} + : path.accessor->getFingerprint(path.path); + + if (fingerprint) { + cacheKey = makeFetchToStoreCacheKey(std::string{name}, *fingerprint, method, subpath.abs()); if (auto res = settings.getCache()->lookupStorePath(*cacheKey, store)) { debug("store path cache hit for '%s'", path); return res->storePath; } - } else + } else { + static auto barf = getEnv("_NIX_TEST_BARF_ON_UNCACHEABLE").value_or("") == "1"; + if (barf && !filter) + throw Error("source path '%s' is uncacheable (filter=%d)", path, (bool) filter); + // FIXME: could still provide in-memory caching keyed on `SourcePath`. debug("source path '%s' is uncacheable", path); + } Activity act( *logger, diff --git a/src/libfetchers/fetchers.cc b/src/libfetchers/fetchers.cc index 045aafdcb..c9c0fffa2 100644 --- a/src/libfetchers/fetchers.cc +++ b/src/libfetchers/fetchers.cc @@ -5,6 +5,7 @@ #include "nix/util/json-utils.hh" #include "nix/fetchers/fetch-settings.hh" #include "nix/fetchers/fetch-to-store.hh" +#include "nix/util/url.hh" #include @@ -65,6 +66,12 @@ Input Input::fromURL(const Settings & settings, const ParsedURL & url, bool requ } } + // Provide a helpful hint when user tries file+git instead of git+file + auto parsedScheme = parseUrlScheme(url.scheme); + if (parsedScheme.application == "file" && parsedScheme.transport == "git") { + throw Error("input '%s' is unsupported; did you mean 'git+file' instead of 'file+git'?", url); + } + throw Error("input '%s' is unsupported", url); } @@ -332,8 +339,7 @@ std::pair, Input> Input::getAccessorUnchecked(ref sto debug("using substituted/cached input '%s' in '%s'", to_string(), store->printStorePath(storePath)); - // We just ensured the store object was there - auto accessor = ref{store->getFSAccessor(storePath)}; + auto accessor = store->requireStoreObjectAccessor(storePath); accessor->fingerprint = getFingerprint(store); @@ -356,8 +362,10 @@ std::pair, Input> Input::getAccessorUnchecked(ref sto auto [accessor, result] = scheme->getAccessor(store, *this); - assert(!accessor->fingerprint); - accessor->fingerprint = result.getFingerprint(store); + if (!accessor->fingerprint) + accessor->fingerprint = result.getFingerprint(store); + else + result.cachedFingerprint = accessor->fingerprint; return {accessor, std::move(result)}; } @@ -511,10 +519,11 @@ using namespace nix; fetchers::PublicKey adl_serializer::from_json(const json & json) { fetchers::PublicKey res = {}; - if (auto type = optionalValueAt(json, "type")) + auto & obj = getObject(json); + if (auto * type = optionalValueAt(obj, "type")) res.type = getString(*type); - res.key = getString(valueAt(json, "key")); + res.key = getString(valueAt(obj, "key")); return res; } diff --git a/src/libfetchers/filtering-source-accessor.cc b/src/libfetchers/filtering-source-accessor.cc index a99ecacef..8f1b50eb9 100644 --- a/src/libfetchers/filtering-source-accessor.cc +++ b/src/libfetchers/filtering-source-accessor.cc @@ -16,15 +16,26 @@ std::string FilteringSourceAccessor::readFile(const CanonPath & path) return next->readFile(prefix / path); } +void FilteringSourceAccessor::readFile(const CanonPath & path, Sink & sink, std::function sizeCallback) +{ + checkAccess(path); + return next->readFile(prefix / path, sink, sizeCallback); +} + bool FilteringSourceAccessor::pathExists(const CanonPath & path) { return isAllowed(path) && next->pathExists(prefix / path); } std::optional FilteringSourceAccessor::maybeLstat(const CanonPath & path) +{ + return isAllowed(path) ? 
next->maybeLstat(prefix / path) : std::nullopt; +} + +SourceAccessor::Stat FilteringSourceAccessor::lstat(const CanonPath & path) { checkAccess(path); - return next->maybeLstat(prefix / path); + return next->lstat(prefix / path); } SourceAccessor::DirEntries FilteringSourceAccessor::readDirectory(const CanonPath & path) @@ -49,6 +60,13 @@ std::string FilteringSourceAccessor::showPath(const CanonPath & path) return displayPrefix + next->showPath(prefix / path) + displaySuffix; } +std::pair> FilteringSourceAccessor::getFingerprint(const CanonPath & path) +{ + if (fingerprint) + return {path, fingerprint}; + return next->getFingerprint(prefix / path); +} + void FilteringSourceAccessor::checkAccess(const CanonPath & path) { if (!isAllowed(path)) diff --git a/src/libfetchers/git-lfs-fetch.cc b/src/libfetchers/git-lfs-fetch.cc index 9688daa4a..936976e55 100644 --- a/src/libfetchers/git-lfs-fetch.cc +++ b/src/libfetchers/git-lfs-fetch.cc @@ -209,7 +209,7 @@ std::vector Fetch::fetchUrls(const std::vector & pointe auto url = api.endpoint + "/objects/batch"; const auto & authHeader = api.authHeader; FileTransferRequest request(parseURL(url)); - request.post = true; + request.method = HttpMethod::POST; Headers headers; if (authHeader.has_value()) headers.push_back({"Authorization", *authHeader}); @@ -219,7 +219,9 @@ std::vector Fetch::fetchUrls(const std::vector & pointe nlohmann::json oidList = pointerToPayload(pointers); nlohmann::json data = {{"operation", "download"}}; data["objects"] = oidList; - request.data = data.dump(); + auto payload = data.dump(); + StringSource source{payload}; + request.data = {source}; FileTransferResult result = getFileTransfer()->upload(request); auto responseString = result.data; diff --git a/src/libfetchers/git-utils.cc b/src/libfetchers/git-utils.cc index a3652e522..65587b43a 100644 --- a/src/libfetchers/git-utils.cc +++ b/src/libfetchers/git-utils.cc @@ -9,9 +9,11 @@ #include "nix/util/users.hh" #include "nix/util/fs-sink.hh" #include "nix/util/sync.hh" +#include "nix/util/util.hh" #include #include +#include #include #include #include @@ -28,6 +30,7 @@ #include #include #include +#include #include #include @@ -528,12 +531,12 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this auto act = (Activity *) payload; act->result( resFetchStatus, - fmt("%d/%d objects received, %d/%d deltas indexed, %.1f MiB", + fmt("%d/%d objects received, %d/%d deltas indexed, %s", stats->received_objects, stats->total_objects, stats->indexed_deltas, stats->total_deltas, - stats->received_bytes / (1024.0 * 1024.0))); + renderSize(stats->received_bytes))); return getInterrupted() ? -1 : 0; } @@ -1323,63 +1326,33 @@ GitRepo::WorkdirInfo GitRepo::getCachedWorkdirInfo(const std::filesystem::path & return workdirInfo; } -/** - * Checks that the git reference is valid and normalizes slash '/' sequences. - * - * Accepts shorthand references (one-level refnames are allowed). - */ -bool isValidRefNameAllowNormalizations(const std::string & refName) -{ - /* Unfortunately libgit2 doesn't expose the limit in headers, but its internal - limit is also 1024. */ - std::array normalizedRefBuffer; - - /* It would be nice to have a better API like git_reference_name_is_valid, but - * with GIT_REFERENCE_FORMAT_REFSPEC_SHORTHAND flag. libgit2 uses it internally - * but doesn't expose it in public headers [1]. 
- * [1]: - * https://github.com/libgit2/libgit2/blob/9d5f1bacc23594c2ba324c8f0d41b88bf0e9ef04/src/libgit2/refs.c#L1362-L1365 - */ - - auto res = git_reference_normalize_name( - normalizedRefBuffer.data(), - normalizedRefBuffer.size(), - refName.c_str(), - GIT_REFERENCE_FORMAT_ALLOW_ONELEVEL | GIT_REFERENCE_FORMAT_REFSPEC_SHORTHAND); - - return res == 0; -} - bool isLegalRefName(const std::string & refName) { initLibGit2(); - /* Since `git_reference_normalize_name` is the best API libgit2 has for verifying - * reference names with shorthands (see comment in normalizeRefName), we need to - * ensure that exceptions to the validity checks imposed by normalization [1] are checked - * explicitly. - * [1]: https://git-scm.com/docs/git-check-ref-format#Documentation/git-check-ref-format.txt---normalize - */ - /* Check for cases that don't get rejected by libgit2. * FIXME: libgit2 should reject this. */ if (refName == "@") return false; - /* Leading slashes and consecutive slashes are stripped during normalizatiton. */ - if (refName.starts_with('/') || refName.find("//") != refName.npos) - return false; - - /* Refer to libgit2. */ - if (!isValidRefNameAllowNormalizations(refName)) - return false; - /* libgit2 doesn't barf on DEL symbol. * FIXME: libgit2 should reject this. */ if (refName.find('\177') != refName.npos) return false; - return true; + for (auto * func : { + git_reference_name_is_valid, + git_branch_name_is_valid, + git_tag_name_is_valid, + }) { + int valid = 0; + if (func(&valid, refName.c_str())) + throw Error("checking git reference '%s': %s", refName, git_error_last()->message); + if (valid) + return true; + } + + return false; } } // namespace nix diff --git a/src/libfetchers/git.cc b/src/libfetchers/git.cc index f6f5c30ee..c8311c17f 100644 --- a/src/libfetchers/git.cc +++ b/src/libfetchers/git.cc @@ -164,8 +164,7 @@ struct GitInputScheme : InputScheme { std::optional inputFromURL(const Settings & settings, const ParsedURL & url, bool requireTree) const override { - auto parsedScheme = parseUrlScheme(url.scheme); - if (parsedScheme.application != "git") + if (url.scheme != "git" && parseUrlScheme(url.scheme).application != "git") return {}; auto url2(url); @@ -496,6 +495,36 @@ struct GitInputScheme : InputScheme Git interprets them as part of the file name. So get rid of them. */ url.query.clear(); + /* Backward compatibility hack: In old versions of Nix, if you had + a flake input like + + inputs.foo.url = "git+https://foo/bar?dir=subdir"; + + it would result in a lock file entry like + + "original": { + "dir": "subdir", + "type": "git", + "url": "https://foo/bar?dir=subdir" + } + + New versions of Nix remove `?dir=subdir` from the `url` field, + since the subdirectory is intended for `FlakeRef`, not the + fetcher (and specifically the remote server), that is, the + flakeref is parsed into + + "original": { + "dir": "subdir", + "type": "git", + "url": "https://foo/bar" + } + + However, new versions of nix parsing old flake.lock files would pass the dir= + query parameter in the "url" attribute to git, which will then complain. + + For this reason, we are filtering the `dir` query parameter from the URL + before passing it to git. 
*/ + url.query.erase("dir"); repoInfo.location = url; } @@ -893,8 +922,7 @@ struct GitInputScheme : InputScheme return makeFingerprint(*rev); else { auto repoInfo = getRepoInfo(input); - if (auto repoPath = repoInfo.getPath(); - repoPath && repoInfo.workdirInfo.headRev && repoInfo.workdirInfo.submodules.empty()) { + if (auto repoPath = repoInfo.getPath(); repoPath && repoInfo.workdirInfo.submodules.empty()) { /* Calculate a fingerprint that takes into account the deleted and modified/added files. */ HashSink hashSink{HashAlgorithm::SHA512}; @@ -907,7 +935,7 @@ struct GitInputScheme : InputScheme writeString("deleted:", hashSink); writeString(file.abs(), hashSink); } - return makeFingerprint(*repoInfo.workdirInfo.headRev) + return makeFingerprint(repoInfo.workdirInfo.headRev.value_or(nullRev)) + ";d=" + hashSink.finish().hash.to_string(HashFormat::Base16, false); } return std::nullopt; diff --git a/src/libfetchers/github.cc b/src/libfetchers/github.cc index 3b723d7d8..2479a57d2 100644 --- a/src/libfetchers/github.cc +++ b/src/libfetchers/github.cc @@ -399,7 +399,8 @@ struct GitHubInputScheme : GitArchiveInputScheme Headers headers = makeHeadersWithAuthTokens(*input.settings, host, input); auto downloadResult = downloadFile(store, *input.settings, url, "source", headers); - auto json = nlohmann::json::parse(store->getFSAccessor(downloadResult.storePath)->readFile(CanonPath::root)); + auto json = nlohmann::json::parse( + store->requireStoreObjectAccessor(downloadResult.storePath)->readFile(CanonPath::root)); return RefInfo{ .rev = Hash::parseAny(std::string{json["sha"]}, HashAlgorithm::SHA1), @@ -473,7 +474,8 @@ struct GitLabInputScheme : GitArchiveInputScheme Headers headers = makeHeadersWithAuthTokens(*input.settings, host, input); auto downloadResult = downloadFile(store, *input.settings, url, "source", headers); - auto json = nlohmann::json::parse(store->getFSAccessor(downloadResult.storePath)->readFile(CanonPath::root)); + auto json = nlohmann::json::parse( + store->requireStoreObjectAccessor(downloadResult.storePath)->readFile(CanonPath::root)); if (json.is_array() && json.size() >= 1 && json[0]["id"] != nullptr) { return RefInfo{.rev = Hash::parseAny(std::string(json[0]["id"]), HashAlgorithm::SHA1)}; @@ -548,13 +550,10 @@ struct SourceHutInputScheme : GitArchiveInputScheme std::string refUri; if (ref == "HEAD") { - auto file = store->toRealPath( - downloadFile(store, *input.settings, fmt("%s/HEAD", base_url), "source", headers).storePath); - std::ifstream is(file); - std::string line; - getline(is, line); + auto downloadFileResult = downloadFile(store, *input.settings, fmt("%s/HEAD", base_url), "source", headers); + auto contents = store->requireStoreObjectAccessor(downloadFileResult.storePath)->readFile(CanonPath::root); - auto remoteLine = git::parseLsRemoteLine(line); + auto remoteLine = git::parseLsRemoteLine(getLine(contents).first); if (!remoteLine) { throw BadURL("in '%d', couldn't resolve HEAD ref '%d'", input.to_string(), ref); } @@ -564,9 +563,10 @@ struct SourceHutInputScheme : GitArchiveInputScheme } std::regex refRegex(refUri); - auto file = store->toRealPath( - downloadFile(store, *input.settings, fmt("%s/info/refs", base_url), "source", headers).storePath); - std::ifstream is(file); + auto downloadFileResult = + downloadFile(store, *input.settings, fmt("%s/info/refs", base_url), "source", headers); + auto contents = store->requireStoreObjectAccessor(downloadFileResult.storePath)->readFile(CanonPath::root); + std::istringstream is(contents); std::string line; 
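// (Aside on ref validation: isLegalRefName(), rewritten in git-utils.cc above,
// now simply takes the union of libgit2's ref/branch/tag validators. A usage
// sketch, in line with the new unit tests:
//
//     assert(isLegalRefName("HEAD"));              // pseudoref; tag-valid
//     assert(isLegalRefName("refs/heads/master")); // fully qualified ref
//     assert(!isLegalRefName("@"));                // still rejected explicitly
// )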
std::optional id; diff --git a/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh b/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh index f8a57bfb3..5e98caa58 100644 --- a/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh +++ b/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh @@ -36,8 +36,12 @@ struct FilteringSourceAccessor : SourceAccessor std::string readFile(const CanonPath & path) override; + void readFile(const CanonPath & path, Sink & sink, std::function sizeCallback) override; + bool pathExists(const CanonPath & path) override; + Stat lstat(const CanonPath & path) override; + std::optional maybeLstat(const CanonPath & path) override; DirEntries readDirectory(const CanonPath & path) override; @@ -46,6 +50,8 @@ struct FilteringSourceAccessor : SourceAccessor std::string showPath(const CanonPath & path) override; + std::pair> getFingerprint(const CanonPath & path) override; + /** * Call `makeNotAllowedError` to throw a `RestrictedPathError` * exception if `isAllowed()` returns `false` for `path`. diff --git a/src/libfetchers/include/nix/fetchers/git-utils.hh b/src/libfetchers/include/nix/fetchers/git-utils.hh index 07b985541..8357ce4cd 100644 --- a/src/libfetchers/include/nix/fetchers/git-utils.hh +++ b/src/libfetchers/include/nix/fetchers/git-utils.hh @@ -158,9 +158,12 @@ struct Setter }; /** - * Checks that the git reference is valid and normalized. + * Checks that the string can be a valid git reference, branch or tag name. + * Accepts shorthand references (one-level refnames are allowed), pseudorefs + * like `HEAD`. * - * Accepts shorthand references (one-level refnames are allowed). + * @note This is a coarse test to make sure that the refname is at least something + * that Git can make sense of. */ bool isLegalRefName(const std::string & refName); diff --git a/src/libfetchers/mercurial.cc b/src/libfetchers/mercurial.cc index bf460d9c6..41bf6e2aa 100644 --- a/src/libfetchers/mercurial.cc +++ b/src/libfetchers/mercurial.cc @@ -329,9 +329,7 @@ struct MercurialInputScheme : InputScheme Input input(_input); auto storePath = fetchToStore(store, input); - - // We just added it, it should be there. - auto accessor = ref{store->getFSAccessor(storePath)}; + auto accessor = store->requireStoreObjectAccessor(storePath); accessor->setPathDisplay("«" + input.to_string() + "»"); diff --git a/src/libfetchers/meson.build b/src/libfetchers/meson.build index 5b53a147b..d34dd4f43 100644 --- a/src/libfetchers/meson.build +++ b/src/libfetchers/meson.build @@ -32,7 +32,6 @@ libgit2 = dependency('libgit2', version : '>= 1.9') deps_private += libgit2 subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'attrs.cc', diff --git a/src/libfetchers/path.cc b/src/libfetchers/path.cc index 3c4b9c06d..c4b5e2f1e 100644 --- a/src/libfetchers/path.cc +++ b/src/libfetchers/path.cc @@ -123,8 +123,6 @@ struct PathInputScheme : InputScheme auto absPath = getAbsPath(input); - Activity act(*logger, lvlTalkative, actUnknown, fmt("copying %s to the store", absPath)); - // FIXME: check whether access to 'path' is allowed. auto storePath = store->maybeParseStorePath(absPath.string()); @@ -133,43 +131,33 @@ struct PathInputScheme : InputScheme time_t mtime = 0; if (!storePath || storePath->name() != "source" || !store->isValidPath(*storePath)) { + Activity act(*logger, lvlTalkative, actUnknown, fmt("copying %s to the store", absPath)); // FIXME: try to substitute storePath. 
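// (The code below dumps the path to the store, then pre-seeds the fetcher
// cache so that a later fetchToStore() for the same accessor is a hit. A
// sketch of the key construction, using the SRI NAR hash as the fingerprint;
// `name`/`settings` stand in for the real locals:
//
//     auto fp = fmt("path:%s", info->narHash.to_string(HashFormat::SRI, true));
//     auto key = makeFetchToStoreCacheKey(name, fp, ContentAddressMethod::Raw::NixArchive, "/");
//     settings.getCache()->upsert(key, store, {}, storePath);
// )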
auto src = sinkToSource( [&](Sink & sink) { mtime = dumpPathAndGetMtime(absPath.string(), sink, defaultPathFilter); }); storePath = store->addToStoreFromDump(*src, "source"); } - // To avoid copying the path again to the /nix/store, we need to add a cache entry. - ContentAddressMethod method = ContentAddressMethod::Raw::NixArchive; - auto fp = getFingerprint(store, input); - if (fp) { - auto cacheKey = makeFetchToStoreCacheKey(input.getName(), *fp, method, "/"); - input.settings->getCache()->upsert(cacheKey, *store, {}, *storePath); - } + auto accessor = store->requireStoreObjectAccessor(*storePath); + + // To prevent `fetchToStore()` copying the path again to the Nix + // store, pre-create an entry in the fetcher cache. + auto info = store->queryPathInfo(*storePath); + accessor->fingerprint = + fmt("path:%s", info->narHash.to_string(HashFormat::SRI, true)); + input.settings->getCache()->upsert( + makeFetchToStoreCacheKey( + input.getName(), *accessor->fingerprint, ContentAddressMethod::Raw::NixArchive, "/"), + *store, + {}, + *storePath); /* Trust the lastModified value supplied by the user, if any. It's not a "secure" attribute so we don't care. */ if (!input.getLastModified()) input.attrs.insert_or_assign("lastModified", uint64_t(mtime)); - return {ref{store->getFSAccessor(*storePath)}, std::move(input)}; - } - - std::optional getFingerprint(ref store, const Input & input) const override - { - if (isRelative(input)) - return std::nullopt; - - /* If this path is in the Nix store, use the hash of the - store object and the subpath. */ - auto path = getAbsPath(input); - try { - auto [storePath, subPath] = store->toStorePath(path.string()); - auto info = store->queryPathInfo(storePath); - return fmt("path:%s:%s", info->narHash.to_string(HashFormat::Base16, false), subPath); - } catch (Error &) { - return std::nullopt; - } + return {accessor, std::move(input)}; } std::optional experimentalFeature() const override diff --git a/src/libfetchers/tarball.cc b/src/libfetchers/tarball.cc index 31d5ab460..863a0d680 100644 --- a/src/libfetchers/tarball.cc +++ b/src/libfetchers/tarball.cc @@ -42,7 +42,7 @@ DownloadFileResult downloadFile( if (cached && !cached->expired) return useCached(); - FileTransferRequest request(ValidURL{url}); + FileTransferRequest request(VerbatimURL{url}); request.headers = headers; if (cached) request.expectedETag = getStrAttr(cached->value, "etag"); @@ -107,13 +107,13 @@ DownloadFileResult downloadFile( static DownloadTarballResult downloadTarball_( const Settings & settings, const std::string & urlS, const Headers & headers, const std::string & displayPrefix) { - ValidURL url = urlS; + ParsedURL url = parseURL(urlS); // Some friendly error messages for common mistakes. // Namely lets catch when the url is a local file path, but // it is not in fact a tarball. - if (url.scheme() == "file") { - std::filesystem::path localPath = renderUrlPathEnsureLegal(url.path()); + if (url.scheme == "file") { + std::filesystem::path localPath = renderUrlPathEnsureLegal(url.path); if (!exists(localPath)) { throw Error("tarball '%s' does not exist.", localPath); } @@ -164,7 +164,7 @@ static DownloadTarballResult downloadTarball_( /* Note: if the download is cached, `importTarball()` will receive no data, which causes it to import an empty tarball. */ - auto archive = !url.path().empty() && hasSuffix(toLower(url.path().back()), ".zip") ? ({ + auto archive = !url.path.empty() && hasSuffix(toLower(url.path.back()), ".zip") ? 
({ /* In streaming mode, libarchive doesn't handle symlinks in zip files correctly (#10649). So write the entire file to disk so libarchive can access it @@ -178,7 +178,7 @@ static DownloadTarballResult downloadTarball_( } TarArchive{path}; }) - : TarArchive{*source}; + : TarArchive{*source}; auto tarballCache = getTarballCache(); auto parseSink = tarballCache->getFileSystemObjectSink(); auto lastModified = unpackTarfileToSink(archive, *parseSink); diff --git a/src/libflake-c/meson.build b/src/libflake-c/meson.build index d0d45cfa8..fddb39bdf 100644 --- a/src/libflake-c/meson.build +++ b/src/libflake-c/meson.build @@ -32,7 +32,6 @@ deps_public_maybe_subproject = [ subdir('nix-meson-build-support/subprojects') subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'nix_api_flake.cc', diff --git a/src/libflake-tests/flakeref.cc b/src/libflake-tests/flakeref.cc index e2cb91bb8..eb8b56ea2 100644 --- a/src/libflake-tests/flakeref.cc +++ b/src/libflake-tests/flakeref.cc @@ -1,8 +1,15 @@ #include +#include +#include +#include #include "nix/fetchers/fetch-settings.hh" #include "nix/flake/flakeref.hh" #include "nix/fetchers/attrs.hh" +#include "nix/fetchers/fetchers.hh" +#include "nix/util/configuration.hh" +#include "nix/util/error.hh" +#include "nix/util/experimental-features.hh" namespace nix { @@ -199,6 +206,28 @@ INSTANTIATE_TEST_SUITE_P( .description = "flake_id_ref_branch_ignore_empty_segments_ref_rev", .expectedUrl = "flake:nixpkgs/branch/2aae6c35c94fcfb415dbe95f408b9ce91ee846ed", }, + InputFromURLTestCase{ + .url = "git://somewhere/repo?ref=branch", + .attrs = + { + {"type", Attr("git")}, + {"ref", Attr("branch")}, + {"url", Attr("git://somewhere/repo")}, + }, + .description = "plain_git_with_ref", + .expectedUrl = "git://somewhere/repo?ref=branch", + }, + InputFromURLTestCase{ + .url = "git+https://somewhere.aaaaaaa/repo?ref=branch", + .attrs = + { + {"type", Attr("git")}, + {"ref", Attr("branch")}, + {"url", Attr("https://somewhere.aaaaaaa/repo")}, + }, + .description = "git_https_with_ref", + .expectedUrl = "git+https://somewhere.aaaaaaa/repo?ref=branch", + }, InputFromURLTestCase{ // Note that this is different from above because the "flake id" shorthand // doesn't allow this. 
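(Aside on the tarball.cc hunk above: zip archives are spooled to a temporary
file before unpacking, because streaming them trips libarchive's symlink
handling (#10649). A sketch of the spool-then-open pattern, with hypothetical
helper names:

    std::filesystem::path tmp = tmpDir / "archive.zip";
    writeFile(tmp.string(), *source);  // drain the download to disk first
    TarArchive archive{tmp};           // seekable, so symlinks extract correctly

Tar inputs skip the spooling and stay fully streamed: TarArchive{*source}.)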
diff --git a/src/libflake-tests/meson.build b/src/libflake-tests/meson.build index 41ae6cf3d..59094abe8 100644 --- a/src/libflake-tests/meson.build +++ b/src/libflake-tests/meson.build @@ -34,7 +34,6 @@ gtest = dependency('gtest', main : true) deps_private += gtest subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'flakeref.cc', @@ -59,7 +58,7 @@ this_exe = executable( test( meson.project_name(), this_exe, - env : asan_test_options_env + { + env : { '_NIX_TEST_UNIT_DATA' : meson.current_source_dir() / 'data', 'NIX_CONFIG' : 'extra-experimental-features = flakes', 'HOME' : meson.current_build_dir() / 'test-home', diff --git a/src/libflake-tests/nix_api_flake.cc b/src/libflake-tests/nix_api_flake.cc index f7e0cb719..da7f01401 100644 --- a/src/libflake-tests/nix_api_flake.cc +++ b/src/libflake-tests/nix_api_flake.cc @@ -1,15 +1,17 @@ +#include +#include +#include + #include "nix/util/file-system.hh" #include "nix_api_store.h" #include "nix_api_util.h" #include "nix_api_expr.h" #include "nix_api_value.h" #include "nix_api_flake.h" - -#include "nix/expr/tests/nix_api_expr.hh" #include "nix/util/tests/string_callback.hh" - -#include -#include +#include "nix/store/tests/nix_api_store.hh" +#include "nix/util/tests/nix_api_util.hh" +#include "nix_api_fetchers.h" namespace nixC { diff --git a/src/libflake-tests/package.nix b/src/libflake-tests/package.nix index 09812a57b..397ef4192 100644 --- a/src/libflake-tests/package.nix +++ b/src/libflake-tests/package.nix @@ -59,7 +59,6 @@ mkMesonExecutable (finalAttrs: { buildInputs = [ writableTmpDirAsHomeHook ]; } ('' - export ASAN_OPTIONS=abort_on_error=1:print_summary=1:detect_leaks=0 export _NIX_TEST_UNIT_DATA=${resolvePath ./data} export NIX_CONFIG="extra-experimental-features = flakes" ${stdenv.hostPlatform.emulator buildPackages} ${lib.getExe finalAttrs.finalPackage} diff --git a/src/libflake-tests/url-name.cc b/src/libflake-tests/url-name.cc index 81ba516c8..64cbe5c9d 100644 --- a/src/libflake-tests/url-name.cc +++ b/src/libflake-tests/url-name.cc @@ -1,6 +1,8 @@ -#include "nix/flake/url-name.hh" #include +#include "nix/flake/url-name.hh" +#include "nix/util/url.hh" + namespace nix { /* ----------- tests for url-name.hh --------------------------------------------------*/ diff --git a/src/libflake/config.cc b/src/libflake/config.cc index c9071f601..c248ed0a6 100644 --- a/src/libflake/config.cc +++ b/src/libflake/config.cc @@ -1,9 +1,30 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + #include "nix/util/users.hh" #include "nix/util/config-global.hh" #include "nix/flake/settings.hh" #include "nix/flake/flake.hh" - -#include +#include "nix/util/ansicolor.hh" +#include "nix/util/configuration.hh" +#include "nix/util/file-system.hh" +#include "nix/util/fmt.hh" +#include "nix/util/logging.hh" +#include "nix/util/strings.hh" +#include "nix/util/types.hh" +#include "nix/util/util.hh" namespace nix::flake { diff --git a/src/libflake/flake-primops.cc b/src/libflake/flake-primops.cc index 7c5ce01b2..eeff9a966 100644 --- a/src/libflake/flake-primops.cc +++ b/src/libflake/flake-primops.cc @@ -1,8 +1,34 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + #include "nix/flake/flake-primops.hh" #include "nix/expr/eval.hh" #include "nix/flake/flake.hh" #include "nix/flake/flakeref.hh" #include "nix/flake/settings.hh" +#include "nix/expr/attr-set.hh" +#include 
"nix/expr/eval-error.hh" +#include "nix/expr/eval-inline.hh" +#include "nix/expr/eval-settings.hh" +#include "nix/expr/symbol-table.hh" +#include "nix/expr/value.hh" +#include "nix/fetchers/attrs.hh" +#include "nix/fetchers/fetchers.hh" +#include "nix/util/configuration.hh" +#include "nix/util/error.hh" +#include "nix/util/experimental-features.hh" +#include "nix/util/pos-idx.hh" +#include "nix/util/pos-table.hh" +#include "nix/util/source-path.hh" +#include "nix/util/types.hh" +#include "nix/util/util.hh" namespace nix::flake::primops { diff --git a/src/libflake/flake.cc b/src/libflake/flake.cc index 3acf589a5..42385712c 100644 --- a/src/libflake/flake.cc +++ b/src/libflake/flake.cc @@ -1,9 +1,32 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + #include "nix/util/terminal.hh" +#include "nix/util/ref.hh" +#include "nix/util/environment-variables.hh" #include "nix/flake/flake.hh" #include "nix/expr/eval.hh" +#include "nix/expr/eval-cache.hh" #include "nix/expr/eval-settings.hh" #include "nix/flake/lockfile.hh" -#include "nix/expr/primops.hh" #include "nix/expr/eval-inline.hh" #include "nix/store/store-api.hh" #include "nix/fetchers/fetchers.hh" @@ -11,34 +34,41 @@ #include "nix/fetchers/fetch-settings.hh" #include "nix/flake/settings.hh" #include "nix/expr/value-to-json.hh" -#include "nix/store/local-fs-store.hh" #include "nix/fetchers/fetch-to-store.hh" #include "nix/util/memory-source-accessor.hh" #include "nix/fetchers/input-cache.hh" - -#include +#include "nix/expr/attr-set.hh" +#include "nix/expr/eval-error.hh" +#include "nix/expr/nixexpr.hh" +#include "nix/expr/symbol-table.hh" +#include "nix/expr/value.hh" +#include "nix/expr/value/context.hh" +#include "nix/fetchers/attrs.hh" +#include "nix/fetchers/registry.hh" +#include "nix/flake/flakeref.hh" +#include "nix/store/path.hh" +#include "nix/util/canon-path.hh" +#include "nix/util/configuration.hh" +#include "nix/util/error.hh" +#include "nix/util/experimental-features.hh" +#include "nix/util/file-system.hh" +#include "nix/util/fmt.hh" +#include "nix/util/hash.hh" +#include "nix/util/logging.hh" +#include "nix/util/pos-idx.hh" +#include "nix/util/pos-table.hh" +#include "nix/util/position.hh" +#include "nix/util/source-path.hh" +#include "nix/util/types.hh" +#include "nix/util/util.hh" namespace nix { +struct SourceAccessor; using namespace flake; namespace flake { -static StorePath copyInputToStore( - EvalState & state, fetchers::Input & input, const fetchers::Input & originalInput, ref accessor) -{ - auto storePath = fetchToStore(*input.settings, *state.store, accessor, FetchMode::Copy, input.getName()); - - state.allowPath(storePath); - - auto narHash = state.store->queryPathInfo(storePath)->narHash; - input.attrs.insert_or_assign("narHash", narHash.to_string(HashFormat::SRI, true)); - - assert(!originalInput.getNarHash() || storePath == originalInput.computeStorePath(*state.store)); - - return storePath; -} - static void forceTrivialValue(EvalState & state, Value & value, const PosIdx pos) { if (value.isThunk() && value.isTrivial()) @@ -251,12 +281,15 @@ static Flake readFlake( if (auto outputs = vInfo.attrs()->get(sOutputs)) { expectType(state, nFunction, *outputs->value, outputs->pos); - if (outputs->value->isLambda() && outputs->value->lambda().fun->hasFormals()) { - for (auto & formal : outputs->value->lambda().fun->formals->formals) { - if (formal.name != state.s.self) - 
flake.inputs.emplace( - state.symbols[formal.name], - FlakeInput{.ref = parseFlakeRef(state.fetchSettings, std::string(state.symbols[formal.name]))}); + if (outputs->value->isLambda()) { + if (auto formals = outputs->value->lambda().fun->getFormals()) { + for (auto & formal : formals->formals) { + if (formal.name != state.s.self) + flake.inputs.emplace( + state.symbols[formal.name], + FlakeInput{ + .ref = parseFlakeRef(state.fetchSettings, std::string(state.symbols[formal.name]))}); + } } } @@ -360,11 +393,14 @@ static Flake getFlake( lockedRef = FlakeRef(std::move(cachedInput2.lockedInput), newLockedRef.subdir); } - // Copy the tree to the store. - auto storePath = copyInputToStore(state, lockedRef.input, originalRef.input, cachedInput.accessor); - // Re-parse flake.nix from the store. - return readFlake(state, originalRef, resolvedRef, lockedRef, state.storePath(storePath), lockRootAttrPath); + return readFlake( + state, + originalRef, + resolvedRef, + lockedRef, + state.storePath(state.mountInput(lockedRef.input, originalRef.input, cachedInput.accessor)), + lockRootAttrPath); } Flake getFlake(EvalState & state, const FlakeRef & originalRef, fetchers::UseRegistries useRegistries) @@ -469,8 +505,8 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, /* Get the overrides (i.e. attributes of the form 'inputs.nixops.inputs.nixpkgs.url = ...'). */ - std::function addOverrides; - addOverrides = [&](const FlakeInput & input, const InputAttrPath & prefix) { + auto addOverrides = + [&](this const auto & addOverrides, const FlakeInput & input, const InputAttrPath & prefix) -> void { for (auto & [idOverride, inputOverride] : input.overrides) { auto inputAttrPath(prefix); inputAttrPath.push_back(idOverride); @@ -721,11 +757,10 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, auto lockedRef = FlakeRef(std::move(cachedInput.lockedInput), input.ref->subdir); - // FIXME: allow input to be lazy. - auto storePath = copyInputToStore( - state, lockedRef.input, input.ref->input, cachedInput.accessor); - - return {state.storePath(storePath), lockedRef}; + return { + state.storePath( + state.mountInput(lockedRef.input, input.ref->input, cachedInput.accessor)), + lockedRef}; } }(); @@ -875,7 +910,7 @@ static ref makeInternalFS() internalFS->setPathDisplay("«flakes-internal»", ""); internalFS->addFile( CanonPath("call-flake.nix"), -#include "call-flake.nix.gen.hh" +#include "call-flake.nix.gen.hh" // IWYU pragma: keep ); return internalFS; } @@ -937,8 +972,6 @@ void callFlake(EvalState & state, const LockedFlake & lockedFlake, Value & vRes) state.callFunction(*vCallFlake, args, vRes, noPos); } -} // namespace flake - std::optional LockedFlake::getFingerprint(ref store, const fetchers::Settings & fetchSettings) const { if (lockFile.isUnlocked(fetchSettings)) @@ -966,4 +999,41 @@ std::optional LockedFlake::getFingerprint(ref store, const f Flake::~Flake() {} +ref openEvalCache(EvalState & state, ref lockedFlake) +{ + auto fingerprint = state.settings.useEvalCache && state.settings.pureEval + ? lockedFlake->getFingerprint(state.store, state.fetchSettings) + : std::nullopt; + auto rootLoader = [&state, lockedFlake]() { + /* For testing whether the evaluation cache is + complete. 
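With NIX_ALLOW_EVAL=0 in the environment, any cache miss below becomes a hard error, so a fully warmed cache can be verified by re-running the evaluation under that variable.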
*/ + if (getEnv("NIX_ALLOW_EVAL").value_or("1") == "0") + throw Error("not everything is cached, but evaluation is not allowed"); + + auto vFlake = state.allocValue(); + callFlake(state, *lockedFlake, *vFlake); + + state.forceAttrs(*vFlake, noPos, "while parsing cached flake data"); + + auto aOutputs = vFlake->attrs()->get(state.symbols.create("outputs")); + assert(aOutputs); + + return aOutputs->value; + }; + + if (fingerprint) { + auto search = state.evalCaches.find(fingerprint.value()); + if (search == state.evalCaches.end()) { + search = state.evalCaches + .emplace(fingerprint.value(), make_ref(fingerprint, state, rootLoader)) + .first; + } + return search->second; + } else { + return make_ref(std::nullopt, state, rootLoader); + } +} + +} // namespace flake + } // namespace nix diff --git a/src/libflake/flakeref.cc b/src/libflake/flakeref.cc index 38979783d..a26f269c3 100644 --- a/src/libflake/flakeref.cc +++ b/src/libflake/flakeref.cc @@ -1,10 +1,39 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + #include "nix/flake/flakeref.hh" -#include "nix/store/store-api.hh" #include "nix/util/url.hh" #include "nix/util/url-parts.hh" #include "nix/fetchers/fetchers.hh" +#include "nix/util/error.hh" +#include "nix/util/file-system.hh" +#include "nix/util/fmt.hh" +#include "nix/util/logging.hh" +#include "nix/util/strings.hh" +#include "nix/util/util.hh" +#include "nix/fetchers/attrs.hh" +#include "nix/fetchers/registry.hh" +#include "nix/store/outputs-spec.hh" +#include "nix/util/ref.hh" +#include "nix/util/types.hh" namespace nix { +class Store; +struct SourceAccessor; + +namespace fetchers { +struct Settings; +} // namespace fetchers #if 0 // 'dir' path elements cannot start with a '.'. We also reject diff --git a/src/libflake/include/nix/flake/flake-primops.hh b/src/libflake/include/nix/flake/flake-primops.hh index 35a7128f4..b333e33d7 100644 --- a/src/libflake/include/nix/flake/flake-primops.hh +++ b/src/libflake/include/nix/flake/flake-primops.hh @@ -1,7 +1,12 @@ #pragma once #include "nix/expr/eval.hh" -#include "nix/flake/settings.hh" + +namespace nix { +namespace flake { +struct Settings; +} // namespace flake +} // namespace nix namespace nix::flake::primops { diff --git a/src/libflake/include/nix/flake/flake.hh b/src/libflake/include/nix/flake/flake.hh index 13002b47c..79a50f0f7 100644 --- a/src/libflake/include/nix/flake/flake.hh +++ b/src/libflake/include/nix/flake/flake.hh @@ -5,6 +5,7 @@ #include "nix/flake/flakeref.hh" #include "nix/flake/lockfile.hh" #include "nix/expr/value.hh" +#include "nix/expr/eval-cache.hh" namespace nix { @@ -218,6 +219,11 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & flakeRe void callFlake(EvalState & state, const LockedFlake & lockedFlake, Value & v); +/** + * Open an evaluation cache for a flake. 
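+ * When pure evaluation and the eval cache are enabled, the cache is keyed on the locked flake's fingerprint and shared through the evaluator; otherwise a fresh, non-persistent cache is returned.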
+ */ +ref openEvalCache(EvalState & state, ref lockedFlake); + } // namespace flake void emitTreeAttrs( diff --git a/src/libflake/include/nix/flake/flakeref.hh b/src/libflake/include/nix/flake/flakeref.hh index c8c536bce..1af8c5afd 100644 --- a/src/libflake/include/nix/flake/flakeref.hh +++ b/src/libflake/include/nix/flake/flakeref.hh @@ -2,9 +2,11 @@ ///@file #include +#include +#include +#include +#include -#include "nix/util/types.hh" -#include "nix/fetchers/fetchers.hh" #include "nix/store/outputs-spec.hh" #include "nix/fetchers/registry.hh" @@ -12,6 +14,10 @@ namespace nix { class Store; +namespace fetchers { +struct Settings; +} // namespace fetchers + typedef std::string FlakeId; /** diff --git a/src/libflake/include/nix/flake/settings.hh b/src/libflake/include/nix/flake/settings.hh index 618ed4d38..7187a3294 100644 --- a/src/libflake/include/nix/flake/settings.hh +++ b/src/libflake/include/nix/flake/settings.hh @@ -1,9 +1,10 @@ #pragma once ///@file -#include "nix/util/configuration.hh" - #include +#include + +#include "nix/util/configuration.hh" namespace nix { // Forward declarations diff --git a/src/libflake/include/nix/flake/url-name.hh b/src/libflake/include/nix/flake/url-name.hh index b95d2dff6..d313db33b 100644 --- a/src/libflake/include/nix/flake/url-name.hh +++ b/src/libflake/include/nix/flake/url-name.hh @@ -1,9 +1,8 @@ -#include "nix/util/url.hh" -#include "nix/util/url-parts.hh" -#include "nix/util/util.hh" -#include "nix/util/split.hh" +#include +#include namespace nix { +struct ParsedURL; /** * Try to extract a reasonably unique and meaningful, human-readable diff --git a/src/libflake/lockfile.cc b/src/libflake/lockfile.cc index f381a57e6..ecad5df6f 100644 --- a/src/libflake/lockfile.cc +++ b/src/libflake/lockfile.cc @@ -1,15 +1,49 @@ -#include "nix/fetchers/fetch-settings.hh" -#include "nix/flake/settings.hh" -#include "nix/flake/lockfile.hh" -#include "nix/store/store-api.hh" -#include "nix/util/strings.hh" - +#include +#include +#include +#include +#include +#include +#include #include #include - -#include #include -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "nix/fetchers/fetch-settings.hh" +#include "nix/flake/lockfile.hh" +#include "nix/util/strings.hh" +#include "nix/fetchers/attrs.hh" +#include "nix/fetchers/fetchers.hh" +#include "nix/flake/flakeref.hh" +#include "nix/store/path.hh" +#include "nix/util/ansicolor.hh" +#include "nix/util/configuration.hh" +#include "nix/util/error.hh" +#include "nix/util/fmt.hh" +#include "nix/util/hash.hh" +#include "nix/util/logging.hh" +#include "nix/util/ref.hh" +#include "nix/util/types.hh" +#include "nix/util/util.hh" + +namespace nix { +class Store; +} // namespace nix namespace nix::flake { @@ -43,8 +77,8 @@ LockedNode::LockedNode(const fetchers::Settings & fetchSettings, const nlohmann: if (!lockedRef.input.isLocked() && !lockedRef.input.isRelative()) { if (lockedRef.input.getNarHash()) warn( - "Lock file entry '%s' is unlocked (e.g. lacks a Git revision) but does have a NAR hash. " - "This is deprecated since such inputs are verifiable but may not be reproducible.", + "Lock file entry '%s' is unlocked (e.g. lacks a Git revision) but is checked by NAR hash. 
" + "This is not reproducible and will break after garbage collection or when shared.", lockedRef.to_string()); else throw Error( @@ -113,11 +147,10 @@ LockFile::LockFile(const fetchers::Settings & fetchSettings, std::string_view co if (version < 5 || version > 7) throw Error("lock file '%s' has unsupported version %d", path, version); - std::map> nodeMap; + std::string rootKey = json["root"]; + std::map> nodeMap{{rootKey, root}}; - std::function getInputs; - - getInputs = [&](Node & node, const nlohmann::json & jsonNode) { + [&](this const auto & getInputs, Node & node, const nlohmann::json & jsonNode) { if (jsonNode.find("inputs") == jsonNode.end()) return; for (auto & i : jsonNode["inputs"].items()) { @@ -145,11 +178,7 @@ LockFile::LockFile(const fetchers::Settings & fetchSettings, std::string_view co throw Error("lock file contains cycle to root node"); } } - }; - - std::string rootKey = json["root"]; - nodeMap.insert_or_assign(rootKey, root); - getInputs(*root, json["nodes"][rootKey]); + }(*root, json["nodes"][rootKey]); // FIXME: check that there are no cycles in version >= 7. Cycles // between inputs are only possible using 'follows' indirections. @@ -163,9 +192,7 @@ std::pair LockFile::toJSON() const KeyMap nodeKeys; boost::unordered_flat_set keys; - std::function node)> dumpNode; - - dumpNode = [&](std::string key, ref node) -> std::string { + auto dumpNode = [&](this auto & dumpNode, std::string key, ref node) -> std::string { auto k = nodeKeys.find(node); if (k != nodeKeys.end()) return k->second; @@ -242,17 +269,13 @@ std::optional LockFile::isUnlocked(const fetchers::Settings & fetchSet { std::set> nodes; - std::function node)> visit; - - visit = [&](ref node) { + [&](this const auto & visit, ref node) { if (!nodes.insert(node).second) return; for (auto & i : node->inputs) if (auto child = std::get_if<0>(&i.second)) visit(*child); - }; - - visit(root); + }(root); /* Return whether the input is either locked, or, if `allow-dirty-locks` is enabled, it has a NAR hash. 
In the @@ -298,9 +321,7 @@ std::map LockFile::getAllInputs() const std::set> done; std::map res; - std::function node)> recurse; - - recurse = [&](const InputAttrPath & prefix, ref node) { + [&](this const auto & recurse, const InputAttrPath & prefix, ref node) { if (!done.insert(node).second) return; @@ -311,9 +332,7 @@ std::map LockFile::getAllInputs() const if (auto child = std::get_if<0>(&input)) recurse(inputAttrPath, *child); } - }; - - recurse({}, root); + }({}, root); return res; } diff --git a/src/libflake/meson.build b/src/libflake/meson.build index 3bd04fcf4..58916ecd9 100644 --- a/src/libflake/meson.build +++ b/src/libflake/meson.build @@ -29,7 +29,6 @@ nlohmann_json = dependency('nlohmann_json', version : '>= 3.9') deps_public += nlohmann_json subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') subdir('nix-meson-build-support/generate-header') diff --git a/src/libflake/settings.cc b/src/libflake/settings.cc index e77bded30..52fa1b49d 100644 --- a/src/libflake/settings.cc +++ b/src/libflake/settings.cc @@ -1,5 +1,9 @@ +#include + #include "nix/flake/settings.hh" #include "nix/flake/flake-primops.hh" +#include "nix/expr/eval-settings.hh" +#include "nix/expr/eval.hh" namespace nix::flake { diff --git a/src/libflake/url-name.cc b/src/libflake/url-name.cc index 3bba3692e..f4b5c6a7f 100644 --- a/src/libflake/url-name.cc +++ b/src/libflake/url-name.cc @@ -1,6 +1,10 @@ -#include "nix/flake/url-name.hh" #include -#include +#include +#include + +#include "nix/flake/url-name.hh" +#include "nix/util/strings.hh" +#include "nix/util/url.hh" namespace nix { diff --git a/src/libmain-c/meson.build b/src/libmain-c/meson.build index 2ac2b799b..36332fdb7 100644 --- a/src/libmain-c/meson.build +++ b/src/libmain-c/meson.build @@ -28,7 +28,6 @@ deps_public_maybe_subproject = [ subdir('nix-meson-build-support/subprojects') subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'nix_api_main.cc', diff --git a/src/libmain/include/nix/main/shared.hh b/src/libmain/include/nix/main/shared.hh index 3bba9205d..12c54c89b 100644 --- a/src/libmain/include/nix/main/shared.hh +++ b/src/libmain/include/nix/main/shared.hh @@ -93,8 +93,6 @@ extern volatile ::sig_atomic_t blockInt; /* GC helpers. 
*/ -std::string showBytes(uint64_t bytes); - struct GCResults; struct PrintFreed diff --git a/src/libmain/meson.build b/src/libmain/meson.build index 21bfbea3e..2ac59924e 100644 --- a/src/libmain/meson.build +++ b/src/libmain/meson.build @@ -53,7 +53,6 @@ config_priv_h = configure_file( ) subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'common-args.cc', diff --git a/src/libmain/progress-bar.cc b/src/libmain/progress-bar.cc index c00f5d86b..edec8460d 100644 --- a/src/libmain/progress-bar.cc +++ b/src/libmain/progress-bar.cc @@ -183,7 +183,7 @@ public: std::ostringstream oss; showErrorInfo(oss, ei, loggerSettings.showTrace.get()); - log(*state, ei.level, toView(oss)); + log(*state, ei.level, oss.view()); } void log(State & state, Verbosity lvl, std::string_view s) diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc index 7187e9720..19733fb3e 100644 --- a/src/libmain/shared.cc +++ b/src/libmain/shared.cc @@ -6,6 +6,7 @@ #include "nix/main/loggers.hh" #include "nix/main/progress-bar.hh" #include "nix/util/signals.hh" +#include "nix/util/util.hh" #include #include @@ -64,18 +65,19 @@ void printMissing(ref store, const MissingPaths & missing, Verbosity lvl) } if (!missing.willSubstitute.empty()) { - const float downloadSizeMiB = missing.downloadSize / (1024.f * 1024.f); - const float narSizeMiB = missing.narSize / (1024.f * 1024.f); if (missing.willSubstitute.size() == 1) { printMsg( - lvl, "this path will be fetched (%.2f MiB download, %.2f MiB unpacked):", downloadSizeMiB, narSizeMiB); + lvl, + "this path will be fetched (%s download, %s unpacked):", + renderSize(missing.downloadSize), + renderSize(missing.narSize)); } else { printMsg( lvl, - "these %d paths will be fetched (%.2f MiB download, %.2f MiB unpacked):", + "these %d paths will be fetched (%s download, %s unpacked):", missing.willSubstitute.size(), - downloadSizeMiB, - narSizeMiB); + renderSize(missing.downloadSize), + renderSize(missing.narSize)); } std::vector willSubstituteSorted = {}; std::for_each(missing.willSubstitute.begin(), missing.willSubstitute.end(), [&](const StorePath & p) { @@ -320,34 +322,29 @@ int handleExceptions(const std::string & programName, std::function fun) std::string error = ANSI_RED "error:" ANSI_NORMAL " "; try { try { - try { - fun(); - } catch (...) { - /* Subtle: we have to make sure that any `interrupted' - condition is discharged before we reach printMsg() - below, since otherwise it will throw an (uncaught) - exception. */ - setInterruptThrown(); - throw; - } - } catch (Exit & e) { - return e.status; - } catch (UsageError & e) { - logError(e.info()); - printError("Try '%1% --help' for more information.", programName); - return 1; - } catch (BaseError & e) { - logError(e.info()); - return e.info().status; - } catch (std::bad_alloc & e) { - printError(error + "out of memory"); - return 1; - } catch (std::exception & e) { - printError(error + e.what()); - return 1; + fun(); + } catch (...) { + /* Subtle: we have to make sure that any `interrupted' + condition is discharged before we reach printMsg() + below, since otherwise it will throw an (uncaught) + exception. */ + setInterruptThrown(); + throw; } - } catch (...) { - /* In case logger also throws just give up. 
*/ + } catch (Exit & e) { + return e.status; + } catch (UsageError & e) { + logError(e.info()); + printError("Try '%1% --help' for more information.", programName); + return 1; + } catch (BaseError & e) { + logError(e.info()); + return e.info().status; + } catch (std::bad_alloc & e) { + printError(error + "out of memory"); + return 1; + } catch (std::exception & e) { + printError(error + e.what()); return 1; } @@ -411,7 +408,7 @@ RunPager::~RunPager() PrintFreed::~PrintFreed() { if (show) - std::cout << fmt("%d store paths deleted, %s freed\n", results.paths.size(), showBytes(results.bytesFreed)); + std::cout << fmt("%d store paths deleted, %s freed\n", results.paths.size(), renderSize(results.bytesFreed)); } } // namespace nix diff --git a/src/libstore-c/meson.build b/src/libstore-c/meson.build index a92771efc..c6b6174c7 100644 --- a/src/libstore-c/meson.build +++ b/src/libstore-c/meson.build @@ -26,7 +26,6 @@ deps_public_maybe_subproject = [ subdir('nix-meson-build-support/subprojects') subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'nix_api_store.cc', diff --git a/src/libstore-c/nix_api_store.cc b/src/libstore-c/nix_api_store.cc index c4c17f127..313a77563 100644 --- a/src/libstore-c/nix_api_store.cc +++ b/src/libstore-c/nix_api_store.cc @@ -126,6 +126,36 @@ StorePath * nix_store_parse_path(nix_c_context * context, Store * store, const c NIXC_CATCH_ERRS_NULL } +nix_err nix_store_get_fs_closure( + nix_c_context * context, + Store * store, + const StorePath * store_path, + bool flip_direction, + bool include_outputs, + bool include_derivers, + void * userdata, + void (*callback)(nix_c_context * context, void * userdata, const StorePath * store_path)) +{ + if (context) + context->last_err_code = NIX_OK; + try { + const auto nixStore = store->ptr; + + nix::StorePathSet set; + nixStore->computeFSClosure(store_path->path, set, flip_direction, include_outputs, include_derivers); + + if (callback) { + for (const auto & path : set) { + const StorePath tmp{path}; + callback(context, userdata, &tmp); + if (context && context->last_err_code != NIX_OK) + return context->last_err_code; + } + } + } + NIXC_CATCH_ERRS +} + nix_err nix_store_realise( nix_c_context * context, Store * store, @@ -143,11 +173,21 @@ nix_err nix_store_realise( const auto nixStore = store->ptr; auto results = nixStore->buildPathsWithResults(paths, nix::bmNormal, nixStore); + assert(results.size() == 1); + + // Check if any builds failed + for (auto & result : results) { + if (auto * failureP = result.tryGetFailure()) + failureP->rethrow(); + } + if (callback) { for (const auto & result : results) { - for (const auto & [outputName, realisation] : result.builtOutputs) { - StorePath p{realisation.outPath}; - callback(userdata, outputName.c_str(), &p); + if (auto * success = result.tryGetSuccess()) { + for (const auto & [outputName, realisation] : success->builtOutputs) { + StorePath p{realisation.outPath}; + callback(userdata, outputName.c_str(), &p); + } } } } diff --git a/src/libstore-c/nix_api_store.h b/src/libstore-c/nix_api_store.h index e76e376b4..964f6d6d5 100644 --- a/src/libstore-c/nix_api_store.h +++ b/src/libstore-c/nix_api_store.h @@ -186,6 +186,8 @@ nix_err nix_store_real_path( * @param[in] path Path to build * @param[in] userdata data to pass to every callback invocation * @param[in] callback called for every realised output + * @return NIX_OK if the build succeeded, or an error code if scheduling, building, registering outputs, or copying failed.
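+ * Outputs are reported to the callback in alphabetical order of output name.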
+ * On error, the callback is never invoked and error information is stored in context. */ nix_err nix_store_realise( nix_c_context * context, @@ -245,6 +247,35 @@ void nix_derivation_free(nix_derivation * drv); */ nix_err nix_store_copy_closure(nix_c_context * context, Store * srcStore, Store * dstStore, StorePath * path); +/** + * @brief Gets the closure of a specific store path + * + * @note The callback borrows each StorePath only for the duration of the call. + * + * @param[out] context Optional, stores error information + * @param[in] store nix store reference + * @param[in] store_path The path to compute from + * @param[in] flip_direction If false, compute the forward closure (paths referenced by any store path in the closure). + * If true, compute the backward closure (paths that reference any store path in the closure). + * @param[in] include_outputs If flip_direction is false: for any derivation in the closure, include its outputs. + * If flip_direction is true: for any output in the closure, include derivations that produce + * it. + * @param[in] include_derivers If flip_direction is false: for any output in the closure, include the derivation that + * produced it. + * If flip_direction is true: for any derivation in the closure, include its outputs. + * @param[in] callback The function to call for every store path, in no particular order + * @param[in] userdata The userdata to pass to the callback + */ +nix_err nix_store_get_fs_closure( + nix_c_context * context, + Store * store, + const StorePath * store_path, + bool flip_direction, + bool include_outputs, + bool include_derivers, + void * userdata, + void (*callback)(nix_c_context * context, void * userdata, const StorePath * store_path)); + // cffi end #ifdef __cplusplus } diff --git a/src/libstore-test-support/include/nix/store/tests/libstore.hh b/src/libstore-test-support/include/nix/store/tests/libstore.hh index 28b29fa31..d79b55312 100644 --- a/src/libstore-test-support/include/nix/store/tests/libstore.hh +++ b/src/libstore-test-support/include/nix/store/tests/libstore.hh @@ -19,14 +19,13 @@ public: } protected: + LibStoreTest(ref store) + : store(std::move(store)) + { + } + LibStoreTest() - : store(openStore({ - .variant = - StoreReference::Specified{ - .scheme = "dummy", - }, - .params = {}, - })) + : LibStoreTest(openStore("dummy://")) { } diff --git a/src/libstore-test-support/meson.build b/src/libstore-test-support/meson.build index e929ae2b4..8617225d7 100644 --- a/src/libstore-test-support/meson.build +++ b/src/libstore-test-support/meson.build @@ -29,7 +29,6 @@ rapidcheck = dependency('rapidcheck') deps_public += rapidcheck subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'derived-path.cc', diff --git a/src/libstore-tests/common-protocol.cc b/src/libstore-tests/common-protocol.cc index 35fca165d..2c001957b 100644 --- a/src/libstore-tests/common-protocol.cc +++ b/src/libstore-tests/common-protocol.cc @@ -112,32 +112,34 @@ CHARACTERIZATION_TEST( "realisation", (std::tuple{ Realisation{ - .id = - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, + }, + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, }, Realisation{ - 
.id = - { - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, - .dependentRealisations = - { + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, + .dependentRealisations = { - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "quux", + { + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "quux", + }, + StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, - StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, - }, + }, + { + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, }, })) diff --git a/src/libstore-tests/content-address.cc b/src/libstore-tests/content-address.cc index 51d591c38..0474fb2e0 100644 --- a/src/libstore-tests/content-address.cc +++ b/src/libstore-tests/content-address.cc @@ -1,6 +1,7 @@ #include #include "nix/store/content-address.hh" +#include "nix/util/tests/json-characterization.hh" namespace nix { @@ -8,33 +9,93 @@ namespace nix { * ContentAddressMethod::parse, ContentAddressMethod::render * --------------------------------------------------------------------------*/ -TEST(ContentAddressMethod, testRoundTripPrintParse_1) +static auto methods = ::testing::Values( + std::pair{ContentAddressMethod::Raw::Text, "text"}, + std::pair{ContentAddressMethod::Raw::Flat, "flat"}, + std::pair{ContentAddressMethod::Raw::NixArchive, "nar"}, + std::pair{ContentAddressMethod::Raw::Git, "git"}); + +struct ContentAddressMethodTest : ::testing::Test, + ::testing::WithParamInterface> +{}; + +TEST_P(ContentAddressMethodTest, testRoundTripPrintParse_1) { - for (ContentAddressMethod cam : { - ContentAddressMethod::Raw::Text, - ContentAddressMethod::Raw::Flat, - ContentAddressMethod::Raw::NixArchive, - ContentAddressMethod::Raw::Git, - }) { - EXPECT_EQ(ContentAddressMethod::parse(cam.render()), cam); - } + auto & [cam, _] = GetParam(); + EXPECT_EQ(ContentAddressMethod::parse(cam.render()), cam); } -TEST(ContentAddressMethod, testRoundTripPrintParse_2) +TEST_P(ContentAddressMethodTest, testRoundTripPrintParse_2) { - for (const std::string_view camS : { - "text", - "flat", - "nar", - "git", - }) { - EXPECT_EQ(ContentAddressMethod::parse(camS).render(), camS); - } + auto & [cam, camS] = GetParam(); + EXPECT_EQ(ContentAddressMethod::parse(camS).render(), camS); } +INSTANTIATE_TEST_SUITE_P(ContentAddressMethod, ContentAddressMethodTest, methods); + TEST(ContentAddressMethod, testParseContentAddressMethodOptException) { EXPECT_THROW(ContentAddressMethod::parse("narwhal"), UsageError); } +/* ---------------------------------------------------------------------------- + * JSON + * --------------------------------------------------------------------------*/ + +class ContentAddressTest : public virtual CharacterizationTest +{ + std::filesystem::path unitTestData = getUnitTestData() / "content-address"; + +public: + + /** + * We set these in tests rather than the regular globals so we don't have + * to worry about race conditions if the tests run concurrently. 
+ */ + ExperimentalFeatureSettings mockXpSettings; + + std::filesystem::path goldenMaster(std::string_view testStem) const override + { + return unitTestData / testStem; + } +}; + +using nlohmann::json; + +struct ContentAddressJsonTest : ContentAddressTest, + JsonCharacterizationTest, + ::testing::WithParamInterface> +{}; + +TEST_P(ContentAddressJsonTest, from_json) +{ + auto & [name, expected] = GetParam(); + readJsonTest(name, expected); +} + +TEST_P(ContentAddressJsonTest, to_json) +{ + auto & [name, value] = GetParam(); + writeJsonTest(name, value); +} + +INSTANTIATE_TEST_SUITE_P( + ContentAddressJSON, + ContentAddressJsonTest, + ::testing::Values( + std::pair{ + "text", + ContentAddress{ + .method = ContentAddressMethod::Raw::Text, + .hash = hashString(HashAlgorithm::SHA256, "asdf"), + }, + }, + std::pair{ + "nar", + ContentAddress{ + .method = ContentAddressMethod::Raw::NixArchive, + .hash = hashString(HashAlgorithm::SHA256, "qwer"), + }, + })); + } // namespace nix diff --git a/src/libstore-tests/data/content-address/nar.json b/src/libstore-tests/data/content-address/nar.json new file mode 100644 index 000000000..21e065cd3 --- /dev/null +++ b/src/libstore-tests/data/content-address/nar.json @@ -0,0 +1,8 @@ +{ + "hash": { + "algorithm": "sha256", + "format": "base64", + "hash": "9vLqj0XYoFfJVmoz+ZR02i5camYE1zYSFlDicwxvsKM=" + }, + "method": "nar" +} diff --git a/src/libstore-tests/data/content-address/text.json b/src/libstore-tests/data/content-address/text.json new file mode 100644 index 000000000..04bc8ac20 --- /dev/null +++ b/src/libstore-tests/data/content-address/text.json @@ -0,0 +1,8 @@ +{ + "hash": { + "algorithm": "sha256", + "format": "base64", + "hash": "8OTC92xYkW7CWPJGhRvqCR0U1CR6L8PhhpRGGxgW4Ts=" + }, + "method": "text" +} diff --git a/src/libstore-tests/data/store-reference/local_3.txt b/src/libstore-tests/data/store-reference/local_3.txt index 2a67a3426..cd015d74f 100644 --- a/src/libstore-tests/data/store-reference/local_3.txt +++ b/src/libstore-tests/data/store-reference/local_3.txt @@ -1 +1 @@ -local://?root=/foo bar/baz \ No newline at end of file +local://?root=/foo%20bar/baz \ No newline at end of file diff --git a/src/libstore-tests/data/store-reference/local_3_no_percent.txt b/src/libstore-tests/data/store-reference/local_3_no_percent.txt new file mode 100644 index 000000000..2a67a3426 --- /dev/null +++ b/src/libstore-tests/data/store-reference/local_3_no_percent.txt @@ -0,0 +1 @@ +local://?root=/foo bar/baz \ No newline at end of file diff --git a/src/libstore-tests/data/store-reference/ssh_unbracketed_ipv6_4.txt b/src/libstore-tests/data/store-reference/ssh_unbracketed_ipv6_4.txt new file mode 100644 index 000000000..e093c3f30 --- /dev/null +++ b/src/libstore-tests/data/store-reference/ssh_unbracketed_ipv6_4.txt @@ -0,0 +1 @@ +ssh://userinfo@[fea5:23e1:3916:fc24:cb52:2837:2ecb:ea8e%eth0]?a=b&c=d \ No newline at end of file diff --git a/src/libstore-tests/data/store-reference/ssh_unbracketed_ipv6_5.txt b/src/libstore-tests/data/store-reference/ssh_unbracketed_ipv6_5.txt new file mode 100644 index 000000000..8375d3c6d --- /dev/null +++ b/src/libstore-tests/data/store-reference/ssh_unbracketed_ipv6_5.txt @@ -0,0 +1 @@ +ssh://userinfo@[fea5:23e1:3916:fc24:cb52:2837:2ecb:ea8e%25eth0]?a=b&c=d \ No newline at end of file diff --git a/src/libstore-tests/data/store-reference/ssh_unbracketed_ipv6_6.txt b/src/libstore-tests/data/store-reference/ssh_unbracketed_ipv6_6.txt new file mode 100644 index 000000000..f5a09c2f7 --- /dev/null +++ 
b/src/libstore-tests/data/store-reference/ssh_unbracketed_ipv6_6.txt @@ -0,0 +1 @@ +ssh://userinfo@fea5:23e1:3916:fc24:cb52:2837:2ecb:ea8e%25?a=b&c=d \ No newline at end of file diff --git a/src/libstore-tests/data/store-reference/ssh_unbracketed_ipv6_7.txt b/src/libstore-tests/data/store-reference/ssh_unbracketed_ipv6_7.txt new file mode 100644 index 000000000..3bef5e73f --- /dev/null +++ b/src/libstore-tests/data/store-reference/ssh_unbracketed_ipv6_7.txt @@ -0,0 +1 @@ +ssh://userinfo@fea5:23e1:3916:fc24:cb52:2837:2ecb:ea8e%eth0?a=b&c=d \ No newline at end of file diff --git a/src/libstore-tests/data/store-reference/ssh_unbracketed_ipv6_8.txt b/src/libstore-tests/data/store-reference/ssh_unbracketed_ipv6_8.txt new file mode 100644 index 000000000..3db9f9910 --- /dev/null +++ b/src/libstore-tests/data/store-reference/ssh_unbracketed_ipv6_8.txt @@ -0,0 +1 @@ +ssh://fea5:23e1:3916:fc24:cb52:2837:2ecb:ea8e%eth0?a=b&c=d \ No newline at end of file diff --git a/src/libstore-tests/data/store-reference/ssh_unbracketed_ipv6_9.txt b/src/libstore-tests/data/store-reference/ssh_unbracketed_ipv6_9.txt new file mode 100644 index 000000000..ad199cfde --- /dev/null +++ b/src/libstore-tests/data/store-reference/ssh_unbracketed_ipv6_9.txt @@ -0,0 +1 @@ +ssh://fea5:23e1:3916:fc24:cb52:2837:2ecb:ea8e%eth0 \ No newline at end of file diff --git a/src/libstore-tests/derivation-advanced-attrs.cc b/src/libstore-tests/derivation-advanced-attrs.cc index 9c13bf048..02bc8fa24 100644 --- a/src/libstore-tests/derivation-advanced-attrs.cc +++ b/src/libstore-tests/derivation-advanced-attrs.cc @@ -14,7 +14,7 @@ namespace nix { -using nlohmann::json; +using namespace nlohmann; class DerivationAdvancedAttrsTest : public CharacterizationTest, public LibStoreTest { @@ -51,44 +51,44 @@ using BothFixtures = ::testing::TypesreadTest(NAME ".json", [&](const auto & encoded_) { \ - auto encoded = json::parse(encoded_); \ - /* Use DRV file instead of C++ literal as source of truth. */ \ - auto aterm = readFile(this->goldenMaster(NAME ".drv")); \ - auto expected = parseDerivation(*this->store, std::move(aterm), NAME, this->mockXpSettings); \ - Derivation got = Derivation::fromJSON(encoded, this->mockXpSettings); \ - EXPECT_EQ(got, expected); \ - }); \ - } \ - \ - TYPED_TEST(DerivationAdvancedAttrsBothTest, Derivation_##STEM##_to_json) \ - { \ - this->writeTest( \ - NAME ".json", \ - [&]() -> json { \ - /* Use DRV file instead of C++ literal as source of truth. */ \ - auto aterm = readFile(this->goldenMaster(NAME ".drv")); \ - return parseDerivation(*this->store, std::move(aterm), NAME, this->mockXpSettings).toJSON(); \ - }, \ - [](const auto & file) { return json::parse(readFile(file)); }, \ - [](const auto & file, const auto & got) { return writeFile(file, got.dump(2) + "\n"); }); \ - } \ - \ - TYPED_TEST(DerivationAdvancedAttrsBothTest, Derivation_##STEM##_from_aterm) \ - { \ - this->readTest(NAME ".drv", [&](auto encoded) { \ - /* Use JSON file instead of C++ literal as source of truth. 
*/ \ - auto json = json::parse(readFile(this->goldenMaster(NAME ".json"))); \ - auto expected = Derivation::fromJSON(json, this->mockXpSettings); \ - auto got = parseDerivation(*this->store, std::move(encoded), NAME, this->mockXpSettings); \ - EXPECT_EQ(got.toJSON(), expected.toJSON()); \ - EXPECT_EQ(got, expected); \ - }); \ - } \ - \ +#define TEST_ATERM_JSON(STEM, NAME) \ + TYPED_TEST(DerivationAdvancedAttrsBothTest, Derivation_##STEM##_from_json) \ + { \ + this->readTest(NAME ".json", [&](const auto & encoded_) { \ + auto encoded = json::parse(encoded_); \ + /* Use DRV file instead of C++ literal as source of truth. */ \ + auto aterm = readFile(this->goldenMaster(NAME ".drv")); \ + auto expected = parseDerivation(*this->store, std::move(aterm), NAME, this->mockXpSettings); \ + Derivation got = adl_serializer::from_json(encoded, this->mockXpSettings); \ + EXPECT_EQ(got, expected); \ + }); \ + } \ + \ + TYPED_TEST(DerivationAdvancedAttrsBothTest, Derivation_##STEM##_to_json) \ + { \ + this->writeTest( \ + NAME ".json", \ + [&]() -> json { \ + /* Use DRV file instead of C++ literal as source of truth. */ \ + auto aterm = readFile(this->goldenMaster(NAME ".drv")); \ + return parseDerivation(*this->store, std::move(aterm), NAME, this->mockXpSettings); \ + }, \ + [](const auto & file) { return json::parse(readFile(file)); }, \ + [](const auto & file, const auto & got) { return writeFile(file, got.dump(2) + "\n"); }); \ + } \ + \ + TYPED_TEST(DerivationAdvancedAttrsBothTest, Derivation_##STEM##_from_aterm) \ + { \ + this->readTest(NAME ".drv", [&](auto encoded) { \ + /* Use JSON file instead of C++ literal as source of truth. */ \ + auto j = json::parse(readFile(this->goldenMaster(NAME ".json"))); \ + auto expected = adl_serializer::from_json(j, this->mockXpSettings); \ + auto got = parseDerivation(*this->store, std::move(encoded), NAME, this->mockXpSettings); \ + EXPECT_EQ(static_cast(got), static_cast(expected)); \ + EXPECT_EQ(got, expected); \ + }); \ + } \ + \ /* No corresponding write test, because we need to read the drv to write the json file */ TEST_ATERM_JSON(advancedAttributes, "advanced-attributes-defaults"); diff --git a/src/libstore-tests/derivation.cc b/src/libstore-tests/derivation.cc index 65a5d011d..6b33e5442 100644 --- a/src/libstore-tests/derivation.cc +++ b/src/libstore-tests/derivation.cc @@ -66,23 +66,17 @@ TEST_F(DynDerivationTest, BadATerm_oldVersionDynDeps) FormatError); } -#define MAKE_OUTPUT_JSON_TEST_P(FIXTURE) \ - TEST_P(FIXTURE, from_json) \ - { \ - const auto & [name, expected] = GetParam(); \ - /* Don't use readJsonTest because we want to check experimental \ - features. 
*/ \ - readTest(Path{"output-"} + name + ".json", [&](const auto & encoded_) { \ - json j = json::parse(encoded_); \ - DerivationOutput got = DerivationOutput::fromJSON(j, mockXpSettings); \ - ASSERT_EQ(got, expected); \ - }); \ - } \ - \ - TEST_P(FIXTURE, to_json) \ - { \ - const auto & [name, value] = GetParam(); \ - writeJsonTest("output-" + name, value); \ +#define MAKE_OUTPUT_JSON_TEST_P(FIXTURE) \ + TEST_P(FIXTURE, from_json) \ + { \ + const auto & [name, expected] = GetParam(); \ + readJsonTest(Path{"output-"} + name, expected, mockXpSettings); \ + } \ + \ + TEST_P(FIXTURE, to_json) \ + { \ + const auto & [name, value] = GetParam(); \ + writeJsonTest("output-" + name, value); \ } struct DerivationOutputJsonTest : DerivationTest, @@ -193,13 +187,7 @@ INSTANTIATE_TEST_SUITE_P( TEST_P(FIXTURE, from_json) \ { \ const auto & drv = GetParam(); \ - /* Don't use readJsonTest because we want to check experimental \ - features. */ \ - readTest(drv.name + ".json", [&](const auto & encoded_) { \ - auto encoded = json::parse(encoded_); \ - Derivation got = Derivation::fromJSON(encoded, mockXpSettings); \ - ASSERT_EQ(got, drv); \ - }); \ + readJsonTest(drv.name, drv, mockXpSettings); \ } \ \ TEST_P(FIXTURE, to_json) \ @@ -213,7 +201,8 @@ INSTANTIATE_TEST_SUITE_P( const auto & drv = GetParam(); \ readTest(drv.name + ".drv", [&](auto encoded) { \ auto got = parseDerivation(*store, std::move(encoded), drv.name, mockXpSettings); \ - ASSERT_EQ(got.toJSON(), drv.toJSON()); \ + using nlohmann::json; \ + ASSERT_EQ(static_cast(got), static_cast(drv)); \ ASSERT_EQ(got, drv); \ }); \ } \ diff --git a/src/libstore-tests/derived-path.cc b/src/libstore-tests/derived-path.cc index 6e7648f25..70e789c0c 100644 --- a/src/libstore-tests/derived-path.cc +++ b/src/libstore-tests/derived-path.cc @@ -3,13 +3,13 @@ #include #include -#include "nix/util/tests/characterization.hh" +#include "nix/util/tests/json-characterization.hh" #include "nix/store/tests/derived-path.hh" #include "nix/store/tests/libstore.hh" namespace nix { -class DerivedPathTest : public CharacterizationTest, public LibStoreTest +class DerivedPathTest : public virtual CharacterizationTest, public LibStoreTest { std::filesystem::path unitTestData = getUnitTestData() / "derived-path"; @@ -123,25 +123,51 @@ RC_GTEST_FIXTURE_PROP(DerivedPathTest, prop_round_rip, (const DerivedPath & o)) using nlohmann::json; -#define TEST_JSON(TYPE, NAME, VAL) \ - static const TYPE NAME = VAL; \ - \ - TEST_F(DerivedPathTest, NAME##_from_json) \ - { \ - readTest(#NAME ".json", [&](const auto & encoded_) { \ - auto encoded = json::parse(encoded_); \ - TYPE got = static_cast(encoded); \ - ASSERT_EQ(got, NAME); \ - }); \ - } \ - \ - TEST_F(DerivedPathTest, NAME##_to_json) \ - { \ - writeTest( \ - #NAME ".json", \ - [&]() -> json { return static_cast(NAME); }, \ - [](const auto & file) { return json::parse(readFile(file)); }, \ - [](const auto & file, const auto & got) { return writeFile(file, got.dump(2) + "\n"); }); \ +struct SingleDerivedPathJsonTest : DerivedPathTest, + JsonCharacterizationTest, + ::testing::WithParamInterface +{}; + +struct DerivedPathJsonTest : DerivedPathTest, + JsonCharacterizationTest, + ::testing::WithParamInterface +{}; + +#define TEST_JSON(TYPE, NAME, VAL) \ + static const TYPE NAME = VAL; \ + \ + TEST_F(TYPE##JsonTest, NAME##_from_json) \ + { \ + readJsonTest(#NAME, NAME); \ + } \ + \ + TEST_F(TYPE##JsonTest, NAME##_to_json) \ + { \ + writeJsonTest(#NAME, NAME); \ + } + +#define TEST_JSON_XP_DYN(TYPE, NAME, VAL) \ + static const TYPE NAME = VAL; 
\ + \ + TEST_F(TYPE##JsonTest, NAME##_from_json_throws_without_xp) \ + { \ + std::optional ret; \ + readTest(#NAME ".json", [&](const auto & encoded_) { ret = json::parse(encoded_); }); \ + if (ret) { \ + EXPECT_THROW(nlohmann::adl_serializer::from_json(*ret), MissingExperimentalFeature); \ + } \ + } \ + \ + TEST_F(TYPE##JsonTest, NAME##_from_json) \ + { \ + ExperimentalFeatureSettings xpSettings; \ + xpSettings.set("experimental-features", "dynamic-derivations"); \ + readJsonTest(#NAME, NAME, xpSettings); \ + } \ + \ + TEST_F(TYPE##JsonTest, NAME##_to_json) \ + { \ + writeJsonTest(#NAME, NAME); \ } TEST_JSON( @@ -156,7 +182,7 @@ TEST_JSON( .output = "bar", })); -TEST_JSON( +TEST_JSON_XP_DYN( SingleDerivedPath, single_built_built, (SingleDerivedPath::Built{ @@ -179,7 +205,7 @@ TEST_JSON( .outputs = OutputsSpec::Names{"bar", "baz"}, })); -TEST_JSON( +TEST_JSON_XP_DYN( DerivedPath, multi_built_built, (DerivedPath::Built{ @@ -191,7 +217,7 @@ TEST_JSON( .outputs = OutputsSpec::Names{"baz", "quux"}, })); -TEST_JSON( +TEST_JSON_XP_DYN( DerivedPath, multi_built_built_wildcard, (DerivedPath::Built{ diff --git a/src/libstore-tests/dummy-store.cc b/src/libstore-tests/dummy-store.cc new file mode 100644 index 000000000..3dd8137a3 --- /dev/null +++ b/src/libstore-tests/dummy-store.cc @@ -0,0 +1,38 @@ +#include + +#include "nix/store/dummy-store-impl.hh" +#include "nix/store/globals.hh" +#include "nix/store/realisation.hh" + +namespace nix { + +TEST(DummyStore, realisation_read) +{ + initLibStore(/*loadConfig=*/false); + + auto store = [] { + auto cfg = make_ref(StoreReference::Params{}); + cfg->readOnly = false; + return cfg->openDummyStore(); + }(); + + auto drvHash = Hash::parseExplicitFormatUnprefixed( + "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", HashAlgorithm::SHA256, HashFormat::Base16); + + auto outputName = "foo"; + + EXPECT_EQ(store->queryRealisation({drvHash, outputName}), nullptr); + + UnkeyedRealisation value{ + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"}, + }; + + store->buildTrace.insert({drvHash, {{outputName, make_ref(value)}}}); + + auto value2 = store->queryRealisation({drvHash, outputName}); + + ASSERT_TRUE(value2); + EXPECT_EQ(*value2, value); +} + +} // namespace nix diff --git a/src/libstore-tests/meson.build b/src/libstore-tests/meson.build index 915c10a38..4d464ad89 100644 --- a/src/libstore-tests/meson.build +++ b/src/libstore-tests/meson.build @@ -52,7 +52,6 @@ gtest = dependency('gmock') deps_private += gtest subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'common-protocol.cc', @@ -61,6 +60,7 @@ sources = files( 'derivation.cc', 'derived-path.cc', 'downstream-placeholder.cc', + 'dummy-store.cc', 'http-binary-cache-store.cc', 'legacy-ssh-store.cc', 'local-binary-cache-store.cc', @@ -77,12 +77,13 @@ sources = files( 'realisation.cc', 'references.cc', 's3-binary-cache-store.cc', - 's3.cc', + 's3-url.cc', 'serve-protocol.cc', 'ssh-store.cc', 'store-reference.cc', 'uds-remote-store.cc', 'worker-protocol.cc', + 'write-derivation.cc', ) include_dirs = [ include_directories('.') ] @@ -104,7 +105,7 @@ this_exe = executable( test( meson.project_name(), this_exe, - env : asan_test_options_env + { + env : { '_NIX_TEST_UNIT_DATA' : meson.current_source_dir() / 'data', 'HOME' : meson.current_build_dir() / 'test-home', 'NIX_REMOTE' : meson.current_build_dir() / 'test-home' / 'store', @@ -138,7 +139,7 @@ if get_option('benchmarks') benchmark( 'nix-store-benchmarks', benchmark_exe, - env : 
asan_test_options_env + { + env : { '_NIX_TEST_UNIT_DATA' : meson.current_source_dir() / 'data', }, ) diff --git a/src/libstore-tests/nix_api_store.cc b/src/libstore-tests/nix_api_store.cc index dfd554ec1..228b8069f 100644 --- a/src/libstore-tests/nix_api_store.cc +++ b/src/libstore-tests/nix_api_store.cc @@ -218,6 +218,70 @@ struct LambdaAdapter } }; +class NixApiStoreTestWithRealisedPath : public nix_api_store_test_base +{ +public: + StorePath * drvPath = nullptr; + nix_derivation * drv = nullptr; + Store * store = nullptr; + StorePath * outPath = nullptr; + + void SetUp() override + { + nix_api_store_test_base::SetUp(); + + nix::experimentalFeatureSettings.set("extra-experimental-features", "ca-derivations"); + nix::settings.substituters = {}; + + store = open_local_store(); + + std::filesystem::path unitTestData{getenv("_NIX_TEST_UNIT_DATA")}; + std::ifstream t{unitTestData / "derivation/ca/self-contained.json"}; + std::stringstream buffer; + buffer << t.rdbuf(); + + // Replace the hardcoded system with the current system + std::string jsonStr = nix::replaceStrings(buffer.str(), "x86_64-linux", nix::settings.thisSystem.get()); + + drv = nix_derivation_from_json(ctx, store, jsonStr.c_str()); + assert_ctx_ok(); + ASSERT_NE(drv, nullptr); + + drvPath = nix_add_derivation(ctx, store, drv); + assert_ctx_ok(); + ASSERT_NE(drvPath, nullptr); + + auto cb = LambdaAdapter{.fun = [&](const char * outname, const StorePath * outPath_) { + ASSERT_NE(outname, nullptr) << "Output name should not be NULL"; + auto is_valid_path = nix_store_is_valid_path(ctx, store, outPath_); + ASSERT_EQ(is_valid_path, true); + ASSERT_STREQ(outname, "out") << "Expected single 'out' output"; + ASSERT_EQ(outPath, nullptr) << "Output path callback should only be called once"; + outPath = nix_store_path_clone(outPath_); + }}; + + auto ret = nix_store_realise( + ctx, store, drvPath, static_cast(&cb), decltype(cb)::call_void); + assert_ctx_ok(); + ASSERT_EQ(ret, NIX_OK); + ASSERT_NE(outPath, nullptr) << "Derivation should have produced an output"; + } + + void TearDown() override + { + if (drvPath) + nix_store_path_free(drvPath); + if (outPath) + nix_store_path_free(outPath); + if (drv) + nix_derivation_free(drv); + if (store) + nix_store_free(store); + + nix_api_store_test_base::TearDown(); + } +}; + TEST_F(nix_api_store_test_base, build_from_json) { // FIXME get rid of these @@ -232,7 +296,10 @@ TEST_F(nix_api_store_test_base, build_from_json) std::stringstream buffer; buffer << t.rdbuf(); - auto * drv = nix_derivation_from_json(ctx, store, buffer.str().c_str()); + // Replace the hardcoded system with the current system + std::string jsonStr = nix::replaceStrings(buffer.str(), "x86_64-linux", nix::settings.thisSystem.get()); + + auto * drv = nix_derivation_from_json(ctx, store, jsonStr.c_str()); assert_ctx_ok(); ASSERT_NE(drv, nullptr); @@ -240,15 +307,21 @@ TEST_F(nix_api_store_test_base, build_from_json) assert_ctx_ok(); ASSERT_NE(drv, nullptr); + int callbackCount = 0; auto cb = LambdaAdapter{.fun = [&](const char * outname, const StorePath * outPath) { + ASSERT_NE(outname, nullptr); + ASSERT_STREQ(outname, "out"); + ASSERT_NE(outPath, nullptr); auto is_valid_path = nix_store_is_valid_path(ctx, store, outPath); ASSERT_EQ(is_valid_path, true); + callbackCount++; }}; auto ret = nix_store_realise( ctx, store, drvPath, static_cast(&cb), decltype(cb)::call_void); assert_ctx_ok(); ASSERT_EQ(ret, NIX_OK); + ASSERT_EQ(callbackCount, 1) << "Callback should have been invoked exactly once"; // Clean up nix_store_path_free(drvPath); 
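For orientation before the fixtures that follow: a minimal caller of the new nix_store_get_fs_closure entry point might look like this (a sketch with error handling elided, assuming a Store and StorePath already obtained through the existing C API; countClosure is an illustrative name, not part of the change):

// Sketch, not part of this diff: count the store paths in the forward
// closure of `path`, accumulating the tally through the userdata pointer.
static int countClosure(nix_c_context * ctx, Store * store, const StorePath * path)
{
    int n = 0;
    nix_err ret = nix_store_get_fs_closure(
        ctx,
        store,
        path,
        /* flip_direction */ false,
        /* include_outputs */ false,
        /* include_derivers */ false,
        &n,
        [](nix_c_context *, void * userdata, const StorePath *) {
            // One invocation per closure member, in no particular order.
            ++*static_cast<int *>(userdata);
        });
    return ret == NIX_OK ? n : -1;
}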
@@ -256,4 +329,468 @@ TEST_F(nix_api_store_test_base, build_from_json) nix_store_free(store); } +TEST_F(nix_api_store_test_base, nix_store_realise_invalid_system) +{ + // Test that nix_store_realise properly reports errors when the system is invalid + nix::experimentalFeatureSettings.set("extra-experimental-features", "ca-derivations"); + nix::settings.substituters = {}; + + auto * store = open_local_store(); + + std::filesystem::path unitTestData{getenv("_NIX_TEST_UNIT_DATA")}; + std::ifstream t{unitTestData / "derivation/ca/self-contained.json"}; + std::stringstream buffer; + buffer << t.rdbuf(); + + // Use an invalid system that cannot be built + std::string jsonStr = nix::replaceStrings(buffer.str(), "x86_64-linux", "bogus65-bogusos"); + + auto * drv = nix_derivation_from_json(ctx, store, jsonStr.c_str()); + assert_ctx_ok(); + ASSERT_NE(drv, nullptr); + + auto * drvPath = nix_add_derivation(ctx, store, drv); + assert_ctx_ok(); + ASSERT_NE(drvPath, nullptr); + + int callbackCount = 0; + auto cb = LambdaAdapter{.fun = [&](const char * outname, const StorePath * outPath) { callbackCount++; }}; + + auto ret = nix_store_realise( + ctx, store, drvPath, static_cast(&cb), decltype(cb)::call_void); + + // Should fail with an error + ASSERT_NE(ret, NIX_OK); + ASSERT_EQ(callbackCount, 0) << "Callback should not be invoked when build fails"; + + // Check that error message is set + std::string errMsg = nix_err_msg(nullptr, ctx, nullptr); + ASSERT_FALSE(errMsg.empty()) << "Error message should be set"; + ASSERT_NE(errMsg.find("system"), std::string::npos) << "Error should mention system"; + + // Clean up + nix_store_path_free(drvPath); + nix_derivation_free(drv); + nix_store_free(store); +} + +TEST_F(nix_api_store_test_base, nix_store_realise_builder_fails) +{ + // Test that nix_store_realise properly reports errors when the builder fails + nix::experimentalFeatureSettings.set("extra-experimental-features", "ca-derivations"); + nix::settings.substituters = {}; + + auto * store = open_local_store(); + + std::filesystem::path unitTestData{getenv("_NIX_TEST_UNIT_DATA")}; + std::ifstream t{unitTestData / "derivation/ca/self-contained.json"}; + std::stringstream buffer; + buffer << t.rdbuf(); + + // Replace with current system and make builder command fail + std::string jsonStr = nix::replaceStrings(buffer.str(), "x86_64-linux", nix::settings.thisSystem.get()); + jsonStr = nix::replaceStrings(jsonStr, "echo $name foo > $out", "exit 1"); + + auto * drv = nix_derivation_from_json(ctx, store, jsonStr.c_str()); + assert_ctx_ok(); + ASSERT_NE(drv, nullptr); + + auto * drvPath = nix_add_derivation(ctx, store, drv); + assert_ctx_ok(); + ASSERT_NE(drvPath, nullptr); + + int callbackCount = 0; + auto cb = LambdaAdapter{.fun = [&](const char * outname, const StorePath * outPath) { callbackCount++; }}; + + auto ret = nix_store_realise( + ctx, store, drvPath, static_cast(&cb), decltype(cb)::call_void); + + // Should fail with an error + ASSERT_NE(ret, NIX_OK); + ASSERT_EQ(callbackCount, 0) << "Callback should not be invoked when build fails"; + + // Check that error message is set + std::string errMsg = nix_err_msg(nullptr, ctx, nullptr); + ASSERT_FALSE(errMsg.empty()) << "Error message should be set"; + + // Clean up + nix_store_path_free(drvPath); + nix_derivation_free(drv); + nix_store_free(store); +} + +TEST_F(nix_api_store_test_base, nix_store_realise_builder_no_output) +{ + // Test that nix_store_realise properly reports errors when builder succeeds but produces no output + 
nix::experimentalFeatureSettings.set("extra-experimental-features", "ca-derivations"); + nix::settings.substituters = {}; + + auto * store = open_local_store(); + + std::filesystem::path unitTestData{getenv("_NIX_TEST_UNIT_DATA")}; + std::ifstream t{unitTestData / "derivation/ca/self-contained.json"}; + std::stringstream buffer; + buffer << t.rdbuf(); + + // Replace with current system and make builder succeed but not produce output + std::string jsonStr = nix::replaceStrings(buffer.str(), "x86_64-linux", nix::settings.thisSystem.get()); + jsonStr = nix::replaceStrings(jsonStr, "echo $name foo > $out", "true"); + + auto * drv = nix_derivation_from_json(ctx, store, jsonStr.c_str()); + assert_ctx_ok(); + ASSERT_NE(drv, nullptr); + + auto * drvPath = nix_add_derivation(ctx, store, drv); + assert_ctx_ok(); + ASSERT_NE(drvPath, nullptr); + + int callbackCount = 0; + auto cb = LambdaAdapter{.fun = [&](const char * outname, const StorePath * outPath) { callbackCount++; }}; + + auto ret = nix_store_realise( + ctx, store, drvPath, static_cast(&cb), decltype(cb)::call_void); + + // Should fail with an error + ASSERT_NE(ret, NIX_OK); + ASSERT_EQ(callbackCount, 0) << "Callback should not be invoked when build produces no output"; + + // Check that error message is set + std::string errMsg = nix_err_msg(nullptr, ctx, nullptr); + ASSERT_FALSE(errMsg.empty()) << "Error message should be set"; + + // Clean up + nix_store_path_free(drvPath); + nix_derivation_free(drv); + nix_store_free(store); +} + +TEST_F(NixApiStoreTestWithRealisedPath, nix_store_get_fs_closure_with_outputs) +{ + // Test closure computation with include_outputs on a derivation path + struct CallbackData + { + std::set * paths; + }; + + std::set closure_paths; + CallbackData data{&closure_paths}; + + auto ret = nix_store_get_fs_closure( + ctx, + store, + drvPath, // Use derivation path + false, // flip_direction + true, // include_outputs - include the outputs in the closure + false, // include_derivers + &data, + [](nix_c_context * context, void * userdata, const StorePath * path) { + auto * data = static_cast(userdata); + std::string path_str; + nix_store_path_name(path, OBSERVE_STRING(path_str)); + auto [it, inserted] = data->paths->insert(path_str); + ASSERT_TRUE(inserted) << "Duplicate path in closure: " << path_str; + }); + assert_ctx_ok(); + ASSERT_EQ(ret, NIX_OK); + + // The closure should contain the derivation and its outputs + ASSERT_GE(closure_paths.size(), 2); + + // Verify the output path is in the closure + std::string outPathName; + nix_store_path_name(outPath, OBSERVE_STRING(outPathName)); + ASSERT_EQ(closure_paths.count(outPathName), 1); +} + +TEST_F(NixApiStoreTestWithRealisedPath, nix_store_get_fs_closure_without_outputs) +{ + // Test closure computation WITHOUT include_outputs on a derivation path + struct CallbackData + { + std::set * paths; + }; + + std::set closure_paths; + CallbackData data{&closure_paths}; + + auto ret = nix_store_get_fs_closure( + ctx, + store, + drvPath, // Use derivation path + false, // flip_direction + false, // include_outputs - do NOT include the outputs + false, // include_derivers + &data, + [](nix_c_context * context, void * userdata, const StorePath * path) { + auto * data = static_cast(userdata); + std::string path_str; + nix_store_path_name(path, OBSERVE_STRING(path_str)); + auto [it, inserted] = data->paths->insert(path_str); + ASSERT_TRUE(inserted) << "Duplicate path in closure: " << path_str; + }); + assert_ctx_ok(); + ASSERT_EQ(ret, NIX_OK); + + // Verify the output path is NOT 
+    std::string outPathName;
+    nix_store_path_name(outPath, OBSERVE_STRING(outPathName));
+    ASSERT_EQ(closure_paths.count(outPathName), 0) << "Output path should not be in closure when includeOutputs=false";
+}
+
+TEST_F(NixApiStoreTestWithRealisedPath, nix_store_get_fs_closure_flip_direction)
+{
+    // Test closure computation with flip_direction on a derivation path
+    // When flip_direction=true, we get the reverse dependencies (what depends on this path)
+    // For a derivation, this should NOT include outputs even with include_outputs=true
+    struct CallbackData
+    {
+        std::set<std::string> * paths;
+    };
+
+    std::set<std::string> closure_paths;
+    CallbackData data{&closure_paths};
+
+    auto ret = nix_store_get_fs_closure(
+        ctx,
+        store,
+        drvPath, // Use derivation path
+        true,    // flip_direction - get reverse dependencies
+        true,    // include_outputs
+        false,   // include_derivers
+        &data,
+        [](nix_c_context * context, void * userdata, const StorePath * path) {
+            auto * data = static_cast<CallbackData *>(userdata);
+            std::string path_str;
+            nix_store_path_name(path, OBSERVE_STRING(path_str));
+            auto [it, inserted] = data->paths->insert(path_str);
+            ASSERT_TRUE(inserted) << "Duplicate path in closure: " << path_str;
+        });
+    assert_ctx_ok();
+    ASSERT_EQ(ret, NIX_OK);
+
+    // Verify the output path is NOT in the closure when direction is flipped
+    std::string outPathName;
+    nix_store_path_name(outPath, OBSERVE_STRING(outPathName));
+    ASSERT_EQ(closure_paths.count(outPathName), 0) << "Output path should not be in closure when flip_direction=true";
+}
+
+TEST_F(NixApiStoreTestWithRealisedPath, nix_store_get_fs_closure_include_derivers)
+{
+    // Test closure computation with include_derivers on an output path
+    // This should include the derivation that produced the output
+    struct CallbackData
+    {
+        std::set<std::string> * paths;
+    };
+
+    std::set<std::string> closure_paths;
+    CallbackData data{&closure_paths};
+
+    auto ret = nix_store_get_fs_closure(
+        ctx,
+        store,
+        outPath, // Use output path (not derivation)
+        false,   // flip_direction
+        false,   // include_outputs
+        true,    // include_derivers - include the derivation
+        &data,
+        [](nix_c_context * context, void * userdata, const StorePath * path) {
+            auto * data = static_cast<CallbackData *>(userdata);
+            std::string path_str;
+            nix_store_path_name(path, OBSERVE_STRING(path_str));
+            auto [it, inserted] = data->paths->insert(path_str);
+            ASSERT_TRUE(inserted) << "Duplicate path in closure: " << path_str;
+        });
+    assert_ctx_ok();
+    ASSERT_EQ(ret, NIX_OK);
+
+    // Verify the derivation path is in the closure.
+    // The deriver link is nastily stateful: this assertion only holds because we
+    // start from an empty store. If the output happened to exist already, the
+    // recorded deriver could be anything.
+    std::string drvPathName;
+    nix_store_path_name(drvPath, OBSERVE_STRING(drvPathName));
+    ASSERT_EQ(closure_paths.count(drvPathName), 1) << "Derivation should be in closure when include_derivers=true";
+}
+
+TEST_F(NixApiStoreTestWithRealisedPath, nix_store_realise_output_ordering)
+{
+    // Test that nix_store_realise returns outputs in alphabetical order by output name.
+    // This test uses a CA derivation with 10 outputs in randomized input order
+    // to verify that the callback order is deterministic and alphabetical.
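+    // Each hashPlaceholder() value below is a deterministic stand-in for a not-yet-known
+    // content-addressed output path; Nix rewrites them to the real paths at build time.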
+    nix::experimentalFeatureSettings.set("extra-experimental-features", "ca-derivations");
+    nix::settings.substituters = {};
+
+    auto * store = open_local_store();
+
+    // Create a CA derivation with 10 outputs using proper placeholders
+    auto outa_ph = nix::hashPlaceholder("outa");
+    auto outb_ph = nix::hashPlaceholder("outb");
+    auto outc_ph = nix::hashPlaceholder("outc");
+    auto outd_ph = nix::hashPlaceholder("outd");
+    auto oute_ph = nix::hashPlaceholder("oute");
+    auto outf_ph = nix::hashPlaceholder("outf");
+    auto outg_ph = nix::hashPlaceholder("outg");
+    auto outh_ph = nix::hashPlaceholder("outh");
+    auto outi_ph = nix::hashPlaceholder("outi");
+    auto outj_ph = nix::hashPlaceholder("outj");
+
+    std::string drvJson = R"({
+        "version": 3,
+        "name": "multi-output-test",
+        "system": ")" + nix::settings.thisSystem.get() + R"(",
+        "builder": "/bin/sh",
+        "args": ["-c", "echo a > $outa; echo b > $outb; echo c > $outc; echo d > $outd; echo e > $oute; echo f > $outf; echo g > $outg; echo h > $outh; echo i > $outi; echo j > $outj"],
+        "env": {
+            "builder": "/bin/sh",
+            "name": "multi-output-test",
+            "system": ")" + nix::settings.thisSystem.get() + R"(",
+            "outf": ")" + outf_ph + R"(",
+            "outd": ")" + outd_ph + R"(",
+            "outi": ")" + outi_ph + R"(",
+            "oute": ")" + oute_ph + R"(",
+            "outh": ")" + outh_ph + R"(",
+            "outc": ")" + outc_ph + R"(",
+            "outb": ")" + outb_ph + R"(",
+            "outg": ")" + outg_ph + R"(",
+            "outj": ")" + outj_ph + R"(",
+            "outa": ")" + outa_ph + R"("
+        },
+        "inputDrvs": {},
+        "inputSrcs": [],
+        "outputs": {
+            "outd": { "hashAlgo": "sha256", "method": "nar" },
+            "outf": { "hashAlgo": "sha256", "method": "nar" },
+            "outg": { "hashAlgo": "sha256", "method": "nar" },
+            "outb": { "hashAlgo": "sha256", "method": "nar" },
+            "outc": { "hashAlgo": "sha256", "method": "nar" },
+            "outi": { "hashAlgo": "sha256", "method": "nar" },
+            "outj": { "hashAlgo": "sha256", "method": "nar" },
+            "outh": { "hashAlgo": "sha256", "method": "nar" },
+            "outa": { "hashAlgo": "sha256", "method": "nar" },
+            "oute": { "hashAlgo": "sha256", "method": "nar" }
+        }
+    })";
+
+    auto * drv = nix_derivation_from_json(ctx, store, drvJson.c_str());
+    assert_ctx_ok();
+    ASSERT_NE(drv, nullptr);
+
+    auto * drvPath = nix_add_derivation(ctx, store, drv);
+    assert_ctx_ok();
+    ASSERT_NE(drvPath, nullptr);
+
+    // Realise the derivation - capture the order outputs are returned
+    std::map<std::string, nix::StorePath> outputs;
+    std::vector<std::string> output_order;
+    auto cb = LambdaAdapter{.fun = [&](const char * outname, const StorePath * outPath) {
+        ASSERT_NE(outname, nullptr);
+        ASSERT_NE(outPath, nullptr);
+        output_order.push_back(outname);
+        outputs.emplace(outname, outPath->path);
+    }};
+
+    auto ret = nix_store_realise(
+        ctx, store, drvPath, static_cast<void *>(&cb), decltype(cb)::call_void);
+    assert_ctx_ok();
+    ASSERT_EQ(ret, NIX_OK);
+    ASSERT_EQ(outputs.size(), 10);
+
+    // Verify outputs are returned in alphabetical order by output name
+    std::vector<std::string> expected_order = {
+        "outa", "outb", "outc", "outd", "oute", "outf", "outg", "outh", "outi", "outj"};
+    ASSERT_EQ(output_order, expected_order) << "Outputs should be returned in alphabetical order by output name";
+
+    // Now compute closure with include_outputs and collect paths in order
+    struct CallbackData
+    {
+        std::vector<std::string> * paths;
+    };
+
+    std::vector<std::string> closure_paths;
+    CallbackData data{&closure_paths};
+
+    ret = nix_store_get_fs_closure(
+        ctx,
+        store,
+        drvPath,
+        false, // flip_direction
+        true,  // include_outputs - include the outputs in the closure
+        false, // include_derivers
+        &data,
+        [](nix_c_context * context, void * userdata, const StorePath * path) {
+            auto * data = static_cast<CallbackData *>(userdata);
+            std::string path_str;
+            nix_store_path_name(path, OBSERVE_STRING(path_str));
+            data->paths->push_back(path_str);
+        });
+    assert_ctx_ok();
+    ASSERT_EQ(ret, NIX_OK);
+
+    // Should contain at least the derivation and 10 outputs
+    ASSERT_GE(closure_paths.size(), 11);
+
+    // Verify all outputs are present in the closure
+    for (const auto & [outname, outPath] : outputs) {
+        std::string outPathName = store->ptr->printStorePath(outPath);
+
+        bool found = false;
+        for (const auto & p : closure_paths) {
+            // nix_store_path_name returns just the name part, so match against full path name
+            if (outPathName.find(p) != std::string::npos) {
+                found = true;
+                break;
+            }
+        }
+        ASSERT_TRUE(found) << "Output " << outname << " (" << outPathName << ") not found in closure";
+    }
+
+    nix_store_path_free(drvPath);
+    nix_derivation_free(drv);
+    nix_store_free(store);
+}
+
+TEST_F(NixApiStoreTestWithRealisedPath, nix_store_get_fs_closure_error_propagation)
+{
+    // Test that errors in the callback abort the closure computation
+    struct CallbackData
+    {
+        int * count;
+    };
+
+    int call_count = 0;
+    CallbackData data{&call_count};
+
+    auto ret = nix_store_get_fs_closure(
+        ctx,
+        store,
+        drvPath, // Use derivation path
+        false,   // flip_direction
+        true,    // include_outputs
+        false,   // include_derivers
+        &data,
+        [](nix_c_context * context, void * userdata, const StorePath * path) {
+            auto * data = static_cast<CallbackData *>(userdata);
+            (*data->count)++;
+            // Set an error immediately
+            nix_set_err_msg(context, NIX_ERR_UNKNOWN, "Test error");
+        });
+
+    // Should have aborted with error
+    ASSERT_EQ(ret, NIX_ERR_UNKNOWN);
+    ASSERT_EQ(call_count, 1); // Should have been called exactly once, then aborted
+}
+
 } // namespace nixC
diff --git a/src/libstore-tests/package.nix b/src/libstore-tests/package.nix
index d5255f4f9..90e6af519 100644
--- a/src/libstore-tests/package.nix
+++ b/src/libstore-tests/package.nix
@@ -83,7 +83,6 @@ mkMesonExecutable (finalAttrs: {
       }
       (
         ''
-          export ASAN_OPTIONS=abort_on_error=1:print_summary=1:detect_leaks=0
           export _NIX_TEST_UNIT_DATA=${data + "/src/libstore-tests/data"}
           export NIX_REMOTE=$HOME/store
           ${stdenv.hostPlatform.emulator buildPackages} ${lib.getExe finalAttrs.finalPackage}
diff --git a/src/libstore-tests/realisation.cc b/src/libstore-tests/realisation.cc
index a5a5bee50..d16049bc5 100644
--- a/src/libstore-tests/realisation.cc
+++ b/src/libstore-tests/realisation.cc
@@ -49,16 +49,16 @@ INSTANTIATE_TEST_SUITE_P(
     RealisationJsonTest,
     ([] {
         Realisation simple{
-
-            .id =
-                {
-                    .drvHash = Hash::parseExplicitFormatUnprefixed(
-                        "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad",
-                        HashAlgorithm::SHA256,
-                        HashFormat::Base16),
-                    .outputName = "foo",
-                },
-            .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"},
+            {
+                .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv"},
+            },
+            {
+                .drvHash = Hash::parseExplicitFormatUnprefixed(
+                    "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad",
+                    HashAlgorithm::SHA256,
+                    HashFormat::Base16),
+                .outputName = "foo",
+            },
         };
         return ::testing::Values(
             std::pair{
diff --git a/src/libstore-tests/references.cc b/src/libstore-tests/references.cc
index 27ecad08f..9cecd573e 100644
--- a/src/libstore-tests/references.cc
+++ b/src/libstore-tests/references.cc
@@ -1,4 +1,6 @@
 #include "nix/store/references.hh"
+#include "nix/store/path-references.hh"
+#include "nix/util/memory-source-accessor.hh"
 
 #include <gtest/gtest.h>
 
@@ -79,4 +81,145 @@ TEST(references, scan)
     }
 }
 
+TEST(references, scanForReferencesDeep)
+{
+    using File = MemorySourceAccessor::File;
+
+    // Create store paths to search for
+    StorePath path1{"dc04vv14dak1c1r48qa0m23vr9jy8sm0-foo"};
+    StorePath path2{"zc842j0rz61mjsp3h3wp5ly71ak6qgdn-bar"};
+    StorePath path3{"a5cn2i4b83gnsm60d38l3kgb8qfplm11-baz"};
+
+    StorePathSet refs{path1, path2, path3};
+
+    std::string hash1{path1.hashPart()};
+    std::string hash2{path2.hashPart()};
+    std::string hash3{path3.hashPart()};
+
+    // Create an in-memory file system with various reference patterns
+    auto accessor = make_ref<MemorySourceAccessor>();
+    accessor->root = File::Directory{
+        .contents{
+            {
+                // file1.txt: contains hash1
+                "file1.txt",
+                File::Regular{
+                    .contents = "This file references " + hash1 + " in its content",
+                },
+            },
+            {
+                // file2.txt: contains hash2 and hash3
+                "file2.txt",
+                File::Regular{
+                    .contents = "Multiple refs: " + hash2 + " and also " + hash3,
+                },
+            },
+            {
+                // file3.txt: contains no references
+                "file3.txt",
+                File::Regular{
+                    .contents = "This file has no store path references at all",
+                },
+            },
+            {
+                // subdir: a subdirectory
+                "subdir",
+                File::Directory{
+                    .contents{
+                        {
+                            // subdir/file4.txt: contains hash1 again
+                            "file4.txt",
+                            File::Regular{
+                                .contents = "Subdirectory file with " + hash1,
+                            },
+                        },
+                    },
+                },
+            },
+            {
+                // link1: a symlink that contains a reference in its target
+                "link1",
+                File::Symlink{
+                    .target = hash2 + "-target",
+                },
+            },
+        },
+    };
+
+    // Test the callback-based API
+    {
+        std::map<CanonPath, StorePathSet> foundRefs;
+
+        scanForReferencesDeep(*accessor, CanonPath::root, refs, [&](FileRefScanResult result) {
+            foundRefs[std::move(result.filePath)] = std::move(result.foundRefs);
+        });
+
+        // Verify we found the expected references
+        EXPECT_EQ(foundRefs.size(), 4); // file1, file2, file4, link1
+
+        // Check file1.txt found path1
+        {
+            CanonPath f1Path("/file1.txt");
+            auto it = foundRefs.find(f1Path);
+            ASSERT_TRUE(it != foundRefs.end());
+            EXPECT_EQ(it->second.size(), 1);
+            EXPECT_TRUE(it->second.count(path1));
+        }
+
+        // Check file2.txt found path2 and path3
+        {
+            CanonPath f2Path("/file2.txt");
+            auto it = foundRefs.find(f2Path);
+            ASSERT_TRUE(it != foundRefs.end());
+            EXPECT_EQ(it->second.size(), 2);
+            EXPECT_TRUE(it->second.count(path2));
+            EXPECT_TRUE(it->second.count(path3));
+        }
+
+        // Check file3.txt is not in results (no refs)
+        {
+            CanonPath f3Path("/file3.txt");
+            EXPECT_FALSE(foundRefs.count(f3Path));
+        }
+
+        // Check subdir/file4.txt found path1
+        {
+            CanonPath f4Path("/subdir/file4.txt");
+            auto it = foundRefs.find(f4Path);
+            ASSERT_TRUE(it != foundRefs.end());
+            EXPECT_EQ(it->second.size(), 1);
+            EXPECT_TRUE(it->second.count(path1));
+        }
+
+        // Check symlink found path2
+        {
+            CanonPath linkPath("/link1");
+            auto it = foundRefs.find(linkPath);
+            ASSERT_TRUE(it != foundRefs.end());
+            EXPECT_EQ(it->second.size(), 1);
+            EXPECT_TRUE(it->second.count(path2));
+        }
+    }
+
+    // Test the map-based convenience API
+    {
+        auto results = scanForReferencesDeep(*accessor, CanonPath::root, refs);
+
+        EXPECT_EQ(results.size(), 4); // file1, file2, file4, link1
+
+        // Verify all expected files are in the results
+        EXPECT_TRUE(results.count(CanonPath("/file1.txt")));
+        EXPECT_TRUE(results.count(CanonPath("/file2.txt")));
+        EXPECT_TRUE(results.count(CanonPath("/subdir/file4.txt")));
+        EXPECT_TRUE(results.count(CanonPath("/link1")));
+        EXPECT_FALSE(results.count(CanonPath("/file3.txt")));
+
+        // Verify the references found in each file are correct
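+        // (StorePathSet is an ordered set, so these whole-set comparisons are independent of scan order.)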
+        EXPECT_EQ(results.at(CanonPath("/file1.txt")), StorePathSet{path1});
+        EXPECT_EQ(results.at(CanonPath("/file2.txt")), StorePathSet({path2, path3}));
+        EXPECT_EQ(results.at(CanonPath("/subdir/file4.txt")), StorePathSet{path1});
+        EXPECT_EQ(results.at(CanonPath("/link1")), StorePathSet{path2});
+    }
+}
+
 } // namespace nix
diff --git a/src/libstore-tests/s3-binary-cache-store.cc b/src/libstore-tests/s3-binary-cache-store.cc
index 251e96172..f01759771 100644
--- a/src/libstore-tests/s3-binary-cache-store.cc
+++ b/src/libstore-tests/s3-binary-cache-store.cc
@@ -1,8 +1,9 @@
 #include "nix/store/s3-binary-cache-store.hh"
+#include "nix/store/http-binary-cache-store.hh"
+#include "nix/store/filetransfer.hh"
+#include "nix/store/s3-url.hh"
 
-#if NIX_WITH_S3_SUPPORT
-
-# include <gtest/gtest.h>
+#include <gtest/gtest.h>
 
 namespace nix {
 
@@ -10,9 +11,115 @@ TEST(S3BinaryCacheStore, constructConfig)
 {
     S3BinaryCacheStoreConfig config{"s3", "foobar", {}};
 
-    EXPECT_EQ(config.bucketName, "foobar");
+    // The bucket name is stored as the host part of the authority in cacheUri
+    EXPECT_EQ(
+        config.cacheUri,
+        (ParsedURL{
+            .scheme = "s3",
+            .authority = ParsedURL::Authority{.host = "foobar"},
+        }));
+}
+
+TEST(S3BinaryCacheStore, constructConfigWithRegion)
+{
+    Store::Config::Params params{{"region", "eu-west-1"}};
+    S3BinaryCacheStoreConfig config{"s3", "my-bucket", params};
+
+    EXPECT_EQ(
+        config.cacheUri,
+        (ParsedURL{
+            .scheme = "s3",
+            .authority = ParsedURL::Authority{.host = "my-bucket"},
+            .query = (StringMap) {{"region", "eu-west-1"}},
+        }));
+    EXPECT_EQ(config.region.get(), "eu-west-1");
+}
+
+TEST(S3BinaryCacheStore, defaultSettings)
+{
+    S3BinaryCacheStoreConfig config{"s3", "test-bucket", {}};
+
+    EXPECT_EQ(
+        config.cacheUri,
+        (ParsedURL{
+            .scheme = "s3",
+            .authority = ParsedURL::Authority{.host = "test-bucket"},
+        }));
+
+    // Check default values
+    EXPECT_EQ(config.region.get(), "us-east-1");
+    EXPECT_EQ(config.profile.get(), "default");
+    EXPECT_EQ(config.scheme.get(), "https");
+    EXPECT_EQ(config.endpoint.get(), "");
+}
+
+/**
+ * Test that S3BinaryCacheStore properly preserves S3-specific parameters
+ */
+TEST(S3BinaryCacheStore, s3StoreConfigPreservesParameters)
+{
+    StringMap params;
+    params["region"] = "eu-west-1";
+    params["endpoint"] = "custom.s3.com";
+
+    S3BinaryCacheStoreConfig config("s3", "test-bucket", params);
+
+    // The config should preserve S3-specific parameters
+    EXPECT_EQ(
+        config.cacheUri,
+        (ParsedURL{
+            .scheme = "s3",
+            .authority = ParsedURL::Authority{.host = "test-bucket"},
+            .query = (StringMap) {{"region", "eu-west-1"}, {"endpoint", "custom.s3.com"}},
+        }));
+}
+
+/**
+ * Test that S3 store scheme is properly registered
+ */
+TEST(S3BinaryCacheStore, s3SchemeRegistration)
+{
+    auto schemes = S3BinaryCacheStoreConfig::uriSchemes();
+    EXPECT_TRUE(schemes.count("s3") > 0) << "S3 scheme should be supported";
+
+    // Verify HttpBinaryCacheStoreConfig doesn't directly list S3
+    auto httpSchemes = HttpBinaryCacheStoreConfig::uriSchemes();
+    EXPECT_FALSE(httpSchemes.count("s3") > 0) << "HTTP store shouldn't directly list S3 scheme";
+}
+
+/**
+ * Test that only S3-specific parameters are preserved in cacheUri,
+ * while non-S3 store parameters are not propagated to the URL
+ */
+TEST(S3BinaryCacheStore, parameterFiltering)
+{
+    StringMap params;
+    params["region"] = "eu-west-1";
+    params["endpoint"] = "minio.local";
+    params["want-mass-query"] = "true"; // Non-S3 store parameter
+    params["priority"] = "10";          // Non-S3 store parameter
+
+    S3BinaryCacheStoreConfig config("s3", "test-bucket", params);
+
+ // Only S3-specific params should be in cacheUri.query + EXPECT_EQ( + config.cacheUri, + (ParsedURL{ + .scheme = "s3", + .authority = ParsedURL::Authority{.host = "test-bucket"}, + .query = (StringMap) {{"region", "eu-west-1"}, {"endpoint", "minio.local"}}, + })); + + // But the non-S3 params should still be set on the config + EXPECT_EQ(config.wantMassQuery.get(), true); + EXPECT_EQ(config.priority.get(), 10); + + // And all params (S3 and non-S3) should be returned by getReference() + auto ref = config.getReference(); + EXPECT_EQ(ref.params["region"], "eu-west-1"); + EXPECT_EQ(ref.params["endpoint"], "minio.local"); + EXPECT_EQ(ref.params["want-mass-query"], "true"); + EXPECT_EQ(ref.params["priority"], "10"); } } // namespace nix - -#endif diff --git a/src/libstore-tests/s3.cc b/src/libstore-tests/s3-url.cc similarity index 80% rename from src/libstore-tests/s3.cc rename to src/libstore-tests/s3-url.cc index 799e102fe..9fa625fd6 100644 --- a/src/libstore-tests/s3.cc +++ b/src/libstore-tests/s3-url.cc @@ -1,10 +1,8 @@ -#include "nix/store/s3.hh" +#include "nix/store/s3-url.hh" #include "nix/util/tests/gmock-matchers.hh" -#if NIX_WITH_S3_SUPPORT - -# include -# include +#include +#include namespace nix { @@ -72,6 +70,25 @@ INSTANTIATE_TEST_SUITE_P( }, "with_profile_and_region", }, + ParsedS3URLTestCase{ + "s3://my-bucket/my-key.txt?versionId=abc123xyz", + { + .bucket = "my-bucket", + .key = {"my-key.txt"}, + .versionId = "abc123xyz", + }, + "with_versionId", + }, + ParsedS3URLTestCase{ + "s3://bucket/path/to/object?region=eu-west-1&versionId=version456", + { + .bucket = "bucket", + .key = {"path", "to", "object"}, + .region = "eu-west-1", + .versionId = "version456", + }, + "with_region_and_versionId", + }, ParsedS3URLTestCase{ "s3://bucket/key?endpoint=https://minio.local&scheme=http", { @@ -224,9 +241,38 @@ INSTANTIATE_TEST_SUITE_P( }, "https://s3.ap-southeast-2.amazonaws.com/bucket/path/to/file.txt", "complex_path_and_region", + }, + S3ToHttpsConversionTestCase{ + ParsedS3URL{ + .bucket = "my-bucket", + .key = {"my-key.txt"}, + .versionId = "abc123xyz", + }, + ParsedURL{ + .scheme = "https", + .authority = ParsedURL::Authority{.host = "s3.us-east-1.amazonaws.com"}, + .path = {"", "my-bucket", "my-key.txt"}, + .query = {{"versionId", "abc123xyz"}}, + }, + "https://s3.us-east-1.amazonaws.com/my-bucket/my-key.txt?versionId=abc123xyz", + "with_versionId", + }, + S3ToHttpsConversionTestCase{ + ParsedS3URL{ + .bucket = "versioned-bucket", + .key = {"path", "to", "object"}, + .region = "eu-west-1", + .versionId = "version456", + }, + ParsedURL{ + .scheme = "https", + .authority = ParsedURL::Authority{.host = "s3.eu-west-1.amazonaws.com"}, + .path = {"", "versioned-bucket", "path", "to", "object"}, + .query = {{"versionId", "version456"}}, + }, + "https://s3.eu-west-1.amazonaws.com/versioned-bucket/path/to/object?versionId=version456", + "with_region_and_versionId", }), [](const ::testing::TestParamInfo & info) { return info.param.description; }); } // namespace nix - -#endif diff --git a/src/libstore-tests/serve-protocol.cc b/src/libstore-tests/serve-protocol.cc index b513e1365..10aa21e9d 100644 --- a/src/libstore-tests/serve-protocol.cc +++ b/src/libstore-tests/serve-protocol.cc @@ -95,49 +95,51 @@ VERSIONED_CHARACTERIZATION_TEST( defaultVersion, (std::tuple{ Realisation{ - .id = - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", 
"qwer"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, + }, + { + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, }, Realisation{ - .id = - { - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, - .dependentRealisations = - { + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, + .dependentRealisations = { - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "quux", + { + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "quux", + }, + StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, - StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, - }, + }, + { + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, }, })) VERSIONED_CHARACTERIZATION_TEST(ServeProtoTest, buildResult_2_2, "build-result-2.2", 2 << 8 | 2, ({ using namespace std::literals::chrono_literals; std::tuple t{ - BuildResult{ - .status = BuildResult::OutputRejected, + BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::OutputRejected, .errorMsg = "no idea why", - }, - BuildResult{ - .status = BuildResult::NotDeterministic, + }}}, + BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::NotDeterministic, .errorMsg = "no idea why", - }, - BuildResult{ - .status = BuildResult::Built, - }, + }}}, + BuildResult{.inner{BuildResult::Success{ + .status = BuildResult::Success::Built, + }}}, }; t; })) @@ -145,20 +147,24 @@ VERSIONED_CHARACTERIZATION_TEST(ServeProtoTest, buildResult_2_2, "build-result-2 VERSIONED_CHARACTERIZATION_TEST(ServeProtoTest, buildResult_2_3, "build-result-2.3", 2 << 8 | 3, ({ using namespace std::literals::chrono_literals; std::tuple t{ - BuildResult{ - .status = BuildResult::OutputRejected, + BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::OutputRejected, .errorMsg = "no idea why", - }, + }}}, BuildResult{ - .status = BuildResult::NotDeterministic, - .errorMsg = "no idea why", + .inner{BuildResult::Failure{ + .status = BuildResult::Failure::NotDeterministic, + .errorMsg = "no idea why", + .isNonDeterministic = true, + }}, .timesBuilt = 3, - .isNonDeterministic = true, .startTime = 30, .stopTime = 50, }, BuildResult{ - .status = BuildResult::Built, + .inner{BuildResult::Success{ + .status = BuildResult::Success::Built, + }}, .startTime = 30, .stopTime = 50, }, @@ -170,48 +176,54 @@ VERSIONED_CHARACTERIZATION_TEST( ServeProtoTest, buildResult_2_6, "build-result-2.6", 2 << 8 | 6, ({ using namespace std::literals::chrono_literals; std::tuple t{ - BuildResult{ - .status = BuildResult::OutputRejected, + BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::OutputRejected, .errorMsg = "no idea why", - }, + }}}, BuildResult{ - .status = BuildResult::NotDeterministic, - .errorMsg = "no idea why", + .inner{BuildResult::Failure{ + .status = BuildResult::Failure::NotDeterministic, + .errorMsg = "no idea why", + .isNonDeterministic = true, + }}, .timesBuilt = 3, - .isNonDeterministic = true, .startTime = 30, .stopTime = 50, }, BuildResult{ - .status = BuildResult::Built, - .timesBuilt = 1, - .builtOutputs = - { + 
.inner{BuildResult::Success{
+                    .status = BuildResult::Success::Built,
+                    .builtOutputs =
                         {
-                            "foo", {
-                                .id =
+                            {
+                                "foo",
+                                {
+                                    {
+                                        .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"},
+                                    },
                                     DrvOutput{
                                         .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="),
                                         .outputName = "foo",
                                     },
-                                .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"},
+                                },
                             },
-                        },
-                        {
-                            "bar", {
-                                .id =
+                            {
+                                "bar",
+                                {
+                                    {
+                                        .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"},
+                                    },
                                     DrvOutput{
                                         .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="),
                                         .outputName = "bar",
                                     },
-                                .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"},
+                                },
                             },
                         },
-                },
+                }},
+                .timesBuilt = 1,
                 .startTime = 30,
                 .stopTime = 50,
 #if 0
diff --git a/src/libstore-tests/store-reference.cc b/src/libstore-tests/store-reference.cc
index 7b42b45a2..272d6732a 100644
--- a/src/libstore-tests/store-reference.cc
+++ b/src/libstore-tests/store-reference.cc
@@ -100,9 +100,12 @@ URI_TEST(local_1, localExample_1)
 
 URI_TEST(local_2, localExample_2)
 
-/* Test path with spaces */
+/* Test path with encoded spaces */
 URI_TEST(local_3, localExample_3)
 
+/* Test path with spaces that are improperly left unencoded */
+URI_TEST_READ(local_3_no_percent, localExample_3)
+
 URI_TEST_READ(local_shorthand_1, localExample_1)
 
 URI_TEST_READ(local_shorthand_2, localExample_2)
 
@@ -183,4 +186,64 @@ static StoreReference sshIPv6AuthorityWithUserinfoAndParams{
 
 URI_TEST_READ(ssh_unbracketed_ipv6_3, sshIPv6AuthorityWithUserinfoAndParams)
 
+static const StoreReference sshIPv6AuthorityWithUserinfoAndParamsAndZoneId{
+    .variant =
+        StoreReference::Specified{
+            .scheme = "ssh",
+            .authority = "userinfo@[fea5:23e1:3916:fc24:cb52:2837:2ecb:ea8e%25eth0]",
+        },
+    .params =
+        {
+            {"a", "b"},
+            {"c", "d"},
+        },
+};
+
+URI_TEST_READ(ssh_unbracketed_ipv6_4, sshIPv6AuthorityWithUserinfoAndParamsAndZoneId)
+URI_TEST_READ(ssh_unbracketed_ipv6_5, sshIPv6AuthorityWithUserinfoAndParamsAndZoneId)
+
+static const StoreReference sshIPv6AuthorityWithUserinfoAndParamsAndZoneIdTricky{
+    .variant =
+        StoreReference::Specified{
+            .scheme = "ssh",
+            .authority = "userinfo@[fea5:23e1:3916:fc24:cb52:2837:2ecb:ea8e%2525]",
+        },
+    .params =
+        {
+            {"a", "b"},
+            {"c", "d"},
+        },
+};
+
+// Non-standard syntax where the IPv6 literal appears without brackets. In
+// this case we don't consider %25 to be a pct-encoded % and just take it as a
+// literal value. 25 is a perfectly legal ZoneId value in theory.
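+// (RFC 6874 only requires the "%25" escape for the zone-ID delimiter inside a
+// bracketed IPv6 literal; the unbracketed form has no such rule, hence the
+// literal reading here.)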
+URI_TEST_READ(ssh_unbracketed_ipv6_6, sshIPv6AuthorityWithUserinfoAndParamsAndZoneIdTricky) +URI_TEST_READ(ssh_unbracketed_ipv6_7, sshIPv6AuthorityWithUserinfoAndParamsAndZoneId) + +static const StoreReference sshIPv6AuthorityWithParamsAndZoneId{ + .variant = + StoreReference::Specified{ + .scheme = "ssh", + .authority = "[fea5:23e1:3916:fc24:cb52:2837:2ecb:ea8e%25eth0]", + }, + .params = + { + {"a", "b"}, + {"c", "d"}, + }, +}; + +URI_TEST_READ(ssh_unbracketed_ipv6_8, sshIPv6AuthorityWithParamsAndZoneId) + +static const StoreReference sshIPv6AuthorityWithZoneId{ + .variant = + StoreReference::Specified{ + .scheme = "ssh", + .authority = "[fea5:23e1:3916:fc24:cb52:2837:2ecb:ea8e%25eth0]", + }, +}; + +URI_TEST_READ(ssh_unbracketed_ipv6_9, sshIPv6AuthorityWithZoneId) + } // namespace nix diff --git a/src/libstore-tests/worker-protocol.cc b/src/libstore-tests/worker-protocol.cc index 823d8d85a..c4afde3bd 100644 --- a/src/libstore-tests/worker-protocol.cc +++ b/src/libstore-tests/worker-protocol.cc @@ -148,49 +148,51 @@ VERSIONED_CHARACTERIZATION_TEST( defaultVersion, (std::tuple{ Realisation{ - .id = - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, + }, + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, }, Realisation{ - .id = - { - .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), - .outputName = "baz", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, - .signatures = {"asdf", "qwer"}, - .dependentRealisations = - { + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + .signatures = {"asdf", "qwer"}, + .dependentRealisations = { - DrvOutput{ - .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "quux", + { + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "quux", + }, + StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, - StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, }, - }, + }, + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + .outputName = "baz", + }, }, })) VERSIONED_CHARACTERIZATION_TEST(WorkerProtoTest, buildResult_1_27, "build-result-1.27", 1 << 8 | 27, ({ using namespace std::literals::chrono_literals; std::tuple t{ - BuildResult{ - .status = BuildResult::OutputRejected, + BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::OutputRejected, .errorMsg = "no idea why", - }, - BuildResult{ - .status = BuildResult::NotDeterministic, + }}}, + BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::NotDeterministic, .errorMsg = "no idea why", - }, - BuildResult{ - .status = BuildResult::Built, - }, + }}}, + BuildResult{.inner{BuildResult::Success{ + .status = BuildResult::Success::Built, + }}}, }; t; })) @@ -199,44 +201,44 @@ VERSIONED_CHARACTERIZATION_TEST( WorkerProtoTest, buildResult_1_28, "build-result-1.28", 1 << 8 | 28, ({ using namespace std::literals::chrono_literals; std::tuple t{ - BuildResult{ - .status = BuildResult::OutputRejected, + BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::OutputRejected, .errorMsg = "no idea why", - }, - 
BuildResult{ - .status = BuildResult::NotDeterministic, + }}}, + BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::NotDeterministic, .errorMsg = "no idea why", - }, - BuildResult{ - .status = BuildResult::Built, + }}}, + BuildResult{.inner{BuildResult::Success{ + .status = BuildResult::Success::Built, .builtOutputs = { { "foo", { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "foo", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + }, + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "foo", + }, }, }, { "bar", { - .id = - DrvOutput{ - .drvHash = - Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), - .outputName = "bar", - }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + }, + DrvOutput{ + .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), + .outputName = "bar", + }, }, }, }, - }, + }}}, }; t; })) @@ -245,48 +247,54 @@ VERSIONED_CHARACTERIZATION_TEST( WorkerProtoTest, buildResult_1_29, "build-result-1.29", 1 << 8 | 29, ({ using namespace std::literals::chrono_literals; std::tuple t{ - BuildResult{ - .status = BuildResult::OutputRejected, + BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::OutputRejected, .errorMsg = "no idea why", - }, + }}}, BuildResult{ - .status = BuildResult::NotDeterministic, - .errorMsg = "no idea why", + .inner{BuildResult::Failure{ + .status = BuildResult::Failure::NotDeterministic, + .errorMsg = "no idea why", + .isNonDeterministic = true, + }}, .timesBuilt = 3, - .isNonDeterministic = true, .startTime = 30, .stopTime = 50, }, BuildResult{ - .status = BuildResult::Built, - .timesBuilt = 1, - .builtOutputs = - { + .inner{BuildResult::Success{ + .status = BuildResult::Success::Built, + .builtOutputs = { - "foo", { - .id = + "foo", + { + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + }, DrvOutput{ .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), .outputName = "foo", }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"}, + }, }, - }, - { - "bar", { - .id = + "bar", + { + { + .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + }, DrvOutput{ .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="), .outputName = "bar", }, - .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"}, + }, }, }, - }, + }}, + .timesBuilt = 1, .startTime = 30, .stopTime = 50, }, @@ -298,48 +306,54 @@ VERSIONED_CHARACTERIZATION_TEST( WorkerProtoTest, buildResult_1_37, "build-result-1.37", 1 << 8 | 37, ({ using namespace std::literals::chrono_literals; std::tuple t{ - BuildResult{ - .status = BuildResult::OutputRejected, + BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::OutputRejected, .errorMsg = "no idea why", - }, + }}}, BuildResult{ - .status = BuildResult::NotDeterministic, - .errorMsg = "no idea why", + .inner{BuildResult::Failure{ + .status = BuildResult::Failure::NotDeterministic, + .errorMsg = "no idea why", + .isNonDeterministic = true, + }}, .timesBuilt = 3, - .isNonDeterministic = true, .startTime = 30, .stopTime = 50, }, BuildResult{ - .status = BuildResult::Built, - .timesBuilt = 1, - .builtOutputs = - { + .inner{BuildResult::Success{ + 
.status = BuildResult::Success::Built,
+                    .builtOutputs =
                         {
-                            "foo", {
-                                .id =
+                            {
+                                "foo",
+                                {
+                                    {
+                                        .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"},
+                                    },
                                     DrvOutput{
                                         .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="),
                                         .outputName = "foo",
                                     },
-                                .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo"},
+                                },
                             },
-                        },
-                        {
-                            "bar", {
-                                .id =
+                            {
+                                "bar",
+                                {
+                                    {
+                                        .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"},
+                                    },
                                     DrvOutput{
                                         .drvHash = Hash::parseSRI("sha256-b4afnqKCO9oWXgYHb9DeQ2berSwOjS27rSd9TxXDc/U="),
                                         .outputName = "bar",
                                     },
-                                .outPath = StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar"},
+                                },
                             },
                         },
-                },
+                }},
+                .timesBuilt = 1,
                 .startTime = 30,
                 .stopTime = 50,
                 .cpuUser = std::chrono::microseconds(500s),
@@ -353,10 +367,10 @@ VERSIONED_CHARACTERIZATION_TEST(WorkerProtoTest, keyedBuildResult_1_29, "keyed-b
                 using namespace std::literals::chrono_literals;
                 std::tuple t{
                     KeyedBuildResult{
-                        {
-                            .status = KeyedBuildResult::OutputRejected,
+                        {.inner{BuildResult::Failure{
+                            .status = KeyedBuildResult::Failure::OutputRejected,
                             .errorMsg = "no idea why",
-                        },
+                        }}},
                         /* .path = */
                         DerivedPath::Opaque{
                             StorePath{"g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-xxx"},
                         },
@@ -364,10 +378,12 @@ VERSIONED_CHARACTERIZATION_TEST(WorkerProtoTest, keyedBuildResult_1_29, "keyed-b
                     },
                     KeyedBuildResult{
                         {
-                            .status = KeyedBuildResult::NotDeterministic,
-                            .errorMsg = "no idea why",
+                            .inner{BuildResult::Failure{
+                                .status = KeyedBuildResult::Failure::NotDeterministic,
+                                .errorMsg = "no idea why",
+                                .isNonDeterministic = true,
+                            }},
                             .timesBuilt = 3,
-                            .isNonDeterministic = true,
                             .startTime = 30,
                             .stopTime = 50,
                         },
diff --git a/src/libstore-tests/write-derivation.cc b/src/libstore-tests/write-derivation.cc
new file mode 100644
index 000000000..3f7de05d3
--- /dev/null
+++ b/src/libstore-tests/write-derivation.cc
@@ -0,0 +1,57 @@
+#include <gtest/gtest.h>
+#include <gmock/gmock.h>
+
+#include "nix/util/tests/gmock-matchers.hh"
+#include "nix/store/derivations.hh"
+#include "nix/store/dummy-store-impl.hh"
+#include "nix/store/tests/libstore.hh"
+
+namespace nix {
+namespace {
+
+class WriteDerivationTest : public LibStoreTest
+{
+protected:
+    WriteDerivationTest(ref<DummyStoreConfig> config_)
+        : LibStoreTest(config_->openDummyStore())
+        , config(std::move(config_))
+    {
+        config->readOnly = false;
+    }
+
+    WriteDerivationTest()
+        : WriteDerivationTest(make_ref<DummyStoreConfig>(DummyStoreConfig::Params{}))
+    {
+    }
+
+    ref<DummyStoreConfig> config;
+};
+
+static Derivation makeSimpleDrv()
+{
+    Derivation drv;
+    drv.name = "simple-derivation";
+    drv.platform = "system";
+    drv.builder = "foo";
+    drv.args = {"bar", "baz"};
+    drv.env = StringPairs{{"BIG_BAD", "WOLF"}};
+    return drv;
+}
+
+} // namespace
+
+TEST_F(WriteDerivationTest, addToStoreFromDumpCalledOnce)
+{
+    auto drv = makeSimpleDrv();
+
+    auto path1 = writeDerivation(*store, drv, NoRepair);
+    config->readOnly = true;
+    auto path2 = writeDerivation(*store, drv, NoRepair);
+    EXPECT_EQ(path1, path2);
+    EXPECT_THAT(
+        [&] { writeDerivation(*store, drv, Repair); },
+        ::testing::ThrowsMessage<Error>(testing::HasSubstrIgnoreANSIMatcher(
+            "operation 'addToStoreFromDump' is not supported by store 'dummy://'")));
+}
+
+} // namespace nix
diff --git a/src/libstore/aws-creds.cc b/src/libstore/aws-creds.cc
new file mode 100644
index 000000000..ff7b0f0ef
--- /dev/null
+++ b/src/libstore/aws-creds.cc
@@ -0,0 +1,166 @@
+#include "nix/store/aws-creds.hh"
+
+#if NIX_WITH_AWS_AUTH
+
+# include <aws/common/error.h>
+# include "nix/store/s3-url.hh"
+# include "nix/util/finally.hh"
+# include "nix/util/logging.hh"
+# include "nix/util/url.hh"
+# include "nix/util/util.hh"
+
+# include <aws/crt/Api.h>
+# include <aws/crt/auth/Credentials.h>
+# include <aws/crt/io/Bootstrap.h>
+
+# include <boost/unordered/concurrent_flat_map.hpp>
+
+# include <chrono>
+# include <future>
+# include <memory>
+# include <string>
+
+namespace nix {
+
+AwsAuthError::AwsAuthError(int errorCode)
+    : Error("AWS authentication error: '%s' (%d)", aws_error_str(errorCode), errorCode)
+    , errorCode(errorCode)
+{
+}
+
+namespace {
+
+static AwsCredentials getCredentialsFromProvider(std::shared_ptr<Aws::Crt::Auth::ICredentialsProvider> provider)
+{
+    if (!provider || !provider->IsValid()) {
+        throw AwsAuthError("AWS credential provider is invalid");
+    }
+
+    auto prom = std::make_shared<std::promise<AwsCredentials>>();
+    auto fut = prom->get_future();
+
+    provider->GetCredentials([prom](std::shared_ptr<Aws::Crt::Auth::Credentials> credentials, int errorCode) {
+        if (errorCode != 0 || !credentials) {
+            prom->set_exception(std::make_exception_ptr(AwsAuthError(errorCode)));
+        } else {
+            auto accessKeyId = Aws::Crt::ByteCursorToStringView(credentials->GetAccessKeyId());
+            auto secretAccessKey = Aws::Crt::ByteCursorToStringView(credentials->GetSecretAccessKey());
+            auto sessionToken = Aws::Crt::ByteCursorToStringView(credentials->GetSessionToken());
+
+            std::optional<std::string> sessionTokenStr;
+            if (!sessionToken.empty()) {
+                sessionTokenStr = std::string(sessionToken.data(), sessionToken.size());
+            }
+
+            prom->set_value(AwsCredentials(
+                std::string(accessKeyId.data(), accessKeyId.size()),
+                std::string(secretAccessKey.data(), secretAccessKey.size()),
+                sessionTokenStr));
+        }
+    });
+
+    // AWS CRT GetCredentials is asynchronous and only guarantees the callback will be
+    // invoked if the initial call returns success. There's no documented timeout mechanism,
+    // so we add a timeout to prevent indefinite hanging if the callback is never called.
+    auto timeout = std::chrono::seconds(30);
+    if (fut.wait_for(timeout) == std::future_status::timeout) {
+        throw AwsAuthError(
+            "Timeout waiting for AWS credentials (%d seconds)",
+            std::chrono::duration_cast<std::chrono::seconds>(timeout).count());
+    }
+
+    return fut.get(); // This will throw if set_exception was called
+}
+
+} // anonymous namespace
+
+class AwsCredentialProviderImpl : public AwsCredentialProvider
+{
+public:
+    AwsCredentialProviderImpl()
+    {
+        apiHandle.InitializeLogging(Aws::Crt::LogLevel::Warn, static_cast<FILE *>(nullptr));
+    }
+
+    AwsCredentials getCredentialsRaw(const std::string & profile);
+
+    AwsCredentials getCredentials(const ParsedS3URL & url) override
+    {
+        auto profile = url.profile.value_or("");
+        try {
+            return getCredentialsRaw(profile);
+        } catch (AwsAuthError & e) {
+            warn("AWS authentication failed for S3 request %s: %s", url.toHttpsUrl(), e.message());
+            credentialProviderCache.erase(profile);
+            throw;
+        }
+    }
+
+    std::shared_ptr<Aws::Crt::Auth::ICredentialsProvider> createProviderForProfile(const std::string & profile);
+
+private:
+    Aws::Crt::ApiHandle apiHandle;
+    boost::concurrent_flat_map<std::string, std::shared_ptr<Aws::Crt::Auth::ICredentialsProvider>>
+        credentialProviderCache;
+};
+
+std::shared_ptr<Aws::Crt::Auth::ICredentialsProvider>
+AwsCredentialProviderImpl::createProviderForProfile(const std::string & profile)
+{
+    debug(
+        "[pid=%d] creating new AWS credential provider for profile '%s'",
+        getpid(),
+        profile.empty() ? "(default)" : profile.c_str());
+
+    if (profile.empty()) {
+        Aws::Crt::Auth::CredentialsProviderChainDefaultConfig config;
+        config.Bootstrap = Aws::Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap();
+        return Aws::Crt::Auth::CredentialsProvider::CreateCredentialsProviderChainDefault(config);
+    }
+
+    Aws::Crt::Auth::CredentialsProviderProfileConfig config;
+    config.Bootstrap = Aws::Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap();
+    // This is safe because the underlying C library will copy this string
+    // c.f. https://github.com/awslabs/aws-c-auth/blob/main/source/credentials_provider_profile.c#L220
+    config.ProfileNameOverride = Aws::Crt::ByteCursorFromCString(profile.c_str());
+    return Aws::Crt::Auth::CredentialsProvider::CreateCredentialsProviderProfile(config);
+}
+
+AwsCredentials AwsCredentialProviderImpl::getCredentialsRaw(const std::string & profile)
+{
+    std::shared_ptr<Aws::Crt::Auth::ICredentialsProvider> provider;
+
+    credentialProviderCache.try_emplace_and_cvisit(
+        profile,
+        nullptr,
+        [&](auto & kv) { provider = kv.second = createProviderForProfile(profile); },
+        [&](const auto & kv) { provider = kv.second; });
+
+    if (!provider) {
+        credentialProviderCache.erase_if(profile, [](const auto & kv) {
+            [[maybe_unused]] auto [_, provider] = kv;
+            return !provider;
+        });
+
+        throw AwsAuthError(
+            "Failed to create AWS credentials provider for %s",
+            profile.empty() ? "default profile" : fmt("profile '%s'", profile));
+    }
+
+    return getCredentialsFromProvider(provider);
+}
+
+ref<AwsCredentialProvider> makeAwsCredentialsProvider()
+{
+    return make_ref<AwsCredentialProviderImpl>();
+}
+
+ref<AwsCredentialProvider> getAwsCredentialsProvider()
+{
+    static auto instance = makeAwsCredentialsProvider();
+    return instance;
+}
+
+} // namespace nix
+
+#endif
diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc
index badfb4b14..274e47271 100644
--- a/src/libstore/binary-cache-store.cc
+++ b/src/libstore/binary-cache-store.cc
@@ -76,9 +76,11 @@ std::optional<std::string> BinaryCacheStore::getNixCacheInfo()
     return getFile(cacheInfoFile);
 }
 
-void BinaryCacheStore::upsertFile(const std::string & path, std::string && data, const std::string & mimeType)
+void BinaryCacheStore::upsertFile(
+    const std::string & path, std::string && data, const std::string & mimeType, uint64_t sizeHint)
 {
-    upsertFile(path, std::make_shared<std::stringstream>(std::move(data)), mimeType);
+    auto source = restartableSourceFromFactory([data = std::move(data)]() { return std::make_unique<StringSource>(data); });
+    upsertFile(path, *source, mimeType, sizeHint);
 }
 
 void BinaryCacheStore::getFile(const std::string & path, Callback<std::optional<std::string>> callback) noexcept
@@ -270,11 +272,19 @@ ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
     /* Atomically write the NAR file. */
     if (repair || !fileExists(narInfo->url)) {
+        auto source = restartableSourceFromFactory([fnTemp]() {
+            struct AutoCloseFDSource : AutoCloseFD, FdSource
+            {
+                AutoCloseFDSource(AutoCloseFD fd)
+                    : AutoCloseFD(std::move(fd))
+                    , FdSource(get())
+                {
+                }
+            };
+            return std::make_unique<AutoCloseFDSource>(toDescriptor(open(fnTemp.c_str(), O_RDONLY)));
+        });
         stats.narWrite++;
-        upsertFile(
-            narInfo->url,
-            std::make_shared<std::fstream>(fnTemp, std::ios_base::in | std::ios_base::binary),
-            "application/x-nix-nar");
+        upsertFile(narInfo->url, *source, "application/x-nix-nar", narInfo->fileSize);
     } else
         stats.narWriteAverted++;
 
@@ -502,10 +512,15 @@ StorePath BinaryCacheStore::addToStore(
         ->path;
 }
 
-void BinaryCacheStore::queryRealisationUncached(
-    const DrvOutput & id, Callback<std::shared_ptr<const Realisation>> callback) noexcept
+std::string BinaryCacheStore::makeRealisationPath(const DrvOutput & id)
 {
-    auto outputInfoFilePath = realisationsPrefix + "/" + id.to_string() + ".doi";
+    return realisationsPrefix + "/" + id.to_string() + ".doi";
+}
+
+void BinaryCacheStore::queryRealisationUncached(
+    const DrvOutput & id, Callback<std::shared_ptr<const UnkeyedRealisation>> callback) noexcept
+{
+    auto outputInfoFilePath = makeRealisationPath(id);
 
     auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));
 
@@ -515,11 +530,12 @@ void BinaryCacheStore::queryRealisationUncached(
         if (!data)
             return (*callbackPtr)({});
 
-        std::shared_ptr<const Realisation> realisation;
+        std::shared_ptr<const UnkeyedRealisation> realisation;
         try {
-            realisation = std::make_shared<const Realisation>(nlohmann::json::parse(*data));
+            realisation = std::make_shared<const UnkeyedRealisation>(nlohmann::json::parse(*data));
         } catch (Error & e) {
-            e.addTrace({}, "while parsing file '%s' as a realisation", outputInfoFilePath);
+            e.addTrace(
+                {}, "while parsing file '%s' as a realisation for key '%s'", outputInfoFilePath, id.to_string());
             throw;
         }
         return (*callbackPtr)(std::move(realisation));
@@ -535,8 +551,7 @@ void BinaryCacheStore::registerDrvOutput(const Realisation & info)
 {
     if (diskCache)
         diskCache->upsertRealisation(config.getReference().render(/*FIXME withParams=*/false), info);
-    auto filePath = realisationsPrefix + "/" + info.id.to_string() + ".doi";
-    upsertFile(filePath, static_cast<nlohmann::json>(info).dump(), "application/json");
+    upsertFile(makeRealisationPath(info.id), static_cast<nlohmann::json>(info).dump(), "application/json");
 }
 
 ref<SourceAccessor> BinaryCacheStore::getRemoteFSAccessor(bool requireValidPath)
diff --git a/src/libstore/build-result.cc b/src/libstore/build-result.cc
index 43c7adb11..ecbd27b49 100644
--- a/src/libstore/build-result.cc
+++ b/src/libstore/build-result.cc
@@ -5,4 +5,10 @@ namespace nix {
 bool BuildResult::operator==(const BuildResult &) const noexcept = default;
 std::strong_ordering BuildResult::operator<=>(const BuildResult &) const noexcept = default;
 
+bool BuildResult::Success::operator==(const BuildResult::Success &) const noexcept = default;
+std::strong_ordering BuildResult::Success::operator<=>(const BuildResult::Success &) const noexcept = default;
+
+bool BuildResult::Failure::operator==(const BuildResult::Failure &) const noexcept = default;
+std::strong_ordering BuildResult::Failure::operator<=>(const BuildResult::Failure &) const noexcept = default;
+
 } // namespace nix
diff --git a/src/libstore/build/derivation-builder.cc b/src/libstore/build/derivation-builder.cc
new file mode 100644
index 000000000..39ac40175
--- /dev/null
+++ b/src/libstore/build/derivation-builder.cc
@@ -0,0 +1,27 @@
+#include "nix/util/json-utils.hh"
+#include "nix/store/build/derivation-builder.hh"
+
+namespace nlohmann {
+
+using namespace nix;
+
+ExternalBuilder adl_serializer<ExternalBuilder>::from_json(const json & json)
+{
+    auto obj =
getObject(json); + return { + .systems = valueAt(obj, "systems"), + .program = valueAt(obj, "program"), + .args = valueAt(obj, "args"), + }; +} + +void adl_serializer::to_json(json & json, const ExternalBuilder & eb) +{ + json = { + {"systems", eb.systems}, + {"program", eb.program}, + {"args", eb.args}, + }; +} + +} // namespace nlohmann diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index ebef2a375..164948390 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -1,6 +1,5 @@ #include "nix/store/build/derivation-building-goal.hh" #include "nix/store/build/derivation-env-desugar.hh" -#include "nix/store/build/derivation-trampoline-goal.hh" #ifndef _WIN32 // TODO enable build hook on Windows # include "nix/store/build/hook-instance.hh" # include "nix/store/build/derivation-builder.hh" @@ -27,22 +26,21 @@ namespace nix { DerivationBuildingGoal::DerivationBuildingGoal( - const StorePath & drvPath, const Derivation & drv_, Worker & worker, BuildMode buildMode) - : Goal(worker, gaveUpOnSubstitution()) + const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode, bool storeDerivation) + : Goal(worker, gaveUpOnSubstitution(storeDerivation)) , drvPath(drvPath) + , drv{std::make_unique(drv)} , buildMode(buildMode) { - drv = std::make_unique(drv_); - try { drvOptions = - std::make_unique(DerivationOptions::fromStructuredAttrs(drv->env, drv->structuredAttrs)); + std::make_unique(DerivationOptions::fromStructuredAttrs(drv.env, drv.structuredAttrs)); } catch (Error & e) { e.addTrace({}, "while parsing derivation '%s'", worker.store.printStorePath(drvPath)); throw; } - name = fmt("building of '%s' from in-memory derivation", worker.store.printStorePath(drvPath)); + name = fmt("building derivation '%s'", worker.store.printStorePath(drvPath)); trace("created"); /* Prevent the .chroot directory from being @@ -67,11 +65,7 @@ DerivationBuildingGoal::~DerivationBuildingGoal() std::string DerivationBuildingGoal::key() { - /* Ensure that derivations get built in order of their name, - i.e. a derivation named "aardvark" always comes before - "baboon". And substitution goals always happen before - derivation goals (due to "bd$"). */ - return "bd$" + std::string(drvPath.name()) + "$" + worker.store.printStorePath(drvPath); + return "dd$" + std::string(drvPath.name()) + "$" + worker.store.printStorePath(drvPath); } void DerivationBuildingGoal::killChild() @@ -90,21 +84,9 @@ void DerivationBuildingGoal::timedOut(Error && ex) killChild(); // We're not inside a coroutine, hence we can't use co_return here. // Thus we ignore the return value. - [[maybe_unused]] Done _ = doneFailure({BuildResult::TimedOut, std::move(ex)}); + [[maybe_unused]] Done _ = doneFailure({BuildResult::Failure::TimedOut, std::move(ex)}); } -/** - * Used for `inputGoals` local variable below - */ -struct value_comparison -{ - template - bool operator()(const ref & lhs, const ref & rhs) const - { - return *lhs < *rhs; - } -}; - std::string showKnownOutputs(const StoreDirConfig & store, const Derivation & drv) { std::string msg; @@ -125,50 +107,10 @@ static void runPostBuildHook( /* At least one of the output paths could not be produced using a substitute. So we have to build instead. 
*/ -Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() +Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution(bool storeDerivation) { Goals waitees; - std::map, GoalPtr, value_comparison> inputGoals; - - { - std::function, const DerivedPathMap::ChildNode &)> - addWaiteeDerivedPath; - - addWaiteeDerivedPath = [&](ref inputDrv, - const DerivedPathMap::ChildNode & inputNode) { - if (!inputNode.value.empty()) { - auto g = worker.makeGoal( - DerivedPath::Built{ - .drvPath = inputDrv, - .outputs = inputNode.value, - }, - buildMode == bmRepair ? bmRepair : bmNormal); - inputGoals.insert_or_assign(inputDrv, g); - waitees.insert(std::move(g)); - } - for (const auto & [outputName, childNode] : inputNode.childMap) - addWaiteeDerivedPath( - make_ref(SingleDerivedPath::Built{inputDrv, outputName}), childNode); - }; - - for (const auto & [inputDrvPath, inputNode] : drv->inputDrvs.map) { - /* Ensure that pure, non-fixed-output derivations don't - depend on impure derivations. */ - if (experimentalFeatureSettings.isEnabled(Xp::ImpureDerivations) && !drv->type().isImpure() - && !drv->type().isFixed()) { - auto inputDrv = worker.evalStore.readDerivation(inputDrvPath); - if (inputDrv.type().isImpure()) - throw Error( - "pure derivation '%s' depends on impure derivation '%s'", - worker.store.printStorePath(drvPath), - worker.store.printStorePath(inputDrvPath)); - } - - addWaiteeDerivedPath(makeConstantStorePathRef(inputDrvPath), inputNode); - } - } - /* Copy the input sources from the eval store to the build store. @@ -205,7 +147,7 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() nrFailed, nrFailed == 1 ? "dependency" : "dependencies"); msg += showKnownOutputs(worker.store, *drv); - co_return doneFailure(BuildError(BuildResult::DependencyFailed, msg)); + co_return doneFailure(BuildError(BuildResult::Failure::DependencyFailed, msg)); } /* Gather information necessary for computing the closure and/or @@ -213,170 +155,17 @@ Goal::Co DerivationBuildingGoal::gaveUpOnSubstitution() /* Determine the full set of input paths. */ - /* First, the input derivations. */ + if (storeDerivation) { + assert(drv->inputDrvs.map.empty()); + /* Store the resolved derivation, as part of the record of + what we're actually building */ + writeDerivation(worker.store, *drv); + } + { - auto & fullDrv = *drv; - - auto drvType = fullDrv.type(); - bool resolveDrv = - std::visit( - overloaded{ - [&](const DerivationType::InputAddressed & ia) { - /* must resolve if deferred. */ - return ia.deferred; - }, - [&](const DerivationType::ContentAddressed & ca) { - return !fullDrv.inputDrvs.map.empty() - && (ca.fixed - /* Can optionally resolve if fixed, which is good - for avoiding unnecessary rebuilds. */ - ? experimentalFeatureSettings.isEnabled(Xp::CaDerivations) - /* Must resolve if floating and there are any inputs - drvs. */ - : true); - }, - [&](const DerivationType::Impure &) { return true; }}, - drvType.raw) - /* no inputs are outputs of dynamic derivations */ - || std::ranges::any_of(fullDrv.inputDrvs.map.begin(), fullDrv.inputDrvs.map.end(), [](auto & pair) { - return !pair.second.childMap.empty(); - }); - - if (resolveDrv && !fullDrv.inputDrvs.map.empty()) { - experimentalFeatureSettings.require(Xp::CaDerivations); - - /* We are be able to resolve this derivation based on the - now-known results of dependencies. If so, we become a - stub goal aliasing that resolved derivation goal. 
*/ - std::optional attempt = fullDrv.tryResolve( - worker.store, - [&](ref drvPath, const std::string & outputName) -> std::optional { - auto mEntry = get(inputGoals, drvPath); - if (!mEntry) - return std::nullopt; - - auto & buildResult = (*mEntry)->buildResult; - if (!buildResult.success()) - return std::nullopt; - - auto i = get(buildResult.builtOutputs, outputName); - if (!i) - return std::nullopt; - - return i->outPath; - }); - if (!attempt) { - /* TODO (impure derivations-induced tech debt) (see below): - The above attempt should have found it, but because we manage - inputDrvOutputs statefully, sometimes it gets out of sync with - the real source of truth (store). So we query the store - directly if there's a problem. */ - attempt = fullDrv.tryResolve(worker.store, &worker.evalStore); - } - assert(attempt); - Derivation drvResolved{std::move(*attempt)}; - - auto pathResolved = writeDerivation(worker.store, drvResolved); - - auto msg = - fmt("resolved derivation: '%s' -> '%s'", - worker.store.printStorePath(drvPath), - worker.store.printStorePath(pathResolved)); - act = std::make_unique( - *logger, - lvlInfo, - actBuildWaiting, - msg, - Logger::Fields{ - worker.store.printStorePath(drvPath), - worker.store.printStorePath(pathResolved), - }); - - /* TODO https://github.com/NixOS/nix/issues/13247 we should - let the calling goal do this, so it has a change to pass - just the output(s) it cares about. */ - auto resolvedDrvGoal = - worker.makeDerivationTrampolineGoal(pathResolved, OutputsSpec::All{}, drvResolved, buildMode); - { - Goals waitees{resolvedDrvGoal}; - co_await await(std::move(waitees)); - } - - trace("resolved derivation finished"); - - auto resolvedResult = resolvedDrvGoal->buildResult; - - if (resolvedResult.success()) { - SingleDrvOutputs builtOutputs; - - auto outputHashes = staticOutputHashes(worker.evalStore, *drv); - auto resolvedHashes = staticOutputHashes(worker.store, drvResolved); - - StorePathSet outputPaths; - - for (auto & outputName : drvResolved.outputNames()) { - auto outputHash = get(outputHashes, outputName); - auto resolvedHash = get(resolvedHashes, outputName); - if ((!outputHash) || (!resolvedHash)) - throw Error( - "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolve)", - worker.store.printStorePath(drvPath), - outputName); - - auto realisation = [&] { - auto take1 = get(resolvedResult.builtOutputs, outputName); - if (take1) - return *take1; - - /* The above `get` should work. But stateful tracking of - outputs in resolvedResult, this can get out of sync with the - store, which is our actual source of truth. For now we just - check the store directly if it fails. */ - auto take2 = worker.evalStore.queryRealisation(DrvOutput{*resolvedHash, outputName}); - if (take2) - return *take2; - - throw Error( - "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/realisation)", - worker.store.printStorePath(pathResolved), - outputName); - }(); - - if (!drv->type().isImpure()) { - auto newRealisation = realisation; - newRealisation.id = DrvOutput{*outputHash, outputName}; - newRealisation.signatures.clear(); - if (!drv->type().isFixed()) { - auto & drvStore = worker.evalStore.isValidPath(drvPath) ? 
worker.evalStore : worker.store; - newRealisation.dependentRealisations = - drvOutputReferences(worker.store, *drv, realisation.outPath, &drvStore); - } - worker.store.signRealisation(newRealisation); - worker.store.registerDrvOutput(newRealisation); - } - outputPaths.insert(realisation.outPath); - builtOutputs.emplace(outputName, realisation); - } - - runPostBuildHook(worker.store, *logger, drvPath, outputPaths); - - auto status = resolvedResult.status; - if (status == BuildResult::AlreadyValid) - status = BuildResult::ResolvesToAlreadyValid; - - co_return doneSuccess(status, std::move(builtOutputs)); - } else { - co_return doneFailure({ - BuildResult::DependencyFailed, - "build of resolved derivation '%s' failed", - worker.store.printStorePath(pathResolved), - }); - } - } - /* If we get this far, we know no dynamic drvs inputs */ - for (auto & [depDrvPath, depNode] : fullDrv.inputDrvs.map) { + for (auto & [depDrvPath, depNode] : drv->inputDrvs.map) { for (auto & outputName : depNode.value) { /* Don't need to worry about `inputGoals`, because impure derivations are always resolved above. Can @@ -450,7 +239,6 @@ Goal::Co DerivationBuildingGoal::tryToBuild() : buildMode == bmCheck ? "checking outputs of '%s'" : "building '%s'", worker.store.printStorePath(drvPath)); - fmt("building '%s'", worker.store.printStorePath(drvPath)); #ifndef _WIN32 // TODO enable build hook on Windows if (hook) msg += fmt(" on '%s'", hook->machineName); @@ -484,6 +272,8 @@ Goal::Co DerivationBuildingGoal::tryToBuild() bool useHook; + const ExternalBuilder * externalBuilder = nullptr; + while (true) { trace("trying to build"); @@ -536,7 +326,7 @@ Goal::Co DerivationBuildingGoal::tryToBuild() debug("skipping build of derivation '%s', someone beat us to it", worker.store.printStorePath(drvPath)); outputLocks.setDeletion(true); outputLocks.unlock(); - co_return doneSuccess(BuildResult::AlreadyValid, std::move(validOutputs)); + co_return doneSuccess(BuildResult::Success::AlreadyValid, std::move(validOutputs)); } /* If any of the outputs already exist but are not valid, delete @@ -577,7 +367,44 @@ Goal::Co DerivationBuildingGoal::tryToBuild() co_await waitForAWhile(); continue; case rpDecline: - /* We should do it ourselves. */ + /* We should do it ourselves. + + Now that we've decided we can't / won't do a remote build, check + that we can in fact build locally. First see if there is an + external builder for a "semi-local build". If there is, prefer to + use that. If there is not, then check if we can do a "true" local + build. 
+                */
+
+                externalBuilder = settings.findExternalDerivationBuilderIfSupported(*drv);
+
+                if (!externalBuilder && !drvOptions->canBuildLocally(worker.store, *drv)) {
+                    auto msg =
+                        fmt("Cannot build '%s'.\n"
+                            "Reason: " ANSI_RED "required system or feature not available" ANSI_NORMAL
+                            "\n"
+                            "Required system: '%s' with features {%s}\n"
+                            "Current system: '%s' with features {%s}",
+                            Magenta(worker.store.printStorePath(drvPath)),
+                            Magenta(drv->platform),
+                            concatStringsSep(", ", drvOptions->getRequiredSystemFeatures(*drv)),
+                            Magenta(settings.thisSystem),
+                            concatStringsSep(", ", worker.store.Store::config.systemFeatures));
+
+                    // Since aarch64-darwin has Rosetta 2, this user can actually run x86_64-darwin on their hardware,
+                    // so we should tell them to run the command that installs Rosetta 2.
+                    if (drv->platform == "x86_64-darwin" && settings.thisSystem == "aarch64-darwin")
+                        msg += fmt(
+                            "\nNote: run `%s` to run programs for x86_64-darwin",
+                            Magenta(
+                                "/usr/sbin/softwareupdate --install-rosetta && launchctl stop org.nixos.nix-daemon"));
+
+#ifndef _WIN32 // TODO enable `DerivationBuilder` on Windows
+                    builder.reset();
+#endif
+                    outputLocks.unlock();
+                    worker.permanentFailure = true;
+                    co_return doneFailure({BuildResult::Failure::InputRejected, std::move(msg)});
+                }
                 useHook = false;
                 break;
             }
@@ -628,7 +455,7 @@ Goal::Co DerivationBuildingGoal::tryToBuild()

             /* Check the exit status. */
             if (!statusOk(status)) {
-                auto e = fixupBuilderFailureErrorMessage({BuildResult::MiscFailure, status, ""});
+                auto e = fixupBuilderFailureErrorMessage({BuildResult::Failure::MiscFailure, status, ""});

                 outputLocks.unlock();

@@ -669,7 +496,7 @@ Goal::Co DerivationBuildingGoal::tryToBuild()
             outputLocks.setDeletion(true);
             outputLocks.unlock();

-            co_return doneSuccess(BuildResult::Built, std::move(builtOutputs));
+            co_return doneSuccess(BuildResult::Success::Built, std::move(builtOutputs));
         }

         co_await yield();
@@ -764,36 +591,35 @@ Goal::Co DerivationBuildingGoal::tryToBuild()
                 co_return doneFailure(std::move(e));
            }

+            DerivationBuilderParams params{
+                .drvPath = drvPath,
+                .buildResult = buildResult,
+                .drv = *drv,
+                .drvOptions = *drvOptions,
+                .inputPaths = inputPaths,
+                .initialOutputs = initialOutputs,
+                .buildMode = buildMode,
+                .defaultPathsInChroot = std::move(defaultPathsInChroot),
+                .systemFeatures = worker.store.config.systemFeatures.get(),
+                .desugaredEnv = std::move(desugaredEnv),
+            };
+
            /* If we have to wait and retry (see below), then `builder`
               will already be created, so we don't need to create it again. */
-            builder = makeDerivationBuilder(
-                *localStoreP,
-                std::make_unique(*this, builder),
-                DerivationBuilderParams{
-                    .drvPath = drvPath,
-                    .buildResult = buildResult,
-                    .drv = *drv,
-                    .drvOptions = *drvOptions,
-                    .inputPaths = inputPaths,
-                    .initialOutputs = initialOutputs,
-                    .buildMode = buildMode,
-                    .defaultPathsInChroot = std::move(defaultPathsInChroot),
-                    .systemFeatures = worker.store.config.systemFeatures.get(),
-                    .desugaredEnv = std::move(desugaredEnv),
-                });
+            builder = externalBuilder ? makeExternalDerivationBuilder(
+                          *localStoreP,
+                          std::make_unique(*this, builder),
+                          std::move(params),
+                          *externalBuilder)
+                                      : makeDerivationBuilder(
+                          *localStoreP,
+                          std::make_unique(*this, builder),
+                          std::move(params));
        }

-        std::optional builderOutOpt;
-        try {
-            /* Okay, we have to build.
*/ - builderOutOpt = builder->startBuild(); - } catch (BuildError & e) { - builder.reset(); - outputLocks.unlock(); - worker.permanentFailure = true; - co_return doneFailure(std::move(e)); // InputRejected - } - if (!builderOutOpt) { + if (auto builderOutOpt = builder->startBuild()) { + builderOut = *std::move(builderOutOpt); + } else { if (!actLock) actLock = std::make_unique( *logger, @@ -802,9 +628,7 @@ Goal::Co DerivationBuildingGoal::tryToBuild() fmt("waiting for a free build user ID for '%s'", Magenta(worker.store.printStorePath(drvPath)))); co_await waitForAWhile(); continue; - } else { - builderOut = *std::move(builderOutOpt); - }; + } break; } @@ -832,15 +656,15 @@ Goal::Co DerivationBuildingGoal::tryToBuild() # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wswitch-enum" switch (e.status) { - case BuildResult::HashMismatch: + case BuildResult::Failure::HashMismatch: worker.hashMismatch = true; /* See header, the protocols don't know about `HashMismatch` yet, so change it to `OutputRejected`, which they expect for this case (hash mismatch is a type of output rejection). */ - e.status = BuildResult::OutputRejected; + e.status = BuildResult::Failure::OutputRejected; break; - case BuildResult::NotDeterministic: + case BuildResult::Failure::NotDeterministic: worker.checkMismatch = true; break; default: @@ -853,7 +677,15 @@ Goal::Co DerivationBuildingGoal::tryToBuild() { builder.reset(); StorePathSet outputPaths; - for (auto & [_, output] : builtOutputs) { + /* In the check case we install no store objects, and so + `builtOutputs` is empty. However, per issue #14287, there is + an expectation that the post-build hook is still executed. + (This is useful for e.g. logging successful deterministic rebuilds.) + + In order to make that work, in the check case just load the + (preexisting) infos from scratch, rather than relying on what + `DerivationBuilder` returned to us. */ + for (auto & [_, output] : buildMode == bmCheck ? checkPathValidity(initialOutputs).second : builtOutputs) { // for sake of `bmRepair` worker.markContentsGood(output.outPath); outputPaths.insert(output.outPath); @@ -866,7 +698,7 @@ Goal::Co DerivationBuildingGoal::tryToBuild() (unlinked) lock files. */ outputLocks.setDeletion(true); outputLocks.unlock(); - co_return doneSuccess(BuildResult::Built, std::move(builtOutputs)); + co_return doneSuccess(BuildResult::Success::Built, std::move(builtOutputs)); } #endif } @@ -1149,7 +981,7 @@ void DerivationBuildingGoal::handleChildOutput(Descriptor fd, std::string_view d // We're not inside a coroutine, hence we can't use co_return here. // Thus we ignore the return value. [[maybe_unused]] Done _ = doneFailure(BuildError( - BuildResult::LogLimitExceeded, + BuildResult::Failure::LogLimitExceeded, "%s killed after writing more than %d bytes of log output", getName(), settings.maxLogSize)); @@ -1286,13 +1118,22 @@ DerivationBuildingGoal::checkPathValidity(std::map & // without the `ca-derivations` experimental flag). 
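+        /* Editorial note, not part of the upstream change: with this refactor
+           a keyed `Realisation` is assembled from its unkeyed part plus an
+           explicit `DrvOutput` key. A minimal sketch, assuming the aggregate
+           layout used throughout this diff (`someDrvHash` and `outputName`
+           are placeholders):
+
+               Realisation r{
+                   UnkeyedRealisation{.outPath = info.known->path},
+                   DrvOutput{.drvHash = someDrvHash, .outputName = outputName},
+               };
+
+           The call below registers exactly such a keyed value for an output
+           that is valid on disk but missing its realisation. */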
worker.store.registerDrvOutput( Realisation{ + { + .outPath = info.known->path, + }, drvOutput, - info.known->path, }); } } if (info.known && info.known->isValid()) - validOutputs.emplace(i.first, Realisation{drvOutput, info.known->path}); + validOutputs.emplace( + i.first, + Realisation{ + { + .outPath = info.known->path, + }, + drvOutput, + }); } bool allValid = true; @@ -1306,16 +1147,16 @@ DerivationBuildingGoal::checkPathValidity(std::map & return {allValid, validOutputs}; } -Goal::Done DerivationBuildingGoal::doneSuccess(BuildResult::Status status, SingleDrvOutputs builtOutputs) +Goal::Done DerivationBuildingGoal::doneSuccess(BuildResult::Success::Status status, SingleDrvOutputs builtOutputs) { - buildResult.status = status; - - assert(buildResult.success()); + buildResult.inner = BuildResult::Success{ + .status = status, + .builtOutputs = std::move(builtOutputs), + }; mcRunningBuilds.reset(); - buildResult.builtOutputs = std::move(builtOutputs); - if (status == BuildResult::Built) + if (status == BuildResult::Success::Built) worker.doneBuilds++; worker.updateProgress(); @@ -1325,16 +1166,18 @@ Goal::Done DerivationBuildingGoal::doneSuccess(BuildResult::Status status, Singl Goal::Done DerivationBuildingGoal::doneFailure(BuildError ex) { - buildResult.status = ex.status; - buildResult.errorMsg = fmt("%s", Uncolored(ex.info().msg)); - if (buildResult.status == BuildResult::TimedOut) - worker.timedOut = true; - if (buildResult.status == BuildResult::PermanentFailure) - worker.permanentFailure = true; + buildResult.inner = BuildResult::Failure{ + .status = ex.status, + .errorMsg = fmt("%s", Uncolored(ex.info().msg)), + }; mcRunningBuilds.reset(); - if (ex.status != BuildResult::DependencyFailed) + if (ex.status == BuildResult::Failure::TimedOut) + worker.timedOut = true; + if (ex.status == BuildResult::Failure::PermanentFailure) + worker.permanentFailure = true; + if (ex.status != BuildResult::Failure::DependencyFailed) worker.failedBuilds++; worker.updateProgress(); diff --git a/src/libstore/build/derivation-check.cc b/src/libstore/build/derivation-check.cc index 82e92e1f3..181221ba5 100644 --- a/src/libstore/build/derivation-check.cc +++ b/src/libstore/build/derivation-check.cc @@ -18,7 +18,11 @@ void checkOutputs( for (auto & output : outputs) outputsByPath.emplace(store.printStorePath(output.second.path), output.second); - for (auto & [outputName, info] : outputs) { + for (auto & pair : outputs) { + // We can't use auto destructuring here because + // clang-tidy seems to complain about it. + const std::string & outputName = pair.first; + const auto & info = pair.second; auto * outputSpec = get(drvOutputs, outputName); assert(outputSpec); @@ -33,7 +37,7 @@ void checkOutputs( /* Throw an error after registering the path as valid. */ throw BuildError( - BuildResult::HashMismatch, + BuildResult::Failure::HashMismatch, "hash mismatch in fixed-output derivation '%s':\n specified: %s\n got: %s", store.printStorePath(drvPath), wanted.to_string(HashFormat::SRI, true), @@ -42,7 +46,7 @@ void checkOutputs( if (!info.references.empty()) { auto numViolations = info.references.size(); throw BuildError( - BuildResult::HashMismatch, + BuildResult::Failure::HashMismatch, "fixed-output derivations must not reference store paths: '%s' references %d distinct paths, e.g. 
'%s'", store.printStorePath(drvPath), numViolations, @@ -84,7 +88,7 @@ void checkOutputs( auto applyChecks = [&](const DerivationOptions::OutputChecks & checks) { if (checks.maxSize && info.narSize > *checks.maxSize) throw BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "path '%s' is too large at %d bytes; limit is %d bytes", store.printStorePath(info.path), info.narSize, @@ -94,7 +98,7 @@ void checkOutputs( uint64_t closureSize = getClosure(info.path).second; if (closureSize > *checks.maxClosureSize) throw BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "closure of path '%s' is too large at %d bytes; limit is %d bytes", store.printStorePath(info.path), closureSize, @@ -115,7 +119,7 @@ void checkOutputs( std::string outputsListing = concatMapStringsSep(", ", outputs, [](auto & o) { return o.first; }); throw BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "derivation '%s' output check for '%s' contains an illegal reference specifier '%s'," " expected store path or output name (one of [%s])", store.printStorePath(drvPath), @@ -148,7 +152,7 @@ void checkOutputs( badPathsStr += store.printStorePath(i); } throw BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "output '%s' is not allowed to refer to the following paths:%s", store.printStorePath(info.path), badPathsStr); diff --git a/src/libstore/build/derivation-env-desugar.cc b/src/libstore/build/derivation-env-desugar.cc index d6e002d91..8d552fc4d 100644 --- a/src/libstore/build/derivation-env-desugar.cc +++ b/src/libstore/build/derivation-env-desugar.cc @@ -25,7 +25,7 @@ DesugaredEnv DesugaredEnv::create( if (drv.structuredAttrs) { auto json = drv.structuredAttrs->prepareStructuredAttrs(store, drvOptions, inputPaths, drv.outputs); res.atFileEnvPair("NIX_ATTRS_SH_FILE", ".attrs.sh") = StructuredAttrs::writeShell(json); - res.atFileEnvPair("NIX_ATTRS_JSON_FILE", ".attrs.json") = json.dump(); + res.atFileEnvPair("NIX_ATTRS_JSON_FILE", ".attrs.json") = static_cast(std::move(json)).dump(); } else { /* In non-structured mode, set all bindings either directory in the environment or via a file, as specified by diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index b9046744a..14aa044ea 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -1,5 +1,6 @@ #include "nix/store/build/derivation-goal.hh" #include "nix/store/build/derivation-building-goal.hh" +#include "nix/store/build/derivation-resolution-goal.hh" #ifndef _WIN32 // TODO enable build hook on Windows # include "nix/store/build/hook-instance.hh" # include "nix/store/build/derivation-builder.hh" @@ -29,10 +30,12 @@ DerivationGoal::DerivationGoal( const Derivation & drv, const OutputName & wantedOutput, Worker & worker, - BuildMode buildMode) - : Goal(worker, haveDerivation()) + BuildMode buildMode, + bool storeDerivation) + : Goal(worker, haveDerivation(storeDerivation)) , drvPath(drvPath) , wantedOutput(wantedOutput) + , drv{std::make_unique(drv)} , outputHash{[&] { auto outputHashes = staticOutputHashes(worker.evalStore, drv); if (auto * mOutputHash = get(outputHashes, wantedOutput)) @@ -41,11 +44,8 @@ DerivationGoal::DerivationGoal( }()} , buildMode(buildMode) { - this->drv = std::make_unique(drv); - name = - fmt("building of '%s' from in-memory derivation", - DerivedPath::Built{makeConstantStorePathRef(drvPath), drv.outputNames()}.to_string(worker.store)); + name = 
fmt("getting output '%s' from derivation '%s'", wantedOutput, worker.store.printStorePath(drvPath)); trace("created"); mcExpectedBuilds = std::make_unique>(worker.expectedBuilds); @@ -54,17 +54,13 @@ DerivationGoal::DerivationGoal( std::string DerivationGoal::key() { - /* Ensure that derivations get built in order of their name, - i.e. a derivation named "aardvark" always comes before - "baboon". And substitution goals always happen before - derivation goals (due to "b$"). */ - return "b$" + std::string(drvPath.name()) + "$" + SingleDerivedPath::Built{ + return "db$" + std::string(drvPath.name()) + "$" + SingleDerivedPath::Built{ .drvPath = makeConstantStorePathRef(drvPath), .output = wantedOutput, }.to_string(worker.store); } -Goal::Co DerivationGoal::haveDerivation() +Goal::Co DerivationGoal::haveDerivation(bool storeDerivation) { trace("have derivation"); @@ -94,7 +90,7 @@ Goal::Co DerivationGoal::haveDerivation() /* If they are all valid, then we're done. */ if (checkResult && checkResult->second == PathStatus::Valid && buildMode == bmNormal) { - co_return doneSuccess(BuildResult::AlreadyValid, checkResult->first); + co_return doneSuccess(BuildResult::Success::AlreadyValid, checkResult->first); } Goals waitees; @@ -104,8 +100,7 @@ Goal::Co DerivationGoal::haveDerivation() them. */ if (settings.useSubstitutes && drvOptions.substitutesAllowed()) { if (!checkResult) - waitees.insert(upcast_goal(worker.makeDrvOutputSubstitutionGoal( - DrvOutput{outputHash, wantedOutput}, buildMode == bmRepair ? Repair : NoRepair))); + waitees.insert(upcast_goal(worker.makeDrvOutputSubstitutionGoal(DrvOutput{outputHash, wantedOutput}))); else { auto * cap = getDerivationCA(*drv); waitees.insert(upcast_goal(worker.makePathSubstitutionGoal( @@ -123,7 +118,7 @@ Goal::Co DerivationGoal::haveDerivation() if (nrFailed > 0 && nrFailed > nrNoSubstituters && !settings.tryFallback) { co_return doneFailure(BuildError( - BuildResult::TransientFailure, + BuildResult::Failure::TransientFailure, "some substitutes for the outputs of derivation '%s' failed (usually happens due to networking issues); try '--fallback' to build derivation from source ", worker.store.printStorePath(drvPath))); } @@ -135,7 +130,7 @@ Goal::Co DerivationGoal::haveDerivation() bool allValid = checkResult && checkResult->second == PathStatus::Valid; if (buildMode == bmNormal && allValid) { - co_return doneSuccess(BuildResult::Substituted, checkResult->first); + co_return doneSuccess(BuildResult::Success::Substituted, checkResult->first); } if (buildMode == bmRepair && allValid) { co_return repairClosure(); @@ -146,9 +141,101 @@ Goal::Co DerivationGoal::haveDerivation() worker.store.printStorePath(drvPath)); } + auto resolutionGoal = worker.makeDerivationResolutionGoal(drvPath, *drv, buildMode); + { + Goals waitees{resolutionGoal}; + co_await await(std::move(waitees)); + } + if (nrFailed != 0) { + co_return doneFailure({BuildResult::Failure::DependencyFailed, "Build failed due to failed dependency"}); + } + + if (resolutionGoal->resolvedDrv) { + auto & [pathResolved, drvResolved] = *resolutionGoal->resolvedDrv; + + auto resolvedDrvGoal = + worker.makeDerivationGoal(pathResolved, drvResolved, wantedOutput, buildMode, /*storeDerivation=*/true); + { + Goals waitees{resolvedDrvGoal}; + co_await await(std::move(waitees)); + } + + trace("resolved derivation finished"); + + auto resolvedResult = resolvedDrvGoal->buildResult; + + // No `std::visit` for coroutines yet + if (auto * successP = resolvedResult.tryGetSuccess()) { + auto & success = *successP; + 
auto outputHashes = staticOutputHashes(worker.evalStore, *drv);
+            auto resolvedHashes = staticOutputHashes(worker.store, drvResolved);
+
+            auto outputHash = get(outputHashes, wantedOutput);
+            auto resolvedHash = get(resolvedHashes, wantedOutput);
+            if ((!outputHash) || (!resolvedHash))
+                throw Error(
+                    "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolve)",
+                    worker.store.printStorePath(drvPath),
+                    wantedOutput);
+
+            auto realisation = [&] {
+                auto take1 = get(success.builtOutputs, wantedOutput);
+                if (take1)
+                    return static_cast(*take1);
+
+                /* The above `get` should work. But because we track outputs
+                   in `resolvedResult` statefully, it can get out of sync with
+                   the store, which is our actual source of truth. For now we
+                   just check the store directly if it fails. */
+                auto take2 = worker.evalStore.queryRealisation(
+                    DrvOutput{
+                        .drvHash = *resolvedHash,
+                        .outputName = wantedOutput,
+                    });
+                if (take2)
+                    return *take2;
+
+                throw Error(
+                    "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/realisation)",
+                    worker.store.printStorePath(pathResolved),
+                    wantedOutput);
+            }();
+
+            if (!drv->type().isImpure()) {
+                Realisation newRealisation{
+                    realisation,
+                    {
+                        .drvHash = *outputHash,
+                        .outputName = wantedOutput,
+                    }};
+                newRealisation.signatures.clear();
+                if (!drv->type().isFixed()) {
+                    auto & drvStore = worker.evalStore.isValidPath(drvPath) ? worker.evalStore : worker.store;
+                    newRealisation.dependentRealisations =
+                        drvOutputReferences(worker.store, *drv, realisation.outPath, &drvStore);
+                }
+                worker.store.signRealisation(newRealisation);
+                worker.store.registerDrvOutput(newRealisation);
+            }
+
+            auto status = success.status;
+            if (status == BuildResult::Success::AlreadyValid)
+                status = BuildResult::Success::ResolvesToAlreadyValid;
+
+            co_return doneSuccess(status, std::move(realisation));
+        } else if (resolvedResult.tryGetFailure()) {
+            co_return doneFailure({
+                BuildResult::Failure::DependencyFailed,
+                "build of resolved derivation '%s' failed",
+                worker.store.printStorePath(pathResolved),
+            });
+        } else
+            assert(false);
+    }
+
     /* Give up on substitution for the output we want, actually build
        this derivation */
-    auto g = worker.makeDerivationBuildingGoal(drvPath, *drv, buildMode);
+    auto g = worker.makeDerivationBuildingGoal(drvPath, *drv, buildMode, storeDerivation);

     /* We will finish with it ourselves, as if we were the
        derivational goal. */
     g->preserveException = true;

@@ -163,42 +250,57 @@

     buildResult = g->buildResult;

-    if (buildMode == bmCheck) {
-        /* In checking mode, the builder will not register any outputs.
-           So we want to make sure the ones that we wanted to check are
-           properly there. */
-        buildResult.builtOutputs = {{wantedOutput, assertPathValidity()}};
-    } else {
-        /* Otherwise the builder will give us info for out output, but
-           also for other outputs. Filter down to just our output so as
-           not to leak info on unrelated things. */
-        for (auto it = buildResult.builtOutputs.begin(); it != buildResult.builtOutputs.end();) {
-            if (it->first != wantedOutput) {
-                it = buildResult.builtOutputs.erase(it);
-            } else {
-                ++it;
+    if (auto * successP = buildResult.tryGetSuccess()) {
+        auto & success = *successP;
+        if (buildMode == bmCheck) {
+            /* In checking mode, the builder will not register any outputs.
+               So we want to make sure the ones that we wanted to check are
+               properly there.
+            */
+            success.builtOutputs = {{
+                wantedOutput,
+                {
+                    assertPathValidity(),
+                    {
+                        .drvHash = outputHash,
+                        .outputName = wantedOutput,
+                    },
+                },
+            }};
+        } else {
+            /* Otherwise the builder will give us info for our output, but
+               also for other outputs. Filter down to just our output so as
+               not to leak info on unrelated things. */
+            for (auto it = success.builtOutputs.begin(); it != success.builtOutputs.end();) {
+                if (it->first != wantedOutput) {
+                    it = success.builtOutputs.erase(it);
+                } else {
+                    ++it;
+                }
+            }
+
+            /* If the wanted output is not in builtOutputs (e.g., because it
+               was already valid and therefore not re-registered), we need to
+               add it ourselves to ensure we return the correct information. */
+            if (success.builtOutputs.count(wantedOutput) == 0) {
+                debug(
+                    "BUG! wanted output '%s' not in builtOutputs, working around by adding it manually", wantedOutput);
+                success.builtOutputs = {{
+                    wantedOutput,
+                    {
+                        assertPathValidity(),
+                        {
+                            .drvHash = outputHash,
+                            .outputName = wantedOutput,
+                        },
+                    },
+                }};
+            }
         }
-
-        if (buildResult.success())
-            assert(buildResult.builtOutputs.count(wantedOutput) > 0);
     }

     co_return amDone(g->exitCode, g->ex);
 }

-/**
- * Used for `inputGoals` local variable below
- */
-struct value_comparison
-{
-    template
-    bool operator()(const ref & lhs, const ref & rhs) const
-    {
-        return *lhs < *rhs;
-    }
-};
-
 Goal::Co DerivationGoal::repairClosure()
 {
     assert(!drv->type().isImpure());
@@ -270,30 +372,33 @@ Goal::Co DerivationGoal::repairClosure()
                 bmRepair));
     }

+    bool haveWaitees = !waitees.empty();
     co_await await(std::move(waitees));

-    if (!waitees.empty()) {
+    if (haveWaitees) {
         trace("closure repaired");
         if (nrFailed > 0)
             throw Error(
                 "some paths in the output closure of derivation '%s' could not be repaired",
                 worker.store.printStorePath(drvPath));
     }
-    co_return doneSuccess(BuildResult::AlreadyValid, assertPathValidity());
+    co_return doneSuccess(BuildResult::Success::AlreadyValid, assertPathValidity());
 }

-std::optional> DerivationGoal::checkPathValidity()
+std::optional> DerivationGoal::checkPathValidity()
 {
     if (drv->type().isImpure())
         return std::nullopt;

     auto drvOutput = DrvOutput{outputHash, wantedOutput};

-    std::optional mRealisation;
+    std::optional mRealisation;
     if (auto * mOutput = get(drv->outputs, wantedOutput)) {
         if (auto mPath = mOutput->path(worker.store, drv->name, wantedOutput)) {
-            mRealisation = Realisation{drvOutput, std::move(*mPath)};
+            mRealisation = UnkeyedRealisation{
+                .outPath = std::move(*mPath),
+            };
         }
     } else {
         throw Error(
@@ -321,7 +426,14 @@ std::optional> DerivationGoal::checkPathValid
     // derivation, and the output path is valid, but we don't have
     // its realisation stored (probably because it has been built
     // without the `ca-derivations` experimental flag).
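+    /* Editorial sketch, not part of the upstream change: callers now receive
+       an `UnkeyedRealisation` plus a `PathStatus` and re-attach the key
+       themselves when registering. A minimal usage sketch under that
+       assumption (`use` is a placeholder):
+
+           if (auto r = checkPathValidity(); r && r->second == PathStatus::Valid)
+               use(r->first.outPath);
+
+       The replacement below re-keys the realisation with an explicit
+       `DrvOutput` before registering it. */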
- worker.store.registerDrvOutput(*mRealisation); + worker.store.registerDrvOutput( + Realisation{ + *mRealisation, + { + .drvHash = outputHash, + .outputName = wantedOutput, + }, + }); } return {{*mRealisation, status}}; @@ -329,7 +441,7 @@ std::optional> DerivationGoal::checkPathValid return std::nullopt; } -Realisation DerivationGoal::assertPathValidity() +UnkeyedRealisation DerivationGoal::assertPathValidity() { auto checkResult = checkPathValidity(); if (!(checkResult && checkResult->second == PathStatus::Valid)) @@ -337,16 +449,25 @@ Realisation DerivationGoal::assertPathValidity() return checkResult->first; } -Goal::Done DerivationGoal::doneSuccess(BuildResult::Status status, Realisation builtOutput) +Goal::Done DerivationGoal::doneSuccess(BuildResult::Success::Status status, UnkeyedRealisation builtOutput) { - buildResult.status = status; - - assert(buildResult.success()); + buildResult.inner = BuildResult::Success{ + .status = status, + .builtOutputs = {{ + wantedOutput, + { + std::move(builtOutput), + DrvOutput{ + .drvHash = outputHash, + .outputName = wantedOutput, + }, + }, + }}, + }; mcExpectedBuilds.reset(); - buildResult.builtOutputs = {{wantedOutput, std::move(builtOutput)}}; - if (status == BuildResult::Built) + if (status == BuildResult::Success::Built) worker.doneBuilds++; worker.updateProgress(); @@ -356,16 +477,18 @@ Goal::Done DerivationGoal::doneSuccess(BuildResult::Status status, Realisation b Goal::Done DerivationGoal::doneFailure(BuildError ex) { - buildResult.status = ex.status; - buildResult.errorMsg = fmt("%s", Uncolored(ex.info().msg)); - if (buildResult.status == BuildResult::TimedOut) - worker.timedOut = true; - if (buildResult.status == BuildResult::PermanentFailure) - worker.permanentFailure = true; + buildResult.inner = BuildResult::Failure{ + .status = ex.status, + .errorMsg = fmt("%s", Uncolored(ex.info().msg)), + }; mcExpectedBuilds.reset(); - if (ex.status != BuildResult::DependencyFailed) + if (ex.status == BuildResult::Failure::TimedOut) + worker.timedOut = true; + if (ex.status == BuildResult::Failure::PermanentFailure) + worker.permanentFailure = true; + if (ex.status != BuildResult::Failure::DependencyFailed) worker.failedBuilds++; worker.updateProgress(); diff --git a/src/libstore/build/derivation-resolution-goal.cc b/src/libstore/build/derivation-resolution-goal.cc new file mode 100644 index 000000000..6cb9702f4 --- /dev/null +++ b/src/libstore/build/derivation-resolution-goal.cc @@ -0,0 +1,191 @@ +#include "nix/store/build/derivation-resolution-goal.hh" +#include "nix/store/build/worker.hh" +#include "nix/util/util.hh" + +#include + +namespace nix { + +DerivationResolutionGoal::DerivationResolutionGoal( + const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode) + : Goal(worker, resolveDerivation()) + , drvPath(drvPath) + , drv{std::make_unique(drv)} + , buildMode{buildMode} +{ + name = fmt("resolving derivation '%s'", worker.store.printStorePath(drvPath)); + trace("created"); +} + +std::string DerivationResolutionGoal::key() +{ + return "dc$" + std::string(drvPath.name()) + "$" + worker.store.printStorePath(drvPath); +} + +/** + * Used for `inputGoals` local variable below + */ +struct value_comparison +{ + template + bool operator()(const ref & lhs, const ref & rhs) const + { + return *lhs < *rhs; + } +}; + +Goal::Co DerivationResolutionGoal::resolveDerivation() +{ + Goals waitees; + + std::map, GoalPtr, value_comparison> inputGoals; + + { + std::function, const DerivedPathMap::ChildNode &)> + 
addWaiteeDerivedPath;

+        addWaiteeDerivedPath = [&](ref inputDrv,
+                                   const DerivedPathMap::ChildNode & inputNode) {
+            if (!inputNode.value.empty()) {
+                auto g = worker.makeGoal(
+                    DerivedPath::Built{
+                        .drvPath = inputDrv,
+                        .outputs = inputNode.value,
+                    },
+                    buildMode == bmRepair ? bmRepair : bmNormal);
+                inputGoals.insert_or_assign(inputDrv, g);
+                waitees.insert(std::move(g));
+            }
+            for (const auto & [outputName, childNode] : inputNode.childMap)
+                addWaiteeDerivedPath(
+                    make_ref(SingleDerivedPath::Built{inputDrv, outputName}), childNode);
+        };
+
+        for (const auto & [inputDrvPath, inputNode] : drv->inputDrvs.map) {
+            /* Ensure that pure, non-fixed-output derivations don't
+               depend on impure derivations. */
+            if (experimentalFeatureSettings.isEnabled(Xp::ImpureDerivations) && !drv->type().isImpure()
+                && !drv->type().isFixed()) {
+                auto inputDrv = worker.evalStore.readDerivation(inputDrvPath);
+                if (inputDrv.type().isImpure())
+                    throw Error(
+                        "pure derivation '%s' depends on impure derivation '%s'",
+                        worker.store.printStorePath(drvPath),
+                        worker.store.printStorePath(inputDrvPath));
+            }
+
+            addWaiteeDerivedPath(makeConstantStorePathRef(inputDrvPath), inputNode);
+        }
+    }
+
+    co_await await(std::move(waitees));
+
+    trace("all inputs realised");
+
+    if (nrFailed != 0) {
+        auto msg =
+            fmt("Cannot build '%s'.\n"
+                "Reason: " ANSI_RED "%d %s failed" ANSI_NORMAL ".",
+                Magenta(worker.store.printStorePath(drvPath)),
+                nrFailed,
+                nrFailed == 1 ? "dependency" : "dependencies");
+        msg += showKnownOutputs(worker.store, *drv);
+        co_return amDone(ecFailed, {BuildError(BuildResult::Failure::DependencyFailed, msg)});
+    }
+
+    /* Gather information necessary for computing the closure and/or
+       running the build hook. */
+
+    /* Determine the full set of input paths. */
+
+    /* First, the input derivations. */
+    {
+        auto & fullDrv = *drv;
+
+        auto drvType = fullDrv.type();
+        bool resolveDrv =
+            std::visit(
+                overloaded{
+                    [&](const DerivationType::InputAddressed & ia) {
+                        /* must resolve if deferred. */
+                        return ia.deferred;
+                    },
+                    [&](const DerivationType::ContentAddressed & ca) {
+                        return !fullDrv.inputDrvs.map.empty()
+                               && (ca.fixed
+                                       /* Can optionally resolve if fixed, which is good
+                                          for avoiding unnecessary rebuilds. */
+                                       ? experimentalFeatureSettings.isEnabled(Xp::CaDerivations)
+                                       /* Must resolve if floating and there are any input
+                                          drvs. */
+                                       : true);
+                    },
+                    [&](const DerivationType::Impure &) { return true; }},
+                drvType.raw)
+            /* Must also resolve if any inputs are outputs of dynamic derivations. */
+            || std::ranges::any_of(fullDrv.inputDrvs.map.begin(), fullDrv.inputDrvs.map.end(), [](auto & pair) {
+                   return !pair.second.childMap.empty();
+               });
+
+        if (resolveDrv && !fullDrv.inputDrvs.map.empty()) {
+            experimentalFeatureSettings.require(Xp::CaDerivations);
+
+            /* We may be able to resolve this derivation based on the
+               now-known results of dependencies. If so, we become a
+               stub goal aliasing that resolved derivation goal.
*/ + std::optional attempt = fullDrv.tryResolve( + worker.store, + [&](ref drvPath, const std::string & outputName) -> std::optional { + auto mEntry = get(inputGoals, drvPath); + if (!mEntry) + return std::nullopt; + + auto & buildResult = (*mEntry)->buildResult; + return std::visit( + overloaded{ + [](const BuildResult::Failure &) -> std::optional { return std::nullopt; }, + [&](const BuildResult::Success & success) -> std::optional { + auto i = get(success.builtOutputs, outputName); + if (!i) + return std::nullopt; + + return i->outPath; + }, + }, + buildResult.inner); + }); + if (!attempt) { + /* TODO (impure derivations-induced tech debt) (see below): + The above attempt should have found it, but because we manage + inputDrvOutputs statefully, sometimes it gets out of sync with + the real source of truth (store). So we query the store + directly if there's a problem. */ + attempt = fullDrv.tryResolve(worker.store, &worker.evalStore); + } + assert(attempt); + + auto pathResolved = writeDerivation(worker.store, *attempt, NoRepair, /*readOnly =*/true); + + auto msg = + fmt("resolved derivation: '%s' -> '%s'", + worker.store.printStorePath(drvPath), + worker.store.printStorePath(pathResolved)); + act = std::make_unique( + *logger, + lvlInfo, + actBuildWaiting, + msg, + Logger::Fields{ + worker.store.printStorePath(drvPath), + worker.store.printStorePath(pathResolved), + }); + + resolvedDrv = + std::make_unique>(std::move(pathResolved), *std::move(attempt)); + } + } + + co_return amDone(ecSuccess, std::nullopt); +} + +} // namespace nix diff --git a/src/libstore/build/derivation-trampoline-goal.cc b/src/libstore/build/derivation-trampoline-goal.cc index 5038a4ea0..963156aa5 100644 --- a/src/libstore/build/derivation-trampoline-goal.cc +++ b/src/libstore/build/derivation-trampoline-goal.cc @@ -31,7 +31,7 @@ DerivationTrampolineGoal::DerivationTrampolineGoal( void DerivationTrampolineGoal::commonInit() { name = - fmt("outer obtaining drv from '%s' and then building outputs %s", + fmt("obtaining derivation from '%s' and then building outputs %s", drvReq->to_string(worker.store), std::visit( overloaded{ @@ -58,18 +58,12 @@ static StorePath pathPartOfReq(const SingleDerivedPath & req) std::string DerivationTrampolineGoal::key() { - /* Ensure that derivations get built in order of their name, - i.e. a derivation named "aardvark" always comes before "baboon". And - substitution goals, derivation goals, and derivation building goals always happen before - derivation goals (due to "bt$"). */ - return "bt$" + std::string(pathPartOfReq(*drvReq).name()) + "$" + DerivedPath::Built{ + return "da$" + std::string(pathPartOfReq(*drvReq).name()) + "$" + DerivedPath::Built{ .drvPath = drvReq, .outputs = wantedOutputs, }.to_string(worker.store); } -void DerivationTrampolineGoal::timedOut(Error && ex) {} - Goal::Co DerivationTrampolineGoal::init() { trace("need to load derivation from file"); @@ -151,7 +145,7 @@ Goal::Co DerivationTrampolineGoal::haveDerivation(StorePath drvPath, Derivation /* Build this step! */ for (auto & output : resolvedWantedOutputs) { - auto g = upcast_goal(worker.makeDerivationGoal(drvPath, drv, output, buildMode)); + auto g = upcast_goal(worker.makeDerivationGoal(drvPath, drv, output, buildMode, false)); g->preserveException = true; /* We will finish with it ourselves, as if we were the derivational goal. 
*/ concreteDrvGoals.insert(std::move(g)); @@ -164,10 +158,11 @@ Goal::Co DerivationTrampolineGoal::haveDerivation(StorePath drvPath, Derivation auto & g = *concreteDrvGoals.begin(); buildResult = g->buildResult; - for (auto & g2 : concreteDrvGoals) { - for (auto && [x, y] : g2->buildResult.builtOutputs) - buildResult.builtOutputs.insert_or_assign(x, y); - } + if (auto * successP = buildResult.tryGetSuccess()) + for (auto & g2 : concreteDrvGoals) + if (auto * successP2 = g2->buildResult.tryGetSuccess()) + for (auto && [x, y] : successP2->builtOutputs) + successP->builtOutputs.insert_or_assign(x, y); co_return amDone(g->exitCode, g->ex); } diff --git a/src/libstore/build/drv-output-substitution-goal.cc b/src/libstore/build/drv-output-substitution-goal.cc index b6ace4784..8d0a307be 100644 --- a/src/libstore/build/drv-output-substitution-goal.cc +++ b/src/libstore/build/drv-output-substitution-goal.cc @@ -8,8 +8,7 @@ namespace nix { -DrvOutputSubstitutionGoal::DrvOutputSubstitutionGoal( - const DrvOutput & id, Worker & worker, RepairFlag repair, std::optional ca) +DrvOutputSubstitutionGoal::DrvOutputSubstitutionGoal(const DrvOutput & id, Worker & worker) : Goal(worker, init()) , id(id) { @@ -43,10 +42,10 @@ Goal::Co DrvOutputSubstitutionGoal::init() outPipe->createAsyncPipe(worker.ioport.get()); #endif - auto promise = std::make_shared>>(); + auto promise = std::make_shared>>(); sub->queryRealisation( - id, {[outPipe(outPipe), promise(promise)](std::future> res) { + id, {[outPipe(outPipe), promise(promise)](std::future> res) { try { Finally updateStats([&]() { outPipe->writeSide.close(); }); promise->set_value(res.get()); @@ -75,7 +74,7 @@ Goal::Co DrvOutputSubstitutionGoal::init() * The realisation corresponding to the given output id. * Will be filled once we can get it. */ - std::shared_ptr outputInfo; + std::shared_ptr outputInfo; try { outputInfo = promise->get_future().get(); @@ -113,7 +112,21 @@ Goal::Co DrvOutputSubstitutionGoal::init() if (failed) continue; - co_return realisationFetched(std::move(waitees), outputInfo, sub); + waitees.insert(worker.makePathSubstitutionGoal(outputInfo->outPath)); + + co_await await(std::move(waitees)); + + trace("output path substituted"); + + if (nrFailed > 0) { + debug("The output path of the derivation output '%s' could not be substituted", id.to_string()); + co_return amDone(nrNoSubstituters > 0 ? ecNoSubstituters : ecFailed); + } + + worker.store.registerDrvOutput({*outputInfo, id}); + + trace("finished"); + co_return amDone(ecSuccess); } /* None left. Terminate this goal and let someone else deal @@ -131,30 +144,8 @@ Goal::Co DrvOutputSubstitutionGoal::init() co_return amDone(substituterFailed ? ecFailed : ecNoSubstituters); } -Goal::Co DrvOutputSubstitutionGoal::realisationFetched( - Goals waitees, std::shared_ptr outputInfo, nix::ref sub) -{ - waitees.insert(worker.makePathSubstitutionGoal(outputInfo->outPath)); - - co_await await(std::move(waitees)); - - trace("output path substituted"); - - if (nrFailed > 0) { - debug("The output path of the derivation output '%s' could not be substituted", id.to_string()); - co_return amDone(nrNoSubstituters > 0 ? ecNoSubstituters : ecFailed); - } - - worker.store.registerDrvOutput(*outputInfo); - - trace("finished"); - co_return amDone(ecSuccess); -} - std::string DrvOutputSubstitutionGoal::key() { - /* "a$" ensures substitution goals happen before derivation - goals. 
*/ return "a$" + std::string(id.to_string()); } diff --git a/src/libstore/build/entry-points.cc b/src/libstore/build/entry-points.cc index 1dd540265..4bbd4c8f0 100644 --- a/src/libstore/build/entry-points.cc +++ b/src/libstore/build/entry-points.cc @@ -82,10 +82,10 @@ BuildResult Store::buildDerivation(const StorePath & drvPath, const BasicDerivat worker.run(Goals{goal}); return goal->buildResult; } catch (Error & e) { - return BuildResult{ - .status = BuildResult::MiscFailure, + return BuildResult{.inner{BuildResult::Failure{ + .status = BuildResult::Failure::MiscFailure, .errorMsg = e.msg(), - }; + }}}; }; } diff --git a/src/libstore/build/substitution-goal.cc b/src/libstore/build/substitution-goal.cc index d219834f2..ac18de304 100644 --- a/src/libstore/build/substitution-goal.cc +++ b/src/libstore/build/substitution-goal.cc @@ -27,13 +27,21 @@ PathSubstitutionGoal::~PathSubstitutionGoal() cleanup(); } -Goal::Done PathSubstitutionGoal::done(ExitCode result, BuildResult::Status status, std::optional errorMsg) +Goal::Done PathSubstitutionGoal::doneSuccess(BuildResult::Success::Status status) { - buildResult.status = status; - if (errorMsg) { - debug(*errorMsg); - buildResult.errorMsg = *errorMsg; - } + buildResult.inner = BuildResult::Success{ + .status = status, + }; + return amDone(ecSuccess); +} + +Goal::Done PathSubstitutionGoal::doneFailure(ExitCode result, BuildResult::Failure::Status status, std::string errorMsg) +{ + debug(errorMsg); + buildResult.inner = BuildResult::Failure{ + .status = status, + .errorMsg = std::move(errorMsg), + }; return amDone(result); } @@ -45,7 +53,7 @@ Goal::Co PathSubstitutionGoal::init() /* If the path already exists we're done. */ if (!repair && worker.store.isValidPath(storePath)) { - co_return done(ecSuccess, BuildResult::AlreadyValid); + co_return doneSuccess(BuildResult::Success::AlreadyValid); } if (settings.readOnlyMode) @@ -165,9 +173,9 @@ Goal::Co PathSubstitutionGoal::init() /* Hack: don't indicate failure if there were no substituters. In that case the calling derivation should just do a build. */ - co_return done( + co_return doneFailure( substituterFailed ? ecFailed : ecNoSubstituters, - BuildResult::NoSubstituters, + BuildResult::Failure::NoSubstituters, fmt("path '%s' is required, but there is no substituter that can build it", worker.store.printStorePath(storePath))); } @@ -178,9 +186,9 @@ Goal::Co PathSubstitutionGoal::tryToRun( trace("all references realised"); if (nrFailed > 0) { - co_return done( + co_return doneFailure( nrNoSubstituters > 0 ? ecNoSubstituters : ecFailed, - BuildResult::DependencyFailed, + BuildResult::Failure::DependencyFailed, fmt("some references of path '%s' could not be realised", worker.store.printStorePath(storePath))); } @@ -260,16 +268,18 @@ Goal::Co PathSubstitutionGoal::tryToRun( try { promise.get_future().get(); } catch (std::exception & e) { - printError(e.what()); - /* Cause the parent build to fail unless --fallback is given, or the substitute has disappeared. The latter case behaves the same as the substitute never having existed in the first place. */ try { throw; - } catch (SubstituteGone &) { + } catch (SubstituteGone & sg) { + /* Missing NARs are expected when they've been garbage collected. + This is not a failure, so log as a warning instead of an error. */ + logWarning({.msg = sg.info().msg}); } catch (...) 
{ + printError(e.what()); substituterFailed = true; } @@ -297,7 +307,7 @@ Goal::Co PathSubstitutionGoal::tryToRun( worker.updateProgress(); - co_return done(ecSuccess, BuildResult::Substituted); + co_return doneSuccess(BuildResult::Success::Substituted); } void PathSubstitutionGoal::handleEOF(Descriptor fd) diff --git a/src/libstore/build/worker.cc b/src/libstore/build/worker.cc index 3e6e0bef0..3663a2c91 100644 --- a/src/libstore/build/worker.cc +++ b/src/libstore/build/worker.cc @@ -4,6 +4,7 @@ #include "nix/store/build/substitution-goal.hh" #include "nix/store/build/drv-output-substitution-goal.hh" #include "nix/store/build/derivation-goal.hh" +#include "nix/store/build/derivation-resolution-goal.hh" #include "nix/store/build/derivation-building-goal.hh" #include "nix/store/build/derivation-trampoline-goal.hh" #ifndef _WIN32 // TODO Enable building on Windows @@ -75,15 +76,26 @@ std::shared_ptr Worker::makeDerivationTrampolineGoal( } std::shared_ptr Worker::makeDerivationGoal( - const StorePath & drvPath, const Derivation & drv, const OutputName & wantedOutput, BuildMode buildMode) + const StorePath & drvPath, + const Derivation & drv, + const OutputName & wantedOutput, + BuildMode buildMode, + bool storeDerivation) { - return initGoalIfNeeded(derivationGoals[drvPath][wantedOutput], drvPath, drv, wantedOutput, *this, buildMode); + return initGoalIfNeeded( + derivationGoals[drvPath][wantedOutput], drvPath, drv, wantedOutput, *this, buildMode, storeDerivation); } -std::shared_ptr -Worker::makeDerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode) +std::shared_ptr +Worker::makeDerivationResolutionGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode) { - return initGoalIfNeeded(derivationBuildingGoals[drvPath], drvPath, drv, *this, buildMode); + return initGoalIfNeeded(derivationResolutionGoals[drvPath], drvPath, drv, *this, buildMode); +} + +std::shared_ptr Worker::makeDerivationBuildingGoal( + const StorePath & drvPath, const Derivation & drv, BuildMode buildMode, bool storeDerivation) +{ + return initGoalIfNeeded(derivationBuildingGoals[drvPath], drvPath, drv, *this, buildMode, storeDerivation); } std::shared_ptr @@ -92,10 +104,9 @@ Worker::makePathSubstitutionGoal(const StorePath & path, RepairFlag repair, std: return initGoalIfNeeded(substitutionGoals[path], path, *this, repair, ca); } -std::shared_ptr -Worker::makeDrvOutputSubstitutionGoal(const DrvOutput & id, RepairFlag repair, std::optional ca) +std::shared_ptr Worker::makeDrvOutputSubstitutionGoal(const DrvOutput & id) { - return initGoalIfNeeded(drvOutputSubstitutionGoals[id], id, *this, repair, ca); + return initGoalIfNeeded(drvOutputSubstitutionGoals[id], id, *this); } GoalPtr Worker::makeGoal(const DerivedPath & req, BuildMode buildMode) @@ -158,6 +169,8 @@ void Worker::removeGoal(GoalPtr goal) nix::removeGoal(drvGoal, derivationTrampolineGoals.map); else if (auto drvGoal = std::dynamic_pointer_cast(goal)) nix::removeGoal(drvGoal, derivationGoals); + else if (auto drvResolutionGoal = std::dynamic_pointer_cast(goal)) + nix::removeGoal(drvResolutionGoal, derivationResolutionGoals); else if (auto drvBuildingGoal = std::dynamic_pointer_cast(goal)) nix::removeGoal(drvBuildingGoal, derivationBuildingGoals); else if (auto subGoal = std::dynamic_pointer_cast(goal)) @@ -515,15 +528,9 @@ bool Worker::pathContentsGood(const StorePath & path) return i->second; printInfo("checking path '%s'...", store.printStorePath(path)); auto info = store.queryPathInfo(path); - bool res; - 
if (!pathExists(store.printStorePath(path))) - res = false; - else { - auto current = hashPath( - {store.getFSAccessor(), CanonPath(path.to_string())}, - FileIngestionMethod::NixArchive, - info->narHash.algo) - .first; + bool res = false; + if (auto accessor = store.getFSAccessor(path, /*requireValidPath=*/false)) { + auto current = hashPath({ref{accessor}}, FileIngestionMethod::NixArchive, info->narHash.algo).first; Hash nullHash(HashAlgorithm::SHA256); res = info->narHash == nullHash || info->narHash == current; } diff --git a/src/libstore/builtins/fetchurl.cc b/src/libstore/builtins/fetchurl.cc index 7abfa4495..126fb922e 100644 --- a/src/libstore/builtins/fetchurl.cc +++ b/src/libstore/builtins/fetchurl.cc @@ -33,13 +33,26 @@ static void builtinFetchurl(const BuiltinBuilderContext & ctx) /* Note: have to use a fresh fileTransfer here because we're in a forked process. */ + debug("[pid=%d] builtin:fetchurl creating fresh FileTransfer instance", getpid()); auto fileTransfer = makeFileTransfer(); auto fetch = [&](const std::string & url) { auto source = sinkToSource([&](Sink & sink) { - FileTransferRequest request(ValidURL{url}); + FileTransferRequest request(VerbatimURL{url}); request.decompress = false; +#if NIX_WITH_AWS_AUTH + // Use pre-resolved credentials if available + if (ctx.awsCredentials && request.uri.scheme() == "s3") { + debug("[pid=%d] Using pre-resolved AWS credentials from parent process", getpid()); + request.usernameAuth = UsernameAuth{ + .username = ctx.awsCredentials->accessKeyId, + .password = ctx.awsCredentials->secretAccessKey, + }; + request.preResolvedAwsSessionToken = ctx.awsCredentials->sessionToken; + } +#endif + auto decompressor = makeDecompressionSink(unpack && hasSuffix(mainUrl, ".xz") ? "xz" : "none", sink); fileTransfer->download(std::move(request), *decompressor); decompressor->finish(); diff --git a/src/libstore/content-address.cc b/src/libstore/content-address.cc index 9a57e3aa6..497c2c5b4 100644 --- a/src/libstore/content-address.cc +++ b/src/libstore/content-address.cc @@ -1,6 +1,7 @@ #include "nix/util/args.hh" #include "nix/store/content-address.hh" #include "nix/util/split.hh" +#include "nix/util/json-utils.hh" namespace nix { @@ -300,3 +301,36 @@ Hash ContentAddressWithReferences::getHash() const } } // namespace nix + +namespace nlohmann { + +using namespace nix; + +ContentAddressMethod adl_serializer::from_json(const json & json) +{ + return ContentAddressMethod::parse(getString(json)); +} + +void adl_serializer::to_json(json & json, const ContentAddressMethod & m) +{ + json = m.render(); +} + +ContentAddress adl_serializer::from_json(const json & json) +{ + auto obj = getObject(json); + return { + .method = adl_serializer::from_json(valueAt(obj, "method")), + .hash = valueAt(obj, "hash"), + }; +} + +void adl_serializer::to_json(json & json, const ContentAddress & ca) +{ + json = { + {"method", ca.method}, + {"hash", ca.hash}, + }; +} + +} // namespace nlohmann diff --git a/src/libstore/daemon.cc b/src/libstore/daemon.cc index 2bd0698a0..e6efd6c09 100644 --- a/src/libstore/daemon.cc +++ b/src/libstore/daemon.cc @@ -102,7 +102,7 @@ struct TunnelLogger : public Logger showErrorInfo(oss, ei, false); StringSink buf; - buf << STDERR_NEXT << toView(oss); + buf << STDERR_NEXT << oss.view(); enqueueMsg(buf.s); } @@ -312,7 +312,7 @@ static void performOp( switch (op) { case WorkerProto::Op::IsValidPath: { - auto path = store->parseStorePath(readString(conn.from)); + auto path = WorkerProto::Serialise::read(*store, rconn); logger->startWork(); bool 
result = store->isValidPath(path); logger->stopWork(); @@ -339,7 +339,7 @@ static void performOp( } case WorkerProto::Op::HasSubstitutes: { - auto path = store->parseStorePath(readString(conn.from)); + auto path = WorkerProto::Serialise::read(*store, rconn); logger->startWork(); StorePathSet paths; // FIXME paths.insert(path); @@ -359,7 +359,7 @@ static void performOp( } case WorkerProto::Op::QueryPathHash: { - auto path = store->parseStorePath(readString(conn.from)); + auto path = WorkerProto::Serialise::read(*store, rconn); logger->startWork(); auto hash = store->queryPathInfo(path)->narHash; logger->stopWork(); @@ -371,7 +371,7 @@ static void performOp( case WorkerProto::Op::QueryReferrers: case WorkerProto::Op::QueryValidDerivers: case WorkerProto::Op::QueryDerivationOutputs: { - auto path = store->parseStorePath(readString(conn.from)); + auto path = WorkerProto::Serialise::read(*store, rconn); logger->startWork(); StorePathSet paths; if (op == WorkerProto::Op::QueryReferences) @@ -389,7 +389,7 @@ static void performOp( } case WorkerProto::Op::QueryDerivationOutputNames: { - auto path = store->parseStorePath(readString(conn.from)); + auto path = WorkerProto::Serialise::read(*store, rconn); logger->startWork(); auto names = store->readDerivation(path).outputNames(); logger->stopWork(); @@ -398,7 +398,7 @@ static void performOp( } case WorkerProto::Op::QueryDerivationOutputMap: { - auto path = store->parseStorePath(readString(conn.from)); + auto path = WorkerProto::Serialise::read(*store, rconn); logger->startWork(); auto outputs = store->queryPartialDerivationOutputMap(path); logger->stopWork(); @@ -407,11 +407,11 @@ static void performOp( } case WorkerProto::Op::QueryDeriver: { - auto path = store->parseStorePath(readString(conn.from)); + auto path = WorkerProto::Serialise::read(*store, rconn); logger->startWork(); auto info = store->queryPathInfo(path); logger->stopWork(); - conn.to << (info->deriver ? store->printStorePath(*info->deriver) : ""); + WorkerProto::write(*store, conn, info->deriver); break; } @@ -420,7 +420,7 @@ static void performOp( logger->startWork(); auto path = store->queryPathFromHashPart(hashPart); logger->stopWork(); - conn.to << (path ? 
store->printStorePath(*path) : ""); + WorkerProto::write(*store, conn, path); break; } @@ -505,7 +505,7 @@ static void performOp( store->addToStoreFromDump(*dumpSource, baseName, FileSerialisationMethod::NixArchive, method, hashAlgo); logger->stopWork(); - conn.to << store->printStorePath(path); + WorkerProto::write(*store, wconn, path); } break; } @@ -542,7 +542,7 @@ static void performOp( NoRepair); }); logger->stopWork(); - conn.to << store->printStorePath(path); + WorkerProto::write(*store, wconn, path); break; } @@ -591,7 +591,7 @@ static void performOp( } case WorkerProto::Op::BuildDerivation: { - auto drvPath = store->parseStorePath(readString(conn.from)); + auto drvPath = WorkerProto::Serialise::read(*store, rconn); BasicDerivation drv; /* * Note: unlike wopEnsurePath, this operation reads a @@ -668,7 +668,7 @@ static void performOp( } case WorkerProto::Op::EnsurePath: { - auto path = store->parseStorePath(readString(conn.from)); + auto path = WorkerProto::Serialise::read(*store, rconn); logger->startWork(); store->ensurePath(path); logger->stopWork(); @@ -677,7 +677,7 @@ static void performOp( } case WorkerProto::Op::AddTempRoot: { - auto path = store->parseStorePath(readString(conn.from)); + auto path = WorkerProto::Serialise::read(*store, rconn); logger->startWork(); store->addTempRoot(path); logger->stopWork(); @@ -733,8 +733,10 @@ static void performOp( conn.to << size; for (auto & [target, links] : roots) - for (auto & link : links) - conn.to << link << store->printStorePath(target); + for (auto & link : links) { + conn.to << link; + WorkerProto::write(*store, wconn, target); + } break; } @@ -799,7 +801,7 @@ static void performOp( } case WorkerProto::Op::QuerySubstitutablePathInfo: { - auto path = store->parseStorePath(readString(conn.from)); + auto path = WorkerProto::Serialise::read(*store, rconn); logger->startWork(); SubstitutablePathInfos infos; store->querySubstitutablePathInfos({{path, std::nullopt}}, infos); @@ -808,7 +810,8 @@ static void performOp( if (i == infos.end()) conn.to << 0; else { - conn.to << 1 << (i->second.deriver ? store->printStorePath(*i->second.deriver) : ""); + conn.to << 1; + WorkerProto::write(*store, wconn, i->second.deriver); WorkerProto::write(*store, wconn, i->second.references); conn.to << i->second.downloadSize << i->second.narSize; } @@ -829,8 +832,8 @@ static void performOp( logger->stopWork(); conn.to << infos.size(); for (auto & i : infos) { - conn.to << store->printStorePath(i.first) - << (i.second.deriver ? 
store->printStorePath(*i.second.deriver) : ""); + WorkerProto::write(*store, wconn, i.first); + WorkerProto::write(*store, wconn, i.second.deriver); WorkerProto::write(*store, wconn, i.second.references); conn.to << i.second.downloadSize << i.second.narSize; } @@ -846,7 +849,7 @@ static void performOp( } case WorkerProto::Op::QueryPathInfo: { - auto path = store->parseStorePath(readString(conn.from)); + auto path = WorkerProto::Serialise::read(*store, rconn); std::shared_ptr info; logger->startWork(); info = store->queryPathInfo(path); @@ -880,7 +883,7 @@ static void performOp( } case WorkerProto::Op::AddSignatures: { - auto path = store->parseStorePath(readString(conn.from)); + auto path = WorkerProto::Serialise::read(*store, rconn); StringSet sigs = readStrings(conn.from); logger->startWork(); store->addSignatures(path, sigs); @@ -890,7 +893,7 @@ static void performOp( } case WorkerProto::Op::NarFromPath: { - auto path = store->parseStorePath(readString(conn.from)); + auto path = WorkerProto::Serialise::read(*store, rconn); logger->startWork(); logger->stopWork(); dumpPath(store->toRealPath(path), conn.to); @@ -899,12 +902,11 @@ static void performOp( case WorkerProto::Op::AddToStoreNar: { bool repair, dontCheckSigs; - auto path = store->parseStorePath(readString(conn.from)); - auto deriver = readString(conn.from); + auto path = WorkerProto::Serialise::read(*store, rconn); + auto deriver = WorkerProto::Serialise>::read(*store, rconn); auto narHash = Hash::parseAny(readString(conn.from), HashAlgorithm::SHA256); ValidPathInfo info{path, narHash}; - if (deriver != "") - info.deriver = store->parseStorePath(deriver); + info.deriver = std::move(deriver); info.references = WorkerProto::Serialise::read(*store, rconn); conn.from >> info.registrationTime >> info.narSize >> info.ultimate; info.sigs = readStrings(conn.from); @@ -964,7 +966,7 @@ static void performOp( if (GET_PROTOCOL_MINOR(conn.protoVersion) < 31) { auto outputId = DrvOutput::parse(readString(conn.from)); auto outputPath = StorePath(readString(conn.from)); - store->registerDrvOutput(Realisation{.id = outputId, .outPath = outputPath}); + store->registerDrvOutput(Realisation{{.outPath = outputPath}, outputId}); } else { auto realisation = WorkerProto::Serialise::read(*store, rconn); store->registerDrvOutput(realisation); @@ -986,7 +988,7 @@ static void performOp( } else { std::set realisations; if (info) - realisations.insert(*info); + realisations.insert({*info, outputId}); WorkerProto::write(*store, wconn, realisations); } break; @@ -1029,7 +1031,7 @@ void processConnection(ref store, FdSource && from, FdSink && to, Trusted auto [protoVersion, features] = WorkerProto::BasicServerConnection::handshake(to, from, PROTOCOL_VERSION, WorkerProto::allFeatures); - if (protoVersion < 256 + 18) + if (protoVersion < MINIMUM_PROTOCOL_VERSION) throw Error("the Nix client version is too old"); WorkerProto::BasicServerConnection conn; diff --git a/src/libstore/derivation-options.cc b/src/libstore/derivation-options.cc index 4cb9bf726..bd9704b44 100644 --- a/src/libstore/derivation-options.cc +++ b/src/libstore/derivation-options.cc @@ -22,9 +22,9 @@ getStringAttr(const StringMap & env, const StructuredAttrs * parsed, const std:: if (i == parsed->structuredAttrs.end()) return {}; else { - if (!i->is_string()) + if (!i->second.is_string()) throw Error("attribute '%s' of must be a string", name); - return i->get(); + return i->second.get(); } } else { auto i = env.find(name); @@ -42,9 +42,9 @@ static bool getBoolAttr(const StringMap & env, const 
StructuredAttrs * parsed, c if (i == parsed->structuredAttrs.end()) return def; else { - if (!i->is_boolean()) + if (!i->second.is_boolean()) throw Error("attribute '%s' must be a Boolean", name); - return i->get(); + return i->second.get(); } } else { auto i = env.find(name); @@ -63,10 +63,11 @@ getStringsAttr(const StringMap & env, const StructuredAttrs * parsed, const std: if (i == parsed->structuredAttrs.end()) return {}; else { - if (!i->is_array()) + if (!i->second.is_array()) throw Error("attribute '%s' must be a list of strings", name); + auto & a = getArray(i->second); Strings res; - for (auto j = i->begin(); j != i->end(); ++j) { + for (auto j = a.begin(); j != a.end(); ++j) { if (!j->is_string()) throw Error("attribute '%s' must be a list of strings", name); res.push_back(j->get()); @@ -99,33 +100,46 @@ DerivationOptions DerivationOptions::fromStructuredAttrs( return fromStructuredAttrs(env, parsed ? &*parsed : nullptr); } +static void flatten(const nlohmann::json & value, StringSet & res) +{ + if (value.is_array()) + for (auto & v : value) + flatten(v, res); + else if (value.is_string()) + res.insert(value); + else + throw Error("'exportReferencesGraph' value is not an array or a string"); +} + DerivationOptions DerivationOptions::fromStructuredAttrs(const StringMap & env, const StructuredAttrs * parsed, bool shouldWarn) { DerivationOptions defaults = {}; if (shouldWarn && parsed) { - if (get(parsed->structuredAttrs, "allowedReferences")) { + auto & structuredAttrs = parsed->structuredAttrs; + + if (get(structuredAttrs, "allowedReferences")) { warn( "'structuredAttrs' disables the effect of the top-level attribute 'allowedReferences'; use 'outputChecks' instead"); } - if (get(parsed->structuredAttrs, "allowedRequisites")) { + if (get(structuredAttrs, "allowedRequisites")) { warn( "'structuredAttrs' disables the effect of the top-level attribute 'allowedRequisites'; use 'outputChecks' instead"); } - if (get(parsed->structuredAttrs, "disallowedRequisites")) { + if (get(structuredAttrs, "disallowedRequisites")) { warn( "'structuredAttrs' disables the effect of the top-level attribute 'disallowedRequisites'; use 'outputChecks' instead"); } - if (get(parsed->structuredAttrs, "disallowedReferences")) { + if (get(structuredAttrs, "disallowedReferences")) { warn( "'structuredAttrs' disables the effect of the top-level attribute 'disallowedReferences'; use 'outputChecks' instead"); } - if (get(parsed->structuredAttrs, "maxSize")) { + if (get(structuredAttrs, "maxSize")) { warn( "'structuredAttrs' disables the effect of the top-level attribute 'maxSize'; use 'outputChecks' instead"); } - if (get(parsed->structuredAttrs, "maxClosureSize")) { + if (get(structuredAttrs, "maxClosureSize")) { warn( "'structuredAttrs' disables the effect of the top-level attribute 'maxClosureSize'; use 'outputChecks' instead"); } @@ -134,11 +148,15 @@ DerivationOptions::fromStructuredAttrs(const StringMap & env, const StructuredAt return { .outputChecks = [&]() -> OutputChecksVariant { if (parsed) { + auto & structuredAttrs = parsed->structuredAttrs; + std::map res; - if (auto outputChecks = get(parsed->structuredAttrs, "outputChecks")) { - for (auto & [outputName, output] : getObject(*outputChecks)) { + if (auto * outputChecks = get(structuredAttrs, "outputChecks")) { + for (auto & [outputName, output_] : getObject(*outputChecks)) { OutputChecks checks; + auto & output = getObject(output_); + if (auto maxSize = get(output, "maxSize")) checks.maxSize = maxSize->get(); @@ -184,7 +202,9 @@ 
@@ -184,7 +202,9 @@ DerivationOptions::fromStructuredAttrs(const StringMap & env, const StructuredAt
     std::map<std::string, bool> res;
     if (parsed) {
-        if (auto udr = get(parsed->structuredAttrs, "unsafeDiscardReferences")) {
+        auto & structuredAttrs = parsed->structuredAttrs;
+
+        if (auto * udr = get(structuredAttrs, "unsafeDiscardReferences")) {
             for (auto & [outputName, output] : getObject(*udr)) {
                 if (!output.is_boolean())
                     throw Error("attribute 'unsafeDiscardReferences.\"%s\"' must be a Boolean", outputName);
@@ -215,16 +235,13 @@ DerivationOptions::fromStructuredAttrs(const StringMap & env, const StructuredAt
     std::map<std::string, StringSet> ret;
     if (parsed) {
-        auto e = optionalValueAt(parsed->structuredAttrs, "exportReferencesGraph");
+        auto * e = optionalValueAt(parsed->structuredAttrs, "exportReferencesGraph");
         if (!e || !e->is_object())
             return ret;
         for (auto & [key, value] : getObject(*e)) {
-            if (value.is_array())
-                ret.insert_or_assign(key, value);
-            else if (value.is_string())
-                ret.insert_or_assign(key, StringSet{value});
-            else
-                throw Error("'exportReferencesGraph' value is not an array or a string");
+            StringSet ss;
+            flatten(value, ss);
+            ret.insert_or_assign(key, std::move(ss));
         }
     } else {
         auto s = getOr(env, "exportReferencesGraph", "");
@@ -266,7 +283,9 @@ DerivationOptions::getParsedExportReferencesGraph(const StoreDirConfig & store)
     for (auto & storePathS : ss) {
         if (!store.isInStore(storePathS))
             throw BuildError(
-                BuildResult::InputRejected, "'exportReferencesGraph' contains a non-store path '%1%'", storePathS);
+                BuildResult::Failure::InputRejected,
+                "'exportReferencesGraph' contains a non-store path '%1%'",
+                storePathS);
         storePaths.insert(store.toStorePath(storePathS).first);
     }
     res.insert_or_assign(fileName, storePaths);
@@ -323,8 +342,10 @@ namespace nlohmann {
 using namespace nix;
-DerivationOptions adl_serializer<DerivationOptions>::from_json(const json & json)
+DerivationOptions adl_serializer<DerivationOptions>::from_json(const json & json_)
 {
+    auto & json = getObject(json_);
+
     return {
         .outputChecks = [&]() -> OutputChecksVariant {
             auto outputChecks = getObject(valueAt(json, "outputChecks"));
@@ -387,13 +408,24 @@ void adl_serializer<DerivationOptions>::to_json(json & json, const DerivationOpt
     json["allowSubstitutes"] = o.allowSubstitutes;
 }
-DerivationOptions::OutputChecks adl_serializer<DerivationOptions::OutputChecks>::from_json(const json & json)
+template<typename T>
+static inline std::optional<T> ptrToOwned(const json * ptr)
 {
+    if (ptr)
+        return std::optional<T>{*ptr};
+    else
+        return std::nullopt;
+}
+
+DerivationOptions::OutputChecks adl_serializer<DerivationOptions::OutputChecks>::from_json(const json & json_)
+{
+    auto & json = getObject(json_);
+
     return {
         .ignoreSelfRefs = getBoolean(valueAt(json, "ignoreSelfRefs")),
-        .allowedReferences = nullableValueAt(json, "allowedReferences"),
+        .allowedReferences = ptrToOwned<StringSet>(getNullable(valueAt(json, "allowedReferences"))),
         .disallowedReferences = getStringSet(valueAt(json, "disallowedReferences")),
-        .allowedRequisites = nullableValueAt(json, "allowedRequisites"),
+        .allowedRequisites = ptrToOwned<StringSet>(getNullable(valueAt(json, "allowedRequisites"))),
         .disallowedRequisites = getStringSet(valueAt(json, "disallowedRequisites")),
     };
 }
diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc
index 6d7dbc99c..b7de615fb 100644
--- a/src/libstore/derivations.cc
+++ b/src/libstore/derivations.cc
@@ -115,23 +115,25 @@ StorePath writeDerivation(Store & store, const Derivation & drv, RepairFlag repa
        held during a garbage collection). */
     auto suffix = std::string(drv.name) + drvExtension;
     auto contents = drv.unparse(store, false);
-    return readOnly || settings.readOnlyMode ?
store.makeFixedOutputPathFromCA( - suffix, - TextInfo{ - .hash = hashString(HashAlgorithm::SHA256, contents), - .references = std::move(references), - }) - : ({ - StringSource s{contents}; - store.addToStoreFromDump( - s, - suffix, - FileSerialisationMethod::Flat, - ContentAddressMethod::Raw::Text, - HashAlgorithm::SHA256, - references, - repair); - }); + auto hash = hashString(HashAlgorithm::SHA256, contents); + auto ca = TextInfo{.hash = hash, .references = references}; + auto path = store.makeFixedOutputPathFromCA(suffix, ca); + + if (readOnly || settings.readOnlyMode || (store.isValidPath(path) && !repair)) + return path; + + StringSource s{contents}; + auto path2 = store.addToStoreFromDump( + s, + suffix, + FileSerialisationMethod::Flat, + ContentAddressMethod::Raw::Text, + HashAlgorithm::SHA256, + references, + repair); + assert(path2 == path); + + return path; } namespace { @@ -288,7 +290,7 @@ static DerivationOutput parseDerivationOutput( if (!hashAlgoStr.empty()) { ContentAddressMethod method = ContentAddressMethod::parsePrefix(hashAlgoStr); if (method == ContentAddressMethod::Raw::Text) - xpSettings.require(Xp::DynamicDerivations); + xpSettings.require(Xp::DynamicDerivations, "text-hashed derivation output"); const auto hashAlgo = parseHashAlgo(hashAlgoStr); if (hashS == "impure"sv) { xpSettings.require(Xp::ImpureDerivations); @@ -426,7 +428,9 @@ Derivation parseDerivation( if (*versionS == "xp-dyn-drv"sv) { // Only version we have so far version = DerivationATermVersion::DynamicDerivations; - xpSettings.require(Xp::DynamicDerivations); + xpSettings.require(Xp::DynamicDerivations, [&] { + return fmt("derivation '%s', ATerm format version 'xp-dyn-drv'", name); + }); } else { throw FormatError("Unknown derivation ATerm format version '%s'", *versionS); } @@ -1257,9 +1261,15 @@ void Derivation::checkInvariants(Store & store, const StorePath & drvPath) const const Hash impureOutputHash = hashString(HashAlgorithm::SHA256, "impure"); -nlohmann::json DerivationOutput::toJSON() const +} // namespace nix + +namespace nlohmann { + +using namespace nix; + +void adl_serializer::to_json(json & res, const DerivationOutput & o) { - nlohmann::json res = nlohmann::json::object(); + res = nlohmann::json::object(); std::visit( overloaded{ [&](const DerivationOutput::InputAddressed & doi) { res["path"] = doi.path; }, @@ -1285,12 +1295,11 @@ nlohmann::json DerivationOutput::toJSON() const res["impure"] = true; }, }, - raw); - return res; + o.raw); } DerivationOutput -DerivationOutput::fromJSON(const nlohmann::json & _json, const ExperimentalFeatureSettings & xpSettings) +adl_serializer::from_json(const json & _json, const ExperimentalFeatureSettings & xpSettings) { std::set keys; auto & json = getObject(_json); @@ -1301,7 +1310,7 @@ DerivationOutput::fromJSON(const nlohmann::json & _json, const ExperimentalFeatu auto methodAlgo = [&]() -> std::pair { ContentAddressMethod method = ContentAddressMethod::parse(getString(valueAt(json, "method"))); if (method == ContentAddressMethod::Raw::Text) - xpSettings.require(Xp::DynamicDerivations); + xpSettings.require(Xp::DynamicDerivations, "text-hashed derivation output in JSON"); auto hashAlgo = parseHashAlgo(getString(valueAt(json, "hashAlgo"))); return {std::move(method), std::move(hashAlgo)}; @@ -1358,18 +1367,18 @@ DerivationOutput::fromJSON(const nlohmann::json & _json, const ExperimentalFeatu } } -nlohmann::json Derivation::toJSON() const +void adl_serializer::to_json(json & res, const Derivation & d) { - nlohmann::json res = nlohmann::json::object(); + 
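The rewritten `writeDerivation` computes the content-addressed path up front, so read-only mode and the already-valid case share a single early return, and the eventual store write is asserted to land on the precomputed path. A standalone sketch of that shape, with a toy store and hash function as stand-ins (not the Nix API):

```cpp
// "Compute the content-addressed key first, then skip the write when the
// object already exists" sketch.
#include <cassert>
#include <functional>
#include <iostream>
#include <map>
#include <string>

struct ToyStore
{
    std::map<std::string, std::string> objects;

    // Stand-in for makeFixedOutputPathFromCA: the key is derived purely
    // from name + contents, so it can be computed without writing.
    std::string makePath(const std::string & name, const std::string & contents) const
    {
        return name + "-" + std::to_string(std::hash<std::string>{}(contents));
    }

    std::string addObject(const std::string & name, const std::string & contents)
    {
        auto path = makePath(name, contents);
        objects.emplace(path, contents);
        return path;
    }
};

std::string writeDerivationLike(
    ToyStore & store, const std::string & name, const std::string & contents, bool readOnly, bool repair)
{
    auto path = store.makePath(name, contents);

    if (readOnly || (store.objects.contains(path) && !repair))
        return path; // nothing to do: the key alone identifies the object

    auto path2 = store.addObject(name, contents);
    assert(path2 == path); // the write must land on the precomputed path
    return path;
}

int main()
{
    ToyStore store;
    auto p1 = writeDerivationLike(store, "foo.drv", "Derive(...)", false, false);
    auto p2 = writeDerivationLike(store, "foo.drv", "Derive(...)", true, false);
    std::cout << (p1 == p2) << '\n'; // 1: read-only mode computes the same path
}
```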
 res = nlohmann::json::object();
-    res["name"] = name;
+    res["name"] = d.name;
     res["version"] = 3;
     {
         nlohmann::json & outputsObj = res["outputs"];
         outputsObj = nlohmann::json::object();
-        for (auto & [outputName, output] : outputs) {
+        for (auto & [outputName, output] : d.outputs) {
             outputsObj[outputName] = output;
         }
     }
@@ -1377,13 +1386,12 @@ nlohmann::json Derivation::toJSON() const
     {
         auto & inputsList = res["inputSrcs"];
         inputsList = nlohmann::json::array();
-        for (auto & input : inputSrcs)
+        for (auto & input : d.inputSrcs)
             inputsList.emplace_back(input);
     }
     {
-        std::function<nlohmann::json(const DerivedPathMap<StringSet>::ChildNode &)> doInput;
-        doInput = [&](const auto & inputNode) {
+        auto doInput = [&](this const auto & doInput, const auto & inputNode) -> nlohmann::json {
             auto value = nlohmann::json::object();
             value["outputs"] = inputNode.value;
             {
@@ -1397,24 +1405,22 @@ nlohmann::json Derivation::toJSON() const
         {
             auto & inputDrvsObj = res["inputDrvs"];
             inputDrvsObj = nlohmann::json::object();
-            for (auto & [inputDrv, inputNode] : inputDrvs.map) {
+            for (auto & [inputDrv, inputNode] : d.inputDrvs.map) {
                 inputDrvsObj[inputDrv.to_string()] = doInput(inputNode);
             }
         }
     }
-    res["system"] = platform;
-    res["builder"] = builder;
-    res["args"] = args;
-    res["env"] = env;
+    res["system"] = d.platform;
+    res["builder"] = d.builder;
+    res["args"] = d.args;
+    res["env"] = d.env;
-    if (structuredAttrs)
-        res["structuredAttrs"] = structuredAttrs->structuredAttrs;
-
-    return res;
+    if (d.structuredAttrs)
+        res["structuredAttrs"] = d.structuredAttrs->structuredAttrs;
 }
-Derivation Derivation::fromJSON(const nlohmann::json & _json, const ExperimentalFeatureSettings & xpSettings)
+Derivation adl_serializer<Derivation>::from_json(const json & _json, const ExperimentalFeatureSettings & xpSettings)
 {
     using nlohmann::detail::value_t;
@@ -1430,7 +1436,7 @@ Derivation Derivation::fromJSON(const nlohmann::json & _json, const Experimental
     try {
         auto outputs = getObject(valueAt(json, "outputs"));
         for (auto & [outputName, output] : outputs) {
-            res.outputs.insert_or_assign(outputName, DerivationOutput::fromJSON(output, xpSettings));
+            res.outputs.insert_or_assign(outputName, adl_serializer<DerivationOutput>::from_json(output, xpSettings));
         }
     } catch (Error & e) {
         e.addTrace({}, "while reading key 'outputs'");
@@ -1447,14 +1453,14 @@ Derivation Derivation::fromJSON(const nlohmann::json & _json, const Experimental
     }
     try {
-        std::function<DerivedPathMap<StringSet>::ChildNode(const nlohmann::json &)> doInput;
-        doInput = [&](const auto & _json) {
+        auto doInput = [&](this const auto & doInput, const auto & _json) -> DerivedPathMap<StringSet>::ChildNode {
             auto & json = getObject(_json);
             DerivedPathMap<StringSet>::ChildNode node;
             node.value = getStringSet(valueAt(json, "outputs"));
             auto drvs = getObject(valueAt(json, "dynamicOutputs"));
             for (auto & [outputId, childNode] : drvs) {
-                xpSettings.require(Xp::DynamicDerivations);
+                xpSettings.require(
+                    Xp::DynamicDerivations, [&] { return fmt("dynamic output '%s' in JSON", outputId); });
                 node.childMap[outputId] = doInput(childNode);
             }
             return node;
@@ -1485,30 +1491,4 @@ Derivation Derivation::fromJSON(const nlohmann::json & _json, const Experimental
     return res;
 }
-} // namespace nix
-
-namespace nlohmann {
-
-using namespace nix;
-
-DerivationOutput adl_serializer<DerivationOutput>::from_json(const json & json)
-{
-    return DerivationOutput::fromJSON(json);
-}
-
-void adl_serializer<DerivationOutput>::to_json(json & json, const DerivationOutput & c)
-{
-    json = c.toJSON();
-}
-
-Derivation adl_serializer<Derivation>::from_json(const json & json)
-{
-    return Derivation::fromJSON(json);
-}
-
-void adl_serializer<Derivation>::to_json(json & json, const
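Both `doInput` recursions above drop the `std::function` indirection in favour of a C++23 "deducing this" lambda that names itself through its explicit object parameter. A minimal compilable illustration of the technique (requires -std=c++23, GCC 14+/Clang 18+):

```cpp
// Recursive lambda via an explicit object parameter: the lambda refers
// to itself as `fib` without any std::function wrapper or Y-combinator.
#include <iostream>

int main()
{
    auto fib = [](this const auto & fib, int n) -> int {
        return n < 2 ? n : fib(n - 1) + fib(n - 2);
    };
    std::cout << fib(10) << '\n'; // 55
}
```

Besides avoiding a heap-allocating type-erased wrapper, this keeps the recursion inlinable, which matters in these hot JSON (de)serialisation paths.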
Derivation & c) -{ - json = c.toJSON(); -} - } // namespace nlohmann diff --git a/src/libstore/derived-path.cc b/src/libstore/derived-path.cc index 2cf720b82..251e11251 100644 --- a/src/libstore/derived-path.cc +++ b/src/libstore/derived-path.cc @@ -85,7 +85,11 @@ void drvRequireExperiment(const SingleDerivedPath & drv, const ExperimentalFeatu [&](const SingleDerivedPath::Opaque &) { // plain drv path; no experimental features required. }, - [&](const SingleDerivedPath::Built &) { xpSettings.require(Xp::DynamicDerivations); }, + [&](const SingleDerivedPath::Built & b) { + xpSettings.require(Xp::DynamicDerivations, [&] { + return fmt("building output '%s' of '%s'", b.output, b.drvPath->getBaseStorePath().to_string()); + }); + }, }, drv.raw()); } @@ -248,20 +252,26 @@ void adl_serializer::to_json(json & json, const DerivedPath: }; } -SingleDerivedPath::Built adl_serializer::from_json(const json & json0) +SingleDerivedPath::Built +adl_serializer::from_json(const json & json0, const ExperimentalFeatureSettings & xpSettings) { auto & json = getObject(json0); + auto drvPath = make_ref(static_cast(valueAt(json, "drvPath"))); + drvRequireExperiment(*drvPath, xpSettings); return { - .drvPath = make_ref(static_cast(valueAt(json, "drvPath"))), + .drvPath = std::move(drvPath), .output = getString(valueAt(json, "output")), }; } -DerivedPath::Built adl_serializer::from_json(const json & json0) +DerivedPath::Built +adl_serializer::from_json(const json & json0, const ExperimentalFeatureSettings & xpSettings) { auto & json = getObject(json0); + auto drvPath = make_ref(static_cast(valueAt(json, "drvPath"))); + drvRequireExperiment(*drvPath, xpSettings); return { - .drvPath = make_ref(static_cast(valueAt(json, "drvPath"))), + .drvPath = std::move(drvPath), .outputs = adl_serializer::from_json(valueAt(json, "outputs")), }; } @@ -276,20 +286,21 @@ void adl_serializer::to_json(json & json, const DerivedPath & sdp) std::visit([&](const auto & buildable) { json = buildable; }, sdp.raw()); } -SingleDerivedPath adl_serializer::from_json(const json & json) +SingleDerivedPath +adl_serializer::from_json(const json & json, const ExperimentalFeatureSettings & xpSettings) { if (json.is_string()) return static_cast(json); else - return static_cast(json); + return adl_serializer::from_json(json, xpSettings); } -DerivedPath adl_serializer::from_json(const json & json) +DerivedPath adl_serializer::from_json(const json & json, const ExperimentalFeatureSettings & xpSettings) { if (json.is_string()) return static_cast(json); else - return static_cast(json); + return adl_serializer::from_json(json, xpSettings); } } // namespace nlohmann diff --git a/src/libstore/downstream-placeholder.cc b/src/libstore/downstream-placeholder.cc index b3ac1c8c4..780717a62 100644 --- a/src/libstore/downstream-placeholder.cc +++ b/src/libstore/downstream-placeholder.cc @@ -24,7 +24,8 @@ DownstreamPlaceholder DownstreamPlaceholder::unknownDerivation( OutputNameView outputName, const ExperimentalFeatureSettings & xpSettings) { - xpSettings.require(Xp::DynamicDerivations); + xpSettings.require( + Xp::DynamicDerivations, [&] { return fmt("placeholder for unknown derivation output '%s'", outputName); }); auto compressed = compressHash(placeholder.hash, 20); auto clearText = "nix-computed-output:" + compressed.to_string(HashFormat::Nix32, false) + ":" + std::string{outputName}; diff --git a/src/libstore/dummy-store.cc b/src/libstore/dummy-store.cc index 4b485ca66..1333e0aed 100644 --- a/src/libstore/dummy-store.cc +++ b/src/libstore/dummy-store.cc @@ 
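These call sites pass `xpSettings.require` either a plain reason string or a lambda, so the diagnostic is only formatted when the feature is actually missing. A sketch of the lazy-reason shape being assumed here (simplified signature, not the real `ExperimentalFeatureSettings` API):

```cpp
// Lazy-reason require(): the message callback runs only on the error path.
#include <functional>
#include <iostream>
#include <set>
#include <stdexcept>
#include <string>

struct XpSettings
{
    std::set<std::string> enabled;

    void require(const std::string & feature, const std::function<std::string()> & makeReason) const
    {
        // When the feature is enabled, makeReason() is never called, so
        // building the message costs nothing on the happy path.
        if (!enabled.contains(feature))
            throw std::runtime_error(
                "experimental feature '" + feature + "' is required for " + makeReason());
    }
};

int main()
{
    XpSettings xp{.enabled = {}};
    try {
        xp.require("dynamic-derivations", [] { return std::string("dynamic output 'out' in JSON"); });
    } catch (std::exception & e) {
        std::cout << e.what() << '\n';
    }
}
```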
-2,7 +2,8 @@
 #include "nix/util/archive.hh"
 #include "nix/util/callback.hh"
 #include "nix/util/memory-source-accessor.hh"
-#include "nix/store/dummy-store.hh"
+#include "nix/store/dummy-store-impl.hh"
+#include "nix/store/realisation.hh"
 #include <boost/unordered/concurrent_flat_map.hpp>
@@ -108,24 +109,15 @@ public:
 } // namespace
-struct DummyStore : virtual Store
+ref<Store> DummyStoreConfig::openStore() const
+{
+    return openDummyStore();
+}
+
+struct DummyStoreImpl : DummyStore
 {
     using Config = DummyStoreConfig;
-    ref<const Config> config;
-
-    struct PathInfoAndContents
-    {
-        UnkeyedValidPathInfo info;
-        ref<MemorySourceAccessor> contents;
-    };
-
-    /**
-     * This is map conceptually owns the file system objects for each
-     * store object.
-     */
-    boost::concurrent_flat_map<StorePath, PathInfoAndContents> contents;
-
     /**
      * This view conceptually just borrows the file systems objects of
      * each store object from `contents`, and combines them together
@@ -135,9 +127,9 @@
      */
     ref wholeStoreView = make_ref();
-    DummyStore(ref<const Config> config)
+    DummyStoreImpl(ref<const Config> config)
         : Store{*config}
-        , config(config)
+        , DummyStore{config}
     {
         wholeStoreView->setPathDisplay(config->storeDir);
     }
@@ -156,7 +148,7 @@
     /**
      * The dummy store is incapable of *not* trusting! :)
     */
-    virtual std::optional<TrustedFlag> isTrustedClient() override
+    std::optional<TrustedFlag> isTrustedClient() override
     {
         return Trusted;
     }
@@ -258,22 +250,27 @@ struct DummyStore : virtual Store
         return path;
     }
-    void narFromPath(const StorePath & path, Sink & sink) override
+    void registerDrvOutput(const Realisation & output) override
     {
-        bool visited = contents.cvisit(path, [&](const auto & kv) {
-            const auto & [info, accessor] = kv.second;
-            SourcePath sourcePath(accessor);
-            dumpPath(sourcePath, sink, FileSerialisationMethod::NixArchive);
+        auto ref = make_ref<UnkeyedRealisation>(output);
+        buildTrace.insert_or_visit({output.id.drvHash, {{output.id.outputName, ref}}}, [&](auto & kv) {
+            kv.second.insert_or_assign(output.id.outputName, make_ref<UnkeyedRealisation>(output));
+        });
+    }
+
+    void queryRealisationUncached(
+        const DrvOutput & drvOutput, Callback<std::shared_ptr<const UnkeyedRealisation>> callback) noexcept override
+    {
+        bool visited = false;
+        buildTrace.cvisit(drvOutput.drvHash, [&](const auto & kv) {
+            if (auto it = kv.second.find(drvOutput.outputName); it != kv.second.end()) {
+                visited = true;
+                callback(it->second.get_ptr());
+            }
         });
         if (!visited)
-            throw Error("path '%s' is not valid", printStorePath(path));
-    }
-
-    void
-    queryRealisationUncached(const DrvOutput &, Callback<std::shared_ptr<const Realisation>> callback) noexcept override
-    {
-        callback(nullptr);
+            callback(nullptr);
     }
     std::shared_ptr<SourceAccessor> getFSAccessor(const StorePath & path, bool requireValidPath) override
@@ -289,9 +286,9 @@ struct DummyStore : virtual Store
     }
 };
-ref<Store> DummyStore::Config::openStore() const
+ref<DummyStore> DummyStore::Config::openDummyStore() const
 {
-    return make_ref<DummyStore>(ref{shared_from_this()});
+    return make_ref<DummyStoreImpl>(ref{shared_from_this()});
 }
 static RegisterStoreImplementation<DummyStoreConfig> regDummyStore;
diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc
index a162df1ad..3f0b4f5cb 100644
--- a/src/libstore/filetransfer.cc
+++ b/src/libstore/filetransfer.cc
@@ -2,15 +2,16 @@
 #include "nix/store/globals.hh"
 #include "nix/util/config-global.hh"
 #include "nix/store/store-api.hh"
-#include "nix/store/s3.hh"
 #include "nix/util/compression.hh"
 #include "nix/util/finally.hh"
 #include "nix/util/callback.hh"
 #include "nix/util/signals.hh"
 #include "store-config-private.hh"
-#if NIX_WITH_S3_SUPPORT
-# include
+#include "nix/store/s3-url.hh"
+#include
+#if NIX_WITH_AWS_AUTH
+# include "nix/store/aws-creds.hh"
 #endif
 #ifdef __linux__
@@ -294,20 +295,17 @@
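`buildTrace` keys realisations by derivation hash and fans out to per-output entries, using `boost::concurrent_flat_map`'s visitation API instead of external locking. A self-contained sketch of the same two access patterns, with string keys standing in for `Hash`/`DrvOutput` (requires Boost >= 1.83):

```cpp
// insert_or_visit / cvisit patterns as used by the dummy store's buildTrace.
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <boost/unordered/concurrent_flat_map.hpp>

using OutputMap = std::map<std::string, std::shared_ptr<const std::string>>;

int main()
{
    boost::concurrent_flat_map<std::string, OutputMap> buildTrace;

    auto out = std::make_shared<const std::string>("/nix/store/example-hello");

    // registerDrvOutput: insert a fresh entry, or update the per-output
    // map in place if the derivation hash is already present; the map
    // serialises concurrent visitors internally.
    buildTrace.insert_or_visit(
        {"drvhash123", {{"out", out}}},
        [&](auto & kv) { kv.second.insert_or_assign("out", out); });

    // queryRealisationUncached: cvisit grants read-only access under the
    // map's internal synchronisation.
    buildTrace.cvisit("drvhash123", [](const auto & kv) {
        if (auto it = kv.second.find("out"); it != kv.second.end())
            std::cout << *it->second << '\n';
    });
}
```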
struct curlFileTransfer : public FileTransfer return 0; } - size_t readOffset = 0; - - size_t readCallback(char * buffer, size_t size, size_t nitems) - { - if (readOffset == request.data->length()) - return 0; - auto count = std::min(size * nitems, request.data->length() - readOffset); - assert(count); - memcpy(buffer, request.data->data() + readOffset, count); - readOffset += count; - return count; + size_t readCallback(char * buffer, size_t size, size_t nitems) noexcept + try { + auto data = request.data; + return data->source->read(buffer, nitems * size); + } catch (EndOfFile &) { + return 0; + } catch (...) { + return CURL_READFUNC_ABORT; } - static size_t readCallbackWrapper(char * buffer, size_t size, size_t nitems, void * userp) + static size_t readCallbackWrapper(char * buffer, size_t size, size_t nitems, void * userp) noexcept { return ((TransferItem *) userp)->readCallback(buffer, size, nitems); } @@ -321,19 +319,24 @@ struct curlFileTransfer : public FileTransfer } #endif - size_t seekCallback(curl_off_t offset, int origin) - { + size_t seekCallback(curl_off_t offset, int origin) noexcept + try { + auto source = request.data->source; if (origin == SEEK_SET) { - readOffset = offset; + source->restart(); + source->skip(offset); } else if (origin == SEEK_CUR) { - readOffset += offset; + source->skip(offset); } else if (origin == SEEK_END) { - readOffset = request.data->length() + offset; + NullSink sink{}; + source->drainInto(sink); } return CURL_SEEKFUNC_OK; + } catch (...) { + return CURL_SEEKFUNC_FAIL; } - static size_t seekCallbackWrapper(void * clientp, curl_off_t offset, int origin) + static size_t seekCallbackWrapper(void * clientp, curl_off_t offset, int origin) noexcept { return ((TransferItem *) clientp)->seekCallback(offset, origin); } @@ -383,28 +386,30 @@ struct curlFileTransfer : public FileTransfer if (settings.downloadSpeed.get() > 0) curl_easy_setopt(req, CURLOPT_MAX_RECV_SPEED_LARGE, (curl_off_t) (settings.downloadSpeed.get() * 1024)); - if (request.head) + if (request.method == HttpMethod::HEAD) curl_easy_setopt(req, CURLOPT_NOBODY, 1); + if (request.method == HttpMethod::DELETE) + curl_easy_setopt(req, CURLOPT_CUSTOMREQUEST, "DELETE"); + if (request.data) { - if (request.post) + if (request.method == HttpMethod::POST) { curl_easy_setopt(req, CURLOPT_POST, 1L); - else + curl_easy_setopt(req, CURLOPT_POSTFIELDSIZE_LARGE, (curl_off_t) request.data->sizeHint); + } else if (request.method == HttpMethod::PUT) { curl_easy_setopt(req, CURLOPT_UPLOAD, 1L); + curl_easy_setopt(req, CURLOPT_INFILESIZE_LARGE, (curl_off_t) request.data->sizeHint); + } else { + unreachable(); + } curl_easy_setopt(req, CURLOPT_READFUNCTION, readCallbackWrapper); curl_easy_setopt(req, CURLOPT_READDATA, this); - curl_easy_setopt(req, CURLOPT_INFILESIZE_LARGE, (curl_off_t) request.data->length()); curl_easy_setopt(req, CURLOPT_SEEKFUNCTION, seekCallbackWrapper); curl_easy_setopt(req, CURLOPT_SEEKDATA, this); } - if (request.verifyTLS) { - if (settings.caFile != "") - curl_easy_setopt(req, CURLOPT_CAINFO, settings.caFile.get().c_str()); - } else { - curl_easy_setopt(req, CURLOPT_SSL_VERIFYPEER, 0); - curl_easy_setopt(req, CURLOPT_SSL_VERIFYHOST, 0); - } + if (settings.caFile != "") + curl_easy_setopt(req, CURLOPT_CAINFO, settings.caFile.get().c_str()); #if !defined(_WIN32) && LIBCURL_VERSION_NUM >= 0x071000 curl_easy_setopt(req, CURLOPT_SOCKOPTFUNCTION, cloexec_callback); @@ -426,6 +431,24 @@ struct curlFileTransfer : public FileTransfer curl_easy_setopt(req, CURLOPT_ERRORBUFFER, errbuf); errbuf[0] 
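The upload path now reads from a restartable source: the read callback translates end-of-file into curl's 0-byte convention (and, in the diff above, any other exception into CURL_READFUNC_ABORT), while the seek callback maps SEEK_SET onto restart-plus-skip. A standalone sketch of that contract with a toy in-memory source (the real `RestartableSource` API is assumed, not shown here):

```cpp
// Toy restartable source and the read-callback shape used with curl.
#include <algorithm>
#include <cstring>
#include <iostream>
#include <stdexcept>
#include <string>

struct EndOfFile : std::runtime_error
{
    using std::runtime_error::runtime_error;
};

struct StringSource
{
    std::string data;
    size_t pos = 0;

    size_t read(char * buf, size_t n)
    {
        if (pos == data.size())
            throw EndOfFile("end of file");
        n = std::min(n, data.size() - pos);
        std::memcpy(buf, data.data() + pos, n);
        pos += n;
        return n;
    }

    void restart() { pos = 0; }          // SEEK_SET maps to restart()+skip()
    void skip(size_t n) { pos = std::min(data.size(), pos + n); }
};

// curl read callbacks are C calls: exceptions must not escape, so EOF is
// converted into the "0 bytes" convention here.
size_t readCallback(char * buffer, size_t size, size_t nitems, StringSource & src) noexcept
try {
    return src.read(buffer, size * nitems);
} catch (EndOfFile &) {
    return 0;
}

int main()
{
    StringSource src{"nar contents"};
    char buf[64];
    std::cout << readCallback(buf, 1, sizeof buf, src) << '\n'; // 12
    src.restart();
    src.skip(4);
    std::cout << readCallback(buf, 1, sizeof buf, src) << '\n'; // 8
}
```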
= 0;
+            // Set up username/password authentication if provided
+            if (request.usernameAuth) {
+                curl_easy_setopt(req, CURLOPT_USERNAME, request.usernameAuth->username.c_str());
+                if (request.usernameAuth->password) {
+                    curl_easy_setopt(req, CURLOPT_PASSWORD, request.usernameAuth->password->c_str());
+                }
+            }
+
+#if NIX_WITH_AWS_AUTH
+            // Set up AWS SigV4 signing if this is an S3 request
+            // Note: AWS SigV4 support guaranteed available (curl >= 7.75.0 checked at build time)
+            // The username/password (access key ID and secret key) are set via the general
+            // usernameAuth mechanism above.
+            if (request.awsSigV4Provider) {
+                curl_easy_setopt(req, CURLOPT_AWS_SIGV4, request.awsSigV4Provider->c_str());
+            }
+#endif
+
             result.data.clear();
             result.bodySize = 0;
         }
@@ -577,7 +600,14 @@ struct curlFileTransfer : public FileTransfer
                 decompressionSink.reset();
                 errorSink.reset();
                 embargo = std::chrono::steady_clock::now() + std::chrono::milliseconds(ms);
-                fileTransfer.enqueueItem(shared_from_this());
+                try {
+                    fileTransfer.enqueueItem(shared_from_this());
+                } catch (const nix::Error & e) {
+                    // If enqueue fails (e.g., during shutdown), fail the transfer properly
+                    // instead of letting the exception propagate, which would leave done=false
+                    // and cause the destructor to attempt a second callback invocation
+                    fail(std::move(exc));
+                }
             } else
                 fail(std::move(exc));
         }
@@ -594,10 +624,24 @@ struct curlFileTransfer : public FileTransfer
             }
         };
-        bool quit = false;
         std::priority_queue<std::shared_ptr<TransferItem>, std::vector<std::shared_ptr<TransferItem>>, EmbargoComparator> incoming;
+
+    private:
+        bool quitting = false;
+
+    public:
+        void quit()
+        {
+            quitting = true;
+            /* We will not be processing any more incoming requests */
+            while (!incoming.empty())
+                incoming.pop();
+        }
+
+        bool isQuitting()
+        {
+            return quitting;
+        }
     };
     Sync<State> state_;
@@ -649,7 +693,7 @@ struct curlFileTransfer : public FileTransfer
         /* Signal the worker thread to exit. */
         {
             auto state(state_.lock());
-            state->quit = true;
+            state->quit();
         }
 #ifndef _WIN32 // TODO need graceful async exit support on Windows?
         writeFull(wakeupPipe.writeSide.get(), " ", false);
@@ -750,7 +794,7 @@ struct curlFileTransfer : public FileTransfer
                     break;
                 }
             }
-            quit = state->quit;
+            quit = state->isQuitting();
         }
         for (auto & item : incoming) {
@@ -767,29 +811,32 @@ struct curlFileTransfer : public FileTransfer
     void workerThreadEntry()
     {
+        // Unwinding or because someone called `quit`.
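For reference, the curl side of SigV4 signing is just three options: credentials go through the ordinary username/password settings and CURLOPT_AWS_SIGV4 (available since curl 7.75.0) selects the algorithm, region and service. A minimal sketch with placeholder credentials and bucket (link with -lcurl):

```cpp
// Minimal SigV4-signed GET via libcurl; key ID, secret and URL are
// placeholders, not real values.
#include <curl/curl.h>

int main()
{
    curl_global_init(CURL_GLOBAL_DEFAULT);
    CURL * req = curl_easy_init();
    if (!req)
        return 1;

    curl_easy_setopt(req, CURLOPT_URL, "https://example-bucket.s3.us-east-1.amazonaws.com/nix-cache-info");
    curl_easy_setopt(req, CURLOPT_USERNAME, "AKIAEXAMPLE");      // access key ID
    curl_easy_setopt(req, CURLOPT_PASSWORD, "secret");           // secret access key
    // provider1:provider2:region:service, matching the string built in setupForS3()
    curl_easy_setopt(req, CURLOPT_AWS_SIGV4, "aws:amz:us-east-1:s3");

    CURLcode res = curl_easy_perform(req);
    curl_easy_cleanup(req);
    curl_global_cleanup();
    return res == CURLE_OK ? 0 : 1;
}
```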
+ bool normalExit = true; try { workerThreadMain(); } catch (nix::Interrupted & e) { + normalExit = false; } catch (std::exception & e) { printError("unexpected error in download thread: %s", e.what()); + normalExit = false; } - { + if (!normalExit) { auto state(state_.lock()); - while (!state->incoming.empty()) - state->incoming.pop(); - state->quit = true; + state->quit(); } } void enqueueItem(std::shared_ptr item) { - if (item->request.data && item->request.uri.scheme() != "http" && item->request.uri.scheme() != "https") + if (item->request.data && item->request.uri.scheme() != "http" && item->request.uri.scheme() != "https" + && item->request.uri.scheme() != "s3") throw nix::Error("uploading to '%s' is not supported", item->request.uri.to_string()); { auto state(state_.lock()); - if (state->quit) + if (state->isQuitting()) throw nix::Error("cannot enqueue download request because the download thread is shutting down"); state->incoming.push(item); } @@ -800,35 +847,11 @@ struct curlFileTransfer : public FileTransfer void enqueueFileTransfer(const FileTransferRequest & request, Callback callback) override { - /* Ugly hack to support s3:// URIs. */ + /* Handle s3:// URIs by converting to HTTPS and optionally adding auth */ if (request.uri.scheme() == "s3") { - // FIXME: do this on a worker thread - try { -#if NIX_WITH_S3_SUPPORT - auto parsed = ParsedS3URL::parse(request.uri.parsed()); - - std::string profile = parsed.profile.value_or(""); - std::string region = parsed.region.value_or(Aws::Region::US_EAST_1); - std::string scheme = parsed.scheme.value_or(""); - std::string endpoint = parsed.getEncodedEndpoint().value_or(""); - - S3Helper s3Helper(profile, region, scheme, endpoint); - - // FIXME: implement ETag - auto s3Res = s3Helper.getObject(parsed.bucket, encodeUrlPath(parsed.key)); - FileTransferResult res; - if (!s3Res.data) - throw FileTransferError(NotFound, {}, "S3 object '%s' does not exist", request.uri); - res.data = std::move(*s3Res.data); - res.urls.push_back(request.uri.to_string()); - callback(std::move(res)); -#else - throw nix::Error( - "cannot download '%s' because Nix is not built with S3 support", request.uri.to_string()); -#endif - } catch (...) 
{ - callback.rethrow(); - } + auto modifiedRequest = request; + modifiedRequest.setupForS3(); + enqueueItem(std::make_shared(*this, std::move(modifiedRequest), std::move(callback))); return; } @@ -845,7 +868,7 @@ ref getFileTransfer() { static ref fileTransfer = makeCurlFileTransfer(); - if (fileTransfer->state_.lock()->quit) + if (fileTransfer->state_.lock()->isQuitting()) fileTransfer = makeCurlFileTransfer(); return fileTransfer; @@ -856,6 +879,36 @@ ref makeFileTransfer() return makeCurlFileTransfer(); } +void FileTransferRequest::setupForS3() +{ + auto parsedS3 = ParsedS3URL::parse(uri.parsed()); + // Update the request URI to use HTTPS (works without AWS SDK) + uri = parsedS3.toHttpsUrl(); + +#if NIX_WITH_AWS_AUTH + // Auth-specific code only compiled when AWS support is available + awsSigV4Provider = "aws:amz:" + parsedS3.region.value_or("us-east-1") + ":s3"; + + // check if the request already has pre-resolved credentials + std::optional sessionToken; + if (usernameAuth) { + debug("Using pre-resolved AWS credentials from parent process"); + sessionToken = preResolvedAwsSessionToken; + } else if (auto creds = getAwsCredentialsProvider()->maybeGetCredentials(parsedS3)) { + usernameAuth = UsernameAuth{ + .username = creds->accessKeyId, + .password = creds->secretAccessKey, + }; + sessionToken = creds->sessionToken; + } + if (sessionToken) + headers.emplace_back("x-amz-security-token", *sessionToken); +#else + // When built without AWS support, just try as public bucket + debug("S3 request without authentication (built without AWS support)"); +#endif +} + std::future FileTransfer::enqueueFileTransfer(const FileTransferRequest & request) { auto promise = std::make_shared>(); @@ -880,6 +933,11 @@ FileTransferResult FileTransfer::upload(const FileTransferRequest & request) return enqueueFileTransfer(request).get(); } +FileTransferResult FileTransfer::deleteResource(const FileTransferRequest & request) +{ + return enqueueFileTransfer(request).get(); +} + void FileTransfer::download( FileTransferRequest && request, Sink & sink, std::function resultCallback) { diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc index 86c4e37a6..4846d445f 100644 --- a/src/libstore/gc.cc +++ b/src/libstore/gc.cc @@ -5,6 +5,7 @@ #include "nix/util/finally.hh" #include "nix/util/unix-domain-socket.hh" #include "nix/util/signals.hh" +#include "nix/util/util.hh" #include "nix/store/posix-fs-canonicalise.hh" #include "store-config-private.hh" @@ -18,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -905,9 +907,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) #endif ; - printInfo( - "note: currently hard linking saves %.2f MiB", - ((unsharedSize - actualSize - overhead) / (1024.0 * 1024.0))); + printInfo("note: hard linking is currently saving %s", renderSize(unsharedSize - actualSize - overhead)); } /* While we're at it, vacuum the database. 
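`setupForS3` rewrites the `s3://` URI into an ordinary HTTPS request before it ever reaches a worker thread. A rough sketch of the default virtual-host form that conversion is assumed to produce; the real `ParsedS3URL::toHttpsUrl` also handles custom endpoints, schemes and query parameters:

```cpp
// Assumed default s3:// -> https:// rewrite (virtual-host addressing).
#include <iostream>
#include <string>

std::string toHttpsUrl(
    const std::string & bucket, const std::string & key, const std::string & region = "us-east-1")
{
    return "https://" + bucket + ".s3." + region + ".amazonaws.com/" + key;
}

int main()
{
    // s3://my-cache/nix-cache-info becomes a plain HTTPS request that works
    // unauthenticated on public buckets, with SigV4 layered on top only
    // when credentials can be resolved.
    std::cout << toHttpsUrl("my-cache", "nix-cache-info") << '\n';
}
```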
*/ diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index 612e79ab0..8c542b686 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -150,7 +150,7 @@ std::vector getUserConfigFiles() return files; } -unsigned int Settings::getDefaultCores() const +unsigned int Settings::getDefaultCores() { const unsigned int concurrency = std::max(1U, std::thread::hardware_concurrency()); const unsigned int maxCPU = getMaxCPU(); @@ -258,6 +258,15 @@ Path Settings::getDefaultSSLCertFile() return ""; } +const ExternalBuilder * Settings::findExternalDerivationBuilderIfSupported(const Derivation & drv) +{ + if (auto it = std::ranges::find_if( + externalBuilders.get(), [&](const auto & handler) { return handler.systems.contains(drv.platform); }); + it != externalBuilders.get().end()) + return &*it; + return nullptr; +} + std::string nixVersion = PACKAGE_VERSION; NLOHMANN_JSON_SERIALIZE_ENUM( @@ -341,10 +350,15 @@ PathsInChroot BaseSetting::parse(const std::string & str) const i.pop_back(); } size_t p = i.find('='); - if (p == std::string::npos) - pathsInChroot[i] = {.source = i, .optional = optional}; - else - pathsInChroot[i.substr(0, p)] = {.source = i.substr(p + 1), .optional = optional}; + std::string inside, outside; + if (p == std::string::npos) { + inside = i; + outside = i; + } else { + inside = i.substr(0, p); + outside = i.substr(p + 1); + } + pathsInChroot[inside] = {.source = outside, .optional = optional}; } return pathsInChroot; } @@ -374,6 +388,22 @@ unsigned int MaxBuildJobsSetting::parse(const std::string & str) const } } +template<> +Settings::ExternalBuilders BaseSetting::parse(const std::string & str) const +{ + try { + return nlohmann::json::parse(str).template get(); + } catch (std::exception & e) { + throw UsageError("parsing setting '%s': %s", name, e.what()); + } +} + +template<> +std::string BaseSetting::to_string() const +{ + return nlohmann::json(value).dump(); +} + template<> void BaseSetting::appendOrSet(PathsInChroot newValue, bool append) { diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc index 6922c0f69..fdb7c74fc 100644 --- a/src/libstore/http-binary-cache-store.cc +++ b/src/libstore/http-binary-cache-store.cc @@ -4,6 +4,7 @@ #include "nix/store/nar-info-disk-cache.hh" #include "nix/util/callback.hh" #include "nix/store/store-registration.hh" +#include "nix/util/compression.hh" namespace nix { @@ -50,193 +51,224 @@ std::string HttpBinaryCacheStoreConfig::doc() ; } -class HttpBinaryCacheStore : public virtual BinaryCacheStore +HttpBinaryCacheStore::HttpBinaryCacheStore(ref config) + : Store{*config} // TODO it will actually mutate the configuration + , BinaryCacheStore{*config} + , config{config} { - struct State - { - bool enabled = true; - std::chrono::steady_clock::time_point disabledUntil; - }; + diskCache = getNarInfoDiskCache(); +} - Sync _state; - -public: - - using Config = HttpBinaryCacheStoreConfig; - - ref config; - - HttpBinaryCacheStore(ref config) - : Store{*config} // TODO it will actually mutate the configuration - , BinaryCacheStore{*config} - , config{config} - { - diskCache = getNarInfoDiskCache(); - } - - void init() override - { - // FIXME: do this lazily? 
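`findExternalDerivationBuilderIfSupported` is a straight `std::ranges::find_if` over the parsed `external-builders` setting, matching on the derivation's platform. A self-contained sketch of the assumed JSON wire shape (`systems`/`program`/`args`, per the `ExternalBuilder` struct later in this diff) and the lookup:

```cpp
// Parse an external-builders-style JSON list, then match a platform.
#include <algorithm>
#include <iostream>
#include <set>
#include <string>
#include <vector>
#include <nlohmann/json.hpp>

struct ExternalBuilder
{
    std::set<std::string> systems;
    std::string program;
    std::vector<std::string> args;
};

int main()
{
    auto parsed = nlohmann::json::parse(R"([
        {"systems": ["aarch64-linux"], "program": "/run/vm-builder", "args": ["--"]}
    ])");

    std::vector<ExternalBuilder> builders;
    for (auto & e : parsed)
        builders.push_back({
            e.at("systems").get<std::set<std::string>>(),
            e.at("program").get<std::string>(),
            e.at("args").get<std::vector<std::string>>(),
        });

    std::string platform = "aarch64-linux"; // drv.platform in the real code
    auto it = std::ranges::find_if(
        builders, [&](const auto & b) { return b.systems.contains(platform); });
    std::cout << (it != builders.end() ? it->program : "(no external builder)") << '\n';
}
```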
- if (auto cacheInfo = diskCache->upToDateCacheExists(config->cacheUri.to_string())) { - config->wantMassQuery.setDefault(cacheInfo->wantMassQuery); - config->priority.setDefault(cacheInfo->priority); - } else { - try { - BinaryCacheStore::init(); - } catch (UploadToHTTP &) { - throw Error("'%s' does not appear to be a binary cache", config->cacheUri.to_string()); - } - diskCache->createCache( - config->cacheUri.to_string(), config->storeDir, config->wantMassQuery, config->priority); - } - } - -protected: - - void maybeDisable() - { - auto state(_state.lock()); - if (state->enabled && settings.tryFallback) { - int t = 60; - printError("disabling binary cache '%s' for %s seconds", config->getHumanReadableURI(), t); - state->enabled = false; - state->disabledUntil = std::chrono::steady_clock::now() + std::chrono::seconds(t); - } - } - - void checkEnabled() - { - auto state(_state.lock()); - if (state->enabled) - return; - if (std::chrono::steady_clock::now() > state->disabledUntil) { - state->enabled = true; - debug("re-enabling binary cache '%s'", config->getHumanReadableURI()); - return; - } - throw SubstituterDisabled("substituter '%s' is disabled", config->getHumanReadableURI()); - } - - bool fileExists(const std::string & path) override - { - checkEnabled(); +void HttpBinaryCacheStore::init() +{ + // FIXME: do this lazily? + // For consistent cache key handling, use the reference without parameters + // This matches what's used in Store::queryPathInfo() lookups + auto cacheKey = config->getReference().render(/*withParams=*/false); + if (auto cacheInfo = diskCache->upToDateCacheExists(cacheKey)) { + config->wantMassQuery.setDefault(cacheInfo->wantMassQuery); + config->priority.setDefault(cacheInfo->priority); + } else { try { - FileTransferRequest request(makeRequest(path)); - request.head = true; - getFileTransfer()->download(request); - return true; - } catch (FileTransferError & e) { - /* S3 buckets return 403 if a file doesn't exist and the - bucket is unlistable, so treat 403 as 404. */ - if (e.error == FileTransfer::NotFound || e.error == FileTransfer::Forbidden) - return false; - maybeDisable(); - throw; + BinaryCacheStore::init(); + } catch (UploadToHTTP &) { + throw Error("'%s' does not appear to be a binary cache", config->cacheUri.to_string()); } + diskCache->createCache(cacheKey, config->storeDir, config->wantMassQuery, config->priority); } +} - void upsertFile( - const std::string & path, - std::shared_ptr> istream, - const std::string & mimeType) override - { - auto req = makeRequest(path); - req.data = StreamToSourceAdapter(istream).drain(); - req.mimeType = mimeType; - try { - getFileTransfer()->upload(req); - } catch (FileTransferError & e) { - throw UploadToHTTP( - "while uploading to HTTP binary cache at '%s': %s", config->cacheUri.to_string(), e.msg()); - } - } - - FileTransferRequest makeRequest(const std::string & path) - { - /* Otherwise the last path fragment will get discarded. */ - auto cacheUriWithTrailingSlash = config->cacheUri; - if (!cacheUriWithTrailingSlash.path.empty()) - cacheUriWithTrailingSlash.path.push_back(""); - - /* path is not a path, but a full relative or absolute - URL, e.g. we've seen in the wild NARINFO files have a URL - field which is - `nar/15f99rdaf26k39knmzry4xd0d97wp6yfpnfk1z9avakis7ipb9yg.nar?hash=zphkqn2wg8mnvbkixnl2aadkbn0rcnfj` - (note the query param) and that gets passed here. 
*/ - return FileTransferRequest(parseURLRelative(path, cacheUriWithTrailingSlash)); - } - - void getFile(const std::string & path, Sink & sink) override - { - checkEnabled(); - auto request(makeRequest(path)); - try { - getFileTransfer()->download(std::move(request), sink); - } catch (FileTransferError & e) { - if (e.error == FileTransfer::NotFound || e.error == FileTransfer::Forbidden) - throw NoSuchBinaryCacheFile( - "file '%s' does not exist in binary cache '%s'", path, config->getHumanReadableURI()); - maybeDisable(); - throw; - } - } - - void getFile(const std::string & path, Callback> callback) noexcept override - { - auto callbackPtr = std::make_shared(std::move(callback)); - - try { - checkEnabled(); - - auto request(makeRequest(path)); - - getFileTransfer()->enqueueFileTransfer( - request, {[callbackPtr, this](std::future result) { - try { - (*callbackPtr)(std::move(result.get().data)); - } catch (FileTransferError & e) { - if (e.error == FileTransfer::NotFound || e.error == FileTransfer::Forbidden) - return (*callbackPtr)({}); - maybeDisable(); - callbackPtr->rethrow(); - } catch (...) { - callbackPtr->rethrow(); - } - }}); - - } catch (...) { - callbackPtr->rethrow(); - return; - } - } - - std::optional getNixCacheInfo() override - { - try { - auto result = getFileTransfer()->download(makeRequest(cacheInfoFile)); - return result.data; - } catch (FileTransferError & e) { - if (e.error == FileTransfer::NotFound) - return std::nullopt; - maybeDisable(); - throw; - } - } - - /** - * This isn't actually necessary read only. We support "upsert" now, so we - * have a notion of authentication via HTTP POST/PUT. - * - * For now, we conservatively say we don't know. - * - * \todo try to expose our HTTP authentication status. - */ - std::optional isTrustedClient() override - { +std::optional HttpBinaryCacheStore::getCompressionMethod(const std::string & path) +{ + if (hasSuffix(path, ".narinfo") && !config->narinfoCompression.get().empty()) + return config->narinfoCompression; + else if (hasSuffix(path, ".ls") && !config->lsCompression.get().empty()) + return config->lsCompression; + else if (hasPrefix(path, "log/") && !config->logCompression.get().empty()) + return config->logCompression; + else return std::nullopt; +} + +void HttpBinaryCacheStore::maybeDisable() +{ + auto state(_state.lock()); + if (state->enabled && settings.tryFallback) { + int t = 60; + printError("disabling binary cache '%s' for %s seconds", config->getHumanReadableURI(), t); + state->enabled = false; + state->disabledUntil = std::chrono::steady_clock::now() + std::chrono::seconds(t); } -}; +} + +void HttpBinaryCacheStore::checkEnabled() +{ + auto state(_state.lock()); + if (state->enabled) + return; + if (std::chrono::steady_clock::now() > state->disabledUntil) { + state->enabled = true; + debug("re-enabling binary cache '%s'", config->getHumanReadableURI()); + return; + } + throw SubstituterDisabled("substituter '%s' is disabled", config->getHumanReadableURI()); +} + +bool HttpBinaryCacheStore::fileExists(const std::string & path) +{ + checkEnabled(); + + try { + FileTransferRequest request(makeRequest(path)); + request.method = HttpMethod::HEAD; + getFileTransfer()->download(request); + return true; + } catch (FileTransferError & e) { + /* S3 buckets return 403 if a file doesn't exist and the + bucket is unlistable, so treat 403 as 404. 
*/ + if (e.error == FileTransfer::NotFound || e.error == FileTransfer::Forbidden) + return false; + maybeDisable(); + throw; + } +} + +void HttpBinaryCacheStore::upload( + std::string_view path, + RestartableSource & source, + uint64_t sizeHint, + std::string_view mimeType, + std::optional contentEncoding) +{ + auto req = makeRequest(path); + req.method = HttpMethod::PUT; + + if (contentEncoding) { + req.headers.emplace_back("Content-Encoding", *contentEncoding); + } + + req.data = {sizeHint, source}; + req.mimeType = mimeType; + + getFileTransfer()->upload(req); +} + +void HttpBinaryCacheStore::upload(std::string_view path, CompressedSource & source, std::string_view mimeType) +{ + upload(path, static_cast(source), source.size(), mimeType, source.getCompressionMethod()); +} + +void HttpBinaryCacheStore::upsertFile( + const std::string & path, RestartableSource & source, const std::string & mimeType, uint64_t sizeHint) +{ + try { + if (auto compressionMethod = getCompressionMethod(path)) { + CompressedSource compressed(source, *compressionMethod); + upload(path, compressed, mimeType); + } else { + upload(path, source, sizeHint, mimeType, std::nullopt); + } + } catch (FileTransferError & e) { + UploadToHTTP err(e.message()); + err.addTrace({}, "while uploading to HTTP binary cache at '%s'", config->cacheUri.to_string()); + throw err; + } +} + +FileTransferRequest HttpBinaryCacheStore::makeRequest(std::string_view path) +{ + /* Otherwise the last path fragment will get discarded. */ + auto cacheUriWithTrailingSlash = config->cacheUri; + if (!cacheUriWithTrailingSlash.path.empty()) + cacheUriWithTrailingSlash.path.push_back(""); + + /* path is not a path, but a full relative or absolute + URL, e.g. we've seen in the wild NARINFO files have a URL + field which is + `nar/15f99rdaf26k39knmzry4xd0d97wp6yfpnfk1z9avakis7ipb9yg.nar?hash=zphkqn2wg8mnvbkixnl2aadkbn0rcnfj` + (note the query param) and that gets passed here. */ + auto result = parseURLRelative(path, cacheUriWithTrailingSlash); + + /* For S3 URLs, preserve query parameters from the base URL when the + relative path doesn't have its own query parameters. This is needed + to preserve S3-specific parameters like endpoint and region. */ + if (config->cacheUri.scheme == "s3" && result.query.empty()) { + result.query = config->cacheUri.query; + } + + return FileTransferRequest(result); +} + +void HttpBinaryCacheStore::getFile(const std::string & path, Sink & sink) +{ + checkEnabled(); + auto request(makeRequest(path)); + try { + getFileTransfer()->download(std::move(request), sink); + } catch (FileTransferError & e) { + if (e.error == FileTransfer::NotFound || e.error == FileTransfer::Forbidden) + throw NoSuchBinaryCacheFile( + "file '%s' does not exist in binary cache '%s'", path, config->getHumanReadableURI()); + maybeDisable(); + throw; + } +} + +void HttpBinaryCacheStore::getFile(const std::string & path, Callback> callback) noexcept +{ + auto callbackPtr = std::make_shared(std::move(callback)); + + try { + checkEnabled(); + + auto request(makeRequest(path)); + + getFileTransfer()->enqueueFileTransfer(request, {[callbackPtr, this](std::future result) { + try { + (*callbackPtr)(std::move(result.get().data)); + } catch (FileTransferError & e) { + if (e.error == FileTransfer::NotFound + || e.error == FileTransfer::Forbidden) + return (*callbackPtr)({}); + maybeDisable(); + callbackPtr->rethrow(); + } catch (...) { + callbackPtr->rethrow(); + } + }}); + + } catch (...) 
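`getCompressionMethod`, defined just above, picks a per-path compression method that `upsertFile` then wraps in a `CompressedSource` and advertises via a Content-Encoding header. A standalone rendering of just the decision logic:

```cpp
// Per-path compression choice: .narinfo, .ls and log/ files each follow
// their own configured method; everything else is uploaded as-is.
#include <iostream>
#include <optional>
#include <string>

struct Config
{
    std::string narinfoCompression = "xz";
    std::string lsCompression;        // empty: leave .ls files alone
    std::string logCompression = "br";
};

std::optional<std::string> getCompressionMethod(const Config & cfg, const std::string & path)
{
    auto hasSuffix = [&](const std::string & s) {
        return path.size() >= s.size() && path.compare(path.size() - s.size(), s.size(), s) == 0;
    };
    if (hasSuffix(".narinfo") && !cfg.narinfoCompression.empty())
        return cfg.narinfoCompression;
    if (hasSuffix(".ls") && !cfg.lsCompression.empty())
        return cfg.lsCompression;
    if (path.starts_with("log/") && !cfg.logCompression.empty())
        return cfg.logCompression;
    return std::nullopt; // upload uncompressed, no Content-Encoding header
}

int main()
{
    Config cfg;
    std::cout << getCompressionMethod(cfg, "abc.narinfo").value_or("(none)") << '\n'; // xz
    std::cout << getCompressionMethod(cfg, "xyz.ls").value_or("(none)") << '\n';      // (none)
}
```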
 {
+        callbackPtr->rethrow();
+        return;
+    }
+}
+
+std::optional<std::string> HttpBinaryCacheStore::getNixCacheInfo()
+{
+    try {
+        auto result = getFileTransfer()->download(makeRequest(cacheInfoFile));
+        return result.data;
+    } catch (FileTransferError & e) {
+        if (e.error == FileTransfer::NotFound)
+            return std::nullopt;
+        maybeDisable();
+        throw;
+    }
+}
+
+/**
+ * This isn't actually necessarily read only. We support "upsert" now, so we
+ * have a notion of authentication via HTTP POST/PUT.
+ *
+ * For now, we conservatively say we don't know.
+ *
+ * \todo try to expose our HTTP authentication status.
+ */
+std::optional<TrustedFlag> HttpBinaryCacheStore::isTrustedClient()
+{
+    return std::nullopt;
+}
 ref<Store> HttpBinaryCacheStore::Config::openStore() const
 {
diff --git a/src/libstore/include/nix/store/aws-creds.hh b/src/libstore/include/nix/store/aws-creds.hh
new file mode 100644
index 000000000..30f6592a0
--- /dev/null
+++ b/src/libstore/include/nix/store/aws-creds.hh
@@ -0,0 +1,86 @@
+#pragma once
+///@file
+#include "nix/store/config.hh"
+
+#if NIX_WITH_AWS_AUTH
+
+# include "nix/store/s3-url.hh"
+# include "nix/util/ref.hh"
+# include "nix/util/error.hh"
+
+# include
+# include
+# include
+
+namespace nix {
+
+/**
+ * AWS credentials obtained from credential providers
+ */
+struct AwsCredentials
+{
+    std::string accessKeyId;
+    std::string secretAccessKey;
+    std::optional<std::string> sessionToken;
+
+    AwsCredentials(
+        const std::string & accessKeyId,
+        const std::string & secretAccessKey,
+        const std::optional<std::string> & sessionToken = std::nullopt)
+        : accessKeyId(accessKeyId)
+        , secretAccessKey(secretAccessKey)
+        , sessionToken(sessionToken)
+    {
+    }
+};
+
+class AwsAuthError : public Error
+{
+    std::optional<int> errorCode;
+
+public:
+    using Error::Error;
+    AwsAuthError(int errorCode);
+
+    std::optional<int> getErrorCode() const
+    {
+        return errorCode;
+    }
+};
+
+class AwsCredentialProvider
+{
+public:
+    /**
+     * Get AWS credentials for the given URL.
+     *
+     * @param url The S3 url to get the credentials for
+     * @return AWS credentials
+     * @throws AwsAuthError if credentials cannot be resolved
+     */
+    virtual AwsCredentials getCredentials(const ParsedS3URL & url) = 0;
+
+    std::optional<AwsCredentials> maybeGetCredentials(const ParsedS3URL & url)
+    {
+        try {
+            return getCredentials(url);
+        } catch (AwsAuthError & e) {
+            return std::nullopt;
+        }
+    }
+
+    virtual ~AwsCredentialProvider() {}
+};
+
+/**
+ * Create a new instance of AwsCredentialProvider.
+ */
+ref<AwsCredentialProvider> makeAwsCredentialsProvider();
+
+/**
+ * Get a reference to the global AwsCredentialProvider.
+ */
+ref<AwsCredentialProvider> getAwsCredentialsProvider();
+
+} // namespace nix
+#endif
diff --git a/src/libstore/include/nix/store/binary-cache-store.hh b/src/libstore/include/nix/store/binary-cache-store.hh
index c316b1199..e64dc3eae 100644
--- a/src/libstore/include/nix/store/binary-cache-store.hh
+++ b/src/libstore/include/nix/store/binary-cache-store.hh
@@ -80,25 +80,45 @@ private:
 protected:
-    // The prefix under which realisation infos will be stored
-    const std::string realisationsPrefix = "realisations";
+    /**
+     * The prefix under which realisation infos will be stored
+     */
+    constexpr const static std::string realisationsPrefix = "realisations";
-    const std::string cacheInfoFile = "nix-cache-info";
+    constexpr const static std::string cacheInfoFile = "nix-cache-info";
     BinaryCacheStore(Config &);
+    /**
+     * Compute the path to the given realisation
+     *
+     * It's `${realisationsPrefix}/${drvOutput}.doi`.
+ */ + std::string makeRealisationPath(const DrvOutput & id); + public: virtual bool fileExists(const std::string & path) = 0; virtual void upsertFile( - const std::string & path, std::shared_ptr> istream, const std::string & mimeType) = 0; + const std::string & path, RestartableSource & source, const std::string & mimeType, uint64_t sizeHint) = 0; void upsertFile( const std::string & path, // FIXME: use std::string_view std::string && data, - const std::string & mimeType); + const std::string & mimeType, + uint64_t sizeHint); + + void upsertFile( + const std::string & path, + // FIXME: use std::string_view + std::string && data, + const std::string & mimeType) + { + auto size = data.size(); + upsertFile(path, std::move(data), mimeType, size); + } /** * Dump the contents of the specified file to a sink. @@ -175,7 +195,7 @@ public: void registerDrvOutput(const Realisation & info) override; void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept override; + const DrvOutput &, Callback> callback) noexcept override; void narFromPath(const StorePath & path, Sink & sink) override; diff --git a/src/libstore/include/nix/store/build-result.hh b/src/libstore/include/nix/store/build-result.hh index d7249d420..0446c4038 100644 --- a/src/libstore/include/nix/store/build-result.hh +++ b/src/libstore/include/nix/store/build-result.hh @@ -12,63 +12,121 @@ namespace nix { struct BuildResult { - /** - * @note This is directly used in the nix-store --serve protocol. - * That means we need to worry about compatibility across versions. - * Therefore, don't remove status codes, and only add new status - * codes at the end of the list. - */ - enum Status { - Built = 0, - Substituted, - AlreadyValid, - PermanentFailure, - InputRejected, - OutputRejected, - /// possibly transient - TransientFailure, - /// no longer used - CachedFailure, - TimedOut, - MiscFailure, - DependencyFailed, - LogLimitExceeded, - NotDeterministic, - ResolvesToAlreadyValid, - NoSubstituters, - /// A certain type of `OutputRejected`. The protocols do not yet - /// know about this one, so change it back to `OutputRejected` - /// before serialization. - HashMismatch, - } status = MiscFailure; + struct Success + { + /** + * @note This is directly used in the nix-store --serve protocol. + * That means we need to worry about compatibility across versions. + * Therefore, don't remove status codes, and only add new status + * codes at the end of the list. + * + * Must be disjoint with `Failure::Status`. + */ + enum Status : uint8_t { + Built = 0, + Substituted = 1, + AlreadyValid = 2, + ResolvesToAlreadyValid = 13, + } status; + + /** + * For derivations, a mapping from the names of the wanted outputs + * to actual paths. + */ + SingleDrvOutputs builtOutputs; + + bool operator==(const BuildResult::Success &) const noexcept; + std::strong_ordering operator<=>(const BuildResult::Success &) const noexcept; + + static bool statusIs(uint8_t status) + { + return status == Built || status == Substituted || status == AlreadyValid + || status == ResolvesToAlreadyValid; + } + }; + + struct Failure + { + /** + * @note This is directly used in the nix-store --serve protocol. + * That means we need to worry about compatibility across versions. + * Therefore, don't remove status codes, and only add new status + * codes at the end of the list. + * + * Must be disjoint with `Success::Status`. 
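Given the documented layout, `makeRealisationPath` is little more than string concatenation; the `drvHash!outputName` rendering below is an assumption based on the usual `DrvOutput` notation:

```cpp
// Sketch of makeRealisationPath under the layout documented above.
#include <iostream>
#include <string>

static const std::string realisationsPrefix = "realisations";

std::string makeRealisationPath(const std::string & drvHash, const std::string & outputName)
{
    return realisationsPrefix + "/" + drvHash + "!" + outputName + ".doi";
}

int main()
{
    std::cout << makeRealisationPath("sha256:1abc", "out") << '\n';
    // realisations/sha256:1abc!out.doi
}
```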
+         */
+        enum Status : uint8_t {
+            PermanentFailure = 3,
+            InputRejected = 4,
+            OutputRejected = 5,
+            /// possibly transient
+            TransientFailure = 6,
+            /// no longer used
+            CachedFailure = 7,
+            TimedOut = 8,
+            MiscFailure = 9,
+            DependencyFailed = 10,
+            LogLimitExceeded = 11,
+            NotDeterministic = 12,
+            NoSubstituters = 14,
+            /// A certain type of `OutputRejected`. The protocols do not yet
+            /// know about this one, so change it back to `OutputRejected`
+            /// before serialization.
+            HashMismatch = 15,
+        } status = MiscFailure;
+
+        /**
+         * Information about the error if the build failed.
+         *
+         * @todo This should be an entire ErrorInfo object, not just a
+         * string, for richer information.
+         */
+        std::string errorMsg;
+
+        /**
+         * If timesBuilt > 1, whether some builds did not produce the same
+         * result. (Note that 'isNonDeterministic = false' does not mean
+         * the build is deterministic, just that we don't have evidence of
+         * non-determinism.)
+         */
+        bool isNonDeterministic = false;
+
+        bool operator==(const BuildResult::Failure &) const noexcept;
+        std::strong_ordering operator<=>(const BuildResult::Failure &) const noexcept;
+
+        [[noreturn]] void rethrow() const
+        {
+            throw Error("%s", errorMsg);
+        }
+    };
+
+    std::variant<Success, Failure> inner = Failure{};
     /**
-     * Information about the error if the build failed.
-     *
-     * @todo This should be an entire ErrorInfo object, not just a
-     * string, for richer information.
+     * Convenience wrapper to avoid a longer `std::get_if` usage by the
+     * caller (which will have to add more `BuildResult::` than we do
+     * below also, do note.)
     */
-    std::string errorMsg;
+    auto * tryGetSuccess(this auto & self)
+    {
+        return std::get_if<Success>(&self.inner);
+    }
+
+    /**
+     * Convenience wrapper to avoid a longer `std::get_if` usage by the
+     * caller (which will have to add more `BuildResult::` than we do
+     * below also, do note.)
+     */
+    auto * tryGetFailure(this auto & self)
+    {
+        return std::get_if<Failure>(&self.inner);
+    }
     /**
      * How many times this build was performed.
      */
     unsigned int timesBuilt = 0;
-    /**
-     * If timesBuilt > 1, whether some builds did not produce the same
-     * result. (Note that 'isNonDeterministic = false' does not mean
-     * the build is deterministic, just that we don't have evidence of
-     * non-determinism.)
-     */
-    bool isNonDeterministic = false;
-
-    /**
-     * For derivations, a mapping from the names of the wanted outputs
-     * to actual paths.
-     */
-    SingleDrvOutputs builtOutputs;
-
     /**
      * The start/stop times of the build (or one of the rounds, if it
      * was repeated).
@@ -82,16 +140,6 @@ struct BuildResult
     bool operator==(const BuildResult &) const noexcept;
     std::strong_ordering operator<=>(const BuildResult &) const noexcept;
-
-    bool success()
-    {
-        return status == Built || status == Substituted || status == AlreadyValid || status == ResolvesToAlreadyValid;
-    }
-
-    void rethrow()
-    {
-        throw Error("%s", errorMsg);
-    }
 };
 /**
@@ -99,15 +147,9 @@ struct BuildResult
 */
 struct BuildError : public Error
 {
-    BuildResult::Status status;
+    BuildResult::Failure::Status status;
-    BuildError(BuildResult::Status status, BuildError && error)
-        : Error{std::move(error)}
-        , status{status}
-    {
-    }
-
-    BuildError(BuildResult::Status status, auto &&...
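The net effect of this restructuring is that impossible states, such as a success status sitting next to an `errorMsg`, are no longer representable, and callers pattern-match instead of comparing status codes. A minimal compilable model of the variant plus the deducing-this accessors (requires -std=c++23):

```cpp
// Variant-based result with a single accessor that is const-correct for
// both const and mutable objects via an explicit object parameter.
#include <iostream>
#include <string>
#include <type_traits>
#include <variant>

struct BuildResultLike
{
    struct Success
    {
        int status = 0;
    };

    struct Failure
    {
        int status = 9;
        std::string errorMsg;
    };

    std::variant<Success, Failure> inner = Failure{};

    auto * tryGetSuccess(this auto & self) { return std::get_if<Success>(&self.inner); }
    auto * tryGetFailure(this auto & self) { return std::get_if<Failure>(&self.inner); }
};

int main()
{
    BuildResultLike r;
    r.inner = BuildResultLike::Failure{.status = 9, .errorMsg = "builder failed"};

    if (auto * f = r.tryGetFailure())
        std::cout << "failure: " << f->errorMsg << '\n';

    // One definition serves both constnesses: a const object yields a
    // pointer-to-const without writing a second overload.
    const auto & cr = r;
    static_assert(std::is_same_v<decltype(cr.tryGetSuccess()), const BuildResultLike::Success *>);
}
```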
args) : Error{args...} , status{status} { diff --git a/src/libstore/include/nix/store/build/derivation-builder.hh b/src/libstore/include/nix/store/build/derivation-builder.hh index 7fad2837a..5fad26e83 100644 --- a/src/libstore/include/nix/store/build/derivation-builder.hh +++ b/src/libstore/include/nix/store/build/derivation-builder.hh @@ -1,12 +1,15 @@ #pragma once ///@file +#include + #include "nix/store/build-result.hh" #include "nix/store/derivation-options.hh" #include "nix/store/build/derivation-building-misc.hh" #include "nix/store/derivations.hh" #include "nix/store/parsed-derivations.hh" #include "nix/util/processes.hh" +#include "nix/util/json-impls.hh" #include "nix/store/restricted-store.hh" #include "nix/store/build/derivation-env-desugar.hh" @@ -22,7 +25,7 @@ struct BuilderFailureError : BuildError std::string extraMsgAfter; - BuilderFailureError(BuildResult::Status status, int builderStatus, std::string extraMsgAfter) + BuilderFailureError(BuildResult::Failure::Status status, int builderStatus, std::string extraMsgAfter) : BuildError{ status, /* No message for now, because the caller will make for @@ -179,9 +182,28 @@ struct DerivationBuilder : RestrictionContext virtual bool killChild() = 0; }; +struct ExternalBuilder +{ + StringSet systems; + Path program; + std::vector args; +}; + #ifndef _WIN32 // TODO enable `DerivationBuilder` on Windows std::unique_ptr makeDerivationBuilder( LocalStore & store, std::unique_ptr miscMethods, DerivationBuilderParams params); + +/** + * @param handler Must be chosen such that it supports the given + * derivation. + */ +std::unique_ptr makeExternalDerivationBuilder( + LocalStore & store, + std::unique_ptr miscMethods, + DerivationBuilderParams params, + const ExternalBuilder & handler); #endif } // namespace nix + +JSON_IMPL(nix::ExternalBuilder) diff --git a/src/libstore/include/nix/store/build/derivation-building-goal.hh b/src/libstore/include/nix/store/build/derivation-building-goal.hh index d394eb3c9..547e533e2 100644 --- a/src/libstore/include/nix/store/build/derivation-building-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-building-goal.hh @@ -29,8 +29,17 @@ typedef enum { rpAccept, rpDecline, rpPostpone } HookReply; */ struct DerivationBuildingGoal : public Goal { + /** + * @param storeDerivation Whether to store the derivation in + * `worker.store`. This is useful for newly-resolved derivations. In this + * case, the derivation was not created a priori, e.g. purely (or close + * enough) from evaluation of the Nix language, but also depends on the + * exact content produced by upstream builds. It is strongly advised to + * have a permanent record of such a resolved derivation in order to + * faithfully reconstruct the build history. + */ DerivationBuildingGoal( - const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode = bmNormal); + const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode, bool storeDerivation); ~DerivationBuildingGoal(); private: @@ -100,7 +109,7 @@ private: /** * The states. 
*/ - Co gaveUpOnSubstitution(); + Co gaveUpOnSubstitution(bool storeDerivation); Co tryToBuild(); /** @@ -147,7 +156,7 @@ private: */ void killChild(); - Done doneSuccess(BuildResult::Status status, SingleDrvOutputs builtOutputs); + Done doneSuccess(BuildResult::Success::Status status, SingleDrvOutputs builtOutputs); Done doneFailure(BuildError ex); diff --git a/src/libstore/include/nix/store/build/derivation-goal.hh b/src/libstore/include/nix/store/build/derivation-goal.hh index 85b471e28..0fe610987 100644 --- a/src/libstore/include/nix/store/build/derivation-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-goal.hh @@ -40,12 +40,16 @@ struct DerivationGoal : public Goal */ OutputName wantedOutput; + /** + * @param storeDerivation See `DerivationBuildingGoal`. This is just passed along. + */ DerivationGoal( const StorePath & drvPath, const Derivation & drv, const OutputName & wantedOutput, Worker & worker, - BuildMode buildMode = bmNormal); + BuildMode buildMode, + bool storeDerivation); ~DerivationGoal() = default; void timedOut(Error && ex) override @@ -80,7 +84,7 @@ private: /** * The states. */ - Co haveDerivation(); + Co haveDerivation(bool storeDerivation); /** * Return `std::nullopt` if the output is unknown, e.g. un unbuilt @@ -89,17 +93,17 @@ private: * of the wanted output, and a `PathStatus` with the * current status of that output. */ - std::optional> checkPathValidity(); + std::optional> checkPathValidity(); /** * Aborts if any output is not valid or corrupt, and otherwise * returns a 'Realisation' for the wanted output. */ - Realisation assertPathValidity(); + UnkeyedRealisation assertPathValidity(); Co repairClosure(); - Done doneSuccess(BuildResult::Status status, Realisation builtOutput); + Done doneSuccess(BuildResult::Success::Status status, UnkeyedRealisation builtOutput); Done doneFailure(BuildError ex); }; diff --git a/src/libstore/include/nix/store/build/derivation-resolution-goal.hh b/src/libstore/include/nix/store/build/derivation-resolution-goal.hh new file mode 100644 index 000000000..fb4c2a346 --- /dev/null +++ b/src/libstore/include/nix/store/build/derivation-resolution-goal.hh @@ -0,0 +1,81 @@ +#pragma once +///@file + +#include "nix/store/derivations.hh" +#include "nix/store/derivation-options.hh" +#include "nix/store/build/derivation-building-misc.hh" +#include "nix/store/store-api.hh" +#include "nix/store/build/goal.hh" + +namespace nix { + +struct BuilderFailureError; + +/** + * A goal for resolving a derivation. Resolving a derivation (@see + * `Derivation::tryResolve`) simplifies its inputs, replacing + * `inputDrvs` with `inputSrcs`. + * + * Conceptually, we resolve all derivations. For input-addressed + * derivations (that don't transtively depend on content-addressed + * derivations), however, we don't actually use the resolved derivation, + * because the output paths would appear invalid (if we tried to verify + * them), since they are computed from the original, unresolved inputs. + * + * That said, if we ever made the new flavor of input-addressing as described + * in issue #9259, then the input-addressing would be based on the resolved + * inputs, and we like the CA case *would* use the output of this goal. + * + * (The point of this discussion is not to randomly stuff information on + * a yet-unimplemented feature (issue #9259) in the codebase, but + * rather, to illustrate that there is no inherent tension between + * explicit derivation resolution and input-addressing in general. 
+ * tension only exists with the type of input-addressing we've
+ * historically used.)
+ */
+struct DerivationResolutionGoal : public Goal
+{
+    DerivationResolutionGoal(const StorePath & drvPath, const Derivation & drv, Worker & worker, BuildMode buildMode);
+
+    /**
+     * If the derivation needed to be resolved, this is the resulting
+     * resolved derivation and its path.
+     */
+    std::unique_ptr<std::pair<StorePath, BasicDerivation>> resolvedDrv;
+
+    void timedOut(Error && ex) override {}
+
+private:
+
+    /**
+     * The path of the derivation.
+     */
+    StorePath drvPath;
+
+    /**
+     * The derivation stored at drvPath.
+     */
+    std::unique_ptr<Derivation> drv;
+
+    /**
+     * The remainder is state held during the build.
+     */
+
+    BuildMode buildMode;
+
+    std::unique_ptr<Activity> act;
+
+    std::string key() override;
+
+    /**
+     * The states.
+     */
+    Co resolveDerivation();
+
+    JobCategory jobCategory() const override
+    {
+        return JobCategory::Administration;
+    };
+};
+
+} // namespace nix
diff --git a/src/libstore/include/nix/store/build/derivation-trampoline-goal.hh b/src/libstore/include/nix/store/build/derivation-trampoline-goal.hh
index 79b74f4c1..bfed67f63 100644
--- a/src/libstore/include/nix/store/build/derivation-trampoline-goal.hh
+++ b/src/libstore/include/nix/store/build/derivation-trampoline-goal.hh
@@ -109,7 +109,7 @@ struct DerivationTrampolineGoal : public Goal

     virtual ~DerivationTrampolineGoal();

-    void timedOut(Error && ex) override;
+    void timedOut(Error && ex) override {}

     std::string key() override;
diff --git a/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh b/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh
index b42336427..6310e0d2c 100644
--- a/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh
+++ b/src/libstore/include/nix/store/build/drv-output-substitution-goal.hh
@@ -29,17 +29,9 @@ class DrvOutputSubstitutionGoal : public Goal
     DrvOutput id;

 public:
-    DrvOutputSubstitutionGoal(
-        const DrvOutput & id,
-        Worker & worker,
-        RepairFlag repair = NoRepair,
-        std::optional<ContentAddress> ca = std::nullopt);
-
-    typedef void (DrvOutputSubstitutionGoal::*GoalState)();
-    GoalState state;
+    DrvOutputSubstitutionGoal(const DrvOutput & id, Worker & worker);

     Co init();
-    Co realisationFetched(Goals waitees, std::shared_ptr<const Realisation> outputInfo, nix::ref<Store> sub);

     void timedOut(Error && ex) override
     {
diff --git a/src/libstore/include/nix/store/build/goal.hh b/src/libstore/include/nix/store/build/goal.hh
index 52700d12e..4d57afc0f 100644
--- a/src/libstore/include/nix/store/build/goal.hh
+++ b/src/libstore/include/nix/store/build/goal.hh
@@ -456,6 +456,18 @@ public:
      */
     virtual void timedOut(Error && ex) = 0;

+    /**
+     * Used for comparisons. The order matters a bit for scheduling. We
+     * want:
+     *
+     * 1. Substitution
+     * 2. Derivation administrivia
+     * 3. Actual building
+     *
+     * Also, ensure that derivations get processed in order of their
+     * name, i.e. a derivation named "aardvark" always comes before
+     * "baboon".
+ */ virtual std::string key() = 0; /** diff --git a/src/libstore/include/nix/store/build/substitution-goal.hh b/src/libstore/include/nix/store/build/substitution-goal.hh index 9fc6450b1..5f33b9aa5 100644 --- a/src/libstore/include/nix/store/build/substitution-goal.hh +++ b/src/libstore/include/nix/store/build/substitution-goal.hh @@ -41,7 +41,9 @@ struct PathSubstitutionGoal : public Goal */ std::optional ca; - Done done(ExitCode result, BuildResult::Status status, std::optional errorMsg = {}); + Done doneSuccess(BuildResult::Success::Status status); + + Done doneFailure(ExitCode result, BuildResult::Failure::Status status, std::string errorMsg); public: PathSubstitutionGoal( @@ -56,10 +58,6 @@ public: unreachable(); }; - /** - * We prepend "a$" to the key name to ensure substitution goals - * happen before derivation goals. - */ std::string key() override { return "a$" + std::string(storePath.name()) + "$" + worker.store.printStorePath(storePath); diff --git a/src/libstore/include/nix/store/build/worker.hh b/src/libstore/include/nix/store/build/worker.hh index a6de780c1..173f7b222 100644 --- a/src/libstore/include/nix/store/build/worker.hh +++ b/src/libstore/include/nix/store/build/worker.hh @@ -16,6 +16,7 @@ namespace nix { /* Forward definition. */ struct DerivationTrampolineGoal; struct DerivationGoal; +struct DerivationResolutionGoal; struct DerivationBuildingGoal; struct PathSubstitutionGoal; class DrvOutputSubstitutionGoal; @@ -111,6 +112,7 @@ private: DerivedPathMap>> derivationTrampolineGoals; std::map>> derivationGoals; + std::map> derivationResolutionGoals; std::map> derivationBuildingGoals; std::map> substitutionGoals; std::map> drvOutputSubstitutionGoals; @@ -208,34 +210,37 @@ private: std::shared_ptr initGoalIfNeeded(std::weak_ptr & goal_weak, Args &&... args); std::shared_ptr makeDerivationTrampolineGoal( - ref drvReq, const OutputsSpec & wantedOutputs, BuildMode buildMode = bmNormal); + ref drvReq, const OutputsSpec & wantedOutputs, BuildMode buildMode); public: std::shared_ptr makeDerivationTrampolineGoal( - const StorePath & drvPath, - const OutputsSpec & wantedOutputs, - const Derivation & drv, - BuildMode buildMode = bmNormal); + const StorePath & drvPath, const OutputsSpec & wantedOutputs, const Derivation & drv, BuildMode buildMode); std::shared_ptr makeDerivationGoal( const StorePath & drvPath, const Derivation & drv, const OutputName & wantedOutput, - BuildMode buildMode = bmNormal); + BuildMode buildMode, + bool storeDerivation); /** - * @ref DerivationBuildingGoal "derivation goal" + * @ref DerivationResolutionGoal "derivation resolution goal" */ - std::shared_ptr - makeDerivationBuildingGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode = bmNormal); + std::shared_ptr + makeDerivationResolutionGoal(const StorePath & drvPath, const Derivation & drv, BuildMode buildMode); + + /** + * @ref DerivationBuildingGoal "derivation building goal" + */ + std::shared_ptr makeDerivationBuildingGoal( + const StorePath & drvPath, const Derivation & drv, BuildMode buildMode, bool storeDerivation); /** * @ref PathSubstitutionGoal "substitution goal" */ std::shared_ptr makePathSubstitutionGoal( const StorePath & storePath, RepairFlag repair = NoRepair, std::optional ca = std::nullopt); - std::shared_ptr makeDrvOutputSubstitutionGoal( - const DrvOutput & id, RepairFlag repair = NoRepair, std::optional ca = std::nullopt); + std::shared_ptr makeDrvOutputSubstitutionGoal(const DrvOutput & id); /** * Make a goal corresponding to the `DerivedPath`. 
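// ---------------------------------------------------------------------------
// Editorial note: a minimal sketch (not the actual Nix implementation) of how
// a lexicographic `key()` can encode the scheduling order described in
// `goal.hh` above. The only prefix confirmed by this patch is the "a$" used
// by `PathSubstitutionGoal`; the "b$"/"c$" prefixes here are hypothetical.
// ---------------------------------------------------------------------------
#include <string>

namespace sketch {

enum class GoalPhase { Substitution, Administration, Building };

// Compose a key that sorts first by phase, then by derivation name, so that
// within one phase a derivation named "aardvark" is always processed before
// one named "baboon".
inline std::string makeGoalKey(GoalPhase phase, const std::string & drvName)
{
    const char * prefix = phase == GoalPhase::Substitution ? "a$"
        : phase == GoalPhase::Administration               ? "b$"
                                                           : "c$";
    return std::string(prefix) + drvName;
}

} // namespace sketch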
diff --git a/src/libstore/include/nix/store/builtins.hh b/src/libstore/include/nix/store/builtins.hh index cc164fe82..6925e61c1 100644 --- a/src/libstore/include/nix/store/builtins.hh +++ b/src/libstore/include/nix/store/builtins.hh @@ -2,6 +2,11 @@ ///@file #include "nix/store/derivations.hh" +#include "nix/store/config.hh" + +#if NIX_WITH_AWS_AUTH +# include "nix/store/aws-creds.hh" +#endif namespace nix { @@ -12,6 +17,14 @@ struct BuiltinBuilderContext std::string netrcData; std::string caFileData; Path tmpDirInSandbox; + +#if NIX_WITH_AWS_AUTH + /** + * Pre-resolved AWS credentials for S3 URLs in builtin:fetchurl. + * When present, these should be used instead of creating new credential providers. + */ + std::optional awsCredentials; +#endif }; using BuiltinBuilder = std::function; diff --git a/src/libstore/include/nix/store/content-address.hh b/src/libstore/include/nix/store/content-address.hh index 0a3dc79bd..41ccc69ae 100644 --- a/src/libstore/include/nix/store/content-address.hh +++ b/src/libstore/include/nix/store/content-address.hh @@ -6,6 +6,7 @@ #include "nix/store/path.hh" #include "nix/util/file-content-address.hh" #include "nix/util/variant-wrapper.hh" +#include "nix/util/json-impls.hh" namespace nix { @@ -308,4 +309,15 @@ struct ContentAddressWithReferences Hash getHash() const; }; +template<> +struct json_avoids_null : std::true_type +{}; + +template<> +struct json_avoids_null : std::true_type +{}; + } // namespace nix + +JSON_IMPL(nix::ContentAddressMethod) +JSON_IMPL(nix::ContentAddress) diff --git a/src/libstore/include/nix/store/derivations.hh b/src/libstore/include/nix/store/derivations.hh index 0dfb80347..4615d8acd 100644 --- a/src/libstore/include/nix/store/derivations.hh +++ b/src/libstore/include/nix/store/derivations.hh @@ -134,13 +134,6 @@ struct DerivationOutput */ std::optional path(const StoreDirConfig & store, std::string_view drvName, OutputNameView outputName) const; - - nlohmann::json toJSON() const; - /** - * @param xpSettings Stop-gap to avoid globals during unit tests. - */ - static DerivationOutput - fromJSON(const nlohmann::json & json, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); }; typedef std::map DerivationOutputs; @@ -390,10 +383,6 @@ struct Derivation : BasicDerivation { } - nlohmann::json toJSON() const; - static Derivation - fromJSON(const nlohmann::json & json, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); - bool operator==(const Derivation &) const = default; // TODO libc++ 16 (used by darwin) missing `std::map::operator <=>`, can't do yet. 
// auto operator <=> (const Derivation &) const = default;
@@ -537,5 +526,5 @@ std::string hashPlaceholder(const OutputNameView outputName);

 } // namespace nix

-JSON_IMPL(nix::DerivationOutput)
-JSON_IMPL(nix::Derivation)
+JSON_IMPL_WITH_XP_FEATURES(nix::DerivationOutput)
+JSON_IMPL_WITH_XP_FEATURES(nix::Derivation)
diff --git a/src/libstore/include/nix/store/derived-path.hh b/src/libstore/include/nix/store/derived-path.hh
index 47b29b2d6..70074ea40 100644
--- a/src/libstore/include/nix/store/derived-path.hh
+++ b/src/libstore/include/nix/store/derived-path.hh
@@ -299,7 +299,7 @@ void drvRequireExperiment(
 } // namespace nix

 JSON_IMPL(nix::SingleDerivedPath::Opaque)
-JSON_IMPL(nix::SingleDerivedPath::Built)
-JSON_IMPL(nix::SingleDerivedPath)
-JSON_IMPL(nix::DerivedPath::Built)
-JSON_IMPL(nix::DerivedPath)
+JSON_IMPL_WITH_XP_FEATURES(nix::SingleDerivedPath::Built)
+JSON_IMPL_WITH_XP_FEATURES(nix::SingleDerivedPath)
+JSON_IMPL_WITH_XP_FEATURES(nix::DerivedPath::Built)
+JSON_IMPL_WITH_XP_FEATURES(nix::DerivedPath)
diff --git a/src/libstore/include/nix/store/dummy-store-impl.hh b/src/libstore/include/nix/store/dummy-store-impl.hh
new file mode 100644
index 000000000..4c9f54e98
--- /dev/null
+++ b/src/libstore/include/nix/store/dummy-store-impl.hh
@@ -0,0 +1,52 @@
+#pragma once
+///@file
+
+#include "nix/store/dummy-store.hh"
+
+#include <boost/unordered/concurrent_flat_map.hpp>
+
+namespace nix {
+
+struct MemorySourceAccessor;
+
+/**
+ * Enough of the Dummy Store exposed for the sake of writing unit tests
+ */
+struct DummyStore : virtual Store
+{
+    using Config = DummyStoreConfig;
+
+    ref<const Config> config;
+
+    struct PathInfoAndContents
+    {
+        UnkeyedValidPathInfo info;
+        ref<MemorySourceAccessor> contents;
+    };
+
+    /**
+     * This map conceptually owns the file system objects for each
+     * store object.
+     */
+    boost::concurrent_flat_map<StorePath, PathInfoAndContents> contents;
+
+    /**
+     * The build trace maps the pair of a content-addressed (fixed or
+     * floating) derivation and one of its outputs to a
+     * (content-addressed) store object.
+     *
+     * It is [curried](https://en.wikipedia.org/wiki/Currying): instead
+     * of having a single map keyed by `DrvOutput`, we have an
+     * outer map for the derivation, and inner maps for the outputs of a
+     * given derivation.
+     */
+    boost::concurrent_flat_map<Hash, std::shared_ptr<boost::concurrent_flat_map<OutputName, UnkeyedRealisation>>> buildTrace;
+
+    DummyStore(ref<const Config> config)
+        : Store{*config}
+        , config(config)
+    {
+    }
+};
+
+} // namespace nix
diff --git a/src/libstore/include/nix/store/dummy-store.hh b/src/libstore/include/nix/store/dummy-store.hh
index e93aad366..d371c4e51 100644
--- a/src/libstore/include/nix/store/dummy-store.hh
+++ b/src/libstore/include/nix/store/dummy-store.hh
@@ -3,8 +3,12 @@

 #include "nix/store/store-api.hh"

+#include
+
 namespace nix {

+struct DummyStore;
+
 struct DummyStoreConfig : public std::enable_shared_from_this<DummyStoreConfig>, virtual StoreConfig
 {
     DummyStoreConfig(const Params & params)
@@ -42,6 +46,11 @@ struct DummyStoreConfig : public std::enable_shared_from_this<DummyStoreConfig>,
         return {"dummy"};
     }

+    /**
+     * Same as `openStore`, just with a more precise return type.
+ */ + ref openDummyStore() const; + ref openStore() const override; StoreReference getReference() const override diff --git a/src/libstore/include/nix/store/filetransfer.hh b/src/libstore/include/nix/store/filetransfer.hh index 2f2d59036..6419a686e 100644 --- a/src/libstore/include/nix/store/filetransfer.hh +++ b/src/libstore/include/nix/store/filetransfer.hh @@ -11,6 +11,12 @@ #include "nix/util/serialise.hh" #include "nix/util/url.hh" +#include "nix/store/config.hh" +#if NIX_WITH_AWS_AUTH +# include "nix/store/aws-creds.hh" +#endif +#include "nix/store/s3-url.hh" + namespace nix { struct FileTransferSettings : Config @@ -77,32 +83,107 @@ extern FileTransferSettings fileTransferSettings; extern const unsigned int RETRY_TIME_MS_DEFAULT; +/** + * HTTP methods supported by FileTransfer. + */ +enum struct HttpMethod { + GET, + PUT, + HEAD, + POST, + DELETE, +}; + +/** + * Username and optional password for HTTP basic authentication. + * These are used with curl's CURLOPT_USERNAME and CURLOPT_PASSWORD options + * for various protocols including HTTP, FTP, and others. + */ +struct UsernameAuth +{ + std::string username; + std::optional password; +}; + struct FileTransferRequest { - ValidURL uri; + VerbatimURL uri; Headers headers; std::string expectedETag; - bool verifyTLS = true; - bool head = false; - bool post = false; + HttpMethod method = HttpMethod::GET; size_t tries = fileTransferSettings.tries; unsigned int baseRetryTimeMs = RETRY_TIME_MS_DEFAULT; ActivityId parentAct; bool decompress = true; - std::optional data; + + struct UploadData + { + UploadData(StringSource & s) + : sizeHint(s.s.length()) + , source(&s) + { + } + + UploadData(std::size_t sizeHint, RestartableSource & source) + : sizeHint(sizeHint) + , source(&source) + { + } + + std::size_t sizeHint = 0; + RestartableSource * source = nullptr; + }; + + std::optional data; std::string mimeType; std::function dataCallback; + /** + * Optional username and password for HTTP basic authentication. + * When provided, these credentials will be used with curl's CURLOPT_USERNAME/PASSWORD option. + */ + std::optional usernameAuth; +#if NIX_WITH_AWS_AUTH + /** + * Pre-resolved AWS session token for S3 requests. + * When provided along with usernameAuth, this will be used instead of fetching fresh credentials. + */ + std::optional preResolvedAwsSessionToken; +#endif - FileTransferRequest(ValidURL uri) + FileTransferRequest(VerbatimURL uri) : uri(std::move(uri)) , parentAct(getCurActivity()) { } + /** + * Returns the verb root for logging purposes. + * The returned string is intended to be concatenated with "ing" to form the gerund, + * e.g., "download" + "ing" -> "downloading", "upload" + "ing" -> "uploading". + */ std::string verb() const { - return data ? "upload" : "download"; + switch (method) { + case HttpMethod::HEAD: + case HttpMethod::GET: + return "download"; + case HttpMethod::PUT: + case HttpMethod::POST: + assert(data); + return "upload"; + case HttpMethod::DELETE: + return "delet"; + } + unreachable(); } + + void setupForS3(); + +private: + friend struct curlFileTransfer; +#if NIX_WITH_AWS_AUTH + std::optional awsSigV4Provider; +#endif }; struct FileTransferResult @@ -166,6 +247,11 @@ struct FileTransfer */ FileTransferResult upload(const FileTransferRequest & request); + /** + * Synchronously delete a resource. + */ + FileTransferResult deleteResource(const FileTransferRequest & request); + /** * Download a file, writing its data to a sink. The sink will be * invoked on the thread of the caller. 
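// ---------------------------------------------------------------------------
// Editorial note: a hedged sketch of how the new `HttpMethod`/`UploadData`
// API in filetransfer.hh above might be driven. The URL and payload are
// invented for illustration; `getFileTransfer()` is assumed to be the
// existing accessor for the shared FileTransfer instance, and `VerbatimURL`
// is assumed to be constructible from a string.
// ---------------------------------------------------------------------------
#include "nix/store/filetransfer.hh"

using namespace nix;

// Upload a small blob with the PUT method; for this request verb() returns
// "upload", so progress messages read "uploading ...".
void uploadSketch()
{
    FileTransferRequest req{VerbatimURL{"https://cache.example.org/hello.txt"}};
    req.method = HttpMethod::PUT;

    StringSource payload{"hello"};                       // a RestartableSource with a known size
    req.data = FileTransferRequest::UploadData{payload}; // sizeHint taken from payload.s.length()
    req.mimeType = "text/plain";

    getFileTransfer()->upload(req);
}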
diff --git a/src/libstore/include/nix/store/globals.hh b/src/libstore/include/nix/store/globals.hh index 2cd92467c..8aa82c4a2 100644 --- a/src/libstore/include/nix/store/globals.hh +++ b/src/libstore/include/nix/store/globals.hh @@ -77,7 +77,7 @@ public: Settings(); - unsigned int getDefaultCores() const; + static unsigned int getDefaultCores(); Path nixPrefix; @@ -199,7 +199,7 @@ public: --> For instance, in Nixpkgs, if the attribute `enableParallelBuilding` for the `mkDerivation` build helper is set to `true`, it passes the `-j${NIX_BUILD_CORES}` flag to GNU Make. - If set to `0`, nix will detect the number of CPU cores and pass this number via NIX_BUILD_CORES. + If set to `0`, nix will detect the number of CPU cores and pass this number via `NIX_BUILD_CORES`. > **Note** > @@ -427,7 +427,7 @@ public: R"( If set to `true`, Nix instructs [remote build machines](#conf-builders) to use their own [`substituters`](#conf-substituters) if available. - It means that remote build hosts fetches as many dependencies as possible from their own substituters (e.g, from `cache.nixos.org`) instead of waiting for the local machine to upload them all. + It means that remote build hosts fetch as many dependencies as possible from their own substituters (e.g, from `cache.nixos.org`) instead of waiting for the local machine to upload them all. This can drastically reduce build times if the network connection between the local machine and the remote build host is slow. )"}; @@ -503,7 +503,7 @@ public: by the Nix account, its group should be the group specified here, and its mode should be `1775`. - If the build users group is empty, builds areperformed under + If the build users group is empty, builds are performed under the uid of the Nix process (that is, the uid of the caller if `NIX_REMOTE` is empty, the uid under which the Nix daemon runs if `NIX_REMOTE` is `daemon`). Obviously, this should not be used @@ -847,8 +847,8 @@ public: 4. The path to the build's scratch directory. This directory exists only if the build was run with `--keep-failed`. - The stderr and stdout output from the diff hook isn't - displayed to the user. Instead, it print to the nix-daemon's log. + The stderr and stdout output from the diff hook isn't displayed + to the user. Instead, it prints to the nix-daemon's log. When using the Nix daemon, `diff-hook` must be set in the `nix.conf` configuration file, and cannot be passed at the command line. @@ -1372,6 +1372,76 @@ public: Default is 0, which disables the warning. Set it to 1 to warn on all paths. )"}; + + using ExternalBuilders = std::vector; + + Setting externalBuilders{ + this, + {}, + "external-builders", + R"( + Helper programs that execute derivations. + + The program is passed a JSON document that describes the build environment as the final argument. 
+ The JSON document looks like this: + + { + "args": [ + "-e", + "/nix/store/vj1c3wf9…-source-stdenv.sh", + "/nix/store/shkw4qm9…-default-builder.sh" + ], + "builder": "/nix/store/s1qkj0ph…-bash-5.2p37/bin/bash", + "env": { + "HOME": "/homeless-shelter", + "builder": "/nix/store/s1qkj0ph…-bash-5.2p37/bin/bash", + "nativeBuildInputs": "/nix/store/l31j72f1…-version-check-hook", + "out": "/nix/store/2yx2prgx…-hello-2.12.2" + … + }, + "inputPaths": [ + "/nix/store/14dciax3…-glibc-2.32-54-dev", + "/nix/store/1azs5s8z…-gettext-0.21", + … + ], + "outputs": { + "out": "/nix/store/2yx2prgx…-hello-2.12.2" + }, + "realStoreDir": "/nix/store", + "storeDir": "/nix/store", + "system": "aarch64-linux", + "tmpDir": "/private/tmp/nix-build-hello-2.12.2.drv-0/build", + "tmpDirInSandbox": "/build", + "topTmpDir": "/private/tmp/nix-build-hello-2.12.2.drv-0", + "version": 1 + } + )", + {}, // aliases + true, // document default + // NOTE(cole-h): even though we can make the experimental feature required here, the errors + // are not as good (it just becomes a warning if you try to use this setting without the + // experimental feature) + // + // With this commented out: + // + // error: experimental Nix feature 'external-builders' is disabled; add '--extra-experimental-features + // external-builders' to enable it + // + // With this uncommented: + // + // warning: Ignoring setting 'external-builders' because experimental feature 'external-builders' is not enabled + // error: Cannot build '/nix/store/vwsp4qd8…-opentofu-1.10.2.drv'. + // Reason: required system or feature not available + // Required system: 'aarch64-linux' with features {} + // Current system: 'aarch64-darwin' with features {apple-virt, benchmark, big-parallel, nixos-test} + // Xp::ExternalBuilders + }; + + /** + * Finds the first external derivation builder that supports this + * derivation, or else returns a null pointer. + */ + const ExternalBuilder * findExternalDerivationBuilderIfSupported(const Derivation & drv); }; // FIXME: don't use a global variable. diff --git a/src/libstore/include/nix/store/http-binary-cache-store.hh b/src/libstore/include/nix/store/http-binary-cache-store.hh index 4102c858f..b092b5b5e 100644 --- a/src/libstore/include/nix/store/http-binary-cache-store.hh +++ b/src/libstore/include/nix/store/http-binary-cache-store.hh @@ -3,6 +3,10 @@ #include "nix/util/url.hh" #include "nix/store/binary-cache-store.hh" +#include "nix/store/filetransfer.hh" +#include "nix/util/sync.hh" + +#include namespace nix { @@ -17,6 +21,21 @@ struct HttpBinaryCacheStoreConfig : std::enable_shared_from_this narinfoCompression{ + this, "", "narinfo-compression", "Compression method for `.narinfo` files."}; + + const Setting lsCompression{this, "", "ls-compression", "Compression method for `.ls` files."}; + + const Setting logCompression{ + this, + "", + "log-compression", + R"( + Compression method for `log/*` files. It is recommended to + use a compression method supported by most web browsers + (e.g. `brotli`). 
+ )"}; + static const std::string name() { return "HTTP Binary Cache Store"; @@ -31,4 +50,79 @@ struct HttpBinaryCacheStoreConfig : std::enable_shared_from_this _state; + +public: + + using Config = HttpBinaryCacheStoreConfig; + + ref config; + + HttpBinaryCacheStore(ref config); + + void init() override; + +protected: + + std::optional getCompressionMethod(const std::string & path); + + void maybeDisable(); + + void checkEnabled(); + + bool fileExists(const std::string & path) override; + + void upsertFile( + const std::string & path, RestartableSource & source, const std::string & mimeType, uint64_t sizeHint) override; + + FileTransferRequest makeRequest(std::string_view path); + + /** + * Uploads data to the binary cache. + * + * This is a lower-level method that handles the actual upload after + * compression has been applied. It does not handle compression or + * error wrapping - those are the caller's responsibility. + * + * @param path The path in the binary cache to upload to + * @param source The data source (should already be compressed if needed) + * @param sizeHint Size hint for the data + * @param mimeType The MIME type of the content + * @param contentEncoding Optional Content-Encoding header value (e.g., "xz", "br") + */ + void upload( + std::string_view path, + RestartableSource & source, + uint64_t sizeHint, + std::string_view mimeType, + std::optional contentEncoding); + + /** + * Uploads data to the binary cache (CompressedSource overload). + * + * This overload infers both the size and compression method from the CompressedSource. + * + * @param path The path in the binary cache to upload to + * @param source The compressed source (knows size and compression method) + * @param mimeType The MIME type of the content + */ + void upload(std::string_view path, CompressedSource & source, std::string_view mimeType); + + void getFile(const std::string & path, Sink & sink) override; + + void getFile(const std::string & path, Callback> callback) noexcept override; + + std::optional getNixCacheInfo() override; + + std::optional isTrustedClient() override; +}; + } // namespace nix diff --git a/src/libstore/include/nix/store/legacy-ssh-store.hh b/src/libstore/include/nix/store/legacy-ssh-store.hh index 75751e2d1..994918f90 100644 --- a/src/libstore/include/nix/store/legacy-ssh-store.hh +++ b/src/libstore/include/nix/store/legacy-ssh-store.hh @@ -109,7 +109,7 @@ struct LegacySSHStore : public virtual Store unsupported("addToStore"); } - virtual StorePath addToStoreFromDump( + StorePath addToStoreFromDump( Source & dump, std::string_view name, FileSerialisationMethod dumpMethod = FileSerialisationMethod::NixArchive, @@ -121,6 +121,11 @@ struct LegacySSHStore : public virtual Store unsupported("addToStore"); } + void registerDrvOutput(const Realisation & output) override + { + unsupported("registerDrvOutput"); + } + public: BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv, BuildMode buildMode) override; @@ -203,8 +208,8 @@ public: */ std::optional isTrustedClient() override; - void - queryRealisationUncached(const DrvOutput &, Callback> callback) noexcept override + void queryRealisationUncached( + const DrvOutput &, Callback> callback) noexcept override // TODO: Implement { unsupported("queryRealisation"); diff --git a/src/libstore/include/nix/store/local-fs-store.hh b/src/libstore/include/nix/store/local-fs-store.hh index 08f8e1656..100a4110d 100644 --- a/src/libstore/include/nix/store/local-fs-store.hh +++ 
b/src/libstore/include/nix/store/local-fs-store.hh @@ -78,7 +78,6 @@ struct LocalFSStore : virtual Store, virtual GcStore, virtual LogStore LocalFSStore(const Config & params); - void narFromPath(const StorePath & path, Sink & sink) override; ref getFSAccessor(bool requireValidPath = true) override; std::shared_ptr getFSAccessor(const StorePath & path, bool requireValidPath = true) override; diff --git a/src/libstore/include/nix/store/local-overlay-store.hh b/src/libstore/include/nix/store/local-overlay-store.hh index b89d0a1a0..1d69d3417 100644 --- a/src/libstore/include/nix/store/local-overlay-store.hh +++ b/src/libstore/include/nix/store/local-overlay-store.hh @@ -173,7 +173,7 @@ private: * Check lower store if upper DB does not have. */ void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept override; + const DrvOutput &, Callback> callback) noexcept override; /** * Call `remountIfNecessary` after collecting garbage normally. diff --git a/src/libstore/include/nix/store/local-store.hh b/src/libstore/include/nix/store/local-store.hh index b871aaee2..ab255fba8 100644 --- a/src/libstore/include/nix/store/local-store.hh +++ b/src/libstore/include/nix/store/local-store.hh @@ -385,10 +385,10 @@ public: void cacheDrvOutputMapping( State & state, const uint64_t deriver, const std::string & outputName, const StorePath & output); - std::optional queryRealisation_(State & state, const DrvOutput & id); - std::optional> queryRealisationCore_(State & state, const DrvOutput & id); + std::optional queryRealisation_(State & state, const DrvOutput & id); + std::optional> queryRealisationCore_(State & state, const DrvOutput & id); void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept override; + const DrvOutput &, Callback> callback) noexcept override; std::optional getVersion() override; diff --git a/src/libstore/include/nix/store/meson.build b/src/libstore/include/nix/store/meson.build index 428ef00f3..5d6626ff8 100644 --- a/src/libstore/include/nix/store/meson.build +++ b/src/libstore/include/nix/store/meson.build @@ -10,6 +10,7 @@ config_pub_h = configure_file( ) headers = [ config_pub_h ] + files( + 'aws-creds.hh', 'binary-cache-store.hh', 'build-result.hh', 'build/derivation-builder.hh', @@ -17,6 +18,7 @@ headers = [ config_pub_h ] + files( 'build/derivation-building-misc.hh', 'build/derivation-env-desugar.hh', 'build/derivation-goal.hh', + 'build/derivation-resolution-goal.hh', 'build/derivation-trampoline-goal.hh', 'build/drv-output-substitution-goal.hh', 'build/goal.hh', @@ -34,6 +36,7 @@ headers = [ config_pub_h ] + files( 'derived-path-map.hh', 'derived-path.hh', 'downstream-placeholder.hh', + 'dummy-store-impl.hh', 'dummy-store.hh', 'export-import.hh', 'filetransfer.hh', @@ -72,7 +75,7 @@ headers = [ config_pub_h ] + files( 'remote-store.hh', 'restricted-store.hh', 's3-binary-cache-store.hh', - 's3.hh', + 's3-url.hh', 'serve-protocol-connection.hh', 'serve-protocol-impl.hh', 'serve-protocol.hh', diff --git a/src/libstore/include/nix/store/nar-accessor.hh b/src/libstore/include/nix/store/nar-accessor.hh index 0e69d436e..bfba5da73 100644 --- a/src/libstore/include/nix/store/nar-accessor.hh +++ b/src/libstore/include/nix/store/nar-accessor.hh @@ -27,7 +27,12 @@ ref makeNarAccessor(Source & source); */ using GetNarBytes = std::function; -ref makeLazyNarAccessor(const std::string & listing, GetNarBytes getNarBytes); +/** + * The canonical GetNarBytes function for a seekable Source. 
+ */ +GetNarBytes seekableGetNarBytes(const Path & path); + +ref makeLazyNarAccessor(const nlohmann::json & listing, GetNarBytes getNarBytes); /** * Write a JSON representation of the contents of a NAR (except file diff --git a/src/libstore/include/nix/store/parsed-derivations.hh b/src/libstore/include/nix/store/parsed-derivations.hh index edef1b2d2..52e97b0e7 100644 --- a/src/libstore/include/nix/store/parsed-derivations.hh +++ b/src/libstore/include/nix/store/parsed-derivations.hh @@ -18,7 +18,7 @@ struct StructuredAttrs { static constexpr std::string_view envVarName{"__json"}; - nlohmann::json structuredAttrs; + nlohmann::json::object_t structuredAttrs; bool operator==(const StructuredAttrs &) const = default; @@ -45,7 +45,7 @@ struct StructuredAttrs */ static void checkKeyNotInUse(const StringPairs & env); - nlohmann::json prepareStructuredAttrs( + nlohmann::json::object_t prepareStructuredAttrs( Store & store, const DerivationOptions & drvOptions, const StorePathSet & inputPaths, @@ -62,7 +62,7 @@ struct StructuredAttrs * `prepareStructuredAttrs`, *not* the original `structuredAttrs` * field. */ - static std::string writeShell(const nlohmann::json & prepared); + static std::string writeShell(const nlohmann::json::object_t & prepared); }; } // namespace nix diff --git a/src/libstore/include/nix/store/path-references.hh b/src/libstore/include/nix/store/path-references.hh index 66d0da268..6aa506da4 100644 --- a/src/libstore/include/nix/store/path-references.hh +++ b/src/libstore/include/nix/store/path-references.hh @@ -3,6 +3,10 @@ #include "nix/store/references.hh" #include "nix/store/path.hh" +#include "nix/util/source-accessor.hh" + +#include +#include namespace nix { @@ -21,4 +25,57 @@ public: StorePathSet getResultPaths(); }; +/** + * Result of scanning a single file for references. + */ +struct FileRefScanResult +{ + CanonPath filePath; ///< The file that was scanned + StorePathSet foundRefs; ///< Which store paths were found in this file +}; + +/** + * Scan a store path tree and report which references appear in which files. + * + * This is like scanForReferences() but provides per-file granularity. + * Useful for cycle detection and detailed dependency analysis like `nix why-depends --precise`. + * + * The function walks the tree using the provided accessor and streams each file's + * contents through a RefScanSink to detect hash references. For each file that + * contains at least one reference, a callback is invoked with the file path and + * the set of references found. + * + * Note: This function only searches for the hash part of store paths (e.g., + * "dc04vv14dak1c1r48qa0m23vr9jy8sm0"), not the name part. A store path like + * "/nix/store/dc04vv14dak1c1r48qa0m23vr9jy8sm0-foo" will be detected if the + * hash appears anywhere in the scanned content, regardless of the "-foo" suffix. + * + * @param accessor Source accessor to read the tree + * @param rootPath Root path to scan + * @param refs Set of store paths to search for + * @param callback Called for each file that contains at least one reference + */ +void scanForReferencesDeep( + SourceAccessor & accessor, + const CanonPath & rootPath, + const StorePathSet & refs, + std::function callback); + +/** + * Scan a store path tree and return which references appear in which files. + * + * This is a convenience wrapper around the callback-based scanForReferencesDeep() + * that collects all results into a map for efficient lookups. + * + * Note: This function only searches for the hash part of store paths, not the name part. 
+ * See the callback-based overload for details. + * + * @param accessor Source accessor to read the tree + * @param rootPath Root path to scan + * @param refs Set of store paths to search for + * @return Map from file paths to the set of references found in each file + */ +std::map +scanForReferencesDeep(SourceAccessor & accessor, const CanonPath & rootPath, const StorePathSet & refs); + } // namespace nix diff --git a/src/libstore/include/nix/store/realisation.hh b/src/libstore/include/nix/store/realisation.hh index 3424a39c9..e8a71862e 100644 --- a/src/libstore/include/nix/store/realisation.hh +++ b/src/libstore/include/nix/store/realisation.hh @@ -46,12 +46,12 @@ struct DrvOutput static DrvOutput parse(const std::string &); - GENERATE_CMP(DrvOutput, me->drvHash, me->outputName); + bool operator==(const DrvOutput &) const = default; + auto operator<=>(const DrvOutput &) const = default; }; -struct Realisation +struct UnkeyedRealisation { - DrvOutput id; StorePath outPath; StringSet signatures; @@ -64,22 +64,35 @@ struct Realisation */ std::map dependentRealisations; - std::string fingerprint() const; - void sign(const Signer &); - bool checkSignature(const PublicKeys & publicKeys, const std::string & sig) const; - size_t checkSignatures(const PublicKeys & publicKeys) const; + std::string fingerprint(const DrvOutput & key) const; - static std::set closure(Store &, const std::set &); - static void closure(Store &, const std::set &, std::set & res); + void sign(const DrvOutput & key, const Signer &); - bool isCompatibleWith(const Realisation & other) const; + bool checkSignature(const DrvOutput & key, const PublicKeys & publicKeys, const std::string & sig) const; - StorePath getPath() const + size_t checkSignatures(const DrvOutput & key, const PublicKeys & publicKeys) const; + + const StorePath & getPath() const { return outPath; } - GENERATE_CMP(Realisation, me->id, me->outPath); + // TODO sketchy that it avoids signatures + GENERATE_CMP(UnkeyedRealisation, me->outPath); +}; + +struct Realisation : UnkeyedRealisation +{ + DrvOutput id; + + bool isCompatibleWith(const UnkeyedRealisation & other) const; + + static std::set closure(Store &, const std::set &); + + static void closure(Store &, const std::set &, std::set & res); + + bool operator==(const Realisation &) const = default; + auto operator<=>(const Realisation &) const = default; }; /** @@ -103,12 +116,13 @@ struct OpaquePath { StorePath path; - StorePath getPath() const + const StorePath & getPath() const & { return path; } - GENERATE_CMP(OpaquePath, me->path); + bool operator==(const OpaquePath &) const = default; + auto operator<=>(const OpaquePath &) const = default; }; /** @@ -116,7 +130,7 @@ struct OpaquePath */ struct RealisedPath { - /* + /** * A path is either the result of the realisation of a derivation or * an opaque blob that has been directly added to the store */ @@ -138,13 +152,14 @@ struct RealisedPath /** * Get the raw store path associated to this */ - StorePath path() const; + const StorePath & path() const &; void closure(Store & store, Set & ret) const; static void closure(Store & store, const Set & startPaths, Set & ret); Set closure(Store & store) const; - GENERATE_CMP(RealisedPath, me->raw); + bool operator==(const RealisedPath &) const = default; + auto operator<=>(const RealisedPath &) const = default; }; class MissingRealisation : public Error @@ -167,4 +182,5 @@ public: } // namespace nix +JSON_IMPL(nix::UnkeyedRealisation) JSON_IMPL(nix::Realisation) diff --git 
a/src/libstore/include/nix/store/remote-store.hh b/src/libstore/include/nix/store/remote-store.hh index 1aaf29d37..b152e054b 100644 --- a/src/libstore/include/nix/store/remote-store.hh +++ b/src/libstore/include/nix/store/remote-store.hh @@ -102,7 +102,7 @@ struct RemoteStore : public virtual Store, public virtual GcStore, public virtua void registerDrvOutput(const Realisation & info) override; void queryRealisationUncached( - const DrvOutput &, Callback> callback) noexcept override; + const DrvOutput &, Callback> callback) noexcept override; void buildPaths(const std::vector & paths, BuildMode buildMode, std::shared_ptr evalStore) override; diff --git a/src/libstore/include/nix/store/s3-binary-cache-store.hh b/src/libstore/include/nix/store/s3-binary-cache-store.hh index 2fe66b0ad..81a2d3f3f 100644 --- a/src/libstore/include/nix/store/s3-binary-cache-store.hh +++ b/src/libstore/include/nix/store/s3-binary-cache-store.hh @@ -2,41 +2,28 @@ ///@file #include "nix/store/config.hh" - -#if NIX_WITH_S3_SUPPORT - -# include "nix/store/binary-cache-store.hh" - -# include +#include "nix/store/http-binary-cache-store.hh" namespace nix { -struct S3BinaryCacheStoreConfig : std::enable_shared_from_this, virtual BinaryCacheStoreConfig +struct S3BinaryCacheStoreConfig : HttpBinaryCacheStoreConfig { - std::string bucketName; - - using BinaryCacheStoreConfig::BinaryCacheStoreConfig; + using HttpBinaryCacheStoreConfig::HttpBinaryCacheStoreConfig; S3BinaryCacheStoreConfig(std::string_view uriScheme, std::string_view bucketName, const Params & params); const Setting profile{ this, - "", + "default", "profile", R"( The name of the AWS configuration profile to use. By default Nix uses the `default` profile. )"}; -protected: - - constexpr static const char * defaultRegion = "us-east-1"; - -public: - const Setting region{ this, - defaultRegion, + "us-east-1", "region", R"( The region of the S3 bucket. If your bucket is not in @@ -46,7 +33,7 @@ public: const Setting scheme{ this, - "", + "https", "scheme", R"( The scheme used for S3 requests, `https` (default) or `http`. This @@ -64,74 +51,34 @@ public: "", "endpoint", R"( - The URL of the endpoint of an S3-compatible service such as MinIO. - Do not specify this setting if you're using Amazon S3. + The S3 endpoint to use. When empty (default), uses AWS S3 with + region-specific endpoints (e.g., s3.us-east-1.amazonaws.com). + For S3-compatible services such as MinIO, set this to your service's endpoint. > **Note** > - > This endpoint must support HTTPS and uses path-based + > Custom endpoints must support HTTPS and use path-based > addressing instead of virtual host based addressing. )"}; - const Setting narinfoCompression{ - this, "", "narinfo-compression", "Compression method for `.narinfo` files."}; - - const Setting lsCompression{this, "", "ls-compression", "Compression method for `.ls` files."}; - - const Setting logCompression{ - this, - "", - "log-compression", - R"( - Compression method for `log/*` files. It is recommended to - use a compression method supported by most web browsers - (e.g. `brotli`). - )"}; - - const Setting multipartUpload{this, false, "multipart-upload", "Whether to use multi-part uploads."}; - - const Setting bufferSize{ - this, 5 * 1024 * 1024, "buffer-size", "Size (in bytes) of each part in multi-part uploads."}; + /** + * Set of settings that are part of the S3 URI itself. + * These are needed for region specification and other S3-specific settings. 
+ */ + const std::set s3UriSettings = {&profile, ®ion, &scheme, &endpoint}; static const std::string name() { return "S3 Binary Cache Store"; } - static StringSet uriSchemes() - { - return {"s3"}; - } + static StringSet uriSchemes(); static std::string doc(); + std::string getHumanReadableURI() const override; + ref openStore() const override; - - StoreReference getReference() const override; -}; - -struct S3BinaryCacheStore : virtual BinaryCacheStore -{ - using Config = S3BinaryCacheStoreConfig; - - ref config; - - S3BinaryCacheStore(ref); - - struct Stats - { - std::atomic put{0}; - std::atomic putBytes{0}; - std::atomic putTimeMs{0}; - std::atomic get{0}; - std::atomic getBytes{0}; - std::atomic getTimeMs{0}; - std::atomic head{0}; - }; - - virtual const Stats & getS3Stats() = 0; }; } // namespace nix - -#endif diff --git a/src/libstore/include/nix/store/s3.hh b/src/libstore/include/nix/store/s3-url.hh similarity index 59% rename from src/libstore/include/nix/store/s3.hh rename to src/libstore/include/nix/store/s3-url.hh index 0270eeda6..cf59dbea8 100644 --- a/src/libstore/include/nix/store/s3.hh +++ b/src/libstore/include/nix/store/s3-url.hh @@ -1,53 +1,16 @@ #pragma once ///@file #include "nix/store/config.hh" -#if NIX_WITH_S3_SUPPORT +#include "nix/util/url.hh" +#include "nix/util/util.hh" -# include "nix/util/ref.hh" -# include "nix/util/url.hh" -# include "nix/util/util.hh" - -# include -# include -# include - -namespace Aws { -namespace Client { -struct ClientConfiguration; -} -} // namespace Aws - -namespace Aws { -namespace S3 { -class S3Client; -} -} // namespace Aws +#include +#include +#include +#include namespace nix { -struct S3Helper -{ - ref config; - ref client; - - S3Helper( - const std::string & profile, - const std::string & region, - const std::string & scheme, - const std::string & endpoint); - - ref - makeConfig(const std::string & region, const std::string & scheme, const std::string & endpoint); - - struct FileTransferResult - { - std::optional data; - unsigned int durationMs; - }; - - FileTransferResult getObject(const std::string & bucketName, const std::string & key); -}; - /** * Parsed S3 URL. */ @@ -63,6 +26,7 @@ struct ParsedS3URL std::optional profile; std::optional region; std::optional scheme; + std::optional versionId; /** * The endpoint can be either missing, be an absolute URI (with a scheme like `http:`) * or an authority (so an IP address or a registered name). 
@@ -90,5 +54,3 @@ struct ParsedS3URL }; } // namespace nix - -#endif diff --git a/src/libstore/include/nix/store/serve-protocol-connection.hh b/src/libstore/include/nix/store/serve-protocol-connection.hh index 873277db9..fa50132c8 100644 --- a/src/libstore/include/nix/store/serve-protocol-connection.hh +++ b/src/libstore/include/nix/store/serve-protocol-connection.hh @@ -82,6 +82,8 @@ struct ServeProto::BasicClientConnection BuildResult getBuildDerivationResponse(const StoreDirConfig & store); void narFromPath(const StoreDirConfig & store, const StorePath & path, std::function fun); + + void importPaths(const StoreDirConfig & store, std::function fun); }; struct ServeProto::BasicServerConnection diff --git a/src/libstore/include/nix/store/serve-protocol.hh b/src/libstore/include/nix/store/serve-protocol.hh index 4c2043f17..974bf42d5 100644 --- a/src/libstore/include/nix/store/serve-protocol.hh +++ b/src/libstore/include/nix/store/serve-protocol.hh @@ -108,6 +108,13 @@ enum struct ServeProto::Command : uint64_t { QueryValidPaths = 1, QueryPathInfos = 2, DumpStorePath = 3, + /** + * @note This is no longer used by Nix (as a client), but it is used + * by Hydra. We should therefore not remove it until Hydra no longer + * uses it either. + */ + ImportPaths = 4, + // ExportPaths = 5, BuildPaths = 6, QueryClosure = 7, BuildDerivation = 8, diff --git a/src/libstore/include/nix/store/store-api.hh b/src/libstore/include/nix/store/store-api.hh index 6d3f6b8d0..8fa13de34 100644 --- a/src/libstore/include/nix/store/store-api.hh +++ b/src/libstore/include/nix/store/store-api.hh @@ -31,6 +31,7 @@ MakeError(SubstituterDisabled, Error); MakeError(InvalidStoreReference, Error); +struct UnkeyedRealisation; struct Realisation; struct RealisedPath; struct DrvOutput; @@ -398,12 +399,12 @@ public: /** * Query the information about a realisation. */ - std::shared_ptr queryRealisation(const DrvOutput &); + std::shared_ptr queryRealisation(const DrvOutput &); /** * Asynchronous version of queryRealisation(). */ - void queryRealisation(const DrvOutput &, Callback> callback) noexcept; + void queryRealisation(const DrvOutput &, Callback> callback) noexcept; /** * Check whether the given valid path info is sufficiently attested, by @@ -430,8 +431,8 @@ protected: virtual void queryPathInfoUncached(const StorePath & path, Callback> callback) noexcept = 0; - virtual void - queryRealisationUncached(const DrvOutput &, Callback> callback) noexcept = 0; + virtual void queryRealisationUncached( + const DrvOutput &, Callback> callback) noexcept = 0; public: @@ -598,10 +599,7 @@ public: * floating-ca derivations and their dependencies as there's no way to * retrieve this information otherwise. */ - virtual void registerDrvOutput(const Realisation & output) - { - unsupported("registerDrvOutput"); - } + virtual void registerDrvOutput(const Realisation & output) = 0; virtual void registerDrvOutput(const Realisation & output, CheckSigsFlag checkSigs) { @@ -611,7 +609,7 @@ public: /** * Write a NAR dump of a store path. */ - virtual void narFromPath(const StorePath & path, Sink & sink) = 0; + virtual void narFromPath(const StorePath & path, Sink & sink); /** * For each path, if it's a derivation, build it. Building a @@ -727,10 +725,28 @@ public: * the Nix store. * * @return nullptr if the store doesn't contain an object at the - * givine path. + * given path. 
*/ virtual std::shared_ptr getFSAccessor(const StorePath & path, bool requireValidPath = true) = 0; + /** + * Get an accessor for the store object or throw an Error if it's invalid or + * doesn't exist. + * + * @throws InvalidPath if the store object doesn't exist or (if requireValidPath = true) is + * invalid. + */ + [[nodiscard]] ref requireStoreObjectAccessor(const StorePath & path, bool requireValidPath = true) + { + auto accessor = getFSAccessor(path, requireValidPath); + if (!accessor) { + throw InvalidPath( + requireValidPath ? "path '%1%' is not a valid store path" : "store path '%1%' does not exist", + printStorePath(path)); + } + return ref{accessor}; + } + /** * Repair the contents of the given path by redownloading it using * a substituter (if available). diff --git a/src/libstore/include/nix/store/store-dir-config.hh b/src/libstore/include/nix/store/store-dir-config.hh index 07cda5c12..34e928182 100644 --- a/src/libstore/include/nix/store/store-dir-config.hh +++ b/src/libstore/include/nix/store/store-dir-config.hh @@ -91,7 +91,7 @@ struct StoreDirConfig std::pair computeStorePath( std::string_view name, const SourcePath & path, - ContentAddressMethod method = FileIngestionMethod::NixArchive, + ContentAddressMethod method = ContentAddressMethod::Raw::NixArchive, HashAlgorithm hashAlgo = HashAlgorithm::SHA256, const StorePathSet & references = {}, PathFilter & filter = defaultPathFilter) const; diff --git a/src/libstore/include/nix/store/uds-remote-store.hh b/src/libstore/include/nix/store/uds-remote-store.hh index fe6e486f4..764e8768a 100644 --- a/src/libstore/include/nix/store/uds-remote-store.hh +++ b/src/libstore/include/nix/store/uds-remote-store.hh @@ -68,7 +68,7 @@ struct UDSRemoteStore : virtual IndirectRootStore, virtual RemoteStore void narFromPath(const StorePath & path, Sink & sink) override { - LocalFSStore::narFromPath(path, sink); + Store::narFromPath(path, sink); } /** diff --git a/src/libstore/include/nix/store/worker-protocol.hh b/src/libstore/include/nix/store/worker-protocol.hh index 29d4828c2..6ae5fdcbc 100644 --- a/src/libstore/include/nix/store/worker-protocol.hh +++ b/src/libstore/include/nix/store/worker-protocol.hh @@ -13,6 +13,7 @@ namespace nix { /* Note: you generally shouldn't change the protocol version. Define a new `WorkerProto::Feature` instead. 
*/ #define PROTOCOL_VERSION (1 << 8 | 38) +#define MINIMUM_PROTOCOL_VERSION (1 << 8 | 18) #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00) #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff) @@ -152,6 +153,7 @@ enum struct WorkerProto::Op : uint64_t { AddIndirectRoot = 12, SyncWithGC = 13, FindRoots = 14, + // ExportPath = 16, // removed QueryDeriver = 18, // obsolete SetOptions = 19, CollectGarbage = 20, @@ -161,6 +163,7 @@ enum struct WorkerProto::Op : uint64_t { QueryFailedPaths = 24, ClearFailedPaths = 25, QueryPathInfo = 26, + // ImportPaths = 27, // removed QueryDerivationOutputNames = 28, // obsolete QueryPathFromHashPart = 29, QuerySubstitutablePathInfos = 30, diff --git a/src/libstore/legacy-ssh-store.cc b/src/libstore/legacy-ssh-store.cc index f935de206..3b466c9bb 100644 --- a/src/libstore/legacy-ssh-store.cc +++ b/src/libstore/legacy-ssh-store.cc @@ -241,12 +241,13 @@ void LegacySSHStore::buildPaths( conn->to.flush(); - BuildResult result; - result.status = (BuildResult::Status) readInt(conn->from); - - if (!result.success()) { - conn->from >> result.errorMsg; - throw Error(result.status, result.errorMsg); + auto status = readInt(conn->from); + if (!BuildResult::Success::statusIs(status)) { + BuildResult::Failure failure{ + .status = (BuildResult::Failure::Status) status, + }; + conn->from >> failure.errorMsg; + throw Error(failure.status, std::move(failure.errorMsg)); } } diff --git a/src/libstore/local-binary-cache-store.cc b/src/libstore/local-binary-cache-store.cc index b5e43de68..63730a01b 100644 --- a/src/libstore/local-binary-cache-store.cc +++ b/src/libstore/local-binary-cache-store.cc @@ -54,15 +54,12 @@ protected: bool fileExists(const std::string & path) override; void upsertFile( - const std::string & path, - std::shared_ptr> istream, - const std::string & mimeType) override + const std::string & path, RestartableSource & source, const std::string & mimeType, uint64_t sizeHint) override { auto path2 = config->binaryCacheDir + "/" + path; static std::atomic counter{0}; Path tmp = fmt("%s.tmp.%d.%d", path2, getpid(), ++counter); AutoDelete del(tmp, false); - StreamToSourceAdapter source(istream); writeFile(tmp, source); std::filesystem::rename(tmp, path2); del.cancel(); diff --git a/src/libstore/local-fs-store.cc b/src/libstore/local-fs-store.cc index 28069dcaf..1a38cac3b 100644 --- a/src/libstore/local-fs-store.cc +++ b/src/libstore/local-fs-store.cc @@ -112,13 +112,6 @@ std::shared_ptr LocalFSStore::getFSAccessor(const StorePath & pa return std::make_shared(std::move(absPath)); } -void LocalFSStore::narFromPath(const StorePath & path, Sink & sink) -{ - if (!isValidPath(path)) - throw Error("path '%s' is not valid", printStorePath(path)); - dumpPath(getRealStoreDir() + std::string(printStorePath(path), storeDir.size()), sink); -} - const std::string LocalFSStore::drvsLogDir = "drvs"; std::optional LocalFSStore::getBuildLogExact(const StorePath & path) diff --git a/src/libstore/local-overlay-store.cc b/src/libstore/local-overlay-store.cc index 2b000b3db..f23feb8fb 100644 --- a/src/libstore/local-overlay-store.cc +++ b/src/libstore/local-overlay-store.cc @@ -77,7 +77,7 @@ void LocalOverlayStore::registerDrvOutput(const Realisation & info) // First do queryRealisation on lower layer to populate DB auto res = lowerStore->queryRealisation(info.id); if (res) - LocalStore::registerDrvOutput(*res); + LocalStore::registerDrvOutput({*res, info.id}); LocalStore::registerDrvOutput(info); } @@ -108,12 +108,12 @@ void LocalOverlayStore::queryPathInfoUncached( } void 
LocalOverlayStore::queryRealisationUncached( - const DrvOutput & drvOutput, Callback> callback) noexcept + const DrvOutput & drvOutput, Callback> callback) noexcept { auto callbackPtr = std::make_shared(std::move(callback)); LocalStore::queryRealisationUncached( - drvOutput, {[this, drvOutput, callbackPtr](std::future> fut) { + drvOutput, {[this, drvOutput, callbackPtr](std::future> fut) { try { auto info = fut.get(); if (info) @@ -123,7 +123,7 @@ void LocalOverlayStore::queryRealisationUncached( } // If we don't have it, check lower store lowerStore->queryRealisation( - drvOutput, {[callbackPtr](std::future> fut) { + drvOutput, {[callbackPtr](std::future> fut) { try { (*callbackPtr)(fut.get()); } catch (...) { diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index 4cadf5282..3f108f97e 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -997,7 +997,7 @@ void LocalStore::registerValidPaths(const ValidPathInfos & infos) }}, {[&](const StorePath & path, const StorePath & parent) { return BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "cycle detected in the references of '%s' from '%s'", printStorePath(path), printStorePath(parent)); @@ -1036,7 +1036,7 @@ bool LocalStore::pathInfoIsUntrusted(const ValidPathInfo & info) bool LocalStore::realisationIsUntrusted(const Realisation & realisation) { - return config->requireSigs && !realisation.checkSignatures(getPublicKeys()); + return config->requireSigs && !realisation.checkSignatures(realisation.id, getPublicKeys()); } void LocalStore::addToStore(const ValidPathInfo & info, Source & source, RepairFlag repair, CheckSigsFlag checkSigs) @@ -1048,15 +1048,13 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source, RepairF /* In case we are not interested in reading the NAR: discard it. */ bool narRead = false; Finally cleanup = [&]() { - if (!narRead) { - NullFileSystemObjectSink sink; + if (!narRead) try { - parseDump(sink, source); + source.skip(info.narSize); } catch (...) { // TODO: should Interrupted be handled here? ignoreExceptionInDestructor(); } - } }; addTempRoot(info.path); @@ -1383,7 +1381,7 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair) for (auto & link : DirectoryIterator{linksDir}) { checkInterrupt(); auto name = link.path().filename(); - printMsg(lvlTalkative, "checking contents of '%s'", name); + printMsg(lvlTalkative, "checking contents of %s", name); PosixSourceAccessor accessor; std::string hash = hashPath( PosixSourceAccessor::createAtRoot(link.path()), @@ -1391,10 +1389,10 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair) HashAlgorithm::SHA256) .first.to_string(HashFormat::Nix32, false); if (hash != name.string()) { - printError("link '%s' was modified! expected hash '%s', got '%s'", link.path(), name, hash); + printError("link %s was modified! 
expected hash %s, got '%s'", link.path(), name, hash); if (repair) { std::filesystem::remove(link.path()); - printInfo("removed link '%s'", link.path()); + printInfo("removed link %s", link.path()); } else { errors = true; } @@ -1586,7 +1584,7 @@ void LocalStore::addSignatures(const StorePath & storePath, const StringSet & si }); } -std::optional> +std::optional> LocalStore::queryRealisationCore_(LocalStore::State & state, const DrvOutput & id) { auto useQueryRealisedOutput(state.stmts->QueryRealisedOutput.use()(id.strHash())(id.outputName)); @@ -1598,14 +1596,13 @@ LocalStore::queryRealisationCore_(LocalStore::State & state, const DrvOutput & i return { {realisationDbId, - Realisation{ - .id = id, + UnkeyedRealisation{ .outPath = outputPath, .signatures = signatures, }}}; } -std::optional LocalStore::queryRealisation_(LocalStore::State & state, const DrvOutput & id) +std::optional LocalStore::queryRealisation_(LocalStore::State & state, const DrvOutput & id) { auto maybeCore = queryRealisationCore_(state, id); if (!maybeCore) @@ -1631,13 +1628,13 @@ std::optional LocalStore::queryRealisation_(LocalStore::State } void LocalStore::queryRealisationUncached( - const DrvOutput & id, Callback> callback) noexcept + const DrvOutput & id, Callback> callback) noexcept { try { - auto maybeRealisation = - retrySQLite>([&]() { return queryRealisation_(*_state->lock(), id); }); + auto maybeRealisation = retrySQLite>( + [&]() { return queryRealisation_(*_state->lock(), id); }); if (maybeRealisation) - callback(std::make_shared(maybeRealisation.value())); + callback(std::make_shared(maybeRealisation.value())); else callback(nullptr); diff --git a/src/libstore/meson.build b/src/libstore/meson.build index e3004ebf5..d1b3666cc 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -113,7 +113,7 @@ boost = dependency( # put in `deps_other`. deps_other += boost -curl = dependency('libcurl', 'curl') +curl = dependency('libcurl', 'curl', version : '>= 7.75.0') deps_private += curl # seccomp only makes sense on Linux @@ -142,27 +142,16 @@ deps_public += nlohmann_json sqlite = dependency('sqlite3', 'sqlite', version : '>=3.6.19') deps_private += sqlite -# AWS C++ SDK has bad pkg-config. See -# https://github.com/aws/aws-sdk-cpp/issues/2673 for details. -aws_s3 = dependency('aws-cpp-sdk-s3', required : false) -# The S3 store definitions in the header will be hidden based on this variables. 
-configdata_pub.set('NIX_WITH_S3_SUPPORT', aws_s3.found().to_int()) -if aws_s3.found() - aws_s3 = declare_dependency( - include_directories : include_directories(aws_s3.get_variable('includedir')), - link_args : [ - '-L' + aws_s3.get_variable('libdir'), - '-laws-cpp-sdk-transfer', - '-laws-cpp-sdk-s3', - '-laws-cpp-sdk-identity-management', - '-laws-cpp-sdk-cognito-identity', - '-laws-cpp-sdk-sts', - '-laws-cpp-sdk-core', - '-laws-crt-cpp', - ], - ).as_system('system') +s3_aws_auth = get_option('s3-aws-auth') +aws_crt_cpp = cxx.find_library('aws-crt-cpp', required : s3_aws_auth) + +if s3_aws_auth.enabled() + deps_other += aws_crt_cpp + aws_c_common = cxx.find_library('aws-c-common', required : true) + deps_other += aws_c_common endif -deps_other += aws_s3 + +configdata_pub.set('NIX_WITH_AWS_AUTH', s3_aws_auth.enabled().to_int()) subdir('nix-meson-build-support/generate-header') @@ -265,15 +254,16 @@ config_priv_h = configure_file( ) subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'binary-cache-store.cc', 'build-result.cc', + 'build/derivation-builder.cc', 'build/derivation-building-goal.cc', 'build/derivation-check.cc', 'build/derivation-env-desugar.cc', 'build/derivation-goal.cc', + 'build/derivation-resolution-goal.cc', 'build/derivation-trampoline-goal.cc', 'build/drv-output-substitution-goal.cc', 'build/entry-points.cc', @@ -329,7 +319,7 @@ sources = files( 'remote-store.cc', 'restricted-store.cc', 's3-binary-cache-store.cc', - 's3.cc', + 's3-url.cc', 'serve-protocol-connection.cc', 'serve-protocol.cc', 'sqlite.cc', @@ -344,6 +334,11 @@ sources = files( 'worker-protocol.cc', ) +# AWS credentials code requires AWS CRT, so only compile when enabled +if s3_aws_auth.enabled() + sources += files('aws-creds.cc') +endif + subdir('include/nix/store') if host_machine.system() == 'linux' diff --git a/src/libstore/meson.options b/src/libstore/meson.options index b8414068d..c822133df 100644 --- a/src/libstore/meson.options +++ b/src/libstore/meson.options @@ -33,3 +33,9 @@ option( value : '/nix/var/log/nix', description : 'path to store logs in for Nix', ) + +option( + 's3-aws-auth', + type : 'feature', + description : 'build support for AWS authentication with S3', +) diff --git a/src/libstore/misc.cc b/src/libstore/misc.cc index c5e1747c1..8b2a7287e 100644 --- a/src/libstore/misc.cc +++ b/src/libstore/misc.cc @@ -126,13 +126,13 @@ MissingPaths Store::queryMissing(const std::vector & targets) std::function doPath; - std::function, const DerivedPathMap::ChildNode &)> enqueueDerivedPaths; - - enqueueDerivedPaths = [&](ref inputDrv, const DerivedPathMap::ChildNode & inputNode) { + auto enqueueDerivedPaths = [&](this auto self, + ref inputDrv, + const DerivedPathMap::ChildNode & inputNode) -> void { if (!inputNode.value.empty()) pool.enqueue(std::bind(doPath, DerivedPath::Built{inputDrv, inputNode.value})); for (const auto & [outputName, childNode] : inputNode.childMap) - enqueueDerivedPaths(make_ref(SingleDerivedPath::Built{inputDrv, outputName}), childNode); + self(make_ref(SingleDerivedPath::Built{inputDrv, outputName}), childNode); }; auto mustBuildDrv = [&](const StorePath & drvPath, const Derivation & drv) { @@ -322,7 +322,7 @@ StorePaths Store::topoSortPaths(const StorePathSet & paths) }}, {[&](const StorePath & path, const StorePath & parent) { return BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "cycle detected in the references of '%s' from '%s'", printStorePath(path), printStorePath(parent)); @@ 
-350,9 +350,9 @@ drvOutputReferences(Store & store, const Derivation & drv, const StorePath & out std::set inputRealisations; - std::function::ChildNode &)> accumRealisations; - - accumRealisations = [&](const StorePath & inputDrv, const DerivedPathMap::ChildNode & inputNode) { + auto accumRealisations = [&](this auto & self, + const StorePath & inputDrv, + const DerivedPathMap::ChildNode & inputNode) -> void { if (!inputNode.value.empty()) { auto outputHashes = staticOutputHashes(evalStore, evalStore.readDerivation(inputDrv)); for (const auto & outputName : inputNode.value) { @@ -360,18 +360,19 @@ drvOutputReferences(Store & store, const Derivation & drv, const StorePath & out if (!outputHash) throw Error( "output '%s' of derivation '%s' isn't realised", outputName, store.printStorePath(inputDrv)); - auto thisRealisation = store.queryRealisation(DrvOutput{*outputHash, outputName}); + DrvOutput key{*outputHash, outputName}; + auto thisRealisation = store.queryRealisation(key); if (!thisRealisation) throw Error( "output '%s' of derivation '%s' isn’t built", outputName, store.printStorePath(inputDrv)); - inputRealisations.insert(*thisRealisation); + inputRealisations.insert({*thisRealisation, std::move(key)}); } } if (!inputNode.value.empty()) { auto d = makeConstantStorePathRef(inputDrv); for (const auto & [outputName, childNode] : inputNode.childMap) { SingleDerivedPath next = SingleDerivedPath::Built{d, outputName}; - accumRealisations( + self( // TODO deep resolutions for dynamic derivations, issue #8947, would go here. resolveDerivedPath(store, next, evalStore_), childNode); diff --git a/src/libstore/nar-accessor.cc b/src/libstore/nar-accessor.cc index 63fe774c9..640b77540 100644 --- a/src/libstore/nar-accessor.cc +++ b/src/libstore/nar-accessor.cc @@ -141,14 +141,10 @@ struct NarAccessor : public SourceAccessor parseDump(indexer, indexer); } - NarAccessor(const std::string & listing, GetNarBytes getNarBytes) + NarAccessor(const nlohmann::json & listing, GetNarBytes getNarBytes) : getNarBytes(getNarBytes) { - using json = nlohmann::json; - - std::function recurse; - - recurse = [&](NarMember & member, json & v) { + [&](this const auto & recurse, NarMember & member, const nlohmann::json & v) -> void { std::string type = v["type"]; if (type == "directory") { @@ -167,10 +163,7 @@ struct NarAccessor : public SourceAccessor member.target = v.value("target", ""); } else return; - }; - - json v = json::parse(listing); - recurse(root, v); + }(root, listing); } NarMember * find(const CanonPath & path) @@ -251,11 +244,34 @@ ref makeNarAccessor(Source & source) return make_ref(source); } -ref makeLazyNarAccessor(const std::string & listing, GetNarBytes getNarBytes) +ref makeLazyNarAccessor(const nlohmann::json & listing, GetNarBytes getNarBytes) { return make_ref(listing, getNarBytes); } +GetNarBytes seekableGetNarBytes(const Path & path) +{ + return [path](uint64_t offset, uint64_t length) { + AutoCloseFD fd = toDescriptor(open( + path.c_str(), + O_RDONLY +#ifndef _WIN32 + | O_CLOEXEC +#endif + )); + if (!fd) + throw SysError("opening NAR cache file '%s'", path); + + if (lseek(fromDescriptorReadOnly(fd.get()), offset, SEEK_SET) != (off_t) offset) + throw SysError("seeking in '%s'", path); + + std::string buf(length, 0); + readFull(fd.get(), buf.data(), length); + + return buf; + }; +} + using nlohmann::json; json listNar(ref accessor, const CanonPath & path, bool recurse) diff --git a/src/libstore/nar-info.cc b/src/libstore/nar-info.cc index 1e7c48287..6f1abb273 100644 --- 
a/src/libstore/nar-info.cc +++ b/src/libstore/nar-info.cc @@ -159,17 +159,19 @@ NarInfo NarInfo::fromJSON(const StoreDirConfig & store, const StorePath & path, UnkeyedValidPathInfo::fromJSON(store, json), }}; + auto & obj = getObject(json); + if (json.contains("url")) - res.url = getString(valueAt(json, "url")); + res.url = getString(valueAt(obj, "url")); if (json.contains("compression")) - res.compression = getString(valueAt(json, "compression")); + res.compression = getString(valueAt(obj, "compression")); if (json.contains("downloadHash")) - res.fileHash = Hash::parseAny(getString(valueAt(json, "downloadHash")), std::nullopt); + res.fileHash = Hash::parseAny(getString(valueAt(obj, "downloadHash")), std::nullopt); if (json.contains("downloadSize")) - res.fileSize = getUnsigned(valueAt(json, "downloadSize")); + res.fileSize = getUnsigned(valueAt(obj, "downloadSize")); return res; } diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc index 1cf28e022..3e02fa812 100644 --- a/src/libstore/optimise-store.cc +++ b/src/libstore/optimise-store.cc @@ -202,7 +202,7 @@ void LocalStore::optimisePath_( full. When that happens, it's fine to ignore it: we just effectively disable deduplication of this file. */ - printInfo("cannot link '%s' to '%s': %s", linkPath, path, strerror(errno)); + printInfo("cannot link %s to '%s': %s", linkPath, path, strerror(errno)); return; } @@ -216,11 +216,11 @@ void LocalStore::optimisePath_( auto stLink = lstat(linkPath.string()); if (st.st_ino == stLink.st_ino) { - debug("'%1%' is already linked to '%2%'", path, linkPath); + debug("'%1%' is already linked to %2%", path, linkPath); return; } - printMsg(lvlTalkative, "linking '%1%' to '%2%'", path, linkPath); + printMsg(lvlTalkative, "linking '%1%' to %2%", path, linkPath); /* Make the containing directory writable, but only if it's not the store itself (we don't want or need to mess with its @@ -245,7 +245,7 @@ void LocalStore::optimisePath_( systems). This is likely to happen with empty files. Just shrug and ignore. */ if (st.st_size) - printInfo("'%1%' has maximum number of links", linkPath); + printInfo("%1% has maximum number of links", linkPath); return; } throw; @@ -256,13 +256,13 @@ void LocalStore::optimisePath_( std::filesystem::rename(tempLink, path); } catch (std::filesystem::filesystem_error & e) { std::filesystem::remove(tempLink); - printError("unable to unlink '%1%'", tempLink); + printError("unable to unlink %1%", tempLink); if (e.code() == std::errc::too_many_links) { /* Some filesystems generate too many links on the rename, rather than on the original link. (Probably it temporarily increases the st_nlink field before decreasing it again.) 
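A minimal stand-alone sketch of the link-then-rename deduplication the surrounding comments describe, with hypothetical paths and a reduced error model (the real optimisePath_ also canonicalises metadata, tracks inodes, and adjusts directory permissions):

```cpp
#include <cerrno>
#include <filesystem>
#include <unistd.h>

// Sketch: hard-link `path` to the content-addressed `linkPath` via a
// temporary link, tolerating EMLINK (the filesystem's per-inode link
// limit), as the comments above describe. Not the store's actual code.
bool dedupe(const std::filesystem::path & linkPath, const std::filesystem::path & path)
{
    auto tempLink = path.parent_path() / ".tmp-link"; // hypothetical temp name
    if (link(linkPath.c_str(), tempLink.c_str()) == -1) {
        if (errno == EMLINK)
            return false; // too many links to this inode; skip deduplication
        throw std::filesystem::filesystem_error(
            "link failed", linkPath, path, std::error_code(errno, std::generic_category()));
    }
    std::filesystem::rename(tempLink, path); // atomically replace the original
    return true;
}
```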
*/ - debug("'%s' has reached maximum number of links", linkPath); + debug("%s has reached maximum number of links", linkPath); return; } throw; @@ -312,7 +312,7 @@ void LocalStore::optimiseStore() optimiseStore(stats); - printInfo("%s freed by hard-linking %d files", showBytes(stats.bytesFreed), stats.filesLinked); + printInfo("%s freed by hard-linking %d files", renderSize(stats.bytesFreed), stats.filesLinked); } void LocalStore::optimisePath(const Path & path, RepairFlag repair) diff --git a/src/libstore/outputs-spec.cc b/src/libstore/outputs-spec.cc index aacc964cd..622df5fc3 100644 --- a/src/libstore/outputs-spec.cc +++ b/src/libstore/outputs-spec.cc @@ -1,10 +1,10 @@ -#include #include +#include +#include "nix/store/path.hh" +#include "nix/store/store-dir-config.hh" #include "nix/util/util.hh" -#include "nix/util/regex-combinators.hh" #include "nix/store/outputs-spec.hh" -#include "nix/store/path-regex.hh" #include "nix/util/strings-inline.hh" namespace nix { @@ -19,31 +19,27 @@ bool OutputsSpec::contains(const std::string & outputName) const raw); } -static std::string outputSpecRegexStr = regex::either(regex::group(R"(\*)"), regex::group(regex::list(nameRegexStr))); - std::optional OutputsSpec::parseOpt(std::string_view s) { - static std::regex regex(std::string{outputSpecRegexStr}); - - std::cmatch match; - if (!std::regex_match(s.cbegin(), s.cend(), match, regex)) + try { + return parse(s); + } catch (BadStorePathName &) { return std::nullopt; - - if (match[1].matched) - return {OutputsSpec::All{}}; - - if (match[2].matched) - return OutputsSpec::Names{tokenizeString({match[2].first, match[2].second}, ",")}; - - assert(false); + } } OutputsSpec OutputsSpec::parse(std::string_view s) { - std::optional spec = parseOpt(s); - if (!spec) - throw Error("invalid outputs specifier '%s'", s); - return std::move(*spec); + using namespace std::string_view_literals; + + if (s == "*"sv) + return OutputsSpec::All{}; + + auto names = splitString(s, ","); + for (const auto & name : names) + checkName(name); + + return OutputsSpec::Names{std::move(names)}; } std::optional> ExtendedOutputsSpec::parseOpt(std::string_view s) diff --git a/src/libstore/package.nix b/src/libstore/package.nix index d890d2256..b451b4041 100644 --- a/src/libstore/package.nix +++ b/src/libstore/package.nix @@ -9,7 +9,7 @@ nix-util, boost, curl, - aws-sdk-cpp, + aws-crt-cpp, libseccomp, nlohmann_json, sqlite, @@ -64,7 +64,7 @@ mkMesonLibrary (finalAttrs: { sqlite ] ++ lib.optional stdenv.hostPlatform.isLinux libseccomp - ++ lib.optional withAWS aws-sdk-cpp; + ++ lib.optional withAWS aws-crt-cpp; propagatedBuildInputs = [ nix-util @@ -74,6 +74,7 @@ mkMesonLibrary (finalAttrs: { mesonFlags = [ (lib.mesonEnable "seccomp-sandboxing" stdenv.hostPlatform.isLinux) (lib.mesonBool "embedded-sandbox-shell" embeddedSandboxShell) + (lib.mesonEnable "s3-aws-auth" withAWS) ] ++ lib.optionals stdenv.hostPlatform.isLinux [ (lib.mesonOption "sandbox-shell" "${busybox-sandbox-shell}/bin/busybox") diff --git a/src/libstore/parsed-derivations.cc b/src/libstore/parsed-derivations.cc index 9e8d44d6e..8d147f65f 100644 --- a/src/libstore/parsed-derivations.cc +++ b/src/libstore/parsed-derivations.cc @@ -33,7 +33,8 @@ std::optional StructuredAttrs::tryExtract(StringPairs & env) std::pair StructuredAttrs::unparse() const { - return {envVarName, structuredAttrs.dump()}; + // TODO don't copy the JSON object just to dump it. 
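The outputs-spec rewrite above swaps regex matching for split-and-validate. A rough stand-alone sketch of that parse logic, with `std::views::split` and a trivial emptiness check standing in for Nix's internal `splitString`/`checkName` helpers:

```cpp
#include <ranges>
#include <stdexcept>
#include <string>
#include <string_view>
#include <variant>
#include <vector>

// Either "all outputs" ("*") or an explicit, validated list like "out,dev".
using OutputsSpec = std::variant<std::monostate /* all */, std::vector<std::string>>;

OutputsSpec parseOutputsSpec(std::string_view s)
{
    if (s == "*")
        return std::monostate{};
    std::vector<std::string> names;
    for (auto part : std::views::split(s, ',')) {
        std::string name(part.begin(), part.end());
        // Stand-in for checkName(): reject empty segments such as "out,,dev".
        if (name.empty())
            throw std::invalid_argument("invalid output name");
        names.push_back(std::move(name));
    }
    return names;
}
```

As in the diff, failure surfaces as an exception, which `parseOpt` can translate into `std::nullopt`.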
+ return {envVarName, static_cast(structuredAttrs).dump()}; } void StructuredAttrs::checkKeyNotInUse(const StringPairs & env) @@ -97,7 +98,7 @@ static nlohmann::json pathInfoToJSON(Store & store, const StorePathSet & storePa return jsonList; } -nlohmann::json StructuredAttrs::prepareStructuredAttrs( +nlohmann::json::object_t StructuredAttrs::prepareStructuredAttrs( Store & store, const DerivationOptions & drvOptions, const StorePathSet & inputPaths, @@ -120,7 +121,7 @@ nlohmann::json StructuredAttrs::prepareStructuredAttrs( return json; } -std::string StructuredAttrs::writeShell(const nlohmann::json & json) +std::string StructuredAttrs::writeShell(const nlohmann::json::object_t & json) { auto handleSimpleType = [](const nlohmann::json & value) -> std::optional { @@ -144,7 +145,7 @@ std::string StructuredAttrs::writeShell(const nlohmann::json & json) std::string jsonSh; - for (auto & [key, value] : json.items()) { + for (auto & [key, value] : json) { if (!std::regex_match(key, shVarName)) continue; diff --git a/src/libstore/path-info.cc b/src/libstore/path-info.cc index 270c532bb..09a78a4ad 100644 --- a/src/libstore/path-info.cc +++ b/src/libstore/path-info.cc @@ -203,23 +203,23 @@ UnkeyedValidPathInfo UnkeyedValidPathInfo::fromJSON(const StoreDirConfig & store // New format as this as nullable but mandatory field; handling // missing is for back-compat. - if (json.contains("ca")) - if (auto * rawCa = getNullable(valueAt(json, "ca"))) + if (auto * rawCa0 = optionalValueAt(json, "ca")) + if (auto * rawCa = getNullable(*rawCa0)) res.ca = ContentAddress::parse(getString(*rawCa)); - if (json.contains("deriver")) - if (auto * rawDeriver = getNullable(valueAt(json, "deriver"))) + if (auto * rawDeriver0 = optionalValueAt(json, "deriver")) + if (auto * rawDeriver = getNullable(*rawDeriver0)) res.deriver = store.parseStorePath(getString(*rawDeriver)); - if (json.contains("registrationTime")) - if (auto * rawRegistrationTime = getNullable(valueAt(json, "registrationTime"))) + if (auto * rawRegistrationTime0 = optionalValueAt(json, "registrationTime")) + if (auto * rawRegistrationTime = getNullable(*rawRegistrationTime0)) res.registrationTime = getInteger(*rawRegistrationTime); - if (json.contains("ultimate")) - res.ultimate = getBoolean(valueAt(json, "ultimate")); + if (auto * rawUltimate = optionalValueAt(json, "ultimate")) + res.ultimate = getBoolean(*rawUltimate); - if (json.contains("signatures")) - res.sigs = getStringSet(valueAt(json, "signatures")); + if (auto * rawSignatures = optionalValueAt(json, "signatures")) + res.sigs = getStringSet(*rawSignatures); return res; } diff --git a/src/libstore/path-references.cc b/src/libstore/path-references.cc index 8b167e902..3d783bbe4 100644 --- a/src/libstore/path-references.cc +++ b/src/libstore/path-references.cc @@ -1,11 +1,15 @@ #include "nix/store/path-references.hh" #include "nix/util/hash.hh" #include "nix/util/archive.hh" +#include "nix/util/source-accessor.hh" +#include "nix/util/canon-path.hh" +#include "nix/util/logging.hh" #include #include #include #include +#include namespace nix { @@ -54,4 +58,90 @@ StorePathSet scanForReferences(Sink & toTee, const Path & path, const StorePathS return refsSink.getResultPaths(); } +void scanForReferencesDeep( + SourceAccessor & accessor, + const CanonPath & rootPath, + const StorePathSet & refs, + std::function callback) +{ + // Recursive tree walker + auto walk = [&](this auto & self, const CanonPath & path) -> void { + auto stat = accessor.lstat(path); + + switch (stat.type) { + case 
SourceAccessor::tRegular: { + // Create a fresh sink for each file to independently detect references. + // RefScanSink accumulates found hashes globally - once a hash is found, + // it remains in the result set. If we reused the same sink across files, + // we couldn't distinguish which files contain which references, as a hash + // found in an earlier file wouldn't be reported when found in later files. + PathRefScanSink sink = PathRefScanSink::fromPaths(refs); + + // Scan this file by streaming its contents through the sink + accessor.readFile(path, sink); + + // Get the references found in this file + auto foundRefs = sink.getResultPaths(); + + // Report if we found anything in this file + if (!foundRefs.empty()) { + debug("scanForReferencesDeep: found %d references in %s", foundRefs.size(), path.abs()); + callback(FileRefScanResult{.filePath = path, .foundRefs = std::move(foundRefs)}); + } + break; + } + + case SourceAccessor::tDirectory: { + // Recursively scan directory contents + auto entries = accessor.readDirectory(path); + for (const auto & [name, entryType] : entries) { + self(path / name); + } + break; + } + + case SourceAccessor::tSymlink: { + // Create a fresh sink for the symlink target (same reason as regular files) + PathRefScanSink sink = PathRefScanSink::fromPaths(refs); + + // Scan symlink target for references + auto target = accessor.readLink(path); + sink(std::string_view(target)); + + // Get the references found in this symlink target + auto foundRefs = sink.getResultPaths(); + + if (!foundRefs.empty()) { + debug("scanForReferencesDeep: found %d references in symlink %s", foundRefs.size(), path.abs()); + callback(FileRefScanResult{.filePath = path, .foundRefs = std::move(foundRefs)}); + } + break; + } + + case SourceAccessor::tChar: + case SourceAccessor::tBlock: + case SourceAccessor::tSocket: + case SourceAccessor::tFifo: + case SourceAccessor::tUnknown: + default: + throw Error("file '%s' has an unsupported type", path.abs()); + } + }; + + // Start the recursive walk from the root + walk(rootPath); +} + +std::map +scanForReferencesDeep(SourceAccessor & accessor, const CanonPath & rootPath, const StorePathSet & refs) +{ + std::map results; + + scanForReferencesDeep(accessor, rootPath, refs, [&](FileRefScanResult result) { + results[std::move(result.filePath)] = std::move(result.foundRefs); + }); + + return results; +} + } // namespace nix diff --git a/src/libstore/posix-fs-canonicalise.cc b/src/libstore/posix-fs-canonicalise.cc index b6a64e65b..a274468c3 100644 --- a/src/libstore/posix-fs-canonicalise.cc +++ b/src/libstore/posix-fs-canonicalise.cc @@ -98,7 +98,7 @@ static void canonicalisePathMetaData_( (i.e. "touch $out/foo; ln $out/foo $out/bar"). 
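Several rewrites in this diff (`queryMissing`, `NarAccessor`, `scanForReferencesDeep` above) use the same C++23 "deducing this" idiom to make a lambda recursive without the old `std::function` indirection. A minimal compilable illustration of the pattern, not taken from the Nix sources:

```cpp
#include <filesystem>
#include <iostream>

int main()
{
    // The explicit object parameter `self` names the closure itself,
    // so the lambda can recurse directly.
    auto walk = [](this auto & self, const std::filesystem::path & p) -> void {
        if (std::filesystem::is_directory(p))
            for (const auto & entry : std::filesystem::directory_iterator(p))
                self(entry.path());
        else
            std::cout << p << '\n';
    };
    walk(".");
}
```

Besides removing a heap allocation and a declaration/definition split, this lets the compiler inline the recursion, which `std::function` prevents.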
*/ if (uidRange && (st.st_uid < uidRange->first || st.st_uid > uidRange->second)) { if (S_ISDIR(st.st_mode) || !inodesSeen.count(Inode(st.st_dev, st.st_ino))) - throw BuildError(BuildResult::OutputRejected, "invalid ownership on file '%1%'", path); + throw BuildError(BuildResult::Failure::OutputRejected, "invalid ownership on file '%1%'", path); mode_t mode = st.st_mode & ~S_IFMT; assert( S_ISLNK(st.st_mode) diff --git a/src/libstore/realisation.cc b/src/libstore/realisation.cc index febd67bd2..a7f3b98d6 100644 --- a/src/libstore/realisation.cc +++ b/src/libstore/realisation.cc @@ -39,7 +39,7 @@ void Realisation::closure(Store & store, const std::set & startOutp std::set res; for (auto & [currentDep, _] : current.dependentRealisations) { if (auto currentRealisation = store.queryRealisation(currentDep)) - res.insert(*currentRealisation); + res.insert({*currentRealisation, currentDep}); else throw Error("Unrealised derivation '%s'", currentDep.to_string()); } @@ -61,24 +61,25 @@ void Realisation::closure(Store & store, const std::set & startOutp }); } -std::string Realisation::fingerprint() const +std::string UnkeyedRealisation::fingerprint(const DrvOutput & key) const { - nlohmann::json serialized = *this; + nlohmann::json serialized = Realisation{*this, key}; serialized.erase("signatures"); return serialized.dump(); } -void Realisation::sign(const Signer & signer) +void UnkeyedRealisation::sign(const DrvOutput & key, const Signer & signer) { - signatures.insert(signer.signDetached(fingerprint())); + signatures.insert(signer.signDetached(fingerprint(key))); } -bool Realisation::checkSignature(const PublicKeys & publicKeys, const std::string & sig) const +bool UnkeyedRealisation::checkSignature( + const DrvOutput & key, const PublicKeys & publicKeys, const std::string & sig) const { - return verifyDetached(fingerprint(), sig, publicKeys); + return verifyDetached(fingerprint(key), sig, publicKeys); } -size_t Realisation::checkSignatures(const PublicKeys & publicKeys) const +size_t UnkeyedRealisation::checkSignatures(const DrvOutput & key, const PublicKeys & publicKeys) const { // FIXME: Maybe we should return `maxSigs` if the realisation corresponds to // an input-addressed one − because in that case the drv is enough to check @@ -86,19 +87,18 @@ size_t Realisation::checkSignatures(const PublicKeys & publicKeys) const size_t good = 0; for (auto & sig : signatures) - if (checkSignature(publicKeys, sig)) + if (checkSignature(key, publicKeys, sig)) good++; return good; } -StorePath RealisedPath::path() const +const StorePath & RealisedPath::path() const & { - return std::visit([](auto && arg) { return arg.getPath(); }, raw); + return std::visit([](auto & arg) -> auto & { return arg.getPath(); }, raw); } -bool Realisation::isCompatibleWith(const Realisation & other) const +bool Realisation::isCompatibleWith(const UnkeyedRealisation & other) const { - assert(id == other.id); if (outPath == other.outPath) { if (dependentRealisations.empty() != other.dependentRealisations.empty()) { warn( @@ -144,7 +144,7 @@ namespace nlohmann { using namespace nix; -Realisation adl_serializer::from_json(const json & json0) +UnkeyedRealisation adl_serializer::from_json(const json & json0) { auto json = getObject(json0); @@ -157,25 +157,39 @@ Realisation adl_serializer::from_json(const json & json0) for (auto & [jsonDepId, jsonDepOutPath] : getObject(*jsonDependencies)) dependentRealisations.insert({DrvOutput::parse(jsonDepId), jsonDepOutPath}); - return Realisation{ - .id = DrvOutput::parse(valueAt(json, "id")), + 
return UnkeyedRealisation{ .outPath = valueAt(json, "outPath"), .signatures = signatures, .dependentRealisations = dependentRealisations, }; } -void adl_serializer::to_json(json & json, const Realisation & r) +void adl_serializer::to_json(json & json, const UnkeyedRealisation & r) { auto jsonDependentRealisations = nlohmann::json::object(); for (auto & [depId, depOutPath] : r.dependentRealisations) jsonDependentRealisations.emplace(depId.to_string(), depOutPath); json = { - {"id", r.id.to_string()}, {"outPath", r.outPath}, {"signatures", r.signatures}, {"dependentRealisations", jsonDependentRealisations}, }; } +Realisation adl_serializer::from_json(const json & json0) +{ + auto json = getObject(json0); + + return Realisation{ + static_cast(json0), + DrvOutput::parse(valueAt(json, "id")), + }; +} + +void adl_serializer::to_json(json & json, const Realisation & r) +{ + json = static_cast(r); + json["id"] = r.id.to_string(); +} + } // namespace nlohmann diff --git a/src/libstore/remote-fs-accessor.cc b/src/libstore/remote-fs-accessor.cc index e6715cbdf..f7ca28ae2 100644 --- a/src/libstore/remote-fs-accessor.cc +++ b/src/libstore/remote-fs-accessor.cc @@ -70,26 +70,8 @@ std::shared_ptr RemoteFSAccessor::accessObject(const StorePath & try { listing = nix::readFile(makeCacheFile(storePath.hashPart(), "ls")); - - auto narAccessor = makeLazyNarAccessor(listing, [cacheFile](uint64_t offset, uint64_t length) { - AutoCloseFD fd = toDescriptor(open( - cacheFile.c_str(), - O_RDONLY -#ifndef _WIN32 - | O_CLOEXEC -#endif - )); - if (!fd) - throw SysError("opening NAR cache file '%s'", cacheFile); - - if (lseek(fromDescriptorReadOnly(fd.get()), offset, SEEK_SET) != (off_t) offset) - throw SysError("seeking in '%s'", cacheFile); - - std::string buf(length, 0); - readFull(fd.get(), buf.data(), length); - - return buf; - }); + auto listingJson = nlohmann::json::parse(listing); + auto narAccessor = makeLazyNarAccessor(listingJson, seekableGetNarBytes(cacheFile)); nars.emplace(storePath.hashPart(), narAccessor); return narAccessor; diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index bb7425081..949a51f18 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -73,7 +73,7 @@ void RemoteStore::initConnection(Connection & conn) try { auto [protoVersion, features] = WorkerProto::BasicClientConnection::handshake(conn.to, tee, PROTOCOL_VERSION, WorkerProto::allFeatures); - if (protoVersion < 256 + 18) + if (protoVersion < MINIMUM_PROTOCOL_VERSION) throw Error("the Nix daemon version is too old"); conn.protoVersion = protoVersion; conn.features = features; @@ -159,7 +159,8 @@ void RemoteStore::setOptions() bool RemoteStore::isValidPathUncached(const StorePath & path) { auto conn(getConnection()); - conn->to << WorkerProto::Op::IsValidPath << printStorePath(path); + conn->to << WorkerProto::Op::IsValidPath; + WorkerProto::write(*this, *conn, path); conn.processStderr(); return readInt(conn->from); } @@ -205,10 +206,8 @@ void RemoteStore::querySubstitutablePathInfos(const StorePathCAMap & pathsMap, S conn.processStderr(); size_t count = readNum(conn->from); for (size_t n = 0; n < count; n++) { - SubstitutablePathInfo & info(infos[parseStorePath(readString(conn->from))]); - auto deriver = readString(conn->from); - if (deriver != "") - info.deriver = parseStorePath(deriver); + SubstitutablePathInfo & info(infos[WorkerProto::Serialise::read(*this, *conn)]); + info.deriver = WorkerProto::Serialise>::read(*this, *conn); info.references = WorkerProto::Serialise::read(*this, 
*conn); info.downloadSize = readLongLong(conn->from); info.narSize = readLongLong(conn->from); @@ -235,7 +234,8 @@ void RemoteStore::queryPathInfoUncached( void RemoteStore::queryReferrers(const StorePath & path, StorePathSet & referrers) { auto conn(getConnection()); - conn->to << WorkerProto::Op::QueryReferrers << printStorePath(path); + conn->to << WorkerProto::Op::QueryReferrers; + WorkerProto::write(*this, *conn, path); conn.processStderr(); for (auto & i : WorkerProto::Serialise::read(*this, *conn)) referrers.insert(i); @@ -244,7 +244,8 @@ void RemoteStore::queryReferrers(const StorePath & path, StorePathSet & referrer StorePathSet RemoteStore::queryValidDerivers(const StorePath & path) { auto conn(getConnection()); - conn->to << WorkerProto::Op::QueryValidDerivers << printStorePath(path); + conn->to << WorkerProto::Op::QueryValidDerivers; + WorkerProto::write(*this, *conn, path); conn.processStderr(); return WorkerProto::Serialise::read(*this, *conn); } @@ -255,7 +256,8 @@ StorePathSet RemoteStore::queryDerivationOutputs(const StorePath & path) return Store::queryDerivationOutputs(path); } auto conn(getConnection()); - conn->to << WorkerProto::Op::QueryDerivationOutputs << printStorePath(path); + conn->to << WorkerProto::Op::QueryDerivationOutputs; + WorkerProto::write(*this, *conn, path); conn.processStderr(); return WorkerProto::Serialise::read(*this, *conn); } @@ -266,7 +268,8 @@ RemoteStore::queryPartialDerivationOutputMap(const StorePath & path, Store * eva if (GET_PROTOCOL_MINOR(getProtocol()) >= 0x16) { if (!evalStore_) { auto conn(getConnection()); - conn->to << WorkerProto::Op::QueryDerivationOutputMap << printStorePath(path); + conn->to << WorkerProto::Op::QueryDerivationOutputMap; + WorkerProto::write(*this, *conn, path); conn.processStderr(); return WorkerProto::Serialise>>::read(*this, *conn); } else { @@ -299,10 +302,7 @@ std::optional RemoteStore::queryPathFromHashPart(const std::string & auto conn(getConnection()); conn->to << WorkerProto::Op::QueryPathFromHashPart << hashPart; conn.processStderr(); - Path path = readString(conn->from); - if (path.empty()) - return {}; - return parseStorePath(path); + return WorkerProto::Serialise>::read(*this, *conn); } ref RemoteStore::addCAToStore( @@ -384,7 +384,7 @@ ref RemoteStore::addCAToStore( break; } } - auto path = parseStorePath(readString(conn->from)); + auto path = WorkerProto::Serialise::read(*this, *conn); // Release our connection to prevent a deadlock in queryPathInfo(). conn_.reset(); return queryPathInfo(path); @@ -426,9 +426,10 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source, Repair { auto conn(getConnection()); - conn->to << WorkerProto::Op::AddToStoreNar << printStorePath(info.path) - << (info.deriver ? 
printStorePath(*info.deriver) : "") - << info.narHash.to_string(HashFormat::Base16, false); + conn->to << WorkerProto::Op::AddToStoreNar; + WorkerProto::write(*this, *conn, info.path); + WorkerProto::write(*this, *conn, info.deriver); + conn->to << info.narHash.to_string(HashFormat::Base16, false); WorkerProto::write(*this, *conn, info.references); conn->to << info.registrationTime << info.narSize << info.ultimate << info.sigs << renderContentAddress(info.ca) << repair << !checkSigs; @@ -501,7 +502,7 @@ void RemoteStore::registerDrvOutput(const Realisation & info) } void RemoteStore::queryRealisationUncached( - const DrvOutput & id, Callback> callback) noexcept + const DrvOutput & id, Callback> callback) noexcept { try { auto conn(getConnection()); @@ -515,21 +516,21 @@ void RemoteStore::queryRealisationUncached( conn->to << id.to_string(); conn.processStderr(); - auto real = [&]() -> std::shared_ptr { + auto real = [&]() -> std::shared_ptr { if (GET_PROTOCOL_MINOR(conn->protoVersion) < 31) { auto outPaths = WorkerProto::Serialise>::read(*this, *conn); if (outPaths.empty()) return nullptr; - return std::make_shared(Realisation{.id = id, .outPath = *outPaths.begin()}); + return std::make_shared(UnkeyedRealisation{.outPath = *outPaths.begin()}); } else { auto realisations = WorkerProto::Serialise>::read(*this, *conn); if (realisations.empty()) return nullptr; - return std::make_shared(*realisations.begin()); + return std::make_shared(*realisations.begin()); } }(); - callback(std::shared_ptr(real)); + callback(std::shared_ptr(real)); } catch (...) { return callback.rethrow(); } @@ -598,16 +599,15 @@ std::vector RemoteStore::buildPathsWithResults( [&](const DerivedPath::Opaque & bo) { results.push_back( KeyedBuildResult{ - { - .status = BuildResult::Substituted, - }, + {.inner{BuildResult::Success{ + .status = BuildResult::Success::Substituted, + }}}, /* .path = */ bo, }); }, [&](const DerivedPath::Built & bfd) { - KeyedBuildResult res{ - {.status = BuildResult::Built}, - /* .path = */ bfd, + BuildResult::Success success{ + .status = BuildResult::Success::Built, }; OutputPathMap outputs; @@ -627,18 +627,24 @@ std::vector RemoteStore::buildPathsWithResults( auto realisation = queryRealisation(outputId); if (!realisation) throw MissingRealisation(outputId); - res.builtOutputs.emplace(output, *realisation); + success.builtOutputs.emplace(output, Realisation{*realisation, outputId}); } else { - res.builtOutputs.emplace( + success.builtOutputs.emplace( output, Realisation{ - .id = outputId, - .outPath = outputPath, + UnkeyedRealisation{ + .outPath = outputPath, + }, + outputId, }); } } - results.push_back(res); + results.push_back( + KeyedBuildResult{ + {.inner = std::move(success)}, + /* .path = */ bfd, + }); }}, path.raw()); } @@ -658,7 +664,8 @@ BuildResult RemoteStore::buildDerivation(const StorePath & drvPath, const BasicD void RemoteStore::ensurePath(const StorePath & path) { auto conn(getConnection()); - conn->to << WorkerProto::Op::EnsurePath << printStorePath(path); + conn->to << WorkerProto::Op::EnsurePath; + WorkerProto::write(*this, *conn, path); conn.processStderr(); readInt(conn->from); } @@ -678,8 +685,7 @@ Roots RemoteStore::findRoots(bool censor) Roots result; while (count--) { Path link = readString(conn->from); - auto target = parseStorePath(readString(conn->from)); - result[std::move(target)].emplace(link); + result[WorkerProto::Serialise::read(*this, *conn)].emplace(link); } return result; } @@ -723,7 +729,9 @@ bool RemoteStore::verifyStore(bool checkContents, RepairFlag 
repair) void RemoteStore::addSignatures(const StorePath & storePath, const StringSet & sigs) { auto conn(getConnection()); - conn->to << WorkerProto::Op::AddSignatures << printStorePath(storePath) << sigs; + conn->to << WorkerProto::Op::AddSignatures; + WorkerProto::write(*this, *conn, storePath); + conn->to << sigs; conn.processStderr(); readInt(conn->from); } diff --git a/src/libstore/restricted-store.cc b/src/libstore/restricted-store.cc index e0f43ab6c..ef8aaa380 100644 --- a/src/libstore/restricted-store.cc +++ b/src/libstore/restricted-store.cc @@ -107,7 +107,7 @@ struct RestrictedStore : public virtual IndirectRootStore, public virtual GcStor void registerDrvOutput(const Realisation & info) override; void queryRealisationUncached( - const DrvOutput & id, Callback> callback) noexcept override; + const DrvOutput & id, Callback> callback) noexcept override; void buildPaths(const std::vector & paths, BuildMode buildMode, std::shared_ptr evalStore) override; @@ -226,7 +226,7 @@ void RestrictedStore::narFromPath(const StorePath & path, Sink & sink) { if (!goal.isAllowed(path)) throw InvalidPath("cannot dump unknown path '%s' in recursive Nix", printStorePath(path)); - LocalFSStore::narFromPath(path, sink); + Store::narFromPath(path, sink); } void RestrictedStore::ensurePath(const StorePath & path) @@ -244,7 +244,7 @@ void RestrictedStore::registerDrvOutput(const Realisation & info) } void RestrictedStore::queryRealisationUncached( - const DrvOutput & id, Callback> callback) noexcept + const DrvOutput & id, Callback> callback) noexcept // XXX: This should probably be allowed if the realisation corresponds to // an allowed derivation { @@ -257,8 +257,8 @@ void RestrictedStore::buildPaths( const std::vector & paths, BuildMode buildMode, std::shared_ptr evalStore) { for (auto & result : buildPathsWithResults(paths, buildMode, evalStore)) - if (!result.success()) - result.rethrow(); + if (auto * failureP = result.tryGetFailure()) + failureP->rethrow(); } std::vector RestrictedStore::buildPathsWithResults( @@ -280,9 +280,11 @@ std::vector RestrictedStore::buildPathsWithResults( auto results = next->buildPathsWithResults(paths, buildMode); for (auto & result : results) { - for (auto & [outputName, output] : result.builtOutputs) { - newPaths.insert(output.outPath); - newRealisations.insert(output); + if (auto * successP = result.tryGetSuccess()) { + for (auto & [outputName, output] : successP->builtOutputs) { + newPaths.insert(output.outPath); + newRealisations.insert(output); + } } } diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc index b70f04be7..9303a80f8 100644 --- a/src/libstore/s3-binary-cache-store.cc +++ b/src/libstore/s3-binary-cache-store.cc @@ -1,592 +1,289 @@ #include "nix/store/s3-binary-cache-store.hh" +#include "nix/store/http-binary-cache-store.hh" +#include "nix/store/store-registration.hh" +#include "nix/util/error.hh" +#include "nix/util/logging.hh" +#include "nix/util/serialise.hh" +#include "nix/util/util.hh" -#if NIX_WITH_S3_SUPPORT - -# include - -# include "nix/store/s3.hh" -# include "nix/store/nar-info.hh" -# include "nix/store/nar-info-disk-cache.hh" -# include "nix/store/globals.hh" -# include "nix/util/compression.hh" -# include "nix/store/filetransfer.hh" -# include "nix/util/signals.hh" -# include "nix/store/store-registration.hh" - -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include - -using namespace 
Aws::Transfer; +#include +#include +#include +#include namespace nix { -struct S3Error : public Error +MakeError(UploadToS3, Error); + +static constexpr uint64_t AWS_MAX_PART_SIZE = 5ULL * 1024 * 1024 * 1024; // 5GiB + +class S3BinaryCacheStore : public virtual HttpBinaryCacheStore { - Aws::S3::S3Errors err; - Aws::String exceptionName; - - template - S3Error(Aws::S3::S3Errors err, Aws::String exceptionName, const Args &... args) - : Error(args...) - , err(err) - , exceptionName(exceptionName){}; -}; - -/* Helper: given an Outcome, return R in case of success, or - throw an exception in case of an error. */ -template -R && checkAws(std::string_view s, Aws::Utils::Outcome && outcome) -{ - if (!outcome.IsSuccess()) - throw S3Error( - outcome.GetError().GetErrorType(), - outcome.GetError().GetExceptionName(), - fmt("%s: %s (request id: %s)", s, outcome.GetError().GetMessage(), outcome.GetError().GetRequestId())); - return outcome.GetResultWithOwnership(); -} - -class AwsLogger : public Aws::Utils::Logging::FormattedLogSystem -{ - using Aws::Utils::Logging::FormattedLogSystem::FormattedLogSystem; - - void ProcessFormattedStatement(Aws::String && statement) override +public: + S3BinaryCacheStore(ref config) + : Store{*config} + , BinaryCacheStore{*config} + , HttpBinaryCacheStore{config} + , s3Config{config} { - debug("AWS: %s", chomp(statement)); } -# if !(AWS_SDK_VERSION_MAJOR <= 1 && AWS_SDK_VERSION_MINOR <= 7 && AWS_SDK_VERSION_PATCH <= 115) - void Flush() override {} -# endif -}; + void upsertFile( + const std::string & path, RestartableSource & source, const std::string & mimeType, uint64_t sizeHint) override; -/* Retrieve the credentials from the list of AWS default providers, with the addition of the STS creds provider. This - last can be used to acquire further permissions with a specific IAM role. - Roughly based on https://github.com/aws/aws-sdk-cpp/issues/150#issuecomment-538548438 -*/ -struct CustomAwsCredentialsProviderChain : public Aws::Auth::AWSCredentialsProviderChain -{ - CustomAwsCredentialsProviderChain(const std::string & profile) +private: + ref s3Config; + + /** + * Uploads a file to S3 using a regular (non-multipart) upload. + * + * This method is suitable for files up to 5GiB in size. For larger files, + * multipart upload should be used instead. + * + * @see https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html + */ + void upload( + std::string_view path, + RestartableSource & source, + uint64_t sizeHint, + std::string_view mimeType, + std::optional contentEncoding); + + /** + * Uploads a file to S3 (CompressedSource overload). + */ + void upload(std::string_view path, CompressedSource & source, std::string_view mimeType); + + /** + * Creates a multipart upload for large objects to S3. + * + * @see + * https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html#API_CreateMultipartUpload_RequestSyntax + */ + std::string createMultipartUpload( + std::string_view key, std::string_view mimeType, std::optional contentEncoding); + + /** + * Uploads a single part of a multipart upload + * + * @see https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html#API_UploadPart_RequestSyntax + * + * @returns the [ETag](https://en.wikipedia.org/wiki/HTTP_ETag) + */ + std::string uploadPart(std::string_view key, std::string_view uploadId, uint64_t partNumber, std::string data); + + struct UploadedPart { - if (profile.empty()) { - // Use all the default AWS providers, plus the possibility to acquire a IAM role directly via a profile. 
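The new S3BinaryCacheStore declares four multipart primitives (create, upload part, complete, abort). A hypothetical sketch of how the first three compose, with callbacks standing in for the private member functions; the real store would additionally call abortMultipartUpload when a part fails:

```cpp
#include <cstdint>
#include <string>
#include <string_view>
#include <vector>

// `create`, `uploadPart`, and `complete` stand in for the class's
// createMultipartUpload/uploadPart/completeMultipartUpload above.
struct Part
{
    uint64_t partNumber;
    std::string etag;
};

template<typename Create, typename Upload, typename Complete>
void multipartUpload(std::string_view data, uint64_t partSize, Create create, Upload uploadPart, Complete complete)
{
    std::string uploadId = create();
    std::vector<Part> parts;
    // Part numbers are 1-based in the S3 API.
    for (uint64_t offset = 0, n = 1; offset < data.size(); offset += partSize, ++n)
        parts.push_back({n, uploadPart(uploadId, n, std::string(data.substr(offset, partSize)))});
    complete(uploadId, parts);
}
```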
- Aws::Auth::DefaultAWSCredentialsProviderChain default_aws_chain; - for (auto provider : default_aws_chain.GetProviders()) - AddProvider(provider); - AddProvider(std::make_shared()); - } else { - // Override the profile name to retrieve from the AWS config and credentials. I believe this option - // comes from the ?profile querystring in nix.conf. - AddProvider(std::make_shared(profile.c_str())); - AddProvider(std::make_shared(profile)); - } - } + uint64_t partNumber; + std::string etag; + }; + + /** + * Completes a multipart upload by combining all uploaded parts. + * @see + * https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html#API_CompleteMultipartUpload_RequestSyntax + */ + void completeMultipartUpload(std::string_view key, std::string_view uploadId, std::span parts); + + /** + * Abort a multipart upload + * + * @see + * https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html#API_AbortMultipartUpload_RequestSyntax + */ + void abortMultipartUpload(std::string_view key, std::string_view uploadId); }; -static void initAWS() +void S3BinaryCacheStore::upsertFile( + const std::string & path, RestartableSource & source, const std::string & mimeType, uint64_t sizeHint) { - static std::once_flag flag; - std::call_once(flag, []() { - Aws::SDKOptions options; - - /* We install our own OpenSSL locking function (see - shared.cc), so don't let aws-sdk-cpp override it. */ - options.cryptoOptions.initAndCleanupOpenSSL = false; - - if (verbosity >= lvlDebug) { - options.loggingOptions.logLevel = - verbosity == lvlDebug ? Aws::Utils::Logging::LogLevel::Debug : Aws::Utils::Logging::LogLevel::Trace; - options.loggingOptions.logger_create_fn = [options]() { - return std::make_shared(options.loggingOptions.logLevel); - }; - } - - Aws::InitAPI(options); - }); + if (auto compressionMethod = getCompressionMethod(path)) { + CompressedSource compressed(source, *compressionMethod); + upload(path, compressed, mimeType); + } else { + upload(path, source, sizeHint, mimeType, std::nullopt); + } } -S3Helper::S3Helper( - const std::string & profile, const std::string & region, const std::string & scheme, const std::string & endpoint) - : config(makeConfig(region, scheme, endpoint)) - , client( - make_ref( - std::make_shared(profile), - *config, -# if AWS_SDK_VERSION_MAJOR == 1 && AWS_SDK_VERSION_MINOR < 3 - false, -# else - Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never, -# endif - endpoint.empty())) +void S3BinaryCacheStore::upload( + std::string_view path, + RestartableSource & source, + uint64_t sizeHint, + std::string_view mimeType, + std::optional contentEncoding) { -} - -/* Log AWS retries. 
*/ -class RetryStrategy : public Aws::Client::DefaultRetryStrategy -{ - bool ShouldRetry(const Aws::Client::AWSError & error, long attemptedRetries) const override - { - checkInterrupt(); - auto retry = Aws::Client::DefaultRetryStrategy::ShouldRetry(error, attemptedRetries); - if (retry) - printError( - "AWS error '%s' (%s; request id: %s), will retry in %d ms", - error.GetExceptionName(), - error.GetMessage(), - error.GetRequestId(), - CalculateDelayBeforeNextRetry(error, attemptedRetries)); - return retry; - } -}; - -ref -S3Helper::makeConfig(const std::string & region, const std::string & scheme, const std::string & endpoint) -{ - initAWS(); - auto res = make_ref(); - res->allowSystemProxy = true; - res->region = region; - if (!scheme.empty()) { - res->scheme = Aws::Http::SchemeMapper::FromString(scheme.c_str()); - } - if (!endpoint.empty()) { - res->endpointOverride = endpoint; - } - res->requestTimeoutMs = 600 * 1000; - res->connectTimeoutMs = 5 * 1000; - res->retryStrategy = std::make_shared(); - res->caFile = settings.caFile; - return res; -} - -S3Helper::FileTransferResult S3Helper::getObject(const std::string & bucketName, const std::string & key) -{ - std::string uri = "s3://" + bucketName + "/" + key; - Activity act( - *logger, lvlTalkative, actFileTransfer, fmt("downloading '%s'", uri), Logger::Fields{uri}, getCurActivity()); - - auto request = Aws::S3::Model::GetObjectRequest().WithBucket(bucketName).WithKey(key); - - request.SetResponseStreamFactory([&]() { return Aws::New("STRINGSTREAM"); }); - - size_t bytesDone = 0; - size_t bytesExpected = 0; - request.SetDataReceivedEventHandler( - [&](const Aws::Http::HttpRequest * req, Aws::Http::HttpResponse * resp, long long l) { - if (!bytesExpected && resp->HasHeader("Content-Length")) { - if (auto length = string2Int(resp->GetHeader("Content-Length"))) { - bytesExpected = *length; - } - } - bytesDone += l; - act.progress(bytesDone, bytesExpected); - }); - - request.SetContinueRequestHandler([](const Aws::Http::HttpRequest *) { return !isInterrupted(); }); - - FileTransferResult res; - - auto now1 = std::chrono::steady_clock::now(); + debug("using S3 regular upload for '%s' (%d bytes)", path, sizeHint); + if (sizeHint > AWS_MAX_PART_SIZE) + throw Error( + "file too large for S3 upload without multipart: %s would exceed maximum size of %s. 
Consider enabling multipart-upload.",
+            renderSize(sizeHint),
+            renderSize(AWS_MAX_PART_SIZE));
+    try {
+        HttpBinaryCacheStore::upload(path, source, sizeHint, mimeType, contentEncoding);
+    } catch (FileTransferError & e) {
+        UploadToS3 err(e.message());
+        err.addTrace({}, "while uploading to S3 binary cache at '%s'", config->cacheUri.to_string());
+        throw err;
+    }
+}
+
+void S3BinaryCacheStore::upload(std::string_view path, CompressedSource & source, std::string_view mimeType)
+{
+    upload(path, static_cast<RestartableSource &>(source), source.size(), mimeType, source.getCompressionMethod());
+}
+
+std::string S3BinaryCacheStore::createMultipartUpload(
+    std::string_view key, std::string_view mimeType, std::optional<std::string_view> contentEncoding)
+{
+    auto req = makeRequest(key);
+
+    // setupForS3() converts s3:// to https:// but strips query parameters
+    // So we call it first, then add our multipart parameters
+    req.setupForS3();
+
+    auto url = req.uri.parsed();
+    url.query["uploads"] = "";
+    req.uri = VerbatimURL(url);
+
+    req.method = HttpMethod::POST;
+    StringSource payload{std::string_view("")};
+    req.data = {payload};
+    req.mimeType = mimeType;
+
+    if (contentEncoding) {
+        req.headers.emplace_back("Content-Encoding", *contentEncoding);
+    }
+
+    auto result = getFileTransfer()->enqueueFileTransfer(req).get();
+
+    std::regex uploadIdRegex("<UploadId>([^<]+)</UploadId>");
+    std::smatch match;
+
+    if (std::regex_search(result.data, match, uploadIdRegex)) {
+        return match[1];
+    }
+
+    throw Error("S3 CreateMultipartUpload response missing <UploadId>");
+}
+
+std::string
+S3BinaryCacheStore::uploadPart(std::string_view key, std::string_view uploadId, uint64_t partNumber, std::string data)
+{
+    auto req = makeRequest(key);
+    req.method = HttpMethod::PUT;
+    req.setupForS3();
+
+    auto url = req.uri.parsed();
+    url.query["partNumber"] = std::to_string(partNumber);
+    url.query["uploadId"] = uploadId;
+    req.uri = VerbatimURL(url);
+
+    StringSource payload{data};
+    req.data = {payload};
+    req.mimeType = "application/octet-stream";
+
+    auto result = getFileTransfer()->enqueueFileTransfer(req).get();
+
+    if (result.etag.empty()) {
+        throw Error("S3 UploadPart response missing ETag for part %d", partNumber);
+    }
+
+    return std::move(result.etag);
+}
+
+void S3BinaryCacheStore::abortMultipartUpload(std::string_view key, std::string_view uploadId)
+{
+    auto req = makeRequest(key);
+    req.setupForS3();
+
+    auto url = req.uri.parsed();
+    url.query["uploadId"] = uploadId;
+    req.uri = VerbatimURL(url);
+    req.method = HttpMethod::DELETE;
+
+    getFileTransfer()->enqueueFileTransfer(req).get();
+}
+
+void S3BinaryCacheStore::completeMultipartUpload(
+    std::string_view key, std::string_view uploadId, std::span<const UploadedPart> parts)
+{
+    auto req = makeRequest(key);
+    req.setupForS3();
+
+    auto url = req.uri.parsed();
+    url.query["uploadId"] = uploadId;
+    req.uri = VerbatimURL(url);
+    req.method = HttpMethod::POST;
+
+    std::string xml = "<CompleteMultipartUpload>";
+    for (const auto & part : parts) {
+        xml += "<Part>";
+        xml += "<PartNumber>" + std::to_string(part.partNumber) + "</PartNumber>";
+        xml += "<ETag>" + part.etag + "</ETag>";
+        xml += "</Part>";
+    }
+    xml += "</CompleteMultipartUpload>";
+
+    debug("S3 CompleteMultipartUpload XML (%d parts): %s", parts.size(), xml);
+
+    StringSource payload{xml};
+    req.data = {payload};
+    req.mimeType = "text/xml";
+
+    getFileTransfer()->enqueueFileTransfer(req).get();
+}
+
+StringSet S3BinaryCacheStoreConfig::uriSchemes()
+{
+    return {"s3"};
+}
 
 S3BinaryCacheStoreConfig::S3BinaryCacheStoreConfig(
-    std::string_view uriScheme, std::string_view bucketName, const Params & params)
+    std::string_view scheme, std::string_view _cacheUri, const Params & params)
     : StoreConfig(params)
-    , BinaryCacheStoreConfig(params)
-    , bucketName(bucketName)
+    , HttpBinaryCacheStoreConfig(scheme, _cacheUri, params)
 {
-    // Don't want to use use AWS SDK in header, so we check the default
-    // here. TODO do this better after we overhaul the store settings
-    // system.
-    assert(std::string{defaultRegion} == std::string{Aws::Region::US_EAST_1});
+    assert(cacheUri.query.empty());
+    assert(cacheUri.scheme == "s3");
 
-    if (bucketName.empty())
-        throw UsageError("`%s` store requires a bucket name in its Store URI", uriScheme);
+    for (const auto & [key, value] : params) {
+        auto s3Params =
+            std::views::transform(s3UriSettings, [](const AbstractSetting * setting) { return setting->name; });
+        if (std::ranges::contains(s3Params, key)) {
+            cacheUri.query[key] = value;
+        }
+    }
 }
 
-S3BinaryCacheStore::S3BinaryCacheStore(ref<Config> config)
-    : BinaryCacheStore(*config)
-    , config{config}
+std::string S3BinaryCacheStoreConfig::getHumanReadableURI() const
 {
+    auto reference = getReference();
+    reference.params = [&]() {
+        Params relevantParams;
+        for (auto & setting : s3UriSettings)
+            if (setting->overridden)
+                relevantParams.insert({setting->name, reference.params.at(setting->name)});
+        return relevantParams;
+    }();
+    return reference.render();
 }
 
 std::string S3BinaryCacheStoreConfig::doc()
 {
-    return
-# include "s3-binary-cache-store.md"
-        ;
+    return R"(
+        **Store URL format**: `s3://bucket-name`
+
+        This store allows reading and writing a binary cache stored in an AWS S3 bucket.
+    )";
 }
 
-StoreReference S3BinaryCacheStoreConfig::getReference() const
+ref<Store> S3BinaryCacheStoreConfig::openStore() const
 {
-    return {
-        .variant =
-            StoreReference::Specified{
-                .scheme = *uriSchemes().begin(),
-                .authority = bucketName,
-            },
-        .params = getQueryParams(),
-    };
+    auto sharedThis = std::const_pointer_cast<S3BinaryCacheStoreConfig>(
+        std::static_pointer_cast<const S3BinaryCacheStoreConfig>(shared_from_this()));
+    return make_ref<S3BinaryCacheStore>(ref{sharedThis});
 }
 
-struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStore
-{
-    Stats stats;
-
-    S3Helper s3Helper;
-
-    S3BinaryCacheStoreImpl(ref<Config> config)
-        : Store{*config}
-        , BinaryCacheStore{*config}
-        , S3BinaryCacheStore{config}
-        , s3Helper(config->profile, config->region, config->scheme, config->endpoint)
-    {
-        diskCache = getNarInfoDiskCache();
-    }
-
-    void init() override
-    {
-        /* FIXME: The URI (when used as a cache key) must have several parameters rendered (e.g. the endpoint).
-           This must be represented as a separate opaque string (probably a URI) that has the right query parameters.
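For reference, the request body assembled by completeMultipartUpload above is the standard S3 CompleteMultipartUpload document; with a single uploaded part (illustrative ETag) it would look like:

```xml
<CompleteMultipartUpload>
  <Part>
    <PartNumber>1</PartNumber>
    <ETag>"0123456789abcdef0123456789abcdef"</ETag>
  </Part>
</CompleteMultipartUpload>
```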
*/ - auto cacheUri = config->getReference().render(/*withParams=*/false); - if (auto cacheInfo = diskCache->upToDateCacheExists(cacheUri)) { - config->wantMassQuery.setDefault(cacheInfo->wantMassQuery); - config->priority.setDefault(cacheInfo->priority); - } else { - BinaryCacheStore::init(); - diskCache->createCache(cacheUri, config->storeDir, config->wantMassQuery, config->priority); - } - } - - const Stats & getS3Stats() override - { - return stats; - } - - /* This is a specialisation of isValidPath() that optimistically - fetches the .narinfo file, rather than first checking for its - existence via a HEAD request. Since .narinfos are small, doing - a GET is unlikely to be slower than HEAD. */ - bool isValidPathUncached(const StorePath & storePath) override - { - try { - queryPathInfo(storePath); - return true; - } catch (InvalidPath & e) { - return false; - } - } - - bool fileExists(const std::string & path) override - { - stats.head++; - - auto res = s3Helper.client->HeadObject( - Aws::S3::Model::HeadObjectRequest().WithBucket(config->bucketName).WithKey(path)); - - if (!res.IsSuccess()) { - auto & error = res.GetError(); - if (error.GetErrorType() == Aws::S3::S3Errors::RESOURCE_NOT_FOUND - || error.GetErrorType() == Aws::S3::S3Errors::NO_SUCH_KEY - // Expired tokens are not really an error, more of a caching problem. Should be treated same as 403. - // AWS unwilling to provide a specific error type for the situation - // (https://github.com/aws/aws-sdk-cpp/issues/1843) so use this hack - || (error.GetErrorType() == Aws::S3::S3Errors::UNKNOWN && error.GetExceptionName() == "ExpiredToken") - // If bucket listing is disabled, 404s turn into 403s - || error.GetErrorType() == Aws::S3::S3Errors::ACCESS_DENIED) - return false; - throw Error("AWS error fetching '%s': %s", path, error.GetMessage()); - } - - return true; - } - - std::shared_ptr transferManager; - std::once_flag transferManagerCreated; - - struct AsyncContext : public Aws::Client::AsyncCallerContext - { - mutable std::mutex mutex; - mutable std::condition_variable cv; - const Activity & act; - - void notify() const - { - cv.notify_one(); - } - - void wait() const - { - std::unique_lock lk(mutex); - cv.wait(lk); - } - - AsyncContext(const Activity & act) - : act(act) - { - } - }; - - void uploadFile( - const std::string & path, - std::shared_ptr> istream, - const std::string & mimeType, - const std::string & contentEncoding) - { - std::string uri = "s3://" + config->bucketName + "/" + path; - Activity act( - *logger, lvlTalkative, actFileTransfer, fmt("uploading '%s'", uri), Logger::Fields{uri}, getCurActivity()); - istream->seekg(0, istream->end); - auto size = istream->tellg(); - istream->seekg(0, istream->beg); - - auto maxThreads = std::thread::hardware_concurrency(); - - static std::shared_ptr executor = - std::make_shared(maxThreads); - - std::call_once(transferManagerCreated, [&]() { - if (config->multipartUpload) { - TransferManagerConfiguration transferConfig(executor.get()); - - transferConfig.s3Client = s3Helper.client; - transferConfig.bufferSize = config->bufferSize; - - transferConfig.uploadProgressCallback = - [](const TransferManager * transferManager, - const std::shared_ptr & transferHandle) { - auto context = std::dynamic_pointer_cast(transferHandle->GetContext()); - size_t bytesDone = transferHandle->GetBytesTransferred(); - size_t bytesTotal = transferHandle->GetBytesTotalSize(); - try { - checkInterrupt(); - context->act.progress(bytesDone, bytesTotal); - } catch (...) 
{ - context->notify(); - } - }; - transferConfig.transferStatusUpdatedCallback = - [](const TransferManager * transferManager, - const std::shared_ptr & transferHandle) { - auto context = std::dynamic_pointer_cast(transferHandle->GetContext()); - context->notify(); - }; - - transferManager = TransferManager::Create(transferConfig); - } - }); - - auto now1 = std::chrono::steady_clock::now(); - - auto & bucketName = config->bucketName; - - if (transferManager) { - - if (contentEncoding != "") - throw Error("setting a content encoding is not supported with S3 multi-part uploads"); - - auto context = std::make_shared(act); - std::shared_ptr transferHandle = transferManager->UploadFile( - istream, - bucketName, - path, - mimeType, - Aws::Map(), - context /*, contentEncoding */); - - TransferStatus status = transferHandle->GetStatus(); - while (status == TransferStatus::IN_PROGRESS || status == TransferStatus::NOT_STARTED) { - if (!isInterrupted()) { - context->wait(); - } else { - transferHandle->Cancel(); - transferHandle->WaitUntilFinished(); - } - status = transferHandle->GetStatus(); - } - act.progress(transferHandle->GetBytesTransferred(), transferHandle->GetBytesTotalSize()); - - if (status == TransferStatus::FAILED) - throw Error( - "AWS error: failed to upload 's3://%s/%s': %s", - bucketName, - path, - transferHandle->GetLastError().GetMessage()); - - if (status != TransferStatus::COMPLETED) - throw Error("AWS error: transfer status of 's3://%s/%s' in unexpected state", bucketName, path); - - } else { - act.progress(0, size); - - auto request = Aws::S3::Model::PutObjectRequest().WithBucket(bucketName).WithKey(path); - - size_t bytesSent = 0; - request.SetDataSentEventHandler([&](const Aws::Http::HttpRequest * req, long long l) { - bytesSent += l; - act.progress(bytesSent, size); - }); - - request.SetContinueRequestHandler([](const Aws::Http::HttpRequest *) { return !isInterrupted(); }); - - request.SetContentType(mimeType); - - if (contentEncoding != "") - request.SetContentEncoding(contentEncoding); - - request.SetBody(istream); - - auto result = checkAws(fmt("AWS error uploading '%s'", path), s3Helper.client->PutObject(request)); - - act.progress(size, size); - } - - auto now2 = std::chrono::steady_clock::now(); - - auto duration = std::chrono::duration_cast(now2 - now1).count(); - - printInfo("uploaded 's3://%s/%s' (%d bytes) in %d ms", bucketName, path, size, duration); - - stats.putTimeMs += duration; - stats.putBytes += std::max(size, (decltype(size)) 0); - stats.put++; - } - - void upsertFile( - const std::string & path, - std::shared_ptr> istream, - const std::string & mimeType) override - { - auto compress = [&](std::string compression) { - auto compressed = nix::compress(compression, StreamToSourceAdapter(istream).drain()); - return std::make_shared(std::move(compressed)); - }; - - if (config->narinfoCompression != "" && hasSuffix(path, ".narinfo")) - uploadFile(path, compress(config->narinfoCompression), mimeType, config->narinfoCompression); - else if (config->lsCompression != "" && hasSuffix(path, ".ls")) - uploadFile(path, compress(config->lsCompression), mimeType, config->lsCompression); - else if (config->logCompression != "" && hasPrefix(path, "log/")) - uploadFile(path, compress(config->logCompression), mimeType, config->logCompression); - else - uploadFile(path, istream, mimeType, ""); - } - - void getFile(const std::string & path, Sink & sink) override - { - stats.get++; - - // FIXME: stream output to sink. 
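The removed upsertFile above chose a compression codec by object kind before uploading. A condensed sketch of that dispatch, with the three setting values passed in explicitly rather than read from the store config (not the new code path, which routes through getCompressionMethod/CompressedSource instead):

```cpp
#include <optional>
#include <string>
#include <string_view>

// Suffix/prefix-based compression choice mirroring the removed upsertFile:
// .narinfo, .ls, and log/ objects each get their own configured codec.
std::optional<std::string> compressionFor(
    std::string_view path,
    std::string_view narinfoCompression,
    std::string_view lsCompression,
    std::string_view logCompression)
{
    auto pick = [](std::string_view method) -> std::optional<std::string> {
        return method.empty() ? std::nullopt : std::optional<std::string>(std::string(method));
    };
    if (path.ends_with(".narinfo"))
        return pick(narinfoCompression);
    if (path.ends_with(".ls"))
        return pick(lsCompression);
    if (path.starts_with("log/"))
        return pick(logCompression);
    return std::nullopt; // upload uncompressed
}
```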
- auto res = s3Helper.getObject(config->bucketName, path); - - stats.getBytes += res.data ? res.data->size() : 0; - stats.getTimeMs += res.durationMs; - - if (res.data) { - printTalkative( - "downloaded 's3://%s/%s' (%d bytes) in %d ms", - config->bucketName, - path, - res.data->size(), - res.durationMs); - - sink(*res.data); - } else - throw NoSuchBinaryCacheFile( - "file '%s' does not exist in binary cache '%s'", path, config->getHumanReadableURI()); - } - - StorePathSet queryAllValidPaths() override - { - StorePathSet paths; - std::string marker; - - auto & bucketName = config->bucketName; - - do { - debug("listing bucket 's3://%s' from key '%s'...", bucketName, marker); - - auto res = checkAws( - fmt("AWS error listing bucket '%s'", bucketName), - s3Helper.client->ListObjects( - Aws::S3::Model::ListObjectsRequest().WithBucket(bucketName).WithDelimiter("/").WithMarker(marker))); - - auto & contents = res.GetContents(); - - debug("got %d keys, next marker '%s'", contents.size(), res.GetNextMarker()); - - for (const auto & object : contents) { - auto & key = object.GetKey(); - if (key.size() != 40 || !hasSuffix(key, ".narinfo")) - continue; - paths.insert(parseStorePath(storeDir + "/" + key.substr(0, key.size() - 8) + "-" + MissingName)); - } - - marker = res.GetNextMarker(); - } while (!marker.empty()); - - return paths; - } - - /** - * For now, we conservatively say we don't know. - * - * \todo try to expose our S3 authentication status. - */ - std::optional isTrustedClient() override - { - return std::nullopt; - } -}; - -ref S3BinaryCacheStoreImpl::Config::openStore() const -{ - auto store = - make_ref(ref{// FIXME we shouldn't actually need a mutable config - std::const_pointer_cast(shared_from_this())}); - store->init(); - return store; -} - -static RegisterStoreImplementation regS3BinaryCacheStore; +static RegisterStoreImplementation registerS3BinaryCacheStore; } // namespace nix - -#endif diff --git a/src/libstore/s3-binary-cache-store.md b/src/libstore/s3-binary-cache-store.md index daa41defd..0b0c26919 100644 --- a/src/libstore/s3-binary-cache-store.md +++ b/src/libstore/s3-binary-cache-store.md @@ -27,7 +27,8 @@ like the following to be accessible: "Sid": "AllowDirectReads", "Action": [ "s3:GetObject", - "s3:GetBucketLocation" + "s3:GetBucketLocation", + "s3:ListBucket" ], "Effect": "Allow", "Resource": [ @@ -51,7 +52,7 @@ Consult the documentation linked above for further details. ### Authenticated reads to your S3 binary cache -Your bucket will need a bucket policy allowing the desired users to perform the `s3:GetObject` and `s3:GetBucketLocation` action on all objects in the bucket. +Your bucket will need a bucket policy allowing the desired users to perform the `s3:GetObject`, `s3:GetBucketLocation`, and `s3:ListBucket` actions on all objects in the bucket. The [anonymous policy given above](#anonymous-reads-to-your-s3-compatible-binary-cache) can be updated to have a restricted `Principal` to support this. 
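An illustrative bucket policy for such authenticated reads, with a placeholder account ID and bucket name (adapt `Principal` and `Resource` to your deployment; `s3:ListBucket` applies to the bucket itself, so both ARNs are listed):

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "AllowAuthenticatedReads",
      "Effect": "Allow",
      "Principal": { "AWS": "arn:aws:iam::123456789012:root" },
      "Action": [
        "s3:GetObject",
        "s3:GetBucketLocation",
        "s3:ListBucket"
      ],
      "Resource": [
        "arn:aws:s3:::example-nix-cache",
        "arn:aws:s3:::example-nix-cache/*"
      ]
    }
  ]
}
```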
### Authenticated writes to your S3-compatible binary cache diff --git a/src/libstore/s3.cc b/src/libstore/s3-url.cc similarity index 90% rename from src/libstore/s3.cc rename to src/libstore/s3-url.cc index 5396f43b9..503c0cd91 100644 --- a/src/libstore/s3.cc +++ b/src/libstore/s3-url.cc @@ -1,17 +1,14 @@ -#include "nix/store/s3.hh" +#include "nix/store/s3-url.hh" +#include "nix/util/error.hh" #include "nix/util/split.hh" -#include "nix/util/url.hh" -#include "nix/util/util.hh" -#include "nix/util/canon-path.hh" #include "nix/util/strings-inline.hh" #include - -namespace nix { +#include using namespace std::string_view_literals; -#if NIX_WITH_S3_SUPPORT +namespace nix { ParsedS3URL ParsedS3URL::parse(const ParsedURL & parsed) try { @@ -51,6 +48,7 @@ try { .profile = getOptionalParam("profile"), .region = getOptionalParam("region"), .scheme = getOptionalParam("scheme"), + .versionId = getOptionalParam("versionId"), .endpoint = [&]() -> decltype(ParsedS3URL::endpoint) { if (!endpoint) return std::monostate(); @@ -76,6 +74,12 @@ ParsedURL ParsedS3URL::toHttpsUrl() const auto regionStr = region.transform(toView).value_or("us-east-1"); auto schemeStr = scheme.transform(toView).value_or("https"); + // Build query parameters (e.g., versionId if present) + StringMap queryParams; + if (versionId) { + queryParams["versionId"] = *versionId; + } + // Handle endpoint configuration using std::visit return std::visit( overloaded{ @@ -88,6 +92,7 @@ ParsedURL ParsedS3URL::toHttpsUrl() const .scheme = std::string{schemeStr}, .authority = ParsedURL::Authority{.host = "s3." + regionStr + ".amazonaws.com"}, .path = std::move(path), + .query = std::move(queryParams), }; }, [&](const ParsedURL::Authority & auth) { @@ -99,6 +104,7 @@ ParsedURL ParsedS3URL::toHttpsUrl() const .scheme = std::string{schemeStr}, .authority = auth, .path = std::move(path), + .query = std::move(queryParams), }; }, [&](const ParsedURL & endpointUrl) { @@ -110,12 +116,11 @@ ParsedURL ParsedS3URL::toHttpsUrl() const .scheme = endpointUrl.scheme, .authority = endpointUrl.authority, .path = std::move(path), + .query = std::move(queryParams), }; }, }, endpoint); } -#endif - } // namespace nix diff --git a/src/libstore/serve-protocol-connection.cc b/src/libstore/serve-protocol-connection.cc index a90b104a6..baa3bf0ce 100644 --- a/src/libstore/serve-protocol-connection.cc +++ b/src/libstore/serve-protocol-connection.cc @@ -93,4 +93,14 @@ void ServeProto::BasicClientConnection::narFromPath( fun(from); } +void ServeProto::BasicClientConnection::importPaths(const StoreDirConfig & store, std::function fun) +{ + to << ServeProto::Command::ImportPaths; + fun(to); + to.flush(); + + if (readInt(from) != 1) + throw Error("remote machine failed to import closure"); +} + } // namespace nix diff --git a/src/libstore/serve-protocol.cc b/src/libstore/serve-protocol.cc index 7cf5e6997..51b575fcd 100644 --- a/src/libstore/serve-protocol.cc +++ b/src/libstore/serve-protocol.cc @@ -16,32 +16,62 @@ namespace nix { BuildResult ServeProto::Serialise::read(const StoreDirConfig & store, ServeProto::ReadConn conn) { BuildResult status; - status.status = (BuildResult::Status) readInt(conn.from); - conn.from >> status.errorMsg; + BuildResult::Success success; + BuildResult::Failure failure; + + auto rawStatus = readInt(conn.from); + conn.from >> failure.errorMsg; if (GET_PROTOCOL_MINOR(conn.version) >= 3) - conn.from >> status.timesBuilt >> status.isNonDeterministic >> status.startTime >> status.stopTime; + conn.from >> status.timesBuilt >> failure.isNonDeterministic 
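The `versionId` plumbing in `s3-url.cc` above is easiest to see end to end. A minimal sketch of the intended mapping, assuming the parse/render helpers from this diff (the exact rendered string is illustrative, not asserted by the actual test suite):

```cpp
#include "nix/store/s3-url.hh"
#include "nix/util/url.hh"
#include <cassert>

void versionIdExample()
{
    auto s3 = nix::ParsedS3URL::parse(
        nix::parseURL("s3://example-bucket/nar/x.nar?versionId=abc123&region=eu-west-1"));
    auto https = s3.toHttpsUrl();

    // The version pin survives the s3:// -> https:// rewrite as a query
    // parameter, so the fetch hits exactly the pinned object version:
    assert(https.query.at("versionId") == "abc123");
    // Expected shape (illustrative):
    //   https://s3.eu-west-1.amazonaws.com/example-bucket/nar/x.nar?versionId=abc123
}
```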
>> status.startTime >> status.stopTime; if (GET_PROTOCOL_MINOR(conn.version) >= 6) { auto builtOutputs = ServeProto::Serialise::read(store, conn); for (auto && [output, realisation] : builtOutputs) - status.builtOutputs.insert_or_assign(std::move(output.outputName), std::move(realisation)); + success.builtOutputs.insert_or_assign(std::move(output.outputName), std::move(realisation)); } + + if (BuildResult::Success::statusIs(rawStatus)) { + success.status = static_cast(rawStatus); + status.inner = std::move(success); + } else { + failure.status = static_cast(rawStatus); + status.inner = std::move(failure); + } + return status; } void ServeProto::Serialise::write( - const StoreDirConfig & store, ServeProto::WriteConn conn, const BuildResult & status) + const StoreDirConfig & store, ServeProto::WriteConn conn, const BuildResult & res) { - conn.to << status.status << status.errorMsg; - - if (GET_PROTOCOL_MINOR(conn.version) >= 3) - conn.to << status.timesBuilt << status.isNonDeterministic << status.startTime << status.stopTime; - if (GET_PROTOCOL_MINOR(conn.version) >= 6) { - DrvOutputs builtOutputs; - for (auto & [output, realisation] : status.builtOutputs) - builtOutputs.insert_or_assign(realisation.id, realisation); - ServeProto::write(store, conn, builtOutputs); - } + /* The protocol predates the use of sum types (std::variant) to + separate the success or failure cases. As such, it transits some + success- or failure-only fields in both cases. This helper + function helps support this: in each case, we just pass the old + default value for the fields that don't exist in that case. */ + auto common = [&](std::string_view errorMsg, bool isNonDeterministic, const auto & builtOutputs) { + conn.to << errorMsg; + if (GET_PROTOCOL_MINOR(conn.version) >= 3) + conn.to << res.timesBuilt << isNonDeterministic << res.startTime << res.stopTime; + if (GET_PROTOCOL_MINOR(conn.version) >= 6) { + DrvOutputs builtOutputsFullKey; + for (auto & [output, realisation] : builtOutputs) + builtOutputsFullKey.insert_or_assign(realisation.id, realisation); + ServeProto::write(store, conn, builtOutputsFullKey); + } + }; + std::visit( + overloaded{ + [&](const BuildResult::Failure & failure) { + conn.to << failure.status; + common(failure.errorMsg, failure.isNonDeterministic, decltype(BuildResult::Success::builtOutputs){}); + }, + [&](const BuildResult::Success & success) { + conn.to << success.status; + common(/*errorMsg=*/"", /*isNonDeterministic=*/false, success.builtOutputs); + }, + }, + res.inner); } UnkeyedValidPathInfo ServeProto::Serialise::read(const StoreDirConfig & store, ReadConn conn) diff --git a/src/libstore/ssh-store.cc b/src/libstore/ssh-store.cc index a7e28017f..ce973e734 100644 --- a/src/libstore/ssh-store.cc +++ b/src/libstore/ssh-store.cc @@ -143,7 +143,7 @@ struct MountedSSHStore : virtual SSHStore, virtual LocalFSStore void narFromPath(const StorePath & path, Sink & sink) override { - return LocalFSStore::narFromPath(path, sink); + return Store::narFromPath(path, sink); } ref getFSAccessor(bool requireValidPath) override diff --git a/src/libstore/ssh.cc b/src/libstore/ssh.cc index 0f1dba1e9..1a9908366 100644 --- a/src/libstore/ssh.cc +++ b/src/libstore/ssh.cc @@ -78,7 +78,7 @@ SSHMaster::SSHMaster( oss << authority.host; return std::move(oss).str(); }()) - , fakeSSH(authority.host == "localhost") + , fakeSSH(authority.to_string() == "localhost") , keyFile(keyFile) , sshPublicHostKey(parsePublicHostKey(authority.host, sshPublicHostKey)) , useMaster(useMaster && !fakeSSH) diff --git 
a/src/libstore/store-api.cc b/src/libstore/store-api.cc index c26c7d826..08b75c8fa 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -300,6 +300,13 @@ ValidPathInfo Store::addToStoreSlow( return info; } +void Store::narFromPath(const StorePath & path, Sink & sink) +{ + auto accessor = requireStoreObjectAccessor(path); + SourcePath sourcePath{accessor}; + dumpPath(sourcePath, sink, FileSerialisationMethod::NixArchive); +} + StringSet Store::Config::getDefaultSystemFeatures() { auto res = settings.systemFeatures.get(); @@ -598,7 +605,8 @@ void Store::queryPathInfo(const StorePath & storePath, Callback> callback) noexcept +void Store::queryRealisation( + const DrvOutput & id, Callback> callback) noexcept { try { @@ -624,20 +632,20 @@ void Store::queryRealisation(const DrvOutput & id, Callback(std::move(callback)); - queryRealisationUncached(id, {[this, id, callbackPtr](std::future> fut) { + queryRealisationUncached(id, {[this, id, callbackPtr](std::future> fut) { try { auto info = fut.get(); if (diskCache) { if (info) diskCache->upsertRealisation( - config.getReference().render(/*FIXME withParams=*/false), *info); + config.getReference().render(/*FIXME withParams=*/false), {*info, id}); else diskCache->upsertAbsentRealisation( config.getReference().render(/*FIXME withParams=*/false), id); } - (*callbackPtr)(std::shared_ptr(info)); + (*callbackPtr)(std::shared_ptr(info)); } catch (...) { callbackPtr->rethrow(); @@ -645,9 +653,9 @@ void Store::queryRealisation(const DrvOutput & id, Callback Store::queryRealisation(const DrvOutput & id) +std::shared_ptr Store::queryRealisation(const DrvOutput & id) { - using RealPtr = std::shared_ptr; + using RealPtr = std::shared_ptr; std::promise promise; queryRealisation(id, {[&](std::future result) { @@ -774,7 +782,7 @@ StorePathSet Store::exportReferences(const StorePathSet & storePaths, const Stor for (auto & storePath : storePaths) { if (!inputPaths.count(storePath)) throw BuildError( - BuildResult::InputRejected, + BuildResult::Failure::InputRejected, "cannot export references of path '%s' because it is not in the input closure of the derivation", printStorePath(storePath)); @@ -910,11 +918,12 @@ std::map copyPaths( std::set toplevelRealisations; for (auto & path : paths) { storePaths.insert(path.path()); - if (auto realisation = std::get_if(&path.raw)) { + if (auto * realisation = std::get_if(&path.raw)) { experimentalFeatureSettings.require(Xp::CaDerivations); toplevelRealisations.insert(*realisation); } } + auto pathsMap = copyPaths(srcStore, dstStore, storePaths, repair, checkSigs, substitute); try { @@ -931,7 +940,7 @@ std::map copyPaths( "dependency of '%s' but isn't registered", drvOutput.to_string(), current.id.to_string()); - children.insert(*currentChild); + children.insert({*currentChild, drvOutput}); } return children; }, @@ -1130,7 +1139,7 @@ Derivation Store::derivationFromPath(const StorePath & drvPath) static Derivation readDerivationCommon(Store & store, const StorePath & drvPath, bool requireValidPath) { - auto accessor = store.getFSAccessor(drvPath, requireValidPath); + auto accessor = store.requireStoreObjectAccessor(drvPath, requireValidPath); try { return parseDerivation(store, accessor->readFile(CanonPath::root), Derivation::nameFromPath(drvPath)); } catch (FormatError & e) { @@ -1199,7 +1208,7 @@ void Store::signRealisation(Realisation & realisation) for (auto & secretKeyFile : secretKeyFiles.get()) { SecretKey secretKey(readFile(secretKeyFile)); LocalSigner signer(std::move(secretKey)); - 
realisation.sign(signer); + realisation.sign(realisation.id, signer); } } diff --git a/src/libstore/store-reference.cc b/src/libstore/store-reference.cc index 96ee829d0..01e197be7 100644 --- a/src/libstore/store-reference.cc +++ b/src/libstore/store-reference.cc @@ -121,7 +121,27 @@ StoreReference StoreReference::parse(const std::string & uri, const StoreReferen * greedily assumed to be the part of the host address. */ auto authorityString = schemeAndAuthority->authority; auto userinfo = splitPrefixTo(authorityString, '@'); - auto maybeIpv6 = boost::urls::parse_ipv6_address(authorityString); + /* Back-compat shim for ZoneId specifiers. Technically this isn't + * standard, but the expectation is this works with the old syntax + * for ZoneID specifiers. For the full story behind the fiasco that + * is ZoneID in URLs look at [^]. + * [^]: https://datatracker.ietf.org/doc/html/draft-schinazi-httpbis-link-local-uri-bcp-03 + */ + + /* Fish out the internals from inside square brackets. It might be that the pct-sign is unencoded and that's + * why we failed to parse it previously. */ + if (authorityString.starts_with('[') && authorityString.ends_with(']')) { + authorityString.remove_prefix(1); + authorityString.remove_suffix(1); + } + + auto maybeBeforePct = splitPrefixTo(authorityString, '%'); + bool hasZoneId = maybeBeforePct.has_value(); + auto maybeZoneId = hasZoneId ? std::optional{authorityString} : std::nullopt; + + std::string_view maybeIpv6S = maybeBeforePct.value_or(authorityString); + auto maybeIpv6 = boost::urls::parse_ipv6_address(maybeIpv6S); + if (maybeIpv6) { std::string fixedAuthority; if (userinfo) { @@ -129,7 +149,11 @@ StoreReference StoreReference::parse(const std::string & uri, const StoreReferen fixedAuthority += '@'; } fixedAuthority += '['; - fixedAuthority += authorityString; + fixedAuthority += maybeIpv6S; + if (maybeZoneId) { + fixedAuthority += "%25"; // pct-encoded percent character + fixedAuthority += *maybeZoneId; + } fixedAuthority += ']'; return { .variant = diff --git a/src/libstore/unix/build/derivation-builder.cc b/src/libstore/unix/build/derivation-builder.cc index 04e8cb176..d6abf85e3 100644 --- a/src/libstore/unix/build/derivation-builder.cc +++ b/src/libstore/unix/build/derivation-builder.cc @@ -46,12 +46,18 @@ #include "store-config-private.hh" #include "build/derivation-check.hh" +#if NIX_WITH_AWS_AUTH +# include "nix/store/aws-creds.hh" +# include "nix/store/s3-url.hh" +# include "nix/util/url.hh" +#endif + namespace nix { struct NotDeterministic : BuildError { NotDeterministic(auto &&... args) - : BuildError(BuildResult::NotDeterministic, args...) + : BuildError(BuildResult::Failure::NotDeterministic, args...) { } }; @@ -290,6 +296,15 @@ protected: */ virtual void startChild(); +#if NIX_WITH_AWS_AUTH + /** + * Pre-resolve AWS credentials for S3 URLs in builtin:fetchurl. + * This should be called before forking to ensure credentials are available in child. + * Returns the credentials if successfully resolved, or std::nullopt otherwise. + */ + std::optional preResolveAwsCredentials(); +#endif + private: /** @@ -339,10 +354,20 @@ protected: */ void writeBuilderFile(const std::string & name, std::string_view contents); + /** + * Arguments passed to runChild(). + */ + struct RunChildArgs + { +#if NIX_WITH_AWS_AUTH + std::optional awsCredentials; +#endif + }; + /** * Run the builder's process. */ - void runChild(); + void runChild(RunChildArgs args); /** * Move the current process into the chroot, if any. 
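The ZoneId back-compat shim in `store-reference.cc` above is easiest to grasp by example; a sketch of the legacy spelling it accepts and the normalization it performs (the header path and the rendered form are assumptions based on the code in this diff):

```cpp
#include "nix/store/store-reference.hh"

void zoneIdExample()
{
    // Legacy spelling with a raw '%' inside the brackets; strictly this is
    // not a valid URL, which is why the plain URL parser rejected it before:
    auto ref = nix::StoreReference::parse("ssh://root@[fe80::1%eth0]");

    // The shim splits off the zone id, validates "fe80::1" as IPv6, and
    // re-renders the authority with the percent pct-encoded, i.e. as
    // "ssh://root@[fe80::1%25eth0]", which parses cleanly going forward.
    (void) ref;
}
```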
Called early @@ -519,7 +544,8 @@ SingleDrvOutputs DerivationBuilderImpl::unprepareBuild() cleanupBuild(false); throw BuilderFailureError{ - !derivationType.isSandboxed() || diskFull ? BuildResult::TransientFailure : BuildResult::PermanentFailure, + !derivationType.isSandboxed() || diskFull ? BuildResult::Failure::TransientFailure + : BuildResult::Failure::PermanentFailure, status, diskFull ? "\nnote: build failure may have been caused by lack of free disk space" : "", }; @@ -680,30 +706,6 @@ std::optional DerivationBuilderImpl::startBuild() calls. */ prepareUser(); - /* Right platform? */ - if (!drvOptions.canBuildLocally(store, drv)) { - auto msg = - fmt("Cannot build '%s'.\n" - "Reason: " ANSI_RED "required system or feature not available" ANSI_NORMAL - "\n" - "Required system: '%s' with features {%s}\n" - "Current system: '%s' with features {%s}", - Magenta(store.printStorePath(drvPath)), - Magenta(drv.platform), - concatStringsSep(", ", drvOptions.getRequiredSystemFeatures(drv)), - Magenta(settings.thisSystem), - concatStringsSep(", ", store.Store::config.systemFeatures)); - - // since aarch64-darwin has Rosetta 2, this user can actually run x86_64-darwin on their hardware - we should - // tell them to run the command to install Darwin 2 - if (drv.platform == "x86_64-darwin" && settings.thisSystem == "aarch64-darwin") - msg += - fmt("\nNote: run `%s` to run programs for x86_64-darwin", - Magenta("/usr/sbin/softwareupdate --install-rosetta && launchctl stop org.nixos.nix-daemon")); - - throw BuildError(BuildResult::InputRejected, msg); - } - auto buildDir = store.config->getBuildDir(); createDirs(buildDir); @@ -943,11 +945,43 @@ void DerivationBuilderImpl::openSlave() throw SysError("cannot pipe standard error into log file"); } +#if NIX_WITH_AWS_AUTH +std::optional DerivationBuilderImpl::preResolveAwsCredentials() +{ + if (drv.isBuiltin() && drv.builder == "builtin:fetchurl") { + auto url = drv.env.find("url"); + if (url != drv.env.end()) { + try { + auto parsedUrl = parseURL(url->second); + if (parsedUrl.scheme == "s3") { + debug("Pre-resolving AWS credentials for S3 URL in builtin:fetchurl"); + auto s3Url = ParsedS3URL::parse(parsedUrl); + + // Use the preResolveAwsCredentials from aws-creds + auto credentials = getAwsCredentialsProvider()->getCredentials(s3Url); + debug("Successfully pre-resolved AWS credentials in parent process"); + return credentials; + } + } catch (const std::exception & e) { + debug("Error pre-resolving S3 credentials: %s", e.what()); + } + } + } + return std::nullopt; +} +#endif + void DerivationBuilderImpl::startChild() { - pid = startProcess([&]() { + RunChildArgs args{ +#if NIX_WITH_AWS_AUTH + .awsCredentials = preResolveAwsCredentials(), +#endif + }; + + pid = startProcess([this, args = std::move(args)]() { openSlave(); - runChild(); + runChild(std::move(args)); }); } @@ -1204,7 +1238,7 @@ void DerivationBuilderImpl::writeBuilderFile(const std::string & name, std::stri chownToBuilder(fd.get(), path); } -void DerivationBuilderImpl::runChild() +void DerivationBuilderImpl::runChild(RunChildArgs args) { /* Warning: in the child we should absolutely not make any SQLite calls! 
*/ @@ -1221,6 +1255,9 @@ void DerivationBuilderImpl::runChild() BuiltinBuilderContext ctx{ .drv = drv, .tmpDirInSandbox = tmpDirInSandbox(), +#if NIX_WITH_AWS_AUTH + .awsCredentials = args.awsCredentials, +#endif }; if (drv.isBuiltin() && drv.builder == "builtin:fetchurl") { @@ -1389,7 +1426,7 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() auto optSt = maybeLstat(actualPath.c_str()); if (!optSt) throw BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "builder for '%s' failed to produce output path for output '%s' at '%s'", store.printStorePath(drvPath), outputName, @@ -1404,7 +1441,7 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() if ((!S_ISLNK(st.st_mode) && (st.st_mode & (S_IWGRP | S_IWOTH))) || (buildUser && st.st_uid != buildUser->getUID())) throw BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "suspicious ownership or permission on '%s' for output '%s'; rejecting this build output", actualPath, outputName); @@ -1442,7 +1479,7 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() auto orifu = get(outputReferencesIfUnregistered, name); if (!orifu) throw BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "no output reference for '%s' in build of '%s'", name, store.printStorePath(drvPath)); @@ -1467,7 +1504,7 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() {[&](const std::string & path, const std::string & parent) { // TODO with more -vvvv also show the temporary paths for manual inspection. return BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "cycle detected in build of '%s' in the references of output '%s' from output '%s'", store.printStorePath(drvPath), path, @@ -1561,12 +1598,13 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() auto newInfoFromCA = [&](const DerivationOutput::CAFloating outputHash) -> ValidPathInfo { auto st = get(outputStats, outputName); if (!st) - throw BuildError(BuildResult::OutputRejected, "output path %1% without valid stats info", actualPath); + throw BuildError( + BuildResult::Failure::OutputRejected, "output path %1% without valid stats info", actualPath); if (outputHash.method.getFileIngestionMethod() == FileIngestionMethod::Flat) { /* The output path should be a regular file without execute permission. */ if (!S_ISREG(st->st_mode) || (st->st_mode & S_IXUSR) != 0) throw BuildError( - BuildResult::OutputRejected, + BuildResult::Failure::OutputRejected, "output path '%1%' should be a non-executable regular file " "since recursive hashing is not enabled (one of outputHashMode={flat,text} is true)", actualPath); @@ -1704,7 +1742,6 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() if (buildMode == bmRepair) { /* Path already exists, need to replace it */ replaceValidPath(store.toRealPath(finalDestPath), actualPath); - actualPath = store.toRealPath(finalDestPath); } else if (buildMode == bmCheck) { /* Path already exists, and we want to compare, so we leave out new path in place. 
*/ @@ -1718,7 +1755,6 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() auto destPath = store.toRealPath(finalDestPath); deletePath(destPath); movePath(actualPath, destPath); - actualPath = destPath; } } @@ -1771,7 +1807,9 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() debug("unreferenced input: '%1%'", store.printStorePath(i)); } - store.optimisePath(actualPath, NoRepair); // FIXME: combine with scanForReferences() + if (!store.isValidPath(newInfo.path)) + store.optimisePath( + store.toRealPath(finalDestPath), NoRepair); // FIXME: combine with scanForReferences() newInfo.deriver = drvPath; newInfo.ultimate = true; @@ -1828,7 +1866,12 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() for (auto & [outputName, newInfo] : infos) { auto oldinfo = get(initialOutputs, outputName); assert(oldinfo); - auto thisRealisation = Realisation{.id = DrvOutput{oldinfo->outputHash, outputName}, .outPath = newInfo.path}; + auto thisRealisation = Realisation{ + { + .outPath = newInfo.path, + }, + DrvOutput{oldinfo->outputHash, outputName}, + }; if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations) && !drv.type().isImpure()) { store.signRealisation(thisRealisation); store.registerDrvOutput(thisRealisation); @@ -1902,6 +1945,7 @@ StorePath DerivationBuilderImpl::makeFallbackPath(const StorePath & path) #include "chroot-derivation-builder.cc" #include "linux-derivation-builder.cc" #include "darwin-derivation-builder.cc" +#include "external-derivation-builder.cc" namespace nix { diff --git a/src/libstore/unix/build/external-derivation-builder.cc b/src/libstore/unix/build/external-derivation-builder.cc new file mode 100644 index 000000000..7ddb6e093 --- /dev/null +++ b/src/libstore/unix/build/external-derivation-builder.cc @@ -0,0 +1,118 @@ +namespace nix { + +struct ExternalDerivationBuilder : DerivationBuilderImpl +{ + ExternalBuilder externalBuilder; + + ExternalDerivationBuilder( + LocalStore & store, + std::unique_ptr miscMethods, + DerivationBuilderParams params, + ExternalBuilder externalBuilder) + : DerivationBuilderImpl(store, std::move(miscMethods), std::move(params)) + , externalBuilder(std::move(externalBuilder)) + { + experimentalFeatureSettings.require(Xp::ExternalBuilders); + } + + Path tmpDirInSandbox() override + { + /* In a sandbox, for determinism, always use the same temporary + directory. 
*/ + return "/build"; + } + + void setBuildTmpDir() override + { + tmpDir = topTmpDir + "/build"; + createDir(tmpDir, 0700); + } + + void startChild() override + { + if (drvOptions.getRequiredSystemFeatures(drv).count("recursive-nix")) + throw Error("'recursive-nix' is not supported yet by external derivation builders"); + + auto json = nlohmann::json::object(); + + json.emplace("version", 1); + json.emplace("builder", drv.builder); + { + auto l = nlohmann::json::array(); + for (auto & i : drv.args) + l.push_back(rewriteStrings(i, inputRewrites)); + json.emplace("args", std::move(l)); + } + { + auto j = nlohmann::json::object(); + for (auto & [name, value] : env) + j.emplace(name, rewriteStrings(value, inputRewrites)); + json.emplace("env", std::move(j)); + } + json.emplace("topTmpDir", topTmpDir); + json.emplace("tmpDir", tmpDir); + json.emplace("tmpDirInSandbox", tmpDirInSandbox()); + json.emplace("storeDir", store.storeDir); + json.emplace("realStoreDir", store.config->realStoreDir.get()); + json.emplace("system", drv.platform); + { + auto l = nlohmann::json::array(); + for (auto & i : inputPaths) + l.push_back(store.printStorePath(i)); + json.emplace("inputPaths", std::move(l)); + } + { + auto l = nlohmann::json::object(); + for (auto & i : scratchOutputs) + l.emplace(i.first, store.printStorePath(i.second)); + json.emplace("outputs", std::move(l)); + } + + // TODO(cole-h): writing this to stdin is too much effort right now, if we want to revisit + // that, see this comment by Eelco about how to make it not suck: + // https://github.com/DeterminateSystems/nix-src/pull/141#discussion_r2205493257 + auto jsonFile = std::filesystem::path{topTmpDir} / "build.json"; + writeFile(jsonFile, json.dump()); + + pid = startProcess([&]() { + openSlave(); + try { + commonChildInit(); + + Strings args = {externalBuilder.program}; + + if (!externalBuilder.args.empty()) { + args.insert(args.end(), externalBuilder.args.begin(), externalBuilder.args.end()); + } + + args.insert(args.end(), jsonFile); + + if (chdir(tmpDir.c_str()) == -1) + throw SysError("changing into '%1%'", tmpDir); + + chownToBuilder(topTmpDir); + + setUser(); + + debug("executing external builder: %s", concatStringsSep(" ", args)); + execv(externalBuilder.program.c_str(), stringsToCharPtrs(args).data()); + + throw SysError("executing '%s'", externalBuilder.program); + } catch (...) { + handleChildException(true); + _exit(1); + } + }); + } +}; + +std::unique_ptr makeExternalDerivationBuilder( + LocalStore & store, + std::unique_ptr miscMethods, + DerivationBuilderParams params, + const ExternalBuilder & handler) +{ + return std::make_unique(store, std::move(miscMethods), std::move(params), handler); +} + +} // namespace nix diff --git a/src/libstore/unix/build/linux-derivation-builder.cc b/src/libstore/unix/build/linux-derivation-builder.cc index f6e910d08..e96f83700 100644 --- a/src/libstore/unix/build/linux-derivation-builder.cc +++ b/src/libstore/unix/build/linux-derivation-builder.cc @@ -276,6 +276,12 @@ struct ChrootLinuxDerivationBuilder : ChrootDerivationBuilder, LinuxDerivationBu void startChild() override { + RunChildArgs args{ +# if NIX_WITH_AWS_AUTH + .awsCredentials = preResolveAwsCredentials(), +# endif + }; + /* Set up private namespaces for the build: - The PID namespace causes the build to start as PID 1. 
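For reference, the JSON document that `ExternalDerivationBuilder::startChild` above writes to `build.json` has roughly the following shape; the keys mirror the `json.emplace(...)` calls, while every path and store path below is an illustrative placeholder:

```json
{
  "version": 1,
  "builder": "/nix/store/...-bash/bin/bash",
  "args": ["-e", "/nix/store/...-default-builder.sh"],
  "env": { "out": "/nix/store/...-example" },
  "topTmpDir": "/tmp/nix-build-example.drv-0",
  "tmpDir": "/tmp/nix-build-example.drv-0/build",
  "tmpDirInSandbox": "/build",
  "storeDir": "/nix/store",
  "realStoreDir": "/nix/store",
  "system": "x86_64-linux",
  "inputPaths": ["/nix/store/...-dependency"],
  "outputs": { "out": "/nix/store/...-example" }
}
```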
@@ -343,7 +349,7 @@ struct ChrootLinuxDerivationBuilder : ChrootDerivationBuilder, LinuxDerivationBu if (usingUserNamespace) options.cloneFlags |= CLONE_NEWUSER; - pid_t child = startProcess([&]() { runChild(); }, options); + pid_t child = startProcess([this, args = std::move(args)]() { runChild(std::move(args)); }, options); writeFull(sendPid.writeSide.get(), fmt("%d\n", child)); _exit(0); diff --git a/src/libstore/worker-protocol.cc b/src/libstore/worker-protocol.cc index 1bbff64a2..a17d2c028 100644 --- a/src/libstore/worker-protocol.cc +++ b/src/libstore/worker-protocol.cc @@ -165,10 +165,14 @@ void WorkerProto::Serialise::write( BuildResult WorkerProto::Serialise::read(const StoreDirConfig & store, WorkerProto::ReadConn conn) { BuildResult res; - res.status = static_cast(readInt(conn.from)); - conn.from >> res.errorMsg; + BuildResult::Success success; + BuildResult::Failure failure; + + auto rawStatus = readInt(conn.from); + conn.from >> failure.errorMsg; + if (GET_PROTOCOL_MINOR(conn.version) >= 29) { - conn.from >> res.timesBuilt >> res.isNonDeterministic >> res.startTime >> res.stopTime; + conn.from >> res.timesBuilt >> failure.isNonDeterministic >> res.startTime >> res.stopTime; } if (GET_PROTOCOL_MINOR(conn.version) >= 37) { res.cpuUser = WorkerProto::Serialise>::read(store, conn); @@ -177,28 +181,56 @@ BuildResult WorkerProto::Serialise::read(const StoreDirConfig & sto if (GET_PROTOCOL_MINOR(conn.version) >= 28) { auto builtOutputs = WorkerProto::Serialise::read(store, conn); for (auto && [output, realisation] : builtOutputs) - res.builtOutputs.insert_or_assign(std::move(output.outputName), std::move(realisation)); + success.builtOutputs.insert_or_assign(std::move(output.outputName), std::move(realisation)); } + + if (BuildResult::Success::statusIs(rawStatus)) { + success.status = static_cast(rawStatus); + res.inner = std::move(success); + } else { + failure.status = static_cast(rawStatus); + res.inner = std::move(failure); + } + return res; } void WorkerProto::Serialise::write( const StoreDirConfig & store, WorkerProto::WriteConn conn, const BuildResult & res) { - conn.to << res.status << res.errorMsg; - if (GET_PROTOCOL_MINOR(conn.version) >= 29) { - conn.to << res.timesBuilt << res.isNonDeterministic << res.startTime << res.stopTime; - } - if (GET_PROTOCOL_MINOR(conn.version) >= 37) { - WorkerProto::write(store, conn, res.cpuUser); - WorkerProto::write(store, conn, res.cpuSystem); - } - if (GET_PROTOCOL_MINOR(conn.version) >= 28) { - DrvOutputs builtOutputs; - for (auto & [output, realisation] : res.builtOutputs) - builtOutputs.insert_or_assign(realisation.id, realisation); - WorkerProto::write(store, conn, builtOutputs); - } + /* The protocol predates the use of sum types (std::variant) to + separate the success or failure cases. As such, it transits some + success- or failure-only fields in both cases. This helper + function helps support this: in each case, we just pass the old + default value for the fields that don't exist in that case. 
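Downstream of these serialisers, consumers now pattern-match on `BuildResult::inner` instead of reading flat fields. A minimal sketch of that consuming side, assuming the `Success`/`Failure` members shown in this diff (the header location is an assumption):

```cpp
#include <string>
#include <variant>

#include "nix/store/build-result.hh" // assumed header location

std::string summarize(const nix::BuildResult & res)
{
    return std::visit(
        nix::overloaded{
            [](const nix::BuildResult::Success & s) -> std::string {
                // Success-only data (e.g. builtOutputs) lives on Success now.
                return "built " + std::to_string(s.builtOutputs.size()) + " output(s)";
            },
            [](const nix::BuildResult::Failure & f) -> std::string {
                // Failure-only data: status, errorMsg, isNonDeterministic.
                return "failed: " + f.errorMsg;
            },
        },
        res.inner);
}
```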
*/ + auto common = [&](std::string_view errorMsg, bool isNonDeterministic, const auto & builtOutputs) { + conn.to << errorMsg; + if (GET_PROTOCOL_MINOR(conn.version) >= 29) { + conn.to << res.timesBuilt << isNonDeterministic << res.startTime << res.stopTime; + } + if (GET_PROTOCOL_MINOR(conn.version) >= 37) { + WorkerProto::write(store, conn, res.cpuUser); + WorkerProto::write(store, conn, res.cpuSystem); + } + if (GET_PROTOCOL_MINOR(conn.version) >= 28) { + DrvOutputs builtOutputsFullKey; + for (auto & [output, realisation] : builtOutputs) + builtOutputsFullKey.insert_or_assign(realisation.id, realisation); + WorkerProto::write(store, conn, builtOutputsFullKey); + } + }; + std::visit( + overloaded{ + [&](const BuildResult::Failure & failure) { + conn.to << failure.status; + common(failure.errorMsg, failure.isNonDeterministic, decltype(BuildResult::Success::builtOutputs){}); + }, + [&](const BuildResult::Success & success) { + conn.to << success.status; + common(/*errorMsg=*/"", /*isNonDeterministic=*/false, success.builtOutputs); + }, + }, + res.inner); } ValidPathInfo WorkerProto::Serialise::read(const StoreDirConfig & store, ReadConn conn) @@ -219,11 +251,10 @@ void WorkerProto::Serialise::write( UnkeyedValidPathInfo WorkerProto::Serialise::read(const StoreDirConfig & store, ReadConn conn) { - auto deriver = readString(conn.from); + auto deriver = WorkerProto::Serialise>::read(store, conn); auto narHash = Hash::parseAny(readString(conn.from), HashAlgorithm::SHA256); UnkeyedValidPathInfo info(narHash); - if (deriver != "") - info.deriver = store.parseStorePath(deriver); + info.deriver = std::move(deriver); info.references = WorkerProto::Serialise::read(store, conn); conn.from >> info.registrationTime >> info.narSize; if (GET_PROTOCOL_MINOR(conn.version) >= 16) { @@ -237,8 +268,8 @@ UnkeyedValidPathInfo WorkerProto::Serialise::read(const St void WorkerProto::Serialise::write( const StoreDirConfig & store, WriteConn conn, const UnkeyedValidPathInfo & pathInfo) { - conn.to << (pathInfo.deriver ? store.printStorePath(*pathInfo.deriver) : "") - << pathInfo.narHash.to_string(HashFormat::Base16, false); + WorkerProto::write(store, conn, pathInfo.deriver); + conn.to << pathInfo.narHash.to_string(HashFormat::Base16, false); WorkerProto::write(store, conn, pathInfo.references); conn.to << pathInfo.registrationTime << pathInfo.narSize; if (GET_PROTOCOL_MINOR(conn.version) >= 16) { diff --git a/src/libutil-c/meson.build b/src/libutil-c/meson.build index 54fd53c74..1806dbb6f 100644 --- a/src/libutil-c/meson.build +++ b/src/libutil-c/meson.build @@ -32,7 +32,6 @@ config_priv_h = configure_file( ) subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'nix_api_util.cc', diff --git a/src/libutil-c/nix_api_util.h b/src/libutil-c/nix_api_util.h index 4d7f394fa..d301e5743 100644 --- a/src/libutil-c/nix_api_util.h +++ b/src/libutil-c/nix_api_util.h @@ -155,6 +155,8 @@ typedef struct nix_c_context nix_c_context; /** * @brief Called to get the value of a string owned by Nix. * + * The `start` data is borrowed and the function must not assume that the buffer persists after it returns. + * * @param[in] start the string to copy. * @param[in] n the string length. * @param[in] user_data optional, arbitrary data, passed to the nix_get_string_callback when it's called. 
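A conforming `nix_get_string_callback`, then, copies the borrowed buffer before returning. A minimal sketch, using the `start`/`n`/`user_data` parameters documented above (the surrounding wiring is assumed):

```cpp
#include <string>

// `start` is only valid for the duration of the call, so take a copy
// immediately rather than stashing the pointer.
static void copy_string(const char * start, unsigned int n, void * user_data)
{
    static_cast<std::string *>(user_data)->assign(start, n);
}

// Typical use: pass &result as user_data to an API taking this callback;
// once the call returns, `result` owns an independent copy of the string.
```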
diff --git a/src/libutil-test-support/include/nix/util/tests/json-characterization.hh b/src/libutil-test-support/include/nix/util/tests/json-characterization.hh index 5a38b8e2c..d713c615b 100644 --- a/src/libutil-test-support/include/nix/util/tests/json-characterization.hh +++ b/src/libutil-test-support/include/nix/util/tests/json-characterization.hh @@ -24,12 +24,12 @@ struct JsonCharacterizationTest : virtual CharacterizationTest * @param test hook that takes the contents of the file and does the * actual work */ - void readJsonTest(PathView testStem, const T & expected) + void readJsonTest(PathView testStem, const T & expected, auto... args) { using namespace nlohmann; readTest(Path{testStem} + ".json", [&](const auto & encodedRaw) { auto encoded = json::parse(encodedRaw); - T decoded = adl_serializer::from_json(encoded); + T decoded = adl_serializer::from_json(encoded, args...); ASSERT_EQ(decoded, expected); }); } diff --git a/src/libutil-test-support/meson.build b/src/libutil-test-support/meson.build index 1ca251ce8..64231107e 100644 --- a/src/libutil-test-support/meson.build +++ b/src/libutil-test-support/meson.build @@ -27,7 +27,6 @@ rapidcheck = dependency('rapidcheck') deps_public += rapidcheck subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( 'hash.cc', diff --git a/src/libutil-tests/alignment.cc b/src/libutil-tests/alignment.cc new file mode 100644 index 000000000..bef0c435d --- /dev/null +++ b/src/libutil-tests/alignment.cc @@ -0,0 +1,18 @@ +#include "nix/util/alignment.hh" + +#include + +namespace nix { + +TEST(alignUp, value) +{ + for (uint64_t i = 1; i <= 8; ++i) + EXPECT_EQ(alignUp(i, 8), 8); +} + +TEST(alignUp, notAPowerOf2) +{ + ASSERT_DEATH({ alignUp(1u, 42); }, "alignment must be a power of 2"); +} + +} // namespace nix diff --git a/src/libutil-tests/archive.cc b/src/libutil-tests/archive.cc new file mode 100644 index 000000000..427b29d41 --- /dev/null +++ b/src/libutil-tests/archive.cc @@ -0,0 +1,61 @@ +#include "nix/util/archive.hh" +#include "nix/util/tests/characterization.hh" +#include "nix/util/tests/gmock-matchers.hh" + +#include + +namespace nix { + +namespace { + +class NarTest : public CharacterizationTest +{ + std::filesystem::path unitTestData = getUnitTestData() / "nars"; + +public: + std::filesystem::path goldenMaster(std::string_view testStem) const override + { + return unitTestData / (std::string(testStem) + ".nar"); + } +}; + +class InvalidNarTest : public NarTest, public ::testing::WithParamInterface> +{}; + +} // namespace + +TEST_P(InvalidNarTest, throwsErrorMessage) +{ + const auto & [name, message] = GetParam(); + readTest(name, [&](const std::string & narContents) { + ASSERT_THAT( + [&]() { + StringSource source{narContents}; + NullFileSystemObjectSink sink; + parseDump(sink, source); + }, + ::testing::ThrowsMessage(testing::HasSubstrIgnoreANSIMatcher(message))); + }); +} + +INSTANTIATE_TEST_SUITE_P( + NarTest, + InvalidNarTest, + ::testing::Values( + std::pair{"invalid-tag-instead-of-contents", "bad archive: expected tag 'contents', got 'AAAAAAAA'"}, + // Unpacking a NAR with a NUL character in a file name should fail. + std::pair{"nul-character", "bad archive: NAR contains invalid file name 'f"}, + // Likewise for a '.' filename. + std::pair{"dot", "bad archive: NAR contains invalid file name '.'"}, + // Likewise for a '..' filename. + std::pair{"dotdot", "bad archive: NAR contains invalid file name '..'"}, + // Likewise for a filename containing a slash. 
+ std::pair{"slash", "bad archive: NAR contains invalid file name 'x/y'"}, + // Likewise for an empty filename. + std::pair{"empty", "bad archive: NAR contains invalid file name ''"}, + // Test that the 'executable' field cannot come before the 'contents' field. + std::pair{"executable-after-contents", "bad archive: expected tag ')', got 'executable'"}, + // Test that the 'name' field cannot come before the 'node' field in a directory entry. + std::pair{"name-after-node", "bad archive: expected tag 'name'"})); + +} // namespace nix diff --git a/src/libutil-tests/canon-path.cc b/src/libutil-tests/canon-path.cc index 971a9cc96..aae9285c4 100644 --- a/src/libutil-tests/canon-path.cc +++ b/src/libutil-tests/canon-path.cc @@ -42,6 +42,15 @@ TEST(CanonPath, basic) } } +TEST(CanonPath, nullBytes) +{ + std::string s = "/hello/world"; + s[8] = '\0'; + ASSERT_THROW(CanonPath("/").push(std::string(1, '\0')), BadCanonPath); + ASSERT_THROW(CanonPath(std::string_view(s)), BadCanonPath); + ASSERT_THROW(CanonPath(s, CanonPath::root), BadCanonPath); +} + TEST(CanonPath, from_existing) { CanonPath p0("foo//bar/"); diff --git a/src/libutil-tests/data/hash/blake3-base64.json b/src/libutil-tests/data/hash/blake3-base64.json new file mode 100644 index 000000000..b9a20cdb4 --- /dev/null +++ b/src/libutil-tests/data/hash/blake3-base64.json @@ -0,0 +1,5 @@ +{ + "algorithm": "blake3", + "format": "base64", + "hash": "nnDuFEmWX7YtBJBAoe0G7Dd0MNpuwTFz58T//NKL6YA=" +} diff --git a/src/libutil-tests/data/hash/sha256-base16.json b/src/libutil-tests/data/hash/sha256-base16.json new file mode 100644 index 000000000..c813824ab --- /dev/null +++ b/src/libutil-tests/data/hash/sha256-base16.json @@ -0,0 +1,5 @@ +{ + "algorithm": "sha256", + "format": "base16", + "hash": "f0e4c2f76c58916ec258f246851bea091d14d4247a2fc3e18694461b1816e13b" +} diff --git a/src/libutil-tests/data/hash/sha256-base64.json b/src/libutil-tests/data/hash/sha256-base64.json new file mode 100644 index 000000000..838af80a7 --- /dev/null +++ b/src/libutil-tests/data/hash/sha256-base64.json @@ -0,0 +1,5 @@ +{ + "algorithm": "sha256", + "format": "base64", + "hash": "8OTC92xYkW7CWPJGhRvqCR0U1CR6L8PhhpRGGxgW4Ts=" +} diff --git a/src/libutil-tests/data/hash/sha256-nix32.json b/src/libutil-tests/data/hash/sha256-nix32.json new file mode 100644 index 000000000..0d6253a79 --- /dev/null +++ b/src/libutil-tests/data/hash/sha256-nix32.json @@ -0,0 +1,5 @@ +{ + "algorithm": "sha256", + "format": "nix32", + "hash": "0fz12qc1nillhvhw6bvs4ka18789x8dqaipjb316x4aqdkvw5r7h" +} diff --git a/src/libutil-tests/data/hash/simple.json b/src/libutil-tests/data/hash/simple.json new file mode 100644 index 000000000..838af80a7 --- /dev/null +++ b/src/libutil-tests/data/hash/simple.json @@ -0,0 +1,5 @@ +{ + "algorithm": "sha256", + "format": "base64", + "hash": "8OTC92xYkW7CWPJGhRvqCR0U1CR6L8PhhpRGGxgW4Ts=" +} diff --git a/tests/functional/dot.nar b/src/libutil-tests/data/nars/dot.nar similarity index 100% rename from tests/functional/dot.nar rename to src/libutil-tests/data/nars/dot.nar diff --git a/tests/functional/dotdot.nar b/src/libutil-tests/data/nars/dotdot.nar similarity index 100% rename from tests/functional/dotdot.nar rename to src/libutil-tests/data/nars/dotdot.nar diff --git a/tests/functional/empty.nar b/src/libutil-tests/data/nars/empty.nar similarity index 100% rename from tests/functional/empty.nar rename to src/libutil-tests/data/nars/empty.nar diff --git a/tests/functional/executable-after-contents.nar b/src/libutil-tests/data/nars/executable-after-contents.nar 
similarity index 100%
rename from tests/functional/executable-after-contents.nar
rename to src/libutil-tests/data/nars/executable-after-contents.nar
diff --git a/src/libutil-tests/data/nars/invalid-tag-instead-of-contents.nar b/src/libutil-tests/data/nars/invalid-tag-instead-of-contents.nar
new file mode 100644
index 000000000..80dbf5a12
Binary files /dev/null and b/src/libutil-tests/data/nars/invalid-tag-instead-of-contents.nar differ
diff --git a/tests/functional/name-after-node.nar b/src/libutil-tests/data/nars/name-after-node.nar
similarity index 100%
rename from tests/functional/name-after-node.nar
rename to src/libutil-tests/data/nars/name-after-node.nar
diff --git a/tests/functional/nul-character.nar b/src/libutil-tests/data/nars/nul-character.nar
similarity index 100%
rename from tests/functional/nul-character.nar
rename to src/libutil-tests/data/nars/nul-character.nar
diff --git a/tests/functional/slash.nar b/src/libutil-tests/data/nars/slash.nar
similarity index 100%
rename from tests/functional/slash.nar
rename to src/libutil-tests/data/nars/slash.nar
diff --git a/src/libutil-tests/hash.cc b/src/libutil-tests/hash.cc
index 15e639180..48b7c36be 100644
--- a/src/libutil-tests/hash.cc
+++ b/src/libutil-tests/hash.cc
@@ -4,30 +4,30 @@
 #include
 #include "nix/util/hash.hh"
-#include "nix/util/tests/characterization.hh"
+#include "nix/util/tests/json-characterization.hh"

 namespace nix {

-class HashTest : public CharacterizationTest
+class HashTest : public virtual CharacterizationTest
 {
     std::filesystem::path unitTestData = getUnitTestData() / "hash";

 public:
-    /**
-     * We set these in tests rather than the regular globals so we don't have
-     * to worry about race conditions if the tests run concurrently.
-     */
-    ExperimentalFeatureSettings mockXpSettings;
-
     std::filesystem::path goldenMaster(std::string_view testStem) const override
     {
         return unitTestData / testStem;
     }
 };

-class BLAKE3HashTest : public HashTest
+struct BLAKE3HashTest : virtual HashTest
 {
+    /**
+     * We set these in tests rather than the regular globals so we don't have
+     * to worry about race conditions if the tests run concurrently.
+ */ + ExperimentalFeatureSettings mockXpSettings; + void SetUp() override { mockXpSettings.set("experimental-features", "blake3-hashes"); @@ -203,4 +203,97 @@ TEST(hashFormat, testParseHashFormatOptException) { ASSERT_EQ(parseHashFormatOpt("sha0042"), std::nullopt); } + +/* ---------------------------------------------------------------------------- + * JSON + * --------------------------------------------------------------------------*/ + +using nlohmann::json; + +struct HashJsonTest : virtual HashTest, + JsonCharacterizationTest, + ::testing::WithParamInterface> +{}; + +struct HashJsonParseOnlyTest : virtual HashTest, + JsonCharacterizationTest, + ::testing::WithParamInterface> +{}; + +struct BLAKE3HashJsonTest : virtual HashTest, + BLAKE3HashTest, + JsonCharacterizationTest, + ::testing::WithParamInterface> +{}; + +TEST_P(HashJsonTest, from_json) +{ + auto & [name, expected] = GetParam(); + readJsonTest(name, expected); +} + +TEST_P(HashJsonTest, to_json) +{ + auto & [name, value] = GetParam(); + writeJsonTest(name, value); +} + +TEST_P(HashJsonParseOnlyTest, from_json) +{ + auto & [name, expected] = GetParam(); + readJsonTest(name, expected); +} + +TEST_P(BLAKE3HashJsonTest, from_json) +{ + auto & [name, expected] = GetParam(); + readJsonTest(name, expected, mockXpSettings); +} + +TEST_P(BLAKE3HashJsonTest, to_json) +{ + auto & [name, expected] = GetParam(); + writeJsonTest(name, expected); +} + +// Round-trip tests (from_json + to_json) for base64 format only +// (to_json always outputs base64) +INSTANTIATE_TEST_SUITE_P( + HashJSON, + HashJsonTest, + ::testing::Values( + std::pair{ + "simple", + hashString(HashAlgorithm::SHA256, "asdf"), + }, + std::pair{ + "sha256-base64", + hashString(HashAlgorithm::SHA256, "asdf"), + })); + +// Parse-only tests for non-base64 formats +// These verify C++ can deserialize other formats correctly +INSTANTIATE_TEST_SUITE_P( + HashJSONParseOnly, + HashJsonParseOnlyTest, + ::testing::Values( + std::pair{ + "sha256-base16", + hashString(HashAlgorithm::SHA256, "asdf"), + }, + std::pair{ + "sha256-nix32", + hashString(HashAlgorithm::SHA256, "asdf"), + })); + +INSTANTIATE_TEST_SUITE_P(BLAKE3HashJSONParseOnly, BLAKE3HashJsonTest, ([] { + ExperimentalFeatureSettings mockXpSettings; + mockXpSettings.set("experimental-features", "blake3-hashes"); + return ::testing::Values( + std::pair{ + "blake3-base64", + hashString(HashAlgorithm::BLAKE3, "asdf", mockXpSettings), + }); + }())); + } // namespace nix diff --git a/src/libutil-tests/json-utils.cc b/src/libutil-tests/json-utils.cc index 7d02894c6..b5c011355 100644 --- a/src/libutil-tests/json-utils.cc +++ b/src/libutil-tests/json-utils.cc @@ -70,7 +70,7 @@ TEST(valueAt, simpleObject) auto nested = R"({ "hello": { "world": "" } })"_json; - ASSERT_EQ(valueAt(valueAt(getObject(nested), "hello"), "world"), ""); + ASSERT_EQ(valueAt(getObject(valueAt(getObject(nested), "hello")), "world"), ""); } TEST(valueAt, missingKey) @@ -119,10 +119,12 @@ TEST(getArray, wrongAssertions) { auto json = R"({ "object": {}, "array": [], "string": "", "int": 0, "boolean": false })"_json; - ASSERT_THROW(getArray(valueAt(json, "object")), Error); - ASSERT_THROW(getArray(valueAt(json, "string")), Error); - ASSERT_THROW(getArray(valueAt(json, "int")), Error); - ASSERT_THROW(getArray(valueAt(json, "boolean")), Error); + auto & obj = getObject(json); + + ASSERT_THROW(getArray(valueAt(obj, "object")), Error); + ASSERT_THROW(getArray(valueAt(obj, "string")), Error); + ASSERT_THROW(getArray(valueAt(obj, "int")), Error); + 
ASSERT_THROW(getArray(valueAt(obj, "boolean")), Error); } TEST(getString, rightAssertions) @@ -136,10 +138,12 @@ TEST(getString, wrongAssertions) { auto json = R"({ "object": {}, "array": [], "string": "", "int": 0, "boolean": false })"_json; - ASSERT_THROW(getString(valueAt(json, "object")), Error); - ASSERT_THROW(getString(valueAt(json, "array")), Error); - ASSERT_THROW(getString(valueAt(json, "int")), Error); - ASSERT_THROW(getString(valueAt(json, "boolean")), Error); + auto & obj = getObject(json); + + ASSERT_THROW(getString(valueAt(obj, "object")), Error); + ASSERT_THROW(getString(valueAt(obj, "array")), Error); + ASSERT_THROW(getString(valueAt(obj, "int")), Error); + ASSERT_THROW(getString(valueAt(obj, "boolean")), Error); } TEST(getIntegralNumber, rightAssertions) @@ -156,18 +160,20 @@ TEST(getIntegralNumber, wrongAssertions) auto json = R"({ "object": {}, "array": [], "string": "", "int": 0, "signed": -256, "large": 128, "boolean": false })"_json; - ASSERT_THROW(getUnsigned(valueAt(json, "object")), Error); - ASSERT_THROW(getUnsigned(valueAt(json, "array")), Error); - ASSERT_THROW(getUnsigned(valueAt(json, "string")), Error); - ASSERT_THROW(getUnsigned(valueAt(json, "boolean")), Error); - ASSERT_THROW(getUnsigned(valueAt(json, "signed")), Error); + auto & obj = getObject(json); - ASSERT_THROW(getInteger(valueAt(json, "object")), Error); - ASSERT_THROW(getInteger(valueAt(json, "array")), Error); - ASSERT_THROW(getInteger(valueAt(json, "string")), Error); - ASSERT_THROW(getInteger(valueAt(json, "boolean")), Error); - ASSERT_THROW(getInteger(valueAt(json, "large")), Error); - ASSERT_THROW(getInteger(valueAt(json, "signed")), Error); + ASSERT_THROW(getUnsigned(valueAt(obj, "object")), Error); + ASSERT_THROW(getUnsigned(valueAt(obj, "array")), Error); + ASSERT_THROW(getUnsigned(valueAt(obj, "string")), Error); + ASSERT_THROW(getUnsigned(valueAt(obj, "boolean")), Error); + ASSERT_THROW(getUnsigned(valueAt(obj, "signed")), Error); + + ASSERT_THROW(getInteger(valueAt(obj, "object")), Error); + ASSERT_THROW(getInteger(valueAt(obj, "array")), Error); + ASSERT_THROW(getInteger(valueAt(obj, "string")), Error); + ASSERT_THROW(getInteger(valueAt(obj, "boolean")), Error); + ASSERT_THROW(getInteger(valueAt(obj, "large")), Error); + ASSERT_THROW(getInteger(valueAt(obj, "signed")), Error); } TEST(getBoolean, rightAssertions) @@ -181,24 +187,28 @@ TEST(getBoolean, wrongAssertions) { auto json = R"({ "object": {}, "array": [], "string": "", "int": 0, "boolean": false })"_json; - ASSERT_THROW(getBoolean(valueAt(json, "object")), Error); - ASSERT_THROW(getBoolean(valueAt(json, "array")), Error); - ASSERT_THROW(getBoolean(valueAt(json, "string")), Error); - ASSERT_THROW(getBoolean(valueAt(json, "int")), Error); + auto & obj = getObject(json); + + ASSERT_THROW(getBoolean(valueAt(obj, "object")), Error); + ASSERT_THROW(getBoolean(valueAt(obj, "array")), Error); + ASSERT_THROW(getBoolean(valueAt(obj, "string")), Error); + ASSERT_THROW(getBoolean(valueAt(obj, "int")), Error); } TEST(optionalValueAt, existing) { auto json = R"({ "string": "ssh-rsa" })"_json; - ASSERT_EQ(optionalValueAt(json, "string"), std::optional{"ssh-rsa"}); + auto * ptr = optionalValueAt(getObject(json), "string"); + ASSERT_TRUE(ptr); + ASSERT_EQ(*ptr, R"("ssh-rsa")"_json); } TEST(optionalValueAt, empty) { auto json = R"({})"_json; - ASSERT_EQ(optionalValueAt(json, "string"), std::nullopt); + ASSERT_EQ(optionalValueAt(getObject(json), "string"), nullptr); } TEST(getNullable, null) diff --git a/src/libutil-tests/meson.build 
b/src/libutil-tests/meson.build index 2d28c8bb1..c75f4d90a 100644 --- a/src/libutil-tests/meson.build +++ b/src/libutil-tests/meson.build @@ -42,9 +42,10 @@ config_priv_h = configure_file( ) subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = files( + 'alignment.cc', + 'archive.cc', 'args.cc', 'base-n.cc', 'canon-path.cc', @@ -97,7 +98,7 @@ this_exe = executable( test( meson.project_name(), this_exe, - env : asan_test_options_env + { + env : { '_NIX_TEST_UNIT_DATA' : meson.current_source_dir() / 'data', }, protocol : 'gtest', diff --git a/src/libutil-tests/package.nix b/src/libutil-tests/package.nix index 077d36a4d..c06de6894 100644 --- a/src/libutil-tests/package.nix +++ b/src/libutil-tests/package.nix @@ -61,7 +61,6 @@ mkMesonExecutable (finalAttrs: { mkdir -p "$HOME" '' + '' - export ASAN_OPTIONS=abort_on_error=1:print_summary=1:detect_leaks=0 export _NIX_TEST_UNIT_DATA=${./data} ${stdenv.hostPlatform.emulator buildPackages} ${lib.getExe finalAttrs.finalPackage} touch $out diff --git a/src/libutil-tests/strings.cc b/src/libutil-tests/strings.cc index bd740ce0c..dbbecd514 100644 --- a/src/libutil-tests/strings.cc +++ b/src/libutil-tests/strings.cc @@ -494,4 +494,63 @@ TEST(shellSplitString, testUnbalancedQuotes) ASSERT_THROW(shellSplitString("foo\"bar\\\""), Error); } +/* ---------------------------------------------------------------------------- + * optionalBracket + * --------------------------------------------------------------------------*/ + +TEST(optionalBracket, emptyContent) +{ + ASSERT_EQ(optionalBracket(" (", "", ")"), ""); +} + +TEST(optionalBracket, nonEmptyContent) +{ + ASSERT_EQ(optionalBracket(" (", "foo", ")"), " (foo)"); +} + +TEST(optionalBracket, emptyPrefixAndSuffix) +{ + ASSERT_EQ(optionalBracket("", "foo", ""), "foo"); +} + +TEST(optionalBracket, emptyContentEmptyBrackets) +{ + ASSERT_EQ(optionalBracket("", "", ""), ""); +} + +TEST(optionalBracket, complexBrackets) +{ + ASSERT_EQ(optionalBracket(" [[[", "content", "]]]"), " [[[content]]]"); +} + +TEST(optionalBracket, onlyPrefix) +{ + ASSERT_EQ(optionalBracket("prefix", "content", ""), "prefixcontent"); +} + +TEST(optionalBracket, onlySuffix) +{ + ASSERT_EQ(optionalBracket("", "content", "suffix"), "contentsuffix"); +} + +TEST(optionalBracket, optionalWithValue) +{ + ASSERT_EQ(optionalBracket(" (", std::optional("foo"), ")"), " (foo)"); +} + +TEST(optionalBracket, optionalNullopt) +{ + ASSERT_EQ(optionalBracket(" (", std::optional(std::nullopt), ")"), ""); +} + +TEST(optionalBracket, optionalEmptyString) +{ + ASSERT_EQ(optionalBracket(" (", std::optional(""), ")"), ""); +} + +TEST(optionalBracket, optionalStringViewWithValue) +{ + ASSERT_EQ(optionalBracket(" (", std::optional("bar"), ")"), " (bar)"); +} + } // namespace nix diff --git a/src/libutil-tests/url.cc b/src/libutil-tests/url.cc index 5c7b02248..cd6816096 100644 --- a/src/libutil-tests/url.cc +++ b/src/libutil-tests/url.cc @@ -868,6 +868,12 @@ TEST_P(ParsedURLPathSegmentsTest, segmentsAreCorrect) EXPECT_EQ(encodeUrlPath(segments), testCase.path); } +TEST_P(ParsedURLPathSegmentsTest, to_string) +{ + const auto & testCase = GetParam(); + EXPECT_EQ(testCase.url, parseURL(testCase.url).to_string()); +} + INSTANTIATE_TEST_SUITE_P( ParsedURL, ParsedURLPathSegmentsTest, @@ -886,6 +892,13 @@ INSTANTIATE_TEST_SUITE_P( .skipEmpty = false, .description = "empty_authority_empty_path", }, + ParsedURLPathSegmentsTestCase{ + .url = "path:/", + .segments = {"", ""}, + .path = "/", + .skipEmpty = false, + .description = 
"empty_authority_root_path", + }, ParsedURLPathSegmentsTestCase{ .url = "scheme:///", .segments = {"", ""}, diff --git a/src/libutil-tests/util.cc b/src/libutil-tests/util.cc index c48b97e8e..32114d9da 100644 --- a/src/libutil-tests/util.cc +++ b/src/libutil-tests/util.cc @@ -158,6 +158,7 @@ TEST(renderSize, misc) ASSERT_EQ(renderSize(972, true), " 0.9 KiB"); ASSERT_EQ(renderSize(973, true), " 1.0 KiB"); // FIXME: should round down ASSERT_EQ(renderSize(1024, true), " 1.0 KiB"); + ASSERT_EQ(renderSize(-1024, true), " -1.0 KiB"); ASSERT_EQ(renderSize(1024 * 1024, true), "1024.0 KiB"); ASSERT_EQ(renderSize(1100 * 1024, true), " 1.1 MiB"); ASSERT_EQ(renderSize(2ULL * 1024 * 1024 * 1024, true), " 2.0 GiB"); diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc index b978ac4db..737d9b2fe 100644 --- a/src/libutil/archive.cc +++ b/src/libutil/archive.cc @@ -6,6 +6,7 @@ #include // for strcasecmp #include "nix/util/archive.hh" +#include "nix/util/alignment.hh" #include "nix/util/config-global.hh" #include "nix/util/posix-source-accessor.hh" #include "nix/util/source-path.hh" @@ -46,12 +47,12 @@ void SourceAccessor::dumpPath(const CanonPath & path, Sink & sink, PathFilter & writePadding(*size, sink); }; - std::function dump; + sink << narVersionMagic1; - dump = [&](const CanonPath & path) { + [&, &this_(*this)](this const auto & dump, const CanonPath & path) -> void { checkInterrupt(); - auto st = lstat(path); + auto st = this_.lstat(path); sink << "("; @@ -68,7 +69,7 @@ void SourceAccessor::dumpPath(const CanonPath & path, Sink & sink, PathFilter & /* If we're on a case-insensitive system like macOS, undo the case hack applied by restorePath(). */ StringMap unhacked; - for (auto & i : readDirectory(path)) + for (auto & i : this_.readDirectory(path)) if (archiveSettings.useCaseHack) { std::string name(i.first); size_t pos = i.first.find(caseHackSuffix); @@ -91,16 +92,13 @@ void SourceAccessor::dumpPath(const CanonPath & path, Sink & sink, PathFilter & } else if (st.type == tSymlink) - sink << "type" << "symlink" << "target" << readLink(path); + sink << "type" << "symlink" << "target" << this_.readLink(path); else throw Error("file '%s' has an unsupported type", path); sink << ")"; - }; - - sink << narVersionMagic1; - dump(path); + }(path); } time_t dumpPathAndGetMtime(const Path & path, Sink & sink, PathFilter & filter) @@ -132,6 +130,11 @@ static void parseContents(CreateRegularFileSink & sink, Source & source) sink.preallocateContents(size); + if (sink.skipContents) { + source.skip(alignUp(size, 8)); + return; + } + uint64_t left = size; std::array buf; @@ -166,7 +169,7 @@ static void parse(FileSystemObjectSink & sink, Source & source, const CanonPath auto expectTag = [&](std::string_view expected) { auto tag = getString(); if (tag != expected) - throw badArchive("expected tag '%s', got '%s'", expected, tag); + throw badArchive("expected tag '%s', got '%s'", expected, tag.substr(0, 1024)); }; expectTag("("); @@ -187,8 +190,10 @@ static void parse(FileSystemObjectSink & sink, Source & source, const CanonPath tag = getString(); } - if (tag == "contents") - parseContents(crf, source); + if (tag != "contents") + throw badArchive("expected tag 'contents', got '%s'", tag); + + parseContents(crf, source); expectTag(")"); }); diff --git a/src/libutil/args.cc b/src/libutil/args.cc index f4309473b..05b5a25c7 100644 --- a/src/libutil/args.cc +++ b/src/libutil/args.cc @@ -318,6 +318,7 @@ void RootArgs::parseCmdline(const Strings & _cmdline, bool allowShebang) } catch (SystemError &) { } } + for (auto 
pos = cmdline.begin(); pos != cmdline.end();) { auto arg = *pos; @@ -354,6 +355,9 @@ void RootArgs::parseCmdline(const Strings & _cmdline, bool allowShebang) processArgs(pendingArgs, true); + if (!completions) + checkArgs(); + initialFlagsProcessed(); /* Now that we are done parsing, make sure that any experimental @@ -384,7 +388,7 @@ bool Args::processFlag(Strings::iterator & pos, Strings::iterator end) auto & rootArgs = getRoot(); - auto process = [&](const std::string & name, const Flag & flag) -> bool { + auto process = [&](const std::string & name, Flag & flag) -> bool { ++pos; if (auto & f = flag.experimentalFeature) @@ -413,6 +417,7 @@ bool Args::processFlag(Strings::iterator & pos, Strings::iterator end) } if (!anyCompleted) flag.handler.fun(std::move(args)); + flag.timesUsed++; return true; }; @@ -504,6 +509,14 @@ bool Args::processArgs(const Strings & args, bool finish) return res; } +void Args::checkArgs() +{ + for (auto & [name, flag] : longFlags) { + if (flag->required && flag->timesUsed == 0) + throw UsageError("required argument '--%s' is missing", name); + } +} + nlohmann::json Args::toJSON() { auto flags = nlohmann::json::object(); @@ -643,6 +656,13 @@ bool MultiCommand::processArgs(const Strings & args, bool finish) return Args::processArgs(args, finish); } +void MultiCommand::checkArgs() +{ + Args::checkArgs(); + if (command) + command->second->checkArgs(); +} + nlohmann::json MultiCommand::toJSON() { auto cmds = nlohmann::json::object(); diff --git a/src/libutil/canon-path.cc b/src/libutil/canon-path.cc index 07a3a6193..22ca3e066 100644 --- a/src/libutil/canon-path.cc +++ b/src/libutil/canon-path.cc @@ -3,23 +3,41 @@ #include "nix/util/file-path-impl.hh" #include "nix/util/strings-inline.hh" +#include + namespace nix { -CanonPath CanonPath::root = CanonPath("/"); +const CanonPath CanonPath::root = CanonPath("/"); static std::string absPathPure(std::string_view path) { return canonPathInner(path, [](auto &, auto &) {}); } +static void ensureNoNullBytes(std::string_view s) +{ + if (std::memchr(s.data(), '\0', s.size())) [[unlikely]] { + using namespace std::string_view_literals; + auto str = replaceStrings(std::string(s), "\0"sv, "␀"sv); + throw BadCanonPath("path segment '%s' must not contain null (\\0) bytes", str); + } +} + CanonPath::CanonPath(std::string_view raw) : path(absPathPure(concatStrings("/", raw))) +{ + ensureNoNullBytes(raw); +} + +CanonPath::CanonPath(const char * raw) + : path(absPathPure(concatStrings("/", raw))) { } CanonPath::CanonPath(std::string_view raw, const CanonPath & root) : path(absPathPure(raw.size() > 0 && raw[0] == '/' ? raw : concatStrings(root.abs(), "/", raw))) { + ensureNoNullBytes(raw); } CanonPath::CanonPath(const std::vector & elems) @@ -80,6 +98,7 @@ void CanonPath::push(std::string_view c) { assert(c.find('/') == c.npos); assert(c != "." 
&& c != ".."); + ensureNoNullBytes(c); if (!isRoot()) path += '/'; path += c; diff --git a/src/libutil/configuration.cc b/src/libutil/configuration.cc index dc9d91f63..7a0ed22ea 100644 --- a/src/libutil/configuration.cc +++ b/src/libutil/configuration.cc @@ -500,10 +500,10 @@ bool ExperimentalFeatureSettings::isEnabled(const ExperimentalFeature & feature) return std::find(f.begin(), f.end(), feature) != f.end(); } -void ExperimentalFeatureSettings::require(const ExperimentalFeature & feature) const +void ExperimentalFeatureSettings::require(const ExperimentalFeature & feature, std::string reason) const { if (!isEnabled(feature)) - throw MissingExperimentalFeature(feature); + throw MissingExperimentalFeature(feature, std::move(reason)); } bool ExperimentalFeatureSettings::isEnabled(const std::optional & feature) const diff --git a/src/libutil/experimental-features.cc b/src/libutil/experimental-features.cc index 60d6bf74d..198d021bb 100644 --- a/src/libutil/experimental-features.cc +++ b/src/libutil/experimental-features.cc @@ -1,5 +1,6 @@ #include "nix/util/experimental-features.hh" #include "nix/util/fmt.hh" +#include "nix/util/strings.hh" #include "nix/util/util.hh" #include @@ -304,6 +305,14 @@ constexpr std::array xpFeatureDetails )", .trackingUrl = "https://github.com/NixOS/nix/milestone/55", }, + { + .tag = Xp::ExternalBuilders, + .name = "external-builders", + .description = R"( + Enables support for external builders / sandbox providers. + )", + .trackingUrl = "", + }, { .tag = Xp::BLAKE3Hashes, .name = "blake3-hashes", @@ -368,11 +377,13 @@ std::set parseFeatures(const StringSet & rawFeatures) return res; } -MissingExperimentalFeature::MissingExperimentalFeature(ExperimentalFeature feature) +MissingExperimentalFeature::MissingExperimentalFeature(ExperimentalFeature feature, std::string reason) : Error( - "experimental Nix feature '%1%' is disabled; add '--extra-experimental-features %1%' to enable it", - showExperimentalFeature(feature)) + "experimental Nix feature '%1%' is disabled%2%; add '--extra-experimental-features %1%' to enable it", + showExperimentalFeature(feature), + Uncolored(optionalBracket(" (", reason, ")"))) , missingFeature(feature) + , reason{reason} { } diff --git a/src/libutil/fs-sink.cc b/src/libutil/fs-sink.cc index 6efd5e0c7..45ef57a9f 100644 --- a/src/libutil/fs-sink.cc +++ b/src/libutil/fs-sink.cc @@ -196,6 +196,8 @@ void NullFileSystemObjectSink::createRegularFile( void isExecutable() override {} } crf; + crf.skipContents = true; + // Even though `NullFileSystemObjectSink` doesn't do anything, it's important // that we call the function, to e.g. advance the parser using this // sink. diff --git a/src/libutil/hash.cc b/src/libutil/hash.cc index b67dc7807..56eed931b 100644 --- a/src/libutil/hash.cc +++ b/src/libutil/hash.cc @@ -13,6 +13,7 @@ #include "nix/util/split.hh" #include "nix/util/base-n.hh" #include "nix/util/base-nix-32.hh" +#include "nix/util/json-utils.hh" #include #include @@ -141,9 +142,13 @@ static HashFormat baseFromSize(std::string_view rest, HashAlgorithm algo) * * @param rest the string view to parse. Must not include any `(:|-)` prefix. 
*/ -static Hash parseLowLevel(std::string_view rest, HashAlgorithm algo, DecodeNamePair pair) +static Hash parseLowLevel( + std::string_view rest, + HashAlgorithm algo, + DecodeNamePair pair, + const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings) { - Hash res{algo}; + Hash res{algo, xpSettings}; std::string d; try { d = pair.decode(rest); @@ -244,9 +249,10 @@ Hash Hash::parseNonSRIUnprefixed(std::string_view s, HashAlgorithm algo) return parseExplicitFormatUnprefixed(s, algo, baseFromSize(s, algo)); } -Hash Hash::parseExplicitFormatUnprefixed(std::string_view s, HashAlgorithm algo, HashFormat format) +Hash Hash::parseExplicitFormatUnprefixed( + std::string_view s, HashAlgorithm algo, HashFormat format, const ExperimentalFeatureSettings & xpSettings) { - return parseLowLevel(s, algo, baseExplicit(format)); + return parseLowLevel(s, algo, baseExplicit(format), xpSettings); } Hash Hash::random(HashAlgorithm algo) @@ -446,10 +452,12 @@ std::string_view printHashFormat(HashFormat HashFormat) } } -std::optional<HashAlgorithm> parseHashAlgoOpt(std::string_view s) +std::optional<HashAlgorithm> parseHashAlgoOpt(std::string_view s, const ExperimentalFeatureSettings & xpSettings) { - if (s == "blake3") + if (s == "blake3") { + xpSettings.require(Xp::BLAKE3Hashes); return HashAlgorithm::BLAKE3; + } if (s == "md5") return HashAlgorithm::MD5; if (s == "sha1") @@ -461,9 +469,9 @@ std::optional<HashAlgorithm> parseHashAlgoOpt(std::string_view s) return std::nullopt; } -HashAlgorithm parseHashAlgo(std::string_view s) +HashAlgorithm parseHashAlgo(std::string_view s, const ExperimentalFeatureSettings & xpSettings) { - auto opt_h = parseHashAlgoOpt(s); + auto opt_h = parseHashAlgoOpt(s, xpSettings); if (opt_h) return *opt_h; else @@ -491,3 +499,27 @@ std::string_view printHashAlgo(HashAlgorithm ha) } } // namespace nix + +namespace nlohmann { + +using namespace nix; + +Hash adl_serializer<Hash>::from_json(const json & json, const ExperimentalFeatureSettings & xpSettings) +{ + auto & obj = getObject(json); + auto algo = parseHashAlgo(getString(valueAt(obj, "algorithm")), xpSettings); + auto format = parseHashFormat(getString(valueAt(obj, "format"))); + auto & hashS = getString(valueAt(obj, "hash")); + return Hash::parseExplicitFormatUnprefixed(hashS, algo, format, xpSettings); +} + +void adl_serializer<Hash>::to_json(json & json, const Hash & hash) +{ + json = { + {"format", printHashFormat(HashFormat::Base64)}, + {"algorithm", printHashAlgo(hash.algo)}, + {"hash", hash.to_string(HashFormat::Base64, false)}, + }; +} + +} // namespace nlohmann diff --git a/src/libutil/include/nix/util/alignment.hh b/src/libutil/include/nix/util/alignment.hh new file mode 100644 index 000000000..a4e5af4d6 --- /dev/null +++ b/src/libutil/include/nix/util/alignment.hh @@ -0,0 +1,23 @@ +#pragma once +///@file + +#include <bit> +#include <cassert> +#include <cstdint> +#include <type_traits> + +namespace nix { + +/// Aligns val upwards to be a multiple of alignment. +/// +/// @pre alignment must be a power of 2.
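+/// +/// Worked example (illustrative, not part of the original header): with an 8-byte alignment, alignUp(1u, 8) == 8, alignUp(8u, 8) == 8, and alignUp(9u, 8) == 16; parseContents() in archive.cc above relies on this to skip a regular file's contents together with its padding to the next 8-byte boundary of the NAR stream.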
+template<typename T> + requires std::is_unsigned_v<T> +constexpr T alignUp(T val, unsigned alignment) +{ + assert(std::has_single_bit(alignment) && "alignment must be a power of 2"); + T mask = ~(T{alignment} - 1u); + return (val + alignment - 1) & mask; +} + +} // namespace nix diff --git a/src/libutil/include/nix/util/args.hh b/src/libutil/include/nix/util/args.hh index 443db445f..99f6e23e8 100644 --- a/src/libutil/include/nix/util/args.hh +++ b/src/libutil/include/nix/util/args.hh @@ -202,8 +202,12 @@ public: Strings labels; Handler handler; CompleterClosure completer; + bool required = false; std::optional<ExperimentalFeature> experimentalFeature; + + // FIXME: this should be private, but that breaks designated initializers. + size_t timesUsed = 0; }; protected: @@ -283,6 +287,8 @@ protected: StringSet hiddenCategories; + virtual void checkArgs(); + /** * Called after all command line flags before the first non-flag * argument (if any) have been processed. @@ -428,6 +434,8 @@ public: protected: std::string commandName = ""; bool aliasUsed = false; + + void checkArgs() override; }; Strings argvToStrings(int argc, char ** argv); diff --git a/src/libutil/include/nix/util/canon-path.hh b/src/libutil/include/nix/util/canon-path.hh index dd07929b4..b9b2fff25 100644 --- a/src/libutil/include/nix/util/canon-path.hh +++ b/src/libutil/include/nix/util/canon-path.hh @@ -1,6 +1,7 @@ #pragma once ///@file +#include "nix/util/error.hh" #include #include #include @@ -12,6 +13,8 @@ namespace nix { +MakeError(BadCanonPath, Error); + /** * A canonical representation of a path. It ensures the following: * * - There are no components equal to '.' or '..'. * + * - It does not contain NUL bytes. + * * `CanonPath` are "virtual" Nix paths for abstract file system objects; * they are always Unix-style paths, regardless of what OS Nix is * running on. The `/` root doesn't denote the ambient host file system * */ CanonPath(std::string_view raw); - explicit CanonPath(const char * raw) - : CanonPath(std::string_view(raw)) - { - } + explicit CanonPath(const char * raw); struct unchecked_t {}; @@ -69,7 +71,7 @@ public: */ CanonPath(const std::vector<std::string> & elems); - static CanonPath root; + static const CanonPath root; /** * If `raw` starts with a slash, return diff --git a/src/libutil/include/nix/util/closure.hh b/src/libutil/include/nix/util/closure.hh index d55d52c87..9e37b4cfb 100644 --- a/src/libutil/include/nix/util/closure.hh +++ b/src/libutil/include/nix/util/closure.hh @@ -24,11 +24,9 @@ void computeClosure(const set startElts, set & res, GetEdgesAsync getEd Sync<State> state_(State{0, res, 0}); - std::function<void(const T &)> enqueue; - std::condition_variable done; - enqueue = [&](const T & current) -> void { + auto enqueue = [&](this auto & enqueue, const T & current) -> void { { auto state(state_.lock()); if (state->exc) diff --git a/src/libutil/include/nix/util/configuration.hh b/src/libutil/include/nix/util/configuration.hh index 65391721c..541febdb5 100644 --- a/src/libutil/include/nix/util/configuration.hh +++ b/src/libutil/include/nix/util/configuration.hh @@ -463,7 +463,20 @@ struct ExperimentalFeatureSettings : Config * Require an experimental feature be enabled, throwing an error if it is * not. */ - void require(const ExperimentalFeature &) const; + void require(const ExperimentalFeature &, std::string reason = "") const; + + /** + * Require an experimental feature be enabled, throwing an error if it is + * not. The reason is lazily evaluated only if the feature is disabled.
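+ * + * Hypothetical usage (illustrative only; the call sites in this patch use the eager overload): + * + * xpSettings.require(Xp::BLAKE3Hashes, [&]() -> std::string { + * return fmt("while parsing hash '%s'", hashStr); // hashStr is a made-up local + * }); + * + * The lambda runs only when the feature is disabled, so composing the reason string costs nothing on the success path.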
+ */ + template<typename GetReason> + requires std::invocable<GetReason> && std::convertible_to<std::invoke_result_t<GetReason>, std::string> + void require(const ExperimentalFeature & feature, GetReason && getReason) const + { + if (isEnabled(feature)) + return; + require(feature, getReason()); + } /** * `std::nullopt` pointer means no feature, which means there is nothing that could be diff --git a/src/libutil/include/nix/util/error.hh b/src/libutil/include/nix/util/error.hh index e564ca5b9..cc8460592 100644 --- a/src/libutil/include/nix/util/error.hh +++ b/src/libutil/include/nix/util/error.hh @@ -192,13 +192,27 @@ public: err.traces.push_front(trace); } + /** + * Prepends an item to the error trace, as is usual for extra context. + * + * @param pos Nullable source position to put in trace item + * @param fs Format string, see `HintFmt` + * @param args... Format string arguments. + */ template<typename... Args> - void addTrace(std::shared_ptr<Pos> && e, std::string_view fs, const Args &... args) + void addTrace(std::shared_ptr<Pos> && pos, std::string_view fs, const Args &... args) { - addTrace(std::move(e), HintFmt(std::string(fs), args...)); + addTrace(std::move(pos), HintFmt(std::string(fs), args...)); } - void addTrace(std::shared_ptr<Pos> && e, HintFmt hint, TracePrint print = TracePrint::Default); + /** + * Prepends an item to the error trace, as is usual for extra context. + * + * @param pos Nullable source position to put in trace item + * @param hint Formatted error message + * @param print Optional, whether to always print (used by `addErrorContext`) + */ + void addTrace(std::shared_ptr<Pos> && pos, HintFmt hint, TracePrint print = TracePrint::Default); bool hasTrace() const { diff --git a/src/libutil/include/nix/util/experimental-features.hh b/src/libutil/include/nix/util/experimental-features.hh index 0a8f15863..aca14bfbb 100644 --- a/src/libutil/include/nix/util/experimental-features.hh +++ b/src/libutil/include/nix/util/experimental-features.hh @@ -37,6 +37,7 @@ enum struct ExperimentalFeature { MountedSSHStore, VerifiedFetches, PipeOperators, + ExternalBuilders, BLAKE3Hashes, }; @@ -87,7 +88,9 @@ public: */ ExperimentalFeature missingFeature; - MissingExperimentalFeature(ExperimentalFeature missingFeature); + std::string reason; + + MissingExperimentalFeature(ExperimentalFeature missingFeature, std::string reason = ""); }; /** diff --git a/src/libutil/include/nix/util/fs-sink.hh b/src/libutil/include/nix/util/fs-sink.hh index f96fe3ef9..bd2db7f53 100644 --- a/src/libutil/include/nix/util/fs-sink.hh +++ b/src/libutil/include/nix/util/fs-sink.hh @@ -14,6 +14,14 @@ namespace nix { */ struct CreateRegularFileSink : Sink { + /** + * If set to true, the sink will not be called with the contents + * of the file. `preallocateContents()` will still be called to + * convey the file size. Useful for sinks that want to efficiently + * discard the contents of the file. + */ + bool skipContents = false; + virtual void isExecutable() = 0; /** diff --git a/src/libutil/include/nix/util/hash.hh b/src/libutil/include/nix/util/hash.hh index 571b6acca..e4f596091 100644 --- a/src/libutil/include/nix/util/hash.hh +++ b/src/libutil/include/nix/util/hash.hh @@ -5,6 +5,7 @@ #include "nix/util/types.hh" #include "nix/util/serialise.hh" #include "nix/util/file-system.hh" +#include "nix/util/json-impls.hh" namespace nix { @@ -97,7 +98,11 @@ struct Hash * @param explicitFormat cannot be SRI, but must be one of the * "bases".
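* * Example (illustrative): the JSON deserializer added in hash.cc above reads the "algorithm" and "format" keys first and then calls * * Hash::parseExplicitFormatUnprefixed(hashS, algo, format, xpSettings); * * so a Base64-formatted SHA-256 hash string round-trips through adl_serializer<Hash>.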
*/ - static Hash parseExplicitFormatUnprefixed(std::string_view s, HashAlgorithm algo, HashFormat explicitFormat); + static Hash parseExplicitFormatUnprefixed( + std::string_view s, + HashAlgorithm algo, + HashFormat explicitFormat, + const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); static Hash parseSRI(std::string_view original); @@ -188,12 +193,14 @@ std::string_view printHashFormat(HashFormat hashFormat); /** * Parse a string representing a hash algorithm. */ -HashAlgorithm parseHashAlgo(std::string_view s); +HashAlgorithm +parseHashAlgo(std::string_view s, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); /** * Will return nothing on parse error */ -std::optional<HashAlgorithm> parseHashAlgoOpt(std::string_view s); +std::optional<HashAlgorithm> +parseHashAlgoOpt(std::string_view s, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); /** * And the reverse. @@ -221,4 +228,29 @@ public: HashResult currentHash(); }; +template<> +struct json_avoids_null<Hash> : std::true_type +{}; + } // namespace nix + +template<> +struct std::hash<nix::Hash> +{ + std::size_t operator()(const nix::Hash & hash) const noexcept + { + assert(hash.hashSize > sizeof(size_t)); + return *reinterpret_cast<const std::size_t *>(&hash.hash); + } +}; + +namespace nix { + +inline std::size_t hash_value(const Hash & hash) +{ + return std::hash<Hash>{}(hash); +} + +} // namespace nix + +JSON_IMPL_WITH_XP_FEATURES(Hash) diff --git a/src/libutil/include/nix/util/json-impls.hh b/src/libutil/include/nix/util/json-impls.hh index 751fc410f..802c212e1 100644 --- a/src/libutil/include/nix/util/json-impls.hh +++ b/src/libutil/include/nix/util/json-impls.hh @@ -3,6 +3,8 @@ #include +#include "nix/util/experimental-features.hh" + // Following https://github.com/nlohmann/json#how-can-i-use-get-for-non-default-constructiblenon-copyable-types #define JSON_IMPL(TYPE) \ namespace nlohmann { \ @@ -14,3 +16,15 @@ static void to_json(json & json, const TYPE & t); \ }; \ } + +#define JSON_IMPL_WITH_XP_FEATURES(TYPE) \ + namespace nlohmann { \ + using namespace nix; \ + template<> \ + struct adl_serializer<TYPE> \ + { \ + static TYPE \ + from_json(const json & json, const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); \ + static void to_json(json & json, const TYPE & t); \ + }; \ + } diff --git a/src/libutil/include/nix/util/json-utils.hh b/src/libutil/include/nix/util/json-utils.hh index 4b5fb4b21..51ebb2b6c 100644 --- a/src/libutil/include/nix/util/json-utils.hh +++ b/src/libutil/include/nix/util/json-utils.hh @@ -2,7 +2,6 @@ ///@file #include -#include #include "nix/util/error.hh" #include "nix/util/types.hh" @@ -12,20 +11,25 @@ namespace nix { enum struct ExperimentalFeature; -const nlohmann::json * get(const nlohmann::json & map, const std::string & key); - -nlohmann::json * get(nlohmann::json & map, const std::string & key); - /** * Get the value of a json object at a key safely, failing with a nice * error if the key does not exist. * * Use instead of nlohmann::json::at() to avoid ugly exceptions.
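* * Example (illustrative), the same pattern used by adl_serializer<Hash>::from_json in hash.cc above: * * auto & obj = getObject(json); * auto algo = parseHashAlgo(getString(valueAt(obj, "algorithm"))); * auto & hashS = getString(valueAt(obj, "hash"));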
*/ -const nlohmann::json & valueAt(const nlohmann::json::object_t & map, const std::string & key); +const nlohmann::json & valueAt(const nlohmann::json::object_t & map, std::string_view key); -std::optional<nlohmann::json> optionalValueAt(const nlohmann::json::object_t & value, const std::string & key); -std::optional<nlohmann::json> nullableValueAt(const nlohmann::json::object_t & value, const std::string & key); +/** + * @return A pointer to the value associated with `key` if `value` + * contains `key`, otherwise return `nullptr` (not JSON `null`!). + */ +const nlohmann::json * optionalValueAt(const nlohmann::json::object_t & value, std::string_view key); + +/** + * Prevents bugs; see `get` for the same trick. + */ +const nlohmann::json & valueAt(nlohmann::json::object_t && map, std::string_view key) = delete; +const nlohmann::json * optionalValueAt(nlohmann::json::object_t && value, std::string_view key) = delete; /** * Downcast the json object, failing with a nice error if the conversion fails. diff --git a/src/libutil/include/nix/util/meson.build b/src/libutil/include/nix/util/meson.build index dcfaa8e3f..9a606e15d 100644 --- a/src/libutil/include/nix/util/meson.build +++ b/src/libutil/include/nix/util/meson.build @@ -4,6 +4,7 @@ include_dirs = [ include_directories('../..') ] headers = files( 'abstract-setting-to-json.hh', + 'alignment.hh', 'ansicolor.hh', 'archive.hh', 'args.hh', diff --git a/src/libutil/include/nix/util/ref.hh b/src/libutil/include/nix/util/ref.hh index 7cf5ef25e..7ba5349a6 100644 --- a/src/libutil/include/nix/util/ref.hh +++ b/src/libutil/include/nix/util/ref.hh @@ -17,6 +17,12 @@ private: std::shared_ptr<T> p; + void assertNonNull() + { + if (!p) + throw std::invalid_argument("null pointer cast to ref"); + } + public: using element_type = T; @@ -24,15 +30,19 @@ explicit ref(const std::shared_ptr<T> & p) : p(p) { - if (!p) - throw std::invalid_argument("null pointer cast to ref"); + assertNonNull(); + } + + explicit ref(std::shared_ptr<T> && p) + : p(std::move(p)) + { + assertNonNull(); } explicit ref(T * p) : p(p) { - if (!p) - throw std::invalid_argument("null pointer cast to ref"); + assertNonNull(); } T * operator->() const @@ -45,14 +55,22 @@ return *p; } - operator std::shared_ptr<T>() const + std::shared_ptr<T> get_ptr() const & { return p; } - std::shared_ptr<T> get_ptr() const + std::shared_ptr<T> get_ptr() && { - return p; + return std::move(p); + } + + /** + * Convenience to avoid explicit `get_ptr()` call in some cases. + */ + operator std::shared_ptr<T>(this auto && self) + { + return std::forward<decltype(self)>(self).get_ptr(); } template diff --git a/src/libutil/include/nix/util/serialise.hh b/src/libutil/include/nix/util/serialise.hh index 16e0d0fa5..09b33bf95 100644 --- a/src/libutil/include/nix/util/serialise.hh +++ b/src/libutil/include/nix/util/serialise.hh @@ -97,6 +97,8 @@ struct Source void drainInto(Sink & sink); std::string drain(); + + virtual void skip(size_t len); }; /** @@ -177,6 +179,7 @@ struct FdSource : BufferedSource Descriptor fd; size_t read = 0; BackedStringView endOfFileError{"unexpected end-of-file"}; + bool isSeekable = true; FdSource() : fd(INVALID_DESCRIPTOR) @@ -200,6 +203,8 @@ */ bool hasData(); + void skip(size_t len) override; + protected: size_t readUnbuffered(char * data, size_t len) override; private: @@ -225,10 +230,18 @@ struct StringSink : Sink void operator()(std::string_view data) override; }; +/** + * Source type that can be restarted.
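+ * + * Contract: after restart(), read() must yield the same bytes from the beginning again. Illustrative use, assuming the StringSource changes below: + * + * StringSource src{"hello"}; + * auto once = src.drain(); // "hello" + * src.restart(); + * auto twice = src.drain(); // "hello" again + * + * CompressedSource further below builds on this so that an already-compressed payload can be re-sent from the start.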
+ */ +struct RestartableSource : Source +{ + virtual void restart() = 0; +}; + /** * A source that reads data from a string. */ -struct StringSource : Source +struct StringSource : RestartableSource { std::string_view s; size_t pos; @@ -250,8 +263,68 @@ } size_t read(char * data, size_t len) override; + + void skip(size_t len) override; + + void restart() override + { + pos = 0; + } }; +/** + * Compresses a RestartableSource using the specified compression method. + * + * @note currently this buffers the entire compressed data stream in memory. In the future it may instead compress data + * on demand, lazily pulling from the original `RestartableSource`. In that case, the `size()` method would go away + * because we would not in fact know the compressed size in advance. + */ +struct CompressedSource : RestartableSource +{ +private: + std::string compressedData; + std::string compressionMethod; + StringSource stringSource; + +public: + /** + * Compress a RestartableSource using the specified compression method. + * + * @param source The source data to compress + * @param compressionMethod The compression method to use (e.g., "xz", "br") + */ + CompressedSource(RestartableSource & source, const std::string & compressionMethod); + + size_t read(char * data, size_t len) override + { + return stringSource.read(data, len); + } + + void restart() override + { + stringSource.restart(); + } + + uint64_t size() const + { + return compressedData.size(); + } + + std::string_view getCompressionMethod() const + { + return compressionMethod; + } +}; + +/** + * Create a restartable Source from a factory function. + * + * @param factory Factory function that returns a fresh instance of the Source. Gets + * called for each source restart. + * @pre factory must return an equivalent source for each invocation. + */ +std::unique_ptr<RestartableSource> restartableSourceFromFactory(std::function<std::unique_ptr<Source>()> factory); + /** * A sink that writes all incoming data to two other sinks. */ diff --git a/src/libutil/include/nix/util/source-accessor.hh b/src/libutil/include/nix/util/source-accessor.hh index 7419ef392..1006895b3 100644 --- a/src/libutil/include/nix/util/source-accessor.hh +++ b/src/libutil/include/nix/util/source-accessor.hh @@ -121,7 +121,7 @@ struct SourceAccessor : std::enable_shared_from_this<SourceAccessor> std::string typeString(); }; - Stat lstat(const CanonPath & path); + virtual Stat lstat(const CanonPath & path); virtual std::optional<Stat> maybeLstat(const CanonPath & path) = 0; @@ -180,6 +180,27 @@ */ std::optional<std::string> fingerprint; + /** + * Return the fingerprint for `path`. This is usually the + * fingerprint of the current accessor, but for composite + * accessors (like `MountedSourceAccessor`), we want to return the + * fingerprint of the "inner" accessor if the current one lacks a + * fingerprint. + * + * So this method is intended to return the most-outer accessor + * that has a fingerprint for `path`. It also returns the path that `path` + * corresponds to in that accessor. + * + * For example: in a `MountedSourceAccessor` that has + * `/nix/store/foo` mounted, + * `getFingerprint("/nix/store/foo/bar")` will return the path + * `/bar` and the fingerprint of the `/nix/store/foo` accessor. + */ + virtual std::pair<CanonPath, std::optional<std::string>> getFingerprint(const CanonPath & path) + { + return {path, fingerprint}; + } + /** * Return the maximum last-modified time of the files in this * tree, if available.
@@ -220,10 +241,4 @@ ref<SourceAccessor> makeFSSourceAccessor(std::filesystem::path root); */ ref<SourceAccessor> makeUnionSourceAccessor(std::vector<ref<SourceAccessor>> && accessors); -/** - * Creates a new source accessor which is confined to the subdirectory - * of the given source accessor. - */ -ref<SourceAccessor> projectSubdirSourceAccessor(ref<SourceAccessor>, CanonPath subdirectory); - } // namespace nix diff --git a/src/libutil/include/nix/util/strings.hh b/src/libutil/include/nix/util/strings.hh index b4ef66bfe..da6decc31 100644 --- a/src/libutil/include/nix/util/strings.hh +++ b/src/libutil/include/nix/util/strings.hh @@ -3,6 +3,7 @@ #include "nix/util/types.hh" #include +#include <concepts> #include #include #include @@ -12,11 +13,6 @@ namespace nix { -/* - * workaround for unavailable view() method (C++20) of std::ostringstream under MacOS with clang-16 - */ -std::string_view toView(const std::ostringstream & os); - /** * String tokenizer. * @@ -98,6 +94,44 @@ extern template std::string dropEmptyInitThenConcatStringsSep(std::string_view, */ std::list<std::string> shellSplitString(std::string_view s); +/** + * Conditionally wrap a string with prefix and suffix brackets. + * + * If `content` is empty, returns an empty string. + * Otherwise, returns `prefix + content + suffix`. + * + * Example: + * optionalBracket(" (", "foo", ")") == " (foo)" + * optionalBracket(" (", "", ")") == "" + * + * Design note: this would have been called `optionalParentheses`, except this + * function is more general and more explicit. Parentheses typically *also* need + * to be prefixed with a space in order to fit nicely in a piece of natural + * language. + */ +std::string optionalBracket(std::string_view prefix, std::string_view content, std::string_view suffix); + +/** + * Overload for optional content. + * + * If `content` is nullopt or contains an empty string, returns an empty string. + * Otherwise, returns `prefix + *content + suffix`. + * + * Example: + * optionalBracket(" (", std::optional("foo"), ")") == " (foo)" + * optionalBracket(" (", std::nullopt, ")") == "" + * optionalBracket(" (", std::optional(""), ")") == "" + */ +template<typename T> + requires std::convertible_to<const T &, std::string_view> +std::string optionalBracket(std::string_view prefix, const std::optional<T> & content, std::string_view suffix) +{ + if (!content || std::string_view(*content).empty()) { + return ""; + } + return optionalBracket(prefix, std::string_view(*content), suffix); +} + /** * Hash implementation that can be used for zero-copy heterogeneous lookup from * P1690R1[1] in unordered containers.
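A minimal self-contained sketch (not part of the patch) of the `optionalBracket` contract documented above, mirroring how experimental-features.cc uses it to append an optional parenthesized reason to the MissingExperimentalFeature message:

#include <cassert>
#include <string>
#include <string_view>

// Reimplements the contract of nix::optionalBracket() for illustration.
static std::string optionalBracket(std::string_view prefix, std::string_view content, std::string_view suffix)
{
    if (content.empty())
        return "";
    std::string result;
    result.reserve(prefix.size() + content.size() + suffix.size());
    result.append(prefix).append(content).append(suffix);
    return result;
}

int main()
{
    // With a reason, the message gains a bracketed suffix...
    assert(optionalBracket(" (", "required by the external builder", ")") == " (required by the external builder)");
    // ...and with an empty reason, nothing at all is appended.
    assert(optionalBracket(" (", "", ")").empty());
    return 0;
}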
diff --git a/src/libutil/include/nix/util/topo-sort.hh b/src/libutil/include/nix/util/topo-sort.hh index 9f403e2e6..aaf5dff16 100644 --- a/src/libutil/include/nix/util/topo-sort.hh +++ b/src/libutil/include/nix/util/topo-sort.hh @@ -14,9 +14,7 @@ std::vector<T> topoSort( std::vector<T> sorted; decltype(items) visited, parents; - std::function<void(const T &, const T *)> dfsVisit; - - dfsVisit = [&](const T & path, const T * parent) { + auto dfsVisit = [&](this auto & dfsVisit, const T & path, const T * parent) { if (parents.count(path)) throw makeCycleError(path, *parent); diff --git a/src/libutil/include/nix/util/url.hh b/src/libutil/include/nix/util/url.hh index f2bd79b08..1fc8c3f2b 100644 --- a/src/libutil/include/nix/util/url.hh +++ b/src/libutil/include/nix/util/url.hh @@ -6,6 +6,9 @@ #include "nix/util/error.hh" #include "nix/util/canon-path.hh" +#include "nix/util/split.hh" +#include "nix/util/util.hh" +#include "nix/util/variant-wrapper.hh" namespace nix { @@ -342,8 +345,7 @@ ParsedURL fixGitURL(const std::string & url); bool isValidSchemeName(std::string_view scheme); /** - * Either a ParsedURL or a verbatim string, but the string must be a valid - * ParsedURL. This is necessary because in certain cases URI must be passed + * Either a ParsedURL or a verbatim string. This is necessary because in certain cases URI must be passed * verbatim (e.g. in builtin fetchers), since those are specified by the user. * In those cases normalizations performed by the ParsedURL might be surprising * and undesirable, since Nix must be a universal client that has to work with * * Though we perform parsing and validation for internal needs. */ -struct ValidURL : private ParsedURL +struct VerbatimURL { - std::optional<std::string> encoded; + using Raw = std::variant<std::string, ParsedURL>; + Raw raw; - ValidURL(std::string str) - : ParsedURL(parseURL(str, /*lenient=*/false)) - , encoded(std::move(str)) + VerbatimURL(std::string_view s) + : raw(std::string{s}) { } - ValidURL(std::string_view str) - : ValidURL(std::string{str}) + VerbatimURL(std::string s) + : raw(std::move(s)) { } - ValidURL(ParsedURL parsed) - : ParsedURL{std::move(parsed)} + VerbatimURL(ParsedURL url) + : raw(std::move(url)) { } @@ -379,25 +381,46 @@ */ std::string to_string() const { - return encoded.or_else([&]() -> std::optional<std::string> { return ParsedURL::to_string(); }).value(); + return std::visit( + overloaded{ + [](const std::string & str) { return str; }, [](const ParsedURL & url) { return url.to_string(); }}, + raw); } - const ParsedURL & parsed() const & + const ParsedURL parsed() const { - return *this; + return std::visit( + overloaded{ + [](const std::string & str) { return parseURL(str); }, [](const ParsedURL & url) { return url; }}, + raw); } std::string_view scheme() const & { - return ParsedURL::scheme; + return std::visit( + overloaded{ + [](std::string_view str) { + auto scheme = splitPrefixTo(str, ':'); + if (!scheme) + throw BadURL("URL '%s' doesn't have a scheme", str); + return *scheme; + }, + [](const ParsedURL & url) -> std::string_view { return url.scheme; }}, + raw); } - const auto & path() const & - { - return ParsedURL::path; - } + /** + * Get the last non-empty path segment from the URL. + * + * This is useful for extracting filenames from URLs. + * For example, "https://example.com/path/to/file.txt?query=value" + * returns "file.txt". + * + * @return The last non-empty path segment, or std::nullopt if no such segment exists.
+ */ + std::optional<std::string> lastPathSegment() const; }; -std::ostream & operator<<(std::ostream & os, const ValidURL & url); +std::ostream & operator<<(std::ostream & os, const VerbatimURL & url); } // namespace nix diff --git a/src/libutil/include/nix/util/util.hh b/src/libutil/include/nix/util/util.hh index 26f03938a..1234937b4 100644 --- a/src/libutil/include/nix/util/util.hh +++ b/src/libutil/include/nix/util/util.hh @@ -104,7 +104,7 @@ N string2IntWithUnitPrefix(std::string_view s) * GiB`. If `align` is set, the number will be right-justified by * padding with spaces on the left. */ -std::string renderSize(uint64_t value, bool align = false); +std::string renderSize(int64_t value, bool align = false); /** * Parse a string into a float. @@ -333,8 +333,6 @@ struct overloaded : Ts... template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>; -std::string showBytes(uint64_t bytes); - /** * Provide an addition operator between strings and string_views * inexplicably omitted from the standard library. diff --git a/src/libutil/json-utils.cc b/src/libutil/json-utils.cc index 74b3b27cc..1502384e9 100644 --- a/src/libutil/json-utils.cc +++ b/src/libutil/json-utils.cc @@ -1,52 +1,21 @@ #include "nix/util/json-utils.hh" #include "nix/util/error.hh" #include "nix/util/types.hh" -#include -#include -#include +#include "nix/util/util.hh" namespace nix { -const nlohmann::json * get(const nlohmann::json & map, const std::string & key) +const nlohmann::json & valueAt(const nlohmann::json::object_t & map, std::string_view key) { - auto i = map.find(key); - if (i == map.end()) - return nullptr; - return &*i; -} - -nlohmann::json * get(nlohmann::json & map, const std::string & key) -{ - auto i = map.find(key); - if (i == map.end()) - return nullptr; - return &*i; -} - -const nlohmann::json & valueAt(const nlohmann::json::object_t & map, const std::string & key) -{ - if (!map.contains(key)) + if (auto * p = optionalValueAt(map, key)) + return *p; + else throw Error("Expected JSON object to contain key '%s' but it doesn't: %s", key, nlohmann::json(map).dump()); - - return map.at(key); } -std::optional<nlohmann::json> optionalValueAt(const nlohmann::json::object_t & map, const std::string & key) +const nlohmann::json * optionalValueAt(const nlohmann::json::object_t & map, std::string_view key) { - if (!map.contains(key)) - return std::nullopt; - - return std::optional{map.at(key)}; -} - -std::optional<nlohmann::json> nullableValueAt(const nlohmann::json::object_t & map, const std::string & key) -{ - auto value = valueAt(map, key); - - if (value.is_null()) - return std::nullopt; - - return std::optional{std::move(value)}; + return get(map, key); } const nlohmann::json * getNullable(const nlohmann::json & value) diff --git a/src/libutil/logging.cc b/src/libutil/logging.cc index 997110617..e2f28f553 100644 --- a/src/libutil/logging.cc +++ b/src/libutil/logging.cc @@ -121,7 +121,7 @@ public: std::ostringstream oss; showErrorInfo(oss, ei, loggerSettings.showTrace.get()); - log(ei.level, toView(oss)); + log(ei.level, oss.view()); } void startActivity( diff --git a/src/libutil/memory-source-accessor.cc b/src/libutil/memory-source-accessor.cc index caff5b56a..a9ffb7746 100644 --- a/src/libutil/memory-source-accessor.cc +++ b/src/libutil/memory-source-accessor.cc @@ -208,11 +208,16 @@ void MemorySink::createSymlink(const CanonPath & path, const std::string & targe ref<SourceAccessor> makeEmptySourceAccessor() { - static auto empty = make_ref<MemorySourceAccessor>().cast<SourceAccessor>(); - /* Don't forget to clear the display prefix, as the default constructed - SourceAccessor has the «unknown» prefix.
Since this accessor is supposed - to mimic an empty root directory the prefix needs to be empty. */ - empty->setPathDisplay(""); + static auto empty = []() { + auto empty = make_ref<MemorySourceAccessor>(); + MemorySink sink{*empty}; + sink.createDirectory(CanonPath::root); + /* Don't forget to clear the display prefix, as the default constructed + SourceAccessor has the «unknown» prefix. Since this accessor is supposed + to mimic an empty root directory the prefix needs to be empty. */ + empty->setPathDisplay(""); + return empty.cast<SourceAccessor>(); + }(); return empty; } diff --git a/src/libutil/meson.build b/src/libutil/meson.build index 8c9e1f1eb..8b7a5d977 100644 --- a/src/libutil/meson.build +++ b/src/libutil/meson.build @@ -64,7 +64,7 @@ boost = dependency( 'url', ], include_type : 'system', - version : '>=1.82.0', + version : '>=1.87.0', ) # boost is a public dependency, but not a pkg-config dependency unfortunately, so we # put in `deps_other`. @@ -118,7 +118,6 @@ config_priv_h = configure_file( ) subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') sources = [ config_priv_h ] + files( 'archive.cc', @@ -157,7 +156,6 @@ sources = [ config_priv_h ] + files( 'source-accessor.cc', 'source-path.cc', 'strings.cc', - 'subdir-source-accessor.cc', 'suggestions.cc', 'tarfile.cc', 'tee-logger.cc', diff --git a/src/libutil/mounted-source-accessor.cc b/src/libutil/mounted-source-accessor.cc index 5c0ecc1ff..d9398045c 100644 --- a/src/libutil/mounted-source-accessor.cc +++ b/src/libutil/mounted-source-accessor.cc @@ -27,6 +27,12 @@ struct MountedSourceAccessorImpl : MountedSourceAccessor return accessor->readFile(subpath); } + Stat lstat(const CanonPath & path) override + { + auto [accessor, subpath] = resolve(path); + return accessor->lstat(subpath); + } + std::optional<Stat> maybeLstat(const CanonPath & path) override { auto [accessor, subpath] = resolve(path); @@ -85,6 +91,14 @@ else return nullptr; } + + std::pair<CanonPath, std::optional<std::string>> getFingerprint(const CanonPath & path) override + { + if (fingerprint) + return {path, fingerprint}; + auto [accessor, subpath] = resolve(path); + return accessor->getFingerprint(subpath); + } }; ref<MountedSourceAccessor> makeMountedSourceAccessor(std::map<CanonPath, ref<SourceAccessor>> mounts) diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc index 15629935e..fea31fbb6 100644 --- a/src/libutil/serialise.cc +++ b/src/libutil/serialise.cc @@ -1,4 +1,5 @@ #include "nix/util/serialise.hh" +#include "nix/util/compression.hh" #include "nix/util/signals.hh" #include "nix/util/util.hh" @@ -94,9 +95,8 @@ void Source::drainInto(Sink & sink) { std::array<char, 8192> buf; while (true) { - size_t n; try { - n = read(buf.data(), buf.size()); + auto n = read(buf.data(), buf.size()); sink({buf.data(), n}); } catch (EndOfFile &) { break; @@ -111,6 +111,16 @@ std::string Source::drain() return std::move(s.s); } +void Source::skip(size_t len) +{ + std::array<char, 8192> buf; + while (len) { + auto n = read(buf.data(), std::min(len, buf.size())); + assert(n <= len); + len -= n; + } +} + size_t BufferedSource::read(char * data, size_t len) { if (!buffer) @@ bufPosIn = readUnbuffered(buffer.get(), bufSize); /* Copy out the data in the buffer. */ - size_t n = len > bufPosIn - bufPosOut ?
bufPosIn - bufPosOut : len; + auto n = std::min(len, bufPosIn - bufPosOut); memcpy(data, buffer.get() + bufPosOut, n); bufPosOut += n; if (bufPosIn == bufPosOut) @@ -191,6 +201,39 @@ bool FdSource::hasData() } } +void FdSource::skip(size_t len) +{ + /* Discard data in the buffer. */ + if (len && buffer && bufPosIn - bufPosOut) { + if (len >= bufPosIn - bufPosOut) { + len -= bufPosIn - bufPosOut; + bufPosIn = bufPosOut = 0; + } else { + bufPosOut += len; + len = 0; + } + } + +#ifndef _WIN32 + /* If we can, seek forward in the file to skip the rest. */ + if (isSeekable && len) { + if (lseek(fd, len, SEEK_CUR) == -1) { + if (errno == ESPIPE) + isSeekable = false; + else + throw SysError("seeking forward in file"); + } else { + read += len; + return; + } + } +#endif + + /* Otherwise, skip by reading. */ + if (len) + BufferedSource::skip(len); +} + size_t StringSource::read(char * data, size_t len) { if (pos == s.size()) @@ -200,6 +243,29 @@ return n; } +void StringSource::skip(size_t len) +{ + const size_t remain = s.size() - pos; + if (len > remain) { + pos = s.size(); + throw EndOfFile("end of string reached"); + } + pos += len; +} + +CompressedSource::CompressedSource(RestartableSource & source, const std::string & compressionMethod) + : compressedData([&]() { + StringSink sink; + auto compressionSink = makeCompressionSink(compressionMethod, sink); + source.drainInto(*compressionSink); + compressionSink->finish(); + return std::move(sink.s); + }()) + , compressionMethod(compressionMethod) + , stringSource(compressedData) +{ +} + std::unique_ptr<FinishSink> sourceToSink(std::function<void(Source &)> fun) { struct SourceToSink : FinishSink @@ -461,4 +527,41 @@ size_t ChainSource::read(char * data, size_t len) } } +std::unique_ptr<RestartableSource> restartableSourceFromFactory(std::function<std::unique_ptr<Source>()> factory) +{ + struct RestartableSourceImpl : RestartableSource + { + RestartableSourceImpl(decltype(factory) factory_) + : factory_(std::move(factory_)) + , impl(this->factory_()) + { + } + + decltype(factory) factory_; + std::unique_ptr<Source> impl = factory_(); + + size_t read(char * data, size_t len) override + { + return impl->read(data, len); + } + + bool good() override + { + return impl->good(); + } + + void skip(size_t len) override + { + return impl->skip(len); + } + + void restart() override + { + impl = factory_(); + } + }; + + return std::make_unique<RestartableSourceImpl>(std::move(factory)); +} + } // namespace nix diff --git a/src/libutil/strings.cc b/src/libutil/strings.cc index a95390089..c0c3d6602 100644 --- a/src/libutil/strings.cc +++ b/src/libutil/strings.cc @@ -8,23 +8,6 @@ namespace nix { -struct view_stringbuf : public std::stringbuf -{ - inline std::string_view toView() - { - auto begin = pbase(); - return {begin, begin + pubseekoff(0, std::ios_base::cur, std::ios_base::out)}; - } -}; - -__attribute__((no_sanitize("undefined"))) std::string_view toView(const std::ostringstream & os) -{ - /* Downcasting like this is very much undefined behavior, so we disable - UBSAN for this function.
*/ - auto buf = static_cast<view_stringbuf *>(os.rdbuf()); - return buf->toView(); -} - template std::list<std::string> tokenizeString(std::string_view s, std::string_view separators); template StringSet tokenizeString(std::string_view s, std::string_view separators); template std::vector<std::string> tokenizeString(std::string_view s, std::string_view separators); @@ -155,4 +138,18 @@ std::list<std::string> shellSplitString(std::string_view s) return result; } + +std::string optionalBracket(std::string_view prefix, std::string_view content, std::string_view suffix) +{ + if (content.empty()) { + return ""; + } + std::string result; + result.reserve(prefix.size() + content.size() + suffix.size()); + result.append(prefix); + result.append(content); + result.append(suffix); + return result; +} + } // namespace nix diff --git a/src/libutil/subdir-source-accessor.cc b/src/libutil/subdir-source-accessor.cc deleted file mode 100644 index d4f57e2f7..000000000 --- a/src/libutil/subdir-source-accessor.cc +++ /dev/null @@ -1,59 +0,0 @@ -#include "nix/util/source-accessor.hh" - -namespace nix { - -struct SubdirSourceAccessor : SourceAccessor -{ - ref<SourceAccessor> parent; - - CanonPath subdirectory; - - SubdirSourceAccessor(ref<SourceAccessor> && parent, CanonPath && subdirectory) - : parent(std::move(parent)) - , subdirectory(std::move(subdirectory)) - { - displayPrefix.clear(); - } - - std::string readFile(const CanonPath & path) override - { - return parent->readFile(subdirectory / path); - } - - void readFile(const CanonPath & path, Sink & sink, std::function<void(uint64_t)> sizeCallback) override - { - return parent->readFile(subdirectory / path, sink, sizeCallback); - } - - bool pathExists(const CanonPath & path) override - { - return parent->pathExists(subdirectory / path); - } - - std::optional<Stat> maybeLstat(const CanonPath & path) override - { - return parent->maybeLstat(subdirectory / path); - } - - DirEntries readDirectory(const CanonPath & path) override - { - return parent->readDirectory(subdirectory / path); - } - - std::string readLink(const CanonPath & path) override - { - return parent->readLink(subdirectory / path); - } - - std::string showPath(const CanonPath & path) override - { - return displayPrefix + parent->showPath(subdirectory / path) + displaySuffix; - } -}; - -ref<SourceAccessor> projectSubdirSourceAccessor(ref<SourceAccessor> parent, CanonPath subdirectory) -{ - return make_ref<SubdirSourceAccessor>(std::move(parent), std::move(subdirectory)); -} - -} // namespace nix diff --git a/src/libutil/terminal.cc b/src/libutil/terminal.cc index 656847487..fe22146ab 100644 --- a/src/libutil/terminal.cc +++ b/src/libutil/terminal.cc @@ -179,9 +179,10 @@ std::pair<unsigned short, unsigned short> getWindowSize() return *windowSize.lock(); } +#ifndef _WIN32 std::string getPtsName(int fd) { -#ifdef __APPLE__ +# ifdef __APPLE__ static std::mutex ptsnameMutex; // macOS doesn't have ptsname_r, use mutex-protected ptsname std::lock_guard<std::mutex> lock(ptsnameMutex); @@ -190,7 +191,7 @@ std::string getPtsName(int fd) throw SysError("getting pseudoterminal slave name"); } return name; -#else +# else // Use thread-safe ptsname_r on platforms that support it // PTY names are typically short: // - Linux: /dev/pts/N (where N is usually < 1000) @@ -201,7 +202,8 @@ std::string getPtsName(int fd) throw SysError("getting pseudoterminal slave name"); } return buf; -#endif +# endif } +#endif } // namespace nix diff --git a/src/libutil/union-source-accessor.cc b/src/libutil/union-source-accessor.cc index 96b6a643a..e3b39f14e 100644 --- a/src/libutil/union-source-accessor.cc +++ b/src/libutil/union-source-accessor.cc @@ -72,6 +72,18 @@ struct UnionSourceAccessor : SourceAccessor } return std::nullopt;
} + + std::pair<CanonPath, std::optional<std::string>> getFingerprint(const CanonPath & path) override + { + if (fingerprint) + return {path, fingerprint}; + for (auto & accessor : accessors) { + auto [subpath, fingerprint] = accessor->getFingerprint(path); + if (fingerprint) + return {subpath, fingerprint}; + } + return {path, std::nullopt}; + } }; ref<SourceAccessor> makeUnionSourceAccessor(std::vector<ref<SourceAccessor>> && accessors) diff --git a/src/libutil/url.cc b/src/libutil/url.cc index 1c7fd3f0f..538792463 100644 --- a/src/libutil/url.cc +++ b/src/libutil/url.cc @@ -4,6 +4,7 @@ #include "nix/util/split.hh" #include "nix/util/canon-path.hh" #include "nix/util/strings-inline.hh" +#include "nix/util/file-system.hh" #include @@ -350,7 +351,7 @@ std::string ParsedURL::renderAuthorityAndPath() const must either be empty or begin with a slash ("/") character. */ assert(path.empty() || path.front().empty()); res += authority->to_string(); - } else if (std::ranges::equal(std::views::take(path, 2), std::views::repeat("", 2))) { + } else if (std::ranges::equal(std::views::take(path, 3), std::views::repeat("", 3))) { /* If a URI does not contain an authority component, then the path cannot begin with two slash characters ("//") */ unreachable(); @@ -434,10 +435,27 @@ bool isValidSchemeName(std::string_view s) return std::regex_match(s.begin(), s.end(), regex, std::regex_constants::match_default); } -std::ostream & operator<<(std::ostream & os, const ValidURL & url) +std::ostream & operator<<(std::ostream & os, const VerbatimURL & url) { os << url.to_string(); return os; } +std::optional<std::string> VerbatimURL::lastPathSegment() const +{ + try { + auto parsedUrl = parsed(); + auto segments = parsedUrl.pathSegments(/*skipEmpty=*/true); + if (std::ranges::empty(segments)) + return std::nullopt; + return segments.back(); + } catch (BadURL &) { + // Fall back to baseNameOf for unparsable URLs + auto name = baseNameOf(to_string()); + if (name.empty()) + return std::nullopt; + return std::string{name}; + } +} + } // namespace nix diff --git a/src/libutil/util.cc b/src/libutil/util.cc index 383a904ad..f14bc63ac 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -132,15 +132,16 @@ std::optional<N> string2Float(const std::string_view s) template std::optional<double> string2Float(const std::string_view s); template std::optional<float> string2Float(const std::string_view s); -std::string renderSize(uint64_t value, bool align) +std::string renderSize(int64_t value, bool align) { static const std::array<char, 9> prefixes{{'K', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'}}; size_t power = 0; - double res = value; - while (res > 1024 && power < prefixes.size()) { + double abs_value = std::abs(value); + while (abs_value > 1024 && power < prefixes.size()) { ++power; - res /= 1024; + abs_value /= 1024; } + double res = (double) value / std::pow(1024.0, power); return fmt(align ? "%6.1f %ciB" : "%.1f %ciB", power == 0 ? res / 1024 : res, prefixes.at(power)); } @@ -256,9 +257,4 @@ std::pair<std::string_view, std::string_view> getLine(std::string_view s) } } -std::string showBytes(uint64_t bytes) -{ - return fmt("%.2f MiB", bytes / (1024.0 * 1024.0)); -} - } // namespace nix diff --git a/src/nix/asan-options.cc b/src/nix/asan-options.cc deleted file mode 100644 index 256f34cbe..000000000 --- a/src/nix/asan-options.cc +++ /dev/null @@ -1,6 +0,0 @@ -extern "C" [[gnu::retain]] const char * __asan_default_options() -{ - // We leak a bunch of memory knowingly on purpose. It's not worthwhile to - // diagnose that memory being leaked for now.
- return "abort_on_error=1:print_summary=1:detect_leaks=0"; -} diff --git a/src/nix/build-remote/build-remote.cc b/src/nix/build-remote/build-remote.cc index 11df8cc5e..ffb77ddf1 100644 --- a/src/nix/build-remote/build-remote.cc +++ b/src/nix/build-remote/build-remote.cc @@ -324,7 +324,7 @@ static int main_build_remote(int argc, char ** argv) drv.inputSrcs = store->parseStorePathSet(inputs); optResult = sshStore->buildDerivation(*drvPath, (const BasicDerivation &) drv); auto & result = *optResult; - if (!result.success()) { + if (auto * failureP = result.tryGetFailure()) { if (settings.keepFailed) { warn( "The failed build directory was kept on the remote builder due to `--keep-failed`.%s", @@ -333,7 +333,7 @@ static int main_build_remote(int argc, char ** argv) : ""); } throw Error( - "build of '%s' on '%s' failed: %s", store->printStorePath(*drvPath), storeUri, result.errorMsg); + "build of '%s' on '%s' failed: %s", store->printStorePath(*drvPath), storeUri, failureP->errorMsg); } } else { copyClosure(*store, *sshStore, StorePathSet{*drvPath}, NoRepair, NoCheckSigs, substitute); @@ -357,11 +357,14 @@ static int main_build_remote(int argc, char ** argv) debug("missing output %s", outputName); assert(optResult); auto & result = *optResult; - auto i = result.builtOutputs.find(outputName); - assert(i != result.builtOutputs.end()); - auto & newRealisation = i->second; - missingRealisations.insert(newRealisation); - missingPaths.insert(newRealisation.outPath); + if (auto * successP = result.tryGetSuccess()) { + auto & success = *successP; + auto i = success.builtOutputs.find(outputName); + assert(i != success.builtOutputs.end()); + auto & newRealisation = i->second; + missingRealisations.insert(newRealisation); + missingPaths.insert(newRealisation.outPath); + } } } } else { diff --git a/src/nix/cat.cc b/src/nix/cat.cc index 145336723..1284b50fd 100644 --- a/src/nix/cat.cc +++ b/src/nix/cat.cc @@ -1,6 +1,10 @@ #include "nix/cmd/command.hh" #include "nix/store/store-api.hh" #include "nix/store/nar-accessor.hh" +#include "nix/util/serialise.hh" +#include "nix/util/source-accessor.hh" + +#include using namespace nix; @@ -41,10 +45,7 @@ struct CmdCatStore : StoreCommand, MixCat void run(ref store) override { auto [storePath, rest] = store->toStorePath(path); - auto accessor = store->getFSAccessor(storePath); - if (!accessor) - throw InvalidPath("path '%1%' is not a valid store path", store->printStorePath(storePath)); - cat(ref{std::move(accessor)}, CanonPath{rest}); + cat(store->requireStoreObjectAccessor(storePath), CanonPath{rest}); } }; @@ -74,7 +75,13 @@ struct CmdCatNar : StoreCommand, MixCat void run(ref store) override { - cat(makeNarAccessor(readFile(narPath)), CanonPath{path}); + AutoCloseFD fd = open(narPath.c_str(), O_RDONLY); + if (!fd) + throw SysError("opening NAR file '%s'", narPath); + auto source = FdSource{fd.get()}; + auto narAccessor = makeNarAccessor(source); + auto listing = listNar(narAccessor, CanonPath::root, true); + cat(makeLazyNarAccessor(listing, seekableGetNarBytes(narPath)), CanonPath{path}); } }; diff --git a/src/nix/config-check.cc b/src/nix/config-check.cc index c04943eab..e1efb40eb 100644 --- a/src/nix/config-check.cc +++ b/src/nix/config-check.cc @@ -100,7 +100,7 @@ struct CmdConfigCheck : StoreCommand ss << "Multiple versions of nix found in PATH:\n"; for (auto & dir : dirs) ss << " " << dir << "\n"; - return checkFail(toView(ss)); + return checkFail(ss.view()); } return checkPass("PATH contains only one nix version."); @@ -143,7 +143,7 @@ struct CmdConfigCheck 
: StoreCommand for (auto & dir : dirs) ss << " " << dir << "\n"; ss << "\n"; - return checkFail(toView(ss)); + return checkFail(ss.view()); } return checkPass("All profiles are gcroots."); @@ -162,7 +162,7 @@ << "sync with the daemon.\n\n" << "Client protocol: " << formatProtocol(clientProto) << "\n" << "Store protocol: " << formatProtocol(storeProto) << "\n\n"; - return checkFail(toView(ss)); + return checkFail(ss.view()); } return checkPass("Client protocol matches store protocol."); diff --git a/src/nix/copy.cc b/src/nix/copy.cc index 62e8b64f5..706edc6c9 100644 --- a/src/nix/copy.cc +++ b/src/nix/copy.cc @@ -5,10 +5,9 @@ using namespace nix; -struct CmdCopy : virtual CopyCommand, virtual BuiltPathsCommand, MixProfile +struct CmdCopy : virtual CopyCommand, virtual BuiltPathsCommand, MixProfile, MixNoCheckSigs { std::optional<Path> outLink; - CheckSigsFlag checkSigs = CheckSigs; SubstituteFlag substitute = NoSubstitute; @@ -24,13 +23,6 @@ .handler = {&outLink}, .completer = completePath, }); - - addFlag({ - .longName = "no-check-sigs", - .description = "Do not require that paths are signed by trusted keys.", - .handler = {&checkSigs, NoCheckSigs}, - }); - addFlag({ .longName = "substitute-on-destination", .shortName = 's', diff --git a/src/nix/derivation-add.cc b/src/nix/derivation-add.cc index 2d13aba52..48e935092 100644 --- a/src/nix/derivation-add.cc +++ b/src/nix/derivation-add.cc @@ -33,7 +33,7 @@ struct CmdAddDerivation : MixDryRun, StoreCommand { auto json = nlohmann::json::parse(drainFD(STDIN_FILENO)); - auto drv = Derivation::fromJSON(json); + auto drv = static_cast<Derivation>(json); auto drvPath = writeDerivation(*store, drv, NoRepair, /* read only */ dryRun); diff --git a/src/nix/derivation-add.md b/src/nix/derivation-add.md index 35507d9ad..4e37c4e6f 100644 --- a/src/nix/derivation-add.md +++ b/src/nix/derivation-add.md @@ -12,8 +12,7 @@ a Nix expression evaluates. [store derivation]: @docroot@/glossary.md#gloss-store-derivation -`nix derivation add` takes a single derivation in the following format: - -{{#include ../../protocols/json/derivation.md}} +`nix derivation add` takes a single derivation in JSON format. +See [the manual](@docroot@/protocols/json/derivation.md) for documentation of this format. )"" diff --git a/src/nix/derivation-show.cc b/src/nix/derivation-show.cc index 20e54bba7..1528f5b51 100644 --- a/src/nix/derivation-show.cc +++ b/src/nix/derivation-show.cc @@ -58,7 +58,7 @@ struct CmdShowDerivation : InstallablesCommand, MixPrintJSON if (!drvPath.isDerivation()) continue; - jsonRoot[drvPath.to_string()] = store->readDerivation(drvPath).toJSON(); + jsonRoot[drvPath.to_string()] = store->readDerivation(drvPath); } printJSON(jsonRoot); } diff --git a/src/nix/derivation-show.md b/src/nix/derivation-show.md index 9fff58ef9..1784be44c 100644 --- a/src/nix/derivation-show.md +++ b/src/nix/derivation-show.md @@ -48,10 +48,9 @@ By default, this command only shows top-level derivations, but with [store derivation]: @docroot@/glossary.md#gloss-store-derivation -`nix derivation show` outputs a JSON map of [store path]s to derivations in the following format: +`nix derivation show` outputs a JSON map of [store path]s to derivations in JSON format. +See [the manual](@docroot@/protocols/json/derivation.md) for documentation of this format.
[store path]: @docroot@/store/store-path.md -{{#include ../../protocols/json/derivation.md}} - )"" diff --git a/src/nix/develop.cc b/src/nix/develop.cc index f78eee59a..d23dce10b 100644 --- a/src/nix/develop.cc +++ b/src/nix/develop.cc @@ -254,10 +254,15 @@ static StorePath getDerivationEnvironment(ref store, ref evalStore drv.args = {store->printStorePath(getEnvShPath)}; /* Remove derivation checks. */ - drv.env.erase("allowedReferences"); - drv.env.erase("allowedRequisites"); - drv.env.erase("disallowedReferences"); - drv.env.erase("disallowedRequisites"); + if (drv.structuredAttrs) { + drv.structuredAttrs->structuredAttrs.erase("outputChecks"); + } else { + drv.env.erase("allowedReferences"); + drv.env.erase("allowedRequisites"); + drv.env.erase("disallowedReferences"); + drv.env.erase("disallowedRequisites"); + } + drv.env.erase("name"); /* Rehash and write the derivation. FIXME: would be nice to use @@ -299,11 +304,9 @@ static StorePath getDerivationEnvironment(ref store, ref evalStore for (auto & [_0, optPath] : evalStore->queryPartialDerivationOutputMap(shellDrvPath)) { assert(optPath); - auto & outPath = *optPath; - assert(store->isValidPath(outPath)); - auto outPathS = store->toRealPath(outPath); - if (lstat(outPathS).st_size) - return outPath; + auto accessor = evalStore->requireStoreObjectAccessor(*optPath); + if (auto st = accessor->maybeLstat(CanonPath::root); st && st->fileSize.value_or(0)) + return *optPath; } throw Error("get-env.sh failed to produce an environment"); @@ -502,7 +505,9 @@ struct Common : InstallableCommand, MixProfile debug("reading environment file '%s'", strPath); - return {BuildEnvironment::parseJSON(readFile(store->toRealPath(shellOutPath))), strPath}; + return { + BuildEnvironment::parseJSON(store->requireStoreObjectAccessor(shellOutPath)->readFile(CanonPath::root)), + strPath}; } }; diff --git a/src/nix/diff-closures.cc b/src/nix/diff-closures.cc index cbf842e5c..d36a21d74 100644 --- a/src/nix/diff-closures.cc +++ b/src/nix/diff-closures.cc @@ -107,8 +107,7 @@ void printClosureDiff( if (!removed.empty() || !added.empty()) items.push_back(fmt("%s → %s", showVersions(removed), showVersions(added))); if (showDelta) - items.push_back( - fmt("%s%+.1f KiB" ANSI_NORMAL, sizeDelta > 0 ? ANSI_RED : ANSI_GREEN, sizeDelta / 1024.0)); + items.push_back(fmt("%s%s" ANSI_NORMAL, sizeDelta > 0 ? 
ANSI_RED : ANSI_GREEN, renderSize(sizeDelta))); logger->cout("%s%s: %s", indent, name, concatStringsSep(", ", items)); } } diff --git a/src/nix/dump-path.cc b/src/nix/dump-path.cc index 8475655e9..f375b0ac8 100644 --- a/src/nix/dump-path.cc +++ b/src/nix/dump-path.cc @@ -4,6 +4,14 @@ using namespace nix; +static FdSink getNarSink() +{ + auto fd = getStandardOutput(); + if (isatty(fd)) + throw UsageError("refusing to write NAR to a terminal"); + return FdSink(std::move(fd)); +} + struct CmdDumpPath : StorePathCommand { std::string description() override @@ -20,7 +28,7 @@ struct CmdDumpPath : StorePathCommand void run(ref store, const StorePath & storePath) override { - FdSink sink(getStandardOutput()); + auto sink = getNarSink(); store->narFromPath(storePath, sink); sink.flush(); } @@ -51,7 +59,7 @@ struct CmdDumpPath2 : Command void run() override { - FdSink sink(getStandardOutput()); + auto sink = getNarSink(); dumpPath(path, sink); sink.flush(); } diff --git a/src/nix/env.cc b/src/nix/env.cc index 0a211399a..a80bcda67 100644 --- a/src/nix/env.cc +++ b/src/nix/env.cc @@ -1,6 +1,7 @@ -#include #include +#include + #include "nix/cmd/command.hh" #include "nix/expr/eval.hh" #include "run.hh" diff --git a/src/nix/eval.cc b/src/nix/eval.cc index 10d0a1841..584b2122f 100644 --- a/src/nix/eval.cc +++ b/src/nix/eval.cc @@ -85,9 +85,7 @@ struct CmdEval : MixJSON, InstallableValueCommand, MixReadOnlyOption if (pathExists(*writeTo)) throw Error("path '%s' already exists", writeTo->string()); - std::function recurse; - - recurse = [&](Value & v, const PosIdx pos, const std::filesystem::path & path) { + [&](this const auto & recurse, Value & v, const PosIdx pos, const std::filesystem::path & path) -> void { state->forceValue(v, pos); if (v.type() == nString) // FIXME: disallow strings with contexts? @@ -111,9 +109,7 @@ struct CmdEval : MixJSON, InstallableValueCommand, MixReadOnlyOption } else state->error("value at '%s' is not a string or an attribute set", state->positions[pos]) .debugThrow(); - }; - - recurse(*v, pos, *writeTo); + }(*v, pos, *writeTo); } else if (raw) { diff --git a/src/nix/flake-check.md b/src/nix/flake-check.md index c8307f8d8..007640c27 100644 --- a/src/nix/flake-check.md +++ b/src/nix/flake-check.md @@ -31,39 +31,49 @@ at the first error. 
The following flake output attributes must be derivations: * `checks.`*system*`.`*name* -* `defaultPackage.`*system* -* `devShell.`*system* +* `devShells.`*system*`.default` * `devShells.`*system*`.`*name* * `nixosConfigurations.`*name*`.config.system.build.toplevel` +* `packages.`*system*`.default` * `packages.`*system*`.`*name* The following flake output attributes must be [app definitions](./nix3-run.md): +* `apps.`*system*`.default` * `apps.`*system*`.`*name* -* `defaultApp.`*system* The following flake output attributes must be [template definitions](./nix3-flake-init.md): -* `defaultTemplate` +* `templates.default` * `templates.`*name* The following flake output attributes must be *Nixpkgs overlays*: -* `overlay` +* `overlays.default` * `overlays.`*name* The following flake output attributes must be *NixOS modules*: -* `nixosModule` +* `nixosModules.default` * `nixosModules.`*name* The following flake output attributes must be [bundlers](./nix3-bundle.md): +* `bundlers.default` * `bundlers.`*name* -* `defaultBundler` + +The old default attributes have been renamed; they still work, but using them emits a warning: + +* `defaultPackage.`*system* → `packages.`*system*`.default` +* `defaultApp.`*system* → `apps.`*system*`.default` +* `defaultTemplate` → `templates.default` +* `defaultBundler.`*system* → `bundlers.`*system*`.default` +* `overlay` → `overlays.default` +* `devShell.`*system* → `devShells.`*system*`.default` +* `nixosModule` → `nixosModules.default` In addition, the `hydraJobs` output is evaluated in the same way as Hydra's `hydra-eval-jobs` (i.e. as an arbitrarily deeply nested diff --git a/src/nix/flake-prefetch-inputs.cc b/src/nix/flake-prefetch-inputs.cc index 096eaf539..2a3e067c6 100644 --- a/src/nix/flake-prefetch-inputs.cc +++ b/src/nix/flake-prefetch-inputs.cc @@ -38,8 +38,7 @@ struct CmdFlakePrefetchInputs : FlakeCommand std::atomic nrFailed{0}; - std::function<void(const Node & node)> visit; - visit = [&](this const auto & visit, const Node & node) { if (!state_.lock()->done.insert(&node).second) return; diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 18be64bba..b826e943c 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -267,11 +267,9 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON if (!lockedFlake.lockFile.root->inputs.empty()) logger->cout(ANSI_BOLD "Inputs:" ANSI_NORMAL); - std::set<ref<Node>> visited; + std::set<ref<Node>> visited{lockedFlake.lockFile.root}; - std::function<void(const Node & node, const std::string & prefix)> recurse; - - recurse = [&](const Node & node, const std::string & prefix) { + [&](this const auto & recurse, const Node & node, const std::string & prefix) -> void { for (const auto & [i, input] : enumerate(node.inputs)) { bool last = i + 1 == node.inputs.size(); @@ -298,10 +296,7 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON printInputAttrPath(*follows)); } } - }; - - visited.insert(lockedFlake.lockFile.root); - recurse(*lockedFlake.lockFile.root, ""); + }(*lockedFlake.lockFile.root, ""); } } }; @@ -473,7 +468,7 @@ struct CmdFlakeCheck : FlakeCommand if (!v.isLambda()) { throw Error("overlay is not a function, but %s instead", showType(v)); } - if (v.lambda().fun->hasFormals() || !argHasName(v.lambda().fun->arg, "final")) + if (v.lambda().fun->getFormals() || !argHasName(v.lambda().fun->arg, "final")) throw Error("overlay does not take an argument named 'final'"); // FIXME: if we have a 'nixpkgs' input, use it to // evaluate the overlay.
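Editor's note on the `hasFormals()` → `getFormals()` change in the hunk above: the new accessor returns something nullable instead of requiring a boolean check followed by a direct field access, so the test and the binding collapse into a single `if`. Below is a minimal, self-contained sketch of that shape; the types here (`Formal`, `Lambda`) are hypothetical stand-ins, not Nix's real `ExprLambda`/`Formals` API.

```cpp
// Minimal sketch of the hasFormals()/getFormals() refactor.
// All names below are illustrative assumptions, not Nix's API.
#include <cstdio>
#include <optional>
#include <string>
#include <vector>

struct Formal
{
    std::string name;
};

struct Lambda
{
    std::optional<std::vector<Formal>> formals;

    // One nullable accessor instead of hasFormals() plus a field access:
    // returns nullptr when the lambda has no formal arguments.
    const std::vector<Formal> * getFormals() const
    {
        return formals ? &*formals : nullptr;
    }
};

int main()
{
    Lambda overlay;
    overlay.formals = std::vector<Formal>{{"final"}, {"prev"}};

    // Test and bind in one step, as in the patch hunks above.
    if (auto formals = overlay.getFormals())
        for (auto & f : *formals)
            std::printf("formal: %s\n", f.name.c_str());
}
```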
@@ -884,8 +879,7 @@ struct CmdFlakeInitCommon : virtual Args, EvalCommand std::vector changedFiles; std::vector conflictedFiles; - std::function copyDir; - copyDir = [&](const SourcePath & from, const std::filesystem::path & to) { + [&](this const auto & copyDir, const SourcePath & from, const std::filesystem::path & to) -> void { createDirs(to); for (auto & [name, entry] : from.readDirectory()) { @@ -935,9 +929,7 @@ struct CmdFlakeInitCommon : virtual Args, EvalCommand changedFiles.push_back(to2); notice("wrote: %s", to2); } - }; - - copyDir(templateDir, flakeDir); + }(templateDir, flakeDir); if (!changedFiles.empty() && std::filesystem::exists(std::filesystem::path{flakeDir} / ".git")) { Strings args = {"-C", flakeDir, "add", "--intent-to-add", "--force", "--"}; @@ -1032,12 +1024,10 @@ struct CmdFlakeClone : FlakeCommand } }; -struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun +struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun, MixNoCheckSigs { std::string dstUri; - CheckSigsFlag checkSigs = CheckSigs; - SubstituteFlag substitute = NoSubstitute; CmdFlakeArchive() @@ -1048,11 +1038,6 @@ struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun .labels = {"store-uri"}, .handler = {&dstUri}, }); - addFlag({ - .longName = "no-check-sigs", - .description = "Do not require that paths are signed by trusted keys.", - .handler = {&checkSigs, NoCheckSigs}, - }); } std::string description() override @@ -1155,7 +1140,7 @@ struct CmdFlakeShow : FlakeCommand, MixJSON evalSettings.enableImportFromDerivation.setDefault(false); auto state = getEvalState(); - auto flake = std::make_shared(lockFlake()); + auto flake = make_ref(lockFlake()); auto localSystem = std::string(settings.thisSystem.get()); std::function & attrPath, const Symbol & attr)> @@ -1443,7 +1428,7 @@ struct CmdFlakeShow : FlakeCommand, MixJSON return j; }; - auto cache = openEvalCache(*state, flake); + auto cache = openEvalCache(*state, ref(flake)); auto j = visit(*cache->getRoot(), {}, fmt(ANSI_BOLD "%s" ANSI_NORMAL, flake->flake.lockedRef), ""); if (json) diff --git a/src/nix/ls.cc b/src/nix/ls.cc index 4952d5243..82721222e 100644 --- a/src/nix/ls.cc +++ b/src/nix/ls.cc @@ -115,10 +115,7 @@ struct CmdLsStore : StoreCommand, MixLs void run(ref store) override { auto [storePath, rest] = store->toStorePath(path); - auto accessor = store->getFSAccessor(storePath); - if (!accessor) - throw InvalidPath("path '%1%' is not a valid store path", store->printStorePath(storePath)); - list(ref{std::move(accessor)}, CanonPath{rest}); + list(store->requireStoreObjectAccessor(storePath), CanonPath{rest}); } }; @@ -148,7 +145,13 @@ struct CmdLsNar : Command, MixLs void run() override { - list(makeNarAccessor(readFile(narPath)), CanonPath{path}); + AutoCloseFD fd = open(narPath.c_str(), O_RDONLY); + if (!fd) + throw SysError("opening NAR file '%s'", narPath); + auto source = FdSource{fd.get()}; + auto narAccessor = makeNarAccessor(source); + auto listing = listNar(narAccessor, CanonPath::root, true); + list(makeLazyNarAccessor(listing, seekableGetNarBytes(narPath)), CanonPath{path}); } }; diff --git a/src/nix/main.cc b/src/nix/main.cc index ed889a189..74d22e433 100644 --- a/src/nix/main.cc +++ b/src/nix/main.cc @@ -256,8 +256,8 @@ static void showHelp(std::vector subcommand, NixArgs & toplevel) vDump->mkString(toplevel.dumpCli()); auto vRes = state.allocValue(); - state.callFunction(*vGenerateManpage, state.getBuiltin("false"), *vRes, noPos); - state.callFunction(*vRes, *vDump, *vRes, noPos); + Value * args[]{&state.getBuiltin("false"), 
vDump}; + state.callFunction(*vGenerateManpage, args, *vRes, noPos); auto attr = vRes->attrs()->get(state.symbols.create(mdName + ".md")); if (!attr) diff --git a/src/nix/make-content-addressed.md b/src/nix/make-content-addressed.md index b1f7da525..e6a51c83a 100644 --- a/src/nix/make-content-addressed.md +++ b/src/nix/make-content-addressed.md @@ -51,7 +51,7 @@ be verified without any additional information such as signatures. This means that a command like ```console -# nix store build /nix/store/5skmmcb9svys5lj3kbsrjg7vf2irid63-hello-2.10 \ +# nix build /nix/store/5skmmcb9svys5lj3kbsrjg7vf2irid63-hello-2.10 \ --substituters https://my-cache.example.org ``` diff --git a/src/nix/meson.build b/src/nix/meson.build index f67a2948f..e989e8016 100644 --- a/src/nix/meson.build +++ b/src/nix/meson.build @@ -56,13 +56,11 @@ config_priv_h = configure_file( ) subdir('nix-meson-build-support/common') -subdir('nix-meson-build-support/asan-options') subdir('nix-meson-build-support/generate-header') nix_sources = [ config_priv_h ] + files( 'add-to-store.cc', 'app.cc', - 'asan-options.cc', 'build.cc', 'bundle.cc', 'cat.cc', diff --git a/src/nix/nix-build/nix-build.cc b/src/nix/nix-build/nix-build.cc index d3902f2a6..4d876c9eb 100644 --- a/src/nix/nix-build/nix-build.cc +++ b/src/nix/nix-build/nix-build.cc @@ -285,10 +285,10 @@ static void main_nix_build(int argc, char ** argv) execArgs, interpreter, escapeShellArgAlways(script), - toView(joined)); + joined.view()); } else { envCommand = - fmt("exec %1% %2% %3% %4%", execArgs, interpreter, escapeShellArgAlways(script), toView(joined)); + fmt("exec %1% %2% %3% %4%", execArgs, interpreter, escapeShellArgAlways(script), joined.view()); } } @@ -410,17 +410,18 @@ static void main_nix_build(int argc, char ** argv) Value vRoot; state->eval(e, vRoot); - std::function takesNixShellAttr; - takesNixShellAttr = [&](const Value & v) { + auto takesNixShellAttr = [&](const Value & v) { if (!isNixShell) { return false; } bool add = false; - if (v.type() == nFunction && v.lambda().fun->hasFormals()) { - for (auto & i : v.lambda().fun->formals->formals) { - if (state->symbols[i.name] == "inNixShell") { - add = true; - break; + if (v.type() == nFunction) { + if (auto formals = v.lambda().fun->getFormals()) { + for (auto & i : formals->formals) { + if (state->symbols[i.name] == "inNixShell") { + add = true; + break; + } } } } @@ -490,10 +491,9 @@ static void main_nix_build(int argc, char ** argv) } } - std::function, const DerivedPathMap::ChildNode &)> accumDerivedPath; - - accumDerivedPath = [&](ref inputDrv, - const DerivedPathMap::ChildNode & inputNode) { + auto accumDerivedPath = [&](this auto & self, + ref inputDrv, + const DerivedPathMap::ChildNode & inputNode) -> void { if (!inputNode.value.empty()) pathsToBuild.push_back( DerivedPath::Built{ @@ -501,8 +501,7 @@ static void main_nix_build(int argc, char ** argv) .outputs = OutputsSpec::Names{inputNode.value}, }); for (const auto & [outputName, childNode] : inputNode.childMap) - accumDerivedPath( - make_ref(SingleDerivedPath::Built{inputDrv, outputName}), childNode); + self(make_ref(SingleDerivedPath::Built{inputDrv, outputName}), childNode); }; // Build or fetch all dependencies of the derivation. 
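Editor's note: the `accumDerivedPath` hunk just above, like the `eval.cc`, `flake-prefetch-inputs.cc`, and `flake.cc` hunks earlier in this patch, replaces the declare-then-assign `std::function` idiom with a C++23 "deducing this" lambda. A minimal standalone illustration of why this works (requires `-std=c++23`; the names are illustrative only):

```cpp
#include <cstdio>
#include <functional>

int main()
{
    // Before: recursion needs a forward-declared, type-erased std::function.
    std::function<int(int)> fibOld;
    fibOld = [&](int n) { return n < 2 ? n : fibOld(n - 1) + fibOld(n - 2); };

    // After: the explicit object parameter ("deducing this") lets the
    // closure refer to itself, so no std::function, no separate
    // declaration, and no type-erased indirect call is needed.
    auto fib = [](this auto const & self, int n) -> int {
        return n < 2 ? n : self(n - 1) + self(n - 2);
    };

    std::printf("%d %d\n", fibOld(10), fib(10)); // prints "55 55"
}
```

The same mechanism is what allows the patch to immediately invoke such a lambda, as in `}(*v, pos, *writeTo);` in `eval.cc`.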
@@ -600,7 +599,7 @@ static void main_nix_build(int argc, char ** argv) structuredAttrsRC = StructuredAttrs::writeShell(json); auto attrsJSON = (tmpDir.path() / ".attrs.json").string(); - writeFile(attrsJSON, json.dump()); + writeFile(attrsJSON, static_cast(std::move(json)).dump()); auto attrsSH = (tmpDir.path() / ".attrs.sh").string(); writeFile(attrsSH, structuredAttrsRC); diff --git a/src/nix/nix-env/user-env.cc b/src/nix/nix-env/user-env.cc index fbdcb14f8..81e2c4f80 100644 --- a/src/nix/nix-env/user-env.cc +++ b/src/nix/nix-env/user-env.cc @@ -108,7 +108,7 @@ bool createUserEnv( auto manifestFile = ({ std::ostringstream str; printAmbiguous(manifest, state.symbols, str, nullptr, std::numeric_limits<int>::max()); - StringSource source{toView(str)}; + StringSource source{str.view()}; state.store->addToStoreFromDump( source, "env-manifest.nix", diff --git a/src/nix/nix-store/nix-store.cc b/src/nix/nix-store/nix-store.cc index f8078426c..3798c7fa0 100644 --- a/src/nix/nix-store/nix-store.cc +++ b/src/nix/nix-store/nix-store.cc @@ -603,7 +603,7 @@ static void registerValidity(bool reregister, bool hashGiven, bool canonicalise) #endif if (!hashGiven) { HashResult hash = hashPath( - {ref{store->getFSAccessor(info->path, false)}}, + {store->requireStoreObjectAccessor(info->path, /*requireValidPath=*/false)}, FileSerialisationMethod::NixArchive, HashAlgorithm::SHA256); info->narHash = hash.hash; @@ -986,6 +986,16 @@ static void opServe(Strings opFlags, Strings opArgs) store->narFromPath(store->parseStorePath(readString(in)), out); break; + case ServeProto::Command::ImportPaths: { + if (!writeAllowed) + throw Error("importing paths is not allowed"); + // FIXME: should we skip sig checking? + importPaths(*store, in, NoCheckSigs); + // indicate success + out << 1; + break; + } + case ServeProto::Command::BuildPaths: { if (!writeAllowed) diff --git a/src/nix/path-info.cc b/src/nix/path-info.cc index fef3ae120..146b775e5 100644 --- a/src/nix/path-info.cc +++ b/src/nix/path-info.cc @@ -141,7 +141,7 @@ struct CmdPathInfo : StorePathsCommand, MixJSON void printSize(std::ostream & str, uint64_t value) { if (humanReadable) - str << fmt("\t%s", renderSize(value, true)); + str << fmt("\t%s", renderSize((int64_t) value, true)); else str << fmt("\t%11d", value); } diff --git a/src/nix/prefetch.cc b/src/nix/prefetch.cc index 26905e34c..d875f8e4b 100644 --- a/src/nix/prefetch.cc +++ b/src/nix/prefetch.cc @@ -13,6 +13,8 @@ #include "nix/cmd/misc-store-flags.hh" #include "nix/util/terminal.hh" #include "nix/util/environment-variables.hh" +#include "nix/util/url.hh" +#include "nix/store/path.hh" #include "man-pages.hh" @@ -56,7 +58,7 @@ std::string resolveMirrorUrl(EvalState & state, const std::string & url) std::tuple<StorePath, Hash> prefetchFile( ref<Store> store, - std::string_view url, + const VerbatimURL & url, std::optional<std::string> name, HashAlgorithm hashAlgo, std::optional<Hash> expectedHash, @@ -68,9 +70,15 @@ std::tuple<StorePath, Hash> prefetchFile( /* Figure out a name in the Nix store.
*/ if (!name) { - name = baseNameOf(url); - if (name->empty()) - throw Error("cannot figure out file name for '%s'", url); + name = url.lastPathSegment(); + if (!name || name->empty()) + throw Error("cannot figure out file name for '%s'", url.to_string()); + } + try { + checkName(*name); + } catch (BadStorePathName & e) { + e.addTrace({}, "file name '%s' was extracted from URL '%s'", *name, url.to_string()); + throw; } std::optional storePath; @@ -105,14 +113,14 @@ std::tuple prefetchFile( FdSink sink(fd.get()); - FileTransferRequest req(ValidURL{url}); + FileTransferRequest req(url); req.decompress = false; getFileTransfer()->download(std::move(req), sink); } /* Optionally unpack the file. */ if (unpack) { - Activity act(*logger, lvlChatty, actUnknown, fmt("unpacking '%s'", url)); + Activity act(*logger, lvlChatty, actUnknown, fmt("unpacking '%s'", url.to_string())); auto unpacked = (tmpDir.path() / "unpacked").string(); createDirs(unpacked); unpackTarfile(tmpFile.string(), unpacked); @@ -128,7 +136,7 @@ std::tuple prefetchFile( } } - Activity act(*logger, lvlChatty, actUnknown, fmt("adding '%s' to the store", url)); + Activity act(*logger, lvlChatty, actUnknown, fmt("adding '%s' to the store", url.to_string())); auto info = store->addToStoreSlow( *name, PosixSourceAccessor::createAtRoot(tmpFile), method, hashAlgo, {}, expectedHash); diff --git a/src/nix/search.cc b/src/nix/search.cc index 910450e95..20bb4cd5d 100644 --- a/src/nix/search.cc +++ b/src/nix/search.cc @@ -159,7 +159,7 @@ struct CmdSearch : InstallableValueCommand, MixJSON logger->cout( "* %s%s", wrap("\e[0;1m", hiliteMatches(attrPath2, attrPathMatches, ANSI_GREEN, "\e[0;1m")), - name.version != "" ? " (" + name.version + ")" : ""); + optionalBracket(" (", name.version, ")")); if (description != "") logger->cout( " %s", hiliteMatches(description, descriptionMatches, ANSI_GREEN, ANSI_NORMAL)); diff --git a/src/nix/sigs.cc b/src/nix/sigs.cc index 92bb00500..e82f0d284 100644 --- a/src/nix/sigs.cc +++ b/src/nix/sigs.cc @@ -3,6 +3,7 @@ #include "nix/main/shared.hh" #include "nix/store/store-open.hh" #include "nix/util/thread-pool.hh" +#include "nix/store/filetransfer.hh" #include @@ -28,6 +29,13 @@ struct CmdCopySigs : StorePathsCommand return "copy store path signatures from substituters"; } + std::string doc() override + { + return +#include "store-copy-sigs.md" + ; + } + void run(ref store, StorePaths && storePaths) override { if (substituterUris.empty()) @@ -38,7 +46,7 @@ struct CmdCopySigs : StorePathsCommand for (auto & s : substituterUris) substituters.push_back(openStore(s)); - ThreadPool pool; + ThreadPool pool{fileTransferSettings.httpConnections}; std::atomic added{0}; @@ -104,6 +112,7 @@ struct CmdSign : StorePathsCommand .labels = {"file"}, .handler = {&secretKeyFile}, .completer = completePath, + .required = true, }); } @@ -114,9 +123,6 @@ struct CmdSign : StorePathsCommand void run(ref store, StorePaths && storePaths) override { - if (secretKeyFile.empty()) - throw UsageError("you must specify a secret key file using '-k'"); - SecretKey secretKey(readFile(secretKeyFile)); LocalSigner signer(std::move(secretKey)); @@ -144,7 +150,7 @@ static auto rCmdSign = registerCommand2({"store", "sign"}); struct CmdKeyGenerateSecret : Command { - std::optional keyName; + std::string keyName; CmdKeyGenerateSecret() { @@ -153,6 +159,7 @@ struct CmdKeyGenerateSecret : Command .description = "Identifier of the key (e.g. 
`cache.example.org-1`).", .labels = {"name"}, .handler = {&keyName}, + .required = true, }); } @@ -170,11 +177,8 @@ struct CmdKeyGenerateSecret : Command void run() override { - if (!keyName) - throw UsageError("required argument '--key-name' is missing"); - logger->stop(); - writeFull(getStandardOutput(), SecretKey::generate(*keyName).to_string()); + writeFull(getStandardOutput(), SecretKey::generate(keyName).to_string()); } }; diff --git a/src/nix/store-copy-sigs.md b/src/nix/store-copy-sigs.md new file mode 100644 index 000000000..678756221 --- /dev/null +++ b/src/nix/store-copy-sigs.md @@ -0,0 +1,30 @@ +R""( + +# Examples + +* To copy signatures from a binary cache to the local store: + + ```console + # nix store copy-sigs --substituter https://cache.nixos.org \ + --recursive /nix/store/y1x7ng5bmc9s8lqrf98brcpk1a7lbcl5-hello-2.12.1 + ``` + +* To copy signatures from one binary cache to another: + + ```console + # nix store copy-sigs --substituter https://cache.nixos.org \ + --store file:///tmp/binary-cache \ + --recursive -v \ + /nix/store/y1x7ng5bmc9s8lqrf98brcpk1a7lbcl5-hello-2.12.1 + imported 2 signatures + ``` + +# Description + +`nix store copy-sigs` copies store path signatures from one store to another. + +It is not advised to copy signatures to binary cache stores. Binary cache signatures are stored in `.narinfo` files. Since these are cached aggressively, clients may not see the new signatures quickly. It is therefore better to set any required signatures when the paths are first uploaded to the binary cache. + +Store paths are processed in parallel. The amount of parallelism is controlled by the [`http-connections`](@docroot@/command-ref/conf-file.md#conf-http-connections) settings. + +)"" diff --git a/src/nix/unix/daemon.cc b/src/nix/unix/daemon.cc index cb105a385..33ad8757a 100644 --- a/src/nix/unix/daemon.cc +++ b/src/nix/unix/daemon.cc @@ -87,7 +87,7 @@ struct AuthorizationSettings : Config {"*"}, "allowed-users", R"( - A list user names, separated by whitespace. + A list of user names, separated by whitespace. These users are allowed to connect to the Nix daemon. You can specify groups by prefixing names with `@`. diff --git a/src/nix/why-depends.cc b/src/nix/why-depends.cc index 473827a93..29da9e953 100644 --- a/src/nix/why-depends.cc +++ b/src/nix/why-depends.cc @@ -1,5 +1,6 @@ #include "nix/cmd/command.hh" #include "nix/store/store-api.hh" +#include "nix/store/path-references.hh" #include "nix/util/source-accessor.hh" #include "nix/main/shared.hh" @@ -191,7 +192,7 @@ struct CmdWhyDepends : SourceExprCommand, MixOperateOnOptions /* Sort the references by distance to `dependency` to ensure that the shortest path is printed first. */ std::multimap refs; - StringSet hashes; + StorePathSet refPaths; for (auto & ref : node.refs) { if (ref == node.path && packagePath != dependencyPath) @@ -200,67 +201,59 @@ struct CmdWhyDepends : SourceExprCommand, MixOperateOnOptions if (node2.dist == inf) continue; refs.emplace(node2.dist, &node2); - hashes.insert(std::string(node2.path.hashPart())); + refPaths.insert(node2.path); } /* For each reference, find the files and symlinks that contain the reference. */ std::map hits; - auto accessor = store->getFSAccessor(node.path); + auto accessor = store->requireStoreObjectAccessor(node.path); - auto visitPath = [&](this auto && recur, const CanonPath & p) -> void { - auto st = accessor->maybeLstat(p); - assert(st); + auto getColour = [&](const std::string & hash) { + return hash == dependencyPathHash ? 
ANSI_GREEN : ANSI_BLUE; + }; - auto p2 = p.isRoot() ? p.abs() : p.rel(); + if (precise) { + // Use scanForReferencesDeep to find files containing references + scanForReferencesDeep(*accessor, CanonPath::root, refPaths, [&](FileRefScanResult result) { + auto p2 = result.filePath.isRoot() ? result.filePath.abs() : result.filePath.rel(); + auto st = accessor->lstat(result.filePath); - auto getColour = [&](const std::string & hash) { - return hash == dependencyPathHash ? ANSI_GREEN : ANSI_BLUE; - }; + if (st.type == SourceAccessor::Type::tRegular) { + auto contents = accessor->readFile(result.filePath); - if (st->type == SourceAccessor::Type::tDirectory) { - auto names = accessor->readDirectory(p); - for (auto & [name, type] : names) - recur(p / name); - } - - else if (st->type == SourceAccessor::Type::tRegular) { - auto contents = accessor->readFile(p); - - for (auto & hash : hashes) { - auto pos = contents.find(hash); - if (pos != std::string::npos) { - size_t margin = 32; - auto pos2 = pos >= margin ? pos - margin : 0; - hits[hash].emplace_back( - fmt("%s: …%s…", + // For each reference found in this file, extract context + for (auto & foundRef : result.foundRefs) { + std::string hash(foundRef.hashPart()); + auto pos = contents.find(hash); + if (pos != std::string::npos) { + size_t margin = 32; + auto pos2 = pos >= margin ? pos - margin : 0; + hits[hash].emplace_back(fmt( + "%s: …%s…", p2, hilite( filterPrintable(std::string(contents, pos2, pos - pos2 + hash.size() + margin)), pos - pos2, StorePath::HashLen, getColour(hash)))); + } + } + } else if (st.type == SourceAccessor::Type::tSymlink) { + auto target = accessor->readLink(result.filePath); + + // For each reference found in this symlink, show it + for (auto & foundRef : result.foundRefs) { + std::string hash(foundRef.hashPart()); + auto pos = target.find(hash); + if (pos != std::string::npos) + hits[hash].emplace_back( + fmt("%s -> %s", p2, hilite(target, pos, StorePath::HashLen, getColour(hash)))); } } - } - - else if (st->type == SourceAccessor::Type::tSymlink) { - auto target = accessor->readLink(p); - - for (auto & hash : hashes) { - auto pos = target.find(hash); - if (pos != std::string::npos) - hits[hash].emplace_back( - fmt("%s -> %s", p2, hilite(target, pos, StorePath::HashLen, getColour(hash)))); - } - } - }; - - // FIXME: should use scanForReferences(). 
- - if (precise) - visitPath(CanonPath::root); + }); + } for (auto & ref : refs) { std::string hash(ref.second->path.hashPart()); diff --git a/tests/functional/binary-cache.sh b/tests/functional/binary-cache.sh index 2c102df07..d801ac6aa 100755 --- a/tests/functional/binary-cache.sh +++ b/tests/functional/binary-cache.sh @@ -111,7 +111,13 @@ clearStore mv "$cacheDir/nar" "$cacheDir/nar2" -nix-build --substituters "file://$cacheDir" --no-require-sigs dependencies.nix -o "$TEST_ROOT/result" +nix-build --substituters "file://$cacheDir" --no-require-sigs dependencies.nix -o "$TEST_ROOT/result" 2>&1 | tee "$TEST_ROOT/log" + +# Verify that missing NARs produce warnings, not errors +# The build should succeed despite the warnings +grepQuiet "does not exist in binary cache" "$TEST_ROOT/log" +# Ensure the message is not at error level by checking that the command succeeded +[ -e "$TEST_ROOT/result" ] mv "$cacheDir/nar2" "$cacheDir/nar" diff --git a/tests/functional/build-hook-list-paths.sh b/tests/functional/build-hook-list-paths.sh new file mode 100755 index 000000000..03691c2d2 --- /dev/null +++ b/tests/functional/build-hook-list-paths.sh @@ -0,0 +1,12 @@ +#!/bin/sh + +set -x +set -e + +[ -n "$OUT_PATHS" ] +[ -n "$DRV_PATH" ] +[ -n "$HOOK_DEST" ] + +for o in $OUT_PATHS; do + echo "$o" >> "$HOOK_DEST" +done diff --git a/tests/functional/build.sh b/tests/functional/build.sh index 0a19ff7da..0b06dcd91 100755 --- a/tests/functional/build.sh +++ b/tests/functional/build.sh @@ -178,19 +178,23 @@ test "$(<<<"$out" grep -cE '^error:')" = 4 out="$(nix build -f fod-failing.nix -L x4 2>&1)" && status=0 || status=$? test "$status" = 1 -test "$(<<<"$out" grep -cE '^error:')" = 2 +# Precise number of errors depends on daemon version / goal refactorings +(( "$(<<<"$out" grep -cE '^error:')" >= 2 )) if isDaemonNewer "2.29pre"; then <<<"$out" grepQuiet -E "error: Cannot build '.*-x4\\.drv'" <<<"$out" grepQuiet -E "Reason: 1 dependency failed." + <<<"$out" grepQuiet -E "Build failed due to failed dependency" else <<<"$out" grepQuiet -E "error: 1 dependencies of derivation '.*-x4\\.drv' failed to build" fi -<<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x2\\.drv'" +# Either x2 or x3 could have failed, x4 depends on both symmetrically +<<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x[23]\\.drv'" out="$(nix build -f fod-failing.nix -L x4 --keep-going 2>&1)" && status=0 || status=$? test "$status" = 1 -test "$(<<<"$out" grep -cE '^error:')" = 3 +# Precise number of errors depends on daemon version / goal refactorings +(( "$(<<<"$out" grep -cE '^error:')" >= 3 )) if isDaemonNewer "2.29pre"; then <<<"$out" grepQuiet -E "error: Cannot build '.*-x4\\.drv'" <<<"$out" grepQuiet -E "Reason: 2 dependencies failed." diff --git a/tests/functional/ca/build-delete.sh b/tests/functional/ca/build-delete.sh index 3ad3d0a80..173cfb224 100644 --- a/tests/functional/ca/build-delete.sh +++ b/tests/functional/ca/build-delete.sh @@ -4,4 +4,5 @@ source common.sh export NIX_TESTS_CA_BY_DEFAULT=1 cd .. +# shellcheck source=/dev/null source ./build-delete.sh diff --git a/tests/functional/ca/build-dry.sh b/tests/functional/ca/build-dry.sh index 9a72075ec..44bd7202b 100644 --- a/tests/functional/ca/build-dry.sh +++ b/tests/functional/ca/build-dry.sh @@ -1,6 +1,8 @@ +# shellcheck shell=bash source common.sh export NIX_TESTS_CA_BY_DEFAULT=1 +# shellcheck source=/dev/null cd .. 
&& source build-dry.sh diff --git a/tests/functional/ca/eval-store.sh b/tests/functional/ca/eval-store.sh index 9cc499606..0ffdef839 100644 --- a/tests/functional/ca/eval-store.sh +++ b/tests/functional/ca/eval-store.sh @@ -7,4 +7,5 @@ source common.sh export NIX_TESTS_CA_BY_DEFAULT=1 cd .. +# shellcheck source=/dev/null source eval-store.sh diff --git a/tests/functional/ca/gc.sh b/tests/functional/ca/gc.sh index e9b6c5ab5..26b037f64 100755 --- a/tests/functional/ca/gc.sh +++ b/tests/functional/ca/gc.sh @@ -7,4 +7,5 @@ source common.sh export NIX_TESTS_CA_BY_DEFAULT=1 cd .. +# shellcheck source=/dev/null source gc.sh diff --git a/tests/functional/ca/import-from-derivation.sh b/tests/functional/ca/import-from-derivation.sh index 708d2fc78..a3101cc3f 100644 --- a/tests/functional/ca/import-from-derivation.sh +++ b/tests/functional/ca/import-from-derivation.sh @@ -3,6 +3,6 @@ source common.sh export NIX_TESTS_CA_BY_DEFAULT=1 - +# shellcheck source=/dev/null cd .. && source import-from-derivation.sh diff --git a/tests/functional/ca/issue-13247.sh b/tests/functional/ca/issue-13247.sh index 686d90ced..705919513 100755 --- a/tests/functional/ca/issue-13247.sh +++ b/tests/functional/ca/issue-13247.sh @@ -65,7 +65,4 @@ buildViaSubstitute use-a-prime-more-outputs^first # Should only fetch the output we asked for [[ -d "$(jq -r <"$TEST_ROOT"/a.json '.[0].outputs.out')" ]] [[ -f "$(jq -r <"$TEST_ROOT"/a.json '.[2].outputs.first')" ]] - -# Output should *not* be here, this is the bug -[[ -e "$(jq -r <"$TEST_ROOT"/a.json '.[2].outputs.second')" ]] -skipTest "bug is not yet fixed" +[[ ! -e "$(jq -r <"$TEST_ROOT"/a.json '.[2].outputs.second')" ]] diff --git a/tests/functional/ca/multiple-outputs.sh b/tests/functional/ca/multiple-outputs.sh index 63b7d3197..e4e05b5f5 100644 --- a/tests/functional/ca/multiple-outputs.sh +++ b/tests/functional/ca/multiple-outputs.sh @@ -4,4 +4,5 @@ source common.sh export NIX_TESTS_CA_BY_DEFAULT=1 cd .. +# shellcheck source=/dev/null source ./multiple-outputs.sh diff --git a/tests/functional/ca/new-build-cmd.sh b/tests/functional/ca/new-build-cmd.sh index 408bfb0f6..e5cb644d1 100644 --- a/tests/functional/ca/new-build-cmd.sh +++ b/tests/functional/ca/new-build-cmd.sh @@ -4,4 +4,5 @@ source common.sh export NIX_TESTS_CA_BY_DEFAULT=1 cd .. +# shellcheck source=/dev/null source ./build.sh diff --git a/tests/functional/ca/nix-shell.sh b/tests/functional/ca/nix-shell.sh index 7b30b2ac8..05115c126 100755 --- a/tests/functional/ca/nix-shell.sh +++ b/tests/functional/ca/nix-shell.sh @@ -2,6 +2,8 @@ source common.sh +# shellcheck disable=SC2034 NIX_TESTS_CA_BY_DEFAULT=true cd .. +# shellcheck source=/dev/null source ./nix-shell.sh diff --git a/tests/functional/ca/post-hook.sh b/tests/functional/ca/post-hook.sh index 705bde9d4..e1adffc47 100755 --- a/tests/functional/ca/post-hook.sh +++ b/tests/functional/ca/post-hook.sh @@ -6,6 +6,7 @@ requireDaemonNewerThan "2.4pre20210626" export NIX_TESTS_CA_BY_DEFAULT=1 cd .. +# shellcheck source=/dev/null source ./post-hook.sh diff --git a/tests/functional/ca/recursive.sh b/tests/functional/ca/recursive.sh index cd6736b24..e3fb98ab2 100755 --- a/tests/functional/ca/recursive.sh +++ b/tests/functional/ca/recursive.sh @@ -6,4 +6,5 @@ requireDaemonNewerThan "2.4pre20210623" export NIX_TESTS_CA_BY_DEFAULT=1 cd .. 
+# shellcheck source=/dev/null source ./recursive.sh diff --git a/tests/functional/ca/repl.sh b/tests/functional/ca/repl.sh index 0bbbebd85..f96ecfcf2 100644 --- a/tests/functional/ca/repl.sh +++ b/tests/functional/ca/repl.sh @@ -3,5 +3,5 @@ source common.sh export NIX_TESTS_CA_BY_DEFAULT=1 - +# shellcheck source=/dev/null cd .. && source repl.sh diff --git a/tests/functional/ca/selfref-gc.sh b/tests/functional/ca/selfref-gc.sh index 248778894..7ac9ec9f7 100755 --- a/tests/functional/ca/selfref-gc.sh +++ b/tests/functional/ca/selfref-gc.sh @@ -8,4 +8,5 @@ enableFeatures "ca-derivations nix-command flakes" export NIX_TESTS_CA_BY_DEFAULT=1 cd .. +# shellcheck source=/dev/null source ./selfref-gc.sh diff --git a/tests/functional/ca/why-depends.sh b/tests/functional/ca/why-depends.sh index 0af8a5440..2a3c7d083 100644 --- a/tests/functional/ca/why-depends.sh +++ b/tests/functional/ca/why-depends.sh @@ -3,5 +3,5 @@ source common.sh export NIX_TESTS_CA_BY_DEFAULT=1 - +# shellcheck source=/dev/null cd .. && source why-depends.sh diff --git a/tests/functional/dependencies.builder0.sh b/tests/functional/dependencies.builder0.sh index 6fbe4a07a..f680cf7f2 100644 --- a/tests/functional/dependencies.builder0.sh +++ b/tests/functional/dependencies.builder0.sh @@ -17,4 +17,6 @@ ln -s "$out" "$out"/self echo program > "$out"/program chmod +x "$out"/program +echo '1 + 2' > "$out"/foo.nix + echo FOO diff --git a/tests/functional/external-builders.sh b/tests/functional/external-builders.sh new file mode 100644 index 000000000..4c1d5636a --- /dev/null +++ b/tests/functional/external-builders.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash + +source common.sh + +TODO_NixOS + +needLocalStore "'--external-builders' can’t be used with the daemon" + +expr="$TEST_ROOT/expr.nix" +cat > "$expr" < \$out + ''; +} +EOF + +external_builder="$TEST_ROOT/external-builder.sh" +cat > "$external_builder" <> \$out +EOF +chmod +x "$external_builder" + +nix build -L --file "$expr" --out-link "$TEST_ROOT/result" \ + --extra-experimental-features external-builders \ + --external-builders "[{\"systems\": [\"x68_46-xunil\"], \"args\": [\"bla\"], \"program\": \"$external_builder\"}]" + +[[ $(cat "$TEST_ROOT/result") = foobar ]] diff --git a/tests/functional/fetchClosure.sh b/tests/functional/fetchClosure.sh index 9b79ab396..85a83d192 100755 --- a/tests/functional/fetchClosure.sh +++ b/tests/functional/fetchClosure.sh @@ -99,6 +99,14 @@ clearStore [ -e "$caPath" ] +# Test import-from-derivation on the result of fetchClosure. +[[ $(nix eval -v --expr " + import \"\${builtins.fetchClosure { + fromStore = \"file://$cacheDir\"; + fromPath = $caPath; + }}/foo.nix\" +") = 3 ]] + # Check that URL query parameters aren't allowed. 
clearStore narCache=$TEST_ROOT/nar-cache diff --git a/tests/functional/fetchGitRefs.sh b/tests/functional/fetchGitRefs.sh index 288b26591..a7d1a2a29 100755 --- a/tests/functional/fetchGitRefs.sh +++ b/tests/functional/fetchGitRefs.sh @@ -59,6 +59,9 @@ invalid_ref() { } +valid_ref 'A/b' +valid_ref 'AaA/b' +valid_ref 'FOO/BAR/BAZ' valid_ref 'foox' valid_ref '1337' valid_ref 'foo.baz' diff --git a/tests/functional/fetchurl.sh b/tests/functional/fetchurl.sh index 5bc8ca625..c25ac3216 100755 --- a/tests/functional/fetchurl.sh +++ b/tests/functional/fetchurl.sh @@ -88,8 +88,3 @@ requireDaemonNewerThan "2.20" expected=100 if [[ -v NIX_DAEMON_PACKAGE ]]; then expected=1; fi # work around the daemon not returning a 100 status correctly expectStderr $expected nix-build --expr '{ url }: builtins.derivation { name = "nix-cache-info"; system = "x86_64-linux"; builder = "builtin:fetchurl"; inherit url; outputHashMode = "flat"; }' --argstr url "file://$narxz" 2>&1 | grep 'must be a fixed-output or impure derivation' - -requireDaemonNewerThan "2.32.0pre20250831" - -expect 1 nix-build --expr 'import ' --argstr name 'name' --argstr url "file://authority.not.allowed/fetchurl.sh?a=1&a=2" --no-out-link |& - grepQuiet "error: file:// URL 'file://authority.not.allowed/fetchurl.sh?a=1&a=2' has unexpected authority 'authority.not.allowed'" diff --git a/tests/functional/flakes/common.sh b/tests/functional/flakes/common.sh index 422cab96c..77bc03060 100644 --- a/tests/functional/flakes/common.sh +++ b/tests/functional/flakes/common.sh @@ -2,6 +2,8 @@ source ../common.sh +export _NIX_TEST_BARF_ON_UNCACHEABLE=1 + # shellcheck disable=SC2034 # this variable is used by tests that source this file registry=$TEST_ROOT/registry.json diff --git a/tests/functional/flakes/develop.sh b/tests/functional/flakes/develop.sh index c222f0fbb..ee646860b 100755 --- a/tests/functional/flakes/develop.sh +++ b/tests/functional/flakes/develop.sh @@ -18,6 +18,21 @@ cat <"$TEST_HOME/flake.nix" outputs = [ "out" "dev" ]; meta.outputsToInstall = [ "out" ]; buildCommand = ""; + # ensure we're stripping these from the environment derivation + disallowedReferences = [ "out" ]; + disallowedRequisites = [ "out" ]; + }; + packages.$system.hello-structured = (import ./config.nix).mkDerivation { + __structuredAttrs = true; + name = "hello"; + outputs = [ "out" "dev" ]; + meta.outputsToInstall = [ "out" ]; + buildCommand = ""; + # ensure we're stripping these from the environment derivation + outputChecks.out = { + disallowedReferences = [ "out" ]; + disallowedRequisites = [ "out" ]; + }; }; }; } @@ -142,4 +157,7 @@ echo "\$SHELL" EOF )" -ef "$BASH_INTERACTIVE_EXECUTABLE" ]] +# Test whether `nix develop` works with `__structuredAttrs` +[[ -z "$(nix develop --no-write-lock-file .#hello-structured "$rootRepo"/submodule/sub.nix -[[ $(nix eval --json "$flakeref#sub" ) = '"foo"' ]] -[[ $(nix flake metadata --json "$flakeref" | jq -r .locked.rev) = null ]] +[[ $(_NIX_TEST_BARF_ON_UNCACHEABLE='' nix eval --json "$flakeref#sub" ) = '"foo"' ]] +[[ $(_NIX_TEST_BARF_ON_UNCACHEABLE='' nix flake metadata --json "$flakeref" | jq -r .locked.rev) = null ]] # Test that `nix flake metadata` parses `submodule` correctly. cat > "$rootRepo"/flake.nix <&1 | grep '/flakeB.*is forbidden in pure evaluation mode' -expect 1 nix flake lock --impure "$flakeFollowsA" 2>&1 | grep '/flakeB.*does not exist' +expect 1 nix flake lock --impure "$flakeFollowsA" 2>&1 | grep "'flakeB' is too short to be a valid store path" # Test relative non-flake inputs. 
cat > "$flakeFollowsA"/flake.nix < flake.nix { @@ -34,8 +34,8 @@ nix run --no-write-lock-file .#pkgAsPkg # For instance, we might set an environment variable temporarily to affect some # initialization or whatnot, but this must not leak into the environment of the # command being run. -env > $TEST_ROOT/expected-env -nix run -f shell-hello.nix env > $TEST_ROOT/actual-env +env > "$TEST_ROOT"/expected-env +nix run -f shell-hello.nix env > "$TEST_ROOT"/actual-env # Remove/reset variables we expect to be different. # - PATH is modified by nix shell # - we unset TMPDIR on macOS if it contains /var/folders. bad. https://github.com/NixOS/nix/issues/7731 @@ -48,12 +48,12 @@ sed -i \ -e '/^TMPDIR=\/var\/folders\/.*/d' \ -e '/^__CF_USER_TEXT_ENCODING=.*$/d' \ -e '/^__LLVM_PROFILE_RT_INIT_ONCE=.*$/d' \ - $TEST_ROOT/expected-env $TEST_ROOT/actual-env -sort $TEST_ROOT/expected-env | uniq > $TEST_ROOT/expected-env.sorted + "$TEST_ROOT"/expected-env "$TEST_ROOT"/actual-env +sort "$TEST_ROOT"/expected-env | uniq > "$TEST_ROOT"/expected-env.sorted # nix run appears to clear _. I don't understand why. Is this ok? -echo "_=..." >> $TEST_ROOT/actual-env -sort $TEST_ROOT/actual-env | uniq > $TEST_ROOT/actual-env.sorted -diff $TEST_ROOT/expected-env.sorted $TEST_ROOT/actual-env.sorted +echo "_=..." >> "$TEST_ROOT"/actual-env +sort "$TEST_ROOT"/actual-env | uniq > "$TEST_ROOT"/actual-env.sorted +diff "$TEST_ROOT"/expected-env.sorted "$TEST_ROOT"/actual-env.sorted clearStore diff --git a/tests/functional/flakes/show.sh b/tests/functional/flakes/show.sh index 7fcc6aca9..a08db115a 100755 --- a/tests/functional/flakes/show.sh +++ b/tests/functional/flakes/show.sh @@ -12,6 +12,7 @@ pushd "$flakeDir" # By default: Only show the packages content for the current system and no # legacyPackages at all nix flake show --json > show-output.json +# shellcheck disable=SC2016 nix eval --impure --expr ' let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); in @@ -23,6 +24,7 @@ true # With `--all-systems`, show the packages for all systems nix flake show --json --all-systems > show-output.json +# shellcheck disable=SC2016 nix eval --impure --expr ' let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); in @@ -33,6 +35,7 @@ true # With `--legacy`, show the legacy packages nix flake show --json --legacy > show-output.json +# shellcheck disable=SC2016 nix eval --impure --expr ' let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); in @@ -80,6 +83,7 @@ cat >flake.nix < show-output.json +# shellcheck disable=SC2016 nix eval --impure --expr ' let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); in @@ -91,11 +95,12 @@ true # Test that nix flake show doesn't fail if one of the outputs contains # an IFD popd -writeIfdFlake $flakeDir -pushd $flakeDir +writeIfdFlake "$flakeDir" +pushd "$flakeDir" nix flake show --json > show-output.json +# shellcheck disable=SC2016 nix eval --impure --expr ' let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); in diff --git a/tests/functional/flakes/source-paths.sh b/tests/functional/flakes/source-paths.sh index 4709bf2fc..3aa3683c2 100644 --- a/tests/functional/flakes/source-paths.sh +++ b/tests/functional/flakes/source-paths.sh @@ -12,6 +12,10 @@ cat > "$repo/flake.nix" < "$repo/foo" + +expectStderr 1 nix eval "$repo#z" | grepQuiet "error: Path 'foo' in the repository \"$repo\" is not tracked by Git." 
+expectStderr 1 nix eval "$repo#a" | grepQuiet "error: Path 'foo' in the repository \"$repo\" is not tracked by Git." + +git -C "$repo" add "$repo/foo" + +[[ $(nix eval --raw "$repo#z") = 123 ]] + +expectStderr 1 nix eval "$repo#b" | grepQuiet "error: Path 'dir' does not exist in Git repository \"$repo\"." + +mkdir -p "$repo/dir" +echo 456 > "$repo/dir/default.nix" + +expectStderr 1 nix eval "$repo#b" | grepQuiet "error: Path 'dir' in the repository \"$repo\" is not tracked by Git." + +git -C "$repo" add "$repo/dir/default.nix" + +[[ $(nix eval "$repo#b") = 456 ]] diff --git a/tests/functional/formatter.sh b/tests/functional/formatter.sh index 6631dd6b8..03b31708d 100755 --- a/tests/functional/formatter.sh +++ b/tests/functional/formatter.sh @@ -16,6 +16,7 @@ nix fmt --help | grep "reformat your code" nix fmt run --help | grep "reformat your code" nix fmt build --help | grep "build" +# shellcheck disable=SC2154 cat << EOF > flake.nix { outputs = _: { diff --git a/tests/functional/gc-auto.sh b/tests/functional/gc-auto.sh index efe3e4b2b..ea877f27f 100755 --- a/tests/functional/gc-auto.sh +++ b/tests/functional/gc-auto.sh @@ -2,22 +2,26 @@ source common.sh +# shellcheck disable=SC1111 needLocalStore "“min-free” and “max-free” are daemon options" TODO_NixOS clearStore +# shellcheck disable=SC2034 garbage1=$(nix store add-path --name garbage1 ./nar-access.sh) +# shellcheck disable=SC2034 garbage2=$(nix store add-path --name garbage2 ./nar-access.sh) +# shellcheck disable=SC2034 garbage3=$(nix store add-path --name garbage3 ./nar-access.sh) -ls -l $garbage3 -POSIXLY_CORRECT=1 du $garbage3 +ls -l "$garbage3" +POSIXLY_CORRECT=1 du "$garbage3" fake_free=$TEST_ROOT/fake-free export _NIX_TEST_FREE_SPACE_FILE=$fake_free -echo 1100 > $fake_free +echo 1100 > "$fake_free" fifoLock=$TEST_ROOT/fifoLock mkfifo "$fifoLock" @@ -65,11 +69,11 @@ with import ${config_nix}; mkDerivation { EOF ) -nix build --impure -v -o $TEST_ROOT/result-A -L --expr "$expr" \ +nix build --impure -v -o "$TEST_ROOT"/result-A -L --expr "$expr" \ --min-free 1K --max-free 2K --min-free-check-interval 1 & pid1=$! -nix build --impure -v -o $TEST_ROOT/result-B -L --expr "$expr2" \ +nix build --impure -v -o "$TEST_ROOT"/result-B -L --expr "$expr2" \ --min-free 1K --max-free 2K --min-free-check-interval 1 & pid2=$! @@ -77,9 +81,9 @@ pid2=$! # If the first build fails, we need to postpone the failure to still allow # the second one to finish wait "$pid1" || FIRSTBUILDSTATUS=$? -echo "unlock" > $fifoLock -( exit ${FIRSTBUILDSTATUS:-0} ) +echo "unlock" > "$fifoLock" +( exit "${FIRSTBUILDSTATUS:-0}" ) wait "$pid2" -[[ foo = $(cat $TEST_ROOT/result-A/bar) ]] -[[ foo = $(cat $TEST_ROOT/result-B/bar) ]] +[[ foo = $(cat "$TEST_ROOT"/result-A/bar) ]] +[[ foo = $(cat "$TEST_ROOT"/result-B/bar) ]] diff --git a/tests/functional/gc-concurrent.builder.sh b/tests/functional/gc-concurrent.builder.sh index bb6dcd4cf..b3c7abeb1 100644 --- a/tests/functional/gc-concurrent.builder.sh +++ b/tests/functional/gc-concurrent.builder.sh @@ -1,16 +1,19 @@ +# shellcheck shell=bash +# shellcheck disable=SC2154 echo "Build started" > "$lockFifo" -mkdir $out -echo $(cat $input1/foo)$(cat $input2/bar) > $out/foobar +# shellcheck disable=SC2154 +mkdir "$out" +echo "$(cat "$input1"/foo)""$(cat "$input2"/bar)" > "$out"/foobar # Wait for someone to write on the fifo cat "$lockFifo" # $out should not have been GC'ed while we were sleeping, but just in # case... -mkdir -p $out +mkdir -p "$out" # Check that the GC hasn't deleted the lock on our output. 
test -e "$out.lock" -ln -s $input2 $out/input-2 +ln -s "$input2" "$out"/input-2 diff --git a/tests/functional/gc-concurrent.sh b/tests/functional/gc-concurrent.sh index df180b14f..dcfcea3e9 100755 --- a/tests/functional/gc-concurrent.sh +++ b/tests/functional/gc-concurrent.sh @@ -10,54 +10,58 @@ lockFifo1=$TEST_ROOT/test1.fifo mkfifo "$lockFifo1" drvPath1=$(nix-instantiate gc-concurrent.nix -A test1 --argstr lockFifo "$lockFifo1") -outPath1=$(nix-store -q $drvPath1) +outPath1=$(nix-store -q "$drvPath1") drvPath2=$(nix-instantiate gc-concurrent.nix -A test2) -outPath2=$(nix-store -q $drvPath2) +outPath2=$(nix-store -q "$drvPath2") drvPath3=$(nix-instantiate simple.nix) -outPath3=$(nix-store -r $drvPath3) +outPath3=$(nix-store -r "$drvPath3") -(! test -e $outPath3.lock) -touch $outPath3.lock +# shellcheck disable=SC2235 +(! test -e "$outPath3".lock) +touch "$outPath3".lock rm -f "$NIX_STATE_DIR"/gcroots/foo* -ln -s $drvPath2 "$NIX_STATE_DIR/gcroots/foo" -ln -s $outPath3 "$NIX_STATE_DIR/gcroots/foo2" +ln -s "$drvPath2" "$NIX_STATE_DIR/gcroots/foo" +ln -s "$outPath3" "$NIX_STATE_DIR/gcroots/foo2" # Start build #1 in the background. It starts immediately. nix-store -rvv "$drvPath1" & pid1=$! # Wait for the build of $drvPath1 to start -cat $lockFifo1 +cat "$lockFifo1" # Run the garbage collector while the build is running. nix-collect-garbage # Unlock the build of $drvPath1 -echo "" > $lockFifo1 +echo "" > "$lockFifo1" echo waiting for pid $pid1 to finish... wait $pid1 # Check that the root of build #1 and its dependencies haven't been # deleted. The should not be deleted by the GC because they were # being built during the GC. -cat $outPath1/foobar -cat $outPath1/input-2/bar +cat "$outPath1"/foobar +cat "$outPath1"/input-2/bar # Check that the build build $drvPath2 succeeds. # It should succeed because the derivation is a GC root. nix-store -rvv "$drvPath2" -cat $outPath2/foobar +cat "$outPath2"/foobar rm -f "$NIX_STATE_DIR"/gcroots/foo* # The collector should have deleted lock files for paths that have # been built previously. -(! test -e $outPath3.lock) +# shellcheck disable=SC2235 +(! test -e "$outPath3".lock) # If we run the collector now, it should delete outPath1/2. nix-collect-garbage -(! test -e $outPath1) -(! test -e $outPath2) +# shellcheck disable=SC2235 +(! test -e "$outPath1") +# shellcheck disable=SC2235 +(! test -e "$outPath2") diff --git a/tests/functional/gc-concurrent2.builder.sh b/tests/functional/gc-concurrent2.builder.sh index 4f6c58b96..4b1ad6f5e 100644 --- a/tests/functional/gc-concurrent2.builder.sh +++ b/tests/functional/gc-concurrent2.builder.sh @@ -1,5 +1,8 @@ -mkdir $out -echo $(cat $input1/foo)$(cat $input2/bar)xyzzy > $out/foobar +# shellcheck shell=bash +# shellcheck disable=SC2154 +mkdir "$out" +# shellcheck disable=SC2154 +echo "$(cat "$input1"/foo)""$(cat "$input2"/bar)"xyzzy > "$out"/foobar # Check that the GC hasn't deleted the lock on our output. test -e "$out.lock" diff --git a/tests/functional/gc-non-blocking.sh b/tests/functional/gc-non-blocking.sh index 9cd5c0e1c..a85b8e5db 100755 --- a/tests/functional/gc-non-blocking.sh +++ b/tests/functional/gc-non-blocking.sh @@ -23,17 +23,17 @@ mkfifo "$fifo2" dummy=$(nix store add-path ./simple.nix) running=$TEST_ROOT/running -touch $running +touch "$running" # Start GC. -(_NIX_TEST_GC_SYNC_1=$fifo1 _NIX_TEST_GC_SYNC_2=$fifo2 nix-store --gc -vvvvv; rm $running) & +(_NIX_TEST_GC_SYNC_1=$fifo1 _NIX_TEST_GC_SYNC_2=$fifo2 nix-store --gc -vvvvv; rm "$running") & pid=$! 
sleep 2 # Delay the start of the root server to check that the build below # correctly handles ENOENT when connecting to the root server. -(sleep 1; echo > $fifo1) & +(sleep 1; echo > "$fifo1") & pid2=$! # Start a build. This should not be blocked by the GC in progress. @@ -47,6 +47,8 @@ outPath=$(nix-build --max-silent-time 60 -o "$TEST_ROOT/result" -E " wait $pid wait $pid2 -(! test -e $running) -(! test -e $dummy) -test -e $outPath +# shellcheck disable=SC2235 +(! test -e "$running") +# shellcheck disable=SC2235 +(! test -e "$dummy") +test -e "$outPath" diff --git a/tests/functional/hash-convert.sh b/tests/functional/hash-convert.sh index c40cb469c..9ef4c189d 100755 --- a/tests/functional/hash-convert.sh +++ b/tests/functional/hash-convert.sh @@ -99,7 +99,7 @@ try3() { expectStderr 1 nix hash convert --hash-algo "$1" --from nix32 "$4" | grepQuiet "input hash" # Base-16 hashes can be in uppercase. - nix hash convert --hash-algo "$1" --from base16 "$(echo $2 | tr [a-z] [A-Z])" + nix hash convert --hash-algo "$1" --from base16 "$(echo "$2" | tr '[:lower:]' '[:upper:]')" } try3 sha1 "800d59cfcd3c05e900cb4e214be48f6b886a08df" "vw46m23bizj4n8afrc0fj19wrp7mj3c0" "gA1Zz808BekAy04hS+SPa4hqCN8=" diff --git a/tests/functional/impure-derivations.sh b/tests/functional/impure-derivations.sh index 9e483d376..e0b7c3eea 100755 --- a/tests/functional/impure-derivations.sh +++ b/tests/functional/impure-derivations.sh @@ -12,62 +12,62 @@ restartDaemon clearStoreIfPossible # Basic test of impure derivations: building one a second time should not use the previous result. -printf 0 > $TEST_ROOT/counter +printf 0 > "$TEST_ROOT"/counter # `nix derivation add` with impure derivations work drvPath=$(nix-instantiate ./impure-derivations.nix -A impure) -nix derivation show $drvPath | jq .[] > $TEST_HOME/impure-drv.json -drvPath2=$(nix derivation add < $TEST_HOME/impure-drv.json) +nix derivation show "$drvPath" | jq .[] > "$TEST_HOME"/impure-drv.json +drvPath2=$(nix derivation add < "$TEST_HOME"/impure-drv.json) [[ "$drvPath" = "$drvPath2" ]] # But only with the experimental feature! -expectStderr 1 nix derivation add < $TEST_HOME/impure-drv.json --experimental-features nix-command | grepQuiet "experimental Nix feature 'impure-derivations' is disabled" +expectStderr 1 nix derivation add < "$TEST_HOME"/impure-drv.json --experimental-features nix-command | grepQuiet "experimental Nix feature 'impure-derivations' is disabled" nix build --dry-run --json --file ./impure-derivations.nix impure.all json=$(nix build -L --no-link --json --file ./impure-derivations.nix impure.all) -path1=$(echo $json | jq -r .[].outputs.out) -path1_stuff=$(echo $json | jq -r .[].outputs.stuff) -[[ $(< $path1/n) = 0 ]] -[[ $(< $path1_stuff/bla) = 0 ]] +path1=$(echo "$json" | jq -r .[].outputs.out) +path1_stuff=$(echo "$json" | jq -r .[].outputs.stuff) +[[ $(< "$path1"/n) = 0 ]] +[[ $(< "$path1_stuff"/bla) = 0 ]] -[[ $(nix path-info --json $path1 | jq .[].ca) =~ fixed:r:sha256: ]] +[[ $(nix path-info --json "$path1" | jq .[].ca) =~ fixed:r:sha256: ]] path2=$(nix build -L --no-link --json --file ./impure-derivations.nix impure | jq -r .[].outputs.out) -[[ $(< $path2/n) = 1 ]] +[[ $(< "$path2"/n) = 1 ]] # Test impure derivations that depend on impure derivations. 
path3=$(nix build -L --no-link --json --file ./impure-derivations.nix impureOnImpure | jq -r .[].outputs.out) -[[ $(< $path3/n) = X2 ]] +[[ $(< "$path3"/n) = X2 ]] path4=$(nix build -L --no-link --json --file ./impure-derivations.nix impureOnImpure | jq -r .[].outputs.out) -[[ $(< $path4/n) = X3 ]] +[[ $(< "$path4"/n) = X3 ]] # Test that (self-)references work. -[[ $(< $path4/symlink/bla) = 3 ]] -[[ $(< $path4/self/n) = X3 ]] +[[ $(< "$path4"/symlink/bla) = 3 ]] +[[ $(< "$path4"/self/n) = X3 ]] # Input-addressed derivations cannot depend on impure derivations directly. (! nix build -L --no-link --json --file ./impure-derivations.nix inputAddressed 2>&1) | grep 'depends on impure derivation' drvPath=$(nix eval --json --file ./impure-derivations.nix impure.drvPath | jq -r .) -[[ $(nix derivation show $drvPath | jq ".[\"$(basename "$drvPath")\"].outputs.out.impure") = true ]] -[[ $(nix derivation show $drvPath | jq ".[\"$(basename "$drvPath")\"].outputs.stuff.impure") = true ]] +[[ $(nix derivation show "$drvPath" | jq ".[\"$(basename "$drvPath")\"].outputs.out.impure") = true ]] +[[ $(nix derivation show "$drvPath" | jq ".[\"$(basename "$drvPath")\"].outputs.stuff.impure") = true ]] # Fixed-output derivations *can* depend on impure derivations. path5=$(nix build -L --no-link --json --file ./impure-derivations.nix contentAddressed | jq -r .[].outputs.out) -[[ $(< $path5) = X ]] -[[ $(< $TEST_ROOT/counter) = 5 ]] +[[ $(< "$path5") = X ]] +[[ $(< "$TEST_ROOT"/counter) = 5 ]] # And they should not be rebuilt. path5=$(nix build -L --no-link --json --file ./impure-derivations.nix contentAddressed | jq -r .[].outputs.out) -[[ $(< $path5) = X ]] -[[ $(< $TEST_ROOT/counter) = 5 ]] +[[ $(< "$path5") = X ]] +[[ $(< "$TEST_ROOT"/counter) = 5 ]] # Input-addressed derivations can depend on fixed-output derivations that depend on impure derivations. path6=$(nix build -L --no-link --json --file ./impure-derivations.nix inputAddressedAfterCA | jq -r .[].outputs.out) -[[ $(< $path6) = X ]] -[[ $(< $TEST_ROOT/counter) = 5 ]] +[[ $(< "$path6") = X ]] +[[ $(< "$TEST_ROOT"/counter) = 5 ]] # Test nix/fetchurl.nix. path7=$(nix build -L --no-link --print-out-paths --expr "import { impure = true; url = file://$PWD/impure-derivations.sh; }") -cmp $path7 $PWD/impure-derivations.sh +cmp "$path7" "$PWD"/impure-derivations.sh diff --git a/tests/functional/install-darwin.sh b/tests/functional/install-darwin.sh index ea2b75323..0070e9dce 100755 --- a/tests/functional/install-darwin.sh +++ b/tests/functional/install-darwin.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash set -eux @@ -21,12 +21,13 @@ cleanup() { for file in ~/.bash_profile ~/.bash_login ~/.profile ~/.zshenv ~/.zprofile ~/.zshrc ~/.zlogin; do if [ -e "$file" ]; then + # shellcheck disable=SC2002 cat "$file" | grep -v nix-profile > "$file.next" mv "$file.next" "$file" fi done - for i in $(seq 1 $(sysctl -n hw.ncpu)); do + for i in $(seq 1 "$(sysctl -n hw.ncpu)"); do sudo /usr/bin/dscl . -delete "/Users/nixbld$i" || true done sudo /usr/bin/dscl . 
-delete "/Groups/nixbld" || true @@ -65,11 +66,11 @@ verify echo nix-build ./release.nix -A binaryTarball.x86_64-darwin ) | bash -l set -e - cp ./result/nix-*.tar.bz2 $scratch/nix.tar.bz2 + cp ./result/nix-*.tar.bz2 "$scratch"/nix.tar.bz2 ) ( - cd $scratch + cd "$scratch" tar -xf ./nix.tar.bz2 cd nix-* diff --git a/tests/functional/lang/eval-fail-derivation-name.err.exp b/tests/functional/lang/eval-fail-derivation-name.err.exp index 017326c34..ba5ff2d00 100644 --- a/tests/functional/lang/eval-fail-derivation-name.err.exp +++ b/tests/functional/lang/eval-fail-derivation-name.err.exp @@ -1,20 +1,20 @@ error: … while evaluating the attribute 'outPath' - at ::: + at «nix-internal»/derivation-internal.nix::: | value = commonAttrs // { | outPath = builtins.getAttr outputName strict; | ^ | drvPath = strict.drvPath; … while calling the 'getAttr' builtin - at ::: + at «nix-internal»/derivation-internal.nix::: | value = commonAttrs // { | outPath = builtins.getAttr outputName strict; | ^ | drvPath = strict.drvPath; … while calling the 'derivationStrict' builtin - at ::: + at «nix-internal»/derivation-internal.nix::: | | strict = derivationStrict drvAttrs; | ^ diff --git a/tests/functional/lang/eval-fail-empty-formals.err.exp b/tests/functional/lang/eval-fail-empty-formals.err.exp new file mode 100644 index 000000000..5cd4829f7 --- /dev/null +++ b/tests/functional/lang/eval-fail-empty-formals.err.exp @@ -0,0 +1,12 @@ +error: + … from call site + at /pwd/lang/eval-fail-empty-formals.nix:1:1: + 1| (foo@{ }: 1) { a = 3; } + | ^ + 2| + + error: function 'anonymous lambda' called with unexpected argument 'a' + at /pwd/lang/eval-fail-empty-formals.nix:1:2: + 1| (foo@{ }: 1) { a = 3; } + | ^ + 2| diff --git a/tests/functional/lang/eval-fail-empty-formals.nix b/tests/functional/lang/eval-fail-empty-formals.nix new file mode 100644 index 000000000..597f40496 --- /dev/null +++ b/tests/functional/lang/eval-fail-empty-formals.nix @@ -0,0 +1 @@ +(foo@{ }: 1) { a = 3; } diff --git a/tests/functional/lang/eval-fail-hashfile-missing.err.exp b/tests/functional/lang/eval-fail-hashfile-missing.err.exp index 0d3747a6d..901dea2b5 100644 --- a/tests/functional/lang/eval-fail-hashfile-missing.err.exp +++ b/tests/functional/lang/eval-fail-hashfile-missing.err.exp @@ -10,4 +10,4 @@ error: … while calling the 'hashFile' builtin - error: opening file '/pwd/lang/this-file-is-definitely-not-there-7392097': No such file or directory + error: path '/pwd/lang/this-file-is-definitely-not-there-7392097' does not exist diff --git a/tests/functional/linux-sandbox.sh b/tests/functional/linux-sandbox.sh index abb635f11..c3ddf6ce6 100755 --- a/tests/functional/linux-sandbox.sh +++ b/tests/functional/linux-sandbox.sh @@ -19,8 +19,8 @@ if [[ ! 
$SHELL =~ /nix/store ]]; then skipTest "Shell is not from Nix store"; fi # An alias to automatically bind-mount the $SHELL on nix-build invocations nix-sandbox-build () { nix-build --no-out-link --sandbox-paths /nix/store "$@"; } -chmod -R u+w $TEST_ROOT/store0 || true -rm -rf $TEST_ROOT/store0 +chmod -R u+w "$TEST_ROOT"/store0 || true +rm -rf "$TEST_ROOT"/store0 export NIX_STORE_DIR=/my/store export NIX_REMOTE=$TEST_ROOT/store0 @@ -29,11 +29,11 @@ outPath=$(nix-sandbox-build dependencies.nix) [[ $outPath =~ /my/store/.*-dependencies ]] -nix path-info -r $outPath | grep input-2 +nix path-info -r "$outPath" | grep input-2 -nix store ls -R -l $outPath | grep foobar +nix store ls -R -l "$outPath" | grep foobar -nix store cat $outPath/foobar | grep FOOBAR +nix store cat "$outPath"/foobar | grep FOOBAR # Test --check without hash rewriting. nix-sandbox-build dependencies.nix --check @@ -42,9 +42,9 @@ nix-sandbox-build dependencies.nix --check nix-sandbox-build check.nix -A nondeterministic # `100 + 4` means non-determinstic, see doc/manual/source/command-ref/status-build-failure.md -expectStderr 104 nix-sandbox-build check.nix -A nondeterministic --check -K > $TEST_ROOT/log -grepQuietInverse 'error: renaming' $TEST_ROOT/log -grepQuiet 'may not be deterministic' $TEST_ROOT/log +expectStderr 104 nix-sandbox-build check.nix -A nondeterministic --check -K > "$TEST_ROOT"/log +grepQuietInverse 'error: renaming' "$TEST_ROOT"/log +grepQuiet 'may not be deterministic' "$TEST_ROOT"/log # Test that sandboxed builds cannot write to /etc easily # `100` means build failure without extra info, see doc/manual/source/command-ref/status-build-failure.md @@ -59,7 +59,7 @@ testCert () { certFile=$3 # a string that can be the path to a cert file # `100` means build failure without extra info, see doc/manual/source/command-ref/status-build-failure.md [ "$mode" == fixed-output ] && ret=1 || ret=100 - expectStderr $ret nix-sandbox-build linux-sandbox-cert-test.nix --argstr mode "$mode" --option ssl-cert-file "$certFile" | + expectStderr "$ret" nix-sandbox-build linux-sandbox-cert-test.nix --argstr mode "$mode" --option ssl-cert-file "$certFile" | grepQuiet "CERT_${expectation}_IN_SANDBOX" } @@ -68,10 +68,10 @@ cert=$TEST_ROOT/some-cert-file.pem symlinkcert=$TEST_ROOT/symlink-cert-file.pem transitivesymlinkcert=$TEST_ROOT/transitive-symlink-cert-file.pem symlinkDir=$TEST_ROOT/symlink-dir -echo -n "CERT_CONTENT" > $cert -ln -s $cert $symlinkcert -ln -s $symlinkcert $transitivesymlinkcert -ln -s $TEST_ROOT $symlinkDir +echo -n "CERT_CONTENT" > "$cert" +ln -s "$cert" "$symlinkcert" +ln -s "$symlinkcert" "$transitivesymlinkcert" +ln -s "$TEST_ROOT" "$symlinkDir" # No cert in sandbox when not a fixed-output derivation testCert missing normal "$cert" diff --git a/tests/functional/logging.sh b/tests/functional/logging.sh index 83df9a45d..600fce43e 100755 --- a/tests/functional/logging.sh +++ b/tests/functional/logging.sh @@ -9,14 +9,14 @@ clearStore path=$(nix-build dependencies.nix --no-out-link) # Test nix-store -l. -[ "$(nix-store -l $path)" = FOO ] +[ "$(nix-store -l "$path")" = FOO ] # Test compressed logs. clearStore -rm -rf $NIX_LOG_DIR -(! nix-store -l $path) +rm -rf "$NIX_LOG_DIR" +(! nix-store -l "$path") nix-build dependencies.nix --no-out-link --compress-build-log -[ "$(nix-store -l $path)" = FOO ] +[ "$(nix-store -l "$path")" = FOO ] # test whether empty logs work fine with `nix log`. 
builder="$(realpath "$(mktemp)")" @@ -40,5 +40,5 @@ if [[ "$NIX_REMOTE" != "daemon" ]]; then nix build -vv --file dependencies.nix --no-link --json-log-path "$TEST_ROOT/log.json" 2>&1 | grepQuiet 'building.*dependencies-top.drv' jq < "$TEST_ROOT/log.json" grep '{"action":"start","fields":\[".*-dependencies-top.drv","",1,1\],"id":.*,"level":3,"parent":0' "$TEST_ROOT/log.json" >&2 - (( $(grep '{"action":"msg","level":5,"msg":"executing builder .*"}' "$TEST_ROOT/log.json" | wc -l) == 5 )) + (( $(grep -c '{"action":"msg","level":5,"msg":"executing builder .*"}' "$TEST_ROOT/log.json" ) == 5 )) fi diff --git a/tests/functional/meson.build b/tests/functional/meson.build index 368f60452..6f649c836 100644 --- a/tests/functional/meson.build +++ b/tests/functional/meson.build @@ -174,6 +174,7 @@ suites = [ 'extra-sandbox-profile.sh', 'help.sh', 'symlinks.sh', + 'external-builders.sh', ], 'workdir' : meson.current_source_dir(), }, diff --git a/tests/functional/misc.sh b/tests/functional/misc.sh index b94a5fc57..131b63323 100755 --- a/tests/functional/misc.sh +++ b/tests/functional/misc.sh @@ -14,6 +14,7 @@ source common.sh nix-env --version | grep -F "${_NIX_TEST_CLIENT_VERSION:-$version}" nix_env=$(type -P nix-env) +# shellcheck disable=SC2123 (PATH=""; ! $nix_env --help 2>&1 ) | grepQuiet -F "The 'man' command was not found, but it is needed for 'nix-env' and some other 'nix-*' commands' help text. Perhaps you could install the 'man' command?" # Usage errors. @@ -22,12 +23,12 @@ expect 1 nix-env -q --foo 2>&1 | grep "unknown flag" # Eval Errors. eval_arg_res=$(nix-instantiate --eval -E 'let a = {} // a; in a.foo' 2>&1 || true) -echo $eval_arg_res | grep "at «string»:1:15:" -echo $eval_arg_res | grep "infinite recursion encountered" +echo "$eval_arg_res" | grep "at «string»:1:15:" +echo "$eval_arg_res" | grep "infinite recursion encountered" eval_stdin_res=$(echo 'let a = {} // a; in a.foo' | nix-instantiate --eval -E - 2>&1 || true) -echo $eval_stdin_res | grep "at «stdin»:1:15:" -echo $eval_stdin_res | grep "infinite recursion encountered" +echo "$eval_stdin_res" | grep "at «stdin»:1:15:" +echo "$eval_stdin_res" | grep "infinite recursion encountered" # Attribute path errors expectStderr 1 nix-instantiate --eval -E '{}' -A '"x' | grepQuiet "missing closing quote in selection path" @@ -40,10 +41,10 @@ expectStderr 1 nix-instantiate --eval -E '[]' -A '1' | grepQuiet "out of range" # NOTE(cole-h): behavior is different depending on the order, which is why we test an unknown option # before and after the `'{}'`! out="$(expectStderr 0 nix-instantiate --option foobar baz --expr '{}')" -[[ "$(echo "$out" | grep foobar | wc -l)" = 1 ]] +[[ "$(echo "$out" | grep -c foobar )" = 1 ]] out="$(expectStderr 0 nix-instantiate '{}' --option foobar baz --expr )" -[[ "$(echo "$out" | grep foobar | wc -l)" = 1 ]] +[[ "$(echo "$out" | grep -c foobar )" = 1 ]] if [[ $(uname) = Linux && $(uname -m) = i686 ]]; then [[ $(nix config show system) = i686-linux ]] diff --git a/tests/functional/multiple-outputs.sh b/tests/functional/multiple-outputs.sh index c4e0be15e..f703fb02b 100755 --- a/tests/functional/multiple-outputs.sh +++ b/tests/functional/multiple-outputs.sh @@ -6,15 +6,17 @@ TODO_NixOS clearStoreIfPossible -rm -f $TEST_ROOT/result* +rm -f "$TEST_ROOT"/result* # Placeholder strings are opaque, so cannot do this check for floating # content-addressing derivations. -if [[ ! 
-n "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then +if [[ -z "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then # Test whether the output names match our expectations outPath=$(nix-instantiate multiple-outputs.nix --eval -A nameCheck.out.outPath) + # shellcheck disable=SC2016 [ "$(echo "$outPath" | sed -E 's_^".*/[^-/]*-([^/]*)"$_\1_')" = "multiple-outputs-a" ] outPath=$(nix-instantiate multiple-outputs.nix --eval -A nameCheck.dev.outPath) + # shellcheck disable=SC2016 [ "$(echo "$outPath" | sed -E 's_^".*/[^-/]*-([^/]*)"$_\1_')" = "multiple-outputs-a-dev" ] fi @@ -27,16 +29,17 @@ echo "evaluating c..." # outputs. drvPath=$(nix-instantiate multiple-outputs.nix -A c) #[ "$drvPath" = "$drvPath2" ] -grepQuiet 'multiple-outputs-a.drv",\["first","second"\]' $drvPath -grepQuiet 'multiple-outputs-b.drv",\["out"\]' $drvPath +grepQuiet 'multiple-outputs-a.drv",\["first","second"\]' "$drvPath" +grepQuiet 'multiple-outputs-b.drv",\["out"\]' "$drvPath" # While we're at it, test the ‘unsafeDiscardOutputDependency’ primop. outPath=$(nix-build multiple-outputs.nix -A d --no-out-link) -drvPath=$(cat $outPath/drv) +drvPath=$(cat "$outPath"/drv) if [[ -n "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then - expectStderr 1 nix-store -q $drvPath | grepQuiet "Cannot use output path of floating content-addressing derivation until we know what it is (e.g. by building it)" + expectStderr 1 nix-store -q "$drvPath" | grepQuiet "Cannot use output path of floating content-addressing derivation until we know what it is (e.g. by building it)" else - outPath=$(nix-store -q $drvPath) + outPath=$(nix-store -q "$drvPath") + # shellcheck disable=SC2233 (! [ -e "$outPath" ]) fi @@ -48,34 +51,37 @@ echo "output path is $outPath" [ "$(cat "$outPath/file")" = "success" ] # Test nix-build on a derivation with multiple outputs. -outPath1=$(nix-build multiple-outputs.nix -A a -o $TEST_ROOT/result) -[ -e $TEST_ROOT/result-first ] -(! [ -e $TEST_ROOT/result-second ]) -nix-build multiple-outputs.nix -A a.all -o $TEST_ROOT/result -[ "$(cat $TEST_ROOT/result-first/file)" = "first" ] -[ "$(cat $TEST_ROOT/result-second/file)" = "second" ] -[ "$(cat $TEST_ROOT/result-second/link/file)" = "first" ] -hash1=$(nix-store -q --hash $TEST_ROOT/result-second) +outPath1=$(nix-build multiple-outputs.nix -A a -o "$TEST_ROOT"/result) +[ -e "$TEST_ROOT"/result-first ] +# shellcheck disable=SC2235 +(! [ -e "$TEST_ROOT"/result-second ]) +nix-build multiple-outputs.nix -A a.all -o "$TEST_ROOT"/result +[ "$(cat "$TEST_ROOT"/result-first/file)" = "first" ] +[ "$(cat "$TEST_ROOT"/result-second/file)" = "second" ] +[ "$(cat "$TEST_ROOT"/result-second/link/file)" = "first" ] +hash1=$(nix-store -q --hash "$TEST_ROOT"/result-second) -outPath2=$(nix-build $(nix-instantiate multiple-outputs.nix -A a) --no-out-link) -[[ $outPath1 = $outPath2 ]] +outPath2=$(nix-build "$(nix-instantiate multiple-outputs.nix -A a)" --no-out-link) +[[ $outPath1 = "$outPath2" ]] -outPath2=$(nix-build $(nix-instantiate multiple-outputs.nix -A a.first) --no-out-link) -[[ $outPath1 = $outPath2 ]] +outPath2=$(nix-build "$(nix-instantiate multiple-outputs.nix -A a.first)" --no-out-link) +[[ $outPath1 = "$outPath2" ]] -outPath2=$(nix-build $(nix-instantiate multiple-outputs.nix -A a.second) --no-out-link) -[[ $(cat $outPath2/file) = second ]] +outPath2=$(nix-build "$(nix-instantiate multiple-outputs.nix -A a.second)" --no-out-link) +[[ $(cat "$outPath2"/file) = second ]] +# FIXME: Fixing this shellcheck causes the test to fail. 
+# shellcheck disable=SC2046 [[ $(nix-build $(nix-instantiate multiple-outputs.nix -A a.all) --no-out-link | wc -l) -eq 2 ]] -if [[ ! -n "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then +if [[ -z "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then # Delete one of the outputs and rebuild it. This will cause a hash # rewrite. - env -u NIX_REMOTE nix store delete $TEST_ROOT/result-second --ignore-liveness - nix-build multiple-outputs.nix -A a.all -o $TEST_ROOT/result - [ "$(cat $TEST_ROOT/result-second/file)" = "second" ] - [ "$(cat $TEST_ROOT/result-second/link/file)" = "first" ] - hash2=$(nix-store -q --hash $TEST_ROOT/result-second) + env -u NIX_REMOTE nix store delete "$TEST_ROOT"/result-second --ignore-liveness + nix-build multiple-outputs.nix -A a.all -o "$TEST_ROOT"/result + [ "$(cat "$TEST_ROOT"/result-second/file)" = "second" ] + [ "$(cat "$TEST_ROOT"/result-second/link/file)" = "first" ] + hash2=$(nix-store -q --hash "$TEST_ROOT"/result-second) [ "$hash1" = "$hash2" ] fi @@ -92,15 +98,15 @@ fi # Do a GC. This should leave an empty store. echo "collecting garbage..." -rm $TEST_ROOT/result* +rm "$TEST_ROOT"/result* nix-store --gc --keep-derivations --keep-outputs nix-store --gc --print-roots -rm -rf $NIX_STORE_DIR/.links -rmdir $NIX_STORE_DIR +rm -rf "$NIX_STORE_DIR"/.links +rmdir "$NIX_STORE_DIR" # TODO inspect why this doesn't work with floating content-addressing # derivations. -if [[ ! -n "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then +if [[ -z "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then expect 1 nix build -f multiple-outputs.nix invalid-output-name-1 2>&1 | grep 'contains illegal character' expect 1 nix build -f multiple-outputs.nix invalid-output-name-2 2>&1 | grep 'contains illegal character' fi diff --git a/tests/functional/nars.sh b/tests/functional/nars.sh index dd90345a6..a52c257bc 100755 --- a/tests/functional/nars.sh +++ b/tests/functional/nars.sh @@ -131,31 +131,3 @@ else fi rm -f "$TEST_ROOT/unicode-*" - -# Unpacking a NAR with a NUL character in a file name should fail. -rm -rf "$TEST_ROOT/out" -expectStderr 1 nix-store --restore "$TEST_ROOT/out" < nul-character.nar | grepQuiet "NAR contains invalid file name 'f" - -# Likewise for a '.' filename. -rm -rf "$TEST_ROOT/out" -expectStderr 1 nix-store --restore "$TEST_ROOT/out" < dot.nar | grepQuiet "NAR contains invalid file name '.'" - -# Likewise for a '..' filename. -rm -rf "$TEST_ROOT/out" -expectStderr 1 nix-store --restore "$TEST_ROOT/out" < dotdot.nar | grepQuiet "NAR contains invalid file name '..'" - -# Likewise for a filename containing a slash. -rm -rf "$TEST_ROOT/out" -expectStderr 1 nix-store --restore "$TEST_ROOT/out" < slash.nar | grepQuiet "NAR contains invalid file name 'x/y'" - -# Likewise for an empty filename. -rm -rf "$TEST_ROOT/out" -expectStderr 1 nix-store --restore "$TEST_ROOT/out" < empty.nar | grepQuiet "NAR contains invalid file name ''" - -# Test that the 'executable' field cannot come before the 'contents' field. -rm -rf "$TEST_ROOT/out" -expectStderr 1 nix-store --restore "$TEST_ROOT/out" < executable-after-contents.nar | grepQuiet "expected tag ')', got 'executable'" - -# Test that the 'name' field cannot come before the 'node' field in a directory entry. 
-rm -rf "$TEST_ROOT/out" -expectStderr 1 nix-store --restore "$TEST_ROOT/out" < name-after-node.nar | grepQuiet "expected tag 'name'" diff --git a/tests/functional/nested-sandboxing.sh b/tests/functional/nested-sandboxing.sh index 4d4cf125e..8788c7d90 100755 --- a/tests/functional/nested-sandboxing.sh +++ b/tests/functional/nested-sandboxing.sh @@ -11,7 +11,7 @@ requiresUnprivilegedUserNamespaces start="$TEST_ROOT/start" mkdir -p "$start" -cp -r common common.sh ${config_nix} ./nested-sandboxing "$start" +cp -r common common.sh "${config_nix}" ./nested-sandboxing "$start" cp "${_NIX_TEST_BUILD_DIR}/common/subst-vars.sh" "$start/common" # N.B. redefine _NIX_TEST_SOURCE_DIR="$start" @@ -20,6 +20,7 @@ cd "$start" source ./nested-sandboxing/command.sh +# shellcheck disable=SC2016 expectStderr 100 runNixBuild badStoreUrl 2 | grepQuiet '`sandbox-build-dir` must not contain' runNixBuild goodStoreUrl 5 diff --git a/tests/functional/nested-sandboxing/command.sh b/tests/functional/nested-sandboxing/command.sh index 7c04e82f5..c01133d93 100644 --- a/tests/functional/nested-sandboxing/command.sh +++ b/tests/functional/nested-sandboxing/command.sh @@ -1,17 +1,20 @@ +# shellcheck shell=bash set -eu -o pipefail -export NIX_BIN_DIR=$(dirname $(type -p nix)) +NIX_BIN_DIR=$(dirname "$(type -p nix)") +export NIX_BIN_DIR # TODO Get Nix and its closure more flexibly -export EXTRA_SANDBOX="/nix/store $(dirname $NIX_BIN_DIR)" +EXTRA_SANDBOX="/nix/store $(dirname "$NIX_BIN_DIR")" +export EXTRA_SANDBOX badStoreUrl () { local altitude=$1 - echo $TEST_ROOT/store-$altitude + echo "$TEST_ROOT"/store-"$altitude" } goodStoreUrl () { local altitude=$1 - echo $("badStoreUrl" "$altitude")?store=/foo-$altitude + echo "$("badStoreUrl" "$altitude")"?store=/foo-"$altitude" } # The non-standard sandbox-build-dir helps ensure that we get the same behavior diff --git a/tests/functional/nix-build.sh b/tests/functional/nix-build.sh index 091e429e0..33973c628 100755 --- a/tests/functional/nix-build.sh +++ b/tests/functional/nix-build.sh @@ -6,30 +6,30 @@ TODO_NixOS clearStoreIfPossible -outPath=$(nix-build dependencies.nix -o $TEST_ROOT/result) -test "$(cat $TEST_ROOT/result/foobar)" = FOOBAR +outPath=$(nix-build dependencies.nix -o "$TEST_ROOT"/result) +test "$(cat "$TEST_ROOT"/result/foobar)" = FOOBAR # The result should be retained by a GC. echo A -target=$(readLink $TEST_ROOT/result) +target=$(readLink "$TEST_ROOT"/result) echo B -echo target is $target +echo target is "$target" nix-store --gc -test -e $target/foobar +test -e "$target"/foobar # But now it should be gone. 
-rm $TEST_ROOT/result +rm "$TEST_ROOT"/result nix-store --gc -if test -e $target/foobar; then false; fi +if test -e "$target"/foobar; then false; fi -outPath2=$(nix-build $(nix-instantiate dependencies.nix) --no-out-link) -[[ $outPath = $outPath2 ]] +outPath2=$(nix-build "$(nix-instantiate dependencies.nix)" --no-out-link) +[[ $outPath = "$outPath2" ]] -outPath2=$(nix-build $(nix-instantiate dependencies.nix)!out --no-out-link) -[[ $outPath = $outPath2 ]] +outPath2=$(nix-build "$(nix-instantiate dependencies.nix)"!out --no-out-link) +[[ $outPath = "$outPath2" ]] -outPath2=$(nix-store -r $(nix-instantiate --add-root $TEST_ROOT/indirect dependencies.nix)!out) -[[ $outPath = $outPath2 ]] +outPath2=$(nix-store -r "$(nix-instantiate --add-root "$TEST_ROOT"/indirect dependencies.nix)"!out) +[[ $outPath = "$outPath2" ]] # The order of the paths on stdout must correspond to the -A options # https://github.com/NixOS/nix/issues/4197 @@ -39,9 +39,11 @@ input1="$(nix-build nix-build-examples.nix -A input1 --no-out-link)" input2="$(nix-build nix-build-examples.nix -A input2 --no-out-link)" body="$(nix-build nix-build-examples.nix -A body --no-out-link)" +# shellcheck disable=SC2046,SC2005 outPathsA="$(echo $(nix-build nix-build-examples.nix -A input0 -A input1 -A input2 -A body --no-out-link))" [[ "$outPathsA" = "$input0 $input1 $input2 $body" ]] # test a different ordering to make sure it fails, not just in 23 out of 24 permutations +# shellcheck disable=SC2046,SC2005 outPathsB="$(echo $(nix-build nix-build-examples.nix -A body -A input1 -A input2 -A input0 --no-out-link))" [[ "$outPathsB" = "$body $input1 $input2 $input0" ]] diff --git a/tests/functional/nix-channel.sh b/tests/functional/nix-channel.sh index d0b772850..f23d4bbde 100755 --- a/tests/functional/nix-channel.sh +++ b/tests/functional/nix-channel.sh @@ -4,7 +4,7 @@ source common.sh clearProfiles -rm -f $TEST_HOME/.nix-channels $TEST_HOME/.nix-profile +rm -f "$TEST_HOME"/.nix-channels "$TEST_HOME"/.nix-profile # Test add/list/remove. nix-channel --add http://foo/bar xyzzy @@ -12,8 +12,8 @@ nix-channel --list | grepQuiet http://foo/bar nix-channel --remove xyzzy [[ $(nix-channel --list-generations | wc -l) == 1 ]] -[ -e $TEST_HOME/.nix-channels ] -[ "$(cat $TEST_HOME/.nix-channels)" = '' ] +[ -e "$TEST_HOME"/.nix-channels ] +[ "$(cat "$TEST_HOME"/.nix-channels)" = '' ] # Test the XDG Base Directories support @@ -25,47 +25,47 @@ nix-channel --remove xyzzy unset NIX_CONFIG -[ -e $TEST_HOME/.local/state/nix/channels ] -[ "$(cat $TEST_HOME/.local/state/nix/channels)" = '' ] +[ -e "$TEST_HOME"/.local/state/nix/channels ] +[ "$(cat "$TEST_HOME"/.local/state/nix/channels)" = '' ] # Create a channel. 
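# Editor's note: the channel-creation lines below pack the tarball with
# '(cd "$TEST_ROOT" && tar cvf - nixexprs) | bzip2 > ...'. The parentheses run
# the cd in a subshell, so the surrounding script's working directory is left
# untouched. A standalone sketch of the idiom:
( cd /tmp && pwd )   # prints /tmp (or the resolved temp dir)
pwd                  # still the original directory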
-rm -rf $TEST_ROOT/foo -mkdir -p $TEST_ROOT/foo +rm -rf "$TEST_ROOT"/foo +mkdir -p "$TEST_ROOT"/foo drvPath=$(nix-instantiate dependencies.nix) -nix copy --to file://$TEST_ROOT/foo?compression="bzip2" $(nix-store -r "$drvPath") -rm -rf $TEST_ROOT/nixexprs -mkdir -p $TEST_ROOT/nixexprs -cp "${config_nix}" dependencies.nix dependencies.builder*.sh $TEST_ROOT/nixexprs/ -ln -s dependencies.nix $TEST_ROOT/nixexprs/default.nix -(cd $TEST_ROOT && tar cvf - nixexprs) | bzip2 > $TEST_ROOT/foo/nixexprs.tar.bz2 +nix copy --to file://"$TEST_ROOT"/foo?compression="bzip2" "$(nix-store -r "$drvPath")" +rm -rf "$TEST_ROOT"/nixexprs +mkdir -p "$TEST_ROOT"/nixexprs +cp "${config_nix}" dependencies.nix dependencies.builder*.sh "$TEST_ROOT"/nixexprs/ +ln -s dependencies.nix "$TEST_ROOT"/nixexprs/default.nix +(cd "$TEST_ROOT" && tar cvf - nixexprs) | bzip2 > "$TEST_ROOT"/foo/nixexprs.tar.bz2 # Test the update action. -nix-channel --add file://$TEST_ROOT/foo +nix-channel --add file://"$TEST_ROOT"/foo nix-channel --update [[ $(nix-channel --list-generations | wc -l) == 2 ]] # Do a query. -nix-env -qa \* --meta --xml --out-path > $TEST_ROOT/meta.xml -grepQuiet 'meta.*description.*Random test package' $TEST_ROOT/meta.xml -grepQuiet 'item.*attrPath="foo".*name="dependencies-top"' $TEST_ROOT/meta.xml +nix-env -qa \* --meta --xml --out-path > "$TEST_ROOT"/meta.xml +grepQuiet 'meta.*description.*Random test package' "$TEST_ROOT"/meta.xml +grepQuiet 'item.*attrPath="foo".*name="dependencies-top"' "$TEST_ROOT"/meta.xml # Do an install. nix-env -i dependencies-top -[ -e $TEST_HOME/.nix-profile/foobar ] +[ -e "$TEST_HOME"/.nix-profile/foobar ] # Test updating from a tarball -nix-channel --add file://$TEST_ROOT/foo/nixexprs.tar.bz2 bar +nix-channel --add file://"$TEST_ROOT"/foo/nixexprs.tar.bz2 bar nix-channel --update # Do a query. -nix-env -qa \* --meta --xml --out-path > $TEST_ROOT/meta.xml -grepQuiet 'meta.*description.*Random test package' $TEST_ROOT/meta.xml -grepQuiet 'item.*attrPath="bar".*name="dependencies-top"' $TEST_ROOT/meta.xml -grepQuiet 'item.*attrPath="foo".*name="dependencies-top"' $TEST_ROOT/meta.xml +nix-env -qa \* --meta --xml --out-path > "$TEST_ROOT"/meta.xml +grepQuiet 'meta.*description.*Random test package' "$TEST_ROOT"/meta.xml +grepQuiet 'item.*attrPath="bar".*name="dependencies-top"' "$TEST_ROOT"/meta.xml +grepQuiet 'item.*attrPath="foo".*name="dependencies-top"' "$TEST_ROOT"/meta.xml # Do an install. nix-env -i dependencies-top -[ -e $TEST_HOME/.nix-profile/foobar ] +[ -e "$TEST_HOME"/.nix-profile/foobar ] # Test evaluation through a channel symlink (#9882). drvPath=$(nix-instantiate '') @@ -73,9 +73,9 @@ drvPath=$(nix-instantiate '') # Add a test for the special case behaviour of 'nixpkgs' in the # channels for root (see EvalSettings::getDefaultNixPath()). if ! isTestOnNixOS; then - nix-channel --add file://$TEST_ROOT/foo nixpkgs + nix-channel --add file://"$TEST_ROOT"/foo nixpkgs nix-channel --update - mv $TEST_HOME/.local/state/nix/profiles $TEST_ROOT/var/nix/profiles/per-user/root + mv "$TEST_HOME"/.local/state/nix/profiles "$TEST_ROOT"/var/nix/profiles/per-user/root drvPath2=$(nix-instantiate '') [[ "$drvPath" = "$drvPath2" ]] fi diff --git a/tests/functional/nix-collect-garbage-d.sh b/tests/functional/nix-collect-garbage-d.sh index 119efe629..44de90711 100755 --- a/tests/functional/nix-collect-garbage-d.sh +++ b/tests/functional/nix-collect-garbage-d.sh @@ -29,7 +29,7 @@ testCollectGarbageD # Run the same test, but forcing the profiles an arbitrary location. 
rm ~/.nix-profile -ln -s $TEST_ROOT/blah ~/.nix-profile +ln -s "$TEST_ROOT"/blah ~/.nix-profile testCollectGarbageD # Run the same test, but forcing the profiles at their legacy location under diff --git a/tests/functional/nix-copy-ssh-common.sh b/tests/functional/nix-copy-ssh-common.sh index 5eea9612d..8154585af 100644 --- a/tests/functional/nix-copy-ssh-common.sh +++ b/tests/functional/nix-copy-ssh-common.sh @@ -1,3 +1,4 @@ +# shellcheck shell=bash proto=$1 shift (( $# == 0 )) @@ -7,7 +8,7 @@ TODO_NixOS clearStore clearCache -mkdir -p $TEST_ROOT/stores +mkdir -p "$TEST_ROOT"/stores # Create path to copy back and forth outPath=$(nix-build --no-out-link dependencies.nix) @@ -37,17 +38,17 @@ if [[ "$proto" == "ssh-ng" ]]; then args+=(--no-check-sigs) fi -[ ! -f ${remoteRoot}${outPath}/foobar ] -nix copy "${args[@]}" --to "$remoteStore" $outPath -[ -f ${remoteRoot}${outPath}/foobar ] +[ ! -f "${remoteRoot}""${outPath}"/foobar ] +nix copy "${args[@]}" --to "$remoteStore" "$outPath" +[ -f "${remoteRoot}""${outPath}"/foobar ] # Copy back from store clearStore -[ ! -f $outPath/foobar ] -nix copy --no-check-sigs --from "$remoteStore" $outPath -[ -f $outPath/foobar ] +[ ! -f "$outPath"/foobar ] +nix copy --no-check-sigs --from "$remoteStore" "$outPath" +[ -f "$outPath"/foobar ] # Check --substitute-on-destination, avoid corrupted store diff --git a/tests/functional/nix-copy-ssh-ng.sh b/tests/functional/nix-copy-ssh-ng.sh index 41958c2c3..f74f3bb86 100755 --- a/tests/functional/nix-copy-ssh-ng.sh +++ b/tests/functional/nix-copy-ssh-ng.sh @@ -14,5 +14,5 @@ outPath=$(nix-build --no-out-link dependencies.nix) nix store info --store "$remoteStore" # Regression test for https://github.com/NixOS/nix/issues/6253 -nix copy --to "$remoteStore" $outPath --no-check-sigs & -nix copy --to "$remoteStore" $outPath --no-check-sigs +nix copy --to "$remoteStore" "$outPath" --no-check-sigs & +nix copy --to "$remoteStore" "$outPath" --no-check-sigs diff --git a/tests/functional/nix-profile.sh b/tests/functional/nix-profile.sh index b1cfef6b0..922162d4b 100755 --- a/tests/functional/nix-profile.sh +++ b/tests/functional/nix-profile.sh @@ -12,9 +12,10 @@ restartDaemon # Make a flake. flake1Dir=$TEST_ROOT/flake1 -mkdir -p $flake1Dir +mkdir -p "$flake1Dir" -cat > $flake1Dir/flake.nix < "$flake1Dir"/flake.nix < $flake1Dir/flake.nix < $flake1Dir/who -printf 1.0 > $flake1Dir/version -printf false > $flake1Dir/ca.nix +printf World > "$flake1Dir"/who +printf 1.0 > "$flake1Dir"/version +printf false > "$flake1Dir"/ca.nix -cp "${config_nix}" $flake1Dir/ +cp "${config_nix}" "$flake1Dir"/ # Test upgrading from nix-env. nix-env -f ./user-envs.nix -i foo-1.0 nix profile list | grep -A2 'Name:.*foo' | grep 'Store paths:.*foo-1.0' -nix profile add $flake1Dir -L +nix profile add "$flake1Dir" -L nix profile list | grep -A4 'Name:.*flake1' | grep 'Locked flake URL:.*narHash' -[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello World" ]] -[ -e $TEST_HOME/.nix-profile/share/man ] -(! [ -e $TEST_HOME/.nix-profile/include ]) +[[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello World" ]] +[ -e "$TEST_HOME"/.nix-profile/share/man ] +# shellcheck disable=SC2235 +(! 
[ -e "$TEST_HOME"/.nix-profile/include ]) nix profile history nix profile history | grep "packages.$system.default: ∅ -> 1.0" nix profile diff-closures | grep 'env-manifest.nix: ε → ∅' @@ -64,32 +66,32 @@ nix profile diff-closures | grep 'env-manifest.nix: ε → ∅' # Test XDG Base Directories support export NIX_CONFIG="use-xdg-base-directories = true" nix profile remove flake1 2>&1 | grep 'removed 1 packages' -nix profile add $flake1Dir -[[ $($TEST_HOME/.local/state/nix/profile/bin/hello) = "Hello World" ]] +nix profile add "$flake1Dir" +[[ $("$TEST_HOME"/.local/state/nix/profile/bin/hello) = "Hello World" ]] unset NIX_CONFIG # Test conflicting package add. -nix profile add $flake1Dir 2>&1 | grep "warning: 'flake1' is already added" +nix profile add "$flake1Dir" 2>&1 | grep "warning: 'flake1' is already added" # Test upgrading a package. -printf NixOS > $flake1Dir/who -printf 2.0 > $flake1Dir/version +printf NixOS > "$flake1Dir"/who +printf 2.0 > "$flake1Dir"/version nix profile upgrade flake1 -[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello NixOS" ]] +[[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello NixOS" ]] nix profile history | grep "packages.$system.default: 1.0, 1.0-man -> 2.0, 2.0-man" # Test upgrading package using regular expression. -printf 2.1 > $flake1Dir/version +printf 2.1 > "$flake1Dir"/version nix profile upgrade --regex '.*' -[[ $(readlink $TEST_HOME/.nix-profile/bin/hello) =~ .*-profile-test-2\.1/bin/hello ]] +[[ $(readlink "$TEST_HOME"/.nix-profile/bin/hello) =~ .*-profile-test-2\.1/bin/hello ]] nix profile rollback # Test upgrading all packages -printf 2.2 > $flake1Dir/version +printf 2.2 > "$flake1Dir"/version nix profile upgrade --all -[[ $(readlink $TEST_HOME/.nix-profile/bin/hello) =~ .*-profile-test-2\.2/bin/hello ]] +[[ $(readlink "$TEST_HOME"/.nix-profile/bin/hello) =~ .*-profile-test-2\.2/bin/hello ]] nix profile rollback -printf 1.0 > $flake1Dir/version +printf 1.0 > "$flake1Dir"/version # Test --all exclusivity. assertStderr nix --offline profile upgrade --all foo << EOF @@ -117,98 +119,102 @@ nix profile rollback nix profile diff-closures # Test rollback. -printf World > $flake1Dir/who +printf World > "$flake1Dir"/who nix profile upgrade flake1 -printf NixOS > $flake1Dir/who +printf NixOS > "$flake1Dir"/who nix profile upgrade flake1 nix profile rollback -[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello World" ]] +[[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello World" ]] # Test uninstall. -[ -e $TEST_HOME/.nix-profile/bin/foo ] +[ -e "$TEST_HOME"/.nix-profile/bin/foo ] +# shellcheck disable=SC2235 nix profile remove foo 2>&1 | grep 'removed 1 packages' -(! [ -e $TEST_HOME/.nix-profile/bin/foo ]) +# shellcheck disable=SC2235 +(! [ -e "$TEST_HOME"/.nix-profile/bin/foo ]) nix profile history | grep 'foo: 1.0 -> ∅' nix profile diff-closures | grep 'Version 3 -> 4' # Test installing a non-flake package. nix profile add --file ./simple.nix '' -[[ $(cat $TEST_HOME/.nix-profile/hello) = "Hello World!" ]] +[[ $(cat "$TEST_HOME"/.nix-profile/hello) = "Hello World!" ]] nix profile remove simple 2>&1 | grep 'removed 1 packages' -nix profile add $(nix-build --no-out-link ./simple.nix) -[[ $(cat $TEST_HOME/.nix-profile/hello) = "Hello World!" ]] +nix profile add "$(nix-build --no-out-link ./simple.nix)" +[[ $(cat "$TEST_HOME"/.nix-profile/hello) = "Hello World!" 
]] # Test packages with same name from different sources -mkdir $TEST_ROOT/simple-too -cp ./simple.nix "${config_nix}" simple.builder.sh $TEST_ROOT/simple-too -nix profile add --file $TEST_ROOT/simple-too/simple.nix '' +mkdir "$TEST_ROOT"/simple-too +cp ./simple.nix "${config_nix}" simple.builder.sh "$TEST_ROOT"/simple-too +nix profile add --file "$TEST_ROOT"/simple-too/simple.nix '' nix profile list | grep -A4 'Name:.*simple' | grep 'Name:.*simple-1' nix profile remove simple 2>&1 | grep 'removed 1 packages' nix profile remove simple-1 2>&1 | grep 'removed 1 packages' # Test wipe-history. nix profile wipe-history -[[ $(nix profile history | grep Version | wc -l) -eq 1 ]] +[[ $(nix profile history | grep -c Version) -eq 1 ]] # Test upgrade to CA package. -printf true > $flake1Dir/ca.nix -printf 3.0 > $flake1Dir/version +printf true > "$flake1Dir"/ca.nix +printf 3.0 > "$flake1Dir"/version nix profile upgrade flake1 nix profile history | grep "packages.$system.default: 1.0, 1.0-man -> 3.0, 3.0-man" # Test new install of CA package. nix profile remove flake1 2>&1 | grep 'removed 1 packages' -printf 4.0 > $flake1Dir/version -printf Utrecht > $flake1Dir/who -nix profile add $flake1Dir -[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello Utrecht" ]] -[[ $(nix path-info --json $(realpath $TEST_HOME/.nix-profile/bin/hello) | jq -r .[].ca) =~ fixed:r:sha256: ]] +printf 4.0 > "$flake1Dir"/version +printf Utrecht > "$flake1Dir"/who +nix profile add "$flake1Dir" +[[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello Utrecht" ]] +[[ $(nix path-info --json "$(realpath "$TEST_HOME"/.nix-profile/bin/hello)" | jq -r .[].ca) =~ fixed:r:sha256: ]] # Override the outputs. nix profile remove simple flake1 nix profile add "$flake1Dir^*" -[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello Utrecht" ]] -[ -e $TEST_HOME/.nix-profile/share/man ] -[ -e $TEST_HOME/.nix-profile/include ] +[[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello Utrecht" ]] +[ -e "$TEST_HOME"/.nix-profile/share/man ] +[ -e "$TEST_HOME"/.nix-profile/include ] -printf Nix > $flake1Dir/who +printf Nix > "$flake1Dir"/who nix profile list nix profile upgrade flake1 -[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello Nix" ]] -[ -e $TEST_HOME/.nix-profile/share/man ] -[ -e $TEST_HOME/.nix-profile/include ] +[[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello Nix" ]] +[ -e "$TEST_HOME"/.nix-profile/share/man ] +[ -e "$TEST_HOME"/.nix-profile/include ] nix profile remove flake1 2>&1 | grep 'removed 1 packages' nix profile add "$flake1Dir^man" -(! [ -e $TEST_HOME/.nix-profile/bin/hello ]) -[ -e $TEST_HOME/.nix-profile/share/man ] -(! [ -e $TEST_HOME/.nix-profile/include ]) +# shellcheck disable=SC2235 +(! [ -e "$TEST_HOME"/.nix-profile/bin/hello ]) +[ -e "$TEST_HOME"/.nix-profile/share/man ] +# shellcheck disable=SC2235 +(! [ -e "$TEST_HOME"/.nix-profile/include ]) # test priority nix profile remove flake1 2>&1 | grep 'removed 1 packages' # Make another flake. 
flake2Dir=$TEST_ROOT/flake2 -printf World > $flake1Dir/who -cp -r $flake1Dir $flake2Dir -printf World2 > $flake2Dir/who +printf World > "$flake1Dir"/who +cp -r "$flake1Dir" "$flake2Dir" +printf World2 > "$flake2Dir"/who -nix profile add $flake1Dir -[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello World" ]] -expect 1 nix profile add $flake2Dir +nix profile add "$flake1Dir" +[[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello World" ]] +expect 1 nix profile add "$flake2Dir" diff -u <( - nix --offline profile install $flake2Dir 2>&1 1> /dev/null \ + nix --offline profile install "$flake2Dir" 2>&1 1> /dev/null \ | grep -vE "^warning: " \ | grep -vE "^error \(ignored\): " \ || true ) <(cat << EOF error: An existing package already provides the following file: - $(nix build --no-link --print-out-paths ${flake1Dir}"#default.out")/bin/hello + $(nix build --no-link --print-out-paths "${flake1Dir}""#default.out")/bin/hello This is the conflicting file from the new package: - $(nix build --no-link --print-out-paths ${flake2Dir}"#default.out")/bin/hello + $(nix build --no-link --print-out-paths "${flake2Dir}""#default.out")/bin/hello To remove the existing package: @@ -225,11 +231,11 @@ error: An existing package already provides the following file: nix profile add path:${flake2Dir}#packages.${system}.default --priority 6 EOF ) -[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello World" ]] -nix profile add $flake2Dir --priority 100 -[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello World" ]] -nix profile add $flake2Dir --priority 0 -[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello World2" ]] +[[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello World" ]] +nix profile add "$flake2Dir" --priority 100 +[[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello World" ]] +nix profile add "$flake2Dir" --priority 0 +[[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello World2" ]] # nix profile add $flake1Dir --priority 100 # [[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello World" ]] @@ -237,14 +243,15 @@ nix profile add $flake2Dir --priority 0 # flake references. # Regression test for https://github.com/NixOS/nix/issues/8284 clearProfiles -nix profile add $(nix build $flake1Dir --no-link --print-out-paths) +# shellcheck disable=SC2046 +nix profile add $(nix build "$flake1Dir" --no-link --print-out-paths) expect 1 nix profile add --impure --expr "(builtins.getFlake ''$flake2Dir'').packages.$system.default" # Test upgrading from profile version 2. 
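# Editor's note: the 'diff -u' block above checks live command output against
# an expected transcript via process substitution, so any mismatch fails the
# test with a readable diff. A self-contained sketch of the same pattern:
actual() { printf 'line one\nline two\n'; }
diff -u <(actual) <(cat <<'EOF'
line one
line two
EOF
)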
clearProfiles -mkdir -p $TEST_ROOT/import-profile -outPath=$(nix build --no-link --print-out-paths $flake1Dir/flake.nix^out) -printf '{ "version": 2, "elements": [ { "active": true, "attrPath": "legacyPackages.x86_64-linux.hello", "originalUrl": "flake:nixpkgs", "outputs": null, "priority": 5, "storePaths": [ "%s" ], "url": "github:NixOS/nixpkgs/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" } ] }' "$outPath" > $TEST_ROOT/import-profile/manifest.json -nix build --profile $TEST_HOME/.nix-profile $(nix store add-path $TEST_ROOT/import-profile) --no-link +mkdir -p "$TEST_ROOT"/import-profile +outPath=$(nix build --no-link --print-out-paths "$flake1Dir"/flake.nix^out) +printf '{ "version": 2, "elements": [ { "active": true, "attrPath": "legacyPackages.x86_64-linux.hello", "originalUrl": "flake:nixpkgs", "outputs": null, "priority": 5, "storePaths": [ "%s" ], "url": "github:NixOS/nixpkgs/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" } ] }' "$outPath" > "$TEST_ROOT"/import-profile/manifest.json +nix build --profile "$TEST_HOME"/.nix-profile "$(nix store add-path "$TEST_ROOT"/import-profile)" --no-link nix profile list | grep -A4 'Name:.*hello' | grep "Store paths:.*$outPath" nix profile remove hello 2>&1 | grep 'removed 1 packages, kept 0 packages' diff --git a/tests/functional/nix-shell.sh b/tests/functional/nix-shell.sh index bc49333b5..cf650e2c3 100755 --- a/tests/functional/nix-shell.sh +++ b/tests/functional/nix-shell.sh @@ -16,16 +16,19 @@ export NIX_PATH=nixpkgs="$shellDotNix" export IMPURE_VAR=foo export SELECTED_IMPURE_VAR=baz +# shellcheck disable=SC2016 output=$(nix-shell --pure "$shellDotNix" -A shellDrv --run \ 'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX - $TEST_inNixShell"') [ "$output" = " - foo - bar - true" ] +# shellcheck disable=SC2016 output=$(nix-shell --pure "$shellDotNix" -A shellDrv --option nix-shell-always-looks-for-shell-nix false --run \ 'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX - $TEST_inNixShell"') [ "$output" = " - foo - bar - true" ] # Test --keep +# shellcheck disable=SC2016 output=$(nix-shell --pure --keep SELECTED_IMPURE_VAR "$shellDotNix" -A shellDrv --run \ 'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX - $SELECTED_IMPURE_VAR"') @@ -34,6 +37,7 @@ output=$(nix-shell --pure --keep SELECTED_IMPURE_VAR "$shellDotNix" -A shellDrv # test NIX_BUILD_TOP testTmpDir=$(pwd)/nix-shell mkdir -p "$testTmpDir" +# shellcheck disable=SC2016 output=$(TMPDIR="$testTmpDir" nix-shell --pure "$shellDotNix" -A shellDrv --run 'echo $NIX_BUILD_TOP') [[ "$output" =~ ${testTmpDir}.* ]] || { echo "expected $output =~ ${testTmpDir}.*" >&2 @@ -41,105 +45,111 @@ output=$(TMPDIR="$testTmpDir" nix-shell --pure "$shellDotNix" -A shellDrv --run } # Test nix-shell on a .drv -[[ $(nix-shell --pure $(nix-instantiate "$shellDotNix" -A shellDrv) --run \ +# shellcheck disable=SC2016 +[[ $(nix-shell --pure "$(nix-instantiate "$shellDotNix" -A shellDrv)" --run \ 'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX - $TEST_inNixShell"') = " - foo - bar - false" ]] - -[[ $(nix-shell --pure $(nix-instantiate "$shellDotNix" -A shellDrv) --run \ +# shellcheck disable=SC2016 +[[ $(nix-shell --pure "$(nix-instantiate "$shellDotNix" -A shellDrv)" --run \ 'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX - $TEST_inNixShell"') = " - foo - bar - false" ]] # Test nix-shell on a .drv symlink # Legacy: absolute path and .drv extension required -nix-instantiate "$shellDotNix" -A shellDrv --add-root $TEST_ROOT/shell.drv -[[ $(nix-shell --pure $TEST_ROOT/shell.drv 
--run \ +nix-instantiate "$shellDotNix" -A shellDrv --add-root "$TEST_ROOT"/shell.drv +# shellcheck disable=SC2016 +[[ $(nix-shell --pure "$TEST_ROOT"/shell.drv --run \ 'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX"') = " - foo - bar" ]] # New behaviour: just needs to resolve to a derivation in the store -nix-instantiate "$shellDotNix" -A shellDrv --add-root $TEST_ROOT/shell -[[ $(nix-shell --pure $TEST_ROOT/shell --run \ +nix-instantiate "$shellDotNix" -A shellDrv --add-root "$TEST_ROOT"/shell +# shellcheck disable=SC2016 +[[ $(nix-shell --pure "$TEST_ROOT"/shell --run \ 'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX"') = " - foo - bar" ]] # Test nix-shell -p +# shellcheck disable=SC2016 output=$(NIX_PATH=nixpkgs="$shellDotNix" nix-shell --pure -p foo bar --run 'echo "$(foo) $(bar)"') [ "$output" = "foo bar" ] # Test nix-shell -p --arg x y +# shellcheck disable=SC2016 output=$(NIX_PATH=nixpkgs="$shellDotNix" nix-shell --pure -p foo --argstr fooContents baz --run 'echo "$(foo)"') [ "$output" = "baz" ] # Test nix-shell shebang mode -sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.sh > $TEST_ROOT/shell.shebang.sh -chmod a+rx $TEST_ROOT/shell.shebang.sh +sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.sh > "$TEST_ROOT"/shell.shebang.sh +chmod a+rx "$TEST_ROOT"/shell.shebang.sh -output=$($TEST_ROOT/shell.shebang.sh abc def) +output=$("$TEST_ROOT"/shell.shebang.sh abc def) [ "$output" = "foo bar abc def" ] # Test nix-shell shebang mode with an alternate working directory -sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.expr > $TEST_ROOT/shell.shebang.expr -chmod a+rx $TEST_ROOT/shell.shebang.expr +sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.expr > "$TEST_ROOT"/shell.shebang.expr +chmod a+rx "$TEST_ROOT"/shell.shebang.expr # Should fail due to expressions using relative path -! $TEST_ROOT/shell.shebang.expr bar -cp shell.nix "${config_nix}" $TEST_ROOT + "$TEST_ROOT"/shell.shebang.expr bar && exit 1 +cp shell.nix "${config_nix}" "$TEST_ROOT" # Should succeed echo "cwd: $PWD" -output=$($TEST_ROOT/shell.shebang.expr bar) +output=$("$TEST_ROOT"/shell.shebang.expr bar) [ "$output" = foo ] # Test nix-shell shebang mode with an alternate working directory -sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.legacy.expr > $TEST_ROOT/shell.shebang.legacy.expr -chmod a+rx $TEST_ROOT/shell.shebang.legacy.expr +sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.legacy.expr > "$TEST_ROOT"/shell.shebang.legacy.expr +chmod a+rx "$TEST_ROOT"/shell.shebang.legacy.expr # Should fail due to expressions using relative path mkdir -p "$TEST_ROOT/somewhere-unrelated" -output="$(cd "$TEST_ROOT/somewhere-unrelated"; $TEST_ROOT/shell.shebang.legacy.expr bar;)" +output="$(cd "$TEST_ROOT/somewhere-unrelated"; "$TEST_ROOT"/shell.shebang.legacy.expr bar;)" [[ $(realpath "$output") = $(realpath "$TEST_ROOT/somewhere-unrelated") ]] # Test nix-shell shebang mode again with metacharacters in the filename. # First word of filename is chosen to not match any file in the test root. 
-sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.sh > $TEST_ROOT/spaced\ \\\'\"shell.shebang.sh -chmod a+rx $TEST_ROOT/spaced\ \\\'\"shell.shebang.sh +sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.sh > "$TEST_ROOT"/spaced\ \\\'\"shell.shebang.sh +chmod a+rx "$TEST_ROOT"/spaced\ \\\'\"shell.shebang.sh -output=$($TEST_ROOT/spaced\ \\\'\"shell.shebang.sh abc def) +output=$("$TEST_ROOT"/spaced\ \\\'\"shell.shebang.sh abc def) [ "$output" = "foo bar abc def" ] # Test nix-shell shebang mode for ruby # This uses a fake interpreter that returns the arguments passed # This, in turn, verifies the `rc` script is valid and the `load()` script (given using `-e`) is as expected. -sed -e "s|@SHELL_PROG@|$(type -P nix-shell)|" shell.shebang.rb > $TEST_ROOT/shell.shebang.rb -chmod a+rx $TEST_ROOT/shell.shebang.rb +sed -e "s|@SHELL_PROG@|$(type -P nix-shell)|" shell.shebang.rb > "$TEST_ROOT"/shell.shebang.rb +chmod a+rx "$TEST_ROOT"/shell.shebang.rb -output=$($TEST_ROOT/shell.shebang.rb abc ruby) +output=$("$TEST_ROOT"/shell.shebang.rb abc ruby) [ "$output" = '-e load(ARGV.shift) -- '"$TEST_ROOT"'/shell.shebang.rb abc ruby' ] # Test nix-shell shebang mode for ruby again with metacharacters in the filename. # Note: fake interpreter only space-separates args without adding escapes to its output. -sed -e "s|@SHELL_PROG@|$(type -P nix-shell)|" shell.shebang.rb > $TEST_ROOT/spaced\ \\\'\"shell.shebang.rb -chmod a+rx $TEST_ROOT/spaced\ \\\'\"shell.shebang.rb +sed -e "s|@SHELL_PROG@|$(type -P nix-shell)|" shell.shebang.rb > "$TEST_ROOT"/spaced\ \\\'\"shell.shebang.rb +chmod a+rx "$TEST_ROOT"/spaced\ \\\'\"shell.shebang.rb -output=$($TEST_ROOT/spaced\ \\\'\"shell.shebang.rb abc ruby) +output=$("$TEST_ROOT"/spaced\ \\\'\"shell.shebang.rb abc ruby) +# shellcheck disable=SC1003 [ "$output" = '-e load(ARGV.shift) -- '"$TEST_ROOT"'/spaced \'\''"shell.shebang.rb abc ruby' ] # Test nix-shell shebang quoting -sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.nix > $TEST_ROOT/shell.shebang.nix -chmod a+rx $TEST_ROOT/shell.shebang.nix -$TEST_ROOT/shell.shebang.nix +sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.nix > "$TEST_ROOT"/shell.shebang.nix +chmod a+rx "$TEST_ROOT"/shell.shebang.nix +"$TEST_ROOT"/shell.shebang.nix -mkdir $TEST_ROOT/lookup-test $TEST_ROOT/empty +mkdir "$TEST_ROOT"/lookup-test "$TEST_ROOT"/empty -echo "import $shellDotNix" > $TEST_ROOT/lookup-test/shell.nix -cp "${config_nix}" $TEST_ROOT/lookup-test/ -echo 'abort "do not load default.nix!"' > $TEST_ROOT/lookup-test/default.nix +echo "import $shellDotNix" > "$TEST_ROOT"/lookup-test/shell.nix +cp "${config_nix}" "$TEST_ROOT"/lookup-test/ +echo 'abort "do not load default.nix!"' > "$TEST_ROOT"/lookup-test/default.nix -nix-shell $TEST_ROOT/lookup-test -A shellDrv --run 'echo "it works"' | grepQuiet "it works" +nix-shell "$TEST_ROOT"/lookup-test -A shellDrv --run 'echo "it works"' | grepQuiet "it works" # https://github.com/NixOS/nix/issues/4529 nix-shell -I "testRoot=$TEST_ROOT" '' -A shellDrv --run 'echo "it works"' | grepQuiet "it works" -expectStderr 1 nix-shell $TEST_ROOT/lookup-test -A shellDrv --run 'echo "it works"' --option nix-shell-always-looks-for-shell-nix false \ +expectStderr 1 nix-shell "$TEST_ROOT"/lookup-test -A shellDrv --run 'echo "it works"' --option nix-shell-always-looks-for-shell-nix false \ | grepQuiet -F "do not load default.nix!" 
# we did, because we chose to enable legacy behavior -expectStderr 1 nix-shell $TEST_ROOT/lookup-test -A shellDrv --run 'echo "it works"' --option nix-shell-always-looks-for-shell-nix false \ +expectStderr 1 nix-shell "$TEST_ROOT"/lookup-test -A shellDrv --run 'echo "it works"' --option nix-shell-always-looks-for-shell-nix false \ | grepQuiet "Skipping .*lookup-test/shell\.nix.*, because the setting .*nix-shell-always-looks-for-shell-nix.* is disabled. This is a deprecated behavior\. Consider enabling .*nix-shell-always-looks-for-shell-nix.*" ( - cd $TEST_ROOT/empty; + cd "$TEST_ROOT"/empty; expectStderr 1 nix-shell | \ grepQuiet "error.*no argument specified and no .*shell\.nix.* or .*default\.nix.* file found in the working directory" ) @@ -147,29 +157,29 @@ expectStderr 1 nix-shell $TEST_ROOT/lookup-test -A shellDrv --run 'echo "it work expectStderr 1 nix-shell -I "testRoot=$TEST_ROOT" '' | grepQuiet "error.*neither .*shell\.nix.* nor .*default\.nix.* found in .*/empty" -cat >$TEST_ROOT/lookup-test/shebangscript <"$TEST_ROOT"/lookup-test/shebangscript < $TEST_ROOT/marco/shell.nix -cat >$TEST_ROOT/marco/polo/default.nix < "$TEST_ROOT"/marco/shell.nix +cat >"$TEST_ROOT"/marco/polo/default.nix <$TEST_ROOT/issue-11892/shebangscript <"$TEST_ROOT"/issue-11892/shebangscript <$TEST_ROOT/issue-11892/shebangscript <$TEST_ROOT/issue-11892/my_package.nix <"$TEST_ROOT"/issue-11892/my_package.nix < $TEST_ROOT/dev-env.sh -nix print-dev-env -f "$shellDotNix" shellDrv --json > $TEST_ROOT/dev-env.json +nix print-dev-env -f "$shellDotNix" shellDrv > "$TEST_ROOT"/dev-env.sh +nix print-dev-env -f "$shellDotNix" shellDrv --json > "$TEST_ROOT"/dev-env.json # Test with raw drv shellDrv=$(nix-instantiate "$shellDotNix" -A shellDrv.out) -nix develop $shellDrv -c bash -c '[[ -n $stdenv ]]' +# shellcheck disable=SC2016 +nix develop "$shellDrv" -c bash -c '[[ -n $stdenv ]]' -nix print-dev-env $shellDrv > $TEST_ROOT/dev-env2.sh -nix print-dev-env $shellDrv --json > $TEST_ROOT/dev-env2.json +nix print-dev-env "$shellDrv" > "$TEST_ROOT"/dev-env2.sh +nix print-dev-env "$shellDrv" --json > "$TEST_ROOT"/dev-env2.json -diff $TEST_ROOT/dev-env{,2}.sh -diff $TEST_ROOT/dev-env{,2}.json +diff "$TEST_ROOT"/dev-env{,2}.sh +diff "$TEST_ROOT"/dev-env{,2}.json # Ensure `nix print-dev-env --json` contains variable assignments. -[[ $(jq -r .variables.arr1.value[2] $TEST_ROOT/dev-env.json) = '3 4' ]] +[[ $(jq -r .variables.arr1.value[2] "$TEST_ROOT"/dev-env.json) = '3 4' ]] # Run tests involving `source <(nix print-dev-env)` in subshells to avoid modifying the current # environment. @@ -238,27 +250,32 @@ set -u # Ensure `source <(nix print-dev-env)` modifies the environment. ( path=$PATH - source $TEST_ROOT/dev-env.sh + # shellcheck disable=SC1091 + source "$TEST_ROOT"/dev-env.sh [[ -n $stdenv ]] + # shellcheck disable=SC2154 [[ ${arr1[2]} = "3 4" ]] + # shellcheck disable=SC2154 [[ ${arr2[1]} = $'\n' ]] [[ ${arr2[2]} = $'x\ny' ]] [[ $(fun) = blabla ]] - [[ $PATH = $(jq -r .variables.PATH.value $TEST_ROOT/dev-env.json):$path ]] + [[ $PATH = $(jq -r .variables.PATH.value "$TEST_ROOT"/dev-env.json):$path ]] ) # Ensure `source <(nix print-dev-env)` handles the case when PATH is empty. 
( path=$PATH + # shellcheck disable=SC2123 PATH= - source $TEST_ROOT/dev-env.sh - [[ $PATH = $(PATH=$path jq -r .variables.PATH.value $TEST_ROOT/dev-env.json) ]] + # shellcheck disable=SC1091 + source "$TEST_ROOT"/dev-env.sh + [[ $PATH = $(PATH=$path jq -r .variables.PATH.value "$TEST_ROOT"/dev-env.json) ]] ) # Test nix-shell with ellipsis and no `inNixShell` argument (for backwards compat with old nixpkgs) -cat >$TEST_ROOT/shell-ellipsis.nix <"$TEST_ROOT"/shell-ellipsis.nix <' --restrict-eval unset NIX_PATH -mkdir -p $TEST_ROOT/{from-nix-path-file,from-NIX_PATH,from-nix-path,from-extra-nix-path,from-I} +mkdir -p "$TEST_ROOT"/{from-nix-path-file,from-NIX_PATH,from-nix-path,from-extra-nix-path,from-I} for i in from-nix-path-file from-NIX_PATH from-nix-path from-extra-nix-path from-I; do - touch $TEST_ROOT/$i/only-$i.nix + touch "$TEST_ROOT"/$i/only-$i.nix done # finding something that's not in any of the default paths fails +# shellcheck disable=SC2091 ( ! $(nix-instantiate --find-file test) ) echo "nix-path = test=$TEST_ROOT/from-nix-path-file" >> "$test_nix_conf" @@ -53,36 +54,36 @@ echo "nix-path = test=$TEST_ROOT/from-nix-path-file" >> "$test_nix_conf" (! NIX_PATH=test=$TEST_ROOT nix-instantiate --find-file test/only-from-nix-path-file.nix) # -I extends nix.conf -[[ $(nix-instantiate -I test=$TEST_ROOT/from-I --find-file test/only-from-I.nix) = $TEST_ROOT/from-I/only-from-I.nix ]] +[[ $(nix-instantiate -I test="$TEST_ROOT"/from-I --find-file test/only-from-I.nix) = $TEST_ROOT/from-I/only-from-I.nix ]] # if -I does not have the desired entry, the value from nix.conf is used -[[ $(nix-instantiate -I test=$TEST_ROOT/from-I --find-file test/only-from-nix-path-file.nix) = $TEST_ROOT/from-nix-path-file/only-from-nix-path-file.nix ]] +[[ $(nix-instantiate -I test="$TEST_ROOT"/from-I --find-file test/only-from-nix-path-file.nix) = $TEST_ROOT/from-nix-path-file/only-from-nix-path-file.nix ]] # -I extends NIX_PATH -[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate -I test=$TEST_ROOT/from-I --find-file test/only-from-I.nix) = $TEST_ROOT/from-I/only-from-I.nix ]] +[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate -I test="$TEST_ROOT"/from-I --find-file test/only-from-I.nix) = $TEST_ROOT/from-I/only-from-I.nix ]] # -I takes precedence over NIX_PATH -[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate -I test=$TEST_ROOT/from-I --find-file test) = $TEST_ROOT/from-I ]] +[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate -I test="$TEST_ROOT"/from-I --find-file test) = $TEST_ROOT/from-I ]] # if -I does not have the desired entry, the value from NIX_PATH is used -[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate -I test=$TEST_ROOT/from-I --find-file test/only-from-NIX_PATH.nix) = $TEST_ROOT/from-NIX_PATH/only-from-NIX_PATH.nix ]] +[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate -I test="$TEST_ROOT"/from-I --find-file test/only-from-NIX_PATH.nix) = $TEST_ROOT/from-NIX_PATH/only-from-NIX_PATH.nix ]] # --extra-nix-path extends NIX_PATH -[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate --extra-nix-path test=$TEST_ROOT/from-extra-nix-path --find-file test/only-from-extra-nix-path.nix) = $TEST_ROOT/from-extra-nix-path/only-from-extra-nix-path.nix ]] +[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate --extra-nix-path test="$TEST_ROOT"/from-extra-nix-path --find-file test/only-from-extra-nix-path.nix) = $TEST_ROOT/from-extra-nix-path/only-from-extra-nix-path.nix ]] # if --extra-nix-path does not have the desired entry, the value from 
NIX_PATH is used -[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate --extra-nix-path test=$TEST_ROOT/from-extra-nix-path --find-file test/only-from-NIX_PATH.nix) = $TEST_ROOT/from-NIX_PATH/only-from-NIX_PATH.nix ]] +[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate --extra-nix-path test="$TEST_ROOT"/from-extra-nix-path --find-file test/only-from-NIX_PATH.nix) = $TEST_ROOT/from-NIX_PATH/only-from-NIX_PATH.nix ]] # --nix-path overrides NIX_PATH -[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate --nix-path test=$TEST_ROOT/from-nix-path --find-file test) = $TEST_ROOT/from-nix-path ]] +[[ $(NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate --nix-path test="$TEST_ROOT"/from-nix-path --find-file test) = $TEST_ROOT/from-nix-path ]] # if --nix-path does not have the desired entry, it fails -(! NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate --nix-path test=$TEST_ROOT/from-nix-path --find-file test/only-from-NIX_PATH.nix) +(! NIX_PATH=test=$TEST_ROOT/from-NIX_PATH nix-instantiate --nix-path test="$TEST_ROOT"/from-nix-path --find-file test/only-from-NIX_PATH.nix) # --nix-path overrides nix.conf -[[ $(nix-instantiate --nix-path test=$TEST_ROOT/from-nix-path --find-file test) = $TEST_ROOT/from-nix-path ]] -(! nix-instantiate --nix-path test=$TEST_ROOT/from-nix-path --find-file test/only-from-nix-path-file.nix) +[[ $(nix-instantiate --nix-path test="$TEST_ROOT"/from-nix-path --find-file test) = $TEST_ROOT/from-nix-path ]] +(! nix-instantiate --nix-path test="$TEST_ROOT"/from-nix-path --find-file test/only-from-nix-path-file.nix) # --extra-nix-path extends nix.conf -[[ $(nix-instantiate --extra-nix-path test=$TEST_ROOT/from-extra-nix-path --find-file test/only-from-extra-nix-path.nix) = $TEST_ROOT/from-extra-nix-path/only-from-extra-nix-path.nix ]] +[[ $(nix-instantiate --extra-nix-path test="$TEST_ROOT"/from-extra-nix-path --find-file test/only-from-extra-nix-path.nix) = $TEST_ROOT/from-extra-nix-path/only-from-extra-nix-path.nix ]] # if --extra-nix-path does not have the desired entry, it is taken from nix.conf -[[ $(nix-instantiate --extra-nix-path test=$TEST_ROOT/from-extra-nix-path --find-file test) = $TEST_ROOT/from-nix-path-file ]] +[[ $(nix-instantiate --extra-nix-path test="$TEST_ROOT"/from-extra-nix-path --find-file test) = $TEST_ROOT/from-nix-path-file ]] # -I extends --nix-path -[[ $(nix-instantiate --nix-path test=$TEST_ROOT/from-nix-path -I test=$TEST_ROOT/from-I --find-file test/only-from-I.nix) = $TEST_ROOT/from-I/only-from-I.nix ]] -[[ $(nix-instantiate --nix-path test=$TEST_ROOT/from-nix-path -I test=$TEST_ROOT/from-I --find-file test/only-from-nix-path.nix) = $TEST_ROOT/from-nix-path/only-from-nix-path.nix ]] +[[ $(nix-instantiate --nix-path test="$TEST_ROOT"/from-nix-path -I test="$TEST_ROOT"/from-I --find-file test/only-from-I.nix) = $TEST_ROOT/from-I/only-from-I.nix ]] +[[ $(nix-instantiate --nix-path test="$TEST_ROOT"/from-nix-path -I test="$TEST_ROOT"/from-I --find-file test/only-from-nix-path.nix) = $TEST_ROOT/from-nix-path/only-from-nix-path.nix ]] diff --git a/tests/functional/optimise-store.sh b/tests/functional/optimise-store.sh index 05c4c41e4..332a308c2 100755 --- a/tests/functional/optimise-store.sh +++ b/tests/functional/optimise-store.sh @@ -4,28 +4,31 @@ source common.sh clearStoreIfPossible +# shellcheck disable=SC2016 outPath1=$(echo 'with import '"${config_nix}"'; mkDerivation { name = "foo1"; builder = builtins.toFile "builder" "mkdir $out; echo hello > $out/foo"; }' | nix-build - --no-out-link --auto-optimise-store) +# 
shellcheck disable=SC2016 outPath2=$(echo 'with import '"${config_nix}"'; mkDerivation { name = "foo2"; builder = builtins.toFile "builder" "mkdir $out; echo hello > $out/foo"; }' | nix-build - --no-out-link --auto-optimise-store) TODO_NixOS # ignoring the client-specified setting 'auto-optimise-store', because it is a restricted setting and you are not a trusted user # TODO: only continue when trusted user or root -inode1="$(stat --format=%i $outPath1/foo)" -inode2="$(stat --format=%i $outPath2/foo)" +inode1="$(stat --format=%i "$outPath1"/foo)" +inode2="$(stat --format=%i "$outPath2"/foo)" if [ "$inode1" != "$inode2" ]; then echo "inodes do not match" exit 1 fi -nlink="$(stat --format=%h $outPath1/foo)" +nlink="$(stat --format=%h "$outPath1"/foo)" if [ "$nlink" != 3 ]; then echo "link count incorrect" exit 1 fi +# shellcheck disable=SC2016 outPath3=$(echo 'with import '"${config_nix}"'; mkDerivation { name = "foo3"; builder = builtins.toFile "builder" "mkdir $out; echo hello > $out/foo"; }' | nix-build - --no-out-link) -inode3="$(stat --format=%i $outPath3/foo)" +inode3="$(stat --format=%i "$outPath3"/foo)" if [ "$inode1" = "$inode3" ]; then echo "inodes match unexpectedly" exit 1 @@ -34,8 +37,8 @@ fi # XXX: This should work through the daemon too NIX_REMOTE="" nix-store --optimise -inode1="$(stat --format=%i $outPath1/foo)" -inode3="$(stat --format=%i $outPath3/foo)" +inode1="$(stat --format=%i "$outPath1"/foo)" +inode3="$(stat --format=%i "$outPath3"/foo)" if [ "$inode1" != "$inode3" ]; then echo "inodes do not match" exit 1 @@ -43,7 +46,7 @@ fi nix-store --gc -if [ -n "$(ls $NIX_STORE_DIR/.links)" ]; then +if [ -n "$(ls "$NIX_STORE_DIR"/.links)" ]; then echo ".links directory not empty after GC" exit 1 fi diff --git a/tests/functional/output-normalization.sh b/tests/functional/output-normalization.sh index c55f1b1d1..bd1668db9 100755 --- a/tests/functional/output-normalization.sh +++ b/tests/functional/output-normalization.sh @@ -6,7 +6,7 @@ testNormalization () { TODO_NixOS clearStore outPath=$(nix-build ./simple.nix --no-out-link) - test "$(stat -c %Y $outPath)" -eq 1 + test "$(stat -c %Y "$outPath")" -eq 1 } testNormalization diff --git a/tests/functional/package.nix b/tests/functional/package.nix index 1f1d10ea8..6830a9e58 100644 --- a/tests/functional/package.nix +++ b/tests/functional/package.nix @@ -2,16 +2,7 @@ lib, stdenv, mkMesonDerivation, - - meson, - ninja, - pkg-config, - - jq, - git, - mercurial, - util-linux, - unixtools, + buildPackages, nix-store, nix-expr, @@ -46,16 +37,17 @@ mkMesonDerivation ( ./. ]; - # Hack for sake of the dev shell + # Hack for sake of the dev shell. Need to "manually splice" since + # this isn't a specially-recognized list of dependencies. passthru.externalNativeBuildInputs = [ - meson - ninja - pkg-config + buildPackages.meson + buildPackages.ninja + buildPackages.pkg-config - jq - git - mercurial - unixtools.script + buildPackages.jq + buildPackages.git + buildPackages.mercurial + buildPackages.unixtools.script ] ++ lib.optionals stdenv.hostPlatform.isLinux [ # For various sandboxing tests that needs a statically-linked shell, @@ -64,11 +56,14 @@ mkMesonDerivation ( # For Overlay FS tests need `mount`, `umount`, and `unshare`. # For `script` command (ensuring a TTY) # TODO use `unixtools` to be precise over which executables instead? - util-linux + buildPackages.util-linux ]; nativeBuildInputs = finalAttrs.passthru.externalNativeBuildInputs ++ [ - nix-cli + # Explicitly splice the hostHost variant to fix LLVM tests. 
The nix-cli + # has to be in PATH, but must come from the host context where it's built + # with libc++. + (nix-cli.__spliced.hostHost or nix-cli) ]; buildInputs = [ diff --git a/tests/functional/parallel.builder.sh b/tests/functional/parallel.builder.sh index d092bc5a6..436246571 100644 --- a/tests/functional/parallel.builder.sh +++ b/tests/functional/parallel.builder.sh @@ -1,29 +1,31 @@ +# shellcheck shell=bash +# shellcheck disable=SC2154 echo "DOING $text" # increase counter -while ! ln -s x $shared.lock 2> /dev/null; do +while ! ln -s x "$shared".lock 2> /dev/null; do sleep 1 done -test -f $shared.cur || echo 0 > $shared.cur -test -f $shared.max || echo 0 > $shared.max -new=$(($(cat $shared.cur) + 1)) -if test $new -gt $(cat $shared.max); then - echo $new > $shared.max +test -f "$shared".cur || echo 0 > "$shared".cur +test -f "$shared".max || echo 0 > "$shared".max +new=$(($(cat "$shared".cur) + 1)) +if test $new -gt "$(cat "$shared".max)"; then + echo $new > "$shared".max fi -echo $new > $shared.cur -rm $shared.lock +echo $new > "$shared".cur +rm "$shared".lock -echo -n $(cat $inputs)$text > $out +echo -n "$(cat "$inputs")""$text" > "$out" -sleep $sleepTime +sleep "$sleepTime" # decrease counter -while ! ln -s x $shared.lock 2> /dev/null; do +while ! ln -s x "$shared".lock 2> /dev/null; do sleep 1 done -test -f $shared.cur || echo 0 > $shared.cur -echo $(($(cat $shared.cur) - 1)) > $shared.cur -rm $shared.lock +test -f "$shared".cur || echo 0 > "$shared".cur +echo $(($(cat "$shared".cur) - 1)) > "$shared".cur +rm "$shared".lock diff --git a/tests/functional/parallel.sh b/tests/functional/parallel.sh index 7e420688d..4d0bf0f1b 100644 --- a/tests/functional/parallel.sh +++ b/tests/functional/parallel.sh @@ -1,3 +1,4 @@ +# shellcheck shell=bash source common.sh @@ -8,7 +9,7 @@ TODO_NixOS clearStore -rm -f $_NIX_TEST_SHARED.cur $_NIX_TEST_SHARED.max +rm -f "$_NIX_TEST_SHARED".cur "$_NIX_TEST_SHARED".max outPath=$(nix-build -j10000 parallel.nix --no-out-link) @@ -17,8 +18,8 @@ echo "output path is $outPath" text=$(cat "$outPath") if test "$text" != "abacade"; then exit 1; fi -if test "$(cat $_NIX_TEST_SHARED.cur)" != 0; then fail "wrong current process count"; fi -if test "$(cat $_NIX_TEST_SHARED.max)" != 3; then fail "not enough parallelism"; fi +if test "$(cat "$_NIX_TEST_SHARED".cur)" != 0; then fail "wrong current process count"; fi +if test "$(cat "$_NIX_TEST_SHARED".max)" != 3; then fail "not enough parallelism"; fi # Second, test that parallel invocations of nix-build perform builds @@ -27,7 +28,7 @@ echo "testing multiple nix-build -j1..." clearStore -rm -f $_NIX_TEST_SHARED.cur $_NIX_TEST_SHARED.max +rm -f "$_NIX_TEST_SHARED".cur "$_NIX_TEST_SHARED".max drvPath=$(nix-instantiate parallel.nix --argstr sleepTime 15) @@ -54,5 +55,5 @@ wait $pid2 || fail "instance 2 failed: $?" wait $pid3 || fail "instance 3 failed: $?" wait $pid4 || fail "instance 4 failed: $?" 
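# Editor's note: parallel.builder.sh above implements its counter lock with
# 'ln -s' — creating a symlink fails atomically if the name already exists, so
# at most one builder enters the critical section at a time. A standalone
# sketch (lock path hypothetical):
lock=$(mktemp -u)                                        # a path that does not exist yet
while ! ln -s x "$lock" 2>/dev/null; do sleep 1; done    # acquire
echo "in critical section"
rm "$lock"                                               # release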
-if test "$(cat $_NIX_TEST_SHARED.cur)" != 0; then fail "wrong current process count"; fi -if test "$(cat $_NIX_TEST_SHARED.max)" != 3; then fail "not enough parallelism"; fi +if test "$(cat "$_NIX_TEST_SHARED".cur)" != 0; then fail "wrong current process count"; fi +if test "$(cat "$_NIX_TEST_SHARED".max)" != 3; then fail "not enough parallelism"; fi diff --git a/tests/functional/pass-as-file.sh b/tests/functional/pass-as-file.sh index 66a8e588e..68f68b8cf 100755 --- a/tests/functional/pass-as-file.sh +++ b/tests/functional/pass-as-file.sh @@ -4,6 +4,7 @@ source common.sh clearStoreIfPossible +# shellcheck disable=SC2034 outPath=$(nix-build --no-out-link -E " with import ${config_nix}; diff --git a/tests/functional/path-from-hash-part.sh b/tests/functional/path-from-hash-part.sh index 41d1b7410..0b258a6ea 100755 --- a/tests/functional/path-from-hash-part.sh +++ b/tests/functional/path-from-hash-part.sh @@ -4,9 +4,9 @@ source common.sh path=$(nix build --no-link --print-out-paths -f simple.nix) -hash_part=$(basename $path) +hash_part=$(basename "$path") hash_part=${hash_part:0:32} -path2=$(nix store path-from-hash-part $hash_part) +path2=$(nix store path-from-hash-part "$hash_part") -[[ $path = $path2 ]] +[[ $path = "$path2" ]] diff --git a/tests/functional/path-info.sh b/tests/functional/path-info.sh index 8597de683..463ac6214 100755 --- a/tests/functional/path-info.sh +++ b/tests/functional/path-info.sh @@ -2,14 +2,14 @@ source common.sh -echo foo > $TEST_ROOT/foo -foo=$(nix store add-file $TEST_ROOT/foo) +echo foo > "$TEST_ROOT"/foo +foo=$(nix store add-file "$TEST_ROOT"/foo) -echo bar > $TEST_ROOT/bar -bar=$(nix store add-file $TEST_ROOT/bar) +echo bar > "$TEST_ROOT"/bar +bar=$(nix store add-file "$TEST_ROOT"/bar) -echo baz > $TEST_ROOT/baz -baz=$(nix store add-file $TEST_ROOT/baz) +echo baz > "$TEST_ROOT"/baz +baz=$(nix store add-file "$TEST_ROOT"/baz) nix-store --delete "$baz" diff --unified --color=always \ diff --git a/tests/functional/placeholders.sh b/tests/functional/placeholders.sh index 374203af8..5791d8006 100755 --- a/tests/functional/placeholders.sh +++ b/tests/functional/placeholders.sh @@ -4,6 +4,7 @@ source common.sh clearStoreIfPossible +# shellcheck disable=SC2016 nix-build --no-out-link -E ' with import '"${config_nix}"'; diff --git a/tests/functional/post-hook.sh b/tests/functional/post-hook.sh index 94a6d0d69..b16d8ab84 100755 --- a/tests/functional/post-hook.sh +++ b/tests/functional/post-hook.sh @@ -6,10 +6,10 @@ TODO_NixOS clearStore -rm -f $TEST_ROOT/result +rm -f "$TEST_ROOT"/result export REMOTE_STORE=file:$TEST_ROOT/remote_store -echo 'require-sigs = false' >> $test_nix_conf +echo 'require-sigs = false' >> "$test_nix_conf" restartDaemon @@ -20,11 +20,26 @@ else fi # Build the dependencies and push them to the remote store. -nix-build -o $TEST_ROOT/result dependencies.nix --post-build-hook "$pushToStore" +nix-build -o "$TEST_ROOT"/result dependencies.nix --post-build-hook "$pushToStore" # See if all outputs are passed to the post-build hook by only specifying one # We're not able to test CA tests this way -export BUILD_HOOK_ONLY_OUT_PATHS=$([ ! $NIX_TESTS_CA_BY_DEFAULT ]) -nix-build -o $TEST_ROOT/result-mult multiple-outputs.nix -A a.first --post-build-hook "$pushToStore" +# +# FIXME: This export is hiding error condition +# shellcheck disable=SC2155 +export BUILD_HOOK_ONLY_OUT_PATHS=$([ ! 
"$NIX_TESTS_CA_BY_DEFAULT" ]) +nix-build -o "$TEST_ROOT"/result-mult multiple-outputs.nix -A a.first --post-build-hook "$pushToStore" + +if isDaemonNewer "2.33.0pre20251029"; then + # Regression test for issue #14287: `--check` should re-run post build + # hook, even though nothing is getting newly registered. + export HOOK_DEST=$TEST_ROOT/listing + # Needed so the hook will get the above environment variable. + restartDaemon + nix-build -o "$TEST_ROOT"/result-mult multiple-outputs.nix --check -A a.first --post-build-hook "$PWD/build-hook-list-paths.sh" + grepQuiet a-first "$HOOK_DEST" + grepQuiet a-second "$HOOK_DEST" + unset HOOK_DEST +fi clearStore diff --git a/tests/functional/pure-eval.sh b/tests/functional/pure-eval.sh index 45a65f9ab..b769b2150 100755 --- a/tests/functional/pure-eval.sh +++ b/tests/functional/pure-eval.sh @@ -10,6 +10,7 @@ nix eval --expr 'assert 1 + 2 == 3; true' missingImpureErrorMsg=$(! nix eval --expr 'builtins.readFile ./pure-eval.sh' 2>&1) +# shellcheck disable=SC1111 echo "$missingImpureErrorMsg" | grepQuiet -- --impure || \ fail "The error message should mention the “--impure” flag to unblock users" @@ -25,14 +26,15 @@ echo "$missingImpureErrorMsg" | grepQuiet -- --impure || \ (! nix eval --expr "(import (builtins.fetchurl { url = file://$(pwd)/pure-eval.nix; })).x") nix eval --expr "(import (builtins.fetchurl { url = file://$(pwd)/pure-eval.nix; sha256 = \"$(nix hash file pure-eval.nix --type sha256)\"; })).x" -rm -rf $TEST_ROOT/eval-out -nix eval --store dummy:// --write-to $TEST_ROOT/eval-out --expr '{ x = "foo" + "bar"; y = { z = "bla"; }; }' -[[ $(cat $TEST_ROOT/eval-out/x) = foobar ]] -[[ $(cat $TEST_ROOT/eval-out/y/z) = bla ]] +rm -rf "$TEST_ROOT"/eval-out +nix eval --store dummy:// --write-to "$TEST_ROOT"/eval-out --expr '{ x = "foo" + "bar"; y = { z = "bla"; }; }' +[[ $(cat "$TEST_ROOT"/eval-out/x) = foobar ]] +[[ $(cat "$TEST_ROOT"/eval-out/y/z) = bla ]] -rm -rf $TEST_ROOT/eval-out -(! nix eval --store dummy:// --write-to $TEST_ROOT/eval-out --expr '{ "." = "bla"; }') +rm -rf "$TEST_ROOT"/eval-out +(! nix eval --store dummy:// --write-to "$TEST_ROOT"/eval-out --expr '{ "." = "bla"; }') +# shellcheck disable=SC2088 (! nix eval --expr '~/foo') expectStderr 0 nix eval --expr "/some/absolute/path" \ diff --git a/tests/functional/read-only-store.sh b/tests/functional/read-only-store.sh index ea96bba41..8ccca2192 100755 --- a/tests/functional/read-only-store.sh +++ b/tests/functional/read-only-store.sh @@ -12,10 +12,10 @@ clearStore happy () { # We can do a read-only query just fine with a read-only store - nix --store local?read-only=true path-info $dummyPath + nix --store local?read-only=true path-info "$dummyPath" # `local://` also works. 
- nix --store local://?read-only=true path-info $dummyPath + nix --store local://?read-only=true path-info "$dummyPath" # We can "write" an already-present store-path a read-only store, because no IO is actually required nix-store --store local?read-only=true --add dummy @@ -37,8 +37,8 @@ happy ## Testing read-only mode with an underlying store that is actually read-only # Ensure store is actually read-only -chmod -R -w $TEST_ROOT/store -chmod -R -w $TEST_ROOT/var +chmod -R -w "$TEST_ROOT"/store +chmod -R -w "$TEST_ROOT"/var # Make sure we fail on add operations on the read-only store # This is only for adding files that are not *already* in the store diff --git a/tests/functional/readfile-context.sh b/tests/functional/readfile-context.sh index cb9ef6234..effe483dc 100755 --- a/tests/functional/readfile-context.sh +++ b/tests/functional/readfile-context.sh @@ -9,12 +9,12 @@ clearStore outPath=$(nix-build --no-out-link readfile-context.nix) # Set a GC root. -ln -s $outPath "$NIX_STATE_DIR/gcroots/foo" +ln -s "$outPath" "$NIX_STATE_DIR/gcroots/foo" # Check that file exists. -[ "$(cat $(cat $outPath))" = "Hello World!" ] +[ "$(cat "$(cat "$outPath")")" = "Hello World!" ] nix-collect-garbage # Check that file still exists. -[ "$(cat $(cat $outPath))" = "Hello World!" ] +[ "$(cat "$(cat "$outPath")")" = "Hello World!" ] diff --git a/tests/functional/recursive.sh b/tests/functional/recursive.sh index 640fb92d2..9115aa775 100755 --- a/tests/functional/recursive.sh +++ b/tests/functional/recursive.sh @@ -9,15 +9,16 @@ restartDaemon clearStore -rm -f $TEST_ROOT/result +rm -f "$TEST_ROOT"/result -export unreachable=$(nix store add-path ./recursive.sh) +unreachable=$(nix store add-path ./recursive.sh) +export unreachable -NIX_BIN_DIR=$(dirname $(type -p nix)) nix --extra-experimental-features 'nix-command recursive-nix' build -o $TEST_ROOT/result -L --impure --file ./recursive.nix +NIX_BIN_DIR=$(dirname "$(type -p nix)") nix --extra-experimental-features 'nix-command recursive-nix' build -o "$TEST_ROOT"/result -L --impure --file ./recursive.nix -[[ $(cat $TEST_ROOT/result/inner1) =~ blaat ]] +[[ $(cat "$TEST_ROOT"/result/inner1) =~ blaat ]] # Make sure the recursively created paths are in the closure. -nix path-info -r $TEST_ROOT/result | grep foobar -nix path-info -r $TEST_ROOT/result | grep fnord -nix path-info -r $TEST_ROOT/result | grep inner1 +nix path-info -r "$TEST_ROOT"/result | grep foobar +nix path-info -r "$TEST_ROOT"/result | grep fnord +nix path-info -r "$TEST_ROOT"/result | grep inner1 diff --git a/tests/functional/referrers.sh b/tests/functional/referrers.sh index 411cdb7c1..ae6b39ae1 100755 --- a/tests/functional/referrers.sh +++ b/tests/functional/referrers.sh @@ -11,32 +11,34 @@ clearStore max=500 reference=$NIX_STORE_DIR/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-bla -touch $reference -(echo $reference && echo && echo 0) | nix-store --register-validity +touch "$reference" +(echo "$reference" && echo && echo 0) | nix-store --register-validity echo "making registration..." set +x +# shellcheck disable=SC2004 for ((n = 0; n < $max; n++)); do storePath=$NIX_STORE_DIR/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-$n - echo -n > $storePath + echo -n > "$storePath" ref2=$NIX_STORE_DIR/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-$((n+1)) if test $((n+1)) = $max; then ref2=$reference fi - echo $storePath; echo; echo 2; echo $reference; echo $ref2 -done > $TEST_ROOT/reg_info + echo "$storePath"; echo; echo 2; echo "$reference"; echo "$ref2" +done > "$TEST_ROOT"/reg_info set -x echo "registering..." 
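The `reg_info` file consumed just below follows the `nix-store --register-validity` record format visible in the loop above: the store path, a deriver line (blank here), the number of references, then one reference per line. A single hand-rolled record would look like this (placeholder 32-character hash part; as in the test, the paths must already exist on disk):

```bash
# Hypothetical standalone record in --register-validity format.
nix-store --register-validity <<EOF
$NIX_STORE_DIR/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-pkg

2
$NIX_STORE_DIR/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-dep1
$NIX_STORE_DIR/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-dep2
EOF
```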
-nix-store --register-validity < $TEST_ROOT/reg_info +nix-store --register-validity < "$TEST_ROOT"/reg_info echo "collecting garbage..." -ln -sfn $reference "$NIX_STATE_DIR/gcroots/ref" +ln -sfn "$reference" "$NIX_STATE_DIR/gcroots/ref" nix-store --gc -if [ -n "$(type -p sqlite3)" -a "$(sqlite3 $NIX_STATE_DIR/db/db.sqlite 'select count(*) from Refs')" -ne 0 ]; then +# shellcheck disable=SC2166 +if [ -n "$(type -p sqlite3)" -a "$(sqlite3 "$NIX_STATE_DIR"/db/db.sqlite 'select count(*) from Refs')" -ne 0 ]; then echo "referrers not cleaned up" exit 1 fi diff --git a/tests/functional/remote-store.sh b/tests/functional/remote-store.sh index 841b6b27a..f125ae137 100755 --- a/tests/functional/remote-store.sh +++ b/tests/functional/remote-store.sh @@ -7,10 +7,10 @@ TODO_NixOS clearStore # Ensure "fake ssh" remote store works just as legacy fake ssh would. -nix --store ssh-ng://localhost?remote-store=$TEST_ROOT/other-store doctor +nix --store ssh-ng://localhost?remote-store="$TEST_ROOT"/other-store doctor # Ensure that store info trusted works with ssh-ng:// -nix --store ssh-ng://localhost?remote-store=$TEST_ROOT/other-store store info --json | jq -e '.trusted' +nix --store ssh-ng://localhost?remote-store="$TEST_ROOT"/other-store store info --json | jq -e '.trusted' startDaemon @@ -31,8 +31,8 @@ NIX_REMOTE_=$NIX_REMOTE $SHELL ./user-envs-test-case.sh nix-store --gc --max-freed 1K -nix-store --dump-db > $TEST_ROOT/d1 -NIX_REMOTE= nix-store --dump-db > $TEST_ROOT/d2 -cmp $TEST_ROOT/d1 $TEST_ROOT/d2 +nix-store --dump-db > "$TEST_ROOT"/d1 +NIX_REMOTE='' nix-store --dump-db > "$TEST_ROOT"/d2 +cmp "$TEST_ROOT"/d1 "$TEST_ROOT"/d2 killDaemon diff --git a/tests/functional/repair.sh b/tests/functional/repair.sh index 1f6004b2c..a90bdcfd5 100755 --- a/tests/functional/repair.sh +++ b/tests/functional/repair.sh @@ -8,39 +8,43 @@ TODO_NixOS clearStore -path=$(nix-build dependencies.nix -o $TEST_ROOT/result) -path2=$(nix-store -qR $path | grep input-2) +path=$(nix-build dependencies.nix -o "$TEST_ROOT"/result) +path2=$(nix-store -qR "$path" | grep input-2) nix-store --verify --check-contents -v -hash=$(nix-hash $path2) +hash=$(nix-hash "$path2") # Corrupt a path and check whether nix-build --repair can fix it. -chmod u+w $path2 -touch $path2/bad +chmod u+w "$path2" +touch "$path2"/bad (! nix-store --verify --check-contents -v) # The path can be repaired by rebuilding the derivation. nix-store --verify --check-contents --repair -(! [ -e $path2/bad ]) -(! [ -w $path2 ]) +# shellcheck disable=SC2235 +(! [ -e "$path2"/bad ]) +# shellcheck disable=SC2235 +(! [ -w "$path2" ]) -nix-store --verify-path $path2 +nix-store --verify-path "$path2" # Re-corrupt and delete the deriver. Now --verify --repair should # not work. -chmod u+w $path2 -touch $path2/bad +chmod u+w "$path2" +touch "$path2"/bad -nix-store --delete $(nix-store -q --referrers-closure $(nix-store -qd $path2)) +# shellcheck disable=SC2046 +nix-store --delete $(nix-store -q --referrers-closure "$(nix-store -qd "$path2")") (! nix-store --verify --check-contents --repair) -nix-build dependencies.nix -o $TEST_ROOT/result --repair +nix-build dependencies.nix -o "$TEST_ROOT"/result --repair -if [ "$(nix-hash $path2)" != "$hash" -o -e $path2/bad ]; then +# shellcheck disable=SC2166 +if [ "$(nix-hash "$path2")" != "$hash" -o -e "$path2"/bad ]; then echo "path not repaired properly" >&2 exit 1 fi @@ -49,79 +53,83 @@ fi # --verify can fix it. 
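The section that follows exercises substituter-backed repair: the path is first copied into a throwaway file:// binary cache, the store copy is destroyed, and repair is expected to refetch it from the cache instead of rebuilding. Reduced to its essentials (a sketch, assuming any locally built path `$out`):

```bash
cacheDir=$(mktemp -d)
out=$(nix-build dependencies.nix --no-out-link)   # any locally built path

nix copy --to "file://$cacheDir" "$out"   # seed the cache
chmod -R u+w "$out" && rm -rf "$out"      # destroy the store copy

# Repair pulls the path back from the cache rather than rebuilding it:
nix-store --repair-path "$out" --substituters "file://$cacheDir" --no-require-sigs
```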
clearCache -nix copy --to file://$cacheDir $path +nix copy --to file://"$cacheDir" "$path" -chmod u+w $path2 -rm -rf $path2 +chmod u+w "$path2" +rm -rf "$path2" nix-store --verify --check-contents --repair --substituters "file://$cacheDir" --no-require-sigs -if [ "$(nix-hash $path2)" != "$hash" -o -e $path2/bad ]; then +# shellcheck disable=SC2166 +if [ "$(nix-hash "$path2")" != "$hash" -o -e "$path2"/bad ]; then echo "path not repaired properly" >&2 exit 1 fi # Check --verify-path and --repair-path. -nix-store --verify-path $path2 +nix-store --verify-path "$path2" -chmod u+w $path2 -rm -rf $path2 +chmod u+w "$path2" +rm -rf "$path2" -if nix-store --verify-path $path2; then +if nix-store --verify-path "$path2"; then echo "nix-store --verify-path succeeded unexpectedly" >&2 exit 1 fi -nix-store --repair-path $path2 --substituters "file://$cacheDir" --no-require-sigs +nix-store --repair-path "$path2" --substituters "file://$cacheDir" --no-require-sigs -if [ "$(nix-hash $path2)" != "$hash" -o -e $path2/bad ]; then +# shellcheck disable=SC2166 +if [ "$(nix-hash "$path2")" != "$hash" -o -e "$path2"/bad ]; then echo "path not repaired properly" >&2 exit 1 fi # Check that --repair-path also checks content of optimised symlinks (1/2) -nix-store --verify-path $path2 +nix-store --verify-path "$path2" if (! nix-store --optimize); then echo "nix-store --optimize failed to optimize the store" >&2 exit 1 fi -chmod u+w $path2/bar -echo 'rabrab' > $path2/bar # different length +chmod u+w "$path2"/bar +echo 'rabrab' > "$path2"/bar # different length -if nix-store --verify-path $path2; then +if nix-store --verify-path "$path2"; then echo "nix-store --verify-path did not detect .links file corruption" >&2 exit 1 fi -nix-store --repair-path $path2 --option auto-optimise-store true +nix-store --repair-path "$path2" --option auto-optimise-store true -if [ "$(nix-hash $path2)" != "$hash" -o "BAR" != "$(< $path2/bar)" ]; then +# shellcheck disable=SC2166 +if [ "$(nix-hash "$path2")" != "$hash" -o "BAR" != "$(< "$path2"/bar)" ]; then echo "path not repaired properly" >&2 exit 1 fi # Check that --repair-path also checks content of optimised symlinks (2/2) -nix-store --verify-path $path2 +nix-store --verify-path "$path2" if (! nix-store --optimize); then echo "nix-store --optimize failed to optimize the store" >&2 exit 1 fi -chmod u+w $path2 -chmod u+w $path2/bar -sed -e 's/./X/g' < $path2/bar > $path2/tmp # same length, different content. -cp $path2/tmp $path2/bar -rm $path2/tmp +chmod u+w "$path2" +chmod u+w "$path2"/bar +sed -e 's/./X/g' < "$path2"/bar > "$path2"/tmp # same length, different content. 
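(The `cp`/`rm` just below completes that in-place overwrite.) The reason same-length corruption gets its own case: after `nix-store --optimise`, identical files are deduplicated into hard links onto `.links` entries, so one inode may back many store paths, and a size-preserving overwrite corrupts all of them while leaving sizes and timestamps intact; only a content comparison can catch it. The sharing is easy to observe, as the optimise-store test at the top of this patch does:

```bash
# After optimisation, identical files in two outputs share one inode.
# $outPath1 and $outPath2 stand for two such outputs.
inode1=$(stat --format=%i "$outPath1"/foo)
inode2=$(stat --format=%i "$outPath2"/foo)
[ "$inode1" = "$inode2" ] && echo "deduplicated ($(stat --format=%h "$outPath1"/foo) links)"
```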
+cp "$path2"/tmp "$path2"/bar +rm "$path2"/tmp -if nix-store --verify-path $path2; then +if nix-store --verify-path "$path2"; then echo "nix-store --verify-path did not detect .links file corruption" >&2 exit 1 fi -nix-store --repair-path $path2 --substituters "file://$cacheDir" --no-require-sigs --option auto-optimise-store true +nix-store --repair-path "$path2" --substituters "file://$cacheDir" --no-require-sigs --option auto-optimise-store true -if [ "$(nix-hash $path2)" != "$hash" -o "BAR" != "$(< $path2/bar)" ]; then +# shellcheck disable=SC2166 +if [ "$(nix-hash "$path2")" != "$hash" -o "BAR" != "$(< "$path2"/bar)" ]; then echo "path not repaired properly" >&2 exit 1 fi diff --git a/tests/functional/repl.sh b/tests/functional/repl.sh index bfe18c9e5..aeff43d30 100755 --- a/tests/functional/repl.sh +++ b/tests/functional/repl.sh @@ -25,6 +25,13 @@ import $testDir/undefined-variable.nix TODO_NixOS +# FIXME: repl tests fail on systems with stack limits +stack_ulimit="$(ulimit -Hs)" +stack_required="$((64 * 1024 * 1024))" +if [[ "$stack_ulimit" != "unlimited" ]]; then + ((stack_ulimit < stack_required)) && skipTest "repl tests cannot run on systems with stack size <$stack_required ($stack_ulimit)" +fi + testRepl () { local nixArgs nixArgs=("$@") diff --git a/tests/functional/restricted.sh b/tests/functional/restricted.sh index 00ee4ddc8..2f65f15fe 100755 --- a/tests/functional/restricted.sh +++ b/tests/functional/restricted.sh @@ -40,30 +40,32 @@ nix eval --raw --expr "builtins.fetchurl file://${_NIX_TEST_SOURCE_DIR}/restrict (! nix eval --raw --expr "fetchGit git://github.com/NixOS/patchelf.git" --impure --restrict-eval) ln -sfn "${_NIX_TEST_SOURCE_DIR}/restricted.nix" "$TEST_ROOT/restricted.nix" -[[ $(nix-instantiate --eval $TEST_ROOT/restricted.nix) == 3 ]] -(! nix-instantiate --eval --restrict-eval $TEST_ROOT/restricted.nix) -(! nix-instantiate --eval --restrict-eval $TEST_ROOT/restricted.nix -I $TEST_ROOT) -(! nix-instantiate --eval --restrict-eval $TEST_ROOT/restricted.nix -I .) +[[ $(nix-instantiate --eval "$TEST_ROOT"/restricted.nix) == 3 ]] +(! nix-instantiate --eval --restrict-eval "$TEST_ROOT"/restricted.nix) +(! nix-instantiate --eval --restrict-eval "$TEST_ROOT"/restricted.nix -I "$TEST_ROOT") +(! nix-instantiate --eval --restrict-eval "$TEST_ROOT"/restricted.nix -I .) nix-instantiate --eval --restrict-eval "$TEST_ROOT/restricted.nix" -I "$TEST_ROOT" -I "${_NIX_TEST_SOURCE_DIR}" +# shellcheck disable=SC2016 [[ $(nix eval --raw --impure --restrict-eval -I . --expr 'builtins.readFile "${import ./simple.nix}/hello"') == 'Hello World!' ]] # Check that we can't follow a symlink outside of the allowed paths. -mkdir -p $TEST_ROOT/tunnel.d $TEST_ROOT/foo2 -ln -sfn .. $TEST_ROOT/tunnel.d/tunnel -echo foo > $TEST_ROOT/bar +mkdir -p "$TEST_ROOT"/tunnel.d "$TEST_ROOT"/foo2 +ln -sfn .. 
"$TEST_ROOT"/tunnel.d/tunnel +echo foo > "$TEST_ROOT"/bar -expectStderr 1 nix-instantiate --restrict-eval --eval -E "let __nixPath = [ { prefix = \"foo\"; path = $TEST_ROOT/tunnel.d; } ]; in builtins.readFile " -I $TEST_ROOT/tunnel.d | grepQuiet "forbidden in restricted mode" +expectStderr 1 nix-instantiate --restrict-eval --eval -E "let __nixPath = [ { prefix = \"foo\"; path = $TEST_ROOT/tunnel.d; } ]; in builtins.readFile " -I "$TEST_ROOT"/tunnel.d | grepQuiet "forbidden in restricted mode" -expectStderr 1 nix-instantiate --restrict-eval --eval -E "let __nixPath = [ { prefix = \"foo\"; path = $TEST_ROOT/tunnel.d; } ]; in builtins.readDir " -I $TEST_ROOT/tunnel.d | grepQuiet "forbidden in restricted mode" +expectStderr 1 nix-instantiate --restrict-eval --eval -E "let __nixPath = [ { prefix = \"foo\"; path = $TEST_ROOT/tunnel.d; } ]; in builtins.readDir " -I "$TEST_ROOT"/tunnel.d | grepQuiet "forbidden in restricted mode" # Reading the parents of allowed paths should show only the ancestors of the allowed paths. -[[ $(nix-instantiate --restrict-eval --eval -E "let __nixPath = [ { prefix = \"foo\"; path = $TEST_ROOT/tunnel.d; } ]; in builtins.readDir " -I $TEST_ROOT/tunnel.d) == '{ "tunnel.d" = "directory"; }' ]] +[[ $(nix-instantiate --restrict-eval --eval -E "let __nixPath = [ { prefix = \"foo\"; path = $TEST_ROOT/tunnel.d; } ]; in builtins.readDir " -I "$TEST_ROOT"/tunnel.d) == '{ "tunnel.d" = "directory"; }' ]] # Check whether we can leak symlink information through directory traversal. traverseDir="${_NIX_TEST_SOURCE_DIR}/restricted-traverse-me" ln -sfn "${_NIX_TEST_SOURCE_DIR}/restricted-secret" "${_NIX_TEST_SOURCE_DIR}/restricted-innocent" mkdir -p "$traverseDir" +# shellcheck disable=SC2001 goUp="..$(echo "$traverseDir" | sed -e 's,[^/]\+,..,g')" output="$(nix eval --raw --restrict-eval -I "$traverseDir" \ --expr "builtins.readFile \"$traverseDir/$goUp${_NIX_TEST_SOURCE_DIR}/restricted-innocent\"" \ diff --git a/tests/functional/secure-drv-outputs.sh b/tests/functional/secure-drv-outputs.sh index 5cc4af435..876d3c817 100755 --- a/tests/functional/secure-drv-outputs.sh +++ b/tests/functional/secure-drv-outputs.sh @@ -13,20 +13,20 @@ clearStore startDaemon # Determine the output path of the "good" derivation. -goodOut=$(nix-store -q $(nix-instantiate ./secure-drv-outputs.nix -A good)) +goodOut=$(nix-store -q "$(nix-instantiate ./secure-drv-outputs.nix -A good)") # Instantiate the "bad" derivation. badDrv=$(nix-instantiate ./secure-drv-outputs.nix -A bad) -badOut=$(nix-store -q $badDrv) +badOut=$(nix-store -q "$badDrv") # Rewrite the bad derivation to produce the output path of the good # derivation. -rm -f $TEST_ROOT/bad.drv -sed -e "s|$badOut|$goodOut|g" < $badDrv > $TEST_ROOT/bad.drv +rm -f "$TEST_ROOT"/bad.drv +sed -e "s|$badOut|$goodOut|g" < "$badDrv" > "$TEST_ROOT"/bad.drv # Add the manipulated derivation to the store and build it. This # should fail. 
-if badDrv2=$(nix-store --add $TEST_ROOT/bad.drv); then +if badDrv2=$(nix-store --add "$TEST_ROOT"/bad.drv); then nix-store -r "$badDrv2" fi diff --git a/tests/functional/selfref-gc.sh b/tests/functional/selfref-gc.sh index dc4f14cc1..de202a09d 100755 --- a/tests/functional/selfref-gc.sh +++ b/tests/functional/selfref-gc.sh @@ -6,6 +6,7 @@ requireDaemonNewerThan "2.6.0pre20211215" clearStoreIfPossible +# shellcheck disable=SC2016 nix-build --no-out-link -E ' with import '"${config_nix}"'; diff --git a/tests/functional/shell.shebang.sh b/tests/functional/shell.shebang.sh index f7132043d..b6e4ee286 100755 --- a/tests/functional/shell.shebang.sh +++ b/tests/functional/shell.shebang.sh @@ -1,4 +1,5 @@ #! @ENV_PROG@ nix-shell #! nix-shell -I nixpkgs=shell.nix --no-substitute #! nix-shell --pure -i bash -p foo bar -echo "$(foo) $(bar) $@" +# shellcheck shell=bash +echo "$(foo) $(bar)" "$@" diff --git a/tests/functional/simple.builder.sh b/tests/functional/simple.builder.sh index 97abf0676..27cdfe684 100644 --- a/tests/functional/simple.builder.sh +++ b/tests/functional/simple.builder.sh @@ -6,7 +6,9 @@ echo "PATH=$PATH" if mkdir foo 2> /dev/null; then exit 1; fi # Set a PATH (!!! impure). +# shellcheck disable=SC2154 export PATH=$goodPath +# shellcheck disable=SC2154 mkdir "$out" echo "Hello World!" > "$out"/hello diff --git a/tests/functional/structured-attrs.nix b/tests/functional/structured-attrs.nix index 4e1984517..70ac807ab 100644 --- a/tests/functional/structured-attrs.nix +++ b/tests/functional/structured-attrs.nix @@ -82,4 +82,8 @@ mkDerivation { "foo$" = "BAD"; exportReferencesGraph.refs = [ dep ]; + exportReferencesGraph.refs2 = [ + dep + [ dep ] + ]; # regression test for heterogeneous arrays } diff --git a/tests/functional/structured-attrs.sh b/tests/functional/structured-attrs.sh index dfd5a1412..473a037f9 100755 --- a/tests/functional/structured-attrs.sh +++ b/tests/functional/structured-attrs.sh @@ -2,9 +2,8 @@ source common.sh -# 27ce722638 required some incompatible changes to the nix file, so skip this -# tests for the older versions -requireDaemonNewerThan "2.4pre20210712" +# https://github.com/NixOS/nix/pull/14189 +requireDaemonNewerThan "2.33" clearStoreIfPossible diff --git a/tests/functional/supplementary-groups.sh b/tests/functional/supplementary-groups.sh index a667d3e99..0f614a130 100755 --- a/tests/functional/supplementary-groups.sh +++ b/tests/functional/supplementary-groups.sh @@ -9,6 +9,7 @@ needLocalStore "The test uses --store always so we would just be bypassing the d TODO_NixOS +# shellcheck disable=SC2119 execUnshare <buildPathsWithResults(paths, bmNormal, store); for (const auto & result : results) { - for (const auto & [outputName, realisation] : result.builtOutputs) { - std::cout << store->printStorePath(realisation.outPath) << "\n"; + if (auto * successP = result.tryGetSuccess()) { + for (const auto & [outputName, realisation] : successP->builtOutputs) { + std::cout << store->printStorePath(realisation.outPath) << "\n"; + } } } diff --git a/tests/functional/test-libstoreconsumer/meson.build b/tests/functional/test-libstoreconsumer/meson.build index 7f619d01b..b2f1c1ca3 100644 --- a/tests/functional/test-libstoreconsumer/meson.build +++ b/tests/functional/test-libstoreconsumer/meson.build @@ -1,11 +1,12 @@ cxx = meson.get_compiler('cpp') -subdir('nix-meson-build-support/asan-options') +deps_other = [] +subdir('nix-meson-build-support/common/asan-options') libstoreconsumer_tester = executable( 'test-libstoreconsumer', 'main.cc', - dependencies : [ + 
dependencies : deps_other + [ dependency('nix-store'), ], build_by_default : false, diff --git a/tests/functional/toString-path.sh b/tests/functional/toString-path.sh index d790109f4..c425b61be 100755 --- a/tests/functional/toString-path.sh +++ b/tests/functional/toString-path.sh @@ -2,8 +2,8 @@ source common.sh -mkdir -p $TEST_ROOT/foo -echo bla > $TEST_ROOT/foo/bar +mkdir -p "$TEST_ROOT"/foo +echo bla > "$TEST_ROOT"/foo/bar [[ $(nix eval --raw --impure --expr "builtins.readFile (builtins.toString (builtins.fetchTree { type = \"path\"; path = \"$TEST_ROOT/foo\"; } + \"/bar\"))") = bla ]] diff --git a/tests/functional/user-envs-migration.sh b/tests/functional/user-envs-migration.sh index 0f33074e1..46337cdda 100755 --- a/tests/functional/user-envs-migration.sh +++ b/tests/functional/user-envs-migration.sh @@ -29,6 +29,7 @@ nix-env -f user-envs.nix -i bar-0.1 # Migrate to the new profile dir, and ensure that everything’s there export PATH="$PATH_WITH_NEW_NIX" nix-env -q # Trigger the migration +# shellcheck disable=SC2235 ( [[ -L ~/.nix-profile ]] && \ [[ $(readlink ~/.nix-profile) == ~/.local/share/nix/profiles/profile ]] ) || \ fail "The nix profile should point to the new location" diff --git a/tests/functional/user-envs-test-case.sh b/tests/functional/user-envs-test-case.sh index 3483a4600..f6a8ab8c6 100644 --- a/tests/functional/user-envs-test-case.sh +++ b/tests/functional/user-envs-test-case.sh @@ -1,14 +1,17 @@ +# shellcheck shell=bash clearProfiles # Query installed: should be empty. -test "$(nix-env -p $profiles/test -q '*' | wc -l)" -eq 0 +# shellcheck disable=SC2154 +test "$(nix-env -p "$profiles"/test -q '*' | wc -l)" -eq 0 -nix-env --switch-profile $profiles/test +nix-env --switch-profile "$profiles"/test # Query available: should contain several. test "$(nix-env -f ./user-envs.nix -qa '*' | wc -l)" -eq 6 outPath10=$(nix-env -f ./user-envs.nix -qa --out-path --no-name '*' | grep foo-1.0) drvPath10=$(nix-env -f ./user-envs.nix -qa --drv-path --no-name '*' | grep foo-1.0) +# shellcheck disable=SC2166 [ -n "$outPath10" -a -n "$drvPath10" ] TODO_NixOS @@ -20,18 +23,19 @@ nix-env -f ./user-envs.nix -qa --json | jq -e '.[] | select(.name == "bar-0.1") ] | all' nix-env -f ./user-envs.nix -qa --json --out-path | jq -e '.[] | select(.name == "bar-0.1") | [ .outputName == "out", - (.outputs.out | test("'$NIX_STORE_DIR'.*-0\\.1")) + (.outputs.out | test("'"$NIX_STORE_DIR"'.*-0\\.1")) ] | all' -nix-env -f ./user-envs.nix -qa --json --drv-path | jq -e '.[] | select(.name == "bar-0.1") | (.drvPath | test("'$NIX_STORE_DIR'.*-0\\.1\\.drv"))' +nix-env -f ./user-envs.nix -qa --json --drv-path | jq -e '.[] | select(.name == "bar-0.1") | (.drvPath | test("'"$NIX_STORE_DIR"'.*-0\\.1\\.drv"))' # Query descriptions. nix-env -f ./user-envs.nix -qa '*' --description | grepQuiet silly -rm -rf $HOME/.nix-defexpr -ln -s $(pwd)/user-envs.nix $HOME/.nix-defexpr +rm -rf "$HOME"/.nix-defexpr +ln -s "$(pwd)"/user-envs.nix "$HOME"/.nix-defexpr nix-env -qa '*' --description | grepQuiet silly # Query the system. -nix-env -qa '*' --system | grepQuiet $system +# shellcheck disable=SC2154 +nix-env -qa '*' --system | grepQuiet "$system" # Install "foo-1.0". nix-env -i foo-1.0 @@ -40,7 +44,7 @@ nix-env -i foo-1.0 # executable). test "$(nix-env -q '*' | wc -l)" -eq 1 nix-env -q '*' | grepQuiet foo-1.0 -test "$($profiles/test/bin/foo)" = "foo-1.0" +test "$("$profiles"/test/bin/foo)" = "foo-1.0" # Test nix-env -qc to compare installed against available packages, and vice versa. 
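For readers unfamiliar with these `nix-env` flags: `-qc` annotates each installed package with a version comparison against what the active expression offers, so with foo-1.0 installed and a 2.0 available it prints a `< 2.0` marker, which is what the next line greps for; `-qas` prints status flags per available package (roughly: installed, present in the store, substitutable), so `--- bar-0.1` means none of the three apply. Illustrative shapes only, not verbatim output:

```bash
nix-env -qc '*'   # e.g.  foo-1.0  < 2.0   (an upgrade to 2.0 is available)
nix-env -qas      # e.g.  ---  bar-0.1     (no status flag set)
```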
nix-env -qc '*' | grepQuiet '< 2.0' @@ -55,6 +59,7 @@ nix-env -qas | grepQuiet -- '--- bar-0.1' # Disable foo. nix-env --set-flag active false foo +# shellcheck disable=SC2235 (! [ -e "$profiles/test/bin/foo" ]) # Enable foo. @@ -72,7 +77,7 @@ nix-env -i foo-2.0pre1 # Query installed: should contain foo-2.0pre1 now. test "$(nix-env -q '*' | wc -l)" -eq 1 nix-env -q '*' | grepQuiet foo-2.0pre1 -test "$($profiles/test/bin/foo)" = "foo-2.0pre1" +test "$("$profiles"/test/bin/foo)" = "foo-2.0pre1" # Upgrade "foo": should install foo-2.0. NIX_PATH=nixpkgs=./user-envs.nix:${NIX_PATH-} nix-env -f '<nixpkgs>' -u foo @@ -80,7 +85,7 @@ NIX_PATH=nixpkgs=./user-envs.nix:${NIX_PATH-} nix-env -f '<nixpkgs>' -u foo # Query installed: should contain foo-2.0 now. test "$(nix-env -q '*' | wc -l)" -eq 1 nix-env -q '*' | grepQuiet foo-2.0 -test "$($profiles/test/bin/foo)" = "foo-2.0" +test "$("$profiles"/test/bin/foo)" = "foo-2.0" # Store the path of foo-2.0. outPath20=$(nix-env -q --out-path --no-name '*' | grep foo-2.0) @@ -95,9 +100,9 @@ if nix-env -q '*' | grepQuiet foo; then false; fi nix-env -q '*' | grepQuiet bar # Rollback: should bring "foo" back. -oldGen="$(nix-store -q --resolve $profiles/test)" +oldGen="$(nix-store -q --resolve "$profiles"/test)" nix-env --rollback -[ "$(nix-store -q --resolve $profiles/test)" != "$oldGen" ] +[ "$(nix-store -q --resolve "$profiles"/test)" != "$oldGen" ] nix-env -q '*' | grepQuiet foo-2.0 nix-env -q '*' | grepQuiet bar @@ -122,23 +127,23 @@ test "$(nix-env --list-generations | wc -l)" -eq 8 # Switch to a specified generation. nix-env --switch-generation 7 -[ "$(nix-store -q --resolve $profiles/test)" = "$oldGen" ] +[ "$(nix-store -q --resolve "$profiles"/test)" = "$oldGen" ] # Install foo-1.0, now using its store path. nix-env -i "$outPath10" nix-env -q '*' | grepQuiet foo-1.0 -nix-store -qR $profiles/test | grep "$outPath10" -nix-store -q --referrers-closure $profiles/test | grep "$(nix-store -q --resolve $profiles/test)" -[ "$(nix-store -q --deriver "$outPath10")" = $drvPath10 ] +nix-store -qR "$profiles"/test | grep "$outPath10" +nix-store -q --referrers-closure "$profiles"/test | grep "$(nix-store -q --resolve "$profiles"/test)" +[ "$(nix-store -q --deriver "$outPath10")" = "$drvPath10" ] # Uninstall foo-1.0, using a symlink to its store path. -ln -sfn $outPath10/bin/foo $TEST_ROOT/symlink -nix-env -e $TEST_ROOT/symlink +ln -sfn "$outPath10"/bin/foo "$TEST_ROOT"/symlink +nix-env -e "$TEST_ROOT"/symlink if nix-env -q '*' | grepQuiet foo; then false; fi -nix-store -qR $profiles/test | grepInverse "$outPath10" +nix-store -qR "$profiles"/test | grepInverse "$outPath10" # Install foo-1.0, now using a symlink to its store path. -nix-env -i $TEST_ROOT/symlink +nix-env -i "$TEST_ROOT"/symlink nix-env -q '*' | grepQuiet foo # Delete all old generations. @@ -148,6 +153,7 @@ nix-env --delete-generations old # foo-1.0. nix-collect-garbage test -e "$outPath10" +# shellcheck disable=SC2235 (! [ -e "$outPath20" ]) # Uninstall everything @@ -156,7 +162,7 @@ test "$(nix-env -q '*' | wc -l)" -eq 0 # Installing "foo" should only install the newest foo. nix-env -i foo -test "$(nix-env -q '*' | grep foo- | wc -l)" -eq 1 +test "$(nix-env -q '*' | grep foo- -c)" -eq 1 nix-env -q '*' | grepQuiet foo-2.0 # On the other hand, this should install both (and should fail due to @@ -177,25 +183,25 @@ nix-env -q '*' | grepQuiet bar-0.1.1 # declared priorities.
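`nix-env` resolves such file collisions by package priority, where a numerically lower value wins; a package can declare one (via its `meta.priority`, which user-envs.nix presumably does for the assertions below), and it can be overridden after the fact with `--set-flag priority` or at install time with `--priority`, both exercised next:

```bash
# Lower number = higher priority when two packages collide in a profile.
nix-env --set-flag priority 1 foo-0.1   # foo-0.1 now shadows foo-1.0
```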
nix-env -e '*' nix-env -i foo-0.1 foo-1.0 -[ "$($profiles/test/bin/foo)" = "foo-1.0" ] +[ "$("$profiles"/test/bin/foo)" = "foo-1.0" ] nix-env --set-flag priority 1 foo-0.1 -[ "$($profiles/test/bin/foo)" = "foo-0.1" ] +[ "$("$profiles"/test/bin/foo)" = "foo-0.1" ] # Priorities can be overridden with the --priority flag nix-env -e '*' nix-env -i foo-1.0 -[ "$($profiles/test/bin/foo)" = "foo-1.0" ] +[ "$("$profiles"/test/bin/foo)" = "foo-1.0" ] nix-env -i --priority 1 foo-0.1 -[ "$($profiles/test/bin/foo)" = "foo-0.1" ] +[ "$("$profiles"/test/bin/foo)" = "foo-0.1" ] # Test nix-env --set. -nix-env --set $outPath10 -[ "$(nix-store -q --resolve $profiles/test)" = $outPath10 ] -nix-env --set $drvPath10 -[ "$(nix-store -q --resolve $profiles/test)" = $outPath10 ] +nix-env --set "$outPath10" +[ "$(nix-store -q --resolve "$profiles"/test)" = "$outPath10" ] +nix-env --set "$drvPath10" +[ "$(nix-store -q --resolve "$profiles"/test)" = "$outPath10" ] # Test the case where $HOME contains a symlink. -mkdir -p $TEST_ROOT/real-home/alice/.nix-defexpr/channels -ln -sfn $TEST_ROOT/real-home $TEST_ROOT/home -ln -sfn $(pwd)/user-envs.nix $TEST_ROOT/home/alice/.nix-defexpr/channels/foo +mkdir -p "$TEST_ROOT"/real-home/alice/.nix-defexpr/channels +ln -sfn "$TEST_ROOT"/real-home "$TEST_ROOT"/home +ln -sfn "$(pwd)"/user-envs.nix "$TEST_ROOT"/home/alice/.nix-defexpr/channels/foo HOME=$TEST_ROOT/home/alice nix-env -i foo-0.1 diff --git a/tests/functional/user-envs.builder.sh b/tests/functional/user-envs.builder.sh index 5fafa797f..e875c2fe5 100644 --- a/tests/functional/user-envs.builder.sh +++ b/tests/functional/user-envs.builder.sh @@ -1,5 +1,8 @@ -mkdir $out -mkdir $out/bin -echo "#! $shell" > $out/bin/$progName -echo "echo $name" >> $out/bin/$progName -chmod +x $out/bin/$progName +# shellcheck shell=bash +# shellcheck disable=SC2154 +mkdir "$out" +mkdir "$out"/bin +echo "#! $shell" > "$out"/bin/"$progName" +# shellcheck disable=SC2154 +echo "echo $name" >> "$out"/bin/"$progName" +chmod +x "$out"/bin/"$progName" diff --git a/tests/functional/why-depends.sh b/tests/functional/why-depends.sh index 45d1f2f0b..fe9ff9a62 100755 --- a/tests/functional/why-depends.sh +++ b/tests/functional/why-depends.sh @@ -4,9 +4,9 @@ source common.sh clearStoreIfPossible -cp ./dependencies.nix ./dependencies.builder0.sh "${config_nix}" $TEST_HOME +cp ./dependencies.nix ./dependencies.builder0.sh "${config_nix}" "$TEST_HOME" -cd $TEST_HOME +cd "$TEST_HOME" nix why-depends --derivation --file ./dependencies.nix input2_drv input1_drv nix why-depends --file ./dependencies.nix input2_drv input1_drv diff --git a/tests/nixos/content-encoding.nix b/tests/nixos/content-encoding.nix new file mode 100644 index 000000000..debee377b --- /dev/null +++ b/tests/nixos/content-encoding.nix @@ -0,0 +1,190 @@ +# Test content encoding support in Nix: +# 1. Fetching compressed files from servers with Content-Encoding headers +# (e.g., fetching a zstd archive from a server using gzip Content-Encoding +# should preserve the zstd format, not double-decompress) +# 2. HTTP binary cache store upload/download with compression support + +{ lib, config, ... 
}: + +let + pkgs = config.nodes.machine.nixpkgs.pkgs; + + ztdCompressedFile = pkgs.stdenv.mkDerivation { + name = "dummy-zstd-compressed-archive"; + dontUnpack = true; + nativeBuildInputs = with pkgs; [ zstd ]; + buildPhase = '' + mkdir archive + for _ in {1..100}; do echo "lorem" > archive/file1; done + for _ in {1..100}; do echo "ipsum" > archive/file2; done + tar --zstd -cf archive.tar.zst archive + ''; + installPhase = '' + install -Dm 644 -T archive.tar.zst $out/share/archive + ''; + }; + + # Bare derivation for testing binary cache with logs + testDrv = builtins.toFile "test.nix" '' + derivation { + name = "test-package"; + builder = "/bin/sh"; + args = [ "-c" "echo 'Building test package...' >&2; echo 'hello from test package' > $out; echo 'Build complete!' >&2" ]; + system = builtins.currentSystem; + } + ''; +in + +{ + name = "content-encoding"; + + nodes = { + machine = + { pkgs, ... }: + { + networking.firewall.allowedTCPPorts = [ 80 ]; + + services.nginx.enable = true; + services.nginx.virtualHosts."localhost" = { + root = "${ztdCompressedFile}/share/"; + # Make sure that nginx really tries to compress the + # file on the fly with no regard to size/mime. + # http://nginx.org/en/docs/http/ngx_http_gzip_module.html + extraConfig = '' + gzip on; + gzip_types *; + gzip_proxied any; + gzip_min_length 0; + ''; + + # Upload endpoint with WebDAV + locations."/cache-upload" = { + root = "/var/lib/nginx-cache"; + extraConfig = '' + client_body_temp_path /var/lib/nginx-cache/tmp; + create_full_put_path on; + dav_methods PUT DELETE; + dav_access user:rw group:rw all:r; + + # Don't try to compress already compressed files + gzip off; + + # Rewrite to remove -upload suffix when writing files + rewrite ^/cache-upload/(.*)$ /cache/$1 break; + ''; + }; + + # Download endpoint with Content-Encoding headers + locations."/cache" = { + root = "/var/lib/nginx-cache"; + extraConfig = '' + gzip off; + + # Serve .narinfo files with gzip encoding + location ~ \.narinfo$ { + add_header Content-Encoding gzip; + default_type "text/x-nix-narinfo"; + } + + # Serve .ls files with gzip encoding + location ~ \.ls$ { + add_header Content-Encoding gzip; + default_type "application/json"; + } + + # Serve log files with brotli encoding + location ~ ^/cache/log/ { + add_header Content-Encoding br; + default_type "text/plain"; + } + ''; + }; + }; + + systemd.services.nginx = { + serviceConfig = { + StateDirectory = "nginx-cache"; + StateDirectoryMode = "0755"; + }; + }; + + environment.systemPackages = with pkgs; [ + file + gzip + brotli + curl + ]; + + virtualisation.writableStore = true; + nix.settings.substituters = lib.mkForce [ ]; + nix.settings.experimental-features = [ + "nix-command" + "flakes" + ]; + }; + }; + + # Check that when nix-prefetch-url is used with a zst tarball it does not get decompressed. + # Also test HTTP binary cache store with compression support. 
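The subtlety the first half of this test guards against: `Content-Encoding: gzip` describes the HTTP transfer, not the document, so once the transfer layer strips the gzip wrapper the payload must still be the original zstd archive; a client that kept decompressing based on sniffed magic bytes would hand back a different file and break the prefetched hash. Checked by hand, the invariant looks like:

```bash
# The gzip layer belongs to the transfer; the payload stays zstd.
curl --compressed -s -D- http://localhost/archive -o archive \
  | grep -i 'content-encoding: gzip'   # nginx really compressed on the fly
file --brief --mime-type archive       # expect: application/zstd
tar --zstd -tf archive >/dev/null      # and it still unpacks as zstd
```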
+ testScript = '' + # fmt: off + start_all() + + machine.wait_for_unit("nginx.service") + + # Original test: zstd archive with gzip content-encoding + # Make sure that the file is properly compressed as the test would be meaningless otherwise + curl_output = machine.succeed("curl --compressed -v http://localhost/archive 2>&1") + assert "content-encoding: gzip" in curl_output.lower(), f"Expected 'content-encoding: gzip' in curl output, but got: {curl_output}" + + archive_path = machine.succeed("nix-prefetch-url http://localhost/archive --print-path | tail -n1").strip() + mime_type = machine.succeed(f"file --brief --mime-type {archive_path}").strip() + assert mime_type == "application/zstd", f"Expected archive to be 'application/zstd', but got: {mime_type}" + machine.succeed(f"tar --zstd -xf {archive_path}") + + # Test HTTP binary cache store with compression + outPath = machine.succeed(""" + nix build --store /var/lib/build-store -f ${testDrv} --print-out-paths --print-build-logs + """).strip() + + drvPath = machine.succeed(f""" + nix path-info --store /var/lib/build-store --derivation {outPath} + """).strip() + + # Upload to cache with compression (use cache-upload endpoint) + machine.succeed(f""" + nix copy --store /var/lib/build-store --to 'http://localhost/cache-upload?narinfo-compression=gzip&ls-compression=gzip&write-nar-listing=1' {outPath} -vvvvv 2>&1 | tail -100 + """) + machine.succeed(f""" + nix store copy-log --store /var/lib/build-store --to 'http://localhost/cache-upload?log-compression=br' {drvPath} -vvvvv 2>&1 | tail -100 + """) + + # List cache contents + print(machine.succeed("find /var/lib/nginx-cache -type f")) + + narinfoHash = outPath.split('/')[3].split('-')[0] + drvName = drvPath.split('/')[3] + + # Verify compression + machine.succeed(f"gzip -t /var/lib/nginx-cache/cache/{narinfoHash}.narinfo") + machine.succeed(f"gzip -t /var/lib/nginx-cache/cache/{narinfoHash}.ls") + machine.succeed(f"brotli -t /var/lib/nginx-cache/cache/log/{drvName}") + + # Check Content-Encoding headers on the download endpoint + narinfo_headers = machine.succeed(f"curl -I http://localhost/cache/{narinfoHash}.narinfo 2>&1") + assert "content-encoding: gzip" in narinfo_headers.lower(), f"Expected 'content-encoding: gzip' for .narinfo file, but headers were: {narinfo_headers}" + + ls_headers = machine.succeed(f"curl -I http://localhost/cache/{narinfoHash}.ls 2>&1") + assert "content-encoding: gzip" in ls_headers.lower(), f"Expected 'content-encoding: gzip' for .ls file, but headers were: {ls_headers}" + + log_headers = machine.succeed(f"curl -I http://localhost/cache/log/{drvName} 2>&1") + assert "content-encoding: br" in log_headers.lower(), f"Expected 'content-encoding: br' for log file, but headers were: {log_headers}" + + # Test fetching from cache + machine.succeed(f"nix copy --from 'http://localhost/cache' --no-check-sigs {outPath}") + + # Test log retrieval + log_output = machine.succeed(f"nix log --store 'http://localhost/cache' {drvPath} 2>&1") + assert "Building test package" in log_output, f"Expected 'Building test package' in log output, but got: {log_output}" + ''; +} diff --git a/tests/nixos/default.nix b/tests/nixos/default.nix index 2031e02a4..edfa4124f 100644 --- a/tests/nixos/default.nix +++ b/tests/nixos/default.nix @@ -187,7 +187,7 @@ in ca-fd-leak = runNixOSTest ./ca-fd-leak; - gzip-content-encoding = runNixOSTest ./gzip-content-encoding.nix; + content-encoding = runNixOSTest ./content-encoding.nix; functional_user = runNixOSTest ./functional/as-user.nix; @@ -207,5 +207,7 @@ 
in fetchurl = runNixOSTest ./fetchurl.nix; + fetchersSubstitute = runNixOSTest ./fetchers-substitute.nix; + chrootStore = runNixOSTest ./chroot-store.nix; } diff --git a/tests/nixos/fetchers-substitute.nix b/tests/nixos/fetchers-substitute.nix new file mode 100644 index 000000000..453982677 --- /dev/null +++ b/tests/nixos/fetchers-substitute.nix @@ -0,0 +1,176 @@ +{ + name = "fetchers-substitute"; + + nodes.substituter = + { pkgs, ... }: + { + virtualisation.writableStore = true; + + nix.settings.extra-experimental-features = [ + "nix-command" + "fetch-tree" + ]; + + networking.firewall.allowedTCPPorts = [ 5000 ]; + + services.nix-serve = { + enable = true; + secretKeyFile = + let + key = pkgs.writeTextFile { + name = "secret-key"; + text = '' + substituter:SerxxAca5NEsYY0DwVo+subokk+OoHcD9m6JwuctzHgSQVfGHe6nCc+NReDjV3QdFYPMGix4FMg0+K/TM1B3aA== + ''; + }; + in + "${key}"; + }; + }; + + nodes.importer = + { lib, ... }: + { + virtualisation.writableStore = true; + + nix.settings = { + extra-experimental-features = [ + "nix-command" + "fetch-tree" + ]; + substituters = lib.mkForce [ "http://substituter:5000" ]; + trusted-public-keys = lib.mkForce [ "substituter:EkFXxh3upwnPjUXg41d0HRWDzBoseBTINPiv0zNQd2g=" ]; + }; + }; + + testScript = + { nodes }: # python + '' + import json + + start_all() + + substituter.wait_for_unit("multi-user.target") + + ########################################## + # Test 1: builtins.fetchurl with substitution + ########################################## + + missing_file = "/only-on-substituter.txt" + + substituter.succeed(f"echo 'this should only exist on the substituter' > {missing_file}") + + file_hash = substituter.succeed(f"nix hash file {missing_file}").strip() + + file_store_path_json = substituter.succeed(f""" + nix-instantiate --eval --json --read-write-mode --expr ' + builtins.fetchurl {{ + url = "file://{missing_file}"; + sha256 = "{file_hash}"; + }} + ' + """) + + file_store_path = json.loads(file_store_path_json) + + substituter.succeed(f"nix store sign --key-file ${nodes.substituter.services.nix-serve.secretKeyFile} {file_store_path}") + + importer.wait_for_unit("multi-user.target") + + print("Testing fetchurl with substitution...") + importer.succeed(f""" + nix-instantiate -vvvvv --eval --json --read-write-mode --expr ' + builtins.fetchurl {{ + url = "file://{missing_file}"; + sha256 = "{file_hash}"; + }} + ' + """) + print("✓ fetchurl substitution works!") + + ########################################## + # Test 2: builtins.fetchTarball with substitution + ########################################## + + missing_tarball = "/only-on-substituter.tar.gz" + + # Create a directory with some content + substituter.succeed(""" + mkdir -p /tmp/test-tarball + echo 'Hello from tarball!' 
> /tmp/test-tarball/hello.txt + echo 'Another file' > /tmp/test-tarball/file2.txt + """) + + # Create a tarball + substituter.succeed(f"tar czf {missing_tarball} -C /tmp test-tarball") + + # For fetchTarball, we need to first fetch it without hash to get the store path, + # then compute the NAR hash of that path + tarball_store_path_json = substituter.succeed(f""" + nix-instantiate --eval --json --read-write-mode --expr ' + builtins.fetchTarball {{ + url = "file://{missing_tarball}"; + }} + ' + """) + + tarball_store_path = json.loads(tarball_store_path_json) + + # Get the NAR hash of the unpacked tarball in SRI format + path_info_json = substituter.succeed(f"nix path-info --json {tarball_store_path}").strip() + path_info_dict = json.loads(path_info_json) + # nix path-info returns a dict with store paths as keys + tarball_hash_sri = path_info_dict[tarball_store_path]["narHash"] + print(f"Tarball NAR hash (SRI): {tarball_hash_sri}") + + # Also get the old format hash for fetchTarball (which uses sha256 parameter) + tarball_hash = substituter.succeed(f"nix-store --query --hash {tarball_store_path}").strip() + + # Sign the tarball's store path + substituter.succeed(f"nix store sign --recursive --key-file ${nodes.substituter.services.nix-serve.secretKeyFile} {tarball_store_path}") + + # Now try to fetch the same tarball on the importer + # The file doesn't exist locally, so it should be substituted + print("Testing fetchTarball with substitution...") + result = importer.succeed(f""" + nix-instantiate -vvvvv --eval --json --read-write-mode --expr ' + builtins.fetchTarball {{ + url = "file://{missing_tarball}"; + sha256 = "{tarball_hash}"; + }} + ' + """) + + result_path = json.loads(result) + print(f"✓ fetchTarball substitution works! Result: {result_path}") + + # Verify the content is correct + # fetchTarball strips the top-level directory if there's only one + content = importer.succeed(f"cat {result_path}/hello.txt").strip() + assert content == "Hello from tarball!", f"Content mismatch: {content}" + print("✓ fetchTarball content verified!") + + ########################################## + # Test 3: Verify fetchTree does NOT substitute (preserves metadata) + ########################################## + + print("Testing that fetchTree without __final does NOT use substitution...") + + # fetchTree with just narHash (not __final) should try to download, which will fail + # since the file doesn't exist on the importer + exit_code = importer.fail(f""" + nix-instantiate --eval --json --read-write-mode --expr ' + builtins.fetchTree {{ + type = "tarball"; + url = "file:///only-on-substituter.tar.gz"; + narHash = "{tarball_hash_sri}"; + }} + ' 2>&1 + """) + + # Should fail with "does not exist" since it tries to download instead of substituting + assert "does not exist" in exit_code or "Couldn't open file" in exit_code, f"Expected download failure, got: {exit_code}" + print("✓ fetchTree correctly does NOT substitute non-final inputs!") + print(" (This preserves metadata like lastModified from the actual fetch)") + ''; +} diff --git a/tests/nixos/gzip-content-encoding.nix b/tests/nixos/gzip-content-encoding.nix deleted file mode 100644 index 22d196c61..000000000 --- a/tests/nixos/gzip-content-encoding.nix +++ /dev/null @@ -1,74 +0,0 @@ -# Test that compressed files fetched from server with compressed responses -# do not get excessively decompressed. -# E.g. fetching a zstd compressed tarball from a server, -# which compresses the response with `Content-Encoding: gzip`. 
-# The expected result is that the fetched file is a zstd archive. - -{ lib, config, ... }: - -let - pkgs = config.nodes.machine.nixpkgs.pkgs; - - ztdCompressedFile = pkgs.stdenv.mkDerivation { - name = "dummy-zstd-compressed-archive"; - dontUnpack = true; - nativeBuildInputs = with pkgs; [ zstd ]; - buildPhase = '' - mkdir archive - for _ in {1..100}; do echo "lorem" > archive/file1; done - for _ in {1..100}; do echo "ipsum" > archive/file2; done - tar --zstd -cf archive.tar.zst archive - ''; - installPhase = '' - install -Dm 644 -T archive.tar.zst $out/share/archive - ''; - }; - - fileCmd = "${pkgs.file}/bin/file"; -in - -{ - name = "gzip-content-encoding"; - - nodes = { - machine = - { config, pkgs, ... }: - { - networking.firewall.allowedTCPPorts = [ 80 ]; - - services.nginx.enable = true; - services.nginx.virtualHosts."localhost" = { - root = "${ztdCompressedFile}/share/"; - # Make sure that nginx really tries to compress the - # file on the fly with no regard to size/mime. - # http://nginx.org/en/docs/http/ngx_http_gzip_module.html - extraConfig = '' - gzip on; - gzip_types *; - gzip_proxied any; - gzip_min_length 0; - ''; - }; - virtualisation.writableStore = true; - virtualisation.additionalPaths = with pkgs; [ file ]; - nix.settings.substituters = lib.mkForce [ ]; - }; - }; - - # Check that when nix-prefetch-url is used with a zst tarball it does not get decompressed. - testScript = - { nodes }: - '' - # fmt: off - start_all() - - machine.wait_for_unit("nginx.service") - machine.succeed(""" - # Make sure that the file is properly compressed as the test would be meaningless otherwise - curl --compressed -v http://localhost/archive |& tr -s ' ' |& grep --ignore-case 'content-encoding: gzip' - archive_path=$(nix-prefetch-url http://localhost/archive --print-path | tail -n1) - [[ $(${fileCmd} --brief --mime-type $archive_path) == "application/zstd" ]] - tar --zstd -xf $archive_path - """) - ''; -} diff --git a/tests/nixos/s3-binary-cache-store.nix b/tests/nixos/s3-binary-cache-store.nix index a22e4c2c2..a07375489 100644 --- a/tests/nixos/s3-binary-cache-store.nix +++ b/tests/nixos/s3-binary-cache-store.nix @@ -1,25 +1,23 @@ { - lib, config, - nixpkgs, ... }: let pkgs = config.nodes.client.nixpkgs.pkgs; - pkgA = pkgs.cowsay; + # Test packages - minimal packages for fast copying + pkgA = pkgs.writeText "test-package-a" "test package a"; + pkgB = pkgs.writeText "test-package-b" "test package b"; + pkgC = pkgs.writeText "test-package-c" "test package c"; + # S3 configuration accessKey = "BKIKJAA5BMMU2RHO6IBB"; secretKey = "V7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12"; - env = "AWS_ACCESS_KEY_ID=${accessKey} AWS_SECRET_ACCESS_KEY=${secretKey}"; - - storeUrl = "s3://my-cache?endpoint=http://server:9000&region=eu-west-1"; - objectThatDoesNotExist = "s3://my-cache/foo-that-does-not-exist?endpoint=http://server:9000&region=eu-west-1"; in { - name = "s3-binary-cache-store"; + name = "curl-s3-binary-cache-store"; nodes = { server = @@ -31,8 +29,15 @@ in }: { virtualisation.writableStore = true; - virtualisation.additionalPaths = [ pkgA ]; + virtualisation.cores = 2; + virtualisation.additionalPaths = [ + pkgA + pkgB + pkgC + pkgs.coreutils + ]; environment.systemPackages = [ pkgs.minio-client ]; + nix.nixPath = [ "nixpkgs=${pkgs.path}" ]; nix.extraOptions = '' experimental-features = nix-command substituters = @@ -52,6 +57,7 @@ in { config, pkgs, ...
}: { virtualisation.writableStore = true; + virtualisation.cores = 2; nix.extraOptions = '' experimental-features = nix-command substituters = @@ -61,38 +67,740 @@ in testScript = { nodes }: + # python '' - # fmt: off + import json + import random + import re + import uuid + + # ============================================================================ + # Configuration + # ============================================================================ + + ACCESS_KEY = '${accessKey}' + SECRET_KEY = '${secretKey}' + ENDPOINT = 'http://server:9000' + REGION = 'eu-west-1' + + PKGS = { + 'A': '${pkgA}', + 'B': '${pkgB}', + 'C': '${pkgC}', + } + + ENV_WITH_CREDS = f"AWS_ACCESS_KEY_ID={ACCESS_KEY} AWS_SECRET_ACCESS_KEY={SECRET_KEY}" + + # ============================================================================ + # Helper Functions + # ============================================================================ + + def make_s3_url(bucket, path="", **params): + """Build S3 URL with optional path and query parameters""" + params.setdefault('endpoint', ENDPOINT) + params.setdefault('region', REGION) + query = '&'.join(f"{k}={v}" for k, v in params.items()) + bucket_and_path = f"{bucket}{path}" if path else bucket + return f"s3://{bucket_and_path}?{query}" + + def get_package_hash(pkg_path): + """Extract store hash from package path""" + return pkg_path.split("/")[-1].split("-")[0] + + def verify_content_encoding(machine, bucket, object_path, expected_encoding): + """Verify S3 object has expected Content-Encoding header""" + stat = machine.succeed(f"mc stat minio/{bucket}/{object_path}") + if "Content-Encoding" not in stat or expected_encoding not in stat: + print(f"mc stat output for {object_path}:") + print(stat) + raise Exception(f"Expected Content-Encoding: {expected_encoding} header on {object_path}") + + def verify_no_compression(machine, bucket, object_path): + """Verify S3 object has no compression headers""" + stat = machine.succeed(f"mc stat minio/{bucket}/{object_path}") + if "Content-Encoding" in stat and ("gzip" in stat or "xz" in stat): + print(f"mc stat output for {object_path}:") + print(stat) + raise Exception(f"Object {object_path} should not have compression Content-Encoding") + + def assert_count(output, pattern, expected, error_msg): + """Assert that pattern appears exactly expected times in output""" + actual = output.count(pattern) + if actual != expected: + print("Debug output:") + print(output) + raise Exception(f"{error_msg}: expected {expected}, got {actual}") + + def verify_packages_in_store(machine, pkg_paths, should_exist=True): + """ + Verify whether packages exist in the store. + + Args: + machine: The machine to check on + pkg_paths: List of package paths to check (or single path) + should_exist: If True, verify packages exist; if False, verify they don't + """ + paths = [pkg_paths] if isinstance(pkg_paths, str) else pkg_paths + for pkg in paths: + if should_exist: + machine.succeed(f"nix path-info {pkg}") + else: + machine.fail(f"nix path-info {pkg}") + + def setup_s3(populate_bucket=[], public=False, versioned=False): + """ + Decorator that creates/destroys a unique bucket for each test. + Optionally pre-populates bucket with specified packages. + Cleans up client store after test completion. 
+ + Args: + populate_bucket: List of packages to upload before test runs + public: If True, make the bucket publicly accessible + versioned: If True, enable versioning on the bucket before populating + """ + def decorator(test_func): + def wrapper(): + bucket = str(uuid.uuid4()) + server.succeed(f"mc mb minio/{bucket}") + try: + if public: + server.succeed(f"mc anonymous set download minio/{bucket}") + if versioned: + server.succeed(f"mc version enable minio/{bucket}") + if populate_bucket: + store_url = make_s3_url(bucket) + for pkg in populate_bucket: + server.succeed(f"{ENV_WITH_CREDS} nix copy --to '{store_url}' {pkg}") + test_func(bucket) + finally: + server.succeed(f"mc rb --force minio/{bucket}") + # Clean up client store - only delete if path exists + for pkg in PKGS.values(): + client.succeed(f"[ ! -e {pkg} ] || nix store delete --ignore-liveness {pkg}") + return wrapper + return decorator + + # ============================================================================ + # Test Functions + # ============================================================================ + + @setup_s3() + def test_credential_caching(bucket): + """Verify credential providers are cached and reused""" + print("\n=== Testing Credential Caching ===") + + store_url = make_s3_url(bucket) + output = server.succeed( + f"{ENV_WITH_CREDS} nix copy --debug --to '{store_url}' " + f"{PKGS['A']} {PKGS['B']} {PKGS['C']} 2>&1" + ) + + assert_count( + output, + "creating new AWS credential provider", + 1, + "Credential provider caching failed" + ) + + print("✓ Credential provider created once and cached") + + @setup_s3(populate_bucket=[PKGS['A']]) + def test_fetchurl_basic(bucket): + """Test builtins.fetchurl works with s3:// URLs""" + print("\n=== Testing builtins.fetchurl ===") + + client.wait_for_unit("network-addresses-eth1.service") + + cache_info_url = make_s3_url(bucket, path="/nix-cache-info") + + client.succeed( + f"{ENV_WITH_CREDS} nix eval --impure --expr " + f"'builtins.fetchurl {{ name = \"foo\"; url = \"{cache_info_url}\"; }}'" + ) + + print("✓ builtins.fetchurl works with s3:// URLs") + + @setup_s3() + def test_error_message_formatting(bucket): + """Verify error messages display URLs correctly""" + print("\n=== Testing Error Message Formatting ===") + + nonexistent_url = make_s3_url(bucket, path="/foo-that-does-not-exist") + expected_http_url = f"{ENDPOINT}/{bucket}/foo-that-does-not-exist" + + error_msg = client.fail( + f"{ENV_WITH_CREDS} nix eval --impure --expr " + f"'builtins.fetchurl {{ name = \"foo\"; url = \"{nonexistent_url}\"; }}' 2>&1" + ) + + if f"unable to download '{expected_http_url}': HTTP error 404" not in error_msg: + print("Actual error message:") + print(error_msg) + raise Exception("Error message formatting failed - should show actual URL, not %s placeholder") + + print("✓ Error messages format URLs correctly") + + @setup_s3(populate_bucket=[PKGS['A']]) + def test_fork_credential_preresolution(bucket): + """Test credential pre-resolution in forked processes""" + print("\n=== Testing Fork Credential Pre-resolution ===") + + # Get hash of nix-cache-info for fixed-output derivation + cache_info_url = make_s3_url(bucket, path="/nix-cache-info") + + cache_info_path = client.succeed( + f"{ENV_WITH_CREDS} nix eval --impure --raw --expr " + f"'builtins.fetchurl {{ name = \"nix-cache-info\"; url = \"{cache_info_url}\"; }}'" + ).strip() + + cache_info_hash = client.succeed( + f"nix hash file --type sha256 --base32 {cache_info_path}" + ).strip() + + # Build derivation with unique test ID + 
+          test_id = random.randint(0, 10000)
+          test_url = make_s3_url(bucket, path="/nix-cache-info", test_id=test_id)
+
+          fetchurl_expr = """
+              import <nix/fetchurl.nix> {{
+                name = "s3-fork-test-{id}";
+                url = "{url}";
+                sha256 = "{hash}";
+              }}
+          """.format(id=test_id, url=test_url, hash=cache_info_hash)
+
+          output = client.succeed(
+              f"{ENV_WITH_CREDS} nix build --debug --impure --no-link --expr '{fetchurl_expr}' 2>&1"
+          )
+
+          # Verify fork behavior
+          if "builtin:fetchurl creating fresh FileTransfer instance" not in output:
+              print("Debug output:")
+              print(output)
+              raise Exception("Expected to find FileTransfer creation in forked process")
+
+          print(" ✓ Forked process creates fresh FileTransfer")
+
+          # Verify pre-resolution in parent
+          required_messages = [
+              "Pre-resolving AWS credentials for S3 URL in builtin:fetchurl",
+              "Successfully pre-resolved AWS credentials in parent process",
+          ]
+
+          for msg in required_messages:
+              if msg not in output:
+                  print("Debug output:")
+                  print(output)
+                  raise Exception(f"Missing expected message: {msg}")
+
+          print(" ✓ Parent pre-resolves credentials")
+
+          # Verify child uses pre-resolved credentials
+          if "Using pre-resolved AWS credentials from parent process" not in output:
+              print("Debug output:")
+              print(output)
+              raise Exception("Child should use pre-resolved credentials")
+
+          # Extract child PID and verify it doesn't create new providers
+          filetransfer_match = re.search(
+              r'\[pid=(\d+)\] builtin:fetchurl creating fresh FileTransfer instance',
+              output
+          )
+
+          if not filetransfer_match:
+              raise Exception("Could not extract child PID from debug output")
+
+          child_pid = filetransfer_match.group(1)
+          child_provider_creation = f"[pid={child_pid}] creating new AWS credential provider"
+
+          if child_provider_creation in output:
+              print("Debug output:")
+              print(output)
+              raise Exception(f"Child process (pid={child_pid}) should NOT create new credential providers")
+
+          print(" ✓ Child uses pre-resolved credentials (no new providers)")
+
+      @setup_s3(populate_bucket=[PKGS['A'], PKGS['B'], PKGS['C']])
+      def test_store_operations(bucket):
+          """Test nix store info and copy operations"""
+          print("\n=== Testing Store Operations ===")
+
+          store_url = make_s3_url(bucket)
+
+          # Verify store info works
+          client.succeed(f"{ENV_WITH_CREDS} nix store info --store '{store_url}' >&2")
+
+          # Get and validate store info JSON
+          info_json = client.succeed(f"{ENV_WITH_CREDS} nix store info --json --store '{store_url}'")
+          store_info = json.loads(info_json)
+
+          if not store_info.get("url"):
+              raise Exception("Store should have a URL")
+
+          print(f" ✓ Store URL: {store_info['url']}")
+
+          # Test copy from store
+          verify_packages_in_store(client, PKGS['A'], should_exist=False)
+
+          output = client.succeed(
+              f"{ENV_WITH_CREDS} nix copy --debug --no-check-sigs "
+              f"--from '{store_url}' {PKGS['A']} {PKGS['B']} {PKGS['C']} 2>&1"
+          )
+
+          assert_count(
+              output,
+              "creating new AWS credential provider",
+              1,
+              "Client credential provider caching failed"
+          )
+
+          verify_packages_in_store(client, [PKGS['A'], PKGS['B'], PKGS['C']])
+
+          print(" ✓ nix copy works")
+          print(" ✓ Credentials cached on client")
+
+      @setup_s3(populate_bucket=[PKGS['A'], PKGS['B']], public=True)
+      def test_public_bucket_operations(bucket):
+          """Test store operations on public bucket without credentials"""
+          print("\n=== Testing Public Bucket Operations ===")
+
+          store_url = make_s3_url(bucket)
+
+          # Verify store info works without credentials
+          client.succeed(f"nix store info --store '{store_url}' >&2")
+          print(" ✓ nix store info works without credentials")
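+
+          # No ENV_WITH_CREDS on the commands in this test: the decorator made the
+          # bucket world-readable (mc anonymous set download), so every request here
+          # deliberately exercises the unauthenticated S3 code path.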
without credentials") + + # Get and validate store info JSON + info_json = client.succeed(f"nix store info --json --store '{store_url}'") + store_info = json.loads(info_json) + + if not store_info.get("url"): + raise Exception("Store should have a URL") + + print(f" ✓ Store URL: {store_info['url']}") + + # Verify packages are not yet in client store + verify_packages_in_store(client, [PKGS['A'], PKGS['B']], should_exist=False) + + # Test copy from public bucket without credentials + client.succeed( + f"nix copy --debug --no-check-sigs " + f"--from '{store_url}' {PKGS['A']} {PKGS['B']} 2>&1" + ) + + # Verify packages were copied successfully + verify_packages_in_store(client, [PKGS['A'], PKGS['B']]) + + print(" ✓ nix copy from public bucket works without credentials") + + @setup_s3(populate_bucket=[PKGS['A']]) + def test_url_format_variations(bucket): + """Test different S3 URL parameter combinations""" + print("\n=== Testing URL Format Variations ===") + + # Test parameter order variation (region before endpoint) + url1 = f"s3://{bucket}?region={REGION}&endpoint={ENDPOINT}" + client.succeed(f"{ENV_WITH_CREDS} nix store info --store '{url1}' >&2") + print(" ✓ Parameter order: region before endpoint works") + + # Test parameter order variation (endpoint before region) + url2 = f"s3://{bucket}?endpoint={ENDPOINT}®ion={REGION}" + client.succeed(f"{ENV_WITH_CREDS} nix store info --store '{url2}' >&2") + print(" ✓ Parameter order: endpoint before region works") + + @setup_s3(populate_bucket=[PKGS['A']]) + def test_concurrent_fetches(bucket): + """Validate thread safety with concurrent S3 operations""" + print("\n=== Testing Concurrent Fetches ===") + + # Get hash for test derivations + cache_info_url = make_s3_url(bucket, path="/nix-cache-info") + + cache_info_path = client.succeed( + f"{ENV_WITH_CREDS} nix eval --impure --raw --expr " + f"'builtins.fetchurl {{ name = \"nix-cache-info\"; url = \"{cache_info_url}\"; }}'" + ).strip() + + cache_info_hash = client.succeed( + f"nix hash file --type sha256 --base32 {cache_info_path}" + ).strip() + + # Create 5 concurrent fetch derivations + # Build base URL for concurrent test (we'll add fetch_id in Nix interpolation) + base_url = make_s3_url(bucket, path="/nix-cache-info") + concurrent_expr = """ + let + mkFetch = i: import {{ + name = "concurrent-s3-fetch-''${{toString i}}"; + url = "{url}&fetch_id=''${{toString i}}"; + sha256 = "{hash}"; + }}; + fetches = builtins.listToAttrs (map (i: {{ + name = "fetch''${{toString i}}"; + value = mkFetch i; + }}) (builtins.genList (i: i) 5)); + in fetches + """.format(url=base_url, hash=cache_info_hash) + + try: + output = client.succeed( + f"{ENV_WITH_CREDS} nix build --debug --impure --no-link " + f"--expr '{concurrent_expr}' --max-jobs 5 2>&1" + ) + except: + output = client.fail( + f"{ENV_WITH_CREDS} nix build --debug --impure --no-link " + f"--expr '{concurrent_expr}' --max-jobs 5 2>&1" + ) + + if "error:" in output.lower(): + print("Found error during concurrent fetches:") + print(output) + + providers_created = output.count("creating new AWS credential provider") + transfers_created = output.count("builtin:fetchurl creating fresh FileTransfer instance") + + print(f" ✓ {providers_created} credential providers created") + print(f" ✓ {transfers_created} FileTransfer instances created") + + if transfers_created != 5: + print("Debug output:") + print(output) + raise Exception( + f"Expected 5 FileTransfer instances for 5 concurrent fetches, got {transfers_created}" + ) + + if providers_created != 1: + 
print("Debug output:") + print(output) + raise Exception( + f"Expected 1 credential provider for concurrent fetches, got {providers_created}" + ) + + @setup_s3() + def test_compression_narinfo_gzip(bucket): + """Test narinfo compression with gzip""" + print("\n=== Testing Compression: narinfo (gzip) ===") + + store_url = make_s3_url(bucket, **{'narinfo-compression': 'gzip'}) + server.succeed(f"{ENV_WITH_CREDS} nix copy --to '{store_url}' {PKGS['B']}") + + pkg_hash = get_package_hash(PKGS['B']) + verify_content_encoding(server, bucket, f"{pkg_hash}.narinfo", "gzip") + + print(" ✓ .narinfo has Content-Encoding: gzip") + + # Verify client can download and decompress + client.succeed(f"{ENV_WITH_CREDS} nix copy --from '{store_url}' --no-check-sigs {PKGS['B']}") + verify_packages_in_store(client, PKGS['B']) + + print(" ✓ Client decompressed .narinfo successfully") + + @setup_s3() + def test_compression_mixed(bucket): + """Test mixed compression (narinfo=xz, ls=gzip)""" + print("\n=== Testing Compression: mixed (narinfo=xz, ls=gzip) ===") + + store_url = make_s3_url( + bucket, + **{'narinfo-compression': 'xz', 'write-nar-listing': 'true', 'ls-compression': 'gzip'} + ) + + server.succeed(f"{ENV_WITH_CREDS} nix copy --to '{store_url}' {PKGS['C']}") + + pkg_hash = get_package_hash(PKGS['C']) + + # Verify .narinfo has xz compression + verify_content_encoding(server, bucket, f"{pkg_hash}.narinfo", "xz") + print(" ✓ .narinfo has Content-Encoding: xz") + + # Verify .ls has gzip compression + verify_content_encoding(server, bucket, f"{pkg_hash}.ls", "gzip") + print(" ✓ .ls has Content-Encoding: gzip") + + # Verify client can download with mixed compression + client.succeed(f"{ENV_WITH_CREDS} nix copy --from '{store_url}' --no-check-sigs {PKGS['C']}") + verify_packages_in_store(client, PKGS['C']) + + print(" ✓ Client downloaded package with mixed compression") + + @setup_s3() + def test_compression_disabled(bucket): + """Verify no compression by default""" + print("\n=== Testing Compression: disabled (default) ===") + + store_url = make_s3_url(bucket) + server.succeed(f"{ENV_WITH_CREDS} nix copy --to '{store_url}' {PKGS['A']}") + + pkg_hash = get_package_hash(PKGS['A']) + verify_no_compression(server, bucket, f"{pkg_hash}.narinfo") + + print(" ✓ No compression applied by default") + + @setup_s3() + def test_nix_prefetch_url(bucket): + """Test that nix-prefetch-url retrieves actual file content from S3, not empty files (issue #8862)""" + print("\n=== Testing nix-prefetch-url S3 Content Retrieval (issue #8862) ===") + + # Create a test file with known content + test_content = "This is test content to verify S3 downloads work correctly!\n" + test_file_size = len(test_content) + + server.succeed(f"echo -n '{test_content}' > /tmp/test-file.txt") + + # Upload to S3 + server.succeed(f"mc cp /tmp/test-file.txt minio/{bucket}/test-file.txt") + + # Calculate expected hash + expected_hash = server.succeed( + "nix hash file --type sha256 --base32 /tmp/test-file.txt" + ).strip() + + print(f" ✓ Uploaded test file to S3 ({test_file_size} bytes)") + + # Use nix-prefetch-url to download from S3 + s3_url = make_s3_url(bucket, path="/test-file.txt") + + prefetch_output = client.succeed( + f"{ENV_WITH_CREDS} nix-prefetch-url --print-path '{s3_url}'" + ) + + # Extract hash and store path + # With --print-path, output is: \n + lines = prefetch_output.strip().split('\n') + prefetch_hash = lines[0] # First line is the hash + store_path = lines[1] # Second line is the store path + + # Verify hash matches + if prefetch_hash != 
+          if prefetch_hash != expected_hash:
+              raise Exception(
+                  f"Hash mismatch: expected {expected_hash}, got {prefetch_hash}"
+              )
+
+          print(" ✓ nix-prefetch-url completed with correct hash")
+
+          # Verify the downloaded file is NOT empty (the bug in #8862)
+          file_size = int(client.succeed(f"stat -c %s {store_path}").strip())
+
+          if file_size == 0:
+              raise Exception("Downloaded file is EMPTY - issue #8862 regression detected!")
+
+          if file_size != test_file_size:
+              raise Exception(
+                  f"File size mismatch: expected {test_file_size}, got {file_size}"
+              )
+
+          print(f" ✓ File has correct size ({file_size} bytes, not empty)")
+
+          # Verify actual content matches by comparing hashes instead of printing entire file
+          downloaded_hash = client.succeed(f"nix hash file --type sha256 --base32 {store_path}").strip()
+
+          if downloaded_hash != expected_hash:
+              raise Exception(f"Content hash mismatch: expected {expected_hash}, got {downloaded_hash}")
+
+          print(" ✓ File content verified correct (hash matches)")
+
+      @setup_s3(populate_bucket=[PKGS['A']], versioned=True)
+      def test_versioned_urls(bucket):
+          """Test that versionId parameter is accepted in S3 URLs"""
+          print("\n=== Testing Versioned URLs ===")
+
+          # Get the nix-cache-info file
+          cache_info_url = make_s3_url(bucket, path="/nix-cache-info")
+
+          # Fetch without versionId should work
+          client.succeed(
+              f"{ENV_WITH_CREDS} nix eval --impure --expr "
+              f"'builtins.fetchurl {{ name = \"cache-info\"; url = \"{cache_info_url}\"; }}'"
+          )
+          print(" ✓ Fetch without versionId works")
+
+          # List versions to get a version ID
+          # MinIO output format: [timestamp] size tier versionId versionNumber method filename
+          versions_output = server.succeed(f"mc ls --versions minio/{bucket}/nix-cache-info")
+
+          # Extract version ID from output (the field after STANDARD)
+          version_match = re.search(r'STANDARD\s+(\S+)\s+v\d+', versions_output)
+          if not version_match:
+              print(f"Debug: versions output: {versions_output}")
+              raise Exception("Could not extract version ID from MinIO output")
+
+          version_id = version_match.group(1)
+          print(f" ✓ Found version ID: {version_id}")
+
+          # Version ID should not be "null" since versioning was enabled before upload
+          if version_id == "null":
+              raise Exception("Version ID is 'null' - versioning may not be working correctly")
+
+          # Fetch with versionId parameter
+          versioned_url = f"{cache_info_url}&versionId={version_id}"
+          client.succeed(
+              f"{ENV_WITH_CREDS} nix eval --impure --expr "
+              f"'builtins.fetchurl {{ name = \"cache-info-versioned\"; url = \"{versioned_url}\"; }}'"
+          )
+          print(" ✓ Fetch with versionId parameter works")
+
+      @setup_s3()
+      def test_multipart_upload_basic(bucket):
+          """Test basic multipart upload with a large file"""
+          print("\n--- Test: Multipart Upload Basic ---")
+
+          large_file_size = 10 * 1024 * 1024
+          large_pkg = server.succeed(
+              "nix-store --add $(dd if=/dev/urandom of=/tmp/large-file bs=1M count=10 2>/dev/null && echo /tmp/large-file)"
+          ).strip()
+
+          chunk_size = 5 * 1024 * 1024
+          expected_parts = 3  # 10 MB of random data grows to ~10.5 MB once NAR-serialized and xz-compressed
+
+          store_url = make_s3_url(
+              bucket,
+              **{
+                  "multipart-upload": "true",
+                  "multipart-threshold": str(5 * 1024 * 1024),
+                  "multipart-chunk-size": str(chunk_size),
+              }
+          )
+
+          print(f" Uploading {large_file_size} byte file (expect {expected_parts} parts)")
+          output = server.succeed(f"{ENV_WITH_CREDS} nix copy --to '{store_url}' {large_pkg} --debug 2>&1")
+
+          if "using S3 multipart upload" not in output:
+              raise Exception("Expected multipart upload to be used")
+
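+          # With a 5 MiB chunk size, the ~10.5 MiB object should be split into
+          # ceil(10.5 / 5) = 3 parts, matching expected_parts above.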
+          expected_msg = f"{expected_parts} parts uploaded"
+          if expected_msg not in output:
+              print("Debug output:")
+              print(output)
+              raise Exception(f"Expected '{expected_msg}' in output")
+
+          print(f" ✓ Multipart upload used with {expected_parts} parts")
+
+          client.succeed(f"{ENV_WITH_CREDS} nix copy --from '{store_url}' {large_pkg} --no-check-sigs")
+          verify_packages_in_store(client, large_pkg, should_exist=True)
+
+          print(" ✓ Large file downloaded and verified")
+
+      @setup_s3()
+      def test_multipart_threshold(bucket):
+          """Test that files below the threshold use regular upload"""
+          print("\n--- Test: Multipart Threshold Behavior ---")
+
+          store_url = make_s3_url(
+              bucket,
+              **{
+                  "multipart-upload": "true",
+                  "multipart-threshold": str(1024 * 1024 * 1024),
+              }
+          )
+
+          print(" Uploading small file with high threshold")
+          output = server.succeed(f"{ENV_WITH_CREDS} nix copy --to '{store_url}' {PKGS['A']} --debug 2>&1")
+
+          if "using S3 multipart upload" in output:
+              raise Exception("Should not use multipart for file below threshold")
+
+          if "using S3 regular upload" not in output:
+              raise Exception("Expected regular upload to be used")
+
+          print(" ✓ Regular upload used for file below threshold")
+
+          client.succeed(f"{ENV_WITH_CREDS} nix copy --no-check-sigs --from '{store_url}' {PKGS['A']}")
+          verify_packages_in_store(client, PKGS['A'], should_exist=True)
+
+          print(" ✓ Small file uploaded and verified")
+
+      @setup_s3()
+      def test_multipart_with_log_compression(bucket):
+          """Test multipart upload with compressed build logs"""
+          print("\n--- Test: Multipart Upload with Log Compression ---")
+
+          # Create a derivation that produces a large text log (12 MB of base64 output)
+          drv_path = server.succeed(
+              """
+              nix-instantiate --expr '
+                let pkgs = import <nixpkgs> {};
+                in derivation {
+                  name = "large-log-builder";
+                  builder = "/bin/sh";
+                  args = ["-c" "$coreutils/bin/dd if=/dev/urandom bs=1M count=12 | $coreutils/bin/base64; echo success > $out"];
+                  coreutils = pkgs.coreutils;
+                  system = builtins.currentSystem;
+                }
+              '
+              """
+          ).strip()
+
+          print(" Building derivation to generate large log")
+          server.succeed(f"nix-store --realize {drv_path} &>/dev/null")
+
+          # Upload logs with compression and multipart
+          store_url = make_s3_url(
+              bucket,
+              **{
+                  "multipart-upload": "true",
+                  "multipart-threshold": str(5 * 1024 * 1024),
+                  "multipart-chunk-size": str(5 * 1024 * 1024),
+                  "log-compression": "xz",
+              }
+          )
+
+          print(" Uploading build log with compression and multipart")
+          output = server.succeed(
+              f"{ENV_WITH_CREDS} nix store copy-log --to '{store_url}' {drv_path} --debug 2>&1"
+          )
+
+          # Should use multipart for the compressed log
+          if "using S3 multipart upload" not in output or "log/" not in output:
+              print("Debug output:")
+              print(output)
+              raise Exception("Expected multipart upload to be used for compressed log")
+
+          if "parts uploaded" not in output:
+              print("Debug output:")
+              print(output)
+              raise Exception("Expected multipart completion message")
+
+          print(" ✓ Compressed log uploaded with multipart")
+
+      # ============================================================================
+      # Main Test Execution
+      # ============================================================================
+
+      print("\n" + "="*80)
+      print("S3 Binary Cache Store Tests")
+      print("="*80)
+
       start_all()
 
-      # Create a binary cache.
+      # Initialize MinIO server
       server.wait_for_unit("minio")
       server.wait_for_unit("network-addresses-eth1.service")
       server.wait_for_open_port(9000)
+      server.succeed(f"mc config host add minio http://localhost:9000 {ACCESS_KEY} {SECRET_KEY} --api s3v4")
-      server.succeed("mc config host add minio http://localhost:9000 ${accessKey} ${secretKey} --api s3v4")
-      server.succeed("mc mb minio/my-cache")

+      # Run tests (each gets an isolated bucket via the decorator)
+      test_credential_caching()
+      test_fetchurl_basic()
+      test_error_message_formatting()
+      test_fork_credential_preresolution()
+      test_store_operations()
+      test_public_bucket_operations()
+      test_url_format_variations()
+      test_concurrent_fetches()
+      test_compression_narinfo_gzip()
+      test_compression_mixed()
+      test_compression_disabled()
+      test_nix_prefetch_url()
+      test_versioned_urls()
+      # FIXME: enable when multipart fully lands
+      # test_multipart_upload_basic()
+      # test_multipart_threshold()
+      # test_multipart_with_log_compression()

-      server.succeed("${env} nix copy --to '${storeUrl}' ${pkgA}")
-
-      client.wait_for_unit("network-addresses-eth1.service")
-
-      # Test fetchurl on s3:// URLs while we're at it.
-      client.succeed("${env} nix eval --impure --expr 'builtins.fetchurl { name = \"foo\"; url = \"s3://my-cache/nix-cache-info?endpoint=http://server:9000&region=eu-west-1\"; }'")
-
-      # Test that the format string in the error message is properly setup and won't display `%s` instead of the failed URI
-      msg = client.fail("${env} nix eval --impure --expr 'builtins.fetchurl { name = \"foo\"; url = \"${objectThatDoesNotExist}\"; }' 2>&1")
-      if "S3 object '${objectThatDoesNotExist}' does not exist" not in msg:
-          print(msg) # So that you can see the message that was improperly formatted
-          raise Exception("Error message formatting didn't work")
-
-      # Copy a package from the binary cache.
-      client.fail("nix path-info ${pkgA}")
-
-      client.succeed("${env} nix store info --store '${storeUrl}' >&2")
-
-      client.succeed("${env} nix copy --no-check-sigs --from '${storeUrl}' ${pkgA}")
-
-      client.succeed("nix path-info ${pkgA}")
+
+      print("\n" + "="*80)
+      print("✓ All S3 Binary Cache Store Tests Passed!")
+      print("="*80)
   '';
 }