1
1
Fork 0
mirror of https://github.com/NixOS/nix.git synced 2025-11-08 11:36:03 +01:00

Merge remote-tracking branch 'upstream/master' into upstream-RossComputerGuy/feat/expose-computefsclosure

This commit is contained in:
Robert Hensing 2025-10-15 15:30:43 +02:00
commit a9d9b50b72
467 changed files with 9259 additions and 5039 deletions

View file

@ -15,6 +15,10 @@ so you understand the process and the expectations.
- volunteering contributions effectively
- how to get help and our review process.
PR stuck in review? We have two Nix team meetings per week online that are open for everyone in a jitsi conference:
- https://calendar.google.com/calendar/u/0/embed?src=b9o52fobqjak8oq8lfkhg3t0qg@group.calendar.google.com
-->
## Motivation

View file

@ -4,12 +4,22 @@ inputs:
dogfood:
description: "Whether to use Nix installed from the latest artifact from master branch"
required: true # Be explicit about the fact that we are using unreleased artifacts
experimental-installer:
description: "Whether to use the experimental installer to install Nix"
default: false
experimental-installer-version:
description: "Version of the experimental installer to use. If `latest`, the newest artifact from the default branch is used."
# TODO: This should probably be pinned to a release after https://github.com/NixOS/experimental-nix-installer/pull/49 lands in one
default: "latest"
extra_nix_config:
description: "Gets appended to `/etc/nix/nix.conf` if passed."
install_url:
description: "URL of the Nix installer"
required: false
default: "https://releases.nixos.org/nix/nix-2.30.2/install"
tarball_url:
description: "URL of the Nix tarball to use with the experimental installer"
required: false
github_token:
description: "Github token"
required: true
@ -37,14 +47,74 @@ runs:
gh run download "$RUN_ID" --repo "$DOGFOOD_REPO" -n "$INSTALLER_ARTIFACT" -D "$INSTALLER_DOWNLOAD_DIR"
echo "installer-path=file://$INSTALLER_DOWNLOAD_DIR" >> "$GITHUB_OUTPUT"
TARBALL_PATH="$(find "$INSTALLER_DOWNLOAD_DIR" -name 'nix*.tar.xz' -print | head -n 1)"
echo "tarball-path=file://$TARBALL_PATH" >> "$GITHUB_OUTPUT"
echo "::notice ::Dogfooding Nix installer from master (https://github.com/$DOGFOOD_REPO/actions/runs/$RUN_ID)"
env:
GH_TOKEN: ${{ inputs.github_token }}
DOGFOOD_REPO: "NixOS/nix"
- name: "Gather system info for experimental installer"
shell: bash
if: ${{ inputs.experimental-installer == 'true' }}
run: |
echo "::notice Using experimental installer from $EXPERIMENTAL_INSTALLER_REPO (https://github.com/$EXPERIMENTAL_INSTALLER_REPO)"
if [ "$RUNNER_OS" == "Linux" ]; then
EXPERIMENTAL_INSTALLER_SYSTEM="linux"
echo "EXPERIMENTAL_INSTALLER_SYSTEM=$EXPERIMENTAL_INSTALLER_SYSTEM" >> "$GITHUB_ENV"
elif [ "$RUNNER_OS" == "macOS" ]; then
EXPERIMENTAL_INSTALLER_SYSTEM="darwin"
echo "EXPERIMENTAL_INSTALLER_SYSTEM=$EXPERIMENTAL_INSTALLER_SYSTEM" >> "$GITHUB_ENV"
else
echo "::error ::Unsupported RUNNER_OS: $RUNNER_OS"
exit 1
fi
if [ "$RUNNER_ARCH" == "X64" ]; then
EXPERIMENTAL_INSTALLER_ARCH=x86_64
echo "EXPERIMENTAL_INSTALLER_ARCH=$EXPERIMENTAL_INSTALLER_ARCH" >> "$GITHUB_ENV"
elif [ "$RUNNER_ARCH" == "ARM64" ]; then
EXPERIMENTAL_INSTALLER_ARCH=aarch64
echo "EXPERIMENTAL_INSTALLER_ARCH=$EXPERIMENTAL_INSTALLER_ARCH" >> "$GITHUB_ENV"
else
echo "::error ::Unsupported RUNNER_ARCH: $RUNNER_ARCH"
exit 1
fi
echo "EXPERIMENTAL_INSTALLER_ARTIFACT=nix-installer-$EXPERIMENTAL_INSTALLER_ARCH-$EXPERIMENTAL_INSTALLER_SYSTEM" >> "$GITHUB_ENV"
env:
EXPERIMENTAL_INSTALLER_REPO: "NixOS/experimental-nix-installer"
- name: "Download latest experimental installer"
shell: bash
id: download-latest-experimental-installer
if: ${{ inputs.experimental-installer == 'true' && inputs.experimental-installer-version == 'latest' }}
run: |
RUN_ID=$(gh run list --repo "$EXPERIMENTAL_INSTALLER_REPO" --workflow ci.yml --branch main --status success --json databaseId --jq ".[0].databaseId")
EXPERIMENTAL_INSTALLER_DOWNLOAD_DIR="$GITHUB_WORKSPACE/$EXPERIMENTAL_INSTALLER_ARTIFACT"
mkdir -p "$EXPERIMENTAL_INSTALLER_DOWNLOAD_DIR"
gh run download "$RUN_ID" --repo "$EXPERIMENTAL_INSTALLER_REPO" -n "$EXPERIMENTAL_INSTALLER_ARTIFACT" -D "$EXPERIMENTAL_INSTALLER_DOWNLOAD_DIR"
# Executable permissions are lost in artifacts
find $EXPERIMENTAL_INSTALLER_DOWNLOAD_DIR -type f -exec chmod +x {} +
echo "installer-path=$EXPERIMENTAL_INSTALLER_DOWNLOAD_DIR" >> "$GITHUB_OUTPUT"
env:
GH_TOKEN: ${{ inputs.github_token }}
EXPERIMENTAL_INSTALLER_REPO: "NixOS/experimental-nix-installer"
- uses: cachix/install-nix-action@c134e4c9e34bac6cab09cf239815f9339aaaf84e # v31.5.1
if: ${{ inputs.experimental-installer != 'true' }}
with:
# Ternary operator in GHA: https://www.github.com/actions/runner/issues/409#issuecomment-752775072
install_url: ${{ inputs.dogfood == 'true' && format('{0}/install', steps.download-nix-installer.outputs.installer-path) || inputs.install_url }}
install_options: ${{ inputs.dogfood == 'true' && format('--tarball-url-prefix {0}', steps.download-nix-installer.outputs.installer-path) || '' }}
extra_nix_config: ${{ inputs.extra_nix_config }}
- uses: DeterminateSystems/nix-installer-action@786fff0690178f1234e4e1fe9b536e94f5433196 # v20
if: ${{ inputs.experimental-installer == 'true' }}
with:
diagnostic-endpoint: ""
# TODO: It'd be nice to use `artifacts.nixos.org` for both of these, maybe through an `/experimental-installer/latest` endpoint? or `/commit/<hash>`?
local-root: ${{ inputs.experimental-installer-version == 'latest' && steps.download-latest-experimental-installer.outputs.installer-path || '' }}
source-url: ${{ inputs.experimental-installer-version != 'latest' && 'https://artifacts.nixos.org/experimental-installer/tag/${{ inputs.experimental-installer-version }}/${{ env.EXPERIMENTAL_INSTALLER_ARTIFACT }}' || '' }}
nix-package-url: ${{ inputs.dogfood == 'true' && steps.download-nix-installer.outputs.tarball-path || (inputs.tarball_url || '') }}
extra-conf: ${{ inputs.extra_nix_config }}

37
.github/workflows/backport.yml vendored Normal file
View file

@ -0,0 +1,37 @@
# Opens backport PRs automatically when a merged pull request carries a
# `backport <branch>` label (replaces the removed Mergify backport rules).
name: Backport
on:
  pull_request_target:
    types: [closed, labeled]
permissions:
  contents: read
jobs:
  backport:
    name: Backport Pull Request
    permissions:
      # for korthout/backport-action
      contents: write
      pull-requests: write
    # Only run in the upstream repo, only for merged PRs, and — when triggered
    # by a `labeled` event — only for labels starting with "backport".
    # NOTE: GitHub Actions `startsWith(searchString, searchValue)` takes the
    # full string first; the previous expression had the arguments reversed
    # (`startsWith('backport', github.event.label.name)`), which never matched
    # labels such as "backport 2.30-maintenance".
    if: github.repository_owner == 'NixOS' && github.event.pull_request.merged == true && (github.event_name != 'labeled' || startsWith(github.event.label.name, 'backport'))
    runs-on: ubuntu-24.04-arm
    steps:
      - name: Generate GitHub App token
        id: generate-token
        uses: actions/create-github-app-token@v2
        with:
          app-id: ${{ vars.CI_APP_ID }}
          private-key: ${{ secrets.CI_APP_PRIVATE_KEY }}
      - uses: actions/checkout@v5
        with:
          ref: ${{ github.event.pull_request.head.sha }}
          # required to find all branches
          fetch-depth: 0
      - name: Create backport PRs
        uses: korthout/backport-action@d07416681cab29bf2661702f925f020aaa962997 # v3.4.1
        id: backport
        with:
          # Config README: https://github.com/korthout/backport-action#backport-action
          github_token: ${{ steps.generate-token.outputs.token }}
          github_workspace: ${{ github.workspace }}
          auto_merge_enabled: true
          pull_description: |-
            Automatic backport to `${target_branch}`, triggered by a label in #${pull_number}.

View file

@ -4,6 +4,8 @@ on:
pull_request:
merge_group:
push:
branches:
- master
workflow_dispatch:
inputs:
dogfood:
@ -29,7 +31,32 @@ jobs:
github_token: ${{ secrets.GITHUB_TOKEN }}
- run: nix flake show --all-systems --json
pre-commit-checks:
name: pre-commit checks
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@v5
- uses: ./.github/actions/install-nix-action
with:
dogfood: ${{ github.event_name == 'workflow_dispatch' && inputs.dogfood || github.event_name != 'workflow_dispatch' }}
extra_nix_config: experimental-features = nix-command flakes
github_token: ${{ secrets.GITHUB_TOKEN }}
- uses: DeterminateSystems/magic-nix-cache-action@main
- run: ./ci/gha/tests/pre-commit-checks
basic-checks:
name: aggregate basic checks
if: ${{ always() }}
runs-on: ubuntu-24.04
needs: [pre-commit-checks, eval]
steps:
- name: Exit with any errors
if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') }}
run: |
exit 1
tests:
needs: basic-checks
strategy:
fail-fast: false
matrix:
@ -40,18 +67,42 @@ jobs:
instrumented: false
primary: true
stdenv: stdenv
withAWS: true
withCurlS3: false
# TODO: remove once curl-based-s3 fully lands
- scenario: on ubuntu (no s3)
runs-on: ubuntu-24.04
os: linux
instrumented: false
primary: false
stdenv: stdenv
withAWS: false
withCurlS3: false
# TODO: remove once curl-based-s3 fully lands
- scenario: on ubuntu (curl s3)
runs-on: ubuntu-24.04
os: linux
instrumented: false
primary: false
stdenv: stdenv
withAWS: false
withCurlS3: true
- scenario: on macos
runs-on: macos-14
os: darwin
instrumented: false
primary: true
stdenv: stdenv
withAWS: true
withCurlS3: false
- scenario: on ubuntu (with sanitizers / coverage)
runs-on: ubuntu-24.04
os: linux
instrumented: true
primary: false
stdenv: clangStdenv
withAWS: true
withCurlS3: false
name: tests ${{ matrix.scenario }}
runs-on: ${{ matrix.runs-on }}
timeout-minutes: 60
@ -74,7 +125,17 @@ jobs:
run: |
nix build --file ci/gha/tests/wrapper.nix componentTests -L \
--arg withInstrumentation ${{ matrix.instrumented }} \
--argstr stdenv "${{ matrix.stdenv }}"
--argstr stdenv "${{ matrix.stdenv }}" \
${{ format('--arg withAWS {0}', matrix.withAWS) }} \
${{ format('--arg withCurlS3 {0}', matrix.withCurlS3) }}
- name: Run VM tests
run: |
nix build --file ci/gha/tests/wrapper.nix vmTests -L \
--arg withInstrumentation ${{ matrix.instrumented }} \
--argstr stdenv "${{ matrix.stdenv }}" \
${{ format('--arg withAWS {0}', matrix.withAWS) }} \
${{ format('--arg withCurlS3 {0}', matrix.withCurlS3) }}
if: ${{ matrix.os == 'linux' }}
- name: Run flake checks and prepare the installer tarball
run: |
ci/gha/tests/build-checks
@ -85,6 +146,8 @@ jobs:
nix build --file ci/gha/tests/wrapper.nix codeCoverage.coverageReports -L \
--arg withInstrumentation ${{ matrix.instrumented }} \
--argstr stdenv "${{ matrix.stdenv }}" \
${{ format('--arg withAWS {0}', matrix.withAWS) }} \
${{ format('--arg withCurlS3 {0}', matrix.withCurlS3) }} \
--out-link coverage-reports
cat coverage-reports/index.txt >> $GITHUB_STEP_SUMMARY
if: ${{ matrix.instrumented }}
@ -110,9 +173,19 @@ jobs:
- scenario: on ubuntu
runs-on: ubuntu-24.04
os: linux
experimental-installer: false
- scenario: on macos
runs-on: macos-14
os: darwin
experimental-installer: false
- scenario: on ubuntu (experimental)
runs-on: ubuntu-24.04
os: linux
experimental-installer: true
- scenario: on macos (experimental)
runs-on: macos-14
os: darwin
experimental-installer: true
name: installer test ${{ matrix.scenario }}
runs-on: ${{ matrix.runs-on }}
steps:
@ -124,11 +197,22 @@ jobs:
path: out
- name: Looking up the installer tarball URL
id: installer-tarball-url
run: echo "installer-url=file://$GITHUB_WORKSPACE/out" >> "$GITHUB_OUTPUT"
run: |
echo "installer-url=file://$GITHUB_WORKSPACE/out" >> "$GITHUB_OUTPUT"
TARBALL_PATH="$(find "$GITHUB_WORKSPACE/out" -name 'nix*.tar.xz' -print | head -n 1)"
echo "tarball-path=file://$TARBALL_PATH" >> "$GITHUB_OUTPUT"
- uses: cachix/install-nix-action@v31
if: ${{ !matrix.experimental-installer }}
with:
install_url: ${{ format('{0}/install', steps.installer-tarball-url.outputs.installer-url) }}
install_options: ${{ format('--tarball-url-prefix {0}', steps.installer-tarball-url.outputs.installer-url) }}
- uses: ./.github/actions/install-nix-action
if: ${{ matrix.experimental-installer }}
with:
dogfood: false
experimental-installer: true
tarball_url: ${{ steps.installer-tarball-url.outputs.tarball-path }}
github_token: ${{ secrets.GITHUB_TOKEN }}
- run: sudo apt install fish zsh
if: matrix.os == 'linux'
- run: brew install fish
@ -160,7 +244,7 @@ jobs:
echo "docker=${{ env._DOCKER_SECRETS != '' }}" >> $GITHUB_OUTPUT
docker_push_image:
needs: [tests, vm_tests, check_secrets]
needs: [tests, check_secrets]
permissions:
contents: read
packages: write
@ -213,27 +297,8 @@ jobs:
docker tag nix:$NIX_VERSION $IMAGE_ID:master
docker push $IMAGE_ID:master
vm_tests:
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@v5
- uses: ./.github/actions/install-nix-action
with:
dogfood: ${{ github.event_name == 'workflow_dispatch' && inputs.dogfood || github.event_name != 'workflow_dispatch' }}
extra_nix_config:
experimental-features = nix-command flakes
github_token: ${{ secrets.GITHUB_TOKEN }}
- uses: DeterminateSystems/magic-nix-cache-action@main
- run: |
nix build -L \
.#hydraJobs.tests.functional_user \
.#hydraJobs.tests.githubFlakes \
.#hydraJobs.tests.nix-docker \
.#hydraJobs.tests.tarballFlakes \
;
flake_regressions:
needs: vm_tests
needs: tests
runs-on: ubuntu-24.04
steps:
- name: Checkout nix

View file

@ -1,174 +0,0 @@
queue_rules:
- name: default
# all required tests need to go here
merge_conditions:
- check-success=tests on macos
- check-success=tests on ubuntu
- check-success=installer test on macos
- check-success=installer test on ubuntu
- check-success=vm_tests
batch_size: 5
pull_request_rules:
- name: merge using the merge queue
conditions:
- base~=master|.+-maintenance
- label~=merge-queue|dependencies
actions:
queue: {}
# The rules below will first create backport pull requests and put those in a merge queue.
- name: backport patches to 2.18
conditions:
- label=backport 2.18-maintenance
actions:
backport:
branches:
- 2.18-maintenance
labels:
- automatic backport
- merge-queue
- name: backport patches to 2.19
conditions:
- label=backport 2.19-maintenance
actions:
backport:
branches:
- 2.19-maintenance
labels:
- automatic backport
- merge-queue
- name: backport patches to 2.20
conditions:
- label=backport 2.20-maintenance
actions:
backport:
branches:
- 2.20-maintenance
labels:
- automatic backport
- merge-queue
- name: backport patches to 2.21
conditions:
- label=backport 2.21-maintenance
actions:
backport:
branches:
- 2.21-maintenance
labels:
- automatic backport
- merge-queue
- name: backport patches to 2.22
conditions:
- label=backport 2.22-maintenance
actions:
backport:
branches:
- 2.22-maintenance
labels:
- automatic backport
- merge-queue
- name: backport patches to 2.23
conditions:
- label=backport 2.23-maintenance
actions:
backport:
branches:
- 2.23-maintenance
labels:
- automatic backport
- merge-queue
- name: backport patches to 2.24
conditions:
- label=backport 2.24-maintenance
actions:
backport:
branches:
- "2.24-maintenance"
labels:
- automatic backport
- merge-queue
- name: backport patches to 2.25
conditions:
- label=backport 2.25-maintenance
actions:
backport:
branches:
- "2.25-maintenance"
labels:
- automatic backport
- merge-queue
- name: backport patches to 2.26
conditions:
- label=backport 2.26-maintenance
actions:
backport:
branches:
- "2.26-maintenance"
labels:
- automatic backport
- merge-queue
- name: backport patches to 2.27
conditions:
- label=backport 2.27-maintenance
actions:
backport:
branches:
- "2.27-maintenance"
labels:
- automatic backport
- merge-queue
- name: backport patches to 2.28
conditions:
- label=backport 2.28-maintenance
actions:
backport:
branches:
- "2.28-maintenance"
labels:
- automatic backport
- merge-queue
- name: backport patches to 2.29
conditions:
- label=backport 2.29-maintenance
actions:
backport:
branches:
- "2.29-maintenance"
labels:
- automatic backport
- merge-queue
- name: backport patches to 2.30
conditions:
- label=backport 2.30-maintenance
actions:
backport:
branches:
- "2.30-maintenance"
labels:
- automatic backport
- merge-queue
- name: backport patches to 2.31
conditions:
- label=backport 2.31-maintenance
actions:
backport:
branches:
- "2.31-maintenance"
labels:
- automatic backport
- merge-queue

View file

@ -1 +1 @@
2.32.0
2.33.0

View file

@ -12,6 +12,8 @@
componentTestsPrefix ? "",
withSanitizers ? false,
withCoverage ? false,
withAWS ? null,
withCurlS3 ? null,
...
}:
@ -21,16 +23,6 @@ let
packages' = nixFlake.packages.${system};
stdenv = (getStdenv pkgs);
enableSanitizersLayer = finalAttrs: prevAttrs: {
mesonFlags =
(prevAttrs.mesonFlags or [ ])
++ [ (lib.mesonOption "b_sanitize" "address,undefined") ]
++ (lib.optionals stdenv.cc.isClang [
# https://www.github.com/mesonbuild/meson/issues/764
(lib.mesonBool "b_lundef" false)
]);
};
collectCoverageLayer = finalAttrs: prevAttrs: {
env =
let
@ -53,24 +45,39 @@ let
'';
};
componentOverrides =
(lib.optional withSanitizers enableSanitizersLayer)
++ (lib.optional withCoverage collectCoverageLayer);
componentOverrides = (lib.optional withCoverage collectCoverageLayer);
in
rec {
nixComponentsInstrumented = nixComponents.overrideScope (
final: prev: {
withASan = withSanitizers;
withUBSan = withSanitizers;
nix-store-tests = prev.nix-store-tests.override { withBenchmarks = true; };
# Boehm is incompatible with ASAN.
nix-expr = prev.nix-expr.override { enableGC = !withSanitizers; };
# Override AWS configuration if specified
nix-store = prev.nix-store.override (
lib.optionalAttrs (withAWS != null) { inherit withAWS; }
// lib.optionalAttrs (withCurlS3 != null) { inherit withCurlS3; }
);
mesonComponentOverrides = lib.composeManyExtensions componentOverrides;
# Unclear how to make Perl bindings work with a dynamically linked ASAN.
nix-perl-bindings = if withSanitizers then null else prev.nix-perl-bindings;
}
);
# Import NixOS tests using the instrumented components
nixosTests = import ../../../tests/nixos {
inherit lib pkgs;
nixComponents = nixComponentsInstrumented;
nixpkgs = nixFlake.inputs.nixpkgs;
inherit (nixFlake.inputs) nixpkgs-23-11;
};
/**
Top-level tests for the flake outputs, as they would be built by hydra.
These tests generally can't be overridden to run with sanitizers.
@ -221,4 +228,28 @@ rec {
{
inherit coverageProfileDrvs mergedProfdata coverageReports;
};
vmTests = {
}
# FIXME: when the curlS3 implementation is complete, it should also enable these tests.
// lib.optionalAttrs (withAWS == true) {
# S3 binary cache store test only runs when S3 support is enabled
inherit (nixosTests) s3-binary-cache-store;
}
// lib.optionalAttrs (withCurlS3 == true) {
# S3 binary cache store test using curl implementation
inherit (nixosTests) curl-s3-binary-cache-store;
}
// lib.optionalAttrs (!withSanitizers && !withCoverage) {
# evalNixpkgs uses non-instrumented components from hydraJobs, so only run it
# when not testing with sanitizers to avoid rebuilding nix
inherit (hydraJobs.tests) evalNixpkgs;
# FIXME: CI times out when building vm tests instrumented
inherit (nixosTests)
functional_user
githubFlakes
nix-docker
tarballFlakes
;
};
}

24
ci/gha/tests/pre-commit-checks Executable file
View file

@ -0,0 +1,24 @@
#!/usr/bin/env bash
# Run the flake's pre-commit check for the current system, wrapping the
# output in a GitHub Actions log group and emitting an ::error annotation
# plus a human-readable hint when the check fails.
set -euo pipefail

# Ask Nix which system we are on (e.g. x86_64-linux) to select the check.
system=$(nix eval --raw --impure --expr builtins.currentSystem)

echo "::group::Running pre-commit checks"
if ! nix build ".#checks.$system.pre-commit" -L; then
    echo "::error ::Changes do not pass pre-commit checks"
    cat <<EOF
The code isn't formatted or doesn't pass lints. You can run pre-commit locally with:
nix develop -c ./maintainers/format.sh
EOF
    echo "::endgroup::"
    exit 1
fi
echo "::endgroup::"
exit 0

View file

@ -5,6 +5,8 @@
stdenv ? "stdenv",
componentTestsPrefix ? "",
withInstrumentation ? false,
withAWS ? null,
withCurlS3 ? null,
}@args:
import ./. (
args
@ -12,5 +14,6 @@ import ./. (
getStdenv = p: p.${stdenv};
withSanitizers = withInstrumentation;
withCoverage = withInstrumentation;
inherit withAWS withCurlS3;
}
)

View file

@ -15,7 +15,6 @@ pymod = import('python')
python = pymod.find_installation('python3')
nix_env_for_docs = {
'ASAN_OPTIONS' : 'abort_on_error=1:print_summary=1:detect_leaks=0',
'HOME' : '/dummy',
'NIX_CONF_DIR' : '/dummy',
'NIX_SSL_CERT_FILE' : '/dummy/no-ca-bundle.crt',

View file

@ -1,7 +0,0 @@
---
synopsis: "C API: `nix_get_attr_name_byidx`, `nix_get_attr_byidx` take a `nix_value *` instead of `const nix_value *`"
prs: [13987]
---
In order to accommodate a more optimized internal representation of attribute set merges these functions require
a mutable `nix_value *` that might be modified on access. This does *not* break the ABI of these functions.

View file

@ -1,17 +0,0 @@
---
synopsis: Derivation JSON format now uses store path basenames (no store dir) only
prs: [13980]
issues: [13570]
---
Experience with many JSON frameworks (e.g. nlohmann/json in C++, Serde
in Rust, and Aeson in Haskell) has shown that the use of the store dir
in JSON formats is an impediment to systematic JSON formats, because it
requires the serializer/deserializer to take an extra parameter (the
store dir).
We ultimately want to rectify this issue with all (non-stable, able to
be changed) JSON formats. To start with, we are changing the JSON format
for derivations because the `nix derivation` commands are --- in
addition to being formally unstable --- less widely used than other
unstable commands.

View file

@ -1,6 +0,0 @@
---
synopsis: "Removed support for daemons and clients older than Nix 2.0"
prs: [13951]
---
We have dropped support in the daemon worker protocol for daemons and clients that don't speak at least version 18 of the protocol. This first Nix release that supports this version is Nix 2.0, released in February 2018.

View file

@ -1,9 +0,0 @@
---
synopsis: "`nix flake check` now skips derivations that can be substituted"
prs: [13574]
---
Previously, `nix flake check` would evaluate and build/substitute all
derivations. Now, it will skip downloading derivations that can be substituted.
This can drastically decrease the time invocations take in environments where
checks may already be cached (like in CI).

View file

@ -1,6 +0,0 @@
---
synopsis: "Temporary build directories no longer include derivation names"
prs: [13839]
---
Temporary build directories created during derivation builds no longer include the derivation name in their path to avoid build failures when the derivation name is too long. This change ensures predictable prefix lengths for build directories under `/nix/var/nix/builds`.

View file

@ -138,6 +138,7 @@
- [Contributing](development/contributing.md)
- [Releases](release-notes/index.md)
{{#include ./SUMMARY-rl-next.md}}
- [Release 2.32 (2025-10-06)](release-notes/rl-2.32.md)
- [Release 2.31 (2025-08-21)](release-notes/rl-2.31.md)
- [Release 2.30 (2025-07-07)](release-notes/rl-2.30.md)
- [Release 2.29 (2025-05-14)](release-notes/rl-2.29.md)

View file

@ -23,7 +23,7 @@ $ nix-shell
To get a shell with one of the other [supported compilation environments](#compilation-environments):
```console
$ nix-shell --attr devShells.x86_64-linux.native-clangStdenvPackages
$ nix-shell --attr devShells.x86_64-linux.native-clangStdenv
```
> **Note**

View file

@ -25,7 +25,7 @@ is a JSON object with the following fields:
- Version 2: Separate `method` and `hashAlgo` fields in output specs
- Verison 3: Drop store dir from store paths, just include base name.
- Version 3: Drop store dir from store paths, just include base name.
Note that while this format is experimental, the maintenance of versions is best-effort, and not promised to identify every change.
@ -116,5 +116,5 @@ is a JSON object with the following fields:
The environment passed to the `builder`.
* `structuredAttrs`:
[Strucutured Attributes](@docroot@/store/derivation/index.md#structured-attrs), only defined if the derivation contains them.
[Structured Attributes](@docroot@/store/derivation/index.md#structured-attrs), only defined if the derivation contains them.
Structured attributes are JSON, and thus embedded as-is.

View file

@ -0,0 +1,130 @@
# Release 2.32.0 (2025-10-06)
## Incompatible changes
- Removed support for daemons and clients older than Nix 2.0 [#13951](https://github.com/NixOS/nix/pull/13951)
We have dropped support in the daemon worker protocol for daemons and clients that don't speak at least version 18 of the protocol. This first Nix release that supports this version is Nix 2.0, released in February 2018.
- Derivation JSON format now uses store path basenames only [#13570](https://github.com/NixOS/nix/issues/13570) [#13980](https://github.com/NixOS/nix/pull/13980)
Experience with many JSON frameworks (e.g. nlohmann/json in C++, Serde in Rust, and Aeson in Haskell) has shown that the use of the store directory in JSON formats is an impediment to systematic JSON formats, because it requires the serializer/deserializer to take an extra parameter (the store directory).
We ultimately want to rectify this issue with all JSON formats to the extent allowed by our stability promises. To start with, we are changing the JSON format for derivations because the `nix derivation` commands are — in addition to being formally unstable — less widely used than other unstable commands.
See the documentation on the [JSON format for derivations](@docroot@/protocols/json/derivation.md) for further details.
- C API: `nix_get_attr_name_byidx`, `nix_get_attr_byidx` take a `nix_value *` instead of `const nix_value *` [#13987](https://github.com/NixOS/nix/pull/13987)
In order to accommodate a more optimized internal representation of attribute set merges these functions require
a mutable `nix_value *` that might be modified on access. This does *not* break the ABI of these functions.
## New features
- C API: Add lazy attribute and list item accessors [#14030](https://github.com/NixOS/nix/pull/14030)
The C API now includes lazy accessor functions for retrieving values from lists and attribute sets without forcing evaluation:
- `nix_get_list_byidx_lazy()` - Get a list element without forcing its evaluation
- `nix_get_attr_byname_lazy()` - Get an attribute value by name without forcing evaluation
- `nix_get_attr_byidx_lazy()` - Get an attribute by index without forcing evaluation
These functions are useful when forwarding unevaluated sub-values to other lists, attribute sets, or function calls. They allow more efficient handling of Nix values by deferring evaluation until actually needed.
Additionally, bounds checking has been improved for all `_byidx` functions to properly validate indices before access, preventing potential out-of-bounds errors.
The documentation for `NIX_ERR_KEY` error handling has also been clarified to specify when this error code is returned.
- HTTP binary caches now support transparent compression for metadata
HTTP binary cache stores can now compress `.narinfo`, `.ls`, and build log files before uploading them,
reducing bandwidth usage and storage requirements. The compression is applied transparently using the
`Content-Encoding` header, allowing compatible clients to automatically decompress the files.
Three new configuration options control this behavior:
- `narinfo-compression`: Compression method for `.narinfo` files
- `ls-compression`: Compression method for `.ls` files
- `log-compression`: Compression method for build logs in `log/` directory
Example usage:
```
nix copy --to 'http://cache.example.com?narinfo-compression=gzip&ls-compression=gzip' /nix/store/...
nix store copy-log --to 'http://cache.example.com?log-compression=br' /nix/store/...
```
- Temporary build directories no longer include derivation names [#13839](https://github.com/NixOS/nix/pull/13839)
Temporary build directories created during derivation builds no longer include the derivation name in their path to avoid build failures when the derivation name is too long. This change ensures predictable prefix lengths for build directories under `/nix/var/nix/builds`.
- External derivation builders [#14145](https://github.com/NixOS/nix/pull/14145)
These are helper programs that Nix calls to perform derivations for specified system types, e.g. by using QEMU to emulate a different type of platform. For more information, see the [`external-builders` setting](../command-ref/conf-file.md#conf-external-builders).
This is currently an experimental feature.
## Performance improvements
- Optimize memory usage of attribute set merges [#13987](https://github.com/NixOS/nix/pull/13987)
[Attribute set update operations](@docroot@/language/operators.md#update) have been optimized to
reduce reallocations in cases when the second operand is small.
For typical evaluations of nixpkgs this optimization leads to ~20% less memory allocated in total
without significantly affecting evaluation performance.
See [eval-attrset-update-layer-rhs-threshold](@docroot@/command-ref/conf-file.md#conf-eval-attrset-update-layer-rhs-threshold)
- Substituted flake inputs are no longer re-copied to the store [#14041](https://github.com/NixOS/nix/pull/14041)
Since 2.25, Nix would fail to store a cache entry for substituted flake inputs, which in turn would cause them to be re-copied to the store on initial evaluation. Caching these inputs results in a near doubling of performance in some cases — especially on I/O-bound machines and when using commands that fetch many inputs, like `nix flake [archive|prefetch-inputs]`.
- `nix flake check` now skips derivations that can be substituted [#13574](https://github.com/NixOS/nix/pull/13574)
Previously, `nix flake check` would evaluate and build/substitute all
derivations. Now, it will skip downloading derivations that can be substituted.
This can drastically decrease the time invocations take in environments where
checks may already be cached (like in CI).
- `fetchTarball` and `fetchurl` now correctly substitute (#14138)
At some point we stopped substituting calls to `fetchTarball` and `fetchurl` with a set `narHash` to avoid incorrectly substituting things in `fetchTree`, even though it would be safe to substitute when calling the legacy `fetch{Tarball,url}`. This fixes that regression where it is safe.
- Started moving AST allocations into a bump allocator [#14088](https://github.com/NixOS/nix/issues/14088)
This leaves smaller, immutable structures in the AST. So far this saves about 2% memory on a NixOS config evaluation.
## Contributors
This release was made possible by the following 32 contributors:
- Farid Zakaria [**(@fzakaria)**](https://github.com/fzakaria)
- dram [**(@dramforever)**](https://github.com/dramforever)
- Ephraim Siegfried [**(@EphraimSiegfried)**](https://github.com/EphraimSiegfried)
- Robert Hensing [**(@roberth)**](https://github.com/roberth)
- Taeer Bar-Yam [**(@Radvendii)**](https://github.com/Radvendii)
- Emily [**(@emilazy)**](https://github.com/emilazy)
- Jens Petersen [**(@juhp)**](https://github.com/juhp)
- Bernardo Meurer [**(@lovesegfault)**](https://github.com/lovesegfault)
- Jörg Thalheim [**(@Mic92)**](https://github.com/Mic92)
- Leandro Emmanuel Reina Kiperman [**(@kip93)**](https://github.com/kip93)
- Marie [**(@NyCodeGHG)**](https://github.com/NyCodeGHG)
- Ethan Evans [**(@ethanavatar)**](https://github.com/ethanavatar)
- Yaroslav Bolyukin [**(@CertainLach)**](https://github.com/CertainLach)
- Matej Urbas [**(@urbas)**](https://github.com/urbas)
- Jami Kettunen [**(@JamiKettunen)**](https://github.com/JamiKettunen)
- Clayton [**(@netadr)**](https://github.com/netadr)
- Grégory Marti [**(@gmarti)**](https://github.com/gmarti)
- Eelco Dolstra [**(@edolstra)**](https://github.com/edolstra)
- rszyma [**(@rszyma)**](https://github.com/rszyma)
- Philip Wilk [**(@philipwilk)**](https://github.com/philipwilk)
- John Ericson [**(@Ericson2314)**](https://github.com/Ericson2314)
- Tom Westerhout [**(@twesterhout)**](https://github.com/twesterhout)
- Tristan Ross [**(@RossComputerGuy)**](https://github.com/RossComputerGuy)
- Sergei Zimmerman [**(@xokdvium)**](https://github.com/xokdvium)
- Jean-François Roche [**(@jfroche)**](https://github.com/jfroche)
- Seth Flynn [**(@getchoo)**](https://github.com/getchoo)
- éclairevoyant [**(@eclairevoyant)**](https://github.com/eclairevoyant)
- Glen Huang [**(@hgl)**](https://github.com/hgl)
- osman - オスマン [**(@osbm)**](https://github.com/osbm)
- David McFarland [**(@corngood)**](https://github.com/corngood)
- Cole Helbling [**(@cole-h)**](https://github.com/cole-h)
- Sinan Mohd [**(@sinanmohd)**](https://github.com/sinanmohd)
- Philipp Otterbein

View file

@ -106,7 +106,7 @@ The system type on which the [`builder`](#attr-builder) executable is meant to b
A necessary condition for Nix to schedule a given derivation on some [Nix instance] is for the "system" of that derivation to match that instance's [`system` configuration option] or [`extra-platforms` configuration option].
By putting the `system` in each derivation, Nix allows *heterogenous* build plans, where not all steps can be run on the same machine or same sort of machine.
By putting the `system` in each derivation, Nix allows *heterogeneous* build plans, where not all steps can be run on the same machine or same sort of machine.
Nix can schedule builds such that it automatically builds on other platforms by [forwarding build requests](@docroot@/advanced-topics/distributed-builds.md) to other Nix instances.
[`system` configuration option]: @docroot@/command-ref/conf-file.md#conf-system

View file

@ -203,5 +203,26 @@
"ConnorBaker01@Gmail.com": "ConnorBaker",
"jsoo1@asu.edu": "jsoo1",
"hsngrmpf+github@gmail.com": "DavHau",
"matthew@floxdev.com": "mkenigs"
"matthew@floxdev.com": "mkenigs",
"taeer@bar-yam.me": "Radvendii",
"beme@anthropic.com": "lovesegfault",
"osbm@osbm.dev": "osbm",
"jami.kettunen@protonmail.com": "JamiKettunen",
"ephraim.siegfried@hotmail.com": "EphraimSiegfried",
"rszyma.dev@gmail.com": "rszyma",
"tristan.ross@determinate.systems": "RossComputerGuy",
"corngood@gmail.com": "corngood",
"jfroche@pyxel.be": "jfroche",
"848000+eclairevoyant@users.noreply.github.com": "eclairevoyant",
"petersen@redhat.com": "juhp",
"dramforever@live.com": "dramforever",
"me@glenhuang.com": "hgl",
"philip.wilk@fivium.co.uk": "philipwilk",
"me@nycode.dev": "NyCodeGHG",
"14264576+twesterhout@users.noreply.github.com": "twesterhout",
"sinan@sinanmohd.com": "sinanmohd",
"42688647+netadr@users.noreply.github.com": "netadr",
"matej.urbas@gmail.com": "urbas",
"ethanalexevans@gmail.com": "ethanavatar",
"greg.marti@gmail.com": "gmarti"
}

View file

@ -177,5 +177,24 @@
"avnik": "Alexander V. Nikolaev",
"DavHau": null,
"aln730": "AGawas",
"vog": "Volker Diels-Grabsch"
"vog": "Volker Diels-Grabsch",
"corngood": "David McFarland",
"twesterhout": "Tom Westerhout",
"JamiKettunen": "Jami Kettunen",
"dramforever": "dram",
"philipwilk": "Philip Wilk",
"netadr": "Clayton",
"NyCodeGHG": "Marie",
"jfroche": "Jean-Fran\u00e7ois Roche",
"urbas": "Matej Urbas",
"osbm": "osman - \u30aa\u30b9\u30de\u30f3",
"rszyma": null,
"eclairevoyant": "\u00e9clairevoyant",
"Radvendii": "Taeer Bar-Yam",
"sinanmohd": "Sinan Mohd",
"ethanavatar": "Ethan Evans",
"gmarti": "Gr\u00e9gory Marti",
"lovesegfault": "Bernardo Meurer",
"EphraimSiegfried": "Ephraim Siegfried",
"hgl": "Glen Huang"
}

View file

@ -104,151 +104,6 @@
};
shellcheck = {
enable = true;
excludes = [
# We haven't linted these files yet
''^config/install-sh$''
''^misc/bash/completion\.sh$''
''^misc/fish/completion\.fish$''
''^misc/zsh/completion\.zsh$''
''^scripts/create-darwin-volume\.sh$''
''^scripts/install-darwin-multi-user\.sh$''
''^scripts/install-multi-user\.sh$''
''^scripts/install-systemd-multi-user\.sh$''
''^src/nix/get-env\.sh$''
''^tests/functional/ca/build-dry\.sh$''
''^tests/functional/ca/build-with-garbage-path\.sh$''
''^tests/functional/ca/common\.sh$''
''^tests/functional/ca/concurrent-builds\.sh$''
''^tests/functional/ca/eval-store\.sh$''
''^tests/functional/ca/gc\.sh$''
''^tests/functional/ca/import-from-derivation\.sh$''
''^tests/functional/ca/new-build-cmd\.sh$''
''^tests/functional/ca/nix-shell\.sh$''
''^tests/functional/ca/post-hook\.sh$''
''^tests/functional/ca/recursive\.sh$''
''^tests/functional/ca/repl\.sh$''
''^tests/functional/ca/selfref-gc\.sh$''
''^tests/functional/ca/why-depends\.sh$''
''^tests/functional/characterisation-test-infra\.sh$''
''^tests/functional/common/vars-and-functions\.sh$''
''^tests/functional/completions\.sh$''
''^tests/functional/compute-levels\.sh$''
''^tests/functional/config\.sh$''
''^tests/functional/db-migration\.sh$''
''^tests/functional/debugger\.sh$''
''^tests/functional/dependencies\.builder0\.sh$''
''^tests/functional/dependencies\.sh$''
''^tests/functional/dump-db\.sh$''
''^tests/functional/dyn-drv/build-built-drv\.sh$''
''^tests/functional/dyn-drv/common\.sh$''
''^tests/functional/dyn-drv/dep-built-drv\.sh$''
''^tests/functional/dyn-drv/eval-outputOf\.sh$''
''^tests/functional/dyn-drv/old-daemon-error-hack\.sh$''
''^tests/functional/dyn-drv/recursive-mod-json\.sh$''
''^tests/functional/eval-store\.sh$''
''^tests/functional/export-graph\.sh$''
''^tests/functional/export\.sh$''
''^tests/functional/extra-sandbox-profile\.sh$''
''^tests/functional/fetchClosure\.sh$''
''^tests/functional/fetchGit\.sh$''
''^tests/functional/fetchGitRefs\.sh$''
''^tests/functional/fetchGitSubmodules\.sh$''
''^tests/functional/fetchGitVerification\.sh$''
''^tests/functional/fetchMercurial\.sh$''
''^tests/functional/fixed\.builder1\.sh$''
''^tests/functional/fixed\.builder2\.sh$''
''^tests/functional/fixed\.sh$''
''^tests/functional/flakes/absolute-paths\.sh$''
''^tests/functional/flakes/check\.sh$''
''^tests/functional/flakes/config\.sh$''
''^tests/functional/flakes/flakes\.sh$''
''^tests/functional/flakes/follow-paths\.sh$''
''^tests/functional/flakes/prefetch\.sh$''
''^tests/functional/flakes/run\.sh$''
''^tests/functional/flakes/show\.sh$''
''^tests/functional/formatter\.sh$''
''^tests/functional/formatter\.simple\.sh$''
''^tests/functional/gc-auto\.sh$''
''^tests/functional/gc-concurrent\.builder\.sh$''
''^tests/functional/gc-concurrent\.sh$''
''^tests/functional/gc-concurrent2\.builder\.sh$''
''^tests/functional/gc-non-blocking\.sh$''
''^tests/functional/hash-convert\.sh$''
''^tests/functional/impure-derivations\.sh$''
''^tests/functional/impure-eval\.sh$''
''^tests/functional/install-darwin\.sh$''
''^tests/functional/legacy-ssh-store\.sh$''
''^tests/functional/linux-sandbox\.sh$''
''^tests/functional/local-overlay-store/add-lower-inner\.sh$''
''^tests/functional/local-overlay-store/add-lower\.sh$''
''^tests/functional/local-overlay-store/bad-uris\.sh$''
''^tests/functional/local-overlay-store/build-inner\.sh$''
''^tests/functional/local-overlay-store/build\.sh$''
''^tests/functional/local-overlay-store/check-post-init-inner\.sh$''
''^tests/functional/local-overlay-store/check-post-init\.sh$''
''^tests/functional/local-overlay-store/common\.sh$''
''^tests/functional/local-overlay-store/delete-duplicate-inner\.sh$''
''^tests/functional/local-overlay-store/delete-duplicate\.sh$''
''^tests/functional/local-overlay-store/delete-refs-inner\.sh$''
''^tests/functional/local-overlay-store/delete-refs\.sh$''
''^tests/functional/local-overlay-store/gc-inner\.sh$''
''^tests/functional/local-overlay-store/gc\.sh$''
''^tests/functional/local-overlay-store/optimise-inner\.sh$''
''^tests/functional/local-overlay-store/optimise\.sh$''
''^tests/functional/local-overlay-store/redundant-add-inner\.sh$''
''^tests/functional/local-overlay-store/redundant-add\.sh$''
''^tests/functional/local-overlay-store/remount\.sh$''
''^tests/functional/local-overlay-store/stale-file-handle-inner\.sh$''
''^tests/functional/local-overlay-store/stale-file-handle\.sh$''
''^tests/functional/local-overlay-store/verify-inner\.sh$''
''^tests/functional/local-overlay-store/verify\.sh$''
''^tests/functional/logging\.sh$''
''^tests/functional/misc\.sh$''
''^tests/functional/multiple-outputs\.sh$''
''^tests/functional/nested-sandboxing\.sh$''
''^tests/functional/nested-sandboxing/command\.sh$''
''^tests/functional/nix-build\.sh$''
''^tests/functional/nix-channel\.sh$''
''^tests/functional/nix-collect-garbage-d\.sh$''
''^tests/functional/nix-copy-ssh-common\.sh$''
''^tests/functional/nix-copy-ssh-ng\.sh$''
''^tests/functional/nix-copy-ssh\.sh$''
''^tests/functional/nix-daemon-untrusting\.sh$''
''^tests/functional/nix-profile\.sh$''
''^tests/functional/nix-shell\.sh$''
''^tests/functional/nix_path\.sh$''
''^tests/functional/optimise-store\.sh$''
''^tests/functional/output-normalization\.sh$''
''^tests/functional/parallel\.builder\.sh$''
''^tests/functional/parallel\.sh$''
''^tests/functional/pass-as-file\.sh$''
''^tests/functional/path-from-hash-part\.sh$''
''^tests/functional/path-info\.sh$''
''^tests/functional/placeholders\.sh$''
''^tests/functional/post-hook\.sh$''
''^tests/functional/pure-eval\.sh$''
''^tests/functional/push-to-store-old\.sh$''
''^tests/functional/push-to-store\.sh$''
''^tests/functional/read-only-store\.sh$''
''^tests/functional/readfile-context\.sh$''
''^tests/functional/recursive\.sh$''
''^tests/functional/referrers\.sh$''
''^tests/functional/remote-store\.sh$''
''^tests/functional/repair\.sh$''
''^tests/functional/restricted\.sh$''
''^tests/functional/search\.sh$''
''^tests/functional/secure-drv-outputs\.sh$''
''^tests/functional/selfref-gc\.sh$''
''^tests/functional/shell\.shebang\.sh$''
''^tests/functional/simple\.builder\.sh$''
''^tests/functional/supplementary-groups\.sh$''
''^tests/functional/toString-path\.sh$''
''^tests/functional/user-envs-migration\.sh$''
''^tests/functional/user-envs-test-case\.sh$''
''^tests/functional/user-envs\.builder\.sh$''
''^tests/functional/user-envs\.sh$''
''^tests/functional/why-depends\.sh$''
];
};
};
};

View file

@ -142,7 +142,6 @@ release:
$ git pull
$ NEW_VERSION=2.13.0
$ echo $NEW_VERSION > .version
$ ... edit .mergify.yml to add the previous version ...
$ git checkout -b bump-$NEW_VERSION
$ git commit -a -m 'Bump version'
$ git push --set-upstream origin bump-$NEW_VERSION

View file

@ -1,3 +1,4 @@
# shellcheck shell=bash
function _complete_nix {
local -a words
local cword cur

View file

@ -1,3 +1,4 @@
# shellcheck disable=all
function _nix_complete
# Get the current command up to a cursor.
# - Behaves correctly even with pipes and nested in commands like env.

View file

@ -1,3 +1,4 @@
# shellcheck disable=all
#compdef nix
function _nix() {

View file

@ -0,0 +1,6 @@
// Default AddressSanitizer options for Nix binaries.
//
// `__asan_default_options` is the hook ASan's runtime queries at startup for
// baked-in defaults. `gnu::weak` allows another translation unit (or a user
// build) to override this definition; `gnu::retain` keeps the symbol from
// being garbage-collected by the linker even though nothing in the program
// references it directly — presumably only the ASan runtime looks it up.
extern "C" [[gnu::retain, gnu::weak]] const char * __asan_default_options()
{
    // We leak a bunch of memory knowingly on purpose. It's not worthwhile to
    // diagnose that memory being leaked for now.
    // abort_on_error=1 + print_summary=1 make failures fatal and loud;
    // detect_odr_violation=0 disables ODR checking — NOTE(review): the reason
    // is not recorded here, likely false positives from shared libs; confirm.
    return "abort_on_error=1:print_summary=1:detect_leaks=0:detect_odr_violation=0";
}

View file

@ -1,7 +1,3 @@
asan_test_options_env = {
'ASAN_OPTIONS' : 'abort_on_error=1:print_summary=1:detect_leaks=0',
}
# Clang gets grumpy about missing libasan symbols if -shared-libasan is not
# passed when building shared libs, at least on Linux
if cxx.get_id() == 'clang' and ('address' in get_option('b_sanitize') or 'undefined' in get_option(
@ -10,3 +6,6 @@ if cxx.get_id() == 'clang' and ('address' in get_option('b_sanitize') or 'undefi
add_project_link_arguments('-shared-libasan', language : 'cpp')
endif
if 'address' in get_option('b_sanitize')
deps_other += declare_dependency(sources : 'asan-options.cc')
endif

View file

@ -0,0 +1,32 @@
# Probe whether the linker supports `--wrap=__assert_fail`, i.e. diverting
# calls to `__assert_fail` into `__wrap___assert_fail` while keeping the
# original reachable as `__real___assert_fail`. The test program both calls
# assert() (emitting a reference to __assert_fail) and defines the wrapper,
# so a successful *link* proves the whole wrapping mechanism works.
can_wrap_assert_fail_test_code = '''
#include <cstdlib>
#include <cassert>

int main()
{
    assert(0);
}

extern "C" void * __real___assert_fail(const char *, const char *, unsigned int, const char *);

extern "C" void *
__wrap___assert_fail(const char *, const char *, unsigned int, const char *)
{
    return __real___assert_fail(nullptr, nullptr, 0, nullptr);
}
'''

wrap_assert_fail_args = [ '-Wl,--wrap=__assert_fail' ]

# cxx.links() compiles *and* links, which is what actually exercises --wrap.
can_wrap_assert_fail = cxx.links(
  can_wrap_assert_fail_test_code,
  args : wrap_assert_fail_args,
  name : 'linker can wrap __assert_fail',
)

# When supported, pull in the real wrapper implementation and propagate the
# wrap flag to everything that links against this dependency.
if can_wrap_assert_fail
  deps_other += declare_dependency(
    sources : 'wrap-assert-fail.cc',
    link_args : wrap_assert_fail_args,
  )
endif

View file

@ -0,0 +1,17 @@
#include "nix/util/error.hh"

#include <algorithm> // std::min — was previously pulled in only transitively
#include <cstdio>
#include <cstdlib>
#include <cinttypes>
#include <string_view>

/**
 * Weak replacement for libc's `__assert_fail`, substituted by the linker's
 * `--wrap=__assert_fail` option (see the accompanying meson probe). Routes
 * failed `assert()`s through nix::panic instead of libc's bare abort().
 *
 * @param assertion  Text of the failed assertion expression.
 * @param file       Source file of the assert.
 * @param line       Source line of the assert.
 * @param function   Enclosing function name.
 */
extern "C" [[noreturn]] void __attribute__((weak))
__wrap___assert_fail(const char * assertion, const char * file, unsigned int line, const char * function)
{
    // Fixed-size stack buffer: avoid heap allocation here, since the
    // assertion may have fired in a context where the heap is unusable.
    char buf[512];
    // `line` is `unsigned int`, so "%u" is the exactly-matching conversion
    // (PRIuLEAST32 formats uint_least32_t, which only coincidentally matches
    // on common platforms).
    int n = snprintf(buf, sizeof(buf), "Assertion '%s' failed in %s at %s:%u", assertion, function, file, line);
    if (n < 0)
        nix::panic("Assertion failed and could not format error message");
    // On truncation snprintf returns the would-be length (>= sizeof(buf));
    // clamp to sizeof(buf) - 1 so the view excludes the terminating NUL.
    nix::panic(std::string_view(buf, std::min(static_cast<int>(sizeof(buf) - 1), n)));
}

View file

@ -5,6 +5,15 @@ if not (host_machine.system() == 'windows' and cxx.get_id() == 'gcc')
deps_private += dependency('threads')
endif
if host_machine.system() == 'cygwin'
# -std=gnu on cygwin defines 'unix', which conflicts with the namespace
add_project_arguments(
'-D_POSIX_C_SOURCE=200809L',
'-D_GNU_SOURCE',
language : 'cpp',
)
endif
add_project_arguments(
'-Wdeprecated-copy',
'-Werror=suggest-override',
@ -35,3 +44,6 @@ endif
# Darwin ld doesn't like "X.Y.Zpre"
nix_soversion = meson.project_version().split('pre')[0]
subdir('assert-fail')
subdir('asan-options')

View file

@ -164,6 +164,24 @@ let
};
mesonLibraryLayer = finalAttrs: prevAttrs: {
preConfigure =
let
interpositionFlags = [
"-fno-semantic-interposition"
"-Wl,-Bsymbolic-functions"
];
in
      # NOTE: By default GCC disables interprocedural optimizations (in particular inlining) for
# position-independent code and thus shared libraries.
# Since LD_PRELOAD tricks aren't worth losing out on optimizations, we disable it for good.
# This is not the case for Clang, where inlining is done by default even without -fno-semantic-interposition.
# https://reviews.llvm.org/D102453
# https://fedoraproject.org/wiki/Changes/PythonNoSemanticInterpositionSpeedup
prevAttrs.preConfigure or ""
+ lib.optionalString stdenv.cc.isGNU ''
export CFLAGS="''${CFLAGS:-} ${toString interpositionFlags}"
export CXXFLAGS="''${CXXFLAGS:-} ${toString interpositionFlags}"
'';
outputs = prevAttrs.outputs or [ "out" ] ++ [ "dev" ];
};
@ -186,6 +204,25 @@ let
mesonFlags = [ (lib.mesonBool "b_asneeded" false) ] ++ prevAttrs.mesonFlags or [ ];
};
enableSanitizersLayer =
finalAttrs: prevAttrs:
let
sanitizers = lib.optional scope.withASan "address" ++ lib.optional scope.withUBSan "undefined";
in
{
mesonFlags =
(prevAttrs.mesonFlags or [ ])
++ lib.optionals (lib.length sanitizers > 0) (
[
(lib.mesonOption "b_sanitize" (lib.concatStringsSep "," sanitizers))
]
++ (lib.optionals stdenv.cc.isClang [
# https://www.github.com/mesonbuild/meson/issues/764
(lib.mesonBool "b_lundef" false)
])
);
};
nixDefaultsLayer = finalAttrs: prevAttrs: {
strictDeps = prevAttrs.strictDeps or true;
enableParallelBuilding = true;
@ -228,6 +265,16 @@ in
inherit filesetToSource;
/**
Whether meson components are built with [AddressSanitizer](https://clang.llvm.org/docs/AddressSanitizer.html).
*/
withASan = false;
/**
Whether meson components are built with [UndefinedBehaviorSanitizer](https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html).
*/
withUBSan = false;
/**
A user-provided extension function to apply to each component derivation.
*/
@ -314,6 +361,7 @@ in
setVersionLayer
mesonLayer
fixupStaticLayer
enableSanitizersLayer
scope.mesonComponentOverrides
];
mkMesonExecutable = mkPackageBuilder [
@ -324,6 +372,7 @@ in
mesonLayer
mesonBuildLayer
fixupStaticLayer
enableSanitizersLayer
scope.mesonComponentOverrides
];
mkMesonLibrary = mkPackageBuilder [
@ -335,6 +384,7 @@ in
mesonBuildLayer
mesonLibraryLayer
fixupStaticLayer
enableSanitizersLayer
scope.mesonComponentOverrides
];

View file

@ -57,15 +57,20 @@ scope: {
prevAttrs.postInstall;
});
toml11 = pkgs.toml11.overrideAttrs rec {
version = "4.4.0";
src = pkgs.fetchFromGitHub {
owner = "ToruNiina";
repo = "toml11";
tag = "v${version}";
hash = "sha256-sgWKYxNT22nw376ttGsTdg0AMzOwp8QH3E8mx0BZJTQ=";
};
};
# TODO: Remove this when https://github.com/NixOS/nixpkgs/pull/442682 is included in a stable release
toml11 =
if lib.versionAtLeast pkgs.toml11.version "4.4.0" then
pkgs.toml11
else
pkgs.toml11.overrideAttrs rec {
version = "4.4.0";
src = pkgs.fetchFromGitHub {
owner = "ToruNiina";
repo = "toml11";
tag = "v${version}";
hash = "sha256-sgWKYxNT22nw376ttGsTdg0AMzOwp8QH3E8mx0BZJTQ=";
};
};
# TODO Hack until https://github.com/NixOS/nixpkgs/issues/45462 is fixed.
boost =
@ -84,38 +89,4 @@ scope: {
buildPhase = lib.replaceStrings [ "--without-python" ] [ "" ] old.buildPhase;
installPhase = lib.replaceStrings [ "--without-python" ] [ "" ] old.installPhase;
});
libgit2 =
if lib.versionAtLeast pkgs.libgit2.version "1.9.0" then
pkgs.libgit2
else
pkgs.libgit2.overrideAttrs (attrs: {
# libgit2: Nixpkgs 24.11 has < 1.9.0, which needs our patches
nativeBuildInputs =
attrs.nativeBuildInputs or [ ]
# gitMinimal does not build on Windows. See packbuilder patch.
++ lib.optionals (!stdenv.hostPlatform.isWindows) [
# Needed for `git apply`; see `prePatch`
pkgs.buildPackages.gitMinimal
];
# Only `git apply` can handle git binary patches
prePatch =
attrs.prePatch or ""
+ lib.optionalString (!stdenv.hostPlatform.isWindows) ''
patch() {
git apply
}
'';
patches =
attrs.patches or [ ]
++ [
./patches/libgit2-mempack-thin-packfile.patch
]
# gitMinimal does not build on Windows, but fortunately this patch only
# impacts interruptibility
++ lib.optionals (!stdenv.hostPlatform.isWindows) [
# binary patch; see `prePatch`
./patches/libgit2-packbuilder-callback-interruptible.patch
];
});
}

View file

@ -70,6 +70,9 @@ pkgs.nixComponents2.nix-util.overrideAttrs (
# We use this shell with the local checkout, not unpackPhase.
src = null;
# Workaround https://sourceware.org/pipermail/gdb-patches/2025-October/221398.html
# Remove when gdb fix is rolled out everywhere.
separateDebugInfo = false;
env = {
# For `make format`, to work without installing pre-commit
@ -93,37 +96,44 @@ pkgs.nixComponents2.nix-util.overrideAttrs (
++ map (transformFlag "libcmd") (ignoreCrossFile pkgs.nixComponents2.nix-cmd.mesonFlags);
nativeBuildInputs =
attrs.nativeBuildInputs or [ ]
++ pkgs.nixComponents2.nix-util.nativeBuildInputs
++ pkgs.nixComponents2.nix-store.nativeBuildInputs
++ pkgs.nixComponents2.nix-fetchers.nativeBuildInputs
++ pkgs.nixComponents2.nix-expr.nativeBuildInputs
++ lib.optionals havePerl pkgs.nixComponents2.nix-perl-bindings.nativeBuildInputs
++ lib.optionals buildCanExecuteHost pkgs.nixComponents2.nix-manual.externalNativeBuildInputs
++ pkgs.nixComponents2.nix-internal-api-docs.nativeBuildInputs
++ pkgs.nixComponents2.nix-external-api-docs.nativeBuildInputs
++ pkgs.nixComponents2.nix-functional-tests.externalNativeBuildInputs
++ lib.optional (
!buildCanExecuteHost
# Hack around https://github.com/nixos/nixpkgs/commit/bf7ad8cfbfa102a90463433e2c5027573b462479
&& !(stdenv.hostPlatform.isWindows && stdenv.buildPlatform.isDarwin)
&& stdenv.hostPlatform.emulatorAvailable pkgs.buildPackages
&& lib.meta.availableOn stdenv.buildPlatform (stdenv.hostPlatform.emulator pkgs.buildPackages)
) pkgs.buildPackages.mesonEmulatorHook
++ [
pkgs.buildPackages.cmake
pkgs.buildPackages.gnused
pkgs.buildPackages.shellcheck
pkgs.buildPackages.changelog-d
modular.pre-commit.settings.package
(pkgs.writeScriptBin "pre-commit-hooks-install" modular.pre-commit.settings.installationScript)
pkgs.buildPackages.nixfmt-rfc-style
pkgs.buildPackages.gdb
]
++ lib.optional (stdenv.cc.isClang && stdenv.hostPlatform == stdenv.buildPlatform) (
lib.hiPrio pkgs.buildPackages.clang-tools
)
++ lib.optional stdenv.hostPlatform.isLinux pkgs.buildPackages.mold-wrapped;
let
inputs =
attrs.nativeBuildInputs or [ ]
++ pkgs.nixComponents2.nix-util.nativeBuildInputs
++ pkgs.nixComponents2.nix-store.nativeBuildInputs
++ pkgs.nixComponents2.nix-fetchers.nativeBuildInputs
++ pkgs.nixComponents2.nix-expr.nativeBuildInputs
++ lib.optionals havePerl pkgs.nixComponents2.nix-perl-bindings.nativeBuildInputs
++ lib.optionals buildCanExecuteHost pkgs.nixComponents2.nix-manual.externalNativeBuildInputs
++ pkgs.nixComponents2.nix-internal-api-docs.nativeBuildInputs
++ pkgs.nixComponents2.nix-external-api-docs.nativeBuildInputs
++ pkgs.nixComponents2.nix-functional-tests.externalNativeBuildInputs
++ lib.optional (
!buildCanExecuteHost
# Hack around https://github.com/nixos/nixpkgs/commit/bf7ad8cfbfa102a90463433e2c5027573b462479
&& !(stdenv.hostPlatform.isWindows && stdenv.buildPlatform.isDarwin)
&& stdenv.hostPlatform.emulatorAvailable pkgs.buildPackages
&& lib.meta.availableOn stdenv.buildPlatform (stdenv.hostPlatform.emulator pkgs.buildPackages)
) pkgs.buildPackages.mesonEmulatorHook
++ [
pkgs.buildPackages.cmake
pkgs.buildPackages.gnused
pkgs.buildPackages.changelog-d
modular.pre-commit.settings.package
(pkgs.writeScriptBin "pre-commit-hooks-install" modular.pre-commit.settings.installationScript)
pkgs.buildPackages.nixfmt-rfc-style
pkgs.buildPackages.shellcheck
pkgs.buildPackages.include-what-you-use
pkgs.buildPackages.gdb
]
++ lib.optional (stdenv.cc.isClang && stdenv.hostPlatform == stdenv.buildPlatform) (
lib.hiPrio pkgs.buildPackages.clang-tools
)
++ lib.optional stdenv.hostPlatform.isLinux pkgs.buildPackages.mold-wrapped;
in
# FIXME: separateDebugInfo = false doesn't actually prevent -Wa,--compress-debug-sections
# from making its way into NIX_CFLAGS_COMPILE.
lib.filter (p: !lib.hasInfix "separate-debug-info" p) inputs;
buildInputs = [
pkgs.gbenchmark

View file

@ -73,7 +73,7 @@ let
]
);
in
{
rec {
/**
An internal check to make sure our package listing is complete.
*/
@ -145,13 +145,25 @@ in
)
);
buildNoGc =
# Builds with sanitizers already have GC disabled, so this buildNoGc can just
# point to buildWithSanitizers in order to reduce the load on hydra.
buildNoGc = buildWithSanitizers;
buildWithSanitizers =
let
components = forAllSystems (
system:
nixpkgsFor.${system}.native.nixComponents2.overrideScope (
let
pkgs = nixpkgsFor.${system}.native;
in
pkgs.nixComponents2.overrideScope (
self: super: {
# Boost coroutines fail with ASAN on darwin.
withASan = !pkgs.stdenv.buildPlatform.isDarwin;
withUBSan = true;
nix-expr = super.nix-expr.override { enableGC = false; };
# Unclear how to make Perl bindings work with a dynamically linked ASAN.
nix-perl-bindings = null;
}
)
);

View file

@ -1,282 +0,0 @@
commit 9bacade4a3ef4b6b26e2c02f549eef0e9eb9eaa2
Author: Robert Hensing <robert@roberthensing.nl>
Date: Sun Aug 18 20:20:36 2024 +0200
Add unoptimized git_mempack_write_thin_pack
diff --git a/include/git2/sys/mempack.h b/include/git2/sys/mempack.h
index 17da590a3..3688bdd50 100644
--- a/include/git2/sys/mempack.h
+++ b/include/git2/sys/mempack.h
@@ -44,6 +44,29 @@ GIT_BEGIN_DECL
*/
GIT_EXTERN(int) git_mempack_new(git_odb_backend **out);
+/**
+ * Write a thin packfile with the objects in the memory store.
+ *
+ * A thin packfile is a packfile that does not contain its transitive closure of
+ * references. This is useful for efficiently distributing additions to a
+ * repository over the network, but also finds use in the efficient bulk
+ * addition of objects to a repository, locally.
+ *
+ * This operation performs the (shallow) insert operations into the
+ * `git_packbuilder`, but does not write the packfile to disk;
+ * see `git_packbuilder_write_buf`.
+ *
+ * It also does not reset the memory store; see `git_mempack_reset`.
+ *
+ * @note This function may or may not write trees and blobs that are not
+ * referenced by commits. Currently everything is written, but this
+ * behavior may change in the future as the packer is optimized.
+ *
+ * @param backend The mempack backend
+ * @param pb The packbuilder to use to write the packfile
+ */
+GIT_EXTERN(int) git_mempack_write_thin_pack(git_odb_backend *backend, git_packbuilder *pb);
+
/**
* Dump all the queued in-memory writes to a packfile.
*
diff --git a/src/libgit2/odb_mempack.c b/src/libgit2/odb_mempack.c
index 6f27f45f8..0b61e2b66 100644
--- a/src/libgit2/odb_mempack.c
+++ b/src/libgit2/odb_mempack.c
@@ -132,6 +132,35 @@ cleanup:
return err;
}
+int git_mempack_write_thin_pack(git_odb_backend *backend, git_packbuilder *pb)
+{
+ struct memory_packer_db *db = (struct memory_packer_db *)backend;
+ const git_oid *oid;
+ size_t iter = 0;
+ int err = -1;
+
+ /* TODO: Implement the recency heuristics.
+ For this it probably makes sense to only write what's referenced
+ through commits, an option I've carved out for you in the docs.
+ wrt heuristics: ask your favorite LLM to translate https://git-scm.com/docs/pack-heuristics/en
+ to actual normal reference documentation. */
+ while (true) {
+ err = git_oidmap_iterate(NULL, db->objects, &iter, &oid);
+ if (err == GIT_ITEROVER) {
+ err = 0;
+ break;
+ }
+ if (err != 0)
+ return err;
+
+ err = git_packbuilder_insert(pb, oid, NULL);
+ if (err != 0)
+ return err;
+ }
+
+ return 0;
+}
+
int git_mempack_dump(
git_buf *pack,
git_repository *repo,
diff --git a/tests/libgit2/mempack/thinpack.c b/tests/libgit2/mempack/thinpack.c
new file mode 100644
index 000000000..604a4dda2
--- /dev/null
+++ b/tests/libgit2/mempack/thinpack.c
@@ -0,0 +1,196 @@
+#include "clar_libgit2.h"
+#include "git2/indexer.h"
+#include "git2/odb_backend.h"
+#include "git2/tree.h"
+#include "git2/types.h"
+#include "git2/sys/mempack.h"
+#include "git2/sys/odb_backend.h"
+#include "util.h"
+
+static git_repository *_repo;
+static git_odb_backend * _mempack_backend;
+
+void test_mempack_thinpack__initialize(void)
+{
+ git_odb *odb;
+
+ _repo = cl_git_sandbox_init_new("mempack_thinpack_repo");
+
+ cl_git_pass(git_mempack_new(&_mempack_backend));
+ cl_git_pass(git_repository_odb(&odb, _repo));
+ cl_git_pass(git_odb_add_backend(odb, _mempack_backend, 999));
+ git_odb_free(odb);
+}
+
+void _mempack_thinpack__cleanup(void)
+{
+ cl_git_sandbox_cleanup();
+}
+
+/*
+ Generating a packfile for an unchanged repo works and produces an empty packfile.
+ Even if we allow this scenario to be detected, it shouldn't misbehave if the
+ application is unaware of it.
+*/
+void test_mempack_thinpack__empty(void)
+{
+ git_packbuilder *pb;
+ int version;
+ int n;
+ git_buf buf = GIT_BUF_INIT;
+
+ git_packbuilder_new(&pb, _repo);
+
+ cl_git_pass(git_mempack_write_thin_pack(_mempack_backend, pb));
+ cl_git_pass(git_packbuilder_write_buf(&buf, pb));
+ cl_assert_in_range(12, buf.size, 1024 /* empty packfile is >0 bytes, but certainly not that big */);
+ cl_assert(buf.ptr[0] == 'P');
+ cl_assert(buf.ptr[1] == 'A');
+ cl_assert(buf.ptr[2] == 'C');
+ cl_assert(buf.ptr[3] == 'K');
+ version = (buf.ptr[4] << 24) | (buf.ptr[5] << 16) | (buf.ptr[6] << 8) | buf.ptr[7];
+ /* Subject to change. https://git-scm.com/docs/pack-format: Git currently accepts version number 2 or 3 but generates version 2 only.*/
+ cl_assert_equal_i(2, version);
+ n = (buf.ptr[8] << 24) | (buf.ptr[9] << 16) | (buf.ptr[10] << 8) | buf.ptr[11];
+ cl_assert_equal_i(0, n);
+ git_buf_dispose(&buf);
+
+ git_packbuilder_free(pb);
+}
+
+#define LIT_LEN(x) x, sizeof(x) - 1
+
+/*
+ Check that git_mempack_write_thin_pack produces a thin packfile.
+*/
+void test_mempack_thinpack__thin(void)
+{
+ /* Outline:
+ - Create tree 1
+ - Flush to packfile A
+ - Create tree 2
+ - Flush to packfile B
+
+ Tree 2 has a new blob and a reference to a blob from tree 1.
+
+ Expectation:
+ - Packfile B is thin and does not contain the objects from packfile A
+ */
+
+
+ git_oid oid_blob_1;
+ git_oid oid_blob_2;
+ git_oid oid_blob_3;
+ git_oid oid_tree_1;
+ git_oid oid_tree_2;
+ git_treebuilder *tb;
+
+ git_packbuilder *pb;
+ git_buf buf = GIT_BUF_INIT;
+ git_indexer *indexer;
+ git_indexer_progress stats;
+ char pack_dir_path[1024];
+
+ char sbuf[1024];
+ const char * repo_path;
+ const char * pack_name_1;
+ const char * pack_name_2;
+ git_str pack_path_1 = GIT_STR_INIT;
+ git_str pack_path_2 = GIT_STR_INIT;
+ git_odb_backend * pack_odb_backend_1;
+ git_odb_backend * pack_odb_backend_2;
+
+
+ cl_assert_in_range(0, snprintf(pack_dir_path, sizeof(pack_dir_path), "%s/objects/pack", git_repository_path(_repo)), sizeof(pack_dir_path));
+
+ /* Create tree 1 */
+
+ cl_git_pass(git_blob_create_from_buffer(&oid_blob_1, _repo, LIT_LEN("thinpack blob 1")));
+ cl_git_pass(git_blob_create_from_buffer(&oid_blob_2, _repo, LIT_LEN("thinpack blob 2")));
+
+
+ cl_git_pass(git_treebuilder_new(&tb, _repo, NULL));
+ cl_git_pass(git_treebuilder_insert(NULL, tb, "blob1", &oid_blob_1, GIT_FILEMODE_BLOB));
+ cl_git_pass(git_treebuilder_insert(NULL, tb, "blob2", &oid_blob_2, GIT_FILEMODE_BLOB));
+ cl_git_pass(git_treebuilder_write(&oid_tree_1, tb));
+
+ /* Flush */
+
+ cl_git_pass(git_packbuilder_new(&pb, _repo));
+ cl_git_pass(git_mempack_write_thin_pack(_mempack_backend, pb));
+ cl_git_pass(git_packbuilder_write_buf(&buf, pb));
+ cl_git_pass(git_indexer_new(&indexer, pack_dir_path, 0, NULL, NULL));
+ cl_git_pass(git_indexer_append(indexer, buf.ptr, buf.size, &stats));
+ cl_git_pass(git_indexer_commit(indexer, &stats));
+ pack_name_1 = strdup(git_indexer_name(indexer));
+ cl_assert(pack_name_1);
+ git_buf_dispose(&buf);
+ git_mempack_reset(_mempack_backend);
+ git_indexer_free(indexer);
+ git_packbuilder_free(pb);
+
+ /* Create tree 2 */
+
+ cl_git_pass(git_treebuilder_clear(tb));
+ /* blob 1 won't be used, but we add it anyway to test that just "declaring" an object doesn't
+ necessarily cause its inclusion in the next thin packfile. It must only be included if new. */
+ cl_git_pass(git_blob_create_from_buffer(&oid_blob_1, _repo, LIT_LEN("thinpack blob 1")));
+ cl_git_pass(git_blob_create_from_buffer(&oid_blob_3, _repo, LIT_LEN("thinpack blob 3")));
+ cl_git_pass(git_treebuilder_insert(NULL, tb, "blob1", &oid_blob_1, GIT_FILEMODE_BLOB));
+ cl_git_pass(git_treebuilder_insert(NULL, tb, "blob3", &oid_blob_3, GIT_FILEMODE_BLOB));
+ cl_git_pass(git_treebuilder_write(&oid_tree_2, tb));
+
+ /* Flush */
+
+ cl_git_pass(git_packbuilder_new(&pb, _repo));
+ cl_git_pass(git_mempack_write_thin_pack(_mempack_backend, pb));
+ cl_git_pass(git_packbuilder_write_buf(&buf, pb));
+ cl_git_pass(git_indexer_new(&indexer, pack_dir_path, 0, NULL, NULL));
+ cl_git_pass(git_indexer_append(indexer, buf.ptr, buf.size, &stats));
+ cl_git_pass(git_indexer_commit(indexer, &stats));
+ pack_name_2 = strdup(git_indexer_name(indexer));
+ cl_assert(pack_name_2);
+ git_buf_dispose(&buf);
+ git_mempack_reset(_mempack_backend);
+ git_indexer_free(indexer);
+ git_packbuilder_free(pb);
+ git_treebuilder_free(tb);
+
+ /* Assertions */
+
+ assert(pack_name_1);
+ assert(pack_name_2);
+
+ repo_path = git_repository_path(_repo);
+
+ snprintf(sbuf, sizeof(sbuf), "objects/pack/pack-%s.pack", pack_name_1);
+ git_str_joinpath(&pack_path_1, repo_path, sbuf);
+ snprintf(sbuf, sizeof(sbuf), "objects/pack/pack-%s.pack", pack_name_2);
+ git_str_joinpath(&pack_path_2, repo_path, sbuf);
+
+ /* If they're the same, something definitely went wrong. */
+ cl_assert(strcmp(pack_name_1, pack_name_2) != 0);
+
+ cl_git_pass(git_odb_backend_one_pack(&pack_odb_backend_1, pack_path_1.ptr));
+ cl_assert(pack_odb_backend_1->exists(pack_odb_backend_1, &oid_blob_1));
+ cl_assert(pack_odb_backend_1->exists(pack_odb_backend_1, &oid_blob_2));
+ cl_assert(!pack_odb_backend_1->exists(pack_odb_backend_1, &oid_blob_3));
+ cl_assert(pack_odb_backend_1->exists(pack_odb_backend_1, &oid_tree_1));
+ cl_assert(!pack_odb_backend_1->exists(pack_odb_backend_1, &oid_tree_2));
+
+ cl_git_pass(git_odb_backend_one_pack(&pack_odb_backend_2, pack_path_2.ptr));
+ /* blob 1 is already in the packfile 1, so packfile 2 must not include it, in order to be _thin_. */
+ cl_assert(!pack_odb_backend_2->exists(pack_odb_backend_2, &oid_blob_1));
+ cl_assert(!pack_odb_backend_2->exists(pack_odb_backend_2, &oid_blob_2));
+ cl_assert(pack_odb_backend_2->exists(pack_odb_backend_2, &oid_blob_3));
+ cl_assert(!pack_odb_backend_2->exists(pack_odb_backend_2, &oid_tree_1));
+ cl_assert(pack_odb_backend_2->exists(pack_odb_backend_2, &oid_tree_2));
+
+ pack_odb_backend_1->free(pack_odb_backend_1);
+ pack_odb_backend_2->free(pack_odb_backend_2);
+ free((void *)pack_name_1);
+ free((void *)pack_name_2);
+ git_str_dispose(&pack_path_1);
+ git_str_dispose(&pack_path_2);
+
+}

View file

@ -1,930 +0,0 @@
commit e9823c5da4fa977c46bcb97167fbdd0d70adb5ff
Author: Robert Hensing <robert@roberthensing.nl>
Date: Mon Aug 26 20:07:04 2024 +0200
Make packbuilder interruptible using progress callback
Forward errors from packbuilder->progress_cb
This allows the callback to terminate long-running operations when
the application is interrupted.
diff --git a/include/git2/pack.h b/include/git2/pack.h
index 0f6bd2ab9..bee72a6c0 100644
--- a/include/git2/pack.h
+++ b/include/git2/pack.h
@@ -247,6 +247,9 @@ typedef int GIT_CALLBACK(git_packbuilder_progress)(
* @param progress_cb Function to call with progress information during
* pack building. Be aware that this is called inline with pack building
* operations, so performance may be affected.
+ * When progress_cb returns an error, the pack building process will be
+ * aborted and the error will be returned from the invoked function.
+ * `pb` must then be freed.
* @param progress_cb_payload Payload for progress callback.
* @return 0 or an error code
*/
diff --git a/src/libgit2/pack-objects.c b/src/libgit2/pack-objects.c
index b2d80cba9..7c331c2d5 100644
--- a/src/libgit2/pack-objects.c
+++ b/src/libgit2/pack-objects.c
@@ -932,6 +932,9 @@ static int report_delta_progress(
{
int ret;
+ if (pb->failure)
+ return pb->failure;
+
if (pb->progress_cb) {
uint64_t current_time = git_time_monotonic();
uint64_t elapsed = current_time - pb->last_progress_report_time;
@@ -943,8 +946,10 @@ static int report_delta_progress(
GIT_PACKBUILDER_DELTAFICATION,
count, pb->nr_objects, pb->progress_cb_payload);
- if (ret)
+ if (ret) {
+ pb->failure = ret;
return git_error_set_after_callback(ret);
+ }
}
}
@@ -976,7 +981,10 @@ static int find_deltas(git_packbuilder *pb, git_pobject **list,
}
pb->nr_deltified += 1;
- report_delta_progress(pb, pb->nr_deltified, false);
+ if ((error = report_delta_progress(pb, pb->nr_deltified, false)) < 0) {
+ GIT_ASSERT(git_packbuilder__progress_unlock(pb) == 0);
+ goto on_error;
+ }
po = *list++;
(*list_size)--;
@@ -1124,6 +1132,10 @@ struct thread_params {
size_t depth;
size_t working;
size_t data_ready;
+
+ /* A pb->progress_cb can stop the packing process by returning an error.
+ When that happens, all threads observe the error and stop voluntarily. */
+ bool stopped;
};
static void *threaded_find_deltas(void *arg)
@@ -1133,7 +1145,12 @@ static void *threaded_find_deltas(void *arg)
while (me->remaining) {
if (find_deltas(me->pb, me->list, &me->remaining,
me->window, me->depth) < 0) {
- ; /* TODO */
+ me->stopped = true;
+ GIT_ASSERT_WITH_RETVAL(git_packbuilder__progress_lock(me->pb) == 0, NULL);
+ me->working = false;
+ git_cond_signal(&me->pb->progress_cond);
+ GIT_ASSERT_WITH_RETVAL(git_packbuilder__progress_unlock(me->pb) == 0, NULL);
+ return NULL;
}
GIT_ASSERT_WITH_RETVAL(git_packbuilder__progress_lock(me->pb) == 0, NULL);
@@ -1175,8 +1192,7 @@ static int ll_find_deltas(git_packbuilder *pb, git_pobject **list,
pb->nr_threads = git__online_cpus();
if (pb->nr_threads <= 1) {
- find_deltas(pb, list, &list_size, window, depth);
- return 0;
+ return find_deltas(pb, list, &list_size, window, depth);
}
p = git__mallocarray(pb->nr_threads, sizeof(*p));
@@ -1195,6 +1211,7 @@ static int ll_find_deltas(git_packbuilder *pb, git_pobject **list,
p[i].depth = depth;
p[i].working = 1;
p[i].data_ready = 0;
+ p[i].stopped = 0;
/* try to split chunks on "path" boundaries */
while (sub_size && sub_size < list_size &&
@@ -1262,7 +1279,7 @@ static int ll_find_deltas(git_packbuilder *pb, git_pobject **list,
(!victim || victim->remaining < p[i].remaining))
victim = &p[i];
- if (victim) {
+ if (victim && !target->stopped) {
sub_size = victim->remaining / 2;
list = victim->list + victim->list_size - sub_size;
while (sub_size && list[0]->hash &&
@@ -1286,7 +1303,7 @@ static int ll_find_deltas(git_packbuilder *pb, git_pobject **list,
}
target->list_size = sub_size;
target->remaining = sub_size;
- target->working = 1;
+ target->working = 1; /* even when target->stopped, so that we don't process this thread again */
GIT_ASSERT(git_packbuilder__progress_unlock(pb) == 0);
if (git_mutex_lock(&target->mutex)) {
@@ -1299,7 +1316,7 @@ static int ll_find_deltas(git_packbuilder *pb, git_pobject **list,
git_cond_signal(&target->cond);
git_mutex_unlock(&target->mutex);
- if (!sub_size) {
+ if (target->stopped || !sub_size) {
git_thread_join(&target->thread, NULL);
git_cond_free(&target->cond);
git_mutex_free(&target->mutex);
@@ -1308,7 +1325,7 @@ static int ll_find_deltas(git_packbuilder *pb, git_pobject **list,
}
git__free(p);
- return 0;
+ return pb->failure;
}
#else
@@ -1319,6 +1336,7 @@ int git_packbuilder__prepare(git_packbuilder *pb)
{
git_pobject **delta_list;
size_t i, n = 0;
+ int error;
if (pb->nr_objects == 0 || pb->done)
return 0; /* nothing to do */
@@ -1327,8 +1345,10 @@ int git_packbuilder__prepare(git_packbuilder *pb)
* Although we do not report progress during deltafication, we
* at least report that we are in the deltafication stage
*/
- if (pb->progress_cb)
- pb->progress_cb(GIT_PACKBUILDER_DELTAFICATION, 0, pb->nr_objects, pb->progress_cb_payload);
+ if (pb->progress_cb) {
+ if ((error = pb->progress_cb(GIT_PACKBUILDER_DELTAFICATION, 0, pb->nr_objects, pb->progress_cb_payload)) < 0)
+ return git_error_set_after_callback(error);
+ }
delta_list = git__mallocarray(pb->nr_objects, sizeof(*delta_list));
GIT_ERROR_CHECK_ALLOC(delta_list);
@@ -1345,31 +1365,33 @@ int git_packbuilder__prepare(git_packbuilder *pb)
if (n > 1) {
git__tsort((void **)delta_list, n, type_size_sort);
- if (ll_find_deltas(pb, delta_list, n,
+ if ((error = ll_find_deltas(pb, delta_list, n,
GIT_PACK_WINDOW + 1,
- GIT_PACK_DEPTH) < 0) {
+ GIT_PACK_DEPTH)) < 0) {
git__free(delta_list);
- return -1;
+ return error;
}
}
- report_delta_progress(pb, pb->nr_objects, true);
+ error = report_delta_progress(pb, pb->nr_objects, true);
pb->done = true;
git__free(delta_list);
- return 0;
+ return error;
}
-#define PREPARE_PACK if (git_packbuilder__prepare(pb) < 0) { return -1; }
+#define PREPARE_PACK error = git_packbuilder__prepare(pb); if (error < 0) { return error; }
int git_packbuilder_foreach(git_packbuilder *pb, int (*cb)(void *buf, size_t size, void *payload), void *payload)
{
+ int error;
PREPARE_PACK;
return write_pack(pb, cb, payload);
}
int git_packbuilder__write_buf(git_str *buf, git_packbuilder *pb)
{
+ int error;
PREPARE_PACK;
return write_pack(pb, &write_pack_buf, buf);
diff --git a/src/libgit2/pack-objects.h b/src/libgit2/pack-objects.h
index bbc8b9430..380a28ebe 100644
--- a/src/libgit2/pack-objects.h
+++ b/src/libgit2/pack-objects.h
@@ -100,6 +100,10 @@ struct git_packbuilder {
uint64_t last_progress_report_time;
bool done;
+
+ /* A non-zero error code in failure causes all threads to shut themselves
+ down. Some functions will return this error code. */
+ volatile int failure;
};
int git_packbuilder__write_buf(git_str *buf, git_packbuilder *pb);
diff --git a/tests/libgit2/pack/cancel.c b/tests/libgit2/pack/cancel.c
new file mode 100644
index 000000000..a0aa9716a
--- /dev/null
+++ b/tests/libgit2/pack/cancel.c
@@ -0,0 +1,240 @@
+#include "clar_libgit2.h"
+#include "futils.h"
+#include "pack.h"
+#include "hash.h"
+#include "iterator.h"
+#include "vector.h"
+#include "posix.h"
+#include "hash.h"
+#include "pack-objects.h"
+
+static git_repository *_repo;
+static git_revwalk *_revwalker;
+static git_packbuilder *_packbuilder;
+static git_indexer *_indexer;
+static git_vector _commits;
+static int _commits_is_initialized;
+static git_indexer_progress _stats;
+
+extern bool git_disable_pack_keep_file_checks;
+
+static void pack_packbuilder_init(const char *sandbox) {
+ _repo = cl_git_sandbox_init(sandbox);
+ /* cl_git_pass(p_chdir(sandbox)); */
+ cl_git_pass(git_revwalk_new(&_revwalker, _repo));
+ cl_git_pass(git_packbuilder_new(&_packbuilder, _repo));
+ cl_git_pass(git_vector_init(&_commits, 0, NULL));
+ _commits_is_initialized = 1;
+ memset(&_stats, 0, sizeof(_stats));
+ p_fsync__cnt = 0;
+}
+
+void test_pack_cancel__initialize(void)
+{
+ pack_packbuilder_init("small.git");
+}
+
+void test_pack_cancel__cleanup(void)
+{
+ git_oid *o;
+ unsigned int i;
+
+ cl_git_pass(git_libgit2_opts(GIT_OPT_ENABLE_FSYNC_GITDIR, 0));
+ cl_git_pass(git_libgit2_opts(GIT_OPT_DISABLE_PACK_KEEP_FILE_CHECKS, false));
+
+ if (_commits_is_initialized) {
+ _commits_is_initialized = 0;
+ git_vector_foreach(&_commits, i, o) {
+ git__free(o);
+ }
+ git_vector_free(&_commits);
+ }
+
+ git_packbuilder_free(_packbuilder);
+ _packbuilder = NULL;
+
+ git_revwalk_free(_revwalker);
+ _revwalker = NULL;
+
+ git_indexer_free(_indexer);
+ _indexer = NULL;
+
+ /* cl_git_pass(p_chdir("..")); */
+ cl_git_sandbox_cleanup();
+ _repo = NULL;
+}
+
+static int seed_packbuilder(void)
+{
+ int error;
+ git_oid oid, *o;
+ unsigned int i;
+
+ git_revwalk_sorting(_revwalker, GIT_SORT_TIME);
+ cl_git_pass(git_revwalk_push_ref(_revwalker, "HEAD"));
+
+ while (git_revwalk_next(&oid, _revwalker) == 0) {
+ o = git__malloc(sizeof(git_oid));
+ cl_assert(o != NULL);
+ git_oid_cpy(o, &oid);
+ cl_git_pass(git_vector_insert(&_commits, o));
+ }
+
+ git_vector_foreach(&_commits, i, o) {
+ if((error = git_packbuilder_insert(_packbuilder, o, NULL)) < 0)
+ return error;
+ }
+
+ git_vector_foreach(&_commits, i, o) {
+ git_object *obj;
+ cl_git_pass(git_object_lookup(&obj, _repo, o, GIT_OBJECT_COMMIT));
+ error = git_packbuilder_insert_tree(_packbuilder,
+ git_commit_tree_id((git_commit *)obj));
+ git_object_free(obj);
+ if (error < 0)
+ return error;
+ }
+
+ return 0;
+}
+
+static int fail_stage;
+
+static int packbuilder_cancel_after_n_calls_cb(int stage, uint32_t current, uint32_t total, void *payload)
+{
+
+ /* Force the callback to run again on the next opportunity regardless
+ of how fast we're running. */
+ _packbuilder->last_progress_report_time = 0;
+
+ if (stage == fail_stage) {
+ int *calls = (int *)payload;
+ int n = *calls;
+ /* Always decrement, including past zero. This way the error is only
+ triggered once, making sure it is picked up immediately. */
+ --*calls;
+ if (n == 0)
+ return GIT_EUSER;
+ }
+
+ return 0;
+}
+
+static void test_cancel(int n)
+{
+
+ int calls_remaining = n;
+ int err;
+ git_buf buf = GIT_BUF_INIT;
+
+ /* Switch to a small repository, so that `packbuilder_cancel_after_n_calls_cb`
+ can hack the time to call the callback on every opportunity. */
+
+ cl_git_pass(git_packbuilder_set_callbacks(_packbuilder, &packbuilder_cancel_after_n_calls_cb, &calls_remaining));
+ err = seed_packbuilder();
+ if (!err)
+ err = git_packbuilder_write_buf(&buf, _packbuilder);
+
+ cl_assert_equal_i(GIT_EUSER, err);
+}
+void test_pack_cancel__cancel_after_add_0(void)
+{
+ fail_stage = GIT_PACKBUILDER_ADDING_OBJECTS;
+ test_cancel(0);
+}
+
+void test_pack_cancel__cancel_after_add_1(void)
+{
+ cl_skip();
+ fail_stage = GIT_PACKBUILDER_ADDING_OBJECTS;
+ test_cancel(1);
+}
+
+void test_pack_cancel__cancel_after_delta_0(void)
+{
+ fail_stage = GIT_PACKBUILDER_DELTAFICATION;
+ test_cancel(0);
+}
+
+void test_pack_cancel__cancel_after_delta_1(void)
+{
+ fail_stage = GIT_PACKBUILDER_DELTAFICATION;
+ test_cancel(1);
+}
+
+void test_pack_cancel__cancel_after_delta_0_threaded(void)
+{
+#ifdef GIT_THREADS
+ git_packbuilder_set_threads(_packbuilder, 8);
+ fail_stage = GIT_PACKBUILDER_DELTAFICATION;
+ test_cancel(0);
+#else
+ cl_skip();
+#endif
+}
+
+void test_pack_cancel__cancel_after_delta_1_threaded(void)
+{
+#ifdef GIT_THREADS
+ git_packbuilder_set_threads(_packbuilder, 8);
+ fail_stage = GIT_PACKBUILDER_DELTAFICATION;
+ test_cancel(1);
+#else
+ cl_skip();
+#endif
+}
+
+static int foreach_cb(void *buf, size_t len, void *payload)
+{
+ git_indexer *idx = (git_indexer *) payload;
+ cl_git_pass(git_indexer_append(idx, buf, len, &_stats));
+ return 0;
+}
+
+void test_pack_cancel__foreach(void)
+{
+ git_indexer *idx;
+
+ seed_packbuilder();
+
+#ifdef GIT_EXPERIMENTAL_SHA256
+ cl_git_pass(git_indexer_new(&idx, ".", GIT_OID_SHA1, NULL));
+#else
+ cl_git_pass(git_indexer_new(&idx, ".", 0, NULL, NULL));
+#endif
+
+ cl_git_pass(git_packbuilder_foreach(_packbuilder, foreach_cb, idx));
+ cl_git_pass(git_indexer_commit(idx, &_stats));
+ git_indexer_free(idx);
+}
+
+static int foreach_cancel_cb(void *buf, size_t len, void *payload)
+{
+ git_indexer *idx = (git_indexer *)payload;
+ cl_git_pass(git_indexer_append(idx, buf, len, &_stats));
+ return (_stats.total_objects > 2) ? -1111 : 0;
+}
+
+void test_pack_cancel__foreach_with_cancel(void)
+{
+ git_indexer *idx;
+
+ seed_packbuilder();
+
+#ifdef GIT_EXPERIMENTAL_SHA256
+ cl_git_pass(git_indexer_new(&idx, ".", GIT_OID_SHA1, NULL));
+#else
+ cl_git_pass(git_indexer_new(&idx, ".", 0, NULL, NULL));
+#endif
+
+ cl_git_fail_with(
+ git_packbuilder_foreach(_packbuilder, foreach_cancel_cb, idx), -1111);
+ git_indexer_free(idx);
+}
+
+void test_pack_cancel__keep_file_check(void)
+{
+ assert(!git_disable_pack_keep_file_checks);
+ cl_git_pass(git_libgit2_opts(GIT_OPT_DISABLE_PACK_KEEP_FILE_CHECKS, true));
+ assert(git_disable_pack_keep_file_checks);
+}
diff --git a/tests/resources/small.git/HEAD b/tests/resources/small.git/HEAD
new file mode 100644
index 0000000000000000000000000000000000000000..cb089cd89a7d7686d284d8761201649346b5aa1c
GIT binary patch
literal 23
ecmXR)O|w!cN=+-)&qz&7Db~+TEG|hc;sO9;xClW2
literal 0
HcmV?d00001
diff --git a/tests/resources/small.git/config b/tests/resources/small.git/config
new file mode 100644
index 0000000000000000000000000000000000000000..07d359d07cf1ed0c0074fdad71ffff5942f0adfa
GIT binary patch
literal 66
zcmaz}&M!)h<>D+#Eyyp<EXgmbOv^9IO)M!(Eh^5;&r`5fFyP`$%gjm5%}+@M@=A(I
MQ@J>k5{uv*03B5png9R*
literal 0
HcmV?d00001
diff --git a/tests/resources/small.git/description b/tests/resources/small.git/description
new file mode 100644
index 0000000000000000000000000000000000000000..498b267a8c7812490d6479839c5577eaaec79d62
GIT binary patch
literal 73
zcmWH|%S+5nO;IRHEyyp<EXgmbv{pz>$t+PQ$;d2LNXyJgRZve!Elw`VEGWs$&r??@
Q$yWgB0LrH#Y0~2Y0PnOK(EtDd
literal 0
HcmV?d00001
diff --git a/tests/resources/small.git/hooks/applypatch-msg.sample b/tests/resources/small.git/hooks/applypatch-msg.sample
new file mode 100755
index 0000000000000000000000000000000000000000..dcbf8167fa503f96ff6a39c68409007eadc9b1f3
GIT binary patch
literal 535
zcmY+AX;Q;542A#a6e8^~FyI8r&I~hf2QJ{GO6(?HuvEG*+#R{4EI%zhfA8r{j%sh$
zHE~E-UtQd8{bq4@*S%jq3@bmxwQDXGv#o!N`o3AHMw3xD)hy0#>&E&zzl%vRffo<B
z)-H|+CWHZ~O*S%cfYx9;02_ohIA<Bg(1SxF-6OCb&_lBkf{t<AM9r;%E(Hf#h{|a@
z9>mqo=v6>_2NRa#TwDdYvTVQyueO*15Nlo%=#DXgC0bhF3vTa`LQGaO9;jeD$OP?~
za$G4Q{z+Q_{5V?5h;a-noM$P{<>Q~j4o7u%#P6^o^16{y*jU=-K8GYD_dUtdj4FSx
zSC0C!DvAnv%S!4d<Yg@O<;m`;oSw)=Fz+hrL<mY{rBr8j4pi^88FX3}jKrYUP)>gk
XB^)11aoGMJPCqWs%IS0YSv(eBT&%T6
literal 0
HcmV?d00001
diff --git a/tests/resources/small.git/hooks/commit-msg.sample b/tests/resources/small.git/hooks/commit-msg.sample
new file mode 100755
index 0000000000000000000000000000000000000000..f3780f92349638ebe32f6baf24c7c3027675d7c9
GIT binary patch
literal 953
zcmaJ<TW`}a6n<`g#Ya}rZCaYOzy?UGG&Tf#VG`?}7@eHtB=MTqneFUS%75oLS;atz
zm&iUo=ey->y@-{3h^^Cx;#d0zEA@DDc$nY4ez&|=%jTg@_HU*ub=!!y$xW09TSjlj
z(`I@QCsM`!9&80$I98wsQ8yK#)Orb<8re8FjkKh630D$QUDwi~(gkX=RunYm$rDjk
zlp%RUSnzA#6yjdG5?T?2DcYKp+v_lts0ljn&bh3J0bD5@N@1UKZ190O6ZeWr-BuZ^
zWRebCX%(%=Xoj#(xYk1Cjtr!=tyBesf@m6}8zY6Ijbz9i9ziI_jG9Mv<Cz(ymp*>R
zDH*e>^ga9IR?2wrSrAVm;eButj4<aWB@zzNl|1Wp@4;}1O?MUF>Y>7(E2?b~jsu>&
zRKCJ7bp#19sqYh627wD%D9R$8=Ml$TNlumDypl~$jBu*G>5fIR^FB0h0Ex&TGZNr>
zL5hs1_K>taRb!|ThN9ns7^@4MXKP+6aGI_UK)T-M#rcP$;kN(Vcf#P)+5GzWa{l@J
z>-E{`$1iiNVYxq27}<DnwLRXQUG0o_hw&da-s5T#H=`Y9D_8=eTZ?cpWatp#a1vs@
z2BjrO)z@aTuI#g#`)oJcnhM7oYLT@~CHX@CNXv4>j;uo%;)r3kJI2xCFF~Ux;$Q%)
wjbk6JlDCM`jU&P+UVOvg`|iYl<7~9k>HHB4I;pdlQ=I-^$DrHaN$@lH1?P!0U;qFB
literal 0
HcmV?d00001
diff --git a/tests/resources/small.git/hooks/fsmonitor-watchman.sample b/tests/resources/small.git/hooks/fsmonitor-watchman.sample
new file mode 100755
index 0000000000000000000000000000000000000000..41184ebc318c159f51cd1ebe2290559805df89d8
GIT binary patch
literal 4777
zcmbtYYi}F368$Xwipg4lq(BeHMvzvH-4;n7DGJBPqq#tw3aed8+IU5-m)yvL>;Cqh
z8FFRGj$`9CA8ao<GaSz2oMCnz4Rv-gw9b@j_$0GQT1?XYi|;S??Y`Z6`Z;}C6#27N
z8ShS>J?j^$%==FV``-=rhLcPW`McSytRm~mEO7_&_cAVZrf1fFy*ha@8oe%*-aBYE
zcjzZg>LOkgxuUr-XJnHyD;zmPnRaSc#!k_P*d_BttRdc+J6G7za5#+<HG#rlmbrN~
z8DwU-3}VABEwM=0VLP@^Dy6ERR5_J6cmg|GEh*M1EliqCGwe^ZT-iZ$2Yc`4!I#WZ
z5nGGhn7*jeW=2ydsmfAmm#=8AD<<;TI+#z{Q)kW;yE!%GB6f~7EtEMLdM47Qaz*8=
zIObA(VVj-VG{Ax|66d*hi`+bRG>^Y1nkc2Oowk`ya47uUR3Feu?B<phm31&PQB<lt
zb{W(W4wf#Bab%|Q_tKPS?3^o=5)Z8^Vh(#slNI}pO(f^|{U0GZhLnycSaNd&h?CaC
z0XklU6^<ky6rES9T=na$M8P<_)aKMAMo+UDewAu4wF{#&6diFshiudixAoh|&0<BJ
zR>(w;S{(VYzxh}q-=#zP@uxSx{wbyPUMFU;K(06)$o{07&3yI?q{GqMcQ1c_^M<0<
zF4acAV)Il-V(rCTC1(;bsZ*}bl8dmejAk~yb`B}!^0;g^(o9kGUfZfDOvyp@x4OQt
zSgWh6T|3eq;9MFs8-#z+FDM1h(IjRUP|``PxupgJ7CUHOH90gbgl^2~97`?_X{P))
zB*$r1cDlF-%azKND}?Gv`2K8-9v5e`gQoft=j?T<&a13c^!wY_$D`5z-X1g?ty&6-
zQN50{8?bUk9AI->^W@~~nkOghHIC2YN+<JiT_ob7ttND1oh`HH28Y+OV~HedG&uB`
zy}rA*r_xT#bR`Q7-*)3t*(!Hf?jKzyxk=8hdi3L^d<p<uU9q_<4k&xEr4@YWv_vsW
zp(#32bYtA5u|s#1+}h`0kwpz4kUD&+>AXkLQG_2-{Pq3%{`3KUMeG$iIn%%^6*NYb
zn|_BdV#C)n4565Vcc<EWC-nglECZGy!j9I4&;hUCzr(?6TftH=0^@!mI^E@y5HZw8
ztH&kaSNyg=O6riqR^MPOX6oD__Jz@K;*icS)?m$!p{S$n;*EwlO<L!d7;utu(W9D!
zaF!B~xV^2i?wH0s?Lw%Q)(`aPkajs1XojlPv@Y-d5#YFg#KG+!r7A(dJLnkiJMs`p
zV|_=d!upN{TsxH1?sZNdzxeHsmtzTV`1{pykJ_~+^*>X;uT8&z3vSi!HXGbUj2B!R
zdz~&#<?<tHJql=W&((YpzS06y-Z6Cn^H!*9qH?pOrD~(GV=JJ~z{tpLnGK|xp&C1`
zsbn7O86xjF<~G*S1l*;;Bh%6><Up=oKy99?62P^?M&22U6QFRBXLb&u%=Ur<74wRy
zMRG!f{AvZ>fk#L-&k$fLwo$4?>12g@AXOKFekuo#6EHB%gmpD?1eyh%N8s{2wGoTu
z*@6cEZ^ZW!FAF_|JL`NkV7k}0ow|-2jHwbgH0;c@Dq*o?@&c*HnGdyx6^su8Qk%2{
z*ye(dxO*6-&>qn1+zw}tc6;=sOX{4WB=VqjTS^))y1jlX2Q;=e!qMmFA5lC$#;BxC
z=Y%tRpWxb+_uQAvAw7Q{HGV#R$xb&udLCzZ+HN?kTyB};1EJ8UlQ5!>5eGW@)RX0n
zkjj>EF!3=0Gl^8dzv$B^NMGRxJoqN4A`xq-@wCbrx*u2NmIJ1<fUDQ=*^)h6`vzco
z3F+ro$F!F6pc<B;<;isobIgbVGKUBByoQ4#CO({S7g?<Dh0^!7uJ3gxS=6F;+^gQc
zeKi4`4`Fm3p|BU2v{M|-u!#QGCXiM=k=%np0<ZOPQqEjP_nneyOdgEuG9IH&YYPtp
zKB_dvcYCcyhyT#<uhUEL$o~!TUz;cb&|`uSM{Dkv%&W2lcpYL&kv)tUvVv?&>xZ%H
zh;{|4T3(!E9sY#Ni(wUJYs1MmIc9bl)(4Nl3_wD_BWB>i<1S(LX7m*{Q7PU$muMS*
zM!%0EZx-Vw=Zey;erC?SNxF;pY@^A%-krqzfLV2meBp1vWdyArFYn`DD19T)Hw(?n
z)}{NP(Lk(o*?gl#B@pP7^*r|=;PIDT4|F#{2Hzh-AL0Rv$6uT;<CP7qxbYms@WU7}
z%}TsHJ!V_56ZFF{BfI=8jTBxMEATG376pS6a;u1@c?{~sL<N52`U6fuLkz4P@Mb^b
z2`Q48y&!C0&A+WRCJAdmo3u2#?eI=si9Vm47$|`m**x<wKkM=QR&g?C63@P5X@xP8
zi5QG2b-Fdz%S0%1{kKvL%^RTdpK|SU^VJ7JCmKdwp1`;HRoGM7ef^k_s_}2An=cql
za|{IaxQWpdq<ae&x3BOJq+M5QNIk#C{Nv@u>n|WzE4=slK?on@(fZeGhRgQCu56qB
z{+n81Az96qnQjMY*-*r-KV*7;Z#4Q<xfjbcBx_6JAN-HP@bq+eI%HhAB9&vLyOap{
bw<Ywj(b#kdcCk7dCBY;|DBNpPjIa1F6*dXN
literal 0
HcmV?d00001
diff --git a/tests/resources/small.git/hooks/post-update.sample b/tests/resources/small.git/hooks/post-update.sample
new file mode 100755
index 0000000000000000000000000000000000000000..9f95e41a39cf974958277c8881ac6cce405ebb20
GIT binary patch
literal 246
zcmXZVO?HDY3<Ti4Poaiwjq^yGw9DKf7mz^&Ly=V5q$H=W^Rt|p_r9s#9Ea7VERo!9
zyT9>uJRJJV$M^KdldiMhj?ImK6~FvwJ*L5a){QoM=L5TYHkGO1$UrO3`a>{?Opw|b
zG(#59NQ#jFL9v~vgOVkM@^^(^A}onOE))yWEwhIlk&{ZyseZ^O0b=w8&O=BK{k<5B
k^Q-B@eG}LeHrquz%(SVEp_N)VhYZikCW__82JXfD17`J9Qvd(}
literal 0
HcmV?d00001
diff --git a/tests/resources/small.git/hooks/pre-applypatch.sample b/tests/resources/small.git/hooks/pre-applypatch.sample
new file mode 100755
index 0000000000000000000000000000000000000000..625837e25f91421b8809a097f4a3103dd387ef31
GIT binary patch
literal 481
zcmY+ATTa6;5Jms9iouO45IBJXEg&Jm9@v1LPHMM_ZR|;#6tQ<EeSrA%_2`_rGr1_8
z?aM?yVtIc%-@9SGSk&8x=grP-Lf`7!^=$7xgL=|ysZ}!av6zL~ywui}<2##V6L@!k
zy=p^)V7%Wzs-g`9<Y9}^)&uN}BCrXR_T3@Z2$gSJON2`X=mAs+%@7n-2I}ZrP|TFA
zvJJGDl3HPLP<@!Q!}zXQvey#qEE#a#$vs97i4=A0stF@YQ)k_ZajaoS^dVYBc&37_
zVI(L=X<V335r9~7T<;|HfKF+yM}}LB9d96V)Si;sj(;9Rh$#P>h$71hSXq*MxP;V&
zj0cY7SCL=x4`a46sF)C>94Gk%=3q$W2s;j6iHtB2$R0%gix4oK@&T~=ALd_o*CKxt
I-`Pv{1Bpzc>;M1&
literal 0
HcmV?d00001
diff --git a/tests/resources/small.git/hooks/pre-commit.sample b/tests/resources/small.git/hooks/pre-commit.sample
new file mode 100755
index 0000000000000000000000000000000000000000..10b39b2e26981b8f87ea424e735ef87359066dbb
GIT binary patch
literal 1706
zcmZuxU2ohr5PY_N#pZ0-F<{-<Zkx1j&cMNSQ3FN`GzR*R1O+9oPV`BnOj7q@1pV!u
zrF0Hl^i33(yR)-1d-!H%&2|=|^E~_R{N1zNJ-&Zmt-t?iwXv&i+ZN}Km(TX8Q$H4u
zd7(m`|1iDmF5k@xV`p;C4zojASmLc}yN0QDZbhN=ri&CEt=XGuN1IwjGJ#a#`t-kG
zDqY)}7+Ft|;YKwLYbtg$S(-TBO=x3cP1cd}%f4kB!<6Wu-dCwz-)KDMEuM^_Hh*UC
zC`1)|)T<(U6b`+yOH!6p*Ll}@qastwA*dyjsgOf5C=?LTprfORG6O{5L%@S0wyHpj
zu|_A-=NWnYYR5m7kvm6|&T~GzoJ_OKR3sgFUgw?ifho^NQhvK#{6g0=&Fh)%n}#m0
zk1sNmwb_AMDq};OOGw5|;OyX#?yQMMH6yAk(x$3tjFjHE?c$E2XC_xXav8tnIeIG?
zYMI|~MLEVGkuT*>v&v-X^RA+u>k}E$4d&uD7=g_fA8+pNNV=4s0|iD3p<=DTXClTS
zXV23tJ;ECmN@M0j@zUAKEYW@3bv!SeYZ8ZH`YQNTApFVNc;F|9r5p4TqGs=>8E?6y
zi|gY{iM#PG1nL?UE9YCnWTk72kgZPG*Usqw!~Qd3c?~@w2?%eg@~)+VlSs6N5Yf2^
zz;ow<fjf3n`imj7u5lnzt||q9QFM(H@<3EJCK|l5P!$yDfn~U-(5Vu7s+GqxNKyeI
zP=-Oa(KH&gK`NhUa`cLj3B8%qL};DR7dk!`A^h&3-hFB6p($5Ufy^tGir)3et}qK4
zpkPKYWzC+?=&gw-L6S)S=<lfq)%uLUAa%~81Jf9hk)w~U!DItnoSy`m^}#38M}K-o
z!PqisQkV!&z4E*V01ro~qlVK^0BI`pLk6D+)f~*y!hCvwHq8zY9BGh<2s$@b^A<8G
zRaqk}&qZyyv&|0QDFPA%h4TgF&vdlc|JUq*=>F#K#r^&KMq1A`oqVGFpD&-!Pv|Rc
zO3KSqA@h9nSc%bm`0)Amk6*J}@14J*1-219l%%7D!Pl}UK>|lVi0Dfgu2jN3WC!uL
z0ej??b2iSehVgdnWHmZV4kUo*QL#aiIp}U=9x)IXk}JJ7VQ;CI9Rtn5e0VcjbY<bp
zv{}WlG6L;H!EzFKdY>cVt+`x5D+svCGD<sXw4|)E|JX43I1_3P(sI4{wj87bPSrNG
w!SIr>;Z5hm*<gY+X;)Ryx4=nzaab9m`bwE*^s(%u*E3HbUuOR@+&s_z1=MCi2LJ#7
literal 0
HcmV?d00001
diff --git a/tests/resources/small.git/hooks/pre-merge-commit.sample b/tests/resources/small.git/hooks/pre-merge-commit.sample
new file mode 100755
index 0000000000000000000000000000000000000000..d8e8ad4bcf8337f23c78a98537ecab4831301202
GIT binary patch
literal 473
zcmaJ-O;f@!5WV+TJd4B0whSt$H%Dh2t`2u6q1!glCNbGU;n%yxdNsG~zR#WA6xIwy
zWEZHoU#u?nykD=Y<HPgeWDkDm^kTof*l(|%^gh!nHrZpo^vhMDjV;E1GD~K7wV*+D
zz9lry9T0cHcm_KhDVXYvQ==FrLTT4u=bEr{U1yl7%thf%wJnv<XQZ`ZbQEezaWdS%
zI;c?h9a)Y!ux<WK8rQd_aA^?61hv_Pf<t7*z1USuL40FxYz<|hybsO?qnN}aMpcuf
z6phFw1%Xx=wUk(m>E$jSEQZ%SQ(}oLgslTvrKK@9Qf#b!hajVFnp9@oIix;NcI9Wk
xjnh0ya!AWet{I7YpD;y6HXyzI*lfSvH=o6*7mJZPkuaYpm>vzZ`wyGEBtOQPo|pgt
literal 0
HcmV?d00001
diff --git a/tests/resources/small.git/hooks/pre-push.sample b/tests/resources/small.git/hooks/pre-push.sample
new file mode 100755
index 0000000000000000000000000000000000000000..02cbd80c287f959fe33975bb66c56293e3f5b396
GIT binary patch
literal 1431
zcmaJ>U60!~5PUX&#a1@z9B{IIZkjLT0t5kq9#8~D(I5{+8&J~9;#ndUk~-ZT`r|uG
z$#K$$J{TsK<m~Lsu9iP+t-0TZ=sa(K+C6);54X>s*LP1}9!GoZ@4I4myMMG_di|of
z%?llx{O8TS-#^<H#%^V=)RNv>;(OioEmPy%kwWQBA1OMzV{hsQ8XFzS1k!~YQoLa5
zhtP1fA$q6VmMbbAC_9)4I628k*O5J$NR19uHe4QYDK<==I~SQk)Nu%xQ~<Hy8U>KH
z53w=!ke(FGb_PpnZfd*+hnXDTn;2*`u^~;?+5C~cn?bRka7NR%06%e6O91{MAgN6J
zmlO8{Biw4&wr&&(z4p3eln`E}XR9m9bNYZ7Ibrg(4yZIXrfgD7N*AFD7L3YSM#j}%
zo__rOS5fr;@8UM<6cl+cv_$YB$PQ&9dv($eM*))g!_cu!QcSh-mqE9i#QDZT)=o#`
z?8!RtE?w6p?GkGZ-6yt_p~5~4ecu|Sf^)6096%h*q-eNiEA1;Xwg)p~Q&iGSG7-IQ
z9aII&`ps$WOojFA`*bjG<mBv1n0hcYZWN0~(X01-hx(ExqWqaXgSk*@-GMp|K_3!5
z9|O21N3%~izh(4fbp9wzd+!b&7cVwSP5H00)m5ej-(s=Pl#(90UOhn@OA9u+D{i@r
za4*CP0I#<d-)-#xq5q-iY5nIef2s5OuQjcA>kFk|E@sHHuD}W^d`7YJ3YE^zrQnqR
zGoq?;YGKe)93o|_=^f%3U1KYZGPOXRRxK7w`UUbMMa3<86OmVH!EKP$8RCrn9mWX+
zC?9yF!fRVLmud3hF<}x;;sR}f(*r}6Gap3fR6zLHR~kbMgD{98N`L+r&?3p~*0+FX
zcAL%j=(SO}xTJUTvA`&Lf`2mv4koPG9&|<CA~EHbWHMoFPwT(&U=7t0`RoFZPO9Kq
zwwe$i=T|AXY#hD$aZlNMH`wZ%gwilGJ(zeYpOn*F3cy0XKXio^Sj#WXx=PWV`WGaA
B&~*R+
literal 0
HcmV?d00001
diff --git a/tests/resources/small.git/hooks/pre-rebase.sample b/tests/resources/small.git/hooks/pre-rebase.sample
new file mode 100755
index 0000000000000000000000000000000000000000..193ee3f127387f469894675f0d621b5bb9c275da
GIT binary patch
literal 5006
zcmb7IX>;2+68$XxiXKL@ma;l5d2^5Ba_rPh_DHI-u1#&_upttZXp;no03$20|NFiM
zK#D#xQ>!Z3JkX8T-LDVm!B5j7y_{;JDmmTTef+K1oIiPzeEr+Ai*<2PUgnG4^ZB>p
z_fkAvoR1emuf~ri^K$-px=4#D-v<wZ2Xv&$O_eTJh6d4)=DWL(NBs9G{k<+yMMw0T
z$VH*-+LM)}u&m^`l8~1nt(3Z;R8v(KbY5#i3z+~8h0D}Xvq&3J8BMWDizPNpaeb~9
zBN9bSkthfXzskapf%Zt{*e#}{QaNiaAVZ4{$;;I6<vKNhO@%7P-(;l-x=pPoExHC!
zB(hA#cDdD?s4P=!)=-K{<kHAWKetl-8I8wwO<ihJNs-$dEvr;&S_@6E=mNSJ5;mg#
zyb)MbqKH<one{qrV;ZQ6WL}yLtyi*ekNLf|uC6M!)Cmq7*l?g0d6`MlE49|}>Y9w&
z`bCv#<YfTKtb`!}CyNYd;|(C?vRVQmWOfR9X?FZ#=f$No)^#4>2zVn=YnJyeNey(Y
zRh`9vtLw~A+5zsjp|W0Nsa|29Rm!B>OoG5a+vi;ari8O>KkU!KAWg_fa3btK2x*_@
z0bEc7J;Ubghm}n9bOi(Sv_B66nQ7U)J7f0fO}<cB8i8vG{r39s_>8Wuf*uorcIgEG
zOHc|-V6+HlRhOP}?Cn?@5iwSl43abmBA^2lyL$+cpabCGVES+v^j^FO_}?FIp<qP?
z#_?*YMHIkyZxJxS;h@A)WDJ0bN&+F-#_lFjAf^g_Pb#5YXqYe|dZUpa^zI)VOBXQQ
zAMhT>%En%Ll?Z*7*}TwrZyg5OSZ9rY-`aU~Mc-jjv{Ll)FLMgtB4ujktfQ`Xhqrka
zT=P!A;9w^;Z?PqpLwOLu=cj3L>TdUKw2;DMu)`oVkj}<z_EjO_2uWYuvKG==%Zu?h
zJiMVR^c30RbpW}<+z;jjoNC}YI4f6QC7d-08*4mCB1>#bcDx4tYg=j%D`+i{W~fVM
zVmZ>W9VMyin9c-0KzI_;iZ-g|OyzuG`Yq%(%dvl;ifnVr0;jWE&S`z|rQu=!yHBBO
zx`OJ;oOQ(KKM<$(bC38o>pD0%|HA(E0TRw7qj$fJ_pRN+7Nm>dS<q{AcOz#-;d7_2
zL$z(_nhH{vOzT(}>C(gLg{(`t+5Z=?o+}wXU4tHy+&%F&aRhFebeEhR2R5|<c6J);
zEY(q5F5<n*XP0|=PtPBn$B)V~d$Os-?&8UlaVe_|jdkzoWNsTP-_uyq4$R6o<h`&@
z{loXa{^#TF=NJBYu9qB?hs}x=It_O}ZjX(_wy9@X(lmkRpNi0{8T{O_b_j*JC^_SM
zz3G?1$KCNWF-|`Jbx2cQ-y5LW?Z2eikngTZmsx5C(@({8<l)Ue+gIp##Mosfa~iZN
zZ|NLN9uE6Xaqpwk+@D+f?$tg2JRCY`pwZwbR9OvEn*zYm`ffKIzl4{r{ZgjfpbuX)
z_r0=0y3)T-j$gljPyEJO*6Y<pj7G72aLoqaTpc=#u)*xJcVUm0;k(n;r)^DQNa6Oj
zWvlHEGg~lz`Q_8`yQ9<BZ;ylE1Z}bD<8}<uB9Y5lRB=;JUD0h?_)4H&EhJjvwzJx?
zr<o_vk&75j_5^d$8Z$_Oc1=R-I_DlNZ2@~81oV*J6%o3RuiCz}^VEW>$#Ycbp^w@t
zTl%=f1t=w+WpJzF<|CE@?SCNAz)%9?w33lQ8vrHJqPfH9@}qs*QXOG71W=ylx;wOB
zcx!Bj^)Yy6WX$a^vBkBJ5Cob<oubBW+a#9bX{0bkMTX_2sIrs`HMk@$q{dK5h2-g}
z({`~#gm#G?c#>qlaDx_B0c<3b+8)f84LCrt;e;qxc+7>VbwVK{skNv!wvBiTa^9Iu
zkwP;VK)jH$WJ{`MRwAA9fal!y0dtV;FWg8PTkWU>CwnqD>1ZX2B@;$DlX%C5MI+}{
z9xQVnffR*~v2KAUj*hCd<gRCjR7~6Q(vF%VT7j97iv3j4Z0p%mU`7t061~l7b$!#t
z3*Pveii}aQ?QD9aiX>gul~`bk#mk`o>zk9)<2Uc8?hUZAEvd!`9em)~$Z)zev>w^8
zyAgCP_$&Y)7HSQ84`xG}OeTavaEswwF|8Xpi5iZzZa@hCiv(J-%bfFC&)HLlO+Rhw
zG6g?9eL5&A!SuJnQ6}LxG%tU+@vZ`i+!+Rz6iYvsTdhnPo7lW{m-}{hya@viX4)XZ
zngaw+j;gloB#|UwI@8sOmQpc`h+bicQJnQIB5eifIMQNgD2+oai33m!34~xU|0Azj
zhu$8z+T5^;Pxx@d{N)pzOJLSa^e;aDf$W%N5XcOf!mGC9l9j$Ev2h6N+6ZQC+CJzl
zaM7?S!SrFLS2DASjj(h6y1WN3N?|bmqmyzm!&nLoE|`rKBOc_yDF$a#FsUn!IQf(t
zdC&Us(kQz*7mv<H12p@UD8XaLXdK{>H^j*^MC@>wTDb}g%~sx*ng#>{@lR=XG-Z5_
z#<9*Oh0joMzt;nS)ObAp)347`D=}r-;nV!TbIq&xrGRGsF6fZg+!VkfUei@_&l-M&
zPqQ+Dw)RV}+)I8RuqAxa`Pv8e&!_gXS=e2-un>=Ktn}-;%lLZxaVn?Q>yZCb2R3Wk
z77zr%;Rq&h|2ncqyKYmFI0148JVY7Q$V5p=dWj<MQ<8r+@QLq+UROk&%quICq^O2C
zp(17882jXI)_GaqzAY4AjoB_nh8k*r1mJ>+Qqpu%i|xp2<qF`Tw6&3`h654ceBjZ7
z4>C=WaOb2Wudn^h0EcD%$p9YVU1fnoRV9`(cy(vv6K>FXS!2jY>1GnU--7)4usH&K
zao*&P^@9~YmUe|ZdLW@C>H;!*<TIU}-!Tw#3oqZA*99}b3&uHiGO<{I6!pMnAiQdS
P!fA@Yk4s_DjDP<F98V`a
literal 0
HcmV?d00001
diff --git a/tests/resources/small.git/hooks/pre-receive.sample b/tests/resources/small.git/hooks/pre-receive.sample
new file mode 100755
index 0000000000000000000000000000000000000000..32b597f8c32399017c3d7d51134d1f6490ad9575
GIT binary patch
literal 601
zcmZ`#+iu%141Kn~f>Vt3>Nw4M*;=?j(TBD#O@XCv0|MEhA;z}kTFRv@`tPHhp=&Yh
zg%Zhg4i7o_k{a5i&f5;tZ==%}^Sn4aD_6%qs<o-wO_Prn;}`SPs_*$C$(7T|$#C3`
zPt%-C8gelZ1GqAP8`ZQmg0{8-S9H{R@D>_XAuJt&EumdH4Yu`UjT<s+s_~uXh}qA8
zg|_HG)%7Pdc&$7*uR0HF@)~vmFjqyD?XZwCbLen^h5t)sm9<90Oa!@Y%8!~rF8G?W
zkzmCF8kMtuuelMHIAlqqnm?72LeGM1J4`w(kX9&%LQn}ForlDLjBoCyvxmo@x3kH^
z^loxLyPiDWPo-cBMnsg2M6}kuPGHEGBqVkC{D&9Kt%xFAsTw4QC1$_=fwBl=3dI+e
zaSxI}JS}=Z(Ec80eF`!Zq3mqapXI|UN!a)t;@4hcu%Eq2xcoW}%!><-+XHTuHss+b
YOmM2;hq8Egm*4=7_P9T{21QBYH*F=mfB*mh
literal 0
HcmV?d00001
diff --git a/tests/resources/small.git/hooks/prepare-commit-msg.sample b/tests/resources/small.git/hooks/prepare-commit-msg.sample
new file mode 100755
index 0000000000000000000000000000000000000000..b1970da1b6d3f42f00069fd17c325de72cda812e
GIT binary patch
literal 1702
zcmb_cTW{Mo6n>t6#i?x6xmZ$SFLf{QfG*3r0L?Pg?px55l8$UTGO3bO;spKi{V3XX
z))weX0X>M9bNMcZ-6yG%>(n}JI2|25dr<ew@wmMG{l(3lx~bQz>}WZBP@ih?JX^+@
zu#5O48P>yRX(m<b*PU*sORp92TCD1dX`%HE+1$w5k<(Ngu7zQ83#MGJR?<<W=d@yL
z#heqwo{FmCg0g#x<~R+PBD#}q(MBn;V$x;%UrJPP3*l%XtlvTWChI2SfJ$9e`YvSj
zRSOQ?NUgSMLI`3vL48YBHzwzVXoe7v0ef|0YHgV$N@?N(-R)rPqRDrK$n(%+OF$`P
zWdjC5N~`#RjV9}aYwQ4_yF5O-$h2`>fDIhYP)doc1&TADZa@ZGpusJ$6G+e$ZMcmC
zoOosDQPS}l{H?YPsq(4;0SGkATa9eeqAaDcj<jNAU+LTSmM1jo(ti~T0B7acJnnTX
zTarYy;Husdxal0!S<ba8=uu&a*SNYtr7|d7$g-q3_JHER2*oBsVW~i~XXdMx%LW~0
zT*960LF<qZ6K&FZ;vGmtyywBU3^Q>q8n2wALbFwU@2i@FAaRV!=uw-nwx1gKn2SvY
z>Ff>;2sg!+Hxfkwv1lsiii=p6WenF=5)6LZc<a$zDCD$GRuwvG4Fr+B#h?#9gTbio
zk#MdxC@WY%zSGN#i}Ts_#q`bf-{)`7CcWeB*7WlIyHjioJJWw&A5VItPUq3^9!r}S
zbykelFV-VFvcr>QaZ=aS_}+-4Y&?!@HWh|<^gJ21!|T@+%On#w6azxPHV}XsRbe*w
zR_TZ2XEsQa1lPK~biYqg@0-RW@5J1@=<87cFzEUABdCoFH2CZo?}l(Z*!OFqUxo>K
z_d`l#4d9|H6;VPT{X?^{VJ>oL|D7K{BJwwqB>`YcPoGk+9hbvHnoQ{EM|kPgD_`wk
zKm4#2xu;-y`RAm!=L_BnLvJ8$AZm8@?)v<%vwvsw8AF2x6!mTT;c72A_~U9nIq0ST
zv)N0!I!^1p=g8-RQfx5)E_Mb_4I2vtQpI30XZ&t<!9D6nI|;V7YR3)l6=S~QhuwLQ
g$e&^kTY-M99*<-Iw@(78*M5W!4}U}|8YyMx3->-9h5!Hn
literal 0
HcmV?d00001
diff --git a/tests/resources/small.git/hooks/push-to-checkout.sample b/tests/resources/small.git/hooks/push-to-checkout.sample
new file mode 100755
index 0000000000000000000000000000000000000000..a80611e18896f212c390d845e49a3f6d5693b41d
GIT binary patch
literal 2840
zcmai0U31$u5PXh)#YOS7cE^-rw@uolNhe9&aUS|HtvhX>G$45tVUYj>fRdF?|9kfU
zNR~aG=E)WbEbeyq7JTw}ZuHIE2kUtL<<n;$&G!2F^Je|kx2ug=4L5!H^!ogx`7o$&
z%Il(3zAe6<oe$^F=A|}s`8}CDp*M#3M)gC-)LOeDUpYMl3YNy9R)I-T)pE7sy09aj
zJ7%&5PnSB-F#2{jc><WLR{I2izuK%VHc+{hRfXe<^_q)8RjcE(6WX+F2)iAtDtI{x
ztAHVBq)eSp_E^xcV^i_5KLIHA$g{zEjh?rsacu+(Eyvve2~Kmw%;n3g(kWB56j~Js
z<yE5tYUsAR&PY0wgRvM8x!zgLX8SI!eVY&}YZ|>AoeCNptd-NM1aZLhESzC;I`+Ns
zfmNNjdAp^W8#Q*}l>CT7RB9F5(BbI8ly2l~+E};JW|>&d1)=epZ-8vm8ppkbEVn#R
zt30a5A-c(YQR8eM5%;|UAnO>rt!&@x@G@yp+92%w-}%(5P_+P&Wf_zb$f-Qrl5(7z
z2ah(bkE;!DK(&aAMuQ%1TS>ai?wSXCOCSj=_}8x4IbCx^$}9q)<W{Y<9o<WqZo^oV
z3bR;Qa%VT-M~kU@2n{=+=)C!MD`12;@<F*EtPfV3K#g^1%&ggH?4{m<R$WEKcUI4%
zl6{ik6Bo46pmNjdXg5@?hn;SjG$}rr3Gy#-BGgVD$8oD)OcK(oqca)L_kk)UBZ_&6
z*ourb#Yc8l3J+uSda_Y$GY--5Zp3QK9w^?P%E0xb57?fY+Q#+KU4)+R>whwv)SBt|
zg#MX4;;Oau`m=MI9(^&zPbueY@~>3*ixX%mvR5m_1&nAg@ZKvY1E$O}&EtLiG;mhV
z1xhMIm~fGjmf_#{62f`y;09?I7M1W2tWQvz<}i9lR>OpQyUJi45_&*pQus&EkwY<>
zI|ZAx=*3i9a-)g)hXkvO7>UJ5MNgL(Z+-wpXVcgbSgpmFmbf1~DPA(OVGI&FNLeIE
zNH!_aiH$vsif$_j7=T2{cS(!DOI`~bn@)vSd-0d7xL=DF;UNP|tW}4i<qWTSNCe|y
zg9kV&L9g;FhC@tvsVu#V-brqShHy2qZpA!gg{ZTY>h>DvHtu9tY_pbJ6x(6E*hxgC
zzNDao%qlr-IE%YGbS4hF!n!on7#W3$bX-_hbZAaws^nHu#)Dx=WzdbJ>AKzAy@T$x
zSWE^x9+|TEHVEPyaPYa0DOChp?AeHSBBDbZNokQpAY{lE!7geZI=jV)G^2@<iI(N4
zJLJjy@Y<VI$yq;3bVpJICW3D?YDMDl4Oe5pDf{d`i1_3Qx%4uX`$dOz<9e}jm2B-u
z8-?%!7XuiNF2O&MAdl-iw{drG+$F^zT2Z7WRV#c6;IRZEf>l)&91Zb1+`T+oq9wWF
zRV~kGTGce0O~p^6mj{kT5kL(pv>r;Lvd7VDX*P>A^Th`$3cWO<svk?_?FeP@458*2
zA1PqUOdd%VP5&4~ocLK16{1GLll64c=mU81RMFstpnMoLuI7hNIE4N)U%h*#<Fz{C
z9#>0<l7lRrls|W(8=W1O80P~6+U7<4BqC}N4-4GR3w#{O7YmH?JxwJfru2d?e){$5
z@5R+`7Z;1)FW;OkE-(HPisCS;5F4O^Q>L81p4Ysdo3ZP1(SrR-peEdTo;-@bkB((G
zPHYQXUL!@Q$e(OQ;R9r%@Afz+50I7>*^^c&&|E*r-jN)LH=pM4AqMwWxSv|nqjddE
Z4{_hwv8!W(<d3>T<BC%y-6O5i(|^OS%`gA}
literal 0
HcmV?d00001
diff --git a/tests/resources/small.git/hooks/sendemail-validate.sample b/tests/resources/small.git/hooks/sendemail-validate.sample
new file mode 100755
index 0000000000000000000000000000000000000000..effa6f54f2a5c1f0cebe258bf4805904be4b4852
GIT binary patch
literal 2365
zcmb_dU2oe)5PUX&#g-Nv2{5JDX_MA~3%Iq?8g*kpRgpYZIFU=~BI=I5J6e{T{`bz^
zk+$48h#x9Ika!=nv$M0y{clD}-j1x(hDWbnzP?l2k8j?TH{brS+Nf21yPm)Nczma>
zYw`X3V>TCdnSD1ru8&`j=2DIPbCT@SnIgUw>$+lEYP}+x8(BMYnr=iT3*ndq)xzaV
z>I+qjv}vC#8_9M+b1p#uNS0M0)q<p>8!3p_LRQ0MA3M`!2foxzRUjbFY@}O~(ki=S
zqscnq8cU*dY)D$$cqE}n)V0yIk>CNKHCrndOtSP*HbOb;nbwAHSb;R+gs^?^Dve%)
zoW}t(*D}$>O3ab0TS^-;J|u&sb-PkZzo#kn*#xYt(;<xzKW(YtQZ$u23?yV#kyh1~
z@+Idh;EG5jXx8@%<;Y_W8SA=|T;MPQ)TB!!<QcbU)YR4)79eeeg4|vp-8jm%Dl3^I
zRS7+i3>FGuwzSb^g&RDiGcOz9TB;Hu`nJh)$W=C=XCSm2AY=$w3G3P-V#Oo+N*;#2
z4ijJ-pBZ=;T(RTgp_HYrD!uW-dTMfkuqY5jwOy)~gM;#=P^i{!l7`pXTS^s(&^{RU
zydaw}OpS#^D1cXM8?FW+fh`t7D(g;yr6|}fdaNtZBx3hlK~IpkTu3!Qq%R+zAo#<L
zU&m+XIFB0>t}Bs8^3$vHD+-TGT@`F>H1Cc#WAVW;&$S6%fE2d6@kLS0g&ihIM{}0z
z8#XhD>b>3{(BH|Px7}&lJ4%y1v<t$W+!F|W@<gaU4;MqSHKR(wdML<XnCv;zaPrSi
zxVCvek26-bf#Q!H+uAgy_{e_1UZCq>(CihZJx@8MPoGdl*BJGD;usf*iS7%;{Joe;
zNFuBa>*~o&qETDPo~u&~$FxE1xb^x&(CbE`Y3GfsibL2rl+L;>P6j&Y3U>K$mkp*6
zd`Q{<^+^&;GskGjwD-%!boR&i-TC<Uvy02w+l$Nb?B}aL-%ZDpluqd=K_@}z*fyuV
zzAs1Hf?3v$k!X7GTc8Q=r`WJ_Uu=>A9UOR|@=GYb5x#<f&WSMH%mCJU<#=7=qFdL6
zG?Wz&6z&J<@I(B>+dhd7fkaVIR^pol`Mv+rUbmZ43dVL6^S7g3{NsPiG$iy$5EDB%
z6KIgnb$H(n&t3e4E6d4V7w^B?JS}JkG)PM6+X3Co`SQs($O*AA+MG~{S7RJ=cy-l&
z>~%3y`tjfx2>uOu<lDGWewcb=oL@}B@B6FCZ?oxSJWldrm%UfOduahk%C0H>tB_^s
ziwG=e=ch|FQ0IkN91US7rhdQkXhwwt$gU0WEVDjo=IPb+?6PC=s8}J*ua(Ms))`UL
fi$|vMHn?H<rred|1&u#kOoJ`%vx)-*g-ZSfgGvdP
literal 0
HcmV?d00001
diff --git a/tests/resources/small.git/hooks/update.sample b/tests/resources/small.git/hooks/update.sample
new file mode 100755
index 0000000000000000000000000000000000000000..c3430f6fe27e572a1cebdb2f2bec70add0642a60
GIT binary patch
literal 3707
zcmb_fU2oeq6n(aS#jRq*br9K3y0z;^QgkUc^kKlTrB6js&@yE)mPu8l<0$L?`wmIT
zrsXba))@gJi@e|G+<SfSXe`CeSQ}OG@sr8ZTUlQ{dzM}Q@O-hBi}GeUom`#X%FiYH
zX?m4Rna-0RN2lfK)A3ZuvHcz$L<jUn62D=~vfz{}wIH2VqBLX_O$(JSXeF7H$}q!c
zWY}C&R;eX%X?P{%d;|>_tSE3ettp-hLlsZCxaLX8(nU;bVRB;Ce6@s#eu2|WvLz>-
zvy(&>Gyfp@+BtKnpqWkKi^+v{4jn_pNw_zeuxE<mRXKx8G3;9pl+45&4~hHW!G@wo
za7?X(0B}HbX*ExkDmas*xzV)FxygC8AL?2Z1x-0QJqS@qn8sD7r{bm30@<%eL_gOw
z;~85O$Xw2AS}Qp)5ViRUe3|ir8;&&I<B7Y6^!kkNyYXF4EY(b8_5DsTYn_&?wkdEz
z0y$tADo<&}nGs5kg2-J=0KlEGPb(%<An(pXY{K`qIZCuwiT|2{8JD&5o_~`o6<;dD
zi@J#zCE4={8j%<uy|iutvHu1QKo5Tno-BAF2FwD%%O#UDDum=w!;!PNe-cOFNX4)5
zd>TifiGO|)w}OANj2n2D^K=o3j6P6uOL70#cbA{uzWXDlk1wr9GV1X(2W{RuTvjXV
zCmd<W?kH^?PXjkbF`XZtwu1B+%4@ZvHIwGpJ*8@8`MWAhq^B|Hj1lzj3R8bVuMpNb
zz4Gzk!3T3bY;WEGIww&kq9BYW6EP*q$K|EB-@TH(Fjtz*`HMTO?i+3E;5tdSah&xZ
z+t!x4K7)dpy5wiJhlJz~8qF|r8a&-SV7^I3C@_q=P`yt@_x_F-;PQR)fzP<zNN>8u
zH%V`94=q3)Dk)PHNrnFC(T1)Om6f{Usj;u1R->&XoCYVK2V3ZlgZuF?N}1+33<P7e
z<0yVF?Qoa{l#7q(3&jv=Ab)gpM8A7`p%3InNzSxy)ZER2U0C#9zKpnLY0I?>OER*x
z*9Z=L=zI8CN>A_^jYjt0F$psO$sL=38q5q|SG)qCN6{^>RFh5E&l5GZ$pEahnF&d+
z5c>64t}uJPkf~_!VUj#&N%nC-gUMj%=@B=!V>&}xtj2%@-mOm#rQUSJ3(ccmc+fza
znZ#uxF>N?QN5UrIEd!5RgHEf<eGg}P45aAs(Xs6u!XW9r1I*E6XJ^1movX@xYLuPz
z|7xBN4z@b}#x>W#;(nKYF+D<*rdshJ$X-z2OZ2X;)nn@KSVdVhaA?}@3;6gZxb4<W
z`9sa`0lR_azMX|=t_(FvG@%w2);9P}SG0u&K1(*oX3};~=<<!N*F$UTSxD{V&6mgL
ztw9Ntc2eOF@c!OJytNC4T^#)Ien7-`dI{6s#co~0f^E3J<0Ty)l3xq2u@Y8DXTK>v
zozoWSr{{+!h}zGpumG3H`=AvWpm^9kW;J$Jp^Xl*?8ckr`fqN%c|Z;VC0|cM4vSrk
zH_O8Yvh85nvJp^;``wo8=z0f`FWg?`>gO#y1hjX1{}rTlg9rwIKia8eyGexA3GnuR
z`Rg~XZoW;0pA)vI8=p5!+6sIn#C^FCvR>ffv39h6SCNi9v);%WD;WZ`of_MgwyRWy
z-yY%n*Y>X8<Sf+RyB|Sr2YG@1w~%U$o`(5EDkJ|3$u=eMZA&_wxEsy<Xxh2k^tP?4
RGx&ZHQs^8zuIpu!=pQhfp8Eg*
literal 0
HcmV?d00001
diff --git a/tests/resources/small.git/info/exclude b/tests/resources/small.git/info/exclude
new file mode 100644
index 0000000000000000000000000000000000000000..a5196d1be8fb59edf8062bef36d3a602e0812139
GIT binary patch
literal 240
zcmXYqK?=e!6h!x)VxWtv*mf_t5?px$aS_{}Hj?C*<cHXeXE&AZhT*-L3ZoI&*l1%Z
zqG?zr3TvQGZ__}H4(u*%p*rI=cU!%ya5ugfGATh66$IJHgu1Gs0-<N;$V+SsdE)?u
zIq;i$f#WE4f$_MWicZjMEob9LWKMR#iwZq54~QgST^6=i%u0lUkJu-_J**QBMq}ZG
Wth_)NDbl|`oQr&HAFQ5h`0jqAIZ*BZ
literal 0
HcmV?d00001
diff --git a/tests/resources/small.git/objects/88/f9a6eaa2cf008b9bc92847178621f21fa99f3e b/tests/resources/small.git/objects/88/f9a6eaa2cf008b9bc92847178621f21fa99f3e
new file mode 100644
index 0000000000000000000000000000000000000000..7d80b8d78e0dc55669d831a6638f48ec9fed0982
GIT binary patch
literal 50
zcmV-20L}k+0V^p=O;s>9W-v4`Ff%bx$Vkn}$!Ay}rnY6F$m-Kg*KD_+;Lx#g4|^&N
I02NaX#p`nv=Kufz
literal 0
HcmV?d00001
diff --git a/tests/resources/small.git/objects/af/5626b4a114abcb82d63db7c8082c3c4756e51b b/tests/resources/small.git/objects/af/5626b4a114abcb82d63db7c8082c3c4756e51b
new file mode 100644
index 0000000000000000000000000000000000000000..822bc151862ec3763cf2d3fa2372b93bbd3a4b65
GIT binary patch
literal 30
mcmb<m^geacKghr&@q@?NlP9kSYMj?U<r(;diNWtH+YSKNt_|)0
literal 0
HcmV?d00001
diff --git a/tests/resources/small.git/objects/c6/97d4f7a6eac8d7b131673c340bd3cc5bac14d4 b/tests/resources/small.git/objects/c6/97d4f7a6eac8d7b131673c340bd3cc5bac14d4
new file mode 100644
index 0000000000000000000000000000000000000000..5adfa88cc6b00bb19f2031b8ab61f6d07f9ccdb8
GIT binary patch
literal 130
zcmV-|0Db>>0i}&W3IZ_@1U=^!a~EV1casc=c+{&un1qQN*i9hD|0|m(2n|iwp*q%W
z%N;b$hu%cM`$TMo*~EnC1BFP&Pfj~;jZVKXQ96s_PhV<-XAROi+@-v8dBLUa`!;GB
k^i<X>XlEv8$>R)1G>9th&t3j;s7J{?^9n<zzF|~BaA?ar-~a#s
literal 0
HcmV?d00001
diff --git a/tests/resources/small.git/refs/heads/master b/tests/resources/small.git/refs/heads/master
new file mode 100644
index 0000000000000000000000000000000000000000..4eb36d22298f060fd324155ab854d9d6486fc498
GIT binary patch
literal 41
ucmV~$!4Uu;2m`Rc)5uXl$AMP&AHjriQg~T$i(A>|7U^`%mXoWC24Q^m!3%@{
literal 0
HcmV?d00001

View file

@ -55,18 +55,22 @@ readonly NIX_INSTALLED_NIX="@nix@"
readonly NIX_INSTALLED_CACERT="@cacert@"
#readonly NIX_INSTALLED_NIX="/nix/store/j8dbv5w6jl34caywh2ygdy88knx1mdf7-nix-2.3.6"
#readonly NIX_INSTALLED_CACERT="/nix/store/7dxhzymvy330i28ii676fl1pqwcahv2f-nss-cacert-3.49.2"
readonly EXTRACTED_NIX_PATH="$(dirname "$0")"
EXTRACTED_NIX_PATH="$(dirname "$0")"
readonly EXTRACTED_NIX_PATH
# allow to override identity change command
readonly NIX_BECOME=${NIX_BECOME:-sudo}
NIX_BECOME=${NIX_BECOME:-sudo}
readonly NIX_BECOME
readonly ROOT_HOME=~root
ROOT_HOME=~root
readonly ROOT_HOME
if [ -t 0 ] && [ -z "${NIX_INSTALLER_YES:-}" ]; then
readonly IS_HEADLESS='no'
IS_HEADLESS='no'
else
readonly IS_HEADLESS='yes'
IS_HEADLESS='yes'
fi
readonly IS_HEADLESS
headless() {
if [ "$IS_HEADLESS" = "yes" ]; then
@ -156,6 +160,7 @@ EOF
}
nix_user_for_core() {
# shellcheck disable=SC2059
printf "$NIX_BUILD_USER_NAME_TEMPLATE" "$1"
}
@ -381,10 +386,12 @@ _sudo() {
# Ensure that $TMPDIR exists if defined.
if [[ -n "${TMPDIR:-}" ]] && [[ ! -d "${TMPDIR:-}" ]]; then
# shellcheck disable=SC2174
mkdir -m 0700 -p "${TMPDIR:-}"
fi
readonly SCRATCH=$(mktemp -d)
SCRATCH=$(mktemp -d)
readonly SCRATCH
finish_cleanup() {
rm -rf "$SCRATCH"
}
@ -677,7 +684,8 @@ create_directories() {
# hiding behind || true, and the general state
# should be one the user can repair once they
# figure out where chown is...
local get_chr_own="$(PATH="$(getconf PATH 2>/dev/null)" command -vp chown)"
local get_chr_own
get_chr_own="$(PATH="$(getconf PATH 2>/dev/null)" command -vp chown)"
if [[ -z "$get_chr_own" ]]; then
get_chr_own="$(command -v chown)"
fi
@ -915,9 +923,11 @@ configure_shell_profile() {
fi
if [ -e "$profile_target" ]; then
shell_source_lines \
| _sudo "extend your $profile_target with nix-daemon settings" \
tee -a "$profile_target"
{
shell_source_lines
cat "$profile_target"
} | _sudo "extend your $profile_target with nix-daemon settings" \
tee "$profile_target"
fi
done
@ -1013,6 +1023,7 @@ main() {
# Set profile targets after OS-specific scripts are loaded
if command -v poly_configure_default_profile_targets > /dev/null 2>&1; then
# shellcheck disable=SC2207
PROFILE_TARGETS=($(poly_configure_default_profile_targets))
else
PROFILE_TARGETS=("/etc/bashrc" "/etc/profile.d/nix.sh" "/etc/zshrc" "/etc/bash.bashrc" "/etc/zsh/zshrc")

View file

@ -39,7 +39,7 @@ create_systemd_proxy_env() {
vars="http_proxy https_proxy ftp_proxy all_proxy no_proxy HTTP_PROXY HTTPS_PROXY FTP_PROXY ALL_PROXY NO_PROXY"
for v in $vars; do
if [ "x${!v:-}" != "x" ]; then
echo "Environment=${v}=$(escape_systemd_env ${!v})"
echo "Environment=${v}=$(escape_systemd_env "${!v}")"
fi
done
}

View file

@ -83,12 +83,22 @@ nlohmann::json SingleBuiltPath::Built::toJSON(const StoreDirConfig & store) cons
nlohmann::json SingleBuiltPath::toJSON(const StoreDirConfig & store) const
{
return std::visit([&](const auto & buildable) { return buildable.toJSON(store); }, raw());
return std::visit(
overloaded{
[&](const SingleBuiltPath::Opaque & o) -> nlohmann::json { return store.printStorePath(o.path); },
[&](const SingleBuiltPath::Built & b) { return b.toJSON(store); },
},
raw());
}
nlohmann::json BuiltPath::toJSON(const StoreDirConfig & store) const
{
return std::visit([&](const auto & buildable) { return buildable.toJSON(store); }, raw());
return std::visit(
overloaded{
[&](const BuiltPath::Opaque & o) -> nlohmann::json { return store.printStorePath(o.path); },
[&](const BuiltPath::Built & b) { return b.toJSON(store); },
},
raw());
}
RealisedPath::Set BuiltPath::toRealisedPaths(Store & store) const

View file

@ -69,7 +69,7 @@ struct InstallableFlake : InstallableValue
*/
std::vector<ref<eval_cache::AttrCursor>> getCursors(EvalState & state) override;
std::shared_ptr<flake::LockedFlake> getLockedFlake() const;
ref<flake::LockedFlake> getLockedFlake() const;
FlakeRef nixpkgsFlakeRef() const;
};
@ -87,6 +87,4 @@ static inline FlakeRef defaultNixpkgsFlakeRef()
return FlakeRef::fromAttrs(fetchSettings, {{"type", "indirect"}, {"id", "nixpkgs"}});
}
ref<eval_cache::EvalCache> openEvalCache(EvalState & state, std::shared_ptr<flake::LockedFlake> lockedFlake);
} // namespace nix

View file

@ -185,16 +185,16 @@ std::vector<ref<eval_cache::AttrCursor>> InstallableFlake::getCursors(EvalState
return res;
}
std::shared_ptr<flake::LockedFlake> InstallableFlake::getLockedFlake() const
ref<flake::LockedFlake> InstallableFlake::getLockedFlake() const
{
if (!_lockedFlake) {
flake::LockFlags lockFlagsApplyConfig = lockFlags;
// FIXME why this side effect?
lockFlagsApplyConfig.applyNixConfig = true;
_lockedFlake =
std::make_shared<flake::LockedFlake>(lockFlake(flakeSettings, *state, flakeRef, lockFlagsApplyConfig));
_lockedFlake = make_ref<flake::LockedFlake>(lockFlake(flakeSettings, *state, flakeRef, lockFlagsApplyConfig));
}
return _lockedFlake;
// _lockedFlake is now non-null but still just a shared_ptr
return ref<flake::LockedFlake>(_lockedFlake);
}
FlakeRef InstallableFlake::nixpkgsFlakeRef() const

View file

@ -342,8 +342,7 @@ void completeFlakeRefWithFragment(
parseFlakeRef(fetchSettings, expandTilde(flakeRefS), std::filesystem::current_path().string());
auto evalCache = openEvalCache(
*evalState,
std::make_shared<flake::LockedFlake>(lockFlake(flakeSettings, *evalState, flakeRef, lockFlags)));
*evalState, make_ref<flake::LockedFlake>(lockFlake(flakeSettings, *evalState, flakeRef, lockFlags)));
auto root = evalCache->getRoot();
@ -443,42 +442,6 @@ static StorePath getDeriver(ref<Store> store, const Installable & i, const Store
return *derivers.begin();
}
ref<eval_cache::EvalCache> openEvalCache(EvalState & state, std::shared_ptr<flake::LockedFlake> lockedFlake)
{
auto fingerprint = evalSettings.useEvalCache && evalSettings.pureEval
? lockedFlake->getFingerprint(state.store, state.fetchSettings)
: std::nullopt;
auto rootLoader = [&state, lockedFlake]() {
/* For testing whether the evaluation cache is
complete. */
if (getEnv("NIX_ALLOW_EVAL").value_or("1") == "0")
throw Error("not everything is cached, but evaluation is not allowed");
auto vFlake = state.allocValue();
flake::callFlake(state, *lockedFlake, *vFlake);
state.forceAttrs(*vFlake, noPos, "while parsing cached flake data");
auto aOutputs = vFlake->attrs()->get(state.symbols.create("outputs"));
assert(aOutputs);
return aOutputs->value;
};
if (fingerprint) {
auto search = state.evalCaches.find(fingerprint.value());
if (search == state.evalCaches.end()) {
search =
state.evalCaches
.emplace(fingerprint.value(), make_ref<nix::eval_cache::EvalCache>(fingerprint, state, rootLoader))
.first;
}
return search->second;
} else {
return make_ref<nix::eval_cache::EvalCache>(std::nullopt, state, rootLoader);
}
}
Installables SourceExprCommand::parseInstallables(ref<Store> store, std::vector<std::string> ss)
{
Installables result;
@ -604,28 +567,28 @@ std::vector<BuiltPathWithResult> Installable::build(
static void throwBuildErrors(std::vector<KeyedBuildResult> & buildResults, const Store & store)
{
std::vector<KeyedBuildResult> failed;
std::vector<std::pair<const KeyedBuildResult *, const KeyedBuildResult::Failure *>> failed;
for (auto & buildResult : buildResults) {
if (!buildResult.success()) {
failed.push_back(buildResult);
if (auto * failure = buildResult.tryGetFailure()) {
failed.push_back({&buildResult, failure});
}
}
auto failedResult = failed.begin();
if (failedResult != failed.end()) {
if (failed.size() == 1) {
failedResult->rethrow();
failedResult->second->rethrow();
} else {
StringSet failedPaths;
for (; failedResult != failed.end(); failedResult++) {
if (!failedResult->errorMsg.empty()) {
if (!failedResult->second->errorMsg.empty()) {
logError(
ErrorInfo{
.level = lvlError,
.msg = failedResult->errorMsg,
.msg = failedResult->second->errorMsg,
});
}
failedPaths.insert(failedResult->path.to_string(store));
failedPaths.insert(failedResult->first->path.to_string(store));
}
throw Error("build of %s failed", concatStringsSep(", ", quoteStrings(failedPaths)));
}
@ -695,12 +658,14 @@ std::vector<std::pair<ref<Installable>, BuiltPathWithResult>> Installable::build
auto buildResults = store->buildPathsWithResults(pathsToBuild, bMode, evalStore);
throwBuildErrors(buildResults, *store);
for (auto & buildResult : buildResults) {
// If we didn't throw, they must all be sucesses
auto & success = std::get<nix::BuildResult::Success>(buildResult.inner);
for (auto & aux : backmap[buildResult.path]) {
std::visit(
overloaded{
[&](const DerivedPath::Built & bfd) {
std::map<std::string, StorePath> outputs;
for (auto & [outputName, realisation] : buildResult.builtOutputs)
for (auto & [outputName, realisation] : success.builtOutputs)
outputs.emplace(outputName, realisation.outPath);
res.push_back(
{aux.installable,

View file

@ -67,7 +67,6 @@ config_priv_h = configure_file(
)
subdir('nix-meson-build-support/common')
subdir('nix-meson-build-support/asan-options')
sources = files(
'built-path.cc',

View file

@ -669,7 +669,7 @@ ProcessLineResult NixRepl::processLine(std::string line)
ss << "No documentation found.\n\n";
}
auto markdown = toView(ss);
auto markdown = ss.view();
logger->cout(trim(renderMarkdownToTerminal(markdown)));
} else
@ -760,7 +760,7 @@ void NixRepl::loadFlake(const std::string & flakeRefS)
void NixRepl::initEnv()
{
env = &state->allocEnv(envSize);
env = &state->mem.allocEnv(envSize);
env->up = &state->baseEnv;
displ = 0;
staticEnv->vars.clear();
@ -869,14 +869,8 @@ void NixRepl::addVarToScope(const Symbol name, Value & v)
Expr * NixRepl::parseString(std::string s)
{
return state->parseExprFromString(std::move(s), state->rootPath("."), staticEnv);
}
void NixRepl::evalString(std::string s, Value & v)
{
Expr * e;
try {
e = parseString(s);
return state->parseExprFromString(std::move(s), state->rootPath("."), staticEnv);
} catch (ParseError & e) {
if (e.msg().find("unexpected end of file") != std::string::npos)
// For parse errors on incomplete input, we continue waiting for the next line of
@ -885,6 +879,11 @@ void NixRepl::evalString(std::string s, Value & v)
else
throw;
}
}
void NixRepl::evalString(std::string s, Value & v)
{
Expr * e = parseString(s);
e->eval(*state, *env, v);
state->forceValue(v, v.determinePos(noPos));
}

View file

@ -28,7 +28,6 @@ deps_public_maybe_subproject = [
subdir('nix-meson-build-support/subprojects')
subdir('nix-meson-build-support/common')
subdir('nix-meson-build-support/asan-options')
sources = files(
'nix_api_expr.cc',

View file

@ -326,6 +326,10 @@ nix_value * nix_get_list_byidx(nix_c_context * context, const nix_value * value,
try {
auto & v = check_value_in(value);
assert(v.type() == nix::nList);
if (ix >= v.listSize()) {
nix_set_err_msg(context, NIX_ERR_KEY, "list index out of bounds");
return nullptr;
}
auto * p = v.listView()[ix];
nix_gc_incref(nullptr, p);
if (p != nullptr)
@ -335,6 +339,26 @@ nix_value * nix_get_list_byidx(nix_c_context * context, const nix_value * value,
NIXC_CATCH_ERRS_NULL
}
nix_value *
nix_get_list_byidx_lazy(nix_c_context * context, const nix_value * value, EvalState * state, unsigned int ix)
{
if (context)
context->last_err_code = NIX_OK;
try {
auto & v = check_value_in(value);
assert(v.type() == nix::nList);
if (ix >= v.listSize()) {
nix_set_err_msg(context, NIX_ERR_KEY, "list index out of bounds");
return nullptr;
}
auto * p = v.listView()[ix];
nix_gc_incref(nullptr, p);
// Note: intentionally NOT calling forceValue() to keep the element lazy
return as_nix_value_ptr(p);
}
NIXC_CATCH_ERRS_NULL
}
nix_value * nix_get_attr_byname(nix_c_context * context, const nix_value * value, EvalState * state, const char * name)
{
if (context)
@ -355,6 +379,27 @@ nix_value * nix_get_attr_byname(nix_c_context * context, const nix_value * value
NIXC_CATCH_ERRS_NULL
}
nix_value *
nix_get_attr_byname_lazy(nix_c_context * context, const nix_value * value, EvalState * state, const char * name)
{
if (context)
context->last_err_code = NIX_OK;
try {
auto & v = check_value_in(value);
assert(v.type() == nix::nAttrs);
nix::Symbol s = state->state.symbols.create(name);
auto attr = v.attrs()->get(s);
if (attr) {
nix_gc_incref(nullptr, attr->value);
// Note: intentionally NOT calling forceValue() to keep the attribute lazy
return as_nix_value_ptr(attr->value);
}
nix_set_err_msg(context, NIX_ERR_KEY, "missing attribute");
return nullptr;
}
NIXC_CATCH_ERRS_NULL
}
bool nix_has_attr_byname(nix_c_context * context, const nix_value * value, EvalState * state, const char * name)
{
if (context)
@ -389,6 +434,10 @@ nix_get_attr_byidx(nix_c_context * context, nix_value * value, EvalState * state
try {
auto & v = check_value_in(value);
collapse_attrset_layer_chain_if_needed(v, state);
if (i >= v.attrs()->size()) {
nix_set_err_msg(context, NIX_ERR_KEY, "attribute index out of bounds");
return nullptr;
}
const nix::Attr & a = (*v.attrs())[i];
*name = state->state.symbols[a.name].c_str();
nix_gc_incref(nullptr, a.value);
@ -398,6 +447,27 @@ nix_get_attr_byidx(nix_c_context * context, nix_value * value, EvalState * state
NIXC_CATCH_ERRS_NULL
}
nix_value * nix_get_attr_byidx_lazy(
nix_c_context * context, nix_value * value, EvalState * state, unsigned int i, const char ** name)
{
if (context)
context->last_err_code = NIX_OK;
try {
auto & v = check_value_in(value);
collapse_attrset_layer_chain_if_needed(v, state);
if (i >= v.attrs()->size()) {
nix_set_err_msg(context, NIX_ERR_KEY, "attribute index out of bounds (Nix C API contract violation)");
return nullptr;
}
const nix::Attr & a = (*v.attrs())[i];
*name = state->state.symbols[a.name].c_str();
nix_gc_incref(nullptr, a.value);
// Note: intentionally NOT calling forceValue() to keep the attribute lazy
return as_nix_value_ptr(a.value);
}
NIXC_CATCH_ERRS_NULL
}
const char * nix_get_attr_name_byidx(nix_c_context * context, nix_value * value, EvalState * state, unsigned int i)
{
if (context)
@ -405,6 +475,10 @@ const char * nix_get_attr_name_byidx(nix_c_context * context, nix_value * value,
try {
auto & v = check_value_in(value);
collapse_attrset_layer_chain_if_needed(v, state);
if (i >= v.attrs()->size()) {
nix_set_err_msg(context, NIX_ERR_KEY, "attribute index out of bounds (Nix C API contract violation)");
return nullptr;
}
const nix::Attr & a = (*v.attrs())[i];
return state->state.symbols[a.name].c_str();
}
@ -605,7 +679,7 @@ nix_err nix_bindings_builder_insert(nix_c_context * context, BindingsBuilder * b
context->last_err_code = NIX_OK;
try {
auto & v = check_value_not_null(value);
nix::Symbol s = bb->builder.state.get().symbols.create(name);
nix::Symbol s = bb->builder.symbols.get().create(name);
bb->builder.insert(s, &v);
}
NIXC_CATCH_ERRS

View file

@ -265,10 +265,25 @@ ExternalValue * nix_get_external(nix_c_context * context, nix_value * value);
*/
nix_value * nix_get_list_byidx(nix_c_context * context, const nix_value * value, EvalState * state, unsigned int ix);
/** @brief Get an attr by name
/** @brief Get the ix'th element of a list without forcing evaluation of the element
*
* Returns the list element without forcing its evaluation, allowing access to lazy values.
* The list value itself must already be evaluated.
*
* Owned by the GC. Use nix_gc_decref when you're done with the pointer
* @param[out] context Optional, stores error information
* @param[in] value Nix value to inspect (must be an evaluated list)
* @param[in] state nix evaluator state
* @param[in] ix list element to get
* @return value, NULL in case of errors
*/
nix_value *
nix_get_list_byidx_lazy(nix_c_context * context, const nix_value * value, EvalState * state, unsigned int ix);
/** @brief Get an attr by name
*
* Use nix_gc_decref when you're done with the pointer
* @param[out] context Optional, stores error information
* @param[in] value Nix value to inspect
* @param[in] state nix evaluator state
* @param[in] name attribute name
@ -276,6 +291,21 @@ nix_value * nix_get_list_byidx(nix_c_context * context, const nix_value * value,
*/
nix_value * nix_get_attr_byname(nix_c_context * context, const nix_value * value, EvalState * state, const char * name);
/** @brief Get an attribute value by attribute name, without forcing evaluation of the attribute's value
*
* Returns the attribute value without forcing its evaluation, allowing access to lazy values.
* The attribute set value itself must already be evaluated.
*
* Use nix_gc_decref when you're done with the pointer
* @param[out] context Optional, stores error information
* @param[in] value Nix value to inspect (must be an evaluated attribute set)
* @param[in] state nix evaluator state
* @param[in] name attribute name
* @return value, NULL in case of errors
*/
nix_value *
nix_get_attr_byname_lazy(nix_c_context * context, const nix_value * value, EvalState * state, const char * name);
/** @brief Check if an attribute name exists on a value
* @param[out] context Optional, stores error information
* @param[in] value Nix value to inspect
@ -285,11 +315,21 @@ nix_value * nix_get_attr_byname(nix_c_context * context, const nix_value * value
*/
bool nix_has_attr_byname(nix_c_context * context, const nix_value * value, EvalState * state, const char * name);
/** @brief Get an attribute by index in the sorted bindings
/** @brief Get an attribute by index
*
* Also gives you the name.
*
* Owned by the GC. Use nix_gc_decref when you're done with the pointer
* Attributes are returned in an unspecified order which is NOT suitable for
* reproducible operations. In Nix's domain, reproducibility is paramount. The caller
* is responsible for sorting the attributes or storing them in an ordered map to
* ensure deterministic behavior in your application.
*
* @note When Nix does sort attributes, which it does for virtually all intermediate
* operations and outputs, it uses byte-wise lexicographic order (equivalent to
* lexicographic order by Unicode scalar value for valid UTF-8). We recommend
* applying this same ordering for consistency.
*
* Use nix_gc_decref when you're done with the pointer
* @param[out] context Optional, stores error information
* @param[in] value Nix value to inspect
* @param[in] state nix evaluator state
@ -300,9 +340,47 @@ bool nix_has_attr_byname(nix_c_context * context, const nix_value * value, EvalS
nix_value *
nix_get_attr_byidx(nix_c_context * context, nix_value * value, EvalState * state, unsigned int i, const char ** name);
/** @brief Get an attribute name by index in the sorted bindings
/** @brief Get an attribute by index, without forcing evaluation of the attribute's value
*
* Useful when you want the name but want to avoid evaluation.
* Also gives you the name.
*
* Returns the attribute value without forcing its evaluation, allowing access to lazy values.
* The attribute set value itself must already have been evaluated.
*
* Attributes are returned in an unspecified order which is NOT suitable for
* reproducible operations. In Nix's domain, reproducibility is paramount. The caller
* is responsible for sorting the attributes or storing them in an ordered map to
* ensure deterministic behavior in your application.
*
* @note When Nix does sort attributes, which it does for virtually all intermediate
* operations and outputs, it uses byte-wise lexicographic order (equivalent to
* lexicographic order by Unicode scalar value for valid UTF-8). We recommend
* applying this same ordering for consistency.
*
* Use nix_gc_decref when you're done with the pointer
* @param[out] context Optional, stores error information
* @param[in] value Nix value to inspect (must be an evaluated attribute set)
* @param[in] state nix evaluator state
* @param[in] i attribute index
* @param[out] name will store a pointer to the attribute name
* @return value, NULL in case of errors
*/
nix_value * nix_get_attr_byidx_lazy(
nix_c_context * context, nix_value * value, EvalState * state, unsigned int i, const char ** name);
/** @brief Get an attribute name by index
*
* Returns the attribute name without forcing evaluation of the attribute's value.
*
* Attributes are returned in an unspecified order which is NOT suitable for
* reproducible operations. In Nix's domain, reproducibility is paramount. The caller
* is responsible for sorting the attributes or storing them in an ordered map to
* ensure deterministic behavior in your application.
*
* @note When Nix does sort attributes, which it does for virtually all intermediate
* operations and outputs, it uses byte-wise lexicographic order (equivalent to
* lexicographic order by Unicode scalar value for valid UTF-8). We recommend
* applying this same ordering for consistency.
*
* Owned by the nix EvalState
* @param[out] context Optional, stores error information

View file

@ -26,11 +26,20 @@ public:
}
protected:
LibExprTest()
LibExprTest(ref<Store> store, auto && makeEvalSettings)
: LibStoreTest()
, evalSettings(makeEvalSettings(readOnlyMode))
, state({}, store, fetchSettings, evalSettings, nullptr)
{
evalSettings.nixPath = {};
}
LibExprTest()
: LibExprTest(openStore("dummy://"), [](bool & readOnlyMode) {
EvalSettings settings{readOnlyMode};
settings.nixPath = {};
return settings;
})
{
}
Value eval(std::string input, bool forceValue = true)

View file

@ -31,7 +31,6 @@ rapidcheck = dependency('rapidcheck')
deps_public += rapidcheck
subdir('nix-meson-build-support/common')
subdir('nix-meson-build-support/asan-options')
sources = files(
'tests/value/context.cc',

View file

@ -3,6 +3,7 @@
#include "nix/expr/eval.hh"
#include "nix/expr/tests/libexpr.hh"
#include "nix/util/memory-source-accessor.hh"
namespace nix {
@ -174,4 +175,41 @@ TEST_F(EvalStateTest, getBuiltin_fail)
ASSERT_THROW(state.getBuiltin("nonexistent"), EvalError);
}
class PureEvalTest : public LibExprTest
{
public:
PureEvalTest()
: LibExprTest(openStore("dummy://", {{"read-only", "false"}}), [](bool & readOnlyMode) {
EvalSettings settings{readOnlyMode};
settings.pureEval = true;
settings.restrictEval = true;
return settings;
})
{
}
};
TEST_F(PureEvalTest, pathExists)
{
ASSERT_THAT(eval("builtins.pathExists /."), IsFalse());
ASSERT_THAT(eval("builtins.pathExists /nix"), IsFalse());
ASSERT_THAT(eval("builtins.pathExists /nix/store"), IsFalse());
{
std::string contents = "Lorem ipsum";
StringSource s{contents};
auto path = state.store->addToStoreFromDump(
s, "source", FileSerialisationMethod::Flat, ContentAddressMethod::Raw::Text, HashAlgorithm::SHA256);
auto printed = store->printStorePath(path);
ASSERT_THROW(eval(fmt("builtins.readFile %s", printed)), RestrictedPathError);
ASSERT_THAT(eval(fmt("builtins.pathExists %s", printed)), IsFalse());
ASSERT_THROW(eval("builtins.readDir /."), RestrictedPathError);
state.allowPath(path); // FIXME: This shouldn't behave this way.
ASSERT_THAT(eval("builtins.readDir /."), IsAttrsOfSize(0));
}
}
} // namespace nix

View file

@ -1,15 +1,19 @@
#include <gtest/gtest.h>
#include "nix/store/tests/test-main.hh"
#include "nix/util/config-global.hh"
using namespace nix;
int main(int argc, char ** argv)
{
auto res = testMainForBuidingPre(argc, argv);
if (!res)
if (res)
return res;
// For pipe operator tests in trivial.cc
experimentalFeatureSettings.set("experimental-features", "pipe-operators");
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View file

@ -45,7 +45,6 @@ config_priv_h = configure_file(
)
subdir('nix-meson-build-support/common')
subdir('nix-meson-build-support/asan-options')
sources = files(
'derived-path.cc',
@ -83,7 +82,7 @@ this_exe = executable(
test(
meson.project_name(),
this_exe,
env : asan_test_options_env + {
env : {
'_NIX_TEST_UNIT_DATA' : meson.current_source_dir() / 'data',
},
protocol : 'gtest',

View file

@ -423,6 +423,55 @@ TEST_F(nix_api_expr_test, nix_expr_primop_bad_return_thunk)
ASSERT_THAT(nix_err_msg(nullptr, ctx, nullptr), testing::HasSubstr("badReturnThunk"));
}
static void primop_with_nix_err_key(
void * user_data, nix_c_context * context, EvalState * state, nix_value ** args, nix_value * ret)
{
nix_set_err_msg(context, NIX_ERR_KEY, "Test error from primop");
}
TEST_F(nix_api_expr_test, nix_expr_primop_nix_err_key_conversion)
{
// Test that NIX_ERR_KEY from a custom primop gets converted to a generic EvalError
//
// RATIONALE: NIX_ERR_KEY must not be propagated from custom primops because it would
// create semantic confusion. NIX_ERR_KEY indicates missing keys/indices in C API functions
// (like nix_get_attr_byname, nix_get_list_byidx). If custom primops could return NIX_ERR_KEY,
// an evaluation error would be indistinguishable from an actual missing attribute.
//
// For example, if nix_get_attr_byname returned NIX_ERR_KEY when the attribute is present
// but the value evaluation fails, callers expecting NIX_ERR_KEY to mean "missing attribute"
// would incorrectly handle evaluation failures as missing attributes. In places where
// missing attributes are tolerated (like optional attributes), this would cause the
// program to continue after swallowing the error, leading to silent failures.
PrimOp * primop = nix_alloc_primop(
ctx, primop_with_nix_err_key, 1, "testErrorPrimop", nullptr, "a test primop that sets NIX_ERR_KEY", nullptr);
assert_ctx_ok();
nix_value * primopValue = nix_alloc_value(ctx, state);
assert_ctx_ok();
nix_init_primop(ctx, primopValue, primop);
assert_ctx_ok();
nix_value * arg = nix_alloc_value(ctx, state);
assert_ctx_ok();
nix_init_int(ctx, arg, 42);
assert_ctx_ok();
nix_value * result = nix_alloc_value(ctx, state);
assert_ctx_ok();
nix_value_call(ctx, state, primopValue, arg, result);
// Verify that NIX_ERR_KEY gets converted to NIX_ERR_NIX_ERROR (generic evaluation error)
ASSERT_EQ(nix_err_code(ctx), NIX_ERR_NIX_ERROR);
ASSERT_THAT(nix_err_msg(nullptr, ctx, nullptr), testing::HasSubstr("Error from custom function"));
ASSERT_THAT(nix_err_msg(nullptr, ctx, nullptr), testing::HasSubstr("Test error from primop"));
ASSERT_THAT(nix_err_msg(nullptr, ctx, nullptr), testing::HasSubstr("testErrorPrimop"));
// Clean up
nix_gc_decref(ctx, primopValue);
nix_gc_decref(ctx, arg);
nix_gc_decref(ctx, result);
}
TEST_F(nix_api_expr_test, nix_value_call_multi_no_args)
{
nix_value * n = nix_alloc_value(ctx, state);

View file

@ -162,6 +162,114 @@ TEST_F(nix_api_expr_test, nix_build_and_init_list)
nix_gc_decref(ctx, intValue);
}
TEST_F(nix_api_expr_test, nix_get_list_byidx_large_indices)
{
// Create a small list to test extremely large out-of-bounds access
ListBuilder * builder = nix_make_list_builder(ctx, state, 2);
nix_value * intValue = nix_alloc_value(ctx, state);
nix_init_int(ctx, intValue, 42);
nix_list_builder_insert(ctx, builder, 0, intValue);
nix_list_builder_insert(ctx, builder, 1, intValue);
nix_make_list(ctx, builder, value);
nix_list_builder_free(builder);
// Test extremely large indices that would definitely crash without bounds checking
ASSERT_EQ(nullptr, nix_get_list_byidx(ctx, value, state, 1000000));
ASSERT_EQ(NIX_ERR_KEY, nix_err_code(ctx));
ASSERT_EQ(nullptr, nix_get_list_byidx(ctx, value, state, UINT_MAX / 2));
ASSERT_EQ(NIX_ERR_KEY, nix_err_code(ctx));
ASSERT_EQ(nullptr, nix_get_list_byidx(ctx, value, state, UINT_MAX / 2 + 1000000));
ASSERT_EQ(NIX_ERR_KEY, nix_err_code(ctx));
// Clean up
nix_gc_decref(ctx, intValue);
}
// Verifies that nix_get_list_byidx_lazy returns list elements WITHOUT forcing
// them: a thunk that throws when forced must come back intact, an already
// evaluated value must be readable directly, and a lazy application must only
// be computed once the caller forces it.
TEST_F(nix_api_expr_test, nix_get_list_byidx_lazy)
{
    // Create a list with a throwing lazy element, an already-evaluated int, and a lazy function call
    // 1. Throwing lazy element - create a function application thunk that will throw when forced.
    //    The function ignores its argument (`_:`), so applying it to itself is fine;
    //    the throw only happens when the application thunk is forced.
    nix_value * throwingFn = nix_alloc_value(ctx, state);
    nix_value * throwingValue = nix_alloc_value(ctx, state);
    nix_expr_eval_from_string(
        ctx,
        state,
        R"(
            _: throw "This should not be evaluated by the lazy accessor"
        )",
        "<test>",
        throwingFn);
    assert_ctx_ok();
    nix_init_apply(ctx, throwingValue, throwingFn, throwingFn);
    assert_ctx_ok();

    // 2. Already evaluated int (not lazy)
    nix_value * intValue = nix_alloc_value(ctx, state);
    nix_init_int(ctx, intValue, 42);
    assert_ctx_ok();

    // 3. Lazy function application that would compute increment 5 = 6
    nix_value * lazyApply = nix_alloc_value(ctx, state);
    nix_value * incrementFn = nix_alloc_value(ctx, state);
    nix_value * argFive = nix_alloc_value(ctx, state);
    nix_expr_eval_from_string(ctx, state, "x: x + 1", "<test>", incrementFn);
    assert_ctx_ok();
    nix_init_int(ctx, argFive, 5);
    // Create a lazy application: (x: x + 1) 5
    nix_init_apply(ctx, lazyApply, incrementFn, argFive);
    assert_ctx_ok();

    // Assemble the list [ throwingValue intValue lazyApply ].
    ListBuilder * builder = nix_make_list_builder(ctx, state, 3);
    nix_list_builder_insert(ctx, builder, 0, throwingValue);
    nix_list_builder_insert(ctx, builder, 1, intValue);
    nix_list_builder_insert(ctx, builder, 2, lazyApply);
    nix_make_list(ctx, builder, value);
    nix_list_builder_free(builder);

    // Test 1: Lazy accessor should return the throwing element without forcing evaluation
    nix_value * lazyThrowingElement = nix_get_list_byidx_lazy(ctx, value, state, 0);
    assert_ctx_ok();
    ASSERT_NE(nullptr, lazyThrowingElement);
    // Verify the element is still lazy by checking that forcing it throws
    nix_value_force(ctx, state, lazyThrowingElement);
    assert_ctx_err();
    ASSERT_THAT(
        nix_err_msg(nullptr, ctx, nullptr), testing::HasSubstr("This should not be evaluated by the lazy accessor"));

    // Test 2: Lazy accessor should return the already-evaluated int
    nix_value * intElement = nix_get_list_byidx_lazy(ctx, value, state, 1);
    assert_ctx_ok();
    ASSERT_NE(nullptr, intElement);
    ASSERT_EQ(42, nix_get_int(ctx, intElement));

    // Test 3: Lazy accessor should return the lazy function application without forcing
    nix_value * lazyFunctionElement = nix_get_list_byidx_lazy(ctx, value, state, 2);
    assert_ctx_ok();
    ASSERT_NE(nullptr, lazyFunctionElement);
    // Force the lazy function application - should compute 5 + 1 = 6
    nix_value_force(ctx, state, lazyFunctionElement);
    assert_ctx_ok();
    ASSERT_EQ(6, nix_get_int(ctx, lazyFunctionElement));

    // Clean up: drop every reference we allocated or received.
    nix_gc_decref(ctx, throwingFn);
    nix_gc_decref(ctx, throwingValue);
    nix_gc_decref(ctx, intValue);
    nix_gc_decref(ctx, lazyApply);
    nix_gc_decref(ctx, incrementFn);
    nix_gc_decref(ctx, argFive);
    nix_gc_decref(ctx, lazyThrowingElement);
    nix_gc_decref(ctx, intElement);
    nix_gc_decref(ctx, lazyFunctionElement);
}
TEST_F(nix_api_expr_test, nix_build_and_init_attr_invalid)
{
ASSERT_EQ(nullptr, nix_get_attr_byname(ctx, nullptr, state, 0));
@ -244,6 +352,225 @@ TEST_F(nix_api_expr_test, nix_build_and_init_attr)
free(out_name);
}
// Regression check: looking up an attribute by an index far past the end of
// the set must fail with NIX_ERR_KEY instead of reading out of bounds.
TEST_F(nix_api_expr_test, nix_get_attr_byidx_large_indices)
{
    // Build a single-attribute set: { test = 42; }.
    const char ** out_name = (const char **) malloc(sizeof(char *));
    BindingsBuilder * bindingsBuilder = nix_make_bindings_builder(ctx, state, 2);
    nix_value * attrValue = nix_alloc_value(ctx, state);
    nix_init_int(ctx, attrValue, 42);
    nix_bindings_builder_insert(ctx, bindingsBuilder, "test", attrValue);
    nix_make_attrs(ctx, value, bindingsBuilder);
    nix_bindings_builder_free(bindingsBuilder);

    // Indices that are wildly out of range; every lookup must return NULL and
    // record NIX_ERR_KEY on the context.
    const unsigned int outOfRange[] = {1000000, UINT_MAX / 2, UINT_MAX / 2 + 1000000};
    for (unsigned int idx : outOfRange) {
        ASSERT_EQ(nullptr, nix_get_attr_byidx(ctx, value, state, idx, out_name));
        ASSERT_EQ(NIX_ERR_KEY, nix_err_code(ctx));
    }

    // The name-only accessor must enforce the same bounds.
    for (unsigned int idx : outOfRange) {
        ASSERT_EQ(nullptr, nix_get_attr_name_byidx(ctx, value, state, idx));
        ASSERT_EQ(NIX_ERR_KEY, nix_err_code(ctx));
    }

    // Clean up
    nix_gc_decref(ctx, attrValue);
    free(out_name);
}
// Verifies that nix_get_attr_byname_lazy returns attribute values WITHOUT
// forcing them (throwing thunks survive, lazy applications stay unevaluated
// until forced), and that a missing attribute yields NULL with NIX_ERR_KEY.
TEST_F(nix_api_expr_test, nix_get_attr_byname_lazy)
{
    // Create an attribute set with a throwing lazy attribute, an already-evaluated int, and a lazy function call
    // 1. Throwing lazy element - create a function application thunk that will throw when forced.
    //    The function ignores its argument (`_:`), so applying it to itself is fine;
    //    the throw only happens when the application thunk is forced.
    nix_value * throwingFn = nix_alloc_value(ctx, state);
    nix_value * throwingValue = nix_alloc_value(ctx, state);
    nix_expr_eval_from_string(
        ctx,
        state,
        R"(
            _: throw "This should not be evaluated by the lazy accessor"
        )",
        "<test>",
        throwingFn);
    assert_ctx_ok();
    nix_init_apply(ctx, throwingValue, throwingFn, throwingFn);
    assert_ctx_ok();

    // 2. Already evaluated int (not lazy)
    nix_value * intValue = nix_alloc_value(ctx, state);
    nix_init_int(ctx, intValue, 42);
    assert_ctx_ok();

    // 3. Lazy function application that would compute increment 7 = 8
    nix_value * lazyApply = nix_alloc_value(ctx, state);
    nix_value * incrementFn = nix_alloc_value(ctx, state);
    nix_value * argSeven = nix_alloc_value(ctx, state);
    nix_expr_eval_from_string(ctx, state, "x: x + 1", "<test>", incrementFn);
    assert_ctx_ok();
    nix_init_int(ctx, argSeven, 7);
    // Create a lazy application: (x: x + 1) 7
    nix_init_apply(ctx, lazyApply, incrementFn, argSeven);
    assert_ctx_ok();

    // Assemble { throwing = ...; normal = 42; lazy = ...; }.
    BindingsBuilder * builder = nix_make_bindings_builder(ctx, state, 3);
    nix_bindings_builder_insert(ctx, builder, "throwing", throwingValue);
    nix_bindings_builder_insert(ctx, builder, "normal", intValue);
    nix_bindings_builder_insert(ctx, builder, "lazy", lazyApply);
    nix_make_attrs(ctx, value, builder);
    nix_bindings_builder_free(builder);

    // Test 1: Lazy accessor should return the throwing attribute without forcing evaluation
    nix_value * lazyThrowingAttr = nix_get_attr_byname_lazy(ctx, value, state, "throwing");
    assert_ctx_ok();
    ASSERT_NE(nullptr, lazyThrowingAttr);
    // Verify the attribute is still lazy by checking that forcing it throws
    nix_value_force(ctx, state, lazyThrowingAttr);
    assert_ctx_err();
    ASSERT_THAT(
        nix_err_msg(nullptr, ctx, nullptr), testing::HasSubstr("This should not be evaluated by the lazy accessor"));

    // Test 2: Lazy accessor should return the already-evaluated int
    nix_value * intAttr = nix_get_attr_byname_lazy(ctx, value, state, "normal");
    assert_ctx_ok();
    ASSERT_NE(nullptr, intAttr);
    ASSERT_EQ(42, nix_get_int(ctx, intAttr));

    // Test 3: Lazy accessor should return the lazy function application without forcing
    nix_value * lazyFunctionAttr = nix_get_attr_byname_lazy(ctx, value, state, "lazy");
    assert_ctx_ok();
    ASSERT_NE(nullptr, lazyFunctionAttr);
    // Force the lazy function application - should compute 7 + 1 = 8
    nix_value_force(ctx, state, lazyFunctionAttr);
    assert_ctx_ok();
    ASSERT_EQ(8, nix_get_int(ctx, lazyFunctionAttr));

    // Test 4: Missing attribute should return NULL with NIX_ERR_KEY
    nix_value * missingAttr = nix_get_attr_byname_lazy(ctx, value, state, "nonexistent");
    ASSERT_EQ(nullptr, missingAttr);
    ASSERT_EQ(NIX_ERR_KEY, nix_err_code(ctx));

    // Clean up: drop every reference we allocated or received.
    nix_gc_decref(ctx, throwingFn);
    nix_gc_decref(ctx, throwingValue);
    nix_gc_decref(ctx, intValue);
    nix_gc_decref(ctx, lazyApply);
    nix_gc_decref(ctx, incrementFn);
    nix_gc_decref(ctx, argSeven);
    nix_gc_decref(ctx, lazyThrowingAttr);
    nix_gc_decref(ctx, intAttr);
    nix_gc_decref(ctx, lazyFunctionAttr);
}
// Verifies that nix_get_attr_byidx_lazy returns attributes WITHOUT forcing
// them, using the documented contract: query the set size first, then gather
// all (name, value) pairs by index before inspecting any of them.
TEST_F(nix_api_expr_test, nix_get_attr_byidx_lazy)
{
    // Create an attribute set with a throwing lazy attribute, an already-evaluated int, and a lazy function call
    // 1. Throwing lazy element - create a function application thunk that will throw when forced.
    //    The function ignores its argument (`_:`), so applying it to itself is fine;
    //    the throw only happens when the application thunk is forced.
    nix_value * throwingFn = nix_alloc_value(ctx, state);
    nix_value * throwingValue = nix_alloc_value(ctx, state);
    nix_expr_eval_from_string(
        ctx,
        state,
        R"(
            _: throw "This should not be evaluated by the lazy accessor"
        )",
        "<test>",
        throwingFn);
    assert_ctx_ok();
    nix_init_apply(ctx, throwingValue, throwingFn, throwingFn);
    assert_ctx_ok();

    // 2. Already evaluated int (not lazy)
    nix_value * intValue = nix_alloc_value(ctx, state);
    nix_init_int(ctx, intValue, 99);
    assert_ctx_ok();

    // 3. Lazy function application that would compute increment 10 = 11
    nix_value * lazyApply = nix_alloc_value(ctx, state);
    nix_value * incrementFn = nix_alloc_value(ctx, state);
    nix_value * argTen = nix_alloc_value(ctx, state);
    nix_expr_eval_from_string(ctx, state, "x: x + 1", "<test>", incrementFn);
    assert_ctx_ok();
    nix_init_int(ctx, argTen, 10);
    // Create a lazy application: (x: x + 1) 10
    nix_init_apply(ctx, lazyApply, incrementFn, argTen);
    assert_ctx_ok();

    // Assemble the set; name prefixes (a_/b_/c_) make the intended iteration
    // order explicit without relying on insertion order.
    BindingsBuilder * builder = nix_make_bindings_builder(ctx, state, 3);
    nix_bindings_builder_insert(ctx, builder, "a_throwing", throwingValue);
    nix_bindings_builder_insert(ctx, builder, "b_normal", intValue);
    nix_bindings_builder_insert(ctx, builder, "c_lazy", lazyApply);
    nix_make_attrs(ctx, value, builder);
    nix_bindings_builder_free(builder);

    // Proper usage: first get the size and gather all attributes into a map
    unsigned int attrCount = nix_get_attrs_size(ctx, value);
    assert_ctx_ok();
    ASSERT_EQ(3u, attrCount);

    // Gather all attributes into a map (proper contract usage)
    std::map<std::string, nix_value *> attrMap;
    const char * name;
    for (unsigned int i = 0; i < attrCount; i++) {
        nix_value * attr = nix_get_attr_byidx_lazy(ctx, value, state, i, &name);
        assert_ctx_ok();
        ASSERT_NE(nullptr, attr);
        attrMap[std::string(name)] = attr;
    }

    // Now test the gathered attributes
    ASSERT_EQ(3u, attrMap.size());
    ASSERT_TRUE(attrMap.count("a_throwing"));
    ASSERT_TRUE(attrMap.count("b_normal"));
    ASSERT_TRUE(attrMap.count("c_lazy"));

    // Test 1: Throwing attribute should be lazy (forcing it raises the error)
    nix_value * throwingAttr = attrMap["a_throwing"];
    nix_value_force(ctx, state, throwingAttr);
    assert_ctx_err();
    ASSERT_THAT(
        nix_err_msg(nullptr, ctx, nullptr), testing::HasSubstr("This should not be evaluated by the lazy accessor"));

    // Test 2: Normal attribute should be already evaluated
    nix_value * normalAttr = attrMap["b_normal"];
    ASSERT_EQ(99, nix_get_int(ctx, normalAttr));

    // Test 3: Lazy function should compute when forced (10 + 1 = 11)
    nix_value * lazyAttr = attrMap["c_lazy"];
    nix_value_force(ctx, state, lazyAttr);
    assert_ctx_ok();
    ASSERT_EQ(11, nix_get_int(ctx, lazyAttr));

    // Clean up: drop every reference we allocated or received.
    nix_gc_decref(ctx, throwingFn);
    nix_gc_decref(ctx, throwingValue);
    nix_gc_decref(ctx, intValue);
    nix_gc_decref(ctx, lazyApply);
    nix_gc_decref(ctx, incrementFn);
    nix_gc_decref(ctx, argTen);
    for (auto & pair : attrMap) {
        nix_gc_decref(ctx, pair.second);
    }
}
TEST_F(nix_api_expr_test, nix_value_init)
{
// Setup

View file

@ -62,7 +62,6 @@ mkMesonExecutable (finalAttrs: {
mkdir -p "$HOME"
''
+ ''
export ASAN_OPTIONS=abort_on_error=1:print_summary=1:detect_leaks=0
export _NIX_TEST_UNIT_DATA=${resolvePath ./data}
${stdenv.hostPlatform.emulator buildPackages} ${lib.getExe finalAttrs.finalPackage}
touch $out

View file

@ -642,7 +642,7 @@ class ToStringPrimOpTest : public PrimOpTest,
TEST_P(ToStringPrimOpTest, toString)
{
const auto [input, output] = GetParam();
const auto & [input, output] = GetParam();
auto v = eval(input);
ASSERT_THAT(v, IsStringEq(output));
}
@ -798,7 +798,7 @@ class CompareVersionsPrimOpTest : public PrimOpTest,
TEST_P(CompareVersionsPrimOpTest, compareVersions)
{
auto [expression, expectation] = GetParam();
const auto & [expression, expectation] = GetParam();
auto v = eval(expression);
ASSERT_THAT(v, IsIntEq(expectation));
}
@ -834,7 +834,7 @@ class ParseDrvNamePrimOpTest
TEST_P(ParseDrvNamePrimOpTest, parseDrvName)
{
auto [input, expectedName, expectedVersion] = GetParam();
const auto & [input, expectedName, expectedVersion] = GetParam();
const auto expr = fmt("builtins.parseDrvName \"%1%\"", input);
auto v = eval(expr);
ASSERT_THAT(v, IsAttrsOfSize(2));

View file

@ -10,27 +10,27 @@ Bindings Bindings::emptyBindings;
/* Allocate a new array of attributes for an attribute set with a specific
capacity. The space is implicitly reserved after the Bindings
structure. */
Bindings * EvalState::allocBindings(size_t capacity)
Bindings * EvalMemory::allocBindings(size_t capacity)
{
if (capacity == 0)
return &Bindings::emptyBindings;
if (capacity > std::numeric_limits<Bindings::size_type>::max())
throw Error("attribute set of size %d is too big", capacity);
nrAttrsets++;
nrAttrsInAttrsets += capacity;
stats.nrAttrsets++;
stats.nrAttrsInAttrsets += capacity;
return new (allocBytes(sizeof(Bindings) + sizeof(Attr) * capacity)) Bindings();
}
Value & BindingsBuilder::alloc(Symbol name, PosIdx pos)
{
auto value = state.get().allocValue();
auto value = mem.get().allocValue();
bindings->push_back(Attr(name, value, pos));
return *value;
}
Value & BindingsBuilder::alloc(std::string_view name, PosIdx pos)
{
return alloc(state.get().symbols.create(name), pos);
return alloc(symbols.get().create(name), pos);
}
void Bindings::sort()

View file

@ -324,7 +324,7 @@ void SampleStack::saveProfile()
std::visit([&](auto && info) { info.symbolize(state, os, posCache); }, pos);
}
os << " " << count;
writeLine(profileFd.get(), std::move(os).str());
writeLine(profileFd.get(), os.str());
/* Clear ostringstream. */
os.str("");
os.clear();

View file

@ -17,6 +17,7 @@
#include "nix/expr/print.hh"
#include "nix/fetchers/filtering-source-accessor.hh"
#include "nix/util/memory-source-accessor.hh"
#include "nix/util/mounted-source-accessor.hh"
#include "nix/expr/gc-small-vector.hh"
#include "nix/util/url.hh"
#include "nix/fetchers/fetch-to-store.hh"
@ -38,6 +39,7 @@
#include <nlohmann/json.hpp>
#include <boost/container/small_vector.hpp>
#include <boost/unordered/concurrent_flat_map.hpp>
#include "nix/util/strings-inline.hh"
@ -192,6 +194,15 @@ static Symbol getName(const AttrName & name, EvalState & state, Env & env)
static constexpr size_t BASE_ENV_SIZE = 128;
EvalMemory::EvalMemory()
#if NIX_USE_BOEHMGC
: valueAllocCache(std::allocate_shared<void *>(traceable_allocator<void *>(), nullptr))
, env1AllocCache(std::allocate_shared<void *>(traceable_allocator<void *>(), nullptr))
#endif
{
assertGCInitialized();
}
EvalState::EvalState(
const LookupPath & lookupPathFromArguments,
ref<Store> store,
@ -224,22 +235,18 @@ EvalState::EvalState(
*/
{CanonPath(store->storeDir), store->getFSAccessor(settings.pureEval)},
}))
, rootFS(({
, rootFS([&] {
/* In pure eval mode, we provide a filesystem that only
contains the Nix store.
If we have a chroot store and pure eval is not enabled,
use a union accessor to make the chroot store available
at its logical location while still having the
Otherwise, use a union accessor to make the augmented store
available at its logical location while still having the
underlying directory available. This is necessary for
instance if we're evaluating a file from the physical
/nix/store while using a chroot store. */
auto accessor = getFSSourceAccessor();
auto realStoreDir = dirOf(store->toRealPath(StorePath::dummy));
if (settings.pureEval || store->storeDir != realStoreDir) {
accessor = settings.pureEval ? storeFS : makeUnionSourceAccessor({accessor, storeFS});
}
/nix/store while using a chroot store, and also for lazy
mounted fetchTree. */
auto accessor = settings.pureEval ? storeFS.cast<SourceAccessor>()
: makeUnionSourceAccessor({getFSSourceAccessor(), storeFS});
/* Apply access control if needed. */
if (settings.restrictEval || settings.pureEval)
@ -250,11 +257,11 @@ EvalState::EvalState(
throw RestrictedPathError("access to absolute path '%1%' is forbidden %2%", path, modeInformation);
});
accessor;
}))
return accessor;
}())
, corepkgsFS(make_ref<MemorySourceAccessor>())
, internalFS(make_ref<MemorySourceAccessor>())
, derivationInternal{corepkgsFS->addFile(
, derivationInternal{internalFS->addFile(
CanonPath("derivation-internal.nix"),
#include "primops/derivation.nix.gen.hh"
)}
@ -264,14 +271,15 @@ EvalState::EvalState(
, debugRepl(nullptr)
, debugStop(false)
, trylevel(0)
, srcToStore(make_ref<decltype(srcToStore)::element_type>())
, importResolutionCache(make_ref<decltype(importResolutionCache)::element_type>())
, fileEvalCache(make_ref<decltype(fileEvalCache)::element_type>())
, regexCache(makeRegexCache())
#if NIX_USE_BOEHMGC
, valueAllocCache(std::allocate_shared<void *>(traceable_allocator<void *>(), nullptr))
, env1AllocCache(std::allocate_shared<void *>(traceable_allocator<void *>(), nullptr))
, baseEnvP(std::allocate_shared<Env *>(traceable_allocator<Env *>(), &allocEnv(BASE_ENV_SIZE)))
, baseEnvP(std::allocate_shared<Env *>(traceable_allocator<Env *>(), &mem.allocEnv(BASE_ENV_SIZE)))
, baseEnv(**baseEnvP)
#else
, baseEnv(allocEnv(BASE_ENV_SIZE))
, baseEnv(mem.allocEnv(BASE_ENV_SIZE))
#endif
, staticBaseEnv{std::make_shared<StaticEnv>(nullptr, nullptr)}
{
@ -280,9 +288,8 @@ EvalState::EvalState(
countCalls = getEnv("NIX_COUNT_CALLS").value_or("0") != "0";
assertGCInitialized();
static_assert(sizeof(Env) <= 16, "environment must be <= 16 bytes");
static_assert(sizeof(Counter) == 64, "counters must be 64 bytes");
/* Construct the Nix expression search path. */
assert(lookupPath.elements.empty());
@ -329,7 +336,7 @@ EvalState::EvalState(
EvalState::~EvalState() {}
void EvalState::allowPath(const Path & path)
void EvalState::allowPathLegacy(const Path & path)
{
if (auto rootFS2 = rootFS.dynamic_pointer_cast<AllowListSourceAccessor>())
rootFS2->allowPrefix(CanonPath(path));
@ -577,7 +584,7 @@ std::optional<EvalState::Doc> EvalState::getDoc(Value & v)
.name = name,
.arity = 0, // FIXME: figure out how deep by syntax only? It's not semantically useful though...
.args = {},
.doc = makeImmutableString(toView(s)), // NOTE: memory leak when compiled without GC
.doc = makeImmutableString(s.view()), // NOTE: memory leak when compiled without GC
};
}
if (isFunctor(v)) {
@ -876,11 +883,10 @@ inline Value * EvalState::lookupVar(Env * env, const ExprVar & var, bool noEval)
}
}
ListBuilder::ListBuilder(EvalState & state, size_t size)
ListBuilder::ListBuilder(size_t size)
: size(size)
, elems(size <= 2 ? inlineElems : (Value **) allocBytes(size * sizeof(Value *)))
{
state.nrListElems += size;
}
Value * EvalState::getBool(bool b)
@ -888,7 +894,7 @@ Value * EvalState::getBool(bool b)
return b ? &Value::vTrue : &Value::vFalse;
}
unsigned long nrThunks = 0;
static Counter nrThunks;
static inline void mkThunk(Value & v, Env & env, Expr * expr)
{
@ -979,10 +985,6 @@ void EvalState::mkSingleDerivedPathString(const SingleDerivedPath & p, Value & v
});
}
/* Create a thunk for the delayed computation of the given expression
in the given environment. But if the expression is a variable,
then look it up right away. This significantly reduces the number
of thunks allocated. */
Value * Expr::maybeThunk(EvalState & state, Env & env)
{
Value * v = state.allocValue();
@ -1026,63 +1028,90 @@ Value * ExprPath::maybeThunk(EvalState & state, Env & env)
return &v;
}
/**
* A helper `Expr` class to lets us parse and evaluate Nix expressions
* from a thunk, ensuring that every file is parsed/evaluated only
* once (via the thunk stored in `EvalState::fileEvalCache`).
*/
struct ExprParseFile : Expr, gc
{
// FIXME: make this a reference (see below).
SourcePath path;
bool mustBeTrivial;
ExprParseFile(SourcePath & path, bool mustBeTrivial)
: path(path)
, mustBeTrivial(mustBeTrivial)
{
}
void eval(EvalState & state, Env & env, Value & v) override
{
printTalkative("evaluating file '%s'", path);
auto e = state.parseExprFromFile(path);
try {
auto dts =
state.debugRepl
? makeDebugTraceStacker(
state, *e, state.baseEnv, e->getPos(), "while evaluating the file '%s':", path.to_string())
: nullptr;
// Enforce that 'flake.nix' is a direct attrset, not a
// computation.
if (mustBeTrivial && !(dynamic_cast<ExprAttrs *>(e)))
state.error<EvalError>("file '%s' must be an attribute set", path).debugThrow();
state.eval(e, v);
} catch (Error & e) {
state.addErrorTrace(e, "while evaluating the file '%s':", path.to_string());
throw;
}
}
};
void EvalState::evalFile(const SourcePath & path, Value & v, bool mustBeTrivial)
{
FileEvalCache::iterator i;
if ((i = fileEvalCache.find(path)) != fileEvalCache.end()) {
v = i->second;
auto resolvedPath = getConcurrent(*importResolutionCache, path);
if (!resolvedPath) {
resolvedPath = resolveExprPath(path);
importResolutionCache->emplace(path, *resolvedPath);
}
if (auto v2 = getConcurrent(*fileEvalCache, *resolvedPath)) {
forceValue(**v2, noPos);
v = **v2;
return;
}
auto resolvedPath = resolveExprPath(path);
if ((i = fileEvalCache.find(resolvedPath)) != fileEvalCache.end()) {
v = i->second;
return;
}
Value * vExpr;
// FIXME: put ExprParseFile on the stack instead of the heap once
// https://github.com/NixOS/nix/pull/13930 is merged. That will ensure
// the post-condition that `expr` is unreachable after
// `forceValue()` returns.
auto expr = new ExprParseFile{*resolvedPath, mustBeTrivial};
printTalkative("evaluating file '%1%'", resolvedPath);
Expr * e = nullptr;
fileEvalCache->try_emplace_and_cvisit(
*resolvedPath,
nullptr,
[&](auto & i) {
vExpr = allocValue();
vExpr->mkThunk(&baseEnv, expr);
i.second = vExpr;
},
[&](auto & i) { vExpr = i.second; });
auto j = fileParseCache.find(resolvedPath);
if (j != fileParseCache.end())
e = j->second;
forceValue(*vExpr, noPos);
if (!e)
e = parseExprFromFile(resolvedPath);
fileParseCache.emplace(resolvedPath, e);
try {
auto dts = debugRepl ? makeDebugTraceStacker(
*this,
*e,
this->baseEnv,
e->getPos(),
"while evaluating the file '%1%':",
resolvedPath.to_string())
: nullptr;
// Enforce that 'flake.nix' is a direct attrset, not a
// computation.
if (mustBeTrivial && !(dynamic_cast<ExprAttrs *>(e)))
error<EvalError>("file '%s' must be an attribute set", path).debugThrow();
eval(e, v);
} catch (Error & e) {
addErrorTrace(e, "while evaluating the file '%1%':", resolvedPath.to_string());
throw;
}
fileEvalCache.emplace(resolvedPath, v);
if (path != resolvedPath)
fileEvalCache.emplace(path, v);
v = *vExpr;
}
void EvalState::resetFileCache()
{
fileEvalCache.clear();
fileEvalCache.rehash(0);
fileParseCache.clear();
fileParseCache.rehash(0);
importResolutionCache->clear();
fileEvalCache->clear();
inputCache->clear();
}
@ -1151,7 +1180,7 @@ void ExprPath::eval(EvalState & state, Env & env, Value & v)
Env * ExprAttrs::buildInheritFromEnv(EvalState & state, Env & up)
{
Env & inheritEnv = state.allocEnv(inheritFromExprs->size());
Env & inheritEnv = state.mem.allocEnv(inheritFromExprs->size());
inheritEnv.up = &up;
Displacement displ = 0;
@ -1170,7 +1199,7 @@ void ExprAttrs::eval(EvalState & state, Env & env, Value & v)
if (recursive) {
/* Create a new environment that contains the attributes in
this `rec'. */
Env & env2(state.allocEnv(attrs.size()));
Env & env2(state.mem.allocEnv(attrs.size()));
env2.up = &env;
dynamicEnv = &env2;
Env * inheritEnv = inheritFromExprs ? buildInheritFromEnv(state, env2) : nullptr;
@ -1262,7 +1291,7 @@ void ExprLet::eval(EvalState & state, Env & env, Value & v)
{
/* Create a new environment that contains the attributes in this
`let'. */
Env & env2(state.allocEnv(attrs->attrs.size()));
Env & env2(state.mem.allocEnv(attrs->attrs.size()));
env2.up = &env;
Env * inheritEnv = attrs->inheritFromExprs ? attrs->buildInheritFromEnv(state, env2) : nullptr;
@ -1305,7 +1334,7 @@ void ExprVar::eval(EvalState & state, Env & env, Value & v)
v = *v2;
}
static std::string showAttrPath(EvalState & state, Env & env, const AttrPath & attrPath)
static std::string showAttrPath(EvalState & state, Env & env, std::span<const AttrName> attrPath)
{
std::ostringstream out;
bool first = true;
@ -1341,10 +1370,10 @@ void ExprSelect::eval(EvalState & state, Env & env, Value & v)
env,
getPos(),
"while evaluating the attribute '%1%'",
showAttrPath(state, env, attrPath))
showAttrPath(state, env, getAttrPath()))
: nullptr;
for (auto & i : attrPath) {
for (auto & i : getAttrPath()) {
state.nrLookups++;
const Attr * j;
auto name = getName(i, state, env);
@ -1382,7 +1411,7 @@ void ExprSelect::eval(EvalState & state, Env & env, Value & v)
auto origin = std::get_if<SourcePath>(&pos2r.origin);
if (!(origin && *origin == state.derivationInternal))
state.addErrorTrace(
e, pos2, "while evaluating the attribute '%1%'", showAttrPath(state, env, attrPath));
e, pos2, "while evaluating the attribute '%1%'", showAttrPath(state, env, getAttrPath()));
}
throw;
}
@ -1393,13 +1422,13 @@ void ExprSelect::eval(EvalState & state, Env & env, Value & v)
Symbol ExprSelect::evalExceptFinalSelect(EvalState & state, Env & env, Value & attrs)
{
Value vTmp;
Symbol name = getName(attrPath[attrPath.size() - 1], state, env);
Symbol name = getName(attrPathStart[nAttrPath - 1], state, env);
if (attrPath.size() == 1) {
if (nAttrPath == 1) {
e->eval(state, env, vTmp);
} else {
ExprSelect init(*this);
init.attrPath.pop_back();
init.nAttrPath--;
init.eval(state, env, vTmp);
}
attrs = vTmp;
@ -1468,7 +1497,7 @@ void EvalState::callFunction(Value & fun, std::span<Value *> args, Value & vRes,
ExprLambda & lambda(*vCur.lambda().fun);
auto size = (!lambda.arg ? 0 : 1) + (lambda.hasFormals() ? lambda.formals->formals.size() : 0);
Env & env2(allocEnv(size));
Env & env2(mem.allocEnv(size));
env2.up = vCur.lambda().env;
Displacement displ = 0;
@ -1757,7 +1786,7 @@ https://nix.dev/manual/nix/stable/language/syntax.html#functions.)",
void ExprWith::eval(EvalState & state, Env & env, Value & v)
{
Env & env2(state.allocEnv(1));
Env & env2(state.mem.allocEnv(1));
env2.up = &env;
env2.values[0] = attrs->maybeThunk(state, env);
@ -1775,7 +1804,7 @@ void ExprAssert::eval(EvalState & state, Env & env, Value & v)
if (!state.evalBool(env, cond, pos, "in the condition of the assert statement")) {
std::ostringstream out;
cond->show(state.symbols, out);
auto exprStr = toView(out);
auto exprStr = out.view();
if (auto eq = dynamic_cast<ExprOpEq *>(cond)) {
try {
@ -1839,12 +1868,8 @@ void ExprOpImpl::eval(EvalState & state, Env & env, Value & v)
|| state.evalBool(env, e2, pos, "in the right operand of the IMPL (->) operator"));
}
void ExprOpUpdate::eval(EvalState & state, Env & env, Value & v)
void ExprOpUpdate::eval(EvalState & state, Value & v, Value & v1, Value & v2)
{
Value v1, v2;
state.evalAttrs(env, e1, v1, pos, "in the left operand of the update (//) operator");
state.evalAttrs(env, e2, v2, pos, "in the right operand of the update (//) operator");
state.nrOpUpdates++;
const Bindings & bindings1 = *v1.attrs();
@ -1918,6 +1943,38 @@ void ExprOpUpdate::eval(EvalState & state, Env & env, Value & v)
state.nrOpUpdateValuesCopied += v.attrs()->size();
}
void ExprOpUpdate::eval(EvalState & state, Env & env, Value & v)
{
UpdateQueue q;
evalForUpdate(state, env, q);
v.mkAttrs(&Bindings::emptyBindings);
for (auto & rhs : std::views::reverse(q)) {
/* Remember that queue is sorted rightmost attrset first. */
eval(state, /*v=*/v, /*v1=*/v, /*v2=*/rhs);
}
}
void Expr::evalForUpdate(EvalState & state, Env & env, UpdateQueue & q, std::string_view errorCtx)
{
Value v;
state.evalAttrs(env, this, v, getPos(), errorCtx);
q.push_back(v);
}
void ExprOpUpdate::evalForUpdate(EvalState & state, Env & env, UpdateQueue & q)
{
/* Output rightmost attrset first to the merge queue as the one
with the most priority. */
e2->evalForUpdate(state, env, q, "in the right operand of the update (//) operator");
e1->evalForUpdate(state, env, q, "in the left operand of the update (//) operator");
}
void ExprOpUpdate::evalForUpdate(EvalState & state, Env & env, UpdateQueue & q, std::string_view errorCtx)
{
evalForUpdate(state, env, q);
}
void ExprOpConcatLists::eval(EvalState & state, Env & env, Value & v)
{
Value v1;
@ -2401,9 +2458,10 @@ StorePath EvalState::copyPathToStore(NixStringContext & context, const SourcePat
if (nix::isDerivation(path.path.abs()))
error<EvalError>("file names are not allowed to end in '%1%'", drvExtension).debugThrow();
std::optional<StorePath> dstPath;
if (!srcToStore.cvisit(path, [&dstPath](const auto & kv) { dstPath.emplace(kv.second); })) {
dstPath.emplace(fetchToStore(
auto dstPathCached = getConcurrent(*srcToStore, path);
auto dstPath = dstPathCached ? *dstPathCached : [&]() {
auto dstPath = fetchToStore(
fetchSettings,
*store,
path.resolveSymlinks(SymlinkResolution::Ancestors),
@ -2411,14 +2469,15 @@ StorePath EvalState::copyPathToStore(NixStringContext & context, const SourcePat
path.baseName(),
ContentAddressMethod::Raw::NixArchive,
nullptr,
repair));
allowPath(*dstPath);
srcToStore.try_emplace(path, *dstPath);
printMsg(lvlChatty, "copied source '%1%' -> '%2%'", path, store->printStorePath(*dstPath));
}
repair);
allowPath(dstPath);
srcToStore->try_emplace(path, dstPath);
printMsg(lvlChatty, "copied source '%1%' -> '%2%'", path, store->printStorePath(dstPath));
return dstPath;
}();
context.insert(NixStringContextElem::Opaque{.path = *dstPath});
return *dstPath;
context.insert(NixStringContextElem::Opaque{.path = dstPath});
return dstPath;
}
SourcePath EvalState::coerceToPath(const PosIdx pos, Value & v, NixStringContext & context, std::string_view errorCtx)
@ -2834,11 +2893,11 @@ bool EvalState::fullGC()
#endif
}
bool Counter::enabled = getEnv("NIX_SHOW_STATS").value_or("0") != "0";
void EvalState::maybePrintStats()
{
bool showStats = getEnv("NIX_SHOW_STATS").value_or("0") != "0";
if (showStats) {
if (Counter::enabled) {
// Make the final heap size more deterministic.
#if NIX_USE_BOEHMGC
if (!fullGC()) {
@ -2854,10 +2913,12 @@ void EvalState::printStatistics()
std::chrono::microseconds cpuTimeDuration = getCpuUserTime();
float cpuTime = std::chrono::duration_cast<std::chrono::duration<float>>(cpuTimeDuration).count();
uint64_t bEnvs = nrEnvs * sizeof(Env) + nrValuesInEnvs * sizeof(Value *);
uint64_t bLists = nrListElems * sizeof(Value *);
uint64_t bValues = nrValues * sizeof(Value);
uint64_t bAttrsets = nrAttrsets * sizeof(Bindings) + nrAttrsInAttrsets * sizeof(Attr);
auto & memstats = mem.getStats();
uint64_t bEnvs = memstats.nrEnvs * sizeof(Env) + memstats.nrValuesInEnvs * sizeof(Value *);
uint64_t bLists = memstats.nrListElems * sizeof(Value *);
uint64_t bValues = memstats.nrValues * sizeof(Value);
uint64_t bAttrsets = memstats.nrAttrsets * sizeof(Bindings) + memstats.nrAttrsInAttrsets * sizeof(Attr);
#if NIX_USE_BOEHMGC
GC_word heapSize, totalBytes;
@ -2883,18 +2944,18 @@ void EvalState::printStatistics()
#endif
};
topObj["envs"] = {
{"number", nrEnvs},
{"elements", nrValuesInEnvs},
{"number", memstats.nrEnvs.load()},
{"elements", memstats.nrValuesInEnvs.load()},
{"bytes", bEnvs},
};
topObj["nrExprs"] = Expr::nrExprs;
topObj["nrExprs"] = Expr::nrExprs.load();
topObj["list"] = {
{"elements", nrListElems},
{"elements", memstats.nrListElems.load()},
{"bytes", bLists},
{"concats", nrListConcats},
{"concats", nrListConcats.load()},
};
topObj["values"] = {
{"number", nrValues},
{"number", memstats.nrValues.load()},
{"bytes", bValues},
};
topObj["symbols"] = {
@ -2902,9 +2963,9 @@ void EvalState::printStatistics()
{"bytes", symbols.totalSize()},
};
topObj["sets"] = {
{"number", nrAttrsets},
{"number", memstats.nrAttrsets.load()},
{"bytes", bAttrsets},
{"elements", nrAttrsInAttrsets},
{"elements", memstats.nrAttrsInAttrsets.load()},
};
topObj["sizes"] = {
{"Env", sizeof(Env)},
@ -2912,13 +2973,13 @@ void EvalState::printStatistics()
{"Bindings", sizeof(Bindings)},
{"Attr", sizeof(Attr)},
};
topObj["nrOpUpdates"] = nrOpUpdates;
topObj["nrOpUpdateValuesCopied"] = nrOpUpdateValuesCopied;
topObj["nrThunks"] = nrThunks;
topObj["nrAvoided"] = nrAvoided;
topObj["nrLookups"] = nrLookups;
topObj["nrPrimOpCalls"] = nrPrimOpCalls;
topObj["nrFunctionCalls"] = nrFunctionCalls;
topObj["nrOpUpdates"] = nrOpUpdates.load();
topObj["nrOpUpdateValuesCopied"] = nrOpUpdateValuesCopied.load();
topObj["nrThunks"] = nrThunks.load();
topObj["nrAvoided"] = nrAvoided.load();
topObj["nrLookups"] = nrLookups.load();
topObj["nrPrimOpCalls"] = nrPrimOpCalls.load();
topObj["nrFunctionCalls"] = nrFunctionCalls.load();
#if NIX_USE_BOEHMGC
topObj["gc"] = {
{"heapSize", heapSize},
@ -3065,6 +3126,11 @@ SourcePath EvalState::findFile(const LookupPath & lookupPath, const std::string_
auto res = (r / CanonPath(suffix)).resolveSymlinks();
if (res.pathExists())
return res;
// Backward compatibility hack: throw an exception if access
// to this path is not allowed.
if (auto accessor = res.accessor.dynamic_pointer_cast<FilteringSourceAccessor>())
accessor->checkAccess(res.path);
}
if (hasPrefix(path, "nix/"))
@ -3119,7 +3185,7 @@ std::optional<SourcePath> EvalState::resolveLookupPathPath(const LookupPath::Pat
/* Allow access to paths in the search path. */
if (initAccessControl) {
allowPath(path.path.abs());
allowPathLegacy(path.path.abs());
if (store->isInStore(path.path.abs())) {
try {
allowClosure(store->toStorePath(path.path.abs()).first);
@ -3131,6 +3197,11 @@ std::optional<SourcePath> EvalState::resolveLookupPathPath(const LookupPath::Pat
if (path.resolveSymlinks().pathExists())
return finish(std::move(path));
else {
// Backward compatibility hack: throw an exception if access
// to this path is not allowed.
if (auto accessor = path.accessor.dynamic_pointer_cast<FilteringSourceAccessor>())
accessor->checkAccess(path.path);
logWarning({.msg = HintFmt("Nix search path entry '%1%' does not exist, ignoring", value)});
}
}
@ -3149,7 +3220,8 @@ Expr * EvalState::parse(
docComments = &it->second;
}
auto result = parseExprFromBuf(text, length, origin, basePath, symbols, settings, positions, *docComments, rootFS);
auto result = parseExprFromBuf(
text, length, origin, basePath, mem.exprs.alloc, symbols, settings, positions, *docComments, rootFS);
result->bindVars(*this, staticEnv);

View file

@ -13,7 +13,7 @@
namespace nix {
class EvalState;
class EvalMemory;
struct Value;
/**
@ -426,7 +426,7 @@ public:
return res;
}
friend class EvalState;
friend class EvalMemory;
};
static_assert(std::forward_iterator<Bindings::iterator>);
@ -448,12 +448,13 @@ private:
Bindings * bindings;
Bindings::size_type capacity_;
friend class EvalState;
friend class EvalMemory;
BindingsBuilder(EvalState & state, Bindings * bindings, size_type capacity)
BindingsBuilder(EvalMemory & mem, SymbolTable & symbols, Bindings * bindings, size_type capacity)
: bindings(bindings)
, capacity_(capacity)
, state(state)
, mem(mem)
, symbols(symbols)
{
}
@ -471,7 +472,8 @@ private:
}
public:
std::reference_wrapper<EvalState> state;
std::reference_wrapper<EvalMemory> mem;
std::reference_wrapper<SymbolTable> symbols;
void insert(Symbol name, Value * value, PosIdx pos = noPos)
{

View file

@ -0,0 +1,70 @@
#pragma once
#include <atomic>
#include <cstdint>
namespace nix {
/**
* An atomic counter aligned on a cache line to prevent false sharing.
* The counter is only enabled when the `NIX_SHOW_STATS` environment
* variable is set. This is to prevent contention on these counters
* when multi-threaded evaluation is enabled.
*/
struct alignas(64) Counter
{
using value_type = uint64_t;
std::atomic<value_type> inner{0};
static bool enabled;
Counter() {}
operator value_type() const noexcept
{
return inner;
}
void operator=(value_type n) noexcept
{
inner = n;
}
value_type load() const noexcept
{
return inner;
}
value_type operator++() noexcept
{
return enabled ? ++inner : 0;
}
value_type operator++(int) noexcept
{
return enabled ? inner++ : 0;
}
value_type operator--() noexcept
{
return enabled ? --inner : 0;
}
value_type operator--(int) noexcept
{
return enabled ? inner-- : 0;
}
value_type operator+=(value_type n) noexcept
{
return enabled ? inner += n : 0;
}
value_type operator-=(value_type n) noexcept
{
return enabled ? inner -= n : 0;
}
};
} // namespace nix

View file

@ -26,7 +26,7 @@ inline void * allocBytes(size_t n)
}
[[gnu::always_inline]]
Value * EvalState::allocValue()
Value * EvalMemory::allocValue()
{
#if NIX_USE_BOEHMGC
/* We use the boehm batch allocator to speed up allocations of Values (of which there are many).
@ -48,15 +48,15 @@ Value * EvalState::allocValue()
void * p = allocBytes(sizeof(Value));
#endif
nrValues++;
stats.nrValues++;
return (Value *) p;
}
[[gnu::always_inline]]
Env & EvalState::allocEnv(size_t size)
Env & EvalMemory::allocEnv(size_t size)
{
nrEnvs++;
nrValuesInEnvs += size;
stats.nrEnvs++;
stats.nrValuesInEnvs += size;
Env * env;

View file

@ -16,12 +16,14 @@
#include "nix/expr/search-path.hh"
#include "nix/expr/repl-exit-status.hh"
#include "nix/util/ref.hh"
#include "nix/expr/counter.hh"
// For `NIX_USE_BOEHMGC`, and if that's set, `GC_THREADS`
#include "nix/expr/config.hh"
#include <boost/unordered/concurrent_flat_map.hpp>
#include <boost/unordered/unordered_flat_map.hpp>
#include <boost/unordered/concurrent_flat_map_fwd.hpp>
#include <map>
#include <optional>
#include <functional>
@ -40,6 +42,7 @@ class Store;
namespace fetchers {
struct Settings;
struct InputCache;
struct Input;
} // namespace fetchers
struct EvalSettings;
class EvalState;
@ -47,6 +50,7 @@ class StorePath;
struct SingleDerivedPath;
enum RepairFlag : bool;
struct MemorySourceAccessor;
struct MountedSourceAccessor;
namespace eval_cache {
class EvalCache;
@ -299,6 +303,68 @@ struct StaticEvalSymbols
}
};
class EvalMemory
{
#if NIX_USE_BOEHMGC
/**
* Allocation cache for GC'd Value objects.
*/
std::shared_ptr<void *> valueAllocCache;
/**
* Allocation cache for size-1 Env objects.
*/
std::shared_ptr<void *> env1AllocCache;
#endif
public:
struct Statistics
{
Counter nrEnvs;
Counter nrValuesInEnvs;
Counter nrValues;
Counter nrAttrsets;
Counter nrAttrsInAttrsets;
Counter nrListElems;
};
EvalMemory();
EvalMemory(const EvalMemory &) = delete;
EvalMemory(EvalMemory &&) = delete;
EvalMemory & operator=(const EvalMemory &) = delete;
EvalMemory & operator=(EvalMemory &&) = delete;
inline Value * allocValue();
inline Env & allocEnv(size_t size);
Bindings * allocBindings(size_t capacity);
BindingsBuilder buildBindings(SymbolTable & symbols, size_t capacity)
{
return BindingsBuilder(*this, symbols, allocBindings(capacity), capacity);
}
ListBuilder buildList(size_t size)
{
stats.nrListElems += size;
return ListBuilder(size);
}
const Statistics & getStats() const &
{
return stats;
}
/**
* Storage for the AST nodes
*/
Exprs exprs;
private:
Statistics stats;
};
class EvalState : public std::enable_shared_from_this<EvalState>
{
public:
@ -309,6 +375,8 @@ public:
SymbolTable symbols;
PosTable positions;
EvalMemory mem;
/**
* If set, force copying files to the Nix store even if they
* already exist there.
@ -318,7 +386,7 @@ public:
/**
* The accessor corresponding to `store`.
*/
const ref<SourceAccessor> storeFS;
const ref<MountedSourceAccessor> storeFS;
/**
* The accessor for the root filesystem.
@ -403,37 +471,30 @@ private:
/* Cache for calls to addToStore(); maps source paths to the store
paths. */
boost::concurrent_flat_map<SourcePath, StorePath, std::hash<SourcePath>> srcToStore;
ref<boost::concurrent_flat_map<SourcePath, StorePath>> srcToStore;
/**
* A cache from path names to parse trees.
* A cache that maps paths to "resolved" paths for importing Nix
* expressions, i.e. `/foo` to `/foo/default.nix`.
*/
typedef boost::unordered_flat_map<
SourcePath,
Expr *,
std::hash<SourcePath>,
std::equal_to<SourcePath>,
traceable_allocator<std::pair<const SourcePath, Expr *>>>
FileParseCache;
FileParseCache fileParseCache;
ref<boost::concurrent_flat_map<SourcePath, SourcePath>> importResolutionCache;
/**
* A cache from path names to values.
* A cache from resolved paths to values.
*/
typedef boost::unordered_flat_map<
ref<boost::concurrent_flat_map<
SourcePath,
Value,
Value *,
std::hash<SourcePath>,
std::equal_to<SourcePath>,
traceable_allocator<std::pair<const SourcePath, Value>>>
FileEvalCache;
FileEvalCache fileEvalCache;
traceable_allocator<std::pair<const SourcePath, Value *>>>>
fileEvalCache;
/**
* Associate source positions of certain AST nodes with their preceding doc comment, if they have one.
* Grouped by file.
*/
boost::unordered_flat_map<SourcePath, DocCommentMap, std::hash<SourcePath>> positionToDocComment;
boost::unordered_flat_map<SourcePath, DocCommentMap> positionToDocComment;
LookupPath lookupPath;
@ -445,28 +506,32 @@ private:
*/
std::shared_ptr<RegexCache> regexCache;
#if NIX_USE_BOEHMGC
/**
* Allocation cache for GC'd Value objects.
*/
std::shared_ptr<void *> valueAllocCache;
/**
* Allocation cache for size-1 Env objects.
*/
std::shared_ptr<void *> env1AllocCache;
#endif
public:
/**
* @param lookupPath Only used during construction.
* @param store The store to use for instantiation
* @param fetchSettings Must outlive the lifetime of this EvalState!
* @param settings Must outlive the lifetime of this EvalState!
* @param buildStore The store to use for builds ("import from derivation", C API `nix_string_realise`)
*/
EvalState(
const LookupPath & _lookupPath,
const LookupPath & lookupPath,
ref<Store> store,
const fetchers::Settings & fetchSettings,
const EvalSettings & settings,
std::shared_ptr<Store> buildStore = nullptr);
~EvalState();
/**
* A wrapper around EvalMemory::allocValue() to avoid code churn when it
* was introduced.
*/
inline Value * allocValue()
{
return mem.allocValue();
}
LookupPath getLookupPath()
{
return lookupPath;
@ -494,8 +559,11 @@ public:
/**
* Allow access to a path.
*
* Only for restrict eval: pure eval just whitelist store paths,
* never arbitrary paths.
*/
void allowPath(const Path & path);
void allowPathLegacy(const Path & path);
/**
* Allow access to a store path. Note that this gets remapped to
@ -515,6 +583,11 @@ public:
void checkURI(const std::string & uri);
/**
* Mount an input on the Nix store.
*/
StorePath mountInput(fetchers::Input & input, const fetchers::Input & originalInput, ref<SourceAccessor> accessor);
/**
* Parse a Nix expression from the specified file.
*/
@ -835,22 +908,14 @@ public:
*/
void autoCallFunction(const Bindings & args, Value & fun, Value & res);
/**
* Allocation primitives.
*/
inline Value * allocValue();
inline Env & allocEnv(size_t size);
Bindings * allocBindings(size_t capacity);
BindingsBuilder buildBindings(size_t capacity)
{
return BindingsBuilder(*this, allocBindings(capacity), capacity);
return mem.buildBindings(symbols, capacity);
}
ListBuilder buildList(size_t size)
{
return ListBuilder(*this, size);
return mem.buildList(size);
}
/**
@ -967,19 +1032,13 @@ private:
*/
std::string mkSingleDerivedPathStringRaw(const SingleDerivedPath & p);
unsigned long nrEnvs = 0;
unsigned long nrValuesInEnvs = 0;
unsigned long nrValues = 0;
unsigned long nrListElems = 0;
unsigned long nrLookups = 0;
unsigned long nrAttrsets = 0;
unsigned long nrAttrsInAttrsets = 0;
unsigned long nrAvoided = 0;
unsigned long nrOpUpdates = 0;
unsigned long nrOpUpdateValuesCopied = 0;
unsigned long nrListConcats = 0;
unsigned long nrPrimOpCalls = 0;
unsigned long nrFunctionCalls = 0;
Counter nrLookups;
Counter nrAvoided;
Counter nrOpUpdates;
Counter nrOpUpdateValuesCopied;
Counter nrListConcats;
Counter nrPrimOpCalls;
Counter nrFunctionCalls;
bool countCalls;

View file

@ -26,4 +26,20 @@ using SmallValueVector = SmallVector<Value *, nItems>;
template<size_t nItems>
using SmallTemporaryValueVector = SmallVector<Value, nItems>;
/**
* For functions where we do not expect deep recursion, we can use a sizable
* part of the stack a free allocation space.
*
* Note: this is expected to be multiplied by sizeof(Value), or about 24 bytes.
*/
constexpr size_t nonRecursiveStackReservation = 128;
/**
* Functions that maybe applied to self-similar inputs, such as concatMap on a
* tree, should reserve a smaller part of the stack for allocation.
*
* Note: this is expected to be multiplied by sizeof(Value), or about 24 bytes.
*/
constexpr size_t conservativeStackReservation = 16;
} // namespace nix

View file

@ -10,6 +10,7 @@ config_pub_h = configure_file(
headers = [ config_pub_h ] + files(
'attr-path.hh',
'attr-set.hh',
'counter.hh',
'eval-cache.hh',
'eval-error.hh',
'eval-gc.hh',

View file

@ -2,12 +2,17 @@
///@file
#include <map>
#include <span>
#include <vector>
#include <memory_resource>
#include <algorithm>
#include "nix/expr/gc-small-vector.hh"
#include "nix/expr/value.hh"
#include "nix/expr/symbol-table.hh"
#include "nix/expr/eval-error.hh"
#include "nix/util/pos-idx.hh"
#include "nix/expr/counter.hh"
namespace nix {
@ -76,9 +81,20 @@ struct AttrName
: expr(e) {};
};
static_assert(std::is_trivially_copy_constructible_v<AttrName>);
typedef std::vector<AttrName> AttrPath;
std::string showAttrPath(const SymbolTable & symbols, const AttrPath & attrPath);
std::string showAttrPath(const SymbolTable & symbols, std::span<const AttrName> attrPath);
using UpdateQueue = SmallTemporaryValueVector<conservativeStackReservation>;
class Exprs
{
std::pmr::monotonic_buffer_resource buffer;
public:
std::pmr::polymorphic_allocator<char> alloc{&buffer};
};
/* Abstract syntax of Nix expressions. */
@ -89,7 +105,7 @@ struct Expr
Symbol sub, lessThan, mul, div, or_, findFile, nixPath, body;
};
static unsigned long nrExprs;
static Counter nrExprs;
Expr()
{
@ -99,8 +115,25 @@ struct Expr
virtual ~Expr() {};
virtual void show(const SymbolTable & symbols, std::ostream & str) const;
virtual void bindVars(EvalState & es, const std::shared_ptr<const StaticEnv> & env);
/** Normal evaluation, implemented directly by all subclasses. */
virtual void eval(EvalState & state, Env & env, Value & v);
/**
* Create a thunk for the delayed computation of the given expression
* in the given environment. But if the expression is a variable,
* then look it up right away. This significantly reduces the number
* of thunks allocated.
*/
virtual Value * maybeThunk(EvalState & state, Env & env);
/**
* Only called when performing an attrset update: `//` or similar.
* Instead of writing to a Value &, this function writes to an UpdateQueue.
* This allows the expression to perform multiple updates in a delayed manner, gathering up all the updates before
* applying them.
*/
virtual void evalForUpdate(EvalState & state, Env & env, UpdateQueue & q, std::string_view errorCtx);
virtual void setName(Symbol name);
virtual void setDocComment(DocComment docComment) {};
@ -152,13 +185,28 @@ struct ExprFloat : Expr
struct ExprString : Expr
{
std::string s;
Value v;
ExprString(std::string && s)
: s(std::move(s))
/**
* This is only for strings already allocated in our polymorphic allocator,
* or that live at least that long (e.g. c++ string literals)
*/
ExprString(const char * s)
{
v.mkStringNoCopy(this->s.data());
v.mkStringNoCopy(s);
};
ExprString(std::pmr::polymorphic_allocator<char> & alloc, std::string_view sv)
{
auto len = sv.length();
if (len == 0) {
v.mkStringNoCopy("");
return;
}
char * s = alloc.allocate(len + 1);
sv.copy(s, len);
s[len] = '\0';
v.mkStringNoCopy(s);
};
Value * maybeThunk(EvalState & state, Env & env) override;
@ -168,14 +216,16 @@ struct ExprString : Expr
struct ExprPath : Expr
{
ref<SourceAccessor> accessor;
std::string s;
Value v;
ExprPath(ref<SourceAccessor> accessor, std::string s)
ExprPath(std::pmr::polymorphic_allocator<char> & alloc, ref<SourceAccessor> accessor, std::string_view sv)
: accessor(accessor)
, s(std::move(s))
{
v.mkPath(&*accessor, this->s.c_str());
auto len = sv.length();
char * s = alloc.allocate(len + 1);
sv.copy(s, len);
s[len] = '\0';
v.mkPath(&*accessor, s);
}
Value * maybeThunk(EvalState & state, Env & env) override;
@ -242,20 +292,33 @@ struct ExprInheritFrom : ExprVar
struct ExprSelect : Expr
{
PosIdx pos;
uint32_t nAttrPath;
Expr *e, *def;
AttrPath attrPath;
ExprSelect(const PosIdx & pos, Expr * e, AttrPath attrPath, Expr * def)
AttrName * attrPathStart;
ExprSelect(
std::pmr::polymorphic_allocator<char> & alloc,
const PosIdx & pos,
Expr * e,
std::span<const AttrName> attrPath,
Expr * def)
: pos(pos)
, nAttrPath(attrPath.size())
, e(e)
, def(def)
, attrPath(std::move(attrPath)) {};
, attrPathStart(alloc.allocate_object<AttrName>(nAttrPath))
{
std::ranges::copy(attrPath, attrPathStart);
};
ExprSelect(const PosIdx & pos, Expr * e, Symbol name)
ExprSelect(std::pmr::polymorphic_allocator<char> & alloc, const PosIdx & pos, Expr * e, Symbol name)
: pos(pos)
, nAttrPath(1)
, e(e)
, def(0)
, attrPathStart((alloc.allocate_object<AttrName>()))
{
attrPath.push_back(AttrName(name));
*attrPathStart = AttrName(name);
};
PosIdx getPos() const override
@ -263,6 +326,11 @@ struct ExprSelect : Expr
return pos;
}
std::span<const AttrName> getAttrPath() const
{
return {attrPathStart, nAttrPath};
}
/**
* Evaluate the `a.b.c` part of `a.b.c.d`. This exists mostly for the purpose of :doc in the repl.
*
@ -280,10 +348,14 @@ struct ExprSelect : Expr
struct ExprOpHasAttr : Expr
{
Expr * e;
AttrPath attrPath;
ExprOpHasAttr(Expr * e, AttrPath attrPath)
std::span<AttrName> attrPath;
ExprOpHasAttr(std::pmr::polymorphic_allocator<char> & alloc, Expr * e, std::vector<AttrName> attrPath)
: e(e)
, attrPath(std::move(attrPath)) {};
, attrPath({alloc.allocate_object<AttrName>(attrPath.size()), attrPath.size()})
{
std::ranges::copy(attrPath, this->attrPath.begin());
};
PosIdx getPos() const override
{
@ -565,36 +637,39 @@ struct ExprOpNot : Expr
COMMON_METHODS
};
#define MakeBinOp(name, s) \
struct name : Expr \
{ \
PosIdx pos; \
Expr *e1, *e2; \
name(Expr * e1, Expr * e2) \
: e1(e1) \
, e2(e2) {}; \
name(const PosIdx & pos, Expr * e1, Expr * e2) \
: pos(pos) \
, e1(e1) \
, e2(e2) {}; \
void show(const SymbolTable & symbols, std::ostream & str) const override \
{ \
str << "("; \
e1->show(symbols, str); \
str << " " s " "; \
e2->show(symbols, str); \
str << ")"; \
} \
void bindVars(EvalState & es, const std::shared_ptr<const StaticEnv> & env) override \
{ \
e1->bindVars(es, env); \
e2->bindVars(es, env); \
} \
void eval(EvalState & state, Env & env, Value & v) override; \
PosIdx getPos() const override \
{ \
return pos; \
} \
#define MakeBinOpMembers(name, s) \
PosIdx pos; \
Expr *e1, *e2; \
name(Expr * e1, Expr * e2) \
: e1(e1) \
, e2(e2){}; \
name(const PosIdx & pos, Expr * e1, Expr * e2) \
: pos(pos) \
, e1(e1) \
, e2(e2){}; \
void show(const SymbolTable & symbols, std::ostream & str) const override \
{ \
str << "("; \
e1->show(symbols, str); \
str << " " s " "; \
e2->show(symbols, str); \
str << ")"; \
} \
void bindVars(EvalState & es, const std::shared_ptr<const StaticEnv> & env) override \
{ \
e1->bindVars(es, env); \
e2->bindVars(es, env); \
} \
void eval(EvalState & state, Env & env, Value & v) override; \
PosIdx getPos() const override \
{ \
return pos; \
}
#define MakeBinOp(name, s) \
struct name : Expr \
{ \
MakeBinOpMembers(name, s) \
}
MakeBinOp(ExprOpEq, "==");
@ -602,9 +677,20 @@ MakeBinOp(ExprOpNEq, "!=");
MakeBinOp(ExprOpAnd, "&&");
MakeBinOp(ExprOpOr, "||");
MakeBinOp(ExprOpImpl, "->");
MakeBinOp(ExprOpUpdate, "//");
MakeBinOp(ExprOpConcatLists, "++");
struct ExprOpUpdate : Expr
{
private:
/** Special case for merging of two attrsets. */
void eval(EvalState & state, Value & v, Value & v1, Value & v2);
void evalForUpdate(EvalState & state, Env & env, UpdateQueue & q);
public:
MakeBinOpMembers(ExprOpUpdate, "//");
virtual void evalForUpdate(EvalState & state, Env & env, UpdateQueue & q, std::string_view errorCtx) override;
};
struct ExprConcatStrings : Expr
{
PosIdx pos;

View file

@ -24,7 +24,6 @@ struct StringToken
}
};
// This type must be trivially copyable; see YYLTYPE_IS_TRIVIAL in parser.y.
struct ParserLocation
{
int beginOffset;
@ -44,9 +43,6 @@ struct ParserLocation
beginOffset = stashedBeginOffset;
endOffset = stashedEndOffset;
}
/** Latest doc comment position, or 0. */
int doc_comment_first_column, doc_comment_last_column;
};
struct LexerState
@ -82,6 +78,7 @@ struct LexerState
struct ParserState
{
const LexerState & lexerState;
std::pmr::polymorphic_allocator<char> & alloc;
SymbolTable & symbols;
PosTable & positions;
Expr * result;
@ -327,7 +324,7 @@ ParserState::stripIndentation(const PosIdx pos, std::vector<std::pair<PosIdx, st
// Ignore empty strings for a minor optimisation and AST simplification
if (s2 != "") {
es2->emplace_back(i->first, new ExprString(std::move(s2)));
es2->emplace_back(i->first, new ExprString(alloc, s2));
}
};
for (; i != es.end(); ++i, --n) {

View file

@ -8,22 +8,6 @@
namespace nix {
/**
* For functions where we do not expect deep recursion, we can use a sizable
* part of the stack a free allocation space.
*
* Note: this is expected to be multiplied by sizeof(Value), or about 24 bytes.
*/
constexpr size_t nonRecursiveStackReservation = 128;
/**
* Functions that maybe applied to self-similar inputs, such as concatMap on a
* tree, should reserve a smaller part of the stack for allocation.
*
* Note: this is expected to be multiplied by sizeof(Value), or about 24 bytes.
*/
constexpr size_t conservativeStackReservation = 16;
struct RegisterPrimOp
{
typedef std::vector<PrimOp> PrimOps;

View file

@ -155,7 +155,7 @@ class ListBuilder
Value * inlineElems[2] = {nullptr, nullptr};
public:
Value ** elems;
ListBuilder(EvalState & state, size_t size);
ListBuilder(size_t size);
// NOTE: Can be noexcept because we are just copying integral values and
// raw pointers.

View file

@ -1,11 +1,11 @@
#include "lexer-helpers.hh"
void nix::lexer::internal::initLoc(YYLTYPE * loc)
void nix::lexer::internal::initLoc(Parser::location_type * loc)
{
loc->beginOffset = loc->endOffset = 0;
}
void nix::lexer::internal::adjustLoc(yyscan_t yyscanner, YYLTYPE * loc, const char * s, size_t len)
void nix::lexer::internal::adjustLoc(yyscan_t yyscanner, Parser::location_type * loc, const char * s, size_t len)
{
loc->stash();

View file

@ -2,16 +2,12 @@
#include <cstddef>
// including the generated headers twice leads to errors
#ifndef BISON_HEADER
# include "lexer-tab.hh"
# include "parser-tab.hh"
#endif
#include "parser-scanner-decls.hh"
namespace nix::lexer::internal {
void initLoc(YYLTYPE * loc);
void initLoc(Parser::location_type * loc);
void adjustLoc(yyscan_t yyscanner, YYLTYPE * loc, const char * s, size_t len);
void adjustLoc(yyscan_t yyscanner, Parser::location_type * loc, const char * s, size_t len);
} // namespace nix::lexer::internal

View file

@ -82,6 +82,10 @@ static void requireExperimentalFeature(const ExperimentalFeature & feature, cons
}
using enum nix::Parser::token::token_kind_type;
using YYSTYPE = nix::Parser::value_type;
using YYLTYPE = nix::Parser::location_type;
// yacc generates code that uses unannotated fallthrough.
#pragma GCC diagnostic ignored "-Wimplicit-fallthrough"

View file

@ -69,6 +69,10 @@ if bdw_gc.found()
define_value = cxx.has_function(funcspec).to_int()
configdata_priv.set(define_name, define_value)
endforeach
if host_machine.system() == 'cygwin'
# undefined reference to `__wrap__Znwm'
configdata_pub.set('GC_NO_INLINE_STD_NEW', 1)
endif
endif
# Used in public header. Affects ABI!
configdata_pub.set('NIX_USE_BOEHMGC', bdw_gc.found().to_int())
@ -93,7 +97,6 @@ config_priv_h = configure_file(
)
subdir('nix-meson-build-support/common')
subdir('nix-meson-build-support/asan-options')
parser_tab = custom_target(
input : 'parser.y',

View file

@ -11,7 +11,7 @@
namespace nix {
unsigned long Expr::nrExprs = 0;
Counter Expr::nrExprs;
ExprBlackHole eBlackHole;
@ -40,12 +40,12 @@ void ExprFloat::show(const SymbolTable & symbols, std::ostream & str) const
void ExprString::show(const SymbolTable & symbols, std::ostream & str) const
{
printLiteralString(str, s);
printLiteralString(str, v.string_view());
}
void ExprPath::show(const SymbolTable & symbols, std::ostream & str) const
{
str << s;
str << v.pathStr();
}
void ExprVar::show(const SymbolTable & symbols, std::ostream & str) const
@ -57,7 +57,7 @@ void ExprSelect::show(const SymbolTable & symbols, std::ostream & str) const
{
str << "(";
e->show(symbols, str);
str << ")." << showAttrPath(symbols, attrPath);
str << ")." << showAttrPath(symbols, getAttrPath());
if (def) {
str << " or (";
def->show(symbols, str);
@ -261,7 +261,7 @@ void ExprPos::show(const SymbolTable & symbols, std::ostream & str) const
str << "__curPos";
}
std::string showAttrPath(const SymbolTable & symbols, const AttrPath & attrPath)
std::string showAttrPath(const SymbolTable & symbols, std::span<const AttrName> attrPath)
{
std::ostringstream out;
bool first = true;
@ -362,7 +362,7 @@ void ExprSelect::bindVars(EvalState & es, const std::shared_ptr<const StaticEnv>
e->bindVars(es, env);
if (def)
def->bindVars(es, env);
for (auto & i : attrPath)
for (auto & i : getAttrPath())
if (!i.symbol)
i.expr->bindVars(es, env);
}

View file

@ -0,0 +1,17 @@
#pragma once
#ifndef BISON_HEADER
# include "parser-tab.hh"
using YYSTYPE = nix::parser::BisonParser::value_type;
using YYLTYPE = nix::parser::BisonParser::location_type;
# include "lexer-tab.hh" // IWYU pragma: export
#endif
namespace nix {
class Parser : public parser::BisonParser
{
using BisonParser::BisonParser;
};
} // namespace nix

View file

@ -1,5 +1,7 @@
%skeleton "lalr1.cc"
%define api.location.type { ::nix::ParserLocation }
%define api.pure
%define api.namespace { ::nix::parser }
%define api.parser.class { BisonParser }
%locations
%define parse.error verbose
%defines
@ -26,19 +28,12 @@
#include "nix/expr/eval-settings.hh"
#include "nix/expr/parser-state.hh"
// Bison seems to have difficulty growing the parser stack when using C++ with
// a custom location type. This undocumented macro tells Bison that our
// location type is "trivially copyable" in C++-ese, so it is safe to use the
// same memcpy macro it uses to grow the stack that it uses with its own
// default location type. Without this, we get "error: memory exhausted" when
// parsing some large Nix files. Our other options are to increase the initial
// stack size (200 by default) to be as large as we ever want to support (so
// that growing the stack is unnecessary), or redefine the stack-relocation
// macro ourselves (which is also undocumented).
#define YYLTYPE_IS_TRIVIAL 1
#define YY_DECL int yylex \
(YYSTYPE * yylval_param, YYLTYPE * yylloc_param, yyscan_t yyscanner, nix::ParserState * state)
#define YY_DECL \
int yylex( \
nix::Parser::value_type * yylval_param, \
nix::Parser::location_type * yylloc_param, \
yyscan_t yyscanner, \
nix::ParserState * state)
// For efficiency, we only track offsets; not line,column coordinates
# define YYLLOC_DEFAULT(Current, Rhs, N) \
@ -64,6 +59,7 @@ Expr * parseExprFromBuf(
size_t length,
Pos::Origin origin,
const SourcePath & basePath,
std::pmr::polymorphic_allocator<char> & alloc,
SymbolTable & symbols,
const EvalSettings & settings,
PosTable & positions,
@ -78,24 +74,30 @@ Expr * parseExprFromBuf(
%{
#include "parser-tab.hh"
#include "lexer-tab.hh"
/* The parser is very performance sensitive and loses out on a lot
of performance even with basic stdlib assertions. Since those don't
affect ABI we can disable those just for this file. */
#if defined(_GLIBCXX_ASSERTIONS) && !defined(_GLIBCXX_DEBUG)
#undef _GLIBCXX_ASSERTIONS
#endif
#include "parser-scanner-decls.hh"
YY_DECL;
using namespace nix;
#define CUR_POS state->at(yyloc)
#define CUR_POS state->at(yylhs.location)
void yyerror(YYLTYPE * loc, yyscan_t scanner, ParserState * state, const char * error)
void parser::BisonParser::error(const location_type &loc_, const std::string &error)
{
auto loc = loc_;
if (std::string_view(error).starts_with("syntax error, unexpected end of file")) {
loc->beginOffset = loc->endOffset;
loc.beginOffset = loc.endOffset;
}
throw ParseError({
.msg = HintFmt(error),
.pos = state->positions[state->at(*loc)]
.pos = state->positions[state->at(loc)]
});
}
@ -134,6 +136,7 @@ static Expr * makeCall(PosIdx pos, Expr * fn, Expr * arg) {
std::vector<nix::AttrName> * attrNames;
std::vector<std::pair<nix::AttrName, nix::PosIdx>> * inheritAttrs;
std::vector<std::pair<nix::PosIdx, nix::Expr *>> * string_parts;
std::variant<nix::Expr *, std::string_view> * to_be_string;
std::vector<std::pair<nix::PosIdx, std::variant<nix::Expr *, nix::StringToken>>> * ind_string_parts;
}
@ -148,7 +151,8 @@ static Expr * makeCall(PosIdx pos, Expr * fn, Expr * arg) {
%type <inheritAttrs> attrs
%type <string_parts> string_parts_interpolated
%type <ind_string_parts> ind_string_parts
%type <e> path_start string_parts string_attr
%type <e> path_start
%type <to_be_string> string_parts string_attr
%type <id> attr
%token <id> ID
%token <str> STR IND_STR
@ -182,7 +186,7 @@ start: expr {
state->result = $1;
// This parser does not use yynerrs; suppress the warning.
(void) yynerrs;
(void) yynerrs_;
};
expr: expr_function;
@ -257,7 +261,7 @@ expr_op
| expr_op OR expr_op { $$ = new ExprOpOr(state->at(@2), $1, $3); }
| expr_op IMPL expr_op { $$ = new ExprOpImpl(state->at(@2), $1, $3); }
| expr_op UPDATE expr_op { $$ = new ExprOpUpdate(state->at(@2), $1, $3); }
| expr_op '?' attrpath { $$ = new ExprOpHasAttr($1, std::move(*$3)); delete $3; }
| expr_op '?' attrpath { $$ = new ExprOpHasAttr(state->alloc, $1, std::move(*$3)); delete $3; }
| expr_op '+' expr_op
{ $$ = new ExprConcatStrings(state->at(@2), false, new std::vector<std::pair<PosIdx, Expr *> >({{state->at(@1), $1}, {state->at(@3), $3}})); }
| expr_op '-' expr_op { $$ = new ExprCall(state->at(@2), new ExprVar(state->s.sub), {$1, $3}); }
@ -278,9 +282,9 @@ expr_app
expr_select
: expr_simple '.' attrpath
{ $$ = new ExprSelect(CUR_POS, $1, std::move(*$3), nullptr); delete $3; }
{ $$ = new ExprSelect(state->alloc, CUR_POS, $1, std::move(*$3), nullptr); delete $3; }
| expr_simple '.' attrpath OR_KW expr_select
{ $$ = new ExprSelect(CUR_POS, $1, std::move(*$3), $5); delete $3; $5->warnIfCursedOr(state->symbols, state->positions); }
{ $$ = new ExprSelect(state->alloc, CUR_POS, $1, std::move(*$3), $5); delete $3; $5->warnIfCursedOr(state->symbols, state->positions); }
| /* Backwards compatibility: because Nixpkgs has a function named or,
allow stuff like map or [...]. This production is problematic (see
https://github.com/NixOS/nix/issues/11118) and will be refactored in the
@ -303,7 +307,13 @@ expr_simple
}
| INT_LIT { $$ = new ExprInt($1); }
| FLOAT_LIT { $$ = new ExprFloat($1); }
| '"' string_parts '"' { $$ = $2; }
| '"' string_parts '"' {
std::visit(overloaded{
[&](std::string_view str) { $$ = new ExprString(state->alloc, str); },
[&](Expr * expr) { $$ = expr; }},
*$2);
delete $2;
}
| IND_STRING_OPEN ind_string_parts IND_STRING_CLOSE {
$$ = state->stripIndentation(CUR_POS, std::move(*$2));
delete $2;
@ -314,11 +324,11 @@ expr_simple
$$ = new ExprConcatStrings(CUR_POS, false, $2);
}
| SPATH {
std::string path($1.p + 1, $1.l - 2);
std::string_view path($1.p + 1, $1.l - 2);
$$ = new ExprCall(CUR_POS,
new ExprVar(state->s.findFile),
{new ExprVar(state->s.nixPath),
new ExprString(std::move(path))});
new ExprString(state->alloc, path)});
}
| URI {
static bool noURLLiterals = experimentalFeatureSettings.isEnabled(Xp::NoUrlLiterals);
@ -327,13 +337,13 @@ expr_simple
.msg = HintFmt("URL literals are disabled"),
.pos = state->positions[CUR_POS]
});
$$ = new ExprString(std::string($1));
$$ = new ExprString(state->alloc, $1);
}
| '(' expr ')' { $$ = $2; }
/* Let expressions `let {..., body = ...}' are just desugared
into `(rec {..., body = ...}).body'. */
| LET '{' binds '}'
{ $3->recursive = true; $3->pos = CUR_POS; $$ = new ExprSelect(noPos, $3, state->s.body); }
{ $3->recursive = true; $3->pos = CUR_POS; $$ = new ExprSelect(state->alloc, noPos, $3, state->s.body); }
| REC '{' binds '}'
{ $3->recursive = true; $3->pos = CUR_POS; $$ = $3; }
| '{' binds1 '}'
@ -344,19 +354,19 @@ expr_simple
;
string_parts
: STR { $$ = new ExprString(std::string($1)); }
| string_parts_interpolated { $$ = new ExprConcatStrings(CUR_POS, true, $1); }
| { $$ = new ExprString(""); }
: STR { $$ = new std::variant<Expr *, std::string_view>($1); }
| string_parts_interpolated { $$ = new std::variant<Expr *, std::string_view>(new ExprConcatStrings(CUR_POS, true, $1)); }
| { $$ = new std::variant<Expr *, std::string_view>(std::string_view()); }
;
string_parts_interpolated
: string_parts_interpolated STR
{ $$ = $1; $1->emplace_back(state->at(@2), new ExprString(std::string($2))); }
{ $$ = $1; $1->emplace_back(state->at(@2), new ExprString(state->alloc, $2)); }
| string_parts_interpolated DOLLAR_CURLY expr '}' { $$ = $1; $1->emplace_back(state->at(@2), $3); }
| DOLLAR_CURLY expr '}' { $$ = new std::vector<std::pair<PosIdx, Expr *>>; $$->emplace_back(state->at(@1), $2); }
| STR DOLLAR_CURLY expr '}' {
$$ = new std::vector<std::pair<PosIdx, Expr *>>;
$$->emplace_back(state->at(@1), new ExprString(std::string($1)));
$$->emplace_back(state->at(@1), new ExprString(state->alloc, $1));
$$->emplace_back(state->at(@2), $3);
}
;
@ -382,8 +392,8 @@ path_start
root filesystem accessor, rather than the accessor of the
current Nix expression. */
literal.front() == '/'
? new ExprPath(state->rootFS, std::move(path))
: new ExprPath(state->basePath.accessor, std::move(path));
? new ExprPath(state->alloc, state->rootFS, path)
: new ExprPath(state->alloc, state->basePath.accessor, path);
}
| HPATH {
if (state->settings.pureEval) {
@ -393,7 +403,7 @@ path_start
);
}
Path path(getHome() + std::string($1.p + 1, $1.l - 1));
$$ = new ExprPath(ref<SourceAccessor>(state->rootFS), std::move(path));
$$ = new ExprPath(state->alloc, ref<SourceAccessor>(state->rootFS), path);
}
;
@ -437,7 +447,7 @@ binds1
$accum->attrs.emplace(
i.symbol,
ExprAttrs::AttrDef(
new ExprSelect(iPos, from, i.symbol),
new ExprSelect(state->alloc, iPos, from, i.symbol),
iPos,
ExprAttrs::AttrDef::Kind::InheritedFrom));
}
@ -454,15 +464,16 @@ attrs
: attrs attr { $$ = $1; $1->emplace_back(AttrName(state->symbols.create($2)), state->at(@2)); }
| attrs string_attr
{ $$ = $1;
ExprString * str = dynamic_cast<ExprString *>($2);
if (str) {
$$->emplace_back(AttrName(state->symbols.create(str->s)), state->at(@2));
delete str;
} else
throw ParseError({
.msg = HintFmt("dynamic attributes not allowed in inherit"),
.pos = state->positions[state->at(@2)]
});
std::visit(overloaded {
[&](std::string_view str) { $$->emplace_back(AttrName(state->symbols.create(str)), state->at(@2)); },
[&](Expr * expr) {
throw ParseError({
.msg = HintFmt("dynamic attributes not allowed in inherit"),
.pos = state->positions[state->at(@2)]
});
}
}, *$2);
delete $2;
}
| { $$ = new std::vector<std::pair<AttrName, PosIdx>>; }
;
@ -471,22 +482,20 @@ attrpath
: attrpath '.' attr { $$ = $1; $1->push_back(AttrName(state->symbols.create($3))); }
| attrpath '.' string_attr
{ $$ = $1;
ExprString * str = dynamic_cast<ExprString *>($3);
if (str) {
$$->push_back(AttrName(state->symbols.create(str->s)));
delete str;
} else
$$->push_back(AttrName($3));
std::visit(overloaded {
[&](std::string_view str) { $$->push_back(AttrName(state->symbols.create(str))); },
[&](Expr * expr) { $$->push_back(AttrName(expr)); }
}, *$3);
delete $3;
}
| attr { $$ = new std::vector<AttrName>; $$->push_back(AttrName(state->symbols.create($1))); }
| string_attr
{ $$ = new std::vector<AttrName>;
ExprString *str = dynamic_cast<ExprString *>($1);
if (str) {
$$->push_back(AttrName(state->symbols.create(str->s)));
delete str;
} else
$$->push_back(AttrName($1));
std::visit(overloaded {
[&](std::string_view str) { $$->push_back(AttrName(state->symbols.create(str))); },
[&](Expr * expr) { $$->push_back(AttrName(expr)); }
}, *$1);
delete $1;
}
;
@ -497,7 +506,7 @@ attr
string_attr
: '"' string_parts '"' { $$ = $2; }
| DOLLAR_CURLY expr '}' { $$ = $2; }
| DOLLAR_CURLY expr '}' { $$ = new std::variant<Expr *, std::string_view>($2); }
;
expr_list
@ -537,6 +546,7 @@ Expr * parseExprFromBuf(
size_t length,
Pos::Origin origin,
const SourcePath & basePath,
std::pmr::polymorphic_allocator<char> & alloc,
SymbolTable & symbols,
const EvalSettings & settings,
PosTable & positions,
@ -551,6 +561,7 @@ Expr * parseExprFromBuf(
};
ParserState state {
.lexerState = lexerState,
.alloc = alloc,
.symbols = symbols,
.positions = positions,
.basePath = basePath,
@ -563,7 +574,8 @@ Expr * parseExprFromBuf(
Finally _destroy([&] { yylex_destroy(scanner); });
yy_scan_buffer(text, length, scanner);
yyparse(scanner, &state);
Parser parser(scanner, &state);
parser.parse();
return state.result;
}

View file

@ -1,5 +1,7 @@
#include "nix/store/store-api.hh"
#include "nix/expr/eval.hh"
#include "nix/util/mounted-source-accessor.hh"
#include "nix/fetchers/fetch-to-store.hh"
namespace nix {
@ -18,4 +20,27 @@ SourcePath EvalState::storePath(const StorePath & path)
return {rootFS, CanonPath{store->printStorePath(path)}};
}
StorePath
EvalState::mountInput(fetchers::Input & input, const fetchers::Input & originalInput, ref<SourceAccessor> accessor)
{
auto storePath = fetchToStore(fetchSettings, *store, accessor, FetchMode::Copy, input.getName());
allowPath(storePath); // FIXME: should just whitelist the entire virtual store
storeFS->mount(CanonPath(store->printStorePath(storePath)), accessor);
auto narHash = store->queryPathInfo(storePath)->narHash;
input.attrs.insert_or_assign("narHash", narHash.to_string(HashFormat::SRI, true));
if (originalInput.getNarHash() && narHash != *originalInput.getNarHash())
throw Error(
(unsigned int) 102,
"NAR hash mismatch in input '%s', expected '%s' but got '%s'",
originalInput.to_string(),
narHash.to_string(HashFormat::SRI, true),
originalInput.getNarHash()->to_string(HashFormat::SRI, true));
return storePath;
}
} // namespace nix

View file

@ -262,7 +262,7 @@ static void scopedImport(EvalState & state, const PosIdx pos, SourcePath & path,
{
state.forceAttrs(*vScope, pos, "while evaluating the first argument passed to builtins.scopedImport");
Env * env = &state.allocEnv(vScope->attrs()->size());
Env * env = &state.mem.allocEnv(vScope->attrs()->size());
env->up = &state.baseEnv;
auto staticEnv = std::make_shared<StaticEnv>(nullptr, state.staticBaseEnv, vScope->attrs()->size());
@ -1420,7 +1420,8 @@ static void derivationStrictInternal(EvalState & state, std::string_view drvName
.debugThrow();
}
if (ingestionMethod == ContentAddressMethod::Raw::Text)
experimentalFeatureSettings.require(Xp::DynamicDerivations);
experimentalFeatureSettings.require(
Xp::DynamicDerivations, fmt("text-hashed derivation '%s', outputHashMode = \"text\"", drvName));
if (ingestionMethod == ContentAddressMethod::Raw::Git)
experimentalFeatureSettings.require(Xp::GitHashing);
};
@ -2412,7 +2413,7 @@ static void prim_toXML(EvalState & state, const PosIdx pos, Value ** args, Value
std::ostringstream out;
NixStringContext context;
printValueAsXML(state, true, false, *args[0], out, context, pos);
v.mkString(toView(out), context);
v.mkString(out.view(), context);
}
static RegisterPrimOp primop_toXML({
@ -2520,7 +2521,7 @@ static void prim_toJSON(EvalState & state, const PosIdx pos, Value ** args, Valu
std::ostringstream out;
NixStringContext context;
printValueAsJSON(state, true, *args[0], pos, out, context);
v.mkString(toView(out), context);
v.mkString(out.view(), context);
}
static RegisterPrimOp primop_toJSON({
@ -3161,7 +3162,7 @@ static void prim_listToAttrs(EvalState & state, const PosIdx pos, Value ** args,
// Step 1. Sort the name-value attrsets in place using the memory we allocate for the result
auto listView = args[0]->listView();
size_t listSize = listView.size();
auto & bindings = *state.allocBindings(listSize);
auto & bindings = *state.mem.allocBindings(listSize);
using ElemPtr = decltype(&bindings[0].value);
for (const auto & [n, v2] : enumerate(listView)) {

View file

@ -10,6 +10,7 @@
#include "nix/util/url.hh"
#include "nix/expr/value-to-json.hh"
#include "nix/fetchers/fetch-to-store.hh"
#include "nix/fetchers/input-cache.hh"
#include <nlohmann/json.hpp>
@ -218,11 +219,11 @@ static void fetchTree(
throw Error("input '%s' is not allowed to use the '__final' attribute", input.to_string());
}
auto [storePath, input2] = input.fetchToStore(state.store);
auto cachedInput = state.inputCache->getAccessor(state.store, input, fetchers::UseRegistries::No);
state.allowPath(storePath);
auto storePath = state.mountInput(cachedInput.lockedInput, input, cachedInput.accessor);
emitTreeAttrs(state, storePath, input2, v, params.emptyRevFallback, false);
emitTreeAttrs(state, storePath, cachedInput.lockedInput, v, params.emptyRevFallback, false);
}
static void prim_fetchTree(EvalState & state, const PosIdx pos, Value ** args, Value & v)
@ -561,14 +562,22 @@ static void fetch(
.hash = *expectedHash,
.references = {}});
if (state.store->isValidPath(expectedPath)) {
// Try to get the path from the local store or substituters
try {
state.store->ensurePath(expectedPath);
debug("using substituted/cached path '%s' for '%s'", state.store->printStorePath(expectedPath), *url);
state.allowAndSetStorePathString(expectedPath, v);
return;
} catch (Error & e) {
debug(
"substitution of '%s' failed, will try to download: %s",
state.store->printStorePath(expectedPath),
e.what());
// Fall through to download
}
}
// TODO: fetching may fail, yet the path may be substitutable.
// https://github.com/NixOS/nix/issues/4313
// Download the file/tarball if substitution failed or no hash was provided
auto storePath = unpack ? fetchToStore(
state.fetchSettings,
*state.store,
@ -579,7 +588,11 @@ static void fetch(
if (expectedHash) {
auto hash = unpack ? state.store->queryPathInfo(storePath)->narHash
: hashFile(HashAlgorithm::SHA256, state.store->toRealPath(storePath));
: hashPath(
{state.store->requireStoreObjectAccessor(storePath)},
FileSerialisationMethod::Flat,
HashAlgorithm::SHA256)
.hash;
if (hash != *expectedHash) {
state
.error<EvalError>(

View file

@ -139,7 +139,7 @@ static void prim_fromTOML(EvalState & state, const PosIdx pos, Value ** args, Va
attrs.alloc("_type").mkStringNoCopy("timestamp");
std::ostringstream s;
s << t;
auto str = toView(s);
auto str = s.view();
forceNoNullByte(str);
attrs.alloc("value").mkString(str);
v.mkAttrs(attrs);

View file

@ -461,7 +461,7 @@ private:
std::ostringstream s;
s << state.positions[v.lambda().fun->pos];
output << " @ " << filterANSIEscapes(toView(s));
output << " @ " << filterANSIEscapes(s.view());
}
} else if (v.isPrimOp()) {
if (v.primOp())

View file

@ -32,7 +32,6 @@ add_project_arguments(
)
subdir('nix-meson-build-support/common')
subdir('nix-meson-build-support/asan-options')
sources = files(
'nix_api_fetchers.cc',

View file

@ -37,7 +37,6 @@ libgit2 = dependency('libgit2')
deps_private += libgit2
subdir('nix-meson-build-support/common')
subdir('nix-meson-build-support/asan-options')
sources = files(
'access-tokens.cc',
@ -64,7 +63,7 @@ this_exe = executable(
test(
meson.project_name(),
this_exe,
env : asan_test_options_env + {
env : {
'_NIX_TEST_UNIT_DATA' : meson.current_source_dir() / 'data',
},
protocol : 'gtest',

View file

@ -61,7 +61,6 @@ mkMesonExecutable (finalAttrs: {
buildInputs = [ writableTmpDirAsHomeHook ];
}
''
export ASAN_OPTIONS=abort_on_error=1:print_summary=1:detect_leaks=0
export _NIX_TEST_UNIT_DATA=${resolvePath ./data}
${stdenv.hostPlatform.emulator buildPackages} ${lib.getExe finalAttrs.finalPackage}
touch $out

View file

@ -1,14 +1,14 @@
#include <gtest/gtest.h>
#include "nix/fetchers/fetchers.hh"
#include "nix/util/json-utils.hh"
#include <nlohmann/json.hpp>
#include "nix/util/tests/characterization.hh"
#include "nix/util/tests/json-characterization.hh"
namespace nix {
using nlohmann::json;
class PublicKeyTest : public CharacterizationTest
class PublicKeyTest : public JsonCharacterizationTest<fetchers::PublicKey>,
public ::testing::WithParamInterface<std::pair<std::string_view, fetchers::PublicKey>>
{
std::filesystem::path unitTestData = getUnitTestData() / "public-key";
@ -19,30 +19,35 @@ public:
}
};
#define TEST_JSON(FIXTURE, NAME, VAL) \
TEST_F(FIXTURE, PublicKey_##NAME##_from_json) \
{ \
readTest(#NAME ".json", [&](const auto & encoded_) { \
fetchers::PublicKey expected{VAL}; \
fetchers::PublicKey got = nlohmann::json::parse(encoded_); \
ASSERT_EQ(got, expected); \
}); \
} \
\
TEST_F(FIXTURE, PublicKey_##NAME##_to_json) \
{ \
writeTest( \
#NAME ".json", \
[&]() -> json { return nlohmann::json(fetchers::PublicKey{VAL}); }, \
[](const auto & file) { return json::parse(readFile(file)); }, \
[](const auto & file, const auto & got) { return writeFile(file, got.dump(2) + "\n"); }); \
}
TEST_P(PublicKeyTest, from_json)
{
const auto & [name, expected] = GetParam();
readJsonTest(name, expected);
}
TEST_JSON(PublicKeyTest, simple, (fetchers::PublicKey{.type = "ssh-rsa", .key = "ABCDE"}))
TEST_P(PublicKeyTest, to_json)
{
const auto & [name, value] = GetParam();
writeJsonTest(name, value);
}
TEST_JSON(PublicKeyTest, defaultType, fetchers::PublicKey{.key = "ABCDE"})
#undef TEST_JSON
INSTANTIATE_TEST_SUITE_P(
PublicKeyJSON,
PublicKeyTest,
::testing::Values(
std::pair{
"simple",
fetchers::PublicKey{
.type = "ssh-rsa",
.key = "ABCDE",
},
},
std::pair{
"defaultType",
fetchers::PublicKey{
.key = "ABCDE",
},
}));
TEST_F(PublicKeyTest, PublicKey_noRoundTrip_from_json)
{

View file

@ -1,6 +1,7 @@
#include "nix/fetchers/fetch-to-store.hh"
#include "nix/fetchers/fetchers.hh"
#include "nix/fetchers/fetch-settings.hh"
#include "nix/util/environment-variables.hh"
namespace nix {
@ -27,14 +28,22 @@ StorePath fetchToStore(
std::optional<fetchers::Cache::Key> cacheKey;
if (!filter && path.accessor->fingerprint) {
cacheKey = makeFetchToStoreCacheKey(std::string{name}, *path.accessor->fingerprint, method, path.path.abs());
auto [subpath, fingerprint] = filter ? std::pair<CanonPath, std::optional<std::string>>{path.path, std::nullopt}
: path.accessor->getFingerprint(path.path);
if (fingerprint) {
cacheKey = makeFetchToStoreCacheKey(std::string{name}, *fingerprint, method, subpath.abs());
if (auto res = settings.getCache()->lookupStorePath(*cacheKey, store)) {
debug("store path cache hit for '%s'", path);
return res->storePath;
}
} else
} else {
static auto barf = getEnv("_NIX_TEST_BARF_ON_UNCACHEABLE").value_or("") == "1";
if (barf && !filter)
throw Error("source path '%s' is uncacheable (filter=%d)", path, (bool) filter);
// FIXME: could still provide in-memory caching keyed on `SourcePath`.
debug("source path '%s' is uncacheable", path);
}
Activity act(
*logger,

View file

@ -3,8 +3,8 @@
#include "nix/util/source-path.hh"
#include "nix/fetchers/fetch-to-store.hh"
#include "nix/util/json-utils.hh"
#include "nix/fetchers/store-path-accessor.hh"
#include "nix/fetchers/fetch-settings.hh"
#include "nix/fetchers/fetch-to-store.hh"
#include <nlohmann/json.hpp>
@ -332,10 +332,19 @@ std::pair<ref<SourceAccessor>, Input> Input::getAccessorUnchecked(ref<Store> sto
debug("using substituted/cached input '%s' in '%s'", to_string(), store->printStorePath(storePath));
auto accessor = makeStorePathAccessor(store, storePath);
auto accessor = store->requireStoreObjectAccessor(storePath);
accessor->fingerprint = getFingerprint(store);
// Store a cache entry for the substituted tree so later fetches
// can reuse the existing nar instead of copying the unpacked
// input back into the store on every evaluation.
if (accessor->fingerprint) {
ContentAddressMethod method = ContentAddressMethod::Raw::NixArchive;
auto cacheKey = makeFetchToStoreCacheKey(getName(), *accessor->fingerprint, method, "/");
settings->getCache()->upsert(cacheKey, *store, {}, storePath);
}
accessor->setPathDisplay("«" + to_string() + "»");
return {accessor, *this};
@ -346,8 +355,10 @@ std::pair<ref<SourceAccessor>, Input> Input::getAccessorUnchecked(ref<Store> sto
auto [accessor, result] = scheme->getAccessor(store, *this);
assert(!accessor->fingerprint);
accessor->fingerprint = result.getFingerprint(store);
if (!accessor->fingerprint)
accessor->fingerprint = result.getFingerprint(store);
else
result.cachedFingerprint = accessor->fingerprint;
return {accessor, std::move(result)};
}
@ -509,7 +520,7 @@ fetchers::PublicKey adl_serializer<fetchers::PublicKey>::from_json(const json &
return res;
}
void adl_serializer<fetchers::PublicKey>::to_json(json & json, fetchers::PublicKey p)
void adl_serializer<fetchers::PublicKey>::to_json(json & json, const fetchers::PublicKey & p)
{
json["type"] = p.type;
json["key"] = p.key;

View file

@ -16,15 +16,26 @@ std::string FilteringSourceAccessor::readFile(const CanonPath & path)
return next->readFile(prefix / path);
}
void FilteringSourceAccessor::readFile(const CanonPath & path, Sink & sink, std::function<void(uint64_t)> sizeCallback)
{
checkAccess(path);
return next->readFile(prefix / path, sink, sizeCallback);
}
bool FilteringSourceAccessor::pathExists(const CanonPath & path)
{
return isAllowed(path) && next->pathExists(prefix / path);
}
std::optional<SourceAccessor::Stat> FilteringSourceAccessor::maybeLstat(const CanonPath & path)
{
return isAllowed(path) ? next->maybeLstat(prefix / path) : std::nullopt;
}
SourceAccessor::Stat FilteringSourceAccessor::lstat(const CanonPath & path)
{
checkAccess(path);
return next->maybeLstat(prefix / path);
return next->lstat(prefix / path);
}
SourceAccessor::DirEntries FilteringSourceAccessor::readDirectory(const CanonPath & path)
@ -49,6 +60,13 @@ std::string FilteringSourceAccessor::showPath(const CanonPath & path)
return displayPrefix + next->showPath(prefix / path) + displaySuffix;
}
std::pair<CanonPath, std::optional<std::string>> FilteringSourceAccessor::getFingerprint(const CanonPath & path)
{
if (fingerprint)
return {path, fingerprint};
return next->getFingerprint(prefix / path);
}
void FilteringSourceAccessor::checkAccess(const CanonPath & path)
{
if (!isAllowed(path))
@ -59,12 +77,12 @@ void FilteringSourceAccessor::checkAccess(const CanonPath & path)
struct AllowListSourceAccessorImpl : AllowListSourceAccessor
{
std::set<CanonPath> allowedPrefixes;
boost::unordered_flat_set<CanonPath, std::hash<CanonPath>> allowedPaths;
boost::unordered_flat_set<CanonPath> allowedPaths;
AllowListSourceAccessorImpl(
ref<SourceAccessor> next,
std::set<CanonPath> && allowedPrefixes,
boost::unordered_flat_set<CanonPath, std::hash<CanonPath>> && allowedPaths,
boost::unordered_flat_set<CanonPath> && allowedPaths,
MakeNotAllowedError && makeNotAllowedError)
: AllowListSourceAccessor(SourcePath(next), std::move(makeNotAllowedError))
, allowedPrefixes(std::move(allowedPrefixes))
@ -86,7 +104,7 @@ struct AllowListSourceAccessorImpl : AllowListSourceAccessor
ref<AllowListSourceAccessor> AllowListSourceAccessor::create(
ref<SourceAccessor> next,
std::set<CanonPath> && allowedPrefixes,
boost::unordered_flat_set<CanonPath, std::hash<CanonPath>> && allowedPaths,
boost::unordered_flat_set<CanonPath> && allowedPaths,
MakeNotAllowedError && makeNotAllowedError)
{
return make_ref<AllowListSourceAccessorImpl>(

View file

@ -817,7 +817,7 @@ struct GitSourceAccessor : SourceAccessor
return toHash(*git_tree_entry_id(entry));
}
boost::unordered_flat_map<CanonPath, TreeEntry, std::hash<CanonPath>> lookupCache;
boost::unordered_flat_map<CanonPath, TreeEntry> lookupCache;
/* Recursively look up 'path' relative to the root. */
git_tree_entry * lookup(State & state, const CanonPath & path)
@ -1254,7 +1254,7 @@ GitRepoImpl::getAccessor(const WorkdirInfo & wd, bool exportIgnore, MakeNotAllow
makeFSSourceAccessor(path),
std::set<CanonPath>{wd.files},
// Always allow access to the root, but not its children.
boost::unordered_flat_set<CanonPath, std::hash<CanonPath>>{CanonPath::root},
boost::unordered_flat_set<CanonPath>{CanonPath::root},
std::move(makeNotAllowedError))
.cast<SourceAccessor>();
if (exportIgnore)

View file

@ -15,6 +15,7 @@
#include "nix/fetchers/fetch-settings.hh"
#include "nix/util/json-utils.hh"
#include "nix/util/archive.hh"
#include "nix/util/mounted-source-accessor.hh"
#include <regex>
#include <string.h>
@ -892,8 +893,7 @@ struct GitInputScheme : InputScheme
return makeFingerprint(*rev);
else {
auto repoInfo = getRepoInfo(input);
if (auto repoPath = repoInfo.getPath();
repoPath && repoInfo.workdirInfo.headRev && repoInfo.workdirInfo.submodules.empty()) {
if (auto repoPath = repoInfo.getPath(); repoPath && repoInfo.workdirInfo.submodules.empty()) {
/* Calculate a fingerprint that takes into account the
deleted and modified/added files. */
HashSink hashSink{HashAlgorithm::SHA512};
@ -906,7 +906,7 @@ struct GitInputScheme : InputScheme
writeString("deleted:", hashSink);
writeString(file.abs(), hashSink);
}
return makeFingerprint(*repoInfo.workdirInfo.headRev)
return makeFingerprint(repoInfo.workdirInfo.headRev.value_or(nullRev))
+ ";d=" + hashSink.finish().hash.to_string(HashFormat::Base16, false);
}
return std::nullopt;

View file

@ -398,8 +398,9 @@ struct GitHubInputScheme : GitArchiveInputScheme
Headers headers = makeHeadersWithAuthTokens(*input.settings, host, input);
auto downloadResult = downloadFile(store, *input.settings, url, "source", headers);
auto json = nlohmann::json::parse(
readFile(store->toRealPath(downloadFile(store, *input.settings, url, "source", headers).storePath)));
store->requireStoreObjectAccessor(downloadResult.storePath)->readFile(CanonPath::root));
return RefInfo{
.rev = Hash::parseAny(std::string{json["sha"]}, HashAlgorithm::SHA1),
@ -472,8 +473,9 @@ struct GitLabInputScheme : GitArchiveInputScheme
Headers headers = makeHeadersWithAuthTokens(*input.settings, host, input);
auto downloadResult = downloadFile(store, *input.settings, url, "source", headers);
auto json = nlohmann::json::parse(
readFile(store->toRealPath(downloadFile(store, *input.settings, url, "source", headers).storePath)));
store->requireStoreObjectAccessor(downloadResult.storePath)->readFile(CanonPath::root));
if (json.is_array() && json.size() >= 1 && json[0]["id"] != nullptr) {
return RefInfo{.rev = Hash::parseAny(std::string(json[0]["id"]), HashAlgorithm::SHA1)};
@ -548,13 +550,10 @@ struct SourceHutInputScheme : GitArchiveInputScheme
std::string refUri;
if (ref == "HEAD") {
auto file = store->toRealPath(
downloadFile(store, *input.settings, fmt("%s/HEAD", base_url), "source", headers).storePath);
std::ifstream is(file);
std::string line;
getline(is, line);
auto downloadFileResult = downloadFile(store, *input.settings, fmt("%s/HEAD", base_url), "source", headers);
auto contents = store->requireStoreObjectAccessor(downloadFileResult.storePath)->readFile(CanonPath::root);
auto remoteLine = git::parseLsRemoteLine(line);
auto remoteLine = git::parseLsRemoteLine(getLine(contents).first);
if (!remoteLine) {
throw BadURL("in '%d', couldn't resolve HEAD ref '%d'", input.to_string(), ref);
}
@ -564,9 +563,10 @@ struct SourceHutInputScheme : GitArchiveInputScheme
}
std::regex refRegex(refUri);
auto file = store->toRealPath(
downloadFile(store, *input.settings, fmt("%s/info/refs", base_url), "source", headers).storePath);
std::ifstream is(file);
auto downloadFileResult =
downloadFile(store, *input.settings, fmt("%s/info/refs", base_url), "source", headers);
auto contents = store->requireStoreObjectAccessor(downloadFileResult.storePath)->readFile(CanonPath::root);
std::istringstream is(contents);
std::string line;
std::optional<std::string> id;

View file

@ -36,8 +36,12 @@ struct FilteringSourceAccessor : SourceAccessor
std::string readFile(const CanonPath & path) override;
void readFile(const CanonPath & path, Sink & sink, std::function<void(uint64_t)> sizeCallback) override;
bool pathExists(const CanonPath & path) override;
Stat lstat(const CanonPath & path) override;
std::optional<Stat> maybeLstat(const CanonPath & path) override;
DirEntries readDirectory(const CanonPath & path) override;
@ -46,6 +50,8 @@ struct FilteringSourceAccessor : SourceAccessor
std::string showPath(const CanonPath & path) override;
std::pair<CanonPath, std::optional<std::string>> getFingerprint(const CanonPath & path) override;
/**
* Call `makeNotAllowedError` to throw a `RestrictedPathError`
* exception if `isAllowed()` returns `false` for `path`.
@ -72,7 +78,7 @@ struct AllowListSourceAccessor : public FilteringSourceAccessor
static ref<AllowListSourceAccessor> create(
ref<SourceAccessor> next,
std::set<CanonPath> && allowedPrefixes,
boost::unordered_flat_set<CanonPath, std::hash<CanonPath>> && allowedPaths,
boost::unordered_flat_set<CanonPath> && allowedPaths,
MakeNotAllowedError && makeNotAllowedError);
using FilteringSourceAccessor::FilteringSourceAccessor;

View file

@ -11,6 +11,5 @@ headers = files(
'git-utils.hh',
'input-cache.hh',
'registry.hh',
'store-path-accessor.hh',
'tarball.hh',
)

View file

@ -1,14 +0,0 @@
#pragma once
#include "nix/util/source-path.hh"
namespace nix {
class StorePath;
class Store;
ref<SourceAccessor> makeStorePathAccessor(ref<Store> store, const StorePath & storePath);
SourcePath getUnfilteredRootPath(CanonPath path);
} // namespace nix

View file

@ -6,7 +6,6 @@
#include "nix/util/tarfile.hh"
#include "nix/store/store-api.hh"
#include "nix/util/url-parts.hh"
#include "nix/fetchers/store-path-accessor.hh"
#include "nix/fetchers/fetch-settings.hh"
#include <sys/time.h>
@ -330,8 +329,7 @@ struct MercurialInputScheme : InputScheme
Input input(_input);
auto storePath = fetchToStore(store, input);
auto accessor = makeStorePathAccessor(store, storePath);
auto accessor = store->requireStoreObjectAccessor(storePath);
accessor->setPathDisplay("«" + input.to_string() + "»");

View file

@ -32,7 +32,6 @@ libgit2 = dependency('libgit2', version : '>= 1.9')
deps_private += libgit2
subdir('nix-meson-build-support/common')
subdir('nix-meson-build-support/asan-options')
sources = files(
'attrs.cc',
@ -50,7 +49,6 @@ sources = files(
'mercurial.cc',
'path.cc',
'registry.cc',
'store-path-accessor.cc',
'tarball.cc',
)

View file

@ -1,7 +1,6 @@
#include "nix/fetchers/fetchers.hh"
#include "nix/store/store-api.hh"
#include "nix/util/archive.hh"
#include "nix/fetchers/store-path-accessor.hh"
#include "nix/fetchers/cache.hh"
#include "nix/fetchers/fetch-to-store.hh"
#include "nix/fetchers/fetch-settings.hh"
@ -124,8 +123,6 @@ struct PathInputScheme : InputScheme
auto absPath = getAbsPath(input);
Activity act(*logger, lvlTalkative, actUnknown, fmt("copying %s to the store", absPath));
// FIXME: check whether access to 'path' is allowed.
auto storePath = store->maybeParseStorePath(absPath.string());
@ -134,43 +131,33 @@ struct PathInputScheme : InputScheme
time_t mtime = 0;
if (!storePath || storePath->name() != "source" || !store->isValidPath(*storePath)) {
Activity act(*logger, lvlTalkative, actUnknown, fmt("copying %s to the store", absPath));
// FIXME: try to substitute storePath.
auto src = sinkToSource(
[&](Sink & sink) { mtime = dumpPathAndGetMtime(absPath.string(), sink, defaultPathFilter); });
storePath = store->addToStoreFromDump(*src, "source");
}
// To avoid copying the path again to the /nix/store, we need to add a cache entry.
ContentAddressMethod method = ContentAddressMethod::Raw::NixArchive;
auto fp = getFingerprint(store, input);
if (fp) {
auto cacheKey = makeFetchToStoreCacheKey(input.getName(), *fp, method, "/");
input.settings->getCache()->upsert(cacheKey, *store, {}, *storePath);
}
auto accessor = store->requireStoreObjectAccessor(*storePath);
// To prevent `fetchToStore()` copying the path again to Nix
// store, pre-create an entry in the fetcher cache.
auto info = store->queryPathInfo(*storePath);
accessor->fingerprint =
fmt("path:%s", store->queryPathInfo(*storePath)->narHash.to_string(HashFormat::SRI, true));
input.settings->getCache()->upsert(
makeFetchToStoreCacheKey(
input.getName(), *accessor->fingerprint, ContentAddressMethod::Raw::NixArchive, "/"),
*store,
{},
*storePath);
/* Trust the lastModified value supplied by the user, if
any. It's not a "secure" attribute so we don't care. */
if (!input.getLastModified())
input.attrs.insert_or_assign("lastModified", uint64_t(mtime));
return {makeStorePathAccessor(store, *storePath), std::move(input)};
}
std::optional<std::string> getFingerprint(ref<Store> store, const Input & input) const override
{
if (isRelative(input))
return std::nullopt;
/* If this path is in the Nix store, use the hash of the
store object and the subpath. */
auto path = getAbsPath(input);
try {
auto [storePath, subPath] = store->toStorePath(path.string());
auto info = store->queryPathInfo(storePath);
return fmt("path:%s:%s", info->narHash.to_string(HashFormat::Base16, false), subPath);
} catch (Error &) {
return std::nullopt;
}
return {accessor, std::move(input)};
}
std::optional<ExperimentalFeature> experimentalFeature() const override

View file

@ -1,11 +0,0 @@
#include "nix/fetchers/store-path-accessor.hh"
#include "nix/store/store-api.hh"
namespace nix {
ref<SourceAccessor> makeStorePathAccessor(ref<Store> store, const StorePath & storePath)
{
return projectSubdirSourceAccessor(store->getFSAccessor(), storePath.to_string());
}
} // namespace nix

Some files were not shown because too many files have changed in this diff Show more