
Merge commit '8388d2c7c6' into progress-bar

John Ericson 2023-03-11 16:59:40 -05:00
commit e73dcf2cdd
95 changed files with 1215 additions and 717 deletions

.github/workflows/backport.yml

@ -0,0 +1,26 @@
name: Backport
on:
pull_request_target:
types: [closed, labeled]
jobs:
backport:
name: Backport Pull Request
if: github.repository_owner == 'NixOS' && github.event.pull_request.merged == true && (github.event_name != 'labeled' || startsWith('backport', github.event.label.name))
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
ref: ${{ github.event.pull_request.head.sha }}
# required to find all branches
fetch-depth: 0
- name: Create backport PRs
# should be kept in sync with `version`
uses: zeebe-io/backport-action@v0.0.7
with:
# Config README: https://github.com/zeebe-io/backport-action#backport-action
github_token: ${{ secrets.GITHUB_TOKEN }}
github_workspace: ${{ github.workspace }}
pull_description: |-
Bot-based backport to `${target_branch}`, triggered by a label in #${pull_number}.
# should be kept in sync with `uses`
version: v0.0.5


@ -14,10 +14,10 @@ jobs:
runs-on: ${{ matrix.os }}
timeout-minutes: 60
steps:
- uses: actions/checkout@v2.3.5
- uses: actions/checkout@v2.4.0
with:
fetch-depth: 0
- uses: cachix/install-nix-action@v14.1
- uses: cachix/install-nix-action@v16
- run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
- uses: cachix/cachix-action@v10
if: needs.check_cachix.outputs.secret == 'true'
@ -46,11 +46,11 @@ jobs:
outputs:
installerURL: ${{ steps.prepare-installer.outputs.installerURL }}
steps:
- uses: actions/checkout@v2.3.5
- uses: actions/checkout@v2.4.0
with:
fetch-depth: 0
- run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
- uses: cachix/install-nix-action@v14.1
- uses: cachix/install-nix-action@v16
- uses: cachix/cachix-action@v10
with:
name: '${{ env.CACHIX_NAME }}'
@ -67,9 +67,9 @@ jobs:
os: [ubuntu-latest, macos-latest]
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v2.3.5
- uses: actions/checkout@v2.4.0
- run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
- uses: cachix/install-nix-action@v14.1
- uses: cachix/install-nix-action@v16
with:
install_url: '${{needs.installer.outputs.installerURL}}'
install_options: "--tarball-url-prefix https://${{ env.CACHIX_NAME }}.cachix.org/serve"

.gitignore

@ -26,8 +26,6 @@ perl/Makefile.config
# /scripts/
/scripts/nix-profile.sh
/scripts/nix-reduce-build
/scripts/nix-http-export.cgi
/scripts/nix-profile-daemon.sh
# /src/libexpr/


@ -1,8 +1,8 @@
diff --git a/pthread_stop_world.c b/pthread_stop_world.c
index 1cee6a0b..46c3acd9 100644
index 4b2c429..1fb4c52 100644
--- a/pthread_stop_world.c
+++ b/pthread_stop_world.c
@@ -674,6 +674,8 @@ GC_INNER void GC_push_all_stacks(void)
@@ -673,6 +673,8 @@ GC_INNER void GC_push_all_stacks(void)
struct GC_traced_stack_sect_s *traced_stack_sect;
pthread_t self = pthread_self();
word total_size = 0;
@ -11,7 +11,7 @@ index 1cee6a0b..46c3acd9 100644
if (!EXPECT(GC_thr_initialized, TRUE))
GC_thr_init();
@@ -723,6 +725,28 @@ GC_INNER void GC_push_all_stacks(void)
@@ -722,6 +724,31 @@ GC_INNER void GC_push_all_stacks(void)
hi = p->altstack + p->altstack_size;
/* FIXME: Need to scan the normal stack too, but how ? */
/* FIXME: Assume stack grows down */
@ -22,6 +22,9 @@ index 1cee6a0b..46c3acd9 100644
+ if (pthread_attr_getstacksize(&pattr, &stack_limit)) {
+ ABORT("GC_push_all_stacks: pthread_attr_getstacksize failed!");
+ }
+ if (pthread_attr_destroy(&pattr)) {
+ ABORT("GC_push_all_stacks: pthread_attr_destroy failed!");
+ }
+ // When a thread goes into a coroutine, we lose its original sp until
+ // control flow returns to the thread.
+ // While in the coroutine, the sp points outside the thread stack,


@ -9,6 +9,7 @@
- [Prerequisites](installation/prerequisites-source.md)
- [Obtaining a Source Distribution](installation/obtaining-source.md)
- [Building Nix from Source](installation/building-source.md)
- [Using Nix within Docker](installation/installing-docker.md)
- [Security](installation/nix-security.md)
- [Single-User Mode](installation/single-user.md)
- [Multi-User Mode](installation/multi-user.md)


@ -16,8 +16,9 @@ By default Nix reads settings from the following places:
will be loaded in reverse order.
Otherwise it will look for `nix/nix.conf` files in `XDG_CONFIG_DIRS`
and `XDG_CONFIG_HOME`. If these are unset, it will look in
`$HOME/.config/nix.conf`.
and `XDG_CONFIG_HOME`. If unset, `XDG_CONFIG_DIRS` defaults to
`/etc/xdg`, and `XDG_CONFIG_HOME` defaults to `$HOME/.config`
as per [XDG Base Directory Specification](https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html).
- If `NIX_CONFIG` is set, its contents is treated as the contents of
a configuration file.
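(Illustrative aside, not part of this diff: a minimal sketch of the `NIX_CONFIG` behaviour described above. The chosen setting and the exact output line are assumptions for the example.)
```console
$ NIX_CONFIG='experimental-features = nix-command flakes' nix show-config | grep experimental-features
experimental-features = flakes nix-command
```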


@ -238,7 +238,16 @@ a number of possible ways:
## Examples
To install a specific version of `gcc` from the active Nix expression:
To install a package using a specific attribute path from the active Nix expression:
```console
$ nix-env -iA gcc40mips
installing `gcc-4.0.2'
$ nix-env -iA xorg.xorgserver
installing `xorg-server-1.2.0'
```
To install a specific version of `gcc` using the derivation name:
```console
$ nix-env --install gcc-3.3.2
@ -246,6 +255,9 @@ installing `gcc-3.3.2'
uninstalling `gcc-3.1'
```
Using attribute path for selecting a package is preferred,
as it is much faster and there will not be multiple matches.
Note the previously installed version is removed, since
`--preserve-installed` was not specified.
@ -256,13 +268,6 @@ $ nix-env --install gcc
installing `gcc-3.3.2'
```
To install using a specific attribute:
```console
$ nix-env -i -A gcc40mips
$ nix-env -i -A xorg.xorgserver
```
To install all derivations in the Nix expression `foo.nix`:
```console
@ -374,22 +379,29 @@ For the other flags, see `--install`.
## Examples
```console
$ nix-env --upgrade gcc
$ nix-env --upgrade -A nixpkgs.gcc
upgrading `gcc-3.3.1' to `gcc-3.4'
```
When there are no updates available, nothing will happen:
```console
$ nix-env -u gcc-3.3.2 --always (switch to a specific version)
$ nix-env --upgrade -A nixpkgs.pan
```
Using `-A` is preferred when possible, as it is faster and unambiguous but
it is also possible to upgrade to a specific version by matching the derivation name:
```console
$ nix-env -u gcc-3.3.2 --always
upgrading `gcc-3.4' to `gcc-3.3.2'
```
```console
$ nix-env --upgrade pan
(no upgrades available, so nothing happens)
```
To try to upgrade everything
(matching packages based on the part of the derivation name without version):
```console
$ nix-env -u (try to upgrade everything)
$ nix-env -u
upgrading `hello-2.1.2' to `hello-2.1.3'
upgrading `mozilla-1.2' to `mozilla-1.4'
```
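(Illustrative aside, not part of this diff: the "newer" check used by `nix-env -u` follows Nix's version-ordering rules, which `builtins.compareVersions` also implements; a quick `nix repl` sketch.)
```console
$ nix repl
nix-repl> builtins.compareVersions "2.1.2" "2.1.3"
-1
nix-repl> builtins.compareVersions "3.4" "3.3.2"
1
```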
@ -401,7 +413,7 @@ of a derivation `x` by looking at their respective `name` attributes.
The names (e.g., `gcc-3.3.1` are split into two parts: the package name
(`gcc`), and the version (`3.3.1`). The version part starts after the
first dash not followed by a letter. `x` is considered an upgrade of `y`
if their package names match, and the version of `y` is higher that that
if their package names match, and the version of `y` is higher than that
of `x`.
The versions are compared by splitting them into contiguous components


@ -125,7 +125,7 @@ Special exit codes:
- `104`\
Not deterministic, the build succeeded in check mode but the
resulting output is not binary reproducable.
resulting output is not binary reproducible.
With the `--keep-going` flag it's possible for multiple failures to
occur, in this case the 1xx status codes are or combined using binary


@ -162,11 +162,11 @@ Most Nix commands accept the following command-line options:
}: ...
```
So if you call this Nix expression (e.g., when you do `nix-env -i
So if you call this Nix expression (e.g., when you do `nix-env -iA
pkgname`), the function will be called automatically using the
value [`builtins.currentSystem`](../expressions/builtins.md) for
the `system` argument. You can override this using `--arg`, e.g.,
`nix-env -i pkgname --arg system \"i686-freebsd\"`. (Note that
`nix-env -iA pkgname --arg system \"i686-freebsd\"`. (Note that
since the argument is a Nix string literal, you have to escape the
quotes.)


@ -3,7 +3,7 @@
## Goals
Purpose of this document is to provide a clear direction to **help design
delightful command line** experience. This document contain guidelines to
delightful command line** experience. This document contains guidelines to
follow to ensure a consistent and approachable user experience.
## Overview
@ -115,7 +115,7 @@ The rules are:
- Help is shown by using `--help` or `help` command (eg `nix` `--``help` or
`nix help`).
- For non-COMMANDs (eg. `nix` `--``help` and `nix store` `--``help`) we **show
- For non-COMMANDs (eg. `nix` `--``help` and `nix store` `--``help`) we **show
a summary** of most common use cases. Summary is presented on the STDOUT
without any use of PAGER.
- For COMMANDs (eg. `nix init` `--``help` or `nix help init`) we display the
@ -176,7 +176,7 @@ $ nix init --template=template#pyton
------------------------------------------------------------------------
Initializing Nix project at `/path/to/here`.
Select a template for you new project:
|> template#pyton
|> template#python
template#python-pip
template#python-poetry
```
@ -230,8 +230,8 @@ Now **Learn** part of the output is where you educate users. You should only
show it when you know that a build will take some time and not annoy users of
the builds that take only few seconds.
Every feature like this should go though a intensive review and testing to
collect as much a feedback as possible and to fine tune every little detail. If
Every feature like this should go through an intensive review and testing to
collect as much feedback as possible and to fine tune every little detail. If
done right this can be an awesome features beginners and advance users will
love, but if not done perfectly it will annoy users and leave bad impression.
@ -240,7 +240,7 @@ love, but if not done perfectly it will annoy users and leave bad impression.
Input to a command is provided via `ARGUMENTS` and `OPTIONS`.
`ARGUMENTS` represent a required input for a function. When choosing to use
`ARGUMENT` over function please be aware of the downsides that come with it:
`ARGUMENTS` over `OPTIONS` please be aware of the downsides that come with it:
- User will need to remember the order of `ARGUMENTS`. This is not a problem if
there is only one `ARGUMENT`.
@ -253,7 +253,7 @@ developer consider the downsides and choose wisely.
## Naming the `OPTIONS`
Then only naming convention - apart from the ones mentioned in Naming the
The only naming convention - apart from the ones mentioned in Naming the
`COMMANDS` section is how flags are named.
Flags are a type of `OPTION` that represent an option that can be turned ON of
@ -271,12 +271,12 @@ to improve the discoverability of possible input. A new user will most likely
not know which `ARGUMENTS` and `OPTIONS` are required or which values are
possible for those options.
In cases, the user might not provide the input or they provide wrong input,
rather then show the error, prompt a user with an option to find and select
In case the user does not provide the input or they provide wrong input,
rather than show the error, prompt a user with an option to find and select
correct input (see examples).
Prompting is of course not required when TTY is not attached to STDIN. This
would mean that scripts wont need to handle prompt, but rather handle errors.
would mean that scripts won't need to handle prompt, but rather handle errors.
A place to use prompt and provide user with interactive select
@ -300,7 +300,7 @@ going to happen.
```shell
$ nix build --option substitutors https://cache.example.org
------------------------------------------------------------------------
Warning! A security related question need to be answered.
Warning! A security related question needs to be answered.
------------------------------------------------------------------------
The following substitutors will be used to in `my-project`:
- https://cache.example.org
@ -311,14 +311,14 @@ $ nix build --option substitutors https://cache.example.org
# Output
Terminal output can be quite limiting in many ways. Which should forces us to
Terminal output can be quite limiting in many ways. Which should force us to
think about the experience even more. As with every design the output is a
compromise between being terse and being verbose, between showing help to
beginners and annoying advance users. For this it is important that we know
what are the priorities.
Nix command line should be first and foremost written with beginners in mind.
But users wont stay beginners for long and what was once useful might quickly
But users won't stay beginners for long and what was once useful might quickly
become annoying. There is no golden rule that we can give in this guideline
that would make it easier how to draw a line and find best compromise.
@ -342,7 +342,7 @@ also allowing them to redirect content to a file. For example:
```shell
$ nix build > build.txt
------------------------------------------------------------------------
Error! Atrribute `bin` missing at (1:94) from string.
Error! Attribute `bin` missing at (1:94) from string.
------------------------------------------------------------------------
1| with import <nixpkgs> { }; (pkgs.runCommandCC or pkgs.runCommand) "shell" { buildInputs = [ (surge.bin) ]; } ""
@ -508,7 +508,7 @@ can, with a few key strokes, be changed into and advance introspection tool.
### Progress
For longer running commands we should provide and overview of the progress.
For longer running commands we should provide and overview the progress.
This is shown best in `nix build` example:
```shell
@ -553,7 +553,7 @@ going to happen.
```shell
$ nix build --option substitutors https://cache.example.org
------------------------------------------------------------------------
Warning! A security related question need to be answered.
Warning! A security related question needs to be answered.
------------------------------------------------------------------------
The following substitutors will be used to in `my-project`:
- https://cache.example.org


@ -237,7 +237,7 @@ Derivations can declare some infrequently used optional attributes.
- `preferLocalBuild`\
If this attribute is set to `true` and [distributed building is
enabled](../advanced-topics/distributed-builds.md), then, if
possible, the derivaton will be built locally instead of forwarded
possible, the derivation will be built locally instead of forwarded
to a remote machine. This is appropriate for trivial builders
where the cost of doing a download or remote build would exceed
the cost of building locally.
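(Illustrative aside, not part of this diff: a minimal derivation opting into local builds via this attribute; the file name and the use of Nixpkgs' `runCommand` are assumptions for the sketch.)
```console
$ cat prefer-local.nix
with import <nixpkgs> { };
runCommand "prefer-local-example" { preferLocalBuild = true; } ''
  echo "trivial output, cheaper to build locally than to download" > $out
''
$ nix-build prefer-local.nix
```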


@ -12,5 +12,5 @@ For instance, `derivation` is also available as `builtins.derivation`.
<dl>
<dt><code>derivation <var>attrs</var></code>;
<code>builtins.derivation <var>attrs</var></code></dt>
<dd><p><var>derivation</var> in described in
<dd><p><var>derivation</var> is described in
<a href="derivations.md">its own section</a>.</p></dd>


@ -26,7 +26,7 @@ elements (referenced from the figure by number):
called with three arguments: `stdenv`, `fetchurl`, and `perl`. They
are needed to build Hello, but we don't know how to build them here;
that's why they are function arguments. `stdenv` is a package that
is used by almost all Nix Packages packages; it provides a
is used by almost all Nix Packages; it provides a
“standard” environment consisting of the things you would expect
in a basic Unix environment: a C/C++ compiler (GCC, to be precise),
the Bash shell, fundamental Unix tools such as `cp`, `grep`, `tar`,


@ -17,12 +17,12 @@ order of precedence (from strongest to weakest binding).
| String Concatenation | *string1* `+` *string2* | left | String concatenation. | 7 |
| Not | `!` *e* | none | Boolean negation. | 8 |
| Update | *e1* `//` *e2* | right | Return a set consisting of the attributes in *e1* and *e2* (with the latter taking precedence over the former in case of equally named attributes). | 9 |
| Less Than | *e1* `<` *e2*, | none | Arithmetic comparison. | 10 |
| Less Than or Equal To | *e1* `<=` *e2* | none | Arithmetic comparison. | 10 |
| Greater Than | *e1* `>` *e2* | none | Arithmetic comparison. | 10 |
| Greater Than or Equal To | *e1* `>=` *e2* | none | Arithmetic comparison. | 10 |
| Less Than | *e1* `<` *e2*, | none | Arithmetic/lexicographic comparison. | 10 |
| Less Than or Equal To | *e1* `<=` *e2* | none | Arithmetic/lexicographic comparison. | 10 |
| Greater Than | *e1* `>` *e2* | none | Arithmetic/lexicographic comparison. | 10 |
| Greater Than or Equal To | *e1* `>=` *e2* | none | Arithmetic/lexicographic comparison. | 10 |
| Equality | *e1* `==` *e2* | none | Equality. | 11 |
| Inequality | *e1* `!=` *e2* | none | Inequality. | 11 |
| Logical AND | *e1* `&&` *e2* | left | Logical AND. | 12 |
| Logical OR | *e1* `\|\|` *e2* | left | Logical OR. | 13 |
| Logical Implication | *e1* `->` *e2* | none | Logical implication (equivalent to `!e1 \|\| e2`). | 14 |
| Logical OR | *e1* <code>&#124;&#124;</code> *e2* | left | Logical OR. | 13 |
| Logical Implication | *e1* `->` *e2* | none | Logical implication (equivalent to <code>!e1 &#124;&#124; e2</code>). | 14 |
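(Illustrative aside, not part of this diff: a short `nix repl` session exercising a few of the operators above; the printed forms are as the evaluator shows them.)
```console
$ nix repl
nix-repl> { a = 1; b = 2; } // { b = 3; c = 4; }
{ a = 1; b = 3; c = 4; }
nix-repl> "foo" < "foobar"
true
nix-repl> false -> true
true
```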


@ -64,7 +64,7 @@ Nix has the following basic data types:
the start of each line. To be precise, it strips from each line a
number of spaces equal to the minimal indentation of the string as a
whole (disregarding the indentation of empty lines). For instance,
the first and second line are indented two space, while the third
the first and second line are indented two spaces, while the third
line is indented four spaces. Thus, two spaces are stripped from
each line, so the resulting string is


@ -1,6 +1,6 @@
# Building and Testing
You can now try to build Hello. Of course, you could do `nix-env -i
You can now try to build Hello. Of course, you could do `nix-env -f . -iA
hello`, but you may not want to install a possibly broken package just
yet. The best way to test the package is by using the command
`nix-build`, which builds a Nix expression and creates a symlink named


@ -0,0 +1,59 @@
# Using Nix within Docker
To run the latest stable release of Nix with Docker run the following command:
```console
$ docker -ti run nixos/nix
Unable to find image 'nixos/nix:latest' locally
latest: Pulling from nixos/nix
5843afab3874: Pull complete
b52bf13f109c: Pull complete
1e2415612aa3: Pull complete
Digest: sha256:27f6e7f60227e959ee7ece361f75d4844a40e1cc6878b6868fe30140420031ff
Status: Downloaded newer image for nixos/nix:latest
35ca4ada6e96:/# nix --version
nix (Nix) 2.3.12
35ca4ada6e96:/# exit
```
# What is included in Nix' Docker image?
The official Docker image is created using `pkgs.dockerTools.buildLayeredImage`
(and not with `Dockerfile` as it is usual with Docker images). You can still
base your custom Docker image on it as you would do with any other Docker
image.
The Docker image is also not based on any other image and includes minimal set
of runtime dependencies that are required to use Nix:
- pkgs.nix
- pkgs.bashInteractive
- pkgs.coreutils-full
- pkgs.gnutar
- pkgs.gzip
- pkgs.gnugrep
- pkgs.which
- pkgs.curl
- pkgs.less
- pkgs.wget
- pkgs.man
- pkgs.cacert.out
- pkgs.findutils
# Docker image with the latest development version of Nix
To get the latest image that was built by [Hydra](https://hydra.nixos.org) run
the following command:
```console
$ curl -L https://hydra.nixos.org/job/nix/master/dockerImage.x86_64-linux/latest/download/1 | docker load
$ docker run -ti nix:2.5pre20211105
```
You can also build a Docker image from source yourself:
```console
$ nix build ./\#hydraJobs.dockerImage.x86_64-linux
$ docker load -i ./result
$ docker run -ti nix:2.5pre20211105
```


@ -76,7 +76,7 @@ there after an upgrade. This means that you can _roll back_ to the
old version:
```console
$ nix-env --upgrade some-packages
$ nix-env --upgrade -A nixpkgs.some-package
$ nix-env --rollback
```
@ -122,12 +122,12 @@ Nix expressions generally describe how to build a package from
source, so an installation action like
```console
$ nix-env --install firefox
$ nix-env --install -A nixpkgs.firefox
```
_could_ cause quite a bit of build activity, as not only Firefox but
also all its dependencies (all the way up to the C library and the
compiler) would have to built, at least if they are not already in the
compiler) would have to be built, at least if they are not already in the
Nix store. This is a _source deployment model_. For most users,
building from source is not very pleasant as it takes far too long.
However, Nix can automatically skip building from source and instead


@ -24,7 +24,7 @@ collection; you could write your own Nix expressions based on Nixpkgs,
or completely new ones.)
You can manually download the latest version of Nixpkgs from
<http://nixos.org/nixpkgs/download.html>. However, it’s much more
<https://github.com/NixOS/nixpkgs>. However, it’s much more
convenient to use the Nixpkgs [*channel*](channels.md), since it makes
it easy to stay up to date with new versions of Nixpkgs. Nixpkgs is
automatically added to your list of “subscribed” channels when you
@ -47,41 +47,45 @@ $ nix-channel --update
You can view the set of available packages in Nixpkgs:
```console
$ nix-env -qa
aterm-2.2
bash-3.0
binutils-2.15
bison-1.875d
blackdown-1.4.2
bzip2-1.0.2
$ nix-env -qaP
nixpkgs.aterm aterm-2.2
nixpkgs.bash bash-3.0
nixpkgs.binutils binutils-2.15
nixpkgs.bison bison-1.875d
nixpkgs.blackdown blackdown-1.4.2
nixpkgs.bzip2 bzip2-1.0.2
```
The flag `-q` specifies a query operation, and `-a` means that you want
The flag `-q` specifies a query operation, `-a` means that you want
to show the “available” (i.e., installable) packages, as opposed to the
installed packages. If you downloaded Nixpkgs yourself, or if you
checked it out from GitHub, then you need to pass the path to your
Nixpkgs tree using the `-f` flag:
installed packages, and `-P` prints the attribute paths that can be used
to unambiguously select a package for installation (listed in the first column).
If you downloaded Nixpkgs yourself, or if you checked it out from GitHub,
then you need to pass the path to your Nixpkgs tree using the `-f` flag:
```console
$ nix-env -qaf /path/to/nixpkgs
$ nix-env -qaPf /path/to/nixpkgs
aterm aterm-2.2
bash bash-3.0
```
where */path/to/nixpkgs* is where you’ve unpacked or checked out
Nixpkgs.
You can select specific packages by name:
You can filter the packages by name:
```console
$ nix-env -qa firefox
firefox-34.0.5
firefox-with-plugins-34.0.5
$ nix-env -qaP firefox
nixpkgs.firefox-esr firefox-91.3.0esr
nixpkgs.firefox firefox-94.0.1
```
and using regular expressions:
```console
$ nix-env -qa 'firefox.*'
$ nix-env -qaP 'firefox.*'
```
It is also possible to see the *status* of available packages, i.e.,
@ -89,11 +93,11 @@ whether they are installed into the user environment and/or present in
the system:
```console
$ nix-env -qas
$ nix-env -qaPs
-PS bash-3.0
--S binutils-2.15
IPS bison-1.875d
-PS nixpkgs.bash bash-3.0
--S nixpkgs.binutils binutils-2.15
IPS nixpkgs.bison bison-1.875d
```
@ -106,13 +110,13 @@ which is Nix’s mechanism for doing binary deployment. It just means that
Nix knows that it can fetch a pre-built package from somewhere
(typically a network server) instead of building it locally.
You can install a package using `nix-env -i`. For instance,
You can install a package using `nix-env -iA`. For instance,
```console
$ nix-env -i subversion
$ nix-env -iA nixpkgs.subversion
```
will install the package called `subversion` (which is, of course, the
will install the package called `subversion` from `nixpkgs` channel (which is, of course, the
[Subversion version management system](http://subversion.tigris.org/)).
> **Note**
@ -122,7 +126,7 @@ will install the package called `subversion` (which is, of course, the
> binary cache <https://cache.nixos.org>; it contains binaries for most
> packages in Nixpkgs. Only if no binary is available in the binary
> cache, Nix will build the package from source. So if `nix-env
> -i subversion` results in Nix building stuff from source, then either
> -iA nixpkgs.subversion` results in Nix building stuff from source, then either
> the package is not built for your platform by the Nixpkgs build
> servers, or your version of Nixpkgs is too old or too new. For
> instance, if you have a very recent checkout of Nixpkgs, then the
@ -133,7 +137,10 @@ will install the package called `subversion` (which is, of course, the
> using a Git checkout of the Nixpkgs tree), you will get binaries for
> most packages.
Naturally, packages can also be uninstalled:
Naturally, packages can also be uninstalled. Unlike when installing, you will
need to use the derivation name (though the version part can be omitted),
instead of the attribute path, as `nix-env` does not record which attribute
was used for installing:
```console
$ nix-env -e subversion
@ -143,7 +150,7 @@ Upgrading to a new version is just as easy. If you have a new release of
Nix Packages, you can do:
```console
$ nix-env -u subversion
$ nix-env -uA nixpkgs.subversion
```
This will *only* upgrade Subversion if there is a “newer” version in the


@ -9,7 +9,7 @@ The daemon that handles binary cache requests via HTTP, `nix-serve`, is
not part of the Nix distribution, but you can install it from Nixpkgs:
```console
$ nix-env -i nix-serve
$ nix-env -iA nixpkgs.nix-serve
```
You can then start the server, listening for HTTP connections on
@ -35,7 +35,7 @@ On the client side, you can tell Nix to use your binary cache using
`--option extra-binary-caches`, e.g.:
```console
$ nix-env -i firefox --option extra-binary-caches http://avalon:8080/
$ nix-env -iA nixpkgs.firefox --option extra-binary-caches http://avalon:8080/
```
The option `extra-binary-caches` tells Nix to use this binary cache in


@ -44,7 +44,7 @@ collector as follows:
$ nix-store --gc
```
The behaviour of the gargage collector is affected by the
The behaviour of the garbage collector is affected by the
`keep-derivations` (default: true) and `keep-outputs` (default: false)
options in the Nix configuration file. The defaults will ensure that all
derivations that are build-time dependencies of garbage collector roots


@ -39,7 +39,7 @@ just Subversion 1.1.2 (arrows in the figure indicate symlinks). This
would be what we would obtain if we had done
```console
$ nix-env -i subversion
$ nix-env -iA nixpkgs.subversion
```
on a set of Nix expressions that contained Subversion 1.1.2.
@ -54,7 +54,7 @@ environment is generated based on the current one. For instance,
generation 43 was created from generation 42 when we did
```console
$ nix-env -i subversion firefox
$ nix-env -iA nixpkgs.subversion nixpkgs.firefox
```
on a set of Nix expressions that contained Firefox and a new version of
@ -127,7 +127,7 @@ All `nix-env` operations work on the profile pointed to by
(abbreviation `-p`):
```console
$ nix-env -p /nix/var/nix/profiles/other-profile -i subversion
$ nix-env -p /nix/var/nix/profiles/other-profile -iA nixpkgs.subversion
```
This will *not* change the `~/.nix-profile` symlink.


@ -6,7 +6,7 @@ automatically fetching any store paths in Firefox’s closure if they are
available on the server `avalon`:
```console
$ nix-env -i firefox --substituters ssh://alice@avalon
$ nix-env -iA nixpkgs.firefox --substituters ssh://alice@avalon
```
This works similar to the binary cache substituter that Nix usually


@ -19,19 +19,19 @@ to subsequent chapters.
channel:
```console
$ nix-env -qa
docbook-xml-4.3
docbook-xml-4.5
firefox-33.0.2
hello-2.9
libxslt-1.1.28
$ nix-env -qaP
nixpkgs.docbook_xml_dtd_43 docbook-xml-4.3
nixpkgs.docbook_xml_dtd_45 docbook-xml-4.5
nixpkgs.firefox firefox-33.0.2
nixpkgs.hello hello-2.9
nixpkgs.libxslt libxslt-1.1.28
```
1. Install some packages from the channel:
```console
$ nix-env -i hello
$ nix-env -iA nixpkgs.hello
```
This should download pre-built packages; it should not build them


@ -3,3 +3,5 @@
* Binary cache stores now have a setting `compression-level`.
* `nix develop` now has a flag `--unpack` to run `unpackPhase`.
* Lists can now be compared lexicographically using the `<` operator.
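(Illustrative aside, not part of this diff: the lexicographic list comparison mentioned in the last item, checked in `nix repl`.)
```console
$ nix repl
nix-repl> [ 1 2 3 ] < [ 1 2 4 ]
true
nix-repl> [ "a" "b" ] < [ "a" "b" "c" ]
true
```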

docker.nix

@ -0,0 +1,251 @@
{ pkgs ? import <nixpkgs> { }
, lib ? pkgs.lib
, name ? "nix"
, tag ? "latest"
, channelName ? "nixpkgs"
, channelURL ? "https://nixos.org/channels/nixpkgs-unstable"
}:
let
defaultPkgs = with pkgs; [
nix
bashInteractive
coreutils-full
gnutar
gzip
gnugrep
which
curl
less
wget
man
cacert.out
findutils
];
users = {
root = {
uid = 0;
shell = "/bin/bash";
home = "/root";
gid = 0;
};
} // lib.listToAttrs (
map
(
n: {
name = "nixbld${toString n}";
value = {
uid = 30000 + n;
gid = 30000;
groups = [ "nixbld" ];
description = "Nix build user ${toString n}";
};
}
)
(lib.lists.range 1 32)
);
groups = {
root.gid = 0;
nixbld.gid = 30000;
};
userToPasswd = (
k:
{ uid
, gid ? 65534
, home ? "/var/empty"
, description ? ""
, shell ? "/bin/false"
, groups ? [ ]
}: "${k}:x:${toString uid}:${toString gid}:${description}:${home}:${shell}"
);
passwdContents = (
lib.concatStringsSep "\n"
(lib.attrValues (lib.mapAttrs userToPasswd users))
);
userToShadow = k: { ... }: "${k}:!:1::::::";
shadowContents = (
lib.concatStringsSep "\n"
(lib.attrValues (lib.mapAttrs userToShadow users))
);
# Map groups to members
# {
# group = [ "user1" "user2" ];
# }
groupMemberMap = (
let
# Create a flat list of user/group mappings
mappings = (
builtins.foldl'
(
acc: user:
let
groups = users.${user}.groups or [ ];
in
acc ++ map
(group: {
inherit user group;
})
groups
)
[ ]
(lib.attrNames users)
);
in
(
builtins.foldl'
(
acc: v: acc // {
${v.group} = acc.${v.group} or [ ] ++ [ v.user ];
}
)
{ }
mappings)
);
groupToGroup = k: { gid }:
let
members = groupMemberMap.${k} or [ ];
in
"${k}:x:${toString gid}:${lib.concatStringsSep "," members}";
groupContents = (
lib.concatStringsSep "\n"
(lib.attrValues (lib.mapAttrs groupToGroup groups))
);
nixConf = {
sandbox = "false";
build-users-group = "nixbld";
trusted-public-keys = "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=";
};
nixConfContents = (lib.concatStringsSep "\n" (lib.mapAttrsFlatten (n: v: "${n} = ${v}") nixConf)) + "\n";
baseSystem =
let
nixpkgs = pkgs.path;
channel = pkgs.runCommand "channel-nixos" { } ''
mkdir $out
ln -s ${nixpkgs} $out/nixpkgs
echo "[]" > $out/manifest.nix
'';
rootEnv = pkgs.buildPackages.buildEnv {
name = "root-profile-env";
paths = defaultPkgs;
};
profile = pkgs.buildPackages.runCommand "user-environment" { } ''
mkdir $out
cp -a ${rootEnv}/* $out/
cat > $out/manifest.nix <<EOF
[
${lib.concatStringsSep "\n" (builtins.map (drv: let
outputs = drv.outputsToInstall or [ "out" ];
in ''
{
${lib.concatStringsSep "\n" (builtins.map (output: ''
${output} = { outPath = "${lib.getOutput output drv}"; };
'') outputs)}
outputs = [ ${lib.concatStringsSep " " (builtins.map (x: "\"${x}\"") outputs)} ];
name = "${drv.name}";
outPath = "${drv}";
system = "${drv.system}";
type = "derivation";
meta = { };
}
'') defaultPkgs)}
]
EOF
'';
in
pkgs.runCommand "base-system"
{
inherit passwdContents groupContents shadowContents nixConfContents;
passAsFile = [
"passwdContents"
"groupContents"
"shadowContents"
"nixConfContents"
];
allowSubstitutes = false;
preferLocalBuild = true;
} ''
env
set -x
mkdir -p $out/etc
cat $passwdContentsPath > $out/etc/passwd
echo "" >> $out/etc/passwd
cat $groupContentsPath > $out/etc/group
echo "" >> $out/etc/group
cat $shadowContentsPath > $out/etc/shadow
echo "" >> $out/etc/shadow
mkdir -p $out/usr
ln -s /nix/var/nix/profiles/share $out/usr/
mkdir -p $out/nix/var/nix/gcroots
mkdir $out/tmp
mkdir -p $out/etc/nix
cat $nixConfContentsPath > $out/etc/nix/nix.conf
mkdir -p $out/root
mkdir -p $out/nix/var/nix/profiles/per-user/root
ln -s ${profile} $out/nix/var/nix/profiles/default-1-link
ln -s $out/nix/var/nix/profiles/default-1-link $out/nix/var/nix/profiles/default
ln -s /nix/var/nix/profiles/default $out/root/.nix-profile
ln -s ${channel} $out/nix/var/nix/profiles/per-user/root/channels-1-link
ln -s $out/nix/var/nix/profiles/per-user/root/channels-1-link $out/nix/var/nix/profiles/per-user/root/channels
mkdir -p $out/root/.nix-defexpr
ln -s $out/nix/var/nix/profiles/per-user/root/channels $out/root/.nix-defexpr/channels
echo "${channelURL} ${channelName}" > $out/root/.nix-channels
mkdir -p $out/bin $out/usr/bin
ln -s ${pkgs.coreutils}/bin/env $out/usr/bin/env
ln -s ${pkgs.bashInteractive}/bin/bash $out/bin/sh
'';
in
pkgs.dockerTools.buildLayeredImageWithNixDb {
inherit name tag;
contents = [ baseSystem ];
extraCommands = ''
rm -rf nix-support
ln -s /nix/var/nix/profiles nix/var/nix/gcroots/profiles
'';
config = {
Cmd = [ "/root/.nix-profile/bin/bash" ];
Env = [
"USER=root"
"PATH=${lib.concatStringsSep ":" [
"/root/.nix-profile/bin"
"/nix/var/nix/profiles/default/bin"
"/nix/var/nix/profiles/default/sbin"
]}"
"MANPATH=${lib.concatStringsSep ":" [
"/root/.nix-profile/share/man"
"/nix/var/nix/profiles/default/share/man"
]}"
"SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"
"GIT_SSL_CAINFO=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"
"NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"
"NIX_PATH=/nix/var/nix/profiles/per-user/root/channels:/root/.nix-defexpr/channels"
];
};
}
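(Illustrative aside, not part of this diff: one way to build and load the image defined by this file; the `nix:latest` tag follows the defaults above, and docker's output is omitted.)
```console
$ nix-build docker.nix
$ docker load -i ./result
$ docker run -ti nix:latest nix --version
```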


@ -91,7 +91,7 @@
libarchive
boost
lowdown-nix
gmock
gtest
]
++ lib.optionals stdenv.isLinux [libseccomp]
++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium
@ -405,6 +405,21 @@
installerScript = installScriptFor [ "x86_64-linux" "i686-linux" "aarch64-linux" "x86_64-darwin" "aarch64-darwin" "armv6l-linux" "armv7l-linux" ];
installerScriptForGHA = installScriptFor [ "x86_64-linux" "x86_64-darwin" "armv6l-linux" "armv7l-linux"];
# docker image with Nix inside
dockerImage = nixpkgs.lib.genAttrs linux64BitSystems (system:
let
pkgs = nixpkgsFor.${system};
image = import ./docker.nix { inherit pkgs; tag = version; };
in pkgs.runCommand "docker-image-tarball-${version}"
{ meta.description = "Docker image with Nix for ${system}";
}
''
mkdir -p $out/nix-support
image=$out/image.tar.gz
ln -s ${image} $image
echo "file binary-dist $image" >> $out/nix-support/hydra-build-products
'');
# Line coverage analysis.
coverage =
with nixpkgsFor.x86_64-linux;
@ -509,6 +524,7 @@
binaryTarball = self.hydraJobs.binaryTarball.${system};
perlBindings = self.hydraJobs.perlBindings.${system};
installTests = self.hydraJobs.installTests.${system};
dockerImage = self.hydraJobs.dockerImage.${system};
});
packages = forAllSystems (system: {


@ -1,6 +1,7 @@
package Nix::Config;
use MIME::Base64;
use Nix::Store;
$version = "@PACKAGE_VERSION@";


@ -742,6 +742,9 @@ setup_volume() {
use_special="${NIX_VOLUME_USE_SPECIAL:-$(create_volume)}"
_sudo "to ensure the Nix volume is not mounted" \
/usr/sbin/diskutil unmount force "$use_special" || true # might not be mounted
use_uuid=${NIX_VOLUME_USE_UUID:-$(volume_uuid_from_special "$use_special")}
setup_fstab "$use_uuid"


@ -387,19 +387,28 @@ EOF
fi
for profile_target in "${PROFILE_TARGETS[@]}"; do
# TODO: I think it would be good to accumulate a list of all
# of the copies so that people don't hit this 2 or 3x in
# a row for different files.
if [ -e "$profile_target$PROFILE_BACKUP_SUFFIX" ]; then
# this backup process first released in Nix 2.1
failure <<EOF
When this script runs, it backs up the current $profile_target to
$profile_target$PROFILE_BACKUP_SUFFIX. This backup file already exists, though.
I back up shell profile/rc scripts before I add Nix to them.
I need to back up $profile_target to $profile_target$PROFILE_BACKUP_SUFFIX,
but the latter already exists.
Please follow these instructions to clean up the old backup file:
Here's how to clean up the old backup file:
1. Copy $profile_target and $profile_target$PROFILE_BACKUP_SUFFIX to another place, just
in case.
1. Back up (copy) $profile_target and $profile_target$PROFILE_BACKUP_SUFFIX
to another location, just in case.
2. Take care to make sure that $profile_target$PROFILE_BACKUP_SUFFIX doesn't look like
it has anything nix-related in it. If it does, something is probably
quite wrong. Please open an issue or get in touch immediately.
2. Ensure $profile_target$PROFILE_BACKUP_SUFFIX does not have anything
Nix-related in it. If it does, something is probably quite
wrong. Please open an issue or get in touch immediately.
3. Once you confirm $profile_target is backed up and
$profile_target$PROFILE_BACKUP_SUFFIX doesn't mention Nix, run:
mv $profile_target$PROFILE_BACKUP_SUFFIX $profile_target
EOF
fi
done
@ -599,7 +608,7 @@ manager. This will happen in a few stages:
1. Make sure your computer doesn't already have Nix. If it does, I
will show you instructions on how to clean up your old install.
2. Show you what we are going to install and where. Then we will ask
2. Show you what I am going to install and where. Then I will ask
if you are ready to continue.
3. Create the system users and groups that the Nix daemon uses to run
@ -614,14 +623,14 @@ manager. This will happen in a few stages:
EOF
if ui_confirm "Would you like to see a more detailed list of what we will do?"; then
if ui_confirm "Would you like to see a more detailed list of what I will do?"; then
cat <<EOF
We will:
I will:
- make sure your computer doesn't already have Nix files
(if it does, I will tell you how to clean them up.)
- create local users (see the list above for the users we'll make)
- create local users (see the list above for the users I'll make)
- create a local group ($NIX_BUILD_GROUP_NAME)
- install Nix in to $NIX_ROOT
- create a configuration file in /etc/nix
@ -656,7 +665,7 @@ run in a headless fashion, like this:
$ curl -L https://nixos.org/nix/install | sh
or maybe in a CI pipeline. Because of that, we're going to skip the
or maybe in a CI pipeline. Because of that, I'm going to skip the
verbose output in the interest of brevity.
If you would like to
@ -670,7 +679,7 @@ EOF
fi
cat <<EOF
This script is going to call sudo a lot. Every time we do, it'll
This script is going to call sudo a lot. Every time I do, it'll
output exactly what it'll do, and why.
Just like this:
@ -682,15 +691,15 @@ EOF
cat <<EOF
This might look scary, but everything can be undone by running just a
few commands. We used to ask you to confirm each time sudo ran, but it
few commands. I used to ask you to confirm each time sudo ran, but it
was too many times. Instead, I'll just ask you this one time:
EOF
if ui_confirm "Can we use sudo?"; then
if ui_confirm "Can I use sudo?"; then
ok "Yay! Thanks! Let's get going!"
else
failure <<EOF
That is okay, but we can't install.
That is okay, but I can't install.
EOF
fi
}
@ -809,10 +818,10 @@ main() {
# can fail faster in this case. Sourcing install-darwin... now runs
# `touch /` to detect Read-only root, but it could update times on
# pre-Catalina macOS if run as root user.
if [ $EUID -eq 0 ]; then
if [ "$EUID" -eq 0 ]; then
failure <<EOF
Please do not run this script with root privileges. We will call sudo
when we need to.
Please do not run this script with root privileges. I will call sudo
when I need to.
EOF
fi


@ -134,7 +134,7 @@ fi
echo "performing a single-user installation of Nix..." >&2
if ! [ -e $dest ]; then
if ! [ -e "$dest" ]; then
cmd="mkdir -m 0755 $dest && chown $USER $dest"
echo "directory $dest does not exist; creating it by running '$cmd' using sudo" >&2
if ! sudo sh -c "$cmd"; then
@ -143,12 +143,12 @@ if ! [ -e $dest ]; then
fi
fi
if ! [ -w $dest ]; then
if ! [ -w "$dest" ]; then
echo "$0: directory $dest exists, but is not writable by you. This could indicate that another user has already performed a single-user installation of Nix on this system. If you wish to enable multi-user support see https://nixos.org/nix/manual/#ssec-multi-user. If you wish to continue with a single-user install for $USER please run 'chown -R $USER $dest' as root." >&2
exit 1
fi
mkdir -p $dest/store
mkdir -p "$dest/store"
printf "copying Nix to %s..." "${dest}/store" >&2
# Insert a newline if no progress is shown.
@ -189,17 +189,17 @@ fi
# Install an SSL certificate bundle.
if [ -z "$NIX_SSL_CERT_FILE" ] || ! [ -f "$NIX_SSL_CERT_FILE" ]; then
$nix/bin/nix-env -i "$cacert"
"$nix/bin/nix-env" -i "$cacert"
export NIX_SSL_CERT_FILE="$HOME/.nix-profile/etc/ssl/certs/ca-bundle.crt"
fi
# Subscribe the user to the Nixpkgs channel and fetch it.
if [ -z "$NIX_INSTALLER_NO_CHANNEL_ADD" ]; then
if ! $nix/bin/nix-channel --list | grep -q "^nixpkgs "; then
$nix/bin/nix-channel --add https://nixos.org/channels/nixpkgs-unstable
if ! "$nix/bin/nix-channel" --list | grep -q "^nixpkgs "; then
"$nix/bin/nix-channel" --add https://nixos.org/channels/nixpkgs-unstable
fi
if [ -z "$_NIX_INSTALLER_TEST" ]; then
if ! $nix/bin/nix-channel --update nixpkgs; then
if ! "$nix/bin/nix-channel" --update nixpkgs; then
echo "Fetching the nixpkgs channel failed. (Are you offline?)"
echo "To try again later, run \"nix-channel --update nixpkgs\"."
fi


@ -15,7 +15,7 @@ readonly SERVICE_OVERRIDE=${SERVICE_DEST}.d/override.conf
create_systemd_override() {
header "Configuring proxy for the nix-daemon service"
_sudo "create directory for systemd unit override" mkdir -p "$(dirname $SERVICE_OVERRIDE)"
_sudo "create directory for systemd unit override" mkdir -p "$(dirname "$SERVICE_OVERRIDE")"
cat <<EOF | _sudo "create systemd unit override" tee "$SERVICE_OVERRIDE"
[Service]
$1


@ -81,10 +81,10 @@ if [ "$(uname -s)" != "Darwin" ]; then
require_util xz "unpack the binary tarball"
fi
if command -v wget > /dev/null 2>&1; then
fetch() { wget "$1" -O "$2"; }
elif command -v curl > /dev/null 2>&1; then
if command -v curl > /dev/null 2>&1; then
fetch() { curl -L "$1" -o "$2"; }
elif command -v wget > /dev/null 2>&1; then
fetch() { wget "$1" -O "$2"; }
else
oops "you don't have wget or curl installed, which I need to download the binary tarball"
fi


@ -1,7 +1,5 @@
nix_noinst_scripts := \
$(d)/nix-http-export.cgi \
$(d)/nix-profile.sh \
$(d)/nix-reduce-build
$(d)/nix-profile.sh
noinst-scripts += $(nix_noinst_scripts)


@ -1,51 +0,0 @@
#! /bin/sh
export HOME=/tmp
export NIX_REMOTE=daemon
TMP_DIR="${TMP_DIR:-/tmp/nix-export}"
@coreutils@/mkdir -p "$TMP_DIR" || true
@coreutils@/chmod a+r "$TMP_DIR"
needed_path="?$QUERY_STRING"
needed_path="${needed_path#*[?&]needed_path=}"
needed_path="${needed_path%%&*}"
#needed_path="$(echo $needed_path | ./unhttp)"
needed_path="${needed_path//%2B/+}"
needed_path="${needed_path//%3D/=}"
echo needed_path: "$needed_path" >&2
NIX_STORE="${NIX_STORE_DIR:-/nix/store}"
echo NIX_STORE: "${NIX_STORE}" >&2
full_path="${NIX_STORE}"/"$needed_path"
if [ "$needed_path" != "${needed_path%.drv}" ]; then
echo "Status: 403 You should create the derivation file yourself"
echo "Content-Type: text/plain"
echo
echo "Refusing to disclose derivation contents"
exit
fi
if @bindir@/nix-store --check-validity "$full_path"; then
if ! [ -e nix-export/"$needed_path".nar.gz ]; then
@bindir@/nix-store --export "$full_path" | @gzip@ > "$TMP_DIR"/"$needed_path".nar.gz
@coreutils@/ln -fs "$TMP_DIR"/"$needed_path".nar.gz nix-export/"$needed_path".nar.gz
fi;
echo "Status: 301 Moved"
echo "Location: nix-export/"$needed_path".nar.gz"
echo
else
echo "Status: 404 No such path found"
echo "Content-Type: text/plain"
echo
echo "Path not found:"
echo "$needed_path"
echo "checked:"
echo "$full_path"
fi


@ -5,7 +5,7 @@ __ETC_PROFILE_NIX_SOURCED=1
export NIX_PROFILES="@localstatedir@/nix/profiles/default $HOME/.nix-profile"
# Set $NIX_SSL_CERT_FILE so that Nixpkgs applications like curl work.
if [ ! -z "${NIX_SSL_CERT_FILE:-}" ]; then
if [ -n "${NIX_SSL_CERT_FILE:-}" ]; then
: # Allow users to override the NIX_SSL_CERT_FILE
elif [ -e /etc/ssl/certs/ca-certificates.crt ]; then # NixOS, Ubuntu, Debian, Gentoo, Arch
export NIX_SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt
@ -18,14 +18,14 @@ elif [ -e /etc/pki/tls/certs/ca-bundle.crt ]; then # Fedora, CentOS
else
# Fall back to what is in the nix profiles, favouring whatever is defined last.
check_nix_profiles() {
if [ "$ZSH_VERSION" ]; then
if [ -n "$ZSH_VERSION" ]; then
# Zsh by default doesn't split words in unquoted parameter expansion.
# Set local_options for these options to be reverted at the end of the function
# and shwordsplit to force splitting words in $NIX_PROFILES below.
setopt local_options shwordsplit
fi
for i in $NIX_PROFILES; do
if [ -e $i/etc/ssl/certs/ca-bundle.crt ]; then
if [ -e "$i/etc/ssl/certs/ca-bundle.crt" ]; then
export NIX_SSL_CERT_FILE=$i/etc/ssl/certs/ca-bundle.crt
fi
done


@ -1,171 +0,0 @@
#! @bash@
WORKING_DIRECTORY=$(mktemp -d "${TMPDIR:-/tmp}"/nix-reduce-build-XXXXXX);
cd "$WORKING_DIRECTORY";
if test -z "$1" || test "a--help" = "a$1" ; then
echo 'nix-reduce-build (paths or Nix expressions) -- (package sources)' >&2
echo As in: >&2
echo nix-reduce-build /etc/nixos/nixos -- ssh://user@somewhere.nowhere.example.org >&2
echo nix-reduce-build /etc/nixos/nixos -- \\
echo " " \''http://somewhere.nowhere.example.org/nix/nix-http-export.cgi?needed_path='\' >&2
echo " store path name will be added into the end of the URL" >&2
echo nix-reduce-build /etc/nixos/nixos -- file://home/user/nar/ >&2
echo " that should be a directory where gzipped 'nix-store --export' ">&2
echo " files are located (they should have .nar.gz extension)" >&2
echo " Or all together: " >&2
echo -e nix-reduce-build /expr.nix /e2.nix -- \\\\\\\n\
" ssh://a@b.example.com http://n.example.com/get-nar?q= file://nar/" >&2
echo " Also supports best-effort local builds of failing expression set:" >&2
echo "nix-reduce-build /e.nix -- nix-daemon:// nix-self://" >&2
echo " nix-daemon:// builds using daemon"
echo " nix-self:// builds directly using nix-store from current installation" >&2
echo " nix-daemon-fixed:// and nix-self-fixed:// do the same, but only for" >&2;
echo "derivations with specified output hash (sha256, sha1 or md5)." >&2
echo " nix-daemon-substitute:// and nix-self-substitute:// try to substitute" >&2;
echo "maximum amount of paths" >&2;
echo " nix-daemon-build:// and nix-self-build:// try to build (not substitute)" >&2;
echo "maximum amount of paths" >&2;
echo " If no package sources are specified, required paths are listed." >&2;
exit;
fi;
while ! test "$1" = "--" || test "$1" = "" ; do
echo "$1" >> initial; >&2
shift;
done
shift;
echo Will work on $(cat initial | wc -l) targets. >&2
while read ; do
case "$REPLY" in
${NIX_STORE_DIR:-/nix/store}/*)
echo "$REPLY" >> paths; >&2
;;
*)
(
IFS=: ;
nix-instantiate $REPLY >> paths;
);
;;
esac;
done < initial;
echo Proceeding $(cat paths | wc -l) paths. >&2
while read; do
case "$REPLY" in
*.drv)
echo "$REPLY" >> derivers; >&2
;;
*)
nix-store --query --deriver "$REPLY" >>derivers;
;;
esac;
done < paths;
echo Found $(cat derivers | wc -l) derivers. >&2
cat derivers | xargs nix-store --query -R > derivers-closure;
echo Proceeding at most $(cat derivers-closure | wc -l) derivers. >&2
cat derivers-closure | egrep '[.]drv$' | xargs nix-store --query --outputs > wanted-paths;
cat derivers-closure | egrep -v '[.]drv$' >> wanted-paths;
echo Prepared $(cat wanted-paths | wc -l) paths to get. >&2
cat wanted-paths | xargs nix-store --check-validity --print-invalid > needed-paths;
echo We need $(cat needed-paths | wc -l) paths. >&2
egrep '[.]drv$' derivers-closure > critical-derivers;
if test -z "$1" ; then
cat needed-paths;
fi;
refresh_critical_derivers() {
echo "Finding needed derivers..." >&2;
cat critical-derivers | while read; do
if ! (nix-store --query --outputs "$REPLY" | xargs nix-store --check-validity &> /dev/null;); then
echo "$REPLY";
fi;
done > new-critical-derivers;
mv new-critical-derivers critical-derivers;
echo The needed paths are realized by $(cat critical-derivers | wc -l) derivers. >&2
}
build_here() {
cat critical-derivers | while read; do
echo "Realising $REPLY using nix-daemon" >&2
@bindir@/nix-store -r "${REPLY}"
done;
}
try_to_substitute(){
cat needed-paths | while read ; do
echo "Building $REPLY using nix-daemon" >&2
@bindir@/nix-store -r "${NIX_STORE_DIR:-/nix/store}/${REPLY##*/}"
done;
}
for i in "$@"; do
sshHost="${i#ssh://}";
httpHost="${i#http://}";
httpsHost="${i#https://}";
filePath="${i#file:/}";
if [ "$i" != "$sshHost" ]; then
cat needed-paths | while read; do
echo "Getting $REPLY and its closure over ssh" >&2
nix-copy-closure --from "$sshHost" --gzip "$REPLY" </dev/null || true;
done;
elif [ "$i" != "$httpHost" ] || [ "$i" != "$httpsHost" ]; then
cat needed-paths | while read; do
echo "Getting $REPLY over http/https" >&2
curl ${BAD_CERTIFICATE:+-k} -L "$i${REPLY##*/}" | gunzip | nix-store --import;
done;
elif [ "$i" != "$filePath" ] ; then
cat needed-paths | while read; do
echo "Installing $REPLY from file" >&2
gunzip < "$filePath/${REPLY##*/}".nar.gz | nix-store --import;
done;
elif [ "$i" = "nix-daemon://" ] ; then
NIX_REMOTE=daemon try_to_substitute;
refresh_critical_derivers;
NIX_REMOTE=daemon build_here;
elif [ "$i" = "nix-self://" ] ; then
NIX_REMOTE= try_to_substitute;
refresh_critical_derivers;
NIX_REMOTE= build_here;
elif [ "$i" = "nix-daemon-fixed://" ] ; then
refresh_critical_derivers;
cat critical-derivers | while read; do
if egrep '"(md5|sha1|sha256)"' "$REPLY" &>/dev/null; then
echo "Realising $REPLY using nix-daemon" >&2
NIX_REMOTE=daemon @bindir@/nix-store -r "${REPLY}"
fi;
done;
elif [ "$i" = "nix-self-fixed://" ] ; then
refresh_critical_derivers;
cat critical-derivers | while read; do
if egrep '"(md5|sha1|sha256)"' "$REPLY" &>/dev/null; then
echo "Realising $REPLY using direct Nix build" >&2
NIX_REMOTE= @bindir@/nix-store -r "${REPLY}"
fi;
done;
elif [ "$i" = "nix-daemon-substitute://" ] ; then
NIX_REMOTE=daemon try_to_substitute;
elif [ "$i" = "nix-self-substitute://" ] ; then
NIX_REMOTE= try_to_substitute;
elif [ "$i" = "nix-daemon-build://" ] ; then
refresh_critical_derivers;
NIX_REMOTE=daemon build_here;
elif [ "$i" = "nix-self-build://" ] ; then
refresh_critical_derivers;
NIX_REMOTE= build_here;
fi;
mv needed-paths wanted-paths;
cat wanted-paths | xargs nix-store --check-validity --print-invalid > needed-paths;
echo We still need $(cat needed-paths | wc -l) paths. >&2
done;
cd /
rm -r "$WORKING_DIRECTORY"


@ -3,7 +3,7 @@
set -e
script=$(nix-build -A outputs.hydraJobs.installerScriptForGHA --no-out-link)
installerHash=$(echo $script | cut -b12-43 -)
installerHash=$(echo "$script" | cut -b12-43 -)
installerURL=https://$CACHIX_NAME.cachix.org/serve/$installerHash/install


@ -520,7 +520,7 @@ Path EvalState::checkSourcePath(const Path & path_)
}
if (!found)
throw RestrictedPathError("access to path '%1%' is forbidden in restricted mode", abspath);
throw RestrictedPathError("access to absolute path '%1%' is forbidden in restricted mode", abspath);
/* Resolve symlinks. */
debug(format("checking access to '%s'") % abspath);
@ -533,7 +533,7 @@ Path EvalState::checkSourcePath(const Path & path_)
}
}
throw RestrictedPathError("access to path '%1%' is forbidden in restricted mode", path);
throw RestrictedPathError("access to canonical path '%1%' is forbidden in restricted mode", path);
}
@ -583,14 +583,20 @@ Value * EvalState::addConstant(const string & name, Value & v)
{
Value * v2 = allocValue();
*v2 = v;
staticBaseEnv.vars[symbols.create(name)] = baseEnvDispl;
baseEnv.values[baseEnvDispl++] = v2;
string name2 = string(name, 0, 2) == "__" ? string(name, 2) : name;
baseEnv.values[0]->attrs->push_back(Attr(symbols.create(name2), v2));
addConstant(name, v2);
return v2;
}
void EvalState::addConstant(const string & name, Value * v)
{
staticBaseEnv.vars.emplace_back(symbols.create(name), baseEnvDispl);
baseEnv.values[baseEnvDispl++] = v;
string name2 = string(name, 0, 2) == "__" ? string(name, 2) : name;
baseEnv.values[0]->attrs->push_back(Attr(symbols.create(name2), v));
}
Value * EvalState::addPrimOp(const string & name,
size_t arity, PrimOpFun primOp)
{
@ -609,7 +615,7 @@ Value * EvalState::addPrimOp(const string & name,
Value * v = allocValue();
v->mkPrimOp(new PrimOp { .fun = primOp, .arity = arity, .name = sym });
staticBaseEnv.vars[symbols.create(name)] = baseEnvDispl;
staticBaseEnv.vars.emplace_back(symbols.create(name), baseEnvDispl);
baseEnv.values[baseEnvDispl++] = v;
baseEnv.values[0]->attrs->push_back(Attr(sym, v));
return v;
@ -635,7 +641,7 @@ Value * EvalState::addPrimOp(PrimOp && primOp)
Value * v = allocValue();
v->mkPrimOp(new PrimOp(std::move(primOp)));
staticBaseEnv.vars[envName] = baseEnvDispl;
staticBaseEnv.vars.emplace_back(envName, baseEnvDispl);
baseEnv.values[baseEnvDispl++] = v;
baseEnv.values[0]->attrs->push_back(Attr(primOp.name, v));
return v;
@ -785,7 +791,7 @@ void mkPath(Value & v, const char * s)
inline Value * EvalState::lookupVar(Env * env, const ExprVar & var, bool noEval)
{
for (size_t l = var.level; l; --l, env = env->up) ;
for (auto l = var.level; l; --l, env = env->up) ;
if (!var.fromWith) return env->values[var.displ];
@ -1058,7 +1064,7 @@ void ExprAttrs::eval(EvalState & state, Env & env, Value & v)
/* The recursive attributes are evaluated in the new
environment, while the inherited attributes are evaluated
in the original environment. */
size_t displ = 0;
Displacement displ = 0;
for (auto & i : attrs) {
Value * vAttr;
if (hasOverrides && !i.second.inherited) {
@ -1134,7 +1140,7 @@ void ExprLet::eval(EvalState & state, Env & env, Value & v)
/* The recursive attributes are evaluated in the new environment,
while the inherited attributes are evaluated in the original
environment. */
size_t displ = 0;
Displacement displ = 0;
for (auto & i : attrs->attrs)
env2.values[displ++] = i.second.e->maybeThunk(state, i.second.inherited ? env : env2);
@ -1251,144 +1257,184 @@ void ExprLambda::eval(EvalState & state, Env & env, Value & v)
}
void ExprApp::eval(EvalState & state, Env & env, Value & v)
{
/* FIXME: vFun prevents GCC from doing tail call optimisation. */
Value vFun;
e1->eval(state, env, vFun);
state.callFunction(vFun, *(e2->maybeThunk(state, env)), v, pos);
}
void EvalState::callPrimOp(Value & fun, Value & arg, Value & v, const Pos & pos)
{
/* Figure out the number of arguments still needed. */
size_t argsDone = 0;
Value * primOp = &fun;
while (primOp->isPrimOpApp()) {
argsDone++;
primOp = primOp->primOpApp.left;
}
assert(primOp->isPrimOp());
auto arity = primOp->primOp->arity;
auto argsLeft = arity - argsDone;
if (argsLeft == 1) {
/* We have all the arguments, so call the primop. */
/* Put all the arguments in an array. */
Value * vArgs[arity];
auto n = arity - 1;
vArgs[n--] = &arg;
for (Value * arg = &fun; arg->isPrimOpApp(); arg = arg->primOpApp.left)
vArgs[n--] = arg->primOpApp.right;
/* And call the primop. */
nrPrimOpCalls++;
if (countCalls) primOpCalls[primOp->primOp->name]++;
primOp->primOp->fun(*this, pos, vArgs, v);
} else {
Value * fun2 = allocValue();
*fun2 = fun;
v.mkPrimOpApp(fun2, &arg);
}
}
void EvalState::callFunction(Value & fun, Value & arg, Value & v, const Pos & pos)
void EvalState::callFunction(Value & fun, size_t nrArgs, Value * * args, Value & vRes, const Pos & pos)
{
auto trace = evalSettings.traceFunctionCalls ? std::make_unique<FunctionCallTrace>(pos) : nullptr;
forceValue(fun, pos);
if (fun.isPrimOp() || fun.isPrimOpApp()) {
callPrimOp(fun, arg, v, pos);
return;
}
Value vCur(fun);
if (fun.type() == nAttrs) {
auto found = fun.attrs->find(sFunctor);
if (found != fun.attrs->end()) {
/* fun may be allocated on the stack of the calling function,
* but for functors we may keep a reference, so heap-allocate
* a copy and use that instead.
*/
auto & fun2 = *allocValue();
fun2 = fun;
/* !!! Should we use the attr pos here? */
Value v2;
callFunction(*found->value, fun2, v2, pos);
return callFunction(v2, arg, v, pos);
}
}
auto makeAppChain = [&]()
{
vRes = vCur;
for (size_t i = 0; i < nrArgs; ++i) {
auto fun2 = allocValue();
*fun2 = vRes;
vRes.mkPrimOpApp(fun2, args[i]);
}
};
if (!fun.isLambda())
throwTypeError(pos, "attempt to call something which is not a function but %1%", fun);
Attr * functor;
ExprLambda & lambda(*fun.lambda.fun);
while (nrArgs > 0) {
auto size =
(lambda.arg.empty() ? 0 : 1) +
(lambda.hasFormals() ? lambda.formals->formals.size() : 0);
Env & env2(allocEnv(size));
env2.up = fun.lambda.env;
if (vCur.isLambda()) {
size_t displ = 0;
ExprLambda & lambda(*vCur.lambda.fun);
if (!lambda.hasFormals())
env2.values[displ++] = &arg;
auto size =
(lambda.arg.empty() ? 0 : 1) +
(lambda.hasFormals() ? lambda.formals->formals.size() : 0);
Env & env2(allocEnv(size));
env2.up = vCur.lambda.env;
else {
forceAttrs(arg, pos);
Displacement displ = 0;
if (!lambda.arg.empty())
env2.values[displ++] = &arg;
if (!lambda.hasFormals())
env2.values[displ++] = args[0];
/* For each formal argument, get the actual argument. If
there is no matching actual argument but the formal
argument has a default, use the default. */
size_t attrsUsed = 0;
for (auto & i : lambda.formals->formals) {
Bindings::iterator j = arg.attrs->find(i.name);
if (j == arg.attrs->end()) {
if (!i.def) throwTypeError(pos, "%1% called without required argument '%2%'",
lambda, i.name);
env2.values[displ++] = i.def->maybeThunk(*this, env2);
else {
forceAttrs(*args[0], pos);
if (!lambda.arg.empty())
env2.values[displ++] = args[0];
/* For each formal argument, get the actual argument. If
there is no matching actual argument but the formal
argument has a default, use the default. */
size_t attrsUsed = 0;
for (auto & i : lambda.formals->formals) {
auto j = args[0]->attrs->get(i.name);
if (!j) {
if (!i.def) throwTypeError(pos, "%1% called without required argument '%2%'",
lambda, i.name);
env2.values[displ++] = i.def->maybeThunk(*this, env2);
} else {
attrsUsed++;
env2.values[displ++] = j->value;
}
}
/* Check that each actual argument is listed as a formal
argument (unless the attribute match specifies a `...'). */
if (!lambda.formals->ellipsis && attrsUsed != args[0]->attrs->size()) {
/* Nope, so show the first unexpected argument to the
user. */
for (auto & i : *args[0]->attrs)
if (lambda.formals->argNames.find(i.name) == lambda.formals->argNames.end())
throwTypeError(pos, "%1% called with unexpected argument '%2%'", lambda, i.name);
abort(); // can't happen
}
}
nrFunctionCalls++;
if (countCalls) incrFunctionCall(&lambda);
/* Evaluate the body. */
try {
lambda.body->eval(*this, env2, vCur);
} catch (Error & e) {
if (loggerSettings.showTrace.get()) {
addErrorTrace(e, lambda.pos, "while evaluating %s",
(lambda.name.set()
? "'" + (string) lambda.name + "'"
: "anonymous lambda"));
addErrorTrace(e, pos, "from call site%s", "");
}
throw;
}
nrArgs--;
args += 1;
}
else if (vCur.isPrimOp()) {
size_t argsLeft = vCur.primOp->arity;
if (nrArgs < argsLeft) {
/* We don't have enough arguments, so create a tPrimOpApp chain. */
makeAppChain();
return;
} else {
attrsUsed++;
env2.values[displ++] = j->value;
/* We have all the arguments, so call the primop. */
nrPrimOpCalls++;
if (countCalls) primOpCalls[vCur.primOp->name]++;
vCur.primOp->fun(*this, pos, args, vCur);
nrArgs -= argsLeft;
args += argsLeft;
}
}
/* Check that each actual argument is listed as a formal
argument (unless the attribute match specifies a `...'). */
if (!lambda.formals->ellipsis && attrsUsed != arg.attrs->size()) {
/* Nope, so show the first unexpected argument to the
user. */
for (auto & i : *arg.attrs)
if (lambda.formals->argNames.find(i.name) == lambda.formals->argNames.end())
throwTypeError(pos, "%1% called with unexpected argument '%2%'", lambda, i.name);
abort(); // can't happen
else if (vCur.isPrimOpApp()) {
/* Figure out the number of arguments still needed. */
size_t argsDone = 0;
Value * primOp = &vCur;
while (primOp->isPrimOpApp()) {
argsDone++;
primOp = primOp->primOpApp.left;
}
assert(primOp->isPrimOp());
auto arity = primOp->primOp->arity;
auto argsLeft = arity - argsDone;
if (nrArgs < argsLeft) {
/* We still don't have enough arguments, so extend the tPrimOpApp chain. */
makeAppChain();
return;
} else {
/* We have all the arguments, so call the primop with
the previous and new arguments. */
Value * vArgs[arity];
auto n = argsDone;
for (Value * arg = &vCur; arg->isPrimOpApp(); arg = arg->primOpApp.left)
vArgs[--n] = arg->primOpApp.right;
for (size_t i = 0; i < argsLeft; ++i)
vArgs[argsDone + i] = args[i];
nrPrimOpCalls++;
if (countCalls) primOpCalls[primOp->primOp->name]++;
primOp->primOp->fun(*this, pos, vArgs, vCur);
nrArgs -= argsLeft;
args += argsLeft;
}
}
else if (vCur.type() == nAttrs && (functor = vCur.attrs->get(sFunctor))) {
/* 'vCur' may be allocated on the stack of the calling
function, but for functors we may keep a reference, so
heap-allocate a copy and use that instead. */
Value * args2[] = {allocValue(), args[0]};
*args2[0] = vCur;
/* !!! Should we use the attr pos here? */
callFunction(*functor->value, 2, args2, vCur, pos);
nrArgs--;
args++;
}
else
throwTypeError(pos, "attempt to call something which is not a function but %1%", vCur);
}
nrFunctionCalls++;
if (countCalls) incrFunctionCall(&lambda);
vRes = vCur;
}
/* Evaluate the body. This is conditional on showTrace, because
catching exceptions makes this function not tail-recursive. */
if (loggerSettings.showTrace.get())
try {
lambda.body->eval(*this, env2, v);
} catch (Error & e) {
addErrorTrace(e, lambda.pos, "while evaluating %s",
(lambda.name.set()
? "'" + (string) lambda.name + "'"
: "anonymous lambda"));
addErrorTrace(e, pos, "from call site%s", "");
throw;
}
else
fun.lambda.fun->body->eval(*this, env2, v);
void ExprCall::eval(EvalState & state, Env & env, Value & v)
{
Value vFun;
fun->eval(state, env, vFun);
Value * vArgs[args.size()];
for (size_t i = 0; i < args.size(); ++i)
vArgs[i] = args[i]->maybeThunk(state, env);
state.callFunction(vFun, args.size(), vArgs, v, pos);
}

View file

@ -277,6 +277,8 @@ private:
Value * addConstant(const string & name, Value & v);
void addConstant(const string & name, Value * v);
Value * addPrimOp(const string & name,
size_t arity, PrimOpFun primOp);
@ -316,8 +318,14 @@ public:
bool isFunctor(Value & fun);
void callFunction(Value & fun, Value & arg, Value & v, const Pos & pos);
void callPrimOp(Value & fun, Value & arg, Value & v, const Pos & pos);
// FIXME: use std::span
void callFunction(Value & fun, size_t nrArgs, Value * * args, Value & vRes, const Pos & pos);
void callFunction(Value & fun, Value & arg, Value & vRes, const Pos & pos)
{
Value * args[] = {&arg};
callFunction(fun, 1, args, vRes, pos);
}
/* Automatically call a function for which each argument has a
default value or has a binding in the `args' map. */

View file

@ -1,4 +1,5 @@
#include "flake.hh"
#include "globals.hh"
#include <nlohmann/json.hpp>
@ -52,21 +53,19 @@ void ConfigFile::apply()
auto trustedList = readTrustedList();
bool trusted = false;
if (auto saved = get(get(trustedList, name).value_or(std::map<std::string, bool>()), valueS)) {
if (nix::settings.acceptFlakeConfig){
trusted = true;
} else if (auto saved = get(get(trustedList, name).value_or(std::map<std::string, bool>()), valueS)) {
trusted = *saved;
warn("Using saved setting for '%s = %s' from ~/.local/share/nix/trusted-settings.json.", name,valueS);
} else {
// FIXME: filter ANSI escapes, newlines, \r, etc.
if (std::tolower(logger->ask(fmt("do you want to allow configuration setting '%s' to be set to '" ANSI_RED "%s" ANSI_NORMAL "' (y/N)?", name, valueS)).value_or('n')) != 'y') {
if (std::tolower(logger->ask("do you want to permanently mark this value as untrusted (y/N)?").value_or('n')) == 'y') {
trustedList[name][valueS] = false;
writeTrustedList(trustedList);
}
} else {
if (std::tolower(logger->ask("do you want to permanently mark this value as trusted (y/N)?").value_or('n')) == 'y') {
trustedList[name][valueS] = trusted = true;
writeTrustedList(trustedList);
}
if (std::tolower(logger->ask(fmt("do you want to allow configuration setting '%s' to be set to '" ANSI_RED "%s" ANSI_NORMAL "' (y/N)?", name, valueS)).value_or('n')) == 'y') {
trusted = true;
}
if (std::tolower(logger->ask(fmt("do you want to permanently mark this value as %s (y/N)?", trusted ? "trusted": "untrusted" )).value_or('n')) == 'y') {
trustedList[name][valueS] = trusted;
writeTrustedList(trustedList);
}
}

View file

@ -309,7 +309,7 @@ LockedFlake lockFlake(
if (lockFlags.applyNixConfig) {
flake.config.apply();
// FIXME: send new config to the daemon.
state.store->setOptions();
}
try {
@ -448,22 +448,18 @@ LockedFlake lockFlake(
update it. */
auto lb = lockFlags.inputUpdates.lower_bound(inputPath);
auto hasChildUpdate =
auto mustRefetch =
lb != lockFlags.inputUpdates.end()
&& lb->size() > inputPath.size()
&& std::equal(inputPath.begin(), inputPath.end(), lb->begin());
if (hasChildUpdate) {
auto inputFlake = getFlake(
state, oldLock->lockedRef, false, flakeCache);
computeLocks(inputFlake.inputs, childNode, inputPath, oldLock, parent, parentPath);
} else {
FlakeInputs fakeInputs;
if (!mustRefetch) {
/* No need to fetch this flake, we can be
lazy. However there may be new overrides on the
inputs of this flake, so we need to check
those. */
FlakeInputs fakeInputs;
for (auto & i : oldLock->inputs) {
if (auto lockedNode = std::get_if<0>(&i.second)) {
fakeInputs.emplace(i.first, FlakeInput {
@ -471,15 +467,28 @@ LockedFlake lockFlake(
.isFlake = (*lockedNode)->isFlake,
});
} else if (auto follows = std::get_if<1>(&i.second)) {
auto o = input.overrides.find(i.first);
// If the override disappeared, we have to refetch the flake,
// since some of the inputs may not be present in the lockfile.
if (o == input.overrides.end()) {
mustRefetch = true;
// There's no point populating the rest of the fake inputs,
// since we'll refetch the flake anyways.
break;
}
fakeInputs.emplace(i.first, FlakeInput {
.follows = *follows,
});
}
}
computeLocks(fakeInputs, childNode, inputPath, oldLock, parent, parentPath);
}
computeLocks(
mustRefetch
? getFlake(state, oldLock->lockedRef, false, flakeCache).inputs
: fakeInputs,
childNode, inputPath, oldLock, parent, parentPath);
} else {
/* We need to create a new lock file entry. So fetch
this input. */

View file

@ -64,6 +64,7 @@ static void adjustLoc(YYLTYPE * loc, const char * s, size_t len)
}
// FIXME: optimize
static Expr * unescapeStr(SymbolTable & symbols, const char * s, size_t length)
{
string t;

View file

@ -143,6 +143,16 @@ void ExprLambda::show(std::ostream & str) const
str << ": " << *body << ")";
}
void ExprCall::show(std::ostream & str) const
{
str << '(' << *fun;
for (auto e : args) {
str << ' ';
str << *e;
}
str << ')';
}
void ExprLet::show(std::ostream & str) const
{
str << "(let ";
@ -263,13 +273,13 @@ void ExprVar::bindVars(const StaticEnv & env)
/* Check whether the variable appears in the environment. If so,
set its level and displacement. */
const StaticEnv * curEnv;
unsigned int level;
Level level;
int withLevel = -1;
for (curEnv = &env, level = 0; curEnv; curEnv = curEnv->up, level++) {
if (curEnv->isWith) {
if (withLevel == -1) withLevel = level;
} else {
StaticEnv::Vars::const_iterator i = curEnv->vars.find(name);
auto i = curEnv->find(name);
if (i != curEnv->vars.end()) {
fromWith = false;
this->level = level;
@ -311,14 +321,16 @@ void ExprOpHasAttr::bindVars(const StaticEnv & env)
void ExprAttrs::bindVars(const StaticEnv & env)
{
const StaticEnv * dynamicEnv = &env;
StaticEnv newEnv(false, &env);
StaticEnv newEnv(false, &env, recursive ? attrs.size() : 0);
if (recursive) {
dynamicEnv = &newEnv;
unsigned int displ = 0;
Displacement displ = 0;
for (auto & i : attrs)
newEnv.vars[i.first] = i.second.displ = displ++;
newEnv.vars.emplace_back(i.first, i.second.displ = displ++);
// No need to sort newEnv since attrs is in sorted order.
for (auto & i : attrs)
i.second.e->bindVars(i.second.inherited ? env : newEnv);
@ -342,15 +354,20 @@ void ExprList::bindVars(const StaticEnv & env)
void ExprLambda::bindVars(const StaticEnv & env)
{
StaticEnv newEnv(false, &env);
StaticEnv newEnv(
false, &env,
(hasFormals() ? formals->formals.size() : 0) +
(arg.empty() ? 0 : 1));
unsigned int displ = 0;
Displacement displ = 0;
if (!arg.empty()) newEnv.vars[arg] = displ++;
if (!arg.empty()) newEnv.vars.emplace_back(arg, displ++);
if (hasFormals()) {
for (auto & i : formals->formals)
newEnv.vars[i.name] = displ++;
newEnv.vars.emplace_back(i.name, displ++);
newEnv.sort();
for (auto & i : formals->formals)
if (i.def) i.def->bindVars(newEnv);
@ -359,13 +376,22 @@ void ExprLambda::bindVars(const StaticEnv & env)
body->bindVars(newEnv);
}
void ExprCall::bindVars(const StaticEnv & env)
{
fun->bindVars(env);
for (auto e : args)
e->bindVars(env);
}
void ExprLet::bindVars(const StaticEnv & env)
{
StaticEnv newEnv(false, &env);
StaticEnv newEnv(false, &env, attrs->attrs.size());
unsigned int displ = 0;
Displacement displ = 0;
for (auto & i : attrs->attrs)
newEnv.vars[i.first] = i.second.displ = displ++;
newEnv.vars.emplace_back(i.first, i.second.displ = displ++);
// No need to sort newEnv since attrs->attrs is in sorted order.
for (auto & i : attrs->attrs)
i.second.e->bindVars(i.second.inherited ? env : newEnv);
@ -379,7 +405,7 @@ void ExprWith::bindVars(const StaticEnv & env)
level so that `lookupVar' can look up variables in the previous
`with' if this one doesn't contain the desired attribute. */
const StaticEnv * curEnv;
unsigned int level;
Level level;
prevWith = 0;
for (curEnv = &env, level = 1; curEnv; curEnv = curEnv->up, level++)
if (curEnv->isWith) {
@ -452,5 +478,4 @@ size_t SymbolTable::totalSize() const
return n;
}
}

View file

@ -4,8 +4,6 @@
#include "symbol-table.hh"
#include "error.hh"
#include <map>
namespace nix {
@ -135,6 +133,9 @@ struct ExprPath : Expr
Value * maybeThunk(EvalState & state, Env & env);
};
typedef uint32_t Level;
typedef uint32_t Displacement;
struct ExprVar : Expr
{
Pos pos;
@ -150,8 +151,8 @@ struct ExprVar : Expr
value is obtained by getting the attribute named `name' from
the set stored in the environment that is `level' levels up
from the current one.*/
unsigned int level;
unsigned int displ;
Level level;
Displacement displ;
ExprVar(const Symbol & name) : name(name) { };
ExprVar(const Pos & pos, const Symbol & name) : pos(pos), name(name) { };
@ -185,7 +186,7 @@ struct ExprAttrs : Expr
bool inherited;
Expr * e;
Pos pos;
unsigned int displ; // displacement
Displacement displ; // displacement
AttrDef(Expr * e, const Pos & pos, bool inherited=false)
: inherited(inherited), e(e), pos(pos) { };
AttrDef() { };
@ -250,6 +251,17 @@ struct ExprLambda : Expr
COMMON_METHODS
};
struct ExprCall : Expr
{
Expr * fun;
std::vector<Expr *> args;
Pos pos;
ExprCall(const Pos & pos, Expr * fun, std::vector<Expr *> && args)
: fun(fun), args(args), pos(pos)
{ }
COMMON_METHODS
};
struct ExprLet : Expr
{
ExprAttrs * attrs;
@ -308,7 +320,6 @@ struct ExprOpNot : Expr
void eval(EvalState & state, Env & env, Value & v); \
};
MakeBinOp(ExprApp, "")
MakeBinOp(ExprOpEq, "==")
MakeBinOp(ExprOpNEq, "!=")
MakeBinOp(ExprOpAnd, "&&")
@ -342,9 +353,28 @@ struct StaticEnv
{
bool isWith;
const StaticEnv * up;
typedef std::map<Symbol, unsigned int> Vars;
// Note: these must be in sorted order.
typedef std::vector<std::pair<Symbol, Displacement>> Vars;
Vars vars;
StaticEnv(bool isWith, const StaticEnv * up) : isWith(isWith), up(up) { };
StaticEnv(bool isWith, const StaticEnv * up, size_t expectedSize = 0) : isWith(isWith), up(up) {
vars.reserve(expectedSize);
};
void sort()
{
std::sort(vars.begin(), vars.end(),
[](const Vars::value_type & a, const Vars::value_type & b) { return a.first < b.first; });
}
Vars::const_iterator find(const Symbol & name) const
{
Vars::value_type key(name, 0);
auto i = std::lower_bound(vars.begin(), vars.end(), key);
if (i != vars.end() && i->first == name) return i;
return vars.end();
}
};
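The struct above replaces the old std::map with a vector that is kept sorted, so find() becomes a binary search via std::lower_bound. A minimal standalone sketch of that lookup pattern, with plain std::string and uint32_t standing in for the real Symbol and Displacement types:

#include <algorithm>
#include <cstdint>
#include <string>
#include <utility>
#include <vector>

using Vars = std::vector<std::pair<std::string, uint32_t>>;

// Assumes 'vars' is sorted by name, as StaticEnv::sort() guarantees.
const std::pair<std::string, uint32_t> * findVar(const Vars & vars, const std::string & name)
{
    Vars::value_type key(name, 0);
    auto i = std::lower_bound(vars.begin(), vars.end(), key);
    if (i != vars.end() && i->first == name) return &*i;
    return nullptr;
}

int main()
{
    Vars vars{{"a", 0}, {"m", 1}, {"z", 2}}; // already in sorted order
    return (findVar(vars, "m")->second == 1 && findVar(vars, "q") == nullptr) ? 0 : 1;
}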

View file

@ -33,11 +33,9 @@ namespace nix {
Symbol file;
FileOrigin origin;
std::optional<ErrorInfo> error;
Symbol sLetBody;
ParseData(EvalState & state)
: state(state)
, symbols(state.symbols)
, sLetBody(symbols.create("<let-body>"))
{ };
};
@ -126,14 +124,14 @@ static void addAttr(ExprAttrs * attrs, AttrPath & attrPath,
auto j2 = jAttrs->attrs.find(ad.first);
if (j2 != jAttrs->attrs.end()) // Attr already defined in iAttrs, error.
dupAttr(ad.first, j2->second.pos, ad.second.pos);
jAttrs->attrs[ad.first] = ad.second;
jAttrs->attrs.emplace(ad.first, ad.second);
}
} else {
dupAttr(attrPath, pos, j->second.pos);
}
} else {
// This attr path is not defined. Let's create it.
attrs->attrs[i->symbol] = ExprAttrs::AttrDef(e, pos);
attrs->attrs.emplace(i->symbol, ExprAttrs::AttrDef(e, pos));
e->setName(i->symbol);
}
} else {
@ -283,7 +281,7 @@ void yyerror(YYLTYPE * loc, yyscan_t scanner, ParseData * data, const char * err
}
%type <e> start expr expr_function expr_if expr_op
%type <e> expr_app expr_select expr_simple
%type <e> expr_select expr_simple expr_app
%type <list> expr_list
%type <attrs> binds
%type <formals> formals
@ -353,13 +351,13 @@ expr_if
expr_op
: '!' expr_op %prec NOT { $$ = new ExprOpNot($2); }
| '-' expr_op %prec NEGATE { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__sub")), new ExprInt(0)), $2); }
| '-' expr_op %prec NEGATE { $$ = new ExprCall(CUR_POS, new ExprVar(data->symbols.create("__sub")), {new ExprInt(0), $2}); }
| expr_op EQ expr_op { $$ = new ExprOpEq($1, $3); }
| expr_op NEQ expr_op { $$ = new ExprOpNEq($1, $3); }
| expr_op '<' expr_op { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__lessThan")), $1), $3); }
| expr_op LEQ expr_op { $$ = new ExprOpNot(new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__lessThan")), $3), $1)); }
| expr_op '>' expr_op { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__lessThan")), $3), $1); }
| expr_op GEQ expr_op { $$ = new ExprOpNot(new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__lessThan")), $1), $3)); }
| expr_op '<' expr_op { $$ = new ExprCall(CUR_POS, new ExprVar(data->symbols.create("__lessThan")), {$1, $3}); }
| expr_op LEQ expr_op { $$ = new ExprOpNot(new ExprCall(CUR_POS, new ExprVar(data->symbols.create("__lessThan")), {$3, $1})); }
| expr_op '>' expr_op { $$ = new ExprCall(CUR_POS, new ExprVar(data->symbols.create("__lessThan")), {$3, $1}); }
| expr_op GEQ expr_op { $$ = new ExprOpNot(new ExprCall(CUR_POS, new ExprVar(data->symbols.create("__lessThan")), {$1, $3})); }
| expr_op AND expr_op { $$ = new ExprOpAnd(CUR_POS, $1, $3); }
| expr_op OR expr_op { $$ = new ExprOpOr(CUR_POS, $1, $3); }
| expr_op IMPL expr_op { $$ = new ExprOpImpl(CUR_POS, $1, $3); }
@ -367,17 +365,22 @@ expr_op
| expr_op '?' attrpath { $$ = new ExprOpHasAttr($1, *$3); }
| expr_op '+' expr_op
{ $$ = new ExprConcatStrings(CUR_POS, false, new vector<Expr *>({$1, $3})); }
| expr_op '-' expr_op { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__sub")), $1), $3); }
| expr_op '*' expr_op { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__mul")), $1), $3); }
| expr_op '/' expr_op { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__div")), $1), $3); }
| expr_op '-' expr_op { $$ = new ExprCall(CUR_POS, new ExprVar(data->symbols.create("__sub")), {$1, $3}); }
| expr_op '*' expr_op { $$ = new ExprCall(CUR_POS, new ExprVar(data->symbols.create("__mul")), {$1, $3}); }
| expr_op '/' expr_op { $$ = new ExprCall(CUR_POS, new ExprVar(data->symbols.create("__div")), {$1, $3}); }
| expr_op CONCAT expr_op { $$ = new ExprOpConcatLists(CUR_POS, $1, $3); }
| expr_app
;
expr_app
: expr_app expr_select
{ $$ = new ExprApp(CUR_POS, $1, $2); }
| expr_select { $$ = $1; }
: expr_app expr_select {
if (auto e2 = dynamic_cast<ExprCall *>($1)) {
e2->args.push_back($2);
$$ = $1;
} else
$$ = new ExprCall(CUR_POS, $1, {$2});
}
| expr_select
;
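The new expr_app rule above folds a chain of juxtapositions into a single call node instead of building one left-nested ExprApp per argument. A rough standalone sketch of that flattening step, with Node/Var/Call as stand-ins for the real AST classes (allocations are leaked for brevity):

#include <string>
#include <vector>

struct Node { virtual ~Node() = default; };

struct Var : Node {
    std::string name;
    explicit Var(std::string n) : name(std::move(n)) { }
};

struct Call : Node {
    Node * fun;
    std::vector<Node *> args;
    Call(Node * fun, std::vector<Node *> args) : fun(fun), args(std::move(args)) { }
};

// Mirrors the grammar action: extend an existing call, otherwise start a new one.
Node * apply(Node * fun, Node * arg)
{
    if (auto call = dynamic_cast<Call *>(fun)) {
        call->args.push_back(arg);
        return call;
    }
    return new Call(fun, {arg});
}

int main()
{
    Node * e = new Var("f");
    for (const char * a : {"x", "y", "z"})
        e = apply(e, new Var(a));
    // 'e' is now one Call{f, [x, y, z]} rather than ((f x) y) z.
}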
expr_select
@ -388,7 +391,7 @@ expr_select
| /* Backwards compatibility: because Nixpkgs has a rarely used
function named or, allow stuff like map or [...]. */
expr_simple OR_KW
{ $$ = new ExprApp(CUR_POS, $1, new ExprVar(CUR_POS, data->symbols.create("or"))); }
{ $$ = new ExprCall(CUR_POS, $1, {new ExprVar(CUR_POS, data->symbols.create("or"))}); }
| expr_simple { $$ = $1; }
;
@ -412,10 +415,10 @@ expr_simple
}
| SPATH {
string path($1 + 1, strlen($1) - 2);
$$ = new ExprApp(CUR_POS,
new ExprApp(new ExprVar(data->symbols.create("__findFile")),
new ExprVar(data->symbols.create("__nixPath"))),
new ExprString(data->symbols.create(path)));
$$ = new ExprCall(CUR_POS,
new ExprVar(data->symbols.create("__findFile")),
{new ExprVar(data->symbols.create("__nixPath")),
new ExprString(data->symbols.create(path))});
}
| URI {
static bool noURLLiterals = settings.isExperimentalFeatureEnabled(Xp::NoUrlLiterals);
@ -483,7 +486,7 @@ binds
if ($$->attrs.find(i.symbol) != $$->attrs.end())
dupAttr(i.symbol, makeCurPos(@3, data), $$->attrs[i.symbol].pos);
Pos pos = makeCurPos(@3, data);
$$->attrs[i.symbol] = ExprAttrs::AttrDef(new ExprVar(CUR_POS, i.symbol), pos, true);
$$->attrs.emplace(i.symbol, ExprAttrs::AttrDef(new ExprVar(CUR_POS, i.symbol), pos, true));
}
}
| binds INHERIT '(' expr ')' attrs ';'
@ -492,7 +495,7 @@ binds
for (auto & i : *$6) {
if ($$->attrs.find(i.symbol) != $$->attrs.end())
dupAttr(i.symbol, makeCurPos(@6, data), $$->attrs[i.symbol].pos);
$$->attrs[i.symbol] = ExprAttrs::AttrDef(new ExprSelect(CUR_POS, $4, i.symbol), makeCurPos(@6, data));
$$->attrs.emplace(i.symbol, ExprAttrs::AttrDef(new ExprSelect(CUR_POS, $4, i.symbol), makeCurPos(@6, data)));
}
}
| { $$ = new ExprAttrs(makeCurPos(@0, data)); }

View file

@ -70,7 +70,7 @@ void EvalState::realiseContext(const PathSet & context)
if (outputPaths.count(outputName) == 0)
throw Error("derivation '%s' does not have an output named '%s'",
store->printStorePath(drvPath), outputName);
allowedPaths->insert(store->printStorePath(outputPaths.at(outputName)));
allowPath(outputPaths.at(outputName));
}
}
}
@ -184,14 +184,17 @@ static void import(EvalState & state, const Pos & pos, Value & vPath, Value * vS
Env * env = &state.allocEnv(vScope->attrs->size());
env->up = &state.baseEnv;
StaticEnv staticEnv(false, &state.staticBaseEnv);
StaticEnv staticEnv(false, &state.staticBaseEnv, vScope->attrs->size());
unsigned int displ = 0;
for (auto & attr : *vScope->attrs) {
staticEnv.vars[attr.name] = displ;
staticEnv.vars.emplace_back(attr.name, displ);
env->values[displ++] = attr.value;
}
// No need to call staticEnv.sort(), because
// args[0]->attrs is already sorted.
printTalkative("evaluating file '%1%'", realPath);
Expr * e = state.parseExprFromFile(resolveExprPath(realPath), staticEnv);
@ -514,7 +517,11 @@ static RegisterPrimOp primop_isPath({
struct CompareValues
{
bool operator () (const Value * v1, const Value * v2) const
EvalState & state;
CompareValues(EvalState & state) : state(state) { };
bool operator () (Value * v1, Value * v2) const
{
if (v1->type() == nFloat && v2->type() == nInt)
return v1->fpoint < v2->integer;
@ -531,6 +538,17 @@ struct CompareValues
return strcmp(v1->string.s, v2->string.s) < 0;
case nPath:
return strcmp(v1->path, v2->path) < 0;
case nList:
// Lexicographic comparison
for (size_t i = 0;; i++) {
if (i == v2->listSize()) {
return false;
} else if (i == v1->listSize()) {
return true;
} else if (!state.eqValues(*v1->listElems()[i], *v2->listElems()[i])) {
return (*this)(v1->listElems()[i], v2->listElems()[i]);
}
}
default:
throw EvalError("cannot compare %1% with %2%", showType(*v1), showType(*v2));
}
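The new nList case above compares lists lexicographically: the first differing element decides, and otherwise the shorter list sorts first. The same rule, checked on plain integer vectors standing in for Nix list values:

#include <cassert>
#include <cstddef>
#include <vector>

static bool lessThan(const std::vector<int> & a, const std::vector<int> & b)
{
    for (std::size_t i = 0; ; i++) {
        if (i == b.size()) return false;       // b ran out first (or both did): a is not less
        if (i == a.size()) return true;        // a is a proper prefix of b: a is less
        if (a[i] != b[i]) return a[i] < b[i];  // first differing element decides
    }
}

int main()
{
    assert(lessThan({1, 2}, {1, 2, 3}));
    assert(!lessThan({1, 3}, {1, 2, 3}));
    assert(!lessThan({1, 2}, {1, 2}));
}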
@ -618,7 +636,8 @@ static void prim_genericClosure(EvalState & state, const Pos & pos, Value * * ar
ValueList res;
// `doneKeys' doesn't need to be a GC root, because its values are
// reachable from res.
set<Value *, CompareValues> doneKeys;
auto cmp = CompareValues(state);
set<Value *, decltype(cmp)> doneKeys(cmp);
while (!workSet.empty()) {
Value * e = *(workSet.begin());
workSet.pop_front();
@ -1009,7 +1028,7 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * *
if (i->name == state.sStructuredAttrs) continue;
auto placeholder(jsonObject->placeholder(key));
printValueAsJSON(state, true, *i->value, placeholder, context);
printValueAsJSON(state, true, *i->value, pos, placeholder, context);
if (i->name == state.sBuilder)
drv.builder = state.forceString(*i->value, context, posDrvName);
@ -1580,7 +1599,7 @@ static void prim_toXML(EvalState & state, const Pos & pos, Value * * args, Value
{
std::ostringstream out;
PathSet context;
printValueAsXML(state, true, false, *args[0], out, context);
printValueAsXML(state, true, false, *args[0], out, context, pos);
mkString(v, out.str(), context);
}
@ -1688,7 +1707,7 @@ static void prim_toJSON(EvalState & state, const Pos & pos, Value * * args, Valu
{
std::ostringstream out;
PathSet context;
printValueAsJSON(state, true, *args[0], out, context);
printValueAsJSON(state, true, *args[0], pos, out, context);
mkString(v, out.str(), context);
}
@ -1860,12 +1879,12 @@ static void addPath(
// be rewritten to the actual output).
state.realiseContext(context);
StorePathSet refs;
if (state.store->isInStore(path)) {
auto [storePath, subPath] = state.store->toStorePath(path);
auto info = state.store->queryPathInfo(storePath);
if (!info->references.empty())
throw EvalError("store path '%s' is not allowed to have references",
state.store->printStorePath(storePath));
// FIXME: we should scanForReferences on the path before adding it
refs = state.store->queryPathInfo(storePath)->references;
path = state.store->toRealPath(storePath) + subPath;
}
@ -1881,9 +1900,6 @@ static void addPath(
Value arg1;
mkString(arg1, path);
Value fun2;
state.callFunction(*filterFun, arg1, fun2, noPos);
Value arg2;
mkString(arg2,
S_ISREG(st.st_mode) ? "regular" :
@ -1891,8 +1907,9 @@ static void addPath(
S_ISLNK(st.st_mode) ? "symlink" :
"unknown" /* not supported, will fail! */);
Value * args []{&arg1, &arg2};
Value res;
state.callFunction(fun2, arg2, res, noPos);
state.callFunction(*filterFun, 2, args, res, pos);
return state.forceBool(res, pos);
}) : defaultPathFilter;
@ -1905,7 +1922,7 @@ static void addPath(
if (!expectedHash || !state.store->isValidPath(*expectedStorePath)) {
dstPath = state.store->printStorePath(settings.readOnlyMode
? state.store->computeStorePathForPath(name, path, method, htSHA256, filter).first
: state.store->addToStore(name, path, method, htSHA256, filter, state.repair));
: state.store->addToStore(name, path, method, htSHA256, filter, state.repair, refs));
if (expectedHash && expectedStorePath != state.store->parseStorePath(dstPath))
throw Error("store path mismatch in (possibly filtered) path added from '%s'", path);
} else
@ -2693,10 +2710,9 @@ static void prim_foldlStrict(EvalState & state, const Pos & pos, Value * * args,
Value * vCur = args[1];
for (unsigned int n = 0; n < args[2]->listSize(); ++n) {
Value vTmp;
state.callFunction(*args[0], *vCur, vTmp, pos);
Value * vs []{vCur, args[2]->listElems()[n]};
vCur = n == args[2]->listSize() - 1 ? &v : state.allocValue();
state.callFunction(vTmp, *args[2]->listElems()[n], *vCur, pos);
state.callFunction(*args[0], 2, vs, *vCur, pos);
}
state.forceValue(v, pos);
} else {
@ -2710,9 +2726,9 @@ static RegisterPrimOp primop_foldlStrict({
.args = {"op", "nul", "list"},
.doc = R"(
Reduce a list by applying a binary operator, from left to right,
e.g. `foldl op nul [x0 x1 x2 ...] = op (op (op nul x0) x1) x2)
e.g. `foldl' op nul [x0 x1 x2 ...] = op (op (op nul x0) x1) x2)
...`. The operator is applied strictly, i.e., its arguments are
evaluated first. For example, `foldl (x: y: x + y) 0 [1 2 3]`
evaluated first. For example, `foldl' (x: y: x + y) 0 [1 2 3]`
evaluates to 6.
)",
.fun = prim_foldlStrict,
@ -2817,17 +2833,16 @@ static void prim_sort(EvalState & state, const Pos & pos, Value * * args, Value
v.listElems()[n] = args[1]->listElems()[n];
}
auto comparator = [&](Value * a, Value * b) {
/* Optimization: if the comparator is lessThan, bypass
callFunction. */
if (args[0]->isPrimOp() && args[0]->primOp->fun == prim_lessThan)
return CompareValues()(a, b);
return CompareValues(state)(a, b);
Value vTmp1, vTmp2;
state.callFunction(*args[0], *a, vTmp1, pos);
state.callFunction(vTmp1, *b, vTmp2, pos);
return state.forceBool(vTmp2, pos);
Value * vs[] = {a, b};
Value vBool;
state.callFunction(*args[0], 2, vs, vBool, pos);
return state.forceBool(vBool, pos);
};
/* FIXME: std::sort can segfault if the comparator is not a strict
@ -3104,7 +3119,7 @@ static void prim_lessThan(EvalState & state, const Pos & pos, Value * * args, Va
{
state.forceValue(*args[0], pos);
state.forceValue(*args[1], pos);
CompareValues comp;
CompareValues comp{state};
mkBool(v, comp(args[0], args[1]));
}
@ -3694,7 +3709,7 @@ void EvalState::createBaseEnv()
language feature gets added. It's not necessary to increase it
when primops get added, because you can just use `builtins ?
primOp' to check. */
mkInt(v, 5);
mkInt(v, 6);
addConstant("__langVersion", v);
// Miscellaneous
@ -3728,14 +3743,20 @@ void EvalState::createBaseEnv()
/* Add a wrapper around the derivation primop that computes the
`drvPath' and `outPath' attributes lazily. */
sDerivationNix = symbols.create("//builtin/derivation.nix");
eval(parse(
#include "primops/derivation.nix.gen.hh"
, foFile, sDerivationNix, "/", staticBaseEnv), v);
addConstant("derivation", v);
auto vDerivation = allocValue();
addConstant("derivation", vDerivation);
/* Now that we've added all primops, sort the `builtins' set,
because attribute lookups expect it to be sorted. */
baseEnv.values[0]->attrs->sort();
staticBaseEnv.sort();
/* Note: we have to initialize the 'derivation' constant *after*
building baseEnv/staticBaseEnv because it uses 'builtins'. */
eval(parse(
#include "primops/derivation.nix.gen.hh"
, foFile, sDerivationNix, "/", staticBaseEnv), *vDerivation);
}

View file

@ -74,7 +74,10 @@ std::string fixURI(std::string uri, EvalState & state, const std::string & defau
std::string fixURIForGit(std::string uri, EvalState & state)
{
static std::regex scp_uri("([^/].*)@(.*):(.*)");
/* Detects scp-style URIs (e.g. git@github.com:NixOS/nix) and fixes
* them by replacing the `:` with a `/` and assuming a scheme of `ssh://`
*/
static std::regex scp_uri("([^/]*)@(.*):(.*)");
if (uri[0] != '/' && std::regex_match(uri, scp_uri))
return fixURI(std::regex_replace(uri, scp_uri, "$1@$2/$3"), state, "ssh");
else
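As a concrete check of the rewrite described in the comment above, this is what the regex replacement produces for an example scp-style URI before the assumed ssh:// scheme is prepended:

#include <iostream>
#include <regex>
#include <string>

int main()
{
    static const std::regex scp_uri("([^/]*)@(.*):(.*)");
    std::string uri = "git@github.com:NixOS/nix";
    // "$1@$2/$3" replaces the host:path separator with '/', giving
    // git@github.com/NixOS/nix, which fixURI then turns into an ssh:// URL.
    std::cout << std::regex_replace(uri, scp_uri, "$1@$2/$3") << "\n";
}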

View file

@ -10,11 +10,11 @@
namespace nix {
void printValueAsJSON(EvalState & state, bool strict,
Value & v, JSONPlaceholder & out, PathSet & context)
Value & v, const Pos & pos, JSONPlaceholder & out, PathSet & context)
{
checkInterrupt();
if (strict) state.forceValue(v);
if (strict) state.forceValue(v, pos);
switch (v.type()) {
@ -40,7 +40,7 @@ void printValueAsJSON(EvalState & state, bool strict,
break;
case nAttrs: {
auto maybeString = state.tryAttrsToString(noPos, v, context, false, false);
auto maybeString = state.tryAttrsToString(pos, v, context, false, false);
if (maybeString) {
out.write(*maybeString);
break;
@ -54,10 +54,10 @@ void printValueAsJSON(EvalState & state, bool strict,
for (auto & j : names) {
Attr & a(*v.attrs->find(state.symbols.create(j)));
auto placeholder(obj.placeholder(j));
printValueAsJSON(state, strict, *a.value, placeholder, context);
printValueAsJSON(state, strict, *a.value, *a.pos, placeholder, context);
}
} else
printValueAsJSON(state, strict, *i->value, out, context);
printValueAsJSON(state, strict, *i->value, *i->pos, out, context);
break;
}
@ -65,7 +65,7 @@ void printValueAsJSON(EvalState & state, bool strict,
auto list(out.list());
for (unsigned int n = 0; n < v.listSize(); ++n) {
auto placeholder(list.placeholder());
printValueAsJSON(state, strict, *v.listElems()[n], placeholder, context);
printValueAsJSON(state, strict, *v.listElems()[n], pos, placeholder, context);
}
break;
}
@ -79,18 +79,20 @@ void printValueAsJSON(EvalState & state, bool strict,
break;
case nThunk:
throw TypeError("cannot convert %1% to JSON", showType(v));
case nFunction:
throw TypeError("cannot convert %1% to JSON", showType(v));
auto e = TypeError({
.msg = hintfmt("cannot convert %1% to JSON", showType(v)),
.errPos = v.determinePos(pos)
});
throw e.addTrace(pos, hintfmt("message for the trace"));
}
}
void printValueAsJSON(EvalState & state, bool strict,
Value & v, std::ostream & str, PathSet & context)
Value & v, const Pos & pos, std::ostream & str, PathSet & context)
{
JSONPlaceholder out(str);
printValueAsJSON(state, strict, v, out, context);
printValueAsJSON(state, strict, v, pos, out, context);
}
void ExternalValueBase::printValueAsJSON(EvalState & state, bool strict,

View file

@ -11,9 +11,9 @@ namespace nix {
class JSONPlaceholder;
void printValueAsJSON(EvalState & state, bool strict,
Value & v, JSONPlaceholder & out, PathSet & context);
Value & v, const Pos & pos, JSONPlaceholder & out, PathSet & context);
void printValueAsJSON(EvalState & state, bool strict,
Value & v, std::ostream & str, PathSet & context);
Value & v, const Pos & pos, std::ostream & str, PathSet & context);
}

View file

@ -18,7 +18,8 @@ static XMLAttrs singletonAttrs(const string & name, const string & value)
static void printValueAsXML(EvalState & state, bool strict, bool location,
Value & v, XMLWriter & doc, PathSet & context, PathSet & drvsSeen);
Value & v, XMLWriter & doc, PathSet & context, PathSet & drvsSeen,
const Pos & pos);
static void posToXML(XMLAttrs & xmlAttrs, const Pos & pos)
@ -46,17 +47,18 @@ static void showAttrs(EvalState & state, bool strict, bool location,
XMLOpenElement _(doc, "attr", xmlAttrs);
printValueAsXML(state, strict, location,
*a.value, doc, context, drvsSeen);
*a.value, doc, context, drvsSeen, *a.pos);
}
}
static void printValueAsXML(EvalState & state, bool strict, bool location,
Value & v, XMLWriter & doc, PathSet & context, PathSet & drvsSeen)
Value & v, XMLWriter & doc, PathSet & context, PathSet & drvsSeen,
const Pos & pos)
{
checkInterrupt();
if (strict) state.forceValue(v);
if (strict) state.forceValue(v, pos);
switch (v.type()) {
@ -91,14 +93,14 @@ static void printValueAsXML(EvalState & state, bool strict, bool location,
Path drvPath;
a = v.attrs->find(state.sDrvPath);
if (a != v.attrs->end()) {
if (strict) state.forceValue(*a->value);
if (strict) state.forceValue(*a->value, *a->pos);
if (a->value->type() == nString)
xmlAttrs["drvPath"] = drvPath = a->value->string.s;
}
a = v.attrs->find(state.sOutPath);
if (a != v.attrs->end()) {
if (strict) state.forceValue(*a->value);
if (strict) state.forceValue(*a->value, *a->pos);
if (a->value->type() == nString)
xmlAttrs["outPath"] = a->value->string.s;
}
@ -121,7 +123,7 @@ static void printValueAsXML(EvalState & state, bool strict, bool location,
case nList: {
XMLOpenElement _(doc, "list");
for (unsigned int n = 0; n < v.listSize(); ++n)
printValueAsXML(state, strict, location, *v.listElems()[n], doc, context, drvsSeen);
printValueAsXML(state, strict, location, *v.listElems()[n], doc, context, drvsSeen, pos);
break;
}
@ -149,7 +151,7 @@ static void printValueAsXML(EvalState & state, bool strict, bool location,
}
case nExternal:
v.external->printValueAsXML(state, strict, location, doc, context, drvsSeen);
v.external->printValueAsXML(state, strict, location, doc, context, drvsSeen, pos);
break;
case nFloat:
@ -163,19 +165,20 @@ static void printValueAsXML(EvalState & state, bool strict, bool location,
void ExternalValueBase::printValueAsXML(EvalState & state, bool strict,
bool location, XMLWriter & doc, PathSet & context, PathSet & drvsSeen) const
bool location, XMLWriter & doc, PathSet & context, PathSet & drvsSeen,
const Pos & pos) const
{
doc.writeEmptyElement("unevaluated");
}
void printValueAsXML(EvalState & state, bool strict, bool location,
Value & v, std::ostream & out, PathSet & context)
Value & v, std::ostream & out, PathSet & context, const Pos & pos)
{
XMLWriter doc(true, out);
XMLOpenElement root(doc, "expr");
PathSet drvsSeen;
printValueAsXML(state, strict, location, v, doc, context, drvsSeen);
printValueAsXML(state, strict, location, v, doc, context, drvsSeen, pos);
}

View file

@ -9,6 +9,6 @@
namespace nix {
void printValueAsXML(EvalState & state, bool strict, bool location,
Value & v, std::ostream & out, PathSet & context);
Value & v, std::ostream & out, PathSet & context, const Pos & pos);
}

View file

@ -94,7 +94,8 @@ class ExternalValueBase
/* Print the value as XML. Defaults to unevaluated */
virtual void printValueAsXML(EvalState & state, bool strict, bool location,
XMLWriter & doc, PathSet & context, PathSet & drvsSeen) const;
XMLWriter & doc, PathSet & context, PathSet & drvsSeen,
const Pos & pos) const;
virtual ~ExternalValueBase()
{

View file

@ -51,7 +51,7 @@ struct GitInputScheme : InputScheme
for (auto &[name, value] : url.query) {
if (name == "rev" || name == "ref")
attrs.emplace(name, value);
else if (name == "shallow")
else if (name == "shallow" || name == "submodules")
attrs.emplace(name, Explicit<bool> { value == "1" });
else
url2.query.emplace(name, value);
@ -324,17 +324,13 @@ struct GitInputScheme : InputScheme
Path cacheDir = getCacheDir() + "/nix/gitv3/" + hashString(htSHA256, actualUrl).to_string(Base32, false);
repoDir = cacheDir;
Path cacheDirLock = cacheDir + ".lock";
createDirs(dirOf(cacheDir));
AutoCloseFD lock = openLockFile(cacheDirLock, true);
lockFile(lock.get(), ltWrite, true);
PathLocks cacheDirLock({cacheDir + ".lock"});
if (!pathExists(cacheDir)) {
runProgram("git", true, { "-c", "init.defaultBranch=" + gitInitialBranch, "init", "--bare", repoDir });
}
deleteLockFile(cacheDirLock, lock.get());
Path localRefFile =
input.getRef()->compare(0, 5, "refs/") == 0
? cacheDir + "/" + *input.getRef()
@ -399,6 +395,8 @@ struct GitInputScheme : InputScheme
if (!input.getRev())
input.attrs.insert_or_assign("rev", Hash::parseAny(chomp(readFile(localRefFile)), htSHA1).gitRev());
// cache dir lock is removed at scope end; we will only use read-only operations on specific revisions in the remainder
}
bool isShallow = chomp(runProgram("git", true, { "-C", repoDir, "rev-parse", "--is-shallow-repository" })) == "true";

View file

@ -300,7 +300,7 @@ struct GitLabInputScheme : GitArchiveInputScheme
if ("PAT" == token.substr(0, fldsplit))
return std::make_pair("Private-token", token.substr(fldsplit+1));
warn("Unrecognized GitLab token type %s", token.substr(0, fldsplit));
return std::nullopt;
return std::make_pair(token.substr(0,fldsplit), token.substr(fldsplit+1));
}
Hash getRevFromRef(nix::ref<Store> store, const Input & input) const override

View file

@ -308,16 +308,17 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource
}
StorePath BinaryCacheStore::addToStoreFromDump(Source & dump, const string & name,
FileIngestionMethod method, HashType hashAlgo, RepairFlag repair)
FileIngestionMethod method, HashType hashAlgo, RepairFlag repair, const StorePathSet & references)
{
if (method != FileIngestionMethod::Recursive || hashAlgo != htSHA256)
unsupported("addToStoreFromDump");
return addToStoreCommon(dump, repair, CheckSigs, [&](HashResult nar) {
ValidPathInfo info {
makeFixedOutputPath(method, nar.first, name),
makeFixedOutputPath(method, nar.first, name, references),
nar.first,
};
info.narSize = nar.second;
info.references = references;
return info;
})->path;
}
@ -385,7 +386,7 @@ void BinaryCacheStore::queryPathInfoUncached(const StorePath & storePath,
}
StorePath BinaryCacheStore::addToStore(const string & name, const Path & srcPath,
FileIngestionMethod method, HashType hashAlgo, PathFilter & filter, RepairFlag repair)
FileIngestionMethod method, HashType hashAlgo, PathFilter & filter, RepairFlag repair, const StorePathSet & references)
{
/* FIXME: Make BinaryCacheStore::addToStoreCommon support
non-recursive+sha256 so we can just use the default
@ -404,10 +405,11 @@ StorePath BinaryCacheStore::addToStore(const string & name, const Path & srcPath
});
return addToStoreCommon(*source, repair, CheckSigs, [&](HashResult nar) {
ValidPathInfo info {
makeFixedOutputPath(method, h, name),
makeFixedOutputPath(method, h, name, references),
nar.first,
};
info.narSize = nar.second;
info.references = references;
info.ca = FixedOutputHash {
.method = method,
.hash = h,

View file

@ -97,11 +97,11 @@ public:
RepairFlag repair, CheckSigsFlag checkSigs) override;
StorePath addToStoreFromDump(Source & dump, const string & name,
FileIngestionMethod method, HashType hashAlgo, RepairFlag repair) override;
FileIngestionMethod method, HashType hashAlgo, RepairFlag repair, const StorePathSet & references ) override;
StorePath addToStore(const string & name, const Path & srcPath,
FileIngestionMethod method, HashType hashAlgo,
PathFilter & filter, RepairFlag repair) override;
PathFilter & filter, RepairFlag repair, const StorePathSet & references) override;
StorePath addTextToStore(const string & name, const string & s,
const StorePathSet & references, RepairFlag repair) override;

View file

@ -260,6 +260,7 @@ void LocalDerivationGoal::cleanupHookFinally()
void LocalDerivationGoal::cleanupPreChildKill()
{
sandboxMountNamespace = -1;
sandboxUserNamespace = -1;
}
@ -342,7 +343,7 @@ int childEntry(void * arg)
return 1;
}
#if __linux__
static void linkOrCopy(const Path & from, const Path & to)
{
if (link(from.c_str(), to.c_str()) == -1) {
@ -358,6 +359,7 @@ static void linkOrCopy(const Path & from, const Path & to)
copyPath(from, to);
}
}
#endif
void LocalDerivationGoal::startBuilder()
@ -906,11 +908,14 @@ void LocalDerivationGoal::startBuilder()
"nobody:x:65534:65534:Nobody:/:/noshell\n",
sandboxUid(), sandboxGid(), settings.sandboxBuildDir));
/* Save the mount namespace of the child. We have to do this
/* Save the mount- and user namespace of the child. We have to do this
*before* the child does a chroot. */
sandboxMountNamespace = open(fmt("/proc/%d/ns/mnt", (pid_t) pid).c_str(), O_RDONLY);
if (sandboxMountNamespace.get() == -1)
throw SysError("getting sandbox mount namespace");
sandboxUserNamespace = open(fmt("/proc/%d/ns/user", (pid_t) pid).c_str(), O_RDONLY);
if (sandboxUserNamespace.get() == -1)
throw SysError("getting sandbox user namespace");
/* Signal the builder that we've updated its user namespace. */
writeFull(userNamespaceSync.writeSide.get(), "1");
@ -918,7 +923,9 @@ void LocalDerivationGoal::startBuilder()
} else
#endif
{
#if __linux__
fallback:
#endif
pid = startProcess([&]() {
runChild();
});
@ -1180,7 +1187,8 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual Lo
StorePath addToStore(const string & name, const Path & srcPath,
FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256,
PathFilter & filter = defaultPathFilter, RepairFlag repair = NoRepair) override
PathFilter & filter = defaultPathFilter, RepairFlag repair = NoRepair,
const StorePathSet & references = StorePathSet()) override
{ throw Error("addToStore"); }
void addToStore(const ValidPathInfo & info, Source & narSource,
@ -1199,9 +1207,10 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual Lo
}
StorePath addToStoreFromDump(Source & dump, const string & name,
FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair) override
FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair,
const StorePathSet & references = StorePathSet()) override
{
auto path = next->addToStoreFromDump(dump, name, method, hashAlgo, repair);
auto path = next->addToStoreFromDump(dump, name, method, hashAlgo, repair, references);
goal.addDependency(path);
return path;
}
@ -1419,7 +1428,7 @@ void LocalDerivationGoal::addDependency(const StorePath & path)
Path source = worker.store.Store::toRealPath(path);
Path target = chrootRootDir + worker.store.printStorePath(path);
debug("bind-mounting %s -> %s", target, source);
debug("bind-mounting %s -> %s", source, target);
if (pathExists(target))
throw Error("store path '%s' already exists in the sandbox", worker.store.printStorePath(path));
@ -1434,6 +1443,9 @@ void LocalDerivationGoal::addDependency(const StorePath & path)
child process.*/
Pid child(startProcess([&]() {
if (usingUserNamespace && (setns(sandboxUserNamespace.get(), 0) == -1))
throw SysError("entering sandbox user namespace");
if (setns(sandboxMountNamespace.get(), 0) == -1)
throw SysError("entering sandbox mount namespace");
@ -1993,7 +2005,7 @@ void LocalDerivationGoal::runChild()
else if (drv->builder == "builtin:unpack-channel")
builtinUnpackChannel(drv2);
else
throw Error("unsupported builtin function '%1%'", string(drv->builder, 8));
throw Error("unsupported builtin builder '%1%'", string(drv->builder, 8));
_exit(0);
} catch (std::exception & e) {
writeFull(STDERR_FILENO, e.what() + std::string("\n"));

View file

@ -27,9 +27,10 @@ struct LocalDerivationGoal : public DerivationGoal
/* Pipe for synchronising updates to the builder namespaces. */
Pipe userNamespaceSync;
/* The mount namespace of the builder, used to add additional
/* The mount namespace and user namespace of the builder, used to add additional
paths to the sandbox as a result of recursive Nix calls. */
AutoCloseFD sandboxMountNamespace;
AutoCloseFD sandboxUserNamespace;
/* On Linux, whether we're doing the build in its own user
namespace. */

View file

@ -120,8 +120,10 @@ ContentAddress parseContentAddress(std::string_view rawCa) {
ContentAddressMethod parseContentAddressMethod(std::string_view caMethod)
{
std::string_view asPrefix {std::string{caMethod} + ":"};
return parseContentAddressMethodPrefix(asPrefix);
std::string asPrefix = std::string{caMethod} + ":";
// parseContentAddressMethodPrefix takes its argument by reference
std::string_view asPrefixView = asPrefix;
return parseContentAddressMethodPrefix(asPrefixView);
}
std::optional<ContentAddress> parseContentAddressOpt(std::string_view rawCaOpt)

View file

@ -403,9 +403,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
return store->queryPathInfo(path);
},
[&](FixedOutputHashMethod & fohm) {
if (!refs.empty())
throw UnimplementedError("cannot yet have refs with flat or nar-hashed data");
auto path = store->addToStoreFromDump(source, name, fohm.fileIngestionMethod, fohm.hashType, repair);
auto path = store->addToStoreFromDump(source, name, fohm.fileIngestionMethod, fohm.hashType, repair, refs);
return store->queryPathInfo(path);
},
}, contentAddressMethod);

View file

@ -544,6 +544,14 @@ struct curlFileTransfer : public FileTransfer
stopWorkerThread();
});
#ifdef __linux__
/* Cause this thread to not share any FS attributes with the main thread,
because this causes setns() in restoreMountNamespace() to fail.
Ideally, this would happen in the std::thread() constructor. */
if (unshare(CLONE_FS) != 0)
throw SysError("unsharing filesystem state in download thread");
#endif
std::map<CURL *, std::shared_ptr<TransferItem>> items;
bool quit = false;

View file

@ -324,6 +324,7 @@ static string quoteRegexChars(const string & raw)
return std::regex_replace(raw, specialRegex, R"(\$&)");
}
#if __linux__
static void readFileRoots(const char * path, UncheckedRoots & roots)
{
try {
@ -333,6 +334,7 @@ static void readFileRoots(const char * path, UncheckedRoots & roots)
throw;
}
}
#endif
void LocalStore::findRuntimeRoots(Roots & roots, bool censor)
{
@ -414,7 +416,7 @@ void LocalStore::findRuntimeRoots(Roots & roots, bool censor)
}
#endif
#if defined(__linux__)
#if __linux__
readFileRoots("/proc/sys/kernel/modprobe", unchecked);
readFileRoots("/proc/sys/kernel/fbsplash", unchecked);
readFileRoots("/proc/sys/kernel/poweroff_cmd", unchecked);

View file

@ -951,6 +951,9 @@ public:
Setting<bool> useRegistries{this, true, "use-registries",
"Whether to use flake registries to resolve flake references."};
Setting<bool> acceptFlakeConfig{this, false, "accept-flake-config",
"Whether to accept nix configuration from a flake without prompting."};
};

View file

@ -227,7 +227,7 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor
StorePath addToStore(const string & name, const Path & srcPath,
FileIngestionMethod method, HashType hashAlgo,
PathFilter & filter, RepairFlag repair) override
PathFilter & filter, RepairFlag repair, const StorePathSet & references) override
{ unsupported("addToStore"); }
StorePath addTextToStore(const string & name, const string & s,

View file

@ -504,9 +504,6 @@ void LocalStore::makeStoreWritable()
throw SysError("getting info about the Nix store mount point");
if (stat.f_flag & ST_RDONLY) {
if (unshare(CLONE_NEWNS) == -1)
throw SysError("setting up a private mount namespace");
if (mount(0, realStoreDir.get().c_str(), "none", MS_REMOUNT | MS_BIND, 0) == -1)
throw SysError("remounting %1% writable", realStoreDir);
}
@ -1311,7 +1308,7 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
StorePath LocalStore::addToStoreFromDump(Source & source0, const string & name,
FileIngestionMethod method, HashType hashAlgo, RepairFlag repair)
FileIngestionMethod method, HashType hashAlgo, RepairFlag repair, const StorePathSet & references)
{
/* For computing the store path. */
auto hashSink = std::make_unique<HashSink>(hashAlgo);
@ -1367,7 +1364,7 @@ StorePath LocalStore::addToStoreFromDump(Source & source0, const string & name,
auto [hash, size] = hashSink->finish();
auto dstPath = makeFixedOutputPath(method, hash, name);
auto dstPath = makeFixedOutputPath(method, hash, name, references);
addTempRoot(dstPath);
@ -1414,6 +1411,7 @@ StorePath LocalStore::addToStoreFromDump(Source & source0, const string & name,
ValidPathInfo info { dstPath, narHash.first };
info.narSize = narHash.second;
info.references = references;
info.ca = FixedOutputHash { .method = method, .hash = hash };
registerValidPath(info);
}

View file

@ -145,7 +145,7 @@ public:
RepairFlag repair, CheckSigsFlag checkSigs) override;
StorePath addToStoreFromDump(Source & dump, const string & name,
FileIngestionMethod method, HashType hashAlgo, RepairFlag repair) override;
FileIngestionMethod method, HashType hashAlgo, RepairFlag repair, const StorePathSet & references) override;
StorePath addTextToStore(const string & name, const string & s,
const StorePathSet & references, RepairFlag repair) override;

View file

@ -54,12 +54,12 @@ void RefScanSink::operator () (std::string_view data)
fragment, so search in the concatenation of the tail of the
previous fragment and the start of the current fragment. */
auto s = tail;
s.append(data.data(), refLength);
auto tailLen = std::min(data.size(), refLength);
s.append(data.data(), tailLen);
search(s, hashes, seen);
search(data, hashes, seen);
auto tailLen = std::min(data.size(), refLength);
auto rest = refLength - tailLen;
if (rest < tail.size())
tail = tail.substr(tail.size() - rest);
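The clamp to the minimum of the fragment size and refLength above matters when a fragment is shorter than a reference; without it, append() would read past the end of the fragment. A small standalone check of the boundary-spanning search the comment describes (the hash string is made up):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <string>

int main()
{
    const std::string needle = "HASHHASH";  // pretend 8-byte reference
    const std::string prev = "....HASH";    // previous fragment ends with the first half
    const std::string cur = "HASH....";     // current fragment starts with the second half
    const std::size_t refLength = needle.size();

    // Neither fragment on its own contains the reference...
    assert(prev.find(needle) == std::string::npos);
    assert(cur.find(needle) == std::string::npos);

    // ...but the tail of the previous fragment plus the (clamped) head of the
    // current one does, which is what the sink searches before the fragment itself.
    std::string tail = prev.substr(prev.size() - std::min(prev.size(), refLength));
    std::size_t tailLen = std::min(cur.size(), refLength);
    std::string s = tail + cur.substr(0, tailLen);
    assert(s.find(needle) != std::string::npos);
}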

View file

@ -290,6 +290,10 @@ ConnectionHandle RemoteStore::getConnection()
return ConnectionHandle(connections->get());
}
void RemoteStore::setOptions()
{
setOptions(*(getConnection().handle));
}
bool RemoteStore::isValidPathUncached(const StorePath & path)
{
@ -578,9 +582,8 @@ ref<const ValidPathInfo> RemoteStore::addCAToStore(
StorePath RemoteStore::addToStoreFromDump(Source & dump, const string & name,
FileIngestionMethod method, HashType hashType, RepairFlag repair)
FileIngestionMethod method, HashType hashType, RepairFlag repair, const StorePathSet & references)
{
StorePathSet references;
return addCAToStore(dump, name, FixedOutputHashMethod{ .fileIngestionMethod = method, .hashType = hashType }, references, repair)->path;
}

View file

@ -73,7 +73,7 @@ public:
/* Add a content-addressable store path. Does not support references. `dump` will be drained. */
StorePath addToStoreFromDump(Source & dump, const string & name,
FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair) override;
FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair, const StorePathSet & references = StorePathSet()) override;
void addToStore(const ValidPathInfo & info, Source & nar,
RepairFlag repair, CheckSigsFlag checkSigs) override;
@ -148,6 +148,8 @@ protected:
virtual void setOptions(Connection & conn);
void setOptions() override;
ConnectionHandle getConnection();
friend struct ConnectionHandle;

View file

@ -100,4 +100,5 @@
; Allow Rosetta 2 to run x86_64 binaries on aarch64-darwin.
(allow file-read*
(subpath "/Library/Apple/usr/libexec/oah"))
(subpath "/Library/Apple/usr/libexec/oah")
(subpath "/System/Library/Apple/usr/libexec/oah"))

View file

@ -1,4 +1,5 @@
#include "sqlite.hh"
#include "globals.hh"
#include "util.hh"
#include <sqlite3.h>
@ -27,8 +28,12 @@ namespace nix {
SQLite::SQLite(const Path & path, bool create)
{
// useSQLiteWAL also indicates what virtual file system we need. Using
// `unix-dotfile` is needed on NFS file systems and on the Windows Subsystem
// for Linux (WSL), where useSQLiteWAL should be false by default.
const char *vfs = settings.useSQLiteWAL ? 0 : "unix-dotfile";
if (sqlite3_open_v2(path.c_str(), &db,
SQLITE_OPEN_READWRITE | (create ? SQLITE_OPEN_CREATE : 0), 0) != SQLITE_OK)
SQLITE_OPEN_READWRITE | (create ? SQLITE_OPEN_CREATE : 0), vfs) != SQLITE_OK)
throw Error("cannot open SQLite database '%s'", path);
if (sqlite3_busy_timeout(db, 60 * 60 * 1000) != SQLITE_OK)

View file

@ -237,7 +237,7 @@ StorePath Store::computeStorePathForText(const string & name, const string & s,
StorePath Store::addToStore(const string & name, const Path & _srcPath,
FileIngestionMethod method, HashType hashAlgo, PathFilter & filter, RepairFlag repair)
FileIngestionMethod method, HashType hashAlgo, PathFilter & filter, RepairFlag repair, const StorePathSet & references)
{
Path srcPath(absPath(_srcPath));
auto source = sinkToSource([&](Sink & sink) {
@ -246,7 +246,7 @@ StorePath Store::addToStore(const string & name, const Path & _srcPath,
else
readFile(srcPath, sink);
});
return addToStoreFromDump(*source, name, method, hashAlgo, repair);
return addToStoreFromDump(*source, name, method, hashAlgo, repair, references);
}

View file

@ -460,7 +460,7 @@ public:
libutil/archive.hh). */
virtual StorePath addToStore(const string & name, const Path & srcPath,
FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256,
PathFilter & filter = defaultPathFilter, RepairFlag repair = NoRepair);
PathFilter & filter = defaultPathFilter, RepairFlag repair = NoRepair, const StorePathSet & references = StorePathSet());
/* Copy the contents of a path to the store and register the
validity the resulting path, using a constant amount of
@ -476,7 +476,8 @@ public:
`dump` may be drained */
// FIXME: remove?
virtual StorePath addToStoreFromDump(Source & dump, const string & name,
FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair)
FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair,
const StorePathSet & references = StorePathSet())
{ unsupported("addToStoreFromDump"); }
/* Like addToStore, but the contents written to the output path is
@ -732,6 +733,11 @@ public:
virtual void createUser(const std::string & userName, uid_t userId)
{ }
/*
* Synchronises the options of the client with those of the daemon
* (a no-op when there's no daemon)
*/
virtual void setOptions() { }
protected:
Stats stats;

View file

@ -562,7 +562,7 @@ Path getConfigDir()
std::vector<Path> getConfigDirs()
{
Path configHome = getConfigDir();
string configDirs = getEnv("XDG_CONFIG_DIRS").value_or("");
string configDirs = getEnv("XDG_CONFIG_DIRS").value_or("/etc/xdg");
std::vector<Path> result = tokenizeString<std::vector<string>>(configDirs, ":");
result.insert(result.begin(), configHome);
return result;
@ -1632,9 +1632,39 @@ void setStackSize(size_t stackSize)
#endif
}
void restoreProcessContext()
static AutoCloseFD fdSavedMountNamespace;
void saveMountNamespace()
{
#if __linux__
static std::once_flag done;
std::call_once(done, []() {
AutoCloseFD fd = open("/proc/self/ns/mnt", O_RDONLY);
if (!fd)
throw SysError("saving parent mount namespace");
fdSavedMountNamespace = std::move(fd);
});
#endif
}
void restoreMountNamespace()
{
#if __linux__
try {
if (fdSavedMountNamespace && setns(fdSavedMountNamespace.get(), CLONE_NEWNS) == -1)
throw SysError("restoring parent mount namespace");
} catch (Error & e) {
debug(e.msg());
}
#endif
}
void restoreProcessContext(bool restoreMounts)
{
restoreSignals();
if (restoreMounts) {
restoreMountNamespace();
}
restoreAffinity();
@ -1774,7 +1804,7 @@ void commonChildInit(Pipe & logPipe)
logger = makeSimpleLogger();
const static string pathNullDevice = "/dev/null";
restoreProcessContext();
restoreProcessContext(false);
/* Put the child in a separate session (and thus a separate
process group) so that it has no controlling terminal (meaning

View file

@ -300,7 +300,15 @@ void setStackSize(size_t stackSize);
/* Restore the original inherited Unix process context (such as signal
masks, stack size, CPU affinity). */
void restoreProcessContext();
void restoreProcessContext(bool restoreMounts = true);
/* Save the current mount namespace. Ignored if called more than
once. */
void saveMountNamespace();
/* Restore the mount namespace saved by saveMountNamespace(). Ignored
if saveMountNamespace() was never called. */
void restoreMountNamespace();
class ExecError : public Error

View file

@ -105,7 +105,8 @@ static void main_nix_build(int argc, char * * argv)
// List of environment variables kept for --pure
std::set<string> keepVars{
"HOME", "USER", "LOGNAME", "DISPLAY", "PATH", "TERM", "IN_NIX_SHELL",
"HOME", "XDG_RUNTIME_DIR", "USER", "LOGNAME", "DISPLAY",
"WAYLAND_DISPLAY", "WAYLAND_SOCKET", "PATH", "TERM", "IN_NIX_SHELL",
"NIX_SHELL_PRESERVE_PROMPT", "TZ", "PAGER", "NIX_BUILD_SHELL", "SHLVL",
"http_proxy", "https_proxy", "ftp_proxy", "all_proxy", "no_proxy"
};
@@ -879,7 +879,7 @@ static void queryJSON(Globals & globals, vector<DrvInfo> & elems)
placeholder.write(nullptr);
} else {
PathSet context;
printValueAsJSON(*globals.state, true, *v, placeholder, context);
printValueAsJSON(*globals.state, true, *v, noPos, placeholder, context);
}
}
}
@@ -50,9 +50,9 @@ void processExpr(EvalState & state, const Strings & attrPaths,
else
state.autoCallFunction(autoArgs, v, vRes);
if (output == okXML)
printValueAsXML(state, strict, location, vRes, std::cout, context);
printValueAsXML(state, strict, location, vRes, std::cout, context, noPos);
else if (output == okJSON)
printValueAsJSON(state, strict, vRes, std::cout, context);
printValueAsJSON(state, strict, vRes, v.determinePos(noPos), std::cout, context);
else {
if (strict) state.forceValueDeep(vRes);
std::cout << vRes << std::endl;
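For reference, a sketch of the new call shape (assuming libexpr's headers; variable names are illustrative), mirroring the change above: the value's source position is now threaded through alongside the output sink.

PathSet context;
printValueAsJSON(state, /* strict */ true, vRes,
    vRes.determinePos(noPos),            // new position argument
    std::cout, context);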
@@ -112,7 +112,7 @@ struct CmdEval : MixJSON, InstallableCommand
else if (json) {
JSONPlaceholder jsonOut(std::cout);
printValueAsJSON(*state, true, *v, jsonOut, context);
printValueAsJSON(*state, true, *v, pos, jsonOut, context);
}
else {
@@ -31,38 +31,38 @@ at the first error.
The following flake output attributes must be derivations:
* `checks.`*system*`.`*name*
* `defaultPackage.`*system*`
* `devShell.`*system*`
* `devShells.`*system*`.`*name*`
* `nixosConfigurations.`*name*`.config.system.build.toplevel
* `defaultPackage.`*system*
* `devShell.`*system*
* `devShells.`*system*`.`*name*
* `nixosConfigurations.`*name*`.config.system.build.toplevel`
* `packages.`*system*`.`*name*
The following flake output attributes must be [app
definitions](./nix3-run.md):
* `apps.`*system*`.`*name*
* `defaultApp.`*system*`
* `defaultApp.`*system*
The following flake output attributes must be [template
definitions](./nix3-flake-init.md):
* `defaultTemplate`
* `templates`.`*name*
* `templates.`*name*
The following flake output attributes must be *Nixpkgs overlays*:
* `overlay`
* `overlays`.`*name*
* `overlays.`*name*
The following flake output attributes must be *NixOS modules*:
* `nixosModule`
* `nixosModules`.`*name*
* `nixosModules.`*name*
The following flake output attributes must be
[bundlers](./nix3-bundle.md):
* `bundlers`.`*name*
* `bundlers.`*name*
* `defaultBundler`
In addition, the `hydraJobs` output is evaluated in the same way as
@@ -252,6 +252,14 @@ struct CmdFlakeInfo : CmdFlakeMetadata
}
};
static bool argHasName(std::string_view arg, std::string_view expected)
{
return
arg == expected
|| arg == "_"
|| (hasPrefix(arg, "_") && arg.substr(1) == expected);
}
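// Illustration only (not part of the change; assumes <cassert>): the argument
// names the helper above accepts, as used by the overlay check below.
static void exampleArgHasName()
{
    assert(argHasName("final", "final"));   // exact match
    assert(argHasName("_", "final"));       // deliberately unused argument
    assert(argHasName("_final", "final"));  // underscore-prefixed variant
    assert(!argHasName("prev", "final"));
}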
struct CmdFlakeCheck : FlakeCommand
{
bool build = true;
@@ -346,10 +354,14 @@ struct CmdFlakeCheck : FlakeCommand
auto checkOverlay = [&](const std::string & attrPath, Value & v, const Pos & pos) {
try {
state->forceValue(v, pos);
if (!v.isLambda() || v.lambda.fun->hasFormals() || std::string(v.lambda.fun->arg) != "final")
if (!v.isLambda()
|| v.lambda.fun->hasFormals()
|| !argHasName(v.lambda.fun->arg, "final"))
throw Error("overlay does not take an argument named 'final'");
auto body = dynamic_cast<ExprLambda *>(v.lambda.fun->body);
if (!body || body->hasFormals() || std::string(body->arg) != "prev")
if (!body
|| body->hasFormals()
|| !argHasName(body->arg, "prev"))
throw Error("overlay does not take an argument named 'prev'");
// FIXME: if we have a 'nixpkgs' input, use it to
// evaluate the overlay.
@@ -1040,7 +1052,8 @@ struct CmdFlakeShow : FlakeCommand, MixJSON
(attrPath.size() == 1 && attrPath[0] == "overlay")
|| (attrPath.size() == 2 && attrPath[0] == "overlays") ? std::make_pair("nixpkgs-overlay", "Nixpkgs overlay") :
attrPath.size() == 2 && attrPath[0] == "nixosConfigurations" ? std::make_pair("nixos-configuration", "NixOS configuration") :
attrPath.size() == 2 && attrPath[0] == "nixosModules" ? std::make_pair("nixos-module", "NixOS module") :
(attrPath.size() == 1 && attrPath[0] == "nixosModule")
|| (attrPath.size() == 2 && attrPath[0] == "nixosModules") ? std::make_pair("nixos-module", "NixOS module") :
std::make_pair("unknown", "unknown");
if (json) {
j.emplace("type", type);
@@ -255,6 +255,16 @@ void mainWrapped(int argc, char * * argv)
initNix();
initGC();
#if __linux__
if (getuid() == 0) {
try {
saveMountNamespace();
if (unshare(CLONE_NEWNS) == -1)
throw SysError("setting up a private mount namespace");
} catch (Error & e) { }
}
#endif
programPath = argv[0];
auto programName = std::string(baseNameOf(programPath));
@@ -226,6 +226,7 @@ struct CmdRegistry : virtual NixMultiCommand
void run() override
{
settings.requireExperimentalFeature(Xp::Flakes);
if (!command)
throw UsageError("'nix registry' requires a sub-command.");
command->second->prepare();
@@ -471,7 +471,10 @@ bool NixRepl::processLine(string line)
auto args = editorFor(pos);
auto editor = args.front();
args.pop_front();
runProgram(editor, true, args);
// runProgram redirects stdout to a StringSink,
// using runProgram2 to allow editors to display their UI
runProgram2(RunOptions { .program = editor, .searchPath = true, .args = args });
// Reload right after exiting the editor
state->resetFileCache();
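As an aside, a minimal sketch of the same pattern (illustrative only; the program and argument are made up), showing why runProgram2 is preferred here: stdout stays attached to the terminal, so an interactive program can draw its UI.

runProgram2(RunOptions {
    .program = "less",        // any interactive program
    .searchPath = true,
    .args = {"/etc/hosts"},
});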
@@ -504,8 +507,8 @@ bool NixRepl::processLine(string line)
state->store->buildPaths({DerivedPath::Built{drvPath}});
auto drv = state->store->readDerivation(drvPath);
logger->cout("\nThis derivation produced the following outputs:");
for (auto & i : drv.outputsAndOptPaths(*state->store))
logger->cout(" %s -> %s", i.first, state->store->printStorePath(*i.second.second));
for (auto & [outputName, outputPath] : state->store->queryDerivationOutputMap(drvPath))
logger->cout(" %s -> %s", outputName, state->store->printStorePath(outputPath));
} else if (command == ":i") {
runNix("nix-env", {"-i", drvPathRaw});
} else {
@@ -644,7 +647,8 @@ void NixRepl::addVarToScope(const Symbol & name, Value & v)
{
if (displ >= envSize)
throw Error("environment full; cannot add more variables");
staticEnv.vars[name] = displ;
staticEnv.vars.emplace_back(name, displ);
staticEnv.sort();
env->values[displ++] = &v;
varNames.insert((string) name);
}
@@ -218,8 +218,7 @@ struct CmdKey : NixMultiCommand
void run() override
{
if (!command)
throw UsageError("'nix flake' requires a sub-command.");
settings.requireExperimentalFeature(Xp::Flakes);
throw UsageError("'nix key' requires a sub-command.");
command->second->prepare();
command->second->run();
}
5
tests/ca/repl.sh Normal file
@@ -0,0 +1,5 @@
source common.sh
export NIX_TESTS_CA_BY_DEFAULT=1
cd .. && source repl.sh
@@ -36,8 +36,9 @@ export PATH=@bindir@:$PATH
if [[ -n "${NIX_CLIENT_PACKAGE:-}" ]]; then
export PATH="$NIX_CLIENT_PACKAGE/bin":$PATH
fi
DAEMON_PATH="$PATH"
if [[ -n "${NIX_DAEMON_PACKAGE:-}" ]]; then
export NIX_DAEMON_COMMAND="$NIX_DAEMON_PACKAGE/bin/nix-daemon"
DAEMON_PATH="${NIX_DAEMON_PACKAGE}/bin:$DAEMON_PATH"
fi
coreutils=@coreutils@
@@ -89,7 +90,7 @@ startDaemon() {
# Start the daemon, wait for the socket to appear. !!!
# nix-daemon should have an option to fork into the background.
rm -f $NIX_DAEMON_SOCKET_PATH
${NIX_DAEMON_COMMAND:-nix daemon} &
PATH=$DAEMON_PATH nix daemon &
for ((i = 0; i < 30; i++)); do
if [[ -S $NIX_DAEMON_SOCKET_PATH ]]; then break; fi
sleep 1
@@ -0,0 +1,34 @@
source common.sh
clearStore
rm -rf $TEST_HOME/.cache $TEST_HOME/.config $TEST_HOME/.local
cp ./simple.nix ./simple.builder.sh ./config.nix $TEST_HOME
cd $TEST_HOME
rm -f post-hook-ran
cat <<EOF > echoing-post-hook.sh
#!/bin/sh
echo "ThePostHookRan" > $PWD/post-hook-ran
EOF
chmod +x echoing-post-hook.sh
cat <<EOF > flake.nix
{
nixConfig.post-build-hook = "$PWD/echoing-post-hook.sh";
outputs = a: {
defaultPackage.$system = import ./simple.nix;
};
}
EOF
# Without --accept-flake-config, the post hook should not run.
nix build < /dev/null
(! [[ -f post-hook-ran ]])
clearStore
nix build --accept-flake-config
test -f post-hook-ran || fail "The post hook should have run"
@@ -707,10 +707,10 @@ cat > $flakeFollowsA/flake.nix <<EOF
B = {
url = "path:./flakeB";
inputs.foobar.follows = "D";
inputs.nonFlake.follows = "D";
};
D.url = "path:./flakeD";
foobar.url = "path:./flakeE";
};
outputs = { ... }: {};
}
@@ -720,7 +720,8 @@ cat > $flakeFollowsB/flake.nix <<EOF
{
description = "Flake B";
inputs = {
foobar.url = "path:./../flakeE";
foobar.url = "path:$flakeFollowsA/flakeE";
nonFlake.url = "path:$nonFlakeDir";
C = {
url = "path:./flakeC";
inputs.foobar.follows = "foobar";
@@ -734,7 +735,7 @@ cat > $flakeFollowsC/flake.nix <<EOF
{
description = "Flake C";
inputs = {
foobar.url = "path:./../../flakeE";
foobar.url = "path:$flakeFollowsA/flakeE";
};
outputs = { ... }: {};
}
@@ -765,6 +766,27 @@ nix flake lock $flakeFollowsA
[[ $(jq -c .nodes.B.inputs.foobar $flakeFollowsA/flake.lock) = '["D"]' ]]
[[ $(jq -c .nodes.C.inputs.foobar $flakeFollowsA/flake.lock) = '["B","foobar"]' ]]
# Ensure removing follows from flake.nix removes them from the lockfile
cat > $flakeFollowsA/flake.nix <<EOF
{
description = "Flake A";
inputs = {
B = {
url = "path:./flakeB";
inputs.nonFlake.follows = "D";
};
D.url = "path:./flakeD";
};
outputs = { ... }: {};
}
EOF
nix flake lock $flakeFollowsA
[[ $(jq -c .nodes.B.inputs.foobar $flakeFollowsA/flake.lock) = '"foobar"' ]]
jq -r -c '.nodes | keys | .[]' $flakeFollowsA/flake.lock | grep "^foobar$"
# Ensure a relative path is not allowed to go outside the store path
cat > $flakeFollowsA/flake.nix <<EOF
{
@@ -60,8 +60,6 @@ function-trace exited (string):1:1 at
expect_trace '(x: x) 1 2' "
function-trace entered (string):1:1 at
function-trace exited (string):1:1 at
function-trace entered (string):1:1 at
function-trace exited (string):1:1 at
"
# Not a function
@@ -1 +1 @@
[ [ 42 77 147 249 483 526 ] [ 526 483 249 147 77 42 ] [ "bar" "fnord" "foo" "xyzzy" ] [ { key = 1; value = "foo"; } { key = 1; value = "fnord"; } { key = 2; value = "bar"; } ] ]
[ [ 42 77 147 249 483 526 ] [ 526 483 249 147 77 42 ] [ "bar" "fnord" "foo" "xyzzy" ] [ { key = 1; value = "foo"; } { key = 1; value = "fnord"; } { key = 2; value = "bar"; } ] [ [ ] [ ] [ 1 ] [ 1 4 ] [ 1 5 ] [ 1 6 ] [ 2 ] [ 2 3 ] [ 3 ] [ 3 ] ] ]
@@ -5,4 +5,16 @@ with builtins;
(sort lessThan [ "foo" "bar" "xyzzy" "fnord" ])
(sort (x: y: x.key < y.key)
[ { key = 1; value = "foo"; } { key = 2; value = "bar"; } { key = 1; value = "fnord"; } ])
(sort lessThan [
[ 1 6 ]
[ ]
[ 2 3 ]
[ 3 ]
[ 1 5 ]
[ 2 ]
[ 1 ]
[ ]
[ 1 4 ]
[ 3 ]
])
]
@@ -46,9 +46,10 @@ nix_tests = \
recursive.sh \
describe-stores.sh \
flakes.sh \
flake-local-settings.sh \
build.sh \
compute-levels.sh \
repl.sh \
repl.sh ca/repl.sh \
ca/build.sh \
ca/build-with-garbage-path.sh \
ca/duplicate-realisation-in-closure.sh \
@@ -7,7 +7,9 @@ simple = import ./simple.nix
testRepl () {
local nixArgs=("$@")
local outPath=$(nix repl "${nixArgs[@]}" <<< "$replCmds" |&
local replOutput="$(nix repl "${nixArgs[@]}" <<< "$replCmds")"
echo "$replOutput"
local outPath=$(echo "$replOutput" |&
grep -o -E "$NIX_STORE_DIR/\w*-simple")
nix path-info "${nixArgs[@]}" "$outPath"
}