diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 1f504a8ea..829111b67 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -12,8 +12,6 @@ jobs: - uses: actions/checkout@v2 with: fetch-depth: 0 - - uses: cachix/install-nix-action@v10 - with: - skip_adding_nixpkgs_channel: true + - uses: cachix/install-nix-action@v11 #- run: nix flake check - run: nix-build -A checks.$(if [[ `uname` = Linux ]]; then echo x86_64-linux; else echo x86_64-darwin; fi) diff --git a/.gitignore b/.gitignore index 4711fa7fa..b087cd8d5 100644 --- a/.gitignore +++ b/.gitignore @@ -5,7 +5,6 @@ perl/Makefile.config /aclocal.m4 /autom4te.cache /precompiled-headers.h.gch -/precompiled-headers.h.pch /config.* /configure /stamp-h1 diff --git a/README.md b/README.md index 3cf4e44fa..11fe5f932 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ for more details. ## Installation -On Linux and macOS the easiest way to Install Nix is to run the following shell command +On Linux and macOS the easiest way to install Nix is to run the following shell command (as a user other than root): ```console diff --git a/doc/manual/generate-manpage.nix b/doc/manual/generate-manpage.nix index 4709c0e2c..db266750a 100644 --- a/doc/manual/generate-manpage.nix +++ b/doc/manual/generate-manpage.nix @@ -52,5 +52,4 @@ in command: -"Title: nix\n\n" -+ showCommand { command = "nix"; section = "#"; def = command; } +showCommand { command = "nix"; section = "#"; def = command; } diff --git a/doc/manual/generate-options.nix b/doc/manual/generate-options.nix index 7afe279c3..3c31a4eec 100644 --- a/doc/manual/generate-options.nix +++ b/doc/manual/generate-options.nix @@ -13,7 +13,12 @@ concatStrings (map then "*empty*" else if isBool option.value then (if option.value then "`true`" else "`false`") - else "`" + toString option.value + "`") + "\n\n" + else + # n.b. a StringMap value type is specified as a string, but + # this shows the value type. The empty stringmap is "null" in + # JSON, but that converts to "{ }" here. 
+ (if isAttrs option.value then "`\"\"`" + else "`" + toString option.value + "`")) + "\n\n" + (if option.aliases != [] then " **Deprecated alias:** " + (concatStringsSep ", " (map (s: "`${s}`") option.aliases)) + "\n\n" else "") diff --git a/doc/manual/local.mk b/doc/manual/local.mk index 3b8e7e2df..7d9a1a3e8 100644 --- a/doc/manual/local.mk +++ b/doc/manual/local.mk @@ -18,13 +18,22 @@ dist-files += $(man-pages) nix-eval = $(bindir)/nix eval --experimental-features nix-command -I nix/corepkgs=corepkgs --store dummy:// --impure --raw --expr $(d)/%.1: $(d)/src/command-ref/%.md - $(trace-gen) lowdown -sT man $^ -o $@ + @printf "Title: %s\n\n" "$$(basename $@ .1)" > $^.tmp + @cat $^ >> $^.tmp + $(trace-gen) lowdown -sT man $^.tmp -o $@ + @rm $^.tmp $(d)/%.8: $(d)/src/command-ref/%.md - $(trace-gen) lowdown -sT man $^ -o $@ + @printf "Title: %s\n\n" "$$(basename $@ .8)" > $^.tmp + @cat $^ >> $^.tmp + $(trace-gen) lowdown -sT man $^.tmp -o $@ + @rm $^.tmp $(d)/nix.conf.5: $(d)/src/command-ref/conf-file.md - $(trace-gen) lowdown -sT man $^ -o $@ + @printf "Title: %s\n\n" "$$(basename $@ .5)" > $^.tmp + @cat $^ >> $^.tmp + $(trace-gen) lowdown -sT man $^.tmp -o $@ + @rm $^.tmp $(d)/src/command-ref/nix.md: $(d)/nix.json $(d)/generate-manpage.nix $(bindir)/nix $(trace-gen) $(nix-eval) 'import doc/manual/generate-manpage.nix (builtins.fromJSON (builtins.readFile $<))' > $@.tmp @@ -40,7 +49,7 @@ $(d)/nix.json: $(bindir)/nix @mv $@.tmp $@ $(d)/conf-file.json: $(bindir)/nix - $(trace-gen) env -i NIX_CONF_DIR=/dummy HOME=/dummy $(bindir)/nix show-config --json --experimental-features nix-command > $@.tmp + $(trace-gen) env -i NIX_CONF_DIR=/dummy HOME=/dummy NIX_SSL_CERT_FILE=/dummy/no-ca-bundle.crt $(bindir)/nix show-config --json --experimental-features nix-command > $@.tmp @mv $@.tmp $@ $(d)/src/expressions/builtins.md: $(d)/builtins.json $(d)/generate-builtins.nix $(d)/src/expressions/builtins-prefix.md $(bindir)/nix diff --git a/doc/manual/src/command-ref/conf-file-prefix.md b/doc/manual/src/command-ref/conf-file-prefix.md index 04c6cd859..9987393d2 100644 --- a/doc/manual/src/command-ref/conf-file-prefix.md +++ b/doc/manual/src/command-ref/conf-file-prefix.md @@ -1,5 +1,3 @@ -Title: nix.conf - # Name `nix.conf` - Nix configuration file diff --git a/doc/manual/src/command-ref/nix-build.md b/doc/manual/src/command-ref/nix-build.md index 4bcb8db40..4565bfbc2 100644 --- a/doc/manual/src/command-ref/nix-build.md +++ b/doc/manual/src/command-ref/nix-build.md @@ -1,5 +1,3 @@ -Title: nix-build - # Name `nix-build` - build a Nix expression diff --git a/doc/manual/src/command-ref/nix-channel.md b/doc/manual/src/command-ref/nix-channel.md index f0e205967..4ca12d2cc 100644 --- a/doc/manual/src/command-ref/nix-channel.md +++ b/doc/manual/src/command-ref/nix-channel.md @@ -1,5 +1,3 @@ -Title: nix-channel - # Name `nix-channel` - manage Nix channels diff --git a/doc/manual/src/command-ref/nix-collect-garbage.md b/doc/manual/src/command-ref/nix-collect-garbage.md index 62a6b7ca0..296165993 100644 --- a/doc/manual/src/command-ref/nix-collect-garbage.md +++ b/doc/manual/src/command-ref/nix-collect-garbage.md @@ -1,5 +1,3 @@ -Title: nix-collect-garbage - # Name `nix-collect-garbage` - delete unreachable store paths diff --git a/doc/manual/src/command-ref/nix-copy-closure.md b/doc/manual/src/command-ref/nix-copy-closure.md index 5ce320af7..dcb844a72 100644 --- a/doc/manual/src/command-ref/nix-copy-closure.md +++ b/doc/manual/src/command-ref/nix-copy-closure.md @@ -1,5 +1,3 @@ -Title: nix-copy-closure - # Name 
`nix-copy-closure` - copy a closure to or from a remote machine via SSH diff --git a/doc/manual/src/command-ref/nix-daemon.md b/doc/manual/src/command-ref/nix-daemon.md index bd5d25026..e91cb01dd 100644 --- a/doc/manual/src/command-ref/nix-daemon.md +++ b/doc/manual/src/command-ref/nix-daemon.md @@ -1,5 +1,3 @@ -Title: nix-daemon - # Name `nix-daemon` - Nix multi-user support daemon diff --git a/doc/manual/src/command-ref/nix-env.md b/doc/manual/src/command-ref/nix-env.md index ee838581b..1c23bb0ad 100644 --- a/doc/manual/src/command-ref/nix-env.md +++ b/doc/manual/src/command-ref/nix-env.md @@ -1,5 +1,3 @@ -Title: nix-env - # Name `nix-env` - manipulate or query Nix user environments diff --git a/doc/manual/src/command-ref/nix-hash.md b/doc/manual/src/command-ref/nix-hash.md index d3f91f8e9..7ed82cdfc 100644 --- a/doc/manual/src/command-ref/nix-hash.md +++ b/doc/manual/src/command-ref/nix-hash.md @@ -1,5 +1,3 @@ -Title: nix-hash - # Name `nix-hash` - compute the cryptographic hash of a path diff --git a/doc/manual/src/command-ref/nix-instantiate.md b/doc/manual/src/command-ref/nix-instantiate.md index d09f5ed6a..c369397b6 100644 --- a/doc/manual/src/command-ref/nix-instantiate.md +++ b/doc/manual/src/command-ref/nix-instantiate.md @@ -1,5 +1,3 @@ -Title: nix-instantiate - # Name `nix-instantiate` - instantiate store derivations from Nix expressions diff --git a/doc/manual/src/command-ref/nix-prefetch-url.md b/doc/manual/src/command-ref/nix-prefetch-url.md index 1307c7c37..78c612cd4 100644 --- a/doc/manual/src/command-ref/nix-prefetch-url.md +++ b/doc/manual/src/command-ref/nix-prefetch-url.md @@ -1,5 +1,3 @@ -Title: nix-prefetch-url - # Name `nix-prefetch-url` - copy a file from a URL into the store and print its hash diff --git a/doc/manual/src/command-ref/nix-shell.md b/doc/manual/src/command-ref/nix-shell.md index fc42a202a..45a5ff08c 100644 --- a/doc/manual/src/command-ref/nix-shell.md +++ b/doc/manual/src/command-ref/nix-shell.md @@ -1,5 +1,3 @@ -Title: nix-shell - # Name `nix-shell` - start an interactive shell based on a Nix expression diff --git a/doc/manual/src/command-ref/nix-store.md b/doc/manual/src/command-ref/nix-store.md index 4680339e4..827adbd05 100644 --- a/doc/manual/src/command-ref/nix-store.md +++ b/doc/manual/src/command-ref/nix-store.md @@ -1,5 +1,3 @@ -Title: nix-store - # Name `nix-store` - manipulate or query the Nix store diff --git a/doc/manual/src/hacking.md b/doc/manual/src/hacking.md index 5bd884ce8..2a1e55e5b 100644 --- a/doc/manual/src/hacking.md +++ b/doc/manual/src/hacking.md @@ -39,17 +39,17 @@ To build Nix itself in this shell: ```console [nix-shell]$ ./bootstrap.sh -[nix-shell]$ ./configure $configureFlags --prefix=$(pwd)/inst +[nix-shell]$ ./configure $configureFlags --prefix=$(pwd)/outputs/out [nix-shell]$ make -j $NIX_BUILD_CORES ``` -To install it in `$(pwd)/inst` and test it: +To install it in `$(pwd)/outputs` and test it: ```console [nix-shell]$ make install -[nix-shell]$ make installcheck -[nix-shell]$ ./inst/bin/nix --version -nix (Nix) 2.4 +[nix-shell]$ make installcheck -j $NIX_BUILD_CORES +[nix-shell]$ ./outputs/out/bin/nix --version +nix (Nix) 3.0 ``` To run a functional test: @@ -58,6 +58,12 @@ To run a functional test: make tests/test-name-should-auto-complete.sh.test ``` +To run the unit-tests for C++ code: + +``` +make check +``` + If you have a flakes-enabled Nix you can replace: ```console diff --git a/local.mk b/local.mk index b1ca832a6..fe314b902 100644 --- a/local.mk +++ b/local.mk @@ -11,6 +11,6 @@ GLOBAL_CXXFLAGS += 
-Wno-deprecated-declarations $(foreach i, config.h $(wildcard src/lib*/*.hh), \ $(eval $(call install-file-in, $(i), $(includedir)/nix, 0644))) -$(GCH) $(PCH): src/libutil/util.hh config.h +$(GCH): src/libutil/util.hh config.h GCH_CXXFLAGS = -I src/libutil diff --git a/misc/bash/completion.sh b/misc/bash/completion.sh index bc184edd6..bea2a40bc 100644 --- a/misc/bash/completion.sh +++ b/misc/bash/completion.sh @@ -4,13 +4,14 @@ function _complete_nix { _get_comp_words_by_ref -n ':=&' words cword cur local have_type while IFS= read -r line; do + local completion=${line%% *} if [[ -z $have_type ]]; then have_type=1 - if [[ $line = filenames ]]; then + if [[ $completion = filenames ]]; then compopt -o filenames fi else - COMPREPLY+=("$line") + COMPREPLY+=("$completion") fi done < <(NIX_GET_COMPLETIONS=$cword "${words[@]}") __ltrim_colon_completions "$cur" diff --git a/misc/zsh/completion.zsh b/misc/zsh/completion.zsh new file mode 100644 index 000000000..d4df6447e --- /dev/null +++ b/misc/zsh/completion.zsh @@ -0,0 +1,21 @@ +function _nix() { + local ifs_bk="$IFS" + local input=("${(Q)words[@]}") + IFS=$'\n' + local res=($(NIX_GET_COMPLETIONS=$((CURRENT - 1)) "$input[@]")) + IFS="$ifs_bk" + local tpe="${${res[1]}%%> *}" + local -a suggestions + declare -a suggestions + for suggestion in ${res:1}; do + # FIXME: This doesn't work properly if the suggestion word contains a `:` + # itself + suggestions+="${suggestion/ /:}" + done + if [[ "$tpe" == filenames ]]; then + compadd -f + fi + _describe 'nix' suggestions +} + +compdef _nix nix diff --git a/mk/precompiled-headers.mk b/mk/precompiled-headers.mk index 1fdb4b3a4..cdd3daecd 100644 --- a/mk/precompiled-headers.mk +++ b/mk/precompiled-headers.mk @@ -10,33 +10,12 @@ $(GCH): precompiled-headers.h @mkdir -p "$(dir $@)" $(trace-gen) $(CXX) -x c++-header -o $@ $< $(GLOBAL_CXXFLAGS) $(GCH_CXXFLAGS) -PCH = $(buildprefix)precompiled-headers.h.pch - -$(PCH): precompiled-headers.h - @rm -f $@ - @mkdir -p "$(dir $@)" - $(trace-gen) $(CXX) -x c++-header -o $@ $< $(GLOBAL_CXXFLAGS) $(GCH_CXXFLAGS) - -clean-files += $(GCH) $(PCH) +clean-files += $(GCH) ifeq ($(PRECOMPILE_HEADERS), 1) - ifeq ($(findstring g++,$(CXX)), g++) + GLOBAL_CXXFLAGS_PCH += -include $(buildprefix)precompiled-headers.h -Winvalid-pch - GLOBAL_CXXFLAGS_PCH += -include $(buildprefix)precompiled-headers.h -Winvalid-pch - - GLOBAL_ORDER_AFTER += $(GCH) - - else ifeq ($(findstring clang++,$(CXX)), clang++) - - GLOBAL_CXXFLAGS_PCH += -include-pch $(PCH) -Winvalid-pch - - GLOBAL_ORDER_AFTER += $(PCH) - - else - - $(error Don't know how to precompile headers on $(CXX)) - - endif + GLOBAL_ORDER_AFTER += $(GCH) endif diff --git a/nix-rust/src/lib.rs b/nix-rust/src/lib.rs index 27ea69fbd..101de106f 100644 --- a/nix-rust/src/lib.rs +++ b/nix-rust/src/lib.rs @@ -1,3 +1,4 @@ +#[allow(improper_ctypes_definitions)] #[cfg(not(test))] mod c; mod error; diff --git a/nix-rust/src/store/path.rs b/nix-rust/src/store/path.rs index 47b5975c0..99f7a1f18 100644 --- a/nix-rust/src/store/path.rs +++ b/nix-rust/src/store/path.rs @@ -19,9 +19,9 @@ impl StorePath { } Self::new_from_base_name( path.file_name() - .ok_or(Error::BadStorePath(path.into()))? + .ok_or_else(|| Error::BadStorePath(path.into()))? 
.to_str() - .ok_or(Error::BadStorePath(path.into()))?, + .ok_or_else(|| Error::BadStorePath(path.into()))?, ) } @@ -34,7 +34,7 @@ impl StorePath { pub fn new_from_base_name(base_name: &str) -> Result { if base_name.len() < STORE_PATH_HASH_CHARS + 1 - || base_name.as_bytes()[STORE_PATH_HASH_CHARS] != '-' as u8 + || base_name.as_bytes()[STORE_PATH_HASH_CHARS] != b'-' { return Err(Error::BadStorePath(base_name.into())); } @@ -65,7 +65,7 @@ impl StorePathHash { Ok(Self(bytes)) } - pub fn hash<'a>(&'a self) -> &'a [u8; STORE_PATH_HASH_BYTES] { + pub fn hash(&self) -> &[u8; STORE_PATH_HASH_BYTES] { &self.0 } } @@ -98,7 +98,7 @@ pub struct StorePathName(String); impl StorePathName { pub fn new(s: &str) -> Result { - if s.len() == 0 { + if s.is_empty() { return Err(Error::StorePathNameEmpty); } @@ -106,25 +106,24 @@ impl StorePathName { return Err(Error::StorePathNameTooLong); } - if s.starts_with('.') - || !s.chars().all(|c| { - c.is_ascii_alphabetic() - || c.is_ascii_digit() - || c == '+' - || c == '-' - || c == '.' - || c == '_' - || c == '?' - || c == '=' - }) - { + let is_good_path_name = s.chars().all(|c| { + c.is_ascii_alphabetic() + || c.is_ascii_digit() + || c == '+' + || c == '-' + || c == '.' + || c == '_' + || c == '?' + || c == '=' + }); + if s.starts_with('.') || !is_good_path_name { return Err(Error::BadStorePathName); } Ok(Self(s.to_string())) } - pub fn name<'a>(&'a self) -> &'a str { + pub fn name(&self) -> &str { &self.0 } } diff --git a/nix-rust/src/util/base32.rs b/nix-rust/src/util/base32.rs index efd4a2901..7e71dc920 100644 --- a/nix-rust/src/util/base32.rs +++ b/nix-rust/src/util/base32.rs @@ -13,7 +13,7 @@ pub fn decoded_len(input_len: usize) -> usize { input_len * 5 / 8 } -static BASE32_CHARS: &'static [u8; 32] = &b"0123456789abcdfghijklmnpqrsvwxyz"; +static BASE32_CHARS: &[u8; 32] = &b"0123456789abcdfghijklmnpqrsvwxyz"; lazy_static! 
{ static ref BASE32_CHARS_REVERSE: Box<[u8; 256]> = { diff --git a/scripts/install-nix-from-closure.sh b/scripts/install-nix-from-closure.sh index 6efd8af18..14fb91534 100644 --- a/scripts/install-nix-from-closure.sh +++ b/scripts/install-nix-from-closure.sh @@ -2,6 +2,8 @@ set -e +umask 0022 + dest="/nix" self="$(dirname "$0")" nix="@nix@" diff --git a/scripts/install.in b/scripts/install.in index 1d26c4ff0..39fae37e3 100644 --- a/scripts/install.in +++ b/scripts/install.in @@ -10,6 +10,8 @@ oops() { exit 1 } +umask 0022 + tmpDir="$(mktemp -d -t nix-binary-tarball-unpack.XXXXXXXXXX || \ oops "Can't create temporary directory for downloading the Nix binary tarball")" cleanup() { diff --git a/src/build-remote/build-remote.cc b/src/build-remote/build-remote.cc index ce5127113..9a0e9f08d 100644 --- a/src/build-remote/build-remote.cc +++ b/src/build-remote/build-remote.cc @@ -44,7 +44,7 @@ static bool allSupportedLocally(Store & store, const std::set& requ return true; } -static int _main(int argc, char * * argv) +static int main_build_remote(int argc, char * * argv) { { logger = makeJSONLogger(*logger); @@ -297,4 +297,4 @@ connected: } } -static RegisterLegacyCommand s1("build-remote", _main); +static RegisterLegacyCommand r_build_remote("build-remote", main_build_remote); diff --git a/src/libexpr/eval-cache.hh b/src/libexpr/eval-cache.hh index 8ffffc0ed..e23e45c94 100644 --- a/src/libexpr/eval-cache.hh +++ b/src/libexpr/eval-cache.hh @@ -11,7 +11,7 @@ namespace nix::eval_cache { MakeError(CachedEvalError, EvalError); -class AttrDb; +struct AttrDb; class AttrCursor; class EvalCache : public std::enable_shared_from_this diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 883fc27a7..d6366050c 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -2081,7 +2081,7 @@ Strings EvalSettings::getDefaultNixPath() EvalSettings evalSettings; -static GlobalConfig::Register r1(&evalSettings); +static GlobalConfig::Register rEvalSettings(&evalSettings); } diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc index bef4a26fb..6d5210848 100644 --- a/src/libexpr/flake/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -12,7 +12,7 @@ using namespace flake; namespace flake { -typedef std::pair FetchedFlake; +typedef std::pair FetchedFlake; typedef std::vector> FlakeCache; static std::optional lookupInFlakeCache( @@ -274,7 +274,7 @@ Flake getFlake(EvalState & state, const FlakeRef & originalRef, bool allowLookup } /* Compute an in-memory lock file for the specified top-level flake, - and optionally write it to file, it the flake is writable. */ + and optionally write it to file, if the flake is writable. 
*/ LockedFlake lockFlake( EvalState & state, const FlakeRef & topRef, diff --git a/src/libexpr/flake/flakeref.cc b/src/libexpr/flake/flakeref.cc index 97d6c61cd..37e24fd19 100644 --- a/src/libexpr/flake/flakeref.cc +++ b/src/libexpr/flake/flakeref.cc @@ -16,10 +16,10 @@ const static std::string subDirRegex = subDirElemRegex + "(?:/" + subDirElemRege std::string FlakeRef::to_string() const { - auto url = input.toURL(); + std::map extraQuery; if (subdir != "") - url.query.insert_or_assign("dir", subdir); - return url.to_string(); + extraQuery.insert_or_assign("dir", subdir); + return input.toURLString(extraQuery); } fetchers::Attrs FlakeRef::toAttrs() const @@ -157,7 +157,8 @@ std::pair parseFlakeRefWithFragment( } else { if (!hasPrefix(path, "/")) throw BadURL("flake reference '%s' is not an absolute path", url); - path = canonPath(path); + auto query = decodeQuery(match[2]); + path = canonPath(path + "/" + get(query, "dir").value_or("")); } fetchers::Attrs attrs; diff --git a/src/libexpr/flake/lockfile.cc b/src/libexpr/flake/lockfile.cc index 78431f000..bb46e1bb4 100644 --- a/src/libexpr/flake/lockfile.cc +++ b/src/libexpr/flake/lockfile.cc @@ -13,12 +13,12 @@ FlakeRef getFlakeRef( { auto i = json.find(attr); if (i != json.end()) { - auto attrs = jsonToAttrs(*i); + auto attrs = fetchers::jsonToAttrs(*i); // FIXME: remove when we drop support for version 5. if (info) { auto j = json.find(info); if (j != json.end()) { - for (auto k : jsonToAttrs(*j)) + for (auto k : fetchers::jsonToAttrs(*j)) attrs.insert_or_assign(k.first, k.second); } } diff --git a/src/libexpr/flake/lockfile.hh b/src/libexpr/flake/lockfile.hh index 9ec8b39c3..627794d8c 100644 --- a/src/libexpr/flake/lockfile.hh +++ b/src/libexpr/flake/lockfile.hh @@ -11,8 +11,6 @@ class StorePath; namespace nix::flake { -using namespace fetchers; - typedef std::vector InputPath; struct LockedNode; diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index 236b8b5a8..38343745d 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -2236,6 +2236,10 @@ static RegisterPrimOp primop_catAttrs({ static void prim_functionArgs(EvalState & state, const Pos & pos, Value * * args, Value & v) { state.forceValue(*args[0], pos); + if (args[0]->type == tPrimOpApp || args[0]->type == tPrimOp) { + state.mkAttrs(v, 0); + return; + } if (args[0]->type != tLambda) throw TypeError({ .hint = hintfmt("'functionArgs' requires a function"), diff --git a/src/libexpr/primops.hh b/src/libexpr/primops.hh index cef7667e0..dd6df0484 100644 --- a/src/libexpr/primops.hh +++ b/src/libexpr/primops.hh @@ -1,3 +1,5 @@ +#pragma once + #include "eval.hh" #include diff --git a/src/libexpr/primops/context.cc b/src/libexpr/primops/context.cc index dbb93bae6..b570fca31 100644 --- a/src/libexpr/primops/context.cc +++ b/src/libexpr/primops/context.cc @@ -11,7 +11,7 @@ static void prim_unsafeDiscardStringContext(EvalState & state, const Pos & pos, mkString(v, s, PathSet()); } -static RegisterPrimOp r1("__unsafeDiscardStringContext", 1, prim_unsafeDiscardStringContext); +static RegisterPrimOp primop_unsafeDiscardStringContext("__unsafeDiscardStringContext", 1, prim_unsafeDiscardStringContext); static void prim_hasContext(EvalState & state, const Pos & pos, Value * * args, Value & v) @@ -21,7 +21,7 @@ static void prim_hasContext(EvalState & state, const Pos & pos, Value * * args, mkBool(v, !context.empty()); } -static RegisterPrimOp r2("__hasContext", 1, prim_hasContext); +static RegisterPrimOp primop_hasContext("__hasContext", 1, prim_hasContext); /* Sometimes we 
want to pass a derivation path (i.e. pkg.drvPath) to a @@ -42,7 +42,7 @@ static void prim_unsafeDiscardOutputDependency(EvalState & state, const Pos & po mkString(v, s, context2); } -static RegisterPrimOp r3("__unsafeDiscardOutputDependency", 1, prim_unsafeDiscardOutputDependency); +static RegisterPrimOp primop_unsafeDiscardOutputDependency("__unsafeDiscardOutputDependency", 1, prim_unsafeDiscardOutputDependency); /* Extract the context of a string as a structured Nix value. @@ -127,7 +127,7 @@ static void prim_getContext(EvalState & state, const Pos & pos, Value * * args, v.attrs->sort(); } -static RegisterPrimOp r4("__getContext", 1, prim_getContext); +static RegisterPrimOp primop_getContext("__getContext", 1, prim_getContext); /* Append the given context to a given string. @@ -191,6 +191,6 @@ static void prim_appendContext(EvalState & state, const Pos & pos, Value * * arg mkString(v, orig, context); } -static RegisterPrimOp r5("__appendContext", 2, prim_appendContext); +static RegisterPrimOp primop_appendContext("__appendContext", 2, prim_appendContext); } diff --git a/src/libexpr/primops/fetchMercurial.cc b/src/libexpr/primops/fetchMercurial.cc index 1a064ed5c..a77035c16 100644 --- a/src/libexpr/primops/fetchMercurial.cc +++ b/src/libexpr/primops/fetchMercurial.cc @@ -87,6 +87,6 @@ static void prim_fetchMercurial(EvalState & state, const Pos & pos, Value * * ar state.allowedPaths->insert(tree.actualPath); } -static RegisterPrimOp r("fetchMercurial", 1, prim_fetchMercurial); +static RegisterPrimOp r_fetchMercurial("fetchMercurial", 1, prim_fetchMercurial); } diff --git a/src/libexpr/primops/fetchTree.cc b/src/libexpr/primops/fetchTree.cc index 06e8304b8..7cd4d0fbf 100644 --- a/src/libexpr/primops/fetchTree.cc +++ b/src/libexpr/primops/fetchTree.cc @@ -152,7 +152,7 @@ static void prim_fetchTree(EvalState & state, const Pos & pos, Value * * args, V fetchTree(state, pos, args, v, std::nullopt); } -static RegisterPrimOp r("fetchTree", 1, prim_fetchTree); +static RegisterPrimOp primop_fetchTree("fetchTree", 1, prim_fetchTree); static void fetch(EvalState & state, const Pos & pos, Value * * args, Value & v, const string & who, bool unpack, std::string name) diff --git a/src/libexpr/primops/fromTOML.cc b/src/libexpr/primops/fromTOML.cc index 955373796..7fdc70303 100644 --- a/src/libexpr/primops/fromTOML.cc +++ b/src/libexpr/primops/fromTOML.cc @@ -90,6 +90,6 @@ void prim_fromTOML(EvalState & state, const Pos & pos, Value * * args, Value & v } } -static RegisterPrimOp r("fromTOML", 1, prim_fromTOML); +static RegisterPrimOp primop_fromTOML("fromTOML", 1, prim_fromTOML); } diff --git a/src/libfetchers/fetchers.cc b/src/libfetchers/fetchers.cc index eaa635595..49851f7bc 100644 --- a/src/libfetchers/fetchers.cc +++ b/src/libfetchers/fetchers.cc @@ -69,6 +69,14 @@ ParsedURL Input::toURL() const return scheme->toURL(*this); } +std::string Input::toURLString(const std::map & extraQuery) const +{ + auto url = toURL(); + for (auto & attr : extraQuery) + url.query.insert(attr); + return url.to_string(); +} + std::string Input::to_string() const { return toURL().to_string(); diff --git a/src/libfetchers/fetchers.hh b/src/libfetchers/fetchers.hh index 89b1e6e7d..e8ae59143 100644 --- a/src/libfetchers/fetchers.hh +++ b/src/libfetchers/fetchers.hh @@ -39,6 +39,8 @@ public: ParsedURL toURL() const; + std::string toURLString(const std::map & extraQuery = {}) const; + std::string to_string() const; Attrs toAttrs() const; @@ -73,7 +75,7 @@ public: StorePath computeStorePath(Store & store) const; - // Convience 
functions for common attributes. + // Convenience functions for common attributes. std::string getType() const; std::optional getNarHash() const; std::optional getRef() const; @@ -84,6 +86,9 @@ public: struct InputScheme { + virtual ~InputScheme() + { } + virtual std::optional inputFromURL(const ParsedURL & url) = 0; virtual std::optional inputFromAttrs(const Attrs & attrs) = 0; @@ -119,12 +124,14 @@ DownloadFileResult downloadFile( ref store, const std::string & url, const std::string & name, - bool immutable); + bool immutable, + const Headers & headers = {}); std::pair downloadTarball( ref store, const std::string & url, const std::string & name, - bool immutable); + bool immutable, + const Headers & headers = {}); } diff --git a/src/libfetchers/git.cc b/src/libfetchers/git.cc index ad7638d73..a6411b02b 100644 --- a/src/libfetchers/git.cc +++ b/src/libfetchers/git.cc @@ -452,6 +452,6 @@ struct GitInputScheme : InputScheme } }; -static auto r1 = OnStartup([] { registerInputScheme(std::make_unique()); }); +static auto rGitInputScheme = OnStartup([] { registerInputScheme(std::make_unique()); }); } diff --git a/src/libfetchers/github.cc b/src/libfetchers/github.cc index 1737658a7..92ff224f7 100644 --- a/src/libfetchers/github.cc +++ b/src/libfetchers/github.cc @@ -3,12 +3,20 @@ #include "fetchers.hh" #include "globals.hh" #include "store-api.hh" +#include "types.hh" #include "url-parts.hh" +#include #include namespace nix::fetchers { +struct DownloadUrl +{ + std::string url; + Headers headers; +}; + // A github or gitlab host const static std::string hostRegexS = "[a-zA-Z0-9.]*"; // FIXME: check std::regex hostRegex(hostRegexS, std::regex::ECMAScript); @@ -17,6 +25,8 @@ struct GitArchiveInputScheme : InputScheme { virtual std::string type() = 0; + virtual std::optional> accessHeaderFromToken(const std::string & token) const = 0; + std::optional inputFromURL(const ParsedURL & url) override { if (url.scheme != type()) return {}; @@ -130,9 +140,31 @@ struct GitArchiveInputScheme : InputScheme return input; } + std::optional getAccessToken(const std::string & host) const + { + auto tokens = settings.accessTokens.get(); + if (auto token = get(tokens, host)) + return *token; + return {}; + } + + Headers makeHeadersWithAuthTokens(const std::string & host) const + { + Headers headers; + auto accessToken = getAccessToken(host); + if (accessToken) { + auto hdr = accessHeaderFromToken(*accessToken); + if (hdr) + headers.push_back(*hdr); + else + warn("Unrecognized access token for host '%s'", host); + } + return headers; + } + virtual Hash getRevFromRef(nix::ref store, const Input & input) const = 0; - virtual std::string getDownloadUrl(const Input & input) const = 0; + virtual DownloadUrl getDownloadUrl(const Input & input) const = 0; std::pair fetch(ref store, const Input & _input) override { @@ -161,7 +193,7 @@ struct GitArchiveInputScheme : InputScheme auto url = getDownloadUrl(input); - auto [tree, lastModified] = downloadTarball(store, url, "source", true); + auto [tree, lastModified] = downloadTarball(store, url.url, "source", true, url.headers); input.attrs.insert_or_assign("lastModified", lastModified); @@ -183,49 +215,52 @@ struct GitHubInputScheme : GitArchiveInputScheme { std::string type() override { return "github"; } - void addAccessToken(std::string & url) const + std::optional> accessHeaderFromToken(const std::string & token) const override { - std::string accessToken = settings.githubAccessToken.get(); - if (accessToken != "") - url += "?access_token=" + accessToken; + // Github 
supports PAT/OAuth2 tokens and HTTP Basic + // Authentication. The former simply specifies the token, the + // latter can use the token as the password. Only the first + // is used here. See + // https://developer.github.com/v3/#authentication and + // https://docs.github.com/en/developers/apps/authorizing-oath-apps + return std::pair("Authorization", fmt("token %s", token)); } Hash getRevFromRef(nix::ref store, const Input & input) const override { - auto host_url = maybeGetStrAttr(input.attrs, "url").value_or("github.com"); + auto host = maybeGetStrAttr(input.attrs, "host").value_or("github.com"); auto url = fmt("https://api.%s/repos/%s/%s/commits/%s", // FIXME: check - host_url, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), *input.getRef()); + host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), *input.getRef()); - addAccessToken(url); + Headers headers = makeHeadersWithAuthTokens(host); auto json = nlohmann::json::parse( readFile( store->toRealPath( - downloadFile(store, url, "source", false).storePath))); + downloadFile(store, url, "source", false, headers).storePath))); auto rev = Hash::parseAny(std::string { json["sha"] }, htSHA1); debug("HEAD revision for '%s' is %s", url, rev.gitRev()); return rev; } - std::string getDownloadUrl(const Input & input) const override + DownloadUrl getDownloadUrl(const Input & input) const override { // FIXME: use regular /archive URLs instead? api.github.com // might have stricter rate limits. - auto host_url = maybeGetStrAttr(input.attrs, "host").value_or("github.com"); + auto host = maybeGetStrAttr(input.attrs, "host").value_or("github.com"); auto url = fmt("https://api.%s/repos/%s/%s/tarball/%s", // FIXME: check if this is correct for self hosted instances - host_url, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), + host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), input.getRev()->to_string(Base16, false)); - addAccessToken(url); - - return url; + Headers headers = makeHeadersWithAuthTokens(host); + return DownloadUrl { url, headers }; } void clone(const Input & input, const Path & destDir) override { - auto host_url = maybeGetStrAttr(input.attrs, "url").value_or("github.com"); + auto host = maybeGetStrAttr(input.attrs, "host").value_or("github.com"); Input::fromURL(fmt("git+ssh://git@%s/%s/%s.git", - host_url, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"))) + host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"))) .applyOverrides(input.getRef().value_or("HEAD"), input.getRev()) .clone(destDir); } @@ -235,48 +270,71 @@ struct GitLabInputScheme : GitArchiveInputScheme { std::string type() override { return "gitlab"; } + std::optional> accessHeaderFromToken(const std::string & token) const override + { + // Gitlab supports 4 kinds of authorization, two of which are + // relevant here: OAuth2 and PAT (Private Access Token). The + // user can indicate which token is used by specifying the + // token as :, where type is "OAuth2" or "PAT". + // If the is unrecognized, this will fall back to + // treating this simply has :. See + // https://docs.gitlab.com/12.10/ee/api/README.html#authentication + auto fldsplit = token.find_first_of(':'); + // n.b. C++20 would allow: if (token.starts_with("OAuth2:")) ... 
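
A minimal standalone sketch of the token-to-header convention described in the comments above: the GitHub scheme sends "Authorization: token <token>", while the GitLab scheme accepts an "OAuth2:<token>" or "PAT:<token>" prefix on the configured access token. The helper name and the example token values below are illustrative assumptions, not code from this change:

#include <optional>
#include <string>
#include <utility>

using Header = std::pair<std::string, std::string>;

// Sketch of the GitLab-style prefix handling shown in this hunk, using plain
// std::string instead of the fetcher's Headers type.
std::optional<Header> gitlabHeaderFromToken(const std::string & token)
{
    auto sep = token.find_first_of(':');
    auto kind = token.substr(0, sep);      // "OAuth2", "PAT", or the whole token
    if (kind == "OAuth2")
        return Header{"Authorization", "Bearer " + token.substr(sep + 1)};
    if (kind == "PAT")
        return Header{"Private-token", token.substr(sep + 1)};
    return std::nullopt;                   // unrecognized prefix: caller warns and sends no header
}

// e.g. "PAT:glpat-abc123"    -> ("Private-token", "glpat-abc123")
//      "OAuth2:glpat-abc123" -> ("Authorization", "Bearer glpat-abc123")
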
+ if ("OAuth2" == token.substr(0, fldsplit)) + return std::make_pair("Authorization", fmt("Bearer %s", token.substr(fldsplit+1))); + if ("PAT" == token.substr(0, fldsplit)) + return std::make_pair("Private-token", token.substr(fldsplit+1)); + warn("Unrecognized GitLab token type %s", token.substr(0, fldsplit)); + return std::nullopt; + } + Hash getRevFromRef(nix::ref store, const Input & input) const override { - auto host_url = maybeGetStrAttr(input.attrs, "host").value_or("gitlab.com"); + auto host = maybeGetStrAttr(input.attrs, "host").value_or("gitlab.com"); + // See rate limiting note below auto url = fmt("https://%s/api/v4/projects/%s%%2F%s/repository/commits?ref_name=%s", - host_url, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), *input.getRef()); + host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), *input.getRef()); + + Headers headers = makeHeadersWithAuthTokens(host); + auto json = nlohmann::json::parse( readFile( store->toRealPath( - downloadFile(store, url, "source", false).storePath))); + downloadFile(store, url, "source", false, headers).storePath))); auto rev = Hash::parseAny(std::string(json[0]["id"]), htSHA1); debug("HEAD revision for '%s' is %s", url, rev.gitRev()); return rev; } - std::string getDownloadUrl(const Input & input) const override + DownloadUrl getDownloadUrl(const Input & input) const override { - // FIXME: This endpoint has a rate limit threshold of 5 requests per minute - auto host_url = maybeGetStrAttr(input.attrs, "url").value_or("gitlab.com"); + // This endpoint has a rate limit threshold that may be + // server-specific and vary based whether the user is + // authenticated via an accessToken or not, but the usual rate + // is 10 reqs/sec/ip-addr. See + // https://docs.gitlab.com/ee/user/gitlab_com/index.html#gitlabcom-specific-rate-limits + auto host = maybeGetStrAttr(input.attrs, "host").value_or("gitlab.com"); auto url = fmt("https://%s/api/v4/projects/%s%%2F%s/repository/archive.tar.gz?sha=%s", - host_url, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), + host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), input.getRev()->to_string(Base16, false)); - /* # FIXME: add privat token auth (`curl --header "PRIVATE-TOKEN: "`) - std::string accessToken = settings.githubAccessToken.get(); - if (accessToken != "") - url += "?access_token=" + accessToken;*/ - - return url; + Headers headers = makeHeadersWithAuthTokens(host); + return DownloadUrl { url, headers }; } void clone(const Input & input, const Path & destDir) override { - auto host_url = maybeGetStrAttr(input.attrs, "url").value_or("gitlab.com"); + auto host = maybeGetStrAttr(input.attrs, "host").value_or("gitlab.com"); // FIXME: get username somewhere Input::fromURL(fmt("git+ssh://git@%s/%s/%s.git", - host_url, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"))) + host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"))) .applyOverrides(input.getRef().value_or("HEAD"), input.getRev()) .clone(destDir); } }; -static auto r1 = OnStartup([] { registerInputScheme(std::make_unique()); }); -static auto r2 = OnStartup([] { registerInputScheme(std::make_unique()); }); +static auto rGitHubInputScheme = OnStartup([] { registerInputScheme(std::make_unique()); }); +static auto rGitLabInputScheme = OnStartup([] { registerInputScheme(std::make_unique()); }); } diff --git a/src/libfetchers/indirect.cc b/src/libfetchers/indirect.cc index 74332ae3d..10e59919a 100644 --- a/src/libfetchers/indirect.cc +++ 
b/src/libfetchers/indirect.cc @@ -100,6 +100,6 @@ struct IndirectInputScheme : InputScheme } }; -static auto r1 = OnStartup([] { registerInputScheme(std::make_unique()); }); +static auto rIndirectInputScheme = OnStartup([] { registerInputScheme(std::make_unique()); }); } diff --git a/src/libfetchers/mercurial.cc b/src/libfetchers/mercurial.cc index d80c2ea7a..7d3d52751 100644 --- a/src/libfetchers/mercurial.cc +++ b/src/libfetchers/mercurial.cc @@ -293,6 +293,6 @@ struct MercurialInputScheme : InputScheme } }; -static auto r1 = OnStartup([] { registerInputScheme(std::make_unique()); }); +static auto rMercurialInputScheme = OnStartup([] { registerInputScheme(std::make_unique()); }); } diff --git a/src/libfetchers/path.cc b/src/libfetchers/path.cc index 99d4b4e8f..bcb904c0d 100644 --- a/src/libfetchers/path.cc +++ b/src/libfetchers/path.cc @@ -102,6 +102,6 @@ struct PathInputScheme : InputScheme } }; -static auto r1 = OnStartup([] { registerInputScheme(std::make_unique()); }); +static auto rPathInputScheme = OnStartup([] { registerInputScheme(std::make_unique()); }); } diff --git a/src/libfetchers/registry.cc b/src/libfetchers/registry.cc index 4367ee810..2426882ca 100644 --- a/src/libfetchers/registry.cc +++ b/src/libfetchers/registry.cc @@ -3,6 +3,7 @@ #include "util.hh" #include "globals.hh" #include "store-api.hh" +#include "local-fs-store.hh" #include diff --git a/src/libfetchers/tarball.cc b/src/libfetchers/tarball.cc index a2d16365e..8c0f20475 100644 --- a/src/libfetchers/tarball.cc +++ b/src/libfetchers/tarball.cc @@ -5,6 +5,7 @@ #include "store-api.hh" #include "archive.hh" #include "tarfile.hh" +#include "types.hh" namespace nix::fetchers { @@ -12,7 +13,8 @@ DownloadFileResult downloadFile( ref store, const std::string & url, const std::string & name, - bool immutable) + bool immutable, + const Headers & headers) { // FIXME: check store @@ -37,6 +39,7 @@ DownloadFileResult downloadFile( return useCached(); FileTransferRequest request(url); + request.headers = headers; if (cached) request.expectedETag = getStrAttr(cached->infoAttrs, "etag"); FileTransferResult res; @@ -111,7 +114,8 @@ std::pair downloadTarball( ref store, const std::string & url, const std::string & name, - bool immutable) + bool immutable, + const Headers & headers) { Attrs inAttrs({ {"type", "tarball"}, @@ -127,7 +131,7 @@ std::pair downloadTarball( getIntAttr(cached->infoAttrs, "lastModified") }; - auto res = downloadFile(store, url, name, immutable); + auto res = downloadFile(store, url, name, immutable, headers); std::optional unpackedStorePath; time_t lastModified; @@ -227,6 +231,6 @@ struct TarballInputScheme : InputScheme } }; -static auto r1 = OnStartup([] { registerInputScheme(std::make_unique()); }); +static auto rTarballInputScheme = OnStartup([] { registerInputScheme(std::make_unique()); }); } diff --git a/src/libmain/common-args.cc b/src/libmain/common-args.cc index 3411e2d7a..9151a0344 100644 --- a/src/libmain/common-args.cc +++ b/src/libmain/common-args.cc @@ -44,7 +44,7 @@ MixCommonArgs::MixCommonArgs(const string & programName) globalConfig.getSettings(settings); for (auto & s : settings) if (hasPrefix(s.first, prefix)) - completions->insert(s.first); + completions->add(s.first, s.second.description); } } }); diff --git a/src/libmain/progress-bar.cc b/src/libmain/progress-bar.cc index be3c06a38..07b45b3b5 100644 --- a/src/libmain/progress-bar.cc +++ b/src/libmain/progress-bar.cc @@ -256,7 +256,7 @@ public: } else if (type == resBuildLogLine || type == resPostBuildLogLine) { - auto lastLine = 
trim(getS(fields, 0)); + auto lastLine = chomp(getS(fields, 0)); if (!lastLine.empty()) { auto i = state->its.find(act); assert(i != state->its.end()); diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc index 22ae51e47..2247aeca4 100644 --- a/src/libmain/shared.cc +++ b/src/libmain/shared.cc @@ -386,18 +386,12 @@ RunPager::~RunPager() } -string showBytes(uint64_t bytes) -{ - return (format("%.2f MiB") % (bytes / (1024.0 * 1024.0))).str(); -} - - PrintFreed::~PrintFreed() { if (show) - std::cout << format("%1% store paths deleted, %2% freed\n") - % results.paths.size() - % showBytes(results.bytesFreed); + std::cout << fmt("%d store paths deleted, %s freed\n", + results.paths.size(), + showBytes(results.bytesFreed)); } Exit::~Exit() { } diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc index ebc0bd6a4..f6224d6a0 100644 --- a/src/libstore/binary-cache-store.cc +++ b/src/libstore/binary-cache-store.cc @@ -142,17 +142,10 @@ struct FileSource : FdSource } }; -void BinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource, - RepairFlag repair, CheckSigsFlag checkSigs) +ref BinaryCacheStore::addToStoreCommon( + Source & narSource, RepairFlag repair, CheckSigsFlag checkSigs, + std::function mkInfo) { - assert(info.narSize); - - if (!repair && isValidPath(info.path)) { - // FIXME: copyNAR -> null sink - narSource.drain(); - return; - } - auto [fdTemp, fnTemp] = createTempFile(); AutoDelete autoDelete(fnTemp); @@ -162,13 +155,15 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource /* Read the NAR simultaneously into a CompressionSink+FileSink (to write the compressed NAR to disk), into a HashSink (to get the NAR hash), and into a NarAccessor (to get the NAR listing). */ - HashSink fileHashSink(htSHA256); + HashSink fileHashSink { htSHA256 }; std::shared_ptr narAccessor; + HashSink narHashSink { htSHA256 }; { FdSink fileSink(fdTemp.get()); - TeeSink teeSink(fileSink, fileHashSink); - auto compressionSink = makeCompressionSink(compression, teeSink); - TeeSource teeSource(narSource, *compressionSink); + TeeSink teeSinkCompressed { fileSink, fileHashSink }; + auto compressionSink = makeCompressionSink(compression, teeSinkCompressed); + TeeSink teeSinkUncompressed { *compressionSink, narHashSink }; + TeeSource teeSource { narSource, teeSinkUncompressed }; narAccessor = makeNarAccessor(teeSource); compressionSink->finish(); fileSink.flush(); @@ -176,9 +171,8 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource auto now2 = std::chrono::steady_clock::now(); + auto info = mkInfo(narHashSink.finish()); auto narInfo = make_ref(info); - narInfo->narSize = info.narSize; - narInfo->narHash = info.narHash; narInfo->compression = compression; auto [fileHash, fileSize] = fileHashSink.finish(); narInfo->fileHash = fileHash; @@ -300,6 +294,41 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource writeNarInfo(narInfo); stats.narInfoWrite++; + + return narInfo; +} + +void BinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource, + RepairFlag repair, CheckSigsFlag checkSigs) +{ + if (!repair && isValidPath(info.path)) { + // FIXME: copyNAR -> null sink + narSource.drain(); + return; + } + + addToStoreCommon(narSource, repair, checkSigs, {[&](HashResult nar) { + /* FIXME reinstate these, once we can correctly do hash modulo sink as + needed. We need to throw here in case we uploaded a corrupted store path. 
*/ + // assert(info.narHash == nar.first); + // assert(info.narSize == nar.second); + return info; + }}); +} + +StorePath BinaryCacheStore::addToStoreFromDump(Source & dump, const string & name, + FileIngestionMethod method, HashType hashAlgo, RepairFlag repair) +{ + if (method != FileIngestionMethod::Recursive || hashAlgo != htSHA256) + unsupported("addToStoreFromDump"); + return addToStoreCommon(dump, repair, CheckSigs, [&](HashResult nar) { + ValidPathInfo info { + makeFixedOutputPath(method, nar.first, name), + nar.first, + }; + info.narSize = nar.second; + return info; + })->path; } bool BinaryCacheStore::isValidPathUncached(const StorePath & storePath) @@ -367,50 +396,52 @@ void BinaryCacheStore::queryPathInfoUncached(const StorePath & storePath, StorePath BinaryCacheStore::addToStore(const string & name, const Path & srcPath, FileIngestionMethod method, HashType hashAlgo, PathFilter & filter, RepairFlag repair) { - // FIXME: some cut&paste from LocalStore::addToStore(). + /* FIXME: Make BinaryCacheStore::addToStoreCommon support + non-recursive+sha256 so we can just use the default + implementation of this method in terms of addToStoreFromDump. */ - /* Read the whole path into memory. This is not a very scalable - method for very large paths, but `copyPath' is mainly used for - small files. */ - StringSink sink; - std::optional h; + HashSink sink { hashAlgo }; if (method == FileIngestionMethod::Recursive) { dumpPath(srcPath, sink, filter); - h = hashString(hashAlgo, *sink.s); } else { - auto s = readFile(srcPath); - dumpString(s, sink); - h = hashString(hashAlgo, s); + readFile(srcPath, sink); } + auto h = sink.finish().first; - ValidPathInfo info { - makeFixedOutputPath(method, *h, name), - Hash::dummy, // Will be fixed in addToStore, which recomputes nar hash - }; - - auto source = StringSource { *sink.s }; - addToStore(info, source, repair, CheckSigs); - - return std::move(info.path); + auto source = sinkToSource([&](Sink & sink) { + dumpPath(srcPath, sink, filter); + }); + return addToStoreCommon(*source, repair, CheckSigs, [&](HashResult nar) { + ValidPathInfo info { + makeFixedOutputPath(method, h, name), + nar.first, + }; + info.narSize = nar.second; + info.ca = FixedOutputHash { + .method = method, + .hash = h, + }; + return info; + })->path; } StorePath BinaryCacheStore::addTextToStore(const string & name, const string & s, const StorePathSet & references, RepairFlag repair) { - ValidPathInfo info { - computeStorePathForText(name, s, references), - Hash::dummy, // Will be fixed in addToStore, which recomputes nar hash - }; - info.references = references; + auto textHash = hashString(htSHA256, s); + auto path = makeTextPath(name, textHash, references); - if (repair || !isValidPath(info.path)) { - StringSink sink; - dumpString(s, sink); - auto source = StringSource { *sink.s }; - addToStore(info, source, repair, CheckSigs); - } + if (!repair && isValidPath(path)) + return path; - return std::move(info.path); + auto source = StringSource { s }; + return addToStoreCommon(source, repair, CheckSigs, [&](HashResult nar) { + ValidPathInfo info { path, nar.first }; + info.narSize = nar.second; + info.ca = TextHash { textHash }; + info.references = references; + return info; + })->path; } ref BinaryCacheStore::getFSAccessor() diff --git a/src/libstore/binary-cache-store.hh b/src/libstore/binary-cache-store.hh index 4b779cdd4..5224d7ec8 100644 --- a/src/libstore/binary-cache-store.hh +++ b/src/libstore/binary-cache-store.hh @@ -72,6 +72,10 @@ private: void writeNarInfo(ref narInfo); + 
ref addToStoreCommon( + Source & narSource, RepairFlag repair, CheckSigsFlag checkSigs, + std::function mkInfo); + public: bool isValidPathUncached(const StorePath & path) override; @@ -85,6 +89,9 @@ public: void addToStore(const ValidPathInfo & info, Source & narSource, RepairFlag repair, CheckSigsFlag checkSigs) override; + StorePath addToStoreFromDump(Source & dump, const string & name, + FileIngestionMethod method, HashType hashAlgo, RepairFlag repair) override; + StorePath addToStore(const string & name, const Path & srcPath, FileIngestionMethod method, HashType hashAlgo, PathFilter & filter, RepairFlag repair) override; diff --git a/src/libstore/build.cc b/src/libstore/build/derivation-goal.cc similarity index 69% rename from src/libstore/build.cc rename to src/libstore/build/derivation-goal.cc index db7dbc17e..fda05f0e9 100644 --- a/src/libstore/build.cc +++ b/src/libstore/build/derivation-goal.cc @@ -1,53 +1,36 @@ -#include "references.hh" -#include "pathlocks.hh" -#include "globals.hh" -#include "local-store.hh" -#include "util.hh" -#include "archive.hh" -#include "affinity.hh" +#include "derivation-goal.hh" +#include "hook-instance.hh" +#include "worker.hh" #include "builtins.hh" #include "builtins/buildenv.hh" -#include "filetransfer.hh" +#include "references.hh" #include "finally.hh" -#include "compression.hh" +#include "util.hh" +#include "archive.hh" #include "json.hh" -#include "nar-info.hh" -#include "parsed-derivations.hh" -#include "machines.hh" +#include "compression.hh" #include "daemon.hh" #include "worker-protocol.hh" #include "topo-sort.hh" #include "callback.hh" -#include -#include -#include -#include -#include -#include -#include #include #include -#include -#include -#include #include -#include -#include -#include #include #include -#include #include -#include -#include -#include +#include #include -#include +#include +#include +#include +#include -#include -#include +#if HAVE_STATVFS +#include +#endif /* Includes required for chroot support. */ #if __linux__ @@ -67,412 +50,13 @@ #define pivot_root(new_root, put_old) (syscall(SYS_pivot_root, new_root, put_old)) #endif -#if HAVE_STATVFS -#include -#endif +#include +#include #include - namespace nix { -using std::map; - - -static string pathNullDevice = "/dev/null"; - - -/* Forward definition. */ -class Worker; -struct HookInstance; - - -/* A pointer to a goal. */ -struct Goal; -class DerivationGoal; -typedef std::shared_ptr GoalPtr; -typedef std::weak_ptr WeakGoalPtr; - -struct CompareGoalPtrs { - bool operator() (const GoalPtr & a, const GoalPtr & b) const; -}; - -/* Set of goals. */ -typedef set Goals; -typedef list WeakGoals; - -/* A map of paths to goals (and the other way around). */ -typedef std::map WeakGoalMap; - - - -struct Goal : public std::enable_shared_from_this -{ - typedef enum {ecBusy, ecSuccess, ecFailed, ecNoSubstituters, ecIncompleteClosure} ExitCode; - - /* Backlink to the worker. */ - Worker & worker; - - /* Goals that this goal is waiting for. */ - Goals waitees; - - /* Goals waiting for this one to finish. Must use weak pointers - here to prevent cycles. */ - WeakGoals waiters; - - /* Number of goals we are/were waiting for that have failed. */ - unsigned int nrFailed; - - /* Number of substitution goals we are/were waiting for that - failed because there are no substituters. */ - unsigned int nrNoSubstituters; - - /* Number of substitution goals we are/were waiting for that - failed because othey had unsubstitutable references. 
*/ - unsigned int nrIncompleteClosure; - - /* Name of this goal for debugging purposes. */ - string name; - - /* Whether the goal is finished. */ - ExitCode exitCode; - - /* Exception containing an error message, if any. */ - std::optional ex; - - Goal(Worker & worker) : worker(worker) - { - nrFailed = nrNoSubstituters = nrIncompleteClosure = 0; - exitCode = ecBusy; - } - - virtual ~Goal() - { - trace("goal destroyed"); - } - - virtual void work() = 0; - - void addWaitee(GoalPtr waitee); - - virtual void waiteeDone(GoalPtr waitee, ExitCode result); - - virtual void handleChildOutput(int fd, const string & data) - { - abort(); - } - - virtual void handleEOF(int fd) - { - abort(); - } - - void trace(const FormatOrString & fs); - - string getName() - { - return name; - } - - /* Callback in case of a timeout. It should wake up its waiters, - get rid of any running child processes that are being monitored - by the worker (important!), etc. */ - virtual void timedOut(Error && ex) = 0; - - virtual string key() = 0; - - void amDone(ExitCode result, std::optional ex = {}); -}; - - -bool CompareGoalPtrs::operator() (const GoalPtr & a, const GoalPtr & b) const { - string s1 = a->key(); - string s2 = b->key(); - return s1 < s2; -} - - -typedef std::chrono::time_point steady_time_point; - - -/* A mapping used to remember for each child process to what goal it - belongs, and file descriptors for receiving log data and output - path creation commands. */ -struct Child -{ - WeakGoalPtr goal; - Goal * goal2; // ugly hackery - set fds; - bool respectTimeouts; - bool inBuildSlot; - steady_time_point lastOutput; /* time we last got output on stdout/stderr */ - steady_time_point timeStarted; -}; - - -/* The worker class. */ -class Worker -{ -private: - - /* Note: the worker should only have strong pointers to the - top-level goals. */ - - /* The top-level goals of the worker. */ - Goals topGoals; - - /* Goals that are ready to do some work. */ - WeakGoals awake; - - /* Goals waiting for a build slot. */ - WeakGoals wantingToBuild; - - /* Child processes currently running. */ - std::list children; - - /* Number of build slots occupied. This includes local builds and - substitutions but not remote builds via the build hook. */ - unsigned int nrLocalBuilds; - - /* Maps used to prevent multiple instantiations of a goal for the - same derivation / path. */ - WeakGoalMap derivationGoals; - WeakGoalMap substitutionGoals; - - /* Goals waiting for busy paths to be unlocked. */ - WeakGoals waitingForAnyGoal; - - /* Goals sleeping for a few seconds (polling a lock). */ - WeakGoals waitingForAWhile; - - /* Last time the goals in `waitingForAWhile' where woken up. */ - steady_time_point lastWokenUp; - - /* Cache for pathContentsGood(). */ - std::map pathContentsGoodCache; - -public: - - const Activity act; - const Activity actDerivations; - const Activity actSubstitutions; - - /* Set if at least one derivation had a BuildError (i.e. permanent - failure). */ - bool permanentFailure; - - /* Set if at least one derivation had a timeout. */ - bool timedOut; - - /* Set if at least one derivation fails with a hash mismatch. */ - bool hashMismatch; - - /* Set if at least one derivation is not deterministic in check mode. 
*/ - bool checkMismatch; - - LocalStore & store; - - std::unique_ptr hook; - - uint64_t expectedBuilds = 0; - uint64_t doneBuilds = 0; - uint64_t failedBuilds = 0; - uint64_t runningBuilds = 0; - - uint64_t expectedSubstitutions = 0; - uint64_t doneSubstitutions = 0; - uint64_t failedSubstitutions = 0; - uint64_t runningSubstitutions = 0; - uint64_t expectedDownloadSize = 0; - uint64_t doneDownloadSize = 0; - uint64_t expectedNarSize = 0; - uint64_t doneNarSize = 0; - - /* Whether to ask the build hook if it can build a derivation. If - it answers with "decline-permanently", we don't try again. */ - bool tryBuildHook = true; - - Worker(LocalStore & store); - ~Worker(); - - /* Make a goal (with caching). */ - GoalPtr makeDerivationGoal(const StorePath & drvPath, const StringSet & wantedOutputs, BuildMode buildMode = bmNormal); - std::shared_ptr makeBasicDerivationGoal(const StorePath & drvPath, - const BasicDerivation & drv, BuildMode buildMode = bmNormal); - GoalPtr makeSubstitutionGoal(const StorePath & storePath, RepairFlag repair = NoRepair, std::optional ca = std::nullopt); - - /* Remove a dead goal. */ - void removeGoal(GoalPtr goal); - - /* Wake up a goal (i.e., there is something for it to do). */ - void wakeUp(GoalPtr goal); - - /* Return the number of local build and substitution processes - currently running (but not remote builds via the build - hook). */ - unsigned int getNrLocalBuilds(); - - /* Registers a running child process. `inBuildSlot' means that - the process counts towards the jobs limit. */ - void childStarted(GoalPtr goal, const set & fds, - bool inBuildSlot, bool respectTimeouts); - - /* Unregisters a running child process. `wakeSleepers' should be - false if there is no sense in waking up goals that are sleeping - because they can't run yet (e.g., there is no free build slot, - or the hook would still say `postpone'). */ - void childTerminated(Goal * goal, bool wakeSleepers = true); - - /* Put `goal' to sleep until a build slot becomes available (which - might be right away). */ - void waitForBuildSlot(GoalPtr goal); - - /* Wait for any goal to finish. Pretty indiscriminate way to - wait for some resource that some other goal is holding. */ - void waitForAnyGoal(GoalPtr goal); - - /* Wait for a few seconds and then retry this goal. Used when - waiting for a lock held by another process. This kind of - polling is inefficient, but POSIX doesn't really provide a way - to wait for multiple locks in the main select() loop. */ - void waitForAWhile(GoalPtr goal); - - /* Loop until the specified top-level goals have finished. */ - void run(const Goals & topGoals); - - /* Wait for input to become available. */ - void waitForInput(); - - unsigned int exitStatus(); - - /* Check whether the given valid path exists and has the right - contents. */ - bool pathContentsGood(const StorePath & path); - - void markContentsGood(const StorePath & path); - - void updateProgress() - { - actDerivations.progress(doneBuilds, expectedBuilds + doneBuilds, runningBuilds, failedBuilds); - actSubstitutions.progress(doneSubstitutions, expectedSubstitutions + doneSubstitutions, runningSubstitutions, failedSubstitutions); - act.setExpected(actFileTransfer, expectedDownloadSize + doneDownloadSize); - act.setExpected(actCopyPath, expectedNarSize + doneNarSize); - } -}; - - -////////////////////////////////////////////////////////////////////// - - -void addToWeakGoals(WeakGoals & goals, GoalPtr p) -{ - // FIXME: necessary? 
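
The removed block above declares the Worker scheduling interface that this commit starts moving into src/libstore/build/. A rough sketch of how a caller drives a single derivation build, based only on the signatures shown here (the include path and the calling context are assumptions):

#include "build/worker.hh"   // location per this commit's new layout (assumed)

// Build one derivation and report failure; error handling is intentionally minimal.
void buildOne(nix::LocalStore & store, const nix::StorePath & drvPath)
{
    nix::Worker worker(store);
    auto goal = worker.makeDerivationGoal(drvPath, {});   // empty set = all outputs, bmNormal
    worker.run(nix::Goals{goal});                         // loop until the top-level goal finishes
    if (goal->exitCode != nix::Goal::ecSuccess)
        throw nix::Error("build of '%s' failed", store.printStorePath(drvPath));
}
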
- // FIXME: O(n) - for (auto & i : goals) - if (i.lock() == p) return; - goals.push_back(p); -} - - -void Goal::addWaitee(GoalPtr waitee) -{ - waitees.insert(waitee); - addToWeakGoals(waitee->waiters, shared_from_this()); -} - - -void Goal::waiteeDone(GoalPtr waitee, ExitCode result) -{ - assert(waitees.find(waitee) != waitees.end()); - waitees.erase(waitee); - - trace(fmt("waitee '%s' done; %d left", waitee->name, waitees.size())); - - if (result == ecFailed || result == ecNoSubstituters || result == ecIncompleteClosure) ++nrFailed; - - if (result == ecNoSubstituters) ++nrNoSubstituters; - - if (result == ecIncompleteClosure) ++nrIncompleteClosure; - - if (waitees.empty() || (result == ecFailed && !settings.keepGoing)) { - - /* If we failed and keepGoing is not set, we remove all - remaining waitees. */ - for (auto & goal : waitees) { - WeakGoals waiters2; - for (auto & j : goal->waiters) - if (j.lock() != shared_from_this()) waiters2.push_back(j); - goal->waiters = waiters2; - } - waitees.clear(); - - worker.wakeUp(shared_from_this()); - } -} - - -void Goal::amDone(ExitCode result, std::optional ex) -{ - trace("done"); - assert(exitCode == ecBusy); - assert(result == ecSuccess || result == ecFailed || result == ecNoSubstituters || result == ecIncompleteClosure); - exitCode = result; - - if (ex) { - if (!waiters.empty()) - logError(ex->info()); - else - this->ex = std::move(*ex); - } - - for (auto & i : waiters) { - GoalPtr goal = i.lock(); - if (goal) goal->waiteeDone(shared_from_this(), result); - } - waiters.clear(); - worker.removeGoal(shared_from_this()); -} - - -void Goal::trace(const FormatOrString & fs) -{ - debug("%1%: %2%", name, fs.s); -} - - - -////////////////////////////////////////////////////////////////////// - - -/* Common initialisation performed in child processes. */ -static void commonChildInit(Pipe & logPipe) -{ - restoreSignals(); - - /* Put the child in a separate session (and thus a separate - process group) so that it has no controlling terminal (meaning - that e.g. ssh cannot open /dev/tty) and it doesn't receive - terminal signals. */ - if (setsid() == -1) - throw SysError("creating a new session"); - - /* Dup the write side of the logger pipe into stderr. */ - if (dup2(logPipe.writeSide.get(), STDERR_FILENO) == -1) - throw SysError("cannot pipe standard error into log file"); - - /* Dup stderr to stdout. */ - if (dup2(STDERR_FILENO, STDOUT_FILENO) == -1) - throw SysError("cannot dup stderr into stdout"); - - /* Reroute stdin to /dev/null. 
*/ - int fdDevNull = open(pathNullDevice.c_str(), O_RDWR); - if (fdDevNull == -1) - throw SysError("cannot open '%1%'", pathNullDevice); - if (dup2(fdDevNull, STDIN_FILENO) == -1) - throw SysError("cannot dup null device into stdin"); - close(fdDevNull); -} - void handleDiffHook( uid_t uid, uid_t gid, const Path & tryA, const Path & tryB, @@ -505,588 +89,10 @@ void handleDiffHook( } } -////////////////////////////////////////////////////////////////////// - - -class UserLock -{ -private: - Path fnUserLock; - AutoCloseFD fdUserLock; - - bool isEnabled = false; - string user; - uid_t uid = 0; - gid_t gid = 0; - std::vector supplementaryGIDs; - -public: - UserLock(); - - void kill(); - - string getUser() { return user; } - uid_t getUID() { assert(uid); return uid; } - uid_t getGID() { assert(gid); return gid; } - std::vector getSupplementaryGIDs() { return supplementaryGIDs; } - - bool findFreeUser(); - - bool enabled() { return isEnabled; } - -}; - - -UserLock::UserLock() -{ - assert(settings.buildUsersGroup != ""); - createDirs(settings.nixStateDir + "/userpool"); -} - -bool UserLock::findFreeUser() { - if (enabled()) return true; - - /* Get the members of the build-users-group. */ - struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str()); - if (!gr) - throw Error("the group '%1%' specified in 'build-users-group' does not exist", - settings.buildUsersGroup); - gid = gr->gr_gid; - - /* Copy the result of getgrnam. */ - Strings users; - for (char * * p = gr->gr_mem; *p; ++p) { - debug("found build user '%1%'", *p); - users.push_back(*p); - } - - if (users.empty()) - throw Error("the build users group '%1%' has no members", - settings.buildUsersGroup); - - /* Find a user account that isn't currently in use for another - build. */ - for (auto & i : users) { - debug("trying user '%1%'", i); - - struct passwd * pw = getpwnam(i.c_str()); - if (!pw) - throw Error("the user '%1%' in the group '%2%' does not exist", - i, settings.buildUsersGroup); - - - fnUserLock = (format("%1%/userpool/%2%") % settings.nixStateDir % pw->pw_uid).str(); - - AutoCloseFD fd = open(fnUserLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600); - if (!fd) - throw SysError("opening user lock '%1%'", fnUserLock); - - if (lockFile(fd.get(), ltWrite, false)) { - fdUserLock = std::move(fd); - user = i; - uid = pw->pw_uid; - - /* Sanity check... */ - if (uid == getuid() || uid == geteuid()) - throw Error("the Nix user should not be a member of '%1%'", - settings.buildUsersGroup); - -#if __linux__ - /* Get the list of supplementary groups of this build user. This - is usually either empty or contains a group such as "kvm". */ - supplementaryGIDs.resize(10); - int ngroups = supplementaryGIDs.size(); - int err = getgrouplist(pw->pw_name, pw->pw_gid, - supplementaryGIDs.data(), &ngroups); - if (err == -1) - throw Error("failed to get list of supplementary groups for '%1%'", pw->pw_name); - - supplementaryGIDs.resize(ngroups); -#endif - - isEnabled = true; - return true; - } - } - - return false; -} - -void UserLock::kill() -{ - killUser(uid); -} - - -////////////////////////////////////////////////////////////////////// - - -struct HookInstance -{ - /* Pipes for talking to the build hook. */ - Pipe toHook; - - /* Pipe for the hook's standard output/error. */ - Pipe fromHook; - - /* Pipe for the builder's standard output/error. */ - Pipe builderOut; - - /* The process ID of the hook. 
*/ - Pid pid; - - FdSink sink; - - std::map activities; - - HookInstance(); - - ~HookInstance(); -}; - - -HookInstance::HookInstance() -{ - debug("starting build hook '%s'", settings.buildHook); - - /* Create a pipe to get the output of the child. */ - fromHook.create(); - - /* Create the communication pipes. */ - toHook.create(); - - /* Create a pipe to get the output of the builder. */ - builderOut.create(); - - /* Fork the hook. */ - pid = startProcess([&]() { - - commonChildInit(fromHook); - - if (chdir("/") == -1) throw SysError("changing into /"); - - /* Dup the communication pipes. */ - if (dup2(toHook.readSide.get(), STDIN_FILENO) == -1) - throw SysError("dupping to-hook read side"); - - /* Use fd 4 for the builder's stdout/stderr. */ - if (dup2(builderOut.writeSide.get(), 4) == -1) - throw SysError("dupping builder's stdout/stderr"); - - /* Hack: pass the read side of that fd to allow build-remote - to read SSH error messages. */ - if (dup2(builderOut.readSide.get(), 5) == -1) - throw SysError("dupping builder's stdout/stderr"); - - Strings args = { - std::string(baseNameOf(settings.buildHook.get())), - std::to_string(verbosity), - }; - - execv(settings.buildHook.get().c_str(), stringsToCharPtrs(args).data()); - - throw SysError("executing '%s'", settings.buildHook); - }); - - pid.setSeparatePG(true); - fromHook.writeSide = -1; - toHook.readSide = -1; - - sink = FdSink(toHook.writeSide.get()); - std::map settings; - globalConfig.getSettings(settings); - for (auto & setting : settings) - sink << 1 << setting.first << setting.second.value; - sink << 0; -} - - -HookInstance::~HookInstance() -{ - try { - toHook.writeSide = -1; - if (pid != -1) pid.kill(); - } catch (...) { - ignoreException(); - } -} - - -////////////////////////////////////////////////////////////////////// - - -typedef enum {rpAccept, rpDecline, rpPostpone} HookReply; - -class SubstitutionGoal; - -/* Unless we are repairing, we don't both to test validity and just assume it, - so the choices are `Absent` or `Valid`. */ -enum struct PathStatus { - Corrupt, - Absent, - Valid, -}; - -struct InitialOutputStatus { - StorePath path; - PathStatus status; - /* Valid in the store, and additionally non-corrupt if we are repairing */ - bool isValid() const { - return status == PathStatus::Valid; - } - /* Merely present, allowed to be corrupt */ - bool isPresent() const { - return status == PathStatus::Corrupt - || status == PathStatus::Valid; - } -}; - -struct InitialOutput { - bool wanted; - std::optional known; -}; - -class DerivationGoal : public Goal -{ -private: - /* Whether to use an on-disk .drv file. */ - bool useDerivation; - - /* The path of the derivation. */ - StorePath drvPath; - - /* The specific outputs that we need to build. Empty means all of - them. */ - StringSet wantedOutputs; - - /* Whether additional wanted outputs have been added. */ - bool needRestart = false; - - /* Whether to retry substituting the outputs after building the - inputs. */ - bool retrySubstitution; - - /* The derivation stored at drvPath. */ - std::unique_ptr drv; - - std::unique_ptr parsedDrv; - - /* The remainder is state held during the build. */ - - /* Locks on (fixed) output paths. */ - PathLocks outputLocks; - - /* All input paths (that is, the union of FS closures of the - immediate input paths). */ - StorePathSet inputPaths; - - std::map initialOutputs; - - /* User selected for running the builder. */ - std::unique_ptr buildUser; - - /* The process ID of the builder. */ - Pid pid; - - /* The temporary directory. 
*/ - Path tmpDir; - - /* The path of the temporary directory in the sandbox. */ - Path tmpDirInSandbox; - - /* File descriptor for the log file. */ - AutoCloseFD fdLogFile; - std::shared_ptr logFileSink, logSink; - - /* Number of bytes received from the builder's stdout/stderr. */ - unsigned long logSize; - - /* The most recent log lines. */ - std::list logTail; - - std::string currentLogLine; - size_t currentLogLinePos = 0; // to handle carriage return - - std::string currentHookLine; - - /* Pipe for the builder's standard output/error. */ - Pipe builderOut; - - /* Pipe for synchronising updates to the builder namespaces. */ - Pipe userNamespaceSync; - - /* The mount namespace of the builder, used to add additional - paths to the sandbox as a result of recursive Nix calls. */ - AutoCloseFD sandboxMountNamespace; - - /* The build hook. */ - std::unique_ptr hook; - - /* Whether we're currently doing a chroot build. */ - bool useChroot = false; - - Path chrootRootDir; - - /* RAII object to delete the chroot directory. */ - std::shared_ptr autoDelChroot; - - /* The sort of derivation we are building. */ - DerivationType derivationType; - - /* Whether to run the build in a private network namespace. */ - bool privateNetwork = false; - - typedef void (DerivationGoal::*GoalState)(); - GoalState state; - - /* Stuff we need to pass to initChild(). */ - struct ChrootPath { - Path source; - bool optional; - ChrootPath(Path source = "", bool optional = false) - : source(source), optional(optional) - { } - }; - typedef map DirsInChroot; // maps target path to source path - DirsInChroot dirsInChroot; - - typedef map Environment; - Environment env; - -#if __APPLE__ - typedef string SandboxProfile; - SandboxProfile additionalSandboxProfile; -#endif - - /* Hash rewriting. */ - StringMap inputRewrites, outputRewrites; - typedef map RedirectedOutputs; - RedirectedOutputs redirectedOutputs; - - /* The outputs paths used during the build. - - - Input-addressed derivations or fixed content-addressed outputs are - sometimes built when some of their outputs already exist, and can not - be hidden via sandboxing. We use temporary locations instead and - rewrite after the build. Otherwise the regular predetermined paths are - put here. - - - Floating content-addressed derivations do not know their final build - output paths until the outputs are hashed, so random locations are - used, and then renamed. The randomness helps guard against hidden - self-references. - */ - OutputPathMap scratchOutputs; - - /* The final output paths of the build. - - - For input-addressed derivations, always the precomputed paths - - - For content-addressed derivations, calcuated from whatever the hash - ends up being. (Note that fixed outputs derivations that produce the - "wrong" output still install that data under its true content-address.) - */ - OutputPathMap finalOutputs; - - BuildMode buildMode; - - /* If we're repairing without a chroot, there may be outputs that - are valid but corrupt. So we redirect these outputs to - temporary paths. */ - StorePathSet redirectedBadOutputs; - - BuildResult result; - - /* The current round, if we're building multiple times. */ - size_t curRound = 1; - - size_t nrRounds; - - /* Path registration info from the previous round, if we're - building multiple times. Since this contains the hash, it - allows us to compare whether two rounds produced the same - result. 
*/ - std::map prevInfos; - - const uid_t sandboxUid = 1000; - const gid_t sandboxGid = 100; - - const static Path homeDir; - - std::unique_ptr> mcExpectedBuilds, mcRunningBuilds; - - std::unique_ptr act; - - /* Activity that denotes waiting for a lock. */ - std::unique_ptr actLock; - - std::map builderActivities; - - /* The remote machine on which we're building. */ - std::string machineName; - - /* The recursive Nix daemon socket. */ - AutoCloseFD daemonSocket; - - /* The daemon main thread. */ - std::thread daemonThread; - - /* The daemon worker threads. */ - std::vector daemonWorkerThreads; - - /* Paths that were added via recursive Nix calls. */ - StorePathSet addedPaths; - - /* Recursive Nix calls are only allowed to build or realize paths - in the original input closure or added via a recursive Nix call - (so e.g. you can't do 'nix-store -r /nix/store/' where - /nix/store/ is some arbitrary path in a binary cache). */ - bool isAllowed(const StorePath & path) - { - return inputPaths.count(path) || addedPaths.count(path); - } - - friend struct RestrictedStore; - -public: - DerivationGoal(const StorePath & drvPath, const StringSet & wantedOutputs, - Worker & worker, BuildMode buildMode = bmNormal); - DerivationGoal(const StorePath & drvPath, const BasicDerivation & drv, - Worker & worker, BuildMode buildMode = bmNormal); - ~DerivationGoal(); - - /* Whether we need to perform hash rewriting if there are valid output paths. */ - bool needsHashRewrite(); - - void timedOut(Error && ex) override; - - string key() override - { - /* Ensure that derivations get built in order of their name, - i.e. a derivation named "aardvark" always comes before - "baboon". And substitution goals always happen before - derivation goals (due to "b$"). */ - return "b$" + std::string(drvPath.name()) + "$" + worker.store.printStorePath(drvPath); - } - - void work() override; - - StorePath getDrvPath() - { - return drvPath; - } - - /* Add wanted outputs to an already existing derivation goal. */ - void addWantedOutputs(const StringSet & outputs); - - BuildResult getResult() { return result; } - -private: - /* The states. */ - void getDerivation(); - void loadDerivation(); - void haveDerivation(); - void outputsSubstitutionTried(); - void gaveUpOnSubstitution(); - void closureRepaired(); - void inputsRealised(); - void tryToBuild(); - void tryLocalBuild(); - void buildDone(); - - /* Is the build hook willing to perform the build? */ - HookReply tryBuildHook(); - - /* Start building a derivation. */ - void startBuilder(); - - /* Fill in the environment for the builder. */ - void initEnv(); - - /* Setup tmp dir location. */ - void initTmpDir(); - - /* Write a JSON file containing the derivation attributes. */ - void writeStructuredAttrs(); - - void startDaemon(); - - void stopDaemon(); - - /* Add 'path' to the set of paths that may be referenced by the - outputs, and make it appear in the sandbox. */ - void addDependency(const StorePath & path); - - /* Make a file owned by the builder. */ - void chownToBuilder(const Path & path); - - /* Run the builder's process. */ - void runChild(); - - friend int childEntry(void *); - - /* Check that the derivation outputs all exist and register them - as valid. */ - void registerOutputs(); - - /* Check that an output meets the requirements specified by the - 'outputChecks' attribute (or the legacy - '{allowed,disallowed}{References,Requisites}' attributes). */ - void checkOutputs(const std::map & outputs); - - /* Open a log file and a pipe to it. 
*/ - Path openLogFile(); - - /* Close the log file. */ - void closeLogFile(); - - /* Delete the temporary directory, if we have one. */ - void deleteTmpDir(bool force); - - /* Callback used by the worker to write to the log. */ - void handleChildOutput(int fd, const string & data) override; - void handleEOF(int fd) override; - void flushLine(); - - /* Wrappers around the corresponding Store methods that first consult the - derivation. This is currently needed because when there is no drv file - there also is no DB entry. */ - std::map> queryPartialDerivationOutputMap(); - OutputPathMap queryDerivationOutputMap(); - - /* Return the set of (in)valid paths. */ - void checkPathValidity(); - - /* Forcibly kill the child process, if any. */ - void killChild(); - - /* Create alternative path calculated from but distinct from the - input, so we can avoid overwriting outputs (or other store paths) - that already exist. */ - StorePath makeFallbackPath(const StorePath & path); - /* Make a path to another based on the output name along with the - derivation hash. */ - /* FIXME add option to randomize, so we can audit whether our - rewrites caught everything */ - StorePath makeFallbackPath(std::string_view outputName); - - void repairClosure(); - - void started(); - - void done( - BuildResult::Status status, - std::optional ex = {}); - - StorePathSet exportReferences(const StorePathSet & storePaths); -}; - - const Path DerivationGoal::homeDir = "/homeless-shelter"; - -DerivationGoal::DerivationGoal(const StorePath & drvPath, const StringSet & wantedOutputs, - Worker & worker, BuildMode buildMode) +DerivationGoal::DerivationGoal(const StorePath & drvPath, + const StringSet & wantedOutputs, Worker & worker, BuildMode buildMode) : Goal(worker) , useDerivation(true) , drvPath(drvPath) @@ -1094,7 +100,9 @@ DerivationGoal::DerivationGoal(const StorePath & drvPath, const StringSet & want , buildMode(buildMode) { state = &DerivationGoal::getDerivation; - name = fmt("building of '%s'", worker.store.printStorePath(this->drvPath)); + name = fmt( + "building of '%s' from .drv file", + StorePathWithOutputs { drvPath, wantedOutputs }.to_string(worker.store)); trace("created"); mcExpectedBuilds = std::make_unique>(worker.expectedBuilds); @@ -1103,15 +111,18 @@ DerivationGoal::DerivationGoal(const StorePath & drvPath, const StringSet & want DerivationGoal::DerivationGoal(const StorePath & drvPath, const BasicDerivation & drv, - Worker & worker, BuildMode buildMode) + const StringSet & wantedOutputs, Worker & worker, BuildMode buildMode) : Goal(worker) , useDerivation(false) , drvPath(drvPath) + , wantedOutputs(wantedOutputs) , buildMode(buildMode) { this->drv = std::make_unique(BasicDerivation(drv)); state = &DerivationGoal::haveDerivation; - name = fmt("building of %s", StorePathWithOutputs { drvPath, drv.outputNames() }.to_string(worker.store)); + name = fmt( + "building of '%s' from in-memory derivation", + StorePathWithOutputs { drvPath, drv.outputNames() }.to_string(worker.store)); trace("created"); mcExpectedBuilds = std::make_unique>(worker.expectedBuilds); @@ -1134,6 +145,16 @@ DerivationGoal::~DerivationGoal() } +string DerivationGoal::key() +{ + /* Ensure that derivations get built in order of their name, + i.e. a derivation named "aardvark" always comes before + "baboon". And substitution goals always happen before + derivation goals (due to "b$"). 
*/ + return "b$" + std::string(drvPath.name()) + "$" + worker.store.printStorePath(drvPath); +} + + inline bool DerivationGoal::needsHashRewrite() { #if __linux__ @@ -1464,8 +485,40 @@ void DerivationGoal::inputsRealised() /* Determine the full set of input paths. */ /* First, the input derivations. */ - if (useDerivation) - for (auto & [depDrvPath, wantedDepOutputs] : dynamic_cast(drv.get())->inputDrvs) { + if (useDerivation) { + auto & fullDrv = *dynamic_cast(drv.get()); + + if (!fullDrv.inputDrvs.empty() && fullDrv.type() == DerivationType::CAFloating) { + /* We are be able to resolve this derivation based on the + now-known results of dependencies. If so, we become a stub goal + aliasing that resolved derivation goal */ + std::optional attempt = fullDrv.tryResolve(worker.store); + assert(attempt); + Derivation drvResolved { *std::move(attempt) }; + + auto pathResolved = writeDerivation(worker.store, drvResolved); + /* Add to memotable to speed up downstream goal's queries with the + original derivation. */ + drvPathResolutions.lock()->insert_or_assign(drvPath, pathResolved); + + auto msg = fmt("Resolved derivation: '%s' -> '%s'", + worker.store.printStorePath(drvPath), + worker.store.printStorePath(pathResolved)); + act = std::make_unique(*logger, lvlInfo, actBuildWaiting, msg, + Logger::Fields { + worker.store.printStorePath(drvPath), + worker.store.printStorePath(pathResolved), + }); + + auto resolvedGoal = worker.makeDerivationGoal( + pathResolved, wantedOutputs, buildMode); + addWaitee(resolvedGoal); + + state = &DerivationGoal::resolvedFinished; + return; + } + + for (auto & [depDrvPath, wantedDepOutputs] : fullDrv.inputDrvs) { /* Add the relevant output closures of the input derivation `i' as input paths. Only add the closures of output paths that are specified as inputs. */ @@ -1485,6 +538,7 @@ void DerivationGoal::inputsRealised() worker.store.printStorePath(drvPath), j, worker.store.printStorePath(drvPath)); } } + } /* Second, the input sources. */ worker.store.computeFSClosure(drv->inputSrcs, inputPaths); @@ -1612,6 +666,13 @@ void DerivationGoal::tryToBuild() actLock.reset(); + state = &DerivationGoal::tryLocalBuild; + worker.wakeUp(shared_from_this()); +} + +void DerivationGoal::tryLocalBuild() { + bool buildLocally = buildMode != bmNormal || parsedDrv->willBuildLocally(worker.store); + /* Make sure that we are allowed to start a build. If this derivation prefers to be done locally, do it even if maxBuildJobs is 0. */ @@ -1622,12 +683,6 @@ void DerivationGoal::tryToBuild() return; } - state = &DerivationGoal::tryLocalBuild; - worker.wakeUp(shared_from_this()); -} - -void DerivationGoal::tryLocalBuild() { - /* If `build-users-group' is not empty, then we have to build as one of the members of that group. */ if (settings.buildUsersGroup != "" && getuid() == 0) { @@ -1942,6 +997,9 @@ void DerivationGoal::buildDone() done(BuildResult::Built); } +void DerivationGoal::resolvedFinished() { + done(BuildResult::Built); +} HookReply DerivationGoal::tryBuildHook() { @@ -2012,7 +1070,7 @@ HookReply DerivationGoal::tryBuildHook() /* Tell the hook all the inputs that have to be copied to the remote system. */ - writeStorePaths(worker.store, hook->sink, inputPaths); + worker_proto::write(worker.store, hook->sink, inputPaths); /* Tell the hooks the missing outputs that have to be copied back from the remote system. 
*/ @@ -2023,7 +1081,7 @@ HookReply DerivationGoal::tryBuildHook() if (buildMode != bmCheck && status.known->isValid()) continue; missingPaths.insert(status.known->path); } - writeStorePaths(worker.store, hook->sink, missingPaths); + worker_proto::write(worker.store, hook->sink, missingPaths); } hook->sink = FdSink(); @@ -2297,7 +1355,8 @@ void DerivationGoal::startBuilder() worker.store.computeFSClosure(worker.store.toStorePath(i.second.source).first, closure); } catch (InvalidPath & e) { } catch (Error & e) { - throw Error("while processing 'sandbox-paths': %s", e.what()); + e.addTrace({}, "while processing 'sandbox-paths'"); + throw; } for (auto & i : closure) { auto p = worker.store.printStorePath(i); @@ -2361,19 +1420,12 @@ void DerivationGoal::startBuilder() Samba-in-QEMU. */ createDirs(chrootRootDir + "/etc"); - writeFile(chrootRootDir + "/etc/passwd", fmt( - "root:x:0:0:Nix build user:%3%:/noshell\n" - "nixbld:x:%1%:%2%:Nix build user:%3%:/noshell\n" - "nobody:x:65534:65534:Nobody:/:/noshell\n", - sandboxUid, sandboxGid, settings.sandboxBuildDir)); - /* Declare the build user's group so that programs get a consistent view of the system (e.g., "id -gn"). */ writeFile(chrootRootDir + "/etc/group", - (format( - "root:x:0:\n" + fmt("root:x:0:\n" "nixbld:!:%1%:\n" - "nogroup:x:65534:\n") % sandboxGid).str()); + "nogroup:x:65534:\n", sandboxGid())); /* Create /etc/hosts with localhost entry. */ if (!(derivationIsImpure(derivationType))) @@ -2570,6 +1622,13 @@ void DerivationGoal::startBuilder() options.allowVfork = false; + Path maxUserNamespaces = "/proc/sys/user/max_user_namespaces"; + static bool userNamespacesEnabled = + pathExists(maxUserNamespaces) + && trim(readFile(maxUserNamespaces)) != "0"; + + usingUserNamespace = userNamespacesEnabled; + Pid helper = startProcess([&]() { /* Drop additional groups here because we can't do it @@ -2588,9 +1647,11 @@ void DerivationGoal::startBuilder() PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0); if (stack == MAP_FAILED) throw SysError("allocating stack"); - int flags = CLONE_NEWUSER | CLONE_NEWPID | CLONE_NEWNS | CLONE_NEWIPC | CLONE_NEWUTS | CLONE_PARENT | SIGCHLD; + int flags = CLONE_NEWPID | CLONE_NEWNS | CLONE_NEWIPC | CLONE_NEWUTS | CLONE_PARENT | SIGCHLD; if (privateNetwork) flags |= CLONE_NEWNET; + if (usingUserNamespace) + flags |= CLONE_NEWUSER; pid_t child = clone(childEntry, stack + stackSize, flags, this); if (child == -1 && errno == EINVAL) { @@ -2599,11 +1660,12 @@ void DerivationGoal::startBuilder() flags &= ~CLONE_NEWPID; child = clone(childEntry, stack + stackSize, flags, this); } - if (child == -1 && (errno == EPERM || errno == EINVAL)) { + if (usingUserNamespace && child == -1 && (errno == EPERM || errno == EINVAL)) { /* Some distros patch Linux to not allow unprivileged * user namespaces. If we get EPERM or EINVAL, try * without CLONE_NEWUSER and see if that works. */ + usingUserNamespace = false; flags &= ~CLONE_NEWUSER; child = clone(childEntry, stack + stackSize, flags, this); } @@ -2614,7 +1676,8 @@ void DerivationGoal::startBuilder() _exit(1); if (child == -1) throw SysError("cloning builder process"); - writeFull(builderOut.writeSide.get(), std::to_string(child) + "\n"); + writeFull(builderOut.writeSide.get(), + fmt("%d %d\n", usingUserNamespace, child)); _exit(0); }, options); @@ -2628,23 +1691,46 @@ void DerivationGoal::startBuilder() userNamespaceSync.readSide = -1; + /* Close the write side to prevent runChild() from hanging + reading from this. 
*/ + Finally cleanup([&]() { + userNamespaceSync.writeSide = -1; + }); + pid_t tmp; - if (!string2Int(readLine(builderOut.readSide.get()), tmp)) abort(); + auto ss = tokenizeString>(readLine(builderOut.readSide.get())); + assert(ss.size() == 2); + usingUserNamespace = ss[0] == "1"; + if (!string2Int(ss[1], tmp)) abort(); pid = tmp; - /* Set the UID/GID mapping of the builder's user namespace - such that the sandbox user maps to the build user, or to - the calling user (if build users are disabled). */ - uid_t hostUid = buildUser ? buildUser->getUID() : getuid(); - uid_t hostGid = buildUser ? buildUser->getGID() : getgid(); + if (usingUserNamespace) { + /* Set the UID/GID mapping of the builder's user namespace + such that the sandbox user maps to the build user, or to + the calling user (if build users are disabled). */ + uid_t hostUid = buildUser ? buildUser->getUID() : getuid(); + uid_t hostGid = buildUser ? buildUser->getGID() : getgid(); - writeFile("/proc/" + std::to_string(pid) + "/uid_map", - (format("%d %d 1") % sandboxUid % hostUid).str()); + writeFile("/proc/" + std::to_string(pid) + "/uid_map", + fmt("%d %d 1", sandboxUid(), hostUid)); - writeFile("/proc/" + std::to_string(pid) + "/setgroups", "deny"); + writeFile("/proc/" + std::to_string(pid) + "/setgroups", "deny"); - writeFile("/proc/" + std::to_string(pid) + "/gid_map", - (format("%d %d 1") % sandboxGid % hostGid).str()); + writeFile("/proc/" + std::to_string(pid) + "/gid_map", + fmt("%d %d 1", sandboxGid(), hostGid)); + } else { + debug("note: not using a user namespace"); + if (!buildUser) + throw Error("cannot perform a sandboxed build because user namespaces are not enabled; check /proc/sys/user/max_user_namespaces"); + } + + /* Now that we now the sandbox uid, we can write + /etc/passwd. */ + writeFile(chrootRootDir + "/etc/passwd", fmt( + "root:x:0:0:Nix build user:%3%:/noshell\n" + "nixbld:x:%1%:%2%:Nix build user:%3%:/noshell\n" + "nobody:x:65534:65534:Nobody:/:/noshell\n", + sandboxUid(), sandboxGid(), settings.sandboxBuildDir)); /* Save the mount namespace of the child. We have to do this *before* the child does a chroot. */ @@ -2654,7 +1740,6 @@ void DerivationGoal::startBuilder() /* Signal the builder that we've updated its user namespace. */ writeFull(userNamespaceSync.writeSide.get(), "1"); - userNamespaceSync.writeSide = -1; } else #endif @@ -2674,11 +1759,14 @@ void DerivationGoal::startBuilder() /* Check if setting up the build environment failed. */ while (true) { string msg = readLine(builderOut.readSide.get()); + if (string(msg, 0, 1) == "\2") break; if (string(msg, 0, 1) == "\1") { - if (msg.size() == 1) break; - throw Error(string(msg, 1)); + FdSource source(builderOut.readSide.get()); + auto ex = readError(source); + ex.addTrace({}, "while setting up the build environment"); + throw ex; } - debug(msg); + debug("sandbox setup: " + msg); } } @@ -3502,9 +2590,9 @@ void DerivationGoal::runChild() /* Switch to the sandbox uid/gid in the user namespace, which corresponds to the build user or calling user in the parent namespace. */ - if (setgid(sandboxGid) == -1) + if (setgid(sandboxGid()) == -1) throw SysError("setgid failed"); - if (setuid(sandboxUid) == -1) + if (setuid(sandboxUid()) == -1) throw SysError("setuid failed"); setUser = false; @@ -3722,7 +2810,7 @@ void DerivationGoal::runChild() args.push_back(rewriteStrings(i, inputRewrites)); /* Indicate that we managed to set up the build environment. 
*/ - writeFull(STDERR_FILENO, string("\1\n")); + writeFull(STDERR_FILENO, string("\2\n")); /* Execute the program. This should not return. */ if (drv->isBuiltin()) { @@ -3743,7 +2831,7 @@ void DerivationGoal::runChild() throw Error("unsupported builtin function '%1%'", string(drv->builder, 8)); _exit(0); } catch (std::exception & e) { - writeFull(STDERR_FILENO, "error: " + string(e.what()) + "\n"); + writeFull(STDERR_FILENO, e.what() + std::string("\n")); _exit(1); } } @@ -3752,8 +2840,11 @@ void DerivationGoal::runChild() throw SysError("executing '%1%'", drv->builder); - } catch (std::exception & e) { - writeFull(STDERR_FILENO, "\1while setting up the build environment: " + string(e.what()) + "\n"); + } catch (Error & e) { + writeFull(STDERR_FILENO, "\1\n"); + FdSink sink(STDERR_FILENO); + sink << e; + sink.flush(); _exit(1); } } @@ -4237,11 +3328,13 @@ void DerivationGoal::registerOutputs() /* Register each output path as valid, and register the sets of paths referenced by each of them. If there are cycles in the outputs, this will fail. */ - ValidPathInfos infos2; - for (auto & [outputName, newInfo] : infos) { - infos2.push_back(newInfo); + { + ValidPathInfos infos2; + for (auto & [outputName, newInfo] : infos) { + infos2.push_back(newInfo); + } + worker.store.registerValidPaths(infos2); } - worker.store.registerValidPaths(infos2); /* In case of a fixed-output derivation hash mismatch, throw an exception now that we have registered the output as valid. */ @@ -4253,12 +3346,21 @@ void DerivationGoal::registerOutputs() means it's safe to link the derivation to the output hash. We must do that for floating CA derivations, which otherwise couldn't be cached, but it's fine to do in all cases. */ - for (auto & [outputName, newInfo] : infos) { - /* FIXME: we will want to track this mapping in the DB whether or - not we have a drv file. */ - if (useDerivation) - worker.store.linkDeriverToPath(drvPath, outputName, newInfo.path); + bool isCaFloating = drv->type() == DerivationType::CAFloating; + + auto drvPathResolved = drvPath; + if (!useDerivation && isCaFloating) { + /* Once a floating CA derivations reaches this point, it + must already be resolved, so we don't bother trying to + downcast drv to get would would just be an empty + inputDrvs field. 
*/ + Derivation drv2 { *drv }; + drvPathResolved = writeDerivation(worker.store, drv2); } + + if (useDerivation || isCaFloating) + for (auto & [outputName, newInfo] : infos) + worker.store.linkDeriverToPath(drvPathResolved, outputName, newInfo.path); } @@ -4548,7 +3650,7 @@ void DerivationGoal::flushLine() std::map> DerivationGoal::queryPartialDerivationOutputMap() { - if (drv->type() != DerivationType::CAFloating) { + if (!useDerivation || drv->type() != DerivationType::CAFloating) { std::map> res; for (auto & [name, output] : drv->outputs) res.insert_or_assign(name, output.path(worker.store, drv->name, name)); @@ -4560,7 +3662,7 @@ std::map> DerivationGoal::queryPartialDeri OutputPathMap DerivationGoal::queryDerivationOutputMap() { - if (drv->type() != DerivationType::CAFloating) { + if (!useDerivation || drv->type() != DerivationType::CAFloating) { OutputPathMap res; for (auto & [name, output] : drv->outputsAndOptPaths(worker.store)) res.insert_or_assign(name, *output.second); @@ -4636,927 +3738,4 @@ void DerivationGoal::done(BuildResult::Status status, std::optional ex) } -////////////////////////////////////////////////////////////////////// - - -class SubstitutionGoal : public Goal -{ - friend class Worker; - -private: - /* The store path that should be realised through a substitute. */ - StorePath storePath; - - /* The path the substituter refers to the path as. This will be - * different when the stores have different names. */ - std::optional subPath; - - /* The remaining substituters. */ - std::list> subs; - - /* The current substituter. */ - std::shared_ptr sub; - - /* Whether a substituter failed. */ - bool substituterFailed = false; - - /* Path info returned by the substituter's query info operation. */ - std::shared_ptr info; - - /* Pipe for the substituter's standard output. */ - Pipe outPipe; - - /* The substituter thread. */ - std::thread thr; - - std::promise promise; - - /* Whether to try to repair a valid path. */ - RepairFlag repair; - - /* Location where we're downloading the substitute. Differs from - storePath when doing a repair. */ - Path destPath; - - std::unique_ptr> maintainExpectedSubstitutions, - maintainRunningSubstitutions, maintainExpectedNar, maintainExpectedDownload; - - typedef void (SubstitutionGoal::*GoalState)(); - GoalState state; - - /* Content address for recomputing store path */ - std::optional ca; - -public: - SubstitutionGoal(const StorePath & storePath, Worker & worker, RepairFlag repair = NoRepair, std::optional ca = std::nullopt); - ~SubstitutionGoal(); - - void timedOut(Error && ex) override { abort(); }; - - string key() override - { - /* "a$" ensures substitution goals happen before derivation - goals. */ - return "a$" + std::string(storePath.name()) + "$" + worker.store.printStorePath(storePath); - } - - void work() override; - - /* The states. */ - void init(); - void tryNext(); - void gotInfo(); - void referencesValid(); - void tryToRun(); - void finished(); - - /* Callback used by the worker to write to the log. 
*/ - void handleChildOutput(int fd, const string & data) override; - void handleEOF(int fd) override; - - StorePath getStorePath() { return storePath; } -}; - - -SubstitutionGoal::SubstitutionGoal(const StorePath & storePath, Worker & worker, RepairFlag repair, std::optional ca) - : Goal(worker) - , storePath(storePath) - , repair(repair) - , ca(ca) -{ - state = &SubstitutionGoal::init; - name = fmt("substitution of '%s'", worker.store.printStorePath(this->storePath)); - trace("created"); - maintainExpectedSubstitutions = std::make_unique>(worker.expectedSubstitutions); -} - - -SubstitutionGoal::~SubstitutionGoal() -{ - try { - if (thr.joinable()) { - // FIXME: signal worker thread to quit. - thr.join(); - worker.childTerminated(this); - } - } catch (...) { - ignoreException(); - } -} - - -void SubstitutionGoal::work() -{ - (this->*state)(); -} - - -void SubstitutionGoal::init() -{ - trace("init"); - - worker.store.addTempRoot(storePath); - - /* If the path already exists we're done. */ - if (!repair && worker.store.isValidPath(storePath)) { - amDone(ecSuccess); - return; - } - - if (settings.readOnlyMode) - throw Error("cannot substitute path '%s' - no write access to the Nix store", worker.store.printStorePath(storePath)); - - subs = settings.useSubstitutes ? getDefaultSubstituters() : std::list>(); - - tryNext(); -} - - -void SubstitutionGoal::tryNext() -{ - trace("trying next substituter"); - - if (subs.size() == 0) { - /* None left. Terminate this goal and let someone else deal - with it. */ - debug("path '%s' is required, but there is no substituter that can build it", worker.store.printStorePath(storePath)); - - /* Hack: don't indicate failure if there were no substituters. - In that case the calling derivation should just do a - build. */ - amDone(substituterFailed ? ecFailed : ecNoSubstituters); - - if (substituterFailed) { - worker.failedSubstitutions++; - worker.updateProgress(); - } - - return; - } - - sub = subs.front(); - subs.pop_front(); - - if (ca) { - subPath = sub->makeFixedOutputPathFromCA(storePath.name(), *ca); - if (sub->storeDir == worker.store.storeDir) - assert(subPath == storePath); - } else if (sub->storeDir != worker.store.storeDir) { - tryNext(); - return; - } - - try { - // FIXME: make async - info = sub->queryPathInfo(subPath ? *subPath : storePath); - } catch (InvalidPath &) { - tryNext(); - return; - } catch (SubstituterDisabled &) { - if (settings.tryFallback) { - tryNext(); - return; - } - throw; - } catch (Error & e) { - if (settings.tryFallback) { - logError(e.info()); - tryNext(); - return; - } - throw; - } - - if (info->path != storePath) { - if (info->isContentAddressed(*sub) && info->references.empty()) { - auto info2 = std::make_shared(*info); - info2->path = storePath; - info = info2; - } else { - printError("asked '%s' for '%s' but got '%s'", - sub->getUri(), worker.store.printStorePath(storePath), sub->printStorePath(info->path)); - tryNext(); - return; - } - } - - /* Update the total expected download size. */ - auto narInfo = std::dynamic_pointer_cast(info); - - maintainExpectedNar = std::make_unique>(worker.expectedNarSize, info->narSize); - - maintainExpectedDownload = - narInfo && narInfo->fileSize - ? std::make_unique>(worker.expectedDownloadSize, narInfo->fileSize) - : nullptr; - - worker.updateProgress(); - - /* Bail out early if this substituter lacks a valid - signature. LocalStore::addToStore() also checks for this, but - only after we've downloaded the path. 
*/ - if (worker.store.requireSigs - && !sub->isTrusted - && !info->checkSignatures(worker.store, worker.store.getPublicKeys())) - { - logWarning({ - .name = "Invalid path signature", - .hint = hintfmt("substituter '%s' does not have a valid signature for path '%s'", - sub->getUri(), worker.store.printStorePath(storePath)) - }); - tryNext(); - return; - } - - /* To maintain the closure invariant, we first have to realise the - paths referenced by this one. */ - for (auto & i : info->references) - if (i != storePath) /* ignore self-references */ - addWaitee(worker.makeSubstitutionGoal(i)); - - if (waitees.empty()) /* to prevent hang (no wake-up event) */ - referencesValid(); - else - state = &SubstitutionGoal::referencesValid; -} - - -void SubstitutionGoal::referencesValid() -{ - trace("all references realised"); - - if (nrFailed > 0) { - debug("some references of path '%s' could not be realised", worker.store.printStorePath(storePath)); - amDone(nrNoSubstituters > 0 || nrIncompleteClosure > 0 ? ecIncompleteClosure : ecFailed); - return; - } - - for (auto & i : info->references) - if (i != storePath) /* ignore self-references */ - assert(worker.store.isValidPath(i)); - - state = &SubstitutionGoal::tryToRun; - worker.wakeUp(shared_from_this()); -} - - -void SubstitutionGoal::tryToRun() -{ - trace("trying to run"); - - /* Make sure that we are allowed to start a build. Note that even - if maxBuildJobs == 0 (no local builds allowed), we still allow - a substituter to run. This is because substitutions cannot be - distributed to another machine via the build hook. */ - if (worker.getNrLocalBuilds() >= std::max(1U, (unsigned int) settings.maxBuildJobs)) { - worker.waitForBuildSlot(shared_from_this()); - return; - } - - maintainRunningSubstitutions = std::make_unique>(worker.runningSubstitutions); - worker.updateProgress(); - - outPipe.create(); - - promise = std::promise(); - - thr = std::thread([this]() { - try { - /* Wake up the worker loop when we're done. */ - Finally updateStats([this]() { outPipe.writeSide = -1; }); - - Activity act(*logger, actSubstitute, Logger::Fields{worker.store.printStorePath(storePath), sub->getUri()}); - PushActivity pact(act.id); - - copyStorePath(ref(sub), ref(worker.store.shared_from_this()), - subPath ? *subPath : storePath, repair, sub->isTrusted ? NoCheckSigs : CheckSigs); - - promise.set_value(); - } catch (...) { - promise.set_exception(std::current_exception()); - } - }); - - worker.childStarted(shared_from_this(), {outPipe.readSide.get()}, true, false); - - state = &SubstitutionGoal::finished; -} - - -void SubstitutionGoal::finished() -{ - trace("substitute finished"); - - thr.join(); - worker.childTerminated(this); - - try { - promise.get_future().get(); - } catch (std::exception & e) { - printError(e.what()); - - /* Cause the parent build to fail unless --fallback is given, - or the substitute has disappeared. The latter case behaves - the same as the substitute never having existed in the - first place. */ - try { - throw; - } catch (SubstituteGone &) { - } catch (...) { - substituterFailed = true; - } - - /* Try the next substitute. 
*/ - state = &SubstitutionGoal::tryNext; - worker.wakeUp(shared_from_this()); - return; - } - - worker.markContentsGood(storePath); - - printMsg(lvlChatty, "substitution of path '%s' succeeded", worker.store.printStorePath(storePath)); - - maintainRunningSubstitutions.reset(); - - maintainExpectedSubstitutions.reset(); - worker.doneSubstitutions++; - - if (maintainExpectedDownload) { - auto fileSize = maintainExpectedDownload->delta; - maintainExpectedDownload.reset(); - worker.doneDownloadSize += fileSize; - } - - worker.doneNarSize += maintainExpectedNar->delta; - maintainExpectedNar.reset(); - - worker.updateProgress(); - - amDone(ecSuccess); -} - - -void SubstitutionGoal::handleChildOutput(int fd, const string & data) -{ -} - - -void SubstitutionGoal::handleEOF(int fd) -{ - if (fd == outPipe.readSide.get()) worker.wakeUp(shared_from_this()); -} - -////////////////////////////////////////////////////////////////////// - - -Worker::Worker(LocalStore & store) - : act(*logger, actRealise) - , actDerivations(*logger, actBuilds) - , actSubstitutions(*logger, actCopyPaths) - , store(store) -{ - /* Debugging: prevent recursive workers. */ - nrLocalBuilds = 0; - lastWokenUp = steady_time_point::min(); - permanentFailure = false; - timedOut = false; - hashMismatch = false; - checkMismatch = false; -} - - -Worker::~Worker() -{ - /* Explicitly get rid of all strong pointers now. After this all - goals that refer to this worker should be gone. (Otherwise we - are in trouble, since goals may call childTerminated() etc. in - their destructors). */ - topGoals.clear(); - - assert(expectedSubstitutions == 0); - assert(expectedDownloadSize == 0); - assert(expectedNarSize == 0); -} - - -GoalPtr Worker::makeDerivationGoal(const StorePath & path, - const StringSet & wantedOutputs, BuildMode buildMode) -{ - GoalPtr goal = derivationGoals[path].lock(); // FIXME - if (!goal) { - goal = std::make_shared(path, wantedOutputs, *this, buildMode); - derivationGoals.insert_or_assign(path, goal); - wakeUp(goal); - } else - (dynamic_cast(goal.get()))->addWantedOutputs(wantedOutputs); - return goal; -} - - -std::shared_ptr Worker::makeBasicDerivationGoal(const StorePath & drvPath, - const BasicDerivation & drv, BuildMode buildMode) -{ - auto goal = std::make_shared(drvPath, drv, *this, buildMode); - wakeUp(goal); - return goal; -} - - -GoalPtr Worker::makeSubstitutionGoal(const StorePath & path, RepairFlag repair, std::optional ca) -{ - GoalPtr goal = substitutionGoals[path].lock(); // FIXME - if (!goal) { - goal = std::make_shared(path, *this, repair, ca); - substitutionGoals.insert_or_assign(path, goal); - wakeUp(goal); - } - return goal; -} - - -static void removeGoal(GoalPtr goal, WeakGoalMap & goalMap) -{ - /* !!! inefficient */ - for (WeakGoalMap::iterator i = goalMap.begin(); - i != goalMap.end(); ) - if (i->second.lock() == goal) { - WeakGoalMap::iterator j = i; ++j; - goalMap.erase(i); - i = j; - } - else ++i; -} - - -void Worker::removeGoal(GoalPtr goal) -{ - nix::removeGoal(goal, derivationGoals); - nix::removeGoal(goal, substitutionGoals); - if (topGoals.find(goal) != topGoals.end()) { - topGoals.erase(goal); - /* If a top-level goal failed, then kill all other goals - (unless keepGoing was set). */ - if (goal->exitCode == Goal::ecFailed && !settings.keepGoing) - topGoals.clear(); - } - - /* Wake up goals waiting for any goal to finish. 
*/ - for (auto & i : waitingForAnyGoal) { - GoalPtr goal = i.lock(); - if (goal) wakeUp(goal); - } - - waitingForAnyGoal.clear(); -} - - -void Worker::wakeUp(GoalPtr goal) -{ - goal->trace("woken up"); - addToWeakGoals(awake, goal); -} - - -unsigned Worker::getNrLocalBuilds() -{ - return nrLocalBuilds; -} - - -void Worker::childStarted(GoalPtr goal, const set & fds, - bool inBuildSlot, bool respectTimeouts) -{ - Child child; - child.goal = goal; - child.goal2 = goal.get(); - child.fds = fds; - child.timeStarted = child.lastOutput = steady_time_point::clock::now(); - child.inBuildSlot = inBuildSlot; - child.respectTimeouts = respectTimeouts; - children.emplace_back(child); - if (inBuildSlot) nrLocalBuilds++; -} - - -void Worker::childTerminated(Goal * goal, bool wakeSleepers) -{ - auto i = std::find_if(children.begin(), children.end(), - [&](const Child & child) { return child.goal2 == goal; }); - if (i == children.end()) return; - - if (i->inBuildSlot) { - assert(nrLocalBuilds > 0); - nrLocalBuilds--; - } - - children.erase(i); - - if (wakeSleepers) { - - /* Wake up goals waiting for a build slot. */ - for (auto & j : wantingToBuild) { - GoalPtr goal = j.lock(); - if (goal) wakeUp(goal); - } - - wantingToBuild.clear(); - } -} - - -void Worker::waitForBuildSlot(GoalPtr goal) -{ - debug("wait for build slot"); - if (getNrLocalBuilds() < settings.maxBuildJobs) - wakeUp(goal); /* we can do it right away */ - else - addToWeakGoals(wantingToBuild, goal); -} - - -void Worker::waitForAnyGoal(GoalPtr goal) -{ - debug("wait for any goal"); - addToWeakGoals(waitingForAnyGoal, goal); -} - - -void Worker::waitForAWhile(GoalPtr goal) -{ - debug("wait for a while"); - addToWeakGoals(waitingForAWhile, goal); -} - - -void Worker::run(const Goals & _topGoals) -{ - for (auto & i : _topGoals) topGoals.insert(i); - - debug("entered goal loop"); - - while (1) { - - checkInterrupt(); - - store.autoGC(false); - - /* Call every wake goal (in the ordering established by - CompareGoalPtrs). */ - while (!awake.empty() && !topGoals.empty()) { - Goals awake2; - for (auto & i : awake) { - GoalPtr goal = i.lock(); - if (goal) awake2.insert(goal); - } - awake.clear(); - for (auto & goal : awake2) { - checkInterrupt(); - goal->work(); - if (topGoals.empty()) break; // stuff may have been cancelled - } - } - - if (topGoals.empty()) break; - - /* Wait for input. */ - if (!children.empty() || !waitingForAWhile.empty()) - waitForInput(); - else { - if (awake.empty() && 0 == settings.maxBuildJobs) - { - if (getMachines().empty()) - throw Error("unable to start any build; either increase '--max-jobs' " - "or enable remote builds." - "\nhttps://nixos.org/nix/manual/#chap-distributed-builds"); - else - throw Error("unable to start any build; remote machines may not have " - "all required system features." - "\nhttps://nixos.org/nix/manual/#chap-distributed-builds"); - - } - assert(!awake.empty()); - } - } - - /* If --keep-going is not set, it's possible that the main goal - exited while some of its subgoals were still active. But if - --keep-going *is* set, then they must all be finished now. */ - assert(!settings.keepGoing || awake.empty()); - assert(!settings.keepGoing || wantingToBuild.empty()); - assert(!settings.keepGoing || children.empty()); -} - -void Worker::waitForInput() -{ - printMsg(lvlVomit, "waiting for children"); - - /* Process output from the file descriptors attached to the - children, namely log output and output path creation commands. 
- We also use this to detect child termination: if we get EOF on - the logger pipe of a build, we assume that the builder has - terminated. */ - - bool useTimeout = false; - long timeout = 0; - auto before = steady_time_point::clock::now(); - - /* If we're monitoring for silence on stdout/stderr, or if there - is a build timeout, then wait for input until the first - deadline for any child. */ - auto nearest = steady_time_point::max(); // nearest deadline - if (settings.minFree.get() != 0) - // Periodicallty wake up to see if we need to run the garbage collector. - nearest = before + std::chrono::seconds(10); - for (auto & i : children) { - if (!i.respectTimeouts) continue; - if (0 != settings.maxSilentTime) - nearest = std::min(nearest, i.lastOutput + std::chrono::seconds(settings.maxSilentTime)); - if (0 != settings.buildTimeout) - nearest = std::min(nearest, i.timeStarted + std::chrono::seconds(settings.buildTimeout)); - } - if (nearest != steady_time_point::max()) { - timeout = std::max(1L, (long) std::chrono::duration_cast(nearest - before).count()); - useTimeout = true; - } - - /* If we are polling goals that are waiting for a lock, then wake - up after a few seconds at most. */ - if (!waitingForAWhile.empty()) { - useTimeout = true; - if (lastWokenUp == steady_time_point::min() || lastWokenUp > before) lastWokenUp = before; - timeout = std::max(1L, - (long) std::chrono::duration_cast( - lastWokenUp + std::chrono::seconds(settings.pollInterval) - before).count()); - } else lastWokenUp = steady_time_point::min(); - - if (useTimeout) - vomit("sleeping %d seconds", timeout); - - /* Use select() to wait for the input side of any logger pipe to - become `available'. Note that `available' (i.e., non-blocking) - includes EOF. */ - std::vector pollStatus; - std::map fdToPollStatus; - for (auto & i : children) { - for (auto & j : i.fds) { - pollStatus.push_back((struct pollfd) { .fd = j, .events = POLLIN }); - fdToPollStatus[j] = pollStatus.size() - 1; - } - } - - if (poll(pollStatus.data(), pollStatus.size(), - useTimeout ? timeout * 1000 : -1) == -1) { - if (errno == EINTR) return; - throw SysError("waiting for input"); - } - - auto after = steady_time_point::clock::now(); - - /* Process all available file descriptors. FIXME: this is - O(children * fds). */ - decltype(children)::iterator i; - for (auto j = children.begin(); j != children.end(); j = i) { - i = std::next(j); - - checkInterrupt(); - - GoalPtr goal = j->goal.lock(); - assert(goal); - - set fds2(j->fds); - std::vector buffer(4096); - for (auto & k : fds2) { - if (pollStatus.at(fdToPollStatus.at(k)).revents) { - ssize_t rd = ::read(k, buffer.data(), buffer.size()); - // FIXME: is there a cleaner way to handle pt close - // than EIO? Is this even standard? 
- if (rd == 0 || (rd == -1 && errno == EIO)) { - debug("%1%: got EOF", goal->getName()); - goal->handleEOF(k); - j->fds.erase(k); - } else if (rd == -1) { - if (errno != EINTR) - throw SysError("%s: read failed", goal->getName()); - } else { - printMsg(lvlVomit, "%1%: read %2% bytes", - goal->getName(), rd); - string data((char *) buffer.data(), rd); - j->lastOutput = after; - goal->handleChildOutput(k, data); - } - } - } - - if (goal->exitCode == Goal::ecBusy && - 0 != settings.maxSilentTime && - j->respectTimeouts && - after - j->lastOutput >= std::chrono::seconds(settings.maxSilentTime)) - { - goal->timedOut(Error( - "%1% timed out after %2% seconds of silence", - goal->getName(), settings.maxSilentTime)); - } - - else if (goal->exitCode == Goal::ecBusy && - 0 != settings.buildTimeout && - j->respectTimeouts && - after - j->timeStarted >= std::chrono::seconds(settings.buildTimeout)) - { - goal->timedOut(Error( - "%1% timed out after %2% seconds", - goal->getName(), settings.buildTimeout)); - } - } - - if (!waitingForAWhile.empty() && lastWokenUp + std::chrono::seconds(settings.pollInterval) <= after) { - lastWokenUp = after; - for (auto & i : waitingForAWhile) { - GoalPtr goal = i.lock(); - if (goal) wakeUp(goal); - } - waitingForAWhile.clear(); - } -} - - -unsigned int Worker::exitStatus() -{ - /* - * 1100100 - * ^^^^ - * |||`- timeout - * ||`-- output hash mismatch - * |`--- build failure - * `---- not deterministic - */ - unsigned int mask = 0; - bool buildFailure = permanentFailure || timedOut || hashMismatch; - if (buildFailure) - mask |= 0x04; // 100 - if (timedOut) - mask |= 0x01; // 101 - if (hashMismatch) - mask |= 0x02; // 102 - if (checkMismatch) { - mask |= 0x08; // 104 - } - - if (mask) - mask |= 0x60; - return mask ? mask : 1; -} - - -bool Worker::pathContentsGood(const StorePath & path) -{ - auto i = pathContentsGoodCache.find(path); - if (i != pathContentsGoodCache.end()) return i->second; - printInfo("checking path '%s'...", store.printStorePath(path)); - auto info = store.queryPathInfo(path); - bool res; - if (!pathExists(store.printStorePath(path))) - res = false; - else { - HashResult current = hashPath(info->narHash.type, store.printStorePath(path)); - Hash nullHash(htSHA256); - res = info->narHash == nullHash || info->narHash == current.first; - } - pathContentsGoodCache.insert_or_assign(path, res); - if (!res) - logError({ - .name = "Corrupted path", - .hint = hintfmt("path '%s' is corrupted or missing!", store.printStorePath(path)) - }); - return res; -} - - -void Worker::markContentsGood(const StorePath & path) -{ - pathContentsGoodCache.insert_or_assign(path, true); -} - - -////////////////////////////////////////////////////////////////////// - - -static void primeCache(Store & store, const std::vector & paths) -{ - StorePathSet willBuild, willSubstitute, unknown; - uint64_t downloadSize, narSize; - store.queryMissing(paths, willBuild, willSubstitute, unknown, downloadSize, narSize); - - if (!willBuild.empty() && 0 == settings.maxBuildJobs && getMachines().empty()) - throw Error( - "%d derivations need to be built, but neither local builds ('--max-jobs') " - "nor remote builds ('--builders') are enabled", willBuild.size()); -} - - -void LocalStore::buildPaths(const std::vector & drvPaths, BuildMode buildMode) -{ - Worker worker(*this); - - primeCache(*this, drvPaths); - - Goals goals; - for (auto & path : drvPaths) { - if (path.path.isDerivation()) - goals.insert(worker.makeDerivationGoal(path.path, path.outputs, buildMode)); - else - 
goals.insert(worker.makeSubstitutionGoal(path.path, buildMode == bmRepair ? Repair : NoRepair)); - } - - worker.run(goals); - - StorePathSet failed; - std::optional ex; - for (auto & i : goals) { - if (i->ex) { - if (ex) - logError(i->ex->info()); - else - ex = i->ex; - } - if (i->exitCode != Goal::ecSuccess) { - DerivationGoal * i2 = dynamic_cast(i.get()); - if (i2) failed.insert(i2->getDrvPath()); - else failed.insert(dynamic_cast(i.get())->getStorePath()); - } - } - - if (failed.size() == 1 && ex) { - ex->status = worker.exitStatus(); - throw *ex; - } else if (!failed.empty()) { - if (ex) logError(ex->info()); - throw Error(worker.exitStatus(), "build of %s failed", showPaths(failed)); - } -} - -BuildResult LocalStore::buildDerivation(const StorePath & drvPath, const BasicDerivation & drv, - BuildMode buildMode) -{ - Worker worker(*this); - auto goal = worker.makeBasicDerivationGoal(drvPath, drv, buildMode); - - BuildResult result; - - try { - worker.run(Goals{goal}); - result = goal->getResult(); - } catch (Error & e) { - result.status = BuildResult::MiscFailure; - result.errorMsg = e.msg(); - } - - return result; -} - - -void LocalStore::ensurePath(const StorePath & path) -{ - /* If the path is already valid, we're done. */ - if (isValidPath(path)) return; - - primeCache(*this, {{path}}); - - Worker worker(*this); - GoalPtr goal = worker.makeSubstitutionGoal(path); - Goals goals = {goal}; - - worker.run(goals); - - if (goal->exitCode != Goal::ecSuccess) { - if (goal->ex) { - goal->ex->status = worker.exitStatus(); - throw *goal->ex; - } else - throw Error(worker.exitStatus(), "path '%s' does not exist and cannot be created", printStorePath(path)); - } -} - - -void LocalStore::repairPath(const StorePath & path) -{ - Worker worker(*this); - GoalPtr goal = worker.makeSubstitutionGoal(path, Repair); - Goals goals = {goal}; - - worker.run(goals); - - if (goal->exitCode != Goal::ecSuccess) { - /* Since substituting the path didn't work, if we have a valid - deriver, then rebuild the deriver. */ - auto info = queryPathInfo(path); - if (info->deriver && isValidPath(*info->deriver)) { - goals.clear(); - goals.insert(worker.makeDerivationGoal(*info->deriver, StringSet(), bmRepair)); - worker.run(goals); - } else - throw Error(worker.exitStatus(), "cannot repair path '%s'", printStorePath(path)); - } -} - - } diff --git a/src/libstore/build/derivation-goal.hh b/src/libstore/build/derivation-goal.hh new file mode 100644 index 000000000..4976207e0 --- /dev/null +++ b/src/libstore/build/derivation-goal.hh @@ -0,0 +1,379 @@ +#pragma once + +#include "parsed-derivations.hh" +#include "lock.hh" +#include "local-store.hh" +#include "goal.hh" + +namespace nix { + +using std::map; + +struct HookInstance; + +typedef enum {rpAccept, rpDecline, rpPostpone} HookReply; + +/* Unless we are repairing, we don't both to test validity and just assume it, + so the choices are `Absent` or `Valid`. */ +enum struct PathStatus { + Corrupt, + Absent, + Valid, +}; + +struct InitialOutputStatus { + StorePath path; + PathStatus status; + /* Valid in the store, and additionally non-corrupt if we are repairing */ + bool isValid() const { + return status == PathStatus::Valid; + } + /* Merely present, allowed to be corrupt */ + bool isPresent() const { + return status == PathStatus::Corrupt + || status == PathStatus::Valid; + } +}; + +struct InitialOutput { + bool wanted; + std::optional known; +}; + +class DerivationGoal : public Goal +{ +private: + /* Whether to use an on-disk .drv file. 
*/ + bool useDerivation; + + /* The path of the derivation. */ + StorePath drvPath; + + /* The specific outputs that we need to build. Empty means all of + them. */ + StringSet wantedOutputs; + + /* Whether additional wanted outputs have been added. */ + bool needRestart = false; + + /* Whether to retry substituting the outputs after building the + inputs. */ + bool retrySubstitution; + + /* The derivation stored at drvPath. */ + std::unique_ptr drv; + + std::unique_ptr parsedDrv; + + /* The remainder is state held during the build. */ + + /* Locks on (fixed) output paths. */ + PathLocks outputLocks; + + /* All input paths (that is, the union of FS closures of the + immediate input paths). */ + StorePathSet inputPaths; + + std::map initialOutputs; + + /* User selected for running the builder. */ + std::unique_ptr buildUser; + + /* The process ID of the builder. */ + Pid pid; + + /* The temporary directory. */ + Path tmpDir; + + /* The path of the temporary directory in the sandbox. */ + Path tmpDirInSandbox; + + /* File descriptor for the log file. */ + AutoCloseFD fdLogFile; + std::shared_ptr logFileSink, logSink; + + /* Number of bytes received from the builder's stdout/stderr. */ + unsigned long logSize; + + /* The most recent log lines. */ + std::list logTail; + + std::string currentLogLine; + size_t currentLogLinePos = 0; // to handle carriage return + + std::string currentHookLine; + + /* Pipe for the builder's standard output/error. */ + Pipe builderOut; + + /* Pipe for synchronising updates to the builder namespaces. */ + Pipe userNamespaceSync; + + /* The mount namespace of the builder, used to add additional + paths to the sandbox as a result of recursive Nix calls. */ + AutoCloseFD sandboxMountNamespace; + + /* On Linux, whether we're doing the build in its own user + namespace. */ + bool usingUserNamespace = true; + + /* The build hook. */ + std::unique_ptr hook; + + /* Whether we're currently doing a chroot build. */ + bool useChroot = false; + + Path chrootRootDir; + + /* RAII object to delete the chroot directory. */ + std::shared_ptr autoDelChroot; + + /* The sort of derivation we are building. */ + DerivationType derivationType; + + /* Whether to run the build in a private network namespace. */ + bool privateNetwork = false; + + typedef void (DerivationGoal::*GoalState)(); + GoalState state; + + /* Stuff we need to pass to initChild(). */ + struct ChrootPath { + Path source; + bool optional; + ChrootPath(Path source = "", bool optional = false) + : source(source), optional(optional) + { } + }; + typedef map DirsInChroot; // maps target path to source path + DirsInChroot dirsInChroot; + + typedef map Environment; + Environment env; + +#if __APPLE__ + typedef string SandboxProfile; + SandboxProfile additionalSandboxProfile; +#endif + + /* Hash rewriting. */ + StringMap inputRewrites, outputRewrites; + typedef map RedirectedOutputs; + RedirectedOutputs redirectedOutputs; + + /* The outputs paths used during the build. + + - Input-addressed derivations or fixed content-addressed outputs are + sometimes built when some of their outputs already exist, and can not + be hidden via sandboxing. We use temporary locations instead and + rewrite after the build. Otherwise the regular predetermined paths are + put here. + + - Floating content-addressed derivations do not know their final build + output paths until the outputs are hashed, so random locations are + used, and then renamed. The randomness helps guard against hidden + self-references. 
+ */ + OutputPathMap scratchOutputs; + + /* The final output paths of the build. + + - For input-addressed derivations, always the precomputed paths + + - For content-addressed derivations, calcuated from whatever the hash + ends up being. (Note that fixed outputs derivations that produce the + "wrong" output still install that data under its true content-address.) + */ + OutputPathMap finalOutputs; + + BuildMode buildMode; + + /* If we're repairing without a chroot, there may be outputs that + are valid but corrupt. So we redirect these outputs to + temporary paths. */ + StorePathSet redirectedBadOutputs; + + BuildResult result; + + /* The current round, if we're building multiple times. */ + size_t curRound = 1; + + size_t nrRounds; + + /* Path registration info from the previous round, if we're + building multiple times. Since this contains the hash, it + allows us to compare whether two rounds produced the same + result. */ + std::map prevInfos; + + uid_t sandboxUid() { return usingUserNamespace ? 1000 : buildUser->getUID(); } + gid_t sandboxGid() { return usingUserNamespace ? 100 : buildUser->getGID(); } + + const static Path homeDir; + + std::unique_ptr> mcExpectedBuilds, mcRunningBuilds; + + std::unique_ptr act; + + /* Activity that denotes waiting for a lock. */ + std::unique_ptr actLock; + + std::map builderActivities; + + /* The remote machine on which we're building. */ + std::string machineName; + + /* The recursive Nix daemon socket. */ + AutoCloseFD daemonSocket; + + /* The daemon main thread. */ + std::thread daemonThread; + + /* The daemon worker threads. */ + std::vector daemonWorkerThreads; + + /* Paths that were added via recursive Nix calls. */ + StorePathSet addedPaths; + + /* Recursive Nix calls are only allowed to build or realize paths + in the original input closure or added via a recursive Nix call + (so e.g. you can't do 'nix-store -r /nix/store/' where + /nix/store/ is some arbitrary path in a binary cache). */ + bool isAllowed(const StorePath & path) + { + return inputPaths.count(path) || addedPaths.count(path); + } + + friend struct RestrictedStore; + +public: + DerivationGoal(const StorePath & drvPath, + const StringSet & wantedOutputs, Worker & worker, + BuildMode buildMode = bmNormal); + DerivationGoal(const StorePath & drvPath, const BasicDerivation & drv, + const StringSet & wantedOutputs, Worker & worker, + BuildMode buildMode = bmNormal); + ~DerivationGoal(); + + /* Whether we need to perform hash rewriting if there are valid output paths. */ + bool needsHashRewrite(); + + void timedOut(Error && ex) override; + + string key() override; + + void work() override; + + StorePath getDrvPath() + { + return drvPath; + } + + /* Add wanted outputs to an already existing derivation goal. */ + void addWantedOutputs(const StringSet & outputs); + + BuildResult getResult() { return result; } + +private: + /* The states. */ + void getDerivation(); + void loadDerivation(); + void haveDerivation(); + void outputsSubstitutionTried(); + void gaveUpOnSubstitution(); + void closureRepaired(); + void inputsRealised(); + void tryToBuild(); + void tryLocalBuild(); + void buildDone(); + + void resolvedFinished(); + + /* Is the build hook willing to perform the build? */ + HookReply tryBuildHook(); + + /* Start building a derivation. */ + void startBuilder(); + + /* Fill in the environment for the builder. */ + void initEnv(); + + /* Setup tmp dir location. */ + void initTmpDir(); + + /* Write a JSON file containing the derivation attributes. 
*/ + void writeStructuredAttrs(); + + void startDaemon(); + + void stopDaemon(); + + /* Add 'path' to the set of paths that may be referenced by the + outputs, and make it appear in the sandbox. */ + void addDependency(const StorePath & path); + + /* Make a file owned by the builder. */ + void chownToBuilder(const Path & path); + + /* Run the builder's process. */ + void runChild(); + + friend int childEntry(void *); + + /* Check that the derivation outputs all exist and register them + as valid. */ + void registerOutputs(); + + /* Check that an output meets the requirements specified by the + 'outputChecks' attribute (or the legacy + '{allowed,disallowed}{References,Requisites}' attributes). */ + void checkOutputs(const std::map & outputs); + + /* Open a log file and a pipe to it. */ + Path openLogFile(); + + /* Close the log file. */ + void closeLogFile(); + + /* Delete the temporary directory, if we have one. */ + void deleteTmpDir(bool force); + + /* Callback used by the worker to write to the log. */ + void handleChildOutput(int fd, const string & data) override; + void handleEOF(int fd) override; + void flushLine(); + + /* Wrappers around the corresponding Store methods that first consult the + derivation. This is currently needed because when there is no drv file + there also is no DB entry. */ + std::map> queryPartialDerivationOutputMap(); + OutputPathMap queryDerivationOutputMap(); + + /* Return the set of (in)valid paths. */ + void checkPathValidity(); + + /* Forcibly kill the child process, if any. */ + void killChild(); + + /* Create alternative path calculated from but distinct from the + input, so we can avoid overwriting outputs (or other store paths) + that already exist. */ + StorePath makeFallbackPath(const StorePath & path); + /* Make a path to another based on the output name along with the + derivation hash. */ + /* FIXME add option to randomize, so we can audit whether our + rewrites caught everything */ + StorePath makeFallbackPath(std::string_view outputName); + + void repairClosure(); + + void started(); + + void done( + BuildResult::Status status, + std::optional ex = {}); + + StorePathSet exportReferences(const StorePathSet & storePaths); +}; + +} diff --git a/src/libstore/build/goal.cc b/src/libstore/build/goal.cc new file mode 100644 index 000000000..2dd7a4d37 --- /dev/null +++ b/src/libstore/build/goal.cc @@ -0,0 +1,89 @@ +#include "goal.hh" +#include "worker.hh" + +namespace nix { + + +bool CompareGoalPtrs::operator() (const GoalPtr & a, const GoalPtr & b) const { + string s1 = a->key(); + string s2 = b->key(); + return s1 < s2; +} + + +void addToWeakGoals(WeakGoals & goals, GoalPtr p) +{ + // FIXME: necessary? + // FIXME: O(n) + for (auto & i : goals) + if (i.lock() == p) return; + goals.push_back(p); +} + + +void Goal::addWaitee(GoalPtr waitee) +{ + waitees.insert(waitee); + addToWeakGoals(waitee->waiters, shared_from_this()); +} + + +void Goal::waiteeDone(GoalPtr waitee, ExitCode result) +{ + assert(waitees.find(waitee) != waitees.end()); + waitees.erase(waitee); + + trace(fmt("waitee '%s' done; %d left", waitee->name, waitees.size())); + + if (result == ecFailed || result == ecNoSubstituters || result == ecIncompleteClosure) ++nrFailed; + + if (result == ecNoSubstituters) ++nrNoSubstituters; + + if (result == ecIncompleteClosure) ++nrIncompleteClosure; + + if (waitees.empty() || (result == ecFailed && !settings.keepGoing)) { + + /* If we failed and keepGoing is not set, we remove all + remaining waitees. 
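Aside: to make the waitee/waiter bookkeeping in `addWaitee()`/`waiteeDone()` above concrete, here is a hedged sketch of the handshake between two goals created by the same worker (the function, its arguments, and the dependency between the two paths are assumed for illustration):

```cpp
void example(Worker & worker, const StorePath & drvPath, const StorePath & inputPath)
{
    // a depends on b: a stores b in its waitees, b stores a in its waiters.
    GoalPtr a = worker.makeDerivationGoal(drvPath, StringSet{});
    GoalPtr b = worker.makeSubstitutionGoal(inputPath);
    a->addWaitee(b);

    // When b later finishes, it calls amDone(ecSuccess), which walks
    // b->waiters: a->waiteeDone(b, ecSuccess) removes b from a->waitees,
    // and once a->waitees is empty the worker wakes a up again.
}
```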
*/ + for (auto & goal : waitees) { + WeakGoals waiters2; + for (auto & j : goal->waiters) + if (j.lock() != shared_from_this()) waiters2.push_back(j); + goal->waiters = waiters2; + } + waitees.clear(); + + worker.wakeUp(shared_from_this()); + } +} + + +void Goal::amDone(ExitCode result, std::optional ex) +{ + trace("done"); + assert(exitCode == ecBusy); + assert(result == ecSuccess || result == ecFailed || result == ecNoSubstituters || result == ecIncompleteClosure); + exitCode = result; + + if (ex) { + if (!waiters.empty()) + logError(ex->info()); + else + this->ex = std::move(*ex); + } + + for (auto & i : waiters) { + GoalPtr goal = i.lock(); + if (goal) goal->waiteeDone(shared_from_this(), result); + } + waiters.clear(); + worker.removeGoal(shared_from_this()); +} + + +void Goal::trace(const FormatOrString & fs) +{ + debug("%1%: %2%", name, fs.s); +} + +} diff --git a/src/libstore/build/goal.hh b/src/libstore/build/goal.hh new file mode 100644 index 000000000..360c160ce --- /dev/null +++ b/src/libstore/build/goal.hh @@ -0,0 +1,107 @@ +#pragma once + +#include "types.hh" +#include "store-api.hh" + +namespace nix { + +/* Forward definition. */ +struct Goal; +struct Worker; + +/* A pointer to a goal. */ +typedef std::shared_ptr GoalPtr; +typedef std::weak_ptr WeakGoalPtr; + +struct CompareGoalPtrs { + bool operator() (const GoalPtr & a, const GoalPtr & b) const; +}; + +/* Set of goals. */ +typedef set Goals; +typedef list WeakGoals; + +/* A map of paths to goals (and the other way around). */ +typedef std::map WeakGoalMap; + +struct Goal : public std::enable_shared_from_this +{ + typedef enum {ecBusy, ecSuccess, ecFailed, ecNoSubstituters, ecIncompleteClosure} ExitCode; + + /* Backlink to the worker. */ + Worker & worker; + + /* Goals that this goal is waiting for. */ + Goals waitees; + + /* Goals waiting for this one to finish. Must use weak pointers + here to prevent cycles. */ + WeakGoals waiters; + + /* Number of goals we are/were waiting for that have failed. */ + unsigned int nrFailed; + + /* Number of substitution goals we are/were waiting for that + failed because there are no substituters. */ + unsigned int nrNoSubstituters; + + /* Number of substitution goals we are/were waiting for that + failed because othey had unsubstitutable references. */ + unsigned int nrIncompleteClosure; + + /* Name of this goal for debugging purposes. */ + string name; + + /* Whether the goal is finished. */ + ExitCode exitCode; + + /* Exception containing an error message, if any. */ + std::optional ex; + + Goal(Worker & worker) : worker(worker) + { + nrFailed = nrNoSubstituters = nrIncompleteClosure = 0; + exitCode = ecBusy; + } + + virtual ~Goal() + { + trace("goal destroyed"); + } + + virtual void work() = 0; + + void addWaitee(GoalPtr waitee); + + virtual void waiteeDone(GoalPtr waitee, ExitCode result); + + virtual void handleChildOutput(int fd, const string & data) + { + abort(); + } + + virtual void handleEOF(int fd) + { + abort(); + } + + void trace(const FormatOrString & fs); + + string getName() + { + return name; + } + + /* Callback in case of a timeout. It should wake up its waiters, + get rid of any running child processes that are being monitored + by the worker (important!), etc. 
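Aside: a minimal, purely illustrative `Goal` subclass, exercising only the interface declared in this header (not part of the patch); it finishes immediately the first time the worker runs it:

```cpp
struct TrivialGoal : Goal
{
    TrivialGoal(Worker & worker) : Goal(worker)
    {
        name = "trivial goal";
    }

    /* Called by the worker: succeed immediately. */
    void work() override { amDone(ecSuccess); }

    void timedOut(Error && ex) override { amDone(ecFailed, std::move(ex)); }

    /* Sorts after the "a$…" keys used by substitution goals. */
    string key() override { return "z$trivial"; }
};
```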
*/ + virtual void timedOut(Error && ex) = 0; + + virtual string key() = 0; + + void amDone(ExitCode result, std::optional ex = {}); +}; + +void addToWeakGoals(WeakGoals & goals, GoalPtr p); + +} diff --git a/src/libstore/build/hook-instance.cc b/src/libstore/build/hook-instance.cc new file mode 100644 index 000000000..0f6f580be --- /dev/null +++ b/src/libstore/build/hook-instance.cc @@ -0,0 +1,72 @@ +#include "globals.hh" +#include "hook-instance.hh" + +namespace nix { + +HookInstance::HookInstance() +{ + debug("starting build hook '%s'", settings.buildHook); + + /* Create a pipe to get the output of the child. */ + fromHook.create(); + + /* Create the communication pipes. */ + toHook.create(); + + /* Create a pipe to get the output of the builder. */ + builderOut.create(); + + /* Fork the hook. */ + pid = startProcess([&]() { + + commonChildInit(fromHook); + + if (chdir("/") == -1) throw SysError("changing into /"); + + /* Dup the communication pipes. */ + if (dup2(toHook.readSide.get(), STDIN_FILENO) == -1) + throw SysError("dupping to-hook read side"); + + /* Use fd 4 for the builder's stdout/stderr. */ + if (dup2(builderOut.writeSide.get(), 4) == -1) + throw SysError("dupping builder's stdout/stderr"); + + /* Hack: pass the read side of that fd to allow build-remote + to read SSH error messages. */ + if (dup2(builderOut.readSide.get(), 5) == -1) + throw SysError("dupping builder's stdout/stderr"); + + Strings args = { + std::string(baseNameOf(settings.buildHook.get())), + std::to_string(verbosity), + }; + + execv(settings.buildHook.get().c_str(), stringsToCharPtrs(args).data()); + + throw SysError("executing '%s'", settings.buildHook); + }); + + pid.setSeparatePG(true); + fromHook.writeSide = -1; + toHook.readSide = -1; + + sink = FdSink(toHook.writeSide.get()); + std::map settings; + globalConfig.getSettings(settings); + for (auto & setting : settings) + sink << 1 << setting.first << setting.second.value; + sink << 0; +} + + +HookInstance::~HookInstance() +{ + try { + toHook.writeSide = -1; + if (pid != -1) pid.kill(); + } catch (...) { + ignoreException(); + } +} + +} diff --git a/src/libstore/build/hook-instance.hh b/src/libstore/build/hook-instance.hh new file mode 100644 index 000000000..9e8cff128 --- /dev/null +++ b/src/libstore/build/hook-instance.hh @@ -0,0 +1,31 @@ +#pragma once + +#include "logging.hh" +#include "serialise.hh" + +namespace nix { + +struct HookInstance +{ + /* Pipes for talking to the build hook. */ + Pipe toHook; + + /* Pipe for the hook's standard output/error. */ + Pipe fromHook; + + /* Pipe for the builder's standard output/error. */ + Pipe builderOut; + + /* The process ID of the hook. 
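Aside: the constructor above ends by streaming the global settings to the hook as `1 <name> <value>` records terminated by `0`. A hypothetical reader for that preamble on the hook side, assuming the `serialise.hh` primitives (`FdSource`, `readInt`, `readString`) that the daemon code in this patch also uses; this is a sketch, not part of the change:

```cpp
#include "serialise.hh"

#include <map>
#include <string>
#include <unistd.h>

std::map<std::string, std::string> readHookSettings()
{
    FdSource in(STDIN_FILENO);       // the hook's stdin is the toHook pipe
    std::map<std::string, std::string> res;
    while (readInt(in)) {            // 1 = another name/value pair, 0 = end
        auto name = readString(in);
        auto value = readString(in);
        res.emplace(std::move(name), std::move(value));
    }
    return res;
}
```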
*/ + Pid pid; + + FdSink sink; + + std::map activities; + + HookInstance(); + + ~HookInstance(); +}; + +} diff --git a/src/libstore/build/local-store-build.cc b/src/libstore/build/local-store-build.cc new file mode 100644 index 000000000..a05fb5805 --- /dev/null +++ b/src/libstore/build/local-store-build.cc @@ -0,0 +1,126 @@ +#include "machines.hh" +#include "worker.hh" +#include "substitution-goal.hh" +#include "derivation-goal.hh" + +namespace nix { + +static void primeCache(Store & store, const std::vector & paths) +{ + StorePathSet willBuild, willSubstitute, unknown; + uint64_t downloadSize, narSize; + store.queryMissing(paths, willBuild, willSubstitute, unknown, downloadSize, narSize); + + if (!willBuild.empty() && 0 == settings.maxBuildJobs && getMachines().empty()) + throw Error( + "%d derivations need to be built, but neither local builds ('--max-jobs') " + "nor remote builds ('--builders') are enabled", willBuild.size()); +} + + +void LocalStore::buildPaths(const std::vector & drvPaths, BuildMode buildMode) +{ + Worker worker(*this); + + primeCache(*this, drvPaths); + + Goals goals; + for (auto & path : drvPaths) { + if (path.path.isDerivation()) + goals.insert(worker.makeDerivationGoal(path.path, path.outputs, buildMode)); + else + goals.insert(worker.makeSubstitutionGoal(path.path, buildMode == bmRepair ? Repair : NoRepair)); + } + + worker.run(goals); + + StorePathSet failed; + std::optional ex; + for (auto & i : goals) { + if (i->ex) { + if (ex) + logError(i->ex->info()); + else + ex = i->ex; + } + if (i->exitCode != Goal::ecSuccess) { + DerivationGoal * i2 = dynamic_cast(i.get()); + if (i2) failed.insert(i2->getDrvPath()); + else failed.insert(dynamic_cast(i.get())->getStorePath()); + } + } + + if (failed.size() == 1 && ex) { + ex->status = worker.exitStatus(); + throw *ex; + } else if (!failed.empty()) { + if (ex) logError(ex->info()); + throw Error(worker.exitStatus(), "build of %s failed", showPaths(failed)); + } +} + +BuildResult LocalStore::buildDerivation(const StorePath & drvPath, const BasicDerivation & drv, + BuildMode buildMode) +{ + Worker worker(*this); + auto goal = worker.makeBasicDerivationGoal(drvPath, drv, {}, buildMode); + + BuildResult result; + + try { + worker.run(Goals{goal}); + result = goal->getResult(); + } catch (Error & e) { + result.status = BuildResult::MiscFailure; + result.errorMsg = e.msg(); + } + + return result; +} + + +void LocalStore::ensurePath(const StorePath & path) +{ + /* If the path is already valid, we're done. */ + if (isValidPath(path)) return; + + primeCache(*this, {{path}}); + + Worker worker(*this); + GoalPtr goal = worker.makeSubstitutionGoal(path); + Goals goals = {goal}; + + worker.run(goals); + + if (goal->exitCode != Goal::ecSuccess) { + if (goal->ex) { + goal->ex->status = worker.exitStatus(); + throw *goal->ex; + } else + throw Error(worker.exitStatus(), "path '%s' does not exist and cannot be created", printStorePath(path)); + } +} + + +void LocalStore::repairPath(const StorePath & path) +{ + Worker worker(*this); + GoalPtr goal = worker.makeSubstitutionGoal(path, Repair); + Goals goals = {goal}; + + worker.run(goals); + + if (goal->exitCode != Goal::ecSuccess) { + /* Since substituting the path didn't work, if we have a valid + deriver, then rebuild the deriver. 
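Aside: for context, a hedged sketch of how a client reaches `buildPaths()` above (the store path and the wrapper function are placeholders, and error handling is omitted):

```cpp
void buildHello(Store & store)   // e.g. the LocalStore behind openStore()
{
    auto drvPath = store.parseStorePath(
        "/nix/store/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-hello.drv");

    std::vector<StorePathWithOutputs> request{{drvPath, StringSet{"out"}}};
    store.buildPaths(request, bmNormal);   // throws, with worker.exitStatus(), on failure
}
```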
*/ + auto info = queryPathInfo(path); + if (info->deriver && isValidPath(*info->deriver)) { + goals.clear(); + goals.insert(worker.makeDerivationGoal(*info->deriver, StringSet(), bmRepair)); + worker.run(goals); + } else + throw Error(worker.exitStatus(), "cannot repair path '%s'", printStorePath(path)); + } +} + +} diff --git a/src/libstore/build/substitution-goal.cc b/src/libstore/build/substitution-goal.cc new file mode 100644 index 000000000..d16584f65 --- /dev/null +++ b/src/libstore/build/substitution-goal.cc @@ -0,0 +1,296 @@ +#include "worker.hh" +#include "substitution-goal.hh" +#include "nar-info.hh" +#include "finally.hh" + +namespace nix { + +SubstitutionGoal::SubstitutionGoal(const StorePath & storePath, Worker & worker, RepairFlag repair, std::optional ca) + : Goal(worker) + , storePath(storePath) + , repair(repair) + , ca(ca) +{ + state = &SubstitutionGoal::init; + name = fmt("substitution of '%s'", worker.store.printStorePath(this->storePath)); + trace("created"); + maintainExpectedSubstitutions = std::make_unique>(worker.expectedSubstitutions); +} + + +SubstitutionGoal::~SubstitutionGoal() +{ + try { + if (thr.joinable()) { + // FIXME: signal worker thread to quit. + thr.join(); + worker.childTerminated(this); + } + } catch (...) { + ignoreException(); + } +} + + +void SubstitutionGoal::work() +{ + (this->*state)(); +} + + +void SubstitutionGoal::init() +{ + trace("init"); + + worker.store.addTempRoot(storePath); + + /* If the path already exists we're done. */ + if (!repair && worker.store.isValidPath(storePath)) { + amDone(ecSuccess); + return; + } + + if (settings.readOnlyMode) + throw Error("cannot substitute path '%s' - no write access to the Nix store", worker.store.printStorePath(storePath)); + + subs = settings.useSubstitutes ? getDefaultSubstituters() : std::list>(); + + tryNext(); +} + + +void SubstitutionGoal::tryNext() +{ + trace("trying next substituter"); + + if (subs.size() == 0) { + /* None left. Terminate this goal and let someone else deal + with it. */ + debug("path '%s' is required, but there is no substituter that can build it", worker.store.printStorePath(storePath)); + + /* Hack: don't indicate failure if there were no substituters. + In that case the calling derivation should just do a + build. */ + amDone(substituterFailed ? ecFailed : ecNoSubstituters); + + if (substituterFailed) { + worker.failedSubstitutions++; + worker.updateProgress(); + } + + return; + } + + sub = subs.front(); + subs.pop_front(); + + if (ca) { + subPath = sub->makeFixedOutputPathFromCA(storePath.name(), *ca); + if (sub->storeDir == worker.store.storeDir) + assert(subPath == storePath); + } else if (sub->storeDir != worker.store.storeDir) { + tryNext(); + return; + } + + try { + // FIXME: make async + info = sub->queryPathInfo(subPath ? *subPath : storePath); + } catch (InvalidPath &) { + tryNext(); + return; + } catch (SubstituterDisabled &) { + if (settings.tryFallback) { + tryNext(); + return; + } + throw; + } catch (Error & e) { + if (settings.tryFallback) { + logError(e.info()); + tryNext(); + return; + } + throw; + } + + if (info->path != storePath) { + if (info->isContentAddressed(*sub) && info->references.empty()) { + auto info2 = std::make_shared(*info); + info2->path = storePath; + info = info2; + } else { + printError("asked '%s' for '%s' but got '%s'", + sub->getUri(), worker.store.printStorePath(storePath), sub->printStorePath(info->path)); + tryNext(); + return; + } + } + + /* Update the total expected download size. 
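Aside: `work()` above dispatches through a pointer-to-member function, the same state-machine idiom `DerivationGoal` uses. A stripped-down illustration of the idiom itself (all names invented, unrelated to the patch):

```cpp
#include <cstdio>

struct Toy
{
    typedef void (Toy::*State)();
    State state = &Toy::init;

    void work() { (this->*state)(); }    // what the worker calls on each wake-up

    void init()     { std::puts("init");     state = &Toy::finished; }
    void finished() { std::puts("finished"); }
};

int main()
{
    Toy t;
    t.work();   // "init", then moves to the next state
    t.work();   // "finished"
}
```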
*/ + auto narInfo = std::dynamic_pointer_cast(info); + + maintainExpectedNar = std::make_unique>(worker.expectedNarSize, info->narSize); + + maintainExpectedDownload = + narInfo && narInfo->fileSize + ? std::make_unique>(worker.expectedDownloadSize, narInfo->fileSize) + : nullptr; + + worker.updateProgress(); + + /* Bail out early if this substituter lacks a valid + signature. LocalStore::addToStore() also checks for this, but + only after we've downloaded the path. */ + if (worker.store.requireSigs + && !sub->isTrusted + && !info->checkSignatures(worker.store, worker.store.getPublicKeys())) + { + logWarning({ + .name = "Invalid path signature", + .hint = hintfmt("substituter '%s' does not have a valid signature for path '%s'", + sub->getUri(), worker.store.printStorePath(storePath)) + }); + tryNext(); + return; + } + + /* To maintain the closure invariant, we first have to realise the + paths referenced by this one. */ + for (auto & i : info->references) + if (i != storePath) /* ignore self-references */ + addWaitee(worker.makeSubstitutionGoal(i)); + + if (waitees.empty()) /* to prevent hang (no wake-up event) */ + referencesValid(); + else + state = &SubstitutionGoal::referencesValid; +} + + +void SubstitutionGoal::referencesValid() +{ + trace("all references realised"); + + if (nrFailed > 0) { + debug("some references of path '%s' could not be realised", worker.store.printStorePath(storePath)); + amDone(nrNoSubstituters > 0 || nrIncompleteClosure > 0 ? ecIncompleteClosure : ecFailed); + return; + } + + for (auto & i : info->references) + if (i != storePath) /* ignore self-references */ + assert(worker.store.isValidPath(i)); + + state = &SubstitutionGoal::tryToRun; + worker.wakeUp(shared_from_this()); +} + + +void SubstitutionGoal::tryToRun() +{ + trace("trying to run"); + + /* Make sure that we are allowed to start a build. Note that even + if maxBuildJobs == 0 (no local builds allowed), we still allow + a substituter to run. This is because substitutions cannot be + distributed to another machine via the build hook. */ + if (worker.getNrLocalBuilds() >= std::max(1U, (unsigned int) settings.maxBuildJobs)) { + worker.waitForBuildSlot(shared_from_this()); + return; + } + + maintainRunningSubstitutions = std::make_unique>(worker.runningSubstitutions); + worker.updateProgress(); + + outPipe.create(); + + promise = std::promise(); + + thr = std::thread([this]() { + try { + /* Wake up the worker loop when we're done. */ + Finally updateStats([this]() { outPipe.writeSide = -1; }); + + Activity act(*logger, actSubstitute, Logger::Fields{worker.store.printStorePath(storePath), sub->getUri()}); + PushActivity pact(act.id); + + copyStorePath(ref(sub), ref(worker.store.shared_from_this()), + subPath ? *subPath : storePath, repair, sub->isTrusted ? NoCheckSigs : CheckSigs); + + promise.set_value(); + } catch (...) { + promise.set_exception(std::current_exception()); + } + }); + + worker.childStarted(shared_from_this(), {outPipe.readSide.get()}, true, false); + + state = &SubstitutionGoal::finished; +} + + +void SubstitutionGoal::finished() +{ + trace("substitute finished"); + + thr.join(); + worker.childTerminated(this); + + try { + promise.get_future().get(); + } catch (std::exception & e) { + printError(e.what()); + + /* Cause the parent build to fail unless --fallback is given, + or the substitute has disappeared. The latter case behaves + the same as the substitute never having existed in the + first place. */ + try { + throw; + } catch (SubstituteGone &) { + } catch (...) 
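Aside: `tryToRun()` above combines a pipe with a `std::promise`: the worker's poll loop can only watch file descriptors, so the helper thread signals completion by closing the pipe's write side, while the actual result (or exception) travels through the promise. The general shape of that pattern, reduced to a self-contained sketch:

```cpp
#include <future>
#include <thread>
#include <unistd.h>

void runInBackground()
{
    int fds[2];
    if (pipe(fds) == -1) return;     // sketch: no real error handling
    std::promise<void> done;

    std::thread helper([&]() {
        try {
            /* ... blocking work ... */
            done.set_value();
        } catch (...) {
            done.set_exception(std::current_exception());
        }
        close(fds[1]);               // EOF on fds[0] wakes the event loop
    });

    // A real event loop would poll fds[0]; on EOF: join the thread, then
    // call get() to rethrow any stored exception.
    helper.join();
    done.get_future().get();
    close(fds[0]);
}
```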
{ + substituterFailed = true; + } + + /* Try the next substitute. */ + state = &SubstitutionGoal::tryNext; + worker.wakeUp(shared_from_this()); + return; + } + + worker.markContentsGood(storePath); + + printMsg(lvlChatty, "substitution of path '%s' succeeded", worker.store.printStorePath(storePath)); + + maintainRunningSubstitutions.reset(); + + maintainExpectedSubstitutions.reset(); + worker.doneSubstitutions++; + + if (maintainExpectedDownload) { + auto fileSize = maintainExpectedDownload->delta; + maintainExpectedDownload.reset(); + worker.doneDownloadSize += fileSize; + } + + worker.doneNarSize += maintainExpectedNar->delta; + maintainExpectedNar.reset(); + + worker.updateProgress(); + + amDone(ecSuccess); +} + + +void SubstitutionGoal::handleChildOutput(int fd, const string & data) +{ +} + + +void SubstitutionGoal::handleEOF(int fd) +{ + if (fd == outPipe.readSide.get()) worker.wakeUp(shared_from_this()); +} + +} diff --git a/src/libstore/build/substitution-goal.hh b/src/libstore/build/substitution-goal.hh new file mode 100644 index 000000000..3ae9a9e6b --- /dev/null +++ b/src/libstore/build/substitution-goal.hh @@ -0,0 +1,89 @@ +#pragma once + +#include "lock.hh" +#include "store-api.hh" +#include "goal.hh" + +namespace nix { + +class Worker; + +class SubstitutionGoal : public Goal +{ + friend class Worker; + +private: + /* The store path that should be realised through a substitute. */ + StorePath storePath; + + /* The path the substituter refers to the path as. This will be + * different when the stores have different names. */ + std::optional subPath; + + /* The remaining substituters. */ + std::list> subs; + + /* The current substituter. */ + std::shared_ptr sub; + + /* Whether a substituter failed. */ + bool substituterFailed = false; + + /* Path info returned by the substituter's query info operation. */ + std::shared_ptr info; + + /* Pipe for the substituter's standard output. */ + Pipe outPipe; + + /* The substituter thread. */ + std::thread thr; + + std::promise promise; + + /* Whether to try to repair a valid path. */ + RepairFlag repair; + + /* Location where we're downloading the substitute. Differs from + storePath when doing a repair. */ + Path destPath; + + std::unique_ptr> maintainExpectedSubstitutions, + maintainRunningSubstitutions, maintainExpectedNar, maintainExpectedDownload; + + typedef void (SubstitutionGoal::*GoalState)(); + GoalState state; + + /* Content address for recomputing store path */ + std::optional ca; + +public: + SubstitutionGoal(const StorePath & storePath, Worker & worker, RepairFlag repair = NoRepair, std::optional ca = std::nullopt); + ~SubstitutionGoal(); + + void timedOut(Error && ex) override { abort(); }; + + string key() override + { + /* "a$" ensures substitution goals happen before derivation + goals. */ + return "a$" + std::string(storePath.name()) + "$" + worker.store.printStorePath(storePath); + } + + void work() override; + + /* The states. */ + void init(); + void tryNext(); + void gotInfo(); + void referencesValid(); + void tryToRun(); + void finished(); + + /* Callback used by the worker to write to the log. 
*/ + void handleChildOutput(int fd, const string & data) override; + void handleEOF(int fd) override; + + StorePath getStorePath() { return storePath; } +}; + +} diff --git a/src/libstore/build/worker.cc b/src/libstore/build/worker.cc new file mode 100644 index 000000000..5c3fe2f57 --- /dev/null +++ b/src/libstore/build/worker.cc @@ -0,0 +1,455 @@ +#include "machines.hh" +#include "worker.hh" +#include "substitution-goal.hh" +#include "derivation-goal.hh" +#include "hook-instance.hh" + +#include + +namespace nix { + +Worker::Worker(LocalStore & store) + : act(*logger, actRealise) + , actDerivations(*logger, actBuilds) + , actSubstitutions(*logger, actCopyPaths) + , store(store) +{ + /* Debugging: prevent recursive workers. */ + nrLocalBuilds = 0; + lastWokenUp = steady_time_point::min(); + permanentFailure = false; + timedOut = false; + hashMismatch = false; + checkMismatch = false; +} + + +Worker::~Worker() +{ + /* Explicitly get rid of all strong pointers now. After this all + goals that refer to this worker should be gone. (Otherwise we + are in trouble, since goals may call childTerminated() etc. in + their destructors). */ + topGoals.clear(); + + assert(expectedSubstitutions == 0); + assert(expectedDownloadSize == 0); + assert(expectedNarSize == 0); +} + + +std::shared_ptr Worker::makeDerivationGoalCommon( + const StorePath & drvPath, + const StringSet & wantedOutputs, + std::function()> mkDrvGoal) +{ + WeakGoalPtr & abstract_goal_weak = derivationGoals[drvPath]; + GoalPtr abstract_goal = abstract_goal_weak.lock(); // FIXME + std::shared_ptr goal; + if (!abstract_goal) { + goal = mkDrvGoal(); + abstract_goal_weak = goal; + wakeUp(goal); + } else { + goal = std::dynamic_pointer_cast(abstract_goal); + assert(goal); + goal->addWantedOutputs(wantedOutputs); + } + return goal; +} + + +std::shared_ptr Worker::makeDerivationGoal(const StorePath & drvPath, + const StringSet & wantedOutputs, BuildMode buildMode) +{ + return makeDerivationGoalCommon(drvPath, wantedOutputs, [&]() { + return std::make_shared(drvPath, wantedOutputs, *this, buildMode); + }); +} + + +std::shared_ptr Worker::makeBasicDerivationGoal(const StorePath & drvPath, + const BasicDerivation & drv, const StringSet & wantedOutputs, BuildMode buildMode) +{ + return makeDerivationGoalCommon(drvPath, wantedOutputs, [&]() { + return std::make_shared(drvPath, drv, wantedOutputs, *this, buildMode); + }); +} + + +GoalPtr Worker::makeSubstitutionGoal(const StorePath & path, RepairFlag repair, std::optional ca) +{ + WeakGoalPtr & goal_weak = substitutionGoals[path]; + GoalPtr goal = goal_weak.lock(); // FIXME + if (!goal) { + goal = std::make_shared(path, *this, repair, ca); + goal_weak = goal; + wakeUp(goal); + } + return goal; +} + + +static void removeGoal(GoalPtr goal, WeakGoalMap & goalMap) +{ + /* !!! inefficient */ + for (WeakGoalMap::iterator i = goalMap.begin(); + i != goalMap.end(); ) + if (i->second.lock() == goal) { + WeakGoalMap::iterator j = i; ++j; + goalMap.erase(i); + i = j; + } + else ++i; +} + + +void Worker::removeGoal(GoalPtr goal) +{ + nix::removeGoal(goal, derivationGoals); + nix::removeGoal(goal, substitutionGoals); + if (topGoals.find(goal) != topGoals.end()) { + topGoals.erase(goal); + /* If a top-level goal failed, then kill all other goals + (unless keepGoing was set). */ + if (goal->exitCode == Goal::ecFailed && !settings.keepGoing) + topGoals.clear(); + } + + /* Wake up goals waiting for any goal to finish. 
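Aside: the `derivationGoals`/`substitutionGoals` maps above make goal creation idempotent: asking twice for the same derivation returns the same goal, with the wanted outputs merged via `addWantedOutputs()`. A small sketch (the wrapper function is invented for illustration):

```cpp
#include <cassert>

void example(Worker & worker, const StorePath & drvPath)
{
    auto g1 = worker.makeDerivationGoal(drvPath, StringSet{"out"});
    auto g2 = worker.makeDerivationGoal(drvPath, StringSet{"dev"});

    assert(g1 == g2);   // the second call found the goal in derivationGoals
    // The single goal now wants both "out" and "dev"; DerivationGoal's
    // needRestart flag exists so it can re-check outputs if already running.
}
```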
*/ + for (auto & i : waitingForAnyGoal) { + GoalPtr goal = i.lock(); + if (goal) wakeUp(goal); + } + + waitingForAnyGoal.clear(); +} + + +void Worker::wakeUp(GoalPtr goal) +{ + goal->trace("woken up"); + addToWeakGoals(awake, goal); +} + + +unsigned Worker::getNrLocalBuilds() +{ + return nrLocalBuilds; +} + + +void Worker::childStarted(GoalPtr goal, const set & fds, + bool inBuildSlot, bool respectTimeouts) +{ + Child child; + child.goal = goal; + child.goal2 = goal.get(); + child.fds = fds; + child.timeStarted = child.lastOutput = steady_time_point::clock::now(); + child.inBuildSlot = inBuildSlot; + child.respectTimeouts = respectTimeouts; + children.emplace_back(child); + if (inBuildSlot) nrLocalBuilds++; +} + + +void Worker::childTerminated(Goal * goal, bool wakeSleepers) +{ + auto i = std::find_if(children.begin(), children.end(), + [&](const Child & child) { return child.goal2 == goal; }); + if (i == children.end()) return; + + if (i->inBuildSlot) { + assert(nrLocalBuilds > 0); + nrLocalBuilds--; + } + + children.erase(i); + + if (wakeSleepers) { + + /* Wake up goals waiting for a build slot. */ + for (auto & j : wantingToBuild) { + GoalPtr goal = j.lock(); + if (goal) wakeUp(goal); + } + + wantingToBuild.clear(); + } +} + + +void Worker::waitForBuildSlot(GoalPtr goal) +{ + debug("wait for build slot"); + if (getNrLocalBuilds() < settings.maxBuildJobs) + wakeUp(goal); /* we can do it right away */ + else + addToWeakGoals(wantingToBuild, goal); +} + + +void Worker::waitForAnyGoal(GoalPtr goal) +{ + debug("wait for any goal"); + addToWeakGoals(waitingForAnyGoal, goal); +} + + +void Worker::waitForAWhile(GoalPtr goal) +{ + debug("wait for a while"); + addToWeakGoals(waitingForAWhile, goal); +} + + +void Worker::run(const Goals & _topGoals) +{ + for (auto & i : _topGoals) topGoals.insert(i); + + debug("entered goal loop"); + + while (1) { + + checkInterrupt(); + + store.autoGC(false); + + /* Call every wake goal (in the ordering established by + CompareGoalPtrs). */ + while (!awake.empty() && !topGoals.empty()) { + Goals awake2; + for (auto & i : awake) { + GoalPtr goal = i.lock(); + if (goal) awake2.insert(goal); + } + awake.clear(); + for (auto & goal : awake2) { + checkInterrupt(); + goal->work(); + if (topGoals.empty()) break; // stuff may have been cancelled + } + } + + if (topGoals.empty()) break; + + /* Wait for input. */ + if (!children.empty() || !waitingForAWhile.empty()) + waitForInput(); + else { + if (awake.empty() && 0 == settings.maxBuildJobs) + { + if (getMachines().empty()) + throw Error("unable to start any build; either increase '--max-jobs' " + "or enable remote builds." + "\nhttps://nixos.org/nix/manual/#chap-distributed-builds"); + else + throw Error("unable to start any build; remote machines may not have " + "all required system features." + "\nhttps://nixos.org/nix/manual/#chap-distributed-builds"); + + } + assert(!awake.empty()); + } + } + + /* If --keep-going is not set, it's possible that the main goal + exited while some of its subgoals were still active. But if + --keep-going *is* set, then they must all be finished now. */ + assert(!settings.keepGoing || awake.empty()); + assert(!settings.keepGoing || wantingToBuild.empty()); + assert(!settings.keepGoing || children.empty()); +} + +void Worker::waitForInput() +{ + printMsg(lvlVomit, "waiting for children"); + + /* Process output from the file descriptors attached to the + children, namely log output and output path creation commands. 
+ We also use this to detect child termination: if we get EOF on + the logger pipe of a build, we assume that the builder has + terminated. */ + + bool useTimeout = false; + long timeout = 0; + auto before = steady_time_point::clock::now(); + + /* If we're monitoring for silence on stdout/stderr, or if there + is a build timeout, then wait for input until the first + deadline for any child. */ + auto nearest = steady_time_point::max(); // nearest deadline + if (settings.minFree.get() != 0) + // Periodicallty wake up to see if we need to run the garbage collector. + nearest = before + std::chrono::seconds(10); + for (auto & i : children) { + if (!i.respectTimeouts) continue; + if (0 != settings.maxSilentTime) + nearest = std::min(nearest, i.lastOutput + std::chrono::seconds(settings.maxSilentTime)); + if (0 != settings.buildTimeout) + nearest = std::min(nearest, i.timeStarted + std::chrono::seconds(settings.buildTimeout)); + } + if (nearest != steady_time_point::max()) { + timeout = std::max(1L, (long) std::chrono::duration_cast(nearest - before).count()); + useTimeout = true; + } + + /* If we are polling goals that are waiting for a lock, then wake + up after a few seconds at most. */ + if (!waitingForAWhile.empty()) { + useTimeout = true; + if (lastWokenUp == steady_time_point::min() || lastWokenUp > before) lastWokenUp = before; + timeout = std::max(1L, + (long) std::chrono::duration_cast( + lastWokenUp + std::chrono::seconds(settings.pollInterval) - before).count()); + } else lastWokenUp = steady_time_point::min(); + + if (useTimeout) + vomit("sleeping %d seconds", timeout); + + /* Use select() to wait for the input side of any logger pipe to + become `available'. Note that `available' (i.e., non-blocking) + includes EOF. */ + std::vector pollStatus; + std::map fdToPollStatus; + for (auto & i : children) { + for (auto & j : i.fds) { + pollStatus.push_back((struct pollfd) { .fd = j, .events = POLLIN }); + fdToPollStatus[j] = pollStatus.size() - 1; + } + } + + if (poll(pollStatus.data(), pollStatus.size(), + useTimeout ? timeout * 1000 : -1) == -1) { + if (errno == EINTR) return; + throw SysError("waiting for input"); + } + + auto after = steady_time_point::clock::now(); + + /* Process all available file descriptors. FIXME: this is + O(children * fds). */ + decltype(children)::iterator i; + for (auto j = children.begin(); j != children.end(); j = i) { + i = std::next(j); + + checkInterrupt(); + + GoalPtr goal = j->goal.lock(); + assert(goal); + + set fds2(j->fds); + std::vector buffer(4096); + for (auto & k : fds2) { + if (pollStatus.at(fdToPollStatus.at(k)).revents) { + ssize_t rd = ::read(k, buffer.data(), buffer.size()); + // FIXME: is there a cleaner way to handle pt close + // than EIO? Is this even standard? 
+ if (rd == 0 || (rd == -1 && errno == EIO)) { + debug("%1%: got EOF", goal->getName()); + goal->handleEOF(k); + j->fds.erase(k); + } else if (rd == -1) { + if (errno != EINTR) + throw SysError("%s: read failed", goal->getName()); + } else { + printMsg(lvlVomit, "%1%: read %2% bytes", + goal->getName(), rd); + string data((char *) buffer.data(), rd); + j->lastOutput = after; + goal->handleChildOutput(k, data); + } + } + } + + if (goal->exitCode == Goal::ecBusy && + 0 != settings.maxSilentTime && + j->respectTimeouts && + after - j->lastOutput >= std::chrono::seconds(settings.maxSilentTime)) + { + goal->timedOut(Error( + "%1% timed out after %2% seconds of silence", + goal->getName(), settings.maxSilentTime)); + } + + else if (goal->exitCode == Goal::ecBusy && + 0 != settings.buildTimeout && + j->respectTimeouts && + after - j->timeStarted >= std::chrono::seconds(settings.buildTimeout)) + { + goal->timedOut(Error( + "%1% timed out after %2% seconds", + goal->getName(), settings.buildTimeout)); + } + } + + if (!waitingForAWhile.empty() && lastWokenUp + std::chrono::seconds(settings.pollInterval) <= after) { + lastWokenUp = after; + for (auto & i : waitingForAWhile) { + GoalPtr goal = i.lock(); + if (goal) wakeUp(goal); + } + waitingForAWhile.clear(); + } +} + + +unsigned int Worker::exitStatus() +{ + /* + * 1100100 + * ^^^^ + * |||`- timeout + * ||`-- output hash mismatch + * |`--- build failure + * `---- not deterministic + */ + unsigned int mask = 0; + bool buildFailure = permanentFailure || timedOut || hashMismatch; + if (buildFailure) + mask |= 0x04; // 100 + if (timedOut) + mask |= 0x01; // 101 + if (hashMismatch) + mask |= 0x02; // 102 + if (checkMismatch) { + mask |= 0x08; // 104 + } + + if (mask) + mask |= 0x60; + return mask ? mask : 1; +} + + +bool Worker::pathContentsGood(const StorePath & path) +{ + auto i = pathContentsGoodCache.find(path); + if (i != pathContentsGoodCache.end()) return i->second; + printInfo("checking path '%s'...", store.printStorePath(path)); + auto info = store.queryPathInfo(path); + bool res; + if (!pathExists(store.printStorePath(path))) + res = false; + else { + HashResult current = hashPath(info->narHash.type, store.printStorePath(path)); + Hash nullHash(htSHA256); + res = info->narHash == nullHash || info->narHash == current.first; + } + pathContentsGoodCache.insert_or_assign(path, res); + if (!res) + logError({ + .name = "Corrupted path", + .hint = hintfmt("path '%s' is corrupted or missing!", store.printStorePath(path)) + }); + return res; +} + + +void Worker::markContentsGood(const StorePath & path) +{ + pathContentsGoodCache.insert_or_assign(path, true); +} + +} diff --git a/src/libstore/build/worker.hh b/src/libstore/build/worker.hh new file mode 100644 index 000000000..a54316343 --- /dev/null +++ b/src/libstore/build/worker.hh @@ -0,0 +1,195 @@ +#pragma once + +#include "types.hh" +#include "lock.hh" +#include "local-store.hh" +#include "goal.hh" + +namespace nix { + +/* Forward definition. */ +class DerivationGoal; + +typedef std::chrono::time_point steady_time_point; + + +/* A mapping used to remember for each child process to what goal it + belongs, and file descriptors for receiving log data and output + path creation commands. */ +struct Child +{ + WeakGoalPtr goal; + Goal * goal2; // ugly hackery + set fds; + bool respectTimeouts; + bool inBuildSlot; + steady_time_point lastOutput; /* time we last got output on stdout/stderr */ + steady_time_point timeStarted; +}; + +/* Forward definition. */ +struct HookInstance; + +/* The worker class. 
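Aside: to make the `exitStatus()` bit layout above concrete, these are the values it yields (a quick self-check, not part of the patch):

```cpp
// 0x60 is OR-ed in whenever any failure bit is set:
//   permanent build failure        -> 0x60 | 0x04        = 100
//   timeout                        -> 0x60 | 0x04 | 0x01 = 101
//   output hash mismatch           -> 0x60 | 0x04 | 0x02 = 102
//   timeout + hash mismatch        -> 0x60 | 0x04 | 0x03 = 103
//   non-deterministic (check mode) -> 0x60 | 0x08        = 104
//   no flag set at all             -> 1
static_assert((0x60 | 0x04) == 100 && (0x60 | 0x05) == 101
    && (0x60 | 0x06) == 102 && (0x60 | 0x07) == 103 && (0x60 | 0x08) == 104,
    "exit status bit layout");
```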
*/ +class Worker +{ +private: + + /* Note: the worker should only have strong pointers to the + top-level goals. */ + + /* The top-level goals of the worker. */ + Goals topGoals; + + /* Goals that are ready to do some work. */ + WeakGoals awake; + + /* Goals waiting for a build slot. */ + WeakGoals wantingToBuild; + + /* Child processes currently running. */ + std::list children; + + /* Number of build slots occupied. This includes local builds and + substitutions but not remote builds via the build hook. */ + unsigned int nrLocalBuilds; + + /* Maps used to prevent multiple instantiations of a goal for the + same derivation / path. */ + WeakGoalMap derivationGoals; + WeakGoalMap substitutionGoals; + + /* Goals waiting for busy paths to be unlocked. */ + WeakGoals waitingForAnyGoal; + + /* Goals sleeping for a few seconds (polling a lock). */ + WeakGoals waitingForAWhile; + + /* Last time the goals in `waitingForAWhile' where woken up. */ + steady_time_point lastWokenUp; + + /* Cache for pathContentsGood(). */ + std::map pathContentsGoodCache; + +public: + + const Activity act; + const Activity actDerivations; + const Activity actSubstitutions; + + /* Set if at least one derivation had a BuildError (i.e. permanent + failure). */ + bool permanentFailure; + + /* Set if at least one derivation had a timeout. */ + bool timedOut; + + /* Set if at least one derivation fails with a hash mismatch. */ + bool hashMismatch; + + /* Set if at least one derivation is not deterministic in check mode. */ + bool checkMismatch; + + LocalStore & store; + + std::unique_ptr hook; + + uint64_t expectedBuilds = 0; + uint64_t doneBuilds = 0; + uint64_t failedBuilds = 0; + uint64_t runningBuilds = 0; + + uint64_t expectedSubstitutions = 0; + uint64_t doneSubstitutions = 0; + uint64_t failedSubstitutions = 0; + uint64_t runningSubstitutions = 0; + uint64_t expectedDownloadSize = 0; + uint64_t doneDownloadSize = 0; + uint64_t expectedNarSize = 0; + uint64_t doneNarSize = 0; + + /* Whether to ask the build hook if it can build a derivation. If + it answers with "decline-permanently", we don't try again. */ + bool tryBuildHook = true; + + Worker(LocalStore & store); + ~Worker(); + + /* Make a goal (with caching). */ + + /* derivation goal */ +private: + std::shared_ptr makeDerivationGoalCommon( + const StorePath & drvPath, const StringSet & wantedOutputs, + std::function()> mkDrvGoal); +public: + std::shared_ptr makeDerivationGoal( + const StorePath & drvPath, + const StringSet & wantedOutputs, BuildMode buildMode = bmNormal); + std::shared_ptr makeBasicDerivationGoal( + const StorePath & drvPath, const BasicDerivation & drv, + const StringSet & wantedOutputs, BuildMode buildMode = bmNormal); + + /* substitution goal */ + GoalPtr makeSubstitutionGoal(const StorePath & storePath, RepairFlag repair = NoRepair, std::optional ca = std::nullopt); + + /* Remove a dead goal. */ + void removeGoal(GoalPtr goal); + + /* Wake up a goal (i.e., there is something for it to do). */ + void wakeUp(GoalPtr goal); + + /* Return the number of local build and substitution processes + currently running (but not remote builds via the build + hook). */ + unsigned int getNrLocalBuilds(); + + /* Registers a running child process. `inBuildSlot' means that + the process counts towards the jobs limit. */ + void childStarted(GoalPtr goal, const set & fds, + bool inBuildSlot, bool respectTimeouts); + + /* Unregisters a running child process. 
`wakeSleepers' should be + false if there is no sense in waking up goals that are sleeping + because they can't run yet (e.g., there is no free build slot, + or the hook would still say `postpone'). */ + void childTerminated(Goal * goal, bool wakeSleepers = true); + + /* Put `goal' to sleep until a build slot becomes available (which + might be right away). */ + void waitForBuildSlot(GoalPtr goal); + + /* Wait for any goal to finish. Pretty indiscriminate way to + wait for some resource that some other goal is holding. */ + void waitForAnyGoal(GoalPtr goal); + + /* Wait for a few seconds and then retry this goal. Used when + waiting for a lock held by another process. This kind of + polling is inefficient, but POSIX doesn't really provide a way + to wait for multiple locks in the main select() loop. */ + void waitForAWhile(GoalPtr goal); + + /* Loop until the specified top-level goals have finished. */ + void run(const Goals & topGoals); + + /* Wait for input to become available. */ + void waitForInput(); + + unsigned int exitStatus(); + + /* Check whether the given valid path exists and has the right + contents. */ + bool pathContentsGood(const StorePath & path); + + void markContentsGood(const StorePath & path); + + void updateProgress() + { + actDerivations.progress(doneBuilds, expectedBuilds + doneBuilds, runningBuilds, failedBuilds); + actSubstitutions.progress(doneSubstitutions, expectedSubstitutions + doneSubstitutions, runningSubstitutions, failedSubstitutions); + act.setExpected(actFileTransfer, expectedDownloadSize + doneDownloadSize); + act.setExpected(actCopyPath, expectedNarSize + doneNarSize); + } +}; + +} diff --git a/src/libstore/daemon.cc b/src/libstore/daemon.cc index 83f8968b0..99d8add92 100644 --- a/src/libstore/daemon.cc +++ b/src/libstore/daemon.cc @@ -101,17 +101,20 @@ struct TunnelLogger : public Logger /* stopWork() means that we're done; stop sending stderr to the client. */ - void stopWork(bool success = true, const string & msg = "", unsigned int status = 0) + void stopWork(const Error * ex = nullptr) { auto state(state_.lock()); state->canSendStderr = false; - if (success) + if (!ex) to << STDERR_LAST; else { - to << STDERR_ERROR << msg; - if (status != 0) to << status; + if (GET_PROTOCOL_MINOR(clientVersion) >= 26) { + to << STDERR_ERROR << *ex; + } else { + to << STDERR_ERROR << ex->what() << ex->status; + } } } @@ -247,7 +250,7 @@ static void writeValidPathInfo( { to << (info->deriver ? 
store->printStorePath(*info->deriver) : "") << info->narHash.to_string(Base16, false); - writeStorePaths(*store, to, info->references); + worker_proto::write(*store, to, info->references); to << info->registrationTime << info->narSize; if (GET_PROTOCOL_MINOR(clientVersion) >= 16) { to << info->ultimate @@ -272,11 +275,11 @@ static void performOp(TunnelLogger * logger, ref store, } case wopQueryValidPaths: { - auto paths = readStorePaths(*store, from); + auto paths = worker_proto::read(*store, from, Phantom {}); logger->startWork(); auto res = store->queryValidPaths(paths); logger->stopWork(); - writeStorePaths(*store, to, res); + worker_proto::write(*store, to, res); break; } @@ -292,11 +295,11 @@ static void performOp(TunnelLogger * logger, ref store, } case wopQuerySubstitutablePaths: { - auto paths = readStorePaths(*store, from); + auto paths = worker_proto::read(*store, from, Phantom {}); logger->startWork(); auto res = store->querySubstitutablePaths(paths); logger->stopWork(); - writeStorePaths(*store, to, res); + worker_proto::write(*store, to, res); break; } @@ -325,7 +328,7 @@ static void performOp(TunnelLogger * logger, ref store, paths = store->queryValidDerivers(path); else paths = store->queryDerivationOutputs(path); logger->stopWork(); - writeStorePaths(*store, to, paths); + worker_proto::write(*store, to, paths); break; } @@ -369,7 +372,7 @@ static void performOp(TunnelLogger * logger, ref store, if (GET_PROTOCOL_MINOR(clientVersion) >= 25) { auto name = readString(from); auto camStr = readString(from); - auto refs = readStorePaths(*store, from); + auto refs = worker_proto::read(*store, from, Phantom {}); bool repairBool; from >> repairBool; auto repair = RepairFlag{repairBool}; @@ -449,7 +452,7 @@ static void performOp(TunnelLogger * logger, ref store, case wopAddTextToStore: { string suffix = readString(from); string s = readString(from); - auto refs = readStorePaths(*store, from); + auto refs = worker_proto::read(*store, from, Phantom {}); logger->startWork(); auto path = store->addTextToStore(suffix, s, refs, NoRepair); logger->stopWork(); @@ -546,6 +549,20 @@ static void performOp(TunnelLogger * logger, ref store, are in fact content-addressed if we don't trust them. */ assert(derivationIsCA(drv.type()) || trusted); + /* Recompute the derivation path when we cannot trust the original. */ + if (!trusted) { + /* Recomputing the derivation path for input-address derivations + makes it harder to audit them after the fact, since we need the + original not-necessarily-resolved derivation to verify the drv + derivation as adequate claim to the input-addressed output + paths. */ + assert(derivationIsCA(drv.type())); + + Derivation drv2; + static_cast(drv2) = drv; + drvPath = writeDerivation(*store, Derivation { drv2 }); + } + auto res = store->buildDerivation(drvPath, drv, buildMode); logger->stopWork(); to << res.status << res.errorMsg; @@ -608,7 +625,7 @@ static void performOp(TunnelLogger * logger, ref store, case wopCollectGarbage: { GCOptions options; options.action = (GCOptions::GCAction) readInt(from); - options.pathsToDelete = readStorePaths(*store, from); + options.pathsToDelete = worker_proto::read(*store, from, Phantom {}); from >> options.ignoreLiveness >> options.maxFreed; // obsolete fields readInt(from); @@ -677,7 +694,7 @@ static void performOp(TunnelLogger * logger, ref store, else { to << 1 << (i->second.deriver ? 
store->printStorePath(*i->second.deriver) : ""); - writeStorePaths(*store, to, i->second.references); + worker_proto::write(*store, to, i->second.references); to << i->second.downloadSize << i->second.narSize; } @@ -688,11 +705,11 @@ static void performOp(TunnelLogger * logger, ref store, SubstitutablePathInfos infos; StorePathCAMap pathsMap = {}; if (GET_PROTOCOL_MINOR(clientVersion) < 22) { - auto paths = readStorePaths(*store, from); + auto paths = worker_proto::read(*store, from, Phantom {}); for (auto & path : paths) pathsMap.emplace(path, std::nullopt); } else - pathsMap = readStorePathCAMap(*store, from); + pathsMap = worker_proto::read(*store, from, Phantom {}); logger->startWork(); store->querySubstitutablePathInfos(pathsMap, infos); logger->stopWork(); @@ -700,7 +717,7 @@ static void performOp(TunnelLogger * logger, ref store, for (auto & i : infos) { to << store->printStorePath(i.first) << (i.second.deriver ? store->printStorePath(*i.second.deriver) : ""); - writeStorePaths(*store, to, i.second.references); + worker_proto::write(*store, to, i.second.references); to << i.second.downloadSize << i.second.narSize; } break; @@ -710,7 +727,7 @@ static void performOp(TunnelLogger * logger, ref store, logger->startWork(); auto paths = store->queryAllValidPaths(); logger->stopWork(); - writeStorePaths(*store, to, paths); + worker_proto::write(*store, to, paths); break; } @@ -782,7 +799,7 @@ static void performOp(TunnelLogger * logger, ref store, ValidPathInfo info { path, narHash }; if (deriver != "") info.deriver = store->parseStorePath(deriver); - info.references = readStorePaths(*store, from); + info.references = worker_proto::read(*store, from, Phantom {}); from >> info.registrationTime >> info.narSize >> info.ultimate; info.sigs = readStrings(from); info.ca = parseContentAddressOpt(readString(from)); @@ -835,9 +852,9 @@ static void performOp(TunnelLogger * logger, ref store, uint64_t downloadSize, narSize; store->queryMissing(targets, willBuild, willSubstitute, unknown, downloadSize, narSize); logger->stopWork(); - writeStorePaths(*store, to, willBuild); - writeStorePaths(*store, to, willSubstitute); - writeStorePaths(*store, to, unknown); + worker_proto::write(*store, to, willBuild); + worker_proto::write(*store, to, willSubstitute); + worker_proto::write(*store, to, unknown); to << downloadSize << narSize; break; } @@ -921,10 +938,11 @@ void processConnection( during addTextToStore() / importPath(). If that happens, just send the error message and exit. 
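Aside: the repeated `readStorePaths`/`writeStorePaths` to `worker_proto::{read,write}` conversions in this file all follow one pattern. A hedged sketch of what a hypothetical new `StorePathSet` field would look like under the new helpers; `store`, `from` and `to` are assumed to be the objects already in scope inside `performOp()`, and `extraPaths` is invented:

```cpp
// Deserialize a set of store paths from the client...
StorePathSet extraPaths = worker_proto::read(*store, from, Phantom<StorePathSet> {});
// ...and serialize one back to it.
worker_proto::write(*store, to, extraPaths);
```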
*/ bool errorAllowed = tunnelLogger->state_.lock()->canSendStderr; - tunnelLogger->stopWork(false, e.msg(), e.status); + tunnelLogger->stopWork(&e); if (!errorAllowed) throw; } catch (std::bad_alloc & e) { - tunnelLogger->stopWork(false, "Nix daemon out of memory", 1); + auto ex = Error("Nix daemon out of memory"); + tunnelLogger->stopWork(&ex); throw; } @@ -933,8 +951,13 @@ void processConnection( assert(!tunnelLogger->state_.lock()->canSendStderr); }; + } catch (Error & e) { + tunnelLogger->stopWork(&e); + to.flush(); + return; } catch (std::exception & e) { - tunnelLogger->stopWork(false, e.what(), 1); + auto ex = Error(e.what()); + tunnelLogger->stopWork(&ex); to.flush(); return; } diff --git a/src/libstore/daemon.hh b/src/libstore/daemon.hh index 841ace316..67755d54e 100644 --- a/src/libstore/daemon.hh +++ b/src/libstore/daemon.hh @@ -1,3 +1,5 @@ +#pragma once + #include "serialise.hh" #include "store-api.hh" diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc index 9d8ce5e36..07b4e772b 100644 --- a/src/libstore/derivations.cc +++ b/src/libstore/derivations.cc @@ -69,7 +69,7 @@ bool BasicDerivation::isBuiltin() const StorePath writeDerivation(Store & store, - const Derivation & drv, RepairFlag repair) + const Derivation & drv, RepairFlag repair, bool readOnly) { auto references = drv.inputSrcs; for (auto & i : drv.inputDrvs) @@ -79,7 +79,7 @@ StorePath writeDerivation(Store & store, held during a garbage collection). */ auto suffix = std::string(drv.name) + drvExtension; auto contents = drv.unparse(store, false); - return settings.readOnlyMode + return readOnly || settings.readOnlyMode ? store.computeStorePathForText(suffix, contents, references) : store.addTextToStore(suffix, contents, references, repair); } @@ -584,7 +584,7 @@ Source & readDerivation(Source & in, const Store & store, BasicDerivation & drv, drv.outputs.emplace(std::move(name), std::move(output)); } - drv.inputSrcs = readStorePaths(store, in); + drv.inputSrcs = worker_proto::read(store, in, Phantom {}); in >> drv.platform >> drv.builder; drv.args = readStrings(in); @@ -622,7 +622,7 @@ void writeDerivation(Sink & out, const Store & store, const BasicDerivation & dr }, }, i.second.output); } - writeStorePaths(store, out, drv.inputSrcs); + worker_proto::write(store, out, drv.inputSrcs); out << drv.platform << drv.builder << drv.args; out << drv.env.size(); for (auto & i : drv.env) @@ -644,4 +644,57 @@ std::string downstreamPlaceholder(const Store & store, const StorePath & drvPath return "/" + hashString(htSHA256, clearText).to_string(Base32, false); } + +// N.B. 
Outputs are left unchanged +static void rewriteDerivation(Store & store, BasicDerivation & drv, const StringMap & rewrites) { + + debug("Rewriting the derivation"); + + for (auto &rewrite: rewrites) { + debug("rewriting %s as %s", rewrite.first, rewrite.second); + } + + drv.builder = rewriteStrings(drv.builder, rewrites); + for (auto & arg: drv.args) { + arg = rewriteStrings(arg, rewrites); + } + + StringPairs newEnv; + for (auto & envVar: drv.env) { + auto envName = rewriteStrings(envVar.first, rewrites); + auto envValue = rewriteStrings(envVar.second, rewrites); + newEnv.emplace(envName, envValue); + } + drv.env = newEnv; +} + + +Sync drvPathResolutions; + +std::optional Derivation::tryResolve(Store & store) { + BasicDerivation resolved { *this }; + + // Input paths that we'll want to rewrite in the derivation + StringMap inputRewrites; + + for (auto & input : inputDrvs) { + auto inputDrvOutputs = store.queryPartialDerivationOutputMap(input.first); + StringSet newOutputNames; + for (auto & outputName : input.second) { + auto actualPathOpt = inputDrvOutputs.at(outputName); + if (!actualPathOpt) + return std::nullopt; + auto actualPath = *actualPathOpt; + inputRewrites.emplace( + downstreamPlaceholder(store, input.first, outputName), + store.printStorePath(actualPath)); + resolved.inputSrcs.insert(std::move(actualPath)); + } + } + + rewriteDerivation(store, resolved, inputRewrites); + + return resolved; +} + } diff --git a/src/libstore/derivations.hh b/src/libstore/derivations.hh index 0b5652685..6d292b2e5 100644 --- a/src/libstore/derivations.hh +++ b/src/libstore/derivations.hh @@ -4,6 +4,7 @@ #include "types.hh" #include "hash.hh" #include "content-address.hh" +#include "sync.hh" #include #include @@ -60,8 +61,6 @@ typedef std::map DerivationOutputs; also contains, for each output, the (optional) store path in which it would be written. To calculate values of these types, see the corresponding functions in BasicDerivation */ -typedef std::map> - DerivationOutputsAndPaths; typedef std::map>> DerivationOutputsAndOptPaths; @@ -100,7 +99,7 @@ struct BasicDerivation StringPairs env; std::string name; - BasicDerivation() { } + BasicDerivation() = default; virtual ~BasicDerivation() { }; bool isBuiltin() const; @@ -127,7 +126,17 @@ struct Derivation : BasicDerivation std::string unparse(const Store & store, bool maskOutputs, std::map * actualInputs = nullptr) const; - Derivation() { } + /* Return the underlying basic derivation but with these changes: + + 1. Input drvs are emptied, but the outputs of them that were used are + added directly to input sources. + + 2. Input placeholders are replaced with realized input store paths. */ + std::optional tryResolve(Store & store); + + Derivation() = default; + Derivation(const BasicDerivation & bd) : BasicDerivation(bd) { } + Derivation(BasicDerivation && bd) : BasicDerivation(std::move(bd)) { } }; @@ -137,7 +146,9 @@ enum RepairFlag : bool { NoRepair = false, Repair = true }; /* Write a derivation to the Nix store, and return its path. */ StorePath writeDerivation(Store & store, - const Derivation & drv, RepairFlag repair = NoRepair); + const Derivation & drv, + RepairFlag repair = NoRepair, + bool readOnly = false); /* Read a derivation from a file. */ Derivation parseDerivation(const Store & store, std::string && s, std::string_view name); @@ -191,6 +202,16 @@ typedef std::map DrvHashes; extern DrvHashes drvHashes; // FIXME: global, not thread-safe +/* Memoisation of `readDerivation(..).resove()`. 
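Aside: a hedged sketch of how a caller is expected to use `tryResolve()` above, for instance before building a content-addressed derivation; the wrapper function is invented, and the memoisation table declared next is ignored here:

```cpp
void buildResolved(Store & store, const StorePath & drvPath)
{
    Derivation drv = store.readDerivation(drvPath);

    if (auto resolved = drv.tryResolve(store)) {
        // Every input output path was known: placeholders have been
        // rewritten and the used outputs moved into inputSrcs.
        auto resolvedPath = writeDerivation(
            store, Derivation { *resolved }, NoRepair, /* readOnly */ true);
        // ... build resolvedPath instead of drvPath ...
    } else {
        // Some input output path is not known yet: realise the inputs
        // first, then retry.
    }
}
```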
*/ +typedef std::map< + StorePath, + std::optional +> DrvPathResolutions; + +// FIXME: global, though at least thread-safe. +// FIXME: arguably overlaps with hashDerivationModulo memo table. +extern Sync drvPathResolutions; + bool wantOutput(const string & output, const std::set & wanted); struct Source; diff --git a/src/libstore/dummy-store.cc b/src/libstore/dummy-store.cc index 49641c2ac..98b745c3a 100644 --- a/src/libstore/dummy-store.cc +++ b/src/libstore/dummy-store.cc @@ -18,8 +18,7 @@ struct DummyStore : public Store, public virtual DummyStoreConfig DummyStore(const Params & params) : StoreConfig(params) , Store(params) - { - } + { } string getUri() override { @@ -63,6 +62,6 @@ struct DummyStore : public Store, public virtual DummyStoreConfig { unsupported("buildDerivation"); } }; -static RegisterStoreImplementation regStore; +static RegisterStoreImplementation regDummyStore; } diff --git a/src/libstore/export-import.cc b/src/libstore/export-import.cc index ccd466d09..02c839520 100644 --- a/src/libstore/export-import.cc +++ b/src/libstore/export-import.cc @@ -45,7 +45,7 @@ void Store::exportPath(const StorePath & path, Sink & sink) teeSink << exportMagic << printStorePath(path); - writeStorePaths(*this, teeSink, info->references); + worker_proto::write(*this, teeSink, info->references); teeSink << (info->deriver ? printStorePath(*info->deriver) : "") << 0; @@ -73,7 +73,7 @@ StorePaths Store::importPaths(Source & source, CheckSigsFlag checkSigs) //Activity act(*logger, lvlInfo, format("importing path '%s'") % info.path); - auto references = readStorePaths(*this, source); + auto references = worker_proto::read(*this, source, Phantom {}); auto deriver = readString(source); auto narHash = hashString(htSHA256, *saved.s); diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index 6241b5e00..c2c65af05 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -31,7 +31,7 @@ namespace nix { FileTransferSettings fileTransferSettings; -static GlobalConfig::Register r1(&fileTransferSettings); +static GlobalConfig::Register rFileTransferSettings(&fileTransferSettings); std::string resolveUri(const std::string & uri) { @@ -113,6 +113,9 @@ struct curlFileTransfer : public FileTransfer requestHeaders = curl_slist_append(requestHeaders, ("If-None-Match: " + request.expectedETag).c_str()); if (!request.mimeType.empty()) requestHeaders = curl_slist_append(requestHeaders, ("Content-Type: " + request.mimeType).c_str()); + for (auto it = request.headers.begin(); it != request.headers.end(); ++it){ + requestHeaders = curl_slist_append(requestHeaders, fmt("%s: %s", it->first, it->second).c_str()); + } } ~TransferItem() diff --git a/src/libstore/filetransfer.hh b/src/libstore/filetransfer.hh index 0d608c8d8..c89c51a21 100644 --- a/src/libstore/filetransfer.hh +++ b/src/libstore/filetransfer.hh @@ -51,6 +51,7 @@ extern FileTransferSettings fileTransferSettings; struct FileTransferRequest { std::string uri; + Headers headers; std::string expectedETag; bool verifyTLS = true; bool head = false; diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc index 518a357ef..bc692ca42 100644 --- a/src/libstore/gc.cc +++ b/src/libstore/gc.cc @@ -1,6 +1,7 @@ #include "derivations.hh" #include "globals.hh" #include "local-store.hh" +#include "local-fs-store.hh" #include "finally.hh" #include @@ -682,7 +683,7 @@ void LocalStore::removeUnusedLinks(const GCState & state) struct stat st; if (stat(linksDir.c_str(), &st) == -1) throw SysError("statting '%1%'", linksDir); - auto overhead = 
st.st_blocks * 512ULL;
+    int64_t overhead = st.st_blocks * 512ULL;
 
     printInfo("note: currently hard linking saves %.2f MiB",
         ((unsharedSize - actualSize - overhead) / (1024.0 * 1024.0)));
 
diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc
index c5734852d..1238dc530 100644
--- a/src/libstore/globals.cc
+++ b/src/libstore/globals.cc
@@ -25,7 +25,7 @@ namespace nix {
 
 Settings settings;
 
-static GlobalConfig::Register r1(&settings);
+static GlobalConfig::Register rSettings(&settings);
 
 Settings::Settings()
     : nixPrefix(NIX_PREFIX)
diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh
index ebcfa9d80..8c63c5b34 100644
--- a/src/libstore/globals.hh
+++ b/src/libstore/globals.hh
@@ -859,8 +859,54 @@ public:
         are loaded as plugins (non-recursively).
       )"};
 
-    Setting<std::string> githubAccessToken{this, "", "github-access-token",
-        "GitHub access token to get access to GitHub data through the GitHub API for `github:<..>` flakes."};
+    Setting<StringMap> accessTokens{this, {}, "access-tokens",
+        R"(
+        Access tokens used to access protected GitHub, GitLab, or
+        other locations requiring token-based authentication.
+
+        Access tokens are specified as a string made up of
+        space-separated `host=token` values. The specific token
+        used is selected by matching the `host` portion against the
+        "host" specification of the input. The actual use of the
+        `token` value is determined by the type of resource being
+        accessed:
+
+        * GitHub: the token value is the OAUTH-TOKEN string obtained
+          as the Personal Access Token from the GitHub server (see
+          https://docs.github.com/en/developers/apps/authorizing-oath-apps).
+
+        * GitLab: the token value is either the OAuth2 token or the
+          Personal Access Token (these are different types of tokens
+          for GitLab, see
+          https://docs.gitlab.com/12.10/ee/api/README.html#authentication).
+          The `token` value should be `type:tokenstring`, where
+          `type` is either `OAuth2` or `PAT` to indicate which type
+          of token is being specified.
+
+        Example `~/.config/nix/nix.conf`:
+
+        ```
+        access-tokens = "github.com=23ac...b289 gitlab.mycompany.com=PAT:A123Bp_Cd..EfG gitlab.com=OAuth2:1jklw3jk"
+        ```
+
+        Example `~/code/flake.nix`:
+
+        ```nix
+        inputs.foo = {
+          type = "gitlab";
+          host = "gitlab.mycompany.com";
+          owner = "mycompany";
+          repo = "pro";
+        };
+        ```
+
+        This example specifies three tokens, one each for accessing
+        github.com, gitlab.mycompany.com, and gitlab.com.
+
+        The `inputs.foo` input uses the "gitlab" fetcher, which may
+        require specifying the token type along with the token
+        value.
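The `access-tokens` value documented above is a space-separated list of `host=token` pairs; the `BaseSetting<StringMap>` parser added later in this patch (in `config.cc`) splits each entry on the first `=` and silently ignores malformed ones. A small self-contained sketch of that parsing plus a host lookup, using illustrative helper names (not Nix's API) and the example tokens from the documentation above:

```c++
#include <iostream>
#include <map>
#include <optional>
#include <sstream>
#include <string>

// Parse "host1=token1 host2=token2 ..." into a map, mirroring the behaviour of
// the StringMap setting parser added in config.cc (entries without '=' are ignored).
static std::map<std::string, std::string> parseAccessTokens(const std::string & s)
{
    std::map<std::string, std::string> tokens;
    std::istringstream in(s);
    std::string kv;
    while (in >> kv) {
        auto eq = kv.find('=');
        if (eq != std::string::npos)
            tokens.emplace(kv.substr(0, eq), kv.substr(eq + 1));
    }
    return tokens;
}

static std::optional<std::string> tokenFor(
    const std::map<std::string, std::string> & tokens, const std::string & host)
{
    auto it = tokens.find(host);
    if (it == tokens.end()) return std::nullopt;
    return it->second;
}

int main()
{
    auto tokens = parseAccessTokens(
        "github.com=23ac...b289 gitlab.mycompany.com=PAT:A123Bp_Cd..EfG gitlab.com=OAuth2:1jklw3jk");
    if (auto t = tokenFor(tokens, "gitlab.mycompany.com"))
        std::cout << *t << "\n";   // "PAT:A123Bp_Cd..EfG"; the GitLab type prefix is still attached
}
```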
+ )"}; Setting experimentalFeatures{this, {}, "experimental-features", "Experimental Nix features to enable."}; diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc index 86be7c006..9d2a89f96 100644 --- a/src/libstore/http-binary-cache-store.cc +++ b/src/libstore/http-binary-cache-store.cc @@ -73,6 +73,7 @@ public: if (forceHttp) ret.insert("file"); return ret; } + protected: void maybeDisable() @@ -180,6 +181,6 @@ protected: }; -static RegisterStoreImplementation regStore; +static RegisterStoreImplementation regHttpBinaryCacheStore; } diff --git a/src/libstore/legacy-ssh-store.cc b/src/libstore/legacy-ssh-store.cc index 5af75669a..467169ce8 100644 --- a/src/libstore/legacy-ssh-store.cc +++ b/src/libstore/legacy-ssh-store.cc @@ -122,7 +122,7 @@ struct LegacySSHStore : public Store, public virtual LegacySSHStoreConfig auto deriver = readString(conn->from); if (deriver != "") info->deriver = parseStorePath(deriver); - info->references = readStorePaths(*this, conn->from); + info->references = worker_proto::read(*this, conn->from, Phantom {}); readLongLong(conn->from); // download size info->narSize = readLongLong(conn->from); @@ -156,7 +156,7 @@ struct LegacySSHStore : public Store, public virtual LegacySSHStoreConfig << printStorePath(info.path) << (info.deriver ? printStorePath(*info.deriver) : "") << info.narHash.to_string(Base16, false); - writeStorePaths(*this, conn->to, info.references); + worker_proto::write(*this, conn->to, info.references); conn->to << info.registrationTime << info.narSize @@ -185,7 +185,7 @@ struct LegacySSHStore : public Store, public virtual LegacySSHStoreConfig conn->to << exportMagic << printStorePath(info.path); - writeStorePaths(*this, conn->to, info.references); + worker_proto::write(*this, conn->to, info.references); conn->to << (info.deriver ? 
printStorePath(*info.deriver) : "") << 0 @@ -301,10 +301,10 @@ public: conn->to << cmdQueryClosure << includeOutputs; - writeStorePaths(*this, conn->to, paths); + worker_proto::write(*this, conn->to, paths); conn->to.flush(); - for (auto & i : readStorePaths(*this, conn->from)) + for (auto & i : worker_proto::read(*this, conn->from, Phantom {})) out.insert(i); } @@ -317,10 +317,10 @@ public: << cmdQueryValidPaths << false // lock << maybeSubstitute; - writeStorePaths(*this, conn->to, paths); + worker_proto::write(*this, conn->to, paths); conn->to.flush(); - return readStorePaths(*this, conn->from); + return worker_proto::read(*this, conn->from, Phantom {}); } void connect() override @@ -335,6 +335,6 @@ public: } }; -static RegisterStoreImplementation regStore; +static RegisterStoreImplementation regLegacySSHStore; } diff --git a/src/libstore/local-binary-cache-store.cc b/src/libstore/local-binary-cache-store.cc index b5744448e..7d979c5c2 100644 --- a/src/libstore/local-binary-cache-store.cc +++ b/src/libstore/local-binary-cache-store.cc @@ -105,6 +105,6 @@ std::set LocalBinaryCacheStore::uriSchemes() return {"file"}; } -static RegisterStoreImplementation regStore; +static RegisterStoreImplementation regLocalBinaryCacheStore; } diff --git a/src/libstore/local-fs-store.cc b/src/libstore/local-fs-store.cc index 2f1d9663a..e7c3dae92 100644 --- a/src/libstore/local-fs-store.cc +++ b/src/libstore/local-fs-store.cc @@ -1,6 +1,7 @@ #include "archive.hh" #include "fs-accessor.hh" #include "store-api.hh" +#include "local-fs-store.hh" #include "globals.hh" #include "compression.hh" #include "derivations.hh" diff --git a/src/libstore/local-fs-store.hh b/src/libstore/local-fs-store.hh new file mode 100644 index 000000000..8eccd8236 --- /dev/null +++ b/src/libstore/local-fs-store.hh @@ -0,0 +1,48 @@ +#pragma once + +#include "store-api.hh" + +namespace nix { + +struct LocalFSStoreConfig : virtual StoreConfig +{ + using StoreConfig::StoreConfig; + // FIXME: the (StoreConfig*) cast works around a bug in gcc that causes + // it to omit the call to the Setting constructor. Clang works fine + // either way. + const PathSetting rootDir{(StoreConfig*) this, true, "", + "root", "directory prefixed to all other paths"}; + const PathSetting stateDir{(StoreConfig*) this, false, + rootDir != "" ? rootDir + "/nix/var/nix" : settings.nixStateDir, + "state", "directory where Nix will store state"}; + const PathSetting logDir{(StoreConfig*) this, false, + rootDir != "" ? rootDir + "/nix/var/log/nix" : settings.nixLogDir, + "log", "directory where Nix will store state"}; +}; + +class LocalFSStore : public virtual Store, public virtual LocalFSStoreConfig +{ +public: + + const static string drvsLogDir; + + LocalFSStore(const Params & params); + + void narFromPath(const StorePath & path, Sink & sink) override; + ref getFSAccessor() override; + + /* Register a permanent GC root. 
*/ + Path addPermRoot(const StorePath & storePath, const Path & gcRoot); + + virtual Path getRealStoreDir() { return storeDir; } + + Path toRealPath(const Path & storePath) override + { + assert(isInStore(storePath)); + return getRealStoreDir() + "/" + std::string(storePath, storeDir.size() + 1); + } + + std::shared_ptr getBuildLog(const StorePath & path) override; +}; + +} diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index ee997ef3a..d29236a9c 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -721,7 +721,7 @@ uint64_t LocalStore::queryValidPathId(State & state, const StorePath & path) { auto use(state.stmtQueryPathInfo.use()(printStorePath(path))); if (!use.next()) - throw Error("path '%s' is not valid", printStorePath(path)); + throw InvalidPath("path '%s' is not valid", printStorePath(path)); return use.getInt(0); } @@ -796,18 +796,58 @@ StorePathSet LocalStore::queryValidDerivers(const StorePath & path) } -std::map> LocalStore::queryPartialDerivationOutputMap(const StorePath & path) +std::map> LocalStore::queryPartialDerivationOutputMap(const StorePath & path_) { + auto path = path_; std::map> outputs; - BasicDerivation drv = readDerivation(path); + Derivation drv = readDerivation(path); for (auto & [outName, _] : drv.outputs) { outputs.insert_or_assign(outName, std::nullopt); } + bool haveCached = false; + { + auto resolutions = drvPathResolutions.lock(); + auto resolvedPathOptIter = resolutions->find(path); + if (resolvedPathOptIter != resolutions->end()) { + auto & [_, resolvedPathOpt] = *resolvedPathOptIter; + if (resolvedPathOpt) + path = *resolvedPathOpt; + haveCached = true; + } + } + /* can't just use else-if instead of `!haveCached` because we need to unlock + `drvPathResolutions` before it is locked in `Derivation::resolve`. */ + if (!haveCached && drv.type() == DerivationType::CAFloating) { + /* Try resolve drv and use that path instead. */ + auto attempt = drv.tryResolve(*this); + if (!attempt) + /* If we cannot resolve the derivation, we cannot have any path + assigned so we return the map of all std::nullopts. */ + return outputs; + /* Just compute store path */ + auto pathResolved = writeDerivation(*this, *std::move(attempt), NoRepair, true); + /* Store in memo table. */ + /* FIXME: memo logic should not be local-store specific, should have + wrapper-method instead. */ + drvPathResolutions.lock()->insert_or_assign(path, pathResolved); + path = std::move(pathResolved); + } return retrySQLite>>([&]() { auto state(_state.lock()); - auto useQueryDerivationOutputs(state->stmtQueryDerivationOutputs.use() - (queryValidPathId(*state, path))); + uint64_t drvId; + try { + drvId = queryValidPathId(*state, path); + } catch (InvalidPath &) { + /* FIXME? if the derivation doesn't exist, we cannot have a mapping + for it. 
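The `queryPartialDerivationOutputMap` change above looks the derivation up in the `drvPathResolutions` memo table under its lock, releases the lock before calling `Derivation::tryResolve` (the in-code comment notes the lock must not be held across that call), and only afterwards records the resolved path. A minimal sketch of that look-up / unlock / compute / re-insert pattern with a plain `std::mutex`; the names are illustrative only:

```c++
#include <iostream>
#include <map>
#include <mutex>
#include <string>

static std::mutex memoLock;
static std::map<std::string, std::string> memo;   // stand-in for drvPathResolutions

static std::string expensiveResolve(const std::string & key)
{
    // Stands in for Derivation::tryResolve(), which must not be entered
    // while memoLock is held.
    return key + ".resolved";
}

static std::string resolveCached(const std::string & key)
{
    {   // 1. check the cache under the lock
        std::lock_guard<std::mutex> guard(memoLock);
        if (auto it = memo.find(key); it != memo.end())
            return it->second;
    }   // 2. lock released before the computation runs
    auto value = expensiveResolve(key);
    {   // 3. re-take the lock only to record the result
        std::lock_guard<std::mutex> guard(memoLock);
        memo.insert_or_assign(key, value);
    }
    return value;
}

int main()
{
    std::cout << resolveCached("/nix/store/example.drv") << "\n";
    std::cout << resolveCached("/nix/store/example.drv") << "\n";   // served from the memo table
}
```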
*/ + return outputs; + } + + auto useQueryDerivationOutputs { + state->stmtQueryDerivationOutputs.use() + (drvId) + }; while (useQueryDerivationOutputs.next()) outputs.insert_or_assign( diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh index e7c9d1605..f1e2ab7f9 100644 --- a/src/libstore/local-store.hh +++ b/src/libstore/local-store.hh @@ -4,6 +4,7 @@ #include "pathlocks.hh" #include "store-api.hh" +#include "local-fs-store.hh" #include "sync.hh" #include "util.hh" diff --git a/src/libstore/local.mk b/src/libstore/local.mk index d266c8efe..dfe1e2cc4 100644 --- a/src/libstore/local.mk +++ b/src/libstore/local.mk @@ -4,7 +4,7 @@ libstore_NAME = libnixstore libstore_DIR := $(d) -libstore_SOURCES := $(wildcard $(d)/*.cc $(d)/builtins/*.cc) +libstore_SOURCES := $(wildcard $(d)/*.cc $(d)/builtins/*.cc $(d)/build/*.cc) libstore_LIBS = libutil @@ -32,7 +32,7 @@ ifeq ($(HAVE_SECCOMP), 1) endif libstore_CXXFLAGS += \ - -I src/libutil -I src/libstore \ + -I src/libutil -I src/libstore -I src/libstore/build \ -DNIX_PREFIX=\"$(prefix)\" \ -DNIX_STORE_DIR=\"$(storedir)\" \ -DNIX_DATA_DIR=\"$(datadir)\" \ @@ -64,3 +64,6 @@ $(eval $(call install-file-in, $(d)/nix-store.pc, $(prefix)/lib/pkgconfig, 0644) $(foreach i, $(wildcard src/libstore/builtins/*.hh), \ $(eval $(call install-file-in, $(i), $(includedir)/nix/builtins, 0644))) + +$(foreach i, $(wildcard src/libstore/build/*.hh), \ + $(eval $(call install-file-in, $(i), $(includedir)/nix/build, 0644))) diff --git a/src/libstore/lock.cc b/src/libstore/lock.cc new file mode 100644 index 000000000..f1356fdca --- /dev/null +++ b/src/libstore/lock.cc @@ -0,0 +1,93 @@ +#include "lock.hh" +#include "globals.hh" +#include "pathlocks.hh" + +#include +#include + +#include +#include + +namespace nix { + +UserLock::UserLock() +{ + assert(settings.buildUsersGroup != ""); + createDirs(settings.nixStateDir + "/userpool"); +} + +bool UserLock::findFreeUser() { + if (enabled()) return true; + + /* Get the members of the build-users-group. */ + struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str()); + if (!gr) + throw Error("the group '%1%' specified in 'build-users-group' does not exist", + settings.buildUsersGroup); + gid = gr->gr_gid; + + /* Copy the result of getgrnam. */ + Strings users; + for (char * * p = gr->gr_mem; *p; ++p) { + debug("found build user '%1%'", *p); + users.push_back(*p); + } + + if (users.empty()) + throw Error("the build users group '%1%' has no members", + settings.buildUsersGroup); + + /* Find a user account that isn't currently in use for another + build. */ + for (auto & i : users) { + debug("trying user '%1%'", i); + + struct passwd * pw = getpwnam(i.c_str()); + if (!pw) + throw Error("the user '%1%' in the group '%2%' does not exist", + i, settings.buildUsersGroup); + + + fnUserLock = (format("%1%/userpool/%2%") % settings.nixStateDir % pw->pw_uid).str(); + + AutoCloseFD fd = open(fnUserLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600); + if (!fd) + throw SysError("opening user lock '%1%'", fnUserLock); + + if (lockFile(fd.get(), ltWrite, false)) { + fdUserLock = std::move(fd); + user = i; + uid = pw->pw_uid; + + /* Sanity check... */ + if (uid == getuid() || uid == geteuid()) + throw Error("the Nix user should not be a member of '%1%'", + settings.buildUsersGroup); + +#if __linux__ + /* Get the list of supplementary groups of this build user. This + is usually either empty or contains a group such as "kvm". 
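`UserLock::findFreeUser()` above walks the members of `build-users-group` and claims the first user whose per-UID lock file under the `userpool` state directory it can write-lock, keeping the descriptor open for the lifetime of the build. A simplified stand-alone sketch of that slot-claiming idea; it uses `flock()` where Nix uses an `fcntl`-based write lock, and the directory and slot names are made up:

```c++
#include <fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <unistd.h>
#include <cstdio>
#include <optional>
#include <string>

// Try to claim one of the per-user lock files.  Returns the fd holding the
// lock (kept open so the slot stays claimed) or std::nullopt if every slot
// is busy.
static std::optional<int> claimSlot(const std::string & dir, int nSlots)
{
    for (int slot = 0; slot < nSlots; ++slot) {
        auto path = dir + "/" + std::to_string(slot);
        int fd = open(path.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
        if (fd == -1) continue;
        if (flock(fd, LOCK_EX | LOCK_NB) == 0)   // non-blocking exclusive lock
            return fd;                           // held until the fd is closed
        close(fd);                               // someone else owns this slot
    }
    return std::nullopt;
}

int main()
{
    mkdir("/tmp/userpool-demo", 0700);           // demo directory; EEXIST is fine
    if (auto fd = claimSlot("/tmp/userpool-demo", 3)) {
        std::printf("claimed a build-user slot (fd %d)\n", *fd);
        close(*fd);                              // releases the lock
    } else
        std::printf("all build users are busy\n");
}
```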
*/ + supplementaryGIDs.resize(10); + int ngroups = supplementaryGIDs.size(); + int err = getgrouplist(pw->pw_name, pw->pw_gid, + supplementaryGIDs.data(), &ngroups); + if (err == -1) + throw Error("failed to get list of supplementary groups for '%1%'", pw->pw_name); + + supplementaryGIDs.resize(ngroups); +#endif + + isEnabled = true; + return true; + } + } + + return false; +} + +void UserLock::kill() +{ + killUser(uid); +} + +} diff --git a/src/libstore/lock.hh b/src/libstore/lock.hh new file mode 100644 index 000000000..8fbb67ddc --- /dev/null +++ b/src/libstore/lock.hh @@ -0,0 +1,37 @@ +#pragma once + +#include "sync.hh" +#include "types.hh" +#include "util.hh" + +namespace nix { + +class UserLock +{ +private: + Path fnUserLock; + AutoCloseFD fdUserLock; + + bool isEnabled = false; + string user; + uid_t uid = 0; + gid_t gid = 0; + std::vector supplementaryGIDs; + +public: + UserLock(); + + void kill(); + + string getUser() { return user; } + uid_t getUID() { assert(uid); return uid; } + uid_t getGID() { assert(gid); return gid; } + std::vector getSupplementaryGIDs() { return supplementaryGIDs; } + + bool findFreeUser(); + + bool enabled() { return isEnabled; } + +}; + +} diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc index c032a5e22..a0d482ddf 100644 --- a/src/libstore/optimise-store.cc +++ b/src/libstore/optimise-store.cc @@ -276,21 +276,15 @@ void LocalStore::optimiseStore(OptimiseStats & stats) } } -static string showBytes(uint64_t bytes) -{ - return (format("%.2f MiB") % (bytes / (1024.0 * 1024.0))).str(); -} - void LocalStore::optimiseStore() { OptimiseStats stats; optimiseStore(stats); - printInfo( - format("%1% freed by hard-linking %2% files") - % showBytes(stats.bytesFreed) - % stats.filesLinked); + printInfo("%s freed by hard-linking %d files", + showBytes(stats.bytesFreed), + stats.filesLinked); } void LocalStore::optimisePath(const Path & path) diff --git a/src/libstore/parsed-derivations.hh b/src/libstore/parsed-derivations.hh index 3fa09f34f..c9fbe68c4 100644 --- a/src/libstore/parsed-derivations.hh +++ b/src/libstore/parsed-derivations.hh @@ -1,3 +1,5 @@ +#pragma once + #include "store-api.hh" #include diff --git a/src/libstore/profiles.cc b/src/libstore/profiles.cc index c3809bad7..ed10dd519 100644 --- a/src/libstore/profiles.cc +++ b/src/libstore/profiles.cc @@ -1,5 +1,6 @@ #include "profiles.hh" #include "store-api.hh" +#include "local-fs-store.hh" #include "util.hh" #include diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index be5eb4736..488270f48 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -12,59 +12,21 @@ #include "logging.hh" #include "callback.hh" -#include -#include -#include -#include -#include -#include -#include - -#include - namespace nix { - -template<> StorePathSet readStorePaths(const Store & store, Source & from) -{ - StorePathSet paths; - for (auto & i : readStrings(from)) - paths.insert(store.parseStorePath(i)); - return paths; -} - -void writeStorePaths(const Store & store, Sink & out, const StorePathSet & paths) -{ - out << paths.size(); - for (auto & i : paths) - out << store.printStorePath(i); -} - - -StorePathCAMap readStorePathCAMap(const Store & store, Source & from) -{ - StorePathCAMap paths; - auto count = readNum(from); - while (count--) { - auto path = store.parseStorePath(readString(from)); - auto ca = parseContentAddressOpt(readString(from)); - paths.insert_or_assign(path, ca); - } - return paths; -} - -void writeStorePathCAMap(const Store & store, 
Sink & out, const StorePathCAMap & paths) -{ - out << paths.size(); - for (auto & i : paths) { - out << store.printStorePath(i.first); - out << renderContentAddress(i.second); - } -} - - namespace worker_proto { +std::string read(const Store & store, Source & from, Phantom _) +{ + return readString(from); +} + +void write(const Store & store, Sink & out, const std::string & str) +{ + out << str; +} + + StorePath read(const Store & store, Source & from, Phantom _) { return store.parseStorePath(readString(from)); @@ -76,19 +38,39 @@ void write(const Store & store, Sink & out, const StorePath & storePath) } -template<> +ContentAddress read(const Store & store, Source & from, Phantom _) +{ + return parseContentAddress(readString(from)); +} + +void write(const Store & store, Sink & out, const ContentAddress & ca) +{ + out << renderContentAddress(ca); +} + + std::optional read(const Store & store, Source & from, Phantom> _) { auto s = readString(from); return s == "" ? std::optional {} : store.parseStorePath(s); } -template<> void write(const Store & store, Sink & out, const std::optional & storePathOpt) { out << (storePathOpt ? store.printStorePath(*storePathOpt) : ""); } + +std::optional read(const Store & store, Source & from, Phantom> _) +{ + return parseContentAddressOpt(readString(from)); +} + +void write(const Store & store, Sink & out, const std::optional & caOpt) +{ + out << (caOpt ? renderContentAddress(*caOpt) : ""); +} + } @@ -133,69 +115,6 @@ ref RemoteStore::openConnectionWrapper() } -UDSRemoteStore::UDSRemoteStore(const Params & params) - : StoreConfig(params) - , Store(params) - , LocalFSStore(params) - , RemoteStore(params) -{ -} - - -UDSRemoteStore::UDSRemoteStore( - const std::string scheme, - std::string socket_path, - const Params & params) - : UDSRemoteStore(params) -{ - path.emplace(socket_path); -} - - -std::string UDSRemoteStore::getUri() -{ - if (path) { - return std::string("unix://") + *path; - } else { - return "daemon"; - } -} - - -ref UDSRemoteStore::openConnection() -{ - auto conn = make_ref(); - - /* Connect to a daemon that does the privileged work for us. */ - conn->fd = socket(PF_UNIX, SOCK_STREAM - #ifdef SOCK_CLOEXEC - | SOCK_CLOEXEC - #endif - , 0); - if (!conn->fd) - throw SysError("cannot create Unix domain socket"); - closeOnExec(conn->fd.get()); - - string socketPath = path ? *path : settings.nixDaemonSocketFile; - - struct sockaddr_un addr; - addr.sun_family = AF_UNIX; - if (socketPath.size() + 1 >= sizeof(addr.sun_path)) - throw Error("socket path '%1%' is too long", socketPath); - strcpy(addr.sun_path, socketPath.c_str()); - - if (::connect(conn->fd.get(), (struct sockaddr *) &addr, sizeof(addr)) == -1) - throw SysError("cannot connect to daemon at '%1%'", socketPath); - - conn->from.fd = conn->fd.get(); - conn->to.fd = conn->fd.get(); - - conn->startTime = std::chrono::steady_clock::now(); - - return conn; -} - - void RemoteStore::initConnection(Connection & conn) { /* Send the magic greeting, check for the reply. 
*/ @@ -337,9 +256,9 @@ StorePathSet RemoteStore::queryValidPaths(const StorePathSet & paths, Substitute return res; } else { conn->to << wopQueryValidPaths; - writeStorePaths(*this, conn->to, paths); + worker_proto::write(*this, conn->to, paths); conn.processStderr(); - return readStorePaths(*this, conn->from); + return worker_proto::read(*this, conn->from, Phantom {}); } } @@ -349,7 +268,7 @@ StorePathSet RemoteStore::queryAllValidPaths() auto conn(getConnection()); conn->to << wopQueryAllValidPaths; conn.processStderr(); - return readStorePaths(*this, conn->from); + return worker_proto::read(*this, conn->from, Phantom {}); } @@ -366,9 +285,9 @@ StorePathSet RemoteStore::querySubstitutablePaths(const StorePathSet & paths) return res; } else { conn->to << wopQuerySubstitutablePaths; - writeStorePaths(*this, conn->to, paths); + worker_proto::write(*this, conn->to, paths); conn.processStderr(); - return readStorePaths(*this, conn->from); + return worker_proto::read(*this, conn->from, Phantom {}); } } @@ -390,7 +309,7 @@ void RemoteStore::querySubstitutablePathInfos(const StorePathCAMap & pathsMap, S auto deriver = readString(conn->from); if (deriver != "") info.deriver = parseStorePath(deriver); - info.references = readStorePaths(*this, conn->from); + info.references = worker_proto::read(*this, conn->from, Phantom {}); info.downloadSize = readLongLong(conn->from); info.narSize = readLongLong(conn->from); infos.insert_or_assign(i.first, std::move(info)); @@ -403,9 +322,9 @@ void RemoteStore::querySubstitutablePathInfos(const StorePathCAMap & pathsMap, S StorePathSet paths; for (auto & path : pathsMap) paths.insert(path.first); - writeStorePaths(*this, conn->to, paths); + worker_proto::write(*this, conn->to, paths); } else - writeStorePathCAMap(*this, conn->to, pathsMap); + worker_proto::write(*this, conn->to, pathsMap); conn.processStderr(); size_t count = readNum(conn->from); for (size_t n = 0; n < count; n++) { @@ -413,7 +332,7 @@ void RemoteStore::querySubstitutablePathInfos(const StorePathCAMap & pathsMap, S auto deriver = readString(conn->from); if (deriver != "") info.deriver = parseStorePath(deriver); - info.references = readStorePaths(*this, conn->from); + info.references = worker_proto::read(*this, conn->from, Phantom {}); info.downloadSize = readLongLong(conn->from); info.narSize = readLongLong(conn->from); } @@ -428,7 +347,7 @@ ref RemoteStore::readValidPathInfo(ConnectionHandle & conn, auto narHash = Hash::parseAny(readString(conn->from), htSHA256); auto info = make_ref(path, narHash); if (deriver != "") info->deriver = parseStorePath(deriver); - info->references = readStorePaths(*this, conn->from); + info->references = worker_proto::read(*this, conn->from, Phantom {}); conn->from >> info->registrationTime >> info->narSize; if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 16) { conn->from >> info->ultimate; @@ -472,7 +391,7 @@ void RemoteStore::queryReferrers(const StorePath & path, auto conn(getConnection()); conn->to << wopQueryReferrers << printStorePath(path); conn.processStderr(); - for (auto & i : readStorePaths(*this, conn->from)) + for (auto & i : worker_proto::read(*this, conn->from, Phantom {})) referrers.insert(i); } @@ -482,7 +401,7 @@ StorePathSet RemoteStore::queryValidDerivers(const StorePath & path) auto conn(getConnection()); conn->to << wopQueryValidDerivers << printStorePath(path); conn.processStderr(); - return readStorePaths(*this, conn->from); + return worker_proto::read(*this, conn->from, Phantom {}); } @@ -494,7 +413,7 @@ StorePathSet 
RemoteStore::queryDerivationOutputs(const StorePath & path) } conn->to << wopQueryDerivationOutputs << printStorePath(path); conn.processStderr(); - return readStorePaths(*this, conn->from); + return worker_proto::read(*this, conn->from, Phantom {}); } @@ -520,7 +439,6 @@ std::map> RemoteStore::queryPartialDerivat } return ret; } - } std::optional RemoteStore::queryPathFromHashPart(const std::string & hashPart) @@ -550,7 +468,7 @@ ref RemoteStore::addCAToStore( << wopAddToStore << name << renderContentAddressMethod(caMethod); - writeStorePaths(*this, conn->to, references); + worker_proto::write(*this, conn->to, references); conn->to << repair; conn.withFramedSink([&](Sink & sink) { @@ -567,7 +485,7 @@ ref RemoteStore::addCAToStore( [&](TextHashMethod thm) -> void { std::string s = dump.drain(); conn->to << wopAddTextToStore << name << s; - writeStorePaths(*this, conn->to, references); + worker_proto::write(*this, conn->to, references); conn.processStderr(); }, [&](FixedOutputHashMethod fohm) -> void { @@ -636,7 +554,7 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source, sink << exportMagic << printStorePath(info.path); - writeStorePaths(*this, sink, info.references); + worker_proto::write(*this, sink, info.references); sink << (info.deriver ? printStorePath(*info.deriver) : "") << 0 // == no legacy signature @@ -646,7 +564,7 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source, conn.processStderr(0, source2.get()); - auto importedPaths = readStorePaths(*this, conn->from); + auto importedPaths = worker_proto::read(*this, conn->from, Phantom {}); assert(importedPaths.size() <= 1); } @@ -655,7 +573,7 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source, << printStorePath(info.path) << (info.deriver ? 
printStorePath(*info.deriver) : "") << info.narHash.to_string(Base16, false); - writeStorePaths(*this, conn->to, info.references); + worker_proto::write(*this, conn->to, info.references); conn->to << info.registrationTime << info.narSize << info.ultimate << info.sigs << renderContentAddress(info.ca) << repair << !checkSigs; @@ -777,7 +695,7 @@ void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results) conn->to << wopCollectGarbage << options.action; - writeStorePaths(*this, conn->to, options.pathsToDelete); + worker_proto::write(*this, conn->to, options.pathsToDelete); conn->to << options.ignoreLiveness << options.maxFreed /* removed options */ @@ -839,9 +757,9 @@ void RemoteStore::queryMissing(const std::vector & targets ss.push_back(p.to_string(*this)); conn->to << ss; conn.processStderr(); - willBuild = readStorePaths(*this, conn->from); - willSubstitute = readStorePaths(*this, conn->from); - unknown = readStorePaths(*this, conn->from); + willBuild = worker_proto::read(*this, conn->from, Phantom {}); + willSubstitute = worker_proto::read(*this, conn->from, Phantom {}); + unknown = worker_proto::read(*this, conn->from, Phantom {}); conn->from >> downloadSize >> narSize; return; } @@ -934,9 +852,13 @@ std::exception_ptr RemoteStore::Connection::processStderr(Sink * sink, Source * } else if (msg == STDERR_ERROR) { - string error = readString(from); - unsigned int status = readInt(from); - return std::make_exception_ptr(Error(status, error)); + if (GET_PROTOCOL_MINOR(daemonVersion) >= 26) { + return std::make_exception_ptr(readError(from)); + } else { + string error = readString(from); + unsigned int status = readInt(from); + return std::make_exception_ptr(Error(status, error)); + } } else if (msg == STDERR_NEXT) @@ -1017,6 +939,4 @@ void ConnectionHandle::withFramedSink(std::function fun) } -static RegisterStoreImplementation regStore; - } diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh index ec04be985..9f78fcb02 100644 --- a/src/libstore/remote-store.hh +++ b/src/libstore/remote-store.hh @@ -155,49 +155,5 @@ private: }; -struct UDSRemoteStoreConfig : virtual LocalFSStoreConfig, virtual RemoteStoreConfig -{ - UDSRemoteStoreConfig(const Store::Params & params) - : StoreConfig(params) - , LocalFSStoreConfig(params) - , RemoteStoreConfig(params) - { - } - - UDSRemoteStoreConfig() - : UDSRemoteStoreConfig(Store::Params({})) - { - } - - const std::string name() override { return "Local Daemon Store"; } -}; - -class UDSRemoteStore : public LocalFSStore, public RemoteStore, public virtual UDSRemoteStoreConfig -{ -public: - - UDSRemoteStore(const Params & params); - UDSRemoteStore(const std::string scheme, std::string path, const Params & params); - - std::string getUri() override; - - static std::set uriSchemes() - { return {"unix"}; } - - bool sameMachine() override - { return true; } - - ref getFSAccessor() override - { return LocalFSStore::getFSAccessor(); } - - void narFromPath(const StorePath & path, Sink & sink) override - { LocalFSStore::narFromPath(path, sink); } - -private: - - ref openConnection() override; - std::optional path; -}; - } diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc index d43f267e0..552c4aac7 100644 --- a/src/libstore/s3-binary-cache-store.cc +++ b/src/libstore/s3-binary-cache-store.cc @@ -439,7 +439,7 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore, virtual S3BinaryCache }; -static RegisterStoreImplementation regStore; +static RegisterStoreImplementation 
regS3BinaryCacheStore;
 
 }
 
diff --git a/src/libstore/ssh-store.cc b/src/libstore/ssh-store.cc
index 6d6eca98d..08d0bd565 100644
--- a/src/libstore/ssh-store.cc
+++ b/src/libstore/ssh-store.cc
@@ -83,6 +83,6 @@ ref SSHStore::openConnection()
     return conn;
 }
 
-static RegisterStoreImplementation regStore;
+static RegisterStoreImplementation regSSHStore;
 
 }
diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc
index 1bbc74db8..9f21f0434 100644
--- a/src/libstore/store-api.cc
+++ b/src/libstore/store-api.cc
@@ -1011,7 +1011,7 @@ Derivation Store::readDerivation(const StorePath & drvPath)
 
 
 #include "local-store.hh"
-#include "remote-store.hh"
+#include "uds-remote-store.hh"
 
 namespace nix {
 
diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh
index 591140874..f77bc21d1 100644
--- a/src/libstore/store-api.hh
+++ b/src/libstore/store-api.hh
@@ -194,6 +194,8 @@ struct StoreConfig : public Config
      */
     StoreConfig() { assert(false); }
 
+    virtual ~StoreConfig() { }
+
     virtual const std::string name() = 0;
 
     const PathSetting storeDir_{this, false, settings.nixStore,
@@ -454,9 +456,7 @@ public:
     // FIXME: remove?
     virtual StorePath addToStoreFromDump(Source & dump, const string & name,
         FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair)
-    {
-        throw Error("addToStoreFromDump() is not supported by this store");
-    }
+    { unsupported("addToStoreFromDump"); }
 
     /* Like addToStore, but the contents written to the output path is a
        regular file containing the given string. */
@@ -479,8 +479,38 @@ public:
         BuildMode buildMode = bmNormal);
 
     /* Build a single non-materialized derivation (i.e. not from an
-       on-disk .drv file). Note that ‘drvPath’ is only used for
-       informational purposes. */
+       on-disk .drv file).
+
+       ‘drvPath’ is used to deduplicate worker goals, so it is imperative that
+       it is correct. That said, it doesn't literally need to be the store
+       path that would be calculated from writing this derivation to the
+       store: it is OK if it instead is that of a Derivation which would
+       resolve to this one (by taking the outputs of its input derivations and
+       adding them as input sources), such that the build-time referenceable
+       paths are the same.
+
+       In the input-addressed case, we usually *do* use an "original"
+       unresolved derivation's path, as that is what will be used in the
+       `buildPaths` case. Also, the input-addressed output paths are verified
+       only against the contents of that specific unresolved derivation, so it
+       is useful to keep that information around: if the original derivation
+       is ever obtained later, it can be verified whether the trusted user in
+       fact used the proper output path.
+
+       In the content-addressed case, we want to always use the
+       resolved drv path calculated from the provided derivation. This serves
+       two purposes:
+
+       - It keeps the operation trustless, by ruling out a maliciously
+         invalid drv path corresponding to a non-resolution-equivalent
+         derivation.
+
+       - For the floating case in particular, it ensures that the derivation
+         to output mapping respects the resolution equivalence relation, so
+         one cannot choose different resolution-equivalent derivations to
+         subvert dependency coherence (i.e. the property that one doesn't end
+         up with multiple different versions of dependencies without
+         explicitly choosing to allow it).
+ */ virtual BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv, BuildMode buildMode = bmNormal) = 0; @@ -517,7 +547,7 @@ public: - The collector isn't running, or it's just started but hasn't acquired the GC lock yet. In that case we get and release the lock right away, then exit. The collector scans the - permanent root and sees our's. + permanent root and sees ours. In either case the permanent root is seen by the collector. */ virtual void syncWithGC() { }; @@ -685,47 +715,6 @@ protected: }; -struct LocalFSStoreConfig : virtual StoreConfig -{ - using StoreConfig::StoreConfig; - // FIXME: the (StoreConfig*) cast works around a bug in gcc that causes - // it to omit the call to the Setting constructor. Clang works fine - // either way. - const PathSetting rootDir{(StoreConfig*) this, true, "", - "root", "directory prefixed to all other paths"}; - const PathSetting stateDir{(StoreConfig*) this, false, - rootDir != "" ? rootDir + "/nix/var/nix" : settings.nixStateDir, - "state", "directory where Nix will store state"}; - const PathSetting logDir{(StoreConfig*) this, false, - rootDir != "" ? rootDir + "/nix/var/log/nix" : settings.nixLogDir, - "log", "directory where Nix will store state"}; -}; - -class LocalFSStore : public virtual Store, public virtual LocalFSStoreConfig -{ -public: - - const static string drvsLogDir; - - LocalFSStore(const Params & params); - - void narFromPath(const StorePath & path, Sink & sink) override; - ref getFSAccessor() override; - - /* Register a permanent GC root. */ - Path addPermRoot(const StorePath & storePath, const Path & gcRoot); - - virtual Path getRealStoreDir() { return storeDir; } - - Path toRealPath(const Path & storePath) override - { - assert(isInStore(storePath)); - return getRealStoreDir() + "/" + std::string(storePath, storeDir.size() + 1); - } - - std::shared_ptr getBuildLog(const StorePath & path) override; -}; - /* Copy a path from one store to another. */ void copyStorePath(ref srcStore, ref dstStore, @@ -802,6 +791,7 @@ struct StoreFactory std::function (const std::string & scheme, const std::string & uri, const Store::Params & params)> create; std::function ()> getConfig; }; + struct Implementations { static std::vector * registered; diff --git a/src/libstore/uds-remote-store.cc b/src/libstore/uds-remote-store.cc new file mode 100644 index 000000000..24f3e9c6d --- /dev/null +++ b/src/libstore/uds-remote-store.cc @@ -0,0 +1,81 @@ +#include "uds-remote-store.hh" + +#include +#include +#include +#include +#include +#include +#include + +#include + + +namespace nix { + +UDSRemoteStore::UDSRemoteStore(const Params & params) + : StoreConfig(params) + , Store(params) + , LocalFSStore(params) + , RemoteStore(params) +{ +} + + +UDSRemoteStore::UDSRemoteStore( + const std::string scheme, + std::string socket_path, + const Params & params) + : UDSRemoteStore(params) +{ + path.emplace(socket_path); +} + + +std::string UDSRemoteStore::getUri() +{ + if (path) { + return std::string("unix://") + *path; + } else { + return "daemon"; + } +} + + +ref UDSRemoteStore::openConnection() +{ + auto conn = make_ref(); + + /* Connect to a daemon that does the privileged work for us. */ + conn->fd = socket(PF_UNIX, SOCK_STREAM + #ifdef SOCK_CLOEXEC + | SOCK_CLOEXEC + #endif + , 0); + if (!conn->fd) + throw SysError("cannot create Unix domain socket"); + closeOnExec(conn->fd.get()); + + string socketPath = path ? 
*path : settings.nixDaemonSocketFile; + + struct sockaddr_un addr; + addr.sun_family = AF_UNIX; + if (socketPath.size() + 1 >= sizeof(addr.sun_path)) + throw Error("socket path '%1%' is too long", socketPath); + strcpy(addr.sun_path, socketPath.c_str()); + + if (::connect(conn->fd.get(), (struct sockaddr *) &addr, sizeof(addr)) == -1) + throw SysError("cannot connect to daemon at '%1%'", socketPath); + + conn->from.fd = conn->fd.get(); + conn->to.fd = conn->fd.get(); + + conn->startTime = std::chrono::steady_clock::now(); + + return conn; +} + + +static RegisterStoreImplementation regUDSRemoteStore; + +} diff --git a/src/libstore/uds-remote-store.hh b/src/libstore/uds-remote-store.hh new file mode 100644 index 000000000..e5de104c9 --- /dev/null +++ b/src/libstore/uds-remote-store.hh @@ -0,0 +1,52 @@ +#pragma once + +#include "remote-store.hh" +#include "local-fs-store.hh" + +namespace nix { + +struct UDSRemoteStoreConfig : virtual LocalFSStoreConfig, virtual RemoteStoreConfig +{ + UDSRemoteStoreConfig(const Store::Params & params) + : StoreConfig(params) + , LocalFSStoreConfig(params) + , RemoteStoreConfig(params) + { + } + + UDSRemoteStoreConfig() + : UDSRemoteStoreConfig(Store::Params({})) + { + } + + const std::string name() override { return "Local Daemon Store"; } +}; + +class UDSRemoteStore : public LocalFSStore, public RemoteStore, public virtual UDSRemoteStoreConfig +{ +public: + + UDSRemoteStore(const Params & params); + UDSRemoteStore(const std::string scheme, std::string path, const Params & params); + + std::string getUri() override; + + static std::set uriSchemes() + { return {"unix"}; } + + bool sameMachine() override + { return true; } + + ref getFSAccessor() override + { return LocalFSStore::getFSAccessor(); } + + void narFromPath(const StorePath & path, Sink & sink) override + { LocalFSStore::narFromPath(path, sink); } + +private: + + ref openConnection() override; + std::optional path; +}; + +} diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh index b100d1550..b3705578e 100644 --- a/src/libstore/worker-protocol.hh +++ b/src/libstore/worker-protocol.hh @@ -6,7 +6,7 @@ namespace nix { #define WORKER_MAGIC_1 0x6e697863 #define WORKER_MAGIC_2 0x6478696f -#define PROTOCOL_VERSION 0x119 +#define PROTOCOL_VERSION 0x11a #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00) #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff) @@ -66,10 +66,6 @@ typedef enum { class Store; struct Source; -template T readStorePaths(const Store & store, Source & from); - -void writeStorePaths(const Store & store, Sink & out, const StorePathSet & paths); - /* To guide overloading */ template struct Phantom {}; @@ -78,76 +74,81 @@ struct Phantom {}; namespace worker_proto { /* FIXME maybe move more stuff inside here */ -StorePath read(const Store & store, Source & from, Phantom _); -void write(const Store & store, Sink & out, const StorePath & storePath); +#define MAKE_WORKER_PROTO(TEMPLATE, T) \ + TEMPLATE T read(const Store & store, Source & from, Phantom< T > _); \ + TEMPLATE void write(const Store & store, Sink & out, const T & str) -template -std::map read(const Store & store, Source & from, Phantom> _); -template -void write(const Store & store, Sink & out, const std::map & resMap); -template -std::optional read(const Store & store, Source & from, Phantom> _); -template -void write(const Store & store, Sink & out, const std::optional & optVal); +MAKE_WORKER_PROTO(, std::string); +MAKE_WORKER_PROTO(, StorePath); +MAKE_WORKER_PROTO(, ContentAddress); -/* Specialization which uses and 
empty string for the empty case, taking - advantage of the fact StorePaths always serialize to a non-empty string. - This is done primarily for backwards compatability, so that StorePath <= - std::optional, where <= is the compatability partial order. +MAKE_WORKER_PROTO(template, std::set); + +#define X_ template +#define Y_ std::map +MAKE_WORKER_PROTO(X_, Y_); +#undef X_ +#undef Y_ + +/* These use the empty string for the null case, relying on the fact + that the underlying types never serialize to the empty string. + + We do this instead of a generic std::optional instance because + ordinal tags (0 or 1, here) are a bit of a compatability hazard. For + the same reason, we don't have a std::variant instances (ordinal + tags 0...n). + + We could the generic instances and then these as specializations for + compatability, but that's proven a bit finnicky, and also makes the + worker protocol harder to implement in other languages where such + specializations may not be allowed. */ -template<> -void write(const Store & store, Sink & out, const std::optional & optVal); +MAKE_WORKER_PROTO(, std::optional); +MAKE_WORKER_PROTO(, std::optional); template -std::map read(const Store & store, Source & from, Phantom> _) +std::set read(const Store & store, Source & from, Phantom> _) { - std::map resMap; - auto size = (size_t)readInt(from); + std::set resSet; + auto size = readNum(from); while (size--) { - auto thisKey = readString(from); - resMap.insert_or_assign(std::move(thisKey), nix::worker_proto::read(store, from, Phantom {})); + resSet.insert(read(store, from, Phantom {})); + } + return resSet; +} + +template +void write(const Store & store, Sink & out, const std::set & resSet) +{ + out << resSet.size(); + for (auto & key : resSet) { + write(store, out, key); + } +} + +template +std::map read(const Store & store, Source & from, Phantom> _) +{ + std::map resMap; + auto size = readNum(from); + while (size--) { + auto k = read(store, from, Phantom {}); + auto v = read(store, from, Phantom {}); + resMap.insert_or_assign(std::move(k), std::move(v)); } return resMap; } -template -void write(const Store & store, Sink & out, const std::map & resMap) +template +void write(const Store & store, Sink & out, const std::map & resMap) { out << resMap.size(); for (auto & i : resMap) { - out << i.first; - nix::worker_proto::write(store, out, i.second); + write(store, out, i.first); + write(store, out, i.second); } } -template -std::optional read(const Store & store, Source & from, Phantom> _) -{ - auto tag = readNum(from); - switch (tag) { - case 0: - return std::nullopt; - case 1: - return nix::worker_proto::read(store, from, Phantom {}); - default: - throw Error("got an invalid tag bit for std::optional: %#04x", tag); - } } -template -void write(const Store & store, Sink & out, const std::optional & optVal) -{ - out << (optVal ? 
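The rewritten helpers above rely on two ideas: a `Phantom<T>` tag parameter so that the right `read` overload is chosen by the requested result type, and containers encoded as a length followed by their elements, with `std::optional` values encoded as an empty string as the comment explains. A self-contained sketch of the same pattern over `std::iostream`, using a simplified text framing rather than Nix's binary `Source`/`Sink` primitives:

```c++
#include <iostream>
#include <optional>
#include <set>
#include <sstream>
#include <string>

template<typename T> struct Phantom {};   // tag type used only to pick an overload

// Strings: length, a separator, then the bytes (simplified framing).
void write(std::ostream & out, const std::string & s)
{
    out << s.size() << '\n' << s;
}
std::string read(std::istream & in, Phantom<std::string>)
{
    std::size_t n; in >> n; in.get();          // length, then the '\n' separator
    std::string s(n, '\0');
    in.read(s.data(), n);
    return s;
}

// Optionals: the empty string means "none" (assumes real values are never empty).
void write(std::ostream & out, const std::optional<std::string> & opt)
{
    write(out, opt ? *opt : std::string());
}
std::optional<std::string> read(std::istream & in, Phantom<std::optional<std::string>>)
{
    auto s = read(in, Phantom<std::string>{});
    if (s.empty()) return std::nullopt;
    return s;
}

// Sets: element count followed by the elements.
template<typename T>
void write(std::ostream & out, const std::set<T> & xs)
{
    out << xs.size() << '\n';
    for (auto & x : xs) write(out, x);
}
template<typename T>
std::set<T> read(std::istream & in, Phantom<std::set<T>>)
{
    std::size_t n; in >> n; in.get();
    std::set<T> xs;
    while (n--) xs.insert(read(in, Phantom<T>{}));
    return xs;
}

int main()
{
    std::stringstream wire;
    write(wire, std::set<std::string>{"a", "bb"});
    auto back = read(wire, Phantom<std::set<std::string>>{});
    std::cout << back.size() << '\n';          // 2
}
```

Because the caller names the result type through the `Phantom` argument, adding support for a new type only requires another overload, which is what the `MAKE_WORKER_PROTO` macro above declares.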
1 : 0); - if (optVal) - nix::worker_proto::write(store, out, *optVal); -} - - -} - - -StorePathCAMap readStorePathCAMap(const Store & store, Source & from); - -void writeStorePathCAMap(const Store & store, Sink & out, const StorePathCAMap & paths); - } diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc index 6ad9fa238..f1479329f 100644 --- a/src/libutil/archive.cc +++ b/src/libutil/archive.cc @@ -33,7 +33,7 @@ struct ArchiveSettings : Config static ArchiveSettings archiveSettings; -static GlobalConfig::Register r1(&archiveSettings); +static GlobalConfig::Register rArchiveSettings(&archiveSettings); const std::string narVersionMagic1 = "nix-archive-1"; diff --git a/src/libutil/args.cc b/src/libutil/args.cc index 147602415..8bd9c8aeb 100644 --- a/src/libutil/args.cc +++ b/src/libutil/args.cc @@ -17,8 +17,20 @@ void Args::addFlag(Flag && flag_) if (flag->shortName) shortFlags[flag->shortName] = flag; } +void Completions::add(std::string completion, std::string description) +{ + assert(description.find('\n') == std::string::npos); + insert(Completion { + .completion = completion, + .description = description + }); +} + +bool Completion::operator<(const Completion & other) const +{ return completion < other.completion || (completion == other.completion && description < other.description); } + bool pathCompletions = false; -std::shared_ptr> completions; +std::shared_ptr completions; std::string completionMarker = "___COMPLETE___"; @@ -148,7 +160,7 @@ bool Args::processFlag(Strings::iterator & pos, Strings::iterator end) for (auto & [name, flag] : longFlags) { if (!hiddenCategories.count(flag->category) && hasPrefix(name, std::string(*prefix, 2))) - completions->insert("--" + name); + completions->add("--" + name, flag->description); } } auto i = longFlags.find(string(*pos, 2)); @@ -165,9 +177,9 @@ bool Args::processFlag(Strings::iterator & pos, Strings::iterator end) if (auto prefix = needsCompletion(*pos)) { if (prefix == "-") { - completions->insert("--"); - for (auto & [flag, _] : shortFlags) - completions->insert(std::string("-") + flag); + completions->add("--"); + for (auto & [flagName, flag] : shortFlags) + completions->add(std::string("-") + flagName, flag->description); } } @@ -244,11 +256,11 @@ nlohmann::json Args::toJSON() return res; } -static void hashTypeCompleter(size_t index, std::string_view prefix) +static void hashTypeCompleter(size_t index, std::string_view prefix) { for (auto & type : hashTypes) if (hasPrefix(type, prefix)) - completions->insert(type); + completions->add(type); } Args::Flag Args::Flag::mkHashTypeFlag(std::string && longName, HashType * ht) @@ -277,7 +289,7 @@ Args::Flag Args::Flag::mkHashTypeOptFlag(std::string && longName, std::optional< }; } -static void completePath(std::string_view prefix, bool onlyDirs) +static void _completePath(std::string_view prefix, bool onlyDirs) { pathCompletions = true; glob_t globbuf; @@ -292,7 +304,7 @@ static void completePath(std::string_view prefix, bool onlyDirs) auto st = lstat(globbuf.gl_pathv[i]); if (!S_ISDIR(st.st_mode)) continue; } - completions->insert(globbuf.gl_pathv[i]); + completions->add(globbuf.gl_pathv[i]); } globfree(&globbuf); } @@ -300,12 +312,12 @@ static void completePath(std::string_view prefix, bool onlyDirs) void completePath(size_t, std::string_view prefix) { - completePath(prefix, false); + _completePath(prefix, false); } void completeDir(size_t, std::string_view prefix) { - completePath(prefix, true); + _completePath(prefix, true); } Strings argvToStrings(int argc, char * * argv) @@ 
-385,7 +397,7 @@ MultiCommand::MultiCommand(const Commands & commands) if (auto prefix = needsCompletion(s)) { for (auto & [name, command] : commands) if (hasPrefix(name, *prefix)) - completions->insert(name); + completions->add(name); } auto i = commands.find(s); if (i == commands.end()) diff --git a/src/libutil/args.hh b/src/libutil/args.hh index f41242e17..26f1bc11b 100644 --- a/src/libutil/args.hh +++ b/src/libutil/args.hh @@ -283,7 +283,17 @@ typedef std::vector> Table2; void printTable(std::ostream & out, const Table2 & table); -extern std::shared_ptr> completions; +struct Completion { + std::string completion; + std::string description; + + bool operator<(const Completion & other) const; +}; +class Completions : public std::set { +public: + void add(std::string completion, std::string description = ""); +}; +extern std::shared_ptr completions; extern bool pathCompletions; std::optional needsCompletion(std::string_view s); diff --git a/src/libutil/config.cc b/src/libutil/config.cc index 5e6a211df..521733025 100644 --- a/src/libutil/config.cc +++ b/src/libutil/config.cc @@ -268,6 +268,26 @@ template<> std::string BaseSetting::to_string() const return concatStringsSep(" ", value); } +template<> void BaseSetting::set(const std::string & str) +{ + auto kvpairs = tokenizeString(str); + for (auto & s : kvpairs) + { + auto eq = s.find_first_of('='); + if (std::string::npos != eq) + value.emplace(std::string(s, 0, eq), std::string(s, eq + 1)); + // else ignored + } +} + +template<> std::string BaseSetting::to_string() const +{ + Strings kvstrs; + std::transform(value.begin(), value.end(), back_inserter(kvstrs), + [&](auto kvpair){ return kvpair.first + "=" + kvpair.second; }); + return concatStringsSep(" ", kvstrs); +} + template class BaseSetting; template class BaseSetting; template class BaseSetting; @@ -278,6 +298,7 @@ template class BaseSetting; template class BaseSetting; template class BaseSetting; template class BaseSetting; +template class BaseSetting; void PathSetting::set(const std::string & str) { diff --git a/src/libutil/error.cc b/src/libutil/error.cc index fd6f69b7f..803a72953 100644 --- a/src/libutil/error.cc +++ b/src/libutil/error.cc @@ -11,13 +11,13 @@ const std::string nativeSystem = SYSTEM; BaseError & BaseError::addTrace(std::optional e, hintformat hint) { - err.traces.push_front(Trace { .pos = e, .hint = hint}); + err.traces.push_front(Trace { .pos = e, .hint = hint }); return *this; } // c++ std::exception descendants must have a 'const char* what()' function. // This stringifies the error and caches it for use by what(), or similarly by msg(). 
-const string& BaseError::calcWhat() const +const string & BaseError::calcWhat() const { if (what_.has_value()) return *what_; @@ -34,12 +34,12 @@ const string& BaseError::calcWhat() const std::optional ErrorInfo::programName = std::nullopt; -std::ostream& operator<<(std::ostream &os, const hintformat &hf) +std::ostream & operator<<(std::ostream & os, const hintformat & hf) { return os << hf.str(); } -string showErrPos(const ErrPos &errPos) +string showErrPos(const ErrPos & errPos) { if (errPos.line > 0) { if (errPos.column > 0) { @@ -53,7 +53,7 @@ string showErrPos(const ErrPos &errPos) } } -std::optional getCodeLines(const ErrPos &errPos) +std::optional getCodeLines(const ErrPos & errPos) { if (errPos.line <= 0) return std::nullopt; @@ -92,13 +92,13 @@ std::optional getCodeLines(const ErrPos &errPos) return loc; } } - catch (EndOfFile &eof) { + catch (EndOfFile & eof) { if (loc.errLineOfCode.has_value()) return loc; else return std::nullopt; } - catch (std::exception &e) { + catch (std::exception & e) { printError("error reading nix file: %s\n%s", errPos.file, e.what()); return std::nullopt; } @@ -137,10 +137,10 @@ std::optional getCodeLines(const ErrPos &errPos) } // print lines of code to the ostream, indicating the error column. -void printCodeLines(std::ostream &out, - const string &prefix, - const ErrPos &errPos, - const LinesOfCode &loc) +void printCodeLines(std::ostream & out, + const string & prefix, + const ErrPos & errPos, + const LinesOfCode & loc) { // previous line of code. if (loc.prevLineOfCode.has_value()) { @@ -186,7 +186,7 @@ void printCodeLines(std::ostream &out, } } -void printAtPos(const string &prefix, const ErrPos &pos, std::ostream &out) +void printAtPos(const string & prefix, const ErrPos & pos, std::ostream & out) { if (pos) { @@ -212,7 +212,7 @@ void printAtPos(const string &prefix, const ErrPos &pos, std::ostream &out) } } -std::ostream& showErrorInfo(std::ostream &out, const ErrorInfo &einfo, bool showTrace) +std::ostream & showErrorInfo(std::ostream & out, const ErrorInfo & einfo, bool showTrace) { auto errwidth = std::max(getWindowSize().second, 20); string prefix = ""; diff --git a/src/libutil/error.hh b/src/libutil/error.hh index f3babcbde..d1b6d82bb 100644 --- a/src/libutil/error.hh +++ b/src/libutil/error.hh @@ -107,7 +107,7 @@ struct Trace { struct ErrorInfo { Verbosity level; string name; - string description; + string description; // FIXME: remove? it seems to be barely used std::optional hint; std::optional errPos; std::list traces; @@ -169,7 +169,7 @@ public: #endif const string & msg() const { return calcWhat(); } - const ErrorInfo & info() { calcWhat(); return err; } + const ErrorInfo & info() const { calcWhat(); return err; } template BaseError & addTrace(std::optional e, const string &fs, const Args & ... 
args) diff --git a/src/libutil/fmt.hh b/src/libutil/fmt.hh index 6e69bdce2..85c0e9429 100644 --- a/src/libutil/fmt.hh +++ b/src/libutil/fmt.hh @@ -76,11 +76,11 @@ template struct yellowtxt { yellowtxt(const T &s) : value(s) {} - const T &value; + const T & value; }; template -std::ostream& operator<<(std::ostream &out, const yellowtxt &y) +std::ostream & operator<<(std::ostream & out, const yellowtxt & y) { return out << ANSI_YELLOW << y.value << ANSI_NORMAL; } @@ -88,12 +88,12 @@ std::ostream& operator<<(std::ostream &out, const yellowtxt &y) template struct normaltxt { - normaltxt(const T &s) : value(s) {} - const T &value; + normaltxt(const T & s) : value(s) {} + const T & value; }; template -std::ostream& operator<<(std::ostream &out, const normaltxt &y) +std::ostream & operator<<(std::ostream & out, const normaltxt & y) { return out << ANSI_NORMAL << y.value; } @@ -101,26 +101,30 @@ std::ostream& operator<<(std::ostream &out, const normaltxt &y) class hintformat { public: - hintformat(const string &format) :fmt(format) + hintformat(const string & format) : fmt(format) { - fmt.exceptions(boost::io::all_error_bits ^ + fmt.exceptions(boost::io::all_error_bits ^ boost::io::too_many_args_bit ^ boost::io::too_few_args_bit); } - hintformat(const hintformat &hf) - : fmt(hf.fmt) - {} + hintformat(const hintformat & hf) + : fmt(hf.fmt) + { } + + hintformat(format && fmt) + : fmt(std::move(fmt)) + { } template - hintformat& operator%(const T &value) + hintformat & operator%(const T & value) { fmt % yellowtxt(value); return *this; } template - hintformat& operator%(const normaltxt &value) + hintformat & operator%(const normaltxt & value) { fmt % value.value; return *this; @@ -135,7 +139,7 @@ private: format fmt; }; -std::ostream& operator<<(std::ostream &os, const hintformat &hf); +std::ostream & operator<<(std::ostream & os, const hintformat & hf); template inline hintformat hintfmt(const std::string & fs, const Args & ... args) diff --git a/src/libutil/lazy.hh b/src/libutil/lazy.hh deleted file mode 100644 index d073e486c..000000000 --- a/src/libutil/lazy.hh +++ /dev/null @@ -1,48 +0,0 @@ -#include -#include -#include - -namespace nix { - -/* A helper class for lazily-initialized variables. - - Lazy var([]() { return value; }); - - declares a variable of type T that is initialized to 'value' (in a - thread-safe way) on first use, that is, when var() is first - called. If the initialiser code throws an exception, then all - subsequent calls to var() will rethrow that exception. */ -template -class Lazy -{ - - typedef std::function Init; - - Init init; - - std::once_flag done; - - T value; - - std::exception_ptr ex; - -public: - - Lazy(Init init) : init(init) - { } - - const T & operator () () - { - std::call_once(done, [&]() { - try { - value = init(); - } catch (...) 
{ - ex = std::current_exception(); - } - }); - if (ex) std::rethrow_exception(ex); - return value; - } -}; - -} diff --git a/src/libutil/logging.cc b/src/libutil/logging.cc index cbbf64395..8a6752e22 100644 --- a/src/libutil/logging.cc +++ b/src/libutil/logging.cc @@ -10,7 +10,7 @@ namespace nix { LoggerSettings loggerSettings; -static GlobalConfig::Register r1(&loggerSettings); +static GlobalConfig::Register rLoggerSettings(&loggerSettings); static thread_local ActivityId curActivity = 0; diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc index a469a1e73..5c9f6f901 100644 --- a/src/libutil/serialise.cc +++ b/src/libutil/serialise.cc @@ -266,6 +266,24 @@ Sink & operator << (Sink & sink, const StringSet & s) return sink; } +Sink & operator << (Sink & sink, const Error & ex) +{ + auto info = ex.info(); + sink + << "Error" + << info.level + << info.name + << info.description + << (info.hint ? info.hint->str() : "") + << 0 // FIXME: info.errPos + << info.traces.size(); + for (auto & trace : info.traces) { + sink << 0; // FIXME: trace.pos + sink << trace.hint.str(); + } + return sink; +} + void readPadding(size_t len, Source & source) { @@ -319,6 +337,30 @@ template Paths readStrings(Source & source); template PathSet readStrings(Source & source); +Error readError(Source & source) +{ + auto type = readString(source); + assert(type == "Error"); + ErrorInfo info; + info.level = (Verbosity) readInt(source); + info.name = readString(source); + info.description = readString(source); + auto hint = readString(source); + if (hint != "") info.hint = hintformat(std::move(format("%s") % hint)); + auto havePos = readNum(source); + assert(havePos == 0); + auto nrTraces = readNum(source); + for (size_t i = 0; i < nrTraces; ++i) { + havePos = readNum(source); + assert(havePos == 0); + info.traces.push_back(Trace { + .hint = hintformat(std::move(format("%s") % readString(source))) + }); + } + return Error(std::move(info)); +} + + void StringSink::operator () (const unsigned char * data, size_t len) { static bool warned = false; diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh index b41e58f33..d7fe0b81e 100644 --- a/src/libutil/serialise.hh +++ b/src/libutil/serialise.hh @@ -321,6 +321,7 @@ inline Sink & operator << (Sink & sink, uint64_t n) Sink & operator << (Sink & sink, const string & s); Sink & operator << (Sink & sink, const Strings & s); Sink & operator << (Sink & sink, const StringSet & s); +Sink & operator << (Sink & in, const Error & ex); MakeError(SerialisationError, Error); @@ -382,6 +383,8 @@ Source & operator >> (Source & in, bool & b) return in; } +Error readError(Source & source); + /* An adapter that converts a std::basic_istream into a source. */ struct StreamToSourceAdapter : Source diff --git a/src/libutil/topo-sort.hh b/src/libutil/topo-sort.hh index 7a68ff169..7418be5e0 100644 --- a/src/libutil/topo-sort.hh +++ b/src/libutil/topo-sort.hh @@ -1,3 +1,5 @@ +#pragma once + #include "error.hh" namespace nix { diff --git a/src/libutil/types.hh b/src/libutil/types.hh index 3af485fa0..55d02bcf9 100644 --- a/src/libutil/types.hh +++ b/src/libutil/types.hh @@ -24,6 +24,8 @@ typedef string Path; typedef list Paths; typedef set PathSet; +typedef vector> Headers; + /* Helper class to run code at startup. */ template struct OnStartup diff --git a/src/libutil/url-parts.hh b/src/libutil/url-parts.hh index 64e06cfbc..68be15cb0 100644 --- a/src/libutil/url-parts.hh +++ b/src/libutil/url-parts.hh @@ -7,7 +7,7 @@ namespace nix { // URI stuff. 
const static std::string pctEncoded = "(?:%[0-9a-fA-F][0-9a-fA-F])"; -const static std::string schemeRegex = "(?:[a-z+.-]+)"; +const static std::string schemeRegex = "(?:[a-z][a-z0-9+.-]*)"; const static std::string ipv6AddressRegex = "(?:\\[[0-9a-fA-F:]+\\])"; const static std::string unreservedRegex = "(?:[a-zA-Z0-9-._~])"; const static std::string subdelimsRegex = "(?:[!$&'\"()*+,;=])"; diff --git a/src/libutil/util.cc b/src/libutil/util.cc index 9e7142e01..53342b5cb 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -1,4 +1,3 @@ -#include "lazy.hh" #include "util.hh" #include "affinity.hh" #include "sync.hh" @@ -326,7 +325,12 @@ void writeFile(const Path & path, const string & s, mode_t mode) AutoCloseFD fd = open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC, mode); if (!fd) throw SysError("opening file '%1%'", path); - writeFull(fd.get(), s); + try { + writeFull(fd.get(), s); + } catch (Error & e) { + e.addTrace({}, "writing file '%1%'", path); + throw; + } } @@ -338,11 +342,16 @@ void writeFile(const Path & path, Source & source, mode_t mode) std::vector buf(64 * 1024); - while (true) { - try { - auto n = source.read(buf.data(), buf.size()); - writeFull(fd.get(), (unsigned char *) buf.data(), n); - } catch (EndOfFile &) { break; } + try { + while (true) { + try { + auto n = source.read(buf.data(), buf.size()); + writeFull(fd.get(), (unsigned char *) buf.data(), n); + } catch (EndOfFile &) { break; } + } + } catch (Error & e) { + e.addTrace({}, "writing file '%1%'", path); + throw; } } @@ -512,21 +521,24 @@ std::string getUserName() } -static Lazy getHome2([]() { - auto homeDir = getEnv("HOME"); - if (!homeDir) { - std::vector buf(16384); - struct passwd pwbuf; - struct passwd * pw; - if (getpwuid_r(geteuid(), &pwbuf, buf.data(), buf.size(), &pw) != 0 - || !pw || !pw->pw_dir || !pw->pw_dir[0]) - throw Error("cannot determine user's home directory"); - homeDir = pw->pw_dir; - } - return *homeDir; -}); - -Path getHome() { return getHome2(); } +Path getHome() +{ + static Path homeDir = []() + { + auto homeDir = getEnv("HOME"); + if (!homeDir) { + std::vector buf(16384); + struct passwd pwbuf; + struct passwd * pw; + if (getpwuid_r(geteuid(), &pwbuf, buf.data(), buf.size(), &pw) != 0 + || !pw || !pw->pw_dir || !pw->pw_dir[0]) + throw Error("cannot determine user's home directory"); + homeDir = pw->pw_dir; + } + return *homeDir; + }(); + return homeDir; +} Path getCacheDir() @@ -1641,4 +1653,40 @@ AutoCloseFD createUnixDomainSocket(const Path & path, mode_t mode) return fdSocket; } + +string showBytes(uint64_t bytes) +{ + return fmt("%.2f MiB", bytes / (1024.0 * 1024.0)); +} + + +void commonChildInit(Pipe & logPipe) +{ + const static string pathNullDevice = "/dev/null"; + restoreSignals(); + + /* Put the child in a separate session (and thus a separate + process group) so that it has no controlling terminal (meaning + that e.g. ssh cannot open /dev/tty) and it doesn't receive + terminal signals. */ + if (setsid() == -1) + throw SysError("creating a new session"); + + /* Dup the write side of the logger pipe into stderr. */ + if (dup2(logPipe.writeSide.get(), STDERR_FILENO) == -1) + throw SysError("cannot pipe standard error into log file"); + + /* Dup stderr to stdout. */ + if (dup2(STDERR_FILENO, STDOUT_FILENO) == -1) + throw SysError("cannot dup stderr into stdout"); + + /* Reroute stdin to /dev/null. 
*/ + int fdDevNull = open(pathNullDevice.c_str(), O_RDWR); + if (fdDevNull == -1) + throw SysError("cannot open '%1%'", pathNullDevice); + if (dup2(fdDevNull, STDIN_FILENO) == -1) + throw SysError("cannot dup null device into stdin"); + close(fdDevNull); +} + } diff --git a/src/libutil/util.hh b/src/libutil/util.hh index b8e201203..cafe93702 100644 --- a/src/libutil/util.hh +++ b/src/libutil/util.hh @@ -536,6 +536,8 @@ typedef std::function PathFilter; extern PathFilter defaultPathFilter; +/* Common initialisation performed in child processes. */ +void commonChildInit(Pipe & logPipe); /* Create a Unix domain socket in listen mode. */ AutoCloseFD createUnixDomainSocket(const Path & path, mode_t mode); @@ -573,4 +575,7 @@ template struct overloaded : Ts... { using Ts::operator()...; }; template overloaded(Ts...) -> overloaded; +std::string showBytes(uint64_t bytes); + + } diff --git a/src/nix-build/nix-build.cc b/src/nix-build/nix-build.cc index 3a8d67f21..f60e0706c 100755 --- a/src/nix-build/nix-build.cc +++ b/src/nix-build/nix-build.cc @@ -6,6 +6,7 @@ #include #include "store-api.hh" +#include "local-fs-store.hh" #include "globals.hh" #include "derivations.hh" #include "affinity.hh" @@ -67,7 +68,7 @@ std::vector shellwords(const string & s) return res; } -static void _main(int argc, char * * argv) +static void main_nix_build(int argc, char * * argv) { auto dryRun = false; auto runEnv = std::regex_search(argv[0], std::regex("nix-shell$")); @@ -540,5 +541,5 @@ static void _main(int argc, char * * argv) } } -static RegisterLegacyCommand s1("nix-build", _main); -static RegisterLegacyCommand s2("nix-shell", _main); +static RegisterLegacyCommand r_nix_build("nix-build", main_nix_build); +static RegisterLegacyCommand r_nix_shell("nix-shell", main_nix_build); diff --git a/src/nix-channel/nix-channel.cc b/src/nix-channel/nix-channel.cc index e48f7af9a..309970df6 100755 --- a/src/nix-channel/nix-channel.cc +++ b/src/nix-channel/nix-channel.cc @@ -153,7 +153,7 @@ static void update(const StringSet & channelNames) replaceSymlink(profile, channelLink); } -static int _main(int argc, char ** argv) +static int main_nix_channel(int argc, char ** argv) { { // Figure out the name of the `.nix-channels' file to use @@ -250,4 +250,4 @@ static int _main(int argc, char ** argv) } } -static RegisterLegacyCommand s1("nix-channel", _main); +static RegisterLegacyCommand r_nix_channel("nix-channel", main_nix_channel); diff --git a/src/nix-collect-garbage/nix-collect-garbage.cc b/src/nix-collect-garbage/nix-collect-garbage.cc index bcf1d8518..57092b887 100644 --- a/src/nix-collect-garbage/nix-collect-garbage.cc +++ b/src/nix-collect-garbage/nix-collect-garbage.cc @@ -49,7 +49,7 @@ void removeOldGenerations(std::string dir) } } -static int _main(int argc, char * * argv) +static int main_nix_collect_garbage(int argc, char * * argv) { { bool removeOld = false; @@ -92,4 +92,4 @@ static int _main(int argc, char * * argv) } } -static RegisterLegacyCommand s1("nix-collect-garbage", _main); +static RegisterLegacyCommand r_nix_collect_garbage("nix-collect-garbage", main_nix_collect_garbage); diff --git a/src/nix-copy-closure/nix-copy-closure.cc b/src/nix-copy-closure/nix-copy-closure.cc index b10184718..10990f7b5 100755 --- a/src/nix-copy-closure/nix-copy-closure.cc +++ b/src/nix-copy-closure/nix-copy-closure.cc @@ -4,7 +4,7 @@ using namespace nix; -static int _main(int argc, char ** argv) +static int main_nix_copy_closure(int argc, char ** argv) { { auto gzip = false; @@ -65,4 +65,4 @@ static int _main(int argc, char ** argv) 
} } -static RegisterLegacyCommand s1("nix-copy-closure", _main); +static RegisterLegacyCommand r_nix_copy_closure("nix-copy-closure", main_nix_copy_closure); diff --git a/src/nix-daemon/nix-daemon.cc b/src/nix-daemon/nix-daemon.cc index 6e652ccbf..fc6195cf0 100644 --- a/src/nix-daemon/nix-daemon.cc +++ b/src/nix-daemon/nix-daemon.cc @@ -265,7 +265,7 @@ static void daemonLoop(char * * argv) } -static int _main(int argc, char * * argv) +static int main_nix_daemon(int argc, char * * argv) { { auto stdio = false; @@ -330,4 +330,4 @@ static int _main(int argc, char * * argv) } } -static RegisterLegacyCommand s1("nix-daemon", _main); +static RegisterLegacyCommand r_nix_daemon("nix-daemon", main_nix_daemon); diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc index 3e7c453fb..a4b5c9e2c 100644 --- a/src/nix-env/nix-env.cc +++ b/src/nix-env/nix-env.cc @@ -8,6 +8,7 @@ #include "profiles.hh" #include "shared.hh" #include "store-api.hh" +#include "local-fs-store.hh" #include "user-env.hh" #include "util.hh" #include "json.hh" @@ -1330,7 +1331,7 @@ static void opVersion(Globals & globals, Strings opFlags, Strings opArgs) } -static int _main(int argc, char * * argv) +static int main_nix_env(int argc, char * * argv) { { Strings opFlags, opArgs; @@ -1460,4 +1461,4 @@ static int _main(int argc, char * * argv) } } -static RegisterLegacyCommand s1("nix-env", _main); +static RegisterLegacyCommand r_nix_env("nix-env", main_nix_env); diff --git a/src/nix-env/user-env.cc b/src/nix-env/user-env.cc index 8c6c8af05..87387e794 100644 --- a/src/nix-env/user-env.cc +++ b/src/nix-env/user-env.cc @@ -2,6 +2,7 @@ #include "util.hh" #include "derivations.hh" #include "store-api.hh" +#include "local-fs-store.hh" #include "globals.hh" #include "shared.hh" #include "eval.hh" diff --git a/src/nix-instantiate/nix-instantiate.cc b/src/nix-instantiate/nix-instantiate.cc index 539092cbe..3956fef6d 100644 --- a/src/nix-instantiate/nix-instantiate.cc +++ b/src/nix-instantiate/nix-instantiate.cc @@ -8,6 +8,7 @@ #include "value-to-json.hh" #include "util.hh" #include "store-api.hh" +#include "local-fs-store.hh" #include "common-eval-args.hh" #include "../nix/legacy.hh" @@ -83,7 +84,7 @@ void processExpr(EvalState & state, const Strings & attrPaths, } -static int _main(int argc, char * * argv) +static int main_nix_instantiate(int argc, char * * argv) { { Strings files; @@ -192,4 +193,4 @@ static int _main(int argc, char * * argv) } } -static RegisterLegacyCommand s1("nix-instantiate", _main); +static RegisterLegacyCommand r_nix_instantiate("nix-instantiate", main_nix_instantiate); diff --git a/src/nix-prefetch-url/nix-prefetch-url.cc b/src/nix-prefetch-url/nix-prefetch-url.cc index 377ae03a8..3bdee55a7 100644 --- a/src/nix-prefetch-url/nix-prefetch-url.cc +++ b/src/nix-prefetch-url/nix-prefetch-url.cc @@ -48,7 +48,7 @@ string resolveMirrorUri(EvalState & state, string uri) } -static int _main(int argc, char * * argv) +static int main_nix_prefetch_url(int argc, char * * argv) { { HashType ht = htSHA256; @@ -229,4 +229,4 @@ static int _main(int argc, char * * argv) } } -static RegisterLegacyCommand s1("nix-prefetch-url", _main); +static RegisterLegacyCommand r_nix_prefetch_url("nix-prefetch-url", main_nix_prefetch_url); diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc index b027e84b7..14baabc36 100644 --- a/src/nix-store/nix-store.cc +++ b/src/nix-store/nix-store.cc @@ -24,6 +24,9 @@ #endif +namespace nix_store { + + using namespace nix; using std::cin; using std::cout; @@ -822,7 +825,7 @@ static void 
opServe(Strings opFlags, Strings opArgs) case cmdQueryValidPaths: { bool lock = readInt(in); bool substitute = readInt(in); - auto paths = readStorePaths(*store, in); + auto paths = worker_proto::read(*store, in, Phantom {}); if (lock && writeAllowed) for (auto & path : paths) store->addTempRoot(path); @@ -852,19 +855,19 @@ static void opServe(Strings opFlags, Strings opArgs) } } - writeStorePaths(*store, out, store->queryValidPaths(paths)); + worker_proto::write(*store, out, store->queryValidPaths(paths)); break; } case cmdQueryPathInfos: { - auto paths = readStorePaths(*store, in); + auto paths = worker_proto::read(*store, in, Phantom {}); // !!! Maybe we want a queryPathInfos? for (auto & i : paths) { try { auto info = store->queryPathInfo(i); out << store->printStorePath(info->path) << (info->deriver ? store->printStorePath(*info->deriver) : ""); - writeStorePaths(*store, out, info->references); + worker_proto::write(*store, out, info->references); // !!! Maybe we want compression? out << info->narSize // downloadSize << info->narSize; @@ -892,7 +895,7 @@ static void opServe(Strings opFlags, Strings opArgs) case cmdExportPaths: { readInt(in); // obsolete - store->exportPaths(readStorePaths(*store, in), out); + store->exportPaths(worker_proto::read(*store, in, Phantom {}), out); break; } @@ -941,9 +944,9 @@ static void opServe(Strings opFlags, Strings opArgs) case cmdQueryClosure: { bool includeOutputs = readInt(in); StorePathSet closure; - store->computeFSClosure(readStorePaths(*store, in), + store->computeFSClosure(worker_proto::read(*store, in, Phantom {}), closure, false, includeOutputs); - writeStorePaths(*store, out, closure); + worker_proto::write(*store, out, closure); break; } @@ -958,7 +961,7 @@ static void opServe(Strings opFlags, Strings opArgs) }; if (deriver != "") info.deriver = store->parseStorePath(deriver); - info.references = readStorePaths(*store, in); + info.references = worker_proto::read(*store, in, Phantom {}); in >> info.registrationTime >> info.narSize >> info.ultimate; info.sigs = readStrings(in); info.ca = parseContentAddressOpt(readString(in)); @@ -1025,7 +1028,7 @@ static void opVersion(Strings opFlags, Strings opArgs) /* Scan the arguments; find the operation, set global flags, put all other flags in a list, and put all other arguments in another list. 
*/ -static int _main(int argc, char * * argv) +static int main_nix_store(int argc, char * * argv) { { Strings opFlags, opArgs; @@ -1121,4 +1124,6 @@ static int _main(int argc, char * * argv) } } -static RegisterLegacyCommand s1("nix-store", _main); +static RegisterLegacyCommand r_nix_store("nix-store", main_nix_store); + +} diff --git a/src/nix/add-to-store.cc b/src/nix/add-to-store.cc index 023ffa4ed..7fe87d757 100644 --- a/src/nix/add-to-store.cc +++ b/src/nix/add-to-store.cc @@ -87,4 +87,4 @@ struct CmdAddToStore : MixDryRun, StoreCommand } }; -static auto r1 = registerCommand("add-to-store"); +static auto rCmdAddToStore = registerCommand("add-to-store"); diff --git a/src/nix/build.cc b/src/nix/build.cc index 5952d696b..86eda7fb9 100644 --- a/src/nix/build.cc +++ b/src/nix/build.cc @@ -3,6 +3,7 @@ #include "common-args.hh" #include "shared.hh" #include "store-api.hh" +#include "local-fs-store.hh" using namespace nix; @@ -88,4 +89,4 @@ struct CmdBuild : InstallablesCommand, MixDryRun, MixProfile } }; -static auto r1 = registerCommand("build"); +static auto rCmdBuild = registerCommand("build"); diff --git a/src/nix/bundle.cc b/src/nix/bundle.cc index a757abf1e..87988891b 100644 --- a/src/nix/bundle.cc +++ b/src/nix/bundle.cc @@ -2,6 +2,7 @@ #include "common-args.hh" #include "shared.hh" #include "store-api.hh" +#include "local-fs-store.hh" #include "fs-accessor.hh" using namespace nix; diff --git a/src/nix/cat.cc b/src/nix/cat.cc index 97306107c..eef172cfc 100644 --- a/src/nix/cat.cc +++ b/src/nix/cat.cc @@ -72,5 +72,5 @@ struct CmdCatNar : StoreCommand, MixCat } }; -static auto r1 = registerCommand("cat-store"); -static auto r2 = registerCommand("cat-nar"); +static auto rCmdCatStore = registerCommand("cat-store"); +static auto rCmdCatNar = registerCommand("cat-nar"); diff --git a/src/nix/command.cc b/src/nix/command.cc index 37a4bc785..9a38c77f1 100644 --- a/src/nix/command.cc +++ b/src/nix/command.cc @@ -1,5 +1,6 @@ #include "command.hh" #include "store-api.hh" +#include "local-fs-store.hh" #include "derivations.hh" #include "nixexpr.hh" #include "profiles.hh" @@ -152,7 +153,7 @@ void MixProfile::updateProfile(const Buildables & buildables) for (auto & output : bfd.outputs) { /* Output path should be known because we just tried to build it. */ - assert(!output.second); + assert(output.second); result.push_back(*output.second); } }, diff --git a/src/nix/copy.cc b/src/nix/copy.cc index 811c8ab34..cb31aac8f 100644 --- a/src/nix/copy.cc +++ b/src/nix/copy.cc @@ -106,4 +106,4 @@ struct CmdCopy : StorePathsCommand } }; -static auto r1 = registerCommand("copy"); +static auto rCmdCopy = registerCommand("copy"); diff --git a/src/nix/describe-stores.cc b/src/nix/describe-stores.cc index 0cc2d9337..1dd384c0e 100644 --- a/src/nix/describe-stores.cc +++ b/src/nix/describe-stores.cc @@ -41,4 +41,4 @@ struct CmdDescribeStores : Command, MixJSON } }; -static auto r1 = registerCommand("describe-stores"); +static auto rDescribeStore = registerCommand("describe-stores"); diff --git a/src/nix/develop.cc b/src/nix/develop.cc index f2681e14d..3be59bc62 100644 --- a/src/nix/develop.cc +++ b/src/nix/develop.cc @@ -164,6 +164,7 @@ struct Common : InstallableCommand, MixProfile "BASHOPTS", "EUID", "HOME", // FIXME: don't ignore in pure mode? 
+ "HOSTNAME", "NIX_BUILD_TOP", "NIX_ENFORCE_PURITY", "NIX_LOG_FD", @@ -377,6 +378,10 @@ struct CmdDevelop : Common, MixEnvironment script += fmt("exec %s\n", concatStringsSep(" ", args)); } + else { + script += "[ -n \"$PS1\" ] && [ -e ~/.bashrc ] && source ~/.bashrc;\n"; + } + writeFull(rcFileFd.get(), script); stopProgressBar(); @@ -444,5 +449,5 @@ struct CmdPrintDevEnv : Common } }; -static auto r1 = registerCommand("print-dev-env"); -static auto r2 = registerCommand("develop"); +static auto rCmdPrintDevEnv = registerCommand("print-dev-env"); +static auto rCmdDevelop = registerCommand("develop"); diff --git a/src/nix/diff-closures.cc b/src/nix/diff-closures.cc index 0dc99d05e..30e7b20e1 100644 --- a/src/nix/diff-closures.cc +++ b/src/nix/diff-closures.cc @@ -143,4 +143,4 @@ struct CmdDiffClosures : SourceExprCommand } }; -static auto r1 = registerCommand("diff-closures"); +static auto rCmdDiffClosures = registerCommand("diff-closures"); diff --git a/src/nix/doctor.cc b/src/nix/doctor.cc index 683e91446..4f3003448 100644 --- a/src/nix/doctor.cc +++ b/src/nix/doctor.cc @@ -5,6 +5,7 @@ #include "serve-protocol.hh" #include "shared.hh" #include "store-api.hh" +#include "local-fs-store.hh" #include "util.hh" #include "worker-protocol.hh" @@ -131,4 +132,4 @@ struct CmdDoctor : StoreCommand } }; -static auto r1 = registerCommand("doctor"); +static auto rCmdDoctor = registerCommand("doctor"); diff --git a/src/nix/dump-path.cc b/src/nix/dump-path.cc index e1de71bf8..6fd197531 100644 --- a/src/nix/dump-path.cc +++ b/src/nix/dump-path.cc @@ -30,4 +30,4 @@ struct CmdDumpPath : StorePathCommand } }; -static auto r1 = registerCommand("dump-path"); +static auto rDumpPath = registerCommand("dump-path"); diff --git a/src/nix/edit.cc b/src/nix/edit.cc index 378a3739c..51c16f5a9 100644 --- a/src/nix/edit.cc +++ b/src/nix/edit.cc @@ -54,4 +54,4 @@ struct CmdEdit : InstallableCommand } }; -static auto r1 = registerCommand("edit"); +static auto rCmdEdit = registerCommand("edit"); diff --git a/src/nix/eval.cc b/src/nix/eval.cc index a8ca446be..43ce46546 100644 --- a/src/nix/eval.cc +++ b/src/nix/eval.cc @@ -90,4 +90,4 @@ struct CmdEval : MixJSON, InstallableCommand } }; -static auto r1 = registerCommand("eval"); +static auto rCmdEval = registerCommand("eval"); diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 4186ee955..f8b3c8d0c 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -958,4 +958,4 @@ struct CmdFlake : NixMultiCommand } }; -static auto r1 = registerCommand("flake"); +static auto rCmdFlake = registerCommand("flake"); diff --git a/src/nix/hash.cc b/src/nix/hash.cc index 494f00a20..1d23bb0e2 100644 --- a/src/nix/hash.cc +++ b/src/nix/hash.cc @@ -79,8 +79,8 @@ struct CmdHash : Command } }; -static RegisterCommand r1("hash-file", [](){ return make_ref(FileIngestionMethod::Flat); }); -static RegisterCommand r2("hash-path", [](){ return make_ref(FileIngestionMethod::Recursive); }); +static RegisterCommand rCmdHashFile("hash-file", [](){ return make_ref(FileIngestionMethod::Flat); }); +static RegisterCommand rCmdHashPath("hash-path", [](){ return make_ref(FileIngestionMethod::Recursive); }); struct CmdToBase : Command { @@ -112,10 +112,10 @@ struct CmdToBase : Command } }; -static RegisterCommand r3("to-base16", [](){ return make_ref(Base16); }); -static RegisterCommand r4("to-base32", [](){ return make_ref(Base32); }); -static RegisterCommand r5("to-base64", [](){ return make_ref(Base64); }); -static RegisterCommand r6("to-sri", [](){ return make_ref(SRI); }); +static RegisterCommand 
rCmdToBase16("to-base16", [](){ return make_ref(Base16); }); +static RegisterCommand rCmdToBase32("to-base32", [](){ return make_ref(Base32); }); +static RegisterCommand rCmdToBase64("to-base64", [](){ return make_ref(Base64); }); +static RegisterCommand rCmdToSRI("to-sri", [](){ return make_ref(SRI); }); /* Legacy nix-hash command. */ static int compatNixHash(int argc, char * * argv) @@ -167,4 +167,4 @@ static int compatNixHash(int argc, char * * argv) return 0; } -static RegisterLegacyCommand s1("nix-hash", compatNixHash); +static RegisterLegacyCommand r_nix_hash("nix-hash", compatNixHash); diff --git a/src/nix/installables.cc b/src/nix/installables.cc index d12a8f866..6841c53b4 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -28,7 +28,7 @@ void completeFlakeInputPath( auto flake = flake::getFlake(*evalState, flakeRef, true); for (auto & input : flake.inputs) if (hasPrefix(input.first, prefix)) - completions->insert(input.first); + completions->add(input.first); } MixFlakeOptions::MixFlakeOptions() @@ -214,7 +214,7 @@ void completeFlakeRefWithFragment( auto attrPath2 = attr->getAttrPath(attr2); /* Strip the attrpath prefix. */ attrPath2.erase(attrPath2.begin(), attrPath2.begin() + attrPathPrefix.size()); - completions->insert(flakeRefS + "#" + concatStringsSep(".", attrPath2)); + completions->add(flakeRefS + "#" + concatStringsSep(".", attrPath2)); } } } @@ -225,7 +225,7 @@ void completeFlakeRefWithFragment( for (auto & attrPath : defaultFlakeAttrPaths) { auto attr = root->findAlongAttrPath(parseAttrPath(*evalState, attrPath)); if (!attr) continue; - completions->insert(flakeRefS + "#"); + completions->add(flakeRefS + "#"); } } } @@ -246,7 +246,7 @@ ref EvalCommand::getEvalState() void completeFlakeRef(ref store, std::string_view prefix) { if (prefix == "") - completions->insert("."); + completions->add("."); completeDir(0, prefix); @@ -257,10 +257,10 @@ void completeFlakeRef(ref store, std::string_view prefix) if (!hasPrefix(prefix, "flake:") && hasPrefix(from, "flake:")) { std::string from2(from, 6); if (hasPrefix(from2, prefix)) - completions->insert(from2); + completions->add(from2); } else { if (hasPrefix(from, prefix)) - completions->insert(from); + completions->add(from); } } } diff --git a/src/nix/local.mk b/src/nix/local.mk index ad2cec930..bdbd49d67 100644 --- a/src/nix/local.mk +++ b/src/nix/local.mk @@ -19,7 +19,7 @@ nix_CXXFLAGS += -I src/libutil -I src/libstore -I src/libfetchers -I src/libexpr nix_LIBS = libexpr libmain libfetchers libstore libutil -nix_LDFLAGS = -pthread $(SODIUM_LIBS) $(EDITLINE_LIBS) $(BOOST_LDFLAGS) -lboost_context -lboost_thread -lboost_system -llowdown +nix_LDFLAGS = -pthread $(SODIUM_LIBS) $(EDITLINE_LIBS) $(BOOST_LDFLAGS) -llowdown $(foreach name, \ nix-build nix-channel nix-collect-garbage nix-copy-closure nix-daemon nix-env nix-hash nix-instantiate nix-prefetch-url nix-shell nix-store, \ diff --git a/src/nix/log.cc b/src/nix/log.cc index 33380dcf5..33a3053f5 100644 --- a/src/nix/log.cc +++ b/src/nix/log.cc @@ -64,4 +64,4 @@ struct CmdLog : InstallableCommand } }; -static auto r1 = registerCommand("log"); +static auto rCmdLog = registerCommand("log"); diff --git a/src/nix/ls.cc b/src/nix/ls.cc index 76c8bc9a3..baca54431 100644 --- a/src/nix/ls.cc +++ b/src/nix/ls.cc @@ -152,5 +152,5 @@ struct CmdLsNar : Command, MixLs } }; -static auto r1 = registerCommand("ls-store"); -static auto r2 = registerCommand("ls-nar"); +static auto rCmdLsStore = registerCommand("ls-store"); +static auto rCmdLsNar = registerCommand("ls-nar"); diff 
--git a/src/nix/main.cc b/src/nix/main.cc index 1e9e07bc0..5056ceb78 100644 --- a/src/nix/main.cc +++ b/src/nix/main.cc @@ -208,7 +208,7 @@ void mainWrapped(int argc, char * * argv) if (completions) { std::cout << (pathCompletions ? "filenames\n" : "no-filenames\n"); for (auto & s : *completions) - std::cout << s << "\n"; + std::cout << s.completion << "\t" << s.description << "\n"; } }); diff --git a/src/nix/make-content-addressable.cc b/src/nix/make-content-addressable.cc index 38b60fc38..df3ec5194 100644 --- a/src/nix/make-content-addressable.cc +++ b/src/nix/make-content-addressable.cc @@ -108,4 +108,4 @@ struct CmdMakeContentAddressable : StorePathsCommand, MixJSON } }; -static auto r1 = registerCommand("make-content-addressable"); +static auto rCmdMakeContentAddressable = registerCommand("make-content-addressable"); diff --git a/src/nix/optimise-store.cc b/src/nix/optimise-store.cc index b45951879..51a7a9756 100644 --- a/src/nix/optimise-store.cc +++ b/src/nix/optimise-store.cc @@ -31,4 +31,4 @@ struct CmdOptimiseStore : StoreCommand } }; -static auto r1 = registerCommand("optimise-store"); +static auto rCmdOptimiseStore = registerCommand("optimise-store"); diff --git a/src/nix/path-info.cc b/src/nix/path-info.cc index 0c12efaf0..63cf885f9 100644 --- a/src/nix/path-info.cc +++ b/src/nix/path-info.cc @@ -127,4 +127,4 @@ struct CmdPathInfo : StorePathsCommand, MixJSON } }; -static auto r1 = registerCommand("path-info"); +static auto rCmdPathInfo = registerCommand("path-info"); diff --git a/src/nix/ping-store.cc b/src/nix/ping-store.cc index 127397a29..8db78d591 100644 --- a/src/nix/ping-store.cc +++ b/src/nix/ping-store.cc @@ -29,4 +29,4 @@ struct CmdPingStore : StoreCommand } }; -static auto r1 = registerCommand("ping-store"); +static auto rCmdPingStore = registerCommand("ping-store"); diff --git a/src/nix/profile.cc b/src/nix/profile.cc index 251ee8ca7..fd2240baa 100644 --- a/src/nix/profile.cc +++ b/src/nix/profile.cc @@ -471,4 +471,4 @@ struct CmdProfile : NixMultiCommand } }; -static auto r1 = registerCommand("profile"); +static auto rCmdProfile = registerCommand("profile"); diff --git a/src/nix/registry.cc b/src/nix/registry.cc index 367268683..8e8983ad0 100644 --- a/src/nix/registry.cc +++ b/src/nix/registry.cc @@ -31,8 +31,8 @@ struct CmdRegistryList : StoreCommand registry->type == Registry::User ? "user " : registry->type == Registry::System ? 
"system" : "global", - entry.from.to_string(), - entry.to.to_string()); + entry.from.toURLString(), + entry.to.toURLString(attrsToQuery(entry.extraAttrs))); } } } @@ -143,4 +143,4 @@ struct CmdRegistry : virtual NixMultiCommand } }; -static auto r1 = registerCommand("registry"); +static auto rCmdRegistry = registerCommand("registry"); diff --git a/src/nix/repl.cc b/src/nix/repl.cc index 329999475..9ff386b1d 100644 --- a/src/nix/repl.cc +++ b/src/nix/repl.cc @@ -825,6 +825,6 @@ struct CmdRepl : StoreCommand, MixEvalArgs } }; -static auto r1 = registerCommand("repl"); +static auto rCmdRepl = registerCommand("repl"); } diff --git a/src/nix/run.cc b/src/nix/run.cc index cbaba9d90..790784382 100644 --- a/src/nix/run.cc +++ b/src/nix/run.cc @@ -140,7 +140,7 @@ struct CmdShell : InstallablesCommand, RunCommon, MixEnvironment } }; -static auto r1 = registerCommand("shell"); +static auto rCmdShell = registerCommand("shell"); struct CmdRun : InstallableCommand, RunCommon { @@ -167,6 +167,14 @@ struct CmdRun : InstallableCommand, RunCommon "To run Blender:", "nix run blender-bin" }, + Example{ + "To run vim from nixpkgs:", + "nix run nixpkgs#vim" + }, + Example{ + "To run vim from nixpkgs with arguments:", + "nix run nixpkgs#vim -- --help" + }, }; } @@ -201,7 +209,7 @@ struct CmdRun : InstallableCommand, RunCommon } }; -static auto r2 = registerCommand("run"); +static auto rCmdRun = registerCommand("run"); void chrootHelper(int argc, char * * argv) { diff --git a/src/nix/search.cc b/src/nix/search.cc index 430979274..d4326dc84 100644 --- a/src/nix/search.cc +++ b/src/nix/search.cc @@ -185,4 +185,4 @@ struct CmdSearch : InstallableCommand, MixJSON } }; -static auto r1 = registerCommand("search"); +static auto rCmdSearch = registerCommand("search"); diff --git a/src/nix/show-config.cc b/src/nix/show-config.cc index 3ed1ad2aa..1ef54a33a 100644 --- a/src/nix/show-config.cc +++ b/src/nix/show-config.cc @@ -30,4 +30,4 @@ struct CmdShowConfig : Command, MixJSON } }; -static auto r1 = registerCommand("show-config"); +static auto rShowConfig = registerCommand("show-config"); diff --git a/src/nix/show-derivation.cc b/src/nix/show-derivation.cc index b9f33499b..2542537d3 100644 --- a/src/nix/show-derivation.cc +++ b/src/nix/show-derivation.cc @@ -123,4 +123,4 @@ struct CmdShowDerivation : InstallablesCommand } }; -static auto r1 = registerCommand("show-derivation"); +static auto rCmdShowDerivation = registerCommand("show-derivation"); diff --git a/src/nix/sigs.cc b/src/nix/sigs.cc index 7821a5432..44916c77f 100644 --- a/src/nix/sigs.cc +++ b/src/nix/sigs.cc @@ -92,7 +92,7 @@ struct CmdCopySigs : StorePathsCommand } }; -static auto r1 = registerCommand("copy-sigs"); +static auto rCmdCopySigs = registerCommand("copy-sigs"); struct CmdSignPaths : StorePathsCommand { @@ -144,4 +144,4 @@ struct CmdSignPaths : StorePathsCommand } }; -static auto r2 = registerCommand("sign-paths"); +static auto rCmdSignPaths = registerCommand("sign-paths"); diff --git a/src/nix/upgrade-nix.cc b/src/nix/upgrade-nix.cc index a880bdae0..66ecc5b34 100644 --- a/src/nix/upgrade-nix.cc +++ b/src/nix/upgrade-nix.cc @@ -158,4 +158,4 @@ struct CmdUpgradeNix : MixDryRun, StoreCommand } }; -static auto r1 = registerCommand("upgrade-nix"); +static auto rCmdUpgradeNix = registerCommand("upgrade-nix"); diff --git a/src/nix/verify.cc b/src/nix/verify.cc index 26f755fd9..ec7333d03 100644 --- a/src/nix/verify.cc +++ b/src/nix/verify.cc @@ -189,4 +189,4 @@ struct CmdVerify : StorePathsCommand } }; -static auto r1 = registerCommand("verify"); +static 
auto rCmdVerify = registerCommand<CmdVerify>("verify"); diff --git a/src/nix/why-depends.cc b/src/nix/why-depends.cc index 7e630b745..63bf087e6 100644 --- a/src/nix/why-depends.cc +++ b/src/nix/why-depends.cc @@ -263,4 +263,4 @@ struct CmdWhyDepends : SourceExprCommand } }; -static auto r1 = registerCommand<CmdWhyDepends>("why-depends"); +static auto rCmdWhyDepends = registerCommand<CmdWhyDepends>("why-depends"); diff --git a/tests/content-addressed.nix b/tests/content-addressed.nix index 5e9bad0ac..3dcf916c3 100644 --- a/tests/content-addressed.nix +++ b/tests/content-addressed.nix @@ -29,4 +29,26 @@ rec { outputHashMode = "recursive"; outputHashAlgo = "sha256"; }; + dependentCA = mkDerivation { + name = "dependent"; + buildCommand = '' + echo "building a dependent derivation" + mkdir -p $out + echo ${rootCA}/hello > $out/dep + ''; + __contentAddressed = true; + outputHashMode = "recursive"; + outputHashAlgo = "sha256"; + }; + transitivelyDependentCA = mkDerivation { + name = "transitively-dependent"; + buildCommand = '' + echo "building transitively-dependent" + cat ${dependentCA}/dep + echo ${dependentCA} > $out + ''; + __contentAddressed = true; + outputHashMode = "recursive"; + outputHashAlgo = "sha256"; + }; } diff --git a/tests/content-addressed.sh b/tests/content-addressed.sh index 0ae2852d2..61ec03fe3 100644 --- a/tests/content-addressed.sh +++ b/tests/content-addressed.sh @@ -2,19 +2,26 @@ source common.sh -clearStore -clearCache - -export REMOTE_STORE=file://$cacheDir - drv=$(nix-instantiate --experimental-features ca-derivations ./content-addressed.nix -A rootCA --arg seed 1) nix --experimental-features 'nix-command ca-derivations' show-derivation --derivation "$drv" --arg seed 1 -commonArgs=("--experimental-features" "ca-derivations" "./content-addressed.nix" "-A" "rootCA" "--no-out-link") -out1=$(nix-build "${commonArgs[@]}" ./content-addressed.nix --arg seed 1) -out2=$(nix-build "${commonArgs[@]}" ./content-addressed.nix --arg seed 2) +testDerivation () { + local derivationPath=$1 + local commonArgs=("--experimental-features" "ca-derivations" "./content-addressed.nix" "-A" "$derivationPath" "--no-out-link") + local out1 out2 + out1=$(nix-build "${commonArgs[@]}" --arg seed 1) + out2=$(nix-build "${commonArgs[@]}" --arg seed 2 "${secondSeedArgs[@]}") + test "$out1" == "$out2" +} -test $out1 == $out2 +testDerivation rootCA +# The seed only changes the root derivation, and not its output, so the +# dependent derivations should only need to be built once. +secondSeedArgs=(-j0) +# Don't directly build dependentCA; that way we'll make sure we don't rely on +# dependent derivations always being already built. +#testDerivation dependentCA +testDerivation transitivelyDependentCA nix-instantiate --experimental-features ca-derivations ./content-addressed.nix -A rootCA --arg seed 5 nix-collect-garbage --experimental-features ca-derivations --option keep-derivations true diff --git a/tests/tarball.sh b/tests/tarball.sh index 88a1a07a0..fe65a22e4 100644 --- a/tests/tarball.sh +++ b/tests/tarball.sh @@ -17,7 +17,7 @@ test_tarball() { local compressor="$2" tarball=$TEST_ROOT/tarball.tar$ext - (cd $TEST_ROOT && tar c tarball) | $compressor > $tarball + (cd $TEST_ROOT && tar cf - tarball) | $compressor > $tarball nix-env -f file://$tarball -qa --out-path | grep -q dependencies
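
For reference, a minimal sketch (not part of the patch) of how the new Error serialisers added to src/libutil/serialise.cc, operator<<(Sink &, const Error &) and readError(Source &), might be exercised. It assumes a StringSink whose buffer is reachable as 's' and a StringSource constructed from a string, as used elsewhere in libutil; error positions are not yet transferred (see the FIXMEs in the serialiser).

    #include <cassert>
    #include "serialise.hh"
    #include "error.hh"

    using namespace nix;

    static void errorRoundTrip()
    {
        // Build an error with a formatted hint, as BaseError's fmt-style constructor does.
        Error original("cannot open '%1%'", "/some/example/path");

        // Serialise it using the new operator<<(Sink &, const Error &).
        StringSink sink;
        sink << original;

        // Read it back with the new readError(Source &). Level, name, description,
        // hint and trace hints survive the round trip; positions do not.
        StringSource source(*sink.s);   // assumption: StringSink exposes its buffer as 's'
        Error restored = readError(source);

        // Equal by construction of the round trip above.
        assert(restored.info().name == original.info().name);
    }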