1
1
Fork 0
mirror of https://github.com/NixOS/nix.git synced 2025-11-21 09:49:36 +01:00

lockFlake(): Always compute a NAR hash for inputs

For the top-level flake, we don't need a NAR hash. But for inputs, we
do.

Also, add a test for the lazy behaviour of `nix flake metadata|lock`.
This commit is contained in:
Eelco Dolstra 2025-02-07 14:58:22 +01:00
parent f24ff056cb
commit 9e6b89c92c
7 changed files with 69 additions and 32 deletions

View file

@@ -100,6 +100,20 @@ static StorePath copyInputToStore(
return storePath;
}
/**
 * Decide whether the input's tree can stay lazy or must be copied to
 * the Nix store.
 *
 * For `CopyMode::Lazy` the accessor is always returned as-is. For
 * `CopyMode::RequireLockable` it is returned as-is only when the input
 * is locked or already carries a NAR hash; otherwise (including
 * `CopyMode::RequireStorePath`) the tree is copied to the store and the
 * resulting real path is returned.
 */
static SourcePath maybeCopyInputToStore(
    EvalState & state,
    fetchers::Input & input,
    const fetchers::Input & originalInput,
    ref<SourceAccessor> accessor,
    CopyMode copyMode)
{
    // Lazy mode never copies; RequireLockable skips the copy when the
    // input is already verifiable (locked or has a NAR hash).
    bool keepLazy = copyMode == CopyMode::Lazy
        || (copyMode == CopyMode::RequireLockable
            && (input.isLocked() || input.getNarHash()));

    if (keepLazy)
        return SourcePath(accessor);

    // Copy the tree to the store and hand back its on-disk location.
    auto storePath = copyInputToStore(state, input, originalInput, accessor);
    return state.rootPath(state.store->toRealPath(storePath));
}
static void forceTrivialValue(EvalState & state, Value & value, const PosIdx pos)
{
if (value.isThunk() && value.isTrivial())
@@ -398,7 +412,7 @@ static Flake getFlake(
bool useRegistries,
FlakeCache & flakeCache,
const InputAttrPath & lockRootAttrPath,
bool forceLazy)
CopyMode copyMode)
{
// Fetch a lazy tree first.
auto [accessor, resolvedRef, lockedRef] = fetchOrSubstituteTree(
@@ -423,19 +437,14 @@ static Flake getFlake(
// Re-parse flake.nix from the store.
return readFlake(
state, originalRef, resolvedRef, lockedRef,
forceLazy && lockedRef.input.isLocked()
? SourcePath(accessor)
: // Copy the tree to the store.
state.rootPath(
state.store->toRealPath(
copyInputToStore(state, lockedRef.input, originalRef.input, accessor))),
maybeCopyInputToStore(state, lockedRef.input, originalRef.input, accessor, copyMode),
lockRootAttrPath);
}
Flake getFlake(EvalState & state, const FlakeRef & originalRef, bool useRegistries, bool forceLazy)
Flake getFlake(EvalState & state, const FlakeRef & originalRef, bool useRegistries, CopyMode copyMode)
{
FlakeCache flakeCache;
return getFlake(state, originalRef, useRegistries, flakeCache, {}, forceLazy);
return getFlake(state, originalRef, useRegistries, flakeCache, {}, copyMode);
}
static LockFile readLockFile(
@@ -461,7 +470,7 @@ LockedFlake lockFlake(
auto useRegistries = lockFlags.useRegistries.value_or(settings.useRegistries);
auto flake = getFlake(state, topRef, useRegistries, flakeCache, {}, lockFlags.forceLazy);
auto flake = getFlake(state, topRef, useRegistries, flakeCache, {}, lockFlags.copyMode);
if (lockFlags.applyNixConfig) {
flake.config.apply(settings);
@@ -506,6 +515,13 @@ LockedFlake lockFlake(
explicitCliOverrides.insert(i.first);
}
/* For locking of inputs, we require at least a NAR
hash. I.e. we can't be fully lazy. */
auto inputCopyMode =
lockFlags.copyMode == CopyMode::Lazy
? CopyMode::RequireLockable
: lockFlags.copyMode;
LockFile newLockFile;
std::vector<FlakeRef> parents;
@@ -633,11 +649,10 @@ LockedFlake lockFlake(
flakerefs relative to the parent flake. */
auto getInputFlake = [&]()
{
if (auto resolvedPath = resolveRelativePath()) {
if (auto resolvedPath = resolveRelativePath())
return readFlake(state, *input.ref, *input.ref, *input.ref, *resolvedPath, inputAttrPath);
} else {
return getFlake(state, *input.ref, useRegistries, flakeCache, inputAttrPath, lockFlags.forceLazy);
}
else
return getFlake(state, *input.ref, useRegistries, flakeCache, inputAttrPath, inputCopyMode);
};
/* Do we have an entry in the existing lock file?
@@ -788,11 +803,7 @@ LockedFlake lockFlake(
state, *input.ref, useRegistries, flakeCache);
return {
lockFlags.forceLazy && lockedRef.input.isLocked()
? SourcePath(accessor)
: state.rootPath(
state.store->toRealPath(
copyInputToStore(state, lockedRef.input, input.ref->input, accessor))),
maybeCopyInputToStore(state, lockedRef.input, input.ref->input, accessor, inputCopyMode),
lockedRef
};
}
@@ -904,7 +915,7 @@ LockedFlake lockFlake(
repo, so we should re-read it. FIXME: we could
also just clear the 'rev' field... */
auto prevLockedRef = flake.lockedRef;
flake = getFlake(state, topRef, useRegistries, lockFlags.forceLazy);
flake = getFlake(state, topRef, useRegistries, lockFlags.copyMode);
if (lockFlags.commitLockFile &&
flake.lockedRef.input.getRev() &&

View file

@@ -123,11 +123,20 @@ struct Flake
}
};
/**
 * Controls how a flake input's source tree is materialized when the
 * flake is fetched or locked.
 */
enum struct CopyMode {
//! Always copy the input to the store and return a store path.
RequireStorePath,
//! Ensure that the input is locked or has a NAR hash; only copy to the store if neither holds.
RequireLockable,
//! Never copy; just return a lazy source accessor.
Lazy,
};
Flake getFlake(
EvalState & state,
const FlakeRef & flakeRef,
bool useRegistries,
bool forceLazy = false);
CopyMode copyMode = CopyMode::RequireStorePath);
/**
* Fingerprint of a locked flake; used as a cache key.
@@ -229,7 +238,7 @@ struct LockFlags
/**
* If set, do not copy the flake to the Nix store.
*/
bool forceLazy = false;
CopyMode copyMode = CopyMode::RequireStorePath;
};
LockedFlake lockFlake(

View file

@@ -214,8 +214,12 @@ StorePath Store::addToStore(
auto sink = sourceToSink([&](Source & source) {
LengthSource lengthSource(source);
storePath = addToStoreFromDump(lengthSource, name, fsm, method, hashAlgo, references, repair);
if (settings.warnLargePathThreshold && lengthSource.total >= settings.warnLargePathThreshold)
warn("copied large path '%s' to the store (%s)", path, renderSize(lengthSource.total));
if (settings.warnLargePathThreshold && lengthSource.total >= settings.warnLargePathThreshold) {
static bool failOnLargePath = getEnv("_NIX_TEST_FAIL_ON_LARGE_PATH").value_or("") == "1";
if (failOnLargePath)
throw Error("won't copy large path '%s' to the store (%d)", path, renderSize(lengthSource.total));
warn("copied large path '%s' to the store (%d)", path, renderSize(lengthSource.total));
}
});
dumpPath(path, *sink, fsm, filter);
sink->finish();

View file

@@ -133,7 +133,7 @@ public:
lockFlags.recreateLockFile = updateAll;
lockFlags.writeLockFile = true;
lockFlags.applyNixConfig = true;
lockFlags.forceLazy = true;
lockFlags.copyMode = CopyMode::Lazy;
lockFlake();
}
@@ -166,7 +166,7 @@ struct CmdFlakeLock : FlakeCommand
lockFlags.writeLockFile = true;
lockFlags.failOnUnlocked = true;
lockFlags.applyNixConfig = true;
lockFlags.forceLazy = true;
lockFlags.copyMode = CopyMode::Lazy;
lockFlake();
}
@@ -213,10 +213,14 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON
void run(nix::ref<nix::Store> store) override
{
lockFlags.forceLazy = true;
lockFlags.copyMode = CopyMode::Lazy;
auto lockedFlake = lockFlake();
auto & flake = lockedFlake.flake;
std::optional<StorePath> storePath;
if (flake.lockedRef.input.getNarHash())
storePath = flake.lockedRef.input.computeStorePath(*store);
if (json) {
nlohmann::json j;
if (flake.description)
@@ -237,6 +241,8 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON
j["revCount"] = *revCount;
if (auto lastModified = flake.lockedRef.input.getLastModified())
j["lastModified"] = *lastModified;
if (storePath)
j["path"] = store->printStorePath(*storePath);
j["locks"] = lockedFlake.lockFile.toJSON().first;
if (auto fingerprint = lockedFlake.getFingerprint(store, fetchSettings))
j["fingerprint"] = fingerprint->to_string(HashFormat::Base16, false);
@@ -253,6 +259,10 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON
logger->cout(
ANSI_BOLD "Description:" ANSI_NORMAL " %s",
*flake.description);
if (storePath)
logger->cout(
ANSI_BOLD "Path:" ANSI_NORMAL " %s",
store->printStorePath(*storePath));
if (auto rev = flake.lockedRef.input.getRev())
logger->cout(
ANSI_BOLD "Revision:" ANSI_NORMAL " %s",