Mirror of https://github.com/NixOS/nix.git (synced 2025-11-21 01:39:36 +01:00)
Merge branch 'lazy-flake-commands' into lazy-trees-v2
Commit c891554999. 10 changed files with 93 additions and 31 deletions.
@@ -2461,7 +2461,7 @@ StorePath EvalState::coerceToStorePath(const PosIdx pos, Value & v, NixStringCon
     auto path = coerceToString(pos, v, context, errorCtx, false, false, true).toOwned();
     if (auto storePath = store->maybeParseStorePath(path))
         return *storePath;
-    error<EvalError>("path '%1%' is not in the Nix store", path).withTrace(pos, errorCtx).debugThrow();
+    error<EvalError>("cannot coerce '%s' to a store path because it does not denote a subpath of the Nix store", path).withTrace(pos, errorCtx).debugThrow();
 }

@@ -332,6 +332,13 @@ struct GitArchiveInputScheme : InputScheme
             false,
             "«" + input.to_string() + "»");

+        if (!input.settings->trustTarballsFromGitForges)
+            // FIXME: computing the NAR hash here is wasteful if
+            // copyInputToStore() is just going to hash/copy it as
+            // well.
+            input.attrs.insert_or_assign("narHash",
+                accessor->hashPath(CanonPath::root).to_string(HashFormat::SRI, true));
+
         return {accessor, input};
     }

@@ -104,6 +104,19 @@ static StorePath copyInputToStore(
     return storePath;
 }

+static SourcePath maybeCopyInputToStore(
+    EvalState & state,
+    fetchers::Input & input,
+    const fetchers::Input & originalInput,
+    ref<SourceAccessor> accessor,
+    CopyMode copyMode)
+{
+    return copyMode == CopyMode::Lazy || (copyMode == CopyMode::RequireLockable && (input.isLocked() || input.getNarHash()))
+        ? SourcePath(accessor)
+        : state.storePath(
+            copyInputToStore(state, input, originalInput, accessor));
+}
+
 static void forceTrivialValue(EvalState & state, Value & value, const PosIdx pos)
 {
     if (value.isThunk() && value.isTrivial())
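
The new maybeCopyInputToStore() helper centralizes the lazy-vs-copy decision: CopyMode::Lazy always returns the accessor as a lazy SourcePath, RequireLockable does so only when the input is already locked or carries a NAR hash, and RequireStorePath always copies to the store. A minimal standalone sketch of that decision table; CopyMode mirrors the enum added to flake.hh further down, while InputInfo, needsStoreCopy and the main() driver are hypothetical and for illustration only:

    #include <iostream>

    // Mirrors the CopyMode enum introduced in flake.hh (see further down).
    enum struct CopyMode { RequireStorePath, RequireLockable, Lazy };

    // Hypothetical stand-in for the bits of fetchers::Input that matter here.
    struct InputInfo {
        bool locked = false;
        bool hasNarHash = false;
    };

    // True when the input has to be copied to the store; this is the condition
    // from maybeCopyInputToStore() above, negated.
    bool needsStoreCopy(CopyMode mode, const InputInfo & input)
    {
        bool lockable = input.locked || input.hasNarHash;
        return !(mode == CopyMode::Lazy
            || (mode == CopyMode::RequireLockable && lockable));
    }

    int main()
    {
        InputInfo unlocked{false, false}, locked{true, false};
        std::cout << needsStoreCopy(CopyMode::Lazy, unlocked) << '\n'             // 0: stay lazy
                  << needsStoreCopy(CopyMode::RequireLockable, unlocked) << '\n'  // 1: copy, no NAR hash yet
                  << needsStoreCopy(CopyMode::RequireLockable, locked) << '\n'    // 0: already lockable
                  << needsStoreCopy(CopyMode::RequireStorePath, locked) << '\n';  // 1: always copy
    }
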
@@ -401,7 +414,8 @@ static Flake getFlake(
     const FlakeRef & originalRef,
     bool useRegistries,
     FlakeCache & flakeCache,
-    const InputAttrPath & lockRootAttrPath)
+    const InputAttrPath & lockRootAttrPath,
+    CopyMode copyMode)
 {
     // Fetch a lazy tree first.
     auto [accessor, resolvedRef, lockedRef] = fetchOrSubstituteTree(

@@ -423,17 +437,17 @@ static Flake getFlake(
         lockedRef = lockedRef2;
     }

-    // Copy the tree to the store.
-    auto storePath = copyInputToStore(state, lockedRef.input, originalRef.input, accessor);
-
     // Re-parse flake.nix from the store.
-    return readFlake(state, originalRef, resolvedRef, lockedRef, state.storePath(storePath), lockRootAttrPath);
+    return readFlake(
+        state, originalRef, resolvedRef, lockedRef,
+        maybeCopyInputToStore(state, lockedRef.input, originalRef.input, accessor, copyMode),
+        lockRootAttrPath);
 }

-Flake getFlake(EvalState & state, const FlakeRef & originalRef, bool useRegistries)
+Flake getFlake(EvalState & state, const FlakeRef & originalRef, bool useRegistries, CopyMode copyMode)
 {
     FlakeCache flakeCache;
-    return getFlake(state, originalRef, useRegistries, flakeCache, {});
+    return getFlake(state, originalRef, useRegistries, flakeCache, {}, copyMode);
 }

 static LockFile readLockFile(

@@ -457,7 +471,7 @@ LockedFlake lockFlake(

     auto useRegistries = lockFlags.useRegistries.value_or(settings.useRegistries);

-    auto flake = getFlake(state, topRef, useRegistries, flakeCache, {});
+    auto flake = getFlake(state, topRef, useRegistries, flakeCache, {}, lockFlags.copyMode);

     if (lockFlags.applyNixConfig) {
         flake.config.apply(settings);

@@ -502,6 +516,13 @@ LockedFlake lockFlake(
             explicitCliOverrides.insert(i.first);
     }

+    /* For locking of inputs, we require at least a NAR
+       hash. I.e. we can't be fully lazy. */
+    auto inputCopyMode =
+        lockFlags.copyMode == CopyMode::Lazy
+        ? CopyMode::RequireLockable
+        : lockFlags.copyMode;
+
     LockFile newLockFile;

     std::vector<FlakeRef> parents;
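
Because a lock file entry needs at least a NAR hash, a fully lazy copy mode is downgraded to RequireLockable before inputs are processed; the top-level flake itself can still stay lazy. A hypothetical helper expressing the same downgrade (the name and the constexpr form are illustrative, not part of the patch):

    enum struct CopyMode { RequireStorePath, RequireLockable, Lazy };  // as in flake.hh

    // Hypothetical: the same downgrade as the inputCopyMode expression above.
    constexpr CopyMode effectiveInputCopyMode(CopyMode requested)
    {
        return requested == CopyMode::Lazy ? CopyMode::RequireLockable : requested;
    }

    static_assert(effectiveInputCopyMode(CopyMode::Lazy) == CopyMode::RequireLockable);
    static_assert(effectiveInputCopyMode(CopyMode::RequireStorePath) == CopyMode::RequireStorePath);
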
@@ -632,7 +653,7 @@ LockedFlake lockFlake(
         if (auto resolvedPath = resolveRelativePath()) {
             return readFlake(state, ref, ref, ref, *resolvedPath, inputAttrPath);
         } else {
-            return getFlake(state, ref, useRegistries, flakeCache, inputAttrPath);
+            return getFlake(state, ref, useRegistries, flakeCache, inputAttrPath, inputCopyMode);
         }
     };

@@ -783,10 +804,10 @@ LockedFlake lockFlake(
                 auto [accessor, resolvedRef, lockedRef] = fetchOrSubstituteTree(
                     state, *input.ref, useRegistries, flakeCache);

-                // FIXME: allow input to be lazy.
-                auto storePath = copyInputToStore(state, lockedRef.input, input.ref->input, accessor);
-
-                return {state.storePath(storePath), lockedRef};
+                return {
+                    maybeCopyInputToStore(state, lockedRef.input, input.ref->input, accessor, inputCopyMode),
+                    lockedRef
+                };
             }
         }();

@@ -896,7 +917,7 @@ LockedFlake lockFlake(
                repo, so we should re-read it. FIXME: we could
                also just clear the 'rev' field... */
             auto prevLockedRef = flake.lockedRef;
-            flake = getFlake(state, topRef, useRegistries);
+            flake = getFlake(state, topRef, useRegistries, lockFlags.copyMode);

             if (lockFlags.commitLockFile &&
                 flake.lockedRef.input.getRev() &&

@@ -115,7 +115,20 @@ struct Flake
     }
 };

-Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool useRegistries);
+enum struct CopyMode {
+    //! Copy the input to the store.
+    RequireStorePath,
+    //! Ensure that the input is locked or has a NAR hash.
+    RequireLockable,
+    //! Just return a lazy source accessor.
+    Lazy,
+};
+
+Flake getFlake(
+    EvalState & state,
+    const FlakeRef & flakeRef,
+    bool useRegistries,
+    CopyMode copyMode = CopyMode::RequireStorePath);

 /**
  * Fingerprint of a locked flake; used as a cache key.

@@ -213,6 +226,11 @@ struct LockFlags
      * for those inputs will be ignored.
      */
     std::set<InputAttrPath> inputUpdates;
+
+    /**
+     * If set, do not copy the flake to the Nix store.
+     */
+    CopyMode copyMode = CopyMode::RequireStorePath;
 };

 LockedFlake lockFlake(
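
Taken together, the header changes keep the old behaviour for existing callers: the defaulted copyMode argument on getFlake and the RequireStorePath default in LockFlags mean nothing changes unless a command opts in. A hedged usage fragment, assuming the nix flake headers and namespaces are in scope and that state and flakeRef come from the surrounding command (setup elided):

    // Hedged sketch of a caller; `state` and `flakeRef` are assumed to be
    // provided by the surrounding command.
    void exampleCaller(EvalState & state, const FlakeRef & flakeRef)
    {
        // Old call sites compile unchanged and still copy the tree to the store:
        Flake copied = getFlake(state, flakeRef, /*useRegistries=*/true);

        // Opting in to a lazy source accessor instead of a store copy:
        Flake lazy = getFlake(state, flakeRef, /*useRegistries=*/true, CopyMode::Lazy);

        // Commands that lock a flake set the mode on their LockFlags instead,
        // as the nix flake update/lock/metadata hunks further down do:
        LockFlags lockFlags;
        lockFlags.copyMode = CopyMode::Lazy;
    }
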
@@ -214,8 +214,12 @@ StorePath Store::addToStore(
     auto sink = sourceToSink([&](Source & source) {
         LengthSource lengthSource(source);
         storePath = addToStoreFromDump(lengthSource, name, fsm, method, hashAlgo, references, repair);
-        if (settings.warnLargePathThreshold && lengthSource.total >= settings.warnLargePathThreshold)
-            warn("copied large path '%s' to the store (%s)", path, renderSize(lengthSource.total));
+        if (settings.warnLargePathThreshold && lengthSource.total >= settings.warnLargePathThreshold) {
+            static bool failOnLargePath = getEnv("_NIX_TEST_FAIL_ON_LARGE_PATH").value_or("") == "1";
+            if (failOnLargePath)
+                throw Error("won't copy large path '%s' to the store (%d)", path, renderSize(lengthSource.total));
+            warn("copied large path '%s' to the store (%d)", path, renderSize(lengthSource.total));
+        }
     });
     dumpPath(path, *sink, fsm, filter);
     sink->finish();
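
The _NIX_TEST_FAIL_ON_LARGE_PATH hook follows a common pattern: read an environment variable once into a function-local static, then either fail hard (useful in tests) or merely warn. A self-contained sketch of that pattern outside of Nix; the threshold, the names, and the plain std::getenv/throw calls are illustrative, since Nix uses its own getEnv, Error and warn helpers:

    #include <cstdlib>
    #include <iostream>
    #include <stdexcept>
    #include <string>

    // Illustrative threshold; the real code reads settings.warnLargePathThreshold.
    constexpr std::size_t largePathThreshold = 100 * 1024 * 1024;

    void checkPathSize(const std::string & path, std::size_t total)
    {
        if (largePathThreshold && total >= largePathThreshold) {
            // Read the environment variable once; later calls reuse the cached result.
            static bool failOnLargePath = [] {
                const char * v = std::getenv("_NIX_TEST_FAIL_ON_LARGE_PATH");
                return v && std::string(v) == "1";
            }();
            if (failOnLargePath)
                throw std::runtime_error("won't copy large path '" + path + "' to the store");
            std::cerr << "warning: copied large path '" << path << "' to the store\n";
        }
    }

    int main()
    {
        checkPathSize("/example/big-source", 200 * 1024 * 1024);  // warns, or throws if the env var is "1"
    }
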
@@ -134,6 +134,7 @@ public:
         lockFlags.recreateLockFile = updateAll;
         lockFlags.writeLockFile = true;
         lockFlags.applyNixConfig = true;
+        lockFlags.copyMode = CopyMode::Lazy;

         lockFlake();
     }

@@ -166,6 +167,7 @@ struct CmdFlakeLock : FlakeCommand
         lockFlags.writeLockFile = true;
         lockFlags.failOnUnlocked = true;
         lockFlags.applyNixConfig = true;
+        lockFlags.copyMode = CopyMode::Lazy;

         lockFlake();
     }

@@ -212,11 +214,13 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON

     void run(nix::ref<nix::Store> store) override
     {
+        lockFlags.copyMode = CopyMode::Lazy;
         auto lockedFlake = lockFlake();
         auto & flake = lockedFlake.flake;

-        // Currently, all flakes are in the Nix store via the rootFS accessor.
-        auto storePath = store->printStorePath(store->toStorePath(flake.path.path.abs()).first);
+        std::optional<StorePath> storePath;
+        if (flake.lockedRef.input.getNarHash())
+            storePath = flake.lockedRef.input.computeStorePath(*store);

         if (json) {
             nlohmann::json j;

@@ -238,7 +242,8 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON
                 j["revCount"] = *revCount;
             if (auto lastModified = flake.lockedRef.input.getLastModified())
                 j["lastModified"] = *lastModified;
-            j["path"] = storePath;
+            if (storePath)
+                j["path"] = store->printStorePath(*storePath);
             j["locks"] = lockedFlake.lockFile.toJSON().first;
             if (auto fingerprint = lockedFlake.getFingerprint(store, fetchSettings))
                 j["fingerprint"] = fingerprint->to_string(HashFormat::Base16, false);

@@ -255,9 +260,10 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON
                 logger->cout(
                     ANSI_BOLD "Description:" ANSI_NORMAL " %s",
                     *flake.description);
-            logger->cout(
-                ANSI_BOLD "Path:" ANSI_NORMAL " %s",
-                storePath);
+            if (storePath)
+                logger->cout(
+                    ANSI_BOLD "Path:" ANSI_NORMAL " %s",
+                    store->printStorePath(*storePath));
             if (auto rev = flake.lockedRef.input.getRev())
                 logger->cout(
                     ANSI_BOLD "Revision:" ANSI_NORMAL " %s",
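
With a lazy tree there may be no store path to report, so CmdFlakeMetadata now treats it as optional and only emits the JSON "path" field and the "Path:" line when the input carries a NAR hash. A small standalone sketch of that conditional-reporting pattern; computeStorePath, the placeholder path and the printing are hypothetical simplifications:

    #include <iostream>
    #include <optional>
    #include <string>

    // Hypothetical: with a lazy flake the NAR hash may be unknown, in which
    // case no store path can be computed.
    std::optional<std::string> computeStorePath(bool haveNarHash)
    {
        if (!haveNarHash)
            return std::nullopt;
        return "/nix/store/…-source";  // placeholder, not a real path
    }

    int main()
    {
        auto storePath = computeStorePath(/*haveNarHash=*/false);

        // Mirrors the conditional output above: only print "Path:" when a
        // store path is actually known.
        if (storePath)
            std::cout << "Path: " << *storePath << '\n';
        else
            std::cout << "(no store path: the flake tree is lazy)\n";
    }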