nix/src/libstore/references.cc
Latest commit: 2e3ebfb829 by Sergei Zimmerman, 2025-08-08 10:30:09 +03:00

    libutil: Move references.{hh,cc} to libstore

    The implicit dependency on refLength (which is StorePath::HashLen)
    is not good. Also, the companion tests and benchmarks are already
    in libstore-tests.


#include "nix/store/references.hh"
#include "nix/store/path.hh"
#include "nix/util/hash.hh"
#include "nix/util/base-nix-32.hh"

#include <map>
#include <cstdlib>
#include <mutex>
#include <algorithm>

namespace nix {
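
/* A reference is the hash part of a store path: 32 characters in
   Nix's base-32 alphabet, i.e. the "<hash>" in
   "/nix/store/<hash>-<name>". */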
static constexpr auto refLength = StorePath::HashLen;
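
/* Scan 's' for occurrences of the hash parts in 'hashes'. Each hash
   found is moved from 'hashes' to 'seen', so every reference is
   reported at most once. A candidate is any window of refLength
   consecutive base-32 characters. */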
static void search(std::string_view s, StringSet & hashes, StringSet & seen)
{
    for (size_t i = 0; i + refLength <= s.size();) {
        int j;
        bool match = true;
        /* Check the window backwards: the first non-base-32 character
           found lets us skip past it, since no window containing it
           can be a hash. */
        for (j = refLength - 1; j >= 0; --j)
            if (!BaseNix32::lookupReverse(s[i + j])) {
                i += j + 1;
                match = false;
                break;
            }
        if (!match)
            continue;
        std::string ref(s.substr(i, refLength));
        if (hashes.erase(ref)) {
            debug("found reference to '%1%' at offset '%2%'", ref, i);
            seen.insert(ref);
        }
        ++i;
    }
}

void RefScanSink::operator()(std::string_view data)
{
    /* It's possible that a reference spans the previous and current
       fragment, so search in the concatenation of the tail of the
       previous fragment and the start of the current fragment. */
    auto s = tail;
    auto tailLen = std::min(data.size(), refLength);
    s.append(data.data(), tailLen);
    search(s, hashes, seen);

    search(data, hashes, seen);

    /* Remember the last (at most) refLength bytes, so that a
       reference spanning into the next fragment can still be found. */
    auto rest = refLength - tailLen;
    if (rest < tail.size())
        tail = tail.substr(tail.size() - rest);
    tail.append(data.data() + data.size() - tailLen, tailLen);
}
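
/* Usage sketch (assuming the RefScanSink interface declared in
   references.hh, with a constructor taking the set of hash parts to
   look for and getResult() returning the ones actually seen):

       RefScanSink scanner{StringSet{hashPart1, hashPart2}};
       scanner(chunk1);   // fragments may split a reference anywhere
       scanner(chunk2);
       StringSet found = scanner.getResult();
*/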

RewritingSink::RewritingSink(const std::string & from, const std::string & to, Sink & nextSink)
    : RewritingSink({{from, to}}, nextSink)
{
}

RewritingSink::RewritingSink(const StringMap & rewrites, Sink & nextSink)
    : rewrites(rewrites)
    , nextSink(nextSink)
{
    /* Only length-preserving rewrites are supported: they leave the
       size of the stream and all byte offsets unchanged. */
    std::string::size_type maxRewriteSize = 0;
    for (auto & [from, to] : rewrites) {
        assert(from.size() == to.size());
        maxRewriteSize = std::max(maxRewriteSize, from.size());
    }
    this->maxRewriteSize = maxRewriteSize;
}
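
/* Usage sketch: rewrite one same-length string while streaming to a
   downstream sink (StringSink here is an assumption, from
   nix/util/serialise.hh):

       StringSink out;
       RewritingSink rw{oldHashPart, newHashPart, out};
       rw(part1);
       rw(part2);
       rw.flush();        // emit the held-back tail
       // out.s now holds the rewritten stream
*/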

void RewritingSink::operator()(std::string_view data)
{
    std::string s(prev);
    s.append(data);

    s = rewriteStrings(s, rewrites);

    /* Hold back the last maxRewriteSize - 1 bytes: a rewrite pattern
       could start there and be completed by the next fragment. */
    prev = s.size() < maxRewriteSize ? s
           : maxRewriteSize == 0 ? ""
                                 : std::string(s, s.size() - maxRewriteSize + 1, maxRewriteSize - 1);

    auto consumed = s.size() - prev.size();

    pos += consumed;

    if (consumed)
        nextSink(s.substr(0, consumed));
}

void RewritingSink::flush()
{
    if (prev.empty())
        return;
    pos += prev.size();
    nextSink(prev);
    prev.clear();
}

HashModuloSink::HashModuloSink(HashAlgorithm ha, const std::string & modulus)
    : hashSink(ha)
    /* Replace every occurrence of the modulus (e.g. the hash part of
       a self-referencing store path) with zero bytes of the same
       length before hashing. */
    , rewritingSink(modulus, std::string(modulus.size(), 0), hashSink)
{
}

void HashModuloSink::operator()(std::string_view data)
{
    rewritingSink(data);
}

HashResult HashModuloSink::finish()
{
    rewritingSink.flush();

    /* Hash the positions of the self-references. This ensures that a
       NAR with self-references and a NAR with some of the
       self-references already zeroed out do not produce a hash
       collision. FIXME: proof. */
    for (auto & pos : rewritingSink.matches)
        hashSink(fmt("|%d", pos));

    auto h = hashSink.finish();
    return {.hash = h.hash, .numBytesDigested = rewritingSink.pos};
}
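
/* Usage sketch: hashing a NAR "modulo" a self-reference (the
   HashAlgorithm::SHA256 spelling is assumed from nix/util/hash.hh):

       HashModuloSink sink(HashAlgorithm::SHA256, selfHashPart);
       sink(narData);
       auto res = sink.finish();   // res.hash, with self-references zeroed
*/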
} // namespace nix