
Merge remote-tracking branch 'detsys/detsys-main' into lazy-trees-tmp

Eelco Dolstra 2025-04-23 12:25:28 +02:00
commit a6faa69fc8
109 changed files with 1219 additions and 546 deletions

View file

@@ -68,7 +68,6 @@ DerivationOptions DerivationOptions::fromParsedDerivation(const ParsedDerivation
throw Error("attribute '%s' must be a list of strings", name);
res.insert(j->get<std::string>());
}
checks.disallowedRequisites = res;
return res;
}
return {};

View file

@@ -1368,7 +1368,7 @@ Derivation Derivation::fromJSON(
for (auto & [outputName, output] : getObject(valueAt(json, "outputs"))) {
res.outputs.insert_or_assign(
outputName,
DerivationOutput::fromJSON(store, res.name, outputName, output));
DerivationOutput::fromJSON(store, res.name, outputName, output, xpSettings));
}
} catch (Error & e) {
e.addTrace({}, "while reading key 'outputs'");

View file

@@ -22,10 +22,8 @@
#include <curl/curl.h>
#include <algorithm>
#include <cmath>
#include <cstring>
#include <iostream>
#include <queue>
#include <random>
#include <thread>
@@ -95,7 +93,7 @@ struct curlFileTransfer : public FileTransfer
: fileTransfer(fileTransfer)
, request(request)
, act(*logger, lvlTalkative, actFileTransfer,
request.post ? "" : fmt(request.data ? "uploading '%s'" : "downloading '%s'", request.uri),
fmt("%sing '%s'", request.verb(), request.uri),
{request.uri}, request.parentAct)
, callback(std::move(callback))
, finalSink([this](std::string_view data) {
@@ -272,19 +270,11 @@ struct curlFileTransfer : public FileTransfer
return getInterrupted();
}
int silentProgressCallback(curl_off_t dltotal, curl_off_t dlnow)
{
return getInterrupted();
}
static int progressCallbackWrapper(void * userp, curl_off_t dltotal, curl_off_t dlnow, curl_off_t ultotal, curl_off_t ulnow)
{
return ((TransferItem *) userp)->progressCallback(dltotal, dlnow);
}
static int silentProgressCallbackWrapper(void * userp, curl_off_t dltotal, curl_off_t dlnow, curl_off_t ultotal, curl_off_t ulnow)
{
return ((TransferItem *) userp)->silentProgressCallback(dltotal, dlnow);
auto & item = *static_cast<TransferItem *>(userp);
auto isUpload = bool(item.request.data);
return item.progressCallback(isUpload ? ultotal : dltotal, isUpload ? ulnow : dlnow);
}
static int debugCallback(CURL * handle, curl_infotype type, char * data, size_t size, void * userptr)
@@ -353,10 +343,7 @@ struct curlFileTransfer : public FileTransfer
curl_easy_setopt(req, CURLOPT_HEADERFUNCTION, TransferItem::headerCallbackWrapper);
curl_easy_setopt(req, CURLOPT_HEADERDATA, this);
if (request.post)
curl_easy_setopt(req, CURLOPT_XFERINFOFUNCTION, silentProgressCallbackWrapper);
else
curl_easy_setopt(req, CURLOPT_XFERINFOFUNCTION, progressCallbackWrapper);
curl_easy_setopt(req, CURLOPT_XFERINFOFUNCTION, progressCallbackWrapper);
curl_easy_setopt(req, CURLOPT_XFERINFODATA, this);
curl_easy_setopt(req, CURLOPT_NOPROGRESS, 0);
@@ -449,8 +436,7 @@ struct curlFileTransfer : public FileTransfer
if (httpStatus == 304 && result.etag == "")
result.etag = request.expectedETag;
if (!request.post)
act.progress(result.bodySize, result.bodySize);
act.progress(result.bodySize, result.bodySize);
done = true;
callback(std::move(result));
}
@@ -539,6 +525,8 @@ struct curlFileTransfer : public FileTransfer
warn("%s; retrying from offset %d in %d ms", exc.what(), writtenToSink, ms);
else
warn("%s; retrying in %d ms", exc.what(), ms);
decompressionSink.reset();
errorSink.reset();
embargo = std::chrono::steady_clock::now() + std::chrono::milliseconds(ms);
fileTransfer.enqueueItem(shared_from_this());
}
@@ -791,10 +779,6 @@ struct curlFileTransfer : public FileTransfer
S3Helper s3Helper(profile, region, scheme, endpoint);
Activity act(*logger, lvlTalkative, actFileTransfer,
fmt("downloading '%s'", request.uri),
{request.uri}, request.parentAct);
// FIXME: implement ETag
auto s3Res = s3Helper.getObject(bucketName, key);
FileTransferResult res;

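The filetransfer.cc hunks above route both uploads and downloads through a single CURLOPT_XFERINFOFUNCTION callback, selecting the ul*/dl* counters based on whether the request carries a body. A minimal standalone sketch of that libcurl pattern, assuming a hypothetical Transfer struct and progress() method in place of Nix's TransferItem:

#include <curl/curl.h>
#include <cstdio>
#include <optional>
#include <string>

// Hypothetical stand-in for Nix's TransferItem: holds the upload body, if any.
struct Transfer {
    std::optional<std::string> uploadData; // set => this transfer is an upload

    int progress(curl_off_t total, curl_off_t now) {
        std::fprintf(stderr, "%s %lld/%lld bytes\n",
                     uploadData ? "uploaded" : "downloaded",
                     (long long) now, (long long) total);
        return 0; // non-zero return aborts the transfer
    }
};

// One wrapper serves both directions: pick the upload or download counters here.
static int xferInfoCallback(void * userp, curl_off_t dltotal, curl_off_t dlnow,
                            curl_off_t ultotal, curl_off_t ulnow)
{
    auto & t = *static_cast<Transfer *>(userp);
    bool isUpload = bool(t.uploadData);
    return t.progress(isUpload ? ultotal : dltotal, isUpload ? ulnow : dlnow);
}

int main()
{
    Transfer t;
    CURL * req = curl_easy_init();
    curl_easy_setopt(req, CURLOPT_URL, "https://example.org/");
    curl_easy_setopt(req, CURLOPT_XFERINFOFUNCTION, xferInfoCallback);
    curl_easy_setopt(req, CURLOPT_XFERINFODATA, &t);
    curl_easy_setopt(req, CURLOPT_NOPROGRESS, 0L); // enable the progress callback
    curl_easy_perform(req);
    curl_easy_cleanup(req);
}
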
View file

@@ -280,21 +280,21 @@ template<> void BaseSetting<SandboxMode>::convertToArg(Args & args, const std::s
.aliases = aliases,
.description = "Enable sandboxing.",
.category = category,
.handler = {[this]() { override(smEnabled); }}
.handler = {[this]() { override(smEnabled); }},
});
args.addFlag({
.longName = "no-" + name,
.aliases = aliases,
.description = "Disable sandboxing.",
.category = category,
.handler = {[this]() { override(smDisabled); }}
.handler = {[this]() { override(smDisabled); }},
});
args.addFlag({
.longName = "relaxed-" + name,
.aliases = aliases,
.description = "Enable sandboxing, but allow builds to disable it.",
.category = category,
.handler = {[this]() { override(smRelaxed); }}
.handler = {[this]() { override(smRelaxed); }},
});
}

View file

@@ -77,7 +77,7 @@ struct FileTransferRequest
FileTransferRequest(std::string_view uri)
: uri(uri), parentAct(getCurActivity()) { }
std::string verb()
std::string verb() const
{
return data ? "upload" : "download";
}

View file

@@ -1,6 +1,5 @@
include_dirs += include_directories('../..')
headers += files(
'fchmodat2-compat.hh',
'personality.hh',
)

View file

@@ -105,28 +105,31 @@ ref<Store> Machine::openStore() const
static std::vector<std::string> expandBuilderLines(const std::string & builders)
{
std::vector<std::string> result;
for (auto line : tokenizeString<std::vector<std::string>>(builders, "\n;")) {
trim(line);
for (auto line : tokenizeString<std::vector<std::string>>(builders, "\n")) {
line.erase(std::find(line.begin(), line.end(), '#'), line.end());
if (line.empty()) continue;
for (auto entry : tokenizeString<std::vector<std::string>>(line, ";")) {
entry = trim(entry);
if (line[0] == '@') {
const std::string path = trim(std::string(line, 1));
std::string text;
try {
text = readFile(path);
} catch (const SysError & e) {
if (e.errNo != ENOENT)
throw;
debug("cannot find machines file '%s'", path);
if (entry.empty()) {
// skip blank entries
} else if (entry[0] == '@') {
const std::string path = trim(std::string_view{entry}.substr(1));
std::string text;
try {
text = readFile(path);
} catch (const SysError & e) {
if (e.errNo != ENOENT)
throw;
debug("cannot find machines file '%s'", path);
continue;
}
const auto entrys = expandBuilderLines(text);
result.insert(end(result), begin(entrys), end(entrys));
} else {
result.emplace_back(entry);
}
const auto lines = expandBuilderLines(text);
result.insert(end(result), begin(lines), end(lines));
continue;
}
result.emplace_back(line);
}
return result;
}

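The rewritten expandBuilderLines above splits the builders setting on newlines first, strips '#' comments from the whole line, and only then splits on ';' and expands '@file' entries recursively, so text after a ';' inside a comment is no longer picked up as an entry. A simplified sketch of the same parsing order using only the standard library; splitOn, trimCopy and readFileOrEmpty are hypothetical helpers standing in for Nix's tokenizeString, trim and readFile:

#include <algorithm>
#include <cctype>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>

// Hypothetical helper: returns "" if the file cannot be read.
static std::string readFileOrEmpty(const std::string & path) {
    std::ifstream in(path);
    std::ostringstream out;
    out << in.rdbuf();
    return out.str();
}

static std::string trimCopy(std::string s) {
    auto notSpace = [](unsigned char c) { return !std::isspace(c); };
    s.erase(s.begin(), std::find_if(s.begin(), s.end(), notSpace));
    s.erase(std::find_if(s.rbegin(), s.rend(), notSpace).base(), s.end());
    return s;
}

static std::vector<std::string> splitOn(const std::string & s, char sep) {
    std::vector<std::string> out;
    std::stringstream ss(s);
    for (std::string part; std::getline(ss, part, sep); )
        out.push_back(part);
    return out;
}

// Comments are stripped per line, then entries are split on ';',
// then '@file' entries are expanded recursively.
static std::vector<std::string> expandBuilders(const std::string & builders) {
    std::vector<std::string> result;
    for (auto line : splitOn(builders, '\n')) {
        line.erase(std::find(line.begin(), line.end(), '#'), line.end());
        for (auto entry : splitOn(line, ';')) {
            entry = trimCopy(entry);
            if (entry.empty()) continue;
            if (entry[0] == '@') {
                auto sub = expandBuilders(readFileOrEmpty(trimCopy(entry.substr(1))));
                result.insert(result.end(), sub.begin(), sub.end());
            } else
                result.push_back(entry);
        }
    }
    return result;
}
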
View file

@@ -160,7 +160,10 @@ ref<Aws::Client::ClientConfiguration> S3Helper::makeConfig(
S3Helper::FileTransferResult S3Helper::getObject(
const std::string & bucketName, const std::string & key)
{
debug("fetching 's3://%s/%s'...", bucketName, key);
std::string uri = "s3://" + bucketName + "/" + key;
Activity act(*logger, lvlTalkative, actFileTransfer,
fmt("downloading '%s'", uri),
Logger::Fields{uri}, getCurActivity());
auto request =
Aws::S3::Model::GetObjectRequest()
@@ -171,6 +174,22 @@ S3Helper::FileTransferResult S3Helper::getObject(
return Aws::New<std::stringstream>("STRINGSTREAM");
});
size_t bytesDone = 0;
size_t bytesExpected = 0;
request.SetDataReceivedEventHandler([&](const Aws::Http::HttpRequest * req, Aws::Http::HttpResponse * resp, long long l) {
if (!bytesExpected && resp->HasHeader("Content-Length")) {
if (auto length = string2Int<size_t>(resp->GetHeader("Content-Length"))) {
bytesExpected = *length;
}
}
bytesDone += l;
act.progress(bytesDone, bytesExpected);
});
request.SetContinueRequestHandler([](const Aws::Http::HttpRequest*) {
return !isInterrupted();
});
FileTransferResult res;
auto now1 = std::chrono::steady_clock::now();
@@ -180,6 +199,8 @@ S3Helper::FileTransferResult S3Helper::getObject(
auto result = checkAws(fmt("AWS error fetching '%s'", key),
client->GetObject(request));
act.progress(result.GetContentLength(), result.GetContentLength());
res.data = decompress(result.GetContentEncoding(),
dynamic_cast<std::stringstream &>(result.GetBody()).str());
@@ -307,11 +328,35 @@ struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStoreConfig, public virtual
std::shared_ptr<TransferManager> transferManager;
std::once_flag transferManagerCreated;
struct AsyncContext : public Aws::Client::AsyncCallerContext
{
mutable std::mutex mutex;
mutable std::condition_variable cv;
const Activity & act;
void notify() const
{
cv.notify_one();
}
void wait() const
{
std::unique_lock<std::mutex> lk(mutex);
cv.wait(lk);
}
AsyncContext(const Activity & act) : act(act) {}
};
void uploadFile(const std::string & path,
std::shared_ptr<std::basic_iostream<char>> istream,
const std::string & mimeType,
const std::string & contentEncoding)
{
std::string uri = "s3://" + bucketName + "/" + path;
Activity act(*logger, lvlTalkative, actFileTransfer,
fmt("uploading '%s'", uri),
Logger::Fields{uri}, getCurActivity());
istream->seekg(0, istream->end);
auto size = istream->tellg();
istream->seekg(0, istream->beg);
@@ -330,16 +375,25 @@ struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStoreConfig, public virtual
transferConfig.bufferSize = bufferSize;
transferConfig.uploadProgressCallback =
[](const TransferManager *transferManager,
const std::shared_ptr<const TransferHandle>
&transferHandle)
[](const TransferManager * transferManager,
const std::shared_ptr<const TransferHandle> & transferHandle)
{
//FIXME: find a way to properly abort the multipart upload.
//checkInterrupt();
debug("upload progress ('%s'): '%d' of '%d' bytes",
transferHandle->GetKey(),
transferHandle->GetBytesTransferred(),
transferHandle->GetBytesTotalSize());
auto context = std::dynamic_pointer_cast<const AsyncContext>(transferHandle->GetContext());
size_t bytesDone = transferHandle->GetBytesTransferred();
size_t bytesTotal = transferHandle->GetBytesTotalSize();
try {
checkInterrupt();
context->act.progress(bytesDone, bytesTotal);
} catch (...) {
context->notify();
}
};
transferConfig.transferStatusUpdatedCallback =
[](const TransferManager * transferManager,
const std::shared_ptr<const TransferHandle> & transferHandle)
{
auto context = std::dynamic_pointer_cast<const AsyncContext>(transferHandle->GetContext());
context->notify();
};
transferManager = TransferManager::Create(transferConfig);
@@ -353,29 +407,51 @@ struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStoreConfig, public virtual
if (contentEncoding != "")
throw Error("setting a content encoding is not supported with S3 multi-part uploads");
auto context = std::make_shared<AsyncContext>(act);
std::shared_ptr<TransferHandle> transferHandle =
transferManager->UploadFile(
istream, bucketName, path, mimeType,
Aws::Map<Aws::String, Aws::String>(),
nullptr /*, contentEncoding */);
context /*, contentEncoding */);
transferHandle->WaitUntilFinished();
TransferStatus status = transferHandle->GetStatus();
while (status == TransferStatus::IN_PROGRESS || status == TransferStatus::NOT_STARTED) {
if (!isInterrupted()) {
context->wait();
} else {
transferHandle->Cancel();
transferHandle->WaitUntilFinished();
}
status = transferHandle->GetStatus();
}
act.progress(transferHandle->GetBytesTransferred(), transferHandle->GetBytesTotalSize());
if (transferHandle->GetStatus() == TransferStatus::FAILED)
if (status == TransferStatus::FAILED)
throw Error("AWS error: failed to upload 's3://%s/%s': %s",
bucketName, path, transferHandle->GetLastError().GetMessage());
if (transferHandle->GetStatus() != TransferStatus::COMPLETED)
if (status != TransferStatus::COMPLETED)
throw Error("AWS error: transfer status of 's3://%s/%s' in unexpected state",
bucketName, path);
} else {
act.progress(0, size);
auto request =
Aws::S3::Model::PutObjectRequest()
.WithBucket(bucketName)
.WithKey(path);
size_t bytesSent = 0;
request.SetDataSentEventHandler([&](const Aws::Http::HttpRequest * req, long long l) {
bytesSent += l;
act.progress(bytesSent, size);
});
request.SetContinueRequestHandler([](const Aws::Http::HttpRequest*) {
return !isInterrupted();
});
request.SetContentType(mimeType);
if (contentEncoding != "")
@@ -385,6 +461,8 @@ struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStoreConfig, public virtual
auto result = checkAws(fmt("AWS error uploading '%s'", path),
s3Helper.client->PutObject(request));
act.progress(size, size);
}
auto now2 = std::chrono::steady_clock::now();

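The S3 hunks above report byte-level progress through the AWS SDK's per-request event handlers and use the continue handler to stop on interruption. A minimal sketch of those hooks on a GetObject call; the bucket, key and the atomic interrupted flag are placeholders, and stderr stands in for Nix's Activity-based progress logging:

#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetObjectRequest.h>
#include <atomic>
#include <cstdio>

// Hypothetical interrupt flag standing in for Nix's isInterrupted().
static std::atomic<bool> interrupted{false};

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3Client client; // default credentials and region

        auto request = Aws::S3::Model::GetObjectRequest()
            .WithBucket("example-bucket") // placeholder
            .WithKey("example-key");      // placeholder

        long long bytesDone = 0;

        // Called as response data arrives; 'amount' is the size of the chunk just read.
        request.SetDataReceivedEventHandler(
            [&](const Aws::Http::HttpRequest *, Aws::Http::HttpResponse *, long long amount) {
                bytesDone += amount;
                std::fprintf(stderr, "downloaded %lld bytes\n", bytesDone);
            });

        // Returning false makes the SDK abort the transfer.
        request.SetContinueRequestHandler(
            [](const Aws::Http::HttpRequest *) { return !interrupted.load(); });

        auto outcome = client.GetObject(request);
        if (!outcome.IsSuccess())
            std::fprintf(stderr, "error: %s\n", outcome.GetError().GetMessage().c_str());
    }
    Aws::ShutdownAPI(options);
}
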
View file

@@ -42,7 +42,7 @@
/* Includes required for chroot support. */
#ifdef __linux__
# include "nix/store/fchmodat2-compat.hh"
# include "linux/fchmodat2-compat.hh"
# include <sys/ioctl.h>
# include <net/if.h>
# include <netinet/ip.h>