author    joerg <joerg@pkgsrc.org>  2020-05-19 13:52:10 +0000
committer joerg <joerg@pkgsrc.org>  2020-05-19 13:52:10 +0000
commit    2dabb0568bdfe45c573c22462d4622af84e12a7a (patch)
tree      7545af283930ab3335c5055281eae8ac94aa8682
parent    2870660d4cfa4c2dbd0c9a2530afb5919c9fb259 (diff)
download  pkgsrc-2dabb0568bdfe45c573c22462d4622af84e12a7a.tar.gz
Fix a number of long-standing race conditions in cmake's autogen
handling. Bump revision.
-rw-r--r--  devel/cmake/Makefile                              |   4
-rw-r--r--  devel/cmake/distinfo                              |   3
-rw-r--r--  devel/cmake/patches/patch-Source_cmWorkerPool.cxx | 138
3 files changed, 142 insertions(+), 3 deletions(-)
diff --git a/devel/cmake/Makefile b/devel/cmake/Makefile
index 8a6e2cc1e6c..62cb02d3d04 100644
--- a/devel/cmake/Makefile
+++ b/devel/cmake/Makefile
@@ -1,6 +1,6 @@
-# $NetBSD: Makefile,v 1.174 2020/05/06 14:04:25 adam Exp $
+# $NetBSD: Makefile,v 1.175 2020/05/19 13:52:10 joerg Exp $
 
-PKGREVISION= 1
+PKGREVISION= 2
 .include "Makefile.common"
 
 COMMENT= Cross platform make
diff --git a/devel/cmake/distinfo b/devel/cmake/distinfo
index 0ba834bef8a..542ede4e8e0 100644
--- a/devel/cmake/distinfo
+++ b/devel/cmake/distinfo
@@ -1,4 +1,4 @@
-$NetBSD: distinfo,v 1.157 2020/04/29 06:47:39 adam Exp $
+$NetBSD: distinfo,v 1.158 2020/05/19 13:52:10 joerg Exp $
 
 SHA1 (cmake-3.17.2.tar.gz) = 4b3ea5249e0d544c5d09a08b0e81f66a4963e5d7
 RMD160 (cmake-3.17.2.tar.gz) = a6d7f219aa85afff7bd6ca0000c0336fdfb17904
@@ -19,6 +19,7 @@ SHA1 (patch-Source_Checks_cm__cxx17__check.cpp) = d5e2708df6fcda078b1b5ea59264c6
 SHA1 (patch-Source_CursesDialog_ccmake.cxx) = 7f6ca6fda5d0db615f04c18efa8ecdd6ef00cb93
 SHA1 (patch-Source_QtDialog_CMakeLists.txt) = c4007da363c5b7c925f1ff345901057f3fbdc4e1
 SHA1 (patch-Source_cmArchiveWrite.cxx) = 1b6a46252bd10618703116ef69e22f8ec5c5f31f
+SHA1 (patch-Source_cmWorkerPool.cxx) = 8eb3faf10767fe9228ed6822c17c96c04f017777
 SHA1 (patch-Utilities_KWIML_CMakeLists.txt) = e4bdf9fc58757e87bf7e3e3e195839eededbc796
 SHA1 (patch-Utilities_std_cm_string__view) = 90bbb578c5628b661a25974d7dd9aa6f5063271f
 SHA1 (patch-bootstrap) = fc1b689bbe705cd888e2bef4debad1a26e5885bd
diff --git a/devel/cmake/patches/patch-Source_cmWorkerPool.cxx b/devel/cmake/patches/patch-Source_cmWorkerPool.cxx
new file mode 100644
index 00000000000..567b9979662
--- /dev/null
+++ b/devel/cmake/patches/patch-Source_cmWorkerPool.cxx
@@ -0,0 +1,138 @@
+$NetBSD: patch-Source_cmWorkerPool.cxx,v 1.1 2020/05/19 13:52:10 joerg Exp $
+
+Redo the locking and the state machine for fence handling in the worker pool.
+
+(1) Any use of a condition variable must hold the corresponding mutex;
+otherwise race conditions occur. This is mandated by the C++ standard.
+
+(2) Introduce a separate CV for the thread that waits for other jobs to
+finish before running a fence. This avoids blindly waking up all other
+workers; the fencing thread is now woken exactly when the outstanding
+jobs are done.
+
+(3) Split waiting for a fence to become runnable from waiting while a
+fence is pending. This avoids problems when more than one fence ends up
+on the queue. The thread that took a fence off the queue is responsible
+for clearing the fence-processing flag. (Sketches of these patterns follow the diff below.)
+
+--- Source/cmWorkerPool.cxx.orig 2020-04-28 14:23:06.000000000 +0000
++++ Source/cmWorkerPool.cxx
+@@ -469,11 +469,9 @@ void cmWorkerPoolWorker::UVProcessStart(
+
+ void cmWorkerPoolWorker::UVProcessFinished()
+ {
+- {
+- std::lock_guard<std::mutex> lock(Proc_.Mutex);
+- if (Proc_.ROP && (Proc_.ROP->IsFinished() || !Proc_.ROP->IsStarted())) {
+- Proc_.ROP.reset();
+- }
++ std::lock_guard<std::mutex> lock(Proc_.Mutex);
++ if (Proc_.ROP && (Proc_.ROP->IsFinished() || !Proc_.ROP->IsStarted())) {
++ Proc_.ROP.reset();
+ }
+ // Notify idling thread
+ Proc_.Condition.notify_one();
+@@ -532,6 +530,7 @@ public:
+ unsigned int JobsProcessing = 0;
+ std::deque<cmWorkerPool::JobHandleT> Queue;
+ std::condition_variable Condition;
++ std::condition_variable ConditionFence;
+ std::vector<std::unique_ptr<cmWorkerPoolWorker>> Workers;
+
+ // -- References
+@@ -593,19 +592,12 @@ bool cmWorkerPoolInternal::Process()
+
+ void cmWorkerPoolInternal::Abort()
+ {
+- bool notifyThreads = false;
+ // Clear all jobs and set abort flag
+- {
+- std::lock_guard<std::mutex> guard(Mutex);
+- if (Processing && !Aborting) {
+- // Register abort and clear queue
+- Aborting = true;
+- Queue.clear();
+- notifyThreads = true;
+- }
+- }
+- if (notifyThreads) {
+- // Wake threads
++ std::lock_guard<std::mutex> guard(Mutex);
++ if (!Aborting) {
++ // Register abort and clear queue
++ Aborting = true;
++ Queue.clear();
+ Condition.notify_all();
+ }
+ }
+@@ -669,7 +661,7 @@ void cmWorkerPoolInternal::Work(unsigned
+ if (Aborting) {
+ break;
+ }
+- // Wait for new jobs
++ // Wait for new jobs on the main CV
+ if (Queue.empty()) {
+ ++WorkersIdle;
+ Condition.wait(uLock);
+@@ -677,20 +669,33 @@ void cmWorkerPoolInternal::Work(unsigned
+ continue;
+ }
+
+- // Check for fence jobs
+- if (FenceProcessing || Queue.front()->IsFence()) {
+- if (JobsProcessing != 0) {
+- Condition.wait(uLock);
+- continue;
+- }
+- // No jobs get processed. Set the fence job processing flag.
+- FenceProcessing = true;
++ // If there is a fence currently active or waiting,
++ // sleep on the main CV and try again.
++ if (FenceProcessing) {
++ Condition.wait(uLock);
++ continue;
+ }
+
+ // Pop next job from queue
+ jobHandle = std::move(Queue.front());
+ Queue.pop_front();
+
++ // Check for fence jobs
++ bool raisedFence = false;
++ if (jobHandle->IsFence()) {
++ FenceProcessing = true;
++ raisedFence = true;
++ // Wait on the Fence CV until all pending jobs are done.
++ while (JobsProcessing != 0 && !Aborting)
++ ConditionFence.wait(uLock);
++ // When aborting, explicitly wake all threads once more, then bail out.
++ if (Aborting) {
++ FenceProcessing = false;
++ Condition.notify_all();
++ break;
++ }
++ }
++
+ // Unlocked scope for job processing
+ ++JobsProcessing;
+ {
+@@ -701,11 +706,17 @@ void cmWorkerPoolInternal::Work(unsigned
+ }
+ --JobsProcessing;
+
+- // Was this a fence job?
+- if (FenceProcessing) {
++ // If this was the thread that entered fence processing
++ // originally, notify all idling workers that the fence
++ // is done.
++ if (raisedFence) {
+ FenceProcessing = false;
+ Condition.notify_all();
+ }
++ // If fence processing is still not done, notify the
++ // fencing worker when all active jobs are done.
++ if (FenceProcessing && JobsProcessing == 0)
++ ConditionFence.notify_all();
+ }
+
+ // Decrement running workers count
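
As a standalone illustration of point (1): the waiting thread must hold
the mutex across the wait and re-test its predicate, and the signalling
thread must update the shared state under the same mutex before
notifying. A minimal sketch with hypothetical names (m, cv, ready);
this is not cmWorkerPool code:

#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

std::mutex m;
std::condition_variable cv;
bool ready = false; // the predicate, guarded by m

void waiter()
{
  std::unique_lock<std::mutex> lock(m); // hold m before and across the wait
  while (!ready) {   // re-test: protects against spurious wakeups
    cv.wait(lock);   // atomically releases m and sleeps
  }
  std::cout << "fence may proceed\n";
}

void signaller()
{
  {
    std::lock_guard<std::mutex> lock(m); // mutate the predicate under m
    ready = true;
  }
  cv.notify_one(); // notifying without the lock held is permitted
}

int main()
{
  std::thread t(waiter);
  signaller();
  t.join();
}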
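
Points (2) and (3) form a small state machine. The following condensed,
self-contained model of the patched Work() loop shows how the separate
ConditionFence CV and the thread-local raisedFence flag interact. Member
names mirror cmWorkerPool for readability, but MiniPool and Job are
invented and the libuv integration is omitted; this is a sketch of the
pattern, not the actual implementation:

#include <condition_variable>
#include <deque>
#include <functional>
#include <mutex>
#include <thread>
#include <vector>

struct Job {
  std::function<void()> Fn;
  bool Fence = false; // a fence waits for all prior jobs to finish
};

class MiniPool {
public:
  explicit MiniPool(unsigned count)
  {
    for (unsigned i = 0; i < count; ++i) {
      Workers.emplace_back([this] { Work(); });
    }
  }

  void Push(Job job)
  {
    std::lock_guard<std::mutex> guard(Mutex);
    Queue.push_back(std::move(job));
    Condition.notify_all();
  }

  ~MiniPool()
  {
    { // mirror the patched Abort(): set flag, clear queue, wake everyone
      std::lock_guard<std::mutex> guard(Mutex);
      Aborting = true;
      Queue.clear();
      Condition.notify_all();
      ConditionFence.notify_all();
    }
    for (std::thread& t : Workers) t.join();
  }

private:
  void Work()
  {
    std::unique_lock<std::mutex> uLock(Mutex);
    for (;;) {
      if (Aborting) break;
      // Sleep on the main CV while there is no work or a fence is active.
      if (Queue.empty() || FenceProcessing) {
        Condition.wait(uLock);
        continue;
      }
      Job job = std::move(Queue.front());
      Queue.pop_front();
      bool raisedFence = false;
      if (job.Fence) {
        FenceProcessing = true; // stops the other workers from dequeuing
        raisedFence = true;
        // Point (2): only this thread sleeps on the fence CV.
        while (JobsProcessing != 0 && !Aborting) {
          ConditionFence.wait(uLock);
        }
        if (Aborting) {
          FenceProcessing = false;
          Condition.notify_all();
          break;
        }
      }
      ++JobsProcessing;
      uLock.unlock(); // run the job without holding the mutex
      job.Fn();
      uLock.lock();
      --JobsProcessing;
      if (raisedFence) {
        // Point (3): the thread that dequeued the fence clears the flag.
        FenceProcessing = false;
        Condition.notify_all();
      }
      if (FenceProcessing && JobsProcessing == 0) {
        ConditionFence.notify_all(); // wake only the fencing worker
      }
    }
  }

  std::mutex Mutex;
  std::condition_variable Condition;
  std::condition_variable ConditionFence;
  std::deque<Job> Queue;
  std::vector<std::thread> Workers;
  unsigned JobsProcessing = 0;
  bool FenceProcessing = false;
  bool Aborting = false;
};

In this model, a fence pushed with Push({fn, true}) runs only after
every previously dequeued job has drained, only the fencing thread
sleeps on ConditionFence while it waits, and because the dequeuing
thread owns the FenceProcessing flag, a second queued fence simply
parks the other workers on the main CV until the first fence clears it.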