Diffstat (limited to 'src/libstrongswan/threading')
-rw-r--r--  src/libstrongswan/threading/mutex.c   | 21
-rw-r--r--  src/libstrongswan/threading/thread.h  | 28
2 files changed, 40 insertions, 9 deletions
diff --git a/src/libstrongswan/threading/mutex.c b/src/libstrongswan/threading/mutex.c
index f86e781c5..10cf04542 100644
--- a/src/libstrongswan/threading/mutex.c
+++ b/src/libstrongswan/threading/mutex.c
@@ -23,6 +23,7 @@
#include <library.h>
#include <utils/debug.h>
+#include "thread.h"
#include "condvar.h"
#include "mutex.h"
#include "lock_profiler.h"
@@ -70,7 +71,7 @@ struct private_r_mutex_t {
/**
* thread which currently owns mutex
*/
- pthread_t thread;
+ thread_t *thread;
/**
* times the current thread locked the mutex
@@ -125,16 +126,16 @@ METHOD(mutex_t, unlock, void,
METHOD(mutex_t, lock_r, void,
private_r_mutex_t *this)
{
- pthread_t self = pthread_self();
+ thread_t *self = thread_current();
- if (pthread_equal(this->thread, self))
+ if (cas_ptr(&this->thread, self, self))
{
this->times++;
}
else
{
lock(&this->generic);
- this->thread = self;
+ cas_ptr(&this->thread, NULL, self);
this->times = 1;
}
}
@@ -144,7 +145,7 @@ METHOD(mutex_t, unlock_r, void,
{
if (--this->times == 0)
{
- memset(&this->thread, 0, sizeof(this->thread));
+ cas_ptr(&this->thread, thread_current(), NULL);
unlock(&this->generic);
}
}
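
The lock_r()/unlock_r() hunks replace the pthread_equal() ownership test with an atomic operation on a thread_t pointer: comparing the owner field against the current thread with a no-op swap answers "do I already hold this mutex?" without racing against a concurrent writer of that field. Below is a minimal standalone sketch of the same pattern, assuming cas_ptr() behaves like an atomic compare-and-swap that returns whether the exchange took place; rlock_t, cas_ptr_sketch(), rlock_lock() and rlock_unlock() are illustrative names, not strongSwan API.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

/* illustrative stand-in for a cas_ptr()-style helper: atomically replace
 * *location with newval iff it currently equals oldval, return success */
static bool cas_ptr_sketch(void **location, void *oldval, void *newval)
{
	return __atomic_compare_exchange_n(location, &oldval, newval, false,
									   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

typedef struct {
	pthread_mutex_t generic;	/* underlying non-recursive lock */
	void *owner;				/* identity of the owning thread, NULL if free */
	unsigned times;				/* recursion depth of the owner */
} rlock_t;

static void rlock_lock(rlock_t *this, void *self)
{
	/* comparing owner against self with a no-op swap is an atomic
	 * "do I already own this?" check */
	if (cas_ptr_sketch(&this->owner, self, self))
	{
		this->times++;
	}
	else
	{
		pthread_mutex_lock(&this->generic);
		/* a free lock always has a NULL owner, so publish ourselves */
		cas_ptr_sketch(&this->owner, NULL, self);
		this->times = 1;
	}
}

static void rlock_unlock(rlock_t *this, void *self)
{
	if (--this->times == 0)
	{
		/* clear ownership before releasing the underlying lock */
		cas_ptr_sketch(&this->owner, self, NULL);
		pthread_mutex_unlock(&this->generic);
	}
}

The NULL/self transitions mirror the hunks above: an unlocked mutex always carries a NULL owner, so a thread that has just acquired the generic lock can install itself without further checks.
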
@@ -220,14 +221,15 @@ METHOD(condvar_t, wait_, void,
if (mutex->recursive)
{
private_r_mutex_t* recursive = (private_r_mutex_t*)mutex;
+ thread_t *self = thread_current();
u_int times;
/* keep track of the number of times this thread locked the mutex */
times = recursive->times;
/* mutex owner gets cleared during condvar wait */
- memset(&recursive->thread, 0, sizeof(recursive->thread));
+ cas_ptr(&recursive->thread, self, NULL);
pthread_cond_wait(&this->condvar, &mutex->mutex);
- recursive->thread = pthread_self();
+ cas_ptr(&recursive->thread, NULL, self);
recursive->times = times;
}
else
@@ -253,13 +255,14 @@ METHOD(condvar_t, timed_wait_abs, bool,
if (mutex->recursive)
{
private_r_mutex_t* recursive = (private_r_mutex_t*)mutex;
+ thread_t *self = thread_current();
u_int times;
times = recursive->times;
- memset(&recursive->thread, 0, sizeof(recursive->thread));
+ cas_ptr(&recursive->thread, self, NULL);
timed_out = pthread_cond_timedwait(&this->condvar, &mutex->mutex,
&ts) == ETIMEDOUT;
- recursive->thread = pthread_self();
+ cas_ptr(&recursive->thread, NULL, self);
recursive->times = times;
}
else
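
Both condvar hunks follow the same save/clear/restore pattern: pthread_cond_wait() and pthread_cond_timedwait() drop the underlying mutex, so another thread may legitimately become the owner while we sleep, and the recursion count plus ownership have to be stashed beforehand and reinstated once the wait returns with the mutex re-acquired. Sketched here with the hypothetical helpers from the previous example (again illustrative only, not strongSwan code):

#include <pthread.h>

/* wait on a condvar paired with the sketch's recursive lock; owner and
 * recursion depth are saved across the wait, mirroring the hunks above */
static void rlock_cond_wait(pthread_cond_t *condvar, rlock_t *lock, void *self)
{
	unsigned times = lock->times;	/* remember our recursion depth */

	/* while we are blocked, another thread may acquire and own the lock */
	cas_ptr_sketch(&lock->owner, self, NULL);
	pthread_cond_wait(condvar, &lock->generic);

	/* the underlying mutex is held again: re-publish ownership */
	cas_ptr_sketch(&lock->owner, NULL, self);
	lock->times = times;
}
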
diff --git a/src/libstrongswan/threading/thread.h b/src/libstrongswan/threading/thread.h
index 8d3c30e9b..6abb83411 100644
--- a/src/libstrongswan/threading/thread.h
+++ b/src/libstrongswan/threading/thread.h
@@ -189,4 +189,32 @@ void threads_init();
*/
void threads_deinit();
+
+#ifdef __APPLE__
+
+/*
+ * While select() is a cancellation point, it seems that OS X does not honor
+ * pending cancellation points when entering the function. We manually test for
+ * and honor pending cancellation requests, but this obviously can't prevent
+ * some race conditions where the cancellation happens after the check,
+ * but before the select.
+ */
+static inline int precancellable_select(int nfds, fd_set *restrict readfds,
+ fd_set *restrict writefds, fd_set *restrict errorfds,
+ struct timeval *restrict timeout)
+{
+ if (thread_cancelability(TRUE))
+ {
+ thread_cancellation_point();
+ }
+ else
+ {
+ thread_cancelability(FALSE);
+ }
+ return select(nfds, readfds, writefds, errorfds, timeout);
+}
+#define select precancellable_select
+
+#endif /* __APPLE__ */
+
#endif /** THREADING_THREAD_H_ @} */
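
Because of the #define at the end of the new block, every select() call in a unit that includes thread.h goes through precancellable_select() on OS X, so a cancellation requested while the thread was busy elsewhere is honored before it blocks. A hedged usage sketch follows, assuming the usual thread_cancelability() idiom around blocking calls; wait_readable() is illustrative and not code from the tree.

#include <stdbool.h>
#include <sys/select.h>

#include <threading/thread.h>

/* block until fd becomes readable while staying responsive to cancellation;
 * on __APPLE__ the select() below expands to precancellable_select(), which
 * additionally honors a cancellation that is already pending on entry */
static int wait_readable(int fd)
{
	fd_set fds;
	bool old;
	int ret;

	FD_ZERO(&fds);
	FD_SET(fd, &fds);

	old = thread_cancelability(true);	/* cancelable while blocked */
	ret = select(fd + 1, &fds, NULL, NULL, NULL);
	thread_cancelability(old);			/* restore the previous state */

	return ret;
}
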