Diffstat (limited to 'src/libstrongswan/threading/rwlock.c')
-rw-r--r--  src/libstrongswan/threading/rwlock.c  230
1 file changed, 202 insertions(+), 28 deletions(-)
diff --git a/src/libstrongswan/threading/rwlock.c b/src/libstrongswan/threading/rwlock.c
index 15dc0b334..176445705 100644
--- a/src/libstrongswan/threading/rwlock.c
+++ b/src/libstrongswan/threading/rwlock.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008-2009 Tobias Brunner
+ * Copyright (C) 2008-2012 Tobias Brunner
* Copyright (C) 2008 Martin Willi
* Hochschule fuer Technik Rapperswil
*
@@ -18,14 +18,17 @@
#include <pthread.h>
#include <library.h>
-#include <debug.h>
+#include <utils/debug.h>
#include "rwlock.h"
+#include "rwlock_condvar.h"
+#include "thread.h"
#include "condvar.h"
#include "mutex.h"
#include "lock_profiler.h"
typedef struct private_rwlock_t private_rwlock_t;
+typedef struct private_rwlock_condvar_t private_rwlock_condvar_t;
/**
* private data of rwlock
@@ -72,9 +75,9 @@ struct private_rwlock_t {
u_int reader_count;
/**
- * current writer thread, if any
+ * TRUE, if a writer is holding the lock currently
*/
- pthread_t writer;
+ bool writer;
#endif /* HAVE_PTHREAD_RWLOCK_INIT */
@@ -84,6 +87,27 @@ struct private_rwlock_t {
lock_profile_t profile;
};
+/**
+ * private data of condvar
+ */
+struct private_rwlock_condvar_t {
+
+ /**
+ * public interface
+ */
+ rwlock_condvar_t public;
+
+ /**
+ * mutex used to implement rwlock condvar
+ */
+ mutex_t *mutex;
+
+ /**
+ * regular condvar to implement rwlock condvar
+ */
+ condvar_t *condvar;
+};
+
#ifdef HAVE_PTHREAD_RWLOCK_INIT
@@ -175,37 +199,81 @@ rwlock_t *rwlock_create(rwlock_type_t type)
/**
* This implementation of the rwlock_t interface uses mutex_t and condvar_t
- * primitives, if the pthread_rwlock_* group of functions is not available.
+ * primitives, if the pthread_rwlock_* group of functions is not available
+ * or doesn't allow recursive locking for readers.
*
* The following constraints are enforced:
* - Multiple readers can hold the lock at the same time.
* - Only a single writer can hold the lock at any given time.
* - A writer must block until all readers have released the lock before
* obtaining the lock exclusively.
- * - Readers that arrive while a writer is waiting to acquire the lock will
- * block until after the writer has obtained and released the lock.
+ * - Readers that don't hold any read lock and arrive while a writer is
+ * waiting to acquire the lock will block until after the writer has
+ * obtained and released the lock.
* These constraints allow for read sharing, prevent write sharing, prevent
- * read-write sharing and prevent starvation of writers by a steady stream
- * of incoming readers. Reader starvation is not prevented (this could happen
- * if there are more writers than readers).
+ * read-write sharing and (largely) prevent starvation of writers by a steady
+ * stream of incoming readers. Reader starvation is not prevented (this could
+ * happen if there are more writers than readers).
*
- * The implementation does not support recursive locking and readers must not
- * acquire the lock exclusively at the same time and vice-versa (this is not
- * checked or enforced so behave yourself to prevent deadlocks).
+ * The implementation supports recursive locking of the read lock but not of
+ * the write lock. Readers must not acquire the lock exclusively at the same
+ * time and vice-versa (this is not checked or enforced so behave yourself to
+ * prevent deadlocks).
+ *
+ * Since writers are preferred, a thread currently holding the read lock that
+ * tries to acquire the read lock recursively while a writer is waiting would
+ * result in a deadlock. In order to avoid having to use a thread-specific
+ * value for each rwlock_t (or a list of threads) to keep track of whether a
+ * thread already acquired the read lock, we use a single thread-specific value
+ * for all rwlock_t objects that counts how many read locks a thread currently
+ * holds. Preferring readers that already hold ANY read locks prevents this
+ * deadlock while it still largely avoids writer starvation (for locks that can
+ * only be acquired while holding another read lock this will obviously not
+ * work).
+ */
+
+/**
+ * Keep track of how many read locks a thread holds.
+ */
+static pthread_key_t is_reader;
+
+/**
+ * Only initialize the read lock counter once.
*/
+static pthread_once_t is_reader_initialized = PTHREAD_ONCE_INIT;
+
+/**
+ * Initialize the read lock counter.
+ */
+static void initialize_is_reader(void)
+{
+ pthread_key_create(&is_reader, NULL);
+}
METHOD(rwlock_t, read_lock, void,
private_rwlock_t *this)
{
+ uintptr_t reading;
+
+ reading = (uintptr_t)pthread_getspecific(is_reader);
profiler_start(&this->profile);
this->mutex->lock(this->mutex);
- while (this->writer || this->waiting_writers)
+ if (!this->writer && reading > 0)
{
- this->readers->wait(this->readers, this->mutex);
+ /* directly allow threads that hold ANY read locks, to avoid a deadlock
+ * caused by preferring writers in the loop below */
+ }
+ else
+ {
+ while (this->writer || this->waiting_writers)
+ {
+ this->readers->wait(this->readers, this->mutex);
+ }
}
this->reader_count++;
profiler_end(&this->profile);
this->mutex->unlock(this->mutex);
+ pthread_setspecific(is_reader, (void*)(reading + 1));
}
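
A minimal usage sketch (editorial, not part of the diff) of the recursive
read locking this change enables; it assumes only the rwlock_t API shown
in this file:

	/* hypothetical example: with the per-thread read lock counter, a
	 * reader may re-enter read_lock() even while a writer is waiting */
	rwlock_t *lock = rwlock_create(RWLOCK_TYPE_DEFAULT);

	lock->read_lock(lock);   /* outer read lock, thread's counter -> 1 */
	/* even if a writer starts waiting at this point... */
	lock->read_lock(lock);   /* ...this no longer deadlocks, counter -> 2 */
	lock->unlock(lock);      /* counter -> 1 */
	lock->unlock(lock);      /* counter -> 0, waiting writers get signaled */
	lock->destroy(lock);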
METHOD(rwlock_t, write_lock, void,
@@ -219,7 +287,7 @@ METHOD(rwlock_t, write_lock, void,
this->writers->wait(this->writers, this->mutex);
}
this->waiting_writers--;
- this->writer = pthread_self();
+ this->writer = TRUE;
profiler_end(&this->profile);
this->mutex->unlock(this->mutex);
}
@@ -231,8 +299,7 @@ METHOD(rwlock_t, try_write_lock, bool,
this->mutex->lock(this->mutex);
if (!this->writer && !this->reader_count)
{
- res = TRUE;
- this->writer = pthread_self();
+ res = this->writer = TRUE;
}
this->mutex->unlock(this->mutex);
return res;
@@ -242,9 +309,20 @@ METHOD(rwlock_t, unlock, void,
private_rwlock_t *this)
{
this->mutex->lock(this->mutex);
- if (this->writer == pthread_self())
+ if (this->writer)
+ {
+ this->writer = FALSE;
+ }
+ else
+ {
+ uintptr_t reading;
+
+ this->reader_count--;
+ reading = (uintptr_t)pthread_getspecific(is_reader);
+ pthread_setspecific(is_reader, (void*)(reading - 1));
+ }
+ if (!this->reader_count)
{
- this->writer = 0;
if (this->waiting_writers)
{
this->writers->signal(this->writers);
@@ -254,14 +332,6 @@ METHOD(rwlock_t, unlock, void,
this->readers->broadcast(this->readers);
}
}
- else
- {
- this->reader_count--;
- if (!this->reader_count)
- {
- this->writers->signal(this->writers);
- }
- }
this->mutex->unlock(this->mutex);
}
@@ -280,6 +350,8 @@ METHOD(rwlock_t, destroy, void,
*/
rwlock_t *rwlock_create(rwlock_type_t type)
{
+ pthread_once(&is_reader_initialized, initialize_is_reader);
+
switch (type)
{
case RWLOCK_TYPE_DEFAULT:
@@ -309,3 +381,105 @@ rwlock_t *rwlock_create(rwlock_type_t type)
#endif /* HAVE_PTHREAD_RWLOCK_INIT */
+
+METHOD(rwlock_condvar_t, wait_, void,
+ private_rwlock_condvar_t *this, rwlock_t *lock)
+{
+ /* at this point we have the write lock locked, to make signals more
+ * predictable we try to prevent other threads from signaling by acquiring
+ * the mutex while we still hold the write lock (this assumes they will
+ * hold the write lock themselves when signaling, which is not mandatory) */
+ this->mutex->lock(this->mutex);
+ /* unlock the rwlock and wait for a signal */
+ lock->unlock(lock);
+ /* if the calling thread enabled thread cancelability we want to replicate
+ * the behavior of the regular condvar, i.e. the lock will be held again
+ * before executing cleanup functions registered by the calling thread */
+ thread_cleanup_push((thread_cleanup_t)lock->write_lock, lock);
+ thread_cleanup_push((thread_cleanup_t)this->mutex->unlock, this->mutex);
+ this->condvar->wait(this->condvar, this->mutex);
+ /* we release the mutex to allow other threads into the condvar (might even
+ * be required so we can acquire the lock again below) */
+ thread_cleanup_pop(TRUE);
+ /* finally we reacquire the lock we held previously */
+ thread_cleanup_pop(TRUE);
+}
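
An editorial sketch of the wait pattern this is designed for (names are
hypothetical, not part of the diff): the waiter holds the write lock around
wait(), and signalers ideally hold it too, although the comment above notes
this is not mandatory:

	/* waiting thread: 'lock', 'cv' and 'ready' are illustrative */
	lock->write_lock(lock);
	while (!ready)
	{
		cv->wait(cv, lock);   /* releases lock, reacquires before return */
	}
	/* ... inspect or modify the protected state ... */
	lock->unlock(lock);

	/* signaling thread */
	lock->write_lock(lock);
	ready = TRUE;
	cv->signal(cv);
	lock->unlock(lock);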
+
+METHOD(rwlock_condvar_t, timed_wait_abs, bool,
+ private_rwlock_condvar_t *this, rwlock_t *lock, timeval_t time)
+{
+ bool timed_out;
+
+ /* see wait() above for details on what is going on here */
+ this->mutex->lock(this->mutex);
+ lock->unlock(lock);
+ thread_cleanup_push((thread_cleanup_t)lock->write_lock, lock);
+ thread_cleanup_push((thread_cleanup_t)this->mutex->unlock, this->mutex);
+ timed_out = this->condvar->timed_wait_abs(this->condvar, this->mutex, time);
+ thread_cleanup_pop(TRUE);
+ thread_cleanup_pop(TRUE);
+ return timed_out;
+}
+
+METHOD(rwlock_condvar_t, timed_wait, bool,
+ private_rwlock_condvar_t *this, rwlock_t *lock, u_int timeout)
+{
+ timeval_t tv;
+ u_int s, ms;
+
+ time_monotonic(&tv);
+
+ s = timeout / 1000;
+ ms = timeout % 1000;
+
+ tv.tv_sec += s;
+ timeval_add_ms(&tv, ms);
+
+ return timed_wait_abs(this, lock, tv);
+}
+
+METHOD(rwlock_condvar_t, signal_, void,
+ private_rwlock_condvar_t *this)
+{
+ this->mutex->lock(this->mutex);
+ this->condvar->signal(this->condvar);
+ this->mutex->unlock(this->mutex);
+}
+
+METHOD(rwlock_condvar_t, broadcast, void,
+ private_rwlock_condvar_t *this)
+{
+ this->mutex->lock(this->mutex);
+ this->condvar->broadcast(this->condvar);
+ this->mutex->unlock(this->mutex);
+}
+
+METHOD(rwlock_condvar_t, condvar_destroy, void,
+ private_rwlock_condvar_t *this)
+{
+ this->condvar->destroy(this->condvar);
+ this->mutex->destroy(this->mutex);
+ free(this);
+}
+
+/*
+ * see header file
+ */
+rwlock_condvar_t *rwlock_condvar_create()
+{
+ private_rwlock_condvar_t *this;
+
+ INIT(this,
+ .public = {
+ .wait = _wait_,
+ .timed_wait = _timed_wait,
+ .timed_wait_abs = _timed_wait_abs,
+ .signal = _signal_,
+ .broadcast = _broadcast,
+ .destroy = _condvar_destroy,
+ },
+ .mutex = mutex_create(MUTEX_TYPE_DEFAULT),
+ .condvar = condvar_create(CONDVAR_TYPE_DEFAULT),
+ );
+ return &this->public;
+}
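
An editorial end-to-end sketch (hypothetical usage, not part of the diff)
that creates both objects and waits with a relative 1000 ms timeout:

	/* timed_wait() converts the relative timeout to an absolute
	 * monotonic time and returns TRUE if it expired */
	rwlock_t *lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	rwlock_condvar_t *cv = rwlock_condvar_create();
	bool timed_out;

	lock->write_lock(lock);
	timed_out = cv->timed_wait(cv, lock, 1000);
	lock->unlock(lock);

	cv->destroy(cv);
	lock->destroy(lock);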