summaryrefslogtreecommitdiff
path: root/nptl/sysdeps/unix/sysv/solaris2/kopensolaris-gnu/cond_reltimedwait.c
diff options
context:
space:
mode:
Diffstat (limited to 'nptl/sysdeps/unix/sysv/solaris2/kopensolaris-gnu/cond_reltimedwait.c')
-rw-r--r--nptl/sysdeps/unix/sysv/solaris2/kopensolaris-gnu/cond_reltimedwait.c162
1 files changed, 162 insertions, 0 deletions
diff --git a/nptl/sysdeps/unix/sysv/solaris2/kopensolaris-gnu/cond_reltimedwait.c b/nptl/sysdeps/unix/sysv/solaris2/kopensolaris-gnu/cond_reltimedwait.c
new file mode 100644
index 0000000000..61af00a1ea
--- /dev/null
+++ b/nptl/sysdeps/unix/sysv/solaris2/kopensolaris-gnu/cond_reltimedwait.c
@@ -0,0 +1,162 @@
+/* Copyright (C) 2008 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by David Bartley <dtbartle@csclub.uwaterloo.ca>, 2008.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <inline-syscall.h>
+#include <pthreadP.h>
+#include <synch.h>
+#include <errno.h>
+
+DECLARE_INLINE_SYSCALL (int, lwp_cond_wait, cond_t *cv, mutex_t *mp,
+ struct timespec *tsp, int check_park);
+
/* State passed from __cond_reltimedwait_internal to its cancellation
   handler __condvar_cleanup.  */
struct _condvar_cleanup_buffer
{
  int oldtype;                  /* Cancellation type saved by
                                   __pthread_enable_asynccancel, restored
                                   after the wait.  */
  cond_t *cond;                 /* Condition variable being waited on.  */
  mutex_t *mutex;               /* Mutex associated with the wait; must be
                                   re-acquired on cancellation.  */
  int recursive;                /* Nonzero if MUTEX is recursive and was
                                   held with a nonzero recursion count.  */
  uint8_t old_mutex_rcount;     /* Saved mutex_rcount to restore after the
                                   mutex is re-acquired (only meaningful
                                   when RECURSIVE is set).  */
};
+
+void
+__attribute__ ((visibility ("hidden")))
+__condvar_cleanup (void *arg)
+{
+ struct _condvar_cleanup_buffer *cbuffer =
+ (struct _condvar_cleanup_buffer *) arg;
+ mutex_t *mutex = cbuffer->mutex;
+
+ /* Note: we don't whether the mutex was unlocked by the kernel or not. */
+
+ int errval = 0;
+ while (1)
+ {
+ if (MUTEX_IS_OWNER (mutex))
+ {
+ /* The lock is held by us (we didn't get signaled). */
+ break;
+ }
+ else
+ {
+ /* The mutex was unlocked so we claim it, */
+ errval = mutex_lock (mutex);
+ break;
+ }
+ }
+
+ /* Restore the mutex_rcount. */
+ if (errval == 0)
+ mutex->mutex_rcount = cbuffer->old_mutex_rcount;
+
+ /* The condition variable is no longer using the mutex. */
+ atomic_decrement (&mutex->mutex_cond_waiters);
+}
+
+
+int
+__cond_reltimedwait_internal (cond, mutex, reltime, cancel)
+ cond_t *cond;
+ mutex_t *mutex;
+ struct timespec *reltime;
+ int cancel;
+{
+ struct _pthread_cleanup_buffer buffer;
+ struct _condvar_cleanup_buffer cbuffer = {
+ .recursive = 0
+ };
+
+ /* Reject invalid timeouts. */
+ if (PAST_TIMESPEC (reltime))
+ return ETIME;
+ else if (INVALID_TIMESPEC (reltime))
+ return EINVAL;
+
+ if ((mutex->mutex_type & LOCK_ERRORCHECK) && MUTEX_NOT_OWNER (mutex))
+ {
+ /* Error checking: lock not held by this thread. */
+ return EPERM;
+ }
+ else if ((mutex->mutex_type & LOCK_RECURSIVE) && MUTEX_IS_OWNER (mutex) &&
+ mutex->mutex_rcount > 0)
+ {
+ /* Recursively held lock. XXX: Using recursive mutexes with condition
+ variables is undefined; we do what sun's libc does, namely fully
+ release the lock. */
+ cbuffer.recursive = 1;
+ cbuffer.old_mutex_rcount = mutex->mutex_rcount;
+ mutex->mutex_rcount = 0;
+ }
+
+ /* Mark the mutex as still in use. */
+ atomic_increment (&mutex->mutex_cond_waiters);
+
+ /* Prepare structure passed to cancellation handler. */
+ cbuffer.cond = cond;
+ cbuffer.mutex = mutex;
+
+ if (cancel)
+ {
+ /* Before we block we enable cancellation. Therefore we have to
+ install a cancellation handler. */
+ __pthread_cleanup_push (&buffer, __condvar_cleanup, &cbuffer);
+
+ /* Enable asynchronous cancellation. Required by the standard. */
+ cbuffer.oldtype = __pthread_enable_asynccancel ();
+ }
+
+ int errval = INLINE_SYSCALL (lwp_cond_wait, 4, cond, mutex, reltime, 1);
+
+ if (cancel)
+ {
+ /* Disable asynchronous cancellation. */
+ __pthread_disable_asynccancel (cbuffer.oldtype);
+
+ /* The cancellation handling is back to normal, remove the handler. */
+ __pthread_cleanup_pop (&buffer, 0);
+ }
+
+ /* Re-acquire the lock. The docs say we must always re-acquire so we don't
+ use __mutex_timedlock. Note that even if the above wait fails the kernel
+ always unlocks the mutex. */
+ int errval2 = mutex_lock (mutex);
+ if (errval2 == EOWNERDEAD)
+ errval = errval2;
+ else if (errval == 0)
+ errval = errval2;
+
+ /* Restore the mutex_rcount. */
+ if ((errval2 == 0 || errval2 == EOWNERDEAD) && cbuffer.recursive)
+ mutex->mutex_rcount = cbuffer.old_mutex_rcount;
+
+ /* The condition variable is no longer using the mutex. */
+ atomic_decrement (&mutex->mutex_cond_waiters);
+
+ return errval;
+}
+
+
+int cond_reltimedwait (cond, mutex, reltime)
+ cond_t *cond;
+ mutex_t *mutex;
+ struct timespec *reltime;
+{
+ return __cond_reltimedwait_internal (cond, mutex, reltime, 1);
+}
+
+weak_alias (cond_reltimedwait, _lwp_cond_reltimedwait)