/* Copyright (C) 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by David Bartley <dtbartle@csclub.uwaterloo.ca>, 2008.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <inline-syscall.h>
#include <pthreadP.h>
#include <synch.h>
#include <errno.h>
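
/* Inline stub for the lwp_cond_wait system call: the LWP sleeps on CV
   until signaled or until the timeout TSP expires.  As the comments below
   note, the kernel unlocks MP as part of the wait.  */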
DECLARE_INLINE_SYSCALL (int, lwp_cond_wait, cond_t *cv, mutex_t *mp,
                        struct timespec *tsp, int check_park);
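
/* State saved across the wait so that the cancellation handler can put
   the mutex back the way the caller expects it.  */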
struct _condvar_cleanup_buffer
{
  int oldtype;
  cond_t *cond;
  mutex_t *mutex;
  int recursive;
  uint8_t old_mutex_rcount;
};
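
/* Cancellation handler: invoked if the thread is cancelled while blocked
   in lwp_cond_wait.  Ensures the mutex is held again, restores the saved
   recursion count, and drops the waiter reference on the mutex.  */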
void
__attribute__ ((visibility ("hidden")))
__condvar_cleanup (void *arg)
{
  struct _condvar_cleanup_buffer *cbuffer =
    (struct _condvar_cleanup_buffer *) arg;
  mutex_t *mutex = cbuffer->mutex;

  /* Note: we don't know whether the mutex was unlocked by the kernel or
     not.  If we still hold the lock we were not signaled; otherwise it
     was unlocked and we must reclaim it.  */
  int errval = 0;
  if (!MUTEX_IS_OWNER (mutex))
    errval = mutex_lock (mutex);

  /* Restore the mutex_rcount.  */
  if (errval == 0)
    mutex->mutex_rcount = cbuffer->old_mutex_rcount;

  /* The condition variable is no longer using the mutex.  */
  atomic_decrement (&mutex->mutex_cond_waiters);
}
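
/* Wait on COND until signaled or until the relative timeout RELTIME
   expires.  MUTEX must be held on entry and is re-acquired before
   returning.  If CANCEL is nonzero, the wait is a cancellation point.  */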
int
__cond_reltimedwait_internal (cond, mutex, reltime, cancel)
     cond_t *cond;
     mutex_t *mutex;
     struct timespec *reltime;
     int cancel;
{
  struct _pthread_cleanup_buffer buffer;
  struct _condvar_cleanup_buffer cbuffer = {
    .recursive = 0
  };

  /* Reject invalid timeouts.  */
  if (PAST_TIMESPEC (reltime))
    return ETIME;
  else if (INVALID_TIMESPEC (reltime))
    return EINVAL;

  if ((mutex->mutex_type & LOCK_ERRORCHECK) && MUTEX_NOT_OWNER (mutex))
    {
      /* Error checking: lock not held by this thread.  */
      return EPERM;
    }
  else if ((mutex->mutex_type & LOCK_RECURSIVE) && MUTEX_IS_OWNER (mutex)
           && mutex->mutex_rcount > 0)
    {
      /* Recursively held lock.  XXX: Using recursive mutexes with condition
         variables is undefined; we do what Sun's libc does, namely fully
         release the lock.  */
      cbuffer.recursive = 1;
      cbuffer.old_mutex_rcount = mutex->mutex_rcount;
      mutex->mutex_rcount = 0;
    }

  /* Mark the mutex as still in use.  */
  atomic_increment (&mutex->mutex_cond_waiters);

  /* Prepare structure passed to cancellation handler.  */
  cbuffer.cond = cond;
  cbuffer.mutex = mutex;

  if (cancel)
    {
      /* Before we block we enable cancellation.  Therefore we have to
         install a cancellation handler.  */
      __pthread_cleanup_push (&buffer, __condvar_cleanup, &cbuffer);

      /* Enable asynchronous cancellation.  Required by the standard.  */
      cbuffer.oldtype = __pthread_enable_asynccancel ();
    }

  int errval = INLINE_SYSCALL (lwp_cond_wait, 4, cond, mutex, reltime, 1);

  if (cancel)
    {
      /* Disable asynchronous cancellation.  */
      __pthread_disable_asynccancel (cbuffer.oldtype);

      /* The cancellation handling is back to normal, remove the
         handler.  */
      __pthread_cleanup_pop (&buffer, 0);
    }

  /* Re-acquire the lock.  The docs say we must always re-acquire, so we
     don't use __mutex_timedlock.  Note that even if the above wait fails
     the kernel always unlocks the mutex.  */
  int errval2 = mutex_lock (mutex);
  if (errval2 == EOWNERDEAD)
    errval = errval2;
  else if (errval == 0)
    errval = errval2;

  /* Restore the mutex_rcount.  */
  if ((errval2 == 0 || errval2 == EOWNERDEAD) && cbuffer.recursive)
    mutex->mutex_rcount = cbuffer.old_mutex_rcount;

  /* The condition variable is no longer using the mutex.  */
  atomic_decrement (&mutex->mutex_cond_waiters);

  return errval;
}
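
/* Public entry point: always a cancellation point (CANCEL == 1).  */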
int
cond_reltimedwait (cond, mutex, reltime)
     cond_t *cond;
     mutex_t *mutex;
     struct timespec *reltime;
{
  return __cond_reltimedwait_internal (cond, mutex, reltime, 1);
}
weak_alias (cond_reltimedwait, _lwp_cond_reltimedwait)
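
/* Illustrative caller pattern (a sketch, not part of this file; `m', `c'
   and `ready' are hypothetical): the mutex is held around the predicate
   check, and ETIME signals that the relative timeout expired.

     struct timespec reltime = { .tv_sec = 1, .tv_nsec = 0 };
     mutex_lock (&m);
     while (!ready)
       if (cond_reltimedwait (&c, &m, &reltime) == ETIME)
         break;
     mutex_unlock (&m);
*/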