1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
|
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_MUTEX_IMPL_H
#define _SYS_MUTEX_IMPL_H
#ifndef _ASM
#include <sys/types.h>
#include <sys/machlock.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
#ifndef _ASM
/*
* mutex_enter() assumes that the mutex is adaptive and tries to grab the
* lock by doing a atomic compare and exchange on the first word of the mutex.
* If the compare and exchange fails, it means that either (1) the lock is a
* spin lock, or (2) the lock is adaptive but already held.
* mutex_vector_enter() distinguishes these cases by looking at the mutex
* type, which is encoded in the low-order bits of the owner field.
*/
/*
 * A kernel mutex is either adaptive or spin; both share this union so
 * every mutex occupies the same storage.  The two forms are told apart
 * at runtime by the encoding of the first word (see the
 * MUTEX_TYPE_ADAPTIVE/MUTEX_TYPE_SPIN predicates below).
 */
typedef union mutex_impl {
/*
 * Adaptive mutex.  The owner word holds the owning thread pointer
 * (recoverable via the MUTEX_THREAD mask) with flag bits — waiters,
 * dead — encoded in its low-order bits.
 */
struct adaptive_mutex {
uintptr_t _m_owner; /* 0-3/0-7 owner and waiters bit */
#ifndef _LP64
uintptr_t _m_filler; /* 4-7 unused */
#endif
} m_adaptive;
/*
 * Spin Mutex.  m_dummylock overlays the adaptive owner word and is
 * always set, so the adaptive fast-path compare-and-exchange (which
 * expects an owner word of 0) always fails on a spin lock and falls
 * into mutex_vector_enter() — see the block comment above.
 */
struct spin_mutex {
lock_t m_dummylock; /* 0 dummy lock (always set) */
lock_t m_spinlock; /* 1 real lock */
ushort_t m_filler; /* 2-3 unused */
ushort_t m_oldspl; /* 4-5 old pil value */
ushort_t m_minspl; /* 6-7 min pil val if lock held */
} m_spin;
} mutex_impl_t;
/* Shorthand: the first word of any mutex, viewed as the adaptive owner. */
#define m_owner m_adaptive._m_owner
/*
 * Mutexes must be long-aligned so the low-order bits of the owner
 * word are guaranteed free to carry the flag bits below.
 */
#define MUTEX_ALIGN _LONG_ALIGNMENT
#define MUTEX_ALIGN_WARNINGS 10 /* num of warnings to issue */
/*
 * Flag bits encoded in the low-order bits of m_owner, plus the mask
 * (MUTEX_THREAD) that strips them to recover the thread pointer.
 */
#define MUTEX_WAITERS 0x1 /* a thread is blocked on this lock */
#define MUTEX_DEAD 0x6 /* stamped by MUTEX_DESTROY (see below) */
#define MUTEX_THREAD (-0x8) /* mask clearing the low three flag bits */
/* Owning thread of an adaptive mutex, or MUTEX_NO_OWNER if not held. */
#define MUTEX_OWNER(lp) ((kthread_id_t)((lp)->m_owner & MUTEX_THREAD))
#define MUTEX_NO_OWNER ((kthread_id_t)NULL)
/*
 * Atomically OR the waiters bit into (lp)'s owner word, retrying the
 * compare-and-swap until it either succeeds or observes that the lock
 * has been released (owner word went to 0), in which case there is no
 * owner to record waiters against and the macro does nothing.
 * Wrapped in do { ... } while (0) so the multi-statement body expands
 * to exactly one statement and composes safely with un-braced if/else.
 */
#define	MUTEX_SET_WAITERS(lp)						\
do {									\
	uintptr_t old;							\
	while ((old = (lp)->m_owner) != 0 &&				\
	    casip(&(lp)->m_owner, old, old | MUTEX_WAITERS) != old)	\
		continue;						\
} while (0)
/* Nonzero iff some thread is blocked waiting for (lp). */
#define MUTEX_HAS_WAITERS(lp) ((lp)->m_owner & MUTEX_WAITERS)
/* Release the lock and clear the waiters bit in a single store. */
#define MUTEX_CLEAR_LOCK_AND_WAITERS(lp) (lp)->m_owner = 0
/*
 * Intentionally expands to nothing on this platform: the lock type is
 * not stored explicitly but inferred from the lock's contents by the
 * two predicates below.
 */
#define MUTEX_SET_TYPE(lp, type)
/*
 * A live adaptive mutex has the MUTEX_DEAD bits clear; a destroyed
 * lock has them set, and a spin lock's always-set dummy byte
 * presumably overlaps them too (LOCK_HELD_VALUE — verify against
 * machlock.h).
 */
#define MUTEX_TYPE_ADAPTIVE(lp) (((lp)->m_owner & MUTEX_DEAD) == 0)
/* A spin mutex keeps its dummy byte permanently at LOCK_HELD_VALUE. */
#define MUTEX_TYPE_SPIN(lp) ((lp)->m_spin.m_dummylock == LOCK_HELD_VALUE)
/*
 * Poison the owner word so a later use of a destroyed lock can be
 * detected (MUTEX_DEAD) and attributed to the destroyer (curthread).
 */
#define MUTEX_DESTROY(lp) \
(lp)->m_owner = ((uintptr_t)curthread | MUTEX_DEAD)
/*
 * Backoff tuning constants and delay primitive for contended mutexes.
 * The constants are consumed by the generic mutex code (their exact
 * scaling semantics live in the consumer — confirm in mutex.c).
 * MUTEX_DELAY() performs one unit of polite spinning; it is wrapped
 * in do { ... } while (0) so the two calls expand to a single
 * statement and compose safely with un-braced if/else.
 */
#define MUTEX_BACKOFF_BASE 1
#define MUTEX_BACKOFF_SHIFT 2
#define MUTEX_CAP_FACTOR 64
#define MUTEX_DELAY() do { \
	mutex_delay(); \
	SMT_PAUSE(); \
} while (0)
/* low overhead clock read (x86 time-stamp counter) */
#define MUTEX_GETTICK() tsc_read()
/*
 * Cross-call target for MUTEX_SYNC(); the name suggests it performs
 * no work — the call exists only for its synchronizing effect.
 */
extern void null_xcall(void);
/*
 * MUTEX_SYNC(): issue a cross-call to every CPU with the no-op
 * function above, acting as a system-wide synchronization point.
 * The do { ... } while (0) wrapper both scopes the local cpuset_t
 * and makes the macro expand to a single statement, safe under
 * un-braced if/else.
 */
#define MUTEX_SYNC() do { \
	cpuset_t set; \
	CPUSET_ALL(set); \
	xc_call(0, 0, 0, CPUSET2BV(set), \
	    (xc_func_t)null_xcall); \
} while (0)
/* Fast-path helpers implemented in the mutex code. */
extern int mutex_adaptive_tryenter(mutex_impl_t *);
extern void *mutex_owner_running(mutex_impl_t *);
#else /* _ASM */
#define MUTEX_THREAD -0x8
#endif /* _ASM */
#ifdef __cplusplus
}
#endif
#endif /* _SYS_MUTEX_IMPL_H */
|