summaryrefslogtreecommitdiff
path: root/usr/src/lib/libc/inc/thr_inlines.h
diff options
context:
space:
mode:
authorRichard Lowe <richlowe@richlowe.net>2012-04-14 20:29:22 -0400
committerRichard Lowe <richlowe@richlowe.net>2012-06-14 15:56:15 -0400
commit6a3e8e8695d5c7d1d18c6800d676990d7f61a2a4 (patch)
tree0779c36aa010ab283c6d8df98bdb87909bf11d3a /usr/src/lib/libc/inc/thr_inlines.h
parenta3477ee4728af4a4c3c6869e248aa735d52cbefb (diff)
downloadillumos-gate-6a3e8e8695d5c7d1d18c6800d676990d7f61a2a4.tar.gz
1450 Illumos should be buildable with GCC4
Reviewed by: Joshua M. Clulow <josh@sysmgr.org>
Reviewed by: Keith Wesolowski <keith.wesolowski@joyent.com>
Reviewed by: Gordon Ross <gwr@nexenta.com>
Reviewed by: Albert Lee <trisk@nexenta.com>
Approved by: Gordon Ross <gwr@nexenta.com>
Diffstat (limited to 'usr/src/lib/libc/inc/thr_inlines.h')
-rw-r--r--usr/src/lib/libc/inc/thr_inlines.h237
1 file changed, 123 insertions, 114 deletions
diff --git a/usr/src/lib/libc/inc/thr_inlines.h b/usr/src/lib/libc/inc/thr_inlines.h
index 55bd645428..f7cdc6a6bd 100644
--- a/usr/src/lib/libc/inc/thr_inlines.h
+++ b/usr/src/lib/libc/inc/thr_inlines.h
@@ -27,13 +27,24 @@
#ifndef _THR_INLINES_H
#define _THR_INLINES_H
-#pragma ident "%Z%%M% %I% %E% SMI"
+#include <sys/ccompile.h>
#if !defined(__lint) && defined(__GNUC__)
/* inlines for gcc */
-extern __inline__ ulwp_t *
+/*
+ * ON-usable GCC 4.x emits register pseudo-ops declaring %g7 as ignored, rather
+ * than scratch, GCC 3 does the reverse. All uses, both ones it generated
+ * (_curthread) and ones it didn't (__curthread) must agree.
+ */
+#if __GNUC__ > 3
+#define SPARC_REG_SPEC "#ignore"
+#else
+#define SPARC_REG_SPEC "#scratch"
+#endif
+
+extern __GNU_INLINE ulwp_t *
_curthread(void)
{
#if defined(__amd64)
@@ -50,31 +61,29 @@ _curthread(void)
return (__value);
}
-extern __inline__ ulwp_t *
+extern __GNU_INLINE ulwp_t *
__curthread(void)
{
ulwp_t *__value;
__asm__ __volatile__(
#if defined(__amd64)
- "movq %%fs:0, %0\n\t"
+ "movq %%fs:0, %0\n\t"
#elif defined(__i386)
- "movl %%gs:0, %0\n\t"
+ "movl %%gs:0, %0\n\t"
#elif defined(__sparcv9)
- ".register %%g7, #scratch\n\t"
- "ldx [%%g7 + 80], %0\n\t"
+ ".register %%g7, " SPARC_REG_SPEC "\n\t"
+ "ldx [%%g7 + 80], %0\n\t"
#elif defined(__sparc)
- ".register %%g7, #scratch\n\t"
- "ld [%%g7 + 80], %0\n\t"
+ ".register %%g7, " SPARC_REG_SPEC "\n\t"
+ "ld [%%g7 + 80], %0\n\t"
#else
#error "port me"
#endif
- "1:"
- : "=r" (__value)
- : : "cc");
+ : "=r" (__value));
return (__value);
}
-extern __inline__ greg_t
+extern __GNU_INLINE greg_t
stkptr(void)
{
#if defined(__amd64)
@@ -89,207 +98,207 @@ stkptr(void)
return (__value);
}
-extern __inline__ hrtime_t
+extern __GNU_INLINE hrtime_t
gethrtime(void) /* note: caller-saved registers are trashed */
{
#if defined(__amd64)
hrtime_t __value;
__asm__ __volatile__(
- "movl $3, %%eax\n\t"
- "int $0xd2"
- : "=a" (__value)
- : : "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11", "cc");
+ "movl $3, %%eax\n\t"
+ "int $0xd2"
+ : "=a" (__value)
+ : : "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11", "cc");
#elif defined(__i386)
hrtime_t __value;
__asm__ __volatile__(
- "movl $3, %%eax\n\t"
- "int $0xd2"
- : "=A" (__value)
- : : "ecx", "cc");
+ "movl $3, %%eax\n\t"
+ "int $0xd2"
+ : "=A" (__value)
+ : : "ecx", "cc");
#elif defined(__sparcv9)
register hrtime_t __value __asm__("o0");
__asm__ __volatile__(
- "ta 0x24\n\t"
- "sllx %%o0, 32, %0\n\t"
- "or %%o1, %0, %0"
- : "=r" (__value)
- : : "o1", "o2", "o3", "o4", "o5", "cc");
+ "ta 0x24\n\t"
+ "sllx %%o0, 32, %0\n\t"
+ "or %%o1, %0, %0"
+ : "=r" (__value)
+ : : "o1", "o2", "o3", "o4", "o5", "cc");
#elif defined(__sparc)
register hrtime_t __value __asm__("o0");
__asm__ __volatile__(
- "ta 0x24"
- : "=r" (__value)
- : : "o2", "o3", "o4", "o5", "cc");
+ "ta 0x24"
+ : "=r" (__value)
+ : : "o2", "o3", "o4", "o5", "cc");
#else
#error "port me"
#endif
return (__value);
}
-extern __inline__ int
+extern __GNU_INLINE int
set_lock_byte(volatile uint8_t *__lockp)
{
int __value;
#if defined(__x86)
__asm__ __volatile__(
- "movl $1, %0\n\t"
- "xchgb %%dl, %1"
- : "+d" (__value), "+m" (*__lockp));
+ "movl $1, %0\n\t"
+ "xchgb %%dl, %1"
+ : "+d" (__value), "+m" (*__lockp));
#elif defined(__sparc)
__asm__ __volatile__(
- "ldstub %1, %0\n\t"
- "membar #LoadLoad"
- : "=r" (__value), "+m" (*__lockp));
+ "ldstub %1, %0\n\t"
+ "membar #LoadLoad"
+ : "=r" (__value), "+m" (*__lockp));
#else
#error "port me"
#endif
return (__value);
}
-extern __inline__ uint32_t
+extern __GNU_INLINE uint32_t
atomic_swap_32(volatile uint32_t *__memory, uint32_t __value)
{
#if defined(__x86)
__asm__ __volatile__(
- "xchgl %0, %1"
- : "+q" (__value), "+m" (*__memory));
+ "xchgl %0, %1"
+ : "+q" (__value), "+m" (*__memory));
return (__value);
#elif defined(__sparc)
uint32_t __tmp1, __tmp2;
__asm__ __volatile__(
- "ld [%3], %0\n\t"
- "1:\n\t"
- "mov %4, %1\n\t"
- "cas [%3], %0, %1\n\t"
- "cmp %0, %1\n\t"
- "bne,a,pn %%icc, 1b\n\t"
- " mov %1, %0"
- : "=&r" (__tmp1), "=&r" (__tmp2), "=m" (*__memory)
- : "r" (__memory), "r" (__value)
- : "cc");
+ "ld [%3], %0\n\t"
+ "1:\n\t"
+ "mov %4, %1\n\t"
+ "cas [%3], %0, %1\n\t"
+ "cmp %0, %1\n\t"
+ "bne,a,pn %%icc, 1b\n\t"
+ " mov %1, %0"
+ : "=&r" (__tmp1), "=&r" (__tmp2), "=m" (*__memory)
+ : "r" (__memory), "r" (__value)
+ : "cc");
return (__tmp2);
#else
#error "port me"
#endif
}
-extern __inline__ uint32_t
+extern __GNU_INLINE uint32_t
atomic_cas_32(volatile uint32_t *__memory, uint32_t __cmp, uint32_t __newvalue)
{
uint32_t __oldvalue;
#if defined(__x86)
__asm__ __volatile__(
- "lock; cmpxchgl %3, %0"
- : "=m" (*__memory), "=a" (__oldvalue)
- : "a" (__cmp), "r" (__newvalue));
+ "lock; cmpxchgl %3, %0"
+ : "=m" (*__memory), "=a" (__oldvalue)
+ : "a" (__cmp), "r" (__newvalue));
#elif defined(__sparc)
__asm__ __volatile__(
- "cas [%2], %3, %1"
- : "=m" (*__memory), "=&r" (__oldvalue)
- : "r" (__memory), "r" (__cmp), "1" (__newvalue));
+ "cas [%2], %3, %1"
+ : "=m" (*__memory), "=&r" (__oldvalue)
+ : "r" (__memory), "r" (__cmp), "1" (__newvalue));
#else
#error "port me"
#endif
return (__oldvalue);
}
-extern __inline__ void
+extern __GNU_INLINE void
atomic_inc_32(volatile uint32_t *__memory)
{
#if defined(__x86)
__asm__ __volatile__(
- "lock; incl %0"
- : "+m" (*__memory));
+ "lock; incl %0"
+ : "+m" (*__memory));
#elif defined(__sparc)
uint32_t __tmp1, __tmp2;
__asm__ __volatile__(
- "ld [%3], %0\n\t"
- "1:\n\t"
- "add %0, 1, %1\n\t"
- "cas [%3], %0, %1\n\t"
- "cmp %0, %1\n\t"
- "bne,a,pn %%icc, 1b\n\t"
- " mov %1, %0"
- : "=&r" (__tmp1), "=&r" (__tmp2), "=m" (*__memory)
- : "r" (__memory)
- : "cc");
+ "ld [%3], %0\n\t"
+ "1:\n\t"
+ "add %0, 1, %1\n\t"
+ "cas [%3], %0, %1\n\t"
+ "cmp %0, %1\n\t"
+ "bne,a,pn %%icc, 1b\n\t"
+ " mov %1, %0"
+ : "=&r" (__tmp1), "=&r" (__tmp2), "=m" (*__memory)
+ : "r" (__memory)
+ : "cc");
#else
#error "port me"
#endif
}
-extern __inline__ void
+extern __GNU_INLINE void
atomic_dec_32(volatile uint32_t *__memory)
{
#if defined(__x86)
__asm__ __volatile__(
- "lock; decl %0"
- : "+m" (*__memory));
+ "lock; decl %0"
+ : "+m" (*__memory));
#elif defined(__sparc)
uint32_t __tmp1, __tmp2;
__asm__ __volatile__(
- "ld [%3], %0\n\t"
- "1:\n\t"
- "sub %0, 1, %1\n\t"
- "cas [%3], %0, %1\n\t"
- "cmp %0, %1\n\t"
- "bne,a,pn %%icc, 1b\n\t"
- " mov %1, %0"
- : "=&r" (__tmp1), "=&r" (__tmp2), "=m" (*__memory)
- : "r" (__memory)
- : "cc");
+ "ld [%3], %0\n\t"
+ "1:\n\t"
+ "sub %0, 1, %1\n\t"
+ "cas [%3], %0, %1\n\t"
+ "cmp %0, %1\n\t"
+ "bne,a,pn %%icc, 1b\n\t"
+ " mov %1, %0"
+ : "=&r" (__tmp1), "=&r" (__tmp2), "=m" (*__memory)
+ : "r" (__memory)
+ : "cc");
#else
#error "port me"
#endif
}
-extern __inline__ void
+extern __GNU_INLINE void
atomic_and_32(volatile uint32_t *__memory, uint32_t __bits)
{
#if defined(__x86)
__asm__ __volatile__(
- "lock; andl %1, %0"
- : "+m" (*__memory)
- : "r" (__bits));
+ "lock; andl %1, %0"
+ : "+m" (*__memory)
+ : "r" (__bits));
#elif defined(__sparc)
uint32_t __tmp1, __tmp2;
__asm__ __volatile__(
- "ld [%3], %0\n\t"
- "1:\n\t"
- "and %0, %4, %1\n\t"
- "cas [%3], %0, %1\n\t"
- "cmp %0, %1\n\t"
- "bne,a,pn %%icc, 1b\n\t"
- " mov %1, %0"
- : "=&r" (__tmp1), "=&r" (__tmp2), "=m" (*__memory)
- : "r" (__memory), "r" (__bits)
- : "cc");
+ "ld [%3], %0\n\t"
+ "1:\n\t"
+ "and %0, %4, %1\n\t"
+ "cas [%3], %0, %1\n\t"
+ "cmp %0, %1\n\t"
+ "bne,a,pn %%icc, 1b\n\t"
+ " mov %1, %0"
+ : "=&r" (__tmp1), "=&r" (__tmp2), "=m" (*__memory)
+ : "r" (__memory), "r" (__bits)
+ : "cc");
#else
#error "port me"
#endif
}
-extern __inline__ void
+extern __GNU_INLINE void
atomic_or_32(volatile uint32_t *__memory, uint32_t __bits)
{
#if defined(__x86)
__asm__ __volatile__(
- "lock; orl %1, %0"
- : "+m" (*__memory)
- : "r" (__bits));
+ "lock; orl %1, %0"
+ : "+m" (*__memory)
+ : "r" (__bits));
#elif defined(__sparc)
uint32_t __tmp1, __tmp2;
__asm__ __volatile__(
- "ld [%3], %0\n\t"
- "1:\n\t"
- "or %0, %4, %1\n\t"
- "cas [%3], %0, %1\n\t"
- "cmp %0, %1\n\t"
- "bne,a,pn %%icc, 1b\n\t"
- " mov %1, %0"
- : "=&r" (__tmp1), "=&r" (__tmp2), "=m" (*__memory)
- : "r" (__memory), "r" (__bits)
- : "cc");
+ "ld [%3], %0\n\t"
+ "1:\n\t"
+ "or %0, %4, %1\n\t"
+ "cas [%3], %0, %1\n\t"
+ "cmp %0, %1\n\t"
+ "bne,a,pn %%icc, 1b\n\t"
+ " mov %1, %0"
+ : "=&r" (__tmp1), "=&r" (__tmp2), "=m" (*__memory)
+ : "r" (__memory), "r" (__bits)
+ : "cc");
#else
#error "port me"
#endif
@@ -297,14 +306,14 @@ atomic_or_32(volatile uint32_t *__memory, uint32_t __bits)
#if defined(__sparc) /* only needed on sparc */
-extern __inline__ ulong_t
+extern __GNU_INLINE ulong_t
caller(void)
{
register ulong_t __value __asm__("i7");
return (__value);
}
-extern __inline__ ulong_t
+extern __GNU_INLINE ulong_t
getfp(void)
{
register ulong_t __value __asm__("fp");
@@ -315,7 +324,7 @@ getfp(void)
#if defined(__x86) /* only needed on x86 */
-extern __inline__ void
+extern __GNU_INLINE void
ht_pause(void)
{
__asm__ __volatile__("rep; nop");