$NetBSD: patch-as,v 1.3 2011/01/29 22:50:32 jnemeth Exp $

# Reported upstream as https://issues.asterisk.org/view.php?id=18705
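#
# The hunks below let DragonFly use pthread_create() directly (as Linux
# already does) and implement the fetch-and-add / decrement-and-test
# helpers with the <sys/atomic.h> primitives where that header exists.
# atomic_add_int_nv() and atomic_dec_uint_nv() return the *new* value,
# which is why the patch subtracts v and compares against zero.  A minimal
# illustrative sketch of that mapping, assuming configure defines
# HAVE_SYS_ATOMIC_H (the macro name used in this patch); the helper names
# here are hypothetical, not part of Asterisk:
#
#	#include <sys/atomic.h>
#
#	/* fetch-and-add: return the value *p held before adding v */
#	static inline int fetchadd_sketch(volatile int *p, int v)
#	{
#		return (int)atomic_add_int_nv((volatile unsigned int *)p, v) - v;
#	}
#
#	/* decrement-and-test: true once the count drops to zero */
#	static inline int dec_and_test_sketch(volatile int *p)
#	{
#		return atomic_dec_uint_nv((volatile unsigned int *)p) == 0;
#	}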

--- include/asterisk/lock.h.orig	2010-07-03 02:36:31.000000000 +0000
+++ include/asterisk/lock.h
@@ -558,7 +558,7 @@ static void  __attribute__((destructor))
 
 #define gethostbyname __gethostbyname__is__not__reentrant__use__ast_gethostbyname__instead__
 
-#ifndef __linux__
+#if !defined(__linux__) && !defined(__DragonFly__)
 #define pthread_create __use_ast_pthread_create_instead__
 #endif
 
@@ -580,6 +580,10 @@ int ast_atomic_fetchadd_int_slow(volatil
 #include "libkern/OSAtomic.h"
 #endif
 
+#if defined(HAVE_SYS_ATOMIC_H)
+#include <sys/atomic.h>
+#endif
+
 /*! \brief Atomically add v to *p and return * the previous value of *p.
  * This can be used to handle reference counts, and the return value
  * can be used to generate unique identifiers.
@@ -599,6 +603,12 @@ AST_INLINE_API(int ast_atomic_fetchadd_i
 AST_INLINE_API(int ast_atomic_fetchadd_int(volatile int *p, int v),
 {
 	return OSAtomicAdd64(v, (int64_t *) p) - v;
+})
+#elif defined(HAVE_SYS_ATOMIC_H)
+AST_INLINE_API(int ast_atomic_fetchadd_int(volatile int *p, int v),
+{
+	return atomic_add_int_nv((unsigned int *)p, v) - v;
+})
 #elif defined (__i386__) || defined(__x86_64__)
 #ifdef sun
 AST_INLINE_API(int ast_atomic_fetchadd_int(volatile int *p, int v),
@@ -645,6 +655,12 @@ AST_INLINE_API(int ast_atomic_dec_and_te
 AST_INLINE_API(int ast_atomic_dec_and_test(volatile int *p),
 {
 	return OSAtomicAdd64( -1, (int64_t *) p) == 0;
+})
+#elif defined(HAVE_SYS_ATOMIC_H)
+AST_INLINE_API(int ast_atomic_dec_and_test(volatile int *p),
+{
+	return atomic_dec_uint_nv((unsigned int *)p) == 0;
+})
 #else
 AST_INLINE_API(int ast_atomic_dec_and_test(volatile int *p),
 {