author    Josef 'Jeff' Sipek <josef.sipek@nexenta.com>  2014-08-08 10:27:20 -0400
committer Robert Mustacchi <rm@joyent.com>  2014-08-12 09:27:14 -0700
commit    75d94465dbafa487b716482dc36d5150a4ec9853 (patch)
tree      ecebaff9a32b7a5e5c3f4bba78393ceb328aab73 /usr/src/uts
parent    588575c88d134c8f25f645542bcf36d61ac43a82 (diff)
download  illumos-joyent-75d94465dbafa487b716482dc36d5150a4ec9853.tar.gz
5042 stop using deprecated atomic functions
Reviewed by: Dan McDonald <danmcd@omniti.com>
Approved by: Robert Mustacchi <rm@joyent.com>
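For orientation: the deprecated cas32()/cas64()/casptr() calls and their atomic_cas_32()/atomic_cas_64()/atomic_cas_ptr() replacements share the same contract — compare *target with an expected value, store the new value only on a match, and return whatever was actually found. Below is a minimal sketch of the retry loop that recurs throughout this diff, assuming illumos's <sys/atomic.h>; the counter_add() name is hypothetical.

#include <sys/types.h>
#include <sys/atomic.h>

/*
 * Atomically add n to *count_p with the replacement API.
 * atomic_cas_32() stores newval only if *count_p still holds
 * oldval and returns the value it found; a mismatch means another
 * thread raced in, so reload and retry.
 */
static uint32_t
counter_add(volatile uint32_t *count_p, uint32_t n)
{
	uint32_t oldval, newval;

	do {
		oldval = *count_p;
		newval = oldval + n;
	} while (atomic_cas_32(count_p, oldval, newval) != oldval);
	return (newval);
}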
Diffstat (limited to 'usr/src/uts')
-rw-r--r--  usr/src/uts/common/disp/fss.c | 2
-rw-r--r--  usr/src/uts/common/disp/ts.c | 2
-rw-r--r--  usr/src/uts/common/dtrace/systrace.c | 8
-rw-r--r--  usr/src/uts/common/fs/fem.c | 6
-rw-r--r--  usr/src/uts/common/fs/portfs/port_fop.c | 6
-rw-r--r--  usr/src/uts/common/fs/vfs.c | 4
-rw-r--r--  usr/src/uts/common/fs/vnode.c | 7
-rw-r--r--  usr/src/uts/common/inet/ip/sadb.c | 6
-rw-r--r--  usr/src/uts/common/inet/ipsecah.h | 4
-rw-r--r--  usr/src/uts/common/inet/ipsecesp.h | 4
-rw-r--r--  usr/src/uts/common/inet/keysock.h | 2
-rw-r--r--  usr/src/uts/common/inet/nca/nca.h | 12
-rw-r--r--  usr/src/uts/common/inet/tcp/tcp_input.c | 7
-rw-r--r--  usr/src/uts/common/io/bge/bge_atomic.c | 16
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_send.c | 2
-rw-r--r--  usr/src/uts/common/io/hxge/hxge_txdma.c | 3
-rw-r--r--  usr/src/uts/common/io/ksyms.c | 3
-rw-r--r--  usr/src/uts/common/io/multidata.c | 4
-rw-r--r--  usr/src/uts/common/io/nge/nge_atomic.c | 8
-rw-r--r--  usr/src/uts/common/io/nxge/nxge_send.c | 4
-rw-r--r--  usr/src/uts/common/io/nxge/nxge_txdma.c | 3
-rw-r--r--  usr/src/uts/common/io/rge/rge_chip.c | 2
-rw-r--r--  usr/src/uts/common/io/rge/rge_rxtx.c | 4
-rw-r--r--  usr/src/uts/common/io/smbios.c | 2
-rw-r--r--  usr/src/uts/common/io/stream.c | 7
-rw-r--r--  usr/src/uts/common/io/xge/drv/xge_osdep.h | 4
-rw-r--r--  usr/src/uts/common/os/clock.c | 2
-rw-r--r--  usr/src/uts/common/os/clock_highres.c | 8
-rw-r--r--  usr/src/uts/common/os/cyclic.c | 11
-rw-r--r--  usr/src/uts/common/os/dtrace_subr.c | 8
-rw-r--r--  usr/src/uts/common/os/errorq.c | 33
-rw-r--r--  usr/src/uts/common/os/fm.c | 2
-rw-r--r--  usr/src/uts/common/os/kdi.c | 4
-rw-r--r--  usr/src/uts/common/os/kmem.c | 2
-rw-r--r--  usr/src/uts/common/os/lgrp.c | 14
-rw-r--r--  usr/src/uts/common/os/msacct.c | 8
-rw-r--r--  usr/src/uts/common/os/mutex.c | 2
-rw-r--r--  usr/src/uts/common/os/printf.c | 2
-rw-r--r--  usr/src/uts/common/os/rwlock.c | 2
-rw-r--r--  usr/src/uts/common/sys/bitmap.h | 4
-rw-r--r--  usr/src/uts/common/sys/cpuvar.h | 4
-rw-r--r--  usr/src/uts/common/vm/seg_kp.c | 2
-rw-r--r--  usr/src/uts/common/vm/seg_vn.c | 13
-rw-r--r--  usr/src/uts/i86pc/os/intr.c | 11
-rw-r--r--  usr/src/uts/i86pc/os/memnode.c | 11
-rw-r--r--  usr/src/uts/i86pc/os/x_call.c | 20
-rw-r--r--  usr/src/uts/i86pc/vm/hat_i86.c | 8
-rw-r--r--  usr/src/uts/i86pc/vm/hat_pte.h | 11
-rw-r--r--  usr/src/uts/intel/asm/atomic.h | 51
-rw-r--r--  usr/src/uts/intel/sys/synch32.h | 4
-rw-r--r--  usr/src/uts/sfmmu/vm/hat_sfmmu.c | 6
-rw-r--r--  usr/src/uts/sparc/sys/synch32.h | 4
-rw-r--r--  usr/src/uts/sparc/v9/syscall/install_utrap.c | 18
-rw-r--r--  usr/src/uts/sun4/os/machdep.c | 5
-rw-r--r--  usr/src/uts/sun4/os/memnode.c | 11
-rw-r--r--  usr/src/uts/sun4/os/prom_subr.c | 6
-rw-r--r--  usr/src/uts/sun4/vm/vm_dep.c | 2
-rw-r--r--  usr/src/uts/sun4u/cpu/spitfire.c | 2
-rw-r--r--  usr/src/uts/sun4u/cpu/us3_common.c | 6
-rw-r--r--  usr/src/uts/sun4u/io/zuluvm.c | 4
-rw-r--r--  usr/src/uts/sun4u/os/ppage.c | 6
-rw-r--r--  usr/src/uts/sun4u/vm/zulu_hat.c | 2
-rw-r--r--  usr/src/uts/sun4v/os/ppage.c | 5
63 files changed, 197 insertions(+), 249 deletions(-)
diff --git a/usr/src/uts/common/disp/fss.c b/usr/src/uts/common/disp/fss.c
index b7f992c300..9fb8601e37 100644
--- a/usr/src/uts/common/disp/fss.c
+++ b/usr/src/uts/common/disp/fss.c
@@ -1445,7 +1445,7 @@ fss_enterclass(kthread_t *t, id_t cid, void *parmsp, cred_t *reqpcredp,
* (but check with an ordinary load first since most of the time
* this will already be done).
*/
- if (fssexists == 0 && cas32(&fssexists, 0, 1) == 0)
+ if (fssexists == 0 && atomic_cas_32(&fssexists, 0, 1) == 0)
(void) timeout(fss_update, NULL, hz);
return (0);
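fss.c here and ts.c just below share a one-shot arming idiom: an ordinary load filters the common already-armed case cheaply, and a single CAS from 0 to 1 then elects exactly one thread to start the periodic callout. A sketch of the pattern (the flag and callout names are hypothetical):

static volatile uint32_t update_armed;	/* hypothetical one-shot flag */

/* cheap unlocked check first; the CAS elects a single winner */
if (update_armed == 0 && atomic_cas_32(&update_armed, 0, 1) == 0)
	(void) timeout(periodic_update, NULL, hz);	/* winner only */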
diff --git a/usr/src/uts/common/disp/ts.c b/usr/src/uts/common/disp/ts.c
index b499244de5..bf65c3c42d 100644
--- a/usr/src/uts/common/disp/ts.c
+++ b/usr/src/uts/common/disp/ts.c
@@ -650,7 +650,7 @@ ts_enterclass(kthread_t *t, id_t cid, void *parmsp,
* faster than a mutex (but check with an ordinary load first
* since most of the time this will already be done).
*/
- if (tspexists == 0 && cas32(&tspexists, 0, 1) == 0)
+ if (tspexists == 0 && atomic_cas_32(&tspexists, 0, 1) == 0)
(void) timeout(ts_update, NULL, hz);
return (0);
diff --git a/usr/src/uts/common/dtrace/systrace.c b/usr/src/uts/common/dtrace/systrace.c
index b864041c45..3d36ce9547 100644
--- a/usr/src/uts/common/dtrace/systrace.c
+++ b/usr/src/uts/common/dtrace/systrace.c
@@ -164,11 +164,11 @@ systrace_enable(void *arg, dtrace_id_t id, void *parg)
return (0);
}
- (void) casptr(&sysent[sysnum].sy_callc,
+ (void) atomic_cas_ptr(&sysent[sysnum].sy_callc,
(void *)systrace_sysent[sysnum].stsy_underlying,
(void *)dtrace_systrace_syscall);
#ifdef _SYSCALL32_IMPL
- (void) casptr(&sysent32[sysnum].sy_callc,
+ (void) atomic_cas_ptr(&sysent32[sysnum].sy_callc,
(void *)systrace_sysent32[sysnum].stsy_underlying,
(void *)dtrace_systrace_syscall32);
#endif
@@ -184,12 +184,12 @@ systrace_disable(void *arg, dtrace_id_t id, void *parg)
systrace_sysent[sysnum].stsy_return == DTRACE_IDNONE);
if (disable) {
- (void) casptr(&sysent[sysnum].sy_callc,
+ (void) atomic_cas_ptr(&sysent[sysnum].sy_callc,
(void *)dtrace_systrace_syscall,
(void *)systrace_sysent[sysnum].stsy_underlying);
#ifdef _SYSCALL32_IMPL
- (void) casptr(&sysent32[sysnum].sy_callc,
+ (void) atomic_cas_ptr(&sysent32[sysnum].sy_callc,
(void *)dtrace_systrace_syscall32,
(void *)systrace_sysent32[sysnum].stsy_underlying);
#endif
diff --git a/usr/src/uts/common/fs/fem.c b/usr/src/uts/common/fs/fem.c
index 95248d1077..69ac6f3068 100644
--- a/usr/src/uts/common/fs/fem.c
+++ b/usr/src/uts/common/fs/fem.c
@@ -2872,7 +2872,7 @@ new_femhead(struct fem_head **hp)
head = kmem_alloc(sizeof (*head), KM_SLEEP);
mutex_init(&head->femh_lock, NULL, MUTEX_DEFAULT, NULL);
head->femh_list = NULL;
- if (casptr(hp, NULL, head) != NULL) {
+ if (atomic_cas_ptr(hp, NULL, head) != NULL) {
kmem_free(head, sizeof (*head));
head = *hp;
}
@@ -3365,7 +3365,7 @@ fem_setvnops(vnode_t *v, vnodeops_t *newops)
}
fem_unlock(v->v_femhead);
}
- } while (casptr(&v->v_op, r, newops) != r);
+ } while (atomic_cas_ptr(&v->v_op, r, newops) != r);
}
vnodeops_t *
@@ -3508,7 +3508,7 @@ fsem_setvfsops(vfs_t *v, vfsops_t *newops)
}
fem_unlock(v->vfs_femhead);
}
- } while (casptr(&v->vfs_op, r, newops) != r);
+ } while (atomic_cas_ptr(&v->vfs_op, r, newops) != r);
}
vfsops_t *
diff --git a/usr/src/uts/common/fs/portfs/port_fop.c b/usr/src/uts/common/fs/portfs/port_fop.c
index 1816956d10..045958059a 100644
--- a/usr/src/uts/common/fs/portfs/port_fop.c
+++ b/usr/src/uts/common/fs/portfs/port_fop.c
@@ -293,7 +293,7 @@ port_fop_femop()
(fem_t **)&femp)) {
return (NULL);
}
- if (casptr(&fop_femop, NULL, femp) != NULL) {
+ if (atomic_cas_ptr(&fop_femop, NULL, femp) != NULL) {
/*
* some other thread beat us to it.
*/
@@ -311,7 +311,7 @@ port_fop_fsemop()
if (fsem_create("portfop_fsem", port_vfssrc_template, &fsemp)) {
return (NULL);
}
- if (casptr(&fop_fsemop, NULL, fsemp) != NULL) {
+ if (atomic_cas_ptr(&fop_fsemop, NULL, fsemp) != NULL) {
/*
* some other thread beat us to it.
*/
@@ -1087,7 +1087,7 @@ port_install_fopdata(vnode_t *vp)
/*
* If v_fopdata is not null, some other thread beat us to it.
*/
- if (casptr(&vp->v_fopdata, NULL, npvp) != NULL) {
+ if (atomic_cas_ptr(&vp->v_fopdata, NULL, npvp) != NULL) {
mutex_destroy(&npvp->pvp_mutex);
list_destroy(&npvp->pvp_pfoplist);
kmem_free(npvp, sizeof (*npvp));
diff --git a/usr/src/uts/common/fs/vfs.c b/usr/src/uts/common/fs/vfs.c
index 83c53d859d..f16f5a6fc1 100644
--- a/usr/src/uts/common/fs/vfs.c
+++ b/usr/src/uts/common/fs/vfs.c
@@ -467,7 +467,7 @@ vfs_setops(vfs_t *vfsp, vfsops_t *vfsops)
op = vfsp->vfs_op;
membar_consumer();
if (vfsp->vfs_femhead == NULL &&
- casptr(&vfsp->vfs_op, op, vfsops) == op) {
+ atomic_cas_ptr(&vfsp->vfs_op, op, vfsops) == op) {
return;
}
fsem_setvfsops(vfsp, vfsops);
@@ -2969,7 +2969,7 @@ vfs_mono_time(timespec_t *ts)
oldhrt = hrt;
if (newhrt <= hrt)
newhrt = hrt + 1;
- if (cas64((uint64_t *)&hrt, oldhrt, newhrt) == oldhrt)
+ if (atomic_cas_64((uint64_t *)&hrt, oldhrt, newhrt) == oldhrt)
break;
}
hrt2ts(newhrt, ts);
diff --git a/usr/src/uts/common/fs/vnode.c b/usr/src/uts/common/fs/vnode.c
index d67d164286..9cd8f0562b 100644
--- a/usr/src/uts/common/fs/vnode.c
+++ b/usr/src/uts/common/fs/vnode.c
@@ -2837,11 +2837,12 @@ vn_setops(vnode_t *vp, vnodeops_t *vnodeops)
op = vp->v_op;
membar_consumer();
/*
- * If vp->v_femhead == NULL, then we'll call casptr() to do the
- * compare-and-swap on vp->v_op. If either fails, then FEM is
+ * If vp->v_femhead == NULL, then we'll call atomic_cas_ptr() to do
+ * the compare-and-swap on vp->v_op. If either fails, then FEM is
* in effect on the vnode and we need to have FEM deal with it.
*/
- if (vp->v_femhead != NULL || casptr(&vp->v_op, op, vnodeops) != op) {
+ if (vp->v_femhead != NULL || atomic_cas_ptr(&vp->v_op, op, vnodeops) !=
+ op) {
fem_setvnops(vp, vnodeops);
}
}
diff --git a/usr/src/uts/common/inet/ip/sadb.c b/usr/src/uts/common/inet/ip/sadb.c
index 628d93d56a..cb24250af2 100644
--- a/usr/src/uts/common/inet/ip/sadb.c
+++ b/usr/src/uts/common/inet/ip/sadb.c
@@ -1731,7 +1731,7 @@ sadb_keysock_hello(queue_t **pfkey_qp, queue_t *q, mblk_t *mp,
* messages.
*/
- oldq = casptr((void **)pfkey_qp, NULL, OTHERQ(q));
+ oldq = atomic_cas_ptr((void **)pfkey_qp, NULL, OTHERQ(q));
if (oldq != NULL) {
ASSERT(oldq != q);
cmn_err(CE_WARN, "Danger! Multiple keysocks on top of %s.\n",
@@ -1746,8 +1746,8 @@ sadb_keysock_hello(queue_t **pfkey_qp, queue_t *q, mblk_t *mp,
kha->ks_hello_satype = (uint8_t)satype;
/*
- * If we made it past the casptr, then we have "exclusive" access
- * to the timeout handle. Fire it off after the default ager
+ * If we made it past the atomic_cas_ptr, then we have "exclusive"
+ * access to the timeout handle. Fire it off after the default ager
* interval.
*/
*top = qtimeout(*pfkey_qp, ager, agerarg,
diff --git a/usr/src/uts/common/inet/ipsecah.h b/usr/src/uts/common/inet/ipsecah.h
index cde745da88..62f96876b8 100644
--- a/usr/src/uts/common/inet/ipsecah.h
+++ b/usr/src/uts/common/inet/ipsecah.h
@@ -105,8 +105,8 @@ struct ipsecah_stack {
/*
* Keysock instance of AH. There can be only one per stack instance.
- * Use casptr() on this because I don't set it until KEYSOCK_HELLO
- * comes down.
+ * Use atomic_cas_ptr() on this because I don't set it until
+ * KEYSOCK_HELLO comes down.
* Paired up with the ah_pfkey_q is the ah_event, which will age SAs.
*/
queue_t *ah_pfkey_q;
diff --git a/usr/src/uts/common/inet/ipsecesp.h b/usr/src/uts/common/inet/ipsecesp.h
index 7be35276aa..96e139add0 100644
--- a/usr/src/uts/common/inet/ipsecesp.h
+++ b/usr/src/uts/common/inet/ipsecesp.h
@@ -61,8 +61,8 @@ struct ipsecesp_stack {
/*
* Keysock instance of ESP. There can be only one per stack instance.
- * Use casptr() on this because I don't set it until KEYSOCK_HELLO
- * comes down.
+ * Use atomic_cas_ptr() on this because I don't set it until
+ * KEYSOCK_HELLO comes down.
* Paired up with the esp_pfkey_q is the esp_event, which will age SAs.
*/
queue_t *esp_pfkey_q;
diff --git a/usr/src/uts/common/inet/keysock.h b/usr/src/uts/common/inet/keysock.h
index cb618cedaf..10ca661d7f 100644
--- a/usr/src/uts/common/inet/keysock.h
+++ b/usr/src/uts/common/inet/keysock.h
@@ -74,7 +74,7 @@ struct keysock_stack {
/*
* State for flush/dump. This would normally be a boolean_t, but
- * cas32() works best for a known 32-bit quantity.
+ * atomic_cas_32() works best for a known 32-bit quantity.
*/
uint32_t keystack_flushdump;
int keystack_flushdump_errno;
diff --git a/usr/src/uts/common/inet/nca/nca.h b/usr/src/uts/common/inet/nca/nca.h
index 1dea4fa392..5508e5a631 100644
--- a/usr/src/uts/common/inet/nca/nca.h
+++ b/usr/src/uts/common/inet/nca/nca.h
@@ -695,7 +695,7 @@ extern struct node_ts *node_tp;
_p = node_tp; \
if ((_np = _p + 1) == &node_tv[NODE_TV_SZ]) \
_np = node_tv; \
- } while (casptr(&node_tp, _p, _np) != _p); \
+ } while (atomic_cas_ptr(&node_tp, _p, _np) != _p); \
_p->node = (p); \
_p->action = (a); \
_p->ref = (p) ? (p)->ref : 0; \
@@ -785,7 +785,7 @@ extern struct door_ts *door_tp;
_p = door_tp; \
if ((_np = _p + 1) == &door_tv[DOOR_TV_SZ]) \
_np = door_tv; \
- } while (casptr(&door_tp, _p, _np) != _p); \
+ } while (atomic_cas_ptr(&door_tp, _p, _np) != _p); \
_p->cp = _cp; \
_p->np = _req_np; \
_p->action = (a); \
@@ -987,7 +987,7 @@ extern kmutex_t nca_dcb_readers;
continue; \
} \
new = old | DCB_COUNT_USELOCK; \
- while (cas32(rp, old, new) != old) { \
+ while (atomic_cas_32(rp, old, new) != old) { \
old = *rp; \
new = old | DCB_COUNT_USELOCK; \
} \
@@ -1010,7 +1010,7 @@ extern kmutex_t nca_dcb_readers;
int old = *rp; \
\
new = old & ~DCB_COUNT_USELOCK; \
- while (cas32(rp, old, new) != old) { \
+ while (atomic_cas_32(rp, old, new) != old) { \
old = *rp; \
new = old & ~DCB_COUNT_USELOCK; \
} \
@@ -1543,7 +1543,7 @@ extern struct conn_ts *conn_tp;
_p = conn_tp; \
if ((_np = _p + 1) == &con_tv[CON_TV_SZ]) \
_np = con_tv; \
- } while (casptr(&conn_tp, _p, _np) != _p); \
+ } while (atomic_cas_ptr(&conn_tp, _p, _np) != _p); \
_p->conn = (p); \
_p->action = (a); \
_p->ref = (p)->ref; \
@@ -1765,7 +1765,7 @@ extern nca_counter_t *nca_counter_tp;
_ntp = _otp + 1; \
if (_ntp == &nca_counter_tv[NCA_COUNTER_TRACE_SZ]) \
_ntp = nca_counter_tv; \
- } while (casptr((void *)&nca_counter_tp, (void *)_otp, \
+ } while (atomic_cas_ptr((void *)&nca_counter_tp, (void *)_otp, \
(void *)_ntp) != (void *)_otp); \
_ntp->t = gethrtime(); \
_ntp->p = p; \
diff --git a/usr/src/uts/common/inet/tcp/tcp_input.c b/usr/src/uts/common/inet/tcp/tcp_input.c
index 5bd2be3a7d..367d78eeb8 100644
--- a/usr/src/uts/common/inet/tcp/tcp_input.c
+++ b/usr/src/uts/common/inet/tcp/tcp_input.c
@@ -1939,7 +1939,8 @@ tcp_input_listener_unbound(void *arg, mblk_t *mp, void *arg2,
}
if (connp->conn_sqp != new_sqp) {
while (connp->conn_sqp != new_sqp)
- (void) casptr(&connp->conn_sqp, sqp, new_sqp);
+ (void) atomic_cas_ptr(&connp->conn_sqp, sqp,
+ new_sqp);
/* No special MT issues for outbound ixa_sqp hint */
connp->conn_ixa->ixa_sqp = new_sqp;
}
@@ -1947,8 +1948,8 @@ tcp_input_listener_unbound(void *arg, mblk_t *mp, void *arg2,
do {
conn_flags = connp->conn_flags;
conn_flags |= IPCL_FULLY_BOUND;
- (void) cas32(&connp->conn_flags, connp->conn_flags,
- conn_flags);
+ (void) atomic_cas_32(&connp->conn_flags,
+ connp->conn_flags, conn_flags);
} while (!(connp->conn_flags & IPCL_FULLY_BOUND));
mutex_exit(&connp->conn_fanout->connf_lock);
diff --git a/usr/src/uts/common/io/bge/bge_atomic.c b/usr/src/uts/common/io/bge/bge_atomic.c
index 23bdbb0941..269e66983d 100644
--- a/usr/src/uts/common/io/bge/bge_atomic.c
+++ b/usr/src/uts/common/io/bge/bge_atomic.c
@@ -24,8 +24,6 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include "bge_impl.h"
/*
@@ -49,7 +47,7 @@ bge_atomic_reserve(uint64_t *count_p, uint64_t n)
newval = oldval - n;
if (oldval <= n)
return (0); /* no resources left */
- } while (cas64(count_p, oldval, newval) != oldval);
+ } while (atomic_cas_64(count_p, oldval, newval) != oldval);
return (newval);
}
@@ -67,7 +65,7 @@ bge_atomic_renounce(uint64_t *count_p, uint64_t n)
do {
oldval = *count_p;
newval = oldval + n;
- } while (cas64(count_p, oldval, newval) != oldval);
+ } while (atomic_cas_64(count_p, oldval, newval) != oldval);
}
/*
@@ -83,7 +81,7 @@ bge_atomic_claim(uint64_t *count_p, uint64_t limit)
do {
oldval = *count_p;
newval = NEXT(oldval, limit);
- } while (cas64(count_p, oldval, newval) != oldval);
+ } while (atomic_cas_64(count_p, oldval, newval) != oldval);
return (oldval);
}
@@ -102,7 +100,7 @@ bge_atomic_next(uint64_t *sp, uint64_t limit)
do {
oldval = *sp;
newval = NEXT(oldval, limit);
- } while (cas64(sp, oldval, newval) != oldval);
+ } while (atomic_cas_64(sp, oldval, newval) != oldval);
return (oldval);
}
@@ -120,7 +118,7 @@ bge_atomic_sub64(uint64_t *count_p, uint64_t n)
do {
oldval = *count_p;
newval = oldval - n;
- } while (cas64(count_p, oldval, newval) != oldval);
+ } while (atomic_cas_64(count_p, oldval, newval) != oldval);
}
/*
@@ -137,7 +135,7 @@ bge_atomic_clr64(uint64_t *sp, uint64_t bits)
do {
oldval = *sp;
newval = oldval & ~bits;
- } while (cas64(sp, oldval, newval) != oldval);
+ } while (atomic_cas_64(sp, oldval, newval) != oldval);
return (oldval);
}
@@ -156,7 +154,7 @@ bge_atomic_shl32(uint32_t *sp, uint_t count)
do {
oldval = *sp;
newval = oldval << count;
- } while (cas32(sp, oldval, newval) != oldval);
+ } while (atomic_cas_32(sp, oldval, newval) != oldval);
return (oldval);
}
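The bge_atomic_reserve()/bge_atomic_renounce() pair above amounts to a lock-free counting semaphore built from the same CAS loop. A hedged sketch of the reserve side, assuming illumos's atomic_cas_64() (this simplifies the driver's bounds check slightly):

/*
 * Try to claim n units from *count_p without taking a lock.
 * Returns the count remaining after the claim, or 0 if fewer
 * than n units were available.
 */
static uint64_t
resource_reserve(volatile uint64_t *count_p, uint64_t n)
{
	uint64_t oldval, newval;

	do {
		oldval = *count_p;
		if (oldval < n)
			return (0);	/* not enough resources left */
		newval = oldval - n;
	} while (atomic_cas_64(count_p, oldval, newval) != oldval);
	return (newval);
}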
diff --git a/usr/src/uts/common/io/hxge/hxge_send.c b/usr/src/uts/common/io/hxge/hxge_send.c
index 647717b82c..b59757011e 100644
--- a/usr/src/uts/common/io/hxge/hxge_send.c
+++ b/usr/src/uts/common/io/hxge/hxge_send.c
@@ -219,7 +219,7 @@ start_again:
"TX Descriptor ring is full: channel %d", tx_ring_p->tdc));
HXGE_DEBUG_MSG((hxgep, TX_CTL,
"TX Descriptor ring is full: channel %d", tx_ring_p->tdc));
- (void) cas32((uint32_t *)&tx_ring_p->queueing, 0, 1);
+ (void) atomic_cas_32((uint32_t *)&tx_ring_p->queueing, 0, 1);
tdc_stats->tx_no_desc++;
MUTEX_EXIT(&tx_ring_p->lock);
status = 1;
diff --git a/usr/src/uts/common/io/hxge/hxge_txdma.c b/usr/src/uts/common/io/hxge/hxge_txdma.c
index 67a8d653f9..deb5c2b3a1 100644
--- a/usr/src/uts/common/io/hxge/hxge_txdma.c
+++ b/usr/src/uts/common/io/hxge/hxge_txdma.c
@@ -787,7 +787,8 @@ hxge_txdma_reclaim(p_hxge_t hxgep, p_tx_ring_t tx_ring_p, int nmblks)
status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
(int)tx_ring_p->descs_pending - TX_FULL_MARK));
if (status) {
- (void) cas32((uint32_t *)&tx_ring_p->queueing, 1, 0);
+ (void) atomic_cas_32((uint32_t *)&tx_ring_p->queueing,
+ 1, 0);
}
} else {
status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
diff --git a/usr/src/uts/common/io/ksyms.c b/usr/src/uts/common/io/ksyms.c
index d43324ceae..c9f0c63b69 100644
--- a/usr/src/uts/common/io/ksyms.c
+++ b/usr/src/uts/common/io/ksyms.c
@@ -247,7 +247,8 @@ ksyms_open(dev_t *devp, int flag, int otyp, struct cred *cred)
* since that's the "real" minor number.
*/
for (clone = 1; clone < nksyms_clones; clone++) {
- if (casptr(&ksyms_clones[clone].ksyms_base, 0, addr) == 0) {
+ if (atomic_cas_ptr(&ksyms_clones[clone].ksyms_base, 0, addr) ==
+ 0) {
ksyms_clones[clone].ksyms_size = realsize;
*devp = makedevice(getemajor(*devp), clone);
(void) ddi_prop_update_int(*devp, ksyms_devi,
diff --git a/usr/src/uts/common/io/multidata.c b/usr/src/uts/common/io/multidata.c
index 833aec0206..ebef8ec32c 100644
--- a/usr/src/uts/common/io/multidata.c
+++ b/usr/src/uts/common/io/multidata.c
@@ -24,8 +24,6 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* Multidata, as described in the following papers:
*
@@ -1294,7 +1292,7 @@ mmd_addpattr(multidata_t *mmd, pdesc_t *pd, pattrinfo_t *pai,
return (NULL);
/* if someone got there first, use his table instead */
- if ((o_tbl = casptr(tbl_p, NULL, tbl)) != NULL) {
+ if ((o_tbl = atomic_cas_ptr(tbl_p, NULL, tbl)) != NULL) {
kmem_cache_free(pattbl_cache, tbl);
tbl = o_tbl;
}
diff --git a/usr/src/uts/common/io/nge/nge_atomic.c b/usr/src/uts/common/io/nge/nge_atomic.c
index b8099a714b..4d87aa5ab5 100644
--- a/usr/src/uts/common/io/nge/nge_atomic.c
+++ b/usr/src/uts/common/io/nge/nge_atomic.c
@@ -24,8 +24,6 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include "nge.h"
/*
@@ -44,7 +42,7 @@ nge_atomic_decrease(uint64_t *count_p, uint64_t n)
newval = oldval - n;
if (oldval < n)
return (B_FALSE);
- } while (cas64(count_p, oldval, newval) != oldval);
+ } while (atomic_cas_64(count_p, oldval, newval) != oldval);
return (B_TRUE);
}
@@ -62,7 +60,7 @@ nge_atomic_increase(uint64_t *count_p, uint64_t n)
do {
oldval = *count_p;
newval = oldval + n;
- } while (cas64(count_p, oldval, newval) != oldval);
+ } while (atomic_cas_64(count_p, oldval, newval) != oldval);
}
@@ -80,7 +78,7 @@ nge_atomic_shl32(uint32_t *sp, uint_t count)
do {
oldval = *sp;
newval = oldval << count;
- } while (cas32(sp, oldval, newval) != oldval);
+ } while (atomic_cas_32(sp, oldval, newval) != oldval);
return (oldval);
}
diff --git a/usr/src/uts/common/io/nxge/nxge_send.c b/usr/src/uts/common/io/nxge/nxge_send.c
index 7b78fa8af6..6f9672c277 100644
--- a/usr/src/uts/common/io/nxge/nxge_send.c
+++ b/usr/src/uts/common/io/nxge/nxge_send.c
@@ -373,8 +373,8 @@ start_again:
tx_ring_p->tdc));
goto nxge_start_fail_lso;
} else {
- (void) cas32((uint32_t *)&tx_ring_p->queueing,
- 0, 1);
+ (void) atomic_cas_32(
+ (uint32_t *)&tx_ring_p->queueing, 0, 1);
tdc_stats->tx_no_desc++;
if (isLDOMservice(nxgep)) {
diff --git a/usr/src/uts/common/io/nxge/nxge_txdma.c b/usr/src/uts/common/io/nxge/nxge_txdma.c
index f3fd19a3c2..74495f9f8a 100644
--- a/usr/src/uts/common/io/nxge/nxge_txdma.c
+++ b/usr/src/uts/common/io/nxge/nxge_txdma.c
@@ -1083,7 +1083,8 @@ nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks)
status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
(int)tx_ring_p->descs_pending - TX_FULL_MARK));
if (status) {
- (void) cas32((uint32_t *)&tx_ring_p->queueing, 1, 0);
+ (void) atomic_cas_32((uint32_t *)&tx_ring_p->queueing,
+ 1, 0);
}
} else {
status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
diff --git a/usr/src/uts/common/io/rge/rge_chip.c b/usr/src/uts/common/io/rge/rge_chip.c
index 2409b29d74..bb9971048c 100644
--- a/usr/src/uts/common/io/rge/rge_chip.c
+++ b/usr/src/uts/common/io/rge/rge_chip.c
@@ -285,7 +285,7 @@ rge_atomic_shl32(uint32_t *sp, uint_t count)
do {
oldval = *sp;
newval = oldval << count;
- } while (cas32(sp, oldval, newval) != oldval);
+ } while (atomic_cas_32(sp, oldval, newval) != oldval);
return (oldval);
}
diff --git a/usr/src/uts/common/io/rge/rge_rxtx.c b/usr/src/uts/common/io/rge/rge_rxtx.c
index 9b16c2ae82..1edac700c3 100644
--- a/usr/src/uts/common/io/rge/rge_rxtx.c
+++ b/usr/src/uts/common/io/rge/rge_rxtx.c
@@ -49,7 +49,7 @@ rge_atomic_reserve(uint32_t *count_p, uint32_t n)
newval = oldval - n;
if (oldval <= n)
return (0); /* no resources left */
- } while (cas32(count_p, oldval, newval) != oldval);
+ } while (atomic_cas_32(count_p, oldval, newval) != oldval);
return (newval);
}
@@ -70,7 +70,7 @@ rge_atomic_renounce(uint32_t *count_p, uint32_t n)
do {
oldval = *count_p;
newval = oldval + n;
- } while (cas32(count_p, oldval, newval) != oldval);
+ } while (atomic_cas_32(count_p, oldval, newval) != oldval);
}
/*
diff --git a/usr/src/uts/common/io/smbios.c b/usr/src/uts/common/io/smbios.c
index bd7fdf3f3d..3628d6aac9 100644
--- a/usr/src/uts/common/io/smbios.c
+++ b/usr/src/uts/common/io/smbios.c
@@ -78,7 +78,7 @@ smb_open(dev_t *dp, int flag, int otyp, cred_t *cred)
* the real minor number, and we assign a new minor to each clone.
*/
for (c = 1; c < smb_nclones; c++) {
- if (casptr(&smb_clones[c].c_hdl, NULL, ksmbios) == NULL)
+ if (atomic_cas_ptr(&smb_clones[c].c_hdl, NULL, ksmbios) == NULL)
break;
}
diff --git a/usr/src/uts/common/io/stream.c b/usr/src/uts/common/io/stream.c
index 658735b784..e9af19ca18 100644
--- a/usr/src/uts/common/io/stream.c
+++ b/usr/src/uts/common/io/stream.c
@@ -798,7 +798,8 @@ dupb(mblk_t *mp)
new_mp = NULL;
goto out;
}
- } while (cas32(&DBLK_RTFU_WORD(dbp), oldrtfu, newrtfu) != oldrtfu);
+ } while (atomic_cas_32(&DBLK_RTFU_WORD(dbp), oldrtfu, newrtfu) !=
+ oldrtfu);
out:
FTRACE_1("dupb(): new_mp=0x%lx", (uintptr_t)new_mp);
@@ -4250,7 +4251,7 @@ str_ftevent(fthdr_t *hp, void *p, ushort_t evnt, ushort_t data)
* the value is there for it.
*/
membar_producer();
- if (casptr(&hp->tail, bp, nbp) == bp) {
+ if (atomic_cas_ptr(&hp->tail, bp, nbp) == bp) {
/* CAS was successful */
bp->nxt = nbp;
membar_producer();
@@ -4264,7 +4265,7 @@ str_ftevent(fthdr_t *hp, void *p, ushort_t evnt, ushort_t data)
}
}
nix = ix + 1;
- if (cas32((uint32_t *)&bp->ix, ix, nix) == ix) {
+ if (atomic_cas_32((uint32_t *)&bp->ix, ix, nix) == ix) {
cas_good:
if (curthread != hp->thread) {
hp->thread = curthread;
diff --git a/usr/src/uts/common/io/xge/drv/xge_osdep.h b/usr/src/uts/common/io/xge/drv/xge_osdep.h
index 4b09b0f983..1b36c32241 100644
--- a/usr/src/uts/common/io/xge/drv/xge_osdep.h
+++ b/usr/src/uts/common/io/xge/drv/xge_osdep.h
@@ -188,8 +188,8 @@ static inline int xge_os_sprintf(char *buf, char *fmt, ...) {
#define xge_os_cmpxchg(targetp, cmp, newval) \
sizeof (*(targetp)) == 4 ? \
- cas32((uint32_t *)targetp, cmp, newval) : \
- cas64((uint64_t *)targetp, cmp, newval)
+ atomic_cas_32((uint32_t *)targetp, cmp, newval) : \
+ atomic_cas_64((uint64_t *)targetp, cmp, newval)
/* ------------------------- misc primitives ------------------------------- */
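The xge_os_cmpxchg() macro above picks the CAS width from the operand size at compile time, so callers treat it as a single polymorphic CAS that returns the old value. A usage sketch (the variable name is hypothetical):

uint32_t ring_state = 0;

/* sizeof (ring_state) == 4, so this expands to atomic_cas_32() */
if (xge_os_cmpxchg(&ring_state, 0, 1) == 0) {
	/* this caller transitioned ring_state from 0 to 1 */
}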
diff --git a/usr/src/uts/common/os/clock.c b/usr/src/uts/common/os/clock.c
index 3421ae28d1..574dc31f8a 100644
--- a/usr/src/uts/common/os/clock.c
+++ b/usr/src/uts/common/os/clock.c
@@ -1650,7 +1650,7 @@ profil_tick(uintptr_t upc)
do {
ticks = lwp->lwp_oweupc;
- } while (cas32(&lwp->lwp_oweupc, ticks, 0) != ticks);
+ } while (atomic_cas_32(&lwp->lwp_oweupc, ticks, 0) != ticks);
mutex_enter(&p->p_pflock);
if (pr->pr_scale >= 2 && upc >= pr->pr_off) {
diff --git a/usr/src/uts/common/os/clock_highres.c b/usr/src/uts/common/os/clock_highres.c
index 7870617a26..bcdf20c0bd 100644
--- a/usr/src/uts/common/os/clock_highres.c
+++ b/usr/src/uts/common/os/clock_highres.c
@@ -93,7 +93,7 @@ clock_highres_fire(void *arg)
do {
old = *addr;
- } while (cas64((uint64_t *)addr, old, new) != old);
+ } while (atomic_cas_64((uint64_t *)addr, old, new) != old);
timer_fire(it);
}
@@ -235,10 +235,10 @@ clock_highres_timer_gettime(itimer_t *it, struct itimerspec *when)
hrtime_t last;
/*
- * We're using cas64() here only to assure that we slurp the entire
- * timestamp atomically.
+ * We're using atomic_cas_64() here only to assure that we slurp the
+ * entire timestamp atomically.
*/
- last = cas64((uint64_t *)addr, 0, 0);
+ last = atomic_cas_64((uint64_t *)addr, 0, 0);
*when = it->it_itime;
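The gettime path above uses a CAS purely as an atomic 64-bit load: atomic_cas_64(addr, 0, 0) either stores 0 over an existing 0 (a no-op) or fails outright, and in both cases returns the whole value in one indivisible access — which matters on 32-bit kernels where a plain 64-bit load can tear. A sketch of the idiom:

/*
 * Atomically read a 64-bit value where an ordinary load may be
 * split into two 32-bit accesses. Whether the compare against 0
 * succeeds or fails, memory is left unchanged and the current
 * contents of *addr come back as one atomic unit.
 */
static uint64_t
atomic_load_64(volatile uint64_t *addr)
{
	return (atomic_cas_64(addr, 0, 0));
}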
diff --git a/usr/src/uts/common/os/cyclic.c b/usr/src/uts/common/os/cyclic.c
index 93a318d260..21907b4957 100644
--- a/usr/src/uts/common/os/cyclic.c
+++ b/usr/src/uts/common/os/cyclic.c
@@ -658,7 +658,8 @@ cyclic_coverage(char *why, int level, uint64_t arg0, uint64_t arg1)
break;
if (cyc_coverage[ndx].cyv_why != NULL ||
- casptr(&cyc_coverage[ndx].cyv_why, NULL, why) != NULL) {
+ atomic_cas_ptr(&cyc_coverage[ndx].cyv_why, NULL, why) !=
+ NULL) {
if (++ndx == CY_NCOVERAGE)
ndx = 0;
@@ -1219,9 +1220,10 @@ reread:
}
if ((opend =
- cas32(&cyclic->cy_pend, pend, npend)) != pend) {
+ atomic_cas_32(&cyclic->cy_pend, pend, npend)) !=
+ pend) {
/*
- * Our cas32 can fail for one of several
+ * Our atomic_cas_32 can fail for one of several
* reasons:
*
* (a) An intervening high level bumped up the
@@ -1311,7 +1313,8 @@ reread:
do {
lev = cpu->cyp_modify_levels;
nlev = lev + 1;
- } while (cas32(&cpu->cyp_modify_levels, lev, nlev) != lev);
+ } while (atomic_cas_32(&cpu->cyp_modify_levels, lev, nlev) !=
+ lev);
/*
* If we are the last soft level to see the modification,
diff --git a/usr/src/uts/common/os/dtrace_subr.c b/usr/src/uts/common/os/dtrace_subr.c
index d2ce3361c1..44cfddad97 100644
--- a/usr/src/uts/common/os/dtrace_subr.c
+++ b/usr/src/uts/common/os/dtrace_subr.c
@@ -203,7 +203,7 @@ dtrace_vtime_enable(void)
/*NOTREACHED*/
}
- } while (cas32((uint32_t *)&dtrace_vtime_active,
+ } while (atomic_cas_32((uint32_t *)&dtrace_vtime_active,
state, nstate) != state);
}
@@ -230,7 +230,7 @@ dtrace_vtime_disable(void)
/*NOTREACHED*/
}
- } while (cas32((uint32_t *)&dtrace_vtime_active,
+ } while (atomic_cas_32((uint32_t *)&dtrace_vtime_active,
state, nstate) != state);
}
@@ -257,7 +257,7 @@ dtrace_vtime_enable_tnf(void)
/*NOTREACHED*/
}
- } while (cas32((uint32_t *)&dtrace_vtime_active,
+ } while (atomic_cas_32((uint32_t *)&dtrace_vtime_active,
state, nstate) != state);
}
@@ -284,7 +284,7 @@ dtrace_vtime_disable_tnf(void)
/*NOTREACHED*/
}
- } while (cas32((uint32_t *)&dtrace_vtime_active,
+ } while (atomic_cas_32((uint32_t *)&dtrace_vtime_active,
state, nstate) != state);
}
diff --git a/usr/src/uts/common/os/errorq.c b/usr/src/uts/common/os/errorq.c
index d4c9fef5a2..547076f0ad 100644
--- a/usr/src/uts/common/os/errorq.c
+++ b/usr/src/uts/common/os/errorq.c
@@ -555,7 +555,7 @@ errorq_dispatch(errorq_t *eqp, const void *data, size_t len, uint_t flag)
eep->eqe_prev = old;
membar_producer();
- if (casptr(&eqp->eq_pend, old, eep) == old)
+ if (atomic_cas_ptr(&eqp->eq_pend, old, eep) == old)
break;
}
@@ -596,15 +596,16 @@ errorq_drain(errorq_t *eqp)
* make sure that eq_ptail will be visible to errorq_panic() below
* before the pending list is NULLed out. This section is labeled
* case (1) for errorq_panic, below. If eq_ptail is not yet set (1A)
- * eq_pend has all the pending errors. If casptr fails or has not
- * been called yet (1B), eq_pend still has all the pending errors.
- * If casptr succeeds (1C), eq_ptail has all the pending errors.
+ * eq_pend has all the pending errors. If atomic_cas_ptr fails or
+ * has not been called yet (1B), eq_pend still has all the pending
+ * errors. If atomic_cas_ptr succeeds (1C), eq_ptail has all the
+ * pending errors.
*/
while ((eep = eqp->eq_pend) != NULL) {
eqp->eq_ptail = eep;
membar_producer();
- if (casptr(&eqp->eq_pend, eep, NULL) == eep)
+ if (atomic_cas_ptr(&eqp->eq_pend, eep, NULL) == eep)
break;
}
@@ -750,13 +751,14 @@ errorq_panic_drain(uint_t what)
loggedtmp = eqp->eq_kstat.eqk_logged.value.ui64;
/*
- * In case (1B) above, eq_ptail may be set but the casptr may
- * not have been executed yet or may have failed. Either way,
- * we must log errors in chronological order. So we search
- * the pending list for the error pointed to by eq_ptail. If
- * it is found, we know that all subsequent errors are also
- * still on the pending list, so just NULL out eq_ptail and let
- * errorq_drain(), below, take care of the logging.
+ * In case (1B) above, eq_ptail may be set but the
+ * atomic_cas_ptr may not have been executed yet or may have
+ * failed. Either way, we must log errors in chronological
+ * order. So we search the pending list for the error
+ * pointed to by eq_ptail. If it is found, we know that all
+ * subsequent errors are also still on the pending list, so
+ * just NULL out eq_ptail and let errorq_drain(), below,
+ * take care of the logging.
*/
for (eep = eqp->eq_pend; eep != NULL; eep = eep->eqe_prev) {
if (eep == eqp->eq_ptail) {
@@ -790,8 +792,9 @@ errorq_panic_drain(uint_t what)
*
* Unlike errorq_drain(), we don't need to worry about updating
* eq_phead because errorq_panic() will be called at most once.
- * However, we must use casptr to update the freelist in case
- * errors are still being enqueued during panic.
+ * However, we must use atomic_cas_ptr to update the
+ * freelist in case errors are still being enqueued during
+ * panic.
*/
for (eep = eqp->eq_phead; eep != NULL; eep = nep) {
eqp->eq_func(eqp->eq_private, eep->eqe_data, eep);
@@ -914,7 +917,7 @@ errorq_commit(errorq_t *eqp, errorq_elem_t *eqep, uint_t flag)
eqep->eqe_prev = old;
membar_producer();
- if (casptr(&eqp->eq_pend, old, eqep) == old)
+ if (atomic_cas_ptr(&eqp->eq_pend, old, eqep) == old)
break;
}
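errorq's pending list, touched above, is a lock-free LIFO: a producer links its element to the current head and publishes it with a pointer CAS, retrying if another producer got there first; the membar_producer() makes the link visible before the element becomes reachable. A sketch of the push with hypothetical types:

typedef struct elem {
	struct elem *prev;	/* link toward older entries */
	/* payload would live here */
} elem_t;

static void
pend_push(elem_t *volatile *headp, elem_t *ep)
{
	elem_t *old;

	for (;;) {
		old = *headp;
		ep->prev = old;
		membar_producer();	/* publish ep->prev before ep */
		if (atomic_cas_ptr(headp, old, ep) == old)
			break;		/* ep is now the list head */
	}
}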
diff --git a/usr/src/uts/common/os/fm.c b/usr/src/uts/common/os/fm.c
index eff91aee5e..fbe45e833e 100644
--- a/usr/src/uts/common/os/fm.c
+++ b/usr/src/uts/common/os/fm.c
@@ -374,7 +374,7 @@ fm_panic(const char *format, ...)
{
va_list ap;
- (void) casptr((void *)&fm_panicstr, NULL, (void *)format);
+ (void) atomic_cas_ptr((void *)&fm_panicstr, NULL, (void *)format);
#if defined(__i386) || defined(__amd64)
fastreboot_disable_highpil();
#endif /* __i386 || __amd64 */
diff --git a/usr/src/uts/common/os/kdi.c b/usr/src/uts/common/os/kdi.c
index 20eed5a16f..482c47dd84 100644
--- a/usr/src/uts/common/os/kdi.c
+++ b/usr/src/uts/common/os/kdi.c
@@ -23,8 +23,6 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/cpuvar.h>
#include <sys/kdi_impl.h>
#include <sys/reboot.h>
@@ -151,7 +149,7 @@ kdi_dtrace_set(kdi_dtrace_set_t transition)
default:
return (EINVAL);
}
- } while (cas32((uint_t *)&kdi_dtrace_state, cur, new) != cur);
+ } while (atomic_cas_32((uint_t *)&kdi_dtrace_state, cur, new) != cur);
return (0);
}
diff --git a/usr/src/uts/common/os/kmem.c b/usr/src/uts/common/os/kmem.c
index dbd7c9a1ea..88382dfce4 100644
--- a/usr/src/uts/common/os/kmem.c
+++ b/usr/src/uts/common/os/kmem.c
@@ -3166,7 +3166,7 @@ kmem_reap_common(void *flag_arg)
uint32_t *flag = (uint32_t *)flag_arg;
if (MUTEX_HELD(&kmem_cache_lock) || kmem_taskq == NULL ||
- cas32(flag, 0, 1) != 0)
+ atomic_cas_32(flag, 0, 1) != 0)
return;
/*
diff --git a/usr/src/uts/common/os/lgrp.c b/usr/src/uts/common/os/lgrp.c
index 72a03f00c5..3f344e2cfd 100644
--- a/usr/src/uts/common/os/lgrp.c
+++ b/usr/src/uts/common/os/lgrp.c
@@ -2773,8 +2773,8 @@ lgrp_loadavg(lpl_t *lpl, uint_t nrcpus, int ageflag)
new = LGRP_LOADAVG_MAX;
else if (new < 0)
new = 0;
- } while (cas32((lgrp_load_t *)&lpl->lpl_loadavg, old,
- new) != old);
+ } while (atomic_cas_32((lgrp_load_t *)&lpl->lpl_loadavg,
+ old, new) != old);
} else {
/*
* We're supposed to update the load, but not age it.
@@ -2792,8 +2792,8 @@ lgrp_loadavg(lpl_t *lpl, uint_t nrcpus, int ageflag)
*/
if (new < old)
new = LGRP_LOADAVG_MAX;
- } while (cas32((lgrp_load_t *)&lpl->lpl_loadavg, old,
- new) != old);
+ } while (atomic_cas_32((lgrp_load_t *)&lpl->lpl_loadavg,
+ old, new) != old);
}
/*
@@ -3354,7 +3354,7 @@ lgrp_move_thread(kthread_t *t, lpl_t *newlpl, int do_lgrpset_delete)
*/
new = 0;
}
- } while (cas32(
+ } while (atomic_cas_32(
(lgrp_load_t *)&lpl->lpl_loadavg, old,
new) != old);
@@ -3423,8 +3423,8 @@ lgrp_move_thread(kthread_t *t, lpl_t *newlpl, int do_lgrpset_delete)
*/
if (new < old)
new = UINT32_MAX;
- } while (cas32((lgrp_load_t *)&lpl->lpl_loadavg, old,
- new) != old);
+ } while (atomic_cas_32((lgrp_load_t *)&lpl->lpl_loadavg,
+ old, new) != old);
lpl = lpl->lpl_parent;
if (lpl == NULL)
diff --git a/usr/src/uts/common/os/msacct.c b/usr/src/uts/common/os/msacct.c
index c10dce81ca..928c6b3bb4 100644
--- a/usr/src/uts/common/os/msacct.c
+++ b/usr/src/uts/common/os/msacct.c
@@ -589,7 +589,7 @@ cpu_update_pct(kthread_t *t, hrtime_t newtime)
pctcpu = t->t_pctcpu;
npctcpu = cpu_decay(pctcpu, delta);
}
- } while (cas32(&t->t_pctcpu, pctcpu, npctcpu) != pctcpu);
+ } while (atomic_cas_32(&t->t_pctcpu, pctcpu, npctcpu) != pctcpu);
return (npctcpu);
}
@@ -657,7 +657,8 @@ new_mstate(kthread_t *t, int new_state)
newtime += oldtime;
t->t_mstate = new_state;
ms->ms_state_start = curtime;
- } while (cas64((uint64_t *)mstimep, oldtime, newtime) != oldtime);
+ } while (atomic_cas_64((uint64_t *)mstimep, oldtime, newtime) !=
+ oldtime);
/*
* When the system boots the initial startup thread will have a
@@ -781,7 +782,8 @@ restore_mstate(kthread_t *t)
}
oldtime = *mstimep;
newtime += oldtime;
- } while (cas64((uint64_t *)mstimep, oldtime, newtime) != oldtime);
+ } while (atomic_cas_64((uint64_t *)mstimep, oldtime, newtime) !=
+ oldtime);
/*
* Update the WAIT_CPU timer and per-cpu waitrq total.
diff --git a/usr/src/uts/common/os/mutex.c b/usr/src/uts/common/os/mutex.c
index 0bdb5fdfe7..a309ca89a3 100644
--- a/usr/src/uts/common/os/mutex.c
+++ b/usr/src/uts/common/os/mutex.c
@@ -243,7 +243,7 @@ mutex_panic(char *msg, mutex_impl_t *lp)
if (panicstr)
return;
- if (casptr(&panic_mutex_addr, NULL, lp) == NULL)
+ if (atomic_cas_ptr(&panic_mutex_addr, NULL, lp) == NULL)
panic_mutex = *lp;
panic("%s, lp=%p owner=%p thread=%p",
diff --git a/usr/src/uts/common/os/printf.c b/usr/src/uts/common/os/printf.c
index c9e1524de0..d47443334f 100644
--- a/usr/src/uts/common/os/printf.c
+++ b/usr/src/uts/common/os/printf.c
@@ -167,7 +167,7 @@ out:
new = old + len;
if (new >= PANICBUFSIZE)
new = panicbuf_log + len;
- } while (cas32(&panicbuf_index, old, new) != old);
+ } while (atomic_cas_32(&panicbuf_index, old, new) != old);
bcopy(body, &panicbuf[new - len], len);
}
if (bufp != buf)
diff --git a/usr/src/uts/common/os/rwlock.c b/usr/src/uts/common/os/rwlock.c
index e35b564983..f851686ce0 100644
--- a/usr/src/uts/common/os/rwlock.c
+++ b/usr/src/uts/common/os/rwlock.c
@@ -217,7 +217,7 @@ rw_panic(char *msg, rwlock_impl_t *lp)
if (panicstr)
return;
- if (casptr(&panic_rwlock_addr, NULL, lp) == NULL)
+ if (atomic_cas_ptr(&panic_rwlock_addr, NULL, lp) == NULL)
panic_rwlock = *lp;
panic("%s, lp=%p wwwh=%lx thread=%p",
diff --git a/usr/src/uts/common/sys/bitmap.h b/usr/src/uts/common/sys/bitmap.h
index d2592820b6..8d7054954d 100644
--- a/usr/src/uts/common/sys/bitmap.h
+++ b/usr/src/uts/common/sys/bitmap.h
@@ -171,9 +171,9 @@ extern int odd_parity(ulong_t);
* to 0 otherwise.
*/
#define BT_ATOMIC_SET(bitmap, bitindex) \
- { atomic_or_long(&(BT_WIM(bitmap, bitindex)), BT_BIW(bitindex)); }
+ { atomic_or_ulong(&(BT_WIM(bitmap, bitindex)), BT_BIW(bitindex)); }
#define BT_ATOMIC_CLEAR(bitmap, bitindex) \
- { atomic_and_long(&(BT_WIM(bitmap, bitindex)), ~BT_BIW(bitindex)); }
+ { atomic_and_ulong(&(BT_WIM(bitmap, bitindex)), ~BT_BIW(bitindex)); }
#define BT_ATOMIC_SET_EXCL(bitmap, bitindex, result) \
{ result = atomic_set_long_excl(&(BT_WIM(bitmap, bitindex)), \
diff --git a/usr/src/uts/common/sys/cpuvar.h b/usr/src/uts/common/sys/cpuvar.h
index 6c07bcbc8a..bdefd08e3a 100644
--- a/usr/src/uts/common/sys/cpuvar.h
+++ b/usr/src/uts/common/sys/cpuvar.h
@@ -528,8 +528,8 @@ typedef ulong_t cpuset_t; /* a set of CPUs */
largest = (uint_t)(highbit(set) - 1); \
}
-#define CPUSET_ATOMIC_DEL(set, cpu) atomic_and_long(&(set), ~CPUSET(cpu))
-#define CPUSET_ATOMIC_ADD(set, cpu) atomic_or_long(&(set), CPUSET(cpu))
+#define CPUSET_ATOMIC_DEL(set, cpu) atomic_and_ulong(&(set), ~CPUSET(cpu))
+#define CPUSET_ATOMIC_ADD(set, cpu) atomic_or_ulong(&(set), CPUSET(cpu))
#define CPUSET_ATOMIC_XADD(set, cpu, result) \
{ result = atomic_set_long_excl(&(set), (cpu)); }
diff --git a/usr/src/uts/common/vm/seg_kp.c b/usr/src/uts/common/vm/seg_kp.c
index ad5d88b0d2..c810adf41c 100644
--- a/usr/src/uts/common/vm/seg_kp.c
+++ b/usr/src/uts/common/vm/seg_kp.c
@@ -840,7 +840,7 @@ segkp_map_red(void)
atomic_add_32(&red_nmapped, 1);
while (fp - (uintptr_t)curthread->t_stkbase < red_closest) {
- (void) cas32(&red_closest, red_closest,
+ (void) atomic_cas_32(&red_closest, red_closest,
(uint32_t)(fp - (uintptr_t)curthread->t_stkbase));
}
return (1);
diff --git a/usr/src/uts/common/vm/seg_vn.c b/usr/src/uts/common/vm/seg_vn.c
index 31c293d416..2803f071f7 100644
--- a/usr/src/uts/common/vm/seg_vn.c
+++ b/usr/src/uts/common/vm/seg_vn.c
@@ -9711,17 +9711,18 @@ again:
* replication and T1 new home is different from lgrp used for text
* replication. When this happens asyncronous segvn thread rechecks if
* segments should change lgrps used for text replication. If we fail
- * to set p_tr_lgrpid with cas32 then set it to NLGRPS_MAX without cas
- * if it's not already NLGRPS_MAX and not equal lgrp_id we want to
- * use. We don't need to use cas in this case because another thread
- * that races in between our non atomic check and set may only change
- * p_tr_lgrpid to NLGRPS_MAX at this point.
+ * to set p_tr_lgrpid with atomic_cas_32 then set it to NLGRPS_MAX
+ * without cas if it's not already NLGRPS_MAX and not equal lgrp_id
+ * we want to use. We don't need to use cas in this case because
+ * another thread that races in between our non atomic check and set
+ * may only change p_tr_lgrpid to NLGRPS_MAX at this point.
*/
ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
olid = p->p_tr_lgrpid;
if (lgrp_id != olid && olid != NLGRPS_MAX) {
lgrp_id_t nlid = (olid == LGRP_NONE) ? lgrp_id : NLGRPS_MAX;
- if (cas32((uint32_t *)&p->p_tr_lgrpid, olid, nlid) != olid) {
+ if (atomic_cas_32((uint32_t *)&p->p_tr_lgrpid, olid, nlid) !=
+ olid) {
olid = p->p_tr_lgrpid;
ASSERT(olid != LGRP_NONE);
if (olid != lgrp_id && olid != NLGRPS_MAX) {
diff --git a/usr/src/uts/i86pc/os/intr.c b/usr/src/uts/i86pc/os/intr.c
index 6006ca1202..2569812c47 100644
--- a/usr/src/uts/i86pc/os/intr.c
+++ b/usr/src/uts/i86pc/os/intr.c
@@ -1218,15 +1218,16 @@ cpu_intr_swtch_enter(kthread_id_t t)
* PIL and zeroed its timestamp. Since there was no pinned thread to
* return to, swtch() gets called and we end up here.
*
- * Note that we use atomic ops below (cas64 and atomic_add_64), which
- * we don't use in the functions above, because we're not called
- * with interrupts blocked, but the epilog/prolog functions are.
+ * Note that we use atomic ops below (atomic_cas_64 and
+ * atomic_add_64), which we don't use in the functions above,
+ * because we're not called with interrupts blocked, but the
+ * epilog/prolog functions are.
*/
if (t->t_intr_start) {
do {
start = t->t_intr_start;
interval = tsc_read() - start;
- } while (cas64(&t->t_intr_start, start, 0) != start);
+ } while (atomic_cas_64(&t->t_intr_start, start, 0) != start);
cpu = CPU;
cpu->cpu_m.intrstat[t->t_pil][0] += interval;
@@ -1250,7 +1251,7 @@ cpu_intr_swtch_exit(kthread_id_t t)
do {
ts = t->t_intr_start;
- } while (cas64(&t->t_intr_start, ts, tsc_read()) != ts);
+ } while (atomic_cas_64(&t->t_intr_start, ts, tsc_read()) != ts);
}
/*
diff --git a/usr/src/uts/i86pc/os/memnode.c b/usr/src/uts/i86pc/os/memnode.c
index 35185bcd84..2c1c6e91d5 100644
--- a/usr/src/uts/i86pc/os/memnode.c
+++ b/usr/src/uts/i86pc/os/memnode.c
@@ -86,7 +86,7 @@ mem_node_add_slice(pfn_t start, pfn_t end)
mnode = PFN_2_MEM_NODE(start);
ASSERT(mnode >= 0 && mnode < max_mem_nodes);
- if (cas32((uint32_t *)&mem_node_config[mnode].exists, 0, 1)) {
+ if (atomic_cas_32((uint32_t *)&mem_node_config[mnode].exists, 0, 1)) {
/*
* Add slice to existing node.
*/
@@ -101,7 +101,8 @@ mem_node_add_slice(pfn_t start, pfn_t end)
do {
oldmask = memnodes_mask;
newmask = memnodes_mask | (1ull << mnode);
- } while (cas64(&memnodes_mask, oldmask, newmask) != oldmask);
+ } while (atomic_cas_64(&memnodes_mask, oldmask, newmask) !=
+ oldmask);
}
/*
@@ -161,7 +162,7 @@ mem_node_del_slice(pfn_t start, pfn_t end)
do {
omask = memnodes_mask;
nmask = omask & ~(1ull << mnode);
- } while (cas64(&memnodes_mask, omask, nmask) != omask);
+ } while (atomic_cas_64(&memnodes_mask, omask, nmask) != omask);
atomic_add_16(&num_memnodes, -1);
mem_node_config[mnode].exists = 0;
}
@@ -229,7 +230,7 @@ mem_node_alloc()
* a first time memnode creation race.
*/
for (mnode = 0; mnode < max_mem_nodes; mnode++)
- if (cas32((uint32_t *)&mem_node_config[mnode].exists,
+ if (atomic_cas_32((uint32_t *)&mem_node_config[mnode].exists,
0, 1) == 0)
break;
@@ -242,7 +243,7 @@ mem_node_alloc()
do {
oldmask = memnodes_mask;
newmask = memnodes_mask | (1ull << mnode);
- } while (cas64(&memnodes_mask, oldmask, newmask) != oldmask);
+ } while (atomic_cas_64(&memnodes_mask, oldmask, newmask) != oldmask);
return (mnode);
}
diff --git a/usr/src/uts/i86pc/os/x_call.c b/usr/src/uts/i86pc/os/x_call.c
index 17d5123508..8c2821fc73 100644
--- a/usr/src/uts/i86pc/os/x_call.c
+++ b/usr/src/uts/i86pc/os/x_call.c
@@ -51,9 +51,9 @@
*
* This implementation uses a message passing architecture to allow multiple
* concurrent cross calls to be in flight at any given time. We use the cmpxchg
- * instruction, aka casptr(), to implement simple efficient work queues for
- * message passing between CPUs with almost no need for regular locking.
- * See xc_extract() and xc_insert() below.
+ * instruction, aka atomic_cas_ptr(), to implement simple efficient work
+ * queues for message passing between CPUs with almost no need for regular
+ * locking. See xc_extract() and xc_insert() below.
*
* The general idea is that initiating a cross call means putting a message
* on a target(s) CPU's work queue. Any synchronization is handled by passing
@@ -64,8 +64,9 @@
* with every message that finishes all processing.
*
* The code needs no mfence or other membar_*() calls. The uses of
- * casptr(), cas32() and atomic_dec_32() for the message passing are
- * implemented with LOCK prefix instructions which are equivalent to mfence.
+ * atomic_cas_ptr(), atomic_cas_32() and atomic_dec_32() for the message
+ * passing are implemented with LOCK prefix instructions which are
+ * equivalent to mfence.
*
* One interesting aspect of this implmentation is that it allows 2 or more
* CPUs to initiate cross calls to intersecting sets of CPUs at the same time.
@@ -144,7 +145,7 @@ xc_increment(struct machcpu *mcpu)
int old;
do {
old = mcpu->xc_work_cnt;
- } while (cas32((uint32_t *)&mcpu->xc_work_cnt, old, old + 1) != old);
+ } while (atomic_cas_32(&mcpu->xc_work_cnt, old, old + 1) != old);
return (old);
}
@@ -168,7 +169,7 @@ xc_insert(void *queue, xc_msg_t *msg)
do {
old_head = (xc_msg_t *)*(volatile xc_msg_t **)queue;
msg->xc_next = old_head;
- } while (casptr(queue, old_head, msg) != old_head);
+ } while (atomic_cas_ptr(queue, old_head, msg) != old_head);
}
/*
@@ -185,7 +186,8 @@ xc_extract(xc_msg_t **queue)
old_head = (xc_msg_t *)*(volatile xc_msg_t **)queue;
if (old_head == NULL)
return (old_head);
- } while (casptr(queue, old_head, old_head->xc_next) != old_head);
+ } while (atomic_cas_ptr(queue, old_head, old_head->xc_next) !=
+ old_head);
old_head->xc_next = NULL;
return (old_head);
}
@@ -608,7 +610,7 @@ xc_priority_common(
XC_BT_SET(xc_priority_set, c);
send_dirint(c, XC_HI_PIL);
for (i = 0; i < 10; ++i) {
- (void) casptr(&cpup->cpu_m.xc_msgbox,
+ (void) atomic_cas_ptr(&cpup->cpu_m.xc_msgbox,
cpup->cpu_m.xc_msgbox, cpup->cpu_m.xc_msgbox);
}
}
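One more idiom from x_call.c: the flush loop at the end of xc_priority_common() CASes xc_msgbox against its own current value. The store, when it happens, changes nothing; the point is that on x86 the LOCK-prefixed cmpxchg acts as a full barrier and yields a serialized, coherent re-read of the mailbox, as the block comment earlier in the file notes. A sketch (function name hypothetical):

/*
 * Serialized re-read of *p: compare *p with what we just loaded
 * and "store" that same value back. Memory is unchanged whether
 * the CAS wins or loses, but the LOCK-prefixed instruction
 * underneath orders it like a full fence.
 */
static void *
coherent_reread(void *volatile *p)
{
	return (atomic_cas_ptr(p, *p, *p));
}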
diff --git a/usr/src/uts/i86pc/vm/hat_i86.c b/usr/src/uts/i86pc/vm/hat_i86.c
index 8da02a4c36..4f41c68d9f 100644
--- a/usr/src/uts/i86pc/vm/hat_i86.c
+++ b/usr/src/uts/i86pc/vm/hat_i86.c
@@ -944,7 +944,7 @@ hat_init_finish(void)
/*
* On 32 bit PAE mode, PTE's are 64 bits, but ordinary atomic memory references
- * are 32 bit, so for safety we must use cas64() to install these.
+ * are 32 bit, so for safety we must use atomic_cas_64() to install these.
*/
#ifdef __i386
static void
@@ -967,7 +967,7 @@ reload_pae32(hat_t *hat, cpu_t *cpu)
pte = dest[i];
if (pte == src[i])
break;
- if (cas64(dest + i, pte, src[i]) != src[i])
+ if (atomic_cas_64(dest + i, pte, src[i]) != src[i])
break;
}
}
@@ -1988,7 +1988,7 @@ flush_all_tlb_entries(void)
#define TLB_CPU_HALTED (01ul)
#define TLB_INVAL_ALL (02ul)
#define CAS_TLB_INFO(cpu, old, new) \
- caslong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new))
+ atomic_cas_ulong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new))
/*
* Record that a CPU is going idle
@@ -1996,7 +1996,7 @@ flush_all_tlb_entries(void)
void
tlb_going_idle(void)
{
- atomic_or_long((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED);
+ atomic_or_ulong((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED);
}
/*
diff --git a/usr/src/uts/i86pc/vm/hat_pte.h b/usr/src/uts/i86pc/vm/hat_pte.h
index 8e5686f4ff..756df7020a 100644
--- a/usr/src/uts/i86pc/vm/hat_pte.h
+++ b/usr/src/uts/i86pc/vm/hat_pte.h
@@ -26,8 +26,6 @@
#ifndef _VM_HAT_PTE_H
#define _VM_HAT_PTE_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -235,7 +233,8 @@ struct hat_mmu_info {
* The concept of a VA hole exists in AMD64. This might need to be made
* model specific eventually.
*
- * In the 64 bit kernel PTE loads are atomic, but need cas64 on 32 bit kernel.
+ * In the 64 bit kernel PTE loads are atomic, but need atomic_cas_64 on 32
+ * bit kernel.
*/
#if defined(__amd64)
@@ -248,7 +247,7 @@ struct hat_mmu_info {
#define FMT_PTE "0x%lx"
#define GET_PTE(ptr) (*(x86pte_t *)(ptr))
#define SET_PTE(ptr, pte) (*(x86pte_t *)(ptr) = pte)
-#define CAS_PTE(ptr, x, y) cas64(ptr, x, y)
+#define CAS_PTE(ptr, x, y) atomic_cas_64(ptr, x, y)
#elif defined(__i386)
@@ -263,8 +262,8 @@ extern x86pte_t get_pte64(x86pte_t *ptr);
((mmu.pae_hat ? ((x86pte32_t *)(ptr))[1] = (pte >> 32) : 0), \
*(x86pte32_t *)(ptr) = pte)
#define CAS_PTE(ptr, x, y) \
- (mmu.pae_hat ? cas64(ptr, x, y) : \
- cas32((uint32_t *)(ptr), (uint32_t)(x), (uint32_t)(y)))
+ (mmu.pae_hat ? atomic_cas_64(ptr, x, y) : \
+ atomic_cas_32((uint32_t *)(ptr), (uint32_t)(x), (uint32_t)(y)))
#endif /* __i386 */
diff --git a/usr/src/uts/intel/asm/atomic.h b/usr/src/uts/intel/asm/atomic.h
index 6dd675c613..63d90533d9 100644
--- a/usr/src/uts/intel/asm/atomic.h
+++ b/usr/src/uts/intel/asm/atomic.h
@@ -38,59 +38,8 @@ extern "C" {
#if defined(__amd64)
-extern __GNU_INLINE void
-atomic_or_long(ulong_t *target, ulong_t bits)
-{
- __asm__ __volatile__(
- "lock; orq %1, (%0)"
- : /* no output */
- : "r" (target), "r" (bits));
-}
-
-extern __GNU_INLINE void
-atomic_and_long(ulong_t *target, ulong_t bits)
-{
- __asm__ __volatile__(
- "lock; andq %1, (%0)"
- : /* no output */
- : "r" (target), "r" (bits));
-}
-
-#ifdef notdef
-extern __GNU_INLINE uint64_t
-cas64(uint64_t *target, uint64_t cmp,
- uint64_t newval)
-{
- uint64_t retval;
-
- __asm__ __volatile__(
- "movq %2, %%rax; lock; cmpxchgq %3, (%1)"
- : "=a" (retval)
- : "r" (target), "r" (cmp), "r" (newval));
- return (retval);
-}
-#endif
-
#elif defined(__i386)
-extern __GNU_INLINE void
-atomic_or_long(ulong_t *target, ulong_t bits)
-{
- __asm__ __volatile__(
- "lock; orl %1, (%0)"
- : /* no output */
- : "r" (target), "r" (bits));
-}
-
-extern __GNU_INLINE void
-atomic_and_long(ulong_t *target, ulong_t bits)
-{
- __asm__ __volatile__(
- "lock; andl %1, (%0)"
- : /* no output */
- : "r" (target), "r" (bits));
-}
-
#else
#error "port me"
#endif
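The inline-assembly atomic_or_long()/atomic_and_long() variants deleted above are superseded by the common atomic_or_ulong()/atomic_and_ulong() routines that bitmap.h and cpuvar.h now call. A usage sketch for atomic bit set and clear, assuming illumos's <sys/atomic.h> (the flag word is hypothetical):

static volatile ulong_t flag_word;

atomic_or_ulong(&flag_word, 1UL << 2);		/* atomically set bit 2 */
atomic_and_ulong(&flag_word, ~(1UL << 2));	/* atomically clear bit 2 */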
diff --git a/usr/src/uts/intel/sys/synch32.h b/usr/src/uts/intel/sys/synch32.h
index 6b33cfd1a5..6b0c2e298f 100644
--- a/usr/src/uts/intel/sys/synch32.h
+++ b/usr/src/uts/intel/sys/synch32.h
@@ -27,8 +27,6 @@
#ifndef _SYS_SYNCH32_H
#define _SYS_SYNCH32_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -42,7 +40,7 @@ extern "C" {
#define mutex_owner data
/* used to atomically operate on whole word via cas or swap instruction */
#define mutex_lockword lock.lock32.lockword
-/* this requires cas64 */
+/* this requires atomic_cas_64 */
#define mutex_lockword64 lock.owner64
/* these are bytes */
#define mutex_lockw lock.lock64.pad[7]
diff --git a/usr/src/uts/sfmmu/vm/hat_sfmmu.c b/usr/src/uts/sfmmu/vm/hat_sfmmu.c
index a50a2db579..4525a67c44 100644
--- a/usr/src/uts/sfmmu/vm/hat_sfmmu.c
+++ b/usr/src/uts/sfmmu/vm/hat_sfmmu.c
@@ -3678,7 +3678,7 @@ sfmmu_shadow_hcreate(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz, uint_t flags)
do {
shw_mask = hmeblkp->hblk_shw_mask;
newshw_mask = shw_mask | (1 << vshift);
- newshw_mask = cas32(&hmeblkp->hblk_shw_mask, shw_mask,
+ newshw_mask = atomic_cas_32(&hmeblkp->hblk_shw_mask, shw_mask,
newshw_mask);
} while (newshw_mask != shw_mask);
@@ -11678,7 +11678,7 @@ sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
shw_mask = shw_hblkp->hblk_shw_mask;
ASSERT(shw_mask & (1 << vshift));
newshw_mask = shw_mask & ~(1 << vshift);
- newshw_mask = cas32(&shw_hblkp->hblk_shw_mask,
+ newshw_mask = atomic_cas_32(&shw_hblkp->hblk_shw_mask,
shw_mask, newshw_mask);
} while (newshw_mask != shw_mask);
hmeblkp->hblk_shadow = NULL;
@@ -15757,7 +15757,7 @@ sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
shw_mask = shw_hblkp->hblk_shw_mask;
ASSERT(shw_mask & (1 << vshift));
newshw_mask = shw_mask & ~(1 << vshift);
- newshw_mask = cas32(&shw_hblkp->hblk_shw_mask,
+ newshw_mask = atomic_cas_32(&shw_hblkp->hblk_shw_mask,
shw_mask, newshw_mask);
} while (newshw_mask != shw_mask);
hmeblkp->hblk_shadow = NULL;
diff --git a/usr/src/uts/sparc/sys/synch32.h b/usr/src/uts/sparc/sys/synch32.h
index 06ef98a1c4..2bc06f1bd3 100644
--- a/usr/src/uts/sparc/sys/synch32.h
+++ b/usr/src/uts/sparc/sys/synch32.h
@@ -27,8 +27,6 @@
#ifndef _SYS_SYNCH32_H
#define _SYS_SYNCH32_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -42,7 +40,7 @@ extern "C" {
#define mutex_owner data
/* used to atomically operate on whole word via cas or swap instruction */
#define mutex_lockword lock.lock32.lockword
-/* this requires cas64 */
+/* this requires atomic_cas_64 */
#define mutex_lockword64 lock.owner64
/* these are bytes */
#define mutex_lockw lock.lock64.pad[4]
diff --git a/usr/src/uts/sparc/v9/syscall/install_utrap.c b/usr/src/uts/sparc/v9/syscall/install_utrap.c
index fa6408f82e..ba6c98598c 100644
--- a/usr/src/uts/sparc/v9/syscall/install_utrap.c
+++ b/usr/src/uts/sparc/v9/syscall/install_utrap.c
@@ -23,8 +23,6 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>
@@ -81,13 +79,13 @@ install_utrap(utrap_entry_t type, utrap_handler_t new_handler,
}
/*
* Allocate proc space for saving the addresses to these user
- * trap handlers, which must later be freed. Use casptr to
+ * trap handlers, which must later be freed. Use atomic_cas_ptr to
* do this atomically.
*/
if (p->p_utraps == NULL) {
pv = sv = kmem_zalloc((UT_PRECISE_MAXTRAPS+1) *
sizeof (utrap_handler_t *), KM_SLEEP);
- tmp = casptr(&p->p_utraps, NULL, sv);
+ tmp = atomic_cas_ptr(&p->p_utraps, NULL, sv);
if (tmp != NULL) {
kmem_free(pv, (UT_PRECISE_MAXTRAPS+1) *
sizeof (utrap_handler_t *));
@@ -96,12 +94,12 @@ install_utrap(utrap_entry_t type, utrap_handler_t new_handler,
ASSERT(p->p_utraps != NULL);
/*
- * Use casptr to atomically install the handler.
+ * Use atomic_cas_ptr to atomically install the handler.
*/
ov = p->p_utraps[idx];
if (new_handler != (utrap_handler_t)UTRAP_UTH_NOCHANGE) {
for (;;) {
- tmp = casptr(&p->p_utraps[idx], ov, nv);
+ tmp = atomic_cas_ptr(&p->p_utraps[idx], ov, nv);
if (ov == tmp)
break;
ov = tmp;
@@ -225,13 +223,13 @@ sparc_utrap_install(utrap_entry_t type,
/*
* Allocate proc space for saving the addresses to these user
- * trap handlers, which must later be freed. Use casptr to
+ * trap handlers, which must later be freed. Use atomic_cas_ptr to
* do this atomically.
*/
if (p->p_utraps == NULL) {
pv = sv = kmem_zalloc((UT_PRECISE_MAXTRAPS+1) *
sizeof (utrap_handler_t *), KM_SLEEP);
- tmp = casptr(&p->p_utraps, NULL, sv);
+ tmp = atomic_cas_ptr(&p->p_utraps, NULL, sv);
if (tmp != NULL) {
kmem_free(pv, (UT_PRECISE_MAXTRAPS+1) *
sizeof (utrap_handler_t *));
@@ -240,12 +238,12 @@ sparc_utrap_install(utrap_entry_t type,
ASSERT(p->p_utraps != NULL);
/*
- * Use casptr to atomically install the handlers.
+ * Use atomic_cas_ptr to atomically install the handlers.
*/
ov = p->p_utraps[idx];
if (new_precise != (utrap_handler_t)UTH_NOCHANGE) {
for (;;) {
- tmp = casptr(&p->p_utraps[idx], ov, nvp);
+ tmp = atomic_cas_ptr(&p->p_utraps[idx], ov, nvp);
if (ov == tmp)
break;
ov = tmp;
diff --git a/usr/src/uts/sun4/os/machdep.c b/usr/src/uts/sun4/os/machdep.c
index 60d6f07f53..e6e4875da6 100644
--- a/usr/src/uts/sun4/os/machdep.c
+++ b/usr/src/uts/sun4/os/machdep.c
@@ -550,7 +550,7 @@ cpu_intr_swtch_enter(kthread_id_t t)
do {
start = t->t_intr_start;
interval = CLOCK_TICK_COUNTER() - start;
- } while (cas64(&t->t_intr_start, start, 0) != start);
+ } while (atomic_cas_64(&t->t_intr_start, start, 0) != start);
cpu = CPU;
if (cpu->cpu_m.divisor > 1)
interval *= cpu->cpu_m.divisor;
@@ -577,7 +577,8 @@ cpu_intr_swtch_exit(kthread_id_t t)
do {
ts = t->t_intr_start;
- } while (cas64(&t->t_intr_start, ts, CLOCK_TICK_COUNTER()) != ts);
+ } while (atomic_cas_64(&t->t_intr_start, ts, CLOCK_TICK_COUNTER()) !=
+ ts);
}
diff --git a/usr/src/uts/sun4/os/memnode.c b/usr/src/uts/sun4/os/memnode.c
index 9d03e14ad9..bbee6dd2c6 100644
--- a/usr/src/uts/sun4/os/memnode.c
+++ b/usr/src/uts/sun4/os/memnode.c
@@ -83,7 +83,7 @@ mem_node_add_slice(pfn_t start, pfn_t end)
mnode = PFN_2_MEM_NODE(start);
ASSERT(mnode < max_mem_nodes);
- if (cas32((uint32_t *)&mem_node_config[mnode].exists, 0, 1)) {
+ if (atomic_cas_32((uint32_t *)&mem_node_config[mnode].exists, 0, 1)) {
/*
* Add slice to existing node.
*/
@@ -98,7 +98,8 @@ mem_node_add_slice(pfn_t start, pfn_t end)
do {
oldmask = memnodes_mask;
newmask = memnodes_mask | (1ull << mnode);
- } while (cas64(&memnodes_mask, oldmask, newmask) != oldmask);
+ } while (atomic_cas_64(&memnodes_mask, oldmask, newmask) !=
+ oldmask);
}
/*
* Let the common lgrp framework know about the new memory
@@ -158,7 +159,7 @@ mem_node_del_slice(pfn_t start, pfn_t end)
do {
omask = memnodes_mask;
nmask = omask & ~(1ull << mnode);
- } while (cas64(&memnodes_mask, omask, nmask) != omask);
+ } while (atomic_cas_64(&memnodes_mask, omask, nmask) != omask);
atomic_add_16(&num_memnodes, -1);
mem_node_config[mnode].exists = 0;
}
@@ -220,7 +221,7 @@ mem_node_alloc()
* a first time memnode creation race.
*/
for (mnode = 0; mnode < max_mem_nodes; mnode++)
- if (cas32((uint32_t *)&mem_node_config[mnode].exists,
+ if (atomic_cas_32((uint32_t *)&mem_node_config[mnode].exists,
0, 1) == 0)
break;
@@ -233,7 +234,7 @@ mem_node_alloc()
do {
oldmask = memnodes_mask;
newmask = memnodes_mask | (1ull << mnode);
- } while (cas64(&memnodes_mask, oldmask, newmask) != oldmask);
+ } while (atomic_cas_64(&memnodes_mask, oldmask, newmask) != oldmask);
return (mnode);
}
diff --git a/usr/src/uts/sun4/os/prom_subr.c b/usr/src/uts/sun4/os/prom_subr.c
index 7bacfd2090..578692011f 100644
--- a/usr/src/uts/sun4/os/prom_subr.c
+++ b/usr/src/uts/sun4/os/prom_subr.c
@@ -23,8 +23,6 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/param.h>
#include <sys/cmn_err.h>
@@ -273,7 +271,7 @@ kern_preprom(void)
* previous kpreempt_disable() before returning since
* preemption was disabled by an earlier kern_preprom.
*/
- prcp = casptr((void *)&prom_cpu, NULL, cp);
+ prcp = atomic_cas_ptr((void *)&prom_cpu, NULL, cp);
if (prcp == NULL ||
(prcp == cp && prom_thread == curthread)) {
if (prcp == cp)
@@ -310,7 +308,7 @@ kern_preprom(void)
* the lock. If we get it or already hold it, break.
*/
ASSERT(getpil() == PIL_MAX);
- prcp = casptr((void *)&prom_cpu, NULL, cp);
+ prcp = atomic_cas_ptr((void *)&prom_cpu, NULL, cp);
if (prcp == NULL || prcp == cp)
break;
}
diff --git a/usr/src/uts/sun4/vm/vm_dep.c b/usr/src/uts/sun4/vm/vm_dep.c
index baa8271ca9..291122177a 100644
--- a/usr/src/uts/sun4/vm/vm_dep.c
+++ b/usr/src/uts/sun4/vm/vm_dep.c
@@ -898,7 +898,7 @@ get_color_start(struct as *as)
do {
old = color_start_current;
new = old + (color_start_stride << (vac_shift - MMU_PAGESHIFT));
- } while (cas32(&color_start_current, old, new) != old);
+ } while (atomic_cas_32(&color_start_current, old, new) != old);
return ((uint_t)(new));
}
diff --git a/usr/src/uts/sun4u/cpu/spitfire.c b/usr/src/uts/sun4u/cpu/spitfire.c
index 9784a2338a..bf6bc3dcd0 100644
--- a/usr/src/uts/sun4u/cpu/spitfire.c
+++ b/usr/src/uts/sun4u/cpu/spitfire.c
@@ -4324,7 +4324,7 @@ ce_count_unum(int status, int len, char *unum)
/*
* Initialize the leaky_bucket timeout
*/
- if (casptr(&leaky_bucket_timeout_id,
+ if (atomic_cas_ptr(&leaky_bucket_timeout_id,
TIMEOUT_NONE, TIMEOUT_SET) == TIMEOUT_NONE)
add_leaky_bucket_timeout();
diff --git a/usr/src/uts/sun4u/cpu/us3_common.c b/usr/src/uts/sun4u/cpu/us3_common.c
index 7fc8fcc986..301d7874df 100644
--- a/usr/src/uts/sun4u/cpu/us3_common.c
+++ b/usr/src/uts/sun4u/cpu/us3_common.c
@@ -898,7 +898,7 @@ mondo_recover(uint16_t cpuid, int bn)
cheetah_livelock_entry_t *histp;
uint64_t idsr;
- if (cas32(&sendmondo_in_recover, 0, 1) != 0) {
+ if (atomic_cas_32(&sendmondo_in_recover, 0, 1) != 0) {
/*
* Wait while recovery takes place
*/
@@ -984,7 +984,7 @@ done:
CHEETAH_LIVELOCK_ENTRY_SET(histp, recovery_time, \
(end_hrt - begin_hrt));
- while (cas32(&sendmondo_in_recover, 1, 0) != 1)
+ while (atomic_cas_32(&sendmondo_in_recover, 1, 0) != 1)
;
return (retval);
@@ -6300,7 +6300,7 @@ cpu_ce_delayed_ec_logout(uint64_t afar)
return (0);
clop = CPU_PRIVATE_PTR(CPU, chpr_cecc_logout);
- if (cas64(&clop->clo_data.chd_afar, LOGOUT_INVALID, afar) !=
+ if (atomic_cas_64(&clop->clo_data.chd_afar, LOGOUT_INVALID, afar) !=
LOGOUT_INVALID)
return (0);
diff --git a/usr/src/uts/sun4u/io/zuluvm.c b/usr/src/uts/sun4u/io/zuluvm.c
index 46c68181ec..f1184f4580 100644
--- a/usr/src/uts/sun4u/io/zuluvm.c
+++ b/usr/src/uts/sun4u/io/zuluvm.c
@@ -23,8 +23,6 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* zuluvm module
*
@@ -78,7 +76,7 @@
#define ZULUVM_UNLOCK mutex_exit(&(zdev->dev_lck))
#define ZULUVM_SET_STATE(_z, b, c) \
- cas32((uint32_t *)&((_z)->zvm.state), c, b)
+ atomic_cas_32((uint32_t *)&((_z)->zvm.state), c, b)
#define ZULUVM_GET_STATE(_z) \
(_z)->zvm.state
#define ZULUVM_SET_IDLE(_z) \
diff --git a/usr/src/uts/sun4u/os/ppage.c b/usr/src/uts/sun4u/os/ppage.c
index 17058e5029..87986771f1 100644
--- a/usr/src/uts/sun4u/os/ppage.c
+++ b/usr/src/uts/sun4u/os/ppage.c
@@ -23,8 +23,6 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/archsystm.h>
@@ -189,7 +187,7 @@ ppmapin(page_t *pp, uint_t vprot, caddr_t hint)
#ifdef PPDEBUG
align_hits[color]++;
#endif /* PPDEBUG */
- if (casptr(&ppmap_vaddrs[index],
+ if (atomic_cas_ptr(&ppmap_vaddrs[index],
va, NULL) == va) {
hat_memload(kas.a_hat, va, pp,
vprot | HAT_NOSYNC,
@@ -319,7 +317,7 @@ pp_load_tlb(processorid_t cpu, caddr_t **pslot, page_t *pp, uint_t prot)
for (i = start; i < pp_slots; i += stride) {
if (*myslot == NULL) {
- if (casptr(myslot, NULL, va) == NULL)
+ if (atomic_cas_ptr(myslot, NULL, va) == NULL)
break;
}
myslot += stride;
diff --git a/usr/src/uts/sun4u/vm/zulu_hat.c b/usr/src/uts/sun4u/vm/zulu_hat.c
index e50e38d7b9..5ecadc028f 100644
--- a/usr/src/uts/sun4u/vm/zulu_hat.c
+++ b/usr/src/uts/sun4u/vm/zulu_hat.c
@@ -278,7 +278,7 @@ zulu_ctx_tsb_lock_enter(struct zulu_hat *zhat)
new_lck = lck | ZULU_CTX_LOCK;
- old_lck = cas64(plck, lck, new_lck);
+ old_lck = atomic_cas_64(plck, lck, new_lck);
if (old_lck == lck) {
/*
diff --git a/usr/src/uts/sun4v/os/ppage.c b/usr/src/uts/sun4v/os/ppage.c
index 67c49996b5..c5ddafc5a9 100644
--- a/usr/src/uts/sun4v/os/ppage.c
+++ b/usr/src/uts/sun4v/os/ppage.c
@@ -23,8 +23,6 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/archsystm.h>
@@ -161,7 +159,8 @@ ppmapin(page_t *pp, uint_t vprot, caddr_t hint)
#ifdef PPDEBUG
align_hits++;
#endif /* PPDEBUG */
- if (casptr(&ppmap_vaddrs[nset], va, NULL) == va) {
+ if (atomic_cas_ptr(&ppmap_vaddrs[nset], va, NULL) ==
+ va) {
hat_memload(kas.a_hat, va, pp,
vprot | HAT_NOSYNC,
HAT_LOAD_LOCK);