summaryrefslogtreecommitdiff
path: root/usr/src
diff options
context:
space:
mode:
Diffstat (limited to 'usr/src')
-rw-r--r--usr/src/uts/common/crypto/io/crypto.c4
-rw-r--r--usr/src/uts/common/crypto/io/dprov.c4
-rw-r--r--usr/src/uts/common/disp/cmt.c8
-rw-r--r--usr/src/uts/common/dtrace/fasttrap.c12
-rw-r--r--usr/src/uts/common/dtrace/profile.c6
-rw-r--r--usr/src/uts/common/fs/ctfs/ctfs_root.c2
-rw-r--r--usr/src/uts/common/fs/dnlc.c4
-rw-r--r--usr/src/uts/common/fs/fem.c4
-rw-r--r--usr/src/uts/common/fs/lofs/lofs_subr.c8
-rw-r--r--usr/src/uts/common/fs/mntfs/mntvnops.c4
-rw-r--r--usr/src/uts/common/fs/nfs/nfs3_vnops.c6
-rw-r--r--usr/src/uts/common/fs/nfs/nfs4_client.c8
-rw-r--r--usr/src/uts/common/fs/nfs/nfs4_client_state.c6
-rw-r--r--usr/src/uts/common/fs/nfs/nfs4_db.c6
-rw-r--r--usr/src/uts/common/fs/nfs/nfs4_rnode.c4
-rw-r--r--usr/src/uts/common/fs/nfs/nfs4_subr.c10
-rw-r--r--usr/src/uts/common/fs/nfs/nfs4_vnops.c6
-rw-r--r--usr/src/uts/common/fs/nfs/nfs_subr.c14
-rw-r--r--usr/src/uts/common/fs/nfs/nfs_vnops.c6
-rw-r--r--usr/src/uts/common/fs/objfs/objfs_vfs.c2
-rw-r--r--usr/src/uts/common/fs/proc/prvnops.c4
-rw-r--r--usr/src/uts/common/fs/sharefs/sharefs_vfsops.c2
-rw-r--r--usr/src/uts/common/fs/sharefs/sharefs_vnops.c6
-rw-r--r--usr/src/uts/common/fs/sharefs/sharetab.c18
-rw-r--r--usr/src/uts/common/fs/smbclnt/smbfs/smbfs_subr2.c4
-rw-r--r--usr/src/uts/common/fs/sockfs/nl7curi.c12
-rw-r--r--usr/src/uts/common/fs/sockfs/nl7curi.h6
-rw-r--r--usr/src/uts/common/fs/sockfs/sockfilter_impl.h2
-rw-r--r--usr/src/uts/common/fs/sockfs/socksyscalls.c2
-rw-r--r--usr/src/uts/common/fs/ufs/lufs.c4
-rw-r--r--usr/src/uts/common/fs/ufs/ufs_directio.c4
-rw-r--r--usr/src/uts/common/fs/ufs/ufs_filio.c8
-rw-r--r--usr/src/uts/common/fs/ufs/ufs_lockfs.c30
-rw-r--r--usr/src/uts/common/fs/ufs/ufs_vfsops.c14
-rw-r--r--usr/src/uts/common/fs/ufs/ufs_vnops.c12
-rw-r--r--usr/src/uts/common/fs/vfs.c4
-rw-r--r--usr/src/uts/common/fs/vnode.c30
-rw-r--r--usr/src/uts/common/fs/zfs/dbuf.c4
-rw-r--r--usr/src/uts/common/fs/zfs/spa.c4
-rw-r--r--usr/src/uts/common/fs/zfs/sys/refcount.h4
-rw-r--r--usr/src/uts/common/fs/zfs/vdev_cache.c2
-rw-r--r--usr/src/uts/common/fs/zfs/vdev_label.c4
-rw-r--r--usr/src/uts/common/fs/zfs/zfs_vfsops.c4
-rw-r--r--usr/src/uts/common/fs/zfs/zio_inject.c4
-rw-r--r--usr/src/uts/common/inet/ilb/ilb.c2
-rw-r--r--usr/src/uts/common/inet/ilb/ilb_nat.c2
-rw-r--r--usr/src/uts/common/inet/ip.h4
-rw-r--r--usr/src/uts/common/inet/ip/igmp.c12
-rw-r--r--usr/src/uts/common/inet/ip/ip_attr.c8
-rw-r--r--usr/src/uts/common/inet/ip/ip_dce.c18
-rw-r--r--usr/src/uts/common/inet/ip/ip_if.c6
-rw-r--r--usr/src/uts/common/inet/ip/ip_ire.c14
-rw-r--r--usr/src/uts/common/inet/ip/ip_ndp.c2
-rw-r--r--usr/src/uts/common/inet/ip/ip_output.c2
-rw-r--r--usr/src/uts/common/inet/ip/ipsecah.c2
-rw-r--r--usr/src/uts/common/inet/ip/ipsecesp.c2
-rw-r--r--usr/src/uts/common/inet/ip/keysock.c10
-rw-r--r--usr/src/uts/common/inet/ip/spd.c2
-rw-r--r--usr/src/uts/common/inet/ip_ire.h2
-rw-r--r--usr/src/uts/common/inet/ipf/netinet/ip_compat.h32
-rw-r--r--usr/src/uts/common/inet/ipsec_impl.h20
-rw-r--r--usr/src/uts/common/inet/kssl/ksslimpl.h4
-rw-r--r--usr/src/uts/common/inet/nca/nca.h2
-rw-r--r--usr/src/uts/common/inet/sadb.h4
-rw-r--r--usr/src/uts/common/inet/sctp/sctp.c4
-rw-r--r--usr/src/uts/common/inet/sctp/sctp_addr.c16
-rw-r--r--usr/src/uts/common/inet/sctp/sctp_conn.c6
-rw-r--r--usr/src/uts/common/inet/sctp/sctp_impl.h2
-rw-r--r--usr/src/uts/common/inet/tcp/tcp_input.c6
-rw-r--r--usr/src/uts/common/inet/tcp_impl.h2
-rw-r--r--usr/src/uts/common/io/bscbus.c2
-rw-r--r--usr/src/uts/common/io/chxge/pe.c10
-rw-r--r--usr/src/uts/common/io/comstar/lu/stmf_sbd/sbd.c8
-rw-r--r--usr/src/uts/common/io/comstar/port/fcoet/fcoet.h4
-rw-r--r--usr/src/uts/common/io/comstar/port/fcoet/fcoet_fc.c6
-rw-r--r--usr/src/uts/common/io/comstar/port/fct/discovery.c46
-rw-r--r--usr/src/uts/common/io/comstar/port/fct/fct.c28
-rw-r--r--usr/src/uts/common/io/comstar/port/qlt/qlt.c6
-rw-r--r--usr/src/uts/common/io/comstar/stmf/lun_map.c10
-rw-r--r--usr/src/uts/common/io/comstar/stmf/stmf.c37
-rw-r--r--usr/src/uts/common/io/cxgbe/t4nex/t4_l2t.c2
-rw-r--r--usr/src/uts/common/io/dld/dld_str.c6
-rw-r--r--usr/src/uts/common/io/dls/dls_link.c10
-rw-r--r--usr/src/uts/common/io/drm/drm_atomic.h2
-rw-r--r--usr/src/uts/common/io/fcoe/fcoe.c4
-rw-r--r--usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_sli3.c4
-rw-r--r--usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_sli4.c6
-rw-r--r--usr/src/uts/common/io/fibre-channel/fca/fcoei/fcoei.c2
-rw-r--r--usr/src/uts/common/io/fibre-channel/fca/fcoei/fcoei_eth.c2
-rw-r--r--usr/src/uts/common/io/fibre-channel/fca/fcoei/fcoei_lv.c8
-rw-r--r--usr/src/uts/common/io/fibre-channel/fca/oce/oce_rx.c4
-rw-r--r--usr/src/uts/common/io/fssnap.c6
-rw-r--r--usr/src/uts/common/io/gld.c6
-rw-r--r--usr/src/uts/common/io/hxge/hxge_rxdma.c2
-rw-r--r--usr/src/uts/common/io/ib/clients/daplt/daplt.c46
-rw-r--r--usr/src/uts/common/io/ib/clients/ibd/ibd_cm.c13
-rw-r--r--usr/src/uts/common/io/ib/clients/rdsv3/cong.c2
-rw-r--r--usr/src/uts/common/io/ib/clients/rdsv3/ib_recv.c4
-rw-r--r--usr/src/uts/common/io/ib/clients/rdsv3/message.c2
-rw-r--r--usr/src/uts/common/io/ib/clients/rdsv3/rdma.c8
-rw-r--r--usr/src/uts/common/io/ib/clients/rdsv3/rds_recv.c2
-rw-r--r--usr/src/uts/common/io/ib/clients/rdsv3/send.c6
-rw-r--r--usr/src/uts/common/io/mac/mac.c6
-rw-r--r--usr/src/uts/common/io/mac/mac_bcast.c2
-rw-r--r--usr/src/uts/common/io/mega_sas/megaraid_sas.c4
-rw-r--r--usr/src/uts/common/io/mr_sas/mr_sas.c2
-rw-r--r--usr/src/uts/common/io/mr_sas/mr_sas_tbolt.c2
-rw-r--r--usr/src/uts/common/io/myri10ge/drv/myri10ge.c8
-rw-r--r--usr/src/uts/common/io/myri10ge/drv/myri10ge_var.h8
-rw-r--r--usr/src/uts/common/io/neti_impl.c6
-rw-r--r--usr/src/uts/common/io/nxge/nxge_rxdma.c2
-rw-r--r--usr/src/uts/common/io/pciex/pcie_fault.c2
-rw-r--r--usr/src/uts/common/io/rsm/rsm.c18
-rw-r--r--usr/src/uts/common/io/str_conf.c6
-rw-r--r--usr/src/uts/common/io/tl.c10
-rw-r--r--usr/src/uts/common/io/usb/usba/usbai_pipe_mgmt.c2
-rw-r--r--usr/src/uts/common/io/xge/drv/xgell.c2
-rw-r--r--usr/src/uts/common/ipp/dlcosmk/dlcosmk.c15
-rw-r--r--usr/src/uts/common/ipp/dscpmk/dscpmk.c16
-rw-r--r--usr/src/uts/common/ipp/flowacct/flowacct.c18
-rw-r--r--usr/src/uts/common/ipp/ipgpc/classifierddi.c6
-rw-r--r--usr/src/uts/common/ipp/ipgpc/filters.c18
-rw-r--r--usr/src/uts/common/ipp/ippconf.c14
-rw-r--r--usr/src/uts/common/ipp/meters/tokenmt.c14
-rw-r--r--usr/src/uts/common/ipp/meters/tswtcl.c14
-rw-r--r--usr/src/uts/common/os/audit_memory.c4
-rw-r--r--usr/src/uts/common/os/bio.c4
-rw-r--r--usr/src/uts/common/os/clock.c2
-rw-r--r--usr/src/uts/common/os/contract.c2
-rw-r--r--usr/src/uts/common/os/cred.c8
-rw-r--r--usr/src/uts/common/os/ddi_intr.c2
-rw-r--r--usr/src/uts/common/os/ddifm.c6
-rw-r--r--usr/src/uts/common/os/devcfg.c8
-rw-r--r--usr/src/uts/common/os/devpolicy.c4
-rw-r--r--usr/src/uts/common/os/driver_lyr.c4
-rw-r--r--usr/src/uts/common/os/errorq.c18
-rw-r--r--usr/src/uts/common/os/evchannels.c4
-rw-r--r--usr/src/uts/common/os/exit.c2
-rw-r--r--usr/src/uts/common/os/fio.c2
-rw-r--r--usr/src/uts/common/os/fm.c117
-rw-r--r--usr/src/uts/common/os/fork.c6
-rw-r--r--usr/src/uts/common/os/kcpc.c4
-rw-r--r--usr/src/uts/common/os/klpd.c10
-rw-r--r--usr/src/uts/common/os/kmem.c8
-rw-r--r--usr/src/uts/common/os/lgrp.c16
-rw-r--r--usr/src/uts/common/os/mmapobj.c8
-rw-r--r--usr/src/uts/common/os/pool.c4
-rw-r--r--usr/src/uts/common/os/refstr.c6
-rw-r--r--usr/src/uts/common/os/sid.c12
-rw-r--r--usr/src/uts/common/os/strsubr.c2
-rw-r--r--usr/src/uts/common/os/sunddi.c4
-rw-r--r--usr/src/uts/common/os/task.c4
-rw-r--r--usr/src/uts/common/os/tlabel.c4
-rw-r--r--usr/src/uts/common/os/vmem.c4
-rw-r--r--usr/src/uts/common/rpc/clnt_clts.c2
-rw-r--r--usr/src/uts/common/rpc/clnt_cots.c4
-rw-r--r--usr/src/uts/common/rpc/svc_clts.c2
-rw-r--r--usr/src/uts/common/rpc/svc_cots.c2
-rw-r--r--usr/src/uts/common/rpc/svc_rdma.c2
-rw-r--r--usr/src/uts/common/sys/aggr_impl.h8
-rw-r--r--usr/src/uts/common/sys/crypto/impl.h4
-rw-r--r--usr/src/uts/common/sys/crypto/sched_impl.h12
-rw-r--r--usr/src/uts/common/sys/ib/clients/rdsv3/rdsv3_impl.h2
-rw-r--r--usr/src/uts/common/syscall/corectl.c10
-rw-r--r--usr/src/uts/common/syscall/lwp_sobj.c4
-rw-r--r--usr/src/uts/common/vm/page_retire.c4
-rw-r--r--usr/src/uts/common/vm/seg_kmem.c2
-rw-r--r--usr/src/uts/common/vm/seg_kp.c11
-rw-r--r--usr/src/uts/common/vm/seg_spt.c10
-rw-r--r--usr/src/uts/common/vm/seg_vn.c16
-rw-r--r--usr/src/uts/common/vm/vm_page.c2
-rw-r--r--usr/src/uts/common/vm/vm_pagelist.c4
-rw-r--r--usr/src/uts/common/xen/io/xnb.c4
-rw-r--r--usr/src/uts/common/xen/io/xnf.c12
-rw-r--r--usr/src/uts/i86pc/io/psm/uppc.c4
-rw-r--r--usr/src/uts/i86pc/os/memnode.c6
-rw-r--r--usr/src/uts/i86pc/sys/rootnex.h2
-rw-r--r--usr/src/uts/i86pc/vm/hat_i86.c4
-rw-r--r--usr/src/uts/i86pc/vm/hat_i86.h4
-rw-r--r--usr/src/uts/i86pc/vm/htable.c12
-rw-r--r--usr/src/uts/i86pc/vm/htable.h6
-rw-r--r--usr/src/uts/i86xpv/io/psm/xpv_uppc.c4
-rw-r--r--usr/src/uts/intel/io/scsi/adapters/arcmsr/arcmsr.c4
-rw-r--r--usr/src/uts/sfmmu/vm/hat_sfmmu.c48
-rw-r--r--usr/src/uts/sfmmu/vm/hat_sfmmu.h5
-rw-r--r--usr/src/uts/sparc/dtrace/fasttrap_isa.c22
-rw-r--r--usr/src/uts/sparc/fpu/fpu_simulator.c8
-rw-r--r--usr/src/uts/sparc/sys/fpu/fpu_simulator.h2
-rw-r--r--usr/src/uts/sun4/os/memnode.c6
-rw-r--r--usr/src/uts/sun4/os/prom_subr.c4
-rw-r--r--usr/src/uts/sun4u/cpu/spitfire.c10
-rw-r--r--usr/src/uts/sun4u/cpu/us3_common.c2
-rw-r--r--usr/src/uts/sun4u/os/memscrub.c8
-rw-r--r--usr/src/uts/sun4u/sunfire/io/ac_test.c12
-rw-r--r--usr/src/uts/sun4u/sys/pci/pci_axq.h12
195 files changed, 768 insertions, 810 deletions
diff --git a/usr/src/uts/common/crypto/io/crypto.c b/usr/src/uts/common/crypto/io/crypto.c
index 47ce67d708..31947ae130 100644
--- a/usr/src/uts/common/crypto/io/crypto.c
+++ b/usr/src/uts/common/crypto/io/crypto.c
@@ -627,7 +627,7 @@ crypto_hold_minor(minor_t minor)
mutex_enter(&mp->kl_lock);
if ((cm = crypto_minors[minor - 1]) != NULL) {
- atomic_add_32(&cm->cm_refcnt, 1);
+ atomic_inc_32(&cm->cm_refcnt);
}
mutex_exit(&mp->kl_lock);
return (cm);
@@ -636,7 +636,7 @@ crypto_hold_minor(minor_t minor)
static void
crypto_release_minor(crypto_minor_t *cm)
{
- if (atomic_add_32_nv(&cm->cm_refcnt, -1) == 0) {
+ if (atomic_dec_32_nv(&cm->cm_refcnt) == 0) {
cv_signal(&cm->cm_cv);
}
}
diff --git a/usr/src/uts/common/crypto/io/dprov.c b/usr/src/uts/common/crypto/io/dprov.c
index 58de58718d..6259c8396d 100644
--- a/usr/src/uts/common/crypto/io/dprov.c
+++ b/usr/src/uts/common/crypto/io/dprov.c
@@ -1101,7 +1101,7 @@ typedef struct dprov_object {
* it REFHOLD()s.
*/
#define DPROV_OBJECT_REFHOLD(object) { \
- atomic_add_32(&(object)->do_refcnt, 1); \
+ atomic_inc_32(&(object)->do_refcnt); \
ASSERT((object)->do_refcnt != 0); \
}
@@ -1112,7 +1112,7 @@ typedef struct dprov_object {
#define DPROV_OBJECT_REFRELE(object) { \
ASSERT((object)->do_refcnt != 0); \
membar_exit(); \
- if (atomic_add_32_nv(&(object)->do_refcnt, -1) == 0) \
+ if (atomic_dec_32_nv(&(object)->do_refcnt) == 0) \
dprov_free_object(object); \
}
diff --git a/usr/src/uts/common/disp/cmt.c b/usr/src/uts/common/disp/cmt.c
index 7e46509fce..da3296f6cf 100644
--- a/usr/src/uts/common/disp/cmt.c
+++ b/usr/src/uts/common/disp/cmt.c
@@ -1358,9 +1358,9 @@ cmt_ev_thread_swtch(pg_t *pg, cpu_t *cp, hrtime_t now, kthread_t *old,
pg_cmt_t *cmt_pg = (pg_cmt_t *)pg;
if (old == cp->cpu_idle_thread) {
- atomic_add_32(&cmt_pg->cmt_utilization, 1);
+ atomic_inc_32(&cmt_pg->cmt_utilization);
} else if (new == cp->cpu_idle_thread) {
- atomic_add_32(&cmt_pg->cmt_utilization, -1);
+ atomic_dec_32(&cmt_pg->cmt_utilization);
}
}
@@ -1383,7 +1383,7 @@ cmt_ev_thread_swtch_pwr(pg_t *pg, cpu_t *cp, hrtime_t now, kthread_t *old,
if (old == cp->cpu_idle_thread) {
ASSERT(new != cp->cpu_idle_thread);
- u = atomic_add_32_nv(&cmt->cmt_utilization, 1);
+ u = atomic_inc_32_nv(&cmt->cmt_utilization);
if (u == 1) {
/*
* Notify the CPU power manager that the domain
@@ -1395,7 +1395,7 @@ cmt_ev_thread_swtch_pwr(pg_t *pg, cpu_t *cp, hrtime_t now, kthread_t *old,
}
} else if (new == cp->cpu_idle_thread) {
ASSERT(old != cp->cpu_idle_thread);
- u = atomic_add_32_nv(&cmt->cmt_utilization, -1);
+ u = atomic_dec_32_nv(&cmt->cmt_utilization);
if (u == 0) {
/*
* The domain is idle, notify the CPU power
diff --git a/usr/src/uts/common/dtrace/fasttrap.c b/usr/src/uts/common/dtrace/fasttrap.c
index f486c06b2c..ce7b1c40e6 100644
--- a/usr/src/uts/common/dtrace/fasttrap.c
+++ b/usr/src/uts/common/dtrace/fasttrap.c
@@ -1182,7 +1182,7 @@ fasttrap_proc_lookup(pid_t pid)
mutex_enter(&fprc->ftpc_mtx);
mutex_exit(&bucket->ftb_mtx);
fprc->ftpc_rcount++;
- atomic_add_64(&fprc->ftpc_acount, 1);
+ atomic_inc_64(&fprc->ftpc_acount);
ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
mutex_exit(&fprc->ftpc_mtx);
@@ -1212,7 +1212,7 @@ fasttrap_proc_lookup(pid_t pid)
mutex_enter(&fprc->ftpc_mtx);
mutex_exit(&bucket->ftb_mtx);
fprc->ftpc_rcount++;
- atomic_add_64(&fprc->ftpc_acount, 1);
+ atomic_inc_64(&fprc->ftpc_acount);
ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
mutex_exit(&fprc->ftpc_mtx);
@@ -1424,7 +1424,7 @@ fasttrap_provider_free(fasttrap_provider_t *provider)
* count of active providers on the associated process structure.
*/
if (!provider->ftp_retired) {
- atomic_add_64(&provider->ftp_proc->ftpc_acount, -1);
+ atomic_dec_64(&provider->ftp_proc->ftpc_acount);
ASSERT(provider->ftp_proc->ftpc_acount <
provider->ftp_proc->ftpc_rcount);
}
@@ -1499,7 +1499,7 @@ fasttrap_provider_retire(pid_t pid, const char *name, int mprov)
* bucket lock therefore protects the integrity of the provider hash
* table.
*/
- atomic_add_64(&fp->ftp_proc->ftpc_acount, -1);
+ atomic_dec_64(&fp->ftp_proc->ftpc_acount);
ASSERT(fp->ftp_proc->ftpc_acount < fp->ftp_proc->ftpc_rcount);
fp->ftp_retired = 1;
@@ -1595,10 +1595,10 @@ fasttrap_add_probe(fasttrap_probe_spec_t *pdata)
pdata->ftps_mod, pdata->ftps_func, name_str) != 0)
continue;
- atomic_add_32(&fasttrap_total, 1);
+ atomic_inc_32(&fasttrap_total);
if (fasttrap_total > fasttrap_max) {
- atomic_add_32(&fasttrap_total, -1);
+ atomic_dec_32(&fasttrap_total);
goto no_mem;
}
diff --git a/usr/src/uts/common/dtrace/profile.c b/usr/src/uts/common/dtrace/profile.c
index fc809d3579..106fb735df 100644
--- a/usr/src/uts/common/dtrace/profile.c
+++ b/usr/src/uts/common/dtrace/profile.c
@@ -171,9 +171,9 @@ profile_create(hrtime_t interval, const char *name, int kind)
if (dtrace_probe_lookup(profile_id, NULL, NULL, name) != 0)
return;
- atomic_add_32(&profile_total, 1);
+ atomic_inc_32(&profile_total);
if (profile_total > profile_max) {
- atomic_add_32(&profile_total, -1);
+ atomic_dec_32(&profile_total);
return;
}
@@ -328,7 +328,7 @@ profile_destroy(void *arg, dtrace_id_t id, void *parg)
kmem_free(prof, sizeof (profile_probe_t));
ASSERT(profile_total >= 1);
- atomic_add_32(&profile_total, -1);
+ atomic_dec_32(&profile_total);
}
/*ARGSUSED*/
diff --git a/usr/src/uts/common/fs/ctfs/ctfs_root.c b/usr/src/uts/common/fs/ctfs/ctfs_root.c
index 1e70b36206..cf96908450 100644
--- a/usr/src/uts/common/fs/ctfs/ctfs_root.c
+++ b/usr/src/uts/common/fs/ctfs/ctfs_root.c
@@ -240,7 +240,7 @@ ctfs_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
vfsp->vfs_fstype = ctfs_fstype;
do {
dev = makedevice(ctfs_major,
- atomic_add_32_nv(&ctfs_minor, 1) & L_MAXMIN32);
+ atomic_inc_32_nv(&ctfs_minor) & L_MAXMIN32);
} while (vfs_devismounted(dev));
vfs_make_fsid(&vfsp->vfs_fsid, dev, ctfs_fstype);
vfsp->vfs_data = data;
diff --git a/usr/src/uts/common/fs/dnlc.c b/usr/src/uts/common/fs/dnlc.c
index b45e3b17cb..25327d2852 100644
--- a/usr/src/uts/common/fs/dnlc.c
+++ b/usr/src/uts/common/fs/dnlc.c
@@ -254,7 +254,7 @@ vnode_t negative_cache_vnode;
#define dnlc_free(ncp) \
{ \
kmem_free((ncp), sizeof (ncache_t) + (ncp)->namlen); \
- atomic_add_32(&dnlc_nentries, -1); \
+ atomic_dec_32(&dnlc_nentries); \
}
@@ -1024,7 +1024,7 @@ dnlc_get(uchar_t namlen)
return (NULL);
}
ncp->namlen = namlen;
- atomic_add_32(&dnlc_nentries, 1);
+ atomic_inc_32(&dnlc_nentries);
dnlc_reduce_cache(NULL);
return (ncp);
}
diff --git a/usr/src/uts/common/fs/fem.c b/usr/src/uts/common/fs/fem.c
index 69ac6f3068..b4e28cc860 100644
--- a/usr/src/uts/common/fs/fem.c
+++ b/usr/src/uts/common/fs/fem.c
@@ -367,13 +367,13 @@ fem_unlock(struct fem_head *fp)
static void
fem_addref(struct fem_list *sp)
{
- atomic_add_32(&sp->feml_refc, 1);
+ atomic_inc_32(&sp->feml_refc);
}
static uint32_t
fem_delref(struct fem_list *sp)
{
- return (atomic_add_32_nv(&sp->feml_refc, -1));
+ return (atomic_dec_32_nv(&sp->feml_refc));
}
static struct fem_list *
diff --git a/usr/src/uts/common/fs/lofs/lofs_subr.c b/usr/src/uts/common/fs/lofs/lofs_subr.c
index c5d4551be2..433a76d789 100644
--- a/usr/src/uts/common/fs/lofs/lofs_subr.c
+++ b/usr/src/uts/common/fs/lofs/lofs_subr.c
@@ -23,8 +23,6 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* The idea behind composition-based stacked filesystems is to add a
* vnode to the stack of vnodes for each mount. These vnodes have their
@@ -289,7 +287,7 @@ makelonode(struct vnode *vp, struct loinfo *li, int flag)
}
lp = tlp;
}
- atomic_add_32(&li->li_refct, 1);
+ atomic_inc_32(&li->li_refct);
vfsp = makelfsnode(vp->v_vfsp, li);
lp->lo_vnode = nvp;
VN_SET_VFS_TYPE_DEV(nvp, vfsp, vp->v_type, vp->v_rdev);
@@ -632,7 +630,7 @@ lfs_rele(struct lfsnode *lfs, struct loinfo *li)
ASSERT(MUTEX_HELD(&li->li_lfslock));
ASSERT(vfsp->vfs_count > 1);
- if (atomic_add_32_nv(&vfsp->vfs_count, -1) == 1)
+ if (atomic_dec_32_nv(&vfsp->vfs_count) == 1)
freelfsnode(lfs, li);
}
@@ -672,7 +670,7 @@ freelonode(lnode_t *lp)
lo_dprint(4, "freeing %p, vfsp %p\n",
vp, vp->v_vfsp);
#endif
- atomic_add_32(&li->li_refct, -1);
+ atomic_dec_32(&li->li_refct);
vfsp = vp->v_vfsp;
vn_invalid(vp);
if (vfsp != li->li_mountvfs) {
diff --git a/usr/src/uts/common/fs/mntfs/mntvnops.c b/usr/src/uts/common/fs/mntfs/mntvnops.c
index 7fff58a602..7374820f95 100644
--- a/usr/src/uts/common/fs/mntfs/mntvnops.c
+++ b/usr/src/uts/common/fs/mntfs/mntvnops.c
@@ -852,7 +852,7 @@ mntopen(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
nmnp = mntgetnode(vp);
*vpp = MTOV(nmnp);
- atomic_add_32(&MTOD(nmnp)->mnt_nopen, 1);
+ atomic_inc_32(&MTOD(nmnp)->mnt_nopen);
VN_RELE(vp);
return (0);
}
@@ -875,7 +875,7 @@ mntclose(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
mntfs_freesnap(mnp, &mnp->mnt_read);
mntfs_freesnap(mnp, &mnp->mnt_ioctl);
rw_exit(&mnp->mnt_contents);
- atomic_add_32(&MTOD(mnp)->mnt_nopen, -1);
+ atomic_dec_32(&MTOD(mnp)->mnt_nopen);
}
return (0);
}
diff --git a/usr/src/uts/common/fs/nfs/nfs3_vnops.c b/usr/src/uts/common/fs/nfs/nfs3_vnops.c
index fd1595b9f5..291e5cd337 100644
--- a/usr/src/uts/common/fs/nfs/nfs3_vnops.c
+++ b/usr/src/uts/common/fs/nfs/nfs3_vnops.c
@@ -5276,11 +5276,11 @@ nfs3_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
if (nfs_rw_enter_sig(&rp->r_rwlock, RW_WRITER, INTR(vp)))
return (EINTR);
- atomic_add_int(&rp->r_inmap, 1);
+ atomic_inc_uint(&rp->r_inmap);
nfs_rw_exit(&rp->r_rwlock);
if (nfs_rw_enter_sig(&rp->r_lkserlock, RW_READER, INTR(vp))) {
- atomic_add_int(&rp->r_inmap, -1);
+ atomic_dec_uint(&rp->r_inmap);
return (EINTR);
}
@@ -5322,7 +5322,7 @@ nfs3_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
done:
nfs_rw_exit(&rp->r_lkserlock);
- atomic_add_int(&rp->r_inmap, -1);
+ atomic_dec_uint(&rp->r_inmap);
return (error);
}
diff --git a/usr/src/uts/common/fs/nfs/nfs4_client.c b/usr/src/uts/common/fs/nfs/nfs4_client.c
index 6c1006fbdc..7bfa46e1fb 100644
--- a/usr/src/uts/common/fs/nfs/nfs4_client.c
+++ b/usr/src/uts/common/fs/nfs/nfs4_client.c
@@ -3148,7 +3148,7 @@ nfs_free_mi4(mntinfo4_t *mi)
void
mi_hold(mntinfo4_t *mi)
{
- atomic_add_32(&mi->mi_count, 1);
+ atomic_inc_32(&mi->mi_count);
ASSERT(mi->mi_count != 0);
}
@@ -3156,7 +3156,7 @@ void
mi_rele(mntinfo4_t *mi)
{
ASSERT(mi->mi_count != 0);
- if (atomic_add_32_nv(&mi->mi_count, -1) == 0) {
+ if (atomic_dec_32_nv(&mi->mi_count) == 0) {
nfs_free_mi4(mi);
}
}
@@ -4111,7 +4111,7 @@ again:
void
fn_hold(nfs4_fname_t *fnp)
{
- atomic_add_32(&fnp->fn_refcnt, 1);
+ atomic_inc_32(&fnp->fn_refcnt);
NFS4_DEBUG(nfs4_fname_debug, (CE_NOTE,
"fn_hold %p:%s, new refcnt=%d",
(void *)fnp, fnp->fn_name, fnp->fn_refcnt));
@@ -4137,7 +4137,7 @@ recur:
parent = fnp->fn_parent;
if (parent != NULL)
mutex_enter(&parent->fn_lock); /* prevent new references */
- newref = atomic_add_32_nv(&fnp->fn_refcnt, -1);
+ newref = atomic_dec_32_nv(&fnp->fn_refcnt);
if (newref > 0) {
NFS4_DEBUG(nfs4_fname_debug, (CE_NOTE,
"fn_rele %p:%s, new refcnt=%d",
diff --git a/usr/src/uts/common/fs/nfs/nfs4_client_state.c b/usr/src/uts/common/fs/nfs/nfs4_client_state.c
index 5120fcf3ad..969729b3f8 100644
--- a/usr/src/uts/common/fs/nfs/nfs4_client_state.c
+++ b/usr/src/uts/common/fs/nfs/nfs4_client_state.c
@@ -673,7 +673,7 @@ static uint64_t open_owner_seq_num = 0;
uint64_t
nfs4_get_new_oo_name(void)
{
- return (atomic_add_64_nv(&open_owner_seq_num, 1));
+ return (atomic_inc_64_nv(&open_owner_seq_num));
}
/*
@@ -836,7 +836,7 @@ create_lock_owner(rnode4_t *rp, pid_t pid)
* A Solaris lock_owner is <seq_num><pid>
*/
lop->lock_owner_name.ln_seq_num =
- atomic_add_64_nv(&lock_owner_seq_num, 1);
+ atomic_inc_64_nv(&lock_owner_seq_num);
lop->lock_owner_name.ln_pid = pid;
cv_init(&lop->lo_cv_seqid_sync, NULL, CV_DEFAULT, NULL);
@@ -883,7 +883,7 @@ nfs4_set_new_lock_owner_args(lock_owner4 *owner, pid_t pid)
* A Solaris lock_owner is <seq_num><pid>
*/
cast_namep = (nfs4_lo_name_t *)owner->owner_val;
- cast_namep->ln_seq_num = atomic_add_64_nv(&lock_owner_seq_num, 1);
+ cast_namep->ln_seq_num = atomic_inc_64_nv(&lock_owner_seq_num);
cast_namep->ln_pid = pid;
}
diff --git a/usr/src/uts/common/fs/nfs/nfs4_db.c b/usr/src/uts/common/fs/nfs/nfs4_db.c
index 3c8b63b5d8..fbecb86f64 100644
--- a/usr/src/uts/common/fs/nfs/nfs4_db.c
+++ b/usr/src/uts/common/fs/nfs/nfs4_db.c
@@ -60,7 +60,7 @@ rfs4_dbe_getid(rfs4_dbe_t *entry)
void
rfs4_dbe_hold(rfs4_dbe_t *entry)
{
- atomic_add_32(&entry->dbe_refcnt, 1);
+ atomic_inc_32(&entry->dbe_refcnt);
}
/*
@@ -69,7 +69,7 @@ rfs4_dbe_hold(rfs4_dbe_t *entry)
void
rfs4_dbe_rele_nolock(rfs4_dbe_t *entry)
{
- atomic_add_32(&entry->dbe_refcnt, -1);
+ atomic_dec_32(&entry->dbe_refcnt);
}
@@ -129,7 +129,7 @@ rfs4_dbe_rele(rfs4_dbe_t *entry)
{
mutex_enter(entry->dbe_lock);
ASSERT(entry->dbe_refcnt > 1);
- atomic_add_32(&entry->dbe_refcnt, -1);
+ atomic_dec_32(&entry->dbe_refcnt);
entry->dbe_time_rele = gethrestime_sec();
mutex_exit(entry->dbe_lock);
}
diff --git a/usr/src/uts/common/fs/nfs/nfs4_rnode.c b/usr/src/uts/common/fs/nfs/nfs4_rnode.c
index a5b19334dd..5d3254e478 100644
--- a/usr/src/uts/common/fs/nfs/nfs4_rnode.c
+++ b/usr/src/uts/common/fs/nfs/nfs4_rnode.c
@@ -647,7 +647,7 @@ start:
rp = kmem_cache_alloc(rnode4_cache, KM_SLEEP);
new_vp = vn_alloc(KM_SLEEP);
- atomic_add_long((ulong_t *)&rnode4_new, 1);
+ atomic_inc_ulong((ulong_t *)&rnode4_new);
#ifdef DEBUG
clstat4_debug.nrnode.value.ui64++;
#endif
@@ -1220,7 +1220,7 @@ destroy_rnode4(rnode4_t *rp)
vfsp = vp->v_vfsp;
uninit_rnode4(rp);
- atomic_add_long((ulong_t *)&rnode4_new, -1);
+ atomic_dec_ulong((ulong_t *)&rnode4_new);
#ifdef DEBUG
clstat4_debug.nrnode.value.ui64--;
#endif
diff --git a/usr/src/uts/common/fs/nfs/nfs4_subr.c b/usr/src/uts/common/fs/nfs/nfs4_subr.c
index cfac742707..991217ff5f 100644
--- a/usr/src/uts/common/fs/nfs/nfs4_subr.c
+++ b/usr/src/uts/common/fs/nfs/nfs4_subr.c
@@ -934,7 +934,7 @@ top:
* a new one and use that.
*/
#ifdef DEBUG
- atomic_add_64(&nfscl->nfscl_stat.clalloc.value.ui64, 1);
+ atomic_inc_64(&nfscl->nfscl_stat.clalloc.value.ui64);
#endif
mutex_exit(&nfscl->nfscl_chtable4_lock);
@@ -955,7 +955,7 @@ top:
if (error != 0) {
kmem_cache_free(chtab4_cache, cp);
#ifdef DEBUG
- atomic_add_64(&nfscl->nfscl_stat.clalloc.value.ui64, -1);
+ atomic_dec_64(&nfscl->nfscl_stat.clalloc.value.ui64);
#endif
/*
* Warning is unnecessary if error is EINTR.
@@ -977,7 +977,7 @@ top:
CLNT_DESTROY(cp->ch_client);
kmem_cache_free(chtab4_cache, cp);
#ifdef DEBUG
- atomic_add_64(&nfscl->nfscl_stat.clalloc.value.ui64, -1);
+ atomic_dec_64(&nfscl->nfscl_stat.clalloc.value.ui64);
#endif
return ((error != 0) ? error : EINTR);
}
@@ -2646,7 +2646,7 @@ rddir4_cache_alloc(int flags)
mutex_init(&rdip->lock, NULL, MUTEX_DEFAULT, NULL);
rdip->count = 1;
#ifdef DEBUG
- atomic_add_64(&clstat4_debug.dirent.value.ui64, 1);
+ atomic_inc_64(&clstat4_debug.dirent.value.ui64);
#endif
}
return (rc);
@@ -2703,7 +2703,7 @@ rddir4_cache_free(rddir4_cache_impl *rdip)
rddir4_cache *rc = &rdip->rc;
#ifdef DEBUG
- atomic_add_64(&clstat4_debug.dirent.value.ui64, -1);
+ atomic_dec_64(&clstat4_debug.dirent.value.ui64);
#endif
if (rc->entries != NULL)
kmem_free(rc->entries, rc->buflen);
diff --git a/usr/src/uts/common/fs/nfs/nfs4_vnops.c b/usr/src/uts/common/fs/nfs/nfs4_vnops.c
index 0c97fcc176..b9ba9a6ead 100644
--- a/usr/src/uts/common/fs/nfs/nfs4_vnops.c
+++ b/usr/src/uts/common/fs/nfs/nfs4_vnops.c
@@ -10490,11 +10490,11 @@ nfs4_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
if (nfs_rw_enter_sig(&rp->r_rwlock, RW_WRITER, INTR4(vp)))
return (EINTR);
- atomic_add_int(&rp->r_inmap, 1);
+ atomic_inc_uint(&rp->r_inmap);
nfs_rw_exit(&rp->r_rwlock);
if (nfs_rw_enter_sig(&rp->r_lkserlock, RW_READER, INTR4(vp))) {
- atomic_add_int(&rp->r_inmap, -1);
+ atomic_dec_uint(&rp->r_inmap);
return (EINTR);
}
@@ -10602,7 +10602,7 @@ nfs4_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
done:
nfs_rw_exit(&rp->r_lkserlock);
- atomic_add_int(&rp->r_inmap, -1);
+ atomic_dec_uint(&rp->r_inmap);
return (error);
}
diff --git a/usr/src/uts/common/fs/nfs/nfs_subr.c b/usr/src/uts/common/fs/nfs/nfs_subr.c
index 787e92c3a4..cc94571a42 100644
--- a/usr/src/uts/common/fs/nfs/nfs_subr.c
+++ b/usr/src/uts/common/fs/nfs/nfs_subr.c
@@ -415,7 +415,7 @@ top:
* a new one and use that.
*/
#ifdef DEBUG
- atomic_add_64(&nfscl->nfscl_stat.clalloc.value.ui64, 1);
+ atomic_inc_64(&nfscl->nfscl_stat.clalloc.value.ui64);
#endif
mutex_exit(&nfscl->nfscl_chtable_lock);
@@ -436,7 +436,7 @@ top:
if (error != 0) {
kmem_cache_free(chtab_cache, cp);
#ifdef DEBUG
- atomic_add_64(&nfscl->nfscl_stat.clalloc.value.ui64, -1);
+ atomic_dec_64(&nfscl->nfscl_stat.clalloc.value.ui64);
#endif
/*
* Warning is unnecessary if error is EINTR.
@@ -455,7 +455,7 @@ top:
CLNT_DESTROY(cp->ch_client);
kmem_cache_free(chtab_cache, cp);
#ifdef DEBUG
- atomic_add_64(&nfscl->nfscl_stat.clalloc.value.ui64, -1);
+ atomic_dec_64(&nfscl->nfscl_stat.clalloc.value.ui64);
#endif
return ((error != 0) ? error : EINTR);
}
@@ -2537,7 +2537,7 @@ start:
rp = kmem_cache_alloc(rnode_cache, KM_SLEEP);
new_vp = vn_alloc(KM_SLEEP);
- atomic_add_long((ulong_t *)&rnew, 1);
+ atomic_inc_ulong((ulong_t *)&rnew);
#ifdef DEBUG
clstat_debug.nrnode.value.ui64++;
#endif
@@ -3019,7 +3019,7 @@ destroy_rnode(rnode_t *rp)
ASSERT(rp->r_mapcnt == 0);
ASSERT(!(rp->r_flags & RHASHED));
ASSERT(rp->r_freef == NULL && rp->r_freeb == NULL);
- atomic_add_long((ulong_t *)&rnew, -1);
+ atomic_dec_ulong((ulong_t *)&rnew);
#ifdef DEBUG
clstat_debug.nrnode.value.ui64--;
#endif
@@ -3813,7 +3813,7 @@ rddir_cache_alloc(int flags)
mutex_init(&rc->lock, NULL, MUTEX_DEFAULT, NULL);
rc->count = 1;
#ifdef DEBUG
- atomic_add_64(&clstat_debug.dirent.value.ui64, 1);
+ atomic_inc_64(&clstat_debug.dirent.value.ui64);
#endif
}
return (rc);
@@ -3824,7 +3824,7 @@ rddir_cache_free(rddir_cache *rc)
{
#ifdef DEBUG
- atomic_add_64(&clstat_debug.dirent.value.ui64, -1);
+ atomic_dec_64(&clstat_debug.dirent.value.ui64);
#endif
if (rc->entries != NULL) {
#ifdef DEBUG
diff --git a/usr/src/uts/common/fs/nfs/nfs_vnops.c b/usr/src/uts/common/fs/nfs/nfs_vnops.c
index df128f4bc6..4ac6450381 100644
--- a/usr/src/uts/common/fs/nfs/nfs_vnops.c
+++ b/usr/src/uts/common/fs/nfs/nfs_vnops.c
@@ -4365,11 +4365,11 @@ nfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
if (nfs_rw_enter_sig(&rp->r_rwlock, RW_WRITER, INTR(vp)))
return (EINTR);
- atomic_add_int(&rp->r_inmap, 1);
+ atomic_inc_uint(&rp->r_inmap);
nfs_rw_exit(&rp->r_rwlock);
if (nfs_rw_enter_sig(&rp->r_lkserlock, RW_READER, INTR(vp))) {
- atomic_add_int(&rp->r_inmap, -1);
+ atomic_dec_uint(&rp->r_inmap);
return (EINTR);
}
if (vp->v_flag & VNOCACHE) {
@@ -4410,7 +4410,7 @@ nfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
done:
nfs_rw_exit(&rp->r_lkserlock);
- atomic_add_int(&rp->r_inmap, -1);
+ atomic_dec_uint(&rp->r_inmap);
return (error);
}
diff --git a/usr/src/uts/common/fs/objfs/objfs_vfs.c b/usr/src/uts/common/fs/objfs/objfs_vfs.c
index 00dafeb625..3d20d15634 100644
--- a/usr/src/uts/common/fs/objfs/objfs_vfs.c
+++ b/usr/src/uts/common/fs/objfs/objfs_vfs.c
@@ -179,7 +179,7 @@ objfs_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
vfsp->vfs_fstype = objfs_fstype;
do {
dev = makedevice(objfs_major,
- atomic_add_32_nv(&objfs_minor, 1) & L_MAXMIN32);
+ atomic_inc_32_nv(&objfs_minor) & L_MAXMIN32);
} while (vfs_devismounted(dev));
vfs_make_fsid(&vfsp->vfs_fsid, dev, objfs_fstype);
vfsp->vfs_data = data;
diff --git a/usr/src/uts/common/fs/proc/prvnops.c b/usr/src/uts/common/fs/proc/prvnops.c
index c84b9d3726..411c9b8b0b 100644
--- a/usr/src/uts/common/fs/proc/prvnops.c
+++ b/usr/src/uts/common/fs/proc/prvnops.c
@@ -4429,8 +4429,8 @@ prlwpnode(prnode_t *pnp, uint_t tid)
static uint32_t nprnode;
static uint32_t nprcommon;
-#define INCREMENT(x) atomic_add_32(&x, 1);
-#define DECREMENT(x) atomic_add_32(&x, -1);
+#define INCREMENT(x) atomic_inc_32(&x);
+#define DECREMENT(x) atomic_dec_32(&x);
#else
diff --git a/usr/src/uts/common/fs/sharefs/sharefs_vfsops.c b/usr/src/uts/common/fs/sharefs/sharefs_vfsops.c
index 6f7796e6e0..1fa1617ec8 100644
--- a/usr/src/uts/common/fs/sharefs/sharefs_vfsops.c
+++ b/usr/src/uts/common/fs/sharefs/sharefs_vfsops.c
@@ -201,7 +201,7 @@ sharefs_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
vfsp->vfs_fstype = sharefs_fstype;
do {
dev = makedevice(sharefs_major,
- atomic_add_32_nv(&sharefs_minor, 1) & L_MAXMIN32);
+ atomic_inc_32_nv(&sharefs_minor) & L_MAXMIN32);
} while (vfs_devismounted(dev));
vfs_make_fsid(&vfsp->vfs_fsid, dev, sharefs_fstype);
vfsp->vfs_data = data;
diff --git a/usr/src/uts/common/fs/sharefs/sharefs_vnops.c b/usr/src/uts/common/fs/sharefs/sharefs_vnops.c
index cbafc38150..2ca3f293a5 100644
--- a/usr/src/uts/common/fs/sharefs/sharefs_vnops.c
+++ b/usr/src/uts/common/fs/sharefs/sharefs_vnops.c
@@ -24,8 +24,6 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <fs/fs_subr.h>
#include <sys/errno.h>
@@ -253,7 +251,7 @@ sharefs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
* No need for the lock, no other thread can be accessing
* this data structure.
*/
- atomic_add_32(&sft->sharefs_refs, 1);
+ atomic_inc_32(&sft->sharefs_refs);
sft->sharefs_real_vp = 0;
/*
@@ -285,7 +283,7 @@ sharefs_close(vnode_t *vp, int flag, int count,
sft->sharefs_generation = 0;
}
}
- atomic_add_32(&sft->sharefs_refs, -1);
+ atomic_dec_32(&sft->sharefs_refs);
rw_exit(&sharefs_lock);
return (0);
diff --git a/usr/src/uts/common/fs/sharefs/sharetab.c b/usr/src/uts/common/fs/sharefs/sharetab.c
index 5036cd3f17..0f8543641c 100644
--- a/usr/src/uts/common/fs/sharefs/sharetab.c
+++ b/usr/src/uts/common/fs/sharefs/sharetab.c
@@ -154,15 +154,15 @@ sharefs_remove(share_t *sh, sharefs_lens_t *shl)
}
ASSERT(sht->s_buckets[iHash].ssh_count != 0);
- atomic_add_32(&sht->s_buckets[iHash].ssh_count, -1);
- atomic_add_32(&sht->s_count, -1);
- atomic_add_32(&sharetab_count, -1);
+ atomic_dec_32(&sht->s_buckets[iHash].ssh_count);
+ atomic_dec_32(&sht->s_count);
+ atomic_dec_32(&sharetab_count);
ASSERT(sharetab_size >= s->sh_size);
sharetab_size -= s->sh_size;
gethrestime(&sharetab_mtime);
- atomic_add_32(&sharetab_generation, 1);
+ atomic_inc_32(&sharetab_generation);
break;
}
@@ -281,7 +281,7 @@ sharefs_add(share_t *sh, sharefs_lens_t *shl)
sharefree(s, NULL);
gethrestime(&sharetab_mtime);
- atomic_add_32(&sharetab_generation, 1);
+ atomic_inc_32(&sharetab_generation);
ASSERT(sht->s_buckets[iHash].ssh_count != 0);
rw_exit(&sharetab_lock);
@@ -298,13 +298,13 @@ sharefs_add(share_t *sh, sharefs_lens_t *shl)
*/
sh->sh_next = sht->s_buckets[iHash].ssh_sh;
sht->s_buckets[iHash].ssh_sh = sh;
- atomic_add_32(&sht->s_buckets[iHash].ssh_count, 1);
- atomic_add_32(&sht->s_count, 1);
- atomic_add_32(&sharetab_count, 1);
+ atomic_inc_32(&sht->s_buckets[iHash].ssh_count);
+ atomic_inc_32(&sht->s_count);
+ atomic_inc_32(&sharetab_count);
sharetab_size += sh->sh_size;
gethrestime(&sharetab_mtime);
- atomic_add_32(&sharetab_generation, 1);
+ atomic_inc_32(&sharetab_generation);
rw_exit(&sharetab_lock);
diff --git a/usr/src/uts/common/fs/smbclnt/smbfs/smbfs_subr2.c b/usr/src/uts/common/fs/smbclnt/smbfs/smbfs_subr2.c
index 9423747de9..025bc0b3ec 100644
--- a/usr/src/uts/common/fs/smbclnt/smbfs/smbfs_subr2.c
+++ b/usr/src/uts/common/fs/smbclnt/smbfs/smbfs_subr2.c
@@ -403,7 +403,7 @@ start:
np = kmem_cache_alloc(smbnode_cache, KM_SLEEP);
new_vp = vn_alloc(KM_SLEEP);
- atomic_add_long((ulong_t *)&smbnodenew, 1);
+ atomic_inc_ulong((ulong_t *)&smbnodenew);
vp = new_vp;
}
@@ -1027,7 +1027,7 @@ sn_destroy_node(smbnode_t *np)
ASSERT(np->n_rpath == NULL);
ASSERT(!(np->r_flags & RHASHED));
ASSERT(np->r_freef == NULL && np->r_freeb == NULL);
- atomic_add_long((ulong_t *)&smbnodenew, -1);
+ atomic_dec_ulong((ulong_t *)&smbnodenew);
vn_invalid(vp);
vn_free(vp);
kmem_cache_free(smbnode_cache, np);
diff --git a/usr/src/uts/common/fs/sockfs/nl7curi.c b/usr/src/uts/common/fs/sockfs/nl7curi.c
index 69f1e7ca21..c6818babe7 100644
--- a/usr/src/uts/common/fs/sockfs/nl7curi.c
+++ b/usr/src/uts/common/fs/sockfs/nl7curi.c
@@ -274,8 +274,8 @@ static const int P2Ps[] = {
mutex_enter(&(hp)->lock); \
while ((_nuri = (hp)->list) != NULL) { \
(hp)->list = _nuri->hash; \
- atomic_add_32(&uri_hash_cnt[(from)], -1); \
- atomic_add_32(&uri_hash_cnt[(to)], 1); \
+ atomic_dec_32(&uri_hash_cnt[(from)]); \
+ atomic_inc_32(&uri_hash_cnt[(to)]); \
_nhix = _nuri->hvalue; \
URI_HASH_IX(_nhix, to); \
_nhp = &uri_hash_ab[(to)][_nhix]; \
@@ -294,7 +294,7 @@ static const int P2Ps[] = {
} else { \
(hp)->list = (uri)->hash; \
} \
- if (atomic_add_32_nv(&uri_hash_cnt[(cur)], -1) == 0 && \
+ if (atomic_dec_32_nv(&uri_hash_cnt[(cur)]) == 0 && \
uri_hash_ab[(new)] != NULL) { \
kmem_free(uri_hash_ab[cur], \
sizeof (uri_hash_t) * uri_hash_sz[cur]); \
@@ -597,7 +597,7 @@ again:
* as the check is only advisory.
*/
fast:
- atomic_add_32(&uri_hash_cnt[cur], 1);
+ atomic_inc_32(&uri_hash_cnt[cur]);
hp = &uri_hash_ab[cur][hix];
mutex_enter(&hp->lock);
uri->hash = hp->list;
@@ -689,7 +689,7 @@ again:
* completely migrated then walk all current hash chains and
* migrate list members now.
*/
- if (atomic_add_32_nv(&uri_hash_cnt[new], 1) >= uri_hash_overflow[new]) {
+ if (atomic_inc_32_nv(&uri_hash_cnt[new]) >= uri_hash_overflow[new]) {
for (hix = 0; hix < uri_hash_sz[cur]; hix++) {
hp = &uri_hash_ab[cur][hix];
if (hp->list != NULL) {
@@ -837,7 +837,7 @@ nexthash:
hp->list = uri->hash;
}
mutex_exit(&hp->lock);
- atomic_add_32(&uri_hash_cnt[cur], -1);
+ atomic_dec_32(&uri_hash_cnt[cur]);
rw_exit(&uri_hash_access);
if (ruri->nocache)
nl7c_uri_purge++;
diff --git a/usr/src/uts/common/fs/sockfs/nl7curi.h b/usr/src/uts/common/fs/sockfs/nl7curi.h
index be5131dee2..d002a0e5f9 100644
--- a/usr/src/uts/common/fs/sockfs/nl7curi.h
+++ b/usr/src/uts/common/fs/sockfs/nl7curi.h
@@ -27,8 +27,6 @@
#ifndef _SYS_SOCKFS_NL7CURI_H
#define _SYS_SOCKFS_NL7CURI_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -83,12 +81,12 @@ typedef struct ref_s {
}
#define REF_HOLD(container) { \
- atomic_add_32(&(container)->ref.cnt, 1); \
+ atomic_inc_32(&(container)->ref.cnt); \
ASSERT((container)->ref.cnt != 0); \
}
#define REF_RELE(container) { \
- if (atomic_add_32_nv(&(container)->ref.cnt, -1) == 0) { \
+ if (atomic_dec_32_nv(&(container)->ref.cnt) == 0) { \
(container)->ref.last((container)); \
kmem_cache_free((container)->ref.kmc, (container)); \
} \
diff --git a/usr/src/uts/common/fs/sockfs/sockfilter_impl.h b/usr/src/uts/common/fs/sockfs/sockfilter_impl.h
index d37410a0d1..7f7aece1f1 100644
--- a/usr/src/uts/common/fs/sockfs/sockfilter_impl.h
+++ b/usr/src/uts/common/fs/sockfs/sockfilter_impl.h
@@ -63,7 +63,7 @@ struct sof_kstat {
};
#define SOF_GLOBAL_STAT_BUMP(s) \
- atomic_add_64(&sof_stat.sofks_##s.value.ui64, 1)
+ atomic_inc_64(&sof_stat.sofks_##s.value.ui64)
/*
* Per filter statistics.
diff --git a/usr/src/uts/common/fs/sockfs/socksyscalls.c b/usr/src/uts/common/fs/sockfs/socksyscalls.c
index eec95b2fad..a86cda937c 100644
--- a/usr/src/uts/common/fs/sockfs/socksyscalls.c
+++ b/usr/src/uts/common/fs/sockfs/socksyscalls.c
@@ -2402,7 +2402,7 @@ void
snf_vmap_desbfree(snf_vmap_desbinfo *snfv)
{
ASSERT(snfv->snfv_ref != 0);
- if (atomic_add_32_nv(&snfv->snfv_ref, -1) == 0) {
+ if (atomic_dec_32_nv(&snfv->snfv_ref) == 0) {
vpm_unmap_pages(snfv->snfv_vml, S_READ);
VN_RELE(snfv->snfv_vp);
kmem_free(snfv, sizeof (snf_vmap_desbinfo));
diff --git a/usr/src/uts/common/fs/ufs/lufs.c b/usr/src/uts/common/fs/ufs/lufs.c
index 43cc78219c..dc29d7323b 100644
--- a/usr/src/uts/common/fs/ufs/lufs.c
+++ b/usr/src/uts/common/fs/ufs/lufs.c
@@ -920,7 +920,7 @@ lufs_disable(vnode_t *vp, struct fiolog *flp)
vfs_lock_wait(ufsvfsp->vfs_vfs);
ulp = &ufsvfsp->vfs_ulockfs;
mutex_enter(&ulp->ul_lock);
- atomic_add_long(&ufs_quiesce_pend, 1);
+ atomic_inc_ulong(&ufs_quiesce_pend);
(void) ufs_quiesce(ulp);
(void) ufs_flush(ufsvfsp->vfs_vfs);
@@ -939,7 +939,7 @@ lufs_disable(vnode_t *vp, struct fiolog *flp)
(void) lufs_unsnarf(ufsvfsp);
mutex_exit(&ufs_scan_lock);
- atomic_add_long(&ufs_quiesce_pend, -1);
+ atomic_dec_ulong(&ufs_quiesce_pend);
mutex_exit(&ulp->ul_lock);
vfs_setmntopt(ufsvfsp->vfs_vfs, MNTOPT_NOLOGGING, NULL, 0);
vfs_unlock(ufsvfsp->vfs_vfs);
diff --git a/usr/src/uts/common/fs/ufs/ufs_directio.c b/usr/src/uts/common/fs/ufs/ufs_directio.c
index 86f7f559eb..940bd964f4 100644
--- a/usr/src/uts/common/fs/ufs/ufs_directio.c
+++ b/usr/src/uts/common/fs/ufs/ufs_directio.c
@@ -614,7 +614,7 @@ skip_alloc:
if (!exclusive) {
ufs_shared_writes++;
- ncur = atomic_add_32_nv(&ufs_cur_writes, 1);
+ ncur = atomic_inc_32_nv(&ufs_cur_writes);
if (ncur > ufs_maxcur_writes)
ufs_maxcur_writes = ncur;
}
@@ -727,7 +727,7 @@ skip_alloc:
}
if (!exclusive) {
- atomic_add_32(&ufs_cur_writes, -1);
+ atomic_dec_32(&ufs_cur_writes);
/*
* If this write was done shared, readers may
* have pulled in unmodified pages. Get rid of
diff --git a/usr/src/uts/common/fs/ufs/ufs_filio.c b/usr/src/uts/common/fs/ufs/ufs_filio.c
index 6d63801a5e..f0a07babeb 100644
--- a/usr/src/uts/common/fs/ufs/ufs_filio.c
+++ b/usr/src/uts/common/fs/ufs/ufs_filio.c
@@ -344,7 +344,7 @@ ufs_fiosdio(
/* hold the mutex to prevent race with a lockfs request */
vfs_lock_wait(vp->v_vfsp);
mutex_enter(&ulp->ul_lock);
- atomic_add_long(&ufs_quiesce_pend, 1);
+ atomic_inc_ulong(&ufs_quiesce_pend);
if (ULOCKFS_IS_HLOCK(ulp)) {
error = EIO;
@@ -388,7 +388,7 @@ out:
/*
* we need this broadcast because of the ufs_quiesce call above
*/
- atomic_add_long(&ufs_quiesce_pend, -1);
+ atomic_dec_ulong(&ufs_quiesce_pend);
cv_broadcast(&ulp->ul_cv);
mutex_exit(&ulp->ul_lock);
vfs_unlock(vp->v_vfsp);
@@ -425,7 +425,7 @@ ufs_fioffs(
/* hold the mutex to prevent race with a lockfs request */
mutex_enter(&ulp->ul_lock);
- atomic_add_long(&ufs_quiesce_pend, 1);
+ atomic_inc_ulong(&ufs_quiesce_pend);
if (ULOCKFS_IS_HLOCK(ulp)) {
error = EIO;
@@ -486,7 +486,7 @@ ufs_fioffs(
error = ufs_flush(vp->v_vfsp);
out:
- atomic_add_long(&ufs_quiesce_pend, -1);
+ atomic_dec_ulong(&ufs_quiesce_pend);
cv_broadcast(&ulp->ul_cv);
mutex_exit(&ulp->ul_lock);
vfs_unlock(vp->v_vfsp);
diff --git a/usr/src/uts/common/fs/ufs/ufs_lockfs.c b/usr/src/uts/common/fs/ufs/ufs_lockfs.c
index 66715344bb..c5d46ef1a0 100644
--- a/usr/src/uts/common/fs/ufs/ufs_lockfs.c
+++ b/usr/src/uts/common/fs/ufs/ufs_lockfs.c
@@ -933,7 +933,7 @@ ufs__fiolfs(
ufs_thread_suspend(&ufsvfsp->vfs_delete);
mutex_enter(&ulp->ul_lock);
- atomic_add_long(&ufs_quiesce_pend, 1);
+ atomic_inc_ulong(&ufs_quiesce_pend);
/*
* Quit if there is another lockfs request in progress
@@ -1163,7 +1163,7 @@ ufs__fiolfs(
ulp->ul_lockfs.lf_comment && ulp->ul_lockfs.lf_comlen > 0 ?
ulp->ul_lockfs.lf_comment: "user-applied error lock");
- atomic_add_long(&ufs_quiesce_pend, -1);
+ atomic_dec_ulong(&ufs_quiesce_pend);
mutex_exit(&ulp->ul_lock);
vfs_unlock(vfsp);
@@ -1202,7 +1202,7 @@ errout:
LOCKFS_CLR_BUSY(&ulp->ul_lockfs);
errexit:
- atomic_add_long(&ufs_quiesce_pend, -1);
+ atomic_dec_ulong(&ufs_quiesce_pend);
mutex_exit(&ulp->ul_lock);
vfs_unlock(vfsp);
@@ -1299,10 +1299,10 @@ ufs_check_lockfs(struct ufsvfs *ufsvfsp, struct ulockfs *ulp, ulong_t mask)
}
if (mask & ULOCKFS_FWLOCK) {
- atomic_add_long(&ulp->ul_falloc_cnt, 1);
+ atomic_inc_ulong(&ulp->ul_falloc_cnt);
ULOCKFS_SET_FALLOC(ulp);
} else {
- atomic_add_long(&ulp->ul_vnops_cnt, 1);
+ atomic_inc_ulong(&ulp->ul_vnops_cnt);
}
return (0);
@@ -1380,7 +1380,7 @@ ufs_lockfs_begin(struct ufsvfs *ufsvfsp, struct ulockfs **ulpp, ulong_t mask)
ctr = (mask & ULOCKFS_FWLOCK) ?
&ulp->ul_falloc_cnt : &ulp->ul_vnops_cnt;
if (!ULOCKFS_IS_SLOCK(ulp)) {
- atomic_add_long(ctr, 1);
+ atomic_inc_ulong(ctr);
op_cnt_incremented++;
}
@@ -1399,7 +1399,7 @@ ufs_lockfs_begin(struct ufsvfs *ufsvfsp, struct ulockfs **ulpp, ulong_t mask)
*/
if (!ULOCKFS_IS_JUSTULOCK(ulp) || ufs_quiesce_pend) {
if (op_cnt_incremented)
- if (!atomic_add_long_nv(ctr, -1))
+ if (!atomic_dec_ulong_nv(ctr))
cv_broadcast(&ulp->ul_cv);
mutex_enter(&ulp->ul_lock);
error = ufs_check_lockfs(ufsvfsp, ulp, mask);
@@ -1514,14 +1514,14 @@ ufs_lockfs_end(struct ulockfs *ulp)
if (ULOCKFS_IS_FALLOC(ulp) && info->flags & ULOCK_INFO_FALLOCATE) {
/* Clear the thread's fallocate state */
info->flags &= ~ULOCK_INFO_FALLOCATE;
- if (!atomic_add_long_nv(&ulp->ul_falloc_cnt, -1)) {
+ if (!atomic_dec_ulong_nv(&ulp->ul_falloc_cnt)) {
mutex_enter(&ulp->ul_lock);
ULOCKFS_CLR_FALLOC(ulp);
cv_broadcast(&ulp->ul_cv);
mutex_exit(&ulp->ul_lock);
}
} else { /* normal thread */
- if (!atomic_add_long_nv(&ulp->ul_vnops_cnt, -1))
+ if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
cv_broadcast(&ulp->ul_cv);
}
}
@@ -1587,7 +1587,7 @@ ufs_lockfs_trybegin(struct ufsvfs *ufsvfsp, struct ulockfs **ulpp, ulong_t mask)
ctr = (mask & ULOCKFS_FWLOCK) ?
&ulp->ul_falloc_cnt : &ulp->ul_vnops_cnt;
if (!ULOCKFS_IS_SLOCK(ulp)) {
- atomic_add_long(ctr, 1);
+ atomic_inc_ulong(ctr);
op_cnt_incremented++;
}
@@ -1601,7 +1601,7 @@ ufs_lockfs_trybegin(struct ufsvfs *ufsvfsp, struct ulockfs **ulpp, ulong_t mask)
* file system is delete locked, a mmap can still go through).
*/
if (op_cnt_incremented)
- if (!atomic_add_long_nv(ctr, -1))
+ if (!atomic_dec_ulong_nv(ctr))
cv_broadcast(&ulp->ul_cv);
mutex_enter(&ulp->ul_lock);
if (ULOCKFS_IS_HLOCK(ulp) ||
@@ -1617,7 +1617,7 @@ ufs_lockfs_trybegin(struct ufsvfs *ufsvfsp, struct ulockfs **ulpp, ulong_t mask)
sizeof (ulockfs_info_t));
return (error);
}
- atomic_add_long(ctr, 1);
+ atomic_inc_ulong(ctr);
if (mask & ULOCKFS_FWLOCK)
ULOCKFS_SET_FALLOC(ulp);
mutex_exit(&ulp->ul_lock);
@@ -1648,7 +1648,7 @@ ufs_lockfs_trybegin(struct ufsvfs *ufsvfsp, struct ulockfs **ulpp, ulong_t mask)
sizeof (ulockfs_info_t));
return (error);
}
- atomic_add_long(ctr, 1);
+ atomic_inc_ulong(ctr);
if (mask & ULOCKFS_FWLOCK)
ULOCKFS_SET_FALLOC(ulp);
mutex_exit(&ulp->ul_lock);
@@ -1730,9 +1730,9 @@ ufs_lockfs_begin_getpage(
/*
* First time VOP call
*/
- atomic_add_long(&ulp->ul_vnops_cnt, 1);
+ atomic_inc_ulong(&ulp->ul_vnops_cnt);
if (!ULOCKFS_IS_JUSTULOCK(ulp) || ufs_quiesce_pend) {
- if (!atomic_add_long_nv(&ulp->ul_vnops_cnt, -1))
+ if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
cv_broadcast(&ulp->ul_cv);
mutex_enter(&ulp->ul_lock);
if (seg->s_ops == &segvn_ops &&
diff --git a/usr/src/uts/common/fs/ufs/ufs_vfsops.c b/usr/src/uts/common/fs/ufs/ufs_vfsops.c
index b41947eeb1..ca6d32c3d4 100644
--- a/usr/src/uts/common/fs/ufs/ufs_vfsops.c
+++ b/usr/src/uts/common/fs/ufs/ufs_vfsops.c
@@ -599,7 +599,7 @@ remountfs(struct vfs *vfsp, dev_t dev, void *raw_argsp, int args_len)
* synchronize w/ufs ioctls
*/
mutex_enter(&ulp->ul_lock);
- atomic_add_long(&ufs_quiesce_pend, 1);
+ atomic_inc_ulong(&ufs_quiesce_pend);
/*
* reset options
@@ -745,7 +745,7 @@ remountfs(struct vfs *vfsp, dev_t dev, void *raw_argsp, int args_len)
fsp->fs_fmod = 0;
fsp->fs_ronly = 0;
- atomic_add_long(&ufs_quiesce_pend, -1);
+ atomic_dec_ulong(&ufs_quiesce_pend);
cv_broadcast(&ulp->ul_cv);
mutex_exit(&ulp->ul_lock);
@@ -774,7 +774,7 @@ remountfs(struct vfs *vfsp, dev_t dev, void *raw_argsp, int args_len)
remounterr:
if (tpt)
brelse(tpt);
- atomic_add_long(&ufs_quiesce_pend, -1);
+ atomic_dec_ulong(&ufs_quiesce_pend);
cv_broadcast(&ulp->ul_cv);
mutex_exit(&ulp->ul_lock);
return (error);
@@ -1421,7 +1421,7 @@ ufs_unmount(struct vfs *vfsp, int fflag, struct cred *cr)
* hard lock it before unmounting.
*/
if (!ULOCKFS_IS_HLOCK(ulp)) {
- atomic_add_long(&ufs_quiesce_pend, 1);
+ atomic_inc_ulong(&ufs_quiesce_pend);
lockfs.lf_lock = LOCKFS_HLOCK;
lockfs.lf_flags = 0;
lockfs.lf_key = ulp->ul_lockfs.lf_key + 1;
@@ -1433,7 +1433,7 @@ ufs_unmount(struct vfs *vfsp, int fflag, struct cred *cr)
(void) ufs_quiesce(ulp);
(void) ufs_flush(vfsp);
(void) ufs_thaw(vfsp, ufsvfsp, ulp);
- atomic_add_long(&ufs_quiesce_pend, -1);
+ atomic_dec_ulong(&ufs_quiesce_pend);
ULOCKFS_CLR_BUSY(ulp);
LOCKFS_CLR_BUSY(&ulp->ul_lockfs);
poll_events |= POLLERR;
@@ -2153,7 +2153,7 @@ ufs_remountroot(struct vfs *vfsp)
ulp = &ufsvfsp->vfs_ulockfs;
mutex_enter(&ulp->ul_lock);
- atomic_add_long(&ufs_quiesce_pend, 1);
+ atomic_inc_ulong(&ufs_quiesce_pend);
(void) ufs_quiesce(ulp);
(void) ufs_flush(vfsp);
@@ -2278,7 +2278,7 @@ ufs_remountroot(struct vfs *vfsp)
rootdev = new_rootdev;
rootvp = new_rootvp;
- atomic_add_long(&ufs_quiesce_pend, -1);
+ atomic_dec_ulong(&ufs_quiesce_pend);
cv_broadcast(&ulp->ul_cv);
mutex_exit(&ulp->ul_lock);
diff --git a/usr/src/uts/common/fs/ufs/ufs_vnops.c b/usr/src/uts/common/fs/ufs/ufs_vnops.c
index 92bdb0d0a7..fcffd952ed 100644
--- a/usr/src/uts/common/fs/ufs/ufs_vnops.c
+++ b/usr/src/uts/common/fs/ufs/ufs_vnops.c
@@ -5976,11 +5976,11 @@ ufs_pageio(struct vnode *vp, page_t *pp, u_offset_t io_off, size_t io_len,
}
return (vmpss ? EIO : EINVAL);
}
- atomic_add_long(&ulp->ul_vnops_cnt, 1);
+ atomic_inc_ulong(&ulp->ul_vnops_cnt);
if (pp == NULL)
mutex_exit(&ulp->ul_lock);
if (ufs_quiesce_pend) {
- if (!atomic_add_long_nv(&ulp->ul_vnops_cnt, -1))
+ if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
cv_broadcast(&ulp->ul_cv);
return (vmpss ? EIO : EINVAL);
}
@@ -5999,7 +5999,7 @@ ufs_pageio(struct vnode *vp, page_t *pp, u_offset_t io_off, size_t io_len,
if (!vmpss) {
rw_enter(&ip->i_contents, RW_READER);
} else if (!rw_tryenter(&ip->i_contents, RW_READER)) {
- if (!atomic_add_long_nv(&ulp->ul_vnops_cnt, -1))
+ if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
cv_broadcast(&ulp->ul_cv);
return (EDEADLK);
}
@@ -6012,7 +6012,7 @@ ufs_pageio(struct vnode *vp, page_t *pp, u_offset_t io_off, size_t io_len,
if (vmpss && btopr(io_off + io_len) > btopr(ip->i_size)) {
if (dolock)
rw_exit(&ip->i_contents);
- if (!atomic_add_long_nv(&ulp->ul_vnops_cnt, -1))
+ if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
cv_broadcast(&ulp->ul_cv);
return (EFAULT);
}
@@ -6025,7 +6025,7 @@ ufs_pageio(struct vnode *vp, page_t *pp, u_offset_t io_off, size_t io_len,
}
if (dolock)
rw_exit(&ip->i_contents);
- if (!atomic_add_long_nv(&ulp->ul_vnops_cnt, -1))
+ if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
cv_broadcast(&ulp->ul_cv);
return (err);
}
@@ -6132,7 +6132,7 @@ ufs_pageio(struct vnode *vp, page_t *pp, u_offset_t io_off, size_t io_len,
if (dolock)
rw_exit(&ip->i_contents);
- if (vmpss && !atomic_add_long_nv(&ulp->ul_vnops_cnt, -1))
+ if (vmpss && !atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
cv_broadcast(&ulp->ul_cv);
return (err);
}
diff --git a/usr/src/uts/common/fs/vfs.c b/usr/src/uts/common/fs/vfs.c
index f16f5a6fc1..409e69a00d 100644
--- a/usr/src/uts/common/fs/vfs.c
+++ b/usr/src/uts/common/fs/vfs.c
@@ -4331,7 +4331,7 @@ vfs_free(vfs_t *vfsp)
void
vfs_hold(vfs_t *vfsp)
{
- atomic_add_32(&vfsp->vfs_count, 1);
+ atomic_inc_32(&vfsp->vfs_count);
ASSERT(vfsp->vfs_count != 0);
}
@@ -4344,7 +4344,7 @@ void
vfs_rele(vfs_t *vfsp)
{
ASSERT(vfsp->vfs_count != 0);
- if (atomic_add_32_nv(&vfsp->vfs_count, -1) == 0) {
+ if (atomic_dec_32_nv(&vfsp->vfs_count) == 0) {
VFS_FREEVFS(vfsp);
lofi_remove(vfsp);
if (vfsp->vfs_zone)
diff --git a/usr/src/uts/common/fs/vnode.c b/usr/src/uts/common/fs/vnode.c
index 9cd8f0562b..4abb040de0 100644
--- a/usr/src/uts/common/fs/vnode.c
+++ b/usr/src/uts/common/fs/vnode.c
@@ -1244,9 +1244,9 @@ vn_open_upgrade(
ASSERT(vp->v_type == VREG);
if (filemode & FREAD)
- atomic_add_32(&(vp->v_rdcnt), 1);
+ atomic_inc_32(&vp->v_rdcnt);
if (filemode & FWRITE)
- atomic_add_32(&(vp->v_wrcnt), 1);
+ atomic_inc_32(&vp->v_wrcnt);
}
@@ -1259,11 +1259,11 @@ vn_open_downgrade(
if (filemode & FREAD) {
ASSERT(vp->v_rdcnt > 0);
- atomic_add_32(&(vp->v_rdcnt), -1);
+ atomic_dec_32(&vp->v_rdcnt);
}
if (filemode & FWRITE) {
ASSERT(vp->v_wrcnt > 0);
- atomic_add_32(&(vp->v_wrcnt), -1);
+ atomic_dec_32(&vp->v_wrcnt);
}
}
@@ -2918,7 +2918,7 @@ fs_new_caller_id()
{
static uint64_t next_caller_id = 0LL; /* First call returns 1 */
- return ((u_longlong_t)atomic_add_64_nv(&next_caller_id, 1));
+ return ((u_longlong_t)atomic_inc_64_nv(&next_caller_id));
}
/*
@@ -3146,9 +3146,9 @@ fop_open(
*/
if ((*vpp)->v_type == VREG) {
if (mode & FREAD)
- atomic_add_32(&((*vpp)->v_rdcnt), 1);
+ atomic_inc_32(&(*vpp)->v_rdcnt);
if (mode & FWRITE)
- atomic_add_32(&((*vpp)->v_wrcnt), 1);
+ atomic_inc_32(&(*vpp)->v_wrcnt);
}
VOPXID_MAP_CR(vp, cr);
@@ -3162,9 +3162,9 @@ fop_open(
*/
VOPSTATS_UPDATE(vp, open);
if ((vp->v_type == VREG) && (mode & FREAD))
- atomic_add_32(&(vp->v_rdcnt), -1);
+ atomic_dec_32(&vp->v_rdcnt);
if ((vp->v_type == VREG) && (mode & FWRITE))
- atomic_add_32(&(vp->v_wrcnt), -1);
+ atomic_dec_32(&vp->v_wrcnt);
} else {
/*
* Some filesystems will return a different vnode,
@@ -3178,13 +3178,13 @@ fop_open(
if (*vpp != vp && *vpp != NULL) {
vn_copypath(vp, *vpp);
if (((*vpp)->v_type == VREG) && (mode & FREAD))
- atomic_add_32(&((*vpp)->v_rdcnt), 1);
+ atomic_inc_32(&(*vpp)->v_rdcnt);
if ((vp->v_type == VREG) && (mode & FREAD))
- atomic_add_32(&(vp->v_rdcnt), -1);
+ atomic_dec_32(&vp->v_rdcnt);
if (((*vpp)->v_type == VREG) && (mode & FWRITE))
- atomic_add_32(&((*vpp)->v_wrcnt), 1);
+ atomic_inc_32(&(*vpp)->v_wrcnt);
if ((vp->v_type == VREG) && (mode & FWRITE))
- atomic_add_32(&(vp->v_wrcnt), -1);
+ atomic_dec_32(&vp->v_wrcnt);
}
}
VN_RELE(vp);
@@ -3213,11 +3213,11 @@ fop_close(
if ((vp->v_type == VREG) && (count == 1)) {
if (flag & FREAD) {
ASSERT(vp->v_rdcnt > 0);
- atomic_add_32(&(vp->v_rdcnt), -1);
+ atomic_dec_32(&vp->v_rdcnt);
}
if (flag & FWRITE) {
ASSERT(vp->v_wrcnt > 0);
- atomic_add_32(&(vp->v_wrcnt), -1);
+ atomic_dec_32(&vp->v_wrcnt);
}
}
return (err);
diff --git a/usr/src/uts/common/fs/zfs/dbuf.c b/usr/src/uts/common/fs/zfs/dbuf.c
index 8db626fadc..a56372197c 100644
--- a/usr/src/uts/common/fs/zfs/dbuf.c
+++ b/usr/src/uts/common/fs/zfs/dbuf.c
@@ -178,7 +178,7 @@ dbuf_hash_insert(dmu_buf_impl_t *db)
db->db_hash_next = h->hash_table[idx];
h->hash_table[idx] = db;
mutex_exit(DBUF_HASH_MUTEX(h, idx));
- atomic_add_64(&dbuf_hash_count, 1);
+ atomic_inc_64(&dbuf_hash_count);
return (NULL);
}
@@ -212,7 +212,7 @@ dbuf_hash_remove(dmu_buf_impl_t *db)
*dbp = db->db_hash_next;
db->db_hash_next = NULL;
mutex_exit(DBUF_HASH_MUTEX(h, idx));
- atomic_add_64(&dbuf_hash_count, -1);
+ atomic_dec_64(&dbuf_hash_count);
}
static arc_evict_func_t dbuf_do_evict;
diff --git a/usr/src/uts/common/fs/zfs/spa.c b/usr/src/uts/common/fs/zfs/spa.c
index c10cac1b20..72870b5898 100644
--- a/usr/src/uts/common/fs/zfs/spa.c
+++ b/usr/src/uts/common/fs/zfs/spa.c
@@ -1817,9 +1817,9 @@ spa_load_verify_done(zio_t *zio)
if (error) {
if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
type != DMU_OT_INTENT_LOG)
- atomic_add_64(&sle->sle_meta_count, 1);
+ atomic_inc_64(&sle->sle_meta_count);
else
- atomic_add_64(&sle->sle_data_count, 1);
+ atomic_inc_64(&sle->sle_data_count);
}
zio_data_buf_free(zio->io_data, zio->io_size);
diff --git a/usr/src/uts/common/fs/zfs/sys/refcount.h b/usr/src/uts/common/fs/zfs/sys/refcount.h
index 9efc5f1e44..27c39135e0 100644
--- a/usr/src/uts/common/fs/zfs/sys/refcount.h
+++ b/usr/src/uts/common/fs/zfs/sys/refcount.h
@@ -87,8 +87,8 @@ typedef struct refcount {
#define refcount_destroy_many(rc, number) ((rc)->rc_count = 0)
#define refcount_is_zero(rc) ((rc)->rc_count == 0)
#define refcount_count(rc) ((rc)->rc_count)
-#define refcount_add(rc, holder) atomic_add_64_nv(&(rc)->rc_count, 1)
-#define refcount_remove(rc, holder) atomic_add_64_nv(&(rc)->rc_count, -1)
+#define refcount_add(rc, holder) atomic_inc_64_nv(&(rc)->rc_count)
+#define refcount_remove(rc, holder) atomic_dec_64_nv(&(rc)->rc_count)
#define refcount_add_many(rc, number, holder) \
atomic_add_64_nv(&(rc)->rc_count, number)
#define refcount_remove_many(rc, number, holder) \
diff --git a/usr/src/uts/common/fs/zfs/vdev_cache.c b/usr/src/uts/common/fs/zfs/vdev_cache.c
index 678c4e40ea..0b188dbc16 100644
--- a/usr/src/uts/common/fs/zfs/vdev_cache.c
+++ b/usr/src/uts/common/fs/zfs/vdev_cache.c
@@ -102,7 +102,7 @@ static vdc_stats_t vdc_stats = {
{ "misses", KSTAT_DATA_UINT64 }
};
-#define VDCSTAT_BUMP(stat) atomic_add_64(&vdc_stats.stat.value.ui64, 1);
+#define VDCSTAT_BUMP(stat) atomic_inc_64(&vdc_stats.stat.value.ui64);
static int
vdev_cache_offset_compare(const void *a1, const void *a2)
diff --git a/usr/src/uts/common/fs/zfs/vdev_label.c b/usr/src/uts/common/fs/zfs/vdev_label.c
index c7ae60c68f..7bbd7f2bde 100644
--- a/usr/src/uts/common/fs/zfs/vdev_label.c
+++ b/usr/src/uts/common/fs/zfs/vdev_label.c
@@ -976,7 +976,7 @@ vdev_uberblock_sync_done(zio_t *zio)
uint64_t *good_writes = zio->io_private;
if (zio->io_error == 0 && zio->io_vd->vdev_top->vdev_ms_array != 0)
- atomic_add_64(good_writes, 1);
+ atomic_inc_64(good_writes);
}
/*
@@ -1051,7 +1051,7 @@ vdev_label_sync_done(zio_t *zio)
uint64_t *good_writes = zio->io_private;
if (zio->io_error == 0)
- atomic_add_64(good_writes, 1);
+ atomic_inc_64(good_writes);
}
/*
diff --git a/usr/src/uts/common/fs/zfs/zfs_vfsops.c b/usr/src/uts/common/fs/zfs/zfs_vfsops.c
index 8e3bf69167..0b0b0a99cc 100644
--- a/usr/src/uts/common/fs/zfs/zfs_vfsops.c
+++ b/usr/src/uts/common/fs/zfs/zfs_vfsops.c
@@ -1240,7 +1240,7 @@ out:
dmu_objset_disown(zfsvfs->z_os, zfsvfs);
zfsvfs_free(zfsvfs);
} else {
- atomic_add_32(&zfs_active_fs_count, 1);
+ atomic_inc_32(&zfs_active_fs_count);
}
return (error);
@@ -2158,7 +2158,7 @@ zfs_freevfs(vfs_t *vfsp)
zfsvfs_free(zfsvfs);
- atomic_add_32(&zfs_active_fs_count, -1);
+ atomic_dec_32(&zfs_active_fs_count);
}
/*
diff --git a/usr/src/uts/common/fs/zfs/zio_inject.c b/usr/src/uts/common/fs/zfs/zio_inject.c
index b2976111d6..991a0a34ff 100644
--- a/usr/src/uts/common/fs/zfs/zio_inject.c
+++ b/usr/src/uts/common/fs/zfs/zio_inject.c
@@ -426,7 +426,7 @@ zio_inject_fault(char *name, int flags, int *id, zinject_record_t *record)
handler->zi_spa = spa;
handler->zi_record = *record;
list_insert_tail(&inject_handlers, handler);
- atomic_add_32(&zio_injection_enabled, 1);
+ atomic_inc_32(&zio_injection_enabled);
rw_exit(&inject_lock);
}
@@ -503,7 +503,7 @@ zio_clear_fault(int id)
spa_inject_delref(handler->zi_spa);
kmem_free(handler, sizeof (inject_handler_t));
- atomic_add_32(&zio_injection_enabled, -1);
+ atomic_dec_32(&zio_injection_enabled);
return (0);
}
diff --git a/usr/src/uts/common/inet/ilb/ilb.c b/usr/src/uts/common/inet/ilb/ilb.c
index 747c3768ad..f1340df195 100644
--- a/usr/src/uts/common/inet/ilb/ilb.c
+++ b/usr/src/uts/common/inet/ilb/ilb.c
@@ -638,7 +638,7 @@ ilb_rule_add(ilb_stack_t *ilbs, zoneid_t zoneid, const ilb_rule_cmd_t *cmd)
/* ir_name is all 0 to begin with */
(void) memcpy(rule->ir_name, cmd->name, ILB_RULE_NAMESZ - 1);
- rule->ir_ks_instance = atomic_add_int_nv(&ilb_kstat_instance, 1);
+ rule->ir_ks_instance = atomic_inc_uint_nv(&ilb_kstat_instance);
stackid = (netstackid_t)(uintptr_t)ilbs->ilbs_ksp->ks_private;
if ((rule->ir_ksp = ilb_rule_kstat_init(stackid, rule)) == NULL) {
ret = ENOMEM;
diff --git a/usr/src/uts/common/inet/ilb/ilb_nat.c b/usr/src/uts/common/inet/ilb/ilb_nat.c
index 6fd790e9a4..b5042fb229 100644
--- a/usr/src/uts/common/inet/ilb/ilb_nat.c
+++ b/usr/src/uts/common/inet/ilb/ilb_nat.c
@@ -236,7 +236,7 @@ ilb_find_nat_src(ilb_stack_t *ilbs, const in6_addr_t *nat_src,
tmp->nse_refcnt = 1;
(void) snprintf(arena_name, ARENA_NAMESZ, "ilb_ns_%u",
- atomic_add_32_nv(&ilb_nat_src_instance, 1));
+ atomic_inc_32_nv(&ilb_nat_src_instance));
if ((tmp->nse_port_arena = vmem_create(arena_name,
(void *)NAT_PORT_START, NAT_PORT_SIZE, 1, NULL, NULL, NULL, 1,
VM_SLEEP | VMC_IDENTIFIER)) == NULL) {
diff --git a/usr/src/uts/common/inet/ip.h b/usr/src/uts/common/inet/ip.h
index fde3dc3ad3..748d9f26e7 100644
--- a/usr/src/uts/common/inet/ip.h
+++ b/usr/src/uts/common/inet/ip.h
@@ -720,14 +720,14 @@ typedef struct ipsec_latch_s
} ipsec_latch_t;
#define IPLATCH_REFHOLD(ipl) { \
- atomic_add_32(&(ipl)->ipl_refcnt, 1); \
+ atomic_inc_32(&(ipl)->ipl_refcnt); \
ASSERT((ipl)->ipl_refcnt != 0); \
}
#define IPLATCH_REFRELE(ipl) { \
ASSERT((ipl)->ipl_refcnt != 0); \
membar_exit(); \
- if (atomic_add_32_nv(&(ipl)->ipl_refcnt, -1) == 0) \
+ if (atomic_dec_32_nv(&(ipl)->ipl_refcnt) == 0) \
iplatch_free(ipl); \
}
diff --git a/usr/src/uts/common/inet/ip/igmp.c b/usr/src/uts/common/inet/ip/igmp.c
index c6fcf68ff8..411a781e6d 100644
--- a/usr/src/uts/common/inet/ip/igmp.c
+++ b/usr/src/uts/common/inet/ip/igmp.c
@@ -508,7 +508,7 @@ igmp_query_in(ipha_t *ipha, igmpa_t *igmpa, ill_t *ill)
if (ill->ill_mcast_type != IGMP_V1_ROUTER) {
ip1dbg(("Received IGMPv1 Query on %s, switching mode "
"to IGMP_V1_ROUTER\n", ill->ill_name));
- atomic_add_16(&ill->ill_ifptr->illif_mcast_v1, 1);
+ atomic_inc_16(&ill->ill_ifptr->illif_mcast_v1);
ill->ill_mcast_type = IGMP_V1_ROUTER;
}
@@ -545,7 +545,7 @@ igmp_query_in(ipha_t *ipha, igmpa_t *igmpa, ill_t *ill)
if (ill->ill_mcast_type == IGMP_V3_ROUTER) {
ip1dbg(("Received IGMPv2 Query on %s, switching mode "
"to IGMP_V2_ROUTER", ill->ill_name));
- atomic_add_16(&ill->ill_ifptr->illif_mcast_v2, 1);
+ atomic_inc_16(&ill->ill_ifptr->illif_mcast_v2);
ill->ill_mcast_type = IGMP_V2_ROUTER;
}
ill->ill_mcast_v2_time = 0;
@@ -1716,7 +1716,7 @@ igmp_slowtimo(void *arg)
}
ill->ill_mcast_v1_time = 0;
ill->ill_mcast_v1_tset = 0;
- atomic_add_16(&ifp->illif_mcast_v1, -1);
+ atomic_dec_16(&ifp->illif_mcast_v1);
}
if ((ill->ill_mcast_type == IGMP_V2_ROUTER) &&
(ipst->ips_igmp_max_version >= IGMP_V3_ROUTER) &&
@@ -1727,7 +1727,7 @@ igmp_slowtimo(void *arg)
ill->ill_mcast_type = IGMP_V3_ROUTER;
ill->ill_mcast_v2_time = 0;
ill->ill_mcast_v2_tset = 0;
- atomic_add_16(&ifp->illif_mcast_v2, -1);
+ atomic_dec_16(&ifp->illif_mcast_v2);
}
rw_exit(&ill->ill_mcast_lock);
ill_refrele(ill);
@@ -1786,7 +1786,7 @@ mld_slowtimo(void *arg)
ill->ill_mcast_type = MLD_V2_ROUTER;
ill->ill_mcast_v1_time = 0;
ill->ill_mcast_v1_tset = 0;
- atomic_add_16(&ifp->illif_mcast_v1, -1);
+ atomic_dec_16(&ifp->illif_mcast_v1);
}
rw_exit(&ill->ill_mcast_lock);
ill_refrele(ill);
@@ -2216,7 +2216,7 @@ mld_query_in(mld_hdr_t *mldh, ill_t *ill)
if (ill->ill_mcast_type == MLD_V2_ROUTER) {
ip1dbg(("Received MLDv1 Query on %s, switching mode to "
"MLD_V1_ROUTER\n", ill->ill_name));
- atomic_add_16(&ill->ill_ifptr->illif_mcast_v1, 1);
+ atomic_inc_16(&ill->ill_ifptr->illif_mcast_v1);
ill->ill_mcast_type = MLD_V1_ROUTER;
}
diff --git a/usr/src/uts/common/inet/ip/ip_attr.c b/usr/src/uts/common/inet/ip/ip_attr.c
index 6a075639fc..85ee142dfc 100644
--- a/usr/src/uts/common/inet/ip/ip_attr.c
+++ b/usr/src/uts/common/inet/ip/ip_attr.c
@@ -100,14 +100,14 @@
*/
#define IXA_REFRELE(ixa) \
{ \
- if (atomic_add_32_nv(&(ixa)->ixa_refcnt, -1) == 0) \
+ if (atomic_dec_32_nv(&(ixa)->ixa_refcnt) == 0) \
ixa_inactive(ixa); \
}
#define IXA_REFHOLD(ixa) \
{ \
ASSERT((ixa)->ixa_refcnt != 0); \
- atomic_add_32(&(ixa)->ixa_refcnt, 1); \
+ atomic_inc_32(&(ixa)->ixa_refcnt); \
}
/*
@@ -754,7 +754,7 @@ conn_get_ixa_impl(conn_t *connp, boolean_t replace, int kmflag)
/* At least one references for the conn_t */
ASSERT(ixa->ixa_refcnt >= 1);
- if (atomic_add_32_nv(&ixa->ixa_refcnt, 1) == 2) {
+ if (atomic_inc_32_nv(&ixa->ixa_refcnt) == 2) {
/* No other thread using conn_ixa */
mutex_exit(&connp->conn_lock);
return (ixa);
@@ -856,7 +856,7 @@ conn_get_ixa_exclusive(conn_t *connp)
ASSERT(ixa->ixa_refcnt >= 1);
/* Make sure conn_ixa doesn't disappear while we copy it */
- atomic_add_32(&ixa->ixa_refcnt, 1);
+ atomic_inc_32(&ixa->ixa_refcnt);
ixa = kmem_alloc(sizeof (*ixa), KM_NOSLEEP);
if (ixa == NULL) {
diff --git a/usr/src/uts/common/inet/ip/ip_dce.c b/usr/src/uts/common/inet/ip/ip_dce.c
index a6b9c98cad..bf56147312 100644
--- a/usr/src/uts/common/inet/ip/ip_dce.c
+++ b/usr/src/uts/common/inet/ip/ip_dce.c
@@ -532,7 +532,7 @@ dce_lookup_and_add_v4(ipaddr_t dst, ip_stack_t *ipst)
dce->dce_ptpn = &dcb->dcb_dce;
dcb->dcb_dce = dce;
dce->dce_bucket = dcb;
- atomic_add_32(&dcb->dcb_cnt, 1);
+ atomic_inc_32(&dcb->dcb_cnt);
dce_refhold(dce); /* For the caller */
rw_exit(&dcb->dcb_lock);
@@ -604,7 +604,7 @@ dce_lookup_and_add_v6(const in6_addr_t *dst, uint_t ifindex, ip_stack_t *ipst)
dce->dce_ptpn = &dcb->dcb_dce;
dcb->dcb_dce = dce;
dce->dce_bucket = dcb;
- atomic_add_32(&dcb->dcb_cnt, 1);
+ atomic_inc_32(&dcb->dcb_cnt);
dce_refhold(dce); /* For the caller */
rw_exit(&dcb->dcb_lock);
@@ -731,7 +731,7 @@ dce_make_condemned(dce_t *dce)
dce->dce_generation = DCE_GENERATION_CONDEMNED;
mutex_exit(&dce->dce_lock);
/* Count how many condemned dces for kmem_cache callback */
- atomic_add_32(&ipst->ips_num_dce_condemned, 1);
+ atomic_inc_32(&ipst->ips_num_dce_condemned);
}
/*
@@ -793,7 +793,7 @@ dce_delete_locked(dcb_t *dcb, dce_t *dce)
dce->dce_next->dce_ptpn = dce->dce_ptpn;
dce->dce_ptpn = NULL;
dce->dce_next = NULL;
- atomic_add_32(&dcb->dcb_cnt, -1);
+ atomic_dec_32(&dcb->dcb_cnt);
dce_make_condemned(dce);
}
@@ -808,7 +808,7 @@ dce_inactive(dce_t *dce)
/* Count how many condemned dces for kmem_cache callback */
if (DCE_IS_CONDEMNED(dce))
- atomic_add_32(&ipst->ips_num_dce_condemned, -1);
+ atomic_dec_32(&ipst->ips_num_dce_condemned);
kmem_cache_free(dce_cache, dce);
}
@@ -817,14 +817,14 @@ void
dce_refrele(dce_t *dce)
{
ASSERT(dce->dce_refcnt != 0);
- if (atomic_add_32_nv(&dce->dce_refcnt, -1) == 0)
+ if (atomic_dec_32_nv(&dce->dce_refcnt) == 0)
dce_inactive(dce);
}
void
dce_refhold(dce_t *dce)
{
- atomic_add_32(&dce->dce_refcnt, 1);
+ atomic_inc_32(&dce->dce_refcnt);
ASSERT(dce->dce_refcnt != 0);
}
@@ -833,14 +833,14 @@ void
dce_refrele_notr(dce_t *dce)
{
ASSERT(dce->dce_refcnt != 0);
- if (atomic_add_32_nv(&dce->dce_refcnt, -1) == 0)
+ if (atomic_dec_32_nv(&dce->dce_refcnt) == 0)
dce_inactive(dce);
}
void
dce_refhold_notr(dce_t *dce)
{
- atomic_add_32(&dce->dce_refcnt, 1);
+ atomic_inc_32(&dce->dce_refcnt);
ASSERT(dce->dce_refcnt != 0);
}
diff --git a/usr/src/uts/common/inet/ip/ip_if.c b/usr/src/uts/common/inet/ip/ip_if.c
index 905197a9fa..d15d86d248 100644
--- a/usr/src/uts/common/inet/ip/ip_if.c
+++ b/usr/src/uts/common/inet/ip/ip_if.c
@@ -11776,7 +11776,7 @@ ipif_assign_seqid(ipif_t *ipif)
{
ip_stack_t *ipst = ipif->ipif_ill->ill_ipst;
- ipif->ipif_seqid = atomic_add_64_nv(&ipst->ips_ipif_g_seqid, 1);
+ ipif->ipif_seqid = atomic_inc_64_nv(&ipst->ips_ipif_g_seqid);
}
/*
@@ -12449,9 +12449,9 @@ void
ip_update_source_selection(ip_stack_t *ipst)
{
/* We skip past SRC_GENERATION_VERIFY */
- if (atomic_add_32_nv(&ipst->ips_src_generation, 1) ==
+ if (atomic_inc_32_nv(&ipst->ips_src_generation) ==
SRC_GENERATION_VERIFY)
- atomic_add_32(&ipst->ips_src_generation, 1);
+ atomic_inc_32(&ipst->ips_src_generation);
}
/*
diff --git a/usr/src/uts/common/inet/ip/ip_ire.c b/usr/src/uts/common/inet/ip/ip_ire.c
index 88c104909c..533713eddd 100644
--- a/usr/src/uts/common/inet/ip/ip_ire.c
+++ b/usr/src/uts/common/inet/ip/ip_ire.c
@@ -333,7 +333,7 @@ irb_refrele(irb_t *irb)
void
ire_refhold(ire_t *ire)
{
- atomic_add_32(&(ire)->ire_refcnt, 1);
+ atomic_inc_32(&(ire)->ire_refcnt);
ASSERT((ire)->ire_refcnt != 0);
#ifdef DEBUG
ire_trace_ref(ire);
@@ -343,7 +343,7 @@ ire_refhold(ire_t *ire)
void
ire_refhold_notr(ire_t *ire)
{
- atomic_add_32(&(ire)->ire_refcnt, 1);
+ atomic_inc_32(&(ire)->ire_refcnt);
ASSERT((ire)->ire_refcnt != 0);
}
@@ -379,7 +379,7 @@ ire_refrele(ire_t *ire)
#endif
ASSERT((ire)->ire_refcnt != 0);
membar_exit();
- if (atomic_add_32_nv(&(ire)->ire_refcnt, -1) == 0)
+ if (atomic_dec_32_nv(&(ire)->ire_refcnt) == 0)
ire_inactive(ire);
}
@@ -388,7 +388,7 @@ ire_refrele_notr(ire_t *ire)
{
ASSERT((ire)->ire_refcnt != 0);
membar_exit();
- if (atomic_add_32_nv(&(ire)->ire_refcnt, -1) == 0)
+ if (atomic_dec_32_nv(&(ire)->ire_refcnt) == 0)
ire_inactive(ire);
}
@@ -1272,7 +1272,7 @@ ire_add_v4(ire_t *ire)
* an identical_ref, but with an ire_ref held.
*/
if (ire->ire_type != IRE_IF_CLONE) {
- atomic_add_32(&ire1->ire_identical_ref, 1);
+ atomic_inc_32(&ire1->ire_identical_ref);
DTRACE_PROBE2(ire__add__exist, ire_t *, ire1,
ire_t *, ire);
}
@@ -1533,7 +1533,7 @@ ire_delete(ire_t *ire)
if (!IRE_IS_CONDEMNED(ire)) {
/* Is this an IRE representing multiple duplicate entries? */
ASSERT(ire->ire_identical_ref >= 1);
- if (atomic_add_32_nv(&ire->ire_identical_ref, -1) != 0) {
+ if (atomic_dec_32_nv(&ire->ire_identical_ref) != 0) {
/* Removed one of the identical parties */
rw_exit(&irb->irb_lock);
return;
@@ -2618,7 +2618,7 @@ ire_make_condemned(ire_t *ire)
ASSERT(!IRE_IS_CONDEMNED(ire));
ire->ire_generation = IRE_GENERATION_CONDEMNED;
/* Count how many condemned ires for kmem_cache callback */
- atomic_add_32(&ipst->ips_num_ire_condemned, 1);
+ atomic_inc_32(&ipst->ips_num_ire_condemned);
nce = ire->ire_nce_cache;
ire->ire_nce_cache = NULL;
mutex_exit(&ire->ire_lock);
diff --git a/usr/src/uts/common/inet/ip/ip_ndp.c b/usr/src/uts/common/inet/ip/ip_ndp.c
index c0be85cd62..c6dee0247a 100644
--- a/usr/src/uts/common/inet/ip/ip_ndp.c
+++ b/usr/src/uts/common/inet/ip/ip_ndp.c
@@ -479,7 +479,7 @@ ncec_delete(ncec_t *ncec)
mutex_exit(&ncec->ncec_lock);
/* Count how many condemned ires for kmem_cache callback */
- atomic_add_32(&ipst->ips_num_nce_condemned, 1);
+ atomic_inc_32(&ipst->ips_num_nce_condemned);
nce_fastpath_list_delete(ncec->ncec_ill, ncec, NULL);
/* Complete any waiting callbacks */
diff --git a/usr/src/uts/common/inet/ip/ip_output.c b/usr/src/uts/common/inet/ip/ip_output.c
index b9d6ee5a4c..5caa043a35 100644
--- a/usr/src/uts/common/inet/ip/ip_output.c
+++ b/usr/src/uts/common/inet/ip/ip_output.c
@@ -1558,7 +1558,7 @@ ire_send_noroute_v4(ire_t *ire, mblk_t *mp, void *iph_arg,
boolean_t dummy;
/* We assign an IP ident for nice errors */
- ipha->ipha_ident = atomic_add_32_nv(identp, 1);
+ ipha->ipha_ident = atomic_inc_32_nv(identp);
BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsOutNoRoutes);
diff --git a/usr/src/uts/common/inet/ip/ipsecah.c b/usr/src/uts/common/inet/ip/ipsecah.c
index d303b38a25..d7394626cf 100644
--- a/usr/src/uts/common/inet/ip/ipsecah.c
+++ b/usr/src/uts/common/inet/ip/ipsecah.c
@@ -2574,7 +2574,7 @@ ah_finish_up(ah_t *phdr_ah, ah_t *inbound_ah, ipsa_t *assoc,
phdr_ah->ah_spi = assoc->ipsa_spi;
phdr_ah->ah_replay =
- htonl(atomic_add_32_nv(&assoc->ipsa_replay, 1));
+ htonl(atomic_inc_32_nv(&assoc->ipsa_replay));
if (phdr_ah->ah_replay == 0 && assoc->ipsa_replay_wsize != 0) {
/*
* XXX We have replay counter wrapping. We probably
diff --git a/usr/src/uts/common/inet/ip/ipsecesp.c b/usr/src/uts/common/inet/ip/ipsecesp.c
index 47972a8c1a..c325e8dc26 100644
--- a/usr/src/uts/common/inet/ip/ipsecesp.c
+++ b/usr/src/uts/common/inet/ip/ipsecesp.c
@@ -2818,7 +2818,7 @@ esp_outbound(mblk_t *data_mp, ip_xmit_attr_t *ixa)
esph_ptr->esph_spi = assoc->ipsa_spi;
- esph_ptr->esph_replay = htonl(atomic_add_32_nv(&assoc->ipsa_replay, 1));
+ esph_ptr->esph_replay = htonl(atomic_inc_32_nv(&assoc->ipsa_replay));
if (esph_ptr->esph_replay == 0 && assoc->ipsa_replay_wsize != 0) {
/*
* XXX We have replay counter wrapping.
diff --git a/usr/src/uts/common/inet/ip/keysock.c b/usr/src/uts/common/inet/ip/keysock.c
index 855af28bb2..c05c147b4b 100644
--- a/usr/src/uts/common/inet/ip/keysock.c
+++ b/usr/src/uts/common/inet/ip/keysock.c
@@ -537,7 +537,7 @@ keysock_close(queue_t *q)
ks3dbg(keystack,
("Driver close, PF_KEY socket is going away.\n"));
if ((ks->keysock_flags & KEYSOCK_EXTENDED) != 0)
- atomic_add_32(&keystack->keystack_num_extended, -1);
+ atomic_dec_32(&keystack->keystack_num_extended);
size = sizeof (keysock_t);
mutex_enter(&keystack->keystack_list_lock);
*(ks->keysock_ptpn) = ks->keysock_next;
@@ -1565,7 +1565,7 @@ keysock_extended_register(keysock_t *ks, mblk_t *mp, sadb_ext_t *extv[])
/*
* Set global to indicate we prefer an extended ACQUIRE.
*/
- atomic_add_32(&keystack->keystack_num_extended, 1);
+ atomic_inc_32(&keystack->keystack_num_extended);
}
static void
@@ -2335,8 +2335,8 @@ keysock_rput(queue_t *q, mblk_t *mp)
* the last one, send up the end-of-{FLUSH,DUMP} to
* the appropriate PF_KEY socket.
*/
- if (atomic_add_32_nv(&keystack->keystack_flushdump,
- -1) != 0) {
+ if (atomic_dec_32_nv(&keystack->keystack_flushdump) !=
+ 0) {
ks1dbg(keystack,
("One flush/dump message back from %d,"
" more to go.\n", samsg->sadb_msg_satype));
@@ -2382,5 +2382,5 @@ keysock_next_seq(netstack_t *ns)
{
keysock_stack_t *keystack = ns->netstack_keysock;
- return (atomic_add_32_nv(&keystack->keystack_acquire_seq, -1));
+ return (atomic_dec_32_nv(&keystack->keystack_acquire_seq));
}
diff --git a/usr/src/uts/common/inet/ip/spd.c b/usr/src/uts/common/inet/ip/spd.c
index f830e750ee..a1c1b355e9 100644
--- a/usr/src/uts/common/inet/ip/spd.c
+++ b/usr/src/uts/common/inet/ip/spd.c
@@ -3263,7 +3263,7 @@ ipsec_action_free(ipsec_action_t *ap)
if (ap == NULL)
break;
membar_exit();
- if (atomic_add_32_nv(&(ap)->ipa_refs, -1) != 0)
+ if (atomic_dec_32_nv(&(ap)->ipa_refs) != 0)
break;
/* End inlined IPACT_REFRELE */
}
diff --git a/usr/src/uts/common/inet/ip_ire.h b/usr/src/uts/common/inet/ip_ire.h
index d3bb6b94d7..9045f98458 100644
--- a/usr/src/uts/common/inet/ip_ire.h
+++ b/usr/src/uts/common/inet/ip_ire.h
@@ -102,7 +102,7 @@ extern "C" {
* We use atomics so that we get an accurate accounting on the ires.
* Otherwise we can't determine leaks correctly.
*/
-#define BUMP_IRE_STATS(ire_stats, x) atomic_add_64(&(ire_stats).x, 1)
+#define BUMP_IRE_STATS(ire_stats, x) atomic_inc_64(&(ire_stats).x)
#ifdef _KERNEL
struct ts_label_s;
diff --git a/usr/src/uts/common/inet/ipf/netinet/ip_compat.h b/usr/src/uts/common/inet/ipf/netinet/ip_compat.h
index caae45f012..01e7c0901f 100644
--- a/usr/src/uts/common/inet/ipf/netinet/ip_compat.h
+++ b/usr/src/uts/common/inet/ipf/netinet/ip_compat.h
@@ -240,18 +240,18 @@ typedef unsigned int u_32_t;
# endif /* SOLARIS2 >= 10 */
# if SOLARIS2 >= 6
# if SOLARIS2 == 6
-# define ATOMIC_INCL(x) atomic_add_long((uint32_t*)&(x), 1)
-# define ATOMIC_DECL(x) atomic_add_long((uint32_t*)&(x), -1)
+# define ATOMIC_INCL(x) atomic_inc_ulong((uint32_t *)&(x))
+# define ATOMIC_DECL(x) atomic_dec_ulong((uint32_t *)&(x))
# else
-# define ATOMIC_INCL(x) atomic_add_long(&(x), 1)
-# define ATOMIC_DECL(x) atomic_add_long(&(x), -1)
+# define ATOMIC_INCL(x) atomic_inc_ulong(&(x))
+# define ATOMIC_DECL(x) atomic_dec_ulong(&(x))
# endif /* SOLARIS2 == 6 */
-# define ATOMIC_INC64(x) atomic_add_64((uint64_t*)&(x), 1)
-# define ATOMIC_INC32(x) atomic_add_32((uint32_t*)&(x), 1)
-# define ATOMIC_INC16(x) atomic_add_16((uint16_t*)&(x), 1)
-# define ATOMIC_DEC64(x) atomic_add_64((uint64_t*)&(x), -1)
-# define ATOMIC_DEC32(x) atomic_add_32((uint32_t*)&(x), -1)
-# define ATOMIC_DEC16(x) atomic_add_16((uint16_t*)&(x), -1)
+# define ATOMIC_INC64(x) atomic_inc_64((uint64_t *)&(x))
+# define ATOMIC_INC32(x) atomic_inc_32((uint32_t *)&(x))
+# define ATOMIC_INC16(x) atomic_inc_16((uint16_t *)&(x))
+# define ATOMIC_DEC64(x) atomic_dec_64((uint64_t *)&(x))
+# define ATOMIC_DEC32(x) atomic_dec_32((uint32_t *)&(x))
+# define ATOMIC_DEC16(x) atomic_dec_16((uint16_t *)&(x))
# else
# define ATOMIC_INC(x) { mutex_enter(&ipf_rw); (x)++; \
mutex_exit(&ipf_rw); }
@@ -925,14 +925,14 @@ typedef u_int32_t u_32_t;
mtx_unlock(&ipf_rw.ipf_lk); }
# define ATOMIC_DEC(x) { mtx_lock(&ipf_rw.ipf_lk); (x)--; \
mtx_unlock(&ipf_rw.ipf_lk); }
-# define ATOMIC_INCL(x) atomic_add_long(&(x), 1)
+# define ATOMIC_INCL(x) atomic_inc_ulong(&(x))
# define ATOMIC_INC64(x) ATOMIC_INC(x)
-# define ATOMIC_INC32(x) atomic_add_32(&(x), 1)
-# define ATOMIC_INC16(x) atomic_add_16(&(x), 1)
-# define ATOMIC_DECL(x) atomic_add_long(&(x), -1)
+# define ATOMIC_INC32(x) atomic_inc_32(&(x))
+# define ATOMIC_INC16(x) atomic_inc_16(&(x))
+# define ATOMIC_DECL(x) atomic_dec_ulong(&(x))
# define ATOMIC_DEC64(x) ATOMIC_DEC(x)
-# define ATOMIC_DEC32(x) atomic_add_32(&(x), -1)
-# define ATOMIC_DEC16(x) atomic_add_16(&(x), -1)
+# define ATOMIC_DEC32(x) atomic_dec_32(&(x))
+# define ATOMIC_DEC16(x) atomic_dec_16(&(x))
# define SPL_X(x) ;
# define SPL_NET(x) ;
# define SPL_IMP(x) ;
diff --git a/usr/src/uts/common/inet/ipsec_impl.h b/usr/src/uts/common/inet/ipsec_impl.h
index 228e01008d..256664911d 100644
--- a/usr/src/uts/common/inet/ipsec_impl.h
+++ b/usr/src/uts/common/inet/ipsec_impl.h
@@ -298,13 +298,13 @@ typedef struct ipsec_action_s
} ipsec_action_t;
#define IPACT_REFHOLD(ipa) { \
- atomic_add_32(&(ipa)->ipa_refs, 1); \
+ atomic_inc_32(&(ipa)->ipa_refs); \
ASSERT((ipa)->ipa_refs != 0); \
}
#define IPACT_REFRELE(ipa) { \
ASSERT((ipa)->ipa_refs != 0); \
membar_exit(); \
- if (atomic_add_32_nv(&(ipa)->ipa_refs, -1) == 0) \
+ if (atomic_dec_32_nv(&(ipa)->ipa_refs) == 0) \
ipsec_action_free(ipa); \
(ipa) = 0; \
}
@@ -414,13 +414,13 @@ struct ipsec_policy_s
};
#define IPPOL_REFHOLD(ipp) { \
- atomic_add_32(&(ipp)->ipsp_refs, 1); \
+ atomic_inc_32(&(ipp)->ipsp_refs); \
ASSERT((ipp)->ipsp_refs != 0); \
}
#define IPPOL_REFRELE(ipp) { \
ASSERT((ipp)->ipsp_refs != 0); \
membar_exit(); \
- if (atomic_add_32_nv(&(ipp)->ipsp_refs, -1) == 0) \
+ if (atomic_dec_32_nv(&(ipp)->ipsp_refs) == 0) \
ipsec_policy_free(ipp); \
(ipp) = 0; \
}
@@ -461,13 +461,13 @@ typedef struct ipsec_policy_head_s
} ipsec_policy_head_t;
#define IPPH_REFHOLD(iph) { \
- atomic_add_32(&(iph)->iph_refs, 1); \
+ atomic_inc_32(&(iph)->iph_refs); \
ASSERT((iph)->iph_refs != 0); \
}
#define IPPH_REFRELE(iph, ns) { \
ASSERT((iph)->iph_refs != 0); \
membar_exit(); \
- if (atomic_add_32_nv(&(iph)->iph_refs, -1) == 0) \
+ if (atomic_dec_32_nv(&(iph)->iph_refs) == 0) \
ipsec_polhead_free(iph, ns); \
(iph) = 0; \
}
@@ -548,14 +548,14 @@ typedef struct ipsec_tun_pol_s {
ITPF_I_PER_PORT_SECURITY))
#define ITP_REFHOLD(itp) { \
- atomic_add_32(&((itp)->itp_refcnt), 1); \
+ atomic_inc_32(&((itp)->itp_refcnt)); \
ASSERT((itp)->itp_refcnt != 0); \
}
#define ITP_REFRELE(itp, ns) { \
ASSERT((itp)->itp_refcnt != 0); \
membar_exit(); \
- if (atomic_add_32_nv(&((itp)->itp_refcnt), -1) == 0) \
+ if (atomic_dec_32_nv(&((itp)->itp_refcnt)) == 0) \
itp_free(itp, ns); \
}
@@ -577,7 +577,7 @@ typedef struct ipsid_s
*/
#define IPSID_REFHOLD(ipsid) { \
- atomic_add_32(&(ipsid)->ipsid_refcnt, 1); \
+ atomic_inc_32(&(ipsid)->ipsid_refcnt); \
ASSERT((ipsid)->ipsid_refcnt != 0); \
}
@@ -588,7 +588,7 @@ typedef struct ipsid_s
#define IPSID_REFRELE(ipsid) { \
membar_exit(); \
- atomic_add_32(&(ipsid)->ipsid_refcnt, -1); \
+ atomic_dec_32(&(ipsid)->ipsid_refcnt); \
}
/*
diff --git a/usr/src/uts/common/inet/kssl/ksslimpl.h b/usr/src/uts/common/inet/kssl/ksslimpl.h
index 8d379bfa7c..95e83ee6b0 100644
--- a/usr/src/uts/common/inet/kssl/ksslimpl.h
+++ b/usr/src/uts/common/inet/kssl/ksslimpl.h
@@ -123,14 +123,14 @@ typedef struct mech_to_cipher_s {
} mech_to_cipher_t;
#define KSSL_ENTRY_REFHOLD(kssl_entry) { \
- atomic_add_32(&(kssl_entry)->ke_refcnt, 1); \
+ atomic_inc_32(&(kssl_entry)->ke_refcnt); \
ASSERT((kssl_entry)->ke_refcnt != 0); \
}
#define KSSL_ENTRY_REFRELE(kssl_entry) { \
ASSERT((kssl_entry)->ke_refcnt != 0); \
membar_exit(); \
- if (atomic_add_32_nv(&(kssl_entry)->ke_refcnt, -1) == 0) { \
+ if (atomic_dec_32_nv(&(kssl_entry)->ke_refcnt) == 0) { \
kssl_free_entry((kssl_entry)); \
} \
}
diff --git a/usr/src/uts/common/inet/nca/nca.h b/usr/src/uts/common/inet/nca/nca.h
index 5508e5a631..ec93cf5deb 100644
--- a/usr/src/uts/common/inet/nca/nca.h
+++ b/usr/src/uts/common/inet/nca/nca.h
@@ -959,7 +959,7 @@ extern kmutex_t nca_dcb_readers;
#define DCB_RD_EXIT(cpu) { \
uint32_t *rp = &nca_gv[cpu].dcb_readers; \
\
- if (atomic_add_32_nv(rp, -1) == DCB_COUNT_USELOCK) { \
+ if (atomic_dec_32_nv(rp) == DCB_COUNT_USELOCK) { \
mutex_enter(&nca_dcb_lock); \
if (CV_HAS_WAITERS(&nca_dcb_wait)) { \
/* May be the last reader for this CPU */ \
diff --git a/usr/src/uts/common/inet/sadb.h b/usr/src/uts/common/inet/sadb.h
index 73ffb1cf72..23a6a68db4 100644
--- a/usr/src/uts/common/inet/sadb.h
+++ b/usr/src/uts/common/inet/sadb.h
@@ -353,7 +353,7 @@ typedef struct ipsa_s {
*/
#define IPSA_REFHOLD(ipsa) { \
- atomic_add_32(&(ipsa)->ipsa_refcnt, 1); \
+ atomic_inc_32(&(ipsa)->ipsa_refcnt); \
ASSERT((ipsa)->ipsa_refcnt != 0); \
}
@@ -368,7 +368,7 @@ typedef struct ipsa_s {
#define IPSA_REFRELE(ipsa) { \
ASSERT((ipsa)->ipsa_refcnt != 0); \
membar_exit(); \
- if (atomic_add_32_nv(&(ipsa)->ipsa_refcnt, -1) == 0) \
+ if (atomic_dec_32_nv(&(ipsa)->ipsa_refcnt) == 0) \
((ipsa)->ipsa_freefunc)(ipsa); \
}
diff --git a/usr/src/uts/common/inet/sctp/sctp.c b/usr/src/uts/common/inet/sctp/sctp.c
index 259cd2baf1..adc28ae129 100644
--- a/usr/src/uts/common/inet/sctp/sctp.c
+++ b/usr/src/uts/common/inet/sctp/sctp.c
@@ -1756,7 +1756,7 @@ sctp_inc_taskq(sctp_stack_t *sctps)
ASSERT(sctps->sctps_recvq_tq_list[
sctps->sctps_recvq_tq_list_cur_sz] == NULL);
sctps->sctps_recvq_tq_list[sctps->sctps_recvq_tq_list_cur_sz] = tq;
- atomic_add_32(&sctps->sctps_recvq_tq_list_cur_sz, 1);
+ atomic_inc_32(&sctps->sctps_recvq_tq_list_cur_sz);
mutex_exit(&sctps->sctps_rq_tq_lock);
}
@@ -1783,7 +1783,7 @@ sctp_find_next_tq(sctp_t *sctp)
* this loop. The problem this will create is that the loop may
* not have tried all the recvq_tq. This should be OK.
*/
- next_tq = atomic_add_32_nv(&sctps->sctps_recvq_tq_list_cur, 1) %
+ next_tq = atomic_inc_32_nv(&sctps->sctps_recvq_tq_list_cur) %
sctps->sctps_recvq_tq_list_cur_sz;
for (try = 0; try < sctps->sctps_recvq_tq_list_cur_sz; try++) {
tq = sctps->sctps_recvq_tq_list[next_tq];
diff --git a/usr/src/uts/common/inet/sctp/sctp_addr.c b/usr/src/uts/common/inet/sctp/sctp_addr.c
index 9be87faa15..e7a98de2d9 100644
--- a/usr/src/uts/common/inet/sctp/sctp_addr.c
+++ b/usr/src/uts/common/inet/sctp/sctp_addr.c
@@ -137,7 +137,7 @@ sctp_ipif_inactive(sctp_ipif_t *sctp_ipif)
rw_destroy(&sctp_ipif->sctp_ipif_lock);
kmem_free(sctp_ipif, sizeof (sctp_ipif_t));
- (void) atomic_add_32_nv(&sctp_ill->sctp_ill_ipifcnt, -1);
+ (void) atomic_dec_32_nv(&sctp_ill->sctp_ill_ipifcnt);
if (rw_tryupgrade(&sctps->sctps_g_ills_lock) != 0) {
rw_downgrade(&sctps->sctps_g_ipifs_lock);
if (sctp_ill->sctp_ill_ipifcnt == 0 &&
@@ -884,8 +884,8 @@ sctp_move_ipif(ipif_t *ipif, ill_t *f_ill, ill_t *t_ill)
ASSERT(sctp_ipif->sctp_ipif_ill == fsctp_ill);
sctp_ipif->sctp_ipif_ill = tsctp_ill;
rw_exit(&sctp_ipif->sctp_ipif_lock);
- (void) atomic_add_32_nv(&fsctp_ill->sctp_ill_ipifcnt, -1);
- atomic_add_32(&tsctp_ill->sctp_ill_ipifcnt, 1);
+ (void) atomic_dec_32_nv(&fsctp_ill->sctp_ill_ipifcnt);
+ atomic_inc_32(&tsctp_ill->sctp_ill_ipifcnt);
rw_exit(&sctps->sctps_g_ipifs_lock);
rw_exit(&sctps->sctps_g_ills_lock);
}
@@ -1097,8 +1097,7 @@ sctp_update_ipif_addr(ipif_t *ipif, in6_addr_t v6addr)
sctps->sctps_g_ipifs_count--;
rw_destroy(&osctp_ipif->sctp_ipif_lock);
kmem_free(osctp_ipif, sizeof (sctp_ipif_t));
- (void) atomic_add_32_nv(&osctp_ill->sctp_ill_ipifcnt,
- -1);
+ (void) atomic_dec_32_nv(&osctp_ill->sctp_ill_ipifcnt);
}
}
@@ -1130,7 +1129,7 @@ sctp_update_ipif_addr(ipif_t *ipif, in6_addr_t v6addr)
list_insert_head(&sctps->sctps_g_ipifs[hindex].sctp_ipif_list,
(void *)sctp_ipif);
sctps->sctps_g_ipifs[hindex].ipif_count++;
- atomic_add_32(&sctp_ill->sctp_ill_ipifcnt, 1);
+ atomic_inc_32(&sctp_ill->sctp_ill_ipifcnt);
if (sctp_ipif->sctp_ipif_state == SCTP_IPIFS_UP)
sctp_chk_and_updt_saddr(hindex, sctp_ipif, sctps);
rw_exit(&sctps->sctps_g_ipifs_lock);
@@ -1210,7 +1209,7 @@ sctp_update_ipif(ipif_t *ipif, int op)
sctps->sctps_g_ipifs_count--;
rw_destroy(&sctp_ipif->sctp_ipif_lock);
kmem_free(sctp_ipif, sizeof (sctp_ipif_t));
- (void) atomic_add_32_nv(&sctp_ill->sctp_ill_ipifcnt, -1);
+ (void) atomic_dec_32_nv(&sctp_ill->sctp_ill_ipifcnt);
if (rw_tryupgrade(&sctps->sctps_g_ills_lock) != 0) {
rw_downgrade(&sctps->sctps_g_ipifs_lock);
if (sctp_ill->sctp_ill_ipifcnt == 0 &&
@@ -2030,8 +2029,7 @@ sctp_free_ipifs(sctp_stack_t *sctps)
list_remove(&sctps->sctps_g_ipifs[i].sctp_ipif_list,
sctp_ipif);
sctps->sctps_g_ipifs_count--;
- (void) atomic_add_32_nv(&sctp_ill->sctp_ill_ipifcnt,
- -1);
+ (void) atomic_dec_32_nv(&sctp_ill->sctp_ill_ipifcnt);
kmem_free(sctp_ipif, sizeof (sctp_ipif_t));
sctp_ipif =
list_tail(&sctps->sctps_g_ipifs[i].sctp_ipif_list);
diff --git a/usr/src/uts/common/inet/sctp/sctp_conn.c b/usr/src/uts/common/inet/sctp/sctp_conn.c
index a2bf44e3f4..3845f24c32 100644
--- a/usr/src/uts/common/inet/sctp/sctp_conn.c
+++ b/usr/src/uts/common/inet/sctp/sctp_conn.c
@@ -168,9 +168,9 @@ sctp_conn_request(sctp_t *sctp, mblk_t *mp, uint_t ifindex, uint_t ip_hdr_len,
if (slc != NULL) {
int64_t now;
- if (atomic_add_32_nv(&slc->slc_cnt, 1) > slc->slc_max + 1) {
+ if (atomic_inc_32_nv(&slc->slc_cnt) > slc->slc_max + 1) {
now = ddi_get_lbolt64();
- atomic_add_32(&slc->slc_cnt, -1);
+ atomic_dec_32(&slc->slc_cnt);
SCTP_KSTAT(sctps, sctp_listen_cnt_drop);
slc->slc_drop++;
if (now - slc->slc_report_time >
@@ -189,7 +189,7 @@ sctp_conn_request(sctp_t *sctp, mblk_t *mp, uint_t ifindex, uint_t ip_hdr_len,
if ((eager = sctp_create_eager(sctp)) == NULL) {
if (slc_set)
- atomic_add_32(&slc->slc_cnt, -1);
+ atomic_dec_32(&slc->slc_cnt);
return (NULL);
}
econnp = eager->sctp_connp;
diff --git a/usr/src/uts/common/inet/sctp/sctp_impl.h b/usr/src/uts/common/inet/sctp/sctp_impl.h
index cf69efc382..1586961332 100644
--- a/usr/src/uts/common/inet/sctp/sctp_impl.h
+++ b/usr/src/uts/common/inet/sctp/sctp_impl.h
@@ -404,7 +404,7 @@ typedef struct sctp_listen_cnt_s {
#define SCTP_DECR_LISTEN_CNT(sctp) \
{ \
ASSERT((sctp)->sctp_listen_cnt->slc_cnt > 0); \
- if (atomic_add_32_nv(&(sctp)->sctp_listen_cnt->slc_cnt, -1) == 0) \
+ if (atomic_dec_32_nv(&(sctp)->sctp_listen_cnt->slc_cnt) == 0) \
kmem_free((sctp)->sctp_listen_cnt, sizeof (sctp_listen_cnt_t));\
(sctp)->sctp_listen_cnt = NULL; \
}
diff --git a/usr/src/uts/common/inet/tcp/tcp_input.c b/usr/src/uts/common/inet/tcp/tcp_input.c
index 367d78eeb8..b98eb33a46 100644
--- a/usr/src/uts/common/inet/tcp/tcp_input.c
+++ b/usr/src/uts/common/inet/tcp/tcp_input.c
@@ -1426,10 +1426,10 @@ tcp_input_listener(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *ira)
tcp_listen_cnt_t *tlc = listener->tcp_listen_cnt;
int64_t now;
- if (atomic_add_32_nv(&tlc->tlc_cnt, 1) > tlc->tlc_max + 1) {
+ if (atomic_inc_32_nv(&tlc->tlc_cnt) > tlc->tlc_max + 1) {
mutex_exit(&listener->tcp_eager_lock);
now = ddi_get_lbolt64();
- atomic_add_32(&tlc->tlc_cnt, -1);
+ atomic_dec_32(&tlc->tlc_cnt);
TCP_STAT(tcps, tcp_listen_cnt_drop);
tlc->tlc_drop++;
if (now - tlc->tlc_report_time >
@@ -1871,7 +1871,7 @@ error3:
error2:
freemsg(mp);
if (tlc_set)
- atomic_add_32(&listener->tcp_listen_cnt->tlc_cnt, -1);
+ atomic_dec_32(&listener->tcp_listen_cnt->tlc_cnt);
}
/*
diff --git a/usr/src/uts/common/inet/tcp_impl.h b/usr/src/uts/common/inet/tcp_impl.h
index da0947bccb..cb8984a9ca 100644
--- a/usr/src/uts/common/inet/tcp_impl.h
+++ b/usr/src/uts/common/inet/tcp_impl.h
@@ -371,7 +371,7 @@ typedef struct tcp_listen_cnt_s {
#define TCP_DECR_LISTEN_CNT(tcp) \
{ \
ASSERT((tcp)->tcp_listen_cnt->tlc_cnt > 0); \
- if (atomic_add_32_nv(&(tcp)->tcp_listen_cnt->tlc_cnt, -1) == 0) \
+ if (atomic_dec_32_nv(&(tcp)->tcp_listen_cnt->tlc_cnt) == 0) \
kmem_free((tcp)->tcp_listen_cnt, sizeof (tcp_listen_cnt_t)); \
(tcp)->tcp_listen_cnt = NULL; \
}
diff --git a/usr/src/uts/common/io/bscbus.c b/usr/src/uts/common/io/bscbus.c
index b1f1ec9567..318ed40d69 100644
--- a/usr/src/uts/common/io/bscbus.c
+++ b/usr/src/uts/common/io/bscbus.c
@@ -2622,7 +2622,7 @@ void bscbus_cmd_log(struct bscbus_channel_state *csp, bsc_cmd_stamp_t cat,
return;
if ((bscbus_cmd_log_flags & (1 << cat)) == 0)
return;
- idx = atomic_add_32_nv(&ssp->cmd_log_idx, 1);
+ idx = atomic_inc_32_nv(&ssp->cmd_log_idx);
logp = &ssp->cmd_log[idx % ssp->cmd_log_size];
logp->bcl_seq = idx;
logp->bcl_cat = cat;
diff --git a/usr/src/uts/common/io/chxge/pe.c b/usr/src/uts/common/io/chxge/pe.c
index 63db9c5eb1..816d5ce8a5 100644
--- a/usr/src/uts/common/io/chxge/pe.c
+++ b/usr/src/uts/common/io/chxge/pe.c
@@ -35,8 +35,6 @@
* Interface code
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
@@ -1390,7 +1388,7 @@ ch_small_rbuf_recycle(ch_esb_t *rbp)
* We decrement here so anyone trying to do fini will
* only remove the driver once the counts go to 0.
*/
- atomic_add_32(&buffers_in_use[i], -1);
+ atomic_dec_32(&buffers_in_use[i]);
return;
}
@@ -1403,7 +1401,7 @@ ch_small_rbuf_recycle(ch_esb_t *rbp)
/*
* decrement count of receive buffers freed by callback
*/
- atomic_add_32(&buffers_in_use[rbp->cs_index], -1);
+ atomic_dec_32(&buffers_in_use[rbp->cs_index]);
}
/*
@@ -1433,7 +1431,7 @@ ch_big_rbuf_recycle(ch_esb_t *rbp)
* We decrement here so anyone trying to do fini will
* only remove the driver once the counts go to 0.
*/
- atomic_add_32(&buffers_in_use[i], -1);
+ atomic_dec_32(&buffers_in_use[i]);
return;
}
@@ -1446,7 +1444,7 @@ ch_big_rbuf_recycle(ch_esb_t *rbp)
/*
* decrement count of receive buffers freed by callback
*/
- atomic_add_32(&buffers_in_use[rbp->cs_index], -1);
+ atomic_dec_32(&buffers_in_use[rbp->cs_index]);
}
/*
diff --git a/usr/src/uts/common/io/comstar/lu/stmf_sbd/sbd.c b/usr/src/uts/common/io/comstar/lu/stmf_sbd/sbd.c
index 6c9c1f1158..0a25fdac4b 100644
--- a/usr/src/uts/common/io/comstar/lu/stmf_sbd/sbd.c
+++ b/usr/src/uts/common/io/comstar/lu/stmf_sbd/sbd.c
@@ -2010,7 +2010,7 @@ over_meta_open:
}
sl->sl_trans_op = SL_OP_NONE;
- atomic_add_32(&sbd_lu_count, 1);
+ atomic_inc_32(&sbd_lu_count);
return (0);
scm_err_out:
@@ -2184,7 +2184,7 @@ sbd_create_standby_lu(sbd_create_standby_lu_t *slu, uint32_t *err_ret)
}
sl->sl_trans_op = SL_OP_NONE;
- atomic_add_32(&sbd_lu_count, 1);
+ atomic_inc_32(&sbd_lu_count);
return (0);
scs_err_out:
@@ -2543,7 +2543,7 @@ sim_sli_loaded:
if (ret) {
goto sim_err_out;
}
- atomic_add_32(&sbd_lu_count, 1);
+ atomic_inc_32(&sbd_lu_count);
}
bcopy(sl->sl_device_id + 4, ilu->ilu_ret_guid, 16);
@@ -2949,7 +2949,7 @@ sbd_delete_locked_lu(sbd_lu_t *sl, uint32_t *err_ret,
sdl_do_dereg:;
if (stmf_deregister_lu(sl->sl_lu) != STMF_SUCCESS)
return (EBUSY);
- atomic_add_32(&sbd_lu_count, -1);
+ atomic_dec_32(&sbd_lu_count);
return (sbd_close_delete_lu(sl, 0));
}
diff --git a/usr/src/uts/common/io/comstar/port/fcoet/fcoet.h b/usr/src/uts/common/io/comstar/port/fcoet/fcoet.h
index 2aa591cf3c..8859d5eac8 100644
--- a/usr/src/uts/common/io/comstar/port/fcoet/fcoet.h
+++ b/usr/src/uts/common/io/comstar/port/fcoet/fcoet.h
@@ -223,8 +223,8 @@ typedef struct fcoet_exchange {
* These two situation should seldom happen. But just invoke this seems won't
* downgrade the performance too much, so we keep it.
*/
-#define FCOET_BUSY_XCHG(xch) atomic_add_8(&(xch)->xch_ref, 1)
-#define FCOET_RELE_XCHG(xch) atomic_add_8(&(xch)->xch_ref, -1)
+#define FCOET_BUSY_XCHG(xch) atomic_inc_8(&(xch)->xch_ref)
+#define FCOET_RELE_XCHG(xch) atomic_dec_8(&(xch)->xch_ref)
#define XCH_FLAG_NONFCP_REQ_SENT 0x0001
#define XCH_FLAG_NONFCP_RESP_SENT 0x0002
diff --git a/usr/src/uts/common/io/comstar/port/fcoet/fcoet_fc.c b/usr/src/uts/common/io/comstar/port/fcoet/fcoet_fc.c
index 9884a16605..779751e69d 100644
--- a/usr/src/uts/common/io/comstar/port/fcoet/fcoet_fc.c
+++ b/usr/src/uts/common/io/comstar/port/fcoet/fcoet_fc.c
@@ -304,7 +304,7 @@ fcoet_xfer_scsi_data(fct_cmd_t *cmd, stmf_data_buf_t *dbuf, uint32_t ioflags)
FFM_S_ID(cmd->cmd_lportid, frm);
FFM_D_ID(cmd->cmd_rportid, frm);
FFM_SEQ_CNT(xch->xch_sequence_no, frm);
- atomic_add_8(&xch->xch_sequence_no, 1);
+ atomic_inc_8(&xch->xch_sequence_no);
FFM_PARAM(offset, frm);
offset += data_size;
left_size -= data_size;
@@ -938,9 +938,9 @@ fcoet_logo_fabric(fcoet_soft_state_t *ss)
fcoet_init_tfm(frm, NULL);
bzero(frm->frm_payload, frm->frm_payload_size);
}
- xch_oxid = atomic_add_16_nv(&ss->ss_next_sol_oxid, 1);
+ xch_oxid = atomic_inc_16_nv(&ss->ss_next_sol_oxid);
if (xch_oxid == 0xFFFF) {
- xch_oxid = atomic_add_16_nv(&ss->ss_next_sol_oxid, 1);
+ xch_oxid = atomic_inc_16_nv(&ss->ss_next_sol_oxid);
}
FFM_R_CTL(0x22, frm);
FRM2TFM(frm)->tfm_rctl = 0x22;
diff --git a/usr/src/uts/common/io/comstar/port/fct/discovery.c b/usr/src/uts/common/io/comstar/port/fct/discovery.c
index 47553b20ba..8260823589 100644
--- a/usr/src/uts/common/io/comstar/port/fct/discovery.c
+++ b/usr/src/uts/common/io/comstar/port/fct/discovery.c
@@ -938,7 +938,7 @@ start_els_posting:;
fct_post_to_discovery_queue(iport, irp, NULL);
/* A PLOGI also invalidates any RSCNs related to this rp */
- atomic_add_32(&irp->irp_rscn_counter, 1);
+ atomic_inc_32(&irp->irp_rscn_counter);
} else {
/*
* For everything else, we have (or be able to lookup) a
@@ -983,7 +983,7 @@ start_els_posting:;
*/
atomic_or_32(&icmd->icmd_flags, ICMD_IMPLICIT_CMD_HAS_RESOURCE);
}
- atomic_add_16(&irp->irp_nonfcp_xchg_count, 1);
+ atomic_inc_16(&irp->irp_nonfcp_xchg_count);
/*
* Grab the remote port lock while we modify the port state.
@@ -998,13 +998,13 @@ start_els_posting:;
if ((op == ELS_OP_PLOGI) || (op == ELS_OP_LOGO)) {
rf |= IRP_PLOGI_DONE;
if (irp->irp_flags & IRP_PLOGI_DONE)
- atomic_add_32(&iport->iport_nrps_login, -1);
+ atomic_dec_32(&iport->iport_nrps_login);
}
- atomic_add_16(&irp->irp_sa_elses_count, 1);
+ atomic_inc_16(&irp->irp_sa_elses_count);
atomic_and_32(&irp->irp_flags, ~rf);
atomic_or_32(&icmd->icmd_flags, ICMD_SESSION_AFFECTING);
} else {
- atomic_add_16(&irp->irp_nsa_elses_count, 1);
+ atomic_inc_16(&irp->irp_nsa_elses_count);
}
fct_post_to_discovery_queue(iport, irp, icmd);
@@ -1160,7 +1160,7 @@ fct_register_remote_port(fct_local_port_t *port, fct_remote_port_t *rp,
iport->iport_rp_slots[rp->rp_handle] = irp;
atomic_or_32(&irp->irp_flags, IRP_HANDLE_OPENED);
}
- (void) atomic_add_64_nv(&iport->iport_last_change, 1);
+ (void) atomic_inc_64_nv(&iport->iport_last_change);
fct_log_remote_port_event(port, ESC_SUNFC_TARGET_ADD,
rp->rp_pwwn, rp->rp_id);
@@ -1205,7 +1205,7 @@ fct_deregister_remote_port(fct_local_port_t *port, fct_remote_port_t *rp)
atomic_and_32(&irp->irp_flags, ~IRP_HANDLE_OPENED);
iport->iport_rp_slots[rp->rp_handle] = NULL;
}
- (void) atomic_add_64_nv(&iport->iport_last_change, 1);
+ (void) atomic_inc_64_nv(&iport->iport_last_change);
fct_log_remote_port_event(port, ESC_SUNFC_TARGET_REMOVE,
rp->rp_pwwn, rp->rp_id);
@@ -1511,12 +1511,12 @@ fct_process_plogi(fct_i_cmd_t *icmd)
}
}
}
- atomic_add_16(&irp->irp_sa_elses_count, -1);
+ atomic_dec_16(&irp->irp_sa_elses_count);
if (ret == FCT_SUCCESS) {
if (cmd_type == FCT_CMD_RCVD_ELS) {
atomic_or_32(&irp->irp_flags, IRP_PLOGI_DONE);
- atomic_add_32(&iport->iport_nrps_login, 1);
+ atomic_inc_32(&iport->iport_nrps_login);
if (irp->irp_deregister_timer)
irp->irp_deregister_timer = 0;
}
@@ -1597,7 +1597,7 @@ fct_process_prli(fct_i_cmd_t *icmd)
els->els_req_size, els->els_req_payload[6]);
fct_dequeue_els(irp);
- atomic_add_16(&irp->irp_sa_elses_count, -1);
+ atomic_dec_16(&irp->irp_sa_elses_count);
ret = fct_send_accrjt(cmd, ELS_OP_LSRJT, 3, 0x2c);
goto prli_end;
}
@@ -1671,7 +1671,7 @@ fct_process_prli(fct_i_cmd_t *icmd)
}
fct_dequeue_els(irp);
- atomic_add_16(&irp->irp_sa_elses_count, -1);
+ atomic_dec_16(&irp->irp_sa_elses_count);
if (ses == NULL) {
/* fail PRLI */
ret = fct_send_accrjt(cmd, ELS_OP_LSRJT, 3, 0);
@@ -1775,7 +1775,7 @@ fct_process_logo(fct_i_cmd_t *icmd)
}
fct_dequeue_els(irp);
- atomic_add_16(&irp->irp_sa_elses_count, -1);
+ atomic_dec_16(&irp->irp_sa_elses_count);
/* don't send response if this is an implicit logout cmd */
if (!(icmd->icmd_flags & ICMD_IMPLICIT)) {
@@ -1886,7 +1886,7 @@ fct_process_prlo(fct_i_cmd_t *icmd)
}
fct_dequeue_els(irp);
- atomic_add_16(&irp->irp_sa_elses_count, -1);
+ atomic_dec_16(&irp->irp_sa_elses_count);
ret = fct_send_accrjt(cmd, ELS_OP_ACC, 0, 0);
if (ret != FCT_SUCCESS)
fct_queue_cmd_for_termination(cmd, ret);
@@ -1911,7 +1911,7 @@ fct_process_rcvd_adisc(fct_i_cmd_t *icmd)
fct_status_t ret;
fct_dequeue_els(irp);
- atomic_add_16(&irp->irp_nsa_elses_count, -1);
+ atomic_dec_16(&irp->irp_nsa_elses_count);
/* Validate the adisc request */
p = els->els_req_payload;
@@ -1949,7 +1949,7 @@ fct_process_unknown_els(fct_i_cmd_t *icmd)
ASSERT(icmd->icmd_cmd->cmd_type == FCT_CMD_RCVD_ELS);
fct_dequeue_els(ICMD_TO_IRP(icmd));
- atomic_add_16(&ICMD_TO_IRP(icmd)->irp_nsa_elses_count, -1);
+ atomic_dec_16(&ICMD_TO_IRP(icmd)->irp_nsa_elses_count);
op = ICMD_TO_ELS(icmd)->els_req_payload[0];
stmf_trace(iport->iport_alias, "Rejecting unknown unsol els %x (%s)",
op, FCT_ELS_NAME(op));
@@ -1971,7 +1971,7 @@ fct_process_rscn(fct_i_cmd_t *icmd)
uint32_t rscn_req_size;
fct_dequeue_els(ICMD_TO_IRP(icmd));
- atomic_add_16(&ICMD_TO_IRP(icmd)->irp_nsa_elses_count, -1);
+ atomic_dec_16(&ICMD_TO_IRP(icmd)->irp_nsa_elses_count);
if (icmd->icmd_cmd->cmd_type == FCT_CMD_RCVD_ELS) {
op = ICMD_TO_ELS(icmd)->els_req_payload[0];
stmf_trace(iport->iport_alias, "Accepting RSCN %x (%s)",
@@ -2068,9 +2068,9 @@ fct_process_els(fct_i_local_port_t *iport, fct_i_remote_port_t *irp)
fct_i_cmd_t *c = (*ppcmd)->icmd_next;
if ((*ppcmd)->icmd_flags & ICMD_SESSION_AFFECTING)
- atomic_add_16(&irp->irp_sa_elses_count, -1);
+ atomic_dec_16(&irp->irp_sa_elses_count);
else
- atomic_add_16(&irp->irp_nsa_elses_count, -1);
+ atomic_dec_16(&irp->irp_nsa_elses_count);
(*ppcmd)->icmd_next = cmd_to_abort;
cmd_to_abort = *ppcmd;
*ppcmd = c;
@@ -2125,7 +2125,7 @@ fct_process_els(fct_i_local_port_t *iport, fct_i_remote_port_t *irp)
fct_local_port_t *port = iport->iport_port;
fct_dequeue_els(irp);
- atomic_add_16(&irp->irp_nsa_elses_count, -1);
+ atomic_dec_16(&irp->irp_nsa_elses_count);
atomic_or_32(&icmd->icmd_flags, ICMD_KNOWN_TO_FCA);
if ((s = port->port_send_cmd(cmd)) != FCT_SUCCESS) {
atomic_and_32(&icmd->icmd_flags, ~ICMD_KNOWN_TO_FCA);
@@ -2179,7 +2179,7 @@ fct_handle_sol_els_completion(fct_i_local_port_t *iport, fct_i_cmd_t *icmd)
stmf_wwn_to_devid_desc((scsi_devid_desc_t *)irp->irp_id,
irp->irp_rp->rp_pwwn, PROTOCOL_FIBRE_CHANNEL);
atomic_or_32(&irp->irp_flags, IRP_PLOGI_DONE);
- atomic_add_32(&iport->iport_nrps_login, 1);
+ atomic_inc_32(&iport->iport_nrps_login);
if (irp->irp_deregister_timer) {
irp->irp_deregister_timer = 0;
irp->irp_dereg_count = 0;
@@ -2225,7 +2225,7 @@ fct_check_cmdlist(fct_i_local_port_t *iport)
iport->iport_cached_cmdlist = icmd->icmd_next;
iport->iport_cached_ncmds--;
mutex_exit(&iport->iport_cached_cmd_lock);
- atomic_add_32(&iport->iport_total_alloced_ncmds, -1);
+ atomic_dec_32(&iport->iport_total_alloced_ncmds);
fct_free(icmd->icmd_cmd);
}
mutex_enter(&iport->iport_worker_lock);
@@ -2374,7 +2374,7 @@ fct_handle_solct(fct_cmd_t *cmd)
rw_exit(&irp->irp_lock);
rw_exit(&iport->iport_lock);
- atomic_add_16(&irp->irp_nonfcp_xchg_count, 1);
+ atomic_inc_16(&irp->irp_nonfcp_xchg_count);
atomic_or_32(&icmd->icmd_flags, ICMD_KNOWN_TO_FCA);
icmd->icmd_start_time = ddi_get_lbolt();
ret = iport->iport_port->port_send_cmd(cmd);
@@ -2763,7 +2763,7 @@ do { \
fct_gid_cb); \
if (ct_cmd) { \
uint32_t cnt; \
- cnt = atomic_add_32_nv(&irp->irp_rscn_counter, 1); \
+ cnt = atomic_inc_32_nv(&irp->irp_rscn_counter); \
CMD_TO_ICMD(ct_cmd)->icmd_cb_private = \
INT2PTR(cnt, void *); \
irp->irp_flags |= IRP_RSCN_QUEUED; \
diff --git a/usr/src/uts/common/io/comstar/port/fct/fct.c b/usr/src/uts/common/io/comstar/port/fct/fct.c
index a05d1964ec..52092e324b 100644
--- a/usr/src/uts/common/io/comstar/port/fct/fct.c
+++ b/usr/src/uts/common/io/comstar/port/fct/fct.c
@@ -1162,7 +1162,7 @@ fct_register_local_port(fct_local_port_t *port)
stmf_wwn_to_devid_desc((scsi_devid_desc_t *)iport->iport_id,
port->port_pwwn, PROTOCOL_FIBRE_CHANNEL);
(void) snprintf(taskq_name, sizeof (taskq_name), "stmf_fct_taskq_%d",
- atomic_add_32_nv(&taskq_cntr, 1));
+ atomic_inc_32_nv(&taskq_cntr));
if ((iport->iport_worker_taskq = ddi_taskq_create(NULL,
taskq_name, 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
return (FCT_FAILURE);
@@ -1440,7 +1440,7 @@ fct_deque_rp(fct_i_local_port_t *iport, fct_i_remote_port_t *irp)
while (irp_next != NULL) {
if (irp == irp_next) {
if (irp->irp_flags & IRP_PLOGI_DONE) {
- atomic_add_32(&iport->iport_nrps_login, -1);
+ atomic_dec_32(&iport->iport_nrps_login);
}
atomic_and_32(&irp->irp_flags,
~(IRP_PLOGI_DONE | IRP_PRLI_DONE));
@@ -1678,7 +1678,7 @@ fct_scsi_task_alloc(fct_local_port_t *port, uint16_t rp_handle,
icmd = (fct_i_cmd_t *)cmd->cmd_fct_private;
icmd->icmd_next = NULL;
cmd->cmd_port = port;
- atomic_add_32(&iport->iport_total_alloced_ncmds, 1);
+ atomic_inc_32(&iport->iport_total_alloced_ncmds);
}
/*
@@ -1701,7 +1701,7 @@ fct_scsi_task_alloc(fct_local_port_t *port, uint16_t rp_handle,
fct_cmd_free(cmd);
return (NULL);
}
- atomic_add_16(&irp->irp_fcp_xchg_count, 1);
+ atomic_inc_16(&irp->irp_fcp_xchg_count);
cmd->cmd_rp = rp;
icmd->icmd_flags |= ICMD_IN_TRANSITION | ICMD_KNOWN_TO_FCA;
rw_exit(&irp->irp_lock);
@@ -1826,15 +1826,15 @@ fct_post_implicit_logo(fct_cmd_t *cmd)
rw_enter(&irp->irp_lock, RW_WRITER);
atomic_or_32(&icmd->icmd_flags, ICMD_IMPLICIT_CMD_HAS_RESOURCE);
- atomic_add_16(&irp->irp_nonfcp_xchg_count, 1);
- atomic_add_16(&irp->irp_sa_elses_count, 1);
+ atomic_inc_16(&irp->irp_nonfcp_xchg_count);
+ atomic_inc_16(&irp->irp_sa_elses_count);
/*
* An implicit LOGO can also be posted to a irp where a PLOGI might
* be in process. That PLOGI will reset this flag and decrement the
* iport_nrps_login counter.
*/
if (irp->irp_flags & IRP_PLOGI_DONE) {
- atomic_add_32(&iport->iport_nrps_login, -1);
+ atomic_dec_32(&iport->iport_nrps_login);
}
atomic_and_32(&irp->irp_flags, ~(IRP_PLOGI_DONE | IRP_PRLI_DONE));
atomic_or_32(&icmd->icmd_flags, ICMD_SESSION_AFFECTING);
@@ -1865,7 +1865,7 @@ fct_alloc_cmd_slot(fct_i_local_port_t *iport, fct_cmd_t *cmd)
new |= iport->iport_cmd_slots[cmd_slot].slot_next;
} while (atomic_cas_32(&iport->iport_next_free_slot, old, new) != old);
- atomic_add_16(&iport->iport_nslots_free, -1);
+ atomic_dec_16(&iport->iport_nslots_free);
iport->iport_cmd_slots[cmd_slot].slot_cmd = icmd;
cmd->cmd_handle = (uint32_t)cmd_slot | 0x80000000 |
(((uint32_t)(iport->iport_cmd_slots[cmd_slot].slot_uniq_cntr))
@@ -2072,14 +2072,14 @@ fct_cmd_free(fct_cmd_t *cmd)
} while (atomic_cas_32(&iport->iport_next_free_slot,
old, new) != old);
cmd->cmd_handle = 0;
- atomic_add_16(&iport->iport_nslots_free, 1);
+ atomic_inc_16(&iport->iport_nslots_free);
if (cmd->cmd_rp) {
irp = (fct_i_remote_port_t *)
cmd->cmd_rp->rp_fct_private;
if (cmd->cmd_type == FCT_CMD_FCP_XCHG)
- atomic_add_16(&irp->irp_fcp_xchg_count, -1);
+ atomic_dec_16(&irp->irp_fcp_xchg_count);
else
- atomic_add_16(&irp->irp_nonfcp_xchg_count, -1);
+ atomic_dec_16(&irp->irp_nonfcp_xchg_count);
}
rw_exit(&iport->iport_lock);
} else if ((icmd->icmd_flags & ICMD_IMPLICIT) &&
@@ -2089,9 +2089,9 @@ fct_cmd_free(fct_cmd_t *cmd)
irp = (fct_i_remote_port_t *)
cmd->cmd_rp->rp_fct_private;
if (cmd->cmd_type == FCT_CMD_FCP_XCHG)
- atomic_add_16(&irp->irp_fcp_xchg_count, -1);
+ atomic_dec_16(&irp->irp_fcp_xchg_count);
else
- atomic_add_16(&irp->irp_nonfcp_xchg_count, -1);
+ atomic_dec_16(&irp->irp_nonfcp_xchg_count);
}
}
@@ -2126,7 +2126,7 @@ fct_cmd_free(fct_cmd_t *cmd)
iport->iport_cached_ncmds++;
mutex_exit(&iport->iport_cached_cmd_lock);
} else {
- atomic_add_32(&iport->iport_total_alloced_ncmds, -1);
+ atomic_dec_32(&iport->iport_total_alloced_ncmds);
fct_free(cmd);
}
} else {
diff --git a/usr/src/uts/common/io/comstar/port/qlt/qlt.c b/usr/src/uts/common/io/comstar/port/qlt/qlt.c
index ff7dcad9eb..8d8100fe68 100644
--- a/usr/src/uts/common/io/comstar/port/qlt/qlt.c
+++ b/usr/src/uts/common/io/comstar/port/qlt/qlt.c
@@ -1799,7 +1799,7 @@ qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
qlt->fw_length02) << 2);
qlt->fw_code01 = NULL;
} else {
- atomic_add_32(&qlt_loaded_counter, 1);
+ atomic_inc_32(&qlt_loaded_counter);
}
qlt->fw_length01 = intp[3];
qlt->fw_code01 = (uint32_t *)kmem_alloc(iocd->stmf_ibuf_size,
@@ -1816,7 +1816,7 @@ qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
kmem_free(qlt->fw_code01, (qlt->fw_length01 +
qlt->fw_length02) << 2);
qlt->fw_code01 = NULL;
- atomic_add_32(&qlt_loaded_counter, -1);
+ atomic_dec_32(&qlt_loaded_counter);
}
break;
@@ -4671,7 +4671,7 @@ qlt_abort_cmd(struct fct_local_port *port, fct_cmd_t *cmd, uint32_t flags)
#ifdef DEBUG
if (qlt_drop_abort_counter > 0) {
- if (atomic_add_32_nv(&qlt_drop_abort_counter, -1) == 1)
+ if (atomic_dec_32_nv(&qlt_drop_abort_counter) == 1)
return (FCT_SUCCESS);
}
#endif
diff --git a/usr/src/uts/common/io/comstar/stmf/lun_map.c b/usr/src/uts/common/io/comstar/stmf/lun_map.c
index f95a05aa86..c709a2688a 100644
--- a/usr/src/uts/common/io/comstar/stmf/lun_map.c
+++ b/usr/src/uts/common/io/comstar/stmf/lun_map.c
@@ -267,7 +267,7 @@ stmf_session_destroy_lun_map(stmf_i_local_port_t *ilport,
}
ilu = (stmf_i_lu_t *)
ent->ent_lu->lu_stmf_private;
- atomic_add_32(&ilu->ilu_ref_cnt, -1);
+ atomic_dec_32(&ilu->ilu_ref_cnt);
kmem_free(sm->lm_plus[n],
sizeof (stmf_lun_map_ent_t));
}
@@ -420,7 +420,7 @@ stmf_add_lu_to_session(stmf_i_local_port_t *ilport,
lun_map_ent->ent_lu = lu;
ret = stmf_add_ent_to_map(sm, (void *)lun_map_ent, lu_nbr);
ASSERT(ret == STMF_SUCCESS);
- atomic_add_32(&ilu->ilu_ref_cnt, 1);
+ atomic_inc_32(&ilu->ilu_ref_cnt);
/*
* do not set lun inventory flag for standby port
* as this would be handled from peer
@@ -457,7 +457,7 @@ stmf_remove_lu_from_session(stmf_i_local_port_t *ilport,
ret = stmf_remove_ent_from_map(sm, lu_nbr);
ASSERT(ret == STMF_SUCCESS);
- atomic_add_32(&ilu->ilu_ref_cnt, -1);
+ atomic_dec_32(&ilu->ilu_ref_cnt);
iss->iss_flags |= ISS_LUN_INVENTORY_CHANGED;
if (lun_map_ent->ent_itl_datap) {
stmf_do_itl_dereg(lu, lun_map_ent->ent_itl_datap,
@@ -684,7 +684,7 @@ stmf_append_id(stmf_id_list_t *idlist, stmf_id_data_t *id)
idlist->idl_tail->id_next = id;
idlist->idl_tail = id;
}
- atomic_add_32(&idlist->id_count, 1);
+ atomic_inc_32(&idlist->id_count);
}
void
@@ -701,7 +701,7 @@ stmf_remove_id(stmf_id_list_t *idlist, stmf_id_data_t *id)
} else {
idlist->idl_head = id->id_next;
}
- atomic_add_32(&idlist->id_count, -1);
+ atomic_dec_32(&idlist->id_count);
}
diff --git a/usr/src/uts/common/io/comstar/stmf/stmf.c b/usr/src/uts/common/io/comstar/stmf/stmf.c
index b8cf79508c..6bc47d710e 100644
--- a/usr/src/uts/common/io/comstar/stmf/stmf.c
+++ b/usr/src/uts/common/io/comstar/stmf/stmf.c
@@ -2094,7 +2094,7 @@ stmf_set_alua_state(stmf_alua_state_desc_t *alua_state)
}
if (alua_state->alua_node != 0) {
ilport->ilport_rtpid =
- atomic_add_16_nv(&stmf_rtpid_counter, 1);
+ atomic_inc_16_nv(&stmf_rtpid_counter);
}
lport = ilport->ilport_lport;
ic_reg_port = ic_reg_port_msg_alloc(
@@ -3253,7 +3253,7 @@ stmf_register_local_port(stmf_local_port_t *lport)
* and ports that are alua participants (ilport_alua == 1)
*/
if (ilport->ilport_standby == 0) {
- ilport->ilport_rtpid = atomic_add_16_nv(&stmf_rtpid_counter, 1);
+ ilport->ilport_rtpid = atomic_inc_16_nv(&stmf_rtpid_counter);
}
if (stmf_state.stmf_alua_state == 1 &&
@@ -3595,7 +3595,7 @@ stmf_register_scsi_session(stmf_local_port_t *lport, stmf_scsi_session_t *ss)
mutex_exit(&stmf_state.stmf_lock);
iss->iss_creation_time = ddi_get_time();
- ss->ss_session_id = atomic_add_64_nv(&stmf_session_counter, 1);
+ ss->ss_session_id = atomic_inc_64_nv(&stmf_session_counter);
iss->iss_flags &= ~ISS_BEING_CREATED;
/* XXX should we remove ISS_LUN_INVENTORY_CHANGED on new session? */
iss->iss_flags &= ~ISS_LUN_INVENTORY_CHANGED;
@@ -3794,7 +3794,7 @@ stmf_do_itl_dereg(stmf_lu_t *lu, stmf_itl_data_t *itl, uint8_t hdlrm_reason)
ASSERT(itl->itl_counter);
- if (atomic_add_32_nv(&itl->itl_counter, -1))
+ if (atomic_dec_32_nv(&itl->itl_counter))
return;
stmf_release_itl_handle(lu, itl);
@@ -4152,12 +4152,12 @@ stmf_task_alloc(struct stmf_local_port *lport, stmf_scsi_session_t *ss,
}
itask->itask_ilu_task_cntr = ilu->ilu_cur_task_cntr;
- atomic_add_32(itask->itask_ilu_task_cntr, 1);
+ atomic_inc_32(itask->itask_ilu_task_cntr);
itask->itask_start_time = ddi_get_lbolt();
if ((lun_map_ent != NULL) && ((itask->itask_itl_datap =
lun_map_ent->ent_itl_datap) != NULL)) {
- atomic_add_32(&itask->itask_itl_datap->itl_counter, 1);
+ atomic_inc_32(&itask->itask_itl_datap->itl_counter);
task->task_lu_itl_handle = itask->itask_itl_datap->itl_handle;
} else {
itask->itask_itl_datap = NULL;
@@ -4185,7 +4185,7 @@ stmf_task_lu_free(scsi_task_t *task, stmf_i_scsi_session_t *iss)
if (ilu->ilu_ntasks == ilu->ilu_ntasks_free)
cv_signal(&ilu->ilu_offline_pending_cv);
mutex_exit(&ilu->ilu_task_lock);
- atomic_add_32(itask->itask_ilu_task_cntr, -1);
+ atomic_dec_32(itask->itask_ilu_task_cntr);
}
void
@@ -4408,8 +4408,8 @@ stmf_task_free(scsi_task_t *task)
hrtime_t,
itask->itask_done_timestamp - itask->itask_start_timestamp);
if (itask->itask_itl_datap) {
- if (atomic_add_32_nv(&itask->itask_itl_datap->itl_counter,
- -1) == 0) {
+ if (atomic_dec_32_nv(&itask->itask_itl_datap->itl_counter) ==
+ 0) {
stmf_release_itl_handle(task->task_lu,
itask->itask_itl_datap);
}
@@ -4418,8 +4418,8 @@ stmf_task_free(scsi_task_t *task)
rw_enter(iss->iss_lockp, RW_READER);
lport->lport_task_free(task);
if (itask->itask_worker) {
- atomic_add_32(&stmf_cur_ntasks, -1);
- atomic_add_32(&itask->itask_worker->worker_ref_count, -1);
+ atomic_dec_32(&stmf_cur_ntasks);
+ atomic_dec_32(&itask->itask_worker->worker_ref_count);
}
/*
* After calling stmf_task_lu_free, the task pointer can no longer
@@ -4445,10 +4445,10 @@ stmf_post_task(scsi_task_t *task, stmf_data_buf_t *dbuf)
task->task_max_nbufs = 4;
task->task_cur_nbufs = 0;
/* Latest value of currently running tasks */
- ct = atomic_add_32_nv(&stmf_cur_ntasks, 1);
+ ct = atomic_inc_32_nv(&stmf_cur_ntasks);
/* Select the next worker using round robin */
- nv = (int)atomic_add_32_nv((uint32_t *)&stmf_worker_sel_counter, 1);
+ nv = (int)atomic_inc_32_nv((uint32_t *)&stmf_worker_sel_counter);
if (nv >= stmf_nworkers_accepting_cmds) {
int s = nv;
do {
@@ -4525,7 +4525,7 @@ stmf_post_task(scsi_task_t *task, stmf_data_buf_t *dbuf)
}
/* Measure task waitq time */
itask->itask_waitq_enter_timestamp = gethrtime();
- atomic_add_32(&w->worker_ref_count, 1);
+ atomic_inc_32(&w->worker_ref_count);
itask->itask_cmd_stack[0] = ITASK_CMD_NEW_TASK;
itask->itask_ncmds = 1;
stmf_task_audit(itask, TE_TASK_START, CMD_OR_IOF_NA, dbuf);
@@ -4613,7 +4613,7 @@ stmf_xfer_data(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t ioflags)
return (STMF_ABORTED);
#ifdef DEBUG
if (!(ioflags & STMF_IOF_STATS_ONLY) && stmf_drop_buf_counter > 0) {
- if (atomic_add_32_nv((uint32_t *)&stmf_drop_buf_counter, -1) ==
+ if (atomic_dec_32_nv((uint32_t *)&stmf_drop_buf_counter) ==
1)
return (STMF_SUCCESS);
}
@@ -5710,7 +5710,7 @@ stmf_scsilib_uniq_lu_id2(uint32_t company_id, uint32_t host_id,
p = (uint8_t *)lu_id;
- gen_number = atomic_add_16_nv(&stmf_lu_id_gen_number, 1);
+ gen_number = atomic_inc_16_nv(&stmf_lu_id_gen_number);
p[0] = 0xf1; p[1] = 3; p[2] = 0; p[3] = 0x10;
p[4] = ((company_id >> 20) & 0xf) | 0x60;
@@ -6342,9 +6342,8 @@ out_itask_flag_loop:
}
#ifdef DEBUG
if (stmf_drop_task_counter > 0) {
- if (atomic_add_32_nv(
- (uint32_t *)&stmf_drop_task_counter,
- -1) == 1) {
+ if (atomic_dec_32_nv(
+ (uint32_t *)&stmf_drop_task_counter) == 1) {
break;
}
}
diff --git a/usr/src/uts/common/io/cxgbe/t4nex/t4_l2t.c b/usr/src/uts/common/io/cxgbe/t4nex/t4_l2t.c
index ffd6164bb8..d779f4caf3 100644
--- a/usr/src/uts/common/io/cxgbe/t4nex/t4_l2t.c
+++ b/usr/src/uts/common/io/cxgbe/t4nex/t4_l2t.c
@@ -422,7 +422,7 @@ t4_l2e_free(struct l2t_entry *e)
mutex_exit(&e->lock);
d = container_of(e, struct l2t_data, l2tab[e->idx]);
- atomic_add_int(&d->nfree, 1);
+ atomic_inc_uint(&d->nfree);
}
diff --git a/usr/src/uts/common/io/dld/dld_str.c b/usr/src/uts/common/io/dld/dld_str.c
index 4e693c3a2a..6f0d0b9a6c 100644
--- a/usr/src/uts/common/io/dld/dld_str.c
+++ b/usr/src/uts/common/io/dld/dld_str.c
@@ -604,7 +604,7 @@ dld_str_create(queue_t *rq, uint_t type, major_t major, t_uscalar_t style)
/*
* Allocate an object from the cache.
*/
- atomic_add_32(&str_count, 1);
+ atomic_inc_32(&str_count);
dsp = kmem_cache_alloc(str_cachep, KM_SLEEP);
/*
@@ -613,7 +613,7 @@ dld_str_create(queue_t *rq, uint_t type, major_t major, t_uscalar_t style)
dsp->ds_tx_flow_mp = allocb(1, BPRI_HI);
if (dsp->ds_tx_flow_mp == NULL) {
kmem_cache_free(str_cachep, dsp);
- atomic_add_32(&str_count, -1);
+ atomic_dec_32(&str_count);
return (NULL);
}
dsp->ds_type = type;
@@ -710,7 +710,7 @@ dld_str_destroy(dld_str_t *dsp)
* Free the object back to the cache.
*/
kmem_cache_free(str_cachep, dsp);
- atomic_add_32(&str_count, -1);
+ atomic_dec_32(&str_count);
}
/*
diff --git a/usr/src/uts/common/io/dls/dls_link.c b/usr/src/uts/common/io/dls/dls_link.c
index 9e4d6fdad5..6b92a81e77 100644
--- a/usr/src/uts/common/io/dls/dls_link.c
+++ b/usr/src/uts/common/io/dls/dls_link.c
@@ -361,7 +361,7 @@ i_dls_link_rx(void *arg, mac_resource_handle_t mrh, mblk_t *mp,
DLS_PREPARE_PKT(dlp->dl_mh, mp, &mhi, err);
if (err != 0) {
- atomic_add_32(&(dlp->dl_unknowns), 1);
+ atomic_inc_32(&(dlp->dl_unknowns));
nextp = mp->b_next;
mp->b_next = NULL;
freemsg(mp);
@@ -535,7 +535,7 @@ dls_rx_vlan_promisc(void *arg, mac_resource_handle_t mrh, mblk_t *mp,
}
drop:
- atomic_add_32(&dlp->dl_unknowns, 1);
+ atomic_inc_32(&dlp->dl_unknowns);
freemsg(mp);
}
@@ -575,7 +575,7 @@ dls_rx_promisc(void *arg, mac_resource_handle_t mrh, mblk_t *mp,
return;
drop:
- atomic_add_32(&dlp->dl_unknowns, 1);
+ atomic_inc_32(&dlp->dl_unknowns);
freemsg(mp);
}
@@ -732,7 +732,7 @@ dls_link_hold_common(const char *name, dls_link_t **dlpp, boolean_t create)
(mod_hash_val_t)dlp);
ASSERT(err == 0);
- atomic_add_32(&i_dls_link_count, 1);
+ atomic_inc_32(&i_dls_link_count);
ASSERT(i_dls_link_count != 0);
done:
@@ -819,7 +819,7 @@ dls_link_rele(dls_link_t *dlp)
*/
i_dls_link_destroy(dlp);
ASSERT(i_dls_link_count > 0);
- atomic_add_32(&i_dls_link_count, -1);
+ atomic_dec_32(&i_dls_link_count);
}
}
diff --git a/usr/src/uts/common/io/drm/drm_atomic.h b/usr/src/uts/common/io/drm/drm_atomic.h
index b8a4f56091..0adc70c1bc 100644
--- a/usr/src/uts/common/io/drm/drm_atomic.h
+++ b/usr/src/uts/common/io/drm/drm_atomic.h
@@ -55,7 +55,7 @@ typedef uint32_t atomic_t;
#define atomic_set(p, v) (*(p) = (v))
#define atomic_read(p) (*(p))
-#define atomic_inc(p) atomic_add_int(p, 1)
+#define atomic_inc(p) atomic_inc_uint(p)
#define atomic_dec(p) atomic_dec_uint(p)
#define atomic_add(n, p) atomic_add_int(p, n)
#define atomic_sub(n, p) atomic_add_int(p, -n)
diff --git a/usr/src/uts/common/io/fcoe/fcoe.c b/usr/src/uts/common/io/fcoe/fcoe.c
index f2550c3008..7b22258b6f 100644
--- a/usr/src/uts/common/io/fcoe/fcoe.c
+++ b/usr/src/uts/common/io/fcoe/fcoe.c
@@ -1152,7 +1152,7 @@ fcoe_worker_frame(void *arg)
fcoe_i_frame_t *fmi;
int ret;
- atomic_add_32(&fcoe_nworkers_running, 1);
+ atomic_inc_32(&fcoe_nworkers_running);
mutex_enter(&w->worker_lock);
w->worker_flags |= FCOE_WORKER_STARTED | FCOE_WORKER_ACTIVE;
while ((w->worker_flags & FCOE_WORKER_TERMINATE) == 0) {
@@ -1181,7 +1181,7 @@ fcoe_worker_frame(void *arg)
}
w->worker_flags &= ~(FCOE_WORKER_STARTED | FCOE_WORKER_ACTIVE);
mutex_exit(&w->worker_lock);
- atomic_add_32(&fcoe_nworkers_running, -1);
+ atomic_dec_32(&fcoe_nworkers_running);
list_destroy(&w->worker_frm_list);
}
diff --git a/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_sli3.c b/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_sli3.c
index 18a63d73f0..af7d58f63d 100644
--- a/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_sli3.c
+++ b/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_sli3.c
@@ -4808,7 +4808,7 @@ emlxs_sli3_handle_ring_event(emlxs_hba_t *hba, int32_t ring_no,
}
#endif /* SFCT_SUPPORT */
cp->hbaCmplCmd_sbp++;
- atomic_add_32(&hba->io_active, -1);
+ atomic_dec_32(&hba->io_active);
/* Copy entry to sbp's iocbq */
iocbq = &sbp->iocbq;
@@ -5218,7 +5218,7 @@ emlxs_sli3_issue_iocb(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
sbp->pkt_flags |= PACKET_IN_CHIPQ;
mutex_exit(&sbp->mtx);
- atomic_add_32(&hba->io_active, 1);
+ atomic_inc_32(&hba->io_active);
#ifdef SFCT_SUPPORT
#ifdef FCT_IO_TRACE
diff --git a/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_sli4.c b/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_sli4.c
index aefa0996f9..6bdbba23cd 100644
--- a/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_sli4.c
+++ b/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_sli4.c
@@ -1991,7 +1991,7 @@ sendit:
sbp->pkt_flags |= PACKET_IN_CHIPQ;
mutex_exit(&sbp->mtx);
- atomic_add_32(&hba->io_active, 1);
+ atomic_inc_32(&hba->io_active);
sbp->xrip->flag |= EMLXS_XRI_PENDING_IO;
}
@@ -3869,7 +3869,7 @@ emlxs_sli4_hba_flush_chipq(emlxs_hba_t *hba)
#endif /* FCT_IO_TRACE */
#endif /* SFCT_SUPPORT */
- atomic_add_32(&hba->io_active, -1);
+ atomic_dec_32(&hba->io_active);
/* Copy entry to sbp's iocbq */
iocbq = &sbp->iocbq;
@@ -3963,7 +3963,7 @@ emlxs_sli4_process_wqe_cmpl(emlxs_hba_t *hba, CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
mutex_enter(&EMLXS_FCTAB_LOCK);
sbp = hba->fc_table[request_tag];
- atomic_add_32(&hba->io_active, -1);
+ atomic_dec_32(&hba->io_active);
if (sbp == STALE_PACKET) {
cp->hbaCmplCmd_sbp++;
diff --git a/usr/src/uts/common/io/fibre-channel/fca/fcoei/fcoei.c b/usr/src/uts/common/io/fibre-channel/fca/fcoei/fcoei.c
index c020f30765..9531279478 100644
--- a/usr/src/uts/common/io/fibre-channel/fca/fcoei/fcoei.c
+++ b/usr/src/uts/common/io/fibre-channel/fca/fcoei/fcoei.c
@@ -773,7 +773,7 @@ fcoei_clear_watchdog_jobs(fcoei_soft_state_t *ss)
break;
case AE_EVENT_PORT:
- atomic_add_32(&ss->ss_port_event_counter, -1);
+ atomic_dec_32(&ss->ss_port_event_counter);
/* FALLTHROUGH */
case AE_EVENT_RESET:
diff --git a/usr/src/uts/common/io/fibre-channel/fca/fcoei/fcoei_eth.c b/usr/src/uts/common/io/fibre-channel/fca/fcoei/fcoei_eth.c
index 889ee6ac11..ac1a4809ac 100644
--- a/usr/src/uts/common/io/fibre-channel/fca/fcoei/fcoei_eth.c
+++ b/usr/src/uts/common/io/fibre-channel/fca/fcoei/fcoei_eth.c
@@ -1385,6 +1385,6 @@ fcoei_process_event_port(fcoei_event_t *ae)
FCOEI_LOG(__FUNCTION__, "ss %p not bound now", ss);
}
- atomic_add_32(&ss->ss_port_event_counter, -1);
+ atomic_dec_32(&ss->ss_port_event_counter);
kmem_free(ae, sizeof (fcoei_event_t));
}
diff --git a/usr/src/uts/common/io/fibre-channel/fca/fcoei/fcoei_lv.c b/usr/src/uts/common/io/fibre-channel/fca/fcoei/fcoei_lv.c
index abf1c26387..fbfc4674c6 100644
--- a/usr/src/uts/common/io/fibre-channel/fca/fcoei/fcoei_lv.c
+++ b/usr/src/uts/common/io/fibre-channel/fca/fcoei/fcoei_lv.c
@@ -860,7 +860,7 @@ fcoei_initiate_ct_req(fcoei_exchange_t *xch)
bzero(frm->frm_payload, cmd_len);
xch->xch_cnt = xch->xch_ss->ss_sol_cnt;
- atomic_add_32(xch->xch_cnt, 1);
+ atomic_inc_32(xch->xch_cnt);
FFM_R_CTL(fpkt->pkt_cmd_fhdr.r_ctl, frm);
FFM_D_ID(fpkt->pkt_cmd_fhdr.d_id, frm);
@@ -1056,7 +1056,7 @@ fcoei_initiate_fcp_cmd(fcoei_exchange_t *xch)
* This will affect timing check
*/
xch->xch_cnt = xch->xch_ss->ss_sol_cnt;
- atomic_add_32(xch->xch_cnt, 1);
+ atomic_inc_32(xch->xch_cnt);
/*
* Set exchange residual bytes
@@ -1160,7 +1160,7 @@ fcoei_initiate_els_req(fcoei_exchange_t *xch)
* This will affect timing check
*/
xch->xch_cnt = xch->xch_ss->ss_sol_cnt;
- atomic_add_32(xch->xch_cnt, 1);
+ atomic_inc_32(xch->xch_cnt);
els_code = (ls_code_t *)(void *)fpkt->pkt_cmd;
switch (els_code->ls_code) {
@@ -1267,7 +1267,7 @@ fcoei_initiate_els_resp(fcoei_exchange_t *xch)
* This will affect timing check
*/
xch->xch_cnt = xch->xch_ss->ss_unsol_cnt;
- atomic_add_32(xch->xch_cnt, 1);
+ atomic_inc_32(xch->xch_cnt);
/*
* Set ifm_rctl
diff --git a/usr/src/uts/common/io/fibre-channel/fca/oce/oce_rx.c b/usr/src/uts/common/io/fibre-channel/fca/oce/oce_rx.c
index da00160b68..c8af6ae527 100644
--- a/usr/src/uts/common/io/fibre-channel/fca/oce/oce_rx.c
+++ b/usr/src/uts/common/io/fibre-channel/fca/oce/oce_rx.c
@@ -226,7 +226,7 @@ oce_rqb_free(struct oce_rq *rq, oce_rq_bdesc_t *rqbd)
rq->rqb_freelist[free_index] = rqbd;
rq->rqb_rc_head = GET_Q_NEXT(free_index, 1, rq->cfg.nbufs);
mutex_exit(&rq->rc_lock);
- atomic_add_32(&rq->rqb_free, 1);
+ atomic_inc_32(&rq->rqb_free);
} /* oce_rqb_free */
@@ -598,7 +598,7 @@ oce_rx_pool_free(char *arg)
}
oce_rqb_free(rq, rqbd);
- (void) atomic_add_32(&rq->pending, -1);
+ (void) atomic_dec_32(&rq->pending);
} /* rx_pool_free */
/*
diff --git a/usr/src/uts/common/io/fssnap.c b/usr/src/uts/common/io/fssnap.c
index 3b4d31c915..93e4a2b1bb 100644
--- a/usr/src/uts/common/io/fssnap.c
+++ b/usr/src/uts/common/io/fssnap.c
@@ -1544,9 +1544,9 @@ fssnap_translate(struct snapshot_id **sidpp, struct buf *wbp)
if (throttle_write) {
if (sema_tryp(&cmap->cmap_throttle_sem) == 0) {
rw_exit(&sidp->sid_rwlock);
- atomic_add_32(&cmap->cmap_waiters, 1);
+ atomic_inc_32(&cmap->cmap_waiters);
sema_p(&cmap->cmap_throttle_sem);
- atomic_add_32(&cmap->cmap_waiters, -1);
+ atomic_dec_32(&cmap->cmap_waiters);
rw_enter(&sidp->sid_rwlock, RW_READER);
/*
@@ -1680,7 +1680,7 @@ fssnap_write_taskq(void *arg)
}
rw_exit(&sidp->sid_rwlock);
- atomic_add_64((uint64_t *)&cmap->cmap_nchunks, 1);
+ atomic_inc_64((uint64_t *)&cmap->cmap_nchunks);
if ((cmap->cmap_maxsize != 0) &&
((cmap->cmap_nchunks * cmap->cmap_chunksz) > cmap->cmap_maxsize)) {
diff --git a/usr/src/uts/common/io/gld.c b/usr/src/uts/common/io/gld.c
index 4d76971d79..b6a022df69 100644
--- a/usr/src/uts/common/io/gld.c
+++ b/usr/src/uts/common/io/gld.c
@@ -1748,14 +1748,14 @@ gld_wput(queue_t *q, mblk_t *mp)
* Nonzero count delays any attempted DL_UNBIND.
* See comments above gld_start().
*/
- atomic_add_32((uint32_t *)&gld->gld_wput_count, 1);
+ atomic_inc_32((uint32_t *)&gld->gld_wput_count);
membar_enter();
/* Recheck state now wput_count is set to prevent DL_UNBIND */
/* If this Q is in process of DL_UNBIND, don't call start */
if (gld->gld_state != DL_IDLE || gld->gld_in_unbind) {
/* Extremely unlikely */
- atomic_add_32((uint32_t *)&gld->gld_wput_count, -1);
+ atomic_dec_32((uint32_t *)&gld->gld_wput_count);
goto use_wsrv;
}
@@ -1771,7 +1771,7 @@ gld_wput(queue_t *q, mblk_t *mp)
/* Allow DL_UNBIND again */
membar_exit();
- atomic_add_32((uint32_t *)&gld->gld_wput_count, -1);
+ atomic_dec_32((uint32_t *)&gld->gld_wput_count);
if (rc == GLD_NORESOURCES)
qenable(q);
diff --git a/usr/src/uts/common/io/hxge/hxge_rxdma.c b/usr/src/uts/common/io/hxge/hxge_rxdma.c
index 3ac170277d..d953061d8b 100644
--- a/usr/src/uts/common/io/hxge/hxge_rxdma.c
+++ b/usr/src/uts/common/io/hxge/hxge_rxdma.c
@@ -1031,7 +1031,7 @@ hxge_freeb(p_rx_msg_t rx_msg_p)
* is processing a loaned up buffer block.
*/
free_state = rx_msg_p->free;
- ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1);
+ ref_cnt = atomic_dec_32_nv(&rx_msg_p->ref_cnt);
if (!ref_cnt) {
atomic_dec_32(&hxge_mblks_pending);
diff --git a/usr/src/uts/common/io/ib/clients/daplt/daplt.c b/usr/src/uts/common/io/ib/clients/daplt/daplt.c
index 4b89262e89..a207150d62 100644
--- a/usr/src/uts/common/io/ib/clients/daplt/daplt.c
+++ b/usr/src/uts/common/io/ib/clients/daplt/daplt.c
@@ -8293,13 +8293,13 @@ daplka_ibt_alloc_rc_channel(daplka_ep_resource_t *ep_rp, ibt_hca_hdl_t hca_hdl,
return (IBT_INSUFF_RESOURCE);
}
DAPLKA_RS_ACCT_INC(ep_rp, 1);
- atomic_add_32(&hca_p->hca_qp_count, 1);
+ atomic_inc_32(&hca_p->hca_qp_count);
}
status = ibt_alloc_rc_channel(hca_hdl, flags, args, chan_hdl_p, sizes);
if (status != IBT_SUCCESS && acct_enabled) {
DAPLKA_RS_ACCT_DEC(ep_rp, 1);
- atomic_add_32(&hca_p->hca_qp_count, -1);
+ atomic_dec_32(&hca_p->hca_qp_count);
}
return (status);
}
@@ -8318,7 +8318,7 @@ daplka_ibt_free_channel(daplka_ep_resource_t *ep_rp, ibt_channel_hdl_t chan_hdl)
}
if (DAPLKA_RS_ACCT_CHARGED(ep_rp) > 0) {
DAPLKA_RS_ACCT_DEC(ep_rp, 1);
- atomic_add_32(&hca_p->hca_qp_count, -1);
+ atomic_dec_32(&hca_p->hca_qp_count);
}
return (status);
}
@@ -8345,13 +8345,13 @@ daplka_ibt_alloc_cq(daplka_evd_resource_t *evd_rp, ibt_hca_hdl_t hca_hdl,
return (IBT_INSUFF_RESOURCE);
}
DAPLKA_RS_ACCT_INC(evd_rp, 1);
- atomic_add_32(&hca_p->hca_cq_count, 1);
+ atomic_inc_32(&hca_p->hca_cq_count);
}
status = ibt_alloc_cq(hca_hdl, cq_attr, ibt_cq_p, real_size);
if (status != IBT_SUCCESS && acct_enabled) {
DAPLKA_RS_ACCT_DEC(evd_rp, 1);
- atomic_add_32(&hca_p->hca_cq_count, -1);
+ atomic_dec_32(&hca_p->hca_cq_count);
}
return (status);
}
@@ -8370,7 +8370,7 @@ daplka_ibt_free_cq(daplka_evd_resource_t *evd_rp, ibt_cq_hdl_t cq_hdl)
}
if (DAPLKA_RS_ACCT_CHARGED(evd_rp) > 0) {
DAPLKA_RS_ACCT_DEC(evd_rp, 1);
- atomic_add_32(&hca_p->hca_cq_count, -1);
+ atomic_dec_32(&hca_p->hca_cq_count);
}
return (status);
}
@@ -8397,13 +8397,13 @@ daplka_ibt_alloc_pd(daplka_pd_resource_t *pd_rp, ibt_hca_hdl_t hca_hdl,
return (IBT_INSUFF_RESOURCE);
}
DAPLKA_RS_ACCT_INC(pd_rp, 1);
- atomic_add_32(&hca_p->hca_pd_count, 1);
+ atomic_inc_32(&hca_p->hca_pd_count);
}
status = ibt_alloc_pd(hca_hdl, flags, pd_hdl_p);
if (status != IBT_SUCCESS && acct_enabled) {
DAPLKA_RS_ACCT_DEC(pd_rp, 1);
- atomic_add_32(&hca_p->hca_pd_count, -1);
+ atomic_dec_32(&hca_p->hca_pd_count);
}
return (status);
}
@@ -8423,7 +8423,7 @@ daplka_ibt_free_pd(daplka_pd_resource_t *pd_rp, ibt_hca_hdl_t hca_hdl,
}
if (DAPLKA_RS_ACCT_CHARGED(pd_rp) > 0) {
DAPLKA_RS_ACCT_DEC(pd_rp, 1);
- atomic_add_32(&hca_p->hca_pd_count, -1);
+ atomic_dec_32(&hca_p->hca_pd_count);
}
return (status);
}
@@ -8451,13 +8451,13 @@ daplka_ibt_alloc_mw(daplka_mw_resource_t *mw_rp, ibt_hca_hdl_t hca_hdl,
return (IBT_INSUFF_RESOURCE);
}
DAPLKA_RS_ACCT_INC(mw_rp, 1);
- atomic_add_32(&hca_p->hca_mw_count, 1);
+ atomic_inc_32(&hca_p->hca_mw_count);
}
status = ibt_alloc_mw(hca_hdl, pd_hdl, flags, mw_hdl_p, rkey_p);
if (status != IBT_SUCCESS && acct_enabled) {
DAPLKA_RS_ACCT_DEC(mw_rp, 1);
- atomic_add_32(&hca_p->hca_mw_count, -1);
+ atomic_dec_32(&hca_p->hca_mw_count);
}
return (status);
}
@@ -8477,7 +8477,7 @@ daplka_ibt_free_mw(daplka_mw_resource_t *mw_rp, ibt_hca_hdl_t hca_hdl,
}
if (DAPLKA_RS_ACCT_CHARGED(mw_rp) > 0) {
DAPLKA_RS_ACCT_DEC(mw_rp, 1);
- atomic_add_32(&hca_p->hca_mw_count, -1);
+ atomic_dec_32(&hca_p->hca_mw_count);
}
return (status);
}
@@ -8505,13 +8505,13 @@ daplka_ibt_register_mr(daplka_mr_resource_t *mr_rp, ibt_hca_hdl_t hca_hdl,
return (IBT_INSUFF_RESOURCE);
}
DAPLKA_RS_ACCT_INC(mr_rp, 1);
- atomic_add_32(&hca_p->hca_mr_count, 1);
+ atomic_inc_32(&hca_p->hca_mr_count);
}
status = ibt_register_mr(hca_hdl, pd_hdl, mr_attr, mr_hdl_p, mr_desc_p);
if (status != IBT_SUCCESS && acct_enabled) {
DAPLKA_RS_ACCT_DEC(mr_rp, 1);
- atomic_add_32(&hca_p->hca_mr_count, -1);
+ atomic_dec_32(&hca_p->hca_mr_count);
}
return (status);
}
@@ -8540,14 +8540,14 @@ daplka_ibt_register_shared_mr(daplka_mr_resource_t *mr_rp,
return (IBT_INSUFF_RESOURCE);
}
DAPLKA_RS_ACCT_INC(mr_rp, 1);
- atomic_add_32(&hca_p->hca_mr_count, 1);
+ atomic_inc_32(&hca_p->hca_mr_count);
}
status = ibt_register_shared_mr(hca_hdl, mr_hdl, pd_hdl,
smr_attr_p, mr_hdl_p, mr_desc_p);
if (status != IBT_SUCCESS && acct_enabled) {
DAPLKA_RS_ACCT_DEC(mr_rp, 1);
- atomic_add_32(&hca_p->hca_mr_count, -1);
+ atomic_dec_32(&hca_p->hca_mr_count);
}
return (status);
}
@@ -8567,7 +8567,7 @@ daplka_ibt_deregister_mr(daplka_mr_resource_t *mr_rp, ibt_hca_hdl_t hca_hdl,
}
if (DAPLKA_RS_ACCT_CHARGED(mr_rp) > 0) {
DAPLKA_RS_ACCT_DEC(mr_rp, 1);
- atomic_add_32(&hca_p->hca_mr_count, -1);
+ atomic_dec_32(&hca_p->hca_mr_count);
}
return (status);
}
@@ -8595,13 +8595,13 @@ daplka_ibt_alloc_srq(daplka_srq_resource_t *srq_rp, ibt_hca_hdl_t hca_hdl,
return (IBT_INSUFF_RESOURCE);
}
DAPLKA_RS_ACCT_INC(srq_rp, 1);
- atomic_add_32(&hca_p->hca_srq_count, 1);
+ atomic_inc_32(&hca_p->hca_srq_count);
}
status = ibt_alloc_srq(hca_hdl, flags, pd, reqsz, srq_hdl_p, realsz);
if (status != IBT_SUCCESS && acct_enabled) {
DAPLKA_RS_ACCT_DEC(srq_rp, 1);
- atomic_add_32(&hca_p->hca_srq_count, -1);
+ atomic_dec_32(&hca_p->hca_srq_count);
}
return (status);
}
@@ -8622,7 +8622,7 @@ daplka_ibt_free_srq(daplka_srq_resource_t *srq_rp, ibt_srq_hdl_t srq_hdl)
}
if (DAPLKA_RS_ACCT_CHARGED(srq_rp) > 0) {
DAPLKA_RS_ACCT_DEC(srq_rp, 1);
- atomic_add_32(&hca_p->hca_srq_count, -1);
+ atomic_dec_32(&hca_p->hca_srq_count);
}
return (status);
}
@@ -9035,7 +9035,7 @@ daplka_close(dev_t dev, int flag, int otyp, struct cred *cred)
return (EINVAL);
}
D2("daplka_close: closing rnum = %d\n", rnum);
- atomic_add_32(&daplka_pending_close, 1);
+ atomic_inc_32(&daplka_pending_close);
/*
* remove from resource table.
@@ -9048,7 +9048,7 @@ daplka_close(dev_t dev, int flag, int otyp, struct cred *cred)
if (ia_rp != NULL) {
DAPLKA_RS_UNREF(ia_rp);
}
- atomic_add_32(&daplka_pending_close, -1);
+ atomic_dec_32(&daplka_pending_close);
return (DDI_SUCCESS);
}
@@ -9846,7 +9846,7 @@ daplka_timer_hkey_gen()
uint32_t new_hkey;
do {
- new_hkey = atomic_add_32_nv(&daplka_timer_hkey, 1);
+ new_hkey = atomic_inc_32_nv(&daplka_timer_hkey);
} while (new_hkey == 0);
return (new_hkey);
diff --git a/usr/src/uts/common/io/ib/clients/ibd/ibd_cm.c b/usr/src/uts/common/io/ib/clients/ibd/ibd_cm.c
index 25a3f6026d..1c8318b191 100644
--- a/usr/src/uts/common/io/ib/clients/ibd/ibd_cm.c
+++ b/usr/src/uts/common/io/ib/clients/ibd/ibd_cm.c
@@ -1236,7 +1236,7 @@ ibd_rc_post_srq(ibd_state_t *state, ibd_rwqe_t *rwqe)
* the corresponding ibd_rc_process_rx() is called.
*/
ASSERT(state->rc_srq_rwqe_list.dl_cnt < state->rc_srq_size);
- atomic_add_32(&state->rc_srq_rwqe_list.dl_cnt, 1);
+ atomic_inc_32(&state->rc_srq_rwqe_list.dl_cnt);
if (ibt_post_srq(state->rc_srq_hdl, &rwqe->w_rwr, 1, NULL) !=
IBT_SUCCESS) {
atomic_dec_32(&state->rc_srq_rwqe_list.dl_cnt);
@@ -1258,7 +1258,7 @@ ibd_rc_post_rwqe(ibd_rc_chan_t *chan, ibd_rwqe_t *rwqe)
* have to make sure dl_cnt has already updated before
* corresponding ibd_rc_process_rx() is called.
*/
- atomic_add_32(&chan->rx_wqe_list.dl_cnt, 1);
+ atomic_inc_32(&chan->rx_wqe_list.dl_cnt);
if (ibt_post_recv(chan->chan_hdl, &rwqe->w_rwr, 1, NULL) !=
IBT_SUCCESS) {
atomic_dec_32(&chan->rx_wqe_list.dl_cnt);
@@ -1499,11 +1499,10 @@ ibd_rc_process_rx(ibd_rc_chan_t *chan, ibd_rwqe_t *rwqe, ibt_wc_t *wc)
* network layer
*/
if (state->rc_enable_srq) {
- atomic_add_32(&state->rc_srq_rwqe_list.
- dl_bufs_outstanding, 1);
+ atomic_inc_32(
+ &state->rc_srq_rwqe_list.dl_bufs_outstanding);
} else {
- atomic_add_32(&chan->rx_wqe_list.
- dl_bufs_outstanding, 1);
+ atomic_inc_32(&chan->rx_wqe_list.dl_bufs_outstanding);
}
mp = rwqe->rwqe_im_mblk;
} else {
@@ -1669,7 +1668,7 @@ ibd_rc_freemsg_cb(char *arg)
ibd_rc_free_rwqe(chan, rwqe);
return;
}
- atomic_add_32(&chan->rx_wqe_list.dl_bufs_outstanding, -1);
+ atomic_dec_32(&chan->rx_wqe_list.dl_bufs_outstanding);
}
/*
diff --git a/usr/src/uts/common/io/ib/clients/rdsv3/cong.c b/usr/src/uts/common/io/ib/clients/rdsv3/cong.c
index 9324f4f8bf..1d1e891f58 100644
--- a/usr/src/uts/common/io/ib/clients/rdsv3/cong.c
+++ b/usr/src/uts/common/io/ib/clients/rdsv3/cong.c
@@ -259,7 +259,7 @@ rdsv3_cong_map_updated(struct rdsv3_cong_map *map, uint64_t portmask)
map, NIPQUAD(map->m_addr));
rdsv3_stats_inc(s_cong_update_received);
- atomic_add_32(&rdsv3_cong_generation, 1);
+ atomic_inc_32(&rdsv3_cong_generation);
#if 0
XXX
if (waitqueue_active(&map->m_waitq))
diff --git a/usr/src/uts/common/io/ib/clients/rdsv3/ib_recv.c b/usr/src/uts/common/io/ib/clients/rdsv3/ib_recv.c
index c614531ff0..49550b0306 100644
--- a/usr/src/uts/common/io/ib/clients/rdsv3/ib_recv.c
+++ b/usr/src/uts/common/io/ib/clients/rdsv3/ib_recv.c
@@ -129,7 +129,7 @@ rdsv3_ib_recv_refill_one(struct rdsv3_connection *conn,
recv->r_ibinc = kmem_cache_alloc(rdsv3_ib_incoming_slab,
KM_NOSLEEP);
if (recv->r_ibinc == NULL) {
- atomic_add_32(&rdsv3_ib_allocation, -1);
+ atomic_dec_32(&rdsv3_ib_allocation);
goto out;
}
rdsv3_inc_init(&recv->r_ibinc->ii_inc, conn, conn->c_faddr);
@@ -154,7 +154,7 @@ rdsv3_ib_recv_refill_one(struct rdsv3_connection *conn,
out:
if (recv->r_ibinc) {
kmem_cache_free(rdsv3_ib_incoming_slab, recv->r_ibinc);
- atomic_add_32(&rdsv3_ib_allocation, -1);
+ atomic_dec_32(&rdsv3_ib_allocation);
recv->r_ibinc = NULL;
}
return (-ENOMEM);
diff --git a/usr/src/uts/common/io/ib/clients/rdsv3/message.c b/usr/src/uts/common/io/ib/clients/rdsv3/message.c
index 711d4dc780..fe52954fa9 100644
--- a/usr/src/uts/common/io/ib/clients/rdsv3/message.c
+++ b/usr/src/uts/common/io/ib/clients/rdsv3/message.c
@@ -68,7 +68,7 @@ rdsv3_message_addref(struct rdsv3_message *rm)
{
RDSV3_DPRINTF5("rdsv3_message_addref", "addref rm %p ref %d",
rm, atomic_get(&rm->m_refcount));
- atomic_add_32(&rm->m_refcount, 1);
+ atomic_inc_32(&rm->m_refcount);
}
/*
diff --git a/usr/src/uts/common/io/ib/clients/rdsv3/rdma.c b/usr/src/uts/common/io/ib/clients/rdsv3/rdma.c
index 9196b3b290..cd670bd7ad 100644
--- a/usr/src/uts/common/io/ib/clients/rdsv3/rdma.c
+++ b/usr/src/uts/common/io/ib/clients/rdsv3/rdma.c
@@ -91,7 +91,7 @@ rdsv3_mr_tree_walk(struct avl_tree *root, uint32_t key,
mr = avl_find(root, &key, &where);
if ((mr == NULL) && (insert != NULL)) {
avl_insert(root, (void *)insert, where);
- atomic_add_32(&insert->r_refcount, 1);
+ atomic_inc_32(&insert->r_refcount);
return (NULL);
}
@@ -254,7 +254,7 @@ __rdsv3_rdma_map(struct rdsv3_sock *rs, struct rds_get_mr_args *args,
ASSERT(!(found && found != mr));
if (mr_ret) {
- atomic_add_32(&mr->r_refcount, 1);
+ atomic_inc_32(&mr->r_refcount);
*mr_ret = mr;
}
@@ -399,7 +399,7 @@ rdsv3_rdma_unuse(struct rdsv3_sock *rs, uint32_t r_key, int force)
RB_CLEAR_NODE(&mr->r_rb_node);
zot_me = 1;
} else {
- atomic_add_32(&mr->r_refcount, 1);
+ atomic_inc_32(&mr->r_refcount);
}
mutex_exit(&rs->rs_rdma_lock);
@@ -638,7 +638,7 @@ rdsv3_cmsg_rdma_dest(struct rdsv3_sock *rs, struct rdsv3_message *rm,
if (!mr)
err = -EINVAL; /* invalid r_key */
else
- atomic_add_32(&mr->r_refcount, 1);
+ atomic_inc_32(&mr->r_refcount);
mutex_exit(&rs->rs_rdma_lock);
if (mr) {
diff --git a/usr/src/uts/common/io/ib/clients/rdsv3/rds_recv.c b/usr/src/uts/common/io/ib/clients/rdsv3/rds_recv.c
index 7a6bb4ecf5..4c60636b38 100644
--- a/usr/src/uts/common/io/ib/clients/rdsv3/rds_recv.c
+++ b/usr/src/uts/common/io/ib/clients/rdsv3/rds_recv.c
@@ -64,7 +64,7 @@ rdsv3_inc_addref(struct rdsv3_incoming *inc)
{
RDSV3_DPRINTF4("rdsv3_inc_addref",
"addref inc %p ref %d", inc, atomic_get(&inc->i_refcount));
- atomic_add_32(&inc->i_refcount, 1);
+ atomic_inc_32(&inc->i_refcount);
}
void
diff --git a/usr/src/uts/common/io/ib/clients/rdsv3/send.c b/usr/src/uts/common/io/ib/clients/rdsv3/send.c
index 164c97f510..d488286b2b 100644
--- a/usr/src/uts/common/io/ib/clients/rdsv3/send.c
+++ b/usr/src/uts/common/io/ib/clients/rdsv3/send.c
@@ -176,7 +176,7 @@ restart:
ret = -ENOMEM;
goto out;
}
- atomic_add_32(&conn->c_senders, 1);
+ atomic_inc_32(&conn->c_senders);
if (conn->c_trans->xmit_prepare)
conn->c_trans->xmit_prepare(conn);
@@ -567,7 +567,7 @@ rdsv3_send_get_message(struct rdsv3_connection *conn,
RDSV3_FOR_EACH_LIST_NODE_SAFE(rm, tmp, &conn->c_retrans, m_conn_item) {
if (rm->m_rdma_op == op) {
- atomic_add_32(&rm->m_refcount, 1);
+ atomic_inc_32(&rm->m_refcount);
found = rm;
goto out;
}
@@ -576,7 +576,7 @@ rdsv3_send_get_message(struct rdsv3_connection *conn,
RDSV3_FOR_EACH_LIST_NODE_SAFE(rm, tmp, &conn->c_send_queue,
m_conn_item) {
if (rm->m_rdma_op == op) {
- atomic_add_32(&rm->m_refcount, 1);
+ atomic_inc_32(&rm->m_refcount);
found = rm;
break;
}
diff --git a/usr/src/uts/common/io/mac/mac.c b/usr/src/uts/common/io/mac/mac.c
index 4e1979cf54..7939228339 100644
--- a/usr/src/uts/common/io/mac/mac.c
+++ b/usr/src/uts/common/io/mac/mac.c
@@ -2255,7 +2255,7 @@ mac_minor_hold(boolean_t sleep)
/*
* Grab a value from the arena.
*/
- atomic_add_32(&minor_count, 1);
+ atomic_inc_32(&minor_count);
if (sleep)
minor = (uint_t)id_alloc(minor_ids);
@@ -2263,7 +2263,7 @@ mac_minor_hold(boolean_t sleep)
minor = (uint_t)id_alloc_nosleep(minor_ids);
if (minor == 0) {
- atomic_add_32(&minor_count, -1);
+ atomic_dec_32(&minor_count);
return (0);
}
@@ -2280,7 +2280,7 @@ mac_minor_rele(minor_t minor)
* Return the value to the arena.
*/
id_free(minor_ids, minor);
- atomic_add_32(&minor_count, -1);
+ atomic_dec_32(&minor_count);
}
uint32_t
diff --git a/usr/src/uts/common/io/mac/mac_bcast.c b/usr/src/uts/common/io/mac/mac_bcast.c
index 1aba37c822..1ff33c3578 100644
--- a/usr/src/uts/common/io/mac/mac_bcast.c
+++ b/usr/src/uts/common/io/mac/mac_bcast.c
@@ -373,7 +373,7 @@ mac_bcast_add(mac_client_impl_t *mcip, const uint8_t *addr, uint16_t vid,
flow_desc.fd_mask |= FLOW_LINK_VID;
}
- grp->mbg_id = atomic_add_32_nv(&mac_bcast_id, 1);
+ grp->mbg_id = atomic_inc_32_nv(&mac_bcast_id);
(void) sprintf(flow_name,
"mac/%s/mcast%d", mip->mi_name, grp->mbg_id);
diff --git a/usr/src/uts/common/io/mega_sas/megaraid_sas.c b/usr/src/uts/common/io/mega_sas/megaraid_sas.c
index a63e7e488a..1ec50a98cf 100644
--- a/usr/src/uts/common/io/mega_sas/megaraid_sas.c
+++ b/usr/src/uts/common/io/mega_sas/megaraid_sas.c
@@ -4565,7 +4565,7 @@ read_fw_status_reg_ppc(struct megasas_instance *instance)
static void
issue_cmd_xscale(struct megasas_cmd *cmd, struct megasas_instance *instance)
{
- atomic_add_16(&instance->fw_outstanding, 1);
+ atomic_inc_16(&instance->fw_outstanding);
/* Issue the command to the FW */
WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) |
@@ -4575,7 +4575,7 @@ issue_cmd_xscale(struct megasas_cmd *cmd, struct megasas_instance *instance)
static void
issue_cmd_ppc(struct megasas_cmd *cmd, struct megasas_instance *instance)
{
- atomic_add_16(&instance->fw_outstanding, 1);
+ atomic_inc_16(&instance->fw_outstanding);
/* Issue the command to the FW */
WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) |
diff --git a/usr/src/uts/common/io/mr_sas/mr_sas.c b/usr/src/uts/common/io/mr_sas/mr_sas.c
index e929672325..ee43931f7d 100644
--- a/usr/src/uts/common/io/mr_sas/mr_sas.c
+++ b/usr/src/uts/common/io/mr_sas/mr_sas.c
@@ -6734,7 +6734,7 @@ static void
issue_cmd_ppc(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
{
struct scsi_pkt *pkt;
- atomic_add_16(&instance->fw_outstanding, 1);
+ atomic_inc_16(&instance->fw_outstanding);
pkt = cmd->pkt;
if (pkt) {
diff --git a/usr/src/uts/common/io/mr_sas/mr_sas_tbolt.c b/usr/src/uts/common/io/mr_sas/mr_sas_tbolt.c
index aebc70c3b8..44ce92e513 100644
--- a/usr/src/uts/common/io/mr_sas/mr_sas_tbolt.c
+++ b/usr/src/uts/common/io/mr_sas/mr_sas_tbolt.c
@@ -1894,7 +1894,7 @@ void
tbolt_issue_cmd(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
{
MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
- atomic_add_16(&instance->fw_outstanding, 1);
+ atomic_inc_16(&instance->fw_outstanding);
struct scsi_pkt *pkt;
diff --git a/usr/src/uts/common/io/myri10ge/drv/myri10ge.c b/usr/src/uts/common/io/myri10ge/drv/myri10ge.c
index 791d6cb54c..f0af9b76f5 100644
--- a/usr/src/uts/common/io/myri10ge/drv/myri10ge.c
+++ b/usr/src/uts/common/io/myri10ge/drv/myri10ge.c
@@ -3132,7 +3132,7 @@ myri10ge_tx_tso_copy(struct myri10ge_slice_state *ss, mblk_t *mp,
/* check to see if the slots are really there */
avail = tx->mask - (tx->req - tx->done);
if (unlikely(avail <= MYRI10GE_MAX_SEND_DESC_TSO)) {
- atomic_add_32(&tx->stall, 1);
+ atomic_inc_32(&tx->stall);
mutex_exit(&tx->lock);
return (EBUSY);
}
@@ -3373,7 +3373,7 @@ again:
if (avail < max_segs) {
err = EBUSY;
- atomic_add_32(&tx->stall_early, 1);
+ atomic_inc_32(&tx->stall_early);
goto stall;
}
@@ -3638,7 +3638,7 @@ again:
late_stall:
try_pullup = 0;
- atomic_add_32(&tx->stall_late, 1);
+ atomic_inc_32(&tx->stall_late);
abort_with_handles:
/* unbind and free handles from previous mblks */
@@ -3671,7 +3671,7 @@ pullup:
stall:
if (err != 0) {
if (err == EBUSY) {
- atomic_add_32(&tx->stall, 1);
+ atomic_inc_32(&tx->stall);
} else {
MYRI10GE_ATOMIC_SLICE_STAT_INC(xmit_err);
}
diff --git a/usr/src/uts/common/io/myri10ge/drv/myri10ge_var.h b/usr/src/uts/common/io/myri10ge/drv/myri10ge_var.h
index 24889e48a6..dd7b6da710 100644
--- a/usr/src/uts/common/io/myri10ge/drv/myri10ge_var.h
+++ b/usr/src/uts/common/io/myri10ge/drv/myri10ge_var.h
@@ -291,11 +291,11 @@ struct myri10ge_info {
#define MYRI10GE_SLICE_STAT_DEC(field) \
(((struct myri10ge_slice_stat *)ss->ksp_stat->ks_data)->field.value.ul)--
#define MYRI10GE_ATOMIC_SLICE_STAT_INC(field) \
-atomic_add_long(&(((struct myri10ge_slice_stat *) \
- ss->ksp_stat->ks_data)->field.value.ul), 1)
+atomic_inc_ulong(&(((struct myri10ge_slice_stat *) \
+ ss->ksp_stat->ks_data)->field.value.ul))
#define MYRI10GE_ATOMIC_SLICE_STAT_DEC(field) \
-atomic_add_long(&(((struct myri10ge_slice_stat *) \
- ss->ksp_stat->ks_data)->field.value.ul), -1)
+atomic_dec_ulong(&(((struct myri10ge_slice_stat *) \
+ ss->ksp_stat->ks_data)->field.value.ul))
#define MYRI10GE_SLICE_STAT(field) \
(((struct myri10ge_slice_stat *)ss->ksp_stat->ks_data)->field.value.ul)
diff --git a/usr/src/uts/common/io/neti_impl.c b/usr/src/uts/common/io/neti_impl.c
index 0a90e9d47e..05f2549dd7 100644
--- a/usr/src/uts/common/io/neti_impl.c
+++ b/usr/src/uts/common/io/neti_impl.c
@@ -134,7 +134,7 @@ net_protocol_lookup(netid_t netid, const char *protocol)
mutex_enter(&nts->nts_lock);
nd = net_find(protocol, nts);
if (nd != NULL)
- atomic_add_32((uint_t *)&nd->netd_refcnt, 1);
+ atomic_inc_32((uint_t *)&nd->netd_refcnt);
mutex_exit(&nts->nts_lock);
return (nd);
}
@@ -157,7 +157,7 @@ net_protocol_release(net_handle_t info)
* removed from the nts_netd_head list on the neti_stack_t from a
* call to net_protocol_unregister already, so it is thus an orphan.
*/
- if (atomic_add_32_nv((uint_t *)&info->netd_refcnt, -1) == 0) {
+ if (atomic_dec_32_nv((uint_t *)&info->netd_refcnt) == 0) {
ASSERT(info->netd_hooks == NULL);
ASSERT(info->netd_stack == NULL);
kmem_free(info, sizeof (struct net_data));
@@ -201,7 +201,7 @@ net_protocol_walk(netid_t netid, net_handle_t info)
(void) net_protocol_release(info);
if (n != NULL)
- atomic_add_32((uint_t *)&n->netd_refcnt, 1);
+ atomic_inc_32((uint_t *)&n->netd_refcnt);
mutex_exit(&nts->nts_lock);
diff --git a/usr/src/uts/common/io/nxge/nxge_rxdma.c b/usr/src/uts/common/io/nxge/nxge_rxdma.c
index 16931c739b..0f2385e840 100644
--- a/usr/src/uts/common/io/nxge/nxge_rxdma.c
+++ b/usr/src/uts/common/io/nxge/nxge_rxdma.c
@@ -1684,7 +1684,7 @@ nxge_freeb(p_rx_msg_t rx_msg_p)
* is processing a loaned up buffer block.
*/
free_state = rx_msg_p->free;
- ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1);
+ ref_cnt = atomic_dec_32_nv(&rx_msg_p->ref_cnt);
if (!ref_cnt) {
atomic_dec_32(&nxge_mblks_pending);
buffer = rx_msg_p->buffer;
diff --git a/usr/src/uts/common/io/pciex/pcie_fault.c b/usr/src/uts/common/io/pciex/pcie_fault.c
index 76fe57adc7..f4a2e9190e 100644
--- a/usr/src/uts/common/io/pciex/pcie_fault.c
+++ b/usr/src/uts/common/io/pciex/pcie_fault.c
@@ -2629,7 +2629,7 @@ pf_ereport_setup(dev_info_t *dip, uint64_t ena, nvlist_t **ereport,
*eqep = errorq_reserve(fmhdl->fh_errorq);
if (*eqep == NULL) {
- atomic_add_64(&fmhdl->fh_kstat.fek_erpt_dropped.value.ui64, 1);
+ atomic_inc_64(&fmhdl->fh_kstat.fek_erpt_dropped.value.ui64);
return (DDI_FAILURE);
}
diff --git a/usr/src/uts/common/io/rsm/rsm.c b/usr/src/uts/common/io/rsm/rsm.c
index 77ce815dc3..b0c93e7d8a 100644
--- a/usr/src/uts/common/io/rsm/rsm.c
+++ b/usr/src/uts/common/io/rsm/rsm.c
@@ -4043,7 +4043,7 @@ rsm_intr_event(rsmipc_request_t *msg)
seg = (rsmseg_t *)p;
rsmseglock_acquire(seg);
- atomic_add_32(&seg->s_pollevent, 1);
+ atomic_inc_32(&seg->s_pollevent);
if (seg->s_pollflag & RSM_SEGMENT_POLL)
pollwakeup(&seg->s_poll, POLLRDNORM);
@@ -4064,7 +4064,7 @@ rsm_intr_event(rsmipc_request_t *msg)
ASSERT(rsmseglock_held(seg));
- atomic_add_32(&seg->s_pollevent, 1);
+ atomic_inc_32(&seg->s_pollevent);
/*
* We must hold the segment lock here, or else the segment
@@ -5502,7 +5502,7 @@ again:
e, no_reply_cnt));
ASSERT(e != RSMERR_QUEUE_FENCE_UP &&
e != RSMERR_BAD_BARRIER_HNDL);
- atomic_add_64(&rsm_ipcsend_errcnt, 1);
+ atomic_inc_64(&rsm_ipcsend_errcnt);
goto again;
} else {
DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
@@ -5533,7 +5533,7 @@ again:
DBG_PRINTF((category, RSM_ERR,
"rsm: rsmipc_send reply send"
" err = %d\n", e));
- atomic_add_64(&rsm_ipcsend_errcnt, 1);
+ atomic_inc_64(&rsm_ipcsend_errcnt);
goto again;
} else {
DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
@@ -5639,7 +5639,7 @@ again:
RSMIPC_CLEAR(rslot, RSMIPC_PENDING);
rsmipc_free(rslot);
rele_sendq_token(sendq_token);
- atomic_add_64(&rsm_ipcsend_errcnt, 1);
+ atomic_inc_64(&rsm_ipcsend_errcnt);
goto again;
}
@@ -5935,7 +5935,7 @@ rsmipc_send_controlmsg(path_t *path, int msgtype)
break;
}
/* error counter for statistics */
- atomic_add_64(&rsm_ctrlmsg_errcnt, 1);
+ atomic_inc_64(&rsm_ctrlmsg_errcnt);
DBG_PRINTF((category, RSM_ERR,
"rsmipc_send_controlmsg:rsm_send error=%d", e));
@@ -6449,7 +6449,7 @@ rsm_connect(rsmseg_t *seg, rsm_ioctlmsg_t *msg, cred_t *cred,
seg->s_flags &= ~RSM_IMPORT_DUMMY; /* clear dummy flag */
if (bar_va) {
/* increment generation number on barrier page */
- atomic_add_16(bar_va + seg->s_hdr.rsmrc_num, 1);
+ atomic_inc_16(bar_va + seg->s_hdr.rsmrc_num);
/* return user off into barrier page where status will be */
msg->off = (int)seg->s_hdr.rsmrc_num;
msg->gnum = bar_va[msg->off]; /* gnum race */
@@ -6685,7 +6685,7 @@ rsm_closeconnection(rsmseg_t *seg, void **cookie)
/* increment generation number on barrier page */
if (bar_va) {
- atomic_add_16(bar_va + seg->s_hdr.rsmrc_num, 1);
+ atomic_inc_16(bar_va + seg->s_hdr.rsmrc_num);
}
/*
@@ -7284,7 +7284,7 @@ rsm_consumeevent_ioctl(caddr_t arg, int mode)
seg));
if (seg->s_pollevent) {
/* consume the event */
- atomic_add_32(&seg->s_pollevent, -1);
+ atomic_dec_32(&seg->s_pollevent);
event_list[i].revent = POLLRDNORM;
}
rsmseglock_release(seg);
diff --git a/usr/src/uts/common/io/str_conf.c b/usr/src/uts/common/io/str_conf.c
index 5ab410a9ee..29c7a4e033 100644
--- a/usr/src/uts/common/io/str_conf.c
+++ b/usr/src/uts/common/io/str_conf.c
@@ -24,8 +24,6 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
@@ -284,7 +282,7 @@ try_again:
rw_enter(&fmodsw_lock, RW_READER);
if (i_fmodsw_hash_find(name, &fp) == 0) {
if (flags & FMODSW_HOLD) {
- atomic_add_32(&(fp->f_ref), 1); /* lock must be held */
+ atomic_inc_32(&(fp->f_ref)); /* lock must be held */
ASSERT(fp->f_ref > 0);
}
@@ -308,5 +306,5 @@ void
fmodsw_rele(fmodsw_impl_t *fp)
{
ASSERT(fp->f_ref > 0);
- atomic_add_32(&(fp->f_ref), -1);
+ atomic_dec_32(&(fp->f_ref));
}
diff --git a/usr/src/uts/common/io/tl.c b/usr/src/uts/common/io/tl.c
index f5cd181284..88d3784998 100644
--- a/usr/src/uts/common/io/tl.c
+++ b/usr/src/uts/common/io/tl.c
@@ -1138,7 +1138,7 @@ tl_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
static void
tl_refhold(tl_endpt_t *tep)
{
- atomic_add_32(&tep->te_refcnt, 1);
+ atomic_inc_32(&tep->te_refcnt);
}
static void
@@ -1146,7 +1146,7 @@ tl_refrele(tl_endpt_t *tep)
{
ASSERT(tep->te_refcnt != 0);
- if (atomic_add_32_nv(&tep->te_refcnt, -1) == 0)
+ if (atomic_dec_32_nv(&tep->te_refcnt) == 0)
tl_free(tep);
}
@@ -1264,13 +1264,13 @@ tl_serializer_alloc(int flags)
static void
tl_serializer_refhold(tl_serializer_t *s)
{
- atomic_add_32(&s->ts_refcnt, 1);
+ atomic_inc_32(&s->ts_refcnt);
}
static void
tl_serializer_refrele(tl_serializer_t *s)
{
- if (atomic_add_32_nv(&s->ts_refcnt, -1) == 0) {
+ if (atomic_dec_32_nv(&s->ts_refcnt) == 0) {
serializer_destroy(s->ts_serializer);
kmem_free(s, sizeof (tl_serializer_t));
}
@@ -5426,7 +5426,7 @@ tl_get_any_addr(tl_endpt_t *tep, tl_addr_t *req)
* Use default address.
*/
bcopy(&tep->te_defaddr, tep->te_abuf, sizeof (uint32_t));
- atomic_add_32(&tep->te_defaddr, 1);
+ atomic_inc_32(&tep->te_defaddr);
}
/*
diff --git a/usr/src/uts/common/io/usb/usba/usbai_pipe_mgmt.c b/usr/src/uts/common/io/usb/usba/usbai_pipe_mgmt.c
index ea81d9d757..e4d9c2bac8 100644
--- a/usr/src/uts/common/io/usb/usba/usbai_pipe_mgmt.c
+++ b/usr/src/uts/common/io/usb/usba/usbai_pipe_mgmt.c
@@ -458,7 +458,7 @@ usba_init_pipe_handle(dev_info_t *dip,
"USB_%s_%x_pipehndl_tq_%d",
ddi_driver_name(dip), ep->bEndpointAddress, instance);
} else {
- def_instance = atomic_add_32_nv(&anon_instance, 1);
+ def_instance = atomic_inc_32_nv(&anon_instance);
(void) snprintf(tq_name, sizeof (tq_name),
"USB_%s_%x_pipehndl_tq_%d_",
diff --git a/usr/src/uts/common/io/xge/drv/xgell.c b/usr/src/uts/common/io/xge/drv/xgell.c
index a5d857f05d..720e95b065 100644
--- a/usr/src/uts/common/io/xge/drv/xgell.c
+++ b/usr/src/uts/common/io/xge/drv/xgell.c
@@ -1180,7 +1180,7 @@ _begin:
xge_hal_fifo_dtr_post(ring->channelh, dtr);
/* Update per-ring tx statistics */
- atomic_add_64(&ring->tx_pkts, 1);
+ atomic_inc_64(&ring->tx_pkts);
atomic_add_64(&ring->tx_bytes, sent_bytes);
return (NULL);
diff --git a/usr/src/uts/common/ipp/dlcosmk/dlcosmk.c b/usr/src/uts/common/ipp/dlcosmk/dlcosmk.c
index c827fb9e82..b69a0896d2 100644
--- a/usr/src/uts/common/ipp/dlcosmk/dlcosmk.c
+++ b/usr/src/uts/common/ipp/dlcosmk/dlcosmk.c
@@ -66,14 +66,14 @@ dlcosmk_process(mblk_t **mpp, dlcosmk_data_t *dlcosmk_data, uint32_t ill_index,
if (mp->b_datap->db_type != M_DATA) {
if ((mp->b_cont == NULL) ||
(mp->b_cont->b_datap->db_type != M_DATA)) {
- atomic_add_64(&dlcosmk_data->epackets, 1);
+ atomic_inc_64(&dlcosmk_data->epackets);
dlcosmk0dbg(("dlcosmk_process: no data\n"));
return (EINVAL);
}
}
/* Update global stats */
- atomic_add_64(&dlcosmk_data->npackets, 1);
+ atomic_inc_64(&dlcosmk_data->npackets);
/*
* This should only be called for outgoing packets. For inbound, just
@@ -81,7 +81,7 @@ dlcosmk_process(mblk_t **mpp, dlcosmk_data_t *dlcosmk_data, uint32_t ill_index,
*/
if ((proc == IPP_LOCAL_IN) || (proc == IPP_FWD_IN)) {
dlcosmk2dbg(("dlcosmk_process:cannot mark incoming packets\n"));
- atomic_add_64(&dlcosmk_data->ipackets, 1);
+ atomic_inc_64(&dlcosmk_data->ipackets);
return (0);
}
@@ -90,7 +90,7 @@ dlcosmk_process(mblk_t **mpp, dlcosmk_data_t *dlcosmk_data, uint32_t ill_index,
B_FALSE)) == NULL)) {
dlcosmk2dbg(("dlcosmk_process:invalid ill index %u\n",
ill_index));
- atomic_add_64(&dlcosmk_data->ipackets, 1);
+ atomic_inc_64(&dlcosmk_data->ipackets);
return (0);
}
@@ -101,7 +101,7 @@ dlcosmk_process(mblk_t **mpp, dlcosmk_data_t *dlcosmk_data, uint32_t ill_index,
if (!(ill->ill_flags & ILLF_COS_ENABLED)) {
dlcosmk2dbg(("dlcosmk_process:ill %u does not support CoS\n",
ill_index));
- atomic_add_64(&dlcosmk_data->ipackets, 1);
+ atomic_inc_64(&dlcosmk_data->ipackets);
ill_refrele(ill);
return (0);
}
@@ -124,8 +124,7 @@ dlcosmk_process(mblk_t **mpp, dlcosmk_data_t *dlcosmk_data, uint32_t ill_index,
dlur->dl_priority.dl_max =
dlcosmk_data->dl_max;
} else {
- atomic_add_64(&dlcosmk_data->ipackets,
- 1);
+ atomic_inc_64(&dlcosmk_data->ipackets);
}
break;
}
@@ -134,7 +133,7 @@ dlcosmk_process(mblk_t **mpp, dlcosmk_data_t *dlcosmk_data, uint32_t ill_index,
mp->b_band = dlcosmk_data->b_band;
break;
default:
- atomic_add_64(&dlcosmk_data->ipackets, 1);
+ atomic_inc_64(&dlcosmk_data->ipackets);
break;
}
diff --git a/usr/src/uts/common/ipp/dscpmk/dscpmk.c b/usr/src/uts/common/ipp/dscpmk/dscpmk.c
index 70d1b9cbf5..8658b433d5 100644
--- a/usr/src/uts/common/ipp/dscpmk/dscpmk.c
+++ b/usr/src/uts/common/ipp/dscpmk/dscpmk.c
@@ -24,8 +24,6 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/pattr.h>
@@ -68,7 +66,7 @@ dscpmk_process(mblk_t **mpp, dscpmk_data_t *dscpmk_data, ip_proc_t proc)
mp = mp->b_cont;
} else {
dscpmk0dbg(("dscpmk_process: no data\n"));
- atomic_add_64(&dscpmk_data->epackets, 1);
+ atomic_inc_64(&dscpmk_data->epackets);
return (EINVAL);
}
}
@@ -77,14 +75,14 @@ dscpmk_process(mblk_t **mpp, dscpmk_data_t *dscpmk_data, ip_proc_t proc)
if ((mp->b_wptr - mp->b_rptr) < IP_SIMPLE_HDR_LENGTH) {
if (!pullupmsg(mp, IP_SIMPLE_HDR_LENGTH)) {
dscpmk0dbg(("dscpmk_process: pullup failed\n"));
- atomic_add_64(&dscpmk_data->epackets, 1);
+ atomic_inc_64(&dscpmk_data->epackets);
return (EINVAL);
}
}
ipha = (ipha_t *)mp->b_rptr;
/* Update global stats */
- atomic_add_64(&dscpmk_data->npackets, 1);
+ atomic_inc_64(&dscpmk_data->npackets);
/*
* This should only be called for outgoing packets. For inbound packets
@@ -92,7 +90,7 @@ dscpmk_process(mblk_t **mpp, dscpmk_data_t *dscpmk_data, ip_proc_t proc)
*/
if ((proc == IPP_LOCAL_IN) || (proc == IPP_FWD_IN)) {
dscpmk2dbg(("dscpmk_process: cannot mark incoming packets\n"));
- atomic_add_64(&dscpmk_data->ipackets, 1);
+ atomic_inc_64(&dscpmk_data->ipackets);
return (0);
}
@@ -114,21 +112,21 @@ dscpmk_process(mblk_t **mpp, dscpmk_data_t *dscpmk_data, ip_proc_t proc)
new_dscp = dscpmk_data->dscp_map[dscp >> 2];
/* Update stats for this new_dscp */
- atomic_add_64(&dscpmk_data->dscp_stats[new_dscp].npackets, 1);
+ atomic_inc_64(&dscpmk_data->dscp_stats[new_dscp].npackets);
/*
* if new_dscp is same as the original, update stats and
* return.
*/
if (new_dscp == (dscp >> 2)) {
- atomic_add_64(&dscpmk_data->unchanged, 1);
+ atomic_inc_64(&dscpmk_data->unchanged);
return (0);
}
/* Get back the ECN/CU value from the original dscp */
new_dscp = (new_dscp << 2) | (dscp & 0x3);
- atomic_add_64(&dscpmk_data->changed, 1);
+ atomic_inc_64(&dscpmk_data->changed);
/*
* IPv4 : ToS structure -- RFC 791
*
diff --git a/usr/src/uts/common/ipp/flowacct/flowacct.c b/usr/src/uts/common/ipp/flowacct/flowacct.c
index ad208610b5..02bac3d545 100644
--- a/usr/src/uts/common/ipp/flowacct/flowacct.c
+++ b/usr/src/uts/common/ipp/flowacct/flowacct.c
@@ -496,9 +496,9 @@ flowacct_update_flows_tbl(header_t *header, flowacct_data_t *flowacct_data)
* the maximum no. of flow items in the table.
*/
try_again:
- atomic_add_32(&flowacct_data->nflows, 1);
+ atomic_inc_32(&flowacct_data->nflows);
if (flowacct_data->nflows > flowacct_data->max_limit) {
- atomic_add_32(&flowacct_data->nflows, -1);
+ atomic_dec_32(&flowacct_data->nflows);
/* Try timing out once */
if (just_once) {
@@ -536,7 +536,7 @@ flowacct_update_flows_tbl(header_t *header, flowacct_data_t *flowacct_data)
FLOWACCT_DEL_OBJ);
}
mutex_exit(&fhead->lock);
- atomic_add_32(&flowacct_data->nflows, -1);
+ atomic_dec_32(&flowacct_data->nflows);
flowacct0dbg(("flowacct_update_flows_tbl: mem alloc "\
"error"));
return (-1);
@@ -550,7 +550,7 @@ flowacct_update_flows_tbl(header_t *header, flowacct_data_t *flowacct_data)
FLOWACCT_DEL_OBJ);
}
mutex_exit(&fhead->lock);
- atomic_add_32(&flowacct_data->nflows, -1);
+ atomic_dec_32(&flowacct_data->nflows);
kmem_free(item, FLOWACCT_ITEM_SZ);
flowacct0dbg(("flowacct_update_flows_tbl: mem alloc "\
"error\n"));
@@ -887,7 +887,7 @@ flowacct_process(mblk_t **mpp, flowacct_data_t *flowacct_data)
mp = mp->b_cont;
} else {
flowacct0dbg(("flowacct_process: no data\n"));
- atomic_add_64(&flowacct_data->epackets, 1);
+ atomic_inc_64(&flowacct_data->epackets);
return (EINVAL);
}
}
@@ -895,26 +895,26 @@ flowacct_process(mblk_t **mpp, flowacct_data_t *flowacct_data)
header = kmem_zalloc(FLOWACCT_HEADER_SZ, KM_NOSLEEP);
if (header == NULL) {
flowacct0dbg(("flowacct_process: error allocing mem"));
- atomic_add_64(&flowacct_data->epackets, 1);
+ atomic_inc_64(&flowacct_data->epackets);
return (ENOMEM);
}
/* Get all the required information into header. */
if (flowacct_extract_header(mp, header) != 0) {
kmem_free(header, FLOWACCT_HEADER_SZ);
- atomic_add_64(&flowacct_data->epackets, 1);
+ atomic_inc_64(&flowacct_data->epackets);
return (EINVAL);
}
/* Updated the flow table with this entry */
if (flowacct_update_flows_tbl(header, flowacct_data) != 0) {
kmem_free(header, FLOWACCT_HEADER_SZ);
- atomic_add_64(&flowacct_data->epackets, 1);
+ atomic_inc_64(&flowacct_data->epackets);
return (ENOMEM);
}
/* Update global stats */
- atomic_add_64(&flowacct_data->npackets, 1);
+ atomic_inc_64(&flowacct_data->npackets);
atomic_add_64(&flowacct_data->nbytes, header->pktlen);
kmem_free(header, FLOWACCT_HEADER_SZ);
diff --git a/usr/src/uts/common/ipp/ipgpc/classifierddi.c b/usr/src/uts/common/ipp/ipgpc/classifierddi.c
index e76c181d92..ab34d43781 100644
--- a/usr/src/uts/common/ipp/ipgpc/classifierddi.c
+++ b/usr/src/uts/common/ipp/ipgpc/classifierddi.c
@@ -413,7 +413,7 @@ ipgpc_invoke_action(ipp_action_id_t aid, ipp_packet_t *packet)
mp = mp->b_cont; /* jump over the M_CTL into M_DATA */
} else {
ipgpc0dbg(("ipgpc_invoke_action: no data\n"));
- atomic_add_64(&ipgpc_epackets, 1);
+ atomic_inc_64(&ipgpc_epackets);
return (EINVAL);
}
}
@@ -485,7 +485,7 @@ ipgpc_invoke_action(ipp_action_id_t aid, ipp_packet_t *packet)
/* ipgpc_classify will only return NULL if a memory error occured */
if (out_class == NULL) {
- atomic_add_64(&ipgpc_epackets, 1);
+ atomic_inc_64(&ipgpc_epackets);
return (ENOMEM);
}
@@ -495,7 +495,7 @@ ipgpc_invoke_action(ipp_action_id_t aid, ipp_packet_t *packet)
if ((rc = ipp_packet_add_class(packet, out_class->class_name,
out_class->next_action)) != 0) {
- atomic_add_64(&ipgpc_epackets, 1);
+ atomic_inc_64(&ipgpc_epackets);
ipgpc0dbg(("ipgpc_invoke_action: ipp_packet_add_class " \
"failed with error %d", rc));
return (rc);
diff --git a/usr/src/uts/common/ipp/ipgpc/filters.c b/usr/src/uts/common/ipp/ipgpc/filters.c
index 3a2f954d0a..d3a21f14cd 100644
--- a/usr/src/uts/common/ipp/ipgpc/filters.c
+++ b/usr/src/uts/common/ipp/ipgpc/filters.c
@@ -275,7 +275,7 @@ initialize_ba_tables(void)
static void
element_node_ref(element_node_t *element)
{
- atomic_add_32(&element->element_refcnt, 1);
+ atomic_inc_32(&element->element_refcnt);
ASSERT(element->element_refcnt > 1);
}
@@ -283,7 +283,7 @@ static void
element_node_unref(element_node_t *element)
{
ASSERT(element->element_refcnt > 0);
- if (atomic_add_32_nv(&element->element_refcnt, -1) == 0) {
+ if (atomic_dec_32_nv(&element->element_refcnt) == 0) {
kmem_cache_free(element_node_cache, element);
}
}
@@ -1136,7 +1136,7 @@ ipgpc_addfilter(ipgpc_filter_t *filter, char *class_name, ipp_flags_t flags)
mutex_exit(&ipgpc_cid_list_lock);
}
mutex_exit(&ipgpc_fid_list_lock);
- atomic_add_long(&ipgpc_num_fltrs, 1);
+ atomic_inc_ulong(&ipgpc_num_fltrs);
ipgpc3dbg(("ipgpc_addfilter: adding filter %s", filter->filter_name));
return (0);
}
@@ -1154,12 +1154,12 @@ reset_dontcare_stats(void)
int i;
for (i = 0; i < NUM_TRIES; ++i) {
- atomic_add_32(&ipgpc_trie_list[i].stats.num_dontcare, -1);
+ atomic_dec_32(&ipgpc_trie_list[i].stats.num_dontcare);
}
for (i = 0; i < NUM_TABLES; ++i) {
- atomic_add_32(&ipgpc_table_list[i].stats.num_dontcare, -1);
+ atomic_dec_32(&ipgpc_table_list[i].stats.num_dontcare);
}
- atomic_add_32(&ipgpc_ds_table_id.stats.num_dontcare, -1);
+ atomic_dec_32(&ipgpc_ds_table_id.stats.num_dontcare);
}
/*
@@ -1357,7 +1357,7 @@ insertcid(ipgpc_class_t *in_class, int *out_class_id)
bcopy(in_class->class_name,
ipgpc_cid_list[class_id].aclass.class_name, MAXNAMELEN);
ipgpc_cid_list[class_id].filter_list = NULL;
- atomic_add_long(&ipgpc_num_cls, 1);
+ atomic_inc_ulong(&ipgpc_num_cls);
} else {
ipgpc0dbg(("insertcid: class name lookup error %d", err));
mutex_exit(&ipgpc_cid_list_lock);
@@ -1504,7 +1504,7 @@ ipgpc_removefilter(char *filter_name, int32_t filter_instance,
/* remove filter id from class' list of filters */
remove_from_cid_filter_list(ipgpc_fid_list[filter_id].class_id,
filter_id);
- atomic_add_long(&ipgpc_num_fltrs, -1);
+ atomic_dec_ulong(&ipgpc_num_fltrs);
return (0);
}
@@ -1525,7 +1525,7 @@ removecid(int in_class_id)
ipgpc_cid_list[in_class_id].cl_stats = NULL;
}
/* decrement total number of classes loaded */
- atomic_add_long(&ipgpc_num_cls, -1);
+ atomic_dec_ulong(&ipgpc_num_cls);
}
/*
diff --git a/usr/src/uts/common/ipp/ippconf.c b/usr/src/uts/common/ipp/ippconf.c
index 40d93f7a91..851a0ecca0 100644
--- a/usr/src/uts/common/ipp/ippconf.c
+++ b/usr/src/uts/common/ipp/ippconf.c
@@ -23,8 +23,6 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/param.h>
#include <sys/modctl.h>
@@ -1378,7 +1376,7 @@ ipp_packet_process(
* being incremented.
*/
- atomic_add_32(&(ap->ippa_packets), 1);
+ atomic_inc_32(&(ap->ippa_packets));
imp = ap->ippa_mod;
ASSERT(imp != NULL);
@@ -1420,7 +1418,7 @@ ipp_packet_process(
* Decrement the packet count.
*/
- atomic_add_32(&(ap->ippa_packets), -1);
+ atomic_dec_32(&(ap->ippa_packets));
/*
* If the class' action id is the same now as it was
@@ -2379,7 +2377,7 @@ hold_mod(
* freed.
*/
- atomic_add_32(&(imp->ippm_hold_count), 1);
+ atomic_inc_32(&(imp->ippm_hold_count));
rw_exit(ipp_mod_byid_lock);
return (imp);
@@ -2397,7 +2395,7 @@ rele_mod(
*/
ASSERT(imp->ippm_hold_count != 0);
- atomic_add_32(&(imp->ippm_hold_count), -1);
+ atomic_dec_32(&(imp->ippm_hold_count));
/*
* If the structure has 'destruct pending' set then we tried to free
@@ -3071,7 +3069,7 @@ hold_action(
* freed.
*/
- atomic_add_32(&(ap->ippa_hold_count), 1);
+ atomic_inc_32(&(ap->ippa_hold_count));
rw_exit(ipp_action_byid_lock);
return (ap);
@@ -3089,7 +3087,7 @@ rele_action(
*/
ASSERT(ap->ippa_hold_count != 0);
- atomic_add_32(&(ap->ippa_hold_count), -1);
+ atomic_dec_32(&(ap->ippa_hold_count));
/*
* If the structure has 'destruct pending' set then we tried to free
diff --git a/usr/src/uts/common/ipp/meters/tokenmt.c b/usr/src/uts/common/ipp/meters/tokenmt.c
index 9f5432f327..33db115f64 100644
--- a/usr/src/uts/common/ipp/meters/tokenmt.c
+++ b/usr/src/uts/common/ipp/meters/tokenmt.c
@@ -24,8 +24,6 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/conf.h>
@@ -84,7 +82,7 @@ tokenmt_process(mblk_t **mpp, tokenmt_data_t *tokenmt_data,
if (mp == NULL) {
tokenmt0dbg(("tokenmt_process: null mp!\n"));
- atomic_add_64(&tokenmt_data->epackets, 1);
+ atomic_inc_64(&tokenmt_data->epackets);
return (EINVAL);
}
@@ -94,7 +92,7 @@ tokenmt_process(mblk_t **mpp, tokenmt_data_t *tokenmt_data,
mp = mp->b_cont;
} else {
tokenmt0dbg(("tokenmt_process: no data\n"));
- atomic_add_64(&tokenmt_data->epackets, 1);
+ atomic_inc_64(&tokenmt_data->epackets);
return (EINVAL);
}
}
@@ -103,7 +101,7 @@ tokenmt_process(mblk_t **mpp, tokenmt_data_t *tokenmt_data,
if ((mp->b_wptr - mp->b_rptr) < IP_SIMPLE_HDR_LENGTH) {
if (!pullupmsg(mp, IP_SIMPLE_HDR_LENGTH)) {
tokenmt0dbg(("tokenmt_process: pullup error\n"));
- atomic_add_64(&tokenmt_data->epackets, 1);
+ atomic_inc_64(&tokenmt_data->epackets);
return (EINVAL);
}
}
@@ -218,14 +216,14 @@ tokenmt_process(mblk_t **mpp, tokenmt_data_t *tokenmt_data,
/* Update Stats */
if (*next_action == cfg_parms->green_action) {
- atomic_add_64(&tokenmt_data->green_packets, 1);
+ atomic_inc_64(&tokenmt_data->green_packets);
atomic_add_64(&tokenmt_data->green_bits, pkt_len);
} else if (*next_action == cfg_parms->yellow_action) {
- atomic_add_64(&tokenmt_data->yellow_packets, 1);
+ atomic_inc_64(&tokenmt_data->yellow_packets);
atomic_add_64(&tokenmt_data->yellow_bits, pkt_len);
} else {
ASSERT(*next_action == cfg_parms->red_action);
- atomic_add_64(&tokenmt_data->red_packets, 1);
+ atomic_inc_64(&tokenmt_data->red_packets);
atomic_add_64(&tokenmt_data->red_bits, pkt_len);
}
diff --git a/usr/src/uts/common/ipp/meters/tswtcl.c b/usr/src/uts/common/ipp/meters/tswtcl.c
index bc970ac667..386fa25107 100644
--- a/usr/src/uts/common/ipp/meters/tswtcl.c
+++ b/usr/src/uts/common/ipp/meters/tswtcl.c
@@ -24,8 +24,6 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/random.h>
@@ -74,7 +72,7 @@ tswtcl_process(mblk_t **mpp, tswtcl_data_t *tswtcl_data,
if (mp == NULL) {
tswtcl0dbg(("tswtcl_process: null mp!\n"));
- atomic_add_64(&tswtcl_data->epackets, 1);
+ atomic_inc_64(&tswtcl_data->epackets);
return (EINVAL);
}
@@ -84,7 +82,7 @@ tswtcl_process(mblk_t **mpp, tswtcl_data_t *tswtcl_data,
mp = mp->b_cont;
} else {
tswtcl0dbg(("tswtcl_process: no data\n"));
- atomic_add_64(&tswtcl_data->epackets, 1);
+ atomic_inc_64(&tswtcl_data->epackets);
return (EINVAL);
}
}
@@ -93,7 +91,7 @@ tswtcl_process(mblk_t **mpp, tswtcl_data_t *tswtcl_data,
if ((mp->b_wptr - mp->b_rptr) < IP_SIMPLE_HDR_LENGTH) {
if (!pullupmsg(mp, IP_SIMPLE_HDR_LENGTH)) {
tswtcl0dbg(("tswtcl_process: pullup error\n"));
- atomic_add_64(&tswtcl_data->epackets, 1);
+ atomic_inc_64(&tswtcl_data->epackets);
return (EINVAL);
}
}
@@ -184,14 +182,14 @@ tswtcl_process(mblk_t **mpp, tswtcl_data_t *tswtcl_data,
/* Update Stats */
if (*next_action == cfg_parms->green_action) {
- atomic_add_64(&tswtcl_data->green_packets, 1);
+ atomic_inc_64(&tswtcl_data->green_packets);
atomic_add_64(&tswtcl_data->green_bits, pkt_len);
} else if (*next_action == cfg_parms->yellow_action) {
- atomic_add_64(&tswtcl_data->yellow_packets, 1);
+ atomic_inc_64(&tswtcl_data->yellow_packets);
atomic_add_64(&tswtcl_data->yellow_bits, pkt_len);
} else {
ASSERT(*next_action == cfg_parms->red_action);
- atomic_add_64(&tswtcl_data->red_packets, 1);
+ atomic_inc_64(&tswtcl_data->red_packets);
atomic_add_64(&tswtcl_data->red_bits, pkt_len);
}
return (0);
diff --git a/usr/src/uts/common/os/audit_memory.c b/usr/src/uts/common/os/audit_memory.c
index a3b61e13e2..d7af4cb3f8 100644
--- a/usr/src/uts/common/os/audit_memory.c
+++ b/usr/src/uts/common/os/audit_memory.c
@@ -39,7 +39,7 @@ kmem_cache_t *au_pad_cache;
void
au_pathhold(struct audit_path *app)
{
- atomic_add_32(&app->audp_ref, 1);
+ atomic_inc_32(&app->audp_ref);
}
/*
@@ -48,7 +48,7 @@ au_pathhold(struct audit_path *app)
void
au_pathrele(struct audit_path *app)
{
- if (atomic_add_32_nv(&app->audp_ref, -1) > 0)
+ if (atomic_dec_32_nv(&app->audp_ref) > 0)
return;
kmem_free(app, app->audp_size);
}
diff --git a/usr/src/uts/common/os/bio.c b/usr/src/uts/common/os/bio.c
index 0db01f80d7..96502b8230 100644
--- a/usr/src/uts/common/os/bio.c
+++ b/usr/src/uts/common/os/bio.c
@@ -1213,7 +1213,7 @@ biowait(struct buf *bp)
ASSERT(SEMA_HELD(&bp->b_sem));
cpup = CPU;
- atomic_add_64(&cpup->cpu_stats.sys.iowait, 1);
+ atomic_inc_64(&cpup->cpu_stats.sys.iowait);
DTRACE_IO1(wait__start, struct buf *, bp);
/*
@@ -1226,7 +1226,7 @@ biowait(struct buf *bp)
sema_p(&bp->b_io);
DTRACE_IO1(wait__done, struct buf *, bp);
- atomic_add_64(&cpup->cpu_stats.sys.iowait, -1);
+ atomic_dec_64(&cpup->cpu_stats.sys.iowait);
error = geterror(bp);
if ((bp->b_flags & B_ASYNC) == 0) {
diff --git a/usr/src/uts/common/os/clock.c b/usr/src/uts/common/os/clock.c
index 574dc31f8a..ed217c45b7 100644
--- a/usr/src/uts/common/os/clock.c
+++ b/usr/src/uts/common/os/clock.c
@@ -1987,7 +1987,7 @@ deadman(void)
* typically be a multiple of the total number of CPUs in
* the system.
*/
- atomic_add_32(&deadman_panics, 1);
+ atomic_inc_32(&deadman_panics);
if (!deadman_enabled) {
CPU->cpu_deadman_countdown = deadman_seconds;
diff --git a/usr/src/uts/common/os/contract.c b/usr/src/uts/common/os/contract.c
index ebaa6bfe41..f3c888a1db 100644
--- a/usr/src/uts/common/os/contract.c
+++ b/usr/src/uts/common/os/contract.c
@@ -2302,7 +2302,7 @@ cte_publish_all(contract_t *ct, ct_kevent_t *e, nvlist_t *data, nvlist_t *gdata)
e->cte_data = data;
e->cte_gdata = gdata;
e->cte_refs = 3;
- evid = e->cte_id = atomic_add_64_nv(&ct->ct_type->ct_type_evid, 1);
+ evid = e->cte_id = atomic_inc_64_nv(&ct->ct_type->ct_type_evid);
contract_hold(ct);
/*
diff --git a/usr/src/uts/common/os/cred.c b/usr/src/uts/common/os/cred.c
index 7add6a4b8e..733fd03a92 100644
--- a/usr/src/uts/common/os/cred.c
+++ b/usr/src/uts/common/os/cred.c
@@ -344,7 +344,7 @@ void
crhold(cred_t *cr)
{
ASSERT(cr->cr_ref != 0xdeadbeef && cr->cr_ref != 0);
- atomic_add_32(&cr->cr_ref, 1);
+ atomic_inc_32(&cr->cr_ref);
}
/*
@@ -355,7 +355,7 @@ void
crfree(cred_t *cr)
{
ASSERT(cr->cr_ref != 0xdeadbeef && cr->cr_ref != 0);
- if (atomic_add_32_nv(&cr->cr_ref, -1) == 0) {
+ if (atomic_dec_32_nv(&cr->cr_ref) == 0) {
ASSERT(cr != kcred);
if (cr->cr_label)
label_rele(cr->cr_label);
@@ -1467,12 +1467,12 @@ crsetcredgrp(cred_t *cr, credgrp_t *grps)
void
crgrprele(credgrp_t *grps)
{
- if (atomic_add_32_nv(&grps->crg_ref, -1) == 0)
+ if (atomic_dec_32_nv(&grps->crg_ref) == 0)
kmem_free(grps, CREDGRPSZ(grps->crg_ngroups));
}
static void
crgrphold(credgrp_t *grps)
{
- atomic_add_32(&grps->crg_ref, 1);
+ atomic_inc_32(&grps->crg_ref);
}
diff --git a/usr/src/uts/common/os/ddi_intr.c b/usr/src/uts/common/os/ddi_intr.c
index 2f8b72c46e..6edef36fbb 100644
--- a/usr/src/uts/common/os/ddi_intr.c
+++ b/usr/src/uts/common/os/ddi_intr.c
@@ -678,7 +678,7 @@ ddi_intr_dup_handler(ddi_intr_handle_t org, int dup_inum,
dup_hdlp = (ddi_intr_handle_impl_t *)
kmem_alloc(sizeof (ddi_intr_handle_impl_t), KM_SLEEP);
- atomic_add_32(&hdlp->ih_dup_cnt, 1);
+ atomic_inc_32(&hdlp->ih_dup_cnt);
*dup = (ddi_intr_handle_t)dup_hdlp;
bcopy(hdlp, dup_hdlp, sizeof (ddi_intr_handle_impl_t));
diff --git a/usr/src/uts/common/os/ddifm.c b/usr/src/uts/common/os/ddifm.c
index 8ad563aad2..533fa15aed 100644
--- a/usr/src/uts/common/os/ddifm.c
+++ b/usr/src/uts/common/os/ddifm.c
@@ -455,7 +455,7 @@ fm_dev_ereport_postv(dev_info_t *dip, dev_info_t *eqdip,
/* Count errors as drops. */
err: if (fmhdl)
- atomic_add_64(&fmhdl->fh_kstat.fek_erpt_dropped.value.ui64, 1);
+ atomic_inc_64(&fmhdl->fh_kstat.fek_erpt_dropped.value.ui64);
/* Free up nvlists if normal interfaces were used to allocate memory */
out: if (ereport && (nva == NULL))
@@ -1019,7 +1019,7 @@ i_ddi_fm_acc_err_set(ddi_acc_handle_t handle, uint64_t ena, int status,
i_hdlp->ahi_err->err_ena = ena;
i_hdlp->ahi_err->err_status = status;
i_hdlp->ahi_err->err_expected = flag;
- atomic_add_64(&fmhdl->fh_kstat.fek_acc_err.value.ui64, 1);
+ atomic_inc_64(&fmhdl->fh_kstat.fek_acc_err.value.ui64);
}
void
@@ -1032,7 +1032,7 @@ i_ddi_fm_dma_err_set(ddi_dma_handle_t handle, uint64_t ena, int status,
hdlp->dmai_error.err_ena = ena;
hdlp->dmai_error.err_status = status;
hdlp->dmai_error.err_expected = flag;
- atomic_add_64(&fmhdl->fh_kstat.fek_dma_err.value.ui64, 1);
+ atomic_inc_64(&fmhdl->fh_kstat.fek_dma_err.value.ui64);
}
ddi_fmcompare_t
diff --git a/usr/src/uts/common/os/devcfg.c b/usr/src/uts/common/os/devcfg.c
index f8331cb132..da412f4ea8 100644
--- a/usr/src/uts/common/os/devcfg.c
+++ b/usr/src/uts/common/os/devcfg.c
@@ -1612,10 +1612,10 @@ i_ndi_config_node(dev_info_t *dip, ddi_node_state_t state, uint_t flag)
rv = DDI_FAILURE;
break;
}
- atomic_add_long(&devinfo_attach_detach, 1);
+ atomic_inc_ulong(&devinfo_attach_detach);
if ((rv = attach_node(dip)) == DDI_SUCCESS)
i_ddi_set_node_state(dip, DS_ATTACHED);
- atomic_add_long(&devinfo_attach_detach, -1);
+ atomic_dec_ulong(&devinfo_attach_detach);
break;
case DS_ATTACHED:
if ((rv = postattach_node(dip)) == DDI_SUCCESS)
@@ -1681,7 +1681,7 @@ i_ndi_unconfig_node(dev_info_t *dip, ddi_node_state_t state, uint_t flag)
i_ddi_set_node_state(dip, DS_INITIALIZED);
break;
case DS_ATTACHED:
- atomic_add_long(&devinfo_attach_detach, 1);
+ atomic_inc_ulong(&devinfo_attach_detach);
mutex_enter(&(DEVI(dip)->devi_lock));
DEVI_SET_DETACHING(dip);
@@ -1696,7 +1696,7 @@ i_ndi_unconfig_node(dev_info_t *dip, ddi_node_state_t state, uint_t flag)
DEVI_CLR_DETACHING(dip);
mutex_exit(&(DEVI(dip)->devi_lock));
- atomic_add_long(&devinfo_attach_detach, -1);
+ atomic_dec_ulong(&devinfo_attach_detach);
break;
case DS_READY:
if ((rv = predetach_node(dip, flag)) == DDI_SUCCESS)
diff --git a/usr/src/uts/common/os/devpolicy.c b/usr/src/uts/common/os/devpolicy.c
index 441b1d7c17..7acc3cef86 100644
--- a/usr/src/uts/common/os/devpolicy.c
+++ b/usr/src/uts/common/os/devpolicy.c
@@ -181,14 +181,14 @@ void
dphold(devplcy_t *dp)
{
ASSERT(dp->dp_ref != 0xdeadbeef && dp->dp_ref != 0);
- atomic_add_32(&dp->dp_ref, 1);
+ atomic_inc_32(&dp->dp_ref);
}
void
dpfree(devplcy_t *dp)
{
ASSERT(dp->dp_ref != 0xdeadbeef && dp->dp_ref != 0);
- if (atomic_add_32_nv(&dp->dp_ref, -1) == 0)
+ if (atomic_dec_32_nv(&dp->dp_ref) == 0)
kmem_free(dp, sizeof (*dp));
}
diff --git a/usr/src/uts/common/os/driver_lyr.c b/usr/src/uts/common/os/driver_lyr.c
index e59acc4370..0d6cf16939 100644
--- a/usr/src/uts/common/os/driver_lyr.c
+++ b/usr/src/uts/common/os/driver_lyr.c
@@ -408,7 +408,7 @@ handle_alloc(vnode_t *vp, struct ldi_ident *ident)
/* add it to the handle hash */
lhp->lh_next = ldi_handle_hash[index];
ldi_handle_hash[index] = lhp;
- atomic_add_long(&ldi_handle_hash_count, 1);
+ atomic_inc_ulong(&ldi_handle_hash_count);
LDI_ALLOCFREE((CE_WARN, "ldi handle alloc: new "
"lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
@@ -448,7 +448,7 @@ handle_release(struct ldi_handle *lhp)
lhpp = handle_find_ref_nolock(lhp->lh_vp, lhp->lh_ident);
ASSERT((lhpp != NULL) && (*lhpp != NULL));
*lhpp = lhp->lh_next;
- atomic_add_long(&ldi_handle_hash_count, -1);
+ atomic_dec_ulong(&ldi_handle_hash_count);
mutex_exit(&ldi_handle_hash_lock[index]);
VN_RELE(lhp->lh_vp);
diff --git a/usr/src/uts/common/os/errorq.c b/usr/src/uts/common/os/errorq.c
index 547076f0ad..0924da6eda 100644
--- a/usr/src/uts/common/os/errorq.c
+++ b/usr/src/uts/common/os/errorq.c
@@ -524,7 +524,7 @@ errorq_dispatch(errorq_t *eqp, const void *data, size_t len, uint_t flag)
errorq_elem_t *eep, *old;
if (eqp == NULL || !(eqp->eq_flags & ERRORQ_ACTIVE)) {
- atomic_add_64(&errorq_lost, 1);
+ atomic_inc_64(&errorq_lost);
return; /* drop error if queue is uninitialized or disabled */
}
@@ -533,7 +533,7 @@ errorq_dispatch(errorq_t *eqp, const void *data, size_t len, uint_t flag)
if ((i = errorq_availbit(eqp->eq_bitmap, eqp->eq_qlen,
eqp->eq_rotor)) == -1) {
- atomic_add_64(&eqp->eq_kstat.eqk_dropped.value.ui64, 1);
+ atomic_inc_64(&eqp->eq_kstat.eqk_dropped.value.ui64);
return;
}
BT_ATOMIC_SET_EXCL(eqp->eq_bitmap, i, rval);
@@ -559,7 +559,7 @@ errorq_dispatch(errorq_t *eqp, const void *data, size_t len, uint_t flag)
break;
}
- atomic_add_64(&eqp->eq_kstat.eqk_dispatched.value.ui64, 1);
+ atomic_inc_64(&eqp->eq_kstat.eqk_dispatched.value.ui64);
if (flag == ERRORQ_ASYNC && eqp->eq_id != NULL)
ddi_trigger_softintr(eqp->eq_id);
@@ -867,7 +867,7 @@ errorq_reserve(errorq_t *eqp)
errorq_elem_t *eqep;
if (eqp == NULL || !(eqp->eq_flags & ERRORQ_ACTIVE)) {
- atomic_add_64(&errorq_lost, 1);
+ atomic_inc_64(&errorq_lost);
return (NULL);
}
@@ -876,7 +876,7 @@ errorq_reserve(errorq_t *eqp)
if ((i = errorq_availbit(eqp->eq_bitmap, eqp->eq_qlen,
eqp->eq_rotor)) == -1) {
- atomic_add_64(&eqp->eq_kstat.eqk_dropped.value.ui64, 1);
+ atomic_inc_64(&eqp->eq_kstat.eqk_dropped.value.ui64);
return (NULL);
}
BT_ATOMIC_SET_EXCL(eqp->eq_bitmap, i, rval);
@@ -893,7 +893,7 @@ errorq_reserve(errorq_t *eqp)
eqnp->eqn_nvl = fm_nvlist_create(eqnp->eqn_nva);
}
- atomic_add_64(&eqp->eq_kstat.eqk_reserved.value.ui64, 1);
+ atomic_inc_64(&eqp->eq_kstat.eqk_reserved.value.ui64);
return (eqep);
}
@@ -908,7 +908,7 @@ errorq_commit(errorq_t *eqp, errorq_elem_t *eqep, uint_t flag)
errorq_elem_t *old;
if (eqep == NULL || !(eqp->eq_flags & ERRORQ_ACTIVE)) {
- atomic_add_64(&eqp->eq_kstat.eqk_commit_fail.value.ui64, 1);
+ atomic_inc_64(&eqp->eq_kstat.eqk_commit_fail.value.ui64);
return;
}
@@ -921,7 +921,7 @@ errorq_commit(errorq_t *eqp, errorq_elem_t *eqep, uint_t flag)
break;
}
- atomic_add_64(&eqp->eq_kstat.eqk_committed.value.ui64, 1);
+ atomic_inc_64(&eqp->eq_kstat.eqk_committed.value.ui64);
if (flag == ERRORQ_ASYNC && eqp->eq_id != NULL)
ddi_trigger_softintr(eqp->eq_id);
@@ -939,7 +939,7 @@ errorq_cancel(errorq_t *eqp, errorq_elem_t *eqep)
BT_ATOMIC_CLEAR(eqp->eq_bitmap, eqep - eqp->eq_elems);
- atomic_add_64(&eqp->eq_kstat.eqk_cancelled.value.ui64, 1);
+ atomic_inc_64(&eqp->eq_kstat.eqk_cancelled.value.ui64);
}
/*
diff --git a/usr/src/uts/common/os/evchannels.c b/usr/src/uts/common/os/evchannels.c
index e34af700ca..3495dcfcfa 100644
--- a/usr/src/uts/common/os/evchannels.c
+++ b/usr/src/uts/common/os/evchannels.c
@@ -379,7 +379,7 @@ evch_gevent_free(evch_gevent_t *evp)
{
int32_t refcnt;
- refcnt = (int32_t)atomic_add_32_nv(&evp->ge_refcount, -1);
+ refcnt = (int32_t)atomic_dec_32_nv(&evp->ge_refcount);
if (refcnt <= 0) {
if (evp->ge_destruct != NULL) {
evp->ge_destruct((void *)&(evp->ge_payload),
@@ -647,7 +647,7 @@ evch_evq_pub(evch_eventq_t *eqp, void *ev, int flags)
}
qep->q_objref = (void *)evp;
qep->q_objsize = size;
- atomic_add_32(&evp->ge_refcount, 1);
+ atomic_inc_32(&evp->ge_refcount);
mutex_enter(&eqp->eq_queuemx);
evch_q_in(&eqp->eq_eventq, qep);
diff --git a/usr/src/uts/common/os/exit.c b/usr/src/uts/common/os/exit.c
index b97a09454b..21360145bc 100644
--- a/usr/src/uts/common/os/exit.c
+++ b/usr/src/uts/common/os/exit.c
@@ -597,7 +597,7 @@ proc_exit(int why, int what)
*/
mutex_enter(&p->p_lock);
ASSERT(p->p_pool->pool_ref > 0);
- atomic_add_32(&p->p_pool->pool_ref, -1);
+ atomic_dec_32(&p->p_pool->pool_ref);
p->p_pool = pool_default;
/*
* Now that our address space has been freed and all other threads
diff --git a/usr/src/uts/common/os/fio.c b/usr/src/uts/common/os/fio.c
index 3b47e05ef2..6dc0d00011 100644
--- a/usr/src/uts/common/os/fio.c
+++ b/usr/src/uts/common/os/fio.c
@@ -67,7 +67,7 @@ static uint32_t afd_alloc; /* count of kmem_alloc()s */
static uint32_t afd_free; /* count of kmem_free()s */
static uint32_t afd_wait; /* count of waits on non-zero ref count */
#define MAXFD(x) (afd_maxfd = ((afd_maxfd >= (x))? afd_maxfd : (x)))
-#define COUNT(x) atomic_add_32(&x, 1)
+#define COUNT(x) atomic_inc_32(&x)
#else /* DEBUG */
diff --git a/usr/src/uts/common/os/fm.c b/usr/src/uts/common/os/fm.c
index fbe45e833e..66fe699366 100644
--- a/usr/src/uts/common/os/fm.c
+++ b/usr/src/uts/common/os/fm.c
@@ -517,19 +517,19 @@ fm_ereport_post(nvlist_t *ereport, int evc_flag)
(void) nvlist_size(ereport, &nvl_size, NV_ENCODE_NATIVE);
if (nvl_size > ERPT_DATA_SZ || nvl_size == 0) {
- atomic_add_64(&erpt_kstat_data.erpt_dropped.value.ui64, 1);
+ atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
return;
}
if (sysevent_evc_bind(FM_ERROR_CHAN, &error_chan,
EVCH_CREAT|EVCH_HOLD_PEND) != 0) {
- atomic_add_64(&erpt_kstat_data.erpt_dropped.value.ui64, 1);
+ atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
return;
}
if (sysevent_evc_publish(error_chan, EC_FM, ESC_FM_ERROR,
SUNW_VENDOR, FM_PUB, ereport, evc_flag) != 0) {
- atomic_add_64(&erpt_kstat_data.erpt_dropped.value.ui64, 1);
+ atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
(void) sysevent_evc_unbind(error_chan);
return;
}
@@ -791,8 +791,7 @@ fm_payload_set(nvlist_t *payload, ...)
va_end(ap);
if (ret)
- atomic_add_64(
- &erpt_kstat_data.payload_set_failed.value.ui64, 1);
+ atomic_inc_64(&erpt_kstat_data.payload_set_failed.value.ui64);
}
/*
@@ -825,24 +824,24 @@ fm_ereport_set(nvlist_t *ereport, int version, const char *erpt_class,
int ret;
if (version != FM_EREPORT_VERS0) {
- atomic_add_64(&erpt_kstat_data.erpt_set_failed.value.ui64, 1);
+ atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
return;
}
(void) snprintf(ereport_class, FM_MAX_CLASS, "%s.%s",
FM_EREPORT_CLASS, erpt_class);
if (nvlist_add_string(ereport, FM_CLASS, ereport_class) != 0) {
- atomic_add_64(&erpt_kstat_data.erpt_set_failed.value.ui64, 1);
+ atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
return;
}
if (nvlist_add_uint64(ereport, FM_EREPORT_ENA, ena)) {
- atomic_add_64(&erpt_kstat_data.erpt_set_failed.value.ui64, 1);
+ atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
}
if (nvlist_add_nvlist(ereport, FM_EREPORT_DETECTOR,
(nvlist_t *)detector) != 0) {
- atomic_add_64(&erpt_kstat_data.erpt_set_failed.value.ui64, 1);
+ atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
}
va_start(ap, detector);
@@ -851,7 +850,7 @@ fm_ereport_set(nvlist_t *ereport, int version, const char *erpt_class,
va_end(ap);
if (ret)
- atomic_add_64(&erpt_kstat_data.erpt_set_failed.value.ui64, 1);
+ atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
}
/*
@@ -874,19 +873,19 @@ static int
fm_fmri_hc_set_common(nvlist_t *fmri, int version, const nvlist_t *auth)
{
if (version != FM_HC_SCHEME_VERSION) {
- atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
+ atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return (0);
}
if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0 ||
nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_HC) != 0) {
- atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
+ atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return (0);
}
if (auth != NULL && nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
(nvlist_t *)auth) != 0) {
- atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
+ atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return (0);
}
@@ -918,22 +917,22 @@ fm_fmri_hc_set(nvlist_t *fmri, int version, const nvlist_t *auth,
pairs[i] = fm_nvlist_create(nva);
if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) {
- atomic_add_64(
- &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
+ atomic_inc_64(
+ &erpt_kstat_data.fmri_set_failed.value.ui64);
}
}
va_end(ap);
if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST, pairs, npairs) != 0)
- atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
+ atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
for (i = 0; i < npairs; i++)
fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN);
if (snvl != NULL) {
if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
- atomic_add_64(
- &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
+ atomic_inc_64(
+ &erpt_kstat_data.fmri_set_failed.value.ui64);
}
}
}
@@ -958,7 +957,7 @@ fm_fmri_dev_set(nvlist_t *fmri_dev, int version, const nvlist_t *auth,
int err = 0;
if (version != DEV_SCHEME_VERSION0) {
- atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
+ atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
@@ -979,7 +978,7 @@ fm_fmri_dev_set(nvlist_t *fmri_dev, int version, const nvlist_t *auth,
err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_TGTPTLUN0, tpl0);
if (err)
- atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
+ atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
}
@@ -1004,35 +1003,35 @@ fm_fmri_cpu_set(nvlist_t *fmri_cpu, int version, const nvlist_t *auth,
uint64_t *failedp = &erpt_kstat_data.fmri_set_failed.value.ui64;
if (version < CPU_SCHEME_VERSION1) {
- atomic_add_64(failedp, 1);
+ atomic_inc_64(failedp);
return;
}
if (nvlist_add_uint8(fmri_cpu, FM_VERSION, version) != 0) {
- atomic_add_64(failedp, 1);
+ atomic_inc_64(failedp);
return;
}
if (nvlist_add_string(fmri_cpu, FM_FMRI_SCHEME,
FM_FMRI_SCHEME_CPU) != 0) {
- atomic_add_64(failedp, 1);
+ atomic_inc_64(failedp);
return;
}
if (auth != NULL && nvlist_add_nvlist(fmri_cpu, FM_FMRI_AUTHORITY,
(nvlist_t *)auth) != 0)
- atomic_add_64(failedp, 1);
+ atomic_inc_64(failedp);
if (nvlist_add_uint32(fmri_cpu, FM_FMRI_CPU_ID, cpu_id) != 0)
- atomic_add_64(failedp, 1);
+ atomic_inc_64(failedp);
if (cpu_maskp != NULL && nvlist_add_uint8(fmri_cpu, FM_FMRI_CPU_MASK,
*cpu_maskp) != 0)
- atomic_add_64(failedp, 1);
+ atomic_inc_64(failedp);
if (serial_idp == NULL || nvlist_add_string(fmri_cpu,
FM_FMRI_CPU_SERIAL_ID, (char *)serial_idp) != 0)
- atomic_add_64(failedp, 1);
+ atomic_inc_64(failedp);
}
/*
@@ -1053,49 +1052,47 @@ fm_fmri_mem_set(nvlist_t *fmri, int version, const nvlist_t *auth,
const char *unum, const char *serial, uint64_t offset)
{
if (version != MEM_SCHEME_VERSION0) {
- atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
+ atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (!serial && (offset != (uint64_t)-1)) {
- atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
+ atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
- atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
+ atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_MEM) != 0) {
- atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
+ atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (auth != NULL) {
if (nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
(nvlist_t *)auth) != 0) {
- atomic_add_64(
- &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
+ atomic_inc_64(
+ &erpt_kstat_data.fmri_set_failed.value.ui64);
}
}
if (nvlist_add_string(fmri, FM_FMRI_MEM_UNUM, unum) != 0) {
- atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
+ atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
}
if (serial != NULL) {
if (nvlist_add_string_array(fmri, FM_FMRI_MEM_SERIAL_ID,
(char **)&serial, 1) != 0) {
- atomic_add_64(
- &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
+ atomic_inc_64(
+ &erpt_kstat_data.fmri_set_failed.value.ui64);
}
- if (offset != (uint64_t)-1) {
- if (nvlist_add_uint64(fmri, FM_FMRI_MEM_OFFSET,
- offset) != 0) {
- atomic_add_64(&erpt_kstat_data.
- fmri_set_failed.value.ui64, 1);
- }
+ if (offset != (uint64_t)-1 && nvlist_add_uint64(fmri,
+ FM_FMRI_MEM_OFFSET, offset) != 0) {
+ atomic_inc_64(
+ &erpt_kstat_data.fmri_set_failed.value.ui64);
}
}
}
@@ -1105,28 +1102,28 @@ fm_fmri_zfs_set(nvlist_t *fmri, int version, uint64_t pool_guid,
uint64_t vdev_guid)
{
if (version != ZFS_SCHEME_VERSION0) {
- atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
+ atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
- atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
+ atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_ZFS) != 0) {
- atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
+ atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_POOL, pool_guid) != 0) {
- atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
+ atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
}
if (vdev_guid != 0) {
if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_VDEV, vdev_guid) != 0) {
- atomic_add_64(
- &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
+ atomic_inc_64(
+ &erpt_kstat_data.fmri_set_failed.value.ui64);
}
}
}
@@ -1306,20 +1303,20 @@ fm_fmri_hc_create(nvlist_t *fmri, int version, const nvlist_t *auth,
*/
if (nvlist_lookup_nvlist_array(bboard, FM_FMRI_HC_LIST, &hcl, &n)
!= 0) {
- atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
+ atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
for (i = 0; i < n; i++) {
if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_NAME,
&hcname) != 0) {
- atomic_add_64(
- &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
+ atomic_inc_64(
+ &erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_ID, &hcid) != 0) {
- atomic_add_64(
- &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
+ atomic_inc_64(
+ &erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
@@ -1331,8 +1328,8 @@ fm_fmri_hc_create(nvlist_t *fmri, int version, const nvlist_t *auth,
fm_nvlist_destroy(pairs[j],
FM_NVA_RETAIN);
}
- atomic_add_64(
- &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
+ atomic_inc_64(
+ &erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
}
@@ -1356,8 +1353,8 @@ fm_fmri_hc_create(nvlist_t *fmri, int version, const nvlist_t *auth,
fm_nvlist_destroy(pairs[j],
FM_NVA_RETAIN);
}
- atomic_add_64(
- &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
+ atomic_inc_64(
+ &erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
}
@@ -1368,7 +1365,7 @@ fm_fmri_hc_create(nvlist_t *fmri, int version, const nvlist_t *auth,
*/
if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST, pairs,
npairs + n) != 0) {
- atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
+ atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
@@ -1378,8 +1375,8 @@ fm_fmri_hc_create(nvlist_t *fmri, int version, const nvlist_t *auth,
if (snvl != NULL) {
if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
- atomic_add_64(
- &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
+ atomic_inc_64(
+ &erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
}
diff --git a/usr/src/uts/common/os/fork.c b/usr/src/uts/common/os/fork.c
index 2e9b2ef403..5d8c144f99 100644
--- a/usr/src/uts/common/os/fork.c
+++ b/usr/src/uts/common/os/fork.c
@@ -274,7 +274,7 @@ cfork(int isvfork, int isfork1, int flags)
tk = cp->p_task;
task_detach(cp);
ASSERT(cp->p_pool->pool_ref > 0);
- atomic_add_32(&cp->p_pool->pool_ref, -1);
+ atomic_dec_32(&cp->p_pool->pool_ref);
mutex_exit(&cp->p_lock);
pid_exit(cp, tk);
mutex_exit(&pidlock);
@@ -628,7 +628,7 @@ forklwperr:
tk = cp->p_task;
task_detach(cp);
ASSERT(cp->p_pool->pool_ref > 0);
- atomic_add_32(&cp->p_pool->pool_ref, -1);
+ atomic_dec_32(&cp->p_pool->pool_ref);
mutex_exit(&cp->p_lock);
orphpp = &p->p_orphan;
@@ -1128,7 +1128,7 @@ getproc(proc_t **cpp, pid_t pid, uint_t flags)
} else {
cp->p_pool = pp->p_pool;
}
- atomic_add_32(&cp->p_pool->pool_ref, 1);
+ atomic_inc_32(&cp->p_pool->pool_ref);
mutex_exit(&pp->p_lock);
/*
diff --git a/usr/src/uts/common/os/kcpc.c b/usr/src/uts/common/os/kcpc.c
index c9707c5b48..af44c16356 100644
--- a/usr/src/uts/common/os/kcpc.c
+++ b/usr/src/uts/common/os/kcpc.c
@@ -938,7 +938,7 @@ kcpc_overflow_intr(caddr_t arg, uint64_t bitmap)
if (t->t_flag & T_INTR_THREAD) {
klwp_t *lwp;
- atomic_add_32(&kcpc_intrctx_count, 1);
+ atomic_inc_32(&kcpc_intrctx_count);
/*
* Note that t_lwp is always set to point at the underlying
@@ -1008,7 +1008,7 @@ kcpc_overflow_intr(caddr_t arg, uint64_t bitmap)
cmn_err(CE_NOTE,
"null cpc context found in overflow handler!\n");
#endif
- atomic_add_32(&kcpc_nullctx_count, 1);
+ atomic_inc_32(&kcpc_nullctx_count);
} else if ((ctx->kc_flags & KCPC_CTX_INVALID) == 0) {
/*
* Schedule an ast to sample the counters, which will
diff --git a/usr/src/uts/common/os/klpd.c b/usr/src/uts/common/os/klpd.c
index 2d7cd7e1c5..a3cd79c41d 100644
--- a/usr/src/uts/common/os/klpd.c
+++ b/usr/src/uts/common/os/klpd.c
@@ -87,7 +87,7 @@ extern size_t max_vnode_path;
void
klpd_rele(klpd_reg_t *p)
{
- if (atomic_add_32_nv(&p->klpd_ref, -1) == 0) {
+ if (atomic_dec_32_nv(&p->klpd_ref) == 0) {
if (p->klpd_refp != NULL)
klpd_unlink(p);
if (p->klpd_cred != NULL)
@@ -116,7 +116,7 @@ klpd_rele_next(klpd_reg_t *p)
static void
klpd_hold(klpd_reg_t *p)
{
- atomic_add_32(&p->klpd_ref, 1);
+ atomic_inc_32(&p->klpd_ref);
}
/*
@@ -350,7 +350,7 @@ klpd_call(const cred_t *cr, const priv_set_t *req, va_list ap)
*/
if (mutex_owned(&pidlock) || mutex_owned(&curproc->p_lock) ||
mutex_owned(&curproc->p_crlock)) {
- atomic_add_32(&klpd_bad_locks, 1);
+ atomic_inc_32(&klpd_bad_locks);
return (-1);
}
@@ -674,13 +674,13 @@ out:
void
crklpd_hold(credklpd_t *crkpd)
{
- atomic_add_32(&crkpd->crkl_ref, 1);
+ atomic_inc_32(&crkpd->crkl_ref);
}
void
crklpd_rele(credklpd_t *crkpd)
{
- if (atomic_add_32_nv(&crkpd->crkl_ref, -1) == 0) {
+ if (atomic_dec_32_nv(&crkpd->crkl_ref) == 0) {
if (crkpd->crkl_reg != NULL)
klpd_rele(crkpd->crkl_reg);
mutex_destroy(&crkpd->crkl_lock);
diff --git a/usr/src/uts/common/os/kmem.c b/usr/src/uts/common/os/kmem.c
index 88382dfce4..ab278ded82 100644
--- a/usr/src/uts/common/os/kmem.c
+++ b/usr/src/uts/common/os/kmem.c
@@ -1626,7 +1626,7 @@ slab_alloc_failure:
vmem_alloc_failure:
kmem_log_event(kmem_failure_log, cp, NULL, NULL);
- atomic_add_64(&cp->cache_alloc_fail, 1);
+ atomic_inc_64(&cp->cache_alloc_fail);
return (NULL);
}
@@ -1995,7 +1995,7 @@ kmem_cache_alloc_debug(kmem_cache_t *cp, void *buf, int kmflag, int construct,
if (mtbf || (construct && cp->cache_constructor != NULL &&
cp->cache_constructor(buf, cp->cache_private, kmflag) != 0)) {
- atomic_add_64(&cp->cache_alloc_fail, 1);
+ atomic_inc_64(&cp->cache_alloc_fail);
btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
if (cp->cache_flags & KMF_DEADBEEF)
copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
@@ -2603,7 +2603,7 @@ kmem_cache_alloc(kmem_cache_t *cp, int kmflag)
if (cp->cache_constructor != NULL &&
cp->cache_constructor(buf, cp->cache_private, kmflag) != 0) {
- atomic_add_64(&cp->cache_alloc_fail, 1);
+ atomic_inc_64(&cp->cache_alloc_fail);
kmem_slab_free(cp, buf);
return (NULL);
}
@@ -4877,7 +4877,7 @@ kmem_move_buffer(kmem_move_t *callback)
} else if (cp->cache_constructor != NULL &&
cp->cache_constructor(callback->kmm_to_buf, cp->cache_private,
KM_NOSLEEP) != 0) {
- atomic_add_64(&cp->cache_alloc_fail, 1);
+ atomic_inc_64(&cp->cache_alloc_fail);
KMEM_STAT_ADD(kmem_move_stats.kms_constructor_fail);
kmem_slab_free(cp, callback->kmm_to_buf);
kmem_move_end(cp, callback);
diff --git a/usr/src/uts/common/os/lgrp.c b/usr/src/uts/common/os/lgrp.c
index 3f344e2cfd..b75f438f59 100644
--- a/usr/src/uts/common/os/lgrp.c
+++ b/usr/src/uts/common/os/lgrp.c
@@ -584,12 +584,12 @@ lgrp_config(lgrp_config_flag_t event, uintptr_t resource, uintptr_t where)
cp->cpu_lpl = lpl_bootstrap;
lgrp_plat_config(event, resource);
- atomic_add_32(&lgrp_gen, 1);
+ atomic_inc_32(&lgrp_gen);
break;
case LGRP_CONFIG_CPU_DEL:
lgrp_plat_config(event, resource);
- atomic_add_32(&lgrp_gen, 1);
+ atomic_inc_32(&lgrp_gen);
break;
case LGRP_CONFIG_CPU_ONLINE:
@@ -601,7 +601,7 @@ lgrp_config(lgrp_config_flag_t event, uintptr_t resource, uintptr_t where)
panic("lpl_topo_verify failed: %d", rc);
}
lgrp_plat_config(event, resource);
- atomic_add_32(&lgrp_gen, 1);
+ atomic_inc_32(&lgrp_gen);
break;
case LGRP_CONFIG_CPU_OFFLINE:
@@ -614,7 +614,7 @@ lgrp_config(lgrp_config_flag_t event, uintptr_t resource, uintptr_t where)
panic("lpl_topo_verify failed: %d", rc);
}
lgrp_plat_config(event, resource);
- atomic_add_32(&lgrp_gen, 1);
+ atomic_inc_32(&lgrp_gen);
break;
case LGRP_CONFIG_CPUPART_ADD:
@@ -643,12 +643,12 @@ lgrp_config(lgrp_config_flag_t event, uintptr_t resource, uintptr_t where)
*/
case LGRP_CONFIG_MEM_ADD:
lgrp_mem_init((int)resource, where, B_FALSE);
- atomic_add_32(&lgrp_gen, 1);
+ atomic_inc_32(&lgrp_gen);
break;
case LGRP_CONFIG_MEM_DEL:
lgrp_mem_fini((int)resource, where, B_FALSE);
- atomic_add_32(&lgrp_gen, 1);
+ atomic_inc_32(&lgrp_gen);
break;
case LGRP_CONFIG_MEM_RENAME: {
@@ -658,12 +658,12 @@ lgrp_config(lgrp_config_flag_t event, uintptr_t resource, uintptr_t where)
lgrp_mem_rename((int)resource,
ren_arg->lmem_rename_from,
ren_arg->lmem_rename_to);
- atomic_add_32(&lgrp_gen, 1);
+ atomic_inc_32(&lgrp_gen);
break;
}
case LGRP_CONFIG_GEN_UPDATE:
- atomic_add_32(&lgrp_gen, 1);
+ atomic_inc_32(&lgrp_gen);
break;
case LGRP_CONFIG_FLATTEN:
diff --git a/usr/src/uts/common/os/mmapobj.c b/usr/src/uts/common/os/mmapobj.c
index 1c6bbb80a2..b33ef6922b 100644
--- a/usr/src/uts/common/os/mmapobj.c
+++ b/usr/src/uts/common/os/mmapobj.c
@@ -309,9 +309,9 @@ lib_va_free(struct lib_va *lvp)
vmem_xfree(is_64bit ? lib_va_64_arena : lib_va_32_arena,
lvp->lv_base_va, lvp->lv_len);
if (is_64bit) {
- atomic_add_32(&libs_mapped_64, -1);
+ atomic_dec_32(&libs_mapped_64);
} else {
- atomic_add_32(&libs_mapped_32, -1);
+ atomic_dec_32(&libs_mapped_32);
}
}
kmem_free(lvp, sizeof (struct lib_va));
@@ -472,10 +472,10 @@ lib_va_add_hash(caddr_t base_va, ssize_t len, size_t align, vattr_t *vap)
if (base_va != NULL) {
if (model == DATAMODEL_LP64) {
- atomic_add_32(&libs_mapped_64, 1);
+ atomic_inc_32(&libs_mapped_64);
} else {
ASSERT(model == DATAMODEL_ILP32);
- atomic_add_32(&libs_mapped_32, 1);
+ atomic_inc_32(&libs_mapped_32);
}
}
ASSERT(*tmp == NULL);
diff --git a/usr/src/uts/common/os/pool.c b/usr/src/uts/common/os/pool.c
index 86a119fdea..c79c7dad51 100644
--- a/usr/src/uts/common/os/pool.c
+++ b/usr/src/uts/common/os/pool.c
@@ -1687,9 +1687,9 @@ skip:
*/
if (p->p_pool != pool) {
ASSERT(p->p_pool->pool_ref > 0);
- atomic_add_32(&p->p_pool->pool_ref, -1);
+ atomic_dec_32(&p->p_pool->pool_ref);
p->p_pool = pool;
- atomic_add_32(&p->p_pool->pool_ref, 1);
+ atomic_inc_32(&p->p_pool->pool_ref);
}
/*
* Okay, we've tortured this guy enough.
diff --git a/usr/src/uts/common/os/refstr.c b/usr/src/uts/common/os/refstr.c
index 3d86ecd9e2..67442d8384 100644
--- a/usr/src/uts/common/os/refstr.c
+++ b/usr/src/uts/common/os/refstr.c
@@ -24,8 +24,6 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/systm.h>
#include <sys/param.h>
#include <sys/atomic.h>
@@ -57,12 +55,12 @@ refstr_value(refstr_t *rsp)
void
refstr_hold(refstr_t *rsp)
{
- atomic_add_32(&rsp->rs_refcnt, 1);
+ atomic_inc_32(&rsp->rs_refcnt);
}
void
refstr_rele(refstr_t *rsp)
{
- if (atomic_add_32_nv(&rsp->rs_refcnt, -1) == 0)
+ if (atomic_dec_32_nv(&rsp->rs_refcnt) == 0)
kmem_free(rsp, (size_t)rsp->rs_size);
}
diff --git a/usr/src/uts/common/os/sid.c b/usr/src/uts/common/os/sid.c
index 019bcd09e2..e2aa18dc20 100644
--- a/usr/src/uts/common/os/sid.c
+++ b/usr/src/uts/common/os/sid.c
@@ -79,13 +79,13 @@ ksid_rele(ksid_t *ks)
void
ksiddomain_hold(ksiddomain_t *kd)
{
- atomic_add_32(&kd->kd_ref, 1);
+ atomic_inc_32(&kd->kd_ref);
}
void
ksiddomain_rele(ksiddomain_t *kd)
{
- if (atomic_add_32_nv(&kd->kd_ref, -1) == 0) {
+ if (atomic_dec_32_nv(&kd->kd_ref) == 0) {
/*
* The kd reference can only be incremented from 0 when
* the sid_lock is held; so we lock and then check need to
@@ -104,13 +104,13 @@ ksiddomain_rele(ksiddomain_t *kd)
void
ksidlist_hold(ksidlist_t *ksl)
{
- atomic_add_32(&ksl->ksl_ref, 1);
+ atomic_inc_32(&ksl->ksl_ref);
}
void
ksidlist_rele(ksidlist_t *ksl)
{
- if (atomic_add_32_nv(&ksl->ksl_ref, -1) == 0) {
+ if (atomic_dec_32_nv(&ksl->ksl_ref) == 0) {
int i;
for (i = 0; i < ksl->ksl_nsid; i++)
@@ -261,13 +261,13 @@ kcrsid_dup(credsid_t *org)
void
kcrsid_hold(credsid_t *kcr)
{
- atomic_add_32(&kcr->kr_ref, 1);
+ atomic_inc_32(&kcr->kr_ref);
}
void
kcrsid_rele(credsid_t *kcr)
{
- if (atomic_add_32_nv(&kcr->kr_ref, -1) == 0) {
+ if (atomic_dec_32_nv(&kcr->kr_ref) == 0) {
ksid_index_t i;
for (i = 0; i < KSID_COUNT; i++)
diff --git a/usr/src/uts/common/os/strsubr.c b/usr/src/uts/common/os/strsubr.c
index c8068ca965..1c39b5be88 100644
--- a/usr/src/uts/common/os/strsubr.c
+++ b/usr/src/uts/common/os/strsubr.c
@@ -2653,7 +2653,7 @@ hold_dm(struct streamtab *str, uint32_t qflag, uint32_t sqtype)
rw_enter(&perdm_rwlock, RW_READER);
for (p = perdm_list; p != NULL; p = p->dm_next) {
if (p->dm_str == str) { /* found one */
- atomic_add_32(&(p->dm_ref), 1);
+ atomic_inc_32(&(p->dm_ref));
rw_exit(&perdm_rwlock);
return (p);
}
diff --git a/usr/src/uts/common/os/sunddi.c b/usr/src/uts/common/os/sunddi.c
index b60db8dcea..151bf4b5b0 100644
--- a/usr/src/uts/common/os/sunddi.c
+++ b/usr/src/uts/common/os/sunddi.c
@@ -8092,7 +8092,7 @@ umem_lock_undo(struct as *as, void *arg, uint_t event)
(*cp->callbacks.cbo_umem_lock_cleanup)((ddi_umem_cookie_t)cp);
/* remove the cookie if reference goes to zero */
- if (atomic_add_long_nv((ulong_t *)(&(cp->cook_refcnt)), -1) == 0) {
+ if (atomic_dec_ulong_nv((ulong_t *)(&(cp->cook_refcnt))) == 0) {
kmem_free(cp, sizeof (struct ddi_umem_cookie));
}
}
@@ -8487,7 +8487,7 @@ i_ddi_umem_unlock(struct ddi_umem_cookie *p)
* case, just return the cookie memory.
*/
if ((rc != AS_CALLBACK_DELETE_DEFERRED) ||
- (atomic_add_long_nv((ulong_t *)(&(p->cook_refcnt)), -1)
+ (atomic_dec_ulong_nv((ulong_t *)(&(p->cook_refcnt)))
== 0)) {
kmem_free(p, sizeof (struct ddi_umem_cookie));
}
diff --git a/usr/src/uts/common/os/task.c b/usr/src/uts/common/os/task.c
index af578d9ce0..b25825edd1 100644
--- a/usr/src/uts/common/os/task.c
+++ b/usr/src/uts/common/os/task.c
@@ -373,7 +373,7 @@ task_hold_by_id_zone(taskid_t id, zoneid_t zoneid)
mutex_enter(&task_hash_lock);
if ((tk = task_find(id, zoneid)) != NULL)
- atomic_add_32(&tk->tk_hold_count, 1);
+ atomic_inc_32(&tk->tk_hold_count);
mutex_exit(&task_hash_lock);
return (tk);
@@ -406,7 +406,7 @@ task_hold_by_id(taskid_t id)
void
task_hold(task_t *tk)
{
- atomic_add_32(&tk->tk_hold_count, 1);
+ atomic_inc_32(&tk->tk_hold_count);
}
/*
diff --git a/usr/src/uts/common/os/tlabel.c b/usr/src/uts/common/os/tlabel.c
index 05a99bf3a6..29e24876a1 100644
--- a/usr/src/uts/common/os/tlabel.c
+++ b/usr/src/uts/common/os/tlabel.c
@@ -121,7 +121,7 @@ labeldup(const ts_label_t *val, int flag)
void
label_hold(ts_label_t *lab)
{
- atomic_add_32(&lab->tsl_ref, 1);
+ atomic_inc_32(&lab->tsl_ref);
}
/*
@@ -130,7 +130,7 @@ label_hold(ts_label_t *lab)
void
label_rele(ts_label_t *lab)
{
- if (atomic_add_32_nv(&lab->tsl_ref, -1) == 0)
+ if (atomic_dec_32_nv(&lab->tsl_ref) == 0)
kmem_cache_free(tslabel_cache, lab);
}
diff --git a/usr/src/uts/common/os/vmem.c b/usr/src/uts/common/os/vmem.c
index 6946a35a38..9f950c012f 100644
--- a/usr/src/uts/common/os/vmem.c
+++ b/usr/src/uts/common/os/vmem.c
@@ -1453,7 +1453,7 @@ vmem_create_common(const char *name, void *base, size_t size, size_t quantum,
vmem_t *vmp, *cur, **vmpp;
vmem_seg_t *vsp;
vmem_freelist_t *vfp;
- uint32_t id = atomic_add_32_nv(&vmem_id, 1);
+ uint32_t id = atomic_inc_32_nv(&vmem_id);
if (vmem_vmem_arena != NULL) {
vmp = vmem_alloc(vmem_vmem_arena, sizeof (vmem_t),
@@ -1555,7 +1555,7 @@ vmem_create_common(const char *name, void *base, size_t size, size_t quantum,
if (vmp->vm_cflags & VMC_POPULATOR) {
ASSERT(vmem_populators < VMEM_INITIAL);
- vmem_populator[atomic_add_32_nv(&vmem_populators, 1) - 1] = vmp;
+ vmem_populator[atomic_inc_32_nv(&vmem_populators) - 1] = vmp;
mutex_enter(&vmp->vm_lock);
(void) vmem_populate(vmp, vmflag | VM_PANIC);
mutex_exit(&vmp->vm_lock);
diff --git a/usr/src/uts/common/rpc/clnt_clts.c b/usr/src/uts/common/rpc/clnt_clts.c
index 0847fa7b46..2c4bf03bf4 100644
--- a/usr/src/uts/common/rpc/clnt_clts.c
+++ b/usr/src/uts/common/rpc/clnt_clts.c
@@ -243,7 +243,7 @@ static uint_t clts_rcstat_ndata =
sizeof (clts_rcstat_tmpl) / sizeof (kstat_named_t);
#define RCSTAT_INCR(s, x) \
- atomic_add_64(&(s)->x.value.ui64, 1)
+ atomic_inc_64(&(s)->x.value.ui64)
#define ptoh(p) (&((p)->cku_client))
#define htop(h) ((struct cku_private *)((h)->cl_private))
diff --git a/usr/src/uts/common/rpc/clnt_cots.c b/usr/src/uts/common/rpc/clnt_cots.c
index ab4a9028a8..f3b2c1946c 100644
--- a/usr/src/uts/common/rpc/clnt_cots.c
+++ b/usr/src/uts/common/rpc/clnt_cots.c
@@ -465,7 +465,7 @@ static const struct rpc_cots_client {
};
#define COTSRCSTAT_INCR(p, x) \
- atomic_add_64(&(p)->x.value.ui64, 1)
+ atomic_inc_64(&(p)->x.value.ui64)
#define CLNT_MAX_CONNS 1 /* concurrent connections between clnt/srvr */
int clnt_max_conns = CLNT_MAX_CONNS;
@@ -2795,7 +2795,7 @@ connmgr_connect(
* We need to increment rpc_kstat_instance atomically to prevent
* two kstats being created with the same instance.
*/
- kstat_instance = atomic_add_32_nv((uint32_t *)&rpc_kstat_instance, 1);
+ kstat_instance = atomic_inc_32_nv((uint32_t *)&rpc_kstat_instance);
if ((cm_entry->x_ksp = kstat_create_zone("unix", kstat_instance,
"rpc_cots_connections", "rpc", KSTAT_TYPE_NAMED,
diff --git a/usr/src/uts/common/rpc/svc_clts.c b/usr/src/uts/common/rpc/svc_clts.c
index 8cf8faadbb..acbaa7f3f6 100644
--- a/usr/src/uts/common/rpc/svc_clts.c
+++ b/usr/src/uts/common/rpc/svc_clts.c
@@ -141,7 +141,7 @@ static uint_t clts_rsstat_ndata =
(struct rpc_clts_server *)(clone_xprt)->xp_master->xp_p2
#define RSSTAT_INCR(stats, x) \
- atomic_add_64(&(stats)->x.value.ui64, 1)
+ atomic_inc_64(&(stats)->x.value.ui64)
/*
* Create a transport record.
diff --git a/usr/src/uts/common/rpc/svc_cots.c b/usr/src/uts/common/rpc/svc_cots.c
index f2599b8f76..aa2740725f 100644
--- a/usr/src/uts/common/rpc/svc_cots.c
+++ b/usr/src/uts/common/rpc/svc_cots.c
@@ -149,7 +149,7 @@ static const struct rpc_cots_server {
#define CLONE2STATS(clone_xprt) \
((struct cots_master_data *)(clone_xprt)->xp_master->xp_p2)->cmd_stats
#define RSSTAT_INCR(s, x) \
- atomic_add_64(&(s)->x.value.ui64, 1)
+ atomic_inc_64(&(s)->x.value.ui64)
/*
* Pointer to a transport specific `ready to receive' function in rpcmod
diff --git a/usr/src/uts/common/rpc/svc_rdma.c b/usr/src/uts/common/rpc/svc_rdma.c
index 767aae9c12..905b479112 100644
--- a/usr/src/uts/common/rpc/svc_rdma.c
+++ b/usr/src/uts/common/rpc/svc_rdma.c
@@ -182,7 +182,7 @@ struct {
kstat_named_t *rdmarsstat_ptr = (kstat_named_t *)&rdmarsstat;
uint_t rdmarsstat_ndata = sizeof (rdmarsstat) / sizeof (kstat_named_t);
-#define RSSTAT_INCR(x) atomic_add_64(&rdmarsstat.x.value.ui64, 1)
+#define RSSTAT_INCR(x) atomic_inc_64(&rdmarsstat.x.value.ui64)
/*
* Create a transport record.
* The transport record, output buffer, and private data structure
diff --git a/usr/src/uts/common/sys/aggr_impl.h b/usr/src/uts/common/sys/aggr_impl.h
index 8363d231cf..5b3bba08c2 100644
--- a/usr/src/uts/common/sys/aggr_impl.h
+++ b/usr/src/uts/common/sys/aggr_impl.h
@@ -234,26 +234,26 @@ typedef struct aggr_grp_s {
} aggr_grp_t;
#define AGGR_GRP_REFHOLD(grp) { \
- atomic_add_32(&(grp)->lg_refs, 1); \
+ atomic_inc_32(&(grp)->lg_refs); \
ASSERT((grp)->lg_refs != 0); \
}
#define AGGR_GRP_REFRELE(grp) { \
ASSERT((grp)->lg_refs != 0); \
membar_exit(); \
- if (atomic_add_32_nv(&(grp)->lg_refs, -1) == 0) \
+ if (atomic_dec_32_nv(&(grp)->lg_refs) == 0) \
aggr_grp_free(grp); \
}
#define AGGR_PORT_REFHOLD(port) { \
- atomic_add_32(&(port)->lp_refs, 1); \
+ atomic_inc_32(&(port)->lp_refs); \
ASSERT((port)->lp_refs != 0); \
}
#define AGGR_PORT_REFRELE(port) { \
ASSERT((port)->lp_refs != 0); \
membar_exit(); \
- if (atomic_add_32_nv(&(port)->lp_refs, -1) == 0) \
+ if (atomic_dec_32_nv(&(port)->lp_refs) == 0) \
aggr_port_free(port); \
}
diff --git a/usr/src/uts/common/sys/crypto/impl.h b/usr/src/uts/common/sys/crypto/impl.h
index ab401ed943..aaeae3409c 100644
--- a/usr/src/uts/common/sys/crypto/impl.h
+++ b/usr/src/uts/common/sys/crypto/impl.h
@@ -396,7 +396,7 @@ typedef struct kcf_policy_desc {
* by the policy table has a reference count of one.
*/
#define KCF_POLICY_REFHOLD(desc) { \
- atomic_add_32(&(desc)->pd_refcnt, 1); \
+ atomic_inc_32(&(desc)->pd_refcnt); \
ASSERT((desc)->pd_refcnt != 0); \
}
@@ -407,7 +407,7 @@ typedef struct kcf_policy_desc {
#define KCF_POLICY_REFRELE(desc) { \
ASSERT((desc)->pd_refcnt != 0); \
membar_exit(); \
- if (atomic_add_32_nv(&(desc)->pd_refcnt, -1) == 0) \
+ if (atomic_dec_32_nv(&(desc)->pd_refcnt) == 0) \
kcf_policy_free_desc(desc); \
}
diff --git a/usr/src/uts/common/sys/crypto/sched_impl.h b/usr/src/uts/common/sys/crypto/sched_impl.h
index fd35cc3a54..b46495decb 100644
--- a/usr/src/uts/common/sys/crypto/sched_impl.h
+++ b/usr/src/uts/common/sys/crypto/sched_impl.h
@@ -117,8 +117,8 @@ typedef struct kcf_prov_tried {
error == CRYPTO_KEY_SIZE_RANGE || \
error == CRYPTO_NO_PERMISSION)
-#define KCF_ATOMIC_INCR(x) atomic_add_32(&(x), 1)
-#define KCF_ATOMIC_DECR(x) atomic_add_32(&(x), -1)
+#define KCF_ATOMIC_INCR(x) atomic_inc_32(&(x))
+#define KCF_ATOMIC_DECR(x) atomic_dec_32(&(x))
/*
* Node structure for synchronous requests.
@@ -210,14 +210,14 @@ typedef struct kcf_areq_node {
} kcf_areq_node_t;
#define KCF_AREQ_REFHOLD(areq) { \
- atomic_add_32(&(areq)->an_refcnt, 1); \
+ atomic_inc_32(&(areq)->an_refcnt); \
ASSERT((areq)->an_refcnt != 0); \
}
#define KCF_AREQ_REFRELE(areq) { \
ASSERT((areq)->an_refcnt != 0); \
membar_exit(); \
- if (atomic_add_32_nv(&(areq)->an_refcnt, -1) == 0) \
+ if (atomic_dec_32_nv(&(areq)->an_refcnt) == 0) \
kcf_free_req(areq); \
}
@@ -314,7 +314,7 @@ typedef struct kcf_context {
* do a hold.
*/
#define KCF_CONTEXT_REFHOLD(ictx) { \
- atomic_add_32(&(ictx)->kc_refcnt, 1); \
+ atomic_inc_32(&(ictx)->kc_refcnt); \
ASSERT((ictx)->kc_refcnt != 0); \
}
@@ -326,7 +326,7 @@ typedef struct kcf_context {
#define KCF_CONTEXT_REFRELE(ictx) { \
ASSERT((ictx)->kc_refcnt != 0); \
membar_exit(); \
- if (atomic_add_32_nv(&(ictx)->kc_refcnt, -1) == 0) \
+ if (atomic_dec_32_nv(&(ictx)->kc_refcnt) == 0) \
kcf_free_context(ictx); \
}
diff --git a/usr/src/uts/common/sys/ib/clients/rdsv3/rdsv3_impl.h b/usr/src/uts/common/sys/ib/clients/rdsv3/rdsv3_impl.h
index a5c597c5cb..56b92f8037 100644
--- a/usr/src/uts/common/sys/ib/clients/rdsv3/rdsv3_impl.h
+++ b/usr/src/uts/common/sys/ib/clients/rdsv3/rdsv3_impl.h
@@ -375,7 +375,7 @@ void rdsv3_ib_dma_unmap_sg(ib_device_t *dev, struct rdsv3_scatterlist *scat,
static inline void
rdsv3_sk_sock_hold(struct rsock *sk)
{
- atomic_add_32(&sk->sk_refcount, 1);
+ atomic_inc_32(&sk->sk_refcount);
}
static inline void
rdsv3_sk_sock_put(struct rsock *sk)
diff --git a/usr/src/uts/common/syscall/corectl.c b/usr/src/uts/common/syscall/corectl.c
index bbd31530b6..dc99f92a6e 100644
--- a/usr/src/uts/common/syscall/corectl.c
+++ b/usr/src/uts/common/syscall/corectl.c
@@ -23,8 +23,6 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/param.h>
@@ -105,13 +103,13 @@ corectl_content_set(corectl_content_t *ccp, core_content_t content)
void
corectl_content_hold(corectl_content_t *ccp)
{
- atomic_add_32(&ccp->ccc_refcnt, 1);
+ atomic_inc_32(&ccp->ccc_refcnt);
}
void
corectl_content_rele(corectl_content_t *ccp)
{
- if (atomic_add_32_nv(&ccp->ccc_refcnt, -1) == 0)
+ if (atomic_dec_32_nv(&ccp->ccc_refcnt) == 0)
kmem_free(ccp, sizeof (corectl_content_t));
}
@@ -154,13 +152,13 @@ corectl_path_set(corectl_path_t *ccp, const char *path)
void
corectl_path_hold(corectl_path_t *ccp)
{
- atomic_add_32(&ccp->ccp_refcnt, 1);
+ atomic_inc_32(&ccp->ccp_refcnt);
}
void
corectl_path_rele(corectl_path_t *ccp)
{
- if (atomic_add_32_nv(&ccp->ccp_refcnt, -1) == 0) {
+ if (atomic_dec_32_nv(&ccp->ccp_refcnt) == 0) {
refstr_rele(ccp->ccp_path);
kmem_free(ccp, sizeof (corectl_path_t));
}
diff --git a/usr/src/uts/common/syscall/lwp_sobj.c b/usr/src/uts/common/syscall/lwp_sobj.c
index 94492d64f0..3ac8504e6a 100644
--- a/usr/src/uts/common/syscall/lwp_sobj.c
+++ b/usr/src/uts/common/syscall/lwp_sobj.c
@@ -195,7 +195,7 @@ lwpchan_delete_mapping(proc_t *p, caddr_t start, caddr_t end)
if ((addr = ent->lwpchan_uaddr) != NULL)
lwp_mutex_unregister(addr);
kmem_free(ent, sizeof (*ent));
- atomic_add_32(&lcp->lwpchan_entries, -1);
+ atomic_dec_32(&lcp->lwpchan_entries);
} else {
prev = &ent->lwpchan_next;
}
@@ -468,7 +468,7 @@ top:
ent->lwpchan_lwpchan = *lwpchan;
ent->lwpchan_next = hashbucket->lwpchan_chain;
hashbucket->lwpchan_chain = ent;
- atomic_add_32(&lcp->lwpchan_entries, 1);
+ atomic_inc_32(&lcp->lwpchan_entries);
mutex_exit(&hashbucket->lwpchan_lock);
return (1);
}
diff --git a/usr/src/uts/common/vm/page_retire.c b/usr/src/uts/common/vm/page_retire.c
index 8836a18f0c..2bebe9a3f9 100644
--- a/usr/src/uts/common/vm/page_retire.c
+++ b/usr/src/uts/common/vm/page_retire.c
@@ -218,9 +218,9 @@ static struct page_retire_kstat page_retire_kstat = {
static kstat_t *page_retire_ksp = NULL;
#define PR_INCR_KSTAT(stat) \
- atomic_add_64(&(page_retire_kstat.stat.value.ui64), 1)
+ atomic_inc_64(&(page_retire_kstat.stat.value.ui64))
#define PR_DECR_KSTAT(stat) \
- atomic_add_64(&(page_retire_kstat.stat.value.ui64), -1)
+ atomic_dec_64(&(page_retire_kstat.stat.value.ui64))
#define PR_KSTAT_RETIRED_CE (page_retire_kstat.pr_mce.value.ui64)
#define PR_KSTAT_RETIRED_FMA (page_retire_kstat.pr_fma.value.ui64)
diff --git a/usr/src/uts/common/vm/seg_kmem.c b/usr/src/uts/common/vm/seg_kmem.c
index a124e47ef4..dd0d6f5087 100644
--- a/usr/src/uts/common/vm/seg_kmem.c
+++ b/usr/src/uts/common/vm/seg_kmem.c
@@ -1277,7 +1277,7 @@ segkmem_alloc_lp(vmem_t *vmp, size_t *sizep, size_t align, int vmflag)
if (lpthrt != 0) {
/* try to update the throttle value */
- lpthrt = atomic_add_long_nv(lpthrtp, 1);
+ lpthrt = atomic_inc_ulong_nv(lpthrtp);
if (lpthrt >= segkmem_lpthrottle_max) {
lpthrt = atomic_cas_ulong(lpthrtp, lpthrt,
segkmem_lpthrottle_max / 4);
diff --git a/usr/src/uts/common/vm/seg_kp.c b/usr/src/uts/common/vm/seg_kp.c
index c810adf41c..2fe1e5f17d 100644
--- a/usr/src/uts/common/vm/seg_kp.c
+++ b/usr/src/uts/common/vm/seg_kp.c
@@ -714,8 +714,7 @@ segkp_release_internal(struct seg *seg, struct segkp_data *kpd, size_t len)
anon_free(kpd->kp_anon, kpd->kp_anon_idx + i,
PAGESIZE);
anon_unresv_zone(PAGESIZE, NULL);
- atomic_add_long(&anon_segkp_pages_resv,
- -1);
+ atomic_dec_ulong(&anon_segkp_pages_resv);
}
TRACE_5(TR_FAC_VM,
TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
@@ -838,7 +837,7 @@ segkp_map_red(void)
*/
curthread->t_red_pp = red_pp;
- atomic_add_32(&red_nmapped, 1);
+ atomic_inc_32(&red_nmapped);
while (fp - (uintptr_t)curthread->t_stkbase < red_closest) {
(void) atomic_cas_32(&red_closest, red_closest,
(uint32_t)(fp - (uintptr_t)curthread->t_stkbase));
@@ -849,7 +848,7 @@ segkp_map_red(void)
stkbase = (caddr_t)(((uintptr_t)curthread->t_stkbase &
(uintptr_t)PAGEMASK) - PAGESIZE);
- atomic_add_32(&red_ndoubles, 1);
+ atomic_inc_32(&red_ndoubles);
if (fp - (uintptr_t)stkbase < RED_DEEP_THRESHOLD) {
/*
@@ -1437,7 +1436,7 @@ segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
static int
segkp_mem_config_pre_del(void *arg, pgcnt_t delta_pages)
{
- atomic_add_32(&segkp_indel, 1);
+ atomic_inc_32(&segkp_indel);
segkp_cache_free();
return (0);
}
@@ -1446,7 +1445,7 @@ segkp_mem_config_pre_del(void *arg, pgcnt_t delta_pages)
static void
segkp_mem_config_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
{
- atomic_add_32(&segkp_indel, -1);
+ atomic_dec_32(&segkp_indel);
}
static kphysm_setup_vector_t segkp_mem_config_vec = {
diff --git a/usr/src/uts/common/vm/seg_spt.c b/usr/src/uts/common/vm/seg_spt.c
index a96951830d..f087d5fc30 100644
--- a/usr/src/uts/common/vm/seg_spt.c
+++ b/usr/src/uts/common/vm/seg_spt.c
@@ -1088,7 +1088,7 @@ segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
* In either case, we increment softlockcnt on the 'real' segment.
*/
sptd->spt_pcachecnt++;
- atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1);
+ atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
ppa = sptd->spt_ppa;
for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
@@ -1367,7 +1367,7 @@ segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
* In either case, we increment softlockcnt on the 'real' segment.
*/
sptd->spt_pcachecnt++;
- atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1);
+ atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
/*
* We can now drop the sptd->spt_lock since the ppa[]
@@ -1519,7 +1519,7 @@ segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
* Now decrement softlockcnt.
*/
ASSERT(shmd->shm_softlockcnt > 0);
- atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -1);
+ atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
if (shmd->shm_softlockcnt <= 0) {
if (async || AS_ISUNMAPWAIT(seg->s_as)) {
@@ -2911,7 +2911,7 @@ segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
* to keep this segment resident.
*/
writer = AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock);
- atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), 1);
+ atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
AS_LOCK_EXIT(seg->s_as, &seg->s_as->a_lock);
mutex_enter(&sptd->spt_lock);
@@ -2935,7 +2935,7 @@ segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
/* Regrab the AS_LOCK and release our hold on the segment */
AS_LOCK_ENTER(seg->s_as, &seg->s_as->a_lock,
writer ? RW_WRITER : RW_READER);
- atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -1);
+ atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
if (shmd->shm_softlockcnt <= 0) {
if (AS_ISUNMAPWAIT(seg->s_as)) {
mutex_enter(&seg->s_as->a_contents);
diff --git a/usr/src/uts/common/vm/seg_vn.c b/usr/src/uts/common/vm/seg_vn.c
index 2803f071f7..9426a125dc 100644
--- a/usr/src/uts/common/vm/seg_vn.c
+++ b/usr/src/uts/common/vm/seg_vn.c
@@ -2703,7 +2703,7 @@ segvn_faultpage(
}
if (type == F_SOFTLOCK) {
- atomic_add_long((ulong_t *)&svd->softlockcnt, 1);
+ atomic_inc_ulong((ulong_t *)&svd->softlockcnt);
}
/*
@@ -3064,7 +3064,7 @@ out:
anon_array_exit(&cookie);
if (type == F_SOFTLOCK) {
- atomic_add_long((ulong_t *)&svd->softlockcnt, -1);
+ atomic_dec_ulong((ulong_t *)&svd->softlockcnt);
}
return (FC_MAKE_ERR(err));
}
@@ -8892,11 +8892,11 @@ segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp,
if (sftlck_sbase) {
ASSERT(svd->softlockcnt_sbase > 0);
- atomic_add_long((ulong_t *)&svd->softlockcnt_sbase, -1);
+ atomic_dec_ulong((ulong_t *)&svd->softlockcnt_sbase);
}
if (sftlck_send) {
ASSERT(svd->softlockcnt_send > 0);
- atomic_add_long((ulong_t *)&svd->softlockcnt_send, -1);
+ atomic_dec_ulong((ulong_t *)&svd->softlockcnt_send);
}
/*
@@ -8993,10 +8993,10 @@ segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp,
npages);
}
if (sftlck_sbase) {
- atomic_add_long((ulong_t *)&svd->softlockcnt_sbase, 1);
+ atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
}
if (sftlck_send) {
- atomic_add_long((ulong_t *)&svd->softlockcnt_send, 1);
+ atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
}
SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
*ppp = pplist + adjustpages;
@@ -9186,10 +9186,10 @@ segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp,
wlen = len;
}
if (sftlck_sbase) {
- atomic_add_long((ulong_t *)&svd->softlockcnt_sbase, 1);
+ atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
}
if (sftlck_send) {
- atomic_add_long((ulong_t *)&svd->softlockcnt_send, 1);
+ atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
}
if (use_pcache) {
(void) seg_pinsert(seg, pamp, paddr, len, wlen, pl,
diff --git a/usr/src/uts/common/vm/vm_page.c b/usr/src/uts/common/vm/vm_page.c
index 0254bcb681..4c6ec13587 100644
--- a/usr/src/uts/common/vm/vm_page.c
+++ b/usr/src/uts/common/vm/vm_page.c
@@ -317,7 +317,7 @@ struct memseg_stats {
} memseg_stats;
#define MEMSEG_STAT_INCR(v) \
- atomic_add_32(&memseg_stats.v, 1)
+ atomic_inc_32(&memseg_stats.v)
#else
#define MEMSEG_STAT_INCR(x)
#endif
diff --git a/usr/src/uts/common/vm/vm_pagelist.c b/usr/src/uts/common/vm/vm_pagelist.c
index ff4eb5e0a0..216947f665 100644
--- a/usr/src/uts/common/vm/vm_pagelist.c
+++ b/usr/src/uts/common/vm/vm_pagelist.c
@@ -1563,7 +1563,7 @@ page_list_noreloc_startup(page_t *pp)
/*
* Update cage freemem counter
*/
- atomic_add_long(&kcage_freemem, 1);
+ atomic_inc_ulong(&kcage_freemem);
}
#else /* __sparc */
@@ -4117,7 +4117,7 @@ struct repl_page_stats {
uint_t nnofree;
uint_t nnext_pp;
} repl_page_stats;
-#define REPL_STAT_INCR(v) atomic_add_32(&repl_page_stats.v, 1)
+#define REPL_STAT_INCR(v) atomic_inc_32(&repl_page_stats.v)
#else /* REPL_PAGE_STATS */
#define REPL_STAT_INCR(v)
#endif /* REPL_PAGE_STATS */
diff --git a/usr/src/uts/common/xen/io/xnb.c b/usr/src/uts/common/xen/io/xnb.c
index 7c22ff8e52..761597653b 100644
--- a/usr/src/uts/common/xen/io/xnb.c
+++ b/usr/src/uts/common/xen/io/xnb.c
@@ -1300,7 +1300,7 @@ xnb_txbuf_constructor(void *buf, void *arg, int kmflag)
DTRACE_PROBE(txbuf_allocated);
- atomic_add_32(&xnbp->xnb_tx_buf_count, 1);
+ atomic_inc_32(&xnbp->xnb_tx_buf_count);
xnbp->xnb_tx_buf_outstanding++;
return (0);
@@ -1326,7 +1326,7 @@ xnb_txbuf_destructor(void *buf, void *arg)
ddi_dma_mem_free(&txp->xt_acc_handle);
ddi_dma_free_handle(&txp->xt_dma_handle);
- atomic_add_32(&xnbp->xnb_tx_buf_count, -1);
+ atomic_dec_32(&xnbp->xnb_tx_buf_count);
}
/*
diff --git a/usr/src/uts/common/xen/io/xnf.c b/usr/src/uts/common/xen/io/xnf.c
index 534b3f0904..2f895a33d7 100644
--- a/usr/src/uts/common/xen/io/xnf.c
+++ b/usr/src/uts/common/xen/io/xnf.c
@@ -366,7 +366,7 @@ gref_get(xnf_t *xnfp)
if (gref == INVALID_GRANT_REF) {
xnfp->xnf_stat_gref_failure++;
} else {
- atomic_add_64(&xnfp->xnf_stat_gref_outstanding, 1);
+ atomic_inc_64(&xnfp->xnf_stat_gref_outstanding);
if (xnfp->xnf_stat_gref_outstanding > xnfp->xnf_stat_gref_peak)
xnfp->xnf_stat_gref_peak =
xnfp->xnf_stat_gref_outstanding;
@@ -387,7 +387,7 @@ gref_put(xnf_t *xnfp, grant_ref_t gref)
gnttab_release_grant_reference(&xnfp->xnf_gref_head, gref);
mutex_exit(&xnfp->xnf_gref_lock);
- atomic_add_64(&xnfp->xnf_stat_gref_outstanding, -1);
+ atomic_dec_64(&xnfp->xnf_stat_gref_outstanding);
}
/*
@@ -2352,7 +2352,7 @@ xnf_buf_constructor(void *buf, void *arg, int kmflag)
bdesc->grant_ref = INVALID_GRANT_REF;
bdesc->gen = xnfp->xnf_gen;
- atomic_add_64(&xnfp->xnf_stat_buf_allocated, 1);
+ atomic_inc_64(&xnfp->xnf_stat_buf_allocated);
return (0);
@@ -2378,7 +2378,7 @@ xnf_buf_destructor(void *buf, void *arg)
ddi_dma_mem_free(&bdesc->acc_handle);
ddi_dma_free_handle(&bdesc->dma_handle);
- atomic_add_64(&xnfp->xnf_stat_buf_allocated, -1);
+ atomic_dec_64(&xnfp->xnf_stat_buf_allocated);
}
static xnf_buf_t *
@@ -2412,7 +2412,7 @@ xnf_buf_get(xnf_t *xnfp, int flags, boolean_t readonly)
xvdi_get_oeid(bufp->xnfp->xnf_devinfo),
bufp->buf_mfn, readonly ? 1 : 0);
- atomic_add_64(&xnfp->xnf_stat_buf_outstanding, 1);
+ atomic_inc_64(&xnfp->xnf_stat_buf_outstanding);
return (bufp);
}
@@ -2429,7 +2429,7 @@ xnf_buf_put(xnf_t *xnfp, xnf_buf_t *bufp, boolean_t readonly)
kmem_cache_free(xnfp->xnf_buf_cache, bufp);
- atomic_add_64(&xnfp->xnf_stat_buf_outstanding, -1);
+ atomic_dec_64(&xnfp->xnf_stat_buf_outstanding);
}
/*
diff --git a/usr/src/uts/i86pc/io/psm/uppc.c b/usr/src/uts/i86pc/io/psm/uppc.c
index ad4d378f76..adfd5079a1 100644
--- a/usr/src/uts/i86pc/io/psm/uppc.c
+++ b/usr/src/uts/i86pc/io/psm/uppc.c
@@ -317,7 +317,7 @@ uppc_addspl(int irqno, int ipl, int min_ipl, int max_ipl)
uchar_t vectmask;
if (irqno <= MAX_ISA_IRQ)
- atomic_add_16(&uppc_irq_shared_table[irqno], 1);
+ atomic_inc_16(&uppc_irq_shared_table[irqno]);
if (ipl != min_ipl)
return (0);
@@ -360,7 +360,7 @@ uppc_delspl(int irqno, int ipl, int min_ipl, int max_ipl)
uchar_t vectmask;
if (irqno <= MAX_ISA_IRQ)
- atomic_add_16(&uppc_irq_shared_table[irqno], -1);
+ atomic_dec_16(&uppc_irq_shared_table[irqno]);
/*
* skip if we are not deleting the last handler
diff --git a/usr/src/uts/i86pc/os/memnode.c b/usr/src/uts/i86pc/os/memnode.c
index 2c1c6e91d5..e065f1ba21 100644
--- a/usr/src/uts/i86pc/os/memnode.c
+++ b/usr/src/uts/i86pc/os/memnode.c
@@ -97,7 +97,7 @@ mem_node_add_slice(pfn_t start, pfn_t end)
} else {
mem_node_config[mnode].physbase = start;
mem_node_config[mnode].physmax = end;
- atomic_add_16(&num_memnodes, 1);
+ atomic_inc_16(&num_memnodes);
do {
oldmask = memnodes_mask;
newmask = memnodes_mask | (1ull << mnode);
@@ -163,7 +163,7 @@ mem_node_del_slice(pfn_t start, pfn_t end)
omask = memnodes_mask;
nmask = omask & ~(1ull << mnode);
} while (atomic_cas_64(&memnodes_mask, omask, nmask) != omask);
- atomic_add_16(&num_memnodes, -1);
+ atomic_dec_16(&num_memnodes);
mem_node_config[mnode].exists = 0;
}
}
@@ -239,7 +239,7 @@ mem_node_alloc()
mem_node_config[mnode].physbase = (pfn_t)-1l;
mem_node_config[mnode].physmax = 0;
- atomic_add_16(&num_memnodes, 1);
+ atomic_inc_16(&num_memnodes);
do {
oldmask = memnodes_mask;
newmask = memnodes_mask | (1ull << mnode);
diff --git a/usr/src/uts/i86pc/sys/rootnex.h b/usr/src/uts/i86pc/sys/rootnex.h
index 859157d1c8..161c5d8df4 100644
--- a/usr/src/uts/i86pc/sys/rootnex.h
+++ b/usr/src/uts/i86pc/sys/rootnex.h
@@ -51,7 +51,7 @@ extern "C" {
/* atomic increment/decrement to keep track of outstanding binds, etc */
#ifdef DEBUG
#define ROOTNEX_DPROF_INC(addr) atomic_inc_64(addr)
-#define ROOTNEX_DPROF_DEC(addr) atomic_add_64(addr, -1)
+#define ROOTNEX_DPROF_DEC(addr) atomic_dec_64(addr)
#define ROOTNEX_DPROBE1(name, type1, arg1) \
DTRACE_PROBE1(name, type1, arg1)
#define ROOTNEX_DPROBE2(name, type1, arg1, type2, arg2) \
diff --git a/usr/src/uts/i86pc/vm/hat_i86.c b/usr/src/uts/i86pc/vm/hat_i86.c
index 4f41c68d9f..64e1c2fb46 100644
--- a/usr/src/uts/i86pc/vm/hat_i86.c
+++ b/usr/src/uts/i86pc/vm/hat_i86.c
@@ -1227,14 +1227,14 @@ hat_get_mapped_size(hat_t *hat)
int
hat_stats_enable(hat_t *hat)
{
- atomic_add_32(&hat->hat_stats, 1);
+ atomic_inc_32(&hat->hat_stats);
return (1);
}
void
hat_stats_disable(hat_t *hat)
{
- atomic_add_32(&hat->hat_stats, -1);
+ atomic_dec_32(&hat->hat_stats);
}
/*
diff --git a/usr/src/uts/i86pc/vm/hat_i86.h b/usr/src/uts/i86pc/vm/hat_i86.h
index 8007347d0f..03317b35e8 100644
--- a/usr/src/uts/i86pc/vm/hat_i86.h
+++ b/usr/src/uts/i86pc/vm/hat_i86.h
@@ -98,9 +98,9 @@ struct hat {
typedef struct hat hat_t;
#define PGCNT_INC(hat, level) \
- atomic_add_long(&(hat)->hat_pages_mapped[level], 1);
+ atomic_inc_ulong(&(hat)->hat_pages_mapped[level]);
#define PGCNT_DEC(hat, level) \
- atomic_add_long(&(hat)->hat_pages_mapped[level], -1);
+ atomic_dec_ulong(&(hat)->hat_pages_mapped[level]);
/*
* Flags for the hat_flags field
diff --git a/usr/src/uts/i86pc/vm/htable.c b/usr/src/uts/i86pc/vm/htable.c
index bcb2b117a3..4ed2a74413 100644
--- a/usr/src/uts/i86pc/vm/htable.c
+++ b/usr/src/uts/i86pc/vm/htable.c
@@ -303,7 +303,7 @@ ptable_alloc(uintptr_t seed)
pfn = pp->p_pagenum;
if (pfn == PFN_INVALID)
panic("ptable_alloc(): Invalid PFN!!");
- atomic_add_32(&active_ptables, 1);
+ atomic_inc_32(&active_ptables);
HATSTAT_INC(hs_ptable_allocs);
return (pfn);
}
@@ -322,7 +322,7 @@ ptable_free(pfn_t pfn)
*/
ASSERT(pfn != PFN_INVALID);
HATSTAT_INC(hs_ptable_frees);
- atomic_add_32(&active_ptables, -1);
+ atomic_dec_32(&active_ptables);
if (pp == NULL)
panic("ptable_free(): no page for pfn!");
ASSERT(PAGE_SHARED(pp));
@@ -460,7 +460,7 @@ htable_steal(uint_t cnt)
* Loop through all user hats. The 1st pass takes cached htables that
* aren't in use. The later passes steal by removing mappings, too.
*/
- atomic_add_32(&htable_dont_cache, 1);
+ atomic_inc_32(&htable_dont_cache);
for (pass = 0; pass <= htable_steal_passes && stolen < cnt; ++pass) {
threshold = pass * mmu.ptes_per_table / htable_steal_passes;
hat = kas.a_hat;
@@ -669,7 +669,7 @@ htable_steal(uint_t cnt)
} while (stolen < cnt && h != h_start);
}
}
- atomic_add_32(&htable_dont_cache, -1);
+ atomic_dec_32(&htable_dont_cache);
return (list);
}
@@ -985,7 +985,7 @@ htable_purge_hat(hat_t *hat)
* Purge the htable cache if just reaping.
*/
if (!(hat->hat_flags & HAT_FREEING)) {
- atomic_add_32(&htable_dont_cache, 1);
+ atomic_inc_32(&htable_dont_cache);
for (;;) {
hat_enter(hat);
ht = hat->hat_ht_cached;
@@ -997,7 +997,7 @@ htable_purge_hat(hat_t *hat)
hat_exit(hat);
htable_free(ht);
}
- atomic_add_32(&htable_dont_cache, -1);
+ atomic_dec_32(&htable_dont_cache);
return;
}
diff --git a/usr/src/uts/i86pc/vm/htable.h b/usr/src/uts/i86pc/vm/htable.h
index 758b3dcd1d..c0f5639c60 100644
--- a/usr/src/uts/i86pc/vm/htable.h
+++ b/usr/src/uts/i86pc/vm/htable.h
@@ -26,8 +26,6 @@
#ifndef _VM_HTABLE_H
#define _VM_HTABLE_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -292,8 +290,8 @@ extern void x86pte_mapout(void);
*/
#define HTABLE_INC(x) atomic_inc16((uint16_t *)&x)
#define HTABLE_DEC(x) atomic_dec16((uint16_t *)&x)
-#define HTABLE_LOCK_INC(ht) atomic_add_32(&(ht)->ht_lock_cnt, 1)
-#define HTABLE_LOCK_DEC(ht) atomic_add_32(&(ht)->ht_lock_cnt, -1)
+#define HTABLE_LOCK_INC(ht) atomic_inc_32(&(ht)->ht_lock_cnt)
+#define HTABLE_LOCK_DEC(ht) atomic_dec_32(&(ht)->ht_lock_cnt)
#ifdef __xpv
extern void xen_flush_va(caddr_t va);
diff --git a/usr/src/uts/i86xpv/io/psm/xpv_uppc.c b/usr/src/uts/i86xpv/io/psm/xpv_uppc.c
index 06049ed9e0..d50b7f504e 100644
--- a/usr/src/uts/i86xpv/io/psm/xpv_uppc.c
+++ b/usr/src/uts/i86xpv/io/psm/xpv_uppc.c
@@ -251,7 +251,7 @@ xen_uppc_addspl(int irqno, int ipl, int min_ipl, int max_ipl)
cpuset_t cpus;
if (irqno >= 0 && irqno <= MAX_ISA_IRQ)
- atomic_add_16(&xen_uppc_irq_shared_table[irqno], 1);
+ atomic_inc_16(&xen_uppc_irq_shared_table[irqno]);
/*
* We are called at splhi() so we can't call anything that might end
@@ -284,7 +284,7 @@ xen_uppc_delspl(int irqno, int ipl, int min_ipl, int max_ipl)
int err = PSM_SUCCESS;
if (irqno >= 0 && irqno <= MAX_ISA_IRQ)
- atomic_add_16(&xen_uppc_irq_shared_table[irqno], -1);
+ atomic_dec_16(&xen_uppc_irq_shared_table[irqno]);
if (irqno >= PIRQ_BASE && irqno < NR_PIRQS &&
DOMAIN_IS_INITDOMAIN(xen_info)) {
diff --git a/usr/src/uts/intel/io/scsi/adapters/arcmsr/arcmsr.c b/usr/src/uts/intel/io/scsi/adapters/arcmsr/arcmsr.c
index 8aa0546c6b..4c5b9ff1fd 100644
--- a/usr/src/uts/intel/io/scsi/adapters/arcmsr/arcmsr.c
+++ b/usr/src/uts/intel/io/scsi/adapters/arcmsr/arcmsr.c
@@ -1842,7 +1842,7 @@ arcmsr_post_ccb(struct ACB *acb, struct CCB *ccb)
DDI_FAILURE)
return (DDI_FAILURE);
- atomic_add_32(&acb->ccboutstandingcount, 1);
+ atomic_inc_32(&acb->ccboutstandingcount);
ccb->ccb_time = (time_t)(ddi_get_time() + pkt->pkt_time);
ccb->ccb_state = ARCMSR_CCB_START;
@@ -1955,7 +1955,7 @@ arcmsr_ccb_complete(struct CCB *ccb, int flag)
scsi_hba_pkt_comp(pkt);
}
if (flag == 1) {
- atomic_add_32(&acb->ccboutstandingcount, -1);
+ atomic_dec_32(&acb->ccboutstandingcount);
}
}
diff --git a/usr/src/uts/sfmmu/vm/hat_sfmmu.c b/usr/src/uts/sfmmu/vm/hat_sfmmu.c
index 4525a67c44..7170891206 100644
--- a/usr/src/uts/sfmmu/vm/hat_sfmmu.c
+++ b/usr/src/uts/sfmmu/vm/hat_sfmmu.c
@@ -2151,7 +2151,7 @@ hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len,
VN_HOLD(srdp->srd_evp);
ASSERT(srdp->srd_refcnt > 0);
newhat->sfmmu_srdp = srdp;
- atomic_add_32((volatile uint_t *)&srdp->srd_refcnt, 1);
+ atomic_inc_32((volatile uint_t *)&srdp->srd_refcnt);
}
/*
@@ -3227,7 +3227,7 @@ sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep,
panic("too high lckcnt-hmeblk %p",
(void *)hmeblkp);
}
- atomic_add_32(&hmeblkp->hblk_lckcnt, 1);
+ atomic_inc_32(&hmeblkp->hblk_lckcnt);
HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK);
}
@@ -3262,9 +3262,9 @@ sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep,
if (!TTE_IS_VALID(&tteold)) {
- atomic_add_16(&hmeblkp->hblk_vcnt, 1);
+ atomic_inc_16(&hmeblkp->hblk_vcnt);
if (rid == SFMMU_INVALID_SHMERID) {
- atomic_add_long(&sfmmup->sfmmu_ttecnt[size], 1);
+ atomic_inc_ulong(&sfmmup->sfmmu_ttecnt[size]);
} else {
sf_srd_t *srdp = sfmmup->sfmmu_srdp;
sf_region_t *rgnp = srdp->srd_hmergnp[rid];
@@ -3273,7 +3273,7 @@ sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep,
* during hat_join_region() processing. Here we
				 * only update ttecnt's in region structure.
*/
- atomic_add_long(&rgnp->rgn_ttecnt[size], 1);
+ atomic_inc_ulong(&rgnp->rgn_ttecnt[size]);
}
}
@@ -3381,7 +3381,7 @@ sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep,
if (pp) {
if (!remap) {
HME_ADD(sfhme, pp);
- atomic_add_16(&hmeblkp->hblk_hmecnt, 1);
+ atomic_inc_16(&hmeblkp->hblk_hmecnt);
ASSERT(hmeblkp->hblk_hmecnt > 0);
/*
@@ -4154,7 +4154,7 @@ readtte:
panic("can't unlock large tte");
ASSERT(hmeblkp->hblk_lckcnt > 0);
- atomic_add_32(&hmeblkp->hblk_lckcnt, -1);
+ atomic_dec_32(&hmeblkp->hblk_lckcnt);
HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
} else {
panic("sfmmu_hblk_unlock: invalid tte");
@@ -6133,7 +6133,7 @@ again:
if (flags & HAT_UNLOAD_UNLOCK) {
ASSERT(hmeblkp->hblk_lckcnt > 0);
- atomic_add_32(&hmeblkp->hblk_lckcnt, -1);
+ atomic_dec_32(&hmeblkp->hblk_lckcnt);
HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
}
@@ -6187,11 +6187,11 @@ again:
*/
HME_SUB(sfhmep, pp);
membar_stst();
- atomic_add_16(&hmeblkp->hblk_hmecnt, -1);
+ atomic_dec_16(&hmeblkp->hblk_hmecnt);
}
ASSERT(hmeblkp->hblk_vcnt > 0);
- atomic_add_16(&hmeblkp->hblk_vcnt, -1);
+ atomic_dec_16(&hmeblkp->hblk_vcnt);
ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
!hmeblkp->hblk_lckcnt);
@@ -7349,10 +7349,10 @@ readtte:
cpuset = sfmmu_rgntlb_demap(addr, rgnp, hmeblkp, 1);
sfmmu_ttesync(NULL, addr, &tte, pp);
ASSERT(rgnp->rgn_ttecnt[ttesz] > 0);
- atomic_add_long(&rgnp->rgn_ttecnt[ttesz], -1);
+ atomic_dec_ulong(&rgnp->rgn_ttecnt[ttesz]);
} else {
sfmmu_ttesync(sfmmup, addr, &tte, pp);
- atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -1);
+ atomic_dec_ulong(&sfmmup->sfmmu_ttecnt[ttesz]);
/*
* We need to flush the page from the virtual cache
@@ -7413,8 +7413,8 @@ readtte:
*/
ASSERT(hmeblkp->hblk_hmecnt > 0);
ASSERT(hmeblkp->hblk_vcnt > 0);
- atomic_add_16(&hmeblkp->hblk_vcnt, -1);
- atomic_add_16(&hmeblkp->hblk_hmecnt, -1);
+ atomic_dec_16(&hmeblkp->hblk_vcnt);
+ atomic_dec_16(&hmeblkp->hblk_hmecnt);
/*
* This is bug 4063182.
* XXX: fixme
@@ -13813,8 +13813,8 @@ hat_join_srd(struct hat *sfmmup, vnode_t *evp)
if (srdp->srd_evp == evp) {
ASSERT(srdp->srd_refcnt >= 0);
sfmmup->sfmmu_srdp = srdp;
- atomic_add_32(
- (volatile uint_t *)&srdp->srd_refcnt, 1);
+ atomic_inc_32(
+ (volatile uint_t *)&srdp->srd_refcnt);
mutex_exit(&srd_buckets[hash].srdb_lock);
return;
}
@@ -13835,7 +13835,7 @@ hat_join_srd(struct hat *sfmmup, vnode_t *evp)
if (srdp->srd_evp == evp) {
ASSERT(srdp->srd_refcnt >= 0);
sfmmup->sfmmu_srdp = srdp;
- atomic_add_32((volatile uint_t *)&srdp->srd_refcnt, 1);
+ atomic_inc_32((volatile uint_t *)&srdp->srd_refcnt);
mutex_exit(&srd_buckets[hash].srdb_lock);
kmem_cache_free(srd_cache, newsrdp);
return;
@@ -13872,8 +13872,7 @@ sfmmu_leave_srd(sfmmu_t *sfmmup)
sfmmup->sfmmu_srdp = NULL;
evp = srdp->srd_evp;
ASSERT(evp != NULL);
- if (atomic_add_32_nv(
- (volatile uint_t *)&srdp->srd_refcnt, -1)) {
+ if (atomic_dec_32_nv((volatile uint_t *)&srdp->srd_refcnt)) {
VN_RELE(evp);
return;
}
@@ -14090,7 +14089,7 @@ rfound:
ASSERT(rid < maxids);
ASSERT(rarrp[rid] == rgnp);
ASSERT(rid < *nextidp);
- atomic_add_32((volatile uint_t *)&rgnp->rgn_refcnt, 1);
+ atomic_inc_32((volatile uint_t *)&rgnp->rgn_refcnt);
mutex_exit(&srdp->srd_mutex);
if (new_rgnp != NULL) {
kmem_cache_free(region_cache, new_rgnp);
@@ -14440,7 +14439,7 @@ hat_leave_region(struct hat *sfmmup, hat_region_cookie_t rcookie, uint_t flags)
}
r_obj = rgnp->rgn_obj;
- if (atomic_add_32_nv((volatile uint_t *)&rgnp->rgn_refcnt, -1)) {
+ if (atomic_dec_32_nv((volatile uint_t *)&rgnp->rgn_refcnt)) {
return;
}
@@ -14525,7 +14524,7 @@ hat_dup_region(struct hat *sfmmup, hat_region_cookie_t rcookie)
ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == SFMMU_REGION_HME);
ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE));
- atomic_add_32((volatile uint_t *)&rgnp->rgn_refcnt, 1);
+ atomic_inc_32((volatile uint_t *)&rgnp->rgn_refcnt);
/* LINTED: constant in conditional context */
SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 0);
@@ -15255,8 +15254,7 @@ sfmmu_find_scd(sfmmu_t *sfmmup)
mutex_exit(&srdp->srd_scd_mutex);
sfmmu_join_scd(scdp, sfmmup);
ASSERT(scdp->scd_refcnt >= 2);
- atomic_add_32((volatile uint32_t *)
- &scdp->scd_refcnt, -1);
+ atomic_dec_32((volatile uint32_t *)&scdp->scd_refcnt);
return;
} else {
/*
@@ -15301,7 +15299,7 @@ sfmmu_find_scd(sfmmu_t *sfmmup)
mutex_exit(&srdp->srd_scd_mutex);
sfmmu_join_scd(new_scdp, sfmmup);
ASSERT(new_scdp->scd_refcnt >= 2);
- atomic_add_32((volatile uint32_t *)&new_scdp->scd_refcnt, -1);
+ atomic_dec_32((volatile uint32_t *)&new_scdp->scd_refcnt);
}
/*
diff --git a/usr/src/uts/sfmmu/vm/hat_sfmmu.h b/usr/src/uts/sfmmu/vm/hat_sfmmu.h
index 8bdc3e39b2..6798fb0c55 100644
--- a/usr/src/uts/sfmmu/vm/hat_sfmmu.h
+++ b/usr/src/uts/sfmmu/vm/hat_sfmmu.h
@@ -359,13 +359,12 @@ typedef union sf_region_map_u {
}
#define SF_SCD_INCR_REF(scdp) { \
- atomic_add_32((volatile uint32_t *)&(scdp)->scd_refcnt, 1); \
+ atomic_inc_32((volatile uint32_t *)&(scdp)->scd_refcnt); \
}
#define SF_SCD_DECR_REF(srdp, scdp) { \
sf_region_map_t _scd_rmap = (scdp)->scd_region_map; \
- if (!atomic_add_32_nv( \
- (volatile uint32_t *)&(scdp)->scd_refcnt, -1)) { \
+ if (!atomic_dec_32_nv((volatile uint32_t *)&(scdp)->scd_refcnt)) {\
sfmmu_destroy_scd((srdp), (scdp), &_scd_rmap); \
} \
}
diff --git a/usr/src/uts/sparc/dtrace/fasttrap_isa.c b/usr/src/uts/sparc/dtrace/fasttrap_isa.c
index 45d87478d6..735bb9d0eb 100644
--- a/usr/src/uts/sparc/dtrace/fasttrap_isa.c
+++ b/usr/src/uts/sparc/dtrace/fasttrap_isa.c
@@ -24,8 +24,6 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/fasttrap_isa.h>
#include <sys/fasttrap_impl.h>
#include <sys/dtrace.h>
@@ -1410,7 +1408,7 @@ fasttrap_getreg(struct regs *rp, uint_t reg)
value = dtrace_getreg_win(reg, 1);
dtrace_interrupt_enable(cookie);
- atomic_add_64(&fasttrap_getreg_fast_cnt, 1);
+ atomic_inc_64(&fasttrap_getreg_fast_cnt);
return (value);
}
@@ -1435,7 +1433,7 @@ fasttrap_getreg(struct regs *rp, uint_t reg)
if ((long)mpcb->mpcb_spbuf[i] != rp->r_sp)
continue;
- atomic_add_64(&fasttrap_getreg_mpcb_cnt, 1);
+ atomic_inc_64(&fasttrap_getreg_mpcb_cnt);
return (rwin[i].rw_local[reg - 16]);
} while (i > 0);
}
@@ -1455,7 +1453,7 @@ fasttrap_getreg(struct regs *rp, uint_t reg)
if ((long)mpcb->mpcb_spbuf[i] != rp->r_sp)
continue;
- atomic_add_64(&fasttrap_getreg_mpcb_cnt, 1);
+ atomic_inc_64(&fasttrap_getreg_mpcb_cnt);
return (rwin[i].rw_local[reg - 16]);
} while (i > 0);
}
@@ -1466,7 +1464,7 @@ fasttrap_getreg(struct regs *rp, uint_t reg)
v32[0] = 0;
}
- atomic_add_64(&fasttrap_getreg_slow_cnt, 1);
+ atomic_inc_64(&fasttrap_getreg_slow_cnt);
return (value);
err:
@@ -1505,7 +1503,7 @@ fasttrap_putreg(struct regs *rp, uint_t reg, ulong_t value)
if (dtrace_getotherwin() > 0) {
dtrace_putreg_win(reg, value);
dtrace_interrupt_enable(cookie);
- atomic_add_64(&fasttrap_putreg_fast_cnt, 1);
+ atomic_inc_64(&fasttrap_putreg_fast_cnt);
return;
}
dtrace_interrupt_enable(cookie);
@@ -1536,7 +1534,7 @@ fasttrap_putreg(struct regs *rp, uint_t reg, ulong_t value)
continue;
rwin[i].rw_local[reg - 16] = value;
- atomic_add_64(&fasttrap_putreg_mpcb_cnt, 1);
+ atomic_inc_64(&fasttrap_putreg_mpcb_cnt);
return;
} while (i > 0);
}
@@ -1549,7 +1547,7 @@ fasttrap_putreg(struct regs *rp, uint_t reg, ulong_t value)
rwin[mpcb->mpcb_wbcnt].rw_local[reg - 16] = value;
mpcb->mpcb_spbuf[mpcb->mpcb_wbcnt] = (caddr_t)rp->r_sp;
mpcb->mpcb_wbcnt++;
- atomic_add_64(&fasttrap_putreg_mpcb_cnt, 1);
+ atomic_inc_64(&fasttrap_putreg_mpcb_cnt);
return;
}
} else {
@@ -1567,7 +1565,7 @@ fasttrap_putreg(struct regs *rp, uint_t reg, ulong_t value)
continue;
rwin[i].rw_local[reg - 16] = v32;
- atomic_add_64(&fasttrap_putreg_mpcb_cnt, 1);
+ atomic_inc_64(&fasttrap_putreg_mpcb_cnt);
return;
} while (i > 0);
}
@@ -1580,12 +1578,12 @@ fasttrap_putreg(struct regs *rp, uint_t reg, ulong_t value)
rwin[mpcb->mpcb_wbcnt].rw_local[reg - 16] = v32;
mpcb->mpcb_spbuf[mpcb->mpcb_wbcnt] = (caddr_t)rp->r_sp;
mpcb->mpcb_wbcnt++;
- atomic_add_64(&fasttrap_putreg_mpcb_cnt, 1);
+ atomic_inc_64(&fasttrap_putreg_mpcb_cnt);
return;
}
}
- atomic_add_64(&fasttrap_putreg_slow_cnt, 1);
+ atomic_inc_64(&fasttrap_putreg_slow_cnt);
return;
err:
diff --git a/usr/src/uts/sparc/fpu/fpu_simulator.c b/usr/src/uts/sparc/fpu/fpu_simulator.c
index c65e15402a..aeafdc515d 100644
--- a/usr/src/uts/sparc/fpu/fpu_simulator.c
+++ b/usr/src/uts/sparc/fpu/fpu_simulator.c
@@ -42,7 +42,7 @@
extern void __dtrace_probe___fpuinfo_##opcode(uint64_t *); \
uint64_t *stataddr = &fpuinfo.opcode.value.ui64; \
__dtrace_probe___fpuinfo_##opcode(stataddr); \
- atomic_add_64(&fpuinfo.opcode.value.ui64, 1); \
+ atomic_inc_64(&fpuinfo.opcode.value.ui64); \
}
#define FPUINFO_KSTAT_PREC(prec, kstat_s, kstat_d, kstat_q) \
@@ -791,9 +791,9 @@ fp_kstat_update(enum ftt_type ftt)
ASSERT((ftt == ftt_ieee) || (ftt == ftt_unfinished) ||
(ftt == ftt_unimplemented));
if (ftt == ftt_ieee)
- atomic_add_64(&fpustat.fpu_ieee_traps.value.ui64, 1);
+ atomic_inc_64(&fpustat.fpu_ieee_traps.value.ui64);
else if (ftt == ftt_unfinished)
- atomic_add_64(&fpustat.fpu_unfinished_traps.value.ui64, 1);
+ atomic_inc_64(&fpustat.fpu_unfinished_traps.value.ui64);
else if (ftt == ftt_unimplemented)
- atomic_add_64(&fpustat.fpu_unimplemented_traps.value.ui64, 1);
+ atomic_inc_64(&fpustat.fpu_unimplemented_traps.value.ui64);
}
diff --git a/usr/src/uts/sparc/sys/fpu/fpu_simulator.h b/usr/src/uts/sparc/sys/fpu/fpu_simulator.h
index 0a89f34a0b..4d83a2c795 100644
--- a/usr/src/uts/sparc/sys/fpu/fpu_simulator.h
+++ b/usr/src/uts/sparc/sys/fpu/fpu_simulator.h
@@ -379,7 +379,7 @@ struct visinfo_kstat {
extern void __dtrace_probe___visinfo_##opcode(uint64_t *); \
uint64_t *stataddr = &visinfo.opcode.value.ui64; \
__dtrace_probe___visinfo_##opcode(stataddr); \
- atomic_add_64(&visinfo.opcode.value.ui64, 1); \
+ atomic_inc_64(&visinfo.opcode.value.ui64); \
}
diff --git a/usr/src/uts/sun4/os/memnode.c b/usr/src/uts/sun4/os/memnode.c
index bbee6dd2c6..82f872a5fb 100644
--- a/usr/src/uts/sun4/os/memnode.c
+++ b/usr/src/uts/sun4/os/memnode.c
@@ -94,7 +94,7 @@ mem_node_add_slice(pfn_t start, pfn_t end)
} else {
mem_node_config[mnode].physbase = start;
mem_node_config[mnode].physmax = end;
- atomic_add_16(&num_memnodes, 1);
+ atomic_inc_16(&num_memnodes);
do {
oldmask = memnodes_mask;
newmask = memnodes_mask | (1ull << mnode);
@@ -160,7 +160,7 @@ mem_node_del_slice(pfn_t start, pfn_t end)
omask = memnodes_mask;
nmask = omask & ~(1ull << mnode);
} while (atomic_cas_64(&memnodes_mask, omask, nmask) != omask);
- atomic_add_16(&num_memnodes, -1);
+ atomic_dec_16(&num_memnodes);
mem_node_config[mnode].exists = 0;
}
}
@@ -230,7 +230,7 @@ mem_node_alloc()
mem_node_config[mnode].physbase = (uint64_t)-1;
mem_node_config[mnode].physmax = 0;
- atomic_add_16(&num_memnodes, 1);
+ atomic_inc_16(&num_memnodes);
do {
oldmask = memnodes_mask;
newmask = memnodes_mask | (1ull << mnode);
diff --git a/usr/src/uts/sun4/os/prom_subr.c b/usr/src/uts/sun4/os/prom_subr.c
index 578692011f..37fd0f3ee7 100644
--- a/usr/src/uts/sun4/os/prom_subr.c
+++ b/usr/src/uts/sun4/os/prom_subr.c
@@ -318,7 +318,7 @@ kern_preprom(void)
* We now hold the prom_cpu lock. Increment the hold count by one
* and assert our current state before returning to the caller.
*/
- atomic_add_32(&prom_holdcnt, 1);
+ atomic_inc_32(&prom_holdcnt);
ASSERT(prom_holdcnt >= 1);
prom_thread = curthread;
}
@@ -345,7 +345,7 @@ kern_postprom(void)
panic("kern_postprom: prom_holdcnt == 0, owner=%p",
(void *)prom_cpu);
- if (atomic_add_32_nv(&prom_holdcnt, -1) != 0)
+ if (atomic_dec_32_nv(&prom_holdcnt) != 0)
return; /* prom lock is held recursively by this CPU */
if ((boothowto & RB_DEBUG) && prom_exit_enter_debugger)
diff --git a/usr/src/uts/sun4u/cpu/spitfire.c b/usr/src/uts/sun4u/cpu/spitfire.c
index bf6bc3dcd0..b4c35566ce 100644
--- a/usr/src/uts/sun4u/cpu/spitfire.c
+++ b/usr/src/uts/sun4u/cpu/spitfire.c
@@ -4237,7 +4237,7 @@ leaky_bucket_timeout(void *arg)
for (i = 0; i < mem_ce_simm_size; i++) {
if (psimm[i].leaky_bucket_cnt > 0)
- atomic_add_16(&psimm[i].leaky_bucket_cnt, -1);
+ atomic_dec_16(&psimm[i].leaky_bucket_cnt);
}
add_leaky_bucket_timeout();
}
@@ -4382,8 +4382,8 @@ ce_count_unum(int status, int len, char *unum)
} else if (status & ECC_PERSISTENT) {
int new_value;
- new_value = atomic_add_16_nv(
- &psimm[i].leaky_bucket_cnt, 1);
+ new_value = atomic_inc_16_nv(
+ &psimm[i].leaky_bucket_cnt);
psimm[i].persistent_total++;
if (new_value > ecc_softerr_limit) {
cmn_err(CE_NOTE, "[AFT0] Most recent %d"
@@ -4394,8 +4394,8 @@ ce_count_unum(int status, int len, char *unum)
ecc_softerr_limit,
ecc_softerr_interval / 60,
ecc_softerr_interval % 60);
- atomic_add_16(
- &psimm[i].leaky_bucket_cnt, -1);
+ atomic_dec_16(
+ &psimm[i].leaky_bucket_cnt);
page_status = PR_MCE;
}
} else { /* Intermittent */
diff --git a/usr/src/uts/sun4u/cpu/us3_common.c b/usr/src/uts/sun4u/cpu/us3_common.c
index 301d7874df..00e8e39483 100644
--- a/usr/src/uts/sun4u/cpu/us3_common.c
+++ b/usr/src/uts/sun4u/cpu/us3_common.c
@@ -5642,7 +5642,7 @@ do_scrub(struct scrub_info *csi)
uint32_t *outstanding = &csmp->chsm_outstanding[index];
if (*(csi->csi_enable) && (csmp->chsm_enable[index])) {
- if (atomic_add_32_nv(outstanding, 1) == 1) {
+ if (atomic_inc_32_nv(outstanding) == 1) {
xt_one_unchecked(CPU->cpu_id, setsoftint_tl1,
csi->csi_inum, 0);
}
diff --git a/usr/src/uts/sun4u/os/memscrub.c b/usr/src/uts/sun4u/os/memscrub.c
index 1da293c405..2fda07db9e 100644
--- a/usr/src/uts/sun4u/os/memscrub.c
+++ b/usr/src/uts/sun4u/os/memscrub.c
@@ -1414,7 +1414,7 @@ memscrub_mem_config_post_add(
	 * atomic_inc_32() allows concurrent memory DR operations to use the
* callbacks safely.
*/
- atomic_add_32(&pause_memscrub, 1);
+ atomic_inc_32(&pause_memscrub);
ASSERT(pause_memscrub != 0);
/*
@@ -1423,7 +1423,7 @@ memscrub_mem_config_post_add(
(void) new_memscrub(0); /* retain page retire list */
/* Restore the pause setting. */
- atomic_add_32(&pause_memscrub, -1);
+ atomic_dec_32(&pause_memscrub);
}
/*ARGSUSED*/
@@ -1450,7 +1450,7 @@ memscrub_mem_config_post_del(
	 * atomic_inc_32() allows concurrent memory DR operations to use the
* callbacks safely.
*/
- atomic_add_32(&pause_memscrub, 1);
+ atomic_inc_32(&pause_memscrub);
ASSERT(pause_memscrub != 0);
/*
@@ -1461,7 +1461,7 @@ memscrub_mem_config_post_del(
}
/* Restore the pause setting. */
- atomic_add_32(&pause_memscrub, -1);
+ atomic_dec_32(&pause_memscrub);
}
static kphysm_setup_vector_t memscrub_mem_config_vec = {
diff --git a/usr/src/uts/sun4u/sunfire/io/ac_test.c b/usr/src/uts/sun4u/sunfire/io/ac_test.c
index 729282db9c..5b16bc5558 100644
--- a/usr/src/uts/sun4u/sunfire/io/ac_test.c
+++ b/usr/src/uts/sun4u/sunfire/io/ac_test.c
@@ -24,8 +24,6 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/conf.h>
#include <sys/ddi.h>
@@ -192,7 +190,7 @@ ac_mem_test_start(ac_cfga_pkt_t *pkt, int flag)
test->board = softsp->board;
test->bank = pkt->bank;
test->bufp = kmem_alloc(TEST_PAGESIZE, KM_SLEEP);
- test->info.handle = atomic_add_32_nv(&mem_test_sequence_id, 1);
+ test->info.handle = atomic_inc_32_nv(&mem_test_sequence_id);
(void) drv_getparm(PPID, (ulong_t *)(&(test->info.tester_pid)));
test->info.prev_condition = mem_info->condition;
test->info.page_size = TEST_PAGESIZE;
@@ -404,7 +402,7 @@ ac_mem_test_read(ac_cfga_pkt_t *pkt, int flag)
}
/* bump the busy bit */
- atomic_add_32(&test->in_test, 1);
+ atomic_inc_32(&test->in_test);
mutex_exit(&test_mutex);
/* verify the remaining parameters */
@@ -498,7 +496,7 @@ ac_mem_test_read(ac_cfga_pkt_t *pkt, int flag)
}
read_done:
- atomic_add_32(&test->in_test, -1);
+ atomic_dec_32(&test->in_test);
return (retval);
}
@@ -550,7 +548,7 @@ ac_mem_test_write(ac_cfga_pkt_t *pkt, int flag)
}
/* bump the busy bit */
- atomic_add_32(&test->in_test, 1);
+ atomic_inc_32(&test->in_test);
mutex_exit(&test_mutex);
/* verify the remaining parameters */
@@ -602,6 +600,6 @@ ac_mem_test_write(ac_cfga_pkt_t *pkt, int flag)
kpreempt_enable();
write_done:
- atomic_add_32(&test->in_test, -1);
+ atomic_dec_32(&test->in_test);
return (retval);
}
diff --git a/usr/src/uts/sun4u/sys/pci/pci_axq.h b/usr/src/uts/sun4u/sys/pci/pci_axq.h
index b10d06d208..7cb0e14ba4 100644
--- a/usr/src/uts/sun4u/sys/pci/pci_axq.h
+++ b/usr/src/uts/sun4u/sys/pci/pci_axq.h
@@ -27,8 +27,6 @@
#ifndef _SYS_PCI_AXQ_H
#define _SYS_PCI_AXQ_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/atomic.h>
@@ -42,18 +40,18 @@ extern "C" {
do {\
n = p->pbm_pio_counter;\
} while (n <= 0);\
- if (atomic_add_32_nv(\
- (uint_t *)&p->pbm_pio_counter, -1)\
+ if (atomic_dec_32_nv(\
+ (uint_t *)&p->pbm_pio_counter)\
== (n - 1))\
break;\
- atomic_add_32(\
- (uint_t *)&p->pbm_pio_counter, 1);\
+ atomic_inc_32(\
+ (uint_t *)&p->pbm_pio_counter);\
}\
}
-#define PIO_LIMIT_EXIT(p) atomic_add_32((uint_t *)&p->pbm_pio_counter, 1);
+#define PIO_LIMIT_EXIT(p) atomic_inc_32((uint_t *)&p->pbm_pio_counter);
extern void pci_axq_setup(ddi_map_req_t *mp, pbm_t *pbm_p);
extern void pci_axq_pio_limit(pbm_t *pbm_p);