Diffstat (limited to 'usr/src/uts/i86pc')
-rw-r--r--   usr/src/uts/i86pc/os/intr.c      11
-rw-r--r--   usr/src/uts/i86pc/os/memnode.c   11
-rw-r--r--   usr/src/uts/i86pc/os/x_call.c    20
-rw-r--r--   usr/src/uts/i86pc/vm/hat_i86.c    8
-rw-r--r--   usr/src/uts/i86pc/vm/hat_pte.h   11
5 files changed, 32 insertions, 29 deletions
diff --git a/usr/src/uts/i86pc/os/intr.c b/usr/src/uts/i86pc/os/intr.c
index 6006ca1202..2569812c47 100644
--- a/usr/src/uts/i86pc/os/intr.c
+++ b/usr/src/uts/i86pc/os/intr.c
@@ -1218,15 +1218,16 @@ cpu_intr_swtch_enter(kthread_id_t t)
* PIL and zeroed its timestamp. Since there was no pinned thread to
* return to, swtch() gets called and we end up here.
*
- * Note that we use atomic ops below (cas64 and atomic_add_64), which
- * we don't use in the functions above, because we're not called
- * with interrupts blocked, but the epilog/prolog functions are.
+ * Note that we use atomic ops below (atomic_cas_64 and
+ * atomic_add_64), which we don't use in the functions above,
+ * because we're not called with interrupts blocked, but the
+ * epilog/prolog functions are.
*/
if (t->t_intr_start) {
do {
start = t->t_intr_start;
interval = tsc_read() - start;
- } while (cas64(&t->t_intr_start, start, 0) != start);
+ } while (atomic_cas_64(&t->t_intr_start, start, 0) != start);
cpu = CPU;
cpu->cpu_m.intrstat[t->t_pil][0] += interval;
@@ -1250,7 +1251,7 @@ cpu_intr_swtch_exit(kthread_id_t t)
do {
ts = t->t_intr_start;
- } while (cas64(&t->t_intr_start, ts, tsc_read()) != ts);
+ } while (atomic_cas_64(&t->t_intr_start, ts, tsc_read()) != ts);
}
/*
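Both hunks above follow the same compare-and-swap retry pattern: read t_intr_start, compute against it, and only commit if the field still holds the value that was read. Below is a minimal userland sketch of that pattern, assuming an illumos toolchain where atomic_cas_64() is also available via atomic_ops(3C); intr_start and the fake TSC value are illustrative stand-ins for t->t_intr_start and tsc_read(), not the kernel's types.

/*
 * Sketch of the read-then-swap pattern used in cpu_intr_swtch_enter():
 * retry until the compare-and-swap observes the same value we read, so
 * the interval is computed against the timestamp we actually cleared.
 */
#include <sys/atomic.h>
#include <inttypes.h>
#include <stdio.h>

static volatile uint64_t intr_start = 1000;     /* stand-in for t->t_intr_start */
static uint64_t fake_tsc = 1500;                /* stand-in for tsc_read() */

int
main(void)
{
        uint64_t start, interval;

        do {
                start = intr_start;
                interval = fake_tsc - start;
                /* Zero the field only if nobody changed it since we read it. */
        } while (atomic_cas_64(&intr_start, start, 0) != start);

        (void) printf("interval = %" PRIu64 "\n", interval);
        return (0);
}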
diff --git a/usr/src/uts/i86pc/os/memnode.c b/usr/src/uts/i86pc/os/memnode.c
index 35185bcd84..2c1c6e91d5 100644
--- a/usr/src/uts/i86pc/os/memnode.c
+++ b/usr/src/uts/i86pc/os/memnode.c
@@ -86,7 +86,7 @@ mem_node_add_slice(pfn_t start, pfn_t end)
mnode = PFN_2_MEM_NODE(start);
ASSERT(mnode >= 0 && mnode < max_mem_nodes);
- if (cas32((uint32_t *)&mem_node_config[mnode].exists, 0, 1)) {
+ if (atomic_cas_32((uint32_t *)&mem_node_config[mnode].exists, 0, 1)) {
/*
* Add slice to existing node.
*/
@@ -101,7 +101,8 @@ mem_node_add_slice(pfn_t start, pfn_t end)
do {
oldmask = memnodes_mask;
newmask = memnodes_mask | (1ull << mnode);
- } while (cas64(&memnodes_mask, oldmask, newmask) != oldmask);
+ } while (atomic_cas_64(&memnodes_mask, oldmask, newmask) !=
+ oldmask);
}
/*
@@ -161,7 +162,7 @@ mem_node_del_slice(pfn_t start, pfn_t end)
do {
omask = memnodes_mask;
nmask = omask & ~(1ull << mnode);
- } while (cas64(&memnodes_mask, omask, nmask) != omask);
+ } while (atomic_cas_64(&memnodes_mask, omask, nmask) != omask);
atomic_add_16(&num_memnodes, -1);
mem_node_config[mnode].exists = 0;
}
@@ -229,7 +230,7 @@ mem_node_alloc()
* a first time memnode creation race.
*/
for (mnode = 0; mnode < max_mem_nodes; mnode++)
- if (cas32((uint32_t *)&mem_node_config[mnode].exists,
+ if (atomic_cas_32((uint32_t *)&mem_node_config[mnode].exists,
0, 1) == 0)
break;
@@ -242,7 +243,7 @@ mem_node_alloc()
do {
oldmask = memnodes_mask;
newmask = memnodes_mask | (1ull << mnode);
- } while (cas64(&memnodes_mask, oldmask, newmask) != oldmask);
+ } while (atomic_cas_64(&memnodes_mask, oldmask, newmask) != oldmask);
return (mnode);
}
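mem_node_add_slice(), mem_node_del_slice() and mem_node_alloc() all update the shared memnodes_mask bitmask with the same retry loop, so a concurrent update to another node's bit is never lost. A minimal sketch of that bit set/clear pattern follows, again assuming atomic_cas_64() from atomic_ops(3C); node_mask and the node numbers are illustrative stand-ins for memnodes_mask and mnode.

#include <sys/atomic.h>
#include <inttypes.h>
#include <stdio.h>

static volatile uint64_t node_mask = 0;

/* Set one node's bit without disturbing concurrent updates to other bits. */
static void
mask_set_bit(int node)
{
        uint64_t oldmask, newmask;

        do {
                oldmask = node_mask;
                newmask = oldmask | (1ull << node);
        } while (atomic_cas_64(&node_mask, oldmask, newmask) != oldmask);
}

/* Clear one node's bit with the same retry loop. */
static void
mask_clear_bit(int node)
{
        uint64_t omask, nmask;

        do {
                omask = node_mask;
                nmask = omask & ~(1ull << node);
        } while (atomic_cas_64(&node_mask, omask, nmask) != omask);
}

int
main(void)
{
        mask_set_bit(3);
        mask_set_bit(5);
        mask_clear_bit(3);
        (void) printf("mask = 0x%" PRIx64 "\n", node_mask);
        return (0);
}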
diff --git a/usr/src/uts/i86pc/os/x_call.c b/usr/src/uts/i86pc/os/x_call.c
index 17d5123508..8c2821fc73 100644
--- a/usr/src/uts/i86pc/os/x_call.c
+++ b/usr/src/uts/i86pc/os/x_call.c
@@ -51,9 +51,9 @@
*
* This implementation uses a message passing architecture to allow multiple
* concurrent cross calls to be in flight at any given time. We use the cmpxchg
- * instruction, aka casptr(), to implement simple efficient work queues for
- * message passing between CPUs with almost no need for regular locking.
- * See xc_extract() and xc_insert() below.
+ * instruction, aka atomic_cas_ptr(), to implement simple efficient work
+ * queues for message passing between CPUs with almost no need for regular
+ * locking. See xc_extract() and xc_insert() below.
*
* The general idea is that initiating a cross call means putting a message
* on a target(s) CPU's work queue. Any synchronization is handled by passing
@@ -64,8 +64,9 @@
* with every message that finishes all processing.
*
* The code needs no mfence or other membar_*() calls. The uses of
- * casptr(), cas32() and atomic_dec_32() for the message passing are
- * implemented with LOCK prefix instructions which are equivalent to mfence.
+ * atomic_cas_ptr(), atomic_cas_32() and atomic_dec_32() for the message
+ * passing are implemented with LOCK prefix instructions which are
+ * equivalent to mfence.
*
* One interesting aspect of this implementation is that it allows 2 or more
* CPUs to initiate cross calls to intersecting sets of CPUs at the same time.
@@ -144,7 +145,7 @@ xc_increment(struct machcpu *mcpu)
int old;
do {
old = mcpu->xc_work_cnt;
- } while (cas32((uint32_t *)&mcpu->xc_work_cnt, old, old + 1) != old);
+ } while (atomic_cas_32(&mcpu->xc_work_cnt, old, old + 1) != old);
return (old);
}
@@ -168,7 +169,7 @@ xc_insert(void *queue, xc_msg_t *msg)
do {
old_head = (xc_msg_t *)*(volatile xc_msg_t **)queue;
msg->xc_next = old_head;
- } while (casptr(queue, old_head, msg) != old_head);
+ } while (atomic_cas_ptr(queue, old_head, msg) != old_head);
}
/*
@@ -185,7 +186,8 @@ xc_extract(xc_msg_t **queue)
old_head = (xc_msg_t *)*(volatile xc_msg_t **)queue;
if (old_head == NULL)
return (old_head);
- } while (casptr(queue, old_head, old_head->xc_next) != old_head);
+ } while (atomic_cas_ptr(queue, old_head, old_head->xc_next) !=
+ old_head);
old_head->xc_next = NULL;
return (old_head);
}
@@ -608,7 +610,7 @@ xc_priority_common(
XC_BT_SET(xc_priority_set, c);
send_dirint(c, XC_HI_PIL);
for (i = 0; i < 10; ++i) {
- (void) casptr(&cpup->cpu_m.xc_msgbox,
+ (void) atomic_cas_ptr(&cpup->cpu_m.xc_msgbox,
cpup->cpu_m.xc_msgbox, cpup->cpu_m.xc_msgbox);
}
}
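xc_insert() and xc_extract() implement the lock-free LIFO work queue the header comment describes: both sides retry an atomic_cas_ptr() on the queue head until the swap succeeds without interference. The sketch below reproduces that head-swap pattern in userland under the same atomic_ops(3C) assumption; msg_t, queue_insert() and queue_extract() are local stand-ins for xc_msg_t and the kernel functions.

#include <sys/atomic.h>
#include <stdio.h>

typedef struct msg {
        struct msg      *next;
        int             payload;
} msg_t;

static msg_t *head;     /* shared queue head, playing the role of xc_msgbox */

/* Push a message: link it to the current head, then swap the head pointer. */
static void
queue_insert(msg_t **queue, msg_t *msg)
{
        msg_t *old_head;

        do {
                old_head = *(msg_t *volatile *)queue;
                msg->next = old_head;
        } while (atomic_cas_ptr(queue, old_head, msg) != old_head);
}

/* Pop a message: unlink the current head, retrying if someone beat us. */
static msg_t *
queue_extract(msg_t **queue)
{
        msg_t *old_head;

        do {
                old_head = *(msg_t *volatile *)queue;
                if (old_head == NULL)
                        return (NULL);
        } while (atomic_cas_ptr(queue, old_head, old_head->next) != old_head);
        old_head->next = NULL;
        return (old_head);
}

int
main(void)
{
        msg_t a = { NULL, 1 }, b = { NULL, 2 };
        msg_t *m;

        queue_insert(&head, &a);
        queue_insert(&head, &b);
        while ((m = queue_extract(&head)) != NULL)
                (void) printf("payload %d\n", m->payload);
        return (0);
}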
diff --git a/usr/src/uts/i86pc/vm/hat_i86.c b/usr/src/uts/i86pc/vm/hat_i86.c
index 8da02a4c36..4f41c68d9f 100644
--- a/usr/src/uts/i86pc/vm/hat_i86.c
+++ b/usr/src/uts/i86pc/vm/hat_i86.c
@@ -944,7 +944,7 @@ hat_init_finish(void)
/*
* On 32 bit PAE mode, PTE's are 64 bits, but ordinary atomic memory references
- * are 32 bit, so for safety we must use cas64() to install these.
+ * are 32 bit, so for safety we must use atomic_cas_64() to install these.
*/
#ifdef __i386
static void
@@ -967,7 +967,7 @@ reload_pae32(hat_t *hat, cpu_t *cpu)
pte = dest[i];
if (pte == src[i])
break;
- if (cas64(dest + i, pte, src[i]) != src[i])
+ if (atomic_cas_64(dest + i, pte, src[i]) != src[i])
break;
}
}
@@ -1988,7 +1988,7 @@ flush_all_tlb_entries(void)
#define TLB_CPU_HALTED (01ul)
#define TLB_INVAL_ALL (02ul)
#define CAS_TLB_INFO(cpu, old, new) \
- caslong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new))
+ atomic_cas_ulong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new))
/*
* Record that a CPU is going idle
@@ -1996,7 +1996,7 @@ flush_all_tlb_entries(void)
void
tlb_going_idle(void)
{
- atomic_or_long((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED);
+ atomic_or_ulong((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED);
}
/*
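The TLB bookkeeping above pairs atomic_or_ulong(), to mark a CPU as halted, with CAS_TLB_INFO() (now atomic_cas_ulong()), so the flag word is only cleared if no invalidation request was recorded in the meantime. A minimal sketch of that pairing follows; tlb_info stands in for cpu_m.mcpu_tlb_info, and the simplified service path is illustrative, not the kernel's actual wakeup code.

#include <sys/types.h>
#include <sys/atomic.h>
#include <stdio.h>

#define TLB_CPU_HALTED  (01ul)
#define TLB_INVAL_ALL   (02ul)

static volatile ulong_t tlb_info;

/* Idle path: record that this CPU has halted. */
static void
tlb_going_idle(void)
{
        atomic_or_ulong(&tlb_info, TLB_CPU_HALTED);
}

/* Wakeup path: clear the halted bit only if no invalidation sneaked in. */
static void
tlb_service(void)
{
        ulong_t info = tlb_info;

        if (info == TLB_CPU_HALTED &&
            atomic_cas_ulong(&tlb_info, TLB_CPU_HALTED, 0) == TLB_CPU_HALTED)
                (void) printf("woke up cleanly\n");
        else if (info & TLB_INVAL_ALL)
                (void) printf("must flush the TLB first\n");
}

int
main(void)
{
        tlb_going_idle();
        tlb_service();
        return (0);
}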
diff --git a/usr/src/uts/i86pc/vm/hat_pte.h b/usr/src/uts/i86pc/vm/hat_pte.h
index 8e5686f4ff..756df7020a 100644
--- a/usr/src/uts/i86pc/vm/hat_pte.h
+++ b/usr/src/uts/i86pc/vm/hat_pte.h
@@ -26,8 +26,6 @@
#ifndef _VM_HAT_PTE_H
#define _VM_HAT_PTE_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -235,7 +233,8 @@ struct hat_mmu_info {
* The concept of a VA hole exists in AMD64. This might need to be made
* model specific eventually.
*
- * In the 64 bit kernel PTE loads are atomic, but need cas64 on 32 bit kernel.
+ * In the 64 bit kernel PTE loads are atomic, but need atomic_cas_64 on 32
+ * bit kernel.
*/
#if defined(__amd64)
@@ -248,7 +247,7 @@ struct hat_mmu_info {
#define FMT_PTE "0x%lx"
#define GET_PTE(ptr) (*(x86pte_t *)(ptr))
#define SET_PTE(ptr, pte) (*(x86pte_t *)(ptr) = pte)
-#define CAS_PTE(ptr, x, y) cas64(ptr, x, y)
+#define CAS_PTE(ptr, x, y) atomic_cas_64(ptr, x, y)
#elif defined(__i386)
@@ -263,8 +262,8 @@ extern x86pte_t get_pte64(x86pte_t *ptr);
((mmu.pae_hat ? ((x86pte32_t *)(ptr))[1] = (pte >> 32) : 0), \
*(x86pte32_t *)(ptr) = pte)
#define CAS_PTE(ptr, x, y) \
- (mmu.pae_hat ? cas64(ptr, x, y) : \
- cas32((uint32_t *)(ptr), (uint32_t)(x), (uint32_t)(y)))
+ (mmu.pae_hat ? atomic_cas_64(ptr, x, y) : \
+ atomic_cas_32((uint32_t *)(ptr), (uint32_t)(x), (uint32_t)(y)))
#endif /* __i386 */
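CAS_PTE() picks the CAS width at run time on i386: a 64-bit compare-and-swap when the hat uses PAE (8-byte PTEs), a 32-bit one otherwise. Below is a minimal sketch of that selection together with the usual install-if-unchanged retry loop a caller performs; pae_hat, x86pte_t and the PTE values are illustrative stand-ins for mmu.pae_hat and the real hat_pte.h definitions.

#include <sys/atomic.h>
#include <inttypes.h>
#include <stdio.h>

typedef uint64_t x86pte_t;

static int pae_hat = 1;                         /* pretend PAE is enabled */
static x86pte_t pte_slot = 0x1000ULL | 0x1;     /* pretend: existing mapping */

/* Width of the CAS depends on whether PTEs are 64 or 32 bits wide. */
#define CAS_PTE(ptr, x, y) \
        (pae_hat ? atomic_cas_64((volatile uint64_t *)(ptr), (x), (y)) : \
            atomic_cas_32((uint32_t *)(ptr), (uint32_t)(x), (uint32_t)(y)))

int
main(void)
{
        x86pte_t old, new = 0x2000ULL | 0x1;

        /* Install the new PTE only if the old value is still in place. */
        do {
                old = pte_slot;
        } while (CAS_PTE(&pte_slot, old, new) != old);

        (void) printf("pte = 0x%" PRIx64 "\n", (uint64_t)pte_slot);
        return (0);
}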