author     John Levon <john.levon@joyent.com>  2019-05-07 13:54:58 +0000
committer  John Levon <john.levon@joyent.com>  2019-05-14 18:05:29 +0000
commit     f88de5a983b6f9306d7b481aeb57227f91d9794e (patch)
tree       c266f2f1f6e74a29d4fd41f18fb2558a2dbff0c7
parent     04460653a9b6a473bbd02b6cfba261ccbff254a1 (diff)
download   illumos-joyent-f88de5a983b6f9306d7b481aeb57227f91d9794e.tar.gz
-rw-r--r--  usr/src/uts/common/fs/zfs/zvol.c    18
-rw-r--r--  usr/src/uts/common/sys/cpuvar.h      4
-rw-r--r--  usr/src/uts/i86pc/os/smt.c          96
-rw-r--r--  usr/src/uts/i86pc/sys/machcpuvar.h   2
-rw-r--r--  usr/src/uts/intel/ia32/ml/swtch.s    4
5 files changed, 62 insertions, 62 deletions
diff --git a/usr/src/uts/common/fs/zfs/zvol.c b/usr/src/uts/common/fs/zfs/zvol.c
index 4a2bb82e0d..4dfb5b24d8 100644
--- a/usr/src/uts/common/fs/zfs/zvol.c
+++ b/usr/src/uts/common/fs/zfs/zvol.c
@@ -1471,7 +1471,7 @@ zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
DTRACE_PROBE4(zvol__uio__done, dev_t, dev, uio_t *, uio, int, 0, int,
error);
- ht_end_unsafe();
+ smt_end_unsafe();
return (error);
}
@@ -1504,7 +1504,7 @@ zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
return (error);
}
- ht_begin_unsafe();
+ smt_begin_unsafe();
DTRACE_PROBE3(zvol__uio__start, dev_t, dev, uio_t *, uio, int, 1);
@@ -1555,7 +1555,7 @@ zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
DTRACE_PROBE4(zvol__uio__done, dev_t, dev, uio_t *, uio, int, 1, int,
error);
- ht_end_unsafe();
+ smt_end_unsafe();
mutex_enter(&zonep->zone_vfs_lock);
zonep->zone_vfs_rwstats.writes++;
@@ -1827,7 +1827,7 @@ zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
dkc = (struct dk_callback *)arg;
mutex_exit(&zfsdev_state_lock);
- ht_begin_unsafe();
+ smt_begin_unsafe();
zil_commit(zv->zv_zilog, ZVOL_OBJ);
if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
@@ -1835,7 +1835,7 @@ zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
error = 0;
}
- ht_end_unsafe();
+ smt_end_unsafe();
return (error);
@@ -1861,9 +1861,9 @@ zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
} else {
zv->zv_flags &= ~ZVOL_WCE;
mutex_exit(&zfsdev_state_lock);
- ht_begin_unsafe();
+ smt_begin_unsafe();
zil_commit(zv->zv_zilog, ZVOL_OBJ);
- ht_end_unsafe();
+ smt_end_unsafe();
}
return (0);
}
@@ -1916,7 +1916,7 @@ zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
mutex_exit(&zfsdev_state_lock);
- ht_begin_unsafe();
+ smt_begin_unsafe();
for (int i = 0; i < dfl->dfl_num_exts; i++) {
uint64_t start = dfl->dfl_exts[i].dfle_start,
@@ -1973,7 +1973,7 @@ zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
if (!(flag & FKIOCTL))
dfl_free(dfl);
- ht_end_unsafe();
+ smt_end_unsafe();
return (error);
}
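
The zvol.c changes are mechanical renames, but together they show the calling convention: any zvol path doing work on behalf of a zone (zil_commit(), the uio transfer, free-list processing) is bracketed by smt_begin_unsafe()/smt_end_unsafe(). A minimal sketch of that pattern, with do_unsafe_io() as a hypothetical stand-in for the real work:

/* Kernel routines from this patch, declared so the sketch stands alone. */
extern void smt_begin_unsafe(void);
extern void smt_end_unsafe(void);

/* Hypothetical stand-in for zil_commit(), the uio transfer, etc. */
extern int do_unsafe_io(void);

static int
zvol_do_op(void)
{
	int error;

	smt_begin_unsafe();	/* mark this CPU SMT-unsafe for the duration */
	error = do_unsafe_io();
	smt_end_unsafe();	/* clear the mark */

	return (error);
}
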
diff --git a/usr/src/uts/common/sys/cpuvar.h b/usr/src/uts/common/sys/cpuvar.h
index 3d3182ee71..21bdfbd160 100644
--- a/usr/src/uts/common/sys/cpuvar.h
+++ b/usr/src/uts/common/sys/cpuvar.h
@@ -330,8 +330,8 @@ extern cpu_core_t cpu_core[];
* suspended (in the suspend path), or have yet to be resumed (in the resume
* case).
*
- * CPU_DISABLED is used for disabling hyper-threading. It is similar to
- * CPU_OFFLINE, but cannot be onlined without being forced.
+ * CPU_DISABLED is used for disabling SMT. It is similar to CPU_OFFLINE, but
+ * cannot be onlined without being forced.
*
* On some platforms CPUs can be individually powered off.
* The following flags are set for powered off CPUs: CPU_QUIESCED,
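
For illustration, a caller honoring the flag could look like the sketch below; cpu_t and its cpu_flags field are as declared in cpuvar.h, while cpu_can_online() itself is a hypothetical helper, not code from this patch:

#include <sys/cpuvar.h>

/*
 * A CPU_DISABLED CPU (SMT disabled) may only be onlined via the forced
 * path, unlike a merely CPU_OFFLINE one.
 */
static boolean_t
cpu_can_online(cpu_t *cp, boolean_t force)
{
	if ((cp->cpu_flags & CPU_DISABLED) && !force)
		return (B_FALSE);
	return (B_TRUE);
}
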
diff --git a/usr/src/uts/i86pc/os/smt.c b/usr/src/uts/i86pc/os/smt.c
index 8bc6dfd68d..2bf02c0ee7 100644
--- a/usr/src/uts/i86pc/os/smt.c
+++ b/usr/src/uts/i86pc/os/smt.c
@@ -257,31 +257,31 @@ int
smt_acquire(void)
{
clock_t wait = smt_acquire_wait_time;
- cpu_smt_t *ht = &CPU->cpu_m.mcpu_smt;
+ cpu_smt_t *smt = &CPU->cpu_m.mcpu_smt;
zoneid_t zoneid = getzoneid();
cpu_smt_t *sibsmt;
int ret = 0;
ASSERT(!interrupts_enabled());
- if (ht->cs_sib == NULL) {
+ if (smt->cs_sib == NULL) {
/* For the "sequential" L1TF case. */
spec_uarch_flush();
return (1);
}
- sibsmt = &ht->cs_sib->cpu_m.mcpu_smt;
+ sibsmt = &smt->cs_sib->cpu_m.mcpu_smt;
/* A VCPU thread should never change zone. */
- ASSERT3U(CS_ZONE(ht->cs_state), ==, zoneid);
- ASSERT3U(CS_MARK(ht->cs_state), ==, CM_VCPU);
+ ASSERT3U(CS_ZONE(smt->cs_state), ==, zoneid);
+ ASSERT3U(CS_MARK(smt->cs_state), ==, CM_VCPU);
ASSERT3U(zoneid, !=, GLOBAL_ZONEID);
ASSERT3U(curthread->t_preempt, >=, 1);
ASSERT(curthread->t_schedflag & TS_VCPU);
while (ret == 0 && wait > 0) {
- if (yield_to_vcpu(ht->cs_sib, zoneid)) {
+ if (yield_to_vcpu(smt->cs_sib, zoneid)) {
ret = -1;
break;
}
@@ -290,7 +290,7 @@ smt_acquire(void)
lock_set(&sibsmt->cs_lock);
if (sibling_compatible(sibsmt, zoneid)) {
- ht->cs_state = CS_MK(CM_POISONED, zoneid);
+ smt->cs_state = CS_MK(CM_POISONED, zoneid);
sibsmt->cs_sibstate = CS_MK(CM_POISONED, zoneid);
membar_enter();
ret = 1;
@@ -315,25 +315,25 @@ smt_acquire(void)
void
smt_release(void)
{
- cpu_smt_t *ht = &CPU->cpu_m.mcpu_smt;
+ cpu_smt_t *smt = &CPU->cpu_m.mcpu_smt;
zoneid_t zoneid = getzoneid();
cpu_smt_t *sibsmt;
ASSERT(!interrupts_enabled());
- if (ht->cs_sib == NULL)
+ if (smt->cs_sib == NULL)
return;
ASSERT3U(zoneid, !=, GLOBAL_ZONEID);
- ASSERT3U(CS_ZONE(ht->cs_state), ==, zoneid);
- ASSERT3U(CS_MARK(ht->cs_state), ==, CM_POISONED);
+ ASSERT3U(CS_ZONE(smt->cs_state), ==, zoneid);
+ ASSERT3U(CS_MARK(smt->cs_state), ==, CM_POISONED);
ASSERT3U(curthread->t_preempt, >=, 1);
- sibsmt = &ht->cs_sib->cpu_m.mcpu_smt;
+ sibsmt = &smt->cs_sib->cpu_m.mcpu_smt;
lock_set(&sibsmt->cs_lock);
- ht->cs_state = CS_MK(CM_VCPU, zoneid);
+ smt->cs_state = CS_MK(CM_VCPU, zoneid);
sibsmt->cs_sibstate = CS_MK(CM_VCPU, zoneid);
membar_producer();
@@ -341,22 +341,22 @@ smt_release(void)
}
static void
-smt_kick(cpu_smt_t *ht, zoneid_t zoneid)
+smt_kick(cpu_smt_t *smt, zoneid_t zoneid)
{
uint64_t sibstate;
- ASSERT(LOCK_HELD(&ht->cs_lock));
+ ASSERT(LOCK_HELD(&smt->cs_lock));
ASSERT(!interrupts_enabled());
- poke_cpu(ht->cs_sib->cpu_id);
+ poke_cpu(smt->cs_sib->cpu_id);
membar_consumer();
- sibstate = ht->cs_sibstate;
+ sibstate = smt->cs_sibstate;
if (CS_MARK(sibstate) != CM_POISONED || CS_ZONE(sibstate) == zoneid)
return;
- lock_clear(&ht->cs_lock);
+ lock_clear(&smt->cs_lock);
/*
* Spin until we can see the sibling has been kicked out or is otherwise
@@ -364,7 +364,7 @@ smt_kick(cpu_smt_t *ht, zoneid_t zoneid)
*/
for (;;) {
membar_consumer();
- sibstate = ht->cs_sibstate;
+ sibstate = smt->cs_sibstate;
if (CS_MARK(sibstate) != CM_POISONED ||
CS_ZONE(sibstate) == zoneid)
@@ -373,7 +373,7 @@ smt_kick(cpu_smt_t *ht, zoneid_t zoneid)
SMT_PAUSE();
}
- lock_set(&ht->cs_lock);
+ lock_set(&smt->cs_lock);
}
static boolean_t
@@ -386,27 +386,27 @@ void
smt_begin_intr(uint_t pil)
{
ulong_t flags;
- cpu_smt_t *ht;
+ cpu_smt_t *smt;
ASSERT(pil <= PIL_MAX);
flags = intr_clear();
- ht = &CPU->cpu_m.mcpu_smt;
+ smt = &CPU->cpu_m.mcpu_smt;
- if (ht->cs_sib == NULL) {
+ if (smt->cs_sib == NULL) {
intr_restore(flags);
return;
}
- if (atomic_inc_64_nv(&ht->cs_intr_depth) == 1 && pil_needs_kick(pil)) {
- lock_set(&ht->cs_lock);
+ if (atomic_inc_64_nv(&smt->cs_intr_depth) == 1 && pil_needs_kick(pil)) {
+ lock_set(&smt->cs_lock);
membar_consumer();
- if (CS_MARK(ht->cs_sibstate) == CM_POISONED)
- smt_kick(ht, GLOBAL_ZONEID);
+ if (CS_MARK(smt->cs_sibstate) == CM_POISONED)
+ smt_kick(smt, GLOBAL_ZONEID);
- lock_clear(&ht->cs_lock);
+ lock_clear(&smt->cs_lock);
}
intr_restore(flags);
@@ -416,34 +416,34 @@ void
smt_end_intr(void)
{
ulong_t flags;
- cpu_smt_t *ht;
+ cpu_smt_t *smt;
flags = intr_clear();
- ht = &CPU->cpu_m.mcpu_smt;
+ smt = &CPU->cpu_m.mcpu_smt;
- if (ht->cs_sib == NULL) {
+ if (smt->cs_sib == NULL) {
intr_restore(flags);
return;
}
- ASSERT3U(ht->cs_intr_depth, >, 0);
- atomic_dec_64(&ht->cs_intr_depth);
+ ASSERT3U(smt->cs_intr_depth, >, 0);
+ atomic_dec_64(&smt->cs_intr_depth);
intr_restore(flags);
}
static inline boolean_t
-smt_need_kick(cpu_smt_t *ht, zoneid_t zoneid)
+smt_need_kick(cpu_smt_t *smt, zoneid_t zoneid)
{
membar_consumer();
- if (CS_MARK(ht->cs_sibstate) != CM_POISONED)
+ if (CS_MARK(smt->cs_sibstate) != CM_POISONED)
return (B_FALSE);
- if (CS_MARK(ht->cs_state) == CM_UNSAFE)
+ if (CS_MARK(smt->cs_state) == CM_UNSAFE)
return (B_TRUE);
- return (CS_ZONE(ht->cs_sibstate) != zoneid);
+ return (CS_ZONE(smt->cs_sibstate) != zoneid);
}
void
@@ -452,37 +452,37 @@ smt_mark(void)
zoneid_t zoneid = getzoneid();
kthread_t *t = curthread;
ulong_t flags;
- cpu_smt_t *ht;
+ cpu_smt_t *smt;
cpu_t *cp;
flags = intr_clear();
cp = CPU;
- ht = &cp->cpu_m.mcpu_smt;
+ smt = &cp->cpu_m.mcpu_smt;
- if (ht->cs_sib == NULL) {
+ if (smt->cs_sib == NULL) {
intr_restore(flags);
return;
}
- lock_set(&ht->cs_lock);
+ lock_set(&smt->cs_lock);
/*
* If we were a nested interrupt and went through the resume_from_intr()
* path, we can now be resuming to a pinning interrupt thread; in which
* case, skip marking, until we later resume to a "real" thread.
*/
- if (ht->cs_intr_depth > 0) {
+ if (smt->cs_intr_depth > 0) {
ASSERT3P(t->t_intr, !=, NULL);
- if (smt_need_kick(ht, zoneid))
- smt_kick(ht, zoneid);
+ if (smt_need_kick(smt, zoneid))
+ smt_kick(smt, zoneid);
goto out;
}
if (t == t->t_cpu->cpu_idle_thread) {
ASSERT3U(zoneid, ==, GLOBAL_ZONEID);
- ht->cs_state = CS_MK(CM_IDLE, zoneid);
+ smt->cs_state = CS_MK(CM_IDLE, zoneid);
} else {
uint64_t state = CM_THREAD;
@@ -491,10 +491,10 @@ smt_mark(void)
else if (t->t_schedflag & TS_VCPU)
state = CM_VCPU;
- ht->cs_state = CS_MK(state, zoneid);
+ smt->cs_state = CS_MK(state, zoneid);
- if (smt_need_kick(ht, zoneid))
- smt_kick(ht, zoneid);
+ if (smt_need_kick(smt, zoneid))
+ smt_kick(smt, zoneid);
}
out:
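
With the rename done, the protocol in smt.c reads more naturally: a VCPU thread calls smt_acquire() with interrupts disabled before entering guest context and smt_release() on the way out. A hedged sketch of the expected call pattern, where vcpu_enter_guest() is a hypothetical stand-in for the hypervisor entry path:

#include <sys/types.h>

/* Routines from this patch and from the kernel, declared for the sketch. */
extern int smt_acquire(void);
extern void smt_release(void);
extern ulong_t intr_clear(void);
extern void intr_restore(ulong_t);
extern void vcpu_enter_guest(void);	/* hypothetical guest entry */

static int
vcpu_run_once(void)
{
	ulong_t flags;
	int ret;

	flags = intr_clear();		/* smt_acquire() asserts intrs are off */
	ret = smt_acquire();		/* 1: success, 0: timed out,
					   -1: yield to a sibling VCPU */
	if (ret == 1) {
		vcpu_enter_guest();
		smt_release();
	}
	intr_restore(flags);

	return (ret);
}
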
diff --git a/usr/src/uts/i86pc/sys/machcpuvar.h b/usr/src/uts/i86pc/sys/machcpuvar.h
index 398912c3a7..d6748e78fc 100644
--- a/usr/src/uts/i86pc/sys/machcpuvar.h
+++ b/usr/src/uts/i86pc/sys/machcpuvar.h
@@ -142,7 +142,7 @@ struct kpti_frame {
typedef struct cpu_smt {
lock_t cs_lock;
- char ch_pad[56];
+ char cs_pad[56];
struct cpu *cs_sib;
volatile uint64_t cs_intr_depth;
volatile uint64_t cs_state;
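
One layout note: cs_lock is a single byte, and the 56-byte pad plus the natural alignment of the cs_sib pointer push all of the state words onto the next 64-byte cache line, presumably so lock traffic does not false-share with them. A compile-time sketch of that arithmetic, using userland mock types and only the members visible in the hunk above:

#include <stddef.h>

typedef unsigned char lock_t;	/* mock of the kernel's one-byte lock_t */
struct cpu;			/* opaque, as in the kernel */

typedef struct cpu_smt {
	lock_t cs_lock;
	char cs_pad[56];
	struct cpu *cs_sib;
	volatile unsigned long long cs_intr_depth;
	volatile unsigned long long cs_state;
} cpu_smt_t;

/* 1 (lock) + 56 (pad), rounded to pointer alignment, is one cache line. */
_Static_assert(offsetof(cpu_smt_t, cs_sib) == 64,
    "cs_lock and cs_pad should fill exactly one 64-byte line");
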
diff --git a/usr/src/uts/intel/ia32/ml/swtch.s b/usr/src/uts/intel/ia32/ml/swtch.s
index c2c9fd9bd2..bc4ebd7202 100644
--- a/usr/src/uts/intel/ia32/ml/swtch.s
+++ b/usr/src/uts/intel/ia32/ml/swtch.s
@@ -343,7 +343,7 @@
call smap_disable
.nosmap:
- call ht_mark
+ call smt_mark
/*
* Restore non-volatile registers, then have spl0 return to the
@@ -465,7 +465,7 @@ resume_from_zombie_return:
STORE_INTR_START(%r12)
- call ht_mark
+ call smt_mark
/*
* Restore non-volatile registers, then have spl0 return to the