summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPatrick Mooney <pmooney@pfmooney.com>2018-02-26 20:42:20 +0000
committerPatrick Mooney <pmooney@pfmooney.com>2018-03-08 17:36:28 +0000
commitecbf093fc1011c08dd6f9c033e92ac8120773be3 (patch)
tree02ec72109a55eb2da00ad7b1a29e5beb986e21ed
parent5367c10ccf59c25a48d868138bd4e511e70b0dcf (diff)
downloadillumos-joyent-ecbf093fc1011c08dd6f9c033e92ac8120773be3.tar.gz
OS-6698 thread_affinity_set(CPU_CURRENT) can skip cpu_lock
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com> Reviewed by: John Levon <john.levon@joyent.com> Approved by: John Levon <john.levon@joyent.com>
-rw-r--r--usr/src/uts/common/os/cpu.c65
1 file changed, 40 insertions, 25 deletions
diff --git a/usr/src/uts/common/os/cpu.c b/usr/src/uts/common/os/cpu.c
index 9c7600ea24..2efb68889c 100644
--- a/usr/src/uts/common/os/cpu.c
+++ b/usr/src/uts/common/os/cpu.c
@@ -21,7 +21,7 @@
/*
* Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
- * Copyright 2017 Joyent, Inc.
+ * Copyright 2018 Joyent, Inc.
*/
/*
@@ -388,36 +388,49 @@ force_thread_migrate(kthread_id_t tp)
/*
* Set affinity for a specified CPU.
- * A reference count is incremented and the affinity is held until the
- * reference count is decremented to zero by thread_affinity_clear().
- * This is so regions of code requiring affinity can be nested.
- * Caller needs to ensure that cpu_id remains valid, which can be
- * done by holding cpu_lock across this call, unless the caller
- * specifies CPU_CURRENT in which case the cpu_lock will be acquired
- * by thread_affinity_set and CPU->cpu_id will be the target CPU.
+ *
+ * Specifying a cpu_id of CPU_CURRENT, allowed _only_ when setting affinity for
+ * curthread, will set affinity to the CPU on which the thread is currently
+ * running. For other cpu_id values, the caller must ensure that the
+ * referenced CPU remains valid, which can be done by holding cpu_lock across
+ * this call.
+ *
+ * CPU affinity is guaranteed after return of thread_affinity_set(). If a
+ * caller setting affinity to CPU_CURRENT requires that its thread not migrate
+ * CPUs prior to a successful return, it should take extra precautions (such as
+ * its own call to kpreempt_disable) to ensure that safety.
+ *
+ * A CPU affinity reference count is maintained by thread_affinity_set and
+ * thread_affinity_clear (incrementing and decrementing it, respectively),
+ * maintaining CPU affinity while the count is non-zero, and allowing regions
+ * of code which require affinity to be nested.
*/
void
thread_affinity_set(kthread_id_t t, int cpu_id)
{
- cpu_t *cp;
- int c;
+ cpu_t *cp;
ASSERT(!(t == curthread && t->t_weakbound_cpu != NULL));
- if ((c = cpu_id) == CPU_CURRENT) {
- mutex_enter(&cpu_lock);
- cpu_id = CPU->cpu_id;
+ if (cpu_id == CPU_CURRENT) {
+ VERIFY3P(t, ==, curthread);
+ kpreempt_disable();
+ cp = CPU;
+ } else {
+ /*
+ * We should be asserting that cpu_lock is held here, but
+ * the NCA code doesn't acquire it. The following assert
+ * should be uncommented when the NCA code is fixed.
+ *
+ * ASSERT(MUTEX_HELD(&cpu_lock));
+ */
+ VERIFY((cpu_id >= 0) && (cpu_id < NCPU));
+ cp = cpu[cpu_id];
+
+ /* user must provide a good cpu_id */
+ VERIFY(cp != NULL);
}
- /*
- * We should be asserting that cpu_lock is held here, but
- * the NCA code doesn't acquire it. The following assert
- * should be uncommented when the NCA code is fixed.
- *
- * ASSERT(MUTEX_HELD(&cpu_lock));
- */
- ASSERT((cpu_id >= 0) && (cpu_id < NCPU));
- cp = cpu[cpu_id];
- ASSERT(cp != NULL); /* user must provide a good cpu_id */
+
/*
* If there is already a hard affinity requested, and this affinity
* conflicts with that, panic.
@@ -434,13 +447,15 @@ thread_affinity_set(kthread_id_t t, int cpu_id)
* Make sure we're running on the right CPU.
*/
if (cp != t->t_cpu || t != curthread) {
+ ASSERT(cpu_id != CPU_CURRENT);
force_thread_migrate(t); /* drops thread lock */
} else {
thread_unlock(t);
}
- if (c == CPU_CURRENT)
- mutex_exit(&cpu_lock);
+ if (cpu_id == CPU_CURRENT) {
+ kpreempt_enable();
+ }
}
/*