Diffstat (limited to 'usr/src/uts/common/os/cpu.c')
-rw-r--r--  usr/src/uts/common/os/cpu.c | 204
1 file changed, 161 insertions, 43 deletions
diff --git a/usr/src/uts/common/os/cpu.c b/usr/src/uts/common/os/cpu.c
index 87c0896814..4648dae9dd 100644
--- a/usr/src/uts/common/os/cpu.c
+++ b/usr/src/uts/common/os/cpu.c
@@ -21,6 +21,7 @@
/*
* Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
+ * Copyright 2018 Joyent, Inc.
*/
/*
@@ -108,7 +109,8 @@ kmutex_t cpu_lock;
cpu_t *cpu_list; /* list of all CPUs */
cpu_t *clock_cpu_list; /* used by clock to walk CPUs */
cpu_t *cpu_active; /* list of active CPUs */
-static cpuset_t cpu_available; /* set of available CPUs */
+cpuset_t cpu_active_set; /* cached set of active CPUs */
+cpuset_t cpu_available; /* set of available CPUs */
cpuset_t cpu_seqid_inuse; /* which cpu_seqids are in use */
cpu_t **cpu_seq; /* ptrs to CPUs, indexed by seq_id */
@@ -386,36 +388,56 @@ force_thread_migrate(kthread_id_t tp)
/*
* Set affinity for a specified CPU.
- * A reference count is incremented and the affinity is held until the
- * reference count is decremented to zero by thread_affinity_clear().
- * This is so regions of code requiring affinity can be nested.
- * Caller needs to ensure that cpu_id remains valid, which can be
- * done by holding cpu_lock across this call, unless the caller
- * specifies CPU_CURRENT in which case the cpu_lock will be acquired
- * by thread_affinity_set and CPU->cpu_id will be the target CPU.
+ *
+ * Specifying a cpu_id of CPU_CURRENT, allowed _only_ when setting affinity for
+ * curthread, will set affinity to the CPU on which the thread is currently
+ * running. For other cpu_id values, the caller must ensure that the
+ * referenced CPU remains valid, which can be done by holding cpu_lock across
+ * this call.
+ *
+ * CPU affinity is guaranteed after return of thread_affinity_set(). If a
+ * caller setting affinity to CPU_CURRENT requires that its thread not migrate
+ * CPUs prior to a successful return, it should take extra precautions (such as
+ * its own call to kpreempt_disable) to ensure that safety.
+ *
+ * CPU_BEST can be used to pick a "best" CPU to migrate to, including
+ * potentially the current CPU.
+ *
+ * A CPU affinity reference count is maintained by thread_affinity_set and
+ * thread_affinity_clear (incrementing and decrementing it, respectively),
+ * maintaining CPU affinity while the count is non-zero, and allowing regions
+ * of code which require affinity to be nested.
*/
void
thread_affinity_set(kthread_id_t t, int cpu_id)
{
- cpu_t *cp;
- int c;
+ cpu_t *cp;
ASSERT(!(t == curthread && t->t_weakbound_cpu != NULL));
- if ((c = cpu_id) == CPU_CURRENT) {
- mutex_enter(&cpu_lock);
- cpu_id = CPU->cpu_id;
+ if (cpu_id == CPU_CURRENT) {
+ VERIFY3P(t, ==, curthread);
+ kpreempt_disable();
+ cp = CPU;
+ } else if (cpu_id == CPU_BEST) {
+ VERIFY3P(t, ==, curthread);
+ kpreempt_disable();
+ cp = disp_choose_best_cpu();
+ } else {
+ /*
+ * We should be asserting that cpu_lock is held here, but
+ * the NCA code doesn't acquire it. The following assert
+ * should be uncommented when the NCA code is fixed.
+ *
+ * ASSERT(MUTEX_HELD(&cpu_lock));
+ */
+ VERIFY((cpu_id >= 0) && (cpu_id < NCPU));
+ cp = cpu[cpu_id];
+
+ /* user must provide a good cpu_id */
+ VERIFY(cp != NULL);
}
- /*
- * We should be asserting that cpu_lock is held here, but
- * the NCA code doesn't acquire it. The following assert
- * should be uncommented when the NCA code is fixed.
- *
- * ASSERT(MUTEX_HELD(&cpu_lock));
- */
- ASSERT((cpu_id >= 0) && (cpu_id < NCPU));
- cp = cpu[cpu_id];
- ASSERT(cp != NULL); /* user must provide a good cpu_id */
+
/*
* If there is already a hard affinity requested, and this affinity
* conflicts with that, panic.
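With the change above, the CPU_CURRENT and CPU_BEST cases no longer take cpu_lock and instead rely on kpreempt_disable() while the target CPU is chosen. A minimal usage sketch, assuming only what the revised comment block states (the wrapper function name is hypothetical); thread_affinity_clear() is the existing counterpart that drops the reference taken here:

void
example_affine_region(void)
{
        /* Let the dispatcher pick a target CPU; it may be the current one. */
        thread_affinity_set(curthread, CPU_BEST);

        /* ... work that must not migrate between CPUs ... */

        /* Drop the affinity reference taken above. */
        thread_affinity_clear(curthread);
}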
@@ -432,13 +454,14 @@ thread_affinity_set(kthread_id_t t, int cpu_id)
* Make sure we're running on the right CPU.
*/
if (cp != t->t_cpu || t != curthread) {
+ ASSERT(cpu_id != CPU_CURRENT);
force_thread_migrate(t); /* drops thread lock */
} else {
thread_unlock(t);
}
- if (c == CPU_CURRENT)
- mutex_exit(&cpu_lock);
+ if (cpu_id == CPU_CURRENT || cpu_id == CPU_BEST)
+ kpreempt_enable();
}
/*
@@ -1473,8 +1496,8 @@ again: for (loop_count = 0; (*bound_func)(cp, 0); loop_count++) {
* Update CPU last ran on if it was this CPU
*/
if (t->t_cpu == cp && t->t_bound_cpu != cp)
- t->t_cpu = disp_lowpri_cpu(ncp,
- t->t_lpl, t->t_pri, NULL);
+ t->t_cpu = disp_lowpri_cpu(ncp, t,
+ t->t_pri);
ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
t->t_weakbound_cpu == cp);
@@ -1516,10 +1539,9 @@ again: for (loop_count = 0; (*bound_func)(cp, 0); loop_count++) {
* Update CPU last ran on if it was this CPU
*/
- if (t->t_cpu == cp && t->t_bound_cpu != cp) {
- t->t_cpu = disp_lowpri_cpu(ncp,
- t->t_lpl, t->t_pri, NULL);
- }
+ if (t->t_cpu == cp && t->t_bound_cpu != cp)
+ t->t_cpu = disp_lowpri_cpu(ncp, t, t->t_pri);
+
ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
t->t_weakbound_cpu == cp);
t = t->t_next;
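Both hunks above track a signature change to disp_lowpri_cpu(): the thread is now passed directly rather than its lpl, and the trailing current-CPU hint argument is gone. Inferred from these call sites only (not a definitive prototype), the call shape changes roughly as follows:

/* old call shape, per the removed lines */
t->t_cpu = disp_lowpri_cpu(ncp, t->t_lpl, t->t_pri, NULL);

/* new call shape, per the added lines */
t->t_cpu = disp_lowpri_cpu(ncp, t, t->t_pri);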
@@ -1724,6 +1746,7 @@ cpu_list_init(cpu_t *cp)
cp->cpu_part = &cp_default;
CPUSET_ADD(cpu_available, cp->cpu_id);
+ CPUSET_ADD(cpu_active_set, cp->cpu_id);
}
/*
@@ -1895,6 +1918,7 @@ cpu_add_active_internal(cpu_t *cp)
cp->cpu_prev_onln = cpu_active->cpu_prev_onln;
cpu_active->cpu_prev_onln->cpu_next_onln = cp;
cpu_active->cpu_prev_onln = cp;
+ CPUSET_ADD(cpu_active_set, cp->cpu_id);
if (pp->cp_cpulist) {
cp->cpu_next_part = pp->cp_cpulist;
@@ -1965,6 +1989,7 @@ cpu_remove_active(cpu_t *cp)
}
cp->cpu_next_onln = cp;
cp->cpu_prev_onln = cp;
+ CPUSET_DEL(cpu_active_set, cp->cpu_id);
cp->cpu_prev_part->cpu_next_part = cp->cpu_next_part;
cp->cpu_next_part->cpu_prev_part = cp->cpu_prev_part;
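Together with the cpu_list_init() and cpu_add_active_internal() hunks, this keeps the new cpu_active_set cpuset in step with the cpu_active list. A hedged sketch of a consumer, using the cpu_in_set() accessor added later in this diff (the helper name is hypothetical):

static boolean_t
example_cpu_is_active(processorid_t id)
{
        ASSERT(MUTEX_HELD(&cpu_lock));

        /* Test membership in the cached set instead of walking cpu_active. */
        return (cpu_in_set(&cpu_active_set, id) != 0 ? B_TRUE : B_FALSE);
}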
@@ -2704,13 +2729,18 @@ cpu_bind_thread(kthread_id_t tp, processorid_t bind, processorid_t *obind,
return (0);
}
-#if CPUSET_WORDS > 1
-/*
- * Functions for implementing cpuset operations when a cpuset is more
- * than one word. On platforms where a cpuset is a single word these
- * are implemented as macros in cpuvar.h.
- */
+cpuset_t *
+cpuset_alloc(int kmflags)
+{
+ return (kmem_alloc(sizeof (cpuset_t), kmflags));
+}
+
+void
+cpuset_free(cpuset_t *s)
+{
+ kmem_free(s, sizeof (cpuset_t));
+}
void
cpuset_all(cpuset_t *s)
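The old #if CPUSET_WORDS > 1 guard is dropped above, so cpuset manipulation always goes through real functions, and heap allocation helpers are added. A brief usage sketch, assuming KM_SLEEP allocation and the accessors added further down in this diff (the function name is illustrative):

static void
example_cpuset_use(void)
{
        cpuset_t *set = cpuset_alloc(KM_SLEEP);

        cpuset_zero(set);
        cpuset_add(set, CPU->cpu_id);

        /* ... hand the set to an interface expecting a cpuset_t * ... */

        cpuset_free(set);
}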
@@ -2722,38 +2752,61 @@ cpuset_all(cpuset_t *s)
}
void
-cpuset_all_but(cpuset_t *s, uint_t cpu)
+cpuset_all_but(cpuset_t *s, const uint_t cpu)
{
cpuset_all(s);
CPUSET_DEL(*s, cpu);
}
void
-cpuset_only(cpuset_t *s, uint_t cpu)
+cpuset_only(cpuset_t *s, const uint_t cpu)
{
CPUSET_ZERO(*s);
CPUSET_ADD(*s, cpu);
}
+long
+cpu_in_set(cpuset_t *s, const uint_t cpu)
+{
+ VERIFY(cpu < NCPU);
+ return (BT_TEST(s->cpub, cpu));
+}
+
+void
+cpuset_add(cpuset_t *s, const uint_t cpu)
+{
+ VERIFY(cpu < NCPU);
+ BT_SET(s->cpub, cpu);
+}
+
+void
+cpuset_del(cpuset_t *s, const uint_t cpu)
+{
+ VERIFY(cpu < NCPU);
+ BT_CLEAR(s->cpub, cpu);
+}
+
int
cpuset_isnull(cpuset_t *s)
{
int i;
- for (i = 0; i < CPUSET_WORDS; i++)
+ for (i = 0; i < CPUSET_WORDS; i++) {
if (s->cpub[i] != 0)
return (0);
+ }
return (1);
}
int
-cpuset_cmp(cpuset_t *s1, cpuset_t *s2)
+cpuset_isequal(cpuset_t *s1, cpuset_t *s2)
{
int i;
- for (i = 0; i < CPUSET_WORDS; i++)
+ for (i = 0; i < CPUSET_WORDS; i++) {
if (s1->cpub[i] != s2->cpub[i])
return (0);
+ }
return (1);
}
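Note the rename of cpuset_cmp() to cpuset_isequal() and the NCPU bounds checks (VERIFY) on the new per-bit accessors. An illustrative combination of the two (the helper and its arguments are hypothetical):

static int
example_sets_match_and_contain(cpuset_t *a, cpuset_t *b, uint_t cpuid)
{
        if (!cpuset_isequal(a, b))
                return (0);

        /* cpu_in_set() VERIFYs cpuid < NCPU before testing the bit. */
        return (cpu_in_set(a, cpuid) != 0);
}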
@@ -2822,7 +2875,72 @@ cpuset_bounds(cpuset_t *s, uint_t *smallestid, uint_t *largestid)
*smallestid = *largestid = CPUSET_NOTINSET;
}
-#endif /* CPUSET_WORDS */
+void
+cpuset_atomic_del(cpuset_t *s, const uint_t cpu)
+{
+ VERIFY(cpu < NCPU);
+ BT_ATOMIC_CLEAR(s->cpub, (cpu))
+}
+
+void
+cpuset_atomic_add(cpuset_t *s, const uint_t cpu)
+{
+ VERIFY(cpu < NCPU);
+ BT_ATOMIC_SET(s->cpub, (cpu))
+}
+
+long
+cpuset_atomic_xadd(cpuset_t *s, const uint_t cpu)
+{
+ long res;
+
+ VERIFY(cpu < NCPU);
+ BT_ATOMIC_SET_EXCL(s->cpub, cpu, res);
+ return (res);
+}
+
+long
+cpuset_atomic_xdel(cpuset_t *s, const uint_t cpu)
+{
+ long res;
+
+ VERIFY(cpu < NCPU);
+ BT_ATOMIC_CLEAR_EXCL(s->cpub, cpu, res);
+ return (res);
+}
+
+void
+cpuset_or(cpuset_t *dst, cpuset_t *src)
+{
+ for (int i = 0; i < CPUSET_WORDS; i++) {
+ dst->cpub[i] |= src->cpub[i];
+ }
+}
+
+void
+cpuset_xor(cpuset_t *dst, cpuset_t *src)
+{
+ for (int i = 0; i < CPUSET_WORDS; i++) {
+ dst->cpub[i] ^= src->cpub[i];
+ }
+}
+
+void
+cpuset_and(cpuset_t *dst, cpuset_t *src)
+{
+ for (int i = 0; i < CPUSET_WORDS; i++) {
+ dst->cpub[i] &= src->cpub[i];
+ }
+}
+
+void
+cpuset_zero(cpuset_t *dst)
+{
+ for (int i = 0; i < CPUSET_WORDS; i++) {
+ dst->cpub[i] = 0;
+ }
+}
+
/*
* Unbind threads bound to specified CPU.
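The *_xadd/*_xdel variants above return whatever BT_ATOMIC_SET_EXCL/BT_ATOMIC_CLEAR_EXCL report, which enables claim-exactly-once patterns. A hedged sketch, assuming the usual *_EXCL convention of returning 0 on success and non-zero when the bit was already in the requested state (the helper name is hypothetical):

static boolean_t
example_claim_cpu(cpuset_t *claimed, uint_t cpuid)
{
        /* Non-zero means another caller already set this bit. */
        if (cpuset_atomic_xadd(claimed, cpuid) != 0)
                return (B_FALSE);

        /* This caller transitioned the bit 0 -> 1 and owns the claim. */
        return (B_TRUE);
}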