Diffstat (limited to 'usr/src/uts/common/os/cyclic.c')
-rw-r--r--  usr/src/uts/common/os/cyclic.c | 114
1 file changed, 103 insertions(+), 11 deletions(-)
diff --git a/usr/src/uts/common/os/cyclic.c b/usr/src/uts/common/os/cyclic.c
index 0aa54eeaee..58db0d5edf 100644
--- a/usr/src/uts/common/os/cyclic.c
+++ b/usr/src/uts/common/os/cyclic.c
@@ -24,7 +24,7 @@
*/
/*
- * Copyright (c) 2012, Joyent Inc. All rights reserved.
+ * Copyright 2019 Joyent, Inc.
*/
/*
@@ -112,6 +112,7 @@
* cyclic_remove() <-- Removes a cyclic
* cyclic_bind() <-- Change a cyclic's CPU or partition binding
* cyclic_reprogram() <-- Reprogram a cyclic's expiration
+ * cyclic_move_here() <-- Shuffle cyclic to current CPU
*
* Inter-subsystem Interfaces
*
@@ -591,7 +592,7 @@
* correct position in the heap (up or down depending on whether the
* new expiration is less than or greater than the old one).
* 5. If the cyclic move modified the root of the heap, the backend is
- * reprogrammed.
+ * reprogrammed.
*
* Reprogramming can be a frequent event (see the callout subsystem). So,
* the serialization used has to be efficient. As with all other cyclic
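
The up-or-down swizzle in step 4 above is an ordinary keyed-heap adjustment. A
standalone userland sketch of the idea (heap_t and heap_adjust() are invented
for illustration; they are not the kernel's cyp_heap code):

	#include <stdio.h>

	/* Toy min-heap of expiration times, earliest at exp[0]. */
	typedef struct {
		long long exp[16];
		int n;
	} heap_t;

	static void
	swap(heap_t *h, int a, int b)
	{
		long long t = h->exp[a];
		h->exp[a] = h->exp[b];
		h->exp[b] = t;
	}

	/*
	 * Move entry i up or down until the heap property holds again
	 * (step 4).  Returns 1 if the root changed, i.e. the case in
	 * which step 5 would reprogram the backend.
	 */
	static int
	heap_adjust(heap_t *h, int i)
	{
		long long oroot = h->exp[0];

		while (i > 0 && h->exp[(i - 1) / 2] > h->exp[i]) {
			swap(h, i, (i - 1) / 2);	/* up */
			i = (i - 1) / 2;
		}
		for (;;) {				/* down */
			int l = 2 * i + 1, r = l + 1, m = i;

			if (l < h->n && h->exp[l] < h->exp[m])
				m = l;
			if (r < h->n && h->exp[r] < h->exp[m])
				m = r;
			if (m == i)
				break;
			swap(h, i, m);
			i = m;
		}
		return (h->exp[0] != oroot);
	}

	int
	main(void)
	{
		heap_t h = { { 100, 300, 200 }, 3 };

		h.exp[2] = 50;		/* "reprogram" entry 2 earlier */
		if (heap_adjust(&h, 2))
			printf("root changed; backend reprogram needed\n");
		return (0);
	}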
@@ -610,6 +611,13 @@
* some sort of synchronization for its cyclic-related activities. This
* little caveat exists because the cyclic ID is not really an ID. It is
* implemented as a pointer to a structure.
+ *
+ * For cyclics which reprogram themselves during their own handler function,
+ * avoiding the potential race with cyclic_remove() can be a challenge. If a
+ * handler is running and a remote thread issues a cyclic_remove() on its
+ * cyclic (interrupting the handler with the removal xcall), subsequent
+ * attempts to reprogram the cyclic from within the handler will result in a
+ * failure return code from cyclic_reprogram().
*/
#include <sys/cyclic_impl.h>
#include <sys/sysmacros.h>
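
For consumers, the upshot of this new failure mode is that a self-rescheduling
handler must check cyclic_reprogram()'s return value. A minimal consumer
sketch (my_state_t and my_handler() are hypothetical and not part of this
patch):

	#include <sys/cyclic.h>
	#include <sys/time.h>

	typedef struct my_state {
		cyclic_id_t	ms_cyclic;	/* from an earlier cyclic_add() */
		hrtime_t	ms_interval;
	} my_state_t;

	static void
	my_handler(void *arg)
	{
		my_state_t *ms = arg;

		/* ... periodic work ... */

		/*
		 * If a remote cyclic_remove() interrupted this handler,
		 * cyclic_reprogram() now returns 0; stop rescheduling
		 * rather than treating it as fatal.
		 */
		if (cyclic_reprogram(ms->ms_cyclic,
		    gethrtime() + ms->ms_interval) == 0)
			return;
	}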
@@ -1951,8 +1959,9 @@ cyclic_remove_here(cyc_cpu_t *cpu, cyc_index_t ndx, cyc_time_t *when, int wait)
* it calls this function directly. Else, it invokes this function through
* an X-call to the cyclic's CPU.
*/
-static void
-cyclic_reprogram_cyclic(cyc_cpu_t *cpu, cyc_index_t ndx, hrtime_t expire)
+static boolean_t
+cyclic_reprogram_cyclic(cyc_cpu_t *cpu, cyc_index_t ndx, hrtime_t expire,
+ boolean_t is_local)
{
cyc_backend_t *be = cpu->cyp_backend;
cyb_arg_t bar = be->cyb_arg;
@@ -1981,8 +1990,22 @@ cyclic_reprogram_cyclic(cyc_cpu_t *cpu, cyc_index_t ndx, hrtime_t expire)
if (heap[i] == ndx)
break;
}
- if (i < 0)
+ if (i < 0) {
+ /*
+ * Report failure (rather than panicking) if and only if the
+ * cyclic_reprogram() is occurring on the CPU which the cyclic
+ * resides upon, and there is evidence that a pending cyclic
+ * was removed from that CPU.
+ *
+ * This covers the race where a cyclic is removed out from
+ * under its running handler, which then attempts a reprogram.
+ */
+ if (is_local &&
+ cpu->cyp_state == CYS_REMOVING && cpu->cyp_rpend > 0) {
+ return (B_FALSE);
+ }
panic("attempt to reprogram non-existent cyclic");
+ }
cyclic = &cpu->cyp_cyclics[ndx];
oexpire = cyclic->cy_expire;
@@ -2007,13 +2030,18 @@ cyclic_reprogram_cyclic(cyc_cpu_t *cpu, cyc_index_t ndx, hrtime_t expire)
}
be->cyb_restore_level(bar, cookie);
+ return (B_TRUE);
}
static void
cyclic_reprogram_xcall(cyc_xcallarg_t *arg)
{
- cyclic_reprogram_cyclic(arg->cyx_cpu, arg->cyx_ndx,
- arg->cyx_when->cyt_when);
+ /*
+ * Cross-call reprogram operations should never fail due to racing
+ * cyclic removal, as they cannot occur from the handler itself.
+ */
+ VERIFY(cyclic_reprogram_cyclic(arg->cyx_cpu, arg->cyx_ndx,
+ arg->cyx_when->cyt_when, B_FALSE));
}
static void
@@ -3051,6 +3079,7 @@ cyclic_reprogram(cyclic_id_t id, hrtime_t expiration)
cyc_cpu_t *cpu;
cyc_omni_cpu_t *ocpu;
cyc_index_t ndx;
+ int res = 1;
ASSERT(expiration > 0);
@@ -3096,10 +3125,18 @@ cyclic_reprogram(cyclic_id_t id, hrtime_t expiration)
ndx = idp->cyi_ndx;
}
- if (cpu->cyp_cpu == CPU)
- cyclic_reprogram_cyclic(cpu, ndx, expiration);
- else
+ if (cpu->cyp_cpu == CPU) {
+ /*
+ * If this reprogram is being done as part of a running cyclic
+ * handler, it is possible that a racing cyclic_remove() on a
+ * remote CPU will cause it to fail.
+ */
+ if (!cyclic_reprogram_cyclic(cpu, ndx, expiration, B_TRUE)) {
+ res = 0;
+ }
+ } else {
cyclic_reprogram_here(cpu, ndx, expiration);
+ }
/*
* Allow the cyclic to be moved or removed.
@@ -3108,7 +3145,62 @@ cyclic_reprogram(cyclic_id_t id, hrtime_t expiration)
kpreempt_enable();
- return (1);
+ return (res);
+}
+
+/*
+ * void cyclic_move_here(cyclic_id_t)
+ *
+ * Overview
+ *
+ * cyclic_move_here() attempts to shuffle a cyclic onto the current CPU.
+ *
+ * Arguments and notes
+ *
+ * The first argument is a cyclic_id returned from cyclic_add().
+ * cyclic_move_here() may _not_ be called on a cyclic_id returned from
+ * cyclic_add_omni() or one bound to a CPU or partition via cyclic_bind().
+ *
+ * This cyclic shuffling is performed on a best-effort basis. If for some
+ * reason the current CPU is unsuitable or the thread migrates between CPUs
+ * during the call, the function may return with the cyclic residing on some
+ * other CPU.
+ *
+ * Return value
+ *
+ * None; cyclic_move_here() always reports success.
+ *
+ * Caller's context
+ *
+ * cpu_lock must be held by the caller, and the caller must not be in
+ * interrupt context. The caller may not hold any locks which are also
+ * grabbed by any cyclic handler.
+ */
+void
+cyclic_move_here(cyclic_id_t id)
+{
+ cyc_id_t *idp = (cyc_id_t *)id;
+ cyc_cpu_t *cc = idp->cyi_cpu;
+ cpu_t *dest = CPU;
+
+ ASSERT(MUTEX_HELD(&cpu_lock));
+ CYC_PTRACE("move_here", idp, dest);
+ VERIFY3P(cc, !=, NULL);
+ VERIFY3U(cc->cyp_cyclics[idp->cyi_ndx].cy_flags &
+ (CYF_CPU_BOUND|CYF_PART_BOUND), ==, 0);
+
+ if (cc->cyp_cpu == dest) {
+ return;
+ }
+
+ /* Is the destination CPU suitable as a migration target? */
+ if (dest->cpu_cyclic == NULL ||
+ dest->cpu_cyclic->cyp_state == CYS_OFFLINE ||
+ (dest->cpu_flags & CPU_ENABLE) == 0) {
+ return;
+ }
+
+ cyclic_juggle_one_to(idp, dest->cpu_cyclic);
}
hrtime_t
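
A caller sketch honoring the documented context rules for the new function
(my_pull_cyclic() and my_id are hypothetical; the cyclic must come from an
unbound cyclic_add()):

	#include <sys/cyclic.h>
	#include <sys/cpuvar.h>

	static void
	my_pull_cyclic(cyclic_id_t my_id)
	{
		/* cpu_lock held, not in interrupt context */
		mutex_enter(&cpu_lock);
		/* Best effort: the cyclic may still land elsewhere. */
		cyclic_move_here(my_id);
		mutex_exit(&cpu_lock);
	}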