author    Patrick Mooney <pmooney@pfmooney.com>  2021-06-30 23:44:48 +0000
committer Patrick Mooney <pmooney@oxide.computer>  2022-01-27 18:19:34 +0000
commit    5a469116729183a46e77dc0620955bbde58d93f7 (patch)
tree      e4da8cb9ec7d5b2a903a5bf047191fc3413cb5eb /usr
parent    dd980e6395616846ce076ec5b40ca8c7593b48f9 (diff)
download  illumos-joyent-5a469116729183a46e77dc0620955bbde58d93f7.tar.gz
13917 ctxops interfaces could be more ergonomic
Reviewed by: Dan McDonald <danmcd@joyent.com>
Reviewed by: Andy Fiddaman <andy@omnios.org>
Reviewed by: Toomas Soome <tsoome@me.com>
Reviewed by: Dan Cross <cross@oxidecomputer.com>
Approved by: Robert Mustacchi <rm@fingolfin.org>
Diffstat (limited to 'usr')
-rw-r--r--  usr/src/uts/common/disp/thread.c    | 220
-rw-r--r--  usr/src/uts/common/os/cap_util.c    |   2
-rw-r--r--  usr/src/uts/common/os/kcpc.c        |  76
-rw-r--r--  usr/src/uts/common/os/schedctl.c    |  31
-rw-r--r--  usr/src/uts/common/sys/kcpc.h       |   5
-rw-r--r--  usr/src/uts/common/sys/proc.h       |  28
-rw-r--r--  usr/src/uts/i86pc/io/vmm/vmm.c      |  67
-rw-r--r--  usr/src/uts/i86xpv/os/xen_machdep.c |   4
-rw-r--r--  usr/src/uts/intel/os/archdep.c      |   2
-rw-r--r--  usr/src/uts/intel/os/cpc_subr.c     |   3
-rw-r--r--  usr/src/uts/intel/os/desctbls.c     |   4
-rw-r--r--  usr/src/uts/intel/os/fpu.c          |  75
-rw-r--r--  usr/src/uts/intel/os/sundep.c       |  81
-rw-r--r--  usr/src/uts/intel/os/sysi86.c       |  19
-rw-r--r--  usr/src/uts/intel/sys/archsystm.h   |   6
-rw-r--r--  usr/src/uts/intel/sys/hypervisor.h  |   4
-rw-r--r--  usr/src/uts/sun4/ml/subr_asm.s      |   6
-rw-r--r--  usr/src/uts/sun4u/os/cpc_subr.c     |   3
-rw-r--r--  usr/src/uts/sun4v/os/cpc_subr.c     |   3
19 files changed, 393 insertions, 246 deletions
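The new interface replaces the long positional argument lists of installctx()/removectx() with a versioned template of operations. As a rough sketch of a consumer, using illustrative mydrv_* names that are not part of this commit, a subsystem now declares its ctxop_template once and installs or removes it by reference:

static void mydrv_save(void *);
static void mydrv_restore(void *);

static const struct ctxop_template mydrv_ctxop_tpl = {
	.ct_rev = CTXOP_TPL_REV,	/* revision is verified by ctxop_allocate() */
	.ct_save = mydrv_save,
	.ct_restore = mydrv_restore,
	/* unset ops (.ct_fork, .ct_lwp_create, .ct_exit, .ct_free) stay NULL */
};

void
mydrv_thread_init(kthread_t *t, void *arg)
{
	/* Formerly: installctx(t, arg, mydrv_save, mydrv_restore,
	 *     NULL, NULL, NULL, NULL, NULL); */
	ctxop_install(t, &mydrv_ctxop_tpl, arg);
}

void
mydrv_thread_fini(kthread_t *t, void *arg)
{
	/* ctxop_remove() returns 1 if a ctxop matching (tpl, arg) was found */
	(void) ctxop_remove(t, &mydrv_ctxop_tpl, arg);
}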
diff --git a/usr/src/uts/common/disp/thread.c b/usr/src/uts/common/disp/thread.c
index dd0d4b9533..6dc47dd80a 100644
--- a/usr/src/uts/common/disp/thread.c
+++ b/usr/src/uts/common/disp/thread.c
@@ -22,6 +22,7 @@
/*
* Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2021 Joyent, Inc.
+ * Copyright 2021 Oxide Computer Company
*/
#include <sys/types.h>
@@ -1029,54 +1030,95 @@ reapq_add(kthread_t *t)
mutex_exit(&reaplock);
}
-/*
- * Provide an allocation function for callers of installctx() that, for
- * reasons of incomplete context-op initialization, must call installctx()
- * in a kpreempt_disable() block. The caller, therefore, must call this
- * without being in such a block.
- */
+static struct ctxop *
+ctxop_find_by_tmpl(kthread_t *t, const struct ctxop_template *ct, void *arg)
+{
+ struct ctxop *ctx, *head;
+
+ ASSERT(MUTEX_HELD(&t->t_ctx_lock));
+ ASSERT(curthread->t_preempt > 0);
+
+ if (t->t_ctx == NULL) {
+ return (NULL);
+ }
+
+ ctx = head = t->t_ctx;
+ do {
+ if (ctx->save_op == ct->ct_save &&
+ ctx->restore_op == ct->ct_restore &&
+ ctx->fork_op == ct->ct_fork &&
+ ctx->lwp_create_op == ct->ct_lwp_create &&
+ ctx->exit_op == ct->ct_exit &&
+ ctx->free_op == ct->ct_free &&
+ ctx->arg == arg) {
+ return (ctx);
+ }
+
+ ctx = ctx->next;
+ } while (ctx != head);
+
+ return (NULL);
+}
+
+static void
+ctxop_detach_chain(kthread_t *t, struct ctxop *ctx)
+{
+ ASSERT(t != NULL);
+ ASSERT(t->t_ctx != NULL);
+ ASSERT(ctx != NULL);
+ ASSERT(ctx->next != NULL && ctx->prev != NULL);
+
+ ctx->prev->next = ctx->next;
+ ctx->next->prev = ctx->prev;
+ if (ctx->next == ctx) {
+ /* last remaining item */
+ t->t_ctx = NULL;
+ } else if (ctx == t->t_ctx) {
+ /* fix up head of list */
+ t->t_ctx = ctx->next;
+ }
+ ctx->next = ctx->prev = NULL;
+}
+
struct ctxop *
-installctx_preallocate(void)
+ctxop_allocate(const struct ctxop_template *ct, void *arg)
{
+ struct ctxop *ctx;
+
/*
- * NOTE: We could ASSERT/VERIFY that we are not in a place where
- * a KM_SLEEP allocation could block indefinitely.
- *
- * ASSERT(curthread->t_preempt == 0);
+ * No changes have been made to the interface yet, so we expect all
+ * callers to use the original revision.
*/
+ VERIFY3U(ct->ct_rev, ==, CTXOP_TPL_REV);
+
+ ctx = kmem_alloc(sizeof (struct ctxop), KM_SLEEP);
+ ctx->save_op = ct->ct_save;
+ ctx->restore_op = ct->ct_restore;
+ ctx->fork_op = ct->ct_fork;
+ ctx->lwp_create_op = ct->ct_lwp_create;
+ ctx->exit_op = ct->ct_exit;
+ ctx->free_op = ct->ct_free;
+ ctx->arg = arg;
+ ctx->save_ts = 0;
+ ctx->restore_ts = 0;
+ ctx->next = ctx->prev = NULL;
- return (kmem_alloc(sizeof (struct ctxop), KM_SLEEP));
+ return (ctx);
}
-/*
- * Install thread context ops for the current thread.
- * The caller can pass in a preallocated struct ctxop, eliminating the need
- * for the requirement of entering with kernel preemption still enabled.
- */
void
-installctx(
- kthread_t *t,
- void *arg,
- void (*save)(void *),
- void (*restore)(void *),
- void (*fork)(void *, void *),
- void (*lwp_create)(void *, void *),
- void (*exit)(void *),
- void (*free)(void *, int),
- struct ctxop *ctx)
+ctxop_free(struct ctxop *ctx)
{
- if (ctx == NULL)
- ctx = kmem_alloc(sizeof (struct ctxop), KM_SLEEP);
-
- ctx->save_op = save;
- ctx->restore_op = restore;
- ctx->fork_op = fork;
- ctx->lwp_create_op = lwp_create;
- ctx->exit_op = exit;
- ctx->free_op = free;
- ctx->arg = arg;
- ctx->save_ts = 0;
- ctx->restore_ts = 0;
+ if (ctx->free_op != NULL)
+ (ctx->free_op)(ctx->arg, 0);
+
+ kmem_free(ctx, sizeof (struct ctxop));
+}
+
+void
+ctxop_attach(kthread_t *t, struct ctxop *ctx)
+{
+ ASSERT(ctx->next == NULL && ctx->prev == NULL);
/*
* Keep ctxops in a doubly-linked list to allow traversal in both
@@ -1115,25 +1157,12 @@ installctx(
kpreempt_enable();
}
-/*
- * Remove the thread context ops from a thread.
- */
-int
-removectx(
- kthread_t *t,
- void *arg,
- void (*save)(void *),
- void (*restore)(void *),
- void (*fork)(void *, void *),
- void (*lwp_create)(void *, void *),
- void (*exit)(void *),
- void (*free)(void *, int))
+void
+ctxop_detach(kthread_t *t, struct ctxop *ctx)
{
- struct ctxop *ctx, *head;
-
/*
* The incoming kthread_t (which is the thread for which the
- * context ops will be removed) should be one of the following:
+ * context ops will be detached) should be one of the following:
*
* a) the current thread,
*
@@ -1156,42 +1185,65 @@ removectx(
mutex_enter(&t->t_ctx_lock);
kpreempt_disable();
- if (t->t_ctx == NULL) {
- mutex_exit(&t->t_ctx_lock);
- kpreempt_enable();
- return (0);
+ VERIFY(t->t_ctx != NULL);
+
+#ifdef DEBUG
+ /* Check that provided `ctx` is actually present in the t_ctx chain */
+ struct ctxop *head, *cur;
+ head = cur = t->t_ctx;
+ for (;;) {
+ if (cur == ctx) {
+ break;
+ }
+ cur = cur->next;
+ /* If we wrap, having not found `ctx`, this assert will fail */
+ ASSERT3P(cur, !=, head);
}
+#endif /* DEBUG */
- ctx = head = t->t_ctx;
- do {
- if (ctx->save_op == save && ctx->restore_op == restore &&
- ctx->fork_op == fork && ctx->lwp_create_op == lwp_create &&
- ctx->exit_op == exit && ctx->free_op == free &&
- ctx->arg == arg) {
- ctx->prev->next = ctx->next;
- ctx->next->prev = ctx->prev;
- if (ctx->next == ctx) {
- /* last remaining item */
- t->t_ctx = NULL;
- } else if (ctx == t->t_ctx) {
- /* fix up head of list */
- t->t_ctx = ctx->next;
- }
- ctx->next = ctx->prev = NULL;
+ ctxop_detach_chain(t, ctx);
- mutex_exit(&t->t_ctx_lock);
- if (ctx->free_op != NULL)
- (ctx->free_op)(ctx->arg, 0);
- kmem_free(ctx, sizeof (struct ctxop));
- kpreempt_enable();
- return (1);
- }
+ mutex_exit(&t->t_ctx_lock);
+ kpreempt_enable();
+}
- ctx = ctx->next;
- } while (ctx != head);
+void
+ctxop_install(kthread_t *t, const struct ctxop_template *ct, void *arg)
+{
+ ctxop_attach(t, ctxop_allocate(ct, arg));
+}
+
+int
+ctxop_remove(kthread_t *t, const struct ctxop_template *ct, void *arg)
+{
+ struct ctxop *ctx;
+
+ /*
+ * ctxop_remove() shares the same requirements for the acted-upon thread
+ * as ctxop_detach()
+ */
+ ASSERT(t == curthread || ttoproc(t)->p_stat == SIDL ||
+ ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
+
+ /*
+ * Serialize modifications to t->t_ctx to prevent the agent thread
+ * and the target thread from racing with each other during lwp exit.
+ */
+ mutex_enter(&t->t_ctx_lock);
+ kpreempt_disable();
+
+ ctx = ctxop_find_by_tmpl(t, ct, arg);
+ if (ctx != NULL) {
+ ctxop_detach_chain(t, ctx);
+ ctxop_free(ctx);
+ }
mutex_exit(&t->t_ctx_lock);
kpreempt_enable();
+
+ if (ctx != NULL) {
+ return (1);
+ }
return (0);
}
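For callers that must attach while preemption is disabled, where the KM_SLEEP allocation inside ctxop_allocate() could block, the split allocate/attach pair above replaces the old installctx_preallocate() dance. A minimal sketch of the pattern, reusing the hypothetical mydrv template from earlier (the fp_exec() and sysi86() hunks below follow the same shape):

	struct ctxop *ctx;

	/* Preemption is still enabled here, so a sleeping allocation is safe. */
	ctx = ctxop_allocate(&mydrv_ctxop_tpl, arg);

	kpreempt_disable();
	ctxop_attach(curthread, ctx);
	/* ... initialize the state that the save/restore ops consume ... */
	kpreempt_enable();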
diff --git a/usr/src/uts/common/os/cap_util.c b/usr/src/uts/common/os/cap_util.c
index 4f9b9f5985..86475315e1 100644
--- a/usr/src/uts/common/os/cap_util.c
+++ b/usr/src/uts/common/os/cap_util.c
@@ -1258,7 +1258,7 @@ cu_cpu_fini(cpu_t *cp)
ctx = cpu_ctx->ctx_ptr_array[i];
if (ctx == NULL)
continue;
- kcpc_free(ctx, 0);
+ kcpc_free_cpu(ctx);
}
/*
diff --git a/usr/src/uts/common/os/kcpc.c b/usr/src/uts/common/os/kcpc.c
index 33486aaff3..27e30a5725 100644
--- a/usr/src/uts/common/os/kcpc.c
+++ b/usr/src/uts/common/os/kcpc.c
@@ -22,6 +22,7 @@
/*
* Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2021 Joyent, Inc.
+ * Copyright 2021 Oxide Computer Company
*/
#include <sys/param.h>
@@ -75,9 +76,10 @@ static uint32_t kcpc_nullctx_count; /* # overflows in a thread with no ctx */
*/
static int kcpc_nullctx_panic = 0;
-static void kcpc_lwp_create(kthread_t *t, kthread_t *ct);
-static void kcpc_restore(kcpc_ctx_t *ctx);
-static void kcpc_save(kcpc_ctx_t *ctx);
+static void kcpc_save(void *);
+static void kcpc_restore(void *);
+static void kcpc_lwp_create(void *, void *);
+static void kcpc_free(void *, int);
static void kcpc_ctx_clone(kcpc_ctx_t *ctx, kcpc_ctx_t *cctx);
static int kcpc_tryassign(kcpc_set_t *set, int starting_req, int *scratch);
static kcpc_set_t *kcpc_dup_set(kcpc_set_t *set);
@@ -112,6 +114,14 @@ extern int kcpc_hw_load_pcbe(void);
*/
static int kcpc_pcbe_error = 0;
+static const struct ctxop_template kcpc_ctxop_tpl = {
+ .ct_rev = CTXOP_TPL_REV,
+ .ct_save = kcpc_save,
+ .ct_restore = kcpc_restore,
+ .ct_lwp_create = kcpc_lwp_create,
+ .ct_free = kcpc_free,
+};
+
/*
* Perform one-time initialization of kcpc framework.
* This function performs the initialization only the first time it is called.
@@ -318,8 +328,7 @@ kcpc_bind_thread(kcpc_set_t *set, kthread_t *t, int *subcode)
/*
* Add a device context to the subject thread.
*/
- installctx(t, ctx, kcpc_save, kcpc_restore, NULL,
- kcpc_lwp_create, NULL, kcpc_free, NULL);
+ ctxop_install(t, &kcpc_ctxop_tpl, ctx);
/*
* Ask the backend to program the hardware.
@@ -547,7 +556,7 @@ kcpc_unbind(kcpc_set_t *set)
t = ctx->kc_thread;
/*
* The context is thread-bound and therefore has a device
- * context. It will be freed via removectx() calling
+ * context. It will be freed via ctxop_remove() calling
* freectx() calling kcpc_free().
*/
if (t == curthread) {
@@ -560,15 +569,7 @@ kcpc_unbind(kcpc_set_t *set)
splx(save_spl);
kpreempt_enable();
}
-#ifdef DEBUG
- if (removectx(t, ctx, kcpc_save, kcpc_restore, NULL,
- kcpc_lwp_create, NULL, kcpc_free) == 0)
- panic("kcpc_unbind: context %p not preset on thread %p",
- (void *)ctx, (void *)t);
-#else
- (void) removectx(t, ctx, kcpc_save, kcpc_restore, NULL,
- kcpc_lwp_create, NULL, kcpc_free);
-#endif /* DEBUG */
+ VERIFY3U(ctxop_remove(t, &kcpc_ctxop_tpl, ctx), !=, 0);
t->t_cpc_set = NULL;
t->t_cpc_ctx = NULL;
} else {
@@ -1215,8 +1216,9 @@ kcpc_overflow_ast()
* Called when switching away from current thread.
*/
static void
-kcpc_save(kcpc_ctx_t *ctx)
+kcpc_save(void *arg)
{
+ kcpc_ctx_t *ctx = arg;
int err;
int save_spl;
@@ -1264,8 +1266,9 @@ kcpc_save(kcpc_ctx_t *ctx)
}
static void
-kcpc_restore(kcpc_ctx_t *ctx)
+kcpc_restore(void *arg)
{
+ kcpc_ctx_t *ctx = arg;
int save_spl;
mutex_enter(&ctx->kc_lock);
@@ -1324,9 +1327,11 @@ kcpc_restore(kcpc_ctx_t *ctx)
* it is switched off.
*/
/*ARGSUSED*/
-void
-kcpc_idle_save(struct cpu *cp)
+static void
+kcpc_idle_save(void *arg)
{
+ struct cpu *cp = arg;
+
/*
* The idle thread shouldn't be run anywhere else.
*/
@@ -1348,9 +1353,11 @@ kcpc_idle_save(struct cpu *cp)
mutex_exit(&cp->cpu_cpc_ctxlock);
}
-void
-kcpc_idle_restore(struct cpu *cp)
+static void
+kcpc_idle_restore(void *arg)
{
+ struct cpu *cp = arg;
+
/*
* The idle thread shouldn't be run anywhere else.
*/
@@ -1372,10 +1379,23 @@ kcpc_idle_restore(struct cpu *cp)
mutex_exit(&cp->cpu_cpc_ctxlock);
}
+static const struct ctxop_template kcpc_idle_ctxop_tpl = {
+ .ct_rev = CTXOP_TPL_REV,
+ .ct_save = kcpc_idle_save,
+ .ct_restore = kcpc_idle_restore,
+};
+
+void
+kcpc_idle_ctxop_install(kthread_t *t, struct cpu *cp)
+{
+ ctxop_install(t, &kcpc_idle_ctxop_tpl, cp);
+}
+
/*ARGSUSED*/
static void
-kcpc_lwp_create(kthread_t *t, kthread_t *ct)
+kcpc_lwp_create(void *parent, void *child)
{
+ kthread_t *t = parent, *ct = child;
kcpc_ctx_t *ctx = t->t_cpc_ctx, *cctx;
int i;
@@ -1424,8 +1444,7 @@ kcpc_lwp_create(kthread_t *t, kthread_t *ct)
aston(ct);
}
- installctx(ct, cctx, kcpc_save, kcpc_restore,
- NULL, kcpc_lwp_create, NULL, kcpc_free, NULL);
+ ctxop_install(ct, &kcpc_ctxop_tpl, cctx);
}
/*
@@ -1462,8 +1481,9 @@ kcpc_lwp_create(kthread_t *t, kthread_t *ct)
/*ARGSUSED*/
void
-kcpc_free(kcpc_ctx_t *ctx, int isexec)
+kcpc_free(void *arg, int isexec)
{
+ kcpc_ctx_t *ctx = arg;
int i;
kcpc_set_t *set = ctx->kc_set;
@@ -1544,6 +1564,12 @@ kcpc_free(kcpc_ctx_t *ctx, int isexec)
kcpc_free_set(set);
}
+void
+kcpc_free_cpu(kcpc_ctx_t *ctx)
+{
+ kcpc_free(ctx, 0);
+}
+
/*
* Free the memory associated with a request set.
*/
diff --git a/usr/src/uts/common/os/schedctl.c b/usr/src/uts/common/os/schedctl.c
index 34f72995a2..d500bf7468 100644
--- a/usr/src/uts/common/os/schedctl.c
+++ b/usr/src/uts/common/os/schedctl.c
@@ -23,6 +23,7 @@
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
* Copyright 2021 Joyent, Inc.
+ * Copyright 2021 Oxide Computer Company
*/
#include <sys/types.h>
@@ -81,9 +82,9 @@ static size_t sc_bitmap_len; /* # of bits in allocation bitmap */
static size_t sc_bitmap_words; /* # of words in allocation bitmap */
/* Context ops */
-static void schedctl_save(sc_shared_t *);
-static void schedctl_restore(sc_shared_t *);
-static void schedctl_fork(kthread_t *, kthread_t *);
+static void schedctl_save(void *);
+static void schedctl_restore(void *);
+static void schedctl_fork(void *, void *);
/* Functions for handling shared pages */
static int schedctl_shared_alloc(sc_shared_t **, uintptr_t *);
@@ -92,6 +93,13 @@ static int schedctl_map(struct anon_map *, caddr_t *, caddr_t);
static int schedctl_getpage(struct anon_map **, caddr_t *);
static void schedctl_freepage(struct anon_map *, caddr_t);
+static const struct ctxop_template schedctl_ctxop_tpl = {
+ .ct_rev = CTXOP_TPL_REV,
+ .ct_save = schedctl_save,
+ .ct_restore = schedctl_restore,
+ .ct_fork = schedctl_fork,
+};
+
/*
* System call interface to scheduler activations.
* This always operates on the current lwp.
@@ -112,8 +120,7 @@ schedctl(void)
return ((caddr_t)(uintptr_t)set_errno(error));
bzero(ssp, sizeof (*ssp));
- installctx(t, ssp, schedctl_save, schedctl_restore,
- schedctl_fork, NULL, NULL, NULL, NULL);
+ ctxop_install(t, &schedctl_ctxop_tpl, ssp);
thread_lock(t); /* protect against ts_tick and ts_update */
t->t_schedctl = ssp;
@@ -151,8 +158,7 @@ schedctl_lwp_cleanup(kthread_t *t)
* Remove the context op to avoid the final call to
* schedctl_save when switching away from this lwp.
*/
- (void) removectx(t, ssp, schedctl_save, schedctl_restore,
- schedctl_fork, NULL, NULL, NULL);
+ (void) ctxop_remove(t, &schedctl_ctxop_tpl, ssp);
/*
* Do not unmap the shared page until the process exits.
@@ -207,8 +213,10 @@ schedctl_proc_cleanup(void)
* Save new thread state.
*/
static void
-schedctl_save(sc_shared_t *ssp)
+schedctl_save(void *arg)
{
+ sc_shared_t *ssp = arg;
+
ssp->sc_state = curthread->t_state;
}
@@ -218,8 +226,10 @@ schedctl_save(sc_shared_t *ssp)
* Save new thread state and CPU.
*/
static void
-schedctl_restore(sc_shared_t *ssp)
+schedctl_restore(void *arg)
{
+ sc_shared_t *ssp = arg;
+
ssp->sc_state = SC_ONPROC;
ssp->sc_cpu = CPU->cpu_id;
}
@@ -230,8 +240,9 @@ schedctl_restore(sc_shared_t *ssp)
* The child's threads must call schedctl() to get new shared mappings.
*/
static void
-schedctl_fork(kthread_t *pt, kthread_t *ct)
+schedctl_fork(void *parent, void *child)
{
+ kthread_t *pt = parent, *ct = child;
proc_t *pp = ttoproc(pt);
proc_t *cp = ttoproc(ct);
sc_page_ctl_t *pagep;
diff --git a/usr/src/uts/common/sys/kcpc.h b/usr/src/uts/common/sys/kcpc.h
index d90b1c1d29..9644d019e3 100644
--- a/usr/src/uts/common/sys/kcpc.h
+++ b/usr/src/uts/common/sys/kcpc.h
@@ -199,13 +199,12 @@ extern int kcpc_overflow_ast(void);
extern uint_t kcpc_hw_overflow_intr(caddr_t, caddr_t);
extern int kcpc_hw_cpu_hook(int cpuid, ulong_t *kcpc_cpumap);
extern int kcpc_hw_lwp_hook(void);
-extern void kcpc_idle_save(struct cpu *cp);
-extern void kcpc_idle_restore(struct cpu *cp);
+extern void kcpc_idle_ctxop_install(struct _kthread *, struct cpu *);
extern krwlock_t kcpc_cpuctx_lock; /* lock for 'kcpc_cpuctx' below */
extern int kcpc_cpuctx; /* number of cpu-specific contexts */
-extern void kcpc_free(kcpc_ctx_t *ctx, int isexec);
+extern void kcpc_free_cpu(kcpc_ctx_t *);
/*
* 'dtrace_cpc_in_use' contains the number of currently active cpc provider
diff --git a/usr/src/uts/common/sys/proc.h b/usr/src/uts/common/sys/proc.h
index 3862eb2c78..a2fd02e910 100644
--- a/usr/src/uts/common/sys/proc.h
+++ b/usr/src/uts/common/sys/proc.h
@@ -22,7 +22,7 @@
/*
* Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2021 Joyent, Inc.
- * Copyright 2020 Oxide Computer Company
+ * Copyright 2021 Oxide Computer Company
*/
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
@@ -718,11 +718,27 @@ extern void thread_free(kthread_t *);
extern void thread_rele(kthread_t *);
extern void thread_join(kt_did_t);
extern int reaper(void);
-extern struct ctxop *installctx_preallocate(void);
-extern void installctx(kthread_t *, void *, void (*)(), void (*)(),
- void (*)(), void (*)(), void (*)(), void (*)(), struct ctxop *);
-extern int removectx(kthread_t *, void *, void (*)(), void (*)(),
- void (*)(), void (*)(), void (*)(), void (*)());
+
+#define CTXOP_TPL_REV 1
+
+struct ctxop_template {
+ uint32_t ct_rev;
+ uint32_t ct_pad;
+ void (*ct_save)(void *);
+ void (*ct_restore)(void *);
+ void (*ct_fork)(void *, void *);
+ void (*ct_lwp_create)(void *, void *);
+ void (*ct_exit)(void *);
+ void (*ct_free)(void *, int);
+};
+
+extern struct ctxop *ctxop_allocate(const struct ctxop_template *, void *);
+extern void ctxop_free(struct ctxop *);
+extern void ctxop_attach(kthread_t *, struct ctxop *);
+extern void ctxop_detach(kthread_t *, struct ctxop *);
+extern void ctxop_install(kthread_t *, const struct ctxop_template *, void *);
+extern int ctxop_remove(kthread_t *, const struct ctxop_template *, void *);
+
extern void savectx(kthread_t *);
extern void restorectx(kthread_t *);
extern void forkctx(kthread_t *, kthread_t *);
diff --git a/usr/src/uts/i86pc/io/vmm/vmm.c b/usr/src/uts/i86pc/io/vmm/vmm.c
index 937947d4f8..16acc1ea2c 100644
--- a/usr/src/uts/i86pc/io/vmm/vmm.c
+++ b/usr/src/uts/i86pc/io/vmm/vmm.c
@@ -93,6 +93,17 @@ __FBSDID("$FreeBSD$");
struct vlapic;
+/* Flags for vtc_status */
+#define VTCS_FPU_RESTORED 1 /* guest FPU restored, host FPU saved */
+#define VTCS_FPU_CTX_CRITICAL 2 /* in ctx where FPU restore cannot be lazy */
+
+typedef struct vm_thread_ctx {
+ struct vm *vtc_vm;
+ int vtc_vcpuid;
+ uint_t vtc_status;
+ enum vcpu_ustate vtc_ustate;
+} vm_thread_ctx_t;
+
/*
* Initialization:
* (a) allocated when vcpu is created
@@ -133,6 +144,8 @@ struct vcpu {
enum vcpu_ustate ustate; /* (i) microstate for the vcpu */
hrtime_t ustate_when; /* (i) time of last ustate change */
uint64_t ustate_total[VU_MAX]; /* (o) total time spent in ustates */
+ vm_thread_ctx_t vtc; /* (o) thread state for ctxops */
+ struct ctxop *ctxop; /* (o) ctxop storage for vcpu */
};
#define vcpu_lock_initialized(v) mtx_initialized(&((v)->mtx))
@@ -269,16 +282,13 @@ static void vcpu_notify_event_locked(struct vcpu *vcpu, vcpu_notify_t);
static bool vcpu_sleep_bailout_checks(struct vm *vm, int vcpuid);
static int vcpu_vector_sipi(struct vm *vm, int vcpuid, uint8_t vector);
-/* Flags for vtc_status */
-#define VTCS_FPU_RESTORED 1 /* guest FPU restored, host FPU saved */
-#define VTCS_FPU_CTX_CRITICAL 2 /* in ctx where FPU restore cannot be lazy */
-
-typedef struct vm_thread_ctx {
- struct vm *vtc_vm;
- int vtc_vcpuid;
- uint_t vtc_status;
- enum vcpu_ustate vtc_ustate;
-} vm_thread_ctx_t;
+static void vmm_savectx(void *);
+static void vmm_restorectx(void *);
+static const struct ctxop_template vmm_ctxop_tpl = {
+ .ct_rev = CTXOP_TPL_REV,
+ .ct_save = vmm_savectx,
+ .ct_restore = vmm_restorectx,
+};
#ifdef KTR
static const char *
@@ -313,6 +323,7 @@ vcpu_cleanup(struct vm *vm, int i, bool destroy)
vcpu->vie_ctx = NULL;
vmc_destroy(vcpu->vmclient);
vcpu->vmclient = NULL;
+ ctxop_free(vcpu->ctxop);
}
}
@@ -337,6 +348,10 @@ vcpu_init(struct vm *vm, int vcpu_id, bool create)
vcpu->ustate = VU_INIT;
vcpu->ustate_when = gethrtime();
+
+ vcpu->vtc.vtc_vm = vm;
+ vcpu->vtc.vtc_vcpuid = vcpu_id;
+ vcpu->ctxop = ctxop_allocate(&vmm_ctxop_tpl, &vcpu->vtc);
} else {
vie_reset(vcpu->vie_ctx);
bzero(&vcpu->exitinfo, sizeof (vcpu->exitinfo));
@@ -2050,15 +2065,6 @@ vmm_restorectx(void *arg)
}
-/*
- * If we're in removectx(), we might still have state to tidy up.
- */
-static void
-vmm_freectx(void *arg, int isexec)
-{
- vmm_savectx(arg);
-}
-
static int
vm_entry_actions(struct vm *vm, int vcpuid, const struct vm_entry *entry,
struct vm_exit *vme)
@@ -2147,7 +2153,6 @@ vm_run(struct vm *vm, int vcpuid, const struct vm_entry *entry)
struct vcpu *vcpu;
struct vm_exit *vme;
bool intr_disabled;
- vm_thread_ctx_t vtc;
int affinity_type = CPU_CURRENT;
if (vcpuid < 0 || vcpuid >= vm->maxcpus)
@@ -2160,11 +2165,8 @@ vm_run(struct vm *vm, int vcpuid, const struct vm_entry *entry)
vcpu_ustate_change(vm, vcpuid, VU_EMU_KERN);
- vtc.vtc_vm = vm;
- vtc.vtc_vcpuid = vcpuid;
- vtc.vtc_status = 0;
- installctx(curthread, &vtc, vmm_savectx, vmm_restorectx, NULL, NULL,
- NULL, vmm_freectx, NULL);
+ vcpu->vtc.vtc_status = 0;
+ ctxop_attach(curthread, vcpu->ctxop);
error = vm_entry_actions(vm, vcpuid, entry, vme);
if (error != 0) {
@@ -2193,11 +2195,11 @@ restart:
/* Force a trip through update_sregs to reload %fs/%gs and friends */
PCB_SET_UPDATE_SEGS(&ttolwp(curthread)->lwp_pcb);
- if ((vtc.vtc_status & VTCS_FPU_RESTORED) == 0) {
+ if ((vcpu->vtc.vtc_status & VTCS_FPU_RESTORED) == 0) {
restore_guest_fpustate(vcpu);
- vtc.vtc_status |= VTCS_FPU_RESTORED;
+ vcpu->vtc.vtc_status |= VTCS_FPU_RESTORED;
}
- vtc.vtc_status |= VTCS_FPU_CTX_CRITICAL;
+ vcpu->vtc.vtc_status |= VTCS_FPU_CTX_CRITICAL;
vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
error = VMRUN(vm->cookie, vcpuid, vcpu->nextrip);
@@ -2207,7 +2209,7 @@ restart:
* Once clear of the delicate contexts comprising the VM_RUN handler,
* thread CPU affinity can be loosened while other processing occurs.
*/
- vtc.vtc_status &= ~VTCS_FPU_CTX_CRITICAL;
+ vcpu->vtc.vtc_status &= ~VTCS_FPU_CTX_CRITICAL;
thread_affinity_clear(curthread);
critical_exit();
@@ -2277,8 +2279,11 @@ restart:
}
exit:
- removectx(curthread, &vtc, vmm_savectx, vmm_restorectx, NULL, NULL,
- NULL, vmm_freectx);
+ kpreempt_disable();
+ ctxop_detach(curthread, vcpu->ctxop);
+ /* Make sure all of the needed vCPU context state is saved */
+ vmm_savectx(&vcpu->vtc);
+ kpreempt_enable();
VCPU_CTR2(vm, vcpuid, "retu %d/%d", error, vme->exitcode);
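The vmm.c change also demonstrates the cached-ctxop lifecycle that the split interface makes possible: allocate once when the vcpu is created, attach and detach the same struct ctxop around each VM entry, and free it during cleanup. In outline, condensed from the hunks above:

	/* vcpu_init(): allocate once, outside any critical section */
	vcpu->ctxop = ctxop_allocate(&vmm_ctxop_tpl, &vcpu->vtc);

	/* vm_run() entry: attach the cached ctxop to the running thread */
	ctxop_attach(curthread, vcpu->ctxop);

	/*
	 * vm_run() exit: detach under kpreempt_disable() and save any
	 * remaining vCPU context state by hand, since detach (unlike the
	 * old removectx()/vmm_freectx() path) does not invoke a free op.
	 */
	kpreempt_disable();
	ctxop_detach(curthread, vcpu->ctxop);
	vmm_savectx(&vcpu->vtc);
	kpreempt_enable();

	/* vcpu_cleanup(): release the cached ctxop */
	ctxop_free(vcpu->ctxop);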
diff --git a/usr/src/uts/i86xpv/os/xen_machdep.c b/usr/src/uts/i86xpv/os/xen_machdep.c
index fd81d6d0e8..7106be2b2b 100644
--- a/usr/src/uts/i86xpv/os/xen_machdep.c
+++ b/usr/src/uts/i86xpv/os/xen_machdep.c
@@ -1086,7 +1086,7 @@ xen_xlate_errcode(int error)
* Caller responsible for preventing kernel preemption.
*/
void
-xen_enable_user_iopl(void)
+xen_enable_user_iopl(void *arg __unused)
{
physdev_set_iopl_t set_iopl;
set_iopl.iopl = 3; /* user ring 3 */
@@ -1097,7 +1097,7 @@ xen_enable_user_iopl(void)
* Drop PS_IOPL on current vcpu to kernel level
*/
void
-xen_disable_user_iopl(void)
+xen_disable_user_iopl(void *arg __unused)
{
physdev_set_iopl_t set_iopl;
set_iopl.iopl = 1; /* kernel pseudo ring 1 */
diff --git a/usr/src/uts/intel/os/archdep.c b/usr/src/uts/intel/os/archdep.c
index 14d20bb487..3d2996880d 100644
--- a/usr/src/uts/intel/os/archdep.c
+++ b/usr/src/uts/intel/os/archdep.c
@@ -246,7 +246,7 @@ setfpregs(klwp_t *lwp, fpregset_t *fp)
* FPU context is still active, release the
* ownership.
*/
- fp_free(fpu, 0);
+ fp_free(fpu);
}
}
/*
diff --git a/usr/src/uts/intel/os/cpc_subr.c b/usr/src/uts/intel/os/cpc_subr.c
index 71e1ebaeee..e98a8e3d81 100644
--- a/usr/src/uts/intel/os/cpc_subr.c
+++ b/usr/src/uts/intel/os/cpc_subr.c
@@ -168,8 +168,7 @@ kcpc_hw_init(cpu_t *cp)
if (kcpc_counts_include_idle)
return;
- installctx(t, cp, kcpc_idle_save, kcpc_idle_restore,
- NULL, NULL, NULL, NULL, NULL);
+ kcpc_idle_ctxop_install(t, cp);
}
void
diff --git a/usr/src/uts/intel/os/desctbls.c b/usr/src/uts/intel/os/desctbls.c
index 35345c3fe8..0969392855 100644
--- a/usr/src/uts/intel/os/desctbls.c
+++ b/usr/src/uts/intel/os/desctbls.c
@@ -1116,7 +1116,7 @@ init_boot_gdt(user_desc_t *bgdt)
* the branded entry points.
*/
void
-brand_interpositioning_enable(void)
+brand_interpositioning_enable(void *arg __unused)
{
gate_desc_t *idt = CPU->cpu_idt;
int i;
@@ -1171,7 +1171,7 @@ brand_interpositioning_enable(void)
* the standard entry points, which bypass the interpositioning hooks.
*/
void
-brand_interpositioning_disable(void)
+brand_interpositioning_disable(void *arg __unused)
{
gate_desc_t *idt = CPU->cpu_idt;
int i;
diff --git a/usr/src/uts/intel/os/fpu.c b/usr/src/uts/intel/os/fpu.c
index 0037f49f85..6f0345b391 100644
--- a/usr/src/uts/intel/os/fpu.c
+++ b/usr/src/uts/intel/os/fpu.c
@@ -22,6 +22,7 @@
* Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2021 Joyent, Inc.
* Copyright 2021 RackTop Systems, Inc.
+ * Copyright 2021 Oxide Computer Company
*/
/* Copyright (c) 1990, 1991 UNIX System Laboratories, Inc. */
@@ -579,6 +580,22 @@ void (*xsavep)(struct xsave_state *, uint64_t) = xsave;
static int fpe_sicode(uint_t);
static int fpe_simd_sicode(uint_t);
+static void fp_new_lwp(void *, void *);
+static void fp_free_ctx(void *, int);
+
+static struct ctxop *
+fp_ctxop_allocate(struct fpu_ctx *fp)
+{
+ const struct ctxop_template tpl = {
+ .ct_rev = CTXOP_TPL_REV,
+ .ct_save = fpsave_ctxt,
+ .ct_restore = fprestore_ctxt,
+ .ct_fork = fp_new_lwp,
+ .ct_lwp_create = fp_new_lwp,
+ .ct_free = fp_free_ctx,
+ };
+ return (ctxop_allocate(&tpl, fp));
+}
/*
* Copy the state of parent lwp's floating point context into the new lwp.
@@ -589,8 +606,9 @@ static int fpe_simd_sicode(uint_t);
* reset to their initial state.
*/
static void
-fp_new_lwp(kthread_id_t t, kthread_id_t ct)
+fp_new_lwp(void *parent, void *child)
{
+ kthread_id_t t = parent, ct = child;
struct fpu_ctx *fp; /* parent fpu context */
struct fpu_ctx *cfp; /* new fpu context */
struct fxsave_state *fx, *cfx;
@@ -651,8 +669,7 @@ fp_new_lwp(kthread_id_t t, kthread_id_t ct)
* before returning to user land.
*/
- installctx(ct, cfp, fpsave_ctxt, fprestore_ctxt, fp_new_lwp,
- fp_new_lwp, NULL, fp_free, NULL);
+ ctxop_attach(ct, fp_ctxop_allocate(cfp));
}
/*
@@ -672,9 +689,8 @@ fp_new_lwp(kthread_id_t t, kthread_id_t ct)
* disable fpu and release the fp context for the CPU
*
*/
-/*ARGSUSED*/
void
-fp_free(struct fpu_ctx *fp, int isexec)
+fp_free(struct fpu_ctx *fp)
{
ASSERT(fp_kind != FP_NO);
@@ -699,6 +715,15 @@ fp_free(struct fpu_ctx *fp, int isexec)
}
/*
+ * Wrapper for freectx to make the types line up for fp_free()
+ */
+static void
+fp_free_ctx(void *arg, int isexec __unused)
+{
+ fp_free((struct fpu_ctx *)arg);
+}
+
+/*
* Store the floating point state and disable the floating point unit.
*/
void
@@ -778,19 +803,18 @@ void
fp_exec(void)
{
struct fpu_ctx *fp = &ttolwp(curthread)->lwp_pcb.pcb_fpu;
- struct ctxop *ctx = installctx_preallocate();
if (fp_save_mech == FP_XSAVE) {
fp->fpu_xsave_mask = XFEATURE_FP_ALL;
}
+ struct ctxop *ctx = fp_ctxop_allocate(fp);
/*
* Make sure that we're not preempted in the middle of initializing the
* FPU on CPU.
*/
kpreempt_disable();
- installctx(curthread, fp, fpsave_ctxt, fprestore_ctxt, fp_new_lwp,
- fp_new_lwp, NULL, fp_free, ctx);
+ ctxop_attach(curthread, ctx);
fpinit();
fp->fpu_flags = FPU_EN;
kpreempt_enable();
@@ -818,8 +842,7 @@ fp_seed(void)
fp->fpu_xsave_mask = XFEATURE_FP_ALL;
}
- installctx(curthread, fp, fpsave_ctxt, fprestore_ctxt, fp_new_lwp,
- fp_new_lwp, NULL, fp_free, NULL);
+ ctxop_attach(curthread, fp_ctxop_allocate(fp));
fpinit();
/*
@@ -1297,6 +1320,12 @@ kernel_fpu_no_swtch(void)
}
}
+static const struct ctxop_template kfpu_ctxop_tpl = {
+ .ct_rev = CTXOP_TPL_REV,
+ .ct_save = kernel_fpu_ctx_save,
+ .ct_restore = kernel_fpu_ctx_restore,
+};
+
void
kernel_fpu_begin(kfpu_state_t *kfpu, uint_t flags)
{
@@ -1364,7 +1393,7 @@ kernel_fpu_begin(kfpu_state_t *kfpu, uint_t flags)
* FPU or another code path) so FPU_VALID could be set. This is handled
* by fp_save, as is the FPU_EN check.
*/
- ctx = installctx_preallocate();
+ ctx = ctxop_allocate(&kfpu_ctxop_tpl, kfpu);
kpreempt_disable();
if (pl != NULL) {
if ((flags & KFPU_USE_LWP) == 0)
@@ -1373,17 +1402,14 @@ kernel_fpu_begin(kfpu_state_t *kfpu, uint_t flags)
}
/*
- * Set the context operations for kernel FPU usage. Note that this is
- * done with a preallocated buffer and under kpreempt_disable because
- * without a preallocated buffer, installctx does a sleeping
- * allocation. We haven't finished initializing our kernel FPU state
- * yet, and in the rare case that we happen to save/restore just as
- * installctx() exits its own kpreempt_enable() internal call, we
- * guard against restoring an uninitialized buffer (0xbaddcafe).
+ * Set the context operations for kernel FPU usage. Because kernel FPU
+ * setup and ctxop attachment need to happen under the protection of
+ * kpreempt_disable(), we allocate the ctxop outside the guard so its
+ * sleeping allocation will not cause a voluntary swtch(). This allows
+ * the rest of the initialization to proceed, ensuring valid state for
+ * the ctxop handlers.
*/
- installctx(curthread, kfpu, kernel_fpu_ctx_save, kernel_fpu_ctx_restore,
- NULL, NULL, NULL, NULL, ctx);
-
+ ctxop_attach(curthread, ctx);
curthread->t_flag |= T_KFPU;
if ((flags & KFPU_USE_LWP) == KFPU_USE_LWP) {
@@ -1433,9 +1459,9 @@ kernel_fpu_end(kfpu_state_t *kfpu, uint_t flags)
*
* Calling fpdisable only effects the current CPU's %cr0 register.
*
- * During removectx and kpreempt_enable, we can voluntarily context
+ * During ctxop_remove and kpreempt_enable, we can voluntarily context
* switch, so the CPU we were on when we entered this function might
- * not be the same one we're on when we return from removectx or end
+ * not be the same one we're on when we return from ctxop_remove or end
* the function. Note there can be user-level context switch handlers
* still installed if this is a user-level thread.
*
@@ -1477,8 +1503,7 @@ kernel_fpu_end(kfpu_state_t *kfpu, uint_t flags)
}
if ((flags & KFPU_NO_STATE) == 0) {
- removectx(curthread, kfpu, kernel_fpu_ctx_save,
- kernel_fpu_ctx_restore, NULL, NULL, NULL, NULL);
+ ctxop_remove(curthread, &kfpu_ctxop_tpl, kfpu);
if (kfpu != NULL) {
if (kfpu->kfpu_curthread != curthread) {
diff --git a/usr/src/uts/intel/os/sundep.c b/usr/src/uts/intel/os/sundep.c
index 80e149f01b..8938dfa0c6 100644
--- a/usr/src/uts/intel/os/sundep.c
+++ b/usr/src/uts/intel/os/sundep.c
@@ -21,6 +21,7 @@
/*
* Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2021 Joyent, Inc.
+ * Copyright 2021 Oxide Computer Company
*/
/* Copyright (c) 1990, 1991 UNIX System Laboratories, Inc. */
@@ -28,6 +29,7 @@
/* All Rights Reserved */
#include <sys/types.h>
+#include <sys/stdbool.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/signal.h>
@@ -457,8 +459,9 @@ lwp_pcb_exit(void)
/*ARGSUSED*/
void
-lwp_segregs_save(klwp_t *lwp)
+lwp_segregs_save(void *arg)
{
+ klwp_t *lwp = arg;
pcb_t *pcb = &lwp->lwp_pcb;
struct regs *rp;
@@ -702,8 +705,9 @@ gdt_ucode_model(model_t model)
* on current cpu's GDT.
*/
static void
-lwp_segregs_restore(klwp_t *lwp)
+lwp_segregs_restore(void *arg)
{
+ klwp_t *lwp = arg;
pcb_t *pcb = &lwp->lwp_pcb;
ASSERT(VALID_LWP_DESC(&pcb->pcb_fsdesc));
@@ -721,8 +725,9 @@ lwp_segregs_restore(klwp_t *lwp)
#ifdef _SYSCALL32_IMPL
static void
-lwp_segregs_restore32(klwp_t *lwp)
+lwp_segregs_restore32(void *arg)
{
+ klwp_t *lwp = arg;
/*LINTED*/
cpu_t *cpu = CPU;
pcb_t *pcb = &lwp->lwp_pcb;
@@ -737,6 +742,13 @@ lwp_segregs_restore32(klwp_t *lwp)
#endif /* _SYSCALL32_IMPL */
+static const struct ctxop_template brand_interpose_ctxop_tpl = {
+ .ct_rev = CTXOP_TPL_REV,
+ .ct_save = brand_interpositioning_disable,
+ .ct_restore = brand_interpositioning_enable,
+ .ct_exit = brand_interpositioning_disable,
+};
+
/*
* If this is a process in a branded zone, then we want it to use the brand
* syscall entry points instead of the standard Solaris entry points. This
@@ -751,16 +763,14 @@ lwp_attach_brand_hdlrs(klwp_t *lwp)
ASSERT(PROC_IS_BRANDED(lwptoproc(lwp)));
- ASSERT(removectx(t, NULL, brand_interpositioning_disable,
- brand_interpositioning_enable, NULL, NULL,
- brand_interpositioning_disable, NULL) == 0);
- installctx(t, NULL, brand_interpositioning_disable,
- brand_interpositioning_enable, NULL, NULL,
- brand_interpositioning_disable, NULL, NULL);
+ /* Confirm that brand interposition ctxop is not already present */
+ ASSERT0(ctxop_remove(t, &brand_interpose_ctxop_tpl, NULL));
+
+ ctxop_install(t, &brand_interpose_ctxop_tpl, NULL);
if (t == curthread) {
kpreempt_disable();
- brand_interpositioning_enable();
+ brand_interpositioning_enable(NULL);
kpreempt_enable();
}
}
@@ -780,17 +790,21 @@ lwp_detach_brand_hdlrs(klwp_t *lwp)
kpreempt_disable();
/* Remove the original context handlers */
- VERIFY(removectx(t, NULL, brand_interpositioning_disable,
- brand_interpositioning_enable, NULL, NULL,
- brand_interpositioning_disable, NULL) != 0);
+ ctxop_remove(t, &brand_interpose_ctxop_tpl, NULL);
if (t == curthread) {
/* Cleanup our MSR and IDT entries. */
- brand_interpositioning_disable();
+ brand_interpositioning_disable(NULL);
kpreempt_enable();
}
}
+static const struct ctxop_template sep_tpl = {
+ .ct_rev = CTXOP_TPL_REV,
+ .ct_save = sep_save,
+ .ct_restore = sep_restore,
+};
+
/*
* Add any lwp-associated context handlers to the lwp at the beginning
* of the lwp's useful life.
@@ -815,14 +829,19 @@ void
lwp_installctx(klwp_t *lwp)
{
kthread_t *t = lwptot(lwp);
- int thisthread = t == curthread;
+ bool thisthread = (t == curthread);
+ struct ctxop *ctx;
+
+ const struct ctxop_template segreg_tpl = {
+ .ct_rev = CTXOP_TPL_REV,
+ .ct_save = lwp_segregs_save,
#ifdef _SYSCALL32_IMPL
- void (*restop)(klwp_t *) = lwp_getdatamodel(lwp) == DATAMODEL_NATIVE ?
- lwp_segregs_restore : lwp_segregs_restore32;
+ .ct_restore = lwp_getdatamodel(lwp) == DATAMODEL_NATIVE ?
+ lwp_segregs_restore : lwp_segregs_restore32
#else
- void (*restop)(klwp_t *) = lwp_segregs_restore;
+ .ct_restore = lwp_segregs_restore,
#endif
- struct ctxop *ctx;
+ };
/*
* Install the basic lwp context handlers on each lwp.
@@ -836,21 +855,18 @@ lwp_installctx(klwp_t *lwp)
* On the i386 kernel, the context handlers are responsible for
* virtualizing %gs/%fs to the lwp by updating the per-cpu GDTs
*/
- ASSERT(removectx(t, lwp, lwp_segregs_save, restop,
- NULL, NULL, NULL, NULL) == 0);
+ ASSERT0(ctxop_remove(t, &segreg_tpl, lwp));
+
+ ctx = ctxop_allocate(&segreg_tpl, lwp);
if (thisthread) {
- ctx = installctx_preallocate();
kpreempt_disable();
- } else {
- ctx = NULL;
}
- installctx(t, lwp, lwp_segregs_save, restop,
- NULL, NULL, NULL, NULL, ctx);
+ ctxop_attach(t, ctx);
if (thisthread) {
/*
* Since we're the right thread, set the values in the GDT
*/
- restop(lwp);
+ segreg_tpl.ct_restore(lwp);
kpreempt_enable();
}
@@ -864,17 +880,14 @@ lwp_installctx(klwp_t *lwp)
*/
if (is_x86_feature(x86_featureset, X86FSET_SEP)) {
caddr_t kstktop = (caddr_t)lwp->lwp_regs;
- ASSERT(removectx(t, kstktop,
- sep_save, sep_restore, NULL, NULL, NULL, NULL) == 0);
+ ASSERT0(ctxop_remove(t, &sep_tpl, kstktop));
+
+ ctx = ctxop_allocate(&sep_tpl, kstktop);
if (thisthread) {
- ctx = installctx_preallocate();
kpreempt_disable();
- } else {
- ctx = NULL;
}
- installctx(t, kstktop,
- sep_save, sep_restore, NULL, NULL, NULL, NULL, ctx);
+ ctxop_attach(t, ctx);
if (thisthread) {
/*
* We're the right thread, so set the stack pointer
diff --git a/usr/src/uts/intel/os/sysi86.c b/usr/src/uts/intel/os/sysi86.c
index b107afddfb..9e793e6075 100644
--- a/usr/src/uts/intel/os/sysi86.c
+++ b/usr/src/uts/intel/os/sysi86.c
@@ -93,9 +93,6 @@ sysi86(short cmd, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3)
*/
case SI86V86:
if (arg1 == V86SC_IOPL) {
-#if defined(__xpv)
- struct ctxop *ctx;
-#endif
struct regs *rp = lwptoregs(ttolwp(curthread));
greg_t oldpl = rp->r_ps & PS_IOPL;
greg_t newpl = arg2 & PS_IOPL;
@@ -108,12 +105,18 @@ sysi86(short cmd, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3)
secpolicy_sys_config(CRED(), B_FALSE)) != 0)
return (set_errno(error));
#if defined(__xpv)
- ctx = installctx_preallocate();
+ const struct ctxop_template xen_tpl = {
+ .ct_rev = CTXOP_TPL_REV,
+ .ct_save = xen_disable_user_iopl,
+ .ct_restore = xen_enable_user_iopl,
+ .ct_exit = xen_disable_user_iopl,
+ };
+ struct ctxop *ctx;
+
+ ctx = ctxop_allocate(&xen_tpl, NULL);
kpreempt_disable();
- installctx(curthread, NULL, xen_disable_user_iopl,
- xen_enable_user_iopl, NULL, NULL,
- xen_disable_user_iopl, NULL, ctx);
- xen_enable_user_iopl();
+ ctxop_attach(curthread, ctx);
+ xen_enable_user_iopl(NULL);
kpreempt_enable();
#else
rp->r_ps ^= oldpl ^ newpl;
diff --git a/usr/src/uts/intel/sys/archsystm.h b/usr/src/uts/intel/sys/archsystm.h
index 0cc12086f3..6c43f2fe77 100644
--- a/usr/src/uts/intel/sys/archsystm.h
+++ b/usr/src/uts/intel/sys/archsystm.h
@@ -124,7 +124,7 @@ extern void getfpregs32(klwp_t *, fpregset32_t *);
struct fpu_ctx;
-extern void fp_free(struct fpu_ctx *, int);
+extern void fp_free(struct fpu_ctx *);
extern void fp_save(struct fpu_ctx *);
extern void fp_restore(struct fpu_ctx *);
@@ -133,8 +133,8 @@ extern int fpu_pentium_fdivbug;
extern void sep_save(void *);
extern void sep_restore(void *);
-extern void brand_interpositioning_enable(void);
-extern void brand_interpositioning_disable(void);
+extern void brand_interpositioning_enable(void *);
+extern void brand_interpositioning_disable(void *);
struct regs;
diff --git a/usr/src/uts/intel/sys/hypervisor.h b/usr/src/uts/intel/sys/hypervisor.h
index 8456380453..acb6ad7202 100644
--- a/usr/src/uts/intel/sys/hypervisor.h
+++ b/usr/src/uts/intel/sys/hypervisor.h
@@ -154,8 +154,8 @@ extern void xen_set_segment_base(int, ulong_t);
#endif /* __amd64 */
extern long xen_vcpu_up(processorid_t);
extern long xen_vcpu_down(processorid_t);
-extern void xen_enable_user_iopl(void);
-extern void xen_disable_user_iopl(void);
+extern void xen_enable_user_iopl(void *);
+extern void xen_disable_user_iopl(void *);
extern int xen_get_mc_physcpuinfo(xen_mc_logical_cpu_t *, uint_t *);
extern uint_t xen_phys_ncpus;
diff --git a/usr/src/uts/sun4/ml/subr_asm.s b/usr/src/uts/sun4/ml/subr_asm.s
index e08e346a0d..1f1c4f5a06 100644
--- a/usr/src/uts/sun4/ml/subr_asm.s
+++ b/usr/src/uts/sun4/ml/subr_asm.s
@@ -270,7 +270,7 @@
mov %o0, %gsr
SET_SIZE(_fp_write_pgsr)
-/*
+/*
* set_gsr(uint64_t buf, kfpu_t *fp)
* Set the graphics status register info to fp from buf
*/
@@ -296,7 +296,7 @@
* The Spitfire floating point code has been changed not to use install/
* save/restore/fork/freectx() because of the special memcpy library
* routines, which will lose too much performance if they have to go
- * through the fp_disabled trap (which used to call installctx()). So
+ * through the fp_disabled trap (which used to call ctxop_install()). So
* now fp_save/fp_restore are called from resume, and they don't care
* whether floating point was enabled from the user program via the
* fp_enabled trap or from the memcpy library, which just turns on floating
@@ -315,7 +315,7 @@
* setfpregs/xregs_setfpregs. But note that for saving and restoring
* context, both *must* happen. For prmachdep, aka access from [k]adb,
* it's OK if only one part happens.
- */
+ */
/*
* fp_save(kfpu_t *fp)
diff --git a/usr/src/uts/sun4u/os/cpc_subr.c b/usr/src/uts/sun4u/os/cpc_subr.c
index 5efa580e71..9bb4834f2a 100644
--- a/usr/src/uts/sun4u/os/cpc_subr.c
+++ b/usr/src/uts/sun4u/os/cpc_subr.c
@@ -95,8 +95,7 @@ kcpc_hw_startup_cpu(ushort_t cpflags)
if (kcpc_counts_include_idle)
return;
- installctx(t, cp, kcpc_idle_save, kcpc_idle_restore, NULL, NULL,
- NULL, NULL, NULL);
+ kcpc_idle_ctxop_install(t, cp);
}
/*
diff --git a/usr/src/uts/sun4v/os/cpc_subr.c b/usr/src/uts/sun4v/os/cpc_subr.c
index 558549e9b3..36ddaa9506 100644
--- a/usr/src/uts/sun4v/os/cpc_subr.c
+++ b/usr/src/uts/sun4v/os/cpc_subr.c
@@ -94,8 +94,7 @@ kcpc_hw_startup_cpu(ushort_t cpflags)
if (kcpc_counts_include_idle)
return;
- installctx(t, cp, kcpc_idle_save, kcpc_idle_restore, NULL, NULL,
- NULL, NULL, NULL);
+ kcpc_idle_ctxop_install(t, cp);
}
/*