author    Bryan Cantrill <bryan@joyent.com>  2015-01-03 00:48:36 +0000
committer Bryan Cantrill <bryan@joyent.com>  2015-01-03 00:48:36 +0000
commit    0173c3089b5d2cc973f4453ad6410855e13ad28a (patch)
tree      64c41ae1603a8c50bcf35a99a3af993463b02ffa
parent    ce402f7264952f724625a71be91af2e2b08c950c (diff)
download  illumos-joyent-0173c3089b5d2cc973f4453ad6410855e13ad28a.tar.gz
OS-3678 many file event monitors on a single vnode can induce stack overflow
-rw-r--r--  usr/src/uts/common/disp/thread.c   120
-rw-r--r--  usr/src/uts/common/fs/fem.c        688
-rw-r--r--  usr/src/uts/common/os/sched.c       15
-rw-r--r--  usr/src/uts/common/sys/proc.h        6
-rw-r--r--  usr/src/uts/common/sys/thread.h      6
-rw-r--r--  usr/src/uts/intel/ia32/ml/swtch.s  104
-rw-r--r--  usr/src/uts/sun4/os/machdep.c       11
7 files changed, 865 insertions(+), 85 deletions(-)
diff --git a/usr/src/uts/common/disp/thread.c b/usr/src/uts/common/disp/thread.c
index 331ca417e2..cfc4c99f64 100644
--- a/usr/src/uts/common/disp/thread.c
+++ b/usr/src/uts/common/disp/thread.c
@@ -21,7 +21,7 @@
/*
* Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2013, Joyent, Inc. All rights reserved.
+ * Copyright (c) 2015, Joyent, Inc. All rights reserved.
*/
#include <sys/types.h>
@@ -75,6 +75,10 @@
#include <sys/cpucaps.h>
#include <sys/kiconv.h>
+#ifndef STACK_GROWTH_DOWN
+#error Stacks do not grow downward; 3b2 zombie attack detected!
+#endif
+
struct kmem_cache *thread_cache; /* cache of free threads */
struct kmem_cache *lwp_cache; /* cache of free lwps */
struct kmem_cache *turnstile_cache; /* cache of free turnstiles */
@@ -372,7 +376,7 @@ thread_create(
if (stksize <= sizeof (kthread_t) + PTR24_ALIGN)
cmn_err(CE_PANIC, "thread_create: proposed stack size"
" too small to hold thread.");
-#ifdef STACK_GROWTH_DOWN
+
stksize -= SA(sizeof (kthread_t) + PTR24_ALIGN - 1);
stksize &= -PTR24_ALIGN; /* make thread aligned */
t = (kthread_t *)(stk + stksize);
@@ -381,13 +385,6 @@ thread_create(
audit_thread_create(t);
t->t_stk = stk + stksize;
t->t_stkbase = stk;
-#else /* stack grows to larger addresses */
- stksize -= SA(sizeof (kthread_t));
- t = (kthread_t *)(stk);
- bzero(t, sizeof (kthread_t));
- t->t_stk = stk + sizeof (kthread_t);
- t->t_stkbase = stk + stksize + sizeof (kthread_t);
-#endif /* STACK_GROWTH_DOWN */
t->t_flag |= T_TALLOCSTK;
t->t_swap = stk;
} else {
@@ -400,13 +397,8 @@ thread_create(
* Initialize t_stk to the kernel stack pointer to use
* upon entry to the kernel
*/
-#ifdef STACK_GROWTH_DOWN
t->t_stk = stk + stksize;
t->t_stkbase = stk;
-#else
- t->t_stk = stk; /* 3b2-like */
- t->t_stkbase = stk + stksize;
-#endif /* STACK_GROWTH_DOWN */
}
if (kmem_stackinfo != 0) {
@@ -589,6 +581,9 @@ thread_exit(void)
if ((t->t_proc_flag & TP_ZTHREAD) != 0)
cmn_err(CE_PANIC, "thread_exit: zthread_exit() not called");
+ if ((t->t_flag & T_SPLITSTK) != 0)
+ cmn_err(CE_PANIC, "thread_exit: called when stack is split");
+
tsd_exit(); /* Clean up this thread's TSD */
kcpc_passivate(); /* clean up performance counter state */
@@ -1891,6 +1886,103 @@ thread_change_pri(kthread_t *t, pri_t disp_pri, int front)
return (on_rq);
}
+
+/*
+ * There are occasions in the kernel when we need much more stack than we
+ * allocate by default, but we do not wish to have that work done
+ * asynchronously by another thread. To accommodate these scenarios, we allow
+ * for a split stack (also known as a "segmented stack") whereby a new stack
+ * is dynamically allocated and the current thread jumps onto it for purposes
+ * of executing the specified function. After the specified function returns,
+ * the stack is deallocated and control is returned to the caller. This
+ * functionality is implemented by thread_splitstack(), below; there are a few
+ * constraints on its use:
+ *
+ * - The caller must be in a context where it is safe to block for memory.
+ * - The caller cannot be in a t_onfault context.
+ * - The called function must not call thread_exit() while on the split stack.
+ *
+ * The code will explicitly panic if these constraints are violated. Notably,
+ * however, thread_splitstack() _can_ be called on a split stack -- there
+ * is no limit to the level that split stacks can nest.
+ *
+ * When the stack is split, it is constructed such that stack backtraces
+ * from kernel debuggers continue to function -- though note that DTrace's
+ * stack() action and stackdepth function will only show the stack up to and
+ * including thread_splitstack_run(); DTrace explicitly bounds itself to
+ * pointers that exist within the current declared stack as a safety
+ * mechanism.
+ */
+void
+thread_splitstack(void (*func)(void *), void *arg, size_t stksize)
+{
+ kthread_t *t = curthread;
+ caddr_t ostk, ostkbase, stk;
+ ushort_t otflag;
+
+ if (t->t_onfault != NULL)
+ panic("thread_splitstack: called with non-NULL t_onfault");
+
+ ostk = t->t_stk;
+ ostkbase = t->t_stkbase;
+ otflag = t->t_flag;
+
+ stksize = roundup(stksize, PAGESIZE);
+
+ if (stksize < default_stksize)
+ stksize = default_stksize;
+
+ if (stksize == default_stksize) {
+ stk = (caddr_t)segkp_cache_get(segkp_thread);
+ } else {
+ stksize = roundup(stksize, PAGESIZE);
+ stk = (caddr_t)segkp_get(segkp, stksize,
+ (KPD_HASREDZONE | KPD_NO_ANON | KPD_LOCKED));
+ }
+
+ /*
+ * We're going to lock ourselves before we set T_SPLITSTK to assure
+ * that we're not swapped out in the meantime. (Note that we don't
+ * bother to set t_swap, as we're not going to be swapped out.)
+ */
+ thread_lock(t);
+
+ if (!(otflag & T_SPLITSTK))
+ t->t_flag |= T_SPLITSTK;
+
+ t->t_stk = stk + stksize;
+ t->t_stkbase = stk;
+
+ thread_unlock(t);
+
+ /*
+ * Now actually run on the new (split) stack...
+ */
+ thread_splitstack_run(t->t_stk, func, arg);
+
+ /*
+ * We're back onto our own stack; lock ourselves and restore our
+ * pre-split state.
+ */
+ thread_lock(t);
+
+ t->t_stk = ostk;
+ t->t_stkbase = ostkbase;
+
+ if (!(otflag & T_SPLITSTK))
+ t->t_flag &= ~T_SPLITSTK;
+
+ thread_unlock(t);
+
+ /*
+ * Now that we are entirely back on our own stack, call back into
+ * the platform layer to perform any platform-specific cleanup.
+ */
+ thread_splitstack_cleanup();
+
+ segkp_release(segkp, stk);
+}
+
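
For illustration, a minimal sketch of how a kernel subsystem might use thread_splitstack() to push a deep-recursion workload onto a freshly allocated stack. The callback, its argument structure, and the 64K stack size below are hypothetical and not part of this change; they merely exercise the constraints listed above (a blockable context, no t_onfault, and no thread_exit() from the callback):

	typedef struct deep_walk_arg {
		void	*dwa_root;	/* hypothetical structure to walk */
		int	dwa_rval;	/* result handed back to the caller */
	} deep_walk_arg_t;

	static void
	deep_walk(void *arg)
	{
		deep_walk_arg_t *dwa = arg;

		/*
		 * Arbitrarily deep recursion over dwa->dwa_root would go
		 * here; it runs on the split stack and must not call
		 * thread_exit().
		 */
		dwa->dwa_rval = 0;
	}

	static int
	deep_walk_entry(void *root)
	{
		deep_walk_arg_t dwa;

		dwa.dwa_root = root;
		dwa.dwa_rval = -1;

		/* Safe to block for memory here; t_onfault must be NULL. */
		thread_splitstack(deep_walk, &dwa, 64 * 1024);

		return (dwa.dwa_rval);
	}

Control returns to deep_walk_entry() on the original stack once deep_walk() completes, and the temporary stack has already been released.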
/*
* Tunable kmem_stackinfo is set, fill the kernel thread stack with a
* specific pattern.
diff --git a/usr/src/uts/common/fs/fem.c b/usr/src/uts/common/fs/fem.c
index b4e28cc860..5f524def30 100644
--- a/usr/src/uts/common/fs/fem.c
+++ b/usr/src/uts/common/fs/fem.c
@@ -23,6 +23,10 @@
* Use is subject to license terms.
*/
+/*
+ * Copyright (c) 2015, Joyent, Inc. All rights reserved.
+ */
+
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
@@ -33,11 +37,12 @@
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
-
#include <sys/fem.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/vfs_opreg.h>
+#include <sys/stack.h>
+#include <sys/archsystm.h>
#define NNODES_DEFAULT 8 /* Default number of nodes in a fem_list */
/*
@@ -291,6 +296,536 @@ _op_find(femarg_t *ap, void **fp, int offs0, int offs1)
}
#endif
+/*
+ * File event monitoring handoffs
+ *
+ * File event monitoring relies on being able to inject stack frames between
+ * vnode consumers and the underlying file systems. This becomes problematic
+ * when there exist many monitors, as kernel stack depth is finite. The model
+ * very much encodes this injected frame: the flow of control deliberately
+ * lies with the monitor, not with the monitoring system. While we could
+ * conceivably address this by allowing each subsystem to install at most
+ * one monitor per vnode (and impose on subsystems that they handle any
+ * of their own consumer multiplexing internally), this in fact exports a
+ * substantial amount of run-time complexity to deal with an uncommon case
+ * (and, it must be said, assumes a small number of consuming subsystems).
+ * To allow our abstraction to remain clean, we instead check our remaining
+ * stack in every vnext_*() call; if the amount of stack remaining is lower
+ * than a threshold (fem_stack_needed), we call thread_splitstack() to carry
+ * on the execution of the monitors and the underlying vnode operation on a
+ * split stack. Because we can only pass a single argument to our split stack
+ * function, we must marshal our arguments, the mechanics of which are somewhat
+ * ornate in terms of the code: to marshal in a type-safe manner, we define a
+ * baton that is a union of payload structures for each kind of operation,
+ * loading the per-operation payload explicitly and calling into common handoff
+ * code that itself calls thread_splitstack(). The function passed to
+ * thread_splitstack() is a per-entry point function that continues monitor
+ * processing given the specified (marshalled) arguments. While this method
+ * is a little verbose to implement, it has the advantage of being relatively
+ * robust (that is, broadly type-safe) while imposing minimal burden on each
+ * vnext_*() entry point.
+ *
+ * In terms of the implementation:
+ *
+ * - The FEM_BATON_n macros define the per-entry point baton structures
+ * - The fem_baton_payload_t contains the union of these structures
+ * - The FEM_VNEXTn_DECL macros declare the post-handoff entry point
+ * - The FEM_VNEXTn macros constitute the per-handoff entry point
+ *
+ * Note that we don't use variadic macros -- we define a variant of these
+ * macros for each of our relevant argument counts. This may seem overly
+ * explicit, but it is deliberate: the object here is to minimize the
+ * future maintenance burden by minimizing the likelihood of introduced
+ * error -- not to minimize the number of characters in this source file.
+ */
+
+#ifndef STACK_GROWTH_DOWN
+#error Downward stack growth assumed.
+#endif
+
+int fem_stack_toodeep;
+uintptr_t fem_stack_needed = 8 * 1024;
+size_t fem_handoff_stacksize = 128 * 1024;
+
+#define FEM_TOODEEP() (STACK_BIAS + (uintptr_t)getfp() - \
+ (uintptr_t)curthread->t_stkbase < fem_stack_needed)
+
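
To make the threshold check concrete: on a downward-growing stack, the current frame pointer (plus any architectural bias, as on SPARC) minus the base of the stack is the headroom still available. A sketch of the same computation written as a plain function, assuming the kernel's getfp(), STACK_BIAS and curthread definitions; the function itself is illustrative only:

	static int
	fem_stack_is_shallow(void)
	{
		uintptr_t headroom;

		/*
		 * Distance from the current frame down to the lowest legal
		 * stack address; if it drops below fem_stack_needed (8K by
		 * default), the operation is handed off to a 128K split
		 * stack.
		 */
		headroom = STACK_BIAS + (uintptr_t)getfp() -
		    (uintptr_t)curthread->t_stkbase;

		return (headroom < fem_stack_needed);
	}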
+#define FEM_BATON_1(what, t0, l0) \
+ struct { \
+ void *fb_##what##_arg0; \
+ caller_context_t *fb_##what##_ct; \
+ t0 fb_##what##_##l0; \
+ } fb_##what
+
+#define FEM_BATON_2(what, t0, l0, t1, l1) \
+ struct { \
+ void *fb_##what##_arg0; \
+ caller_context_t *fb_##what##_ct; \
+ t0 fb_##what##_##l0; \
+ t1 fb_##what##_##l1; \
+ } fb_##what
+
+#define FEM_BATON_3(what, t0, l0, t1, l1, t2, l2) \
+ struct { \
+ void *fb_##what##_arg0; \
+ caller_context_t *fb_##what##_ct; \
+ t0 fb_##what##_##l0; \
+ t1 fb_##what##_##l1; \
+ t2 fb_##what##_##l2; \
+ } fb_##what
+
+#define FEM_BATON_4(what, t0, l0, t1, l1, t2, l2, t3, l3) \
+ struct { \
+ void *fb_##what##_arg0; \
+ caller_context_t *fb_##what##_ct; \
+ t0 fb_##what##_##l0; \
+ t1 fb_##what##_##l1; \
+ t2 fb_##what##_##l2; \
+ t3 fb_##what##_##l3; \
+ } fb_##what
+
+#define FEM_BATON_5(what, t0, l0, t1, l1, t2, l2, t3, l3, t4, l4) \
+ struct { \
+ void *fb_##what##_arg0; \
+ caller_context_t *fb_##what##_ct; \
+ t0 fb_##what##_##l0; \
+ t1 fb_##what##_##l1; \
+ t2 fb_##what##_##l2; \
+ t3 fb_##what##_##l3; \
+ t4 fb_##what##_##l4; \
+ } fb_##what
+
+#define FEM_BATON_6(what, t0, l0, t1, l1, t2, l2, t3, l3, t4, l4, t5, l5) \
+ struct { \
+ void *fb_##what##_arg0; \
+ caller_context_t *fb_##what##_ct; \
+ t0 fb_##what##_##l0; \
+ t1 fb_##what##_##l1; \
+ t2 fb_##what##_##l2; \
+ t3 fb_##what##_##l3; \
+ t4 fb_##what##_##l4; \
+ t5 fb_##what##_##l5; \
+ } fb_##what
+
+#define FEM_BATON_8(what, t0, l0, t1, l1, t2, l2, t3, l3, t4, l4, t5, l5, \
+ t6, l6, t7, l7) \
+ struct { \
+ void *fb_##what##_arg0; \
+ caller_context_t *fb_##what##_ct; \
+ t0 fb_##what##_##l0; \
+ t1 fb_##what##_##l1; \
+ t2 fb_##what##_##l2; \
+ t3 fb_##what##_##l3; \
+ t4 fb_##what##_##l4; \
+ t5 fb_##what##_##l5; \
+ t6 fb_##what##_##l6; \
+ t7 fb_##what##_##l7; \
+ } fb_##what
+
+#define FEM_BATON_9(what, t0, l0, t1, l1, t2, l2, t3, l3, t4, l4, t5, l5, \
+ t6, l6, t7, l7, t8, l8) \
+ struct { \
+ void *fb_##what##_arg0; \
+ caller_context_t *fb_##what##_ct; \
+ t0 fb_##what##_##l0; \
+ t1 fb_##what##_##l1; \
+ t2 fb_##what##_##l2; \
+ t3 fb_##what##_##l3; \
+ t4 fb_##what##_##l4; \
+ t5 fb_##what##_##l5; \
+ t6 fb_##what##_##l6; \
+ t7 fb_##what##_##l7; \
+ t8 fb_##what##_##l8; \
+ } fb_##what
+
+typedef union {
+ FEM_BATON_2(open, int, mode, cred_t *, cr);
+ FEM_BATON_4(close, int, flag, int, count,
+ offset_t, offset, cred_t *, cr);
+ FEM_BATON_3(read, uio_t *, uiop, int, ioflag, cred_t *, cr);
+ FEM_BATON_3(write, uio_t *, uiop, int, ioflag, cred_t *, cr);
+ FEM_BATON_5(ioctl, int, cmd, intptr_t, arg,
+ int, flag, cred_t *, cr, int *, rvalp);
+ FEM_BATON_3(setfl, int, oflags, int, nflags, cred_t *, cr);
+ FEM_BATON_3(getattr, vattr_t *, vap, int, flags, cred_t *, cr);
+ FEM_BATON_3(setattr, vattr_t *, vap, int, flags, cred_t *, cr);
+ FEM_BATON_3(access, int, mode, int, flags, cred_t *, cr);
+ FEM_BATON_8(lookup, char *, nm, vnode_t **, vpp,
+ pathname_t *, pnp, int, flags, vnode_t *, rdir,
+ cred_t *, cr, int *, direntflags, pathname_t *, realpnp);
+ FEM_BATON_8(create, char *, name, vattr_t *, vap,
+ vcexcl_t, excl, int, mode, vnode_t **, vpp,
+ cred_t *, cr, int, flag, vsecattr_t *, vsecp);
+ FEM_BATON_3(remove, char *, nm, cred_t *, cr, int, flags);
+ FEM_BATON_4(link, vnode_t *, svp, char *, tnm,
+ cred_t *, cr, int, flags);
+ FEM_BATON_5(rename, char *, snm, vnode_t *, tdvp,
+ char *, tnm, cred_t *, cr, int, flags);
+ FEM_BATON_6(mkdir, char *, dirname, vattr_t *, vap,
+ vnode_t **, vpp, cred_t *, cr, int, flags,
+ vsecattr_t *, vsecp);
+ FEM_BATON_4(rmdir, char *, nm, vnode_t *, cdir,
+ cred_t *, cr, int, flags);
+ FEM_BATON_4(readdir, uio_t *, uiop, cred_t *, cr,
+ int *, eofp, int, flags);
+ FEM_BATON_5(symlink, char *, linkname, vattr_t *, vap,
+ char *, target, cred_t *, cr, int, flags);
+ FEM_BATON_2(readlink, uio_t *, uiop, cred_t *, cr);
+ FEM_BATON_2(fsync, int, syncflag, cred_t *, cr);
+ FEM_BATON_1(inactive, cred_t *, cr);
+ FEM_BATON_1(fid, fid_t *, fidp);
+ FEM_BATON_1(rwlock, int, write_lock);
+ FEM_BATON_1(rwunlock, int, write_lock);
+ FEM_BATON_2(seek, offset_t, ooff, offset_t *, noffp);
+ FEM_BATON_1(cmp, vnode_t *, vp2);
+ FEM_BATON_6(frlock, int, cmd, struct flock64 *, bfp,
+ int, flag, offset_t, offset, struct flk_callback *, flk_cbp,
+ cred_t *, cr);
+ FEM_BATON_5(space, int, cmd, struct flock64 *, bfp,
+ int, flag, offset_t, offset, cred_t *, cr);
+ FEM_BATON_1(realvp, vnode_t **, vpp);
+ FEM_BATON_9(getpage, offset_t, off, size_t, len,
+ uint_t *, protp, struct page **, plarr, size_t, plsz,
+ struct seg *, seg, caddr_t, addr, enum seg_rw, rw,
+ cred_t *, cr);
+ FEM_BATON_4(putpage, offset_t, off, size_t, len,
+ int, flags, cred_t *, cr);
+ FEM_BATON_8(map, offset_t, off, struct as *, as,
+ caddr_t *, addrp, size_t, len, uchar_t, prot,
+ uchar_t, maxprot, uint_t, flags, cred_t *, cr);
+ FEM_BATON_8(addmap, offset_t, off, struct as *, as,
+ caddr_t, addr, size_t, len, uchar_t, prot,
+ uchar_t, maxprot, uint_t, flags, cred_t *, cr);
+ FEM_BATON_8(delmap, offset_t, off, struct as *, as,
+ caddr_t, addr, size_t, len, uint_t, prot,
+ uint_t, maxprot, uint_t, flags, cred_t *, cr);
+ FEM_BATON_4(poll, short, events, int, anyyet,
+ short *, reventsp, struct pollhead **, phpp);
+ FEM_BATON_3(dump, caddr_t, addr, offset_t, lbdn, offset_t, dblks);
+ FEM_BATON_3(pathconf, int, cmd, ulong_t *, valp, cred_t *, cr);
+ FEM_BATON_5(pageio, struct page *, pp, u_offset_t, io_off,
+ size_t, io_len, int, flags, cred_t *, cr);
+ FEM_BATON_2(dumpctl, int, action, offset_t *, blkp);
+ FEM_BATON_4(dispose, struct page *, pp, int, flag,
+ int, dn, cred_t *, cr);
+ FEM_BATON_3(setsecattr, vsecattr_t *, vsap, int, flag, cred_t *, cr);
+ FEM_BATON_3(getsecattr, vsecattr_t *, vsap, int, flag, cred_t *, cr);
+ FEM_BATON_4(shrlock, int, cmd, struct shrlock *, shr,
+ int, flag, cred_t *, cr);
+ FEM_BATON_3(vnevent, vnevent_t, vnevent, vnode_t *, dvp, char *, cname);
+ FEM_BATON_3(reqzcbuf, enum uio_rw, ioflag,
+ xuio_t *, xuiop, cred_t *, cr);
+ FEM_BATON_2(retzcbuf, xuio_t *, xuiop, cred_t *, cr);
+} fem_baton_payload_t;
+
+typedef struct {
+ fem_baton_payload_t fb_payload;
+ int (*fb_func)();
+ void (*fb_handoff)();
+ int fb_rval;
+} fem_baton_t;
+
+static int
+fem_handoff(fem_baton_t *bp)
+{
+ fem_stack_toodeep++;
+ thread_splitstack(bp->fb_handoff, bp, fem_handoff_stacksize);
+
+ return (bp->fb_rval);
+}
+
+#define FEM_VNEXT3_DECL(what, a0, a1, a2) \
+void \
+fem_handoff_##what(fem_baton_t *bp) \
+{ \
+ bp->fb_rval = bp->fb_func( \
+ bp->fb_payload.fb_##what.fb_##what##_##a0, \
+ bp->fb_payload.fb_##what.fb_##what##_##a1, \
+ bp->fb_payload.fb_##what.fb_##what##_##a2); \
+}
+
+#define FEM_VNEXT4_DECL(what, a0, a1, a2, a3) \
+void \
+fem_handoff_##what(fem_baton_t *bp) \
+{ \
+ bp->fb_rval = bp->fb_func( \
+ bp->fb_payload.fb_##what.fb_##what##_##a0, \
+ bp->fb_payload.fb_##what.fb_##what##_##a1, \
+ bp->fb_payload.fb_##what.fb_##what##_##a2, \
+ bp->fb_payload.fb_##what.fb_##what##_##a3); \
+}
+
+#define FEM_VNEXT5_DECL(what, a0, a1, a2, a3, a4) \
+void \
+fem_handoff_##what(fem_baton_t *bp) \
+{ \
+ bp->fb_rval = bp->fb_func( \
+ bp->fb_payload.fb_##what.fb_##what##_##a0, \
+ bp->fb_payload.fb_##what.fb_##what##_##a1, \
+ bp->fb_payload.fb_##what.fb_##what##_##a2, \
+ bp->fb_payload.fb_##what.fb_##what##_##a3, \
+ bp->fb_payload.fb_##what.fb_##what##_##a4); \
+}
+
+#define FEM_VNEXT6_DECL(what, a0, a1, a2, a3, a4, a5) \
+void \
+fem_handoff_##what(fem_baton_t *bp) \
+{ \
+ bp->fb_rval = bp->fb_func( \
+ bp->fb_payload.fb_##what.fb_##what##_##a0, \
+ bp->fb_payload.fb_##what.fb_##what##_##a1, \
+ bp->fb_payload.fb_##what.fb_##what##_##a2, \
+ bp->fb_payload.fb_##what.fb_##what##_##a3, \
+ bp->fb_payload.fb_##what.fb_##what##_##a4, \
+ bp->fb_payload.fb_##what.fb_##what##_##a5); \
+}
+
+#define FEM_VNEXT7_DECL(what, a0, a1, a2, a3, a4, a5, a6) \
+void \
+fem_handoff_##what(fem_baton_t *bp) \
+{ \
+ bp->fb_rval = bp->fb_func( \
+ bp->fb_payload.fb_##what.fb_##what##_##a0, \
+ bp->fb_payload.fb_##what.fb_##what##_##a1, \
+ bp->fb_payload.fb_##what.fb_##what##_##a2, \
+ bp->fb_payload.fb_##what.fb_##what##_##a3, \
+ bp->fb_payload.fb_##what.fb_##what##_##a4, \
+ bp->fb_payload.fb_##what.fb_##what##_##a5, \
+ bp->fb_payload.fb_##what.fb_##what##_##a6); \
+}
+
+#define FEM_VNEXT8_DECL(what, a0, a1, a2, a3, a4, a5, a6, a7) \
+void \
+fem_handoff_##what(fem_baton_t *bp) \
+{ \
+ bp->fb_rval = bp->fb_func( \
+ bp->fb_payload.fb_##what.fb_##what##_##a0, \
+ bp->fb_payload.fb_##what.fb_##what##_##a1, \
+ bp->fb_payload.fb_##what.fb_##what##_##a2, \
+ bp->fb_payload.fb_##what.fb_##what##_##a3, \
+ bp->fb_payload.fb_##what.fb_##what##_##a4, \
+ bp->fb_payload.fb_##what.fb_##what##_##a5, \
+ bp->fb_payload.fb_##what.fb_##what##_##a6, \
+ bp->fb_payload.fb_##what.fb_##what##_##a7); \
+}
+
+#define FEM_VNEXT10_DECL(what, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9) \
+void \
+fem_handoff_##what(fem_baton_t *bp) \
+{ \
+ bp->fb_rval = bp->fb_func( \
+ bp->fb_payload.fb_##what.fb_##what##_##a0, \
+ bp->fb_payload.fb_##what.fb_##what##_##a1, \
+ bp->fb_payload.fb_##what.fb_##what##_##a2, \
+ bp->fb_payload.fb_##what.fb_##what##_##a3, \
+ bp->fb_payload.fb_##what.fb_##what##_##a4, \
+ bp->fb_payload.fb_##what.fb_##what##_##a5, \
+ bp->fb_payload.fb_##what.fb_##what##_##a6, \
+ bp->fb_payload.fb_##what.fb_##what##_##a7, \
+ bp->fb_payload.fb_##what.fb_##what##_##a8, \
+ bp->fb_payload.fb_##what.fb_##what##_##a9); \
+}
+
+#define FEM_VNEXT11_DECL(what, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) \
+void \
+fem_handoff_##what(fem_baton_t *bp) \
+{ \
+ bp->fb_rval = bp->fb_func( \
+ bp->fb_payload.fb_##what.fb_##what##_##a0, \
+ bp->fb_payload.fb_##what.fb_##what##_##a1, \
+ bp->fb_payload.fb_##what.fb_##what##_##a2, \
+ bp->fb_payload.fb_##what.fb_##what##_##a3, \
+ bp->fb_payload.fb_##what.fb_##what##_##a4, \
+ bp->fb_payload.fb_##what.fb_##what##_##a5, \
+ bp->fb_payload.fb_##what.fb_##what##_##a6, \
+ bp->fb_payload.fb_##what.fb_##what##_##a7, \
+ bp->fb_payload.fb_##what.fb_##what##_##a8, \
+ bp->fb_payload.fb_##what.fb_##what##_##a9, \
+ bp->fb_payload.fb_##what.fb_##what##_##a10); \
+}
+
+#define FEM_VNEXT3(what, func, a0, a1, a2) \
+ if (FEM_TOODEEP()) { \
+ fem_baton_t *baton; \
+ int rval; \
+ \
+ baton = kmem_alloc(sizeof (fem_baton_t), KM_SLEEP); \
+ baton->fb_payload.fb_##what.fb_##what##_##a0 = a0; \
+ baton->fb_payload.fb_##what.fb_##what##_##a1 = a1; \
+ baton->fb_payload.fb_##what.fb_##what##_##a2 = a2; \
+ baton->fb_handoff = fem_handoff_##what; \
+ baton->fb_func = func; \
+ \
+ rval = fem_handoff(baton); \
+ kmem_free(baton, sizeof (fem_baton_t)); \
+ \
+ return (rval); \
+ } \
+ return (func(a0, a1, a2))
+
+#define FEM_VNEXT4(what, func, a0, a1, a2, a3) \
+ if (FEM_TOODEEP()) { \
+ fem_baton_t *baton; \
+ int rval; \
+ \
+ baton = kmem_alloc(sizeof (fem_baton_t), KM_SLEEP); \
+ baton->fb_payload.fb_##what.fb_##what##_##a0 = a0; \
+ baton->fb_payload.fb_##what.fb_##what##_##a1 = a1; \
+ baton->fb_payload.fb_##what.fb_##what##_##a2 = a2; \
+ baton->fb_payload.fb_##what.fb_##what##_##a3 = a3; \
+ baton->fb_handoff = fem_handoff_##what; \
+ baton->fb_func = func; \
+ \
+ rval = fem_handoff(baton); \
+ kmem_free(baton, sizeof (fem_baton_t)); \
+ \
+ return (rval); \
+ } \
+ return (func(a0, a1, a2, a3))
+
+#define FEM_VNEXT5(what, func, a0, a1, a2, a3, a4) \
+ if (FEM_TOODEEP()) { \
+ fem_baton_t *baton; \
+ int rval; \
+ \
+ baton = kmem_alloc(sizeof (fem_baton_t), KM_SLEEP); \
+ baton->fb_payload.fb_##what.fb_##what##_##a0 = a0; \
+ baton->fb_payload.fb_##what.fb_##what##_##a1 = a1; \
+ baton->fb_payload.fb_##what.fb_##what##_##a2 = a2; \
+ baton->fb_payload.fb_##what.fb_##what##_##a3 = a3; \
+ baton->fb_payload.fb_##what.fb_##what##_##a4 = a4; \
+ baton->fb_handoff = fem_handoff_##what; \
+ baton->fb_func = func; \
+ \
+ rval = fem_handoff(baton); \
+ kmem_free(baton, sizeof (fem_baton_t)); \
+ \
+ return (rval); \
+ } \
+ return (func(a0, a1, a2, a3, a4))
+
+#define FEM_VNEXT6(what, func, a0, a1, a2, a3, a4, a5) \
+ if (FEM_TOODEEP()) { \
+ fem_baton_t *baton; \
+ int rval; \
+ \
+ baton = kmem_alloc(sizeof (fem_baton_t), KM_SLEEP); \
+ baton->fb_payload.fb_##what.fb_##what##_##a0 = a0; \
+ baton->fb_payload.fb_##what.fb_##what##_##a1 = a1; \
+ baton->fb_payload.fb_##what.fb_##what##_##a2 = a2; \
+ baton->fb_payload.fb_##what.fb_##what##_##a3 = a3; \
+ baton->fb_payload.fb_##what.fb_##what##_##a4 = a4; \
+ baton->fb_payload.fb_##what.fb_##what##_##a5 = a5; \
+ baton->fb_handoff = fem_handoff_##what; \
+ baton->fb_func = func; \
+ \
+ rval = fem_handoff(baton); \
+ kmem_free(baton, sizeof (fem_baton_t)); \
+ \
+ return (rval); \
+ } \
+ return (func(a0, a1, a2, a3, a4, a5))
+
+#define FEM_VNEXT7(what, func, a0, a1, a2, a3, a4, a5, a6) \
+ if (FEM_TOODEEP()) { \
+ fem_baton_t *baton; \
+ int rval; \
+ \
+ baton = kmem_alloc(sizeof (fem_baton_t), KM_SLEEP); \
+ baton->fb_payload.fb_##what.fb_##what##_##a0 = a0; \
+ baton->fb_payload.fb_##what.fb_##what##_##a1 = a1; \
+ baton->fb_payload.fb_##what.fb_##what##_##a2 = a2; \
+ baton->fb_payload.fb_##what.fb_##what##_##a3 = a3; \
+ baton->fb_payload.fb_##what.fb_##what##_##a4 = a4; \
+ baton->fb_payload.fb_##what.fb_##what##_##a5 = a5; \
+ baton->fb_payload.fb_##what.fb_##what##_##a6 = a6; \
+ baton->fb_handoff = fem_handoff_##what; \
+ baton->fb_func = func; \
+ \
+ rval = fem_handoff(baton); \
+ kmem_free(baton, sizeof (fem_baton_t)); \
+ \
+ return (rval); \
+ } \
+ return (func(a0, a1, a2, a3, a4, a5, a6))
+
+#define FEM_VNEXT8(what, func, a0, a1, a2, a3, a4, a5, a6, a7) \
+ if (FEM_TOODEEP()) { \
+ fem_baton_t *baton; \
+ int rval; \
+ \
+ baton = kmem_alloc(sizeof (fem_baton_t), KM_SLEEP); \
+ baton->fb_payload.fb_##what.fb_##what##_##a0 = a0; \
+ baton->fb_payload.fb_##what.fb_##what##_##a1 = a1; \
+ baton->fb_payload.fb_##what.fb_##what##_##a2 = a2; \
+ baton->fb_payload.fb_##what.fb_##what##_##a3 = a3; \
+ baton->fb_payload.fb_##what.fb_##what##_##a4 = a4; \
+ baton->fb_payload.fb_##what.fb_##what##_##a5 = a5; \
+ baton->fb_payload.fb_##what.fb_##what##_##a6 = a6; \
+ baton->fb_payload.fb_##what.fb_##what##_##a7 = a7; \
+ baton->fb_handoff = fem_handoff_##what; \
+ baton->fb_func = func; \
+ \
+ rval = fem_handoff(baton); \
+ kmem_free(baton, sizeof (fem_baton_t)); \
+ \
+ return (rval); \
+ } \
+ return (func(a0, a1, a2, a3, a4, a5, a6, a7))
+
+#define FEM_VNEXT10(what, func, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9) \
+ if (FEM_TOODEEP()) { \
+ fem_baton_t *baton; \
+ int rval; \
+ \
+ baton = kmem_alloc(sizeof (fem_baton_t), KM_SLEEP); \
+ baton->fb_payload.fb_##what.fb_##what##_##a0 = a0; \
+ baton->fb_payload.fb_##what.fb_##what##_##a1 = a1; \
+ baton->fb_payload.fb_##what.fb_##what##_##a2 = a2; \
+ baton->fb_payload.fb_##what.fb_##what##_##a3 = a3; \
+ baton->fb_payload.fb_##what.fb_##what##_##a4 = a4; \
+ baton->fb_payload.fb_##what.fb_##what##_##a5 = a5; \
+ baton->fb_payload.fb_##what.fb_##what##_##a6 = a6; \
+ baton->fb_payload.fb_##what.fb_##what##_##a7 = a7; \
+ baton->fb_payload.fb_##what.fb_##what##_##a8 = a8; \
+ baton->fb_payload.fb_##what.fb_##what##_##a9 = a9; \
+ baton->fb_handoff = fem_handoff_##what; \
+ baton->fb_func = func; \
+ \
+ rval = fem_handoff(baton); \
+ kmem_free(baton, sizeof (fem_baton_t)); \
+ \
+ return (rval); \
+ } \
+ return (func(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9))
+
+#define FEM_VNEXT11(what, func, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) \
+ if (FEM_TOODEEP()) { \
+ fem_baton_t *baton; \
+ int rval; \
+ \
+ baton = kmem_alloc(sizeof (fem_baton_t), KM_SLEEP); \
+ baton->fb_payload.fb_##what.fb_##what##_##a0 = a0; \
+ baton->fb_payload.fb_##what.fb_##what##_##a1 = a1; \
+ baton->fb_payload.fb_##what.fb_##what##_##a2 = a2; \
+ baton->fb_payload.fb_##what.fb_##what##_##a3 = a3; \
+ baton->fb_payload.fb_##what.fb_##what##_##a4 = a4; \
+ baton->fb_payload.fb_##what.fb_##what##_##a5 = a5; \
+ baton->fb_payload.fb_##what.fb_##what##_##a6 = a6; \
+ baton->fb_payload.fb_##what.fb_##what##_##a7 = a7; \
+ baton->fb_payload.fb_##what.fb_##what##_##a8 = a8; \
+ baton->fb_payload.fb_##what.fb_##what##_##a9 = a9; \
+ baton->fb_payload.fb_##what.fb_##what##_##a10 = a10; \
+ baton->fb_handoff = fem_handoff_##what; \
+ baton->fb_func = func; \
+ \
+ rval = fem_handoff(baton); \
+ kmem_free(baton, sizeof (fem_baton_t)); \
+ \
+ return (rval); \
+ } \
+ return (func(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10))
+
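
As a concrete illustration of the machinery above, here is a hand-expansion (for readability only, not additional code in this change) of FEM_VNEXT3_DECL(fid, arg0, fidp, ct) together with the FEM_VNEXT3(fid, func, arg0, fidp, ct) invocation that appears in vnext_fid() further below:

	/* FEM_VNEXT3_DECL(fid, arg0, fidp, ct) expands to roughly: */
	void
	fem_handoff_fid(fem_baton_t *bp)
	{
		bp->fb_rval = bp->fb_func(
		    bp->fb_payload.fb_fid.fb_fid_arg0,
		    bp->fb_payload.fb_fid.fb_fid_fidp,
		    bp->fb_payload.fb_fid.fb_fid_ct);
	}

	/*
	 * And FEM_VNEXT3(fid, func, arg0, fidp, ct), as the final statement
	 * of vnext_fid(), expands to roughly:
	 */
	if (FEM_TOODEEP()) {
		fem_baton_t *baton;
		int rval;

		baton = kmem_alloc(sizeof (fem_baton_t), KM_SLEEP);
		baton->fb_payload.fb_fid.fb_fid_arg0 = arg0;
		baton->fb_payload.fb_fid.fb_fid_fidp = fidp;
		baton->fb_payload.fb_fid.fb_fid_ct = ct;
		baton->fb_handoff = fem_handoff_fid;
		baton->fb_func = func;

		rval = fem_handoff(baton);
		kmem_free(baton, sizeof (fem_baton_t));

		return (rval);
	}
	return (func(arg0, fidp, ct));

In the common (shallow) case the call proceeds exactly as it did before this change; in the deep case the arguments are marshalled into the baton, the underlying operation completes on the split stack, and its return value travels back through fb_rval.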
static fem_t *
fem_alloc()
{
@@ -2036,10 +2571,60 @@ static struct fs_operation_def fshead_vfs_spec[] = {
* 5. Return by invoking the base operation with the base object.
*
* for each classification, there needs to be at least one "next" operation
- * for each "head"operation.
- *
+ * for each "head" operation. Note that we also use the FEM_VNEXTn_DECL macros
+ * to define the function to run when the stack is split; see the discussion
+ * on "File event monitoring handoffs", above.
*/
+FEM_VNEXT4_DECL(open, arg0, mode, cr, ct)
+FEM_VNEXT6_DECL(close, arg0, flag, count, offset, cr, ct)
+FEM_VNEXT5_DECL(read, arg0, uiop, ioflag, cr, ct)
+FEM_VNEXT5_DECL(write, arg0, uiop, ioflag, cr, ct)
+FEM_VNEXT7_DECL(ioctl, arg0, cmd, arg, flag, cr, rvalp, ct)
+FEM_VNEXT5_DECL(setfl, arg0, oflags, nflags, cr, ct)
+FEM_VNEXT5_DECL(getattr, arg0, vap, flags, cr, ct)
+FEM_VNEXT5_DECL(setattr, arg0, vap, flags, cr, ct)
+FEM_VNEXT5_DECL(access, arg0, mode, flags, cr, ct)
+FEM_VNEXT10_DECL(lookup, arg0, nm, vpp, pnp, flags, rdir,
+ cr, ct, direntflags, realpnp)
+FEM_VNEXT10_DECL(create, arg0, name, vap, excl, mode, vpp, cr, flag, ct, vsecp)
+FEM_VNEXT5_DECL(remove, arg0, nm, cr, ct, flags)
+FEM_VNEXT6_DECL(link, arg0, svp, tnm, cr, ct, flags)
+FEM_VNEXT7_DECL(rename, arg0, snm, tdvp, tnm, cr, ct, flags)
+FEM_VNEXT8_DECL(mkdir, arg0, dirname, vap, vpp, cr, ct, flags, vsecp)
+FEM_VNEXT6_DECL(rmdir, arg0, nm, cdir, cr, ct, flags)
+FEM_VNEXT6_DECL(readdir, arg0, uiop, cr, eofp, ct, flags)
+FEM_VNEXT7_DECL(symlink, arg0, linkname, vap, target, cr, ct, flags)
+FEM_VNEXT4_DECL(readlink, arg0, uiop, cr, ct)
+FEM_VNEXT4_DECL(fsync, arg0, syncflag, cr, ct)
+FEM_VNEXT3_DECL(fid, arg0, fidp, ct)
+FEM_VNEXT3_DECL(rwlock, arg0, write_lock, ct)
+FEM_VNEXT4_DECL(seek, arg0, ooff, noffp, ct)
+FEM_VNEXT3_DECL(cmp, arg0, vp2, ct)
+FEM_VNEXT8_DECL(frlock, arg0, cmd, bfp, flag, offset, flk_cbp, cr, ct)
+FEM_VNEXT7_DECL(space, arg0, cmd, bfp, flag, offset, cr, ct)
+FEM_VNEXT3_DECL(realvp, arg0, vpp, ct)
+FEM_VNEXT11_DECL(getpage, arg0, off, len, protp, plarr, plsz,
+ seg, addr, rw, cr, ct)
+FEM_VNEXT6_DECL(putpage, arg0, off, len, flags, cr, ct)
+FEM_VNEXT10_DECL(map, arg0, off, as, addrp, len, prot, maxprot,
+ flags, cr, ct)
+FEM_VNEXT10_DECL(addmap, arg0, off, as, addr, len, prot, maxprot,
+ flags, cr, ct)
+FEM_VNEXT10_DECL(delmap, arg0, off, as, addr, len, prot, maxprot,
+ flags, cr, ct)
+FEM_VNEXT6_DECL(poll, arg0, events, anyyet, reventsp, phpp, ct)
+FEM_VNEXT5_DECL(dump, arg0, addr, lbdn, dblks, ct)
+FEM_VNEXT5_DECL(pathconf, arg0, cmd, valp, cr, ct)
+FEM_VNEXT7_DECL(pageio, arg0, pp, io_off, io_len, flags, cr, ct)
+FEM_VNEXT4_DECL(dumpctl, arg0, action, blkp, ct)
+FEM_VNEXT5_DECL(setsecattr, arg0, vsap, flag, cr, ct)
+FEM_VNEXT5_DECL(getsecattr, arg0, vsap, flag, cr, ct)
+FEM_VNEXT6_DECL(shrlock, arg0, cmd, shr, flag, cr, ct)
+FEM_VNEXT5_DECL(vnevent, arg0, vnevent, dvp, cname, ct)
+FEM_VNEXT5_DECL(reqzcbuf, arg0, ioflag, xuiop, cr, ct)
+FEM_VNEXT4_DECL(retzcbuf, arg0, xuiop, cr, ct)
+
int
vnext_open(femarg_t *vf, int mode, cred_t *cr, caller_context_t *ct)
{
@@ -2051,7 +2636,7 @@ vnext_open(femarg_t *vf, int mode, cred_t *cr, caller_context_t *ct)
vsop_find(vf, &func, int, &arg0, vop_open, femop_open);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, mode, cr, ct));
+ FEM_VNEXT4(open, func, arg0, mode, cr, ct);
}
int
@@ -2066,7 +2651,7 @@ vnext_close(femarg_t *vf, int flag, int count, offset_t offset, cred_t *cr,
vsop_find(vf, &func, int, &arg0, vop_close, femop_close);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, flag, count, offset, cr, ct));
+ FEM_VNEXT6(close, func, arg0, flag, count, offset, cr, ct);
}
int
@@ -2081,7 +2666,7 @@ vnext_read(femarg_t *vf, uio_t *uiop, int ioflag, cred_t *cr,
vsop_find(vf, &func, int, &arg0, vop_read, femop_read);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, uiop, ioflag, cr, ct));
+ FEM_VNEXT5(read, func, arg0, uiop, ioflag, cr, ct);
}
int
@@ -2096,7 +2681,7 @@ vnext_write(femarg_t *vf, uio_t *uiop, int ioflag, cred_t *cr,
vsop_find(vf, &func, int, &arg0, vop_write, femop_write);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, uiop, ioflag, cr, ct));
+ FEM_VNEXT5(write, func, arg0, uiop, ioflag, cr, ct);
}
int
@@ -2111,7 +2696,7 @@ vnext_ioctl(femarg_t *vf, int cmd, intptr_t arg, int flag, cred_t *cr,
vsop_find(vf, &func, int, &arg0, vop_ioctl, femop_ioctl);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, cmd, arg, flag, cr, rvalp, ct));
+ FEM_VNEXT7(ioctl, func, arg0, cmd, arg, flag, cr, rvalp, ct);
}
int
@@ -2126,7 +2711,7 @@ vnext_setfl(femarg_t *vf, int oflags, int nflags, cred_t *cr,
vsop_find(vf, &func, int, &arg0, vop_setfl, femop_setfl);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, oflags, nflags, cr, ct));
+ FEM_VNEXT5(setfl, func, arg0, oflags, nflags, cr, ct);
}
int
@@ -2141,7 +2726,7 @@ vnext_getattr(femarg_t *vf, vattr_t *vap, int flags, cred_t *cr,
vsop_find(vf, &func, int, &arg0, vop_getattr, femop_getattr);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, vap, flags, cr, ct));
+ FEM_VNEXT5(getattr, func, arg0, vap, flags, cr, ct);
}
int
@@ -2156,7 +2741,7 @@ vnext_setattr(femarg_t *vf, vattr_t *vap, int flags, cred_t *cr,
vsop_find(vf, &func, int, &arg0, vop_setattr, femop_setattr);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, vap, flags, cr, ct));
+ FEM_VNEXT5(setattr, func, arg0, vap, flags, cr, ct);
}
int
@@ -2171,7 +2756,7 @@ vnext_access(femarg_t *vf, int mode, int flags, cred_t *cr,
vsop_find(vf, &func, int, &arg0, vop_access, femop_access);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, mode, flags, cr, ct));
+ FEM_VNEXT5(access, func, arg0, mode, flags, cr, ct);
}
int
@@ -2187,8 +2772,8 @@ vnext_lookup(femarg_t *vf, char *nm, vnode_t **vpp, pathname_t *pnp,
vsop_find(vf, &func, int, &arg0, vop_lookup, femop_lookup);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, nm, vpp, pnp, flags, rdir, cr, ct,
- direntflags, realpnp));
+ FEM_VNEXT10(lookup, func, arg0, nm, vpp, pnp, flags, rdir, cr, ct,
+ direntflags, realpnp);
}
int
@@ -2204,7 +2789,8 @@ vnext_create(femarg_t *vf, char *name, vattr_t *vap, vcexcl_t excl,
vsop_find(vf, &func, int, &arg0, vop_create, femop_create);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, name, vap, excl, mode, vpp, cr, flag, ct, vsecp));
+ FEM_VNEXT10(create, func, arg0, name, vap, excl,
+ mode, vpp, cr, flag, ct, vsecp);
}
int
@@ -2219,7 +2805,7 @@ vnext_remove(femarg_t *vf, char *nm, cred_t *cr, caller_context_t *ct,
vsop_find(vf, &func, int, &arg0, vop_remove, femop_remove);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, nm, cr, ct, flags));
+ FEM_VNEXT5(remove, func, arg0, nm, cr, ct, flags);
}
int
@@ -2234,7 +2820,7 @@ vnext_link(femarg_t *vf, vnode_t *svp, char *tnm, cred_t *cr,
vsop_find(vf, &func, int, &arg0, vop_link, femop_link);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, svp, tnm, cr, ct, flags));
+ FEM_VNEXT6(link, func, arg0, svp, tnm, cr, ct, flags);
}
int
@@ -2249,7 +2835,7 @@ vnext_rename(femarg_t *vf, char *snm, vnode_t *tdvp, char *tnm, cred_t *cr,
vsop_find(vf, &func, int, &arg0, vop_rename, femop_rename);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, snm, tdvp, tnm, cr, ct, flags));
+ FEM_VNEXT7(rename, func, arg0, snm, tdvp, tnm, cr, ct, flags);
}
int
@@ -2264,7 +2850,7 @@ vnext_mkdir(femarg_t *vf, char *dirname, vattr_t *vap, vnode_t **vpp,
vsop_find(vf, &func, int, &arg0, vop_mkdir, femop_mkdir);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, dirname, vap, vpp, cr, ct, flags, vsecp));
+ FEM_VNEXT8(mkdir, func, arg0, dirname, vap, vpp, cr, ct, flags, vsecp);
}
int
@@ -2279,7 +2865,7 @@ vnext_rmdir(femarg_t *vf, char *nm, vnode_t *cdir, cred_t *cr,
vsop_find(vf, &func, int, &arg0, vop_rmdir, femop_rmdir);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, nm, cdir, cr, ct, flags));
+ FEM_VNEXT6(rmdir, func, arg0, nm, cdir, cr, ct, flags);
}
int
@@ -2294,7 +2880,7 @@ vnext_readdir(femarg_t *vf, uio_t *uiop, cred_t *cr, int *eofp,
vsop_find(vf, &func, int, &arg0, vop_readdir, femop_readdir);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, uiop, cr, eofp, ct, flags));
+ FEM_VNEXT6(readdir, func, arg0, uiop, cr, eofp, ct, flags);
}
int
@@ -2309,7 +2895,7 @@ vnext_symlink(femarg_t *vf, char *linkname, vattr_t *vap, char *target,
vsop_find(vf, &func, int, &arg0, vop_symlink, femop_symlink);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, linkname, vap, target, cr, ct, flags));
+ FEM_VNEXT7(symlink, func, arg0, linkname, vap, target, cr, ct, flags);
}
int
@@ -2323,7 +2909,7 @@ vnext_readlink(femarg_t *vf, uio_t *uiop, cred_t *cr, caller_context_t *ct)
vsop_find(vf, &func, int, &arg0, vop_readlink, femop_readlink);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, uiop, cr, ct));
+ FEM_VNEXT4(readlink, func, arg0, uiop, cr, ct);
}
int
@@ -2337,7 +2923,7 @@ vnext_fsync(femarg_t *vf, int syncflag, cred_t *cr, caller_context_t *ct)
vsop_find(vf, &func, int, &arg0, vop_fsync, femop_fsync);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, syncflag, cr, ct));
+ FEM_VNEXT4(fsync, func, arg0, syncflag, cr, ct);
}
void
@@ -2365,7 +2951,7 @@ vnext_fid(femarg_t *vf, fid_t *fidp, caller_context_t *ct)
vsop_find(vf, &func, int, &arg0, vop_fid, femop_fid);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, fidp, ct));
+ FEM_VNEXT3(fid, func, arg0, fidp, ct);
}
int
@@ -2379,7 +2965,7 @@ vnext_rwlock(femarg_t *vf, int write_lock, caller_context_t *ct)
vsop_find(vf, &func, int, &arg0, vop_rwlock, femop_rwlock);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, write_lock, ct));
+ FEM_VNEXT3(rwlock, func, arg0, write_lock, ct);
}
void
@@ -2407,7 +2993,7 @@ vnext_seek(femarg_t *vf, offset_t ooff, offset_t *noffp, caller_context_t *ct)
vsop_find(vf, &func, int, &arg0, vop_seek, femop_seek);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, ooff, noffp, ct));
+ FEM_VNEXT4(seek, func, arg0, ooff, noffp, ct);
}
int
@@ -2421,7 +3007,7 @@ vnext_cmp(femarg_t *vf, vnode_t *vp2, caller_context_t *ct)
vsop_find(vf, &func, int, &arg0, vop_cmp, femop_cmp);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, vp2, ct));
+ FEM_VNEXT3(cmp, func, arg0, vp2, ct);
}
int
@@ -2437,7 +3023,7 @@ vnext_frlock(femarg_t *vf, int cmd, struct flock64 *bfp, int flag,
vsop_find(vf, &func, int, &arg0, vop_frlock, femop_frlock);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, cmd, bfp, flag, offset, flk_cbp, cr, ct));
+ FEM_VNEXT8(frlock, func, arg0, cmd, bfp, flag, offset, flk_cbp, cr, ct);
}
int
@@ -2452,7 +3038,7 @@ vnext_space(femarg_t *vf, int cmd, struct flock64 *bfp, int flag,
vsop_find(vf, &func, int, &arg0, vop_space, femop_space);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, cmd, bfp, flag, offset, cr, ct));
+ FEM_VNEXT7(space, func, arg0, cmd, bfp, flag, offset, cr, ct);
}
int
@@ -2466,7 +3052,7 @@ vnext_realvp(femarg_t *vf, vnode_t **vpp, caller_context_t *ct)
vsop_find(vf, &func, int, &arg0, vop_realvp, femop_realvp);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, vpp, ct));
+ FEM_VNEXT3(realvp, func, arg0, vpp, ct);
}
int
@@ -2482,8 +3068,8 @@ vnext_getpage(femarg_t *vf, offset_t off, size_t len, uint_t *protp,
vsop_find(vf, &func, int, &arg0, vop_getpage, femop_getpage);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, off, len, protp, plarr, plsz, seg, addr, rw,
- cr, ct));
+ FEM_VNEXT11(getpage, func, arg0, off, len, protp,
+ plarr, plsz, seg, addr, rw, cr, ct);
}
int
@@ -2498,7 +3084,7 @@ vnext_putpage(femarg_t *vf, offset_t off, size_t len, int flags,
vsop_find(vf, &func, int, &arg0, vop_putpage, femop_putpage);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, off, len, flags, cr, ct));
+ FEM_VNEXT6(putpage, func, arg0, off, len, flags, cr, ct);
}
int
@@ -2514,8 +3100,8 @@ vnext_map(femarg_t *vf, offset_t off, struct as *as, caddr_t *addrp,
vsop_find(vf, &func, int, &arg0, vop_map, femop_map);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, off, as, addrp, len, prot, maxprot, flags,
- cr, ct));
+ FEM_VNEXT10(map, func, arg0, off, as, addrp, len, prot, maxprot, flags,
+ cr, ct);
}
int
@@ -2531,8 +3117,8 @@ vnext_addmap(femarg_t *vf, offset_t off, struct as *as, caddr_t addr,
vsop_find(vf, &func, int, &arg0, vop_addmap, femop_addmap);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, off, as, addr, len, prot, maxprot, flags,
- cr, ct));
+ FEM_VNEXT10(addmap, func, arg0, off, as, addr, len, prot, maxprot,
+ flags, cr, ct);
}
int
@@ -2548,8 +3134,8 @@ vnext_delmap(femarg_t *vf, offset_t off, struct as *as, caddr_t addr,
vsop_find(vf, &func, int, &arg0, vop_delmap, femop_delmap);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, off, as, addr, len, prot, maxprot, flags,
- cr, ct));
+ FEM_VNEXT10(delmap, func, arg0, off, as, addr, len, prot, maxprot,
+ flags, cr, ct);
}
int
@@ -2564,7 +3150,7 @@ vnext_poll(femarg_t *vf, short events, int anyyet, short *reventsp,
vsop_find(vf, &func, int, &arg0, vop_poll, femop_poll);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, events, anyyet, reventsp, phpp, ct));
+ FEM_VNEXT6(poll, func, arg0, events, anyyet, reventsp, phpp, ct);
}
int
@@ -2579,7 +3165,7 @@ vnext_dump(femarg_t *vf, caddr_t addr, offset_t lbdn, offset_t dblks,
vsop_find(vf, &func, int, &arg0, vop_dump, femop_dump);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, addr, lbdn, dblks, ct));
+ FEM_VNEXT5(dump, func, arg0, addr, lbdn, dblks, ct);
}
int
@@ -2594,7 +3180,7 @@ vnext_pathconf(femarg_t *vf, int cmd, ulong_t *valp, cred_t *cr,
vsop_find(vf, &func, int, &arg0, vop_pathconf, femop_pathconf);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, cmd, valp, cr, ct));
+ FEM_VNEXT5(pathconf, func, arg0, cmd, valp, cr, ct);
}
int
@@ -2609,7 +3195,7 @@ vnext_pageio(femarg_t *vf, struct page *pp, u_offset_t io_off,
vsop_find(vf, &func, int, &arg0, vop_pageio, femop_pageio);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, pp, io_off, io_len, flags, cr, ct));
+ FEM_VNEXT7(pageio, func, arg0, pp, io_off, io_len, flags, cr, ct);
}
int
@@ -2623,7 +3209,7 @@ vnext_dumpctl(femarg_t *vf, int action, offset_t *blkp, caller_context_t *ct)
vsop_find(vf, &func, int, &arg0, vop_dumpctl, femop_dumpctl);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, action, blkp, ct));
+ FEM_VNEXT4(dumpctl, func, arg0, action, blkp, ct);
}
void
@@ -2653,7 +3239,7 @@ vnext_setsecattr(femarg_t *vf, vsecattr_t *vsap, int flag, cred_t *cr,
vsop_find(vf, &func, int, &arg0, vop_setsecattr, femop_setsecattr);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, vsap, flag, cr, ct));
+ FEM_VNEXT5(setsecattr, func, arg0, vsap, flag, cr, ct);
}
int
@@ -2668,7 +3254,7 @@ vnext_getsecattr(femarg_t *vf, vsecattr_t *vsap, int flag, cred_t *cr,
vsop_find(vf, &func, int, &arg0, vop_getsecattr, femop_getsecattr);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, vsap, flag, cr, ct));
+ FEM_VNEXT5(getsecattr, func, arg0, vsap, flag, cr, ct);
}
int
@@ -2683,7 +3269,7 @@ vnext_shrlock(femarg_t *vf, int cmd, struct shrlock *shr, int flag,
vsop_find(vf, &func, int, &arg0, vop_shrlock, femop_shrlock);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, cmd, shr, flag, cr, ct));
+ FEM_VNEXT6(shrlock, func, arg0, cmd, shr, flag, cr, ct);
}
int
@@ -2698,7 +3284,7 @@ vnext_vnevent(femarg_t *vf, vnevent_t vnevent, vnode_t *dvp, char *cname,
vsop_find(vf, &func, int, &arg0, vop_vnevent, femop_vnevent);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, vnevent, dvp, cname, ct));
+ FEM_VNEXT5(vnevent, func, arg0, vnevent, dvp, cname, ct);
}
int
@@ -2713,7 +3299,7 @@ vnext_reqzcbuf(femarg_t *vf, enum uio_rw ioflag, xuio_t *xuiop, cred_t *cr,
vsop_find(vf, &func, int, &arg0, vop_reqzcbuf, femop_reqzcbuf);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, ioflag, xuiop, cr, ct));
+ FEM_VNEXT5(reqzcbuf, func, arg0, ioflag, xuiop, cr, ct);
}
int
@@ -2727,7 +3313,7 @@ vnext_retzcbuf(femarg_t *vf, xuio_t *xuiop, cred_t *cr, caller_context_t *ct)
vsop_find(vf, &func, int, &arg0, vop_retzcbuf, femop_retzcbuf);
ASSERT(func != NULL);
ASSERT(arg0 != NULL);
- return ((*func)(arg0, xuiop, cr, ct));
+ FEM_VNEXT4(retzcbuf, func, arg0, xuiop, cr, ct);
}
int
diff --git a/usr/src/uts/common/os/sched.c b/usr/src/uts/common/os/sched.c
index c1d6569f11..15e77d39f7 100644
--- a/usr/src/uts/common/os/sched.c
+++ b/usr/src/uts/common/os/sched.c
@@ -27,6 +27,10 @@
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */
+/*
+ * Copyright (c) 2015, Joyent, Inc. All rights reserved.
+ */
+
#include <sys/param.h>
#include <sys/types.h>
#include <sys/sysmacros.h>
@@ -646,16 +650,17 @@ top:
klwp_t *lwp = ttolwp(tp);
/*
- * Swapout eligible lwps (specified by the scheduling
- * class) which don't have TS_DONT_SWAP set. Set the
- * "intent to swap" flag (TS_SWAPENQ) on threads
- * which have TS_DONT_SWAP set so that they can be
+ * Swapout eligible lwps (specified by the scheduling class)
+ * which don't have TS_DONT_SWAP set. Set the "intent to swap"
+ * flag (TS_SWAPENQ) on threads which either have TS_DONT_SWAP
+ * set or are currently on a split stack, so that they can be
* swapped if and when they reach a safe point.
*/
thread_lock(tp);
thread_pri = CL_SWAPOUT(tp, swapflags);
if (thread_pri != -1) {
- if (tp->t_schedflag & TS_DONT_SWAP) {
+ if ((tp->t_schedflag & TS_DONT_SWAP) ||
+ (tp->t_flag & T_SPLITSTK)) {
tp->t_schedflag |= TS_SWAPENQ;
tp->t_trapret = 1;
aston(tp);
diff --git a/usr/src/uts/common/sys/proc.h b/usr/src/uts/common/sys/proc.h
index ee5892066b..134cb9132b 100644
--- a/usr/src/uts/common/sys/proc.h
+++ b/usr/src/uts/common/sys/proc.h
@@ -21,7 +21,7 @@
/*
* Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014 Joyent, Inc. All rights reserved.
+ * Copyright (c) 2015 Joyent, Inc. All rights reserved.
*/
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
@@ -723,6 +723,10 @@ extern kthread_t *thread_unpin(void);
extern void thread_init(void);
extern void thread_load(kthread_t *, void (*)(), caddr_t, size_t);
+extern void thread_splitstack(void (*)(void *), void *, size_t);
+extern void thread_splitstack_run(caddr_t, void (*)(void *), void *);
+extern void thread_splitstack_cleanup(void);
+
extern void tsd_create(uint_t *, void (*)(void *));
extern void tsd_destroy(uint_t *);
extern void *tsd_getcreate(uint_t *, void (*)(void *), void *(*)(void));
diff --git a/usr/src/uts/common/sys/thread.h b/usr/src/uts/common/sys/thread.h
index c7f460e7c7..9f2e166fea 100644
--- a/usr/src/uts/common/sys/thread.h
+++ b/usr/src/uts/common/sys/thread.h
@@ -24,6 +24,10 @@
* Use is subject to license terms.
*/
+/*
+ * Copyright (c) 2015, Joyent, Inc. All rights reserved.
+ */
+
#ifndef _SYS_THREAD_H
#define _SYS_THREAD_H
@@ -367,7 +371,7 @@ typedef struct _kthread {
#define T_WOULDBLOCK 0x0020 /* for lockfs */
#define T_DONTBLOCK 0x0040 /* for lockfs */
#define T_DONTPEND 0x0080 /* for lockfs */
-#define T_SYS_PROF 0x0100 /* profiling on for duration of system call */
+#define T_SPLITSTK 0x0100 /* kernel stack is currently split */
#define T_WAITCVSEM 0x0200 /* waiting for a lwp_cv or lwp_sema on sleepq */
#define T_WATCHPT 0x0400 /* thread undergoing a watchpoint emulation */
#define T_PANIC 0x0800 /* thread initiated a system panic */
diff --git a/usr/src/uts/intel/ia32/ml/swtch.s b/usr/src/uts/intel/ia32/ml/swtch.s
index 331c38d00e..c77e469e0e 100644
--- a/usr/src/uts/intel/ia32/ml/swtch.s
+++ b/usr/src/uts/intel/ia32/ml/swtch.s
@@ -24,7 +24,7 @@
*/
/*
- * Copyright (c) 2013, Joyent, Inc. All rights reserved.
+ * Copyright (c) 2015, Joyent, Inc. All rights reserved.
*/
/*
@@ -337,13 +337,12 @@ resume(kthread_t *t)
.setup_cpu:
/*
- * Setup rsp0 (kernel stack) in TSS to curthread's stack.
- * (Note: Since we don't have saved 'regs' structure for all
- * the threads we can't easily determine if we need to
- * change rsp0. So, we simply change the rsp0 to bottom
- * of the thread stack and it will work for all cases.)
- *
- * XX64 - Is this correct?
+ * Setup rsp0 (kernel stack) in TSS to curthread's saved regs
+ * structure. If this thread doesn't have a regs structure above
+ * the stack -- that is, if lwp_stk_init() was never called for the
+ * thread -- this will set rsp0 to the wrong value, but it's harmless
+ * as it's a kernel thread, and it won't actually attempt to implicitly
+ * use the rsp0 via a privilege change.
*/
movq CPU_TSS(%r13), %r14
movq T_STACK(%r12), %rax
@@ -504,11 +503,12 @@ resume_return:
jne .L5_2
.L5_1:
/*
- * Setup esp0 (kernel stack) in TSS to curthread's stack.
- * (Note: Since we don't have saved 'regs' structure for all
- * the threads we can't easily determine if we need to
- * change esp0. So, we simply change the esp0 to bottom
- * of the thread stack and it will work for all cases.)
+ * Setup esp0 (kernel stack) in TSS to curthread's stack. If this
+ * thread doesn't have a regs structure above the stack -- that is, if
+ * lwp_stk_init() was never called for the thread -- this will set
+ * esp0 to the wrong value, but it's harmless as it's a kernel thread,
+ * and it won't actually attempt to implicitly use the esp0 via a
+ * privilege change.
*/
movl CPU_TSS(%esi), %ecx
addl $REGSIZE+MINFRAME, %eax /* to the bottom of thread stack */
@@ -891,3 +891,81 @@ thread_start(void)
#endif /* __i386 */
#endif /* __lint */
+
+#if defined(__lint)
+
+void
+thread_splitstack_run(caddr_t stack, void (*func)(void *), void *arg)
+{}
+
+void
+thread_splitstack_cleanup(void)
+{}
+
+#else /* __lint */
+
+#if defined(__amd64)
+
+ ENTRY(thread_splitstack_run)
+ pushq %rbp /* push base pointer */
+ movq %rsp, %rbp /* construct frame */
+ movq %rdi, %rsp /* set stack pointer */
+ movq %rdx, %rdi /* load arg */
+ call *%rsi /* call specified function */
+ leave /* pop base pointer */
+ ret
+ SET_SIZE(thread_splitstack_run)
+
+ /*
+ * Once we're back on our own stack, we need to be sure to set the
+ * value of rsp0 in the TSS back to our original stack: if we gave
+ * up the CPU at all while on our split stack, the rsp0 will point
+ * to that stack from resume (above); if we were to try to return to
+ * userland in that state, we will die absolutely horribly (namely,
+ * trying to iretq back to registers in a bunch of freed segkp). We
+ * are expecting this to be called after T_STACK has been restored,
+ * but before we return. It's okay if we are preempted in this code:
+ * when the new CPU picks us up, it will automatically set rsp0
+ * correctly, which is all we're trying to do here.
+ */
+ ENTRY(thread_splitstack_cleanup)
+ LOADCPU(%r8)
+ movq CPU_TSS(%r8), %r9
+ movq CPU_THREAD(%r8), %r10
+ movq T_STACK(%r10), %rax
+ addq $REGSIZE+MINFRAME, %rax
+ movq %rax, TSS_RSP0(%r9)
+ ret
+ SET_SIZE(thread_splitstack_cleanup)
+
+#elif defined(__i386)
+
+ ENTRY(thread_splitstack_run)
+ pushl %ebp /* push base pointer */
+ movl %esp, %ebp /* construct frame */
+ movl 8(%ebp), %esp /* set stack pointer */
+ movl 12(%ebp), %eax /* load func */
+ movl 16(%ebp), %edx /* load arg */
+ pushl %edx /* push arg */
+ call *%eax /* call specified function */
+ addl $4, %esp /* restore stack pointer */
+ leave /* pop base pointer */
+ ret
+ SET_SIZE(thread_splitstack_run)
+
+ /*
+ * See comment in the amd64 code, above.
+ */
+ ENTRY(thread_splitstack_cleanup)
+ LOADCPU(%eax)
+ movl CPU_TSS(%eax), %ecx
+ movl CPU_THREAD(%eax), %edx
+ movl T_STACK(%edx), %edx
+ addl $REGSIZE+MINFRAME, %edx
+ movl %edx, TSS_ESP0(%ecx)
+ ret
+ SET_SIZE(thread_splitstack_cleanup)
+
+#endif /* __i386 */
+
+#endif /* __lint */
diff --git a/usr/src/uts/sun4/os/machdep.c b/usr/src/uts/sun4/os/machdep.c
index e6e4875da6..b9c01971ac 100644
--- a/usr/src/uts/sun4/os/machdep.c
+++ b/usr/src/uts/sun4/os/machdep.c
@@ -20,6 +20,7 @@
*/
/*
* Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, Joyent, Inc. All rights reserved.
*/
#include <sys/types.h>
@@ -893,3 +894,13 @@ lbolt_softint_post(void)
{
setsoftint(lbolt_softint_inum);
}
+
+void
+thread_splitstack_run(caddr_t addr, void (*func)(void *), void *arg)
+{
+ panic("thread_splitstack() not supported on SPARC");
+}
+
+void
+thread_splitstack_cleanup(void)
+{}