Diffstat (limited to 'usr/src/uts/i86pc/os/trap.c')
-rw-r--r--  usr/src/uts/i86pc/os/trap.c | 48
1 file changed, 43 insertions(+), 5 deletions(-)
diff --git a/usr/src/uts/i86pc/os/trap.c b/usr/src/uts/i86pc/os/trap.c
index 4184b116f5..c88fec6fbe 100644
--- a/usr/src/uts/i86pc/os/trap.c
+++ b/usr/src/uts/i86pc/os/trap.c
@@ -32,7 +32,7 @@
/* */
/*
- * Copyright 2012 Joyent, Inc. All rights reserved.
+ * Copyright 2015 Joyent, Inc.
*/
#include <sys/types.h>
@@ -101,6 +101,7 @@
#include <sys/hypervisor.h>
#endif
#include <sys/contract/process_impl.h>
+#include <sys/brand.h>
#define USER 0x10000 /* user-mode flag added to trap type */
@@ -638,6 +639,21 @@ trap(struct regs *rp, caddr_t addr, processorid_t cpuid)
}
/*
+ * We need to check if SMAP is in play. If SMAP is in play, then
+ * any access to a user page will show up as a protection
+ * violation. To see if SMAP is enabled we first check if it's a
+ * user address and whether we have the feature flag set. If we
+ * do and the interrupted registers do not allow for user
+ * accesses (PS_ACHK is not enabled), then we need to die
+ * immediately.
+ */
+ if (addr < (caddr_t)kernelbase &&
+ is_x86_feature(x86_featureset, X86FSET_SMAP) == B_TRUE &&
+ (rp->r_ps & PS_ACHK) == 0) {
+ (void) die(type, rp, addr, cpuid);
+ }
+
+ /*
* See if we can handle as pagefault. Save lofault and onfault
* across this. Here we assume that an address less than
* KERNELBASE is a user fault. We can do this as copy.s
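The predicate added in the hunk above combines three tests: the faulting address must be a user address (below kernelbase), the CPU must advertise SMAP in the feature set, and the saved flags must have PS_ACHK (the EFLAGS.AC bit) clear, meaning no copyin/copyout-style user-access window was open when the fault hit. The user-space sketch below reproduces only that predicate; KERNELBASE_SKETCH, PS_ACHK_SKETCH, cpu_has_smap and smap_fault_is_fatal are illustrative stand-ins, not kernel interfaces, and the constants are assumed values for illustration.

/*
 * Sketch of the SMAP fatal-fault predicate, under the assumptions noted
 * above. All names and constants here are stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define	KERNELBASE_SKETCH	0xfffffd8000000000ULL	/* illustrative split */
#define	PS_ACHK_SKETCH		0x00040000ULL		/* EFLAGS.AC bit */

static bool cpu_has_smap = true;	/* stand-in for the feature-set test */

/* Return true when a kernel-mode fault on this address should be fatal. */
static bool
smap_fault_is_fatal(uint64_t fault_addr, uint64_t saved_rflags)
{
	return (fault_addr < KERNELBASE_SKETCH &&
	    cpu_has_smap &&
	    (saved_rflags & PS_ACHK_SKETCH) == 0);
}

int
main(void)
{
	/* Kernel touched a user address with AC clear: fatal under SMAP. */
	printf("%d\n", smap_fault_is_fatal(0x1000, 0x2));
	/* Same access inside an AC-enabled (stac-style) window: tolerated. */
	printf("%d\n", smap_fault_is_fatal(0x1000, 0x2 | PS_ACHK_SKETCH));
	return (0);
}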
@@ -847,6 +863,17 @@ trap(struct regs *rp, caddr_t addr, processorid_t cpuid)
fault_type = F_INVAL;
}
+ /*
+ * Allow the brand to interpose on invalid memory accesses
+ * prior to running the native pagefault handler. If this
+ * brand hook returns zero, it was able to handle the fault
+ * completely. Otherwise, drive on and call pagefault().
+ */
+ if (PROC_IS_BRANDED(p) && BROP(p)->b_pagefault != NULL &&
+ BROP(p)->b_pagefault(p, lwp, addr, fault_type, rw) == 0) {
+ goto out;
+ }
+
res = pagefault(addr, fault_type, rw, 0);
/*
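The brand hook added in the hunk above follows a common ops-vector pattern: if the process is branded and the brand registered a b_pagefault callback, that callback gets the first chance at the fault, and a zero return short-circuits the native pagefault() path. The self-contained sketch below shows only that dispatch shape; struct brand_ops_sketch, fault_ctx, handle_fault and lx_like_pagefault are hypothetical names, not the kernel's brand framework types.

/*
 * Sketch of optional-hook dispatch: a NULL-checked function pointer gets
 * first crack at the fault; zero means "handled, skip the native path".
 */
#include <stddef.h>
#include <stdio.h>

struct fault_ctx {
	unsigned long	fc_addr;
	int		fc_type;
};

struct brand_ops_sketch {
	/* Returns 0 if the brand fully handled the fault. */
	int	(*b_pagefault)(struct fault_ctx *);
};

static int
native_pagefault(struct fault_ctx *ctx)
{
	printf("native handler for %#lx\n", ctx->fc_addr);
	return (0);
}

static int
handle_fault(struct brand_ops_sketch *bops, struct fault_ctx *ctx)
{
	/* Let the brand interpose first, if it registered a hook. */
	if (bops != NULL && bops->b_pagefault != NULL &&
	    bops->b_pagefault(ctx) == 0)
		return (0);

	return (native_pagefault(ctx));
}

static int
lx_like_pagefault(struct fault_ctx *ctx)
{
	/* Claim only the fault types this hypothetical brand understands. */
	return (ctx->fc_type == 1 ? 0 : -1);
}

int
main(void)
{
	struct brand_ops_sketch bops = { lx_like_pagefault };
	struct fault_ctx handled = { 0x1000, 1 };	/* claimed by the hook */
	struct fault_ctx other = { 0x2000, 2 };		/* falls through */

	handle_fault(&bops, &handled);
	handle_fault(&bops, &other);
	return (0);
}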
@@ -1453,12 +1480,23 @@ trap(struct regs *rp, caddr_t addr, processorid_t cpuid)
ct->t_sig_check = 0;
- mutex_enter(&p->p_lock);
+ /*
+ * As in other code paths that check against TP_CHANGEBIND,
+ * we perform the check first without p_lock held -- only
+ * acquiring p_lock in the unlikely event that it is indeed
+ * set. This is safe because we are doing this after the
+ * astoff(); if we are racing another thread setting
+ * TP_CHANGEBIND on us, we will pick it up on a subsequent
+ * lap through.
+ */
if (curthread->t_proc_flag & TP_CHANGEBIND) {
- timer_lwpbind();
- curthread->t_proc_flag &= ~TP_CHANGEBIND;
+ mutex_enter(&p->p_lock);
+ if (curthread->t_proc_flag & TP_CHANGEBIND) {
+ timer_lwpbind();
+ curthread->t_proc_flag &= ~TP_CHANGEBIND;
+ }
+ mutex_exit(&p->p_lock);
}
- mutex_exit(&p->p_lock);
/*
* for kaio requests that are on the per-process poll queue,
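The TP_CHANGEBIND hunk above replaces an unconditional mutex_enter()/mutex_exit() pair with the check-lock-recheck idiom: read the flag without p_lock, and only in the unlikely case that it appears set take the lock and test it again before acting. The pthreads sketch below reproduces that shape; every name ending in _sketch is illustrative, with the flag word and mutex standing in for t_proc_flag and p_lock.

/*
 * Sketch of the check-lock-recheck idiom, under the assumptions above:
 * the common (flag clear) path takes no lock, and the flag is re-tested
 * under the lock so a racing setter is never lost.
 */
#include <pthread.h>
#include <stdio.h>

#define	TP_CHANGEBIND_SKETCH	0x0001

static pthread_mutex_t p_lock_sketch = PTHREAD_MUTEX_INITIALIZER;
static volatile unsigned int t_proc_flag_sketch;

static void
timer_lwpbind_sketch(void)
{
	printf("rebinding interval timers\n");
}

static void
sig_check_sketch(void)
{
	/* Cheap unlocked test first; the flag is almost always clear. */
	if (t_proc_flag_sketch & TP_CHANGEBIND_SKETCH) {
		pthread_mutex_lock(&p_lock_sketch);
		/* Re-test under the lock before consuming the flag. */
		if (t_proc_flag_sketch & TP_CHANGEBIND_SKETCH) {
			timer_lwpbind_sketch();
			t_proc_flag_sketch &= ~TP_CHANGEBIND_SKETCH;
		}
		pthread_mutex_unlock(&p_lock_sketch);
	}
}

int
main(void)
{
	t_proc_flag_sketch |= TP_CHANGEBIND_SKETCH;
	sig_check_sketch();	/* slow path: locks, rebinds, clears flag */
	sig_check_sketch();	/* flag clear: no lock taken */
	return (0);
}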