Diffstat (limited to 'usr/src/uts/i86pc/os/trap.c')
-rw-r--r--  usr/src/uts/i86pc/os/trap.c  33
1 file changed, 28 insertions(+), 5 deletions(-)
diff --git a/usr/src/uts/i86pc/os/trap.c b/usr/src/uts/i86pc/os/trap.c
index 9390690e95..c88fec6fbe 100644
--- a/usr/src/uts/i86pc/os/trap.c
+++ b/usr/src/uts/i86pc/os/trap.c
@@ -32,7 +32,7 @@
/* */
/*
- * Copyright 2012 Joyent, Inc. All rights reserved.
+ * Copyright 2015 Joyent, Inc.
*/
#include <sys/types.h>
@@ -101,6 +101,7 @@
#include <sys/hypervisor.h>
#endif
#include <sys/contract/process_impl.h>
+#include <sys/brand.h>
#define USER 0x10000 /* user-mode flag added to trap type */
@@ -862,6 +863,17 @@ trap(struct regs *rp, caddr_t addr, processorid_t cpuid)
fault_type = F_INVAL;
}
+ /*
+ * Allow the brand to interpose on invalid memory accesses
+ * prior to running the native pagefault handler. If this
+ * brand hook returns zero, it was able to handle the fault
+ * completely. Otherwise, drive on and call pagefault().
+ */
+ if (PROC_IS_BRANDED(p) && BROP(p)->b_pagefault != NULL &&
+ BROP(p)->b_pagefault(p, lwp, addr, fault_type, rw) == 0) {
+ goto out;
+ }
+
res = pagefault(addr, fault_type, rw, 0);
/*
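The hunk above wires a brand hook into the pagefault path: a branded process may supply a b_pagefault entry in its brand_ops vector, and trap() gives it first crack at an invalid access before falling back to pagefault(). As a rough illustration only, a handler with the shape implied by the call site might look like the sketch below; the function name and body are hypothetical, and the argument types are assumed from the surrounding trap() code.

/*
 * Hypothetical sketch of a brand pagefault hook.  The signature is
 * inferred from the call site above (proc_t, klwp_t, caddr_t,
 * enum fault_type, enum seg_rw); it is not taken from a real brand.
 * Returning 0 reports the fault as fully handled, so trap() jumps to
 * "out"; any non-zero return falls through to pagefault().
 */
#include <sys/types.h>
#include <sys/proc.h>
#include <sys/klwp.h>
#include <vm/seg_enum.h>

static int
mybrand_pagefault(proc_t *p, klwp_t *lwp, caddr_t addr,
    enum fault_type type, enum seg_rw rw)
{
	boolean_t handled = B_FALSE;

	/*
	 * A real hook would consult brand-private state here (for
	 * example, an emulated mapping tracked per process) and
	 * resolve the fault itself when the address belongs to the
	 * brand, setting handled accordingly.
	 */
	if (handled)
		return (0);	/* handled; trap() skips pagefault() */

	return (-1);		/* not ours; trap() calls pagefault() */
}

A real brand would publish such a handler through its brand_ops as b_pagefault; the NULL check in the hunk keeps brands that do not supply one on the existing path.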
@@ -1468,12 +1480,23 @@ trap(struct regs *rp, caddr_t addr, processorid_t cpuid)
ct->t_sig_check = 0;
- mutex_enter(&p->p_lock);
+ /*
+ * As in other code paths that check against TP_CHANGEBIND,
+ * we perform the check first without p_lock held -- only
+ * acquiring p_lock in the unlikely event that it is indeed
+ * set. This is safe because we are doing this after the
+ * astoff(); if we are racing another thread setting
+ * TP_CHANGEBIND on us, we will pick it up on a subsequent
+ * lap through.
+ */
if (curthread->t_proc_flag & TP_CHANGEBIND) {
- timer_lwpbind();
- curthread->t_proc_flag &= ~TP_CHANGEBIND;
+ mutex_enter(&p->p_lock);
+ if (curthread->t_proc_flag & TP_CHANGEBIND) {
+ timer_lwpbind();
+ curthread->t_proc_flag &= ~TP_CHANGEBIND;
+ }
+ mutex_exit(&p->p_lock);
}
- mutex_exit(&p->p_lock);
/*
* for kaio requests that are on the per-process poll queue,
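The rewritten TP_CHANGEBIND handling is the usual test/lock/re-test pattern: check the per-thread flag without p_lock, and only take the lock (and check again) in the unlikely case it appears set. A minimal user-level analog, with purely illustrative names and pthreads standing in for the kernel primitives, might look like:

#include <pthread.h>
#include <stdatomic.h>

#define	FLAG_CHANGEBIND	0x1		/* stands in for TP_CHANGEBIND */

/* flag_lock stands in for p_lock, proc_flag for t_proc_flag. */
static pthread_mutex_t flag_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic unsigned int proc_flag;

static void
check_changebind(void)
{
	/* Cheap unlocked test first; the common case is "flag not set". */
	if (proc_flag & FLAG_CHANGEBIND) {
		pthread_mutex_lock(&flag_lock);
		/*
		 * Re-test under the lock: another thread may have
		 * cleared the flag between the unlocked test and the
		 * lock acquisition.
		 */
		if (proc_flag & FLAG_CHANGEBIND) {
			/* ... rebind timers here, as timer_lwpbind() does ... */
			proc_flag &= ~FLAG_CHANGEBIND;
		}
		pthread_mutex_unlock(&flag_lock);
	}
}

As the new comment notes, a flag set just after the unlocked test is not lost in trap() either: the check runs after astoff(), so the thread simply picks it up on a later pass through this path.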