Diffstat (limited to 'usr/src/uts/i86pc')
-rw-r--r--  usr/src/uts/i86pc/os/mlsetup.c    |  2
-rw-r--r--  usr/src/uts/i86pc/vm/vm_machdep.c | 41
2 files changed, 37 insertions(+), 6 deletions(-)
diff --git a/usr/src/uts/i86pc/os/mlsetup.c b/usr/src/uts/i86pc/os/mlsetup.c
index 438f83b6e9..7923836f75 100644
--- a/usr/src/uts/i86pc/os/mlsetup.c
+++ b/usr/src/uts/i86pc/os/mlsetup.c
@@ -323,6 +323,8 @@ mlsetup(struct regs *rp)
p0.p_brkpageszc = 0;
p0.p_t1_lgrpid = LGRP_NONE;
p0.p_tr_lgrpid = LGRP_NONE;
+ psecflags_default(&p0.p_secflags);
+
sigorset(&p0.p_ignore, &ignoredefault);
CPU->cpu_thread = &t0;
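The mlsetup() change above seeds p0 (the kernel's process 0) with default security flags, so everything later forked from it starts from a known secflags state. As a rough illustration only, assuming a plain bitmask representation rather than the real illumos psecflags_t layout, a default-initialization helper could look like this:

/*
 * Illustrative sketch only: assumes security flags are a simple bitmask
 * and that the default-initialization helper just copies a system-wide
 * default set into the process.  The real illumos psecflags_t and
 * psecflags_default() may differ.
 */
#include <stdint.h>

typedef struct psecflags {
	uint64_t psf_effective;		/* flags in force right now */
	uint64_t psf_inherit;		/* flags handed on across fork/exec */
} psecflags_t;

static uint64_t secflags_default_set;	/* hypothetical system-wide default */

static void
psecflags_default(psecflags_t *sf)
{
	sf->psf_effective = secflags_default_set;
	sf->psf_inherit = secflags_default_set;
}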
diff --git a/usr/src/uts/i86pc/vm/vm_machdep.c b/usr/src/uts/i86pc/vm/vm_machdep.c
index 1c2bd3e0ec..d27effaf70 100644
--- a/usr/src/uts/i86pc/vm/vm_machdep.c
+++ b/usr/src/uts/i86pc/vm/vm_machdep.c
@@ -60,6 +60,7 @@
#include <sys/vmsystm.h>
#include <sys/swap.h>
#include <sys/dumphdr.h>
+#include <sys/random.h>
#include <vm/hat.h>
#include <vm/as.h>
@@ -81,6 +82,7 @@
#include <sys/cmn_err.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
+#include <sys/secflags.h>
#include <sys/vtrace.h>
#include <sys/ddidmareq.h>
@@ -636,6 +638,13 @@ map_addr_vacalign_check(caddr_t addr, u_offset_t off)
}
/*
+ * The maximum amount a randomized mapping will be slewed. We should perhaps
+ * arrange things so these tunables can be separate for mmap, mmapobj, and
+ * ld.so
+ */
+size_t aslr_max_map_skew = 256 * 1024 * 1024; /* 256MB */
+
+/*
* map_addr_proc() is the routine called when the system is to
* choose an address for the user. We will pick an address
* range which is the highest available below userlimit.
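For context on the tunable introduced in the hunk above: with 4K pages, a 256MB cap allows 65536 distinct page-aligned placements, i.e. roughly 16 bits of entropy for a randomized mapping. A small stand-alone calculation (illustrative numbers only, not part of the change):

#include <stdio.h>

int
main(void)
{
	size_t max_skew = 256UL * 1024 * 1024;	/* mirrors aslr_max_map_skew */
	size_t pagesize = 4096;			/* assumed page size */
	size_t slots = max_skew / pagesize;	/* 65536 possible placements */
	int bits = 0;

	while (slots > 1) {			/* bits = log2(slots) */
		slots >>= 1;
		bits++;
	}
	(void) printf("~%d bits of entropy for page-aligned mappings\n", bits);
	return (0);
}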
@@ -751,6 +760,7 @@ map_addr_proc(
ASSERT(align_amount == 0 || align_amount >= PAGESIZE);
off = off & (align_amount - 1);
+
/*
* Look for a large enough hole starting below userlimit.
* After finding it, use the upper part.
@@ -778,6 +788,20 @@ map_addr_proc(
addr -= align_amount;
}
+ /*
+ * If randomization is requested, slew the allocation
+ * backwards, within the same gap, by a random amount.
+ */
+ if (flags & _MAP_RANDOMIZE) {
+ uint32_t slew;
+
+ (void) random_get_pseudo_bytes((uint8_t *)&slew,
+ sizeof (slew));
+
+ slew = slew % MIN(aslr_max_map_skew, (addr - base));
+ addr -= P2ALIGN(slew, align_amount);
+ }
+
ASSERT(addr > base);
ASSERT(addr + len < base + slen);
ASSERT(((uintptr_t)addr & (align_amount - 1)) ==
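The randomization hunk above draws a pseudo-random slew, clamps it with a modulus to the smaller of aslr_max_map_skew and the gap actually available (addr - base), then rounds it down to the mapping's alignment so the slewed address keeps the alignment chosen earlier. A minimal user-space sketch of the same arithmetic, with random_get_pseudo_bytes() replaced by a caller-supplied value and P2ALIGN() spelled out as in <sys/sysmacros.h> (the helper name here is hypothetical):

#include <stdint.h>
#include <stddef.h>

#define	MIN(a, b)		((a) < (b) ? (a) : (b))
#define	P2ALIGN(x, align)	((x) & -(align))   /* round down to a power-of-two boundary */

/*
 * 'addr' is the candidate top-of-gap address, 'base' the bottom of the gap,
 * 'align_amount' a power of two (at least the page size), and 'raw' stands
 * in for the bytes the kernel PRNG would supply.
 */
static uintptr_t
randomize_addr(uintptr_t addr, uintptr_t base, size_t align_amount,
    size_t max_skew, uint32_t raw)
{
	size_t slew = raw % MIN(max_skew, addr - base);

	return (addr - P2ALIGN(slew, align_amount));
}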
@@ -903,6 +927,13 @@ valid_va_range(caddr_t *basep, size_t *lenp, size_t minlen, int dir)
}
/*
+ * Default to forbidding the first 64k of address space. This protects most
+ * reasonably sized structures from dereferences through NULL:
+ * ((foo_t *)0)->bar
+ */
+uintptr_t forbidden_null_mapping_sz = 0x10000;
+
+/*
* Determine whether [addr, addr+len] are valid user addresses.
*/
/*ARGSUSED*/
@@ -915,6 +946,10 @@ valid_usr_range(caddr_t addr, size_t len, uint_t prot, struct as *as,
if (eaddr <= addr || addr >= userlimit || eaddr > userlimit)
return (RANGE_BADADDR);
+ if ((addr <= (caddr_t)forbidden_null_mapping_sz) &&
+ secflag_enabled(as->a_proc, PROC_SEC_FORBIDNULLMAP))
+ return (RANGE_BADADDR);
+
#if defined(__amd64)
/*
* Check for the VA hole
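The valid_usr_range() hunk above rejects any user mapping that begins at or below forbidden_null_mapping_sz when the process carries PROC_SEC_FORBIDNULLMAP, so a stray NULL-pointer field access like ((foo_t *)0)->bar faults instead of hitting mapped memory. A reduced sketch of just that predicate, with secflag_enabled() approximated by a bitmask test (the flag value and helper here are placeholders, not the kernel's definitions):

#include <stdint.h>

#define	PROC_SEC_FORBIDNULLMAP	0x1	/* placeholder bit, not the real value */

static uintptr_t forbidden_null_mapping_sz = 0x10000;	/* first 64K */

/*
 * Returns nonzero when a mapping starting at 'addr' must be refused,
 * mirroring the RANGE_BADADDR case added above.  'secflags' stands in
 * for the per-process flag word secflag_enabled() would consult.
 */
static int
forbid_null_mapping(uintptr_t addr, uint64_t secflags)
{
	return (addr <= forbidden_null_mapping_sz &&
	    (secflags & PROC_SEC_FORBIDNULLMAP) != 0);
}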
@@ -3925,12 +3960,6 @@ void
dcache_flushall()
{}
-size_t
-exec_get_spslew(void)
-{
- return (0);
-}
-
/*
* Allocate a memory page. The argument 'seed' can be any pseudo-random
* number to vary where the pages come from. This is quite a hacked up