author     susans <none@none>    2006-10-26 16:44:53 -0700
committer  susans <none@none>    2006-10-26 16:44:53 -0700
commit     ec25b48f5e0576a68280c5e549673a266f0be346
tree       0809083d34488bf4261bc5614b23c9d61a8d1601  /usr/src/uts/common/syscall/memcntl.c
parent     d7d10855241d89119833d1122507da070aaa6a9a
download   illumos-gate-ec25b48f5e0576a68280c5e549673a266f0be346.tar.gz
6254029 memcntl() MC_HAT_ADVISE with page size 0 may cause segment page sizes to be demoted
6325885 map_pgszstk() uses p->p_brkpageszc rather than p->p_stkpageszc
6371967 assign large pages to anon segment created using mmap /dev/zero
6483208 unify and cleanup OOB (out of the box) large pagesize selection code
6483216 use intermediate pagesizes to map the beginning of bss/heap and stack when it may help performance
6483226 bss size is not properly taken into account by LP OOB policy at exec() time
6483230 grow_internal() doesn't properly align stack bottom for large pages
6483231 memcntl.c: ASSERT(IS_P2ALIGNED(p->p_brkbase + p->p_brksize, pgsz));
6483233 provide a mechanism to enable the use of 32M text pages on OPL by default
6485171 memcntl() shouldn't silently fail when stack space is unavailable with requested pagesize
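For context on the interface these bug IDs refer to, the following is a minimal user-level sketch of an MC_HAT_ADVISE request, assuming the documented memcntl(2)/memcntl_mha interface; the mapping size and the choice of MHA_MAPSIZE_VA are illustrative and not taken from this commit.

#include <sys/types.h>
#include <sys/mman.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	/* Illustrative 4 MB anonymous mapping (the size is an assumption). */
	size_t len = 4 * 1024 * 1024;
	caddr_t addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	struct memcntl_mha mha;

	if (addr == MAP_FAILED)
		return (1);

	(void) memset(&mha, 0, sizeof (mha));
	mha.mha_cmd = MHA_MAPSIZE_VA;
	mha.mha_flags = 0;
	/*
	 * mha_pagesize == 0 leaves the page size choice to the kernel;
	 * with this change such a request for an anon segment goes through
	 * as_set_default_lpsize() rather than demoting existing segment
	 * page sizes.  A nonzero value must be a supported page size (see
	 * getpagesizes(3C)) and addr/len must be aligned to it, otherwise
	 * memcntl() fails with EINVAL.
	 */
	mha.mha_pagesize = 0;

	if (memcntl(addr, len, MC_HAT_ADVISE, (caddr_t)&mha, 0, 0) != 0)
		perror("memcntl(MC_HAT_ADVISE)");

	return (0);
}

For the heap and stack cases fixed above, the equivalent requests use MHA_MAPSIZE_BSSBRK and MHA_MAPSIZE_STACK with addr and len of 0, per memcntl(2).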
Diffstat (limited to 'usr/src/uts/common/syscall/memcntl.c')
-rw-r--r--   usr/src/uts/common/syscall/memcntl.c   43
1 file changed, 31 insertions, 12 deletions
diff --git a/usr/src/uts/common/syscall/memcntl.c b/usr/src/uts/common/syscall/memcntl.c
index 6bdf5a1cc2..5bff588641 100644
--- a/usr/src/uts/common/syscall/memcntl.c
+++ b/usr/src/uts/common/syscall/memcntl.c
@@ -199,31 +199,36 @@ memcntl(caddr_t addr, size_t len, int cmd, caddr_t arg, int attr, int mask)
 			else
 				type = MAPPGSZ_STK;
 
-			pgsz = map_pgsz(type, p, 0, 0, NULL);
+			pgsz = map_pgsz(type, p, 0, 0, 1);
 		}
 	} else {
 		/*
+		 * addr and len must be valid for range specified.
+		 */
+		if (valid_usr_range(addr, len, 0, as,
+		    as->a_userlimit) != RANGE_OKAY) {
+			return (set_errno(ENOMEM));
+		}
+		/*
 		 * Note that we don't disable automatic large page
 		 * selection for anon segments based on use of
 		 * memcntl().
 		 */
		if (pgsz == 0) {
-			pgsz = map_pgsz(MAPPGSZ_VA, p, addr, len,
-			    NULL);
+			error = as_set_default_lpsize(as, addr, len);
+			if (error) {
+				(void) set_errno(error);
+			}
+			return (error);
 		}
 
 		/*
 		 * addr and len must be prefered page size aligned
-		 * and valid for range specified.
 		 */
 		if (!IS_P2ALIGNED(addr, pgsz) ||
 		    !IS_P2ALIGNED(len, pgsz)) {
 			return (set_errno(EINVAL));
 		}
-		if (valid_usr_range(addr, len, 0, as,
-		    as->a_userlimit) != RANGE_OKAY) {
-			return (set_errno(ENOMEM));
-		}
 	}
 
 	szc = mem_getpgszc(pgsz);
@@ -257,10 +262,17 @@ memcntl(caddr_t addr, size_t len, int cmd, caddr_t arg, int attr, int mask)
 				return (set_errno(error));
 			}
 		}
+		/*
+		 * It is possible for brk_internal to silently fail to
+		 * promote the heap size, so don't panic or ASSERT.
+		 */
+		if (!IS_P2ALIGNED(p->p_brkbase + p->p_brksize, pgsz)) {
+			as_rangeunlock(as);
+			return (set_errno(ENOMEM));
+		}
 
 		oszc = p->p_brkpageszc;
 		p->p_brkpageszc = szc;
-		ASSERT(IS_P2ALIGNED(p->p_brkbase + p->p_brksize, pgsz));
 
 		addr = (caddr_t)P2ROUNDUP((uintptr_t)p->p_bssbase, pgsz);
 		len = (p->p_brkbase + p->p_brksize) - addr;
@@ -292,17 +304,24 @@ memcntl(caddr_t addr, size_t len, int cmd, caddr_t arg, int attr, int mask)
 		}
 
 		if (szc > p->p_stkpageszc) {
-			error = grow_internal(p->p_usrstack -
-				p->p_stksize, szc);
+			error = grow_internal(p->p_usrstack -
+			    p->p_stksize, szc);
 			if (error) {
 				as_rangeunlock(as);
 				return (set_errno(error));
 			}
 		}
+		/*
+		 * It is possible for grow_internal to silently fail to
+		 * promote the stack size, so don't panic or ASSERT.
+		 */
+		if (!IS_P2ALIGNED(p->p_usrstack - p->p_stksize, pgsz)) {
+			as_rangeunlock(as);
+			return (set_errno(ENOMEM));
+		}
 
 		oszc = p->p_stkpageszc;
 		p->p_stkpageszc = szc;
-		ASSERT(IS_P2ALIGNED(p->p_usrstack, pgsz));
 
 		addr = p->p_usrstack - p->p_stksize;
 		len = p->p_stksize;