Diffstat (limited to 'usr/src')
-rw-r--r--  usr/src/uts/common/sys/vmsystm.h     |   5
-rw-r--r--  usr/src/uts/common/vm/as.h           |   6
-rw-r--r--  usr/src/uts/common/vm/vm_as.c        |  48
-rw-r--r--  usr/src/uts/i86pc/vm/vm_machdep.c    | 126
-rw-r--r--  usr/src/uts/sun4/vm/vm_dep.c         |  77
-rw-r--r--  usr/src/uts/sun4u/vm/mach_vm_dep.c   |  64
-rw-r--r--  usr/src/uts/sun4v/vm/mach_vm_dep.c   |  56
7 files changed, 259 insertions, 123 deletions
diff --git a/usr/src/uts/common/sys/vmsystm.h b/usr/src/uts/common/sys/vmsystm.h
index 95de5472f2..0561ecf03a 100644
--- a/usr/src/uts/common/sys/vmsystm.h
+++ b/usr/src/uts/common/sys/vmsystm.h
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -124,6 +124,9 @@ extern void swapout_lwp(klwp_t *);
extern int valid_va_range(caddr_t *basep, size_t *lenp, size_t minlen,
int dir);
+extern int valid_va_range_aligned(caddr_t *basep, size_t *lenp,
+ size_t minlen, int dir, size_t align, size_t redzone, size_t off);
+
extern int valid_usr_range(caddr_t, size_t, uint_t, struct as *, caddr_t);
extern int useracc(void *, size_t, int);
extern size_t map_pgsz(int maptype, struct proc *p, caddr_t addr, size_t len,
diff --git a/usr/src/uts/common/vm/as.h b/usr/src/uts/common/vm/as.h
index 6272f3aa91..826ad4dbb9 100644
--- a/usr/src/uts/common/vm/as.h
+++ b/usr/src/uts/common/vm/as.h
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -265,6 +265,10 @@ int as_map(struct as *as, caddr_t addr, size_t size, int ((*crfp)()),
void as_purge(struct as *as);
int as_gap(struct as *as, size_t minlen, caddr_t *basep, size_t *lenp,
uint_t flags, caddr_t addr);
+int as_gap_aligned(struct as *as, size_t minlen, caddr_t *basep,
+ size_t *lenp, uint_t flags, caddr_t addr, size_t align,
+ size_t redzone, size_t off);
+
int as_memory(struct as *as, caddr_t *basep, size_t *lenp);
size_t as_swapout(struct as *as);
int as_incore(struct as *as, caddr_t addr, size_t size, char *vec,
diff --git a/usr/src/uts/common/vm/vm_as.c b/usr/src/uts/common/vm/vm_as.c
index 86d7bc982f..5a34aa2803 100644
--- a/usr/src/uts/common/vm/vm_as.c
+++ b/usr/src/uts/common/vm/vm_as.c
@@ -1815,7 +1815,12 @@ as_purge(struct as *as)
}
/*
- * Find a hole of at least size minlen within [base, base + len).
+ * Find a hole within [*basep, *basep + *lenp), which contains a mappable
+ * range of addresses at least "minlen" long, where the base of the range is
+ * at "off" phase from an "align" boundary and there is space for a
+ * "redzone"-sized redzone on eithe rside of the range. Thus,
+ * if align was 4M and off was 16k, the user wants a hole which will start
+ * 16k into a 4M page.
*
* If flags specifies AH_HI, the hole will have the highest possible address
* in the range. We use the as->a_lastgap field to figure out where to
@@ -1825,15 +1830,14 @@ as_purge(struct as *as)
*
* If flags specifies AH_CONTAIN, the hole will contain the address addr.
*
- * If an adequate hole is found, base and len are set to reflect the part of
- * the hole that is within range, and 0 is returned, otherwise,
- * -1 is returned.
+ * If an adequate hole is found, *basep and *lenp are set to reflect the part of
+ * the hole that is within range, and 0 is returned. On failure, -1 is returned.
*
* NOTE: This routine is not correct when base+len overflows caddr_t.
*/
int
-as_gap(struct as *as, size_t minlen, caddr_t *basep, size_t *lenp, uint_t flags,
- caddr_t addr)
+as_gap_aligned(struct as *as, size_t minlen, caddr_t *basep, size_t *lenp,
+ uint_t flags, caddr_t addr, size_t align, size_t redzone, size_t off)
{
caddr_t lobound = *basep;
caddr_t hibound = lobound + *lenp;
@@ -1847,7 +1851,8 @@ as_gap(struct as *as, size_t minlen, caddr_t *basep, size_t *lenp, uint_t flags,
save_len = *lenp;
AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
if (AS_SEGFIRST(as) == NULL) {
- if (valid_va_range(basep, lenp, minlen, flags & AH_DIR)) {
+ if (valid_va_range_aligned(basep, lenp, minlen, flags & AH_DIR,
+ align, redzone, off)) {
AS_LOCK_EXIT(as, &as->a_lock);
return (0);
} else {
@@ -1920,8 +1925,8 @@ as_gap(struct as *as, size_t minlen, caddr_t *basep, size_t *lenp, uint_t flags,
*/
*basep = lo;
*lenp = hi - lo;
- if (valid_va_range(basep, lenp, minlen,
- forward ? AH_LO : AH_HI) &&
+ if (valid_va_range_aligned(basep, lenp, minlen,
+ forward ? AH_LO : AH_HI, align, redzone, off) &&
((flags & AH_CONTAIN) == 0 ||
(*basep <= addr && *basep + *lenp > addr))) {
if (!forward)
@@ -1956,6 +1961,31 @@ as_gap(struct as *as, size_t minlen, caddr_t *basep, size_t *lenp, uint_t flags,
}
/*
+ * Find a hole of at least size minlen within [*basep, *basep + *lenp).
+ *
+ * If flags specifies AH_HI, the hole will have the highest possible address
+ * in the range. We use the as->a_lastgap field to figure out where to
+ * start looking for a gap.
+ *
+ * Otherwise, the gap will have the lowest possible address.
+ *
+ * If flags specifies AH_CONTAIN, the hole will contain the address addr.
+ *
+ * If an adequate hole is found, base and len are set to reflect the part of
+ * the hole that is within range, and 0 is returned, otherwise,
+ * -1 is returned.
+ *
+ * NOTE: This routine is not correct when base+len overflows caddr_t.
+ */
+int
+as_gap(struct as *as, size_t minlen, caddr_t *basep, size_t *lenp, uint_t flags,
+ caddr_t addr)
+{
+
+ return (as_gap_aligned(as, minlen, basep, lenp, flags, addr, 0, 0, 0));
+}
+
+/*
* Return the next range within [base, base + len) that is backed
* with "real memory". Skip holes and non-seg_vn segments.
* We're lazy and only return one segment at a time.
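The contract of as_gap_aligned() above is easiest to see with concrete numbers. The following standalone sketch is not part of the patch: phase_up() mirrors the computation of the P2PHASEUP() macro from <sys/sysmacros.h>, and the constants reuse the 4M/16k example from the block comment.

#include <stdio.h>
#include <stdint.h>

/*
 * Round "x" up to the next address that sits "phase" bytes past an
 * "align" boundary (align a power of two, phase < align). Same
 * computation as illumos's P2PHASEUP(x, align, phase).
 */
static uintptr_t
phase_up(uintptr_t x, uintptr_t align, uintptr_t phase)
{
	return (phase - ((phase - x) & -align));
}

int
main(void)
{
	uintptr_t align = 4UL * 1024 * 1024;	/* 4M alignment */
	uintptr_t off = 16UL * 1024;		/* 16k into the 4M page */
	uintptr_t lo = 0x12345000UL;		/* arbitrary hole base */
	uintptr_t base = phase_up(lo, align, off);

	/* prints: base = 0x12404000 phase = 0x4000 */
	printf("base = 0x%lx phase = 0x%lx\n",
	    (unsigned long)base, (unsigned long)(base & (align - 1)));
	return (0);
}

Any base the new routine hands back satisfies (base & (align - 1)) == off, which is what lets map_addr_proc() below drop its old "len += align_amount" over-reservation.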
diff --git a/usr/src/uts/i86pc/vm/vm_machdep.c b/usr/src/uts/i86pc/vm/vm_machdep.c
index 5f3b97b1d9..a6dc452da8 100644
--- a/usr/src/uts/i86pc/vm/vm_machdep.c
+++ b/usr/src/uts/i86pc/vm/vm_machdep.c
@@ -629,15 +629,24 @@ map_addr_vacalign_check(caddr_t addr, u_offset_t off)
* choose an address for the user. We will pick an address
* range which is the highest available below userlimit.
*
+ * Every mapping will have a redzone of a single page on either side of
+ * the request. This is done to leave one page unmapped between segments.
+ * This is not required, but it's useful for the user because if their
+ * program strays across a segment boundary, it will catch a fault
+ * immediately making debugging a little easier. Currently the redzone
+ * is mandatory.
+ *
* addrp is a value/result parameter.
* On input it is a hint from the user to be used in a completely
* machine dependent fashion. We decide to completely ignore this hint.
+ * If MAP_ALIGN was specified, addrp contains the minimal alignment, which
+ * must be some "power of two" multiple of pagesize.
*
* On output it is NULL if no address can be found in the current
* processes address space or else an address that is currently
* not mapped for len bytes with a page of red zone on either side.
*
- * align is not needed on x86 (it's for viturally addressed caches)
+ * vacalign is not needed on x86 (it's for virtually addressed caches)
*/
/*ARGSUSED*/
void
@@ -696,18 +705,10 @@ map_addr_proc(
#endif
slen = userlimit - base;
+ /* Make len be a multiple of PAGESIZE */
len = (len + PAGEOFFSET) & PAGEMASK;
/*
- * Redzone for each side of the request. This is done to leave
- * one page unmapped between segments. This is not required, but
- * it's useful for the user because if their program strays across
- * a segment boundary, it will catch a fault immediately making
- * debugging a little easier.
- */
- len += 2 * MMU_PAGESIZE;
-
- /*
* figure out what the alignment should be
*
* XX64 -- is there an ELF_AMD64_MAXPGSZ or is it the same????
@@ -731,63 +732,86 @@ map_addr_proc(
if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp > align_amount))
align_amount = (uintptr_t)*addrp;
- len += align_amount;
+ ASSERT(ISP2(align_amount));
+ ASSERT(align_amount == 0 || align_amount >= PAGESIZE);
+ off = off & (align_amount - 1);
/*
* Look for a large enough hole starting below userlimit.
- * After finding it, use the upper part. Addition of PAGESIZE
- * is for the redzone as described above.
+ * After finding it, use the upper part.
*/
- if (as_gap(as, len, &base, &slen, AH_HI, NULL) == 0) {
+ if (as_gap_aligned(as, len, &base, &slen, AH_HI, NULL, align_amount,
+ PAGESIZE, off) == 0) {
caddr_t as_addr;
- addr = base + slen - len + MMU_PAGESIZE;
+ /*
+ * addr is the highest possible address to use since we have
+ * a PAGESIZE redzone at the beginning and end.
+ */
+ addr = base + slen - (PAGESIZE + len);
as_addr = addr;
/*
- * Round address DOWN to the alignment amount,
- * add the offset, and if this address is less
- * than the original address, add alignment amount.
+ * Round address DOWN to the alignment amount and
+ * add the offset in.
+ * If addr is greater than as_addr, len would not be large
+ * enough to include the redzone, so we must adjust down
+ * by the alignment amount.
*/
addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1)));
- addr += (uintptr_t)(off & (align_amount - 1));
- if (addr < as_addr)
- addr += align_amount;
+ addr += (uintptr_t)off;
+ if (addr > as_addr) {
+ addr -= align_amount;
+ }
- ASSERT(addr <= (as_addr + align_amount));
+ ASSERT(addr > base);
+ ASSERT(addr + len < base + slen);
ASSERT(((uintptr_t)addr & (align_amount - 1)) ==
- ((uintptr_t)(off & (align_amount - 1))));
+ ((uintptr_t)(off)));
*addrp = addr;
} else {
*addrp = NULL; /* no more virtual space */
}
}
+int valid_va_range_aligned_wraparound;
+
/*
- * Determine whether [base, base+len] contains a valid range of
- * addresses at least minlen long. base and len are adjusted if
- * required to provide a valid range.
+ * Determine whether [*basep, *basep + *lenp) contains a mappable range of
+ * addresses at least "minlen" long, where the base of the range is at "off"
+ * phase from an "align" boundary and there is space for a "redzone"-sized
+ * redzone on either side of the range. On success, 1 is returned and *basep
+ * and *lenp are adjusted to describe the acceptable range (including
+ * the redzone). On failure, 0 is returned.
*/
/*ARGSUSED3*/
int
-valid_va_range(caddr_t *basep, size_t *lenp, size_t minlen, int dir)
+valid_va_range_aligned(caddr_t *basep, size_t *lenp, size_t minlen, int dir,
+ size_t align, size_t redzone, size_t off)
{
uintptr_t hi, lo;
+ size_t tot_len;
+
+ ASSERT(align == 0 ? off == 0 : off < align);
+ ASSERT(ISP2(align));
+ ASSERT(align == 0 || align >= PAGESIZE);
lo = (uintptr_t)*basep;
hi = lo + *lenp;
+ tot_len = minlen + 2 * redzone; /* need at least this much space */
/*
* If hi rolled over the top, try cutting back.
*/
if (hi < lo) {
- if (0 - lo + hi < minlen)
- return (0);
- if (0 - lo < minlen)
- return (0);
- *lenp = 0 - lo;
- } else if (hi - lo < minlen) {
+ *lenp = 0UL - lo - 1UL;
+ /* Count how often this happens so we can investigate why */
+ valid_va_range_aligned_wraparound++;
+ hi = lo + *lenp;
+ }
+ if (*lenp < tot_len) {
return (0);
}
+
#if defined(__amd64)
/*
* Deal with a possible hole in the address range between
@@ -803,9 +827,9 @@ valid_va_range(caddr_t *basep, size_t *lenp, size_t minlen, int dir)
/*
* prefer lowest range
*/
- if (hole_start - lo >= minlen)
+ if (hole_start - lo >= tot_len)
hi = hole_start;
- else if (hi - hole_end >= minlen)
+ else if (hi - hole_end >= tot_len)
lo = hole_end;
else
return (0);
@@ -813,9 +837,9 @@ valid_va_range(caddr_t *basep, size_t *lenp, size_t minlen, int dir)
/*
* prefer highest range
*/
- if (hi - hole_end >= minlen)
+ if (hi - hole_end >= tot_len)
lo = hole_end;
- else if (hole_start - lo >= minlen)
+ else if (hole_start - lo >= tot_len)
hi = hole_start;
else
return (0);
@@ -829,17 +853,41 @@ valid_va_range(caddr_t *basep, size_t *lenp, size_t minlen, int dir)
if (lo < hole_end)
lo = hole_end;
}
+#endif
- if (hi - lo < minlen)
+ if (hi - lo < tot_len)
return (0);
+ if (align > 1) {
+ uintptr_t tlo = lo + redzone;
+ uintptr_t thi = hi - redzone;
+ tlo = (uintptr_t)P2PHASEUP(tlo, align, off);
+ if (tlo < lo + redzone) {
+ return (0);
+ }
+ if (thi < tlo || thi - tlo < minlen) {
+ return (0);
+ }
+ }
+
*basep = (caddr_t)lo;
*lenp = hi - lo;
-#endif
return (1);
}
/*
+ * Determine whether [*basep, *basep + *lenp) contains a mappable range of
+ * addresses at least "minlen" long. On success, 1 is returned and *basep
+ * and *lenp are adjusted to describe the acceptable range. On failure, 0
+ * is returned.
+ */
+int
+valid_va_range(caddr_t *basep, size_t *lenp, size_t minlen, int dir)
+{
+ return (valid_va_range_aligned(basep, lenp, minlen, dir, 0, 0, 0));
+}
+
+/*
* Determine whether [addr, addr+len] are valid user addresses.
*/
/*ARGSUSED*/
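Stripped of the amd64 VA-hole handling, the test that valid_va_range_aligned() performs reduces to a few lines. This is a userland restatement with P2PHASEUP() expanded inline, not a verbatim extract; the name fits_aligned is illustrative.

#include <stddef.h>
#include <stdint.h>

/*
 * Does [lo, hi) contain "minlen" bytes at "off" phase from an "align"
 * boundary, with "redzone" bytes to spare on each side? Returns 1 if
 * so, 0 otherwise.
 */
static int
fits_aligned(uintptr_t lo, uintptr_t hi, size_t minlen,
    size_t align, size_t redzone, size_t off)
{
	if (hi - lo < minlen + 2 * redzone)
		return (0);
	if (align > 1) {
		uintptr_t tlo = lo + redzone;
		uintptr_t thi = hi - redzone;

		/* P2PHASEUP(tlo, align, off), expanded */
		tlo = (uintptr_t)off -
		    (((uintptr_t)off - tlo) & -(uintptr_t)align);
		if (tlo < lo + redzone)	/* phase-up wrapped around */
			return (0);
		if (thi < tlo || thi - tlo < minlen)
			return (0);
	}
	return (1);
}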
diff --git a/usr/src/uts/sun4/vm/vm_dep.c b/usr/src/uts/sun4/vm/vm_dep.c
index 4b80603a8f..d5d696c749 100644
--- a/usr/src/uts/sun4/vm/vm_dep.c
+++ b/usr/src/uts/sun4/vm/vm_dep.c
@@ -258,39 +258,45 @@ caddr_t kpm_vbase;
size_t kpm_size;
uchar_t kpm_size_shift;
+int valid_va_range_aligned_wraparound;
/*
- * Determine whether [base, base+len] contains a mapable range of
- * addresses at least minlen long. base and len are adjusted if
- * required to provide a mapable range.
+ * Determine whether [*basep, *basep + *lenp) contains a mappable range of
+ * addresses at least "minlen" long, where the base of the range is at "off"
+ * phase from an "align" boundary and there is space for a "redzone"-sized
+ * redzone on either side of the range. On success, 1 is returned and *basep
+ * and *lenp are adjusted to describe the acceptable range (including
+ * the redzone). On failure, 0 is returned.
*/
-/* ARGSUSED */
int
-valid_va_range(caddr_t *basep, size_t *lenp, size_t minlen, int dir)
+valid_va_range_aligned(caddr_t *basep, size_t *lenp, size_t minlen, int dir,
+ size_t align, size_t redzone, size_t off)
{
caddr_t hi, lo;
+ size_t tot_len;
+
+ ASSERT(align == 0 ? off == 0 : off < align);
+ ASSERT(ISP2(align));
+ ASSERT(align == 0 || align >= PAGESIZE);
lo = *basep;
hi = lo + *lenp;
+ tot_len = minlen + 2 * redzone; /* need at least this much space */
- /*
- * If hi rolled over the top, try cutting back.
- */
+ /* If hi rolled over the top, try cutting back. */
if (hi < lo) {
- size_t newlen = 0 - (uintptr_t)lo - 1l;
-
- if (newlen + (uintptr_t)hi < minlen)
- return (0);
- if (newlen < minlen)
- return (0);
- *lenp = newlen;
- } else if (hi - lo < minlen)
+ *lenp = 0UL - (uintptr_t)lo - 1UL;
+ /* Count how often this happens so we can investigate why */
+ valid_va_range_aligned_wraparound++;
+ hi = lo + *lenp;
+ }
+ if (*lenp < tot_len) {
return (0);
+ }
/*
* Deal with a possible hole in the address range between
* hole_start and hole_end that should never be mapped by the MMU.
*/
- hi = lo + *lenp;
if (lo < hole_start) {
if (hi > hole_start)
@@ -302,9 +308,9 @@ valid_va_range(caddr_t *basep, size_t *lenp, size_t minlen, int dir)
/*
* prefer lowest range
*/
- if (hole_start - lo >= minlen)
+ if (hole_start - lo >= tot_len)
hi = hole_start;
- else if (hi - hole_end >= minlen)
+ else if (hi - hole_end >= tot_len)
lo = hole_end;
else
return (0);
@@ -312,9 +318,9 @@ valid_va_range(caddr_t *basep, size_t *lenp, size_t minlen, int dir)
/*
* prefer highest range
*/
- if (hi - hole_end >= minlen)
+ if (hi - hole_end >= tot_len)
lo = hole_end;
- else if (hole_start - lo >= minlen)
+ else if (hole_start - lo >= tot_len)
hi = hole_start;
else
return (0);
@@ -327,16 +333,39 @@ valid_va_range(caddr_t *basep, size_t *lenp, size_t minlen, int dir)
lo = hole_end;
}
- if (hi - lo < minlen)
+ /* Check if remaining length is too small */
+ if (hi - lo < tot_len) {
return (0);
-
+ }
+ if (align > 1) {
+ caddr_t tlo = lo + redzone;
+ caddr_t thi = hi - redzone;
+ tlo = (caddr_t)P2PHASEUP((uintptr_t)tlo, align, off);
+ if (tlo < lo + redzone) {
+ return (0);
+ }
+ if (thi < tlo || thi - tlo < minlen) {
+ return (0);
+ }
+ }
*basep = lo;
*lenp = hi - lo;
-
return (1);
}
/*
+ * Determine whether [*basep, *basep + *lenp) contains a mappable range of
+ * addresses at least "minlen" long. On success, 1 is returned and *basep
+ * and *lenp are adjusted to describe the acceptable range. On failure, 0
+ * is returned.
+ */
+int
+valid_va_range(caddr_t *basep, size_t *lenp, size_t minlen, int dir)
+{
+ return (valid_va_range_aligned(basep, lenp, minlen, dir, 0, 0, 0));
+}
+
+/*
* Determine whether [addr, addr+len] with protections `prot' are valid
* for a user address space.
*/
diff --git a/usr/src/uts/sun4u/vm/mach_vm_dep.c b/usr/src/uts/sun4u/vm/mach_vm_dep.c
index 81427b8007..7f2725211c 100644
--- a/usr/src/uts/sun4u/vm/mach_vm_dep.c
+++ b/usr/src/uts/sun4u/vm/mach_vm_dep.c
@@ -151,10 +151,19 @@ adjust_data_maxlpsize(size_t ismpagesize)
* lower level code must manage the translations so that this
* is not seen here (at the cost of efficiency, of course).
*
+ * Every mapping will have a redzone of a single page on either side of
+ * the request. This is done to leave one page unmapped between segments.
+ * This is not required, but it's useful for the user because if their
+ * program strays across a segment boundary, it will catch a fault
+ * immediately making debugging a little easier. Currently the redzone
+ * is mandatory.
+ *
* addrp is a value/result parameter.
* On input it is a hint from the user to be used in a completely
* machine dependent fashion. For MAP_ALIGN, addrp contains the
- * minimal alignment.
+ * minimal alignment, which must be some "power of two" multiple of
+ * pagesize.
*
* On output it is NULL if no address can be found in the current
* processes address space or else an address that is currently
@@ -190,16 +199,9 @@ map_addr_proc(caddr_t *addrp, size_t len, offset_t off, int vacalign,
rctlproc_legacy[RLIMIT_STACK], p->p_rctls, p) + PAGEOFFSET)
& PAGEMASK);
}
- len = (len + PAGEOFFSET) & PAGEMASK;
- /*
- * Redzone for each side of the request. This is done to leave
- * one page unmapped between segments. This is not required, but
- * it's useful for the user because if their program strays across
- * a segment boundary, it will catch a fault immediately making
- * debugging a little easier.
- */
- len += (2 * PAGESIZE);
+ /* Make len be a multiple of PAGESIZE */
+ len = (len + PAGEOFFSET) & PAGEMASK;
/*
* If the request is larger than the size of a particular
@@ -219,11 +221,11 @@ map_addr_proc(caddr_t *addrp, size_t len, offset_t off, int vacalign,
}
if ((mmu_page_sizes == max_mmu_page_sizes) &&
allow_largepage_alignment &&
- (len >= MMU_PAGESIZE256M)) { /* 256MB mappings */
+ (len >= MMU_PAGESIZE256M)) { /* 256MB mappings */
align_amount = MMU_PAGESIZE256M;
} else if ((mmu_page_sizes == max_mmu_page_sizes) &&
allow_largepage_alignment &&
- (len >= MMU_PAGESIZE32M)) { /* 32MB mappings */
+ (len >= MMU_PAGESIZE32M)) { /* 32MB mappings */
align_amount = MMU_PAGESIZE32M;
} else if (len >= MMU_PAGESIZE4M) { /* 4MB mappings */
align_amount = MMU_PAGESIZE4M;
@@ -239,7 +241,7 @@ map_addr_proc(caddr_t *addrp, size_t len, offset_t off, int vacalign,
*/
align_amount = ELF_SPARC_MAXPGSZ;
if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp != 0) &&
- ((uintptr_t)*addrp < align_amount))
+ ((uintptr_t)*addrp < align_amount))
align_amount = (uintptr_t)*addrp;
}
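The cascade in the previous hunk generalizes to: prefer the largest MMU page size the request can fill, so the segment is eligible for large-page promotion. A distilled sketch under stated assumptions: pick_align and huge_ok are illustrative names, the 1M value for ELF_SPARC_MAXPGSZ is only representative, and any smaller-size fallbacks in the full function are elided.

#include <stddef.h>

#define	MMU_PAGESIZE4M		(4UL << 20)
#define	MMU_PAGESIZE32M		(32UL << 20)
#define	MMU_PAGESIZE256M	(256UL << 20)
#define	ELF_SPARC_MAXPGSZ	(1UL << 20)	/* representative only */

/*
 * Choose a mapping alignment from the request length. "huge_ok"
 * stands in for the (mmu_page_sizes == max_mmu_page_sizes &&
 * allow_largepage_alignment) gate in the hunk above.
 */
static size_t
pick_align(size_t len, int huge_ok)
{
	if (huge_ok && len >= MMU_PAGESIZE256M)
		return (MMU_PAGESIZE256M);	/* 256MB mappings */
	if (huge_ok && len >= MMU_PAGESIZE32M)
		return (MMU_PAGESIZE32M);	/* 32MB mappings */
	if (len >= MMU_PAGESIZE4M)
		return (MMU_PAGESIZE4M);	/* 4MB mappings */
	return (ELF_SPARC_MAXPGSZ);		/* ELF default */
}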
@@ -256,33 +258,43 @@ map_addr_proc(caddr_t *addrp, size_t len, offset_t off, int vacalign,
if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp > align_amount)) {
align_amount = (uintptr_t)*addrp;
}
- len += align_amount;
+
+ ASSERT(ISP2(align_amount));
+ ASSERT(align_amount == 0 || align_amount >= PAGESIZE);
/*
* Look for a large enough hole starting below the stack limit.
- * After finding it, use the upper part. Addition of PAGESIZE is
- * for the redzone as described above.
+ * After finding it, use the upper part.
*/
as_purge(as);
- if (as_gap(as, len, &base, &slen, AH_HI, NULL) == 0) {
+ off = off & (align_amount - 1);
+ if (as_gap_aligned(as, len, &base, &slen, AH_HI, NULL, align_amount,
+ PAGESIZE, off) == 0) {
caddr_t as_addr;
- addr = base + slen - len + PAGESIZE;
+ /*
+ * addr is the highest possible address to use since we have
+ * a PAGESIZE redzone at the beginning and end.
+ */
+ addr = base + slen - (PAGESIZE + len);
as_addr = addr;
/*
- * Round address DOWN to the alignment amount,
- * add the offset, and if this address is less
- * than the original address, add alignment amount.
+ * Round address DOWN to the alignment amount and
+ * add the offset in.
+ * If addr is greater than as_addr, len would not be large
+ * enough to include the redzone, so we must adjust down
+ * by the alignment amount.
*/
addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1l)));
- addr += (long)(off & (align_amount - 1l));
- if (addr < as_addr) {
- addr += align_amount;
+ addr += (long)off;
+ if (addr > as_addr) {
+ addr -= align_amount;
}
- ASSERT(addr <= (as_addr + align_amount));
+ ASSERT(addr > base);
+ ASSERT(addr + len < base + slen);
ASSERT(((uintptr_t)addr & (align_amount - 1l)) ==
- ((uintptr_t)(off & (align_amount - 1l))));
+ ((uintptr_t)(off)));
*addrp = addr;
#if defined(SF_ERRATA_57)
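The placement arithmetic in this hunk is shared, modulo types, with the i86pc and sun4v map_addr_proc() versions. A minimal standalone restatement, assuming [base, base + slen) is a gap that as_gap_aligned() has already validated; place_high and the PAGESIZE value are illustrative.

#include <assert.h>
#include <stdint.h>

#define	PAGESIZE	4096UL			/* illustrative */

/*
 * Pick the highest address in a validated gap [base, base + slen)
 * that keeps a PAGESIZE redzone on both sides of [addr, addr + len)
 * and sits at "off" phase from an "align" boundary (off < align,
 * align a power of two).
 */
static uintptr_t
place_high(uintptr_t base, uintptr_t slen, uintptr_t len,
    uintptr_t align, uintptr_t off)
{
	/* highest usable start that still leaves the trailing redzone */
	uintptr_t as_addr = base + slen - (PAGESIZE + len);

	/* round DOWN to the alignment, then add the phase back in */
	uintptr_t addr = (as_addr & ~(align - 1)) + off;

	/* the phase may push us past as_addr; back off one alignment */
	if (addr > as_addr)
		addr -= align;

	assert((addr & (align - 1)) == off);
	assert(addr > base && addr + len < base + slen);
	return (addr);
}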
diff --git a/usr/src/uts/sun4v/vm/mach_vm_dep.c b/usr/src/uts/sun4v/vm/mach_vm_dep.c
index 09aea923e0..49de1e7b07 100644
--- a/usr/src/uts/sun4v/vm/mach_vm_dep.c
+++ b/usr/src/uts/sun4v/vm/mach_vm_dep.c
@@ -182,10 +182,18 @@ static void *contig_mem_prealloc_buf;
* lower level code must manage the translations so that this
* is not seen here (at the cost of efficiency, of course).
*
+ * Every mapping will have a redzone of a single page on either side of
+ * the request. This is done to leave one page unmapped between segments.
+ * This is not required, but it's useful for the user because if their
+ * program strays across a segment boundary, it will catch a fault
+ * immediately making debugging a little easier. Currently the redzone
+ * is mandatory.
+ *
* addrp is a value/result parameter.
* On input it is a hint from the user to be used in a completely
* machine dependent fashion. For MAP_ALIGN, addrp contains the
- * minimal alignment.
+ * minimal alignment, which must be some "power of two" multiple of
+ * pagesize.
*
* On output it is NULL if no address can be found in the current
* processes address space or else an address that is currently
@@ -221,18 +229,10 @@ map_addr_proc(caddr_t *addrp, size_t len, offset_t off, int vacalign,
rctlproc_legacy[RLIMIT_STACK], p->p_rctls, p) + PAGEOFFSET)
& PAGEMASK);
}
+ /* Make len be a multiple of PAGESIZE */
len = (len + PAGEOFFSET) & PAGEMASK;
/*
- * Redzone for each side of the request. This is done to leave
- * one page unmapped between segments. This is not required, but
- * it's useful for the user because if their program strays across
- * a segment boundary, it will catch a fault immediately making
- * debugging a little easier.
- */
- len += (2 * PAGESIZE);
-
- /*
* If the request is larger than the size of a particular
* mmu level, then we use that level to map the request.
* But this requires that both the virtual and the physical
@@ -290,33 +290,43 @@ map_addr_proc(caddr_t *addrp, size_t len, offset_t off, int vacalign,
if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp > align_amount)) {
align_amount = (uintptr_t)*addrp;
}
- len += align_amount;
+
+ ASSERT(ISP2(align_amount));
+ ASSERT(align_amount == 0 || align_amount >= PAGESIZE);
/*
* Look for a large enough hole starting below the stack limit.
- * After finding it, use the upper part. Addition of PAGESIZE is
- * for the redzone as described above.
+ * After finding it, use the upper part.
*/
as_purge(as);
- if (as_gap(as, len, &base, &slen, AH_HI, NULL) == 0) {
+ off = off & (align_amount - 1);
+ if (as_gap_aligned(as, len, &base, &slen, AH_HI, NULL, align_amount,
+ PAGESIZE, off) == 0) {
caddr_t as_addr;
- addr = base + slen - len + PAGESIZE;
+ /*
+ * addr is the highest possible address to use since we have
+ * a PAGESIZE redzone at the beginning and end.
+ */
+ addr = base + slen - (PAGESIZE + len);
as_addr = addr;
/*
- * Round address DOWN to the alignment amount,
- * add the offset, and if this address is less
- * than the original address, add alignment amount.
+ * Round address DOWN to the alignment amount and
+ * add the offset in.
+ * If addr is greater than as_addr, len would not be large
+ * enough to include the redzone, so we must adjust down
+ * by the alignment amount.
*/
addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1l)));
- addr += (long)(off & (align_amount - 1l));
- if (addr < as_addr) {
- addr += align_amount;
+ addr += (long)off;
+ if (addr > as_addr) {
+ addr -= align_amount;
}
- ASSERT(addr <= (as_addr + align_amount));
+ ASSERT(addr > base);
+ ASSERT(addr + len < base + slen);
ASSERT(((uintptr_t)addr & (align_amount - 1l)) ==
- ((uintptr_t)(off & (align_amount - 1l))));
+ ((uintptr_t)(off)));
*addrp = addr;
} else {