author    aguzovsk <none@none>  2007-01-02 21:36:42 -0800
committer aguzovsk <none@none>  2007-01-02 21:36:42 -0800
commit    081a94b09d73451d9d75fe283b5bed730cbb88cf (patch)
tree      687fa4c1a0edeedda717c41261a834b489254035 /usr/src
parent    8162146132b0fb9b7c6dc3371ff205edc236ebfa (diff)
6498368 Reduce va_to_pa overheads
6500938 tte_remap panics with inconsistent sf_hments
Diffstat (limited to 'usr/src')
-rw-r--r--  usr/src/uts/common/vm/seg_kmem.c    6
-rw-r--r--  usr/src/uts/common/vm/seg_kmem.h    9
-rw-r--r--  usr/src/uts/common/vm/seg_vn.c     15
-rw-r--r--  usr/src/uts/common/vm/vm_pvn.c     44
-rw-r--r--  usr/src/uts/sfmmu/ml/sfmmu_asm.s   86
-rw-r--r--  usr/src/uts/sfmmu/vm/hat_sfmmu.c   46
-rw-r--r--  usr/src/uts/sfmmu/vm/hat_sfmmu.h    3
7 files changed, 156 insertions, 53 deletions
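
The first fix (6498368) adds a fast path for kernel va-to-pa translation: for addresses in the large-page kmem heap the page size is already known, so hat_getpfnum() can probe the hme hash once at that size (the new segkmem_lpszc) through the new sfmmu_kvaszc2pfn() routine instead of starting the search at rehash level 1. The following is a simplified, caller-side sketch of that fast path, pieced together from the hat_sfmmu.c hunk further down; it is not the committed code, and it omits the kpm and DEBUG pfn checks.

        /*
         * Simplified sketch of the new kernel va-to-pfn fast path
         * (illustrative only; the committed change is in the
         * hat_sfmmu.c diff below).
         */
        pfn_t
        kva_to_pfn_sketch(caddr_t addr)
        {
                pfn_t pfn;
                tte_t tte;

                if (IS_KMEM_VA_LARGEPAGE(addr)) {
                        /* heap page size is known: one hash probe suffices */
                        ASSERT(segkmem_lpszc > 0);
                        pfn = sfmmu_kvaszc2pfn(addr, segkmem_lpszc);
                        if (pfn != PFN_INVALID)
                                return (pfn);
                }
                /* otherwise fall back to the generic search at rehash 1 */
                while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte)) ==
                    PFN_SUSPENDED) {
                        sfmmu_vatopfn_suspended(addr, ksfmmup, &tte);
                }
                return (pfn);
        }
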
diff --git a/usr/src/uts/common/vm/seg_kmem.c b/usr/src/uts/common/vm/seg_kmem.c
index c88b4a199b..d37bdec645 100644
--- a/usr/src/uts/common/vm/seg_kmem.c
+++ b/usr/src/uts/common/vm/seg_kmem.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -140,6 +140,7 @@ vmem_t *zio_alloc_arena = NULL; /* arena for allocating zio memory */
size_t segkmem_lpsize;
static uint_t segkmem_lpshift = PAGESHIFT;
+int segkmem_lpszc = 0;
size_t segkmem_kmemlp_quantum = 0x400000; /* 4MB */
size_t segkmem_heaplp_quantum;
@@ -1489,7 +1490,8 @@ segkmem_lpsetup()
}
use_large_pages = 1;
- segkmem_lpshift = page_get_shift(page_szc(segkmem_lpsize));
+ segkmem_lpszc = page_szc(segkmem_lpsize);
+ segkmem_lpshift = page_get_shift(segkmem_lpszc);
#endif
return (use_large_pages);
diff --git a/usr/src/uts/common/vm/seg_kmem.h b/usr/src/uts/common/vm/seg_kmem.h
index 70ce2cee26..5b50a070ac 100644
--- a/usr/src/uts/common/vm/seg_kmem.h
+++ b/usr/src/uts/common/vm/seg_kmem.h
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -119,9 +119,10 @@ extern void segkmem_free_lp(vmem_t *, void *, size_t);
extern int segkmem_lpsetup();
extern void segkmem_heap_lp_init(void);
-extern size_t segkmem_lpsize;
-extern size_t segkmem_heaplp_quantum;
-extern size_t segkmem_kmemlp_max;
+extern size_t segkmem_lpsize;
+extern int segkmem_lpszc;
+extern size_t segkmem_heaplp_quantum;
+extern size_t segkmem_kmemlp_max;
#define SEGKMEM_USE_LARGEPAGES (segkmem_lpsize > PAGESIZE)
diff --git a/usr/src/uts/common/vm/seg_vn.c b/usr/src/uts/common/vm/seg_vn.c
index d4e84085c6..9306e270ce 100644
--- a/usr/src/uts/common/vm/seg_vn.c
+++ b/usr/src/uts/common/vm/seg_vn.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -3616,6 +3616,17 @@ segvn_fault_vnodepages(struct hat *hat, struct seg *seg, caddr_t lpgaddr,
ierr = VOP_GETPAGE(vp, (offset_t)off, pgsz,
&vpprot, ppa, pgsz, seg, a, arw,
svd->cred);
+#ifdef DEBUG
+ if (ierr == 0) {
+ for (i = 0; i < pages; i++) {
+ ASSERT(PAGE_LOCKED(ppa[i]));
+ ASSERT(!PP_ISFREE(ppa[i]));
+ ASSERT(ppa[i]->p_vnode == vp);
+ ASSERT(ppa[i]->p_offset ==
+ off + (i << PAGESHIFT));
+ }
+ }
+#endif /* DEBUG */
if (segtype == MAP_PRIVATE) {
SEGVN_VMSTAT_FLTVNPAGES(15);
vpprot &= ~PROT_WRITE;
@@ -3922,7 +3933,7 @@ segvn_fault_vnodepages(struct hat *hat, struct seg *seg, caddr_t lpgaddr,
ierr = -1;
break;
}
- if (szc != 0 && !xhat) {
+ if (szc != 0 && !xhat && !upgrdfail) {
segvn_faultvnmpss_align_err5++;
}
SEGVN_VMSTAT_FLTVNPAGES(34);
diff --git a/usr/src/uts/common/vm/vm_pvn.c b/usr/src/uts/common/vm/vm_pvn.c
index c7ee09453e..d9f66d6049 100644
--- a/usr/src/uts/common/vm/vm_pvn.c
+++ b/usr/src/uts/common/vm/vm_pvn.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -1112,6 +1112,7 @@ pvn_getpages(
/*
* Initialize the page list array.
*/
+/*ARGSUSED*/
void
pvn_plist_init(page_t *pp, page_t *pl[], size_t plsz,
u_offset_t off, size_t io_len, enum seg_rw rw)
@@ -1119,33 +1120,28 @@ pvn_plist_init(page_t *pp, page_t *pl[], size_t plsz,
ssize_t sz;
page_t *ppcur, **ppp;
- if (plsz >= io_len) {
- /*
- * Everything fits, set up to load
- * all the pages.
- */
- sz = io_len;
- } else {
+ /*
+ * Set up to load plsz worth
+ * starting at the needed page.
+ */
+ while (pp != NULL && pp->p_offset != off) {
/*
- * Set up to load plsz worth
- * starting at the needed page.
+ * Remove page from the i/o list,
+ * release the i/o and the page lock.
*/
- while (pp->p_offset != off) {
- /* XXX - Do we need this assert? */
- ASSERT(pp->p_next->p_offset !=
- pp->p_offset);
- /*
- * Remove page from the i/o list,
- * release the i/o and the page lock.
- */
- ppcur = pp;
- page_sub(&pp, ppcur);
- page_io_unlock(ppcur);
- (void) page_release(ppcur, 1);
- }
- sz = plsz;
+ ppcur = pp;
+ page_sub(&pp, ppcur);
+ page_io_unlock(ppcur);
+ (void) page_release(ppcur, 1);
}
+ if (pp == NULL) {
+ pl[0] = NULL;
+ return;
+ }
+
+ sz = plsz;
+
/*
* Initialize the page list array.
*/
diff --git a/usr/src/uts/sfmmu/ml/sfmmu_asm.s b/usr/src/uts/sfmmu/ml/sfmmu_asm.s
index 6e26c226ee..17f4bac38e 100644
--- a/usr/src/uts/sfmmu/ml/sfmmu_asm.s
+++ b/usr/src/uts/sfmmu/ml/sfmmu_asm.s
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -3550,6 +3550,13 @@ sfmmu_vatopfn(caddr_t vaddr, sfmmu_t *sfmmup, tte_t *ttep)
return(0);
}
+/* ARGSUSED */
+pfn_t
+sfmmu_kvaszc2pfn(caddr_t vaddr, int hashno)
+{
+ return(0);
+}
+
#else /* lint */
ENTRY_NP(sfmmu_vatopfn)
@@ -3700,6 +3707,83 @@ vatopfn_nokernel:
or %o0, %lo(sfmmu_panic3), %o0
SET_SIZE(sfmmu_vatopfn)
+
+ /*
+ * %o0 = vaddr
+ * %o1 = hashno (aka szc)
+ *
+ *
+ * This routine is similar to sfmmu_vatopfn() but will only look for
+ * a kernel vaddr in the hash structure for the specified rehash value.
+ * It's just an optimization for the case when pagesize for a given
+ * va range is already known (e.g. large page heap) and we don't want
+ * to start the search with rehash value 1 as sfmmu_vatopfn() does.
+ *
+ * Returns valid pfn or PFN_INVALID if
+ * tte for specified rehash # is not found, invalid or suspended.
+ */
+ ENTRY_NP(sfmmu_kvaszc2pfn)
+ /*
+ * disable interrupts
+ */
+ rdpr %pstate, %o3
+#ifdef DEBUG
+ PANIC_IF_INTR_DISABLED_PSTR(%o3, sfmmu_di_l6, %g1)
+#endif
+ /*
+ * disable interrupts to protect the TSBMISS area
+ */
+ andn %o3, PSTATE_IE, %o5
+ wrpr %o5, 0, %pstate
+
+ CPU_TSBMISS_AREA(%g1, %o5)
+ ldn [%g1 + TSBMISS_KHATID], %o4
+ sll %o1, 1, %g6
+ add %g6, %o1, %g6
+ add %g6, MMU_PAGESHIFT, %g6
+ /*
+ * %o0 = vaddr
+ * %o1 = hashno
+ * %o3 = old %pstate
+ * %o4 = ksfmmup
+ * %g1 = tsbmiss area
+ * %g6 = hmeshift
+ */
+
+ /*
+ * The first arg to GET_TTE is actually tagaccess register
+ * not just vaddr. Since this call is for kernel we need to clear
+ * any lower vaddr bits that would be interpreted as ctx bits.
+ */
+ srlx %o0, MMU_PAGESHIFT, %o0
+ sllx %o0, MMU_PAGESHIFT, %o0
+ GET_TTE(%o0, %o4, %g3, %g4, %g5, %g1, %o5, %g6, %o1,
+ kvaszc2pfn_l1, kvaszc2pfn_hblk_found, kvaszc2pfn_nohblk,
+ kvaszc2pfn_nohblk)
+
+kvaszc2pfn_hblk_found:
+ /*
+ * %g3 = tte
+ * %o0 = vaddr
+ */
+ brgez,a,pn %g3, 1f /* check if tte is invalid */
+ mov -1, %o0 /* output = -1 (PFN_INVALID) */
+ TTETOPFN(%g3, %o0, kvaszc2pfn_l2, %g2, %g4, %g5)
+ /*
+ * g3 = pfn
+ */
+ ba,pt %xcc, 1f
+ mov %g3, %o0
+
+kvaszc2pfn_nohblk:
+ mov -1, %o0
+
+1:
+ retl
+ wrpr %g0, %o3, %pstate /* re-enable interrupts */
+
+ SET_SIZE(sfmmu_kvaszc2pfn)
+
#endif /* lint */
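
The block comment above documents the contract of the new sfmmu_kvaszc2pfn() routine: a single GET_TTE probe of the kernel hash at the caller-supplied rehash level, returning PFN_INVALID when no hme block is found or the TTE is invalid or suspended. A rough C-level model of that behavior is sketched below; it is an illustration only (the real implementation is the SPARC assembly above), kernel_hash_probe_tte() is a hypothetical stand-in for the GET_TTE macro, and TTE_TO_PFN() is assumed to be the usual tte accessor from hat_sfmmu.h.

        /* hypothetical helper: probe the kernel hme hash at one rehash level */
        extern int kernel_hash_probe_tte(sfmmu_t *, caddr_t, int, tte_t *);

        /*
         * C-level model of sfmmu_kvaszc2pfn() -- illustrative only; the
         * real implementation is the assembly routine above.
         */
        pfn_t
        sfmmu_kvaszc2pfn_model(caddr_t vaddr, int hashno)
        {
                tte_t tte;

                /* clear low vaddr bits that would be taken as ctx bits */
                vaddr = (caddr_t)(((uintptr_t)vaddr >> MMU_PAGESHIFT) <<
                    MMU_PAGESHIFT);

                /* probe the kernel hash at exactly this rehash level */
                if (!kernel_hash_probe_tte(ksfmmup, vaddr, hashno, &tte))
                        return (PFN_INVALID);   /* no hme block for vaddr */

                if (!TTE_IS_VALID(&tte))
                        return (PFN_INVALID);   /* tte invalid or suspended */

                return (TTE_TO_PFN(vaddr, &tte));
        }
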
diff --git a/usr/src/uts/sfmmu/vm/hat_sfmmu.c b/usr/src/uts/sfmmu/vm/hat_sfmmu.c
index d30c50c5c7..33c9d7802d 100644
--- a/usr/src/uts/sfmmu/vm/hat_sfmmu.c
+++ b/usr/src/uts/sfmmu/vm/hat_sfmmu.c
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -3133,9 +3133,13 @@ sfmmu_shadow_hcreate(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz, uint_t flags)
* potentially be a previous large page hblk so we need to
* set the shadow bit.
*/
+ ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt);
hmeblkp->hblk_shw_bit = 1;
+ } else if (hmeblkp->hblk_shw_bit == 0) {
+ panic("sfmmu_shadow_hcreate: shw bit not set in hmeblkp 0x%p",
+ (void *)hmeblkp);
}
- ASSERT(hmeblkp->hblk_shw_bit == 1);
+
vshift = vaddr_to_vshift(hblktag, vaddr, size);
ASSERT(vshift < 8);
/*
@@ -5335,11 +5339,10 @@ sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
while (addr < endaddr) {
pml = NULL;
-again:
sfmmu_copytte(&sfhmep->hme_tte, &tte);
if (TTE_IS_VALID(&tte)) {
pp = sfhmep->hme_page;
- if (pp && pml == NULL) {
+ if (pp != NULL) {
pml = sfmmu_mlist_enter(pp);
}
@@ -5349,9 +5352,8 @@ again:
*/
if (sfhmep->hme_page != pp) {
if (pp != NULL && sfhmep->hme_page != NULL) {
- if (pml) {
- sfmmu_mlist_exit(pml);
- }
+ ASSERT(pml != NULL);
+ sfmmu_mlist_exit(pml);
/* Re-start this iteration. */
continue;
}
@@ -5375,6 +5377,7 @@ again:
* Page_unload can also invalidate the tte after
* we read tte outside of p_mapping lock.
*/
+again:
ttemod = tte;
TTE_SET_INVALID(&ttemod);
@@ -5383,17 +5386,15 @@ again:
if (ret <= 0) {
if (TTE_IS_VALID(&tte)) {
+ ASSERT(ret < 0);
goto again;
- } else {
- /*
- * We read in a valid pte, but it
- * is unloaded by page_unload.
- * hme_page has become NULL and
- * we hold no p_mapping lock.
- */
- ASSERT(pp == NULL && pml == NULL);
- goto tte_unloaded;
}
+ if (pp != NULL) {
+ panic("sfmmu_hblk_unload: pp = 0x%p "
+ "tte became invalid under mlist"
+ " lock = 0x%p", pp, pml);
+ }
+ continue;
}
if (!(flags & HAT_UNLOAD_NOSYNC)) {
@@ -5511,8 +5512,7 @@ again:
pml = sfmmu_mlist_enter(pp);
if (sfhmep->hme_page != NULL) {
sfmmu_mlist_exit(pml);
- pml = NULL;
- goto again;
+ continue;
}
ASSERT(sfhmep->hme_page == NULL);
} else if (hmeblkp->hblk_hmecnt != 0) {
@@ -7158,8 +7158,16 @@ hat_getpfnum(struct hat *hat, caddr_t addr)
ASSERT(hat->sfmmu_xhat_provider == NULL);
if (hat == ksfmmup) {
- if (segkpm && IS_KPM_ADDR(addr))
+ if (IS_KMEM_VA_LARGEPAGE(addr)) {
+ ASSERT(segkmem_lpszc > 0);
+ pfn = sfmmu_kvaszc2pfn(addr, segkmem_lpszc);
+ if (pfn != PFN_INVALID) {
+ sfmmu_check_kpfn(pfn);
+ return (pfn);
+ }
+ } else if (segkpm && IS_KPM_ADDR(addr)) {
return (sfmmu_kpm_vatopfn(addr));
+ }
while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte))
== PFN_SUSPENDED) {
sfmmu_vatopfn_suspended(addr, ksfmmup, &tte);
diff --git a/usr/src/uts/sfmmu/vm/hat_sfmmu.h b/usr/src/uts/sfmmu/vm/hat_sfmmu.h
index bab9f5db7d..d93c96c87b 100644
--- a/usr/src/uts/sfmmu/vm/hat_sfmmu.h
+++ b/usr/src/uts/sfmmu/vm/hat_sfmmu.h
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -1736,6 +1736,7 @@ extern void sfmmu_patch_utsb(void);
#endif /* UTSB_PHYS */
extern pfn_t sfmmu_vatopfn(caddr_t, sfmmu_t *, tte_t *);
extern void sfmmu_vatopfn_suspended(caddr_t, sfmmu_t *, tte_t *);
+extern pfn_t sfmmu_kvaszc2pfn(caddr_t, int);
#ifdef DEBUG
extern void sfmmu_check_kpfn(pfn_t);
#else