author    kchow <none@none>    2006-10-03 14:50:02 -0700
committer kchow <none@none>    2006-10-03 14:50:02 -0700
commit    a985e5786d4eb44e83b59c9c7e38bc77e12fec47 (patch)
tree      d38fda3a6cee05e7bab87ce71432c45fd32aa6a8 /usr/src
parent    5705dae2b68d5caac5a244659599252ed9235deb (diff)
6426285 ref bit only pte - BAD TRAP: type=e fc-cache: #pf Page fault post install of snv_40
6474786 stack overflow during kmem_cache_free
Diffstat (limited to 'usr/src')
-rw-r--r--  usr/src/uts/i86pc/vm/htable.c  | 20
-rw-r--r--  usr/src/uts/i86pc/vm/vm_dep.h  | 29
2 files changed, 33 insertions, 16 deletions
diff --git a/usr/src/uts/i86pc/vm/htable.c b/usr/src/uts/i86pc/vm/htable.c
index 45f8e90ba1..3105ad9e27 100644
--- a/usr/src/uts/i86pc/vm/htable.c
+++ b/usr/src/uts/i86pc/vm/htable.c
@@ -1846,12 +1846,19 @@ x86pte_set(htable_t *ht, uint_t entry, x86pte_t new, void *ptr)
prev = *ptep;
n = new;
/*
- * prevent potential data loss by preserving the MOD
- * bit if set in the current PTE and the pfns are the
- * same. For example, segmap can reissue a read-only
- * hat_memload on top of a dirty page.
+ * prevent potential data loss by preserving the
+ * MOD/REF bits if set in the current PTE, the pfns are
+ * the same and the 'new' pte is non-zero. For example,
+ * segmap can reissue a read-only hat_memload on top
+ * of a dirty page.
+ *
+ * 'new' is required to be non-zero on a remap as at
+ * least the valid bit should be non-zero. The 'new'
+ * check also avoids incorrectly preserving the REF/MOD
+ * bit when unmapping pfn 0.
*/
- if (PTE_ISVALID(prev) && PTE2PFN(prev, ht->ht_level) ==
+ if (new != 0 && PTE_ISVALID(prev) &&
+ PTE2PFN(prev, ht->ht_level) ==
PTE2PFN(n, ht->ht_level)) {
n |= prev & (PT_REF | PT_MOD);
}
@@ -1868,7 +1875,8 @@ x86pte_set(htable_t *ht, uint_t entry, x86pte_t new, void *ptr)
for (;;) {
p32 = *pte32p;
n32 = new;
- if (PTE_ISVALID(p32) && PTE2PFN(p32, ht->ht_level) ==
+ if (new != 0 && PTE_ISVALID(p32) &&
+ PTE2PFN(p32, ht->ht_level) ==
PTE2PFN(n32, ht->ht_level)) {
n32 |= p32 & (PT_REF | PT_MOD);
}
diff --git a/usr/src/uts/i86pc/vm/vm_dep.h b/usr/src/uts/i86pc/vm/vm_dep.h
index ba08977f83..96e3f3589a 100644
--- a/usr/src/uts/i86pc/vm/vm_dep.h
+++ b/usr/src/uts/i86pc/vm/vm_dep.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -216,13 +215,15 @@ extern int desfree4gshift;
*
* In this case, general page allocations via page_get_{free,cache}list
* routines will be restricted from allocating from the 16m pool. Allocations
- * that require specific pfn ranges (page_get_anylist) are not restricted.
+ * that require specific pfn ranges (page_get_anylist) and PG_PANIC allocations
+ * are not restricted.
*/
#define FREEMEM16M MTYPE_FREEMEM(0)
#define DESFREE16M desfree16m
-#define RESTRICT16M_ALLOC(freemem, pgcnt) \
- (freemem != 0 && ((freemem >= (FREEMEM16M)) || \
+#define RESTRICT16M_ALLOC(freemem, pgcnt, flags) \
+ ((freemem != 0) && ((flags & PG_PANIC) == 0) && \
+ ((freemem >= (FREEMEM16M)) || \
(FREEMEM16M < (DESFREE16M + pgcnt))))
extern pgcnt_t desfree16m;
@@ -307,10 +308,12 @@ extern struct cpu cpus[];
VM_STAT_ADD(vmm_vmstats.restrict4gcnt); \
/* here only for > 4g systems */ \
flags |= PGI_MT_RANGE4G; \
- } else if (RESTRICT16M_ALLOC(freemem, btop(pgsz))) { \
+ } else if (RESTRICT16M_ALLOC(freemem, btop(pgsz), flags)) { \
flags |= PGI_MT_RANGE16M; \
} else { \
VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt); \
+ VM_STAT_COND_ADD((flags & PG_PANIC), \
+ vmm_vmstats.pgpanicalloc); \
flags |= PGI_MT_RANGE0; \
} \
}
@@ -331,10 +334,12 @@ extern struct cpu cpus[];
ASSERT(physmax4g); \
mtype = mtype4g; \
if (RESTRICT16M_ALLOC(freemem4g - btop(pgsz), \
- btop(pgsz))) { \
+ btop(pgsz), flags)) { \
flags |= PGI_MT_RANGE16M; \
} else { \
VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt); \
+ VM_STAT_COND_ADD((flags & PG_PANIC), \
+ vmm_vmstats.pgpanicalloc); \
flags |= PGI_MT_RANGE0; \
} \
} else { \
@@ -343,10 +348,13 @@ extern struct cpu cpus[];
VM_STAT_ADD(vmm_vmstats.restrict4gcnt); \
/* here only for > 4g systems */ \
flags |= PGI_MT_RANGE4G; \
- } else if (RESTRICT16M_ALLOC(freemem, btop(pgsz))) { \
+ } else if (RESTRICT16M_ALLOC(freemem, btop(pgsz), \
+ flags)) { \
flags |= PGI_MT_RANGE16M; \
} else { \
VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt); \
+ VM_STAT_COND_ADD((flags & PG_PANIC), \
+ vmm_vmstats.pgpanicalloc); \
flags |= PGI_MT_RANGE0; \
} \
} \
@@ -380,7 +388,7 @@ extern struct cpu cpus[];
#define MTYPE_PGR_INIT(mtype, flags, pp, mnode, pgcnt) { \
mtype = mnoderangecnt - 1; \
- if (RESTRICT16M_ALLOC(freemem, pgcnt)) { \
+ if (RESTRICT16M_ALLOC(freemem, pgcnt, flags)) { \
flags |= PGI_MT_RANGE16M; \
} else { \
VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt); \
@@ -584,6 +592,7 @@ struct vmm_vmstats_str {
ulong_t page_ctrs_cands_skip_all; /* candidates useful for all func */
ulong_t restrict4gcnt;
ulong_t unrestrict16mcnt; /* non-DMA 16m allocs allowed */
+ ulong_t pgpanicalloc; /* PG_PANIC allocation */
};
extern struct vmm_vmstats_str vmm_vmstats;
#endif /* VM_STATS */
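Taken together, the vm_dep.h hunks exempt PG_PANIC allocations from the 16m-pool restriction and count them in the new pgpanicalloc statistic, so panic-time allocations can still be satisfied from the sub-16m pool. The stand-alone sketch below shows the reworked RESTRICT16M_ALLOC() policy; the flag value and thresholds are illustrative placeholders, not the kernel's.

/*
 * Sketch of the updated RESTRICT16M_ALLOC() policy.  The macro body
 * matches the new definition above; everything else is illustrative.
 */
#include <stdio.h>

typedef unsigned long pgcnt_t;

#define PG_PANIC	0x1000		/* illustrative flag value */

static pgcnt_t freemem16m;		/* free pages below 16m */
static pgcnt_t desfree16m;		/* desired floor for the pool */

#define FREEMEM16M	freemem16m
#define DESFREE16M	desfree16m

/* Non-zero when a general allocation must stay out of the 16m pool. */
#define RESTRICT16M_ALLOC(freemem, pgcnt, flags) \
	((freemem != 0) && ((flags & PG_PANIC) == 0) && \
	((freemem >= (FREEMEM16M)) || \
	(FREEMEM16M < (DESFREE16M + pgcnt))))

int
main(void)
{
	pgcnt_t freemem = 100000;

	freemem16m = 512;
	desfree16m = 1024;

	/* Normal allocation: plenty of memory elsewhere, so restrict. */
	printf("normal: %d\n", RESTRICT16M_ALLOC(freemem, 1, 0));

	/* Panic-time allocation: PG_PANIC bypasses the restriction. */
	printf("panic:  %d\n", RESTRICT16M_ALLOC(freemem, 1, PG_PANIC));
	return (0);
}

The normal path prints 1 (restricted, steering the allocation away from the 16m pool via PGI_MT_RANGE16M) while the PG_PANIC path prints 0, landing in the unrestricted PGI_MT_RANGE0 branch that bumps pgpanicalloc.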