summaryrefslogtreecommitdiff
path: root/usr
diff options
context:
space:
mode:
author: Christopher Baumbauer - Sun Microsystems - San Diego United States <Christopher.Baumbauer@Sun.COM> 2009-05-05 15:10:16 -0700
committer: Christopher Baumbauer - Sun Microsystems - San Diego United States <Christopher.Baumbauer@Sun.COM> 2009-05-05 15:10:16 -0700
commit: 704b9682c0691c177d1e86c94fabeaf97cc79d55 (patch)
tree: eea16a9481629dff5a82254bd22d5d49011de730 /usr
parent: 34a79eb7e68ed2b7c23a4dcc4851b4f437bf59b6 (diff)
download: illumos-gate-704b9682c0691c177d1e86c94fabeaf97cc79d55.tar.gz
6587140 page_retire()/page_trycapture should not try to retire non relocatable kernel pages
PSARC 2009/181 Page Retirement Kernel KStat Addition
6819876 pages slated for capture under x86 are being ignored
Diffstat (limited to 'usr')
-rw-r--r--  usr/src/uts/common/vm/page.h        |  7
-rw-r--r--  usr/src/uts/common/vm/page_retire.c | 23
-rw-r--r--  usr/src/uts/common/vm/vm_page.c     | 39
3 files changed, 43 insertions(+), 26 deletions(-)
diff --git a/usr/src/uts/common/vm/page.h b/usr/src/uts/common/vm/page.h
index fe975a0900..da2879b687 100644
--- a/usr/src/uts/common/vm/page.h
+++ b/usr/src/uts/common/vm/page.h
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -770,8 +770,9 @@ int page_unretire_pp(page_t *, int);
void page_tryretire(page_t *);
void page_retire_mdboot();
uint64_t page_retire_pend_count(void);
-void page_retire_incr_pend_count(void);
-void page_retire_decr_pend_count(void);
+uint64_t page_retire_pend_kas_count(void);
+void page_retire_incr_pend_count(void *);
+void page_retire_decr_pend_count(void *);
void page_clrtoxic(page_t *, uchar_t);
void page_settoxic(page_t *, uchar_t);
diff --git a/usr/src/uts/common/vm/page_retire.c b/usr/src/uts/common/vm/page_retire.c
index 0dbcd41245..fa454101f8 100644
--- a/usr/src/uts/common/vm/page_retire.c
+++ b/usr/src/uts/common/vm/page_retire.c
@@ -178,6 +178,7 @@ struct page_retire_kstat {
kstat_named_t pr_enqueue_fail;
kstat_named_t pr_dequeue_fail;
kstat_named_t pr_pending;
+ kstat_named_t pr_pending_kas;
kstat_named_t pr_failed;
kstat_named_t pr_failed_kernel;
kstat_named_t pr_limit;
@@ -198,6 +199,7 @@ static struct page_retire_kstat page_retire_kstat = {
{ "pages_notenqueued", KSTAT_DATA_UINT64},
{ "pages_notdequeued", KSTAT_DATA_UINT64},
{ "pages_pending", KSTAT_DATA_UINT64},
+ { "pages_pending_kas", KSTAT_DATA_UINT64},
{ "pages_deferred", KSTAT_DATA_UINT64},
{ "pages_deferred_kernel", KSTAT_DATA_UINT64},
{ "pages_limit", KSTAT_DATA_UINT64},
@@ -222,6 +224,7 @@ static kstat_t *page_retire_ksp = NULL;
#define PR_KSTAT_RETIRED_FMA (page_retire_kstat.pr_fma.value.ui64)
#define PR_KSTAT_RETIRED_NOTUE (PR_KSTAT_RETIRED_CE + PR_KSTAT_RETIRED_FMA)
#define PR_KSTAT_PENDING (page_retire_kstat.pr_pending.value.ui64)
+#define PR_KSTAT_PENDING_KAS (page_retire_kstat.pr_pending_kas.value.ui64)
#define PR_KSTAT_EQFAIL (page_retire_kstat.pr_enqueue_fail.value.ui64)
#define PR_KSTAT_DQFAIL (page_retire_kstat.pr_dequeue_fail.value.ui64)
@@ -834,16 +837,30 @@ page_retire_pend_count(void)
return (PR_KSTAT_PENDING);
}
+uint64_t
+page_retire_pend_kas_count(void)
+{
+ return (PR_KSTAT_PENDING_KAS);
+}
+
void
-page_retire_incr_pend_count(void)
+page_retire_incr_pend_count(void *datap)
{
PR_INCR_KSTAT(pr_pending);
+
+ if ((datap == &kvp) || (datap == &zvp)) {
+ PR_INCR_KSTAT(pr_pending_kas);
+ }
}
void
-page_retire_decr_pend_count(void)
+page_retire_decr_pend_count(void *datap)
{
PR_DECR_KSTAT(pr_pending);
+
+ if ((datap == &kvp) || (datap == &zvp)) {
+ PR_DECR_KSTAT(pr_pending_kas);
+ }
}
/*
@@ -1042,7 +1059,7 @@ page_retire(uint64_t pa, uchar_t reason)
if (MTBF(pr_calls, pr_mtbf)) {
page_settoxic(pp, reason);
- if (page_trycapture(pp, 0, CAPTURE_RETIRE, NULL) == 0) {
+ if (page_trycapture(pp, 0, CAPTURE_RETIRE, pp->p_vnode) == 0) {
PR_DEBUG(prd_prlocked);
} else {
PR_DEBUG(prd_prnotlocked);
diff --git a/usr/src/uts/common/vm/vm_page.c b/usr/src/uts/common/vm/vm_page.c
index 836881f6d2..f5a36ae2cf 100644
--- a/usr/src/uts/common/vm/vm_page.c
+++ b/usr/src/uts/common/vm/vm_page.c
@@ -6451,7 +6451,7 @@ page_capture_add_hash(page_t *pp, uint_t szc, uint_t flags, void *datap)
page_capture_hash[index].lists[0].next = bp1;
page_capture_hash[index].num_pages++;
if (flags & CAPTURE_RETIRE) {
- page_retire_incr_pend_count();
+ page_retire_incr_pend_count(datap);
}
mutex_exit(&page_capture_hash[index].pchh_mutex);
rw_exit(&pc_cb[cb_index].cb_rwlock);
@@ -6477,7 +6477,8 @@ page_capture_add_hash(page_t *pp, uint_t szc, uint_t flags, void *datap)
if (bp2->pp == pp) {
if (flags & CAPTURE_RETIRE) {
if (!(bp2->flags & CAPTURE_RETIRE)) {
- page_retire_incr_pend_count();
+ page_retire_incr_pend_count(
+ datap);
bp2->flags = flags;
bp2->expires = bp1->expires;
bp2->datap = datap;
@@ -6829,7 +6830,7 @@ page_capture_take_action(page_t *pp, uint_t flags, void *datap)
if (ret >= 0) {
if (found) {
if (bp1->flags & CAPTURE_RETIRE) {
- page_retire_decr_pend_count();
+ page_retire_decr_pend_count(datap);
}
kmem_free(bp1, sizeof (*bp1));
}
@@ -7278,28 +7279,16 @@ page_capture_handle_outstanding(void)
{
int ntry;
- if (!page_retire_pend_count()) {
- /*
- * Do we really want to be this aggressive
- * for things other than page_retire?
- * Maybe have a counter for each callback
- * type to guide how aggressive we should
- * be here. Thus if there's at least one
- * page for page_retire we go ahead and reap
- * like this.
- */
- kmem_reap();
- seg_preap();
- page_capture_async();
- } else if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
+ /* Reap pages before attempting capture pages */
+ kmem_reap();
+
+ if ((page_retire_pend_count() > page_retire_pend_kas_count()) &&
+ hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
/*
* Note: Purging only for platforms that support
* ISM hat_pageunload() - mainly SPARC. On x86/x64
* platforms ISM pages SE_SHARED locked until destroyed.
- * There are pages pending retirement, so
- * we reap prior to attempting to capture.
*/
- kmem_reap();
/* disable and purge seg_pcache */
(void) seg_p_disable();
@@ -7317,7 +7306,17 @@ page_capture_handle_outstanding(void)
}
/* reenable seg_pcache */
seg_p_enable();
+
+ /* completed what can be done. break out */
+ return;
}
+
+ /*
+ * For kernel pages and/or unsupported HAT_DYNAMIC_ISM_UNMAP, reap
+ * and then attempt to capture.
+ */
+ seg_preap();
+ page_capture_async();
}
/*