author      kchow <none@none>  2005-08-24 15:17:32 -0700
committer   kchow <none@none>  2005-08-24 15:17:32 -0700
commit      affbd3ccca8e26191a210ec9f9ffae170f919afd (patch)
tree        7c0b6f58653b11f5821f876998bd00b9627f1907
parent      b5cda42e4451eb63c27c9fb170e50e628aa89257 (diff)
download    illumos-joyent-affbd3ccca8e26191a210ec9f9ffae170f919afd.tar.gz
6286816 page_numtopp_nolock is inefficient
6288107 Missing comment terminator in i86pc/ml/locore.s
6290613 high page_get_cachelist failure rate on jurassic
6297005 performance issues with page_nextn()
-rw-r--r--  usr/src/uts/common/os/mem_cage.c     |   4
-rw-r--r--  usr/src/uts/common/sys/cpuvar.h      |   3
-rw-r--r--  usr/src/uts/common/vm/page.h         |   3
-rw-r--r--  usr/src/uts/common/vm/seg_vn.c       |   6
-rw-r--r--  usr/src/uts/common/vm/vm_anon.c      |   4
-rw-r--r--  usr/src/uts/common/vm/vm_page.c      |  53
-rw-r--r--  usr/src/uts/common/vm/vm_pagelist.c  | 187
-rw-r--r--  usr/src/uts/i86pc/ml/locore.s        |   4
-rw-r--r--  usr/src/uts/i86pc/os/lgrpplat.c      |   3
-rw-r--r--  usr/src/uts/i86pc/os/memnode.c       |   5
-rw-r--r--  usr/src/uts/i86pc/os/mlsetup.c       |   2
-rw-r--r--  usr/src/uts/i86pc/os/mp_startup.c    |   3
-rw-r--r--  usr/src/uts/i86pc/sys/memnode.h      |   1
-rw-r--r--  usr/src/uts/i86pc/vm/vm_dep.h        | 114
-rw-r--r--  usr/src/uts/i86pc/vm/vm_machdep.c    |  33
-rw-r--r--  usr/src/uts/sun4/os/lgrpplat.c       |   3
-rw-r--r--  usr/src/uts/sun4/os/memnode.c        |   7
-rw-r--r--  usr/src/uts/sun4/os/mlsetup.c        |   2
-rw-r--r--  usr/src/uts/sun4/os/mp_startup.c     |   4
-rw-r--r--  usr/src/uts/sun4/sys/memnode.h       |   1
-rw-r--r--  usr/src/uts/sun4/vm/vm_dep.c         |   2
-rw-r--r--  usr/src/uts/sun4/vm/vm_dep.h         | 205
22 files changed, 423 insertions(+), 226 deletions(-)
diff --git a/usr/src/uts/common/os/mem_cage.c b/usr/src/uts/common/os/mem_cage.c
index a74d3c115b..5a445739b8 100644
--- a/usr/src/uts/common/os/mem_cage.c
+++ b/usr/src/uts/common/os/mem_cage.c
@@ -45,6 +45,7 @@
#include <vm/seg_kmem.h>
#include <vm/page.h>
#include <vm/hat.h>
+#include <vm/vm_dep.h>
#include <sys/mem_config.h>
#include <sys/lgrp.h>
@@ -1154,7 +1155,7 @@ kcage_setnoreloc_pages(page_t *rootpp, se_t se)
npgs = page_get_pagecnt(szc);
ASSERT(IS_P2ALIGNED(rootpfn, npgs));
pp = rootpp;
- for (i = 0; i < npgs; i++, pp = page_next(pp)) {
+ for (i = 0; i < npgs; i++, pp++) {
ASSERT(PAGE_LOCKED_SE(pp, se));
ASSERT(!PP_ISFREE(pp));
ASSERT(pp->p_szc == szc);
@@ -1249,6 +1250,7 @@ check_free_and_return:
} else {
PP_SETNORELOC(pp);
}
+ page_list_xfer(pp, MTYPE_NORELOC, MTYPE_RELOC);
return (kcage_invalidate_page(pp, nfreedp));
}
/*NOTREACHED*/
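Several hunks in this patch (here, and in seg_vn.c, vm_anon.c, and vm_page.c below) replace page_next(pp) with plain pp++ when iterating over the constituent pages of a single large page. This is safe because the page_t structures for one large page are contiguous entries in a single memseg's page array, so simple pointer arithmetic is exact and skips the memseg search that page_nextn() performs on every step. A minimal sketch of the invariant being relied on (process() is a hypothetical per-page operation, not part of this patch):

	/*
	 * The constituent pages of a large page rooted at rootpp live in
	 * one memseg, so &rootpp[i] is the page_t for pfn
	 * rootpp->p_pagenum + i, for 0 <= i < npgs.  The loop below never
	 * needs the memseg-walking page_next()/page_nextn() helpers.
	 */
	npgs = page_get_pagecnt(rootpp->p_szc);
	for (i = 0, pp = rootpp; i < npgs; i++, pp++)
		process(pp);	/* hypothetical per-page operation */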
diff --git a/usr/src/uts/common/sys/cpuvar.h b/usr/src/uts/common/sys/cpuvar.h
index a72fa96ca3..9d83937f6a 100644
--- a/usr/src/uts/common/sys/cpuvar.h
+++ b/usr/src/uts/common/sys/cpuvar.h
@@ -200,6 +200,7 @@ typedef struct cpu {
* this cpu. Cleared at start of interrupt redistribution.
*/
int32_t cpu_intr_weight;
+ void *cpu_vm_data;
#if (defined(_KERNEL) || defined(_KMEMUSER)) && defined(_MACHDEP)
/*
@@ -658,6 +659,8 @@ extern void cpu_state_change_notify(int, cpu_setup_t);
#define CPU_IDSTRLEN 100
extern void init_cpu_info(struct cpu *);
+extern void cpu_vm_data_init(struct cpu *);
+extern void cpu_vm_data_destroy(struct cpu *);
#endif /* _KERNEL */
diff --git a/usr/src/uts/common/vm/page.h b/usr/src/uts/common/vm/page.h
index ecba3b9775..0a8a6a690f 100644
--- a/usr/src/uts/common/vm/page.h
+++ b/usr/src/uts/common/vm/page.h
@@ -707,6 +707,7 @@ void page_boot_demote(page_t *);
void page_promote_size(page_t *, uint_t);
void page_list_add_pages(page_t *, int);
void page_list_sub(page_t *, int);
+void page_list_xfer(page_t *, int, int);
void page_list_break(page_t **, page_t **, size_t);
void page_list_concat(page_t **, page_t **);
void page_vpadd(page_t **, page_t *);
@@ -736,8 +737,6 @@ page_t *page_numtopp_nolock(pfn_t);
page_t *page_numtopp_nowait(pfn_t, se_t);
page_t *page_first();
page_t *page_next(page_t *);
-page_t *page_nextn_raw(page_t *, ulong_t); /* pp += n */
-#define page_next_raw(PP) page_nextn_raw((PP), 1)
page_t *page_list_next(page_t *);
page_t *page_nextn(page_t *, ulong_t);
page_t *page_next_scan_init(void **);
diff --git a/usr/src/uts/common/vm/seg_vn.c b/usr/src/uts/common/vm/seg_vn.c
index d830cf6154..5830c81737 100644
--- a/usr/src/uts/common/vm/seg_vn.c
+++ b/usr/src/uts/common/vm/seg_vn.c
@@ -2489,7 +2489,7 @@ segvn_relocate_pages(page_t **targ, page_t *replacement)
ASSERT(!PP_ISFREE(repl));
targ[i] = repl;
page_downgrade(targ[i]);
- repl = page_next(repl);
+ repl++;
}
}
@@ -2910,7 +2910,7 @@ segvn_fill_vp_pages(struct segvn_data *svd, vnode_t *vp, u_offset_t off,
ASSERT(pgidx ==
((pp->p_offset - start_off) >> PAGESHIFT));
ppa[pgidx++] = pp;
- pp = page_next(pp);
+ pp++;
}
}
@@ -2992,7 +2992,7 @@ out:
page_list_concat(&tmp_pplist, &pp);
while (--ppages != 0) {
VM_STAT_ADD(segvnvmstats.fill_vp_pages[22]);
- pp = page_next(pp);
+ pp++;
ASSERT(PAGE_EXCL(pp));
ASSERT(pp->p_szc == szc);
page_list_concat(&tmp_pplist, &pp);
diff --git a/usr/src/uts/common/vm/vm_anon.c b/usr/src/uts/common/vm/vm_anon.c
index b8da5c97c2..1e99b746e8 100644
--- a/usr/src/uts/common/vm/vm_anon.c
+++ b/usr/src/uts/common/vm/vm_anon.c
@@ -1631,7 +1631,7 @@ anon_disclaim(struct anon_map *amp, ulong_t index, size_t size, int flags)
* try to lock remaining pages
*/
for (idx = 1; idx < pgcnt; idx++) {
- pp = page_next(pp);
+ pp++;
if (!page_trylock(pp, SE_EXCL))
break;
if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
@@ -1673,7 +1673,7 @@ anon_disclaim(struct anon_map *amp, ulong_t index, size_t size, int flags)
}
skiplp:
segadvstat.MADV_FREE_miss.value.ul += pgcnt;
- for (i = 0, pp = root_pp; i < idx; pp = page_next(pp), i++)
+ for (i = 0, pp = root_pp; i < idx; pp++, i++)
page_unlock(pp);
anon_array_exit(&cookie);
}
diff --git a/usr/src/uts/common/vm/vm_page.c b/usr/src/uts/common/vm/vm_page.c
index 1865d78f81..caf105ccc1 100644
--- a/usr/src/uts/common/vm/vm_page.c
+++ b/usr/src/uts/common/vm/vm_page.c
@@ -641,7 +641,7 @@ add_physmem(
* in the page structure, and put each on
* the free list
*/
- for (; num; pp = page_next_raw(pp), pnum++, num--) {
+ for (; num; pp++, pnum++, num--) {
/*
* this needs to fill in the page number
@@ -3101,7 +3101,7 @@ page_free_pages(page_t *pp)
/*NOTREACHED*/
}
- for (i = 0, tpp = pp; i < pgcnt; i++, tpp = page_next(tpp)) {
+ for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) {
ASSERT((PAGE_EXCL(tpp) &&
!page_iolock_assert(tpp)) || panicstr);
if (PP_ISFREE(tpp)) {
@@ -3454,7 +3454,7 @@ page_destroy_pages(page_t *pp)
/*NOTREACHED*/
}
- for (i = 0, tpp = pp; i < pgcnt; i++, tpp = page_next(tpp)) {
+ for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) {
ASSERT((PAGE_EXCL(tpp) &&
!page_iolock_assert(tpp)) || panicstr);
(void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD);
@@ -5445,7 +5445,7 @@ page_try_demote_pages(page_t *pp)
* Attempt to lock all constituent pages except the page passed
* in since it's already locked.
*/
- for (tpp = rootpp, i = 0; i < npgs; i++, tpp = page_next(tpp)) {
+ for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
ASSERT(!PP_ISFREE(tpp));
ASSERT(tpp->p_vnode != NULL);
@@ -5465,7 +5465,7 @@ page_try_demote_pages(page_t *pp)
while (i-- > 0) {
if (tpp != pp)
page_unlock(tpp);
- tpp = page_next(tpp);
+ tpp++;
}
VM_STAT_ADD(pagecnt.pc_try_demote_pages[4]);
return (0);
@@ -5475,7 +5475,7 @@ page_try_demote_pages(page_t *pp)
* XXX probably p_szc clearing and page unlocking can be done within
* one loop but since this is rare code we can play very safe.
*/
- for (tpp = rootpp, i = 0; i < npgs; i++, tpp = page_next(tpp)) {
+ for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
ASSERT(PAGE_EXCL(tpp));
tpp->p_szc = 0;
}
@@ -5483,7 +5483,7 @@ page_try_demote_pages(page_t *pp)
/*
* Unlock all pages except the page passed in.
*/
- for (tpp = rootpp, i = 0; i < npgs; i++, tpp = page_next(tpp)) {
+ for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
ASSERT(!hat_page_is_mapped(tpp));
if (tpp != pp)
page_unlock(tpp);
@@ -5930,7 +5930,7 @@ page_mark_migrate(struct seg *seg, caddr_t addr, size_t len,
* should be locked already.
*/
for (i = 1; i < pages; i++) {
- pp = page_next(pp);
+ pp++;
if (!page_trylock(pp, SE_EXCL)) {
break;
}
@@ -6349,24 +6349,16 @@ struct memseg *memseg_hash[N_MEM_SLOTS];
page_t *
page_numtopp_nolock(pfn_t pfnum)
{
- static struct memseg *last_memseg_by_pfnum = NULL;
struct memseg *seg;
page_t *pp;
+ vm_cpu_data_t *vc = CPU->cpu_vm_data;
- /*
- * XXX - Since page_numtopp_nolock is called in many places where
- * the search fails more than it succeeds. It maybe worthwhile
- * to put a check for pf_is_memory or a pfnum <= max_pfn (set at
- * boot time).
- *
- * if (!pf_is_memory(pfnum) || (pfnum > max_pfn))
- * return (NULL);
- */
+ ASSERT(vc != NULL);
MEMSEG_STAT_INCR(nsearch);
/* Try last winner first */
- if (((seg = last_memseg_by_pfnum) != NULL) &&
+ if (((seg = vc->vc_pnum_memseg) != NULL) &&
(pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) {
MEMSEG_STAT_INCR(nlastwon);
pp = seg->pages + (pfnum - seg->pages_base);
@@ -6378,7 +6370,7 @@ page_numtopp_nolock(pfn_t pfnum)
if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) &&
(pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) {
MEMSEG_STAT_INCR(nhashwon);
- last_memseg_by_pfnum = seg;
+ vc->vc_pnum_memseg = seg;
pp = seg->pages + (pfnum - seg->pages_base);
if (pp->p_pagenum == pfnum)
return ((page_t *)pp);
@@ -6387,12 +6379,12 @@ page_numtopp_nolock(pfn_t pfnum)
/* Else Brute force */
for (seg = memsegs; seg != NULL; seg = seg->next) {
if (pfnum >= seg->pages_base && pfnum < seg->pages_end) {
- last_memseg_by_pfnum = seg;
+ vc->vc_pnum_memseg = seg;
pp = seg->pages + (pfnum - seg->pages_base);
return ((page_t *)pp);
}
}
- last_memseg_by_pfnum = NULL;
+ vc->vc_pnum_memseg = NULL;
MEMSEG_STAT_INCR(nnotfound);
return ((page_t *)NULL);
@@ -6432,11 +6424,13 @@ page_numtomemseg_nolock(pfn_t pfnum)
page_t *
page_nextn(page_t *pp, ulong_t n)
{
- static struct memseg *last_page_next_memseg = NULL;
struct memseg *seg;
page_t *ppn;
+ vm_cpu_data_t *vc = (vm_cpu_data_t *)CPU->cpu_vm_data;
- if (((seg = last_page_next_memseg) == NULL) ||
+ ASSERT(vc != NULL);
+
+ if (((seg = vc->vc_pnext_memseg) == NULL) ||
(seg->pages_base == seg->pages_end) ||
!(pp >= seg->pages && pp < seg->epages)) {
@@ -6461,7 +6455,7 @@ page_nextn(page_t *pp, ulong_t n)
seg = memsegs;
pp = seg->pages;
}
- last_page_next_memseg = seg;
+ vc->vc_pnext_memseg = seg;
return (ppn);
}
@@ -6540,15 +6534,6 @@ page_next(page_t *pp)
return (page_nextn(pp, 1));
}
-/*
- * Special for routines processing an array of page_t.
- */
-page_t *
-page_nextn_raw(page_t *pp, ulong_t n)
-{
- return (pp+n);
-}
-
page_t *
page_first()
{
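The heart of the 6286816 and 6297005 fixes is visible above: the file-scope statics last_memseg_by_pfnum and last_page_next_memseg, which every CPU read and wrote (bouncing the cache line between CPUs and clobbering each other's hint), become per-CPU hints vc_pnum_memseg and vc_pnext_memseg reached through CPU->cpu_vm_data. A condensed sketch of the resulting lookup path, with the memseg_hash step elided (types and globals as in the surrounding code; not the committed function):

	page_t *
	pfn_to_page_sketch(pfn_t pfnum)
	{
		vm_cpu_data_t *vc = CPU->cpu_vm_data;	/* private to this CPU */
		struct memseg *seg;

		/* 1. try the memseg that satisfied this CPU's last lookup */
		if ((seg = vc->vc_pnum_memseg) != NULL &&
		    pfnum >= seg->pages_base && pfnum < seg->pages_end)
			return (seg->pages + (pfnum - seg->pages_base));

		/* 2. fall back to a brute-force walk; cache the winner */
		for (seg = memsegs; seg != NULL; seg = seg->next) {
			if (pfnum >= seg->pages_base && pfnum < seg->pages_end) {
				vc->vc_pnum_memseg = seg;
				return (seg->pages + (pfnum - seg->pages_base));
			}
		}
		vc->vc_pnum_memseg = NULL;
		return (NULL);
	}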
diff --git a/usr/src/uts/common/vm/vm_pagelist.c b/usr/src/uts/common/vm/vm_pagelist.c
index fda2acfce9..775437dd97 100644
--- a/usr/src/uts/common/vm/vm_pagelist.c
+++ b/usr/src/uts/common/vm/vm_pagelist.c
@@ -63,6 +63,10 @@
extern uint_t vac_colors;
+/* vm_cpu_data for the boot cpu before kmem is initialized */
+#pragma align L2CACHE_ALIGN_MAX(vm_cpu_data0)
+char vm_cpu_data0[VM_CPU_DATA_PADSIZE];
+
/*
* number of page colors equivalent to requested color in page_get routines.
* If set, keeps large pages intact longer and keeps MPO allocation
@@ -191,9 +195,9 @@ static kmutex_t *ctr_mutex[NPC_MUTEX];
* Local functions prototypes.
*/
-void page_ctr_add(page_t *, int);
-void page_ctr_add_internal(int, page_t *, int);
-void page_ctr_sub(page_t *, int);
+void page_ctr_add(int, int, page_t *, int);
+void page_ctr_add_internal(int, int, page_t *, int);
+void page_ctr_sub(int, int, page_t *, int);
uint_t page_convert_color(uchar_t, uchar_t, uint_t);
void page_freelist_lock(int);
void page_freelist_unlock(int);
@@ -317,6 +321,43 @@ static hw_page_map_t *page_counters[MMU_PAGE_SIZES];
*/
krwlock_t page_ctrs_rwlock[MAX_MEM_NODES];
+
+/*
+ * initialize cpu_vm_data to point at cache aligned vm_cpu_data_t.
+ */
+void
+cpu_vm_data_init(struct cpu *cp)
+{
+ int align = (L2CACHE_ALIGN) ? L2CACHE_ALIGN : L2CACHE_ALIGN_MAX;
+
+ ASSERT(L2CACHE_ALIGN <= L2CACHE_ALIGN_MAX);
+
+ if (cp == CPU0) {
+ cp->cpu_vm_data = (void *)&vm_cpu_data0;
+ } else {
+ void *kmptr;
+
+ kmptr = kmem_zalloc(VM_CPU_DATA_PADSIZE + align, KM_SLEEP);
+ cp->cpu_vm_data = (void *) P2ROUNDUP((uintptr_t)kmptr, align);
+ ((vm_cpu_data_t *)cp->cpu_vm_data)->vc_kmptr = kmptr;
+ }
+}
+
+/*
+ * free cpu_vm_data
+ */
+void
+cpu_vm_data_destroy(struct cpu *cp)
+{
+ if (cp->cpu_seqid && cp->cpu_vm_data) {
+ ASSERT(cp != CPU0);
+ kmem_free(((vm_cpu_data_t *)cp->cpu_vm_data)->vc_kmptr,
+ VM_CPU_DATA_PADSIZE);
+ }
+ cp->cpu_vm_data = NULL;
+}
+
+
/*
* page size to page size code
*/
@@ -631,16 +672,19 @@ page_ctrs_alloc(caddr_t alloc_base)
*/
/* ARGSUSED */
void
-page_ctr_add_internal(int mnode, page_t *pp, int flags)
+page_ctr_add_internal(int mnode, int mtype, page_t *pp, int flags)
{
ssize_t r; /* region size */
ssize_t idx;
pfn_t pfnum;
int lckidx;
+ ASSERT(mnode == PP_2_MEM_NODE(pp));
+ ASSERT(mtype == PP_2_MTYPE(pp));
+
ASSERT(pp->p_szc < mmu_page_sizes);
- PLCNT_INCR(pp, mnode, pp->p_szc, flags);
+ PLCNT_INCR(pp, mnode, mtype, pp->p_szc, flags);
/* no counter update needed for largest page size */
if (pp->p_szc >= mmu_page_sizes - 1) {
@@ -673,30 +717,31 @@ page_ctr_add_internal(int mnode, page_t *pp, int flags)
}
void
-page_ctr_add(page_t *pp, int flags)
+page_ctr_add(int mnode, int mtype, page_t *pp, int flags)
{
int lckidx = PP_CTR_LOCK_INDX(pp);
- int mnode = PP_2_MEM_NODE(pp);
kmutex_t *lock = &ctr_mutex[lckidx][mnode];
mutex_enter(lock);
- page_ctr_add_internal(mnode, pp, flags);
+ page_ctr_add_internal(mnode, mtype, pp, flags);
mutex_exit(lock);
}
void
-page_ctr_sub(page_t *pp, int flags)
+page_ctr_sub(int mnode, int mtype, page_t *pp, int flags)
{
int lckidx;
- int mnode = PP_2_MEM_NODE(pp);
kmutex_t *lock;
ssize_t r; /* region size */
ssize_t idx;
pfn_t pfnum;
+ ASSERT(mnode == PP_2_MEM_NODE(pp));
+ ASSERT(mtype == PP_2_MTYPE(pp));
+
ASSERT(pp->p_szc < mmu_page_sizes);
- PLCNT_DECR(pp, mnode, pp->p_szc, flags);
+ PLCNT_DECR(pp, mnode, mtype, pp->p_szc, flags);
/* no counter update needed for largest page size */
if (pp->p_szc >= mmu_page_sizes - 1) {
@@ -995,6 +1040,18 @@ page_freelist_unlock(int mnode)
}
/*
+ * update the page list max counts for already allocated pages that have xfer'ed
+ * (kcage_assimilate_page) between different mtypes.
+ */
+/* ARGSUSED */
+void
+page_list_xfer(page_t *pp, int to_mtype, int from_mtype)
+{
+ PLCNT_MAX_INCR(pp, PP_2_MEM_NODE(pp), to_mtype, pp->p_szc);
+ PLCNT_MAX_DECR(pp, PP_2_MEM_NODE(pp), from_mtype, pp->p_szc);
+}
+
+/*
* add pp to the specified page list. Defaults to head of the page list
* unless PG_LIST_TAIL is specified.
*/
@@ -1043,15 +1100,18 @@ page_list_add(page_t *pp, int flags)
} else
*ppp = pp;
- page_ctr_add_internal(mnode, pp, flags);
+ page_ctr_add_internal(mnode, mtype, pp, flags);
+ VM_STAT_ADD(vmm_vmstats.pladd_free[0]);
} else {
pcm = PC_BIN_MUTEX(mnode, bin, flags);
if (flags & PG_FREE_LIST) {
+ VM_STAT_ADD(vmm_vmstats.pladd_free[0]);
ASSERT(PP_ISAGED(pp));
ppp = &PAGE_FREELISTS(mnode, 0, bin, mtype);
} else {
+ VM_STAT_ADD(vmm_vmstats.pladd_cache);
ASSERT(pp->p_vnode);
ASSERT((pp->p_offset & PAGEOFFSET) == 0);
ppp = &PAGE_CACHELISTS(mnode, bin, mtype);
@@ -1065,7 +1125,7 @@ page_list_add(page_t *pp, int flags)
* Add counters before releasing pcm mutex to avoid a race with
* page_freelist_coalesce and page_freelist_fill.
*/
- page_ctr_add(pp, flags);
+ page_ctr_add(mnode, mtype, pp, flags);
mutex_exit(pcm);
}
@@ -1136,7 +1196,7 @@ page_list_noreloc_startup(page_t *pp)
}
/* LINTED */
- PLCNT_DECR(pp, mnode, 0, flags);
+ PLCNT_DECR(pp, mnode, mtype, 0, flags);
/*
* Set no reloc for cage initted pages.
@@ -1169,7 +1229,7 @@ page_list_noreloc_startup(page_t *pp)
}
/* LINTED */
- PLCNT_INCR(pp, mnode, 0, flags);
+ PLCNT_INCR(pp, mnode, mtype, 0, flags);
/*
* Update cage freemem counter
@@ -1198,7 +1258,7 @@ page_list_add_pages(page_t *pp, int flags)
ASSERT((flags & (PG_CACHE_LIST | PG_LIST_TAIL)) == 0);
CHK_LPG(pp, pp->p_szc);
- VM_STAT_ADD(vmm_vmstats.pc_list_add_pages[pp->p_szc]);
+ VM_STAT_ADD(vmm_vmstats.pladd_free[pp->p_szc]);
bin = PP_2_BIN(pp);
mnode = PP_2_MEM_NODE(pp);
@@ -1208,7 +1268,7 @@ page_list_add_pages(page_t *pp, int flags)
ASSERT(pp->p_szc == mmu_page_sizes - 1);
page_vpadd(&PAGE_FREELISTS(mnode, pp->p_szc, bin, mtype), pp);
ASSERT(!PP_ISNORELOC(pp));
- PLCNT_INCR(pp, mnode, pp->p_szc, flags);
+ PLCNT_INCR(pp, mnode, mtype, pp->p_szc, flags);
} else {
ASSERT(pp->p_szc != 0 && pp->p_szc < mmu_page_sizes);
@@ -1217,7 +1277,7 @@ page_list_add_pages(page_t *pp, int flags)
mutex_enter(pcm);
page_vpadd(&PAGE_FREELISTS(mnode, pp->p_szc, bin, mtype), pp);
- page_ctr_add(pp, PG_FREE_LIST);
+ page_ctr_add(mnode, mtype, pp, PG_FREE_LIST);
mutex_exit(pcm);
pgcnt = page_get_pagecnt(pp->p_szc);
@@ -1291,9 +1351,11 @@ try_again:
mtype = PP_2_MTYPE(pp);
if (flags & PG_FREE_LIST) {
+ VM_STAT_ADD(vmm_vmstats.plsub_free[0]);
ASSERT(PP_ISAGED(pp));
ppp = &PAGE_FREELISTS(mnode, pp->p_szc, bin, mtype);
} else {
+ VM_STAT_ADD(vmm_vmstats.plsub_cache);
ASSERT(!PP_ISAGED(pp));
ppp = &PAGE_CACHELISTS(mnode, bin, mtype);
}
@@ -1311,7 +1373,7 @@ try_again:
* Subtract counters before releasing pcm mutex
* to avoid race with page_freelist_coalesce.
*/
- page_ctr_sub(pp, flags);
+ page_ctr_sub(mnode, mtype, pp, flags);
mutex_exit(pcm);
#if defined(__sparc)
@@ -1362,7 +1424,7 @@ try_again:
ppp = &PAGE_FREELISTS(mnode, pp->p_szc, bin, mtype);
page_sub(ppp, pp);
- page_ctr_sub(pp, flags);
+ page_ctr_sub(mnode, mtype, pp, flags);
page_freelist_unlock(mnode);
#if defined(__sparc)
@@ -1396,8 +1458,6 @@ try_again:
goto try_again;
}
- VM_STAT_ADD(vmm_vmstats.pc_list_sub_pages1[pp->p_szc]);
-
/*
* If we're called with a page larger than szc or it got
* promoted above szc before we locked the freelist then
@@ -1405,12 +1465,11 @@ try_again:
* than szc then demote it.
*/
if (pp->p_szc > szc) {
- VM_STAT_ADD(vmm_vmstats.pc_list_sub_pages2[pp->p_szc]);
mutex_exit(pcm);
pcm = NULL;
page_freelist_lock(mnode);
if (pp->p_szc > szc) {
- VM_STAT_ADD(vmm_vmstats.pc_list_sub_pages3[pp->p_szc]);
+ VM_STAT_ADD(vmm_vmstats.plsubpages_szcbig);
(void) page_demote(mnode,
PFN_BASE(pp->p_pagenum, pp->p_szc),
pp->p_szc, szc, PC_NO_COLOR, PC_FREE);
@@ -1422,14 +1481,17 @@ try_again:
ASSERT(pp->p_szc <= szc);
ASSERT(pp == PP_PAGEROOT(pp));
+ VM_STAT_ADD(vmm_vmstats.plsub_free[pp->p_szc]);
+
mtype = PP_2_MTYPE(pp);
if (pp->p_szc != 0) {
page_vpsub(&PAGE_FREELISTS(mnode, pp->p_szc, bin, mtype), pp);
CHK_LPG(pp, pp->p_szc);
} else {
+ VM_STAT_ADD(vmm_vmstats.plsubpages_szc0);
page_sub(&PAGE_FREELISTS(mnode, pp->p_szc, bin, mtype), pp);
}
- page_ctr_sub(pp, PG_FREE_LIST);
+ page_ctr_sub(mnode, mtype, pp, PG_FREE_LIST);
if (pcm != NULL) {
mutex_exit(pcm);
@@ -1683,7 +1745,7 @@ page_promote(int mnode, pfn_t pfnum, uchar_t new_szc, int flags)
page_unlock(pp);
which_list = PG_CACHE_LIST;
}
- page_ctr_sub(pp, which_list);
+ page_ctr_sub(mnode, mtype, pp, which_list);
/*
* Concatenate the smaller page(s) onto
@@ -1717,7 +1779,7 @@ page_promote(int mnode, pfn_t pfnum, uchar_t new_szc, int flags)
mtype = PP_2_MTYPE(pplist);
page_vpadd(&PAGE_FREELISTS(mnode, new_szc, bin, mtype), pplist);
- page_ctr_add(pplist, PG_FREE_LIST);
+ page_ctr_add(mnode, mtype, pplist, PG_FREE_LIST);
return (NULL);
fail_promote:
@@ -1737,7 +1799,7 @@ fail_promote:
bin = PP_2_BIN(pp);
mtype = PP_2_MTYPE(pp);
mach_page_add(&PAGE_FREELISTS(mnode, 0, bin, mtype), pp);
- page_ctr_add(pp, PG_FREE_LIST);
+ page_ctr_add(mnode, mtype, pp, PG_FREE_LIST);
}
return (NULL);
@@ -1778,7 +1840,7 @@ page_demote(int mnode, pfn_t pfnum, uchar_t cur_szc, uchar_t new_szc,
page_vpsub(&PAGE_FREELISTS(mnode, cur_szc, bin, mtype), pplist);
CHK_LPG(pplist, cur_szc);
- page_ctr_sub(pplist, PG_FREE_LIST);
+ page_ctr_sub(mnode, mtype, pplist, PG_FREE_LIST);
/*
* Number of PAGESIZE pages for smaller new_szc
@@ -1809,7 +1871,7 @@ page_demote(int mnode, pfn_t pfnum, uchar_t cur_szc, uchar_t new_szc,
mtype = PP_2_MTYPE(pp);
mach_page_add(&PAGE_FREELISTS(mnode, 0, bin,
mtype), pp);
- page_ctr_add(pp, PG_FREE_LIST);
+ page_ctr_add(mnode, mtype, pp, PG_FREE_LIST);
}
} else {
@@ -1839,7 +1901,8 @@ page_demote(int mnode, pfn_t pfnum, uchar_t cur_szc, uchar_t new_szc,
page_vpadd(&PAGE_FREELISTS(mnode, new_szc,
bin, mtype), pplist);
- page_ctr_add(pplist, PG_FREE_LIST);
+ page_ctr_add(mnode, mtype, pplist,
+ PG_FREE_LIST);
}
pplist = npplist;
}
@@ -2029,6 +2092,7 @@ page_freelist_fill(uchar_t szc, int color, int mnode, int mtype, pfn_t pfnhi)
ASSERT(szc < mmu_page_sizes);
+ VM_STAT_ADD(vmm_vmstats.pff_req[szc]);
/*
* First try to break up a larger page to fill
* current size freelist.
@@ -2057,6 +2121,7 @@ page_freelist_fill(uchar_t szc, int color, int mnode, int mtype, pfn_t pfnhi)
}
if (pp) {
ASSERT(pp->p_szc == nszc);
+ VM_STAT_ADD(vmm_vmstats.pff_demote[nszc]);
ret_pp = page_demote(mnode, pp->p_pagenum,
pp->p_szc, szc, color, PC_ALLOC);
if (ret_pp) {
@@ -2083,6 +2148,7 @@ page_freelist_fill(uchar_t szc, int color, int mnode, int mtype, pfn_t pfnhi)
*/
if (szc != 0) {
ret_pp = page_freelist_coalesce(mnode, szc, color);
+ VM_STAT_COND_ADD(ret_pp, vmm_vmstats.pff_coalok[szc]);
}
return (ret_pp);
@@ -2151,7 +2217,6 @@ page_get_mnode_freelist(int mnode, uint_t bin, int mtype, uchar_t szc,
VM_STAT_ADD(vmm_vmstats.pgmf_alloc[szc]);
- /* LINTED */
MTYPE_START(mnode, mtype, flags);
if (mtype < 0) { /* mnode does not have memory in mtype range */
VM_STAT_ADD(vmm_vmstats.pgmf_allocempty[szc]);
@@ -2264,7 +2329,8 @@ try_again:
pp);
CHK_LPG(pp, szc);
}
- page_ctr_sub(pp, PG_FREE_LIST);
+ page_ctr_sub(mnode, mtype, pp,
+ PG_FREE_LIST);
if ((PP_ISFREE(pp) == 0) ||
(PP_ISAGED(pp) == 0))
@@ -2387,26 +2453,11 @@ try_again:
fill_tried = 0;
}
-#if defined(__sparc)
- if (!(flags & (PG_NORELOC | PGI_NOCAGE | PGI_RELOCONLY)) &&
- (kcage_freemem >= kcage_lotsfree)) {
- /*
- * The Cage is ON and with plenty of free mem, and
- * we're willing to check for a NORELOC page if we
- * couldn't find a RELOC page, so spin again.
- */
- flags |= PG_NORELOC;
- mtype = MTYPE_NORELOC;
+ /* if allowed, cycle through additional mtypes */
+ MTYPE_NEXT(mnode, mtype, flags);
+ if (mtype >= 0)
goto big_try_again;
- }
-#else
- if (flags & PGI_MT_RANGE) {
- /* cycle through range of mtypes */
- MTYPE_NEXT(mnode, mtype, flags);
- if (mtype >= 0)
- goto big_try_again;
- }
-#endif
+
VM_STAT_ADD(vmm_vmstats.pgmf_allocfailed[szc]);
return (NULL);
@@ -2934,6 +2985,9 @@ page_get_contig_pages(int mnode, uint_t bin, int mtype, uchar_t szc,
ASSERT(szc > 0 || (flags & PGI_PGCPSZC0));
+ /* no allocations from cage */
+ flags |= PGI_NOCAGE;
+
/* do not limit search and ignore color if hi pri */
if (pgcplimitsearch && ((flags & PGI_PGCPHIPRI) == 0))
@@ -2962,9 +3016,8 @@ page_get_contig_pages(int mnode, uint_t bin, int mtype, uchar_t szc,
VM_STAT_ADD(vmm_vmstats.pgcp_allocok[szc]);
return (pp);
}
- /* LINTED */
- } while ((flags & PGI_MT_RANGE) &&
- (MTYPE_NEXT(mnode, mtype, flags) >= 0));
+ MTYPE_NEXT(mnode, mtype, flags);
+ } while (mtype >= 0);
VM_STAT_ADD(vmm_vmstats.pgcp_allocfailed[szc]);
return (NULL);
@@ -3329,7 +3382,8 @@ big_try_again:
* page_freelist_coalesce and
* page_freelist_fill.
*/
- page_ctr_sub(pp, PG_CACHE_LIST);
+ page_ctr_sub(mnode, mtype, pp,
+ PG_CACHE_LIST);
mutex_exit(pcm);
ASSERT(pp->p_vnode);
ASSERT(PP_ISAGED(pp) == 0);
@@ -3390,25 +3444,10 @@ big_try_again:
}
}
-#if defined(__sparc)
- if (!(flags & (PG_NORELOC | PGI_NOCAGE | PGI_RELOCONLY)) &&
- (kcage_freemem >= kcage_lotsfree)) {
- /*
- * The Cage is ON and with plenty of free mem, and
- * we're willing to check for a NORELOC page if we
- * couldn't find a RELOC page, so spin again.
- */
- flags |= PG_NORELOC;
- mtype = MTYPE_NORELOC;
+ MTYPE_NEXT(mnode, mtype, flags);
+ if (mtype >= 0)
goto big_try_again;
- }
-#else
- if (flags & PGI_MT_RANGE) {
- MTYPE_NEXT(mnode, mtype, flags);
- if (mtype >= 0)
- goto big_try_again;
- }
-#endif
+
VM_STAT_ADD(vmm_vmstats.pgmc_allocfailed);
return (NULL);
}
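cpu_vm_data_init above obtains cache-line-aligned per-CPU storage from kmem_zalloc, which makes no alignment promise, by over-allocating `align` extra bytes, rounding the returned pointer up with P2ROUNDUP, and stashing the raw pointer in vc_kmptr so cpu_vm_data_destroy can free it later. A self-contained userland analogue of the same trick (malloc/calloc stands in for kmem_zalloc; P2ROUNDUP written out as in the kernel headers):

	#include <stdint.h>
	#include <stdlib.h>

	#define	P2ROUNDUP(x, a)	(-(-(uintptr_t)(x) & -(uintptr_t)(a)))

	typedef struct {
		void	*data;	/* aligned region handed to the consumer */
		void	*raw;	/* original pointer, needed by free() */
	} aligned_buf_t;

	/* allocate size bytes aligned to align (a power of two) */
	static int
	aligned_buf_alloc(aligned_buf_t *b, size_t size, size_t align)
	{
		if ((b->raw = calloc(1, size + align)) == NULL)
			return (-1);
		b->data = (void *)P2ROUNDUP((uintptr_t)b->raw, align);
		return (0);
	}

	static void
	aligned_buf_free(aligned_buf_t *b)
	{
		free(b->raw);	/* free the original, not the aligned pointer */
		b->raw = b->data = NULL;
	}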
diff --git a/usr/src/uts/i86pc/ml/locore.s b/usr/src/uts/i86pc/ml/locore.s
index 2649a6df7e..725ab88625 100644
--- a/usr/src/uts/i86pc/ml/locore.s
+++ b/usr/src/uts/i86pc/ml/locore.s
@@ -375,7 +375,7 @@ enable_big_page_support_done:
movl %cr0, %eax
orl $CR0_PG, %eax
movl %eax, %cr0
- jmp enable_pae_done /* jmp is required after enabling paging */
+ jmp enable_pae_done / jmp required after enabling paging
enable_pae_done:
/*
* flush TLBs just in case and return
@@ -390,7 +390,7 @@ __start:
/*
* %ecx = boot services (should die someday)
* %ebx = bootops
- *
+ */
mov $edata, %ebp / edata needs to be defined for ksyms
movl $0, (%ebp) / limit stack back trace
diff --git a/usr/src/uts/i86pc/os/lgrpplat.c b/usr/src/uts/i86pc/os/lgrpplat.c
index ea7072f1da..40ab9fb102 100644
--- a/usr/src/uts/i86pc/os/lgrpplat.c
+++ b/usr/src/uts/i86pc/os/lgrpplat.c
@@ -46,6 +46,7 @@
#include <sys/x86_archext.h> /* for x86_feature and X86_AMD */
#include <vm/hat_i86.h>
#include <vm/seg_kmem.h>
+#include <vm/vm_dep.h>
@@ -1441,7 +1442,7 @@ lgrp_plat_mem_size(lgrp_handle_t plathand, lgrp_mem_query_t query)
if (mnode >= 0 && mem_node_config[mnode].exists) {
switch (query) {
case LGRP_MEM_SIZE_FREE:
- npgs = mem_node_config[mnode].cursize;
+ npgs = MNODE_PGCNT(mnode);
break;
case LGRP_MEM_SIZE_AVAIL:
npgs = mem_node_memlist_pages(mnode,
diff --git a/usr/src/uts/i86pc/os/memnode.c b/usr/src/uts/i86pc/os/memnode.c
index b5e07d18eb..e64fd2b0c6 100644
--- a/usr/src/uts/i86pc/os/memnode.c
+++ b/usr/src/uts/i86pc/os/memnode.c
@@ -34,6 +34,7 @@
#include <sys/memlist.h>
#include <sys/memnode.h>
#include <sys/platform_module.h>
+#include <vm/vm_dep.h>
int max_mem_nodes = 1;
@@ -102,7 +103,6 @@ mem_node_add_slice(pfn_t start, pfn_t end)
} else {
mem_node_config[mnode].physbase = start;
mem_node_config[mnode].physmax = end;
- mem_node_config[mnode].cursize = 0;
atomic_add_16(&num_memnodes, 1);
do {
oldmask = memnodes_mask;
@@ -174,7 +174,7 @@ mem_node_post_del_slice(pfn_t start, pfn_t end, int cancelled)
/*
* Delete the whole node.
*/
- ASSERT(mem_node_config[mnode].cursize == 0);
+ ASSERT(MNODE_PGCNT(mnode) == 0);
do {
omask = memnodes_mask;
nmask = omask & ~(1ull << mnode);
@@ -240,7 +240,6 @@ mem_node_alloc()
mem_node_config[mnode].physbase = (pfn_t)-1l;
mem_node_config[mnode].physmax = 0;
- mem_node_config[mnode].cursize = 0;
atomic_add_16(&num_memnodes, 1);
do {
oldmask = memnodes_mask;
diff --git a/usr/src/uts/i86pc/os/mlsetup.c b/usr/src/uts/i86pc/os/mlsetup.c
index 0cefc333a6..24ed4a4615 100644
--- a/usr/src/uts/i86pc/os/mlsetup.c
+++ b/usr/src/uts/i86pc/os/mlsetup.c
@@ -316,6 +316,8 @@ mlsetup(struct regs *rp)
*/
cpu_list_init(CPU);
+ cpu_vm_data_init(CPU);
+
/*
* Initialize the lgrp framework
*/
diff --git a/usr/src/uts/i86pc/os/mp_startup.c b/usr/src/uts/i86pc/os/mp_startup.c
index 474ca4a9e9..a35202e970 100644
--- a/usr/src/uts/i86pc/os/mp_startup.c
+++ b/usr/src/uts/i86pc/os/mp_startup.c
@@ -263,6 +263,8 @@ extern void *long_mode_64(void);
disp_cpu_init(cp);
mutex_exit(&cpu_lock);
+ cpu_vm_data_init(cp);
+
/*
* Allocate and initialize the startup thread for this CPU.
* Interrupt and process switch stacks get allocated later
@@ -913,6 +915,7 @@ start_other_cpus(int cprboot)
mutex_enter(&cpu_lock);
cpu[who]->cpu_flags = 0;
+ cpu_vm_data_destroy(cpu[who]);
cpu_del_unit(who);
mutex_exit(&cpu_lock);
diff --git a/usr/src/uts/i86pc/sys/memnode.h b/usr/src/uts/i86pc/sys/memnode.h
index 2c1da70e74..c76f90216e 100644
--- a/usr/src/uts/i86pc/sys/memnode.h
+++ b/usr/src/uts/i86pc/sys/memnode.h
@@ -79,7 +79,6 @@ struct mem_node_conf {
int exists; /* only try if set, list may still be empty */
pfn_t physbase; /* lowest PFN in this memnode */
pfn_t physmax; /* highest PFN in this memnode */
- size_t cursize; /* current number of PAGESIZE pages on lists */
};
struct memlist;
diff --git a/usr/src/uts/i86pc/vm/vm_dep.h b/usr/src/uts/i86pc/vm/vm_dep.h
index 252f61480e..cc88f2b806 100644
--- a/usr/src/uts/i86pc/vm/vm_dep.h
+++ b/usr/src/uts/i86pc/vm/vm_dep.h
@@ -63,11 +63,12 @@ typedef struct {
pfn_t mnr_pfnhi;
int mnr_mnode;
int mnr_memrange; /* index into memranges[] */
-#ifdef DEBUG
/* maintain page list stats */
pgcnt_t mnr_mt_pgmax; /* mnode/mtype max page cnt */
- pgcnt_t mnr_mt_pgcnt; /* free cnt */
- pgcnt_t mnr_mt_clpgcnt; /* cache list free cnt */
+ pgcnt_t mnr_mt_clpgcnt; /* cache list cnt */
+ pgcnt_t mnr_mt_flpgcnt; /* free list cnt - small pages */
+ pgcnt_t mnr_mt_lgpgcnt; /* free list cnt - large pages */
+#ifdef DEBUG
struct mnr_mts { /* mnode/mtype szc stats */
pgcnt_t mnr_mts_pgcnt;
int mnr_mts_colors;
@@ -106,10 +107,16 @@ typedef struct {
int bin = PP_2_BIN(pp); \
if (flags & PG_LIST_ISINIT) \
mnoderanges[mtype].mnr_mt_pgmax += cnt; \
- atomic_add_long(&mnoderanges[mtype].mnr_mt_pgcnt, cnt); \
+ ASSERT((flags & PG_LIST_ISCAGE) == 0); \
if (flags & PG_CACHE_LIST) \
- atomic_add_long(&mnoderanges[mtype].mnr_mt_clpgcnt, \
- cnt); \
+ atomic_add_long(&mnoderanges[mtype]. \
+ mnr_mt_clpgcnt, cnt); \
+ else if (szc) \
+ atomic_add_long(&mnoderanges[mtype]. \
+ mnr_mt_lgpgcnt, cnt); \
+ else \
+ atomic_add_long(&mnoderanges[mtype]. \
+ mnr_mt_flpgcnt, cnt); \
atomic_add_long(&mnoderanges[mtype].mnr_mts[szc]. \
mnr_mts_pgcnt, cnt); \
atomic_add_long(&mnoderanges[mtype].mnr_mts[szc]. \
@@ -118,13 +125,24 @@ typedef struct {
#else
#define PLCNT_SZ(ctrs_sz)
#define PLCNT_INIT(base)
-#define PLCNT_DO(pp, mtype, szc, cnt, flags)
+#define PLCNT_DO(pp, mtype, szc, cnt, flags) { \
+ if (flags & PG_LIST_ISINIT) \
+ mnoderanges[mtype].mnr_mt_pgmax += cnt; \
+ if (flags & PG_CACHE_LIST) \
+ atomic_add_long(&mnoderanges[mtype]. \
+ mnr_mt_clpgcnt, cnt); \
+ else if (szc) \
+ atomic_add_long(&mnoderanges[mtype]. \
+ mnr_mt_lgpgcnt, cnt); \
+ else \
+ atomic_add_long(&mnoderanges[mtype]. \
+ mnr_mt_flpgcnt, cnt); \
+}
#endif
-#define PLCNT_INCR(pp, mnode, szc, flags) { \
+#define PLCNT_INCR(pp, mnode, mtype, szc, flags) { \
long cnt = (1 << PAGE_BSZS_SHIFT(szc)); \
- int mtype = PP_2_MTYPE(pp); \
- atomic_add_long(&mem_node_config[mnode].cursize, cnt); \
+ ASSERT(mtype == PP_2_MTYPE(pp)); \
if (physmax4g && mtype <= mtype4g) \
atomic_add_long(&freemem4g, cnt); \
if (flags & PG_LIST_ISINIT) { \
@@ -134,15 +152,20 @@ typedef struct {
PLCNT_DO(pp, mtype, szc, cnt, flags); \
}
-#define PLCNT_DECR(pp, mnode, szc, flags) { \
+#define PLCNT_DECR(pp, mnode, mtype, szc, flags) { \
long cnt = ((-1) << PAGE_BSZS_SHIFT(szc)); \
- int mtype = PP_2_MTYPE(pp); \
- atomic_add_long(&mem_node_config[mnode].cursize, cnt); \
+ ASSERT(mtype == PP_2_MTYPE(pp)); \
if (physmax4g && mtype <= mtype4g) \
atomic_add_long(&freemem4g, cnt); \
PLCNT_DO(pp, mtype, szc, cnt, flags); \
}
+/*
+ * macros to update page list max counts. no-op on x86.
+ */
+#define PLCNT_MAX_INCR(pp, mnode, mtype, szc)
+#define PLCNT_MAX_DECR(pp, mnode, mtype, szc)
+
extern mnoderange_t *mnoderanges;
extern int mnoderangecnt;
extern int mtype4g;
@@ -190,6 +213,7 @@ extern int restricted_kmemalloc;
extern int memrange_num(pfn_t);
extern int pfn_2_mtype(pfn_t);
extern int mtype_func(int, int, uint_t);
+extern int mnode_pgcnt(int);
#define NUM_MEM_RANGES 4 /* memory range types */
@@ -245,6 +269,9 @@ extern page_t *page_get_mnode_cachelist(uint_t, uint_t, int, int);
#define SZCPAGES(szc) (1 << PAGE_BSZS_SHIFT(szc))
#define PFN_BASE(pfnum, szc) (pfnum & ~(SZCPAGES(szc) - 1))
+extern struct cpu cpus[];
+#define CPU0 cpus
+
#if defined(__amd64)
/*
@@ -310,8 +337,13 @@ extern page_t *page_get_mnode_cachelist(uint_t, uint_t, int, int);
#define MTYPE_START(mnode, mtype, flags) \
(mtype = mtype_func(mnode, mtype, flags))
-#define MTYPE_NEXT(mnode, mtype, flags) \
- (mtype = mtype_func(mnode, mtype, flags | PGI_MT_NEXT))
+#define MTYPE_NEXT(mnode, mtype, flags) { \
+ if (flags & PGI_MT_RANGE) { \
+ mtype = mtype_func(mnode, mtype, flags | PGI_MT_NEXT); \
+ } else { \
+ mtype = -1; \
+ } \
+}
/* mtype init for page_get_replacement_page */
@@ -320,6 +352,8 @@ extern page_t *page_get_mnode_cachelist(uint_t, uint_t, int, int);
flags |= PGI_MT_RANGE0; \
}
+#define MNODE_PGCNT(mnode) mnode_pgcnt(mnode)
+
#define MNODETYPE_2_PFN(mnode, mtype, pfnlo, pfnhi) \
ASSERT(mnoderanges[mtype].mnr_mnode == mnode); \
pfnlo = mnoderanges[mtype].mnr_pfnlo; \
@@ -385,6 +419,7 @@ typedef short hpmctr_t;
extern int l2cache_sz, l2cache_linesz, l2cache_assoc;
#define L2CACHE_ALIGN l2cache_linesz
+#define L2CACHE_ALIGN_MAX 64
#define CPUSETSIZE() \
(l2cache_assoc ? (l2cache_sz / l2cache_assoc) : MMU_PAGESIZE)
@@ -419,6 +454,26 @@ extern int l2cache_sz, l2cache_linesz, l2cache_assoc;
& page_colors_mask)
/*
+ * cpu private vm data - accessed thru CPU->cpu_vm_data
+ * vc_pnum_memseg: tracks last memseg visited in page_numtopp_nolock()
+ * vc_pnext_memseg: tracks last memseg visited in page_nextn()
+ * vc_kmptr: original unaligned kmem pointer for this vm_cpu_data_t
+ */
+
+typedef struct {
+ struct memseg *vc_pnum_memseg;
+ struct memseg *vc_pnext_memseg;
+ void *vc_kmptr;
+} vm_cpu_data_t;
+
+/* allocation size to ensure vm_cpu_data_t resides in its own cache line */
+#define VM_CPU_DATA_PADSIZE \
+ (P2ROUNDUP(sizeof (vm_cpu_data_t), L2CACHE_ALIGN_MAX))
+
+/* for boot cpu before kmem is initialized */
+extern char vm_cpu_data0[];
+
+/*
* When a bin is empty, and we can't satisfy a color request correctly,
* we scan. If we assume that the programs have reasonable spatial
* behavior, then it will not be a good idea to use the adjacent color.
@@ -438,40 +493,45 @@ extern int l2cache_sz, l2cache_linesz, l2cache_assoc;
#ifdef VM_STATS
struct vmm_vmstats_str {
- ulong_t pc_list_add_pages[MMU_PAGE_SIZES];
- ulong_t pc_list_sub_pages1[MMU_PAGE_SIZES];
- ulong_t pc_list_sub_pages2[MMU_PAGE_SIZES];
- ulong_t pc_list_sub_pages3[MMU_PAGE_SIZES];
- ulong_t pgf_alloc[MMU_PAGE_SIZES];
+ ulong_t pgf_alloc[MMU_PAGE_SIZES]; /* page_get_freelist */
ulong_t pgf_allocok[MMU_PAGE_SIZES];
ulong_t pgf_allocokrem[MMU_PAGE_SIZES];
ulong_t pgf_allocfailed[MMU_PAGE_SIZES];
ulong_t pgf_allocdeferred;
ulong_t pgf_allocretry[MMU_PAGE_SIZES];
- ulong_t pgc_alloc;
+ ulong_t pgc_alloc; /* page_get_cachelist */
ulong_t pgc_allocok;
ulong_t pgc_allocokrem;
ulong_t pgc_allocokdeferred;
ulong_t pgc_allocfailed;
- ulong_t pgcp_alloc[MMU_PAGE_SIZES];
+ ulong_t pgcp_alloc[MMU_PAGE_SIZES]; /* page_get_contig_pages */
ulong_t pgcp_allocfailed[MMU_PAGE_SIZES];
ulong_t pgcp_allocempty[MMU_PAGE_SIZES];
ulong_t pgcp_allocok[MMU_PAGE_SIZES];
- ulong_t ptcp[MMU_PAGE_SIZES];
+ ulong_t ptcp[MMU_PAGE_SIZES]; /* page_trylock_contig_pages */
ulong_t ptcpfreethresh[MMU_PAGE_SIZES];
ulong_t ptcpfailexcl[MMU_PAGE_SIZES];
ulong_t ptcpfailszc[MMU_PAGE_SIZES];
ulong_t ptcpfailcage[MMU_PAGE_SIZES];
ulong_t ptcpok[MMU_PAGE_SIZES];
- ulong_t pgmf_alloc[MMU_PAGE_SIZES];
+ ulong_t pgmf_alloc[MMU_PAGE_SIZES]; /* page_get_mnode_freelist */
ulong_t pgmf_allocfailed[MMU_PAGE_SIZES];
ulong_t pgmf_allocempty[MMU_PAGE_SIZES];
ulong_t pgmf_allocok[MMU_PAGE_SIZES];
- ulong_t pgmc_alloc;
+ ulong_t pgmc_alloc; /* page_get_mnode_cachelist */
ulong_t pgmc_allocfailed;
ulong_t pgmc_allocempty;
ulong_t pgmc_allocok;
- ulong_t ppr_reloc[MMU_PAGE_SIZES];
+ ulong_t pladd_free[MMU_PAGE_SIZES]; /* page_list_add/sub */
+ ulong_t plsub_free[MMU_PAGE_SIZES];
+ ulong_t pladd_cache;
+ ulong_t plsub_cache;
+ ulong_t plsubpages_szcbig;
+ ulong_t plsubpages_szc0;
+ ulong_t pff_req[MMU_PAGE_SIZES]; /* page_freelist_fill */
+ ulong_t pff_demote[MMU_PAGE_SIZES];
+ ulong_t pff_coalok[MMU_PAGE_SIZES];
+ ulong_t ppr_reloc[MMU_PAGE_SIZES]; /* page_relocate */
ulong_t ppr_relocnoroot[MMU_PAGE_SIZES];
ulong_t ppr_reloc_replnoroot[MMU_PAGE_SIZES];
ulong_t ppr_relocnolock[MMU_PAGE_SIZES];
@@ -490,7 +550,7 @@ extern struct vmm_vmstats_str vmm_vmstats;
extern size_t page_ctrs_sz(void);
extern caddr_t page_ctrs_alloc(caddr_t);
-extern void page_ctr_sub(page_t *, int);
+extern void page_ctr_sub(int, int, page_t *, int);
extern page_t *page_freelist_fill(uchar_t, int, int, int, pfn_t);
extern uint_t page_get_pagecolors(uint_t);
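With the x86 MTYPE_NEXT above now a statement macro that leaves -1 in mtype once the range is exhausted (and immediately when PGI_MT_RANGE is clear), page_get_mnode_freelist, page_get_mnode_cachelist, and page_get_mnode_anylist can share one loop shape on both platforms, replacing the #if defined(__sparc) blocks deleted from vm_pagelist.c. Schematically, with the allocation body elided (names taken from this diff):

	MTYPE_START(mnode, mtype, flags);
	if (mtype < 0) {	/* mnode does not have memory in mtype range */
		VM_STAT_ADD(vmm_vmstats.pgmf_allocempty[szc]);
		return (NULL);
	}
	do {
		/* ... search (mnode, mtype) lists, return page on success ... */
		MTYPE_NEXT(mnode, mtype, flags);	/* mtype = -1 when done */
	} while (mtype >= 0);
	VM_STAT_ADD(vmm_vmstats.pgmf_allocfailed[szc]);
	return (NULL);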
diff --git a/usr/src/uts/i86pc/vm/vm_machdep.c b/usr/src/uts/i86pc/vm/vm_machdep.c
index 370f3ca9c2..4426aaf8eb 100644
--- a/usr/src/uts/i86pc/vm/vm_machdep.c
+++ b/usr/src/uts/i86pc/vm/vm_machdep.c
@@ -1018,6 +1018,27 @@ mtype_func(int mnode, int mtype, uint_t flags)
}
/*
+ * Returns the free page count for mnode
+ */
+int
+mnode_pgcnt(int mnode)
+{
+ int mtype = mnoderangecnt - 1;
+ int flags = PGI_MT_RANGE0;
+ pgcnt_t pgcnt = 0;
+
+ mtype = mtype_func(mnode, mtype, flags);
+
+ while (mtype != -1) {
+ pgcnt += (mnoderanges[mtype].mnr_mt_flpgcnt +
+ mnoderanges[mtype].mnr_mt_lgpgcnt +
+ mnoderanges[mtype].mnr_mt_clpgcnt);
+ mtype = mtype_func(mnode, mtype, flags | PGI_MT_NEXT);
+ }
+ return (pgcnt);
+}
+
+/*
* Initialize page coloring variables based on the l2 cache parameters.
* Calculate and return memory needed for page coloring data structures.
*/
@@ -1248,7 +1269,7 @@ page_get_mnode_anylist(ulong_t origbin, uchar_t szc, uint_t flags,
/* found a page with specified DMA attributes */
page_sub(&PAGE_FREELISTS(mnode, szc, bin,
mtype), pp);
- page_ctr_sub(pp, PG_FREE_LIST);
+ page_ctr_sub(mnode, mtype, pp, PG_FREE_LIST);
if ((PP_ISFREE(pp) == 0) ||
(PP_ISAGED(pp) == 0)) {
@@ -1273,8 +1294,8 @@ nextfreebin:
bin &= page_colors_mask;
i++;
}
- } while ((flags & PGI_MT_RANGE) &&
- (MTYPE_NEXT(mnode, mtype, flags) >= 0));
+ MTYPE_NEXT(mnode, mtype, flags);
+ } while (mtype >= 0);
/* failed to find a page in the freelist; try it in the cachelist */
@@ -1329,7 +1350,7 @@ nextfreebin:
/* found a page with specified DMA attributes */
page_sub(&PAGE_CACHELISTS(mnode, bin,
mtype), pp);
- page_ctr_sub(pp, PG_CACHE_LIST);
+ page_ctr_sub(mnode, mtype, pp, PG_CACHE_LIST);
mutex_exit(pcm);
ASSERT(pp->p_vnode);
@@ -1343,8 +1364,8 @@ nextcachebin:
bin += (i == 0) ? BIN_STEP : 1;
bin &= page_colors_mask;
}
- } while ((flags & PGI_MT_RANGE) &&
- (MTYPE_NEXT(mnode, mtype, flags) >= 0));
+ MTYPE_NEXT(mnode, mtype, flags);
+ } while (mtype >= 0);
VM_STAT_ADD(pga_vmstats.pgma_allocfailed);
return (NULL);
diff --git a/usr/src/uts/sun4/os/lgrpplat.c b/usr/src/uts/sun4/os/lgrpplat.c
index b036fa8f1f..ac5bf11e19 100644
--- a/usr/src/uts/sun4/os/lgrpplat.c
+++ b/usr/src/uts/sun4/os/lgrpplat.c
@@ -36,6 +36,7 @@
#include <sys/types.h>
#include <vm/seg_spt.h>
#include <vm/seg_vn.h>
+#include <vm/vm_dep.h>
#include <sys/errno.h>
#include <sys/kstat.h>
@@ -210,7 +211,7 @@ lgrp_plat_mem_size(lgrp_handle_t plathand, lgrp_mem_query_t query)
if (mnode >= 0 && mem_node_config[mnode].exists) {
switch (query) {
case LGRP_MEM_SIZE_FREE:
- npgs = mem_node_config[mnode].cursize;
+ npgs = MNODE_PGCNT(mnode);
break;
case LGRP_MEM_SIZE_AVAIL:
npgs = mem_node_memlist_pages(mnode,
diff --git a/usr/src/uts/sun4/os/memnode.c b/usr/src/uts/sun4/os/memnode.c
index c00966c1c3..849bec22c8 100644
--- a/usr/src/uts/sun4/os/memnode.c
+++ b/usr/src/uts/sun4/os/memnode.c
@@ -20,7 +20,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -32,6 +32,7 @@
#include <sys/atomic.h>
#include <sys/memlist.h>
#include <sys/memnode.h>
+#include <vm/vm_dep.h>
int max_mem_nodes = 1; /* max memory nodes on this system */
@@ -99,7 +100,6 @@ mem_node_add_slice(pfn_t start, pfn_t end)
} else {
mem_node_config[mnode].physbase = start;
mem_node_config[mnode].physmax = end;
- mem_node_config[mnode].cursize = 0;
atomic_add_16(&num_memnodes, 1);
do {
oldmask = memnodes_mask;
@@ -171,7 +171,7 @@ mem_node_post_del_slice(pfn_t start, pfn_t end, int cancelled)
/*
* Delete the whole node.
*/
- ASSERT(mem_node_config[mnode].cursize == 0);
+ ASSERT(MNODE_PGCNT(mnode) == 0);
do {
omask = memnodes_mask;
nmask = omask & ~(1ull << mnode);
@@ -234,7 +234,6 @@ mem_node_alloc()
mem_node_config[mnode].physbase = (uint64_t)-1;
mem_node_config[mnode].physmax = 0;
- mem_node_config[mnode].cursize = 0;
atomic_add_16(&num_memnodes, 1);
do {
oldmask = memnodes_mask;
diff --git a/usr/src/uts/sun4/os/mlsetup.c b/usr/src/uts/sun4/os/mlsetup.c
index 6c4556448f..66ade7e0bb 100644
--- a/usr/src/uts/sun4/os/mlsetup.c
+++ b/usr/src/uts/sun4/os/mlsetup.c
@@ -203,6 +203,8 @@ mlsetup(struct regs *rp, void *cif, kfpu_t *fp)
*/
cpu_list_init(CPU);
+ cpu_vm_data_init(CPU);
+
prom_init("kernel", cif);
(void) prom_set_preprom(kern_splr_preprom);
(void) prom_set_postprom(kern_splx_postprom);
diff --git a/usr/src/uts/sun4/os/mp_startup.c b/usr/src/uts/sun4/os/mp_startup.c
index 2d9662e5f7..f97ac1ecf2 100644
--- a/usr/src/uts/sun4/os/mp_startup.c
+++ b/usr/src/uts/sun4/os/mp_startup.c
@@ -381,6 +381,8 @@ setup_cpu_common(int cpuid)
*/
disp_cpu_init(cp);
+ cpu_vm_data_init(cp);
+
/*
* Now, initialize per-CPU idle thread for this CPU.
*/
@@ -453,6 +455,8 @@ cleanup_cpu_common(int cpuid)
/* Free cpu module private data structures, including scrubber. */
cpu_uninit_private(cp);
+ cpu_vm_data_destroy(cp);
+
/*
* Remove CPU from list of available CPUs.
*/
diff --git a/usr/src/uts/sun4/sys/memnode.h b/usr/src/uts/sun4/sys/memnode.h
index d24f5da1c5..d8068b9235 100644
--- a/usr/src/uts/sun4/sys/memnode.h
+++ b/usr/src/uts/sun4/sys/memnode.h
@@ -101,7 +101,6 @@ struct mem_node_conf {
int exists; /* only try if set, list may still be empty */
pfn_t physbase; /* lowest PFN in this memnode */
pfn_t physmax; /* highest PFN in this memnode */
- size_t cursize; /* current number of PAGESIZE pages on lists */
};
struct memlist;
diff --git a/usr/src/uts/sun4/vm/vm_dep.c b/usr/src/uts/sun4/vm/vm_dep.c
index e7b0b4b4b8..a50a06c238 100644
--- a/usr/src/uts/sun4/vm/vm_dep.c
+++ b/usr/src/uts/sun4/vm/vm_dep.c
@@ -82,9 +82,7 @@ uint_t vac_colors_mask = 0;
*/
#define CPUSETSIZE() (cpunodes[CPU->cpu_id].ecache_setsize)
-#ifdef DEBUG
plcnt_t plcnt; /* page list count */
-#endif
/*
* This variable is set by the cpu module to contain the lowest
diff --git a/usr/src/uts/sun4/vm/vm_dep.h b/usr/src/uts/sun4/vm/vm_dep.h
index 43f7c2e6e3..7f4f4428eb 100644
--- a/usr/src/uts/sun4/vm/vm_dep.h
+++ b/usr/src/uts/sun4/vm/vm_dep.h
@@ -55,12 +55,6 @@ extern "C" {
#define MTYPE_INIT(mtype, vp, vaddr, flags) \
mtype = (flags & PG_NORELOC) ? MTYPE_NORELOC : MTYPE_RELOC;
-/*
- * macros to loop through the mtype range - noops for sparc
- */
-#define MTYPE_START(mnode, mtype, flags)
-#define MTYPE_NEXT(mnode, mtype, flags) (-1)
-
/* mtype init for page_get_replacement_page */
#define MTYPE_PGR_INIT(mtype, flags, pp, mnode) \
@@ -133,25 +127,24 @@ extern void chk_lpg(page_t *, uchar_t);
#define CHK_LPG(pp, szc)
#endif
-#ifdef DEBUG
-
-/* page list count */
+/*
+ * page list count per mnode and type.
+ */
typedef struct {
- pgcnt_t plc_m_pgmax;
- pgcnt_t plc_m_pgcnt;
- pgcnt_t plc_m_clpgcnt; /* cache list cnt */
+ pgcnt_t plc_mt_pgmax; /* max page cnt */
+ pgcnt_t plc_mt_clpgcnt; /* cache list cnt */
+ pgcnt_t plc_mt_flpgcnt; /* free list cnt - small pages */
+ pgcnt_t plc_mt_lgpgcnt; /* free list cnt - large pages */
+#ifdef DEBUG
struct {
- pgcnt_t plc_mt_pgmax;
- pgcnt_t plc_mt_pgcnt;
- struct {
- pgcnt_t plc_mts_pgcnt;
- int plc_mts_colors;
- pgcnt_t *plc_mtsc_pgcnt;
- } plc_mts[MMU_PAGE_SIZES];
- } plc_mt[MAX_MEM_TYPES];
-} plcnt_t[MAX_MEM_NODES];
+ pgcnt_t plc_mts_pgcnt; /* per page size count */
+ int plc_mts_colors;
+ pgcnt_t *plc_mtsc_pgcnt; /* per color bin count */
+ } plc_mts[MMU_PAGE_SIZES];
+#endif
+} plcnt_t[MAX_MEM_NODES][MAX_MEM_TYPES];
-extern plcnt_t plcnt;
+#ifdef DEBUG
#define PLCNT_SZ(ctrs_sz) { \
int szc; \
@@ -168,9 +161,9 @@ extern plcnt_t plcnt;
colors = page_get_pagecolors(szc); \
for (mn = 0; mn < max_mem_nodes; mn++) { \
for (mt = 0; mt < MAX_MEM_TYPES; mt++) { \
- plcnt[mn].plc_mt[mt].plc_mts[szc]. \
+ plcnt[mn][mt].plc_mts[szc]. \
plc_mts_colors = colors; \
- plcnt[mn].plc_mt[mt].plc_mts[szc]. \
+ plcnt[mn][mt].plc_mts[szc]. \
plc_mtsc_pgcnt = (pgcnt_t *)base; \
base += (colors * sizeof (pgcnt_t)); \
} \
@@ -178,58 +171,120 @@ extern plcnt_t plcnt;
} \
}
-#define PLCNT_DO(pp, mn, szc, cnt, flags) { \
- int mtype = PP_2_MTYPE(pp); \
+#define PLCNT_DO(pp, mn, mtype, szc, cnt, flags) { \
int bin = PP_2_BIN(pp); \
if (flags & (PG_LIST_ISINIT | PG_LIST_ISCAGE)) \
- atomic_add_long(&plcnt[mn].plc_mt[mtype].plc_mt_pgmax, \
+ atomic_add_long(&plcnt[mn][mtype].plc_mt_pgmax, \
cnt); \
- atomic_add_long(&mem_node_config[mn].cursize, cnt); \
if (flags & PG_CACHE_LIST) \
- atomic_add_long(&plcnt[mn].plc_m_clpgcnt, cnt); \
- atomic_add_long(&plcnt[mn].plc_m_pgcnt, cnt); \
- atomic_add_long(&plcnt[mn].plc_mt[mtype].plc_mt_pgcnt, cnt); \
- atomic_add_long(&plcnt[mn].plc_mt[mtype].plc_mts[szc]. \
- plc_mts_pgcnt, cnt); \
- atomic_add_long(&plcnt[mn].plc_mt[mtype].plc_mts[szc]. \
+ atomic_add_long(&plcnt[mn][mtype].plc_mt_clpgcnt, cnt); \
+ else if (szc) \
+ atomic_add_long(&plcnt[mn][mtype].plc_mt_lgpgcnt, cnt); \
+ else \
+ atomic_add_long(&plcnt[mn][mtype].plc_mt_flpgcnt, cnt); \
+ atomic_add_long(&plcnt[mn][mtype].plc_mts[szc].plc_mts_pgcnt, \
+ cnt); \
+ atomic_add_long(&plcnt[mn][mtype].plc_mts[szc]. \
plc_mtsc_pgcnt[bin], cnt); \
}
-#define PLCNT_INCR(pp, mn, szc, flags) { \
- long cnt = (1 << PAGE_BSZS_SHIFT(szc)); \
- if (flags & PG_LIST_ISINIT) \
- plcnt[mn].plc_m_pgmax += cnt; \
- PLCNT_DO(pp, mn, szc, cnt, flags); \
-}
-
-#define PLCNT_DECR(pp, mn, szc, flags) { \
- long cnt = ((-1) << PAGE_BSZS_SHIFT(szc)); \
- PLCNT_DO(pp, mn, szc, cnt, flags); \
-}
-
#else
#define PLCNT_SZ(ctrs_sz)
#define PLCNT_INIT(base)
-#define PLCNT_INCR(pp, mnode, szc, flags) { \
+/* PG_FREE_LIST may not be explicitly set in flags for large pages */
+
+#define PLCNT_DO(pp, mn, mtype, szc, cnt, flags) { \
+ if (flags & (PG_LIST_ISINIT | PG_LIST_ISCAGE)) \
+ atomic_add_long(&plcnt[mn][mtype].plc_mt_pgmax, cnt); \
+ if (flags & PG_CACHE_LIST) \
+ atomic_add_long(&plcnt[mn][mtype].plc_mt_clpgcnt, cnt); \
+ else if (szc) \
+ atomic_add_long(&plcnt[mn][mtype].plc_mt_lgpgcnt, cnt); \
+ else \
+ atomic_add_long(&plcnt[mn][mtype].plc_mt_flpgcnt, cnt); \
+}
+
+#endif
+
+#define PLCNT_INCR(pp, mn, mtype, szc, flags) { \
long cnt = (1 << PAGE_BSZS_SHIFT(szc)); \
- atomic_add_long(&mem_node_config[mnode].cursize, cnt); \
+ PLCNT_DO(pp, mn, mtype, szc, cnt, flags); \
}
-#define PLCNT_DECR(pp, mnode, szc, flags) { \
+#define PLCNT_DECR(pp, mn, mtype, szc, flags) { \
long cnt = ((-1) << PAGE_BSZS_SHIFT(szc)); \
- atomic_add_long(&mem_node_config[mnode].cursize, cnt); \
+ PLCNT_DO(pp, mn, mtype, szc, cnt, flags); \
}
-#endif
+/*
+ * macros to update page list max counts - done when pages transferred
+ * between mtypes (as in kcage_assimilate_page).
+ */
+#define PLCNT_MAX_INCR(pp, mn, mtype, szc) { \
+ long cnt = (1 << PAGE_BSZS_SHIFT(szc)); \
+ atomic_add_long(&plcnt[mn][mtype].plc_mt_pgmax, cnt); \
+}
+
+#define PLCNT_MAX_DECR(pp, mn, mtype, szc) { \
+ long cnt = ((-1) << PAGE_BSZS_SHIFT(szc)); \
+ atomic_add_long(&plcnt[mn][mtype].plc_mt_pgmax, cnt); \
+}
+
+extern plcnt_t plcnt;
+
+#define MNODE_PGCNT(mn) \
+ (plcnt[mn][MTYPE_RELOC].plc_mt_clpgcnt + \
+ plcnt[mn][MTYPE_NORELOC].plc_mt_clpgcnt + \
+ plcnt[mn][MTYPE_RELOC].plc_mt_flpgcnt + \
+ plcnt[mn][MTYPE_NORELOC].plc_mt_flpgcnt + \
+ plcnt[mn][MTYPE_RELOC].plc_mt_lgpgcnt + \
+ plcnt[mn][MTYPE_NORELOC].plc_mt_lgpgcnt)
+
+#define MNODETYPE_PGCNT(mn, mtype) \
+ (plcnt[mn][mtype].plc_mt_clpgcnt + \
+ plcnt[mn][mtype].plc_mt_flpgcnt + \
+ plcnt[mn][mtype].plc_mt_lgpgcnt)
+
+/*
+ * macros to loop through the mtype range - MTYPE_START returns -1 in
+ * mtype if no pages in mnode/mtype and possibly NEXT mtype.
+ */
+#define MTYPE_START(mnode, mtype, flags) { \
+ if (plcnt[mnode][mtype].plc_mt_pgmax == 0) { \
+ ASSERT(MNODETYPE_PGCNT(mnode, mtype) == 0); \
+ MTYPE_NEXT(mnode, mtype, flags); \
+ } \
+}
+
+/*
+ * if allocation from the RELOC pool failed and there is sufficient cage
+ * memory, attempt to allocate from the NORELOC pool.
+ */
+#define MTYPE_NEXT(mnode, mtype, flags) { \
+ if (!(flags & (PG_NORELOC | PGI_NOCAGE | PGI_RELOCONLY)) && \
+ (kcage_freemem >= kcage_lotsfree)) { \
+ if (plcnt[mnode][mtype].plc_mt_pgmax == 0) { \
+ ASSERT(MNODETYPE_PGCNT(mnode, mtype) == 0); \
+ mtype = -1; \
+ } else { \
+ mtype = MTYPE_NORELOC; \
+ flags |= PG_NORELOC; \
+ } \
+ } else { \
+ mtype = -1; \
+ } \
+}
/*
* get the ecache setsize for the current cpu.
*/
#define CPUSETSIZE() (cpunodes[CPU->cpu_id].ecache_setsize)
+extern struct cpu cpu0;
+#define CPU0 &cpu0
#define PAGE_BSZS_SHIFT(szc) TTE_BSZS_SHIFT(szc)
/*
@@ -252,6 +307,7 @@ extern plcnt_t plcnt;
extern int ecache_alignsize;
#define L2CACHE_ALIGN ecache_alignsize
+#define L2CACHE_ALIGN_MAX 64
extern int consistent_coloring;
extern uint_t vac_colors_mask;
@@ -323,6 +379,26 @@ switch (consistent_coloring) { \
ASSERT(bin <= page_colors_mask);
/*
+ * cpu private vm data - accessed thru CPU->cpu_vm_data
+ * vc_pnum_memseg: tracks last memseg visited in page_numtopp_nolock()
+ * vc_pnext_memseg: tracks last memseg visited in page_nextn()
+ * vc_kmptr: unaligned kmem pointer for this vm_cpu_data_t
+ */
+
+typedef struct {
+ struct memseg *vc_pnum_memseg;
+ struct memseg *vc_pnext_memseg;
+ void *vc_kmptr;
+} vm_cpu_data_t;
+
+/* allocation size to ensure vm_cpu_data_t resides in its own cache line */
+#define VM_CPU_DATA_PADSIZE \
+ (P2ROUNDUP(sizeof (vm_cpu_data_t), L2CACHE_ALIGN_MAX))
+
+/* for boot cpu before kmem is initialized */
+extern char vm_cpu_data0[];
+
+/*
* Function to get an ecache color bin: F(as, cnt, vcolor).
* the goal of this function is to:
* - to spread a process's physical pages across the entire ecache to
@@ -371,40 +447,45 @@ switch (consistent_coloring) { \
#ifdef VM_STATS
struct vmm_vmstats_str {
- ulong_t pc_list_add_pages[MMU_PAGE_SIZES];
- ulong_t pc_list_sub_pages1[MMU_PAGE_SIZES];
- ulong_t pc_list_sub_pages2[MMU_PAGE_SIZES];
- ulong_t pc_list_sub_pages3[MMU_PAGE_SIZES];
- ulong_t pgf_alloc[MMU_PAGE_SIZES];
+ ulong_t pgf_alloc[MMU_PAGE_SIZES]; /* page_get_freelist */
ulong_t pgf_allocok[MMU_PAGE_SIZES];
ulong_t pgf_allocokrem[MMU_PAGE_SIZES];
ulong_t pgf_allocfailed[MMU_PAGE_SIZES];
ulong_t pgf_allocdeferred;
ulong_t pgf_allocretry[MMU_PAGE_SIZES];
- ulong_t pgc_alloc;
+ ulong_t pgc_alloc; /* page_get_cachelist */
ulong_t pgc_allocok;
ulong_t pgc_allocokrem;
ulong_t pgc_allocokdeferred;
ulong_t pgc_allocfailed;
- ulong_t pgcp_alloc[MMU_PAGE_SIZES];
+ ulong_t pgcp_alloc[MMU_PAGE_SIZES]; /* page_get_contig_pages */
ulong_t pgcp_allocfailed[MMU_PAGE_SIZES];
ulong_t pgcp_allocempty[MMU_PAGE_SIZES];
ulong_t pgcp_allocok[MMU_PAGE_SIZES];
- ulong_t ptcp[MMU_PAGE_SIZES];
+ ulong_t ptcp[MMU_PAGE_SIZES]; /* page_trylock_contig_pages */
ulong_t ptcpfreethresh[MMU_PAGE_SIZES];
ulong_t ptcpfailexcl[MMU_PAGE_SIZES];
ulong_t ptcpfailszc[MMU_PAGE_SIZES];
ulong_t ptcpfailcage[MMU_PAGE_SIZES];
ulong_t ptcpok[MMU_PAGE_SIZES];
- ulong_t pgmf_alloc[MMU_PAGE_SIZES];
+ ulong_t pgmf_alloc[MMU_PAGE_SIZES]; /* page_get_mnode_freelist */
ulong_t pgmf_allocfailed[MMU_PAGE_SIZES];
ulong_t pgmf_allocempty[MMU_PAGE_SIZES];
ulong_t pgmf_allocok[MMU_PAGE_SIZES];
- ulong_t pgmc_alloc;
+ ulong_t pgmc_alloc; /* page_get_mnode_cachelist */
ulong_t pgmc_allocfailed;
ulong_t pgmc_allocempty;
ulong_t pgmc_allocok;
- ulong_t ppr_reloc[MMU_PAGE_SIZES];
+ ulong_t pladd_free[MMU_PAGE_SIZES]; /* page_list_add/sub */
+ ulong_t plsub_free[MMU_PAGE_SIZES];
+ ulong_t pladd_cache;
+ ulong_t plsub_cache;
+ ulong_t plsubpages_szcbig;
+ ulong_t plsubpages_szc0;
+ ulong_t pff_req[MMU_PAGE_SIZES]; /* page_freelist_fill */
+ ulong_t pff_demote[MMU_PAGE_SIZES];
+ ulong_t pff_coalok[MMU_PAGE_SIZES];
+ ulong_t ppr_reloc[MMU_PAGE_SIZES]; /* page_relocate */
ulong_t ppr_relocok[MMU_PAGE_SIZES];
ulong_t ppr_relocnoroot[MMU_PAGE_SIZES];
ulong_t ppr_reloc_replnoroot[MMU_PAGE_SIZES];