Diffstat (limited to 'usr/src')
-rw-r--r-- | usr/src/uts/sfmmu/vm/hat_sfmmu.c | 40
-rw-r--r-- | usr/src/uts/sfmmu/vm/hat_sfmmu.h | 3
-rw-r--r-- | usr/src/uts/sun4/vm/vm_dep.c | 57
-rw-r--r-- | usr/src/uts/sun4/vm/vm_dep.h | 7
-rw-r--r-- | usr/src/uts/sun4u/cherrystone/os/cherrystone.c | 9
-rw-r--r-- | usr/src/uts/sun4u/cpu/opl_olympus.c | 52
-rw-r--r-- | usr/src/uts/sun4u/cpu/us3_common_mmu.c | 52
-rw-r--r-- | usr/src/uts/sun4u/daktari/os/daktari.c | 9
-rw-r--r-- | usr/src/uts/sun4u/lw8/os/lw8_platmod.c | 2
-rw-r--r-- | usr/src/uts/sun4u/opl/os/opl.c | 2
-rw-r--r-- | usr/src/uts/sun4u/serengeti/os/serengeti.c | 2
-rw-r--r-- | usr/src/uts/sun4u/starcat/os/starcat.c | 2
-rw-r--r-- | usr/src/uts/sun4u/vm/mach_vm_dep.c | 88
-rw-r--r-- | usr/src/uts/sun4v/vm/mach_vm_dep.c | 58
14 files changed, 107 insertions, 276 deletions
diff --git a/usr/src/uts/sfmmu/vm/hat_sfmmu.c b/usr/src/uts/sfmmu/vm/hat_sfmmu.c
index c9474ee8fa..37527ae3e4 100644
--- a/usr/src/uts/sfmmu/vm/hat_sfmmu.c
+++ b/usr/src/uts/sfmmu/vm/hat_sfmmu.c
@@ -9295,46 +9295,6 @@ sfmmu_size_tsb(sfmmu_t *sfmmup, int growing, uint64_t tte8k_cnt,
 }
 
 /*
- * Get the preferred page size code for a hat.
- * This is only advice, so locking is not done;
- * this transitory information could change
- * following the call anyway. This interface is
- * sun4 private.
- */
-/*ARGSUSED*/
-uint_t
-hat_preferred_pgsz(struct hat *hat, caddr_t vaddr, size_t maplen, int maptype)
-{
-	sfmmu_t *sfmmup = (sfmmu_t *)hat;
-	uint_t szc, maxszc = mmu_page_sizes - 1;
-	size_t pgsz;
-
-	if (maptype == MAPPGSZ_ISM) {
-		for (szc = maxszc; szc >= TTE4M; szc--) {
-			if (disable_ism_large_pages & (1 << szc))
-				continue;
-
-			pgsz = hw_page_array[szc].hp_size;
-			if ((maplen >= pgsz) && IS_P2ALIGNED(vaddr, pgsz))
-				return (szc);
-		}
-		return (TTE4M);
-	} else if (&mmu_preferred_pgsz) { /* USIII+-USIV+ */
-		return (mmu_preferred_pgsz(sfmmup, vaddr, maplen));
-	} else { /* USIII, USII, Niagara */
-		for (szc = maxszc; szc > TTE8K; szc--) {
-			if (disable_large_pages & (1 << szc))
-				continue;
-
-			pgsz = hw_page_array[szc].hp_size;
-			if ((maplen >= pgsz) && IS_P2ALIGNED(vaddr, pgsz))
-				return (szc);
-		}
-		return (TTE8K);
-	}
-}
-
-/*
  * Free up a sfmmu
  * Since the sfmmu is currently embedded in the hat struct we simply zero
  * out our fields and free up the ism map blk list if any.
diff --git a/usr/src/uts/sfmmu/vm/hat_sfmmu.h b/usr/src/uts/sfmmu/vm/hat_sfmmu.h
index 830a589e35..7563c2f1fd 100644
--- a/usr/src/uts/sfmmu/vm/hat_sfmmu.h
+++ b/usr/src/uts/sfmmu/vm/hat_sfmmu.h
@@ -1762,7 +1762,6 @@ extern void sfmmu_reprog_pgsz_arr(sfmmu_t *, uint8_t *);
 
 extern void hat_kern_setup(void);
 extern int hat_page_relocate(page_t **, page_t **, spgcnt_t *);
-extern uint_t hat_preferred_pgsz(struct hat *, caddr_t, size_t, int);
 extern int sfmmu_get_ppvcolor(struct page *);
 extern int sfmmu_get_addrvcolor(caddr_t);
 extern int sfmmu_hat_lock_held(sfmmu_t *);
@@ -1781,12 +1780,10 @@ extern struct hme_blk *sfmmu_hmetohblk(struct sf_hment *);
  */
 #pragma weak mmu_large_pages_disabled
 #pragma weak mmu_set_ctx_page_sizes
-#pragma weak mmu_preferred_pgsz
 #pragma weak mmu_check_page_sizes
 
 extern int mmu_large_pages_disabled(uint_t);
 extern void mmu_set_ctx_page_sizes(sfmmu_t *);
-extern uint_t mmu_preferred_pgsz(sfmmu_t *, caddr_t, size_t);
 extern void mmu_check_page_sizes(sfmmu_t *, uint64_t *);
 
 extern sfmmu_t *ksfmmup;
diff --git a/usr/src/uts/sun4/vm/vm_dep.c b/usr/src/uts/sun4/vm/vm_dep.c
index 9f2eebc551..08d2006d9b 100644
--- a/usr/src/uts/sun4/vm/vm_dep.c
+++ b/usr/src/uts/sun4/vm/vm_dep.c
@@ -474,7 +474,7 @@ getexinfo(
 
 
 /*ARGSUSED*/
-size_t
+static size_t
 map_pgszva(struct proc *p, caddr_t addr, size_t len)
 {
 	size_t pgsz = MMU_PAGESIZE;
@@ -508,7 +508,7 @@ map_pgszva(struct proc *p, caddr_t addr, size_t len)
 	return (pgsz);
 }
 
-size_t
+static size_t
 map_pgszheap(struct proc *p, caddr_t addr, size_t len)
 {
 	size_t pgsz;
@@ -554,7 +554,7 @@ map_pgszheap(struct proc *p, caddr_t addr, size_t len)
 	return (pgsz);
 }
 
-size_t
+static size_t
 map_pgszstk(struct proc *p, caddr_t addr, size_t len)
 {
 	size_t pgsz;
@@ -600,6 +600,57 @@ map_pgszstk(struct proc *p, caddr_t addr, size_t len)
 	return (pgsz);
 }
 
+static size_t
+map_pgszism(caddr_t addr, size_t len)
+{
+	uint_t szc;
+	size_t pgsz;
+	extern int disable_ism_large_pages;
+
+	for (szc = mmu_page_sizes - 1; szc >= TTE4M; szc--) {
+		if (disable_ism_large_pages & (1 << szc))
+			continue;
+
+		pgsz = hw_page_array[szc].hp_size;
+		if ((len >= pgsz) && IS_P2ALIGNED(addr, pgsz))
+			return (pgsz);
+	}
+	return (DEFAULT_ISM_PAGESIZE);
+}
+
+/*
+ * Suggest a page size to be used to map a segment of type maptype and length
+ * len. Returns a page size (not a size code).
+ * If remap is non-NULL, fill in a value suggesting whether or not to remap
+ * this segment.
+ */
+size_t
+map_pgsz(int maptype, struct proc *p, caddr_t addr, size_t len, int *remap)
+{
+	size_t pgsz = 0;
+
+	if (remap != NULL)
+		*remap = (len > auto_lpg_remap_threshold);
+
+	switch (maptype) {
+	case MAPPGSZ_ISM:
+		pgsz = map_pgszism(addr, len);
+		break;
+
+	case MAPPGSZ_VA:
+		pgsz = map_pgszva(p, addr, len);
+		break;
+
+	case MAPPGSZ_STK:
+		pgsz = map_pgszstk(p, addr, len);
+		break;
+
+	case MAPPGSZ_HEAP:
+		pgsz = map_pgszheap(p, addr, len);
+		break;
+	}
+	return (pgsz);
+}
 
 /*
  * Return non 0 value if the address may cause a VAC alias with KPM mappings.
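The common map_pgsz() added above centralizes what each platform used to do through the now-removed hat_preferred_pgsz(): dispatch on maptype and, for ISM segments, walk the size codes from largest to smallest until one satisfies both the length and the address-alignment test. The standalone sketch below models that walk; the page-size table, disabled-size mask, and helper names are illustrative stand-ins for the kernel's hw_page_array and disable_ism_large_pages, not the real symbols.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for hw_page_array[] and disable_ism_large_pages. */
#define IS_P2ALIGNED(v, a)	((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)

static const size_t pgsizes[] = {	/* 8K, 64K, 512K, 4M, 32M, 256M */
	0x2000, 0x10000, 0x80000, 0x400000, 0x2000000, 0x10000000
};
static const int disable_ism = (1 << 1) | (1 << 2);	/* 64K, 512K off */

static size_t
pick_ism_pgsz(uintptr_t addr, size_t len)
{
	int szc;

	/* Largest enabled page satisfying both length and alignment. */
	for (szc = 5; szc >= 3; szc--) {	/* stop at 4M, like TTE4M */
		if (disable_ism & (1 << szc))
			continue;
		if (len >= pgsizes[szc] && IS_P2ALIGNED(addr, pgsizes[szc]))
			return (pgsizes[szc]);
	}
	return (pgsizes[3]);	/* fallback, like DEFAULT_ISM_PAGESIZE */
}

int
main(void)
{
	/* A 64M segment at a 256M-aligned address: expect the 32M page. */
	printf("0x%zx\n", pick_ism_pgsz(0x10000000UL, 64UL * 1024 * 1024));
	return (0);
}

Run standalone, the example prints 0x2000000, mirroring the "largest page that fits" policy of the new kernel code.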
diff --git a/usr/src/uts/sun4/vm/vm_dep.h b/usr/src/uts/sun4/vm/vm_dep.h
index 3a201208b8..185cdf212a 100644
--- a/usr/src/uts/sun4/vm/vm_dep.h
+++ b/usr/src/uts/sun4/vm/vm_dep.h
@@ -531,13 +531,6 @@ extern uint_t userszc_2_szc[];
 #define SZC_2_USERSZC(szc) (szc_2_userszc[szc])
 
 /*
- * Platform specific map_pgsz large page hook routines.
- */
-extern size_t map_pgszva(struct proc *p, caddr_t addr, size_t len);
-extern size_t map_pgszheap(struct proc *p, caddr_t addr, size_t len);
-extern size_t map_pgszstk(struct proc *p, caddr_t addr, size_t len);
-
-/*
  * Platform specific page routines
  */
 extern void mach_page_add(page_t **, page_t *);
diff --git a/usr/src/uts/sun4u/cherrystone/os/cherrystone.c b/usr/src/uts/sun4u/cherrystone/os/cherrystone.c
index 01425c4030..738adb273d 100644
--- a/usr/src/uts/sun4u/cherrystone/os/cherrystone.c
+++ b/usr/src/uts/sun4u/cherrystone/os/cherrystone.c
@@ -2,9 +2,8 @@
  * CDDL HEADER START
  *
  * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
  *
  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  * or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
  * Use is subject to license terms.
  */
@@ -114,7 +113,7 @@ set_platform_defaults(void)
 	extern void mmu_init_large_pages(size_t);
 
 	if ((mmu_page_sizes == max_mmu_page_sizes) &&
-	    (mmu_ism_pagesize != MMU_PAGESIZE32M)) {
+	    (mmu_ism_pagesize != DEFAULT_ISM_PAGESIZE)) {
 		if (&mmu_init_large_pages)
 			mmu_init_large_pages(mmu_ism_pagesize);
 	}
diff --git a/usr/src/uts/sun4u/cpu/opl_olympus.c b/usr/src/uts/sun4u/cpu/opl_olympus.c
index 4ca069e087..7c38ca6568 100644
--- a/usr/src/uts/sun4u/cpu/opl_olympus.c
+++ b/usr/src/uts/sun4u/cpu/opl_olympus.c
@@ -700,9 +700,9 @@ send_one_mondo(int cpuid)
  */
 int init_mmu_page_sizes = 0;
 static int mmu_disable_ism_large_pages = ((1 << TTE64K) |
-	(1 << TTE512K) | (1 << TTE256M));
+	(1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
 static int mmu_disable_auto_large_pages = ((1 << TTE64K) |
-	(1 << TTE512K) | (1 << TTE4M) | (1 << TTE256M));
+	(1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
 static int mmu_disable_large_pages = 0;
 
 /*
@@ -719,8 +719,8 @@ mmu_init_mmu_page_sizes(int32_t not_used)
 	if (!init_mmu_page_sizes) {
 		mmu_page_sizes = MMU_PAGE_SIZES;
 		mmu_hashcnt = MAX_HASHCNT;
-		mmu_ism_pagesize = MMU_PAGESIZE32M;
-		auto_lpg_maxszc = TTE32M;
+		mmu_ism_pagesize = DEFAULT_ISM_PAGESIZE;
+		auto_lpg_maxszc = TTE4M;
 		mmu_exported_pagesize_mask = (1 << TTE8K) | (1 << TTE64K) |
 		    (1 << TTE512K) | (1 << TTE4M) |
 		    (1 << TTE32M) | (1 << TTE256M);
@@ -803,50 +803,6 @@ mmu_init_large_pages(size_t ism_pagesize)
 	}
 }
 
-/*ARGSUSED*/
-uint_t
-mmu_preferred_pgsz(struct hat *hat, caddr_t addr, size_t len)
-{
-	sfmmu_t *sfmmup = (sfmmu_t *)hat;
-	uint_t pgsz0, pgsz1;
-	uint_t szc, maxszc = mmu_page_sizes - 1;
-	size_t pgsz;
-	extern int disable_auto_large_pages;
-
-	pgsz0 = (uint_t)sfmmup->sfmmu_pgsz[0];
-	pgsz1 = (uint_t)sfmmup->sfmmu_pgsz[1];
-
-	/*
-	 * If either of the TLBs are reprogrammed, choose
-	 * the largest mapping size as the preferred size,
-	 * if it fits the size and alignment constraints.
-	 * Else return the largest mapping size that fits,
-	 * if neither TLB is reprogrammed.
-	 */
-	if (pgsz0 > TTE8K || pgsz1 > TTE8K) {
-		if (pgsz1 > pgsz0) {	/* First try pgsz1 */
-			pgsz = hw_page_array[pgsz1].hp_size;
-			if ((len >= pgsz) && IS_P2ALIGNED(addr, pgsz))
-				return (pgsz1);
-		}
-		if (pgsz0 > TTE8K) {	/* Then try pgsz0, if !TTE8K */
-			pgsz = hw_page_array[pgsz0].hp_size;
-			if ((len >= pgsz) && IS_P2ALIGNED(addr, pgsz))
-				return (pgsz0);
-		}
-	} else {	/* Otherwise pick best fit if neither TLB is reprogrammed. */
-		for (szc = maxszc; szc > TTE8K; szc--) {
-			if (disable_auto_large_pages & (1 << szc))
-				continue;
-
-			pgsz = hw_page_array[szc].hp_size;
-			if ((len >= pgsz) && IS_P2ALIGNED(addr, pgsz))
-				return (szc);
-		}
-	}
-	return (TTE8K);
-}
-
 /*
  * Function to reprogram the TLBs when page sizes used
  * by a process change significantly.
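A note on the OPL mask changes above: mmu_disable_ism_large_pages and mmu_disable_auto_large_pages are bitmasks indexed by TTE size code, so adding (1 << TTE32M) forbids 32M pages while dropping (1 << TTE4M) re-enables 4M on the automatic path (auto_lpg_maxszc drops to TTE4M to match). The sketch below shows how such a mask is consumed, assuming the usual sun4 size-code numbering (TTE8K = 0 through TTE256M = 5); the same pattern governs the Panther masks in the next file.

#include <stdio.h>

/* Assumed sun4 size-code numbering: TTE8K = 0 ... TTE256M = 5. */
enum { TTE8K, TTE64K, TTE512K, TTE4M, TTE32M, TTE256M };

int
main(void)
{
	/* Mirrors the new OPL auto mask: 64K, 512K, 32M, 256M disabled. */
	int disable_auto = (1 << TTE64K) | (1 << TTE512K) |
	    (1 << TTE32M) | (1 << TTE256M);
	int szc;

	for (szc = TTE256M; szc > TTE8K; szc--) {
		if (disable_auto & (1 << szc))
			continue;
		/* Prints 3: TTE4M is the largest size not masked off. */
		printf("largest enabled large-page size code: %d\n", szc);
		break;
	}
	return (0);
}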
diff --git a/usr/src/uts/sun4u/cpu/us3_common_mmu.c b/usr/src/uts/sun4u/cpu/us3_common_mmu.c
index e8adf0be6a..f62f37151f 100644
--- a/usr/src/uts/sun4u/cpu/us3_common_mmu.c
+++ b/usr/src/uts/sun4u/cpu/us3_common_mmu.c
@@ -58,10 +58,10 @@ static int panther_only = 0;
 
 static int pan_disable_ism_large_pages = ((1 << TTE64K) |
-	(1 << TTE512K) | (1 << TTE256M));
+	(1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
 static int pan_disable_large_pages = (1 << TTE256M);
 static int pan_disable_auto_large_pages = ((1 << TTE64K) |
-	(1 << TTE512K) | (1 << TTE4M) | (1 << TTE256M));
+	(1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
 
 static int chjag_disable_ism_large_pages = ((1 << TTE64K) |
 	(1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
@@ -205,13 +205,13 @@ mmu_init_mmu_page_sizes(int cinfo)
 		if (npanther == ncpunode) {
 			mmu_page_sizes = MMU_PAGE_SIZES;
 			mmu_hashcnt = MAX_HASHCNT;
-			mmu_ism_pagesize = MMU_PAGESIZE32M;
+			mmu_ism_pagesize = DEFAULT_ISM_PAGESIZE;
 			mmu_exported_pagesize_mask = (1 << TTE8K) |
 			    (1 << TTE64K) | (1 << TTE512K) | (1 << TTE4M) |
 			    (1 << TTE32M) | (1 << TTE256M);
 			panther_dtlb_restrictions = 1;
 			panther_only = 1;
-			auto_lpg_maxszc = TTE32M;
+			auto_lpg_maxszc = TTE4M;
 		} else if (npanther > 0) {
 			panther_dtlb_restrictions = 1;
 		}
@@ -235,50 +235,6 @@ static uint64_t ttecnt_threshold[MMU_PAGE_SIZES] = {
 	AVAIL_DTLB_ENTRIES, AVAIL_DTLB_ENTRIES,
 	AVAIL_32M_ENTRIES, AVAIL_256M_ENTRIES };
 
-/*ARGSUSED*/
-uint_t
-mmu_preferred_pgsz(struct hat *hat, caddr_t addr, size_t len)
-{
-	sfmmu_t *sfmmup = (sfmmu_t *)hat;
-	uint_t pgsz0, pgsz1;
-	uint_t szc, maxszc = mmu_page_sizes - 1;
-	size_t pgsz;
-	extern int disable_auto_large_pages;
-
-	pgsz0 = (uint_t)sfmmup->sfmmu_pgsz[0];
-	pgsz1 = (uint_t)sfmmup->sfmmu_pgsz[1];
-
-	/*
-	 * If either of the TLBs are reprogrammed, choose
-	 * the largest mapping size as the preferred size,
-	 * if it fits the size and alignment constraints.
-	 * Else return the largest mapping size that fits,
-	 * if neither TLB is reprogrammed.
-	 */
-	if (pgsz0 > TTE8K || pgsz1 > TTE8K) {
-		if (pgsz1 > pgsz0) {	/* First try pgsz1 */
-			pgsz = hw_page_array[pgsz1].hp_size;
-			if ((len >= pgsz) && IS_P2ALIGNED(addr, pgsz))
-				return (pgsz1);
-		}
-		if (pgsz0 > TTE8K) {	/* Then try pgsz0, if !TTE8K */
-			pgsz = hw_page_array[pgsz0].hp_size;
-			if ((len >= pgsz) && IS_P2ALIGNED(addr, pgsz))
-				return (pgsz0);
-		}
-	} else {	/* Otherwise pick best fit if neither TLB is reprogrammed. */
-		for (szc = maxszc; szc > TTE8K; szc--) {
-			if (disable_auto_large_pages & (1 << szc))
-				continue;
-
-			pgsz = hw_page_array[szc].hp_size;
-			if ((len >= pgsz) && IS_P2ALIGNED(addr, pgsz))
-				return (szc);
-		}
-	}
-	return (TTE8K);
-}
-
 /*
  * The purpose of this code is to indirectly reorganize the sfmmu_pgsz array
  * in order to handle the Panther mmu DTLB requirements. Panther only supports
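The platform files below all repeat the same guarded call: mmu_init_large_pages is declared with #pragma weak, so set_platform_defaults() only calls it when a strong definition was linked in. A minimal model of that pattern, assuming an ELF toolchain that honors #pragma weak for undefined symbols (the function name is reused purely for illustration):

#include <stdio.h>

/*
 * Model of the weak-symbol guard: when no strong definition of the
 * function is linked in, its address compares equal to NULL and the
 * call is skipped.
 */
#pragma weak mmu_init_large_pages
extern void mmu_init_large_pages(size_t);

int
main(void)
{
	if (&mmu_init_large_pages)	/* the same test the kernel uses */
		mmu_init_large_pages(0x400000);
	else
		printf("no mmu_init_large_pages linked in; call skipped\n");
	return (0);
}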
diff --git a/usr/src/uts/sun4u/daktari/os/daktari.c b/usr/src/uts/sun4u/daktari/os/daktari.c
index 496b81bbeb..1ee5082226 100644
--- a/usr/src/uts/sun4u/daktari/os/daktari.c
+++ b/usr/src/uts/sun4u/daktari/os/daktari.c
@@ -2,9 +2,8 @@
  * CDDL HEADER START
  *
  * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
  *
  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  * or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
  * Use is subject to license terms.
  */
@@ -105,7 +104,7 @@ set_platform_defaults(void)
 	extern void mmu_init_large_pages(size_t);
 
 	if ((mmu_page_sizes == max_mmu_page_sizes) &&
-	    (mmu_ism_pagesize != MMU_PAGESIZE32M)) {
+	    (mmu_ism_pagesize != DEFAULT_ISM_PAGESIZE)) {
 		if (&mmu_init_large_pages)
 			mmu_init_large_pages(mmu_ism_pagesize);
 	}
diff --git a/usr/src/uts/sun4u/lw8/os/lw8_platmod.c b/usr/src/uts/sun4u/lw8/os/lw8_platmod.c
index c21b03f0af..5d298cccdb 100644
--- a/usr/src/uts/sun4u/lw8/os/lw8_platmod.c
+++ b/usr/src/uts/sun4u/lw8/os/lw8_platmod.c
@@ -212,7 +212,7 @@ set_platform_defaults(void)
 	xc_tick_limit_scale = 5;
 
 	if ((mmu_page_sizes == max_mmu_page_sizes) &&
-	    (mmu_ism_pagesize != MMU_PAGESIZE32M)) {
+	    (mmu_ism_pagesize != DEFAULT_ISM_PAGESIZE)) {
 		if (&mmu_init_large_pages)
 			mmu_init_large_pages(mmu_ism_pagesize);
 	}
diff --git a/usr/src/uts/sun4u/opl/os/opl.c b/usr/src/uts/sun4u/opl/os/opl.c
index 9d66400a66..99946a037c 100644
--- a/usr/src/uts/sun4u/opl/os/opl.c
+++ b/usr/src/uts/sun4u/opl/os/opl.c
@@ -175,7 +175,7 @@ set_platform_defaults(void)
 	ts_dispatch_extended = 1;
 
 	if ((mmu_page_sizes == max_mmu_page_sizes) &&
-	    (mmu_ism_pagesize != MMU_PAGESIZE32M)) {
+	    (mmu_ism_pagesize != DEFAULT_ISM_PAGESIZE)) {
 		if (&mmu_init_large_pages)
 			mmu_init_large_pages(mmu_ism_pagesize);
 	}
diff --git a/usr/src/uts/sun4u/serengeti/os/serengeti.c b/usr/src/uts/sun4u/serengeti/os/serengeti.c
index f6a055181f..cafcd22c45 100644
--- a/usr/src/uts/sun4u/serengeti/os/serengeti.c
+++ b/usr/src/uts/sun4u/serengeti/os/serengeti.c
@@ -205,7 +205,7 @@ set_platform_defaults(void)
 	xc_tick_limit_scale = 5;
 
 	if ((mmu_page_sizes == max_mmu_page_sizes) &&
-	    (mmu_ism_pagesize != MMU_PAGESIZE32M)) {
+	    (mmu_ism_pagesize != DEFAULT_ISM_PAGESIZE)) {
 		if (&mmu_init_large_pages)
 			mmu_init_large_pages(mmu_ism_pagesize);
 	}
diff --git a/usr/src/uts/sun4u/starcat/os/starcat.c b/usr/src/uts/sun4u/starcat/os/starcat.c
index 207e3b6cdb..4d4edc08cb 100644
--- a/usr/src/uts/sun4u/starcat/os/starcat.c
+++ b/usr/src/uts/sun4u/starcat/os/starcat.c
@@ -171,7 +171,7 @@ set_platform_defaults(void)
 	tsb_lgrp_affinity = 1;
 
 	if ((mmu_page_sizes == max_mmu_page_sizes) &&
-	    (mmu_ism_pagesize != MMU_PAGESIZE32M)) {
+	    (mmu_ism_pagesize != DEFAULT_ISM_PAGESIZE)) {
 		if (&mmu_init_large_pages)
 			mmu_init_large_pages(mmu_ism_pagesize);
 	}
diff --git a/usr/src/uts/sun4u/vm/mach_vm_dep.c b/usr/src/uts/sun4u/vm/mach_vm_dep.c
index d9907b3616..a246ba7498 100644
--- a/usr/src/uts/sun4u/vm/mach_vm_dep.c
+++ b/usr/src/uts/sun4u/vm/mach_vm_dep.c
@@ -125,6 +125,25 @@ size_t initdata_pgsz64k_minsize = MMU_PAGESIZE64K;
 size_t max_shm_lpsize = ULONG_MAX;
 
 /*
+ * Platforms with smaller or larger TLBs may wish to change this. Most
+ * sun4u platforms can hold 1024 8K entries by default and most processes
+ * are observed to be < 6MB on these machines, so we decide to move up
+ * here to give ourselves some wiggle room for other, smaller segments.
+ */
+int auto_lpg_tlb_threshold = 768;
+int auto_lpg_minszc = TTE4M;
+int auto_lpg_maxszc = TTE4M;
+size_t auto_lpg_heap_default = MMU_PAGESIZE;
+size_t auto_lpg_stack_default = MMU_PAGESIZE;
+size_t auto_lpg_va_default = MMU_PAGESIZE;
+size_t auto_lpg_remap_threshold = 0;
+/*
+ * Number of pages in 1 GB. Don't enable automatic large pages if we have
+ * fewer than this many pages.
+ */
+pgcnt_t auto_lpg_min_physmem = 1 << (30 - MMU_PAGESHIFT);
+
+/*
  * map_addr_proc() is the routine called when the system is to
  * choose an address for the user. We will pick an address
  * range which is just below the current stack limit. The
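The auto_lpg_min_physmem initializer in the hunk above encodes "number of pages in 1 GB" as a shift: 2^30 bytes divided by a 2^MMU_PAGESHIFT-byte base page. A quick standalone check of the arithmetic, assuming the usual sun4 8K base page (MMU_PAGESHIFT = 13):

#include <stdio.h>

int
main(void)
{
	const int mmu_pageshift = 13;	/* assumed 8K base page */
	long pages_per_gb = 1L << (30 - mmu_pageshift);

	/* 2^30 / 2^13 = 2^17 */
	printf("%ld\n", pages_per_gb);	/* prints 131072 */
	return (0);
}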
@@ -282,75 +301,6 @@ map_addr_proc(caddr_t *addrp, size_t len, offset_t off, int vacalign,
 }
 
 /*
- * Platforms with smaller or larger TLBs may wish to change this. Most
- * sun4u platforms can hold 1024 8K entries by default and most processes
- * are observed to be < 6MB on these machines, so we decide to move up
- * here to give ourselves some wiggle room for other, smaller segments.
- */
-int auto_lpg_tlb_threshold = 768;
-int auto_lpg_minszc = TTE4M;
-int auto_lpg_maxszc = TTE4M;
-size_t auto_lpg_heap_default = MMU_PAGESIZE;
-size_t auto_lpg_stack_default = MMU_PAGESIZE;
-size_t auto_lpg_va_default = MMU_PAGESIZE;
-size_t auto_lpg_remap_threshold = 0;
-/*
- * Number of pages in 1 GB. Don't enable automatic large pages if we have
- * fewer than this many pages.
- */
-pgcnt_t auto_lpg_min_physmem = 1 << (30 - MMU_PAGESHIFT);
-
-/*
- * Suggest a page size to be used to map a segment of type maptype and length
- * len. Returns a page size (not a size code).
- * If remap is non-NULL, fill in a value suggesting whether or not to remap
- * this segment.
- */
-size_t
-map_pgsz(int maptype, struct proc *p, caddr_t addr, size_t len, int *remap)
-{
-	uint_t n;
-	size_t pgsz = 0;
-
-	if (remap)
-		*remap = (len > auto_lpg_remap_threshold);
-
-	switch (maptype) {
-	case MAPPGSZ_ISM:
-		n = hat_preferred_pgsz(p->p_as->a_hat, addr, len, maptype);
-		pgsz = hw_page_array[n].hp_size;
-
-		/*
-		 * For non-Panther systems, the following code sets the [D]ISM
-		 * pagesize to 4M if either of the DTLBs happens to be
-		 * programmed to a different large pagesize.
-		 * The Panther code might hit this case as well,
-		 * if and only if the addr is not aligned to >= 4M.
-		 */
-		if ((pgsz > 0) && (pgsz < MMU_PAGESIZE4M))
-			pgsz = MMU_PAGESIZE4M;
-		break;
-
-	case MAPPGSZ_VA:
-		n = hat_preferred_pgsz(p->p_as->a_hat, addr, len, maptype);
-		pgsz = hw_page_array[n].hp_size;
-		if ((pgsz <= MMU_PAGESIZE) ||
-		    !IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz))
-			pgsz = map_pgszva(p, addr, len);
-		break;
-
-	case MAPPGSZ_STK:
-		pgsz = map_pgszstk(p, addr, len);
-		break;
-
-	case MAPPGSZ_HEAP:
-		pgsz = map_pgszheap(p, addr, len);
-		break;
-	}
-	return (pgsz);
-}
-
-/*
  * Platform-dependent page scrub call.
  */
 void
diff --git a/usr/src/uts/sun4v/vm/mach_vm_dep.c b/usr/src/uts/sun4v/vm/mach_vm_dep.c
index b43efd4907..b01a5c182d 100644
--- a/usr/src/uts/sun4v/vm/mach_vm_dep.c
+++ b/usr/src/uts/sun4v/vm/mach_vm_dep.c
@@ -132,6 +132,20 @@ size_t initdata_pgsz64k_minsize = MMU_PAGESIZE64K;
 
 size_t max_shm_lpsize = MMU_PAGESIZE4M;
 
+/* Auto large page tunables. */
+int auto_lpg_tlb_threshold = 32;
+int auto_lpg_minszc = TTE64K;
+int auto_lpg_maxszc = TTE64K;
+size_t auto_lpg_heap_default = MMU_PAGESIZE64K;
+size_t auto_lpg_stack_default = MMU_PAGESIZE64K;
+size_t auto_lpg_va_default = MMU_PAGESIZE64K;
+size_t auto_lpg_remap_threshold = 0;	/* always remap */
+/*
+ * Number of pages in 1 GB. Don't enable automatic large pages if we have
+ * fewer than this many pages.
+ */
+pgcnt_t auto_lpg_min_physmem = 1 << (30 - MMU_PAGESHIFT);
+
 /*
  * map_addr_proc() is the routine called when the system is to
  * choose an address for the user. We will pick an address
@@ -287,50 +301,6 @@ map_addr_proc(caddr_t *addrp, size_t len, offset_t off, int vacalign,
 	}
 }
 
-/* Auto large page tunables. */
-int auto_lpg_tlb_threshold = 32;
-int auto_lpg_minszc = TTE64K;
-int auto_lpg_maxszc = TTE64K;
-size_t auto_lpg_heap_default = MMU_PAGESIZE64K;
-size_t auto_lpg_stack_default = MMU_PAGESIZE64K;
-size_t auto_lpg_va_default = MMU_PAGESIZE64K;
-size_t auto_lpg_remap_threshold = 0;	/* always remap */
-/*
- * Number of pages in 1 GB. Don't enable automatic large pages if we have
- * fewer than this many pages.
- */
-pgcnt_t auto_lpg_min_physmem = 1 << (30 - MMU_PAGESHIFT);
-
-size_t
-map_pgsz(int maptype, struct proc *p, caddr_t addr, size_t len, int *remap)
-{
-	uint_t n;
-	size_t pgsz = 0;
-
-	if (remap)
-		*remap = (len > auto_lpg_remap_threshold);
-
-	switch (maptype) {
-	case MAPPGSZ_ISM:
-		n = hat_preferred_pgsz(p->p_as->a_hat, addr, len, maptype);
-		pgsz = hw_page_array[n].hp_size;
-		break;
-
-	case MAPPGSZ_VA:
-		pgsz = map_pgszva(p, addr, len);
-		break;
-
-	case MAPPGSZ_STK:
-		pgsz = map_pgszstk(p, addr, len);
-		break;
-
-	case MAPPGSZ_HEAP:
-		pgsz = map_pgszheap(p, addr, len);
-		break;
-	}
-	return (pgsz);
-}
-
 /*
  * Platform-dependent page scrub call.
  * We call hypervisor to scrub the page.
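Worth noting when comparing the two mach_vm_dep.c files: sun4v keeps much smaller defaults (auto_lpg_tlb_threshold of 32 and 64K page defaults) than sun4u (768 and 4M size codes). The sketch below shows one plausible way a TLB-pressure threshold like this can gate automatic large-page selection; it is a guess at the shape of the policy, not the kernel's actual map_pgszheap() logic, which this diff does not show.

#include <stdio.h>

int
main(void)
{
	const long base_pgsz = 8192;	/* assumed 8K base page */
	const long tlb_threshold = 32;	/* sun4v default above */
	long seg_len = 4L << 20;	/* hypothetical 4 MB heap segment */

	/*
	 * Hypothetical gate: if mapping the segment with base pages
	 * would cost more TLB entries than the threshold, prefer a
	 * larger page size (64K on sun4v, per the defaults above).
	 */
	if (seg_len / base_pgsz > tlb_threshold)
		printf("%ld base pages > %ld: use a large page\n",
		    seg_len / base_pgsz, tlb_threshold);
	return (0);
}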