author		Jason Beloro <Jason.Beloro@Sun.COM>	2009-02-13 16:02:59 -0800
committer	Jason Beloro <Jason.Beloro@Sun.COM>	2009-02-13 16:02:59 -0800
commit		125be069be21bcb7b94bd4525c19eb424bc56276
tree		5c4beedb49f67f81c2aab6502d4ce373fa89f0ab /usr/src/uts/sfmmu
parent		1b7fc709228029b3f29f1c3de6d817a476f7c583
FWARC 2008/592 MMU_EXT API group name and number change
6729256 kernel routines such as bcopy, bzero need to be optimized for UltraSPARC-AT10.
6757032 Use TLB search order register API
6771318 Some packets get dropped from the performance counter ringbuffer
6781121 CPC: Rock PCBE does not correctly handle overflow profiling. 64-bit extension is incorrect.
6791429 SUNW,UltraSPARC-AT10 should not be using ASI_NQUAD_LD since it is redefined as ASI_STICK_CMPR
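
Most of the hat_sfmmu.c changes in this patch hook the new pagesize-order support into existing code paths behind weak symbols, so platforms without the Rock pagesize search register pay no cost. The following is a minimal sketch of that guard pattern in isolation, not code from the patch; the typedef stub and the caller example_pgsz_order_update() are invented for illustration.

#pragma weak mmu_set_pgsz_order

typedef struct sfmmu_hat sfmmu_t;		/* stand-in for the real hat type */
extern void mmu_set_pgsz_order(sfmmu_t *, int);	/* provided only by the Rock module */

static void
example_pgsz_order_update(sfmmu_t *sfmmup)
{
	/*
	 * Taking the address of an unresolved weak symbol yields NULL,
	 * so the call is skipped on platforms that do not supply it.
	 */
	if (&mmu_set_pgsz_order)
		mmu_set_pgsz_order(sfmmup, 1);
}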
Diffstat (limited to 'usr/src/uts/sfmmu')
-rw-r--r--	usr/src/uts/sfmmu/ml/sfmmu_asm.s	95
-rw-r--r--	usr/src/uts/sfmmu/vm/hat_sfmmu.c	104
-rw-r--r--	usr/src/uts/sfmmu/vm/hat_sfmmu.h	33
3 files changed, 204 insertions, 28 deletions
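
In the sfmmu_asm.s hunk below, the new sfmmu_patch_pgsz_reg() routine is a no-op on sun4u; otherwise it stores a NOP (built by MAKE_NOP_INSTR) over the instruction at the sfmmu_pgsz_load_mmustate_patch label and issues a flush so instruction fetch sees the change. A hedged C rendering of the same idea follows; flush_icache_line() is an assumed stand-in for the SPARC "flush" instruction, and 0x01000000 is the SPARC NOP encoding (sethi 0, %g0).

#include <stdint.h>

#define	SPARC_NOP	0x01000000U		/* sethi 0, %g0 */

extern uint32_t sfmmu_pgsz_load_mmustate_patch;	/* patch label in sfmmu_asm.s */
extern void flush_icache_line(void *);		/* assumed helper, not a real API */

static void
patch_out_pgsz_load(void)
{
	/* Overwrite the patch point with a NOP... */
	sfmmu_pgsz_load_mmustate_patch = SPARC_NOP;
	/* ...and make the new instruction visible to instruction fetch. */
	flush_icache_line(&sfmmu_pgsz_load_mmustate_patch);
}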
diff --git a/usr/src/uts/sfmmu/ml/sfmmu_asm.s b/usr/src/uts/sfmmu/ml/sfmmu_asm.s
index 6e5e8848b3..6ecd81de3a 100644
--- a/usr/src/uts/sfmmu/ml/sfmmu_asm.s
+++ b/usr/src/uts/sfmmu/ml/sfmmu_asm.s
@@ -563,6 +563,10 @@ sfmmu_panic9:
 	.global	sfmmu_panic10
 sfmmu_panic10:
 	.asciz	"sfmmu_asm: valid SCD with no 3rd scd TSB"
+
+	.global	sfmmu_panic11
+sfmmu_panic11:
+	.asciz	"sfmmu_asm: ktsb_phys must not be 0 on a sun4v platform"
 
 	ENTRY(sfmmu_disable_intrs)
 	rdpr	%pstate, %o0
@@ -942,6 +946,11 @@ sfmmu_patch_shctx(void)
 {
 }
 
+void
+sfmmu_patch_pgsz_reg(void)
+{
+}
+
 /* ARGSUSED */
 void
 sfmmu_load_tsbe(struct tsbe *tsbep, uint64_t vaddr, tte_t *ttep, int phys)
@@ -1134,8 +1143,31 @@ sfmmu_kpm_unload_tsb(caddr_t addr, int vpshift)
  */
 	ENTRY_NP(sfmmu_patch_mmu_asi)
 	mov	%o7, %o4			! save return pc in %o4
-	movrnz	%o0, ASI_QUAD_LDD_PHYS, %o3
+	mov	ASI_QUAD_LDD_PHYS, %o3		! set QUAD_LDD_PHYS by default
+
+#ifdef	sun4v
+
+	/*
+	 * Check ktsb_phys. It must be non-zero for sun4v, panic if not.
+	 */
+
+	brnz,pt	%o0, do_patch
+	nop
+
+	sethi	%hi(sfmmu_panic11), %o0
+	call	panic
+	or	%o0, %lo(sfmmu_panic11), %o0
+do_patch:
+
+#else /* sun4v */
+	/*
+	 * Some non-sun4v platforms deploy virtual ktsb (ktsb_phys==0).
+	 * Note that ASI_NQUAD_LD is not defined/used for sun4v
+	 */
 	movrz	%o0, ASI_NQUAD_LD, %o3
+
+#endif /* sun4v */
+
 	sll	%o3, 5, %o1			! imm_asi offset
 	mov	6, %o3				! number of instructions
 	sethi	%hi(dktsb), %o0			! to search
@@ -1158,6 +1190,7 @@ sfmmu_kpm_unload_tsb(caddr_t addr, int vpshift)
 	nop
 	SET_SIZE(sfmmu_patch_mmu_asi)
 
+
 	ENTRY_NP(sfmmu_patch_ktsb)
 	/*
 	 * We need to fix iktsb, dktsb, et. al.
@@ -1408,6 +1441,19 @@ sfmmu_kpm_unload_tsb(caddr_t addr, int vpshift)
 #endif /* sun4u */
 	SET_SIZE(sfmmu_patch_shctx)
 
+	ENTRY_NP(sfmmu_patch_pgsz_reg)
+#ifdef	sun4u
+	retl
+	nop
+#else /* sun4u */
+	set	sfmmu_pgsz_load_mmustate_patch, %o0
+	MAKE_NOP_INSTR(%o1)
+	st	%o1, [%o0]
+	retl
+	flush	%o0
+#endif /* sun4u */
+	SET_SIZE(sfmmu_patch_pgsz_reg)
+
 /*
  * Routine that loads an entry into a tsb using virtual addresses.
  * Locking is required since all cpus can use the same TSB.
@@ -2362,6 +2408,13 @@ label/**/4:							;\
	ba,a,pt	%xcc, label/**/8				;\
label/**/6:							;\
	GET_SCDSHMERMAP(tsbarea, hmeblkpa, hatid, hmemisc)	;\
+	/*							;\
+	 * hmemisc is set to 1 if this is a shared mapping. It will	;\
+	 * be cleared by CHECK_SHARED_PGSZ if this pagesize is not	;\
+	 * allowed, in order to limit the number of entries in the	;\
+	 * pagesize register.					;\
+	 */							;\
+	CHECK_SHARED_PGSZ(tsbarea, tte, hatid, hmemisc, label/**/9)	;\
	ldn	[tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid	;\
label/**/7:							;\
	set	TTE_SUSPEND, hatid				;\
@@ -3242,8 +3295,37 @@ tsb_shme_checktte:
 	stub	%g1, [%g6 + TSBMISS_URTTEFLAGS]
 	SAVE_CTX1(%g7, %g2, %g1, tsb_shmel)
+	ba	tsb_validtte
 #endif /* sun4u && !UTSB_PHYS */
 
+tsb_ism_validtte:
+#ifdef sun4v
+	/*
+	 * Check pagesize against bitmap for Rock page size register,
+	 * for ism mappings.
+	 *
+	 * %g1, %g2 = scratch
+	 * %g3 = tte
+	 * g4 = tte pa
+	 * g5 = tte va
+	 * g6 = tsbmiss area
+	 * %g7 = tt
+	 */
+	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g1
+	and	%g1, HAT_CHKCTX1_FLAG, %g2
+	/*
+	 * Clear the HAT_CHKCTX1_FLAG in %g2 if this shared pagesize is not allowed
+	 * to limit the number of entries in the pagesize search register.
+	 */
+	CHECK_SHARED_PGSZ(%g6, %g3, %g7, %g2, ism_chk_pgsz)
+	andn	%g1, HAT_CHKCTX1_FLAG, %g1
+	or	%g1, %g2, %g1
+	stub	%g1, [%g6 + TSBMISS_URTTEFLAGS]
+	brz	%g2, tsb_validtte
+	  rdpr	%tt, %g7
+	SAVE_CTX1(%g7, %g1, %g2, tsb_shctxl)
+#endif /* sun4v */
+
 tsb_validtte:
 	/*
 	 * g3 = tte
@@ -3576,10 +3658,7 @@ tsb_ism:
 	ldub	[%g6 + TSBMISS_URTTEFLAGS], %g5
 	or	%g5, HAT_CHKCTX1_FLAG, %g5
 	stub	%g5, [%g6 + TSBMISS_URTTEFLAGS]
-	rdpr	%tt, %g5
-	SAVE_CTX1(%g5, %g3, %g1, tsb_shctxl)
 #endif /* defined(sun4v) || defined(UTSB_PHYS) */
-
 	/*
 	 * ISM pages are always locked down.
 	 * If we can't find the tte then pagefault
@@ -3611,7 +3690,7 @@ tsb_ism_32M:
 	/* NOT REACHED */
 
 tsb_ism_32M_found:
-	brlz,a,pt %g3, tsb_validtte
+	brlz,a,pt %g3, tsb_ism_validtte
 	  rdpr	%tt, %g7
 	ba,pt	%xcc, tsb_ism_4M
 	nop
@@ -3629,7 +3708,7 @@ tsb_ism_256M:
 	    tsb_ism_4M)
 
 tsb_ism_256M_found:
-	brlz,a,pt %g3, tsb_validtte
+	brlz,a,pt %g3, tsb_ism_validtte
 	  rdpr	%tt, %g7
 
 tsb_ism_4M:
@@ -3642,7 +3721,7 @@ tsb_ism_4M:
 	/* NOT REACHED */
 
 tsb_ism_4M_found:
-	brlz,a,pt %g3, tsb_validtte
+	brlz,a,pt %g3, tsb_ism_validtte
 	  rdpr	%tt, %g7
 
 tsb_ism_8K:
@@ -3656,7 +3735,7 @@ tsb_ism_8K:
 	/* NOT REACHED */
 
 tsb_ism_8K_found:
-	brlz,a,pt %g3, tsb_validtte
+	brlz,a,pt %g3, tsb_ism_validtte
 	  rdpr	%tt, %g7
 
 tsb_pagefault:
diff --git a/usr/src/uts/sfmmu/vm/hat_sfmmu.c b/usr/src/uts/sfmmu/vm/hat_sfmmu.c
index 6c9d340704..0398a38276 100644
--- a/usr/src/uts/sfmmu/vm/hat_sfmmu.c
+++ b/usr/src/uts/sfmmu/vm/hat_sfmmu.c
@@ -227,6 +227,7 @@ uint_t disable_large_pages = 0;
 uint_t disable_ism_large_pages = (1 << TTE512K);
 uint_t disable_auto_data_large_pages = 0;
 uint_t disable_auto_text_large_pages = 0;
+uint_t disable_shctx_large_pages = 0;
 
 /*
  * Private sfmmu data structures for hat management
@@ -293,6 +294,14 @@ int disable_shctx = 0;
 /* Internal variable, set by MD if the HW supports shctx feature */
 int shctx_on = 0;
 
+/* Internal variable, set by MD if the HW supports the search order register */
+int pgsz_search_on = 0;
+/*
+ * External /etc/system tunable, for controlling search order register
+ * support.
+ */
+int disable_pgsz_search = 0;
+
 #ifdef DEBUG
 static void check_scd_sfmmu_list(sfmmu_t **, sfmmu_t *, int);
 #endif
@@ -472,7 +481,6 @@ static void	sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
 			pfn_t, int);
 static void	sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int);
 static void	sfmmu_tlb_range_demap(demap_range_t *);
-static void	sfmmu_invalidate_ctx(sfmmu_t *);
 static void	sfmmu_sync_mmustate(sfmmu_t *);
 static void	sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t);
 
@@ -733,11 +741,7 @@ int	sfmmu_page_spl_held(struct page *);
 static void	sfmmu_mlist_reloc_enter(page_t *, page_t *, kmutex_t **,
 			kmutex_t **);
 static void	sfmmu_mlist_reloc_exit(kmutex_t *, kmutex_t *);
-static hatlock_t *
-		sfmmu_hat_enter(sfmmu_t *);
-static hatlock_t *
-		sfmmu_hat_tryenter(sfmmu_t *);
-static void	sfmmu_hat_exit(hatlock_t *);
+static hatlock_t *sfmmu_hat_tryenter(sfmmu_t *);
 static void	sfmmu_hat_lock_all(void);
 static void	sfmmu_hat_unlock_all(void);
 static void	sfmmu_ismhat_enter(sfmmu_t *, int);
@@ -1061,12 +1065,14 @@ hat_init_pagesizes()
 	disable_ism_large_pages |= disable_large_pages;
 	disable_auto_data_large_pages = disable_large_pages;
 	disable_auto_text_large_pages = disable_large_pages;
+	disable_shctx_large_pages |= disable_large_pages;
 
 	/*
 	 * Initialize mmu-specific large page sizes.
 	 */
 	if (&mmu_large_pages_disabled) {
 		disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD);
+		disable_shctx_large_pages |= disable_large_pages;
 		disable_ism_large_pages |=
 		    mmu_large_pages_disabled(HAT_LOAD_SHARE);
 		disable_auto_data_large_pages |=
@@ -1405,6 +1411,14 @@ hat_init(void)
 		shctx_on = 0;
 	}
 
+	/*
+	 * If support for page size search is disabled via /etc/system
+	 * set pgsz_search_on to 0 here.
+	 */
+	if (pgsz_search_on && disable_pgsz_search) {
+		pgsz_search_on = 0;
+	}
+
 	if (shctx_on) {
 		srd_buckets = kmem_zalloc(SFMMU_MAX_SRD_BUCKETS *
 		    sizeof (srd_buckets[0]), KM_SLEEP);
@@ -1579,6 +1593,11 @@ hat_alloc(struct as *as)
 	sfmmup->sfmmu_scdp = NULL;
 	sfmmup->sfmmu_scd_link.next = NULL;
 	sfmmup->sfmmu_scd_link.prev = NULL;
+
+	if (&mmu_set_pgsz_order && sfmmup != ksfmmup) {
+		mmu_set_pgsz_order(sfmmup, 0);
+		sfmmu_init_pgsz_hv(sfmmup);
+	}
 	return (sfmmup);
 }
 
@@ -2061,6 +2080,8 @@ hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len,
 			newhat->sfmmu_scdismttecnt[i] =
 			    hat->sfmmu_scdismttecnt[i];
 		}
+	} else if (&mmu_set_pgsz_order) {
+		mmu_set_pgsz_order(newhat, 0);
 	}
 
 	sfmmu_check_page_sizes(newhat, 1);
@@ -3204,11 +3225,17 @@ sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep,
 		if (!(sfmmup->sfmmu_tteflags & tteflag)) {
 			hatlockp = sfmmu_hat_enter(sfmmup);
 			sfmmup->sfmmu_tteflags |= tteflag;
+			if (&mmu_set_pgsz_order) {
+				mmu_set_pgsz_order(sfmmup, 1);
+			}
 			sfmmu_hat_exit(hatlockp);
 		}
 	} else if (!(sfmmup->sfmmu_rtteflags & tteflag)) {
 		hatlockp = sfmmu_hat_enter(sfmmup);
 		sfmmup->sfmmu_rtteflags |= tteflag;
+		if (&mmu_set_pgsz_order && sfmmup != ksfmmup) {
+			mmu_set_pgsz_order(sfmmup, 1);
+		}
 		sfmmu_hat_exit(hatlockp);
 	}
 	/*
@@ -8435,6 +8462,8 @@ ism_tsb_entries(sfmmu_t *sfmmup, int szc)
 	int		j;
 	sf_scd_t	*scdp;
 	uchar_t		rid;
+	hatlock_t	*hatlockp;
+	int		ismnotinscd = 0;
 
 	ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
 	scdp = sfmmup->sfmmu_scdp;
@@ -8455,9 +8484,21 @@ ism_tsb_entries(sfmmu_t *sfmmup, int szc)
 				/* ISMs is not in SCD */
 				npgs +=
 				    ism_map[j].imap_ismhat->sfmmu_ttecnt[szc];
+				ismnotinscd = 1;
 			}
 		}
 	}
+
+	if (&mmu_set_pgsz_order) {
+		hatlockp = sfmmu_hat_enter(sfmmup);
+		if (ismnotinscd) {
+			SFMMU_FLAGS_SET(sfmmup, HAT_ISMNOTINSCD);
+		} else {
+			SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMNOTINSCD);
+		}
+		sfmmu_hat_exit(hatlockp);
+	}
+
 	sfmmup->sfmmu_ismttecnt[szc] = npgs;
 	sfmmup->sfmmu_scdismttecnt[szc] = npgs_scd;
 	return (npgs);
@@ -8791,6 +8832,11 @@ hat_share(struct hat *sfmmup, caddr_t addr,
 		sfmmu_hat_exit(hatlockp);
 	}
 
+	if (&mmu_set_pgsz_order) {
+		hatlockp = sfmmu_hat_enter(sfmmup);
+		mmu_set_pgsz_order(sfmmup, 1);
+		sfmmu_hat_exit(hatlockp);
+	}
 	sfmmu_ismhat_exit(sfmmup, 0);
 
 	/*
@@ -8986,6 +9032,11 @@ hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc)
 			(void) ism_tsb_entries(sfmmup, i);
 	}
 
+	if (&mmu_set_pgsz_order) {
+		hatlockp = sfmmu_hat_enter(sfmmup);
+		mmu_set_pgsz_order(sfmmup, 1);
+		sfmmu_hat_exit(hatlockp);
+	}
 	sfmmu_ismhat_exit(sfmmup, 0);
 
 	/*
@@ -10958,7 +11009,7 @@ sfmmu_mlist_reloc_exit(kmutex_t *low, kmutex_t *high)
 	mutex_exit(low);
 }
 
-static hatlock_t *
+hatlock_t *
 sfmmu_hat_enter(sfmmu_t *sfmmup)
 {
 	hatlock_t	*hatlockp;
@@ -10985,7 +11036,7 @@ sfmmu_hat_tryenter(sfmmu_t *sfmmup)
 	return (NULL);
 }
 
-static void
+void
 sfmmu_hat_exit(hatlock_t *hatlockp)
 {
 	if (hatlockp != NULL)
@@ -12128,8 +12179,13 @@ sfmmu_rgntlb_demap(caddr_t addr, sf_region_t *rgnp,
 		 * then we flush the shared TSBs, if we find a private hat,
 		 * which is part of an SCD, but where the region
 		 * is not part of the SCD then we flush the private TSBs.
+		 *
+		 * If the Rock page size register is present, then SCDs
+		 * may contain both shared and private pages, so we cannot
+		 * use this optimization to avoid flushing private TSBs.
 		 */
-		if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL &&
+		if (pgsz_search_on == 0 &&
+		    !sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL &&
 		    !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
 			scdp = sfmmup->sfmmu_scdp;
 			if (SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
@@ -12258,8 +12314,13 @@ sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup,
 		 * which is part of an SCD, but where the region
 		 * corresponding to this va is not part of the SCD then we
 		 * flush the private TSBs.
+		 *
+		 * If the Rock page size register is present, then SCDs
+		 * may contain both shared and private pages, so we cannot
+		 * use this optimization to avoid flushing private TSBs.
 		 */
-		if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL &&
+		if (pgsz_search_on == 0 &&
+		    !sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL &&
 		    !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD) &&
 		    !SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) {
 			if (!find_ism_rid(sfmmup, ism_sfmmup, va,
@@ -12569,7 +12630,7 @@ sfmmu_tlb_range_demap(demap_range_t *dmrp)
 * A per-process (PP) lock is used to synchronize ctx allocations in
 * resume() and ctx invalidations here.
 */
-static void
+void
 sfmmu_invalidate_ctx(sfmmu_t *sfmmup)
 {
 	cpuset_t cpuset;
@@ -14095,6 +14156,9 @@ rfound:
 		if (tteflag && !(sfmmup->sfmmu_rtteflags & tteflag)) {
 			hatlockp = sfmmu_hat_enter(sfmmup);
 			sfmmup->sfmmu_rtteflags |= tteflag;
+			if (&mmu_set_pgsz_order) {
+				mmu_set_pgsz_order(sfmmup, 1);
+			}
 			sfmmu_hat_exit(hatlockp);
 		}
 		hatlockp = sfmmu_hat_enter(sfmmup);
@@ -15150,6 +15214,9 @@ sfmmu_join_scd(sf_scd_t *scdp, sfmmu_t *sfmmup)
 		ASSERT(sfmmup->sfmmu_ttecnt[i] >= scdp->scd_rttecnt[i]);
 		atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
 		    -sfmmup->sfmmu_scdrttecnt[i]);
+		if (!sfmmup->sfmmu_ttecnt[i]) {
+			sfmmup->sfmmu_tteflags &= ~(1 << i);
+		}
 	}
 	/* update tsb0 inflation count */
 	if (old_scdp != NULL) {
@@ -15160,6 +15227,9 @@ sfmmu_join_scd(sf_scd_t *scdp, sfmmu_t *sfmmup)
 	    scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt);
 	sfmmup->sfmmu_tsb0_4minflcnt -=
 	    scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;
+	if (&mmu_set_pgsz_order) {
+		mmu_set_pgsz_order(sfmmup, 0);
+	}
 	sfmmu_hat_exit(hatlockp);
 
 	if (old_scdp != NULL) {
@@ -15219,7 +15289,7 @@ sfmmu_find_scd(sfmmu_t *sfmmup)
 	for (scdp = srdp->srd_scdp; scdp != NULL; scdp = scdp->scd_next) {
 		SF_RGNMAP_EQUAL(&scdp->scd_region_map,
-		    &sfmmup->sfmmu_region_map, ret);
+		    &sfmmup->sfmmu_region_map, SFMMU_RGNMAP_WORDS, ret);
 		if (ret == 1) {
 			SF_SCD_INCR_REF(scdp);
 			mutex_exit(&srdp->srd_scd_mutex);
@@ -15367,6 +15437,10 @@ sfmmu_leave_scd(sfmmu_t *sfmmup, uchar_t r_type)
 			    scdp->scd_rttecnt[i]);
 			atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
 			    sfmmup->sfmmu_scdrttecnt[i]);
+			if (sfmmup->sfmmu_ttecnt[i] &&
+			    (sfmmup->sfmmu_tteflags & (1 << i)) == 0) {
+				sfmmup->sfmmu_tteflags |= (1 << i);
+			}
 			sfmmup->sfmmu_scdrttecnt[i] = 0;
 			/* update ismttecnt to include SCD ism before hat leaves SCD */
 			sfmmup->sfmmu_ismttecnt[i] += sfmmup->sfmmu_scdismttecnt[i];
@@ -15380,6 +15454,9 @@ sfmmu_leave_scd(sfmmu_t *sfmmup, uchar_t r_type)
 	}
 
 	sfmmup->sfmmu_scdp = NULL;
+	if (&mmu_set_pgsz_order) {
+		mmu_set_pgsz_order(sfmmup, 0);
+	}
 	sfmmu_hat_exit(hatlockp);
 
 	/*
@@ -15425,7 +15502,8 @@ sfmmu_destroy_scd(sf_srd_t *srdp, sf_scd_t *scdp, sf_region_map_t *scd_rmap)
 	 * It is possible that the scd has been freed and reallocated with a
 	 * different region map while we've been waiting for the srd_scd_mutex.
 	 */
-	SF_RGNMAP_EQUAL(scd_rmap, &sp->scd_region_map, ret);
+	SF_RGNMAP_EQUAL(scd_rmap, &sp->scd_region_map,
+	    SFMMU_RGNMAP_WORDS, ret);
 	if (ret != 1) {
 		mutex_exit(&srdp->srd_scd_mutex);
 		return;
diff --git a/usr/src/uts/sfmmu/vm/hat_sfmmu.h b/usr/src/uts/sfmmu/vm/hat_sfmmu.h
index 0a3ddfc2ec..5e56264869 100644
--- a/usr/src/uts/sfmmu/vm/hat_sfmmu.h
+++ b/usr/src/uts/sfmmu/vm/hat_sfmmu.h
@@ -337,15 +337,15 @@ typedef union sf_region_map_u {
 }
 
 /*
- * Returns 1 if map1 and map2 are equal.
+ * Returns 1 if region map1 and map2 are equal.
 */
-#define	SF_RGNMAP_EQUAL(map1, map2, rval)	{		\
+#define	SF_RGNMAP_EQUAL(map1, map2, words, rval) {		\
 	int _i;							\
-	for (_i = 0; _i < SFMMU_RGNMAP_WORDS; _i++) {		\
+	for (_i = 0; _i < words; _i++) {			\
 		if ((map1)->bitmap[_i] != (map2)->bitmap[_i])	\
 			break;					\
 	}							\
-	if (_i < SFMMU_RGNMAP_WORDS)				\
+	if (_i < words)						\
 		rval = 0;					\
 	else							\
 		rval = 1;					\
@@ -613,6 +613,9 @@ extern uint_t nctxs;
 
 extern void sfmmu_cpu_init(cpu_t *);
 extern void sfmmu_cpu_cleanup(cpu_t *);
+extern void sfmmu_invalidate_ctx(sfmmu_t *);
+extern hatlock_t *sfmmu_hat_enter(sfmmu_t *);
+extern void sfmmu_hat_exit(hatlock_t *);
 
 /*
  * The following structure is used to get MMU context domain information for
@@ -649,7 +652,6 @@ typedef struct sfmmu_ctx {
 	uint64_t	cnum:16;
 } sfmmu_ctx_t;
 
-
 /*
  * The platform dependent hat structure.
  * tte counts should be protected by cas.
@@ -711,7 +713,11 @@ struct hat {
 	sf_rgn_link_t	*sfmmu_hmeregion_links[SFMMU_L1_HMERLINKS];
 	sf_rgn_link_t	sfmmu_scd_link;	/* link to scd or pending queue */
 #ifdef sun4v
+	/* ttecnt for Rock pagesize register management */
+	ulong_t		sfmmu_mmuttecnt[MMU_PAGE_SIZES];
 	struct hv_tsb_block sfmmu_hvblock;
+	struct hv_pgsz_order sfmmu_pgsz_order; /* pagesize search order */
+	uint8_t		sfmmu_pgsz_map;	/* bit map to control shared pgsz use */
 #endif
 	/*
 	 * sfmmu_ctxs is a variable length array of max_mmu_ctxdoms # of
@@ -757,6 +763,8 @@ struct sf_scd {
 
 extern int disable_shctx;
 extern int shctx_on;
+extern int pgsz_search_on;
+extern int disable_pgsz_search;
 
 /*
 * bit mask for managing vac conflicts on large pages.
 */
@@ -870,6 +878,7 @@ struct ctx_trace {
 #define	HAT_CTX1_FLAG	0x100	/* ISM imap hatflag for ctx1 */
 #define	HAT_JOIN_SCD	0x200	/* region is joining scd */
 #define	HAT_ALLCTX_INVALID	0x400	/* all per-MMU ctxs are invalidated */
+#define	HAT_ISMNOTINSCD	0x800	/* Not all ISM segs are in the SCD */
 
 #define	SFMMU_LGPGS_INUSE(sfmmup)	\
 	(((sfmmup)->sfmmu_tteflags | (sfmmup)->sfmmu_rtteflags) ||	\
@@ -1813,7 +1822,8 @@ struct tsbmiss {
 	uintptr_t	scratch[3];
 	ulong_t		shmermap[SFMMU_HMERGNMAP_WORDS];	/* 8 bytes */
 	ulong_t		scd_shmermap[SFMMU_HMERGNMAP_WORDS];	/* 8 bytes */
-	uint8_t		pad[48];			/* pad to 64 bytes */
+	uint8_t		pgsz_bitmap;	/* limits ctx1 page sizes */
+	uint8_t		pad[47];	/* pad to 64 bytes */
 };
 
 /*
@@ -2344,11 +2354,17 @@ extern struct hme_blk *sfmmu_hmetohblk(struct sf_hment *);
 #pragma weak mmu_large_pages_disabled
 #pragma weak mmu_set_ctx_page_sizes
 #pragma weak mmu_check_page_sizes
+#pragma weak mmu_set_pgsz_order
+#pragma weak sfmmu_init_pgsz_hv
+#pragma weak mmu_enable_pgsz_search
 
 extern void mmu_init_scd(sf_scd_t *);
 extern uint_t mmu_large_pages_disabled(uint_t);
 extern void mmu_set_ctx_page_sizes(sfmmu_t *);
 extern void mmu_check_page_sizes(sfmmu_t *, uint64_t *);
+extern void mmu_set_pgsz_order(sfmmu_t *, int);
+extern void sfmmu_init_pgsz_hv(sfmmu_t *);
+extern void mmu_enable_pgsz_search();
 
 extern sfmmu_t *ksfmmup;
 extern caddr_t ktsb_base;
@@ -2390,12 +2406,15 @@ extern uint_t disable_large_pages;
 extern uint_t disable_ism_large_pages;
 extern uint_t disable_auto_data_large_pages;
 extern uint_t disable_auto_text_large_pages;
+extern uint_t disable_shctx_large_pages;
+
+extern void sfmmu_patch_shctx(void);
+extern void sfmmu_patch_pgsz_reg(void);
 
 /* kpm externals */
 extern pfn_t sfmmu_kpm_vatopfn(caddr_t);
 extern void sfmmu_kpm_patch_tlbm(void);
 extern void sfmmu_kpm_patch_tsbm(void);
-extern void sfmmu_patch_shctx(void);
 extern void sfmmu_kpm_load_tsb(caddr_t, tte_t *, int);
 extern void sfmmu_kpm_unload_tsb(caddr_t, int);
 extern void sfmmu_kpm_tsbmtl(short *, uint_t *, int);
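
The reworked SF_RGNMAP_EQUAL macro above takes an explicit word count instead of hard-coding SFMMU_RGNMAP_WORDS, so callers can compare only a prefix of a region bitmap. Below is a minimal, standalone illustration of the new calling convention; the 4-word map type, RGNMAP_WORDS, and the values in main() are invented for the example.

#include <stdio.h>

#define	RGNMAP_WORDS	4

typedef struct {
	unsigned long bitmap[RGNMAP_WORDS];
} rgn_map_t;

/* Same shape as the macro in hat_sfmmu.h: rval gets 1 if equal, else 0. */
#define	SF_RGNMAP_EQUAL(map1, map2, words, rval) {		\
	int _i;							\
	for (_i = 0; _i < words; _i++) {			\
		if ((map1)->bitmap[_i] != (map2)->bitmap[_i])	\
			break;					\
	}							\
	if (_i < words)						\
		rval = 0;					\
	else							\
		rval = 1;					\
}

int
main(void)
{
	rgn_map_t a = { { 1, 2, 3, 0 } };
	rgn_map_t b = { { 1, 2, 3, 9 } };
	int ret;

	SF_RGNMAP_EQUAL(&a, &b, RGNMAP_WORDS, ret);	/* maps differ: 0 */
	printf("full compare: %d\n", ret);
	SF_RGNMAP_EQUAL(&a, &b, 3, ret);		/* 3-word prefix equal: 1 */
	printf("prefix compare: %d\n", ret);
	return (0);
}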