author		Jason Beloro <Jason.Beloro@Sun.COM>	2009-08-06 17:39:39 -0700
committer	Jason Beloro <Jason.Beloro@Sun.COM>	2009-08-06 17:39:39 -0700
commit		9d0d62ad2e60e8f742a2e723d06e88352ee6a1f3
tree		016e2a6b2f674016c46785258d0ff85e6b1bce09 /usr/src/uts/sfmmu
parent		32a6953793c636df949ca1ae3555438159bda3f6
6858457 Remove Solaris support for UltraSPARC-AT10 processor
Diffstat (limited to 'usr/src/uts/sfmmu')
-rw-r--r--	usr/src/uts/sfmmu/ml/sfmmu_asm.s	80
-rw-r--r--	usr/src/uts/sfmmu/ml/sfmmu_kdi.s	18
-rw-r--r--	usr/src/uts/sfmmu/vm/hat_sfmmu.c	251
-rw-r--r--	usr/src/uts/sfmmu/vm/hat_sfmmu.h	48
4 files changed, 64 insertions, 333 deletions
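
Most of what this change deletes is the "soft exec" machinery that existed only for the non-coherent I-cache of the UltraSPARC-AT10 (Rock): executable TTEs were loaded with the hardware execute bit clear and a software soft-exec bit set, so the first instruction fetch trapped, letting the kernel record the execution (the P_EXEC page flag) before enabling real execute permission. The toy model below illustrates that scheme; the struct, helper names, and main() harness are illustrative stand-ins, not the deleted kernel code.

#include <stdio.h>
#include <stdbool.h>

struct toy_tte {
	bool valid;
	bool hw_exec;		/* hardware execute permission */
	bool soft_exec;		/* software "really executable" bit */
};

static bool page_executed;	/* stands in for the removed P_EXEC flag */

/* Load an executable mapping, as the deleted sfmmu_memtte() logic did. */
static void
load_exec_mapping(struct toy_tte *t, bool icache_is_coherent)
{
	t->valid = true;
	if (icache_is_coherent) {
		t->hw_exec = true;	/* normal case: exec allowed at once */
	} else {
		t->hw_exec = false;	/* force a fault on first ifetch */
		t->soft_exec = true;	/* remember it is really executable */
	}
}

/* What the deleted tsb_validtte soft-exec path did on an ITLB fault. */
static void
itlb_exec_fault(struct toy_tte *t)
{
	if (t->soft_exec) {
		t->hw_exec = true;	/* first execution: enable HW exec */
		page_executed = true;	/* I$ must be flushed on page reuse */
	}
	/* otherwise this is a genuine protection fault */
}

int
main(void)
{
	struct toy_tte t = { false, false, false };

	load_exec_mapping(&t, false);	/* non-coherent I-cache case */
	itlb_exec_fault(&t);
	printf("hw_exec=%d P_EXEC=%d\n", t.hw_exec, page_executed);
	return (0);
}
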
diff --git a/usr/src/uts/sfmmu/ml/sfmmu_asm.s b/usr/src/uts/sfmmu/ml/sfmmu_asm.s
index 6ecd81de3a..78bc5d21b7 100644
--- a/usr/src/uts/sfmmu/ml/sfmmu_asm.s
+++ b/usr/src/uts/sfmmu/ml/sfmmu_asm.s
@@ -248,7 +248,6 @@
*/ ;\
sllx tagtarget, TTARGET_VA_SHIFT, tagtarget ;\
ldxa [ttepa]ASI_MEM, tte ;\
- TTE_CLR_SOFTEXEC_ML(tte) ;\
srlx tagtarget, TTARGET_VA_SHIFT, tagtarget ;\
sethi %hi(TSBTAG_INVALID), tmp2 ;\
add tsbep, TSBE_TAG, tmp1 ;\
@@ -371,7 +370,6 @@ label: ;\
#define TSB_UPDATE(tsbep, tteva, tagtarget, tmp1, tmp2, label) \
/* can't rd tteva after locking tsb because it can tlb miss */ ;\
ldx [tteva], tteva /* load tte */ ;\
- TTE_CLR_SOFTEXEC_ML(tteva) ;\
TSB_LOCK_ENTRY(tsbep, tmp1, tmp2, label) ;\
sethi %hi(TSBTAG_INVALID), tmp2 ;\
add tsbep, TSBE_TAG, tmp1 ;\
@@ -946,11 +944,6 @@ sfmmu_patch_shctx(void)
{
}
-void
-sfmmu_patch_pgsz_reg(void)
-{
-}
-
/* ARGSUSED */
void
sfmmu_load_tsbe(struct tsbe *tsbep, uint64_t vaddr, tte_t *ttep, int phys)
@@ -1441,19 +1434,6 @@ do_patch:
#endif /* sun4u */
SET_SIZE(sfmmu_patch_shctx)
- ENTRY_NP(sfmmu_patch_pgsz_reg)
-#ifdef sun4u
- retl
- nop
-#else /* sun4u */
- set sfmmu_pgsz_load_mmustate_patch, %o0
- MAKE_NOP_INSTR(%o1)
- st %o1, [%o0]
- retl
- flush %o0
-#endif /* sun4u */
- SET_SIZE(sfmmu_patch_pgsz_reg)
-
/*
* Routine that loads an entry into a tsb using virtual addresses.
* Locking is required since all cpus can use the same TSB.
@@ -2408,13 +2388,6 @@ label/**/4: ;\
ba,a,pt %xcc, label/**/8 ;\
label/**/6: ;\
GET_SCDSHMERMAP(tsbarea, hmeblkpa, hatid, hmemisc) ;\
- /* ;\
- * hmemisc is set to 1 if this is a shared mapping. It will ;\
- * be cleared by CHECK_SHARED_PGSZ if this pagesize is not ;\
- * allowed, in order to limit the number of entries in the ;\
- * pagesize register. ;\
- */ ;\
- CHECK_SHARED_PGSZ(tsbarea, tte, hatid, hmemisc, label/**/9) ;\
ldn [tsbarea + (TSBMISS_SCRATCH + TSBMISS_HMEBP)], hatid ;\
label/**/7: ;\
set TTE_SUSPEND, hatid ;\
@@ -3295,37 +3268,8 @@ tsb_shme_checktte:
stub %g1, [%g6 + TSBMISS_URTTEFLAGS]
SAVE_CTX1(%g7, %g2, %g1, tsb_shmel)
- ba tsb_validtte
#endif /* sun4u && !UTSB_PHYS */
-tsb_ism_validtte:
-#ifdef sun4v
- /*
- * Check pagesize against bitmap for Rock page size register,
- * for ism mappings.
- *
- * %g1, %g2 = scratch
- * %g3 = tte
- * g4 = tte pa
- * g5 = tte va
- * g6 = tsbmiss area
- * %g7 = tt
- */
- ldub [%g6 + TSBMISS_URTTEFLAGS], %g1
- and %g1, HAT_CHKCTX1_FLAG, %g2
- /*
- * Clear the HAT_CHKCTX1_FLAG in %g2 if this shared pagesize is not allowed
- * to limit the number of entries in the pagesize search register.
- */
- CHECK_SHARED_PGSZ(%g6, %g3, %g7, %g2, ism_chk_pgsz)
- andn %g1, HAT_CHKCTX1_FLAG, %g1
- or %g1, %g2, %g1
- stub %g1, [%g6 + TSBMISS_URTTEFLAGS]
- brz %g2, tsb_validtte
- rdpr %tt, %g7
- SAVE_CTX1(%g7, %g1, %g2, tsb_shctxl)
-#endif /* sun4v */
-
tsb_validtte:
/*
* g3 = tte
@@ -3355,11 +3299,9 @@ tsb_validtte:
ba,pt %xcc, tsb_update_tl1
nop
4:
- /*
- * ITLB translation was found but execute permission is
- * disabled. If we have software execute permission (soft exec
- * bit is set), then enable hardware execute permission.
- * Otherwise continue with a protection violation.
+ /*
+ * If ITLB miss check exec bit.
+ * If not set treat as invalid TTE.
*/
cmp %g7, T_INSTR_MMU_MISS
be,pn %icc, 5f
@@ -3368,11 +3310,9 @@ tsb_validtte:
bne,pt %icc, 3f
andcc %g3, TTE_EXECPRM_INT, %g0 /* check execute bit is set */
5:
- bnz,pn %icc, 3f
- TTE_CHK_SOFTEXEC_ML(%g3) /* check soft execute */
bz,pn %icc, tsb_protfault
nop
- TTE_SET_EXEC_ML(%g3, %g4, %g7, tsb_lset_exec)
+
3:
/*
* Set reference bit if not already set
@@ -3415,7 +3355,6 @@ tsb_validtte:
#endif /* sun4v */
tsb_update_tl1:
- TTE_CLR_SOFTEXEC_ML(%g3)
srlx %g2, TTARGET_CTX_SHIFT, %g7
brz,pn %g7, tsb_kernel
#ifdef sun4v
@@ -3658,7 +3597,10 @@ tsb_ism:
ldub [%g6 + TSBMISS_URTTEFLAGS], %g5
or %g5, HAT_CHKCTX1_FLAG, %g5
stub %g5, [%g6 + TSBMISS_URTTEFLAGS]
+ rdpr %tt, %g5
+ SAVE_CTX1(%g5, %g3, %g1, tsb_shctxl)
#endif /* defined(sun4v) || defined(UTSB_PHYS) */
+
/*
* ISM pages are always locked down.
* If we can't find the tte then pagefault
@@ -3690,7 +3632,7 @@ tsb_ism_32M:
/* NOT REACHED */
tsb_ism_32M_found:
- brlz,a,pt %g3, tsb_ism_validtte
+ brlz,a,pt %g3, tsb_validtte
rdpr %tt, %g7
ba,pt %xcc, tsb_ism_4M
nop
@@ -3708,7 +3650,7 @@ tsb_ism_256M:
tsb_ism_4M)
tsb_ism_256M_found:
- brlz,a,pt %g3, tsb_ism_validtte
+ brlz,a,pt %g3, tsb_validtte
rdpr %tt, %g7
tsb_ism_4M:
@@ -3721,7 +3663,7 @@ tsb_ism_4M:
/* NOT REACHED */
tsb_ism_4M_found:
- brlz,a,pt %g3, tsb_ism_validtte
+ brlz,a,pt %g3, tsb_validtte
rdpr %tt, %g7
tsb_ism_8K:
@@ -3735,7 +3677,7 @@ tsb_ism_8K:
/* NOT REACHED */
tsb_ism_8K_found:
- brlz,a,pt %g3, tsb_ism_validtte
+ brlz,a,pt %g3, tsb_validtte
rdpr %tt, %g7
tsb_pagefault:
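
With the Rock page-size-register bookkeeping gone, the ISM miss path above no longer detours through tsb_ism_validtte: the tsb_ism_*_found labels branch straight to tsb_validtte, and SAVE_CTX1 is done once in tsb_ism. The exec check that remains in tsb_validtte reduces to roughly the C-style paraphrase below (the committed code is SPARC assembly; the helper name and bit value here are illustrative, not from the source).

#include <stdbool.h>

#define TTE_EXECPRM_INT	0x1	/* illustrative placeholder, not the real bit */

/*
 * Paraphrase of the simplified tsb_validtte check: on an instruction-MMU
 * miss, a TTE without hardware execute permission is now simply a
 * protection fault; the soft-exec fixup path is gone.
 */
static bool
itlb_miss_is_protfault(unsigned long tte_intlo, bool is_instr_mmu_miss)
{
	return (is_instr_mmu_miss && (tte_intlo & TTE_EXECPRM_INT) == 0);
}
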
diff --git a/usr/src/uts/sfmmu/ml/sfmmu_kdi.s b/usr/src/uts/sfmmu/ml/sfmmu_kdi.s
index 615066961a..4e60c2e38a 100644
--- a/usr/src/uts/sfmmu/ml/sfmmu_kdi.s
+++ b/usr/src/uts/sfmmu/ml/sfmmu_kdi.s
@@ -20,7 +20,7 @@
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -328,21 +328,7 @@ kdi_trap_vatotte(void)
ldxa [%g2]ASI_MEM, %g1
brgez,a %g1, 4f
clr %g1
-4:
- /*
- * If soft execute bit is set, make sure HW execute permission
- * is also set. But, clear soft execute bit before giving tte to
- * the caller.
- */
- TTE_CHK_SOFTEXEC_ML(%g1)
- bz,pt %icc, 6f
- andcc %g1, TTE_EXECPRM_INT, %g0
- bnz,pt %icc, 7f
- nop
- TTE_SET_EXEC_ML(%g1, %g2, %g4, kdi_trap_vatotte)
-7:
- TTE_CLR_SOFTEXEC_ML(%g1)
- ba,a 6f
+4: ba,a 6f
5: add %g3, 1, %g3
set mmu_hashcnt, %g4
diff --git a/usr/src/uts/sfmmu/vm/hat_sfmmu.c b/usr/src/uts/sfmmu/vm/hat_sfmmu.c
index a25f1d9964..6fa557f2d3 100644
--- a/usr/src/uts/sfmmu/vm/hat_sfmmu.c
+++ b/usr/src/uts/sfmmu/vm/hat_sfmmu.c
@@ -184,14 +184,6 @@ void hat_pagecachectl(struct page *, int);
#define HAT_TMPNC 0x4
/*
- * This flag is set to 0 via the MD in platforms that do not support
- * I-cache coherency in hardware. Used to enable "soft exec" mode.
- * The MD "coherency" property is optional, and defaults to 1 (because
- * coherent I-cache is the norm.)
- */
-uint_t icache_is_coherent = 1;
-
-/*
* Flag to allow the creation of non-cacheable translations
* to system memory. It is off by default. At the moment this
* flag is used by the ecache error injector. The error injector
@@ -227,7 +219,6 @@ uint_t disable_large_pages = 0;
uint_t disable_ism_large_pages = (1 << TTE512K);
uint_t disable_auto_data_large_pages = 0;
uint_t disable_auto_text_large_pages = 0;
-uint_t disable_shctx_large_pages = 0;
/*
* Private sfmmu data structures for hat management
@@ -294,14 +285,6 @@ int disable_shctx = 0;
/* Internal variable, set by MD if the HW supports shctx feature */
int shctx_on = 0;
-/* Internal variable, set by MD if the HW supports the search order register */
-int pgsz_search_on = 0;
-/*
- * External /etc/system tunable, for controlling search order register
- * support.
- */
-int disable_pgsz_search = 0;
-
#ifdef DEBUG
static void check_scd_sfmmu_list(sfmmu_t **, sfmmu_t *, int);
#endif
@@ -481,6 +464,7 @@ static void sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
pfn_t, int);
static void sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int);
static void sfmmu_tlb_range_demap(demap_range_t *);
+static void sfmmu_invalidate_ctx(sfmmu_t *);
static void sfmmu_sync_mmustate(sfmmu_t *);
static void sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t);
@@ -589,7 +573,7 @@ mmu_ctx_t **mmu_ctxs_tbl; /* global array of context domains */
uint64_t mmu_saved_gnum = 0; /* to init incoming MMUs' gnums */
#define DEFAULT_NUM_CTXS_PER_MMU 8192
-uint_t nctxs = DEFAULT_NUM_CTXS_PER_MMU;
+static uint_t nctxs = DEFAULT_NUM_CTXS_PER_MMU;
int cache; /* describes system cache */
@@ -743,7 +727,11 @@ int sfmmu_page_spl_held(struct page *);
static void sfmmu_mlist_reloc_enter(page_t *, page_t *,
kmutex_t **, kmutex_t **);
static void sfmmu_mlist_reloc_exit(kmutex_t *, kmutex_t *);
-static hatlock_t *sfmmu_hat_tryenter(sfmmu_t *);
+static hatlock_t *
+ sfmmu_hat_enter(sfmmu_t *);
+static hatlock_t *
+ sfmmu_hat_tryenter(sfmmu_t *);
+static void sfmmu_hat_exit(hatlock_t *);
static void sfmmu_hat_lock_all(void);
static void sfmmu_hat_unlock_all(void);
static void sfmmu_ismhat_enter(sfmmu_t *, int);
@@ -1067,14 +1055,12 @@ hat_init_pagesizes()
disable_ism_large_pages |= disable_large_pages;
disable_auto_data_large_pages = disable_large_pages;
disable_auto_text_large_pages = disable_large_pages;
- disable_shctx_large_pages |= disable_large_pages;
/*
* Initialize mmu-specific large page sizes.
*/
if (&mmu_large_pages_disabled) {
disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD);
- disable_shctx_large_pages |= disable_large_pages;
disable_ism_large_pages |=
mmu_large_pages_disabled(HAT_LOAD_SHARE);
disable_auto_data_large_pages |=
@@ -1413,14 +1399,6 @@ hat_init(void)
shctx_on = 0;
}
- /*
- * If support for page size search is disabled via /etc/system
- * set pgsz_search_on to 0 here.
- */
- if (pgsz_search_on && disable_pgsz_search) {
- pgsz_search_on = 0;
- }
-
if (shctx_on) {
srd_buckets = kmem_zalloc(SFMMU_MAX_SRD_BUCKETS *
sizeof (srd_buckets[0]), KM_SLEEP);
@@ -1595,11 +1573,6 @@ hat_alloc(struct as *as)
sfmmup->sfmmu_scdp = NULL;
sfmmup->sfmmu_scd_link.next = NULL;
sfmmup->sfmmu_scd_link.prev = NULL;
-
- if (&mmu_set_pgsz_order && sfmmup != ksfmmup) {
- mmu_set_pgsz_order(sfmmup, 0);
- sfmmu_init_pgsz_hv(sfmmup);
- }
return (sfmmup);
}
@@ -2082,8 +2055,6 @@ hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len,
newhat->sfmmu_scdismttecnt[i] =
hat->sfmmu_scdismttecnt[i];
}
- } else if (&mmu_set_pgsz_order) {
- mmu_set_pgsz_order(newhat, 0);
}
sfmmu_check_page_sizes(newhat, 1);
@@ -2579,7 +2550,7 @@ sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps,
void
sfmmu_memtte(tte_t *ttep, pfn_t pfn, uint_t attr, int tte_sz)
{
- ASSERT((attr & ~(SFMMU_LOAD_ALLATTR | HAT_ATTR_NOSOFTEXEC)) == 0);
+ ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
ttep->tte_inthi = MAKE_TTE_INTHI(pfn, attr, tte_sz, 0 /* hmenum */);
ttep->tte_intlo = MAKE_TTE_INTLO(pfn, attr, tte_sz, 0 /* hmenum */);
@@ -2593,18 +2564,6 @@ sfmmu_memtte(tte_t *ttep, pfn_t pfn, uint_t attr, int tte_sz)
if (TTE_IS_NFO(ttep) && TTE_IS_EXECUTABLE(ttep)) {
panic("sfmmu_memtte: can't set both NFO and EXEC bits");
}
-
- /*
- * Disable hardware execute permission to force a fault if
- * this page is executed, so we can detect the execution. Set
- * the soft exec bit to remember that this TTE has execute
- * permission.
- */
- if (TTE_IS_EXECUTABLE(ttep) && (attr & HAT_ATTR_NOSOFTEXEC) == 0 &&
- icache_is_coherent == 0) {
- TTE_CLR_EXEC(ttep);
- TTE_SET_SOFTEXEC(ttep);
- }
}
/*
@@ -3095,26 +3054,9 @@ sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep,
(void *)hmeblkp);
}
ASSERT(TTE_CSZ(&tteold) == TTE_CSZ(ttep));
-
- if (TTE_IS_EXECUTABLE(&tteold) && TTE_IS_SOFTEXEC(ttep)) {
- TTE_SET_EXEC(ttep);
- }
}
if (pp) {
- /*
- * If we know that this page will be executed, because
- * it was in the past (PP_ISEXEC is already true), or
- * if the caller says it will likely be executed
- * (HAT_LOAD_TEXT is true), then there is no need to
- * dynamically detect execution with a soft exec
- * fault. Enable hardware execute permission now.
- */
- if ((PP_ISEXEC(pp) || (flags & HAT_LOAD_TEXT)) &&
- TTE_IS_SOFTEXEC(ttep)) {
- TTE_SET_EXEC(ttep);
- }
-
if (size == TTE8K) {
#ifdef VAC
/*
@@ -3138,12 +3080,6 @@ sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep,
sfmmu_page_exit(pmtx);
}
- if (TTE_EXECUTED(ttep)) {
- pmtx = sfmmu_page_enter(pp);
- PP_SETEXEC(pp);
- sfmmu_page_exit(pmtx);
- }
-
} else if (sfmmu_pagearray_setup(vaddr, pps, ttep, remap)) {
/*
* sfmmu_pagearray_setup failed so return
@@ -3151,9 +3087,6 @@ sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep,
sfmmu_mlist_exit(pml);
return (1);
}
-
- } else if (TTE_IS_SOFTEXEC(ttep)) {
- TTE_SET_EXEC(ttep);
}
/*
@@ -3227,17 +3160,11 @@ sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep,
if (!(sfmmup->sfmmu_tteflags & tteflag)) {
hatlockp = sfmmu_hat_enter(sfmmup);
sfmmup->sfmmu_tteflags |= tteflag;
- if (&mmu_set_pgsz_order) {
- mmu_set_pgsz_order(sfmmup, 1);
- }
sfmmu_hat_exit(hatlockp);
}
} else if (!(sfmmup->sfmmu_rtteflags & tteflag)) {
hatlockp = sfmmu_hat_enter(sfmmup);
sfmmup->sfmmu_rtteflags |= tteflag;
- if (&mmu_set_pgsz_order && sfmmup != ksfmmup) {
- mmu_set_pgsz_order(sfmmup, 1);
- }
sfmmu_hat_exit(hatlockp);
}
/*
@@ -3284,8 +3211,7 @@ sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep,
* ref bit in tteload.
*/
ASSERT(TTE_IS_REF(ttep));
- if (TTE_IS_MOD(&tteold) || (TTE_EXECUTED(&tteold) &&
- !TTE_IS_EXECUTABLE(ttep))) {
+ if (TTE_IS_MOD(&tteold)) {
sfmmu_ttesync(sfmmup, vaddr, &tteold, pp);
}
/*
@@ -3416,12 +3342,6 @@ sfmmu_pagearray_setup(caddr_t addr, page_t **pps, tte_t *ttep, int remap)
sfmmu_page_exit(pmtx);
}
- if (TTE_EXECUTED(ttep)) {
- pmtx = sfmmu_page_enter(pp);
- PP_SETEXEC(pp);
- sfmmu_page_exit(pmtx);
- }
-
/*
* If this is a remap we skip vac & contiguity checks.
*/
@@ -5052,11 +4972,9 @@ sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
continue;
}
- if ((tteflags.tte_intlo & TTE_HWWR_INT) ||
- (TTE_EXECUTED(&tte) &&
- !TTE_IS_EXECUTABLE(&ttemod))) {
+ if (tteflags.tte_intlo & TTE_HWWR_INT) {
/*
- * need to sync if clearing modify/exec bit.
+ * need to sync if we are clearing modify bit.
*/
sfmmu_ttesync(sfmmup, addr, &tte, pp);
}
@@ -5109,14 +5027,6 @@ sfmmu_vtop_attr(uint_t attr, int mode, tte_t *ttemaskp)
ttevalue.tte_intlo = MAKE_TTEATTR_INTLO(attr);
ttemaskp->tte_inthi = TTEINTHI_ATTR;
ttemaskp->tte_intlo = TTEINTLO_ATTR;
- if (!icache_is_coherent) {
- if (!(attr & PROT_EXEC)) {
- TTE_SET_SOFTEXEC(ttemaskp);
- } else {
- TTE_CLR_EXEC(ttemaskp);
- TTE_SET_SOFTEXEC(&ttevalue);
- }
- }
break;
case SFMMU_SETATTR:
ASSERT(!(attr & ~HAT_PROT_MASK));
@@ -5171,9 +5081,6 @@ sfmmu_ptov_attr(tte_t *ttep)
if (TTE_IS_EXECUTABLE(ttep)) {
attr |= PROT_EXEC;
}
- if (TTE_IS_SOFTEXEC(ttep)) {
- attr |= PROT_EXEC;
- }
if (!TTE_IS_PRIVILEGED(ttep)) {
attr |= PROT_USER;
}
@@ -5390,11 +5297,6 @@ sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
ttemod = tte;
TTE_SET_LOFLAGS(&ttemod, tteflags, pprot);
- ASSERT(TTE_IS_SOFTEXEC(&tte) ==
- TTE_IS_SOFTEXEC(&ttemod));
- ASSERT(TTE_IS_EXECUTABLE(&tte) ==
- TTE_IS_EXECUTABLE(&ttemod));
-
#if defined(SF_ERRATA_57)
if (check_exec && addr < errata57_limit)
ttemod.tte_exec_perm = 0;
@@ -6094,8 +5996,7 @@ again:
continue;
}
- if (!(flags & HAT_UNLOAD_NOSYNC) ||
- (pp != NULL && TTE_EXECUTED(&tte))) {
+ if (!(flags & HAT_UNLOAD_NOSYNC)) {
sfmmu_ttesync(sfmmup, addr, &tte, pp);
}
@@ -6435,47 +6336,35 @@ static void
sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp)
{
uint_t rm = 0;
- int sz = TTE_CSZ(ttep);
+ int sz;
pgcnt_t npgs;
ASSERT(TTE_IS_VALID(ttep));
- if (!TTE_IS_NOSYNC(ttep)) {
+ if (TTE_IS_NOSYNC(ttep)) {
+ return;
+ }
- if (TTE_IS_REF(ttep))
- rm |= P_REF;
+ if (TTE_IS_REF(ttep)) {
+ rm = P_REF;
+ }
+ if (TTE_IS_MOD(ttep)) {
+ rm |= P_MOD;
+ }
- if (TTE_IS_MOD(ttep))
- rm |= P_MOD;
+ if (rm == 0) {
+ return;
+ }
- if (rm != 0) {
- if (sfmmup != NULL && sfmmup->sfmmu_rmstat) {
- int i;
- caddr_t vaddr = addr;
+ sz = TTE_CSZ(ttep);
+ if (sfmmup != NULL && sfmmup->sfmmu_rmstat) {
+ int i;
+ caddr_t vaddr = addr;
- for (i = 0; i < TTEPAGES(sz); i++) {
- hat_setstat(sfmmup->sfmmu_as, vaddr,
- MMU_PAGESIZE, rm);
- vaddr += MMU_PAGESIZE;
- }
- }
+ for (i = 0; i < TTEPAGES(sz); i++, vaddr += MMU_PAGESIZE) {
+ hat_setstat(sfmmup->sfmmu_as, vaddr, MMU_PAGESIZE, rm);
}
- }
-
- if (!pp)
- return;
- /*
- * If software says this page is executable, and the page was
- * in fact executed (indicated by hardware exec permission
- * being enabled), then set P_EXEC on the page to remember
- * that it was executed. The I$ will be flushed when the page
- * is reassigned.
- */
- if (TTE_EXECUTED(ttep)) {
- rm |= P_EXEC;
- } else if (rm == 0) {
- return;
}
/*
@@ -6485,6 +6374,8 @@ sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp)
* The nrm bits are protected by the same mutex as
* the one that protects the page's mapping list.
*/
+ if (!pp)
+ return;
ASSERT(sfmmu_mlist_held(pp));
/*
* If the tte is for a large page, we need to sync all the
@@ -6503,8 +6394,7 @@ sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp)
ASSERT(pp);
ASSERT(sfmmu_mlist_held(pp));
if (((rm & P_REF) != 0 && !PP_ISREF(pp)) ||
- ((rm & P_MOD) != 0 && !PP_ISMOD(pp)) ||
- ((rm & P_EXEC) != 0 && !PP_ISEXEC(pp)))
+ ((rm & P_MOD) != 0 && !PP_ISMOD(pp)))
hat_page_setattr(pp, rm);
/*
@@ -6826,7 +6716,6 @@ hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp)
kmutex_t *low, *high;
spgcnt_t npages, i;
page_t *pl = NULL;
- uint_t ppattr;
int old_pil;
cpuset_t cpuset;
int cap_cpus;
@@ -6977,9 +6866,8 @@ hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp)
* Copy attributes. VAC consistency was handled above,
* if required.
*/
- ppattr = hat_page_getattr(tpp, (P_MOD | P_REF | P_RO));
- page_clr_all_props(rpp, 0);
- page_set_props(rpp, ppattr);
+ rpp->p_nrm = tpp->p_nrm;
+ tpp->p_nrm = 0;
rpp->p_index = tpp->p_index;
tpp->p_index = 0;
#ifdef VAC
@@ -7791,7 +7679,7 @@ hat_page_setattr(page_t *pp, uint_t flag)
noshuffle = flag & P_NSH;
flag &= ~P_NSH;
- ASSERT(!(flag & ~(P_MOD | P_REF | P_RO | P_EXEC)));
+ ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
/*
* nothing to do if attribute already set
@@ -8480,8 +8368,6 @@ ism_tsb_entries(sfmmu_t *sfmmup, int szc)
int j;
sf_scd_t *scdp;
uchar_t rid;
- hatlock_t *hatlockp;
- int ismnotinscd = 0;
ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
scdp = sfmmup->sfmmu_scdp;
@@ -8502,21 +8388,9 @@ ism_tsb_entries(sfmmu_t *sfmmup, int szc)
/* ISMs is not in SCD */
npgs +=
ism_map[j].imap_ismhat->sfmmu_ttecnt[szc];
- ismnotinscd = 1;
}
}
}
-
- if (&mmu_set_pgsz_order) {
- hatlockp = sfmmu_hat_enter(sfmmup);
- if (ismnotinscd) {
- SFMMU_FLAGS_SET(sfmmup, HAT_ISMNOTINSCD);
- } else {
- SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMNOTINSCD);
- }
- sfmmu_hat_exit(hatlockp);
- }
-
sfmmup->sfmmu_ismttecnt[szc] = npgs;
sfmmup->sfmmu_scdismttecnt[szc] = npgs_scd;
return (npgs);
@@ -8850,11 +8724,6 @@ hat_share(struct hat *sfmmup, caddr_t addr,
sfmmu_hat_exit(hatlockp);
}
- if (&mmu_set_pgsz_order) {
- hatlockp = sfmmu_hat_enter(sfmmup);
- mmu_set_pgsz_order(sfmmup, 1);
- sfmmu_hat_exit(hatlockp);
- }
sfmmu_ismhat_exit(sfmmup, 0);
/*
@@ -9050,11 +8919,6 @@ hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc)
(void) ism_tsb_entries(sfmmup, i);
}
- if (&mmu_set_pgsz_order) {
- hatlockp = sfmmu_hat_enter(sfmmup);
- mmu_set_pgsz_order(sfmmup, 1);
- sfmmu_hat_exit(hatlockp);
- }
sfmmu_ismhat_exit(sfmmup, 0);
/*
@@ -11027,7 +10891,7 @@ sfmmu_mlist_reloc_exit(kmutex_t *low, kmutex_t *high)
mutex_exit(low);
}
-hatlock_t *
+static hatlock_t *
sfmmu_hat_enter(sfmmu_t *sfmmup)
{
hatlock_t *hatlockp;
@@ -11054,7 +10918,7 @@ sfmmu_hat_tryenter(sfmmu_t *sfmmup)
return (NULL);
}
-void
+static void
sfmmu_hat_exit(hatlock_t *hatlockp)
{
if (hatlockp != NULL)
@@ -12197,13 +12061,8 @@ sfmmu_rgntlb_demap(caddr_t addr, sf_region_t *rgnp,
* then we flush the shared TSBs, if we find a private hat,
* which is part of an SCD, but where the region
* is not part of the SCD then we flush the private TSBs.
- *
- * If the Rock page size register is present, then SCDs
- * may contain both shared and private pages, so we cannot
- * use this optimization to avoid flushing private TSBs.
*/
- if (pgsz_search_on == 0 &&
- !sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL &&
+ if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL &&
!SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
scdp = sfmmup->sfmmu_scdp;
if (SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
@@ -12332,13 +12191,8 @@ sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup,
* which is part of an SCD, but where the region
* corresponding to this va is not part of the SCD then we
* flush the private TSBs.
- *
- * If the Rock page size register is present, then SCDs
- * may contain both shared and private pages, so we cannot
- * use this optimization to avoid flushing private TSBs.
*/
- if (pgsz_search_on == 0 &&
- !sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL &&
+ if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL &&
!SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD) &&
!SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) {
if (!find_ism_rid(sfmmup, ism_sfmmup, va,
@@ -12648,7 +12502,7 @@ sfmmu_tlb_range_demap(demap_range_t *dmrp)
* A per-process (PP) lock is used to synchronize ctx allocations in
* resume() and ctx invalidations here.
*/
-void
+static void
sfmmu_invalidate_ctx(sfmmu_t *sfmmup)
{
cpuset_t cpuset;
@@ -14174,9 +14028,6 @@ rfound:
if (tteflag && !(sfmmup->sfmmu_rtteflags & tteflag)) {
hatlockp = sfmmu_hat_enter(sfmmup);
sfmmup->sfmmu_rtteflags |= tteflag;
- if (&mmu_set_pgsz_order) {
- mmu_set_pgsz_order(sfmmup, 1);
- }
sfmmu_hat_exit(hatlockp);
}
hatlockp = sfmmu_hat_enter(sfmmup);
@@ -15232,9 +15083,6 @@ sfmmu_join_scd(sf_scd_t *scdp, sfmmu_t *sfmmup)
ASSERT(sfmmup->sfmmu_ttecnt[i] >= scdp->scd_rttecnt[i]);
atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
-sfmmup->sfmmu_scdrttecnt[i]);
- if (!sfmmup->sfmmu_ttecnt[i]) {
- sfmmup->sfmmu_tteflags &= ~(1 << i);
- }
}
/* update tsb0 inflation count */
if (old_scdp != NULL) {
@@ -15245,9 +15093,6 @@ sfmmu_join_scd(sf_scd_t *scdp, sfmmu_t *sfmmup)
scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt);
sfmmup->sfmmu_tsb0_4minflcnt -= scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;
- if (&mmu_set_pgsz_order) {
- mmu_set_pgsz_order(sfmmup, 0);
- }
sfmmu_hat_exit(hatlockp);
if (old_scdp != NULL) {
@@ -15307,7 +15152,7 @@ sfmmu_find_scd(sfmmu_t *sfmmup)
for (scdp = srdp->srd_scdp; scdp != NULL;
scdp = scdp->scd_next) {
SF_RGNMAP_EQUAL(&scdp->scd_region_map,
- &sfmmup->sfmmu_region_map, SFMMU_RGNMAP_WORDS, ret);
+ &sfmmup->sfmmu_region_map, ret);
if (ret == 1) {
SF_SCD_INCR_REF(scdp);
mutex_exit(&srdp->srd_scd_mutex);
@@ -15455,10 +15300,6 @@ sfmmu_leave_scd(sfmmu_t *sfmmup, uchar_t r_type)
scdp->scd_rttecnt[i]);
atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
sfmmup->sfmmu_scdrttecnt[i]);
- if (sfmmup->sfmmu_ttecnt[i] &&
- (sfmmup->sfmmu_tteflags & (1 << i)) == 0) {
- sfmmup->sfmmu_tteflags |= (1 << i);
- }
sfmmup->sfmmu_scdrttecnt[i] = 0;
/* update ismttecnt to include SCD ism before hat leaves SCD */
sfmmup->sfmmu_ismttecnt[i] += sfmmup->sfmmu_scdismttecnt[i];
@@ -15472,9 +15313,6 @@ sfmmu_leave_scd(sfmmu_t *sfmmup, uchar_t r_type)
}
sfmmup->sfmmu_scdp = NULL;
- if (&mmu_set_pgsz_order) {
- mmu_set_pgsz_order(sfmmup, 0);
- }
sfmmu_hat_exit(hatlockp);
/*
@@ -15520,8 +15358,7 @@ sfmmu_destroy_scd(sf_srd_t *srdp, sf_scd_t *scdp, sf_region_map_t *scd_rmap)
* It is possible that the scd has been freed and reallocated with a
* different region map while we've been waiting for the srd_scd_mutex.
*/
- SF_RGNMAP_EQUAL(scd_rmap, &sp->scd_region_map,
- SFMMU_RGNMAP_WORDS, ret);
+ SF_RGNMAP_EQUAL(scd_rmap, &sp->scd_region_map, ret);
if (ret != 1) {
mutex_exit(&srdp->srd_scd_mutex);
return;
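
The interleaved hunks above flatten sfmmu_ttesync() into early-return style while dropping the P_EXEC bookkeeping. Read together, the restructured function is roughly the sketch below, assembled from the added lines; it is kernel code, so not standalone-compilable, and the trailing large-page/nrm sync is elided.

static void
sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp)
{
	uint_t rm = 0;
	int sz;

	ASSERT(TTE_IS_VALID(ttep));

	if (TTE_IS_NOSYNC(ttep)) {
		return;
	}
	if (TTE_IS_REF(ttep)) {
		rm = P_REF;
	}
	if (TTE_IS_MOD(ttep)) {
		rm |= P_MOD;
	}
	if (rm == 0) {
		return;			/* nothing to propagate */
	}

	sz = TTE_CSZ(ttep);
	if (sfmmup != NULL && sfmmup->sfmmu_rmstat) {
		int i;
		caddr_t vaddr = addr;

		for (i = 0; i < TTEPAGES(sz); i++, vaddr += MMU_PAGESIZE) {
			hat_setstat(sfmmup->sfmmu_as, vaddr, MMU_PAGESIZE, rm);
		}
	}

	if (!pp)
		return;
	ASSERT(sfmmu_mlist_held(pp));
	/* ... sync rm into pp (and constituent pages for large TTEs) ... */
}
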
diff --git a/usr/src/uts/sfmmu/vm/hat_sfmmu.h b/usr/src/uts/sfmmu/vm/hat_sfmmu.h
index 5e56264869..327b2fcf36 100644
--- a/usr/src/uts/sfmmu/vm/hat_sfmmu.h
+++ b/usr/src/uts/sfmmu/vm/hat_sfmmu.h
@@ -112,7 +112,6 @@ typedef struct sf_scd sf_scd_t;
#define P_TNC 0x10 /* non-caching is temporary bit */
#define P_KPMS 0x20 /* kpm mapped small (vac alias prevention) */
#define P_KPMC 0x40 /* kpm conflict page (vac alias prevention) */
-#define P_EXEC 0x80 /* execution reference (I-cache filled) */
#define PP_GENERIC_ATTR(pp) ((pp)->p_nrm & (P_MOD | P_REF | P_RO))
#define PP_ISMOD(pp) ((pp)->p_nrm & P_MOD)
@@ -125,7 +124,6 @@ typedef struct sf_scd sf_scd_t;
#endif
#define PP_ISKPMS(pp) ((pp)->p_nrm & P_KPMS)
#define PP_ISKPMC(pp) ((pp)->p_nrm & P_KPMC)
-#define PP_ISEXEC(pp) ((pp)->p_nrm & P_EXEC)
#define PP_SETMOD(pp) ((pp)->p_nrm |= P_MOD)
#define PP_SETREF(pp) ((pp)->p_nrm |= P_REF)
@@ -138,7 +136,6 @@ typedef struct sf_scd sf_scd_t;
#endif
#define PP_SETKPMS(pp) ((pp)->p_nrm |= P_KPMS)
#define PP_SETKPMC(pp) ((pp)->p_nrm |= P_KPMC)
-#define PP_SETEXEC(pp) ((pp)->p_nrm |= P_EXEC)
#define PP_CLRMOD(pp) ((pp)->p_nrm &= ~P_MOD)
#define PP_CLRREF(pp) ((pp)->p_nrm &= ~P_REF)
@@ -150,17 +147,6 @@ typedef struct sf_scd sf_scd_t;
#endif
#define PP_CLRKPMS(pp) ((pp)->p_nrm &= ~P_KPMS)
#define PP_CLRKPMC(pp) ((pp)->p_nrm &= ~P_KPMC)
-#define PP_CLREXEC(pp) ((pp)->p_nrm &= ~P_EXEC)
-
-/*
- * Support for non-coherent I-cache. If the MD property "coherency"
- * is set to 0, it means that the I-cache must be flushed in
- * software. Use the "soft exec" bit in the TTE to detect when a page
- * has been executed, so that it can be flushed before it is re-used
- * for another program.
- */
-#define TTE_EXECUTED(ttep) \
- (TTE_IS_EXECUTABLE(ttep) && TTE_IS_SOFTEXEC(ttep))
/*
* All shared memory segments attached with the SHM_SHARE_MMU flag (ISM)
@@ -337,15 +323,15 @@ typedef union sf_region_map_u {
}
/*
- * Returns 1 if region map1 and map2 are equal.
+ * Returns 1 if map1 and map2 are equal.
*/
-#define SF_RGNMAP_EQUAL(map1, map2, words, rval) { \
+#define SF_RGNMAP_EQUAL(map1, map2, rval) { \
int _i; \
- for (_i = 0; _i < words; _i++) { \
+ for (_i = 0; _i < SFMMU_RGNMAP_WORDS; _i++) { \
if ((map1)->bitmap[_i] != (map2)->bitmap[_i]) \
break; \
} \
- if (_i < words) \
+ if (_i < SFMMU_RGNMAP_WORDS) \
rval = 0; \
else \
rval = 1; \
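
The macro now bakes the word count in via SFMMU_RGNMAP_WORDS instead of taking it from every caller (both call sites in hat_sfmmu.c are updated above). A standalone illustration follows, with a stand-in struct and an arbitrary word count; the real SFMMU_RGNMAP_WORDS value and sf_region_map_t layout are defined elsewhere in this header.

#include <stdio.h>

#define SFMMU_RGNMAP_WORDS	4	/* illustrative value only */

typedef struct {
	unsigned long bitmap[SFMMU_RGNMAP_WORDS];
} toy_region_map_t;

/* Same shape as the simplified macro above, minus the words parameter. */
#define SF_RGNMAP_EQUAL(map1, map2, rval) { \
	int _i; \
	for (_i = 0; _i < SFMMU_RGNMAP_WORDS; _i++) { \
		if ((map1)->bitmap[_i] != (map2)->bitmap[_i]) \
			break; \
	} \
	if (_i < SFMMU_RGNMAP_WORDS) \
		rval = 0; \
	else \
		rval = 1; \
}

int
main(void)
{
	toy_region_map_t a = { { 1, 2, 3, 4 } };
	toy_region_map_t b = { { 1, 2, 3, 4 } };
	int ret;

	SF_RGNMAP_EQUAL(&a, &b, ret);
	printf("equal = %d\n", ret);	/* prints 1 */
	return (0);
}
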
@@ -609,13 +595,9 @@ typedef struct mmu_ctx {
extern uint_t max_mmu_ctxdoms;
extern mmu_ctx_t **mmu_ctxs_tbl;
-extern uint_t nctxs;
extern void sfmmu_cpu_init(cpu_t *);
extern void sfmmu_cpu_cleanup(cpu_t *);
-extern void sfmmu_invalidate_ctx(sfmmu_t *);
-extern hatlock_t *sfmmu_hat_enter(sfmmu_t *);
-extern void sfmmu_hat_exit(hatlock_t *);
/*
* The following structure is used to get MMU context domain information for
@@ -652,6 +634,7 @@ typedef struct sfmmu_ctx {
uint64_t cnum:16;
} sfmmu_ctx_t;
+
/*
* The platform dependent hat structure.
* tte counts should be protected by cas.
@@ -713,11 +696,7 @@ struct hat {
sf_rgn_link_t *sfmmu_hmeregion_links[SFMMU_L1_HMERLINKS];
sf_rgn_link_t sfmmu_scd_link; /* link to scd or pending queue */
#ifdef sun4v
- /* ttecnt for Rock pagesize register management */
- ulong_t sfmmu_mmuttecnt[MMU_PAGE_SIZES];
struct hv_tsb_block sfmmu_hvblock;
- struct hv_pgsz_order sfmmu_pgsz_order; /* pagesize search order */
- uint8_t sfmmu_pgsz_map; /* bit map to control shared pgsz use */
#endif
/*
* sfmmu_ctxs is a variable length array of max_mmu_ctxdoms # of
@@ -763,8 +742,6 @@ struct sf_scd {
extern int disable_shctx;
extern int shctx_on;
-extern int pgsz_search_on;
-extern int disable_pgsz_search;
/*
* bit mask for managing vac conflicts on large pages.
@@ -878,7 +855,6 @@ struct ctx_trace {
#define HAT_CTX1_FLAG 0x100 /* ISM imap hatflag for ctx1 */
#define HAT_JOIN_SCD 0x200 /* region is joining scd */
#define HAT_ALLCTX_INVALID 0x400 /* all per-MMU ctxs are invalidated */
-#define HAT_ISMNOTINSCD 0x800 /* Not all ISM segs are in the SCD */
#define SFMMU_LGPGS_INUSE(sfmmup) \
(((sfmmup)->sfmmu_tteflags | (sfmmup)->sfmmu_rtteflags) || \
@@ -1822,8 +1798,7 @@ struct tsbmiss {
uintptr_t scratch[3];
ulong_t shmermap[SFMMU_HMERGNMAP_WORDS]; /* 8 bytes */
ulong_t scd_shmermap[SFMMU_HMERGNMAP_WORDS]; /* 8 bytes */
- uint8_t pgsz_bitmap; /* limits ctx1 page sizes */
- uint8_t pad[47]; /* pad to 64 bytes */
+ uint8_t pad[48]; /* pad to 64 bytes */
};
/*
@@ -2354,17 +2329,11 @@ extern struct hme_blk *sfmmu_hmetohblk(struct sf_hment *);
#pragma weak mmu_large_pages_disabled
#pragma weak mmu_set_ctx_page_sizes
#pragma weak mmu_check_page_sizes
-#pragma weak mmu_set_pgsz_order
-#pragma weak sfmmu_init_pgsz_hv
-#pragma weak mmu_enable_pgsz_search
extern void mmu_init_scd(sf_scd_t *);
extern uint_t mmu_large_pages_disabled(uint_t);
extern void mmu_set_ctx_page_sizes(sfmmu_t *);
extern void mmu_check_page_sizes(sfmmu_t *, uint64_t *);
-extern void mmu_set_pgsz_order(sfmmu_t *, int);
-extern void sfmmu_init_pgsz_hv(sfmmu_t *);
-extern void mmu_enable_pgsz_search();
extern sfmmu_t *ksfmmup;
extern caddr_t ktsb_base;
@@ -2406,15 +2375,12 @@ extern uint_t disable_large_pages;
extern uint_t disable_ism_large_pages;
extern uint_t disable_auto_data_large_pages;
extern uint_t disable_auto_text_large_pages;
-extern uint_t disable_shctx_large_pages;
-
-extern void sfmmu_patch_shctx(void);
-extern void sfmmu_patch_pgsz_reg(void);
/* kpm externals */
extern pfn_t sfmmu_kpm_vatopfn(caddr_t);
extern void sfmmu_kpm_patch_tlbm(void);
extern void sfmmu_kpm_patch_tsbm(void);
+extern void sfmmu_patch_shctx(void);
extern void sfmmu_kpm_load_tsb(caddr_t, tte_t *, int);
extern void sfmmu_kpm_unload_tsb(caddr_t, int);
extern void sfmmu_kpm_tsbmtl(short *, uint_t *, int);