author    Donghai Qiao <Donghai.Qiao@Sun.COM>  2008-08-22 14:18:53 -0700
committer Donghai Qiao <Donghai.Qiao@Sun.COM>  2008-08-22 14:18:53 -0700
commit    444ce08e035c2cafaa89f7236e38bbe82a287904
tree      7abdadfc2b62c5d894c67bb7699206d5fa6b653a  /usr/src/uts/sfmmu
parent    58841c984c2766b572c98824c0582c30e1d814d9
6672470 Looping within uiomove()/xcopyout()/copyout_move()
6707987 kpm has some VAC handling issues
6388567 VAC flushing is broken on US III-IV+ for large pages
Diffstat (limited to 'usr/src/uts/sfmmu')
-rw-r--r--  usr/src/uts/sfmmu/ml/sfmmu_asm.s   87
-rw-r--r--  usr/src/uts/sfmmu/vm/hat_sfmmu.c   37
-rw-r--r--  usr/src/uts/sfmmu/vm/hat_sfmmu.h   22
3 files changed, 99 insertions(+), 47 deletions(-)
diff --git a/usr/src/uts/sfmmu/ml/sfmmu_asm.s b/usr/src/uts/sfmmu/ml/sfmmu_asm.s
index 0b2eceb912..59ebe45097 100644
--- a/usr/src/uts/sfmmu/ml/sfmmu_asm.s
+++ b/usr/src/uts/sfmmu/ml/sfmmu_asm.s
@@ -23,8 +23,6 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* SFMMU primitives. These primitives should only be used by sfmmu
* routines.
@@ -4736,36 +4734,70 @@ label/**/_ok:
* g7 = kpm_vbase
*/
- /* vaddr2pfn */
- ldub [%g6 + KPMTSBM_SZSHIFT], %g3
+ /*
+ * Assembly implementation of SFMMU_KPM_VTOP(vaddr, paddr)
+ * which is defined in mach_kpm.h. Any changes in that macro
+ * should also be ported back to this assembly code.
+ */
+ ldub [%g6 + KPMTSBM_SZSHIFT], %g3 /* g3 = kpm_size_shift */
sub %g2, %g7, %g4 /* paddr = vaddr-kpm_vbase */
- srax %g4, %g3, %g2 /* which alias range (r) */
- brnz,pn %g2, sfmmu_kpm_exception /* if (r != 0) goto C handler */
- srlx %g4, MMU_PAGESHIFT, %g2 /* %g2 = pfn */
+ srax %g4, %g3, %g7 /* which alias range (r) */
+ brz,pt %g7, 2f
+ sethi %hi(vac_colors_mask), %g5
+ ld [%g5 + %lo(vac_colors_mask)], %g5
+
+ srlx %g2, MMU_PAGESHIFT, %g1 /* vaddr >> MMU_PAGESHIFT */
+ and %g1, %g5, %g1 /* g1 = v */
+ sllx %g7, %g3, %g5 /* g5 = r << kpm_size_shift */
+ cmp %g7, %g1 /* if (r > v) */
+ bleu,pn %xcc, 1f
+ sub %g4, %g5, %g4 /* paddr -= r << kpm_size_shift */
+ sub %g7, %g1, %g5 /* g5 = r - v */
+ sllx %g5, MMU_PAGESHIFT, %g7 /* (r-v) << MMU_PAGESHIFT */
+ add %g4, %g7, %g4 /* paddr += (r-v)<<MMU_PAGESHIFT */
+ ba 2f
+ nop
+1:
+ sllx %g7, MMU_PAGESHIFT, %g5 /* else */
+ sub %g4, %g5, %g4 /* paddr -= r << MMU_PAGESHIFT */
+
+ /*
+ * paddr2pfn
+ * g1 = vcolor (not used)
+ * g2 = tag access register
+ * g3 = clobbered
+ * g4 = paddr
+ * g5 = clobbered
+ * g6 = per-CPU kpm tsbmiss area
+ * g7 = clobbered
+ */
+2:
+ srlx %g4, MMU_PAGESHIFT, %g2 /* g2 = pfn */
/*
* Setup %asi
* mseg_pa = page_numtomemseg_nolock_pa(pfn)
* if (mseg not found) sfmmu_kpm_exception
- * g2=pfn
+ * g2=pfn g6=per-CPU kpm tsbmiss area
+ * g4 g5 g7 for scratch use.
*/
mov ASI_MEM, %asi
PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmsp2m)
- cmp %g3, MSEG_NULLPTR_PA
+ cmp %g3, MSEG_NULLPTR_PA
be,pn %xcc, sfmmu_kpm_exception /* if mseg not found */
nop
/*
* inx = pfn - mseg_pa->kpm_pbase
- * g2=pfn g3=mseg_pa
+ * g2=pfn g3=mseg_pa g6=per-CPU kpm tsbmiss area
*/
ldxa [%g3 + MEMSEG_KPM_PBASE]%asi, %g7
- sub %g2, %g7, %g4
+ sub %g2, %g7, %g4
#ifdef DEBUG
/*
* Validate inx value
- * g2=pfn g3=mseg_pa g4=inx
+ * g2=pfn g3=mseg_pa g4=inx g6=per-CPU tsbmiss area
*/
ldxa [%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5
cmp %g4, %g5 /* inx - nkpmpgs */
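
Read as C, the vaddr-to-paddr conversion hand-coded in the hunk above is roughly the following sketch, reconstructed from the register comments. kpm_vbase, vac_colors_mask and MMU_PAGESHIFT are symbols the code actually references, kpm_size_shift stands for the KPMTSBM_SZSHIFT value loaded from the tsbmiss area, and the function wrapper is purely illustrative:

	/* Sketch of the alias-range adjustment done in %g4/%g7 above. */
	uintptr_t
	kpm_vtop_sketch(uintptr_t vaddr)
	{
		uintptr_t paddr, r, v;

		paddr = vaddr - (uintptr_t)kpm_vbase;
		r = paddr >> kpm_size_shift;	/* which alias range */
		if (r != 0) {
			v = (vaddr >> MMU_PAGESHIFT) & vac_colors_mask;
			paddr -= r << kpm_size_shift;
			if (r > v)
				paddr += (r - v) << MMU_PAGESHIFT;
			else
				paddr -= r << MMU_PAGESHIFT;
		}
		return (paddr);		/* pfn = paddr >> MMU_PAGESHIFT */
	}
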
@@ -4780,7 +4812,8 @@ label/**/_ok:
/*
* KPMP_SHASH(kp)
- * g2=pfn g3=mseg_pa g4=inx g5=ksp g7=kpmp_stable_sz
+ * g2=pfn g3=mseg_pa g4=inx g5=ksp
+ * g6=per-CPU kpm tsbmiss area g7=kpmp_stable_sz
*/
ldub [%g6 + KPMTSBM_KPMPSHIFT], %g1 /* kpmp_shift */
sub %g7, 1, %g7 /* mask */
@@ -4791,6 +4824,7 @@ label/**/_ok:
/*
* Calculate physical kpm_spage pointer
* g2=pfn g3=mseg_pa g4=offset g5=hashinx
+ * g6=per-CPU kpm tsbmiss area
*/
ldxa [%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_spagespa */
add %g1, %g4, %g1 /* ksp_pa */
@@ -4799,18 +4833,20 @@ label/**/_ok:
* Calculate physical hash lock address.
* Note: Changes in kpm_shlk_t must be reflected here.
* g1=ksp_pa g2=pfn g5=hashinx
+ * g6=per-CPU kpm tsbmiss area
*/
ldx [%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_stablepa */
sllx %g5, KPMSHLK_SHIFT, %g5
add %g4, %g5, %g3 /* hlck_pa */
/*
- * Assemble tte
+ * Assemble non-cacheable tte initially
* g1=ksp_pa g2=pfn g3=hlck_pa
+ * g6=per-CPU kpm tsbmiss area
*/
sethi %hi(TTE_VALID_INT), %g5 /* upper part */
sllx %g5, 32, %g5
- mov (TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
+ mov (TTE_CP_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
or %g5, %g4, %g5
sllx %g2, MMU_PAGESHIFT, %g4
or %g5, %g4, %g5 /* tte */
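
With TTE_CV_INT dropped here, the initially assembled tte is always non-cacheable. In C the value built above is roughly the following (a sketch, with pfn standing in for the %g2 register):

	/* Valid, CP set, privileged, writable; CV stays clear for now. */
	uint64_t tte = ((uint64_t)TTE_VALID_INT << 32) |
	    (TTE_CP_INT | TTE_PRIV_INT | TTE_HWWR_INT) |
	    ((uint64_t)pfn << MMU_PAGESHIFT);
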
@@ -4819,18 +4855,23 @@ label/**/_ok:
/*
* tsb dropin
- * g1=ksp_pa g2=ttarget g3=hlck_pa g4=ktsbp g5=tte
+ * g1=ksp_pa g2=ttarget g3=hlck_pa g4=ktsbp g5=tte (non-cacheable)
+ * g6=per-CPU kpm tsbmiss area g7=scratch register
*/
/* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
KPMLOCK_ENTER(%g3, %g7, kpmtsbsmlock, ASI_MEM)
/* use C-handler if there's no go for dropin */
- ldsba [%g1 + KPMSPAGE_MAPPED]%asi, %g7 /* kp_mapped */
- cmp %g7, -1
- bne,pn %xcc, 5f
- nop
-
+ ldsba [%g1 + KPMSPAGE_MAPPED]%asi, %g7 /* kp_mapped */
+ andcc %g7, KPM_MAPPED_GO, %g0 /* go or no go ? */
+ bz,pt %icc, 5f /* no go */
+ nop
+ and %g7, KPM_MAPPED_MASK, %g7 /* go */
+ cmp %g7, KPM_MAPPEDS /* cacheable ? */
+ be,a,pn %xcc, 3f
+ or %g5, TTE_CV_INT, %g5 /* cacheable */
+3:
#ifndef sun4v
ldub [%g6 + KPMTSBM_FLAGS], %g7
mov ASI_N, %g1
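
In C terms, the go/no-go gate added to the dropin path behaves roughly like this (an illustrative sketch using the kp_mapped_flag values defined in hat_sfmmu.h below; ksp and tte stand in for the %g1 and %g5 register contents):

	/* Sketch of the dropin decision; not the literal handler code. */
	uchar_t kp_mapped = ksp->kp_mapped_flag;

	if (!(kp_mapped & KPM_MAPPED_GO))
		goto use_c_handler;	/* no go: C handler resolves the conflict */
	if ((kp_mapped & KPM_MAPPED_MASK) == KPM_MAPPEDS)
		tte |= TTE_CV_INT;	/* no VAC conflict: make the tte cacheable */
	/* KPM_MAPPEDSC: drop the tte in non-cacheable, as assembled */
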
@@ -4905,7 +4946,7 @@ sfmmu_kpm_tsbmtl(short *kp_refcntc, uint_t *khl_lock, int cmd)
*/
/* ARGSUSED */
int
-sfmmu_kpm_stsbmtl(char *mapped, uint_t *kshl_lock, int val)
+sfmmu_kpm_stsbmtl(uchar_t *mapped, uint_t *kshl_lock, int val)
{
return (0);
}
@@ -4983,7 +5024,7 @@ sfmmu_kpm_stsbmtl_panic:
stb %o2, [%o0]
KPMLOCK_EXIT(%o1, ASI_N)
- mov %o5, %o0 /* return old val */
+ and %o5, KPM_MAPPED_MASK, %o0 /* return old val */
retl
wrpr %g0, %o3, %pstate /* enable interrupts */
SET_SIZE(sfmmu_kpm_stsbmtl)
diff --git a/usr/src/uts/sfmmu/vm/hat_sfmmu.c b/usr/src/uts/sfmmu/vm/hat_sfmmu.c
index 3536da7154..ed6edb76f4 100644
--- a/usr/src/uts/sfmmu/vm/hat_sfmmu.c
+++ b/usr/src/uts/sfmmu/vm/hat_sfmmu.c
@@ -23,8 +23,6 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* VM - Hardware Address Translation management for Spitfire MMU.
*
@@ -3453,7 +3451,7 @@ sfmmu_vacconflict_array(caddr_t addr, page_t *pp, int *cflags)
return (0);
}
- if (!PP_ISMAPPED(pp)) {
+ if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) {
/*
* Previous user of page had a different color
* but since there are no current users
@@ -6740,6 +6738,9 @@ hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp)
cpuset_t cpuset;
int cap_cpus;
int ret;
+#ifdef VAC
+ int cflags = 0;
+#endif
if (hat_kpr_enabled == 0 || !kcage_on || PP_ISNORELOC(*target)) {
PAGE_RELOCATE_LOG(target, replacement, EAGAIN, -1);
@@ -6806,21 +6807,6 @@ hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp)
kpreempt_disable();
-#ifdef VAC
- /*
- * If the replacement page is of a different virtual color
- * than the page it is replacing, we need to handle the VAC
- * consistency for it just as we would if we were setting up
- * a new mapping to a page.
- */
- if ((tpp->p_szc == 0) && (PP_GET_VCOLOR(rpp) != NO_VCOLOR)) {
- if (tpp->p_vcolor != rpp->p_vcolor) {
- sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp),
- rpp->p_pagenum);
- }
- }
-#endif
-
/*
* We raise our PIL to 13 so that we don't get captured by
* another CPU or pinned by an interrupt thread. We can't go to
@@ -6870,6 +6856,21 @@ hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp)
* this context.
*/
for (i = 0; i < npages; i++, tpp++, rpp++) {
+#ifdef VAC
+ /*
+ * If the replacement has a different vcolor than
+ * the one being replaced, we need to handle VAC
+ * consistency for it just as if we were setting up
+ * a new mapping to it.
+ */
+ if ((PP_GET_VCOLOR(rpp) != NO_VCOLOR) &&
+ (tpp->p_vcolor != rpp->p_vcolor) &&
+ !CacheColor_IsFlushed(cflags, PP_GET_VCOLOR(rpp))) {
+ CacheColor_SetFlushed(cflags, PP_GET_VCOLOR(rpp));
+ sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp),
+ rpp->p_pagenum);
+ }
+#endif
/*
* Copy the contents of the page.
*/
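
The cflags word threaded through the loop above ensures each replacement vcolor is flushed at most once per relocation, however many constituent pages share it. Assuming the CacheColor_* macros are simple bitmask tests (their actual definitions live elsewhere in the tree), the pattern is:

	int cflags = 0;			/* bitmask of vcolors already flushed */

	for (i = 0; i < npages; i++, tpp++, rpp++) {
		int vcolor = PP_GET_VCOLOR(rpp);

		if (vcolor != NO_VCOLOR && tpp->p_vcolor != rpp->p_vcolor &&
		    !(cflags & (1 << vcolor))) {	/* CacheColor_IsFlushed() */
			cflags |= (1 << vcolor);	/* CacheColor_SetFlushed() */
			sfmmu_cache_flushcolor(vcolor, rpp->p_pagenum);
		}
		/* ... copy the page contents, as in the loop above ... */
	}
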
diff --git a/usr/src/uts/sfmmu/vm/hat_sfmmu.h b/usr/src/uts/sfmmu/vm/hat_sfmmu.h
index 92f238aafa..b83e7a8d24 100644
--- a/usr/src/uts/sfmmu/vm/hat_sfmmu.h
+++ b/usr/src/uts/sfmmu/vm/hat_sfmmu.h
@@ -34,8 +34,6 @@
#ifndef _VM_HAT_SFMMU_H
#define _VM_HAT_SFMMU_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -2377,7 +2375,7 @@ extern void sfmmu_patch_shctx(void);
extern void sfmmu_kpm_load_tsb(caddr_t, tte_t *, int);
extern void sfmmu_kpm_unload_tsb(caddr_t, int);
extern void sfmmu_kpm_tsbmtl(short *, uint_t *, int);
-extern int sfmmu_kpm_stsbmtl(char *, uint_t *, int);
+extern int sfmmu_kpm_stsbmtl(uchar_t *, uint_t *, int);
extern caddr_t kpm_vbase;
extern size_t kpm_size;
extern struct memseg *memseg_hash[];
@@ -2399,9 +2397,21 @@ extern uchar_t kpmp_shift;
#define KPMTSBM_STOP 0
#define KPMTSBM_START 1
-/* kpm_smallpages kp_mapped values */
-#define KPM_MAPPEDS -1 /* small mapping valid, no conflict */
-#define KPM_MAPPEDSC 1 /* small mapping valid, conflict */
+/*
+ * For kpm_smallpages, how a kpm page is mapped and whether the mapping is
+ * ready to go is indicated by two 4-bit fields defined in the kpm_spage
+ * structure as follows:
+ * kp_mapped_flag bit[0:3] - whether the page is mapped cacheable
+ * kp_mapped_flag bit[4:7] - whether the mapping is ready to go
+ * If the KPM_MAPPED_GO bit is set, the assembly tsb miss handler may drop
+ * the mapping in regardless of its caching state. Otherwise, the C handler
+ * resolves the VAC conflict, whether the page is currently mapped cacheable
+ * or non-cacheable.
+ */
+#define KPM_MAPPEDS 0x1 /* small mapping valid, no conflict */
+#define KPM_MAPPEDSC 0x2 /* small mapping valid, conflict */
+#define KPM_MAPPED_GO 0x10 /* the mapping is ready to go */
+#define KPM_MAPPED_MASK 0xf
/* Physical memseg address NULL marker */
#define MSEG_NULLPTR_PA -1
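
Putting the new encoding together, a small-page kpm state and the masked return value of sfmmu_kpm_stsbmtl() look like this (an illustrative sketch; the values come from the definitions above):

	/* Illustrative uses of the new kp_mapped_flag encoding. */
	uchar_t flag;

	flag = KPM_MAPPED_GO | KPM_MAPPEDS;	/* 0x11: cacheable, dropin allowed */
	flag = KPM_MAPPEDSC;			/* 0x02: conflict, C handler required */

	/* sfmmu_kpm_stsbmtl() returns the mapping state without the GO bit: */
	int oldval = flag & KPM_MAPPED_MASK;
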