Diffstat (limited to 'usr/src')
-rw-r--r--  usr/src/uts/common/vm/page.h              |  38
-rw-r--r--  usr/src/uts/sfmmu/ml/sfmmu_asm.s          |  87
-rw-r--r--  usr/src/uts/sfmmu/vm/hat_sfmmu.c          |  37
-rw-r--r--  usr/src/uts/sfmmu/vm/hat_sfmmu.h          |  22
-rw-r--r--  usr/src/uts/sun4/vm/mach_kpm.h            |   7
-rw-r--r--  usr/src/uts/sun4u/cpu/spitfire.c          |  12
-rw-r--r--  usr/src/uts/sun4u/cpu/us3_common_asm.s    |  53
-rw-r--r--  usr/src/uts/sun4u/ml/mach_offsets.in      |   4
-rw-r--r--  usr/src/uts/sun4u/sys/opl_olympus_regs.h  |   8
-rw-r--r--  usr/src/uts/sun4u/vm/mach_kpm.c           | 159
-rw-r--r--  usr/src/uts/sun4v/ml/mach_offsets.in      |   6
11 files changed, 266 insertions, 167 deletions
diff --git a/usr/src/uts/common/vm/page.h b/usr/src/uts/common/vm/page.h
index 28ba33cedf..db0129195d 100644
--- a/usr/src/uts/common/vm/page.h
+++ b/usr/src/uts/common/vm/page.h
@@ -39,8 +39,6 @@
#ifndef _VM_PAGE_H
#define _VM_PAGE_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <vm/seg.h>
#ifdef __cplusplus
@@ -1033,18 +1031,36 @@ typedef struct kpm_hlk {
* to save memory space. Alias range mappings and regular segkpm
* mappings are done in units of PAGESIZE and can share the mapping
* information and the mappings are always distinguishable by their
- * virtual address. Other information neeeded for VAC conflict prevention
- * is already available on a per page basis. There are basically 3 states
- * a kpm_spage can have: not mapped (0), mapped in Alias range or virtually
- * uncached (1) and mapped in the regular segkpm window (-1). The -1 value
- * is also used as "go" indication for the segkpm trap level tsbmiss
- * handler for small pages (value is kept the same as it is used for large
- * mappings).
+ * virtual address. Other information needed for VAC conflict prevention
+ * is already available on a per page basis.
+ *
+ * How a kpm page is mapped, and whether its mapping is ready to go, is
+ * indicated by the one-byte kpm_spage structure below. The byte is split
+ * into two 4-bit fields, kp_mapped and kp_mapped_go:
+ * - kp_mapped == 1: the page is mapped cacheable
+ * - kp_mapped == 2: the page is mapped non-cacheable
+ * - kp_mapped_go == 1: the mapping is ready to be dropped in
+ * - kp_mapped_go == 0: the mapping is not ready to be dropped in
+ * When kp_mapped_go == 0, the C handler resolves the VAC conflict.
+ * Otherwise, the assembly tsb miss handler simply drops the mapping in
+ * when a tsb miss occurs.
*/
-typedef struct kpm_spage {
- char kp_mapped; /* page mapped small */
+typedef union kpm_spage {
+ struct {
+#ifdef _BIG_ENDIAN
+ uchar_t mapped_go: 4; /* go or nogo flag */
+ uchar_t mapped: 4; /* page mapped small */
+#else
+ uchar_t mapped: 4; /* page mapped small */
+ uchar_t mapped_go: 4; /* go or nogo flag */
+#endif
+ } kpm_spage_un;
+ uchar_t kp_mapped_flag;
} kpm_spage_t;
+#define kp_mapped kpm_spage_un.mapped
+#define kp_mapped_go kpm_spage_un.mapped_go
+
/*
* Note: kshl_lock offset changes must be reflected in sfmmu_asm.s
*/
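To make the new encoding concrete, here is a minimal C sketch of how the combined kp_mapped_flag byte can be set and tested. The helper names are illustrative only (not code from this change), and the sketch relies on the KPM_MAPPED_* constants defined in hat_sfmmu.h further below:

    /* Sketch: set and test kp_mapped_flag (hypothetical helpers). */
    static void
    kpm_spage_set_mapped(kpm_spage_t *ksp, int cacheable, int go)
    {
    	ksp->kp_mapped = cacheable ? KPM_MAPPEDS : KPM_MAPPEDSC;
    	ksp->kp_mapped_go = go ? 1 : 0;
    }

    static int
    kpm_spage_ready(kpm_spage_t *ksp)
    {
    	/* equivalent to (ksp->kp_mapped_flag & KPM_MAPPED_GO) != 0 */
    	return (ksp->kp_mapped_go != 0);
    }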
diff --git a/usr/src/uts/sfmmu/ml/sfmmu_asm.s b/usr/src/uts/sfmmu/ml/sfmmu_asm.s
index 0b2eceb912..59ebe45097 100644
--- a/usr/src/uts/sfmmu/ml/sfmmu_asm.s
+++ b/usr/src/uts/sfmmu/ml/sfmmu_asm.s
@@ -23,8 +23,6 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* SFMMU primitives. These primitives should only be used by sfmmu
* routines.
@@ -4736,36 +4734,70 @@ label/**/_ok:
* g7 = kpm_vbase
*/
- /* vaddr2pfn */
- ldub [%g6 + KPMTSBM_SZSHIFT], %g3
+ /*
+ * Assembly implementation of SFMMU_KPM_VTOP(vaddr, paddr)
+ * which is defined in mach_kpm.h. Any changes in that macro
+ * should also be ported back to this assembly code.
+ */
+ ldub [%g6 + KPMTSBM_SZSHIFT], %g3 /* g3 = kpm_size_shift */
sub %g2, %g7, %g4 /* paddr = vaddr-kpm_vbase */
- srax %g4, %g3, %g2 /* which alias range (r) */
- brnz,pn %g2, sfmmu_kpm_exception /* if (r != 0) goto C handler */
- srlx %g4, MMU_PAGESHIFT, %g2 /* %g2 = pfn */
+ srax %g4, %g3, %g7 /* which alias range (r) */
+ brz,pt %g7, 2f
+ sethi %hi(vac_colors_mask), %g5
+ ld [%g5 + %lo(vac_colors_mask)], %g5
+
+ srlx %g2, MMU_PAGESHIFT, %g1 /* vaddr >> MMU_PAGESHIFT */
+ and %g1, %g5, %g1 /* g1 = v */
+ sllx %g7, %g3, %g5 /* g5 = r << kpm_size_shift */
+ cmp %g7, %g1 /* if (r > v) */
+ bleu,pn %xcc, 1f
+ sub %g4, %g5, %g4 /* paddr -= r << kpm_size_shift */
+ sub %g7, %g1, %g5 /* g5 = r - v */
+ sllx %g5, MMU_PAGESHIFT, %g7 /* (r-v) << MMU_PAGESHIFT */
+ add %g4, %g7, %g4 /* paddr += (r-v)<<MMU_PAGESHIFT */
+ ba 2f
+ nop
+1:
+ sllx %g7, MMU_PAGESHIFT, %g5 /* else */
+ sub %g4, %g5, %g4 /* paddr -= r << MMU_PAGESHIFT */
+
+ /*
+ * paddr2pfn
+ * g1 = vcolor (not used)
+ * g2 = tag access register
+ * g3 = clobbered
+ * g4 = paddr
+ * g5 = clobbered
+ * g6 = per-CPU kpm tsbmiss area
+ * g7 = clobbered
+ */
+2:
+ srlx %g4, MMU_PAGESHIFT, %g2 /* g2 = pfn */
/*
* Setup %asi
* mseg_pa = page_numtomemseg_nolock_pa(pfn)
* if (mseg not found) sfmmu_kpm_exception
- * g2=pfn
+ * g2=pfn g6=per-CPU kpm tsbmiss area
+ * g4 g5 g7 for scratch use.
*/
mov ASI_MEM, %asi
PAGE_NUM2MEMSEG_NOLOCK_PA(%g2, %g3, %g6, %g4, %g5, %g7, kpmtsbmsp2m)
- cmp %g3, MSEG_NULLPTR_PA
+ cmp %g3, MSEG_NULLPTR_PA
be,pn %xcc, sfmmu_kpm_exception /* if mseg not found */
nop
/*
* inx = pfn - mseg_pa->kpm_pbase
- * g2=pfn g3=mseg_pa
+ * g2=pfn g3=mseg_pa g6=per-CPU kpm tsbmiss area
*/
ldxa [%g3 + MEMSEG_KPM_PBASE]%asi, %g7
- sub %g2, %g7, %g4
+ sub %g2, %g7, %g4
#ifdef DEBUG
/*
* Validate inx value
- * g2=pfn g3=mseg_pa g4=inx
+ * g2=pfn g3=mseg_pa g4=inx g6=per-CPU tsbmiss area
*/
ldxa [%g3 + MEMSEG_KPM_NKPMPGS]%asi, %g5
cmp %g4, %g5 /* inx - nkpmpgs */
@@ -4780,7 +4812,8 @@ label/**/_ok:
/*
* KPMP_SHASH(kp)
- * g2=pfn g3=mseg_pa g4=inx g5=ksp g7=kpmp_stable_sz
+ * g2=pfn g3=mseg_pa g4=inx g5=ksp
+ * g6=per-CPU kpm tsbmiss area g7=kpmp_stable_sz
*/
ldub [%g6 + KPMTSBM_KPMPSHIFT], %g1 /* kpmp_shift */
sub %g7, 1, %g7 /* mask */
@@ -4791,6 +4824,7 @@ label/**/_ok:
/*
* Calculate physical kpm_spage pointer
* g2=pfn g3=mseg_pa g4=offset g5=hashinx
+ * g6=per-CPU kpm tsbmiss area
*/
ldxa [%g3 + MEMSEG_KPM_PAGESPA]%asi, %g1 /* kpm_spagespa */
add %g1, %g4, %g1 /* ksp_pa */
@@ -4799,18 +4833,20 @@ label/**/_ok:
* Calculate physical hash lock address.
* Note: Changes in kpm_shlk_t must be reflected here.
* g1=ksp_pa g2=pfn g5=hashinx
+ * g6=per-CPU kpm tsbmiss area
*/
ldx [%g6 + KPMTSBM_KPMPTABLEPA], %g4 /* kpmp_stablepa */
sllx %g5, KPMSHLK_SHIFT, %g5
add %g4, %g5, %g3 /* hlck_pa */
/*
- * Assemble tte
+ * Assemble non-cacheable tte initially
* g1=ksp_pa g2=pfn g3=hlck_pa
+ * g6=per-CPU kpm tsbmiss area
*/
sethi %hi(TTE_VALID_INT), %g5 /* upper part */
sllx %g5, 32, %g5
- mov (TTE_CP_INT|TTE_CV_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
+ mov (TTE_CP_INT|TTE_PRIV_INT|TTE_HWWR_INT), %g4
or %g5, %g4, %g5
sllx %g2, MMU_PAGESHIFT, %g4
or %g5, %g4, %g5 /* tte */
@@ -4819,18 +4855,23 @@ label/**/_ok:
/*
* tsb dropin
- * g1=ksp_pa g2=ttarget g3=hlck_pa g4=ktsbp g5=tte
+ * g1=ksp_pa g2=ttarget g3=hlck_pa g4=ktsbp g5=tte (non-cacheable)
+ * g6=per-CPU kpm tsbmiss area g7=scratch register
*/
/* KPMLOCK_ENTER(kpmlckp, tmp1, label1, asi) */
KPMLOCK_ENTER(%g3, %g7, kpmtsbsmlock, ASI_MEM)
/* use C-handler if there's no go for dropin */
- ldsba [%g1 + KPMSPAGE_MAPPED]%asi, %g7 /* kp_mapped */
- cmp %g7, -1
- bne,pn %xcc, 5f
- nop
-
+ ldsba [%g1 + KPMSPAGE_MAPPED]%asi, %g7 /* kp_mapped */
+ andcc %g7, KPM_MAPPED_GO, %g0 /* go or no go ? */
+ bz,pt %icc, 5f /* no go */
+ nop
+ and %g7, KPM_MAPPED_MASK, %g7 /* go */
+ cmp %g7, KPM_MAPPEDS /* cacheable ? */
+ be,a,pn %xcc, 3f
+ or %g5, TTE_CV_INT, %g5 /* cacheable */
+3:
#ifndef sun4v
ldub [%g6 + KPMTSBM_FLAGS], %g7
mov ASI_N, %g1
@@ -4905,7 +4946,7 @@ sfmmu_kpm_tsbmtl(short *kp_refcntc, uint_t *khl_lock, int cmd)
*/
/* ARGSUSED */
int
-sfmmu_kpm_stsbmtl(char *mapped, uint_t *kshl_lock, int val)
+sfmmu_kpm_stsbmtl(uchar_t *mapped, uint_t *kshl_lock, int val)
{
return (0);
}
@@ -4983,7 +5024,7 @@ sfmmu_kpm_stsbmtl_panic:
stb %o2, [%o0]
KPMLOCK_EXIT(%o1, ASI_N)
- mov %o5, %o0 /* return old val */
+ and %o5, KPM_MAPPED_MASK, %o0 /* return old val */
retl
wrpr %g0, %o3, %pstate /* enable interrupts */
SET_SIZE(sfmmu_kpm_stsbmtl)
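In C terms, the reworked small-page dropin above amounts to the following sketch. It is illustrative only: tsb_dropin() is a stand-in for the TSB store sequence, and the unsuffixed TTE_* names stand in for the TTE_*_INT bit assembly done at trap level:

    /* Sketch of the go/nogo dropin decision made by the tsbmiss handler. */
    uchar_t flag = ksp->kp_mapped_flag;

    if ((flag & KPM_MAPPED_GO) == 0)
    	goto sfmmu_kpm_exception;	/* no go: C handler resolves it */

    /* assemble a non-cacheable tte first ... */
    tte = TTE_VALID | TTE_CP | TTE_PRIV | TTE_HWWR | (pfn << MMU_PAGESHIFT);
    if ((flag & KPM_MAPPED_MASK) == KPM_MAPPEDS)
    	tte |= TTE_CV;			/* ... then mark it cacheable */
    tsb_dropin(ktsbp, ttarget, tte);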
diff --git a/usr/src/uts/sfmmu/vm/hat_sfmmu.c b/usr/src/uts/sfmmu/vm/hat_sfmmu.c
index 3536da7154..ed6edb76f4 100644
--- a/usr/src/uts/sfmmu/vm/hat_sfmmu.c
+++ b/usr/src/uts/sfmmu/vm/hat_sfmmu.c
@@ -23,8 +23,6 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* VM - Hardware Address Translation management for Spitfire MMU.
*
@@ -3453,7 +3451,7 @@ sfmmu_vacconflict_array(caddr_t addr, page_t *pp, int *cflags)
return (0);
}
- if (!PP_ISMAPPED(pp)) {
+ if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) {
/*
* Previous user of page had a different color
* but since there are no current users
@@ -6740,6 +6738,9 @@ hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp)
cpuset_t cpuset;
int cap_cpus;
int ret;
+#ifdef VAC
+ int cflags = 0;
+#endif
if (hat_kpr_enabled == 0 || !kcage_on || PP_ISNORELOC(*target)) {
PAGE_RELOCATE_LOG(target, replacement, EAGAIN, -1);
@@ -6806,21 +6807,6 @@ hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp)
kpreempt_disable();
-#ifdef VAC
- /*
- * If the replacement page is of a different virtual color
- * than the page it is replacing, we need to handle the VAC
- * consistency for it just as we would if we were setting up
- * a new mapping to a page.
- */
- if ((tpp->p_szc == 0) && (PP_GET_VCOLOR(rpp) != NO_VCOLOR)) {
- if (tpp->p_vcolor != rpp->p_vcolor) {
- sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp),
- rpp->p_pagenum);
- }
- }
-#endif
-
/*
* We raise our PIL to 13 so that we don't get captured by
* another CPU or pinned by an interrupt thread. We can't go to
@@ -6870,6 +6856,21 @@ hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp)
* this context.
*/
for (i = 0; i < npages; i++, tpp++, rpp++) {
+#ifdef VAC
+ /*
+ * If the replacement page has a different vcolor than
+ * the one being replaced, we need to handle the VAC
+ * consistency for it just as if we were setting up a
+ * new mapping to it.
+ */
+ if ((PP_GET_VCOLOR(rpp) != NO_VCOLOR) &&
+ (tpp->p_vcolor != rpp->p_vcolor) &&
+ !CacheColor_IsFlushed(cflags, PP_GET_VCOLOR(rpp))) {
+ CacheColor_SetFlushed(cflags, PP_GET_VCOLOR(rpp));
+ sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp),
+ rpp->p_pagenum);
+ }
+#endif
/*
* Copy the contents of the page.
*/
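The CacheColor_IsFlushed()/CacheColor_SetFlushed() pair is not defined in this diff. Consistent with its use above, a plausible definition is a per-color bitmask kept in cflags, so each vcolor is flushed at most once across the npages loop:

    /* Hypothetical definitions, inferred from the usage above. */
    #define	CacheColor_IsFlushed(flags, color)	((flags) & (1 << (color)))
    #define	CacheColor_SetFlushed(flags, color)	((flags) |= (1 << (color)))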
diff --git a/usr/src/uts/sfmmu/vm/hat_sfmmu.h b/usr/src/uts/sfmmu/vm/hat_sfmmu.h
index 92f238aafa..b83e7a8d24 100644
--- a/usr/src/uts/sfmmu/vm/hat_sfmmu.h
+++ b/usr/src/uts/sfmmu/vm/hat_sfmmu.h
@@ -34,8 +34,6 @@
#ifndef _VM_HAT_SFMMU_H
#define _VM_HAT_SFMMU_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -2377,7 +2375,7 @@ extern void sfmmu_patch_shctx(void);
extern void sfmmu_kpm_load_tsb(caddr_t, tte_t *, int);
extern void sfmmu_kpm_unload_tsb(caddr_t, int);
extern void sfmmu_kpm_tsbmtl(short *, uint_t *, int);
-extern int sfmmu_kpm_stsbmtl(char *, uint_t *, int);
+extern int sfmmu_kpm_stsbmtl(uchar_t *, uint_t *, int);
extern caddr_t kpm_vbase;
extern size_t kpm_size;
extern struct memseg *memseg_hash[];
@@ -2399,9 +2397,21 @@ extern uchar_t kpmp_shift;
#define KPMTSBM_STOP 0
#define KPMTSBM_START 1
-/* kpm_smallpages kp_mapped values */
-#define KPM_MAPPEDS -1 /* small mapping valid, no conflict */
-#define KPM_MAPPEDSC 1 /* small mapping valid, conflict */
+/*
+ * For kpm_smallpages, how a kpm page is mapped and whether the mapping
+ * is ready to go is indicated by the two 4-bit fields defined in the
+ * kpm_spage structure as follows:
+ * kp_mapped_flag bit[0:3] - the page is mapped cacheable or not
+ * kp_mapped_flag bit[4:7] - the mapping is ready to go or not
+ * If the bit KPM_MAPPED_GO is on, the assembly tsb miss handler can drop
+ * the mapping in regardless of the caching state of the mapping. Otherwise,
+ * the C handler resolves the VAC conflict, no matter whether the page is
+ * currently mapped cacheable or non-cacheable.
+ */
+#define KPM_MAPPEDS 0x1 /* small mapping valid, no conflict */
+#define KPM_MAPPEDSC 0x2 /* small mapping valid, conflict */
+#define KPM_MAPPED_GO 0x10 /* the mapping is ready to go */
+#define KPM_MAPPED_MASK 0xf
/* Physical memseg address NULL marker */
#define MSEG_NULLPTR_PA -1
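Note the companion change in sfmmu_asm.s: sfmmu_kpm_stsbmtl() now masks its return value with KPM_MAPPED_MASK, so callers see only the low-nibble mapping state with the go bit stripped. A sketch of the caller-side contract, mirroring the mapin path in mach_kpm.c below:

    /* Sketch: swap in a new flag value under the hashed lock. */
    oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag, &kpmsp->kshl_lock,
        KPM_MAPPED_GO | KPM_MAPPEDS);
    if (oldval != 0)		/* oldval already has the go bit stripped */
    	panic("stale smallpages mapping");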
diff --git a/usr/src/uts/sun4/vm/mach_kpm.h b/usr/src/uts/sun4/vm/mach_kpm.h
index a0a9734221..9262ad60a4 100644
--- a/usr/src/uts/sun4/vm/mach_kpm.h
+++ b/usr/src/uts/sun4/vm/mach_kpm.h
@@ -19,15 +19,13 @@
* CDDL HEADER END
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _MACH_KPM_H
#define _MACH_KPM_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -97,7 +95,8 @@ int kpmp_hash_debug;
#endif /* DEBUG */
/*
- * kpm virtual address to physical address
+ * kpm virtual address to physical address. Any changes in this macro must
+ * also be ported to the assembly implementation in sfmmu_asm.s
*/
#ifdef VAC
#define SFMMU_KPM_VTOP(vaddr, paddr) { \
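The macro body is truncated by this hunk. Reconstructed from the assembly port in sfmmu_asm.s above, the VAC case computes roughly the following (a sketch, not the verbatim macro body):

    /* Reconstructed sketch of the VAC flavor of SFMMU_KPM_VTOP(). */
    paddr = (uintptr_t)vaddr - (uintptr_t)kpm_vbase;
    r = paddr >> kpm_size_shift;		/* which alias range */
    if (r != 0) {
    	v = ((uintptr_t)vaddr >> MMU_PAGESHIFT) & vac_colors_mask;
    	paddr -= r << kpm_size_shift;
    	if (r > v)
    		paddr += (r - v) << MMU_PAGESHIFT;
    	else
    		paddr -= r << MMU_PAGESHIFT;
    }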
diff --git a/usr/src/uts/sun4u/cpu/spitfire.c b/usr/src/uts/sun4u/cpu/spitfire.c
index a2a16eee83..44e84b5bd8 100644
--- a/usr/src/uts/sun4u/cpu/spitfire.c
+++ b/usr/src/uts/sun4u/cpu/spitfire.c
@@ -19,12 +19,10 @@
* CDDL HEADER END
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/archsystm.h>
@@ -33,6 +31,7 @@
#include <sys/cpu.h>
#include <sys/elf_SPARC.h>
#include <vm/hat_sfmmu.h>
+#include <vm/seg_kpm.h>
#include <vm/page.h>
#include <vm/vm_dep.h>
#include <sys/cpuvar.h>
@@ -505,6 +504,13 @@ cpu_setup(void)
kpm_size_shift = 41;
kpm_vbase = (caddr_t)0xfffffa0000000000ull; /* 16EB - 6TB */
+ /*
+ * All UltraSPARC platforms should use small kpm pages by default, as
+ * the KPM large-page VAC conflict code is not worth maintaining. New
+ * generations of SPARC processors no longer have VAC conflict issues.
+ */
+ kpm_smallpages = 1;
+
#if defined(SF_ERRATA_57)
errata57_limit = (caddr_t)0x80000000ul;
#endif
diff --git a/usr/src/uts/sun4u/cpu/us3_common_asm.s b/usr/src/uts/sun4u/cpu/us3_common_asm.s
index f7a30ae7a4..430b62f380 100644
--- a/usr/src/uts/sun4u/cpu/us3_common_asm.s
+++ b/usr/src/uts/sun4u/cpu/us3_common_asm.s
@@ -25,8 +25,6 @@
* Assembly code support for Cheetah/Cheetah+ modules
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#if !defined(lint)
#include "assym.h"
#endif /* !lint */
@@ -132,6 +130,33 @@
sub arg2, tmp1, arg2 ;\
1:
+/*
+ * Macro that flushes one way of the dcache for a given virtual color.
+ * dcache size = 64K, one way = 16K.
+ */
+#define DCACHE_FLUSHCOLOR(arg, way, tmp1, tmp2, tmp3) \
+ ldxa [%g0]ASI_DCU, tmp1; \
+ btst DCU_DC, tmp1; /* is dcache enabled? */ \
+ bz,pn %icc, 1f; \
+ ASM_LD(tmp1, dcache_linesize) \
+ set MMU_PAGESIZE, tmp2; \
+ /* \
+ * arg = virtual color \
+ * tmp2 = page size \
+ * tmp1 = cache line size \
+ */ \
+ sllx arg, MMU_PAGESHIFT, arg; /* color to dcache page */ \
+ mov way, tmp3; \
+ sllx tmp3, 14, tmp3; /* One way 16K */ \
+ or arg, tmp3, arg; \
+ sub tmp2, tmp1, tmp2; \
+2: \
+ stxa %g0, [arg + tmp2]ASI_DC_TAG; \
+ membar #Sync; \
+ cmp %g0, tmp2; \
+ bne,pt %icc, 2b; \
+ sub tmp2, tmp1, tmp2; \
+1:
/* END CSTYLED */
@@ -478,23 +503,15 @@ vac_flushcolor(int vcolor, pfn_t pfnum)
{}
#else /* lint */
- /*
- * In UltraSPARC III flushcolor is same as as flushpage.
- * This is because we have an ASI to flush dcache using physical
- * address.
- * Flushing dcache using physical address is faster because we
- * don't have to deal with associativity of dcache.
- * The arguments to vac_flushpage() and vac_flushcolor() are same but
- * the order is reversed. this is because we maintain compatibility
- * with spitfire, in which vac_flushcolor has only one argument, namely
- * vcolor.
- */
ENTRY(vac_flushcolor)
/*
- * %o0 = vcolor, %o1 = pfnum
+ * %o0 = vcolor
*/
- DCACHE_FLUSHPAGE(%o1, %o0, %o2, %o3, %o4)
+ DCACHE_FLUSHCOLOR(%o0, 0, %o1, %o2, %o3)
+ DCACHE_FLUSHCOLOR(%o0, 1, %o1, %o2, %o3)
+ DCACHE_FLUSHCOLOR(%o0, 2, %o1, %o2, %o3)
+ DCACHE_FLUSHCOLOR(%o0, 3, %o1, %o2, %o3)
retl
nop
SET_SIZE(vac_flushcolor)
@@ -514,9 +531,11 @@ vac_flushcolor_tl1(uint64_t vcolor, uint64_t pfnum)
ENTRY(vac_flushcolor_tl1)
/*
* %g1 = vcolor
- * %g2 = pfnum
*/
- DCACHE_FLUSHPAGE(%g2, %g1, %g3, %g4, %g5)
+ DCACHE_FLUSHCOLOR(%g1, 0, %g2, %g3, %g4)
+ DCACHE_FLUSHCOLOR(%g1, 1, %g2, %g3, %g4)
+ DCACHE_FLUSHCOLOR(%g1, 2, %g2, %g3, %g4)
+ DCACHE_FLUSHCOLOR(%g1, 3, %g2, %g3, %g4)
retry
SET_SIZE(vac_flushcolor_tl1)
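In C-like terms, the rewritten vac_flushcolor() does roughly the following (a sketch; dcache_inval_line() stands in for the stxa ...ASI_DC_TAG store plus membar #Sync performed by the macro above):

    /* Sketch: flush one virtual color from a 64K, 4-way (16K/way) dcache. */
    void
    vac_flushcolor_sketch(int vcolor)
    {
    	long off;
    	uintptr_t base;
    	int way;

    	for (way = 0; way < 4; way++) {
    		base = ((uintptr_t)vcolor << MMU_PAGESHIFT) |
    		    ((uintptr_t)way << 14);		/* one way is 16K */
    		for (off = MMU_PAGESIZE - dcache_linesize; off >= 0;
    		    off -= dcache_linesize)
    			dcache_inval_line(base + off);
    	}
    }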
diff --git a/usr/src/uts/sun4u/ml/mach_offsets.in b/usr/src/uts/sun4u/ml/mach_offsets.in
index 7d8df8a635..f32cd2374b 100644
--- a/usr/src/uts/sun4u/ml/mach_offsets.in
+++ b/usr/src/uts/sun4u/ml/mach_offsets.in
@@ -70,8 +70,6 @@
\ and all of the nested structures/unions together. See the many
\ examples already in this file.
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#ifndef _GENASSYM
#define _GENASSYM
#endif
@@ -179,7 +177,7 @@ kpm_hlk KPMHLK_SIZE KPMHLK_SHIFT
khl_lock KPMHLK_LOCK
kpm_spage KPMSPAGE_SIZE KPMSPAGE_SHIFT
- kp_mapped KPMSPAGE_MAPPED
+ kp_mapped_flag KPMSPAGE_MAPPED
kpm_shlk KPMSHLK_SIZE KPMSHLK_SHIFT
kshl_lock KPMSHLK_LOCK
diff --git a/usr/src/uts/sun4u/sys/opl_olympus_regs.h b/usr/src/uts/sun4u/sys/opl_olympus_regs.h
index 89d9689816..02e7beaf03 100644
--- a/usr/src/uts/sun4u/sys/opl_olympus_regs.h
+++ b/usr/src/uts/sun4u/sys/opl_olympus_regs.h
@@ -26,8 +26,6 @@
#ifndef _SYS_OPL_OLYMPUS_REGS_H
#define _SYS_OPL_OLYMPUS_REGS_H
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/machasi.h>
#include <sys/cpu_impl.h>
@@ -263,11 +261,9 @@ extern "C" {
#define OPL_ECACHE_SETSIZE 0x80000
/*
- * The minimum size needed to ensure consistency on a virtually address
- * cache. Computed by taking the largest virtually indexed cache and dividing
- * by its associativity.
+ * The OPL platform has no VAC consistency issues, so set this to 8KB.
*/
-#define OPL_VAC_SIZE 0x4000
+#define OPL_VAC_SIZE 0x2000
/* these are field offsets for opl_errlog structure */
#define LOG_STICK_OFF 0x0
diff --git a/usr/src/uts/sun4u/vm/mach_kpm.c b/usr/src/uts/sun4u/vm/mach_kpm.c
index 85ae7d50ad..0c5f7e6c01 100644
--- a/usr/src/uts/sun4u/vm/mach_kpm.c
+++ b/usr/src/uts/sun4u/vm/mach_kpm.c
@@ -19,12 +19,10 @@
* CDDL HEADER END
*/
/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
/*
* Kernel Physical Mapping (segkpm) hat interface routines for sun4u.
*/
@@ -96,7 +94,7 @@ hat_kpm_mapin(struct page *pp, struct kpme *kpme)
ASSERT(pp->p_kpmref >= 0);
vaddr = (pp->p_kpmref == 0) ?
- sfmmu_kpm_mapin(pp) : hat_kpm_page2va(pp, 1);
+ sfmmu_kpm_mapin(pp) : hat_kpm_page2va(pp, 1);
if (kpme != NULL) {
/*
@@ -152,7 +150,7 @@ hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
if (sfmmu_kpme_lookup(kpme, pp) == 0)
panic("hat_kpm_mapout: kpme not found pp=%p",
- (void *)pp);
+ (void *)pp);
ASSERT(pp->p_kpmref > 0);
sfmmu_kpme_sub(kpme, pp);
@@ -200,8 +198,8 @@ hat_kpm_page2va(struct page *pp, int checkswap)
if (vcolor_pa != vcolor) {
vaddr += ((uintptr_t)(vcolor - vcolor_pa) << MMU_PAGESHIFT);
vaddr += (vcolor_pa > vcolor) ?
- ((uintptr_t)vcolor_pa << kpm_size_shift) :
- ((uintptr_t)(vcolor - vcolor_pa) << kpm_size_shift);
+ ((uintptr_t)vcolor_pa << kpm_size_shift) :
+ ((uintptr_t)(vcolor - vcolor_pa) << kpm_size_shift);
}
return ((caddr_t)vaddr);
@@ -413,12 +411,12 @@ hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp)
if (mspp == &memsegs) {
memsegspa = (msp->next) ?
- va_to_pa(msp->next) : MSEG_NULLPTR_PA;
+ va_to_pa(msp->next) : MSEG_NULLPTR_PA;
} else {
lmsp = (struct memseg *)
- ((uint64_t)mspp - offsetof(struct memseg, next));
+ ((uint64_t)mspp - offsetof(struct memseg, next));
lmsp->nextpa = (msp->next) ?
- va_to_pa(msp->next) : MSEG_NULLPTR_PA;
+ va_to_pa(msp->next) : MSEG_NULLPTR_PA;
}
}
@@ -508,7 +506,7 @@ hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp,
memsegspa = (lo) ? va_to_pa(lo) : va_to_pa(mid);
} else {
lmsp = (struct memseg *)
- ((uint64_t)mspp - offsetof(struct memseg, next));
+ ((uint64_t)mspp - offsetof(struct memseg, next));
lmsp->nextpa = (lo) ? va_to_pa(lo) : va_to_pa(mid);
}
}
@@ -681,7 +679,7 @@ sfmmu_kpm_mapin(page_t *pp)
if (kp->kp_refcntc == -1) {
/* remove go indication */
sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
- &kpmp->khl_lock, KPMTSBM_STOP);
+ &kpmp->khl_lock, KPMTSBM_STOP);
}
if (kp->kp_refcnt > 0 && kp->kp_refcntc == 0)
sfmmu_kpm_demap_large(vaddr);
@@ -735,7 +733,7 @@ sfmmu_kpm_mapin(page_t *pp)
/* Set go flag for TL tsbmiss handler */
if (kp->kp_refcntc == 0)
sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
- &kpmp->khl_lock, KPMTSBM_START);
+ &kpmp->khl_lock, KPMTSBM_START);
ASSERT(kp->kp_refcntc == -1);
}
@@ -769,7 +767,7 @@ sfmmu_kpm_mapin(page_t *pp)
/* remove go indication */
sfmmu_kpm_tsbmtl(&kp->kp_refcntc, &kpmp->khl_lock,
- KPMTSBM_STOP);
+ KPMTSBM_STOP);
}
ASSERT(kp->kp_refcntc >= 0);
}
@@ -782,6 +780,12 @@ smallpages_mapin:
/* tte assembly */
KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
} else {
+ /*
+ * In case this same page was previously mapped cacheable
+ * and the old tte still remains in the TLB.
+ */
+ sfmmu_kpm_demap_small(vaddr);
+
/* ASSERT(pp->p_share); XXX use hat_page_getshare */
pmtx = sfmmu_page_enter(pp);
PP_SETKPMC(pp);
@@ -796,8 +800,9 @@ smallpages_mapin:
PP2KPMSPG(pp, ksp);
kpmsp = KPMP_SHASH(ksp);
- oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped, &kpmsp->kshl_lock,
- (uncached) ? KPM_MAPPEDSC : KPM_MAPPEDS);
+ oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag, &kpmsp->kshl_lock,
+ (uncached) ? (KPM_MAPPED_GO | KPM_MAPPEDSC) :
+ (KPM_MAPPED_GO | KPM_MAPPEDS));
if (oldval != 0)
panic("sfmmu_kpm_mapin: stale smallpages mapping");
@@ -844,7 +849,7 @@ sfmmu_kpm_mapout(page_t *pp, caddr_t vaddr)
ASSERT(PP_ISKPMS(pp) == 0);
if (kp->kp_refcnta <= 0) {
panic("sfmmu_kpm_mapout: bad refcnta kp=%p",
- (void *)kp);
+ (void *)kp);
}
if (PP_ISTNC(pp)) {
@@ -854,7 +859,7 @@ sfmmu_kpm_mapout(page_t *pp, caddr_t vaddr)
* forced "small page" mode.
*/
panic("sfmmu_kpm_mapout: uncached page not "
- "kpm marked");
+ "kpm marked");
}
sfmmu_kpm_demap_small(vaddr);
@@ -900,7 +905,7 @@ sfmmu_kpm_mapout(page_t *pp, caddr_t vaddr)
/* remove go indication */
if (kp->kp_refcntc == -1) {
sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
- &kpmp->khl_lock, KPMTSBM_STOP);
+ &kpmp->khl_lock, KPMTSBM_STOP);
}
ASSERT(kp->kp_refcntc == 0);
@@ -936,7 +941,7 @@ sfmmu_kpm_mapout(page_t *pp, caddr_t vaddr)
if (PP_ISKPMS(pp)) {
if (kp->kp_refcnts < 1) {
panic("sfmmu_kpm_mapout: bad refcnts kp=%p",
- (void *)kp);
+ (void *)kp);
}
sfmmu_kpm_demap_small(vaddr);
@@ -955,7 +960,7 @@ sfmmu_kpm_mapout(page_t *pp, caddr_t vaddr)
* have forced "small page" mode.
*/
panic("sfmmu_kpm_mapout: uncached "
- "page not kpm marked");
+ "page not kpm marked");
}
conv_tnc(pp, TTE8K);
}
@@ -969,7 +974,7 @@ sfmmu_kpm_mapout(page_t *pp, caddr_t vaddr)
if (PP_ISKPMC(pp)) {
if (kp->kp_refcntc < 1) {
panic("sfmmu_kpm_mapout: bad refcntc kp=%p",
- (void *)kp);
+ (void *)kp);
}
pmtx = sfmmu_page_enter(pp);
PP_CLRKPMC(pp);
@@ -989,8 +994,8 @@ smallpages_mapout:
kpmsp = KPMP_SHASH(ksp);
if (PP_ISKPMC(pp) == 0) {
- oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped,
- &kpmsp->kshl_lock, 0);
+ oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag,
+ &kpmsp->kshl_lock, 0);
if (oldval != KPM_MAPPEDS) {
/*
@@ -1009,8 +1014,8 @@ smallpages_mapout:
#endif
} else if (PP_ISTNC(pp)) {
- oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped,
- &kpmsp->kshl_lock, 0);
+ oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag,
+ &kpmsp->kshl_lock, 0);
if (oldval != KPM_MAPPEDSC || PP_ISKPMC(pp) == 0)
panic("sfmmu_kpm_mapout: inconsistent TNC mapping");
@@ -1031,8 +1036,8 @@ smallpages_mapout:
conv_tnc(pp, TTE8K);
} else {
- oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped,
- &kpmsp->kshl_lock, 0);
+ oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag,
+ &kpmsp->kshl_lock, 0);
if (oldval != KPM_MAPPEDSC)
panic("sfmmu_kpm_mapout: inconsistent mapping");
@@ -1076,8 +1081,8 @@ sfmmu_kpm_getvaddr(page_t *pp, int *kpm_vac_rangep)
*kpm_vac_rangep = abs(vcolor - vcolor_pa);
vaddr += ((uintptr_t)(vcolor - vcolor_pa) << MMU_PAGESHIFT);
vaddr += (vcolor_pa > vcolor) ?
- ((uintptr_t)vcolor_pa << kpm_size_shift) :
- ((uintptr_t)(vcolor - vcolor_pa) << kpm_size_shift);
+ ((uintptr_t)vcolor_pa << kpm_size_shift) :
+ ((uintptr_t)(vcolor - vcolor_pa) << kpm_size_shift);
ASSERT(!PP_ISMAPPED_LARGE(pp));
}
@@ -1162,7 +1167,7 @@ sfmmu_kpm_fault(caddr_t vaddr, struct memseg *mseg, page_t *pp)
inx = ptokpmp(kpmptop(ptokpmp(pfn)) - mseg->kpm_pbase);
if (inx >= mseg->kpm_nkpmpgs) {
cmn_err(CE_PANIC, "sfmmu_kpm_fault: kpm overflow in memseg "
- "0x%p pp 0x%p", (void *)mseg, (void *)pp);
+ "0x%p pp 0x%p", (void *)mseg, (void *)pp);
}
kp = &mseg->kpm_pages[inx];
@@ -1225,7 +1230,7 @@ sfmmu_kpm_fault(caddr_t vaddr, struct memseg *mseg, page_t *pp)
* handler is disabled.
*/
badstate |= (kp->kp_refcnt == 0 || kp->kp_refcnts > 0 ||
- PP_ISKPMC(pp) || PP_ISKPMS(pp) || PP_ISNC(pp));
+ PP_ISKPMC(pp) || PP_ISKPMS(pp) || PP_ISNC(pp));
if (badstate == 0)
goto largeexit;
@@ -1240,9 +1245,9 @@ sfmmu_kpm_fault(caddr_t vaddr, struct memseg *mseg, page_t *pp)
* more concise.
*/
tsbmcase = (((kp->kp_refcntc > 0) ? KPM_KC : 0) |
- ((kp->kp_refcnts > 0) ? KPM_KS : 0) |
- (PP_ISKPMC(pp) ? KPM_C : 0) |
- (PP_ISKPMS(pp) ? KPM_S : 0));
+ ((kp->kp_refcnts > 0) ? KPM_KS : 0) |
+ (PP_ISKPMC(pp) ? KPM_C : 0) |
+ (PP_ISKPMS(pp) ? KPM_S : 0));
switch (tsbmcase) {
case KPM_TSBM_CONFL_GONE: /* - - - - */
@@ -1260,7 +1265,7 @@ sfmmu_kpm_fault(caddr_t vaddr, struct memseg *mseg, page_t *pp)
if (PP_ISNC(pp) || kp->kp_refcnt <= 0 ||
addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) {
panic("sfmmu_kpm_fault: inconsistent CONFL_GONE "
- "state, pp=%p", (void *)pp);
+ "state, pp=%p", (void *)pp);
}
goto largeexit;
@@ -1287,7 +1292,7 @@ sfmmu_kpm_fault(caddr_t vaddr, struct memseg *mseg, page_t *pp)
if (PP_ISNC(pp) || kp->kp_refcnt <= 0 ||
addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) {
panic("sfmmu_kpm_fault: inconsistent MAPS state, "
- "pp=%p", (void *)pp);
+ "pp=%p", (void *)pp);
}
kp->kp_refcnt--;
kp->kp_refcnts++;
@@ -1314,7 +1319,7 @@ sfmmu_kpm_fault(caddr_t vaddr, struct memseg *mseg, page_t *pp)
if (PP_ISNC(pp) ||
addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) {
panic("sfmmu_kpm_fault: inconsistent RPLS state, "
- "pp=%p", (void *)pp);
+ "pp=%p", (void *)pp);
}
goto smallexit;
@@ -1332,7 +1337,7 @@ sfmmu_kpm_fault(caddr_t vaddr, struct memseg *mseg, page_t *pp)
if (PP_ISNC(pp) || kp->kp_refcnt <= 0 ||
addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) {
panic("sfmmu_kpm_fault: inconsistent MAPS_BRKO state, "
- "pp=%p", (void *)pp);
+ "pp=%p", (void *)pp);
}
kp->kp_refcnt--;
kp->kp_refcnts++;
@@ -1351,7 +1356,7 @@ sfmmu_kpm_fault(caddr_t vaddr, struct memseg *mseg, page_t *pp)
* must have bypassed the kpm alias prevention logic.
*/
panic("sfmmu_kpm_fault: stale VAC conflict, pp=%p",
- (void *)pp);
+ (void *)pp);
}
/*
@@ -1409,7 +1414,7 @@ sfmmu_kpm_fault(caddr_t vaddr, struct memseg *mseg, page_t *pp)
* somehow bypassed the kpm alias prevention logic.
*/
panic("sfmmu_kpm_fault: stale VAC conflict, pp=%p",
- (void *)pp);
+ (void *)pp);
}
/*
@@ -1417,7 +1422,7 @@ sfmmu_kpm_fault(caddr_t vaddr, struct memseg *mseg, page_t *pp)
*/
if (!PP_ISNC(pp)) {
panic("sfmmu_kpm_fault: page not uncached, pp=%p",
- (void *)pp);
+ (void *)pp);
}
uncached = 1;
goto smallexit;
@@ -1425,7 +1430,7 @@ sfmmu_kpm_fault(caddr_t vaddr, struct memseg *mseg, page_t *pp)
default:
badstate_exit:
panic("sfmmu_kpm_fault: inconsistent VAC state, vaddr=%p kp=%p "
- "pp=%p", (void *)vaddr, (void *)kp, (void *)pp);
+ "pp=%p", (void *)vaddr, (void *)kp, (void *)pp);
}
smallexit:
@@ -1453,7 +1458,7 @@ largeexit:
if (kp->kp_refcntc == 0) {
/* Set "go" flag for TL tsbmiss handler */
sfmmu_kpm_tsbmtl(&kp->kp_refcntc, &kpmp->khl_lock,
- KPMTSBM_START);
+ KPMTSBM_START);
}
ASSERT(kp->kp_refcntc == -1);
error = 0;
@@ -1529,6 +1534,12 @@ sfmmu_kpm_fault_small(caddr_t vaddr, struct memseg *mseg, page_t *pp)
/* tsb dropin */
sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);
+ oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag,
+ &kpmsp->kshl_lock, (KPM_MAPPED_GO | KPM_MAPPEDSC));
+
+ if (oldval != KPM_MAPPEDSC)
+ panic("sfmmu_kpm_fault_small: "
+ "stale smallpages mapping");
} else {
if (PP_ISKPMC(pp)) {
pmtx = sfmmu_page_enter(pp);
@@ -1542,12 +1553,12 @@ sfmmu_kpm_fault_small(caddr_t vaddr, struct memseg *mseg, page_t *pp)
/* tsb dropin */
sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);
- oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped,
- &kpmsp->kshl_lock, KPM_MAPPEDS);
+ oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag,
+ &kpmsp->kshl_lock, (KPM_MAPPED_GO | KPM_MAPPEDS));
if (oldval != KPM_MAPPEDSC)
panic("sfmmu_kpm_fault_small: "
- "stale smallpages mapping");
+ "stale smallpages mapping");
}
} else {
@@ -1680,7 +1691,7 @@ sfmmu_kpm_pageunload(page_t *pp)
if (pp->p_kpmref == 0)
panic("sfmmu_kpm_pageunload: stale p_kpmref pp=%p "
- "kpme=%p", (void *)pp, (void *)kpme);
+ "kpme=%p", (void *)pp, (void *)kpme);
nkpme = kpme->kpe_next;
@@ -1796,7 +1807,7 @@ sfmmu_kpm_vac_unload(page_t *pp, caddr_t vaddr)
if (IS_KPM_ALIAS_RANGE(kpmvaddr)) {
if (kp->kp_refcnta < 1) {
panic("sfmmu_kpm_vac_unload: bad refcnta kpm_page=%p\n",
- (void *)kp);
+ (void *)kp);
}
if (PP_ISKPMC(pp) == 0) {
@@ -1826,7 +1837,7 @@ sfmmu_kpm_vac_unload(page_t *pp, caddr_t vaddr)
* handler is disabled.
*/
badstate |= (kp->kp_refcnt == 0 || kp->kp_refcnts > 0 ||
- PP_ISKPMC(pp) || PP_ISKPMS(pp) || PP_ISNC(pp));
+ PP_ISKPMC(pp) || PP_ISKPMS(pp) || PP_ISNC(pp));
} else {
badstate |= (kp->kp_refcntc < 0);
}
@@ -1845,9 +1856,9 @@ sfmmu_kpm_vac_unload(page_t *pp, caddr_t vaddr)
* handling more concise.
*/
vacunlcase = (((kp->kp_refcntc > 0) ? KPM_KC : 0) |
- ((kp->kp_refcnts > 0) ? KPM_KS : 0) |
- (PP_ISKPMC(pp) ? KPM_C : 0) |
- (PP_ISKPMS(pp) ? KPM_S : 0));
+ ((kp->kp_refcnts > 0) ? KPM_KS : 0) |
+ (PP_ISKPMC(pp) ? KPM_C : 0) |
+ (PP_ISKPMS(pp) ? KPM_S : 0));
switch (vacunlcase) {
case KPM_VUL_BIG: /* - - - - */
@@ -1858,7 +1869,7 @@ sfmmu_kpm_vac_unload(page_t *pp, caddr_t vaddr)
if (kp->kp_refcntc == -1) {
/* remove go indication */
sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
- &kpmp->khl_lock, KPMTSBM_STOP);
+ &kpmp->khl_lock, KPMTSBM_STOP);
}
sfmmu_kpm_demap_large(kpmvaddr);
@@ -1918,7 +1929,7 @@ sfmmu_kpm_vac_unload(page_t *pp, caddr_t vaddr)
case KPM_VUL_TNC: /* kc c ks s */
cmn_err(CE_NOTE, "sfmmu_kpm_vac_unload: "
- "page not in NC state");
+ "page not in NC state");
/* FALLTHRU */
default:
@@ -1927,8 +1938,8 @@ sfmmu_kpm_vac_unload(page_t *pp, caddr_t vaddr)
exit:
if (badstate) {
panic("sfmmu_kpm_vac_unload: inconsistent VAC state, "
- "kpmvaddr=%p kp=%p pp=%p",
- (void *)kpmvaddr, (void *)kp, (void *)pp);
+ "kpmvaddr=%p kp=%p pp=%p",
+ (void *)kpmvaddr, (void *)kp, (void *)pp);
}
mutex_exit(&kpmp->khl_mutex);
@@ -1946,8 +1957,8 @@ smallpages_vac_unload:
/*
* Stop TL tsbmiss handling
*/
- (void) sfmmu_kpm_stsbmtl(&ksp->kp_mapped,
- &kpmsp->kshl_lock, KPM_MAPPEDSC);
+ (void) sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag,
+ &kpmsp->kshl_lock, KPM_MAPPEDSC);
sfmmu_kpm_demap_small(kpmvaddr);
@@ -2007,12 +2018,12 @@ sfmmu_kpm_hme_unload(page_t *pp)
if (IS_KPM_ALIAS_RANGE(vaddr)) {
if (kp->kp_refcnta < 1) {
panic("sfmmu_kpm_hme_unload: bad refcnta kpm_page=%p\n",
- (void *)kp);
+ (void *)kp);
}
} else {
if (kp->kp_refcntc < 1) {
panic("sfmmu_kpm_hme_unload: bad refcntc kpm_page=%p\n",
- (void *)kp);
+ (void *)kp);
}
kp->kp_refcntc--;
}
@@ -2157,7 +2168,7 @@ sfmmu_kpm_page_cache(page_t *pp, int flags, int cache_flush_tag)
if (IS_KPM_ALIAS_RANGE(kpmvaddr)) {
if (kp->kp_refcnta < 1) {
panic("sfmmu_kpm_page_cache: bad refcnta "
- "kpm_page=%p\n", (void *)kp);
+ "kpm_page=%p\n", (void *)kp);
}
sfmmu_kpm_demap_small(kpmvaddr);
if (flags == HAT_TMPNC) {
@@ -2177,7 +2188,7 @@ sfmmu_kpm_page_cache(page_t *pp, int flags, int cache_flush_tag)
* handler is disabled.
*/
badstate |= (kp->kp_refcnt == 0 || kp->kp_refcnts > 0 ||
- PP_ISKPMC(pp) || PP_ISKPMS(pp) || PP_ISNC(pp));
+ PP_ISKPMC(pp) || PP_ISKPMS(pp) || PP_ISNC(pp));
} else {
badstate |= (kp->kp_refcntc < 0);
}
@@ -2191,9 +2202,9 @@ sfmmu_kpm_page_cache(page_t *pp, int flags, int cache_flush_tag)
* handling more concise.
*/
pgcacase = (((kp->kp_refcntc > 0) ? KPM_KC : 0) |
- ((kp->kp_refcnts > 0) ? KPM_KS : 0) |
- (PP_ISKPMC(pp) ? KPM_C : 0) |
- (PP_ISKPMS(pp) ? KPM_S : 0));
+ ((kp->kp_refcnts > 0) ? KPM_KS : 0) |
+ (PP_ISKPMC(pp) ? KPM_C : 0) |
+ (PP_ISKPMS(pp) ? KPM_S : 0));
if (flags == HAT_CACHE) {
switch (pgcacase) {
@@ -2224,7 +2235,7 @@ sfmmu_kpm_page_cache(page_t *pp, int flags, int cache_flush_tag)
case KPM_UNC_BIG: /* - - - - */
if (kp->kp_refcnt < 1) {
panic("sfmmu_kpm_page_cache: bad refcnt "
- "kpm_page=%p\n", (void *)kp);
+ "kpm_page=%p\n", (void *)kp);
}
/*
@@ -2236,7 +2247,7 @@ sfmmu_kpm_page_cache(page_t *pp, int flags, int cache_flush_tag)
if (kp->kp_refcntc == -1) {
/* remove go indication */
sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
- &kpmp->khl_lock, KPMTSBM_STOP);
+ &kpmp->khl_lock, KPMTSBM_STOP);
}
ASSERT(kp->kp_refcntc == 0);
sfmmu_kpm_demap_large(kpmvaddr);
@@ -2278,8 +2289,8 @@ sfmmu_kpm_page_cache(page_t *pp, int flags, int cache_flush_tag)
exit:
if (badstate) {
panic("sfmmu_kpm_page_cache: inconsistent VAC state "
- "kpmvaddr=%p kp=%p pp=%p", (void *)kpmvaddr,
- (void *)kp, (void *)pp);
+ "kpmvaddr=%p kp=%p pp=%p", (void *)kpmvaddr,
+ (void *)kp, (void *)pp);
}
return;
@@ -2287,8 +2298,12 @@ smallpages_page_cache:
PP2KPMSPG(pp, ksp);
kpmsp = KPMP_SHASH(ksp);
- oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped,
- &kpmsp->kshl_lock, KPM_MAPPEDSC);
+ /*
+ * Mark it as nogo; we will fault in and resolve the conflict
+ * through sfmmu_kpm_fault_small.
+ */
+ oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag, &kpmsp->kshl_lock,
+ KPM_MAPPEDSC);
if (!(oldval == KPM_MAPPEDS || oldval == KPM_MAPPEDSC))
panic("smallpages_page_cache: inconsistent mapping");
diff --git a/usr/src/uts/sun4v/ml/mach_offsets.in b/usr/src/uts/sun4v/ml/mach_offsets.in
index 362d419c82..134b059d6b 100644
--- a/usr/src/uts/sun4v/ml/mach_offsets.in
+++ b/usr/src/uts/sun4v/ml/mach_offsets.in
@@ -18,7 +18,7 @@
\
\ CDDL HEADER END
\
-\ Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+\ Copyright 2008 Sun Microsystems, Inc. All rights reserved.
\ Use is subject to license terms.
\
\ offsets.in: input file to produce assym.h using the stabs program
@@ -70,8 +70,6 @@
\ and all of the nested structures/unions together. See the many
\ examples already in this file.
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#ifndef _GENASSYM
#define _GENASSYM
#endif
@@ -213,7 +211,7 @@ kpm_hlk KPMHLK_SIZE KPMHLK_SHIFT
khl_lock KPMHLK_LOCK
kpm_spage KPMSPAGE_SIZE KPMSPAGE_SHIFT
- kp_mapped KPMSPAGE_MAPPED
+ kp_mapped_flag KPMSPAGE_MAPPED
kpm_shlk KPMSHLK_SIZE KPMSHLK_SHIFT
kshl_lock KPMSHLK_LOCK