author    Jerry Jelinek <jerry.jelinek@joyent.com>  2020-03-16 11:40:43 +0000
committer Jerry Jelinek <jerry.jelinek@joyent.com>  2020-03-16 11:40:43 +0000
commit    a24006240222cb87b3dc787960d97c3fdcbadc69 (patch)
tree      e65f036782dabbf23f392670eac46bf058084d6c /usr/src/uts/common/vm
parent    7e2a00efe240b326a184e01b87d73c18421ce847 (diff)
parent    17abec598417b0120193b3ad4e5390d5d8682014 (diff)
download  illumos-joyent-a24006240222cb87b3dc787960d97c3fdcbadc69.tar.gz
[illumos-gate merge]
commit 17abec598417b0120193b3ad4e5390d5d8682014
	12385 tst.subr.d needs fixing for NULL as a pointer
commit 584b574a3b16c6772c8204ec1d1c957c56f22a87
	12174 i86pc: variable may be used uninitialized
commit 34a4e6b53d2e6f2605fd77cda5b161201d7d0f20
	12377 aggr: may be used uninitialized
commit c9ffe217655ce62448bdb45d6f113f43b4dfcc54
	12367 aac: variable may be used uninitialized
commit a25e615d76804404e5fc63897a9196d4f92c3f5e
	12371 dis x86 EVEX prefix mishandled
	12372 dis EVEX encoding SIB mishandled
	12373 dis support for EVEX vaes instructions
	12374 dis support for EVEX vpclmulqdq instructions
	12375 dis support for gfni instructions
commit c1e9bf00765d7ac9cf1986575e4489dd8710d9b1
	12369 dis WBNOINVD support

Conflicts:
	usr/src/common/dis/i386/dis_tables.c
	exception_lists/wscheck
Diffstat (limited to 'usr/src/uts/common/vm')
-rw-r--r--	usr/src/uts/common/vm/vm_pagelist.c | 30 ++++++++++++++--------------
1 file changed, 16 insertions(+), 14 deletions(-)
diff --git a/usr/src/uts/common/vm/vm_pagelist.c b/usr/src/uts/common/vm/vm_pagelist.c
index f494c3d2b4..950b142588 100644
--- a/usr/src/uts/common/vm/vm_pagelist.c
+++ b/usr/src/uts/common/vm/vm_pagelist.c
@@ -318,16 +318,16 @@ static int mnode_maxmrange[MAX_MEM_NODES];
#define PAGE_COUNTERS(mnode, rg_szc, idx) \
(page_counters[(rg_szc)][(mnode)].hpm_counters[(idx)])
-#define PAGE_COUNTERS_COUNTERS(mnode, rg_szc) \
+#define PAGE_COUNTERS_COUNTERS(mnode, rg_szc) \
(page_counters[(rg_szc)][(mnode)].hpm_counters)
-#define PAGE_COUNTERS_SHIFT(mnode, rg_szc) \
+#define PAGE_COUNTERS_SHIFT(mnode, rg_szc) \
(page_counters[(rg_szc)][(mnode)].hpm_shift)
-#define PAGE_COUNTERS_ENTRIES(mnode, rg_szc) \
+#define PAGE_COUNTERS_ENTRIES(mnode, rg_szc) \
(page_counters[(rg_szc)][(mnode)].hpm_entries)
-#define PAGE_COUNTERS_BASE(mnode, rg_szc) \
+#define PAGE_COUNTERS_BASE(mnode, rg_szc) \
(page_counters[(rg_szc)][(mnode)].hpm_base)
#define PAGE_COUNTERS_CURRENT_COLOR_ARRAY(mnode, rg_szc, g) \
@@ -341,7 +341,7 @@ static int mnode_maxmrange[MAX_MEM_NODES];
(((pnum) - PAGE_COUNTERS_BASE((mnode), (rg_szc))) >> \
PAGE_COUNTERS_SHIFT((mnode), (rg_szc)))
-#define IDX_TO_PNUM(mnode, rg_szc, index) \
+#define IDX_TO_PNUM(mnode, rg_szc, index) \
(PAGE_COUNTERS_BASE((mnode), (rg_szc)) + \
((index) << PAGE_COUNTERS_SHIFT((mnode), (rg_szc))))
@@ -546,7 +546,7 @@ page_ctrs_sz(void)
pfn_t physbase;
pfn_t physmax;
uint_t ctrs_sz = 0;
- int i;
+ int i;
pgcnt_t colors_per_szc[MMU_PAGE_SIZES];
/*
@@ -1925,7 +1925,7 @@ static uint_t page_promote_noreloc_err;
* accounting which needs to be done for a returned page.
*
* RFE: For performance pass in pp instead of pfnum so
- * we can avoid excessive calls to page_numtopp_nolock().
+ * we can avoid excessive calls to page_numtopp_nolock().
* This would depend on an assumption that all contiguous
* pages are in the same memseg so we can just add/dec
* our pp.
@@ -1970,7 +1970,7 @@ page_promote(int mnode, pfn_t pfnum, uchar_t new_szc, int flags, int mtype)
uint_t bin;
pgcnt_t tmpnpgs, pages_left;
uint_t noreloc;
- int which_list;
+ int which_list;
ulong_t index;
kmutex_t *phm;
@@ -2270,9 +2270,9 @@ page_t *
page_freelist_coalesce(int mnode, uchar_t szc, uint_t color, uint_t ceq_mask,
int mtype, pfn_t pfnhi)
{
- int r = szc; /* region size */
+ int r = szc; /* region size */
int mrange;
- uint_t full, bin, color_mask, wrap = 0;
+ uint_t full, bin, color_mask, wrap = 0;
pfn_t pfnum, lo, hi;
size_t len, idx, idx0;
pgcnt_t cands = 0, szcpgcnt = page_get_pagecnt(szc);
@@ -2420,7 +2420,7 @@ page_freelist_coalesce(int mnode, uchar_t szc, uint_t color, uint_t ceq_mask,
/*
* RFE: For performance maybe we can do something less
* brutal than locking the entire freelist. So far
- * this doesn't seem to be a performance problem?
+ * this doesn't seem to be a performance problem?
*/
page_freelist_lock(mnode);
if (PAGE_COUNTERS(mnode, r, idx) == full) {
@@ -2490,8 +2490,8 @@ wrapit:
void
page_freelist_coalesce_all(int mnode)
{
- int r; /* region size */
- int idx, full;
+ int r; /* region size */
+ int idx, full;
size_t len;
int doall = interleaved_mnodes || mnode < 0;
int mlo = doall ? 0 : mnode;
@@ -2584,7 +2584,7 @@ page_freelist_split(uchar_t szc, uint_t color, int mnode, int mtype,
pfn_t pfnlo, pfn_t pfnhi, page_list_walker_t *plw)
{
uchar_t nszc = szc + 1;
- uint_t bin, sbin, bin_prev;
+ uint_t bin, sbin, bin_prev;
page_t *pp, *firstpp;
page_t *ret_pp = NULL;
uint_t color_mask;
@@ -4147,6 +4147,8 @@ page_get_replacement_page(page_t *orig_like_pp, struct lgrp *lgrp_target,
lgrp_mnode_cookie_t lgrp_cookie;
lgrp_t *lgrp;
+ mnode = 0;
+ lgrp = NULL;
REPL_STAT_INCR(ngets);
like_pp = orig_like_pp;
ASSERT(PAGE_EXCL(like_pp));
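
Aside from the whitespace realignment of the backslash continuations in the
vm_pagelist.c macros, the one behavioral hunk above initializes mnode and
lgrp at the top of page_get_replacement_page(), consistent with the
"variable may be used uninitialized" fixes (12174, 12367, 12377) in the
merged commits. A minimal standalone sketch of that warning pattern and
the fix follows; the names and data are illustrative only, not code from
the tree:

/*
 * Sketch only -- not illumos code.  Illustrates the class of warning
 * the merged commits fix: a variable assigned on only some control-flow
 * paths, then read unconditionally afterward.
 */
#include <stdio.h>

static int
first_even(const int *vals, int n)
{
	int found = -1;		/* the fix: a safe default up front, */
	int i;			/* as with "mnode = 0" / "lgrp = NULL" */

	for (i = 0; i < n; i++) {
		if ((vals[i] % 2) == 0) {
			found = vals[i];
			break;	/* without the initializer, a run that */
		}		/* never takes this branch returns an */
	}			/* indeterminate value */
	return (found);
}

int
main(void)
{
	int odd[] = { 1, 3, 5 };

	/* No even element, so only the initializer keeps this defined. */
	printf("%d\n", first_even(odd, 3));	/* prints -1 */
	return (0);
}

Initializing to a default gives defined behavior either way, though it can
mask a genuine logic bug, which is why such warning fixes are normally
paired with a review of the surrounding control flow.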