Diffstat (limited to 'usr/src/uts/common/os/mem_config.c')
-rw-r--r--	usr/src/uts/common/os/mem_config.c	6
1 file changed, 3 insertions, 3 deletions
diff --git a/usr/src/uts/common/os/mem_config.c b/usr/src/uts/common/os/mem_config.c
index 3571747e9c..c1a769d04d 100644
--- a/usr/src/uts/common/os/mem_config.c
+++ b/usr/src/uts/common/os/mem_config.c
@@ -144,7 +144,7 @@ kphysm_add_memory_dynamic(pfn_t base, pgcnt_t npgs)
 	void *mapva;
 	void *metabase = (void *)base;
 	pgcnt_t nkpmpgs = 0;
-	offset_t kpm_pages_off;
+	offset_t kpm_pages_off = 0;
 
 	cmn_err(CE_CONT,
 	    "?kphysm_add_memory_dynamic: adding %ldK at 0x%" PRIx64 "\n",
@@ -410,7 +410,7 @@ mapalloc:
 	 *
 	 * If a memseg is reused, invalidate memseg pointers in
 	 * all cpu vm caches. We need to do this this since the check
-	 *	pp >= seg->pages && pp < seg->epages
+	 *	pp >= seg->pages && pp < seg->epages
 	 * used in various places is not atomic and so the first compare
 	 * can happen before reuse and the second compare after reuse.
 	 * The invalidation ensures that a memseg is not deferenced while
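
The comment touched by this whitespace-only change documents a real
ordering hazard: the containment test reads seg->pages and seg->epages as
two separate compares, so a memseg recycled between them can satisfy the
first compare against its old bounds and the second against its new ones.
A sketch of that check, assuming simplified page_t and struct memseg
definitions and a hypothetical memseg_contains() helper:

/*
 * Sketch of the non-atomic containment check quoted in the comment.
 * page_t and struct memseg are simplified stand-ins for the kernel
 * structures; memseg_contains() is hypothetical.
 */
typedef struct page { int pad; } page_t;

struct memseg {
	page_t	*pages;		/* first page_t backed by this memseg */
	page_t	*epages;	/* one past the last page_t */
};

static int
memseg_contains(struct memseg *seg, page_t *pp)
{
	/*
	 * Two independent loads and compares: if *seg is reused in
	 * between, the first compare can see the old bounds and the
	 * second the new ones, passing for a pp the memseg never
	 * covered.  Invalidating cached memseg pointers before reuse
	 * closes that window.
	 */
	return (pp >= seg->pages && pp < seg->epages);
}

int
main(void)
{
	page_t pgs[4];
	struct memseg seg = { &pgs[0], &pgs[4] };

	/* exits 0: &pgs[2] lies inside [pages, epages) */
	return (!memseg_contains(&seg, &pgs[2]));
}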
@@ -2642,7 +2642,7 @@ kphysm_del_cleanup(struct mem_handle *mhp)
 {
 	struct memdelspan *mdsp;
 	struct memseg *seg;
-	struct memseg **segpp;
+	struct memseg **segpp;
 	struct memseg *seglist;
 	pfn_t p_end;
 	uint64_t avmem;