| author | Toomas Soome <tsoome@me.com> | 2019-12-30 15:05:56 +0200 |
|---|---|---|
| committer | Toomas Soome <tsoome@me.com> | 2020-03-16 08:57:02 +0200 |
| commit | 584b574a3b16c6772c8204ec1d1c957c56f22a87 (patch) | |
| tree | c9bfd155a2c2edf14608a4ee0d1b328680cde6d8 | |
| parent | 34a4e6b53d2e6f2605fd77cda5b161201d7d0f20 (diff) | |
| download | illumos-joyent-584b574a3b16c6772c8204ec1d1c957c56f22a87.tar.gz | |
12174 i86pc: variable may be used uninitialized
Reviewed by: John Levon <john.levon@joyent.com>
Reviewed by: Andrew Stormont <astormont@racktopsystems.com>
Approved by: Dan McDonald <danmcd@joyent.com>
45 files changed, 176 insertions, 123 deletions
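Most hunks below follow one pattern: a local variable is assigned only on some control-flow paths (often because the remaining path ends in a panic or ASSERT the compiler cannot prove is non-returning), so the compiler's uninitialized-use analysis (e.g. gcc's -Wmaybe-uninitialized) flags it. The fix is to give the variable a harmless initial value at or near its declaration. The following is a minimal, hypothetical sketch of that before/after shape, not code taken from the patch; `units_for_type()` and `fake_panic()` are invented stand-ins.

```c
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for cmn_err(CE_PANIC, ...); the compiler may not see it as noreturn. */
static void
fake_panic(const char *msg)
{
	fprintf(stderr, "%s\n", msg);
	abort();
}

static int
units_for_type(int type)
{
	int nunits = 0;	/* initialized up front, as the patch does, to quiet the warning */

	switch (type) {
	case 0:
		nunits = 4;
		break;
	case 1:
		nunits = 8;
		break;
	default:
		fake_panic("unexpected type");	/* never returns in practice */
		break;
	}

	/* without the "= 0" above, gcc can warn: 'nunits' may be used uninitialized */
	return (nunits);
}

int
main(void)
{
	printf("%d\n", units_for_type(1));
	return (0);
}
```

A few changes in the diff take a different route instead of adding an initializer: returning early when a lookup can legitimately leave a pointer NULL, or deleting the variable outright where it was never needed (for example the unused shtype argument dropped from do_relocate()).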
diff --git a/usr/src/common/dis/i386/dis_tables.c b/usr/src/common/dis/i386/dis_tables.c index 928fa51916..4d26a7ac2c 100644 --- a/usr/src/common/dis/i386/dis_tables.c +++ b/usr/src/common/dis/i386/dis_tables.c @@ -3626,7 +3626,7 @@ dtrace_get_operand(dis86_t *x, uint_t mode, uint_t r_m, int wbit, int opindex) int dtrace_disx86(dis86_t *x, uint_t cpu_mode) { - const instable_t *dp; /* decode table being used */ + const instable_t *dp = NULL; /* decode table being used */ #ifdef DIS_TEXT uint_t i; #endif @@ -6406,6 +6406,8 @@ L_VEX_RM: done: #ifdef DIS_MEM + if (dp == NULL) + return (1); /* * compute the size of any memory accessed by the instruction */ diff --git a/usr/src/common/fs/hsfs.c b/usr/src/common/fs/hsfs.c index 9efe265cfa..e648d03360 100644 --- a/usr/src/common/fs/hsfs.c +++ b/usr/src/common/fs/hsfs.c @@ -23,8 +23,6 @@ * Use is subject to license terms. */ -#pragma ident "%Z%%M% %I% %E% SMI" - /* * Basic file system reading code for standalone I/O system. * Simulates a primitive UNIX I/O system (read(), write(), open(), etc). @@ -174,7 +172,7 @@ opendir(ino_t inode, fileid_t *filep) filep->fi_blocknum = hdbtodb(inode); if (inode != root_ino) - return (0); + return (0); if (parse_dir(filep, 0, &hsdep) > 0) { struct inode *ip; @@ -198,6 +196,7 @@ find(char *path, fileid_t *filep) char c; ino_t n; + n = 0; dprintf("find: %s\n", path); if (path == NULL || *path == '\0') return (0); diff --git a/usr/src/common/fs/ufsops.c b/usr/src/common/fs/ufsops.c index 52e716f1de..a467f41202 100644 --- a/usr/src/common/fs/ufsops.c +++ b/usr/src/common/fs/ufsops.c @@ -69,15 +69,15 @@ static fileid_t *head; devid_t *ufs_devp; struct dirinfo { - int loc; + int loc; fileid_t *fi; }; static int bufs_close(int); static void bufs_closeall(int); -static ino_t find(fileid_t *filep, char *path); +static ino_t find(fileid_t *filep, char *path); static ino_t dlook(fileid_t *filep, char *path); -static daddr32_t sbmap(fileid_t *filep, daddr32_t bn); +static daddr32_t sbmap(fileid_t *filep, daddr32_t bn); static struct direct *readdir(struct dirinfo *dstuff); static void set_cache(int, void *, uint_t); static void *get_cache(int); @@ -151,9 +151,10 @@ find(fileid_t *filep, char *path) int len, r; devid_t *devp; + inode = 0; if (path == NULL || *path == '\0') { printf("null path\n"); - return ((ino_t)0); + return (inode); } dprintf("openi: %s\n", path); diff --git a/usr/src/common/util/string.c b/usr/src/common/util/string.c index 5fe5ddab13..2ed2791eda 100644 --- a/usr/src/common/util/string.c +++ b/usr/src/common/util/string.c @@ -80,6 +80,8 @@ vsnprintf(char *buf, size_t buflen, const char *fmt, va_list aargs) int numwidth; va_list args; + ul = 0; + bs = NULL; /* * Make a copy so that all our callers don't have to make a copy */ diff --git a/usr/src/uts/common/disp/cmt.c b/usr/src/uts/common/disp/cmt.c index 0196b15dae..fd734bd229 100644 --- a/usr/src/uts/common/disp/cmt.c +++ b/usr/src/uts/common/disp/cmt.c @@ -1353,7 +1353,7 @@ cmt_pad_disable(pghw_type_t type) /* ARGSUSED */ static void cmt_ev_thread_swtch(pg_t *pg, cpu_t *cp, hrtime_t now, kthread_t *old, - kthread_t *new) + kthread_t *new) { pg_cmt_t *cmt_pg = (pg_cmt_t *)pg; @@ -1747,6 +1747,8 @@ pg_cmt_lineage_validate(pg_cmt_t **lineage, int *sz, cpu_pg_t *pgdata) lgrp_handle_t lgrp; ASSERT(MUTEX_HELD(&cpu_lock)); + pg = NULL; + pg_next = NULL; revalidate: size = *sz; diff --git a/usr/src/uts/common/io/ppm/ppm.c b/usr/src/uts/common/io/ppm/ppm.c index c53f1d723b..0e5c45e466 100644 --- a/usr/src/uts/common/io/ppm/ppm.c +++ 
b/usr/src/uts/common/io/ppm/ppm.c @@ -764,9 +764,10 @@ ppm_ctlops(dev_info_t *dip, dev_info_t *rdip, ppm_owned_t *owned; int mode; int ret = DDI_SUCCESS; - int *res = (int *)result; + int *res = (int *)result; s3a_t s3args; + domp = NULL; #ifdef DEBUG char *str = "ppm_ctlops"; int mask = ppm_debug & (D_CTLOPS1 | D_CTLOPS2); @@ -827,6 +828,9 @@ ppm_ctlops(dev_info_t *dip, dev_info_t *rdip, ppmd = ppm_get_dev(rdip, domp); } + if (domp == NULL) + return (DDI_FAILURE); + PPMD(D_LOCKS, ("ppm_lock_%s: %s, %s\n", (domp->dflags & PPMD_LOCK_ALL) ? "all" : "one", ppmd->path, ppm_get_ctlstr(reqp->request_type, D_LOCKS))) @@ -840,14 +844,17 @@ ppm_ctlops(dev_info_t *dip, dev_info_t *rdip, case PMR_PPM_POWER_LOCK_OWNER: ASSERT(reqp->req.ppm_power_lock_owner_req.who == rdip); ppmd = PPM_GET_PRIVATE(rdip); - if (ppmd) + if (ppmd) { domp = ppmd->domp; - else { + } else { domp = ppm_lookup_dev(rdip); ASSERT(domp); ppmd = ppm_get_dev(rdip, domp); } + if (domp == NULL) + return (DDI_FAILURE); + /* * In case of LOCK_ALL, effective owner of the power lock * is the owner of the domain lock. otherwise, it is the owner @@ -1400,6 +1407,9 @@ ppm_cpr_callb(void *arg, int code) mutex_exit(&ppm_cpr_window_lock); break; + default: + ret = DDI_SUCCESS; + break; } return (ret == DDI_SUCCESS); diff --git a/usr/src/uts/common/krtld/kobj.c b/usr/src/uts/common/krtld/kobj.c index 3ed867f0bf..8f2b885149 100644 --- a/usr/src/uts/common/krtld/kobj.c +++ b/usr/src/uts/common/krtld/kobj.c @@ -1176,7 +1176,6 @@ bind_primary(val_t *bootaux, int lmid) if (mp->flags & KOBJ_EXEC) { Dyn *dyn; Word relasz = 0, relaent = 0; - Word shtype; char *rela = NULL; for (dyn = (Dyn *)bootaux[BA_DYNAMIC].ba_ptr; @@ -1191,11 +1190,9 @@ bind_primary(val_t *bootaux, int lmid) relaent = dyn->d_un.d_val; break; case DT_RELA: - shtype = SHT_RELA; rela = (char *)dyn->d_un.d_ptr; break; case DT_REL: - shtype = SHT_REL; rela = (char *)dyn->d_un.d_ptr; break; } @@ -1212,8 +1209,8 @@ bind_primary(val_t *bootaux, int lmid) _kobj_printf(ops, "krtld: relocating: file=%s " "KOBJ_EXEC\n", mp->filename); #endif - if (do_relocate(mp, rela, shtype, relasz/relaent, - relaent, (Addr)mp->text) < 0) + if (do_relocate(mp, rela, relasz/relaent, relaent, + (Addr)mp->text) < 0) return (-1); } else { if (do_relocations(mp) < 0) @@ -2103,6 +2100,7 @@ kobj_load_primary_module(struct modctl *modp) if (kobj_load_module(modp, 0) != 0) return (-1); + dep = NULL; mp = modp->mod_mp; mp->flags |= KOBJ_PRIM; @@ -2118,7 +2116,8 @@ kobj_load_primary_module(struct modctl *modp) return (-1); } - add_dependent(mp, dep->mod_mp); + if (dep != NULL) + add_dependent(mp, dep->mod_mp); /* * Relocate it. This module may not be part of a link map, so we diff --git a/usr/src/uts/common/os/bp_map.c b/usr/src/uts/common/os/bp_map.c index 0fd5a5d216..b031514095 100644 --- a/usr/src/uts/common/os/bp_map.c +++ b/usr/src/uts/common/os/bp_map.c @@ -23,8 +23,6 @@ * Use is subject to license terms. 
*/ -#pragma ident "%Z%%M% %I% %E% SMI" - #include <sys/types.h> #include <sys/sysmacros.h> #include <sys/systm.h> @@ -94,6 +92,7 @@ bp_mapin_common(struct buf *bp, int flag) pgcnt_t npages; int color; + as = NULL; /* return if already mapped in, no pageio/physio, or physio to kas */ if ((bp->b_flags & B_REMAPPED) || !(bp->b_flags & (B_PAGEIO | B_PHYS)) || @@ -283,8 +282,8 @@ bp_copy_common(bp_copydir_t dir, struct buf *bp, void *driverbuf, page_t *pp; pfn_t pfn; - ASSERT((offset + size) <= bp->b_bcount); + as = NULL; /* if the buf_t already has a KVA, just do a bcopy */ if (!(bp->b_flags & (B_PHYS | B_PAGEIO))) { diff --git a/usr/src/uts/common/os/lgrp.c b/usr/src/uts/common/os/lgrp.c index d8039f1a1f..f3404a1cdf 100644 --- a/usr/src/uts/common/os/lgrp.c +++ b/usr/src/uts/common/os/lgrp.c @@ -848,6 +848,7 @@ lgrp_create(void) int i; ASSERT(!lgrp_initialized || MUTEX_HELD(&cpu_lock)); + lgrpid = 0; /* * Find an open slot in the lgroup table and recycle unused lgroup @@ -1346,6 +1347,10 @@ lgrp_mem_init(int mnode, lgrp_handle_t hand, boolean_t is_copy_rename) klgrpset_add(changed, lgrp->lgrp_id); count++; } + } else { + if (drop_lock) + mutex_exit(&cpu_lock); + return; } /* @@ -3570,6 +3575,8 @@ lgrp_shm_policy_get(struct anon_map *amp, ulong_t anon_index, vnode_t *vp, avl_tree_t *tree; avl_index_t where; + shm_locality = NULL; + tree = NULL; /* * Get policy segment tree from anon_map or vnode and use specified * anon index or vnode offset as offset diff --git a/usr/src/uts/common/os/mem_config.c b/usr/src/uts/common/os/mem_config.c index 3571747e9c..c1a769d04d 100644 --- a/usr/src/uts/common/os/mem_config.c +++ b/usr/src/uts/common/os/mem_config.c @@ -144,7 +144,7 @@ kphysm_add_memory_dynamic(pfn_t base, pgcnt_t npgs) void *mapva; void *metabase = (void *)base; pgcnt_t nkpmpgs = 0; - offset_t kpm_pages_off; + offset_t kpm_pages_off = 0; cmn_err(CE_CONT, "?kphysm_add_memory_dynamic: adding %ldK at 0x%" PRIx64 "\n", @@ -410,7 +410,7 @@ mapalloc: * * If a memseg is reused, invalidate memseg pointers in * all cpu vm caches. We need to do this this since the check - * pp >= seg->pages && pp < seg->epages + * pp >= seg->pages && pp < seg->epages * used in various places is not atomic and so the first compare * can happen before reuse and the second compare after reuse. 
* The invalidation ensures that a memseg is not deferenced while @@ -2642,7 +2642,7 @@ kphysm_del_cleanup(struct mem_handle *mhp) { struct memdelspan *mdsp; struct memseg *seg; - struct memseg **segpp; + struct memseg **segpp; struct memseg *seglist; pfn_t p_end; uint64_t avmem; diff --git a/usr/src/uts/common/sys/kobj_impl.h b/usr/src/uts/common/sys/kobj_impl.h index 8b3ce3fdeb..a8488365b9 100644 --- a/usr/src/uts/common/sys/kobj_impl.h +++ b/usr/src/uts/common/sys/kobj_impl.h @@ -164,7 +164,7 @@ extern void kobj_init(void *romvec, void *dvec, extern int kobj_notify_add(kobj_notify_list_t *); extern int kobj_notify_remove(kobj_notify_list_t *); extern int do_relocations(struct module *); -extern int do_relocate(struct module *, char *, Word, int, int, Addr); +extern int do_relocate(struct module *, char *, int, int, Addr); extern struct bootops *ops; extern void exitto(caddr_t); extern void kobj_sync_instruction_memory(caddr_t, size_t); diff --git a/usr/src/uts/common/vm/vm_pagelist.c b/usr/src/uts/common/vm/vm_pagelist.c index f494c3d2b4..950b142588 100644 --- a/usr/src/uts/common/vm/vm_pagelist.c +++ b/usr/src/uts/common/vm/vm_pagelist.c @@ -318,16 +318,16 @@ static int mnode_maxmrange[MAX_MEM_NODES]; #define PAGE_COUNTERS(mnode, rg_szc, idx) \ (page_counters[(rg_szc)][(mnode)].hpm_counters[(idx)]) -#define PAGE_COUNTERS_COUNTERS(mnode, rg_szc) \ +#define PAGE_COUNTERS_COUNTERS(mnode, rg_szc) \ (page_counters[(rg_szc)][(mnode)].hpm_counters) -#define PAGE_COUNTERS_SHIFT(mnode, rg_szc) \ +#define PAGE_COUNTERS_SHIFT(mnode, rg_szc) \ (page_counters[(rg_szc)][(mnode)].hpm_shift) -#define PAGE_COUNTERS_ENTRIES(mnode, rg_szc) \ +#define PAGE_COUNTERS_ENTRIES(mnode, rg_szc) \ (page_counters[(rg_szc)][(mnode)].hpm_entries) -#define PAGE_COUNTERS_BASE(mnode, rg_szc) \ +#define PAGE_COUNTERS_BASE(mnode, rg_szc) \ (page_counters[(rg_szc)][(mnode)].hpm_base) #define PAGE_COUNTERS_CURRENT_COLOR_ARRAY(mnode, rg_szc, g) \ @@ -341,7 +341,7 @@ static int mnode_maxmrange[MAX_MEM_NODES]; (((pnum) - PAGE_COUNTERS_BASE((mnode), (rg_szc))) >> \ PAGE_COUNTERS_SHIFT((mnode), (rg_szc))) -#define IDX_TO_PNUM(mnode, rg_szc, index) \ +#define IDX_TO_PNUM(mnode, rg_szc, index) \ (PAGE_COUNTERS_BASE((mnode), (rg_szc)) + \ ((index) << PAGE_COUNTERS_SHIFT((mnode), (rg_szc)))) @@ -546,7 +546,7 @@ page_ctrs_sz(void) pfn_t physbase; pfn_t physmax; uint_t ctrs_sz = 0; - int i; + int i; pgcnt_t colors_per_szc[MMU_PAGE_SIZES]; /* @@ -1925,7 +1925,7 @@ static uint_t page_promote_noreloc_err; * accounting which needs to be done for a returned page. * * RFE: For performance pass in pp instead of pfnum so - * we can avoid excessive calls to page_numtopp_nolock(). + * we can avoid excessive calls to page_numtopp_nolock(). * This would depend on an assumption that all contiguous * pages are in the same memseg so we can just add/dec * our pp. 
@@ -1970,7 +1970,7 @@ page_promote(int mnode, pfn_t pfnum, uchar_t new_szc, int flags, int mtype) uint_t bin; pgcnt_t tmpnpgs, pages_left; uint_t noreloc; - int which_list; + int which_list; ulong_t index; kmutex_t *phm; @@ -2270,9 +2270,9 @@ page_t * page_freelist_coalesce(int mnode, uchar_t szc, uint_t color, uint_t ceq_mask, int mtype, pfn_t pfnhi) { - int r = szc; /* region size */ + int r = szc; /* region size */ int mrange; - uint_t full, bin, color_mask, wrap = 0; + uint_t full, bin, color_mask, wrap = 0; pfn_t pfnum, lo, hi; size_t len, idx, idx0; pgcnt_t cands = 0, szcpgcnt = page_get_pagecnt(szc); @@ -2420,7 +2420,7 @@ page_freelist_coalesce(int mnode, uchar_t szc, uint_t color, uint_t ceq_mask, /* * RFE: For performance maybe we can do something less * brutal than locking the entire freelist. So far - * this doesn't seem to be a performance problem? + * this doesn't seem to be a performance problem? */ page_freelist_lock(mnode); if (PAGE_COUNTERS(mnode, r, idx) == full) { @@ -2490,8 +2490,8 @@ wrapit: void page_freelist_coalesce_all(int mnode) { - int r; /* region size */ - int idx, full; + int r; /* region size */ + int idx, full; size_t len; int doall = interleaved_mnodes || mnode < 0; int mlo = doall ? 0 : mnode; @@ -2584,7 +2584,7 @@ page_freelist_split(uchar_t szc, uint_t color, int mnode, int mtype, pfn_t pfnlo, pfn_t pfnhi, page_list_walker_t *plw) { uchar_t nszc = szc + 1; - uint_t bin, sbin, bin_prev; + uint_t bin, sbin, bin_prev; page_t *pp, *firstpp; page_t *ret_pp = NULL; uint_t color_mask; @@ -4147,6 +4147,8 @@ page_get_replacement_page(page_t *orig_like_pp, struct lgrp *lgrp_target, lgrp_mnode_cookie_t lgrp_cookie; lgrp_t *lgrp; + mnode = 0; + lgrp = NULL; REPL_STAT_INCR(ngets); like_pp = orig_like_pp; ASSERT(PAGE_EXCL(like_pp)); diff --git a/usr/src/uts/common/xen/io/xpvd.c b/usr/src/uts/common/xen/io/xpvd.c index 5f8966fce7..4609580437 100644 --- a/usr/src/uts/common/xen/io/xpvd.c +++ b/usr/src/uts/common/xen/io/xpvd.c @@ -377,9 +377,11 @@ got_xs_prop: break; } - if ((rv == DDI_PROP_SUCCESS) && (prop_len > 0)) { - bcopy(prop_str, buff, prop_len); - *lengthp = prop_len; + if (rv == DDI_PROP_SUCCESS) { + if (prop_op != PROP_LEN) { + bcopy(prop_str, buff, prop_len); + *lengthp = prop_len; + } } kmem_free(prop_str, len); return (rv); @@ -874,7 +876,7 @@ i_xpvd_parse_devname(char *name, xendev_devclass_t *devclassp, int len = strlen(name) + 1; char *device_name = i_ddi_strdup(name, KM_SLEEP); char *cname = NULL, *caddr = NULL; - boolean_t ret; + boolean_t ret = B_FALSE; i_ddi_parse_name(device_name, &cname, &caddr, NULL); diff --git a/usr/src/uts/i86pc/boot/boot_console.c b/usr/src/uts/i86pc/boot/boot_console.c index f23baf2f8b..cfd4da40d0 100644 --- a/usr/src/uts/i86pc/boot/boot_console.c +++ b/usr/src/uts/i86pc/boot/boot_console.c @@ -1207,6 +1207,7 @@ bcons_post_bootenvrc(char *inputdev, char *outputdev, char *consoledev) int i; extern int post_fastreboot; + ttyn = 0; if (post_fastreboot && console == CONS_SCREEN_GRAPHICS) console = CONS_SCREEN_TEXT; diff --git a/usr/src/uts/i86pc/cpu/generic_cpu/gcpu_mca.c b/usr/src/uts/i86pc/cpu/generic_cpu/gcpu_mca.c index 348ec93761..962bf52e89 100644 --- a/usr/src/uts/i86pc/cpu/generic_cpu/gcpu_mca.c +++ b/usr/src/uts/i86pc/cpu/generic_cpu/gcpu_mca.c @@ -854,6 +854,8 @@ gcpu_ereport_post(const gcpu_logout_t *gcl, int bankidx, } else { ereport = fm_nvlist_create(NULL); nva = NULL; + eqep = NULL; + scr_eqep = NULL; } if (ereport == NULL) @@ -1808,6 +1810,7 @@ gcpu_mca_logout(cmi_hdl_t hdl, struct regs *rp, uint64_t bankmask, if 
(ismc) { gcl = mca->gcpu_mca_logout[GCPU_MCA_LOGOUT_EXCEPTION]; + pgcl = NULL; } else { int pidx = mca->gcpu_mca_nextpoll_idx; int ppidx = (pidx == GCPU_MCA_LOGOUT_POLLER_1) ? diff --git a/usr/src/uts/i86pc/dboot/dboot_elfload.c b/usr/src/uts/i86pc/dboot/dboot_elfload.c index 417729bfda..7d1f34fc32 100644 --- a/usr/src/uts/i86pc/dboot/dboot_elfload.c +++ b/usr/src/uts/i86pc/dboot/dboot_elfload.c @@ -84,6 +84,7 @@ dboot_elfload64(uintptr_t file_image) paddr_t dst; paddr_t next_addr; + next_addr = 0; elf_file = (caddr_t)file_image; allphdrs = NULL; diff --git a/usr/src/uts/i86pc/io/acpi/acpidev/acpidev_drv.c b/usr/src/uts/i86pc/io/acpi/acpidev/acpidev_drv.c index 1668a4fcad..7d4245ce2b 100644 --- a/usr/src/uts/i86pc/io/acpi/acpidev/acpidev_drv.c +++ b/usr/src/uts/i86pc/io/acpi/acpidev/acpidev_drv.c @@ -1092,6 +1092,7 @@ acpidev_filter_device(acpidev_walk_info_t *infop, ACPI_HANDLE hdl, { acpidev_filter_result_t res; + res = ACPIDEV_FILTER_FAILED; /* Evaluate filtering rules. */ for (; entries > 0; entries--, afrp++) { if (afrp->adf_filter_func != NULL) { diff --git a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_acpi.c b/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_acpi.c index 825c661c2d..1857cb2292 100644 --- a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_acpi.c +++ b/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_acpi.c @@ -548,6 +548,7 @@ add_deventry_info(ivhd_t *ivhdp, ivhd_deventry_t *deventry, last = acpi_ivhdp; break; case DEVENTRY_RANGE_END: + idx = 0; cmn_err(CE_PANIC, "%s: Unexpected Range End Deventry", amd_iommu_modname); /*NOTREACHED*/ @@ -589,9 +590,10 @@ add_deventry_info(ivhd_t *ivhdp, ivhd_deventry_t *deventry, idx = AMD_IOMMU_ACPI_INFO_HASH_SZ; break; default: + idx = 0; cmn_err(CE_PANIC, "%s: Unsupported deventry type", amd_iommu_modname); - /* FALLTHROUGH */ + /*NOTREACHED*/ } acpi_ivhdp->ach_Lint1Pass = deventry->idev_Lint1Pass; @@ -717,6 +719,7 @@ set_ivmd_info(ivmd_t *ivmdp, amd_iommu_acpi_ivmd_t **hash) idx = AMD_IOMMU_ACPI_INFO_HASH_SZ; break; default: + idx = 0; cmn_err(CE_PANIC, "Unknown AMD IOMMU ACPI IVMD deviceid type: " "%x", ivmdp->ivmd_type); /*NOTREACHED*/ diff --git a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_page_tables.c b/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_page_tables.c index 7d70b5d4e4..255a5f7479 100644 --- a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_page_tables.c +++ b/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_page_tables.c @@ -1302,6 +1302,7 @@ amd_iommu_destroy_pgtables(amd_iommu_t *iommu, dev_info_t *rdip, int instance = ddi_get_instance(iommu->aiomt_dip); const char *f = "amd_iommu_destroy_pgtables"; + tear_level = -1; if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) { cmn_err(CE_NOTE, "%s: %s%d: idx = %u, domainid = %d, " "deviceid = %u, va = %p, path = %s", @@ -1347,7 +1348,6 @@ amd_iommu_destroy_pgtables(amd_iommu_t *iommu, dev_info_t *rdip, } } - tear_level = -1; invalidate_pde = 0; invalidate_pte = 0; for (++level; level <= AMD_IOMMU_PGTABLE_MAXLEVEL; level++) { @@ -1401,7 +1401,7 @@ invalidate: } passthru: - if (tear_level == AMD_IOMMU_PGTABLE_MAXLEVEL) { + if (tear_level == AMD_IOMMU_PGTABLE_MAXLEVEL) { error = amd_iommu_clear_devtbl_entry(iommu, rdip, domainid, deviceid, dp, domain_freed, path); } else { diff --git a/usr/src/uts/i86pc/io/cpudrv_mach.c b/usr/src/uts/i86pc/io/cpudrv_mach.c index 772ad33ccb..d8cde4b61e 100644 --- a/usr/src/uts/i86pc/io/cpudrv_mach.c +++ b/usr/src/uts/i86pc/io/cpudrv_mach.c @@ -142,6 +142,7 @@ cpudrv_set_topspeed(void *ctx, int plat_level) int instance; int i; + top_spd = NULL; dip = ctx; instance = ddi_get_instance(dip); 
cpudsp = ddi_get_soft_state(cpudrv_state, instance); diff --git a/usr/src/uts/i86pc/io/dr/dr.c b/usr/src/uts/i86pc/io/dr/dr.c index 65cc469d71..c1156cfe98 100644 --- a/usr/src/uts/i86pc/io/dr/dr.c +++ b/usr/src/uts/i86pc/io/dr/dr.c @@ -1570,6 +1570,7 @@ dr_dev_make_list(dr_handle_t *hp, sbd_comp_type_t type, int present_only, nunits = MAX_IO_UNITS_PER_BOARD; break; default: + nunits = 0; /* catch this in debug kernels */ ASSERT(0); break; diff --git a/usr/src/uts/i86pc/io/gfx_private/gfxp_fb.c b/usr/src/uts/i86pc/io/gfx_private/gfxp_fb.c index 483498b5fd..6d1a99ea05 100644 --- a/usr/src/uts/i86pc/io/gfx_private/gfxp_fb.c +++ b/usr/src/uts/i86pc/io/gfx_private/gfxp_fb.c @@ -272,7 +272,7 @@ gfxp_fb_detach(dev_info_t *devi, ddi_detach_cmd_t cmd, gfxp_fb_softc_ptr_t ptr) case DDI_DETACH: (void) ddi_prop_remove(DDI_DEV_T_ANY, devi, "primary-controller"); - + error = DDI_SUCCESS; switch (softc->fb_type) { case GFXP_BITMAP: error = gfxp_bm_detach(devi, softc); diff --git a/usr/src/uts/i86pc/io/gfx_private/gfxp_segmap.c b/usr/src/uts/i86pc/io/gfx_private/gfxp_segmap.c index 1c4c849b51..6e3f8e0066 100644 --- a/usr/src/uts/i86pc/io/gfx_private/gfxp_segmap.c +++ b/usr/src/uts/i86pc/io/gfx_private/gfxp_segmap.c @@ -75,6 +75,9 @@ gfxp_ddi_segmap_setup(dev_t dev, off_t offset, struct as *as, caddr_t *addrp, if ((flags & MAP_TYPE) != MAP_SHARED) return (EINVAL); + if (len == 0) + return (EINVAL); + /* * Check that this region is indeed mappable on this platform. * Use the mapping function. @@ -102,20 +105,16 @@ gfxp_ddi_segmap_setup(dev_t dev, off_t offset, struct as *as, caddr_t *addrp, * legal and we are not trying to map in * more than the device will let us. */ - for (i = 0; i < len; i += PAGESIZE) { - if (i == 0) { - /* - * Save the pfn at offset here. This pfn will be - * used later to get user address. - */ - if ((pfn = (pfn_t)cdev_mmap(mapfunc, dev, offset, - maxprot)) == PFN_INVALID) - return (ENXIO); - } else { - if (cdev_mmap(mapfunc, dev, offset + i, maxprot) == - PFN_INVALID) - return (ENXIO); - } + /* + * Save the pfn at offset here. This pfn will be + * used later to get user address. + */ + pfn = (pfn_t)cdev_mmap(mapfunc, dev, offset, maxprot); + if (pfn == PFN_INVALID) + return (ENXIO); + for (i = PAGESIZE; i < len; i += PAGESIZE) { + if (cdev_mmap(mapfunc, dev, offset + i, maxprot) == PFN_INVALID) + return (ENXIO); } as_rangelock(as); diff --git a/usr/src/uts/i86pc/io/immu_dvma.c b/usr/src/uts/i86pc/io/immu_dvma.c index f1db9ee317..98ec560e90 100644 --- a/usr/src/uts/i86pc/io/immu_dvma.c +++ b/usr/src/uts/i86pc/io/immu_dvma.c @@ -1014,7 +1014,7 @@ map_unity_domain(domain_t *domain) /* * create_xlate_arena() - * Create the dvma arena for a domain with translation + * Create the dvma arena for a domain with translation * mapping */ static void @@ -1158,7 +1158,7 @@ set_domain( /* * device_domain() - * Get domain for a device. The domain may be global in which case it + * Get domain for a device. The domain may be global in which case it * is shared between all IOMMU units. Due to potential AGAW differences * between IOMMU units, such global domains *have to be* UNITY mapping * domains. Alternatively, the domain may be local to a IOMMU unit. 
@@ -2570,6 +2570,8 @@ immu_map_dvmaseg(dev_info_t *rdip, ddi_dma_handle_t handle, immu_dcookie_t *dcookies; int pde_set; + rwmask = 0; + page = NULL; domain = IMMU_DEVI(rdip)->imd_domain; immu = domain->dom_immu; immu_flags = dma_to_immu_flags(dmareq); diff --git a/usr/src/uts/i86pc/io/ioat/ioat_chan.c b/usr/src/uts/i86pc/io/ioat/ioat_chan.c index 7337ef9d07..4fca64aef3 100644 --- a/usr/src/uts/i86pc/io/ioat/ioat_chan.c +++ b/usr/src/uts/i86pc/io/ioat/ioat_chan.c @@ -824,6 +824,7 @@ ioat_ring_loop(ioat_channel_ring_t *ring, dcopy_cmd_t cmd) ioat_cmd_private_t *prevpriv; ioat_cmd_private_t *currpriv; + currpriv = NULL; channel = ring->cr_chan; ASSERT(channel->ic_ver == IOAT_CBv1); diff --git a/usr/src/uts/i86pc/io/pci/pci_common.c b/usr/src/uts/i86pc/io/pci/pci_common.c index 1cea07237f..1907ea104b 100644 --- a/usr/src/uts/i86pc/io/pci/pci_common.c +++ b/usr/src/uts/i86pc/io/pci/pci_common.c @@ -533,6 +533,7 @@ SUPPORTED_TYPES_OUT: * First check the config space and/or * MSI capability register(s) */ + pci_rval = DDI_FAILURE; if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type)) pci_rval = pci_msi_get_cap(rdip, hdlp->ih_type, &pci_status); @@ -699,6 +700,7 @@ SUPPORTED_TYPES_OUT: * First check the config space and/or * MSI capability register(s) */ + pci_rval = DDI_FAILURE; if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type)) pci_rval = pci_msi_get_pending(rdip, hdlp->ih_type, hdlp->ih_inum, &pci_status); @@ -989,7 +991,7 @@ int pci_common_get_reg_prop(dev_info_t *dip, pci_regspec_t *pci_rp) { int i; - int number; + int number; int assigned_addr_len; uint_t phys_hi = pci_rp->pci_phys_hi; pci_regspec_t *assigned_addr; @@ -1473,7 +1475,7 @@ pci_common_ctlops_peek(peekpoke_ctlops_t *in_args) /*ARGSUSED*/ int pci_common_peekpoke(dev_info_t *dip, dev_info_t *rdip, - ddi_ctl_enum_t ctlop, void *arg, void *result) + ddi_ctl_enum_t ctlop, void *arg, void *result) { if (ctlop == DDI_CTLOPS_PEEK) return (pci_common_ctlops_peek((peekpoke_ctlops_t *)arg)); @@ -1516,7 +1518,7 @@ pci_config_rd8(ddi_acc_impl_t *hdlp, uint8_t *addr) void pci_config_rep_rd8(ddi_acc_impl_t *hdlp, uint8_t *host_addr, - uint8_t *dev_addr, size_t repcount, uint_t flags) + uint8_t *dev_addr, size_t repcount, uint_t flags) { uint8_t *h, *d; @@ -1552,7 +1554,7 @@ pci_config_rd16(ddi_acc_impl_t *hdlp, uint16_t *addr) void pci_config_rep_rd16(ddi_acc_impl_t *hdlp, uint16_t *host_addr, - uint16_t *dev_addr, size_t repcount, uint_t flags) + uint16_t *dev_addr, size_t repcount, uint_t flags) { uint16_t *h, *d; @@ -1588,7 +1590,7 @@ pci_config_rd32(ddi_acc_impl_t *hdlp, uint32_t *addr) void pci_config_rep_rd32(ddi_acc_impl_t *hdlp, uint32_t *host_addr, - uint32_t *dev_addr, size_t repcount, uint_t flags) + uint32_t *dev_addr, size_t repcount, uint_t flags) { uint32_t *h, *d; @@ -1622,7 +1624,7 @@ pci_config_wr8(ddi_acc_impl_t *hdlp, uint8_t *addr, uint8_t value) void pci_config_rep_wr8(ddi_acc_impl_t *hdlp, uint8_t *host_addr, - uint8_t *dev_addr, size_t repcount, uint_t flags) + uint8_t *dev_addr, size_t repcount, uint_t flags) { uint8_t *h, *d; @@ -1655,7 +1657,7 @@ pci_config_wr16(ddi_acc_impl_t *hdlp, uint16_t *addr, uint16_t value) void pci_config_rep_wr16(ddi_acc_impl_t *hdlp, uint16_t *host_addr, - uint16_t *dev_addr, size_t repcount, uint_t flags) + uint16_t *dev_addr, size_t repcount, uint_t flags) { uint16_t *h, *d; @@ -1688,7 +1690,7 @@ pci_config_wr32(ddi_acc_impl_t *hdlp, uint32_t *addr, uint32_t value) void pci_config_rep_wr32(ddi_acc_impl_t *hdlp, uint32_t *host_addr, - uint32_t *dev_addr, size_t repcount, uint_t flags) + uint32_t 
*dev_addr, size_t repcount, uint_t flags) { uint32_t *h, *d; @@ -1736,7 +1738,7 @@ pci_config_wr64(ddi_acc_impl_t *hdlp, uint64_t *addr, uint64_t value) void pci_config_rep_rd64(ddi_acc_impl_t *hdlp, uint64_t *host_addr, - uint64_t *dev_addr, size_t repcount, uint_t flags) + uint64_t *dev_addr, size_t repcount, uint_t flags) { if (flags == DDI_DEV_AUTOINCR) { for (; repcount; repcount--) @@ -1749,7 +1751,7 @@ pci_config_rep_rd64(ddi_acc_impl_t *hdlp, uint64_t *host_addr, void pci_config_rep_wr64(ddi_acc_impl_t *hdlp, uint64_t *host_addr, - uint64_t *dev_addr, size_t repcount, uint_t flags) + uint64_t *dev_addr, size_t repcount, uint_t flags) { if (flags == DDI_DEV_AUTOINCR) { for (; repcount; repcount--) diff --git a/usr/src/uts/i86pc/io/pci/pci_tools.c b/usr/src/uts/i86pc/io/pci/pci_tools.c index c28ca5f0b7..7fb496bc10 100644 --- a/usr/src/uts/i86pc/io/pci/pci_tools.c +++ b/usr/src/uts/i86pc/io/pci/pci_tools.c @@ -278,12 +278,11 @@ pcitool_get_intr(dev_info_t *dip, void *arg, int mode) pcitool_intr_get_t partial_iget; pcitool_intr_get_t *iget = &partial_iget; size_t iget_kmem_alloc_size = 0; - uint8_t num_devs_ret; + uint8_t num_devs_ret = 0; int copyout_rval; int rval = SUCCESS; int circ; int i; - ddi_intr_handle_impl_t info_hdl; apic_get_intr_t intr_info; apic_get_type_t type_info; @@ -640,6 +639,9 @@ pcitool_cfg_access(pcitool_reg_t *prg, boolean_t write_flag, case 8: local_data = VAL64(&req); break; + default: + prg->status = PCITOOL_INVALID_ADDRESS; + return (ENOTSUP); } if (big_endian) { prg->data = diff --git a/usr/src/uts/i86pc/io/rootnex.c b/usr/src/uts/i86pc/io/rootnex.c index 972f726287..757d73dc5e 100644 --- a/usr/src/uts/i86pc/io/rootnex.c +++ b/usr/src/uts/i86pc/io/rootnex.c @@ -1142,6 +1142,8 @@ rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp) case DDI_STORECACHING_OK_ACC: hat_acc_flags = HAT_STORECACHING_OK; break; + default: + return (DDI_ME_INVAL); } ap = (ddi_acc_impl_t *)hp->ah_platform_private; ap->ahi_acc_attr |= DDI_ACCATTR_CPU_VADDR; @@ -2762,7 +2764,7 @@ rootnex_need_bounce_seg(ddi_dma_obj_t *dmar_object, rootnex_sglinfo_t *sglinfo) uint_t pcnt; page_t *pp; - + pp = NULL; /* shortcuts */ pplist = dmar_object->dmao_obj.virt_obj.v_priv; vaddr = dmar_object->dmao_obj.virt_obj.v_addr; @@ -2891,7 +2893,7 @@ rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl, page_t *pp; uint_t cnt; - + pp = NULL; /* shortcuts */ pplist = dmar_object->dmao_obj.virt_obj.v_priv; vaddr = dmar_object->dmao_obj.virt_obj.v_addr; @@ -4226,7 +4228,7 @@ rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma, uint_t pidx; off_t poff; - + pidx = 0; sinfo = &dma->dp_sglinfo; /* diff --git a/usr/src/uts/i86pc/io/tzmon/tzmon.c b/usr/src/uts/i86pc/io/tzmon/tzmon.c index 6841299373..df19882879 100644 --- a/usr/src/uts/i86pc/io/tzmon/tzmon.c +++ b/usr/src/uts/i86pc/io/tzmon/tzmon.c @@ -397,7 +397,7 @@ tzmon_enumerate_zone(ACPI_HANDLE obj, thermal_zone_t *tzp, int enum_flag) ACPI_STATUS status; ACPI_BUFFER zone_name; int level; - int instance; + int instance = 0; char abuf[5]; /* diff --git a/usr/src/uts/i86pc/io/xsvc/xsvc.c b/usr/src/uts/i86pc/io/xsvc/xsvc.c index de3b727544..734a8a6472 100644 --- a/usr/src/uts/i86pc/io/xsvc/xsvc.c +++ b/usr/src/uts/i86pc/io/xsvc/xsvc.c @@ -65,7 +65,7 @@ static int xsvc_detach(dev_info_t *devi, ddi_detach_cmd_t cmd); static int xsvc_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result); -static struct cb_ops xsvc_cb_ops = { +static struct cb_ops xsvc_cb_ops = { xsvc_open, /* cb_open */ xsvc_close, /* cb_close */ 
nodev, /* cb_strategy */ @@ -468,7 +468,6 @@ xsvc_ioctl_alloc_memory(xsvc_state_t *state, void *arg, int mode) int err; int i; - /* Copy in the params, then get the size and key */ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) { err = ddi_copyin(arg, ¶ms32, sizeof (xsvc_mem_req_32), @@ -525,6 +524,7 @@ xsvc_ioctl_alloc_memory(xsvc_state_t *state, void *arg, int mode) usgl32 = (xsvc_mloc_32 *)(uintptr_t)params32.xsvc_sg_list; mp->xm_dma_attr.dma_attr_align = P2ROUNDUP( params32.xsvc_mem_align, PAGESIZE); + usgl = NULL; } else { mp->xm_dma_attr.dma_attr_addr_lo = params.xsvc_mem_addr_lo; mp->xm_dma_attr.dma_attr_addr_hi = params.xsvc_mem_addr_hi; @@ -532,6 +532,7 @@ xsvc_ioctl_alloc_memory(xsvc_state_t *state, void *arg, int mode) usgl = (xsvc_mloc *)(uintptr_t)params.xsvc_sg_list; mp->xm_dma_attr.dma_attr_align = P2ROUNDUP( params.xsvc_mem_align, PAGESIZE); + usgl32 = NULL; } mp->xm_device_attr = xsvc_device_attr; diff --git a/usr/src/uts/i86pc/os/cpuid.c b/usr/src/uts/i86pc/os/cpuid.c index d52aaeeb97..741f53714f 100644 --- a/usr/src/uts/i86pc/os/cpuid.c +++ b/usr/src/uts/i86pc/os/cpuid.c @@ -2272,7 +2272,8 @@ cpuid_intel_getids(cpu_t *cpu, void *feature) * Multi-core (and possibly multi-threaded) * processors. */ - uint_t ncpu_per_core; + uint_t ncpu_per_core = 0; + if (cpi->cpi_ncore_per_chip == 1) ncpu_per_core = cpi->cpi_ncpu_per_chip; else if (cpi->cpi_ncore_per_chip > 1) diff --git a/usr/src/uts/i86pc/os/cpupm/cpupm_mach.c b/usr/src/uts/i86pc/os/cpupm/cpupm_mach.c index 72302fcf07..af14349807 100644 --- a/usr/src/uts/i86pc/os/cpupm/cpupm_mach.c +++ b/usr/src/uts/i86pc/os/cpupm/cpupm_mach.c @@ -613,7 +613,7 @@ cpupm_state_change(cpu_t *cp, int level, int state) cpupm_mach_state_t *mach_state = (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state); cpupm_state_ops_t *state_ops; - cpupm_state_domains_t *state_domain; + cpupm_state_domains_t *state_domain; cpuset_t set; DTRACE_PROBE2(cpupm__state__change, cpu_t *, cp, int, level); @@ -632,7 +632,7 @@ cpupm_state_change(cpu_t *cp, int level, int state) state_domain = mach_state->ms_tstate.cma_domain; break; default: - break; + return; } switch (state_domain->pm_type) { @@ -872,9 +872,9 @@ int cpupm_get_top_speed(cpu_t *cp) { #ifndef __xpv - cpupm_mach_state_t *mach_state; - cpu_acpi_handle_t handle; - int plat_level; + cpupm_mach_state_t *mach_state; + cpu_acpi_handle_t handle; + int plat_level; uint_t nspeeds; int max_level; diff --git a/usr/src/uts/i86pc/os/ddi_impl.c b/usr/src/uts/i86pc/os/ddi_impl.c index 6767b4e5aa..c86cfcdaeb 100644 --- a/usr/src/uts/i86pc/os/ddi_impl.c +++ b/usr/src/uts/i86pc/os/ddi_impl.c @@ -1414,7 +1414,7 @@ kalloca(size_t size, size_t align, int cansleep, int physcontig, size_t *addr, *raddr, rsize; size_t hdrsize = 4 * sizeof (size_t); /* must be power of 2 */ int a, i, c; - vmem_t *vmp; + vmem_t *vmp = NULL; kmem_cache_t *cp = NULL; if (attr->dma_attr_addr_lo > mmu_ptob((uint64_t)ddiphysmin)) diff --git a/usr/src/uts/i86pc/os/fakebop.c b/usr/src/uts/i86pc/os/fakebop.c index aee332aaf4..166791b2c1 100644 --- a/usr/src/uts/i86pc/os/fakebop.c +++ b/usr/src/uts/i86pc/os/fakebop.c @@ -2411,6 +2411,7 @@ find_fw_table(ACPI_TABLE_RSDP *rsdp, char *signature) * use the XSDT. If the XSDT address is 0, though, fall back to * revision 1 and use the RSDT. 
*/ + xsdt_addr = 0; if (revision == 0) { if (rsdp == NULL) return (NULL); diff --git a/usr/src/uts/i86pc/os/lgrpplat.c b/usr/src/uts/i86pc/os/lgrpplat.c index 29cea5dcbb..d398a19291 100644 --- a/usr/src/uts/i86pc/os/lgrpplat.c +++ b/usr/src/uts/i86pc/os/lgrpplat.c @@ -1347,7 +1347,7 @@ lgrp_plat_root_hand(void) /* * Update CPU to node mapping for given CPU and proximity domain. * Return values: - * - zero for success + * - zero for success * - positive numbers for warnings * - negative numbers for errors */ @@ -1693,6 +1693,7 @@ lgrp_plat_latency_adjust(memnode_phys_addr_map_t *memnode_info, const lgrp_config_flag_t cflag = LGRP_CONFIG_LAT_CHANGE_ALL; int lat_corrected[MAX_NODES][MAX_NODES]; + t = 0; /* * Nothing to do when this is an UMA machine or don't have args needed */ @@ -3482,7 +3483,7 @@ opt_get_numa_config(uint_t *node_cnt, int *mem_intrlv, uint_t node_info[MAX_NODES]; uint_t off_hi; uint_t off_lo; - uint64_t nb_cfg_reg; + uint64_t nb_cfg_reg; /* * Read configuration registers from PCI configuration space to @@ -3517,6 +3518,7 @@ opt_get_numa_config(uint_t *node_cnt, int *mem_intrlv, * For Greyhound, PCI Extended Configuration Space must be enabled to * read high DRAM address map base and limit registers */ + nb_cfg_reg = 0; if (opt_family == AMD_FAMILY_GREYHOUND) { nb_cfg_reg = rdmsr(MSR_AMD_NB_CFG); if ((nb_cfg_reg & AMD_GH_NB_CFG_EN_ECS) == 0) diff --git a/usr/src/uts/i86pc/os/mp_machdep.c b/usr/src/uts/i86pc/os/mp_machdep.c index bb25cc26fc..f36f5f052d 100644 --- a/usr/src/uts/i86pc/os/mp_machdep.c +++ b/usr/src/uts/i86pc/os/mp_machdep.c @@ -350,6 +350,9 @@ pg_plat_hw_rank(pghw_type_t hw1, pghw_type_t hw2) PGHW_NUM_COMPONENTS }; + rank1 = 0; + rank2 = 0; + for (i = 0; hw_hier[i] != PGHW_NUM_COMPONENTS; i++) { if (hw_hier[i] == hw1) rank1 = i; diff --git a/usr/src/uts/i86pc/os/smb_dev.c b/usr/src/uts/i86pc/os/smb_dev.c index 11567548ea..876df1ed2a 100644 --- a/usr/src/uts/i86pc/os/smb_dev.c +++ b/usr/src/uts/i86pc/os/smb_dev.c @@ -148,7 +148,7 @@ smbios_open(const char *file, int version, int flags, int *errp) } else if (smb3 != NULL) { ep_type = SMBIOS_ENTRY_POINT_30; p = smb3; - } else if (smb2 != NULL) { + } else { ep_type = SMBIOS_ENTRY_POINT_21; p = smb2; } diff --git a/usr/src/uts/i86pc/os/startup.c b/usr/src/uts/i86pc/os/startup.c index e98f049391..dfbb83cd15 100644 --- a/usr/src/uts/i86pc/os/startup.c +++ b/usr/src/uts/i86pc/os/startup.c @@ -2610,9 +2610,7 @@ add_physmem_cb(page_t *pp, pfn_t pnum) * kphysm_init() initializes physical memory. */ static pgcnt_t -kphysm_init( - page_t *pp, - pgcnt_t npages) +kphysm_init(page_t *pp, pgcnt_t npages) { struct memlist *pmem; struct memseg *cur_memseg; @@ -2686,9 +2684,8 @@ kphysm_init( * of these large pages, configure the memsegs based on the * memory node ranges which had been made non-contiguous. 
*/ + end_pfn = base_pfn + num - 1; if (mnode_xwa > 1) { - - end_pfn = base_pfn + num - 1; ms = PFN_2_MEM_NODE(base_pfn); me = PFN_2_MEM_NODE(end_pfn); @@ -2747,8 +2744,14 @@ kphysm_init( /* process next memory node range */ ms++; base_pfn = mem_node_config[ms].physbase; - num = MIN(mem_node_config[ms].physmax, - end_pfn) - base_pfn + 1; + + if (mnode_xwa > 1) { + num = MIN(mem_node_config[ms].physmax, + end_pfn) - base_pfn + 1; + } else { + num = mem_node_config[ms].physmax - + base_pfn + 1; + } } } diff --git a/usr/src/uts/i86pc/os/trap.c b/usr/src/uts/i86pc/os/trap.c index f30bedd3f2..d0fb6acf57 100644 --- a/usr/src/uts/i86pc/os/trap.c +++ b/usr/src/uts/i86pc/os/trap.c @@ -488,6 +488,9 @@ trap(struct regs *rp, caddr_t addr, processorid_t cpuid) ASSERT_STACK_ALIGNED(); + errcode = 0; + mstate = 0; + rw = S_OTHER; type = rp->r_trapno; CPU_STATS_ADDQ(CPU, sys, trap, 1); ASSERT(ct->t_schedflag & TS_DONT_SWAP); diff --git a/usr/src/uts/i86pc/vm/htable.c b/usr/src/uts/i86pc/vm/htable.c index a2d59d98ab..08e2130117 100644 --- a/usr/src/uts/i86pc/vm/htable.c +++ b/usr/src/uts/i86pc/vm/htable.c @@ -1382,6 +1382,7 @@ htable_create( if (level < 0 || level > TOP_LEVEL(hat)) panic("htable_create(): level %d out of range\n", level); + ht = NULL; /* * Create the page tables in top down order. */ diff --git a/usr/src/uts/i86pc/vm/vm_dep.h b/usr/src/uts/i86pc/vm/vm_dep.h index e1f04fd5d7..9b0c513a1c 100644 --- a/usr/src/uts/i86pc/vm/vm_dep.h +++ b/usr/src/uts/i86pc/vm/vm_dep.h @@ -127,7 +127,7 @@ extern page_t ****page_freelists; */ extern page_t ***page_cachelists; -#define PAGE_CACHELISTS(mnode, color, mtype) \ +#define PAGE_CACHELISTS(mnode, color, mtype) \ (*(page_cachelists[mtype] + (color))) /* @@ -154,7 +154,7 @@ extern page_t *page_get_mnode_cachelist(uint_t, uint_t, int, int); * simply return the limits of the given mnode, which then * determines the length of hpm_counters array for the mnode. */ -#define HPM_COUNTERS_LIMITS(mnode, physbase, physmax, first) \ +#define HPM_COUNTERS_LIMITS(mnode, physbase, physmax, first) \ { \ (physbase) = mem_node_config[(mnode)].physbase; \ (physmax) = mem_node_config[(mnode)].physmax; \ @@ -183,6 +183,7 @@ extern page_t *page_get_mnode_cachelist(uint_t, uint_t, int, int); pgcnt_t _np; \ pfn_t _pfn = (pfn); \ pfn_t _endpfn = _pfn + _cnt; \ + rv = 0; \ while (_pfn < _endpfn) { \ _mn = PFN_2_MEM_NODE(_pfn); \ _np = MIN(_endpfn, mem_node_config[_mn].physmax + 1) - _pfn; \ diff --git a/usr/src/uts/intel/amd64/krtld/kobj_reloc.c b/usr/src/uts/intel/amd64/krtld/kobj_reloc.c index 56c8087baa..401f13a33a 100644 --- a/usr/src/uts/intel/amd64/krtld/kobj_reloc.c +++ b/usr/src/uts/intel/amd64/krtld/kobj_reloc.c @@ -24,8 +24,6 @@ * Use is subject to license terms. */ -#pragma ident "%Z%%M% %I% %E% SMI" - /* * x86 relocation code. 
*/ @@ -120,9 +118,8 @@ sdt_reloc_resolve(struct module *mp, char *symname, uint8_t *instr) } int -/* ARGSUSED2 */ -do_relocate(struct module *mp, char *reltbl, Word relshtype, int nreloc, - int relocsize, Addr baseaddr) +do_relocate(struct module *mp, char *reltbl, int nreloc, int relocsize, + Addr baseaddr) { unsigned long stndx; unsigned long off; /* can't be register for tnf_reloc_resolve() */ @@ -130,7 +127,7 @@ do_relocate(struct module *mp, char *reltbl, Word relshtype, int nreloc, register unsigned int rtype; unsigned long value; Elf64_Sxword addend; - Sym *symref; + Sym *symref = NULL; int err = 0; tnf_probe_control_t *probelist = NULL; tnf_tag_data_t *taglist = NULL; @@ -326,8 +323,8 @@ do_relocations(struct module *mp) } #endif - if (do_relocate(mp, (char *)rshp->sh_addr, rshp->sh_type, - nreloc, rshp->sh_entsize, shp->sh_addr) < 0) { + if (do_relocate(mp, (char *)rshp->sh_addr, nreloc, + rshp->sh_entsize, shp->sh_addr) < 0) { _kobj_printf(ops, "do_relocations: %s do_relocate failed\n", mp->filename); diff --git a/usr/src/uts/intel/os/fmsmb.c b/usr/src/uts/intel/os/fmsmb.c index 3a4785475b..51ca9dd693 100644 --- a/usr/src/uts/intel/os/fmsmb.c +++ b/usr/src/uts/intel/os/fmsmb.c @@ -1073,7 +1073,8 @@ bad: } int -fm_smb_mc_chipinst(uint_t bdf, uint_t *chip_inst) { +fm_smb_mc_chipinst(uint_t bdf, uint_t *chip_inst) +{ int i, j; smbios_hdl_t *shp; @@ -1101,10 +1102,12 @@ fm_smb_mc_chipinst(uint_t bdf, uint_t *chip_inst) { mastypes->type = SUN_OEM_EXT_MEMARRAY; smb_strcnt(shp, mastypes); + pstypes = NULL; + p_strcnt = 0; for (i = 0; i < mastypes->count; i++) { ext_id = mastypes->ids[i]->id; (void) smbios_info_extmemarray(shp, ext_id, &em); - if (em.smbmae_bdf == bdf) { + if (em.smbmae_bdf == bdf) { p_strcnt = smb_cnttypes(shp, SMB_TYPE_PROCESSOR); if (p_strcnt == 0) { smb_free_strcnt(mastypes, ma_strcnt); diff --git a/usr/src/uts/sparc/krtld/kobj_reloc.c b/usr/src/uts/sparc/krtld/kobj_reloc.c index 05e207810a..cc7df40d7c 100644 --- a/usr/src/uts/sparc/krtld/kobj_reloc.c +++ b/usr/src/uts/sparc/krtld/kobj_reloc.c @@ -24,8 +24,6 @@ * Use is subject to license terms. */ -#pragma ident "%Z%%M% %I% %E% SMI" - /* * SPARC relocation code. */ @@ -146,14 +144,8 @@ sdt_reloc_resolve(struct module *mp, char *symname, uint32_t *instr, long roff) } int -/* ARGSUSED2 */ -do_relocate( - struct module *mp, - char *reltbl, - Word relshtype, - int nreloc, - int relocsize, - Addr baseaddr) +do_relocate(struct module *mp, char *reltbl, int nreloc, int relocsize, + Addr baseaddr) { Word stndx; long off, roff; @@ -382,8 +374,8 @@ do_relocations(struct module *mp) _kobj_printf(ops, " section=%d\n", shn); } #endif - if (do_relocate(mp, (char *)rshp->sh_addr, rshp->sh_type, - nreloc, rshp->sh_entsize, shp->sh_addr) < 0) { + if (do_relocate(mp, (char *)rshp->sh_addr, nreloc, + rshp->sh_entsize, shp->sh_addr) < 0) { _kobj_printf(ops, "do_relocations: %s do_relocate failed\n", mp->filename); diff --git a/usr/src/uts/sun4/vm/vm_dep.h b/usr/src/uts/sun4/vm/vm_dep.h index 4923173d81..1bff35f69e 100644 --- a/usr/src/uts/sun4/vm/vm_dep.h +++ b/usr/src/uts/sun4/vm/vm_dep.h @@ -473,6 +473,7 @@ typedef struct { spgcnt_t _cnt = (spgcnt_t)(cnt); \ int _mn; \ pgcnt_t _np; \ + rv = 0; \ if (&plat_mem_node_intersect_range != NULL) { \ for (_mn = 0; _mn < max_mem_nodes; _mn++) { \ plat_mem_node_intersect_range((pfn), _cnt, _mn, &_np); \ @@ -527,7 +528,7 @@ extern plcnt_t plcnt; * if allocation from the RELOC pool failed and there is sufficient cage * memory, attempt to allocate from the NORELOC pool. 
*/ -#define MTYPE_NEXT(mnode, mtype, flags) { \ +#define MTYPE_NEXT(mnode, mtype, flags) { \ if (!(flags & (PG_NORELOC | PGI_NOCAGE | PGI_RELOCONLY)) && \ (kcage_freemem >= kcage_lotsfree)) { \ if (plcnt[mnode][MTYPE_NORELOC].plc_mt_pgmax == 0) { \ |