diff options
author | Jerry Jelinek <jerry.jelinek@joyent.com> | 2020-01-30 12:31:34 +0000 |
---|---|---|
committer | Jerry Jelinek <jerry.jelinek@joyent.com> | 2020-01-30 12:31:34 +0000 |
commit | f0dcd619b1421138ac11033df8e0cf904ba1efcf (patch) | |
tree | 2e7668916bdeb41d7b6b70d455b3bea71a9bdc4d /usr/src/uts/common/vm | |
parent | e0823b796aeea75fe40ec8888eb84ca5a25cff39 (diff) | |
parent | c6f039c73ee9eb7e4acb232afaca51cdf9d30ff3 (diff) | |
download | illumos-joyent-f0dcd619b1421138ac11033df8e0cf904ba1efcf.tar.gz |
[illumos-gate merge]
commit c6f039c73ee9eb7e4acb232afaca51cdf9d30ff3
12172 genunix: variable may be used uninitialized
commit 7093fd72e0be9a255aa4b1ed00a701f80699ac7d
12247 vtinfo and vtdaemon are missing CTF data
Diffstat (limited to 'usr/src/uts/common/vm')
-rw-r--r-- | usr/src/uts/common/vm/hat_refmod.c | 1 | ||||
-rw-r--r-- | usr/src/uts/common/vm/seg_dev.c | 38 | ||||
-rw-r--r-- | usr/src/uts/common/vm/seg_kp.c | 2 | ||||
-rw-r--r-- | usr/src/uts/common/vm/seg_spt.c | 4 | ||||
-rw-r--r-- | usr/src/uts/common/vm/seg_vn.c | 38 | ||||
-rw-r--r-- | usr/src/uts/common/vm/vm_as.c | 7 | ||||
-rw-r--r-- | usr/src/uts/common/vm/vm_usage.c | 2 |
7 files changed, 56 insertions, 36 deletions
diff --git a/usr/src/uts/common/vm/hat_refmod.c b/usr/src/uts/common/vm/hat_refmod.c index 31a85d4cf6..218c7b5a91 100644 --- a/usr/src/uts/common/vm/hat_refmod.c +++ b/usr/src/uts/common/vm/hat_refmod.c @@ -180,6 +180,7 @@ hat_setstat(struct as *as, caddr_t addr, size_t len, uint_t rmbits) */ if (vbits != as->a_vbits) { newbits = (vbits ^ as->a_vbits) & as->a_vbits; + nb = 0; while (newbits) { if (ffs(newbits)) nb = 1 << (ffs(newbits)-1); diff --git a/usr/src/uts/common/vm/seg_dev.c b/usr/src/uts/common/vm/seg_dev.c index 29896a6374..67c646049a 100644 --- a/usr/src/uts/common/vm/seg_dev.c +++ b/usr/src/uts/common/vm/seg_dev.c @@ -2385,10 +2385,10 @@ segdev_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav) /* * segdev pages are not dumped, so we just return */ -/*ARGSUSED*/ static void -segdev_dump(struct seg *seg) -{} +segdev_dump(struct seg *seg __unused) +{ +} /* * ddi_segmap_setup: Used by drivers who wish specify mapping attributes @@ -2405,7 +2405,7 @@ ddi_segmap_setup(dev_t dev, off_t offset, struct as *as, caddr_t *addrp, int (*mapfunc)(dev_t dev, off_t off, int prot); uint_t hat_attr; pfn_t pfn; - int error, i; + int error, i; TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SEGMAP_SETUP, "ddi_segmap_setup:start"); @@ -2427,25 +2427,25 @@ ddi_segmap_setup(dev_t dev, off_t offset, struct as *as, caddr_t *addrp, if (ddi_device_mapping_check(dev, accattrp, rnumber, &hat_attr) == -1) return (ENXIO); + if (len == 0) + return (ENXIO); + /* * Check to ensure that the entire range is * legal and we are not trying to map in * more than the device will let us. */ - for (i = 0; i < len; i += PAGESIZE) { - if (i == 0) { - /* - * Save the pfn at offset here. This pfn will be - * used later to get user address. - */ - if ((pfn = (pfn_t)cdev_mmap(mapfunc, dev, offset, - maxprot)) == PFN_INVALID) - return (ENXIO); - } else { - if (cdev_mmap(mapfunc, dev, offset + i, maxprot) == - PFN_INVALID) - return (ENXIO); - } + /* + * Save the pfn at offset here. 
This pfn will be + * used later to get user address. + */ + pfn = (pfn_t)cdev_mmap(mapfunc, dev, offset, maxprot); + if (pfn == PFN_INVALID) + return (ENXIO); + + for (i = PAGESIZE; i < len; i += PAGESIZE) { + if (cdev_mmap(mapfunc, dev, offset + i, maxprot) == PFN_INVALID) + return (ENXIO); } as_rangelock(as); @@ -2727,6 +2727,8 @@ devmap_roundup(devmap_handle_t *dhp, ulong_t offset, size_t len, * the page size to use. The same calculations can use the * virtual address to determine the page size. */ + pg = 0; + poff = 0; base = (ulong_t)ptob(dhp->dh_pfn); for (level = dhp->dh_mmulevel; level >= 0; level--) { pg = page_get_pagesize(level); diff --git a/usr/src/uts/common/vm/seg_kp.c b/usr/src/uts/common/vm/seg_kp.c index 02c844bff6..1f080d3079 100644 --- a/usr/src/uts/common/vm/seg_kp.c +++ b/usr/src/uts/common/vm/seg_kp.c @@ -399,6 +399,7 @@ segkp_get_internal( int kmflag = (flags & KPD_NOWAIT) ? KM_NOSLEEP : KM_SLEEP; caddr_t s_base = (segkp_fromheap) ? kvseg.s_base : seg->s_base; + segkpindex = 0; if (len & PAGEOFFSET) { panic("segkp_get: len is not page-aligned"); /*NOTREACHED*/ @@ -647,6 +648,7 @@ segkp_release_internal(struct seg *seg, struct segkp_data *kpd, size_t len) struct anon *ap; pgcnt_t segkpindex; + segkpindex = 0; ASSERT(kpd != NULL); ASSERT((kpd->kp_flags & KPD_HASAMP) == 0 || kpd->kp_cookie == -1); np = btop(len); diff --git a/usr/src/uts/common/vm/seg_spt.c b/usr/src/uts/common/vm/seg_spt.c index fc77d874e0..1a9ef5223f 100644 --- a/usr/src/uts/common/vm/seg_spt.c +++ b/usr/src/uts/common/vm/seg_spt.c @@ -566,6 +566,7 @@ segspt_create(struct seg **segpp, void *argsp) if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL) goto out1; + ppa = NULL; if ((sptcargs->flags & SHM_PAGEABLE) == 0) { if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages), KM_NOSLEEP)) == NULL) @@ -806,6 +807,9 @@ segspt_free_pages(struct seg *seg, caddr_t addr, size_t len) ASSERT(amp != NULL); + proj = NULL; + rootpp = NULL; + sp = NULL; if ((sptd->spt_flags & 
SHM_PAGEABLE) == 0) { sp = amp->a_sp; proj = sp->shm_perm.ipc_proj; diff --git a/usr/src/uts/common/vm/seg_vn.c b/usr/src/uts/common/vm/seg_vn.c index da6393f792..75e3bf5acb 100644 --- a/usr/src/uts/common/vm/seg_vn.c +++ b/usr/src/uts/common/vm/seg_vn.c @@ -2574,7 +2574,7 @@ segvn_softunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw) caddr_t adr; struct vnode *vp; u_offset_t offset; - ulong_t anon_index; + ulong_t anon_index = 0; struct anon_map *amp; struct anon *ap = NULL; @@ -2725,7 +2725,7 @@ segvn_faultpage( int cow; int claim; int steal = 0; - ulong_t anon_index; + ulong_t anon_index = 0; struct anon *ap, *oldap; struct anon_map *amp; int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD; @@ -3890,6 +3890,7 @@ segvn_fault_vnodepages(struct hat *hat, struct seg *seg, caddr_t lpgaddr, } if (svd->pageprot) { + prot = PROT_NONE; switch (rw) { case S_READ: protchk = PROT_READ; @@ -3906,6 +3907,7 @@ segvn_fault_vnodepages(struct hat *hat, struct seg *seg, caddr_t lpgaddr, break; } } else { + protchk = PROT_NONE; prot = svd->prot; /* caller has already done segment level protection check. */ } @@ -3921,6 +3923,7 @@ segvn_fault_vnodepages(struct hat *hat, struct seg *seg, caddr_t lpgaddr, VM_STAT_COND_ADD(amp != NULL, segvnvmstats.fltvnpages[3]); + ierr = 0; for (;;) { adjszc_chk = 0; for (; a < lpgeaddr; a += pgsz, off += pgsz, aindx += pages) { @@ -4681,6 +4684,7 @@ segvn_fault_anonpages(struct hat *hat, struct seg *seg, caddr_t lpgaddr, } if (svd->pageprot) { + prot = PROT_NONE; switch (rw) { case S_READ: protchk = PROT_READ; @@ -4698,12 +4702,14 @@ segvn_fault_anonpages(struct hat *hat, struct seg *seg, caddr_t lpgaddr, } VM_STAT_ADD(segvnvmstats.fltanpages[2]); } else { + protchk = PROT_NONE; prot = svd->prot; /* caller has already done segment level protection check. 
*/ } ppa = kmem_cache_alloc(segvn_szc_cache[ppaszc], KM_SLEEP); ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); + ierr = 0; for (;;) { adjszc_chk = 0; for (; a < lpgeaddr; a += pgsz, aindx += pages) { @@ -4932,12 +4938,12 @@ segvn_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len, u_offset_t off; caddr_t a; struct vpage *vpage; - uint_t vpprot, prot; + uint_t vpprot, prot = 0; int err; page_t *pl[PVN_GETPAGE_NUM + 1]; size_t plsz, pl_alloc_sz; size_t page; - ulong_t anon_index; + ulong_t anon_index = 0; struct anon_map *amp; int dogetpage = 0; caddr_t lpgaddr, lpgeaddr; @@ -5235,8 +5241,8 @@ slow: */ if ((page != 0) && fltadvice && svd->tr_state != SEGVN_TR_ON) { - struct vpage *vpp; - ulong_t fanon_index; + struct vpage *vpp = NULL; + ulong_t fanon_index = 0; size_t fpage; u_offset_t pgoff, fpgoff; struct vnode *fvp; @@ -5674,10 +5680,10 @@ static int segvn_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) { struct segvn_data *svd = (struct segvn_data *)seg->s_data; - struct vpage *cvp, *svp, *evp; + struct vpage *cvp, *svp = NULL, *evp = NULL; struct vnode *vp; size_t pgsz; - pgcnt_t pgcnt; + pgcnt_t pgcnt = 0; anon_sync_obj_t cookie; int unload_done = 0; @@ -7256,9 +7262,9 @@ segvn_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags) int bflags; int err = 0; int segtype; - int pageprot; + int pageprot = 0; int prot; - ulong_t anon_index; + ulong_t anon_index = 0; struct anon_map *amp; struct anon *ap; anon_sync_obj_t cookie; @@ -7642,10 +7648,10 @@ segvn_lockop(struct seg *seg, caddr_t addr, size_t len, u_offset_t offset; u_offset_t off; int segtype; - int pageprot; + int pageprot = 0; int claim; struct vnode *vp; - ulong_t anon_index; + ulong_t anon_index = 0; struct anon_map *amp; struct anon *ap; struct vattr va; @@ -8332,7 +8338,7 @@ segvn_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav) } else { caddr_t eaddr; struct seg *new_seg; - struct segvn_data *new_svd; + struct segvn_data *new_svd = NULL; 
u_offset_t off; caddr_t oldeaddr; @@ -8705,7 +8711,7 @@ segvn_dump(struct seg *seg) struct segvn_data *svd; page_t *pp; struct anon_map *amp; - ulong_t anon_index; + ulong_t anon_index = 0; struct vnode *vp; u_offset_t off, offset; pfn_t pfn; @@ -8832,7 +8838,7 @@ segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp, seg_preclaim_cbfunc_t preclaim_callback; size_t pgsz; int use_pcache; - size_t wlen; + size_t wlen = 0; uint_t pflags = 0; int sftlck_sbase = 0; int sftlck_send = 0; @@ -9593,7 +9599,7 @@ segvn_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp) { struct segvn_data *svd = (struct segvn_data *)seg->s_data; struct anon *ap = NULL; - ulong_t anon_index; + ulong_t anon_index = 0; struct anon_map *amp; anon_sync_obj_t cookie; diff --git a/usr/src/uts/common/vm/vm_as.c b/usr/src/uts/common/vm/vm_as.c index ec6d2b8920..69726d10c5 100644 --- a/usr/src/uts/common/vm/vm_as.c +++ b/usr/src/uts/common/vm/vm_as.c @@ -1439,7 +1439,7 @@ as_map_segvn_segs(struct as *as, caddr_t addr, size_t size, uint_t szcvec, uint_t szc, nszc, save_szcvec; int error; caddr_t a, eaddr; - size_t pgsz; + size_t pgsz = 0; const boolean_t do_off = (vn_a->vp != NULL || vn_a->amp != NULL); ASSERT(AS_WRITE_HELD(as)); @@ -1643,7 +1643,7 @@ as_map_ansegs(struct as *as, caddr_t addr, size_t size, segcreate_func_t crfp, struct segvn_crargs *vn_a, boolean_t *segcreated) { uint_t szcvec; - uchar_t type; + uchar_t type = 0; ASSERT(vn_a->type == MAP_SHARED || vn_a->type == MAP_PRIVATE); if (vn_a->type == MAP_SHARED) { @@ -2295,6 +2295,9 @@ as_ctl(struct as *as, caddr_t addr, size_t size, int func, int attr, ulong_t *mlock_map; /* pointer to bitmap used */ /* to represent the locked */ /* pages. 
*/ + + mlock_size = 0; + mlock_map = NULL; retry: if (error == IE_RETRY) AS_LOCK_ENTER(as, RW_WRITER); diff --git a/usr/src/uts/common/vm/vm_usage.c b/usr/src/uts/common/vm/vm_usage.c index 01c2666e91..88c694336d 100644 --- a/usr/src/uts/common/vm/vm_usage.c +++ b/usr/src/uts/common/vm/vm_usage.c @@ -1151,6 +1151,8 @@ vmu_calculate_seg(vmu_entity_t *vmu_entities, struct seg *seg) pgcnt_t swresv = 0; pgcnt_t panon = 0; + s_start = 0; + p_end = 0; /* Can zero-length segments exist? Not sure, so paranoia. */ if (seg->s_size <= 0) return; |