author | Josef 'Jeff' Sipek <jeffpc@josefsipek.net> | 2015-12-20 23:52:23 -0500
---|---|---
committer | Dan McDonald <danmcd@omniti.com> | 2016-01-05 15:38:23 -0500
commit | dc32d872cbeb56532bcea030255db9cd79bac7da (patch) |
tree | 3094992b63b9d1bc8343415366dd0ed07a08a4f9 |
parent | f285096a3146a243a565abdce1ba710a9ce24b0b (diff) |
download | illumos-gate-dc32d872cbeb56532bcea030255db9cd79bac7da.tar.gz |
6514 AS_* lock macros simplification
Reviewed by: Piotr Jasiukajtis <estibi@me.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
Reviewed by: Albert Lee <trisk@omniti.com>
Approved by: Dan McDonald <danmcd@omniti.com>
40 files changed, 465 insertions, 490 deletions
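The substance of the change is the macro block in usr/src/uts/common/vm/as.h: AS_LOCK_ENTER, AS_LOCK_EXIT, AS_LOCK_TRYENTER, AS_LOCK_DESTROY, AS_LOCK_HELD, AS_READ_HELD, and AS_WRITE_HELD now derive &as->a_lock themselves, so the explicit rwlock argument disappears from every call site in the diff below; the reader path still maps RW_READER to RW_READER_STARVEWRITER inside the macro, so locking semantics are unchanged. A minimal sketch of the new calling convention follows; example_scan_as() is a hypothetical caller written for illustration, not code from the patch.

```c
/*
 * Illustrative sketch only -- example_scan_as() is not part of the patch.
 * It shows the simplified AS_* macro calling convention: the rwlock
 * argument is gone because the macros now expand to operations on
 * &(as)->a_lock internally.
 */
#include <sys/types.h>
#include <sys/rwlock.h>
#include <sys/debug.h>
#include <vm/as.h>
#include <vm/seg.h>

static int
example_scan_as(struct as *as)
{
	struct seg *seg;
	int nsegs = 0;

	AS_LOCK_ENTER(as, RW_READER);	/* was AS_LOCK_ENTER(as, &as->a_lock, RW_READER) */
	ASSERT(AS_READ_HELD(as));	/* was AS_READ_HELD(as, &as->a_lock) */
	for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg))
		nsegs++;
	AS_LOCK_EXIT(as);		/* was AS_LOCK_EXIT(as, &as->a_lock) */

	return (nsegs);
}
```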
diff --git a/usr/src/uts/common/exec/elf/elf.c b/usr/src/uts/common/exec/elf/elf.c index 0eeb4e798c..d244c9cc81 100644 --- a/usr/src/uts/common/exec/elf/elf.c +++ b/usr/src/uts/common/exec/elf/elf.c @@ -1752,7 +1752,7 @@ top: ASSERT(p == ttoproc(curthread)); prstop(0, 0); - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); nphdrs = prnsegs(as, 0) + 2; /* two CORE note sections */ /* @@ -1763,7 +1763,7 @@ top: (void) process_scns(content, p, credp, NULL, NULL, NULL, 0, NULL, &nshdrs); } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); ASSERT(nshdrs == 0 || nshdrs > 1); @@ -1879,7 +1879,7 @@ top: mutex_exit(&p->p_lock); - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); i = 2; for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) { caddr_t eaddr = seg->s_base + pr_getsegsize(seg, 0); @@ -1979,7 +1979,7 @@ exclude: } ASSERT(tmp == NULL); } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); if (overflow || i != nphdrs) { if (ntries++ == 0) { @@ -2128,14 +2128,14 @@ exclude: bigwad->shdr[0].sh_info = nphdrs; if (nshdrs > 1) { - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); if ((error = process_scns(content, p, credp, vp, &bigwad->shdr[0], nshdrs, rlimit, &doffset, NULL)) != 0) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); goto done; } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); } if ((error = core_write(vp, UIO_SYSSPACE, soffset, diff --git a/usr/src/uts/common/fs/doorfs/door_sys.c b/usr/src/uts/common/fs/doorfs/door_sys.c index a1a285dc76..b11a5f0d7c 100644 --- a/usr/src/uts/common/fs/doorfs/door_sys.c +++ b/usr/src/uts/common/fs/doorfs/door_sys.c @@ -3020,9 +3020,9 @@ door_copy(struct as *as, caddr_t src, caddr_t dest, uint_t len) pfn_t pfnum; /* MMU mapping is already locked down */ - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); pfnum = hat_getpfnum(as->a_hat, rdest); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); /* * TODO: The pfn step should not be necessary - need diff --git a/usr/src/uts/common/fs/proc/prcontrol.c b/usr/src/uts/common/fs/proc/prcontrol.c index a73a64a4a4..6b151a6369 100644 --- a/usr/src/uts/common/fs/proc/prcontrol.c +++ b/usr/src/uts/common/fs/proc/prcontrol.c @@ -2513,7 +2513,7 @@ pr_cancel_watch(prnode_t *pnp) tree = &as->a_wpage; mutex_exit(&p->p_lock); - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); for (pwp = avl_first(tree); pwp != NULL; pwp = AVL_NEXT(tree, pwp)) { @@ -2528,7 +2528,7 @@ pr_cancel_watch(prnode_t *pnp) } } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); mutex_enter(&p->p_lock); } diff --git a/usr/src/uts/common/fs/proc/prioctl.c b/usr/src/uts/common/fs/proc/prioctl.c index 54d8a2b5c0..f1f39ed3be 100644 --- a/usr/src/uts/common/fs/proc/prioctl.c +++ b/usr/src/uts/common/fs/proc/prioctl.c @@ -962,9 +962,9 @@ startover: n = 0; else { mutex_exit(&p->p_lock); - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); n = prnsegs(as, 0); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); mutex_enter(&p->p_lock); } prunlock(pnp); @@ -983,9 +983,9 @@ startover: prunlock(pnp); } else { mutex_exit(&p->p_lock); - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); error = oprgetmap(p, &iolhead); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); mutex_enter(&p->p_lock); prunlock(pnp); @@ -1657,11 +1657,11 @@ oprgetpsinfo32(proc_t *p, prpsinfo32_t *psp, kthread_t *tp) psp->pr_pctmem = 0; } else { mutex_exit(&p->p_lock); - 
AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); psp->pr_size = (size32_t)btopr(as->a_resvsize); psp->pr_rssize = (size32_t)rm_asrss(as); psp->pr_pctmem = rm_pctmemory(as); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); mutex_enter(&p->p_lock); } } @@ -2589,9 +2589,9 @@ startover: n = 0; else { mutex_exit(&p->p_lock); - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); n = prnsegs(as, 0); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); mutex_enter(&p->p_lock); } prunlock(pnp); @@ -2613,9 +2613,9 @@ startover: prunlock(pnp); } else { mutex_exit(&p->p_lock); - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); error = oprgetmap32(p, &iolhead); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); mutex_enter(&p->p_lock); prunlock(pnp); @@ -3142,7 +3142,7 @@ propenm(prnode_t *pnp, caddr_t cmaddr, caddr_t va, int *rvalp, cred_t *cr) * change because it is marked P_PR_LOCK. */ mutex_exit(&p->p_lock); - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); seg = as_segat(as, va); if (seg != NULL && seg->s_ops == &segvn_ops && @@ -3153,7 +3153,7 @@ propenm(prnode_t *pnp, caddr_t cmaddr, caddr_t va, int *rvalp, cred_t *cr) } else { error = EINVAL; } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); mutex_enter(&p->p_lock); } else if ((xvp = p->p_exec) == NULL) { error = EINVAL; @@ -3496,11 +3496,11 @@ oprgetpsinfo(proc_t *p, prpsinfo_t *psp, kthread_t *tp) psp->pr_pctmem = 0; } else { mutex_exit(&p->p_lock); - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); psp->pr_size = btopr(as->a_resvsize); psp->pr_rssize = rm_asrss(as); psp->pr_pctmem = rm_pctmemory(as); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); mutex_enter(&p->p_lock); } } @@ -3523,7 +3523,7 @@ oprgetmap(proc_t *p, list_t *iolhead) struct seg *brkseg, *stkseg; uint_t prot; - ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock)); + ASSERT(as != &kas && AS_WRITE_HELD(as)); /* * Request an initial buffer size that doesn't waste memory @@ -3583,7 +3583,7 @@ oprgetmap32(proc_t *p, list_t *iolhead) struct seg *brkseg, *stkseg; uint_t prot; - ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock)); + ASSERT(as != &kas && AS_WRITE_HELD(as)); /* * Request an initial buffer size that doesn't waste memory @@ -3643,7 +3643,7 @@ oprpdsize(struct as *as) struct seg *seg; size_t size; - ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock)); + ASSERT(as != &kas && AS_WRITE_HELD(as)); if ((seg = AS_SEGFIRST(as)) == NULL) return (0); @@ -3673,7 +3673,7 @@ oprpdsize32(struct as *as) struct seg *seg; size_t size; - ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock)); + ASSERT(as != &kas && AS_WRITE_HELD(as)); if ((seg = AS_SEGFIRST(as)) == NULL) return (0); @@ -3711,15 +3711,15 @@ oprpdread(struct as *as, uint_t hatid, struct uio *uiop) int error; again: - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); if ((seg = AS_SEGFIRST(as)) == NULL) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (0); } size = oprpdsize(as); if (uiop->uio_resid < size) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (E2BIG); } @@ -3765,7 +3765,7 @@ again: */ if (next > (uintptr_t)buf + size) { pr_getprot_done(&tmp); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); kmem_free(buf, size); @@ -3797,7 +3797,7 @@ again: ASSERT(tmp == NULL); } while ((seg = AS_SEGNEXT(as, seg)) != NULL); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); ASSERT((uintptr_t)pmp <= (uintptr_t)buf + size); error = 
uiomove(buf, (caddr_t)pmp - buf, UIO_READ, uiop); @@ -3818,15 +3818,15 @@ oprpdread32(struct as *as, uint_t hatid, struct uio *uiop) int error; again: - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); if ((seg = AS_SEGFIRST(as)) == NULL) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (0); } size = oprpdsize32(as); if (uiop->uio_resid < size) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (E2BIG); } @@ -3872,7 +3872,7 @@ again: */ if (next > (uintptr_t)buf + size) { pr_getprot_done(&tmp); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); kmem_free(buf, size); @@ -3904,7 +3904,7 @@ again: ASSERT(tmp == NULL); } while ((seg = AS_SEGNEXT(as, seg)) != NULL); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); ASSERT((uintptr_t)pmp == (uintptr_t)buf + size); error = uiomove(buf, (caddr_t)pmp - buf, UIO_READ, uiop); diff --git a/usr/src/uts/common/fs/proc/prsubr.c b/usr/src/uts/common/fs/proc/prsubr.c index 7801fd0ac8..28950bf972 100644 --- a/usr/src/uts/common/fs/proc/prsubr.c +++ b/usr/src/uts/common/fs/proc/prsubr.c @@ -1376,7 +1376,7 @@ prnsegs(struct as *as, int reserved) int n = 0; struct seg *seg; - ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock)); + ASSERT(as != &kas && AS_WRITE_HELD(as)); for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) { caddr_t eaddr = seg->s_base + pr_getsegsize(seg, reserved); @@ -1619,7 +1619,7 @@ prgetmap(proc_t *p, int reserved, list_t *iolhead) struct vattr vattr; uint_t prot; - ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock)); + ASSERT(as != &kas && AS_WRITE_HELD(as)); /* * Request an initial buffer size that doesn't waste memory @@ -1730,7 +1730,7 @@ prgetmap32(proc_t *p, int reserved, list_t *iolhead) struct vattr vattr; uint_t prot; - ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock)); + ASSERT(as != &kas && AS_WRITE_HELD(as)); /* * Request an initial buffer size that doesn't waste memory @@ -1840,7 +1840,7 @@ prpdsize(struct as *as) struct seg *seg; size_t size; - ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock)); + ASSERT(as != &kas && AS_WRITE_HELD(as)); if ((seg = AS_SEGFIRST(as)) == NULL) return (0); @@ -1870,7 +1870,7 @@ prpdsize32(struct as *as) struct seg *seg; size_t size; - ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock)); + ASSERT(as != &kas && AS_WRITE_HELD(as)); if ((seg = AS_SEGFIRST(as)) == NULL) return (0); @@ -1909,15 +1909,15 @@ prpdread(proc_t *p, uint_t hatid, struct uio *uiop) int error; again: - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); if ((seg = AS_SEGFIRST(as)) == NULL) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (0); } size = prpdsize(as); if (uiop->uio_resid < size) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (E2BIG); } @@ -1965,7 +1965,7 @@ again: */ if (next > (uintptr_t)buf + size) { pr_getprot_done(&tmp); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); kmem_free(buf, size); @@ -2034,7 +2034,7 @@ again: ASSERT(tmp == NULL); } while ((seg = AS_SEGNEXT(as, seg)) != NULL); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); ASSERT((uintptr_t)pmp <= (uintptr_t)buf + size); error = uiomove(buf, (caddr_t)pmp - buf, UIO_READ, uiop); @@ -2056,15 +2056,15 @@ prpdread32(proc_t *p, uint_t hatid, struct uio *uiop) int error; again: - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); if ((seg = AS_SEGFIRST(as)) == NULL) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (0); } size = prpdsize32(as); if (uiop->uio_resid < size) { - 
AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (E2BIG); } @@ -2112,7 +2112,7 @@ again: */ if (next > (uintptr_t)buf + size) { pr_getprot_done(&tmp); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); kmem_free(buf, size); @@ -2181,7 +2181,7 @@ again: ASSERT(tmp == NULL); } while ((seg = AS_SEGNEXT(as, seg)) != NULL); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); ASSERT((uintptr_t)pmp <= (uintptr_t)buf + size); error = uiomove(buf, (caddr_t)pmp - buf, UIO_READ, uiop); @@ -2336,12 +2336,12 @@ prgetpsinfo(proc_t *p, psinfo_t *psp) psp->pr_rssize = 0; } else { mutex_exit(&p->p_lock); - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); psp->pr_size = btopr(as->a_resvsize) * (PAGESIZE / 1024); psp->pr_rssize = rm_asrss(as) * (PAGESIZE / 1024); psp->pr_pctmem = rm_pctmemory(as); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); mutex_enter(&p->p_lock); } } @@ -2469,13 +2469,13 @@ prgetpsinfo32(proc_t *p, psinfo32_t *psp) psp->pr_rssize = 0; } else { mutex_exit(&p->p_lock); - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); psp->pr_size = (size32_t) (btopr(as->a_resvsize) * (PAGESIZE / 1024)); psp->pr_rssize = (size32_t) (rm_asrss(as) * (PAGESIZE / 1024)); psp->pr_pctmem = rm_pctmemory(as); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); mutex_enter(&p->p_lock); } } @@ -3313,7 +3313,7 @@ pr_free_watched_pages(proc_t *p) return; ASSERT(MUTEX_NOT_HELD(&curproc->p_lock)); - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); pwp = avl_first(&as->a_wpage); @@ -3342,7 +3342,7 @@ pr_free_watched_pages(proc_t *p) avl_destroy(&as->a_wpage); p->p_wprot = NULL; - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); } /* @@ -3376,7 +3376,7 @@ set_watched_page(proc_t *p, caddr_t vaddr, caddr_t eaddr, newpwp = pwp; } - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); /* * Search for an existing watched page to contain the watched area. 
@@ -3391,7 +3391,7 @@ set_watched_page(proc_t *p, caddr_t vaddr, caddr_t eaddr, again: if (avl_numnodes(pwp_tree) > prnwatch) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); while (newpwp != NULL) { pwp = newpwp->wp_list; kmem_free(newpwp, sizeof (struct watched_page)); @@ -3464,7 +3464,7 @@ again: if ((vaddr = pwp->wp_vaddr + PAGESIZE) < eaddr) goto again; - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); /* * Free any pages we may have over-allocated @@ -3491,7 +3491,7 @@ clear_watched_page(proc_t *p, caddr_t vaddr, caddr_t eaddr, ulong_t flags) avl_tree_t *tree; avl_index_t where; - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); if (p->p_flag & SVFWAIT) tree = &p->p_wpage; @@ -3556,7 +3556,7 @@ clear_watched_page(proc_t *p, caddr_t vaddr, caddr_t eaddr, ulong_t flags) pwp = AVL_NEXT(tree, pwp); } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); } /* @@ -3568,7 +3568,7 @@ getwatchprot(struct as *as, caddr_t addr, uint_t *prot) struct watched_page *pwp; struct watched_page tpw; - ASSERT(AS_LOCK_HELD(as, &as->a_lock)); + ASSERT(AS_LOCK_HELD(as)); tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK); if ((pwp = avl_find(&as->a_wpage, &tpw, NULL)) != NULL) @@ -3855,7 +3855,7 @@ pr_getprot(struct seg *seg, int reserved, void **tmp, s.data = seg->s_data; - ASSERT(AS_WRITE_HELD(as, &as->a_lock)); + ASSERT(AS_WRITE_HELD(as)); ASSERT(saddr >= seg->s_base && saddr < eaddr); ASSERT(eaddr <= seg->s_base + seg->s_size); @@ -3969,7 +3969,7 @@ pr_getpagesize(struct seg *seg, caddr_t saddr, caddr_t *naddrp, caddr_t eaddr) { ssize_t pagesize, hatsize; - ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(AS_WRITE_HELD(seg->s_as)); ASSERT(IS_P2ALIGNED(saddr, PAGESIZE)); ASSERT(IS_P2ALIGNED(eaddr, PAGESIZE)); ASSERT(saddr < eaddr); @@ -4009,7 +4009,7 @@ prgetxmap(proc_t *p, list_t *iolhead) struct vattr vattr; uint_t prot; - ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock)); + ASSERT(as != &kas && AS_WRITE_HELD(as)); /* * Request an initial buffer size that doesn't waste memory @@ -4193,7 +4193,7 @@ prgetxmap32(proc_t *p, list_t *iolhead) struct vattr vattr; uint_t prot; - ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock)); + ASSERT(as != &kas && AS_WRITE_HELD(as)); /* * Request an initial buffer size that doesn't waste memory diff --git a/usr/src/uts/common/fs/proc/prvnops.c b/usr/src/uts/common/fs/proc/prvnops.c index b60f938c9b..39f2abbc32 100644 --- a/usr/src/uts/common/fs/proc/prvnops.c +++ b/usr/src/uts/common/fs/proc/prvnops.c @@ -889,7 +889,7 @@ readmap_common: return (0); } - if (!AS_LOCK_TRYENTER(as, &as->a_lock, RW_WRITER)) { + if (!AS_LOCK_TRYENTER(as, RW_WRITER)) { prunlock(pnp); delay(1); goto readmap_common; @@ -908,7 +908,7 @@ readmap_common: break; } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); mutex_enter(&p->p_lock); prunlock(pnp); @@ -2005,7 +2005,7 @@ readmap32_common: return (EOVERFLOW); } - if (!AS_LOCK_TRYENTER(as, &as->a_lock, RW_WRITER)) { + if (!AS_LOCK_TRYENTER(as, RW_WRITER)) { prunlock(pnp); delay(1); goto readmap32_common; @@ -2023,7 +2023,7 @@ readmap32_common: error = prgetmap32(p, 0, &iolhead); break; } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); mutex_enter(&p->p_lock); prunlock(pnp); @@ -2930,11 +2930,11 @@ prgetattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr, vap->va_size = 2 * PRSDSIZE; else { mutex_exit(&p->p_lock); - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); if (as->a_updatedir) rebuild_objdir(as); vap->va_size = (as->a_sizedir + 2) * PRSDSIZE; 
- AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); mutex_enter(&p->p_lock); } vap->va_nlink = 2; @@ -2944,12 +2944,12 @@ prgetattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr, vap->va_size = (P_FINFO(p)->fi_nfiles + 4) * PRSDSIZE; else { mutex_exit(&p->p_lock); - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); if (as->a_updatedir) rebuild_objdir(as); vap->va_size = (as->a_sizedir + 4 + P_FINFO(p)->fi_nfiles) * PRSDSIZE; - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); mutex_enter(&p->p_lock); } vap->va_nlink = 2; @@ -3015,7 +3015,7 @@ prgetattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr, vap->va_size = 0; else { mutex_exit(&p->p_lock); - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); if (type == PR_MAP) vap->va_mtime = as->a_updatetime; if (type == PR_XMAP) @@ -3024,7 +3024,7 @@ prgetattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr, else vap->va_size = prnsegs(as, type == PR_RMAP) * PR_OBJSIZE(prmap32_t, prmap_t); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); mutex_enter(&p->p_lock); } break; @@ -3073,14 +3073,14 @@ prgetattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr, * change while the process is marked P_PR_LOCK. */ mutex_exit(&p->p_lock); - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); #ifdef _LP64 vap->va_size = iam32bit? prpdsize32(as) : prpdsize(as); #else vap->va_size = prpdsize(as); #endif - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); mutex_enter(&p->p_lock); } break; @@ -3089,14 +3089,14 @@ prgetattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr, vap->va_size = 0; else { mutex_exit(&p->p_lock); - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); #ifdef _LP64 vap->va_size = iam32bit? oprpdsize32(as) : oprpdsize(as); #else vap->va_size = oprpdsize(as); #endif - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); mutex_enter(&p->p_lock); } break; @@ -3693,7 +3693,7 @@ pr_lookup_objectdir(vnode_t *dp, char *comp) * will not change because it is marked P_PR_LOCK. */ mutex_exit(&p->p_lock); - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); if ((seg = AS_SEGFIRST(as)) == NULL) { vp = NULL; goto out; @@ -3726,7 +3726,7 @@ out: if (vp != NULL) { VN_HOLD(vp); } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); mutex_enter(&p->p_lock); prunlock(dpnp); @@ -4150,7 +4150,7 @@ pr_lookup_pathdir(vnode_t *dp, char *comp) type = NAME_OBJECT; } } else { - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); if ((seg = AS_SEGFIRST(as)) != NULL) { do { /* @@ -4184,7 +4184,7 @@ pr_lookup_pathdir(vnode_t *dp, char *comp) type = NAME_OBJECT; } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); } } @@ -4838,7 +4838,7 @@ rebuild_objdir(struct as *as) int i, j; ulong_t nold, nnew; - ASSERT(AS_WRITE_HELD(as, &as->a_lock)); + ASSERT(AS_WRITE_HELD(as)); if (as->a_updatedir == 0 && as->a_objectdir != NULL) return; @@ -4943,7 +4943,7 @@ rebuild_objdir(struct as *as) static vnode_t * obj_entry(struct as *as, int slot) { - ASSERT(AS_LOCK_HELD(as, &as->a_lock)); + ASSERT(AS_LOCK_HELD(as)); if (as->a_objectdir == NULL) return (NULL); ASSERT(slot < as->a_sizedir); @@ -5007,7 +5007,7 @@ pr_readdir_objectdir(prnode_t *pnp, uio_t *uiop, int *eofp) * space via mmap/munmap calls. 
*/ if (as != NULL) { - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); if (as->a_updatedir) rebuild_objdir(as); objdirsize = as->a_sizedir; @@ -5025,7 +5025,7 @@ pr_readdir_objectdir(prnode_t *pnp, uio_t *uiop, int *eofp) } if (as != NULL) - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); /* * Stop when all objects have been reported. @@ -5299,11 +5299,11 @@ pr_readdir_pathdir(prnode_t *pnp, uio_t *uiop, int *eofp) as = NULL; objdirsize = 0; } else { - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); if (as->a_updatedir) rebuild_objdir(as); objdirsize = as->a_sizedir; - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); as = NULL; } @@ -5363,7 +5363,7 @@ pr_readdir_pathdir(prnode_t *pnp, uio_t *uiop, int *eofp) */ if (as == NULL) { as = p->p_as; - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); } if (as->a_updatedir) { @@ -5401,11 +5401,11 @@ pr_readdir_pathdir(prnode_t *pnp, uio_t *uiop, int *eofp) * Drop the address space lock to do the uiomove(). */ if (as != NULL) - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); error = uiomove((caddr_t)dirent, reclen, UIO_READ, uiop); if (as != NULL) - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); if (error) break; @@ -5417,7 +5417,7 @@ pr_readdir_pathdir(prnode_t *pnp, uio_t *uiop, int *eofp) if (fip != NULL) mutex_exit(&fip->fi_lock); if (as != NULL) - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); mutex_enter(&p->p_lock); prunlock(pnp); return (error); diff --git a/usr/src/uts/common/fs/ufs/ufs_vnops.c b/usr/src/uts/common/fs/ufs/ufs_vnops.c index fcffd952ed..b0f0d33e25 100644 --- a/usr/src/uts/common/fs/ufs/ufs_vnops.c +++ b/usr/src/uts/common/fs/ufs/ufs_vnops.c @@ -5653,7 +5653,7 @@ retry_map: * deadlock between ufs_read/ufs_map/pagefault when a quiesce is * pending. */ - while (!AS_LOCK_TRYENTER(as, &as->a_lock, RW_WRITER)) { + while (!AS_LOCK_TRYENTER(as, RW_WRITER)) { ufs_map_alock_retry_cnt++; delay(RETRY_LOCK_DELAY); } @@ -5669,7 +5669,7 @@ retry_map: * as->a_lock and wait for ulp->ul_fs_lock status to change. */ ufs_map_lockfs_retry_cnt++; - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); as_rangeunlock(as); if (error == EIO) goto out; diff --git a/usr/src/uts/common/io/mem.c b/usr/src/uts/common/io/mem.c index 7a703c5750..2931f97d85 100644 --- a/usr/src/uts/common/io/mem.c +++ b/usr/src/uts/common/io/mem.c @@ -288,10 +288,10 @@ mmpagelock(struct as *as, caddr_t va) struct seg *seg; int i; - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); seg = as_segat(as, va); i = (seg != NULL)? 
SEGOP_CAPABLE(seg, S_CAPABILITY_NOMINFLT) : 0; - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (i); } @@ -496,7 +496,7 @@ mmioctl_vtop(intptr_t data) as = p->p_as; if (as == mem_vtop.m_as) { mutex_exit(&p->p_lock); - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) if ((uintptr_t)mem_vtop.m_va - @@ -504,7 +504,7 @@ mmioctl_vtop(intptr_t data) break; if (seg != NULL) pfn = hat_getpfnum(as->a_hat, mem_vtop.m_va); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); mutex_enter(&p->p_lock); } sprunlock(p); diff --git a/usr/src/uts/common/io/physmem.c b/usr/src/uts/common/io/physmem.c index b6f8bf87f5..39d5003b02 100644 --- a/usr/src/uts/common/io/physmem.c +++ b/usr/src/uts/common/io/physmem.c @@ -647,7 +647,7 @@ physmem_getpage(struct vnode *vp, offset_t off, size_t len, uint_t *protp, page_t *pp; ASSERT(len == PAGESIZE); - ASSERT(AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(AS_READ_HELD(seg->s_as)); /* * If the page is in the hash, then we successfully claimed this diff --git a/usr/src/uts/common/os/dumpsubr.c b/usr/src/uts/common/os/dumpsubr.c index 3f2bef8221..781c564233 100644 --- a/usr/src/uts/common/os/dumpsubr.c +++ b/usr/src/uts/common/os/dumpsubr.c @@ -1407,7 +1407,7 @@ dump_as(struct as *as) { struct seg *seg; - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); for (seg = AS_SEGFIRST(as); seg; seg = AS_SEGNEXT(as, seg)) { if (seg->s_as != as) break; @@ -1415,7 +1415,7 @@ dump_as(struct as *as) continue; SEGOP_DUMP(seg); } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); if (seg != NULL) cmn_err(CE_WARN, "invalid segment %p in address space %p", diff --git a/usr/src/uts/common/os/exec.c b/usr/src/uts/common/os/exec.c index 7adc5c0c9d..f7c565e546 100644 --- a/usr/src/uts/common/os/exec.c +++ b/usr/src/uts/common/os/exec.c @@ -1273,12 +1273,12 @@ execmap(struct vnode *vp, caddr_t addr, size_t len, size_t zfodlen, * other return values from as_setprot. 
*/ - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); seg = as_segat(curproc->p_as, (caddr_t)end); if (seg != NULL) SEGOP_GETPROT(seg, (caddr_t)end, zfoddiff - 1, &zprot); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); if (seg != NULL && (zprot & PROT_WRITE) == 0) { if (as_setprot(as, (caddr_t)end, zfoddiff - 1, diff --git a/usr/src/uts/common/os/fork.c b/usr/src/uts/common/os/fork.c index 210a301850..fe3a362fa7 100644 --- a/usr/src/uts/common/os/fork.c +++ b/usr/src/uts/common/os/fork.c @@ -231,13 +231,13 @@ cfork(int isvfork, int isfork1, int flags) */ as = p->p_as; if (avl_numnodes(&as->a_wpage) != 0) { - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); as_clearwatch(as); p->p_wpage = as->a_wpage; avl_create(&as->a_wpage, wp_compare, sizeof (struct watched_page), offsetof(struct watched_page, wp_link)); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); } cp->p_as = as; cp->p_flag |= SVFORK; @@ -592,13 +592,13 @@ forklwperr: if (avl_numnodes(&p->p_wpage) != 0) { /* restore watchpoints to parent */ as = p->p_as; - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); as->a_wpage = p->p_wpage; avl_create(&p->p_wpage, wp_compare, sizeof (struct watched_page), offsetof(struct watched_page, wp_link)); as_setwatch(as); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); } } else { if (cp->p_segacct) @@ -1450,9 +1450,9 @@ vfwait(pid_t pid) /* restore watchpoints to parent */ if (pr_watch_active(pp)) { struct as *as = pp->p_as; - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); as_setwatch(as); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); } mutex_enter(&pp->p_lock); diff --git a/usr/src/uts/common/os/mmapobj.c b/usr/src/uts/common/os/mmapobj.c index 578e6dae86..b6b5446d71 100644 --- a/usr/src/uts/common/os/mmapobj.c +++ b/usr/src/uts/common/os/mmapobj.c @@ -1484,7 +1484,7 @@ check_exec_addrs(int loadable, mmapobj_result_t *mrp, caddr_t start_addr) * segdev and the type is neither MAP_SHARED * nor MAP_PRIVATE. 
*/ - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); seg = as_findseg(as, myaddr, 0); MOBJ_STAT_ADD(exec_addr_mapped); if (seg && seg->s_ops == &segdev_ops && @@ -1494,7 +1494,7 @@ check_exec_addrs(int loadable, mmapobj_result_t *mrp, caddr_t start_addr) myaddr + mylen <= seg->s_base + seg->s_size) { MOBJ_STAT_ADD(exec_addr_devnull); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); (void) as_unmap(as, myaddr, mylen); ret = as_map(as, myaddr, mylen, segvn_create, &crargs); @@ -1507,7 +1507,7 @@ check_exec_addrs(int loadable, mmapobj_result_t *mrp, caddr_t start_addr) return (ret); } } else { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); as_rangeunlock(as); mmapobj_unmap_exec(mrp, i, start_addr); MOBJ_STAT_ADD(exec_addr_in_use); diff --git a/usr/src/uts/common/os/move.c b/usr/src/uts/common/os/move.c index 073c77ac28..7ed8e4e3d3 100644 --- a/usr/src/uts/common/os/move.c +++ b/usr/src/uts/common/os/move.c @@ -662,12 +662,12 @@ uioainit(uio_t *uiop, uioa_t *uioap) } locked->uioa_ppp = (void **)pfnp; locked->uioa_pfncnt = pcnt; - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); while (pcnt-- > 0) { *pfnp++ = hat_getpfnum(as->a_hat, addr); addr += PAGESIZE; } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); } else { /* Have a page_t list, save it */ locked->uioa_ppp = (void **)pages; diff --git a/usr/src/uts/common/os/sunddi.c b/usr/src/uts/common/os/sunddi.c index 151bf4b5b0..ede7da413b 100644 --- a/usr/src/uts/common/os/sunddi.c +++ b/usr/src/uts/common/os/sunddi.c @@ -8370,7 +8370,7 @@ umem_lockmemory(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie, if (flags & DDI_UMEMLOCK_LONGTERM) { extern struct seg_ops segspt_shmops; extern struct seg_ops segdev_ops; - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); for (seg = as_segat(as, addr); ; seg = AS_SEGNEXT(as, seg)) { if (seg == NULL || seg->s_base > addr + len) break; @@ -8383,14 +8383,14 @@ umem_lockmemory(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie, (SEGOP_GETTYPE(seg, addr) & MAP_SHARED))) { as_pageunlock(as, p->pparray, addr, len, p->s_flags); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); umem_decr_devlockmem(p); kmem_free(p, sizeof (struct ddi_umem_cookie)); *cookie = (ddi_umem_cookie_t)NULL; return (EFAULT); } } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); } diff --git a/usr/src/uts/common/os/urw.c b/usr/src/uts/common/os/urw.c index ef70ccac45..4e97e99eb7 100644 --- a/usr/src/uts/common/os/urw.c +++ b/usr/src/uts/common/os/urw.c @@ -26,8 +26,6 @@ /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */ /* All Rights Reserved */ -#pragma ident "%Z%%M% %I% %E% SMI" - #include <sys/atomic.h> #include <sys/errno.h> #include <sys/stat.h> @@ -148,8 +146,7 @@ mapin(struct as *as, caddr_t addr, int writing) if (pp != NULL) { ASSERT(PAGE_LOCKED(pp)); kaddr = ppmapin(pp, writing ? - (PROT_READ | PROT_WRITE) : PROT_READ, - (caddr_t)-1); + (PROT_READ | PROT_WRITE) : PROT_READ, (caddr_t)-1); return (kaddr + ((uintptr_t)addr & PAGEOFFSET)); } } @@ -162,7 +159,7 @@ mapin(struct as *as, caddr_t addr, int writing) kaddr = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP); hat_devload(kas.a_hat, kaddr, PAGESIZE, pfnum, - writing ? (PROT_READ | PROT_WRITE) : PROT_READ, HAT_LOAD_LOCK); + writing ? 
(PROT_READ | PROT_WRITE) : PROT_READ, HAT_LOAD_LOCK); return (kaddr + ((uintptr_t)addr & PAGEOFFSET)); } @@ -202,11 +199,11 @@ urw(proc_t *p, int writing, void *buf, size_t len, uintptr_t a) */ page = (caddr_t)(uintptr_t)((uintptr_t)addr & PAGEMASK); retrycnt = 0; - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); retry: if ((seg = as_segat(as, page)) == NULL || !page_valid(seg, page)) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (ENXIO); } SEGOP_GETPROT(seg, page, 0, &prot); @@ -224,7 +221,7 @@ retry: } if (err != 0) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (ENXIO); } } @@ -247,7 +244,7 @@ retry: if (SEGOP_FAULT(as->a_hat, seg, page, PAGESIZE, F_SOFTLOCK, rw)) { if (protchanged) (void) SEGOP_SETPROT(seg, page, PAGESIZE, prot); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (ENXIO); } CPU_STATS_ADD_K(vm, softlock, 1); @@ -308,7 +305,7 @@ retry: if (protchanged) (void) SEGOP_SETPROT(seg, page, PAGESIZE, prot); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (error); } diff --git a/usr/src/uts/common/os/vm_subr.c b/usr/src/uts/common/os/vm_subr.c index 6f4abb6de5..537ddb6908 100644 --- a/usr/src/uts/common/os/vm_subr.c +++ b/usr/src/uts/common/os/vm_subr.c @@ -364,11 +364,11 @@ cow_mapin(struct as *as, caddr_t uaddr, caddr_t kaddr, struct page **cached_ppp, *lenp = 0; if (cow) { - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); seg = as_findseg(as, uaddr, 0); if ((seg == NULL) || ((base = seg->s_base) > uaddr) || (uaddr + total) > base + seg->s_size) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (EINVAL); } /* @@ -376,10 +376,10 @@ cow_mapin(struct as *as, caddr_t uaddr, caddr_t kaddr, struct page **cached_ppp, * But to be safe, we check against segvn. */ if (seg->s_ops != &segvn_ops) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (ENOTSUP); } else if ((SEGOP_GETTYPE(seg, uaddr) & MAP_PRIVATE) == 0) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (ENOTSUP); } } @@ -484,7 +484,7 @@ tryagain: ++i; } if (cow) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); } if (first && res == FC_NOMAP) { /* @@ -502,7 +502,7 @@ tryagain: size = total; res = as_fault(as->a_hat, as, uaddr, size, F_INVAL, S_READ); if (cow) - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); goto tryagain; } switch (res) { diff --git a/usr/src/uts/common/os/watchpoint.c b/usr/src/uts/common/os/watchpoint.c index 7909efcd2a..9704b44e30 100644 --- a/usr/src/uts/common/os/watchpoint.c +++ b/usr/src/uts/common/os/watchpoint.c @@ -24,8 +24,6 @@ * Use is subject to license terms. */ -#pragma ident "%Z%%M% %I% %E% SMI" - #include <sys/types.h> #include <sys/t_lock.h> #include <sys/param.h> @@ -196,14 +194,14 @@ startover: */ mutex_enter(&p->p_maplock); - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK); if ((pwp = avl_find(&as->a_wpage, &tpw, &where)) == NULL) pwp = avl_nearest(&as->a_wpage, where, AVL_AFTER); for (; pwp != NULL && pwp->wp_vaddr < eaddr; - pwp = AVL_NEXT(&as->a_wpage, pwp)) { + pwp = AVL_NEXT(&as->a_wpage, pwp)) { /* * If the requested protection has not been @@ -235,7 +233,7 @@ startover: * Release as lock while in holdwatch() * in case other threads need to grab it. 
*/ - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); mutex_exit(&p->p_maplock); if (holdwatch() != 0) { /* @@ -246,7 +244,7 @@ startover: goto startover; } mutex_enter(&p->p_maplock); - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); } p->p_mapcnt++; } @@ -306,8 +304,8 @@ startover: uint_t oprot; int err, retrycnt = 0; - AS_LOCK_EXIT(as, &as->a_lock); - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_EXIT(as); + AS_LOCK_ENTER(as, RW_WRITER); retry: seg = as_segat(as, addr); ASSERT(seg != NULL); @@ -320,9 +318,9 @@ startover: goto retry; } } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); } else - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); /* * When all pages are mapped back to their normal state, @@ -340,10 +338,10 @@ startover: } } - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); mutex_exit(&p->p_maplock); return (rv); @@ -394,7 +392,7 @@ setallwatch(void) ASSERT(MUTEX_NOT_HELD(&curproc->p_lock)); - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); pwp = p->p_wprot; while (pwp != NULL) { @@ -431,7 +429,7 @@ setallwatch(void) } p->p_wprot = NULL; - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); } @@ -469,15 +467,15 @@ pr_is_watchpage_as(caddr_t addr, enum seg_rw rw, struct as *as) switch (rw) { case S_READ: rv = ((prot & (PROT_USER|PROT_READ)) - != (PROT_USER|PROT_READ)); + != (PROT_USER|PROT_READ)); break; case S_WRITE: rv = ((prot & (PROT_USER|PROT_WRITE)) - != (PROT_USER|PROT_WRITE)); + != (PROT_USER|PROT_WRITE)); break; case S_EXEC: rv = ((prot & (PROT_USER|PROT_EXEC)) - != (PROT_USER|PROT_EXEC)); + != (PROT_USER|PROT_EXEC)); break; default: /* can't happen! */ @@ -505,9 +503,9 @@ pr_is_watchpage(caddr_t addr, enum seg_rw rw) return (0); /* Grab the lock because of XHAT (see comment in pr_mappage()) */ - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); rv = pr_is_watchpage_as(addr, rw, as); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (rv); } @@ -1026,7 +1024,7 @@ watch_copyinstr( kaddr += size; resid -= size; if (error == ENAMETOOLONG && resid > 0) - error = 0; + error = 0; if (error != 0 || (watchcode && (uaddr < vaddr || kaddr[-1] == '\0'))) break; /* didn't reach the watched area */ @@ -1056,7 +1054,7 @@ watch_copyinstr( kaddr += size; resid -= size; if (error == ENAMETOOLONG && resid > 0) - error = 0; + error = 0; } /* if we hit a watched address, do the watchpoint logic */ @@ -1133,7 +1131,7 @@ watch_copyoutstr( kaddr += size; resid -= size; if (error == ENAMETOOLONG && resid > 0) - error = 0; + error = 0; if (error != 0 || (watchcode && (uaddr < vaddr || kaddr[-1] == '\0'))) break; /* didn't reach the watched area */ @@ -1163,7 +1161,7 @@ watch_copyoutstr( kaddr += size; resid -= size; if (error == ENAMETOOLONG && resid > 0) - error = 0; + error = 0; } /* if we hit a watched address, do the watchpoint logic */ diff --git a/usr/src/uts/common/os/zone.c b/usr/src/uts/common/os/zone.c index 56c654331e..706e5ed16f 100644 --- a/usr/src/uts/common/os/zone.c +++ b/usr/src/uts/common/os/zone.c @@ -5609,7 +5609,7 @@ as_can_change_zones(void) int allow = 1; ASSERT(pp->p_as != &kas); - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) { /* @@ -5632,7 +5632,7 @@ as_can_change_zones(void) break; } } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (allow); } @@ -5648,7 +5648,7 @@ 
as_swresv(void) size_t swap = 0; ASSERT(pp->p_as != &kas); - ASSERT(AS_WRITE_HELD(as, &as->a_lock)); + ASSERT(AS_WRITE_HELD(as)); for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) swap += seg_swresv(seg); @@ -5853,7 +5853,7 @@ zone_enter(zoneid_t zoneid) * memory and reserve swap via MCL_FUTURE and MAP_NORESERVE * segments respectively. */ - AS_LOCK_ENTER(pp->as, &pp->p_as->a_lock, RW_WRITER); + AS_LOCK_ENTER(pp->p_as, RW_WRITER); swap = as_swresv(); mutex_enter(&pp->p_lock); zone_proj0 = zone->zone_zsched->p_task->tk_proj; @@ -5900,7 +5900,7 @@ zone_enter(zoneid_t zoneid) pp->p_flag |= SZONETOP; pp->p_zone = zone; mutex_exit(&pp->p_lock); - AS_LOCK_EXIT(pp->p_as, &pp->p_as->a_lock); + AS_LOCK_EXIT(pp->p_as); /* * Joining the zone cannot fail from now on. diff --git a/usr/src/uts/common/syscall/lgrpsys.c b/usr/src/uts/common/syscall/lgrpsys.c index a8afac8627..be60d1a445 100644 --- a/usr/src/uts/common/syscall/lgrpsys.c +++ b/usr/src/uts/common/syscall/lgrpsys.c @@ -190,7 +190,7 @@ meminfo(int addr_count, struct meminfo *mip) /* get the corresponding memory info for each virtual address */ as = curproc->p_as; - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); hat = as->a_hat; for (i = out_idx = 0; i < addr_count; i++, out_idx += info_count) { @@ -286,7 +286,7 @@ meminfo(int addr_count, struct meminfo *mip) } } } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); } /* copy out the results and validity bits and free the buffers */ diff --git a/usr/src/uts/common/syscall/rlimit.c b/usr/src/uts/common/syscall/rlimit.c index eac3584764..9b7b6ffee6 100644 --- a/usr/src/uts/common/syscall/rlimit.c +++ b/usr/src/uts/common/syscall/rlimit.c @@ -28,8 +28,6 @@ /* All Rights Reserved */ -#pragma ident "%Z%%M% %I% %E% SMI" - #include <sys/param.h> #include <sys/types.h> #include <sys/inttypes.h> @@ -141,7 +139,7 @@ ulimit(int cmd, long arg) */ retval = (long)brkend; - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); for (seg = as_findseg(as, brkend, 0); seg != NULL; seg = AS_SEGNEXT(as, seg)) { if (seg->s_base >= brkend) { @@ -212,7 +210,7 @@ ulimit(int cmd, long arg) (size_t)(brkendpg + size)); } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); /* truncate to same boundary as sbrk */ diff --git a/usr/src/uts/common/syscall/utssys.c b/usr/src/uts/common/syscall/utssys.c index 380df8e8fc..4e3a3a9ab4 100644 --- a/usr/src/uts/common/syscall/utssys.c +++ b/usr/src/uts/common/syscall/utssys.c @@ -29,8 +29,6 @@ /* All Rights Reserved */ -#pragma ident "%Z%%M% %I% %E% SMI" - #include <sys/param.h> #include <sys/inttypes.h> #include <sys/types.h> @@ -534,7 +532,7 @@ dofusers(vnode_t *fvp, int flags) struct seg *seg; struct as *as = prp->p_as; - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); for (seg = AS_SEGFIRST(as); seg; seg = AS_SEGNEXT(as, seg)) { /* @@ -598,7 +596,7 @@ dofusers(vnode_t *fvp, int flags) break; } } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); } if (use_flag) { diff --git a/usr/src/uts/common/vm/as.h b/usr/src/uts/common/vm/as.h index d70c655f9c..22ceb82fae 100644 --- a/usr/src/uts/common/vm/as.h +++ b/usr/src/uts/common/vm/as.h @@ -240,19 +240,19 @@ extern struct as kas; /* kernel's address space */ * in rwlock.c for more information on the semantics of and motivation behind * RW_READER_STARVEWRITER.) */ -#define AS_LOCK_ENTER(as, lock, type) rw_enter((lock), \ +#define AS_LOCK_ENTER(as, type) rw_enter(&(as)->a_lock, \ (type) == RW_READER ? 
RW_READER_STARVEWRITER : (type)) -#define AS_LOCK_EXIT(as, lock) rw_exit((lock)) -#define AS_LOCK_DESTROY(as, lock) rw_destroy((lock)) -#define AS_LOCK_TRYENTER(as, lock, type) rw_tryenter((lock), \ +#define AS_LOCK_EXIT(as) rw_exit(&(as)->a_lock) +#define AS_LOCK_DESTROY(as) rw_destroy(&(as)->a_lock) +#define AS_LOCK_TRYENTER(as, type) rw_tryenter(&(as)->a_lock, \ (type) == RW_READER ? RW_READER_STARVEWRITER : (type)) /* * Macros to test lock states. */ -#define AS_LOCK_HELD(as, lock) RW_LOCK_HELD((lock)) -#define AS_READ_HELD(as, lock) RW_READ_HELD((lock)) -#define AS_WRITE_HELD(as, lock) RW_WRITE_HELD((lock)) +#define AS_LOCK_HELD(as) RW_LOCK_HELD(&(as)->a_lock) +#define AS_READ_HELD(as) RW_READ_HELD(&(as)->a_lock) +#define AS_WRITE_HELD(as) RW_WRITE_HELD(&(as)->a_lock) /* * macros to walk thru segment lists diff --git a/usr/src/uts/common/vm/seg_dev.c b/usr/src/uts/common/vm/seg_dev.c index 6cf938a007..f43a288cec 100644 --- a/usr/src/uts/common/vm/seg_dev.c +++ b/usr/src/uts/common/vm/seg_dev.c @@ -368,7 +368,7 @@ segdev_create(struct seg *seg, void *argsp) * Since the address space is "write" locked, we * don't need the segment lock to protect "segdev" data. */ - ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP); @@ -474,7 +474,7 @@ segdev_dup(struct seg *seg, struct seg *newseg) * Since the address space is "write" locked, we * don't need the segment lock to protect "segdev" data. */ - ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); newsdp = sdp_alloc(); @@ -646,7 +646,7 @@ segdev_unmap(struct seg *seg, caddr_t addr, size_t len) * Since the address space is "write" locked, we * don't need the segment lock to protect "segdev" data. */ - ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); if ((sz = sdp->softlockcnt) > 0) { /* @@ -1135,7 +1135,7 @@ segdev_free(struct seg *seg) * Since the address space is "write" locked, we * don't need the segment lock to protect "segdev" data. 
*/ - ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); while (dhp != NULL) dhp = devmap_handle_unmap(dhp); @@ -1617,7 +1617,7 @@ segdev_fault( "addr %p len %lx type %x\n", (void *)dhp_head, (void *)seg, (void *)addr, len, type)); - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); /* Handle non-devmap case */ if (dhp_head == NULL) @@ -2057,7 +2057,7 @@ segdev_faulta(struct seg *seg, caddr_t addr) { TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_FAULTA, "segdev_faulta: seg=%p addr=%p", (void *)seg, (void *)addr); - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); return (0); } @@ -2075,7 +2075,7 @@ segdev_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_SETPROT, "segdev_setprot:start seg=%p addr=%p len=%lx prot=%x", (void *)seg, (void *)addr, len, prot); - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); if ((sz = sdp->softlockcnt) > 0 && dhp_head != NULL) { /* @@ -2198,7 +2198,7 @@ segdev_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_CHECKPROT, "segdev_checkprot:start seg=%p addr=%p len=%lx prot=%x", (void *)seg, (void *)addr, len, prot); - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); /* * If segment protection can be used, simply check against them @@ -2235,7 +2235,7 @@ segdev_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv) TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_GETPROT, "segdev_getprot:start seg=%p addr=%p len=%lx protv=%p", (void *)seg, (void *)addr, len, (void *)protv); - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1; if (pgno != 0) { @@ -2266,7 +2266,7 @@ segdev_getoffset(register struct seg *seg, caddr_t addr) TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETOFFSET, "segdev_getoffset:start seg=%p addr=%p", (void *)seg, (void *)addr); - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); return ((u_offset_t)sdp->offset + (addr - seg->s_base)); } @@ -2280,7 +2280,7 @@ segdev_gettype(register struct seg *seg, caddr_t addr) TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETTYPE, "segdev_gettype:start seg=%p addr=%p", (void *)seg, (void *)addr); - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); return (sdp->type); } @@ -2295,7 +2295,7 @@ segdev_getvp(register struct seg *seg, caddr_t addr, struct vnode **vpp) TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETVP, "segdev_getvp:start seg=%p addr=%p", (void *)seg, (void *)addr); - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); /* * Note that this vp is the common_vp of the device, where the @@ -2325,7 +2325,7 @@ segdev_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags) { TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SYNC, "segdev_sync:start"); - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); return (0); } @@ -2341,7 +2341,7 @@ segdev_incore(struct seg *seg, caddr_t addr, size_t len, char *vec) TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_INCORE, "segdev_incore:start"); - 
ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); for (len = (len + PAGEOFFSET) & PAGEMASK; len; len -= PAGESIZE, v += PAGESIZE) @@ -2360,7 +2360,7 @@ segdev_lockop(struct seg *seg, caddr_t addr, { TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_LOCKOP, "segdev_lockop:start"); - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); return (0); } @@ -2375,7 +2375,7 @@ segdev_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav) { TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_ADVISE, "segdev_advise:start"); - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); return (0); } @@ -3080,7 +3080,7 @@ devmap_load(devmap_cookie_t dhc, offset_t offset, size_t len, uint_t type, * the as lock is held. Verify here and return error if drivers * inadvertently call devmap_load on a wrong devmap handle. */ - if ((asp != &kas) && !AS_LOCK_HELD(asp, &asp->a_lock)) + if ((asp != &kas) && !AS_LOCK_HELD(asp)) return (FC_MAKE_ERR(EINVAL)); soff = (ssize_t)(offset - dhp->dh_uoff); diff --git a/usr/src/uts/common/vm/seg_kpm.c b/usr/src/uts/common/vm/seg_kpm.c index 0886513183..bf0656a629 100644 --- a/usr/src/uts/common/vm/seg_kpm.c +++ b/usr/src/uts/common/vm/seg_kpm.c @@ -210,7 +210,7 @@ faultcode_t segkpm_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw) { - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); switch (type) { case F_INVAL: diff --git a/usr/src/uts/common/vm/seg_map.c b/usr/src/uts/common/vm/seg_map.c index 1edb92e892..f08afe1066 100644 --- a/usr/src/uts/common/vm/seg_map.c +++ b/usr/src/uts/common/vm/seg_map.c @@ -853,7 +853,7 @@ segmap_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv) struct segmap_data *smd = (struct segmap_data *)seg->s_data; size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1; - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); if (pgno != 0) { do { diff --git a/usr/src/uts/common/vm/seg_spt.c b/usr/src/uts/common/vm/seg_spt.c index 8d85fbaef7..ded3ed49e3 100644 --- a/usr/src/uts/common/vm/seg_spt.c +++ b/usr/src/uts/common/vm/seg_spt.c @@ -241,7 +241,7 @@ segspt_free(struct seg *seg) { struct spt_data *sptd = (struct spt_data *)seg->s_data; - ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); if (sptd != NULL) { if (sptd->spt_realsize) @@ -263,7 +263,7 @@ static int segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags) { - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); return (0); } @@ -278,7 +278,7 @@ segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec) struct seg *sptseg; struct spt_data *sptd; - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); #ifdef lint seg = seg; #endif @@ -342,7 +342,7 @@ segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize) { size_t share_size; - ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); /* * seg.s_size may have been rounded up to the largest page size @@ -393,7 +393,7 @@ segspt_create(struct seg *seg, caddr_t argsp) * We are holding the a_lock on the underlying 
dummy as, * so we can make calls to the HAT layer. */ - ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); ASSERT(sp != NULL); #ifdef DEBUG @@ -629,7 +629,7 @@ segspt_free_pages(struct seg *seg, caddr_t addr, size_t len) kproject_t *proj; kshmid_t *sp; - ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); len = P2ROUNDUP(len, PAGESIZE); @@ -838,7 +838,7 @@ segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len, pgcnt_t claim_availrmem = 0; uint_t szc; - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK); /* @@ -1193,7 +1193,7 @@ segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len, struct vnode *vp; u_offset_t off; - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK); @@ -1451,7 +1451,7 @@ segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist, ASSERT(sptd->spt_pcachecnt != 0); ASSERT(sptd->spt_ppa == pplist); ASSERT(npages == btopr(sptd->spt_amp->size)); - ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(async || AS_LOCK_HELD(seg->s_as)); /* * Acquire the lock on the dummy seg and destroy the @@ -1585,7 +1585,7 @@ segspt_softunlock(struct seg *seg, caddr_t sptseg_addr, struct anon *ap = NULL; pgcnt_t npages; - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); sptseg = shmd->shm_sptseg; sptd = sptseg->s_data; @@ -1609,9 +1609,9 @@ segspt_softunlock(struct seg *seg, caddr_t sptseg_addr, * underlying dummy as. This is mostly to satisfy the * underlying HAT layer. */ - AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER); + AS_LOCK_ENTER(sptseg->s_as, RW_READER); hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len); - AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); + AS_LOCK_EXIT(sptseg->s_as); amp = sptd->spt_amp; ASSERT(amp != NULL); @@ -1676,7 +1676,7 @@ segspt_shmattach(struct seg *seg, caddr_t *argsp) struct spt_data *sptd; int error = 0; - ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP); if (shmd == NULL) @@ -1735,7 +1735,7 @@ segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize) struct shm_data *shmd = (struct shm_data *)seg->s_data; int reclaim = 1; - ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); retry: if (shmd->shm_softlockcnt > 0) { if (reclaim == 1) { @@ -1769,7 +1769,7 @@ segspt_shmfree(struct seg *seg) struct shm_data *shmd = (struct shm_data *)seg->s_data; struct anon_map *shm_amp = shmd->shm_amp; - ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0, MC_UNLOCK, NULL, 0); @@ -1802,7 +1802,7 @@ segspt_shmfree(struct seg *seg) int segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) { - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); /* * Shared page table is more than shared mapping. 
@@ -1840,7 +1840,7 @@ segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr, #ifdef lint hat = hat; #endif - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); /* * Because of the way spt is implemented @@ -1909,7 +1909,7 @@ segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr, } goto dism_err; } - AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER); + AS_LOCK_ENTER(sptseg->s_as, RW_READER); a = segspt_addr; pidx = 0; if (type == F_SOFTLOCK) { @@ -1970,7 +1970,7 @@ segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr, } } } - AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); + AS_LOCK_EXIT(sptseg->s_as); dism_err: kmem_free(ppa, npages * sizeof (page_t *)); return (err); @@ -2037,7 +2037,7 @@ segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr, hat = hat; #endif - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); if (sptd->spt_flags & SHM_PAGEABLE) { return (segspt_dismfault(hat, seg, addr, len, type, rw)); @@ -2169,7 +2169,7 @@ segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr, * underlying dummy as. This is mostly to satisfy the * underlying HAT layer. */ - AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER); + AS_LOCK_ENTER(sptseg->s_as, RW_READER); a = sptseg_addr; pidx = 0; if (type == F_SOFTLOCK) { @@ -2214,7 +2214,7 @@ segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr, for (i = 0; i < npages; i++) page_unlock(ppa[i]); } - AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock); + AS_LOCK_EXIT(sptseg->s_as); kmem_free(ppa, sizeof (page_t *) * npages); return (0); @@ -2284,7 +2284,7 @@ segspt_shmdup(struct seg *seg, struct seg *newseg) struct spt_data *sptd = spt_seg->s_data; int error = 0; - ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP); newseg->s_data = (void *)shmd_new; @@ -2326,7 +2326,7 @@ segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot) struct shm_data *shmd = (struct shm_data *)seg->s_data; struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); /* * ISM segment is always rw. @@ -2677,7 +2677,7 @@ segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len, struct proc *p = curproc; kproject_t *proj; - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); ASSERT(sp != NULL); if ((sptd->spt_flags & SHM_PAGEABLE) == 0) { @@ -2807,7 +2807,7 @@ segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv) struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1; - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); /* * ISM segment is always rw. 
@@ -2821,7 +2821,7 @@ segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv) u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr) { - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); /* Offset does not matter in ISM memory */ @@ -2835,7 +2835,7 @@ segspt_shmgettype(struct seg *seg, caddr_t addr) struct shm_data *shmd = (struct shm_data *)seg->s_data; struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); /* * The shared memory mapping is always MAP_SHARED, SWAP is only @@ -2852,7 +2852,7 @@ segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp) struct shm_data *shmd = (struct shm_data *)seg->s_data; struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data; - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); *vpp = sptd->spt_vp; return (0); @@ -2878,7 +2878,7 @@ segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav) int writer; page_t **ppa; - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); if (behav == MADV_FREE) { if ((sptd->spt_flags & SHM_PAGEABLE) == 0) @@ -2912,9 +2912,9 @@ segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav) * kicked out of the seg_pcache. We bump the shm_softlockcnt * to keep this segment resident. */ - writer = AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock); + writer = AS_WRITE_HELD(seg->s_as); atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt))); - AS_LOCK_EXIT(seg->s_as, &seg->s_as->a_lock); + AS_LOCK_EXIT(seg->s_as); mutex_enter(&sptd->spt_lock); @@ -2935,8 +2935,7 @@ segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav) mutex_exit(&sptd->spt_lock); /* Regrab the AS_LOCK and release our hold on the segment */ - AS_LOCK_ENTER(seg->s_as, &seg->s_as->a_lock, - writer ? RW_WRITER : RW_READER); + AS_LOCK_ENTER(seg->s_as, writer ? RW_WRITER : RW_READER); atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt))); if (shmd->shm_softlockcnt <= 0) { if (AS_ISUNMAPWAIT(seg->s_as)) { diff --git a/usr/src/uts/common/vm/seg_vn.c b/usr/src/uts/common/vm/seg_vn.c index 7e514ba1c9..7ec20713d9 100644 --- a/usr/src/uts/common/vm/seg_vn.c +++ b/usr/src/uts/common/vm/seg_vn.c @@ -551,7 +551,7 @@ segvn_create(struct seg *seg, void *argsp) int use_rgn = 0; int trok = 0; - ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); if (a->type != MAP_PRIVATE && a->type != MAP_SHARED) { panic("segvn_create type"); @@ -1022,7 +1022,7 @@ segvn_concat(struct seg *seg1, struct seg *seg2, int amp_cat) pgcnt_t npages1, npages2; ASSERT(seg1->s_as && seg2->s_as && seg1->s_as == seg2->s_as); - ASSERT(AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock)); + ASSERT(AS_WRITE_HELD(seg1->s_as)); ASSERT(seg1->s_ops == seg2->s_ops); if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie) || @@ -1267,7 +1267,7 @@ segvn_extend_prev(seg1, seg2, a, swresv) * We don't need any segment level locks for "segvn" data * since the address space is "write" locked. 
*/ - ASSERT(seg1->s_as && AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock)); + ASSERT(seg1->s_as && AS_WRITE_HELD(seg1->s_as)); if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie)) { return (-1); @@ -1388,7 +1388,7 @@ segvn_extend_next( * We don't need any segment level locks for "segvn" data * since the address space is "write" locked. */ - ASSERT(seg2->s_as && AS_WRITE_HELD(seg2->s_as, &seg2->s_as->a_lock)); + ASSERT(seg2->s_as && AS_WRITE_HELD(seg2->s_as)); if (HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) { return (-1); @@ -1575,7 +1575,7 @@ segvn_dup(struct seg *seg, struct seg *newseg) size_t len; struct anon_map *amp; - ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); ASSERT(newseg->s_as->a_proc->p_parent == curproc); /* @@ -1872,7 +1872,7 @@ segvn_unmap(struct seg *seg, caddr_t addr, size_t len) * We don't need any segment level locks for "segvn" data * since the address space is "write" locked. */ - ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); /* * Fail the unmap if pages are SOFTLOCKed through this mapping. @@ -2422,7 +2422,7 @@ segvn_free(struct seg *seg) * We don't need any segment level locks for "segvn" data * since the address space is "write" locked. */ - ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); ASSERT(svd->tr_state == SEGVN_TR_OFF); ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); @@ -2572,7 +2572,7 @@ segvn_softunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw) struct anon_map *amp; struct anon *ap = NULL; - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); if ((amp = svd->amp) != NULL) @@ -4961,7 +4961,7 @@ segvn_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len, anon_sync_obj_t cookie; int brkcow = BREAK_COW_SHARE(rw, type, svd->type); - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); ASSERT(svd->amp == NULL || svd->rcookie == HAT_INVALID_REGION_COOKIE); /* @@ -4970,7 +4970,7 @@ segvn_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len, if (type == F_SOFTUNLOCK) { if (rw == S_READ_NOCOW) { rw = S_READ; - ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(AS_WRITE_HELD(seg->s_as)); } SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); pgsz = (seg->s_szc == 0) ? 
PAGESIZE : @@ -5115,7 +5115,7 @@ top: } } - ASSERT(demote || AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(demote || AS_WRITE_HELD(seg->s_as)); if (demote) { SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); @@ -5169,7 +5169,7 @@ top: */ if (rw == S_READ_NOCOW) { ASSERT(type == F_SOFTLOCK); - ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(AS_WRITE_HELD(seg->s_as)); rw = S_READ; } @@ -5640,7 +5640,7 @@ segvn_faulta(struct seg *seg, caddr_t addr) struct anon_map *amp; vnode_t *vp; - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); if ((amp = svd->amp) != NULL) { @@ -5697,7 +5697,7 @@ segvn_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) anon_sync_obj_t cookie; int unload_done = 0; - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); if ((svd->maxprot & prot) != prot) return (EACCES); /* violated maxprot */ @@ -5774,7 +5774,7 @@ segvn_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) * we need to return IE_RETRY and let the as * layer drop and re-acquire the lock as a writer. */ - if (AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) + if (AS_READ_HELD(seg->s_as)) return (IE_RETRY); VM_STAT_ADD(segvnvmstats.demoterange[1]); if (svd->type == MAP_PRIVATE || svd->vp != NULL) { @@ -6100,7 +6100,7 @@ segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc) int err; u_offset_t off = svd->offset + (uintptr_t)(addr - seg->s_base); - ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size); if (seg->s_szc == szc || segvn_lpg_disable != 0) { @@ -6388,7 +6388,7 @@ segvn_clrszc(struct seg *seg) uint_t prot = svd->prot, vpprot; int pageflag = 0; - ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) || + ASSERT(AS_WRITE_HELD(seg->s_as) || SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); ASSERT(svd->softlockcnt == 0); @@ -6600,7 +6600,7 @@ segvn_split_seg(struct seg *seg, caddr_t addr) size_t nsize; struct segvn_data *nsvd; - ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(AS_WRITE_HELD(seg->s_as)); ASSERT(svd->tr_state == SEGVN_TR_OFF); ASSERT(addr >= seg->s_base); @@ -6755,7 +6755,7 @@ segvn_demote_range( uint_t szc = seg->s_szc; uint_t tszcvec; - ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(AS_WRITE_HELD(seg->s_as)); ASSERT(svd->tr_state == SEGVN_TR_OFF); ASSERT(szc != 0); pgsz = page_get_pagesize(szc); @@ -6884,7 +6884,7 @@ segvn_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) struct segvn_data *svd = (struct segvn_data *)seg->s_data; struct vpage *vp, *evp; - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); /* @@ -6918,7 +6918,7 @@ segvn_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv) struct segvn_data *svd = (struct segvn_data *)seg->s_data; size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1; - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); if (pgno != 0) { SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); @@ -6944,7 +6944,7 @@ segvn_getoffset(struct seg *seg, caddr_t addr) { struct segvn_data *svd = (struct segvn_data *)seg->s_data; - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, 
&seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); return (svd->offset + (uintptr_t)(addr - seg->s_base)); } @@ -6955,7 +6955,7 @@ segvn_gettype(struct seg *seg, caddr_t addr) { struct segvn_data *svd = (struct segvn_data *)seg->s_data; - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); return (svd->type | (svd->flags & (MAP_NORESERVE | MAP_TEXT | MAP_INITDATA))); @@ -6967,7 +6967,7 @@ segvn_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp) { struct segvn_data *svd = (struct segvn_data *)seg->s_data; - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); *vpp = svd->vp; return (0); @@ -6994,8 +6994,8 @@ segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta) u_offset_t off1, off2; struct anon_map *amp; - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); - ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) || + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); + ASSERT(AS_WRITE_HELD(seg->s_as) || SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); if (addr + delta < seg->s_base || @@ -7100,7 +7100,7 @@ segvn_swapout(struct seg *seg) pgcnt_t page; ulong_t anon_index; - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); /* @@ -7279,7 +7279,7 @@ segvn_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags) struct anon *ap; anon_sync_obj_t cookie; - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); @@ -7501,7 +7501,7 @@ segvn_incore(struct seg *seg, caddr_t addr, size_t len, char *vec) uint_t attr; anon_sync_obj_t cookie; - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); if (svd->amp == NULL && svd->vp == NULL) { @@ -7677,7 +7677,7 @@ segvn_lockop(struct seg *seg, caddr_t addr, size_t len, * Hold write lock on address space because may split or concatenate * segments */ - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); /* * If this is a shm, use shm's project and zone, else use @@ -8093,7 +8093,7 @@ segvn_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav) struct seg *prev; struct vnode *vp; - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); /* * In case of MADV_FREE, we won't be modifying any segment private @@ -8221,8 +8221,7 @@ segvn_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav) * address space because the segment may be * split or concatenated when changing policy */ - if (AS_READ_HELD(seg->s_as, - &seg->s_as->a_lock)) { + if (AS_READ_HELD(seg->s_as)) { SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); return (IE_RETRY); } @@ -8372,7 +8371,7 @@ segvn_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav) * split or concatenated when changing policy */ if (svd->type == MAP_PRIVATE && - AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) { + AS_READ_HELD(seg->s_as)) { SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); return (IE_RETRY); } @@ -8558,7 +8557,7 @@ segvn_inherit(struct seg *seg, caddr_t addr, size_t len, uint_t behav) size_t page; int ret = 0; - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as 
&& AS_LOCK_HELD(seg->s_as)); /* Can't support something we don't know about */ if (behav != SEGP_INH_ZERO) @@ -8827,7 +8826,7 @@ segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp, TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_START, "segvn_pagelock: start seg %p addr %p", seg, addr); - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK); SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); @@ -9449,7 +9448,7 @@ segvn_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist, ASSERT(svd->vp == NULL && svd->amp != NULL); ASSERT(svd->softlockcnt >= npages); - ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(async || AS_LOCK_HELD(seg->s_as)); pl = pplist; @@ -9727,7 +9726,7 @@ segvn_textrepl(struct seg *seg) int first; struct anon_map *amp; - ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(AS_LOCK_HELD(seg->s_as)); ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); ASSERT(p != NULL); ASSERT(svd->tr_state == SEGVN_TR_INIT); @@ -10008,8 +10007,8 @@ segvn_textunrepl(struct seg *seg, int unload_unmap) lgrp_id_t lgrp_id = svd->tr_policy_info.mem_lgrpid; lgrp_id_t i; - ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); - ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) || + ASSERT(AS_LOCK_HELD(seg->s_as)); + ASSERT(AS_WRITE_HELD(seg->s_as) || SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); ASSERT(svd->tr_state == SEGVN_TR_ON); ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); @@ -10234,7 +10233,7 @@ segvn_trupdate_seg(struct seg *seg, * Use tryenter locking since we are locking as/seg and svntr hash * lock in reverse from syncrounous thread order. */ - if (!AS_LOCK_TRYENTER(as, &as->a_lock, RW_READER)) { + if (!AS_LOCK_TRYENTER(as, RW_READER)) { SEGVN_TR_ADDSTAT(nolock); if (segvn_lgrp_trthr_migrs_snpsht) { segvn_lgrp_trthr_migrs_snpsht = 0; @@ -10242,7 +10241,7 @@ segvn_trupdate_seg(struct seg *seg, return; } if (!SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, RW_WRITER)) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); SEGVN_TR_ADDSTAT(nolock); if (segvn_lgrp_trthr_migrs_snpsht) { segvn_lgrp_trthr_migrs_snpsht = 0; @@ -10254,14 +10253,14 @@ segvn_trupdate_seg(struct seg *seg, size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size); if (trmem > segvn_textrepl_max_bytes) { SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); atomic_add_long(&segvn_textrepl_bytes, -size); SEGVN_TR_ADDSTAT(normem); return; } if (anon_try_resv_zone(size, NULL) == 0) { SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); atomic_add_long(&segvn_textrepl_bytes, -size); SEGVN_TR_ADDSTAT(noanon); return; @@ -10269,7 +10268,7 @@ segvn_trupdate_seg(struct seg *seg, amp = anonmap_alloc(size, size, KM_NOSLEEP); if (amp == NULL) { SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); atomic_add_long(&segvn_textrepl_bytes, -size); anon_unresv_zone(size, NULL); SEGVN_TR_ADDSTAT(nokmem); @@ -10301,7 +10300,7 @@ segvn_trupdate_seg(struct seg *seg, svd->amp = svntrp->tr_amp[lgrp_id]; p->p_tr_lgrpid = NLGRPS_MAX; SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); ASSERT(svntrp->tr_refcnt != 0); ASSERT(svd->vp == svntrp->tr_vp); diff --git a/usr/src/uts/common/vm/vm_as.c b/usr/src/uts/common/vm/vm_as.c index e0432f5b42..d20bdbf804 100644 --- a/usr/src/uts/common/vm/vm_as.c +++ 
b/usr/src/uts/common/vm/vm_as.c @@ -360,7 +360,7 @@ as_findseg(struct as *as, caddr_t addr, int tail) struct seg *seg = as->a_seglast; avl_index_t where; - ASSERT(AS_LOCK_HELD(as, &as->a_lock)); + ASSERT(AS_LOCK_HELD(as)); if (seg != NULL && seg->s_base <= addr && @@ -422,7 +422,7 @@ as_addseg(struct as *as, struct seg *newseg) caddr_t eaddr; avl_index_t where; - ASSERT(AS_WRITE_HELD(as, &as->a_lock)); + ASSERT(AS_WRITE_HELD(as)); as->a_updatedir = 1; /* inform /proc */ gethrestime(&as->a_updatetime); @@ -504,7 +504,7 @@ as_removeseg(struct as *as, struct seg *seg) { avl_tree_t *t; - ASSERT(AS_WRITE_HELD(as, &as->a_lock)); + ASSERT(AS_WRITE_HELD(as)); as->a_updatedir = 1; /* inform /proc */ gethrestime(&as->a_updatetime); @@ -544,7 +544,7 @@ as_segat(struct as *as, caddr_t addr) { struct seg *seg = as->a_seglast; - ASSERT(AS_LOCK_HELD(as, &as->a_lock)); + ASSERT(AS_LOCK_HELD(as)); if (seg != NULL && seg->s_base <= addr && addr < seg->s_base + seg->s_size) @@ -667,9 +667,9 @@ as_alloc(void) as->a_lastgaphl = NULL; as->a_callbacks = NULL; - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); as->a_hat = hat_alloc(as); /* create hat for default system mmu */ - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); as->a_xhat = NULL; @@ -703,7 +703,7 @@ top: if (!called) AS_SETBUSY(as); mutex_exit(&as->a_contents); - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); if (!called) { called = 1; @@ -720,7 +720,7 @@ retry: if (err == EAGAIN) { mutex_enter(&as->a_contents); if (as->a_callbacks) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); } else if (!AS_ISNOUNMAPWAIT(as)) { /* * Memory is currently locked. Wait for a @@ -730,7 +730,7 @@ retry: if (AS_ISUNMAPWAIT(as) == 0) cv_broadcast(&as->a_cv); AS_SETUNMAPWAIT(as); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); while (AS_ISUNMAPWAIT(as)) cv_wait(&as->a_cv, &as->a_contents); } else { @@ -761,7 +761,7 @@ retry: hat_free_end(hat); if (as->a_xhat != NULL) xhat_free_end_all(as); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); /* /proc stuff */ ASSERT(avl_numnodes(&as->a_wpage) == 0); @@ -786,13 +786,13 @@ as_dup(struct as *as, struct proc *forkedproc) size_t purgesize = 0; int error; - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); as_clearwatch(as); newas = as_alloc(); newas->a_userlimit = as->a_userlimit; newas->a_proc = forkedproc; - AS_LOCK_ENTER(newas, &newas->a_lock, RW_WRITER); + AS_LOCK_ENTER(newas, RW_WRITER); /* This will prevent new XHATs from attaching */ mutex_enter(&as->a_contents); @@ -813,12 +813,12 @@ as_dup(struct as *as, struct proc *forkedproc) newseg = seg_alloc(newas, seg->s_base, seg->s_size); if (newseg == NULL) { - AS_LOCK_EXIT(newas, &newas->a_lock); + AS_LOCK_EXIT(newas); as_setwatch(as); mutex_enter(&as->a_contents); AS_CLRBUSY(as); mutex_exit(&as->a_contents); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); as_free(newas); return (-1); } @@ -832,9 +832,9 @@ as_dup(struct as *as, struct proc *forkedproc) mutex_enter(&as->a_contents); AS_CLRBUSY(as); mutex_exit(&as->a_contents); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); seg_free(newseg); - AS_LOCK_EXIT(newas, &newas->a_lock); + AS_LOCK_EXIT(newas); as_free(newas); return (error); } @@ -849,13 +849,13 @@ as_dup(struct as *as, struct proc *forkedproc) mutex_enter(&newas->a_contents); AS_CLRBUSY(newas); mutex_exit(&newas->a_contents); - AS_LOCK_EXIT(newas, &newas->a_lock); + AS_LOCK_EXIT(newas); as_setwatch(as); mutex_enter(&as->a_contents); AS_CLRBUSY(as); 
mutex_exit(&as->a_contents); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); if (error != 0) { as_free(newas); return (error); @@ -959,7 +959,7 @@ retry: seg = segkmap; as_lock_held = 0; } else { - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); if (is_xhat && avl_numnodes(&as->a_wpage) != 0) { /* * Grab and hold the writers' lock on the as @@ -973,13 +973,13 @@ retry: * is within a watched page and only then grab * the writer lock, but this is simpler. */ - AS_LOCK_EXIT(as, &as->a_lock); - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_EXIT(as); + AS_LOCK_ENTER(as, RW_WRITER); } seg = as_segat(as, raddr); if (seg == NULL) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); if ((lwp != NULL) && (!is_xhat)) lwp->lwp_nostop--; return (FC_NOMAP); @@ -1060,7 +1060,7 @@ retry: } } if (as_lock_held) - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); if ((lwp != NULL) && (!is_xhat)) lwp->lwp_nostop--; @@ -1108,10 +1108,10 @@ retry: rsize = (((size_t)(addr + size) + PAGEOFFSET) & PAGEMASK) - (size_t)raddr; - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); seg = as_segat(as, raddr); if (seg == NULL) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); if (lwp != NULL) lwp->lwp_nostop--; return (FC_NOMAP); @@ -1129,7 +1129,7 @@ retry: if (res != 0) break; } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); if (lwp != NULL) lwp->lwp_nostop--; /* @@ -1189,16 +1189,16 @@ setprot_top: * want to only lock as a writer when necessary. */ if (writer || avl_numnodes(&as->a_wpage) != 0) { - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); } else { - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); } as_clearwatchprot(as, raddr, rsize); seg = as_segat(as, raddr); if (seg == NULL) { as_setwatch(as); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (ENOMEM); } @@ -1223,7 +1223,7 @@ retry: } if (error == IE_RETRY) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); writer = 1; goto setprot_top; } @@ -1233,7 +1233,7 @@ retry: * Make sure we have a_lock as writer. */ if (writer == 0) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); writer = 1; goto setprot_top; } @@ -1274,13 +1274,13 @@ retry: if (as->a_callbacks && (cb = as_find_callback(as, AS_SETPROT_EVENT, seg->s_base, seg->s_size))) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); as_execute_callback(as, cb, AS_SETPROT_EVENT); } else if (!AS_ISNOUNMAPWAIT(as)) { if (AS_ISUNMAPWAIT(as) == 0) cv_broadcast(&as->a_cv); AS_SETUNMAPWAIT(as); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); while (AS_ISUNMAPWAIT(as)) cv_wait(&as->a_cv, &as->a_contents); } else { @@ -1308,7 +1308,7 @@ retry: } else { as_setwatchprot(as, saveraddr, saversize, prot); } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (error); } @@ -1342,14 +1342,14 @@ as_checkprot(struct as *as, caddr_t addr, size_t size, uint_t prot) * the benefit of as_clearwatchprot() and as_setwatchprot(). 
*/ if (avl_numnodes(&as->a_wpage) != 0) - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); else - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); as_clearwatchprot(as, raddr, rsize); seg = as_segat(as, raddr); if (seg == NULL) { as_setwatch(as); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (ENOMEM); } @@ -1371,7 +1371,7 @@ as_checkprot(struct as *as, caddr_t addr, size_t size, uint_t prot) break; } as_setwatch(as); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (error); } @@ -1389,7 +1389,7 @@ top: eaddr = (caddr_t)(((uintptr_t)(addr + size) + PAGEOFFSET) & (uintptr_t)PAGEMASK); - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); as->a_updatedir = 1; /* inform /proc */ gethrestime(&as->a_updatetime); @@ -1470,13 +1470,13 @@ retry: if (as->a_callbacks && (cb = as_find_callback(as, AS_UNMAP_EVENT, seg->s_base, seg->s_size))) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); as_execute_callback(as, cb, AS_UNMAP_EVENT); } else if (!AS_ISNOUNMAPWAIT(as)) { if (AS_ISUNMAPWAIT(as) == 0) cv_broadcast(&as->a_cv); AS_SETUNMAPWAIT(as); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); while (AS_ISUNMAPWAIT(as)) cv_wait(&as->a_cv, &as->a_contents); } else { @@ -1497,11 +1497,11 @@ retry: mutex_exit(&as->a_contents); goto top; } else if (err == IE_RETRY) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); goto top; } else if (err) { as_setwatch(as); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (-1); } @@ -1510,7 +1510,7 @@ retry: as->a_resvsize -= rsize; raddr += ssize; } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (0); } @@ -1529,7 +1529,7 @@ as_map_segvn_segs(struct as *as, caddr_t addr, size_t size, uint_t szcvec, int do_off = (vn_a->vp != NULL || vn_a->amp != NULL); uint_t save_szcvec; - ASSERT(AS_WRITE_HELD(as, &as->a_lock)); + ASSERT(AS_WRITE_HELD(as)); ASSERT(IS_P2ALIGNED(addr, PAGESIZE)); ASSERT(IS_P2ALIGNED(size, PAGESIZE)); ASSERT(vn_a->vp == NULL || vn_a->amp == NULL); @@ -1643,7 +1643,7 @@ as_map_vnsegs(struct as *as, caddr_t addr, size_t size, size_t save_size = 0; extern size_t textrepl_size_thresh; - ASSERT(AS_WRITE_HELD(as, &as->a_lock)); + ASSERT(AS_WRITE_HELD(as)); ASSERT(IS_P2ALIGNED(addr, PAGESIZE)); ASSERT(IS_P2ALIGNED(size, PAGESIZE)); ASSERT(vn_a->vp != NULL); @@ -1732,7 +1732,7 @@ as_map_ansegs(struct as *as, caddr_t addr, size_t size, szcvec = map_pgszcvec(addr, size, vn_a->amp == NULL ? 
(uintptr_t)addr : (uintptr_t)P2ROUNDUP(vn_a->offset, PAGESIZE), (vn_a->flags & MAP_TEXT), type, 0); - ASSERT(AS_WRITE_HELD(as, &as->a_lock)); + ASSERT(AS_WRITE_HELD(as)); ASSERT(IS_P2ALIGNED(addr, PAGESIZE)); ASSERT(IS_P2ALIGNED(size, PAGESIZE)); ASSERT(vn_a->vp == NULL); @@ -1744,7 +1744,7 @@ as_map_ansegs(struct as *as, caddr_t addr, size_t size, int as_map(struct as *as, caddr_t addr, size_t size, int (*crfp)(), void *argsp) { - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); return (as_map_locked(as, addr, size, crfp, argsp)); } @@ -1768,7 +1768,7 @@ as_map_locked(struct as *as, caddr_t addr, size_t size, int (*crfp)(), * check for wrap around */ if ((raddr + rsize < raddr) || (as->a_size > (ULONG_MAX - size))) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (ENOMEM); } @@ -1776,7 +1776,7 @@ as_map_locked(struct as *as, caddr_t addr, size_t size, int (*crfp)(), gethrestime(&as->a_updatetime); if (as != &kas && as->a_size + rsize > (size_t)p->p_vmem_ctl) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); (void) rctl_action(rctlproc_legacy[RLIMIT_VMEM], p->p_rctls, p, RCA_UNSAFE_ALL); @@ -1788,7 +1788,7 @@ as_map_locked(struct as *as, caddr_t addr, size_t size, int (*crfp)(), crargs = *(struct segvn_crargs *)argsp; error = as_map_vnsegs(as, raddr, rsize, crfp, &crargs, &unmap); if (error != 0) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); if (unmap) { (void) as_unmap(as, addr, size); } @@ -1798,7 +1798,7 @@ as_map_locked(struct as *as, caddr_t addr, size_t size, int (*crfp)(), crargs = *(struct segvn_crargs *)argsp; error = as_map_ansegs(as, raddr, rsize, crfp, &crargs, &unmap); if (error != 0) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); if (unmap) { (void) as_unmap(as, addr, size); } @@ -1807,14 +1807,14 @@ as_map_locked(struct as *as, caddr_t addr, size_t size, int (*crfp)(), } else { seg = seg_alloc(as, addr, size); if (seg == NULL) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (ENOMEM); } error = (*crfp)(seg, argsp); if (error != 0) { seg_free(seg); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (error); } /* @@ -1833,13 +1833,13 @@ as_map_locked(struct as *as, caddr_t addr, size_t size, int (*crfp)(), mutex_enter(&as->a_contents); if (AS_ISPGLCK(as)) { mutex_exit(&as->a_contents); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); error = as_ctl(as, addr, size, MC_LOCK, 0, 0, NULL, 0); if (error != 0) (void) as_unmap(as, addr, size); } else { mutex_exit(&as->a_contents); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); } return (error); } @@ -1864,7 +1864,7 @@ as_purge(struct as *as) if ((as->a_flags & AS_NEEDSPURGE) == 0) return; - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); next_seg = NULL; seg = AS_SEGFIRST(as); while (seg != NULL) { @@ -1873,7 +1873,7 @@ as_purge(struct as *as) SEGOP_UNMAP(seg, seg->s_base, seg->s_size); seg = next_seg; } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); mutex_enter(&as->a_contents); as->a_flags &= ~AS_NEEDSPURGE; @@ -1936,14 +1936,14 @@ as_gap_aligned(struct as *as, size_t minlen, caddr_t *basep, size_t *lenp, minlen += 2 * redzone; redzone = 0; - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); if (AS_SEGFIRST(as) == NULL) { if (valid_va_range_aligned(basep, lenp, minlen, flags & AH_DIR, align, redzone, off)) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (0); } else { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); *basep = save_base; *lenp = save_len; return (-1); 
@@ -2024,7 +2024,7 @@ retry: as->a_lastgaphl = hseg; else as->a_lastgaphl = lseg; - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (0); } cont: @@ -2051,7 +2051,7 @@ retry: } *basep = save_base; *lenp = save_len; - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (-1); } @@ -2093,7 +2093,7 @@ as_memory(struct as *as, caddr_t *basep, size_t *lenp) caddr_t addr, eaddr; caddr_t segend; - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); addr = *basep; eaddr = addr + *lenp; @@ -2104,7 +2104,7 @@ as_memory(struct as *as, caddr_t *basep, size_t *lenp) for (;;) { if (seg == NULL || addr >= eaddr || eaddr <= seg->s_base) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (EINVAL); } @@ -2136,7 +2136,7 @@ as_memory(struct as *as, caddr_t *basep, size_t *lenp) else *lenp = segend - addr; - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (0); } @@ -2164,7 +2164,7 @@ as_swapout(struct as *as) if (as == NULL) return (0); - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); /* Prevent XHATs from attaching */ mutex_enter(&as->a_contents); @@ -2203,7 +2203,7 @@ as_swapout(struct as *as) if ((ov != NULL) && (ov->swapout != NULL)) swpcnt += SEGOP_SWAPOUT(seg); } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (swpcnt); } @@ -2230,10 +2230,10 @@ as_incore(struct as *as, caddr_t addr, if (raddr + rsize < raddr) /* check for wraparound */ return (ENOMEM); - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); seg = as_segat(as, raddr); if (seg == NULL) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (-1); } @@ -2256,7 +2256,7 @@ as_incore(struct as *as, caddr_t addr, } vec += btopr(ssize); } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (error); } @@ -2326,9 +2326,9 @@ as_ctl(struct as *as, caddr_t addr, size_t size, int func, int attr, /* pages. */ retry: if (error == IE_RETRY) - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); else - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); /* * If these are address space lock/unlock operations, loop over @@ -2346,13 +2346,13 @@ retry: mutex_exit(&as->a_contents); } if ((arg & MCL_CURRENT) == 0) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (0); } seg = AS_SEGFIRST(as); if (seg == NULL) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (0); } @@ -2366,7 +2366,7 @@ retry: mlock_size = BT_BITOUL(btopr(rlen)); if ((mlock_map = (ulong_t *)kmem_zalloc(mlock_size * sizeof (ulong_t), KM_NOSLEEP)) == NULL) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (EAGAIN); } @@ -2392,7 +2392,7 @@ retry: } kmem_free(mlock_map, mlock_size * sizeof (ulong_t)); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); goto lockerr; } else if (func == MC_UNLOCKAS) { mutex_enter(&as->a_contents); @@ -2406,7 +2406,7 @@ retry: break; } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); goto lockerr; } @@ -2418,7 +2418,7 @@ retry: (size_t)raddr; if (raddr + rsize < raddr) { /* check for wraparound */ - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (ENOMEM); } @@ -2426,7 +2426,7 @@ retry: * Get initial segment. 
*/ if ((seg = as_segat(as, raddr)) == NULL) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (ENOMEM); } @@ -2434,7 +2434,7 @@ retry: mlock_size = BT_BITOUL(btopr(rsize)); if ((mlock_map = (ulong_t *)kmem_zalloc(mlock_size * sizeof (ulong_t), KM_NOSLEEP)) == NULL) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (EAGAIN); } } @@ -2459,7 +2459,7 @@ retry: kmem_free(mlock_map, mlock_size * sizeof (ulong_t)); } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (ENOMEM); } } @@ -2480,7 +2480,7 @@ retry: case MC_SYNC: if (error = SEGOP_SYNC(seg, raddr, ssize, attr, (uint_t)arg)) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (error); } break; @@ -2495,7 +2495,7 @@ retry: initrsize - rsize + ssize); kmem_free(mlock_map, mlock_size * sizeof (ulong_t)); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); goto lockerr; } break; @@ -2524,7 +2524,7 @@ retry: * have to drop readers lock and start * all over again */ - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); goto retry; } else if (error == IE_REATTACH) { /* @@ -2534,14 +2534,14 @@ retry: */ seg = as_segat(as, raddr); if (seg == NULL) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (ENOMEM); } } else { /* * Regular error */ - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (error); } } @@ -2555,7 +2555,7 @@ retry: SEGP_INH_ZERO); } if (error != 0) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (error); } break; @@ -2574,7 +2574,7 @@ retry: if (func == MC_LOCK) kmem_free(mlock_map, mlock_size * sizeof (ulong_t)); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (0); lockerr: @@ -2639,7 +2639,7 @@ as_pagelock_segs(struct as *as, struct seg *seg, struct page ***ppp, pgcnt_t pl_off; extern struct seg_ops segspt_shmops; - ASSERT(AS_LOCK_HELD(as, &as->a_lock)); + ASSERT(AS_LOCK_HELD(as)); ASSERT(seg != NULL); ASSERT(addr >= seg->s_base && addr < seg->s_base + seg->s_size); ASSERT(addr + size > seg->s_base + seg->s_size); @@ -2656,7 +2656,7 @@ as_pagelock_segs(struct as *as, struct seg *seg, struct page ***ppp, seg = AS_SEGNEXT(as, seg); if (seg == NULL || addr != seg->s_base) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (EFAULT); } /* @@ -2668,11 +2668,11 @@ as_pagelock_segs(struct as *as, struct seg *seg, struct page ***ppp, if (SEGOP_GETVP(seg, addr, &vp) != 0 || vp != NULL) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); goto slow; } } else if (seg->s_ops != &segspt_shmops) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); goto slow; } segcnt++; @@ -2717,7 +2717,7 @@ as_pagelock_segs(struct as *as, struct seg *seg, struct page ***ppp, } if (size == 0) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); ASSERT(cnt == segcnt - 1); *ppp = plist; return (0); @@ -2751,7 +2751,7 @@ as_pagelock_segs(struct as *as, struct seg *seg, struct page ***ppp, L_PAGEUNLOCK, rw); } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); kmem_free(plist, (npages + segcnt) * sizeof (page_t *)); @@ -2800,11 +2800,11 @@ as_pagelock(struct as *as, struct page ***ppp, caddr_t addr, * if the request crosses two segments let * as_fault handle it. 
*/ - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); seg = as_segat(as, raddr); if (seg == NULL) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (EFAULT); } ASSERT(raddr >= seg->s_base && raddr < seg->s_base + seg->s_size); @@ -2812,7 +2812,7 @@ as_pagelock(struct as *as, struct page ***ppp, caddr_t addr, return (as_pagelock_segs(as, seg, ppp, raddr, rsize, rw)); } if (raddr + rsize <= raddr) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (EFAULT); } @@ -2826,7 +2826,7 @@ as_pagelock(struct as *as, struct page ***ppp, caddr_t addr, TRACE_0(TR_FAC_PHYSIO, TR_PHYSIO_SEG_LOCK_END, "seg_lock_1_end"); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); if (err == 0 || (err != ENOTSUP && err != EFAULT)) { return (err); @@ -2864,7 +2864,7 @@ as_pageunlock_segs(struct as *as, struct seg *seg, caddr_t addr, size_t size, size_t ssize; page_t **pl; - ASSERT(AS_LOCK_HELD(as, &as->a_lock)); + ASSERT(AS_LOCK_HELD(as)); ASSERT(seg != NULL); ASSERT(addr >= seg->s_base && addr < seg->s_base + seg->s_size); ASSERT(addr + size > seg->s_base + seg->s_size); @@ -2889,7 +2889,7 @@ as_pageunlock_segs(struct as *as, struct seg *seg, caddr_t addr, size_t size, L_PAGEUNLOCK, rw); } ASSERT(cnt > 0); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); cnt++; kmem_free(plist, (npages + cnt) * sizeof (page_t *)); @@ -2922,7 +2922,7 @@ as_pageunlock(struct as *as, struct page **pp, caddr_t addr, size_t size, rsize = (((size_t)(addr + size) + PAGEOFFSET) & PAGEMASK) - (size_t)raddr; - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); seg = as_segat(as, raddr); ASSERT(seg != NULL); @@ -2936,7 +2936,7 @@ as_pageunlock(struct as *as, struct page **pp, caddr_t addr, size_t size, as_pageunlock_segs(as, seg, raddr, rsize, pp, rw); return; } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); TRACE_0(TR_FAC_PHYSIO, TR_PHYSIO_AS_UNLOCK_END, "as_pageunlock_end"); } @@ -2962,12 +2962,12 @@ setpgsz_top: if (raddr + rsize < raddr) /* check for wraparound */ return (ENOMEM); - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); as_clearwatchprot(as, raddr, rsize); seg = as_segat(as, raddr); if (seg == NULL) { as_setwatch(as); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (ENOMEM); } @@ -2994,7 +2994,7 @@ retry: } if (error == IE_RETRY) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); goto setpgsz_top; } @@ -3034,7 +3034,7 @@ retry: cv_broadcast(&as->a_cv); } AS_SETUNMAPWAIT(as); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); while (AS_ISUNMAPWAIT(as)) { cv_wait(&as->a_cv, &as->a_contents); } @@ -3060,7 +3060,7 @@ retry: } } as_setwatch(as); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (error); } @@ -3076,7 +3076,7 @@ as_iset3_default_lpsize(struct as *as, caddr_t raddr, size_t rsize, uint_t szc, size_t ssize; int error; - ASSERT(AS_WRITE_HELD(as, &as->a_lock)); + ASSERT(AS_WRITE_HELD(as)); seg = as_segat(as, raddr); if (seg == NULL) { @@ -3133,7 +3133,7 @@ as_iset2_default_lpsize(struct as *as, caddr_t addr, size_t size, uint_t szc, int error; int retry; - ASSERT(AS_WRITE_HELD(as, &as->a_lock)); + ASSERT(AS_WRITE_HELD(as)); for (;;) { error = as_iset3_default_lpsize(as, addr, size, szc, &retry); @@ -3165,7 +3165,7 @@ as_iset1_default_lpsize(struct as *as, caddr_t raddr, size_t rsize, uint_t szc, int set; int error; - ASSERT(AS_WRITE_HELD(as, &as->a_lock)); + ASSERT(AS_WRITE_HELD(as)); seg = as_segat(as, raddr); if (seg == NULL) { @@ -3233,7 +3233,7 @@ as_iset_default_lpsize(struct as *as, 
caddr_t addr, size_t size, int flags, size_t pgsz; uint_t save_szcvec; - ASSERT(AS_WRITE_HELD(as, &as->a_lock)); + ASSERT(AS_WRITE_HELD(as)); ASSERT(IS_P2ALIGNED(addr, PAGESIZE)); ASSERT(IS_P2ALIGNED(size, PAGESIZE)); @@ -3325,7 +3325,7 @@ as_set_default_lpsize(struct as *as, caddr_t addr, size_t size) if (size == 0) return (0); - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); again: error = 0; @@ -3334,14 +3334,14 @@ again: (size_t)raddr; if (raddr + rsize < raddr) { /* check for wraparound */ - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (ENOMEM); } as_clearwatchprot(as, raddr, rsize); seg = as_segat(as, raddr); if (seg == NULL) { as_setwatch(as); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (ENOMEM); } if (seg->s_ops == &segvn_ops) { @@ -3426,12 +3426,12 @@ again: cv_broadcast(&as->a_cv); } AS_SETUNMAPWAIT(as); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); while (AS_ISUNMAPWAIT(as)) { cv_wait(&as->a_cv, &as->a_contents); } mutex_exit(&as->a_contents); - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); } else { /* * We may have raced with @@ -3449,7 +3449,7 @@ again: } as_setwatch(as); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (error); } @@ -3468,7 +3468,7 @@ as_setwatch(struct as *as) if (avl_numnodes(&as->a_wpage) == 0) return; - ASSERT(AS_WRITE_HELD(as, &as->a_lock)); + ASSERT(AS_WRITE_HELD(as)); for (pwp = avl_first(&as->a_wpage); pwp != NULL; pwp = AVL_NEXT(&as->a_wpage, pwp)) { @@ -3515,7 +3515,7 @@ as_clearwatch(struct as *as) if (avl_numnodes(&as->a_wpage) == 0) return; - ASSERT(AS_WRITE_HELD(as, &as->a_lock)); + ASSERT(AS_WRITE_HELD(as)); for (pwp = avl_first(&as->a_wpage); pwp != NULL; pwp = AVL_NEXT(&as->a_wpage, pwp)) { @@ -3557,7 +3557,7 @@ as_setwatchprot(struct as *as, caddr_t addr, size_t size, uint_t prot) if (avl_numnodes(&as->a_wpage) == 0) return; - ASSERT(AS_WRITE_HELD(as, &as->a_lock)); + ASSERT(AS_WRITE_HELD(as)); tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK); if ((pwp = avl_find(&as->a_wpage, &tpw, &where)) == NULL) @@ -3616,7 +3616,7 @@ as_clearwatchprot(struct as *as, caddr_t addr, size_t size) if ((pwp = avl_find(&as->a_wpage, &tpw, &where)) == NULL) pwp = avl_nearest(&as->a_wpage, where, AVL_AFTER); - ASSERT(AS_WRITE_HELD(as, &as->a_lock)); + ASSERT(AS_WRITE_HELD(as)); while (pwp != NULL && pwp->wp_vaddr < eaddr) { @@ -3671,22 +3671,22 @@ as_getmemid(struct as *as, caddr_t addr, memid_t *memidp) struct seg *seg; int sts; - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); seg = as_segat(as, addr); if (seg == NULL) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (EFAULT); } /* * catch old drivers which may not support getmemid */ if (seg->s_ops->getmemid == NULL) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (ENODEV); } sts = SEGOP_GETMEMID(seg, addr, memidp); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (sts); } diff --git a/usr/src/uts/common/vm/vm_page.c b/usr/src/uts/common/vm/vm_page.c index b5d78502f4..73833c669b 100644 --- a/usr/src/uts/common/vm/vm_page.c +++ b/usr/src/uts/common/vm/vm_page.c @@ -5381,7 +5381,7 @@ page_mark_migrate(struct seg *seg, caddr_t addr, size_t len, ulong_t an_idx; anon_sync_obj_t cookie; - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); /* * Don't do anything if don't need to do lgroup optimizations @@ -5559,7 +5559,7 @@ page_migrate( spgcnt_t i; uint_t pszc; - 
ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); while (npages > 0) { pp = *ppa; diff --git a/usr/src/uts/common/vm/vm_seg.c b/usr/src/uts/common/vm/vm_seg.c index e54401ddeb..14076ddff8 100644 --- a/usr/src/uts/common/vm/vm_seg.c +++ b/usr/src/uts/common/vm/vm_seg.c @@ -1649,7 +1649,7 @@ seg_unmap(struct seg *seg) int ret; #endif /* DEBUG */ - ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); /* Shouldn't have called seg_unmap if mapping isn't yet established */ ASSERT(seg->s_data != NULL); diff --git a/usr/src/uts/common/vm/vm_usage.c b/usr/src/uts/common/vm/vm_usage.c index d422f8d0e8..57166b4e63 100644 --- a/usr/src/uts/common/vm/vm_usage.c +++ b/usr/src/uts/common/vm/vm_usage.c @@ -1533,12 +1533,12 @@ vmu_calculate_proc(proc_t *p) ASSERT(entities != NULL); /* process all segs in process's address space */ as = p->p_as; - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) { vmu_calculate_seg(entities, seg); } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); } /* diff --git a/usr/src/uts/common/xen/io/xpvtap.c b/usr/src/uts/common/xen/io/xpvtap.c index a54b6490f0..57290aa9d5 100644 --- a/usr/src/uts/common/xen/io/xpvtap.c +++ b/usr/src/uts/common/xen/io/xpvtap.c @@ -789,12 +789,12 @@ xpvtap_segmf_register(xpvtap_state_t *state) return (DDI_FAILURE); } - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); seg = as_findseg(as, state->bt_map.um_guest_pages, 0); if ((seg == NULL) || ((uaddr + state->bt_map.um_guest_size) > (seg->s_base + seg->s_size))) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (DDI_FAILURE); } @@ -815,7 +815,7 @@ xpvtap_segmf_register(xpvtap_state_t *state) state->bt_map.um_registered = B_TRUE; - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (DDI_SUCCESS); } @@ -850,11 +850,11 @@ xpvtap_segmf_unregister(struct as *as, void *arg, uint_t event) /* Unlock the gref pages */ for (i = 0; i < pgcnt; i++) { - AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); + AS_LOCK_ENTER(as, RW_WRITER); hat_prepare_mapping(as->a_hat, uaddr, NULL); hat_unload(as->a_hat, uaddr, PAGESIZE, HAT_UNLOAD_UNLOCK); hat_release_mapping(as->a_hat, uaddr); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); uaddr += PAGESIZE; } @@ -1228,11 +1228,11 @@ xpvtap_user_request_map(xpvtap_state_t *state, blkif_request_t *req, /* get the apps gref address */ uaddr = XPVTAP_GREF_REQADDR(state->bt_map.um_guest_pages, *uid); - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); seg = as_findseg(as, state->bt_map.um_guest_pages, 0); if ((seg == NULL) || ((uaddr + mmu_ptob(req->nr_segments)) > (seg->s_base + seg->s_size))) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (DDI_FAILURE); } @@ -1249,7 +1249,7 @@ xpvtap_user_request_map(xpvtap_state_t *state, blkif_request_t *req, (void) segmf_add_grefs(seg, uaddr, flags, gref, req->nr_segments, domid); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (DDI_SUCCESS); } @@ -1314,11 +1314,11 @@ xpvtap_user_request_unmap(xpvtap_state_t *state, uint_t uid) (req->operation != BLKIF_OP_FLUSH_DISKCACHE) && (req->nr_segments != 0)) { uaddr = XPVTAP_GREF_REQADDR(state->bt_map.um_guest_pages, uid); - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); seg = as_findseg(as, state->bt_map.um_guest_pages, 0); if ((seg == NULL) || ((uaddr + 
mmu_ptob(req->nr_segments)) > (seg->s_base + seg->s_size))) { - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); xpvtap_rs_free(state->bt_map.um_rs, uid); return; } @@ -1328,7 +1328,7 @@ xpvtap_user_request_unmap(xpvtap_state_t *state, uint_t uid) cmn_err(CE_WARN, "unable to release grefs"); } - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); } /* free up the user ring id */ diff --git a/usr/src/uts/i86pc/io/rootnex.c b/usr/src/uts/i86pc/io/rootnex.c index a36c47b8b9..889c2f3300 100644 --- a/usr/src/uts/i86pc/io/rootnex.c +++ b/usr/src/uts/i86pc/io/rootnex.c @@ -1007,7 +1007,7 @@ rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip, struct hat *hat, * the hat of the system MMU. At alternative * would be to panic .. this might well be better .. */ - ASSERT(AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(AS_READ_HELD(seg->s_as)); hat = seg->s_as->a_hat; cmn_err(CE_NOTE, "rootnex_map_fault: nil hat"); } diff --git a/usr/src/uts/i86pc/vm/hat_i86.c b/usr/src/uts/i86pc/vm/hat_i86.c index f3af56238f..ea2a83b2bd 100644 --- a/usr/src/uts/i86pc/vm/hat_i86.c +++ b/usr/src/uts/i86pc/vm/hat_i86.c @@ -260,7 +260,7 @@ hat_alloc(struct as *as) if (can_steal_post_boot == 0) can_steal_post_boot = 1; - ASSERT(AS_WRITE_HELD(as, &as->a_lock)); + ASSERT(AS_WRITE_HELD(as)); hat = kmem_cache_alloc(hat_cache, KM_SLEEP); hat->hat_as = as; mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL); @@ -393,7 +393,7 @@ init_done: void hat_free_start(hat_t *hat) { - ASSERT(AS_WRITE_HELD(hat->hat_as, &hat->hat_as->a_lock)); + ASSERT(AS_WRITE_HELD(hat->hat_as)); /* * If the hat is currently a stealing victim, wait for the stealing @@ -726,12 +726,12 @@ hat_init() /* * Set up the kernel's hat */ - AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER); + AS_LOCK_ENTER(&kas, RW_WRITER); kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP); mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL); kas.a_hat->hat_as = &kas; kas.a_hat->hat_flags = 0; - AS_LOCK_EXIT(&kas, &kas.a_lock); + AS_LOCK_EXIT(&kas); CPUSET_ZERO(khat_cpuset); CPUSET_ADD(khat_cpuset, CPU->cpu_id); @@ -1157,7 +1157,7 @@ hat_swapout(hat_t *hat) */ ASSERT(IS_PAGEALIGNED(vaddr)); ASSERT(IS_PAGEALIGNED(eaddr)); - ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); + ASSERT(AS_LOCK_HELD(hat->hat_as)); if ((uintptr_t)hat->hat_as->a_userlimit < eaddr) eaddr = (uintptr_t)hat->hat_as->a_userlimit; @@ -1438,8 +1438,7 @@ hati_load_common( ++curthread->t_hatdepth; ASSERT(curthread->t_hatdepth < 16); - ASSERT(hat == kas.a_hat || - AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); + ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as)); if (flags & HAT_LOAD_SHARE) hat->hat_flags |= HAT_SHARED; @@ -1587,8 +1586,7 @@ hat_memload( XPV_DISALLOW_MIGRATE(); ASSERT(IS_PAGEALIGNED(va)); ASSERT(hat == kas.a_hat || va < _userlimit); - ASSERT(hat == kas.a_hat || - AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); + ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as)); ASSERT((flags & supported_memload_flags) == flags); ASSERT(!IN_VA_HOLE(va)); @@ -1645,8 +1643,7 @@ hat_memload_array( XPV_DISALLOW_MIGRATE(); ASSERT(IS_PAGEALIGNED(va)); ASSERT(hat == kas.a_hat || va + len <= _userlimit); - ASSERT(hat == kas.a_hat || - AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); + ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as)); ASSERT((flags & supported_memload_flags) == flags); /* @@ -1781,8 +1778,7 @@ hat_devload( XPV_DISALLOW_MIGRATE(); ASSERT(IS_PAGEALIGNED(va)); ASSERT(hat == kas.a_hat || eva <= _userlimit); - ASSERT(hat == kas.a_hat || - AS_LOCK_HELD(hat->hat_as, 
&hat->hat_as->a_lock)); + ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as)); ASSERT((flags & supported_devload_flags) == flags); /* @@ -1890,7 +1886,7 @@ hat_unlock(hat_t *hat, caddr_t addr, size_t len) panic("hat_unlock() address out of range - above _userlimit"); XPV_DISALLOW_MIGRATE(); - ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); + ASSERT(AS_LOCK_HELD(hat->hat_as)); while (vaddr < eaddr) { (void) htable_walk(hat, &ht, &vaddr, eaddr); if (ht == NULL) @@ -2645,8 +2641,7 @@ hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what) XPV_DISALLOW_MIGRATE(); ASSERT(IS_PAGEALIGNED(vaddr)); ASSERT(IS_PAGEALIGNED(eaddr)); - ASSERT(hat == kas.a_hat || - AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); + ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as)); for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) { try_again: oldpte = htable_walk(hat, &ht, &vaddr, eaddr); @@ -2856,8 +2851,7 @@ hat_probe(hat_t *hat, caddr_t addr) pgcnt_t pg_off; ASSERT(hat == kas.a_hat || vaddr <= _userlimit); - ASSERT(hat == kas.a_hat || - AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); + ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as)); if (IN_VA_HOLE(vaddr)) return (0); diff --git a/usr/src/uts/i86xpv/io/privcmd.c b/usr/src/uts/i86xpv/io/privcmd.c index d6353d6eb4..dcd629da39 100644 --- a/usr/src/uts/i86xpv/io/privcmd.c +++ b/usr/src/uts/i86xpv/io/privcmd.c @@ -169,14 +169,14 @@ do_privcmd_mmap(void *uarg, int mode, cred_t *cr) * Find the segment we want to mess with, then add * the mfn range to the segment. */ - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); if ((seg = as_findseg(as, addr, 0)) == NULL || addr + mmu_ptob(mme->npages) > seg->s_base + seg->s_size) error = EINVAL; else error = segmf_add_mfns(seg, addr, mme->mfn, mme->npages, mmc->dom); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); if (error != 0) break; @@ -219,7 +219,7 @@ do_privcmd_mmapbatch(void *uarg, int mode, cred_t *cr) caddr_t, mmb->addr); addr = (caddr_t)mmb->addr; - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); if ((seg = as_findseg(as, addr, 0)) == NULL || addr + ptob(mmb->num) > seg->s_base + seg->s_size) { error = EINVAL; @@ -260,7 +260,7 @@ do_privcmd_mmapbatch(void *uarg, int mode, cred_t *cr) } done: - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); DTRACE_XPV3(mmapbatch__end, int, error, struct seg *, seg, caddr_t, mmb->addr); diff --git a/usr/src/uts/sfmmu/vm/hat_sfmmu.c b/usr/src/uts/sfmmu/vm/hat_sfmmu.c index 7170891206..8e1c3838d3 100644 --- a/usr/src/uts/sfmmu/vm/hat_sfmmu.c +++ b/usr/src/uts/sfmmu/vm/hat_sfmmu.c @@ -1318,9 +1318,9 @@ hat_init(void) /* * We grab the first hat for the kernel, */ - AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER); + AS_LOCK_ENTER(&kas, RW_WRITER); kas.a_hat = hat_alloc(&kas); - AS_LOCK_EXIT(&kas, &kas.a_lock); + AS_LOCK_EXIT(&kas); /* * Initialize hblk_reserve. 
@@ -1463,7 +1463,7 @@ hat_alloc(struct as *as) uint64_t cnum; extern uint_t get_color_start(struct as *); - ASSERT(AS_WRITE_HELD(as, &as->a_lock)); + ASSERT(AS_WRITE_HELD(as)); sfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP); sfmmup->sfmmu_as = as; sfmmup->sfmmu_flags = 0; @@ -1917,7 +1917,7 @@ hat_setup(struct hat *sfmmup, int allocflag) void hat_free_start(struct hat *sfmmup) { - ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); + ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as)); ASSERT(sfmmup != ksfmmup); ASSERT(sfmmup->sfmmu_xhat_provider == NULL); @@ -2247,8 +2247,7 @@ hat_do_memload(struct hat *hat, caddr_t addr, struct page *pp, return; } - ASSERT((hat == ksfmmup) || - AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock)); + ASSERT((hat == ksfmmup) || AS_LOCK_HELD(hat->sfmmu_as)); if (flags & ~SFMMU_LOAD_ALLFLAG) cmn_err(CE_NOTE, "hat_memload: unsupported flags %d", @@ -2303,8 +2302,7 @@ hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn, ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG)); ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); - ASSERT((hat == ksfmmup) || - AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock)); + ASSERT((hat == ksfmmup) || AS_LOCK_HELD(hat->sfmmu_as)); if (len == 0) panic("hat_devload: zero len"); if (flags & ~SFMMU_LOAD_ALLFLAG) @@ -3973,8 +3971,7 @@ hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len) ASSERT(sfmmup != NULL); ASSERT(sfmmup->sfmmu_xhat_provider == NULL); - ASSERT((sfmmup == ksfmmup) || - AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); + ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as)); ASSERT((len & MMU_PAGEOFFSET) == 0); endaddr = addr + len; hblktag.htag_id = sfmmup; @@ -4770,8 +4767,7 @@ hat_probe(struct hat *sfmmup, caddr_t addr) ASSERT(sfmmup != NULL); ASSERT(sfmmup->sfmmu_xhat_provider == NULL); - ASSERT((sfmmup == ksfmmup) || - AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); + ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as)); if (sfmmup == ksfmmup) { while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte)) @@ -4920,8 +4916,7 @@ sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr, CPUSET_ZERO(cpuset); - ASSERT((sfmmup == ksfmmup) || - AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); + ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as)); ASSERT((len & MMU_PAGEOFFSET) == 0); ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0); @@ -5714,7 +5709,7 @@ hat_unload_callback( } ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \ - AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); + AS_LOCK_HELD(sfmmup->sfmmu_as)); ASSERT(sfmmup != NULL); ASSERT((len & MMU_PAGEOFFSET) == 0); @@ -6332,8 +6327,7 @@ hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag) cpuset_t cpuset; ASSERT(sfmmup->sfmmu_xhat_provider == NULL); - ASSERT((sfmmup == ksfmmup) || - AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); + ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as)); ASSERT((len & MMU_PAGEOFFSET) == 0); ASSERT((clearflag == HAT_SYNC_DONTZERO) || (clearflag == HAT_SYNC_ZERORM)); @@ -7976,7 +7970,7 @@ hat_getpfnum(struct hat *hat, caddr_t addr) /* * We would like to - * ASSERT(AS_LOCK_HELD(as, &as->a_lock)); + * ASSERT(AS_LOCK_HELD(as)); * but we can't because the iommu driver will call this * routine at interrupt time and it can't grab the as lock * or it will deadlock: A thread could have the as lock @@ -14031,7 +14025,7 @@ hat_join_region(struct hat *sfmmup, ASSERT(sfmmup->sfmmu_xhat_provider == NULL); ASSERT(sfmmup != 
ksfmmup); - ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); + ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as)); ASSERT(srdp->srd_refcnt > 0); ASSERT(!(flags & ~HAT_REGION_TYPE_MASK)); ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM); @@ -14334,7 +14328,7 @@ hat_leave_region(struct hat *sfmmup, hat_region_cookie_t rcookie, uint_t flags) ASSERT(rgnp->rgn_id == rid); ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type); ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE)); - ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); + ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as)); ASSERT(sfmmup->sfmmu_xhat_provider == NULL); if (r_type == SFMMU_REGION_HME && sfmmup->sfmmu_as->a_xhat != NULL) { @@ -15130,7 +15124,7 @@ sfmmu_join_scd(sf_scd_t *scdp, sfmmu_t *sfmmup) ASSERT(srdp != NULL); ASSERT(scdp != NULL); ASSERT(scdp->scd_refcnt > 0); - ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); + ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as)); if ((old_scdp = sfmmup->sfmmu_scdp) != NULL) { ASSERT(old_scdp != scdp); @@ -15242,7 +15236,7 @@ sfmmu_find_scd(sfmmu_t *sfmmup) int ret; ASSERT(srdp != NULL); - ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); + ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as)); mutex_enter(&srdp->srd_scd_mutex); for (scdp = srdp->srd_scdp; scdp != NULL; @@ -15348,7 +15342,7 @@ sfmmu_leave_scd(sfmmu_t *sfmmup, uchar_t r_type) ASSERT(scdp->scd_refcnt); ASSERT(!sfmmup->sfmmu_free); ASSERT(sfmmu_hat_lock_held(sfmmup)); - ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); + ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as)); /* * Wait for ISM maps to be updated. diff --git a/usr/src/uts/sparc/v9/os/simulator.c b/usr/src/uts/sparc/v9/os/simulator.c index 8897ea8706..4075a3823b 100644 --- a/usr/src/uts/sparc/v9/os/simulator.c +++ b/usr/src/uts/sparc/v9/os/simulator.c @@ -1087,7 +1087,7 @@ simulate_unimp(struct regs *rp, caddr_t *badaddr) as = p->p_as; - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); mapseg = as_findseg(as, (caddr_t)rp->r_pc, 0); ASSERT(mapseg != NULL); svd = (struct segvn_data *)mapseg->s_data; @@ -1098,11 +1098,11 @@ simulate_unimp(struct regs *rp, caddr_t *badaddr) SEGVN_LOCK_ENTER(as, &svd->lock, RW_READER); if ((svd->type & MAP_TYPE) & MAP_SHARED) { SEGVN_LOCK_EXIT(as, &svd->lock); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); return (SIMU_ILLEGAL); } SEGVN_LOCK_EXIT(as, &svd->lock); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); /* * A "flush" instruction using the user PC's vaddr will not work @@ -1114,9 +1114,9 @@ simulate_unimp(struct regs *rp, caddr_t *badaddr) F_SOFTLOCK, S_READ)) return (SIMU_FAULT); - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); pfnum = hat_getpfnum(as->a_hat, (caddr_t)rp->r_pc); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); if (pf_is_memory(pfnum)) { pp = page_numtopp_nolock(pfnum); ASSERT(pp == NULL || PAGE_LOCKED(pp)); @@ -1126,12 +1126,12 @@ simulate_unimp(struct regs *rp, caddr_t *badaddr) return (SIMU_FAULT); } - AS_LOCK_ENTER(as, &as->a_lock, RW_READER); + AS_LOCK_ENTER(as, RW_READER); ka = ppmapin(pp, PROT_READ|PROT_WRITE, (caddr_t)rp->r_pc); *(uint_t *)(ka + (uintptr_t)(rp->r_pc % PAGESIZE)) = inst; doflush(ka + (uintptr_t)(rp->r_pc % PAGESIZE)); ppmapout(ka); - AS_LOCK_EXIT(as, &as->a_lock); + AS_LOCK_EXIT(as); (void) as_fault(as->a_hat, as, (caddr_t)(rp->r_pc & PAGEMASK), PAGESIZE, F_SOFTUNLOCK, S_READ); diff --git a/usr/src/uts/sparc/v9/vm/seg_nf.c b/usr/src/uts/sparc/v9/vm/seg_nf.c index 
6355933a6b..6e204d3b34 100644 --- a/usr/src/uts/sparc/v9/vm/seg_nf.c +++ b/usr/src/uts/sparc/v9/vm/seg_nf.c @@ -31,8 +31,6 @@ * under license from the Regents of the University of California. */ -#pragma ident "%Z%%M% %I% %E% SMI" - /* * VM - segment for non-faulting loads. */ @@ -188,7 +186,7 @@ segnf_create(struct seg *seg, void *argsp) size_t size; struct as *as = seg->s_as; - ASSERT(as && AS_WRITE_HELD(as, &as->a_lock)); + ASSERT(as && AS_WRITE_HELD(as)); /* * Need a page per virtual color or just 1 if no vac. @@ -306,7 +304,7 @@ segnf_dup(struct seg *seg, struct seg *newseg) static int segnf_unmap(struct seg *seg, caddr_t addr, size_t len) { - ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); /* * Check for bad sizes. @@ -375,7 +373,7 @@ segnf_unmap(struct seg *seg, caddr_t addr, size_t len) static void segnf_free(struct seg *seg) { - ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as)); } /* @@ -391,7 +389,7 @@ segnf_nomap(void) static int segnf_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) { - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); return (EACCES); } @@ -400,7 +398,7 @@ static int segnf_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) { uint_t sprot; - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); sprot = seg->s_as == &kas ? PROT_READ : PROT_READ|PROT_USER; return ((prot & sprot) == prot ? 0 : EACCES); @@ -424,7 +422,7 @@ segnf_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv) { size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1; size_t p; - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); for (p = 0; p < pgno; ++p) protv[p] = PROT_READ; @@ -435,7 +433,7 @@ segnf_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv) static u_offset_t segnf_getoffset(struct seg *seg, caddr_t addr) { - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); return ((u_offset_t)0); } @@ -444,7 +442,7 @@ segnf_getoffset(struct seg *seg, caddr_t addr) static int segnf_gettype(struct seg *seg, caddr_t addr) { - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); return (MAP_SHARED); } @@ -453,7 +451,7 @@ segnf_gettype(struct seg *seg, caddr_t addr) static int segnf_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp) { - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as)); *vpp = &nfvp; return (0); diff --git a/usr/src/uts/sun4/io/rootnex.c b/usr/src/uts/sun4/io/rootnex.c index fcc1c514c9..c7f504437b 100644 --- a/usr/src/uts/sun4/io/rootnex.c +++ b/usr/src/uts/sun4/io/rootnex.c @@ -746,7 +746,7 @@ rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip, * the hat of the system MMU. At alternative * would be to panic .. this might well be better .. */ - ASSERT(AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)); + ASSERT(AS_READ_HELD(seg->s_as)); hat = seg->s_as->a_hat; cmn_err(CE_NOTE, "rootnex_map_fault: nil hat"); } |
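
The hunks above all make the same mechanical substitution: every caller used to pass the address-space lock explicitly (always &as->a_lock, or &seg->s_as->a_lock from a segment), and the simplified AS_* macros now derive that lock from the as pointer themselves. As a rough sketch of the shape of the change — a hypothetical reading, not the definitions actually committed to usr/src/uts/common/vm/as.h (which is part of the 40 changed files but not shown above) — the one-argument macros reduce to thin wrappers around the kernel readers/writer lock primitives:

	/*
	 * Hypothetical sketch of the simplified AS_* lock macros; the real
	 * definitions ship in usr/src/uts/common/vm/as.h as part of this
	 * change and may differ in detail.
	 */
	#define	AS_LOCK_ENTER(as, type)		rw_enter(&(as)->a_lock, (type))
	#define	AS_LOCK_EXIT(as)		rw_exit(&(as)->a_lock)
	#define	AS_LOCK_TRYENTER(as, type)	rw_tryenter(&(as)->a_lock, (type))
	#define	AS_LOCK_HELD(as)		RW_LOCK_HELD(&(as)->a_lock)
	#define	AS_READ_HELD(as)		RW_READ_HELD(&(as)->a_lock)
	#define	AS_WRITE_HELD(as)		RW_WRITE_HELD(&(as)->a_lock)

Under that reading, a caller such as as_map() goes from

	AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
	...
	AS_LOCK_EXIT(as, &as->a_lock);

to

	AS_LOCK_ENTER(as, RW_WRITER);
	...
	AS_LOCK_EXIT(as);

with no behavioral change, since every call site in the patch already named the same lock that the macro can reach through the as argument; dropping the redundant parameter removes the possibility of passing a mismatched lock and accounts for the purely mechanical nature of the diff.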