summary refs log tree commit diff
path: root/usr/src/uts/common/vm
diff options
context:
space:
mode:
authorJohn Levon <john.levon@joyent.com>2018-03-02 17:34:28 +0000
committerJohn Levon <john.levon@joyent.com>2018-04-03 12:57:23 +0000
commit3967d7bb10e6d302c162721b7e5ed98bd69744b7 (patch)
tree395ce9d9f5613d784eb6d0319fba7820793a9087 /usr/src/uts/common/vm
parent976f1c5d53cffc14d4b30bdb72a2e2fa6257b746 (diff)
downloadillumos-joyent-3967d7bb10e6d302c162721b7e5ed98bd69744b7.tar.gz
OS-6606 want memory arena for vmm applications
OS-6835 memory DR should be disabled
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Patrick Mooney <patrick.mooney@joyent.com>
Approved by: Patrick Mooney <patrick.mooney@joyent.com>
Diffstat (limited to 'usr/src/uts/common/vm')
-rw-r--r--usr/src/uts/common/vm/page_lock.c10
-rw-r--r--usr/src/uts/common/vm/page_retire.c7
-rw-r--r--usr/src/uts/common/vm/seg_kmem.c83
-rw-r--r--usr/src/uts/common/vm/seg_kmem.h18
4 files changed, 68 insertions, 50 deletions
diff --git a/usr/src/uts/common/vm/page_lock.c b/usr/src/uts/common/vm/page_lock.c
index 7e48602189..7305c9c85a 100644
--- a/usr/src/uts/common/vm/page_lock.c
+++ b/usr/src/uts/common/vm/page_lock.c
@@ -20,6 +20,7 @@
*/
/*
* Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2018 Joyent, Inc.
*/
@@ -140,9 +141,8 @@ static pad_mutex_t pszc_mutex[PSZC_MTX_TABLE_SIZE];
& (VPH_TABLE_SIZE - 1))
/*
- * Two slots after VPH_TABLE_SIZE are reserved in vph_mutex for kernel vnodes.
- * The lock for kvp is VPH_TABLE_SIZE + 0, and the lock for zvp is
- * VPH_TABLE_SIZE + 1.
+ * Two slots after VPH_TABLE_SIZE are reserved in vph_mutex for kernel vnodes,
+ * one for kvps[KV_ZVP], and one for other kvps[] users.
*/
kmutex_t vph_mutex[VPH_TABLE_SIZE + 2];
@@ -888,10 +888,10 @@ static int page_vnode_mutex_stress = 0;
kmutex_t *
page_vnode_mutex(vnode_t *vp)
{
- if (vp == &kvp)
+ if (vp == &kvp || vp == &kvps[KV_VVP])
return (&vph_mutex[VPH_TABLE_SIZE + 0]);
- if (vp == &zvp)
+ if (vp == &kvps[KV_ZVP])
return (&vph_mutex[VPH_TABLE_SIZE + 1]);
#ifdef DEBUG
if (page_vnode_mutex_stress != 0)
diff --git a/usr/src/uts/common/vm/page_retire.c b/usr/src/uts/common/vm/page_retire.c
index 76be970a45..f4e8d0737f 100644
--- a/usr/src/uts/common/vm/page_retire.c
+++ b/usr/src/uts/common/vm/page_retire.c
@@ -22,6 +22,7 @@
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
* Copyright (c) 2016 by Delphix. All rights reserved.
+ * Copyright 2018 Joyent, Inc.
*/
/*
@@ -851,9 +852,8 @@ page_retire_incr_pend_count(void *datap)
{
PR_INCR_KSTAT(pr_pending);
- if ((datap == &kvp) || (datap == &zvp)) {
+ if (datap == &kvp || datap == &kvps[KV_ZVP] || datap == &kvps[KV_VVP])
PR_INCR_KSTAT(pr_pending_kas);
- }
}
void
@@ -861,9 +861,8 @@ page_retire_decr_pend_count(void *datap)
{
PR_DECR_KSTAT(pr_pending);
- if ((datap == &kvp) || (datap == &zvp)) {
+ if (datap == &kvp || datap == &kvps[KV_ZVP] || datap == &kvps[KV_VVP])
PR_DECR_KSTAT(pr_pending_kas);
- }
}
/*
diff --git a/usr/src/uts/common/vm/seg_kmem.c b/usr/src/uts/common/vm/seg_kmem.c
index 439c859d96..0b116d6eba 100644
--- a/usr/src/uts/common/vm/seg_kmem.c
+++ b/usr/src/uts/common/vm/seg_kmem.c
@@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2016 Joyent, Inc.
+ * Copyright 2018 Joyent, Inc.
*/
#include <sys/types.h>
@@ -122,6 +122,11 @@ vmem_t *static_alloc_arena; /* arena for allocating static memory */
vmem_t *zio_arena = NULL; /* arena for allocating zio memory */
vmem_t *zio_alloc_arena = NULL; /* arena for allocating zio memory */
+#if defined(__amd64)
+vmem_t *kvmm_arena; /* arena for vmm VA */
+struct seg kvmmseg; /* Segment for vmm memory */
+#endif
+
/*
* seg_kmem driver can map part of the kernel heap with large pages.
* Currently this functionality is implemented for sparc platforms only.
@@ -440,7 +445,7 @@ segkmem_badop()
/*ARGSUSED*/
static faultcode_t
segkmem_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t size,
- enum fault_type type, enum seg_rw rw)
+ enum fault_type type, enum seg_rw rw)
{
pgcnt_t npages;
spgcnt_t pg;
@@ -655,13 +660,19 @@ segkmem_dump(struct seg *seg)
segkmem_dump_range, seg->s_as);
vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
segkmem_dump_range, seg->s_as);
+ /*
+ * We don't want to dump pages attached to kzioseg since they
+ * contain file data from ZFS. If this page's segment is
+ * kzioseg return instead of writing it to the dump device.
+ *
+ * Same applies to VM memory allocations.
+ */
} else if (seg == &kzioseg) {
- /*
- * We don't want to dump pages attached to kzioseg since they
- * contain file data from ZFS. If this page's segment is
- * kzioseg return instead of writing it to the dump device.
- */
return;
+#if defined(__amd64)
+ } else if (seg == &kvmmseg) {
+ return;
+#endif
} else {
segkmem_dump_range(seg->s_as, seg->s_base, seg->s_size);
}
@@ -677,7 +688,7 @@ segkmem_dump(struct seg *seg)
/*ARGSUSED*/
static int
segkmem_pagelock(struct seg *seg, caddr_t addr, size_t len,
- page_t ***ppp, enum lock_type type, enum seg_rw rw)
+ page_t ***ppp, enum lock_type type, enum seg_rw rw)
{
page_t **pplist, *pp;
pgcnt_t npages;
@@ -802,21 +813,18 @@ struct seg_ops segkmem_ops = {
};
int
-segkmem_zio_create(struct seg *seg)
-{
- ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
- seg->s_ops = &segkmem_ops;
- seg->s_data = &zvp;
- kas.a_size += seg->s_size;
- return (0);
-}
-
-int
segkmem_create(struct seg *seg)
{
ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
seg->s_ops = &segkmem_ops;
- seg->s_data = &kvp;
+ if (seg == &kzioseg)
+ seg->s_data = &kvps[KV_ZVP];
+#if defined(__amd64)
+ else if (seg == &kvmmseg)
+ seg->s_data = &kvps[KV_VVP];
+#endif
+ else
+ seg->s_data = &kvps[KV_KVP];
kas.a_size += seg->s_size;
return (0);
}
@@ -858,7 +866,7 @@ segkmem_page_create(void *addr, size_t size, int vmflag, void *arg)
*/
void *
segkmem_xalloc(vmem_t *vmp, void *inaddr, size_t size, int vmflag, uint_t attr,
- page_t *(*page_create_func)(void *, size_t, int, void *), void *pcarg)
+ page_t *(*page_create_func)(void *, size_t, int, void *), void *pcarg)
{
page_t *ppl;
caddr_t addr = inaddr;
@@ -968,10 +976,10 @@ segkmem_alloc(vmem_t *vmp, size_t size, int vmflag)
return (segkmem_alloc_vn(vmp, size, vmflag, &kvp));
}
-void *
+static void *
segkmem_zio_alloc(vmem_t *vmp, size_t size, int vmflag)
{
- return (segkmem_alloc_vn(vmp, size, vmflag, &zvp));
+ return (segkmem_alloc_vn(vmp, size, vmflag, &kvps[KV_ZVP]));
}
/*
@@ -980,8 +988,8 @@ segkmem_zio_alloc(vmem_t *vmp, size_t size, int vmflag)
* we currently don't have a special kernel segment for non-paged
* kernel memory that is exported by drivers to user space.
*/
-static void
-segkmem_free_vn(vmem_t *vmp, void *inaddr, size_t size, struct vnode *vp,
+void
+segkmem_xfree(vmem_t *vmp, void *inaddr, size_t size, struct vnode *vp,
void (*func)(page_t *))
{
page_t *pp;
@@ -1038,21 +1046,15 @@ segkmem_free_vn(vmem_t *vmp, void *inaddr, size_t size, struct vnode *vp,
}
void
-segkmem_xfree(vmem_t *vmp, void *inaddr, size_t size, void (*func)(page_t *))
-{
- segkmem_free_vn(vmp, inaddr, size, &kvp, func);
-}
-
-void
segkmem_free(vmem_t *vmp, void *inaddr, size_t size)
{
- segkmem_free_vn(vmp, inaddr, size, &kvp, NULL);
+ segkmem_xfree(vmp, inaddr, size, &kvp, NULL);
}
-void
+static void
segkmem_zio_free(vmem_t *vmp, void *inaddr, size_t size)
{
- segkmem_free_vn(vmp, inaddr, size, &zvp, NULL);
+ segkmem_xfree(vmp, inaddr, size, &kvps[KV_ZVP], NULL);
}
void
@@ -1534,8 +1536,21 @@ segkmem_zio_init(void *zio_mem_base, size_t zio_mem_size)
ASSERT(zio_alloc_arena != NULL);
}
-#ifdef __sparc
+#if defined(__amd64)
+
+void
+segkmem_kvmm_init(void *base, size_t size)
+{
+ ASSERT(base != NULL);
+ ASSERT(size != 0);
+
+ kvmm_arena = vmem_create("kvmm_arena", base, size, 1024 * 1024,
+ NULL, NULL, NULL, 0, VM_SLEEP);
+
+ ASSERT(kvmm_arena != NULL);
+}
+#elif defined(__sparc)
static void *
segkmem_alloc_ppa(vmem_t *vmp, size_t size, int vmflag)
diff --git a/usr/src/uts/common/vm/seg_kmem.h b/usr/src/uts/common/vm/seg_kmem.h
index 1db85826b1..9a20101670 100644
--- a/usr/src/uts/common/vm/seg_kmem.h
+++ b/usr/src/uts/common/vm/seg_kmem.h
@@ -21,7 +21,7 @@
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
- * Copyright 2016 Joyent, Inc.
+ * Copyright 2018 Joyent, Inc.
* Copyright 2017 RackTop Systems.
*/
@@ -65,12 +65,18 @@ extern vmem_t *static_arena; /* arena for caches to import static memory */
extern vmem_t *static_alloc_arena; /* arena for allocating static memory */
extern vmem_t *zio_arena; /* arena for zio caches */
extern vmem_t *zio_alloc_arena; /* arena for zio caches */
+
+#if defined(__amd64)
+extern struct seg kvmmseg; /* Segment for vmm mappings */
+extern vmem_t *kvmm_arena; /* arena for vmm VA */
+extern void segkmem_kvmm_init(void *, size_t);
+#endif
+
extern struct vnode kvps[];
/*
- * segkmem page vnodes
+ * segkmem page vnodes (please don't add more defines here...)
*/
#define kvp (kvps[KV_KVP])
-#define zvp (kvps[KV_ZVP])
#if defined(__sparc)
#define mpvp (kvps[KV_MPVP])
#define promvp (kvps[KV_PROMVP])
@@ -83,16 +89,14 @@ extern void *segkmem_xalloc(vmem_t *, void *, size_t, int, uint_t,
extern void *segkmem_alloc(vmem_t *, size_t, int);
extern void *segkmem_alloc_permanent(vmem_t *, size_t, int);
extern void segkmem_free(vmem_t *, void *, size_t);
-extern void segkmem_xfree(vmem_t *, void *, size_t, void (*)(page_t *));
+extern void segkmem_xfree(vmem_t *, void *, size_t,
+ struct vnode *, void (*)(page_t *));
extern void *boot_alloc(void *, size_t, uint_t);
extern void boot_mapin(caddr_t addr, size_t size);
extern void kernelheap_init(void *, void *, char *, void *, void *);
extern void segkmem_gc(void);
-extern void *segkmem_zio_alloc(vmem_t *, size_t, int);
-extern int segkmem_zio_create(struct seg *);
-extern void segkmem_zio_free(vmem_t *, void *, size_t);
extern void segkmem_zio_init(void *, size_t);
/*