Diffstat (limited to 'usr/src')
-rw-r--r--   usr/src/lib/libumem/common/umem.c        28
-rw-r--r--   usr/src/lib/libumem/common/umem_fork.c   86
2 files changed, 79 insertions, 35 deletions
diff --git a/usr/src/lib/libumem/common/umem.c b/usr/src/lib/libumem/common/umem.c
index 725d4add40..6f2304390f 100644
--- a/usr/src/lib/libumem/common/umem.c
+++ b/usr/src/lib/libumem/common/umem.c
@@ -20,7 +20,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -55,6 +55,7 @@
*
* * KM_SLEEP v.s. UMEM_NOFAIL
*
+ * * lock ordering
*
* 2. Initialization
* -----------------
@@ -329,6 +330,31 @@
* If a constructor callback _does_ do a UMEM_NOFAIL allocation, and
* the nofail callback does a non-local exit, we will leak the
* partially-constructed buffer.
+ *
+ *
+ * 6. Lock Ordering
+ * ----------------
+ * umem has a few more locks than kmem does, mostly in the update path. The
+ * overall lock ordering (earlier locks must be acquired first) is:
+ *
+ * umem_init_lock
+ *
+ * vmem_list_lock
+ * vmem_nosleep_lock.vmpl_mutex
+ * vmem_t's:
+ * vm_lock
+ * sbrk_faillock
+ *
+ * umem_cache_lock
+ * umem_update_lock
+ * umem_flags_lock
+ * umem_cache_t's:
+ * cache_cpu[*].cc_lock
+ * cache_depot_lock
+ * cache_lock
+ * umem_log_header_t's:
+ * lh_cpu[*].clh_lock
+ * lh_lock
*/
#include "mtlib.h"
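
For illustration only (not part of this change): the lock-ordering comment above boils down to the usual hierarchy discipline, i.e. the "earlier" lock is always taken before any "later" one, and the locks are dropped in the reverse order. The sketch below shows that pattern with POSIX mutexes and hypothetical lock names standing in for the umem/vmem locks listed in the comment; libumem itself uses the libc mutex_lock()/mutex_unlock() interfaces seen in the diff that follows.

#include <pthread.h>

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;	/* earliest */
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;	/* latest */

static void
lockup(void)
{
	/* acquire in hierarchy order: init, then cache, then update */
	(void) pthread_mutex_lock(&init_lock);
	(void) pthread_mutex_lock(&cache_lock);
	(void) pthread_mutex_lock(&update_lock);
}

static void
release(void)
{
	/* release in the reverse order of acquisition */
	(void) pthread_mutex_unlock(&update_lock);
	(void) pthread_mutex_unlock(&cache_lock);
	(void) pthread_mutex_unlock(&init_lock);
}

int
main(void)
{
	lockup();
	release();
	return (0);
}
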
diff --git a/usr/src/lib/libumem/common/umem_fork.c b/usr/src/lib/libumem/common/umem_fork.c
index 34c15c4db2..8aa7a31dd4 100644
--- a/usr/src/lib/libumem/common/umem_fork.c
+++ b/usr/src/lib/libumem/common/umem_fork.c
@@ -20,7 +20,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -33,7 +33,8 @@
#include <unistd.h>
/*
- * The following functions are for pre- and post-fork1(2) handling.
+ * The following functions are for pre- and post-fork1(2) handling. See
+ * "Lock Ordering" in lib/libumem/common/umem.c for the lock ordering used.
*/
static void
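
A hedged aside, not part of this diff: pre- and post-fork handlers of this kind are conventionally registered with pthread_atfork(3C), so the "prepare" handler runs in the parent just before the fork and the release handlers run afterwards in the parent and in the child respectively. The snippet below is a self-contained sketch of that wiring with placeholder handlers; libumem's own registration call is not shown in this change.

#include <pthread.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static void
prepare(void)		/* parent, just before the fork */
{
	(void) pthread_mutex_lock(&state_lock);
}

static void
release_parent(void)	/* parent, just after the fork */
{
	(void) pthread_mutex_unlock(&state_lock);
}

static void
release_child(void)	/* child, just after the fork */
{
	(void) pthread_mutex_unlock(&state_lock);
}

int
main(void)
{
	pid_t pid;

	/* register once; every subsequent fork() invokes the handlers */
	(void) pthread_atfork(prepare, release_parent, release_child);

	if ((pid = fork()) == 0)
		_exit(0);	/* child: state_lock already released */
	(void) waitpid(pid, NULL, 0);
	return (0);
}
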
@@ -103,6 +104,10 @@ umem_lockup(void)
(void) umem_init();
(void) mutex_lock(&umem_init_lock);
}
+
+ vmem_lockup();
+ vmem_sbrk_lockup();
+
(void) mutex_lock(&umem_cache_lock);
(void) mutex_lock(&umem_update_lock);
(void) mutex_lock(&umem_flags_lock);
@@ -119,46 +124,30 @@ umem_lockup(void)
(void) cond_broadcast(&umem_update_cv);
- vmem_sbrk_lockup();
- vmem_lockup();
}
static void
-umem_release(void)
-{
- umem_cache_t *cp;
-
- vmem_release();
- vmem_sbrk_release();
-
- umem_release_log_header(umem_slab_log);
- umem_release_log_header(umem_failure_log);
- umem_release_log_header(umem_content_log);
- umem_release_log_header(umem_transaction_log);
-
- for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
- cp = cp->cache_next)
- umem_release_cache(cp);
- umem_release_cache(&umem_null_cache);
-
- (void) mutex_unlock(&umem_flags_lock);
- (void) mutex_unlock(&umem_update_lock);
- (void) mutex_unlock(&umem_cache_lock);
- (void) mutex_unlock(&umem_init_lock);
-}
-
-static void
-umem_release_child(void)
+umem_do_release(int as_child)
{
umem_cache_t *cp;
+ int cleanup_update = 0;
/*
- * Clean up the update state
+ * Clean up the update state if we are the child process and
+ * another thread was processing updates.
*/
- umem_update_thr = 0;
+ if (as_child) {
+ if (umem_update_thr != thr_self()) {
+ umem_update_thr = 0;
+ cleanup_update = 1;
+ }
+ if (umem_st_update_thr != thr_self()) {
+ umem_st_update_thr = 0;
+ cleanup_update = 1;
+ }
+ }
- if (umem_st_update_thr != thr_self()) {
- umem_st_update_thr = 0;
+ if (cleanup_update) {
umem_reaping = UMEM_REAP_DONE;
for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
@@ -191,7 +180,36 @@ umem_release_child(void)
}
}
- umem_release();
+ umem_release_log_header(umem_slab_log);
+ umem_release_log_header(umem_failure_log);
+ umem_release_log_header(umem_content_log);
+ umem_release_log_header(umem_transaction_log);
+
+ for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
+ cp = cp->cache_next)
+ umem_release_cache(cp);
+ umem_release_cache(&umem_null_cache);
+
+ (void) mutex_unlock(&umem_flags_lock);
+ (void) mutex_unlock(&umem_update_lock);
+ (void) mutex_unlock(&umem_cache_lock);
+
+ vmem_sbrk_release();
+ vmem_release();
+
+ (void) mutex_unlock(&umem_init_lock);
+}
+
+static void
+umem_release(void)
+{
+ umem_do_release(0);
+}
+
+static void
+umem_release_child(void)
+{
+ umem_do_release(1);
}
void