author     Jonathan Adams <Jonathan.Adams@Sun.COM>   2009-12-09 16:02:00 -0800
committer  Jonathan Adams <Jonathan.Adams@Sun.COM>   2009-12-09 16:02:00 -0800
commit     d32efdadf99ffd25752922f91fe04ab04eda7f70 (patch)
tree       14b10a1ddf2ab6166e0f073bb9235a425dcd3540 /usr/src
parent     2f172c55ef76964744bc62b4500ece87f3089b4d (diff)
download   illumos-gate-d32efdadf99ffd25752922f91fe04ab04eda7f70.tar.gz
6905982 panic: NULL pointer dereference in anon_get_ptr() in swapout thread
6908270 Insufficient test for "segkp_fault: bad unlock" panic in segkp_fault()
Diffstat (limited to 'usr/src')
-rw-r--r--   usr/src/uts/common/disp/thread.c  | 63
-rw-r--r--   usr/src/uts/common/os/lwp.c       | 55
-rw-r--r--   usr/src/uts/common/sys/thread.h   |  4
-rw-r--r--   usr/src/uts/common/vm/seg_kp.c    |  2
4 files changed, 74 insertions(+), 50 deletions(-)
diff --git a/usr/src/uts/common/disp/thread.c b/usr/src/uts/common/disp/thread.c
index a5a06b95ba..245ee27e04 100644
--- a/usr/src/uts/common/disp/thread.c
+++ b/usr/src/uts/common/disp/thread.c
@@ -413,10 +413,6 @@ thread_create(
stkinfo_begin(t);
}
- /* set default stack flag */
- if (stksize == lwp_default_stksize)
- t->t_flag |= T_DFLTSTK;
-
t->t_ts = ts;
/*
@@ -727,6 +723,10 @@ thread_free_barrier(kthread_t *t)
void
thread_free(kthread_t *t)
{
+ boolean_t allocstk = (t->t_flag & T_TALLOCSTK);
+ klwp_t *lwp = t->t_lwp;
+ caddr_t swap = t->t_swap;
+
ASSERT(t != &t0 && t->t_state == TS_FREE);
ASSERT(t->t_door == NULL);
ASSERT(t->t_schedctl == NULL);
@@ -759,13 +759,13 @@ thread_free(kthread_t *t)
t->t_rprof = NULL;
}
t->t_lockp = NULL; /* nothing should try to lock this thread now */
- if (t->t_lwp)
- lwp_freeregs(t->t_lwp, 0);
+ if (lwp)
+ lwp_freeregs(lwp, 0);
if (t->t_ctx)
freectx(t, 0);
t->t_stk = NULL;
- if (t->t_lwp)
- lwp_stk_fini(t->t_lwp);
+ if (lwp)
+ lwp_stk_fini(lwp);
lock_clear(&t->t_lock);
if (t->t_ts->ts_waiters > 0)
@@ -787,27 +787,24 @@ thread_free(kthread_t *t)
lgrp_affinity_free(&t->t_lgrp_affinity);
+ mutex_enter(&pidlock);
+ nthread--;
+ mutex_exit(&pidlock);
+
/*
- * Free thread struct and its stack.
+ * Free thread, lwp and stack. This needs to be done carefully, since
+ * if T_TALLOCSTK is set, the thread is part of the stack.
*/
- if (t->t_flag & T_TALLOCSTK) {
- /* thread struct is embedded in stack */
- segkp_release(segkp, t->t_swap);
- mutex_enter(&pidlock);
- nthread--;
- mutex_exit(&pidlock);
- } else {
- if (t->t_swap) {
- segkp_release(segkp, t->t_swap);
- t->t_swap = NULL;
- }
- if (t->t_lwp) {
- kmem_cache_free(lwp_cache, t->t_lwp);
- t->t_lwp = NULL;
- }
- mutex_enter(&pidlock);
- nthread--;
- mutex_exit(&pidlock);
+ t->t_lwp = NULL;
+ t->t_swap = NULL;
+
+ if (swap) {
+ segkp_release(segkp, swap);
+ }
+ if (lwp) {
+ kmem_cache_free(lwp_cache, lwp);
+ }
+ if (!allocstk) {
kmem_cache_free(thread_cache, t);
}
}
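
To make the restructured free path easier to follow, here is a condensed sketch of the ordering the new thread_free() code enforces. It is a non-authoritative fragment that only rearranges the added lines above and assumes the same kernel context as the hunk (segkp, lwp_cache, thread_cache):

    /*
     * Sketch: when T_TALLOCSTK is set, the kthread_t is embedded in the
     * segkp-backed stack, so segkp_release() frees the memory that 't'
     * itself lives in.  The locals are therefore captured first, and
     * thread_cache is only used for threads allocated separately.
     */
    boolean_t allocstk = (t->t_flag & T_TALLOCSTK);
    klwp_t *lwp = t->t_lwp;
    caddr_t swap = t->t_swap;

    t->t_lwp = NULL;
    t->t_swap = NULL;

    if (swap != NULL)
        segkp_release(segkp, swap);          /* may free 't' when allocstk */
    if (lwp != NULL)
        kmem_cache_free(lwp_cache, lwp);
    if (!allocstk)
        kmem_cache_free(thread_cache, t);    /* 't' was a separate allocation */
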
@@ -983,11 +980,15 @@ reapq_add(kthread_t *t)
mutex_enter(&reaplock);
/*
- * lwp_deathrow contains only threads with lwp linkage
- * that are of the default stacksize. Anything else goes
- * on thread_deathrow.
+ * lwp_deathrow contains threads with lwp linkage and
+ * swappable thread stacks which have the default stacksize.
+ * These threads' lwps and stacks may be reused by lwp_create().
+ *
+ * Anything else goes on thread_deathrow(), where it will eventually
+ * be thread_free()d.
*/
- if (ttolwp(t) && (t->t_flag & T_DFLTSTK)) {
+ if (t->t_flag & T_LWPREUSE) {
+ ASSERT(ttolwp(t) != NULL);
t->t_forw = lwp_deathrow;
lwp_deathrow = t;
lwp_reapcnt++;
diff --git a/usr/src/uts/common/os/lwp.c b/usr/src/uts/common/os/lwp.c
index 091c4c4a21..229c3d1177 100644
--- a/usr/src/uts/common/os/lwp.c
+++ b/usr/src/uts/common/os/lwp.c
@@ -150,26 +150,25 @@ lwp_create(void (*proc)(), caddr_t arg, size_t len, proc_t *p,
mutex_exit(&p->p_zone->zone_nlwps_lock);
mutex_exit(&p->p_lock);
- if (CLASS_KERNEL(cid)) {
- curlwp = NULL; /* don't inherit from curlwp */
+ curlwp = ttolwp(curthread);
+ if (curlwp == NULL || (stksize = curlwp->lwp_childstksz) == 0)
stksize = lwp_default_stksize;
- } else {
- curlwp = ttolwp(curthread);
- if (curlwp == NULL || (stksize = curlwp->lwp_childstksz) == 0)
- stksize = lwp_default_stksize;
- }
- /*
- * For system threads, we sleep for our swap reservation, and the
- * thread stack can't be swapped.
- *
- * Otherwise, try to reclaim a <lwp,stack> from 'deathrow'
- */
if (CLASS_KERNEL(cid)) {
- lwpdata = (caddr_t)segkp_get(segkp, stksize,
- (KPD_NO_ANON | KPD_HASREDZONE | KPD_LOCKED));
+ /*
+ * Since we are creating an LWP in an SSYS process, we do not
+ * inherit anything from the current thread's LWP. We set
+ * stksize and lwpdata to 0 in order to let thread_create()
+ * allocate a regular kernel thread stack for this thread.
+ */
+ curlwp = NULL;
+ stksize = 0;
+ lwpdata = NULL;
} else if (stksize == lwp_default_stksize) {
+ /*
+ * Try to reuse an <lwp,stack> from the LWP deathrow.
+ */
if (lwp_reapcnt > 0) {
mutex_enter(&reaplock);
if ((t = lwp_deathrow) != NULL) {
@@ -223,7 +222,31 @@ lwp_create(void (*proc)(), caddr_t arg, size_t len, proc_t *p,
*/
t = thread_create(lwpdata, stksize, NULL, NULL, 0, p, TS_STOPPED, pri);
- t->t_swap = lwpdata; /* Start of page-able data */
+ /*
+ * If a non-NULL stack base is passed in, thread_create() assumes
+ * that the stack might be statically allocated (as opposed to being
+ * allocated from segkp), and so it does not set t_swap. Since
+ * the lwpdata was allocated from segkp, we must set t_swap to point
+ * to it ourselves.
+ *
+ * This would be less confusing if t_swap had a better name; it really
+ * indicates that the stack is allocated from segkp, regardless of
+ * whether or not it is swappable.
+ */
+ if (lwpdata != NULL) {
+ ASSERT(!CLASS_KERNEL(cid));
+ ASSERT(t->t_swap == NULL);
+ t->t_swap = lwpdata; /* Start of page-able data */
+ }
+
+ /*
+ * If the stack and lwp can be reused, mark the thread as such.
+ * When we get to reapq_add() from resume_from_zombie(), these
+ * threads will go onto lwp_deathrow instead of thread_deathrow.
+ */
+ if (!CLASS_KERNEL(cid) && stksize == lwp_default_stksize)
+ t->t_flag |= T_LWPREUSE;
+
if (lwp == NULL)
lwp = kmem_cache_alloc(lwp_cache, KM_SLEEP);
bzero(lwp, sizeof (*lwp));
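
Read together, the reapq_add() hunk in thread.c and the lwp_create() hunk above form a producer/consumer pair around lwp_deathrow. The following is a hedged sketch of that flow using only names that appear in this diff; the list unlinking and the rest of the reuse path are not shown in the hunks and are elided here as well:

    /*
     * Producer side: reapq_add(), after taking reaplock (see the
     * mutex_enter() at the top of that hunk).
     */
    if (t->t_flag & T_LWPREUSE) {
        ASSERT(ttolwp(t) != NULL);       /* T_LWPREUSE implies lwp linkage */
        t->t_forw = lwp_deathrow;
        lwp_deathrow = t;
        lwp_reapcnt++;
    }

    /*
     * Consumer side: lwp_create(), non-kernel class with the default
     * stack size, trying to take over an existing <lwp,stack>.
     */
    if (lwp_reapcnt > 0) {
        mutex_enter(&reaplock);
        if ((t = lwp_deathrow) != NULL) {
            /* reuse t's lwp and segkp stack instead of allocating fresh */
        }
        mutex_exit(&reaplock);
    }
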
diff --git a/usr/src/uts/common/sys/thread.h b/usr/src/uts/common/sys/thread.h
index 8b5a48905d..22b495cd89 100644
--- a/usr/src/uts/common/sys/thread.h
+++ b/usr/src/uts/common/sys/thread.h
@@ -138,7 +138,7 @@ typedef struct _kthread {
uintptr_t t_lofault; /* ret pc for failed page faults */
label_t *t_onfault; /* on_fault() setjmp buf */
struct on_trap_data *t_ontrap; /* on_trap() protection data */
- caddr_t t_swap; /* swappable thread storage */
+ caddr_t t_swap; /* the bottom of the stack, if from segkp */
lock_t t_lock; /* used to resume() a thread */
uint8_t t_lockstat; /* set while thread is in lockstat code */
uint8_t t_pil; /* interrupt thread PIL */
@@ -369,7 +369,7 @@ typedef struct _kthread {
#define T_WAITCVSEM 0x0200 /* waiting for a lwp_cv or lwp_sema on sleepq */
#define T_WATCHPT 0x0400 /* thread undergoing a watchpoint emulation */
#define T_PANIC 0x0800 /* thread initiated a system panic */
-#define T_DFLTSTK 0x1000 /* stack is default size */
+#define T_LWPREUSE 0x1000 /* stack and LWP can be reused */
#define T_CAPTURING 0x2000 /* thread is in page capture logic */
#define T_VFPARENT 0x4000 /* thread is vfork parent, must call vfwait */
#define T_DONTDTRACE 0x8000 /* disable DTrace probes */
diff --git a/usr/src/uts/common/vm/seg_kp.c b/usr/src/uts/common/vm/seg_kp.c
index af684f4c06..9336e03540 100644
--- a/usr/src/uts/common/vm/seg_kp.c
+++ b/usr/src/uts/common/vm/seg_kp.c
@@ -1001,7 +1001,7 @@ segkp_fault(
* Make sure the addr is LOCKED and it has anon backing
* before unlocking
*/
- if ((kpd->kp_flags & (KPD_LOCKED|KPD_NO_ANON)) == KPD_NO_ANON) {
+ if ((kpd->kp_flags & (KPD_LOCKED|KPD_NO_ANON)) != KPD_LOCKED) {
panic("segkp_fault: bad unlock");
/*NOTREACHED*/
}
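
For CR 6908270, the tightened check can be read as a truth table over the two masked flag bits. The comment-only sketch below is one reading of the change, not authoritative documentation; the KPD_* definitions themselves live in the seg_kp headers and are not part of this diff:

    /*
     * kp_flags & (KPD_LOCKED|KPD_NO_ANON)        old "== KPD_NO_ANON"   new "!= KPD_LOCKED"
     * 0              (not locked, has anon)      no panic               panic
     * KPD_NO_ANON    (not locked, no anon)       panic                  panic
     * KPD_LOCKED     (locked, has anon: valid)   no panic               no panic
     * KPD_LOCKED|KPD_NO_ANON (locked, no anon)   no panic               panic
     *
     * Only the locked-with-anon-backing case is a legal unlock here, which
     * is exactly what the new "!= KPD_LOCKED" comparison enforces; the old
     * test missed two of the three invalid combinations.
     */
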