author     trevtom <none@none>    2008-03-25 06:30:28 -0700
committer  trevtom <none@none>    2008-03-25 06:30:28 -0700
commit     4568bee7eb526744ad873b26264367b35234c9f4
tree       07f14daef0b75aa02c38e939601fd3882e246eac
parent     44aaa2b62b55f245416069f3c1ad5a3485d51e95
download   illumos-joyent-4568bee7eb526744ad873b26264367b35234c9f4.tar.gz
6398097 Panic in kcpc_unbind().
6398111 Circular dependency between sys/thread.h and sys/kcpc.h.
6542909 race condition between kcpc_restore() and kcpc_unbind()
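The fix for the bind/unbind races introduces a per-set state bit (KCPC_SET_BOUND) protected by a new ks_lock/ks_condv pair: the binding thread sets the bit and signals once the set is fully bound, and kcpc_unbind() waits for the bit before tearing the set down. The sketch below is a minimal userland analogue of that handshake only; it is not the kernel code, and the names (counter_set_t, SET_BOUND, bind_set, unbind_set) are made up for illustration.

/*
 * Userland sketch of the bind/unbind handshake (illustrative names only).
 * Build with: cc -o bind_sketch bind_sketch.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>

#define	SET_BOUND	0x0001

typedef struct counter_set {
	unsigned short	state;		/* analogue of ks_state */
	pthread_mutex_t	lock;		/* analogue of ks_lock */
	pthread_cond_t	condv;		/* analogue of ks_condv */
} counter_set_t;

static void *
bind_set(void *arg)
{
	counter_set_t *set = arg;

	/* ... program the hardware counters for this set here ... */

	pthread_mutex_lock(&set->lock);
	set->state |= SET_BOUND;		/* publish "bind finished" */
	pthread_cond_signal(&set->condv);
	pthread_mutex_unlock(&set->lock);
	return (NULL);
}

static void
unbind_set(counter_set_t *set)
{
	/* Wait for a concurrent bind to finish before tearing down. */
	pthread_mutex_lock(&set->lock);
	while ((set->state & SET_BOUND) == 0)
		pthread_cond_wait(&set->condv, &set->lock);
	pthread_mutex_unlock(&set->lock);

	/* ... it is now safe to invalidate and free the set ... */
	(void) printf("set unbound\n");
}

int
main(void)
{
	counter_set_t set = { 0, PTHREAD_MUTEX_INITIALIZER,
	    PTHREAD_COND_INITIALIZER };
	pthread_t binder;

	(void) pthread_create(&binder, NULL, bind_set, &set);
	unbind_set(&set);		/* may run before or after bind_set */
	(void) pthread_join(binder, NULL);
	return (0);
}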
-rw-r--r--  usr/src/uts/common/io/cpc.c        |  15
-rw-r--r--  usr/src/uts/common/os/kcpc.c       | 104
-rw-r--r--  usr/src/uts/common/sys/cpc_impl.h  |   7
-rw-r--r--  usr/src/uts/common/sys/kcpc.h      |   8
-rw-r--r--  usr/src/uts/common/sys/thread.h    |   6
5 files changed, 111 insertions(+), 29 deletions(-)
diff --git a/usr/src/uts/common/io/cpc.c b/usr/src/uts/common/io/cpc.c
index 7a80b0d888..8a00a92fc1 100644
--- a/usr/src/uts/common/io/cpc.c
+++ b/usr/src/uts/common/io/cpc.c
@@ -2,9 +2,8 @@
  * CDDL HEADER START
  *
  * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
  *
  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  * or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
  * CDDL HEADER END
  */
 /*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -638,8 +637,10 @@ kcpc_copyin_set(kcpc_set_t **inset, void *ubuf, size_t len)
 	/*
 	 * The requests are now stored in the nvlist array at reqlist.
+	 * Note that the use of kmem_zalloc() to alloc the kcpc_set_t means
+	 * we don't need to call the init routines for ks_lock and ks_condv.
 	 */
-	set = kmem_alloc(sizeof (kcpc_set_t), KM_SLEEP);
+	set = kmem_zalloc(sizeof (kcpc_set_t), KM_SLEEP);
 	set->ks_req = (kcpc_request_t *)kmem_zalloc(sizeof (kcpc_request_t) *
 	    nreqs, KM_SLEEP);
 	set->ks_nreqs = nreqs;
@@ -648,6 +649,10 @@ kcpc_copyin_set(kcpc_set_t **inset, void *ubuf, size_t len)
 	 * with an illegal value and this set will fail sanity checks later on.
 	 */
 	set->ks_flags = setflags;
+	/*
+	 * Initialize bind/unbind set synchronization.
+	 */
+	set->ks_state &= ~KCPC_SET_BOUND;
 
 	/*
 	 * Build the set up one request at a time, always keeping it self-
diff --git a/usr/src/uts/common/os/kcpc.c b/usr/src/uts/common/os/kcpc.c
index 50fb62d782..df255cac93 100644
--- a/usr/src/uts/common/os/kcpc.c
+++ b/usr/src/uts/common/os/kcpc.c
@@ -20,7 +20,7 @@
  */
 
 /*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -32,7 +32,7 @@
 #include <sys/inttypes.h>
 #include <sys/cmn_err.h>
 #include <sys/time.h>
-#include <sys/mutex.h>
+#include <sys/ksynch.h>
 #include <sys/systm.h>
 #include <sys/kcpc.h>
 #include <sys/cpc_impl.h>
@@ -166,6 +166,11 @@ kcpc_bind_cpu(kcpc_set_t *set, processorid_t cpuid, int *subcode)
 	mutex_exit(&cp->cpu_cpc_ctxlock);
 	mutex_exit(&cpu_lock);
 
+	mutex_enter(&set->ks_lock);
+	set->ks_state |= KCPC_SET_BOUND;
+	cv_signal(&set->ks_condv);
+	mutex_exit(&set->ks_lock);
+
 	return (0);
 
 unbound:
@@ -257,6 +262,10 @@ kcpc_bind_thread(kcpc_set_t *set, kthread_t *t, int *subcode)
 	 */
 	atomic_and_uint(&ctx->kc_flags, ~KCPC_CTX_FREEZE);
 
+	mutex_enter(&set->ks_lock);
+	set->ks_state |= KCPC_SET_BOUND;
+	cv_signal(&set->ks_condv);
+	mutex_exit(&set->ks_lock);
 	return (0);
 }
 
@@ -340,9 +349,14 @@ kcpc_sample(kcpc_set_t *set, uint64_t *buf, hrtime_t *hrtime, uint64_t *tick)
 	kcpc_ctx_t	*ctx = set->ks_ctx;
 	uint64_t	curtick = KCPC_GET_TICK();
 
-	if (ctx == NULL)
+	mutex_enter(&set->ks_lock);
+	if ((set->ks_state & KCPC_SET_BOUND) == 0) {
+		mutex_exit(&set->ks_lock);
 		return (EINVAL);
-	else if (ctx->kc_flags & KCPC_CTX_INVALID)
+	}
+	mutex_exit(&set->ks_lock);
+
+	if (ctx->kc_flags & KCPC_CTX_INVALID)
 		return (EAGAIN);
 
 	if ((ctx->kc_flags & KCPC_CTX_FREEZE) == 0) {
@@ -416,13 +430,27 @@ kcpc_stop_hw(kcpc_ctx_t *ctx)
 int
 kcpc_unbind(kcpc_set_t *set)
 {
-	kcpc_ctx_t	*ctx = set->ks_ctx;
+	kcpc_ctx_t	*ctx;
 	kthread_t	*t;
 
-	if (ctx == NULL)
-		return (EINVAL);
+	/*
+	 * We could be racing with the process's agent thread as it
+	 * binds the set; we must wait for the set to finish binding
+	 * before attempting to tear it down.
+	 */
+	mutex_enter(&set->ks_lock);
+	while ((set->ks_state & KCPC_SET_BOUND) == 0)
+		cv_wait(&set->ks_condv, &set->ks_lock);
+	mutex_exit(&set->ks_lock);
 
-	atomic_or_uint(&ctx->kc_flags, KCPC_CTX_INVALID);
+	ctx = set->ks_ctx;
+
+	/*
+	 * Use kc_lock to synchronize with kcpc_restore().
+	 */
+	mutex_enter(&ctx->kc_lock);
+	ctx->kc_flags |= KCPC_CTX_INVALID;
+	mutex_exit(&ctx->kc_lock);
 
 	if (ctx->kc_cpuid == -1) {
 		t = ctx->kc_thread;
@@ -492,7 +520,7 @@ kcpc_preset(kcpc_set_t *set, int index, uint64_t preset)
 	int i;
 
 	ASSERT(set != NULL);
-	ASSERT(set->ks_ctx != NULL);
+	ASSERT(set->ks_state & KCPC_SET_BOUND);
 	ASSERT(set->ks_ctx->kc_thread == curthread);
 	ASSERT(set->ks_ctx->kc_cpuid == -1);
 
@@ -514,7 +542,7 @@ kcpc_restart(kcpc_set_t *set)
 	kcpc_ctx_t	*ctx = set->ks_ctx;
 	int		i;
 
-	ASSERT(ctx != NULL);
+	ASSERT(set->ks_state & KCPC_SET_BOUND);
 	ASSERT(ctx->kc_thread == curthread);
 	ASSERT(ctx->kc_cpuid == -1);
 
@@ -690,7 +718,7 @@ kcpc_ctx_alloc(void)
 	kcpc_ctx_t	*ctx;
 	long		hash;
 
-	ctx = (kcpc_ctx_t *)kmem_alloc(sizeof (kcpc_ctx_t), KM_SLEEP);
+	ctx = (kcpc_ctx_t *)kmem_zalloc(sizeof (kcpc_ctx_t), KM_SLEEP);
 
 	hash = CPC_HASH_CTX(ctx);
 	mutex_enter(&kcpc_ctx_llock[hash]);
@@ -701,9 +729,6 @@ kcpc_ctx_alloc(void)
 	ctx->kc_pics = (kcpc_pic_t *)kmem_zalloc(sizeof (kcpc_pic_t) *
 	    cpc_ncounters, KM_SLEEP);
 
-	ctx->kc_flags = 0;
-	ctx->kc_vtick = 0;
-	ctx->kc_rawtick = 0;
 	ctx->kc_cpuid = -1;
 
 	return (ctx);
@@ -725,7 +750,8 @@ kcpc_ctx_clone(kcpc_ctx_t *ctx, kcpc_ctx_t *cctx)
 	if ((ks->ks_flags & CPC_BIND_LWP_INHERIT) == 0)
 		return;
 
-	cks = kmem_alloc(sizeof (*cks), KM_SLEEP);
+	cks = kmem_zalloc(sizeof (*cks), KM_SLEEP);
+	cks->ks_state &= ~KCPC_SET_BOUND;
 	cctx->kc_set = cks;
 	cks->ks_flags = ks->ks_flags;
 	cks->ks_nreqs = ks->ks_nreqs;
@@ -758,6 +784,11 @@ kcpc_ctx_clone(kcpc_ctx_t *ctx, kcpc_ctx_t *cctx)
 	}
 	if (kcpc_configure_reqs(cctx, cks, &code) != 0)
 		kcpc_invalidate_config(cctx);
+
+	mutex_enter(&cks->ks_lock);
+	cks->ks_state |= KCPC_SET_BOUND;
+	cv_signal(&cks->ks_condv);
+	mutex_exit(&cks->ks_lock);
 }
 
 
@@ -776,6 +807,8 @@ kcpc_ctx_free(kcpc_ctx_t *ctx)
 	mutex_exit(&kcpc_ctx_llock[hash]);
 
 	kmem_free(ctx->kc_pics, cpc_ncounters * sizeof (kcpc_pic_t));
+	cv_destroy(&ctx->kc_condv);
+	mutex_destroy(&ctx->kc_lock);
 	kmem_free(ctx, sizeof (*ctx));
 }
 
@@ -1021,6 +1054,7 @@ kcpc_save(kcpc_ctx_t *ctx)
 static void
 kcpc_restore(kcpc_ctx_t *ctx)
 {
+	mutex_enter(&ctx->kc_lock);
 	if ((ctx->kc_flags & (KCPC_CTX_INVALID | KCPC_CTX_INVALID_STOPPED)) ==
 	    KCPC_CTX_INVALID)
 		/*
@@ -1028,11 +1062,25 @@ kcpc_restore(kcpc_ctx_t *ctx)
 		 * We mark it as such here because we will not start the
 		 * counters during this context switch.
 		 */
-		atomic_or_uint(&ctx->kc_flags, KCPC_CTX_INVALID_STOPPED);
+		ctx->kc_flags |= KCPC_CTX_INVALID_STOPPED;
 
-	if (ctx->kc_flags & (KCPC_CTX_INVALID | KCPC_CTX_FREEZE))
+	if (ctx->kc_flags & (KCPC_CTX_INVALID | KCPC_CTX_FREEZE)) {
+		mutex_exit(&ctx->kc_lock);
 		return;
+	}
+
+	/*
+	 * Set kc_flags to show that a kcpc_restore() is in progress to avoid
+	 * ctx & set related memory objects being freed without us knowing.
+	 * This can happen if an agent thread is executing a kcpc_unbind(),
+	 * with this thread as the target, whilst we're concurrently doing a
+	 * restorectx() during, for example, a proc_exit().  Effectively, by
+	 * doing this, we're asking kcpc_free() to cv_wait() until
+	 * kcpc_restore() has completed.
+	 */
+	ctx->kc_flags |= KCPC_CTX_RESTORE;
+	mutex_exit(&ctx->kc_lock);
 
 	/*
 	 * While programming the hardware, the counters should be stopped. We
@@ -1041,6 +1089,14 @@ kcpc_restore(kcpc_ctx_t *ctx)
 	 */
 	ctx->kc_rawtick = KCPC_GET_TICK();
 	pcbe_ops->pcbe_program(ctx);
+
+	/*
+	 * Wake the agent thread if it's waiting in kcpc_free().
+	 */
+	mutex_enter(&ctx->kc_lock);
+	ctx->kc_flags &= ~KCPC_CTX_RESTORE;
+	cv_signal(&ctx->kc_condv);
+	mutex_exit(&ctx->kc_lock);
 }
 
 /*
@@ -1195,7 +1251,14 @@ kcpc_free(kcpc_ctx_t *ctx, int isexec)
 
 	ASSERT(set != NULL);
 
-	atomic_or_uint(&ctx->kc_flags, KCPC_CTX_INVALID);
+	/*
+	 * Wait for kcpc_restore() to finish before we tear things down.
+	 */
+	mutex_enter(&ctx->kc_lock);
+	while (ctx->kc_flags & KCPC_CTX_RESTORE)
+		cv_wait(&ctx->kc_condv, &ctx->kc_lock);
+	ctx->kc_flags |= KCPC_CTX_INVALID;
+	mutex_exit(&ctx->kc_lock);
 
 	if (isexec) {
 		/*
@@ -1283,6 +1346,8 @@ kcpc_free_set(kcpc_set_t *set)
 	}
 
 	kmem_free(set->ks_req, sizeof (kcpc_request_t) * set->ks_nreqs);
+	cv_destroy(&set->ks_condv);
+	mutex_destroy(&set->ks_lock);
 	kmem_free(set, sizeof (kcpc_set_t));
 }
 
@@ -1484,7 +1549,8 @@ kcpc_dup_set(kcpc_set_t *set)
 	int		i;
 	int		j;
 
-	new = kmem_alloc(sizeof (*new), KM_SLEEP);
+	new = kmem_zalloc(sizeof (*new), KM_SLEEP);
+	new->ks_state &= ~KCPC_SET_BOUND;
 	new->ks_flags = set->ks_flags;
 	new->ks_nreqs = set->ks_nreqs;
 	new->ks_req = kmem_alloc(set->ks_nreqs * sizeof (kcpc_request_t),
diff --git a/usr/src/uts/common/sys/cpc_impl.h b/usr/src/uts/common/sys/cpc_impl.h
index f7bc7a5838..bb9dc5e6e3 100644
--- a/usr/src/uts/common/sys/cpc_impl.h
+++ b/usr/src/uts/common/sys/cpc_impl.h
@@ -19,7 +19,7 @@
  * CDDL HEADER END
  */
 
 /*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -30,7 +30,7 @@
 #include <sys/types.h>
 #include <sys/time.h>
-#include <sys/rwlock.h>
+#include <sys/ksynch.h>
 
 #if defined(_KERNEL) && defined(_MULTI_DATAMODEL)
 #include <sys/types32.h>
@@ -140,6 +140,8 @@ struct _kcpc_ctx {
 	struct _kthread	*kc_thread;	/* thread this context is measuring */
 	int		kc_cpuid;	/* CPU this context is measuring */
 	kcpc_ctx_t	*kc_next;	/* Global list of all contexts */
+	kmutex_t	kc_lock;	/* protects kc_flags */
+	kcondvar_t	kc_condv;	/* wait for kcpc_restore completion */
 };
 
 typedef struct __cpc_args {
@@ -172,6 +174,7 @@ typedef struct __cpc_args32 {
 #define	KCPC_CTX_LWPINHERIT	0x8	/* => lwp_create inherits ctx */
 #define	KCPC_CTX_INVALID	0x100	/* => context stolen; discard */
 #define	KCPC_CTX_INVALID_STOPPED 0x200	/* => invalid ctx has been stopped */
+#define	KCPC_CTX_RESTORE	0x400	/* => kcpc_restore in progress */
 
 /*
  * PIC flags.
diff --git a/usr/src/uts/common/sys/kcpc.h b/usr/src/uts/common/sys/kcpc.h
index 50bce17513..0763723bdf 100644
--- a/usr/src/uts/common/sys/kcpc.h
+++ b/usr/src/uts/common/sys/kcpc.h
@@ -19,7 +19,7 @@
  * CDDL HEADER END
  */
 
 /*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
  * Use is subject to license terms.
  */
 
@@ -29,6 +29,7 @@
 #pragma ident	"%Z%%M%	%I%	%E% SMI"
 
 #include <sys/cpc_impl.h>
+#include <sys/ksynch.h>
 
 #ifdef	__cplusplus
 extern "C" {
@@ -51,12 +52,17 @@ struct cpu;
 typedef struct _kcpc_request kcpc_request_t;
 struct __pcbe_ops;
 
+#define	KCPC_SET_BOUND		0x0001		/* Used in ks_state */
+
 struct _kcpc_set {
 	int			ks_flags;
 	int			ks_nreqs;	/* Number of reqs */
 	kcpc_request_t		*ks_req;	/* Pointer to reqs */
 	uint64_t		*ks_data;	/* Data store for this set */
 	kcpc_ctx_t		*ks_ctx;	/* ctx this set belongs to */
+	ushort_t		ks_state;	/* Set is bound or unbound */
+	kmutex_t		ks_lock;	/* Protects ks_state */
+	kcondvar_t		ks_condv;	/* Wait for bind to complete */
 };
 
 struct _kcpc_request {
diff --git a/usr/src/uts/common/sys/thread.h b/usr/src/uts/common/sys/thread.h
index 78ded14796..6fced086b3 100644
--- a/usr/src/uts/common/sys/thread.h
+++ b/usr/src/uts/common/sys/thread.h
@@ -101,6 +101,8 @@ struct upimutex;
 struct kproject;
 struct on_trap_data;
 struct waitq;
+struct _kcpc_ctx;
+struct _kcpc_set;
 
 /* Definition for kernel thread identifier type */
 typedef uint64_t kt_did_t;
@@ -149,8 +151,8 @@ typedef struct _kthread {
 	uint64_t	t_intr_start;	/* timestamp when time slice began */
 	kt_did_t	t_did;		/* thread id for kernel debuggers */
 	caddr_t		t_tnf_tpdp;	/* Trace facility data pointer */
-	kcpc_ctx_t	*t_cpc_ctx;	/* performance counter context */
-	kcpc_set_t	*t_cpc_set;	/* set this thread has bound */
+	struct _kcpc_ctx	*t_cpc_ctx;	/* performance counter context */
+	struct _kcpc_set	*t_cpc_set;	/* set this thread has bound */
 
 	/*
 	 * non swappable part of the lwp state.
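The other half of the change closes the restore-versus-teardown race: kcpc_restore() now marks the context KCPC_CTX_RESTORE under the new kc_lock while it reprograms the counters, and kcpc_free() cv_wait()s on kc_condv until that bit clears before invalidating the context. The following userland sketch illustrates only that handshake; it is not the kernel code, and the names (counter_ctx_t, CTX_RESTORE, CTX_INVALID, ctx_restore, ctx_free) are made up for illustration.

/*
 * Userland sketch of the restore/free handshake (illustrative names only).
 * Build with: cc -o restore_sketch restore_sketch.c -lpthread
 */
#include <pthread.h>

#define	CTX_INVALID	0x100
#define	CTX_RESTORE	0x400

typedef struct counter_ctx {
	unsigned int	flags;		/* analogue of kc_flags */
	pthread_mutex_t	lock;		/* analogue of kc_lock */
	pthread_cond_t	condv;		/* analogue of kc_condv */
} counter_ctx_t;

/* Context-switch path: reprogram counters, fencing off concurrent free. */
static void *
ctx_restore(void *arg)
{
	counter_ctx_t *ctx = arg;

	pthread_mutex_lock(&ctx->lock);
	if (ctx->flags & CTX_INVALID) {
		pthread_mutex_unlock(&ctx->lock);
		return (NULL);		/* context already torn down */
	}
	ctx->flags |= CTX_RESTORE;	/* keep ctx_free() from proceeding */
	pthread_mutex_unlock(&ctx->lock);

	/* ... reprogram the hardware counters from ctx here ... */

	pthread_mutex_lock(&ctx->lock);
	ctx->flags &= ~CTX_RESTORE;
	pthread_cond_signal(&ctx->condv);	/* wake a waiting ctx_free() */
	pthread_mutex_unlock(&ctx->lock);
	return (NULL);
}

/* Teardown path: wait out any in-flight restore, then invalidate. */
static void
ctx_free(counter_ctx_t *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	while (ctx->flags & CTX_RESTORE)
		pthread_cond_wait(&ctx->condv, &ctx->lock);
	ctx->flags |= CTX_INVALID;
	pthread_mutex_unlock(&ctx->lock);

	/* ... now safe to free ctx-related memory ... */
}

int
main(void)
{
	counter_ctx_t ctx = { 0, PTHREAD_MUTEX_INITIALIZER,
	    PTHREAD_COND_INITIALIZER };
	pthread_t restorer;

	(void) pthread_create(&restorer, NULL, ctx_restore, &ctx);
	ctx_free(&ctx);			/* blocks while a restore is running */
	(void) pthread_join(restorer, NULL);
	return (0);
}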