path: root/usr/src/uts/common/os/taskq.c
author    Garrett D'Amore <garrett@nexenta.com>  2011-07-27 07:13:44 -0700
committer Garrett D'Amore <garrett@nexenta.com>  2011-07-27 07:13:44 -0700
commit    5aeb94743e3be0c51e86f73096334611ae3a058e (patch)
tree      6e92f4ddb8ba1052529227e47400a69354c37af3 /usr/src/uts/common/os/taskq.c
parent    135e56f22e13a5d0057a31c742b8c7d8324c4af3 (diff)
download  illumos-gate-5aeb94743e3be0c51e86f73096334611ae3a058e.tar.gz
734 taskq_dispatch_prealloc() desired
943 zio_interrupt ends up calling taskq_dispatch with TQ_SLEEP
Reviewed by: Albert Lee <trisk@nexenta.com>
Reviewed by: Richard Lowe <richlowe@richlowe.net>
Reviewed by: Alexey Zaytsev <alexey.zaytsev@nexenta.com>
Reviewed by: Jason Brian King <jason.brian.king@gmail.com>
Reviewed by: George Wilson <gwilson@zfsmail.com>
Reviewed by: Adam Leventhal <ahl@delphix.com>
Approved by: Gordon Ross <gwr@nexenta.com>
Diffstat (limited to 'usr/src/uts/common/os/taskq.c')
-rw-r--r--  usr/src/uts/common/os/taskq.c  71
1 file changed, 66 insertions(+), 5 deletions(-)
diff --git a/usr/src/uts/common/os/taskq.c b/usr/src/uts/common/os/taskq.c
index 68dc5203f1..26e7b952d7 100644
--- a/usr/src/uts/common/os/taskq.c
+++ b/usr/src/uts/common/os/taskq.c
@@ -24,6 +24,10 @@
*/
/*
+ * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
+ */
+
+/*
* Kernel task queues: general-purpose asynchronous task scheduling.
*
* A common problem in kernel programming is the need to schedule tasks
@@ -184,6 +188,17 @@
* taskq_dispatch() (especially if TQ_NOQUEUE was specified), so it
* is important to have backup strategies handling such failures.
*
+ * void taskq_dispatch_ent(tq, func, arg, flags, tqent)
+ *
+ * This is a light-weight form of taskq_dispatch() that uses a
+ * preallocated taskq_ent_t structure for scheduling. As a
+ * result, it does not perform allocations and cannot ever fail.
+ * Note especially that it cannot be used with TASKQ_DYNAMIC
+ * taskqs. The memory for the tqent must not be modified or used
+ * until the function (func) is called. (However, func itself
+ * may safely modify or free this memory, once it is called.)
+ * Note that the taskq framework will NOT free this memory.
+ *
* void taskq_wait(tq):
*
* Waits for all previously scheduled tasks to complete.
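
As a rough usage sketch of the taskq_dispatch_ent() interface described above (a hypothetical consumer, not part of this change: my_request_t, my_request_handler() and my_request_submit() are illustrative names, and the full taskq_ent_t definition is assumed to be available via sys/taskq_impl.h), the caller embeds the entry in its own allocation, so the dispatch itself performs no allocation and cannot fail:

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/taskq.h>
#include <sys/taskq_impl.h>     /* assumed home of the full taskq_ent_t definition */

typedef struct my_request {
        taskq_ent_t     mr_tqent;       /* caller-owned dispatch entry */
        int             mr_data;        /* payload for the handler */
} my_request_t;

static void
my_request_handler(void *arg)
{
        my_request_t *mr = arg;

        /* ... process mr->mr_data ... */

        /*
         * Once this function is running, ownership of mr_tqent is back
         * with the caller, so freeing the containing structure is safe.
         */
        kmem_free(mr, sizeof (*mr));
}

static void
my_request_submit(taskq_t *tq, int data)
{
        /*
         * Zeroed allocation keeps the embedded entry's flags clear
         * before its first dispatch (an assumption of this sketch).
         */
        my_request_t *mr = kmem_zalloc(sizeof (*mr), KM_SLEEP);

        mr->mr_data = data;
        /* Never fails and never blocks; tq must not be TASKQ_DYNAMIC. */
        taskq_dispatch_ent(tq, my_request_handler, mr, 0, &mr->mr_tqent);
}
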
@@ -1118,7 +1133,6 @@ taskq_bucket_dispatch(taskq_bucket_t *b, task_func_t func, void *arg)
* Actual return value is the pointer to taskq entry that was used to
* dispatch a task. This is useful for debugging.
*/
-/* ARGSUSED */
taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
@@ -1134,7 +1148,7 @@ taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
/*
* TQ_NOQUEUE flag can't be used with non-dynamic task queues.
*/
- ASSERT(! (flags & TQ_NOQUEUE));
+ ASSERT(!(flags & TQ_NOQUEUE));
/*
* Enqueue the task to the underlying queue.
*/
@@ -1146,6 +1160,9 @@ taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
mutex_exit(&tq->tq_lock);
return (NULL);
}
+ /* Make sure we start without any flags */
+ tqe->tqent_un.tqent_flags = 0;
+
if (flags & TQ_FRONT) {
TQ_ENQUEUE_FRONT(tq, tqe, func, arg);
} else {
@@ -1273,6 +1290,31 @@ taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
return ((taskqid_t)tqe);
}
+void
+taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
+ taskq_ent_t *tqe)
+{
+ ASSERT(func != NULL);
+ ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));
+
+ /*
+ * Mark it as a prealloc'd task. This is important
+ * to ensure that we don't free it later.
+ */
+ tqe->tqent_un.tqent_flags |= TQENT_FLAG_PREALLOC;
+ /*
+ * Enqueue the task to the underlying queue.
+ */
+ mutex_enter(&tq->tq_lock);
+
+ if (flags & TQ_FRONT) {
+ TQ_ENQUEUE_FRONT(tq, tqe, func, arg);
+ } else {
+ TQ_ENQUEUE(tq, tqe, func, arg);
+ }
+ mutex_exit(&tq->tq_lock);
+}
+
/*
* Wait for all pending tasks to complete.
* Calling taskq_wait from a task will cause deadlock.
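
To contrast the new entry point with the classic one (and with the failure/blocking trade-off behind bug 943 above), a hedged sketch with hypothetical helpers (submit_with_alloc() and submit_prealloc() are illustrative names, not part of this change): taskq_dispatch() may fail under TQ_NOSLEEP or block under TQ_SLEEP, while taskq_dispatch_ent() does neither because the caller supplies the entry:

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/taskq.h>
#include <sys/taskq_impl.h>     /* assumed home of the full taskq_ent_t definition */

static int
submit_with_alloc(taskq_t *tq, task_func_t func, void *arg)
{
        /* Classic path: the internal allocation can fail under memory pressure. */
        if (taskq_dispatch(tq, func, arg, TQ_NOSLEEP) == 0)
                return (ENOMEM);        /* caller must provide a fallback */
        return (0);
}

static void
submit_prealloc(taskq_t *tq, task_func_t func, void *arg, taskq_ent_t *tqe)
{
        /* Prealloc'd path: nothing to allocate, so no failure and no blocking. */
        taskq_dispatch_ent(tq, func, arg, 0, tqe);
}
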
@@ -1460,6 +1502,7 @@ taskq_thread(void *arg)
taskq_ent_t *tqe;
callb_cpr_t cprinfo;
hrtime_t start, end;
+ boolean_t freeit;
curthread->t_taskq = tq; /* mark ourselves for taskq_member() */
@@ -1546,6 +1589,23 @@ taskq_thread(void *arg)
tqe->tqent_next->tqent_prev = tqe->tqent_prev;
mutex_exit(&tq->tq_lock);
+ /*
+ * For prealloc'd tasks, we don't free anything. We
+ * have to check this now, because once we call the
+ * function for a prealloc'd task, we can't touch the
+ * tqent any longer (calling the function returns
+ * ownership of the tqent back to the caller of
+ * taskq_dispatch).
+ */
+ if ((!(tq->tq_flags & TASKQ_DYNAMIC)) &&
+ (tqe->tqent_un.tqent_flags & TQENT_FLAG_PREALLOC)) {
+ /* clear pointers to assist assertion checks */
+ tqe->tqent_next = tqe->tqent_prev = NULL;
+ freeit = B_FALSE;
+ } else {
+ freeit = B_TRUE;
+ }
+
rw_enter(&tq->tq_threadlock, RW_READER);
start = gethrtime();
DTRACE_PROBE2(taskq__exec__start, taskq_t *, tq,
@@ -1560,7 +1620,8 @@ taskq_thread(void *arg)
tq->tq_totaltime += end - start;
tq->tq_executed++;
- taskq_ent_free(tq, tqe);
+ if (freeit)
+ taskq_ent_free(tq, tqe);
}
if (tq->tq_nthreads_max == 1)
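
As a hedged illustration of what the ownership rule in the comment above allows (a hypothetical poller_t example, not part of this change): since the worker thread is finished with a prealloc'd entry before it calls the function, the function itself may free the containing memory or even re-dispatch the same entry, e.g. to build a self-rearming task:

#include <sys/types.h>
#include <sys/taskq.h>
#include <sys/taskq_impl.h>     /* assumed home of the full taskq_ent_t definition */

typedef struct poller {
        taskq_t         *p_tq;          /* queue we run on (non-TASKQ_DYNAMIC) */
        taskq_ent_t     p_tqent;        /* caller-owned, zeroed before first use */
        boolean_t       p_stop;         /* set by the owner to stop rearming */
} poller_t;

static void
poller_tick(void *arg)
{
        poller_t *p = arg;

        /* ... perform one unit of work ... */

        /*
         * Once this function is running, the worker no longer touches
         * p_tqent, so it may be re-queued immediately.
         */
        if (!p->p_stop)
                taskq_dispatch_ent(p->p_tq, poller_tick, p, 0, &p->p_tqent);
}
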
@@ -1600,7 +1661,7 @@ taskq_thread(void *arg)
static void
taskq_d_thread(taskq_ent_t *tqe)
{
- taskq_bucket_t *bucket = tqe->tqent_bucket;
+ taskq_bucket_t *bucket = tqe->tqent_un.tqent_bucket;
taskq_t *tq = bucket->tqbucket_taskq;
kmutex_t *lock = &bucket->tqbucket_lock;
kcondvar_t *cv = &tqe->tqent_cv;
@@ -2115,7 +2176,7 @@ taskq_bucket_extend(void *arg)
ASSERT(tqe->tqent_thread == NULL);
- tqe->tqent_bucket = b;
+ tqe->tqent_un.tqent_bucket = b;
/*
* Create a thread in a TS_STOPPED state first. If it is successfully