/*-
* Copyright (c)2002, 2005, 2006, 2007 YAMAMOTO Takashi,
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
static void
workqueue_worker(void *cookie)
{
	struct workqueue *wq = cookie;
	struct workqueue_queue *q;
	int s, fpu = wq->wq_flags & WQ_FPU;

	/* find the workqueue of this kthread */
	q = workqueue_queue_lookup(wq, curlwp->l_cpu);

	if (fpu)
		s = kthread_fpu_enter();
	mutex_enter(&q->q_mutex);
	for (;;) {
		struct workqhead tmp;

		SIMPLEQ_INIT(&tmp);

		/* sleep until there is pending work */
		while (SIMPLEQ_EMPTY(&q->q_queue_pending))
			cv_wait(&q->q_cv, &q->q_mutex);

		/*
		 * Grab the whole pending list as one batch, so that
		 * the mutex can be dropped while the batch runs.
		 */
		SIMPLEQ_CONCAT(&tmp, &q->q_queue_pending);
		SIMPLEQ_INIT(&q->q_queue_pending);

		/*
		 * Mark the queue as actively running a batch of work
		 * by setting the generation number odd.
		 */
		q->q_gen |= 1;
		mutex_exit(&q->q_mutex);

		workqueue_runlist(wq, &tmp);

		/*
		 * Notify workqueue_wait that we have completed a batch
		 * of work by incrementing the generation number.
		 */
		mutex_enter(&q->q_mutex);
		KASSERTMSG(q->q_gen & 1, "q=%p gen=%"PRIu64, q, q->q_gen);
		q->q_gen++;
		cv_broadcast(&q->q_cv);
	}
	mutex_exit(&q->q_mutex);

	if (fpu)
		kthread_fpu_exit(s);
}
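
/*
 * Summary of the q_gen handshake between workqueue_worker and
 * workqueue_q_wait:
 *
 *	q_gen even	no batch is running; a waiter only needs to
 *			watch q_queue_pending
 *	q_gen odd	a batch is running outside q_mutex; a waiter
 *			must sleep on q_cv until q_gen changes, which
 *			the worker arranges when the batch completes
 */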
static bool
workqueue_q_wait(struct workqueue *wq, struct workqueue_queue *q,
    work_impl_t *wk_target)
{
	work_impl_t *wk;
	bool found = false;
	uint64_t gen;

	mutex_enter(&q->q_mutex);

	/*
	 * Avoid a deadlock scenario.  We can't guarantee that
	 * wk_target has completed at this point, but we can't wait for
	 * it either, so do nothing.
	 *
	 * XXX Are there use-cases that require these semantics?
	 */
	if (q->q_worker == curlwp) {
		SDT_PROBE2(sdt, kernel, workqueue, wait__self, wq, wk_target);
		goto out;
	}

	/*
	 * Wait until the target is no longer pending.  If we find it
	 * on this queue, the caller can stop looking in other queues.
	 * If we don't find it in this queue, however, we can't skip
	 * waiting -- it may be hidden in the running batch, which we
	 * have no access to.
	 */
again:
	SIMPLEQ_FOREACH(wk, &q->q_queue_pending, wk_entry) {
		if (wk == wk_target) {
			SDT_PROBE2(sdt, kernel, workqueue, wait__hit, wq, wk);
			found = true;
			cv_wait(&q->q_cv, &q->q_mutex);
			goto again;
		}
	}

	/*
	 * The target may be in the batch of work currently running,
	 * but we can't touch that queue.  So if there's anything
	 * running, wait until the generation changes.
	 */
	gen = q->q_gen;
	if (gen & 1) {
		do
			cv_wait(&q->q_cv, &q->q_mutex);
		while (gen == q->q_gen);
	}

out:	mutex_exit(&q->q_mutex);

	return found;
}
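
/*
 * Illustrative sketch of the self-wait case handled above: a work
 * handler running on the workqueue's own kthread must not block in
 * workqueue_wait, because the worker cannot finish the running batch
 * while it sleeps.  The q_worker == curlwp check makes such a call
 * return immediately instead.  my_wq and my_work_func below are
 * hypothetical names, not part of this file.
 */
#if 0
static void
my_work_func(struct work *wk, void *arg)
{

	/*
	 * This runs on the workqueue's kthread; without the check,
	 * waiting here would sleep forever on our own q_cv.
	 */
	workqueue_wait(my_wq, wk);
}
#endif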
/*
 * Wait for a specified work to finish.  The caller must ensure that
 * no new work will be enqueued before calling workqueue_wait.  Note
 * that if the workqueue is WQ_PERCPU, the caller can still enqueue
 * new work on per-CPU queues other than the one being waited on.
 */
void
workqueue_wait(struct workqueue *wq, struct work *wk)
{
	struct workqueue_queue *q;
	bool found;

	ASSERT_SLEEPABLE();

	SDT_PROBE2(sdt, kernel, workqueue, wait__start, wq, wk);
	if (ISSET(wq->wq_flags, WQ_PERCPU)) {
		struct cpu_info *ci;
		CPU_INFO_ITERATOR cii;

		for (CPU_INFO_FOREACH(cii, ci)) {
			q = workqueue_queue_lookup(wq, ci);
			found = workqueue_q_wait(wq, q, (work_impl_t *)wk);
			if (found)
				break;
		}
	} else {
		q = workqueue_queue_lookup(wq, NULL);
		(void)workqueue_q_wait(wq, q, (work_impl_t *)wk);
	}
	SDT_PROBE2(sdt, kernel, workqueue, wait__done, wq, wk);
}
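
/*
 * Illustrative sketch of the intended caller pattern: stop new
 * enqueues first, then wait, then tear down.  struct my_softc,
 * my_detach, sc_dying, sc_wq, and sc_work are hypothetical names,
 * not part of this file or of the workqueue(9) API.
 */
#if 0
static void
my_detach(struct my_softc *sc)
{

	/*
	 * Signal our enqueue sites to stop scheduling sc->sc_work;
	 * the driver must guarantee this before waiting, as the
	 * comment above workqueue_wait requires.
	 */
	sc->sc_dying = true;

	/* Wait for any pending or running instance of the work. */
	workqueue_wait(sc->sc_wq, &sc->sc_work);

	/* Now the work is idle and the queue can be destroyed. */
	workqueue_destroy(sc->sc_wq);
}
#endif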