/* $NetBSD: threads.c,v 1.28 2023/10/04 21:56:15 ad Exp $ */
/*
* Copyright (c) 2007-2009 Antti Kantee. All Rights Reserved.
*
* Development of this software was supported by
* The Finnish Cultural Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
 * Startup gate: don't allow threads to run before all CPUs have fully
 * attached.  The flag is tested once unlocked as a fast path, then
 * re-checked under thrmtx so a wakeup cannot be lost between the
 * unlocked test and the condvar wait.
 */
if (!threads_are_go) {
rumpuser_mutex_enter_nowrap(thrmtx);
while (!threads_are_go) {
rumpuser_cv_wait_nowrap(thrcv, thrmtx);
}
rumpuser_mutex_exit(thrmtx);
}
/*
 * We don't want a module unload thread: silently report success
 * without creating it.
 * (XXX: yes, this is a kludge too, and the kernel should
 * have a more flexible method for configuring which threads
 * we want).
 */
if (strcmp(thrstore, "modunload") == 0) {
return 0;
}
/*
 * Threads are disabled in this rump kernel.  Consult the nothreads[]
 * table: a thread listed there may be "faked" -- not created at all,
 * at the cost of the functionality it would have provided -- while a
 * thread not in the table is considered essential and we panic.
 * t_ncmp selects a prefix match (strncmp over the table entry's
 * length) instead of an exact name match.
 */
if (!rump_threads) {
bool matched;
int i;
/* do we want to fake it? */
for (i = 0; i < __arraycount(nothreads); i++) {
if (nothreads[i].t_ncmp) {
matched = strncmp(thrstore, nothreads[i].t_name,
strlen(nothreads[i].t_name)) == 0;
} else {
matched = strcmp(thrstore,
nothreads[i].t_name) == 0;
}
if (matched) {
aprint_error("rump kernel threads not enabled, "
"%s not functional\n", nothreads[i].t_name);
return 0;
}
}
panic("threads not available");
}
KASSERT(fmt != NULL);
/*
 * Allocate with the intr-safe allocator, given that we may be
 * creating interrupt threads.
 */
td = kmem_intr_alloc(sizeof(*td), KM_SLEEP);
td->f = func;
td->arg = arg;
/*
 * Allocate a fresh lwp hung off proc0 and record it in the descriptor
 * so the bouncer thread can find it.  (rump__lwproc_alloclwp semantics
 * assumed from its name -- confirm against lwproc code.)
 */
td->newlwp = l = rump__lwproc_alloclwp(&proc0);
l->l_flag |= LW_SYSTEM;
if (flags & KTHREAD_MPSAFE)
l->l_pflag |= LP_MPSAFE;
if (flags & KTHREAD_INTR)
l->l_pflag |= LP_INTR;
if (ci) {
/* Caller requested a specific CPU: bind the lwp to it. */
l->l_pflag |= LP_BOUND;
l->l_target_cpu = ci;
}
if (thrname) {
/* Fixed MAXCOMLEN buffer; strlcpy guarantees NUL termination. */
l->l_name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
strlcpy(l->l_name, thrname, MAXCOMLEN);
}
/*
 * NOTE(review): this is the tail of a function whose header lies
 * outside this view; it appears to restore a previously saved
 * kthread FPU-usage flag (cf. kthread_fpu_enter/exit) -- confirm
 * against the full file.  's' must carry at most the LW_SYSTEM_FPU
 * bit, the current lwp must be a kthread, and LW_SYSTEM_FPU must
 * currently be set.
 */
KASSERT(s == (s & LW_SYSTEM_FPU));
KASSERTMSG(l->l_flag & LW_SYSTEM,
"%s is allowed only in kthreads", __func__);
KASSERT(l->l_flag & LW_SYSTEM_FPU);
/*
 * Set l_flag's LW_SYSTEM_FPU bit to the saved value: the XOR clears
 * the bit when s lacks it and is a no-op when s has it.
 */
l->l_flag ^= s ^ LW_SYSTEM_FPU;
}
/*
 * Create a non-kernel thread that is scheduled by a rump kernel hypercall.
 *
 * Sounds strange and out-of-place? yup yup yup. the original motivation
 * for this was aio. This is a very infrequent code path in rump kernels.
 * XXX: threads created with lwp_create() are eternal for local clients.
 * however, they are correctly reaped for remote clients with process exit.
 */
/*
 * Host-thread trampoline for such lwps: parks the new host thread on
 * thrcv until the lwp has been marked runnable, then (in code beyond
 * this view) presumably runs the payload function f(thrarg).
 */
static void *
lwpbouncer(void *arg)
{
struct thrdesc *td = arg;
struct lwp *l = td->newlwp;
void (*f)(void *);
void *thrarg;
int run;
/*
 * Cache the payload function and argument in locals; td's lifetime
 * past this point is not visible in this view.
 */
f = td->f;
thrarg = td->arg;
/* do not run until we've been enqueued */
rumpuser_mutex_enter_nowrap(thrmtx);
while ((run = td->runnable) == 0) {
rumpuser_cv_wait_nowrap(thrcv, thrmtx);
}
rumpuser_mutex_exit(thrmtx);