/*
* Copyright (c) 2010, 2011 Antti Kantee. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Look up a live LWP within the specified process.
*
* Must be called with p->p_lock held.
*/
struct lwp *
lwp_find(struct proc *p, lwpid_t id)
{
	struct lwp *l;

	KASSERT(mutex_owned(p->p_lock));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		if (l->l_lid == id)
			break;
	}

	/*
	 * No need to lock - all of these conditions will
	 * be visible with the process level mutex held.
	 */
	if (l != NULL && (l->l_stat == LSIDL || l->l_stat == LSZOMB))
		l = NULL;

	return l;
}
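/*
 * Caller sketch (illustrative, not part of the original file):
 * lwp_find() must run under p->p_lock, and the result is only
 * guaranteed live while that lock is held.  The helper name is
 * hypothetical.
 */
static bool
example_lwp_exists(struct proc *p, lwpid_t id)
{
	bool found;

	mutex_enter(p->p_lock);
	found = (lwp_find(p, id) != NULL);
	mutex_exit(p->p_lock);

	return found;
}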
/*
 * Free an exited process.  The signature below is a reconstruction
 * from the body; earlier steps of the teardown are not shown.
 */
static void
lwproc_proc_free(struct proc *p)
{

	/* non-local vmspaces are not shared */
	if (!RUMP_LOCALPROC_P(p)) {
		struct rump_spctl *ctl = (struct rump_spctl *)p->p_vmspace;
		KASSERT(p->p_vmspace->vm_refcnt == 1);
		kmem_free(ctl, sizeof(*ctl));
	}

	proc_free_mem(p);
}
/*
* Allocate a new process. Mostly mimic fork by
* copying the properties of the parent. However, there are some
* differences.
*
 * Return a pointer to the new process.
*/
static struct proc *
lwproc_newproc(struct proc *parent, struct vmspace *vm, int flags)
{
	uid_t uid = kauth_cred_getuid(parent->p_cred);
	struct proc *p;

	/* maxproc not enforced */
	atomic_inc_uint(&nprocs);

	/* allocate the proc; the parent-copying steps are not shown */
	p = proc_alloc();

	/* account the new process to the owner of the parent */
	(void)chgproccnt(uid, 1);

	mutex_enter(p->p_lock);

	/*
	 * Account the new lwp to the owner of the process.
	 * For some reason, NetBSD doesn't count the first lwp
	 * in a process as a lwp, so skip that.
	 */
	if (p->p_nlwps++) {
		chglwpcnt(kauth_cred_getuid(p->p_cred), 1);
	}
	mutex_exit(p->p_lock);

	return p;
}
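/*
 * Caller sketch: create a child that shares the parent's vmspace,
 * in the spirit of rfork(RFMEM).  Passing 0 for flags is an
 * assumption for illustration; real callers typically pass
 * fd-table inheritance flags.  The helper name is hypothetical.
 */
static struct proc *
example_newproc_shared_vm(struct proc *parent)
{

	return lwproc_newproc(parent, parent->p_vmspace, 0);
}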
/*
* Switch to a new process/thread. Release previous one if
* deemed to be exiting. This is considered a slow path for
* rump kernel entry.
*/
void
rump_lwproc_switch(struct lwp *newlwp)
{
	struct lwp *l = curlwp;
	int nlocks;

	/*
	 * Drop the kernel locks; the new lwp inherits the virtual
	 * CPU.  Release handling of the old lwp is not shown.
	 */
	KERNEL_UNLOCK_ALL(NULL, &nlocks);
	newlwp->l_cpu = newlwp->l_target_cpu = l->l_cpu;
	newlwp->l_mutex = l->l_mutex;
	newlwp->l_pflag |= LP_RUNNING;

	/*
	 * Check if the thread should get a signal.  This is
	 * mostly to satisfy the "record" rump sigmodel.
	 */
	mutex_enter(newlwp->l_proc->p_lock);
	if (sigispending(newlwp, 0)) {
		newlwp->l_flag |= LW_PENDSIG;
	}
	mutex_exit(newlwp->l_proc->p_lock);

	/* run as the new lwp and retake the locks dropped above */
	curcpu()->ci_curlwp = newlwp;
	KERNEL_LOCK(nlocks, NULL);
}
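/*
 * Usage sketch (an assumption, not from the original file): create
 * a second lwp in the current process and hop back to the original
 * one.  This assumes rump_lwproc_newlwp() both creates the lwp and
 * switches to it, which is why the original curlwp is saved first.
 * The helper name is hypothetical.
 */
static void
example_bounce_lwps(void)
{
	struct lwp *original = curlwp;

	if (rump_lwproc_newlwp(curproc->p_pid) != 0)
		return;

	/* now running as the freshly created lwp */
	rump_lwproc_switch(original);
	/* back on the original lwp */
}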
/*
* In-kernel routines used to add and remove references for the
* current thread. The main purpose is to make it possible for
* implicit threads to persist over scheduling operations in
* rump kernel drivers. Note that we don't need p_lock in a
* rump kernel, since we do refcounting only for curlwp.
*/
void
rump__lwproc_lwphold(void)
{
	struct lwp *l = curlwp;

	/* take a reference and clear any pending exit request */
	l->l_refcnt++;
	l->l_flag &= ~LW_WEXIT;
}
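/*
 * The matching reference-release routine.  The body is a sketch
 * inferred from the comment above ("add and remove references");
 * it assumes l_refcnt is the counter being manipulated.
 */
void
rump__lwproc_lwprele(void)
{
	struct lwp *l = curlwp;

	l->l_refcnt--;
}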
/* this interface is under construction (like the proverbial 90's web page) */
int rump_i_know_what_i_am_doing_with_sysents = 0;
void
rump_lwproc_sysent_usenative(void)
{

	if (!rump_i_know_what_i_am_doing_with_sysents)
		panic("don't use rump_lwproc_sysent_usenative()");
	curproc->p_emul = &emul_netbsd;
}
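/*
 * Usage sketch grounded in the guard above: the escape hatch must
 * be armed explicitly, or the call panics.  The helper name is
 * hypothetical.
 */
static void
example_use_native_sysents(void)
{

	rump_i_know_what_i_am_doing_with_sysents = 1;
	rump_lwproc_sysent_usenative();
}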