/*-
* Copyright (c) 1999, 2019 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
* NASA Ames Research Center; by Matthias Scheler.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * Linux compatibility module. Deals with the scheduler-related syscalls.
*/
int
linux_sys_clone(struct lwp *l, const struct linux_sys_clone_args *uap,
    register_t *retval)
{
	struct linux_emuldata *led;
	int flags, sig, error;
	/*
	 * We don't support the Linux CLONE_PID or CLONE_PTRACE flags.
	 */
if (SCARG(uap, flags) & (LINUX_CLONE_PID|LINUX_CLONE_PTRACE))
return EINVAL;
	/*
	 * A thread group implies shared signal handlers, and shared
	 * signal handlers imply a shared VM. This matches what the
	 * Linux kernel does.
	 */
if (SCARG(uap, flags) & LINUX_CLONE_THREAD
&& (SCARG(uap, flags) & LINUX_CLONE_SIGHAND) == 0)
return EINVAL;
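	/* Likewise, shared signal handlers require a shared VM. */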
if (SCARG(uap, flags) & LINUX_CLONE_SIGHAND
&& (SCARG(uap, flags) & LINUX_CLONE_VM) == 0)
return EINVAL;
/*
* The thread group flavor is implemented totally differently.
*/
if (SCARG(uap, flags) & LINUX_CLONE_THREAD)
return linux_clone_nptl(l, uap, retval);
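	/* Translate the Linux clone flags into their fork1() counterparts. */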
flags = 0;
if (SCARG(uap, flags) & LINUX_CLONE_VM)
flags |= FORK_SHAREVM;
if (SCARG(uap, flags) & LINUX_CLONE_FS)
flags |= FORK_SHARECWD;
if (SCARG(uap, flags) & LINUX_CLONE_FILES)
flags |= FORK_SHAREFILES;
if (SCARG(uap, flags) & LINUX_CLONE_SIGHAND)
flags |= FORK_SHARESIGS;
if (SCARG(uap, flags) & LINUX_CLONE_VFORK)
flags |= FORK_PPWAIT;
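	/*
	 * The low bits of the clone flags hold the signal to post to the
	 * parent when the child exits; map it to a native signal number.
	 */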
sig = SCARG(uap, flags) & LINUX_CLONE_CSIGNAL;
if (sig < 0 || sig >= LINUX__NSIG)
return EINVAL;
sig = linux_to_native_signo[sig];
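	/* LINUX_CLONE_CHILD_SETTID: remember where to store the child's TID. */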
if (SCARG(uap, flags) & LINUX_CLONE_CHILD_SETTID) {
led = l->l_emuldata;
led->led_child_tidptr = SCARG(uap, child_tidptr);
}
	/*
	 * Note that Linux does not provide a portable way of specifying
	 * the stack area; the caller must know whether the stack grows
	 * up or down. We therefore pass a stack size of 0 so that the
	 * code that makes this adjustment is a no-op.
	 */
if ((error = fork1(l, flags, sig, SCARG(uap, stack), 0,
linux_child_return, NULL, retval)) != 0) {
DPRINTF("%s: fork1: error %d\n", __func__, error);
return error;
}
return 0;
}
int
linux_sys_clone3(struct lwp *l, const struct linux_sys_clone3_args *uap, register_t *retval)
{
struct linux_user_clone3_args cl_args;
struct linux_sys_clone_args clone_args;
int error;
if (SCARG(uap, size) != sizeof(cl_args)) {
DPRINTF("%s: Invalid size less or more\n", __func__);
return EINVAL;
	}
	error = copyin(SCARG(uap, cl_args), &cl_args, sizeof(cl_args));
	if (error)
		return error;
#if 0
	// XXX: this is wrong; exit_signal is the signal to deliver to the
	// parent when this process exits, not a set of flags.
if ((cl_args.exit_signal & ~(uint64_t)LINUX_CLONE_CSIGNAL) != 0){
DPRINTF("%s: Disallowed flags for clone3: %#x\n", __func__,
cl_args.exit_signal & ~(uint64_t)LINUX_CLONE_CSIGNAL);
return EINVAL;
}
#endif
if (cl_args.stack == 0 && cl_args.stack_size != 0) {
DPRINTF("%s: Stack is NULL but stack size is not 0\n",
__func__);
return EINVAL;
}
if (cl_args.stack != 0 && cl_args.stack_size == 0) {
DPRINTF("%s: Stack is not NULL but stack size is 0\n",
__func__);
return EINVAL;
}
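	/* Keep only the clone flags we implement. */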
int flags = cl_args.flags & LINUX_CLONE_ALLOWED_FLAGS;
#if 0
int sig = cl_args.exit_signal & LINUX_CLONE_CSIGNAL;
#endif
// XXX: Pidfd member handling
// XXX: we don't have cgroups
// XXX: what to do with tid_set and tid_set_size
	// XXX: clone3 carries an explicit stack size; instead, implement
	// clone as a clone3 wrapper.
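	/* Repackage the clone3 argument block as classic clone() arguments. */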
SCARG(&clone_args, flags) = flags;
SCARG(&clone_args, stack) = (void *)(uintptr_t)cl_args.stack;
SCARG(&clone_args, parent_tidptr) =
(void *)(intptr_t)cl_args.parent_tid;
SCARG(&clone_args, tls) =
(void *)(intptr_t)cl_args.tls;
SCARG(&clone_args, child_tidptr) =
(void *)(intptr_t)cl_args.child_tid;
/* LINUX_CLONE_CHILD_CLEARTID: clear TID in child's memory on exit() */
if (flags & LINUX_CLONE_CHILD_CLEARTID) {
led = l2->l_emuldata;
led->led_clear_tid = child_tidptr;
}
/* LINUX_CLONE_PARENT_SETTID: store child's TID in parent's memory */
if (flags & LINUX_CLONE_PARENT_SETTID) {
if ((error = copyout(&lid, parent_tidptr, sizeof(lid))) != 0)
printf("%s: LINUX_CLONE_PARENT_SETTID "
"failed (parent_tidptr = %p tid = %d error=%d)\n",
__func__, parent_tidptr, lid, error);
}
/* LINUX_CLONE_CHILD_SETTID: store child's TID in child's memory */
if (flags & LINUX_CLONE_CHILD_SETTID) {
if ((error = copyout(&lid, child_tidptr, sizeof(lid))) != 0)
printf("%s: LINUX_CLONE_CHILD_SETTID "
"failed (child_tidptr = %p, tid = %d error=%d)\n",
__func__, child_tidptr, lid, error);
}
error = copyin(SCARG(uap, sp), &lp, sizeof(lp));
if (error)
goto out;
/* We need the current policy in Linux terms. */
error = do_sched_getparam(SCARG(uap, pid), 0, &policy, NULL);
if (error)
goto out;
error = sched_native2linux(policy, NULL, &policy, NULL);
if (error)
goto out;
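	/* Convert the Linux parameters to their native form under that policy. */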
error = sched_linux2native(policy, &lp, &policy, &sp);
if (error)
goto out;
/*
 * The affinity syscalls assume that the layout of our CPU kcpuset is
 * the same as Linux's: a linear bitmask.
*/
int
linux_sys_sched_getaffinity(struct lwp *l, const struct linux_sys_sched_getaffinity_args *uap, register_t *retval)
{
/* {
syscallarg(linux_pid_t) pid;
syscallarg(unsigned int) len;
syscallarg(unsigned long *) mask;
} */
struct proc *p;
struct lwp *t;
kcpuset_t *kcset;
size_t size;
cpuid_t i;
int error;
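	/* The user's buffer must be able to hold a full Linux CPU mask. */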
size = LINUX_CPU_MASK_SIZE;
if (SCARG(uap, len) < size)
return EINVAL;
if (SCARG(uap, pid) == 0) {
p = curproc;
mutex_enter(p->p_lock);
t = curlwp;
} else {
t = lwp_find2(-1, SCARG(uap, pid));
if (__predict_false(t == NULL)) {
return ESRCH;
}
p = t->l_proc;
KASSERT(mutex_owned(p->p_lock));
}
/* Check the permission */
if (kauth_authorize_process(l->l_cred,
KAUTH_PROCESS_SCHEDULER_GETAFFINITY, p, NULL, NULL, NULL)) {
mutex_exit(p->p_lock);
return EPERM;
}
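	/* Take a snapshot of the thread's affinity mask. */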
kcpuset_create(&kcset, true);
lwp_lock(t);
if (t->l_affinity != NULL)
kcpuset_copy(kcset, t->l_affinity);
else {
		/*
		 * When no affinity has been set, report every available
		 * CPU as set in the mask.
		 */
kcpuset_zero(kcset);
for (i = 0; i < ncpu; i++)
kcpuset_set(kcset, i);
}
lwp_unlock(t);
mutex_exit(p->p_lock);
error = kcpuset_copyout(kcset, (cpuset_t *)SCARG(uap, mask), size);
kcpuset_unuse(kcset, NULL);
*retval = size;
return error;
}