/* $NetBSD: locks.c,v 1.88 2023/11/02 10:31:55 martin Exp $ */
/*
* Copyright (c) 2007-2011 Antti Kantee. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: locks.c,v 1.88 2023/11/02 10:31:55 martin Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/condvar.h>

#include <rump-sys/kern.h>

#include <rump/rumpuser.h>

#ifdef LOCKDEBUG
const int rump_lockdebug = 1;
#else
const int rump_lockdebug = 0;
#endif

/*
 * Simple lockdebug.  If it's compiled in, it's always active.
 * Currently available only for mtx/rwlock.
 */
#ifdef LOCKDEBUG
#include <sys/lockdebug.h>
#define ALLOCK(lock, ops, return_address)		\
    lockdebug_alloc(__func__, __LINE__, lock, ops,	\
	return_address)
#define FREELOCK(lock)					\
    lockdebug_free(__func__, __LINE__, lock)
#define WANTLOCK(lock, shar)				\
    lockdebug_wantlock(__func__, __LINE__, lock,	\
	(uintptr_t)__builtin_return_address(0), shar)
#define LOCKED(lock, shar)				\
    lockdebug_locked(__func__, __LINE__, lock, NULL,	\
	(uintptr_t)__builtin_return_address(0), shar)
#define UNLOCKED(lock, shar)				\
    lockdebug_unlocked(__func__, __LINE__, lock,	\
	(uintptr_t)__builtin_return_address(0), shar)
#define BARRIER(lock, slp)				\
    lockdebug_barrier(__func__, __LINE__, lock, slp)
#else
#define ALLOCK(a, b, c) do {} while (0)
#define FREELOCK(a) do {} while (0)
#define WANTLOCK(a, b) do {} while (0)
#define LOCKED(a, b) do {} while (0)
#define UNLOCKED(a, b) do {} while (0)
#define BARRIER(a, b) do {} while (0)
#endif
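
/*
 * Illustrative sketch (kept out of the build with #if 0): how the
 * hooks defined above are meant to bracket a lock's life cycle when
 * LOCKDEBUG is enabled.  The function name is hypothetical; the
 * lockops pointer and the calling pattern mirror their use in the
 * mutex routines further below, with shar == false meaning an
 * exclusive acquisition.
 */
#if 0
static void
example_lockdebug_lifecycle(kmutex_t *mtx, uintptr_t return_address)
{

        ALLOCK(mtx, &mutex_adaptive_lockops, return_address);  /* created */
        WANTLOCK(mtx, false);           /* about to acquire, exclusive */
        LOCKED(mtx, false);             /* acquisition recorded */
        UNLOCKED(mtx, false);           /* release recorded */
        FREELOCK(mtx);                  /* destroyed */
}
#endif
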
/*
 * We map locks to pthread routines.  The difference between kernel
 * and rumpuser routines is that while the kernel uses static
 * storage, rumpuser allocates the object from the heap.  This
 * indirection is necessary because we don't know the size of
 * pthread objects here.  It is also beneficial, since we can
 * be easily compatible with the kernel ABI because all kernel
 * objects regardless of machine architecture are always at least
 * the size of a pointer.  The downside, of course, is a performance
 * penalty.
 */
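
/*
 * Illustrative sketch (kept out of the build with #if 0) of the
 * indirection described above: the pointer-sized kmutex_t storage
 * holds a pointer to the heap-allocated hypervisor mutex, so the
 * kernel ABI stays unchanged.  The accessor macro and function below
 * are hypothetical, shown only to make the layout concrete.
 */
#if 0
#define EXAMPLE_RUMPMTX(mtx)    (*(struct rumpuser_mtx **)(mtx))

static void
example_indirection(kmutex_t *mtx)
{

        /* the kernel-side object merely names the host lock */
        rumpuser_mutex_enter(EXAMPLE_RUMPMTX(mtx));
        rumpuser_mutex_exit(EXAMPLE_RUMPMTX(mtx));
}
#endif
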
void
_mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl,
    uintptr_t return_address)
{
        int ruflags = RUMPUSER_MTX_KMUTEX;
        int isspin;

        CTASSERT(sizeof(kmutex_t) >= sizeof(void *));

        /*
         * Try to figure out if the caller wanted a spin mutex or
         * not with this easy set of conditionals.  The difference
         * between a spin mutex and an adaptive mutex for a rump
         * kernel is that the hypervisor does not relinquish the
         * rump kernel CPU context for a spin mutex.  The
         * hypervisor itself may block even when "spinning".
         */
        if (type == MUTEX_SPIN) {
                isspin = 1;
        } else if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK ||
            ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET ||
            ipl == IPL_SOFTSERIAL) {
                isspin = 0;
        } else {
                isspin = 1;
        }

        if (isspin)
                ruflags |= RUMPUSER_MTX_SPIN;
        rumpuser_mutex_init((struct rumpuser_mtx **)mtx, ruflags);
        if (isspin)
                ALLOCK(mtx, &mutex_spin_lockops, return_address);
        else
                ALLOCK(mtx, &mutex_adaptive_lockops, return_address);
}

void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{

        _mutex_init(mtx, type, ipl, (uintptr_t)__builtin_return_address(0));
}
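
/*
 * Usage sketch (kept out of the build with #if 0) for the spin vs.
 * adaptive selection explained in _mutex_init() above: IPL_NONE and
 * the soft interrupt levels yield an adaptive mutex, while MUTEX_SPIN
 * or a hard interrupt level such as IPL_VM yields a spin mutex.  The
 * variable and function names are hypothetical.
 */
#if 0
static kmutex_t example_adaptive_mtx, example_spin_mtx;

static void
example_mutex_setup(void)
{

        mutex_init(&example_adaptive_mtx, MUTEX_DEFAULT, IPL_NONE);
        mutex_init(&example_spin_mtx, MUTEX_DEFAULT, IPL_VM);
}
#endif
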
static int
docvwait(kcondvar_t *cv, kmutex_t *mtx, struct timespec *ts)
{
        struct lwp *l = curlwp;
        int rv;

        if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
                /*
                 * yield() here, someone might want the cpu
                 * to set a condition.  otherwise we'll just
                 * loop forever.
                 */
                yield();

                return EINTR;
        }

        /*
         * Check for QEXIT.  if so, we need to wait here until we
         * are allowed to exit.
         */
        if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
                struct proc *p = l->l_proc;