/*-
* Copyright (c) 2014 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Mindaugas Rasiukevicius.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Inter-processor interrupt (IPI) interface: asynchronous IPIs to
* invoke functions with a constant argument and synchronous IPIs
* with the cross-call support.
*/
/*
* Per-CPU mailbox for IPI messages: it is a single cache line storing
* up to IPI_MSG_MAX messages. This interface is built on top of the
* synchronous IPIs.
*/
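/*
 * Illustrative sketch (an assumption, not the authoritative
 * definitions): the mailbox described above presumably amounts to a
 * cache-line-aligned array of message pointers, one mailbox per CPU,
 * along the lines of:
 */
#if 0
typedef struct {
	ipi_msg_t *msg[IPI_MSG_MAX];
} ipi_mbox_t;

static ipi_mbox_t ipi_mboxes[MAXCPUS] __cacheline_aligned;
#endif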
/*
* Register the handler for synchronous IPIs. This mechanism
* is built on top of the asynchronous interface. Slot zero is
* reserved permanently; it is also handy to use zero as a failure
 * value for other registers (as it is potentially less error-prone).
*/
	ipi_intrs[IPI_SYNCH_ID].func = ipi_msg_cpu_handler;

	/* Ensure that there are no IPIs in flight. */
	kpreempt_disable();
	ipi_broadcast(&ipimsg, false);
	ipi_wait(&ipimsg);
	kpreempt_enable();
	mutex_exit(&ipi_mngmt_lock);
}
/*
* ipi_mark_pending: internal routine to mark an IPI pending on the
* specified CPU (which might be curcpu()).
*/
static bool
ipi_mark_pending(u_int ipi_id, struct cpu_info *ci)
{
	const u_int i = ipi_id >> IPI_BITW_SHIFT;
	const uint32_t bitm = 1U << (ipi_id & IPI_BITW_MASK);

	/* Mark as pending and return true if not previously marked. */
	if ((atomic_load_acquire(&ci->ci_ipipend[i]) & bitm) == 0) {
		membar_release();
		atomic_or_32(&ci->ci_ipipend[i], bitm);
		return true;
	}
	return false;
}
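/*
 * Worked example (illustrative; assumes 32-bit pend words, i.e.
 * IPI_BITW_SHIFT == 5 and IPI_BITW_MASK == 31): ipi_id 35 selects
 * word i = 35 >> 5 = 1 and bit 35 & 31 = 3, so bitm == 0x08 is set
 * in ci->ci_ipipend[1].
 */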
/*
* ipi_trigger: asynchronously send an IPI to the specified CPU.
*/
void
ipi_trigger(u_int ipi_id, struct cpu_info *ci)
{
	KASSERT(curcpu() != ci);

	if (ipi_mark_pending(ipi_id, ci)) {
		cpu_ipi(ci);
	}
}
/*
 * ipi_trigger_multi_internal: the guts of ipi_trigger_multi() and
 * ipi_trigger_broadcast() - send an IPI to the CPUs in the target set,
 * optionally including the sending CPU.
 */
static void
ipi_trigger_multi_internal(u_int ipi_id, const kcpuset_t *target,
    bool skip_self)
{
	const cpuid_t selfid = cpu_index(curcpu());
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	KASSERT(kpreempt_disabled());
	KASSERT(target != NULL);

	for (CPU_INFO_FOREACH(cii, ci)) {
		const cpuid_t cpuid = cpu_index(ci);

		if (!kcpuset_isset(target, cpuid) || cpuid == selfid) {
			continue;
		}
		ipi_trigger(ipi_id, ci);
	}
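	/*
	 * The sending CPU is not self-interrupted: if it is in the
	 * target set, mark the IPI pending and run the handler
	 * directly at IPL_HIGH.
	 */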
	if (!skip_self && kcpuset_isset(target, selfid)) {
		ipi_mark_pending(ipi_id, curcpu());
		int s = splhigh();
		ipi_cpu_handler();
		splx(s);
	}
}
/*
 * ipi_trigger_multi: same as ipi_trigger(), but sends the IPI to
 * multiple CPUs given by the target CPU set.
 */
void
ipi_trigger_multi(u_int ipi_id, const kcpuset_t *target)
{
	ipi_trigger_multi_internal(ipi_id, target, false);
}
/*
 * ipi_trigger_broadcast: same as ipi_trigger_multi(), but with
 * kcpuset_attached as the target set, optionally skipping the
 * sending CPU.
 */
void
ipi_trigger_broadcast(u_int ipi_id, bool skip_self)
{
	ipi_trigger_multi_internal(ipi_id, kcpuset_attached, skip_self);
}
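/*
 * Illustrative sketch (hypothetical caller): trigger a registered
 * asynchronous IPI on every attached CPU except the sender.
 */
#if 0
static void
example_broadcast_usage(u_int ipi_id)
{
	kpreempt_disable();
	ipi_trigger_broadcast(ipi_id, true);	/* true: skip curcpu() */
	kpreempt_enable();
}
#endif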
/*
* put_msg: insert message into the mailbox.
*
* Caller is responsible for issuing membar_release first.
*/
static inline void
put_msg(ipi_mbox_t *mbox, ipi_msg_t *msg)
{
	int count = SPINLOCK_BACKOFF_MIN;
again:
	for (u_int i = 0; i < IPI_MSG_MAX; i++) {
		if (atomic_cas_ptr(&mbox->msg[i], NULL, msg) == NULL) {
			return;
		}
	}

	/* All slots are full: we have to spin-wait. */
	ipi_mboxfull_ev.ev_count++;
	SPINLOCK_BACKOFF(count);
	goto again;
}
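/*
 * Note on ordering (added commentary): the membar_release() issued by
 * the caller pairs with the atomic_load_acquire() of mbox->msg[i] in
 * ipi_msg_cpu_handler(), so the message contents are visible to the
 * target CPU before it observes the filled slot.
 */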
/*
* ipi_cpu_handler: the IPI handler.
*/
void
ipi_cpu_handler(void)
{
	struct cpu_info * const ci = curcpu();

	/*
	 * Handle asynchronous IPIs: inspect per-CPU bit field, extract
	 * IPI ID numbers and execute functions in those slots.
	 */
	for (u_int i = 0; i < IPI_BITWORDS; i++) {
		uint32_t pending, bit;

		if (atomic_load_relaxed(&ci->ci_ipipend[i]) == 0) {
			continue;
		}
		pending = atomic_swap_32(&ci->ci_ipipend[i], 0);
		membar_acquire();
		while ((bit = ffs(pending)) != 0) {
			const u_int ipi_id = (i << IPI_BITW_SHIFT) | --bit;
			ipi_intr_t *ipi_hdl = &ipi_intrs[ipi_id];

			pending &= ~(1U << bit);
			KASSERT(ipi_hdl->func != NULL);
			ipi_hdl->func(ipi_hdl->arg);
		}
	}
}
/*
* ipi_msg_cpu_handler: handle synchronous IPIs - iterate mailbox,
* execute the passed functions and acknowledge the messages.
*/
static void
ipi_msg_cpu_handler(void *arg __unused)
{
	const struct cpu_info * const ci = curcpu();
	ipi_mbox_t *mbox = &ipi_mboxes[cpu_index(ci)];

	for (u_int i = 0; i < IPI_MSG_MAX; i++) {
		ipi_msg_t *msg;

		/* Get the message. */
		if ((msg = atomic_load_acquire(&mbox->msg[i])) == NULL) {
			continue;
		}
		atomic_store_relaxed(&mbox->msg[i], NULL);

		/* Execute the handler. */
		KASSERT(msg->func);
		msg->func(msg->arg);

		/* Ack the request. */
		membar_release();
		atomic_dec_uint(&msg->_pending);
	}
}
/*
* ipi_unicast: send an IPI to a single CPU.
*
* => The CPU must be remote; must not be local.
* => The caller must ipi_wait() on the message for completion.
*/
void
ipi_unicast(ipi_msg_t *msg, struct cpu_info *ci)
{
	const cpuid_t id = cpu_index(ci);

	KASSERT(msg->func != NULL);
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	msg->_pending = 1;
	membar_release();

	put_msg(&ipi_mboxes[id], msg);
	ipi_trigger(IPI_SYNCH_ID, ci);
}
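/*
 * Illustrative sketch (hypothetical caller): a synchronous cross-call
 * to a single remote CPU, waiting for the handler to complete.
 * ipi_wait() is part of the real interface.
 */
#if 0
static void
example_sync_func(void *arg)
{
	/* Runs on the remote CPU; completion is observed via ipi_wait(). */
}

static void
example_unicast_usage(struct cpu_info *ci)
{
	ipi_msg_t msg = { .func = example_sync_func, .arg = NULL };

	kpreempt_disable();
	ipi_unicast(&msg, ci);
	ipi_wait(&msg);		/* waits until msg._pending drops to zero */
	kpreempt_enable();
}
#endif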
/*
* ipi_multicast: send an IPI to each CPU in the specified set.
*
* => The caller must ipi_wait() on the message for completion.
*/
void
ipi_multicast(ipi_msg_t *msg, const kcpuset_t *target)
{
	const struct cpu_info * const self = curcpu();
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	u_int local;