/* $NetBSD: octeon_intr.c,v 1.27 2022/04/09 23:34:40 riastradh Exp $ */
/*
* Copyright 2001, 2002 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * Platform-specific interrupt support for the Cavium Octeon (MIPS).
 */
/*
* XXX:
* Force all interrupts (except clock intrs and IPIs) to be routed
* through cpu0 until MP on MIPS is more stable.
*/
#define OCTEON_CPU0_INTERRUPTS
/*
 * This is a mask of bits to clear in the SR when we go to a
 * given hardware interrupt priority level.
 *
 * Each entry is the set of MIPS Status-register interrupt-enable bits
 * that must be masked off at that IPL; higher IPLs are supersets of
 * lower ones so that raising the IPL never re-enables an interrupt.
 */
static const struct ipl_sr_map octeon_ipl_sr_map = {
.sr_bits = {
[IPL_NONE] = 0,	/* nothing masked: all interrupts allowed */
[IPL_SOFTCLOCK] = MIPS_SOFT_INT_MASK_0,	/* block softclock soft int */
[IPL_SOFTNET] = MIPS_SOFT_INT_MASK,	/* block both soft ints */
[IPL_VM] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0,	/* + CIU IP2 */
[IPL_SCHED] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
| MIPS_INT_MASK_1 | MIPS_INT_MASK_5,	/* + CIU IP3 and CP0 count/compare clock */
[IPL_DDB] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
| MIPS_INT_MASK_1 | MIPS_INT_MASK_5,	/* same as IPL_SCHED; IP4 stays open for DDB — TODO confirm */
[IPL_HIGH] = MIPS_INT_MASK,	/* everything masked */
},
};
/*
* First, make it known.
*/
KASSERTMSG(octciu_intrs[irq] == NULL, "irq %d in use! (%p)",
irq, octciu_intrs[irq]);
atomic_store_release(&octciu_intrs[irq], ih);
/*
* Now enable it.
*/
const int bank = irq / 64;
const uint64_t irq_mask = __BIT(irq % 64);
switch (ipl) {
case IPL_VM:
cpu = &octeon_cpu_softc[0];
cpu->cpu_ip2_enable[bank] |= irq_mask;
mips3_sd(cpu->cpu_ip2_en[bank], cpu->cpu_ip2_enable[bank]);
break;
case IPL_SCHED:
#ifdef OCTEON_CPU0_INTERRUPTS
cpu = &octeon_cpu_softc[0];
cpu->cpu_ip3_enable[bank] |= irq_mask;
mips3_sd(cpu->cpu_ip3_en[bank], cpu->cpu_ip3_enable[bank]);
#else /* OCTEON_CPU0_INTERRUPTS */
for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
cpu = &octeon_cpu_softc[cpunum];
if (cpu->cpu_ci == NULL)
break;
cpu->cpu_ip3_enable[bank] |= irq_mask;
mips3_sd(cpu->cpu_ip3_en[bank], cpu->cpu_ip3_enable[bank]);
}
#endif /* OCTEON_CPU0_INTERRUPTS */
break;
case IPL_DDB:
case IPL_HIGH:
#ifdef OCTEON_CPU0_INTERRUPTS
cpu = &octeon_cpu_softc[0];
cpu->cpu_ip4_enable[bank] |= irq_mask;
mips3_sd(cpu->cpu_ip4_en[bank], cpu->cpu_ip4_enable[bank]);
#else /* OCTEON_CPU0_INTERRUPTS */
for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
cpu = &octeon_cpu_softc[cpunum];
if (cpu->cpu_ci == NULL)
break;
cpu->cpu_ip4_enable[bank] |= irq_mask;
mips3_sd(cpu->cpu_ip4_en[bank], cpu->cpu_ip4_enable[bank]);
}
#endif /* OCTEON_CPU0_INTERRUPTS */
break;
}
mutex_exit(&octeon_intr_lock);
return ih;
}
/*
 * Tear down an interrupt handler previously set up by
 * octeon_intr_establish(): disable the IRQ's CIU enable bit at the
 * appropriate IP level on the relevant CPU(s), unhook the handler,
 * wait for any in-flight invocation to drain, then free the cookie.
 *
 * cookie: the struct octeon_intrhand returned by octeon_intr_establish().
 */
void
octeon_intr_disestablish(void *cookie)
{
	struct octeon_intrhand * const ih = cookie;
	struct cpu_softc *cpu;
	const int irq = ih->ih_irq & (NIRQS-1);
	const int ipl = ih->ih_ipl;
	int cpunum;

	mutex_enter(&octeon_intr_lock);

	/*
	 * First disable it.
	 *
	 * irq_mask is the COMPLEMENT of the IRQ's bit: all ones except
	 * the bit to clear, so AND-ing it into the enable word clears
	 * exactly this IRQ and leaves every other enable bit intact.
	 * (The previous code applied "&= ~irq_mask", whose double
	 * complement instead cleared every OTHER interrupt in the bank
	 * while keeping this one enabled.)
	 */
	const int bank = irq / 64;
	const uint64_t irq_mask = ~__BIT(irq % 64);
	switch (ipl) {
	case IPL_VM:
		/* IPL_VM interrupts are only ever routed to cpu0. */
		cpu = &octeon_cpu_softc[0];
		cpu->cpu_ip2_enable[bank] &= irq_mask;
		mips3_sd(cpu->cpu_ip2_en[bank], cpu->cpu_ip2_enable[bank]);
		break;
	case IPL_SCHED:
		/*
		 * Clear on every attached CPU; establish may have
		 * enabled it on all of them (non-CPU0-only builds).
		 */
		for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
			cpu = &octeon_cpu_softc[cpunum];
			if (cpu->cpu_ci == NULL)
				break;
			cpu->cpu_ip3_enable[bank] &= irq_mask;
			mips3_sd(cpu->cpu_ip3_en[bank], cpu->cpu_ip3_enable[bank]);
		}
		break;
	case IPL_DDB:
	case IPL_HIGH:
		for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
			cpu = &octeon_cpu_softc[cpunum];
			if (cpu->cpu_ci == NULL)
				break;
			cpu->cpu_ip4_enable[bank] &= irq_mask;
			mips3_sd(cpu->cpu_ip4_en[bank], cpu->cpu_ip4_enable[bank]);
		}
		break;
	}

	/*
	 * Unhook the handler.  Relaxed ordering suffices here: the
	 * hardware enable bit is already clear, and the xc_barrier()
	 * below orders this store against any concurrent dispatcher.
	 */
	atomic_store_relaxed(&octciu_intrs[irq], NULL);

	mutex_exit(&octeon_intr_lock);

	/*
	 * Wait until the interrupt handler is no longer running on all
	 * CPUs before freeing ih and returning.
	 */
	xc_barrier(0);

	kmem_free(ih, sizeof(*ih));
}
void
octeon_iointr(int ipl, vaddr_t pc, uint32_t ipending)
{
struct cpu_info * const ci = curcpu();
struct cpu_softc * const cpu = ci->ci_softc;
int bank;
/* if the request is clear, it was previously processed */
if ((atomic_load_relaxed(&ci->ci_request_ipis) & ipi_mask) == 0)
return 0;
membar_acquire();