/* $NetBSD: if_sq.c,v 1.60 2024/07/05 04:31:50 rin Exp $ */
/*
* Copyright (c) 2001 Rafal K. Boni
* Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
* All rights reserved.
*
* Portions of this code are derived from software contributed to The
* NetBSD Foundation by Jason R. Thorpe of the Numerical Aerospace
* Simulation Facility, NASA Ames Research Center.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Short TODO list:
* (1) Do counters for bad-RX packets.
* (2) Allow multi-segment transmits, instead of copying to a single,
* contiguous mbuf.
* (3) Verify sq_stop() turns off enough stuff; I was still getting
* seeq interrupts after sq_stop().
* (4) Implement EDLC modes: especially packet auto-pad and simplex
* mode.
* (5) Should the driver filter out its own transmissions in non-EDLC
* mode?
* (6) Multicast support -- multicast filter, address management, ...
 * (7)	Deal with RB0 (recv buffer overflow) on reception.  Will need
 *	to figure out whether RB0 is read-only, as stated in one spot
 *	in the HPC spec, or read-write (ie, is 'write a one to clear
 *	it' the correct thing?).
*/
#if defined(SQ_DEBUG)
int sq_debug = 0;
#define SQ_DPRINTF(x)	do { if (sq_debug) printf x; } while (0)
#else
#define SQ_DPRINTF(x)	do { /* nothing */ } while (0)
#endif
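/*
 * SQ_DPRINTF takes a parenthesized printf() argument list, so calls
 * use double parentheses, e.g.:
 *
 *	SQ_DPRINTF(("%s: rxintr: done at %d\n",
 *	    device_xname(sc->sc_dev), i));
 */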
/*
 * If our MAC address is bogus, obtain it from ARCBIOS.  This will
 * be true of the onboard HPC3 on IP22, since there is no EEPROM;
 * the DS1386 RTC's battery-backed RAM is used instead.
 */
if (sc->sc_enaddr[0] != SGI_OUI_0 ||
sc->sc_enaddr[1] != SGI_OUI_1 ||
sc->sc_enaddr[2] != SGI_OUI_2) {
macaddr = arcbios_GetEnvironmentVariable("eaddr");
if (macaddr == NULL) {
printf(": unable to get MAC address!\n");
goto fail_6;
}
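		/*
		 * Parse the colon-separated "eaddr" string (e.g.
		 * "08:00:69:0a:0b:0c"; 08:00:69 is the SGI OUI) into
		 * the six-byte sc_enaddr.
		 */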
ether_aton_r(sc->sc_enaddr, sizeof(sc->sc_enaddr), macaddr);
}
if ((cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc)) == NULL) {
printf(": unable to establish interrupt!\n");
goto fail_6;
}
/* Reset the chip to a known state. */
sq_reset(sc);
/*
* Determine if we're an 8003 or 80c03 by setting the first
* MAC address register to non-zero, and then reading it back.
* If it's zero, we have an 80c03, because we will have read
* the TxCollLSB register.
*/
sq_seeq_write(sc, SEEQ_TXCOLLS0, 0xa5);
if (sq_seeq_read(sc, SEEQ_TXCOLLS0) == 0)
sc->sc_type = SQ_TYPE_80C03;
else
sc->sc_type = SQ_TYPE_8003;
sq_seeq_write(sc, SEEQ_TXCOLLS0, 0x00);
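	/*
	 * The same probe, repackaged as a standalone helper for
	 * clarity; a sketch only (sq_probe_type() is hypothetical,
	 * not part of this driver):
	 *
	 *	static int
	 *	sq_probe_type(struct sq_softc *sc)
	 *	{
	 *		int type;
	 *
	 *		sq_seeq_write(sc, SEEQ_TXCOLLS0, 0xa5);
	 *		type = (sq_seeq_read(sc, SEEQ_TXCOLLS0) == 0) ?
	 *		    SQ_TYPE_80C03 : SQ_TYPE_8003;
	 *		sq_seeq_write(sc, SEEQ_TXCOLLS0, 0x00);
	 *		return type;
	 *	}
	 */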
/*
* Free any resources we've allocated during the failed attach
* attempt. Do this in reverse order and fall through.
*/
fail_6:
for (i = 0; i < SQ_NRXDESC; i++) {
if (sc->sc_rxmbuf[i] != NULL) {
bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]);
m_freem(sc->sc_rxmbuf[i]);
}
}
fail_5:
for (i = 0; i < SQ_NRXDESC; i++) {
if (sc->sc_rxmap[i] != NULL)
bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]);
}
fail_4:
for (i = 0; i < SQ_NTXDESC; i++) {
if (sc->sc_txmap[i] != NULL)
bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]);
}
bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap);
fail_3:
bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap);
fail_2:
bus_dmamem_unmap(sc->sc_dmat,
(void *)sc->sc_control, sizeof(struct sq_control));
fail_1:
bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg);
fail_0:
return;
}
/* Set up data to get the interface up and running. */
int
sq_init(struct ifnet *ifp)
{
int i;
struct sq_softc *sc = ifp->if_softc;
/*
* Set the receive filter -- this will add some bits to the
* prototype RXCMD register. Do this before setting the
* transmit config register, since we might need to switch
* banks.
*/
sq_set_filter(sc);
/* Set up Seeq transmit command register */
sq_seeq_write(sc, SEEQ_TXCMD,
TXCMD_IE_UFLOW |
TXCMD_IE_COLL |
TXCMD_IE_16COLL |
TXCMD_IE_GOOD);
/* Now write the receive command register. */
sq_seeq_write(sc, SEEQ_RXCMD, sc->sc_rxcmd);
/*
* Set up HPC ethernet PIO and DMA configurations.
*
* The PROM appears to do most of this for the onboard HPC3, but
* not for the Challenge S's IOPLUS chip. We copy how the onboard
* chip is configured and assume that it's correct for both.
*/
	if (sc->hpc_regs->revision == 3) {
		uint32_t dmareg, pioreg;

		/* ... PIO/DMA timing writes into dmareg/pioreg elided ... */
	}
/* Pass the start of the receive ring to the HPC */
sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp, SQ_CDRXADDR(sc, 0));
/* And turn on the HPC ethernet receive channel */
sq_hpc_write(sc, sc->hpc_regs->enetr_ctl,
sc->hpc_regs->enetr_ctl_active);
/*
* Turn off delayed receive interrupts on HPC1.
* (see Hollywood HPC Specification 2.1.4.3)
*/
if (sc->hpc_regs->revision != 3)
sq_hpc_write(sc, HPC1_ENET_INTDELAY, HPC1_ENET_INTDELAY_OFF);
	/* ... remaining interface bring-up elided ... */
	return 0;
}

/*
 * Set the Seeq receive filter according to the interface flags and
 * multicast list.
 */
static void
sq_set_filter(struct sq_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/*
	 * Check for promiscuous mode.  Also implies
	 * all-multicast.
	 */
if (ifp->if_flags & IFF_PROMISC) {
sc->sc_rxcmd |= RXCMD_REC_ALL;
ifp->if_flags |= IFF_ALLMULTI;
return;
}
/*
* The 8003 has no hash table. If we have any multicast
* addresses on the list, enable reception of all multicast
* frames.
*
* XXX The 80c03 has a hash table. We should use it.
*/
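	/*
	 * A minimal sketch of that policy, assuming the standard
	 * ether_multi walk (struct ether_multistep step; struct
	 * ether_multi *enm; RXCMD_REC_MULTI is the chip's multicast
	 * receive-mode bit):
	 *
	 *	ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
	 *	if (enm != NULL) {
	 *		sc->sc_rxcmd |= RXCMD_REC_MULTI;
	 *		ifp->if_flags |= IFF_ALLMULTI;
	 *	}
	 */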
}

static void
sq_start(struct ifnet *ifp)
{
	struct sq_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	bus_dmamap_t dmamap;
	uint32_t status;
	int err, ofree, firsttx, lasttx = -1;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
ofree = sc->sc_nfreetx;
firsttx = sc->sc_nexttx;
/*
* Loop through the send queue, setting up transmit descriptors
* until we drain the queue, or use up all available transmit
* descriptors.
*/
while (sc->sc_nfreetx != 0) {
/*
* Grab a packet off the queue.
*/
IFQ_POLL(&ifp->if_snd, m0);
if (m0 == NULL)
break;
m = NULL;
dmamap = sc->sc_txmap[sc->sc_nexttx];
/*
* Load the DMA map. If this fails, the packet either
* didn't fit in the allotted number of segments, or we were
* short on resources. In this case, we'll copy and try
* again.
* Also copy it if we need to pad, so that we are sure there
* is room for the pad buffer.
* XXX the right way of doing this is to use a static buffer
* for padding and adding it to the transmit descriptor (see
* sys/dev/pci/if_tl.c for example). We can't do this here yet
* because we can't send packets with more than one fragment.
*/
if (m0->m_pkthdr.len < ETHER_PAD_LEN ||
bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
BUS_DMA_NOWAIT) != 0) {
MGETHDR(m, M_DONTWAIT, MT_DATA);
if (m == NULL) {
printf("%s: unable to allocate Tx mbuf\n",
device_xname(sc->sc_dev));
break;
}
if (m0->m_pkthdr.len > MHLEN) {
MCLGET(m, M_DONTWAIT);
if ((m->m_flags & M_EXT) == 0) {
printf("%s: unable to allocate Tx "
"cluster\n",
device_xname(sc->sc_dev));
m_freem(m);
break;
}
}
			/* Copy the payload; zero-pad short frames. */
			m_copydata(m0, 0, m0->m_pkthdr.len,
			    mtod(m, void *));
			if (m0->m_pkthdr.len < ETHER_PAD_LEN) {
				memset(mtod(m, char *) + m0->m_pkthdr.len,
				    0, ETHER_PAD_LEN - m0->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = ETHER_PAD_LEN;
			} else
				m->m_pkthdr.len = m->m_len =
				    m0->m_pkthdr.len;

			if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_NOWAIT)) != 0) {
printf("%s: unable to load Tx buffer, "
"error = %d\n",
device_xname(sc->sc_dev), err);
break;
}
}
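		/*
		 * A sketch of the static-pad approach from the XXX
		 * above, usable once multi-segment sends work
		 * (sq_txpad is hypothetical, not part of this driver):
		 *
		 *	static const uint8_t sq_txpad[ETHER_PAD_LEN];
		 *	...
		 *	if (m0->m_pkthdr.len < ETHER_PAD_LEN)
		 *		append a DMA segment covering
		 *		ETHER_PAD_LEN - m0->m_pkthdr.len
		 *		bytes of sq_txpad;
		 */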
/*
* Ensure we have enough descriptors free to describe
* the packet.
*/
if (dmamap->dm_nsegs > sc->sc_nfreetx) {
/*
* Not enough free descriptors to transmit this
* packet. We haven't committed to anything yet,
* so just unload the DMA map, put the packet
* back on the queue, and punt.
*
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
*/
bus_dmamap_unload(sc->sc_dmat, dmamap);
m_freem(m);
break;
}
IFQ_DEQUEUE(&ifp->if_snd, m0);
/*
* Pass the packet to any BPF listeners.
*/
bpf_mtap(ifp, m0, BPF_D_OUT);
if (m != NULL) {
m_freem(m0);
m0 = m;
}
		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* ... descriptor setup and lasttx bookkeeping elided ... */
	}

	/* If any packets were enqueued, start the transmitter. */
	if (sc->sc_nfreetx != ofree) {
SQ_DPRINTF(("%s: %d packets enqueued, first %d, INTR on %d\n",
device_xname(sc->sc_dev), lasttx - firsttx + 1,
firsttx, lasttx));
/*
* Cause a transmit interrupt to happen on the
* last packet we enqueued, mark it as the last
* descriptor.
*
* HPC1_HDD_CTL_INTR will generate an interrupt on
* HPC1. HPC3 requires HPC3_HDD_CTL_EOPACKET in
* addition to HPC3_HDD_CTL_INTR to interrupt.
*/
KASSERT(lasttx != -1);
if (sc->hpc_regs->revision == 3) {
sc->sc_txdesc[lasttx].hpc3_hdd_ctl |=
HPC3_HDD_CTL_INTR | HPC3_HDD_CTL_EOCHAIN;
} else {
sc->sc_txdesc[lasttx].hpc1_hdd_ctl |= HPC1_HDD_CTL_INTR;
sc->sc_txdesc[lasttx].hpc1_hdd_bufptr |=
HPC1_HDD_CTL_EOCHAIN;
}
		/*
		 * There is a potential race condition here if the HPC
		 * DMA channel is active and we try to update either the
		 * 'next descriptor' pointer in the HPC PIO space or the
		 * 'next descriptor' pointer in a previous descriptor.
*
* To avoid this, if the channel is active, we rely on
* the transmit interrupt routine noticing that there
* are more packets to send and restarting the HPC DMA
* engine, rather than mucking with the DMA state here.
*/
		status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl);

		if ((status & sc->hpc_regs->enetx_ctl_active) == 0 &&
		    sc->hpc_regs->revision == 3) {
			/* Kick DMA channel into life */
			sq_hpc_write(sc, HPC3_ENETX_CTL,
			    HPC3_ENETX_CTL_ACTIVE);
		} else if ((status & sc->hpc_regs->enetx_ctl_active) == 0) {
/*
* In the HPC1 case where transmit DMA is
* inactive, we can either kick off if
* the ring was previously empty, or call
* our transmit interrupt handler to
* figure out if the ring stopped short
* and restart at the right place.
*/
			if (ofree == SQ_NTXDESC) {
				SQ_TRACE(SQ_START_DMA, sc, firsttx, status);

				sq_hpc_write(sc, HPC1_ENETX_NDBP,
				    SQ_CDTXADDR(sc, firsttx));

				/* Kick DMA channel into life */
				sq_hpc_write(sc, HPC1_ENETX_CTL,
				    HPC1_ENETX_CTL_ACTIVE);
			} else
				sq_txring_hpc1(sc);
		}
	}
}

static int
sq_intr(void *arg)
{
	struct sq_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int handled = 0;
	uint32_t stat;

	/* Latch the interrupt status. */
	stat = sq_hpc_read(sc, sc->hpc_regs->enetr_reset);
/*
* If the interface isn't running, the interrupt couldn't
* possibly have come from us.
*/
if ((ifp->if_flags & IFF_RUNNING) == 0)
return 0;
sc->sq_intrcnt.ev_count++;
/* Always check for received packets */
if (sq_rxintr(sc) != 0)
handled++;
/* Only handle transmit interrupts if we actually sent something */
if (sc->sc_nfreetx < SQ_NTXDESC) {
sq_txintr(sc);
handled++;
}
if (handled)
rnd_add_uint32(&sc->rnd_source, stat);
return handled;
}
static int
sq_rxintr(struct sq_softc *sc)
{
int count = 0;
	struct mbuf *m;
int i, framelen;
uint8_t pktstat;
uint32_t status;
uint32_t ctl_reg;
int new_end, orig_end;
struct ifnet *ifp = &sc->sc_ethercom.ec_if;
for (i = sc->sc_nextrx;; i = SQ_NEXTRX(i)) {
SQ_CDRXSYNC(sc, i,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
/*
* If this is a CPU-owned buffer, we're at the end of the list.
*/
if (sc->hpc_regs->revision == 3)
ctl_reg =
sc->sc_rxdesc[i].hpc3_hdd_ctl & HPC3_HDD_CTL_OWN;
else
ctl_reg =
sc->sc_rxdesc[i].hpc1_hdd_ctl & HPC1_HDD_CTL_OWN;
if (ctl_reg) {
#if defined(SQ_DEBUG)
uint32_t reg;
reg = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl);
SQ_DPRINTF(("%s: rxintr: done at %d (ctl %08x)\n",
device_xname(sc->sc_dev), i, reg));
#endif
break;
}
count++;
m = sc->sc_rxmbuf[i];
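		/*
		 * The HPC leaves the unused byte count in the
		 * descriptor, so the frame length is the buffer size,
		 * less 3 bytes of pad/status overhead, less that
		 * remainder.
		 */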
framelen = m->m_ext.ext_size - 3;
if (sc->hpc_regs->revision == 3)
framelen -=
HPC3_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc3_hdd_ctl);
else
framelen -=
HPC1_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc1_hdd_ctl);
/* Now sync the actual packet data */
bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);
		pktstat = *((uint8_t *)m->m_data + framelen + 2);

		if ((pktstat & RXSTAT_GOOD) == 0) {
			/* Count the error and drop the frame. */
			if_statinc(ifp, if_ierrors);
			if (pktstat & RXSTAT_OFLOW)
				printf("%s: receive FIFO overflow\n",
				    device_xname(sc->sc_dev));
			continue;
		}

		/* Skip the 2-byte alignment pad and pass the frame up. */
		m->m_data += 2;
		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = framelen;

		SQ_DPRINTF(("%s: sq_rxintr: buf %d len %d\n",
		    device_xname(sc->sc_dev), i, framelen));
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}
/* If anything happened, move ring start/end pointers to new spot */
if (i != sc->sc_nextrx) {
		/*
		 * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and
		 * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN
		 */
		new_end = SQ_PREVRX(i);
		sc->sc_rxdesc[new_end].hpc3_hdd_ctl |= HPC3_HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, new_end,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		orig_end = SQ_PREVRX(sc->sc_nextrx);
		sc->sc_rxdesc[orig_end].hpc3_hdd_ctl &= ~HPC3_HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, orig_end,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		sc->sc_nextrx = i;
	}

	status = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl);
/* If receive channel is stopped, restart it... */
if ((status & sc->hpc_regs->enetr_ctl_active) == 0) {
/* Pass the start of the receive ring to the HPC */
sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp,
SQ_CDRXADDR(sc, sc->sc_nextrx));
/* And turn on the HPC ethernet receive channel */
sq_hpc_write(sc, sc->hpc_regs->enetr_ctl,
sc->hpc_regs->enetr_ctl_active);
}
return count;
}
static int
sq_txintr(struct sq_softc *sc)
{
int shift = 0;
	uint32_t status;
struct ifnet *ifp = &sc->sc_ethercom.ec_if;
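	/*
	 * On HPC1 the transmit status bits sit in the upper 16 bits
	 * of the control register; shift them down to match the HPC3
	 * layout before testing.
	 */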
if (sc->hpc_regs->revision != 3)
shift = 16;
	status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl) >> shift;

	/* Record collisions and transmit errors. */
	if (status & TXSTAT_COLL)
		if_statinc(ifp, if_collisions);
	if (status & TXSTAT_UFLOW) {
		printf("%s: transmit underflow\n",
		    device_xname(sc->sc_dev));
		if_statinc(ifp, if_oerrors);
	}

	/* Hand off to the ring-specific reclaim routine. */
	if (sc->hpc_regs->revision == 3)
		sq_txring_hpc3(sc);
	else
		sq_txring_hpc1(sc);

	return 1;
}
/*
* Reclaim used transmit descriptors and restart the transmit DMA
* engine if necessary.
*/
static void
sq_txring_hpc1(struct sq_softc *sc)
{
/*
* HPC1 doesn't tag transmitted descriptors, however,
* the NDBP register points to the next descriptor that
* has not yet been processed. If DMA is not in progress,
* we can safely reclaim all descriptors up to NDBP, and,
* if necessary, restart DMA at NDBP. Otherwise, if DMA
* is active, we can only safely reclaim up to CBP.
*
* For now, we'll only reclaim on inactive DMA and assume
* that a sufficiently large ring keeps us out of trouble.
*/
uint32_t reclaimto, status;
int reclaimall, i = sc->sc_prevtx;
struct ifnet *ifp = &sc->sc_ethercom.ec_if;
status = sq_hpc_read(sc, HPC1_ENETX_CTL);
if (status & HPC1_ENETX_CTL_ACTIVE) {
SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
return;
} else
reclaimto = sq_hpc_read(sc, HPC1_ENETX_NDBP);
	/* Restart DMA at the first unprocessed descriptor, if any. */
	if (sc->sc_nfreetx < SQ_NTXDESC && sc->sc_nexttx != i) {
		sq_hpc_write(sc, HPC1_ENETX_NDBP, SQ_CDTXADDR(sc, i));

		/* Kick DMA channel into life */
		sq_hpc_write(sc, HPC1_ENETX_CTL, HPC1_ENETX_CTL_ACTIVE);
/*
* Set a watchdog timer in case the chip
* flakes out.
*/
ifp->if_timer = 5;
}
sc->sc_prevtx = i;
}
/*
* Reclaim used transmit descriptors and restart the transmit DMA
* engine if necessary.
*/
static void
sq_txring_hpc3(struct sq_softc *sc)
{
/*
* HPC3 tags descriptors with a bit once they've been
* transmitted. We need only free each XMITDONE'd
* descriptor, and restart the DMA engine if any
* descriptors are left over.
*/
int i;
uint32_t status = 0;
struct ifnet *ifp = &sc->sc_ethercom.ec_if;
i = sc->sc_prevtx;
while (sc->sc_nfreetx < SQ_NTXDESC) {
/*
* Check status first so we don't end up with a case of
* the buffer not being finished while the DMA channel
* has gone idle.
*/
status = sq_hpc_read(sc, HPC3_ENETX_CTL);
SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
/* Check for used descriptor and restart DMA chain if needed */
if ((sc->sc_txdesc[i].hpc3_hdd_ctl &
HPC3_HDD_CTL_XMITDONE) == 0) {
if ((status & HPC3_ENETX_CTL_ACTIVE) == 0) {
SQ_TRACE(SQ_RESTART_DMA, sc, i, status);