/*
* Copyright 2001, 2002 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Simon Burge and Jason Thorpe for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * emac(4) supports the following ibm4xx EMACs.
 *   XXXX: ZMII and 'TCP Acceleration Hardware' not supported yet...
 *
 *             tested (o = yes, - = no)
 *             ------
 *  405EP      -   10/100 x2
 *  405EX/EXr  o   10/100/1000 x2 (EXr x1), STA v2, 256-bit hash table, RGMII
 *  405GP/GPr  o   10/100
 *  440EP      -   10/100 x2, ZMII
 *  440GP      -   10/100 x2, ZMII
 *  440GX      -   10/100/1000 x4, ZMII/RGMII(ch 2, 3), TAH(ch 2, 3)
 *  440SP      -   10/100/1000
 *  440SPe     -   10/100/1000, STA v2
*/
/*
* Transmit descriptor list size. There are two Tx channels, each with
* up to 256 hardware descriptors available. We currently use one Tx
* channel. We tell the upper layers that they can queue a lot of
* packets, and we go ahead and manage up to 64 of them at a time. We
* allow up to 16 DMA segments per packet.
*/
#define EMAC_NTXSEGS 16
#define EMAC_TXQUEUELEN 64
#define EMAC_TXQUEUELEN_MASK (EMAC_TXQUEUELEN - 1)
#define EMAC_TXQUEUE_GC (EMAC_TXQUEUELEN / 4)
#define EMAC_NTXDESC 256
#define EMAC_NTXDESC_MASK (EMAC_NTXDESC - 1)
#define EMAC_NEXTTX(x) (((x) + 1) & EMAC_NTXDESC_MASK)
#define EMAC_NEXTTXS(x) (((x) + 1) & EMAC_TXQUEUELEN_MASK)
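/*
 * Both ring sizes are powers of two, so the "& MASK" in the macros above
 * is equivalent to "% SIZE": e.g. EMAC_NEXTTX(255) == 0 and
 * EMAC_NEXTTXS(63) == 0.  Used Tx jobs are reclaimed (emac_txreap())
 * once fewer than EMAC_TXQUEUE_GC job slots remain free.  A minimal
 * sketch, not part of the driver, of walking 'n' descriptors from 'idx':
 */
#if 0	/* illustrative only */
static inline int
emac_tx_index_advance(int idx, int n)
{
	while (n-- > 0)
		idx = EMAC_NEXTTX(idx);	/* wraps at EMAC_NTXDESC */
	return idx;
}
#endif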
/*
* Receive descriptor list size. There is one Rx channel with up to 256
* hardware descriptors available. We allocate 64 receive descriptors,
* each with a 2k buffer (MCLBYTES).
*/
#define EMAC_NRXDESC 64
#define EMAC_NRXDESC_MASK (EMAC_NRXDESC - 1)
#define EMAC_NEXTRX(x) (((x) + 1) & EMAC_NRXDESC_MASK)
#define EMAC_PREVRX(x) (((x) - 1) & EMAC_NRXDESC_MASK)
/*
* Transmit/receive descriptors that are DMA'd to the EMAC.
*/
struct emac_control_data {
struct mal_descriptor ecd_txdesc[EMAC_NTXDESC];
struct mal_descriptor ecd_rxdesc[EMAC_NRXDESC];
};
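/*
 * EMAC_CDTXADDR()/EMAC_CDRXADDR(), used further below to hand descriptor
 * ring addresses to the MAL, are not part of this excerpt.  A sketch of
 * how such macros are commonly built once the control data has been
 * loaded into a single DMA map; the name sc_cddma is assumed here to
 * hold sc_cddmamap->dm_segs[0].ds_addr:
 */
#if 0	/* illustrative only */
#define	EMAC_CDOFF(x)		offsetof(struct emac_control_data, x)
#define	EMAC_CDTXADDR(sc, n)	((sc)->sc_cddma + EMAC_CDOFF(ecd_txdesc[(n)]))
#define	EMAC_CDRXADDR(sc, n)	((sc)->sc_cddma + EMAC_CDOFF(ecd_rxdesc[(n)]))
#endif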
/*
* Software state for transmit jobs.
*/
struct emac_txsoft {
struct mbuf *txs_mbuf; /* head of mbuf chain */
bus_dmamap_t txs_dmamap; /* our DMA map */
int txs_firstdesc; /* first descriptor in packet */
int txs_lastdesc; /* last descriptor in packet */
int txs_ndesc; /* # of descriptors used */
};
/*
* Software state for receive descriptors.
*/
struct emac_rxsoft {
struct mbuf *rxs_mbuf; /* head of mbuf chain */
bus_dmamap_t rxs_dmamap; /* our DMA map */
};
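/*
 * EMAC_INIT_RXDESC(), used in the receive paths below, is not part of
 * this excerpt.  A sketch of what such a macro has to do: point the
 * descriptor at the mbuf cluster, mark it empty (wrapping on the last
 * slot), and sync it for the MAL.  MAL_RX_WRAP and MAL_RX_INTERRUPT are
 * assumed here by analogy with the Tx flags used below.
 */
#if 0	/* illustrative only */
#define	EMAC_INIT_RXDESC(sc, x)						\
do {									\
	struct emac_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct mal_descriptor *__rxd = &(sc)->sc_rxdescs[(x)];		\
									\
	__rxd->md_data = __rxs->rxs_dmamap->dm_segs[0].ds_addr;	\
	__rxd->md_data_len = MCLBYTES;					\
	__rxd->md_stat_ctrl = MAL_RX_EMPTY | MAL_RX_INTERRUPT |	\
	    (((x) == EMAC_NRXDESC - 1) ? MAL_RX_WRAP : 0);		\
	EMAC_CDRXSYNC((sc), (x),					\
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);		\
} while (/* CONSTCOND */ 0)
#endif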
/*
* Software state per device.
*/
struct emac_softc {
device_t sc_dev; /* generic device information */
int sc_instance; /* instance no. */
bus_space_tag_t sc_st; /* bus space tag */
bus_space_handle_t sc_sh; /* bus space handle */
bus_dma_tag_t sc_dmat; /* bus DMA tag */
struct ethercom sc_ethercom; /* ethernet common data */
void *sc_sdhook; /* shutdown hook */
void *sc_powerhook; /* power management hook */
/*
* Allocate the control data structures, and create and load the
* DMA map for it.
*/
if ((error = bus_dmamem_alloc(sc->sc_dmat,
sizeof(struct emac_control_data), 0, 0, &seg, 1, &nseg, 0)) != 0) {
aprint_error_dev(self,
"unable to allocate control data, error = %d\n", error);
goto fail_0;
}
if ((error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
sizeof(struct emac_control_data), (void **)&sc->sc_control_data,
BUS_DMA_COHERENT)) != 0) {
aprint_error_dev(self,
"unable to map control data, error = %d\n", error);
goto fail_1;
}
if ((error = bus_dmamap_create(sc->sc_dmat,
sizeof(struct emac_control_data), 1,
sizeof(struct emac_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
aprint_error_dev(self,
"unable to create control data DMA map, error = %d\n",
error);
goto fail_2;
}
if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
sc->sc_control_data, sizeof(struct emac_control_data), NULL,
0)) != 0) {
aprint_error_dev(self,
"unable to load control data DMA map, error = %d\n", error);
goto fail_3;
}
/*
* Create the transmit buffer DMA maps.
*/
for (i = 0; i < EMAC_TXQUEUELEN; i++) {
if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
EMAC_NTXSEGS, MCLBYTES, 0, 0,
&sc->sc_txsoft[i].txs_dmamap)) != 0) {
aprint_error_dev(self,
"unable to create tx DMA map %d, error = %d\n",
i, error);
goto fail_4;
}
}
/*
* Create the receive buffer DMA maps.
*/
for (i = 0; i < EMAC_NRXDESC; i++) {
if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
aprint_error_dev(self,
"unable to create rx DMA map %d, error = %d\n",
i, error);
goto fail_5;
}
sc->sc_rxsoft[i].rxs_mbuf = NULL;
}
	/* Soft reset the EMAC to bring the chip to a known state. */
emac_soft_reset(sc);
opb_freq = opb_get_frequency();
switch (opb_freq) {
case 33333333: opbc = STACR_OPBC_33MHZ; break;
case 50000000: opbc = STACR_OPBC_50MHZ; break;
case 66666666: opbc = STACR_OPBC_66MHZ; break;
case 83333333: opbc = STACR_OPBC_83MHZ; break;
case 100000000: opbc = STACR_OPBC_100MHZ; break;
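	/*
	 * The rest of this switch is not part of the excerpt; an
	 * unsupported OPB frequency presumably has to abort the attach.
	 * A sketch of such a default case (illustrative only, not the
	 * driver's actual code):
	 */
#if 0
	default:
		aprint_error_dev(self, "unsupported OPB frequency %dMHz\n",
		    opb_freq / 1000 / 1000);
		goto fail_5;
	}
#endif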
/*
* Make sure the interface is shutdown during reboot.
*/
sc->sc_sdhook = shutdownhook_establish(emac_shutdown, sc);
if (sc->sc_sdhook == NULL)
aprint_error_dev(self,
"WARNING: unable to establish shutdown hook\n");
return;
/*
* Free any resources we've allocated during the failed attach
* attempt. Do this in reverse order and fall through.
*/
fail_5:
for (i = 0; i < EMAC_NRXDESC; i++) {
if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
bus_dmamap_destroy(sc->sc_dmat,
sc->sc_rxsoft[i].rxs_dmamap);
}
fail_4:
for (i = 0; i < EMAC_TXQUEUELEN; i++) {
if (sc->sc_txsoft[i].txs_dmamap != NULL)
bus_dmamap_destroy(sc->sc_dmat,
sc->sc_txsoft[i].txs_dmamap);
}
bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
fail_3:
bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
fail_2:
bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
sizeof(struct emac_control_data));
fail_1:
bus_dmamem_free(sc->sc_dmat, &seg, nseg);
fail_0:
return;
}
/*
* EMAC General interrupt handler
*/
static int
emac_intr(void *arg)
{
struct emac_softc *sc = arg;
uint32_t status;
EMAC_EVCNT_INCR(&sc->sc_ev_intr);
status = EMAC_READ(sc, EMAC_ISR);
/* Clear the interrupt status bits. */
EMAC_WRITE(sc, EMAC_ISR, status);
/*
* Remember the previous number of free descriptors.
*/
ofree = sc->sc_txfree;
/*
* Loop through the send queue, setting up transmit descriptors
* until we drain the queue, or use up all available transmit
* descriptors.
*/
for (;;) {
/* Grab a packet off the queue. */
IFQ_POLL(&ifp->if_snd, m0);
if (m0 == NULL)
break;
/*
* Get a work queue entry. Reclaim used Tx descriptors if
* we are running low.
*/
if (sc->sc_txsfree < EMAC_TXQUEUE_GC) {
emac_txreap(sc);
if (sc->sc_txsfree == 0) {
EMAC_EVCNT_INCR(&sc->sc_ev_txsstall);
break;
}
}
		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments (EFBIG,
		 * in which case it is dropped below), or we were short
		 * on resources, in which case we just stop for now and
		 * retry later.
		 */
error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
BUS_DMA_WRITE | BUS_DMA_NOWAIT);
if (error) {
if (error == EFBIG) {
EMAC_EVCNT_INCR(&sc->sc_ev_txdrop);
aprint_error_ifnet(ifp,
"Tx packet consumes too many "
"DMA segments, dropping...\n");
IFQ_DEQUEUE(&ifp->if_snd, m0);
m_freem(m0);
continue;
}
/* Short on resources, just stop for now. */
break;
}
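		/*
		 * An alternative to dropping on EFBIG, seen in other
		 * NetBSD drivers, is to collapse the chain and retry the
		 * load once before giving up.  A sketch only; it assumes
		 * m_defrag() collapses into the original pkthdr mbuf
		 * (which therefore stays at the head of if_snd) and
		 * returns NULL on failure without disturbing the chain:
		 */
#if 0	/* illustrative only */
		if (error == EFBIG && m_defrag(m0, M_DONTWAIT) != NULL) {
			/* Chain collapsed in place; retry the load. */
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
			    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
		}
#endif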
/*
* Ensure we have enough descriptors free to describe
* the packet.
*/
if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map and punt; the
			 * packet has only been polled, so it stays on
			 * the send queue.  Notify the upper layer that
			 * there are no more slots left.
			 */
bus_dmamap_unload(sc->sc_dmat, dmamap);
EMAC_EVCNT_INCR(&sc->sc_ev_txdstall);
break;
}
IFQ_DEQUEUE(&ifp->if_snd, m0);
/*
* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
*/
/*
* Store a pointer to the packet so that we can free it
* later.
*/
txs->txs_mbuf = m0;
txs->txs_firstdesc = sc->sc_txnext;
txs->txs_ndesc = dmamap->dm_nsegs;
/*
* Initialize the transmit descriptor.
*/
firsttx = sc->sc_txnext;
for (nexttx = sc->sc_txnext, seg = 0;
seg < dmamap->dm_nsegs;
seg++, nexttx = EMAC_NEXTTX(nexttx)) {
struct mal_descriptor *txdesc =
&sc->sc_txdescs[nexttx];
/*
* If this is the first descriptor we're
* enqueueing, don't set the TX_READY bit just
* yet. That could cause a race condition.
* We'll do it below.
*/
txdesc->md_data = dmamap->dm_segs[seg].ds_addr;
txdesc->md_data_len = dmamap->dm_segs[seg].ds_len;
txdesc->md_stat_ctrl =
(txdesc->md_stat_ctrl & MAL_TX_WRAP) |
(nexttx == firsttx ? 0 : MAL_TX_READY) |
EMAC_TXC_GFCS | EMAC_TXC_GPAD;
lasttx = nexttx;
}
/* Set the LAST bit on the last segment. */
sc->sc_txdescs[lasttx].md_stat_ctrl |= MAL_TX_LAST;
/*
* Set up last segment descriptor to send an interrupt after
* that descriptor is transmitted, and bypass existing Tx
* descriptor reaping method (for now...).
*/
sc->sc_txdescs[lasttx].md_stat_ctrl |= MAL_TX_INTERRUPT;
/*
* The entire packet chain is set up. Give the
* first descriptor to the chip now.
*/
sc->sc_txdescs[firsttx].md_stat_ctrl |= MAL_TX_READY;
EMAC_CDTXSYNC(sc, firsttx, 1,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/*
* Tell the EMAC that a new packet is available.
*/
EMAC_WRITE(sc, EMAC_TMR0, TMR0_GNP0 | TMR0_TFAE_2);
/* Enable the transmit and receive channel on the MAL. */
error = mal_start(sc->sc_instance,
EMAC_CDTXADDR(sc, 0), EMAC_CDRXADDR(sc, 0));
if (error)
goto out;
sc->sc_mr1 &= ~MR1_JPSM;
if (ifp->if_mtu > ETHERMTU)
/* Enable Jumbo Packet Support Mode */
sc->sc_mr1 |= MR1_JPSM;
/* Set fifos, media modes. */
EMAC_WRITE(sc, EMAC_MR1, sc->sc_mr1);
ETHER_LOCK(ec);
ETHER_FIRST_MULTI(step, ec, enm);
while (enm != NULL) {
if (memcmp(enm->enm_addrlo,
enm->enm_addrhi, ETHER_ADDR_LEN) != 0) {
/*
* We must listen to a range of multicast addresses.
* For now, just accept all multicasts, rather than
* trying to set only those filter bits needed to match
* the range. (At this time, the only use of address
* ranges is for IP multicast routing, for which the
* range is big enough to require all bits set.)
*/
gaht[0] = gaht[1] = gaht[2] = gaht[3] =
gaht[4] = gaht[5] = gaht[6] = gaht[7] = mask;
break;
}
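		/*
		 * For individual (non-range) addresses, a hash of the MAC
		 * address selects one bit in the GAHT registers.  The exact
		 * bit selection the EMAC uses (CRC direction, which bits,
		 * register width) is hardware-specific and not shown here;
		 * this sketch only illustrates the generic NetBSD pattern,
		 * assuming the 256-bit hash table variant:
		 */
#if 0	/* illustrative only */
		uint32_t crc, bit;

		crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
		bit = crc >> (32 - 8);		/* index into 256-bit table */
		gaht[bit / 32] |= 1U << (bit % 32);
		ETHER_NEXT_MULTI(step, enm);
#endif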
count = 0;
/*
* Go through our Tx list and free mbufs for those
* frames that have been transmitted.
*/
for (i = sc->sc_txsdirty; sc->sc_txsfree != EMAC_TXQUEUELEN;
i = EMAC_NEXTTXS(i), sc->sc_txsfree++) {
txs = &sc->sc_txsoft[i];
/*
* If there are no more pending transmissions, cancel the watchdog
* timer.
*/
if (sc->sc_txsfree == EMAC_TXQUEUELEN)
ifp->if_timer = 0;
if (count != 0)
rnd_add_uint32(&sc->rnd_source, count);
return handled;
}
/*
* Reset functions
*/
static void
emac_soft_reset(struct emac_softc *sc)
{
uint32_t sdr;
int t = 0;
	/*
	 * The PHY must provide a TX clock in order to perform a soft
	 * reset of the EMAC.  If none is present, select the internal
	 * clock (SDR0_MFR[E0CS, E1CS]); after the soft reset, select
	 * the external clock again.
	 */
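	/*
	 * The reset sequence itself is not part of this excerpt.  Its core
	 * is a write of the soft-reset bit in MR0 followed by polling until
	 * the EMAC clears it again; a sketch (MR0_SRST is assumed to be the
	 * soft-reset bit, and the SDR0_MFR clock selection around it is
	 * omitted):
	 */
#if 0	/* illustrative only */
	EMAC_WRITE(sc, EMAC_MR0, MR0_SRST);
	while ((EMAC_READ(sc, EMAC_MR0) & MR0_SRST) != 0 && t++ < 1000000)
		continue;
#endif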
/* try to get more packets going */
if_schedule_deferred_start(&sc->sc_ethercom.ec_if);
return handled;
}
int
emac_rxeob_intr(void *arg)
{
struct emac_softc *sc = arg;
struct ifnet *ifp = &sc->sc_ethercom.ec_if;
struct emac_rxsoft *rxs;
struct mbuf *m;
uint32_t rxstat, count;
int i, len;
EMAC_EVCNT_INCR(&sc->sc_ev_rxintr);
count = 0;
for (i = sc->sc_rxptr; ; i = EMAC_NEXTRX(i)) {
rxs = &sc->sc_rxsoft[i];
EMAC_CDRXSYNC(sc, i,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
rxstat = sc->sc_rxdescs[i].md_stat_ctrl;
if (rxstat & MAL_RX_EMPTY) {
/*
* We have processed all of the receive buffers.
*/
/* Flush current empty descriptor */
EMAC_CDRXSYNC(sc, i,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
break;
}
/*
* If an error occurred, update stats, clear the status
* word, and leave the packet buffer in place. It will
* simply be reused the next time the ring comes around.
*/
if (rxstat & (EMAC_RXS_OE | EMAC_RXS_BP | EMAC_RXS_SE |
EMAC_RXS_AE | EMAC_RXS_BFCS | EMAC_RXS_PTL | EMAC_RXS_ORE |
EMAC_RXS_IRE)) {
#define PRINTERR(bit, str) \
if (rxstat & (bit)) \
aprint_error_ifnet(ifp, \
"receive error: %s\n", str)
if_statinc(ifp, if_ierrors);
PRINTERR(EMAC_RXS_OE, "overrun error");
PRINTERR(EMAC_RXS_BP, "bad packet");
PRINTERR(EMAC_RXS_RP, "runt packet");
PRINTERR(EMAC_RXS_SE, "short event");
PRINTERR(EMAC_RXS_AE, "alignment error");
PRINTERR(EMAC_RXS_BFCS, "bad FCS");
PRINTERR(EMAC_RXS_PTL, "packet too long");
PRINTERR(EMAC_RXS_ORE, "out of range error");
PRINTERR(EMAC_RXS_IRE, "in range error");
#undef PRINTERR
EMAC_INIT_RXDESC(sc, i);
continue;
}
/*
* No errors; receive the packet. Note, the 405GP emac
* includes the CRC with every packet.
*/
len = sc->sc_rxdescs[i].md_data_len - ETHER_CRC_LEN;
/*
* If the packet is small enough to fit in a
* single header mbuf, allocate one and copy
* the data into it. This greatly reduces
* memory consumption when we receive lots
* of small packets.
*
* Otherwise, we add a new buffer to the receive
* chain. If this fails, we drop the packet and
* recycle the old buffer.
*/
if (emac_copy_small != 0 && len <= MHLEN) {
MGETHDR(m, M_DONTWAIT, MT_DATA);
if (m == NULL)
goto dropit;
memcpy(mtod(m, void *),
mtod(rxs->rxs_mbuf, void *), len);
EMAC_INIT_RXDESC(sc, i);
bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
rxs->rxs_dmamap->dm_mapsize,
BUS_DMASYNC_PREREAD);
} else {
m = rxs->rxs_mbuf;
if (emac_add_rxbuf(sc, i) != 0) {
dropit:
if_statinc(ifp, if_ierrors);
EMAC_INIT_RXDESC(sc, i);
bus_dmamap_sync(sc->sc_dmat,
rxs->rxs_dmamap, 0,
rxs->rxs_dmamap->dm_mapsize,
BUS_DMASYNC_PREREAD);
continue;
}
}
int
emac_rxde_intr(void *arg)
{
struct emac_softc *sc = arg;
int i;
EMAC_EVCNT_INCR(&sc->sc_ev_rxde);
aprint_error_dev(sc->sc_dev, "emac_rxde_intr\n");
/*
* XXX!
* This is a bit drastic; we just drop all descriptors that aren't
* "clean". We should probably send any that are up the stack.
*/
for (i = 0; i < EMAC_NRXDESC; i++) {
EMAC_CDRXSYNC(sc, i,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
if (sc->sc_rxdescs[i].md_data_len != MCLBYTES)
EMAC_INIT_RXDESC(sc, i);
}