/*-
* Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* 3. The names of the authors may not be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*/
/*
* Copyright (c) 2001 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Jason R. Thorpe for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Device driver for Alchemy Semiconductor Au1x00 Ethernet Media
* Access Controller.
*
* TODO:
*
* Better Rx buffer management; we want to get new Rx buffers
* to the chip more quickly than we currently do.
*/
for (i = 0; i < ADMSW_NRXHDESC; i++) {
if (sc->sc_rxhsoft[i].ds_mbuf == NULL) {
if (admsw_add_rxhbuf(sc, i) != 0)
panic("admsw_init_bufs\n");
} else
ADMSW_INIT_RXHDESC(sc, i);
}
for (i = 0; i < ADMSW_NRXLDESC; i++) {
if (sc->sc_rxlsoft[i].ds_mbuf == NULL) {
if (admsw_add_rxlbuf(sc, i) != 0)
panic("admsw_init_bufs\n");
} else
ADMSW_INIT_RXLDESC(sc, i);
}
/* Wait for DMA to complete. Overkill. In 3ms, we can
* send at least two entire 1500-byte packets at 10 Mb/s.
*/
DELAY(3000);
/* The datasheet recommends that we move all PHYs to reset
* state prior to software reset.
*/
REG_WRITE(PHY_CNTL2_REG,
REG_READ(PHY_CNTL2_REG) & ~PHY_CNTL2_PHYR_MASK);
/* Reset the switch. */
REG_WRITE(ADMSW_SW_RES, 0x1);
DELAY(100 * 1000);
REG_WRITE(ADMSW_BOOT_DONE, ADMSW_BOOT_DONE_BO);
/* begin old code */
REG_WRITE(CPUP_CONF_REG,
CPUP_CONF_DCPUP | CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK |
CPUP_CONF_DMCP_MASK);
/*
* While in DDB, we stop servicing interrupts, RX ring
* fills up and when free block counter falls behind FC
* threshold, the switch starts to emit 802.3x PAUSE
* frames. This can upset peer switches.
*
* Stop this from happening by disabling FC and D2
* thresholds.
*/
REG_WRITE(FC_TH_REG,
REG_READ(FC_TH_REG) & ~(FC_TH_FCS_MASK | FC_TH_D2S_MASK));
admsw_setvlan(sc, vlan_matrix);
for (i = 0; i < SW_DEVS; i++) {
REG_WRITE(MAC_WT1_REG,
sc->sc_enaddr[2] |
(sc->sc_enaddr[3]<<8) |
(sc->sc_enaddr[4]<<16) |
((sc->sc_enaddr[5]+i)<<24));
REG_WRITE(MAC_WT0_REG, (i<<MAC_WT0_VLANID_SHIFT) |
(sc->sc_enaddr[0]<<16) | (sc->sc_enaddr[1]<<24) |
MAC_WT0_WRITE | MAC_WT0_VLANID_EN);
/* Make sure the interface is shutdown during reboot. */
sc->sc_sdhook = shutdownhook_establish(admsw_shutdown, sc);
if (sc->sc_sdhook == NULL)
printf("%s: WARNING: unable to establish shutdown hook\n",
device_xname(sc->sc_dev));
/* leave interrupts and cpu port disabled */
return;
}
/*
 * admsw_shutdown:
 *
 *	Shutdown-hook callback: stop every switch port so the
 *	hardware is quiescent across a reboot.
 */
static void
admsw_shutdown(void *arg)
{
	struct admsw_softc *sc = arg;
	int port;

	/* Bring each of the SW_DEVS logical interfaces down hard. */
	for (port = 0; port < SW_DEVS; port++)
		admsw_stop(&sc->sc_ethercom[port].ec_if, 1);
}
/*
 * Loop through the send queues, setting up transmit descriptors
 * until we drain the queues, or use up all available transmit
 * descriptors.
 */
for (;;) {
vlan++;
if (vlan == SW_DEVS)
vlan = 0;
i = vlan;
for (;;) {
ifp = &sc->sc_ethercom[i].ec_if;
if ((ifp->if_flags & IFF_RUNNING) == 0)
continue;
/* Grab a packet off the queue. */
IFQ_POLL(&ifp->if_snd, m0);
if (m0 != NULL)
break;
i++;
if (i == SW_DEVS)
i = 0;
if (i == vlan)
return;
}
vlan = i;
m = NULL;
/* Get a spare descriptor. */
if (sc->sc_txfree == 0) {
/* No more slots left. */
ADMSW_EVCNT_INCR(&sc->sc_ev_txstall);
break;
}
nexttx = sc->sc_txnext;
desc = &sc->sc_txldescs[nexttx];
ds = &sc->sc_txlsoft[nexttx];
dmamap = ds->ds_dmamap;
/*
* Load the DMA map. If this fails, the packet either
* didn't fit in the allotted number of segments, or we
* were short on resources. In this case, we'll copy
* and try again.
*/
if (m0->m_pkthdr.len < ETHER_MIN_LEN ||
bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
MGETHDR(m, M_DONTWAIT, MT_DATA);
if (m == NULL) {
printf("%s: unable to allocate Tx mbuf\n",
device_xname(sc->sc_dev));
break;
}
if (m0->m_pkthdr.len > MHLEN) {
MCLGET(m, M_DONTWAIT);
if ((m->m_flags & M_EXT) == 0) {
printf("%s: unable to allocate Tx "
"cluster\n", device_xname(sc->sc_dev));
m_freem(m);
break;
}
}
m->m_pkthdr.csum_flags = m0->m_pkthdr.csum_flags;
m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
if (m->m_pkthdr.len < ETHER_MIN_LEN) {
if (M_TRAILINGSPACE(m) < ETHER_MIN_LEN - m->m_pkthdr.len)
panic("admsw_start: M_TRAILINGSPACE\n");
memset(mtod(m, uint8_t *) + m->m_pkthdr.len, 0,
ETHER_MIN_LEN - ETHER_CRC_LEN - m->m_pkthdr.len);
m->m_pkthdr.len = m->m_len = ETHER_MIN_LEN;
}
error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
if (error) {
printf("%s: unable to load Tx buffer, error = "
"%d\n", device_xname(sc->sc_dev), error);
break;
}
}
IFQ_DEQUEUE(&ifp->if_snd, m0);
if (m != NULL) {
m_freem(m0);
m0 = m;
}
/*
* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
*/
/* printf("rxintr\n"); */
if (high)
panic("admsw_rxintr: high priority packet\n");
#ifdef ADMSW_EVENT_COUNTERS
int pkts = 0;
#endif
#if 1
ADMSW_CDRXLSYNC(sc, sc->sc_rxptr,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0)
ADMSW_CDRXLSYNC(sc, sc->sc_rxptr,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
else {
i = sc->sc_rxptr;
do {
ADMSW_CDRXLSYNC(sc, i,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
i = ADMSW_NEXTRXL(i);
/* the ring is empty, just return. */
if (i == sc->sc_rxptr)
return;
ADMSW_CDRXLSYNC(sc, i,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
} while (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN);
ADMSW_CDRXLSYNC(sc, i,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
ADMSW_CDRXLSYNC(sc, sc->sc_rxptr,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0)
ADMSW_CDRXLSYNC(sc, sc->sc_rxptr,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
else {
ADMSW_CDRXLSYNC(sc, sc->sc_rxptr,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* We've fallen behind the chip: catch it. */
printf("%s: RX ring resync, base=%x, work=%x, %d -> %d\n",
device_xname(sc->sc_dev), REG_READ(RECV_LBADDR_REG),
REG_READ(RECV_LWADDR_REG), sc->sc_rxptr, i);
sc->sc_rxptr = i;
ADMSW_EVCNT_INCR(&sc->sc_ev_rxsync);
}
}
#endif
for (i = sc->sc_rxptr;; i = ADMSW_NEXTRXL(i)) {
ds = &sc->sc_rxlsoft[i];
ADMSW_CDRXLSYNC(sc, i,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
if (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN) {
ADMSW_CDRXLSYNC(sc, i,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
break;
}
/* Pass it on. */
if_percpuq_enqueue(ifp->if_percpuq, m);
}
#ifdef ADMSW_EVENT_COUNTERS
if (pkts)
ADMSW_EVCNT_INCR(&sc->sc_ev_rxintr);
if (pkts == ADMSW_NRXLDESC)
ADMSW_EVCNT_INCR(&sc->sc_ev_rxstall);
#endif
/* Update the receive pointer. */
sc->sc_rxptr = i;
}
/*
* admsw_init: [ifnet interface function]
*
* Initialize the interface. Must be called at splnet().
*/
static int
admsw_init(struct ifnet *ifp)
{
struct admsw_softc *sc = ifp->if_softc;
/* printf("admsw_init called\n"); */
if ((ifp->if_flags & IFF_RUNNING) == 0) {
if (sc->ndevs == 0) {
admsw_init_bufs(sc);
admsw_reset(sc);
REG_WRITE(CPUP_CONF_REG,
CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK |
CPUP_CONF_DMCP_MASK);
/* Clear all pending interrupts */
REG_WRITE(ADMSW_INT_ST, INT_MASK);
/* Mark the interface as down and cancel the watchdog timer. */
ifp->if_flags &= ~IFF_RUNNING;
ifp->if_timer = 0;
return;
}
/*
 * admsw_set_filter:
 *
 *	Set up the receive filter: decide, per switch port, whether
 *	unknown-unicast and multicast frames are forwarded to the CPU
 *	port, based on IFF_PROMISC and the interface's multicast list.
 */
static void
admsw_set_filter(struct admsw_softc *sc)
{
	int i;
	/* Per-port bitmasks (one bit per switch port, via vlan_matrix[]):
	 * allmc   - ports that need all multicast (range punt / ALLMULTI)
	 * anymc   - ports with at least one exact multicast address
	 * promisc - ports running in promiscuous mode
	 */
	uint32_t allmc, anymc, conf, promisc;
	struct ether_multi *enm;
	struct ethercom *ec;
	struct ifnet *ifp;
	struct ether_multistep step;

	/* Find which ports should be operated in promisc mode. */
	allmc = anymc = promisc = 0;
	for (i = 0; i < SW_DEVS; i++) {
		ec = &sc->sc_ethercom[i];
		ifp = &ec->ec_if;
		if (ifp->if_flags & IFF_PROMISC)
			promisc |= vlan_matrix[i];
		/* Recompute ALLMULTI from scratch each pass. */
		ifp->if_flags &= ~IFF_ALLMULTI;
		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			/* Address ranges can't be matched exactly in h/w:
			 * fall back to accepting all multicast on this port. */
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0) {
				printf("%s: punting on mcast range\n",
				    __func__);
				ifp->if_flags |= IFF_ALLMULTI;
				allmc |= vlan_matrix[i];
				break;
			}
			/* Load h/w with mcast address, port = CPU */
			/* NOTE(review): no MAC_WT register write (and no
			 * "anymc |= vlan_matrix[i]") appears here, so anymc
			 * stays 0 and exact multicast addresses are never
			 * programmed — looks like dropped code; confirm
			 * against the full source. */
			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);
	}

	conf = REG_READ(CPUP_CONF_REG);
	/* 1 Disable forwarding of unknown & multicast packets to
	 * CPU on all ports.
	 * 2 Enable forwarding of unknown & multicast packets to
	 * CPU on ports where IFF_PROMISC or IFF_ALLMULTI is set.
	 */
	conf |= CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK;
	/* Enable forwarding of unknown packets to CPU on selected ports. */
	/* All D{UN,MC}P bits were just set above, so XOR-ing a port's bit
	 * clears it, i.e. re-enables forwarding to the CPU for that port. */
	conf ^= ((promisc << CPUP_CONF_DUNP_SHIFT) & CPUP_CONF_DUNP_MASK);
	conf ^= ((allmc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK);
	/* anymc is always 0 here (see NOTE above), so this XOR is a no-op. */
	conf ^= ((anymc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK);
	REG_WRITE(CPUP_CONF_REG, conf);
}
/*
* admsw_add_rxbuf:
*
* Add a receive buffer to the indicated descriptor.
*/
int
admsw_add_rxbuf(struct admsw_softc *sc, int idx, int high)
{
struct admsw_descsoft *ds;
struct mbuf *m;
int error;
if (high)
ds = &sc->sc_rxhsoft[idx];
else
ds = &sc->sc_rxlsoft[idx];
MGETHDR(m, M_DONTWAIT, MT_DATA);
if (m == NULL)
return ENOBUFS;