/*-
* Copyright (c) 2001 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Device driver for the Adaptec AIC-6915 (``Starfire'')
* 10/100 Ethernet controller.
*/
/*
 * NOTE(review): this is the resource-allocation tail of the attach
 * routine.  The function's opening (signature, softc/bus setup, and
 * the declarations of sc, error, seg, rseg, i, enaddr, ifp) lies
 * above the visible region — confirm against the full file.
 */
/*
 * If we're I/O mapped, the functional register handle is
 * the same as the base handle. If we're memory mapped,
 * carve off a chunk of the register space for the functional
 * registers, to save on arithmetic later.
 */
if (sc->sc_iomapped)
sc->sc_sh_func = sc->sc_sh;
else {
if ((error = bus_space_subregion(sc->sc_st, sc->sc_sh,
SF_GENREG_OFFSET, SF_FUNCREG_SIZE, &sc->sc_sh_func)) != 0) {
aprint_error_dev(sc->sc_dev, "unable to sub-region "
"functional registers, error = %d\n", error);
return;
}
}
/*
 * Initialize the transmit threshold for this interface. The
 * manual describes the default as 4 * 16 bytes. We start out
 * at 10 * 16 bytes, to avoid a bunch of initial underruns on
 * several platforms.
 */
sc->sc_txthresh = 10;
/*
 * Allocate the control data structures, and create and load the
 * DMA map for it.  On any failure we unwind everything allocated
 * so far via the fail_* labels below (classic goto-cleanup chain).
 */
if ((error = bus_dmamem_alloc(sc->sc_dmat,
sizeof(struct sf_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
BUS_DMA_NOWAIT)) != 0) {
aprint_error_dev(sc->sc_dev,
"unable to allocate control data, error = %d\n", error);
goto fail_0;
}
if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
sizeof(struct sf_control_data), (void **)&sc->sc_control_data,
BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
aprint_error_dev(sc->sc_dev,
"unable to map control data, error = %d\n", error);
goto fail_1;
}
if ((error = bus_dmamap_create(sc->sc_dmat,
sizeof(struct sf_control_data), 1,
sizeof(struct sf_control_data), 0, BUS_DMA_NOWAIT,
&sc->sc_cddmamap)) != 0) {
aprint_error_dev(sc->sc_dev, "unable to create control data "
"DMA map, error = %d\n", error);
goto fail_2;
}
if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
sc->sc_control_data, sizeof(struct sf_control_data), NULL,
BUS_DMA_NOWAIT)) != 0) {
aprint_error_dev(sc->sc_dev, "unable to load control data "
"DMA map, error = %d\n", error);
goto fail_3;
}
/*
 * Create the transmit buffer DMA maps.  Each map can hold up to
 * SF_NTXFRAGS segments of a cluster-sized (MCLBYTES) packet.
 */
for (i = 0; i < SF_NTXDESC; i++) {
if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
SF_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
&sc->sc_txsoft[i].ds_dmamap)) != 0) {
aprint_error_dev(sc->sc_dev,
"unable to create tx DMA map %d, error = %d\n", i,
error);
goto fail_4;
}
}
/*
 * Create the receive buffer DMA maps (one contiguous cluster each).
 */
for (i = 0; i < SF_NRXDESC; i++) {
if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
MCLBYTES, 0, BUS_DMA_NOWAIT,
&sc->sc_rxsoft[i].ds_dmamap)) != 0) {
aprint_error_dev(sc->sc_dev,
"unable to create rx DMA map %d, error = %d\n", i,
error);
goto fail_5;
}
}
/*
 * Reset the chip to a known state.
 */
sf_reset(sc);
/*
 * Read the Ethernet address from the EEPROM.
 * NOTE(review): bytes are read back-to-front starting at EEPROM
 * offset 15 + (ETHER_ADDR_LEN - 1); presumably the chip stores the
 * station address in reverse order there — confirm against the
 * AIC-6915 documentation.
 */
for (i = 0; i < ETHER_ADDR_LEN; i++)
enaddr[i] = sf_read_eeprom(sc, (15 + (ETHER_ADDR_LEN - 1)) - i);
/*
 * Make sure the interface is shutdown during reboot.
 */
if (pmf_device_register1(sc->sc_dev, NULL, NULL, sf_shutdown))
pmf_class_network_register(sc->sc_dev, ifp);
else
aprint_error_dev(sc->sc_dev,
"couldn't establish power handler\n");
return;
/*
 * Free any resources we've allocated during the failed attach
 * attempt. Do this in reverse order and fall through.
 */
fail_5:
for (i = 0; i < SF_NRXDESC; i++) {
if (sc->sc_rxsoft[i].ds_dmamap != NULL)
bus_dmamap_destroy(sc->sc_dmat,
sc->sc_rxsoft[i].ds_dmamap);
}
fail_4:
for (i = 0; i < SF_NTXDESC; i++) {
if (sc->sc_txsoft[i].ds_dmamap != NULL)
bus_dmamap_destroy(sc->sc_dmat,
sc->sc_txsoft[i].ds_dmamap);
}
bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
fail_3:
bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
fail_2:
bus_dmamem_unmap(sc->sc_dmat, (void *) sc->sc_control_data,
sizeof(struct sf_control_data));
fail_1:
bus_dmamem_free(sc->sc_dmat, &seg, rseg);
fail_0:
return;
}
/*
 * sf_shutdown:
 *
 * Shutdown hook -- make sure the interface is stopped at reboot.
 */
static bool
sf_shutdown(device_t self, int howto)
{
struct sf_softc *sc;
/*
 * NOTE(review): the text below the declaration is NOT a shutdown
 * body — it references identifiers (opending, producer, ifp, txd,
 * ds, dmamap, m0, m, last, error) never declared here and reads as
 * the interior of the transmit-start routine.  This chunk appears
 * spliced; `sc` is also never initialized in the visible text.
 * Do not assume this compiles as shown — verify against the full
 * file before changing anything here.
 */
/*
 * Remember the previous number of pending transmits.
 */
opending = sc->sc_txpending;
/*
 * Find out where we're sitting.
 */
producer = SF_TXDINDEX_TO_HOST(
TDQPI_HiPrTxProducerIndex_get(
sf_funcreg_read(sc, SF_TxDescQueueProducerIndex)));
/*
 * Loop through the send queue, setting up transmit descriptors
 * until we drain the queue, or use up all available transmit
 * descriptors. Leave a blank one at the end for sanity's sake.
 */
while (sc->sc_txpending < (SF_NTXDESC - 1)) {
/*
 * Grab a packet off the queue (peek only; dequeued below once
 * we are committed to sending it).
 */
IFQ_POLL(&ifp->if_snd, m0);
if (m0 == NULL)
break;
m = NULL;
/*
 * Get the transmit descriptor.
 */
txd = &sc->sc_txdescs[producer];
ds = &sc->sc_txsoft[producer];
dmamap = ds->ds_dmamap;
/*
 * Load the DMA map. If this fails, the packet either
 * didn't fit in the allotted number of frags, or we were
 * short on resources. In this case, we'll copy the packet
 * into a single freshly allocated mbuf/cluster and try
 * again.
 */
if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
MGETHDR(m, M_DONTWAIT, MT_DATA);
if (m == NULL) {
aprint_error_dev(sc->sc_dev,
"unable to allocate Tx mbuf\n");
break;
}
MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
if (m0->m_pkthdr.len > MHLEN) {
MCLGET(m, M_DONTWAIT);
if ((m->m_flags & M_EXT) == 0) {
aprint_error_dev(sc->sc_dev,
"unable to allocate Tx cluster\n");
m_freem(m);
break;
}
}
m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
if (error) {
aprint_error_dev(sc->sc_dev,
"unable to load Tx buffer, error = %d\n",
error);
break;
}
}
/*
 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
 * Dequeue for real; if we copied into a fresh mbuf above,
 * free the original chain and send the copy instead.
 */
IFQ_DEQUEUE(&ifp->if_snd, m0);
if (m != NULL) {
m_freem(m0);
m0 = m;
}
/* Initialize the descriptor. */
txd->td_word0 =
htole32(TD_W0_ID | TD_W0_CRCEN | m0->m_pkthdr.len);
if (producer == (SF_NTXDESC - 1))
txd->td_word0 |= TD_W0_END;
txd->td_word1 = htole32(dmamap->dm_nsegs);
for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
txd->td_frags[seg].fr_addr =
htole32(dmamap->dm_segs[seg].ds_addr);
txd->td_frags[seg].fr_len =
htole32(dmamap->dm_segs[seg].ds_len);
}
/* Sync the descriptor and the DMA map. */
SF_CDTXDSYNC(sc, producer, BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
BUS_DMASYNC_PREWRITE);
/*
 * Store a pointer to the packet so we can free it later.
 */
ds->ds_mbuf = m0;
/* Advance the Tx pointer. */
sc->sc_txpending++;
last = producer;
producer = SF_NEXTTX(producer);
/*
 * Pass the packet to any BPF listeners.
 */
bpf_mtap(ifp, m0, BPF_D_OUT);
}
if (sc->sc_txpending != opending) {
KASSERT(last != -1);
/*
 * We enqueued packets. Cause a transmit interrupt to
 * happen on the last packet we enqueued, and give the
 * new descriptors to the chip by writing the new
 * producer index.
 * NOTE(review): the producer-index write and the closing
 * brace of this if (and of the function) are not visible
 * in this chunk.
 */
sc->sc_txdescs[last].td_word0 |= TD_W0_INTR;
SF_CDTXDSYNC(sc, last, BUS_DMASYNC_PREWRITE);
/*
 * NOTE(review): the following is a receive-path fragment from a
 * different function (it uses word0, rxidx, ds, ifp, len, m — none
 * declared here) and the `continue` statements imply an enclosing
 * loop outside the visible region.  Verify against the full file.
 */
/*
 * No errors; receive the packet. Note that we have
 * configured the Starfire to NOT transfer the CRC
 * with the packet.
 */
len = RCD_W0_Length(word0);
#ifdef __NO_STRICT_ALIGNMENT
/*
 * Alignment doesn't matter on this platform: hand the filled
 * cluster up the stack and attach a new one to the descriptor.
 * If the new-buffer allocation fails, we are out of memory,
 * and must drop the packet and recycle the buffer that's
 * already attached to this descriptor.
 */
m = ds->ds_mbuf;
if (sf_add_rxbuf(sc, rxidx) != 0) {
if_statinc(ifp, if_ierrors);
SF_INIT_RXDESC(sc, rxidx);
bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
continue;
}
#else
/*
 * The Starfire's receive buffer must be 4-byte aligned.
 * But this means that the data after the Ethernet header
 * is misaligned. We must allocate a new buffer and
 * copy the data, shifted forward 2 bytes so the payload
 * ends up naturally aligned.
 */
MGETHDR(m, M_DONTWAIT, MT_DATA);
if (m == NULL) {
dropit:
/* Drop the frame and recycle the descriptor's buffer. */
if_statinc(ifp, if_ierrors);
SF_INIT_RXDESC(sc, rxidx);
bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
continue;
}
MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
if (len > (MHLEN - 2)) {
MCLGET(m, M_DONTWAIT);
if ((m->m_flags & M_EXT) == 0) {
m_freem(m);
goto dropit;
}
}
m->m_data += 2;
/*
 * Note that we use cluster for incoming frames, so the
 * buffer is virtually contiguous.
 */
memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *), len);
/* Allow the receive descriptor to continue using its mbuf. */
SF_INIT_RXDESC(sc, rxidx);
bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */
/*
 * NOTE(review): statistics-harvest fragment from another function
 * (`p`, `stats`, `i` declared out of view).  Walks the on-chip
 * statistics block word by word, copying each counter into the
 * local stats structure and writing 0 back to clear it.  Assumes
 * `stats` is laid out as consecutive uint32_t counters starting at
 * TransmitOKFrames, mirroring SF_STATS_BASE — confirm in sf_stats.
 */
p = &stats.TransmitOKFrames;
for (i = 0; i < (sizeof(stats) / sizeof(uint32_t)); i++) {
*p++ = sf_genreg_read(sc,
SF_STATS_BASE + (i * sizeof(uint32_t)));
sf_genreg_write(sc, SF_STATS_BASE + (i * sizeof(uint32_t)), 0);
}
/*
 * NOTE(review): receive-filter programming fragment from another
 * function; it ends mid-function (the `done:` label targeted below
 * and the remainder of the `allmulti` path lie past the visible
 * region).  Verify against the full file before editing.
 */
/* Start by clearing the perfect and hash tables. */
for (i = 0; i < SF_PERFECT_SIZE; i += sizeof(uint32_t))
sf_genreg_write(sc, SF_PERFECT_BASE + i, 0);
for (i = 0; i < SF_HASH_SIZE; i += sizeof(uint32_t))
sf_genreg_write(sc, SF_HASH_BASE + i, 0);
/*
 * Clear the perfect and hash mode bits.
 */
sc->sc_RxAddressFilteringCtl &=
~(RAFC_PerfectFilteringMode(3) | RAFC_HashFilteringMode(3));
/*
 * Set normal perfect filtering mode.
 */
sc->sc_RxAddressFilteringCtl |= RAFC_PerfectFilteringMode(1);
/*
 * First, write the station address to the perfect filter
 * table.
 */
sf_set_filter_perfect(sc, 0, CLLADDR(ifp->if_sadl));
/*
 * Now set the hash bits for each multicast address in our
 * list.  The ethercom multicast list must be walked under
 * ETHER_LOCK; every exit path below unlocks before branching.
 */
ETHER_LOCK(ec);
ETHER_FIRST_MULTI(step, ec, enm);
if (enm == NULL) {
ETHER_UNLOCK(ec);
goto done;
}
while (enm != NULL) {
if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
/*
 * We must listen to a range of multicast addresses.
 * For now, just accept all multicasts, rather than
 * trying to set only those filter bits needed to match
 * the range. (At this time, the only use of address
 * ranges is for IP multicast routing, for which the
 * range is big enough to require all bits set.)
 */
ETHER_UNLOCK(ec);
goto allmulti;
}
sf_set_filter_hash(sc, enm->enm_addrlo);
ETHER_NEXT_MULTI(step, enm);
}
ETHER_UNLOCK(ec);
/*
 * Set "hash only multicast dest, match regardless of VLAN ID".
 */
sc->sc_RxAddressFilteringCtl |= RAFC_HashFilteringMode(2);
goto done;
allmulti:
/*
 * XXX RAFC_PassMulticast is sub-optimal if using VLAN mode.
 */
sc->sc_RxAddressFilteringCtl |= RAFC_PassMulticast;
ifp->if_flags |= IFF_ALLMULTI;