/*
* Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Jason R. Thorpe for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*******************************************************************************
  Copyright (c) 2001-2005, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
/*
* Device driver for the Intel i8254x family of Gigabit Ethernet chips.
*
* TODO (in order of importance):
*
* - Check XXX'ed comments
* - TX Multi queue improvement (refine queue selection logic)
* - Split header buffer for newer descriptors
* - EEE (Energy Efficiency Ethernet) for I354
* - Virtual Function
* - Set LED correctly (based on contents in EEPROM)
* - Rework how parameters are loaded from the EEPROM.
*/
#include <netinet/in.h> /* XXX for struct ip */
#include <netinet/in_systm.h> /* XXX for struct ip */
#include <netinet/ip.h> /* XXX for struct ip */
#include <netinet/ip6.h> /* XXX for struct ip6_hdr */
#include <netinet/tcp.h> /* XXX for struct tcphdr */
/*
* Transmit descriptor list size. Due to errata, we can only have
* 256 hardware descriptors in the ring on < 82544, but we use 4096
* on >= 82544. We tell the upper layers that they can queue a lot
* of packets, and we go ahead and manage up to 64 (16 for the i82547)
* of them at a time.
*
* We allow up to 64 DMA segments per packet. Pathological packet
* chains containing many small mbufs have been observed in zero-copy
* situations with jumbo frames. If a mbuf chain has more than 64 DMA segments,
* m_defrag() is called to reduce it.
*/
#define WM_NTXSEGS 64 /* max DMA segments per packet (see above) */
#define WM_IFQUEUELEN 256 /* if_snd queue depth told to upper layers */
#define WM_TXQUEUELEN_MAX 64 /* Tx jobs managed at a time */
#define WM_TXQUEUELEN_MAX_82547 16 /* i82547 manages fewer at a time */
#define WM_TXQUEUELEN(txq) ((txq)->txq_num)
#define WM_TXQUEUELEN_MASK(txq) (WM_TXQUEUELEN(txq) - 1) /* txq_num is a power of two */
#define WM_TXQUEUE_GC(txq) (WM_TXQUEUELEN(txq) / 8)
#define WM_NTXDESC_82542 256 /* errata: ring limit on < 82544 */
#define WM_NTXDESC_82544 4096 /* ring size on >= 82544 */
#define WM_NTXDESC(txq) ((txq)->txq_ndesc)
#define WM_NTXDESC_MASK(txq) (WM_NTXDESC(txq) - 1) /* txq_ndesc is a power of two */
#define WM_TXDESCS_SIZE(txq) (WM_NTXDESC(txq) * (txq)->txq_descsize)
#define WM_NEXTTX(txq, x) (((x) + 1) & WM_NTXDESC_MASK(txq)) /* next desc index, wraps */
#define WM_NEXTTXS(txq, x) (((x) + 1) & WM_TXQUEUELEN_MASK(txq)) /* next job index, wraps */
#define WM_MAXTXDMA (2 * round_page(IP_MAXPACKET)) /* for TSO */
/*
* Receive descriptor list size. We have one Rx buffer for normal
* sized packets. Jumbo packets consume 5 Rx buffers for a full-sized
* packet. We allocate 256 receive descriptors, each with a 2k
* buffer (MCLBYTES), which gives us room for 50 jumbo packets.
*/
#define WM_NRXDESC 256U /* Rx ring size; power of two (see above) */
#define WM_NRXDESC_MASK (WM_NRXDESC - 1)
#define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK) /* next Rx index, wraps */
#define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK) /* previous Rx index, wraps */
/*
* Software state for transmit jobs.
*/
struct wm_txsoft {
struct mbuf *txs_mbuf; /* head of our mbuf chain */
bus_dmamap_t txs_dmamap; /* our DMA map */
int txs_firstdesc; /* first descriptor in packet */
int txs_lastdesc; /* last descriptor in packet */
int txs_ndesc; /* # of descriptors used by this job */
};
/*
* Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
* buffer and a DMA map. For packets which fill more than one buffer, we chain
* them together.
*/
struct wm_rxsoft {
struct mbuf *rxs_mbuf; /* head of our mbuf chain */
bus_dmamap_t rxs_dmamap; /* our DMA map */
};
/* Software state for the transmit descriptors. */
int txq_num; /* must be a power of two */
struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
/* TX control data structures. */
int txq_ndesc; /* must be a power of two */
size_t txq_descsize; /* a tx descriptor size */
txdescs_t *txq_descs_u;
bus_dmamap_t txq_desc_dmamap; /* control data DMA map */
bus_dma_segment_t txq_desc_seg; /* control data segment */
int txq_desc_rseg; /* real number of control segment */
#define txq_desc_dma txq_desc_dmamap->dm_segs[0].ds_addr
#define txq_descs txq_descs_u->sctxu_txdescs
#define txq_nq_descs txq_descs_u->sctxu_nq_txdescs
bus_addr_t txq_tdt_reg; /* offset of TDT register */
int txq_free; /* number of free Tx descriptors */
int txq_next; /* next ready Tx descriptor */
int txq_sfree; /* number of free Tx jobs */
int txq_snext; /* next free Tx job */
int txq_sdirty; /* dirty Tx jobs */
/* These 4 variables are used only on the 82547. */
int txq_fifo_size; /* Tx FIFO size */
int txq_fifo_head; /* current head of FIFO */
uint32_t txq_fifo_addr; /* internal address of start of FIFO */
int txq_fifo_stall; /* Tx FIFO is stalled */
/*
* When ncpu > number of Tx queues, a Tx queue is shared by multiple
* CPUs. This queue intermediate them without block.
*/
pcq_t *txq_interq;
/*
* NEWQUEUE devices must use not ifp->if_flags but txq->txq_flags
* to manage Tx H/W queue's busy flag.
*/
int txq_flags; /* flags for H/W queue, see below */
#define WM_TXQ_NO_SPACE 0x1
#define WM_TXQ_LINKDOWN_DISCARD 0x2
bool txq_stopping;
bool txq_sending;
time_t txq_lastsent;
/* Checksum flags used for previous packet */
uint32_t txq_last_hw_cmd;
uint8_t txq_last_hw_fields;
uint16_t txq_last_hw_ipcs;
uint16_t txq_last_hw_tucs;
uint32_t txq_packets; /* for AIM */
uint32_t txq_bytes; /* for AIM */
#ifdef WM_EVENT_COUNTERS
/* TX event counters */
WM_Q_EVCNT_DEFINE(txq, txsstall); /* Stalled due to no txs */
WM_Q_EVCNT_DEFINE(txq, txdstall); /* Stalled due to no txd */
WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
WM_Q_EVCNT_DEFINE(txq, txdw); /* Tx descriptor interrupts */
WM_Q_EVCNT_DEFINE(txq, txqe); /* Tx queue empty interrupts */
/* XXX not used? */
WM_Q_EVCNT_DEFINE(txq, ipsum); /* IP checksums comp. */
WM_Q_EVCNT_DEFINE(txq, tusum); /* TCP/UDP cksums comp. */
WM_Q_EVCNT_DEFINE(txq, tusum6); /* TCP/UDP v6 cksums comp. */
WM_Q_EVCNT_DEFINE(txq, tso); /* TCP seg offload (IPv4) */
WM_Q_EVCNT_DEFINE(txq, tso6); /* TCP seg offload (IPv6) */
WM_Q_EVCNT_DEFINE(txq, tsopain); /* Painful header manip. for TSO */
WM_Q_EVCNT_DEFINE(txq, pcqdrop); /* Pkt dropped in pcq */
WM_Q_EVCNT_DEFINE(txq, descdrop); /* Pkt dropped in MAC desc ring */
/* other than toomanyseg */
/* Software state for the receive descriptors. */
struct wm_rxsoft rxq_soft[WM_NRXDESC];
/* RX control data structures. */
int rxq_ndesc; /* must be a power of two */
size_t rxq_descsize; /* a rx descriptor size */
rxdescs_t *rxq_descs_u;
bus_dmamap_t rxq_desc_dmamap; /* control data DMA map */
bus_dma_segment_t rxq_desc_seg; /* control data segment */
int rxq_desc_rseg; /* real number of control segment */
#define rxq_desc_dma rxq_desc_dmamap->dm_segs[0].ds_addr
#define rxq_descs rxq_descs_u->sctxu_rxdescs
#define rxq_ext_descs rxq_descs_u->sctxu_ext_rxdescs
#define rxq_nq_descs rxq_descs_u->sctxu_nq_rxdescs
bus_addr_t rxq_rdt_reg; /* offset of RDT register */
int rxq_ptr; /* next ready Rx desc/queue ent */
int rxq_discard;
int rxq_len;
struct mbuf *rxq_head;
struct mbuf *rxq_tail;
struct mbuf **rxq_tailp;
/*
* Software state per device.
*/
struct wm_softc {
device_t sc_dev; /* generic device information */
bus_space_tag_t sc_st; /* bus space tag */
bus_space_handle_t sc_sh; /* bus space handle */
bus_size_t sc_ss; /* bus space size */
bus_space_tag_t sc_iot; /* I/O space tag */
bus_space_handle_t sc_ioh; /* I/O space handle */
bus_size_t sc_ios; /* I/O space size */
bus_space_tag_t sc_flasht; /* flash registers space tag */
bus_space_handle_t sc_flashh; /* flash registers space handle */
bus_size_t sc_flashs; /* flash registers space size */
off_t sc_flashreg_offset; /*
* offset to flash registers from
* start of BAR
*/
bus_dma_tag_t sc_dmat; /* bus DMA tag */
struct ethercom sc_ethercom; /* Ethernet common data */
struct mii_data sc_mii; /* MII/media information */
pci_chipset_tag_t sc_pc;
pcitag_t sc_pcitag;
int sc_bus_speed; /* PCI/PCIX bus speed */
int sc_pcixe_capoff; /* PCI[Xe] capability reg offset */
uint16_t sc_pcidevid; /* PCI device ID */
wm_chip_type sc_type; /* MAC type */
int sc_rev; /* MAC revision */
wm_phy_type sc_phytype; /* PHY type */
uint8_t sc_sfptype; /* SFP type */
uint32_t sc_mediatype; /* Media type (Copper, Fiber, SERDES)*/
#define WM_MEDIATYPE_UNKNOWN 0x00
#define WM_MEDIATYPE_FIBER 0x01
#define WM_MEDIATYPE_COPPER 0x02
#define WM_MEDIATYPE_SERDES 0x03 /* Internal SERDES */
int sc_funcid; /* unit number of the chip (0 to 3) */
u_int sc_flags; /* flags; see below */
u_short sc_if_flags; /* last if_flags */
int sc_ec_capenable; /* last ec_capenable */
int sc_flowflags; /* 802.3x flow control flags */
uint16_t eee_lp_ability; /* EEE link partner's ability */
int sc_align_tweak;
void *sc_ihs[WM_MAX_NINTR]; /*
* interrupt cookie.
* - legacy and msi use sc_ihs[0] only
* - msix use sc_ihs[0] to sc_ihs[nintrs-1]
*/
pci_intr_handle_t *sc_intrs; /*
* legacy and msi use sc_intrs[0] only
* msix use sc_intrs[0] to sc_ihs[nintrs-1]
*/
int sc_nintrs; /* number of interrupts */
int sc_nvm_ver_major;
int sc_nvm_ver_minor;
int sc_nvm_ver_build;
int sc_nvm_addrbits; /* NVM address bits */
unsigned int sc_nvm_wordsize; /* NVM word size */
int sc_ich8_flash_base;
int sc_ich8_flash_bank_size;
int sc_nvm_k1_enabled;
int sc_nqueues;
struct wm_queue *sc_queue;
u_int sc_tx_process_limit; /* Tx proc. repeat limit in softint */
u_int sc_tx_intr_process_limit; /* Tx proc. repeat limit in H/W intr */
u_int sc_rx_process_limit; /* Rx proc. repeat limit in softint */
u_int sc_rx_intr_process_limit; /* Rx proc. repeat limit in H/W intr */
struct workqueue *sc_queue_wq;
bool sc_txrx_use_workqueue;
kmutex_t *sc_core_lock; /* lock for softc operations */
kmutex_t *sc_ich_phymtx; /*
* 82574/82583/ICH/PCH specific PHY
* mutex. For 82574/82583, the mutex
* is used for both PHY and NVM.
*/
kmutex_t *sc_ich_nvmmtx; /* ICH/PCH specific NVM mutex */
struct wm_phyop phy;
struct wm_nvmop nvm;
struct workqueue *sc_reset_wq;
struct work sc_reset_work;
volatile unsigned sc_reset_pending;
/*
* Hardware semaphores.
* Very complexed...
*/
/* Trivial acquire/release pair (no locking needed) */
static int wm_get_null(struct wm_softc *);
static void wm_put_null(struct wm_softc *);
/* EECD-based NVM serialization */
static int wm_get_eecd(struct wm_softc *);
static void wm_put_eecd(struct wm_softc *);
static int wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void wm_put_swsm_semaphore(struct wm_softc *);
static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
/* Per-family NVM acquire/release; installed in sc->nvm below */
static int wm_get_nvm_80003(struct wm_softc *);
static void wm_put_nvm_80003(struct wm_softc *);
static int wm_get_nvm_82571(struct wm_softc *);
static void wm_put_nvm_82571(struct wm_softc *);
/* Per-family PHY acquire/release; installed in sc->phy below */
static int wm_get_phy_82575(struct wm_softc *);
static void wm_put_phy_82575(struct wm_softc *);
static int wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void wm_put_swfwhw_semaphore(struct wm_softc *);
static int wm_get_swflag_ich8lan(struct wm_softc *); /* For PHY */
static void wm_put_swflag_ich8lan(struct wm_softc *);
static int wm_get_nvm_ich8lan(struct wm_softc *);
static void wm_put_nvm_ich8lan(struct wm_softc *);
static int wm_get_hw_semaphore_82573(struct wm_softc *);
static void wm_put_hw_semaphore_82573(struct wm_softc *);
/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
/* Currently unused management-mode detection helpers */
static int wm_check_mng_mode(struct wm_softc *);
static int wm_check_mng_mode_ich8lan(struct wm_softc *);
static int wm_check_mng_mode_82574(struct wm_softc *);
static int wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int wm_enable_mng_pass_thru(struct wm_softc *);
static bool wm_phy_resetisblocked(struct wm_softc *);
/* Hand-off of device control between firmware and driver */
static void wm_get_hw_control(struct wm_softc *);
static void wm_release_hw_control(struct wm_softc *);
static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void wm_init_manageability(struct wm_softc *);
static void wm_release_manageability(struct wm_softc *);
/* Wake-on-LAN / suspend-resume helpers */
static void wm_get_wakeup(struct wm_softc *);
static int wm_ulp_disable(struct wm_softc *);
static int wm_enable_phy_wakeup(struct wm_softc *);
static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int wm_resume_workarounds_pchlan(struct wm_softc *);
static void wm_enable_wakeup(struct wm_softc *);
static void wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void wm_lplu_d0_disable(struct wm_softc *);
/* EEE (Energy Efficient Ethernet) */
static int wm_set_eee_i350(struct wm_softc *);
static int wm_set_eee_pchlan(struct wm_softc *);
static int wm_set_eee(struct wm_softc *);
/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static int wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
static int wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
static int wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int wm_k1_workaround_lv(struct wm_softc *);
static int wm_link_stall_workaround_hv(struct wm_softc *);
static int wm_set_mdio_slow_mode_hv(struct wm_softc *);
static int wm_set_mdio_slow_mode_hv_locked(struct wm_softc *);
static void wm_configure_k1_ich8lan(struct wm_softc *, int);
static void wm_reset_init_script_82575(struct wm_softc *);
static void wm_reset_mdicnfg_82580(struct wm_softc *);
static bool wm_phy_is_accessible_pchlan(struct wm_softc *);
static void wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int wm_pll_workaround_i210(struct wm_softc *);
static void wm_legacy_irq_quirk_spt(struct wm_softc *);
/* Discard Tx packets while the link is down (see WM_TXQ_LINKDOWN_DISCARD) */
static bool wm_phy_need_linkdown_discard(struct wm_softc *);
static void wm_set_linkdown_discard(struct wm_softc *);
static void wm_clear_linkdown_discard(struct wm_softc *);
/* sysctl handlers for the TDH/TDT Tx ring registers */
static int wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
#ifdef WM_DEBUG
static int wm_sysctl_debug(SYSCTLFN_PROTO);
#endif
for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
delay(5);
if (CSR_READ(sc, reg) & SCTL_CTL_READY)
break;
}
if (i == SCTL_CTL_POLL_TIMEOUT) {
aprint_error("%s: WARNING:"
" i82575 reg 0x%08x setup did not indicate ready\n",
device_xname(sc->sc_dev), reg);
}
}
/*
* Descriptor sync/init functions.
*/
static inline void
wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
{
	struct wm_softc *sc = txq->txq_sc;

	/*
	 * Sync "num" Tx descriptors beginning at index "start" with the
	 * given bus_dma ops.  The ring is circular, so a range that
	 * crosses the end must be issued as two bus_dmamap_sync() calls.
	 */
	if (start + num > WM_NTXDESC(txq)) {
		const int tail = WM_NTXDESC(txq) - start;

		/* First part: from "start" to the end of the ring. */
		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
		    WM_CDTXOFF(txq, start), txq->txq_descsize * tail, ops);
		num -= tail;
		start = 0;
	}

	/* Remaining (possibly wrapped) part, starting at "start". */
	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
}
static inline void
wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
{
struct wm_softc *sc = rxq->rxq_sc;
/*
* Note: We scoot the packet forward 2 bytes in the buffer
* so that the payload after the Ethernet header is aligned
* to a 4-byte boundary.
* XXX BRAINDAMAGE ALERT!
* The stupid chip uses the same size for every buffer, which
* is set in the Receive Control register. We are using the 2K
* size option, but what we REALLY want is (2K - 2)! For this
* reason, we can't "scoot" packets longer than the standard
* Ethernet MTU. On strict-alignment platforms, if the total
* size exceeds (2K - 2) we set align_tweak to 0 and let
* the upper layer copy the headers.
*/
m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
if (sc->sc_type < WM_T_82543) {
if (sc->sc_rev < 2) {
aprint_error_dev(sc->sc_dev,
"i82542 must be at least rev. 2\n");
return;
}
if (sc->sc_rev < 3)
sc->sc_type = WM_T_82542_2_0;
}
/*
* Disable MSI for Errata:
* "Message Signaled Interrupt Feature May Corrupt Write Transactions"
*
* 82544: Errata 25
* 82540: Errata 6 (easy to reproduce device timeout)
* 82545: Errata 4 (easy to reproduce device timeout)
* 82546: Errata 26 (easy to reproduce device timeout)
* 82541: Errata 7 (easy to reproduce device timeout)
*
* "Byte Enables 2 and 3 are not set on MSI writes"
*
* 82571 & 82572: Errata 63
*/
if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
|| (sc->sc_type == WM_T_82572))
pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
/*
* In addition, i82544 and later support I/O mapped indirect
* register access. It is not desirable (nor supported in
* this driver) to use it for normal operation, though it is
* required to work around bugs in some chip versions.
*/
switch (sc->sc_type) {
case WM_T_82544:
case WM_T_82541:
case WM_T_82541_2:
case WM_T_82547:
case WM_T_82547_2:
/* First we have to find the I/O BAR. */
for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
if (memtype == PCI_MAPREG_TYPE_IO)
break;
if (PCI_MAPREG_MEM_TYPE(memtype) ==
PCI_MAPREG_MEM_TYPE_64BIT)
i += 4; /* skip high bits, too */
}
if (i < PCI_MAPREG_END) {
/*
* We found PCI_MAPREG_TYPE_IO. Note that 82580
* (and newer?) chip has no PCI_MAPREG_TYPE_IO.
* It's no problem because newer chips has no this
* bug.
*
* The i8254x doesn't apparently respond when the
* I/O BAR is 0, which looks somewhat like it's not
* been configured.
*/
preg = pci_conf_read(pc, pa->pa_tag, i);
if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
aprint_error_dev(sc->sc_dev,
"WARNING: I/O BAR at zero.\n");
} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios)
== 0) {
sc->sc_flags |= WM_F_IOH_VALID;
} else
aprint_error_dev(sc->sc_dev,
"WARNING: unable to map I/O space\n");
}
break;
default:
break;
}
/* Enable bus mastering. Disable MWI on the i82542 2.0. */
preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
preg |= PCI_COMMAND_MASTER_ENABLE;
if (sc->sc_type < WM_T_82542_2_1)
preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
/* Power up chip */
if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
&& error != EOPNOTSUPP) {
aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
return;
}
wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
/*
* Don't use MSI-X if we can use only one queue to save interrupt
* resource.
*/
if (sc->sc_nqueues > 1) {
max_type = PCI_INTR_TYPE_MSIX;
/*
* 82583 has a MSI-X capability in the PCI configuration space
* but it doesn't support it. At least the document doesn't
* say anything about MSI-X.
*/
counts[PCI_INTR_TYPE_MSIX]
= (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
} else {
max_type = PCI_INTR_TYPE_MSI;
counts[PCI_INTR_TYPE_MSIX] = 0;
}
/* Set PHY, NVM mutex related stuff */
switch (sc->sc_type) {
case WM_T_82542_2_0:
case WM_T_82542_2_1:
case WM_T_82543:
case WM_T_82544:
/* Microwire */
sc->nvm.read = wm_nvm_read_uwire;
sc->sc_nvm_wordsize = 64;
sc->sc_nvm_addrbits = 6;
break;
case WM_T_82540:
case WM_T_82545:
case WM_T_82545_3:
case WM_T_82546:
case WM_T_82546_3:
/* Microwire */
sc->nvm.read = wm_nvm_read_uwire;
reg = CSR_READ(sc, WMREG_EECD);
if (reg & EECD_EE_SIZE) {
sc->sc_nvm_wordsize = 256;
sc->sc_nvm_addrbits = 8;
} else {
sc->sc_nvm_wordsize = 64;
sc->sc_nvm_addrbits = 6;
}
sc->sc_flags |= WM_F_LOCK_EECD;
sc->nvm.acquire = wm_get_eecd;
sc->nvm.release = wm_put_eecd;
break;
case WM_T_82541:
case WM_T_82541_2:
case WM_T_82547:
case WM_T_82547_2:
reg = CSR_READ(sc, WMREG_EECD);
/*
* wm_nvm_set_addrbits_size_eecd() accesses SPI in it only
* on 8254[17], so set flags and functios before calling it.
*/
sc->sc_flags |= WM_F_LOCK_EECD;
sc->nvm.acquire = wm_get_eecd;
sc->nvm.release = wm_put_eecd;
if (reg & EECD_EE_TYPE) {
/* SPI */
sc->nvm.read = wm_nvm_read_spi;
sc->sc_flags |= WM_F_EEPROM_SPI;
wm_nvm_set_addrbits_size_eecd(sc);
} else {
/* Microwire */
sc->nvm.read = wm_nvm_read_uwire;
if ((reg & EECD_EE_ABITS) != 0) {
sc->sc_nvm_wordsize = 256;
sc->sc_nvm_addrbits = 8;
} else {
sc->sc_nvm_wordsize = 64;
sc->sc_nvm_addrbits = 6;
}
}
break;
case WM_T_82571:
case WM_T_82572:
/* SPI */
sc->nvm.read = wm_nvm_read_eerd;
/* Not use WM_F_LOCK_EECD because we use EERD */
sc->sc_flags |= WM_F_EEPROM_SPI;
wm_nvm_set_addrbits_size_eecd(sc);
sc->phy.acquire = wm_get_swsm_semaphore;
sc->phy.release = wm_put_swsm_semaphore;
sc->nvm.acquire = wm_get_nvm_82571;
sc->nvm.release = wm_put_nvm_82571;
break;
case WM_T_82573:
case WM_T_82574:
case WM_T_82583:
sc->nvm.read = wm_nvm_read_eerd;
/* Not use WM_F_LOCK_EECD because we use EERD */
if (sc->sc_type == WM_T_82573) {
sc->phy.acquire = wm_get_swsm_semaphore;
sc->phy.release = wm_put_swsm_semaphore;
sc->nvm.acquire = wm_get_nvm_82571;
sc->nvm.release = wm_put_nvm_82571;
} else {
/* Both PHY and NVM use the same semaphore. */
sc->phy.acquire = sc->nvm.acquire
= wm_get_swfwhw_semaphore;
sc->phy.release = sc->nvm.release
= wm_put_swfwhw_semaphore;
}
if (wm_nvm_is_onboard_eeprom(sc) == 0) {
sc->sc_flags |= WM_F_EEPROM_FLASH;
sc->sc_nvm_wordsize = 2048;
} else {
/* SPI */
sc->sc_flags |= WM_F_EEPROM_SPI;
wm_nvm_set_addrbits_size_eecd(sc);
}
break;
case WM_T_82575:
case WM_T_82576:
case WM_T_82580:
case WM_T_I350:
case WM_T_I354:
case WM_T_80003:
/* SPI */
sc->sc_flags |= WM_F_EEPROM_SPI;
wm_nvm_set_addrbits_size_eecd(sc);
if ((sc->sc_type == WM_T_80003)
|| (sc->sc_nvm_wordsize < (1 << 15))) {
sc->nvm.read = wm_nvm_read_eerd;
/* Don't use WM_F_LOCK_EECD because we use EERD */
} else {
sc->nvm.read = wm_nvm_read_spi;
sc->sc_flags |= WM_F_LOCK_EECD;
}
sc->phy.acquire = wm_get_phy_82575;
sc->phy.release = wm_put_phy_82575;
sc->nvm.acquire = wm_get_nvm_80003;
sc->nvm.release = wm_put_nvm_80003;
break;
case WM_T_ICH8:
case WM_T_ICH9:
case WM_T_ICH10:
case WM_T_PCH:
case WM_T_PCH2:
case WM_T_PCH_LPT:
sc->nvm.read = wm_nvm_read_ich8;
/* FLASH */
sc->sc_flags |= WM_F_EEPROM_FLASH;
sc->sc_nvm_wordsize = 2048;
memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
&sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
aprint_error_dev(sc->sc_dev,
"can't map FLASH registers\n");
goto out;
}
reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
ICH_FLASH_SECTOR_SIZE;
sc->sc_ich8_flash_bank_size =
((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
sc->sc_flashreg_offset = 0;
sc->phy.acquire = wm_get_swflag_ich8lan;
sc->phy.release = wm_put_swflag_ich8lan;
sc->nvm.acquire = wm_get_nvm_ich8lan;
sc->nvm.release = wm_put_nvm_ich8lan;
break;
case WM_T_PCH_SPT:
case WM_T_PCH_CNP:
case WM_T_PCH_TGP:
sc->nvm.read = wm_nvm_read_spt;
/* SPT has no GFPREG; flash registers mapped through BAR0 */
sc->sc_flags |= WM_F_EEPROM_FLASH;
sc->sc_flasht = sc->sc_st;
sc->sc_flashh = sc->sc_sh;
sc->sc_ich8_flash_base = 0;
sc->sc_nvm_wordsize =
(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
* NVM_SIZE_MULTIPLIER;
/* It is size in bytes, we want words */
sc->sc_nvm_wordsize /= 2;
/* Assume 2 banks */
sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
sc->phy.acquire = wm_get_swflag_ich8lan;
sc->phy.release = wm_put_swflag_ich8lan;
sc->nvm.acquire = wm_get_nvm_ich8lan;
sc->nvm.release = wm_put_nvm_ich8lan;
break;
case WM_T_I210:
case WM_T_I211:
/* Allow a single clear of the SW semaphore on I210 and newer*/
sc->sc_flags |= WM_F_WA_I210_CLSEM;
if (wm_nvm_flash_presence_i210(sc)) {
sc->nvm.read = wm_nvm_read_eerd;
/* Don't use WM_F_LOCK_EECD because we use EERD */
sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
wm_nvm_set_addrbits_size_eecd(sc);
} else {
sc->nvm.read = wm_nvm_read_invm;
sc->sc_flags |= WM_F_EEPROM_INVM;
sc->sc_nvm_wordsize = INVM_SIZE;
}
sc->phy.acquire = wm_get_phy_82575;
sc->phy.release = wm_put_phy_82575;
sc->nvm.acquire = wm_get_nvm_80003;
sc->nvm.release = wm_put_nvm_80003;
break;
default:
break;
}
/* Ensure the SMBI bit is clear before first NVM or PHY access */
switch (sc->sc_type) {
case WM_T_82571:
case WM_T_82572:
reg = CSR_READ(sc, WMREG_SWSM2);
if ((reg & SWSM2_LOCK) == 0) {
CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
force_clear_smbi = true;
} else
force_clear_smbi = false;
break;
case WM_T_82573:
case WM_T_82574:
case WM_T_82583:
force_clear_smbi = true;
break;
default:
force_clear_smbi = false;
break;
}
if (force_clear_smbi) {
reg = CSR_READ(sc, WMREG_SWSM);
if ((reg & SWSM_SMBI) != 0)
aprint_error_dev(sc->sc_dev,
"Please update the Bootagent\n");
CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
}
/*
* Defer printing the EEPROM type until after verifying the checksum
* This allows the EEPROM type to be printed correctly in the case
* that no EEPROM is attached.
*/
/*
* Validate the EEPROM checksum. If the checksum fails, flag
* this for later, so we can fail future reads from the EEPROM.
*/
if (wm_nvm_validate_checksum(sc)) {
/*
* Read twice again because some PCI-e parts fail the
* first check due to the link being in sleep state.
*/
if (wm_nvm_validate_checksum(sc))
sc->sc_flags |= WM_F_EEPROM_INVALID;
}
if (sc->sc_flags & WM_F_EEPROM_INVALID)
aprint_verbose_dev(sc->sc_dev, "No EEPROM");
else {
aprint_verbose_dev(sc->sc_dev, "%u words ",
sc->sc_nvm_wordsize);
if (sc->sc_flags & WM_F_EEPROM_INVM)
aprint_verbose("iNVM");
else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
aprint_verbose("FLASH(HW)");
else if (sc->sc_flags & WM_F_EEPROM_FLASH)
aprint_verbose("FLASH");
else {
if (sc->sc_flags & WM_F_EEPROM_SPI)
eetype = "SPI";
else
eetype = "MicroWire";
aprint_verbose("(%d address bits) %s EEPROM",
sc->sc_nvm_addrbits, eetype);
}
}
wm_nvm_version(sc);
aprint_verbose("\n");
/*
* XXX The first call of wm_gmii_setup_phytype. The result might be
* incorrect.
*/
wm_gmii_setup_phytype(sc, 0, 0);
/* Check for WM_F_WOL on some chips before wm_reset() */
switch (sc->sc_type) {
case WM_T_ICH8:
case WM_T_ICH9:
case WM_T_ICH10:
case WM_T_PCH:
case WM_T_PCH2:
case WM_T_PCH_LPT:
case WM_T_PCH_SPT:
case WM_T_PCH_CNP:
case WM_T_PCH_TGP:
apme_mask = WUC_APME;
eeprom_data = CSR_READ(sc, WMREG_WUC);
if ((eeprom_data & apme_mask) != 0)
sc->sc_flags |= WM_F_WOL;
break;
default:
break;
}
/* Reset the chip to a known state. */
wm_reset(sc);
/* sc->sc_pba is set in wm_reset(). */
aprint_verbose_dev(sc->sc_dev, "RX packet buffer size: %uKB\n",
sc->sc_pba);
/*
* Check for I21[01] PLL workaround.
*
* Three cases:
* a) Chip is I211.
* b) Chip is I210 and it uses INVM (not FLASH).
* c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
*/
if (sc->sc_type == WM_T_I211)
sc->sc_flags |= WM_F_PLL_WA_I210;
if (sc->sc_type == WM_T_I210) {
if (!wm_nvm_flash_presence_i210(sc))
sc->sc_flags |= WM_F_PLL_WA_I210;
else if ((sc->sc_nvm_ver_major < 3)
|| ((sc->sc_nvm_ver_major == 3)
&& (sc->sc_nvm_ver_minor < 25))) {
aprint_verbose_dev(sc->sc_dev,
"ROM image version %d.%d is older than 3.25\n",
sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
sc->sc_flags |= WM_F_PLL_WA_I210;
}
}
if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
wm_pll_workaround_i210(sc);
wm_get_wakeup(sc);
/* Non-AMT based hardware can now take control from firmware */
if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
wm_get_hw_control(sc);
/*
* Read the Ethernet address from the EEPROM, if not first found
* in device properties.
*/
ea = prop_dictionary_get(dict, "mac-address");
if (ea != NULL) {
KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
} else {
if (wm_read_mac_addr(sc, enaddr) != 0) {
aprint_error_dev(sc->sc_dev,
"unable to read Ethernet address\n");
goto out;
}
}
/*
* Read the config info from the EEPROM, and set up various
* bits in the control registers based on their contents.
*/
pn = prop_dictionary_get(dict, "i82543-cfg1");
if (pn != NULL) {
KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
cfg1 = (uint16_t) prop_number_signed_value(pn);
} else {
if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
goto out;
}
}
/* check for WM_F_WOL */
switch (sc->sc_type) {
case WM_T_82542_2_0:
case WM_T_82542_2_1:
case WM_T_82543:
/* dummy? */
eeprom_data = 0;
apme_mask = NVM_CFG3_APME;
break;
case WM_T_82544:
apme_mask = NVM_CFG2_82544_APM_EN;
eeprom_data = cfg2;
break;
case WM_T_82546:
case WM_T_82546_3:
case WM_T_82571:
case WM_T_82572:
case WM_T_82573:
case WM_T_82574:
case WM_T_82583:
case WM_T_80003:
case WM_T_82575:
case WM_T_82576:
apme_mask = NVM_CFG3_APME;
wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
: NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
break;
case WM_T_82580:
case WM_T_I350:
case WM_T_I354:
case WM_T_I210:
case WM_T_I211:
apme_mask = NVM_CFG3_APME;
wm_nvm_read(sc,
NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
1, &eeprom_data);
break;
case WM_T_ICH8:
case WM_T_ICH9:
case WM_T_ICH10:
case WM_T_PCH:
case WM_T_PCH2:
case WM_T_PCH_LPT:
case WM_T_PCH_SPT:
case WM_T_PCH_CNP:
case WM_T_PCH_TGP:
/* Already checked before wm_reset () */
apme_mask = eeprom_data = 0;
break;
default: /* XXX 82540 */
apme_mask = NVM_CFG3_APME;
wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
break;
}
/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
if ((eeprom_data & apme_mask) != 0)
sc->sc_flags |= WM_F_WOL;
/*
* We have the eeprom settings, now apply the special cases
* where the eeprom may be wrong or the board won't support
* wake on lan on a particular port
*/
switch (sc->sc_pcidevid) {
case PCI_PRODUCT_INTEL_82546GB_PCIE:
sc->sc_flags &= ~WM_F_WOL;
break;
case PCI_PRODUCT_INTEL_82546EB_FIBER:
case PCI_PRODUCT_INTEL_82546GB_FIBER:
/* Wake events only supported on port A for dual fiber
* regardless of eeprom setting */
if (sc->sc_funcid == 1)
sc->sc_flags &= ~WM_F_WOL;
break;
case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
/* If quad port adapter, disable WoL on all but port A */
if (sc->sc_funcid != 0)
sc->sc_flags &= ~WM_F_WOL;
break;
case PCI_PRODUCT_INTEL_82571EB_FIBER:
/* Wake events only supported on port A for dual fiber
* regardless of eeprom setting */
if (sc->sc_funcid == 1)
sc->sc_flags &= ~WM_F_WOL;
break;
case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
/* If quad port adapter, disable WoL on all but port A */
if (sc->sc_funcid != 0)
sc->sc_flags &= ~WM_F_WOL;
break;
}
if (sc->sc_type >= WM_T_82575) {
if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
nvmword);
if ((sc->sc_type == WM_T_82575) ||
(sc->sc_type == WM_T_82576)) {
/* Check NVM for autonegotiation */
if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
!= 0)
sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
}
if ((sc->sc_type == WM_T_82575) ||
(sc->sc_type == WM_T_I350)) {
if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
sc->sc_flags |= WM_F_MAS;
}
}
}
/*
* XXX need special handling for some multiple port cards
* to disable a particular port.
*/
if (sc->sc_type >= WM_T_82544) {
pn = prop_dictionary_get(dict, "i82543-swdpin");
if (pn != NULL) {
KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
swdpin = (uint16_t) prop_number_signed_value(pn);
} else {
if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
aprint_error_dev(sc->sc_dev,
"unable to read SWDPIN\n");
goto out;
}
}
}
if (cfg1 & NVM_CFG1_ILOS)
sc->sc_ctrl |= CTRL_ILOS;
/*
* XXX
* This code isn't correct because pin 2 and 3 are located
* in different position on newer chips. Check all datasheet.
*
* Until resolve this problem, check if a chip < 82580
*/
if (sc->sc_type <= WM_T_82580) {
if (sc->sc_type >= WM_T_82544) {
sc->sc_ctrl |=
((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
CTRL_SWDPIO_SHIFT;
sc->sc_ctrl |=
((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
CTRL_SWDPINS_SHIFT;
} else {
sc->sc_ctrl |=
((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
CTRL_SWDPIO_SHIFT;
}
}
/* Change current link mode setting */
reg &= ~CTRL_EXT_LINK_MODE_MASK;
if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
reg |= CTRL_EXT_LINK_MODE_SGMII;
else
reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
break;
case CTRL_EXT_LINK_MODE_GMII:
default:
aprint_normal_dev(sc->sc_dev, "Copper\n");
sc->sc_mediatype = WM_MEDIATYPE_COPPER;
break;
}
reg &= ~CTRL_EXT_I2C_ENA;
if ((sc->sc_flags & WM_F_SGMII) != 0)
reg |= CTRL_EXT_I2C_ENA;
else
reg &= ~CTRL_EXT_I2C_ENA;
CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
if ((sc->sc_flags & WM_F_SGMII) != 0) {
if (!wm_sgmii_uses_mdio(sc))
wm_gmii_setup_phytype(sc, 0, 0);
wm_reset_mdicnfg_82580(sc);
}
} else if (sc->sc_type < WM_T_82543 ||
(CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
aprint_error_dev(sc->sc_dev,
"WARNING: TBIMODE set on 1000BASE-T product!\n");
sc->sc_mediatype = WM_MEDIATYPE_FIBER;
}
} else {
if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
aprint_error_dev(sc->sc_dev,
"WARNING: TBIMODE clear on 1000BASE-X product!\n");
sc->sc_mediatype = WM_MEDIATYPE_COPPER;
}
}
if (sc->sc_type >= WM_T_PCH2)
sc->sc_flags |= WM_F_EEE;
else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
&& (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
/* XXX: Need special handling for I354. (not yet) */
if (sc->sc_type != WM_T_I354)
sc->sc_flags |= WM_F_EEE;
}
/*
* The I350 has a bug where it always strips the CRC whether
* asked to or not. So ask for stripped CRC here and cope in rxeof
*/
if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
|| (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
sc->sc_flags |= WM_F_CRC_STRIP;
/*
* Workaround for some chips to delay sending LINK_STATE_UP.
* Some systems can't send packet soon after linkup. See also
* wm_linkintr_gmii(), wm_tick() and wm_gmii_mediastatus().
*/
switch (sc->sc_type) {
case WM_T_I350:
case WM_T_I354:
case WM_T_I210:
case WM_T_I211:
if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
sc->sc_flags |= WM_F_DELAY_LINKUP;
break;
default:
break;
}
/* Set device properties (macflags) */
prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
/* Initialize the media structures accordingly. */
if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
wm_gmii_mediainit(sc, wmp->wmp_product);
else
wm_tbi_mediainit(sc); /* All others */
ifp = &sc->sc_ethercom.ec_if;
xname = device_xname(sc->sc_dev);
strlcpy(ifp->if_xname, xname, IFNAMSIZ);
ifp->if_softc = sc;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_extflags = IFEF_MPSAFE;
ifp->if_ioctl = wm_ioctl;
if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
ifp->if_start = wm_nq_start;
/*
* When the number of CPUs is one and the controller can use
* MSI-X, wm(4) use MSI-X but *does not* use multiqueue.
* That is, wm(4) use two interrupts, one is used for Tx/Rx
* and the other is used for link status changing.
* In this situation, wm_nq_transmit() is disadvantageous
* because of wm_select_txqueue() and pcq(9) overhead.
*/
if (wm_is_using_multiqueue(sc))
ifp->if_transmit = wm_nq_transmit;
} else {
ifp->if_start = wm_start;
/*
* wm_transmit() has the same disadvantages as wm_nq_transmit()
* described above.
*/
if (wm_is_using_multiqueue(sc))
ifp->if_transmit = wm_transmit;
}
/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
ifp->if_init = wm_init;
ifp->if_stop = wm_stop;
IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
IFQ_SET_READY(&ifp->if_snd);
/* Check for jumbo frame */
switch (sc->sc_type) {
case WM_T_82573:
/* XXX limited to 9234 if ASPM is disabled */
wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
break;
case WM_T_82571:
case WM_T_82572:
case WM_T_82574:
case WM_T_82583:
case WM_T_82575:
case WM_T_82576:
case WM_T_82580:
case WM_T_I350:
case WM_T_I354:
case WM_T_I210:
case WM_T_I211:
case WM_T_80003:
case WM_T_ICH9:
case WM_T_ICH10:
case WM_T_PCH2: /* PCH2 supports 9K frame size */
case WM_T_PCH_LPT:
case WM_T_PCH_SPT:
case WM_T_PCH_CNP:
case WM_T_PCH_TGP:
/* XXX limited to 9234 */
sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
break;
case WM_T_PCH:
/* XXX limited to 4096 */
sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
break;
case WM_T_82542_2_0:
case WM_T_82542_2_1:
case WM_T_ICH8:
/* No support for jumbo frame */
break;
default:
/* ETHER_MAX_LEN_JUMBO */
sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
break;
}
/* If we're a i82543 or greater, we can support VLANs. */
if (sc->sc_type >= WM_T_82543) {
sc->sc_ethercom.ec_capabilities |=
ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
}
if ((sc->sc_flags & WM_F_EEE) != 0)
sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
/*
* We can perform TCPv4 and UDPv4 checksums in-bound. Only
* on i82543 and later.
*/
if (sc->sc_type >= WM_T_82543) {
ifp->if_capabilities |=
IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
IFCAP_CSUM_TCPv6_Tx |
IFCAP_CSUM_UDPv6_Tx;
}
/*
* XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
*
* 82541GI (8086:1076) ... no
* 82572EI (8086:10b9) ... yes
*/
if (sc->sc_type >= WM_T_82571) {
ifp->if_capabilities |=
IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
}
/*
* If we're a i82544 or greater (except i82547), we can do
* TCP segmentation offload.
*/
if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
ifp->if_capabilities |= IFCAP_TSOv4;
if (sc->sc_type >= WM_T_82571)
ifp->if_capabilities |= IFCAP_TSOv6;
/* XXX 82575 document says it has ICRXOC. Is that right? */
evcnt_attach_dynamic(&sc->sc_ev_icrxoc, EVCNT_TYPE_MISC,
NULL, xname, "Interrupt Cause Receiver Overrun");
} else if (!WM_IS_ICHPCH(sc)) {
/*
* For 82575 and newer.
*
* On 80003, ICHs and PCHs, it seems all of the following
* registers are zero.
*/
evcnt_attach_dynamic(&sc->sc_ev_rpthc, EVCNT_TYPE_MISC,
NULL, xname, "Rx Packets To Host");
evcnt_attach_dynamic(&sc->sc_ev_debug1, EVCNT_TYPE_MISC,
NULL, xname, "Debug Counter 1");
evcnt_attach_dynamic(&sc->sc_ev_debug2, EVCNT_TYPE_MISC,
NULL, xname, "Debug Counter 2");
evcnt_attach_dynamic(&sc->sc_ev_debug3, EVCNT_TYPE_MISC,
NULL, xname, "Debug Counter 3");
/*
* 82575 datasheet says 0x4118 is for TXQEC(Tx Queue Empty).
* I think it's wrong. The real count I observed is the same
* as GPTC(Good Packets Tx) and TPT(Total Packets Tx).
* It's HGPTC(Host Good Packets Tx) which is described in
* 82576's datasheet.
*/
evcnt_attach_dynamic(&sc->sc_ev_hgptc, EVCNT_TYPE_MISC,
NULL, xname, "Host Good Packets TX");
evcnt_attach_dynamic(&sc->sc_ev_debug4, EVCNT_TYPE_MISC,
NULL, xname, "Debug Counter 4");
evcnt_attach_dynamic(&sc->sc_ev_rxdmtc, EVCNT_TYPE_MISC,
NULL, xname, "Rx Desc Min Thresh");
/* XXX Is the circuit breaker only for 82576? */
evcnt_attach_dynamic(&sc->sc_ev_htcbdpc, EVCNT_TYPE_MISC,
NULL, xname, "Host Tx Circuit Breaker Dropped Packets");
/* Tell the firmware about the release */
mutex_enter(sc->sc_core_lock);
wm_release_manageability(sc);
wm_release_hw_control(sc);
wm_enable_wakeup(sc);
mutex_exit(sc->sc_core_lock);
/* Delete all remaining media. */
ifmedia_fini(&sc->sc_mii.mii_media);
/* Unload RX dmamaps and free mbufs */
for (i = 0; i < sc->sc_nqueues; i++) {
struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
mutex_enter(rxq->rxq_lock);
wm_rxdrain(rxq);
mutex_exit(rxq->rxq_lock);
}
/* Must unlock here */
/* Disestablish the interrupt handler */
for (i = 0; i < sc->sc_nintrs; i++) {
if (sc->sc_ihs[i] != NULL) {
pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
sc->sc_ihs[i] = NULL;
}
}
pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
/* wm_stop() ensured that the workqueues are stopped. */
workqueue_destroy(sc->sc_queue_wq);
workqueue_destroy(sc->sc_reset_wq);
for (i = 0; i < sc->sc_nqueues; i++)
softint_disestablish(sc->sc_queue[i].wmq_si);
wm_free_txrx_queues(sc);
/* Unmap the registers */
if (sc->sc_ss) {
bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
sc->sc_ss = 0;
}
if (sc->sc_ios) {
bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
sc->sc_ios = 0;
}
if (sc->sc_flashs) {
bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
sc->sc_flashs = 0;
}
if (sc->sc_core_lock)
mutex_obj_free(sc->sc_core_lock);
if (sc->sc_ich_phymtx)
mutex_obj_free(sc->sc_ich_phymtx);
if (sc->sc_ich_nvmmtx)
mutex_obj_free(sc->sc_ich_nvmmtx);
if (sc->sc_type >= WM_T_PCH2)
wm_resume_workarounds_pchlan(sc);
IFNET_LOCK(ifp);
if ((ifp->if_flags & IFF_UP) == 0) {
/* >= PCH_SPT hardware workaround before reset. */
if (sc->sc_type >= WM_T_PCH_SPT)
wm_flush_desc_rings(sc);
wm_reset(sc);
/* Non-AMT based hardware can now take control from firmware */
if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
wm_get_hw_control(sc);
wm_init_manageability(sc);
} else {
/*
* We called pmf_class_network_register(), so if_init() is
* automatically called when IFF_UP. wm_reset(),
* wm_get_hw_control() and wm_init_manageability() are called
* via wm_init().
*/
}
IFNET_UNLOCK(ifp);
return true;
}
/*
* wm_watchdog:
*
* Watchdog checker.
*/
static bool
wm_watchdog(struct ifnet *ifp)
{
int qid;
struct wm_softc *sc = ifp->if_softc;
uint16_t hang_queue = 0; /* Max queue number of wm(4) is 82576's 16. */
#ifdef WM_DEBUG
if (sc->sc_trigger_reset) {
/* debug operation, no need for atomicity or reliability */
sc->sc_trigger_reset = 0;
hang_queue++;
}
#endif
if (hang_queue == 0)
return true;
if (atomic_swap_uint(&sc->sc_reset_pending, 1) == 0)
workqueue_enqueue(sc->sc_reset_wq, &sc->sc_reset_work, NULL);
/* Don't want ioctl operations to happen */
IFNET_LOCK(ifp);
/* reset the interface. */
wm_init(ifp);
IFNET_UNLOCK(ifp);
/*
* There are still some upper layer processing which call
* ifp->if_start(). e.g. ALTQ or one CPU system
*/
/* Try to get more packets going. */
ifp->if_start(ifp);
/*
* wm_tick:
*
* One second timer, used to check link status, sweep up
* completed transmit jobs, etc.
*/
static void
wm_tick(void *arg)
{
struct wm_softc *sc = arg;
struct ifnet *ifp = &sc->sc_ethercom.ec_if;
mutex_enter(sc->sc_core_lock);
if (sc->sc_core_stopping) {
mutex_exit(sc->sc_core_lock);
return;
}
wm_update_stats(sc);
if (sc->sc_flags & WM_F_HAS_MII) {
bool dotick = true;
/*
* Workaround for some chips to delay sending LINK_STATE_UP.
* See also wm_linkintr_gmii() and wm_gmii_mediastatus().
*/
if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
struct timeval now;
getmicrotime(&now);
if (timercmp(&now, &sc->sc_linkup_delay_time, <))
dotick = false;
else if (sc->sc_linkup_delay_time.tv_sec != 0) {
/* Simplify by checking tv_sec only. */
switch (cmd) {
case SIOCADDMULTI:
case SIOCDELMULTI:
break;
default:
KASSERT(IFNET_LOCKED(ifp));
}
if (cmd == SIOCZIFDATA) {
/*
* Special handling for SIOCZIFDATA.
* Copying and clearing the if_data structure is done with
* ether_ioctl() below.
*/
mutex_enter(sc->sc_core_lock);
wm_update_stats(sc);
wm_clear_evcnt(sc);
mutex_exit(sc->sc_core_lock);
}
switch (cmd) {
case SIOCSIFMEDIA:
mutex_enter(sc->sc_core_lock);
/* Flow control requires full-duplex mode. */
if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
(ifr->ifr_media & IFM_FDX) == 0)
ifr->ifr_media &= ~IFM_ETH_FMASK;
if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
/* We can do both TXPAUSE and RXPAUSE. */
ifr->ifr_media |=
IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
}
sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
}
mutex_exit(sc->sc_core_lock);
error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
DPRINTF(sc, WM_DEBUG_LINK,
("%s: %s: Set linkdown discard flag\n",
device_xname(sc->sc_dev), __func__));
wm_set_linkdown_discard(sc);
}
}
break;
case SIOCINITIFADDR:
mutex_enter(sc->sc_core_lock);
if (ifa->ifa_addr->sa_family == AF_LINK) {
sdl = satosdl(ifp->if_dl->ifa_addr);
(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
/* Unicast address is the first multicast entry */
wm_set_filter(sc);
error = 0;
mutex_exit(sc->sc_core_lock);
break;
}
mutex_exit(sc->sc_core_lock);
/*FALLTHROUGH*/
default:
if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
if (((ifp->if_flags & IFF_UP) != 0) &&
((ifr->ifr_flags & IFF_UP) == 0)) {
DPRINTF(sc, WM_DEBUG_LINK,
("%s: %s: Set linkdown discard flag\n",
device_xname(sc->sc_dev), __func__));
wm_set_linkdown_discard(sc);
}
}
const int s = splnet();
/* It may call wm_start, so unlock here */
error = ether_ioctl(ifp, cmd, data);
splx(s);
if (error != ENETRESET)
break;
error = 0;
if (cmd == SIOCSIFCAP)
error = if_init(ifp);
else if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
mutex_enter(sc->sc_core_lock);
if (sc->sc_if_flags & IFF_RUNNING) {
/*
* Multicast list has changed; set the
* hardware filter accordingly.
*/
wm_set_filter(sc);
}
mutex_exit(sc->sc_core_lock);
}
break;
}
return error;
}
/* MAC address related */
/*
* Get the offset of the MAC address and return it.
* If an error occurred, use offset 0.
*/
static uint16_t
wm_check_alt_mac_addr(struct wm_softc *sc)
{
	uint16_t addr_word[ETHER_ADDR_LEN / 2];
	uint16_t ptr = NVM_OFF_MACADDR;

	/* Fetch the pointer word for the alternative MAC address block. */
	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &ptr) != 0)
		return 0;

	/* An all-zeros or all-ones pointer means "no alternative address". */
	if ((ptr == 0x0000) || (ptr == 0xffff))
		return 0;

	/* Select this function's slot within the alternative address block. */
	ptr += NVM_OFF_MACADDR_82571(sc->sc_funcid);

	/*
	 * Check whether the alternative MAC address is valid or not.
	 * Some cards have a non-0xffff pointer but don't actually use
	 * an alternative MAC address.
	 *
	 * A usable unicast address must have the multicast/broadcast
	 * bit (bit 0 of the first octet) clear.
	 */
	if ((wm_nvm_read(sc, ptr, 1, addr_word) == 0) &&
	    ((addr_word[0] & 0x01) == 0))
		return ptr;	/* Found */

	/* Not found */
	return 0;
}
static int
wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
{
uint16_t myea[ETHER_ADDR_LEN / 2];
uint16_t offset = NVM_OFF_MACADDR;
int do_invert = 0;
switch (sc->sc_type) {
case WM_T_82580:
case WM_T_I350:
case WM_T_I354:
/* EEPROM Top Level Partitioning */
offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
break;
case WM_T_82571:
case WM_T_82575:
case WM_T_82576:
case WM_T_80003:
case WM_T_I210:
case WM_T_I211:
offset = wm_check_alt_mac_addr(sc);
if (offset == 0)
if ((sc->sc_funcid & 0x01) == 1)
do_invert = 1;
break;
default:
if ((sc->sc_funcid & 0x01) == 1)
do_invert = 1;
break;
}
/*
 * wm_rar_count:
 *
 *	Return the number of Receive Address Register (RAR) entries
 *	for this chip type.
 */
static int
wm_rar_count(struct wm_softc *sc)
{

	/* Map the chip type directly to its RAR table size. */
	switch (sc->sc_type) {
	case WM_T_ICH8:
		/* ICH8 has one entry fewer than the other ICH/PCH chips. */
		return WM_RAL_TABSIZE_ICH8 - 1;
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		return WM_RAL_TABSIZE_ICH8;
	case WM_T_PCH2:
		return WM_RAL_TABSIZE_PCH2;
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT:
	case WM_T_PCH_CNP:
	case WM_T_PCH_TGP:
		return WM_RAL_TABSIZE_PCH_LPT;
	case WM_T_82575:
	case WM_T_I210:
	case WM_T_I211:
		return WM_RAL_TABSIZE_82575;
	case WM_T_82576:
	case WM_T_82580:
		return WM_RAL_TABSIZE_82576;
	case WM_T_I350:
	case WM_T_I354:
		return WM_RAL_TABSIZE_I350;
	default:
		return WM_RAL_TABSIZE;
	}
}
/*
* wm_set_filter:
*
* Set up the receive filter.
*/
static void
wm_set_filter(struct wm_softc *sc)
{
struct ethercom *ec = &sc->sc_ethercom;
struct ifnet *ifp = &sc->sc_ethercom.ec_if;
struct ether_multi *enm;
struct ether_multistep step;
bus_addr_t mta_reg;
uint32_t hash, reg, bit;
int i, size, ralmax, rv;
/*
* Set the station address in the first RAL slot, and
* clear the remaining slots.
*/
size = wm_rar_count(sc);
wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT) ||
(sc->sc_type == WM_T_PCH_CNP) || (sc->sc_type == WM_T_PCH_TGP)) {
i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
switch (i) {
case 0:
/* We can use all entries */
ralmax = size;
break;
case 1:
/* Only RAR[0] */
ralmax = 1;
break;
default:
/* Available SHRA + RAR[0] */
ralmax = i + 1;
}
} else
ralmax = size;
for (i = 1; i < size; i++) {
if (i < ralmax)
wm_set_ral(sc, NULL, i);
}
if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
|| (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
|| (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)
|| (sc->sc_type == WM_T_PCH_TGP))
size = WM_ICH8_MC_TABSIZE;
else
size = WM_MC_TABSIZE;
/* Clear out the multicast table. */
for (i = 0; i < size; i++) {
CSR_WRITE(sc, mta_reg + (i << 2), 0);
CSR_WRITE_FLUSH(sc);
}
ETHER_LOCK(ec);
ETHER_FIRST_MULTI(step, ec, enm);
while (enm != NULL) {
if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
ec->ec_flags |= ETHER_F_ALLMULTI;
ETHER_UNLOCK(ec);
/*
* We must listen to a range of multicast addresses.
* For now, just accept all multicasts, rather than
* trying to set only those filter bits needed to match
* the range. (At this time, the only use of address
* ranges is for IP multicast routing, for which the
* range is big enough to require all bits set.)
*/
goto allmulti;
}
/*
 * wm_get_auto_rd_done:
 *
 *	Wait for the NVM auto-read triggered by a reset to complete,
 *	as indicated by the EECD_EE_AUTORD bit.  Chip types not listed
 *	below need no wait here.
 */
void
wm_get_auto_rd_done(struct wm_softc *sc)
{
	int tries;

	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_I350:
	case WM_T_I354:
	case WM_T_I210:
	case WM_T_I211:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
		/* Poll EECD up to 10 times, 1ms apart. */
		for (tries = 0; tries < 10; tries++) {
			if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD) != 0)
				break;
			delay(1000);
		}
		if (tries == 10) {
			log(LOG_ERR, "%s: auto read from eeprom failed to "
			    "complete\n", device_xname(sc->sc_dev));
		}
		break;
	default:
		/* Nothing to do. */
		break;
	}
}
/* Wait for eeprom to reload */
switch (sc->sc_type) {
case WM_T_ICH10:
case WM_T_PCH:
case WM_T_PCH2:
case WM_T_PCH_LPT:
case WM_T_PCH_SPT:
case WM_T_PCH_CNP:
case WM_T_PCH_TGP:
for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
reg = CSR_READ(sc, WMREG_STATUS);
if ((reg & STATUS_LAN_INIT_DONE) != 0)
break;
delay(100);
}
if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
log(LOG_ERR, "%s: %s: lan_init_done failed to "
"complete\n", device_xname(sc->sc_dev), __func__);
}
break;
default:
panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
__func__);
break;
}
/* Wait for eeprom to reload */
switch (sc->sc_type) {
case WM_T_82542_2_0:
case WM_T_82542_2_1:
/* null */
break;
case WM_T_82543:
case WM_T_82544:
case WM_T_82540:
case WM_T_82545:
case WM_T_82545_3:
case WM_T_82546:
case WM_T_82546_3:
case WM_T_82541:
case WM_T_82541_2:
case WM_T_82547:
case WM_T_82547_2:
case WM_T_82573:
case WM_T_82574:
case WM_T_82583:
/* generic */
delay(10*1000);
break;
case WM_T_80003:
case WM_T_82571:
case WM_T_82572:
case WM_T_82575:
case WM_T_82576:
case WM_T_82580:
case WM_T_I350:
case WM_T_I354:
case WM_T_I210:
case WM_T_I211:
if (sc->sc_type == WM_T_82571) {
/* Only 82571 shares port 0 */
mask = EEMNGCTL_CFGDONE_0;
} else
mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
break;
delay(1000);
}
if (i >= WM_PHY_CFG_TIMEOUT)
DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
device_xname(sc->sc_dev), __func__));
break;
case WM_T_ICH8:
case WM_T_ICH9:
case WM_T_ICH10:
case WM_T_PCH:
case WM_T_PCH2:
case WM_T_PCH_LPT:
case WM_T_PCH_SPT:
case WM_T_PCH_CNP:
case WM_T_PCH_TGP:
delay(10*1000);
if (sc->sc_type >= WM_T_ICH10)
wm_lan_init_done(sc);
else
wm_get_auto_rd_done(sc);
if (((sc->sc_type == WM_T_PCH)
&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
|| (sc->sc_type > WM_T_PCH)) {
/*
* HW configures the SMBus address and LEDs when the OEM and
* LCD Write Enable bits are set in the NVM. When both NVM bits
* are cleared, SW will configure them instead.
*/
DPRINTF(sc, WM_DEBUG_INIT,
("%s: %s: Configure SMBus and LED\n",
device_xname(sc->sc_dev), __func__));
if ((rv = wm_write_smbus_addr(sc)) != 0)
goto release;
/*
* wm_oem_bits_config_ich8lan - SW-based LCD Configuration
* @sc: pointer to the HW structure
* @d0_state: boolean if entering d0 or d3 device state
*
* SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
* collectively called OEM bits. The OEM Write Enable bit and SW Config bit
* in NVM determines whether HW should configure LPLU and Gbe Disable.
*/
int
wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
{
uint32_t mac_reg;
uint16_t oem_reg;
int rv;
if (sc->sc_type < WM_T_PCH)
return 0;
rv = sc->phy.acquire(sc);
if (rv != 0)
return rv;
if (sc->sc_type == WM_T_PCH) {
mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
goto release;
}
/*
* 8257[12] Errata No.13
* Disable Dynamic Clock Gating.
*/
reg = CSR_READ(sc, WMREG_CTRL_EXT);
reg &= ~CTRL_EXT_DMA_DYN_CLK;
CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
break;
case WM_T_82573:
case WM_T_82574:
case WM_T_82583:
if ((sc->sc_type == WM_T_82574)
|| (sc->sc_type == WM_T_82583))
tarc0 |= __BIT(26); /* TARC0 bit 26 */
/* Extended Device Control */
reg = CSR_READ(sc, WMREG_CTRL_EXT);
reg &= ~__BIT(23); /* Clear bit 23 */
reg |= __BIT(22); /* Set bit 22 */
CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
/* Device Control */
sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */
CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
/* PCIe Control Register */
/*
* 82573 Errata (unknown).
*
* 82574 Errata 25 and 82583 Errata 12
* "Dropped Rx Packets":
* NVM Image Version 2.1.4 and newer has no this bug.
*/
reg = CSR_READ(sc, WMREG_GCR);
reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
CSR_WRITE(sc, WMREG_GCR, reg);
if ((sc->sc_type == WM_T_82574)
|| (sc->sc_type == WM_T_82583)) {
/*
* Document says this bit must be set for
* proper operation.
*/
reg = CSR_READ(sc, WMREG_GCR);
reg |= __BIT(22);
CSR_WRITE(sc, WMREG_GCR, reg);
/*
* Apply workaround for hardware errata
* documented in errata docs Fixes issue where
* some error prone or unreliable PCIe
* completions are occurring, particularly
* with ASPM enabled. Without fix, issue can
* cause Tx timeouts.
*/
reg = CSR_READ(sc, WMREG_GCR2);
reg |= __BIT(0);
CSR_WRITE(sc, WMREG_GCR2, reg);
}
break;
case WM_T_80003:
/* TARC0 */
if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
|| (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
tarc0 &= ~__BIT(20); /* Clear bits 20 */
/* TARC1 bit 28 */
tarc1 = CSR_READ(sc, WMREG_TARC1);
if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
tarc1 &= ~__BIT(28);
else
tarc1 |= __BIT(28);
CSR_WRITE(sc, WMREG_TARC1, tarc1);
break;
case WM_T_ICH8:
case WM_T_ICH9:
case WM_T_ICH10:
case WM_T_PCH:
case WM_T_PCH2:
case WM_T_PCH_LPT:
case WM_T_PCH_SPT:
case WM_T_PCH_CNP:
case WM_T_PCH_TGP:
/* TARC0 */
if (sc->sc_type == WM_T_ICH8) {
/* Set TARC0 bits 29 and 28 */
tarc0 |= __BITS(29, 28);
} else if (sc->sc_type == WM_T_PCH_SPT) {
tarc0 |= __BIT(29);
/*
* Drop bit 28. From Linux.
* See I218/I219 spec update
* "5. Buffer Overrun While the I219 is
* Processing DMA Transactions"
*/
tarc0 &= ~__BIT(28);
}
/* Set TARC0 bits 23,24,26,27 */
tarc0 |= __BITS(27, 26) | __BITS(24, 23);
/* CTRL_EXT */
reg = CSR_READ(sc, WMREG_CTRL_EXT);
reg |= __BIT(22); /* Set bit 22 */
/*
* Enable PHY low-power state when MAC is at D3
* w/o WoL
*/
if (sc->sc_type >= WM_T_PCH)
reg |= CTRL_EXT_PHYPDEN;
CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
/*
* wm_flush_desc_rings - remove all descriptors from the descriptor rings.
*
* In i219, the descriptor rings must be emptied before resetting the HW
* or before changing the device state to D3 during runtime (runtime PM).
*
* Failure to do this will cause the HW to enter a unit hang state which can
* only be released by PCI reset on the device.
*
* I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
*/
static void
wm_flush_desc_rings(struct wm_softc *sc)
{
/*
 * NOTE(review): preg, txq, txd and nexttx are declared below but no
 * code visible in this chunk initializes or consumes them; the Tx-ring
 * drain logic (reading the PCI descriptor-ring status into preg and
 * queueing a dummy descriptor via txq/txd/nexttx) appears to be elided
 * here.  The WM_DEBUG printfs print preg, which is uninitialized in the
 * visible code -- confirm against the complete source before relying on
 * this body.
 */
pcireg_t preg;
uint32_t reg;
struct wm_txqueue *txq;
wiseman_txdesc_t *txd;
int nexttx;
uint32_t rctl;
/*
* Remove all descriptors from the tx_ring.
*
* We want to clear all pending descriptors from the TX ring. Zeroing
* happens when the HW reads the regs. We assign the ring itself as
* the data of the next descriptor. We don't care about the data we are
* about to reset the HW.
*/
#ifdef WM_DEBUG
device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
#endif
/* Enable the transmitter (set TCTL_EN) so pending descriptors drain. */
reg = CSR_READ(sc, WMREG_TCTL);
CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
/*
* Mark all descriptors in the RX ring as consumed and disable the
* rx ring.
*/
#ifdef WM_DEBUG
device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
#endif
/* Disable the receiver (clear RCTL_EN) before touching RXDCTL. */
rctl = CSR_READ(sc, WMREG_RCTL);
CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
CSR_WRITE_FLUSH(sc);
delay(150);
reg = CSR_READ(sc, WMREG_RXDCTL(0));
/* Zero the lower 14 bits (prefetch and host thresholds) */
reg &= 0xffffc000;
/*
* Update thresholds: prefetch threshold to 31, host threshold
* to 1 and make sure the granularity is "descriptors" and not
* "cache lines"
*/
reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
/* Momentarily enable the RX ring for the changes to take effect */
CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
CSR_WRITE_FLUSH(sc);
delay(150);
/* Leave the receiver disabled; the caller resets the chip next. */
CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
}
/*
* wm_reset:
*
* Reset the i82542 chip.
*/
static void
wm_reset(struct wm_softc *sc)
{
int phy_reset = 0;
int i, error = 0;
uint32_t reg;
uint16_t kmreg;
int rv;
/*
* Allocate on-chip memory according to the MTU size.
* The Packet Buffer Allocation register must be written
* before the chip is reset.
*/
switch (sc->sc_type) {
case WM_T_82547:
case WM_T_82547_2:
sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
PBA_22K : PBA_30K;
for (i = 0; i < sc->sc_nqueues; i++) {
struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
txq->txq_fifo_head = 0;
txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
txq->txq_fifo_size =
(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
txq->txq_fifo_stall = 0;
}
break;
case WM_T_82571:
case WM_T_82572:
case WM_T_82575: /* XXX need special handing for jumbo frames */
case WM_T_80003:
sc->sc_pba = PBA_32K;
break;
case WM_T_82573:
sc->sc_pba = PBA_12K;
break;
case WM_T_82574:
case WM_T_82583:
sc->sc_pba = PBA_20K;
break;
case WM_T_82576:
sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
sc->sc_pba &= RXPBS_SIZE_MASK_82576;
break;
case WM_T_82580:
case WM_T_I350:
case WM_T_I354:
sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
break;
case WM_T_I210:
case WM_T_I211:
sc->sc_pba = PBA_34K;
break;
case WM_T_ICH8:
/* Workaround for a bit corruption issue in FIFO memory */
sc->sc_pba = PBA_8K;
CSR_WRITE(sc, WMREG_PBS, PBA_16K);
break;
case WM_T_ICH9:
case WM_T_ICH10:
sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
PBA_14K : PBA_10K;
break;
case WM_T_PCH:
case WM_T_PCH2: /* XXX 14K? */
case WM_T_PCH_LPT:
case WM_T_PCH_SPT:
case WM_T_PCH_CNP:
case WM_T_PCH_TGP:
sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
PBA_12K : PBA_26K;
break;
default:
sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
PBA_40K : PBA_48K;
break;
}
/*
* Only old or non-multiqueue devices have the PBA register
* XXX Need special handling for 82575.
*/
if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
|| (sc->sc_type == WM_T_82575))
CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
/* Prevent the PCI-E bus from sticking */
if (sc->sc_flags & WM_F_PCIE) {
int timeout = 800;
while (timeout--) {
if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
== 0)
break;
delay(100);
}
if (timeout == 0)
device_printf(sc->sc_dev,
"failed to disable bus mastering\n");
}
/* Set the completion timeout for interface */
if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
|| (sc->sc_type == WM_T_82580)
|| (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
|| (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
wm_set_pcie_completion_timeout(sc);
/* Stop the transmit and receive processes. */
CSR_WRITE(sc, WMREG_RCTL, 0);
sc->sc_rctl &= ~RCTL_EN;
CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
CSR_WRITE_FLUSH(sc);
/* XXX set_tbi_sbp_82543() */
delay(10*1000);
/* Must acquire the MDIO ownership before MAC reset */
switch (sc->sc_type) {
case WM_T_82573:
case WM_T_82574:
case WM_T_82583:
error = wm_get_hw_semaphore_82573(sc);
break;
default:
break;
}
/*
* 82541 Errata 29? & 82547 Errata 28?
* See also the description about PHY_RST bit in CTRL register
* in 8254x_GBe_SDM.pdf.
*/
if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
CSR_WRITE(sc, WMREG_CTRL,
CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
CSR_WRITE_FLUSH(sc);
delay(5000);
}
switch (sc->sc_type) {
case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
case WM_T_82541:
case WM_T_82541_2:
case WM_T_82547:
case WM_T_82547_2:
/*
* On some chipsets, a reset through a memory-mapped write
* cycle can cause the chip to reset before completing the
* write cycle. This causes major headache that can be avoided
* by issuing the reset via indirect register writes through
* I/O space.
*
* So, if we successfully mapped the I/O BAR at attach time,
* use that. Otherwise, try our luck with a memory-mapped
* reset.
*/
if (sc->sc_flags & WM_F_IOH_VALID)
wm_io_write(sc, WMREG_CTRL, CTRL_RST);
else
CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
break;
case WM_T_82545_3:
case WM_T_82546_3:
/* Use the shadow control register on these chips. */
CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
break;
case WM_T_80003:
reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
if (sc->phy.acquire(sc) != 0)
break;
CSR_WRITE(sc, WMREG_CTRL, reg);
sc->phy.release(sc);
break;
case WM_T_ICH8:
case WM_T_ICH9:
case WM_T_ICH10:
case WM_T_PCH:
case WM_T_PCH2:
case WM_T_PCH_LPT:
case WM_T_PCH_SPT:
case WM_T_PCH_CNP:
case WM_T_PCH_TGP:
reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
if (wm_phy_resetisblocked(sc) == false) {
/*
* Gate automatic PHY configuration by hardware on
* non-managed 82579
*/
if ((sc->sc_type == WM_T_PCH2)
&& ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
== 0))
wm_gate_hw_phy_config_ich8lan(sc, true);
reg |= CTRL_PHY_RESET;
phy_reset = 1;
} else
device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
if (sc->phy.acquire(sc) != 0)
break;
CSR_WRITE(sc, WMREG_CTRL, reg);
/* Don't insert a completion barrier when reset */
delay(20*1000);
/*
* The EXTCNFCTR_MDIO_SW_OWNERSHIP bit is cleared by the reset,
* so don't use sc->phy.release(sc). Release sc_ich_phymtx
* only. See also wm_get_swflag_ich8lan().
*/
mutex_exit(sc->sc_ich_phymtx);
break;
case WM_T_82580:
case WM_T_I350:
case WM_T_I354:
case WM_T_I210:
case WM_T_I211:
CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
CSR_WRITE_FLUSH(sc);
delay(5000);
break;
case WM_T_82542_2_0:
case WM_T_82542_2_1:
case WM_T_82543:
case WM_T_82540:
case WM_T_82545:
case WM_T_82546:
case WM_T_82571:
case WM_T_82572:
case WM_T_82573:
case WM_T_82574:
case WM_T_82575:
case WM_T_82576:
case WM_T_82583:
default:
/* Everything else can safely use the documented method. */
CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
break;
}
/* Must release the MDIO ownership after MAC reset */
switch (sc->sc_type) {
case WM_T_82573:
case WM_T_82574:
case WM_T_82583:
if (error == 0)
wm_put_hw_semaphore_82573(sc);
break;
default:
break;
}
/* Set Phy Config Counter to 50msec */
if (sc->sc_type == WM_T_PCH2) {
reg = CSR_READ(sc, WMREG_FEXTNVM3);
reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
}
if (phy_reset != 0)
wm_get_cfg_done(sc);
/* Reload EEPROM */
switch (sc->sc_type) {
case WM_T_82542_2_0:
case WM_T_82542_2_1:
case WM_T_82543:
case WM_T_82544:
delay(10);
reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
CSR_WRITE_FLUSH(sc);
delay(2000);
break;
case WM_T_82540:
case WM_T_82545:
case WM_T_82545_3:
case WM_T_82546:
case WM_T_82546_3:
delay(5*1000);
/* XXX Disable HW ARPs on ASF enabled adapters */
break;
case WM_T_82541:
case WM_T_82541_2:
case WM_T_82547:
case WM_T_82547_2:
delay(20000);
/* XXX Disable HW ARPs on ASF enabled adapters */
break;
case WM_T_82571:
case WM_T_82572:
case WM_T_82573:
case WM_T_82574:
case WM_T_82583:
if (sc->sc_flags & WM_F_EEPROM_FLASH) {
delay(10);
reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
CSR_WRITE_FLUSH(sc);
}
/* check EECD_EE_AUTORD */
wm_get_auto_rd_done(sc);
/*
* Phy configuration from NVM just starts after EECD_AUTO_RD
* is set.
*/
if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
|| (sc->sc_type == WM_T_82583))
delay(25*1000);
break;
case WM_T_82575:
case WM_T_82576:
case WM_T_82580:
case WM_T_I350:
case WM_T_I354:
case WM_T_I210:
case WM_T_I211:
case WM_T_80003:
/* check EECD_EE_AUTORD */
wm_get_auto_rd_done(sc);
break;
case WM_T_ICH8:
case WM_T_ICH9:
case WM_T_ICH10:
case WM_T_PCH:
case WM_T_PCH2:
case WM_T_PCH_LPT:
case WM_T_PCH_SPT:
case WM_T_PCH_CNP:
case WM_T_PCH_TGP:
break;
default:
panic("%s: unknown type\n", __func__);
}
/* Check whether EEPROM is present or not */
switch (sc->sc_type) {
case WM_T_82575:
case WM_T_82576:
case WM_T_82580:
case WM_T_I350:
case WM_T_I354:
case WM_T_ICH8:
case WM_T_ICH9:
if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
/* Not found */
sc->sc_flags |= WM_F_EEPROM_INVALID;
if (sc->sc_type == WM_T_82575)
wm_reset_init_script_82575(sc);
}
break;
default:
break;
}
if (phy_reset != 0)
wm_phy_post_reset(sc);
if ((sc->sc_type == WM_T_82580)
|| (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
/* Clear global device reset status bit */
CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
}
/*
* For PCH, this write will make sure that any noise will be detected
* as a CRC error and be dropped rather than show up as a bad packet
* to the DMA engine
*/
if (sc->sc_type == WM_T_PCH)
CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
if (sc->sc_type >= WM_T_82544)
CSR_WRITE(sc, WMREG_WUC, 0);
if (sc->sc_type < WM_T_82575)
wm_disable_aspm(sc); /* Workaround for some chips */
wm_reset_mdicnfg_82580(sc);
if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
wm_pll_workaround_i210(sc);
if (sc->sc_type == WM_T_80003) {
/* Default to TRUE to enable the MDIC W/A */
sc->sc_flags |= WM_F_80003_MDIC_WA;
/*
* MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
* See IPV6EXDIS bit in wm_initialize_hardware_bits().
*/
mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
#if 0
mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
#endif
mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
CSR_WRITE(sc, WMREG_MRQC, mrqc);
}
/*
 * Adjust TX and RX queue numbers which the system actually uses.
 *
 * The numbers are affected by the following parameters.
 * - The number of hardware queues
 * - The number of MSI-X vectors (= "nvectors" argument)
 * - ncpu
 */
static void
wm_adjust_qnum(struct wm_softc *sc, int nvectors)
{
	int hw_nqueues;

	/*
	 * With fewer than two MSI-X vectors there is no vector to spare
	 * for the link interrupt, so run with a single queue.
	 */
	if (nvectors < 2) {
		sc->sc_nqueues = 1;
		return;
	}

	/* Hardware limit on the number of Tx/Rx queue pairs. */
	switch (sc->sc_type) {
	case WM_T_82576:
		hw_nqueues = 16;
		break;
	case WM_T_82580:
	case WM_T_I350:
	case WM_T_I354:
		hw_nqueues = 8;
		break;
	case WM_T_82575:
	case WM_T_I210:
		hw_nqueues = 4;
		break;
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_I211:
		hw_nqueues = 2;
		break;
	/*
	 * The below Ethernet controllers do not support MSI-X;
	 * this driver doesn't let them use multiqueue.
	 * - WM_T_80003
	 * - WM_T_ICH8
	 * - WM_T_ICH9
	 * - WM_T_ICH10
	 * - WM_T_PCH
	 * - WM_T_PCH2
	 * - WM_T_PCH_LPT
	 */
	default:
		hw_nqueues = 1;
		break;
	}

	/*
	 * One vector is reserved for the link interrupt, so at most
	 * nvectors - 1 queue interrupts are usable; queues beyond the
	 * number of vectors cannot improve scaling.
	 */
	if (nvectors - 1 < hw_nqueues)
		sc->sc_nqueues = nvectors - 1;
	else
		sc->sc_nqueues = hw_nqueues;

	/* Queues beyond the number of CPUs cannot improve scaling either. */
	if (sc->sc_nqueues > ncpu)
		sc->sc_nqueues = ncpu;
}
/*
* Both single interrupt MSI and INTx can use this function.
*/
static int
wm_setup_legacy(struct wm_softc *sc)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	const char *intrstr = NULL;
	char intrbuf[PCI_INTRSTR_LEN];
	int error;
	/*
	 * NOTE(review): in this excerpt the interrupt mapping and
	 * establishment code (which would set pc/intrstr/intrbuf/error)
	 * appears to be elided; intrstr is still NULL when printed
	 * below — confirm against the complete driver source.
	 */
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
	/* Legacy INTx/MSI uses exactly one interrupt vector. */
	sc->sc_nintrs = 1;
	return wm_softint_establish_queue(sc, 0, 0);
}
static int
wm_setup_msix(struct wm_softc *sc)
{
void *vih;
kcpuset_t *affinity;
int qidx, error, intr_idx, txrx_established;
pci_chipset_tag_t pc = sc->sc_pc;
const char *intrstr = NULL;
char intrbuf[PCI_INTRSTR_LEN];
char intr_xname[INTRDEVNAMEBUF];
if (sc->sc_nqueues < ncpu) {
/*
* To avoid other devices' interrupts, the affinity of Tx/Rx
* interrupts start from CPU#1.
*/
sc->sc_affinity_offset = 1;
} else {
		/*
		 * In this case, this device uses all CPUs. So, we unify
		 * the affinity cpu_index with the MSI-X vector number for
		 * readability.
		 */
sc->sc_affinity_offset = 0;
}
static void
wm_set_stopping_flags(struct wm_softc *sc)
{
int i;
KASSERT(mutex_owned(sc->sc_core_lock));
sc->sc_core_stopping = true;
/* Must set stopping flags in ascending order. */
for (i = 0; i < sc->sc_nqueues; i++) {
struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
/*
* 82575 doesn't have CNT_INGR field.
* So, overwrite counter field by software.
*/
if (sc->sc_type == WM_T_82575)
eitr |= __SHIFTIN(wmq->wmq_itr,
EITR_COUNTER_MASK_82575);
else
eitr |= EITR_CNT_INGR;
CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
/*
* 82574 has both ITR and EITR. SET EITR when we use
* the multi queue function with MSI-X.
*/
CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
} else {
KASSERT(wmq->wmq_id == 0);
CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
}
wmq->wmq_set_itr = false;
}
/*
 * TODO
 * The dynamic calculation of itr below is almost the same as in Linux
 * igb, but it does not fit wm(4) well. So, we keep AIM disabled until
 * we find an appropriate itr calculation.
 */
/*
* Calculate interrupt interval value to be going to write register in
* wm_itrs_writereg(). This function does not write ITR/EITR register.
*/
static void
wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
{
#ifdef NOTYET
	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
	struct wm_txqueue *txq = &wmq->wmq_txq;
	uint32_t avg_size = 0;
	uint32_t new_itr;
	/* Average packet size over both directions, in bytes. */
	if (rxq->rxq_packets)
		avg_size = rxq->rxq_bytes / rxq->rxq_packets;
	if (txq->txq_packets)
		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
	if (avg_size == 0) {
		new_itr = 450; /* restore default value */
		goto out;
	}
	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_size += 24;
	/* Give a little boost to mid-size frames */
	if ((avg_size > 300) && (avg_size < 1200))
		new_itr = avg_size / 3;
	else
		new_itr = avg_size / 2;
out:
	/*
	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
	 */
	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
		new_itr *= 4;
	/*
	 * NOTE(review): the lines below (RQDPC queue-drop accounting and the
	 * unmatched closing brace) do not belong to this function and look
	 * spliced in from statistics code; they are inert because NOTYET is
	 * never defined — verify against the full driver source.
	 */
	rqdpc = CSR_READ(sc, WMREG_RQDPC(i));
	/*
	 * On I210 and newer device, the RQDPC register is not
	 * cleared on read.
	 */
	if ((rqdpc != 0) && (sc->sc_type >= WM_T_I210))
		CSR_WRITE(sc, WMREG_RQDPC(i), 0);
	WM_Q_EVCNT_ADD(rxq, qdrop, rqdpc);
	total_qdrop += rqdpc;
	}
#endif
}
if ((sc->sc_type >= WM_T_I350) && !WM_IS_ICHPCH(sc)) {
WM_EVCNT_ADD(&sc->sc_ev_tlpic, CSR_READ(sc, WMREG_TLPIC));
WM_EVCNT_ADD(&sc->sc_ev_rlpic, CSR_READ(sc, WMREG_RLPIC));
if ((CSR_READ(sc, WMREG_MANC) & MANC_EN_BMC2OS) != 0) {
WM_EVCNT_ADD(&sc->sc_ev_b2ogprc,
CSR_READ(sc, WMREG_B2OGPRC));
WM_EVCNT_ADD(&sc->sc_ev_o2bspc,
CSR_READ(sc, WMREG_O2BSPC));
WM_EVCNT_ADD(&sc->sc_ev_b2ospc,
CSR_READ(sc, WMREG_B2OSPC));
WM_EVCNT_ADD(&sc->sc_ev_o2bgptc,
CSR_READ(sc, WMREG_O2BGPTC));
}
}
net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
if_statadd_ref(ifp, nsr, if_collisions, colc);
if_statadd_ref(ifp, nsr, if_ierrors,
crcerrs + algnerrc + symerrc + rxerrc + sec + cexterr + rlec);
	/*
	 * WMREG_RNBC is incremented when there are no available buffers in
	 * host memory. It does not mean the number of dropped packets,
	 * because an Ethernet controller can receive packets in such a case
	 * if there is space in the PHY's FIFO.
	 *
	 * If you want to know the number of WMREG_RNBC, you should use
	 * your own EVCNT instead of if_iqdrops.
	 */
if_statadd_ref(ifp, nsr, if_iqdrops, mpc + total_qdrop);
IF_STAT_PUTREF(ifp);
}
void
wm_clear_evcnt(struct wm_softc *sc)
{
#ifdef WM_EVENT_COUNTERS
int i;
/* RX queues */
for (i = 0; i < sc->sc_nqueues; i++) {
struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
	 * of the descriptor so that the headers are aligned, for normal mtu,
	 * on such platforms. One possibility is that the DMA itself is
	 * slightly more efficient if the front of the entire packet (instead
	 * of the front of the headers) is aligned.
	 *
	 * Note we must always set align_tweak to 0 if we are using
	 * jumbo frames.
	 */
#ifdef __NO_STRICT_ALIGNMENT
sc->sc_align_tweak = 0;
#else
if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
sc->sc_align_tweak = 0;
else
sc->sc_align_tweak = 2;
#endif /* __NO_STRICT_ALIGNMENT */
/* Cancel any pending I/O. */
wm_stop_locked(ifp, false, false);
/* Calculate (E)ITR value */
if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
/*
* For NEWQUEUE's EITR (except for 82575).
* 82575's EITR should be set same throttling value as other
* old controllers' ITR because the interrupt/sec calculation
* is the same, that is, 1,000,000,000 / (N * 256).
*
* 82574's EITR should be set same throttling value as ITR.
*
* For N interrupts/sec, set this value to:
* 1,000,000 / N in contrast to ITR throttling value.
*/
sc->sc_itr_init = 450;
} else if (sc->sc_type >= WM_T_82543) {
/*
* Set up the interrupt throttling register (units of 256ns)
* Note that a footnote in Intel's documentation says this
* ticker runs at 1/4 the rate when the chip is in 100Mbit
* or 10Mbit mode. Empirically, it appears to be the case
* that that is also true for the 1024ns units of the other
* interrupt-related timer registers -- so, really, we ought
* to divide this value by 4 when the link speed is low.
*
* XXX implement this division at link speed change!
*/
/*
* For N interrupts/sec, set this value to:
* 1,000,000,000 / (N * 256). Note that we set the
* absolute and packet timer values to this value
* divided by 4 to get "simple timer" behavior.
*/
sc->sc_itr_init = 1500; /* 2604 ints/sec */
}
error = wm_init_txrx_queues(sc);
if (error)
goto out;
/* Clear out the VLAN table -- we don't use it (yet). */
CSR_WRITE(sc, WMREG_VET, 0);
if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
trynum = 10; /* Due to hw errata */
else
trynum = 1;
for (i = 0; i < WM_VLAN_TABSIZE; i++)
for (j = 0; j < trynum; j++)
CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
/* Writes the control register. */
wm_set_vlan(sc);
if (sc->sc_flags & WM_F_HAS_MII) {
uint16_t kmreg;
switch (sc->sc_type) {
case WM_T_80003:
case WM_T_ICH8:
case WM_T_ICH9:
case WM_T_ICH10:
case WM_T_PCH:
case WM_T_PCH2:
case WM_T_PCH_LPT:
case WM_T_PCH_SPT:
case WM_T_PCH_CNP:
case WM_T_PCH_TGP:
/*
* Set the mac to wait the maximum time between each
* iteration and increase the max iterations when
* polling the phy; this fixes erroneous timeouts at
* 10Mbps.
*/
wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
0xFFFF);
wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
&kmreg);
kmreg |= 0x3F;
wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
kmreg);
break;
default:
break;
}
/* TX and RX */
for (i = 0; i < sc->sc_nqueues; i++) {
wmq = &sc->sc_queue[i];
CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
EITR_TX_QUEUE(wmq->wmq_id)
| EITR_RX_QUEUE(wmq->wmq_id));
}
/* Link status */
CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
EITR_OTHER);
} else if (sc->sc_type == WM_T_82574) {
/* Interrupt control */
reg = CSR_READ(sc, WMREG_CTRL_EXT);
reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
/*
* Work around issue with spurious interrupts
* in MSI-X mode.
* At wm_initialize_hardware_bits(), sc_nintrs has not
* initialized yet. So re-initialize WMREG_RFCTL here.
*/
reg = CSR_READ(sc, WMREG_RFCTL);
reg |= WMREG_RFCTL_ACKDIS;
CSR_WRITE(sc, WMREG_RFCTL, reg);
ivar = 0;
/* TX and RX */
for (i = 0; i < sc->sc_nqueues; i++) {
wmq = &sc->sc_queue[i];
qid = wmq->wmq_id;
qintr_idx = wmq->wmq_intr_idx;
ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
IVAR_TX_MASK_Q_82574(qid));
ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
IVAR_RX_MASK_Q_82574(qid));
}
/* Link status */
ivar |= __SHIFTIN((IVAR_VALID_82574
| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
} else {
/* Interrupt control */
CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
| GPIE_EIAME | GPIE_PBA);
switch (sc->sc_type) {
case WM_T_82580:
case WM_T_I350:
case WM_T_I354:
case WM_T_I210:
case WM_T_I211:
/* TX and RX */
for (i = 0; i < sc->sc_nqueues; i++) {
wmq = &sc->sc_queue[i];
qid = wmq->wmq_id;
qintr_idx = wmq->wmq_intr_idx;
ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
ivar &= ~IVAR_TX_MASK_Q(qid);
ivar |= __SHIFTIN((qintr_idx
| IVAR_VALID),
IVAR_TX_MASK_Q(qid));
ivar &= ~IVAR_RX_MASK_Q(qid);
ivar |= __SHIFTIN((qintr_idx
| IVAR_VALID),
IVAR_RX_MASK_Q(qid));
CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
}
break;
case WM_T_82576:
/* TX and RX */
for (i = 0; i < sc->sc_nqueues; i++) {
wmq = &sc->sc_queue[i];
qid = wmq->wmq_id;
qintr_idx = wmq->wmq_intr_idx;
ivar = CSR_READ(sc,
WMREG_IVAR_Q_82576(qid));
ivar &= ~IVAR_TX_MASK_Q_82576(qid);
ivar |= __SHIFTIN((qintr_idx
| IVAR_VALID),
IVAR_TX_MASK_Q_82576(qid));
ivar &= ~IVAR_RX_MASK_Q_82576(qid);
ivar |= __SHIFTIN((qintr_idx
| IVAR_VALID),
IVAR_RX_MASK_Q_82576(qid));
CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
ivar);
}
break;
default:
break;
}
/* Link status */
ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
IVAR_MISC_OTHER);
CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
}
if (wm_is_using_multiqueue(sc)) {
wm_init_rss(sc);
/*
** NOTE: Receive Full-Packet Checksum Offload
** is mutually exclusive with Multiqueue. However
** this is not the same as TCP/IP checksums which
** still work.
*/
reg = CSR_READ(sc, WMREG_RXCSUM);
reg |= RXCSUM_PCSD;
CSR_WRITE(sc, WMREG_RXCSUM, reg);
}
}
/* Set up the interrupt registers. */
CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
/* Set up the inter-packet gap. */
CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
if (sc->sc_type >= WM_T_82543) {
for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
struct wm_queue *wmq = &sc->sc_queue[qidx];
wm_itrs_writereg(sc, wmq);
}
/*
* Link interrupts occur much less than TX
* interrupts and RX interrupts. So, we don't
* tune EINTR(WM_MSIX_LINKINTR_IDX) value like
* FreeBSD's if_igb.
*/
}
/* Set the VLAN EtherType. */
CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
/*
* Set up the transmit control register; we start out with
* a collision distance suitable for FDX, but update it when
* we resolve the media type.
*/
sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
| TCTL_CT(TX_COLLISION_THRESHOLD)
| TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
if (sc->sc_type >= WM_T_82571)
sc->sc_tctl |= TCTL_MULR;
CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
/* Write TDT after TCTL.EN is set. See the document. */
CSR_WRITE(sc, WMREG_TDT(0), 0);
}
/* Set the media. */
if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
goto out;
/* Configure for OS presence */
wm_init_manageability(sc);
/*
* Set up the receive control register; we actually program the
* register when we set the receive filter. Use multicast address
* offset type 0.
*
* Only the i82544 has the ability to strip the incoming CRC, so we
* don't enable that feature.
*/
sc->sc_mchash_type = 0;
sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
| __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
/* 82574 use one buffer extended Rx descriptor. */
if (sc->sc_type == WM_T_82574)
sc->sc_rctl |= RCTL_DTYP_ONEBUF;
if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
sc->sc_rctl |= RCTL_SECRC;
/*
* Set the receive filter.
*
* For 82575 and 82576, the RX descriptors must be initialized after
* the setting of RCTL.EN in wm_set_filter()
*/
wm_set_filter(sc);
/* On 575 and later set RDT only if RX enabled */
if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
int qidx;
for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
for (i = 0; i < WM_NRXDESC; i++) {
mutex_enter(rxq->rxq_lock);
wm_init_rxdesc(rxq, i);
mutex_exit(rxq->rxq_lock);
}
}
}
wm_unset_stopping_flags(sc);
/* Start the one second link check clock. */
callout_schedule(&sc->sc_tick_ch, hz);
out:
/* Save last flags for the callback */
sc->sc_if_flags = ifp->if_flags;
sc->sc_ec_capenable = ec->ec_capenable;
if (error)
log(LOG_ERR, "%s: interface not running\n",
device_xname(sc->sc_dev));
return error;
}
/*
* wm_stop: [ifnet interface function]
*
* Stop transmission on the interface.
*/
static void
wm_stop(struct ifnet *ifp, int disable)
{
	struct wm_softc *sc = ifp->if_softc;
	/*
	 * NOTE(review): the core-lock acquisition and the call into the
	 * locked stop path appear to be elided from this excerpt ('disable'
	 * is unused here) — confirm against the complete driver source.
	 */
	/*
	 * After wm_set_stopping_flags(), it is guaranteed that
	 * wm_handle_queue_work() does not call workqueue_enqueue().
	 * However, workqueue_wait() cannot be called in wm_stop_locked()
	 * because it can sleep...
	 * So, call workqueue_wait() here.
	 */
	for (int i = 0; i < sc->sc_nqueues; i++)
		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
	workqueue_wait(sc->sc_reset_wq, &sc->sc_reset_work);
}
if (sc->sc_flags & WM_F_HAS_MII) {
/* Down the MII. */
mii_down(&sc->sc_mii);
} else {
#if 0
/* Should we clear PHY's status properly? */
wm_reset(sc);
#endif
}
/* Stop the transmit and receive processes. */
CSR_WRITE(sc, WMREG_TCTL, 0);
CSR_WRITE(sc, WMREG_RCTL, 0);
sc->sc_rctl &= ~RCTL_EN;
/*
* Clear the interrupt mask to ensure the device cannot assert its
* interrupt line.
* Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
* service any currently pending or shared interrupt.
*/
CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
sc->sc_icr = 0;
if (wm_is_using_msix(sc)) {
if (sc->sc_type != WM_T_82574) {
CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
CSR_WRITE(sc, WMREG_EIAC, 0);
} else
CSR_WRITE(sc, WMREG_EIAC_82574, 0);
}
/*
* Stop callouts after interrupts are disabled; if we have
* to wait for them, we will be releasing the CORE_LOCK
* briefly, which will unblock interrupts on the current CPU.
*/
/* Stop the one second clock. */
if (wait)
callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
else
callout_stop(&sc->sc_tick_ch);
/* Stop the 82547 Tx FIFO stall check timer. */
if (sc->sc_type == WM_T_82547) {
if (wait)
callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
else
callout_stop(&sc->sc_txfifo_ch);
}
txq->txq_fifo_head = 0;
txq->txq_fifo_stall = 0;
wm_start_locked(&sc->sc_ethercom.ec_if);
} else {
/*
* Still waiting for packets to drain; try again in
* another tick.
*/
callout_schedule(&sc->sc_txfifo_ch, 1);
}
}
out:
mutex_exit(txq->txq_lock);
}
/*
* wm_82547_txfifo_bugchk:
*
* Check for bug condition in the 82547 Tx FIFO. We need to
* prevent enqueueing a packet that would wrap around the end
 * of the Tx FIFO ring buffer, otherwise the chip will croak.
*
* We do this by checking the amount of space before the end
* of the Tx FIFO buffer. If the packet will not fit, we "stall"
* the Tx FIFO, wait for all remaining packets to drain, reset
* the internal FIFO pointers to the beginning, and restart
* transmission on the interface.
*/
#define WM_FIFO_HDR 0x10
#define WM_82547_PAD_LEN 0x3e0
static int
wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
{
	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
	/* Bytes remaining before the end of the FIFO ring buffer. */
	int space = txq->txq_fifo_size - txq->txq_fifo_head;
	/* Packet's FIFO footprint: length plus header, rounded up. */
	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
	/* Just return if already stalled. */
	if (txq->txq_fifo_stall)
		return 1;
	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		/* Stall only occurs in half-duplex mode. */
		goto send_packet;
	}
	/*
	 * NOTE(review): the remainder of this function (the space check,
	 * the stall decision, and the send_packet label) is not visible
	 * in this excerpt — confirm against the complete driver source.
	 */
static int
wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
{
int error;
/*
* Allocate the control data structures, and create and load the
* DMA map for it.
*
* NOTE: All Tx descriptors must be in the same 4G segment of
* memory. So must Rx descriptors. We simplify by allocating
* both sets within the same 4G segment.
*/
if (sc->sc_type < WM_T_82544)
WM_NTXDESC(txq) = WM_NTXDESC_82542;
else
WM_NTXDESC(txq) = WM_NTXDESC_82544;
if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
txq->txq_descsize = sizeof(nq_txdesc_t);
else
txq->txq_descsize = sizeof(wiseman_txdesc_t);
if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
1, &txq->txq_desc_rseg, 0)) != 0) {
aprint_error_dev(sc->sc_dev,
"unable to allocate TX control data, error = %d\n",
error);
goto fail_0;
}
if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
(void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
aprint_error_dev(sc->sc_dev,
"unable to map TX control data, error = %d\n", error);
goto fail_1;
}
if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
aprint_error_dev(sc->sc_dev,
"unable to create TX control data DMA map, error = %d\n",
error);
goto fail_2;
}
if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
aprint_error_dev(sc->sc_dev,
"unable to load TX control data DMA map, error = %d\n",
error);
goto fail_3;
}
static int
wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
{
int error;
size_t rxq_descs_size;
/*
* Allocate the control data structures, and create and load the
* DMA map for it.
*
* NOTE: All Tx descriptors must be in the same 4G segment of
* memory. So must Rx descriptors. We simplify by allocating
* both sets within the same 4G segment.
*/
rxq->rxq_ndesc = WM_NRXDESC;
if (sc->sc_type == WM_T_82574)
rxq->rxq_descsize = sizeof(ext_rxdesc_t);
else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
rxq->rxq_descsize = sizeof(nq_rxdesc_t);
else
rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
1, &rxq->rxq_desc_rseg, 0)) != 0) {
aprint_error_dev(sc->sc_dev,
"unable to allocate RX control data, error = %d\n",
error);
goto fail_0;
}
if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
rxq->rxq_desc_rseg, rxq_descs_size,
(void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
aprint_error_dev(sc->sc_dev,
"unable to map RX control data, error = %d\n", error);
goto fail_1;
}
if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
aprint_error_dev(sc->sc_dev,
"unable to create RX control data DMA map, error = %d\n",
error);
goto fail_2;
}
if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
aprint_error_dev(sc->sc_dev,
"unable to load RX control data DMA map, error = %d\n",
error);
goto fail_3;
}
/*
 * Allocate per-packet DMA maps for the transmit side of a queue.
 * Returns 0 on success, or a bus_dma error code; on failure every
 * map created so far is destroyed again.
 */
static int
wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
{
	int idx, rv;

	/* The 82547 variants use a shorter software Tx queue. */
	if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2)
		WM_TXQUEUELEN(txq) = WM_TXQUEUELEN_MAX_82547;
	else
		WM_TXQUEUELEN(txq) = WM_TXQUEUELEN_MAX;

	/* Create one transmit buffer DMA map per queue slot. */
	for (idx = 0; idx < WM_TXQUEUELEN(txq); idx++) {
		rv = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &txq->txq_soft[idx].txs_dmamap);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, error = %d\n",
			    idx, rv);
			goto fail;
		}
	}

	return 0;

 fail:
	/* Tear down whatever was created before the failure. */
	for (idx = 0; idx < WM_TXQUEUELEN(txq); idx++) {
		if (txq->txq_soft[idx].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    txq->txq_soft[idx].txs_dmamap);
	}
	return rv;
}
for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
if (txq->txq_soft[i].txs_dmamap != NULL)
bus_dmamap_destroy(sc->sc_dmat,
txq->txq_soft[i].txs_dmamap);
}
}
/*
 * Allocate per-descriptor DMA maps for the receive side of a queue.
 * Each map holds a single mbuf cluster. Returns 0 on success, or a
 * bus_dma error code; on failure every map created so far is destroyed.
 */
static int
wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
{
	int idx, rv;

	for (idx = 0; idx < rxq->rxq_ndesc; idx++) {
		rv = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &rxq->rxq_soft[idx].rxs_dmamap);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d error = %d\n",
			    idx, rv);
			goto fail;
		}
		/* No mbuf is attached until the ring is filled. */
		rxq->rxq_soft[idx].rxs_mbuf = NULL;
	}

	return 0;

 fail:
	/* Tear down whatever was created before the failure. */
	for (idx = 0; idx < rxq->rxq_ndesc; idx++) {
		if (rxq->rxq_soft[idx].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    rxq->rxq_soft[idx].rxs_dmamap);
	}
	return rv;
}
WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
for (j = 0; j < WM_NTXSEGS; j++) {
snprintf(txq->txq_txseg_evcnt_names[j],
sizeof(txq->txq_txseg_evcnt_names[j]),
"txq%02dtxseg%d", i, j);
evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
EVCNT_TYPE_MISC,
NULL, xname, txq->txq_txseg_evcnt_names[j]);
}
WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
/* Only for 82544 (and earlier?) */
if (sc->sc_type <= WM_T_82544)
WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
#endif /* WM_EVENT_COUNTERS */
tx_done++;
}
if (error)
goto fail_1;
/* For receive */
error = 0;
rx_done = 0;
for (i = 0; i < sc->sc_nqueues; i++) {
#ifdef WM_EVENT_COUNTERS
const char *xname;
#endif
struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
rxq->rxq_sc = sc;
rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
error = wm_alloc_rx_descs(sc, rxq);
if (error)
break;
if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
/*
* Don't write TDT before TCTL.EN is set.
* See the document.
*/
CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
| TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
| TXDCTL_WTHRESH(0));
else {
/* XXX should update with AIM? */
CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
if (sc->sc_type >= WM_T_82540) {
/* Should be the same */
CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
}
/*
* Set up some register offsets that are different between
* the i82542 and the i82543 and later chips.
*/
if (sc->sc_type < WM_T_82543)
txq->txq_tdt_reg = WMREG_OLD_TDT;
else
txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
uint32_t srrctl;
if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
panic("%s: MCLBYTES %d unsupported for 82575 "
"or higher\n", __func__, MCLBYTES);
/*
* Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF
* only.
*/
srrctl = SRRCTL_DESCTYPE_ADV_ONEBUF
| (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT);
/*
* Drop frames if the RX descriptor ring has no room.
* This is enabled only on multiqueue system to avoid
* bad influence to other queues.
*/
if (sc->sc_nqueues > 1)
srrctl |= SRRCTL_DROP_EN;
CSR_WRITE(sc, WMREG_SRRCTL(qid), srrctl);
static int
wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
{
struct wm_rxsoft *rxs;
int error, i;
KASSERT(mutex_owned(rxq->rxq_lock));
for (i = 0; i < rxq->rxq_ndesc; i++) {
rxs = &rxq->rxq_soft[i];
if (rxs->rxs_mbuf == NULL) {
if ((error = wm_add_rxbuf(rxq, i)) != 0) {
log(LOG_ERR, "%s: unable to allocate or map "
"rx buffer %d, error = %d\n",
device_xname(sc->sc_dev), i, error);
/*
* XXX Should attempt to run with fewer receive
* XXX buffers instead of just failing.
*/
wm_rxdrain(rxq);
return ENOMEM;
}
} else {
/*
* For 82575 and 82576, the RX descriptors must be
* initialized after the setting of RCTL.EN in
* wm_set_filter()
*/
if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
wm_init_rxdesc(rxq, i);
}
}
rxq->rxq_ptr = 0;
rxq->rxq_discard = 0;
WM_RXCHAIN_RESET(rxq);
/*
* Set up some register offsets that are different between
* the i82542 and the i82543 and later chips.
*/
if (sc->sc_type < WM_T_82543)
rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
else
rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
for (i = 0; i < sc->sc_nqueues; i++) {
struct wm_queue *wmq = &sc->sc_queue[i];
struct wm_txqueue *txq = &wmq->wmq_txq;
struct wm_rxqueue *rxq = &wmq->wmq_rxq;
/*
* TODO
* Currently, use constant variable instead of AIM.
* Furthermore, the interrupt interval of multiqueue which use
* polling mode is less than default value.
* More tuning and AIM are required.
*/
if (wm_is_using_multiqueue(sc))
wmq->wmq_itr = 50;
else
wmq->wmq_itr = sc->sc_itr_init;
wmq->wmq_set_itr = true;
if (__predict_false(m0->m_len <
(hlen + sizeof(struct tcphdr)))) {
/*
* TCP/IP headers are not in the first mbuf; we need
* to do this the slow and painful way. Let's just
* hope this doesn't happen very often.
*/
struct tcphdr th;
WM_Q_EVCNT_INCR(txq, tsopain);
m_copydata(m0, hlen, sizeof(th), &th);
if (v4) {
struct ip ip;
/*
* NOTE: Even if we're not using the IP or TCP/UDP checksum
* offload feature, if we load the context descriptor, we
* MUST provide valid values for IPCSS and TUCSS fields.
*/
if (m0->m_pkthdr.csum_flags &
(M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
WM_Q_EVCNT_INCR(txq, tusum);
fields |= WTX_TXSM;
tucs = WTX_TCPIP_TUCSS(offset) |
WTX_TCPIP_TUCSO(offset +
M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
WTX_TCPIP_TUCSE(0) /* Rest of packet */;
} else if ((m0->m_pkthdr.csum_flags &
(M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
WM_Q_EVCNT_INCR(txq, tusum6);
fields |= WTX_TXSM;
tucs = WTX_TCPIP_TUCSS(offset) |
WTX_TCPIP_TUCSO(offset +
M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
WTX_TCPIP_TUCSE(0) /* Rest of packet */;
} else {
/* Just initialize it to a valid TCP context. */
tucs = WTX_TCPIP_TUCSS(offset) |
WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
WTX_TCPIP_TUCSE(0) /* Rest of packet */;
}
*cmdp = cmd;
*fieldsp = fields;
/*
* We don't have to write context descriptor for every packet
* except for 82574. For 82574, we must write context descriptor
* for every packet when we use two descriptor queues.
*
* The 82574L can only remember the *last* context used
* regardless of queue that it was use for. We cannot reuse
* contexts on this hardware platform and must generate a new
* context every time. 82574L hardware spec, section 7.2.6,
* second note.
*/
if (sc->sc_nqueues < 2) {
/*
* Setting up a new checksum offload context for every
* frame takes a lot of processing time for hardware.
* This also reduces performance a lot for small sized
* frames so avoid it if driver can use previously
* configured checksum offload context.
* For TSO, in theory we can use the same TSO context only if
* frame is the same type(IP/TCP) and the same MSS. However
* checking whether a frame has the same IP/TCP structure is a
* hard thing so just ignore that and always re-establish a
* new TSO context.
*/
if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
== 0) {
if (txq->txq_last_hw_cmd == cmd &&
txq->txq_last_hw_fields == fields &&
txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
txq->txq_last_hw_tucs == (tucs & 0xffff)) {
WM_Q_EVCNT_INCR(txq, skipcontext);
return;
}
}
if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
return;
if (__predict_false(wm_linkdown_discard(txq))) {
do {
if (is_transmit)
m0 = pcq_get(txq->txq_interq);
else
IFQ_DEQUEUE(&ifp->if_snd, m0);
/*
* Increment the successful packet counter even in the case
* in which the packet is discarded by the link-down PHY.
*/
if (m0 != NULL) {
if_statinc(ifp, if_opackets);
m_freem(m0);
}
} while (m0 != NULL);
return;
}
/* Remember the previous number of free descriptors. */
ofree = txq->txq_free;
/*
* Loop through the send queue, setting up transmit descriptors
* until we drain the queue, or use up all available transmit
* descriptors.
*/
for (;;) {
m0 = NULL;
/* Get a work queue entry. */
if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
wm_txeof(txq, UINT_MAX);
if (txq->txq_sfree == 0) {
DPRINTF(sc, WM_DEBUG_TX,
("%s: TX: no free job descriptors\n",
device_xname(sc->sc_dev)));
WM_Q_EVCNT_INCR(txq, txsstall);
break;
}
}
/* Grab a packet off the queue. */
if (is_transmit)
m0 = pcq_get(txq->txq_interq);
else
IFQ_DEQUEUE(&ifp->if_snd, m0);
if (m0 == NULL)
break;
DPRINTF(sc, WM_DEBUG_TX,
("%s: TX: have packet to transmit: %p\n",
device_xname(sc->sc_dev), m0));
/*
* So says the Linux driver:
* The controller does a simple calculation to make sure
* there is enough room in the FIFO before initiating the
* DMA for each buffer. The calc is:
* 4 = ceil(buffer len / MSS)
* To make sure we don't overrun the FIFO, adjust the max
* buffer len if the MSS drops.
*/
dmamap->dm_maxsegsz =
(use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
? m0->m_pkthdr.segsz << 2
: WTX_MAX_LEN;
/*
* Load the DMA map. If this fails, the packet either
* didn't fit in the allotted number of segments, or we
* were short on resources. For the too-many-segments
* case, we simply report an error and drop the packet,
* since we can't sanely copy a jumbo packet to a single
* buffer.
*/
retry:
error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
BUS_DMA_WRITE | BUS_DMA_NOWAIT);
if (__predict_false(error)) {
if (error == EFBIG) {
if (remap == true) {
struct mbuf *m;
remap = false;
m = m_defrag(m0, M_NOWAIT);
if (m != NULL) {
WM_Q_EVCNT_INCR(txq, defrag);
m0 = m;
goto retry;
}
}
WM_Q_EVCNT_INCR(txq, toomanyseg);
log(LOG_ERR, "%s: Tx packet consumes too many "
"DMA segments, dropping...\n",
device_xname(sc->sc_dev));
wm_dump_mbuf_chain(sc, m0);
m_freem(m0);
continue;
}
/* Short on resources, just stop for now. */
DPRINTF(sc, WM_DEBUG_TX,
("%s: TX: dmamap load failed: %d\n",
device_xname(sc->sc_dev), error));
break;
}
segs_needed = dmamap->dm_nsegs;
if (use_tso) {
/* For sentinel descriptor; see below. */
segs_needed++;
}
/*
* Ensure we have enough descriptors free to describe
* the packet. Note, we always reserve one descriptor
* at the end of the ring due to the semantics of the
* TDT register, plus one more in the event we need
* to load offload context.
*/
if (segs_needed > txq->txq_free - 2) {
/*
* Not enough free descriptors to transmit this
* packet. We haven't committed anything yet,
* so just unload the DMA map, put the packet
* back on the queue, and punt. Notify the upper
* layer that there are no more slots left.
*/
DPRINTF(sc, WM_DEBUG_TX,
("%s: TX: need %d (%d) descriptors, have %d\n",
device_xname(sc->sc_dev), dmamap->dm_nsegs,
segs_needed, txq->txq_free - 1));
txq->txq_flags |= WM_TXQ_NO_SPACE;
bus_dmamap_unload(sc->sc_dmat, dmamap);
WM_Q_EVCNT_INCR(txq, txdstall);
break;
}
/*
* Check for 82547 Tx FIFO bug. We need to do this
* once we know we can transmit the packet, since we
* do some internal FIFO space accounting here.
*/
if (sc->sc_type == WM_T_82547 &&
wm_82547_txfifo_bugchk(sc, m0)) {
DPRINTF(sc, WM_DEBUG_TX,
("%s: TX: 82547 Tx FIFO bug detected\n",
device_xname(sc->sc_dev)));
txq->txq_flags |= WM_TXQ_NO_SPACE;
bus_dmamap_unload(sc->sc_dmat, dmamap);
WM_Q_EVCNT_INCR(txq, fifo_stall);
break;
}
/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
/*
* Store a pointer to the packet so that we can free it
* later.
*
* Initially, we consider the number of descriptors the
* packet uses the number of DMA segments. This may be
* incremented by 1 if we do checksum offload (a descriptor
* is used to set the checksum context).
*/
txs->txs_mbuf = m0;
txs->txs_firstdesc = txq->txq_next;
txs->txs_ndesc = segs_needed;
/* Initialize the transmit descriptor. */
for (nexttx = txq->txq_next, seg = 0;
seg < dmamap->dm_nsegs; seg++) {
for (seglen = dmamap->dm_segs[seg].ds_len,
curaddr = dmamap->dm_segs[seg].ds_addr;
seglen != 0;
curaddr += curlen, seglen -= curlen,
nexttx = WM_NEXTTX(txq, nexttx)) {
curlen = seglen;
/*
* So says the Linux driver:
* Work around for premature descriptor
* write-backs in TSO mode. Append a
* 4-byte sentinel descriptor.
*/
if (use_tso && seg == dmamap->dm_nsegs - 1 &&
curlen > 8)
curlen -= 4;
/*
* Set up the command byte on the last descriptor of
* the packet. If we're in the interrupt delay window,
* delay the interrupt.
*/
txq->txq_descs[lasttx].wtx_cmdlen |=
htole32(WTX_CMD_EOP | WTX_CMD_RS);
/*
* If VLANs are enabled and the packet has a VLAN tag, set
* up the descriptor to encapsulate the packet for us.
*
* This is only valid on the last descriptor of the packet.
*/
if (vlan_has_tag(m0)) {
txq->txq_descs[lasttx].wtx_cmdlen |=
htole32(WTX_CMD_VLE);
txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
= htole16(vlan_get_tag(m0));
}
if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
int hlen = offset + iphl;
int tcp_hlen;
bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
if (__predict_false(m0->m_len <
(hlen + sizeof(struct tcphdr)))) {
/*
* TCP/IP headers are not in the first mbuf; we need
* to do this the slow and painful way. Let's just
* hope this doesn't happen very often.
*/
struct tcphdr th;
WM_Q_EVCNT_INCR(txq, tsopain);
m_copydata(m0, hlen, sizeof(th), &th);
if (v4) {
struct ip ip;
/*
* We don't have to write context descriptor for every packet to
* NEWQUEUE controllers, that is 82575, 82576, 82580, I350, I354,
* I210 and I211. It is enough to write once per a Tx queue for these
* controllers.
* It would be overhead to write context descriptor for every packet,
* however it does not cause problems.
*/
/* Fill in the context descriptor. */
txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
htole32(vl_len);
txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
htole32(cmdc);
txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
htole32(mssidx);
wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
DPRINTF(sc, WM_DEBUG_TX,
("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
txq->txq_next, 0, vl_len));
DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
txs->txs_ndesc++;
}
/*
* The situations which this mutex_tryenter() fails at running time
* are below two patterns.
* (1) contention with interrupt handler(wm_txrxintr_msix())
* (2) contention with deferred if_start softint(wm_handle_queue())
* In the case of (1), the last packet enqueued to txq->txq_interq is
* dequeued by wm_deferred_start_locked(). So, it does not get stuck.
* In the case of (2), the last packet enqueued to txq->txq_interq is
* also dequeued by wm_deferred_start_locked(). So, it does not get
* stuck, either.
*/
if (mutex_tryenter(txq->txq_lock)) {
if (!txq->txq_stopping)
wm_nq_transmit_locked(ifp, txq);
mutex_exit(txq->txq_lock);
}
if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
return;
if (__predict_false(wm_linkdown_discard(txq))) {
do {
if (is_transmit)
m0 = pcq_get(txq->txq_interq);
else
IFQ_DEQUEUE(&ifp->if_snd, m0);
/*
* Increment the successful packet counter even in the case
* in which the packet is discarded by the link-down PHY.
*/
if (m0 != NULL) {
if_statinc(ifp, if_opackets);
m_freem(m0);
}
} while (m0 != NULL);
return;
}
sent = false;
/*
* Loop through the send queue, setting up transmit descriptors
* until we drain the queue, or use up all available transmit
* descriptors.
*/
for (;;) {
m0 = NULL;
/* Get a work queue entry. */
if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
wm_txeof(txq, UINT_MAX);
if (txq->txq_sfree == 0) {
DPRINTF(sc, WM_DEBUG_TX,
("%s: TX: no free job descriptors\n",
device_xname(sc->sc_dev)));
WM_Q_EVCNT_INCR(txq, txsstall);
break;
}
}
/* Grab a packet off the queue. */
if (is_transmit)
m0 = pcq_get(txq->txq_interq);
else
IFQ_DEQUEUE(&ifp->if_snd, m0);
if (m0 == NULL)
break;
DPRINTF(sc, WM_DEBUG_TX,
("%s: TX: have packet to transmit: %p\n",
device_xname(sc->sc_dev), m0));
/*
* Load the DMA map. If this fails, the packet either
* didn't fit in the allotted number of segments, or we
* were short on resources. For the too-many-segments
* case, we simply report an error and drop the packet,
* since we can't sanely copy a jumbo packet to a single
* buffer.
*/
retry:
error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
BUS_DMA_WRITE | BUS_DMA_NOWAIT);
if (__predict_false(error)) {
if (error == EFBIG) {
if (remap == true) {
struct mbuf *m;
remap = false;
m = m_defrag(m0, M_NOWAIT);
if (m != NULL) {
WM_Q_EVCNT_INCR(txq, defrag);
m0 = m;
goto retry;
}
}
WM_Q_EVCNT_INCR(txq, toomanyseg);
log(LOG_ERR, "%s: Tx packet consumes too many "
"DMA segments, dropping...\n",
device_xname(sc->sc_dev));
wm_dump_mbuf_chain(sc, m0);
m_freem(m0);
continue;
}
/* Short on resources, just stop for now. */
DPRINTF(sc, WM_DEBUG_TX,
("%s: TX: dmamap load failed: %d\n",
device_xname(sc->sc_dev), error));
break;
}
segs_needed = dmamap->dm_nsegs;
/*
* Ensure we have enough descriptors free to describe
* the packet. Note, we always reserve one descriptor
* at the end of the ring due to the semantics of the
* TDT register, plus one more in the event we need
* to load offload context.
*/
if (segs_needed > txq->txq_free - 2) {
/*
* Not enough free descriptors to transmit this
* packet. We haven't committed anything yet,
* so just unload the DMA map, put the packet
* back on the queue, and punt. Notify the upper
* layer that there are no more slots left.
*/
DPRINTF(sc, WM_DEBUG_TX,
("%s: TX: need %d (%d) descriptors, have %d\n",
device_xname(sc->sc_dev), dmamap->dm_nsegs,
segs_needed, txq->txq_free - 1));
txq->txq_flags |= WM_TXQ_NO_SPACE;
bus_dmamap_unload(sc->sc_dmat, dmamap);
WM_Q_EVCNT_INCR(txq, txdstall);
break;
}
/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
/*
* Store a pointer to the packet so that we can free it
* later.
*
* Initially, we consider the number of descriptors the
* packet uses the number of DMA segments. This may be
* incremented by 1 if we do checksum offload (a descriptor
* is used to set the checksum context).
*/
txs->txs_mbuf = m0;
txs->txs_firstdesc = txq->txq_next;
txs->txs_ndesc = segs_needed;
/* Set up offload parameters for this packet. */
uint32_t cmdlen, fields, dcmdlen;
if (m0->m_pkthdr.csum_flags &
(M_CSUM_TSOv4 | M_CSUM_TSOv6 |
M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
&do_csum);
} else {
do_csum = false;
cmdlen = 0;
fields = 0;
}
/*
* Set up the command byte on the last descriptor of
* the packet. If we're in the interrupt delay window,
* delay the interrupt.
*/
KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
(NQTX_CMD_EOP | NQTX_CMD_RS));
txq->txq_descs[lasttx].wtx_cmdlen |=
htole32(WTX_CMD_EOP | WTX_CMD_RS);
if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
/* XXX need for ALTQ or one CPU system */
if (qid == 0)
wm_nq_start_locked(ifp);
wm_nq_transmit_locked(ifp, txq);
} else {
/* XXX need for ALTQ or one CPU system */
if (qid == 0)
wm_start_locked(ifp);
wm_transmit_locked(ifp, txq);
}
}
/*
* Go through the Tx list and free mbufs for those
* frames which have been transmitted.
*/
for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
txs = &txq->txq_soft[i];
/*
* 82574 and newer's document says the status field has neither
* EC (Excessive Collision) bit nor LC (Late Collision) bit
* (reserved). Refer "PCIe GbE Controller Open Source Software
* Developer's Manual", 82574 datasheet and newer.
*
* XXX I saw the LC bit was set on I218 even though the media
* was full duplex, so the bit might be used for other
* meaning ...(I have no document).
*/
if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
NQRXC_STATUS_DD)) {
/* We have processed all of the receive descriptors. */
wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
return false;
}
if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
if (wm_rxdesc_is_set_status(sc, status,
WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
WM_Q_EVCNT_INCR(rxq, ipsum);
m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
if (wm_rxdesc_is_set_error(sc, errors,
WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
}
if (wm_rxdesc_is_set_status(sc, status,
WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
/*
* Note: we don't know if this was TCP or UDP,
* so we just set both bits, and expect the
* upper layers to deal.
*/
WM_Q_EVCNT_INCR(rxq, tusum);
m->m_pkthdr.csum_flags |=
M_CSUM_TCPv4 | M_CSUM_UDPv4 |
M_CSUM_TCPv6 | M_CSUM_UDPv6;
if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
}
}
}
/*
* Add a new receive buffer to the ring, unless of
* course the length is zero. Treat the latter as a
* failed mapping.
*/
if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
/*
* Failed, throw away what we've done so
* far, and discard the rest of the packet.
*/
if_statinc(ifp, if_ierrors);
bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
wm_init_rxdesc(rxq, i);
if (!wm_rxdesc_is_eop(rxq, status))
rxq->rxq_discard = 1;
m_freem(rxq->rxq_head);
WM_RXCHAIN_RESET(rxq);
DPRINTF(sc, WM_DEBUG_RX,
("%s: RX: Rx buffer allocation failed, "
"dropping packet%s\n", device_xname(sc->sc_dev),
rxq->rxq_discard ? " (discard)" : ""));
continue;
}
m->m_len = len;
rxq->rxq_len += len;
DPRINTF(sc, WM_DEBUG_RX,
("%s: RX: buffer at %p len %d\n",
device_xname(sc->sc_dev), m->m_data, len));
/* If this is not the end of the packet, keep looking. */
if (!wm_rxdesc_is_eop(rxq, status)) {
WM_RXCHAIN_LINK(rxq, m);
DPRINTF(sc, WM_DEBUG_RX,
("%s: RX: not yet EOP, rxlen -> %d\n",
device_xname(sc->sc_dev), rxq->rxq_len));
continue;
}
/*
* Okay, we have the entire packet now. The chip is
* configured to include the FCS except I35[04], I21[01].
* (not all chips can be configured to strip it), so we need
* to trim it. Those chips have an errata: the RCTL_SECRC bit
* in the RCTL register is always set, so we don't trim it.
* PCH2 and newer chips also do not include the FCS when a jumbo
* frame is used, to work around an errata.
* May need to adjust length of previous mbuf in the
* chain if the current mbuf is too short.
*/
if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
if (m->m_len < ETHER_CRC_LEN) {
rxq->rxq_tail->m_len
-= (ETHER_CRC_LEN - m->m_len);
m->m_len = 0;
} else
m->m_len -= ETHER_CRC_LEN;
len = rxq->rxq_len - ETHER_CRC_LEN;
} else
len = rxq->rxq_len;
WM_RXCHAIN_LINK(rxq, m);
*rxq->rxq_tailp = NULL;
m = rxq->rxq_head;
WM_RXCHAIN_RESET(rxq);
DPRINTF(sc, WM_DEBUG_RX,
("%s: RX: have entire packet, len -> %d\n",
device_xname(sc->sc_dev), len));
/* If an error occurred, update stats and drop the packet. */
if (wm_rxdesc_has_errors(rxq, errors)) {
m_freem(m);
continue;
}
/* No errors. Receive the packet. */
m_set_rcvif(m, ifp);
m->m_pkthdr.len = len;
/*
* TODO
* should save the rsshash and rsstype in this mbuf.
*/
DPRINTF(sc, WM_DEBUG_RX,
("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
device_xname(sc->sc_dev), rsstype, rsshash));
/*
* If VLANs are enabled, VLAN packets have been unwrapped
* for us. Associate the tag with the packet.
*/
if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
continue;
/* Set up checksum info for this packet. */
wm_rxdesc_ensure_checksum(rxq, status, errors, m);
rxq->rxq_packets++;
rxq->rxq_bytes += len;
/* Pass it on. */
if_percpuq_enqueue(sc->sc_ipq, m);
if (rxq->rxq_stopping)
break;
}
rxq->rxq_ptr = i;
if (count != 0)
rnd_add_uint32(&sc->rnd_source, count);
if ((icr & ICR_LSC) == 0) {
if (icr & ICR_RXSEQ)
DPRINTF(sc, WM_DEBUG_LINK,
("%s: LINK Receive sequence error\n",
device_xname(dev)));
return;
}
/* Link status changed */
status = CSR_READ(sc, WMREG_STATUS);
link = status & STATUS_LU;
if (link) {
DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
device_xname(dev),
(status & STATUS_FD) ? "FDX" : "HDX"));
if (wm_phy_need_linkdown_discard(sc)) {
DPRINTF(sc, WM_DEBUG_LINK,
("%s: linkintr: Clear linkdown discard flag\n",
device_xname(dev)));
wm_clear_linkdown_discard(sc);
}
} else {
DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
device_xname(dev)));
if (wm_phy_need_linkdown_discard(sc)) {
DPRINTF(sc, WM_DEBUG_LINK,
("%s: linkintr: Set linkdown discard flag\n",
device_xname(dev)));
wm_set_linkdown_discard(sc);
}
}
if ((sc->sc_type == WM_T_ICH8) && (link == false))
wm_gig_downshift_workaround_ich8lan(sc);
if ((sc->sc_type == WM_T_ICH8) && (sc->sc_phytype == WMPHY_IGP_3))
wm_kmrn_lock_loss_workaround_ich8lan(sc);
DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
device_xname(dev)));
if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
if (link) {
/*
* To workaround the problem, it's required to wait
* several hundred milliseconds. The time depends
* on the environment. Wait 1 second for the safety.
*/
dopoll = false;
getmicrotime(&sc->sc_linkup_delay_time);
sc->sc_linkup_delay_time.tv_sec += 1;
} else if (sc->sc_linkup_delay_time.tv_sec != 0) {
/*
* Simplify by checking tv_sec only. It's enough.
*
* Currently, it's not required to clear the time.
* It's just to know the timer is stopped
* (for debugging).
*/
/*
* Call mii_pollstat().
*
* Some (not all) systems using I35[04] or I21[01] don't send packets soon
* after linkup. The MAC sends a packet to the PHY and no error is
* observed. This behavior causes a problem that gratuitous ARP and/or
* IPv6 DAD packet are silently dropped. To avoid this problem, don't
* call mii_pollstat() here which will send LINK_STATE_UP notification
* to the upper layer. Instead, mii_pollstat() will be called in
* wm_gmii_mediastatus() or mii_tick() will be called in wm_tick().
*/
if (dopoll)
mii_pollstat(&sc->sc_mii);
/* Do some workarounds soon after link status is changed. */
if (sc->sc_type == WM_T_82543) {
int miistatus, active;
/*
* With 82543, we need to force speed and
* duplex on the MAC equal to what the PHY
* speed and duplex configuration is.
*/
miistatus = sc->sc_mii.mii_media_status;
if (miistatus & IFM_ACTIVE) {
active = sc->sc_mii.mii_media_active;
sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
switch (IFM_SUBTYPE(active)) {
case IFM_10_T:
sc->sc_ctrl |= CTRL_SPEED_10;
break;
case IFM_100_TX:
sc->sc_ctrl |= CTRL_SPEED_100;
break;
case IFM_1000_T:
sc->sc_ctrl |= CTRL_SPEED_1000;
break;
default:
/*
* Fiber?
* Should not enter here.
*/
device_printf(dev, "unknown media (%x)\n",
active);
break;
}
if (active & IFM_FDX)
sc->sc_ctrl |= CTRL_FD;
CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
}
} else if (sc->sc_type == WM_T_PCH) {
wm_k1_gig_workaround_hv(sc,
((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
}
/*
* When connected at 10Mbps half-duplex, some parts are excessively
* aggressive resulting in many collisions. To avoid this, increase
* the IPG and reduce Rx latency in the PHY.
*/
if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_TGP)
&& link) {
uint32_t tipg_reg;
uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
bool fdx;
uint16_t emi_addr, emi_val;
ptr_gap = (data & (0x3ff << 2)) >> 2;
if (ptr_gap < 0x18) {
data &= ~(0x3ff << 2);
data |= (0x18 << 2);
rv = sc->phy.writereg_locked(dev,
2, I82579_UNKNOWN1, data);
}
sc->phy.release(sc);
if (rv)
return;
} else {
rv = sc->phy.acquire(sc);
if (rv)
return;
rv = sc->phy.writereg_locked(dev, 2,
I82579_UNKNOWN1, 0xc023);
sc->phy.release(sc);
if (rv)
return;
}
}
}
/*
* I217 Packet Loss issue:
* ensure that FEXTNVM4 Beacon Duration is set correctly
* on power up.
* Set the Beacon Duration for I217 to 8 usec
*/
if (sc->sc_type >= WM_T_PCH_LPT) {
reg = CSR_READ(sc, WMREG_FEXTNVM4);
reg &= ~FEXTNVM4_BEACON_DURATION;
reg |= FEXTNVM4_BEACON_DURATION_8US;
CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
}
if (more) {
/* Try to get more packets going. */
wm_legacy_intr_disable(sc);
wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
wm_sched_handle_queue(sc, wmq);
}
if (__predict_false(!wm_is_using_msix(sc))) {
wm_legacy_intr_enable(sc);
return;
}
/*
* ICR_OTHER which is disabled in wm_linkintr_msix() is enabled here.
* There is no need to care about which of RXQ(0) and RXQ(1) enable
* ICR_OTHER in first, because each RXQ/TXQ interrupt is disabled
* while each wm_handle_queue(wmq) is running.
*/
if (sc->sc_type == WM_T_82574)
CSR_WRITE(sc, WMREG_IMS,
ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
else if (sc->sc_type == WM_T_82575)
CSR_WRITE(sc, WMREG_EIMS,
EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
else
CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
}
/*
* Some qemu environment workaround. They don't stop interrupt
* immediately.
*/
wmq->wmq_wq_enqueued = false;
wm_handle_queue(wmq);
}
/*
* wm_linkintr_msix:
*
* Interrupt service routine for link status change for MSI-X.
*/
static int
wm_linkintr_msix(void *arg)
{
struct wm_softc *sc = arg;
uint32_t reg;
bool has_rxo;
/*
* XXX 82574 MSI-X mode workaround
*
* 82574 MSI-X mode causes a receive overrun(RXO) interrupt as an
* ICR_OTHER MSI-X vector; furthermore it causes neither ICR_RXQ(0)
* nor ICR_RXQ(1) vectors. So, we generate ICR_RXQ(0) and ICR_RXQ(1)
* interrupts by writing WMREG_ICS to process receive packets.
*/
if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
#if defined(WM_DEBUG)
log(LOG_WARNING, "%s: Receive overrun\n",
device_xname(sc->sc_dev));
#endif /* defined(WM_DEBUG) */
has_rxo = true;
/*
* The RXO interrupt is very high rate when receive traffic is
* high rate. We use polling mode for ICR_OTHER like Tx/Rx
* interrupts. ICR_OTHER will be enabled at the end of
* wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and
* ICR_RXQ(1) interrupts.
*/
CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
rv = sc->phy.acquire(sc);
if (rv != 0) {
aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
__func__);
return;
}
switch (sc->sc_type) {
case WM_T_82542_2_0:
case WM_T_82542_2_1:
/* null */
break;
case WM_T_82543:
/*
* With 82543, we need to force speed and duplex on the MAC
* equal to what the PHY speed and duplex configuration is.
* In addition, we need to perform a hardware reset on the PHY
* to take it out of reset.
*/
sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
CSR_WRITE_FLUSH(sc);
delay(150);
#if 0
sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
#endif
delay(20*1000); /* XXX extra delay to get PHY ID? */
break;
case WM_T_82544: /* Reset 10000us */
case WM_T_82540:
case WM_T_82545:
case WM_T_82545_3:
case WM_T_82546:
case WM_T_82546_3:
case WM_T_82541:
case WM_T_82541_2:
case WM_T_82547:
case WM_T_82547_2:
case WM_T_82571: /* Reset 100us */
case WM_T_82572:
case WM_T_82573:
case WM_T_82574:
case WM_T_82575:
case WM_T_82576:
case WM_T_82580:
case WM_T_I350:
case WM_T_I354:
case WM_T_I210:
case WM_T_I211:
case WM_T_82583:
case WM_T_80003:
/* Generic reset */
CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
CSR_WRITE_FLUSH(sc);
delay(20000);
CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
CSR_WRITE_FLUSH(sc);
delay(20000);
if ((sc->sc_type == WM_T_82541)
|| (sc->sc_type == WM_T_82541_2)
|| (sc->sc_type == WM_T_82547)
|| (sc->sc_type == WM_T_82547_2)) {
/* Workaround for igp are done in igp_reset() */
/* XXX add code to set LED after phy reset */
}
break;
case WM_T_ICH8:
case WM_T_ICH9:
case WM_T_ICH10:
case WM_T_PCH:
case WM_T_PCH2:
case WM_T_PCH_LPT:
case WM_T_PCH_SPT:
case WM_T_PCH_CNP:
case WM_T_PCH_TGP:
/* Generic reset */
CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
CSR_WRITE_FLUSH(sc);
delay(100);
CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
CSR_WRITE_FLUSH(sc);
delay(150);
break;
default:
panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
__func__);
break;
}
sc->phy.release(sc);
/* get_cfg_done */
wm_get_cfg_done(sc);
/* Extra setup */
switch (sc->sc_type) {
case WM_T_82542_2_0:
case WM_T_82542_2_1:
case WM_T_82543:
case WM_T_82544:
case WM_T_82540:
case WM_T_82545:
case WM_T_82545_3:
case WM_T_82546:
case WM_T_82546_3:
case WM_T_82541_2:
case WM_T_82547_2:
case WM_T_82571:
case WM_T_82572:
case WM_T_82573:
case WM_T_82574:
case WM_T_82583:
case WM_T_82575:
case WM_T_82576:
case WM_T_82580:
case WM_T_I350:
case WM_T_I354:
case WM_T_I210:
case WM_T_I211:
case WM_T_80003:
/* Null */
break;
case WM_T_82541:
case WM_T_82547:
/* XXX Configure actively LED after PHY reset */
break;
case WM_T_ICH8:
case WM_T_ICH9:
case WM_T_ICH10:
case WM_T_PCH:
case WM_T_PCH2:
case WM_T_PCH_LPT:
case WM_T_PCH_SPT:
case WM_T_PCH_CNP:
case WM_T_PCH_TGP:
wm_phy_post_reset(sc);
break;
default:
panic("%s: unknown type\n", __func__);
break;
}
}
/*
* Set up sc_phytype and mii_{read|write}reg.
*
* To identify PHY type, correct read/write function should be selected.
* To select correct read/write function, PCI ID or MAC type are required
* without accessing PHY registers.
*
* On the first call of this function, PHY ID is not known yet. Check
* PCI ID or MAC type. The list of the PCI ID may not be perfect, so the
* result might be incorrect.
*
* In the second call, PHY OUI and model is used to identify PHY type.
* It might not be perfect because of the lack of compared entry, but it
* would be better than the first call.
*
* If the detected new result and previous assumption is different,
* a diagnostic message will be printed.
*/
static void
wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
uint16_t phy_model)
{
device_t dev = sc->sc_dev;
struct mii_data *mii = &sc->sc_mii;
uint16_t new_phytype = WMPHY_UNKNOWN;
uint16_t doubt_phytype = WMPHY_UNKNOWN;
mii_readreg_t new_readreg;
mii_writereg_t new_writereg;
bool dodiag = true;
/*
* 1000BASE-T SFP uses SGMII and the first assumed PHY type is always
* incorrect. So don't print diag output when it's 2nd call.
*/
if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
dodiag = false;
if (mii->mii_readreg == NULL) {
/*
* This is the first call of this function. For ICH and PCH
* variants, it's difficult to determine the PHY access method
* by sc_type, so use the PCI product ID for some devices.
*/
switch (sc->sc_pcidevid) {
case PCI_PRODUCT_INTEL_PCH_M_LM:
case PCI_PRODUCT_INTEL_PCH_M_LC:
/* 82577 */
new_phytype = WMPHY_82577;
break;
case PCI_PRODUCT_INTEL_PCH_D_DM:
case PCI_PRODUCT_INTEL_PCH_D_DC:
/* 82578 */
new_phytype = WMPHY_82578;
break;
case PCI_PRODUCT_INTEL_PCH2_LV_LM:
case PCI_PRODUCT_INTEL_PCH2_LV_V:
/* 82579 */
new_phytype = WMPHY_82579;
break;
case PCI_PRODUCT_INTEL_82801H_82567V_3:
case PCI_PRODUCT_INTEL_82801I_BM:
case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
case PCI_PRODUCT_INTEL_82801J_R_BM_V:
/* ICH8, 9, 10 with 82567 */
new_phytype = WMPHY_BM;
break;
default:
break;
}
} else {
/* It's not the first call. Use PHY OUI and model */
switch (phy_oui) {
case MII_OUI_ATTANSIC: /* atphy(4) */
switch (phy_model) {
case MII_MODEL_ATTANSIC_AR8021:
new_phytype = WMPHY_82578;
break;
default:
break;
}
break;
case MII_OUI_xxMARVELL:
switch (phy_model) {
case MII_MODEL_xxMARVELL_I210:
new_phytype = WMPHY_I210;
break;
case MII_MODEL_xxMARVELL_E1011:
case MII_MODEL_xxMARVELL_E1000_3:
case MII_MODEL_xxMARVELL_E1000_5:
case MII_MODEL_xxMARVELL_E1112:
new_phytype = WMPHY_M88;
break;
case MII_MODEL_xxMARVELL_E1149:
new_phytype = WMPHY_BM;
break;
case MII_MODEL_xxMARVELL_E1111:
case MII_MODEL_xxMARVELL_I347:
case MII_MODEL_xxMARVELL_E1512:
case MII_MODEL_xxMARVELL_E1340M:
case MII_MODEL_xxMARVELL_E1543:
new_phytype = WMPHY_M88;
break;
case MII_MODEL_xxMARVELL_I82563:
new_phytype = WMPHY_GG82563;
break;
default:
break;
}
break;
case MII_OUI_INTEL:
switch (phy_model) {
case MII_MODEL_INTEL_I82577:
new_phytype = WMPHY_82577;
break;
case MII_MODEL_INTEL_I82579:
new_phytype = WMPHY_82579;
break;
case MII_MODEL_INTEL_I217:
new_phytype = WMPHY_I217;
break;
case MII_MODEL_INTEL_I82580:
new_phytype = WMPHY_82580;
break;
case MII_MODEL_INTEL_I350:
new_phytype = WMPHY_I350;
break;
default:
break;
}
break;
case MII_OUI_yyINTEL:
switch (phy_model) {
case MII_MODEL_yyINTEL_I82562G:
case MII_MODEL_yyINTEL_I82562EM:
case MII_MODEL_yyINTEL_I82562ET:
new_phytype = WMPHY_IFE;
break;
case MII_MODEL_yyINTEL_IGP01E1000:
new_phytype = WMPHY_IGP;
break;
case MII_MODEL_yyINTEL_I82566:
new_phytype = WMPHY_IGP_3;
break;
default:
break;
}
break;
default:
break;
}
if (dodiag) {
if (new_phytype == WMPHY_UNKNOWN)
aprint_verbose_dev(dev,
"%s: Unknown PHY model. OUI=%06x, "
"model=%04x\n", __func__, phy_oui,
phy_model);
if ((sc->sc_phytype != WMPHY_UNKNOWN)
&& (sc->sc_phytype != new_phytype)) {
aprint_error_dev(dev, "Previously assumed PHY "
"type(%u) was incorrect. PHY type from PHY"
"ID = %u\n", sc->sc_phytype, new_phytype);
}
}
}
if (new_phytype == WMPHY_BM) {
/* All BM use _bm_ */
new_readreg = wm_gmii_bm_readreg;
new_writereg = wm_gmii_bm_writereg;
}
if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_TGP)) {
/* All PCH* use _hv_ */
new_readreg = wm_gmii_hv_readreg;
new_writereg = wm_gmii_hv_writereg;
}
/* Diag output */
if (dodiag) {
if (doubt_phytype != WMPHY_UNKNOWN)
aprint_error_dev(dev, "Assumed new PHY type was "
"incorrect. old = %u, new = %u\n", sc->sc_phytype,
new_phytype);
else if ((sc->sc_phytype != WMPHY_UNKNOWN)
&& (sc->sc_phytype != new_phytype))
aprint_error_dev(dev, "Previously assumed PHY type(%u)"
"was incorrect. New PHY type = %u\n",
sc->sc_phytype, new_phytype);
if ((mii->mii_readreg != NULL) &&
(new_phytype == WMPHY_UNKNOWN))
aprint_error_dev(dev, "PHY type is still unknown.\n");
if ((mii->mii_readreg != NULL) &&
(mii->mii_readreg != new_readreg))
aprint_error_dev(dev, "Previously assumed PHY "
"read/write function was incorrect.\n");
}
/*
* Let the chip set speed/duplex on its own based on
* signals from the PHY.
* XXXbouyer - I'm not sure this is right for the 80003,
* the em driver only sets CTRL_SLU here - but it seems to work.
*/
sc->sc_ctrl |= CTRL_SLU;
CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
/* Initialize our media structures and probe the GMII. */
mii->mii_ifp = ifp;
mii->mii_statchg = wm_gmii_statchg;
/* get PHY control from SMBus to PCIe */
if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
|| (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
|| (sc->sc_type == WM_T_PCH_CNP) || (sc->sc_type == WM_T_PCH_TGP))
wm_init_phy_workarounds_pchlan(sc);
/* Setup internal SGMII PHY for SFP */
wm_sgmii_sfp_preconfig(sc);
if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
|| (sc->sc_type == WM_T_82580)
|| (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
|| (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
if ((sc->sc_flags & WM_F_SGMII) == 0) {
/* Attach only one port */
mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
MII_OFFSET_ANY, MIIF_DOPAUSE);
} else {
int i, id;
uint32_t ctrl_ext;
id = wm_get_phy_id_82575(sc);
if (id != -1) {
mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
id, MII_OFFSET_ANY, MIIF_DOPAUSE);
}
if ((id == -1)
|| (LIST_FIRST(&mii->mii_phys) == NULL)) {
/* Power on sgmii phy if it is disabled */
ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
CSR_WRITE(sc, WMREG_CTRL_EXT,
ctrl_ext &~ CTRL_EXT_SWDPIN(3));
CSR_WRITE_FLUSH(sc);
delay(300*1000); /* XXX too long */
/*
* From 1 to 8.
*
* I2C access fails with I2C register's ERROR
* bit set, so prevent error message while
* scanning.
*/
sc->phy.no_errprint = true;
for (i = 1; i < 8; i++)
mii_attach(sc->sc_dev, &sc->sc_mii,
0xffffffff, i, MII_OFFSET_ANY,
MIIF_DOPAUSE);
sc->phy.no_errprint = false;
/*
* If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
* wm_set_mdio_slow_mode_hv() for a workaround and retry.
*/
if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
(sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)
|| (sc->sc_type == WM_T_PCH_TGP))
&& (LIST_FIRST(&mii->mii_phys) == NULL)) {
wm_set_mdio_slow_mode_hv(sc);
mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
MII_OFFSET_ANY, MIIF_DOPAUSE);
}
/*
* (For ICH8 variants)
* If PHY detection failed, use BM's r/w function and retry.
*/
if (LIST_FIRST(&mii->mii_phys) == NULL) {
/* if failed, retry with *_bm_* */
aprint_verbose_dev(dev, "Assumed PHY access function "
"(type = %d) might be incorrect. Use BM and retry.\n",
sc->sc_phytype);
sc->sc_phytype = WMPHY_BM;
mii->mii_readreg = wm_gmii_bm_readreg;
mii->mii_writereg = wm_gmii_bm_writereg;
/*
* PHY found! Check PHY type again by the second call of
* wm_gmii_setup_phytype.
*/
wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
child->mii_mpd_model);
/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
if ((sc->sc_type == WM_T_82580)
|| (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
|| (sc->sc_type == WM_T_I211)) {
reg = CSR_READ(sc, WMREG_PHPM);
reg &= ~PHPM_GO_LINK_D;
CSR_WRITE(sc, WMREG_PHPM, reg);
}
/* Disable D0 LPLU. */
wm_lplu_d0_disable(sc);
sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
sc->sc_ctrl |= CTRL_SLU;
if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
|| (sc->sc_type > WM_T_82543)) {
sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
} else {
sc->sc_ctrl &= ~CTRL_ASDE;
sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
if (ife->ifm_media & IFM_FDX)
sc->sc_ctrl |= CTRL_FD;
switch (IFM_SUBTYPE(ife->ifm_media)) {
case IFM_10_T:
sc->sc_ctrl |= CTRL_SPEED_10;
break;
case IFM_100_TX:
sc->sc_ctrl |= CTRL_SPEED_100;
break;
case IFM_1000_T:
sc->sc_ctrl |= CTRL_SPEED_1000;
break;
case IFM_NONE:
/* There is no specific setting for IFM_NONE */
break;
default:
panic("wm_gmii_mediachange: bad media 0x%x",
ife->ifm_media);
}
}
CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
CSR_WRITE_FLUSH(sc);
if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
wm_serdes_mediachange(ifp);
if (sc->sc_type <= WM_T_82543)
wm_gmii_reset(sc);
else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
&& ((sc->sc_flags & WM_F_SGMII) != 0)) {
/* allow time for SFP cage time to power up phy */
delay(300 * 1000);
wm_gmii_reset(sc);
}
/*
* wm_gmii_mediastatus: [ifmedia interface function]
*
* Get the current interface media status on a 1000BASE-T device.
*/
static void
wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
struct wm_softc *sc = ifp->if_softc;
struct ethercom *ec = &sc->sc_ethercom;
struct mii_data *mii;
bool dopoll = true;
/*
* In normal drivers, ether_mediastatus() is called here.
* To avoid calling mii_pollstat(), ether_mediastatus() is open coded.
*/
KASSERT(mutex_owned(sc->sc_core_lock));
KASSERT(ec->ec_mii != NULL);
KASSERT(mii_locked(ec->ec_mii));
mii = ec->ec_mii;
if ((sc->sc_flags & WM_F_DELAY_LINKUP) != 0) {
struct timeval now;
getmicrotime(&now);
if (timercmp(&now, &sc->sc_linkup_delay_time, <))
dopoll = false;
else if (sc->sc_linkup_delay_time.tv_sec != 0) {
/* Simplify by checking tv_sec only. It's enough. */
/*
* Don't call mii_pollstat() while doing workaround.
* See also wm_linkintr_gmii() and wm_tick().
*/
if (dopoll)
mii_pollstat(mii);
ifmr->ifm_active = mii->mii_media_active;
ifmr->ifm_status = mii->mii_media_status;
for (i = 0; i < 16; i++) {
data <<= 1;
CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
CSR_WRITE_FLUSH(sc);
delay(10);
if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
data |= 1;
CSR_WRITE(sc, WMREG_CTRL, v);
CSR_WRITE_FLUSH(sc);
delay(10);
}
for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
delay(50);
mdic = CSR_READ(sc, WMREG_MDIC);
if (mdic & MDIC_READY)
break;
}
if ((mdic & MDIC_READY) == 0) {
DPRINTF(sc, WM_DEBUG_GMII,
("%s: MDIC read timed out: phy %d reg %d\n",
device_xname(dev), phy, reg));
return ETIMEDOUT;
} else if (mdic & MDIC_E) {
/* This is normal if no PHY is present. */
DPRINTF(sc, WM_DEBUG_GMII,
("%s: MDIC read error: phy %d reg %d\n",
device_xname(sc->sc_dev), phy, reg));
return -1;
} else
*val = MDIC_DATA(mdic);
/*
* Allow some time after each MDIC transaction to avoid
* reading duplicate data in the next MDIC transaction.
*/
if (sc->sc_type == WM_T_PCH2)
delay(100);
return 0;
}
/*
* wm_gmii_mdic_writereg: [mii interface function]
*
* Write a PHY register on the GMII.
*/
static int
wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
{
	struct wm_softc *sc = device_private(dev);
	uint32_t mdic = 0;
	int i;

	/*
	 * Start the write transaction: opcode, PHY address, register
	 * address and data all go out in a single MDIC access.
	 * (Restored: the original body declared mdic/i but never issued
	 * the transaction at all.)
	 */
	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg) | MDIC_DATA(val));

	/* Poll for completion; same timeout as the read side. */
	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
		delay(50);
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
	}

	if ((mdic & MDIC_READY) == 0) {
		DPRINTF(sc, WM_DEBUG_GMII,
		    ("%s: MDIC write timed out: phy %d reg %d\n",
			device_xname(dev), phy, reg));
		return ETIMEDOUT;
	} else if (mdic & MDIC_E) {
		DPRINTF(sc, WM_DEBUG_GMII,
		    ("%s: MDIC write error: phy %d reg %d\n",
			device_xname(dev), phy, reg));
		return -1;
	}

	/*
	 * Allow some time after each MDIC transaction to avoid
	 * reading duplicate data in the next MDIC transaction.
	 */
	if (sc->sc_type == WM_T_PCH2)
		delay(100);

	return 0;
}
/*
* wm_gmii_i82544_readreg: [mii interface function]
*
* Read a PHY register on the GMII.
*/
static int
wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
struct wm_softc *sc = device_private(dev);
int rv;
rv = sc->phy.acquire(sc);
if (rv != 0) {
device_printf(dev, "%s: failed to get semaphore\n", __func__);
return rv;
}
/*
* wm_gmii_i82544_writereg: [mii interface function]
*
* Write a PHY register on the GMII.
*/
static int
wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
{
struct wm_softc *sc = device_private(dev);
int rv;
rv = sc->phy.acquire(sc);
if (rv != 0) {
device_printf(dev, "%s: failed to get semaphore\n", __func__);
return rv;
}
/*
* wm_gmii_i80003_readreg: [mii interface function]
*
* Read a PHY register on the kumeran
* This could be handled by the PHY layer if we didn't have to lock the
* resource ...
*/
static int
wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct wm_softc *sc = device_private(dev);
	int page_select;
	uint16_t temp, temp2;
	int rv;

	if (phy != 1) /* Only one PHY on kumeran bus */
		return -1;

	rv = sc->phy.acquire(sc);
	if (rv != 0) {
		device_printf(dev, "%s: failed to get semaphore\n", __func__);
		return rv;
	}

	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
		page_select = GG82563_PHY_PAGE_SELECT;
	else {
		/*
		 * Use Alternative Page Select register to access registers
		 * 30 and 31.
		 */
		page_select = GG82563_PHY_PAGE_SELECT_ALT;
	}
	/* Truncate to 16 bits before shifting, matching the writereg side. */
	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
		goto out;

	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
		/*
		 * Wait more 200us for a bug of the ready bit in the MDIC
		 * register.
		 */
		delay(200);
		/* Verify the page select actually took effect. */
		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
		if ((rv != 0) || (temp2 != temp)) {
			device_printf(dev, "%s failed\n", __func__);
			rv = -1;
			goto out;
		}
		delay(200);
		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
		delay(200);
	} else
		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);

out:
	sc->phy.release(sc);
	return rv;
}
/*
* wm_gmii_i80003_writereg: [mii interface function]
*
* Write a PHY register on the kumeran.
* This could be handled by the PHY layer if we didn't have to lock the
* resource ...
*/
static int
wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
{
	struct wm_softc *sc = device_private(dev);
	int page_select, rv;
	uint16_t page, readback;

	/* The kumeran bus has a single PHY, at address 1. */
	if (phy != 1)
		return -1;

	if ((rv = sc->phy.acquire(sc)) != 0) {
		device_printf(dev, "%s: failed to get semaphore\n", __func__);
		return rv;
	}

	/*
	 * Registers 30 and 31 (>= GG82563_MIN_ALT_REG) are reached through
	 * the Alternative Page Select register.
	 */
	page_select = ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) ?
	    GG82563_PHY_PAGE_SELECT : GG82563_PHY_PAGE_SELECT_ALT;

	page = (uint16_t)reg >> GG82563_PAGE_SHIFT;
	rv = wm_gmii_mdic_writereg(dev, phy, page_select, page);
	if (rv != 0)
		goto out;

	if ((sc->sc_flags & WM_F_80003_MDIC_WA) == 0) {
		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
		goto out;
	}

	/*
	 * Workaround for a bug of the ready bit in the MDIC register:
	 * wait an extra 200us around each access and verify that the page
	 * select actually took effect before touching the target register.
	 */
	delay(200);
	rv = wm_gmii_mdic_readreg(dev, phy, page_select, &readback);
	if ((rv != 0) || (readback != page)) {
		device_printf(dev, "%s failed\n", __func__);
		rv = -1;
		goto out;
	}
	delay(200);
	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
	delay(200);

out:
	sc->phy.release(sc);
	return rv;
}
/*
* wm_gmii_bm_readreg: [mii interface function]
*
* Read a PHY register on the kumeran
* This could be handled by the PHY layer if we didn't have to lock the
* resource ...
*/
static int
wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
struct wm_softc *sc = device_private(dev);
uint16_t page = reg >> BME1000_PAGE_SHIFT;
int rv;
rv = sc->phy.acquire(sc);
if (rv != 0) {
device_printf(dev, "%s: failed to get semaphore\n", __func__);
return rv;
}
if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
phy = ((page >= 768) || ((page == 0) && (reg == 25))
|| (reg == 31)) ? 1 : phy;
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
goto release;
}
/*
* wm_gmii_bm_writereg: [mii interface function]
*
* Write a PHY register on the kumeran.
* This could be handled by the PHY layer if we didn't have to lock the
* resource ...
*/
static int
wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
{
struct wm_softc *sc = device_private(dev);
uint16_t page = reg >> BME1000_PAGE_SHIFT;
int rv;
rv = sc->phy.acquire(sc);
if (rv != 0) {
device_printf(dev, "%s: failed to get semaphore\n", __func__);
return rv;
}
if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
phy = ((page >= 768) || ((page == 0) && (reg == 25))
|| (reg == 31)) ? 1 : phy;
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
goto release;
}
/*
* wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
* @dev: pointer to the HW structure
* @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
*
* Assumes semaphore already acquired and phy_reg points to a valid memory
* address to store contents of the BM_WUC_ENABLE_REG register.
*/
static int
wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
{
#ifdef WM_DEBUG
	struct wm_softc *sc = device_private(dev);
#endif
	uint16_t temp;
	int rv;

	/* All page select, port ctrl and wakeup registers use phy address 1 */

	/* Select Port Control Registers page */
	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
	if (rv != 0)
		return rv;

	/* Read WUCE and save it */
	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
	if (rv != 0)
		return rv;

	/*
	 * Enable both PHY wakeup mode and Wakeup register page writes.
	 * Prevent a power state change by disabling ME and Host PHY wakeup.
	 */
	temp = *phy_regp;
	temp |= BM_WUC_ENABLE_BIT;
	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);

	/*
	 * Write the modified WUCE value back. Without this write the
	 * wakeup-register page is never actually enabled (the original
	 * body computed temp but never used it).
	 */
	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp);
	if (rv != 0)
		return rv;

	/*
	 * Select Host Wakeup Registers page - caller now able to write
	 * registers on the Wakeup registers page
	 */
	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
}
/*
* wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
* @dev: pointer to the HW structure
* @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
*
* Restore BM_WUC_ENABLE_REG to its original value.
*
* Assumes semaphore already acquired and *phy_reg is the contents of the
* BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
* caller.
*/
static int
wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
{
#ifdef WM_DEBUG
	struct wm_softc *sc = device_private(dev);
#endif
	int rv;

	/*
	 * Propagate MDIC errors to the caller, consistent with
	 * wm_enable_phy_wakeup_reg_access_bm() (previously both return
	 * values were silently discarded).
	 */

	/* Select Port Control Registers page */
	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
	if (rv != 0)
		return rv;

	/* Restore 769.17 to its original value */
	return wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
}
/*
* wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
* @sc: pointer to the HW structure
* @offset: register offset to be read or written
* @val: pointer to the data to read or write
* @rd: determines if operation is read or write
* @page_set: BM_WUC_PAGE already set and access enabled
*
* Read the PHY register at offset and store the retrieved information in
* data, or write data to PHY register at offset. Note the procedure to
* access the PHY wakeup registers is different than reading the other PHY
* registers. It works as such:
* 1) Set 769.17.2 (page 769, register 17, bit 2) = 1
* 2) Set page to 800 for host (801 if we were manageability)
* 3) Write the address using the address opcode (0x11)
* 4) Read or write the data using the data opcode (0x12)
* 5) Restore 769.17.2 to its original value
*
* Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
* step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
*
* Assumes semaphore is already acquired. When page_set==TRUE, assumes
* the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
* is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
*/
static int
wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd,
    bool page_set)
{
	struct wm_softc *sc = device_private(dev);
	uint16_t regnum = BM_PHY_REG_NUM(offset);
	uint16_t page = BM_PHY_REG_PAGE(offset);
	uint16_t wuce;
	int rv = 0;

	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
		device_xname(dev), __func__));
	/* XXX Gig must be disabled for MDIO accesses to page 800 */
	if ((sc->sc_type == WM_T_PCH)
	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
		device_printf(dev,
		    "Attempting to access page %d while gig enabled.\n", page);
	}

	if (!page_set) {
		/* Enable access to PHY wakeup registers */
		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
		if (rv != 0) {
			device_printf(dev,
			    "%s: Could not enable PHY wakeup reg access\n",
			    __func__);
			return rv;
		}
	}

	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
		device_xname(sc->sc_dev), __func__, page, regnum));

	/* Write the Wakeup register page offset value using opcode 0x11 */
	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
	if (rv != 0)
		goto release;

	if (rd) {
		/* Read the Wakeup register page value using opcode 0x12 */
		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
	} else {
		/* Write the Wakeup register page value using opcode 0x12 */
		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
	}

release:
	/*
	 * Restore the saved WUCE value even on error. Previously the
	 * error paths returned directly, leaving wakeup register access
	 * enabled and the PHY page stuck on BM_WUC_PAGE.
	 */
	if (!page_set) {
		int rv2;

		rv2 = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
		if (rv == 0)
			rv = rv2;
	}

	return rv;
}
/*
* wm_gmii_hv_readreg: [mii interface function]
*
* Read a PHY register on the kumeran
* This could be handled by the PHY layer if we didn't have to lock the
* resource ...
*/
static int
wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
struct wm_softc *sc = device_private(dev);
int rv;
static int
wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
{
	uint16_t phy_page = BM_PHY_REG_PAGE(reg);
	uint16_t regaddr = BM_PHY_REG_NUM(reg);
	int rv;

	/* Registers on the high pages live at PHY address 1. */
	if (phy_page >= HV_INTC_FC_PAGE_START)
		phy = 1;

	/* Page 800 works differently than the rest so it has its own func */
	if (phy_page == BM_WUC_PAGE)
		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);

	/*
	 * Lower than page 768 works differently than the rest so it has its
	 * own func
	 */
	if ((phy_page > 0) && (phy_page < HV_INTC_FC_PAGE_START)) {
		device_printf(dev, "gmii_hv_readreg!!!\n");
		return -1;
	}

	/*
	 * XXX I21[789] documents say that the SMBus Address register is at
	 * PHY address 01, Page 0 (not 768), Register 26.
	 */
	if (phy_page == HV_INTC_FC_PAGE_START)
		phy_page = 0;

	/* Multi-page registers need the page select register set first. */
	if (regaddr > BME1000_MAX_MULTI_PAGE_REG) {
		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
		    phy_page << BME1000_PAGE_SHIFT);
		if (rv != 0)
			return rv;
	}

	return wm_gmii_mdic_readreg(dev, phy, regaddr & MII_ADDRMASK, val);
}
/*
* wm_gmii_hv_writereg: [mii interface function]
*
* Write a PHY register on the kumeran.
* This could be handled by the PHY layer if we didn't have to lock the
* resource ...
*/
static int
wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
{
struct wm_softc *sc = device_private(dev);
int rv;
static int
wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
{
struct wm_softc *sc = device_private(dev);
uint16_t page = BM_PHY_REG_PAGE(reg);
uint16_t regnum = BM_PHY_REG_NUM(reg);
int rv;
phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE)
return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
false);
/*
* Lower than page 768 works differently than the rest so it has its
* own func
*/
if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
device_printf(dev, "gmii_hv_writereg!!!\n");
return -1;
}
{
/*
* XXX I21[789] documents say that the SMBus Address register
* is at PHY address 01, Page 0 (not 768), Register 26.
*/
if (page == HV_INTC_FC_PAGE_START)
page = 0;
/*
* XXX Workaround MDIO accesses being disabled after entering
* IEEE Power Down (whenever bit 11 of the PHY control
* register is set)
*/
if (sc->sc_phytype == WMPHY_82578) {
struct mii_softc *child;
return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
}
/*
* wm_gmii_82580_readreg: [mii interface function]
*
* Read a PHY register on the 82580 and I350.
* This could be handled by the PHY layer if we didn't have to lock the
* resource ...
*/
static int
wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
struct wm_softc *sc = device_private(dev);
int rv;
rv = sc->phy.acquire(sc);
if (rv != 0) {
device_printf(dev, "%s: failed to get semaphore\n", __func__);
return rv;
}
/*
* wm_gmii_82580_writereg: [mii interface function]
*
* Write a PHY register on the 82580 and I350.
* This could be handled by the PHY layer if we didn't have to lock the
* resource ...
*/
static int
wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
{
struct wm_softc *sc = device_private(dev);
int rv;
rv = sc->phy.acquire(sc);
if (rv != 0) {
device_printf(dev, "%s: failed to get semaphore\n", __func__);
return rv;
}
/*
* wm_gmii_gs40g_readreg: [mii interface function]
*
* Read a PHY register on the I210 and I211.
* This could be handled by the PHY layer if we didn't have to lock the
* resource ...
*/
static int
wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
struct wm_softc *sc = device_private(dev);
int page, offset;
int rv;
/* Acquire semaphore */
rv = sc->phy.acquire(sc);
if (rv != 0) {
device_printf(dev, "%s: failed to get semaphore\n", __func__);
return rv;
}
/*
* wm_gmii_gs40g_writereg: [mii interface function]
*
* Write a PHY register on the I210 and I211.
* This could be handled by the PHY layer if we didn't have to lock the
* resource ...
*/
static int
wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
{
struct wm_softc *sc = device_private(dev);
uint16_t page;
int offset, rv;
/* Acquire semaphore */
rv = sc->phy.acquire(sc);
if (rv != 0) {
device_printf(dev, "%s: failed to get semaphore\n", __func__);
return rv;
}
/*
* EMI register related (82579, WMPHY_I217(PCH2 and newer))
* This access method is different from IEEE MMD.
*/
static int
wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
{
struct wm_softc *sc = device_private(dev);
int rv;
for (i = 0; i < MII_NPHY; i++) {
sc->phy.no_errprint = true;
rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
if (rv != 0)
continue;
rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
if (rv != 0)
continue;
if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
continue;
sc->phy.no_errprint = false;
sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
phyreg |= ESSR_SGMII_WOC_COPPER;
sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
break;
}
}
/*
* wm_sgmii_readreg: [mii interface function]
*
* Read a PHY register on the SGMII
* This could be handled by the PHY layer if we didn't have to lock the
* resource ...
*/
static int
wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct wm_softc *sc = device_private(dev);
	int rv;

	/* Serialize PHY access, then defer to the locked variant. */
	if ((rv = sc->phy.acquire(sc)) != 0) {
		device_printf(dev, "%s: failed to get semaphore\n", __func__);
		return rv;
	}

	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
	sc->phy.release(sc);

	return rv;
}
static int
wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
{
struct wm_softc *sc = device_private(dev);
uint32_t i2ccmd;
int i, rv = 0;
/*
* wm_sgmii_writereg: [mii interface function]
*
* Write a PHY register on the SGMII.
* This could be handled by the PHY layer if we didn't have to lock the
* resource ...
*/
static int
wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
{
struct wm_softc *sc = device_private(dev);
int rv;
rv = sc->phy.acquire(sc);
if (rv != 0) {
device_printf(dev, "%s: failed to get semaphore\n", __func__);
return rv;
}
/*
* On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
* detect a signal, 1 if they don't.
*/
if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
sig = !sig;
return sig;
}
/*
* wm_tbi_mediainit:
*
* Initialize media for use on 1000BASE-X devices.
*/
static void
wm_tbi_mediainit(struct wm_softc *sc)
{
struct ifnet *ifp = &sc->sc_ethercom.ec_if;
const char *sep = "";
/*
* wm_tbi_mediachange: [ifmedia interface function]
*
* Set hardware to newly-selected media on a 1000BASE-X device.
*/
/*
 * NOTE(review): this extract appears shortened relative to the full driver;
 * 'ife' is declared but unused in the visible body — presumably consumed by
 * code elided from this chunk. Confirm against the complete source.
 */
static int
wm_tbi_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t status, ctrl;
	bool signal;
	int i;

	/* This path is only for fiber/serdes media, never copper. */
	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
		/* XXX need some work for >= 82571 and < 82575 */
		if (sc->sc_type < WM_T_82575)
			return 0;
	}

	ctrl = CSR_READ(sc, WMREG_CTRL);
	signal = wm_tbi_havesignal(sc, ctrl);

	DPRINTF(sc, WM_DEBUG_LINK,
	    ("%s: signal = %d\n", device_xname(sc->sc_dev), signal));

	if (signal) {
		/* Have signal; wait for the link to come up. */
		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
			delay(10000);
			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
				break;
		}

		DPRINTF(sc, WM_DEBUG_LINK,
		    ("%s: i = %d after waiting for link\n",
			device_xname(sc->sc_dev), i));

		status = CSR_READ(sc, WMREG_STATUS);
		DPRINTF(sc, WM_DEBUG_LINK,
		    ("%s: status after final read = 0x%x, STATUS_LU = %#"
			__PRIxBIT "\n",
			device_xname(sc->sc_dev), status, STATUS_LU));

		if (status & STATUS_LU) {
			/* Link is up. */
			DPRINTF(sc, WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link up %s\n",
				device_xname(sc->sc_dev),
				(status & STATUS_FD) ? "FDX" : "HDX"));

			/*
			 * NOTE: CTRL will update TFCE and RFCE automatically,
			 * so we should update sc->sc_ctrl
			 */
			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
			/* Rebuild collision distance and XON from scratch. */
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			sc->sc_fcrtl &= ~FCRTL_XONE;
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
				sc->sc_fcrtl |= FCRTL_XONE;
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			/* Pre-82543 chips keep FCRTL at the old offset. */
			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
			sc->sc_tbi_linkup = 1;
		} else {
			/* Timed out: recheck once via the common helper. */
			if (i == WM_LINKUP_TIMEOUT)
				wm_check_for_link(sc);
			/* Link is down. */
			DPRINTF(sc, WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link down\n",
				device_xname(sc->sc_dev)));
			sc->sc_tbi_linkup = 0;
		}
	} else {
		/* No optical signal at all. */
		DPRINTF(sc, WM_DEBUG_LINK,
		    ("%s: LINK: set media -> no signal\n",
			device_xname(sc->sc_dev)));
		sc->sc_tbi_linkup = 0;
	}

	/* Reflect the new link state on the LED. */
	wm_tbi_serdes_set_linkled(sc);

	return 0;
}
/*
* wm_tbi_mediastatus: [ifmedia interface function]
*
* Get the current interface media status on a 1000BASE-X device.
*/
static void
wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
struct wm_softc *sc = ifp->if_softc;
uint32_t ctrl, status;
if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
/* XXX need some work for >= 82571 */
if (sc->sc_type >= WM_T_82571) {
sc->sc_tbi_linkup = 1;
return 0;
}
}
rxcw = CSR_READ(sc, WMREG_RXCW);
ctrl = CSR_READ(sc, WMREG_CTRL);
status = CSR_READ(sc, WMREG_STATUS);
signal = wm_tbi_havesignal(sc, ctrl);
/* XXX Currently, this function is not called on 8257[12] */
if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
|| (sc->sc_type >= WM_T_82575))
CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
/* Power on the sfp cage if present */
ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
ctrl_ext |= CTRL_EXT_I2C_ENA;
CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
/*
* wm_eeprom_sendbits:
*
* Send a series of bits to the EEPROM.
*/
static void
wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
{
uint32_t reg;
int x;
/*
* wm_eeprom_recvbits:
*
* Receive a series of bits from the EEPROM.
*/
static void
wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
{
	uint32_t eecd, result;
	int bit;

	/* Keep DI deasserted while clocking bits in. */
	eecd = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;

	result = 0;
	for (bit = 0; bit < nbits; bit++) {
		result <<= 1;
		/* Raise the clock, then sample DO (MSB arrives first). */
		CSR_WRITE(sc, WMREG_EECD, eecd | EECD_SK);
		CSR_WRITE_FLUSH(sc);
		delay(2);
		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
			result |= 1;
		/* Lower the clock again. */
		CSR_WRITE(sc, WMREG_EECD, eecd);
		CSR_WRITE_FLUSH(sc);
		delay(2);
	}

	*valp = result;
}
/* Microwire */
/*
* wm_nvm_read_uwire:
*
* Read a word from the EEPROM using the MicroWire protocol.
*/
static int
wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
uint32_t reg, val;
int i, rv;
for (i = 0; i < wordcnt; i++) {
/* Clear SK and DI. */
reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
CSR_WRITE(sc, WMREG_EECD, reg);
/*
* XXX: workaround for a bug in qemu-0.12.x and prior
* and Xen.
*
* We use this workaround only for 82540 because qemu's
* e1000 act as 82540.
*/
if (sc->sc_type == WM_T_82540) {
reg |= EECD_SK;
CSR_WRITE(sc, WMREG_EECD, reg);
reg &= ~EECD_SK;
CSR_WRITE(sc, WMREG_EECD, reg);
CSR_WRITE_FLUSH(sc);
delay(2);
}
/* XXX: end of workaround */
/*
* Set SPI and FLASH related information from the EECD register.
* For 82541 and 82547, the word size is taken from EEPROM.
*/
static int
wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
{
	int size;
	uint32_t reg;
	uint16_t data;

	/*
	 * NOTE(review): in this view `reg` is used below before any visible
	 * assignment; an EECD register read appears to have been elided
	 * above -- confirm against the full source.
	 */
	/* Read the size of NVM from EECD by default */
	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
	switch (sc->sc_type) {
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		/* Set dummy value to access EEPROM */
		sc->sc_nvm_wordsize = 64;
		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: failed to read EEPROM size\n", __func__);
		}
		/* On these chips the size field comes from the EEPROM word. */
		reg = data;
		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
		if (size == 0)
			size = 6; /* 64 word size */
		else
			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
		break;
	case WM_T_80003:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573: /* SPI case */
	case WM_T_82574: /* SPI case */
	case WM_T_82583: /* SPI case */
		size += NVM_WORD_SIZE_BASE_SHIFT;
		/* Clamp to the largest supported size (2^14 words). */
		if (size > 14)
			size = 14;
		break;
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_I350:
	case WM_T_I354:
	case WM_T_I210:
	case WM_T_I211:
		size += NVM_WORD_SIZE_BASE_SHIFT;
		/* Clamp to the largest supported size (2^15 words). */
		if (size > 15)
			size = 15;
		break;
	default:
		aprint_error_dev(sc->sc_dev,
		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
		return -1;
		break;	/* NOTREACHED */
	}
	/* The NVM word count is 2^size. */
	sc->sc_nvm_wordsize = 1 << size;
	return 0;
}
/*
* wm_nvm_ready_spi:
*
* Wait for a SPI EEPROM to be ready for commands.
*/
static int
wm_nvm_ready_spi(struct wm_softc *sc)
{
	uint32_t status;
	int elapsed = 0;

	/*
	 * Poll the SPI status register (RDSR) until the busy bit clears,
	 * giving up after SPI_MAX_RETRIES microseconds.  Returns 0 when the
	 * EEPROM is ready, -1 on timeout.
	 */
	while (elapsed < SPI_MAX_RETRIES) {
		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
		wm_eeprom_recvbits(sc, &status, 8);
		if ((status & SPI_SR_RDY) == 0)
			return 0;
		delay(5);
		elapsed += 5;
	}
	aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
	return -1;
}
/*
* wm_nvm_read_spi:
*
* Read a work from the EEPROM using the SPI protocol.
*/
static int
wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
uint32_t reg, val;
int i;
uint8_t opc;
int rv;
DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
device_xname(sc->sc_dev)));
return -1;
}
/******************************************************************************
* This function does initial flash setup so that a new read/write/erase cycle
* can be started.
*
* sc - The pointer to the hw structure
****************************************************************************/
static int32_t
wm_ich8_cycle_init(struct wm_softc *sc)
{
	uint16_t hsfsts;
	int32_t error = 1;	/* 1 = failure until proven otherwise */
	int32_t i = 0;

	/*
	 * NOTE(review): `hsfsts` is tested below before any visible read of
	 * the HSFSTS register in this view; the initial register read
	 * appears to have been elided -- confirm against the full source.
	 */
	/*
	 * Either we should have a hardware SPI cycle in progress bit to check
	 * against, in order to start a new cycle or FDONE bit should be
	 * changed in the hardware so that it is 1 after hardware reset, which
	 * can then be used as an indication whether a cycle is in progress or
	 * has been completed .. we should also have some software semaphore
	 * mechanism to guard FDONE or the cycle in progress bit so that two
	 * threads access to those bits can be sequentiallized or a way so that
	 * 2 threads don't start the cycle at the same time
	 */
	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
		/*
		 * There is no cycle running at present, so we can start a
		 * cycle
		 */
		/* Begin by setting Flash Cycle Done. */
		hsfsts |= HSFSTS_DONE;
		/* On PCH_SPT and newer the flash registers are 32-bit only. */
		if (sc->sc_type >= WM_T_PCH_SPT)
			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
			    hsfsts & 0xffffUL);
		else
			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
		error = 0;
	} else {
		/*
		 * Otherwise poll for sometime so the current cycle has a
		 * chance to end before giving up.
		 */
		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
			if (sc->sc_type >= WM_T_PCH_SPT)
				hsfsts = ICH8_FLASH_READ32(sc,
				    ICH_FLASH_HSFSTS) & 0xffffUL;
			else
				hsfsts = ICH8_FLASH_READ16(sc,
				    ICH_FLASH_HSFSTS);
			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
				error = 0;
				break;
			}
			delay(1);
		}
		if (error == 0) {
			/*
			 * Successful in waiting for previous cycle to timeout,
			 * now set the Flash Cycle Done.
			 */
			hsfsts |= HSFSTS_DONE;
			if (sc->sc_type >= WM_T_PCH_SPT)
				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
				    hsfsts & 0xffffUL);
			else
				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
				    hsfsts);
		}
	}
	/* 0 = ready for a new cycle, 1 = a cycle is still in progress. */
	return error;
}
/******************************************************************************
* This function starts a flash cycle and waits for its completion
*
* sc - The pointer to the hw structure
****************************************************************************/
static int32_t
wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
{
	uint16_t hsflctl;
	uint16_t hsfsts;
	int32_t error = 1;	/* assume failure until FDONE observed */
	uint32_t i = 0;

	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
	if (sc->sc_type >= WM_T_PCH_SPT)
		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
	else
		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
	hsflctl |= HSFCTL_GO;
	/* On PCH_SPT+, HSFCTL is the upper half of the 32-bit HSFSTS reg. */
	if (sc->sc_type >= WM_T_PCH_SPT)
		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
		    (uint32_t)hsflctl << 16);
	else
		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);

	/* Wait till FDONE bit is set to 1 */
	do {
		if (sc->sc_type >= WM_T_PCH_SPT)
			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
			    & 0xffffUL;
		else
			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
		if (hsfsts & HSFSTS_DONE)
			break;
		delay(1);
		i++;
	} while (i < timeout);

	/*
	 * Success iff the cycle completed (FDONE set) and no error bit is
	 * set.  Test the mask with "!= 0" rather than "== 1": the original
	 * comparison against the literal 1 only worked because HSFSTS_DONE
	 * happens to be bit 0, and would silently break otherwise.
	 */
	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
		error = 0;
	/* 0 on success, 1 on timeout or flash error. */
	return error;
}
/******************************************************************************
* Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
*
* sc - The pointer to the hw structure
* index - The index of the byte or word to read.
* size - Size of data to read, 1=byte 2=word, 4=dword
* data - Pointer to the word to store the value read.
*****************************************************************************/
static int32_t
wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
    uint32_t size, uint32_t *data)
{
	uint16_t hsfsts;
	uint16_t hsflctl;
	uint32_t flash_linear_address;
	uint32_t flash_data = 0;
	int32_t error = 1;	/* 1 = failure until a read succeeds */
	int32_t count = 0;

	/* Reject bad sizes, a NULL destination and out-of-range offsets. */
	if (size < 1 || size > 4 || data == 0x0 ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
		return error;

	/*
	 * NOTE(review): in this view the flash linear address write and the
	 * wm_ich8_flash_cycle() invocation appear to have been elided between
	 * the HSFCTL setup and the "if (error == 0)" test below -- confirm
	 * against the full source.
	 */
	do {
		delay(1);
		/* Steps */
		error = wm_ich8_cycle_init(sc);
		if (error)
			break;
		if (sc->sc_type >= WM_T_PCH_SPT)
			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
			    >> 16;
		else
			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
		    & HSFCTL_BCOUNT_MASK;
		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
		if (sc->sc_type >= WM_T_PCH_SPT) {
			/*
			 * In SPT, This register is in Lan memory space, not
			 * flash. Therefore, only 32 bit access is supported.
			 */
			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
			    (uint32_t)hsflctl << 16);
		} else
			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
		/*
		 * Write the last 24 bits of index into Flash Linear address
		 * field in Flash Address
		 */
		/* TODO: TBD maybe check the index against the size of flash */
		/*
		 * Check if FCERR is set to 1, if set to 1, clear it and try
		 * the whole sequence a few more times, else read in (shift in)
		 * the Flash Data0, the order is least significant byte first
		 * msb to lsb
		 */
		if (error == 0) {
			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
			/* Narrow the 32-bit data register to the asked size. */
			if (size == 1)
				*data = (uint8_t)(flash_data & 0x000000FF);
			else if (size == 2)
				*data = (uint16_t)(flash_data & 0x0000FFFF);
			else if (size == 4)
				*data = (uint32_t)flash_data;
			break;
		} else {
			/*
			 * If we've gotten here, then things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another try...
			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
			if (sc->sc_type >= WM_T_PCH_SPT)
				hsfsts = ICH8_FLASH_READ32(sc,
				    ICH_FLASH_HSFSTS) & 0xffffUL;
			else
				hsfsts = ICH8_FLASH_READ16(sc,
				    ICH_FLASH_HSFSTS);
			if (hsfsts & HSFSTS_ERR) {
				/* Repeat for some time before giving up. */
				continue;
			} else if ((hsfsts & HSFSTS_DONE) == 0)
				break;
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
	/* 0 on success. */
	return error;
}
/******************************************************************************
* Reads a single byte from the NVM using the ICH8 flash access registers.
*
* sc - pointer to wm_hw structure
* index - The index of the byte to read.
* data - Pointer to a byte to store the value read.
*****************************************************************************/
static int32_t
wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
{
	uint32_t dword = 0;
	int32_t rc;

	/* Read one byte via the generic flash accessor and narrow it. */
	rc = wm_read_ich8_data(sc, index, 1, &dword);
	*data = (rc == 0) ? (uint8_t)dword : 0;
	return rc;
}
/******************************************************************************
* Reads a word from the NVM using the ICH8 flash access registers.
*
* sc - pointer to wm_hw structure
* index - The starting byte index of the word to read.
* data - Pointer to a word to store the value read.
*****************************************************************************/
static int32_t
wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
{
	uint32_t dword = 0;
	int32_t rc;

	/* Read one 16-bit word via the generic flash accessor. */
	rc = wm_read_ich8_data(sc, index, 2, &dword);
	*data = (rc == 0) ? (uint16_t)dword : 0;
	return rc;
}
/******************************************************************************
* Reads a dword from the NVM using the ICH8 flash access registers.
*
* sc - pointer to wm_hw structure
* index - The starting byte index of the word to read.
* data - Pointer to a word to store the value read.
*****************************************************************************/
static int32_t
wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
{

	/* A dword needs no narrowing; hand straight to the accessor. */
	return wm_read_ich8_data(sc, index, 4, data);
}
/******************************************************************************
* Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
* register.
*
* sc - Struct containing variables accessed by shared code
* offset - offset of word in the EEPROM to read
* data - word read from the EEPROM
* words - number of words to read
*****************************************************************************/
static int
wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
{
	int rv;
	uint32_t flash_bank = 0;
	uint32_t act_offset = 0;
	uint32_t bank_offset = 0;
	uint16_t word = 0;
	uint16_t i = 0;

	/*
	 * NOTE(review): this function releases the NVM via sc->nvm.release()
	 * at the end, but no matching acquire is visible in this view; it
	 * appears to have been elided -- confirm against the full source.
	 */
	/*
	 * We need to know which is the valid flash bank. In the event
	 * that we didn't allocate eeprom_shadow_ram, we may not be
	 * managing flash_bank. So it cannot be trusted and needs
	 * to be updated with each read.
	 */
	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
	if (rv) {
		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
			device_xname(sc->sc_dev)));
		flash_bank = 0;	/* fall back to bank 0 */
	}
	/*
	 * Adjust offset appropriately if we're on bank 1 - adjust for word
	 * size
	 */
	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
	for (i = 0; i < words; i++) {
		/* The NVM part needs a byte offset, hence * 2 */
		act_offset = bank_offset + ((offset + i) * 2);
		rv = wm_read_ich8_word(sc, act_offset, &word);
		if (rv) {
			aprint_error_dev(sc->sc_dev,
			    "%s: failed to read NVM\n", __func__);
			break;
		}
		data[i] = word;
	}
	sc->nvm.release(sc);
	/* 0 on success; first failing word aborts the loop. */
	return rv;
}
/******************************************************************************
* Reads a 16 bit word or words from the EEPROM using the SPT's flash access
* register.
*
* sc - Struct containing variables accessed by shared code
* offset - offset of word in the EEPROM to read
* data - word read from the EEPROM
* words - number of words to read
*****************************************************************************/
static int
wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
{
	int rv;
	uint32_t flash_bank = 0;
	uint32_t act_offset = 0;
	uint32_t bank_offset = 0;
	uint32_t dword = 0;
	uint16_t i = 0;

	/*
	 * We need to know which is the valid flash bank. In the event
	 * that we didn't allocate eeprom_shadow_ram, we may not be
	 * managing flash_bank. So it cannot be trusted and needs
	 * to be updated with each read.
	 */
	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
	if (rv) {
		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
			device_xname(sc->sc_dev)));
		flash_bank = 0;	/* fall back to bank 0 */
	}
	/*
	 * Adjust offset appropriately if we're on bank 1 - adjust for word
	 * size
	 */
	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
	for (i = 0; i < words; i++) {
		/* The NVM part needs a byte offset, hence * 2 */
		act_offset = bank_offset + ((offset + i) * 2);
		/* but we must read dword aligned, so mask ... */
		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
		if (rv) {
			aprint_error_dev(sc->sc_dev,
			    "%s: failed to read NVM\n", __func__);
			break;
		}
		/* ... and pick out low or high word */
		if ((act_offset & 0x2) == 0)
			data[i] = (uint16_t)(dword & 0xFFFF);
		else
			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
	}
	/*
	 * NOTE(review): the record-scan loop below (invm_dword, record_type,
	 * word_address -- none declared in this function) looks like it
	 * belongs to the iNVM read path, not to this SPT flash read; lines
	 * appear to have been elided between the two functions -- confirm
	 * against the full source.
	 */
	for (i = 0; i < INVM_SIZE; i++) {
		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
		/* Get record type */
		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
			break;
		/* Skip over variable-length autoload/key records. */
		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
			if (word_address == address) {
				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
				rv = 0;
				break;
			}
		}
	}
	return rv;
}
static int
wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
{
int i, rv;
/*
* wm_nvm_validate_checksum
*
* The checksum is defined as the sum of the first 64 (16 bit) words.
*/
static int
wm_nvm_validate_checksum(struct wm_softc *sc)
{
uint16_t checksum;
uint16_t eeprom_data;
#ifdef WM_DEBUG
uint16_t csum_wordaddr, valid_checksum;
#endif
int i;
checksum = 0;
/* Don't check for I211 */
if (sc->sc_type == WM_T_I211)
return 0;
if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
for (i = 0; i < NVM_SIZE; i++) {
if (wm_nvm_read(sc, i, 1, &eeprom_data))
printf("XXXX ");
else
printf("%04hx ", eeprom_data);
if (i % 8 == 7)
printf("\n");
}
}
#endif /* WM_DEBUG */
for (i = 0; i < NVM_SIZE; i++) {
if (wm_nvm_read(sc, i, 1, &eeprom_data))
return -1;
checksum += eeprom_data;
}
/*
* Linux's code to decode version is very strange, so we don't
* obey that algorithm and just use word 61 as the document.
* Perhaps it's not perfect though...
*
* Example:
*
* Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
*/
dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
dword = __SHIFTOUT(dword, INVM_VER_1);
sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
}
/*
* XXX
* Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
* I've never seen real 82574 hardware with such small SPI ROM.
*/
if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
|| (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
have_uid = false;
switch (sc->sc_type) {
case WM_T_82571:
case WM_T_82572:
case WM_T_82574:
case WM_T_82583:
check_version = true;
check_optionrom = true;
have_build = true;
break;
case WM_T_ICH8:
case WM_T_ICH9:
case WM_T_ICH10:
case WM_T_PCH:
case WM_T_PCH2:
case WM_T_PCH_LPT:
case WM_T_PCH_SPT:
case WM_T_PCH_CNP:
case WM_T_PCH_TGP:
check_version = true;
have_build = true;
have_uid = false;
break;
case WM_T_82575:
case WM_T_82576:
case WM_T_82580:
if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
check_version = true;
break;
case WM_T_I211:
wm_nvm_version_invm(sc);
have_uid = false;
goto printver;
case WM_T_I210:
if (!wm_nvm_flash_presence_i210(sc)) {
wm_nvm_version_invm(sc);
have_uid = false;
goto printver;
}
/* FALLTHROUGH */
case WM_T_I350:
case WM_T_I354:
check_version = true;
check_optionrom = true;
break;
default:
return;
}
if (check_version
&& (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
build = nvm_data & NVM_BUILD_MASK;
have_build = true;
} else
minor = nvm_data & 0x00ff;
printver:
aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
sc->sc_nvm_ver_minor);
if (have_build) {
sc->sc_nvm_ver_build = build;
aprint_verbose(".%d", build);
}
}
/* Assume the Option ROM area is at avove NVM_SIZE */
if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
&& (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
/* Option ROM Version */
if ((off != 0x0000) && (off != 0xffff)) {
int rv;
uint16_t oid0, oid1;
if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
aprint_verbose(", Image Unique ID %08x",
((uint32_t)uid1 << 16) | uid0);
}
/*
* wm_nvm_read:
*
* Read data from the serial EEPROM.
*/
static int
wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
int rv;
/*
* Get hardware semaphore.
* Same as e1000_get_hw_semaphore_generic()
*/
static int
wm_get_swsm_semaphore(struct wm_softc *sc)
{
int32_t timeout;
uint32_t swsm;
retry:
/* Get the SW semaphore. */
timeout = sc->sc_nvm_wordsize + 1;
while (timeout) {
swsm = CSR_READ(sc, WMREG_SWSM);
if ((swsm & SWSM_SMBI) == 0)
break;
delay(50);
timeout--;
}
if (timeout == 0) {
if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
/*
* In rare circumstances, the SW semaphore may already
* be held unintentionally. Clear the semaphore once
* before giving up.
*/
sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
wm_put_swsm_semaphore(sc);
goto retry;
}
aprint_error_dev(sc->sc_dev, "could not acquire SWSM SMBI\n");
return -1;
}
/* Get the FW semaphore. */
timeout = sc->sc_nvm_wordsize + 1;
while (timeout) {
swsm = CSR_READ(sc, WMREG_SWSM);
swsm |= SWSM_SWESMBI;
CSR_WRITE(sc, WMREG_SWSM, swsm);
/* If we managed to set the bit we got the semaphore. */
swsm = CSR_READ(sc, WMREG_SWSM);
if (swsm & SWSM_SWESMBI)
break;
mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
for (timeout = 0; timeout < 200; timeout++) {
ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
return 0;
delay(5000);
}
device_printf(sc->sc_dev,
"failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
return -1;
}
/*
* Management mode and power management related subroutines.
* BMC, AMT, suspend/resume and EEE.
*/
#ifdef WM_WOL
static int
wm_check_mng_mode(struct wm_softc *sc)
{

	/* Dispatch the manageability-mode check by chip family. */
	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT:
	case WM_T_PCH_CNP:
	case WM_T_PCH_TGP:
		return wm_check_mng_mode_ich8lan(sc);
	case WM_T_82574:
	case WM_T_82583:
		return wm_check_mng_mode_82574(sc);
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_80003:
		return wm_check_mng_mode_generic(sc);
	default:
		/* Nothing to do on other chip types. */
		return 0;
	}
}
static int
wm_check_mng_mode_ich8lan(struct wm_softc *sc)
{
uint32_t fwsm;
switch (sc->sc_type) {
case WM_T_ICH8:
case WM_T_ICH9:
case WM_T_ICH10:
case WM_T_PCH:
case WM_T_PCH2:
case WM_T_PCH_LPT:
case WM_T_PCH_SPT:
case WM_T_PCH_CNP:
case WM_T_PCH_TGP:
do {
reg = CSR_READ(sc, WMREG_FWSM);
if ((reg & FWSM_RSPCIPHY) == 0) {
blocked = true;
delay(10*1000);
continue;
}
blocked = false;
} while (blocked && (i++ < 30));
return blocked;
break;
case WM_T_82571:
case WM_T_82572:
case WM_T_82573:
case WM_T_82574:
case WM_T_82583:
case WM_T_80003:
reg = CSR_READ(sc, WMREG_MANC);
if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
return true;
else
return false;
break;
default:
/* No problem */
break;
}
/* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
* inaccessible and resetting the PHY is not blocked, toggle the
* LANPHYPC Value bit to force the interconnect to PCIe mode.
*/
fwsm = CSR_READ(sc, WMREG_FWSM);
switch (sc->sc_type) {
case WM_T_PCH_LPT:
case WM_T_PCH_SPT:
case WM_T_PCH_CNP:
case WM_T_PCH_TGP:
if (wm_phy_is_accessible_pchlan(sc))
break;
/* Before toggling LANPHYPC, see if PHY is accessible by
* forcing MAC to SMBus mode first.
*/
reg = CSR_READ(sc, WMREG_CTRL_EXT);
reg |= CTRL_EXT_FORCE_SMBUS;
CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
#if 0
/* XXX Isn't this required??? */
CSR_WRITE_FLUSH(sc);
#endif
/* Wait 50 milliseconds for MAC to finish any retries
* that it might be trying to perform from previous
* attempts to acknowledge any phy read requests.
*/
delay(50 * 1000);
/* FALLTHROUGH */
case WM_T_PCH2:
if (wm_phy_is_accessible_pchlan(sc) == true)
break;
/* FALLTHROUGH */
case WM_T_PCH:
if (sc->sc_type == WM_T_PCH)
if ((fwsm & FWSM_FW_VALID) != 0)
break;
if (wm_phy_resetisblocked(sc) == true) {
device_printf(sc->sc_dev, "XXX reset is blocked(2)\n");
break;
}
/* Toggle LANPHYPC Value bit */
wm_toggle_lanphypc_pch_lpt(sc);
if (sc->sc_type >= WM_T_PCH_LPT) {
if (wm_phy_is_accessible_pchlan(sc) == true)
break;
/* Toggling LANPHYPC brings the PHY out of SMBus mode
* so ensure that the MAC is also out of SMBus mode
*/
reg = CSR_READ(sc, WMREG_CTRL_EXT);
reg &= ~CTRL_EXT_FORCE_SMBUS;
CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
if (rv == 0) {
/* Check to see if able to reset PHY. Print error if not */
if (wm_phy_resetisblocked(sc)) {
device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
goto out;
}
/* Reset the PHY before any access to it. Doing so, ensures
* that the PHY is in a known good state before we read/write
* PHY registers. The generic reset is sufficient here,
* because we haven't determined the PHY type yet.
*/
if (wm_reset_phy(sc) != 0)
goto out;
/* On a successful reset, possibly need to wait for the PHY
* to quiesce to an accessible state before returning control
* to the calling function. If the PHY does not quiesce, then
* return E1000E_BLK_PHY_RESET, as this is the condition that
* the PHY is in.
*/
if (wm_phy_resetisblocked(sc))
device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
}
if (sc->sc_flags & WM_F_HAS_MANAGE) {
uint32_t manc = CSR_READ(sc, WMREG_MANC);
manc |= MANC_ARP_EN;
if (sc->sc_type >= WM_T_82571)
manc &= ~MANC_EN_MNG2HOST;
CSR_WRITE(sc, WMREG_MANC, manc);
}
}
/*
 * Record per-chip wakeup/manageability capabilities in sc->sc_flags.
 */
static void
wm_get_wakeup(struct wm_softc *sc)
{
	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
	switch (sc->sc_type) {
	case WM_T_82573:
	case WM_T_82583:
		sc->sc_flags |= WM_F_HAS_AMT;
		/* FALLTHROUGH */
	case WM_T_80003:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_I350:
	case WM_T_I354:
		/* ARC subsystem is valid only if firmware mode is set. */
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
		/* FALLTHROUGH */
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
	case WM_T_PCH_SPT:
	case WM_T_PCH_CNP:
	case WM_T_PCH_TGP:
		/* ICH/PCH family always has AMT and ASF firmware. */
		sc->sc_flags |= WM_F_HAS_AMT;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	default:
		break;
	}
	/*
	 * Note that the WOL flags is set after the resetting of the eeprom
	 * stuff
	 */
}
/*
* Unconfigure Ultra Low Power mode.
* Only for I217 and newer (see below).
*/
static int
wm_ulp_disable(struct wm_softc *sc)
{
uint32_t reg;
uint16_t phyreg;
int i = 0, rv;
/* WOL in the newer chipset interfaces (pchlan) */
static int
wm_enable_phy_wakeup(struct wm_softc *sc)
{
device_t dev = sc->sc_dev;
uint32_t mreg, moff;
uint16_t wuce, wuc, wufc, preg;
int i, rv;
KASSERT(sc->sc_type >= WM_T_PCH);
/* Copy MAC RARs to PHY RARs */
wm_copy_rx_addrs_to_phy_ich8lan(sc);
/* Power down workaround on D3 */
static void
wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t phyreg;
	int i;

	/* Try the disable/verify sequence at most twice. */
	for (i = 0; i < 2; i++) {
		/* Disable link */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
		/*
		 * Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (sc->sc_type == WM_T_ICH8)
			wm_gig_downshift_workaround_ich8lan(sc);
		/* Read it back and test */
		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		/* Done if the PHY reports shutdown mode or this was a retry. */
		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
			break;
		/* Issue PHY reset and repeat at most one more time */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
	}
}
/*
* wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
* @sc: pointer to the HW structure
*
* During S0 to Sx transition, it is possible the link remains at gig
* instead of negotiating to a lower speed. Before going to Sx, set
* 'Gig Disable' to force link speed negotiation to a lower speed based on
* the LPLU setting in the NVM or custom setting. For PCH and newer parts,
* the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
* needs to be written.
* Parts that support (and are linked to a partner which support) EEE in
* 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
* than 10Mbps w/o EEE.
*/
static void
wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
{
device_t dev = sc->sc_dev;
struct ethercom *ec = &sc->sc_ethercom;
uint32_t phy_ctrl;
int rv;
if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
uint16_t eee_advert;
rv = wm_read_emi_reg_locked(dev,
I217_EEE_ADVERTISEMENT, &eee_advert);
if (rv)
goto release;
/*
* Disable LPLU if both link partners support 100BaseT
* EEE and 100Full is advertised on both ends of the
* link, and enable Auto Enable LPI since there will
* be no driver to enable LPI while in Sx.
*/
if ((eee_advert & AN_EEEADVERT_100_TX) &&
(sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
uint16_t anar, phy_reg;
/* Set Auto Enable LPI after link up */
sc->phy.readreg_locked(dev, 2,
I217_LPI_GPIO_CTRL, &phy_reg);
phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
sc->phy.writereg_locked(dev, 2,
I217_LPI_GPIO_CTRL, phy_reg);
}
}
}
/*
* For i217 Intel Rapid Start Technology support,
* when the system is going into Sx and no manageability engine
* is present, the driver must configure proxy to reset only on
* power good. LPI (Low Power Idle) state must also reset only
* on power good, as well as the MTA (Multicast table array).
* The SMBus release must also be disabled on LCD reset.
*/
/*
* Enable MTA to reset for Intel Rapid Start Technology
* Support
*/
if (sc->sc_type == WM_T_ICH8)
wm_gig_downshift_workaround_ich8lan(sc);
if (sc->sc_type >= WM_T_PCH) {
wm_oem_bits_config_ich8lan(sc, false);
/* Reset PHY to activate OEM bits on 82577/8 */
if (sc->sc_type == WM_T_PCH)
wm_reset_phy(sc);
if (sc->phy.acquire(sc) != 0)
return;
wm_write_smbus_addr(sc);
sc->phy.release(sc);
}
}
/*
* wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
* @sc: pointer to the HW structure
*
* During Sx to S0 transitions on non-managed devices or managed devices
* on which PHY resets are not blocked, if the PHY registers cannot be
* accessed properly by the s/w toggle the LANPHYPC value to power cycle
* the PHY.
* On i217, setup Intel Rapid Start Technology.
*/
static int
wm_resume_workarounds_pchlan(struct wm_softc *sc)
{
device_t dev = sc->sc_dev;
int rv;
if (sc->sc_type < WM_T_PCH2)
return 0;
rv = wm_init_phy_workarounds_pchlan(sc);
if (rv != 0)
return rv;
/* For i217 Intel Rapid Start Technology support when the system
* is transitioning from Sx and no manageability engine is present
* configure SMBus to restore on reset, disable proxy, and enable
* the reset on MTA (Multicast table array).
*/
if (sc->sc_phytype == WMPHY_I217) {
uint16_t phy_reg;
rv = sc->phy.acquire(sc);
if (rv != 0)
return rv;
/* Clear Auto Enable LPI after link up */
sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
/* Restore clear on SMB if no manageability engine
* is present
*/
rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
&phy_reg);
if (rv != 0)
goto release;
phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
/*
* Only for PCIe device which has PCIe capability in the PCI config
* space.
*/
if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
return;
switch (sc->sc_type) {
case WM_T_82571:
case WM_T_82572:
/*
* 8257[12] Errata 13: Device Does Not Support PCIe Active
* State Power management L1 State (ASPM L1).
*/
mask = PCIE_LCSR_ASPM_L1;
str = "L1 is";
break;
case WM_T_82573:
case WM_T_82574:
case WM_T_82583:
/*
* The 82573 disappears when PCIe ASPM L0s is enabled.
*
* The 82574 and 82583 does not support PCIe ASPM L0s with
* some chipset. The document of 82574 and 82583 says that
* disabling L0s with some specific chipset is sufficient,
* but we follow as of the Intel em driver does.
*
* References:
* Errata 8 of the Specification Update of i82573.
* Errata 20 of the Specification Update of i82574.
* Errata 9 of the Specification Update of i82583.
*/
mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
str = "L0s and L1 are";
break;
default:
return;
}
/* Print only in wm_attach() */
if ((sc->sc_flags & WM_F_ATTACHED) == 0)
aprint_verbose_dev(sc->sc_dev,
"ASPM %s disabled to workaround the errata.\n", str);
}
switch (sc->sc_type) {
case WM_T_82571:
case WM_T_82572:
case WM_T_82573:
case WM_T_82575:
case WM_T_82576:
mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
phyval &= ~PMR_D0_LPLU;
mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
break;
case WM_T_82580:
case WM_T_I350:
case WM_T_I210:
case WM_T_I211:
reg = CSR_READ(sc, WMREG_PHPM);
reg &= ~PHPM_D0A_LPLU;
CSR_WRITE(sc, WMREG_PHPM, reg);
break;
case WM_T_82574:
case WM_T_82583:
case WM_T_ICH8:
case WM_T_ICH9:
case WM_T_ICH10:
reg = CSR_READ(sc, WMREG_PHY_CTRL);
reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
CSR_WRITE_FLUSH(sc);
break;
case WM_T_PCH:
case WM_T_PCH2:
case WM_T_PCH_LPT:
case WM_T_PCH_SPT:
case WM_T_PCH_CNP:
case WM_T_PCH_TGP:
wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
if (wm_phy_resetisblocked(sc) == false)
phyval |= HV_OEM_BITS_ANEGNOW;
wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
break;
default:
break;
}
}
/*
* Enable EEE only for speeds in which the link partner is
* EEE capable and for which we advertise EEE.
*/
if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
if ((data & ANLPAR_TX_FD) != 0)
lpi_ctrl |= I82579_LPI_CTRL_EN_100;
else {
/*
* EEE is not supported in 100Half, so ignore
* partner's EEE in 100 ability if full-duplex
* is not advertised.
*/
sc->eee_lp_ability
&= ~AN_EEEADVERT_100_TX;
}
}
}
if (sc->sc_phytype == WMPHY_82579) {
rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
if (rv != 0)
goto release;
data &= ~I82579_LPI_PLL_SHUT_100;
rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
}
/*
* Call gig speed drop workaround on Gig disable before accessing
* any PHY registers.
*/
wm_gig_downshift_workaround_ich8lan(sc);
out:
return 0;
}
/*
* wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
* @sc: pointer to the HW structure
*
* Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
* LPLU, Gig disable, MDIC PHY reset):
* 1) Set Kumeran Near-end loopback
* 2) Clear Kumeran Near-end loopback
* Should only be called for ICH8[m] devices with any 1G Phy.
*/
static void
wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
{
	uint16_t diag;

	/* This workaround applies to IGP3 PHYs only. */
	if (sc->sc_phytype != WMPHY_IGP_3)
		return;

	/* Set, then clear, Kumeran near-end loopback. */
	if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &diag) != 0)
		return;
	diag |= KUMCTRLSTA_DIAG_NELPBK;
	if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, diag) != 0)
		return;
	diag &= ~KUMCTRLSTA_DIAG_NELPBK;
	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, diag);
}
/*
* Workaround for pch's PHYs
* XXX should be moved to new PHY driver?
*/
static int
wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
{
	device_t dev = sc->sc_dev;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	uint16_t phy_data, phyrev = 0;
	int phytype = sc->sc_phytype;
	int rv;

	/*
	 * NOTE(review): `mii`, `child`, `phyrev` and `phytype` are unused in
	 * this view; the code that uses them appears to have been elided --
	 * confirm against the full source.
	 */
	/*
	 * Configure the K1 Si workaround during phy reset assuming there is
	 * link so that it disables K1 if link is in 1Gbps.
	 */
	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
		return rv;
	/* Workaround for link disconnects on a busy hub in half duplex */
	rv = sc->phy.acquire(sc);
	if (rv)
		return rv;
	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
	if (rv)
		goto release;
	/* Write back only the low byte of BM_PORT_GEN_CFG. */
	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
	    phy_data & 0x00ff);
	if (rv)
		goto release;
	/* Set MSE higher to enable link to stay up when noise is high */
	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
release:
	sc->phy.release(sc);
	return rv;
}
/*
* wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
* @sc: pointer to the HW structure
*/
/*
 * NOTE(review): this function body appears garbled/truncated -- it uses
 * 'enable', 'rar_count', 'i', 'rv', 'data' and 'dev' without declaring
 * them, the braces opened here are never closed within this function,
 * and the code mixes Rx-address copying with what looks like the
 * jumbo-frame PHY workaround.  Reconstruct against the upstream driver
 * before relying on it -- TODO confirm.
 */
static void
wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
{
if (enable) {
/* Write Rx addresses (rar_entry_count for RAL/H, and
* SHRAL/H) and initial CRC values to the MAC
*/
rar_count = wm_rar_count(sc);
for (i = 0; i < rar_count; i++) {
uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
uint32_t addr_high, addr_low;
/* Toggle Kumeran control bit 0 according to 'enable'. */
rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
if (rv != 0)
goto out;
if (enable)
data |= 1 << 0;
else
data &= ~(1 << 0);
rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
if (rv != 0)
goto out;
rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
if (rv != 0)
goto out;
/*
* XXX FreeBSD and Linux write the same value in both the enable case
* and the disable case. Is that correct?
*/
data &= ~(0xf << 8);
data |= (0xb << 8);
rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
if (rv != 0)
goto out;
/*
* If enable ==
* true: Enable jumbo frame workaround in the PHY.
* false: Write PHY register values back to h/w defaults.
*/
rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
if (rv != 0)
goto out;
data &= ~(0x7F << 5);
if (enable)
data |= (0x37 << 5);
rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
if (rv != 0)
goto out;
rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
if (rv != 0)
goto out;
if (enable)
data &= ~(1 << 13);
else
data |= (1 << 13);
rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
if (rv != 0)
goto out;
rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
if (rv != 0)
goto out;
data &= ~(0x3FF << 2);
if (enable)
data |= (I82579_TX_PTR_GAP << 2);
else
data |= (0x8 << 2);
rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
if (rv != 0)
goto out;
/*
* wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
* done after every PHY reset.
*/
static int
wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
{
device_t dev = sc->sc_dev;
int rv;
/* Set MDIO slow mode before any other MDIO access */
rv = wm_set_mdio_slow_mode_hv(sc);
if (rv != 0)
return rv;
rv = sc->phy.acquire(sc);
if (rv != 0)
return rv;
/* Set MSE higher to enable link to stay up when noise is high */
rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
if (rv != 0)
goto release;
/* Drop link after 5 times MSE threshold was reached */
rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
release:
sc->phy.release(sc);
return rv;
}
/**
* wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
* @link: link up bool flag
*
* When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
* preventing further DMA write requests. Workaround the issue by disabling
* the de-assertion of the clock request when in 1Gbps mode.
* Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
* speeds in order to avoid Tx hangs.
**/
static int
wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
{
uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
uint32_t status = CSR_READ(sc, WMREG_STATUS);
uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
uint16_t phyreg;
/*
 * NOTE(review): the body below looks truncated -- 'phyreg' is modified
 * before it is ever read from the PHY, 'rv' is declared but unused,
 * the 100/10 Mb/s branch is nested inside a 1000 Mb/s-only condition,
 * and neither the block nor the function is closed in this span.
 * Reconstruct from the upstream driver -- TODO confirm.
 */
if (link && (speed == STATUS_SPEED_1000)) {
int rv;
/* Clear link status transmit timeout */
phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
if (speed == STATUS_SPEED_100) {
/* Set inband Tx timeout to 5x10us for 100Half */
phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
/* Do not extend the K1 entry latency for 100Half */
fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
} else {
/* Set inband Tx timeout to 50x10us for 10Full/Half */
phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
/* Extend the K1 entry latency for 10 Mbps */
fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
}
/*
* wm_k1_gig_workaround_hv - K1 Si workaround
* @sc: pointer to the HW structure
* @link: link up bool flag
*
* If K1 is enabled for 1Gbps, the MAC might stall when transitioning
* from a lower speed. This workaround disables K1 whenever link is at 1Gig
* If link is down, the function will restore the default K1 setting located
* in the NVM.
*/
static int
wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
{
int k1_enable = sc->sc_nvm_k1_enabled;
int rv;
/*
 * NOTE(review): truncated body -- the 'if' matching the '} else {'
 * below is missing, 'k1_enable' and 'rv' are unused here, and the
 * function never returns or closes in this span.  Reconstruct from
 * the upstream driver -- TODO confirm.
 */
/* Link stall fix for link up */
wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
0x0100);
} else {
/* Link stall fix for link down */
wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
0x4100);
}
/*
* wm_k1_workaround_lv - K1 Si workaround
* @sc: pointer to the HW structure
*
* Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
* Disable K1 for 1000 and 100 speeds
*/
static int
wm_k1_workaround_lv(struct wm_softc *sc)
{
uint32_t reg;
uint16_t phyreg;
int rv;
/*
 * NOTE(review): body truncated after the HV_M_STATUS read -- 'reg' is
 * unused and the function is never closed in this span.  Reconstruct
 * from the upstream driver -- TODO confirm.
 */
/* Only the 82579 (PCH2) needs this workaround. */
if (sc->sc_type != WM_T_PCH2)
return 0;
/* Set K1 beacon duration based on 10Mbps speed */
rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
if (rv != 0)
return rv;
/*
* wm_link_stall_workaround_hv - Si workaround
* @sc: pointer to the HW structure
*
* This function works around a Si bug where the link partner can get
* a link up indication before the PHY does. If small packets are sent
* by the link partner they can be placed in the packet buffer without
* being properly accounted for by the PHY and will stall preventing
* further packets from being received. The workaround is to clear the
* packet buffer after the PHY detects link up.
*/
static int
wm_link_stall_workaround_hv(struct wm_softc *sc)
{
uint16_t phyreg;
if (sc->sc_phytype != WMPHY_82578)
return 0;
/* Do not apply workaround if in PHY loopback bit 14 set */
wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
if ((phyreg & BMCR_LOOP) != 0)
return 0;
/* Check if link is up and at 1Gbps */
wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
| BM_CS_STATUS_SPEED_MASK;
if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
| BM_CS_STATUS_SPEED_1000))
return 0;
delay(200 * 1000); /* XXX too big */
/* Flush the packets in the fifo buffer */
wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
HV_MUX_DATA_CTRL_GEN_TO_MAC);
return 0;
}
static int
wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
{
int rv;
rv = sc->phy.acquire(sc);
if (rv != 0) {
device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
__func__);
return rv;
}
rv = wm_set_mdio_slow_mode_hv_locked(sc);
sc->phy.release(sc);
return rv;
}
static int
wm_set_mdio_slow_mode_hv_locked(struct wm_softc *sc)
{
/*
 * NOTE(review): body truncated -- only the declarations remain; the
 * MDIO slow-mode register update, return, and closing brace are
 * missing from this span.  Reconstruct from the upstream driver.
 */
int rv;
uint16_t reg;
/*
* wm_configure_k1_ich8lan - Configure K1 power state
* @sc: pointer to the HW structure
* @enable: K1 state to configure
*
* Configure the K1 power state based on the provided parameter.
* Assumes semaphore already acquired.
*/
static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
/*
 * NOTE(review): body truncated -- only the declarations remain; the
 * K1 configuration sequence and the function's closing brace are
 * missing from this span.  Reconstruct from the upstream driver.
 */
uint32_t ctrl, ctrl_ext, tmp;
uint16_t kmreg;
int rv;
/* special case - for 82575 - need to do manual init ... */
/*
 * NOTE(review): the body below does not match this function's name --
 * it reads PHY IDs over MDIO and handles PCH SMBus unforcing and PCIe
 * latency math, referencing undeclared identifiers (id1, id2, i, rv,
 * rxa, lat_ns, speed, value), and it 'return false's from a void
 * function.  This span looks like a bad merge of several functions;
 * reconstruct from the upstream driver -- TODO confirm.
 */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
/*
* Remark: this is untested code - we have no board without EEPROM
* same setup as mentioned in the FreeBSD driver for the i82575
*/
id1 = id2 = 0xffff;
/* Try twice to read valid PHY IDs over MDIO. */
for (i = 0; i < 2; i++) {
rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
&id1);
if ((rv != 0) || MII_INVALIDID(id1))
continue;
rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
&id2);
if ((rv != 0) || MII_INVALIDID(id2))
continue;
break;
}
if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
goto out;
/*
* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
rv = 0;
if (sc->sc_type < WM_T_PCH_LPT) {
wm_set_mdio_slow_mode_hv_locked(sc);
rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
&id1);
rv |= wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
&id2);
}
if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
device_printf(sc->sc_dev, "XXX return with false\n");
/* NOTE(review): 'return false' in a void function -- will not compile. */
return false;
}
out:
if (sc->sc_type >= WM_T_PCH_LPT) {
/* Only unforce SMBus if ME is not active */
if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
uint16_t phyreg;
/*
* Determine the maximum latency tolerated by the device.
*
* Per the PCIe spec, the tolerated latencies are encoded as
* a 3-bit encoded scale (only 0-5 are valid) multiplied by
* a 10-bit value (0-1023) to provide a range from 1 ns to
* 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns,
* 1=2^5ns, 2=2^10ns,...5=2^25ns.
*/
lat_ns = ((int64_t)rxa * 1024 -
(2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
+ ETHER_HDR_LEN))) * 8 * 1000;
if (lat_ns < 0)
lat_ns = 0;
else
lat_ns /= speed;
value = lat_ns;
/*
* I210 Errata 25 and I211 Errata 10
* Slow System Clock.
*
* Note that this function is called on both FLASH and iNVM case on NetBSD.
*/
static int
wm_pll_workaround_i210(struct wm_softc *sc)
{
uint32_t mdicnfg, wuc;
uint32_t reg;
pcireg_t pcireg;
uint32_t pmreg;
uint16_t nvmword, tmp_nvmword;
uint16_t phyval;
bool wa_done = false;
int i, rv = 0;
/* Get Power Management cap offset */
if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
&pmreg, NULL) == 0)
return -1;
/* Save WUC and MDICNFG registers */
wuc = CSR_READ(sc, WMREG_WUC);
mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
/*
* The default value of the Initialization Control Word 1
* is the same on both I210's FLASH_HW and I21[01]'s iNVM.
*/
nvmword = INVM_DEFAULT_AL;
}
tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
wm_gmii_gs40g_readreg(sc->sc_dev, 1,
GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
rv = 0;
break; /* OK */
} else
rv = -1;