/* $NetBSD: gtmpsc.c,v 1.50 2024/09/10 17:56:35 andvar Exp $ */
/*
* Copyright (c) 2009 KIYOHARA Takashi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* mpsc.c - Multi-Protocol Serial Controller driver, supports UART mode only
*/
/*
* clear any pending SDMA interrupts for this unit
*/
(void) gt_sdma_icause(device_parent(sc->sc_dev),
SDMA_INTR_RXBUF(sc->sc_unit) |
SDMA_INTR_RXERR(sc->sc_unit) |
SDMA_INTR_TXBUF(sc->sc_unit) |
SDMA_INTR_TXEND(sc->sc_unit));
#ifdef KGDB
/*
* Allow kgdb to "take over" this port. If this is
* the kgdb device, it has exclusive use.
*/
if (sc->sc_unit == gtmpsckgdbport) {
#ifdef MPSC_CONSOLE
if (sc->sc_unit == MPSC_CONSOLE) {
aprint_error_dev(self,
"(kgdb): cannot share with console\n");
return;
}
#endif
/* If not yet open, drop the entire buffer content here */
if (!ISSET(tp->t_state, TS_ISOPEN))
cc = 0;
vrxp = &sc->sc_poll_sdmapage->rx[sc->sc_rcvrx];
end = vrxp->rxbuf + vrxp->rxdesc.sdma_cnt;
get = vrxp->rxbuf + sc->sc_roffset;
while (cc > 0) {
code = *get;
lsr = vrxp->rxdesc.sdma_csr;
if (ISSET(lsr,
SDMA_CSR_RX_PE |
SDMA_CSR_RX_FR |
SDMA_CSR_RX_OR |
SDMA_CSR_RX_BR)) {
if (ISSET(lsr, SDMA_CSR_RX_OR))
; /* XXXXX not yet... */
if (ISSET(lsr, SDMA_CSR_RX_BR | SDMA_CSR_RX_FR))
SET(code, TTY_FE);
if (ISSET(lsr, SDMA_CSR_RX_PE))
SET(code, TTY_PE);
}
if ((*rint)(code, tp) == -1) {
/*
* The line discipline's buffer is out of space.
*/
/* XXXXX not yet... */
}
if (++get >= end) {
/* cleanup this descriptor, and return to DMA */
CLEANUP_AND_RETURN_RXDMA(sc, sc->sc_rcvrx);
sc->sc_rcvrx =
(sc->sc_rcvrx + 1) % GTMPSC_NTXDESC;
vrxp = &sc->sc_poll_sdmapage->rx[sc->sc_rcvrx];
end = vrxp->rxbuf + vrxp->rxdesc.sdma_cnt;
get = vrxp->rxbuf + sc->sc_roffset;
}
cc--;
}
}
if (sc->sc_tx_done) {
sc->sc_tx_done = 0;
CLR(tp->t_state, TS_BUSY);
if (ISSET(tp->t_state, TS_FLUSH))
CLR(tp->t_state, TS_FLUSH);
else
ndflush(&tp->t_outq, (int)(sc->sc_tba - tp->t_outq.c_cf));
(*tp->t_linesw->l_start)(tp);
}
}
/*
 * gtmpscopen - open entry point for the MPSC UART tty device.
 *
 * Looks up the unit's softc, refuses to share the port with kgdb,
 * performs the tty ownership/privilege check, then hands off to the
 * line discipline's open routine.  On failure with no other opener
 * pending, shuts the channel back down.
 */
int
gtmpscopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct gtmpsc_softc *sc;
	int unit = GTMPSCUNIT(dev);
	struct tty *tp;
	int error;

	/* ">mpsc_cd" was HTML-entity damage for "&gtmpsc_cd" */
	sc = device_lookup_private(&gtmpsc_cd, unit);
	if (!sc)
		return ENXIO;
#ifdef KGDB
	/*
	 * If this is the kgdb port, no other use is permitted.
	 */
	if (sc->sc_flags & GTMPSC_KGDB)
		return EBUSY;
#endif
	tp = sc->sc_tty;

	/* Enforce tty-open device policy (superuser / ownership). */
	if (kauth_authorize_device_tty(l->l_cred, KAUTH_DEVICE_TTY_OPEN, tp))
		return EBUSY;

	error = (*tp->t_linesw->l_open)(dev, tp);
	if (error)
		goto bad;

	return 0;

bad:
	if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
		/*
		 * We failed to open the device, and nobody else had it opened.
		 * Clean up the state as appropriate.
		 */
		gtmpscshutdown(sc);
	}
	return error;
}
/*
 * gtmpscclose - close entry point for the MPSC UART tty device.
 *
 * Runs the line discipline close and generic tty close; if that was
 * the last reference (not open and no pending opens on the other
 * node), shuts the hardware channel down.
 */
int
gtmpscclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	int unit = GTMPSCUNIT(dev);
	/* ">mpsc_cd" was HTML-entity damage for "&gtmpsc_cd" */
	struct gtmpsc_softc *sc = device_lookup_private(&gtmpsc_cd, unit);
	struct tty *tp = sc->sc_tty;

	if (!ISSET(tp->t_state, TS_ISOPEN))
		return 0;

	(*tp->t_linesw->l_close)(tp, flag);
	ttyclose(tp);

	if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
		/*
		 * Although we got a last close, the device may still be in
		 * use; e.g. if this was the dialout node, and there are still
		 * processes waiting for carrier on the non-dialout node.
		 */
		gtmpscshutdown(sc);
	}
	return 0;
}
/*
 * gtmpscread - read entry point; delegates to the line discipline.
 */
int
gtmpscread(dev_t dev, struct uio *uio, int flag)
{
	/* ">mpsc_cd" was HTML-entity damage for "&gtmpsc_cd" */
	struct gtmpsc_softc *sc =
	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
	struct tty *tp = sc->sc_tty;

	return (*tp->t_linesw->l_read)(tp, uio, flag);
}
/*
 * gtmpscwrite - write entry point; delegates to the line discipline.
 */
int
gtmpscwrite(dev_t dev, struct uio *uio, int flag)
{
	/* ">mpsc_cd" was HTML-entity damage for "&gtmpsc_cd" */
	struct gtmpsc_softc *sc =
	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(dev));
	struct tty *tp = sc->sc_tty;

	return (*tp->t_linesw->l_write)(tp, uio, flag);
}
/*
 * gtmpscioctl - ioctl entry point for the MPSC tty device.
 *
 * Handles the PPS (pulse-per-second) ioctls and the legacy
 * TIOCDCDTIMESTAMP request, both under the timecounter lock.
 *
 * NOTE(review): this block appears damaged in this copy of the file:
 *  - "device_lookup_private(>mpsc_cd, ...)" is presumably a mangled
 *    "&gtmpsc_cd" ("&gt" eaten by HTML-entity decoding) -- confirm
 *    against the repository before building.
 *  - the switch statement is never visibly closed, and everything from
 *    the "Check requested parameters." comment onward uses identifiers
 *    not declared here (t, ix, vtxp, csr, sdma_imask); it reads like a
 *    parameter-change routine plus a TX-completion path spliced in.
 *    Code left byte-for-byte; only comments added.
 */
int
gtmpscioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct gtmpsc_softc *sc =
	    device_lookup_private(>mpsc_cd, GTMPSCUNIT(dev));
	struct tty *tp = sc->sc_tty;
	int error;

	switch (cmd) {
	case PPS_IOC_CREATE:
	case PPS_IOC_DESTROY:
	case PPS_IOC_GETPARAMS:
	case PPS_IOC_SETPARAMS:
	case PPS_IOC_GETCAP:
	case PPS_IOC_FETCH:
#ifdef PPS_SYNC
	case PPS_IOC_KCBIND:
#endif
		/* PPS state is protected by the timecounter lock. */
		mutex_spin_enter(&timecounter_lock);
		error = pps_ioctl(cmd, data, &sc->sc_pps_state);
		mutex_spin_exit(&timecounter_lock);
		break;

	case TIOCDCDTIMESTAMP:	/* XXX old, overloaded API used by xntpd v3 */
		mutex_spin_enter(&timecounter_lock);
#ifndef PPS_TRAILING_EDGE
		/* Report the asserting (leading) PPS edge timestamp. */
		TIMESPEC_TO_TIMEVAL((struct timeval *)data,
		    &sc->sc_pps_state.ppsinfo.assert_timestamp);
#else
		/* Report the clearing (trailing) PPS edge timestamp. */
		TIMESPEC_TO_TIMEVAL((struct timeval *)data,
		    &sc->sc_pps_state.ppsinfo.clear_timestamp);
#endif
		mutex_spin_exit(&timecounter_lock);
		break;

	/*
	 * NOTE(review): lines below this point do not belong to the switch
	 * above -- apparent splice from a termios-parameter routine.
	 */
	/* Check requested parameters. */
	if (compute_cdv(t->c_ospeed) < 0)
		return EINVAL;
	if (t->c_ispeed && t->c_ispeed != t->c_ospeed)
		return EINVAL;

	/*
	 * If there were no changes, don't do anything.  This avoids dropping
	 * input and improves performance when all we did was frob things like
	 * VMIN and VTIME.
	 */
	if (tp->t_ospeed == t->c_ospeed &&
	    tp->t_cflag == t->c_cflag)
		return 0;

	mutex_spin_enter(&sc->sc_lock);

	/* And copy to tty. */
	tp->t_ispeed = 0;
	tp->t_ospeed = t->c_ospeed;
	tp->t_cflag = t->c_cflag;
	sc->sc_baudrate = t->c_ospeed;

	if (!sc->sc_heldchange) {
		if (sc->sc_tx_busy) {
			/* TX in flight: defer the register change until the
			 * current chunk drains (see sc_heldchange below). */
			sc->sc_heldtbc = sc->sc_tbc;
			sc->sc_tbc = 0;
			sc->sc_heldchange = 1;
		} else
			gtmpsc_loadchannelregs(sc);
	}
	mutex_spin_exit(&sc->sc_lock);

	/* Fake carrier on */
	(void) (*tp->t_linesw->l_modem)(tp, 1);

	/*
	 * If we've delayed a parameter change, do it now,
	 * and restart output.
	 */
	if (sc->sc_heldchange) {
		gtmpsc_loadchannelregs(sc);
		sc->sc_heldchange = 0;
		sc->sc_tbc = sc->sc_heldtbc;
		sc->sc_heldtbc = 0;
	}

	/* Clean-up TX descriptors and buffers */
	ix = sc->sc_lasttx;
	while (ix != sc->sc_nexttx) {
		vtxp = &sc->sc_poll_sdmapage->tx[ix];
		/* Pull the descriptor out of DMA so the CPU view is fresh. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
		    ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		csr = vtxp->txdesc.sdma_csr;
		if (csr & SDMA_CSR_TX_OWN) {
			/* Still owned by the SDMA engine: give it back and
			 * stop reclaiming here. */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
			    ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
		    ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
		    sizeof(vtxp->txbuf), BUS_DMASYNC_POSTWRITE);
		ix = (ix + 1) % GTMPSC_NTXDESC;
	}
	sc->sc_lasttx = ix;

	/* Output the next chunk of the contiguous buffer */
	gtmpsc_write(sc);
	if (sc->sc_tbc == 0 && sc->sc_tx_busy) {
		/* Transmit drained: flag completion for the softint and
		 * mask further TXBUF interrupts for this unit. */
		sc->sc_tx_busy = 0;
		sc->sc_tx_done = 1;
		softint_schedule(sc->sc_si);
		sdma_imask &= ~SDMA_INTR_TXBUF(sc->sc_unit);
		gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
	}
}
/*
 * gtmpsc_write - write a buffer into the hardware
 *
 * NOTE(review): this block appears damaged in this copy: 'kick', 'n',
 * 'vtxp' and 'sdcm' are declared or assigned but never used, and the
 * loop body programs MPSC/SDMA mode and descriptor-base registers
 * (MMCR_LO/HI, SCTDP/SFTDP/SCRDP, Enter Hunt) instead of copying TX
 * data -- it reads like channel-initialization code (a
 * loadchannelregs-style routine) spliced into the transmit loop.
 * Code left byte-for-byte; only comments changed.
 */
STATIC void
gtmpsc_write(struct gtmpsc_softc *sc)
{
	gtmpsc_polltx_t *vtxp;
	uint32_t sdcm, ix;
	int kick, n;

	kick = 0;
	while (sc->sc_tbc > 0 && sc->sc_nexttx != sc->sc_lasttx) {
		/* Clamp this chunk to one TX buffer; advance the ring index. */
		n = uimin(sc->sc_tbc, GTMPSC_TXBUFSZ);
		ix = sc->sc_nexttx;
		sc->sc_nexttx = (ix + 1) % GTMPSC_NTXDESC;

		/*
		 * Set pointers of current/first descriptor of TX to SDMA
		 * register.
		 */
		GT_SDMA_WRITE(sc, SDMA_SCTDP, sc->sc_txdma_map->dm_segs->ds_addr);
		GT_SDMA_WRITE(sc, SDMA_SFTDP, sc->sc_txdma_map->dm_segs->ds_addr);

		/*
		 * Set pointer of current descriptor of RX to SDMA register.
		 * (Original comment said "TX", but SDMA_SCRDP is loaded from
		 * the RX DMA map.)
		 */
		GT_SDMA_WRITE(sc, SDMA_SCRDP, sc->sc_rxdma_map->dm_segs->ds_addr);

		/*
		 * set MPSC LO and HI port config registers for GTMPSC unit
		 */
		GT_MPSC_WRITE(sc, GTMPSC_MMCR_LO,
		    GTMPSC_MMCR_LO_MODE_UART |
		    GTMPSC_MMCR_LO_ET |
		    GTMPSC_MMCR_LO_ER |
		    GTMPSC_MMCR_LO_NLM);
		GT_MPSC_WRITE(sc, GTMPSC_MMCR_HI,
		    GTMPSC_MMCR_HI_TCDV_DEFAULT |
		    GTMPSC_MMCR_HI_RDW |
		    GTMPSC_MMCR_HI_RCDV_DEFAULT);

		/*
		 * tell MPSC receive the Enter Hunt
		 */
		GT_MPSC_WRITE(sc, GTMPSC_CHRN(2), GTMPSC_CHR2_EH);
	}

	/*
	 * abort SDMA RX for MPSC unit
	 */
	GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_AR);
}
int
gtmpsccnattach(bus_space_tag_t iot, bus_dma_tag_t dmat, bus_addr_t base,
int unit, int brg, int speed, tcflag_t tcflag)
{
struct gtmpsc_softc *sc = >mpsc_cn_softc;
int i, res;
const unsigned char cp[] = "\r\nMPSC Lives!\r\n";
res = gtmpsc_hackinit(sc, iot, dmat, base, unit, brg, speed, tcflag);
if (res != 0)
return res;
/*
* gtmpsc_hackinit - hacks required to support GTMPSC console
*/
STATIC int
gtmpsc_hackinit(struct gtmpsc_softc *sc, bus_space_tag_t iot,
bus_dma_tag_t dmat, bus_addr_t base, int unit, int brg,
int baudrate, tcflag_t tcflag)
{
gtmpsc_poll_sdma_t *cn_dmapage =
(gtmpsc_poll_sdma_t *)gtmpsc_cn_dmapage;
int error;
DPRINTF(("hackinit\n"));
memset(sc, 0, sizeof(struct gtmpsc_softc));
error = bus_space_map(iot, base + GTMPSC_BASE(unit), GTMPSC_SIZE, 0,
&sc->sc_mpsch);
if (error != 0)
goto fail0;
#if defined(MPSC_CONSOLE) || defined(KGDB)
/*
* gtmpsc_common_getc - polled console read
*
* We copy data from the DMA buffers into a buffer in the softc
* to reduce descriptor ownership turnaround time
* MPSC can crater if it wraps descriptor rings,
* which is asynchronous and throttled only by line speed.
*/
STATIC int
gtmpsc_common_getc(struct gtmpsc_softc *sc)
{
gtmpsc_pollrx_t *vrxp;
uint32_t csr;
int ix, ch, wdog_interval = 0;
if (!cold)
mutex_spin_enter(&sc->sc_lock);
ix = sc->sc_rcvdrx;
vrxp = &sc->sc_poll_sdmapage->rx[ix];
while (sc->sc_rcvcnt == 0) {
/* Wait receive */
bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
ix * sizeof(gtmpsc_pollrx_t),
sizeof(sdma_desc_t),
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
csr = vrxp->rxdesc.sdma_csr;
if (csr & SDMA_CSR_RX_OWN) {
GT_MPSC_WRITE(sc, GTMPSC_CHRN(2),
GTMPSC_CHR2_EH | GTMPSC_CHR2_CRD);
if (wdog_interval++ % 32)
gt_watchdog_service();
bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
ix * sizeof(gtmpsc_pollrx_t),
sizeof(sdma_desc_t),
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
DELAY(50);
continue;
}
if (csr & SDMA_CSR_RX_ES)
aprint_error_dev(sc->sc_dev,
"RX error, rxdesc csr 0x%x\n", csr);
bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
ix * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
sizeof(vrxp->rxbuf),
BUS_DMASYNC_POSTREAD);