/*
* Copyright (c) 2000 Ludd, University of Lule}, Sweden. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* RL11/RLV11/RLV12 disk controller driver and
* RL01/RL02 disk device driver.
*
* TODO:
* Handle disk errors more gracefully
* Do overlapping seeks on multiple drives
*
* Implementation comments:
*
*/
/*
* The RL11 can only have one transfer going at a time,
* and max transfer size is one track, so only one dmamap
* is needed.
*/
error = bus_dmamap_create(sc->sc_dmat, MAXRLXFER, 1, MAXRLXFER, 0,
BUS_DMA_ALLOCNOW, &sc->sc_dmam);
if (error) {
aprint_error(": Failed to allocate DMA map, error %d\n", error);
return;
}
/* One buffer queue per controller, sorted by cylinder number. */
bufq_alloc(&sc->sc_q, "disksort", BUFQ_SORT_CYLINDER);
/*
 * Probe each possible drive on this controller: reset the drive,
 * issue a Get Status command, and attach a child device for every
 * unit that answers without the error bit set.
 */
for (i = 0; i < RL_MAXDPC; i++) {
waitcrdy(sc);
RL_WREG(RL_DA, RLDA_GS|RLDA_RST);
RL_WREG(RL_CS, RLCS_GS|(i << RLCS_USHFT));
waitcrdy(sc);
ra.type = RL_RREG(RL_MP);	/* drive status word from the RL_MP register */
ra.hwid = i;			/* hardware unit number being probed */
if ((RL_RREG(RL_CS) & RLCS_ERR) == 0)
config_found(sc->sc_dev, &ra, rlcprint, CFARGS_NONE);
}
}
/*
 * XXX We should try to discover wedges here, but
 * XXX that would mean loading up the pack and being
 * XXX able to do I/O. Should use config_defer() here.
 */
}
/*
 * rlopen -- open a partition of an RL01/RL02 drive.
 *
 * Rejects nonexistent units, refuses a non-raw open while wedges
 * exist on the disk, and checks (via rlstate()) that the pack is
 * loaded and spun up.  On the first open it reads the drive's
 * current head/cylinder position and the disklabel.
 */
int
rlopen(dev_t dev, int flag, int fmt, struct lwp *l)
{
struct rl_softc * const rc = device_lookup_private(&rl_cd, DISKUNIT(dev));
struct rlc_softc *sc;
int error, part, mask;
struct disklabel *dl;
const char *msg;
/*
 * Make sure this is a reasonable open request.
 */
if (rc == NULL)
return ENXIO;
sc = rc->rc_rlc;
part = DISKPART(dev);
mutex_enter(&rc->rc_disk.dk_openlock);
/*
 * If there are wedges, and this is not RAW_PART, then we
 * need to fail.
 */
if (rc->rc_disk.dk_nwedges != 0 && part != RAW_PART) {
error = EBUSY;
goto bad1;
}
/* Check that the disk actually is useable */
msg = rlstate(sc, rc->rc_hwid);
if (msg == NULL || msg == rlstates[RLMP_UNLOAD] ||
msg == rlstates[RLMP_SPUNDOWN]) {
error = ENXIO;
goto bad1;
}
/*
 * If this is the first open; read in where on the disk we are.
 */
dl = rc->rc_disk.dk_label;
if (rc->rc_state == DK_CLOSED) {
u_int16_t mp;
int maj;
/* Read Header command: ask the drive where its heads are now. */
RL_WREG(RL_CS, RLCS_RHDR|(rc->rc_hwid << RLCS_USHFT));
waitcrdy(sc);
mp = RL_RREG(RL_MP);
rc->rc_head = ((mp & RLMP_HS) == RLMP_HS);	/* head-select bit */
rc->rc_cyl = (mp >> 7) & 0777;			/* 9-bit cylinder field */
rc->rc_state = DK_OPEN;
/* Get disk label */
maj = cdevsw_lookup_major(&rl_cdevsw);
if ((msg = readdisklabel(MAKEDISKDEV(maj,
device_unit(rc->rc_dev), RAW_PART), rlstrategy, dl, NULL)))
aprint_normal_dev(rc->rc_dev, "%s", msg);
aprint_normal_dev(rc->rc_dev, "size %d sectors\n",
dl->d_secperunit);
}
/* Fail an open of a partition the label does not define. */
if (part >= dl->d_npartitions) {
error = ENXIO;
goto bad1;
}
/*
 * rlclose -- close a partition of an RL01/RL02 drive.
 */
int
rlclose(dev_t dev, int flag, int fmt, struct lwp *l)
{
int unit = DISKUNIT(dev);
struct rl_softc *rc = device_lookup_private(&rl_cd, unit);
int mask = (1 << DISKPART(dev));	/* this partition's bit in the open masks */
/*
 * Tail of the controller interrupt service routine: finish (or
 * fail) the active transfer and start the next one.
 */
bp = sc->sc_active;
if (bp == 0) {
/* Interrupt with no transfer outstanding -- complain and bail. */
aprint_error_dev(sc->sc_dev, "strange interrupt\n");
return;
}
bus_dmamap_unload(sc->sc_dmat, sc->sc_dmam);
sc->sc_active = 0;
cs = RL_RREG(RL_CS);
if (cs & RLCS_ERR) {
/* Controller flagged an error: decode and log it, fail the buf. */
int error = (cs & RLCS_ERRMSK) >> 10;
aprint_error_dev(sc->sc_dev, "%s\n", rlerr[error]);
bp->b_error = EIO;
bp->b_resid = bp->b_bcount;
sc->sc_bytecnt = 0;	/* abort any remaining partial transfer */
}
if (sc->sc_bytecnt == 0) /* Finished transfer */
biodone(bp);
/* Continue the partial transfer, or pull the next queued buf. */
rlcstart(sc, sc->sc_bytecnt ? bp : 0);
}
/*
 * Start routine. First position the disk to the given position,
 * then start reading/writing. An optimization would be to be able
 * to handle overlapping seeks between disks.
 *
 * ob == NULL means "start the next queued request"; non-NULL means
 * "continue this partially-completed transfer".
 */
void
rlcstart(struct rlc_softc *sc, struct buf *ob)
{
struct disklabel *lp;
struct rl_softc *rc;
struct buf *bp;
int bn, cn, sn, tn, blks, err;
if (sc->sc_active)
return; /* Already doing something */
if (ob == 0) {
/* Fresh start: fetch the next request off the sorted queue. */
bp = bufq_get(sc->sc_q);
if (bp == NULL)
return; /* Nothing to do */
sc->sc_bufaddr = bp->b_data;
sc->sc_diskblk = bp->b_rawblkno;
sc->sc_bytecnt = bp->b_bcount;
bp->b_resid = 0;
} else
bp = ob;	/* continue a partially-done transfer */
sc->sc_active = bp;
rc = device_lookup_private(&rl_cd, DISKUNIT(bp->b_dev));
bn = sc->sc_diskblk;
lp = rc->rc_disk.dk_label;
/* Convert the absolute block number into cylinder/track/sector. */
if (bn) {
cn = bn / lp->d_secpercyl;
sn = bn % lp->d_secpercyl;
tn = sn / lp->d_nsectors;
sn = sn % lp->d_nsectors;
} else
cn = sn = tn = 0;
/*
 * Check if we have to position disk first.
 */
if (rc->rc_cyl != cn || rc->rc_head != tn) {
u_int16_t da = RLDA_SEEK;
if (cn > rc->rc_cyl)	/* RLDA_DIR: seek toward higher cylinders */
da |= ((cn - rc->rc_cyl) << RLDA_CYLSHFT) | RLDA_DIR;
else
da |= ((rc->rc_cyl - cn) << RLDA_CYLSHFT);
if (tn)
da |= RLDA_HSSEEK;	/* head-select bit for the seek */
waitcrdy(sc);
RL_WREG(RL_DA, da);
RL_WREG(RL_CS, RLCS_SEEK | (rc->rc_hwid << RLCS_USHFT));
waitcrdy(sc);
/* Remember where the heads now are, for the next request. */
rc->rc_cyl = cn;
rc->rc_head = tn;
}
/* Load the disk address register with the target position. */
RL_WREG(RL_DA, (cn << RLDA_CYLSHFT) | (tn ? RLDA_HSRW : 0) | (sn << 1));
blks = sc->sc_bytecnt/DEV_BSIZE;	/* whole blocks still to move */
/*
 * Called once per controller when an ubareset occurs.
 * Retracts all disks and restarts active transfers.
 */
void
rlcreset(device_t dev)
{
struct rlc_softc *sc = device_private(dev);
struct rl_softc *rc;
int i;
u_int16_t mp;
/*
 * Walk every configured drive, skipping units that do not exist,
 * are not open, or hang off a different controller.
 */
for (i = 0; i < rl_cd.cd_ndevs; i++) {
if ((rc = device_lookup_private(&rl_cd, i)) == NULL)
continue;
if (rc->rc_state != DK_OPEN)
continue;	/* drive not in use */
if (rc->rc_rlc != sc)
continue;	/* belongs to another controller */