/*
* Copyright (c) 2003 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Jason R. Thorpe for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 2000, 2001 by Greg Ansley
* Partially derived from Matt Jacob's ISP driver.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice immediately at the beginning of the file, without modification,
* this list of conditions, and the following disclaimer.
* 2. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Additional Copyright (c) 2002 by Matthew Jacob under same license.
*/
/*
* mpt_netbsd.c:
*
* NetBSD-specific routines for LSI Fusion adapters. Includes some
* bus_dma glue, and SCSIPI glue.
*
* Adapted from the FreeBSD "mpt" driver by Jason R. Thorpe for
* Wasabi Systems, Inc.
*
* Additional contributions by Garrett D'Amore on behalf of TELES AG.
*/
/*
* Save the output of the config so we can rescan the bus in case of
* errors
*/
mpt->sc_scsibus_dv = config_found(mpt->sc_dev, &mpt->sc_channel,
scsiprint, CFARGS_NONE);
#if NBIO > 0
if (mpt_is_raid(mpt)) {
if (bio_register(mpt->sc_dev, mpt_bio_ioctl) != 0)
panic("%s: controller registration failed",
device_xname(mpt->sc_dev));
}
#endif
}
int
mpt_dma_mem_alloc(mpt_softc_t *mpt)
{
bus_dma_segment_t reply_seg, request_seg;
int reply_rseg, request_rseg;
bus_addr_t pptr, end;
char *vptr;
size_t len;
int error, i;
/* Check if we have already allocated the reply memory. */
if (mpt->reply != NULL)
return (0);
/*
* Allocate the request pool. This isn't really DMA'd memory,
* but it's a convenient place to do it.
*/
len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
if (mpt->request_pool == NULL) {
aprint_error_dev(mpt->sc_dev, "unable to allocate request pool\n");
return (ENOMEM);
}
/*
 * mpt_poll:
 *
 *	Poll the adapter's interrupt handler until the given transfer
 *	completes or the timeout expires.  "count" is the timeout in
 *	milliseconds.  Returns 0 if the transfer finished, 1 on timeout.
 */
static int
mpt_poll(mpt_softc_t *mpt, struct scsipi_xfer *xs, int count)
{

	/* The timeout is in msec, so spin in 1000usec (1ms) steps. */
	for (; count != 0; count--) {
		mpt_intr(mpt);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		/* Busy-waiting is fine; polling only happens at boot. */
		delay(1000);
	}

	/* The transfer never completed within the allotted time. */
	return (1);
}
if (req->xfer == NULL) {
printf("mpt_timeout: NULL xfer for request index 0x%x, sequenc 0x%x\n",
req->index, req->sequence);
return;
}
xs = req->xfer;
periph = xs->xs_periph;
mpt = device_private(periph->periph_channel->chan_adapter->adapt_dev);
scsipi_printaddr(periph);
printf("command timeout\n");
s = splbio();
oseq = req->sequence;
mpt->timeouts++;
if (mpt_intr(mpt)) {
if (req->sequence != oseq) {
mpt->success++;
mpt_prt(mpt, "recovered from command timeout");
splx(s);
return;
}
}
/*
* Ensure the IOC is really done giving us data since it appears it can
* sometimes fail to give us interrupts under heavy load.
*/
nrepl = mpt_drain_queue(mpt);
if (nrepl ) {
mpt_prt(mpt, "mpt_timeout: recovered %d commands",nrepl);
}
if (req->sequence != oseq) {
mpt->success++;
splx(s);
return;
}
/*
 * mpt_restart:
 *
 *	Recover a wedged IOC: soft-reset it, fail every in-flight request
 *	back to scsipi, then re-initialize the controller.  req0, if
 *	non-NULL, is the request that triggered the restart; it is
 *	completed WITHOUT XS_REQUEUE, so its error status (presumably set
 *	by the caller before invoking us -- confirm) stands, while all
 *	other active requests are marked for retry.
 */
static void
mpt_restart(mpt_softc_t *mpt, request_t *req0)
{
	int i, s, nreq;
	request_t *req;
	struct scsipi_xfer *xs;

	/* first, reset the IOC, leaving stopped so all requests are idle */
	if (mpt_soft_reset(mpt) != MPT_OK) {
		mpt_prt(mpt, "soft reset failed");
		/*
		 * Don't try a hard reset since this mangles the PCI
		 * configuration registers.
		 */
		return;
	}

	/* Freeze the channel so scsipi doesn't queue more commands. */
	scsipi_channel_freeze(&mpt->sc_channel, 1);

	/* Return all pending requests to scsipi and de-allocate them. */
	s = splbio();
	nreq = 0;
	for (i = 0; i < MPT_MAX_REQUESTS(mpt); i++) {
		req = &mpt->request_pool[i];
		xs = req->xfer;
		if (xs != NULL) {
			/* Active request: tear down its DMA mapping first. */
			if (xs->datalen != 0)
				bus_dmamap_unload(mpt->sc_dmat, req->dmap);
			req->xfer = NULL;
			/* Cancel the per-xfer timeout before completing. */
			callout_stop(&xs->xs_callout);
			if (req != req0) {
				/* Innocent bystander: have scsipi retry. */
				nreq++;
				xs->error = XS_REQUEUE;
			}
			scsipi_done(xs);
			/*
			 * NOTE(review): an earlier comment here claimed
			 * mpt_free_request() was unnecessary because
			 * mpt_init() below frees all requests anyway, yet
			 * the call is made -- confirm which is intended.
			 */
			mpt_free_request(mpt, req);
		}
	}
	splx(s);
	if (nreq > 0)
		mpt_prt(mpt, "re-queued %d requests", nreq);

	/* Re-initialize the IOC (which restarts it). */
	if (mpt_init(mpt, MPT_DB_INIT_HOST) == 0)
		mpt_prt(mpt, "restart succeeded");
	/* else error message already printed */

	/* Thaw the channel, causing scsipi to re-queue the commands. */
	scsipi_channel_thaw(&mpt->sc_channel, 1);
}
/*
 * mpt_drain_queue:
 *
 *	Pull every pending reply off the IOC's reply queue and dispatch
 *	each one through mpt_done().  Returns the number of replies
 *	processed (0 if the queue was already empty).
 */
static int
mpt_drain_queue(mpt_softc_t *mpt)
{
	uint32_t reply;
	int count;

	count = 0;
	while ((reply = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) {
		count++;
		if (mpt->verbose > 1) {
			if ((reply & MPT_CONTEXT_REPLY) != 0) {
				/* Address reply; IOC has something to say */
				mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
			} else {
				/* Context reply; all went well */
				mpt_prt(mpt, "context %u reply OK", reply);
			}
		}
		mpt_done(mpt, reply);
	}

	return (count);
}
static void
mpt_done(mpt_softc_t *mpt, uint32_t reply)
{
struct scsipi_xfer *xs = NULL;
struct scsipi_periph *periph;
int index;
request_t *req;
MSG_REQUEST_HEADER *mpt_req;
MSG_SCSI_IO_REPLY *mpt_reply;
int restart = 0; /* nonzero if we need to restart the IOC*/
/*
* Address reply with MessageContext high bit set.
* This is most likely a notify message, so we try
* to process it, then free it.
*/
if (__predict_false((index & 0x80000000) != 0)) {
if (mpt_reply != NULL)
mpt_ctlop(mpt, mpt_reply, reply);
else
mpt_prt(mpt, "%s: index 0x%x, NULL reply", __func__,
index);
return;
}
/* Did we end up with a valid index into the table? */
if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
mpt_prt(mpt, "%s: invalid index (0x%x) in reply", __func__,
index);
return;
}
req = &mpt->request_pool[index];
/* Make sure memory hasn't been trashed. */
if (__predict_false(req->index != index)) {
mpt_prt(mpt, "%s: corrupted request_t (0x%x)", __func__,
index);
return;
}
/* Short cut for task management replies; nothing more for us to do. */
if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
if (mpt->verbose > 1)
mpt_prt(mpt, "%s: TASK MGMT", __func__);
KASSERT(req == mpt->mngt_req);
mpt->mngt_req = NULL;
goto done;
}
if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
goto done;
/*
* At this point, it had better be a SCSI I/O command, but don't
* crash if it isn't.
*/
if (__predict_false(mpt_req->Function !=
MPI_FUNCTION_SCSI_IO_REQUEST)) {
if (mpt->verbose > 1)
mpt_prt(mpt, "%s: unknown Function 0x%x (0x%x)",
__func__, mpt_req->Function, index);
goto done;
}
/* Recover scsipi_xfer from the request structure. */
xs = req->xfer;
/* Can't have a SCSI command without a scsipi_xfer. */
if (__predict_false(xs == NULL)) {
mpt_prt(mpt,
"%s: no scsipi_xfer, index = 0x%x, seq = 0x%08x", __func__,
req->index, req->sequence);
mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
mpt_prt(mpt, "mpt_request:");
mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);
/*
* If we were a data transfer, unload the map that described
* the data buffer.
*/
if (__predict_true(xs->datalen != 0)) {
bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
req->dmap->dm_mapsize,
(xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
: BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(mpt->sc_dmat, req->dmap);
}
if (__predict_true(mpt_reply == NULL)) {
/*
* Context reply; report that the command was
* successful!
*
* Also report the xfer mode, if necessary.
*/
if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
if ((mpt->mpt_report_xfer_mode &
(1 << periph->periph_target)) != 0)
mpt_get_xfer_mode(mpt, periph);
}
xs->error = XS_NOERROR;
xs->status = SCSI_OK;
xs->resid = 0;
mpt_free_request(mpt, req);
scsipi_done(xs);
return;
}
xs->status = mpt_reply->SCSIStatus;
switch (le16toh(mpt_reply->IOCStatus) & MPI_IOCSTATUS_MASK) {
case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
xs->error = XS_DRIVER_STUFFUP;
mpt_prt(mpt, "%s: IOC overrun!", __func__);
break;
case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
/*
* Yikes! Tagged queue full comes through this path!
*
* So we'll change it to a status error and anything
* that returns status should probably be a status
* error as well.
*/
xs->resid = xs->datalen - le32toh(mpt_reply->TransferCount);
if (mpt_reply->SCSIState &
MPI_SCSI_STATE_NO_SCSI_STATUS) {
xs->error = XS_DRIVER_STUFFUP;
break;
}
/* FALLTHROUGH */
case MPI_IOCSTATUS_SUCCESS:
case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
switch (xs->status) {
case SCSI_OK:
/* Report the xfer mode, if necessary. */
if ((mpt->mpt_report_xfer_mode &
(1 << periph->periph_target)) != 0)
mpt_get_xfer_mode(mpt, periph);
xs->resid = 0;
break;
case SCSI_CHECK:
xs->error = XS_SENSE;
break;
case SCSI_BUSY:
case SCSI_QUEUE_FULL:
xs->error = XS_BUSY;
break;
case MPI_IOCSTATUS_BUSY:
case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
xs->error = XS_RESOURCE_SHORTAGE;
break;
case MPI_IOCSTATUS_SCSI_INVALID_BUS:
case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
xs->error = XS_SELTIMEOUT;
break;
case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
xs->error = XS_DRIVER_STUFFUP;
mpt_prt(mpt, "%s: IOC SCSI residual mismatch!", __func__);
restart = 1;
break;
case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
/* XXX What should we do here? */
mpt_prt(mpt, "%s: IOC SCSI task terminated!", __func__);
restart = 1;
break;
case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
/* XXX */
xs->error = XS_DRIVER_STUFFUP;
mpt_prt(mpt, "%s: IOC SCSI task failed!", __func__);
restart = 1;
break;
case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
/* XXX */
xs->error = XS_DRIVER_STUFFUP;
mpt_prt(mpt, "%s: IOC task terminated!", __func__);
restart = 1;
break;
case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
/* XXX This is a bus-reset */
xs->error = XS_DRIVER_STUFFUP;
mpt_prt(mpt, "%s: IOC SCSI bus reset!", __func__);
restart = 1;
break;
case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
/*
* FreeBSD and Linux indicate this is a phase error between
* the IOC and the drive itself. When this happens, the IOC
* becomes unhappy and stops processing all transactions.
* Call mpt_timeout which knows how to get the IOC back
* on its feet.
*/
mpt_prt(mpt, "%s: IOC indicates protocol error -- "
"recovering...", __func__);
xs->error = XS_TIMEOUT;
restart = 1;
if (mpt_reply != NULL) {
if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
sizeof(xs->sense.scsi_sense));
} else if (mpt_reply->SCSIState &
MPI_SCSI_STATE_AUTOSENSE_FAILED) {
/*
* This will cause the scsipi layer to issue
* a REQUEST SENSE.
*/
if (xs->status == SCSI_CHECK)
xs->error = XS_BUSY;
}
}
done:
if (mpt_reply != NULL && le16toh(mpt_reply->IOCStatus) &
MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
mpt_prt(mpt, "%s: IOC has error - logging...\n", __func__);
mpt_ctlop(mpt, mpt_reply, reply);
}
/* If IOC done with this request, free it up. */
if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
mpt_free_request(mpt, req);
/* If address reply, give the buffer back to the IOC. */
if (mpt_reply != NULL)
mpt_free_reply(mpt, (reply << 1));
if (xs != NULL)
scsipi_done(xs);
if (restart) {
mpt_prt(mpt, "%s: IOC fatal error: restarting...", __func__);
mpt_restart(mpt, NULL);
}
}
/*
* We use the message context to find the request structure when
* we get the command completion interrupt from the IOC.
*/
mpt_req->MsgContext = htole32(req->index);
/* Which physical device to do the I/O on. */
mpt_req->TargetID = periph->periph_target;
mpt_req->LUN[1] = periph->periph_lun;
/* Set the direction of the transfer. */
if (xs->xs_control & XS_CTL_DATA_IN)
mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
else if (xs->xs_control & XS_CTL_DATA_OUT)
mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
else
mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
/* Set the queue behavior. */
if (__predict_true((!mpt->is_scsi) ||
(mpt->mpt_tag_enable &
(1 << periph->periph_target)))) {
switch (XS_CTL_TAGTYPE(xs)) {
case XS_CTL_HEAD_TAG:
mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
break;
if (mpt->is_scsi) {
/*
* Always allow disconnect; we don't have a way to disable
* it right now, in any case.
*/
mpt->mpt_disc_enable |= (1 << xm->xm_target);
/*
* SCSI transport settings only make any sense for
* SCSI
*/
tmp = mpt->mpt_dev_page1[xm->xm_target];
/*
* Set the wide/narrow parameter for the target.
*/
if (xm->xm_mode & PERIPH_CAP_WIDE16)
tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
else
tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
/*
* Set the synchronous parameters for the target.
*
* XXX If we request sync transfers, we just go ahead and
* XXX request the maximum available. We need finer control
* XXX in order to implement Domain Validation.
*/
tmp.RequestedParameters &= ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK |
MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK |
MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS |
MPI_SCSIDEVPAGE1_RP_IU);
if (xm->xm_mode & PERIPH_CAP_SYNC) {
int factor, offset, np;
/*
* Make a note that we should perform an async callback at the
* end of the next successful command completion to report the
* negotiated transfer mode.
*/
mpt->mpt_report_xfer_mode |= (1 << xm->xm_target);
}
/*
* Tagged queueing is all controlled by us; there is no
* other setting to query.
*/
if (mpt->mpt_tag_enable & (1 << periph->periph_target))
xm.xm_mode |= PERIPH_CAP_TQING;
/*
* We're going to deliver the async event, so clear the marker.
*/
mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target);
switch (le32toh(msg->Event)) {
case MPI_EVENT_LOG_DATA:
{
int i;
/* Some error occurrerd that the Fusion wants logged. */
mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x", msg->IOCLogInfo);
mpt_prt(mpt, "EvtLogData: Event Data:");
for (i = 0; i < msg->EventDataLength; i++) {
if ((i % 4) == 0)
printf("%s:\t", device_xname(mpt->sc_dev));
printf("0x%08x%c", msg->Data[i],
((i % 4) == 3) ? '\n' : ' ');
}
if ((i % 4) != 0)
printf("\n");
break;
}
case MPI_EVENT_UNIT_ATTENTION:
mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x",
(msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
break;
case MPI_EVENT_IOC_BUS_RESET:
/* We generated a bus reset. */
mpt_prt(mpt, "IOC Bus Reset Port %d",
(msg->Data[0] >> 8) & 0xff);
break;
case MPI_EVENT_EXT_BUS_RESET:
/* Someone else generated a bus reset. */
mpt_prt(mpt, "External Bus Reset");
/*
* These replies don't return EventData like the MPI
* spec says they do.
*/
/* XXX Send an async event? */
break;
case MPI_EVENT_RESCAN:
/*
* In general, this means a device has been added
* to the loop.
*/
mpt_prt(mpt, "Rescan Port %d", (msg->Data[0] >> 8) & 0xff);
/* XXX Send an async event? */
break;
case MPI_EVENT_LINK_STATUS_CHANGE:
mpt_prt(mpt, "Port %d: Link state %s",
(msg->Data[1] >> 8) & 0xff,
(msg->Data[0] & 0xff) == 0 ? "Failed" : "Active");
break;
case MPI_EVENT_LOOP_STATE_CHANGE:
switch ((msg->Data[0] >> 16) & 0xff) {
case 0x01:
mpt_prt(mpt,
"Port %d: FC Link Event: LIP(%02x,%02x) "
"(Loop Initialization)",
(msg->Data[1] >> 8) & 0xff,
(msg->Data[0] >> 8) & 0xff,
(msg->Data[0] ) & 0xff);
switch ((msg->Data[0] >> 8) & 0xff) {
case 0xf7:
if ((msg->Data[0] & 0xff) == 0xf7)
mpt_prt(mpt, "\tDevice needs AL_PA");
else
mpt_prt(mpt, "\tDevice %02x doesn't "
"like FC performance",
msg->Data[0] & 0xff);
break;
case 0xf8:
if ((msg->Data[0] & 0xff) == 0xf7)
mpt_prt(mpt, "\tDevice detected loop "
"failure before acquiring AL_PA");
else
mpt_prt(mpt, "\tDevice %02x detected "
"loop failure",
msg->Data[0] & 0xff);
break;
case ADAPTER_REQ_GROW_RESOURCES:
/* Not supported. */
return;
case ADAPTER_REQ_SET_XFER_MODE:
mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg);
return;
}
}
/*
 * mpt_minphys:
 *
 *	Clamp a transfer to the largest size the adapter's scatter/gather
 *	list can describe, then apply the generic minphys() bound as well.
 */
static void
mpt_minphys(struct buf *bp)
{

/*
 * One SGL entry is held in reserve to cover a transfer whose start
 * address is not page-aligned, hence the "- 1" below.
 */
#define	MPT_MAX_XFER	((MPT_SGL_MAX - 1) * PAGE_SIZE)
	if (bp->b_bcount > MPT_MAX_XFER)
		bp->b_bcount = MPT_MAX_XFER;

	minphys(bp);
}
/*
 * mpt_ioctl:
 *
 *	Handle scsipi channel ioctls.  Only SCBUSIORESET is supported;
 *	every other command is rejected with ENOTTY.
 */
static int
mpt_ioctl(struct scsipi_channel *chan, u_long cmd, void *arg,
    int flag, struct proc *p)
{
	mpt_softc_t *mpt = device_private(chan->chan_adapter->adapt_dev);
	int s;

	if (cmd != SCBUSIORESET)
		return (ENOTTY);

	/*
	 * Reset the bus, then run the interrupt handler once at splbio
	 * to collect any replies the reset produced.
	 */
	mpt_bus_reset(mpt);
	s = splbio();
	mpt_intr(mpt);
	splx(s);
	return (0);
}