/* $NetBSD: iop.c,v 1.93 2023/09/07 20:07:03 ad Exp $ */
/*-
* Copyright (c) 2000, 2001, 2002, 2007, 2023 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Andrew Doran.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Support for I2O IOPs (intelligent I/O processors).
*/
/* Initialise the IOP's outbound FIFO. */
if (iop_ofifo_init(sc) != 0) {
aprint_error_dev(sc->sc_dev, "unable to init oubound FIFO\n");
goto bail_out3;
}
/*
* Defer further configuration until (a) interrupts are working and
* (b) we have enough information to build the system table.
*/
config_interrupts(sc->sc_dev, iop_config_interrupts);
/* Configure shutdown hook before we start any device activity. */
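/* iop_sdh is shared by all IOPs, so only the first to attach establishes it. */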
if (iop_sdh == NULL)
iop_sdh = shutdownhook_establish(iop_shutdown, NULL);
/* Ensure interrupts are enabled at the IOP. */
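/* Clearing the IOP_INTR_OFIFO bit in the mask unmasks outbound (reply) FIFO interrupts. */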
mask = iop_inl(sc, IOP_REG_INTR_MASK);
iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);
if (intrstr != NULL)
printf("%s: interrupting at %s\n", device_xname(sc->sc_dev),
intrstr);
/*
* Post the system table to the IOP and bring it to the OPERATIONAL
* state.
*/
if (iop_systab_set(sc) != 0) {
aprint_error_dev(sc->sc_dev, "unable to set system table\n");
mutex_exit(&sc->sc_conflock);
return;
}
if (iop_sys_enable(sc) != 0) {
aprint_error_dev(sc->sc_dev, "unable to enable system\n");
mutex_exit(&sc->sc_conflock);
return;
}
/*
* Set up an event handler for this IOP.
*/
sc->sc_eventii.ii_dv = self;
sc->sc_eventii.ii_intr = iop_intr_event;
sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
sc->sc_eventii.ii_tid = I2O_TID_IOP;
iop_initiator_register(sc, &sc->sc_eventii);
/*
* Reconfigure: find new and removed devices.
*/
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
struct iop_msg *im;
struct i2o_hba_bus_scan mf;
struct i2o_lct_entry *le;
struct iop_initiator *ii, *nextii;
int rv, tid, i;
KASSERT(mutex_owned(&sc->sc_conflock));
/*
* If the reconfiguration request isn't the result of LCT change
* notification, then be more thorough: ask all bus ports to scan
* their busses. Wait up to 5 minutes for each bus port to complete
* the request.
*/
if (chgind == 0) {
if ((rv = iop_lct_get(sc)) != 0) {
DPRINTF(("iop_reconfigure: unable to read LCT\n"));
return (rv);
}
le = sc->sc_lct->entry;
for (i = 0; i < sc->sc_nlctent; i++, le++) {
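/* Class and TID fields are 12 bits wide, hence the 4095 (0xfff) masks. */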
if ((le16toh(le->classid) & 4095) !=
I2O_CLASS_BUS_ADAPTER_PORT)
continue;
tid = le16toh(le->localtid) & 4095;
/* Re-read the LCT and determine if it has changed. */
if ((rv = iop_lct_get(sc)) != 0) {
DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
return (rv);
}
DPRINTF(("%s: %d LCT entries\n", device_xname(sc->sc_dev), sc->sc_nlctent));
/* Allow 1 queued command per device while we're configuring. */
iop_adjqparam(sc, 1);
/*
* Match and attach child devices. We configure high-level devices
* first so that any claims will propagate throughout the LCT,
* hopefully masking off aliased devices as a result.
*
* Re-reading the LCT at this point is a little dangerous, but we'll
* trust the IOP (and the operator) to behave itself...
*/
iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
IC_CONFIGURE | IC_PRIORITY);
if ((rv = iop_lct_get(sc)) != 0) {
DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
}
iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
IC_CONFIGURE);
for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
nextii = LIST_NEXT(ii, ii_list);
/* Detach devices that were configured, but are now gone. */
for (i = 0; i < sc->sc_nlctent; i++)
if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
break;
if (i == sc->sc_nlctent ||
(sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0) {
config_detach(ii->ii_dv, DETACH_FORCE);
continue;
}
/*
* Tell initiators that existed before the re-configuration
* to re-configure.
*/
if (ii->ii_reconfig == NULL)
continue;
if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
aprint_error_dev(sc->sc_dev, "%s failed reconfigure (%d)\n",
device_xname(ii->ii_dv), rv);
}
/* Re-adjust queue parameters and return. */
if (sc->sc_nii != 0)
iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
/ sc->sc_nii);
return (0);
}
/*
* Configure I2O devices into the system.
*/
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
struct iop_attach_args ia;
struct iop_initiator *ii;
const struct i2o_lct_entry *le;
device_t dv;
int i, j, nent;
u_int usertid;
int locs[IOPCF_NLOCS];
nent = sc->sc_nlctent;
for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;
/* Ignore the device if it's in use. */
usertid = le32toh(le->usertid) & 4095;
if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
continue;
if (pnp != NULL) {
iop_devinfo(ia->ia_class, devinfo, sizeof(devinfo));
aprint_normal("%s at %s", devinfo, pnp);
}
aprint_normal(" tid %d", ia->ia_tid);
return (UNCONF);
}
/*
* Shut down all configured IOPs.
*/
static void
iop_shutdown(void *junk)
{
struct iop_softc *sc;
int i;
printf("shutting down iop devices...");
for (i = 0; i < iop_cd.cd_ndevs; i++) {
if ((sc = device_lookup_private(&iop_cd, i)) == NULL)
continue;
if ((sc->sc_flags & IOP_ONLINE) == 0)
continue;
if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
/*
 * Skip I2O_EXEC_IOP_CLEAR on AMI boards: some AMI firmware
 * revisions will go to sleep and never come back after it.
 */
iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
IOP_ICTX, 0, 1000);
}
}
/* Wait. Some boards could still be flushing, stupidly enough. */
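/* delay() counts microseconds: busy-wait for 5 seconds. */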
delay(5000*1000);
printf(" done\n");
}
/*
* Retrieve IOP status.
*/
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
struct i2o_exec_status_get mf;
struct i2o_status *st;
paddr_t pa;
int rv, i;
pa = sc->sc_scr_dmamap->dm_segs[0].ds_addr;
st = (struct i2o_status *)sc->sc_scr;
/*
* The I2O spec says that there are two SGLs: one for the status
* word, and one for a list of discarded MFAs. It continues to say
* that if you don't want to get the list of MFAs, an IGNORE SGL is
* necessary; this isn't the case (and is in fact a bad thing).
*/
mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
(u_int32_t)sc->sc_scr_dmamap->dm_segs[0].ds_addr;
mb[0] += 2 << 16;
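/*
 * The two words just appended form one SIMPLE SGL element: a byte
 * count with flags, then the low 32 bits of the status buffer's
 * physical address.  Since the message-size field lives in the upper
 * 16 bits of mb[0], "2 << 16" grows the frame by those two words.
 */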
/* Now safe to sync the reply map. */
sc->sc_curib = 0;
}
/* Populate the outbound FIFO. */
for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
addr += sc->sc_framesize;
}
return (0);
}
/*
* Read the specified number of bytes from the IOP's hardware resource table.
*/
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
struct iop_msg *im;
int rv;
struct i2o_exec_hrt_get *mf;
u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
/*
* Request the specified number of bytes from the IOP's logical
* configuration table. If a change indicator is specified, this
* is a verbatim notification request, so the caller must be
* prepared to wait indefinitely.
*/
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
u_int32_t chgind)
{
struct iop_msg *im;
struct i2o_exec_lct_notify *mf;
int rv;
u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
im = iop_msg_alloc(sc, IM_WAIT);
memset(lct, 0, size);
/*
* Request the specified parameter group from the target. If an initiator
* is specified (a) don't wait for the operation to complete, but instead
* let the initiator's interrupt handler deal with the reply and (b) place a
* pointer to the parameter group op in the wrapper's `im_dvcontext' field.
*/
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
int size, struct iop_initiator *ii)
{
struct iop_msg *im;
struct i2o_util_params_op *mf;
int rv;
struct iop_pgop *pgop;
u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
/*
* Set a single field in a scalar parameter group.
*/
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
int size, int field)
{
struct iop_msg *im;
struct i2o_util_params_op *mf;
struct iop_pgop *pgop;
int rv, totsize;
u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
/*
* Delete all rows in a tabular parameter group.
*/
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
struct iop_msg *im;
struct i2o_util_params_op *mf;
struct iop_pgop pgop;
u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
int rv;
/*
* Add a single row to a tabular parameter group. The row can have only one
* field.
*/
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
int size, int row)
{
struct iop_msg *im;
struct i2o_util_params_op *mf;
struct iop_pgop *pgop;
int rv, totsize;
u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
/*
* Execute a simple command (no parameters).
*/
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
int async, int timo)
{
struct iop_msg *im;
struct i2o_msg mf;
int rv, fl;
/*
* Post the system table to the IOP.
*/
static int
iop_systab_set(struct iop_softc *sc)
{
struct i2o_exec_sys_tab_set *mf;
struct iop_msg *im;
bus_space_handle_t bsh;
bus_addr_t boo;
u_int32_t mema[2], ioa[2];
int rv;
u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
/*
* Reset the IOP. Must be called with interrupts disabled.
*/
static int
iop_reset(struct iop_softc *sc)
{
u_int32_t mfa, *sw;
struct i2o_exec_iop_reset mf;
int rv;
paddr_t pa;
sw = (u_int32_t *)sc->sc_scr;
pa = sc->sc_scr_dmamap->dm_segs[0].ds_addr;
/*
* IOP is now in the INIT state. Wait no more than 10 seconds for
* the inbound queue to become responsive.
*/
POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
if (mfa == IOP_MFA_EMPTY) {
aprint_error_dev(sc->sc_dev, "reset failed\n");
return (EIO);
}
iop_release_mfa(sc, mfa);
return (0);
}
/*
* Register a new initiator. Must be called with the configuration lock
* held.
*/
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
static int ictxgen;
/* 0 is reserved (by us) for system messages. */
ii->ii_ictx = ++ictxgen;
/*
* `Utility initiators' don't make it onto the per-IOP initiator list
* (which is used only for configuration), but do get one slot on
* the inbound queue.
*/
if ((ii->ii_flags & II_UTILITY) == 0) {
LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
sc->sc_nii++;
} else
sc->sc_nuii++;
cv_init(&ii->ii_cv, "iopevt");
mutex_spin_enter(&sc->sc_intrlock);
LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
mutex_spin_exit(&sc->sc_intrlock);
}
/*
* Unregister an initiator. Must be called with the configuration lock
* held.
*/
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{
#ifdef I2ODEBUG
if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
panic("iop_handle_reply: 64-bit reply");
#endif
/*
* Find the initiator.
*/
ictx = le32toh(rb->msgictx);
if (ictx == IOP_ICTX)
ii = NULL;
else {
ii = LIST_FIRST(IOP_ICTXHASH(ictx));
for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
if (ii->ii_ictx == ictx)
break;
if (ii == NULL) {
#ifdef I2ODEBUG
iop_reply_print(sc, rb);
#endif
aprint_error_dev(sc->sc_dev, "WARNING: bad ictx returned (%x)\n",
ictx);
return (-1);
}
}
/*
* If we received a transport failure notice, we've got to dig the
* transaction context (if any) out of the original message frame,
* and then release the original MFA back to the inbound FIFO.
*/
if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
status = I2O_STATUS_SUCCESS;
/*
* Map a data transfer. Write a scatter-gather list into the message frame.
*/
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
void *xferaddr, int xfersize, int out, struct proc *up)
{
bus_dmamap_t dm;
bus_dma_segment_t *ds;
struct iop_xfer *ix;
u_int rv, i, nsegs, flg, off, xn;
u_int32_t *p;
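/* Find the first unused transfer slot in the message wrapper. */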
for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
if (ix->ix_size == 0)
break;
#ifdef I2ODEBUG
if (xfersize == 0)
panic("iop_msg_map: null transfer");
if (xfersize > IOP_MAX_XFER)
panic("iop_msg_map: transfer too large");
if (xn == IOP_MAX_MSG_XFERS)
panic("iop_msg_map: too many xfers");
#endif
/*
* Only the first DMA map is static.
*/
if (xn != 0) {
rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
IOP_MAX_SEGS, IOP_MAX_XFER, 0,
BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
if (rv != 0)
return (rv);
}
/* Fix up the transfer record, and sync the map. */
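/*
 * PREWRITE flushes the buffer out of the CPU cache before the IOP
 * reads it; PREREAD prepares the buffer for the IOP to write into.
 */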
ix->ix_flags = (out ? IX_OUT : IX_IN);
ix->ix_size = xfersize;
bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
/*
* If this is the first xfer we've mapped for this message, adjust
* the SGL offset field in the message header.
*/
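/*
 * "(mb[0] >> 12) & 0xf0" moves the current message length in words
 * (bits 16-19 of mb[0]) into the SGL-offset nibble (bits 4-7), so
 * the SGL starts right where the fixed part of the message ends.
 */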
if ((im->im_flags & IM_SGLOFFADJ) == 0) {
mb[0] += (mb[0] >> 12) & 0xf0;
im->im_flags |= IM_SGLOFFADJ;
}
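/* Each SIMPLE element is two words, so grow the size field by 2 * nsegs. */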
mb[0] += (nsegs << 17);
return (0);
bad:
if (xn != 0)
bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
return (rv);
}
/*
* Map a block I/O data transfer (different in that there's only one per
* message maximum, and PAGE addressing may be used). Write a scatter
* gather list into the message frame.
*/
int
iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
void *xferaddr, int xfersize, int out)
{
bus_dma_segment_t *ds;
bus_dmamap_t dm;
struct iop_xfer *ix;
u_int rv, i, nsegs, off, slen, tlen, flg;
paddr_t saddr, eaddr;
u_int32_t *p;
#ifdef I2ODEBUG
if (xfersize == 0)
panic("iop_msg_map_bio: null transfer");
if (xfersize > IOP_MAX_XFER)
panic("iop_msg_map_bio: transfer too large");
if ((im->im_flags & IM_SGLOFFADJ) != 0)
panic("iop_msg_map_bio: SGLOFFADJ");
#endif
/*
* If the transfer is highly fragmented and won't fit using SIMPLE
* elements, use PAGE_LIST elements instead. SIMPLE elements are
* potentially more efficient, both for us and the IOP.
*/
if (dm->dm_nsegs > nsegs) {
nsegs = 1;
p = mb + off + 1;
/* XXX This should be done with a bus_space flag. */
for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
slen = ds->ds_len;
saddr = ds->ds_addr;
/* Fix up the transfer record, and sync the map. */
ix->ix_flags = (out ? IX_OUT : IX_IN);
ix->ix_size = xfersize;
bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
/*
* Adjust the SGL offset and total message size fields. We don't
* set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
*/
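/*
 * "off" (in 32-bit words) lands in the SGL-offset nibble, bits 4-7
 * of mb[0]; "nsegs" words of page-list data are added to the
 * message-size field in the upper 16 bits.
 */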
mb[0] += ((off << 4) + (nsegs << 16));
return (0);
}
/*
* Unmap all data transfers associated with a message wrapper.
*/
void
iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
{
struct iop_xfer *ix;
int i;
#ifdef I2ODEBUG
if (im->im_xfer[0].ix_size == 0)
panic("iop_msg_unmap: no transfers mapped");
#endif
/* Only the first DMA map is static. */
if (i != 0)
bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
if (++i >= IOP_MAX_MSG_XFERS)
break;
if ((++ix)->ix_size == 0)
break;
}
}
/*
* Post a message frame to the IOP's inbound queue.
*/
int
iop_post(struct iop_softc *sc, u_int32_t *mb)
{
u_int32_t mfa;
#ifdef I2ODEBUG
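/* The size field in mb[0] counts 32-bit words; sc_framesize is in bytes. */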
if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
panic("iop_post: frame too large");
#endif
mutex_spin_enter(&sc->sc_intrlock);
/* Allocate a slot with the IOP. */
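/*
 * The doubled read below is deliberate: the first read of the inbound
 * FIFO may transiently return IOP_MFA_EMPTY even when a frame can be
 * made available, so poll once more before giving up.
 */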
if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
mutex_spin_exit(&sc->sc_intrlock);
aprint_error_dev(sc->sc_dev, "mfa not forthcoming\n");
return (EAGAIN);
}
/* Post the MFA back to the IOP. */
iop_outl(sc, IOP_REG_IFIFO, mfa);
mutex_spin_exit(&sc->sc_intrlock);
return (0);
}
/*
* Post a message to the IOP and deal with completion.
*/
int
iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
{
u_int32_t *mb;
int rv;
mb = xmb;
/* Terminate the scatter/gather list chain. */
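/*
 * The message size in words sits in the upper 16 bits of mb[0]; the
 * final SIMPLE element's flag word is two words from the end, so OR
 * I2O_SGL_END into it there.
 */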
if ((im->im_flags & IM_SGLOFFADJ) != 0)
mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
/*
* Translate an I2O ASCII field into a C string.
*/
void
iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
{
int hc, lc, i, nit;
dlen--;
lc = 0;
hc = 0;
i = 0;
/*
* DPT uses NUL as a space, whereas AMI uses it as a terminator.  The
* spec has nothing to say about it.  Since AMI fields are usually
* filled with junk after the terminator, treat NUL as a terminator
* unless the board is from DPT.
*/
nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
while (slen-- != 0 && dlen-- != 0) {
if (nit && *src == '\0')
break;
else if (*src <= 0x20 || *src >= 0x7f) {
if (hc)
dst[i++] = ' ';
} else {
hc = 1;
dst[i++] = *src;
lc = i;
}
src++;
}
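/* lc indexes one past the last printable character, trimming trailing blanks. */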
dst[lc] = '\0';
}
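/*
 * Usage sketch (hypothetical caller, mirroring iop_print_ident()
 * below; the vendorinfo field name is assumed from the
 * DEVICE_IDENTITY group layout):
 *
 *	char buf[32];
 *	iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo),
 *	    buf, sizeof(buf));
 */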
/*
* Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
*/
int
iop_print_ident(struct iop_softc *sc, int tid)
{
struct {
struct i2o_param_op_results pr;
struct i2o_param_read_results prr;
struct i2o_param_device_identity di;
} __packed p;
char buf[32];
int rv;
/*
* Claim or unclaim the specified TID.
*/
int
iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
int flags)
{
struct iop_msg *im;
struct i2o_util_claim mf;
int rv, func;
/* We can use the same structure, as they're identical. */
mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
mf.msgictx = ii->ii_ictx;
mf.msgtctx = im->im_tctx;
mf.flags = flags;
/*
* Perform an abort.
*/
int iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
int tctxabort, int flags)
{
struct iop_msg *im;
struct i2o_util_abort mf;
int rv;
/*
* Enable or disable reception of events for the specified device.
*/
int iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
{
struct i2o_util_event_register mf;
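/*
 * Reply sizing: msgflags carries the reply length in 32-bit words in
 * its upper 16 bits, so ">> 14" converts straight to bytes and
 * "& ~3" masks off the two stray low bits, keeping the count
 * word-aligned before it is clamped and copied out.
 */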
i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
if (i > sc->sc_framesize)
i = sc->sc_framesize;
if (i > pt->pt_replylen)
i = pt->pt_replylen;
rv = copyout(im->im_rb, pt->pt_reply, i);
bad:
if (mapped != 0)
iop_msg_unmap(sc, im);
if (im != NULL)
iop_msg_free(sc, im);
if (mf != NULL)
free(mf, M_DEVBUF);
return (rv);
}