/* $NetBSD: ld_iop.c,v 1.42 2025/04/13 02:34:03 rin Exp $ */
/*-
* Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Andrew Doran.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* I2O front-end for ld(4) driver, supporting random block storage class
* devices. Currently, this doesn't handle anything more complex than
* fixed direct-access devices.
*/
/*
* Determine if the device has a private cache. If so, print the
* cache size. Even if the device doesn't appear to have a cache,
* we perform a flush at shutdown.
*/
/*
 * Fetch the RBS cache-control parameter group from the device.
 * BUGFIX: the second-line argument had been mangled by a character
 * encoding error ("¶m"); it is the address of the local parameter
 * buffer, i.e. "&param".
 */
rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_RBS_CACHE_CONTROL,
&param, sizeof(param), NULL);
if (rv != 0)
goto bad;
/*
 * Configure the DDM's timeout functions to time out all commands
 * after 30 seconds.
 */
/* LD_IOP_TIMEOUT is in seconds; the wire format wants little-endian
 * milliseconds, hence the * 1000 and htole32(). */
timeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
rwvtimeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
/* 0 disables the per-request read/write/verify timeout. */
rwvtimeout = 0;
if ((sc->sc_flags & LD_IOP_CLAIMED) != 0)
/* NOTE(review): third argument presumably selects release vs.
 * claim of the primary-user role — confirm against
 * iop_util_claim()'s prototype. */
iop_util_claim(iop, &sc->sc_ii, 1,
I2O_UTIL_CLAIM_PRIMARY_USER);
/* Only tear down event registration if we registered earlier. */
if (evreg) {
/*
 * Mask off events, and wait up to 5 seconds for a reply.
 * Note that some adapters won't reply to this (XXX We
 * should check the event capabilities).
 */
/* Clear the "new event mask posted" flag under the interrupt
 * lock; the reply handler inspects it from interrupt context. */
mutex_spin_enter(&iop->sc_intrlock);
sc->sc_flags &= ~LD_IOP_NEW_EVTMASK;
mutex_spin_exit(&iop->sc_intrlock);
/* Ask the generic ld(4) layer whether detach may proceed;
 * bail out early if it refuses (e.g. device busy). */
if ((rv = ldbegindetach(&sc->sc_ld, flags)) != 0)
return (rv);
/*
 * Abort any requests queued with the IOP, but allow requests that
 * are already in progress to complete.
 */
if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
/* WILD (match any transaction) + CLEAN (queued only, not
 * in-flight) abort against our initiator. */
iop_util_abort(iop, &sc->sc_ii, 0, 0,
I2O_UTIL_ABORT_WILD | I2O_UTIL_ABORT_CLEAN);
/* Finish the generic ld(4) side of the detach. */
ldenddetach(&sc->sc_ld);
/* Un-claim the target, and un-register our initiators. */
if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
ld_iop_unconfig(sc, 1);
/*
 * Write through the cache when performing synchronous writes. When
 * performing a read, we don't request that the DDM cache the data,
 * as there's little advantage to it.
 */
if (write) {
/* B_ASYNC clear means the caller waits for completion, so
 * force write-through; otherwise allow write-back caching. */
if ((bp->b_flags & B_ASYNC) == 0)
flags = I2O_RBS_BLOCK_WRITE_CACHE_WT;
else
flags = I2O_RBS_BLOCK_WRITE_CACHE_WB;
} else
flags = 0;
/*
 * Fill the message frame. We can use the block_read structure for
 * both reads and writes, as it's almost identical to the
 * block_write structure.
 */
mf = (struct i2o_rbs_block_read *)mb;
mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_read);
/* Function code selects READ vs WRITE; TID addresses the target. */
mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid,
write ? I2O_RBS_BLOCK_WRITE : I2O_RBS_BLOCK_READ);
/* Initiator and transaction contexts let the reply be routed back
 * to us and matched to this request. */
mf->msgictx = sc->sc_ii.ii_ictx;
mf->msgtctx = im->im_tctx;
mf->flags = flags | (1 << 16); /* flags & time multiplier */
mf->datasize = bp->b_bcount;
/* 64-bit byte offset split into low/high 32-bit halves. */
mf->lowoffset = (u_int32_t)ba;
mf->highoffset = (u_int32_t)(ba >> 32);
/* Map the data transfer and enqueue the command. */
rv = iop_msg_map_bio(iop, im, mb, bp->b_data, bp->b_bcount, write);
if (rv == 0) {
/* If posting fails, undo the mapping and release the message
 * wrapper so nothing leaks; on success the completion path is
 * responsible for the teardown. */
if ((rv = iop_post(iop, mb)) != 0) {
iop_msg_unmap(iop, im);
iop_msg_free(iop, im);
}
}
return (rv);
}
/*
 * AMI controllers seem to lose the plot if you hand off lots of
 * queued commands.
 */
/* orgid is stored little-endian in the IOP status block, so convert
 * the constant rather than the field for the comparison.  Clamp the
 * per-device queue depth to 64 on AMI hardware. */
if (le16toh(I2O_ORG_AMI) == iop->sc_status.orgid && mpi > 64)
mpi = 64;