/*-
* Copyright (c) 2002, 2007 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Andrew Doran.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*-
* Copyright (c) 2001 Scott Long
* Copyright (c) 2001 Adaptec, Inc.
* Copyright (c) 2000 Michael Smith
* Copyright (c) 2000 BSDi
* Copyright (c) 2000 Niklas Hallqvist
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Driver for the Adaptec 'FSA' family of PCI/SCSI RAID adapters.
*
* TODO:
*
* o Management interface.
* o Look again at some of the portability issues.
* o Handle various AIFs (e.g., notification that a container is going away).
*/
/*
* Adapter-space FIB queue manipulation.
*
 * Note that the queue implementation here is a little funky; neither the PI
 * nor the CI will ever be zero.  This behaviour is a controller feature.
*/
/*
 * Per-queue parameters, one entry per FIB queue, indexed by the 'queue'
 * argument passed to aac_enqueue_fib()/aac_dequeue_fib().  'size' is the
 * number of entries in the queue; 'notify' is the doorbell bit used to
 * tell the adapter about activity on that queue (0 = no notification).
 */
static struct {
	int	size;
	int	notify;
} const aac_qinfo[] = {
	{ AAC_HOST_NORM_CMD_ENTRIES,	AAC_DB_COMMAND_NOT_FULL },
	{ AAC_HOST_HIGH_CMD_ENTRIES,	0 },
	{ AAC_ADAP_NORM_CMD_ENTRIES,	AAC_DB_COMMAND_READY },
	{ AAC_ADAP_HIGH_CMD_ENTRIES,	0 },
	{ AAC_HOST_NORM_RESP_ENTRIES,	AAC_DB_RESPONSE_NOT_FULL },
	{ AAC_HOST_HIGH_RESP_ENTRIES,	0 },
	{ AAC_ADAP_NORM_RESP_ENTRIES,	AAC_DB_RESPONSE_READY },
	{ AAC_ADAP_HIGH_RESP_ENTRIES,	0 }
};
#ifdef AAC_DEBUG
/* Debug level; initialised from the AAC_DEBUG build option. */
int aac_debug = AAC_DEBUG;
#endif

/* Malloc type for all driver allocations (fibmaps, CCB arrays, ...). */
MALLOC_DEFINE(M_AACBUF, "aacbuf", "Buffers for aac(4)");
/*
 * Allocate a new batch of command structures (CCBs) and their backing
 * FIB memory for the adapter.  Returns 0 on success or ENOMEM.
 */
static int
aac_alloc_commands(struct aac_softc *sc)
{
	struct aac_fibmap *fm;
	struct aac_ccb *ac;
	bus_addr_t fibpa;
	int size, nsegs;
	int i, error;
	int state;

	/* Refuse to exceed the adapter's advertised FIB limit. */
	if (sc->sc_total_fibs + sc->sc_max_fibs_alloc > sc->sc_max_fibs)
		return ENOMEM;

	fm = malloc(sizeof(struct aac_fibmap), M_AACBUF, M_NOWAIT|M_ZERO);
	if (fm == NULL)
		return ENOMEM;

	/*
	 * NOTE(review): the remainder of this function (DMA allocation and
	 * CCB initialisation using fibpa/size/nsegs/state, and the matching
	 * closing brace) is not visible in this chunk; the body above is
	 * truncated.
	 */
/*
* Print autoconfiguration message for a sub-device.
*/
/*
 * Autoconfiguration print routine: describe a sub-device being attached
 * under this controller.
 */
static int
aac_print(void *aux, const char *pnp)
{
	struct aac_attach_args *aaca = aux;

	/* 'pnp' is non-NULL when no driver matched the child. */
	if (pnp != NULL)
		aprint_normal("block device at %s", pnp);
	aprint_normal(" unit %d", aaca->aaca_unit);

	return (UNCONF);
}
/*
* Look up a text description of a numeric error code and return a pointer to
* same.
*/
/*
 * Map a numeric code to its text description via 'table'.  The table is
 * terminated by a NULL-string sentinel entry; the entry immediately after
 * the sentinel supplies the default ("unknown") description.
 */
const char *
aac_describe_code(const struct aac_code_lookup *table, u_int32_t code)
{
	const struct aac_code_lookup *t;

	/* Walk entries until the sentinel, returning on a match. */
	for (t = table; t->string != NULL; t++)
		if (t->code == code)
			return (t->string);

	/* No match: the default description follows the sentinel. */
	return ((t + 1)->string);
}
/*
* snprintb(3) format string for the adapter options.
*/
/* Leading "\20" selects bit-position decoding; each "\N" is the (octal)
 * 1-based bit number of the flag name that follows. */
static const char *optfmt =
    "\20\1SNAPSHOT\2CLUSTERS\3WCACHE\4DATA64\5HOSTTIME\6RAID50"
    "\7WINDOW4GB"
    "\10SCSIUPGD\11SOFTERR\12NORECOND\13SGMAP64\14ALARM\15NONDASD";
/* Save the kernel revision structure for later use. */
sc->sc_revision = info->KernelRevision;
}
/*
* Retrieve the firmware version numbers. Dell PERC2/QC cards with firmware
* version 1.x are not compatible with this driver.
*/
static int
aac_check_firmware(struct aac_softc *sc)
{
u_int32_t major, minor, opts, atusize = 0, status = 0;
u_int32_t calcsgs;
TAILQ_INIT(&sc->sc_fibmap_tqh);
sc->sc_ccbs = malloc(sizeof(struct aac_ccb) * sc->sc_max_fibs, M_AACBUF,
M_WAITOK | M_ZERO);
state++;
while (sc->sc_total_fibs < AAC_PREALLOCATE_FIBS(sc)) {
if (aac_alloc_commands(sc) != 0)
break;
}
if (sc->sc_total_fibs == 0)
goto bail_out;
/*
* Fill in the init structure. This tells the adapter about the
* physical location of various important shared data structures.
*/
ip = &sc->sc_common->ac_init;
ip->InitStructRevision = htole32(AAC_INIT_STRUCT_REVISION);
if (sc->sc_quirks & AAC_QUIRK_RAW_IO)
ip->InitStructRevision = htole32(AAC_INIT_STRUCT_REVISION_4);
ip->MiniPortRevision = htole32(AAC_INIT_STRUCT_MINIPORT_REVISION);
/*
* The adapter assumes that pages are 4K in size, except on some
* broken firmware versions that do the page->byte conversion twice,
* therefore 'assuming' that this value is in 16MB units (2^24).
* Round up since the granularity is so high.
*/
ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
if (sc->sc_quirks & AAC_QUIRK_BROKEN_MMAP) {
ip->HostPhysMemPages =
(ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
}
ip->HostElapsedSeconds = 0; /* reset later if invalid */
ip->InitFlags = 0;
if (sc->sc_quirks & AAC_QUIRK_NEW_COMM) {
ip->InitFlags = htole32(AAC_INITFLAGS_NEW_COMM_SUPPORTED);
aprint_normal_dev(sc->sc_dv, "New comm. interface enabled\n");
}
/*
* Initialise FIB queues. Note that it appears that the layout of
* the indexes and the segmentation of the entries is mandated by
* the adapter, which is only told about the base of the queue index
* fields.
*
* The initial values of the indices are assumed to inform the
* adapter of the sizes of the respective queues.
*
* The Linux driver uses a much more complex scheme whereby several
* header records are kept for each queue. We use a couple of
* generic list manipulation functions which 'know' the size of each
* list by virtue of a table.
*/
qoff = offsetof(struct aac_common, ac_qbuf) + AAC_QUEUE_ALIGN;
qoff &= ~(AAC_QUEUE_ALIGN - 1);
sc->sc_queues = (struct aac_queue_table *)((uintptr_t)sc->sc_common + qoff);
ip->CommHeaderAddress = htole32(sc->sc_common_seg.ds_addr +
((char *)sc->sc_queues - (char *)sc->sc_common));
memset(sc->sc_queues, 0, sizeof(struct aac_queue_table));
norm = htole32(AAC_HOST_NORM_CMD_ENTRIES);
high = htole32(AAC_HOST_HIGH_CMD_ENTRIES);
/*
* Loop over possible containers.
*/
hd = sc->sc_hdr;
for (i = 0; i < AAC_MAX_CONTAINERS; i++, hd++) {
/*
* Request information on this container.
*/
memset(&mi, 0, sizeof(mi));
/* use 64-bit LBA if enabled */
if (sc->sc_quirks & AAC_QUIRK_LBA_64BIT) {
mi.Command = htole32(VM_NameServe64);
ersize = sizeof(mir);
} else {
mi.Command = htole32(VM_NameServe);
ersize = sizeof(mir) - sizeof(mir.MntTable[0].CapacityHigh);
}
mi.MntType = htole32(FT_FILESYS);
mi.MntCount = htole32(i);
if (aac_sync_fib(sc, ContainerCommand, 0, &mi, sizeof(mi), &mir,
&rsize)) {
aprint_error_dev(sc->sc_dv, "error probing container %d\n", i);
continue;
}
if (rsize != ersize) {
aprint_error_dev(sc->sc_dv, "container info response wrong size "
"(%d should be %zu)\n", rsize, ersize);
continue;
}
/*
* Check container volume type for validity. Note that many
* of the possible types may never show up.
*/
if (le32toh(mir.Status) != ST_OK ||
le32toh(mir.MntTable[0].VolType) == CT_NONE)
continue;
for (i = 0; i < aac_cd.cd_ndevs; i++) {
if ((sc = device_lookup_private(&aac_cd, i)) == NULL)
continue;
if ((sc->sc_flags & AAC_ONLINE) == 0)
continue;
AAC_MASK_INTERRUPTS(sc);
/*
* Send a Container shutdown followed by a HostShutdown FIB
* to the controller to convince it that we don't want to
* talk to it anymore. We've been closed and all I/O
* completed already
*/
memset(&cc, 0, sizeof(cc));
cc.Command = htole32(VM_CloseAll);
cc.ContainerId = 0xffffffff;
if (aac_sync_fib(sc, ContainerCommand, 0, &cc, sizeof(cc),
NULL, NULL)) {
aprint_error_dev(sc->sc_dv, "unable to halt controller\n");
continue;
}
/*
* Note that issuing this command to the controller makes it
* shut down but also keeps it from coming back up without a
* reset of the PCI bus.
*/
if (aac_sync_fib(sc, FsaHostShutdown, AAC_FIBSTATE_SHUTDOWN,
&i, sizeof(i), NULL, NULL))
aprint_error_dev(sc->sc_dv, "unable to halt controller\n");
for (;;) {
index = AAC_GET_OUTB_QUEUE(sc);
if (index == 0xffffffff)
index = AAC_GET_OUTB_QUEUE(sc);
if (index == 0xffffffff)
break;
if (index & 2) {
if (index == 0xfffffffe) {
/* XXX This means that the controller wants
* more work. Ignore it for now.
*/
continue;
}
/* AIF */
index &= ~2;
fib = sc->sc_aif_fib;
for (i = 0; i < sizeof(struct aac_fib)/4; i++) {
((u_int32_t*)fib)[i] =
AAC_GETREG4(sc, index + i*4);
}
#ifdef notyet
aac_handle_aif(sc, &fib);
#endif
AAC_SET_OUTB_QUEUE(sc, index);
AAC_CLEAR_ISTATUS(sc, AAC_DB_RESPONSE_READY);
} else {
fast = index & 1;
ac = sc->sc_ccbs + (index >> 2);
fib = ac->ac_fib;
fm = ac->ac_fibmap;
if (fast) {
bus_dmamap_sync(sc->sc_dmat, fm->fm_fibmap,
(char *)fib - (char *)fm->fm_fibs,
sc->sc_max_fib_size,
BUS_DMASYNC_POSTWRITE |
BUS_DMASYNC_POSTREAD);
fib->Header.XferState |=
htole32(AAC_FIBSTATE_DONEADAP);
*((u_int32_t *)(fib->data)) =
htole32(AAC_ERROR_NORMAL);
}
ac->ac_flags |= AAC_CCB_COMPLETED;
if (ac->ac_intr != NULL)
(*ac->ac_intr)(ac);
else
wakeup(ac);
}
}
/*
* Try to submit more commands.
*/
if (! SIMPLEQ_EMPTY(&sc->sc_ccb_queue))
aac_ccb_enqueue(sc, NULL);
return 1;
}
/*
* Take an interrupt.
*/
int
aac_intr(void *cookie)
{
	struct aac_softc *sc;
	u_int16_t reason;
	int claimed;

	/*
	 * NOTE(review): 'sc', 'reason' and 'claimed' are used below but no
	 * initialisation is visible in this chunk (sc presumably comes from
	 * 'cookie', 'reason' from the adapter's interrupt status register,
	 * and 'claimed' starts at 0); those lines appear to have been
	 * elided -- confirm against the full source.
	 */

	/*
	 * Controller wants to talk to the log.  XXX Should we defer this?
	 */
	if ((reason & AAC_DB_PRINTF) != 0) {
		/* Force a non-empty message so the log line is visible. */
		if (sc->sc_common->ac_printf[0] == '\0')
			sc->sc_common->ac_printf[0] = ' ';
		printf("%s: WARNING: adapter logged message:\n",
		    device_xname(sc->sc_dv));
		printf("%s: %.*s", device_xname(sc->sc_dv),
		    AAC_PRINTF_BUFSIZE, sc->sc_common->ac_printf);
		sc->sc_common->ac_printf[0] = '\0';
		/* Acknowledge so the adapter may log another message. */
		AAC_QNOTIFY(sc, AAC_DB_PRINTF);
		claimed = 1;
	}

	/*
	 * Controller has a message for us?
	 */
	if ((reason & AAC_DB_COMMAND_READY) != 0) {
		aac_host_command(sc);
		claimed = 1;
	}

	/*
	 * Controller has a response for us?
	 */
	if ((reason & AAC_DB_RESPONSE_READY) != 0) {
		aac_host_response(sc);
		claimed = 1;
	}

	/*
	 * Spurious interrupts that we don't use - reset the mask and clear
	 * the interrupts.
	 */
	if ((reason & (AAC_DB_SYNC_COMMAND | AAC_DB_COMMAND_NOT_FULL |
	    AAC_DB_RESPONSE_NOT_FULL)) != 0) {
		AAC_UNMASK_INTERRUPTS(sc);
		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND |
		    AAC_DB_COMMAND_NOT_FULL | AAC_DB_RESPONSE_NOT_FULL);
		claimed = 1;
	}

	return (claimed);
}
/*
* Handle notification of one or more FIBs coming from the controller.
*/
static void
aac_host_command(struct aac_softc *sc)
{
	struct aac_fib *fib;
	u_int32_t fib_size;

	/* Drain the host normal-priority command queue. */
	for (;;) {
		if (aac_dequeue_fib(sc, AAC_HOST_NORM_CMD_QUEUE, &fib_size,
		    &fib))
			break;	/* nothing to do */

		/*
		 * Since we didn't generate this command, it can't
		 * go through the normal process.
		 */
		/*
		 * NOTE(review): there is one more '}' below than visible
		 * '{'s; the examination of the FIB (and the block this
		 * brace closes) appears to have been elided from this
		 * chunk -- confirm against the full source.
		 */
		aac_enqueue_response(sc,
		    AAC_ADAP_NORM_RESP_QUEUE, fib);
	}
}
}
/*
* Handle notification of one or more FIBs completed by the controller
*/
/*
 * Process FIBs that the controller has completed: collect them from the
 * normal response queue, run their completion callbacks, then try to
 * push more queued commands to the adapter.
 */
static void
aac_host_response(struct aac_softc *sc)
{
	struct aac_ccb *ac;
	struct aac_fib *rfib;
	u_int32_t size;

	/*
	 * Drain the response queue, moving our own completed CCBs onto
	 * the completion list.
	 */
	while (aac_dequeue_fib(sc, AAC_HOST_NORM_RESP_QUEUE, &size,
	    &rfib) == 0) {
		if ((rfib->Header.SenderData & 0x80000000) == 0) {
			/* Not valid; not sent by us. */
			AAC_PRINT_FIB(sc, rfib);
			continue;
		}
		ac = (struct aac_ccb *)(sc->sc_ccbs +
		    (rfib->Header.SenderData & 0x7fffffff));
		rfib->Header.SenderData = 0;
		SIMPLEQ_INSERT_TAIL(&sc->sc_ccb_complete, ac, ac_chain);
	}

	/*
	 * Run the completion handler (or wake the sleeper) for each
	 * collected CCB.
	 */
	for (;;) {
		ac = SIMPLEQ_FIRST(&sc->sc_ccb_complete);
		if (ac == NULL)
			break;
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_complete, ac_chain);
		ac->ac_flags |= AAC_CCB_COMPLETED;
		if (ac->ac_intr == NULL)
			wakeup(ac);
		else
			(*ac->ac_intr)(ac);
	}

	/*
	 * Completions may have freed queue space; submit more commands.
	 */
	if (!SIMPLEQ_EMPTY(&sc->sc_ccb_queue))
		aac_ccb_enqueue(sc, NULL);
}
/*
* Send a synchronous command to the controller and wait for a result.
*/
static int
aac_sync_command(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
u_int32_t arg1, u_int32_t arg2, u_int32_t arg3, u_int32_t *sp)
{
int i;
u_int32_t status;
int s;
/* Ensure the sync command doorbell flag is cleared. */
AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
/* ... then set it to signal the adapter. */
AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);
DELAY(AAC_SYNC_DELAY);
/* Spin waiting for the command to complete. */
for (i = 0; i < AAC_IMMEDIATE_TIMEOUT * 1000; i++) {
if (AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND)
break;
DELAY(1000);
}
if (i == AAC_IMMEDIATE_TIMEOUT * 1000) {
splx(s);
return (EIO);
}
/* Clear the completion flag. */
AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
/* Get the command status. */
status = AAC_GET_MAILBOXSTATUS(sc);
splx(s);
if (sp != NULL)
*sp = status;
/*
* Give the FIB to the controller, wait for a response.
*/
if (aac_sync_command(sc, AAC_MONKER_SYNCFIB, fibpa, 0, 0, 0, &status))
return (EIO);
if (status != 1) {
printf("%s: syncfib command %04x status %08x\n",
device_xname(sc->sc_dv), command, status);
}
#ifdef AAC_DEBUG
/*
* These are duplicated in aac_ccb_submit() to cover the case where
* an intermediate stage may have destroyed them. They're left
* initialised here for debugging purposes only.
*/
ac->ac_fib->Header.SenderFibAddress =
htole32(((u_int32_t) (ac - sc->sc_ccbs)) << 2);
ac->ac_fib->Header.ReceiverFibAddress = htole32(ac->ac_fibphys);
#endif
s = splbio();
SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_free, ac, ac_chain);
splx(s);
}
int
aac_ccb_map(struct aac_softc *sc, struct aac_ccb *ac)
{
int error;
for (timo *= 1000; timo != 0; timo--) {
if (sc->sc_quirks & AAC_QUIRK_NEW_COMM)
aac_new_intr(sc);
else
aac_intr(sc);
if ((ac->ac_flags & AAC_CCB_COMPLETED) != 0)
break;
DELAY(100);
}
splx(s);
return (timo == 0);
}
/*
 * Atomically insert an entry into the nominated queue, returns 0 on success
 * or EAGAIN if the queue is full.
 *
 * XXX Note that it would be more efficient to defer notifying the
 * controller in the case where we may be inserting several entries in rapid
 * succession, but implementing this usefully is difficult.
 */
static int
aac_enqueue_fib(struct aac_softc *sc, int queue, struct aac_ccb *ac)
{
	u_int32_t fib_size, fib_addr, pi, ci;

	/* Get the producer/consumer indices. */
	pi = le32toh(sc->sc_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]);
	ci = le32toh(sc->sc_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]);

	/* Wrap the queue? */
	if (pi >= aac_qinfo[queue].size)
		pi = 0;

	/* Check for queue full. */
	if ((pi + 1) == ci)
		return (EAGAIN);

	/*
	 * NOTE(review): the code that fills in the queue entry (using
	 * fib_size/fib_addr derived from 'ac') and advances the producer
	 * index is not visible in this chunk; the body appears truncated
	 * -- confirm against the full source.
	 */

	/* Notify the adapter if we know how. */
	if (aac_qinfo[queue].notify != 0)
		AAC_QNOTIFY(sc, aac_qinfo[queue].notify);

	return (0);
}
/*
* Atomically remove one entry from the nominated queue, returns 0 on success
* or ENOENT if the queue is empty.
*/
/*
 * Remove one entry from the nominated adapter queue and return, via
 * 'fib_size' and 'fib_addr', the size and host address of the FIB it
 * refers to.  Returns 0 on success or ENOENT if the queue is empty.
 *
 * Fix: the consumer-index write-back now uses htole32(), matching the
 * le32toh() conversions applied when the indices are read above.  The
 * shared queue-index area is little-endian; storing the raw host value
 * was a no-op on LE machines but wrong on big-endian hosts.
 */
static int
aac_dequeue_fib(struct aac_softc *sc, int queue, u_int32_t *fib_size,
		struct aac_fib **fib_addr)
{
	struct aac_fibmap *fm;
	struct aac_ccb *ac;
	u_int32_t pi, ci, idx;
	int notify;

	/* Get the producer/consumer indices. */
	pi = le32toh(sc->sc_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]);
	ci = le32toh(sc->sc_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]);

	/* Check for queue empty. */
	if (ci == pi)
		return (ENOENT);

	/* Remember whether this dequeue empties a full queue (see below). */
	notify = 0;
	if (ci == pi + 1)
		notify = 1;

	/* Wrap the queue? */
	if (ci >= aac_qinfo[queue].size)
		ci = 0;

	/* Fetch the entry. */
	*fib_size = le32toh((sc->sc_qentries[queue] + ci)->aq_fib_size);

	switch (queue) {
	case AAC_HOST_NORM_CMD_QUEUE:
	case AAC_HOST_HIGH_CMD_QUEUE:
		/*
		 * Adapter-initiated command: the entry holds a byte offset
		 * into the shared FIB array.
		 */
		idx = le32toh((sc->sc_qentries[queue] + ci)->aq_fib_addr);
		idx /= sizeof(struct aac_fib);
		*fib_addr = &sc->sc_common->ac_fibs[idx];
		break;
	case AAC_HOST_NORM_RESP_QUEUE:
	case AAC_HOST_HIGH_RESP_QUEUE:
		/*
		 * Response: bits 2.. index our CCB array; bit 0 set means
		 * the adapter took the fast path, so we must mark the FIB
		 * done and fill in the status word ourselves.
		 */
		idx = le32toh((sc->sc_qentries[queue] + ci)->aq_fib_addr);
		ac = sc->sc_ccbs + (idx >> 2);
		*fib_addr = ac->ac_fib;
		if (idx & 0x01) {
			fm = ac->ac_fibmap;
			bus_dmamap_sync(sc->sc_dmat, fm->fm_fibmap,
			    (char *)ac->ac_fib - (char *)fm->fm_fibs,
			    sc->sc_max_fib_size,
			    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
			ac->ac_fib->Header.XferState |=
			    htole32(AAC_FIBSTATE_DONEADAP);
			*((u_int32_t *)(ac->ac_fib->data)) =
			    htole32(AAC_ERROR_NORMAL);
		}
		break;
	default:
		panic("Invalid queue in aac_dequeue_fib()");
		break;
	}

	/* Update the consumer index (stored little-endian, as read). */
	sc->sc_queues->qt_qindex[queue][AAC_CONSUMER_INDEX] = htole32(ci + 1);

	/* If we have made the queue un-full, notify the adapter. */
	if (notify && (aac_qinfo[queue].notify != 0))
		AAC_QNOTIFY(sc, aac_qinfo[queue].notify);

	return (0);
}
/*
* Put our response to an adapter-initiated fib (AIF) on the response queue.
*/
static int
aac_enqueue_response(struct aac_softc *sc, int queue, struct aac_fib *fib)
{
u_int32_t fib_size, fib_addr, pi, ci;
/* Get the producer/consumer indices. */
pi = le32toh(sc->sc_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]);
ci = le32toh(sc->sc_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]);
/* Wrap the queue? */
if (pi >= aac_qinfo[queue].size)
pi = 0;
/* Check for queue full. */
if ((pi + 1) == ci)
return (EAGAIN);