/*
* Copyright (c) 2009 Antti Kantee. All Rights Reserved.
*
* Development of this software was supported by the
* Finnish Cultural Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Block device emulation. Presents a block device interface and
* uses rumpuser system calls to satisfy I/O requests.
*
* We provide fault injection. The driver can be made to fail
* I/O occasionally.
*/
/* Host backend open/close; implemented later in this file. */
static int backend_open(struct rblkdev *, const char *);
static int backend_close(struct rblkdev *);
/* fail every n out of BLKFAIL_MAX */
#define BLKFAIL_MAX 10000
static int blkfail;		/* n above; set from RUMP_BLKFAIL, 0 = no faults */
static unsigned randstate;	/* state of the private PRNG used for fault injection */
static kmutex_t rumpblk_lock;	/* serializes access to the minors[] device table */
static int sectshift = DEV_BSHIFT; /* log2 of device sector size; defaults to DEV_BSHIFT */
/*
 * Construct a default disklabel for a device of the given size.
 * NOTE(review): the body of this function is not fully visible in
 * this chunk; only the declaration portion is shown.
 */
static void
makedefaultlabel(struct disklabel *lp, off_t size, int part)
{
	int i;
/*
 * Unregister rumpblk. It's the caller's responsibility to make
 * sure it's no longer in use.
 */
int
rumpblk_deregister(const char *path)
{
	struct rblkdev *rblk;
	int i;

	/* Find the minor whose backing path matches; i == RUMPBLK_SIZE if none. */
	mutex_enter(&rumpblk_lock);
	for (i = 0; i < RUMPBLK_SIZE; i++) {
		if (minors[i].rblk_path&&strcmp(minors[i].rblk_path, path)==0) {
			break;
		}
	}
	mutex_exit(&rumpblk_lock);
	/*
	 * NOTE(review): the remainder of this function (the actual
	 * teardown after the lookup) is not visible in this chunk.
	 */
/*
* Release all backend resources, to be called only when the rump
* kernel is being shut down.
* This routine does not do a full "fini" since we're going down anyway.
*/
void
rumpblk_fini(void)
{
int i;
for (i = 0; i < RUMPBLK_SIZE; i++) {
struct rblkdev *rblk;
rblk = &minors[i];
if (rblk->rblk_fd != -1)
backend_close(rblk);
}
}
/*
 * Open the host file/device backing a rump block device.
 * NOTE(review): only the declaration portion is visible in this chunk.
 */
static int
backend_open(struct rblkdev *rblk, const char *path)
{
	int error, fd;
	/* collect statistics */
	ev_io_total.ev_count++;
	if (async)
		ev_io_async.ev_count++;
	if (BUF_ISWRITE(bp)) {
		/* writes are accounted in bytes */
		ev_bwrite_total.ev_count += bp->b_bcount;
		if (async)
			ev_bwrite_async.ev_count += bp->b_bcount;
	} else {
		/*
		 * NOTE(review): reads are counted per operation here while
		 * writes above are counted in bytes; possibly this should
		 * be "+= bp->b_bcount" for symmetry — verify against the
		 * event counter definitions.
		 */
		ev_bread_total.ev_count++;
	}
	/*
	 * b_blkno is always in terms of DEV_BSIZE, and since we need
	 * to translate to a byte offset for the host read, this
	 * calculation does not need sectshift.
	 */
	off = bp->b_blkno << DEV_BSHIFT;
	/*
	 * Do bounds checking if we're working on a file. Otherwise
	 * invalid file systems might attempt to read beyond EOF. This
	 * is bad(tm) especially on mmapped images. This is essentially
	 * the kernel bounds_check() routines.
	 */
	if (off + bp->b_bcount > rblk->rblk_size) {
		/* truncate the transfer to what fits before EOF */
		int64_t sz = rblk->rblk_size - off;
/*
 * Simple random number generator. This is private so that we can
 * repeatably control which blocks will fail.
 *
 * <mlelstv> pooka, rand()
 * <mlelstv> [paste]
 */
/* Advance and return the private PRNG state (body not visible in this chunk). */
static unsigned
gimmerand(void)
{
/*
* Block device with very simple fault injection. Fails every
* n out of BLKFAIL_MAX I/O with EIO. n is determined by the env
* variable RUMP_BLKFAIL.
*/
/*
 * Strategy entry point with fault injection: completes bp with EIO
 * for the configured fraction of I/Os (see blkfail / BLKFAIL_MAX).
 * NOTE(review): only the declaration portion is visible in this chunk.
 */
void
rumpblk_strategy_fail(struct buf *bp)
{