/*-
* Copyright (c) 2004, 2005, 2006, 2007 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Lock order:
*
* sc->sc_dk.dk_openlock
* => sc->sc_parent->dk_rawlock
* => sc->sc_parent->dk_openlock
* => dkwedges_lock
* => sc->sc_sizelock
*
* Locking notes:
*
* W dkwedges_lock
* D device reference
* O sc->sc_dk.dk_openlock
* P sc->sc_parent->dk_openlock
* R sc->sc_parent->dk_rawlock
* S sc->sc_sizelock
* I sc->sc_iolock
* $ stable after initialization
* 1 used only by a single thread
*
* x&y means both x and y must be held to write (taking the write lock
* if one of them is an rwlock), and either x or y must be held to read.
*/
struct dkwedge_softc {
	device_t sc_dev;	/* P&W: pointer to our pseudo-device */
		/* sc_dev is also stable while device is referenced */
	struct cfdata sc_cfdata;	/* 1: our cfdata structure */
	uint8_t sc_wname[128];	/* $: wedge name (Unicode, UTF-8) */
	dkwedge_state_t sc_state;	/* state this wedge is in */
		/* stable while device is referenced */
		/* used only in assertions when stable, and in dump in ddb */
	struct disk *sc_parent;	/* $: parent disk */
		/* P: sc_parent->dk_openmask */
		/* P: sc_parent->dk_nwedges */
		/* P: sc_parent->dk_wedges */
		/* R: sc_parent->dk_rawopens */
		/* R: sc_parent->dk_rawvp (also stable while wedge is open) */
	daddr_t sc_offset;	/* $: LBA offset of wedge in parent */
	krwlock_t sc_sizelock;	/* "S" in the legend above: guards sc_size */
	uint64_t sc_size;	/* S: size of wedge in blocks */
	char sc_ptype[32];	/* $: partition type */
	dev_t sc_pdev;	/* $: cached parent's dev_t */
		/* P: link on parent's wedge list */
	LIST_ENTRY(dkwedge_softc) sc_plink;
	struct disk sc_dk;	/* our own disk structure */
		/* O&R: sc_dk.dk_bopenmask */
		/* O&R: sc_dk.dk_copenmask */
		/* O&R: sc_dk.dk_openmask */
	struct bufq_state *sc_bufq;	/* $: buffer queue */
	struct callout sc_restart_ch;	/* I: callout to restart I/O */
	kmutex_t sc_iolock;	/* "I" in the legend above */
	bool sc_iostop;	/* I: don't schedule restart */
	int sc_mode;	/* O&R: parent open mode */
};
static int dkwedge_match(device_t, cfdata_t, void *);
static void dkwedge_attach(device_t, device_t, void *);
static int dkwedge_detach(device_t, int);
static void dk_set_geometry(struct dkwedge_softc *, struct disk *);
/*
* dkwedge_array_expand:
*
* Expand the dkwedges array.
*
* Releases and reacquires dkwedges_lock as a writer.
*/
static int
dkwedge_array_expand(void)
{
/*
* dkwedge_add: [exported function]
*
* Add a disk wedge based on the provided information.
*
* The incoming dkw_devname[] is ignored, instead being
* filled in and returned to the caller.
*/
int
dkwedge_add(struct dkwedge_info *dkw)
{
struct dkwedge_softc *sc, *lsc;
struct disk *pdk;
u_int unit;
int error;
dev_t pdev;
device_t dev __diagused;
error = dkwedge_compute_pdev(pdk->dk_name, &pdev, VBLK);
if (error)
return error;
if (dkw->dkw_offset < 0)
return EINVAL;
/*
* Check for an existing wedge at the same disk offset. Allow
* updating a wedge if the only change is the size, and the new
* size is larger than the old.
*/
sc = NULL;
mutex_enter(&pdk->dk_openlock);
LIST_FOREACH(lsc, &pdk->dk_wedges, sc_plink) {
if (lsc->sc_offset != dkw->dkw_offset)
continue;
if (strcmp(lsc->sc_wname, dkw->dkw_wname) != 0)
break;
if (strcmp(lsc->sc_ptype, dkw->dkw_ptype) != 0)
break;
if (dkwedge_size(lsc) > dkw->dkw_size)
break;
if (lsc->sc_dev == NULL)
break;
/*
* Wedge will be added; increment the wedge count for the parent.
* Only allow this to happen if RAW_PART is the only thing open.
*/
mutex_enter(&pdk->dk_openlock);
if (pdk->dk_openmask & ~(1 << RAW_PART))
error = EBUSY;
else {
/* Check for wedge overlap. */
LIST_FOREACH(lsc, &pdk->dk_wedges, sc_plink) {
/* XXX arithmetic overflow */
uint64_t size = dkwedge_size(sc);
uint64_t lsize = dkwedge_size(lsc);
daddr_t lastblk = sc->sc_offset + size - 1;
daddr_t llastblk = lsc->sc_offset + lsize - 1;
if (sc->sc_offset >= lsc->sc_offset &&
sc->sc_offset <= llastblk) {
/* Overlaps the tail of the existing wedge. */
break;
}
if (lastblk >= lsc->sc_offset &&
lastblk <= llastblk) {
/* Overlaps the head of the existing wedge. */
break;
}
}
if (lsc != NULL) {
if (sc->sc_offset == lsc->sc_offset &&
dkwedge_size(sc) == dkwedge_size(lsc) &&
strcmp(sc->sc_wname, lsc->sc_wname) == 0)
error = EEXIST;
else
error = EINVAL;
} else {
pdk->dk_nwedges++;
LIST_INSERT_HEAD(&pdk->dk_wedges, sc, sc_plink);
}
}
mutex_exit(&pdk->dk_openlock);
if (error) {
mutex_destroy(&sc->sc_iolock);
bufq_free(sc->sc_bufq);
dkwedge_size_fini(sc);
free(sc, M_DKWEDGE);
return error;
}
/* Fill in our cfdata for the pseudo-device glue. */
sc->sc_cfdata.cf_name = dk_cd.cd_name;
sc->sc_cfdata.cf_atname = dk_ca.ca_name;
/* sc->sc_cfdata.cf_unit set below */
sc->sc_cfdata.cf_fstate = FSTATE_NOTFOUND; /* use chosen cf_unit */
/* Insert the larval wedge into the array. */
rw_enter(&dkwedges_lock, RW_WRITER);
for (error = 0;;) {
struct dkwedge_softc **scpp;
/*
* Check for a duplicate wname while searching for
* a slot.
*/
for (scpp = NULL, unit = 0; unit < ndkwedges; unit++) {
if (dkwedges[unit] == NULL) {
if (scpp == NULL) {
scpp = &dkwedges[unit];
sc->sc_cfdata.cf_unit = unit;
}
} else {
/* XXX Unicode. */
if (strcmp(dkwedges[unit]->sc_wname,
sc->sc_wname) == 0) {
error = EEXIST;
break;
}
}
}
if (error)
break;
KASSERT(unit == ndkwedges);
if (scpp == NULL) {
error = dkwedge_array_expand();
if (error)
break;
} else {
KASSERT(scpp == &dkwedges[sc->sc_cfdata.cf_unit]);
*scpp = sc;
break;
}
}
rw_exit(&dkwedges_lock);
if (error) {
mutex_enter(&pdk->dk_openlock);
pdk->dk_nwedges--;
LIST_REMOVE(sc, sc_plink);
mutex_exit(&pdk->dk_openlock);
/*
* Now that we know the unit #, attach a pseudo-device for
* this wedge instance. This will provide us with the
* device_t necessary for glue to other parts of the system.
*
* This should never fail, unless we're almost totally out of
* memory.
*/
if ((dev = config_attach_pseudo_acquire(&sc->sc_cfdata, sc)) == NULL) {
aprint_error("%s%u: unable to attach pseudo-device\n",
sc->sc_cfdata.cf_name, sc->sc_cfdata.cf_unit);
/* Return the devname to the caller. */
strlcpy(dkw->dkw_devname, device_xname(sc->sc_dev),
sizeof(dkw->dkw_devname));
device_release(sc->sc_dev);
return 0;
}
/*
* dkwedge_find_acquire:
*
* Lookup a disk wedge based on the provided information.
* NOTE: We look up the wedge based on the wedge devname,
* not wname.
*
* Return NULL if the wedge is not found, otherwise return
* the wedge's softc. Assign the wedge's unit number to unitp
* if unitp is not NULL. The wedge's sc_dev is referenced and
* must be released by device_release or equivalent.
*/
static struct dkwedge_softc *
dkwedge_find_acquire(struct dkwedge_info *dkw, u_int *unitp)
{
struct dkwedge_softc *sc = NULL;
u_int unit;
/*
 * dkwedge_del:	[exported function]
 *
 *	Delete a disk wedge based on the provided information.
 *	NOTE: We look up the wedge based on the wedge devname,
 *	not wname.
 */
int
dkwedge_del(struct dkwedge_info *dkw)
{
	const int flags = 0;	/* no special deletion flags */

	return dkwedge_del1(dkw, flags);
}
int
dkwedge_del1(struct dkwedge_info *dkw, int flags)
{
struct dkwedge_softc *sc = NULL;
/* Locate the wedge major numbers. */
bmaj = bdevsw_lookup_major(&dk_bdevsw);
cmaj = cdevsw_lookup_major(&dk_cdevsw);
/* Nuke the vnodes for any open instances. */
vdevgone(bmaj, unit, unit, VBLK);
vdevgone(cmaj, unit, unit, VCHR);
/*
* At this point, all block device opens have been closed,
* synchronously flushing any buffered writes; and all
* character device I/O operations have completed
* synchronously, and character device opens have been closed.
*
* So there can be no more opens or queued buffers by now.
*/
KASSERT(sc->sc_dk.dk_openmask == 0);
KASSERT(bufq_peek(sc->sc_bufq) == NULL);
bufq_drain(sc->sc_bufq);
/*
* dkwedge_delall: [exported function]
*
* Forcibly delete all of the wedges on the specified disk. Used
* when a disk is being detached.
*/
void
dkwedge_delall(struct disk *pdk)
{
dkwedge_delall1(pdk, /*idleonly*/false);
}
/*
* dkwedge_delidle: [exported function]
*
* Delete all of the wedges on the specified disk if idle. Used
* by ioctl(DIOCRMWEDGES).
*/
void
dkwedge_delidle(struct disk *pdk)
{
dkwedge_delall1(pdk, /*idleonly*/true);
}
/*
 * dkwedge_delall1:
 *
 *	Delete wedges on the parent disk: all of them when idleonly is
 *	false, only the unopened ones when idleonly is true.  Loops,
 *	detaching one candidate wedge per iteration, until none remain.
 */
static void
dkwedge_delall1(struct disk *pdk, bool idleonly)
{
	struct dkwedge_softc *sc;
	const int dflags = idleonly ?
	    DETACH_QUIET : (DETACH_QUIET | DETACH_FORCE);

	for (;;) {
		mutex_enter(&pdk->dk_rawlock); /* for sc->sc_dk.dk_openmask */
		mutex_enter(&pdk->dk_openlock);
		LIST_FOREACH(sc, &pdk->dk_wedges, sc_plink) {
			/*
			 * Wedge is not yet created.  This is a race --
			 * it may as well have been added just after we
			 * deleted all the wedges, so pretend it's not
			 * here yet.
			 */
			if (sc->sc_dev == NULL)
				continue;
			/* In idle-only mode, skip wedges that are open. */
			if (idleonly && sc->sc_dk.dk_openmask != 0)
				continue;
			device_acquire(sc->sc_dev);
			break;
		}
		if (sc == NULL) {
			/* Nothing left to delete. */
			KASSERT(idleonly || pdk->dk_nwedges == 0);
			mutex_exit(&pdk->dk_openlock);
			mutex_exit(&pdk->dk_rawlock);
			return;
		}
		/* Drop the locks before detaching the chosen wedge. */
		mutex_exit(&pdk->dk_openlock);
		mutex_exit(&pdk->dk_rawlock);
		(void)config_detach_release(sc->sc_dev, dflags);
	}
}
/*
* dkwedge_list: [exported function]
*
* List all of the wedges on a particular disk.
*/
int
dkwedge_list(struct disk *pdk, struct dkwedge_list *dkwl, struct lwp *l)
{
struct uio uio;
struct iovec iov;
struct dkwedge_softc *sc;
struct dkwedge_info dkw;
int error = 0;
/*
* Acquire a device reference so this wedge doesn't go
* away before our next iteration in LIST_FOREACH, and
* then release the lock for uiomove.
*/
device_acquire(sc->sc_dev);
mutex_exit(&pdk->dk_openlock);
error = uiomove(&dkw, sizeof(dkw), &uio);
mutex_enter(&pdk->dk_openlock);
device_release(sc->sc_dev);
if (error)
break;
/*
 * dkwedge_print_wnames:
 *
 *	Print the wedge names of all attached wedges, one " wedge:NAME"
 *	token per wedge, under the dkwedges array lock.
 */
void
dkwedge_print_wnames(void)
{
	struct dkwedge_softc *sc;
	int unit;

	rw_enter(&dkwedges_lock, RW_READER);
	for (unit = 0; unit < ndkwedges; unit++) {
		sc = dkwedges[unit];
		if (sc == NULL)
			continue;	/* empty slot */
		if (sc->sc_dev == NULL)
			continue;	/* larval wedge, not attached yet */
		printf(" wedge:%s", sc->sc_wname);
	}
	rw_exit(&dkwedges_lock);
}
/*
* We need a dummy object to stuff into the dkwedge discovery method link
* set to ensure that there is always at least one object in the set.
*/
static struct dkwedge_discovery_method dummy_discovery_method;
__link_set_add_bss(dkwedge_methods, dummy_discovery_method);
if (config_cfdriver_attach(&dk_cd) != 0)
panic("dkwedge: unable to attach cfdriver");
if (config_cfattach_attach(dk_cd.cd_name, &dk_ca) != 0)
panic("dkwedge: unable to attach cfattach");
#ifdef DKWEDGE_AUTODISCOVER
int dkwedge_autodiscover = 1;
#else
int dkwedge_autodiscover = 0;
#endif
/*
 * dkwedge_discover:	[exported function]
 *
 *	Discover the wedges on a newly attached disk.
 *	Remove all unused wedges on the disk first.
 *
 *	Opens the parent's character device read-only, runs each
 *	registered discovery method over it until one succeeds, then
 *	closes the device again.  Errors are reported via aprint_error
 *	and otherwise swallowed (discovery is best-effort).
 */
void
dkwedge_discover(struct disk *pdk)
{
	struct dkwedge_discovery_method *ddm;
	struct vnode *vp;
	int error;
	dev_t pdev;
	/*
	 * Require people playing with wedges to enable this explicitly.
	 */
	if (dkwedge_autodiscover == 0)
		return;
	/*
	 * NOTE(review): the rw_exit at "out" below implies
	 * dkwedge_discovery_methods_lock is held as a reader across the
	 * body; the matching rw_enter is not visible in this excerpt --
	 * confirm against the full file.
	 */
	/*
	 * Use the character device for scanning, the block device
	 * is busy if there are already wedges attached.
	 */
	error = dkwedge_compute_pdev(pdk->dk_name, &pdev, VCHR);
	if (error) {
		aprint_error("%s: unable to compute pdev, error = %d\n",
		    pdk->dk_name, error);
		goto out;
	}
	error = cdevvp(pdev, &vp);
	if (error) {
		aprint_error("%s: unable to find vnode for pdev, error = %d\n",
		    pdk->dk_name, error);
		goto out;
	}
	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (error) {
		aprint_error("%s: unable to lock vnode for pdev, error = %d\n",
		    pdk->dk_name, error);
		vrele(vp);
		goto out;
	}
	/* FSILENT: the device may legitimately not exist; don't complain. */
	error = VOP_OPEN(vp, FREAD | FSILENT, NOCRED);
	if (error) {
		if (error != ENXIO)
			aprint_error("%s: unable to open device, error = %d\n",
			    pdk->dk_name, error);
		vput(vp);
		goto out;
	}
	VOP_UNLOCK(vp);
	/*
	 * For each supported partition map type, look to see if
	 * this map type exists.  If so, parse it and add the
	 * corresponding wedges.
	 */
	LIST_FOREACH(ddm, &dkwedge_discovery_methods, ddm_list) {
		error = (*ddm->ddm_discover)(pdk, vp);
		if (error == 0) {
			/* Successfully created wedges; we're done. */
			break;
		}
	}
	error = vn_close(vp, FREAD, NOCRED);
	if (error) {
		aprint_error("%s: unable to close device, error = %d\n",
		    pdk->dk_name, error);
		/* We'll just assume the vnode has been cleaned up. */
	}
out:
	rw_exit(&dkwedge_discovery_methods_lock);
}
/*
* dkwedge_read:
*
* Read some data from the specified disk, used for
* partition discovery.
*/
int
dkwedge_read(struct disk *pdk, struct vnode *vp, daddr_t blkno,
void *tbuf, size_t len)
{
buf_t *bp;
int error;
bool isopen;
dev_t bdev;
struct vnode *bdvp;
/*
* The kernel cannot read from a character device vnode
* as physio() only handles user memory.
*
* If the block device has already been opened by a wedge
* use that vnode and temporarily bump the open counter.
*
* Otherwise try to open the block device.
*/
/*
* dkwedge_lookup:
*
* Look up a dkwedge_softc based on the provided dev_t.
*
* Caller must guarantee the wedge is referenced.
*/
static struct dkwedge_softc *
dkwedge_lookup(dev_t dev)
{
/*
* We go through a complicated little dance to only open the parent
* vnode once per wedge, no matter how many times the wedge is
* opened. The reason? We see one dkopen() per open call, but
* only dkclose() on the last close.
*/
mutex_enter(&sc->sc_dk.dk_openlock);
mutex_enter(&sc->sc_parent->dk_rawlock);
if (sc->sc_dk.dk_openmask == 0) {
error = dkfirstopen(sc, flags);
if (error)
goto out;
} else if (flags & ~sc->sc_mode & FWRITE) {
/*
* The parent is already open, but the previous attempt
* to open it read/write failed and fell back to
* read-only. In that case, we assume the medium is
* read-only and fail to open the wedge read/write.
*/
error = EROFS;
goto out;
}
KASSERT(sc->sc_mode != 0);
KASSERTMSG(sc->sc_mode & FREAD, "%s: sc_mode=%x",
device_xname(sc->sc_dev), sc->sc_mode);
KASSERTMSG((flags & FWRITE) ? (sc->sc_mode & FWRITE) : 1,
"%s: flags=%x sc_mode=%x",
device_xname(sc->sc_dev), flags, sc->sc_mode);
if (fmt == S_IFCHR)
sc->sc_dk.dk_copenmask |= 1;
else
sc->sc_dk.dk_bopenmask |= 1;
sc->sc_dk.dk_openmask =
sc->sc_dk.dk_copenmask | sc->sc_dk.dk_bopenmask;
if (sc->sc_parent->dk_rawopens == 0) {
KASSERT(sc->sc_parent->dk_rawvp == NULL);
/*
* Try open read-write. If this fails for EROFS
* and wedge is read-only, retry to open read-only.
*/
mode = FREAD | FWRITE;
error = dk_open_parent(sc->sc_pdev, mode, &vp);
if (error == EROFS && (flags & FWRITE) == 0) {
mode &= ~FWRITE;
error = dk_open_parent(sc->sc_pdev, mode, &vp);
}
if (error)
return error;
KASSERT(vp != NULL);
sc->sc_parent->dk_rawvp = vp;
} else {
/*
* Retrieve mode from an already opened wedge.
*
* At this point, dk_rawopens is bounded by the number
* of dkwedge devices in the system, which is limited
* by autoconf device numbering to INT_MAX. Since
* dk_rawopens is unsigned, this can't overflow.
*/
KASSERT(sc->sc_parent->dk_rawopens < UINT_MAX);
KASSERT(sc->sc_parent->dk_rawvp != NULL);
mode = 0;
mutex_enter(&sc->sc_parent->dk_openlock);
LIST_FOREACH(nsc, &sc->sc_parent->dk_wedges, sc_plink) {
if (nsc == sc || nsc->sc_dk.dk_openmask == 0)
continue;
mode = nsc->sc_mode;
break;
}
mutex_exit(&sc->sc_parent->dk_openlock);
}
sc->sc_mode = mode;
sc->sc_parent->dk_rawopens++;
if (--sc->sc_parent->dk_rawopens == 0) {
struct vnode *const vp = sc->sc_parent->dk_rawvp;
const int mode = sc->sc_mode;
sc->sc_parent->dk_rawvp = NULL;
sc->sc_mode = 0;
dk_close_parent(vp, mode);
}
}
/*
* dkclose: [devsw entry point]
*
* Close a wedge.
*/
static int
dkclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
struct dkwedge_softc *sc = dkwedge_lookup(dev);
/*
* dkclose can be called even if dkopen didn't succeed, so we
* have to handle the same possibility that the wedge may not
* exist.
*/
if (sc == NULL)
return ENXIO;
KASSERT(sc->sc_dev != NULL);
KASSERT(sc->sc_state != DKW_STATE_LARVAL);
KASSERT(sc->sc_state != DKW_STATE_DEAD);
/*
* Disk I/O is expected to complete or fail within a reasonable
* timeframe -- it's storage, not communication. Further, the
* character and block device interface guarantees that prior
* reads and writes have completed or failed by the time close
* returns -- we are not to cancel them here. If the parent
* device's hardware is gone, the parent driver can make them
* fail. Nothing for dk(4) itself to do.
*/
/* Make sure it's in-range. */
if (bounds_check_with_mediasize(bp, DEV_BSIZE, p_size) <= 0)
goto done;
/* Translate it to the parent's raw LBA. */
bp->b_rawblkno = bp->b_blkno + p_offset;
/* Place it in the queue and start I/O on the unit. */
mutex_enter(&sc->sc_iolock);
disk_wait(&sc->sc_dk);
bufq_put(sc->sc_bufq, bp);
mutex_exit(&sc->sc_iolock);
dkstart(sc);
return;
done:
bp->b_resid = bp->b_bcount;
biodone(bp);
}
/*
* dkstart:
*
* Start I/O that has been enqueued on the wedge.
*/
static void
dkstart(struct dkwedge_softc *sc)
{
struct vnode *vp;
struct buf *bp, *nbp;
mutex_enter(&sc->sc_iolock);
/* Do as much work as has been enqueued. */
while ((bp = bufq_peek(sc->sc_bufq)) != NULL) {
if (sc->sc_iostop) {
(void) bufq_get(sc->sc_bufq);
mutex_exit(&sc->sc_iolock);
bp->b_error = ENXIO;
bp->b_resid = bp->b_bcount;
biodone(bp);
mutex_enter(&sc->sc_iolock);
continue;
}
/* fetch an I/O buf with sc_iolock dropped */
mutex_exit(&sc->sc_iolock);
nbp = getiobuf(sc->sc_parent->dk_rawvp, false);
mutex_enter(&sc->sc_iolock);
if (nbp == NULL) {
/*
* No resources to run this request; leave the
* buffer queued up, and schedule a timer to
* restart the queue in 1/2 a second.
*/
if (!sc->sc_iostop)
callout_schedule(&sc->sc_restart_ch, hz/2);
break;
}
/*
* fetch buf, this can fail if another thread
* has already processed the queue, it can also
* return a completely different buf.
*/
bp = bufq_get(sc->sc_bufq);
if (bp == NULL) {
mutex_exit(&sc->sc_iolock);
putiobuf(nbp);
mutex_enter(&sc->sc_iolock);
continue;
}
/* Instrumentation. */
disk_busy(&sc->sc_dk);
/* release lock for VOP_STRATEGY */
mutex_exit(&sc->sc_iolock);
/* Kick the queue in case there is more work we can do. */
dkstart(sc);
}
/*
* dkrestart:
*
* Restart the work queue after it was stalled due to
* a resource shortage. Invoked via a callout.
*/
static void
dkrestart(void *v)
{
struct dkwedge_softc *sc = v;
/*
 * dksize:	[devsw entry point]
 *
 *	Query the size of a wedge for the purpose of performing a dump
 *	or for swapping to.  Returns -1 unless the wedge is a running
 *	swap wedge, in which case it returns the size (scaled by the
 *	parent's block shift), saturated at INT_MAX.
 */
static int
dksize(dev_t dev)
{
	/*
	 * Don't bother taking a reference because this is only used
	 * either (a) while the device is open (for swap), or (b) while
	 * any multiprocessing is quiescent (for crash dumps).
	 */
	struct dkwedge_softc *sc = dkwedge_lookup(dev);
	uint64_t p_size;

	if (sc == NULL || sc->sc_state != DKW_STATE_RUNNING)
		return -1;

	/* Our content type is static, no need to open the device. */
	if (strcmp(sc->sc_ptype, DKW_PTYPE_SWAP) != 0)
		return -1;

	/* Saturate if we are larger than INT_MAX. */
	p_size = dkwedge_size(sc) << sc->sc_parent->dk_blkshift;
	return (p_size > INT_MAX) ? INT_MAX : (int)p_size;
}
/*
* dkdump: [devsw entry point]
*
* Perform a crash dump to a wedge.
*/
static int
dkdump(dev_t dev, daddr_t blkno, void *va, size_t size)
{
/*
* Don't bother taking a reference because this is only used
* while any multiprocessing is quiescent.
*/
struct dkwedge_softc *sc = dkwedge_lookup(dev);
const struct bdevsw *bdev;
uint64_t p_size, p_offset;
if (sc == NULL)
return ENXIO;
if (sc->sc_state != DKW_STATE_RUNNING)
return ENXIO;
/* Our content type is static, no need to open the device. */
const char *
dkwedge_get_parent_name(dev_t dev)
{
/* XXX: perhaps do this in lookup? */
int bmaj = bdevsw_lookup_major(&dk_bdevsw);
int cmaj = cdevsw_lookup_major(&dk_cdevsw);
if (major(dev) != bmaj && major(dev) != cmaj)
return NULL;