/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* from: Id: uvm_device.c,v 1.1.2.9 1998/02/06 05:11:47 chs Exp
*/
/*
* udv_attach
*
* get a VM object that is associated with a device. allocate a new
* one if needed.
*
* => caller must _not_ already be holding the lock on the uvm_object.
* => in fact, nothing should be locked so that we can sleep here.
*/
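/*
 * Hypothetical usage sketch (only udv_attach() itself is from this
 * file; the surrounding names are illustrative): a device-backed mmap
 * path might do
 *
 *   uobj = udv_attach(dev, VM_PROT_READ | VM_PROT_WRITE, off, size);
 *   if (uobj == NULL)
 *           return EINVAL;
 *
 * and later drop its reference through the object's pgo_detach op.
 */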
struct uvm_object *
udv_attach(dev_t device, vm_prot_t accessprot,
voff_t off, /* used only for access check */
vsize_t size /* used only for access check */)
{
struct uvm_device *udv, *lcv;
const struct cdevsw *cdev;
dev_mmap_t *mapfn;
/*
 * before we do anything, look up the device and ensure it supports
 * mmap; cdev is also needed for the D_NEGOFFSAFE check below.
 */
cdev = cdevsw_lookup(device);
if (cdev == NULL)
return NULL;
mapfn = cdev->d_mmap;
if (mapfn == NULL || mapfn == nommap)
return NULL;
/*
* Negative offsets on the object are not allowed, unless the
* device has affirmatively set D_NEGOFFSAFE.
*/
if ((cdev->d_flag & D_NEGOFFSAFE) == 0 && off != UVM_UNKNOWN_OFFSET) {
if (off < 0)
return NULL;
#if SIZE_MAX > UINT32_MAX /* XXX -Wtype-limits */
if (size > __type_max(voff_t))
return NULL;
#endif
if (off > __type_max(voff_t) - size)
return NULL;
}
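/*
 * Worked example of the overflow check above, assuming a 64-bit
 * voff_t (__type_max(voff_t) == 0x7fffffffffffffff): with
 * off == 0x7ffffffffffff000 and size == 0x2000, off + size would
 * exceed that maximum, and indeed off > __type_max(voff_t) - size,
 * so the range is rejected without computing the overflowing sum.
 */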
/*
* Check that the specified range of the device allows the
* desired protection.
*
* XXX assumes VM_PROT_* == PROT_*
* XXX clobbers off and size, but nothing else here needs them.
*/
do {
KASSERTMSG((off % PAGE_SIZE) == 0, "off=%jd", (intmax_t)off);
KASSERTMSG(size >= PAGE_SIZE, "size=%"PRIuVSIZE, size);
if (cdev_mmap(device, off, accessprot) == -1)
return NULL;
KASSERT(off <= __type_max(voff_t) - PAGE_SIZE ||
(cdev->d_flag & D_NEGOFFSAFE) != 0);
if (__predict_false(off > __type_max(voff_t) - PAGE_SIZE)) {
/*
* off += PAGE_SIZE, with two's-complement
* wraparound, or
*
* off += PAGE_SIZE - 2*(VOFF_MAX + 1).
*/
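/*
 * The adjustment is split into three steps so that each intermediate
 * value stays within [VOFF_MIN, VOFF_MAX] (no signed overflow); the
 * net effect is unchanged, since
 * -VOFF_MAX + (PAGE_SIZE - 2) - VOFF_MAX == PAGE_SIZE - 2*(VOFF_MAX + 1).
 */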
CTASSERT(MIN_PAGE_SIZE >= 2);
off -= __type_max(voff_t);
off += PAGE_SIZE - 2;
off -= __type_max(voff_t);
} else {
off += PAGE_SIZE;
}
size -= PAGE_SIZE;
} while (size != 0);
/*
* keep looping until we get it
*/
for (;;) {
/*
* first, attempt to find it on the main list
*/
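/*
 * Sketch of the lookup described above, assuming a global udv_list
 * protected by udv_lock as in NetBSD's uvm_device.c (the wanted/held
 * handling that follows the lookup is omitted here):
 *
 *   mutex_enter(&udv_lock);
 *   LIST_FOREACH(lcv, &udv_list, u_list) {
 *           if (device == lcv->u_device)
 *                   break;
 *   }
 */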
/*
* udv_reference
*
* add a reference to a VM object. Note that the reference count must
* already be one (the passed in reference) so there is no chance of the
* udv being released or locked out here.
*
* => caller must call with object unlocked.
*/
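/*
 * A minimal sketch of the routine described above, assuming the
 * modern NetBSD locking (the uvm_object's vmobjlock rwlock); history
 * logging is omitted:
 */
static void
udv_reference(struct uvm_object *uobj)
{
rw_enter(uobj->vmobjlock, RW_WRITER);
uobj->uo_refs++;
rw_exit(uobj->vmobjlock);
}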
/*
* udv_fault: non-standard fault routine for device "pages"
*
* => rather than having a "get" function, we have a fault routine:
* since we don't return vm_pages, we need full control over the
* pmap_enter() map-in
* => all the usual fault data structures are locked by the caller
* (i.e. maps(read), amap (if any), uobj)
* => on return, we unlock all fault data structures
* => flags: PGO_ALLPAGES: get all of the pages
* PGO_LOCKED: fault data structures are locked
* XXX: currently PGO_LOCKED is always required ... consider removing
* it as a flag
* => NOTE: vaddr is the VA of pps[0] in ufi->entry, _NOT_ pps[centeridx]
*/
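/*
 * A sketch of the entry point and locals that the code below assumes
 * (parameter list and types as in NetBSD's uvm_device.c):
 */
static int
udv_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, struct vm_page **pps,
int npages, int centeridx, vm_prot_t access_type, int flags)
{
struct vm_map_entry *entry = ufi->entry;
struct uvm_object *uobj = entry->object.uvm_obj;
struct uvm_device *udv = (struct uvm_device *)uobj;
vaddr_t curr_va;
off_t curr_offset;
paddr_t paddr, mdpgno;
u_int mmapflags;
int lcv, retval;
dev_t device;
vm_prot_t mapprot;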
device = udv->u_device;
if (cdevsw_lookup(device) == NULL) {
/* XXX This should not happen */
uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
return EIO;
}
/*
* now we must determine the offset in udv to use and the VA to
* use for pmap_enter. note that we always use orig_map's pmap
* for pmap_enter (even if we have a submap). since virtual
* addresses in a submap must match the main map, this is ok.
*/
/* udv offset = (offset from start of entry) + entry's offset */
curr_offset = entry->offset + (vaddr - entry->start);
/* pmap va = vaddr (virtual address of pps[0]) */
curr_va = vaddr;
/*
* loop over the page range, entering mappings as needed
*/
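/*
 * (loop header as in NetBSD's udv_fault(): one page per iteration,
 * skipping pages the caller did not ask for)
 */
retval = 0;
for (lcv = 0; lcv < npages; lcv++, curr_offset += PAGE_SIZE,
curr_va += PAGE_SIZE) {
if ((flags & PGO_ALLPAGES) == 0 && lcv != centeridx)
continue;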
mdpgno = cdev_mmap(device, curr_offset, access_type);
if (mdpgno == -1) {
retval = EIO;
break;
}
paddr = pmap_phys_address(mdpgno);
mmapflags = pmap_mmap_flags(mdpgno);
mapprot = ufi->entry->protection;
UVMHIST_LOG(maphist,
" MAPPING: device: pm=%#jx, va=%#jx, pa=%#jx, at=%jd",
(uintptr_t)ufi->orig_map->pmap, curr_va, paddr, mapprot);
if (pmap_enter(ufi->orig_map->pmap, curr_va, paddr, mapprot,
PMAP_CANFAIL | mapprot | mmapflags) != 0) {
/*
* pmap_enter() didn't have the resource to
* enter this mapping. Unlock everything,
* wait for the pagedaemon to free up some
* pages, and then tell uvm_fault() to start
* the fault again.
*
* XXX Needs some rethinking for the PGO_ALLPAGES
* XXX case.
*/
pmap_update(ufi->orig_map->pmap); /* sync what we have so far */
uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
uobj);
return ENOMEM;
}
}
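/*
 * Success path (sketch): flush the pmap and unlock all fault data
 * structures before returning, as promised in the comment above
 * udv_fault().
 */
pmap_update(ufi->orig_map->pmap);
uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
return retval;
}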