/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
*/
/*
* uvm_pager.c: generic functions used to assist the pagers.
*/
#ifdef PMAP_DIRECT
/*
* uvm_pagermapdirect: map a single page via the pmap's direct segment
*
* this is an abuse of pmap_direct_process(), since the kva is being grabbed
* and no processing is taking place, but for now..
*/
static int
uvm_pagermapdirect(void *kva, size_t sz, void *cookie)
{
	KASSERT(sz == PAGE_SIZE);
	*(vaddr_t *)cookie = (vaddr_t)kva;
	return 0;
}
#endif

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_enter() to put the mappings in by hand.
 */

/*
 * uvm_pagermapout: remove pager_map mapping and wake up anyone
 * waiting for pager_map space.
 */
void
uvm_pagermapout(vaddr_t kva, int npages)
{
	vsize_t size = ptoa(npages);
	struct vm_map_entry *entries;
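
	/* duplicate uvm_unmap, but add in pager_map_wanted handling. */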
	vm_map_lock(pager_map);
	uvm_unmap_remove(pager_map, kva, kva + size, &entries, 0);
	mutex_enter(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = false;
		wakeup(pager_map);
	}
	mutex_exit(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
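	/* now that pager_map is unlocked, dispose of the removed entries. */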
	if (entries)
		uvm_unmap_detach(entries, 0);
	pmap_kremove(kva, npages << PAGE_SHIFT);
	pmap_update(pmap_kernel());
	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}
void
uvm_aio_aiodone_pages(struct vm_page **pgs, int npages, bool write, int error)
{
	struct uvm_object *uobj;
	struct vm_page *pg;
	krwlock_t *slock;
	int pageout_done;	/* number of PG_PAGEOUT pages processed */
	int swslot __unused;	/* used for VMSWAP */
	int i;
	bool swap;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	swslot = 0;
	pageout_done = 0;
	slock = NULL;
	uobj = NULL;
	pg = pgs[0];
	swap = (pg->uanon != NULL && pg->uobject == NULL) ||
	    (pg->flags & PG_AOBJ) != 0;

	if (!swap) {
		/* all pages share one object: lock it once for the loop. */
		uobj = pg->uobject;
		slock = uobj->vmobjlock;
		rw_enter(slock, RW_WRITER);
	} else {
#if defined(VMSWAP)
		/* on error, find the swap slots backing this cluster. */
		if (error) {
			if (pg->uobject != NULL) {
				swslot = uao_find_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT);
			} else {
				swslot = pg->uanon->an_swslot;
			}
			KASSERT(swslot);
		}
#endif /* defined(VMSWAP) */
	}

	for (i = 0; i < npages; i++) {
		pg = pgs[i];
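
		/*
		 * the i/o is complete, so for a write via a uvm_object the
		 * page is no longer under writeback.
		 */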
		if (write && uobj != NULL) {
			KASSERT(uvm_obj_page_writeback_p(pg));
			uvm_obj_page_clear_writeback(pg);
		}
		/*
		 * process errors. for reads, just mark the page to be freed.
		 * for writes, if the error was ENOMEM, we assume this was
		 * a transient failure so we mark the page dirty so that
		 * we'll try to write it again later. for all other write
		 * errors, we assume the error is permanent, thus the data
		 * in the page is lost. bummer.
		 */
		if (error) {
			int slot __unused;	/* used for VMSWAP */

			if (!write) {
				pg->flags |= PG_RELEASED;
				continue;
			} else if (error == ENOMEM) {
				if (pg->flags & PG_PAGEOUT) {
					pg->flags &= ~PG_PAGEOUT;
					pageout_done++;
				}
				uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
				uvm_pagelock(pg);
				uvm_pageactivate(pg);
				uvm_pageunlock(pg);
				slot = 0;
			} else
				slot = SWSLOT_BAD;
		}
	}

	if (pageout_done != 0) {
		uvm_pageout_done(pageout_done);
	}
	if (!swap) {
		uvm_page_unbusy(pgs, npages);
		rw_exit(slock);
	} else {
#if defined(VMSWAP)
		KASSERT(write);

		/* these pages are now only in swap. */
		if (error != ENOMEM) {
			atomic_add_int(&uvmexp.swpgonly, npages);
		}
		if (error) {
			if (error != ENOMEM)
				uvm_swap_markbad(swslot, npages);
			else
				uvm_swap_free(swslot, npages);
		}
		atomic_dec_uint(&uvmexp.pdpending);
#endif /* defined(VMSWAP) */
	}
}
/*
* uvm_aio_aiodone: do iodone processing for async i/os.
* this should be called in thread context, not interrupt context.
*/
void
uvm_aio_aiodone(struct buf *bp)
{
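	/*
	 * a buf maps at most MAXPHYS bytes of data, so an on-stack page
	 * array sized by MAXPHYS / MIN_PAGE_SIZE covers the worst case.
	 */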
	const int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pgs[howmany(MAXPHYS, MIN_PAGE_SIZE)];
	int i, error;
	bool write;
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(ubchist, "bp %#jx", (uintptr_t)bp, 0,0,0);