Index: ufs/ufs/ufs_readwrite.c
===================================================================
RCS file: /cvsroot/src/sys/ufs/ufs/ufs_readwrite.c,v
retrieving revision 1.61
diff -u -p -r1.61 ufs_readwrite.c
--- ufs/ufs/ufs_readwrite.c     26 Feb 2005 05:40:42 -0000      1.61
+++ ufs/ufs/ufs_readwrite.c     27 Feb 2005 10:08:41 -0000
@@ -112,6 +112,13 @@ READ(void *v)
       usepc = vp->v_type == VREG;
#endif /* !LFS_READWRITE */
       if (usepc) {
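+               /*
+                * Try to loan the pages instead of copying them; on
+                * any failure, fall through to the copying loop below.
+                */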
+               if (uio->uio_offset + uio->uio_resid <= vp->v_size) {
+                       error = uvm_loanobj(&vp->v_uobj, uio);
+               }
               while (uio->uio_resid > 0) {
                       bytelen = MIN(ip->i_size - uio->uio_offset,
                           uio->uio_resid);
Index: uvm/uvm_amap_i.h
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_amap_i.h,v
retrieving revision 1.20
diff -u -p -r1.20 uvm_amap_i.h
--- uvm/uvm_amap_i.h    20 Dec 2002 18:21:13 -0000      1.20
+++ uvm/uvm_amap_i.h    27 Feb 2005 10:08:41 -0000
@@ -59,6 +59,7 @@ amap_lookup(aref, offset)
       int slot;
       struct vm_amap *amap = aref->ar_amap;
       UVMHIST_FUNC("amap_lookup"); UVMHIST_CALLED(maphist);
+       LOCK_ASSERT(simple_lock_held(&amap->am_l));

       AMAP_B2SLOT(slot, offset);
       slot += aref->ar_pageoff;
@@ -87,6 +88,7 @@ amap_lookups(aref, offset, anons, npages
       int slot;
       struct vm_amap *amap = aref->ar_amap;
       UVMHIST_FUNC("amap_lookups"); UVMHIST_CALLED(maphist);
+       LOCK_ASSERT(simple_lock_held(&amap->am_l));

       AMAP_B2SLOT(slot, offset);
       slot += aref->ar_pageoff;
@@ -120,6 +122,7 @@ amap_add(aref, offset, anon, replace)
       int slot;
       struct vm_amap *amap = aref->ar_amap;
       UVMHIST_FUNC("amap_add"); UVMHIST_CALLED(maphist);
+       LOCK_ASSERT(simple_lock_held(&amap->am_l));

       AMAP_B2SLOT(slot, offset);
       slot += aref->ar_pageoff;
@@ -166,6 +169,7 @@ amap_unadd(aref, offset)
       int ptr, slot;
       struct vm_amap *amap = aref->ar_amap;
       UVMHIST_FUNC("amap_unadd"); UVMHIST_CALLED(maphist);
+       LOCK_ASSERT(simple_lock_held(&amap->am_l));

       AMAP_B2SLOT(slot, offset);
       slot += aref->ar_pageoff;
Index: uvm/uvm_anon.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_anon.c,v
retrieving revision 1.31
diff -u -p -r1.31 uvm_anon.c
--- uvm/uvm_anon.c      1 Sep 2004 11:53:38 -0000       1.31
+++ uvm/uvm_anon.c      27 Feb 2005 10:08:42 -0000
@@ -201,17 +201,6 @@ uvm_anfree(anon)

       pg = anon->u.an_page;

-       /*
-        * if there is a resident page and it is loaned, then anon may not
-        * own it.   call out to uvm_anon_lockpage() to ensure the real owner
-        * of the page has been identified and locked.
-        */
-
-       if (pg && pg->loan_count) {
-               simple_lock(&anon->an_lock);
-               pg = uvm_anon_lockloanpg(anon);
-               simple_unlock(&anon->an_lock);
-       }

       /*
        * if we have a resident page, we must dispose of it before freeing
@@ -221,6 +210,18 @@ uvm_anfree(anon)
       if (pg) {

               /*
+                * if there is a resident page and it is loaned, then anon
+                * may not own it.  call out to uvm_anon_lockloanpg() to
+                * ensure the real owner of the page has been identified
+                * and locked.
+
+               if (pg->loan_count) {
+                       simple_lock(&anon->an_lock);
+                       pg = uvm_anon_lockloanpg(anon);
+                       simple_unlock(&anon->an_lock);
+               }
+
+               /*
                * if the page is owned by a uobject (now locked), then we must
                * kill the loan on the page rather than free it.
                */
@@ -230,6 +231,7 @@ uvm_anfree(anon)
                       KASSERT(pg->loan_count > 0);
                       pg->loan_count--;
                       pg->uanon = NULL;
+                       anon->u.an_page = NULL;
                       uvm_unlock_pageq();
                       simple_unlock(&pg->uobject->vmobjlock);
               } else {
@@ -259,8 +261,7 @@ uvm_anfree(anon)
                       UVMHIST_LOG(maphist, "anon 0x%x, page 0x%x: "
                                   "freed now!", anon, pg, 0, 0);
               }
-       }
-       if (pg == NULL && anon->an_swslot > 0) {
+       } else if (anon->an_swslot > 0) {
               /* this page is no longer only in swap. */
               simple_lock(&uvm.swap_data_lock);
               KASSERT(uvmexp.swpgonly > 0);
Index: uvm/uvm_fault.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_fault.c,v
retrieving revision 1.90
diff -u -p -r1.90 uvm_fault.c
--- uvm/uvm_fault.c     7 Feb 2005 11:57:38 -0000       1.90
+++ uvm/uvm_fault.c     27 Feb 2005 10:08:44 -0000
@@ -1210,7 +1210,7 @@ ReFault:
               uvm_unlock_pageq();
               UVM_PAGE_OWN(pg, NULL);
               amap_add(&ufi.entry->aref, ufi.orig_rvaddr - ufi.entry->start,
-                   anon, 1);
+                   anon, TRUE);

               /* deref: can not drop to zero here by defn! */
               oanon->an_ref--;
@@ -1623,7 +1623,7 @@ Case2:
                           anon, pg, 0, 0);
               }
               amap_add(&ufi.entry->aref, ufi.orig_rvaddr - ufi.entry->start,
-                   anon, 0);
+                   anon, FALSE);
       }

       /*
Index: uvm/uvm_loan.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_loan.c,v
retrieving revision 1.52
diff -u -p -r1.52 uvm_loan.c
--- uvm/uvm_loan.c      23 Nov 2004 04:51:56 -0000      1.52
+++ uvm/uvm_loan.c      27 Feb 2005 10:08:45 -0000
@@ -50,6 +50,12 @@ __KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v

#include <uvm/uvm.h>

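+/*
+ * Global knob so that object loaning can be disabled at run time,
+ * forcing callers back onto the copying path.
+ */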
+boolean_t doloanobj = TRUE;
+
/*
 * "loaned" pages are pages which are (read-only, copy-on-write) loaned
 * from the VM system to other parts of the kernel.   this allows page
@@ -116,6 +118,10 @@ static int uvm_loanzero(struct uvm_fault
static void    uvm_unloananon(struct vm_anon **, int);
static void    uvm_unloanpage(struct vm_page **, int);
static int     uvm_loanpage(struct vm_page **, int);
+static int     uvm_loanobj_read(struct vm_map *, vaddr_t, size_t,
+                   struct uvm_object *, off_t);
+static int     uvm_loanobj_write(struct vm_map *, vaddr_t, size_t,
+                   struct uvm_object *, off_t);


/*
@@ -1209,3 +1215,567 @@ uvm_loanbreak(struct vm_page *uobjpage)

       return pg;
}
+
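+/*
+ * uvm_loanobj: loan the object pages backing the given uio directly
+ * into the user address space described by the uio, instead of copying
+ * them.  Returns ENOSYS if the request is not suitable for loaning,
+ * in which case the caller is expected to fall back to copying.
+ */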
+int
+uvm_loanobj(struct uvm_object *uobj, struct uio *uio)
+{
+       struct iovec *iov;
+       struct vm_map *map;
+       vaddr_t va;
+       size_t len;
+       int i, error = 0;
+
+       if (!doloanobj) {
+               return ENOSYS;
+       }
+
+       /*
+        * This interface is only for loaning to user space.
+        * Loans to the kernel should be done with the kernel-specific
+        * loaning interfaces.
+        */
+
+       if (uio->uio_segflg != UIO_USERSPACE) {
+               return ENOSYS;
+       }
+
+       /*
+        * Check that the uio is aligned properly for loaning.
+        */
+
+       if ((uio->uio_offset & PAGE_MASK) || (uio->uio_resid & PAGE_MASK)) {
+               return EINVAL;
+       }
+       for (i = 0; i < uio->uio_iovcnt; i++) {
+               if (((vaddr_t)uio->uio_iov[i].iov_base & PAGE_MASK) ||
+                   (uio->uio_iov[i].iov_len & PAGE_MASK)) {
+                       return EINVAL;
+               }
+       }
+
+       /*
+        * Process the uio one iovec at a time, in chunks of at most
+        * MAXPHYS bytes to bound the on-stack page arrays below.
+        */
+
+       map = &uio->uio_procp->p_vmspace->vm_map;
+       while (uio->uio_resid) {
+               iov = uio->uio_iov;
+               while (iov->iov_len) {
+                       va = (vaddr_t)iov->iov_base;
+                       len = MIN(iov->iov_len, MAXPHYS);
+                       if (uio->uio_rw == UIO_READ)
+                               error = uvm_loanobj_read(map, va, len, uobj,
+                                                        uio->uio_offset);
+                       else
+                               error = uvm_loanobj_write(map, va, len, uobj,
+                                                         uio->uio_offset);
+                       if (error) {
+                               goto out;
+                       }
+                       iov->iov_base = (caddr_t)iov->iov_base + len;
+                       iov->iov_len -= len;
+                       uio->uio_offset += len;
+                       uio->uio_resid -= len;
+               }
+               uio->uio_iov++;
+               uio->uio_iovcnt--;
+       }
+
+out:
+       pmap_update(map->pmap);
+       return error;
+}
+
+/*
+ * uvm_loanobj_read: handle the read() side of a loan.  The object's
+ * pages are loaned read-only into the process's amap as anons,
+ * replacing any existing anons at those addresses.
+ */
+
+static int
+uvm_loanobj_read(struct vm_map *map, vaddr_t va, size_t len,
+    struct uvm_object *uobj, off_t off)
+{
+       int npages = len >> PAGE_SHIFT;
+       struct vm_page *pgs[npages], *pg;
+       struct vm_amap *amap;
+       struct vm_anon *anon, *oanons[npages], *nanons[npages];
+       struct vm_map_entry *entry;
+       unsigned int maptime;
+       int error, i, refs, aoff, pgoff;
+       UVMHIST_FUNC("uvm_vnp_loanread"); UVMHIST_CALLED(ubchist);
+
+       UVMHIST_LOG(ubchist, "map %p va 0x%x npages %d", map, va, npages, 0);
+       UVMHIST_LOG(ubchist, "uobj %p off 0x%x", uobj, off, 0, 0);
+       vm_map_lock_read(map);
+
+retry:
+       if (!uvm_map_lookup_entry(map, va, &entry)) {
+               vm_map_unlock_read(map);
+               UVMHIST_LOG(ubchist, "no entry", 0,0,0,0);
+               return EINVAL;
+       }
+       if ((entry->protection & VM_PROT_WRITE) == 0) {
+               vm_map_unlock_read(map);
+               UVMHIST_LOG(ubchist, "no write access", 0,0,0,0);
+               return EACCES;
+       }
+       if (VM_MAPENT_ISWIRED(entry)) {
+               vm_map_unlock_read(map);
+               UVMHIST_LOG(ubchist, "entry is wired", 0,0,0,0);
+               return EBUSY;
+       }
+       if (!UVM_ET_ISCOPYONWRITE(entry)) {
+               vm_map_unlock_read(map);
+               UVMHIST_LOG(ubchist, "entry is not COW", 0,0,0,0);
+               return EINVAL;
+       }
+       if (entry->end < va + len) {
+               vm_map_unlock_read(map);
+               UVMHIST_LOG(ubchist, "chunk longer than entry", 0,0,0,0);
+               return EINVAL;
+       }
+
+       /*
+        * None of the trivial reasons why we might not be able to do the loan
+        * are true.  If we need to COW the amap, try to do it now.
+        */
+
+       amap = entry->aref.ar_amap;
+       KASSERT(amap || UVM_ET_ISNEEDSCOPY(entry));
+       if (amap == NULL) {
+               amap_copy(map, entry, M_WAITOK, TRUE, va, va + len);
+               if (UVM_ET_ISNEEDSCOPY(entry)) {
+                       vm_map_unlock_read(map);
+                       UVMHIST_LOG(ubchist, "amap COW failed", 0,0,0,0);
+                       return ENOMEM;
+               }
+               UVMHIST_LOG(ubchist, "amap has been COWed", 0,0,0,0);
+       }
+       aoff = va - entry->start;
+       maptime = map->timestamp;
+       vm_map_unlock_read(map);
+
+       /*
+        * The map is all ready for us, now fetch the object pages.
+        * The pager may sleep, so if the map changes out from under
+        * us, start over.
+        */
+
+       memset(pgs, 0, sizeof(pgs));
+       simple_lock(&uobj->vmobjlock);
+       error = (*uobj->pgops->pgo_get)(uobj, off, pgs, &npages, 0,
+           VM_PROT_READ, 0, PGO_SYNCIO);
+       if (error) {
+               UVMHIST_LOG(ubchist, "getpages -> %d", error,0,0,0);
+               return error;
+       }
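+
+       /*
+        * Relock the map and check that it hasn't changed while we
+        * slept in the pager; if it has, put the pages back and retry.
+        */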
+       vm_map_lock_read(map);
+       if (map->timestamp != maptime) {
+               simple_lock(&uobj->vmobjlock);
+               uvm_lock_pageq();
+               for (i = 0; i < npages; i++) {
+                       uvm_pageactivate(pgs[i]);
+               }
+               uvm_page_unbusy(pgs, npages);
+               uvm_unlock_pageq();
+               simple_unlock(&uobj->vmobjlock);
+               goto retry;
+       }
+
+       /*
+        * Prepare each object page for loaning.  Allocate an anon for each page
+        * that doesn't already have one.  If any of the pages are wired,
+        * undo everything and fail.
+        */
+
+       memset(nanons, 0, sizeof(nanons));
+       simple_lock(&uobj->vmobjlock);
+       for (i = 0; i < npages; i++) {
+               pg = pgs[i];
+               if (pg->wire_count) {
+                       error = EBUSY;
+                       goto fail;
+               }
+               pmap_page_protect(pg, VM_PROT_READ);
+               uvm_lock_pageq();
+               uvm_pageactivate(pg);
+               uvm_unlock_pageq();
+               if (pg->uanon) {
+                       anon = pg->uanon;
+                       simple_lock(&anon->an_lock);
+                       anon->an_ref++;
+               } else {
+                       anon = uvm_analloc();
+                       if (anon == NULL) {
+                               error = ENOMEM;
+                               goto fail;
+                       }
+                       pg->loan_count++;
+                       anon->u.an_page = pg;
+                       pg->uanon = anon;
+               }
+               simple_unlock(&anon->an_lock);
+               nanons[i] = anon;
+       }
+
+       /*
+        * Look for any existing anons in the amap.  These will be replaced
+        * by the new loan anons we just set up.  If any of these anon pages
+        * are wired then we can't replace them.
+        */
+
+       memset(oanons, 0, sizeof(oanons));
+       amap_lock(amap);
+       uvm_lock_pageq();
+       for (i = 0; i < npages; i++) {
+               UVMHIST_LOG(ubchist, "pgs[%d] %p", i, pgs[i], 0,0);
+               anon = amap_lookup(&entry->aref, aoff + (i << PAGE_SHIFT));
+               oanons[i] = anon;
+               if (anon && anon->u.an_page && anon->u.an_page->wire_count) {
+                       amap_unlock(amap);
+                       uvm_unlock_pageq();
+                       error = EBUSY;
+                       goto fail;
+               }
+       }
+
+       /*
+        * Everything is good to go.  Remove any existing anons and insert
+        * the loaned object anons.
+        */
+
+       for (i = 0; i < npages; i++) {
+               pgoff = i << PAGE_SHIFT;
+               if (oanons[i]) {
+                       amap_unadd(&entry->aref, aoff + pgoff);
+               }
+               amap_add(&entry->aref, aoff + pgoff, nanons[i], FALSE);
+       }
+       uvm_unlock_pageq();
+       amap_unlock(amap);
+       simple_unlock(&uobj->vmobjlock);
+       vm_map_unlock_read(map);
+
+       /*
+        * The map has all the new information now.
+        * Enter the pages into the pmap to save likely faults later.
+        */
+
+       for (i = 0; i < npages; i++) {
+               (void) pmap_enter(map->pmap, va + (i << PAGE_SHIFT),
+                   VM_PAGE_TO_PHYS(pgs[i]), VM_PROT_READ, PMAP_CANFAIL);
+       }
+
+       /*
+        * At this point we're done with the pages, unlock them now.
+        */
+
+       simple_lock(&uobj->vmobjlock);
+       uvm_lock_pageq();
+       uvm_page_unbusy(pgs, npages);
+       uvm_unlock_pageq();
+       simple_unlock(&uobj->vmobjlock);
+
+       /*
+        * Finally, free any anons which we replaced in the map.
+        */
+
+       for (i = 0; i < npages; i++) {
+               anon = oanons[i];
+               if (!anon) {
+                       continue;
+               }
+               simple_lock(&anon->an_lock);
+               refs = --anon->an_ref;
+               simple_unlock(&anon->an_lock);
+               if (refs == 0) {
+                       uvm_anfree(anon);
+               }
+       }
+       return error;
+
+       /*
+        * We couldn't complete the loan for some reason.
+        * Undo any work we did so far.
+        */
+
+fail:
+       uvm_lock_pageq();
+       for (i = 0; i < npages; i++) {
+               if (nanons[i] == NULL) {
+                       uvm_pageactivate(pgs[i]);
+               }
+       }
+       uvm_page_unbusy(pgs, npages);
+       uvm_unlock_pageq();
+       simple_unlock(&uobj->vmobjlock);
+       vm_map_unlock_read(map);
+       for (i = 0; i < npages; i++) {
+               anon = nanons[i];
+               if (anon == NULL) {
+                       continue;
+               }
+               simple_lock(&anon->an_lock);
+               refs = --anon->an_ref;
+               simple_unlock(&anon->an_lock);
+               if (refs == 0) {
+                       uvm_anfree(anon);
+               }
+       }
+       return error;
+}
+
+/*
+ * Reverse-loaning for write operations.
+ * The user pages will be atomically transferred to the object and
+ * loaned back to the user process.
+ */
+
+static int
+uvm_loanobj_write(struct vm_map *map, vaddr_t va, size_t len,
+    struct uvm_object *uobj, off_t off)
+{
+       int npages = len >> PAGE_SHIFT;
+       struct vm_page *pgs[npages], *pg;
+       struct vm_amap *amap;
+       struct vm_anon *anon, *oanons[npages], *nanons[npages];
+       struct vm_map_entry *entry;
+       unsigned int maptime;
+       int error, i, refs, aoff, pgoff;
+       UVMHIST_FUNC("uvm_vnp_loanread"); UVMHIST_CALLED(ubchist);
+
+       /* Write-side loaning is not implemented yet; force the copy path. */
+       return ENOSYS;
+
+       UVMHIST_LOG(ubchist, "map %p va 0x%x npages %d", map, va, npages, 0);
+       UVMHIST_LOG(ubchist, "uobj %p off 0x%x", uobj, off, 0, 0);
+       vm_map_lock_read(map);
+
+retry:
+       if (!uvm_map_lookup_entry(map, va, &entry)) {
+               vm_map_unlock_read(map);
+               UVMHIST_LOG(ubchist, "no entry", 0,0,0,0);
+               return EINVAL;
+       }
+       if ((entry->protection & VM_PROT_WRITE) == 0) {
+               vm_map_unlock_read(map);
+               UVMHIST_LOG(ubchist, "no write access", 0,0,0,0);
+               return EACCES;
+       }
+       if (VM_MAPENT_ISWIRED(entry)) {
+               vm_map_unlock_read(map);
+               UVMHIST_LOG(ubchist, "entry is wired", 0,0,0,0);
+               return EBUSY;
+       }
+       if (!UVM_ET_ISCOPYONWRITE(entry)) {
+               vm_map_unlock_read(map);
+               UVMHIST_LOG(ubchist, "entry is not COW", 0,0,0,0);
+               return EINVAL;
+       }
+       if (entry->end < va + len) {
+               vm_map_unlock_read(map);
+               UVMHIST_LOG(ubchist, "chunk longer than entry", 0,0,0,0);
+               return EINVAL;
+       }
+
+       /*
+        * None of the trivial reasons why we might not be able to do the loan
+        * are true.  If we need to COW the amap, try to do it now.
+        */
+
+       amap = entry->aref.ar_amap;
+       KASSERT(amap || UVM_ET_ISNEEDSCOPY(entry));
+       if (amap == NULL) {
+               amap_copy(map, entry, M_WAITOK, TRUE, va, va + len);
+               if (UVM_ET_ISNEEDSCOPY(entry)) {
+                       vm_map_unlock_read(map);
+                       UVMHIST_LOG(ubchist, "amap COW failed", 0,0,0,0);
+                       return ENOMEM;
+               }
+               UVMHIST_LOG(ubchist, "amap has been COWed", 0,0,0,0);
+       }
+       aoff = va - entry->start;
+       maptime = map->timestamp;
+       vm_map_unlock_read(map);
+
+       /*
+        * The map is all ready for us, now fetch the object pages.
+        * If the map changes out from under us, start over.
+        */
+
+       simple_lock(&uobj->vmobjlock);
+       memset(pgs, 0, sizeof(pgs));
+       error = (*uobj->pgops->pgo_get)(uobj, off, pgs, &npages, 0,
+           VM_PROT_READ, 0, PGO_SYNCIO);
+       if (error) {
+               UVMHIST_LOG(ubchist, "getpages -> %d", error,0,0,0);
+               return error;
+       }
+       vm_map_lock_read(map);
+       if (map->timestamp != maptime) {
+               simple_lock(&uobj->vmobjlock);
+               uvm_lock_pageq();
+               for (i = 0; i < npages; i++) {
+                       uvm_pageactivate(pgs[i]);
+               }
+               uvm_page_unbusy(pgs, npages);
+               uvm_unlock_pageq();
+               simple_unlock(&uobj->vmobjlock);
+               goto retry;
+       }
+
+       /*
+        * Prepare each object page for loaning.  Allocate an anon for each page
+        * that doesn't already have one.  If any of the pages are wired,
+        * undo everything and fail.
+        */
+
+       memset(nanons, 0, sizeof(nanons));
+       simple_lock(&uobj->vmobjlock);
+       for (i = 0; i < npages; i++) {
+               pg = pgs[i];
+               if (pg->wire_count) {
+                       error = EBUSY;
+                       goto fail;
+               }
+               pmap_page_protect(pg, VM_PROT_READ);
+               uvm_lock_pageq();
+               uvm_pageactivate(pg);
+               uvm_unlock_pageq();
+               if (pg->uanon) {
+                       anon = pg->uanon;
+                       simple_lock(&anon->an_lock);
+                       anon->an_ref++;
+               } else {
+                       anon = uvm_analloc();
+                       if (anon == NULL) {
+                               error = ENOMEM;
+                               goto fail;
+                       }
+                       pg->loan_count++;
+                       anon->u.an_page = pg;
+                       pg->uanon = anon;
+               }
+               simple_unlock(&anon->an_lock);
+               nanons[i] = anon;
+       }
+
+       /*
+        * Look for any existing anons in the amap.  These will be replaced
+        * by the new loan anons we just set up.  If any of these anon pages
+        * are wired then we can't replace them.
+        */
+
+       memset(oanons, 0, sizeof(oanons));
+       amap_lock(amap);
+       uvm_lock_pageq();
+       for (i = 0; i < npages; i++) {
+               UVMHIST_LOG(ubchist, "pgs[%d] %p", i, pgs[i], 0,0);
+               anon = amap_lookup(&entry->aref, aoff + (i << PAGE_SHIFT));
+               oanons[i] = anon;
+               if (anon && anon->u.an_page && anon->u.an_page->wire_count) {
+                       amap_unlock(amap);
+                       uvm_unlock_pageq();
+                       error = EBUSY;
+                       goto fail;
+               }
+       }
+
+       /*
+        * Everything is good to go.  Remove any existing anons and insert
+        * the loaned object anons.
+        */
+
+       for (i = 0; i < npages; i++) {
+               pgoff = i << PAGE_SHIFT;
+               if (oanons[i]) {
+                       amap_unadd(&entry->aref, aoff + pgoff);
+               }
+               amap_add(&entry->aref, aoff + pgoff, nanons[i], FALSE);
+       }
+       uvm_unlock_pageq();
+       amap_unlock(amap);
+       simple_unlock(&uobj->vmobjlock);
+       vm_map_unlock_read(map);
+
+       /*
+        * The map has all the new information now.
+        * Enter the pages into the pmap to save likely faults later.
+        */
+
+       for (i = 0; i < npages; i++) {
+               (void) pmap_enter(map->pmap, va + (i << PAGE_SHIFT),
+                   VM_PAGE_TO_PHYS(pgs[i]), VM_PROT_READ, PMAP_CANFAIL);
+       }
+
+       /*
+        * At this point we're done with the pages, unlock them now.
+        */
+
+       simple_lock(&uobj->vmobjlock);
+       uvm_lock_pageq();
+       uvm_page_unbusy(pgs, npages);
+       uvm_unlock_pageq();
+       simple_unlock(&uobj->vmobjlock);
+
+       /*
+        * Finally, free any anons which we replaced in the map.
+        */
+
+       for (i = 0; i < npages; i++) {
+               anon = oanons[i];
+               if (!anon) {
+                       continue;
+               }
+               simple_lock(&anon->an_lock);
+               refs = --anon->an_ref;
+               simple_unlock(&anon->an_lock);
+               if (refs == 0) {
+                       uvm_anfree(anon);
+               }
+       }
+       return error;
+
+       /*
+        * We couldn't complete the loan for some reason.
+        * Undo any work we did so far.
+        */
+
+fail:
+       uvm_lock_pageq();
+       for (i = 0; i < npages; i++) {
+               if (nanons[i] == NULL) {
+                       uvm_pageactivate(pgs[i]);
+               }
+       }
+       uvm_page_unbusy(pgs, npages);
+       uvm_unlock_pageq();
+       simple_unlock(&uobj->vmobjlock);
+       vm_map_unlock_read(map);
+       for (i = 0; i < npages; i++) {
+               anon = nanons[i];
+               if (anon == NULL) {
+                       continue;
+               }
+               simple_lock(&anon->an_lock);
+               refs = --anon->an_ref;
+               simple_unlock(&anon->an_lock);
+               if (refs == 0) {
+                       uvm_anfree(anon);
+               }
+       }
+       return error;
+}
Index: uvm/uvm_loan.h
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_loan.h,v
retrieving revision 1.13
diff -u -p -r1.13 uvm_loan.h
--- uvm/uvm_loan.h      24 Mar 2004 07:55:01 -0000      1.13
+++ uvm/uvm_loan.h      27 Feb 2005 10:08:45 -0000
@@ -53,9 +53,9 @@
void uvm_loan_init(void);
int uvm_loan(struct vm_map *, vaddr_t, vsize_t, void *, int);
void uvm_unloan(void *, int, int);
-int uvm_loanuobjpages(struct uvm_object *, voff_t, int,
-    struct vm_page **);
+int uvm_loanuobjpages(struct uvm_object *, voff_t, int, struct vm_page **);
struct vm_page *uvm_loanbreak(struct vm_page *);
+int uvm_loanobj(struct uvm_object *, struct uio *);

#endif /* _KERNEL */

Index: uvm/uvm_map.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_map.c,v
retrieving revision 1.185
diff -u -p -r1.185 uvm_map.c
--- uvm/uvm_map.c       26 Feb 2005 22:31:44 -0000      1.185
+++ uvm/uvm_map.c       27 Feb 2005 10:08:49 -0000
@@ -1904,6 +1904,7 @@ uvm_unmap_remove(struct vm_map *map, vad
               if (VM_MAPENT_ISWIRED(entry)) {
                       uvm_map_entry_unwire(map, entry);
               }
+
               if ((map->flags & VM_MAP_PAGEABLE) == 0) {

                       /*
@@ -2000,7 +2001,7 @@ uvm_unmap_remove(struct vm_map *map, vad
                * that we've nuked.  then go to next entry.
                */

-               UVMHIST_LOG(maphist, "  removed map entry 0x%x", entry, 0, 0,0);
+               UVMHIST_LOG(maphist, "  removed map entry %p", entry, 0, 0,0);

               /* critical!  prevents stale hint */
               SAVE_HINT(map, entry, entry->prev);
@@ -3430,6 +3431,10 @@ uvm_map_clean(struct vm_map *map, vaddr_
                       vm_map_unlock_read(map);
                       return EINVAL;
               }
+               if ((flags & PGO_FREE) != 0 && VM_MAPENT_ISWIRED(entry)) {
+                       vm_map_unlock_read(map);
+                       return EBUSY;
+               }
               if (end <= current->end) {
                       break;
               }
@@ -3643,7 +3648,7 @@ uvmspace_init(struct vmspace *vm, struct
/*
 * uvmspace_share: share a vmspace between two processes
 *
- * - used for vfork, threads(?)
+ * - used for vfork.
 */

void