Index: arch/acorn26/acorn26/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/acorn26/acorn26/pmap.c,v
retrieving revision 1.36
diff -u -r1.36 pmap.c
--- arch/acorn26/acorn26/pmap.c 11 May 2012 15:39:17 -0000      1.36
+++ arch/acorn26/acorn26/pmap.c 22 Dec 2016 16:13:10 -0000
@@ -293,19 +293,26 @@
vaddr_t
pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
{
-       int i;
       vaddr_t addr;
+       uvm_physseg_t bank;
+
       UVMHIST_FUNC("pmap_steal_memory");

       UVMHIST_CALLED(pmaphist);
       addr = 0;
       size = round_page(size);
-       for (i = 0; i < vm_nphysseg; i++) {
-               if (VM_PHYSMEM_PTR(i)->avail_start < VM_PHYSMEM_PTR(i)->avail_end) {
+       for (bank = uvm_physseg_get_first();
+            uvm_physseg_valid_p(bank);
+            bank = uvm_physseg_get_next(bank)) {
+               if (uvm_physseg_get_avail_start(bank) < uvm_physseg_get_avail_end(bank)) {
+                       paddr_t avail_start = uvm_physseg_get_avail_start(bank);
+
                       addr = (vaddr_t)
                           ((char*)MEMC_PHYS_BASE +
-                               ptoa(VM_PHYSMEM_PTR(i)->avail_start));
-                       VM_PHYSMEM_PTR(i)->avail_start++;
+                               ptoa(avail_start));
+                       avail_start++;
+                       uvm_physseg_set_avail_start(bank, avail_start);
+
                       break;
               }
       }
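
For reference: the hunk above is the conversion pattern repeated throughout this patch. The old index walk over vm_physmem[] / vm_nphysseg becomes a walk over opaque uvm_physseg_t handles, terminated by uvm_physseg_valid_p(). A minimal stand-alone sketch of the idiom, using only accessors introduced by uvm_physseg(9); the helper name count_physmem_pages is illustrative, not part of the patch:

        #include <uvm/uvm.h>
        #include <uvm/uvm_physseg.h>

        static psize_t
        count_physmem_pages(void)
        {
                uvm_physseg_t bank;
                psize_t npages = 0;

                /* Handles are opaque; uvm_physseg_valid_p() ends the walk. */
                for (bank = uvm_physseg_get_first();
                     uvm_physseg_valid_p(bank);
                     bank = uvm_physseg_get_next(bank))
                        npages += uvm_physseg_get_avail_end(bank) -
                            uvm_physseg_get_avail_start(bank);

                return npages;
        }
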
Index: arch/alpha/alpha/machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/alpha/alpha/machdep.c,v
retrieving revision 1.348
diff -u -r1.348 machdep.c
--- arch/alpha/alpha/machdep.c  22 Dec 2016 14:47:54 -0000      1.348
+++ arch/alpha/alpha/machdep.c  22 Dec 2016 16:13:10 -0000
@@ -227,7 +227,6 @@
       struct mddt *mddtp;
       struct mddt_cluster *memc;
       int i, mddtweird;
-       struct vm_physseg *vps;
       struct pcb *pcb0;
       vaddr_t kernstart, kernend, v;
       paddr_t kernstartpfn, kernendpfn, pfn0, pfn1;
@@ -611,23 +610,24 @@
        * Initialize error message buffer (at end of core).
        */
       {
+               paddr_t end;
               vsize_t sz = (vsize_t)round_page(MSGBUFSIZE);
               vsize_t reqsz = sz;
+               uvm_physseg_t bank;

-               vps = VM_PHYSMEM_PTR(vm_nphysseg - 1);
+               bank = uvm_physseg_get_last();

               /* shrink so that it'll fit in the last segment */
-               if ((vps->avail_end - vps->avail_start) < atop(sz))
-                       sz = ptoa(vps->avail_end - vps->avail_start);
+               if (uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank) < atop(sz))
+                       sz = ptoa(uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank));

-               vps->end -= atop(sz);
-               vps->avail_end -= atop(sz);
-               msgbufaddr = (void *) ALPHA_PHYS_TO_K0SEG(ptoa(vps->end));
-               initmsgbuf(msgbufaddr, sz);
+               end = uvm_physseg_get_end(bank);
+               end -= atop(sz);
+
+               uvm_physseg_unplug(end, atop(sz));
+               msgbufaddr = (void *) ALPHA_PHYS_TO_K0SEG(ptoa(end));

-               /* Remove the last segment if it now has no pages. */
-               if (vps->start == vps->end)
-                       vm_nphysseg--;
+               initmsgbuf(msgbufaddr, sz);

               /* warn if the message buffer had to be shrunk */
               if (sz != reqsz)
Index: arch/alpha/alpha/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/alpha/alpha/pmap.c,v
retrieving revision 1.260
diff -u -r1.260 pmap.c
--- arch/alpha/alpha/pmap.c     5 Nov 2015 06:26:15 -0000       1.260
+++ arch/alpha/alpha/pmap.c     22 Dec 2016 16:13:11 -0000
@@ -784,8 +784,8 @@
        * the fact that BSEARCH sorts the vm_physmem[] array
        * for us.
        */
-       avail_start = ptoa(VM_PHYSMEM_PTR(0)->start);
-       avail_end = ptoa(VM_PHYSMEM_PTR(vm_nphysseg - 1)->end);
+       avail_start = ptoa(uvm_physseg_get_avail_start(uvm_physseg_get_first()));
+       avail_end = ptoa(uvm_physseg_get_avail_end(uvm_physseg_get_last()));
       virtual_end = VM_MIN_KERNEL_ADDRESS + lev3mapsize * PAGE_SIZE;

#if 0
@@ -1007,10 +1007,12 @@
vaddr_t
pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
{
-       int bank, npgs, x;
+       int npgs;
       vaddr_t va;
-       paddr_t pa;
+       paddr_t pa;

+       uvm_physseg_t bank;
+
       size = round_page(size);
       npgs = atop(size);

@@ -1018,50 +1020,36 @@
       printf("PSM: size 0x%lx (npgs 0x%x)\n", size, npgs);
#endif

-       for (bank = 0; bank < vm_nphysseg; bank++) {
+       for (bank = uvm_physseg_get_first();
+            uvm_physseg_valid_p(bank);
+            bank = uvm_physseg_get_next(bank)) {
               if (uvm.page_init_done == true)
                       panic("pmap_steal_memory: called _after_ bootstrap");

#if 0
-               printf("     bank %d: avail_start 0x%lx, start 0x%lx, "
-                   "avail_end 0x%lx\n", bank, VM_PHYSMEM_PTR(bank)->avail_start,
+               printf("     bank %d: avail_start 0x%"PRIxPADDR", start 0x%"PRIxPADDR", "
+                   "avail_end 0x%"PRIxPADDR"\n", bank, VM_PHYSMEM_PTR(bank)->avail_start,
                   VM_PHYSMEM_PTR(bank)->start, VM_PHYSMEM_PTR(bank)->avail_end);
#endif

-               if (VM_PHYSMEM_PTR(bank)->avail_start != VM_PHYSMEM_PTR(bank)->start ||
-                   VM_PHYSMEM_PTR(bank)->avail_start >= VM_PHYSMEM_PTR(bank)->avail_end)
+               if (uvm_physseg_get_avail_start(bank) != uvm_physseg_get_start(bank) ||
+                   uvm_physseg_get_avail_start(bank) >= uvm_physseg_get_avail_end(bank))
                       continue;

#if 0
-               printf("             avail_end - avail_start = 0x%lx\n",
+               printf("             avail_end - avail_start = 0x%"PRIxPADDR"\n",
                   VM_PHYSMEM_PTR(bank)->avail_end - VM_PHYSMEM_PTR(bank)->avail_start);
#endif

-               if ((VM_PHYSMEM_PTR(bank)->avail_end - VM_PHYSMEM_PTR(bank)->avail_start)
+               if (uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank)
                   < npgs)
                       continue;

               /*
                * There are enough pages here; steal them!
                */
-               pa = ptoa(VM_PHYSMEM_PTR(bank)->avail_start);
-               VM_PHYSMEM_PTR(bank)->avail_start += npgs;
-               VM_PHYSMEM_PTR(bank)->start += npgs;
-
-               /*
-                * Have we used up this segment?
-                */
-               if (VM_PHYSMEM_PTR(bank)->avail_start == VM_PHYSMEM_PTR(bank)->end) {
-                       if (vm_nphysseg == 1)
-                               panic("pmap_steal_memory: out of memory!");
-
-                       /* Remove this segment from the list. */
-                       vm_nphysseg--;
-                       for (x = bank; x < vm_nphysseg; x++) {
-                               /* structure copy */
-                               VM_PHYSMEM_PTR_SWAP(x, x + 1);
-                       }
-               }
+               pa = ptoa(uvm_physseg_get_start(bank));
+               uvm_physseg_unplug(atop(pa), npgs);

               va = ALPHA_PHYS_TO_K0SEG(pa);
               memset((void *)va, 0, size);
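
The alpha (and, below, ia64) pmap_steal_memory() conversions replace the hand-rolled bookkeeping (bumping avail_start and compacting vm_physmem[] when a segment empties) with a single call to uvm_physseg_unplug(), which shrinks or deletes the segment as needed. A minimal sketch of that steal pattern, assuming the caller has already verified the segment holds npgs available pages; steal_bootpages is an illustrative name only:

        #include <uvm/uvm.h>
        #include <uvm/uvm_physseg.h>

        static paddr_t
        steal_bootpages(uvm_physseg_t bank, size_t npgs)
        {
                /* First available frame of this segment. */
                const paddr_t pa = ptoa(uvm_physseg_get_avail_start(bank));

                /*
                 * unplug() removes the range from the segment, and the
                 * segment itself if nothing is left.
                 */
                if (uvm_physseg_unplug(atop(pa), npgs) == false)
                        panic("steal_bootpages: unplug of %zu pages failed", npgs);

                return pa;
        }
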
Index: arch/amd64/amd64/machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/amd64/amd64/machdep.c,v
retrieving revision 1.241
diff -u -r1.241 machdep.c
--- arch/amd64/amd64/machdep.c  22 Dec 2016 14:47:54 -0000      1.241
+++ arch/amd64/amd64/machdep.c  22 Dec 2016 16:13:11 -0000
@@ -789,6 +789,7 @@
       paddr_t p, pstart, pend;
       struct vm_page *pg;
       int i;
+       uvm_physseg_t upm;

       /*
        * Mark all memory pages, then unmark pages that are uninteresting.
@@ -805,10 +806,25 @@
                       setbit(sparse_dump_physmap, p);
               }
       }
-       for (i = 0; i < vm_nphysseg; i++) {
-               struct vm_physseg *seg = VM_PHYSMEM_PTR(i);
+        for (upm = uvm_physseg_get_first();
+            uvm_physseg_valid_p(upm);
+            upm = uvm_physseg_get_next(upm)) {
+               paddr_t pfn;

-               for (pg = seg->pgs; pg < seg->lastpg; pg++) {
+               if (uvm_physseg_valid_p(upm) == false)
+                       break;
+
+               const paddr_t startpfn = uvm_physseg_get_start(upm);
+               const paddr_t endpfn = uvm_physseg_get_end(upm);
+
+               KASSERT(startpfn != -1 && endpfn != -1);
+
+               /*
+                * We assume that seg->start to seg->end are
+                * uvm_page_physload()ed
+                */
+               for (pfn = startpfn; pfn < endpfn; pfn++) {
+                       pg = PHYS_TO_VM_PAGE(ptoa(pfn));
                       if (pg->uanon || (pg->pqflags & PQ_FREE) ||
                           (pg->uobject && pg->uobject->pgops)) {
                               p = VM_PAGE_TO_PHYS(pg) / PAGE_SIZE;
@@ -1452,57 +1468,30 @@
static void
init_x86_64_msgbuf(void)
{
-       /* Message buffer is located at end of core. */
-       struct vm_physseg *vps;
-       psize_t sz = round_page(MSGBUFSIZE);
-       psize_t reqsz = sz;
-       int x;
-
- search_again:
-       vps = NULL;
-
-       for (x = 0; x < vm_nphysseg; x++) {
-               vps = VM_PHYSMEM_PTR(x);
-               if (ctob(vps->avail_end) == avail_end)
-                       break;
-       }
-       if (x == vm_nphysseg)
-               panic("init_x86_64: can't find end of memory");
+        /* Message buffer is located at end of core. */
+       psize_t reqsz = round_page(MSGBUFSIZE);
+       psize_t sz = 0;

-       /* Shrink so it'll fit in the last segment. */
-       if ((vps->avail_end - vps->avail_start) < atop(sz))
-               sz = ctob(vps->avail_end - vps->avail_start);
-
-       vps->avail_end -= atop(sz);
-       vps->end -= atop(sz);
-            msgbuf_p_seg[msgbuf_p_cnt].sz = sz;
-            msgbuf_p_seg[msgbuf_p_cnt++].paddr = ctob(vps->avail_end);
-
-       /* Remove the last segment if it now has no pages. */
-       if (vps->start == vps->end) {
-               for (vm_nphysseg--; x < vm_nphysseg; x++)
-                       VM_PHYSMEM_PTR_SWAP(x, x + 1);
-       }
-
-       /* Now find where the new avail_end is. */
-       for (avail_end = 0, x = 0; x < vm_nphysseg; x++)
-               if (VM_PHYSMEM_PTR(x)->avail_end > avail_end)
-                       avail_end = VM_PHYSMEM_PTR(x)->avail_end;
-       avail_end = ctob(avail_end);
+       for (sz = 0; sz < reqsz; sz += PAGE_SIZE) {
+               paddr_t stolenpa;

-       if (sz == reqsz)
-               return;
+               if (!uvm_page_physget(&stolenpa))
+                       break;

-       reqsz -= sz;
-       if (msgbuf_p_cnt == VM_PHYSSEG_MAX) {
-               /* No more segments available, bail out. */
-               printf("WARNING: MSGBUFSIZE (%zu) too large, using %zu.\n",
-                   (size_t)MSGBUFSIZE, (size_t)(MSGBUFSIZE - reqsz));
-               return;
+               if (msgbuf_p_cnt > 0 && stolenpa ==
+                   (msgbuf_p_seg[msgbuf_p_cnt - 1].paddr + msgbuf_p_seg[msgbuf_p_cnt - 1].sz)) {
+                       /* contiguous: append it to the current msgbuf seg */
+                       msgbuf_p_seg[msgbuf_p_cnt - 1].sz += PAGE_SIZE;
+               } else {
+                       /* non-contiguous: start a new msgbuf seg */
+                       msgbuf_p_seg[msgbuf_p_cnt].sz = PAGE_SIZE;
+                       msgbuf_p_seg[msgbuf_p_cnt++].paddr = stolenpa;
+               }
       }

-       sz = reqsz;
-       goto search_again;
+       if (sz != reqsz)
+               printf("%s: could only allocate %"PRIuPSIZE" bytes of requested %"PRIuPSIZE" bytes\n",
+                   __func__, sz, reqsz);
}

static void
Index: arch/arm/arm32/bus_dma.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/arm32/bus_dma.c,v
retrieving revision 1.96
diff -u -r1.96 bus_dma.c
--- arch/arm/arm32/bus_dma.c    5 Nov 2016 14:26:23 -0000       1.96
+++ arch/arm/arm32/bus_dma.c    22 Dec 2016 16:13:11 -0000
@@ -1365,11 +1365,11 @@
                * The page can only be direct mapped if was allocated out
                * of the arm poolpage vm freelist.
                */
-               int lcv = vm_physseg_find(atop(pa), NULL);
-               KASSERT(lcv != -1);
+               uvm_physseg_t upm = uvm_physseg_find(atop(pa), NULL);
+               KASSERT(uvm_physseg_valid_p(upm));
               if (direct_mapable) {
                       direct_mapable =
-                           (arm_poolpage_vmfreelist == VM_PHYSMEM_PTR(lcv)->free_list);
+                           (arm_poolpage_vmfreelist == uvm_physseg_get_free_list(upm));
               }
#endif

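
The bus_dma.c change above also shows the lookup side of the API: vm_physseg_find() used to return an array index or -1, while uvm_physseg_find() returns a handle that must be tested with uvm_physseg_valid_p(). A small sketch of such a lookup; page_freelist_of is an illustrative name, and the int return value mirrors the old free_list field:

        #include <uvm/uvm.h>
        #include <uvm/uvm_physseg.h>

        static int
        page_freelist_of(paddr_t pa)
        {
                psize_t off;
                uvm_physseg_t upm = uvm_physseg_find(atop(pa), &off);

                /* Unmanaged addresses yield an invalid handle, not -1. */
                if (!uvm_physseg_valid_p(upm))
                        return -1;

                /* off is now the page index of pa within its segment. */
                return uvm_physseg_get_free_list(upm);
        }
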
Index: arch/arm/arm32/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/arm32/pmap.c,v
retrieving revision 1.341
diff -u -r1.341 pmap.c
--- arch/arm/arm32/pmap.c       17 Dec 2016 14:36:29 -0000      1.341
+++ arch/arm/arm32/pmap.c       22 Dec 2016 16:13:11 -0000
@@ -6453,8 +6453,8 @@
        * One could argue whether this should be the entire memory or just
        * the memory that is useable in a user process.
        */
-       avail_start = ptoa(VM_PHYSMEM_PTR(0)->start);
-       avail_end = ptoa(VM_PHYSMEM_PTR(vm_nphysseg - 1)->end);
+       avail_start = ptoa(uvm_physseg_get_avail_start(uvm_physseg_get_first()));
+       avail_end = ptoa(uvm_physseg_get_avail_end(uvm_physseg_get_last()));

       /*
        * Now we need to free enough pv_entry structures to allow us to get
Index: arch/i386/i386/machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/i386/i386/machdep.c,v
retrieving revision 1.771
diff -u -r1.771 machdep.c
--- arch/i386/i386/machdep.c    22 Dec 2016 14:47:58 -0000      1.771
+++ arch/i386/i386/machdep.c    22 Dec 2016 16:13:23 -0000
@@ -1037,57 +1037,30 @@
static void
init386_msgbuf(void)
{
-       /* Message buffer is located at end of core. */
-       struct vm_physseg *vps;
-       psize_t sz = round_page(MSGBUFSIZE);
-       psize_t reqsz = sz;
-       unsigned int x;
-
- search_again:
-       vps = NULL;
-       for (x = 0; x < vm_nphysseg; ++x) {
-               vps = VM_PHYSMEM_PTR(x);
-               if (ctob(vps->avail_end) == avail_end) {
-                       break;
-               }
-       }
-       if (x == vm_nphysseg)
-               panic("init386: can't find end of memory");
+        /* Message buffer is located at end of core. */
+       psize_t reqsz = round_page(MSGBUFSIZE);
+       psize_t sz = 0;

-       /* Shrink so it'll fit in the last segment. */
-       if (vps->avail_end - vps->avail_start < atop(sz))
-               sz = ctob(vps->avail_end - vps->avail_start);
-
-       vps->avail_end -= atop(sz);
-       vps->end -= atop(sz);
-       msgbuf_p_seg[msgbuf_p_cnt].sz = sz;
-       msgbuf_p_seg[msgbuf_p_cnt++].paddr = ctob(vps->avail_end);
-
-       /* Remove the last segment if it now has no pages. */
-       if (vps->start == vps->end) {
-               for (--vm_nphysseg; x < vm_nphysseg; x++)
-                       VM_PHYSMEM_PTR_SWAP(x, x + 1);
-       }
-
-       /* Now find where the new avail_end is. */
-       for (avail_end = 0, x = 0; x < vm_nphysseg; x++)
-               if (VM_PHYSMEM_PTR(x)->avail_end > avail_end)
-                       avail_end = VM_PHYSMEM_PTR(x)->avail_end;
-       avail_end = ctob(avail_end);
+       for (sz = 0; sz < reqsz; sz += PAGE_SIZE) {
+               paddr_t stolenpa;

-       if (sz == reqsz)
-               return;
+               if (!uvm_page_physget(&stolenpa))
+                       break;

-       reqsz -= sz;
-       if (msgbuf_p_cnt == VM_PHYSSEG_MAX) {
-               /* No more segments available, bail out. */
-               printf("WARNING: MSGBUFSIZE (%zu) too large, using %zu.\n",
-                   (size_t)MSGBUFSIZE, (size_t)(MSGBUFSIZE - reqsz));
-               return;
+               if (msgbuf_p_cnt > 0 && stolenpa ==
+                   (msgbuf_p_seg[msgbuf_p_cnt - 1].paddr + msgbuf_p_seg[msgbuf_p_cnt - 1].sz)) {
+                       /* contiguous: append it to the current msgbuf seg */
+                       msgbuf_p_seg[msgbuf_p_cnt - 1].sz += PAGE_SIZE;
+               } else {
+                       /* non-contiguous: start a new msgbuf seg */
+                       msgbuf_p_seg[msgbuf_p_cnt].sz = PAGE_SIZE;
+                       msgbuf_p_seg[msgbuf_p_cnt++].paddr = stolenpa;
+               }
       }

-       sz = reqsz;
-       goto search_again;
+       if (sz != reqsz)
+               printf("%s: could only allocate %"PRIuPSIZE" bytes of requested %"PRIuPSIZE" bytes\n",
+                   __func__, sz, reqsz);
}

#ifndef XEN
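
Both msgbuf conversions above lean on uvm_page_physget(9): each call steals one boot-time physical page from some segment and returns false once nothing more can be handed out. A minimal usage sketch, valid only before uvm.page_init_done is set; grab_boot_pages is an illustrative name:

        #include <uvm/uvm.h>
        #include <uvm/uvm_physseg.h>

        static size_t
        grab_boot_pages(paddr_t *pas, size_t want)
        {
                size_t n;

                for (n = 0; n < want; n++) {
                        /* Each call peels a single page off some segment. */
                        if (!uvm_page_physget(&pas[n]))
                                break;  /* physical memory exhausted */
                }
                return n;       /* pages actually obtained */
        }
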
Index: arch/ia64/ia64/machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/ia64/ia64/machdep.c,v
retrieving revision 1.36
diff -u -r1.36 machdep.c
--- arch/ia64/ia64/machdep.c    22 Dec 2016 14:47:58 -0000      1.36
+++ arch/ia64/ia64/machdep.c    22 Dec 2016 16:13:23 -0000
@@ -175,18 +175,19 @@
        * Display any holes after the first chunk of extended memory.
        */
       if (bootverbose) {
-               int lcv, sizetmp;
-
+               int sizetmp, vm_nphysseg;
+               uvm_physseg_t upm;
+
               printf("Physical memory chunk(s):\n");
-               for (lcv = 0;
-                   lcv < vm_nphysseg || VM_PHYSMEM_PTR(lcv)->avail_end != 0;
-                   lcv++) {
-                       sizetmp = VM_PHYSMEM_PTR(lcv)->avail_end -
-                           VM_PHYSMEM_PTR(lcv)->avail_start;
+               for (vm_nphysseg = 0, upm = uvm_physseg_get_first();
+                    uvm_physseg_valid_p(upm);
+                    vm_nphysseg++, upm = uvm_physseg_get_next(upm)) {
+                       sizetmp = uvm_physseg_get_avail_end(upm) -
+                           uvm_physseg_get_avail_start(upm);

                       printf("0x%016lx - 0x%016lx, %ld bytes (%d pages)\n",
-                           ptoa(VM_PHYSMEM_PTR(lcv)->avail_start),
-                               ptoa(VM_PHYSMEM_PTR(lcv)->avail_end) - 1,
+                           ptoa(uvm_physseg_get_avail_start(upm)),
+                           ptoa(uvm_physseg_get_avail_end(upm)) - 1,
                                   ptoa(sizetmp), sizetmp);
               }
               printf("Total number of segments: vm_nphysseg = %d \n",
@@ -467,7 +468,6 @@
       uvmexp.pagesize = PAGE_SIZE;
       uvm_md_init();

-
       /*
        * Find out how much memory is available, by looking at
        * the memory descriptors.
Index: arch/ia64/ia64/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/ia64/ia64/pmap.c,v
retrieving revision 1.32
diff -u -r1.32 pmap.c
--- arch/ia64/ia64/pmap.c       10 Mar 2014 13:47:45 -0000      1.32
+++ arch/ia64/ia64/pmap.c       22 Dec 2016 16:13:23 -0000
@@ -94,6 +94,7 @@
#include <sys/lock.h>

#include <uvm/uvm.h>
+#include <uvm/uvm_physseg.h>

#include <machine/pal.h>
#include <machine/atomic.h>
@@ -316,47 +317,33 @@
vaddr_t
pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
{
-       int lcv, npgs, x;
+       int npgs;
+       uvm_physseg_t upm;
       vaddr_t va;
       paddr_t pa;

       size = round_page(size);
       npgs = atop(size);

-       for (lcv = 0; lcv < vm_nphysseg; lcv++) {
+       for (upm = uvm_physseg_get_first();
+            uvm_physseg_valid_p(upm);
+            upm = uvm_physseg_get_next(upm)) {
               if (uvm.page_init_done == true)
                       panic("pmap_steal_memory: called _after_ bootstrap");

-               if (VM_PHYSMEM_PTR(lcv)->avail_start != VM_PHYSMEM_PTR(lcv)->start ||
-                   VM_PHYSMEM_PTR(lcv)->avail_start >= VM_PHYSMEM_PTR(lcv)->avail_end)
+               if (uvm_physseg_get_avail_start(upm) != uvm_physseg_get_start(upm) ||
+                   uvm_physseg_get_avail_start(upm) >= uvm_physseg_get_avail_end(upm))
                       continue;

-               if ((VM_PHYSMEM_PTR(lcv)->avail_end - VM_PHYSMEM_PTR(lcv)->avail_start)
+               if ((uvm_physseg_get_avail_end(upm) - uvm_physseg_get_avail_start(upm))
                   < npgs)
                       continue;

               /*
                * There are enough pages here; steal them!
                */
-               pa = ptoa(VM_PHYSMEM_PTR(lcv)->avail_start);
-               VM_PHYSMEM_PTR(lcv)->avail_start += npgs;
-               VM_PHYSMEM_PTR(lcv)->start += npgs;
-
-               /*
-                * Have we used up this segment?
-                */
-               if (VM_PHYSMEM_PTR(lcv)->avail_start ==
-                   VM_PHYSMEM_PTR(lcv)->end) {
-                       if (vm_nphysseg == 1)
-                               panic("pmap_steal_memory: out of memory!");
-
-                       /* Remove this segment from the list. */
-                       vm_nphysseg--;
-                       for (x = lcv; x < vm_nphysseg; x++) {
-                               /* structure copy */
-                               VM_PHYSMEM_PTR_SWAP(x, x + 1);
-                       }
-               }
+               pa = ptoa(uvm_physseg_get_start(upm));
+               uvm_physseg_unplug(atop(pa), npgs);

               va = IA64_PHYS_TO_RR7(pa);
               memset((void *)va, 0, size);
@@ -380,31 +367,34 @@
static vaddr_t
pmap_steal_vhpt_memory(vsize_t size)
{
-       int lcv, npgs, x;
+       int npgs;
+       uvm_physseg_t upm;
       vaddr_t va;
-       paddr_t pa;
+       paddr_t tmppa, pa = 0;
       paddr_t vhpt_start = 0, start1, start2, end1, end2;

       size = round_page(size);
       npgs = atop(size);

-       for (lcv = 0; lcv < vm_nphysseg; lcv++) {
+       for (upm = uvm_physseg_get_first();
+            uvm_physseg_valid_p(upm);
+            upm = uvm_physseg_get_next(upm)) {
               if (uvm.page_init_done == true)
                       panic("pmap_vhpt_steal_memory: called _after_ bootstrap");

-               if (VM_PHYSMEM_PTR(lcv)->avail_start != VM_PHYSMEM_PTR(lcv)->start || /* XXX: ??? */
-                   VM_PHYSMEM_PTR(lcv)->avail_start >= VM_PHYSMEM_PTR(lcv)->avail_end)
+               if (uvm_physseg_get_avail_start(upm) != uvm_physseg_get_start(upm) || /* XXX: ??? */
+                   uvm_physseg_get_avail_start(upm) >= uvm_physseg_get_avail_end(upm))
                       continue;

               /* Break off a VHPT sized, aligned chunk off this segment. */

-               start1 = VM_PHYSMEM_PTR(lcv)->avail_start;
+               start1 = uvm_physseg_get_avail_start(upm);

               /* Align requested start address on requested size boundary */
               end1 = vhpt_start = roundup(start1, npgs);

               start2 = vhpt_start + npgs;
-               end2 = VM_PHYSMEM_PTR(lcv)->avail_end;
+               end2 = uvm_physseg_get_avail_end(upm);

               /* Case 1: Doesn't fit. skip this segment */

@@ -423,7 +413,7 @@
                */
               if (start1 == end1 &&
                   start2 == end2 &&
-                   vm_nphysseg == 1) {
+                   uvm_physseg_get_first() == uvm_physseg_get_last() /* single segment */) {
#ifdef DEBUG
                       printf("pmap_vhpt_steal_memory: out of memory!");
#endif
@@ -431,11 +421,13 @@
               }

               /* Remove this segment from the list. */
-               vm_nphysseg--;
-               for (x = lcv; x < vm_nphysseg; x++)
-                       /* structure copy */
-                       VM_PHYSMEM_PTR_SWAP(x, x + 1);
-
+               if (uvm_physseg_unplug(uvm_physseg_get_start(upm),
+                       uvm_physseg_get_end(upm) - uvm_physseg_get_start(upm)) == false) {
+                       panic("%s: uvm_physseg_unplug(%"PRIxPADDR", %"PRIxPADDR") failed\n",
+                           __func__, uvm_physseg_get_start(upm),
+                           uvm_physseg_get_end(upm) - uvm_physseg_get_start(upm));
+               }
+
               /* Case 2: Perfect fit - skip segment reload. */

               if (start1 == end1 && start2 == end2) break;
Index: arch/luna68k/luna68k/pmap_bootstrap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/luna68k/luna68k/pmap_bootstrap.c,v
retrieving revision 1.37
diff -u -r1.37 pmap_bootstrap.c
--- arch/luna68k/luna68k/pmap_bootstrap.c       22 Dec 2016 16:05:15 -0000      1.37
+++ arch/luna68k/luna68k/pmap_bootstrap.c       22 Dec 2016 16:13:23 -0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap_bootstrap.c,v 1.37 2016/12/22 16:05:15 cherry Exp $       */
+/*     $NetBSD: pmap_bootstrap.c,v 1.36 2013/01/26 15:46:24 tsutsui Exp $      */

/*
 * Copyright (c) 1991, 1993
@@ -36,7 +36,7 @@
 */

#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.37 2016/12/22 16:05:15 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap_bootstrap.c,v 1.36 2013/01/26 15:46:24 tsutsui Exp $");

#include "opt_m68k_arch.h"

Index: arch/m68k/m68k/pmap_motorola.c
===================================================================
RCS file: /cvsroot/src/sys/arch/m68k/m68k/pmap_motorola.c,v
retrieving revision 1.68
diff -u -r1.68 pmap_motorola.c
--- arch/m68k/m68k/pmap_motorola.c      22 Dec 2016 14:47:58 -0000      1.68
+++ arch/m68k/m68k/pmap_motorola.c      22 Dec 2016 16:13:24 -0000
@@ -133,6 +133,7 @@
#include <machine/pcb.h>

#include <uvm/uvm.h>
+#include <uvm/uvm_physseg.h>

#include <m68k/cacheops.h>

@@ -295,10 +296,11 @@
static inline struct pv_header *
pa_to_pvh(paddr_t pa)
{
-       int bank, pg = 0;       /* XXX gcc4 -Wuninitialized */
-
-       bank = vm_physseg_find(atop((pa)), &pg);
-       return &VM_PHYSMEM_PTR(bank)->pmseg.pvheader[pg];
+       uvm_physseg_t bank = 0; /* XXX gcc4 -Wuninitialized */
+       psize_t pg = 0;
+
+       bank = uvm_physseg_find(atop((pa)), &pg);
+       return &uvm_physseg_get_pmseg(bank)->pvheader[pg];
}

/*
@@ -412,7 +414,7 @@
       struct pv_header *pvh;
       int             rv;
       int             npages;
-       int             bank;
+       uvm_physseg_t   bank;

       PMAP_DPRINTF(PDB_FOLLOW, ("pmap_init()\n"));

@@ -434,8 +436,10 @@
        * Allocate memory for random pmap data structures.  Includes the
        * initial segment table, pv_head_table and pmap_attributes.
        */
-       for (page_cnt = 0, bank = 0; bank < vm_nphysseg; bank++)
-               page_cnt += VM_PHYSMEM_PTR(bank)->end - VM_PHYSMEM_PTR(bank)->start;
+       for (page_cnt = 0, bank = uvm_physseg_get_first();
+            uvm_physseg_valid_p(bank);
+            bank = uvm_physseg_get_next(bank))
+               page_cnt += uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
       s = M68K_STSIZE;                                        /* Segtabzero */
       s += page_cnt * sizeof(struct pv_header);       /* pv table */
       s = round_page(s);
@@ -461,9 +465,11 @@
        * assign them to the memory segments.
        */
       pvh = pv_table;
-       for (bank = 0; bank < vm_nphysseg; bank++) {
-               npages = VM_PHYSMEM_PTR(bank)->end - VM_PHYSMEM_PTR(bank)->start;
-               VM_PHYSMEM_PTR(bank)->pmseg.pvheader = pvh;
+       for (bank = uvm_physseg_get_first();
+            uvm_physseg_valid_p(bank);
+            bank = uvm_physseg_get_next(bank)) {
+               npages = uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
+               uvm_physseg_get_pmseg(bank)->pvheader = pvh;
               pvh += npages;
       }

@@ -1704,17 +1710,20 @@
static void
pmap_collect(void)
{
-       int bank, s;
-
+       int s;
+       uvm_physseg_t bank;
+
       /*
        * XXX This is very bogus.  We should handle kernel PT
        * XXX pages much differently.
        */

       s = splvm();
-       for (bank = 0; bank < vm_nphysseg; bank++) {
-               pmap_collect1(pmap_kernel(), ptoa(VM_PHYSMEM_PTR(bank)->start),
-                   ptoa(VM_PHYSMEM_PTR(bank)->end));
+       for (bank = uvm_physseg_get_first();
+            uvm_physseg_valid_p(bank);
+            bank = uvm_physseg_get_next(bank)) {
+               pmap_collect1(pmap_kernel(), ptoa(uvm_physseg_get_start(bank)),
+                   ptoa(uvm_physseg_get_end(bank)));
       }
       splx(s);
}
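
pmap_motorola.c keeps its per-segment pv_header arrays; it now reaches them through uvm_physseg_get_pmseg() instead of dereferencing VM_PHYSMEM_PTR(bank)->pmseg. A stand-alone sketch of the lookup that pa_to_pvh() above performs; pvh_lookup is an illustrative name, while struct pv_header and the pvheader member are the existing m68k ones:

        #include <uvm/uvm.h>
        #include <uvm/uvm_physseg.h>

        static struct pv_header *
        pvh_lookup(paddr_t pa)
        {
                psize_t pg = 0;
                uvm_physseg_t bank = uvm_physseg_find(atop(pa), &pg);

                KASSERT(uvm_physseg_valid_p(bank));

                /* pmseg is the MD cookie uvm_physseg(9) keeps per segment. */
                return &uvm_physseg_get_pmseg(bank)->pvheader[pg];
        }
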
Index: arch/mips/include/pmap.h
===================================================================
RCS file: /cvsroot/src/sys/arch/mips/include/pmap.h,v
retrieving revision 1.68
diff -u -r1.68 pmap.h
--- arch/mips/include/pmap.h    11 Jul 2016 16:15:35 -0000      1.68
+++ arch/mips/include/pmap.h    22 Dec 2016 16:13:25 -0000
@@ -116,7 +116,7 @@
#define PMAP_SEGTAB_ALIGN __aligned(sizeof(void *)*NSEGPG) __section(".data1")
#endif

-struct vm_physseg;
+#include <uvm/uvm_physseg.h>

void   pmap_md_init(void);
void   pmap_md_icache_sync_all(void);
@@ -125,7 +125,7 @@
bool   pmap_md_vca_add(struct vm_page *, vaddr_t, pt_entry_t *);
void   pmap_md_vca_clean(struct vm_page *, int);
void   pmap_md_vca_remove(struct vm_page *, vaddr_t, bool, bool);
-bool   pmap_md_ok_to_steal_p(const struct vm_physseg *, size_t);
+bool   pmap_md_ok_to_steal_p(const uvm_physseg_t, size_t);
bool   pmap_md_tlb_check_entry(void *, vaddr_t, tlb_asid_t, pt_entry_t);

static inline bool
Index: arch/mips/mips/mips_machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/mips/mips/mips_machdep.c,v
retrieving revision 1.275
diff -u -r1.275 mips_machdep.c
--- arch/mips/mips/mips_machdep.c       22 Dec 2016 07:56:38 -0000      1.275
+++ arch/mips/mips/mips_machdep.c       22 Dec 2016 16:13:26 -0000
@@ -145,6 +145,7 @@
#endif

#include <uvm/uvm.h>
+#include <uvm/uvm_physseg.h>

#include <dev/cons.h>
#include <dev/mm.h>
@@ -2008,38 +2009,34 @@
{
       vsize_t sz = (vsize_t)round_page(MSGBUFSIZE);
       vsize_t reqsz = sz;
-       u_int bank = vm_nphysseg - 1;
-       struct vm_physseg *vps = VM_PHYSMEM_PTR(bank);
+       uvm_physseg_t bank = uvm_physseg_get_last();
#ifndef _LP64
       /*
        * Fist the physical segment that can be mapped to KSEG0
        */
-       for (; vps >= vm_physmem; vps--, bank--) {
-               if (vps->avail_start + atop(sz) <= atop(MIPS_PHYS_MASK))
+       for (; uvm_physseg_valid_p(bank); bank = uvm_physseg_get_prev(bank)) {
+               if (uvm_physseg_get_avail_start(bank) + atop(sz) <= atop(MIPS_PHYS_MASK))
                       break;
       }
#endif

+       paddr_t start = uvm_physseg_get_start(bank);
+       paddr_t end = uvm_physseg_get_end(bank);
+
       /* shrink so that it'll fit in the last segment */
-       if ((vps->avail_end - vps->avail_start) < atop(sz))
-               sz = ptoa(vps->avail_end - vps->avail_start);
+       if ((end - start) < atop(sz))
+               sz = ptoa(end - start);

-       vps->end -= atop(sz);
-       vps->avail_end -= atop(sz);
+       end -= atop(sz);
+       uvm_physseg_unplug(end, atop(sz));
+
#ifdef _LP64
-       msgbufaddr = (void *) MIPS_PHYS_TO_XKPHYS_CACHED(ptoa(vps->end));
+       msgbufaddr = (void *) MIPS_PHYS_TO_XKPHYS_CACHED(ptoa(end));
#else
-       msgbufaddr = (void *) MIPS_PHYS_TO_KSEG0(ptoa(vps->end));
+       msgbufaddr = (void *) MIPS_PHYS_TO_KSEG0(ptoa(end));
#endif
       initmsgbuf(msgbufaddr, sz);

-       /* Remove the [last] segment if it now has no pages. */
-       if (vps->start == vps->end) {
-               for (vm_nphysseg--; bank < vm_nphysseg - 1; bank++) {
-                       VM_PHYSMEM_PTR_SWAP(bank, bank + 1);
-               }
-       }
-
       /* warn if the message buffer had to be shrunk */
       if (sz != reqsz)
               printf("WARNING: %"PRIdVSIZE" bytes not available for msgbuf "
Index: arch/mips/mips/pmap_machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/mips/mips/pmap_machdep.c,v
retrieving revision 1.11
diff -u -r1.11 pmap_machdep.c
--- arch/mips/mips/pmap_machdep.c       5 Sep 2016 06:59:25 -0000       1.11
+++ arch/mips/mips/pmap_machdep.c       22 Dec 2016 16:13:26 -0000
@@ -133,6 +133,7 @@
#endif

#include <uvm/uvm.h>
+#include <uvm/uvm_physseg.h>

#include <mips/cache.h>
#include <mips/cpuregs.h>
@@ -306,12 +307,12 @@
}

bool
-pmap_md_ok_to_steal_p(const struct vm_physseg *seg, size_t npgs)
+pmap_md_ok_to_steal_p(const uvm_physseg_t bank, size_t npgs)
{
#ifndef _LP64
-       if (seg->avail_start + npgs >= atop(MIPS_PHYS_MASK + 1)) {
-               aprint_debug("%s: seg %zu: not enough in KSEG0 for %zu pages\n",
-                   __func__, seg - VM_PHYSMEM_PTR(0), npgs);
+       if (uvm_physseg_get_avail_start(bank) + npgs >= atop(MIPS_PHYS_MASK + 1)) {
+               aprint_debug("%s: segment not large enough in KSEG0 for %zu pages\n",
+                   __func__, npgs);
               return false;
       }
#endif
@@ -393,8 +394,8 @@
        * for us.  Must do this before uvm_pageboot_alloc()
        * can be called.
        */
-       pmap_limits.avail_start = ptoa(VM_PHYSMEM_PTR(0)->start);
-       pmap_limits.avail_end = ptoa(VM_PHYSMEM_PTR(vm_nphysseg - 1)->end);
+       pmap_limits.avail_start = ptoa(uvm_physseg_get_start(uvm_physseg_get_first()));
+       pmap_limits.avail_end = ptoa(uvm_physseg_get_end(uvm_physseg_get_last()));
       pmap_limits.virtual_end = pmap_limits.virtual_start + (vaddr_t)sysmap_size * NBPG;

#ifndef _LP64
Index: arch/powerpc/ibm4xx/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/powerpc/ibm4xx/pmap.c,v
retrieving revision 1.73
diff -u -r1.73 pmap.c
--- arch/powerpc/ibm4xx/pmap.c  22 Dec 2016 14:47:58 -0000      1.73
+++ arch/powerpc/ibm4xx/pmap.c  22 Dec 2016 16:13:27 -0000
@@ -199,7 +199,7 @@
{
       int bank, pg;

-       bank = vm_physseg_find(atop(pa), &pg);
+       bank = uvm_physseg_find(atop(pa), &pg);
       if (bank == -1)
               return NULL;
       return &VM_PHYSMEM_PTR(bank)->pmseg.pvent[pg];
@@ -210,7 +210,7 @@
{
       int bank, pg;

-       bank = vm_physseg_find(atop(pa), &pg);
+       bank = uvm_physseg_find(atop(pa), &pg);
       if (bank == -1)
               return NULL;
       return &VM_PHYSMEM_PTR(bank)->pmseg.attrs[pg];
Index: arch/powerpc/isa/isadma_machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/powerpc/isa/isadma_machdep.c,v
retrieving revision 1.10
diff -u -r1.10 isadma_machdep.c
--- arch/powerpc/isa/isadma_machdep.c   26 Feb 2016 18:17:39 -0000      1.10
+++ arch/powerpc/isa/isadma_machdep.c   22 Dec 2016 16:13:27 -0000
@@ -168,9 +168,11 @@
       size_t cookiesize;
       paddr_t avail_end = 0;

-       for (bank = 0; bank < vm_nphysseg; bank++) {
-               if (avail_end < VM_PHYSMEM_PTR(bank)->avail_end << PGSHIFT)
-                       avail_end = VM_PHYSMEM_PTR(bank)->avail_end << PGSHIFT;
+       for (bank = uvm_physseg_get_first();
+            uvm_physseg_valid_p(bank);
+            bank = uvm_physseg_get_next(bank)) {
+               if (avail_end < uvm_physseg_get_avail_end(bank) << PGSHIFT)
+                       avail_end = uvm_physseg_get_avail_end(bank) << PGSHIFT;
       }

       /* Call common function to create the basic map. */
@@ -598,9 +600,11 @@
       paddr_t high, avail_end = 0;
       int bank;

-       for (bank = 0; bank < vm_nphysseg; bank++) {
-               if (avail_end < VM_PHYSMEM_PTR(bank)->avail_end << PGSHIFT)
-                       avail_end = VM_PHYSMEM_PTR(bank)->avail_end << PGSHIFT;
+       for (bank = uvm_physseg_get_first();
+            uvm_physseg_valid_p(bank);
+            bank = uvm_physseg_get_next(bank)) {
+               if (avail_end < uvm_physseg_get_avail_end(bank) << PGSHIFT)
+                       avail_end = uvm_physseg_get_avail_end(bank) << PGSHIFT;
       }

       if (avail_end > ISA_DMA_BOUNCE_THRESHOLD)
Index: arch/powerpc/oea/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/powerpc/oea/pmap.c,v
retrieving revision 1.93
diff -u -r1.93 pmap.c
--- arch/powerpc/oea/pmap.c     14 Feb 2016 18:07:49 -0000      1.93
+++ arch/powerpc/oea/pmap.c     22 Dec 2016 16:13:27 -0000
@@ -81,6 +81,7 @@
#include <sys/atomic.h>

#include <uvm/uvm.h>
+#include <uvm/uvm_physseg.h>

#include <machine/powerpc.h>
#include <powerpc/bat.h>
@@ -2909,9 +2910,9 @@
{
       vsize_t size;
       vaddr_t va;
-       paddr_t pa = 0;
-       int npgs, bank;
-       struct vm_physseg *ps;
+       paddr_t start, end, pa = 0;
+       int npgs, freelist;
+       uvm_physseg_t bank;

       if (uvm.page_init_done == true)
               panic("pmap_steal_memory: called _after_ bootstrap");
@@ -2926,11 +2927,18 @@
        * PA 0 will never be among those given to UVM so we can use it
        * to indicate we couldn't steal any memory.
        */
-       for (bank = 0; bank < vm_nphysseg; bank++) {
-               ps = VM_PHYSMEM_PTR(bank);
-               if (ps->free_list == VM_FREELIST_FIRST256 &&
-                   ps->avail_end - ps->avail_start >= npgs) {
-                       pa = ptoa(ps->avail_start);
+
+       for (bank = uvm_physseg_get_first();
+            uvm_physseg_valid_p(bank);
+            bank = uvm_physseg_get_next(bank)) {
+
+               freelist = uvm_physseg_get_free_list(bank);
+               start = uvm_physseg_get_start(bank);
+               end = uvm_physseg_get_end(bank);
+
+               if (freelist == VM_FREELIST_FIRST256 &&
+                   (end - start) >= npgs) {
+                       pa = ptoa(start);
                       break;
               }
       }
@@ -2938,25 +2946,7 @@
       if (pa == 0)
               panic("pmap_steal_memory: no approriate memory to steal!");

-       ps->avail_start += npgs;
-       ps->start += npgs;
-
-       /*
-        * If we've used up all the pages in the segment, remove it and
-        * compact the list.
-        */
-       if (ps->avail_start == ps->end) {
-               /*
-                * If this was the last one, then a very bad thing has occurred
-                */
-               if (--vm_nphysseg == 0)
-                       panic("pmap_steal_memory: out of memory!");
-
-               printf("pmap_steal_memory: consumed bank %d\n", bank);
-               for (; bank < vm_nphysseg; bank++, ps++) {
-                       ps[0] = ps[1];
-               }
-       }
+       uvm_physseg_unplug(start, npgs);

       va = (vaddr_t) pa;
       memset((void *) va, 0, size);
@@ -2964,9 +2954,10 @@
#ifdef DEBUG
       if (pmapdebug && npgs > 1) {
               u_int cnt = 0;
-               for (bank = 0; bank < vm_nphysseg; bank++) {
-                       ps = VM_PHYSMEM_PTR(bank);
-                       cnt += ps->avail_end - ps->avail_start;
+               for (bank = uvm_physseg_get_first();
+                    uvm_physseg_valid_p(bank);
+                    bank = uvm_physseg_get_next(bank)) {
+                       cnt += uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank);
               }
               printf("pmap_steal_memory: stole %u (total %u) pages (%u left)\n",
                   npgs, pmap_pages_stolen, cnt);
@@ -3446,15 +3437,18 @@
#ifdef DEBUG
       if (pmapdebug & PMAPDEBUG_BOOT) {
               u_int cnt;
-               int bank;
+               uvm_physseg_t bank;
               char pbuf[9];
-               for (cnt = 0, bank = 0; bank < vm_nphysseg; bank++) {
-                       cnt += VM_PHYSMEM_PTR(bank)->avail_end - VM_PHYSMEM_PTR(bank)->avail_start;
+               for (cnt = 0, bank = uvm_physseg_get_first();
+                    uvm_physseg_valid_p(bank);
+                    bank = uvm_physseg_get_next(bank)) {
+                       cnt += uvm_physseg_get_avail_end(bank) -
+                           uvm_physseg_get_avail_start(bank);
                       printf("pmap_bootstrap: vm_physmem[%d]=%#" _PRIxpa "-%#" _PRIxpa "/%#" _PRIxpa "\n",
                           bank,
-                           ptoa(VM_PHYSMEM_PTR(bank)->avail_start),
-                           ptoa(VM_PHYSMEM_PTR(bank)->avail_end),
-                           ptoa(VM_PHYSMEM_PTR(bank)->avail_end - VM_PHYSMEM_PTR(bank)->avail_start));
+                           ptoa(uvm_physseg_get_avail_start(bank)),
+                           ptoa(uvm_physseg_get_avail_end(bank)),
+                           ptoa(uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank)));
               }
               format_bytes(pbuf, sizeof(pbuf), ptoa((u_int64_t) cnt));
               printf("pmap_bootstrap: UVM memory = %s (%u pages)\n",
Index: arch/powerpc/powerpc/bus_dma.c
===================================================================
RCS file: /cvsroot/src/sys/arch/powerpc/powerpc/bus_dma.c,v
retrieving revision 1.46
diff -u -r1.46 bus_dma.c
--- arch/powerpc/powerpc/bus_dma.c      1 Feb 2012 09:54:03 -0000       1.46
+++ arch/powerpc/powerpc/bus_dma.c      22 Dec 2016 16:13:27 -0000
@@ -45,6 +45,7 @@
#include <sys/intr.h>

#include <uvm/uvm.h>
+#include <uvm/uvm_physseg.h>

#ifdef PPC_BOOKE
#define        EIEIO   __asm volatile("mbar\t0")
@@ -544,13 +545,15 @@
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags)
{
       paddr_t start = 0xffffffff, end = 0;
-       int bank;
+       uvm_physseg_t bank;

-       for (bank = 0; bank < vm_nphysseg; bank++) {
-               if (start > ptoa(VM_PHYSMEM_PTR(bank)->avail_start))
-                       start = ptoa(VM_PHYSMEM_PTR(bank)->avail_start);
-               if (end < ptoa(VM_PHYSMEM_PTR(bank)->avail_end))
-                       end = ptoa(VM_PHYSMEM_PTR(bank)->avail_end);
+       for (bank = uvm_physseg_get_first();
+            uvm_physseg_valid_p(bank);
+            bank = uvm_physseg_get_next(bank)) {
+               if (start > ptoa(uvm_physseg_get_avail_start(bank)))
+                       start = ptoa(uvm_physseg_get_avail_start(bank));
+               if (end < ptoa(uvm_physseg_get_avail_end(bank)))
+                       end = ptoa(uvm_physseg_get_avail_end(bank));
       }

       return _bus_dmamem_alloc_range(t, size, alignment, boundary, segs,
Index: arch/sh3/sh3/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/sh3/sh3/pmap.c,v
retrieving revision 1.78
diff -u -r1.78 pmap.c
--- arch/sh3/sh3/pmap.c 3 Sep 2016 09:07:54 -0000       1.78
+++ arch/sh3/sh3/pmap.c 22 Dec 2016 16:13:27 -0000
@@ -39,6 +39,7 @@
#include <sys/socketvar.h>     /* XXX: for sock_loan_thresh */

#include <uvm/uvm.h>
+#include <uvm/uvm_physseg.h>

#include <sh3/mmu.h>
#include <sh3/cache.h>
@@ -107,8 +108,8 @@
       /* Steal msgbuf area */
       initmsgbuf((void *)uvm_pageboot_alloc(MSGBUFSIZE), MSGBUFSIZE);

-       avail_start = ptoa(VM_PHYSMEM_PTR(0)->start);
-       avail_end = ptoa(VM_PHYSMEM_PTR(vm_nphysseg - 1)->end);
+       avail_start = ptoa(uvm_physseg_get_start(uvm_physseg_get_first()));
+       avail_end = ptoa(uvm_physseg_get_end(uvm_physseg_get_last()));
       __pmap_kve = VM_MIN_KERNEL_ADDRESS;

       pmap_kernel()->pm_refcnt = 1;
@@ -126,39 +127,28 @@
vaddr_t
pmap_steal_memory(vsize_t size, vaddr_t *vstart, vaddr_t *vend)
{
-       struct vm_physseg *bank;
-       int i, j, npage;
+       int npage;
       paddr_t pa;
       vaddr_t va;
+       uvm_physseg_t bank;

       KDASSERT(!uvm.page_init_done);

       size = round_page(size);
       npage = atop(size);

-       bank = NULL;
-       for (i = 0; i < vm_nphysseg; i++) {
-               bank = VM_PHYSMEM_PTR(i);
-               if (npage <= bank->avail_end - bank->avail_start)
+       for (bank = uvm_physseg_get_first();
+            uvm_physseg_valid_p(bank);
+            bank = uvm_physseg_get_next(bank)) {
+               if (npage <= uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank))
                       break;
       }
-       KDASSERT(i != vm_nphysseg);
-       KDASSERT(bank != NULL);

-       /* Steal pages */
-       pa = ptoa(bank->avail_start);
-       bank->avail_start += npage;
-       bank->start += npage;
-
-       /* GC memory bank */
-       if (bank->avail_start == bank->end) {
-               /* Remove this segment from the list. */
-               vm_nphysseg--;
-               KDASSERT(vm_nphysseg > 0);
-               for (j = i; i < vm_nphysseg; j++)
-                       VM_PHYSMEM_PTR_SWAP(j, j + 1);
-       }
+       KDASSERT(uvm_physseg_valid_p(bank));

+       /* Steal pages */
+       pa = ptoa(uvm_physseg_get_start(bank));
+       uvm_physseg_unplug(atop(pa), npage);
       va = SH3_PHYS_TO_P1SEG(pa);
       memset((void *)va, 0, size);

Index: arch/sh3/sh3/vm_machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/sh3/sh3/vm_machdep.c,v
retrieving revision 1.76
diff -u -r1.76 vm_machdep.c
--- arch/sh3/sh3/vm_machdep.c   7 Nov 2013 21:45:04 -0000       1.76
+++ arch/sh3/sh3/vm_machdep.c   22 Dec 2016 16:13:27 -0000
@@ -102,6 +102,7 @@

#include <uvm/uvm_extern.h>
#include <uvm/uvm_page.h>
+#include <uvm/uvm_physseg.h>

#include <sh3/locore.h>
#include <sh3/cpu.h>
@@ -387,7 +388,7 @@
mm_md_physacc(paddr_t pa, vm_prot_t prot)
{

-       if (atop(pa) < vm_physmem[0].start || PHYS_TO_VM_PAGE(pa) != NULL) {
+       if (atop(pa) < uvm_physseg_get_start(uvm_physseg_get_first()) || PHYS_TO_VM_PAGE(pa) != NULL) {
               return 0;
       }
       return EFAULT;
Index: arch/vax/vax/ka650.c
===================================================================
RCS file: /cvsroot/src/sys/arch/vax/vax/ka650.c,v
retrieving revision 1.36
diff -u -r1.36 ka650.c
--- arch/vax/vax/ka650.c        14 Dec 2010 23:44:49 -0000      1.36
+++ arch/vax/vax/ka650.c        22 Dec 2016 16:13:31 -0000
@@ -106,7 +106,7 @@

       ka650setcache(CACHEON);
       if (ctob(physmem) > ka650merr_ptr->merr_qbmbr) {
-               printf("physmem(0x%x) > qbmbr(0x%x)\n",
+               printf("physmem(%"PRIxPSIZE") > qbmbr(0x%x)\n",
                   ctob(physmem), (int)ka650merr_ptr->merr_qbmbr);
               panic("qbus map unprotected");
       }
Index: arch/vax/vax/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/vax/vax/pmap.c,v
retrieving revision 1.183
diff -u -r1.183 pmap.c
--- arch/vax/vax/pmap.c 22 Dec 2016 14:48:00 -0000      1.183
+++ arch/vax/vax/pmap.c 22 Dec 2016 16:13:31 -0000
@@ -51,6 +51,7 @@
#include <sys/mutex.h>

#include <uvm/uvm.h>
+#include <uvm/uvm_physseg.h>

#ifdef PMAPDEBUG
#include <dev/cons.h>
@@ -475,7 +476,8 @@
{
       vaddr_t v;
       int npgs;
-
+       uvm_physseg_t bank;
+
       PMDEBUG(("pmap_steal_memory: size 0x%lx start %p end %p\n",
                   size, vstartp, vendp));

@@ -490,10 +492,10 @@
       /*
        * A vax only have one segment of memory.
        */
+       bank = uvm_physseg_get_first();

-       v = (VM_PHYSMEM_PTR(0)->avail_start << PGSHIFT) | KERNBASE;
-       VM_PHYSMEM_PTR(0)->avail_start += npgs;
-       VM_PHYSMEM_PTR(0)->start += npgs;
+       v = (uvm_physseg_get_start(bank) << PGSHIFT) | KERNBASE;
+       uvm_physseg_unplug(uvm_physseg_get_start(bank), npgs);
       memset((void *)v, 0, size);
       return v;
}
Index: arch/x68k/x68k/machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/x68k/x68k/machdep.c,v
retrieving revision 1.194
diff -u -r1.194 machdep.c
--- arch/x68k/x68k/machdep.c    2 Dec 2016 12:43:07 -0000       1.194
+++ arch/x68k/x68k/machdep.c    22 Dec 2016 16:13:33 -0000
@@ -102,6 +102,7 @@

#define        MAXMEM  64*1024 /* XXX - from cmap.h */
#include <uvm/uvm.h>
+#include <uvm/uvm_physseg.h>

#include <machine/bus.h>
#include <machine/autoconf.h>
@@ -553,10 +554,7 @@
{
       cpu_kcore_hdr_t *h = &cpu_kcore_hdr;
       struct m68k_kcore_hdr *m = &h->un._m68k;
-       psize_t size;
-#ifdef EXTENDED_MEMORY
-       int i, seg;
-#endif
+       uvm_physseg_t i;

       memset(&cpu_kcore_hdr, 0, sizeof(cpu_kcore_hdr));

@@ -605,20 +603,25 @@
       /*
        * X68k has multiple RAM segments on some models.
        */
-       size = phys_basemem_seg.end - phys_basemem_seg.start;
-       m->ram_segs[0].start = phys_basemem_seg.start;
-       m->ram_segs[0].size  = size;
-#ifdef EXTENDED_MEMORY
-       seg = 1;
-       for (i = 0; i < EXTMEM_SEGS; i++) {
-               size = phys_extmem_seg[i].end - phys_extmem_seg[i].start;
-               if (size == 0)
-                       continue;
-               m->ram_segs[seg].start = phys_extmem_seg[i].start;
-               m->ram_segs[seg].size  = size;
-               seg++;
+       m->ram_segs[0].start = lowram;
+       m->ram_segs[0].size = mem_size - lowram;
+
+       i = uvm_physseg_get_first();
+
+        for (i = uvm_physseg_get_next(i); uvm_physseg_valid_p(i); i = uvm_physseg_get_next(i)) {
+               if (uvm_physseg_valid_p(i) == false)
+                       break;
+
+               const paddr_t startpfn = uvm_physseg_get_start(i);
+               const paddr_t endpfn = uvm_physseg_get_end(i);
+
+               KASSERT(startpfn != -1 && endpfn != -1);
+
+               m->ram_segs[i].start =
+                   ctob(startpfn);
+               m->ram_segs[i].size  =
+                   ctob(endpfn - startpfn);
       }
-#endif
}

/*
@@ -1249,11 +1252,14 @@
int
mm_md_physacc(paddr_t pa, vm_prot_t prot)
{
-       int i;
+       uvm_physseg_t i;
+
+       for (i = uvm_physseg_get_first(); uvm_physseg_valid_p(i); i = uvm_physseg_get_next(i)) {
+               if (uvm_physseg_valid_p(i) == false)
+                       break;

-       for (i = 0; i < vm_nphysseg; i++) {
-               if (ctob(vm_physmem[i].start) <= pa &&
-                   pa < ctob(vm_physmem[i].end))
+               if (ctob(uvm_physseg_get_start(i)) <= pa &&
+                   pa < ctob(uvm_physseg_get_end(i)))
                       return 0;
       }
       return EFAULT;
Index: uvm/files.uvm
===================================================================
RCS file: /cvsroot/src/sys/uvm/files.uvm,v
retrieving revision 1.27
diff -u -r1.27 files.uvm
--- uvm/files.uvm       1 Dec 2016 02:09:03 -0000       1.27
+++ uvm/files.uvm       22 Dec 2016 16:13:39 -0000
@@ -15,6 +15,7 @@
defflag                                PDPOLICY_CLOCKPRO
defparam                       USER_VA0_DISABLE_DEFAULT
defflag opt_uvm_page_trkown.h  UVM_PAGE_TRKOWN
+defflag opt_uvm_hotplug.h      UVM_HOTPLUG

define uvm
defflag        opt_uvm.h                       UVM
@@ -42,6 +43,7 @@
file   uvm/uvm_pdpolicy_clock.c        !pdpolicy_clockpro
file   uvm/uvm_pdpolicy_clockpro.c     pdpolicy_clockpro
file   uvm/uvm_pglist.c                uvm
+file   uvm/uvm_physseg.c               uvm
file   uvm/uvm_readahead.c             uvm
file   uvm/uvm_stat.c                  uvm
file   uvm/uvm_swap.c                  vmswap
Index: uvm/uvm_init.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_init.c,v
retrieving revision 1.47
diff -u -r1.47 uvm_init.c
--- uvm/uvm_init.c      22 Dec 2016 12:55:21 -0000      1.47
+++ uvm/uvm_init.c      22 Dec 2016 16:13:39 -0000
@@ -46,6 +46,7 @@

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>
+#include <uvm/uvm_physseg.h>
#include <uvm/uvm_readahead.h>

/*
@@ -77,6 +78,7 @@
uvm_md_init(void)
{
       uvm_setpagesize(); /* initialize PAGE_SIZE-dependent variables */
+       uvm_physseg_init();
}

/*
Index: uvm/uvm_page.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_page.c,v
retrieving revision 1.189
diff -u -r1.189 uvm_page.c
--- uvm/uvm_page.c      22 Dec 2016 16:05:15 -0000      1.189
+++ uvm/uvm_page.c      22 Dec 2016 16:13:39 -0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: uvm_page.c,v 1.189 2016/12/22 16:05:15 cherry Exp $    */
+/*     $NetBSD: uvm_page.c,v 1.188 2016/12/22 13:26:25 cherry Exp $    */

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
 */

#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.189 2016/12/22 16:05:15 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.188 2016/12/22 13:26:25 cherry Exp $");

#include "opt_ddb.h"
#include "opt_uvm.h"
@@ -81,24 +81,13 @@
#include <sys/proc.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
+#include <sys/extent.h>

#include <uvm/uvm.h>
#include <uvm/uvm_ddb.h>
#include <uvm/uvm_pdpolicy.h>

/*
- * global vars... XXXCDC: move to uvm. structure.
- */
-
-/*
- * physical memory config is stored in vm_physmem.
- */
-
-struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];  /* XXXCDC: uvm.physmem */
-int vm_nphysseg = 0;                           /* XXXCDC: uvm.nphysseg */
-#define        vm_nphysmem     vm_nphysseg
-
-/*
 * Some supported CPUs in a given architecture don't support all
 * of the things necessary to do idle page zero'ing efficiently.
 * We therefore provide a way to enable it from machdep code here.
@@ -146,6 +135,18 @@
#endif /* DEBUG */

/*
+ * These functions are reserved for uvm(9) internal use and are not
+ * exported in the header file uvm_physseg.h
+ *
+ * Thus they are redefined here.
+ */
+void uvm_physseg_init_seg(uvm_physseg_t, struct vm_page *);
+void uvm_physseg_seg_chomp_slab(uvm_physseg_t, struct vm_page *, size_t);
+
+/* returns a pgs array */
+struct vm_page *uvm_physseg_seg_alloc_from_slab(uvm_physseg_t, size_t);
+
+/*
 * local prototypes
 */

@@ -337,11 +338,9 @@
       static struct uvm_cpu boot_cpu;
       psize_t freepages, pagecount, bucketcount, n;
       struct pgflbucket *bucketarray, *cpuarray;
-       struct vm_physseg *seg;
       struct vm_page *pagearray;
+       uvm_physseg_t bank;
       int lcv;
-       u_int i;
-       paddr_t paddr;

       KASSERT(ncpu <= 1);
       CTASSERT(sizeof(pagearray->offset) >= sizeof(struct uvm_cpu *));
@@ -369,7 +368,7 @@
        * now is to allocate vm_page structures for this memory.
        */

-       if (vm_nphysmem == 0)
+       if (uvm_physseg_get_last() == UVM_PHYSSEG_TYPE_INVALID)
               panic("uvm_page_bootstrap: no memory pre-allocated");

       /*
@@ -381,9 +380,11 @@
        */

       freepages = 0;
-       for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
-               seg = VM_PHYSMEM_PTR(lcv);
-               freepages += (seg->end - seg->start);
+
+       for (bank = uvm_physseg_get_first();
+            uvm_physseg_valid_p(bank) ;
+            bank = uvm_physseg_get_next(bank)) {
+               freepages += (uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank));
       }

       /*
@@ -428,31 +429,20 @@
       /*
        * init the vm_page structures and put them in the correct place.
        */
+       /* First init the extent */

-       for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
-               seg = VM_PHYSMEM_PTR(lcv);
-               n = seg->end - seg->start;
+       for (bank = uvm_physseg_get_first(),
+                uvm_physseg_seg_chomp_slab(bank, pagearray, pagecount);
+            uvm_physseg_valid_p(bank);
+            bank = uvm_physseg_get_next(bank)) {
+
+               n = uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
+               uvm_physseg_seg_alloc_from_slab(bank, n);
+               uvm_physseg_init_seg(bank, pagearray);

               /* set up page array pointers */
-               seg->pgs = pagearray;
               pagearray += n;
               pagecount -= n;
-               seg->lastpg = seg->pgs + n;
-
-               /* init and free vm_pages (we've already zeroed them) */
-               paddr = ctob(seg->start);
-               for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
-                       seg->pgs[i].phys_addr = paddr;
-#ifdef __HAVE_VM_PAGE_MD
-                       VM_MDPAGE_INIT(&seg->pgs[i]);
-#endif
-                       if (atop(paddr) >= seg->avail_start &&
-                           atop(paddr) < seg->avail_end) {
-                               uvmexp.npages++;
-                               /* add page to free pool */
-                               uvm_pagefree(&seg->pgs[i]);
-                       }
-               }
       }

       /*
@@ -625,92 +615,42 @@
static bool
uvm_page_physget_freelist(paddr_t *paddrp, int freelist)
{
-       struct vm_physseg *seg;
-       int lcv, x;
+       uvm_physseg_t lcv;

       /* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
-       for (lcv = vm_nphysmem - 1 ; lcv >= 0 ; lcv--)
+       for (lcv = uvm_physseg_get_last() ; uvm_physseg_valid_p(lcv) ; lcv = uvm_physseg_get_prev(lcv))
#else
-       for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
+               for (lcv = uvm_physseg_get_first() ; uvm_physseg_valid_p(lcv) ; lcv = uvm_physseg_get_next(lcv))
#endif
       {
-               seg = VM_PHYSMEM_PTR(lcv);
-
               if (uvm.page_init_done == true)
                       panic("uvm_page_physget: called _after_ bootstrap");

-               if (seg->free_list != freelist)
-                       continue;
+               /* Try to match at front or back on unused segment */
+               if (uvm_page_physunload(lcv, freelist, paddrp) == false) {
+                       if (paddrp == NULL) /* freelist fail, try next */
+                               continue;
+               } else
+                       return true;

-               /* try from front */
-               if (seg->avail_start == seg->start &&
-                   seg->avail_start < seg->avail_end) {
-                       *paddrp = ctob(seg->avail_start);
-                       seg->avail_start++;
-                       seg->start++;
-                       /* nothing left?   nuke it */
-                       if (seg->avail_start == seg->end) {
-                               if (vm_nphysmem == 1)
-                                   panic("uvm_page_physget: out of memory!");
-                               vm_nphysmem--;
-                               for (x = lcv ; x < vm_nphysmem ; x++)
-                                       /* structure copy */
-                                       VM_PHYSMEM_PTR_SWAP(x, x + 1);
-                       }
-                       return (true);
-               }
-
-               /* try from rear */
-               if (seg->avail_end == seg->end &&
-                   seg->avail_start < seg->avail_end) {
-                       *paddrp = ctob(seg->avail_end - 1);
-                       seg->avail_end--;
-                       seg->end--;
-                       /* nothing left?   nuke it */
-                       if (seg->avail_end == seg->start) {
-                               if (vm_nphysmem == 1)
-                                   panic("uvm_page_physget: out of memory!");
-                               vm_nphysmem--;
-                               for (x = lcv ; x < vm_nphysmem ; x++)
-                                       /* structure copy */
-                                       VM_PHYSMEM_PTR_SWAP(x, x + 1);
-                       }
-                       return (true);
-               }
        }

       /* pass2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
-       for (lcv = vm_nphysmem - 1 ; lcv >= 0 ; lcv--)
+       for (lcv = uvm_physseg_get_last() ; uvm_physseg_valid_p(lcv); lcv = uvm_physseg_get_prev(lcv))
#else
-       for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
+       for (lcv = uvm_physseg_get_first() ; uvm_physseg_valid_p(lcv) ; lcv = uvm_physseg_get_next(lcv))
#endif
       {
-               seg = VM_PHYSMEM_PTR(lcv);
-
-               /* any room in this bank? */
-               if (seg->avail_start >= seg->avail_end)
-                       continue;  /* nope */
-
-               *paddrp = ctob(seg->avail_start);
-               seg->avail_start++;
-               /* truncate! */
-               seg->start = seg->avail_start;
-
-               /* nothing left?   nuke it */
-               if (seg->avail_start == seg->end) {
-                       if (vm_nphysmem == 1)
-                               panic("uvm_page_physget: out of memory!");
-                       vm_nphysmem--;
-                       for (x = lcv ; x < vm_nphysmem ; x++)
-                               /* structure copy */
-                               VM_PHYSMEM_PTR_SWAP(x, x + 1);
-               }
-               return (true);
+               /* Try the front regardless. */
+               if (uvm_page_physunload_force(lcv, freelist, paddrp) == false) {
+                       if (paddrp == NULL) /* freelist fail, try next */
+                               continue;
+               } else
+                       return true;
       }
-
-       return (false);        /* whoops! */
+       return false;
}

bool
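
The rewritten uvm_page_physget_freelist() above leans on uvm_page_physunload()
and uvm_page_physunload_force() from uvm_physseg.c.  The contract assumed
here: each takes a segment handle, a free list and an out-pointer; on success
one page is peeled off an unused edge of the segment, its physical address is
written through the pointer and true is returned; the plain variant requires
the segment to be on the requested free list, the _force variant does not.  A
caller-side sketch under that assumption (illustrative only):

        paddr_t pa;
        uvm_physseg_t bank;

        for (bank = uvm_physseg_get_first();
             uvm_physseg_valid_p(bank);
             bank = uvm_physseg_get_next(bank)) {
                if (uvm_page_physunload(bank, VM_FREELIST_DEFAULT, &pa)) {
                        /* got one page at 'pa', already removed from the segment */
                        break;
                }
        }
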
@@ -727,230 +667,6 @@
#endif /* PMAP_STEAL_MEMORY */

/*
- * uvm_page_physload: load physical memory into VM system
- *
- * => all args are PFs
- * => all pages in start/end get vm_page structures
- * => areas marked by avail_start/avail_end get added to the free page pool
- * => we are limited to VM_PHYSSEG_MAX physical memory segments
- */
-
-uvm_physseg_t
-uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
-    paddr_t avail_end, int free_list)
-{
-       int preload, lcv;
-       psize_t npages;
-       struct vm_page *pgs;
-       struct vm_physseg *ps;
-
-       if (uvmexp.pagesize == 0)
-               panic("uvm_page_physload: page size not set!");
-       if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
-               panic("uvm_page_physload: bad free list %d", free_list);
-       if (start >= end)
-               panic("uvm_page_physload: start >= end");
-
-       /*
-        * do we have room?
-        */
-
-       if (vm_nphysmem == VM_PHYSSEG_MAX) {
-               printf("uvm_page_physload: unable to load physical memory "
-                   "segment\n");
-               printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
-                   VM_PHYSSEG_MAX, (long long)start, (long long)end);
-               printf("\tincrease VM_PHYSSEG_MAX\n");
-               return 0;
-       }
-
-       /*
-        * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
-        * called yet, so kmem is not available).
-        */
-
-       for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
-               if (VM_PHYSMEM_PTR(lcv)->pgs)
-                       break;
-       }
-       preload = (lcv == vm_nphysmem);
-
-       /*
-        * if VM is already running, attempt to kmem_alloc vm_page structures
-        */
-
-       if (!preload) {
-               panic("uvm_page_physload: tried to add RAM after vm_mem_init");
-       } else {
-               pgs = NULL;
-               npages = 0;
-       }
-
-       /*
-        * now insert us in the proper place in vm_physmem[]
-        */
-
-#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
-       /* random: put it at the end (easy!) */
-       ps = VM_PHYSMEM_PTR(vm_nphysmem);
-#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
-       {
-               int x;
-               /* sort by address for binary search */
-               for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
-                       if (start < VM_PHYSMEM_PTR(lcv)->start)
-                               break;
-               ps = VM_PHYSMEM_PTR(lcv);
-               /* move back other entries, if necessary ... */
-               for (x = vm_nphysmem ; x > lcv ; x--)
-                       /* structure copy */
-                       VM_PHYSMEM_PTR_SWAP(x, x - 1);
-       }
-#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
-       {
-               int x;
-               /* sort by largest segment first */
-               for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
-                       if ((end - start) >
-                           (VM_PHYSMEM_PTR(lcv)->end - VM_PHYSMEM_PTR(lcv)->start))
-                               break;
-               ps = VM_PHYSMEM_PTR(lcv);
-               /* move back other entries, if necessary ... */
-               for (x = vm_nphysmem ; x > lcv ; x--)
-                       /* structure copy */
-                       VM_PHYSMEM_PTR_SWAP(x, x - 1);
-       }
-#else
-       panic("uvm_page_physload: unknown physseg strategy selected!");
-#endif
-
-       ps->start = start;
-       ps->end = end;
-       ps->avail_start = avail_start;
-       ps->avail_end = avail_end;
-       if (preload) {
-               ps->pgs = NULL;
-       } else {
-               ps->pgs = pgs;
-               ps->lastpg = pgs + npages;
-       }
-       ps->free_list = free_list;
-       vm_nphysmem++;
-
-       if (!preload) {
-               uvmpdpol_reinit();
-       }
-
-       return 0;
-}
-
-/*
- * when VM_PHYSSEG_MAX is 1, we can simplify these functions
- */
-
-#if VM_PHYSSEG_MAX == 1
-static inline int vm_physseg_find_contig(struct vm_physseg *, int, paddr_t, int *);
-#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
-static inline int vm_physseg_find_bsearch(struct vm_physseg *, int, paddr_t, int *);
-#else
-static inline int vm_physseg_find_linear(struct vm_physseg *, int, paddr_t, int *);
-#endif
-
-/*
- * vm_physseg_find: find vm_physseg structure that belongs to a PA
- */
-int
-vm_physseg_find(paddr_t pframe, int *offp)
-{
-
-#if VM_PHYSSEG_MAX == 1
-       return vm_physseg_find_contig(vm_physmem, vm_nphysseg, pframe, offp);
-#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
-       return vm_physseg_find_bsearch(vm_physmem, vm_nphysseg, pframe, offp);
-#else
-       return vm_physseg_find_linear(vm_physmem, vm_nphysseg, pframe, offp);
-#endif
-}
-
-#if VM_PHYSSEG_MAX == 1
-static inline int
-vm_physseg_find_contig(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
-{
-
-       /* 'contig' case */
-       if (pframe >= segs[0].start && pframe < segs[0].end) {
-               if (offp)
-                       *offp = pframe - segs[0].start;
-               return(0);
-       }
-       return(-1);
-}
-
-#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
-
-static inline int
-vm_physseg_find_bsearch(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
-{
-       /* binary search for it */
-       u_int   start, len, guess;
-
-       /*
-        * if try is too large (thus target is less than try) we reduce
-        * the length to trunc(len/2) [i.e. everything smaller than "try"]
-        *
-        * if the try is too small (thus target is greater than try) then
-        * we set the new start to be (try + 1).   this means we need to
-        * reduce the length to (round(len/2) - 1).
-        *
-        * note "adjust" below which takes advantage of the fact that
-        *  (round(len/2) - 1) == trunc((len - 1) / 2)
-        * for any value of len we may have
-        */
-
-       for (start = 0, len = nsegs ; len != 0 ; len = len / 2) {
-               guess = start + (len / 2);      /* try in the middle */
-
-               /* start past our try? */
-               if (pframe >= segs[guess].start) {
-                       /* was try correct? */
-                       if (pframe < segs[guess].end) {
-                               if (offp)
-                                       *offp = pframe - segs[guess].start;
-                               return guess;            /* got it */
-                       }
-                       start = guess + 1;      /* next time, start here */
-                       len--;                  /* "adjust" */
-               } else {
-                       /*
-                        * pframe before try, just reduce length of
-                        * region, done in "for" loop
-                        */
-               }
-       }
-       return(-1);
-}
-
-#else
-
-static inline int
-vm_physseg_find_linear(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
-{
-       /* linear search for it */
-       int     lcv;
-
-       for (lcv = 0; lcv < nsegs; lcv++) {
-               if (pframe >= segs[lcv].start &&
-                   pframe < segs[lcv].end) {
-                       if (offp)
-                               *offp = pframe - segs[lcv].start;
-                       return(lcv);               /* got it */
-               }
-       }
-       return(-1);
-}
-#endif
-
-/*
 * PHYS_TO_VM_PAGE: find vm_page for a PA.   used by MI code to get vm_pages
 * back from an I/O mapping (ugh!).   used in some MD code as well.
 */
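
uvm_page_physload() itself is not going away; only its implementation moves
out of uvm_page.c (into uvm_physseg.c), so MD bootstrap code keeps calling it
unchanged.  A sketch of a typical caller, following the argument convention of
the removed comment above -- all arguments are page frame numbers
(hypothetical variable names):

        uvm_page_physload(atop(physmem_start), atop(physmem_end),
            atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);
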
@@ -958,12 +674,12 @@
uvm_phys_to_vm_page(paddr_t pa)
{
       paddr_t pf = atop(pa);
-       int     off;
-       int     psi;
+       paddr_t off;
+       uvm_physseg_t   upm;

-       psi = vm_physseg_find(pf, &off);
-       if (psi != -1)
-               return(&VM_PHYSMEM_PTR(psi)->pgs[off]);
+       upm = uvm_physseg_find(pf, &off);
+       if (upm != UVM_PHYSSEG_TYPE_INVALID)
+               return uvm_physseg_get_pg(upm, off);
       return(NULL);
}

@@ -987,7 +703,8 @@
       struct vm_page *pg;
       vsize_t bucketcount;
       size_t bucketmemsize, oldbucketmemsize;
-       int lcv, color, i, ocolors;
+       int color, i, ocolors;
+       int lcv;
       struct uvm_cpu *ucpu;

       KASSERT(((newncolors - 1) & newncolors) == 0);
@@ -1221,7 +938,8 @@
uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
    int flags, int strat, int free_list)
{
-       int lcv, try1, try2, zeroit = 0, color;
+       int try1, try2, zeroit = 0, color;
+       int lcv;
       struct uvm_cpu *ucpu;
       struct vm_page *pg;
       lwp_t *l;
@@ -2007,7 +1725,7 @@
uvm_pageismanaged(paddr_t pa)
{

-       return (vm_physseg_find(atop(pa), NULL) != -1);
+       return (uvm_physseg_find(atop(pa), NULL) != UVM_PHYSSEG_TYPE_INVALID);
}

/*
@@ -2017,11 +1735,11 @@
int
uvm_page_lookup_freelist(struct vm_page *pg)
{
-       int lcv;
+       uvm_physseg_t upm;

-       lcv = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
-       KASSERT(lcv != -1);
-       return (VM_PHYSMEM_PTR(lcv)->free_list);
+       upm = uvm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
+       KASSERT(upm != UVM_PHYSSEG_TYPE_INVALID);
+       return uvm_physseg_get_free_list(upm);
}

/*
@@ -2137,7 +1855,8 @@
void
uvm_page_printall(void (*pr)(const char *, ...))
{
-       unsigned i;
+       uvm_physseg_t i;
+       paddr_t pfn;
       struct vm_page *pg;

       (*pr)("%18s %4s %4s %18s %18s"
@@ -2145,8 +1864,14 @@
           " OWNER"
#endif
           "\n", "PAGE", "FLAG", "PQ", "UOBJECT", "UANON");
-       for (i = 0; i < vm_nphysmem; i++) {
-               for (pg = VM_PHYSMEM_PTR(i)->pgs; pg < VM_PHYSMEM_PTR(i)->lastpg; pg++) {
+       for (i = uvm_physseg_get_first();
+            uvm_physseg_valid_p(i);
+            i = uvm_physseg_get_next(i)) {
+               for (pfn = uvm_physseg_get_start(i);
+                    pfn < uvm_physseg_get_end(i);
+                    pfn++) {
+                       pg = PHYS_TO_VM_PAGE(ptoa(pfn));
+
                       (*pr)("%18p %04x %04x %18p %18p",
                           pg, pg->flags, pg->pqflags, pg->uobject,
                           pg->uanon);
Index: uvm/uvm_page.h
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_page.h,v
retrieving revision 1.80
diff -u -r1.80 uvm_page.h
--- uvm/uvm_page.h      23 Mar 2015 07:59:12 -0000      1.80
+++ uvm/uvm_page.h      22 Dec 2016 16:13:39 -0000
@@ -294,24 +294,6 @@
#define VM_PSTRAT_BSEARCH      2
#define VM_PSTRAT_BIGFIRST     3

-/*
- * vm_physseg: describes one segment of physical memory
- */
-struct vm_physseg {
-       paddr_t start;                  /* PF# of first page in segment */
-       paddr_t end;                    /* (PF# of last page in segment) + 1 */
-       paddr_t avail_start;            /* PF# of first free page in segment */
-       paddr_t avail_end;              /* (PF# of last free page in segment) +1  */
-       struct  vm_page *pgs;           /* vm_page structures (from start) */
-       struct  vm_page *lastpg;        /* vm_page structure for end */
-       int     free_list;              /* which free list they belong on */
-       u_int   start_hint;             /* start looking for free pages here */
-                                       /* protected by uvm_fpageqlock */
-#ifdef __HAVE_PMAP_PHYSSEG
-       struct  pmap_physseg pmseg;     /* pmap specific (MD) data */
-#endif
-};
-
#ifdef _KERNEL

/*
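
With struct vm_physseg gone from this header, code that used to poke at the
fields directly goes through the accessors instead.  The correspondence, as
applied throughout this diff (sketch; upm is a uvm_physseg_t handle):

        /* seg->start        */  uvm_physseg_get_start(upm);
        /* seg->end          */  uvm_physseg_get_end(upm);
        /* seg->avail_start  */  uvm_physseg_get_avail_start(upm);
        /* seg->avail_end    */  uvm_physseg_get_avail_end(upm);
        /* seg->free_list    */  uvm_physseg_get_free_list(upm);
        /* seg->start_hint   */  uvm_physseg_get_start_hint(upm);
        /* &seg->pgs[off]    */  uvm_physseg_get_pg(upm, off);
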
@@ -321,21 +303,6 @@
extern bool vm_page_zero_enable;

/*
- * physical memory config is stored in vm_physmem.
- */
-
-#define        VM_PHYSMEM_PTR(i)       (&vm_physmem[i])
-#if VM_PHYSSEG_MAX == 1
-#define VM_PHYSMEM_PTR_SWAP(i, j) /* impossible */
-#else
-#define VM_PHYSMEM_PTR_SWAP(i, j) \
-       do { vm_physmem[(i)] = vm_physmem[(j)]; } while (0)
-#endif
-
-extern struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];
-extern int vm_nphysseg;
-
-/*
 * prototypes: the following prototypes define the interface to pages
 */

@@ -366,10 +333,13 @@

int uvm_page_lookup_freelist(struct vm_page *);

-int vm_physseg_find(paddr_t, int *);
struct vm_page *uvm_phys_to_vm_page(paddr_t);
paddr_t uvm_vm_page_to_phys(const struct vm_page *);

+#if !defined(PMAP_STEAL_MEMORY)
+bool uvm_page_physget(paddr_t *);
+#endif
+
/*
 * macros
 */
Index: uvm/uvm_pglist.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_pglist.c,v
retrieving revision 1.67
diff -u -r1.67 uvm_pglist.c
--- uvm/uvm_pglist.c    26 Oct 2014 01:42:07 -0000      1.67
+++ uvm/uvm_pglist.c    22 Dec 2016 16:13:39 -0000
@@ -116,16 +116,15 @@
}

static int
-uvm_pglistalloc_c_ps(struct vm_physseg *ps, int num, paddr_t low, paddr_t high,
+uvm_pglistalloc_c_ps(uvm_physseg_t psi, int num, paddr_t low, paddr_t high,
    paddr_t alignment, paddr_t boundary, struct pglist *rlist)
{
       signed int candidate, limit, candidateidx, end, idx, skip;
-       struct vm_page *pgs;
       int pagemask;
       bool second_pass;
#ifdef DEBUG
       paddr_t idxpa, lastidxpa;
-       int cidx = 0;   /* XXX: GCC */
+       paddr_t cidx = 0;       /* XXX: GCC */
#endif
#ifdef PGALLOC_VERBOSE
       printf("pgalloc: contig %d pgs from psi %zd\n", num, ps - vm_physmem);
@@ -140,26 +139,26 @@
       /*
        * Make sure that physseg falls within with range to be allocated from.
        */
-       if (high <= ps->avail_start || low >= ps->avail_end)
+       if (high <= uvm_physseg_get_avail_start(psi) || low >= uvm_physseg_get_avail_end(psi))
               return 0;

       /*
        * We start our search at the just after where the last allocation
        * succeeded.
        */
-       candidate = roundup2(max(low, ps->avail_start + ps->start_hint), alignment);
-       limit = min(high, ps->avail_end);
+       candidate = roundup2(max(low, uvm_physseg_get_avail_start(psi) +
+               uvm_physseg_get_start_hint(psi)), alignment);
+       limit = min(high, uvm_physseg_get_avail_end(psi));
       pagemask = ~((boundary >> PAGE_SHIFT) - 1);
       skip = 0;
       second_pass = false;
-       pgs = ps->pgs;

       for (;;) {
               bool ok = true;
               signed int cnt;

               if (candidate + num > limit) {
-                       if (ps->start_hint == 0 || second_pass) {
+                       if (uvm_physseg_get_start_hint(psi) == 0 || second_pass) {
                               /*
                                * We've run past the allowable range.
                                */
@@ -171,8 +170,9 @@
                        * is were we started.
                        */
                       second_pass = true;
-                       candidate = roundup2(max(low, ps->avail_start), alignment);
-                       limit = min(limit, ps->avail_start + ps->start_hint);
+                       candidate = roundup2(max(low, uvm_physseg_get_avail_start(psi)), alignment);
+                       limit = min(limit, uvm_physseg_get_avail_start(psi) +
+                           uvm_physseg_get_start_hint(psi));
                       skip = 0;
                       continue;
               }
@@ -192,16 +192,16 @@
                * Make sure this is a managed physical page.
                */

-               if (vm_physseg_find(candidate, &cidx) != ps - vm_physmem)
+               if (uvm_physseg_find(candidate, &cidx) != psi)
                       panic("pgalloc contig: botch1");
-               if (cidx != candidate - ps->start)
+               if (cidx != candidate - uvm_physseg_get_start(psi))
                       panic("pgalloc contig: botch2");
-               if (vm_physseg_find(candidate + num - 1, &cidx) != ps - vm_physmem)
+               if (uvm_physseg_find(candidate + num - 1, &cidx) != psi)
                       panic("pgalloc contig: botch3");
-               if (cidx != candidate - ps->start + num - 1)
+               if (cidx != candidate - uvm_physseg_get_start(psi) + num - 1)
                       panic("pgalloc contig: botch4");
#endif
-               candidateidx = candidate - ps->start;
+               candidateidx = candidate - uvm_physseg_get_start(psi);
               end = candidateidx + num;

               /*
@@ -220,15 +220,15 @@
                * testing most of those pages again in the next pass.
                */
               for (idx = end - 1; idx >= candidateidx + skip; idx--) {
-                       if (VM_PAGE_IS_FREE(&pgs[idx]) == 0) {
+                       if (VM_PAGE_IS_FREE(uvm_physseg_get_pg(psi, idx)) == 0) {
                               ok = false;
                               break;
                       }

#ifdef DEBUG
                       if (idx > candidateidx) {
-                               idxpa = VM_PAGE_TO_PHYS(&pgs[idx]);
-                               lastidxpa = VM_PAGE_TO_PHYS(&pgs[idx - 1]);
+                               idxpa = VM_PAGE_TO_PHYS(uvm_physseg_get_pg(psi, idx));
+                               lastidxpa = VM_PAGE_TO_PHYS(uvm_physseg_get_pg(psi, idx - 1));
                               if ((lastidxpa + PAGE_SIZE) != idxpa) {
                                       /*
                                        * Region not contiguous.
@@ -249,7 +249,7 @@

               if (ok) {
                       while (skip-- > 0) {
-                               KDASSERT(VM_PAGE_IS_FREE(&pgs[candidateidx + skip]));
+                               KDASSERT(VM_PAGE_IS_FREE(uvm_physseg_get_pg(psi, candidateidx + skip)));
                       }
#ifdef PGALLOC_VERBOSE
                       printf(": ok\n");
@@ -280,19 +280,22 @@
       /*
        * we have a chunk of memory that conforms to the requested constraints.
        */
-       for (idx = candidateidx, pgs += idx; idx < end; idx++, pgs++)
-               uvm_pglist_add(pgs, rlist);
+       for (idx = candidateidx; idx < end; idx++)
+               uvm_pglist_add(uvm_physseg_get_pg(psi, idx), rlist);

       /*
        * the next time we need to search this segment, start after this
        * chunk of pages we just allocated.
        */
-       ps->start_hint = candidate + num - ps->avail_start;
-       KASSERTMSG(ps->start_hint <= ps->avail_end - ps->avail_start,
+       uvm_physseg_set_start_hint(psi, candidate + num -
+           uvm_physseg_get_avail_start(psi));
+       KASSERTMSG(uvm_physseg_get_start_hint(psi) <=
+           uvm_physseg_get_avail_end(psi) - uvm_physseg_get_avail_start(psi),
           "%x %u (%#x) <= %#"PRIxPADDR" - %#"PRIxPADDR" (%#"PRIxPADDR")",
           candidate + num,
-           ps->start_hint, ps->start_hint, ps->avail_end, ps->avail_start,
-           ps->avail_end - ps->avail_start);
+           uvm_physseg_get_start_hint(psi), uvm_physseg_get_start_hint(psi),
+           uvm_physseg_get_avail_end(psi), uvm_physseg_get_avail_start(psi),
+           uvm_physseg_get_avail_end(psi) - uvm_physseg_get_avail_start(psi));

#ifdef PGALLOC_VERBOSE
       printf("got %d pgs\n", num);
@@ -304,10 +307,10 @@
uvm_pglistalloc_contig(int num, paddr_t low, paddr_t high, paddr_t alignment,
    paddr_t boundary, struct pglist *rlist)
{
-       int fl, psi;
-       struct vm_physseg *ps;
+       int fl;
       int error;

+       uvm_physseg_t psi;
       /* Default to "lose". */
       error = ENOMEM;

@@ -322,17 +325,16 @@

       for (fl = 0; fl < VM_NFREELIST; fl++) {
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
-               for (psi = vm_nphysseg - 1 ; psi >= 0 ; psi--)
+               for (psi = uvm_physseg_get_last(); uvm_physseg_valid_p(psi); psi = uvm_physseg_get_prev(psi))
+
#else
-               for (psi = 0 ; psi < vm_nphysseg ; psi++)
+               for (psi = uvm_physseg_get_first(); uvm_physseg_valid_p(psi); psi = uvm_physseg_get_next(psi))
#endif
               {
-                       ps = &vm_physmem[psi];
-
-                       if (ps->free_list != fl)
+                       if (uvm_physseg_get_free_list(psi) != fl)
                               continue;

-                       num -= uvm_pglistalloc_c_ps(ps, num, low, high,
+                       num -= uvm_pglistalloc_c_ps(psi, num, low, high,
                                                   alignment, boundary, rlist);
                       if (num == 0) {
#ifdef PGALLOC_VERBOSE
@@ -358,59 +360,62 @@
}

static int
-uvm_pglistalloc_s_ps(struct vm_physseg *ps, int num, paddr_t low, paddr_t high,
+uvm_pglistalloc_s_ps(uvm_physseg_t psi, int num, paddr_t low, paddr_t high,
    struct pglist *rlist)
{
       int todo, limit, candidate;
       struct vm_page *pg;
       bool second_pass;
#ifdef PGALLOC_VERBOSE
-       printf("pgalloc: simple %d pgs from psi %zd\n", num, ps - vm_physmem);
+       printf("pgalloc: simple %d pgs from psi %zd\n", num, psi);
#endif

       KASSERT(mutex_owned(&uvm_fpageqlock));
-       KASSERT(ps->start <= ps->avail_start);
-       KASSERT(ps->start <= ps->avail_end);
-       KASSERT(ps->avail_start <= ps->end);
-       KASSERT(ps->avail_end <= ps->end);
+       KASSERT(uvm_physseg_get_start(psi) <= uvm_physseg_get_avail_start(psi));
+       KASSERT(uvm_physseg_get_start(psi) <= uvm_physseg_get_avail_end(psi));
+       KASSERT(uvm_physseg_get_avail_start(psi) <= uvm_physseg_get_end(psi));
+       KASSERT(uvm_physseg_get_avail_end(psi) <= uvm_physseg_get_end(psi));

       low = atop(low);
       high = atop(high);
       todo = num;
-       candidate = max(low, ps->avail_start + ps->start_hint);
-       limit = min(high, ps->avail_end);
-       pg = &ps->pgs[candidate - ps->start];
+       candidate = max(low, uvm_physseg_get_avail_start(psi) +
+           uvm_physseg_get_start_hint(psi));
+       limit = min(high, uvm_physseg_get_avail_end(psi));
+       pg = uvm_physseg_get_pg(psi, candidate - uvm_physseg_get_start(psi));
       second_pass = false;

       /*
        * Make sure that physseg falls within with range to be allocated from.
        */
-       if (high <= ps->avail_start || low >= ps->avail_end)
+       if (high <= uvm_physseg_get_avail_start(psi) ||
+           low >= uvm_physseg_get_avail_end(psi))
               return 0;

again:
       for (;; candidate++, pg++) {
               if (candidate >= limit) {
-                       if (ps->start_hint == 0 || second_pass) {
+                       if (uvm_physseg_get_start_hint(psi) == 0 || second_pass) {
                               candidate = limit - 1;
                               break;
                       }
                       second_pass = true;
-                       candidate = max(low, ps->avail_start);
-                       limit = min(limit, ps->avail_start + ps->start_hint);
-                       pg = &ps->pgs[candidate - ps->start];
+                       candidate = max(low, uvm_physseg_get_avail_start(psi));
+                       limit = min(limit, uvm_physseg_get_avail_start(psi) +
+                           uvm_physseg_get_start_hint(psi));
+                       pg = uvm_physseg_get_pg(psi, candidate - uvm_physseg_get_start(psi));
                       goto again;
               }
#if defined(DEBUG)
               {
-                       int cidx = 0;
-                       const int bank = vm_physseg_find(candidate, &cidx);
-                       KDASSERTMSG(bank == ps - vm_physmem,
-                           "vm_physseg_find(%#x) (%d) != ps %zd",
-                            candidate, bank, ps - vm_physmem);
-                       KDASSERTMSG(cidx == candidate - ps->start,
-                           "vm_physseg_find(%#x): %#x != off %"PRIxPADDR,
-                            candidate, cidx, candidate - ps->start);
+                       paddr_t cidx = 0;
+                       const uvm_physseg_t bank = uvm_physseg_find(candidate, &cidx);
+                       KDASSERTMSG(bank == psi,
+                           "uvm_physseg_find(%#x) (%"PRIxPHYSMEM ") != psi %"PRIxPHYSMEM,
+                            candidate, bank, psi);
+                       KDASSERTMSG(cidx == candidate - uvm_physseg_get_start(psi),
+                           "uvm_physseg_find(%#x): %#"PRIxPADDR" != off %"PRIxPADDR,
+                            candidate, cidx, candidate - uvm_physseg_get_start(psi));
               }
#endif
               if (VM_PAGE_IS_FREE(pg) == 0)
@@ -426,12 +431,16 @@
        * The next time we need to search this segment,
        * start just after the pages we just allocated.
        */
-       ps->start_hint = candidate + 1 - ps->avail_start;
-       KASSERTMSG(ps->start_hint <= ps->avail_end - ps->avail_start,
+       uvm_physseg_set_start_hint(psi, candidate + 1 - uvm_physseg_get_avail_start(psi));
+       KASSERTMSG(uvm_physseg_get_start_hint(psi) <= uvm_physseg_get_avail_end(psi) -
+           uvm_physseg_get_avail_start(psi),
           "%#x %u (%#x) <= %#"PRIxPADDR" - %#"PRIxPADDR" (%#"PRIxPADDR")",
           candidate + 1,
-           ps->start_hint, ps->start_hint, ps->avail_end, ps->avail_start,
-           ps->avail_end - ps->avail_start);
+           uvm_physseg_get_start_hint(psi),
+           uvm_physseg_get_start_hint(psi),
+           uvm_physseg_get_avail_end(psi),
+           uvm_physseg_get_avail_start(psi),
+           uvm_physseg_get_avail_end(psi) - uvm_physseg_get_avail_start(psi));

#ifdef PGALLOC_VERBOSE
       printf("got %d pgs\n", num - todo);
@@ -443,9 +452,10 @@
uvm_pglistalloc_simple(int num, paddr_t low, paddr_t high,
    struct pglist *rlist, int waitok)
{
-       int fl, psi, error;
-       struct vm_physseg *ps;
+       int fl, error;

+       uvm_physseg_t psi;
+
       /* Default to "lose". */
       error = ENOMEM;

@@ -461,17 +471,16 @@

       for (fl = 0; fl < VM_NFREELIST; fl++) {
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
-               for (psi = vm_nphysseg - 1 ; psi >= 0 ; psi--)
+               for (psi = uvm_physseg_get_last(); uvm_physseg_valid_p(psi); psi = uvm_physseg_get_prev(psi))
+
#else
-               for (psi = 0 ; psi < vm_nphysseg ; psi++)
+               for (psi = uvm_physseg_get_first(); uvm_physseg_valid_p(psi); psi = uvm_physseg_get_next(psi))
#endif
               {
-                       ps = &vm_physmem[psi];
-
-                       if (ps->free_list != fl)
+                       if (uvm_physseg_get_free_list(psi) != fl)
                               continue;

-                       num -= uvm_pglistalloc_s_ps(ps, num, low, high, rlist);
+                       num -= uvm_pglistalloc_s_ps(psi, num, low, high, rlist);
                       if (num == 0) {
                               error = 0;
                               goto out;
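
In uvm_pglistalloc_c_ps() and uvm_pglistalloc_s_ps() above, direct indexing
into ps->pgs[] becomes uvm_physseg_get_pg(psi, idx), and the resume hint is
read and written through the start_hint accessors.  Condensed sketch of that
pattern (fragment only; psi, candidateidx, end, candidate and num are the
locals from the hunks above):

        int idx;

        for (idx = candidateidx; idx < end; idx++) {
                struct vm_page *pg = uvm_physseg_get_pg(psi, idx);

                if (VM_PAGE_IS_FREE(pg) == 0)
                        break;          /* run of free pages ends here */
        }

        /* remember where to resume the next scan of this segment */
        uvm_physseg_set_start_hint(psi,
            candidate + num - uvm_physseg_get_avail_start(psi));
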
Index: uvm/uvm_physseg.h
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_physseg.h,v
retrieving revision 1.3
diff -u -r1.3 uvm_physseg.h
--- uvm/uvm_physseg.h   22 Dec 2016 15:54:35 -0000      1.3
+++ uvm/uvm_physseg.h   22 Dec 2016 16:13:39 -0000
@@ -9,10 +9,8 @@
#define _UVM_UVM_PHYSSEG_H_

#if defined(_KERNEL_OPT)
-#if notyet
#include "opt_uvm_hotplug.h"
#endif
-#endif

#include <sys/cdefs.h>
#include <sys/param.h>
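
Un-gating the opt_uvm_hotplug.h include lets this header see the UVM_HOTPLUG
kernel option (generated by config(1) from "options UVM_HOTPLUG"), which is
presumably what selects between the dynamic, hotplug-capable backend and a
static array backend behind the same accessor API.  Expected shape of the
conditional (an assumption, not verified against uvm_physseg.c):

        #include "opt_uvm_hotplug.h"    /* may define UVM_HOTPLUG */

        #if defined(UVM_HOTPLUG)
        /* dynamically managed segments; may be plugged/unplugged at run time */
        #else
        /* static, array-backed segments; same uvm_physseg_*() interface */
        #endif
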
Index: uvm/pmap/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/pmap/pmap.c,v
retrieving revision 1.25
diff -u -r1.25 pmap.c
--- uvm/pmap/pmap.c     1 Dec 2016 02:15:08 -0000       1.25
+++ uvm/pmap/pmap.c     22 Dec 2016 16:13:39 -0000
@@ -112,6 +112,7 @@
#include <sys/atomic.h>

#include <uvm/uvm.h>
+#include <uvm/uvm_physseg.h>

#if defined(MULTIPROCESSOR) && defined(PMAP_VIRTUAL_CACHE_ALIASES) \
    && !defined(PMAP_NO_PV_UNCACHED)
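
The pmap_steal_memory() hunk below replaces the hand-rolled segment shrinking
and removal with uvm_physseg_unplug(pfn, npgs), assumed here to remove npgs
page frames starting at pfn from the segment containing them and to delete
the segment once it is empty.  Caller-side sketch under that assumption
(illustrative only; bank and size as in the hunk below):

        paddr_t pa;
        psize_t npgs = atop(round_page(size));  /* pages to steal */

        pa = ptoa(uvm_physseg_get_start(bank));
        uvm_physseg_unplug(atop(pa), npgs);
        /* pa .. pa + ptoa(npgs) is now owned by the caller */
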
@@ -452,37 +453,39 @@
       size_t npgs;
       paddr_t pa;
       vaddr_t va;
-       struct vm_physseg *maybe_seg = NULL;
-       u_int maybe_bank = vm_nphysseg;
+
+       uvm_physseg_t maybe_bank = UVM_PHYSSEG_TYPE_INVALID;

       size = round_page(size);
       npgs = atop(size);

       aprint_debug("%s: need %zu pages\n", __func__, npgs);

-       for (u_int bank = 0; bank < vm_nphysseg; bank++) {
-               struct vm_physseg * const seg = VM_PHYSMEM_PTR(bank);
+       for (uvm_physseg_t bank = uvm_physseg_get_first();
+            uvm_physseg_valid_p(bank);
+            bank = uvm_physseg_get_next(bank)) {
+
               if (uvm.page_init_done == true)
                       panic("pmap_steal_memory: called _after_ bootstrap");

-               aprint_debug("%s: seg %u: %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR"\n",
+               aprint_debug("%s: seg %"PRIxPHYSMEM": %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR"\n",
                   __func__, bank,
-                   seg->avail_start, seg->start,
-                   seg->avail_end, seg->end);
+                   uvm_physseg_get_avail_start(bank), uvm_physseg_get_start(bank),
+                   uvm_physseg_get_avail_end(bank), uvm_physseg_get_end(bank));

-               if (seg->avail_start != seg->start
-                   || seg->avail_start >= seg->avail_end) {
-                       aprint_debug("%s: seg %u: bad start\n", __func__, bank);
+               if (uvm_physseg_get_avail_start(bank) != uvm_physseg_get_start(bank)
+                   || uvm_physseg_get_avail_start(bank) >= uvm_physseg_get_avail_end(bank)) {
+                       aprint_debug("%s: seg %"PRIxPHYSMEM": bad start\n", __func__, bank);
                       continue;
               }

-               if (seg->avail_end - seg->avail_start < npgs) {
-                       aprint_debug("%s: seg %u: too small for %zu pages\n",
+               if (uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank) < npgs) {
+                       aprint_debug("%s: seg %"PRIxPHYSMEM": too small for %zu pages\n",
                           __func__, bank, npgs);
                       continue;
               }

-               if (!pmap_md_ok_to_steal_p(seg, npgs)) {
+               if (!pmap_md_ok_to_steal_p(bank, npgs)) {
                       continue;
               }

@@ -490,44 +493,24 @@
                * Always try to allocate from the segment with the least
                * amount of space left.
                */
-#define VM_PHYSMEM_SPACE(s)    ((s)->avail_end - (s)->avail_start)
-               if (maybe_seg == NULL
-                   || VM_PHYSMEM_SPACE(seg) < VM_PHYSMEM_SPACE(maybe_seg)) {
-                       maybe_seg = seg;
+#define VM_PHYSMEM_SPACE(b)    ((uvm_physseg_get_avail_end(b)) - (uvm_physseg_get_avail_start(b)))
+               if (uvm_physseg_valid_p(maybe_bank) == false
+                   || VM_PHYSMEM_SPACE(bank) < VM_PHYSMEM_SPACE(maybe_bank)) {
                       maybe_bank = bank;
               }
       }

-       if (maybe_seg) {
-               struct vm_physseg * const seg = maybe_seg;
-               u_int bank = maybe_bank;
-
+       if (uvm_physseg_valid_p(maybe_bank)) {
+               const uvm_physseg_t bank = maybe_bank;
+
               /*
                * There are enough pages here; steal them!
                */
-               pa = ptoa(seg->avail_start);
-               seg->avail_start += npgs;
-               seg->start += npgs;
-
-               /*
-                * Have we used up this segment?
-                */
-               if (seg->avail_start == seg->end) {
-                       if (vm_nphysseg == 1)
-                               panic("pmap_steal_memory: out of memory!");
+               pa = ptoa(uvm_physseg_get_start(bank));
+               uvm_physseg_unplug(atop(pa), npgs);

-                       aprint_debug("%s: seg %u: %zu pages stolen (removed)\n",
-                           __func__, bank, npgs);
-                       /* Remove this segment from the list. */
-                       vm_nphysseg--;
-                       for (u_int x = bank; x < vm_nphysseg; x++) {
-                               /* structure copy */
-                               VM_PHYSMEM_PTR_SWAP(x, x + 1);
-                       }
-               } else {
-                       aprint_debug("%s: seg %u: %zu pages stolen (%#"PRIxPADDR" left)\n",
-                           __func__, bank, npgs, VM_PHYSMEM_SPACE(seg));
-               }
+               aprint_debug("%s: seg %"PRIxPHYSMEM": %zu pages stolen (%#"PRIxPADDR" left)\n",
+                   __func__, bank, npgs, VM_PHYSMEM_SPACE(bank));

               va = pmap_md_map_poolpage(pa, size);
               memset((void *)va, 0, size);