- vps = VM_PHYSMEM_PTR(vm_nphysseg - 1);
+ bank = uvm_physseg_get_last();
/* shrink so that it'll fit in the last segment */
- if ((vps->avail_end - vps->avail_start) < atop(sz))
- sz = ptoa(vps->avail_end - vps->avail_start);
+ if (uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank) < atop(sz))
+ sz = ptoa(uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank));
- for (bank = 0; bank < vm_nphysseg; bank++) {
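+	/*
+	 * Segments are now walked via opaque uvm_physseg_t handles
+	 * rather than by indexing the vm_physmem[] array directly.
+	 */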
+ for (bank = uvm_physseg_get_first();
+ uvm_physseg_valid_p(bank);
+ bank = uvm_physseg_get_next(bank)) {
if (uvm.page_init_done == true)
panic("pmap_steal_memory: called _after_ bootstrap");
#if 0
- printf(" bank %d: avail_start 0x%lx, start 0x%lx, "
- "avail_end 0x%lx\n", bank, VM_PHYSMEM_PTR(bank)->avail_start,
+ printf(" bank %d: avail_start 0x%"PRIxPADDR", start 0x%"PRIxPADDR", "
+ "avail_end 0x%"PRIxPADDR"\n", bank, VM_PHYSMEM_PTR(bank)->avail_start,
VM_PHYSMEM_PTR(bank)->start, VM_PHYSMEM_PTR(bank)->avail_end);
#endif
- if ((VM_PHYSMEM_PTR(bank)->avail_end - VM_PHYSMEM_PTR(bank)->avail_start)
+ if (uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank)
< npgs)
continue;
/*
* There are enough pages here; steal them!
*/
- pa = ptoa(VM_PHYSMEM_PTR(bank)->avail_start);
- VM_PHYSMEM_PTR(bank)->avail_start += npgs;
- VM_PHYSMEM_PTR(bank)->start += npgs;
-
- /*
- * Have we used up this segment?
- */
- if (VM_PHYSMEM_PTR(bank)->avail_start == VM_PHYSMEM_PTR(bank)->end) {
- if (vm_nphysseg == 1)
- panic("pmap_steal_memory: out of memory!");
-
- /* Remove this segment from the list. */
- vm_nphysseg--;
- for (x = bank; x < vm_nphysseg; x++) {
- /* structure copy */
- VM_PHYSMEM_PTR_SWAP(x, x + 1);
- }
- }
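+	/*
+	 * uvm_physseg_unplug() carves the stolen pages out of the segment
+	 * and deletes the segment outright if it becomes empty, replacing
+	 * the manual avail_start adjustment and compaction loop above.
+	 */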
+ pa = ptoa(uvm_physseg_get_start(bank));
+ uvm_physseg_unplug(atop(pa), npgs);
/*
* Mark all memory pages, then unmark pages that are uninteresting.
@@ -805,10 +806,25 @@
setbit(sparse_dump_physmap, p);
}
}
- for (i = 0; i < vm_nphysseg; i++) {
- struct vm_physseg *seg = VM_PHYSMEM_PTR(i);
+ for (upm = uvm_physseg_get_first();
+ uvm_physseg_valid_p(upm);
+ upm = uvm_physseg_get_next(upm)) {
+ paddr_t pfn;
- for (pg = seg->pgs; pg < seg->lastpg; pg++) {
+
+ const paddr_t startpfn = uvm_physseg_get_start(upm);
+ const paddr_t endpfn = uvm_physseg_get_end(upm);
+
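+	/* the getters are expected to return (paddr_t) -1 for an invalid handle */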
+ KASSERT(startpfn != -1 && endpfn != -1);
+
+ /*
+	 * We assume that every page in [start, end) has been
+	 * uvm_page_physload()ed
+ */
+	for (pfn = startpfn; pfn < endpfn; pfn++) {
+ pg = PHYS_TO_VM_PAGE(ptoa(pfn));
if (pg->uanon || (pg->pqflags & PQ_FREE) ||
(pg->uobject && pg->uobject->pgops)) {
p = VM_PAGE_TO_PHYS(pg) / PAGE_SIZE;
@@ -1452,57 +1468,30 @@
static void
init_x86_64_msgbuf(void)
{
- /* Message buffer is located at end of core. */
- struct vm_physseg *vps;
- psize_t sz = round_page(MSGBUFSIZE);
- psize_t reqsz = sz;
- int x;
-
- search_again:
- vps = NULL;
-
- for (x = 0; x < vm_nphysseg; x++) {
- vps = VM_PHYSMEM_PTR(x);
- if (ctob(vps->avail_end) == avail_end)
- break;
- }
- if (x == vm_nphysseg)
- panic("init_x86_64: can't find end of memory");
+ /* Message buffer is located at end of core. */
+ psize_t reqsz = round_page(MSGBUFSIZE);
+ psize_t sz = 0;
- /* Shrink so it'll fit in the last segment. */
- if ((vps->avail_end - vps->avail_start) < atop(sz))
- sz = ctob(vps->avail_end - vps->avail_start);
-
- vps->avail_end -= atop(sz);
- vps->end -= atop(sz);
- msgbuf_p_seg[msgbuf_p_cnt].sz = sz;
- msgbuf_p_seg[msgbuf_p_cnt++].paddr = ctob(vps->avail_end);
-
- /* Remove the last segment if it now has no pages. */
- if (vps->start == vps->end) {
- for (vm_nphysseg--; x < vm_nphysseg; x++)
- VM_PHYSMEM_PTR_SWAP(x, x + 1);
- }
-
- /* Now find where the new avail_end is. */
- for (avail_end = 0, x = 0; x < vm_nphysseg; x++)
- if (VM_PHYSMEM_PTR(x)->avail_end > avail_end)
- avail_end = VM_PHYSMEM_PTR(x)->avail_end;
- avail_end = ctob(avail_end);
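+	/*
+	 * Steal pages one at a time with uvm_page_physget() and coalesce
+	 * physically contiguous ones into a single msgbuf segment.
+	 */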
+ for (sz = 0; sz < reqsz; sz += PAGE_SIZE) {
+ paddr_t stolenpa;
- if (sz == reqsz)
- return;
+ if (!uvm_page_physget(&stolenpa))
+ break;
- reqsz -= sz;
- if (msgbuf_p_cnt == VM_PHYSSEG_MAX) {
- /* No more segments available, bail out. */
- printf("WARNING: MSGBUFSIZE (%zu) too large, using %zu.\n",
- (size_t)MSGBUFSIZE, (size_t)(MSGBUFSIZE - reqsz));
- return;
+		if (msgbuf_p_cnt > 0 &&
+		    stolenpa == (msgbuf_p_seg[msgbuf_p_cnt - 1].paddr
+		    + msgbuf_p_seg[msgbuf_p_cnt - 1].sz)) {
+			/* contiguous: append it to current buf alloc */
+			msgbuf_p_seg[msgbuf_p_cnt - 1].sz += PAGE_SIZE;
+		} else if (msgbuf_p_cnt < VM_PHYSSEG_MAX) {
+			/* non-contiguous: start a new msgbuf seg */
+			msgbuf_p_seg[msgbuf_p_cnt].sz = PAGE_SIZE;
+			msgbuf_p_seg[msgbuf_p_cnt++].paddr = stolenpa;
+		} else
+			break;	/* no segment slots left; use what we have */
}
- sz = reqsz;
- goto search_again;
+ if (sz != reqsz)
+ printf("%s: could only allocate %ld bytes of requested %ld bytes\n",
+ __func__, sz, reqsz);
}
static void
Index: arch/arm/arm32/bus_dma.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/arm32/bus_dma.c,v
retrieving revision 1.96
diff -u -r1.96 bus_dma.c
--- arch/arm/arm32/bus_dma.c 5 Nov 2016 14:26:23 -0000 1.96
+++ arch/arm/arm32/bus_dma.c 22 Dec 2016 16:13:11 -0000
@@ -1365,11 +1365,11 @@
-	 * The page can only be direct mapped if was allocated out
+	 * The page can only be direct mapped if it was allocated out
* of the arm poolpage vm freelist.
*/
- int lcv = vm_physseg_find(atop(pa), NULL);
- KASSERT(lcv != -1);
+ uvm_physseg_t upm = uvm_physseg_find(atop(pa), NULL);
+ KASSERT(uvm_physseg_valid_p(upm));
if (direct_mapable) {
direct_mapable =
- (arm_poolpage_vmfreelist == VM_PHYSMEM_PTR(lcv)->free_list);
+ (arm_poolpage_vmfreelist == uvm_physseg_get_free_list(upm));
}
#endif
Index: arch/arm/arm32/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/arm32/pmap.c,v
retrieving revision 1.341
diff -u -r1.341 pmap.c
--- arch/arm/arm32/pmap.c 17 Dec 2016 14:36:29 -0000 1.341
+++ arch/arm/arm32/pmap.c 22 Dec 2016 16:13:11 -0000
@@ -6453,8 +6453,8 @@
* One could argue whether this should be the entire memory or just
* the memory that is useable in a user process.
*/
- avail_start = ptoa(VM_PHYSMEM_PTR(0)->start);
- avail_end = ptoa(VM_PHYSMEM_PTR(vm_nphysseg - 1)->end);
+ avail_start = ptoa(uvm_physseg_get_avail_start(uvm_physseg_get_first()));
+ avail_end = ptoa(uvm_physseg_get_avail_end(uvm_physseg_get_last()));
/*
* Now we need to free enough pv_entry structures to allow us to get
Index: arch/i386/i386/machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/i386/i386/machdep.c,v
retrieving revision 1.771
diff -u -r1.771 machdep.c
--- arch/i386/i386/machdep.c 22 Dec 2016 14:47:58 -0000 1.771
+++ arch/i386/i386/machdep.c 22 Dec 2016 16:13:23 -0000
@@ -1037,57 +1037,30 @@
static void
init386_msgbuf(void)
{
- /* Message buffer is located at end of core. */
- struct vm_physseg *vps;
- psize_t sz = round_page(MSGBUFSIZE);
- psize_t reqsz = sz;
- unsigned int x;
-
- search_again:
- vps = NULL;
- for (x = 0; x < vm_nphysseg; ++x) {
- vps = VM_PHYSMEM_PTR(x);
- if (ctob(vps->avail_end) == avail_end) {
- break;
- }
- }
- if (x == vm_nphysseg)
- panic("init386: can't find end of memory");
+ /* Message buffer is located at end of core. */
+ psize_t reqsz = round_page(MSGBUFSIZE);
+ psize_t sz = 0;
- /* Shrink so it'll fit in the last segment. */
- if (vps->avail_end - vps->avail_start < atop(sz))
- sz = ctob(vps->avail_end - vps->avail_start);
-
- vps->avail_end -= atop(sz);
- vps->end -= atop(sz);
- msgbuf_p_seg[msgbuf_p_cnt].sz = sz;
- msgbuf_p_seg[msgbuf_p_cnt++].paddr = ctob(vps->avail_end);
-
- /* Remove the last segment if it now has no pages. */
- if (vps->start == vps->end) {
- for (--vm_nphysseg; x < vm_nphysseg; x++)
- VM_PHYSMEM_PTR_SWAP(x, x + 1);
- }
-
- /* Now find where the new avail_end is. */
- for (avail_end = 0, x = 0; x < vm_nphysseg; x++)
- if (VM_PHYSMEM_PTR(x)->avail_end > avail_end)
- avail_end = VM_PHYSMEM_PTR(x)->avail_end;
- avail_end = ctob(avail_end);
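+	/*
+	 * As on amd64: steal pages one at a time with uvm_page_physget()
+	 * and coalesce contiguous ones into a single msgbuf segment.
+	 */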
+ for (sz = 0; sz < reqsz; sz += PAGE_SIZE) {
+ paddr_t stolenpa;
- if (sz == reqsz)
- return;
+ if (!uvm_page_physget(&stolenpa))
+ break;
- reqsz -= sz;
- if (msgbuf_p_cnt == VM_PHYSSEG_MAX) {
- /* No more segments available, bail out. */
- printf("WARNING: MSGBUFSIZE (%zu) too large, using %zu.\n",
- (size_t)MSGBUFSIZE, (size_t)(MSGBUFSIZE - reqsz));
- return;
+		if (msgbuf_p_cnt > 0 &&
+		    stolenpa == (msgbuf_p_seg[msgbuf_p_cnt - 1].paddr
+		    + msgbuf_p_seg[msgbuf_p_cnt - 1].sz)) {
+			/* contiguous: append it to current buf alloc */
+			msgbuf_p_seg[msgbuf_p_cnt - 1].sz += PAGE_SIZE;
+		} else if (msgbuf_p_cnt < VM_PHYSSEG_MAX) {
+			/* non-contiguous: start a new msgbuf seg */
+			msgbuf_p_seg[msgbuf_p_cnt].sz = PAGE_SIZE;
+			msgbuf_p_seg[msgbuf_p_cnt++].paddr = stolenpa;
+		} else
+			break;	/* no segment slots left; use what we have */
}
- sz = reqsz;
- goto search_again;
+ if (sz != reqsz)
+ printf("%s: could only allocate %ld bytes of requested %ld bytes\n",
+ __func__, sz, reqsz);
}
#ifndef XEN
Index: arch/ia64/ia64/machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/ia64/ia64/machdep.c,v
retrieving revision 1.36
diff -u -r1.36 machdep.c
--- arch/ia64/ia64/machdep.c 22 Dec 2016 14:47:58 -0000 1.36
+++ arch/ia64/ia64/machdep.c 22 Dec 2016 16:13:23 -0000
@@ -175,18 +175,19 @@
* Display any holes after the first chunk of extended memory.
*/
if (bootverbose) {
- int lcv, sizetmp;
-
+ int sizetmp, vm_nphysseg;
+ uvm_physseg_t upm;
+
printf("Physical memory chunk(s):\n");
- for (lcv = 0;
- lcv < vm_nphysseg || VM_PHYSMEM_PTR(lcv)->avail_end != 0;
- lcv++) {
- sizetmp = VM_PHYSMEM_PTR(lcv)->avail_end -
- VM_PHYSMEM_PTR(lcv)->avail_start;
+ for (vm_nphysseg = 0, upm = uvm_physseg_get_first();
+ uvm_physseg_valid_p(upm);
+ vm_nphysseg++, upm = uvm_physseg_get_next(upm)) {
+ sizetmp = uvm_physseg_get_avail_end(upm) -
+ uvm_physseg_get_avail_start(upm);
- if ((VM_PHYSMEM_PTR(lcv)->avail_end - VM_PHYSMEM_PTR(lcv)->avail_start)
+ if ((uvm_physseg_get_avail_end(upm) - uvm_physseg_get_avail_start(upm))
< npgs)
continue;
/*
* There are enough pages here; steal them!
*/
- pa = ptoa(VM_PHYSMEM_PTR(lcv)->avail_start);
- VM_PHYSMEM_PTR(lcv)->avail_start += npgs;
- VM_PHYSMEM_PTR(lcv)->start += npgs;
-
- /*
- * Have we used up this segment?
- */
- if (VM_PHYSMEM_PTR(lcv)->avail_start ==
- VM_PHYSMEM_PTR(lcv)->end) {
- if (vm_nphysseg == 1)
- panic("pmap_steal_memory: out of memory!");
-
- /* Remove this segment from the list. */
- vm_nphysseg--;
- for (x = lcv; x < vm_nphysseg; x++) {
- /* structure copy */
- VM_PHYSMEM_PTR_SWAP(x, x + 1);
- }
- }
+	pa = ptoa(uvm_physseg_get_start(upm));
+ uvm_physseg_unplug(atop(pa), npgs);
/*
@@ -412,7 +414,7 @@
struct pv_header *pvh;
int rv;
int npages;
- int bank;
+ uvm_physseg_t bank;
PMAP_DPRINTF(PDB_FOLLOW, ("pmap_init()\n"));
@@ -434,8 +436,10 @@
* Allocate memory for random pmap data structures. Includes the
* initial segment table, pv_head_table and pmap_attributes.
*/
- for (page_cnt = 0, bank = 0; bank < vm_nphysseg; bank++)
- page_cnt += VM_PHYSMEM_PTR(bank)->end - VM_PHYSMEM_PTR(bank)->start;
+ for (page_cnt = 0, bank = uvm_physseg_get_first();
+ uvm_physseg_valid_p(bank);
+ bank = uvm_physseg_get_next(bank))
+ page_cnt += uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
s = M68K_STSIZE; /* Segtabzero */
s += page_cnt * sizeof(struct pv_header); /* pv table */
s = round_page(s);
@@ -461,9 +465,11 @@
* assign them to the memory segments.
*/
pvh = pv_table;
- for (bank = 0; bank < vm_nphysseg; bank++) {
- npages = VM_PHYSMEM_PTR(bank)->end - VM_PHYSMEM_PTR(bank)->start;
- VM_PHYSMEM_PTR(bank)->pmseg.pvheader = pvh;
+ for (bank = uvm_physseg_get_first();
+ uvm_physseg_valid_p(bank);
+ bank = uvm_physseg_get_next(bank)) {
+ npages = uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
+ uvm_physseg_get_pmseg(bank)->pvheader = pvh;
pvh += npages;
}
@@ -1704,17 +1710,20 @@
static void
pmap_collect(void)
{
- int bank, s;
-
+ int s;
+ uvm_physseg_t bank;
+
/*
* XXX This is very bogus. We should handle kernel PT
* XXX pages much differently.
*/
- /* Remove the [last] segment if it now has no pages. */
- if (vps->start == vps->end) {
- for (vm_nphysseg--; bank < vm_nphysseg - 1; bank++) {
- VM_PHYSMEM_PTR_SWAP(bank, bank + 1);
- }
- }
-
/* warn if the message buffer had to be shrunk */
if (sz != reqsz)
printf("WARNING: %"PRIdVSIZE" bytes not available for msgbuf "
Index: arch/mips/mips/pmap_machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/mips/mips/pmap_machdep.c,v
retrieving revision 1.11
diff -u -r1.11 pmap_machdep.c
--- arch/mips/mips/pmap_machdep.c 5 Sep 2016 06:59:25 -0000 1.11
+++ arch/mips/mips/pmap_machdep.c 22 Dec 2016 16:13:26 -0000
@@ -133,6 +133,7 @@
#endif
#include <machine/powerpc.h>
#include <powerpc/bat.h>
@@ -2909,9 +2910,9 @@
{
vsize_t size;
vaddr_t va;
- paddr_t pa = 0;
- int npgs, bank;
- struct vm_physseg *ps;
+ paddr_t start, end, pa = 0;
+ int npgs, freelist;
+ uvm_physseg_t bank;
if (uvm.page_init_done == true)
panic("pmap_steal_memory: called _after_ bootstrap");
@@ -2926,11 +2927,18 @@
* PA 0 will never be among those given to UVM so we can use it
* to indicate we couldn't steal any memory.
*/
- for (bank = 0; bank < vm_nphysseg; bank++) {
- ps = VM_PHYSMEM_PTR(bank);
- if (ps->free_list == VM_FREELIST_FIRST256 &&
- ps->avail_end - ps->avail_start >= npgs) {
- pa = ptoa(ps->avail_start);
+
+ for (bank = uvm_physseg_get_first();
+ uvm_physseg_valid_p(bank);
+ bank = uvm_physseg_get_next(bank)) {
+
+ freelist = uvm_physseg_get_free_list(bank);
+ start = uvm_physseg_get_start(bank);
+ end = uvm_physseg_get_end(bank);
+
+ if (freelist == VM_FREELIST_FIRST256 &&
+ (end - start) >= npgs) {
+ pa = ptoa(start);
break;
}
}
@@ -2938,25 +2946,7 @@
if (pa == 0)
panic("pmap_steal_memory: no approriate memory to steal!");
- ps->avail_start += npgs;
- ps->start += npgs;
-
- /*
- * If we've used up all the pages in the segment, remove it and
- * compact the list.
- */
- if (ps->avail_start == ps->end) {
- /*
- * If this was the last one, then a very bad thing has occurred
- */
- if (--vm_nphysseg == 0)
- panic("pmap_steal_memory: out of memory!");
-
- printf("pmap_steal_memory: consumed bank %d\n", bank);
- for (; bank < vm_nphysseg; bank++, ps++) {
- ps[0] = ps[1];
- }
- }
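+	/* carve the stolen range out of the segment in one step */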
+ uvm_physseg_unplug(start, npgs);
/*
- * global vars... XXXCDC: move to uvm. structure.
- */
-
-/*
- * physical memory config is stored in vm_physmem.
- */
-
-struct vm_physseg vm_physmem[VM_PHYSSEG_MAX]; /* XXXCDC: uvm.physmem */
-int vm_nphysseg = 0; /* XXXCDC: uvm.nphysseg */
-#define vm_nphysmem vm_nphysseg
-
-/*
* Some supported CPUs in a given architecture don't support all
* of the things necessary to do idle page zero'ing efficiently.
* We therefore provide a way to enable it from machdep code here.
@@ -146,6 +135,18 @@
#endif /* DEBUG */
/*
+ * These functions are reserved for uvm(9) internal use and are not
+ * exported in the header file uvm_physseg.h
+ *
+ * Thus they are redefined here.
+ */
+void uvm_physseg_init_seg(uvm_physseg_t, struct vm_page *);
+void uvm_physseg_seg_chomp_slab(uvm_physseg_t, struct vm_page *, size_t);
+
+/* returns a pgs array */
+struct vm_page *uvm_physseg_seg_alloc_from_slab(uvm_physseg_t, size_t);
+
+/*
* local prototypes
*/
KASSERT(ncpu <= 1);
CTASSERT(sizeof(pagearray->offset) >= sizeof(struct uvm_cpu *));
@@ -369,7 +368,7 @@
* now is to allocate vm_page structures for this memory.
*/
- if (vm_nphysmem == 0)
+ if (uvm_physseg_get_last() == UVM_PHYSSEG_TYPE_INVALID)
panic("uvm_page_bootstrap: no memory pre-allocated");
/* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
- for (lcv = vm_nphysmem - 1 ; lcv >= 0 ; lcv--)
+ for (lcv = uvm_physseg_get_last() ; uvm_physseg_valid_p(lcv) ; lcv = uvm_physseg_get_prev(lcv))
#else
- for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
+ for (lcv = uvm_physseg_get_first() ; uvm_physseg_valid_p(lcv) ; lcv = uvm_physseg_get_next(lcv))
#endif
{
- seg = VM_PHYSMEM_PTR(lcv);
-
if (uvm.page_init_done == true)
panic("uvm_page_physget: called _after_ bootstrap");
- if (seg->free_list != freelist)
- continue;
+		/* Try to match at front or back on unused segment */
+		if (uvm_page_physunload(lcv, freelist, paddrp))
+			return true;
+		/* this segment didn't match the freelist; try the next one */
- /* try from front */
- if (seg->avail_start == seg->start &&
- seg->avail_start < seg->avail_end) {
- *paddrp = ctob(seg->avail_start);
- seg->avail_start++;
- seg->start++;
- /* nothing left? nuke it */
- if (seg->avail_start == seg->end) {
- if (vm_nphysmem == 1)
- panic("uvm_page_physget: out of memory!");
- vm_nphysmem--;
- for (x = lcv ; x < vm_nphysmem ; x++)
- /* structure copy */
- VM_PHYSMEM_PTR_SWAP(x, x + 1);
- }
- return (true);
- }
-
- /* try from rear */
- if (seg->avail_end == seg->end &&
- seg->avail_start < seg->avail_end) {
- *paddrp = ctob(seg->avail_end - 1);
- seg->avail_end--;
- seg->end--;
- /* nothing left? nuke it */
- if (seg->avail_end == seg->start) {
- if (vm_nphysmem == 1)
- panic("uvm_page_physget: out of memory!");
- vm_nphysmem--;
- for (x = lcv ; x < vm_nphysmem ; x++)
- /* structure copy */
- VM_PHYSMEM_PTR_SWAP(x, x + 1);
- }
- return (true);
- }
- }
/* pass2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
- for (lcv = vm_nphysmem - 1 ; lcv >= 0 ; lcv--)
+ for (lcv = uvm_physseg_get_last() ; uvm_physseg_valid_p(lcv); lcv = uvm_physseg_get_prev(lcv))
#else
- for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
+ for (lcv = uvm_physseg_get_first() ; uvm_physseg_valid_p(lcv) ; lcv = uvm_physseg_get_next(lcv))
#endif
{
- seg = VM_PHYSMEM_PTR(lcv);
-
- /* any room in this bank? */
- if (seg->avail_start >= seg->avail_end)
- continue; /* nope */
-
- *paddrp = ctob(seg->avail_start);
- seg->avail_start++;
- /* truncate! */
- seg->start = seg->avail_start;
-
- /* nothing left? nuke it */
- if (seg->avail_start == seg->end) {
- if (vm_nphysmem == 1)
- panic("uvm_page_physget: out of memory!");
- vm_nphysmem--;
- for (x = lcv ; x < vm_nphysmem ; x++)
- /* structure copy */
- VM_PHYSMEM_PTR_SWAP(x, x + 1);
- }
- return (true);
+		/* Try the front regardless. */
+		if (uvm_page_physunload_force(lcv, freelist, paddrp))
+			return true;
}
-
-	return (false); /* whoops! */
+	return false;
}
-/*
- * vm_physseg: describes one segment of physical memory
- */
-struct vm_physseg {
- paddr_t start; /* PF# of first page in segment */
- paddr_t end; /* (PF# of last page in segment) + 1 */
- paddr_t avail_start; /* PF# of first free page in segment */
- paddr_t avail_end; /* (PF# of last free page in segment) +1 */
- struct vm_page *pgs; /* vm_page structures (from start) */
- struct vm_page *lastpg; /* vm_page structure for end */
- int free_list; /* which free list they belong on */
- u_int start_hint; /* start looking for free pages here */
- /* protected by uvm_fpageqlock */
-#ifdef __HAVE_PMAP_PHYSSEG
- struct pmap_physseg pmseg; /* pmap specific (MD) data */
-#endif
-};
-
#ifdef _KERNEL
static int
-uvm_pglistalloc_c_ps(struct vm_physseg *ps, int num, paddr_t low, paddr_t high,
+uvm_pglistalloc_c_ps(uvm_physseg_t psi, int num, paddr_t low, paddr_t high,
paddr_t alignment, paddr_t boundary, struct pglist *rlist)
{
signed int candidate, limit, candidateidx, end, idx, skip;
- struct vm_page *pgs;
int pagemask;
bool second_pass;
#ifdef DEBUG
paddr_t idxpa, lastidxpa;
- int cidx = 0; /* XXX: GCC */
+ paddr_t cidx = 0; /* XXX: GCC */
#endif
#ifdef PGALLOC_VERBOSE
printf("pgalloc: contig %d pgs from psi %zd\n", num, ps - vm_physmem);
@@ -140,26 +139,26 @@
/*
-	 * Make sure that physseg falls within with range to be allocated from.
+	 * Make sure that physseg falls within the range to be allocated from.
*/
- if (high <= ps->avail_start || low >= ps->avail_end)
+ if (high <= uvm_physseg_get_avail_start(psi) || low >= uvm_physseg_get_avail_end(psi))
return 0;
/*
-	 * We start our search at the just after where the last allocation
+	 * We start our search just after where the last allocation
* succeeded.
*/
- candidate = roundup2(max(low, ps->avail_start + ps->start_hint), alignment);
- limit = min(high, ps->avail_end);
+ candidate = roundup2(max(low, uvm_physseg_get_avail_start(psi) +
+ uvm_physseg_get_start_hint(psi)), alignment);
+ limit = min(high, uvm_physseg_get_avail_end(psi));
pagemask = ~((boundary >> PAGE_SHIFT) - 1);
skip = 0;
second_pass = false;
- pgs = ps->pgs;
for (;;) {
bool ok = true;
signed int cnt;
if (candidate + num > limit) {
- if (ps->start_hint == 0 || second_pass) {
+ if (uvm_physseg_get_start_hint(psi) == 0 || second_pass) {
/*
* We've run past the allowable range.
*/
@@ -171,8 +170,9 @@
-	 * is were we started.
+	 * is where we started.
*/
second_pass = true;
- candidate = roundup2(max(low, ps->avail_start), alignment);
- limit = min(limit, ps->avail_start + ps->start_hint);
+ candidate = roundup2(max(low, uvm_physseg_get_avail_start(psi)), alignment);
+ limit = min(limit, uvm_physseg_get_avail_start(psi) +
+ uvm_physseg_get_start_hint(psi));
skip = 0;
continue;
}
@@ -192,16 +192,16 @@
* Make sure this is a managed physical page.
*/
- if (vm_physseg_find(candidate, &cidx) != ps - vm_physmem)
+ if (uvm_physseg_find(candidate, &cidx) != psi)
panic("pgalloc contig: botch1");
- if (cidx != candidate - ps->start)
+ if (cidx != candidate - uvm_physseg_get_start(psi))
panic("pgalloc contig: botch2");
- if (vm_physseg_find(candidate + num - 1, &cidx) != ps - vm_physmem)
+ if (uvm_physseg_find(candidate + num - 1, &cidx) != psi)
panic("pgalloc contig: botch3");
- if (cidx != candidate - ps->start + num - 1)
+ if (cidx != candidate - uvm_physseg_get_start(psi) + num - 1)
panic("pgalloc contig: botch4");
#endif
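+	/* candidateidx and end are offsets into this segment's page array */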
- candidateidx = candidate - ps->start;
+ candidateidx = candidate - uvm_physseg_get_start(psi);
end = candidateidx + num;
/*
@@ -220,15 +220,15 @@
* testing most of those pages again in the next pass.
*/
for (idx = end - 1; idx >= candidateidx + skip; idx--) {
- if (VM_PAGE_IS_FREE(&pgs[idx]) == 0) {
+ if (VM_PAGE_IS_FREE(uvm_physseg_get_pg(psi, idx)) == 0) {
ok = false;
break;
}
/*
-	 * Make sure that physseg falls within with range to be allocated from.
+	 * Make sure that physseg falls within the range to be allocated from.
*/
- if (high <= ps->avail_start || low >= ps->avail_end)
+ if (high <= uvm_physseg_get_avail_start(psi) ||
+ low >= uvm_physseg_get_avail_end(psi))
return 0;
aprint_debug("%s: need %zu pages\n", __func__, npgs);
- for (u_int bank = 0; bank < vm_nphysseg; bank++) {
- struct vm_physseg * const seg = VM_PHYSMEM_PTR(bank);
+ for (uvm_physseg_t bank = uvm_physseg_get_first();
+ uvm_physseg_valid_p(bank);
+ bank = uvm_physseg_get_next(bank)) {
+
if (uvm.page_init_done == true)
panic("pmap_steal_memory: called _after_ bootstrap");
- aprint_debug("%s: seg %u: %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR"\n",
+ aprint_debug("%s: seg %"PRIxPHYSMEM": %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR"\n",
__func__, bank,
- seg->avail_start, seg->start,
- seg->avail_end, seg->end);
+ uvm_physseg_get_avail_start(bank), uvm_physseg_get_start(bank),
+ uvm_physseg_get_avail_end(bank), uvm_physseg_get_end(bank));
- if (seg->avail_start != seg->start
- || seg->avail_start >= seg->avail_end) {
- aprint_debug("%s: seg %u: bad start\n", __func__, bank);
+ if (uvm_physseg_get_avail_start(bank) != uvm_physseg_get_start(bank)
+ || uvm_physseg_get_avail_start(bank) >= uvm_physseg_get_avail_end(bank)) {
+ aprint_debug("%s: seg %"PRIxPHYSMEM": bad start\n", __func__, bank);
continue;
}
- if (seg->avail_end - seg->avail_start < npgs) {
- aprint_debug("%s: seg %u: too small for %zu pages\n",
+ if (uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank) < npgs) {
+ aprint_debug("%s: seg %"PRIxPHYSMEM": too small for %zu pages\n",
__func__, bank, npgs);
continue;
}
- if (!pmap_md_ok_to_steal_p(seg, npgs)) {
+ if (!pmap_md_ok_to_steal_p(bank, npgs)) {
continue;
}
@@ -490,44 +493,24 @@
* Always try to allocate from the segment with the least
* amount of space left.
*/
-#define VM_PHYSMEM_SPACE(s) ((s)->avail_end - (s)->avail_start)
- if (maybe_seg == NULL
- || VM_PHYSMEM_SPACE(seg) < VM_PHYSMEM_SPACE(maybe_seg)) {
- maybe_seg = seg;
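+/* pages still available to steal from segment b */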
+#define VM_PHYSMEM_SPACE(b) ((uvm_physseg_get_avail_end(b)) - (uvm_physseg_get_avail_start(b)))
+ if (uvm_physseg_valid_p(maybe_bank) == false
+ || VM_PHYSMEM_SPACE(bank) < VM_PHYSMEM_SPACE(maybe_bank)) {
maybe_bank = bank;
}
}
- if (maybe_seg) {
- struct vm_physseg * const seg = maybe_seg;
- u_int bank = maybe_bank;
-
+ if (uvm_physseg_valid_p(maybe_bank)) {
+ const uvm_physseg_t bank = maybe_bank;
+
/*
* There are enough pages here; steal them!
*/
- pa = ptoa(seg->avail_start);
- seg->avail_start += npgs;
- seg->start += npgs;
-
- /*
- * Have we used up this segment?
- */
- if (seg->avail_start == seg->end) {
- if (vm_nphysseg == 1)
- panic("pmap_steal_memory: out of memory!");
+		pa = ptoa(uvm_physseg_get_start(bank));
+		aprint_debug("%s: seg %"PRIxPHYSMEM": %zu pages stolen (%#"PRIxPADDR" left)\n",
+		    __func__, bank, npgs, VM_PHYSMEM_SPACE(bank) - npgs);
+		/* unplug last: it may invalidate the handle if the seg empties */
+		uvm_physseg_unplug(atop(pa), npgs);
- aprint_debug("%s: seg %u: %zu pages stolen (removed)\n",
- __func__, bank, npgs);
- /* Remove this segment from the list. */
- vm_nphysseg--;
- for (u_int x = bank; x < vm_nphysseg; x++) {
- /* structure copy */
- VM_PHYSMEM_PTR_SWAP(x, x + 1);
- }
- } else {
- aprint_debug("%s: seg %u: %zu pages stolen (%#"PRIxPADDR" left)\n",
- __func__, bank, npgs, VM_PHYSMEM_SPACE(seg));
- }
+ aprint_debug("%s: seg %"PRIxPHYSMEM": %zu pages stolen (%#"PRIxPADDR" left)\n",
+ __func__, bank, npgs, VM_PHYSMEM_SPACE(bank));
va = pmap_md_map_poolpage(pa, size);
memset((void *)va, 0, size);