/* dump level 1 page table at virtual addr l1 */
void
mmudump(PTE *l1)
{
	int i, type, rngtype;
	uintptr pa, startva, startpa;
	uvlong va, endva;
	PTE pte;

//	pa -= MACHSIZE+1024;	/* put level 2 entries below level 1 */
//	l2 = KADDR(pa);

	print("\n");
	endva = startva = startpa = 0;
	rngtype = 0;
	/* dump first level of ptes */
	for (va = i = 0; i < 4096; i++) {
		pte = l1[i];
		pa = pte & ~(MB - 1);
		type = pte & (Fine|Section|Coarse);
		if (ISHOLE(pte)) {
			if (endva != 0) {	/* open range? close it */
				print("l1 maps va (%#lux-%#llux) -> pa %#lux type %#ux\n",
					startva, endva-1, startpa, rngtype);
				endva = 0;
			}
		} else {
			if (endva == 0) {	/* no open range? start one */
				startva = va;
				startpa = pa;
				rngtype = type;
			}
			endva = va + MB;	/* continue the open range */
//			if (type == Coarse) {
//				// could dump the l2 table for this l1 entry
//			}
		}
		va += MB;
	}
	if (endva != 0)			/* close an open range */
		print("l1 maps va (%#lux-%#llux) -> pa %#lux type %#ux\n",
			startva, endva-1, startpa, rngtype);
}
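
/*
 * mmudump is purely a debugging aid.  A typical call, assuming a
 * ttbget() that returns the physical address of the current L1
 * table, would be
 *
 *	mmudump(KADDR(ttbget()));
 */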

/* identity map the megabyte containing va, uncached */
static void
idmap(PTE *l1, ulong va)
{
	va &= ~(MB-1);
	l1[L1X(va)] = va | Dom0 | L1AP(Krw) | Section;
}
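
/*
 * Typical use, during MMU initialisation (PHYSDEVS is illustrative,
 * not a constant from this file): let an early driver reach its
 * registers at their physical address, uncached:
 *
 *	idmap(l1, PHYSDEVS);
 */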

/* map `mbs' megabytes from virt to phys */
void
mmumap(uintptr virt, uintptr phys, int mbs)
{
	uint off;
	PTE *l1;

	phys &= ~(MB-1);
	virt &= ~(MB-1);
	l1 = KADDR(ttbget());
	for (off = 0; mbs-- > 0; off += MB)
		l1[L1X(virt + off)] = (phys + off) | Dom0 | L1AP(Krw) | Section;
	cacheuwbinv();
	l2cacheuwbinv();
	mmuinvalidate();
}
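
/*
 * MMU initialisation fragment: map the high vectors to the start of
 * dram, but only 4K, not 1MB.  Here pa holds the physical address of
 * the L1 table (the ttb) and l1 its kernel address; a 1K L2 table is
 * carved out MACHSIZE+2K below the ttb to hold the single Small-page
 * entry covering the vector page at HVECTORS.
 */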
	/* map high vectors to start of dram, but only 4K, not 1MB */
	pa -= MACHSIZE+2*1024;
	l2 = KADDR(pa);
	memset(l2, 0, 1024);
	/* vectors step on u-boot, but so do page tables */
	l2[L2X(HVECTORS)] = PHYSDRAM|L2AP(Krw)|Small;
	l1[L1X(HVECTORS)] = pa|Dom0|Coarse;	/* vectors -> ttb-machsize-2k */
	coherence();
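
/*
 * Tail of putmmu(va, pa, page), which installs the level 2 entry for
 * a faulting user address: at this point pte points at the L2 table
 * selected by va's L1 entry and x collects the Small-page descriptor
 * bits.
 */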
	/* protection bits are
	 *	PTERONLY|PTEVALID;
	 *	PTEWRITE|PTEVALID;
	 *	PTEWRITE|PTEUNCACHED|PTEVALID;
	 */
	x = Small;
	if(!(pa & PTEUNCACHED))
		x |= Cached|Buffered;
	if(pa & PTEWRITE)
		x |= L2AP(Urw);
	else
		x |= L2AP(Uro);
	pte[L2X(va)] = PPN(pa)|x;
	cachedwbse(&pte[L2X(va)], sizeof pte[0]);

	/* clear out the current entry */
	mmuinvalidateaddr(PPN(va));

	/* write back dirty entries - we need this because the pio() in
	 * fault.c is writing via a different virt addr and won't clean
	 * its changes out of the dcache.  Page coloring doesn't work
	 * on this mmu because the virtual cache is set associative
	 * rather than direct mapped.
	 */
	cachedwbinv();
	if(page->txtflush){
		cacheiinv();
		page->txtflush = 0;
	}
	//print("putmmu %#p %#p %#p\n", va, pa, PPN(pa)|x);
}
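
/*
 * putmmu is reached from the generic fault code, which passes the
 * page's attributes (PTEWRITE, PTEUNCACHED, PTEVALID) in the low
 * bits of pa; PPN() strips them off again before the physical page
 * number is written into the descriptor above.
 */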

/*
 * Simple helper for ucalloc().
 * Uncache a Section, which must already be
 * valid in the MMU.  v is the Section's (MB-aligned) address and
 * size its length; only whole single-MB Sections are handled:
 */
	va = (uintptr)v;
	assert(!(va & (1*MiB-1)) && size == 1*MiB);
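
/*
 * A minimal sketch of how such a helper could look, following the
 * pattern of idmap() above.  The function name and everything after
 * the assert are assumptions, not code from this file:
 */
static void
uncachesection(PTE *l1, void *v, usize size)
{
	PTE *pte;
	uintptr va;

	va = (uintptr)v;
	assert(!(va & (1*MiB-1)) && size == 1*MiB);
	pte = &l1[L1X(va)];
	assert((*pte & (Fine|Section|Coarse)) == Section);
	*pte &= ~(Cached|Buffered);	/* make the MB uncached, unbuffered */
	cachedwbse(pte, sizeof *pte);	/* push the changed pte to memory */
	cachedwbinv();			/* flush data cached under the old mapping */
	mmuinvalidateaddr(va);		/* drop any stale TLB entry */
}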

/*
 * Return the number of bytes that can be accessed via KADDR(pa).
 * If pa is not a valid argument to KADDR, return 0.
 */
uintptr
cankaddr(uintptr pa)
{
	if(pa >= PHYSDRAM && pa < PHYSDRAM+memsize)
		return PHYSDRAM+memsize - pa;
	return 0;
}
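
/*
 * A hypothetical caller, to show the intended use of cankaddr: take
 * the fixed KADDR mapping when it covers the whole range, otherwise
 * fall back to vmap().  The name physmap and the function itself are
 * ours, not from this file:
 */
static void*
physmap(uintptr pa, usize size)
{
	if(cankaddr(pa) >= size)
		return KADDR(pa);	/* covered by the boot-time mapping */
	return vmap(pa, size);		/* build a mapping on demand */
}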

/* from 386 */
void*
vmap(uintptr pa, usize size)
{
	uintptr pae, va;
	usize o, osize;

	/*
	 * XXX - replace with new vm stuff.
	 * Crock after crock - the first 4MB is mapped with 2MB pages
	 * so catch that and return good values because the current mmukmap
	 * will fail.
	 */
	if(pa+size < 4*MiB)
		return (void*)(kseg0|pa);

	osize = size;
	o = pa & (BY2PG-1);
	pa -= o;
	size += o;
	size = PGROUND(size);

	va = kseg0|pa;
	pae = mmukmap(va, pa, size);
	if(pae == 0 || pae-size != pa)
		panic("vmap(%#p, %ld) called from %#p: mmukmap fails %#p",
			pa+o, osize, getcallerpc(&pa), pae);

	return (void*)(va+o);
}
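
/*
 * Typical use (PHYSUART is illustrative, not a constant from this
 * file): map a device's register page once at attach time and keep
 * the returned pointer:
 *
 *	uart = vmap(PHYSUART, BY2PG);
 *
 * vmap panics rather than returning nil when mmukmap cannot build
 * the mapping, so the result needs no nil check.
 */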

/* from 386 */
void
vunmap(void* v, usize size)
{
	/*
	 * XXX - replace with new vm stuff.
	 * Can't do this until do real vmap for all space that
	 * might be used, e.g. stuff below 1MB which is currently
	 * mapped automagically at boot but that isn't used (or
	 * at least shouldn't be used) by the kernel.
	upafree(PADDR(v), size);
	 */
	USED(v, size);
}
/*
* Notes.
* Everything is in domain 0;
* domain 0 access bits in the DAC register are set
* to Client, which means access is controlled by the
* permission values set in the PTE.
*
* L1 access control for the kernel is set to 1 (RW,
* no user mode access);
* L2 access control for the kernel is set to 1 (ditto)
* for all 4 AP sets;
* L1 user mode access is never set;
* L2 access control for user mode is set to either
* 2 (RO) or 3 (RW) depending on whether text or data,
* for all 4 AP sets.
* (To get kernel RO set AP to 0 and S bit in control
* register c1).
 * Coarse L1 page-table descriptors are used; each one points to
 * an L2 page-table of 256 entries, so an L2 table consumes
 * 1024 bytes.
 * Small (4KiB) pages are used in the L2 tables, so the 1024 L1
 * entries covering user space consume 4096 bytes of the L1 table.
*
* 4KiB. That's the size of 1) a page, 2) the
 * size allocated for an L2 page-table page (note only 1KiB
 * is needed per L2 table - to be dealt with later) and
* 3) the size of the area in L1 needed to hold the PTEs
* to map 1GiB of user space (0 -> 0x3fffffff, 1024 entries).
*/
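
/*
 * For concreteness, sketches of how the bits described above combine
 * into descriptors.  They only restate idmap() and the tail of
 * putmmu(); the function names are ours, not part of the original
 * source:
 */
static void
mapkmb(PTE *l1, uintptr va, uintptr pa)
{
	/* 1MB Section: kernel RW, no user access, uncached */
	l1[L1X(va)] = (pa & ~(MB-1)) | Dom0 | L1AP(Krw) | Section;
}

static void
mapupage(PTE *l2, uintptr va, uintptr pa)
{
	/* 4K Small page: user RW, cached and buffered */
	l2[L2X(va)] = PPN(pa) | L2AP(Urw) | Cached | Buffered | Small;
}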