/*
 * NOTE(review): fragment — the enclosing function (the page-in path,
 * presumably pio) begins before this excerpt.  The segment lock s was
 * released around the i/o, so once it is reacquired every assumption
 * about the page-table slot *p must be re-validated before the freshly
 * filled page `new` is installed.
 */
qlock(s);
if(loadrec == nil) { /* This is demand load */
/*
 * race, another proc may have gotten here first while
 * s was unlocked
 */
if(*p == nil) {
/*
 * check page cache again after i/o to reduce double caching
 */
*p = lookpage(s->image, daddr);
if(*p == nil) {
/* not cached: enter new into the image cache and install it */
incref(new);
new->daddr = daddr;
cachepage(new, s->image);
*p = new;
}
}
}
else { /* This is paged out */
/*
 * race, another proc may have gotten here first
 * (and the pager may have run on that page) while
 * s was unlocked
 */
if(*p != loadrec) {
if(!pagedout(*p)) {
/* another process did it for me */
goto done;
} else if(*p != nil) {
/* another process and the pager got in: drop new and start over */
putpage(new);
goto retry;
} else {
/* another process segfreed the page */
/* install new in its place, zeroed so no stale data leaks */
incref(new);
k = kmap(new);
memset((void*)VA(k), 0, ask);
kunmap(k);
*p = new;
goto done;
}
}
/*
 * NOTE(review): fragment of the segment-type switch in the fault
 * handler (presumably fixfault); the switch statement itself begins
 * before this excerpt.
 */
case SG_BSS:
case SG_SHARED: /* Zero fill on demand */
case SG_STACK:
if(*pg == nil) {
/* allocate a fresh zeroed page; newpage can nil s on failure */
new = newpage(1, &s, addr);
if(s == nil)
return -1;
*pg = new;
}
/* wet floor */
/* deliberate fallthrough into SG_DATA: shared pagein/COW handling */
case SG_DATA: /* Demand load/pagein/copy on write */
if(pagedout(*pg))
pio(s, addr, soff, pg);
/*
 * It's only possible to copy on write if
 * we're the only user of the segment.
 */
if(read && conf.copymode == 0 && s->ref == 1) {
/* install a read-only mapping so a later write faults for COW */
mmuphys = PPN((*pg)->pa) | PTERONLY | PTECACHED | PTEVALID;
(*pg)->modref |= PG_REF;
break;
}
/*
 * Validate that the user address range [addr, addr+len) is accessible
 * for the requested access mode (write != 0 means writable).
 * On failure the calling process is noted and killed via error():
 * this function does not return in that case.
 */
void
validaddr(uintptr addr, ulong len, int write)
{
	if(okaddr(addr, len, write))
		return;

	pprint("suicide: invalid address %#p/%lud in sys call pc=%#p\n", addr, len, userpc());
	postnote(up, 1, "sys: bad address in syscall", NDebug);
	error(Ebadarg);
}
/*
 * memchr over a user buffer that may span pages.
 * &s[0] is known to be a valid address; each subsequent page is
 * validated with validaddr() before it is touched, so a bad page
 * later in the range faults the process instead of the kernel.
 */
void*
vmemchr(void *s, int c, ulong n)
{
	uintptr addr;
	ulong span;
	void *hit;

	addr = (uintptr)s;
	/* walk one page at a time while the request overflows the page */
	while((span = BY2PG - (addr & (BY2PG-1))) < n){
		hit = memchr((void*)addr, c, span);
		if(hit != nil)
			return hit;
		addr += span;
		n -= span;
		/* entering a new page: check user addresses before reading */
		if(addr < KZERO)
			validaddr(addr, 1, 0);
	}
	/* remainder lies within the current page */
	return memchr((void*)addr, c, n);
}