/*
 * Preallocate some pages:
 * some 2M ones will be used by the first process;
 * some 1G ones will be allocated for each domain so processes may use them.
 */
void
pageinit(void)
{
	int si, i, color;
	Page *pg;

	pga.userinit = 1;
	DBG("pageinit: npgsz = %d\n", sys->npgsz);

	/*
	 * Don't pre-allocate 4K pages; we are not using them anymore.
	 */
	for(si = 1; si < sys->npgsz; si++){
		for(i = 0; i < Nstartpgs; i++){
			if(si < 2)
				color = -1;
			else
				color = i;
			pg = pgalloc(sys->pgsz[si], color);
			if(pg == nil){
				DBG("pageinit: pgalloc failed. breaking.\n");
				break;		/* don't consume more memory */
			}
			DBG("pageinit: alloced pa %#P sz %#ux color %d\n",
				pg->pa, sys->pgsz[si], pg->color);
			lock(&pga.l);
			pg->ref = 0;
			pagechainhead(pg);
			unlock(&pga.l);
		}
	}
	pga.userinit = 0;
}
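pagechainhead() is what actually links each preallocated page into the allocator's free lists. A minimal sketch of what it might look like, assuming Pgsza keeps a head pointer and a free count per size class and that callers hold pga.l (the field names are assumptions, not the authoritative definition):

/*
 * Sketch only: push a free page onto the head of its size class.
 * Assumes Page has 'next' and 'pgszi' fields and that Pgsza and
 * pga keep free counts; the caller must hold pga.l.
 */
void
pagechainhead(Page *pg)
{
	Pgsza *pa;

	pa = &pga.pgsza[pg->pgszi];
	pg->next = pa->head;
	pa->head = pg;
	pa->freecount++;
	pga.freecount++;
}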
static int
tryalloc(int pgszi, int color)
{
	Page *p;

	p = pgalloc(sys->pgsz[pgszi], color);
	if(p != nil){
		lock(&pga.l);
		pagechainhead(p);
		unlock(&pga.l);
		return 0;
	}
	return -1;
}
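A caller would typically try the preferred color first and fall back to any color. A hypothetical helper (growfreelist() is illustrative, not from the source; NOCOLOR is the catch-all that newpage() below also uses):

/*
 * Hypothetical helper: refill the free list for one size class,
 * preferring 'color' but accepting any color on failure.
 */
static void
growfreelist(int pgszi, int color)
{
	if(tryalloc(pgszi, color) < 0)
		tryalloc(pgszi, NOCOLOR);
}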
/*
 * Given the current proc's page directory, find the pte that
 * the virtual address lies in. If the 'creat' parameter is set,
 * allocate a page as the middle page table, so the lookup
 * always succeeds.
 */
struct pte*
find_pte(struct pde *pgd, uint vaddr, uint creat)
{
	struct pde *pde;
	struct pte *pt;
	struct page *pg;

	if (vaddr < KMEM_END) {
		panic("find_pte(): don't touch kernel's address space.");
	}

	pde = &pgd[PDX(vaddr)];
	if ((pde->pd_flag & PTE_P) == 0) {
		if (creat == 0) {
			return NULL;
		}
		/* allocate and install a fresh middle page table */
		pg = pgalloc();
		pde->pd_flag = PTE_P | PTE_U | PTE_W;
		pde->pd_off = pg->pg_num;
		pt = (struct pte*)(pde->pd_off * PAGE);
		memset(pt, 0, PAGE);
		flmmu();
	}
	pt = (struct pte*)(pde->pd_off * PAGE);
	return &pt[PTX(vaddr)];
}
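With creat set, find_pte() cannot fail, so installing a mapping reduces to filling in the returned entry. A hedged sketch of such a caller, assuming struct pte mirrors struct pde with pt_off/pt_flag fields (put_page() and those field names are illustrative guesses):

/*
 * Illustrative only: map physical page 'pg' at 'vaddr' in 'pgd'.
 * find_pte() is called with creat=1, so the middle page table is
 * built on demand and the result is never NULL.
 */
void
put_page(struct pde *pgd, uint vaddr, struct page *pg, uint flags)
{
	struct pte *pte;

	pte = find_pte(pgd, vaddr, 1);
	pte->pt_off = pg->pg_num;
	pte->pt_flag = flags | PTE_P;
	flmmu();	/* flush stale translations for this address */
}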
FAR void *mm_sbrk(FAR struct mm_heap_s *heap, intptr_t incr,
                  uintptr_t maxbreak)
{
  uintptr_t brkaddr;
  uintptr_t allocbase;
  unsigned int pgincr;
  size_t bytesize;
  int err;

  DEBUGASSERT(incr >= 0);
  if (incr < 0)
    {
      err = ENOSYS;
      goto errout;
    }

  /* Get the current break address (NOTE: assumes region 0).  If
   * the memory manager is uninitialized, mm_brkaddr() will return
   * zero.
   */

  brkaddr = (uintptr_t)mm_brkaddr(heap, 0);
  if (incr > 0)
    {
      /* Convert the increment to multiples of the page size */

      pgincr = MM_NPAGES(incr);

      /* Check if this increment would exceed the maximum break value */

      if ((brkaddr > 0) &&
          ((maxbreak - brkaddr) < (pgincr << MM_PGSHIFT)))
        {
          err = ENOMEM;
          goto errout;
        }

      /* Allocate the requested number of pages and map them to the
       * break address.  If we provide a zero brkaddr to pgalloc(), it
       * will create the first block in the correct virtual address
       * space and return the start address of that block.
       */

      allocbase = pgalloc(brkaddr, pgincr);
      if (allocbase == 0)
        {
          err = EAGAIN;
          goto errout;
        }

      /* Has the heap been initialized?  brkaddr will be zero if the
       * memory manager has not yet been initialized.
       */

      bytesize = pgincr << MM_PGSHIFT;
      if (brkaddr == 0)
        {
          /* No... then initialize it now */

          mm_initialize(heap, (FAR void *)allocbase, bytesize);
        }
      else
        {
          /* Extend the heap (region 0) */

          mm_extend(heap, (FAR void *)allocbase, bytesize, 0);
        }
    }

  return (FAR void *)brkaddr;

errout:
  set_errno(err);
  return (FAR void *)-1;
}
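The user-facing sbrk() then reduces to a thin wrapper over mm_sbrk(). A sketch along the lines of NuttX's umm_sbrk.c, assuming USR_HEAP names the user heap and ARCH_HEAP_VEND its upper bound (both are stand-ins for the platform's actual definitions):

/* Sketch: sbrk() as a wrapper over mm_sbrk().  USR_HEAP and
 * ARCH_HEAP_VEND are assumed platform definitions.
 */

FAR void *sbrk(intptr_t incr)
{
  return mm_sbrk(USR_HEAP, incr, ARCH_HEAP_VEND);
}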
/*
 * Can be called with up == nil during boot.
 */
Page*
newpage(int clear, Segment **s, uintptr_t va, usize size, int color)
{
	Page *p;
	KMap *k;
	uint8_t ct;
	Pgsza *pa;
	uint64_t *v;
	int i, dontalloc, si;

	si = getpgszi(size);
	pa = &pga.pgsza[si];
	lock(&pga.l);

	/*
	 * Beware: newpage may be entered again even if this loop does
	 * not iterate more than once, if the segment is lost and fault
	 * calls us again. Either way, accept any color if we failed a
	 * couple of times.
	 */
	for(i = 0;; i++){
		if(i > 3)
			color = NOCOLOR;

		/*
		 * 1. try to reuse a free one.
		 */
		p = findpg(pa->head, color);
		if(p != nil)
			break;

		/*
		 * 2. try to allocate a new one from physical memory.
		 */
		p = pgalloc(size, color);
		if(p != nil){
			pagechainhead(p);
			break;
		}

		/*
		 * 3. out of memory, try with the pager,
		 * but release the segment (if any) while in the pager.
		 */
		unlock(&pga.l);

		dontalloc = 0;
		if(s && *s){
			qunlock(&((*s)->lk));
			*s = 0;
			dontalloc = 1;
		}

		/*
		 * Try to get any page of the desired color,
		 * or any color for NOCOLOR.
		 */
		kickpager(si, color);

		/*
		 * If called from fault and we lost the segment from
		 * underneath, don't waste time allocating and freeing
		 * a page. Fault will call newpage again when it has
		 * reacquired the segment locks.
		 */
		if(dontalloc)
			return 0;

		lock(&pga.l);
	}

	assert(p != nil);
	ct = PG_NEWCOL;

	pageunchain(p);

	lock(&p->l);
	if(p->ref != 0)
		panic("newpage pa %#llux", p->pa);

	uncachepage(p);
	p->ref++;
	p->va = va;
	p->modref = 0;
	for(i = 0; i < nelem(p->cachectl); i++)
		p->cachectl[i] = ct;
	unlock(&p->l);
	unlock(&pga.l);

	if(clear){
		k = kmap(p);
		/*
		 * Zero the page a word at a time; the loop is kept
		 * readable and compilers optimize it well.
		 */
		v = (void*)VA(k);
		for(i = 0; i < sys->pgsz[p->pgszi]/sizeof(*v); i++)
			v[i] = 0;
		kunmap(k);
	}
	DBG("newpage: va %#p pa %#llux pgsz %#ux color %d\n",
		p->va, p->pa, sys->pgsz[p->pgszi], p->color);

	return p;
}
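From the fault path, newpage() is called with the segment lock held and must be retried when it returns nil after dropping that lock. A hedged sketch of such a caller (faultpage() and the pgsize parameter are illustrative; getpgcolor() follows the usual Plan 9 naming):

/*
 * Illustrative fault-path caller: get a zeroed page for 'addr'.
 * A nil return means newpage() released the segment; the caller
 * must reacquire it and retry, as the comments above describe.
 */
static Page*
faultpage(Segment **s, uintptr_t addr, usize pgsize)
{
	Page *p;

	p = newpage(1, s, addr, pgsize, getpgcolor(addr));
	if(p == nil)
		return nil;	/* segment lost; fault() retries */
	return p;
}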