Example no. 1
/*
 * Allocate a page to hold pagerefs.
 */
static
void
allocpagerefpage(struct kheap_root *root)
{
	vaddr_t va;

	KASSERT(root->page == NULL);

	/*
	 * We release the spinlock while calling alloc_kpages. This
	 * avoids deadlock if alloc_kpages needs to come back here.
	 * Note that this means things can change behind our back...
	 */
	spinlock_release(&kmalloc_spinlock);
	va = alloc_kpages(1);
	spinlock_acquire(&kmalloc_spinlock);
	if (va == 0) {
		kprintf("kmalloc: Couldn't get a pageref page\n");
		return;
	}
	KASSERT(va % PAGE_SIZE == 0);

	if (root->page != NULL) {
		/* Oops, somebody else allocated it. */
		spinlock_release(&kmalloc_spinlock);
		free_kpages(va);
		spinlock_acquire(&kmalloc_spinlock);
		/* Once allocated it isn't ever freed. */
		KASSERT(root->page != NULL);
		return;
	}

	root->page = (struct pagerefpage *)va;
}
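The pattern above (release the lock, allocate, re-acquire, re-check root->page) is a double-checked allocation. Here is a minimal user-space sketch of the same idea, using a pthread mutex in place of the kernel spinlock; the names are illustrative, not from the OS/161 source:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *shared_page = NULL;

/* Called with `lock` held; may temporarily release it. */
static void alloc_shared_page(void)
{
	void *p;

	/* Drop the lock so the allocator can't deadlock against us. */
	pthread_mutex_unlock(&lock);
	p = malloc(4096);
	pthread_mutex_lock(&lock);
	if (p == NULL) {
		return;
	}

	if (shared_page != NULL) {
		/* Someone else won the race while we slept; discard ours. */
		pthread_mutex_unlock(&lock);
		free(p);
		pthread_mutex_lock(&lock);
		return;
	}
	shared_page = p;
}

int main(void)
{
	pthread_mutex_lock(&lock);
	alloc_shared_page();
	pthread_mutex_unlock(&lock);
	free(shared_page);
	return 0;
}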
Example no. 2
void *
kmalloc(size_t sz)
{
	if (sz>=LARGEST_SUBPAGE_SIZE) {
		unsigned long npages;
		vaddr_t address;

		/* Round up to a whole number of pages. */
		npages = (sz + PAGE_SIZE - 1)/PAGE_SIZE;
		address = alloc_kpages(npages);
		if (address==0) {
			return NULL;
		}

		DEBUG(DB_KMALLOC, "KMALLOC DEBUG: %x\n", address);
		return (void *)address;
	}
	return subpage_kmalloc(sz);
}
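The expression (sz + PAGE_SIZE - 1)/PAGE_SIZE is the usual ceiling-division idiom: it computes the smallest number of pages whose total size covers sz. A standalone check of the arithmetic, assuming the 4096-byte PAGE_SIZE used on OS/161's MIPS target:

#include <assert.h>
#include <stddef.h>

#define PAGE_SIZE 4096

static size_t pages_for(size_t sz)
{
	return (sz + PAGE_SIZE - 1) / PAGE_SIZE;
}

int main(void)
{
	assert(pages_for(1) == 1);     /* partial page rounds up */
	assert(pages_for(4096) == 1);  /* exact fit stays at one page */
	assert(pages_for(4097) == 2);  /* one byte over needs a second page */
	return 0;
}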
Example no. 3
int
getppages(struct addrspace *as, vaddr_t vbase, size_t npage)
{
	vaddr_t addr;
	int pt1;
	int pt2;
	int offset;

	if (vbase > USERSTACK) {
		return EFAULT;
	}

	pt1 = (int)PT1_INDEX(vbase);
	pt2 = (int)PT2_INDEX(vbase);
	offset = (int)PT_OFFSET(vbase);
	(void)offset;	/* page offset unused until per-page info is added */

	/* Align vaddr to PAGE_SIZE */
	// page_base = vbase / PAGE_SIZE;

	if (as->page_table[pt1] == NULL) {
		as->page_table[pt1] = kmalloc(sizeof(page_table_entry) * PAGE_TABLE_SIZE);
		if (as->page_table[pt1] == NULL) {
			return ENOMEM;
		}
	}

	/* Store KV addr of each page; additional info not included yet, TBD */
	for (unsigned i = 0; i < npage; i++) {
		addr = alloc_kpages(1);
		if (addr == 0) {
			/* Roll back the pages already allocated, then fail. */
			for (unsigned j = 0; j < i; j++) {
				free_kpages(as->page_table[pt1][pt2 + j]);
			}
			return ENOMEM;
		}
		as->page_table[pt1][pt2 + i] = addr;
		bzero((void *)addr, PAGE_SIZE);
	}

	return 0;
}
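PT1_INDEX, PT2_INDEX, and PT_OFFSET split the virtual address into a top-level table index, a second-level index, and an offset within the page. Their definitions are not shown in this example; the sketch below assumes a plausible 10/10/12 bit split for a 32-bit address space with 4 KB pages, which may differ from the actual macros:

#include <assert.h>
#include <stdint.h>

/* Hypothetical 10/10/12 split of a 32-bit virtual address. */
#define PT1_INDEX(va)  (((uint32_t)(va) >> 22) & 0x3ff)
#define PT2_INDEX(va)  (((uint32_t)(va) >> 12) & 0x3ff)
#define PT_OFFSET(va)  ((uint32_t)(va) & 0xfff)

int main(void)
{
	uint32_t va = 0x00403a10;

	assert(PT1_INDEX(va) == 1);      /* bits 31..22 */
	assert(PT2_INDEX(va) == 3);      /* bits 21..12 */
	assert(PT_OFFSET(va) == 0xa10);  /* bits 11..0 */
	return 0;
}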
Example no. 4
static
void
coremapthread(void *sm, unsigned long num)
{
	struct semaphore *sem = sm;
	uint32_t page;
	uint32_t oldpage = 0;
	uint32_t oldpage2 = 0;
	int i;

	for (i=0; i<NTRIES; i++) {
		page = alloc_kpages(NPAGES);
		if (page==0) {
			if (sem) {
				kprintf("thread %lu: alloc_kpages failed\n",
					num);
				V(sem);
				return;
			}
			kprintf("alloc_kpages failed; test failed.\n");
			return;
		}
		if (oldpage2) {
			coremap_free(KVADDR_TO_PADDR(oldpage2), true /* iskern */);
		}
		oldpage2 = oldpage;
		oldpage = page;
	}
	if (oldpage2) {
		coremap_free(KVADDR_TO_PADDR(oldpage2), true /* iskern */);
	}
	if (oldpage) {
		coremap_free(KVADDR_TO_PADDR(oldpage), true /* iskern */);
	}
	if (sem) {
		V(sem);
	}
}
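The loop keeps the two most recently allocated blocks live and frees only the oldest, so the allocator is always asked for NPAGES contiguous pages while earlier allocations are still outstanding. KVADDR_TO_PADDR works because kernel pages here come from a direct-mapped segment; on OS/161's MIPS target, kseg0 maps physical memory at a fixed offset, so the translation is a subtraction. A sketch under that assumption:

#include <assert.h>
#include <stdint.h>

typedef uint32_t vaddr_t;
typedef uint32_t paddr_t;

/* MIPS kseg0: virtual 0x80000000 upward maps physical 0 upward. */
#define MIPS_KSEG0 0x80000000

#define KVADDR_TO_PADDR(va) ((paddr_t)((va) - MIPS_KSEG0))

int main(void)
{
	vaddr_t va = 0x80042000;

	assert(KVADDR_TO_PADDR(va) == 0x00042000);
	return 0;
}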
Example no. 5
/*
 * Allocate a block of size SZ. Redirect either to subpage_kmalloc or
 * alloc_kpages depending on how big SZ is.
 */
void *
kmalloc(size_t sz)
{
	size_t checksz;
#ifdef LABELS
	vaddr_t label;
#endif

#ifdef LABELS
#ifdef __GNUC__
	label = (vaddr_t)__builtin_return_address(0);
#else
#error "Don't know how to get return address with this compiler"
#endif /* __GNUC__ */
#endif /* LABELS */

	checksz = sz + GUARD_OVERHEAD + LABEL_OVERHEAD;
	if (checksz >= LARGEST_SUBPAGE_SIZE) {
		unsigned long npages;
		vaddr_t address;

		/* Round up to a whole number of pages. */
		npages = (sz + PAGE_SIZE - 1)/PAGE_SIZE;
		address = alloc_kpages(npages);
		if (address==0) {
			return NULL;
		}
		KASSERT(address % PAGE_SIZE == 0);

		return (void *)address;
	}

#ifdef LABELS
	return subpage_kmalloc(sz, label);
#else
	return subpage_kmalloc(sz);
#endif
}
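In the LABELS build, each allocation is tagged with its caller's address via GCC's __builtin_return_address(0), so a leak report can name the allocation site. A standalone illustration of the technique; traced_malloc and record_alloc_site are made-up names for this sketch:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical bookkeeping hook: remember who asked for the memory. */
static void record_alloc_site(void *block, void *caller)
{
	printf("block %p allocated from code address %p\n", block, caller);
}

static void *traced_malloc(size_t sz)
{
	/* Address the caller will return to, i.e. the allocation site. */
	void *caller = __builtin_return_address(0);
	void *block = malloc(sz);

	if (block != NULL) {
		record_alloc_site(block, caller);
	}
	return block;
}

int main(void)
{
	void *p = traced_malloc(32);

	free(p);
	return 0;
}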
Example no. 6
static
void *
subpage_kmalloc(size_t sz)
{
	unsigned blktype;	// index into sizes[] that we're using
	struct pageref *pr;	// pageref for page we're allocating from
	vaddr_t prpage;		// PR_PAGEADDR(pr)
	vaddr_t fla;		// free list entry address
	struct freelist *volatile fl;	// free list entry
	void *retptr;		// our result

	volatile int i;


	blktype = blocktype(sz);
	sz = sizes[blktype];

	spinlock_acquire(&kmalloc_spinlock);

	checksubpages();

	for (pr = sizebases[blktype]; pr != NULL; pr = pr->next_samesize) {

		/* check for corruption */
		KASSERT(PR_BLOCKTYPE(pr) == blktype);
		checksubpage(pr);

		if (pr->nfree > 0) {

		doalloc: /* comes here after getting a whole fresh page */

			KASSERT(pr->freelist_offset < PAGE_SIZE);
			prpage = PR_PAGEADDR(pr);
			fla = prpage + pr->freelist_offset;
			fl = (struct freelist *)fla;

			retptr = fl;
			fl = fl->next;
			pr->nfree--;

			if (fl != NULL) {
				KASSERT(pr->nfree > 0);
				fla = (vaddr_t)fl;
				KASSERT(fla - prpage < PAGE_SIZE);
				pr->freelist_offset = fla - prpage;
			}
			else {
				KASSERT(pr->nfree == 0);
				pr->freelist_offset = INVALID_OFFSET;
			}

			checksubpages();

			spinlock_release(&kmalloc_spinlock);
			return retptr;
		}
	}

	/*
	 * No page of the right size available.
	 * Make a new one.
	 *
	 * We release the spinlock while calling alloc_kpages. This
	 * avoids deadlock if alloc_kpages needs to come back here.
	 * Note that this means things can change behind our back...
	 */

	spinlock_release(&kmalloc_spinlock);
	prpage = alloc_kpages(1);
	if (prpage==0) {
		/* Out of memory. */
		kprintf("kmalloc: Subpage allocator couldn't get a page\n"); 
		return NULL;
	}
	spinlock_acquire(&kmalloc_spinlock);

	pr = allocpageref();
	if (pr==NULL) {
		/* Couldn't allocate accounting space for the new page. */
		spinlock_release(&kmalloc_spinlock);
		free_kpages(prpage);
		kprintf("kmalloc: Subpage allocator couldn't get pageref\n"); 
		return NULL;
	}

	pr->pageaddr_and_blocktype = MKPAB(prpage, blktype);
	pr->nfree = PAGE_SIZE / sizes[blktype];

	/*
	 * Note: fl is volatile because the MIPS toolchain we were
	 * using in spring 2001 attempted to optimize this loop and
	 * blew it. Making fl volatile inhibits the optimization.
	 */

	fla = prpage;
	fl = (struct freelist *)fla;
	fl->next = NULL;
	for (i=1; i<pr->nfree; i++) {
		fl = (struct freelist *)(fla + i*sizes[blktype]);
		fl->next = (struct freelist *)(fla + (i-1)*sizes[blktype]);
		KASSERT(fl != fl->next);
	}
	fla = (vaddr_t) fl;
	pr->freelist_offset = fla - prpage;
	KASSERT(pr->freelist_offset == (pr->nfree-1)*sizes[blktype]);

	pr->next_samesize = sizebases[blktype];
	sizebases[blktype] = pr;

	pr->next_all = allbase;
	allbase = pr;

	/* This is kind of cheesy, but avoids duplicating the alloc code. */
	goto doalloc;
}
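The initialization loop near the end threads the free list through the fresh page itself: the first word of each block points at the previous block, so the list costs no memory beyond the page it manages. A standalone sketch of the same construction over a heap buffer, with illustrative names:

#include <assert.h>
#include <stdlib.h>

struct freelist {
	struct freelist *next;
};

/*
 * Carve `page` into `n` blocks of `blksz` bytes and thread a free
 * list through them, the way subpage_kmalloc seeds a fresh page.
 */
static struct freelist *build_freelist(char *page, size_t blksz, size_t n)
{
	struct freelist *fl = (struct freelist *)page;

	fl->next = NULL;
	for (size_t i = 1; i < n; i++) {
		fl = (struct freelist *)(page + i * blksz);
		fl->next = (struct freelist *)(page + (i - 1) * blksz);
	}
	return fl;	/* head is the highest-addressed block */
}

int main(void)
{
	char *page = malloc(4096);
	struct freelist *head = build_freelist(page, 64, 4096 / 64);
	size_t count = 0;

	for (struct freelist *p = head; p != NULL; p = p->next) {
		count++;
	}
	assert(count == 4096 / 64);
	free(page);
	return 0;
}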