Example #1
/*
 * Allocate a page to hold pagerefs.
 */
static
void
allocpagerefpage(struct kheap_root *root)
{
	vaddr_t va;

	KASSERT(root->page == NULL);

	/*
	 * We release the spinlock while calling alloc_kpages. This
	 * avoids deadlock if alloc_kpages needs to come back here.
	 * Note that this means things can change behind our back...
	 */
	spinlock_release(&kmalloc_spinlock);
	va = alloc_kpages(1);
	spinlock_acquire(&kmalloc_spinlock);
	if (va == 0) {
		kprintf("kmalloc: Couldn't get a pageref page\n");
		return;
	}
	KASSERT(va % PAGE_SIZE == 0);

	if (root->page != NULL) {
		/* Oops, somebody else allocated it. */
		spinlock_release(&kmalloc_spinlock);
		free_kpages(va);
		spinlock_acquire(&kmalloc_spinlock);
		/* Once allocated it isn't ever freed. */
		KASSERT(root->page != NULL);
		return;
	}

	root->page = (struct pagerefpage *)va;
}
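
The pattern above (drop the spinlock, call the allocator, reacquire, then re-check whether another CPU installed the page in the meantime) is the standard way to call a potentially re-entrant allocator from inside a locked section. Below is a minimal userspace sketch of the same idea, with hypothetical names and a pthread mutex standing in for kmalloc_spinlock; it is an illustration, not part of the original code.

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the kernel objects used above. */
static pthread_mutex_t heap_lock = PTHREAD_MUTEX_INITIALIZER;
static void *shared_page = NULL;

/*
 * Install the shared page once. The caller holds heap_lock, as in
 * allocpagerefpage; we drop it around the allocation (which might need
 * the lock itself) and re-check afterwards.
 */
static void
ensure_shared_page(void)
{
	void *p;

	pthread_mutex_unlock(&heap_lock);
	p = malloc(4096);		/* stands in for alloc_kpages(1) */
	pthread_mutex_lock(&heap_lock);

	if (p == NULL) {
		return;			/* out of memory; leave shared_page unset */
	}
	if (shared_page != NULL) {
		/* Someone else installed a page while the lock was dropped. */
		pthread_mutex_unlock(&heap_lock);
		free(p);
		pthread_mutex_lock(&heap_lock);
		return;
	}
	shared_page = p;
}

int
main(void)
{
	pthread_mutex_lock(&heap_lock);
	ensure_shared_page();
	pthread_mutex_unlock(&heap_lock);
	free(shared_page);
	return 0;
}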
Example #2
void
kfree(void *ptr)
{
	/*
	 * Try subpage first; if that fails, assume it's a big allocation.
	 */
	if (ptr == NULL) {
		return;
	} else if (subpage_kfree(ptr)) {
		KASSERT((vaddr_t)ptr%PAGE_SIZE==0);
		free_kpages((vaddr_t)ptr);
	}
}
Example #3
void
kfree(void *ptr)
{
	/*
	 * Try subpage first; if that fails, assume it's a big allocation.
	 */
	//kprintf("free memory: 0x%08x\n",(unsigned int)ptr);
	if (ptr == NULL) {
		return;
	} else if (subpage_kfree(ptr)) {
		KASSERT((vaddr_t)ptr%PAGE_SIZE==0);
		free_kpages((vaddr_t)ptr);
	}
}
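
Both kfree versions above rely on the same convention: subpage_kfree returns 0 when the pointer belonged to a subpage-managed page and nonzero when it did not, in which case the pointer must have come directly from alloc_kpages and is therefore page-aligned. A small userspace sketch of that two-tier dispatch follows, with a tiny fixed pool standing in for the subpage allocator and malloc/free standing in for the page allocator; all names are hypothetical.

#include <stdint.h>
#include <stdlib.h>

#define POOL_BLOCKS 16
#define BLOCK_SIZE  64

static unsigned char pool[POOL_BLOCKS * BLOCK_SIZE];
static int pool_used[POOL_BLOCKS];

/* Hand out one fixed-size block, or NULL if the pool is full. */
static void *
pool_alloc(void)
{
	for (int i = 0; i < POOL_BLOCKS; i++) {
		if (!pool_used[i]) {
			pool_used[i] = 1;
			return pool + i * BLOCK_SIZE;
		}
	}
	return NULL;
}

/*
 * Return 0 if ptr belonged to the pool (and release it), -1 otherwise,
 * mirroring the subpage_kfree return convention.
 */
static int
pool_free(void *ptr)
{
	uintptr_t p = (uintptr_t)ptr;
	uintptr_t base = (uintptr_t)pool;

	if (p < base || p >= base + sizeof(pool)) {
		return -1;
	}
	pool_used[(p - base) / BLOCK_SIZE] = 0;
	return 0;
}

/* Same dispatch shape as kfree: the small allocator gets first refusal. */
static void
generic_free(void *ptr)
{
	if (ptr == NULL) {
		return;
	}
	if (pool_free(ptr) != 0) {
		free(ptr);		/* stands in for free_kpages */
	}
}

int
main(void)
{
	void *a = pool_alloc();
	void *b = malloc(1024);

	generic_free(a);	/* routed to the pool */
	generic_free(b);	/* not in the pool: falls through to free() */
	generic_free(NULL);	/* no-op, as in kfree */
	return 0;
}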
Example #4
int
getppages(struct addrspace *as, vaddr_t vbase, size_t npage)
{
	vaddr_t addr;
	int pt1;
	int pt2;
	int offset;

	if (vbase > USERSTACK) {
		return EFAULT;
	}

	pt1 = (int)PT1_INDEX(vbase);
	pt2 = (int)PT2_INDEX(vbase);
	offset = (int)PT_OFFSET(vbase);

	/* Align vaddr to PAGE_SIZE */
	// page_base = vbase / PAGE_SIZE;

	if (as->page_table[pt1] == NULL) {
		as->page_table[pt1] = kmalloc(sizeof(page_table_entry) * PAGE_TABLE_SIZE);
		if (as->page_table[pt1] == NULL) {
			return ENOMEM;
		}
	}

	/* Store KV addr of page, additional info not included yet, TBD */
	for (unsigned i = 0; i < npage; i++) {
		addr = alloc_kpages(1);
		if (addr == 0) {
			/* Roll back the pages already allocated, then fail. */
			for (unsigned j = 0; j < i; j++) {
				free_kpages(as->page_table[pt1][pt2 + j]);
			}
			return ENOMEM;
		}
		as->page_table[pt1][pt2 + i] = addr;
		bzero((void *)addr, PAGE_SIZE);
	}

	return 0;
}
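
getppages splits the virtual address into a first-level index, a second-level index, and a page offset. The PT1_INDEX/PT2_INDEX/PT_OFFSET macros are defined elsewhere in that code base and are not shown; assuming 4 KB pages and a 10/10/12-bit split (an assumption, not taken from the example), they might look like the following sketch. With that split each second-level table would hold 1024 entries, which is presumably what the kmalloc of PAGE_TABLE_SIZE entries corresponds to.

#include <assert.h>
#include <stdint.h>

typedef uint32_t vaddr_t;

/* Hypothetical definitions, assuming a 32-bit address split 10/10/12. */
#define PT_OFFSET(va)  ((va) & 0xfff)		/* low 12 bits: byte within the page */
#define PT2_INDEX(va)  (((va) >> 12) & 0x3ff)	/* next 10 bits: second-level slot */
#define PT1_INDEX(va)  (((va) >> 22) & 0x3ff)	/* top 10 bits: first-level slot */

int
main(void)
{
	vaddr_t va = 0x00401234;

	assert(PT1_INDEX(va) == 0x001);
	assert(PT2_INDEX(va) == 0x001);
	assert(PT_OFFSET(va) == 0x234);
	return 0;
}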
Example #5
static
int
subpage_kfree(void *ptr)
{
	int blktype;		// index into sizes[] that we're using
	vaddr_t ptraddr;	// same as ptr
	struct pageref *pr;	// pageref for page we're freeing in
	vaddr_t prpage;		// PR_PAGEADDR(pr)
	vaddr_t fla;		// free list entry address
	struct freelist *fl;	// free list entry
	vaddr_t offset;		// offset into page

	ptraddr = (vaddr_t)ptr;

	spinlock_acquire(&kmalloc_spinlock);

	checksubpages();

	for (pr = allbase; pr; pr = pr->next_all) {
		prpage = PR_PAGEADDR(pr);
		blktype = PR_BLOCKTYPE(pr);

		/* check for corruption */
		KASSERT(blktype>=0 && blktype<NSIZES);
		checksubpage(pr);

		if (ptraddr >= prpage && ptraddr < prpage + PAGE_SIZE) {
			break;
		}
	}

	if (pr==NULL) {
		/* Not on any of our pages - not a subpage allocation */
		spinlock_release(&kmalloc_spinlock);
		return -1;
	}

	offset = ptraddr - prpage;

	/* Check for proper positioning and alignment */
	if (offset >= PAGE_SIZE || offset % sizes[blktype] != 0) {
		panic("kfree: subpage free of invalid addr %p\n", ptr);
	}

	/*
	 * Clear the block to 0xdeadbeef to make it easier to detect
	 * uses of dangling pointers.
	 */
	fill_deadbeef(ptr, sizes[blktype]);

	/*
	 * We probably ought to check for free twice by seeing if the block
	 * is already on the free list. But that's expensive, so we don't.
	 */

	fla = prpage + offset;
	fl = (struct freelist *)fla;
	if (pr->freelist_offset == INVALID_OFFSET) {
		fl->next = NULL;
	} else {
		fl->next = (struct freelist *)(prpage + pr->freelist_offset);
	}
	pr->freelist_offset = offset;
	pr->nfree++;

	KASSERT(pr->nfree <= PAGE_SIZE / sizes[blktype]);
	if (pr->nfree == PAGE_SIZE / sizes[blktype]) {
		/* Whole page is free. */
		remove_lists(pr, blktype);
		freepageref(pr);
		/* Call free_kpages without kmalloc_spinlock. */
		spinlock_release(&kmalloc_spinlock);
		free_kpages(prpage);
	}
	else {
		spinlock_release(&kmalloc_spinlock);
	}

#ifdef SLOWER /* Don't get the lock unless checksubpages does something. */
	spinlock_acquire(&kmalloc_spinlock);
	checksubpages();
	spinlock_release(&kmalloc_spinlock);
#endif

	return 0;
}
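
The free path above pushes the block back onto the owning page's free list in LIFO order, recording the list head as a byte offset within the page (with INVALID_OFFSET meaning an empty list) rather than as a full pointer, so the pageref stays compact. Below is a minimal userspace sketch of that offset-encoded push; struct names and the INVALID_OFFSET value are hypothetical.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE      4096u
#define INVALID_OFFSET 0xffffffffu	/* hypothetical "empty list" marker */

struct freelist {
	struct freelist *next;
};

/* Per-page bookkeeping, loosely modelled on struct pageref above. */
struct page_meta {
	uintptr_t pageaddr;		/* start address of the page */
	uint32_t freelist_offset;	/* offset of the head, or INVALID_OFFSET */
	unsigned nfree;
};

/* Push a freed block of size blocksize onto the page's free list. */
static void
push_free_block(struct page_meta *pm, void *ptr, size_t blocksize)
{
	uintptr_t offset = (uintptr_t)ptr - pm->pageaddr;
	struct freelist *fl = ptr;

	assert(offset < PAGE_SIZE && offset % blocksize == 0);

	if (pm->freelist_offset == INVALID_OFFSET) {
		fl->next = NULL;
	} else {
		fl->next = (struct freelist *)(pm->pageaddr + pm->freelist_offset);
	}
	pm->freelist_offset = (uint32_t)offset;
	pm->nfree++;
}

int
main(void)
{
	static uintptr_t page_storage[PAGE_SIZE / sizeof(uintptr_t)];
	unsigned char *page = (unsigned char *)page_storage;
	struct page_meta pm = {
		.pageaddr = (uintptr_t)page,
		.freelist_offset = INVALID_OFFSET,
		.nfree = 0,
	};

	push_free_block(&pm, page + 128, 64);
	push_free_block(&pm, page + 64, 64);

	assert(pm.nfree == 2 && pm.freelist_offset == 64);
	return 0;
}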
Example #6
static
void *
subpage_kmalloc(size_t sz)
{
	unsigned blktype;	// index into sizes[] that we're using
	struct pageref *pr;	// pageref for page we're allocating from
	vaddr_t prpage;		// PR_PAGEADDR(pr)
	vaddr_t fla;		// free list entry address
	struct freelist *volatile fl;	// free list entry
	void *retptr;		// our result

	volatile int i;


	blktype = blocktype(sz);
	sz = sizes[blktype];

	spinlock_acquire(&kmalloc_spinlock);

	checksubpages();

	for (pr = sizebases[blktype]; pr != NULL; pr = pr->next_samesize) {

		/* check for corruption */
		KASSERT(PR_BLOCKTYPE(pr) == blktype);
		checksubpage(pr);

		if (pr->nfree > 0) {

		doalloc: /* comes here after getting a whole fresh page */

			KASSERT(pr->freelist_offset < PAGE_SIZE);
			prpage = PR_PAGEADDR(pr);
			fla = prpage + pr->freelist_offset;
			fl = (struct freelist *)fla;

			retptr = fl;
			fl = fl->next;
			pr->nfree--;

			if (fl != NULL) {
				KASSERT(pr->nfree > 0);
				fla = (vaddr_t)fl;
				KASSERT(fla - prpage < PAGE_SIZE);
				pr->freelist_offset = fla - prpage;
			}
			else {
				KASSERT(pr->nfree == 0);
				pr->freelist_offset = INVALID_OFFSET;
			}

			checksubpages();

			spinlock_release(&kmalloc_spinlock);
			return retptr;
		}
	}

	/*
	 * No page of the right size available.
	 * Make a new one.
	 *
	 * We release the spinlock while calling alloc_kpages. This
	 * avoids deadlock if alloc_kpages needs to come back here.
	 * Note that this means things can change behind our back...
	 */

	spinlock_release(&kmalloc_spinlock);
	prpage = alloc_kpages(1);
	if (prpage==0) {
		/* Out of memory. */
		kprintf("kmalloc: Subpage allocator couldn't get a page\n"); 
		return NULL;
	}
	spinlock_acquire(&kmalloc_spinlock);

	pr = allocpageref();
	if (pr==NULL) {
		/* Couldn't allocate accounting space for the new page. */
		spinlock_release(&kmalloc_spinlock);
		free_kpages(prpage);
		kprintf("kmalloc: Subpage allocator couldn't get pageref\n"); 
		return NULL;
	}

	pr->pageaddr_and_blocktype = MKPAB(prpage, blktype);
	pr->nfree = PAGE_SIZE / sizes[blktype];

	/*
	 * Note: fl is volatile because the MIPS toolchain we were
	 * using in spring 2001 attempted to optimize this loop and
	 * blew it. Making fl volatile inhibits the optimization.
	 */

	fla = prpage;
	fl = (struct freelist *)fla;
	fl->next = NULL;
	for (i=1; i<pr->nfree; i++) {
		fl = (struct freelist *)(fla + i*sizes[blktype]);
		fl->next = (struct freelist *)(fla + (i-1)*sizes[blktype]);
		KASSERT(fl != fl->next);
	}
	fla = (vaddr_t) fl;
	pr->freelist_offset = fla - prpage;
	KASSERT(pr->freelist_offset == (pr->nfree-1)*sizes[blktype]);

	pr->next_samesize = sizebases[blktype];
	sizebases[blktype] = pr;

	pr->next_all = allbase;
	allbase = pr;

	/* This is kind of cheesy, but avoids duplicating the alloc code. */
	goto doalloc;
}
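
The loop at the end builds the initial free list inside the freshly allocated page: the page is carved into PAGE_SIZE / sizes[blktype] blocks, and block i is made to point at block i-1, so the list runs backwards through the page and the head ends up at the last block (which is exactly what the final KASSERT on freelist_offset checks). A small userspace sketch of that construction, with hypothetical names:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u

struct freelist {
	struct freelist *next;
};

/*
 * Chain the blocks of a page so that block i points at block i-1,
 * returning the head (the last block), as subpage_kmalloc does above.
 */
static struct freelist *
build_page_freelist(void *page, size_t blocksize)
{
	uintptr_t base = (uintptr_t)page;
	size_t nblocks = PAGE_SIZE / blocksize;
	struct freelist *fl = (struct freelist *)base;

	fl->next = NULL;
	for (size_t i = 1; i < nblocks; i++) {
		fl = (struct freelist *)(base + i * blocksize);
		fl->next = (struct freelist *)(base + (i - 1) * blocksize);
	}
	return fl;	/* head is at offset (nblocks - 1) * blocksize */
}

int
main(void)
{
	void *page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
	struct freelist *head = build_page_freelist(page, 64);

	/* Popping the head hands blocks out from the end of the page downwards. */
	assert((uintptr_t)head - (uintptr_t)page == PAGE_SIZE - 64);
	assert((uintptr_t)head->next - (uintptr_t)page == PAGE_SIZE - 128);

	free(page);
	return 0;
}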
Example #7
/*
 * Free a pointer previously returned from subpage_kmalloc. If the
 * pointer is not on any heap page we recognize, return -1.
 */
static
int
subpage_kfree(void *ptr)
{
	int blktype;		// index into sizes[] that we're using
	vaddr_t ptraddr;	// same as ptr
	struct pageref *pr;	// pageref for page we're freeing in
	vaddr_t prpage;		// PR_PAGEADDR(pr)
	vaddr_t fla;		// free list entry address
	struct freelist *fl;	// free list entry
	vaddr_t offset;		// offset into page
#ifdef GUARDS
	size_t blocksize, smallerblocksize;
#endif

	ptraddr = (vaddr_t)ptr;
#ifdef GUARDS
	if (ptraddr % PAGE_SIZE == 0) {
		/*
		 * With guard bands, all client-facing subpage
		 * pointers are offset by GUARD_PTROFFSET (which is 4)
		 * from the underlying blocks and are therefore not
		 * page-aligned. So a page-aligned pointer is not one
		 * of ours. Catch this up front, as otherwise
		 * subtracting GUARD_PTROFFSET could give a pointer on
		 * a page we *do* own, and then we'll panic because
		 * it's not a valid one.
		 */
		return -1;
	}
	ptraddr -= GUARD_PTROFFSET;
#endif
#ifdef LABELS
	if (ptraddr % PAGE_SIZE == 0) {
		/* ditto */
		return -1;
	}
	ptraddr -= LABEL_PTROFFSET;
#endif

	spinlock_acquire(&kmalloc_spinlock);

	checksubpages();

	for (pr = allbase; pr; pr = pr->next_all) {
		prpage = PR_PAGEADDR(pr);
		blktype = PR_BLOCKTYPE(pr);
		KASSERT(blktype >= 0 && blktype < NSIZES);

		/* check for corruption */
		KASSERT(blktype>=0 && blktype<NSIZES);
		checksubpage(pr);

		if (ptraddr >= prpage && ptraddr < prpage + PAGE_SIZE) {
			break;
		}
	}

	if (pr==NULL) {
		/* Not on any of our pages - not a subpage allocation */
		spinlock_release(&kmalloc_spinlock);
		return -1;
	}

	offset = ptraddr - prpage;

	/* Check for proper positioning and alignment */
	if (offset >= PAGE_SIZE || offset % sizes[blktype] != 0) {
		panic("kfree: subpage free of invalid addr %p\n", ptr);
	}

#ifdef GUARDS
	blocksize = sizes[blktype];
	smallerblocksize = blktype > 0 ? sizes[blktype - 1] : 0;
	checkguardband(ptraddr, smallerblocksize, blocksize);
#endif

	/*
	 * Clear the block to 0xdeadbeef to make it easier to detect
	 * uses of dangling pointers.
	 */
	fill_deadbeef((void *)ptraddr, sizes[blktype]);

	/*
	 * We probably ought to check for free twice by seeing if the block
	 * is already on the free list. But that's expensive, so we don't.
	 */

	fla = prpage + offset;
	fl = (struct freelist *)fla;
	if (pr->freelist_offset == INVALID_OFFSET) {
		fl->next = NULL;
	} else {
		fl->next = (struct freelist *)(prpage + pr->freelist_offset);

		/* this block should not already be on the free list! */
#ifdef SLOW
		{
			struct freelist *fl2;

			for (fl2 = fl->next; fl2 != NULL; fl2 = fl2->next) {
				KASSERT(fl2 != fl);
			}
		}
#else
		/* check just the head */
		KASSERT(fl != fl->next);
#endif
	}
	pr->freelist_offset = offset;
	pr->nfree++;

	KASSERT(pr->nfree <= PAGE_SIZE / sizes[blktype]);
	if (pr->nfree == PAGE_SIZE / sizes[blktype]) {
		/* Whole page is free. */
		remove_lists(pr, blktype);
		freepageref(pr);
		/* Call free_kpages without kmalloc_spinlock. */
		spinlock_release(&kmalloc_spinlock);
		free_kpages(prpage);
	}
	else {
		spinlock_release(&kmalloc_spinlock);
	}

#ifdef SLOWER /* Don't get the lock unless checksubpages does something. */
	spinlock_acquire(&kmalloc_spinlock);
	checksubpages();
	spinlock_release(&kmalloc_spinlock);
#endif

	return 0;
}
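
When GUARDS is enabled, the client-visible pointer sits GUARD_PTROFFSET (4 bytes, per the comment above) past the start of the underlying block, so a valid guarded subpage pointer is never page-aligned. The page-alignment test therefore has to happen before the offset is subtracted; otherwise the subtraction could land on a page the allocator does own and trigger the "invalid addr" panic instead of a clean -1. A minimal sketch of that ordering, with hypothetical names:

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE        4096u
#define GUARD_PTROFFSET  4u	/* client pointer = block start + 4, per the comment above */

typedef uintptr_t vaddr_t;

/*
 * Undo the guard offset, but only for pointers that can actually be
 * guarded subpage pointers (i.e. not page-aligned). Returns 0 on
 * success, -1 if the pointer cannot be one of ours.
 */
static int
unguard_subpage_ptr(void *ptr, vaddr_t *blockaddr)
{
	vaddr_t ptraddr = (vaddr_t)ptr;

	if (ptraddr % PAGE_SIZE == 0) {
		return -1;	/* page-aligned: not a guarded subpage pointer */
	}
	*blockaddr = ptraddr - GUARD_PTROFFSET;
	return 0;
}

int
main(void)
{
	static _Alignas(4096) unsigned char page[PAGE_SIZE];
	vaddr_t block;

	/* A pointer 4 bytes into a block is accepted and mapped back. */
	assert(unguard_subpage_ptr(page + 68, &block) == 0);
	assert(block == (vaddr_t)(page + 64));

	/* A page-aligned pointer is rejected up front. */
	assert(unguard_subpage_ptr(page, &block) == -1);
	return 0;
}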