Example #1
/*
 * Allocate a block of size SZ, where SZ is not large enough to
 * warrant a whole-page allocation.
 */
static
void *
subpage_kmalloc(size_t sz
#ifdef LABELS
		, vaddr_t label
#endif
	)
{
	unsigned blktype;	// index into sizes[] that we're using
	struct pageref *pr;	// pageref for page we're allocating from
	vaddr_t prpage;		// PR_PAGEADDR(pr)
	vaddr_t fla;		// free list entry address
	struct freelist *volatile fl;	// free list entry
	void *retptr;		// our result

	volatile int i;

#ifdef GUARDS
	size_t clientsz;
#endif

#ifdef GUARDS
	clientsz = sz;
	sz += GUARD_OVERHEAD;
#endif
#ifdef LABELS
#ifdef GUARDS
	/* Include the label in what GUARDS considers the client data. */
	clientsz += LABEL_PTROFFSET;
#endif
	sz += LABEL_PTROFFSET;
#endif
	blktype = blocktype(sz);
#ifdef GUARDS
	sz = sizes[blktype];
#endif

	spinlock_acquire(&kmalloc_spinlock);

	checksubpages();

	for (pr = sizebases[blktype]; pr != NULL; pr = pr->next_samesize) {

		/* check for corruption */
		KASSERT(PR_BLOCKTYPE(pr) == blktype);
		checksubpage(pr);

		if (pr->nfree > 0) {

		doalloc: /* comes here after getting a whole fresh page */

			KASSERT(pr->freelist_offset < PAGE_SIZE);
			prpage = PR_PAGEADDR(pr);
			fla = prpage + pr->freelist_offset;
			fl = (struct freelist *)fla;

			retptr = fl;
			fl = fl->next;
			pr->nfree--;

			if (fl != NULL) {
				KASSERT(pr->nfree > 0);
				fla = (vaddr_t)fl;
				KASSERT(fla - prpage < PAGE_SIZE);
				pr->freelist_offset = fla - prpage;
			}
			else {
				KASSERT(pr->nfree == 0);
				pr->freelist_offset = INVALID_OFFSET;
			}
#ifdef GUARDS
			retptr = establishguardband(retptr, clientsz, sz);
#endif
#ifdef LABELS
			retptr = establishlabel(retptr, label);
#endif

			checksubpages();

			spinlock_release(&kmalloc_spinlock);
			return retptr;
		}
	}

	/*
	 * No page of the right size available.
	 * Make a new one.
	 *
	 * We release the spinlock while calling alloc_kpages. This
	 * avoids deadlock if alloc_kpages needs to come back here.
	 * Note that this means things can change behind our back...
	 */

	spinlock_release(&kmalloc_spinlock);
	prpage = alloc_kpages(1);
	if (prpage==0) {
		/* Out of memory. */
		kprintf("kmalloc: Subpage allocator couldn't get a page\n");
		return NULL;
	}
	KASSERT(prpage % PAGE_SIZE == 0);
#ifdef CHECKBEEF
	/* deadbeef the whole page, as it probably starts zeroed */
	fill_deadbeef((void *)prpage, PAGE_SIZE);
#endif
	spinlock_acquire(&kmalloc_spinlock);

	pr = allocpageref();
	if (pr==NULL) {
		/* Couldn't allocate accounting space for the new page. */
		spinlock_release(&kmalloc_spinlock);
		free_kpages(prpage);
		kprintf("kmalloc: Subpage allocator couldn't get pageref\n");
		return NULL;
	}

	pr->pageaddr_and_blocktype = MKPAB(prpage, blktype);
	pr->nfree = PAGE_SIZE / sizes[blktype];

	/*
	 * Note: fl is volatile because the MIPS toolchain we were
	 * using in spring 2001 attempted to optimize this loop and
	 * blew it. Making fl volatile inhibits the optimization.
	 */

	fla = prpage;
	fl = (struct freelist *)fla;
	fl->next = NULL;
	for (i=1; i<pr->nfree; i++) {
		fl = (struct freelist *)(fla + i*sizes[blktype]);
		fl->next = (struct freelist *)(fla + (i-1)*sizes[blktype]);
		KASSERT(fl != fl->next);
	}
	fla = (vaddr_t) fl;
	pr->freelist_offset = fla - prpage;
	KASSERT(pr->freelist_offset == (pr->nfree-1)*sizes[blktype]);

	pr->next_samesize = sizebases[blktype];
	sizebases[blktype] = pr;

	pr->next_all = allbase;
	allbase = pr;

	/* This is kind of cheesy, but avoids duplicating the alloc code. */
	goto doalloc;
}
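For reference, subpage_kmalloc leans on a few declarations that are not part of this excerpt. The sketch below is reconstructed from how the fields and macros are used above; the field widths, the PAGE_FRAME mask, and the exact bit layout behind MKPAB are assumptions for illustration, not necessarily the kernel's real definitions.

/* A free block's first word links it to the next free block on the same page. */
struct freelist {
	struct freelist *next;
};

/*
 * One pageref describes one page owned by the subpage allocator. The
 * page address is page-aligned, so the (small) block-type index can be
 * packed into its low bits.
 */
struct pageref {
	struct pageref *next_samesize;	/* pages carved into this block size */
	struct pageref *next_all;	/* all pages owned by the subpage allocator */
	vaddr_t pageaddr_and_blocktype;
	uint16_t freelist_offset;	/* offset of first free block, or INVALID_OFFSET */
	uint16_t nfree;			/* number of free blocks on the page */
};

#define INVALID_OFFSET		0xffff
#define PR_PAGEADDR(pr)		((pr)->pageaddr_and_blocktype & PAGE_FRAME)
#define PR_BLOCKTYPE(pr)	((pr)->pageaddr_and_blocktype & ~PAGE_FRAME)
#define MKPAB(pa, blk)		(((pa) & PAGE_FRAME) | ((blk) & ~PAGE_FRAME))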
Example #2
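/*
 * Free a block previously handed out by subpage_kmalloc. Return -1 if
 * the pointer does not lie on any page we manage (so the caller can
 * fall back to the whole-page allocator), or 0 once the block has been
 * freed.
 */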
static
int
subpage_kfree(void *ptr)
{
	int blktype;		// index into sizes[] that we're using
	vaddr_t ptraddr;	// same as ptr
	struct pageref *pr;	// pageref for page we're freeing in
	vaddr_t prpage;		// PR_PAGEADDR(pr)
	vaddr_t fla;		// free list entry address
	struct freelist *fl;	// free list entry
	vaddr_t offset;		// offset into page

	ptraddr = (vaddr_t)ptr;

	spinlock_acquire(&kmalloc_spinlock);

	checksubpages();

	for (pr = allbase; pr; pr = pr->next_all) {
		prpage = PR_PAGEADDR(pr);
		blktype = PR_BLOCKTYPE(pr);

		/* check for corruption */
		KASSERT(blktype>=0 && blktype<NSIZES);
		checksubpage(pr);

		if (ptraddr >= prpage && ptraddr < prpage + PAGE_SIZE) {
			break;
		}
	}

	if (pr==NULL) {
		/* Not on any of our pages - not a subpage allocation */
		spinlock_release(&kmalloc_spinlock);
		return -1;
	}

	offset = ptraddr - prpage;

	/* Check for proper positioning and alignment */
	if (offset >= PAGE_SIZE || offset % sizes[blktype] != 0) {
		panic("kfree: subpage free of invalid addr %p\n", ptr);
	}

	/*
	 * Clear the block to 0xdeadbeef to make it easier to detect
	 * uses of dangling pointers.
	 */
	fill_deadbeef(ptr, sizes[blktype]);

	/*
	 * We probably ought to check for double frees by seeing if the
	 * block is already on the free list. But that's expensive, so we
	 * don't.
	 */

	fla = prpage + offset;
	fl = (struct freelist *)fla;
	if (pr->freelist_offset == INVALID_OFFSET) {
		fl->next = NULL;
	} else {
		fl->next = (struct freelist *)(prpage + pr->freelist_offset);
	}
	pr->freelist_offset = offset;
	pr->nfree++;

	KASSERT(pr->nfree <= PAGE_SIZE / sizes[blktype]);
	if (pr->nfree == PAGE_SIZE / sizes[blktype]) {
		/* Whole page is free. */
		remove_lists(pr, blktype);
		freepageref(pr);
		/* Call free_kpages without kmalloc_spinlock. */
		spinlock_release(&kmalloc_spinlock);
		free_kpages(prpage);
	}
	else {
		spinlock_release(&kmalloc_spinlock);
	}

#ifdef SLOWER /* Don't get the lock unless checksubpages does something. */
	spinlock_acquire(&kmalloc_spinlock);
	checksubpages();
	spinlock_release(&kmalloc_spinlock);
#endif

	return 0;
}
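The -1 return value is what lets a front-end kfree decide between the subpage and whole-page paths. Below is a minimal sketch of such a dispatcher, assuming the usual kfree(void *) entry point and that whole-page allocations come back page-aligned from alloc_kpages; the wrapper itself is illustrative, not the kernel's actual code.

void
kfree(void *ptr)
{
	/* Freeing NULL is a no-op. */
	if (ptr == NULL) {
		return;
	}

	/*
	 * Try the subpage allocator first; if it does not recognize the
	 * pointer, it must be a whole-page allocation, which is always
	 * page-aligned.
	 */
	if (subpage_kfree(ptr) != 0) {
		KASSERT((vaddr_t)ptr % PAGE_SIZE == 0);
		free_kpages((vaddr_t)ptr);
	}
}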
Example #3
/*
 * Free a pointer previously returned from subpage_kmalloc. If the
 * pointer is not on any heap page we recognize, return -1.
 */
static
int
subpage_kfree(void *ptr)
{
	int blktype;		// index into sizes[] that we're using
	vaddr_t ptraddr;	// same as ptr
	struct pageref *pr;	// pageref for page we're freeing in
	vaddr_t prpage;		// PR_PAGEADDR(pr)
	vaddr_t fla;		// free list entry address
	struct freelist *fl;	// free list entry
	vaddr_t offset;		// offset into page
#ifdef GUARDS
	size_t blocksize, smallerblocksize;
#endif

	ptraddr = (vaddr_t)ptr;
#ifdef GUARDS
	if (ptraddr % PAGE_SIZE == 0) {
		/*
		 * With guard bands, all client-facing subpage
		 * pointers are offset by GUARD_PTROFFSET (which is 4)
		 * from the underlying blocks and are therefore not
		 * page-aligned. So a page-aligned pointer is not one
		 * of ours. Catch this up front, as otherwise
		 * subtracting GUARD_PTROFFSET could give a pointer on
		 * a page we *do* own, and then we'll panic because
		 * it's not a valid one.
		 */
		return -1;
	}
	ptraddr -= GUARD_PTROFFSET;
#endif
#ifdef LABELS
	if (ptraddr % PAGE_SIZE == 0) {
		/* ditto */
		return -1;
	}
	ptraddr -= LABEL_PTROFFSET;
#endif

	spinlock_acquire(&kmalloc_spinlock);

	checksubpages();

	for (pr = allbase; pr; pr = pr->next_all) {
		prpage = PR_PAGEADDR(pr);
		blktype = PR_BLOCKTYPE(pr);

		/* check for corruption */
		KASSERT(blktype >= 0 && blktype < NSIZES);
		checksubpage(pr);

		if (ptraddr >= prpage && ptraddr < prpage + PAGE_SIZE) {
			break;
		}
	}

	if (pr==NULL) {
		/* Not on any of our pages - not a subpage allocation */
		spinlock_release(&kmalloc_spinlock);
		return -1;
	}

	offset = ptraddr - prpage;

	/* Check for proper positioning and alignment */
	if (offset >= PAGE_SIZE || offset % sizes[blktype] != 0) {
		panic("kfree: subpage free of invalid addr %p\n", ptr);
	}

#ifdef GUARDS
	blocksize = sizes[blktype];
	smallerblocksize = blktype > 0 ? sizes[blktype - 1] : 0;
	checkguardband(ptraddr, smallerblocksize, blocksize);
#endif

	/*
	 * Clear the block to 0xdeadbeef to make it easier to detect
	 * uses of dangling pointers.
	 */
	fill_deadbeef((void *)ptraddr, sizes[blktype]);

	/*
	 * Fully checking for a double free would mean scanning the whole
	 * free list for this block; that's expensive, so we only do it
	 * when SLOW is defined and otherwise just check the list head
	 * below.
	 */

	fla = prpage + offset;
	fl = (struct freelist *)fla;
	if (pr->freelist_offset == INVALID_OFFSET) {
		fl->next = NULL;
	} else {
		fl->next = (struct freelist *)(prpage + pr->freelist_offset);

		/* this block should not already be on the free list! */
#ifdef SLOW
		{
			struct freelist *fl2;

			for (fl2 = fl->next; fl2 != NULL; fl2 = fl2->next) {
				KASSERT(fl2 != fl);
			}
		}
#else
		/* check just the head */
		KASSERT(fl != fl->next);
#endif
	}
	pr->freelist_offset = offset;
	pr->nfree++;

	KASSERT(pr->nfree <= PAGE_SIZE / sizes[blktype]);
	if (pr->nfree == PAGE_SIZE / sizes[blktype]) {
		/* Whole page is free. */
		remove_lists(pr, blktype);
		freepageref(pr);
		/* Call free_kpages without kmalloc_spinlock. */
		spinlock_release(&kmalloc_spinlock);
		free_kpages(prpage);
	}
	else {
		spinlock_release(&kmalloc_spinlock);
	}

#ifdef SLOWER /* Don't get the lock unless checksubpages does something. */
	spinlock_acquire(&kmalloc_spinlock);
	checksubpages();
	spinlock_release(&kmalloc_spinlock);
#endif

	return 0;
}
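All three examples map a request size to a size class with blocktype() and the sizes[] table, neither of which appears in the excerpt. The following is a plausible sketch under the assumption of power-of-two classes from 16 to 2048 bytes; NSIZES and the actual class sizes may differ in the real table.

/* Hypothetical size classes; the real table may differ. */
#define NSIZES 8
static const size_t sizes[NSIZES] = { 16, 32, 64, 128, 256, 512, 1024, 2048 };

/*
 * Return the index of the smallest size class that can hold a request
 * of sz bytes. Requests larger than the largest class should never
 * reach the subpage allocator; those get whole pages from alloc_kpages
 * instead.
 */
static unsigned
blocktype(size_t sz)
{
	unsigned i;

	for (i = 0; i < NSIZES; i++) {
		if (sz <= sizes[i]) {
			return i;
		}
	}
	panic("kmalloc: subpage allocator asked for %u bytes\n", (unsigned)sz);
	return 0;	/* not reached; panic does not return */
}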