Example #1
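/*
 * Check every pageref on the per-size lists and on the all-pages
 * list, and check that both traversals see the same number of
 * entries.
 */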
static
void
checksubpages(void)
{
	struct pageref *pr;
	int i;
	unsigned sc=0, ac=0;

	KASSERT(spinlock_do_i_hold(&kmalloc_spinlock));

	for (i=0; i<NSIZES; i++) {
		for (pr = sizebases[i]; pr != NULL; pr = pr->next_samesize) {
			checksubpage(pr);
			KASSERT(sc < NPAGEREFS);
			sc++;
		}
	}

	for (pr = allbase; pr != NULL; pr = pr->next_all) {
		checksubpage(pr);
		KASSERT(ac < NPAGEREFS);
		ac++;
	}

	KASSERT(sc==ac);
}
Example #2
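/*
 * Check that the same-size lists and the all-pages list are
 * consistent and contain the same number of pagerefs.
 */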
static
void
checksubpages(void)
{
	struct pageref *pr;
	int i;
	unsigned sc=0, ac=0;

	assert(curspl>0);

	for (i=0; i<NSIZES; i++) {
		for (pr = sizebases[i]; pr != NULL; pr = pr->next_samesize) {
			checksubpage(pr);
			assert(sc < NPAGEREFS);
			sc++;
		}
	}

	for (pr = allbase; pr != NULL; pr = pr->next_all) {
		checksubpage(pr);
		assert(ac < NPAGEREFS);
		ac++;
	}

	assert(sc==ac);
}
Example #3
/*
 * Print the allocated/freed map of a single kernel heap page.
 */
static
void
subpage_stats(struct pageref *pr)
{
	vaddr_t prpage, fla;
	struct freelist *fl;
	int blktype;
	unsigned i, n, index;
	uint32_t freemap[PAGE_SIZE / (SMALLEST_SUBPAGE_SIZE*32)];

	checksubpage(pr);
	KASSERT(spinlock_do_i_hold(&kmalloc_spinlock));

	/* clear freemap[] */
	for (i=0; i<ARRAYCOUNT(freemap); i++) {
		freemap[i] = 0;
	}

	prpage = PR_PAGEADDR(pr);
	blktype = PR_BLOCKTYPE(pr);
	KASSERT(blktype >= 0 && blktype < NSIZES);

	/* compute how many bits we need in freemap and assert we fit */
	n = PAGE_SIZE / sizes[blktype];
	KASSERT(n <= 32 * ARRAYCOUNT(freemap));

	if (pr->freelist_offset != INVALID_OFFSET) {
		fla = prpage + pr->freelist_offset;
		fl = (struct freelist *)fla;

		for (; fl != NULL; fl = fl->next) {
			fla = (vaddr_t)fl;
			index = (fla-prpage) / sizes[blktype];
			KASSERT(index<n);
			freemap[index/32] |= (1<<(index%32));
		}
	}

	kprintf("at 0x%08lx: size %-4lu  %u/%u free\n",
		(unsigned long)prpage, (unsigned long) sizes[blktype],
		(unsigned) pr->nfree, n);
	kprintf("   ");
	for (i=0; i<n; i++) {
		int val = (freemap[i/32] & (1<<(i%32)))!=0;
		kprintf("%c", val ? '.' : '*');
		if (i%64==63 && i<n-1) {
			kprintf("\n   ");
		}
	}
	kprintf("\n");
}
Example #4
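/*
 * Print the allocated/freed map of a single kernel heap page.
 */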
static
void
dumpsubpage(struct pageref *pr)
{
	vaddr_t prpage, fla;
	struct freelist *fl;
	int blktype;
	unsigned i, n, index;
	u_int32_t freemap[PAGE_SIZE / (SMALLEST_SUBPAGE_SIZE*32)];

	checksubpage(pr);
	assert(curspl>0);

	/* clear freemap[] */
	for (i=0; i<sizeof(freemap)/sizeof(freemap[0]); i++) {
		freemap[i] = 0;
	}

	prpage = PR_PAGEADDR(pr);
	blktype = PR_BLOCKTYPE(pr);

	/* compute how many bits we need in freemap and assert we fit */
	n = PAGE_SIZE / sizes[blktype];
	assert(n <= 32*sizeof(freemap)/sizeof(freemap[0]));

	if (pr->freelist_offset != INVALID_OFFSET) {
		fla = prpage + pr->freelist_offset;
		fl = (struct freelist *)fla;

		for (; fl != NULL; fl = fl->next) {
			fla = (vaddr_t)fl;
			index = (fla-prpage) / sizes[blktype];
			assert(index<n);
			freemap[index/32] |= (1<<(index%32));
		}
	}

	kprintf("at 0x%08lx: size %-4lu  %u/%u free\n", 
		(unsigned long)prpage, (unsigned long) sizes[blktype],
		(unsigned) pr->nfree, n);
	kprintf("   ");
	for (i=0; i<n; i++) {
		int val = (freemap[i/32] & (1<<(i%32)))!=0;
		kprintf("%c", val ? '.' : '*');
		if (i%64==63 && i<n-1) {
			kprintf("\n   ");
		}
	}
	kprintf("\n");
}
Example #5
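/*
 * Detach a pageref from both the same-size list for its block type
 * and the all-pages list.
 */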
static
void
remove_lists(struct pageref *pr, int blktype)
{
	struct pageref **guy;

	KASSERT(blktype>=0 && blktype<NSIZES);

	for (guy = &sizebases[blktype]; *guy; guy = &(*guy)->next_samesize) {
		checksubpage(*guy);
		if (*guy == pr) {
			*guy = pr->next_samesize;
			break;
		}
	}

	for (guy = &allbase; *guy; guy = &(*guy)->next_all) {
		checksubpage(*guy);
		if (*guy == pr) {
			*guy = pr->next_all;
			break;
		}
	}
}
Example #6
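/*
 * Free a pointer previously returned from subpage_kmalloc. If the
 * pointer is not on any heap page we recognize, return -1.
 */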
static
int
subpage_kfree(void *ptr)
{
	int blktype;		// index into sizes[] that we're using
	vaddr_t ptraddr;	// same as ptr
	struct pageref *pr;	// pageref for page we're freeing in
	vaddr_t prpage;		// PR_PAGEADDR(pr)
	vaddr_t fla;		// free list entry address
	struct freelist *fl;	// free list entry
	vaddr_t offset;		// offset into page

	ptraddr = (vaddr_t)ptr;

	spinlock_acquire(&kmalloc_spinlock);

	checksubpages();

	for (pr = allbase; pr; pr = pr->next_all) {
		prpage = PR_PAGEADDR(pr);
		blktype = PR_BLOCKTYPE(pr);

		/* check for corruption */
		KASSERT(blktype>=0 && blktype<NSIZES);
		checksubpage(pr);

		if (ptraddr >= prpage && ptraddr < prpage + PAGE_SIZE) {
			break;
		}
	}

	if (pr==NULL) {
		/* Not on any of our pages - not a subpage allocation */
		spinlock_release(&kmalloc_spinlock);
		return -1;
	}

	offset = ptraddr - prpage;

	/* Check for proper positioning and alignment */
	if (offset >= PAGE_SIZE || offset % sizes[blktype] != 0) {
		panic("kfree: subpage free of invalid addr %p\n", ptr);
	}

	/*
	 * Clear the block to 0xdeadbeef to make it easier to detect
	 * uses of dangling pointers.
	 */
	fill_deadbeef(ptr, sizes[blktype]);

	/*
	 * We probably ought to detect double-frees by checking whether
	 * the block is already on the free list. But that's expensive,
	 * so we don't.
	 */

	fla = prpage + offset;
	fl = (struct freelist *)fla;
	if (pr->freelist_offset == INVALID_OFFSET) {
		fl->next = NULL;
	} else {
		fl->next = (struct freelist *)(prpage + pr->freelist_offset);
	}
	pr->freelist_offset = offset;
	pr->nfree++;

	KASSERT(pr->nfree <= PAGE_SIZE / sizes[blktype]);
	if (pr->nfree == PAGE_SIZE / sizes[blktype]) {
		/* Whole page is free. */
		remove_lists(pr, blktype);
		freepageref(pr);
		/* Call free_kpages without kmalloc_spinlock. */
		spinlock_release(&kmalloc_spinlock);
		free_kpages(prpage);
	}
	else {
		spinlock_release(&kmalloc_spinlock);
	}

#ifdef SLOWER /* Don't get the lock unless checksubpages does something. */
	spinlock_acquire(&kmalloc_spinlock);
	checksubpages();
	spinlock_release(&kmalloc_spinlock);
#endif

	return 0;
}
Example #7
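/*
 * Allocate a block of the requested size from an existing heap page
 * of the right block type, grabbing a fresh page from alloc_kpages
 * if none of them has a free block. Returns NULL when out of memory.
 */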
static
void *
subpage_kmalloc(size_t sz)
{
	unsigned blktype;	// index into sizes[] that we're using
	struct pageref *pr;	// pageref for page we're allocating from
	vaddr_t prpage;		// PR_PAGEADDR(pr)
	vaddr_t fla;		// free list entry address
	struct freelist *volatile fl;	// free list entry
	void *retptr;		// our result

	volatile int i;

	blktype = blocktype(sz);
	sz = sizes[blktype];

	spinlock_acquire(&kmalloc_spinlock);

	checksubpages();

	for (pr = sizebases[blktype]; pr != NULL; pr = pr->next_samesize) {

		/* check for corruption */
		KASSERT(PR_BLOCKTYPE(pr) == blktype);
		checksubpage(pr);

		if (pr->nfree > 0) {

		doalloc: /* comes here after getting a whole fresh page */

			KASSERT(pr->freelist_offset < PAGE_SIZE);
			prpage = PR_PAGEADDR(pr);
			fla = prpage + pr->freelist_offset;
			fl = (struct freelist *)fla;

			retptr = fl;
			fl = fl->next;
			pr->nfree--;

			if (fl != NULL) {
				KASSERT(pr->nfree > 0);
				fla = (vaddr_t)fl;
				KASSERT(fla - prpage < PAGE_SIZE);
				pr->freelist_offset = fla - prpage;
			}
			else {
				KASSERT(pr->nfree == 0);
				pr->freelist_offset = INVALID_OFFSET;
			}

			checksubpages();

			spinlock_release(&kmalloc_spinlock);
			return retptr;
		}
	}

	/*
	 * No page of the right size available.
	 * Make a new one.
	 *
	 * We release the spinlock while calling alloc_kpages. This
	 * avoids deadlock if alloc_kpages needs to come back here.
	 * Note that this means things can change behind our back...
	 */

	spinlock_release(&kmalloc_spinlock);
	prpage = alloc_kpages(1);
	if (prpage==0) {
		/* Out of memory. */
		kprintf("kmalloc: Subpage allocator couldn't get a page\n"); 
		return NULL;
	}
	spinlock_acquire(&kmalloc_spinlock);

	pr = allocpageref();
	if (pr==NULL) {
		/* Couldn't allocate accounting space for the new page. */
		spinlock_release(&kmalloc_spinlock);
		free_kpages(prpage);
		kprintf("kmalloc: Subpage allocator couldn't get pageref\n"); 
		return NULL;
	}

	pr->pageaddr_and_blocktype = MKPAB(prpage, blktype);
	pr->nfree = PAGE_SIZE / sizes[blktype];

	/*
	 * Note: fl is volatile because the MIPS toolchain we were
	 * using in spring 2001 attempted to optimize this loop and
	 * blew it. Making fl volatile inhibits the optimization.
	 */

	fla = prpage;
	fl = (struct freelist *)fla;
	fl->next = NULL;
	for (i=1; i<pr->nfree; i++) {
		fl = (struct freelist *)(fla + i*sizes[blktype]);
		fl->next = (struct freelist *)(fla + (i-1)*sizes[blktype]);
		KASSERT(fl != fl->next);
	}
	fla = (vaddr_t) fl;
	pr->freelist_offset = fla - prpage;
	KASSERT(pr->freelist_offset == (pr->nfree-1)*sizes[blktype]);

	pr->next_samesize = sizebases[blktype];
	sizebases[blktype] = pr;

	pr->next_all = allbase;
	allbase = pr;

	/* This is kind of cheesy, but avoids duplicating the alloc code. */
	goto doalloc;
}
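These examples use struct pageref and the PR_PAGEADDR/PR_BLOCKTYPE/MKPAB packing macros without showing their definitions. Below is a minimal sketch of plausible definitions, assuming the page address and block type index are packed into a single word; the field widths and exact masks are assumptions, not taken from the examples.

/* Sketch only: assumed definitions consistent with the examples above. */
struct pageref {
	struct pageref *next_samesize;	/* chain under sizebases[blktype] */
	struct pageref *next_all;	/* chain under allbase */
	vaddr_t pageaddr_and_blocktype;	/* page address | block type index */
	uint16_t freelist_offset;	/* first free block, or INVALID_OFFSET */
	uint16_t nfree;			/* free blocks remaining on the page */
};

/*
 * Heap pages are page-aligned, so the low bits of the address are
 * free to hold the block type. PAGE_FRAME is assumed to mask off
 * the in-page bits.
 */
#define PR_PAGEADDR(pr)  ((pr)->pageaddr_and_blocktype & PAGE_FRAME)
#define PR_BLOCKTYPE(pr) ((pr)->pageaddr_and_blocktype & ~PAGE_FRAME)
#define MKPAB(pa, blk)   (((pa) & PAGE_FRAME) | ((blk) & ~PAGE_FRAME))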
Example #8
/*
 * Free a pointer previously returned from subpage_kmalloc. If the
 * pointer is not on any heap page we recognize, return -1.
 */
static
int
subpage_kfree(void *ptr)
{
	int blktype;		// index into sizes[] that we're using
	vaddr_t ptraddr;	// same as ptr
	struct pageref *pr;	// pageref for page we're freeing in
	vaddr_t prpage;		// PR_PAGEADDR(pr)
	vaddr_t fla;		// free list entry address
	struct freelist *fl;	// free list entry
	vaddr_t offset;		// offset into page
#ifdef GUARDS
	size_t blocksize, smallerblocksize;
#endif

	ptraddr = (vaddr_t)ptr;
#ifdef GUARDS
	if (ptraddr % PAGE_SIZE == 0) {
		/*
		 * With guard bands, all client-facing subpage
		 * pointers are offset by GUARD_PTROFFSET (which is 4)
		 * from the underlying blocks and are therefore not
		 * page-aligned. So a page-aligned pointer is not one
		 * of ours. Catch this up front, as otherwise
		 * subtracting GUARD_PTROFFSET could give a pointer on
		 * a page we *do* own, and then we'll panic because
		 * it's not a valid one.
		 */
		return -1;
	}
	ptraddr -= GUARD_PTROFFSET;
#endif
#ifdef LABELS
	if (ptraddr % PAGE_SIZE == 0) {
		/* ditto */
		return -1;
	}
	ptraddr -= LABEL_PTROFFSET;
#endif

	spinlock_acquire(&kmalloc_spinlock);

	checksubpages();

	for (pr = allbase; pr; pr = pr->next_all) {
		prpage = PR_PAGEADDR(pr);
		blktype = PR_BLOCKTYPE(pr);
		KASSERT(blktype >= 0 && blktype < NSIZES);

		/* check for corruption */
		KASSERT(blktype>=0 && blktype<NSIZES);
		checksubpage(pr);

		if (ptraddr >= prpage && ptraddr < prpage + PAGE_SIZE) {
			break;
		}
	}

	if (pr==NULL) {
		/* Not on any of our pages - not a subpage allocation */
		spinlock_release(&kmalloc_spinlock);
		return -1;
	}

	offset = ptraddr - prpage;

	/* Check for proper positioning and alignment */
	if (offset >= PAGE_SIZE || offset % sizes[blktype] != 0) {
		panic("kfree: subpage free of invalid addr %p\n", ptr);
	}

#ifdef GUARDS
	blocksize = sizes[blktype];
	smallerblocksize = blktype > 0 ? sizes[blktype - 1] : 0;
	checkguardband(ptraddr, smallerblocksize, blocksize);
#endif

	/*
	 * Clear the block to 0xdeadbeef to make it easier to detect
	 * uses of dangling pointers.
	 */
	fill_deadbeef((void *)ptraddr, sizes[blktype]);

	/*
	 * We probably ought to detect double-frees by checking whether
	 * the block is already on the free list. But that's expensive,
	 * so we don't.
	 */

	fla = prpage + offset;
	fl = (struct freelist *)fla;
	if (pr->freelist_offset == INVALID_OFFSET) {
		fl->next = NULL;
	} else {
		fl->next = (struct freelist *)(prpage + pr->freelist_offset);

		/* this block should not already be on the free list! */
#ifdef SLOW
		{
			struct freelist *fl2;

			for (fl2 = fl->next; fl2 != NULL; fl2 = fl2->next) {
				KASSERT(fl2 != fl);
			}
		}
#else
		/* check just the head */
		KASSERT(fl != fl->next);
#endif
	}
	pr->freelist_offset = offset;
	pr->nfree++;

	KASSERT(pr->nfree <= PAGE_SIZE / sizes[blktype]);
	if (pr->nfree == PAGE_SIZE / sizes[blktype]) {
		/* Whole page is free. */
		remove_lists(pr, blktype);
		freepageref(pr);
		/* Call free_kpages without kmalloc_spinlock. */
		spinlock_release(&kmalloc_spinlock);
		free_kpages(prpage);
	}
	else {
		spinlock_release(&kmalloc_spinlock);
	}

#ifdef SLOWER /* Don't get the lock unless checksubpages does something. */
	spinlock_acquire(&kmalloc_spinlock);
	checksubpages();
	spinlock_release(&kmalloc_spinlock);
#endif

	return 0;
}
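For context, here is a minimal sketch of how a kmalloc/kfree front end might dispatch between the subpage allocator and whole-page allocations. LARGEST_SUBPAGE_SIZE and the exact dispatch policy are assumptions; the -1 return from subpage_kfree (see Example #8) is what signals that a pointer must belong to a whole-page allocation.

/*
 * Sketch only: an assumed front end, not shown in the examples.
 * LARGEST_SUBPAGE_SIZE is taken to be just past the largest entry
 * in sizes[].
 */
void *
kmalloc(size_t sz)
{
	if (sz >= LARGEST_SUBPAGE_SIZE) {
		/* Too big for the subpage allocator: use whole pages. */
		unsigned long npages = (sz + PAGE_SIZE - 1) / PAGE_SIZE;
		vaddr_t address = alloc_kpages(npages);
		if (address == 0) {
			return NULL;
		}
		return (void *)address;
	}
	return subpage_kmalloc(sz);
}

void
kfree(void *ptr)
{
	if (ptr == NULL) {
		return;
	}
	if (subpage_kfree(ptr)) {
		/* Not a subpage block; must be a whole-page allocation. */
		KASSERT((vaddr_t)ptr % PAGE_SIZE == 0);
		free_kpages((vaddr_t)ptr);
	}
}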