Example #1
0
/*
 * Ugh, this is ugly, but we want the default case to run
 * straight through, which is why we have the ugly goto's
 */
void *kmalloc( size_t size, int priority )
{
	unsigned long flags;
	unsigned long type;
	int order, dma;
	struct block_header *p;
	struct page_descriptor *page, **pg;
	struct size_descriptor *bucket = sizes;

	if ( CURRENT_THREAD != NULL && CURRENT_THREAD->tr_nNumLockedCacheBlocks > 0 && ( priority & MEMF_NOBLOCK ) == 0 )
	{
		//printk( "Error: kmalloc() attempt to alloc memory while holding %d cache blocks locked. Could lead to deadlock\n", CURRENT_THREAD->tr_nNumLockedCacheBlocks );
		//trace_stack( 0, NULL );
	}
	/* Get order */
	order = 0;
	{
		unsigned int realsize = size + sizeof( struct block_header );

		// kmalloc() is inefficient for allocations >= 128K
		//if ( realsize > BLOCKSIZE( 12 ) )
		//{
		//	printk( "Warning: kmalloc() of oversized block (%d bytes). Could cause fragmentation problems\n", size );
		//	trace_stack( 0, NULL );
		//}

		for ( ;; )
		{
			int ordersize = BLOCKSIZE( order );

			if ( realsize <= ordersize )
				break;
			order++;
			bucket++;
			if ( ordersize )
				continue;
			printk( "kmalloc of too large a block (%d bytes).\n", ( int )size );
			return NULL;
		}
	}
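	/*
	 * `order` now indexes the smallest bucket whose block size
	 * (BLOCKSIZE( order )) fits the request plus its block_header, and
	 * `bucket` points at the matching size_descriptor in the sizes table.
	 */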

	dma = 0;
	type = MF_USED;
	pg = &bucket->firstfree;

#ifndef __ATHEOS__
	if ( priority & GFP_DMA )
	{
		dma = 1;
		type = MF_DMA;
		pg = &bucket->dmafree;
	}
#endif
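	/*
	 * __ATHEOS__ is presumably defined when building this kernel, so the
	 * GFP_DMA branch above is compiled out and every request is served
	 * from the bucket's normal freelist.
	 */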

/* Sanity check... */

	flags = spinlock_disable( &g_sMemSpinLock );
	page = *pg;
	if ( !page )
		goto no_bucket_page;

	p = page->firstfree;

	if ( p->bh_flags != MF_FREE )
		goto not_free_on_freelist;

      found_it:
	page->firstfree = p->bh_next;

	page->nfree--;
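	/* If that was the page's last free block, unlink the page from the bucket's freelist. */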
	if ( !page->nfree )
		*pg = page->next;

	spinunlock_enable( &g_sMemSpinLock, flags );
	bucket->nmallocs++;
	bucket->nbytesmalloced += size;
	p->bh_flags = type;	/* As of now this block is officially in use */
	p->bh_length = size;
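	/* The block is zeroed and added to the kernel-memory counter
	 * (g_sSysBase.ex_nKernelMemSize) before being handed out. */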

	memset( p + 1, 0, size );

	atomic_add( &g_sSysBase.ex_nKernelMemSize, size );
	return ( p + 1 );	/* Pointer arithmetic: increments past header */


      no_bucket_page:
	/*
	 * If we didn't find a page already allocated for this
	 * bucket size, we need to get one..
	 *
	 * This can be done with ints on: it is private to this invocation
	 */
	spinunlock_enable( &g_sMemSpinLock, flags );

	{
		int i, sz;

		/* sz is the size of the blocks we're dealing with */
		sz = BLOCKSIZE( order );

		page = get_kmalloc_pages( priority, bucket->gfporder, dma );
		if ( !page )
			goto no_free_page;
	      found_cached_page:
		bucket->npages++;

		page->order = order;
		/* Loop for all but last block: */
		i = ( page->nfree = bucket->nblocks ) - 1;
		p = BH( page + 1 );
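		/*
		 * Chain the page's blocks into a NULL-terminated singly linked
		 * free list; BH() appears to be a cast of a raw address to a
		 * struct block_header pointer.
		 */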

		while ( i > 0 )
		{
			i--;
			p->bh_flags = MF_FREE;
			p->bh_next = BH( ( ( long )p )+sz );
			p = p->bh_next;
		}
		/* Last block: */
		p->bh_flags = MF_FREE;
		p->bh_next = NULL;

		p = BH( page + 1 );
	}

	/*
	 * Now we're going to muck with the "global" freelist
	 * for this size: this should be uninterruptible
	 */
	flags = spinlock_disable( &g_sMemSpinLock );
	page->next = *pg;
	*pg = page;
	goto found_it;


      no_free_page:
	/*
	 * No free pages, check the kmalloc cache of
	 * pages to see if maybe we have something available
	 */
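	/*
	 * `page` is NULL at this point, so the atomic_swap() below both claims
	 * any cached page for this order and leaves the cache slot empty in a
	 * single atomic step.
	 */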
	if ( !dma && order < MAX_CACHE_ORDER )
	{
		page = ( struct page_descriptor * )atomic_swap( ( int * )( kmalloc_cache + order ), ( int )page );
		if ( page )
		{
			goto found_cached_page;
		}
	}
	return NULL;


      not_free_on_freelist:
	spinunlock_enable( &g_sMemSpinLock, flags );
	printk( "Problem: block on freelist at %08lx isn't free.\n", ( long )p );
	printk( "%p\n%p\n%p\n", __builtin_return_address( 0 ), __builtin_return_address( 1 ), __builtin_return_address( 2 ) );
	return NULL;
}
Example #2
0
/*
 * Ugh, this is ugly, but we want the default case to run
 * straight through, which is why we have the ugly goto's
 */
void *kmalloc(size_t size, int priority)
{
	unsigned long flags;
	unsigned long type;
	int order, dma;
	struct block_header *p;
	struct page_descriptor *page, **pg;
	struct size_descriptor *bucket = sizes;

	/* Get order */
	order = 0;
	{
		unsigned int realsize = size + sizeof(struct block_header);
		for (;;) {
			int ordersize = BLOCKSIZE(order);
			if (realsize <= ordersize)
				break;
			order++;
			bucket++;
			if (ordersize)
				continue;
			printk("kmalloc of too large a block (%d bytes).\n", (int) size);
			return NULL;
		}
	}

	dma = 0;
	type = MF_USED;
	pg = &bucket->firstfree;
	if (priority & GFP_DMA) {
		dma = 1;
		type = MF_DMA;
		pg = &bucket->dmafree;
	}

	priority &= GFP_LEVEL_MASK;
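	/*
	 * GFP_LEVEL_MASK presumably strips request flags such as GFP_DMA,
	 * leaving only the priority level for the checks below.
	 */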

/* Sanity check... */
	if (intr_count && priority != GFP_ATOMIC) {
		static int count = 0;
		if (++count < 5) {
			printk("kmalloc called nonatomically from interrupt %p\n",
			       __builtin_return_address(0));
			priority = GFP_ATOMIC;
		}
	}
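	/*
	 * Old-style locking: the save_flags()/cli() pair below disables local
	 * interrupts to protect the global freelists, where the AtheOS version
	 * above takes g_sMemSpinLock instead.
	 */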

	save_flags(flags);
	cli();
	page = *pg;
	if (!page)
		goto no_bucket_page;

	p = page->firstfree;
	if (p->bh_flags != MF_FREE)
		goto not_free_on_freelist;

found_it:
	page->firstfree = p->bh_next;
	page->nfree--;
	if (!page->nfree)
		*pg = page->next;
	restore_flags(flags);
	bucket->nmallocs++;
	bucket->nbytesmalloced += size;
	p->bh_flags = type;	/* As of now this block is officially in use */
	p->bh_length = size;
#ifdef SADISTIC_KMALLOC
	memset(p+1, 0xf0, size);
#endif
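	/*
	 * SADISTIC_KMALLOC appears to be a debugging aid: poisoning the fresh
	 * block with 0xf0 makes use of uninitialised memory show up quickly,
	 * whereas the AtheOS version above zeroes every allocation instead.
	 */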
	return p + 1;		/* Pointer arithmetic: increments past header */


no_bucket_page:
	/*
	 * If we didn't find a page already allocated for this
	 * bucket size, we need to get one..
	 *
	 * This can be done with ints on: it is private to this invocation
	 */
	restore_flags(flags);

	{
		int i, realpages;
		
		if (BLOCKSIZE(order) < PAGE_SIZE)
			realpages = 1;
		else
			realpages = NUM_PAGES(size + sizeof(struct block_header) +
					sizeof(struct page_descriptor));
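		/*
		 * Blocks smaller than a page are carved out of a single page;
		 * larger requests grab just enough whole pages to hold the
		 * request plus the block and page headers.
		 */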
			
		page = get_kmalloc_pages(priority, realpages, dma);
		if (!page)
			goto no_free_page;
found_cached_page:

		bucket->npages++;

		page->order = order | (realpages * PAGE_SIZE);
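		/*
		 * Both the bucket index and the allocation size are packed into
		 * one field: realpages * PAGE_SIZE is page aligned, so the low
		 * bits stay free for the order, presumably letting kfree()
		 * recover both values later.
		 */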
		/* Loop for all but last block: */
		i = (page->nfree = bucket->nblocks) - 1;
		p = BH(page + 1);
		while (i > 0) { /* doesn't happen except for small ^2 mallocs */
			i--;
			p->bh_flags = MF_FREE;
			p->bh_next = BH(((long) p) + BLOCKSIZE(order));
			p = p->bh_next;
		}
		/* Last block: */
		p->bh_flags = MF_FREE;
		p->bh_next = NULL;

		p = BH(page+1);
	}

	/*
	 * Now we're going to muck with the "global" freelist
	 * for this size: this should be uninterruptible
	 */
	cli();
	page->next = *pg;
	*pg = page;
	goto found_it;


no_free_page:
	/*
	 * No free pages, check the kmalloc cache of
	 * pages to see if maybe we have something available
	 */
	if (!dma && order < MAX_CACHE_ORDER) {
		page = xchg(kmalloc_cache+order, page);
		if (page)
			goto found_cached_page;
	}
	{
		static unsigned long last = 0;
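		/* Warn at most once every ten seconds (HZ jiffies per second),
		   and never for GFP_BUFFER or GFP_IO requests. */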
		if (priority != GFP_BUFFER && priority != GFP_IO &&
		    (last + 10 * HZ < jiffies)) {
			last = jiffies;
			printk("Couldn't get a free page.....\n");
		}
		return NULL;
	}

not_free_on_freelist:
	restore_flags(flags);
	printk("Problem: block on freelist at %08lx isn't free.\n", (long) p);
	return NULL;
}