Example #1
int __kfree( void *__ptr )
{
	int dma;
	unsigned long flags;
	unsigned int order;
	struct page_descriptor *page, **pg;
	struct size_descriptor *bucket;

	if ( !__ptr )
		goto null_kfree;
#define ptr ((struct block_header *) __ptr)
	page = PAGE_DESC( ptr );
	__ptr = ptr - 1;	/* step back: "ptr" now names the block header in front of the caller's pointer */
	/* The page descriptor's next pointer must be page-aligned; anything else means this was not kmalloc()ed memory */
	if ( ~PAGE_MASK & ( unsigned long )page->next )
		goto bad_order;
	order = page->order;
	if ( order >= sizeof( sizes ) / sizeof( sizes[0] ) )
		goto bad_order;
	bucket = sizes + order;	/* the size bucket this page belongs to */
	dma = 0;
	pg = &bucket->firstfree;

	/* DMA blocks live on the bucket's separate DMA free list */
	if ( ptr->bh_flags == MF_DMA )
	{
		dma = 1;
		ptr->bh_flags = MF_USED;	/* normalize so the MF_USED check below passes */
		pg = &bucket->dmafree;
	}

	if ( ptr->bh_flags != MF_USED )
	{
		goto bad_order;
	}
	flags = spinlock_disable( &g_sMemSpinLock );	/* lock the allocator; flags restores the interrupt state later */
	ptr->bh_flags = MF_FREE;	/* As of now this block is officially free */

	atomic_sub( &g_sSysBase.ex_nKernelMemSize, ptr->bh_length );

	bucket->nfrees++;
	bucket->nbytesmalloced -= ptr->bh_length;

	/* Push the freed block onto its page's free-block list */
	ptr->bh_next = page->firstfree;
	page->firstfree = ptr;
	if ( !page->nfree++ )
	{
		// Page went from full to one free block: put it on the freelist.
		if ( bucket->nblocks == 1 )
			goto free_page;
		page->next = *pg;
		*pg = page;
	}
	// If page is completely free, free it
	if ( page->nfree == bucket->nblocks )
	{
		/* Unlink the page from the bucket's free list before releasing it */
		for ( ;; )
		{
			struct page_descriptor *tmp = *pg;

			if ( !tmp )
			{
				goto not_on_freelist;
			}
			if ( tmp == page )
			{
				break;
			}
			pg = &tmp->next;
		}
		*pg = page->next;
free_page:
		bucket->npages--;
		free_kmalloc_pages( page, bucket->gfporder, dma );
	}

	spinunlock_enable( &g_sMemSpinLock, flags );

null_kfree:
	return ( 0 );

bad_order:
	printk( "kfree of non-kmalloced memory: %p, next= %p, order=%d\n", ptr + 1, page->next, page->order );
	return ( -EINVAL );

not_on_freelist:
	printk( "Ooops. page %p doesn't show on freelist.\n", page );
	spinunlock_enable( &g_sMemSpinLock, flags );
	return ( -EINVAL );
}
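
Neither example defines the bookkeeping types it manipulates. The sketch below is reconstructed solely from the fields the code above touches; the names match the code, but field order, exact types, and any omitted members are assumptions (in the classic implementation, bh_length and bh_next typically share a union, since a block is never simultaneously in use and on a free list).

/* Reconstructed sketch only; real definitions may differ. */
struct block_header {
	unsigned long bh_flags;		/* MF_USED, MF_FREE or MF_DMA */
	unsigned long bh_length;	/* payload size of an allocated block */
	struct block_header *bh_next;	/* next free block within the same page */
};

struct page_descriptor {
	struct page_descriptor *next;	/* next page on the bucket's free list */
	struct block_header *firstfree;	/* head of this page's free-block list */
	int order;			/* index into the global sizes[] table */
	int nfree;			/* free blocks currently in this page */
};

struct size_descriptor {
	struct page_descriptor *firstfree;	/* pages with free non-DMA blocks */
	struct page_descriptor *dmafree;	/* pages with free DMA-capable blocks */
	int nblocks;				/* blocks that fit in one page of this size */
	int npages;				/* pages currently owned by this bucket */
	unsigned long nfrees;			/* statistics: total number of frees */
	unsigned long nbytesmalloced;		/* statistics: bytes still allocated */
	int gfporder;				/* page-allocation order for this size */
};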
Example #2
void kfree(void *__ptr)
{
	int dma;
	unsigned long flags;
	unsigned int order;
	struct page_descriptor *page, **pg;
	struct size_descriptor *bucket;

	if (!__ptr)
		goto null_kfree;
#define ptr ((struct block_header *) __ptr)
	page = PAGE_DESC(ptr);
	__ptr = ptr - 1;	/* "ptr" now names the block header in front of the caller's pointer */
	if (~PAGE_MASK & (unsigned long)page->next)
		goto bad_order;
	order = (page->order & ~PAGE_MASK);
	if (order >= sizeof(sizes) / sizeof(sizes[0]))
		goto bad_order;
	bucket = sizes + order;
	dma = 0;
	pg = &bucket->firstfree;
	if (ptr->bh_flags == MF_DMA) {
		dma = 1;
		ptr->bh_flags = MF_USED;
		pg = &bucket->dmafree;
	}
	if (ptr->bh_flags != MF_USED)
		goto bad_order;
	ptr->bh_flags = MF_FREE;	/* As of now this block is officially free */
#ifdef SADISTIC_KMALLOC
	memset(ptr+1, 0xe0, ptr->bh_length);	/* poison the freed payload to expose use-after-free bugs */
#endif
	save_flags(flags);
	cli();		/* uniprocessor locking: disable local interrupts around the list updates */

	bucket->nfrees++;
	bucket->nbytesmalloced -= ptr->bh_length;

	ptr->bh_next = page->firstfree;
	page->firstfree = ptr;
	if (!page->nfree++) {
/* Page went from full to one free block: put it on the freelist. */
		if (bucket->nblocks == 1)
			goto free_page;
		page->next = *pg;
		*pg = page;
	}
/* If page is completely free, free it */
	if (page->nfree == bucket->nblocks) {
		for (;;) {
			struct page_descriptor *tmp = *pg;
			if (!tmp)
				goto not_on_freelist;
			if (tmp == page)
				break;
			pg = &tmp->next;
		}
		*pg = page->next;
free_page:
		bucket->npages--;
		free_kmalloc_pages(page, dma);
	}
	restore_flags(flags);
null_kfree:
	return;

bad_order:
	printk("kfree of non-kmalloced memory: %p, next= %p, order=%d\n",
	       ptr+1, page->next, page->order);
	return;

not_on_freelist:
	restore_flags(flags);
	printk("Ooops. page %p doesn't show on freelist.\n", page);
}
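
The two versions differ mainly in how they serialize access: Example #1 takes a spinlock (recording the interrupt state in flags), while Example #2 simply disables local interrupts with cli(), which was sufficient on uniprocessor kernels. Either way, kfree() trusts the block_header sitting directly in front of the pointer it receives, so it must only ever see pointers returned by the matching allocator. Below is a minimal, hypothetical caller; the two-argument kmalloc(size, priority) form is assumed from the classic API and is not shown on this page.

/* Hypothetical usage sketch; buf points just past a hidden block_header. */
void example_caller(void)
{
	char *buf = kmalloc(128, GFP_KERNEL);

	if (buf == NULL)
		return;		/* allocation failed */
	buf[0] = 'x';
	kfree(buf);		/* finds the header, checks MF_USED, and links the
				 * block back onto its page's free list */
}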