Example #1
File: slob.c  Project: 7799/linux
void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
	void *b;

	flags &= gfp_allowed_mask;

	lockdep_trace_alloc(flags);

	if (c->size < PAGE_SIZE) {
		b = slob_alloc(c->size, flags, c->align, node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
					    SLOB_UNITS(c->size) * SLOB_UNIT,
					    flags, node);
	} else {
		b = slob_new_pages(flags, get_order(c->size), node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
					    PAGE_SIZE << get_order(c->size),
					    flags, node);
	}

	if (b && c->ctor)
		c->ctor(b);

	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
	return b;
}
Example #2
/* Check whether some block on this page is big enough for our request. */
int check_block(struct slob_page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);	
	slobidx_t avail;

	/* Walk the free list until we find a block that fits the request. */
	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
		
		avail = slob_units(cur);
		
		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			return 1;
		}
		
		/* If this is the last block in the list, exit the loop. */
		if (slob_last(cur)) {
			break;
		}
	}
	return 0;
}
Example #3
File: slob.c  Project: fusion2004/cop4610
/*
 * slob_alloc: entry point into the slob allocator.
 *
 * We modified slob_alloc and added new local variables:
 *   1.) temp_amt_free accumulates the free units found on each page
 *       (used by the system call sys_get_slob_amt_free);
 *   2.) best_sp points to the page with the best fit;
 *   3.) best_fit / current_fit hold fit scores, where a smaller number
 *       means a better fit; best_fit is the best score seen so far and
 *       current_fit is the score of the page currently being examined.
 *
 * While iterating through the free page list, our slob_alloc will:
 *   1.) add the page's free units to temp_amt_free;
 *   2.) call the helper function slob_page_best_fit_check, which
 *       returns a score stored in current_fit;
 *   3.) act on current_fit: 0 means a perfect fit, so we break out of
 *       the loop and allocate there; -1 means no block on this page
 *       fits, so we continue the loop; any positive value is compared
 *       against best_fit, and if it is smaller (or best_fit has not
 *       been set yet) the current page becomes the new best page.
 *
 * Once the loop is over, we try to allocate on the best page if a fit
 * was found. Otherwise we do not have enough space and must allocate a
 * new page. That part of slob_alloc is mostly unchanged, except for
 * updating the arrays used by the system calls: when a new page has to
 * be allocated, we set amt_claimed at the position of the counter to
 * the size of the request (in bytes), set amt_free at the position of
 * the counter to the total free units accumulated from the list
 * (converted to bytes), and then increment the counter.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct slob_page *sp;
	struct list_head *prev;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	// Lab 3 - Statistics
	long temp_amt_free = 0;

	struct slob_page *best_sp = NULL;
	int best_fit = -1;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, slob_list, list) {
		int current_fit = -1;

		// Lab 3 - Statistics
		temp_amt_free = temp_amt_free + sp->units;

#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != -1 && page_to_nid(&sp->page) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

#ifdef SLOB_BEST_FIT_ALG
		current_fit = slob_page_best_fit_check(sp, size, align);
		if (current_fit == 0) {
			best_sp = sp;
			best_fit = current_fit;
			break;
		} else if (current_fit > 0 && (best_fit == -1 || current_fit < best_fit)) {
			best_sp = sp;
			best_fit = current_fit;
		}
		continue;
	}
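
The listing above is cut off inside the page-scanning loop, but the scoring convention its header comment describes (0 = perfect fit, -1 = no block fits, a positive value = leftover units, smaller is better) can be shown in isolation. The following is a small self-contained user-space sketch with a hypothetical free_block type; it only illustrates that convention and is not the project's code.

#include <stddef.h>

/* hypothetical free-list node: a run of free allocation units */
struct free_block {
	size_t units;             /* free units in this block */
	struct free_block *next;  /* next free block, NULL if last */
};

/* Fit score for a request of `units`: -1 if no block is big enough,
 * 0 if some block fits exactly, otherwise the smallest leftover seen. */
static long fit_score(const struct free_block *head, size_t units)
{
	long best = -1;

	for (const struct free_block *cur = head; cur; cur = cur->next) {
		if (cur->units < units)
			continue;                       /* this block does not fit */

		long leftover = (long)(cur->units - units);

		if (leftover == 0)
			return 0;                       /* perfect fit: stop early */
		if (best < 0 || leftover < best)
			best = leftover;                /* smallest leftover so far */
	}
	return best;
}

The page with the smallest non-negative score then becomes best_sp, and slob_alloc falls back to allocating a new page when every page scores -1.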
Example #4
File: slob.c  Project: 7799/linux
/*
 * Allocate a slob block within a given slob_page sp.
 */
static void *slob_page_alloc(struct page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);

	for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->freelist = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->freelist = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			return cur;
		}
		if (slob_last(cur))
			return NULL;
	}
}
Example #5
File: slob.c  Project: 7799/linux
/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
	struct page *sp;
	int align;
	unsigned int *m;

	BUG_ON(!block);
	if (unlikely(block == ZERO_SIZE_PTR))
		return 0;

	sp = virt_to_page(block);
	if (unlikely(!PageSlab(sp)))
		return PAGE_SIZE << compound_order(sp);

	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	m = (unsigned int *)(block - align);
	return SLOB_UNITS(*m) * SLOB_UNIT;
}
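
As the comment notes, ksize() is only valid for kmalloc()'ed memory. A minimal usage sketch (hypothetical module code, not taken from the listings above) showing that the reported size is the SLOB-rounded usable size rather than the requested size:

#include <linux/slab.h>
#include <linux/printk.h>

static void ksize_demo(void)
{
	/* request an odd size; SLOB stores the size rounded up to whole units */
	void *p = kmalloc(100, GFP_KERNEL);

	if (!p)
		return;

	/* ksize() reports the usable (rounded-up) size, >= 100 here */
	pr_info("requested 100 bytes, usable %zu bytes\n", ksize(p));
	kfree(p);
}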
Example #6
asmlinkage long sys_slob_claimed(void)
{
	long slob_total = SLOB_UNITS(PAGE_SIZE) * slob_page_count;
	return slob_total;
}
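
sys_slob_claimed reports the total units claimed by SLOB (SLOB_UNITS(PAGE_SIZE) per page, times slob_page_count). A hedged user-space sketch of how such a project-specific syscall might be invoked; __NR_slob_claimed below is a placeholder number chosen purely for illustration, since the real number would come from the project's syscall table and is not shown in these examples.

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* hypothetical syscall number, for illustration only */
#ifndef __NR_slob_claimed
#define __NR_slob_claimed 337
#endif

int main(void)
{
	long claimed_units = syscall(__NR_slob_claimed);

	printf("SLOB units claimed: %ld\n", claimed_units);
	return 0;
}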
Example #7
/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct slob_page *sp = NULL;
	struct slob_page *sp_t;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp_t, slob_list, list) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != -1 && page_to_nid(&sp_t->page) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp_t->units < SLOB_UNITS(size))
			/* Not enough room */
			continue;

		if (sp == NULL)
			sp = sp_t;

		if (sp_t->units < sp->units)
			/* Get the smallest slob_page that
 			 * is large enough for our needs */
			sp = sp_t;
	}

	/* Attempt to alloc */
	if (sp != NULL)
		b = slob_page_alloc(sp, size, align);
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = slob_page(b);
		set_slob_page(sp);
		
		/* We allocated a new page; increment the count */
		slob_page_count++;

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->free = b;
		INIT_LIST_HEAD(&sp->list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	return b;
}
Example #8
/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	
	struct slob_page *sp;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;
	int i;
	unsigned long int count_free;
	
#ifdef BESTFIT_PAGE
	slobidx_t min,prev_min;
	struct slob_page *curr;
	int flag_if;
	int check;
#else
	struct list_head *prev;
#endif
	/* Initialize total_alloc and total_free on first use. */
	if (flag_mem == 0) {
		total_alloc = 0;
		total_free = 0;
		flag_mem = 1;
	}

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	
	
	counter_print++;
#ifdef BESTFIT_PAGE
	
	flag_if = 0;
	curr = NULL;
	min = 0;
	prev_min = SLOB_UNITS(size);
	
	/* Walk the list until we find the best page. */
	list_for_each_entry(sp, slob_list, list) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != -1 && page_to_nid(&sp->page) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < prev_min)
			continue;
		if (flag_if == 0) {
			check = check_block(sp, size, align);
			if (check) {
				min = sp->units;
				curr = sp;
				flag_if = 1;
			}
		} else if (sp->units <= min) {
			check = check_block(sp, size, align);
			if (check) {
				min = sp->units;
				curr = sp;
			}
		}
	}
		
	/* Call slob_page_alloc on the best page, if one was found. */
	if (curr != NULL)
		b = slob_page_alloc(curr, size, align);
	else
		b = NULL;


#else

	list_for_each_entry(sp, slob_list, list) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != -1 && page_to_nid(&sp->page) != node)
			continue;
		
#endif
		
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		/* Attempt to alloc */
		prev = sp->list.prev;
		b = slob_page_alloc(sp, size, align);
		if (!b)
			continue;

		/* Improve fragment distribution and reduce our average
		 * search time by starting our next search here. (see
		 * Knuth vol 1, sec 2.5, pg 449) */
		if (prev != slob_list->prev &&
				slob_list->next != prev->next)
			list_move_tail(slob_list, prev->next);
		break;
	}
#endif
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = slob_page(b);
		set_slob_page(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->free = b;
		INIT_LIST_HEAD(&sp->list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	
	/* Walk all three size lists to count the total free units across all pages. */
	count_free = 0;
	for (i = 0; i < 3; i++) {
		if (i == 0)
			slob_list = &free_slob_small;
		else if (i == 1)
			slob_list = &free_slob_medium;
		else
			slob_list = &free_slob_large;
		list_for_each_entry(sp, slob_list, list) {
			count_free = count_free + sp->units;
		}
	}
Example #9
/*
 * Allocate a slob block within a given slob_page sp.
 */
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);
#ifdef BESTFIT_BLOCK
	slob_t *min_cur = NULL, *min_aligned = NULL, *min_prev;
	slobidx_t min_avail = 0;
	int flag_if = 0, min_delta = 0;

	slobidx_t avail;
#endif
	
#ifdef BESTFIT_BLOCK
	
	if((counter_print%print_iteration) == 0){
		printk("\nslob_Request: %d\n",units);
		printk("slob_alloc: Candidate blocks size:");
	}
	
	/* Walk the free list until we find the best block. */
	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {	
		
		avail = slob_units(cur);
		
		if((counter_print%print_iteration) == 0){
			printk(" %d",avail);
		}
		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			/* Initialize the "min" tracking variables on the first match. */
			if (!flag_if) {
				min_avail = slob_units(cur);
				min_cur = cur;
				min_aligned = aligned;
				min_delta = delta;
				min_prev = prev;
				flag_if = 1;
			}
			else {
				/* If a smaller fitting block is found, update the "min" values. */
				if (min_avail > avail) {
					min_avail = avail;
					min_cur = cur;
					min_aligned = aligned;
					min_delta = delta;
					min_prev = prev;
				}
			}
		}
		
		/* If this is the last block in the list, exit the loop. */
		if (slob_last(cur)) {
			break;
		}
	}
	
	/* If a block that fits the request was found, allocate from it. */
	if (min_avail != 0) {
		slob_t *next;
		cur = min_cur;
		avail = min_avail;
		delta = min_delta;
		aligned = min_aligned;
		prev = min_prev;
		
		if (delta) { /* need to fragment head to align? */
			next = slob_next(cur);
			set_slob(aligned, avail - delta, next);
			set_slob(cur, delta, aligned);
			prev = cur;
			cur = aligned;
			avail = slob_units(cur);
		}
		
		next = slob_next(cur);
		if (avail == units) { /* exact fit? unlink. */
			if (prev)
				set_slob(prev, slob_units(prev), next);
			else
				sp->free = next;
		} 
		else { /* fragment */
			if (prev)
				set_slob(prev, slob_units(prev), cur + units);
			else
				sp->free = cur + units;
			set_slob(cur + units, avail - units, next);
		}

		sp->units -= units;
		if (!sp->units)
			clear_slob_page_free(sp);
		if((counter_print%print_iteration) == 0){
			printk("\nslob_alloc: Best Fit: %d\n",min_avail);
		}
		return cur;
	} else {
		if((counter_print%print_iteration) == 0){
			printk("\nslob_alloc: Best Fit: None\n");
		}
		return NULL;
	}
	
	
#else
	if((counter_print%print_iteration) == 0){
		printk("\nslob_Request: %d\n",units);
		printk("slob_alloc: Candidate blocks size:");
	}
	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {	
		slobidx_t avail = slob_units(cur);
		if((counter_print%print_iteration) == 0){
			printk(" %d",avail);
		}
		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;
			
			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->free = next;
			} 
			else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->free = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			if((counter_print%print_iteration) == 0){
				printk("\nslob_alloc: First Fit is the last available\n");
			}
			return cur;
		}
		if (slob_last(cur)){
			if((counter_print%print_iteration) == 0){
				printk("\nslob_alloc: First Fit: None\n");
			}
			return NULL;
		}
	}
#endif
}
Example #10
File: slob.c  Project: 7799/linux
/*
 * slob_free: entry point into the slob allocator.
 */
static void slob_free(void *block, int size)
{
	struct page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;
	struct list_head *slob_list;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = virt_to_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		spin_unlock_irqrestore(&slob_lock, flags);
		__ClearPageSlab(sp);
		page_mapcount_reset(sp);
		slob_free_pages(b, 0);
		return;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->freelist = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		if (size < SLOB_BREAK1)
			slob_list = &free_slob_small;
		else if (size < SLOB_BREAK2)
			slob_list = &free_slob_medium;
		else
			slob_list = &free_slob_large;
		set_slob_page_free(sp, slob_list);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find reinsertion
	 * point.
	 */
	sp->units += units;

	if (b < (slob_t *)sp->freelist) {
		if (b + units == sp->freelist) {
			units += slob_units(sp->freelist);
			sp->freelist = slob_next(sp->freelist);
		}
		set_slob(b, units, sp->freelist);
		sp->freelist = b;
	} else {
		prev = sp->freelist;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}
Example #11
File: slob.c  Project: 7799/linux
/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct page *sp;
	struct list_head *prev;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, slob_list, lru) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		/* Attempt to alloc */
		prev = sp->lru.prev;
		b = slob_page_alloc(sp, size, align);
		if (!b)
			continue;

		/* Improve fragment distribution and reduce our average
		 * search time by starting our next search here. (see
		 * Knuth vol 1, sec 2.5, pg 449) */
		if (prev != slob_list->prev &&
				slob_list->next != prev->next)
			list_move_tail(slob_list, prev->next);
		break;
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = virt_to_page(b);
		__SetPageSlab(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->freelist = b;
		INIT_LIST_HEAD(&sp->lru);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	return b;
}
Example #12
static void slob_free(void *block, int size)
{
	struct page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;
	struct list_head *slob_list;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = slob_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		spin_unlock_irqrestore(&slob_lock, flags);
		clear_slob_page(sp);
		free_slob_page(sp);
		slob_free_pages(b, 0);
		return;
	}

	if (!slob_page_free(sp)) {
		
		sp->units = units;
		sp->freelist = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		if (size < SLOB_BREAK1)
			slob_list = &free_slob_small;
		else if (size < SLOB_BREAK2)
			slob_list = &free_slob_medium;
		else
			slob_list = &free_slob_large;
		set_slob_page_free(sp, slob_list);
		goto out;
	}

	sp->units += units;

	if (b < (slob_t *)sp->freelist) {
		if (b + units == sp->freelist) {
			units += slob_units(sp->freelist);
			sp->freelist = slob_next(sp->freelist);
		}
		set_slob(b, units, sp->freelist);
		sp->freelist = b;
	} else {
		prev = sp->freelist;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}
Example #13
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct page *sp;
	struct list_head *prev;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	
	list_for_each_entry(sp, slob_list, list) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
			continue;
#endif
		
		if (sp->units < SLOB_UNITS(size))
			continue;

		
		prev = sp->list.prev;
		b = slob_page_alloc(sp, size, align);
		if (!b)
			continue;

		if (prev != slob_list->prev &&
				slob_list->next != prev->next)
			list_move_tail(slob_list, prev->next);
		break;
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = slob_page(b);
		set_slob_page(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->freelist = b;
		INIT_LIST_HEAD(&sp->list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	return b;
}
Example #14
File: slob.c  Project: fusion2004/cop4610
/*
 * Allocate a slob block within a given slob_page sp.
 *
 * We modified slob_page_alloc to find the best-fitting block on a
 * given page. We are aware that this means a page's free blocks are
 * scanned twice (once by the helper, and again by this function). We
 * keep a "best" copy of each of the function's tracking variables,
 * walk the entire block list, and then allocate from and return the
 * best block found.
 */
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);

	slob_t *best_prev = NULL, *best_cur = NULL, *best_aligned = NULL;
	int best_delta = 0;
	slobidx_t best_fit = 0;

	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
#ifdef SLOB_BEST_FIT_ALG
		if (avail >= units + delta && (best_cur == NULL || avail - (units + delta) < best_fit) ) { /* room enough? */
#else
		if (avail >= units + delta) { /* room enough? */
#endif
			best_prev = prev;
			best_cur = cur;
			best_aligned = aligned;
			best_delta = delta;
			best_fit = avail - (units + delta);

#ifdef SLOB_BEST_FIT_ALG
		}
		if (slob_last(cur)) {
			if (best_cur != NULL) {
#endif
			slob_t *best_next = NULL;
			slobidx_t best_avail = slob_units(best_cur);

			if (best_delta) { /* need to fragment head to align? */
				best_next = slob_next(best_cur);
				set_slob(best_aligned, best_avail - best_delta, best_next);
				set_slob(best_cur, best_delta, best_aligned);
				best_prev = best_cur;
				best_cur = best_aligned;
				best_avail = slob_units(best_cur);
			}

			best_next = slob_next(best_cur);
			if (best_avail == units) { /* exact fit? unlink. */
				if (best_prev)
					set_slob(best_prev, slob_units(best_prev), best_next);
				else
					sp->free = best_next;
			} else { /* fragment */
				if (best_prev)
					set_slob(best_prev, slob_units(best_prev), best_cur + units);
				else
					sp->free = best_cur + units;
				set_slob(best_cur + units, best_avail - units, best_next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			return best_cur;

#ifdef SLOB_BEST_FIT_ALG
			}
#else
		}
		if (slob_last(cur)) {
#endif
			return NULL;
		}
	}
}

/*
 * The helper function slob_page_best_fit_check walks the page's list
 * of blocks and returns a number that is either -1, 0, or a positive
 * integer: -1 means no block is big enough, 0 means a perfectly
 * fitting block, and a positive integer is the amount that would be
 * left over in the block if the allocation happened there. For the
 * best-fit algorithm we want this number to be 0, or otherwise as
 * small as possible.
 */
static int slob_page_best_fit_check(struct slob_page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);

	slob_t *best_cur = NULL;
	slobidx_t best_fit = 0;

	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta && (best_cur == NULL || avail - (units + delta) < best_fit) ) { /* room enough? */
			best_cur = cur;
			best_fit = avail - (units + delta);
			if (best_fit == 0)
				return 0;
		}
		if (slob_last(cur)) {
			if (best_cur != NULL) 
				return best_fit;
			
			return -1;
		}
	}
}
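
Example #14 folds the same best-fit idea into the allocation path itself: a single pass keeps "best_" copies of the loop variables and allocates from the winning block at the end. Below is a compact user-space sketch of just the selection step, reusing the hypothetical free_block type from the sketch after Example #3; it illustrates the technique and is not the project's code.

/* Return the best-fitting block for `units`, or NULL if none fits.
 * An exact fit wins immediately; otherwise the smallest leftover wins. */
static struct free_block *best_fit_block(struct free_block *head, size_t units)
{
	struct free_block *best = NULL;
	size_t best_leftover = 0;

	for (struct free_block *cur = head; cur; cur = cur->next) {
		if (cur->units < units)
			continue;                       /* too small for the request */

		size_t leftover = cur->units - units;

		if (leftover == 0)
			return cur;                     /* exact fit: the unlink case */
		if (!best || leftover < best_leftover) {
			best = cur;                     /* best candidate so far */
			best_leftover = leftover;
		}
	}
	return best;                                    /* NULL means "try another page" */
}

The kernel version additionally has to account for the alignment delta and then split ("fragment") or unlink the chosen block, which is the logic shown in the exact-fit and fragment branches of slob_page_alloc above.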