Example #1
/* helper: check whether the given page contains a block that can fit our request */
int check_block(struct slob_page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);
	slobidx_t avail;
	
	/* traverse the free list until we find a block that fits the request */
	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
		
		avail = slob_units(cur);
		
		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			return 1;
		}
		
		/* if it is the last block in the list, exit the for loop */
		if (slob_last(cur)) {
			break;
		}
	}
	return 0;
}
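For context, here is a minimal sketch of how a helper like check_block might be driven from the page-scanning loop in slob_alloc; the list free_slob_small, the sp->units shortcut, and the fall-through to slob_page_alloc mirror the stock allocator, but this caller itself is an assumption and not part of the example above (locking omitted for brevity):

/* Hypothetical caller: only descend into slob_page_alloc() for a page
 * that check_block() says can satisfy the request. */
static void *pick_page_and_alloc(size_t size, int align)
{
	struct slob_page *sp;
	void *b = NULL;

	list_for_each_entry(sp, &free_slob_small, list) {
		/* not enough free units on this page at all */
		if (sp->units < SLOB_UNITS(size))
			continue;
		/* no single free block on this page fits the request */
		if (!check_block(sp, size, align))
			continue;
		b = slob_page_alloc(sp, size, align);
		if (b)
			break;
	}
	return b;
}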
Example #2
File: slob.c Project: 7799/linux
/*
 * Allocate a slob block within a given slob_page sp.
 */
static void *slob_page_alloc(struct page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);

	for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->freelist = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->freelist = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			return cur;
		}
		if (slob_last(cur))
			return NULL;
	}
}
Example #3
static void slob_free_pages(void *b, int order)
{
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += 1 << order;
	
	/* subtract the size of the freed page(s) from total_alloc */
	total_alloc -= SLOB_UNITS(PAGE_SIZE << order);
	free_pages((unsigned long)b, order);
}
Example #4
static void *slob_new_pages(gfp_t gfp, int order, int node)
{
	void *page;

#ifdef CONFIG_NUMA
	if (node != -1)
		page = alloc_pages_exact_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;
	/* add the size of the newly allocated page(s) to total_alloc */
	total_alloc += SLOB_UNITS(PAGE_SIZE << order);
	
	return page_address(page);
}
Example #5
/*
 * Allocate a slob block within a given slob_page sp.
 */
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);
#ifdef BESTFIT_BLOCK
	slob_t *min_cur = NULL, *min_aligned = NULL, *min_prev = NULL;
	slobidx_t min_avail = 0;
	int flag_if = 0, min_delta = 0;

	slobidx_t avail;
#endif
	
#ifdef BESTFIT_BLOCK
	
	if ((counter_print % print_iteration) == 0) {
		printk("\nslob_Request: %d\n", units);
		printk("slob_alloc: Candidate block sizes:");
	}
	
	/* traverse the list until we find the best-fitting block */
	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
		
		avail = slob_units(cur);
		
		if ((counter_print % print_iteration) == 0) {
			printk(" %d", avail);
		}
		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			/* initialise the best-so-far (min) values on the first fit */
			if (!flag_if) {
				min_avail = slob_units(cur);
				min_cur = cur;
				min_aligned = aligned;
				min_delta = delta;
				min_prev = prev;
				flag_if = 1;
			} else {
				/* if a better (smaller) fitting block is found, update the min values */
				if (min_avail > avail) {
					min_avail = avail;
					min_cur = cur;
					min_aligned = aligned;
					min_delta = delta;
					min_prev = prev;
				}
			}
		}
		
		/* if it is the last block in the list, exit the for loop */
		if (slob_last(cur)) {
			break;
		}
	}
	
	/* if a block that fits the request was found, select it */
	if (min_avail != 0) {
		slob_t *next;

		cur = min_cur;
		avail = min_avail;
		delta = min_delta;
		aligned = min_aligned;
		prev = min_prev;
		
		if (delta) { /* need to fragment head to align? */
			next = slob_next(cur);
			set_slob(aligned, avail - delta, next);
			set_slob(cur, delta, aligned);
			prev = cur;
			cur = aligned;
			avail = slob_units(cur);
		}
		
		next = slob_next(cur);
		if (avail == units) { /* exact fit? unlink. */
			if (prev)
				set_slob(prev, slob_units(prev), next);
			else
				sp->free = next;
		} else { /* fragment */
			if (prev)
				set_slob(prev, slob_units(prev), cur + units);
			else
				sp->free = cur + units;
			set_slob(cur + units, avail - units, next);
		}

		sp->units -= units;
		if (!sp->units)
			clear_slob_page_free(sp);
		if ((counter_print % print_iteration) == 0) {
			printk("\nslob_alloc: Best Fit: %d\n", min_avail);
		}
		return cur;
	} else {
		if ((counter_print % print_iteration) == 0) {
			printk("\nslob_alloc: Best Fit: None\n");
		}
		return NULL;
	}
	
	
#else
	if ((counter_print % print_iteration) == 0) {
		printk("\nslob_Request: %d\n", units);
		printk("slob_alloc: Candidate block sizes:");
	}
	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {	
		slobidx_t avail = slob_units(cur);
		if ((counter_print % print_iteration) == 0) {
			printk(" %d", avail);
		}
		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;
			
			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->free = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->free = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			if ((counter_print % print_iteration) == 0) {
				printk("\nslob_alloc: First Fit is the last available\n");
			}
			return cur;
		}
		if (slob_last(cur)) {
			if ((counter_print % print_iteration) == 0) {
				printk("\nslob_alloc: First Fit: None\n");
			}
			return NULL;
		}
	}
#endif
}
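The counter_print and print_iteration symbols that drive the sampled printk output above are not defined in this snippet; the sketch below shows one plausible set of definitions, assuming the counter is bumped once per slob_alloc() call (both the initial period value and the increment site are assumptions):

/* Assumed definitions for the sampled debug output above. */
static int print_iteration = 1000;	/* print once every 1000 allocations (assumed period) */
static unsigned long counter_print;	/* assumed to be incremented once per slob_alloc() call */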
Example #6
File: slob.c Project: 7799/linux
/*
 * slob_free: entry point into the slob allocator.
 */
static void slob_free(void *block, int size)
{
	struct page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;
	struct list_head *slob_list;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = virt_to_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		spin_unlock_irqrestore(&slob_lock, flags);
		__ClearPageSlab(sp);
		page_mapcount_reset(sp);
		slob_free_pages(b, 0);
		return;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->freelist = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		if (size < SLOB_BREAK1)
			slob_list = &free_slob_small;
		else if (size < SLOB_BREAK2)
			slob_list = &free_slob_medium;
		else
			slob_list = &free_slob_large;
		set_slob_page_free(sp, slob_list);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find reinsertion
	 * point.
	 */
	sp->units += units;

	if (b < (slob_t *)sp->freelist) {
		if (b + units == sp->freelist) {
			units += slob_units(sp->freelist);
			sp->freelist = slob_next(sp->freelist);
		}
		set_slob(b, units, sp->freelist);
		sp->freelist = b;
	} else {
		prev = sp->freelist;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}
Example #7
static void slob_free(void *block, int size)
{
	struct page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;
	struct list_head *slob_list;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = slob_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* the whole page is now free: return it straight to the page allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		spin_unlock_irqrestore(&slob_lock, flags);
		clear_slob_page(sp);
		free_slob_page(sp);
		slob_free_pages(b, 0);
		return;
	}

	if (!slob_page_free(sp)) {
		/* this slob page is about to become partially free */
		sp->units = units;
		sp->freelist = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		if (size < SLOB_BREAK1)
			slob_list = &free_slob_small;
		else if (size < SLOB_BREAK2)
			slob_list = &free_slob_medium;
		else
			slob_list = &free_slob_large;
		set_slob_page_free(sp, slob_list);
		goto out;
	}

	/* the page is already partially free, so find the reinsertion point */
	sp->units += units;

	if (b < (slob_t *)sp->freelist) {
		if (b + units == sp->freelist) {
			units += slob_units(sp->freelist);
			sp->freelist = slob_next(sp->freelist);
		}
		set_slob(b, units, sp->freelist);
		sp->freelist = b;
	} else {
		prev = sp->freelist;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}
Example #8
/*
 * Allocate a slob block within a given slob_page sp.
 * 
 * We modified the slob_page_alloc function to find the
 * best-fitting block on a given page. We understand that this
 * means a page's free blocks are searched twice (once by the
 * helper below, and a second time by this function). We keep a
 * "best" copy of all the relevant variables to track the best
 * block found so far, walk the entire block list, and then
 * allocate from the best block.
 */
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);

	slob_t *best_prev = NULL, *best_cur = NULL, *best_aligned = NULL;
	int best_delta = 0;
	slobidx_t best_fit = 0;

	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
#ifdef SLOB_BEST_FIT_ALG
		if (avail >= units + delta && (best_cur == NULL || avail - (units + delta) < best_fit) ) { /* room enough? */
#else
		if (avail >= units + delta) { /* room enough? */
#endif
			best_prev = prev;
			best_cur = cur;
			best_aligned = aligned;
			best_delta = delta;
			best_fit = avail - (units + delta);

#ifdef SLOB_BEST_FIT_ALG
		}
		if (slob_last(cur)) {
			if (best_cur != NULL) {
#endif
			slob_t *best_next = NULL;
			slobidx_t best_avail = slob_units(best_cur);

			if (best_delta) { /* need to fragment head to align? */
				best_next = slob_next(best_cur);
				set_slob(best_aligned, best_avail - best_delta, best_next);
				set_slob(best_cur, best_delta, best_aligned);
				best_prev = best_cur;
				best_cur = best_aligned;
				best_avail = slob_units(best_cur);
			}

			best_next = slob_next(best_cur);
			if (best_avail == units) { /* exact fit? unlink. */
				if (best_prev)
					set_slob(best_prev, slob_units(best_prev), best_next);
				else
					sp->free = best_next;
			} else { /* fragment */
				if (best_prev)
					set_slob(best_prev, slob_units(best_prev), best_cur + units);
				else
					sp->free = best_cur + units;
				set_slob(best_cur + units, best_avail - units, best_next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			return best_cur;

#ifdef SLOB_BEST_FIT_ALG
			}
#else
		}
		if (slob_last(cur)) {
#endif
			return NULL;
		}
	}
}

/*
 * The helper function, slob_page_best_fit_check, goes 
 * through the page's list of blocks and returns a number. 
 * The number will be -1, 0, or some positive integer: -1
 * means there is no block big enough, 0 means a perfectly
 * fitting block, and any positive integer is the amount that
 * would be left over in the block if the allocation happened.
 * For the best-fit algorithm we want this number to be 0, or
 * otherwise as low as possible.
 */
static int slob_page_best_fit_check(struct slob_page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);

	slob_t *best_cur = NULL;
	slobidx_t best_fit = 0;

	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta && (best_cur == NULL || avail - (units + delta) < best_fit) ) { /* room enough? */
			best_cur = cur;
			best_fit = avail - (units + delta);
			if (best_fit == 0)
				return 0;
		}
		if (slob_last(cur)) {
			if (best_cur != NULL) 
				return best_fit;
			
			return -1;
		}
	}
}
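Per the comments above, the helper is meant to drive page selection; the sketch below shows one way the page-scanning loop in slob_alloc could use slob_page_best_fit_check to pick the page whose best block leaves the smallest remainder before calling the modified slob_page_alloc (this caller is an assumption, not part of the example, and locking is omitted):

/* Hypothetical page-selection loop: remember the page whose best block
 * leaves the smallest remainder, stop early on a perfect fit, then
 * allocate from that page with the best-fit slob_page_alloc() above. */
static void *best_fit_page_alloc(struct list_head *slob_list, size_t size, int align)
{
	struct slob_page *sp, *best_sp = NULL;
	int fit, best_leftover = -1;

	list_for_each_entry(sp, slob_list, list) {
		/* not enough free units on this page at all */
		if (sp->units < SLOB_UNITS(size))
			continue;
		fit = slob_page_best_fit_check(sp, size, align);
		if (fit < 0)
			continue;	/* no single block is big enough */
		if (best_sp == NULL || fit < best_leftover) {
			best_sp = sp;
			best_leftover = fit;
		}
		if (fit == 0)
			break;		/* perfect fit, stop searching */
	}
	return best_sp ? slob_page_alloc(best_sp, size, align) : NULL;
}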