Example #1
/* IAMROOT-12AB:
 * -------------
 * For the non-sparse memory model: find node_page_ext from the page's
 * node, then locate the matching page_ext entry and return it.
 */
struct page_ext *lookup_page_ext(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long offset;
	struct page_ext *base;

	base = NODE_DATA(page_to_nid(page))->node_page_ext;
#ifdef CONFIG_DEBUG_VM
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (unlikely(!base))
		return NULL;
#endif

/* IAMROOT-12AB:
 * -------------
 * The offset is derived from the pfn's position in the node's mem_map;
 * on return it is added to base (a page_ext *) so the caller gets the
 * address of this page's page_ext entry.
 */
	offset = pfn - round_down(node_start_pfn(page_to_nid(page)),
					MAX_ORDER_NR_PAGES);
	return base + offset;
}
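For context, a hedged caller sketch (not part of the listing; the helper name and the flag_nr argument are hypothetical) showing how a user of lookup_page_ext() is expected to tolerate the NULL return described in the comment above:

static bool my_page_ext_test_flag(struct page *page, int flag_nr)
{
	struct page_ext *page_ext = lookup_page_ext(page);

	/* page_ext arrays may not exist yet during early boot or hotplug */
	if (unlikely(!page_ext))
		return false;

	return test_bit(flag_nr, &page_ext->flags);
}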
Example #2
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long offset;
	struct page_cgroup *base;

	base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
	if (unlikely(!base))
		return NULL;

	offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
	return base + offset;
}
Example #3
static void
count_pmd_pages(struct mm_struct * mm, struct vm_area_struct * vma,
                pmd_t *dir, unsigned long address, unsigned long end,
                signed char *data_buf, int node_map)
{
    pte_t * pte;
    unsigned long pmd_end;
    struct page *page;
    unsigned long pfn;
    int val, index;

    if (pmd_none(*dir))
        return;
    pmd_end = (address + PMD_SIZE) & PMD_MASK;
    if (end > pmd_end)
        end = pmd_end;
    index = 0;
    do {
        pte = pte_offset_map(dir, address);
        if (!pte_none(*pte) && pte_present(*pte)) {
            pfn = pte_pfn(*pte);
            if (pfn_valid(pfn)) {
                page = pfn_to_page(pfn);
                val = node_map ? page_to_nid(page) :
                      page_count(page);
                val = (val > 99) ? 99 : val;
                data_buf[index] = val;
            }
        }
        address += PAGE_SIZE;
        pte++;
        index++;
    } while (address && (address < end));
    return;
}
Example #4
static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}
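For contrast with the enqueue path above, a hedged sketch of the inverse operation on the same per-node freelists (hypothetical helper name; the real kernel's dequeue path also handles node selection and reservation accounting):

static struct page *my_dequeue_huge_page_node(int nid)
{
	struct page *page;

	if (list_empty(&hugepage_freelists[nid]))
		return NULL;

	page = list_entry(hugepage_freelists[nid].next, struct page, lru);
	list_del(&page->lru);
	free_huge_pages--;
	free_huge_pages_node[nid]--;
	return page;
}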
Example #5
/* Ensure all existing pages follow the policy. */
static int
verify_pages(unsigned long addr, unsigned long end, unsigned long *nodes)
{
	while (addr < end) {
		struct page *p;
		pte_t *pte;
		pmd_t *pmd;
		pgd_t *pgd = pgd_offset_k(addr);
		if (pgd_none(*pgd)) {
			addr = (addr + PGDIR_SIZE) & PGDIR_MASK;
			continue;
		}
		pmd = pmd_offset(pgd, addr);
		if (pmd_none(*pmd)) {
			addr = (addr + PMD_SIZE) & PMD_MASK;
			continue;
		}
		p = NULL;
		pte = pte_offset_map(pmd, addr);
		if (pte_present(*pte))
			p = pte_page(*pte);
		pte_unmap(pte);
		if (p) {
			unsigned nid = page_to_nid(p);
			if (!test_bit(nid, nodes))
				return -EIO;
		}
		addr += PAGE_SIZE;
	}
	return 0;
}
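A hedged caller sketch (hypothetical helper, shown only to illustrate how the nodes bitmap argument might be built) for the policy check above:

static int check_region_on_nodes(unsigned long start, unsigned long end,
				 int nid_a, int nid_b)
{
	DECLARE_BITMAP(allowed, MAX_NUMNODES);

	bitmap_zero(allowed, MAX_NUMNODES);
	__set_bit(nid_a, allowed);
	__set_bit(nid_b, allowed);

	/* 0 if every mapped page in [start, end) is on an allowed node, else -EIO */
	return verify_pages(start, end, allowed);
}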
Example #6
struct page_ext *lookup_page_ext(const struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long index;
	struct page_ext *base;

	base = NODE_DATA(page_to_nid(page))->node_page_ext;
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (unlikely(!base))
		return NULL;
	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
					MAX_ORDER_NR_PAGES);
	return get_entry(base, index);
}
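For context on the get_entry() call above, a sketch adapted from mm/page_ext.c of roughly the same era (treat the exact helper names as an assumption): the index is scaled by the real per-entry size because clients such as page_owner may reserve extra bytes after struct page_ext, so plain base + index would be wrong.

static unsigned long get_entry_size(void)
{
	/* extra_mem is the extra space requested by enabled page_ext clients */
	return sizeof(struct page_ext) + extra_mem;
}

static inline struct page_ext *get_entry(void *base, unsigned long index)
{
	return base + get_entry_size() * index;
}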
Example #7
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long offset;
	struct page_cgroup *base;

	base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
#ifdef CONFIG_DEBUG_VM
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_cgroup arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (unlikely(!base))
		return NULL;
#endif
	offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
	return base + offset;
}
Example #8
/*
 * slob_alloc: entry point into the slob allocator.
 *
 * We modified slob_alloc and defined new local variables:
 *   1.) temp_amt_free accumulates the free units found on each page
 *       (used by the system call sys_get_slob_amt_free);
 *   2.) best_sp points to the page with the "best fit";
 *   3.) best_fit / current_fit hold a fit value where a smaller number
 *       means a better fit; best_fit is the best value seen so far and
 *       current_fit is the value for the current page.
 *
 * While iterating through the free page list, our slob_alloc:
 *   1.) adds the page's free units to temp_amt_free;
 *   2.) calls the helper slob_page_best_fit_check and stores its result
 *       in current_fit;
 *   3.) checks current_fit against three cases. If it is 0, the fit is
 *       perfect, so we break out of the loop and allocate there. If it
 *       is -1, no block on this page fits, so we continue the loop.
 *       Otherwise it is a positive number, and if it is smaller than
 *       best_fit (or best_fit has not been set yet), the current page
 *       becomes the new best page.
 *
 * Once the loop is over, we try to allocate on the best page if
 * best_fit is non-negative; otherwise there is not enough space and we
 * must allocate a new page. That part of slob_alloc is mostly
 * unchanged, apart from updating the arrays backing the system calls:
 * when a new page has to be allocated, amt_claimed at the current
 * counter position is set to the size of the request (in bytes),
 * amt_free at the same position is set to the total free units
 * accumulated from the list (converted to bytes), and the counter is
 * then incremented.
 *
 * (A hedged sketch of a possible slob_page_best_fit_check helper
 * follows this example.)
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct slob_page *sp;
	struct list_head *prev;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	// Lab 3 - Statistics
	long temp_amt_free = 0;

	struct slob_page *best_sp = NULL;
	int best_fit = -1;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, slob_list, list) {
		int current_fit = -1;

		// Lab 3 - Statistics
		temp_amt_free = temp_amt_free + sp->units;

#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != -1 && page_to_nid(&sp->page) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

#ifdef SLOB_BEST_FIT_ALG
		current_fit = slob_page_best_fit_check(sp, size, align);
		if (current_fit == 0) {
			best_sp = sp;
			best_fit = current_fit;
			break;
		} else if (current_fit > 0 &&
			   (best_fit == -1 || current_fit < best_fit)) {
			best_sp = sp;
			best_fit = current_fit;
		}
		continue;
	}
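The slob_page_best_fit_check() helper referenced in the comment is not included in this listing. Below is a hedged sketch of what such a helper could look like, following the stated contract (0 = perfect fit, -1 = nothing on the page fits, otherwise the smallest leftover, where smaller is better) and reusing the free-list walkers from mm/slob.c (slob_units, slob_next, slob_last); it is an illustration, not the authors' actual code:

static int slob_page_best_fit_check(struct slob_page *sp, size_t size, int align)
{
	slob_t *cur = sp->free;
	slobidx_t units = SLOB_UNITS(size);
	int best = -1;

	for (;;) {
		slobidx_t avail = slob_units(cur);
		slobidx_t delta = 0;

		if (align) {
			slob_t *aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) {
			int left = avail - (units + delta);

			if (left == 0)
				return 0;	/* perfect fit: stop searching */
			if (best == -1 || left < best)
				best = left;	/* smaller leftover = better fit */
		}
		if (slob_last(cur))
			return best;	/* -1 if no block on this page fits */
		cur = slob_next(cur);
	}
}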
Example #9
/* __alloc_bootmem...() is protected by !slab_available() */
int __init_refok init_section_page_cgroup(unsigned long pfn)
{
	struct mem_section *section;
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	int nid, index;

	section = __pfn_to_section(pfn);

	if (!section->page_cgroup) {
		nid = page_to_nid(pfn_to_page(pfn));
		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
		if (slab_is_available()) {
			base = kmalloc_node(table_size, GFP_KERNEL, nid);
			if (!base)
				base = vmalloc_node(table_size, nid);
		} else {
			base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
				table_size,
				PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
		}
	} else {
		/*
 		 * We don't have to allocate page_cgroup again, but
		 * address of memmap may be changed. So, we have to initialize
		 * again.
		 */
		base = section->page_cgroup + pfn;
		table_size = 0;
		/* check address of memmap is changed or not. */
		if (base->page == pfn_to_page(pfn))
			return 0;
	}

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	for (index = 0; index < PAGES_PER_SECTION; index++) {
		pc = base + index;
		__init_page_cgroup(pc, pfn + index);
	}

	section = __pfn_to_section(pfn);
	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}
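The section->page_cgroup = base - pfn assignment above stores a pointer biased by the section's starting pfn, so the matching sparse-model lookup (a sketch following mm/page_cgroup.c of the same era) only has to add the pfn back:

struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);

	if (!section->page_cgroup)
		return NULL;
	return section->page_cgroup + pfn;
}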
Example #10
struct page_ext *lookup_page_ext(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long index;
	struct page_ext *base;

	base = NODE_DATA(page_to_nid(page))->node_page_ext;
#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAGE_POISONING)
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 *
	 * This check is also necessary for ensuring page poisoning
	 * works as expected when enabled
	 */
	if (unlikely(!base))
		return NULL;
#endif
	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
					MAX_ORDER_NR_PAGES);
	return get_entry(base, index);
}
Example #11
/* __alloc_bootmem...() is protected by !slab_available() */
static int __init_refok init_section_page_cgroup(unsigned long pfn)
{
	struct mem_section *section = __pfn_to_section(pfn);
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	int nid, index;

	if (!section->page_cgroup) {
		nid = page_to_nid(pfn_to_page(pfn));
		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
		VM_BUG_ON(!slab_is_available());
		if (node_state(nid, N_HIGH_MEMORY)) {
			base = kmalloc_node(table_size,
				GFP_KERNEL | __GFP_NOWARN, nid);
			if (!base)
				base = vmalloc_node(table_size, nid);
		} else {
			base = kmalloc(table_size, GFP_KERNEL | __GFP_NOWARN);
			if (!base)
				base = vmalloc(table_size);
		}
	} else {
		/*
 		 * We don't have to allocate page_cgroup again, but
		 * address of memmap may be changed. So, we have to initialize
		 * again.
		 */
		base = section->page_cgroup + pfn;
		table_size = 0;
		/* check address of memmap is changed or not. */
		if (base->page == pfn_to_page(pfn))
			return 0;
	}

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	for (index = 0; index < PAGES_PER_SECTION; index++) {
		pc = base + index;
		__init_page_cgroup(pc, pfn + index);
	}

	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}
Example #12
struct page *alloc_migrate_target(struct page *page, unsigned long nid,
				  int **resultp)
{
	/*
	 * hugeTLB: allocate a destination page from a nearest neighbor node,
	 * accordance with memory policy of the user process if possible. For
	 * now as a simple work-around, we use the next node for destination.
	 * Normal page: use prefer mempolicy for destination if called by
	 * hotplug, use default mempolicy for destination if called by cma.
	 */
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					    next_node_in(page_to_nid(page),
							 node_online_map));
	else
		return alloc_pages_node(nid, GFP_HIGHUSER_MOVABLE, 0);
}
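alloc_migrate_target() is shaped as a new-page callback. A hedged sketch of how such a callback is typically handed to migrate_pages() in kernels of this era (the wrapper is hypothetical and the exact migrate_pages() signature varies between versions, so treat the call as an assumption):

static int my_migrate_isolated_pages(struct list_head *isolated_pages, int nid)
{
	/* allocate replacement pages preferring node nid, then move the data */
	return migrate_pages(isolated_pages, alloc_migrate_target, NULL,
			     nid, MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
}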
Example #13
/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	
	struct slob_page *sp;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;
	int i;
	unsigned long int count_free;
	
#ifdef BESTFIT_PAGE
	slobidx_t min,prev_min;
	struct slob_page *curr;
	int flag_if;
	int check;
#else
	struct list_head *prev;
#endif
	/* initialize total_alloc and total_free */
	if (flag_mem == 0) {
		total_alloc = 0;
		total_free = 0;
		flag_mem = 1;
	}

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	
	
	counter_print++;
#ifdef BESTFIT_PAGE
	
	flag_if = 0;
	curr = NULL;
	min = 0;
	prev_min = SLOB_UNITS(size);
	
	/* walk the list until we find the best-fitting page */
	list_for_each_entry(sp, slob_list, list) {
#ifdef CONFIG_NUMA
	/*
	  * If there's a node specification, search for a partial
	  * page with a matching node id in the freelist.
	  */
		if (node != -1 && page_to_nid(&sp->page) != node)
			continue;
	
#endif
		/* Enough room on this page? */
		if (sp->units < prev_min)
			continue;

		if (flag_if == 0) {
			/* first page found that can hold the request */
			check = check_block(sp, size, align);
			if (check) {
				min = sp->units;
				curr = sp;
				flag_if = 1;
			}
		} else if (sp->units <= min) {
			/* keep the smallest page that still fits */
			check = check_block(sp, size, align);
			if (check) {
				min = sp->units;
				curr = sp;
			}
		}
	}

	/* allocate from the best-fitting page, if one was found */
	if (curr != NULL)
		b = slob_page_alloc(curr, size, align);
	else
		b = NULL;


#else

	list_for_each_entry(sp, slob_list, list) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != -1 && page_to_nid(&sp->page) != node)
			continue;
		
#endif
		
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		/* Attempt to alloc */
		prev = sp->list.prev;
		b = slob_page_alloc(sp, size, align);
		if (!b)
			continue;

		/* Improve fragment distribution and reduce our average
		 * search time by starting our next search here. (see
		 * Knuth vol 1, sec 2.5, pg 449) */
		if (prev != slob_list->prev &&
				slob_list->next != prev->next)
			list_move_tail(slob_list, prev->next);
		break;
	}
#endif
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = slob_page(b);
		set_slob_page(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->free = b;
		INIT_LIST_HEAD(&sp->list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	
	/* walk all three size lists to count the total free units of all pages */
	count_free = 0;
	for (i = 0; i < 3; i++) {
		if (i == 0)
			slob_list = &free_slob_small;
		else if (i == 1)
			slob_list = &free_slob_medium;
		else
			slob_list = &free_slob_large;

		list_for_each_entry(sp, slob_list, list) {
			count_free = count_free + sp->units;
		}
	}
Example #14
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct page *sp;
	struct list_head *prev;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	
	list_for_each_entry(sp, slob_list, list) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
			continue;
#endif
		
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		
		/* Attempt to alloc */
		prev = sp->list.prev;
		b = slob_page_alloc(sp, size, align);
		if (!b)
			continue;

		if (prev != slob_list->prev &&
				slob_list->next != prev->next)
			list_move_tail(slob_list, prev->next);
		break;
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	
	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = slob_page(b);
		set_slob_page(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->freelist = b;
		INIT_LIST_HEAD(&sp->list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	return b;
}
Example #15
int online_pages(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long flags;
	unsigned long onlined_pages = 0;
	struct zone *zone;
	int need_zonelists_rebuild = 0;
	int nid;
	int ret;
	struct memory_notify arg;

	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	arg.status_change_nid = -1;

	nid = page_to_nid(pfn_to_page(pfn));
	if (node_present_pages(nid) == 0)
		arg.status_change_nid = nid;

	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret) {
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		return ret;
	}
	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_sem.
	 */
	zone = page_zone(pfn_to_page(pfn));
	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, pfn, pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	/*
	 * If this zone is not populated, then it is not in zonelist.
	 * This means the page allocator ignores this zone.
	 * So, zonelist must be updated after online.
	 */
	if (!populated_zone(zone))
		need_zonelists_rebuild = 1;

	walk_memory_resource(pfn, nr_pages, &onlined_pages,
		online_pages_range);
	zone->present_pages += onlined_pages;
	zone->zone_pgdat->node_present_pages += onlined_pages;

	setup_per_zone_pages_min();
	if (onlined_pages) {
		kswapd_run(zone_to_nid(zone));
		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
	}

	if (need_zonelists_rebuild)
		build_all_zonelists();
	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();

	if (onlined_pages)
		memory_notify(MEM_ONLINE, &arg);

	return 0;
}
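The MEM_GOING_ONLINE / MEM_CANCEL_ONLINE / MEM_ONLINE notifications issued above are consumed through the memory notifier chain. A hedged sketch of a client (callback and bookkeeping names are hypothetical) registered with register_memory_notifier():

static int my_memory_callback(struct notifier_block *self,
			      unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;

	switch (action) {
	case MEM_GOING_ONLINE:
		/* prepare per-node state for mn->status_change_nid; may veto */
		break;
	case MEM_ONLINE:
		/* pages [mn->start_pfn, mn->start_pfn + mn->nr_pages) are usable */
		break;
	case MEM_CANCEL_ONLINE:
		/* undo whatever MEM_GOING_ONLINE set up */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_memory_nb = {
	.notifier_call = my_memory_callback,
};

/* at init time: register_memory_notifier(&my_memory_nb); */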
Example #16
/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct slob_page *sp = NULL;
	struct slob_page *sp_t;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp_t, slob_list, list) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != -1 && page_to_nid(&sp_t->page) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp_t->units < SLOB_UNITS(size))
			/* Not enough room */
			continue;

		if (sp == NULL)
			sp = sp_t;

		if (sp_t->units < sp->units)
			/* Get the smallest slob_page that
 			 * is large enough for our needs */
			sp = sp_t;
	}

	/* Attempt to alloc */
	if (sp != NULL) {
		b = slob_page_alloc(sp, size, align);
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = slob_page(b);
		set_slob_page(sp);
		
		/* We allocated a new page, increment the count */
		slob_page_count++;


		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->free = b;
		INIT_LIST_HEAD(&sp->list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	return b;
}
Example #17
/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct page *sp;
	struct list_head *prev;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, slob_list, lru) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		/* Attempt to alloc */
		prev = sp->lru.prev;
		b = slob_page_alloc(sp, size, align);
		if (!b)
			continue;

		/* Improve fragment distribution and reduce our average
		 * search time by starting our next search here. (see
		 * Knuth vol 1, sec 2.5, pg 449) */
		if (prev != slob_list->prev &&
				slob_list->next != prev->next)
			list_move_tail(slob_list, prev->next);
		break;
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = virt_to_page(b);
		__SetPageSlab(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->freelist = b;
		INIT_LIST_HEAD(&sp->lru);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	return b;
}