/*
 * Carve a block of @size bytes out of the free list of page @sp.
 *
 * Walks the page's address-ordered free list first-fit.  When @align is
 * non-zero, an unaligned head may be split off as its own free block.
 * Returns the allocated block, or NULL when no region on this page can
 * satisfy the request (including any alignment padding).
 */
static void *slob_page_alloc(struct page *sp, size_t size, int align)
{
	slob_t *prior = NULL;
	slob_t *blk = sp->freelist;
	slob_t *aligned_blk = NULL;
	int head_frag = 0;
	int needed = SLOB_UNITS(size);

	while (1) {
		slobidx_t free_units = slob_units(blk);

		if (align) {
			/* Distance to the next properly aligned address. */
			aligned_blk = (slob_t *)ALIGN((unsigned long)blk, align);
			head_frag = aligned_blk - blk;
		}

		if (free_units >= needed + head_frag) {
			/* This region can hold the request. */
			slob_t *link;

			if (head_frag) {
				/* Split the unaligned head off as a free block. */
				link = slob_next(blk);
				set_slob(aligned_blk, free_units - head_frag, link);
				set_slob(blk, head_frag, aligned_blk);
				prior = blk;
				blk = aligned_blk;
				free_units = slob_units(blk);
			}

			link = slob_next(blk);
			if (free_units == needed) {
				/* Exact fit: unlink the block entirely. */
				if (prior)
					set_slob(prior, slob_units(prior), link);
				else
					sp->freelist = link;
			} else {
				/* Oversized: leave the tail on the free list. */
				if (prior)
					set_slob(prior, slob_units(prior), blk + needed);
				else
					sp->freelist = blk + needed;
				set_slob(blk + needed, free_units - needed, link);
			}

			sp->units -= needed;
			if (!sp->units)
				clear_slob_page_free(sp);
			return blk;
		}

		if (slob_last(blk))
			return NULL;	/* searched the whole list */

		prior = blk;
		blk = slob_next(blk);
	}
}
/*
 * slob_alloc: entry point into the slob allocator.
 *
 * Best-fit page selection: scan the size-class list for the smallest
 * partially free page that still has enough units for the request and
 * carve the block out of it.  If no page fits (or the chosen page
 * fails, e.g. because alignment padding does not leave enough room),
 * fall back to allocating a fresh page from the page allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct slob_page *sp = NULL;
	struct slob_page *sp_t;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	/* Route the request to the small/medium/large size class. */
	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp_t, slob_list, list) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != -1 && page_to_nid(&sp_t->page) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp_t->units < SLOB_UNITS(size)) /* Not enough room */
			continue;
		if (sp == NULL)
			sp = sp_t;
		if (sp_t->units < sp->units)
			/* Get the smallest slob_page that
			 * is large enough for our needs */
			sp = sp_t;
	}

	/* Attempt to alloc */
	if (sp != NULL) {
		/*
		 * NOTE(review): sp->units >= SLOB_UNITS(size) does not
		 * guarantee success when alignment padding is needed, so
		 * this can still return NULL and force a new page below.
		 */
		b = slob_page_alloc(sp, size, align);
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = slob_page(b);
		set_slob_page(sp);

		spin_lock_irqsave(&slob_lock, flags);
		/*
		 * We allocated a new page, increment the count.  This is
		 * done under slob_lock: a bare increment of the shared
		 * counter is not atomic and would race with concurrent
		 * allocations otherwise.
		 */
		slob_page_count++;
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->free = b;
		INIT_LIST_HEAD(&sp->list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	return b;
}
/*
 * slob_alloc: entry point into the slob allocator.
 *
 * With BESTFIT_PAGE defined, the partial-page scan picks the page with
 * the fewest free units that can still satisfy the request (verified
 * via check_block); otherwise the stock first-fit scan with the Knuth
 * list rotation is used.  Also maintains instrumentation state
 * (counter_print, total_alloc/total_free, count_free).
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct slob_page *sp;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;
	int i;
	unsigned long int count_free;
#ifdef BESTFIT_PAGE
	slobidx_t min, prev_min;	/* best page's units / request-size floor */
	struct slob_page *curr;		/* best page found so far */
	int flag_if;			/* 0 until a first candidate is found */
	int check;
#else
	struct list_head *prev;
#endif

	/* Initialize total_alloc and total_free on the first call. */
	/* NOTE(review): flag_mem is tested outside slob_lock - racy on SMP. */
	if (flag_mem == 0) {
		total_alloc = 0;
		total_free = 0;
		flag_mem = 1;
	}

	/* Route the request to the small/medium/large size class. */
	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	counter_print++;
#ifdef BESTFIT_PAGE
	flag_if = 0;
	curr = NULL;
	min = 0;
	prev_min = SLOB_UNITS(size);
	/* Walk the list until we find the best (smallest fitting) page. */
	list_for_each_entry(sp, slob_list, list) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != -1 && page_to_nid(&sp->page) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < prev_min) {
			continue;
		}
		if (flag_if == 0) {
			/* First candidate: verify a block really fits. */
			check = check_block(sp, size, align);
			if (check) {
				min = sp->units;
				curr = sp;
				flag_if = 1;
			}
		}
		else {
			/* A smaller page was found: re-check and take it. */
			if (sp->units <= min) {
				check = check_block(sp, size, align);
				if (check) {
					min = sp->units;
					curr = sp;
				}
			}
		}
	}
	/* Call slob_page_alloc on the chosen page, if any. */
	if (curr != NULL) {
		b = slob_page_alloc(curr, size, align);
	}
	else {
		b = NULL;
	}
#else
	list_for_each_entry(sp, slob_list, list) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != -1 && page_to_nid(&sp->page) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		/* Attempt to alloc */
		prev = sp->list.prev;
		b = slob_page_alloc(sp, size, align);
		if (!b)
			continue;

		/* Improve fragment distribution and reduce our average
		 * search time by starting our next search here. (see
		 * Knuth vol 1, sec 2.5, pg 449) */
		if (prev != slob_list->prev && slob_list->next != prev->next)
			list_move_tail(slob_list, prev->next);
		break;
	}
#endif
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = slob_page(b);
		set_slob_page(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->free = b;
		INIT_LIST_HEAD(&sp->list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);

	/* Walk all three size lists to count the total free units
	 * across every partially free page. */
	count_free = 0;
	for (i = 0; i < 3; i++) {
		if (i == 0) {
			slob_list = &free_slob_small;
		}
		else if (i == 1) {
			slob_list = &free_slob_medium;
		}
		else {
			slob_list = &free_slob_large;
		}
		list_for_each_entry(sp, slob_list, list) {
			count_free = count_free + sp->units;
		}
	}
	/* NOTE(review): the function body appears truncated in this chunk -
	 * the final statistics reporting and "return b;" are not visible
	 * here; confirm against the full file. */
/*
 * Allocate a slob block within a given slob_page sp.
 *
 * With BESTFIT_BLOCK defined, the whole free list of the page is
 * scanned and the smallest block that fits the request is chosen;
 * otherwise the stock first-fit scan is used.  Every print_iteration-th
 * call (tracked globally via counter_print) the candidate block sizes
 * and the chosen fit are printed via printk.
 */
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);
#ifdef BESTFIT_BLOCK
	/* Running record of the smallest fitting block seen so far. */
	slob_t *min_cur = NULL, *min_aligned = NULL, *min_prev;
	slobidx_t min_avail = 0;	/* 0 means "no fitting block found yet" */
	int flag_if = 0, min_delta = 0;
	slobidx_t avail;
#endif

#ifdef BESTFIT_BLOCK
	if ((counter_print % print_iteration) == 0) {
		printk("\nslob_Request: %d\n", units);
		printk("slob_alloc: Candidate blocks size:");
	}
	/* Walk the free list until we find the best block. */
	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
		avail = slob_units(cur);
		if ((counter_print % print_iteration) == 0) {
			printk(" %d", avail);
		}
		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			/* First fitting block: initialize the minimum. */
			if (!flag_if) {
				min_avail = slob_units(cur);
				min_cur = cur;
				min_aligned = aligned;
				min_delta = delta;
				min_prev = prev;
				flag_if = 1;
			}
			else {
				/* A better (smaller) block was found:
				 * update the recorded minimum. */
				if (min_avail > avail) {
					min_avail = avail;
					min_cur = cur;
					min_aligned = aligned;
					min_delta = delta;
					min_prev = prev;
				}
			}
		}
		/* Last block in the list: leave the loop. */
		if (slob_last(cur)) {
			break;
		}
	}
	/* If a block that fits the request was found, carve it up. */
	if (min_avail != 0) {
		slob_t *next;

		cur = min_cur;
		avail = min_avail;
		delta = min_delta;
		aligned = min_aligned;
		prev = min_prev;
		if (delta) { /* need to fragment head to align? */
			next = slob_next(cur);
			set_slob(aligned, avail - delta, next);
			set_slob(cur, delta, aligned);
			prev = cur;
			cur = aligned;
			avail = slob_units(cur);
		}
		next = slob_next(cur);
		if (avail == units) { /* exact fit? unlink. */
			if (prev)
				set_slob(prev, slob_units(prev), next);
			else
				sp->free = next;
		} else { /* fragment */
			if (prev)
				set_slob(prev, slob_units(prev), cur + units);
			else
				sp->free = cur + units;
			set_slob(cur + units, avail - units, next);
		}
		sp->units -= units;
		if (!sp->units)
			clear_slob_page_free(sp);
		if ((counter_print % print_iteration) == 0) {
			printk("\nslob_alloc: Best Fit: %d\n", min_avail);
		}
		return cur;
	}
	else {
		if ((counter_print % print_iteration) == 0) {
			printk("\nslob_alloc: Best Fit: None\n");
		}
		return NULL;
	}
#else
	if ((counter_print % print_iteration) == 0) {
		printk("\nslob_Request: %d\n", units);
		printk("slob_alloc: Candidate blocks size:");
	}
	/* Stock first-fit scan: take the first block with enough room. */
	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if ((counter_print % print_iteration) == 0) {
			printk(" %d", avail);
		}
		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}
			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->free = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->free = cur + units;
				set_slob(cur + units, avail - units, next);
			}
			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			if ((counter_print % print_iteration) == 0) {
				printk("\nslob_alloc: First Fit is the last available\n");
			}
			return cur;
		}
		if (slob_last(cur)) {
			if ((counter_print % print_iteration) == 0) {
				printk("\nslob_alloc: First Fit: None\n");
			}
			return NULL;
		}
	}
#endif
}
/*
 * slob_free: entry point into the slob allocator.
 *
 * Returns the block at @block (of @size bytes) to the free list of its
 * page.  A page that becomes entirely free is handed back to the page
 * allocator; otherwise the block is inserted into the page's
 * address-ordered free list and coalesced with adjacent neighbours.
 */
static void slob_free(void *block, int size)
{
	struct page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;
	struct list_head *slob_list;

	/* Freeing NULL / ZERO_SIZE_PTR is a no-op. */
	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = virt_to_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		spin_unlock_irqrestore(&slob_lock, flags);
		__ClearPageSlab(sp);
		page_mapcount_reset(sp);
		slob_free_pages(b, 0);
		return;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->freelist = b;
		/* Terminate the new one-entry free list at the page end. */
		set_slob(b, units,
			(void *)((unsigned long)(b + SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		/* Queue the page on the list matching the freed size class. */
		if (size < SLOB_BREAK1)
			slob_list = &free_slob_small;
		else if (size < SLOB_BREAK2)
			slob_list = &free_slob_medium;
		else
			slob_list = &free_slob_large;
		set_slob_page_free(sp, slob_list);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find reinsertion
	 * point.
	 */
	sp->units += units;

	if (b < (slob_t *)sp->freelist) {
		/* New head of the free list; merge with old head if adjacent. */
		if (b + units == sp->freelist) {
			units += slob_units(sp->freelist);
			sp->freelist = slob_next(sp->freelist);
		}
		set_slob(b, units, sp->freelist);
		sp->freelist = b;
	} else {
		/* Find the last free block before b (list is address-ordered). */
		prev = sp->freelist;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		/* Merge with the following block when contiguous. */
		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		/* Merge with the preceding block when contiguous. */
		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}
/*
 * slob_alloc: central allocation routine of the SLOB allocator.
 *
 * Routes the request to a size-class list, first-fit scans the
 * partially free pages on it (rotating the list after a hit so the
 * next scan resumes there), and falls back to backing the request
 * with a brand new page when nothing fits.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct page *sp;
	struct list_head *anchor;
	struct list_head *freelist_head;
	slob_t *blockp = NULL;
	unsigned long irqflags;

	/* Pick the small/medium/large size class for this request. */
	if (size < SLOB_BREAK1)
		freelist_head = &free_slob_small;
	else if (size < SLOB_BREAK2)
		freelist_head = &free_slob_medium;
	else
		freelist_head = &free_slob_large;

	spin_lock_irqsave(&slob_lock, irqflags);
	/* Scan the partially free pages for one that can hold us. */
	list_for_each_entry(sp, freelist_head, lru) {
#ifdef CONFIG_NUMA
		/*
		 * Honour an explicit node request: skip partial pages
		 * living on a different NUMA node.
		 */
		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
			continue;
#endif
		/* Too few free units on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		anchor = sp->lru.prev;
		blockp = slob_page_alloc(sp, size, align);
		if (!blockp)
			continue;

		/*
		 * Rotate the list so the next search starts where this
		 * one succeeded; improves fragment distribution and
		 * average search time (Knuth vol 1, sec 2.5, pg 449).
		 */
		if (anchor != freelist_head->prev &&
		    freelist_head->next != anchor->next)
			list_move_tail(freelist_head, anchor->next);
		break;
	}
	spin_unlock_irqrestore(&slob_lock, irqflags);

	/* No partial page had room: allocate a fresh one. */
	if (!blockp) {
		blockp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!blockp)
			return NULL;
		sp = virt_to_page(blockp);
		__SetPageSlab(sp);

		spin_lock_irqsave(&slob_lock, irqflags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->freelist = blockp;
		INIT_LIST_HEAD(&sp->lru);
		set_slob(blockp, SLOB_UNITS(PAGE_SIZE),
			 blockp + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, freelist_head);
		blockp = slob_page_alloc(sp, size, align);
		BUG_ON(!blockp);
		spin_unlock_irqrestore(&slob_lock, irqflags);
	}
	if (unlikely((gfp & __GFP_ZERO) && blockp))
		memset(blockp, 0, size);
	return blockp;
}
/*
 * slob_free: return a block previously handed out by slob_alloc.
 *
 * A page that becomes entirely free is released back to the page
 * allocator; otherwise the block is inserted into the page's
 * address-ordered free list and coalesced with adjacent neighbours.
 *
 * NOTE(review): sp is declared struct page * but is obtained via
 * slob_page() and used with slob_page-era helpers (sp->units,
 * sp->freelist) - mixed kernel-version code; verify against the
 * surrounding tree's struct definitions.
 */
static void slob_free(void *block, int size)
{
	struct page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;
	struct list_head *slob_list;

	/* Freeing NULL / ZERO_SIZE_PTR is a no-op. */
	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = slob_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	/* Whole page free again: hand it straight back to the page allocator. */
	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		spin_unlock_irqrestore(&slob_lock, flags);
		clear_slob_page(sp);
		free_slob_page(sp);
		slob_free_pages(b, 0);
		return;
	}

	/* Page was fully allocated: it now becomes partially free. */
	if (!slob_page_free(sp)) {
		sp->units = units;
		sp->freelist = b;
		/* Terminate the new one-entry free list at the page end. */
		set_slob(b, units,
			(void *)((unsigned long)(b + SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		/* Queue the page on the list matching the freed size class. */
		if (size < SLOB_BREAK1)
			slob_list = &free_slob_small;
		else if (size < SLOB_BREAK2)
			slob_list = &free_slob_medium;
		else
			slob_list = &free_slob_large;
		set_slob_page_free(sp, slob_list);
		goto out;
	}

	/*
	 * Page already partially free: find the insertion point in the
	 * address-ordered free list and coalesce with neighbours.
	 */
	sp->units += units;

	if (b < (slob_t *)sp->freelist) {
		/* New head of the free list; merge with old head if adjacent. */
		if (b + units == sp->freelist) {
			units += slob_units(sp->freelist);
			sp->freelist = slob_next(sp->freelist);
		}
		set_slob(b, units, sp->freelist);
		sp->freelist = b;
	} else {
		/* Find the last free block before b. */
		prev = sp->freelist;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		/* Merge with the following block when contiguous. */
		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		/* Merge with the preceding block when contiguous. */
		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}
/*
 * slob_alloc: entry point into the slob allocator.
 *
 * Selects the size-class list for @size, first-fit scans the partially
 * free pages on it (rotating the list after a successful allocation so
 * the next scan resumes there), and falls back to a freshly allocated
 * page when nothing fits.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct page *sp;
	struct list_head *mark;
	struct list_head *size_list;
	slob_t *buf = NULL;
	unsigned long lock_flags;

	/* Pick the free-page list matching this request's size class. */
	size_list = (size < SLOB_BREAK1) ? &free_slob_small :
		    (size < SLOB_BREAK2) ? &free_slob_medium :
					   &free_slob_large;

	spin_lock_irqsave(&slob_lock, lock_flags);
	list_for_each_entry(sp, size_list, list) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
			continue;
#endif
		/* Skip pages without enough free units. */
		if (sp->units < SLOB_UNITS(size))
			continue;

		mark = sp->list.prev;
		buf = slob_page_alloc(sp, size, align);
		if (!buf)
			continue;

		/*
		 * Rotate the list so the next scan starts at the page
		 * we just used (Knuth vol 1, sec 2.5, pg 449).
		 */
		if (mark != size_list->prev && size_list->next != mark->next)
			list_move_tail(size_list, mark->next);
		break;
	}
	spin_unlock_irqrestore(&slob_lock, lock_flags);

	/* No partial page could hold the request: get a new page. */
	if (!buf) {
		buf = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!buf)
			return NULL;
		sp = slob_page(buf);
		set_slob_page(sp);

		spin_lock_irqsave(&slob_lock, lock_flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->freelist = buf;
		INIT_LIST_HEAD(&sp->list);
		set_slob(buf, SLOB_UNITS(PAGE_SIZE),
			 buf + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, size_list);
		buf = slob_page_alloc(sp, size, align);
		BUG_ON(!buf);
		spin_unlock_irqrestore(&slob_lock, lock_flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && buf))
		memset(buf, 0, size);
	return buf;
}
/*
 * Allocate a slob block within a given slob_page sp.
 *
 * We modified the slob_page_alloc function to find the
 * best block on a given page. We understand that it is
 * searching through a page's free blocks twice (once
 * from the helper, and a second time for this function).
 * We make a best version of all the variables in the
 * function to keep track of the best block. Then we go
 * through the entire block list and return the best block.
 *
 * The #ifdef SLOB_BEST_FIT_ALG blocks change the brace structure:
 * with it defined, candidates only update the best_* record and the
 * allocation happens after the full scan (inside slob_last); without
 * it, the first fitting candidate is allocated immediately (first fit).
 */
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);
	/* Running record of the best-fitting block seen so far. */
	slob_t *best_prev = NULL, *best_cur = NULL, *best_aligned = NULL;
	int best_delta = 0;
	slobidx_t best_fit = 0;	/* slack left over by the best candidate */

	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
#ifdef SLOB_BEST_FIT_ALG
		/* Candidate must fit and leave less slack than the best so far. */
		if (avail >= units + delta &&
		    (best_cur == NULL || avail - (units + delta) < best_fit)) { /* room enough? */
#else
		if (avail >= units + delta) { /* room enough? */
#endif
			best_prev = prev;
			best_cur = cur;
			best_aligned = aligned;
			best_delta = delta;
			best_fit = avail - (units + delta);
#ifdef SLOB_BEST_FIT_ALG
		}
		/* Best fit: allocate only once the whole list was scanned. */
		if (slob_last(cur)) {
			if (best_cur != NULL) {
#endif
				slob_t *best_next = NULL;
				slobidx_t best_avail = slob_units(best_cur);

				if (best_delta) { /* need to fragment head to align? */
					best_next = slob_next(best_cur);
					set_slob(best_aligned, best_avail - best_delta, best_next);
					set_slob(best_cur, best_delta, best_aligned);
					best_prev = best_cur;
					best_cur = best_aligned;
					best_avail = slob_units(best_cur);
				}

				best_next = slob_next(best_cur);
				if (best_avail == units) { /* exact fit? unlink. */
					if (best_prev)
						set_slob(best_prev, slob_units(best_prev), best_next);
					else
						sp->free = best_next;
				} else { /* fragment */
					if (best_prev)
						set_slob(best_prev, slob_units(best_prev), best_cur + units);
					else
						sp->free = best_cur + units;
					set_slob(best_cur + units, best_avail - units, best_next);
				}

				sp->units -= units;
				if (!sp->units)
					clear_slob_page_free(sp);
				return best_cur;
#ifdef SLOB_BEST_FIT_ALG
			}
#else
		}
		if (slob_last(cur)) {
#endif
			return NULL;
		}
	}
}

/*
 * The helper function, slob_page_best_fit_check, goes
 * through the page's list of blocks and returns a number.
 * The number will either be -1, 0, or some positive integer. -1
 * means that there is no big enough block. 0 means a perfect
 * fitted block. Any positive integer represents the amount
 * that will be left over in the block if allocation happens. We
 * either want this number to be 0 or as low as possible for
 * best-fit algorithm.
 */
static int slob_page_best_fit_check(struct slob_page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);
	slob_t *best_cur = NULL;
	slobidx_t best_fit = 0;

	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta &&
		    (best_cur == NULL || avail - (units + delta) < best_fit)) { /* room enough? */
			best_cur = cur;
			best_fit = avail - (units + delta);
			/* Perfect fit: no need to keep searching. */
			if (best_fit == 0)
				return 0;
		}
		if (slob_last(cur)) {
			if (best_cur != NULL)
				return best_fit;
			return -1;	/* no block is big enough */
		}
	}
}