void scavenge_single_pages(int n)
{
  /* Add n pages to the single_pages list. */
  struct page *scan, *best;
  __rcintptr bestn;

  /* Take any group in unused_pages that is <= n or < K pages. Also
     remember the smallest entry > n. This is sort of a best fit in
     which partial allocations are allowed to make up the whole. */
  best = NULL;
  bestn = (__rcintptr)1 << (sizeof(__rcintptr) * CHAR_BIT - 2);
  scan = unused_pages;
  while (scan) {
    /* Groups of fewer than K pages can't be used for anything but
       single pages, so grab them even if they are a little too big. */
    if (scan->pagecount <= n || scan->pagecount < K) {
      struct page *adding = scan;

      scan = scan->next;
      n -= adding->pagecount;
      unlink_page(&unused_pages, adding);
      add_single_pages(adding);
      assert(single_pages->pagecount > 0);
      if (n <= 0)
        return;
    } else {
      if (scan->pagecount < bestn) {
        bestn = scan->pagecount;
        best = scan;
      }
      scan = scan->next;
    }
  }

  /* Still not enough. Split the best block if there is one; allocate
     new pages otherwise. */
  if (!best) {
    add_single_pages(alloc_new(n, NULL));
  } else if (best->pagecount - n < K) {
    /* The leftover would be below K pages, so take the whole group. */
    unlink_page(&unused_pages, best);
    add_single_pages(best);
    assert(single_pages->pagecount > 0);
  } else {
    add_single_pages(alloc_split(best, n, NULL));
    assert(single_pages->pagecount > 0);
  }
}
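/* A minimal, self-contained sketch of the selection policy above. The names
   (demo_group, demo_scavenge, DEMO_K) are illustrative stand-ins for the
   allocator's real types, and the demo only reports decisions instead of
   unlinking pages. Example: with groups of 3, 10 and 2 pages, DEMO_K = 4
   and a request for 6 pages, the 3- and 2-page groups are taken whole and
   the last page is split off the 10-page group. */
#include <stdio.h>

#define DEMO_K 4 /* assumed threshold below which groups are single-page fodder */

struct demo_group { int pagecount; struct demo_group *next; };

/* Walk the free list: take every group that fits (or is below DEMO_K),
   track the smallest group larger than the remaining request, and fall
   back to splitting it (or allocating fresh pages) at the end. */
static void demo_scavenge(struct demo_group *free_list, int n)
{
  struct demo_group *scan = free_list, *best = NULL;

  while (scan) {
    if (scan->pagecount <= n || scan->pagecount < DEMO_K) {
      printf("take whole group of %d\n", scan->pagecount);
      n -= scan->pagecount;
      if (n <= 0)
        return;
    } else if (!best || scan->pagecount < best->pagecount) {
      best = scan;
    }
    scan = scan->next;
  }
  if (best)
    printf("split %d page(s) off the group of %d\n", n, best->pagecount);
  else
    printf("allocate %d fresh page(s)\n", n);
}

int main(void)
{
  struct demo_group g3 = { 2, NULL };
  struct demo_group g2 = { 10, &g3 };
  struct demo_group g1 = { 3, &g2 };

  demo_scavenge(&g1, 6); /* takes 3 and 2, then splits 1 off the 10 */
  return 0;
}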
struct page* alloc_single_page(struct page *next)
{
  struct page *p = NULL;

  /* Pick a list at random and keep trying until its spinlock is free;
     randomizing the choice on every call spreads contention across the
     lists (an earlier variant hashed pthread_self() to pick a fixed
     per-thread list). list_id is declared elsewhere. */
  list_id = get_next_random_list(MAXLISTS);
  while (try_lock(&single_pages[list_id % MAXLISTS].lock) == 1)
    list_id = get_next_random_list(MAXLISTS);

  /* Refill the list with a fresh page group if it is empty. */
  if (single_pages[list_id % MAXLISTS].page_count == 0) {
    p = alloc_new(PAGE_GROUP_SIZE, NULL);
    add_single_pages(p);
  }

  /* Pop the head page and chain it in front of the caller's list. */
  p = single_pages[list_id % MAXLISTS].pages;
  single_pages[list_id % MAXLISTS].pages = p->next;
  p->next = next;
  single_pages[list_id % MAXLISTS].page_count--;

  release_spinlock(&single_pages[list_id % MAXLISTS].lock);
  return p;
}
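/* A self-contained sketch of the randomized try-lock pattern used above,
   written with C11 atomics. The names (NSTRIPES, stripe, stripes,
   pick_stripe) are assumptions for the demo, not the allocator's API; the
   point is only the idiom: pick a random stripe, try its lock, and retry
   on a different stripe rather than spinning on a contended one. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define NSTRIPES 8

struct stripe {
  atomic_flag lock;
  int page_count;
};

static struct stripe stripes[NSTRIPES];

/* Returns the index of a stripe whose lock the caller now holds. */
static int pick_stripe(void)
{
  int id = rand() % NSTRIPES;

  /* atomic_flag_test_and_set returns the previous value, so `true'
     means the lock was already held: move on to another stripe. */
  while (atomic_flag_test_and_set(&stripes[id].lock))
    id = rand() % NSTRIPES;
  return id;
}

int main(void)
{
  int i, id;

  for (i = 0; i < NSTRIPES; i++)     /* start with all locks clear */
    atomic_flag_clear(&stripes[i].lock);

  id = pick_stripe();
  stripes[id].page_count++;             /* the critical section */
  atomic_flag_clear(&stripes[id].lock); /* release */
  printf("used stripe %d\n", id);
  return 0;
}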
struct page* alloc_pages(int n, struct page *next)
{
  struct page *ret_val, *p = NULL;

  assert(n >= K);

  /* Pick a list at random and keep trying until its spinlock is free. */
  list_id = get_next_random_list(MAXLISTS);
  while (try_lock(&single_pages[list_id % MAXLISTS].lock) == 1)
    list_id = get_next_random_list(MAXLISTS);

  /* If the list can't cover the request, allocate a fresh contiguous
     group big enough for it plus a PAGE_GROUP_SIZE refill. */
  if (n > single_pages[list_id % MAXLISTS].page_count) {
    p = alloc_new(n + PAGE_GROUP_SIZE, NULL);
    add_single_pages(p);
  }

  /* Hand out the head page, chain it in front of the caller's list,
     and charge all n pages to this list's count. */
  ret_val = single_pages[list_id % MAXLISTS].pages;
  single_pages[list_id % MAXLISTS].pages =
    single_pages[list_id % MAXLISTS].pages->next;
  ret_val->next = next;
  single_pages[list_id % MAXLISTS].page_count -= n;

  release_spinlock(&single_pages[list_id % MAXLISTS].lock);
  return ret_val;
}
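/* Both allocators take the old list head as `next' and return the new
   head, so a caller can push freshly allocated pages onto a region's page
   list in one call. A self-contained sketch of that caller-side idiom
   follows; toy_page, toy_region and toy_alloc are hypothetical stand-ins
   for the real structures, with toy_alloc playing the role of
   alloc_single_page(next). */
#include <stdio.h>
#include <stdlib.h>

struct toy_page { int id; struct toy_page *next; };
struct toy_region { struct toy_page *pages; };

/* Stand-in allocator: make one page and chain it in front of `next'. */
static struct toy_page *toy_alloc(int id, struct toy_page *next)
{
  struct toy_page *p = malloc(sizeof *p);

  if (!p)
    abort();
  p->id = id;
  p->next = next;
  return p;
}

int main(void)
{
  struct toy_region r = { NULL };
  struct toy_page *p;

  /* Each call pushes onto the region's list, newest page first. */
  r.pages = toy_alloc(1, r.pages);
  r.pages = toy_alloc(2, r.pages);
  r.pages = toy_alloc(3, r.pages);

  for (p = r.pages; p; p = p->next)
    printf("page %d\n", p->id);
  return 0;
}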