void scavenge_single_pages(int n)
{
  /* Add n pages to the single_pages list */
  struct page *scan, *best;
  __rcintptr bestn;

  /* Take any group in unused_pages that is <= n or < K. Remember the
     smallest entry > n too. This is sort of equivalent to a best fit
     where we allow partial allocations to make up a whole */
  best = NULL;
  bestn = (__rcintptr)1 << (sizeof(__rcintptr) * CHAR_BIT - 2);
  scan = unused_pages;
  while (scan)
    {
      /* The pages < K can't be used for anything but single pages so we
         might as well grab them even if they are a little too big */
      if (scan->pagecount <= n || scan->pagecount < K)
        {
          struct page *adding = scan;

          scan = scan->next;
          n -= adding->pagecount;
          unlink_page(&unused_pages, adding);
          add_single_pages(adding);
          assert(single_pages->pagecount > 0);
          if (n <= 0)
            return;
        }
      else
        {
          if (scan->pagecount < bestn)
            {
              bestn = scan->pagecount;
              best = scan;
            }
          scan = scan->next;
        }
    }

  /* Still not enough. Split the best block if there is one, allocate
     new pages otherwise */
  if (!best)
    add_single_pages(alloc_new(n, NULL));
  else if (best->pagecount - n < K)
    {
      unlink_page(&unused_pages, best);
      add_single_pages(best);
      assert(single_pages->pagecount > 0);
    }
  else
    {
      add_single_pages(alloc_split(best, n, NULL));
      assert(single_pages->pagecount > 0);
    }
}
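/*
 * scavenge_single_pages leans on list helpers (unlink_page, addfront,
 * add_single_pages) whose definitions live elsewhere in the allocator.
 * The sketch below illustrates the semantics this code assumes for the
 * two generic ones, taking struct page to carry next/prev links for
 * the free list; the example_ names are hypothetical, not the
 * allocator's actual definitions.
 */
static void example_addfront(struct page **list, struct page *p)
{
  /* Push p onto the front of *list */
  p->prev = NULL;
  p->next = *list;
  if (*list)
    (*list)->prev = p;
  *list = p;
}

static void example_unlink_page(struct page **list, struct page *p)
{
  /* Splice p out of *list, patching the head if p was first */
  if (p->prev)
    p->prev->next = p->next;
  else
    *list = p->next;
  if (p->next)
    p->next->prev = p->prev;
}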
void unlink_partial_page(page_info *info)
{
  int bucket = info->free_nr / PARTIAL_PAGE_BUCKET;

  /* List head first, page second, matching unlink_page's other uses */
  unlink_page(&partial_pages[bucket], info);

  if (first_partial_page_index == bucket && partial_pages[bucket] == NULL)
    {
      --partial_bucket_count;
      if (partial_bucket_count == 0)
        {
          first_partial_page_index = 0;
          return;
        }
      /* The lowest bucket just emptied; find the next non-empty one */
      for (int i = bucket + 1; i < partial_pages_bucket_num; ++i)
        if (partial_pages[i] != NULL)
          {
            first_partial_page_index = i;
            break;
          }
    }
  else if (partial_pages[bucket] == NULL)
    --partial_bucket_count;
}
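/*
 * Only the unlink side of the partial-page bookkeeping is shown in
 * this section.  Below is a minimal sketch of the insertion-side
 * counterpart, preserving the same invariants (partial_bucket_count
 * counts the non-empty buckets, first_partial_page_index tracks the
 * lowest one); the name example_link_partial_page and the addfront
 * call are assumptions, not code from this allocator.
 */
static void example_link_partial_page(page_info *info)
{
  int bucket = info->free_nr / PARTIAL_PAGE_BUCKET;

  if (partial_pages[bucket] == NULL)
    {
      /* Bucket goes from empty to non-empty */
      ++partial_bucket_count;
      if (partial_bucket_count == 1 || bucket < first_partial_page_index)
        first_partial_page_index = bucket;
    }
  addfront(&partial_pages[bucket], info);
}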
static void coalesce(struct page *p)
{
  struct page *prev = p->prev_address, *next;

  p->free = 1;
  assert(p->pagecount > 0);
  if (p->pagecount)
    {
      // mark data part of page non-accessible
      // VALGRIND_MAKE_NOACCESS(p + sizeof(struct page),
      //                        (p->pagecount << RPAGELOG) - sizeof(struct page));
    }

  /* Coalesce with predecessor ? */
  if (prev->free && (char *)prev + (prev->pagecount << RPAGELOG) == (char *)p)
    {
      // fprintf(stderr,
      //         "## coalesce p=%p (pagecount=%d) with prev=%p (pagecount=%d)\n",
      //         p, p->pagecount, prev, prev->pagecount);
      assert(prev->pagecount > 0);
      prev->pagecount += p->pagecount;
      unlink_address(p);
      p = prev;
    }
  else /* No, add to free pages list */
    {
      // fprintf(stderr, "## coalesce p=%p (pagecount=%d) => unused_pages\n",
      //         p, p->pagecount);
      addfront(&unused_pages, p);
    }

  next = p->next_address;
  /* Coalesce with successor ? */
  if (next->free && (char *)p + (p->pagecount << RPAGELOG) == (char *)next)
    {
      // fprintf(stderr,
      //         "## coalesce p=%p (pagecount=%d) with next=%p (pagecount=%d)\n",
      //         p, p->pagecount, next, next->pagecount);
      unlink_page(&unused_pages, next);
      p->pagecount += next->pagecount;
      unlink_address(next);
    }
}
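/*
 * The two coalescing tests above are pure address arithmetic: a block
 * of pagecount pages starting at p spans pagecount << RPAGELOG bytes,
 * so its successor begins exactly at that offset.  Factored out as a
 * predicate for illustration (the helper name is hypothetical):
 */
static int example_adjacent(struct page *a, struct page *b)
{
  /* True iff block a ends exactly where block b begins */
  return (char *)a + (a->pagecount << RPAGELOG) == (char *)b;
}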
struct page *alloc_split(struct page *split, int n, struct page *next)
/* Assumes freepages_lock held */
{
  // fprintf(stderr, "## alloc_split: split=%p, n=%d, next=%p, split->pagecount=%d\n",
  //         split, n, next, split->pagecount);
#ifndef NMEMDEBUG
  /* These pages had better be free */
  pageid i, pnb = PAGENB(split);

  assert(n > 0);
  assert(split->pagecount >= n);
  for (i = pnb; i < pnb + split->pagecount; i++)
    assert(page_region(i) == FREEPAGE);
#endif
  if (split->pagecount > n)
    {
      struct page *splitoff;

      /* Keep first part of block */
      split->pagecount -= n;
      assert(split->pagecount > 0);
      /* Return latter part of block */
      splitoff = split;
      split = (struct page *)((char *)split + (split->pagecount << RPAGELOG));
      VALGRIND_MAKE_WRITABLE(split, sizeof(struct page));
      memset(split, 0, sizeof(struct page));
      /* Update the by-address list */
      insertbefore_address(split, splitoff->next_address);
    }
  else
    {
      /* remove split from list */
      unlink_page(&unused_pages, split);
    }
  split->next = next;
  split->pagecount = n;
  assert(split->pagecount > 0);
  split->free = 0;

  return split;
}
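/*
 * A sketch of a typical caller, modelled on the alloc_split use in
 * scavenge_single_pages above: scan unused_pages for the first block
 * of at least n pages and carve n pages off it, falling back to
 * alloc_new for fresh pages.  The function name is hypothetical, and
 * freepages_lock must be held, as alloc_split assumes.
 */
static struct page *example_alloc_pages(int n, struct page *next)
{
  struct page *scan;

  for (scan = unused_pages; scan; scan = scan->next)
    if (scan->pagecount >= n)
      /* alloc_split also handles the exact-fit case (pagecount == n) */
      return alloc_split(scan, n, next);
  return alloc_new(n, next);
}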