/* Return the single page P (pagecount == 1) of region R to the free-page
   cache.  Caller must hold freepages_lock. */
void free_single_page(region r, struct page *p)
{
#ifndef NMEMDEBUG
  ASSERT_INUSE(p, r);
  set_page_region(PAGENB(p), FREEPAGE);
#endif

  /* The invariant that p->pagecount is kept accurate is enforced at the
     allocation sites, so a single page must arrive here with a count of
     exactly 1. */
  assert(p->pagecount == 1);

  /* Cache the page on the singles free list only while the cache is small
     (a modest multiple of the page-group size).  Beyond that cap the page
     is intentionally not re-listed here; coalescing it back into a page
     group is currently disabled. */
  if (single_page_count <= PAGE_GROUP_SIZE * 2)
    {
      p->next = single_pages;
      single_pages = p;
      single_page_count++;
    }
}
/* Return the single page P of region R to one of the MAXLISTS per-list
   free-page caches, chosen at random to spread contention.
   Assumes freepages_lock held. */
void free_single_page(region_t r, struct page *p)
{
  /* NOTE(review): the original code used a file-scope `list_id` shared by
     every thread, which races between lock acquisition and release; a
     local variable makes the index private to this call. */
  int list_id;

#ifndef NMEMDEBUG
  ASSERT_INUSE(p, r);
  set_page_region(MAPNB(p), PAGENB(p), FREEPAGE);
#endif /* ifndef NMEMDEBUG */

  /* Pick a random list and spin until its lock is acquired (try_lock
     returns 1 on failure).  Reduce modulo MAXLISTS once, up front, so the
     SAME index is used for the lock, the list manipulation, and the
     release.  The old code applied "% MAXLISTS" only when taking the
     lock, so an out-of-range index would lock one list but push onto —
     and unlock — a different, out-of-bounds one. */
  list_id = get_next_random_list(MAXLISTS) % MAXLISTS;
  while (try_lock(&single_pages[list_id].lock) == 1)
    list_id = get_next_random_list(MAXLISTS) % MAXLISTS;

  /* Push P onto the chosen list (LIFO). */
  p->next = single_pages[list_id].pages;
  single_pages[list_id].pages = p;
  single_pages[list_id].page_count++;

  release_spinlock(&single_pages[list_id].lock);
}