void free_single_page(region r, struct page *p) /* Assumes freepages_lock held */
{
#ifndef NMEMDEBUG
  ASSERT_INUSE(p, r);
  set_page_region(PAGENB(p), FREEPAGE);
#endif
  /* Caller must hand us exactly one page; multi-page blocks go through
     free_pages() instead. */
  assert(p->pagecount == 1);

  /* Keep the single-page cache small (a fraction of total allocated
     pages); once it overflows, hand the page back via coalesce(). */
  if (single_page_count <= PAGE_GROUP_SIZE * 2) {
    p->next = single_pages;
    single_pages = p;
    single_page_count++;
  } else {
    coalesce(p);
  }
}
/* Mark the memory range from 'from' (inclusive) to 'to' (exclusive)
   as belonging to region with id 'rid' */
void set_region_range(void *from, void *to, region r)
{
  pageid pnb;
  /* 'to' is exclusive, so the last page covered is the one containing
     the byte just before it. */
  const pageid last = PAGENB((pageid)to - 1);

  for (pnb = PAGENB(from); pnb <= last; pnb++)
    set_page_region(pnb, r);
}
/* Tag the 'npages' consecutive pages starting at 'p' as owned by 'r'. */
void set_region(struct page *p, int npages, region r)
{
  const pageid base = PAGENB(p);
  int k;

  for (k = 0; k < npages; k++)
    set_page_region(base + k, r);
}
void free_single_page(region_t r, struct page *p) /* Assumes freepages_lock held */
{
  /* Index of the single-page free list this page is pushed onto.  This
     must be a function-local: sharing one scratch variable between
     threads would let thread A lock list X, thread B overwrite the
     index, and thread A then update a list it never locked. */
  int list_id;

#ifndef NMEMDEBUG
  ASSERT_INUSE(p, r);
  set_page_region(MAPNB(p), PAGENB(p), FREEPAGE);
#endif /* ifndef NMEMDEBUG */

  /* Pick a random free list and retry until its spinlock is won.
     Reduce the random value modulo MAXLISTS once, up front, so the list
     we lock and the list we modify are guaranteed to be the same one
     (the original locked [list_id % MAXLISTS] but updated [list_id]). */
  list_id = get_next_random_list(MAXLISTS) % MAXLISTS;
  while (try_lock(&single_pages[list_id].lock) == 1)
    list_id = get_next_random_list(MAXLISTS) % MAXLISTS;

  /* Push the page on the chosen list's LIFO chain. */
  p->next = single_pages[list_id].pages;
  single_pages[list_id].pages = p;
  single_pages[list_id].page_count++;

  release_spinlock(&single_pages[list_id].lock);
}
/* Tag the 'npages' consecutive pages starting at 'p' as owned by 'r'.
   NOTE(review): the map number is held fixed while the page number
   advances — presumably callers never pass a block that crosses a map
   boundary; confirm against the allocator. */
void set_region(struct page *p, int npages, region_t r)
{
  const pageid mnb = MAPNB(p);
  const pageid base = PAGENB(p);
  int k;

  for (k = 0; k < npages; k++)
    set_page_region(mnb, base + k, r);
}
/* Mark the memory range from 'from' (inclusive) to 'to' (exclusive)
 * as belonging to region with id 'rid' */
void set_region_range(void *from, void *to, region_t r)
{
  pageid firstp = PAGENB((pageid)from), firstm = MAPNB((pageid)from),
         lastp = PAGENB((pageid)to - 1), lastm = MAPNB((pageid)to - 1);

  /* Walk (map, page) pairs from the first page up to and including the
     last.  The braces here are the fix: the original while loop had no
     braces, so set_page_region() sat outside the loop — only the wrap
     check executed (spinning forever once firstp < RMAXPAGE) and at
     most one page was ever tagged. */
  while (firstm < lastm || firstp <= lastp) {
    if (firstp >= RMAXPAGE) {
      /* Page index ran off the end of the current map: advance to the
         next map and wrap the page index. */
      firstm++;
      firstp -= RMAXPAGE;
    }
    set_page_region(firstm, firstp++, r);
  }
}
void free_pages(region r, struct page *p) /* Assumes freepages_lock held */
{
#ifndef NMEMDEBUG
  /* Debug builds verify every page of the block still belongs to 'r'
     before marking it free in the page map. */
  const pageid base = PAGENB(p);
  const pageid end = base + p->pagecount;
  pageid idx;

  for (idx = base; idx < end; idx++) {
    assert(page_region(idx) == r);
    set_page_region(idx, FREEPAGE);
  }
#endif
  coalesce(p);
}
void free_pages(region_t r, struct page *p) /* Assumes freepages_lock held */
{
  /* Must be function-local — see free_single_page(): a shared index
     races between the try_lock probe and the list update. */
  int list_id;

#ifndef NMEMDEBUG
  pageid i;
  pageid pnb = PAGENB(p);
  pageid mnb = MAPNB(p);

  /* Debug builds verify every page of the block still belongs to 'r'
     before marking it free in the page map.
     NOTE(review): mnb is held fixed across the loop — assumes the block
     never crosses a map boundary; confirm against the allocator. */
  for (i = pnb; i < pnb + p->pagecount; i++) {
    assert(page_region(mnb, i) == r);
    set_page_region(mnb, i, FREEPAGE);
  }
#endif /* ifndef NMEMDEBUG */

  /* Pick a random free list and retry until its spinlock is won.
     Reduce modulo MAXLISTS once, up front, so the list we lock and the
     list we modify are guaranteed to be the same one (the original
     locked [list_id % MAXLISTS] but updated [list_id]). */
  list_id = get_next_random_list(MAXLISTS) % MAXLISTS;
  while (try_lock(&single_pages[list_id].lock) == 1)
    list_id = get_next_random_list(MAXLISTS) % MAXLISTS;

  /* Push the block as one node; page_count tracks pages, not nodes. */
  p->next = single_pages[list_id].pages;
  single_pages[list_id].pages = p;
  single_pages[list_id].page_count += p->pagecount;

  release_spinlock(&single_pages[list_id].lock);
}