/* Mark the memory range from 'from' (inclusive) to 'to' (exclusive)
   as belonging to region 'r'.  One-level page map variant. */
void set_region_range(void *from, void *to, region r)
{
  pageid pg = PAGENB(from);
  pageid end = PAGENB((pageid)to - 1);  /* last page actually covered */

  for (; pg <= end; pg++)
    set_page_region(pg, r);
}
/* Mark the memory range from 'from' (inclusive) to 'to' (exclusive)
 * as belonging to region 'r'.  Two-level variant: a position is a
 * (map number, page number within map) pair.
 *
 * Fix: the while loop was missing braces, so its body was only the
 * wrap-around 'if' — set_page_region() ran just once, after the loop,
 * and when firstp < RMAXPAGE nothing ever advanced, looping forever. */
void set_region_range(void *from, void *to, region_t r)
{
  pageid firstp = PAGENB((pageid)from), firstm = MAPNB((pageid)from);
  pageid lastp = PAGENB((pageid)to - 1), lastm = MAPNB((pageid)to - 1);

  while (firstm < lastm || firstp <= lastp) {
    /* Step into the next map when the page index runs off this one. */
    if (firstp >= RMAXPAGE) {
      firstm++;
      firstp -= RMAXPAGE;
    }
    set_page_region(firstm, firstp++, r);
  }
}
/* Return the one-page block 'p' (owned by region 'r') to the shared
   single-page free list.  Assumes freepages_lock held. */
void free_single_page(region r, struct page *p)
{
#ifndef NMEMDEBUG
  ASSERT_INUSE(p, r);
  set_page_region(PAGENB(p), FREEPAGE);
#endif
  /* Invariant maintained by callers: pagecount is accurate, so a
     single page always arrives with pagecount == 1. */
  assert(p->pagecount == 1);

  /* Cap the cache of single pages at a small fraction of the total;
     past the limit the page is deliberately dropped (coalescing at
     this point was disabled on purpose — see history). */
  if (single_page_count <= PAGE_GROUP_SIZE * 2) {
    p->next = single_pages;
    single_pages = p;
    single_page_count++;
  }
}
/* Tag 'npages' consecutive pages starting at page 'p' as belonging
   to region 'r'.  One-level page map variant. */
void set_region(struct page *p, int npages, region r)
{
  pageid base = PAGENB(p);
  int i;

  for (i = 0; i < npages; i++)
    set_page_region(base + i, r);
}
/* Return the one-page block 'p' (owned by region 'r') to a randomly
 * chosen per-list free pool, taking that list's spinlock.
 * Assumes freepages_lock held.
 *
 * Fix: the lock was acquired on single_pages[list_id % MAXLISTS] but
 * the push and release used single_pages[list_id] un-reduced — if
 * get_next_random_list() ever returns a value >= MAXLISTS, the list
 * mutated is not the one locked (and the access is out of bounds).
 * A single index is now computed once and used throughout.
 *
 * NOTE(review): 'list_id' appears to be a shared global written here
 * without synchronization — confirm it is per-thread or benign. */
void free_single_page(region_t r, struct page *p)
{
  int idx;

#ifndef NMEMDEBUG
  ASSERT_INUSE(p, r);
  set_page_region(MAPNB(p), PAGENB(p), FREEPAGE);
#endif /* ifndef NMEMDEBUG */

  /* Pick random lists until one's lock is acquired. */
  list_id = get_next_random_list(MAXLISTS);
  while (try_lock(&single_pages[list_id % MAXLISTS].lock) == 1)
    list_id = get_next_random_list(MAXLISTS);

  idx = list_id % MAXLISTS;  /* same slot the lock was taken on */
  p->next = single_pages[idx].pages;
  single_pages[idx].pages = p;
  single_pages[idx].page_count++;
  release_spinlock(&single_pages[idx].lock);
}
struct page* alloc_split(struct page *split, int n, struct page *next) { /* Assumes freepages_lock held */ #ifndef NMEMDEBUG /* These pages had better be free */ pageid i, pnb = PAGENB(split); pageid mnb = MAPNB(split); assert(split->pagecount >= n); for (i = pnb; i < pnb + split->pagecount; i++) assert(page_region(mnb, i) == FREEPAGE); #endif /* ifndef NMEMDEBUG */ if (split->pagecount > n) { /* Keep first part of block */ split->pagecount -= n; /* Return latter part of block */ split = (struct page*)( (char*)split + (split->pagecount << RPAGELOG) ); /* Update the by address list */ } split->next = next; split->pagecount = n; split->free = 0; /*split->list_id = Hash(pthread_self())%MAXLISTS;*/ split->list_id = list_id % MAXLISTS; return split; }
/* Tag 'npages' consecutive pages starting at page 'p' as belonging
   to region 'r'.  Two-level (map, page) variant.
   NOTE(review): the map index never advances here — this assumes the
   run does not cross a map boundary; confirm with callers. */
void set_region(struct page *p, int npages, region_t r)
{
  pageid map = MAPNB(p);
  pageid base = PAGENB(p);
  int i;

  for (i = 0; i < npages; i++)
    set_page_region(map, base + i, r);
}
/* Release the multi-page block 'p' from region 'r' back to the free
   pool, merging it with adjacent free blocks.
   Assumes freepages_lock held. */
void free_pages(region r, struct page *p)
{
#ifndef NMEMDEBUG
  /* Check ownership of every page, then mark each one free. */
  pageid pg, base = PAGENB(p);

  for (pg = base; pg < base + p->pagecount; pg++) {
    assert(page_region(pg) == r);
    set_page_region(pg, FREEPAGE);
  }
#endif /* ifndef NMEMDEBUG */
  coalesce(p);
}
/* Carve an allocation of 'n' pages out of the free block 'split',
   chaining the result onto 'next'.  If the block is larger than 'n'
   its head remains on the free structures and the tail is returned;
   otherwise the whole block is unlinked and returned. */
struct page *alloc_split(struct page *split, int n, struct page *next)
/* Assumes freepages_lock held */
{
#ifndef NMEMDEBUG
  /* These pages had better be free */
  pageid i, pnb = PAGENB(split);

  assert(n > 0);
  assert(split->pagecount >= n);
  for (i = pnb; i < pnb + split->pagecount; i++)
    assert(page_region(i) == FREEPAGE);
#endif
  if (split->pagecount > n) {
    struct page *splitoff;

    /* Keep first part of block */
    split->pagecount -= n;
    assert(split->pagecount > 0);
    /* Return latter part of block: the new header lives n pages
       before the end of the original block. */
    splitoff = split;
    split = (struct page *)((char *)split + (split->pagecount << RPAGELOG));
    /* The new header's memory was interior to the old block — tell
       valgrind it is writable metadata, and zero it before use. */
    VALGRIND_MAKE_WRITABLE(split, sizeof(struct page));
    memset(split, 0, sizeof(struct page));
    /* Update the by-address list: the tail sits just before the
       remainder's old successor. */
    insertbefore_address(split, splitoff->next_address);
  } else {
    /* Whole block consumed: remove split from the unused list */
    unlink_page(&unused_pages, split);
  }
  split->next = next;
  split->pagecount = n;
  assert(split->pagecount > 0);
  split->free = 0;
  return split;
}
/* Release the multi-page block 'p' from region 'r' onto a randomly
 * chosen per-list free pool, taking that list's spinlock.
 * Assumes freepages_lock held.
 *
 * Fix: as in free_single_page, the lock was taken on
 * single_pages[list_id % MAXLISTS] while the push and release used
 * single_pages[list_id] un-reduced; the two could name different
 * slots if get_next_random_list() returned >= MAXLISTS.  One index
 * is now computed once and used consistently.
 *
 * NOTE(review): 'list_id' appears to be a shared global written here
 * without synchronization — confirm it is per-thread or benign. */
void free_pages(region_t r, struct page *p)
{
  int idx;

#ifndef NMEMDEBUG
  /* Check ownership of every page, then mark each one free. */
  pageid i;
  pageid pnb = PAGENB(p);
  pageid mnb = MAPNB(p);

  for (i = pnb; i < pnb + p->pagecount; i++) {
    assert(page_region(mnb, i) == r);
    set_page_region(mnb, i, FREEPAGE);
  }
#endif /* ifndef NMEMDEBUG */

  /* Pick random lists until one's lock is acquired. */
  list_id = get_next_random_list(MAXLISTS);
  while (try_lock(&single_pages[list_id % MAXLISTS].lock) == 1)
    list_id = get_next_random_list(MAXLISTS);

  idx = list_id % MAXLISTS;  /* same slot the lock was taken on */
  p->next = single_pages[idx].pages;
  single_pages[idx].pages = p;
  single_pages[idx].page_count += p->pagecount;
  release_spinlock(&single_pages[idx].lock);
}