/* completion handler for BIO writes */
static int bi_write_complete(struct bio *bio, unsigned int bytes_done, int error)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	if (bio->bi_size)
		return 1;

	if (!uptodate)
		err("bi_write_complete: not uptodate\n");

	do {
		struct page *page = bvec->bv_page;

		DEBUG(3, "Cleaning up page %ld\n", page->index);
		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		ClearPageDirty(page);
		unlock_page(page);
		page_cache_release(page);
	} while (bvec >= bio->bi_io_vec);

	complete((struct completion *)bio->bi_private);
	return 0;
}
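/*
 * For context: the handler above signals the struct completion stored in
 * bi_private once every bvec page has been cleaned, unlocked and released.
 * The following is a minimal, hypothetical caller sketch (not from the
 * source) assuming the same 2.6-era bio API used above: bi_sector in
 * 512-byte units, submit_bio(rw, bio), and the three-argument bi_end_io
 * signature. The name write_page_sync is illustrative only.
 */
static int write_page_sync(struct block_device *bdev, sector_t sector,
			   struct page *page)
{
	DECLARE_COMPLETION(done);		/* completed by bi_write_complete() */
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	if (!bio)
		return -ENOMEM;

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;		/* in 512-byte sectors */
	bio->bi_end_io = bi_write_complete;
	bio->bi_private = &done;

	page_cache_get(page);			/* reference dropped by the handler */
	if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
		page_cache_release(page);
		bio_put(bio);
		return -EIO;
	}

	submit_bio(WRITE, bio);
	wait_for_completion(&done);
	bio_put(bio);

	return PageError(page) ? -EIO : 0;
}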
static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
{
	struct super_block *sb = sdp->sd_vfs;
	struct gfs2_sb *p;
	struct page *page;
	struct bio *bio;

	page = alloc_page(GFP_NOFS);
	if (unlikely(!page))
		return -ENOBUFS;

	ClearPageUptodate(page);
	ClearPageDirty(page);
	lock_page(page);

	bio = bio_alloc(GFP_NOFS, 1);
	bio->bi_sector = sector * (sb->s_blocksize >> 9);
	bio->bi_bdev = sb->s_bdev;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	bio->bi_end_io = end_bio_io_page;
	bio->bi_private = page;
	submit_bio(READ_SYNC | REQ_META, bio);
	wait_on_page_locked(page);
	bio_put(bio);
	if (!PageUptodate(page)) {
		__free_page(page);
		return -EIO;
	}

	p = kmap(page);
	gfs2_sb_in(sdp, p);
	kunmap(page);
	__free_page(page);

	return gfs2_check_sb(sdp, silent);
}
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	if (!PageLocked(page))
		BUG();
	if (!PageSwapCache(page))
		BUG();
	ClearPageDirty(page);
	__remove_inode_page(page);
	INC_CACHE_INFO(del_total);
}
/*
 * Strange swizzling function for shmem_getpage (and shmem_unuse)
 */
int move_from_swap_cache(struct page *page, unsigned long index,
			 struct address_space *mapping)
{
	int err = add_to_page_cache(page, mapping, index, GFP_ATOMIC);
	if (!err) {
		delete_from_swap_cache(page);
		/* shift page from clean_pages to dirty_pages list */
		ClearPageDirty(page);
		set_page_dirty(page);
	}
	return err;
}
static inline void remove_from_swap_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (mapping != &swapper_space)
		BUG();
	if (!PageSwapCache(page) || !PageLocked(page))
		PAGE_BUG(page);

	PageClearSwapCache(page);
	ClearPageDirty(page);
	__remove_inode_page(page);
}
/*
 * This will never put the page into the free list, the caller has
 * a reference on the page.
 */
void delete_from_swap_cache_nolock(struct page *page)
{
	if (!PageLocked(page))
		BUG();

	if (block_flushpage(page, 0))
		lru_cache_del(page);

	spin_lock(&pagecache_lock);
	ClearPageDirty(page);
	__delete_from_swap_cache(page);
	spin_unlock(&pagecache_lock);

	page_cache_release(page);
}
static int replayfs_commit_write(struct file *file, struct page *page,
				 unsigned from, unsigned to)
{
	struct inode *inode;
	loff_t pos;

	pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
	inode = page->mapping->host;

	ClearPageDirty(page);
	SetPageUptodate(page);
	kunmap(page);

	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}

	return 0;
}
// page_launder - try to move page to swap_active_list OR swap_inactive_list,
// - and call swapfs_write to swap out pages in swap_inactive_list
int page_launder(void)
{
	size_t maxscan = nr_inactive_pages, free_count = 0;
	list_entry_t *list = &(inactive_list.swap_list), *le = list_next(list);

	while (maxscan-- > 0 && le != list) {
		struct Page *page = le2page(le, swap_link);
		le = list_next(le);
		if (!(PageSwap(page) && !PageActive(page))) {
			panic("inactive: wrong swap list.\n");
		}
		swap_list_del(page);
		if (page_ref(page) != 0) {
			swap_active_list_add(page);
			continue;
		}
		swap_entry_t entry = page->index;
		if (!try_free_swap_entry(entry)) {
			if (PageDirty(page)) {
				ClearPageDirty(page);
				swap_duplicate(entry);
				if (swapfs_write(entry, page) != 0) {
					SetPageDirty(page);
				}
				mem_map[swap_offset(entry)]--;
				if (page_ref(page) != 0) {
					swap_active_list_add(page);
					continue;
				}
				if (PageDirty(page)) {
					swap_inactive_list_add(page);
					continue;
				}
				try_free_swap_entry(entry);
			}
		}
		free_count++;
		swap_free_page(page);
	}
	return free_count;
}
/*
 * @brief This will be called when the dirty pages are flushed.
 */
int yramfs_file_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *pNode = page->mapping->host;
	yramfs_file_info_t *pInfo = NULL;

	DBG_PRINT("write page %p, %ld", page_address(page), pNode->i_ino);

	pInfo = (yramfs_file_info_t *)pNode->i_private;
	if (NULL == pInfo)
		return -EINVAL;

	if (pInfo->pContent == NULL) {
		/* Allocate a full page for the backing buffer. The original
		 * kmalloc(sizeof(PAGE_SIZE), ...) only allocated the size of
		 * the constant itself, which the memcpy below would overrun. */
		pInfo->pContent = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (pInfo->pContent == NULL)
			return -ENOMEM;
		pInfo->contentSize = PAGE_SIZE;
	}

	memcpy(pInfo->pContent, page_address(page), PAGE_SIZE);
	ClearPageDirty(page);
	if (PageLocked(page))
		unlock_page(page);

	return 0;
}
int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
			       unsigned copied, struct page *page)
{
	int ret, no_expand;
	void *kaddr;
	struct ext4_iloc iloc;

	if (unlikely(copied < len)) {
		if (!PageUptodate(page)) {
			copied = 0;
			goto out;
		}
	}

	ret = ext4_get_inode_loc(inode, &iloc);
	if (ret) {
		ext4_std_error(inode->i_sb, ret);
		copied = 0;
		goto out;
	}

	ext4_write_lock_xattr(inode, &no_expand);
	BUG_ON(!ext4_has_inline_data(inode));

	kaddr = kmap_atomic(page);
	ext4_write_inline_data(inode, &iloc, kaddr, pos, len);
	kunmap_atomic(kaddr);
	SetPageUptodate(page);
	/* clear page dirty so that writepages wouldn't work for us. */
	ClearPageDirty(page);

	ext4_write_unlock_xattr(inode, &no_expand);
	brelse(iloc.bh);
	mark_inode_dirty(inode);
out:
	return copied;
}
static int shrink_cache(int nr_pages, zone_t *classzone, unsigned int gfp_mask, int priority)
{
	struct list_head *entry;
	int max_scan = nr_inactive_pages / priority;
	int max_mapped = min((nr_pages << (10 - priority)), max_scan / 10);

	spin_lock(&pagemap_lru_lock);
	while (--max_scan >= 0 && (entry = inactive_list.prev) != &inactive_list) {
		struct page *page;

		/* lock depth is 1 or 2 */
		if (unlikely(current->need_resched)) {
			spin_unlock(&pagemap_lru_lock);
			__set_current_state(TASK_RUNNING);
			schedule();
			spin_lock(&pagemap_lru_lock);
			continue;
		}

		page = list_entry(entry, struct page, lru);

		if (unlikely(!PageLRU(page)))
			BUG();
		if (unlikely(PageActive(page)))
			BUG();

		list_del(entry);
		list_add(entry, &inactive_list);

		/*
		 * Zero page counts can happen because we unlink the pages
		 * _after_ decrementing the usage count..
		 */
		if (unlikely(!page_count(page)))
			continue;

		if (!memclass(page->zone, classzone))
			continue;

		/* Racy check to avoid trylocking when not worthwhile */
		if (!page->buffers && (page_count(page) != 1 || !page->mapping))
			goto page_mapped;

		/*
		 * The page is locked. IO in progress?
		 * Move it to the back of the list.
		 */
		if (unlikely(TryLockPage(page))) {
			if (PageLaunder(page) && (gfp_mask & __GFP_FS)) {
				page_cache_get(page);
				spin_unlock(&pagemap_lru_lock);
				wait_on_page(page);
				page_cache_release(page);
				spin_lock(&pagemap_lru_lock);
			}
			continue;
		}

		if ((PageDirty(page) || DelallocPage(page)) && is_page_cache_freeable(page) && page->mapping) {
			/*
			 * It is not critical here to write it only if
			 * the page is unmapped because any direct writer
			 * like O_DIRECT would set the PG_dirty bitflag
			 * on the physical page after having successfully
			 * pinned it and after the I/O to the page is finished,
			 * so the direct writes to the page cannot get lost.
			 */
			int (*writepage)(struct page *);

			writepage = page->mapping->a_ops->writepage;
			if ((gfp_mask & __GFP_FS) && writepage) {
				ClearPageDirty(page);
				SetPageLaunder(page);
				page_cache_get(page);
				spin_unlock(&pagemap_lru_lock);

				writepage(page);
				page_cache_release(page);

				spin_lock(&pagemap_lru_lock);
				continue;
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 */
		if (page->buffers) {
			spin_unlock(&pagemap_lru_lock);

			/* avoid to free a locked page */
			page_cache_get(page);

			if (try_to_release_page(page, gfp_mask)) {
				if (!page->mapping) {
					/*
					 * We must not allow an anon page
					 * with no buffers to be visible on
					 * the LRU, so we unlock the page after
					 * taking the lru lock
					 */
					spin_lock(&pagemap_lru_lock);
					UnlockPage(page);
					__lru_cache_del(page);

					/* effectively free the page here */
					page_cache_release(page);

					if (--nr_pages)
						continue;
					break;
				} else {
					/*
					 * The page is still in pagecache so undo the stuff
					 * before the try_to_release_page since we've not
					 * finished and we can now try the next step.
					 */
					page_cache_release(page);

					spin_lock(&pagemap_lru_lock);
				}
			} else {
				/* failed to drop the buffers so stop here */
				UnlockPage(page);
				page_cache_release(page);

				spin_lock(&pagemap_lru_lock);
				continue;
			}
		}

		spin_lock(&pagecache_lock);

		/*
		 * this is the non-racy check for busy page.
		 */
		if (!page->mapping || !is_page_cache_freeable(page)) {
			spin_unlock(&pagecache_lock);
			UnlockPage(page);
page_mapped:
			if (--max_mapped >= 0)
				continue;

			/*
			 * Alert! We've found too many mapped pages on the
			 * inactive list, so we start swapping out now!
			 */
			spin_unlock(&pagemap_lru_lock);
			swap_out(priority, gfp_mask, classzone);
			return nr_pages;
		}

		/*
		 * It is critical to check PageDirty _after_ we made sure
		 * the page is freeable, so not in use by anybody.
		 */
		if (PageDirty(page)) {
			spin_unlock(&pagecache_lock);
			UnlockPage(page);
			continue;
		}

		/* point of no return */
		if (likely(!PageSwapCache(page))) {
			__remove_inode_page(page);
			spin_unlock(&pagecache_lock);
		} else {
			swp_entry_t swap;
			swap.val = page->index;
			__delete_from_swap_cache(page);
			spin_unlock(&pagecache_lock);
			swap_free(swap);
		}

		__lru_cache_del(page);
		UnlockPage(page);

		/* effectively free the page here */
		page_cache_release(page);

		if (--nr_pages)
			continue;
		break;
	}
	spin_unlock(&pagemap_lru_lock);

	return nr_pages;
}
static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long index, page_idx, mask, flags;
	free_area_t *area;
	struct page *base;
	zone_t *zone;

	/*
	 * Yes, think what happens when other parts of the kernel take
	 * a reference to a page in order to pin it for io. -ben
	 */
	if (PageLRU(page)) {
		if (unlikely(in_interrupt()))
			BUG();
		lru_cache_del(page);
	}

	if (page->buffers)
		BUG();
	if (page->mapping)
		BUG();
	if (!VALID_PAGE(page))
		BUG();
	if (PageLocked(page))
		BUG();
	if (PageActive(page))
		BUG();
	ClearPageReferenced(page);
	ClearPageDirty(page);

	if (current->flags & PF_FREE_PAGES)
		goto local_freelist;
back_local_freelist:

	zone = page_zone(page);

	mask = (~0UL) << order;
	base = zone->zone_mem_map;
	page_idx = page - base;
	if (page_idx & ~mask)
		BUG();
	index = page_idx >> (1 + order);

	area = zone->free_area + order;

	spin_lock_irqsave(&zone->lock, flags);

	zone->free_pages -= mask;

	while (mask + (1 << (MAX_ORDER-1))) {
		struct page *buddy1, *buddy2;

		if (area >= zone->free_area + MAX_ORDER)
			BUG();
		if (!__test_and_change_bit(index, area->map))
			/*
			 * the buddy page is still allocated.
			 */
			break;
		/*
		 * Move the buddy up one level.
		 * This code is taking advantage of the identity:
		 * -mask = 1+~mask
		 */
		buddy1 = base + (page_idx ^ -mask);
		buddy2 = base + page_idx;
		if (BAD_RANGE(zone, buddy1))
			BUG();
		if (BAD_RANGE(zone, buddy2))
			BUG();

		list_del(&buddy1->list);
		mask <<= 1;
		area++;
		index >>= 1;
		page_idx &= mask;
	}
	list_add(&(base + page_idx)->list, &area->free_list);

	spin_unlock_irqrestore(&zone->lock, flags);
	return;

local_freelist:
	if (current->nr_local_pages)
		goto back_local_freelist;
	if (in_interrupt())
		goto back_local_freelist;

	list_add(&page->list, &current->local_pages);
	page->index = order;
	current->nr_local_pages++;
}
int page_launder(int gfp_mask, int sync)
{
	int launder_loop, maxscan, cleaned_pages, maxlaunder;
	int can_get_io_locks;
	struct list_head *page_lru;
	struct page *page;

	/*
	 * We can only grab the IO locks (eg. for flushing dirty
	 * buffers to disk) if __GFP_IO is set.
	 */
	can_get_io_locks = gfp_mask & __GFP_IO;

	launder_loop = 0;
	maxlaunder = 0;
	cleaned_pages = 0;

dirty_page_rescan:
	spin_lock(&pagemap_lru_lock);
	maxscan = nr_inactive_dirty_pages;
	while ((page_lru = inactive_dirty_list.prev) != &inactive_dirty_list &&
				maxscan-- > 0) {
		page = list_entry(page_lru, struct page, lru);

		/* Wrong page on list?! (list corruption, should not happen) */
		if (!PageInactiveDirty(page)) {
			printk("VM: page_launder, wrong page on list.\n");
			list_del(page_lru);
			nr_inactive_dirty_pages--;
			page->zone->inactive_dirty_pages--;
			continue;
		}

		/* Page is or was in use?  Move it to the active list. */
		if (PageTestandClearReferenced(page) || page->age > 0 ||
				(!page->buffers && page_count(page) > 1) ||
				page_ramdisk(page)) {
			del_page_from_inactive_dirty_list(page);
			add_page_to_active_list(page);
			continue;
		}

		/*
		 * The page is locked. IO in progress?
		 * Move it to the back of the list.
		 */
		if (TryLockPage(page)) {
			list_del(page_lru);
			list_add(page_lru, &inactive_dirty_list);
			continue;
		}

		/*
		 * Dirty swap-cache page? Write it out if
		 * last copy..
		 */
		if (PageDirty(page)) {
			int (*writepage)(struct page *) = page->mapping->a_ops->writepage;
			int result;

			if (!writepage)
				goto page_active;

			/* First time through? Move it to the back of the list */
			if (!launder_loop) {
				list_del(page_lru);
				list_add(page_lru, &inactive_dirty_list);
				UnlockPage(page);
				continue;
			}

			/* OK, do a physical asynchronous write to swap. */
			ClearPageDirty(page);
			page_cache_get(page);
			spin_unlock(&pagemap_lru_lock);

			result = writepage(page);
			page_cache_release(page);

			/* And re-start the thing.. */
			spin_lock(&pagemap_lru_lock);
			if (result != 1)
				continue;
			/* writepage refused to do anything */
			set_page_dirty(page);
			goto page_active;
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we either free
		 * the page (in case it was a buffercache only page) or we
		 * move the page to the inactive_clean list.
		 *
		 * On the first round, we should free all previously cleaned
		 * buffer pages
		 */
		if (page->buffers) {
			int wait, clearedbuf;
			int freed_page = 0;

			/*
			 * Since we might be doing disk IO, we have to
			 * drop the spinlock and take an extra reference
			 * on the page so it doesn't go away from under us.
			 */
			del_page_from_inactive_dirty_list(page);
			page_cache_get(page);
			spin_unlock(&pagemap_lru_lock);

			/* Will we do (asynchronous) IO? */
			if (launder_loop && maxlaunder == 0 && sync)
				wait = 2;	/* Synchronous IO */
			else if (launder_loop && maxlaunder-- > 0)
				wait = 1;	/* Async IO */
			else
				wait = 0;	/* No IO */

			/* Try to free the page buffers. */
			clearedbuf = try_to_free_buffers(page, wait);

			/*
			 * Re-take the spinlock. Note that we cannot
			 * unlock the page yet since we're still
			 * accessing the page_struct here...
			 */
			spin_lock(&pagemap_lru_lock);

			/* The buffers were not freed. */
			if (!clearedbuf) {
				add_page_to_inactive_dirty_list(page);

			/* The page was only in the buffer cache. */
			} else if (!page->mapping) {
				atomic_dec(&buffermem_pages);
				freed_page = 1;
				cleaned_pages++;

			/* The page has more users besides the cache and us. */
			} else if (page_count(page) > 2) {
				add_page_to_active_list(page);

			/* OK, we "created" a freeable page. */
			} else /* page->mapping && page_count(page) == 2 */ {
				add_page_to_inactive_clean_list(page);
				cleaned_pages++;
			}

			/*
			 * Unlock the page and drop the extra reference.
			 * We can only do it here because we are accessing
			 * the page struct above.
			 */
			UnlockPage(page);
			page_cache_release(page);

			/*
			 * If we're freeing buffer cache pages, stop when
			 * we've got enough free memory.
			 */
			if (freed_page && !free_shortage())
				break;
			continue;
		} else if (page->mapping && !PageDirty(page)) {
			/*
			 * If a page had an extra reference in
			 * deactivate_page(), we will find it here.
			 * Now the page is really freeable, so we
			 * move it to the inactive_clean list.
			 */
			del_page_from_inactive_dirty_list(page);
			add_page_to_inactive_clean_list(page);
			UnlockPage(page);
			cleaned_pages++;
		} else {
page_active:
			/*
			 * OK, we don't know what to do with the page.
			 * It's no use keeping it here, so we move it to
			 * the active list.
			 */
			del_page_from_inactive_dirty_list(page);
			add_page_to_active_list(page);
			UnlockPage(page);
		}
	}
	spin_unlock(&pagemap_lru_lock);

	/*
	 * If we don't have enough free pages, we loop back once
	 * to queue the dirty pages for writeout. When we were called
	 * by a user process (that /needs/ a free page) and we didn't
	 * free anything yet, we wait synchronously on the writeout of
	 * MAX_SYNC_LAUNDER pages.
	 *
	 * We also wake up bdflush, since bdflush should, under most
	 * loads, flush out the dirty pages before we have to wait on
	 * IO.
	 */
	if (can_get_io_locks && !launder_loop && free_shortage()) {
		launder_loop = 1;
		/* If we cleaned pages, never do synchronous IO. */
		if (cleaned_pages)
			sync = 0;
		/* We only do a few "out of order" flushes. */
		maxlaunder = MAX_LAUNDER;
		/* Kflushd takes care of the rest. */
		wakeup_bdflush(0);
		goto dirty_page_rescan;
	}

	/* Return the number of pages moved to the inactive_clean list. */
	return cleaned_pages;
}
static int shrink_cache(int nr_pages, zone_t *classzone, unsigned int gfp_mask, int *failed_swapout)
{
	struct list_head *entry;
	int max_scan = (classzone->nr_inactive_pages + classzone->nr_active_pages) / vm_cache_scan_ratio;
	int max_mapped = vm_mapped_ratio * nr_pages;

	while (max_scan && classzone->nr_inactive_pages && (entry = inactive_list.prev) != &inactive_list) {
		struct page *page;

		if (unlikely(current->need_resched)) {
			spin_unlock(&pagemap_lru_lock);
			__set_current_state(TASK_RUNNING);
			schedule();
			spin_lock(&pagemap_lru_lock);
			continue;
		}

		page = list_entry(entry, struct page, lru);

		BUG_ON(!PageLRU(page));
		BUG_ON(PageActive(page));

		list_del(entry);
		list_add(entry, &inactive_list);

		/*
		 * Zero page counts can happen because we unlink the pages
		 * _after_ decrementing the usage count..
		 */
		if (unlikely(!page_count(page)))
			continue;

		if (!memclass(page_zone(page), classzone))
			continue;

		max_scan--;

		/* Racy check to avoid trylocking when not worthwhile */
		if (!page->buffers && (page_count(page) != 1 || !page->mapping))
			goto page_mapped;

		/*
		 * The page is locked. IO in progress?
		 * Move it to the back of the list.
		 */
		if (unlikely(TryLockPage(page))) {
			if (PageLaunder(page) && (gfp_mask & __GFP_FS)) {
				page_cache_get(page);
				spin_unlock(&pagemap_lru_lock);
				wait_on_page(page);
				page_cache_release(page);
				spin_lock(&pagemap_lru_lock);
			}
			continue;
		}

		if (PageDirty(page) && is_page_cache_freeable(page) && page->mapping) {
			/*
			 * It is not critical here to write it only if
			 * the page is unmapped because any direct writer
			 * like O_DIRECT would set the PG_dirty bitflag
			 * on the physical page after having successfully
			 * pinned it and after the I/O to the page is finished,
			 * so the direct writes to the page cannot get lost.
			 */
			int (*writepage)(struct page *);

			writepage = page->mapping->a_ops->writepage;
			if ((gfp_mask & __GFP_FS) && writepage) {
				ClearPageDirty(page);
				SetPageLaunder(page);
				page_cache_get(page);
				spin_unlock(&pagemap_lru_lock);

				writepage(page);
				page_cache_release(page);

				spin_lock(&pagemap_lru_lock);
				continue;
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 */
		if (page->buffers) {
			spin_unlock(&pagemap_lru_lock);

			/* avoid to free a locked page */
			page_cache_get(page);

			if (try_to_release_page(page, gfp_mask)) {
				if (!page->mapping) {
					/*
					 * We must not allow an anon page
					 * with no buffers to be visible on
					 * the LRU, so we unlock the page after
					 * taking the lru lock
					 */
					spin_lock(&pagemap_lru_lock);
					UnlockPage(page);
					__lru_cache_del(page);

					/* effectively free the page here */
					page_cache_release(page);

					if (--nr_pages)
						continue;
					break;
				} else {
					/*
					 * The page is still in pagecache so undo the stuff
					 * before the try_to_release_page since we've not
					 * finished and we can now try the next step.
					 */
					page_cache_release(page);

					spin_lock(&pagemap_lru_lock);
				}
			} else {
				/* failed to drop the buffers so stop here */
				UnlockPage(page);
				page_cache_release(page);

				spin_lock(&pagemap_lru_lock);
				continue;
			}
		}

		spin_lock(&pagecache_lock);

		/*
		 * This is the non-racy check for busy page.
		 * It is critical to check PageDirty _after_ we made sure
		 * the page is freeable so not in use by anybody.
		 * At this point we're guaranteed that page->buffers is NULL,
		 * nobody can refill page->buffers under us because we still
		 * hold the page lock.
		 */
		if (!page->mapping || page_count(page) > 1) {
			spin_unlock(&pagecache_lock);
			UnlockPage(page);
page_mapped:
			if (--max_mapped < 0) {
				spin_unlock(&pagemap_lru_lock);

				nr_pages -= kmem_cache_reap(gfp_mask);
				if (nr_pages <= 0)
					goto out;

				shrink_dcache_memory(vm_vfs_scan_ratio, gfp_mask);
				shrink_icache_memory(vm_vfs_scan_ratio, gfp_mask);
#ifdef CONFIG_QUOTA
				shrink_dqcache_memory(vm_vfs_scan_ratio, gfp_mask);
#endif

				if (!*failed_swapout)
					*failed_swapout = !swap_out(classzone);

				max_mapped = nr_pages * vm_mapped_ratio;

				spin_lock(&pagemap_lru_lock);
				refill_inactive(nr_pages, classzone);
			}
			continue;
		}

		if (PageDirty(page)) {
			spin_unlock(&pagecache_lock);
			UnlockPage(page);
			continue;
		}

		__lru_cache_del(page);

		/* point of no return */
		if (likely(!PageSwapCache(page))) {
			__remove_inode_page(page);
			spin_unlock(&pagecache_lock);
		} else {
			swp_entry_t swap;
			swap.val = page->index;
			__delete_from_swap_cache(page);
			spin_unlock(&pagecache_lock);
			swap_free(swap);
		}

		UnlockPage(page);

		/* effectively free the page here */
		page_cache_release(page);

		if (--nr_pages)
			continue;
		break;
	}
	spin_unlock(&pagemap_lru_lock);
out:
	return nr_pages;
}
static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long index, page_idx, mask, flags;
	free_area_t *area;
	struct page *base;
	zone_t *zone;

	/*
	 * Subtle. We do not want to test this in the inlined part of
	 * __free_page() - it's a rare condition and just increases
	 * cache footprint unnecessarily. So we do an 'incorrect'
	 * decrement on page->count for reserved pages, but this part
	 * makes it safe.
	 */
	if (PageReserved(page))
		return;
	/*
	 * Yes, think what happens when other parts of the kernel take
	 * a reference to a page in order to pin it for io. -ben
	 */
	if (PageLRU(page)) {
		if (unlikely(in_interrupt())) {
			unsigned long flags;

			spin_lock_irqsave(&free_pages_ok_no_irq_lock, flags);
			page->next_hash = free_pages_ok_no_irq_head;
			free_pages_ok_no_irq_head = page;
			page->index = order;
			spin_unlock_irqrestore(&free_pages_ok_no_irq_lock, flags);

			schedule_task(&free_pages_ok_no_irq_task);
			return;
		}

		lru_cache_del(page);
	}

	if (page->buffers)
		BUG();
	if (page->mapping)
		BUG();
	if (!VALID_PAGE(page))
		BUG();
	if (PageLocked(page))
		BUG();
	if (PageActive(page))
		BUG();
	ClearPageReferenced(page);
	ClearPageDirty(page);

	/* de-reference all the pages for this order */
	for (page_idx = 1; page_idx < (1 << order); page_idx++)
		set_page_count(&page[page_idx], 0);

	if (current->flags & PF_FREE_PAGES)
		goto local_freelist;
back_local_freelist:

	zone = page_zone(page);

	mask = (~0UL) << order;
	base = zone->zone_mem_map;
	page_idx = page - base;
	if (page_idx & ~mask)
		BUG();
	index = page_idx >> (1 + order);

	area = zone->free_area + order;

	spin_lock_irqsave(&zone->lock, flags);

	zone->free_pages -= mask;

	while (mask + (1 << (MAX_ORDER-1))) {
		struct page *buddy1, *buddy2;

		if (area >= zone->free_area + MAX_ORDER)
			BUG();
		if (!__test_and_change_bit(index, area->map))
			/*
			 * the buddy page is still allocated.
			 */
			break;
		/*
		 * Move the buddy up one level.
		 * This code is taking advantage of the identity:
		 * -mask = 1+~mask
		 */
		buddy1 = base + (page_idx ^ -mask);
		buddy2 = base + page_idx;
		if (BAD_RANGE(zone, buddy1))
			BUG();
		if (BAD_RANGE(zone, buddy2))
			BUG();

		list_del(&buddy1->list);
		mask <<= 1;
		area++;
		index >>= 1;
		page_idx &= mask;
	}
	list_add(&(base + page_idx)->list, &area->free_list);

	spin_unlock_irqrestore(&zone->lock, flags);
	return;

local_freelist:
	if (current->nr_local_pages)
		goto back_local_freelist;
	if (in_interrupt())
		goto back_local_freelist;

	list_add(&page->list, &current->local_pages);
	page->index = order;
	current->nr_local_pages++;
}
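/*
 * Aside: the buddy-coalescing loop in both __free_pages_ok variants relies on
 * the identity -mask = 1 + ~mask, so with mask = ~0UL << order the expression
 * page_idx ^ -mask flips exactly the bit that separates a block from its buddy
 * at that order, and page_idx &= mask (after mask <<= 1) yields the start of
 * the merged block. The program below is a standalone user-space sketch of
 * that arithmetic only (plain C, not kernel code; the values are illustrative).
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_idx = 12;		/* block starts at page 12 */
	unsigned int order;

	for (order = 0; order < 4; order++) {
		unsigned long mask = (~0UL) << order;
		unsigned long buddy = page_idx ^ -mask;	/* == page_idx ^ (1 << order) */

		printf("order %u: block %lu, buddy %lu, merged block starts at %lu\n",
		       order, page_idx, buddy, page_idx & (mask << 1));
	}
	return 0;
}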