/*
 * Batched page_cache_release(). Decrement the reference count on all the
 * passed pages. If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_inactive_list(): we recheck
 * the page count inside the lock to see whether shrink_inactive_list()
 * grabbed the page via the LRU. If it did, give up: shrink_inactive_list()
 * will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
	int i;
	struct pagevec pages_to_free;
	struct zone *zone = NULL;
	unsigned long uninitialized_var(flags);

	pagevec_init(&pages_to_free, cold);
	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		if (unlikely(PageCompound(page))) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			put_compound_page(page);
			continue;
		}

		if (!put_page_testzero(page))
			continue;

		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irqrestore(&zone->lru_lock,
									flags);
				zone = pagezone;
				spin_lock_irqsave(&zone->lru_lock, flags);
			}
			VM_BUG_ON(!PageLRU(page));
			__ClearPageLRU(page);
			del_page_from_lru(zone, page);
		}

		if (!pagevec_add(&pages_to_free, page)) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			__pagevec_free(&pages_to_free);
			pagevec_reinit(&pages_to_free);
		}
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	pagevec_free(&pages_to_free);
}
/*
 * Batched page_cache_release(). Decrement the reference count on all the
 * passed pages. If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_cache(): we recheck the
 * page count inside the lock to see whether shrink_cache grabbed the page
 * via the LRU. If it did, give up: shrink_cache will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
	int i;
	struct pagevec pages_to_free;
	struct zone *zone = NULL;
	unsigned long uninitialized_var(flags);

	pagevec_init(&pages_to_free, cold);
	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		if (unlikely(PageCompound(page))) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			put_compound_page(page);
			continue;
		}

		// dyc: if the refcount did not drop to zero, someone else still holds the page
		if (!put_page_testzero(page))
			continue;

		// dyc: if the page is on an LRU list, remove it from that list
		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irqrestore(&zone->lru_lock,
									flags);
				zone = pagezone;
				spin_lock_irqsave(&zone->lru_lock, flags);
			}
			VM_BUG_ON(!PageLRU(page));
			__ClearPageLRU(page);
			del_page_from_lru(zone, page);
		}

		// dyc: pagevec is full after this add, so drain it now
		if (!pagevec_add(&pages_to_free, page)) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			// dyc: return the batched pages to the buddy system
			__pagevec_free(&pages_to_free);
			pagevec_reinit(&pages_to_free);
		}
	} // for (i = 0; i < nr; i++)
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	pagevec_free(&pages_to_free);
}
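/*
 * Usage sketch: callers normally hand a whole pagevec to release_pages()
 * rather than dropping pages one by one. This is roughly the upstream
 * pattern of that era (assumes the lru_add_drain() and pagevec helpers
 * from the same file); shown here only to illustrate how the batching
 * above is driven.
 */
void __pagevec_release(struct pagevec *pvec)
{
	/* flush any pages still sitting in the per-CPU LRU-add caches */
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}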
/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs. But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		unsigned long flags;
		struct zone *zone = page_zone(page);

		spin_lock_irqsave(&zone->lru_lock, flags);
		VM_BUG_ON(!PageLRU(page));
		__ClearPageLRU(page);
		del_page_from_lru_list(zone, page, page_off_lru(page));
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
}
/*
 * FIXME: speed this up?
 */
void fastcall activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	if (PageLRU(page) && !PageActive(page)) {
		del_page_from_inactive_list(zone, page);
		SetPageActive(page);
		add_page_to_active_list(zone, page);
		inc_page_state(pgactivate);
	}
	spin_unlock_irq(&zone->lru_lock);
}
/*
 * Confirm that all pages in the range [start, end) belong to the same zone.
 */
static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct zone *zone = NULL;
	struct page *page;
	int i;

	for (pfn = start_pfn; pfn < end_pfn; pfn += MAX_ORDER_NR_PAGES) {
		i = 0;
		/* This is just a CONFIG_HOLES_IN_ZONE check. */
		while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
			i++;
		if (i == MAX_ORDER_NR_PAGES)
			continue;
		page = pfn_to_page(pfn + i);
		if (zone && page_zone(page) != zone)
			return 0;
		zone = page_zone(page);
	}
	return 1;
}
/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page:  the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list. To avoid races with
 * tasks that might be making the page evictable, through eg. munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks. This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
	struct zone *zone = page_zone(page);
	struct lruvec *lruvec;

	spin_lock_irq(&zone->lru_lock);
	lruvec = mem_cgroup_page_lruvec(page, zone);
	ClearPageActive(page);
	SetPageUnevictable(page);
	SetPageLRU(page);
	add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
	spin_unlock_irq(&zone->lru_lock);
}
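/*
 * Usage sketch: in older kernels the fault path chose between the regular
 * LRU and the unevictable list roughly like this. Hedged: helper names and
 * the page_evictable() signature vary across versions, so this is only an
 * illustration of the calling convention, not a specific tree's code.
 */
static void example_lru_cache_add_active_or_unevictable(struct page *page,
					struct vm_area_struct *vma)
{
	if (page_evictable(page, vma))
		lru_cache_add_lru(page, LRU_ACTIVE + page_is_file_cache(page));
	else
		/* page is still "invisible" to other tasks at this point */
		add_page_to_unevictable_list(page);
}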
/*
 * FIXME: speed this up?
 */
void fastcall activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	if (PageLRU(page) && !PageActive(page)) {
		del_page_from_inactive_list(zone, page);
		SetPageActive(page);
		add_page_to_active_list(zone, page);
		__count_vm_event(PGACTIVATE);
	}
	spin_unlock_irq(&zone->lru_lock);
}
static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	return 0;

err:
	kfree(cma->bitmap);
	return -EINVAL;
}
static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
{
	unsigned long pfn = base_pfn;
	unsigned i = count >> pageblock_order;
	struct zone *zone;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			if (page_zone(pfn_to_page(pfn)) != zone) {
				pr_err("%s(%d) err: CMA reserved area must lie in a single zone\n",
				       __func__, __LINE__);
				return -EINVAL;
			}
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	return 0;
}
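/*
 * Hypothetical init-time loop showing how an activator with this
 * (base_pfn, count) signature would be driven. The cma_areas[] /
 * cma_area_count bookkeeping is assumed for illustration and is not
 * taken from a specific tree.
 */
static int __init example_cma_init_reserved_areas(void)
{
	int i, ret;

	for (i = 0; i < cma_area_count; i++) {
		/* activate each reserved range, bailing out on the first error */
		ret = cma_activate_area(cma_areas[i].base_pfn,
					cma_areas[i].count);
		if (ret)
			return ret;
	}
	return 0;
}
core_initcall(example_cma_init_reserved_areas);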
/*
 * Wrest a page from the buddy system.
 *
 * Caveat:
 *
 * This method manipulates buddy-system internal structures to accomplish
 * this goal.
 *
 * Source:
 * This method's implementation was inspired by "__rmqueue_smallest".
 */
static inline struct page *claim_free_buddy_page(struct page *requested)
{
	struct page *ret = NULL;
	unsigned int order = 0;
	struct zone *zone;
	int requested_page_count;

	zone = page_zone(requested);

	/* Protect the lru list */
	spin_lock(&zone->lru_lock);
	/* Protect the area */
	spin_lock(&zone->lock);

	requested_page_count = page_count(requested);
	if (likely(0 == requested_page_count) && PageBuddy(requested)) {
		unsigned int current_order;
		struct free_area *area;
		int migratetype;

		migratetype = get_pageblock_migratetype__clone(requested);
		current_order = page_order__clone(requested);
		area = &(zone->free_area[current_order]);

		list_del(&requested->lru);
		rmv_page_order__clone(requested);
		area->nr_free--;
		expand__clone(zone, requested, order, current_order, area,
			      migratetype);
		ret = requested;
	} else {
		printk(KERN_DEBUG "NOT: likely(0 == requested_page_count {%i}) && PageBuddy(requested){%s}\n",
		       requested_page_count,
		       PageBuddy(requested) ? "true" : "false");
	}

	spin_unlock(&zone->lock);
	spin_unlock(&zone->lru_lock);

	if (ret) {
		if (prep_new_page(ret, 0))
			printk(KERN_ALERT "Could not prep_new_page %p, %lu\n",
			       ret, page_to_pfn(ret));
	}

	return ret;
}
static void pagevec_move_tail_fn(struct page *page, void *arg)
{
	int *pgmoved = arg;

	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		enum lru_list lru = page_lru_base_type(page);
		struct lruvec *lruvec;

		lruvec = mem_cgroup_lru_move_lists(page_zone(page),
						   page, lru, lru);
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		(*pgmoved)++;
	}
}
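/*
 * Usage sketch: the per-page callback above is normally applied to a whole
 * pagevec through the common driver, roughly as below (assumes the
 * pagevec_lru_move_fn() helper from the same file and the PGROTATED
 * vm event); shown only to make the callback's role clear.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int pgmoved = 0;

	/* run pagevec_move_tail_fn() on every page, under the right lru_lock */
	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
	__count_vm_events(PGROTATED, pgmoved);
}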
/*
 * Batched page_cache_release(). Decrement the reference count on all the
 * passed pages. If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_inactive_list(): we recheck
 * the page count inside the lock to see whether shrink_inactive_list()
 * grabbed the page via the LRU. If it did, give up: shrink_inactive_list()
 * will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct zone *zone = NULL;
	struct lruvec *lruvec;
	unsigned long uninitialized_var(flags);

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		if (unlikely(PageCompound(page))) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			put_compound_page(page);
			continue;
		}

		if (!put_page_testzero(page))
			continue;

		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irqrestore(&zone->lru_lock,
									flags);
				zone = pagezone;
				spin_lock_irqsave(&zone->lru_lock, flags);
			}

			lruvec = mem_cgroup_page_lruvec(page, zone);
			VM_BUG_ON(!PageLRU(page));
			__ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, page_off_lru(page));
		}

		/* Clear Active bit in case of parallel mark_page_accessed */
		ClearPageActive(page);

		list_add(&page->lru, &pages_to_free);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	free_hot_cold_page_list(&pages_to_free, cold);
}
static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
{
	unsigned long pfn = base_pfn;
	unsigned i = count >> pageblock_order;
	struct zone *zone;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			if (page_zone(pfn_to_page(pfn)) != zone)
				return -EINVAL;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	adjust_managed_cma_page_count(zone, count);
	return 0;
}
/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs. But it gets used by networking.
 */
void fastcall __page_cache_release(struct page *page)
{
	unsigned long flags;
	struct zone *zone = page_zone(page);

	spin_lock_irqsave(&zone->lru_lock, flags);
	if (TestClearPageLRU(page))
		del_page_from_lru(zone, page);
	if (page_count(page) != 0)
		page = NULL;
	spin_unlock_irqrestore(&zone->lru_lock, flags);
	if (page)
		free_hot_page(page);
}
/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs. But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);
		struct lruvec *lruvec;
		unsigned long flags;

		spin_lock_irqsave(&zone->lru_lock, flags);
		lruvec = mem_cgroup_page_lruvec(page, zone);
		VM_BUG_ON_PAGE(!PageLRU(page), page);
		__ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_off_lru(page));
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
}
void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
		goto out;
	nr_pages = move_freepages_block(zone, page, migratetype);
	__mod_zone_freepage_state(zone, nr_pages, migratetype);
	set_pageblock_migratetype(page, migratetype);
out:
	spin_unlock_irqrestore(&zone->lock, flags);
}
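/*
 * Usage sketch: after a contiguous-range operation, isolation is undone one
 * pageblock at a time over the whole pfn range. Upstream wraps this in
 * undo_isolate_page_range(); the loop below is a simplified sketch of that
 * pattern, not the exact upstream implementation.
 */
static void example_undo_isolate_range(unsigned long start_pfn,
				       unsigned long end_pfn,
				       unsigned migratetype)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		if (!pfn_valid(pfn))
			continue;
		/* restore the pageblock's original migratetype */
		unset_migratetype_isolate(pfn_to_page(pfn), migratetype);
	}
}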
/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs. But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		unsigned long flags;
		struct zone *zone = page_zone(page);

		spin_lock_irqsave(&zone->lru_lock, flags);
		VM_BUG_ON(!PageLRU(page));
		__ClearPageLRU(page);
		del_page_from_lru(zone, page);
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	} else if (PageIONBacked(page)) {
		ClearPageActive(page);
		ClearPageUnevictable(page);
	}
}
int msm_iommu_pagetable_alloc(struct msm_iommu_pt *pt)
{
	pt->fl_table = (unsigned long *)__get_free_pages(GFP_KERNEL,
							 get_order(SZ_16K));
	if (!pt->fl_table)
		return -ENOMEM;

#ifdef CONFIG_LGE_MEMORY_INFO
	__mod_zone_page_state(page_zone(virt_to_page((void *)pt->fl_table)),
			      NR_IOMMU_PAGES, (1UL << get_order(SZ_16K)));
#endif

	memset(pt->fl_table, 0, SZ_16K);
	clean_pte(pt->fl_table, pt->fl_table + NUM_FL_PTE, pt->redirect);

	return 0;
}
/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns 1 if all pages in the range are isolated.
 */
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  bool skip_hwpoisoned_pages)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			/*
			 * If a race between isolation and allocation happens,
			 * some free pages could be in the MIGRATE_MOVABLE list
			 * although the pageblock's migration type is
			 * MIGRATE_ISOLATE. Catch it and move the page onto the
			 * MIGRATE_ISOLATE list.
			 */
			if (get_freepage_migratetype(page) != MIGRATE_ISOLATE) {
				struct page *end_page;

				end_page = page + (1 << page_order(page)) - 1;
				move_freepages(page_zone(page), page, end_page,
					       MIGRATE_ISOLATE);
			}
			pfn += 1 << page_order(page);
		} else if (page_count(page) == 0 &&
			   get_freepage_migratetype(page) == MIGRATE_ISOLATE)
			pfn += 1;
		else if (skip_hwpoisoned_pages && PageHWPoison(page)) {
			/*
			 * The HWPoisoned page may be not in buddy
			 * system, and page_count() is not 0.
			 */
			pfn++;
			continue;
		} else
			break;
	}
	if (pfn < end_pfn)
		return 0;
	return 1;
}
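/*
 * Usage sketch: since the helper above requires zone->lock, a caller wraps
 * it roughly like this. Upstream's test_pages_isolated() also re-checks the
 * pageblock migratetypes first; this simplified sketch only shows the
 * locking convention.
 */
static int example_test_range_isolated(unsigned long start_pfn,
				       unsigned long end_pfn)
{
	struct zone *zone = page_zone(pfn_to_page(start_pfn));
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&zone->lock, flags);
	ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn, false);
	spin_unlock_irqrestore(&zone->lock, flags);

	return ret;	/* 1 if every page in the range is isolated */
}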
/*
 * If the page can not be invalidated, it is moved to the
 * inactive list to speed up its reclaim. It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't page_mapped and dirty/writeback, the page
 * could be reclaimed asap using PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In case 4, the page is moved to the head of the inactive list because the
 * VM expects it to be written out by the flusher threads, as this is much
 * more effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_fn(struct page *page, void *arg)
{
	int lru, file;
	bool active;
	struct zone *zone = page_zone(page);

	if (!PageLRU(page))
		return;

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	active = PageActive(page);
	file = page_is_file_cache(page);
	lru = page_lru_base_type(page);
	del_page_from_lru_list(zone, page, lru + active);
	ClearPageActive(page);
	ClearPageReferenced(page);
	add_page_to_lru_list(zone, page, lru);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * PG_reclaim could be raced with end_page_writeback.
		 * It can make readahead confusing. But the race window
		 * is _really_ small and it's a non-critical problem.
		 */
		SetPageReclaim(page);
	} else {
		struct lruvec *lruvec;
		/*
		 * The page's writeback ended while it sat in the pagevec,
		 * so move the page to the tail of the inactive list.
		 */
		lruvec = mem_cgroup_lru_move_lists(zone, page, lru, lru);
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		__count_vm_event(PGROTATED);
	}

	if (active)
		__count_vm_event(PGDEACTIVATE);
	update_page_reclaim_stat(zone, page, file, 0);
}
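/*
 * Usage sketch: the callback above is batched through a per-CPU pagevec,
 * roughly as in kernels of this vintage (assumes a lru_deactivate_pvecs
 * per-CPU pagevec and the pagevec_lru_move_fn() driver); not necessarily
 * the exact upstream deactivate_page().
 */
void deactivate_page(struct page *page)
{
	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);

		/* drain the pagevec through lru_deactivate_fn() once it is full */
		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
		put_cpu_var(lru_deactivate_pvecs);
	}
}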
static void __activate_page(struct page *page, void *arg)
{
	struct zone *zone = page_zone(page);

	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);

		del_page_from_lru_list(zone, page, lru);
		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(zone, page, lru);
		__count_vm_event(PGACTIVATE);

		update_page_reclaim_stat(zone, page, file, 1);
	}
}
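/*
 * Usage sketch: on SMP kernels of this era, activate_page() feeds the
 * callback above through a per-CPU pagevec instead of taking the zone
 * lru_lock directly (assumes an activate_page_pvecs per-CPU pagevec and
 * pagevec_lru_move_fn()); contrast this with the older lock-per-call
 * activate_page() variants elsewhere in this collection.
 */
void activate_page(struct page *page)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);

		/* hold a reference while the page sits in the pagevec */
		page_cache_get(page);
		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, __activate_page, NULL);
		put_cpu_var(activate_page_pvecs);
	}
}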
/*
 * Batched page_cache_release(). Decrement the reference count on all the
 * passed pages. If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_cache(): we recheck the
 * page count inside the lock to see whether shrink_cache grabbed the page
 * via the LRU. If it did, give up: shrink_cache will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
	int i;
	struct pagevec pages_to_free;
	struct zone *zone = NULL;

	pagevec_init(&pages_to_free, cold);
	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];
		struct zone *pagezone;

		if (unlikely(PageCompound(page))) {
			if (zone) {
				spin_unlock_irq(&zone->lru_lock);
				zone = NULL;
			}
			put_compound_page(page);
			continue;
		}

		if (!put_page_testzero(page))
			continue;

		pagezone = page_zone(page);
		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		if (TestClearPageLRU(page))
			del_page_from_lru(zone, page);
		if (page_count(page) == 0) {
			if (!pagevec_add(&pages_to_free, page)) {
				spin_unlock_irq(&zone->lru_lock);
				__pagevec_free(&pages_to_free);
				pagevec_reinit(&pages_to_free);
				zone = NULL;	/* No lock is held */
			}
		}
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);

	pagevec_free(&pages_to_free);
}
static void __pagevec_lru_add_fn(struct page *page, void *arg)
{
	enum lru_list lru = (enum lru_list)arg;
	struct zone *zone = page_zone(page);
	int file = is_file_lru(lru);
	int active = is_active_lru(lru);

	VM_BUG_ON(PageActive(page));
	VM_BUG_ON(PageUnevictable(page));
	VM_BUG_ON(PageLRU(page));

	SetPageLRU(page);
	if (active)
		SetPageActive(page);
	add_page_to_lru_list(zone, page, lru);
	update_page_reclaim_stat(zone, page, file, active);
}
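/*
 * Usage sketch: the per-page callback above is applied to a whole pagevec
 * via the common driver, roughly as below (assumes pagevec_lru_move_fn()
 * and is_unevictable_lru(), as in kernels where the callback still takes
 * the target lru list as its argument).
 */
void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
{
	/* unevictable pages bypass the pagevec path entirely */
	VM_BUG_ON(is_unevictable_lru(lru));

	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, (void *)lru);
}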
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
	unsigned long start, start_pfn;
	struct zone *zone;
	int ret;

	start_pfn = base >> PAGE_SHIFT;

	if (!pfn_valid(start_pfn)) {
		memblock_remove(base, memblock_size);
		return 0;
	}

	zone = page_zone(pfn_to_page(start_pfn));

	/*
	 * Remove section mappings and sysfs entries for the
	 * section of the memory we are removing.
	 *
	 * NOTE: Ideally, this should be done in generic code like
	 * remove_memory(). But remove_memory() gets called by writing
	 * to sysfs "state" file and we can't remove sysfs entries
	 * while writing to it. So we have to defer it to here.
	 */
	ret = __remove_pages(zone, start_pfn, memblock_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	/*
	 * Update memory regions for memory remove
	 */
	memblock_remove(base, memblock_size);

	/*
	 * Remove htab bolted mappings for this section of memory
	 */
	start = (unsigned long)__va(base);
	ret = remove_section_mapping(start, start + memblock_size);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	return ret;
}
/*
 * page_alloc.c
 */
bool is_free_buddy_page(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	int order;

	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));

		if (PageBuddy(page_head) && page_order(page_head) >= order)
			break;
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	return order < MAX_ORDER;
}
/*
 * FIXME: speed this up?
 */
void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = LRU_BASE + file;

		del_page_from_lru_list(zone, page, lru);

		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(zone, page, lru);
		__count_vm_event(PGACTIVATE);

		update_page_reclaim_stat(zone, page, !!file, 1);
	}
	spin_unlock_irq(&zone->lru_lock);
}
/**
 * lru_cache_add_active_or_unevictable
 * @page:  the page to be added to LRU
 * @vma:   vma in which page is mapped for determining reclaimability
 *
 * Place @page on the active or unevictable LRU list, depending on its
 * evictability. Note that if the page is not evictable, it goes
 * directly back onto its zone's unevictable list; it does NOT use a
 * per cpu pagevec.
 */
void lru_cache_add_active_or_unevictable(struct page *page,
					 struct vm_area_struct *vma)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
		SetPageActive(page);
	else if (!TestSetPageMlocked(page)) {
		/*
		 * We use the irq-unsafe __mod_zone_page_stat because this
		 * counter is not modified from interrupt context, and the pte
		 * lock is held (spinlock), which implies preemption disabled.
		 */
		__mod_zone_page_state(page_zone(page), NR_MLOCK,
				      hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	lru_cache_add(page);
}
/*
 * added by qijiwen.
 * put the page on the lru directly
 */
void add_page_to_lru_list_cma(struct page *page, enum lru_list lru)
{
	struct zone *zone = page_zone(page);
	int file = is_file_lru(lru);
	int active = is_active_lru(lru);
	unsigned long flags = 0;

	VM_BUG_ON(PageActive(page));
	VM_BUG_ON(PageUnevictable(page));
	VM_BUG_ON(PageLRU(page));

	spin_lock_irqsave(&zone->lru_lock, flags);
	SetPageLRU(page);
	if (active)
		SetPageActive(page);
	update_page_reclaim_stat(zone, page, file, active);
	add_page_to_lru_list(zone, page, lru);
	spin_unlock_irqrestore(&zone->lru_lock, flags);
}
/*
 * First pass at this code will check to determine if the remove
 * request is within the RMO. Do not allow removal within the RMO.
 */
int __devinit remove_memory(u64 start, u64 size)
{
	struct zone *zone;
	unsigned long start_pfn, end_pfn, nr_pages;

	start_pfn = start >> PAGE_SHIFT;
	nr_pages = size >> PAGE_SHIFT;
	end_pfn = start_pfn + nr_pages;

	printk("%s(): Attempting to remove memory in range "
			"%lx to %lx\n", __func__, start, start + size);

	/*
	 * check for range within RMO
	 */
	zone = page_zone(pfn_to_page(start_pfn));

	printk("%s(): memory will be removed from "
			"the %s zone\n", __func__, zone->name);

	/*
	 * not handling removing memory ranges that
	 * overlap multiple zones yet
	 */
	if (end_pfn > (zone->zone_start_pfn + zone->spanned_pages))
		goto overlap;

	/* make sure it is NOT in RMO */
	if ((start < lmb.rmo_size) || ((start + size) < lmb.rmo_size)) {
		printk("%s(): range to be removed must NOT be in RMO!\n",
			__func__);
		goto in_rmo;
	}

	return __remove_pages(zone, start_pfn, nr_pages);

overlap:
	printk("%s(): memory range to be removed overlaps "
		"multiple zones!!!\n", __func__);
in_rmo:
	return -1;
}
static ssize_t show_valid_zones(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn, end_pfn;
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	struct page *first_page;
	struct zone *zone;
	int zone_shift = 0;

	start_pfn = section_nr_to_pfn(mem->start_section_nr);
	end_pfn = start_pfn + nr_pages;
	first_page = pfn_to_page(start_pfn);

	/* A block that spans more than one zone can not be offlined. */
	if (!test_pages_in_a_zone(start_pfn, end_pfn))
		return sprintf(buf, "none\n");

	zone = page_zone(first_page);

	/* MMOP_ONLINE_KEEP */
	sprintf(buf, "%s", zone->name);

	/* MMOP_ONLINE_KERNEL */
	zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL);
	if (zone_shift) {
		strcat(buf, " ");
		strcat(buf, (zone + zone_shift)->name);
	}

	/* MMOP_ONLINE_MOVABLE */
	zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE);
	if (zone_shift) {
		strcat(buf, " ");
		strcat(buf, (zone + zone_shift)->name);
	}

	strcat(buf, "\n");

	return strlen(buf);
}