void msm_iommu_pagetable_free(struct msm_iommu_pt *pt)
{
	unsigned long *fl_table;
	int i;

	fl_table = pt->fl_table;
#ifdef CONFIG_LGE_MEMORY_INFO
	/*
	 * LGE memory accounting: uncharge each second-level table page and
	 * the 16 KiB first-level table from the zone's NR_IOMMU_PAGES
	 * counter as they are freed.
	 */
	for (i = 0; i < NUM_FL_PTE; i++)
		if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) {
			__dec_zone_page_state(virt_to_page(__va(fl_table[i] &
							FL_BASE_MASK)),
							NR_IOMMU_PAGES);
			free_page((unsigned long) __va(fl_table[i] &
							FL_BASE_MASK));
		}
	__mod_zone_page_state(page_zone(virt_to_page((void *)fl_table)),
							NR_IOMMU_PAGES, -(1UL << get_order(SZ_16K)));
	free_pages((unsigned long)fl_table, get_order(SZ_16K));
#else
	for (i = 0; i < NUM_FL_PTE; i++)
		if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
			free_page((unsigned long) __va(((fl_table[i]) &
							FL_BASE_MASK)));
	free_pages((unsigned long)fl_table, get_order(SZ_16K));
#endif
	pt->fl_table = NULL;
}

void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	struct page *isolated_page = NULL;
	unsigned int order;
	unsigned long page_idx, buddy_idx;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
		goto out;

	/*
	 * Because a freepage with more than pageblock_order on an isolated
	 * pageblock is restricted from merging due to the freepage counting
	 * problem, it is possible that an unmerged free buddy page exists.
	 * move_freepages_block() does not handle merging, so we need another
	 * approach: isolating and then freeing the page will get these
	 * pages merged.
	 */
	if (PageBuddy(page)) {
		order = page_order(page);
		if (order >= pageblock_order) {
			page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
			buddy_idx = __find_buddy_index(page_idx, order);
			buddy = page + (buddy_idx - page_idx);

			if (!is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				set_page_refcounted(page);
				isolated_page = page;
			}
		}
	}

	/*
	 * If we isolated a freepage with more than pageblock_order, there
	 * should be no freepages left in the range, so we can avoid the
	 * costly pageblock scan for freepage moving.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype);
#if !defined(CONFIG_CMA) || !defined(CONFIG_MTK_SVP) // SVP 16
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
#else
		__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
#endif
	}
	set_pageblock_migratetype(page, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (isolated_page)
		__free_pages(isolated_page, order);
}
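
The buddy lookup in the PageBuddy() block above (__find_buddy_index() plus the buddy_idx - page_idx offset) boils down to flipping a single bit of the page index. The snippet below is a standalone, userspace sketch of that arithmetic; buddy_index_demo() is an invented name for illustration, not a kernel helper.

#include <stdio.h>

/* Standalone illustration of the buddy-index arithmetic: a block's buddy
 * differs only in the bit that selects which half of the 2^(order+1)
 * aligned pair the block occupies, so the lookup is an XOR on the order
 * bit (mirroring the kernel's __find_buddy_index()). */
static unsigned long buddy_index_demo(unsigned long page_idx,
				      unsigned int order)
{
	return page_idx ^ (1UL << order);
}

int main(void)
{
	/* An order-2 (4-page) block at index 8 pairs with the one at 12. */
	printf("buddy of  8 at order 2: %lu\n", buddy_index_demo(8, 2));
	printf("buddy of 12 at order 2: %lu\n", buddy_index_demo(12, 2));
	return 0;
}
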
int msm_iommu_pagetable_alloc(struct msm_iommu_pt *pt)
{
	pt->fl_table = (unsigned long *)__get_free_pages(GFP_KERNEL,
							  get_order(SZ_16K));
	if (!pt->fl_table)
		return -ENOMEM;

#ifdef CONFIG_LGE_MEMORY_INFO
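	/*
	 * LGE memory accounting: charge the pages of the newly allocated
	 * 16 KiB first-level table to the zone's NR_IOMMU_PAGES counter.
	 */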
	__mod_zone_page_state(page_zone(virt_to_page((void *)pt->fl_table)),
							NR_IOMMU_PAGES, (1UL << get_order(SZ_16K)));
#endif
	memset(pt->fl_table, 0, SZ_16K);
	clean_pte(pt->fl_table, pt->fl_table + NUM_FL_PTE, pt->redirect);

	return 0;
}
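
For context, a caller of the two MSM IOMMU pagetable helpers above would pair them roughly as below. This is only a hedged sketch: example_domain_init() and its setup_ok parameter are hypothetical stand-ins, and real callers in the msm_iommu driver do more work between the two calls.

/* Hypothetical caller: allocate the 16 KiB first-level table and release
 * it again if a later setup step fails. */
static int example_domain_init(struct msm_iommu_pt *pt, bool setup_ok)
{
	int ret;

	ret = msm_iommu_pagetable_alloc(pt);
	if (ret)
		return ret;	/* -ENOMEM: no contiguous 16 KiB block */

	/* ... map ranges into the new pagetable here ... */

	if (!setup_ok) {
		/* Undo the allocation (and, under CONFIG_LGE_MEMORY_INFO,
		 * the NR_IOMMU_PAGES accounting) on failure. */
		msm_iommu_pagetable_free(pt);
		return -EINVAL;
	}

	return 0;
}
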
/**
 * lru_cache_add_active_or_unevictable
 * @page:  the page to be added to LRU
 * @vma:   vma in which page is mapped for determining reclaimability
 *
 * Place @page on the active or unevictable LRU list, depending on its
 * evictability.  Note that if the page is not evictable, it goes
 * directly back onto its zone's unevictable list; it does NOT use a
 * per-cpu pagevec.
 */
void lru_cache_add_active_or_unevictable(struct page *page,
					 struct vm_area_struct *vma)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
		SetPageActive(page);
	else if (!TestSetPageMlocked(page)) {
		/*
		 * We use the irq-unsafe __mod_zone_page_state because this
		 * counter is not modified from interrupt context, and the pte
		 * lock is held (a spinlock), which implies preemption is
		 * disabled.
		 */
		__mod_zone_page_state(page_zone(page), NR_MLOCK,
				    hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	lru_cache_add(page);
}
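
The first branch above is easy to misread because of the negated compound test: a new page is accounted as mlocked only when its vma is VM_LOCKED and not VM_SPECIAL; everything else goes onto the active LRU. A minimal restatement of that predicate follows (the helper name is invented for illustration and is not part of the kernel API).

/* Illustrative only: mirrors the flag test in
 * lru_cache_add_active_or_unevictable().  Special mappings are never
 * treated as mlocked, even when VM_LOCKED is set. */
static inline bool vma_mlocks_new_page(unsigned long vm_flags)
{
	return (vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
}
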
int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if the
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to return the
	 * number of pages in a range that are held by the balloon
	 * driver to shrink memory. If all the pages are accounted for
	 * by balloons, are free, or on the LRU, isolation can continue.
	 * Later, for example, when the memory hotplug notifier runs, these
	 * pages reported as "can be isolated" should be isolated (freed)
	 * by the balloon driver through the memory notifier chain.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * FIXME: At present, memory hotplug does not call shrink_slab()
	 * by itself.  We just check MOVABLE pages.
	 */
	if (!has_unmovable_pages(zone, page, arg.pages_found,
				 skip_hwpoisoned_pages))
		ret = 0;

	/*
	 * "Immobile" means not-on-LRU pages.  If there are more immobile
	 * pages than removable-by-driver pages reported by the notifier,
	 * we will fail.
	 */

out:
	if (!ret) {
		unsigned long nr_pages;
#if !defined(CONFIG_CMA) || !defined(CONFIG_MTK_SVP) // SVP 16
		int migratetype = get_pageblock_migratetype(page);
#endif

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
#if defined(CONFIG_CMA) && defined(CONFIG_MTK_SVP)
// commit ad53f92eb416d81e469fa8ea57153e59455e7175
		zone->nr_isolate_pageblock++;
#endif
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);

#if !defined(CONFIG_CMA) || !defined(CONFIG_MTK_SVP) // SVP 16
		__mod_zone_freepage_state(zone, -nr_pages, migratetype);
#else
		__mod_zone_page_state(zone, NR_FREE_PAGES, -nr_pages);
#endif
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages();
	return ret;
}
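
set_migratetype_isolate() and unset_migratetype_isolate() are normally driven per pageblock across a pfn range, with a rollback when any block fails, in the spirit of the kernel's start_isolate_page_range() and undo_isolate_page_range(). The loop below is a simplified sketch of that pattern, assuming start_pfn and end_pfn are pageblock-aligned; it is not a drop-in replacement for those helpers.

/* Simplified sketch: isolate every pageblock in [start_pfn, end_pfn),
 * undoing the blocks already isolated if any single block fails. */
static int example_isolate_range(unsigned long start_pfn,
				 unsigned long end_pfn,
				 unsigned migratetype,
				 bool skip_hwpoisoned_pages)
{
	unsigned long pfn, undo_pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		if (!pfn_valid(pfn))
			continue;
		if (set_migratetype_isolate(pfn_to_page(pfn),
					    skip_hwpoisoned_pages))
			goto undo;
	}
	return 0;

undo:
	/* Restore the original migratetype on the pageblocks that were
	 * successfully isolated before the failure. */
	for (undo_pfn = start_pfn; undo_pfn < pfn;
	     undo_pfn += pageblock_nr_pages)
		if (pfn_valid(undo_pfn))
			unset_migratetype_isolate(pfn_to_page(undo_pfn),
						  migratetype);

	return -EBUSY;
}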