Пример #1
0
/*
 * CPU hotplug callback for the kernel profiler: keeps the two per-CPU
 * profile-hit pages (the flip pair cpu_profile_hits[cpu][0..1]) in step
 * with the CPU's lifecycle.
 *
 * CPU_UP_PREPARE   - allocate and zero both hit pages on the CPU's home
 *                    node (slot [1] first, then [0]); veto the bring-up
 *                    with NOTIFY_BAD if either allocation fails, undoing
 *                    the partial allocation.
 * CPU_ONLINE       - start profiling the CPU (set it in prof_cpu_mask).
 * CPU_UP_CANCELED /
 * CPU_DEAD         - stop profiling the CPU and release both pages.
 */
static int __devinit profile_cpu_callback(struct notifier_block *info,
					unsigned long action, void *__cpu)
{
	int node, cpu = (unsigned long)__cpu;
	struct page *page;

	switch (action) {
	case CPU_UP_PREPARE:
		node = cpu_to_node(cpu);
		per_cpu(cpu_profile_flip, cpu) = 0;
		/* Slot [1] first: the out_free rollback below relies on it. */
		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
			page = alloc_pages_node(node, GFP_KERNEL, 0);
			if (!page)
				return NOTIFY_BAD;
			clear_highpage(page);
			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
		}
		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
			page = alloc_pages_node(node, GFP_KERNEL, 0);
			if (!page)
				goto out_free;
			clear_highpage(page);
			per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
		}
		break;
		/* Rollback: [1] was allocated above but [0] failed. */
	out_free:
		page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
		per_cpu(cpu_profile_hits, cpu)[1] = NULL;
		__free_page(page);
		return NOTIFY_BAD;
	case CPU_ONLINE:
		cpu_set(cpu, prof_cpu_mask);
		break;
	case CPU_UP_CANCELED:
	case CPU_DEAD:
		/* Mask the CPU out first so no new hits land in the pages. */
		cpu_clear(cpu, prof_cpu_mask);
		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
		break;
	}
	return NOTIFY_OK;
}
Пример #2
0
/*
 * Grow the per-mm LDT to hold at least @mincount entries.
 *
 * The entry count is rounded up to a multiple of 512 and backing pages
 * are allocated (and zeroed) one page at a time; pages left over from a
 * previous, smaller LDT are kept and reused.
 *
 * Returns 0 on success, -ENOMEM if an allocation fails.  On failure
 * only the pages allocated by this call are released and their slots
 * cleared; the old LDT pages and pc->size are left untouched so the
 * context remains valid.
 */
static int __alloc_ldt(mm_context_t *pc, int mincount)
{
	int oldsize, newsize, nr;

	if (mincount <= pc->size)
		return 0;
	/*
	 * LDT got larger - reallocate if necessary.
	 */
	oldsize = pc->size;
	mincount = (mincount+511)&(~511);
	newsize = mincount*LDT_ENTRY_SIZE;
	for (nr = 0; nr * PAGE_SIZE < newsize; nr++) {
		BUG_ON(nr * PAGE_SIZE >= 64*1024);
		if (!pc->ldt_pages[nr]) {
			pc->ldt_pages[nr] = alloc_page(GFP_HIGHUSER|__GFP_UBC);
			if (!pc->ldt_pages[nr])
				goto nomem;
			clear_highpage(pc->ldt_pages[nr]);
		}
	}
	pc->size = mincount;
	return 0;

nomem:
	/*
	 * Roll back only what this call allocated.  Previously this path
	 * freed every page from nr-1 down to 0 - including pages backing
	 * the old, still-live LDT - while leaving the stale pointers in
	 * ldt_pages[] and forcing pc->size to 0, which set up a
	 * use-after-free / double-free on the next grow.  A slot below
	 * the old coverage was necessarily populated before this call,
	 * so only slots at or beyond oldsize's byte range are new.
	 */
	while (--nr >= 0) {
		if (nr * PAGE_SIZE >= oldsize * LDT_ENTRY_SIZE) {
			__free_page(pc->ldt_pages[nr]);
			pc->ldt_pages[nr] = NULL;
		}
	}
	return -ENOMEM;
}
Пример #3
0
/*
 * Allocate the two per-CPU profile-hit pages for every online CPU at
 * init time.  On any allocation failure, profiling is disabled
 * (prof_on = 0, made visible with a barrier and a dummy cross-call)
 * and every page handed out so far is returned.
 */
static int __init create_hash_tables(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		int node = cpu_to_node(cpu);
		int slot;

		/* Same order as before: slot [1], then slot [0]. */
		for (slot = 1; slot >= 0; slot--) {
			struct page *page;

			page = alloc_pages_node(node, GFP_KERNEL, 0);
			if (!page)
				goto out_cleanup;
			clear_highpage(page);
			per_cpu(cpu_profile_hits, cpu)[slot]
					= (struct profile_hit *)page_address(page);
		}
	}
	return 0;
out_cleanup:
	prof_on = 0;
	mb();
	/* Make sure no CPU is still touching the hit pages. */
	on_each_cpu(profile_nop, NULL, 0, 1);
	for_each_online_cpu(cpu) {
		int slot;

		for (slot = 0; slot <= 1; slot++) {
			struct profile_hit *hits;

			hits = per_cpu(cpu_profile_hits, cpu)[slot];
			if (!hits)
				continue;
			per_cpu(cpu_profile_hits, cpu)[slot] = NULL;
			__free_page(virt_to_page(hits));
		}
	}
	return -1;
}
Пример #4
0
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
    struct page *pte;

#ifdef CONFIG_HIGHPTE
    pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
#else
    pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
#endif
    if (pte)
        clear_highpage(pte);
    return pte;
}
Пример #5
0
/*
 * Allocate one zeroed page to back a PTE table, from highmem when
 * CONFIG_HIGHPTE is enabled.  Returns NULL on allocation failure.
 */
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *ptepage;

#ifdef CONFIG_HIGHPTE
	ptepage = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT, 0);
#else
	ptepage = alloc_pages(GFP_KERNEL | __GFP_REPEAT, 0);
#endif
	if (ptepage)
		clear_highpage(ptepage);
	return ptepage;
}
Пример #6
0
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *page;

#ifdef CONFIG_HIGHPTE
	page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
#else
	page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
#endif
	if (page) {
		clear_highpage(page);
		pgtable_page_ctor(page);
		flush_dcache_page(page);
	}
	return page;
}
Пример #7
0
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *ptepage;

#ifdef CONFIG_HIGHPTE
	int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
#else
	int flags = GFP_KERNEL | __GFP_REPEAT;
#endif

	ptepage = alloc_pages(flags, 0);
	if (ptepage) {
		ptepage->mapping = (void *) mm;
		ptepage->index = address & PMD_MASK;
		clear_highpage(ptepage);
	}
	return ptepage;
}
/*
 * Allocate one zeroed page to back a PTE table (highmem when
 * CONFIG_HIGHPTE), run the page-table constructor, and flush the
 * dcache.  Returns NULL if the allocation or the constructor fails;
 * on constructor failure the page is given back.
 */
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *page;

#ifdef CONFIG_HIGHPTE
	page = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, 0);
#else
	page = alloc_pages(GFP_KERNEL, 0);
#endif
	if (page) {
		clear_highpage(page);
		if (pgtable_page_ctor(page)) {
			flush_dcache_page(page);
			return page;
		}
		__free_page(page);
	}
	return NULL;
}
Пример #9
0
/*
 * Allocate a PTE page with the cache color required by @address on a
 * virtually-indexed cache: grab a higher-order block, split it into
 * single pages, keep the one whose physical color matches, and free
 * the rest.  Returns the zeroed matching page, or NULL if allocation
 * failed or no page of the right color was found.
 *
 * Fixes over the original:
 *  - the loop index `i` was never declared (compile error);
 *  - clear_highpage() ran even when no page matched (NULL dereference);
 *  - a second color match overwrote `page`, leaking the first match.
 * NOTE(review): the loop bound PAGE_ORDER looks odd against
 * split_page(p, COLOR_ORDER) - one would expect 1 << COLOR_ORDER pages;
 * confirm the macros before relying on full coverage of the block.
 */
struct page* pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *page = NULL, *p;
	int color = ADDR_COLOR(address);
	int i;

	p = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);

	if (likely(p)) {
		split_page(p, COLOR_ORDER);

		for (i = 0; i < PAGE_ORDER; i++) {
			/* Keep the first matching page, free everything else. */
			if (!page && PADDR_COLOR(page_address(p)) == color)
				page = p;
			else
				__free_page(p);
			p++;
		}
		if (page)
			clear_highpage(page);
	}

	return page;
}
Пример #10
0
static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
{
	int oldsize, newsize, i;

	if (mincount <= pc->size)
		return 0;
	/*
	 * LDT got larger - reallocate if necessary.
	 */
	oldsize = pc->size;
	mincount = (mincount+511)&(~511);
	newsize = mincount*LDT_ENTRY_SIZE;
	for (i = 0; i < newsize; i += PAGE_SIZE) {
		int nr = i/PAGE_SIZE;
		BUG_ON(i >= 64*1024);
		if (!pc->ldt_pages[nr]) {
			pc->ldt_pages[nr] = alloc_page(GFP_HIGHUSER);
			if (!pc->ldt_pages[nr])
				return -ENOMEM;
			clear_highpage(pc->ldt_pages[nr]);
		}
	}
	pc->size = mincount;
	if (reload) {
#ifdef CONFIG_SMP
		local_irq_disable();
#endif
		load_LDT(pc);
#ifdef CONFIG_SMP
		local_irq_enable();
		if (current->mm->cpu_vm_mask != (1<<smp_processor_id()))
			smp_call_function(flush_ldt, 0, 1, 1);
#endif
	}
	return 0;
}
Пример #11
0
/*
 * Zero a page's contents before it is released back to the balloon /
 * hypervisor; compiles to a no-op unless CONFIG_XEN_SCRUB_PAGES is set.
 */
static void scrub_page(struct page *page)
{
#ifndef CONFIG_XEN_SCRUB_PAGES
	return;
#else
	clear_highpage(page);
#endif
}