Example 1
static void __init setup_zero_pages(void)
{
	unsigned int order;
	struct page *page;
	int i;

	/* Latest machines require a mapping granularity of 512KB */
	order = 7;

	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (totalram_pages >> 10) < (1UL << order))
		order--;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		mark_page_reserved(page);
		page++;
	}

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
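The zero_page_mask computed here is what lets the architecture hand out cache-colored copies of the zero page. For reference, the s390 and MIPS ZERO_PAGE() macros consume it roughly along these lines (a sketch; the port's pgtable.h holds the authoritative definition):

/*
 * Sketch of how empty_zero_page and zero_page_mask are consumed:
 * the faulting virtual address selects the zero-page copy whose
 * cache color matches, so concurrent readers of different colors
 * don't all hammer the same cache lines.
 */
#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))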
Example 2
	void bin_index_t::file_node::add_item(const index_t& val, page_t& page)
	{
		// Locate the position within this page where the key belongs
		page_pr pr(page);
		page_iter p = std::lower_bound(page.begin(), page.end(), val.key, pr);

		index_ref r = page[static_cast<size_t>(*p)];

		// No left child: this page is the insertion point
		if (r.left() == 0)
		{
			index_t cp(val);
			cp.index_in_page = static_cast<size_t>(*p);
			page.insert_item(cp);
			return;
		}

		page_ptr child_page = get_page(r.left());

		// Child still has room: recurse into it
		if (child_page->items_count() < child_page->page_max)
		{
			add_item(val, *child_page);
			return;
		}

		// Child is full: split it, then insert into the proper half
		page_ptr new_right_page(create_page());

		split_page(*child_page, page, static_cast<size_t>(*p), *new_right_page);

		if (pr(val.key, *p))
			add_item(val, *child_page);
		else
			add_item(val, *new_right_page);

		add_page(new_right_page);
	}
Example 3
static struct nvos_pagemap *nv_alloc_pages(unsigned int count,
        pgprot_t prot, bool contiguous, int create_mapping)
{
    struct nvos_pagemap *pm;
    size_t size;
    unsigned int i = 0;

    size = sizeof(struct nvos_pagemap) + sizeof(struct page *)*(count-1);
    pm = kzalloc(size, GFP_KERNEL);
    if (!pm)
        return NULL;

    if (count==1) contiguous = true;

    if (contiguous) {
        size_t order = get_order(count << PAGE_SHIFT);
        struct page *compound_page;
        compound_page = alloc_pages(nv_gfp_pool, order);
        if (!compound_page) goto fail;

        split_page(compound_page, order);
        for (i=0; i<count; i++)
            pm->pages[i] = nth_page(compound_page, i);

        for ( ; i < (1<<order); i++)
            __free_page(nth_page(compound_page, i));
        i = count;
    } else {
        for (i=0; i<count; i++) {
            pm->pages[i] = alloc_page(nv_gfp_pool);
            if (!pm->pages[i]) goto fail;
        }
    }

    if (create_mapping) {
        /* since the linear kernel mapping uses sections and super-
         * sections rather than PTEs, it's not possible to overwrite
         * it with the correct caching attributes, so use a local
         * mapping */
        pm->addr = vm_map_ram(pm->pages, count, -1, prot);
        if (!pm->addr) {
            pr_err("nv_alloc_pages: failed to vmap contiguous area\n");
            goto fail;
        }
    }

    pm->nr_pages = count;
    for (i=0; i<count; i++) {
        SetPageReserved(pm->pages[i]);
        pagemap_flush_page(pm->pages[i]);
    }

    return pm;

fail:
    while (i) __free_page(pm->pages[--i]);
    kfree(pm);
    return NULL;
}
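Nothing in the snippet shows how these page maps are torn down. A hedged sketch of the symmetric free path (the function name is an assumption, not the driver's API; the calls themselves are standard kernel ones):

/*
 * Hedged sketch of the matching free routine. Because split_page()
 * gave every sub-page its own reference count, each page can be
 * released individually regardless of whether the original
 * allocation was contiguous.
 */
static void nv_free_pages(struct nvos_pagemap *pm)
{
	unsigned int i;

	if (pm->addr)
		vm_unmap_ram(pm->addr, pm->nr_pages);

	for (i = 0; i < pm->nr_pages; i++) {
		ClearPageReserved(pm->pages[i]);
		__free_page(pm->pages[i]);
	}
	kfree(pm);
}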
Example 4
/*
 * This function will allocate the requested contiguous pages and
 * map them into the kernel's vmalloc() space.  This is done so we
 * get unique mapping for these pages, outside of the kernel's 1:1
 * virtual:physical mapping.  This is necessary so we can cover large
 * portions of the kernel with single large page TLB entries, and
 * still get unique uncached pages for consistent DMA.
 */
void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
{
	struct vm_struct *area;
	unsigned long page, va, pa;
	void *ret;
	int order, err, i;

	if (in_interrupt())
		BUG();

	/* only allocate page size areas */
	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = __get_free_pages(gfp, order);
	if (!page) {
		BUG();
		return NULL;
	}

	/* allocate some common virtual space to map the new pages */
	area = get_vm_area(size, VM_ALLOC);
	if (!area) {
		free_pages(page, order);
		return NULL;
	}
	va = VMALLOC_VMADDR(area->addr);
	ret = (void *) va;

	/* this gives us the real physical address of the first page */
	*dma_handle = pa = virt_to_bus((void *) page);

	/* set refcount=1 on all pages in an order>0 allocation so that vfree() will actually free
	 * all pages that were allocated.
	 */
	if (order > 0) {
		struct page *rpage = virt_to_page(page);
		split_page(rpage, order);
	}

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(va + i, pa + i, PAGE_KERNEL_NOCACHE);

	if (err) {
		vfree((void *) va);
		return NULL;
	}

	/* we need to ensure that there are no cachelines in use, or worse dirty in this area
	 * - can't do until after virtual address mappings are created
	 */
	frv_cache_invalidate(va, va + size);

	return ret;
}
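The refcount comment above is the whole point of calling split_page() here: it keeps the free path trivial. A hedged sketch of the counterpart (the real frv routine may differ):

/*
 * Hedged sketch of the matching free routine: with every page
 * individually refcounted by split_page(), vfree() can tear down the
 * mapping and release all of the pages in one call, which is exactly
 * what the refcount comment in consistent_alloc() relies on.
 */
void consistent_free(void *vaddr)
{
	vfree(vaddr);
}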
Example 5
static void __init setup_zero_pages(void)
{
	struct cpuid cpu_id;
	unsigned int order;
	struct page *page;
	int i;

	get_cpu_id(&cpu_id);
	switch (cpu_id.machine) {
	case 0x9672:	/* g5 */
	case 0x2064:	/* z900 */
	case 0x2066:	/* z900 */
	case 0x2084:	/* z990 */
	case 0x2086:	/* z990 */
	case 0x2094:	/* z9-109 */
	case 0x2096:	/* z9-109 */
		order = 0;
		break;
	case 0x2097:	/* z10 */
	case 0x2098:	/* z10 */
	case 0x2817:	/* z196 */
	case 0x2818:	/* z196 */
		order = 2;
		break;
	case 0x2827:	/* zEC12 */
	case 0x2828:	/* zEC12 */
		order = 5;
		break;
	case 0x2964:	/* z13 */
	default:
		order = 7;
		break;
	}
	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (totalram_pages >> 10) < (1UL << order))
		order--;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		mark_page_reserved(page);
		page++;
	}

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
Example 6
/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev,
	size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;
	void *ptr;
	u64 mask = get_coherent_dma_mask(dev);

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big "
			"(requested %#zx mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order);
		p < e; p++)
		__free_page(p);

	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	ptr = page_address(page);
	memset(ptr, 0, size);
	dmac_flush_range(ptr, ptr + size);
	outer_flush_range(__pa(ptr), __pa(ptr) + size);

	return page;
}
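The limit check under CONFIG_DMA_API_DEBUG is easiest to see with a concrete mask. A worked example (illustrative values only):

/*
 * Worked example of the limit computation above, for a 24-bit
 * ISA-style DMA mask:
 *
 *	mask             = 0x0000000000ffffff
 *	mask + 1         = 0x0000000001000000
 *	~mask            = 0xffffffffff000000
 *	(mask+1) & ~mask = 0x0000000001000000   -> limit = 16MB
 *
 * so any coherent allocation of 16MB or more is refused up front.
 */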
Example 7
int omapvout_mem_alloc(u32 size, u32 *phy_addr, u32 *virt_addr)
{
	int	order;
	u32	dss_page_addr;
	u32	dss_page_phy;
	u32	dss_page_virt;
	u32	used, alloc_end;
	struct page	*tmp_page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	dss_page_addr = __get_free_pages(GFP_KERNEL, order);
	if (!dss_page_addr) {
		printk(KERN_ERR "Failed to allocate pages\n");
		return -ENOMEM;
	}

	/*
	 * 'alloc_pages' allocates pages in powers of 2,
	 * so free the pages we don't need
	 */
	split_page(virt_to_page(dss_page_addr), order);
	alloc_end = dss_page_addr + (PAGE_SIZE<<order);
	used = dss_page_addr + size;

	DBG("mem_alloc: dss_page_addr=0x%x, alloc_end=0x%x, used=0x%x\n"
		, dss_page_addr, alloc_end, used);
	DBG("mem_alloc: physical_start=0x%lx, order=0x%x, size=0x%x\n"
		, virt_to_phys((void *)dss_page_addr), order, size);

	while (used < alloc_end) {
		BUG_ON(!virt_addr_valid((void *)used));
		tmp_page = virt_to_page((void *)used);
		__free_page(tmp_page);
		used += PAGE_SIZE;
	}

	dss_page_phy = virt_to_phys((void *)dss_page_addr);
	dss_page_virt = (u32) ioremap_cached(dss_page_phy, size);

	*phy_addr = dss_page_phy;
	*virt_addr = dss_page_virt;

	return 0;
}
Example 8
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size);

	add_meminfo_total_pages(NR_DMA_PAGES, size >> PAGE_SHIFT);

	return page;
}
Example 9
/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n-1]
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	VM_BUG_ON(PageCompound(page));
	VM_BUG_ON(!page_count(page));

#ifdef CONFIG_KMEMCHECK
	/*
	 * Split shadow pages too, because free(page[0]) would
	 * otherwise free the whole shadow.
	 */
	if (kmemcheck_page_is_tracked(page))
		split_page(virt_to_page(page[0].shadow), order);
#endif

	for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
}
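Almost every caller in this collection wraps split_page() in the same idiom: allocate the covering power-of-two block, split it, free the tail. A minimal self-contained sketch of that pattern (the function name and GFP flags are placeholders, not kernel API):

/*
 * Minimal sketch of the canonical split_page() idiom: allocate a
 * power-of-two block, split it into individually refcounted pages,
 * then free the pages beyond what was asked for. Each of the
 * nr_pages surviving pages must later be freed with __free_page().
 */
static struct page *alloc_pages_exact_sketch(unsigned int nr_pages)
{
	unsigned int order = get_order(nr_pages << PAGE_SHIFT);
	struct page *page = alloc_pages(GFP_KERNEL, order);
	unsigned int i;

	if (!page)
		return NULL;

	split_page(page, order);
	for (i = nr_pages; i < (1U << order); i++)
		__free_page(page + i);

	return page;
}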
Example 10
static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
                            gfp_t flag, struct dma_attrs *attrs)
{
    struct page *page, **map;
    pgprot_t pgprot;
    void *addr;
    int i, order;

    pr_debug("dma_alloc_coherent: %zu,%x\n", size, flag);

    size = PAGE_ALIGN(size);
    order = get_order(size);

    page = alloc_pages(flag, order);
    if (!page)
        return NULL;

    *handle = page_to_phys(page);
    map = kmalloc(sizeof(struct page *) << order, flag & ~__GFP_DMA);
    if (!map) {
        __free_pages(page, order);
        return NULL;
    }
    split_page(page, order);

    order = 1 << order;
    size >>= PAGE_SHIFT;
    map[0] = page;
    for (i = 1; i < size; i++)
        map[i] = page + i;
    for (; i < order; i++)
        __free_page(page + i);
    pgprot = __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
    if (CPU_IS_040_OR_060)
        pgprot_val(pgprot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
    else
        pgprot_val(pgprot) |= _PAGE_NOCACHE030;
    addr = vmap(map, size, VM_MAP, pgprot);
    kfree(map);

    return addr;
}
Example 11
static unsigned long setup_zero_pages(void)
{
	struct cpuid cpu_id;
	unsigned int order;
	unsigned long size;
	struct page *page;
	int i;

	get_cpu_id(&cpu_id);
	switch (cpu_id.machine) {
	case 0x9672:	/* g5 */
	case 0x2064:	/* z900 */
	case 0x2066:	/* z900 */
	case 0x2084:	/* z990 */
	case 0x2086:	/* z990 */
	case 0x2094:	/* z9-109 */
	case 0x2096:	/* z9-109 */
		order = 0;
		break;
	case 0x2097:	/* z10 */
	case 0x2098:	/* z10 */
	default:
		order = 2;
		break;
	}

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		SetPageReserved(page);
		page++;
	}

	size = PAGE_SIZE << order;
	zero_page_mask = (size - 1) & PAGE_MASK;

	return 1UL << order;
}
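Unlike the earlier variants, this one returns how many pages it pinned, which suggests the caller folds that into the page accounting. A hedged sketch of the likely call site (modelled on the MIPS mem_init()):

/*
 * Plausible call site for the variant above (hedged): the reserved
 * zero pages are subtracted from totalram_pages so that the memory
 * accounting stays accurate.
 */
void __init mem_init(void)
{
	/* ... */
	totalram_pages -= setup_zero_pages();	/* Setup zeroed pages. */
	/* ... */
}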
Example 12
/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size);

	return page;
}
Example 13
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
		gfp_t gfp_flags)
{
	unsigned int last_page = 0;
	int size = buf->size;

	while (size > 0) {
		struct page *pages;
		int order;
		int i;

		order = get_order(size);
		/* Don't over-allocate */
		if ((PAGE_SIZE << order) > size)
			order--;

		pages = NULL;
		while (!pages) {
			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					__GFP_NOWARN | gfp_flags, order);
			if (pages)
				break;

			if (order == 0) {
				while (last_page--)
					__free_page(buf->pages[last_page]);
				return -ENOMEM;
			}
			order--;
		}

		split_page(pages, order);
		for (i = 0; i < (1 << order); i++)
			buf->pages[last_page++] = &pages[i];

		size -= PAGE_SIZE << order;
	}

	return 0;
}
Example 14
struct page* pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *page = NULL, *p;
	int color = ADDR_COLOR(address);
	int i;

	p = alloc_pages(GFP_KERNEL | __GFP_REPEAT, COLOR_ORDER);

	if (likely(p)) {
		split_page(p, COLOR_ORDER);

		/* Keep the page whose cache color matches; free the rest */
		for (i = 0; i < COLOR_SIZE; i++) {
			if (PADDR_COLOR(page_address(p)) == color)
				page = p;
			else
				__free_page(p);
			p++;
		}
		clear_highpage(page);
	}

	return page;
}
Example 15
pte_t* pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte = NULL, *p;
	int color = ADDR_COLOR(address);
	int i;

	p = (pte_t*) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, COLOR_ORDER);

	if (likely(p)) {
		split_page(virt_to_page(p), COLOR_ORDER);

		for (i = 0; i < COLOR_SIZE; i++) {
			if (ADDR_COLOR(p) == color)
				pte = p;
			else
				free_page((unsigned long)p);
			p += PTRS_PER_PTE;
		}
		clear_page(pte);
	}
	return pte;
}
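Both coloring helpers above depend on macros the snippets don't define. A hypothetical rendering of their shape (the constants are illustrative; a real port derives them from its cache geometry):

/*
 * Hypothetical definitions of the coloring helpers assumed by the two
 * functions above. COLOR_ORDER is log2 of the number of cache colors;
 * allocating 1 << COLOR_ORDER pages guarantees one page of each color.
 */
#define COLOR_ORDER	1
#define COLOR_SIZE	(1 << COLOR_ORDER)
#define ADDR_COLOR(addr) \
	((((unsigned long)(addr)) >> PAGE_SHIFT) & (COLOR_SIZE - 1))
#define PADDR_COLOR(addr)	ADDR_COLOR(addr)	/* physical-side variant, same idea */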
Example 16
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order,
				      bool *from_pool)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	struct page *page;
	struct ion_page_pool *pool;

	if (!cached)
		pool = heap->uncached_pools[order_to_index(order)];
	else
		pool = heap->cached_pools[order_to_index(order)];
	page = ion_page_pool_alloc(pool, from_pool);
	if (!page)
		return NULL;

	if (split_pages)
		split_page(page, order);
	return page;
}
Example 17
/*
 * Consistent memory allocators. Used for DMA devices that want to
 * share uncached memory with the processor core.
 * My crufty no-MMU approach is simple. In the HW platform we can optionally
 * mirror the DDR up above the processor cacheable region.  So, memory accessed
 * in this mirror region will not be cached.  It's allocated from the same
 * pool as normal memory, but the handle we return is shifted up into the
 * uncached region.  This will no doubt cause big problems if memory allocated
 * here is not also freed properly. -- JW
 */
void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
{
	unsigned long order, vaddr;
	void *ret;
	unsigned int i, err = 0;
	struct page *page, *end;

#ifdef CONFIG_MMU
	phys_addr_t pa;
	struct vm_struct *area;
	unsigned long va;
#endif

	if (in_interrupt())
		BUG();

	/* Only allocate page size areas. */
	size = PAGE_ALIGN(size);
	order = get_order(size);

	vaddr = __get_free_pages(gfp, order);
	if (!vaddr)
		return NULL;

	/*
	 * we need to ensure that there are no cachelines in use,
	 * or worse dirty in this area.
	 */
	flush_dcache_range(virt_to_phys((void *)vaddr),
					virt_to_phys((void *)vaddr) + size);

#ifndef CONFIG_MMU
	ret = (void *)vaddr;
	/*
	 * Here's the magic!  Note if the uncached shadow is not implemented,
	 * it's up to the calling code to also test that condition and make
	 * other arrangements, such as manually flushing the cache and so on.
	 */
# ifdef CONFIG_XILINX_UNCACHED_SHADOW
	ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
# endif
	if ((unsigned int)ret > cpuinfo.dcache_base &&
				(unsigned int)ret < cpuinfo.dcache_high)
		pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");

	/* dma_handle is same as physical (shadowed) address */
	*dma_handle = (dma_addr_t)ret;
#else
	/* Allocate some common virtual space to map the new pages. */
	area = get_vm_area(size, VM_ALLOC);
	if (!area) {
		free_pages(vaddr, order);
		return NULL;
	}
	va = (unsigned long) area->addr;
	ret = (void *)va;

	/* This gives us the real physical address of the first page. */
	*dma_handle = pa = __virt_to_phys(vaddr);
#endif

	/*
	 * free wasted pages.  We skip the first page since we know
	 * that it will have count = 1 and won't require freeing.
	 * We also mark the pages in use as reserved so that
	 * remap_page_range works.
	 */
	page = virt_to_page(vaddr);
	end = page + (1 << order);

	split_page(page, order);

	for (i = 0; i < size && err == 0; i += PAGE_SIZE) {
#ifdef CONFIG_MMU
		/* MS: This is the whole magic - use cache inhibit pages */
		err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE);
#endif

		SetPageReserved(page);
		page++;
	}

	/* Free the otherwise unused pages. */
	while (page < end) {
		__free_page(page);
		page++;
	}

	if (err) {
		free_pages(vaddr, order);
		return NULL;
	}

	return ret;
}
Example 18
int btree_put(btree_t bt, const void *key, const void *data)
{
	const struct btree_def *def = bt->def;
	struct btree_page *new_root = NULL;
	struct btree_page *path_new[MAX_HEIGHT] = {0};
	struct btree_page *path_old[MAX_HEIGHT] = {0};
	int slot_old[MAX_HEIGHT] = {0};
	int h;

	check_btree(bt);

	/* Special case: cursor overwrite */
	if (!key) {
		if (bt->slot[0] < 0) {
			fprintf(stderr, "btree: put at invalid cursor\n");
			return -1;
		}

		memcpy(PAGE_DATA(bt->path[0], bt->slot[0]), data,
		       def->data_size);
		return 1;
	}

	/* Find a path down the tree that leads to the page which should
	 * contain this datum (though the page might be too big to hold it).
	 */
	if (trace_path(bt, key, path_old, slot_old)) {
		/* Special case: overwrite existing item */
		memcpy(PAGE_DATA(path_old[0], slot_old[0]), data,
		       def->data_size);
		return 1;
	}

	/* Trace from the leaf up. If the leaf is at its maximum size, it will
	 * need to split, and cause a pointer to be added in the parent page
	 * of the same node (which may in turn cause it to split).
	 */
	for (h = 0; h <= bt->root->height; h++) {
		if (path_old[h]->num_children < def->branches)
			break;

		path_new[h] = allocate_page(bt, h);
		if (!path_new[h])
			goto fail;
	}

	/* If the split reaches the top (i.e. the root splits), then we need
	 * to allocate a new root node.
	 */
	if (h > bt->root->height) {
		if (h >= MAX_HEIGHT) {
			fprintf(stderr, "btree: maximum height exceeded\n");
			goto fail;
		}

		new_root = allocate_page(bt, h);
		if (!new_root)
			goto fail;
	}

	/* Trace up to one page above the split. At each page that needs
	 * splitting, copy the top half of keys into the new page. Also,
	 * insert a key into one of the pages at all pages from the leaf
	 * to the page above the top of the split.
	 */
	for (h = 0; h <= bt->root->height; h++) {
		int s = slot_old[h] + 1;
		struct btree_page *p = path_old[h];

		/* If there's a split at this level, copy the top half of
		 * the keys from the old page to the new one. Check to see
		 * if the position we were going to insert into is in the
		 * old page or the new one.
		 */
		if (path_new[h]) {
			split_page(path_old[h], path_new[h]);

			if (s > p->num_children) {
				s -= p->num_children;
				p = path_new[h];
			}
		}

		/* Insert the key in the appropriate page */
		if (h)
			insert_ptr(p, s, PAGE_KEY(path_new[h - 1], 0),
				   path_new[h - 1]);
		else
			insert_data(p, s, key, data);

		/* If there was no split at this level, there's nothing to
		 * insert higher up, and we're all done.
		 */
		if (!path_new[h])
			return 0;
	}

	/* If we made it this far, the split reached the top of the tree, and
	 * we need to grow it using the extra page we allocated.
	 */
	assert(new_root);

	if (bt->slot[0] >= 0) {
		/* Fix up the cursor, if active */
		bt->slot[new_root->height] =
			bt->path[bt->root->height] == new_root ? 1 : 0;
		bt->path[new_root->height] = new_root;
	}

	memcpy(PAGE_KEY(new_root, 0), def->zero, def->key_size);
	*PAGE_PTR(new_root, 0) = path_old[h - 1];
	memcpy(PAGE_KEY(new_root, 1), PAGE_KEY(path_new[h - 1], 0),
	       def->key_size);
	*PAGE_PTR(new_root, 1) = path_new[h - 1];
	new_root->num_children = 2;
	bt->root = new_root;

	return 0;

 fail:
	for (h = 0; h <= bt->root->height; h++)
		if (path_new[h])
			free(path_new[h]);
	return -1;
}
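Note that split_page() here is this btree's own page splitter, not the kernel primitive from the earlier examples. Inferring from its call in btree_put(), a hedged sketch of what it must do (the PAGE_KEY/PAGE_PTR/PAGE_DATA copy and the height/def fields on struct btree_page are assumptions about the layout, not the project's actual code):

/*
 * Hedged sketch of the btree's split_page(): move the upper half of
 * the entries from the full old page (op) into the freshly allocated
 * new page (np), leaving each page roughly half full.
 */
static void split_page(struct btree_page *op, struct btree_page *np)
{
	const struct btree_def *def = op->def;	/* assumes pages carry their def */
	const int half = op->num_children / 2;
	int i;

	for (i = half; i < op->num_children; i++) {
		memcpy(PAGE_KEY(np, i - half), PAGE_KEY(op, i), def->key_size);
		if (op->height)
			*PAGE_PTR(np, i - half) = *PAGE_PTR(op, i);
		else
			memcpy(PAGE_DATA(np, i - half), PAGE_DATA(op, i),
			       def->data_size);
	}

	np->num_children = op->num_children - half;
	op->num_children = half;
}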