Example No. 1
unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	page->index = 0;
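	/*
	 * For non-executable mappings a shadow table is allocated as well;
	 * its physical address is kept in page->index (0 means no shadow).
	 */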
	if (noexec) {
		struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
		if (!shadow) {
			__free_pages(page, ALLOC_ORDER);
			return NULL;
		}
		page->index = page_to_phys(shadow);
	}
	return (unsigned long *) page_to_phys(page);
}
Example No. 2
static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t paddr = page_to_phys(page) + offset;
	_dma_cache_sync(paddr, size, dir);
	return plat_phys_to_dma(dev, paddr);
}
Example No. 3
File: dma.c Project: AllenWeb/linux
dma_addr_t dma_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir)
{
	dma_addr_t handle = page_to_phys(page) + offset;

	dma_sync_single_for_device(dev, handle, size, dir);
	return handle;
}
Example No. 4
static void tegra114_flush_dcache(struct page *page, unsigned long offset,
				  size_t size)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	void *virt = page_address(page) + offset;

	__cpuc_flush_dcache_area(virt, size);
	outer_flush_range(phys, phys + size);
}
Example No. 5
static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
{
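	/*
	 * Addresses in the kernel linear mapping translate directly with
	 * __pa(); vmalloc addresses are resolved through their backing
	 * struct page plus the offset within that page.
	 */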
	if (!is_vmalloc_addr(kaddr)) {
		BUG_ON(!virt_addr_valid(kaddr));
		return __pa(kaddr);
	} else {
		return page_to_phys(vmalloc_to_page(kaddr)) +
		       offset_in_page(kaddr);
	}
}
Example No. 6
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 struct dma_attrs *attrs)
{
	unsigned long dma_mask;
	struct page *page;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag &= ~__GFP_ZERO;
again:
	page = NULL;
	/* CMA can be used only in the context which permits sleeping */
	if (flag & __GFP_WAIT) {
		page = dma_alloc_from_contiguous(dev, count, get_order(size));
		if (page && page_to_phys(page) + size > dma_mask) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}
	/* fallback */
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
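	/*
	 * Reject pages that end above the device's DMA mask and, where
	 * possible, retry the allocation from the stricter GFP_DMA zone.
	 */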
	if (addr + size > dma_mask) {
		__free_pages(page, get_order(size));

		if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}
	memset(page_address(page), 0, size);
	*dma_addr = addr;
	return page_address(page);
}
Example No. 7
static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
{
	struct page *page = alloc_pages(pool->gfp_mask, pool->order);

	if (!page)
		return NULL;
	dma_sync_single_for_device(NULL, (dma_addr_t)page_to_phys(page),
		PAGE_SIZE << pool->order, DMA_BIDIRECTIONAL);
	return page;
}
Example No. 8
static inline void kmap_invalidate_coherent(struct page *page,
					    unsigned long vaddr)
{
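	/*
	 * Nothing to do if the kernel alias of the page has the same cache
	 * colour as vaddr; otherwise invalidate the kernel alias, using a
	 * temporary TLBTEMP mapping for highmem pages.
	 */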
	if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
		unsigned long kvaddr;

		if (!PageHighMem(page)) {
			kvaddr = (unsigned long)page_to_virt(page);

			__invalidate_dcache_page(kvaddr);
		} else {
			kvaddr = TLBTEMP_BASE_1 +
				(page_to_phys(page) & DCACHE_ALIAS_MASK);

			__invalidate_dcache_page_alias(kvaddr,
						       page_to_phys(page));
		}
	}
}
Example No. 9
/*
 * Do normal kexec
 */
static void __do_machine_kexec(void *data)
{
	relocate_kernel_t data_mover;
	struct kimage *image = data;

	data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);

	/* Call the moving routine */
	(*data_mover)(&image->head, image->start);
}
Example No. 10
static int i460_insert_memory_small_io_page (struct agp_memory *mem,
				off_t pg_start, int type)
{
	unsigned long paddr, io_pg_start, io_page_size;
	int i, j, k, num_entries;
	void *temp;

	pr_debug("i460_insert_memory_small_io_page(mem=%p, pg_start=%ld, type=%d, paddr0=0x%lx)\n",
		 mem, pg_start, type, page_to_phys(mem->pages[0]));

	if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
		return -EINVAL;

	io_pg_start = I460_IOPAGES_PER_KPAGE * pg_start;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_8(temp)->num_entries;

	if ((io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count) > num_entries) {
		printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
		return -EINVAL;
	}

	j = io_pg_start;
	while (j < (io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count)) {
		if (!PGE_EMPTY(agp_bridge, RD_GATT(j))) {
			pr_debug("i460_insert_memory_small_io_page: GATT[%d]=0x%x is busy\n",
				 j, RD_GATT(j));
			return -EBUSY;
		}
		j++;
	}

	io_page_size = 1UL << I460_IO_PAGE_SHIFT;
	for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
		paddr = page_to_phys(mem->pages[i]);
		for (k = 0; k < I460_IOPAGES_PER_KPAGE; k++, j++, paddr += io_page_size)
			WR_GATT(j, i460_mask_memory(agp_bridge, paddr, mem->type));
	}
	WR_FLUSH_GATT(j - 1);
	return 0;
}
Example No. 11
static dma_addr_t
__mic_dma_map_page(struct device *dev, struct page *page, unsigned long offset,
		   size_t size, enum dma_data_direction dir,
		   struct dma_attrs *attrs)
{
	void *va = phys_to_virt(page_to_phys(page)) + offset;
	struct scif_hw_dev *scdev = dev_get_drvdata(dev);
	struct mic_device *mdev = scdev_to_mdev(scdev);

	return mic_map_single(mdev, va, size);
}
Example No. 12
unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	page->index = 0;
	if (noexec) {
		struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
		if (!shadow) {
			__free_pages(page, ALLOC_ORDER);
			return NULL;
		}
		page->index = page_to_phys(shadow);
	}
	spin_lock(&mm->page_table_lock);
	list_add(&page->lru, &mm->context.crst_list);
	spin_unlock(&mm->page_table_lock);
	return (unsigned long *) page_to_phys(page);
}
Example No. 13
void flush_cache_page(struct vm_area_struct* vma, unsigned long address,
    		      unsigned long pfn)
{
	/* Note that we have to use the 'alias' address to avoid multi-hit */

	unsigned long phys = page_to_phys(pfn_to_page(pfn));
	unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);

	__flush_invalidate_dcache_page_alias(virt, phys);
	__invalidate_icache_page_alias(virt, phys);
}
Example No. 14
void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
		      unsigned long pfn)
{
	/* Note that we have to use the 'alias' address to avoid multi-hit */

	unsigned long phys = page_to_phys(pfn_to_page(pfn));
	unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);

	__flush_invalidate_dcache_page_alias(virt, phys);
	__invalidate_icache_page_alias(virt, phys);
}
Example No. 15
NvOsPhysAddr NvOsPageAddress(NvOsPageAllocHandle desc, size_t offs)
{
    struct nvos_pagemap *pm = (struct nvos_pagemap *)desc;
    size_t index;

    if (unlikely(!pm)) return (NvOsPhysAddr)0;

    index = offs >> PAGE_SHIFT;
    offs &= (PAGE_SIZE - 1);

    return (NvOsPhysAddr)(page_to_phys(pm->pages[index]) + offs);
}
Example No. 16
static int pci_direct_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
		int nents, enum dma_data_direction direction)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		sg->dma_address = page_to_phys(sg->page) + sg->offset;
		sg->dma_length = sg->length;
	}

	return nents;
}
Example No. 17
static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 unsigned long attrs)
{
	dma_addr_t addr = page_to_phys(page) + offset;

	WARN_ON(size == 0);
	dma_cache_sync(dev, page_address(page) + offset, size, dir);

	return addr;
}
Example No. 18
/**
 * hp3a_histogram_isr - ISR for the histogram done interrupt.
 *
 * No return value.
 **/
static void hp3a_histogram_isr(unsigned long status, isp_vbq_callback_ptr arg1,
	void *arg2)
{
	u32 *hist_buffer;
	u32 i;
	struct hp3a_internal_buffer *ibuffer = NULL;

	if (unlikely((HIST_DONE & status) != HIST_DONE))
		return;

	omap_writel(omap_readl(ISPHIST_PCR) & ~(ISPHIST_PCR_EN), ISPHIST_PCR);

	if (unlikely(g_tc.v4l2_streaming == 0))
		return;

	if (hp3a_dequeue_irqsave(&g_tc.hist_hw_queue, &ibuffer) == 0) {
		/* If there is a buffer available then fill it. */
		hist_buffer = (u32 *)phys_to_virt( \
			page_to_phys(ibuffer->pages[0]));

		omap_writel((omap_readl(ISPHIST_CNT)) | \
			ISPHIST_CNT_CLR_EN, ISPHIST_CNT);
		for (i = g_tc.hist_bin_size; i--;) {
			*hist_buffer = omap_readl(ISPHIST_DATA);
			++hist_buffer;
		}
		omap_writel((omap_readl(ISPHIST_CNT)) & ~ISPHIST_CNT_CLR_EN,
			ISPHIST_CNT);
	} else {
		/* There are no buffers available so just */
		/* clear internal histogram memory. */
		omap_writel((omap_readl(ISPHIST_CNT)) | \
			ISPHIST_CNT_CLR_EN, ISPHIST_CNT);
		for (i = g_tc.hist_bin_size; i--;)
			omap_writel(0, ISPHIST_DATA);
		omap_writel((omap_readl(ISPHIST_CNT)) & ~ISPHIST_CNT_CLR_EN,
			ISPHIST_CNT);
	}

	/* Set HW memory address and enable. */
	omap_writel(0, ISPHIST_ADDR);

	if (g_tc.hist_hw_enable == 1) {
		/* Enable histogram. */
		omap_writel(omap_readl(ISPHIST_PCR) | (ISPHIST_PCR_EN),
			ISPHIST_PCR);
	}

	g_tc.hist_done = 1;

	/* Release the tasks waiting for stats. */
	wake_up(&g_tc.stats_done);
}
Example No. 19
void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE)
		outer_inv_range(paddr, paddr + size);

	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
}
Example No. 20
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned long vto, vfrom, flags, kto, kfrom, pfrom, pto;
	kto = ((unsigned long)page_address(to) & PAGE_MASK);
	kfrom = ((unsigned long)page_address(from) & PAGE_MASK);
	pto = page_to_phys(to);
	pfrom = page_to_phys(from);

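	/*
	 * If the user address aliases either kernel mapping, write back the
	 * source page and invalidate the destination page before copying
	 * through the temporary kremap mappings.
	 */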
	if (aliasing(vaddr, (unsigned long)kfrom))
		cpu_dcache_wb_page((unsigned long)kfrom);
	if (aliasing(vaddr, (unsigned long)kto))
		cpu_dcache_inval_page((unsigned long)kto);
	local_irq_save(flags);
	vto = kremap0(vaddr, pto);
	vfrom = kremap1(vaddr, pfrom);
	copy_page((void *)vto, (void *)vfrom);
	kunmap01(vfrom);
	kunmap01(vto);
	local_irq_restore(flags);
}
Example No. 21
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, void *src, int len)
{
	unsigned long vto, flags;

	local_irq_save(flags);
	vto = kremap0(vaddr, page_to_phys(page));
	src = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
	memcpy(dst, src, len);
	kunmap01(vto);
	local_irq_restore(flags);
}
Example No. 22
static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	dma_addr_t bus = page_to_phys(page) + offset;
	WARN_ON(size == 0);
	if (!check_addr("map_single", dev, bus, size))
		return bad_dma_address;
	flush_write_buffers();
	return bus;
}
Example No. 23
dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = (unsigned long) page_address(page) + offset;
	dma_cache_wback_inv(addr, size);

	return page_to_phys(page) + offset;
}
Example No. 24
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
		__dma_sync_page(page_to_phys(sg_page(sg)), sg->offset,
							sg->length, direction);
	}

	return nents;
}
Example No. 25
/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(int noexec)
{
	struct page *page = alloc_page(GFP_KERNEL);
	unsigned long *table;

	if (!page)
		return NULL;
	page->index = 0;
	if (noexec) {
		struct page *shadow = alloc_page(GFP_KERNEL);
		if (!shadow) {
			__free_page(page);
			return NULL;
		}
		table = (unsigned long *) page_to_phys(shadow);
		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		page->index = (addr_t) table;
	}
	table = (unsigned long *) page_to_phys(page);
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
	return table;
}
Example No. 26
/**
 * flush_icache_page - Flush a page from the dcache and invalidate the icache
 * @vma: The VMA the page is part of.
 * @page: The page to be flushed.
 *
 * Write a page back from the dcache and invalidate the icache so that we can
 * run code from it that we've just written into it
 */
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	unsigned long start = page_to_phys(page);
	unsigned long flags;

	flags = smp_lock_cache();

	mn10300_local_dcache_flush_page(start);
	mn10300_local_icache_inv_page(start);

	smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, start + PAGE_SIZE);
	smp_unlock_cache(flags);
}
Example No. 27
static void pagemap_flush_page(struct page *page)
{
#ifdef CONFIG_HIGHMEM
    void *km = NULL;

    if (!page_address(page)) {
        km = kmap(page);
        if (!km) {
            pr_err("unable to map high page\n");
            return;
        }
    }
#endif

    __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
    outer_flush_range(page_to_phys(page), page_to_phys(page)+PAGE_SIZE);
    wmb();

#ifdef CONFIG_HIGHMEM
    if (km) kunmap(page);
#endif
}
Example No. 28
static void *__dma_alloc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 struct dma_attrs *attrs, bool is_coherent)
{
	unsigned long dma_mask;
	struct page *page;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag |= __GFP_ZERO;
again:
	page = NULL;
	/* CMA can be used only in the context which permits sleeping */
	if (flag & __GFP_WAIT)
#ifdef CONFIG_CMA_EXPLICIT_USE
		if (dma_get_attr(DMA_ATTR_CMA, attrs)) {
#endif
			page = dma_alloc_from_contiguous(dev,
						count, get_order(size));
#ifdef CONFIG_CMA_EXPLICIT_USE
			if (!page)
				return NULL;
		}
#endif

	/* fallback */
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (addr + size > dma_mask) {
		__free_pages(page, get_order(size));

		if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}

	if (is_coherent == false)
		__dma_set_pages(page, count, attrs);

	*dma_addr = addr;
	return page_address(page);
}
Example No. 29
/* stores the physical address (+offset) of each handle relocation entry
 * into its output location. see nvmap_pin_array for more details.
 *
 * each entry in arr (i.e., each relocation request) specifies two handles:
 * the handle to pin (pin), and the handle where the address of pin should be
 * written (patch). in pseudocode, this loop basically looks like:
 *
 * for (i = 0; i < nr; i++) {
 *     (pin, pin_offset, patch, patch_offset) = arr[i];
 *     patch[patch_offset] = address_of(pin) + pin_offset;
 * }
 */
static int nvmap_reloc_pin_array(struct nvmap_client *client,
				 const struct nvmap_pinarray_elem *arr,
				 int nr, struct nvmap_handle *gather)
{
	struct nvmap_handle *last_patch = NULL;
	unsigned int last_pfn = 0;
	pte_t **pte;
	void *addr;
	int i;

	pte = nvmap_alloc_pte(client->dev, &addr);
	if (IS_ERR(pte))
		return PTR_ERR(pte);

	for (i = 0; i < nr; i++) {
		struct nvmap_handle *patch;
		struct nvmap_handle *pin;
		phys_addr_t reloc_addr;
		phys_addr_t phys;
		unsigned int pfn;

		/* all of the handles are validated and get'ted prior to
		 * calling this function, so casting is safe here */
		pin = (struct nvmap_handle *)arr[i].pin_mem;

		if (arr[i].patch_mem == (unsigned long)last_patch) {
			patch = last_patch;
		} else if (arr[i].patch_mem == (unsigned long)gather) {
			patch = gather;
		} else {
			if (last_patch)
				nvmap_handle_put(last_patch);

			patch = nvmap_get_handle_id(client, arr[i].patch_mem);
			if (!patch) {
				nvmap_free_pte(client->dev, pte);
				return -EPERM;
			}
			last_patch = patch;
		}

		if (!patch) {
			nvmap_free_pte(client->dev, pte);
			return -EPERM;
		}

		if (patch->heap_pgalloc) {
			unsigned int page = arr[i].patch_offset >> PAGE_SHIFT;
			phys = page_to_phys(patch->pgalloc.pages[page]);
			phys += (arr[i].patch_offset & ~PAGE_MASK);
		} else {
Example No. 30
static void flush_user_buffer(struct mmu2darena *arena)
{
	u32 i;
	struct gcpage gcpage;
	unsigned char *logical;

	if (arena->pages == NULL) {
		GCPRINT(NULL, 0, GC_MOD_PREFIX
			"page array is NULL.\n",
			__func__, __LINE__);
		return;
	}


	logical = arena->logical;
	if (logical == NULL) {
		GCPRINT(NULL, 0, GC_MOD_PREFIX
			"buffer base is NULL.\n",
			__func__, __LINE__);
		return;
	}

	for (i = 0; i < arena->count; i += 1) {
		gcpage.order = get_order(PAGE_SIZE);
		gcpage.size = PAGE_SIZE;

		gcpage.pages = arena->pages[i];
		if (gcpage.pages == NULL) {
			GCPRINT(NULL, 0, GC_MOD_PREFIX
				"page structure %d is NULL.\n",
				__func__, __LINE__, i);
			continue;
		}

		gcpage.physical = page_to_phys(gcpage.pages);
		if (gcpage.physical == 0) {
			GCPRINT(NULL, 0, GC_MOD_PREFIX
				"physical address of page %d is 0.\n",
				__func__, __LINE__, i);
			continue;
		}

		gcpage.logical = (unsigned int *) (logical + i * PAGE_SIZE);
		if (gcpage.logical == NULL) {
			GCPRINT(NULL, 0, GC_MOD_PREFIX
				"virtual address of page %d is NULL.\n",
				__func__, __LINE__, i);
			continue;
		}
	}
}