Example #1
static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	dma_addr_t dma;
	void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);

	if (!pages)
		return NULL;

	if (!selftest_running) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	free_pages_exact(pages, size);
	return NULL;
}
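The helper __arm_lpae_dma_addr() that appears in the free path (Example #4 below) is not included in this listing; the comment above says the driver relies on the DMA address matching the physical address, so it is presumably just a thin virt_to_phys() wrapper. A minimal sketch under that assumption:

/*
 * Sketch only: assumes the DMA address is simply the physical address,
 * which is exactly what the check in __arm_lpae_alloc_pages() enforces.
 */
static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}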
Example #2
void *or1k_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t *dma_handle, gfp_t gfp)
{
	unsigned long va;
	void *page;
	struct mm_walk walk = {
		.pte_entry = page_set_nocache,
		.mm = &init_mm
	};

	page = alloc_pages_exact(size, gfp);
	if (!page)
		return NULL;

	/* This gives us the real physical address of the first page. */
	*dma_handle = __pa(page);

	va = (unsigned long)page;

	if (walk_page_range(va, va + size, &walk)) {
		free_pages_exact(page, size);
		return NULL;
	}

	return (void *)va;
}
Example #3
/*
 * Alloc "coherent" memory, which for OpenRISC means simply uncached.
 *
 * This function effectively just calls __get_free_pages, sets the
 * cache-inhibit bit on those pages, and makes sure that the pages are
 * flushed out of the cache before they are used.
 *
 * If the NON_CONSISTENT attribute is set, then this function just
 * returns "normal", cachable memory.
 *
 * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
 * into consideration here, too.  All current known implementations of
 * the OR1K support only strongly ordered memory accesses, so that flag
 * is being ignored for now; uncached but write-combined memory is a
 * missing feature of the OR1K.
 */
void *
arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned long va;
	void *page;
	struct mm_walk walk = {
		.pte_entry = page_set_nocache,
		.mm = &init_mm
	};

	page = alloc_pages_exact(size, gfp);
	if (!page)
		return NULL;

	/* This gives us the real physical address of the first page. */
	*dma_handle = __pa(page);

	va = (unsigned long)page;

	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
		/*
		 * We need to iterate through the pages, clearing the dcache for
		 * them and setting the cache-inhibit bit.
		 */
		if (walk_page_range(va, va + size, &walk)) {
			free_pages_exact(page, size);
			return NULL;
		}
	}

	return (void *)va;
}
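Neither page_set_nocache nor the page_clear_nocache callback used in the free paths further down is shown in this listing. A minimal sketch of such a .pte_entry callback, assuming the older walk_page_range() interface used above and OpenRISC's _PAGE_CI cache-inhibit PTE bit (the dcache flushing done by the real implementation is omitted here):

/* Sketch only: marks one page uncached and drops its stale TLB entry. */
static int page_set_nocache(pte_t *pte, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	/* Set the cache-inhibit bit in the PTE ... */
	pte_val(*pte) |= _PAGE_CI;

	/* ... and invalidate the TLB entry so the new flag takes effect. */
	flush_tlb_page(NULL, addr);

	return 0;
}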
Example #4
static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!selftest_running)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages_exact(pages, size);
}
Example #5
void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
		       dma_addr_t dma_handle)
{
	iounmap((void __force __iomem *)kvaddr);

	free_pages_exact((void *)plat_dma_addr_to_kernel(dev, dma_handle),
			 size);
}
Example #6
static void
repl_free_pages_exact(void *virt, size_t size)
{
	/* Report a "bad free" if this pointer was never seen being allocated. */
	if (!ZERO_OR_NULL_PTR(virt) && !klc_find_and_remove_alloc(virt))
		klc_add_bad_free(virt, stack_depth);

	free_pages_exact(virt, size);
}
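The matching allocation-side wrapper is not shown. In this leak-checker style it would presumably record each allocation so that klc_find_and_remove_alloc() can match it on free; the klc_add_alloc() helper named below is an assumption about the tool's API, mirroring klc_add_bad_free() above:

/* Rough sketch; klc_add_alloc() is assumed, not taken from this listing. */
static void *
repl_alloc_pages_exact(size_t size, gfp_t gfp_mask)
{
	void *virt = alloc_pages_exact(size, gfp_mask);

	/* Record the allocation so the free wrapper above can match it. */
	if (virt != NULL)
		klc_add_alloc(virt, size, stack_depth);

	return virt;
}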
Example #7
void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
		       dma_addr_t dma_handle)
{
	if (is_isa_arcv2() && ioc_exists)
		return dma_free_noncoherent(dev, size, kvaddr, dma_handle);

	iounmap((void __force __iomem *)kvaddr);

	free_pages_exact((void *)dma_handle, size);
}
Example #8
void io_pgtable_free_pages_exact(struct io_pgtable_cfg *cfg, void *cookie,
				 void *virt, size_t size)
{
	if (cfg->tlb->free_pages_exact)
		cfg->tlb->free_pages_exact(cookie, virt, size);
	else
		free_pages_exact(virt, size);

	atomic_sub(1 << get_order(size), &pages_allocated);
}
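The allocation counterpart is not part of this listing; given the atomic_sub() in the free path, it presumably adds the same page count on success. A hypothetical sketch (the function name and the ->alloc_pages_exact hook are assumptions, chosen to mirror the free path above):

void *io_pgtable_alloc_pages_exact(struct io_pgtable_cfg *cfg, void *cookie,
				   size_t size, gfp_t gfp_mask)
{
	void *ret;

	if (cfg->tlb->alloc_pages_exact)
		ret = cfg->tlb->alloc_pages_exact(cookie, size, gfp_mask);
	else
		ret = alloc_pages_exact(size, gfp_mask);

	/* Keep the counter symmetric with io_pgtable_free_pages_exact(). */
	if (ret)
		atomic_add(1 << get_order(size), &pages_allocated);

	return ret;
}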
Example #9
static void __exit ion_dummy_exit(void)
{
	int i;

	ion_device_destroy(idev);

	for (i = 0; i < dummy_ion_pdata.nr; i++)
		ion_heap_destroy(heaps[i]);
	kfree(heaps);

	if (carveout_ptr) {
		free_pages_exact(carveout_ptr,
				dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
		carveout_ptr = NULL;
	}
	if (chunk_ptr) {
		free_pages_exact(chunk_ptr,
				dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
		chunk_ptr = NULL;
	}
}
Example #10
static void free_page_cgroup(void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		struct page *page = virt_to_page(addr);
		size_t table_size =
			sizeof(struct page_cgroup) * PAGES_PER_SECTION;

		BUG_ON(PageReserved(page));
		free_pages_exact(addr, table_size);
	}
}
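For context, the allocation side paired with free_page_cgroup() tries physically contiguous pages first and falls back to vmalloc, which is why the free path has to check is_vmalloc_addr(). A simplified sketch (node-state handling and kmemleak annotation omitted):

static void *__meminit alloc_page_cgroup(size_t size, int nid)
{
	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
	void *addr;

	/* Try physically contiguous pages on the requested node first. */
	addr = alloc_pages_exact_nid(nid, size, flags);
	if (addr)
		return addr;

	/* Fall back to vmalloc; free_page_cgroup() detects this case
	 * with is_vmalloc_addr(). */
	return vzalloc_node(size, nid);
}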
Example #11
void or1k_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
			    dma_addr_t dma_handle)
{
	unsigned long va = (unsigned long)vaddr;
	struct mm_walk walk = {
		.pte_entry = page_clear_nocache,
		.mm = &init_mm
	};

	/* walk_page_range shouldn't be able to fail here */
	WARN_ON(walk_page_range(va, va + size, &walk));

	free_pages_exact(vaddr, size);
}
Example #12
void
arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long va = (unsigned long)vaddr;
	struct mm_walk walk = {
		.pte_entry = page_clear_nocache,
		.mm = &init_mm
	};

	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
		/* walk_page_range shouldn't be able to fail here */
		WARN_ON(walk_page_range(va, va + size, &walk));
	}

	free_pages_exact(vaddr, size);
}
Example #13
static void vm_del_vq(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
	struct virtio_mmio_vq_info *info = vq->priv;
	unsigned long flags, size;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	vring_del_virtqueue(vq);

	/* Select and deactivate the queue */
	writel(info->queue_index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
	writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);

	size = PAGE_ALIGN(vring_size(info->num, VIRTIO_MMIO_VRING_ALIGN));
	free_pages_exact(info->queue, size);
	kfree(info);
}
Example #14
static int __init ion_dummy_init(void)
{
	int i, err;

	idev = ion_device_create(NULL);
	heaps = kzalloc(sizeof(struct ion_heap *) * dummy_ion_pdata.nr,
			GFP_KERNEL);
	if (!heaps)
		return -ENOMEM;

	/* Allocate a dummy carveout heap */
	carveout_ptr = alloc_pages_exact(
				dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size,
				GFP_KERNEL);
	if (carveout_ptr)
		dummy_heaps[ION_HEAP_TYPE_CARVEOUT].base =
						virt_to_phys(carveout_ptr);
	else
		pr_err("ion_dummy: Could not allocate carveout\n");

	/* Allocate a dummy chunk heap */
	chunk_ptr = alloc_pages_exact(
				dummy_heaps[ION_HEAP_TYPE_CHUNK].size,
				GFP_KERNEL);
	if (chunk_ptr)
		dummy_heaps[ION_HEAP_TYPE_CHUNK].base = virt_to_phys(chunk_ptr);
	else
		pr_err("ion_dummy: Could not allocate chunk\n");

	for (i = 0; i < dummy_ion_pdata.nr; i++) {
		struct ion_platform_heap *heap_data = &dummy_ion_pdata.heaps[i];

		if (heap_data->type == ION_HEAP_TYPE_CARVEOUT &&
							!heap_data->base)
			continue;

		if (heap_data->type == ION_HEAP_TYPE_CHUNK && !heap_data->base)
			continue;

		heaps[i] = ion_heap_create(heap_data);
		if (IS_ERR_OR_NULL(heaps[i])) {
			err = heaps[i] ? PTR_ERR(heaps[i]) : -ENOMEM;
			goto err;
		}
		ion_device_add_heap(idev, heaps[i]);
	}
	return 0;
err:
	for (i = 0; i < dummy_ion_pdata.nr; i++) {
		if (heaps[i])
			ion_heap_destroy(heaps[i]);
	}
	kfree(heaps);

	if (carveout_ptr) {
		free_pages_exact(carveout_ptr,
				dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
		carveout_ptr = NULL;
	}
	if (chunk_ptr) {
		free_pages_exact(chunk_ptr,
				dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
		chunk_ptr = NULL;
	}
	return err;
}
Example #15
/* Function to perform hardware set up */
int isp_af_configure(struct af_configuration *afconfig)
{
	int result;
	int buff_size, i;
	unsigned int busyaf;
	struct af_configuration *af_curr_cfg = af_dev_configptr->config;

	if (NULL == afconfig) {
		printk(KERN_ERR "Null argument in configuration. \n");
		return -EINVAL;
	}

	memcpy(af_curr_cfg, afconfig, sizeof(struct af_configuration));
	/* Get the value of PCR register */
	busyaf = isp_reg_readl(OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR);

	if ((busyaf & AF_BUSYAF) == AF_BUSYAF) {
		DPRINTK_ISP_AF("AF_register_setup_ERROR : Engine Busy");
		DPRINTK_ISP_AF("\n Configuration cannot be done ");
		return -AF_ERR_ENGINE_BUSY;
	}

	/* Check IIR Coefficient and start Values */
	result = isp_af_check_iir();
	if (result < 0)
		return result;

	/* Check Paxel Values */
	result = isp_af_check_paxel();
	if (result < 0)
		return result;

	/* Check HMF Threshold Values */
	if (af_curr_cfg->hmf_config.threshold > AF_THRESHOLD_MAX) {
		DPRINTK_ISP_AF("Error : HMF Threshold is incorrect");
		return -AF_ERR_THRESHOLD;
	}

	/* Compute buffer size */
	buff_size = (af_curr_cfg->paxel_config.hz_cnt + 1) *
		(af_curr_cfg->paxel_config.vt_cnt + 1) * AF_PAXEL_SIZE;

	afstat.curr_cfg_buf_size = buff_size;
	/* Deallocate the previous buffers */
	if (afstat.stats_buf_size && buff_size > afstat.stats_buf_size) {
		isp_af_enable(0);
		for (i = 0; i < H3A_MAX_BUFF; i++) {
			ispmmu_kunmap(afstat.af_buff[i].ispmmu_addr);
			free_pages_exact((void *)afstat.af_buff[i].virt_addr,
					afstat.min_buf_size);
			afstat.af_buff[i].virt_addr = 0;
		}
		afstat.stats_buf_size = 0;
	}

	if (!afstat.af_buff[0].virt_addr) {
		afstat.stats_buf_size = buff_size;
		afstat.min_buf_size = PAGE_ALIGN(afstat.stats_buf_size);

		for (i = 0; i < H3A_MAX_BUFF; i++) {
			afstat.af_buff[i].virt_addr =
				(unsigned long)alloc_pages_exact(
					afstat.min_buf_size,
					GFP_KERNEL | GFP_DMA);
			if (afstat.af_buff[i].virt_addr == 0) {
				printk(KERN_ERR "Can't acquire memory for "
				       "buffer[%d]\n", i);
				return -ENOMEM;
			}
			afstat.af_buff[i].phy_addr = dma_map_single(NULL,
					(void *)afstat.af_buff[i].virt_addr,
					afstat.min_buf_size,
					DMA_FROM_DEVICE);
			afstat.af_buff[i].addr_align =
				afstat.af_buff[i].virt_addr;
			while ((afstat.af_buff[i].addr_align & 0xFFFFFFC0) !=
			       afstat.af_buff[i].addr_align)
				afstat.af_buff[i].addr_align++;
			afstat.af_buff[i].ispmmu_addr =
				ispmmu_kmap(afstat.af_buff[i].phy_addr,
					    afstat.min_buf_size);
		}
		isp_af_unlock_buffers();
		isp_af_link_buffers();

		/* First active buffer */
		if (active_buff == NULL)
			active_buff = &afstat.af_buff[0];
		isp_af_set_address(active_buff->ispmmu_addr);
	}

	result = isp_af_register_setup(af_dev_configptr);
	if (result < 0)
		return result;
	af_dev_configptr->size_paxel = buff_size;
	atomic_inc(&afstat.config_counter);
	afstat.initialized = 1;
	afstat.frame_count = 1;
	active_buff->frame_num = 1;
	/* Set configuration flag to indicate HW setup done */
	if (af_curr_cfg->af_config)
		isp_af_enable(1);
	else
		isp_af_enable(0);

	/* Success */
	return 0;
}
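The increment loop over addr_align above simply rounds the buffer address up to the next 64-byte boundary (the mask 0xFFFFFFC0 keeps only bits above the low six). With the kernel's standard ALIGN() helper the same result could be written in one line:

	/* Equivalent rounding-up to a 64-byte boundary using ALIGN(). */
	afstat.af_buff[i].addr_align = ALIGN(afstat.af_buff[i].virt_addr, 64);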
Example #16
File: mmu.c Project: 0x00evil/linux
/* Free the HW pgd, one page at a time */
static void kvm_free_hwpgd(void *hwpgd)
{
	free_pages_exact(hwpgd, kvm_get_hwpgd_size());
}
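The matching allocation is not shown in this listing; it presumably obtains the hardware pgd with alloc_pages_exact() of the same kvm_get_hwpgd_size(). A hedged sketch:

/* Sketch of the presumed allocation counterpart to kvm_free_hwpgd(). */
static void *kvm_alloc_hwpgd(void)
{
	return alloc_pages_exact(kvm_get_hwpgd_size(), GFP_KERNEL | __GFP_ZERO);
}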
Example #17
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
			  dma_addr_t dma_handle)
{
	free_pages_exact((void *)dma_handle, size);
}
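Casting dma_handle straight back to a kernel pointer only works because the matching allocator hands out the kernel linear address as the DMA handle. A sketch of such an allocation path, under that 1:1-mapping assumption:

/* Sketch only: assumes bus addresses equal kernel linear addresses,
 * which is what lets dma_free_noncoherent() above cast dma_handle
 * back to a pointer. */
void *dma_alloc_noncoherent(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t gfp)
{
	void *vaddr = alloc_pages_exact(size, gfp);

	if (!vaddr)
		return NULL;

	*dma_handle = (dma_addr_t)(unsigned long)vaddr;
	return vaddr;
}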
Example #18
static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
				  void (*callback)(struct virtqueue *vq),
				  const char *name)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct virtio_mmio_vq_info *info;
	struct virtqueue *vq;
	unsigned long flags, size;
	int err;

	/* Select the queue we're interested in */
	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);

	/* Queue shouldn't already be set up. */
	if (readl(vm_dev->base + VIRTIO_MMIO_QUEUE_PFN)) {
		err = -ENOENT;
		goto error_available;
	}

	/* Allocate and fill out our active queue description */
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto error_kmalloc;
	}
	info->queue_index = index;

	/* Allocate pages for the queue - start with a queue as big as
	 * possible (limited by maximum size allowed by device), drop down
	 * to a minimal size, just big enough to fit descriptor table
	 * and two rings (which makes it "alignment_size * 2")
	 */
	info->num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
	while (1) {
		size = PAGE_ALIGN(vring_size(info->num,
				VIRTIO_MMIO_VRING_ALIGN));
		/* Already smallest possible allocation? */
		if (size <= VIRTIO_MMIO_VRING_ALIGN * 2) {
			err = -ENOMEM;
			goto error_alloc_pages;
		}

		info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
		if (info->queue)
			break;

		info->num /= 2;
	}

	/* Activate the queue */
	writel(info->num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
	writel(VIRTIO_MMIO_VRING_ALIGN,
			vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
	writel(virt_to_phys(info->queue) >> PAGE_SHIFT,
			vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);

	/* Create the vring */
	vq = vring_new_virtqueue(info->num, VIRTIO_MMIO_VRING_ALIGN, vdev,
				 true, info->queue, vm_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto error_new_virtqueue;
	}

	vq->priv = info;
	info->vq = vq;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_add(&info->node, &vm_dev->virtqueues);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	return vq;

error_new_virtqueue:
	writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	free_pages_exact(info->queue, size);
error_alloc_pages:
	kfree(info);
error_kmalloc:
error_available:
	return ERR_PTR(err);
}
Example #19
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
			  dma_addr_t dma_handle)
{
	free_pages_exact((void *)plat_dma_addr_to_kernel(dev, dma_handle),
			 size);
}