Example #1
int __ref profile_init(void)
{
	int buffer_bytes;
	if (!prof_on)
		return 0;

	/* only text is profiled */
	prof_len = (_etext - _stext) >> prof_shift;
	buffer_bytes = prof_len*sizeof(atomic_t);

	if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(prof_cpu_mask, cpu_possible_mask);

	prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = alloc_pages_exact(buffer_bytes,
					GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = vzalloc(buffer_bytes);
	if (prof_buffer)
		return 0;

	free_cpumask_var(prof_cpu_mask);
	return -ENOMEM;
}
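The fallback chain in Example #1 (kzalloc, then alloc_pages_exact, then vzalloc) means the owner has to remember which allocator succeeded, since each one has a different free routine. A minimal illustrative sketch of that bookkeeping (the buf_* helpers and the enum are hypothetical, not kernel code):

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/gfp.h>

/* Hypothetical helpers: record which allocator provided the buffer so the
 * matching free routine can be chosen later. */
enum buf_kind { BUF_KMALLOC, BUF_PAGES_EXACT, BUF_VMALLOC };

static void *buf_alloc(size_t bytes, enum buf_kind *kind)
{
	void *p;

	p = kzalloc(bytes, GFP_KERNEL | __GFP_NOWARN);
	if (p) {
		*kind = BUF_KMALLOC;
		return p;
	}

	p = alloc_pages_exact(bytes, GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
	if (p) {
		*kind = BUF_PAGES_EXACT;
		return p;
	}

	p = vzalloc(bytes);
	if (p)
		*kind = BUF_VMALLOC;
	return p;
}

static void buf_free(void *p, size_t bytes, enum buf_kind kind)
{
	if (!p)
		return;

	switch (kind) {
	case BUF_KMALLOC:
		kfree(p);
		break;
	case BUF_PAGES_EXACT:
		free_pages_exact(p, bytes);
		break;
	case BUF_VMALLOC:
		vfree(p);
		break;
	}
}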
Example #2
static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	dma_addr_t dma;
	void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);

	if (!pages)
		return NULL;

	if (!selftest_running) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	free_pages_exact(pages, size);
	return NULL;
}
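The corresponding release path has to undo the streaming DMA mapping before handing the pages back. A minimal sketch (the function name follows the driver's naming style but is not quoted from it):

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!selftest_running)
		/* the allocator verified dma == virt_to_phys(pages) */
		dma_unmap_single(cfg->iommu_dev, virt_to_phys(pages),
				 size, DMA_TO_DEVICE);

	free_pages_exact(pages, size);
}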
Example #3
/*
 * Alloc "coherent" memory, which for OpenRISC means simply uncached.
 *
 * This function effectively just calls __get_free_pages, sets the
 * cache-inhibit bit on those pages, and makes sure that the pages are
 * flushed out of the cache before they are used.
 *
 * If the NON_CONSISTENT attribute is set, then this function just
 * returns "normal", cachable memory.
 *
 * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
 * into consideration here, too.  All current known implementations of
 * the OR1K support only strongly ordered memory accesses, so that flag
 * is being ignored for now; uncached but write-combined memory is a
 * missing feature of the OR1K.
 */
void *
arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned long va;
	void *page;
	struct mm_walk walk = {
		.pte_entry = page_set_nocache,
		.mm = &init_mm
	};

	page = alloc_pages_exact(size, gfp);
	if (!page)
		return NULL;

	/* This gives us the real physical address of the first page. */
	*dma_handle = __pa(page);

	va = (unsigned long)page;

	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
		/*
		 * We need to iterate through the pages, clearing the dcache for
		 * them and setting the cache-inhibit bit.
		 */
		if (walk_page_range(va, va + size, &walk)) {
			free_pages_exact(page, size);
			return NULL;
		}
	}

	return (void *)va;
}
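The release path mirrors the allocation: clear the cache-inhibit bit again (except for NON_CONSISTENT buffers) and return the pages. A sketch modelled on the function above; page_clear_nocache is assumed to be the inverse of page_set_nocache:

void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long va = (unsigned long)vaddr;
	struct mm_walk walk = {
		.pte_entry = page_clear_nocache,
		.mm = &init_mm
	};

	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
		/* The walk succeeded at allocation time, so it is expected
		 * to succeed here as well. */
		WARN_ON(walk_page_range(va, va + size, &walk));
	}

	free_pages_exact(vaddr, size);
}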
Example #4
void *or1k_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t *dma_handle, gfp_t gfp)
{
	unsigned long va;
	void *page;
	struct mm_walk walk = {
		.pte_entry = page_set_nocache,
		.mm = &init_mm
	};

	page = alloc_pages_exact(size, gfp);
	if (!page)
		return NULL;

	/* This gives us the real physical address of the first page. */
	*dma_handle = __pa(page);

	va = (unsigned long)page;

	if (walk_page_range(va, va + size, &walk)) {
		free_pages_exact(page, size);
		return NULL;
	}

	return (void *)va;
}
Example #5
static void *
repl_alloc_pages_exact(size_t size, gfp_t gfp_mask)
{
    void *ret_val;
    ret_val = alloc_pages_exact(size, gfp_mask);
    
    if (!ZERO_OR_NULL_PTR(ret_val))
        klc_add_alloc(ret_val, size, stack_depth);

    return ret_val;
}
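A counterpart wrapper for free_pages_exact() would unregister the block from the leak checker before releasing it; an illustrative sketch (klc_add_free() is assumed here as the deallocation hook paired with klc_add_alloc()):

static void
repl_free_pages_exact(void *virt, size_t size)
{
    if (!ZERO_OR_NULL_PTR(virt))
        klc_add_free(virt, stack_depth);

    free_pages_exact(virt, size);
}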
Example #6
/*
 * Helpers for Coherent DMA API.
 */
void *dma_alloc_noncoherent(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t gfp)
{
	void *paddr;

	/* This is linear addr (0x8000_0000 based) */
	paddr = alloc_pages_exact(size, gfp);
	if (!paddr)
		return NULL;

	/* This is bus address, platform dependent */
	*dma_handle = (dma_addr_t)paddr;

	return paddr;
}
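The matching free helper is trivial, because dma_alloc_noncoherent() above hands out cached linear memory; a sketch along these lines (not a verbatim quote of the platform code):

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
			  dma_addr_t dma_handle)
{
	free_pages_exact(vaddr, size);
}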
Example #7
void *io_pgtable_alloc_pages_exact(struct io_pgtable_cfg *cfg, void *cookie,
				   size_t size, gfp_t gfp_mask)
{
	void *ret;

	if (cfg->tlb->alloc_pages_exact)
		ret = cfg->tlb->alloc_pages_exact(cookie, size, gfp_mask);
	else
		ret = alloc_pages_exact(size, gfp_mask);

	if (likely(ret))
		atomic_add(1 << get_order(size), &pages_allocated);

	return ret;
}
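A matching free helper would decrement the same counter and prefer the cfg->tlb hook when present; a sketch (the free_pages_exact callback in the tlb ops is an assumption mirroring the alloc hook above):

void io_pgtable_free_pages_exact(struct io_pgtable_cfg *cfg, void *cookie,
				 void *virt, size_t size)
{
	if (cfg->tlb->free_pages_exact)
		cfg->tlb->free_pages_exact(cookie, virt, size);
	else
		free_pages_exact(virt, size);

	atomic_sub(1 << get_order(size), &pages_allocated);
}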
Example #8
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *paddr, *kvaddr;

	/*
	 * IOC relies on all data (even coherent DMA data) being in cache
	 * Thus allocate normal cached memory
	 *
	 * The gains with IOC are two pronged:
	 *   -For streaming data, elides needs for cache maintenance, saving
	 *    cycles in flush code, and bus bandwidth as all the lines of a
	 *    buffer need to be flushed out to memory
	 *   -For coherent data, Read/Write to buffers terminate early in cache
	 *   (vs. always going to memory - thus are faster)
	 */
	if (is_isa_arcv2() && ioc_exists)
		return dma_alloc_noncoherent(dev, size, dma_handle, gfp);

	/* This is linear addr (0x8000_0000 based) */
	paddr = alloc_pages_exact(size, gfp);
	if (!paddr)
		return NULL;

	/* This is kernel Virtual address (0x7000_0000 based) */
	kvaddr = ioremap_nocache((unsigned long)paddr, size);
	if (kvaddr == NULL) {
		/* don't leak the exact-size allocation if the remap fails */
		free_pages_exact(paddr, size);
		return NULL;
	}

	/* This is bus address, platform dependent */
	*dma_handle = (dma_addr_t)paddr;

	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although core does call flush_cache_vmap(), it gets kvaddr hence
	 * can't be used to efficiently flush L1 and/or L2 which need paddr
	 * Currently flush_cache_vmap nukes the L1 cache completely which
	 * will be optimized as a separate commit
	 */
	dma_cache_wback_inv((unsigned long)paddr, size);

	return kvaddr;
}
Example #9
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *paddr, *kvaddr;

	/* This is linear addr (0x8000_0000 based) */
	paddr = alloc_pages_exact(size, gfp);
	if (!paddr)
		return NULL;

	/* This is kernel Virtual address (0x7000_0000 based) */
	kvaddr = ioremap_nocache((unsigned long)paddr, size);
	if (kvaddr != NULL)
		memset(kvaddr, 0, size);

	/* This is bus address, platform dependent */
	*dma_handle = plat_kernel_addr_to_dma(dev, paddr);

	return kvaddr;
}
Example #10
/*! profile_setup runs first, via the __setup("profile=", profile_setup); above */
int __ref profile_init(void)
{
	int buffer_bytes;
	/*! prof_on is set in profile_setup */
	if (!prof_on)
		return 0;

	/* only text is profiled */
	prof_len = (_etext - _stext) >> prof_shift;
	buffer_bytes = prof_len*sizeof(atomic_t);

	/*! alloc_cpumask_var: unconditionally true here. */
	if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	/*! Bitmap-copy cpu_possible_mask into prof_cpu_mask */
	cpumask_copy(prof_cpu_mask, cpu_possible_mask);

	/*! Allocate memory for prof_buffer */
	prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	/*! alloc_pages_exact - allocates physically contiguous pages */
	prof_buffer = alloc_pages_exact(buffer_bytes,
					GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);

	/*! Return on success */
	if (prof_buffer)
		return 0;

	/*! vzalloc: allocate virtually contiguous memory and zero it */
	prof_buffer = vzalloc(buffer_bytes);
	/*! On success, return 0 normally here */
	if (prof_buffer)
		return 0;

	/*! Does nothing. Return the error */
	free_cpumask_var(prof_cpu_mask);
	return -ENOMEM;
}
Example #11
static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
				  void (*callback)(struct virtqueue *vq),
				  const char *name)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct virtio_mmio_vq_info *info;
	struct virtqueue *vq;
	unsigned long flags, size;
	int err;

	/* Select the queue we're interested in */
	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);

	/* Queue shouldn't already be set up. */
	if (readl(vm_dev->base + VIRTIO_MMIO_QUEUE_PFN)) {
		err = -ENOENT;
		goto error_available;
	}

	/* Allocate and fill out our active queue description */
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto error_kmalloc;
	}
	info->queue_index = index;

	/* Allocate pages for the queue - start with a queue as big as
	 * possible (limited by maximum size allowed by device), drop down
	 * to a minimal size, just big enough to fit descriptor table
	 * and two rings (which makes it "alignment_size * 2")
	 */
	info->num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
	while (1) {
		size = PAGE_ALIGN(vring_size(info->num,
				VIRTIO_MMIO_VRING_ALIGN));
		/* Already smallest possible allocation? */
		if (size <= VIRTIO_MMIO_VRING_ALIGN * 2) {
			err = -ENOMEM;
			goto error_alloc_pages;
		}

		info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
		if (info->queue)
			break;

		info->num /= 2;
	}

	/* Activate the queue */
	writel(info->num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
	writel(VIRTIO_MMIO_VRING_ALIGN,
			vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
	writel(virt_to_phys(info->queue) >> PAGE_SHIFT,
			vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);

	/* Create the vring */
	vq = vring_new_virtqueue(info->num, VIRTIO_MMIO_VRING_ALIGN, vdev,
				 true, info->queue, vm_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto error_new_virtqueue;
	}

	vq->priv = info;
	info->vq = vq;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_add(&info->node, &vm_dev->virtqueues);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	return vq;

error_new_virtqueue:
	writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	free_pages_exact(info->queue, size);
error_alloc_pages:
	kfree(info);
error_kmalloc:
error_available:
	return ERR_PTR(err);
}
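Tearing such a queue down reverses the setup steps: remove it from the device's list, delete the vring, deactivate the queue in hardware, and return the ring pages. A sketch following the code above (names and ordering are illustrative, not a verbatim quote of the driver):

static void vm_del_vq(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
	struct virtio_mmio_vq_info *info = vq->priv;
	unsigned long flags, size;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	vring_del_virtqueue(vq);

	/* Select and deactivate the queue */
	writel(info->queue_index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
	writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);

	size = PAGE_ALIGN(vring_size(info->num, VIRTIO_MMIO_VRING_ALIGN));
	free_pages_exact(info->queue, size);
	kfree(info);
}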
Example #12
/* Allocate the HW PGD, making sure that each page gets its own refcount */
static void *kvm_alloc_hwpgd(void)
{
	unsigned int size = kvm_get_hwpgd_size();

	return alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
}
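The matching release helper simply returns the exact-sized allocation; a sketch consistent with the allocator above:

static void kvm_free_hwpgd(void *hwpgd)
{
	free_pages_exact(hwpgd, kvm_get_hwpgd_size());
}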
Example #13
static int __init ion_dummy_init(void)
{
	int i, err;

	idev = ion_device_create(NULL);
	heaps = kzalloc(sizeof(struct ion_heap *) * dummy_ion_pdata.nr,
			GFP_KERNEL);
	if (!heaps)
		return -ENOMEM;	/* PTR_ERR(NULL) would wrongly report success */


	/* Allocate a dummy carveout heap */
	carveout_ptr = alloc_pages_exact(
				dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size,
				GFP_KERNEL);
	if (carveout_ptr)
		dummy_heaps[ION_HEAP_TYPE_CARVEOUT].base =
						virt_to_phys(carveout_ptr);
	else
		pr_err("ion_dummy: Could not allocate carveout\n");

	/* Allocate a dummy chunk heap */
	chunk_ptr = alloc_pages_exact(
				dummy_heaps[ION_HEAP_TYPE_CHUNK].size,
				GFP_KERNEL);
	if (chunk_ptr)
		dummy_heaps[ION_HEAP_TYPE_CHUNK].base = virt_to_phys(chunk_ptr);
	else
		pr_err("ion_dummy: Could not allocate chunk\n");

	for (i = 0; i < dummy_ion_pdata.nr; i++) {
		struct ion_platform_heap *heap_data = &dummy_ion_pdata.heaps[i];

		if (heap_data->type == ION_HEAP_TYPE_CARVEOUT &&
							!heap_data->base)
			continue;

		if (heap_data->type == ION_HEAP_TYPE_CHUNK && !heap_data->base)
			continue;

		heaps[i] = ion_heap_create(heap_data);
		if (IS_ERR_OR_NULL(heaps[i])) {
			err = PTR_ERR(heaps[i]);
			goto err;
		}
		ion_device_add_heap(idev, heaps[i]);
	}
	return 0;
err:
	for (i = 0; i < dummy_ion_pdata.nr; i++) {
		if (heaps[i])
			ion_heap_destroy(heaps[i]);
	}
	kfree(heaps);

	if (carveout_ptr) {
		free_pages_exact(carveout_ptr,
				dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
		carveout_ptr = NULL;
	}
	if (chunk_ptr) {
		free_pages_exact(chunk_ptr,
				dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
		chunk_ptr = NULL;
	}
	return err;
}
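A corresponding module exit would mirror the error path above; an illustrative sketch (ion_device_destroy() is assumed as the counterpart of ion_device_create()):

static void __exit ion_dummy_exit(void)
{
	int i;

	for (i = 0; i < dummy_ion_pdata.nr; i++) {
		if (heaps[i])
			ion_heap_destroy(heaps[i]);
	}
	kfree(heaps);

	if (carveout_ptr) {
		free_pages_exact(carveout_ptr,
				 dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
		carveout_ptr = NULL;
	}
	if (chunk_ptr) {
		free_pages_exact(chunk_ptr,
				 dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
		chunk_ptr = NULL;
	}

	ion_device_destroy(idev);
}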
Example #14
/* Function to perform hardware set up */
int isp_af_configure(struct af_configuration *afconfig)
{
	int result;
	int buff_size, i;
	unsigned int busyaf;
	struct af_configuration *af_curr_cfg = af_dev_configptr->config;

	if (NULL == afconfig) {
		printk(KERN_ERR "Null argument in configuration. \n");
		return -EINVAL;
	}

	memcpy(af_curr_cfg, afconfig, sizeof(struct af_configuration));
	/* Get the value of PCR register */
	busyaf = isp_reg_readl(OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR);

	if ((busyaf & AF_BUSYAF) == AF_BUSYAF) {
		DPRINTK_ISP_AF("AF_register_setup_ERROR : Engine Busy");
		DPRINTK_ISP_AF("\n Configuration cannot be done ");
		return -AF_ERR_ENGINE_BUSY;
	}

	/* Check IIR Coefficient and start Values */
	result = isp_af_check_iir();
	if (result < 0)
		return result;

	/* Check Paxel Values */
	result = isp_af_check_paxel();
	if (result < 0)
		return result;

	/* Check HMF Threshold Values */
	if (af_curr_cfg->hmf_config.threshold > AF_THRESHOLD_MAX) {
		DPRINTK_ISP_AF("Error : HMF Threshold is incorrect");
		return -AF_ERR_THRESHOLD;
	}

	/* Compute buffer size */
	buff_size = (af_curr_cfg->paxel_config.hz_cnt + 1) *
		(af_curr_cfg->paxel_config.vt_cnt + 1) * AF_PAXEL_SIZE;

	afstat.curr_cfg_buf_size = buff_size;
	/* Deallocate the previous buffers */
	if (afstat.stats_buf_size && buff_size > afstat.stats_buf_size) {
		isp_af_enable(0);
		for (i = 0; i < H3A_MAX_BUFF; i++) {
			ispmmu_kunmap(afstat.af_buff[i].ispmmu_addr);
			free_pages_exact((void *)afstat.af_buff[i].virt_addr,
					afstat.min_buf_size);
			afstat.af_buff[i].virt_addr = 0;
		}
		afstat.stats_buf_size = 0;
	}

	if (!afstat.af_buff[0].virt_addr) {
		afstat.stats_buf_size = buff_size;
		afstat.min_buf_size = PAGE_ALIGN(afstat.stats_buf_size);

		for (i = 0; i < H3A_MAX_BUFF; i++) {
			afstat.af_buff[i].virt_addr =
				(unsigned long)alloc_pages_exact(
					afstat.min_buf_size,
					GFP_KERNEL | GFP_DMA);
			if (afstat.af_buff[i].virt_addr == 0) {
				printk(KERN_ERR "Can't acquire memory for "
				       "buffer[%d]\n", i);
				return -ENOMEM;
			}
			afstat.af_buff[i].phy_addr = dma_map_single(NULL,
					(void *)afstat.af_buff[i].virt_addr,
					afstat.min_buf_size,
					DMA_FROM_DEVICE);
			afstat.af_buff[i].addr_align =
				afstat.af_buff[i].virt_addr;
			while ((afstat.af_buff[i].addr_align & 0xFFFFFFC0) !=
			       afstat.af_buff[i].addr_align)
				afstat.af_buff[i].addr_align++;
			afstat.af_buff[i].ispmmu_addr =
				ispmmu_kmap(afstat.af_buff[i].phy_addr,
					    afstat.min_buf_size);
		}
		isp_af_unlock_buffers();
		isp_af_link_buffers();

		/* First active buffer */
		if (active_buff == NULL)
			active_buff = &afstat.af_buff[0];
		isp_af_set_address(active_buff->ispmmu_addr);
	}

	result = isp_af_register_setup(af_dev_configptr);
	if (result < 0)
		return result;
	af_dev_configptr->size_paxel = buff_size;
	atomic_inc(&afstat.config_counter);
	afstat.initialized = 1;
	afstat.frame_count = 1;
	active_buff->frame_num = 1;
	/* Set configuration flag to indicate HW setup done */
	if (af_curr_cfg->af_config)
		isp_af_enable(1);
	else
		isp_af_enable(0);

	/* Success */
	return 0;
}
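The manual 64-byte alignment loop over addr_align in the buffer setup above can be expressed with the kernel's ALIGN() helper, which rounds an address up to the given power-of-two boundary; an equivalent one-liner (illustrative):

	afstat.af_buff[i].addr_align = ALIGN(afstat.af_buff[i].virt_addr, 64);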