static int __devinit profile_cpu_callback(struct notifier_block *info,
					unsigned long action, void *__cpu)
{
	int node, cpu = (unsigned long)__cpu;
	struct page *page;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		node = cpu_to_node(cpu);
		per_cpu(cpu_profile_flip, cpu) = 0;
		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
			page = alloc_pages_node(node,
					GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
					0);
			if (!page)
				return NOTIFY_BAD;
			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
		}
		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
			page = alloc_pages_node(node,
					GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
					0);
			if (!page)
				goto out_free;
			per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
		}
		break;
	out_free:
		page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
		per_cpu(cpu_profile_hits, cpu)[1] = NULL;
		__free_page(page);
		return NOTIFY_BAD;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cpu_set(cpu, prof_cpu_mask);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cpu_clear(cpu, prof_cpu_mask);
		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
		break;
	}
	return NOTIFY_OK;
}
static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_ATOMIC);

	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);

	if (!pages) {
		printk(KERN_ERR "failed to allocate pages of order %d\n",
		       INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);

	iommu_set_intr_remapping(iommu, mode);
	return 0;
}
/**
 * pcpu_alloc_pages - allocates pages for @chunk
 * @chunk: target chunk
 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
 * @populated: populated bitmap
 * @page_start: page index of the first page to be allocated
 * @page_end: page index of the last page to be allocated + 1
 *
 * Allocate pages [@page_start,@page_end) into @pages for all units.
 * The allocation is for @chunk.  Percpu core doesn't care about the
 * content of @pages and will pass it verbatim to pcpu_map_pages().
 */
static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
			    struct page **pages, unsigned long *populated,
			    int page_start, int page_end)
{
	const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
	unsigned int cpu;
	int nid;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];

			/*
			 * Fall back to the local node when the CPU reports
			 * no NUMA node or its node has no populated zones.
			 */
			nid = cpu_to_node(cpu);
			if (nid == -1 ||
			    !node_zonelist(nid, GFP_KERNEL)->_zonerefs->zone)
				nid = numa_node_id();

			*pagep = alloc_pages_node(nid, gfp, 0);

			if (!*pagep) {
				pcpu_free_pages(chunk, pages, populated,
						page_start, page_end);
				return -ENOMEM;
			}
		}
	}
	return 0;
}
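
The open-coded check in the loop above guards against a CPU that reports no NUMA node, or a node with no populated zones. A minimal sketch of that fallback pulled out into a helper (hypothetical name pcpu_alloc_node; it assumes the same zonelist layout used above):

#include <linux/gfp.h>
#include <linux/mmzone.h>
#include <linux/numa.h>
#include <linux/topology.h>

/* Hypothetical helper: pick a usable node for @cpu, falling back to the
 * local node when the CPU has no node or its node has no populated zone.
 */
static int pcpu_alloc_node(unsigned int cpu)
{
	int nid = cpu_to_node(cpu);

	if (nid == NUMA_NO_NODE ||
	    !node_zonelist(nid, GFP_KERNEL)->_zonerefs->zone)
		nid = numa_node_id();

	return nid;
}

With such a helper the inner loop reduces to *pagep = alloc_pages_node(pcpu_alloc_node(cpu), gfp, 0);.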
Example #4
/*
 * Allocate an RPC server's buffer space.
 * We allocate pages and place them in rq_argpages.
 */
static int
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
{
	unsigned int pages, arghi;

	/* bc_xprt uses fore channel allocated buffers */
	if (svc_is_backchannel(rqstp))
		return 1;

	pages = size / PAGE_SIZE + 1; /* extra page as we hold both request
				       * and reply; we assume each is at most
				       * one page.
				       */
	arghi = 0;
	WARN_ON_ONCE(pages > RPCSVC_MAXPAGES);
	if (pages > RPCSVC_MAXPAGES)
		pages = RPCSVC_MAXPAGES;
	while (pages) {
		struct page *p = alloc_pages_node(node, GFP_KERNEL, 0);
		if (!p)
			break;
		rqstp->rq_pages[arghi++] = p;
		pages--;
	}
	return pages == 0;
}
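
The sizing comment above is easy to restate in isolation: one page per PAGE_SIZE of payload plus one extra page, because the buffer holds both the request and the reply and each is assumed to fit in a single page, capped at RPCSVC_MAXPAGES. A small hedged sketch of that arithmetic (stand-in constants, not the real sunrpc definitions):

#define EXAMPLE_PAGE_SIZE	4096u
#define EXAMPLE_MAXPAGES	259u	/* stand-in for RPCSVC_MAXPAGES */

/* Hypothetical restatement of the sizing rule in svc_init_buffer():
 * e.g. a 1 MiB payload gives 256 + 1 = 257 pages, within the cap.
 */
static unsigned int example_svc_pages_for(unsigned int size)
{
	unsigned int pages = size / EXAMPLE_PAGE_SIZE + 1;

	return pages > EXAMPLE_MAXPAGES ? EXAMPLE_MAXPAGES : pages;
}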
Example #5
void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
	struct page *shadow;
	int pages;
	int i;

	pages = 1 << order;

	/*
	 * With kmemcheck enabled, we need to allocate a memory area for the
	 * shadow bits as well.
	 */
	shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
	if (!shadow) {
		if (printk_ratelimit())
			printk(KERN_ERR "kmemcheck: failed to allocate "
				"shadow bitmap\n");
		return;
	}

	for (i = 0; i < pages; ++i)
		page[i].shadow = page_address(&shadow[i]);

	/*
	 * Mark it as non-present for the MMU so that our accesses to
	 * this memory will trigger a page fault and let us analyze
	 * the memory accesses.
	 */
	kmemcheck_hide_pages(page, pages);
}
Example #6
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 struct dma_attrs *attrs)
{
	unsigned long dma_mask;
	struct page *page = NULL;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag |= __GFP_ZERO;
again:
	if (!(flag & GFP_ATOMIC))
		page = dma_alloc_from_contiguous(dev, count, get_order(size));
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (addr + size > dma_mask) {
		__free_pages(page, get_order(size));

		if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}

	*dma_addr = addr;
	return page_address(page);
}
Example #7
/**
 * pcpu_alloc_pages - allocates pages for @chunk
 * @chunk: target chunk
 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
 * @page_start: page index of the first page to be allocated
 * @page_end: page index of the last page to be allocated + 1
 *
 * Allocate pages [@page_start,@page_end) into @pages for all units.
 * The allocation is for @chunk.  Percpu core doesn't care about the
 * content of @pages and will pass it verbatim to pcpu_map_pages().
 */
static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
			    struct page **pages, int page_start, int page_end)
{
	const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM;
	unsigned int cpu, tcpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];

			*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
			if (!*pagep)
				goto err;
		}
	}
	return 0;

err:
	while (--i >= page_start)
		__free_page(pages[pcpu_page_idx(cpu, i)]);

	for_each_possible_cpu(tcpu) {
		if (tcpu == cpu)
			break;
		for (i = page_start; i < page_end; i++)
			__free_page(pages[pcpu_page_idx(tcpu, i)]);
	}
	return -ENOMEM;
}
Example #8
void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
	if (ret == NULL)
		return NULL;
	*dma_handle += get_dma_offset(dev);
	return ret;
#else
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag  &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_abs(ret) + get_dma_offset(dev);

	return ret;
#endif
}
Example #9
/*! 2016-04-02: studying this function */
static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
			    struct page **pages, unsigned long *populated,
			    int page_start, int page_end)
{
	const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			/*! Find the slot in pages that corresponds to this
			 *  pcpu page index (pages is an array of pointers).
			 */
			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];

			/*! Allocate a page on this CPU's node into that slot. */
			*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
			if (!*pagep) {
				/*! On allocation failure, free what was allocated so far. */
				pcpu_free_pages(chunk, pages, populated,
						page_start, page_end);
				return -ENOMEM;
			}
		}
	}
	return 0;
}
Example #10
static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
						  int node)
{
	struct page *page = alloc_pages_node(node, THREADINFO_GFP_ACCOUNTED,
					     THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;
}
static int __init create_hash_tables(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		int node = cpu_to_node(cpu);
		struct page *page;

		page = alloc_pages_node(node,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[1]
				= (struct profile_hit *)page_address(page);
		page = alloc_pages_node(node,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[0]
				= (struct profile_hit *)page_address(page);
	}
	return 0;
out_cleanup:
	immediate_set_early(prof_on, 0);
	smp_mb();
	on_each_cpu(profile_nop, NULL, 0, 1);
	for_each_online_cpu(cpu) {
		struct page *page;

		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
	}
	return -1;
}
Example #12
struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
					unsigned int order, int home)
{
	struct page *page;
	BUG_ON(gfp_mask & __GFP_HIGHMEM);   /* must be lowmem */
	page = alloc_pages_node(nid, gfp_mask, order);
	if (page)
		homecache_change_page_home(page, order, home);
	return page;
}
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif
	struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;
}
Example #14
static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}
Example #15
void
sn_init_irq_desc(void) {
	int i;
	irq_desc_t *base_desc = _irq_desc, *p;

	for (i = 0; i < NR_CPUS; i++) {
		p =  page_address(alloc_pages_node(local_cnodeid(), GFP_KERNEL,
			get_order(sizeof(struct irq_desc) * NR_IRQS) ) );
		ASSERT(p);
		memcpy(p, base_desc, sizeof(struct irq_desc) * NR_IRQS);
		_sn_irq_desc[i] = p;
	}
}
Example #16
void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fallback to bootmem. */
	if (slab_is_available()) {
		struct page *page = alloc_pages_node(node,
				GFP_KERNEL | __GFP_ZERO, get_order(size));
		if (page)
			return page_address(page);
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}
Example #17
static void *__dma_alloc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 struct dma_attrs *attrs, bool is_coherent)
{
	unsigned long dma_mask;
	struct page *page;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag |= __GFP_ZERO;
again:
	page = NULL;
	/* CMA can be used only in the context which permits sleeping */
	if (flag & __GFP_WAIT)
#ifdef CONFIG_CMA_EXPLICIT_USE
		if (dma_get_attr(DMA_ATTR_CMA, attrs)) {
#endif
			page = dma_alloc_from_contiguous(dev,
						count, get_order(size));
#ifdef CONFIG_CMA_EXPLICIT_USE
			if (!page)
				return NULL;
		}
#endif

	/* fallback */
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (addr + size > dma_mask) {
		__free_pages(page, get_order(size));

		if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}

	if (is_coherent == false)
		__dma_set_pages(page, count, attrs);

	*dma_addr = addr;
	return page_address(page);
}
Example #18
static unsigned long __get_free_pages_node(int nid, gfp_t gfp_mask,
					   unsigned int order)
{
	struct page *page;

#if 0
	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
#endif

	page = alloc_pages_node(nid, gfp_mask, order);
	if (!page)
		return 0;

	return (unsigned long)page_address(page);
}
Example #19
/**
 * sn_dma_alloc_coherent - allocate memory for coherent DMA
 * @dev: device to allocate for
 * @size: size of the region
 * @dma_handle: DMA (bus) address
 * @flags: memory allocation flags
 *
 * dma_alloc_coherent() returns a pointer to a memory region suitable for
 * coherent DMA traffic to/from a PCI device.  On SN platforms, this means
 * that @dma_handle will have the %PCIIO_DMA_CMD flag set.
 *
 * This interface is usually used for "command" streams (e.g. the command
 * queue for a SCSI controller).  See Documentation/DMA-API.txt for
 * more information.
 */
void *sn_dma_alloc_coherent(struct device *dev, size_t size,
                dma_addr_t * dma_handle, gfp_t flags)
{
    void *cpuaddr;
    unsigned long phys_addr;
    int node;
    struct pci_dev *pdev = to_pci_dev(dev);
    struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

    BUG_ON(dev->bus != &pci_bus_type);

    /*
     * Allocate the memory.
     */
    node = pcibus_to_node(pdev->bus);
    if (likely(node >= 0)) {
        struct page *p = alloc_pages_node(node, flags, get_order(size));

        if (likely(p))
            cpuaddr = page_address(p);
        else
            return NULL;
    } else
        cpuaddr = (void *)__get_free_pages(flags, get_order(size));

    if (unlikely(!cpuaddr))
        return NULL;

    memset(cpuaddr, 0x0, size);

    /* physical addr. of the memory we just got */
    phys_addr = __pa(cpuaddr);

    /*
     * 64 bit address translations should never fail.
     * 32 bit translations can fail if there are insufficient mapping
     * resources.
     */

    *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
                           SN_DMA_ADDR_PHYS);
    if (!*dma_handle) {
        printk(KERN_ERR "%s: out of ATEs\n", __func__);
        free_pages((unsigned long)cpuaddr, get_order(size));
        return NULL;
    }

    return cpuaddr;
}
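
As the comment block above says, this routine sits behind dma_alloc_coherent(); drivers reach it through the generic DMA API rather than calling it directly. A minimal hedged sketch of that consumer side (hypothetical command-ring size and helper names):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

#define EXAMPLE_RING_BYTES	4096	/* hypothetical command-ring size */

/* Hypothetical probe-time allocation: the coherent DMA API hides the
 * node-aware page allocation done by the platform backend above.
 */
static void *example_ring_alloc(struct device *dev, dma_addr_t *ring_dma)
{
	return dma_alloc_coherent(dev, EXAMPLE_RING_BYTES, ring_dma,
				  GFP_KERNEL);
}

static void example_ring_free(struct device *dev, void *ring,
			      dma_addr_t ring_dma)
{
	dma_free_coherent(dev, EXAMPLE_RING_BYTES, ring, ring_dma);
}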
Example #20
struct thread_info *alloc_thread_info_node(struct task_struct *task, int node)
{
	struct page *page;
	gfp_t flags = GFP_KERNEL;

#ifdef CONFIG_DEBUG_STACK_USAGE
	flags |= __GFP_ZERO;
#endif

	page = alloc_pages_node(node, flags, THREAD_SIZE_ORDER);
	if (!page)
		return NULL;

	return (struct thread_info *)page_address(page);
}
Example #21
struct page *__alloc_pages(int order, int type)
{
	const struct list_head *head = &node_order;
	struct list_head *ptr = node_type[type];

	for (; ptr != head; ptr = ptr->next) {
		struct memory_node *node = LIST_ENTRY(ptr, struct memory_node,
					link);
		struct page *pages = alloc_pages_node(order, node);

		if (pages)
			return pages;
	}

	return 0;
}
Example #22
struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int page_order = get_order(size);
	struct page *page = NULL;
	u64 phys_mask;

	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	/* we always manually zero the memory once we are done: */
	gfp &= ~__GFP_ZERO;
	gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
			&phys_mask);
again:
	/* CMA can be used only in the context which permits sleeping */
	if (gfpflags_allow_blocking(gfp)) {
		page = dma_alloc_from_contiguous(dev, count, page_order,
						 gfp & __GFP_NOWARN);
		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), gfp, page_order);

	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		__free_pages(page, page_order);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_mask < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	return page;
}
Example #23
/*
 * Allocate an array of struct blk_zone to get nr_zones zone information.
 * The allocated array may be smaller than nr_zones.
 */
static struct blk_zone *blk_alloc_zones(int node, unsigned int *nr_zones)
{
	size_t size = *nr_zones * sizeof(struct blk_zone);
	struct page *page;
	int order;

	for (order = get_order(size); order >= 0; order--) {
		page = alloc_pages_node(node, GFP_NOIO | __GFP_ZERO, order);
		if (page) {
			*nr_zones = min_t(unsigned int, *nr_zones,
				(PAGE_SIZE << order) / sizeof(struct blk_zone));
			return page_address(page);
		}
	}

	return NULL;
}
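
Since the helper may shrink the request, *nr_zones is effectively an in/out parameter. A hedged sketch of how a caller in the same file might consume it (illustrative only; freeing of the page-backed array is omitted):

/* Hypothetical caller: request up to @want descriptors and report how
 * many the allocation could actually hold.
 */
static struct blk_zone *example_get_zone_array(int node, unsigned int want,
					       unsigned int *got)
{
	unsigned int nr_zones = want;
	struct blk_zone *zones;

	zones = blk_alloc_zones(node, &nr_zones);
	if (!zones)
		return NULL;

	*got = nr_zones;	/* may be smaller than @want */
	return zones;
}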
struct page *alloc_migrate_target(struct page *page, unsigned long nid,
				  int **resultp)
{
	/*
	 * hugeTLB: allocate a destination page from a nearest neighbor node,
	 * accordance with memory policy of the user process if possible. For
	 * now as a simple work-around, we use the next node for destination.
	 * Normal page: use prefer mempolicy for destination if called by
	 * hotplug, use default mempolicy for destination if called by cma.
	 */
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					    next_node_in(page_to_nid(page),
							 node_online_map));
	else
		return alloc_pages_node(nid, GFP_HIGHUSER_MOVABLE, 0);
}
void init_espfix_ap(int cpu)
{
	unsigned int page;
	unsigned long addr;
	pud_t pud, *pud_p;
	pmd_t pmd, *pmd_p;
	pte_t pte, *pte_p;
	int n, node;
	void *stack_page;
	pteval_t ptemask;

	/* We only have to do this once... */
	if (likely(per_cpu(espfix_stack, cpu)))
		return;		/* Already initialized */

	addr = espfix_base_addr(cpu);
	page = cpu/ESPFIX_STACKS_PER_PAGE;

	/* Did another CPU already set this up? */
	stack_page = ACCESS_ONCE(espfix_pages[page]);
	if (likely(stack_page))
		goto done;

	mutex_lock(&espfix_init_mutex);

	/* Did we race on the lock? */
	stack_page = ACCESS_ONCE(espfix_pages[page]);
	if (stack_page)
		goto unlock_done;

	node = cpu_to_node(cpu);
	ptemask = __supported_pte_mask;

	pud_p = &espfix_pud_page[pud_index(addr)];
	pud = *pud_p;
	if (!pud_present(pud)) {
		if (cpu)
			pmd_p = page_address(alloc_pages_node(node, PGALLOC_GFP, 0));
		else
			pmd_p = espfix_pmd_page;
		pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
		paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
		for (n = 0; n < ESPFIX_PUD_CLONES; n++)
			set_pud(&pud_p[n], pud);
	} else
Example #26
/* Allocate DMA memory on node near device */
noinline static void *
dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
	struct page *page;
	int node;
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		node = pcibus_to_node(to_pci_dev(dev)->bus);
	else
#endif
		node = numa_node_id();

	if (node < first_node(node_online_map))
		node = first_node(node_online_map);

	page = alloc_pages_node(node, gfp, order);
	return page ? page_address(page) : NULL;
}
Example #27
/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
		dma_addr_t *dma_handle, unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

 	/*
	 * Client asked for way too much space.  This is checked later
	 * anyway.  It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		printk("iommu_alloc_consistent size too large: 0x%lx\n", size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> IOMMU_PAGE_SHIFT;
	io_order = get_iommu_order(size);
	mapping = iommu_alloc(tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> IOMMU_PAGE_SHIFT, io_order);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}
Example #28
/*
 * Allocate a pasid table for @dev. It should be called in a
 * single-thread context.
 */
int intel_pasid_alloc_table(struct device *dev)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_table_opaque data;
	struct page *pages;
	size_t size, count;
	int ret, order;

	info = dev->archdata.iommu;
	if (WARN_ON(!info || !dev_is_pci(dev) ||
		    !info->pasid_supported || info->pasid_table))
		return -EINVAL;

	/* DMA alias device already has a pasid table, use it: */
	data.pasid_table = &pasid_table;
	ret = pci_for_each_dma_alias(to_pci_dev(dev),
				     &get_alias_pasid_table, &data);
	if (ret)
		goto attach_out;

	pasid_table = kzalloc(sizeof(*pasid_table), GFP_ATOMIC);
	if (!pasid_table)
		return -ENOMEM;
	INIT_LIST_HEAD(&pasid_table->dev);

	size = sizeof(struct pasid_entry);
	count = min_t(int, pci_max_pasids(to_pci_dev(dev)), intel_pasid_max_id);
	order = get_order(size * count);
	pages = alloc_pages_node(info->iommu->node,
				 GFP_ATOMIC | __GFP_ZERO,
				 order);
	if (!pages) {
		kfree(pasid_table);
		return -ENOMEM;
	}

	pasid_table->table = page_address(pages);
	pasid_table->order = order;
	pasid_table->max_pasid = count;

attach_out:
	device_attach_pasid_table(info, pasid_table);

	return 0;
}
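
The allocation order above comes from multiplying the per-entry size by the number of PASIDs and rounding up to a power-of-two number of pages with get_order(). A short hedged sketch of that arithmetic (hypothetical 8-byte entries and 65536 PASIDs):

#include <linux/mm.h>		/* get_order() */

/* Hypothetical sizing: 8-byte PASID entries, 65536 PASIDs.
 * size  = 8 * 65536 = 512 KiB
 * order = get_order(512 KiB) = 7 with 4 KiB pages (128 pages).
 */
static int example_pasid_table_order(void)
{
	size_t entry_size = 8;		/* assumed entry size */
	size_t count = 65536;		/* assumed PASID count */

	return get_order(entry_size * count);
}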
void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{

    if (slab_is_available()) {
        struct page *page;

        if (node_state(node, N_HIGH_MEMORY))
            page = alloc_pages_node(node,
                                    GFP_KERNEL | __GFP_ZERO, get_order(size));
        else
            page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
                               get_order(size));
        if (page)
            return page_address(page);
        return NULL;
    } else
        return __earlyonly_bootmem_alloc(node, size, size,
                                         __pa(MAX_DMA_ADDRESS));
}
Example #30
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 struct dma_attrs *attrs)
{
	unsigned long dma_mask;
	struct page *page;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag &= ~__GFP_ZERO;
again:
	page = NULL;
	/* CMA can be used only in the context which permits sleeping */
	if (flag & __GFP_WAIT) {
		page = dma_alloc_from_contiguous(dev, count, get_order(size));
		if (page && page_to_phys(page) + size > dma_mask) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}
	/* fallback */
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (addr + size > dma_mask) {
		__free_pages(page, get_order(size));

		if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}
	memset(page_address(page), 0, size);
	*dma_addr = addr;
	return page_address(page);
}