static int time_alloc_pages(
	struct time_bench_record *rec, void *data)
{
	/* Important to set: __GFP_COMP for compound pages
	 */
	gfp_t gfp_mask = (GFP_ATOMIC | __GFP_COMP);
	struct page *my_page;
	int order = rec->step;
	int i;

	/* Drop WARN on failures, time_bench will invalidate test */
	gfp_mask |= __GFP_NOWARN;

	time_bench_start(rec);
	/** Loop to measure **/
	for (i = 0; i < rec->loops; i++) {
		my_page = alloc_pages(gfp_mask, order);
		if (unlikely(my_page == NULL))
			return 0;
		__free_pages(my_page, order);
	}
	time_bench_stop(rec, i);

	if (verbose) {
		time_bench_calc_stats(rec);
		pr_info("alloc_pages order:%d(%luB/x%d) %llu cycles"
			" per-%luB %llu cycles\n",
			order, PAGE_SIZE << order, 1 << order,
			rec->tsc_cycles, PAGE_SIZE,
			rec->tsc_cycles >> order);
	}

	return i;
}
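A note on the order argument used throughout these examples: alloc_pages() and __free_pages() work on power-of-two blocks, so order n means 1 << n contiguous pages, i.e. PAGE_SIZE << n bytes, which is exactly the relationship the pr_info() above prints. A minimal sketch of the pairing, with hypothetical helper names:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical helper: allocate an order-n block (1 << n pages,
 * PAGE_SIZE << n bytes) and return its kernel virtual address. */
static void *demo_alloc_block(unsigned int order)
{
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_NOWARN, order);

	return page ? page_address(page) : NULL;
}

/* Hypothetical helper: free a block from demo_alloc_block(); the order
 * must match the one used at allocation time. */
static void demo_free_block(void *addr, unsigned int order)
{
	if (addr)
		__free_pages(virt_to_page(addr), order);
}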
Example #2
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	int i;
	struct efx_rx_buffer *rx_buf;

	EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);

	efx_nic_fini_rx(rx_queue);

	/* Release RX buffers NB start at index 0 not current HW ptr */
	if (rx_queue->buffer) {
		for (i = 0; i <= EFX_RXQ_MASK; i++) {
			rx_buf = efx_rx_buffer(rx_queue, i);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	/* For a page that is part-way through splitting into RX buffers */
	if (rx_queue->buf_page != NULL) {
		pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
			       efx_rx_buf_size(rx_queue->efx),
			       PCI_DMA_FROMDEVICE);
		__free_pages(rx_queue->buf_page,
			     rx_queue->efx->rx_buffer_order);
		rx_queue->buf_page = NULL;
	}
}
/*
 * Free all frame-stati in session->frame_stati and reset the session
 * so that the frame-stati part is 'Unconfigured'.
 * Claimed pages in the frame-stati are freed.
 *
 * The session lock must be held when calling this function.
 */
void free_page_stati(struct phys_mem_session* session) {
    if (session->frame_stati) {
        if (session->num_frame_stati) {
            size_t i;

            for (i = 0; i < session->num_frame_stati; i++) {
                struct page* p = session->frame_stati[i].page;
                if (p) {
                    session->frame_stati[i].page = NULL;

                    if (page_count(p)) {
                        printk(KERN_DEBUG "Session %llu: Freeing page #%lu @%lu with page_count %u\n", session->session_id, page_to_pfn(p), i, page_count(p));
                        __free_pages(p, 0);
                    } else {
                        printk(KERN_WARNING "Session %llu: NOT freeing page #%lu @%lu with page_count %u\n", session->session_id, page_to_pfn(p), i, page_count(p));
                    }
                }
            }
        }
        SESSION_FREE_FRAME_STATI(session->frame_stati);
    }
    session->num_frame_stati = 0;
    session->frame_stati = NULL;
}
Example #4
void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	unsigned long flags;
	int i;
	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
			priv->alloc_rxb_page--;
			__free_pages(rxq->pool[i].page,
				     priv->hw_params.rx_page_order);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}
Example #5
static inline
void __brick_block_free(void *data, int order, int cline)
{
#ifdef CONFIG_MARS_DEBUG_MEM_STRONG
	struct mem_block_info *inf = _find_block_info(data, true);
	if (likely(inf)) {
		int inf_len = inf->inf_len;
		int inf_line = inf->inf_line;
		kfree(inf);
		if (unlikely(inf_len != (PAGE_SIZE << order))) {
			BRICK_ERR("line %d: address %p: bad freeing size %d (correct should be %d, previous line = %d)\n", cline, data, (int)(PAGE_SIZE << order), inf_len, inf_line);
			goto err;
		}
	} else {
		BRICK_ERR("line %d: trying to free non-existent address %p (order = %d)\n", cline, data, order);
		goto err;
	}
#endif
#ifdef USE_KERNEL_PAGES
	__free_pages(virt_to_page((unsigned long)data), order);
#else
	vfree(data);
#endif
#ifdef CONFIG_MARS_DEBUG_MEM_STRONG
 err:
#endif
#ifdef BRICK_DEBUG_MEM
	atomic_dec(&phys_block_alloc);
	atomic_dec(&raw_count[order]);
#endif
	atomic64_sub((PAGE_SIZE/1024) << order, &brick_global_block_used);
}
Example #6
File: init.c Project: Gaffey/linux
static void __init init_free_pfn_range(unsigned long start, unsigned long end)
{
	unsigned long pfn;
	struct page *page = pfn_to_page(start);

	for (pfn = start; pfn < end; ) {
		/* Optimize by freeing pages in large batches */
		int order = __ffs(pfn);
		int count, i;
		struct page *p;

		if (order >= MAX_ORDER)
			order = MAX_ORDER-1;
		count = 1 << order;
		while (pfn + count > end) {
			count >>= 1;
			--order;
		}
		for (p = page, i = 0; i < count; ++i, ++p) {
			__ClearPageReserved(p);
			/*
			 * Hacky direct set to avoid unnecessary
			 * lock take/release for EVERY page here.
			 */
			p->_count.counter = 0;
			p->_mapcount.counter = -1;
		}
		init_page_count(page);
		__free_pages(page, order);
		adjust_managed_page_count(page, count);

		page += count;
		pfn += count;
	}
}
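The batching trick above relies on __ffs(pfn): the lowest set bit of the PFN gives the largest natural alignment of a block starting there, and the block is then halved until it no longer runs past end. A small hedged sketch of just that computation (hypothetical helper, same assumptions as the loop above):

#include <linux/bitops.h>
#include <linux/mmzone.h>

/* Hypothetical helper: largest order such that the block starting at
 * pfn is naturally aligned and does not run past end (assumes pfn < end).
 * Example: pfn = 0x1234 -> __ffs(pfn) = 2 -> at most 4 pages per batch. */
static int largest_aligned_order(unsigned long pfn, unsigned long end)
{
	int order = pfn ? (int)__ffs(pfn) : MAX_ORDER - 1;

	if (order >= MAX_ORDER)
		order = MAX_ORDER - 1;
	while ((1UL << order) > end - pfn)
		order--;
	return order;
}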
Example #7
File: main.c Project: wangxiaoq/MEI
/*
 * Free all frame-stati in session->frame_stati and reset the session
 * so that the frame-stati part is 'Unconfigured'.
 * Claimed pages in the frame-stati are freed.
 *
 * The session lock must be held when calling this function.
 */
void free_page_stati(struct phys_mem_session* session) {
    if (session->frame_stati) {
        if (session->num_frame_stati) {
            size_t i;

            for (i = 0; i < session->num_frame_stati; i++) {
                struct page* p = session->frame_stati[i].page;
                if (p) {
                    session->frame_stati[i].page = NULL;

                    // FIXME: Use a pointer to unclaim-method
                    if (session->frame_stati[i].actual_source == SOURCE_HOTPLUG_CLAIM) {
                      unclaim_pages_via_hotplug(p);
                    } else
                      if (page_count(p)) {
#if 0
                        printk(KERN_DEBUG "Session %llu: Freeing page 0x%08lx @%lu with page_count %u\n", session->session_id, page_to_pfn(p), i, page_count(p));
#endif
                        __free_pages(p, 0);
                    } else {
                        printk(KERN_WARNING "Session %llu: NOT freeing page 0x%08lx @%lu with page_count %u\n", session->session_id, page_to_pfn(p), i, page_count(p));
                    }
                }
            }
        }
        SESSION_FREE_FRAME_STATI(session->frame_stati);
    }
    session->num_frame_stati = 0;
    session->frame_stati = NULL;
}
Example #8
static void __exit logfs_exit(void)
{
	unregister_filesystem(&logfs_fs_type);
	logfs_destroy_inode_cache();
	logfs_compr_exit();
	__free_pages(emergency_page, 0);
}
Example #9
static int __init logfs_init(void)
{
	int ret;

	emergency_page = alloc_pages(GFP_KERNEL, 0);
	if (!emergency_page)
		return -ENOMEM;

	ret = logfs_compr_init();
	if (ret)
		goto out1;

	ret = logfs_init_inode_cache();
	if (ret)
		goto out2;

	ret = register_filesystem(&logfs_fs_type);
	if (!ret)
		return 0;
	logfs_destroy_inode_cache();
out2:
	logfs_compr_exit();
out1:
	__free_pages(emergency_page, 0);
	return ret;
}
Example #10
static void
mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
{
	u32 seg_len, data_len = e->urb->actual_length;
	u8 *data = page_address(e->p);
	struct page *new_p = NULL;
	int cnt = 0;

	if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state))
		return;

	/* Copy if there is very little data in the buffer. */
	if (data_len > 512)
		new_p = dev_alloc_pages(MT_RX_ORDER);

	while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) {
		mt7601u_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL);

		data_len -= seg_len;
		data += seg_len;
		cnt++;
	}

	if (cnt > 1)
		trace_mt_rx_dma_aggr(dev, cnt, !!new_p);

	if (new_p) {
		/* we have one extra ref from the allocator */
		__free_pages(e->p, MT_RX_ORDER);

		e->p = new_p;
	}
}
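The "one extra ref from the allocator" comment reflects the usual page-recycling pattern (see also Example #28 below): a freshly allocated page has a reference count of 1, each consumer that keeps a pointer takes its own reference with get_page(), and the producer finally drops the allocator's reference with __free_pages(), which only returns the page to the buddy allocator once the count reaches zero. A hedged sketch with hypothetical names, order 0 assumed:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical sketch: hand out nbufs sub-buffers of one order-0 page,
 * then drop the allocator's original reference. Each consumer later
 * releases its own reference with put_page(). */
static void demo_share_page(struct page *page, unsigned int nbufs)
{
	unsigned int i;

	for (i = 0; i < nbufs; i++)
		get_page(page);		/* one reference per consumer */

	/* The producer is done: drop the allocator's reference. The page
	 * is only freed once the last consumer calls put_page(). */
	__free_pages(page, 0);
}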
Example #11
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 struct dma_attrs *attrs)
{
	unsigned long dma_mask;
	struct page *page = NULL;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag |= __GFP_ZERO;
again:
	if (!(flag & GFP_ATOMIC))
		page = dma_alloc_from_contiguous(dev, count, get_order(size));
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (addr + size > dma_mask) {
		__free_pages(page, get_order(size));

		if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}

	*dma_addr = addr;
	return page_address(page);
}
Example #12
static int __init atomic_pool_init(void)
{
	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	unsigned int pool_size_order = get_order(atomic_pool_size);

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
						 pool_size_order, false);
	else
		page = alloc_pages(GFP_DMA32, pool_size_order);

	if (page) {
		int ret;
		void *page_addr = page_address(page);

		memset(page_addr, 0, atomic_pool_size);
		__dma_flush_area(page_addr, atomic_pool_size);

		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
		if (!atomic_pool)
			goto free_page;

		addr = dma_common_contiguous_remap(page, atomic_pool_size,
					VM_USERMAP, prot, atomic_pool_init);

		if (!addr)
			goto destroy_genpool;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto remove_mapping;

		gen_pool_set_algo(atomic_pool,
				  gen_pool_first_fit_order_align,
				  NULL);

		pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}
	goto out;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
		atomic_pool_size / 1024);
	return -ENOMEM;
}
/**
 * Frees memory allocated using RTMemContAlloc().
 *
 * @param   pv      Pointer to return from RTMemContAlloc().
 * @param   cb      The cb parameter passed to RTMemContAlloc().
 */
RTR0DECL(void) RTMemContFree(void *pv, size_t cb)
{
    if (pv)
    {
        int             cOrder;
        unsigned        cPages;
        unsigned        iPage;
        struct page    *paPages;

        /* validate */
        AssertMsg(!((uintptr_t)pv & PAGE_OFFSET_MASK), ("pv=%p\n", pv));
        Assert(cb > 0);

        /* calc order and get pages */
        cb = RT_ALIGN_Z(cb, PAGE_SIZE);
        cPages = cb >> PAGE_SHIFT;
        cOrder = CalcPowerOf2Order(cPages);
        paPages = virt_to_page(pv);

        /*
         * Restore page attributes freeing the pages.
         */
        for (iPage = 0; iPage < cPages; iPage++)
        {
            ClearPageReserved(&paPages[iPage]);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 20) /** @todo find the exact kernel where change_page_attr was introduced. */
            MY_SET_PAGES_NOEXEC(&paPages[iPage], 1);
#endif
        }
        __free_pages(paPages, cOrder);
    }
}
/**
 * Frees the physical pages allocated by the rtR0MemObjLinuxAllocPages() call.
 *
 * This method does NOT free the object.
 *
 * @param   pMemLnx     The object which physical pages should be freed.
 */
static void rtR0MemObjLinuxFreePages(PRTR0MEMOBJLNX pMemLnx)
{
    size_t iPage = pMemLnx->cPages;
    if (iPage > 0)
    {
        /*
         * Restore the page flags.
         */
        while (iPage-- > 0)
        {
            ClearPageReserved(pMemLnx->apPages[iPage]);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
#else
            MY_SET_PAGES_NOEXEC(pMemLnx->apPages[iPage], 1);
#endif
        }

        /*
         * Free the pages.
         */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
        if (!pMemLnx->fContiguous)
        {
            iPage = pMemLnx->cPages;
            while (iPage-- > 0)
                __free_page(pMemLnx->apPages[iPage]);
        }
        else
#endif
            __free_pages(pMemLnx->apPages[0], rtR0MemObjLinuxOrder(pMemLnx->cPages));

        pMemLnx->cPages = 0;
    }
}
Example #15
/* Cleanup a mapping */
static void fb_vma_free(struct pme_fb_vma *node)
{
	int order, num_pages;
	struct page *p;
	if (atomic_dec_and_test(&node->ref_count)) {
		/* This buffer can be recycled
		 * (Buffers can be NULL in the case where
		 * the mapped area is an iovec structure) */
		if (node->buffers)
			pme_fbchain_recycle(node->buffers);

		if (node->iovec_pages) {
			order = get_order(node->kmem_size);
			if (order) {
				p = node->iovec_pages;
				num_pages = 1 << order;
				while (num_pages--) {
					put_page_testzero(p);
					p++;
				}
			}
			__free_pages(node->iovec_pages, order);
		}
		vfree(node);
	}
}
Example #16
void FreeAllocPagesLinuxMemArea(struct LinuxMemArea *psLinuxMemArea)
{
	u32 ui32PageCount;
	struct page **pvPageList;
	void *hBlockPageList;
	u32 i;

	PVR_ASSERT(psLinuxMemArea);
	PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_ALLOC_PAGES);

#if defined(DEBUG_LINUX_MEM_AREAS)
	DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
#endif

	ui32PageCount = RANGE_TO_PAGES(psLinuxMemArea->ui32ByteSize);
	pvPageList = psLinuxMemArea->uData.sPageList.pvPageList;
	hBlockPageList = psLinuxMemArea->uData.sPageList.hBlockPageList;

#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
	DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES, pvPageList,
				  __FILE__, __LINE__);
#endif

	for (i = 0; i < ui32PageCount; i++)
		__free_pages(pvPageList[i], 0);

	OSFreeMem(0, sizeof(*pvPageList) * ui32PageCount, pvPageList,
			hBlockPageList);

	LinuxMemAreaStructFree(psLinuxMemArea);
}
Example #17
void spu_free_lscsa(struct spu_state *csa)
{
	unsigned char *p;
	int i;

	if (!csa->use_big_pages) {
		spu_free_lscsa_std(csa);
		return;
	}
	csa->use_big_pages = 0;

	if (csa->lscsa == NULL)
		goto free_pages;

	for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
		ClearPageReserved(vmalloc_to_page(p));

	vunmap(csa->lscsa);
	csa->lscsa = NULL;

 free_pages:

	for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++)
		if (csa->lscsa_pages[i])
			__free_pages(csa->lscsa_pages[i], SPU_64K_PAGE_ORDER);
}
Example #18
/*
 * Free the memory-mapped buffer memory allocated for a
 * videobuf_buffer and the associated scatterlist.
 */
static void omap24xxcam_vbq_free_mmap_buffer(struct videobuf_buffer *vb)
{
	struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
	size_t alloc_size;
	struct page *page;
	int i;

	if (dma->sglist == NULL)
		return;

	i = dma->sglen;
	while (i) {
		i--;
		alloc_size = sg_dma_len(&dma->sglist[i]);
		page = sg_page(&dma->sglist[i]);
		do {
			ClearPageReserved(page++);
		} while (alloc_size -= PAGE_SIZE);
		__free_pages(sg_page(&dma->sglist[i]),
			     get_order(sg_dma_len(&dma->sglist[i])));
	}

	kfree(dma->sglist);
	dma->sglist = NULL;
}
Example #19
void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (!dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(size));
}
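This free path mirrors the allocation side: the page either came from the device's CMA area or from the buddy allocator, so dma_release_from_contiguous() is tried first and __free_pages() is the fallback. A simplified, hedged sketch of the matching allocation step (not the actual kernel implementation; mask checks and retries are omitted):

#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Simplified sketch: try the device's CMA area first, then fall back
 * to the buddy allocator for an order-sized block. */
static struct page *demo_dma_alloc_pages(struct device *dev, size_t size,
					 gfp_t gfp)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = NULL;

	if (gfpflags_allow_blocking(gfp))
		page = dma_alloc_from_contiguous(dev, count, get_order(size),
						 gfp & __GFP_NOWARN);
	if (!page)
		page = alloc_pages(gfp, get_order(size));
	return page;
}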
Example #20
File: pmap.c Project: ecros/xv6-vm
// Remove the mapping at pte
// RETURNS:
// 0 on success
// -E_ALREADY_FREE if pte is already free
// 
int
remove_pte(pde_t * pgdir, pte_t * pte)
{ 
  struct Page * p;

  /* Check pte before taking the lock so we never return with it held */
  if (pte == NULL)
    return -E_ALREADY_FREE;

  acquire(&phy_mem_lock);

  if (*pte & PTE_P) {
    p = page_frame(PTE_ADDR(*pte));
    DecPageCount(p);
    if (!PageReserved(p) && !IsPageMapped(p)) {
      dbmsg("removing mapping at pages %x\n", p - pages);
      __free_pages(p, 1);
    }
    *pte = 0;
  }
  else {
    release(&phy_mem_lock);
    return -E_ALREADY_FREE;
  }

  release(&phy_mem_lock);
  return 0;
}
static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
{
	struct page *page;
	struct scatterlist sg;
	const bool high_order = pool->order > 4;

	if (high_order)
		page = alloc_pages(pool->gfp_mask & ~__GFP_ZERO, pool->order);
	else
		page = alloc_pages(pool->gfp_mask, pool->order);

	if (!page)
		return NULL;

	if ((pool->gfp_mask & __GFP_ZERO) && high_order)
		if (ion_heap_high_order_page_zero(
				page, pool->order, pool->should_invalidate))
			goto error_free_pages;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, PAGE_SIZE << pool->order, 0);
	sg_dma_address(&sg) = sg_phys(&sg);
	dma_sync_sg_for_device(NULL, &sg, 1, DMA_BIDIRECTIONAL);

	ion_alloc_inc_usage(ION_TOTAL, 1 << pool->order);
	return page;
error_free_pages:
	__free_pages(page, pool->order);
	return NULL;
}
Example #22
struct LinuxMemArea *NewAllocPagesLinuxMemArea(u32 ui32Bytes,
					u32 ui32AreaFlags)
{
	struct LinuxMemArea *psLinuxMemArea;
	u32 ui32PageCount;
	struct page **pvPageList;
	void *hBlockPageList;
	s32 i;
	enum PVRSRV_ERROR eError;

	psLinuxMemArea = LinuxMemAreaStructAlloc();
	if (!psLinuxMemArea)
		goto failed_area_alloc;

	ui32PageCount = RANGE_TO_PAGES(ui32Bytes);
	eError = OSAllocMem(0, sizeof(*pvPageList) * ui32PageCount,
		       (void **)&pvPageList, &hBlockPageList);
	if (eError != PVRSRV_OK)
		goto failed_page_list_alloc;

	for (i = 0; i < ui32PageCount; i++) {
		pvPageList[i] = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, 0);
		if (!pvPageList[i])
			goto failed_alloc_pages;

	}

#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
	DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES,
			       pvPageList, NULL, 0, NULL, PAGE_ALIGN(ui32Bytes),
			       "unknown", 0);
#endif

	psLinuxMemArea->eAreaType = LINUX_MEM_AREA_ALLOC_PAGES;
	psLinuxMemArea->uData.sPageList.pvPageList = pvPageList;
	psLinuxMemArea->uData.sPageList.hBlockPageList = hBlockPageList;
	psLinuxMemArea->ui32ByteSize = ui32Bytes;
	psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
	psLinuxMemArea->bMMapRegistered = IMG_FALSE;
	INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);

#if defined(DEBUG_LINUX_MEM_AREAS)
	DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
#endif

	return psLinuxMemArea;

failed_alloc_pages:
	for (i--; i >= 0; i--)
		__free_pages(pvPageList[i], 0);
	OSFreeMem(0, sizeof(*pvPageList) * ui32PageCount, pvPageList,
			hBlockPageList);
failed_page_list_alloc:
	LinuxMemAreaStructFree(psLinuxMemArea);
failed_area_alloc:
	PVR_DPF(PVR_DBG_ERROR, "%s: failed", __func__);

	return NULL;
}
Example #23
static void *__iommu_alloc_attrs(struct device *dev, size_t size,
				 dma_addr_t *handle, gfp_t gfp,
				 unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	size_t iosize = size;
	void *addr;

	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
		return NULL;

	size = PAGE_ALIGN(size);

	/*
	 * Some drivers rely on this, and we probably don't want the
	 * possibility of stale kernel data being read by devices anyway.
	 */
	gfp |= __GFP_ZERO;

	if (!gfpflags_allow_blocking(gfp)) {
		struct page *page;
		/*
		 * In atomic context we can't remap anything, so we'll only
		 * get the virtually contiguous buffer we need by way of a
		 * physically contiguous allocation.
		 */
		if (coherent) {
			page = alloc_pages(gfp, get_order(size));
			addr = page ? page_address(page) : NULL;
		} else {
			addr = __alloc_from_pool(size, &page, gfp);
		}
		if (!addr)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
			if (coherent)
				__free_pages(page, get_order(size));
			else
				__free_from_pool(addr, size);
			addr = NULL;
		}
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
					get_order(size), gfp & __GFP_NOWARN);
		if (!page)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
			dma_release_from_contiguous(dev, page,
						    size >> PAGE_SHIFT);
			return NULL;
		}
Example #24
void ceph_release_page_vector(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++)
		__free_pages(pages[i], 0);
	kfree(pages);
}
Example #25
static void i460_free_large_page (struct lp_desc *lp)
{
	kfree(lp->alloced_map);
	lp->alloced_map = NULL;

	__free_pages(lp->page, I460_IO_PAGE_SHIFT - PAGE_SHIFT);
	atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
}
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;

	if (chunk && chunk->data)
		__free_pages(chunk->data, order_base_2(nr_pages));
	pcpu_free_chunk(chunk);
}
static ssize_t mmc_test_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct mmc_card *card;
	struct mmc_test_card *test;
	int testcase;

	card = container_of(dev, struct mmc_card, dev);

	testcase = simple_strtol(buf, NULL, 10);

	test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	test->card = card;

	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
#ifdef CONFIG_HIGHMEM
	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
#endif

#ifdef CONFIG_HIGHMEM
	if (test->buffer && test->highmem) {
#else
	if (test->buffer) {
#endif
		mutex_lock(&mmc_test_lock);
		mmc_test_run(test, testcase);
		mutex_unlock(&mmc_test_lock);
	}

#ifdef CONFIG_HIGHMEM
	__free_pages(test->highmem, BUFFER_ORDER);
#endif
	kfree(test->buffer);
	kfree(test);

	return count;
}

static DEVICE_ATTR(test, S_IWUSR | S_IRUGO, mmc_test_show, mmc_test_store);

static int mmc_test_probe(struct mmc_card *card)
{
	int ret;

	if ((card->type != MMC_TYPE_MMC) && (card->type != MMC_TYPE_SD))
		return -ENODEV;

	ret = device_create_file(&card->dev, &dev_attr_test);
	if (ret)
		return ret;

	dev_info(&card->dev, "Card claimed for testing.\n");

	return 0;
}
Example #28
File: rx.c Project: 3bsa/linux
/**
 * efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffers for each one. Return a negative error code or
 * 0 on success. If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	unsigned int page_offset;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	count = 0;
	do {
		page = efx_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COLD | __GFP_COMP |
					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + efx->rx_ip_align;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}
Example #29
/**
 * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
 * and populates struct efx_rx_buffers for each one. Return a negative error
 * code or 0 on success. If a single page can be split between two buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	void *page_addr;
	unsigned int page_offset;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	/* We can split a page between two buffers */
	BUILD_BUG_ON(EFX_RX_BATCH & 1);

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
				   efx->rx_buffer_order);
		if (unlikely(page == NULL))
			return -ENOMEM;
		dma_addr = pci_map_page(efx->pci_dev, page, 0,
					efx_rx_buf_size(efx),
					PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
			__free_pages(page, efx->rx_buffer_order);
			return -EIO;
		}
		page_addr = page_address(page);
		state = page_addr;
		state->refcnt = 0;
		state->dma_addr = dma_addr;

		page_addr += sizeof(struct efx_rx_page_state);
		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

	split:
		index = rx_queue->added_count & rx_queue->ptr_mask;
		rx_buf = efx_rx_buffer(rx_queue, index);
		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
		rx_buf->u.page = page;
		rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
		rx_buf->is_page = true;
		++rx_queue->added_count;
		++rx_queue->alloc_page_count;
		++state->refcnt;

		if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
			/* Use the second half of the page */
			get_page(page);
			dma_addr += (PAGE_SIZE >> 1);
			page_addr += (PAGE_SIZE >> 1);
			page_offset += (PAGE_SIZE >> 1);
			++count;
			goto split;
		}
	}

	return 0;
}
void toi_free_pages(int fail_num, struct page *page, int order)
{
	if (page && toi_alloc_ops.enabled)
		free_update_stats(fail_num, PAGE_SIZE << order);

	if (fail_num == toi_trace_allocs)
		dump_stack();
	if (page)
		__free_pages(page, order);
}