Example #1
/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size);

	return page;
}
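A minimal sketch of the trim pattern used above, lifted out of the DMA context: allocate a power-of-two block, split_page() it into independent order-0 pages, and free the tail that exceeds the requested size. The helper name is illustrative, not a kernel API.

static struct page *alloc_pages_exact_sketch(size_t size, gfp_t gfp)
{
	unsigned int order = get_order(size);	/* smallest order covering 'size' */
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/* after split_page(), each sub-page can be freed on its own */
	split_page(page, order);

	/* free the pages past 'size'; 'size' must be page aligned */
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	return page;
}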
Example #2
void ivtv_udma_free(struct ivtv *itv)
{
	int i;

	/* Unmap SG Array */
	if (itv->udma.SG_handle) {
		pci_unmap_single(itv->pdev, itv->udma.SG_handle,
			 sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE);
	}

	/* Unmap Scatterlist */
	if (itv->udma.SG_length) {
		pci_unmap_sg(itv->pdev, itv->udma.SGlist, itv->udma.page_count, PCI_DMA_TODEVICE);
	}

	for (i = 0; i < IVTV_DMA_SG_OSD_ENT; i++) {
		if (itv->udma.bouncemap[i])
			__free_page(itv->udma.bouncemap[i]);
	}
}
Example #3
static void zcache_free_page(struct page *page)
{
	long curr_pageframes;
	static long max_pageframes, min_pageframes;

	BUG_ON(page == NULL);
	__free_page(page);
	inc_zcache_pageframes_freed();
	curr_pageframes = curr_pageframes_count();
	if (curr_pageframes > max_pageframes)
		max_pageframes = curr_pageframes;
	if (curr_pageframes < min_pageframes)
		min_pageframes = curr_pageframes;
#ifdef CONFIG_ZCACHE_DEBUG
	if (curr_pageframes > 2L || curr_pageframes < -2L) {
		/* pr_info here */
	}
#endif
}
Example #4
/*
 * return how many pages cleaned up.
 */
static unsigned long enc_pools_cleanup(struct page ***pools, int npools)
{
	unsigned long cleaned = 0;
	int i, j;

	for (i = 0; i < npools; i++) {
		if (pools[i]) {
			for (j = 0; j < PAGES_PER_POOL; j++) {
				if (pools[i][j]) {
					__free_page(pools[i][j]);
					cleaned++;
				}
			}
			kfree(pools[i]);
			pools[i] = NULL;
		}
	}

	return cleaned;
}
Example #5
static int rds_page_remainder_cpu_notify(struct notifier_block *self,
					 unsigned long action, void *hcpu)
{
	struct rds_page_remainder *rem;
	long cpu = (long)hcpu;

	rem = &per_cpu(rds_page_remainders, cpu);

	rdsdebug("cpu %ld action 0x%lx\n", cpu, action);

	switch (action) {
	case CPU_DEAD:
		if (rem->r_page)
			__free_page(rem->r_page);
		rem->r_page = NULL;
		break;
	}

	return 0;
}
Example #6
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *page;

#ifdef CONFIG_HIGHPTE
	page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM, 0);
#else
	page = alloc_pages(GFP_KERNEL, 0);
#endif
	if (!page)
		return NULL;

	clear_highpage(page);
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	flush_dcache_page(page);
	return page;
}
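The pgtable_page_ctor() failure path above shows the usual undo shape: when a step after a successful allocation fails, the page must be freed before returning. A generic hedged sketch of the same shape, with an illustrative ctor-style callback:

static struct page *alloc_page_with_ctor(bool (*ctor)(struct page *))
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (!page)
		return NULL;

	/* ctor returns true on success, mirroring pgtable_page_ctor() */
	if (ctor && !ctor(page)) {
		__free_page(page);	/* undo the allocation before failing */
		return NULL;
	}
	return page;
}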
Example #7
void check_pgt_cache(void)
{
	preempt_disable();
	if (pgtable_cache_size > PGT_CACHE_HIGH) {
		do {
#ifdef CONFIG_SMP
			if (pgd_quicklist)
				free_pgd_slow(get_pgd_fast());
#endif
			if (pte_quicklist[0])
				free_pte_slow(pte_alloc_one_fast(NULL, 0));
			if (pte_quicklist[1])
				free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10)));
		} while (pgtable_cache_size > PGT_CACHE_LOW);
	}
#ifndef CONFIG_SMP
	if (pgd_cache_size > PGT_CACHE_HIGH / 4) {
		struct page *page, *page2;
		for (page2 = NULL, page = (struct page *)pgd_quicklist; page;) {
			if ((unsigned long)page->lru.prev == 3) {
				if (page2)
					page2->lru.next = page->lru.next;
				else
					pgd_quicklist = (void *) page->lru.next;
				pgd_cache_size -= 2;
				__free_page(page);
				if (page2)
					page = (struct page *)page2->lru.next;
				else
					page = (struct page *)pgd_quicklist;
				if (pgd_cache_size <= PGT_CACHE_LOW / 4)
					break;
				continue;
			}
			page2 = page;
			page = (struct page *)page->lru.next;
		}
	}
#endif
	preempt_enable();
}
Example #8
static void zram_free_page(struct zram *zram, size_t index)
{
	int zlen;
	void *obj;
	u32 offset;
	struct page *page;

	/*
	 * No memory is allocated for zero filled pages.
	 * Simply clear corresponding table entry.
	 */
	if (zram_is_zero_page(zram, index)) {
		zram_clear_zero_page(zram, index);
		zram_dec_stat(zram, ZRAM_STAT_PAGES_ZERO);
		return;
	}

	zram_find_obj(zram, index, &page, &offset);
	if (!page)
		return;

	/* Uncompressed pages consume a whole page, so offset is zero */
	if (unlikely(!offset)) {
		zlen = PAGE_SIZE;
		__free_page(page);
		zram_dec_stat(zram, ZRAM_STAT_PAGES_EXPAND);
		goto out;
	}

	obj = kmap_atomic(page, KM_USER0) + offset;
	zlen = xv_get_object_size(obj);
	kunmap_atomic(obj, KM_USER0);

	xv_free(zram->mem_pool, page, offset);

out:
	zram_add_stat(zram, ZRAM_STAT_COMPR_SIZE, -zlen);
	zram_dec_stat(zram, ZRAM_STAT_PAGES_STORED);

	zram->table[index].addr = 0;
}
Example #9
static void free_pud_range(pgd_t *pgd)
{
	int i;
	pud_t *pud;
	pud = pud_offset(pgd, 0);

	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		pmd_t *pmd;
		struct page *page;

		if (oleole_pud_none_or_clear_bad(pud))
			continue;

		free_pmd_range(pud);

		pmd = pmd_offset(pud, 0);
		page = virt_to_page(pmd);
		__free_page(page);
		pud_clear(pud);
	}
}
Example #10
static void log_end_io(struct bio *bio)
{
	struct log_writes_c *lc = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	if (bio->bi_error) {
		unsigned long flags;

		DMERR("Error writing log block, error=%d", bio->bi_error);
		spin_lock_irqsave(&lc->blocks_lock, flags);
		lc->logging_enabled = false;
		spin_unlock_irqrestore(&lc->blocks_lock, flags);
	}

	bio_for_each_segment_all(bvec, bio, i)
		__free_page(bvec->bv_page);

	put_io_block(lc);
	bio_put(bio);
}
Example #11
static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm)
{
	int i;
	struct drm_buffer_manager *bm = &ttm->dev->bm;
	struct page **cur_page;

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages + i;
		if (*cur_page) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
			ClearPageReserved(*cur_page);
#endif
			if (page_count(*cur_page) != 1)
				DRM_ERROR("Erroneous page count. Leaking pages.\n");
			if (page_mapped(*cur_page))
				DRM_ERROR("Erroneous map count. Leaking page mappings.\n");
			__free_page(*cur_page);
			--bm->cur_pages;
		}
	}
}
Example #12
void oleolevm_free_pgd_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     unsigned long floor,
			     unsigned long ceiling)
{
	unsigned long flags;
	oleole_guest_system_t *gsys;

	addr &= PMD_MASK;

	for (; addr < end; addr += PMD_SIZE) {
		pgd_t *pgd;
		pud_t *pud;
		struct page *page;

		pgd = pgd_offset(tlb->mm, addr);

		if (oleole_pgd_none_or_clear_bad(pgd))
			continue;

		free_pud_range(pgd);

		pud = pud_offset(pgd, 0);
		page = virt_to_page(pud);
		__free_page(page);
		pgd_clear(pgd);
	}

	/* */
	gsys = vma->vm_private_data;
	if (gsys) {
		spin_lock_irqsave(&gsys->lock, flags);
		vma->vm_private_data = NULL;
		gsys->vma = NULL;
		spin_unlock_irqrestore(&gsys->lock, flags);
	}

	tlb->need_flush = 1;
}
Example #13
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
		gfp_t gfp_flags)
{
	unsigned int last_page = 0;
	int size = buf->size;

	while (size > 0) {
		struct page *pages;
		int order;
		int i;

		order = get_order(size);
		/* Don't over-allocate */
		if ((PAGE_SIZE << order) > size)
			order--;

		pages = NULL;
		while (!pages) {
			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					__GFP_NOWARN | gfp_flags, order);
			if (pages)
				break;

			if (order == 0) {
				while (last_page--)
					__free_page(buf->pages[last_page]);
				return -ENOMEM;
			}
			order--;
		}

		split_page(pages, order);
		for (i = 0; i < (1 << order); i++)
			buf->pages[last_page++] = &pages[i];

		size -= PAGE_SIZE << order;
	}

	return 0;
}
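In the videobuf2 driver this helper belongs to, the collected page array is then wrapped in a scatterlist. A hedged sketch of that follow-up step using the stock sg_alloc_table_from_pages() helper; the function name is illustrative, and it assumes buf->pages was filled in as above:

static int build_sg_table(struct vb2_dma_sg_buf *buf, unsigned int n_pages)
{
	int ret;

	/* wrap the collected pages in a scatterlist; on failure, release
	 * every page, mirroring the unwind in the allocator above */
	ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages, n_pages,
					0, buf->size, GFP_KERNEL);
	if (ret)
		while (n_pages--)
			__free_page(buf->pages[n_pages]);
	return ret;
}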
Example #14
int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;

	mutex_init(&glob->device_list_mutex);
	spin_lock_init(&glob->lru_lock);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		pr_err("Could not register buffer object swapout\n");
		goto out_no_shrink;
	}

	atomic_set(&glob->bo_count, 0);

	ret = kobject_init_and_add(
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;
out_no_shrink:
	__free_page(glob->dummy_read_page);
out_no_drp:
	kfree(glob);
	return ret;
}
Example #15
/**
 * ceph_cls_unlock - release rados lock for object
 * @oid, @oloc: object to unlock
 * @lock_name: the name of the lock
 * @cookie: user-defined identifier for this instance of the lock
 */
int ceph_cls_unlock(struct ceph_osd_client *osdc,
		    struct ceph_object_id *oid,
		    struct ceph_object_locator *oloc,
		    char *lock_name, char *cookie)
{
	int unlock_op_buf_size;
	int name_len = strlen(lock_name);
	int cookie_len = strlen(cookie);
	void *p, *end;
	struct page *unlock_op_page;
	int ret;

	unlock_op_buf_size = name_len + sizeof(__le32) +
			     cookie_len + sizeof(__le32) +
			     CEPH_ENCODING_START_BLK_LEN;
	if (unlock_op_buf_size > PAGE_SIZE)
		return -E2BIG;

	unlock_op_page = alloc_page(GFP_NOIO);
	if (!unlock_op_page)
		return -ENOMEM;

	p = page_address(unlock_op_page);
	end = p + unlock_op_buf_size;

	/* encode cls_lock_unlock_op struct */
	ceph_start_encoding(&p, 1, 1,
			    unlock_op_buf_size - CEPH_ENCODING_START_BLK_LEN);
	ceph_encode_string(&p, end, lock_name, name_len);
	ceph_encode_string(&p, end, cookie, cookie_len);

	dout("%s lock_name %s cookie %s\n", __func__, lock_name, cookie);
	ret = ceph_osdc_call(osdc, oid, oloc, "lock", "unlock",
			     CEPH_OSD_FLAG_WRITE, unlock_op_page,
			     unlock_op_buf_size, NULL, NULL);

	dout("%s: status %d\n", __func__, ret);
	__free_page(unlock_op_page);
	return ret;
}
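The alloc_page()/page_address()/__free_page() trio in ceph_cls_unlock() is the stock idiom for a short-lived, page-sized scratch buffer. A stripped-down hedged sketch; the function name and memset() stand-in are illustrative, and page_address() is safe here because the page is not a highmem page:

static int scratch_page_demo(void)
{
	struct page *page = alloc_page(GFP_NOIO);
	void *buf;

	if (!page)
		return -ENOMEM;

	buf = page_address(page);	/* directly addressable: no __GFP_HIGHMEM */
	memset(buf, 0, PAGE_SIZE);	/* stand-in for the real encoding step */

	__free_page(page);		/* unconditional free: we are the only owner */
	return 0;
}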
Example #16
/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(int noexec)
{
	struct page *page = alloc_page(GFP_KERNEL);
	unsigned long *table;

	if (!page)
		return NULL;
	page->index = 0;
	if (noexec) {
		struct page *shadow = alloc_page(GFP_KERNEL);
		if (!shadow) {
			__free_page(page);
			return NULL;
		}
		table = (unsigned long *) page_to_phys(shadow);
		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		page->index = (addr_t) table;
	}
	table = (unsigned long *) page_to_phys(page);
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
	return table;
}
Example #17
void ll_release_page(struct inode *inode, struct page *page, bool remove)
{
	kunmap(page);

	/*
	 * Always remove the page for a striped dir, because the page was
	 * built temporarily in the LMV layer
	 */
	if (inode && S_ISDIR(inode->i_mode) &&
	    ll_i2info(inode)->lli_lsm_md) {
		__free_page(page);
		return;
	}

	if (remove) {
		lock_page(page);
		if (likely(page->mapping))
			truncate_complete_page(page->mapping, page);
		unlock_page(page);
	}
	put_page(page);
}
Example #18
struct page* pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *page = NULL, *p;
	int color = ADDR_COLOR(address);
	int i;

	/* allocation order must match the order passed to split_page() */
	p = alloc_pages(GFP_KERNEL | __GFP_REPEAT, COLOR_ORDER);

	if (likely(p)) {
		split_page(p, COLOR_ORDER);

		/* keep the page whose cache color matches, free the rest */
		for (i = 0; i < (1 << COLOR_ORDER); i++) {
			if (PADDR_COLOR(page_address(p)) == color)
				page = p;
			else
				__free_page(p);
			p++;
		}
		clear_highpage(page);
	}

	return page;
}
Example #19
void swap_cgroup_swapoff(int type)
{
	int i;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return;

	mutex_lock(&swap_cgroup_mutex);
	ctrl = &swap_cgroup_ctrl[type];
	if (ctrl->map) {
		for (i = 0; i < ctrl->length; i++) {
			struct page *page = ctrl->map[i];
			if (page)
				__free_page(page);
		}
		vfree(ctrl->map);
		ctrl->map = NULL;
		ctrl->length = 0;
	}
	mutex_unlock(&swap_cgroup_mutex);
}
Example #20
/*
 * allocate buffer for swap_cgroup.
 */
static int swap_cgroup_prepare(int type)
{
	struct page *page;
	struct swap_cgroup_ctrl *ctrl;
	unsigned long idx, max;

	ctrl = &swap_cgroup_ctrl[type];

	for (idx = 0; idx < ctrl->length; idx++) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			goto not_enough_page;
		ctrl->map[idx] = page;
	}
	return 0;
not_enough_page:
	max = idx;
	for (idx = 0; idx < max; idx++)
		__free_page(ctrl->map[idx]);

	return -ENOMEM;
}
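Examples #19 and #20 form an alloc/free pair, and the not_enough_page label above is the standard unwind for a partially filled page array. The same shape as a reusable hedged sketch (names illustrative):

static int alloc_page_array(struct page **pages, unsigned long count)
{
	unsigned long i;

	for (i = 0; i < count; i++) {
		pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!pages[i])
			goto unwind;
	}
	return 0;

unwind:
	/* free only what was actually allocated */
	while (i--)
		__free_page(pages[i]);
	return -ENOMEM;
}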
Example #21
static void __fw_free_buf(struct kref *ref)
{
	struct firmware_buf *buf = to_fwbuf(ref);
	struct firmware_cache *fwc = buf->fwc;
	int i;

	pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
		 __func__, buf->fw_id, buf, buf->data,
		 (unsigned int)buf->size);

	list_del(&buf->list);
	spin_unlock(&fwc->lock);


	if (buf->fmt == PAGE_BUF) {
		vunmap(buf->data);
		for (i = 0; i < buf->nr_pages; i++)
			__free_page(buf->pages[i]);
		kfree(buf->pages);
	} else
		vfree(buf->data);
	kfree(buf);
}
Example #22
/**
 * f2fs_release_crypto_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void f2fs_release_crypto_ctx(struct f2fs_crypto_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & F2FS_WRITE_PATH_FL && ctx->w.bounce_page) {
		if (ctx->flags & F2FS_BOUNCE_PAGE_POOL_FREE_ENCRYPT_FL)
			mempool_free(ctx->w.bounce_page, f2fs_bounce_page_pool);
		else if (ctx->flags & F2FS_EMERGENT_PAGE_POOL_FREE_ENCRYPT_FL)
			mempool_free(ctx->w.bounce_page,
					f2fs_emergent_page_pool);
		else
			__free_page(ctx->w.bounce_page);
		ctx->w.bounce_page = NULL;
	}
	ctx->w.control_page = NULL;
	if (ctx->flags & F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(f2fs_crypto_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&f2fs_crypto_ctx_lock, flags);
		list_add(&ctx->free_list, &f2fs_free_crypto_ctxs);
		spin_unlock_irqrestore(&f2fs_crypto_ctx_lock, flags);
	}
}
Example #23
static unsigned long highmem_setup(void)
{
	unsigned long pfn;
	unsigned long reservedpages = 0;

	for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) {
		struct page *page = pfn_to_page(pfn);

		/* FIXME not sure about */
		if (memblock_is_reserved(pfn << PAGE_SHIFT))
			continue;
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		totalhigh_pages++;
		reservedpages++;
	}
	totalram_pages += totalhigh_pages;
	printk(KERN_INFO "High memory: %luk\n",
					totalhigh_pages << (PAGE_SHIFT-10));

	return reservedpages;
}
Example #24
static int fuse_readdir(struct file *file, void *dstbuf, filldir_t filldir)
{
	int err;
	size_t nbytes;
	struct page *page;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;

	if (is_bad_inode(inode))
		return -EIO;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		fuse_put_request(fc, req);
		return -ENOMEM;
	}
	req->out.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	fuse_read_fill(req, file, file->f_pos, PAGE_SIZE, FUSE_READDIR);
	fuse_request_send(fc, req);
	nbytes = req->out.args[0].size;
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err)
		err = parse_dirfile(page_address(page), nbytes, file, dstbuf,
				    filldir);

	__free_page(page);
	fuse_invalidate_attr(inode); /* atime changed */
	return err;
}
Example #25
/*
 * XXX: for the moment I don't want to use lnb_flags for osd-internal
 *      purposes as it's not very well defined ...
 *      instead I use the lowest bit of the address so that:
 *        arc buffer:  .lnb_data = abuf          (arc we loan for write)
 *        dbuf buffer: .lnb_data = dbuf | 1      (dbuf we get for read)
 *        copy buffer: .lnb_page->mapping = obj (page we allocate for write)
 *
 *      bzzz, to blame
 */
static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt,
			struct niobuf_local *lnb, int npages)
{
	struct osd_object *obj  = osd_dt_obj(dt);
	struct osd_device *osd = osd_obj2dev(obj);
	unsigned long      ptr;
	int                i;

	LASSERT(dt_object_exists(dt));
	LASSERT(obj->oo_db);

	for (i = 0; i < npages; i++) {
		if (lnb[i].lnb_page == NULL)
			continue;
		if (lnb[i].lnb_page->mapping == (void *)obj) {
			/* this is anonymous page allocated for copy-write */
			lnb[i].lnb_page->mapping = NULL;
			__free_page(lnb[i].lnb_page);
			atomic_dec(&osd->od_zerocopy_alloc);
		} else {
			/* see comment in osd_bufs_get_read() */
			ptr = (unsigned long)lnb[i].lnb_data;
			if (ptr & 1UL) {
				ptr &= ~1UL;
				dmu_buf_rele((void *)ptr, osd_zerocopy_tag);
				atomic_dec(&osd->od_zerocopy_pin);
			} else if (lnb[i].lnb_data != NULL) {
				dmu_return_arcbuf(lnb[i].lnb_data);
				atomic_dec(&osd->od_zerocopy_loan);
			}
		}
		lnb[i].lnb_page = NULL;
		lnb[i].lnb_data = NULL;
	}

	return 0;
}
Example #26
/**
 * fm10k_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void fm10k_clean_rx_ring(struct fm10k_ring *rx_ring)
{
	unsigned long size;
	u16 i;

	if (!rx_ring->rx_buffer)
		return;

	if (rx_ring->skb)
		dev_kfree_skb(rx_ring->skb);
	rx_ring->skb = NULL;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct fm10k_rx_buffer *buffer = &rx_ring->rx_buffer[i];
		/* clean-up will only set page pointer to NULL */
		if (!buffer->page)
			continue;

		dma_unmap_page(rx_ring->dev, buffer->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
		__free_page(buffer->page);

		buffer->page = NULL;
	}

	size = sizeof(struct fm10k_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}
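The ordering in fm10k_clean_rx_ring() matters: a page still mapped for DMA must be unmapped before __free_page() returns it to the allocator. A minimal hedged sketch of the per-buffer teardown, with a simplified, illustrative buffer type rather than the driver's own:

struct rx_page_buffer {			/* illustrative, not the driver's type */
	struct page *page;
	dma_addr_t dma;
};

static void rx_page_buffer_free(struct device *dev, struct rx_page_buffer *buffer)
{
	if (!buffer->page)
		return;

	/* drop the device's mapping first ... */
	dma_unmap_page(dev, buffer->dma, PAGE_SIZE, DMA_FROM_DEVICE);
	/* ... then give the page back to the allocator */
	__free_page(buffer->page);
	buffer->page = NULL;
}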
Example #27
/*
 * This relies on dma_map_sg() not touching sg[].page during merging.
 */
static void rds_message_purge(struct rds_message *rm)
{
	unsigned long i;

	if (unlikely(test_bit(RDS_MSG_PAGEVEC, &rm->m_flags)))
		return;

	for (i = 0; i < rm->data.op_nents; i++) {
		rdsdebug("putting data page %p\n", (void *)sg_page(&rm->data.op_sg[i]));
		/* XXX will have to put_page for page refs */
		__free_page(sg_page(&rm->data.op_sg[i]));
	}
	rm->data.op_nents = 0;

	if (rm->rdma.op_active)
		rds_rdma_free_op(&rm->rdma);
	if (rm->rdma.op_rdma_mr)
		rds_mr_put(rm->rdma.op_rdma_mr);

	if (rm->atomic.op_active)
		rds_atomic_free_op(&rm->atomic);
	if (rm->atomic.op_rdma_mr)
		rds_mr_put(rm->atomic.op_rdma_mr);
}
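The XXX comment in rds_message_purge() points at a real distinction: __free_page() frees a page unconditionally, while put_page() drops one reference and frees only when the count reaches zero, so pages that may be shared must use put_page(). A hedged illustration (function name is illustrative):

static void release_data_page(struct page *page, bool may_be_shared)
{
	if (may_be_shared)
		put_page(page);		/* drop our ref; freed when the last ref goes */
	else
		__free_page(page);	/* sole owner: free immediately */
}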
Example #28
static void __fw_free_buf(struct kref *ref)
{
	struct firmware_buf *buf = to_fwbuf(ref);
	struct firmware_cache *fwc = buf->fwc;

	pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
		 __func__, buf->fw_id, buf, buf->data,
		 (unsigned int)buf->size);

	list_del(&buf->list);
	spin_unlock(&fwc->lock);

#ifdef CONFIG_FW_LOADER_USER_HELPER
	if (buf->is_paged_buf) {
		int i;
		vunmap(buf->data);
		for (i = 0; i < buf->nr_pages; i++)
			__free_page(buf->pages[i]);
		kfree(buf->pages);
	} else
#endif
		vfree(buf->data);
	kfree(buf);
}
Example #29
/*
 * rd_release_device_space(): release the pages backing a ramdisk device.
 */
static void rd_release_device_space(struct rd_dev *rd_dev)
{
	u32 i, j, page_count = 0, sg_per_table;
	struct rd_dev_sg_table *sg_table;
	struct page *pg;
	struct scatterlist *sg;

	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
		return;

	sg_table = rd_dev->sg_table_array;

	for (i = 0; i < rd_dev->sg_table_count; i++) {
		sg = sg_table[i].sg_table;
		sg_per_table = sg_table[i].rd_sg_count;

		for (j = 0; j < sg_per_table; j++) {
			pg = sg_page(&sg[j]);
			if (pg) {
				__free_page(pg);
				page_count++;
			}
		}

		kfree(sg);
	}

	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	kfree(sg_table);
	rd_dev->sg_table_array = NULL;
	rd_dev->sg_table_count = 0;
}
Example #30
static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	if (atomic_dec_and_test(&buf->refcount)) {
		DEFINE_DMA_ATTRS(attrs);

		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->num_pages);
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, &attrs);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(buf->dma_sgt);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kfree(buf->pages);
		put_device(buf->dev);
		kfree(buf);
	}
}