Code Example #1
static void *reset_reason_ram_vmap(phys_addr_t start, size_t size)
{
	struct page **pages;
	phys_addr_t page_start;
	unsigned int page_count;
	pgprot_t prot;
	unsigned int i;
	void *vaddr;

	page_start = start - offset_in_page(start);
	page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);

	prot = pgprot_noncached(PAGE_KERNEL);

	pages = kmalloc(sizeof(struct page *) * page_count, GFP_KERNEL);
	if (!pages) {
		pr_err("%s: Failed to allocate array for %u pages\n", __func__,
			page_count);
		return NULL;
	}

	for (i = 0; i < page_count; i++) {
		phys_addr_t addr = page_start + i * PAGE_SIZE;
		pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
	}
	vaddr = vmap(pages, page_count, VM_MAP, prot);
	kfree(pages);

	return vaddr;
}
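
A minimal usage sketch for the helper above, assuming a hypothetical caller and made-up RESET_REASON_BASE/RESET_REASON_SIZE constants (not from the original driver). Since vmap() maps whole pages, the returned address corresponds to the start of the first page, so the caller re-applies offset_in_page() of the physical start; the mapping is released with vunmap().

/* Hypothetical caller; the constants are illustrative. */
static u32 read_reset_reason(void)
{
	void *base = reset_reason_ram_vmap(RESET_REASON_BASE, RESET_REASON_SIZE);
	u32 reason;

	if (!base)
		return 0;

	/* vmap() returned the containing page, so add back the sub-page
	 * offset of the physical start address. */
	reason = *(u32 *)(base + offset_in_page(RESET_REASON_BASE));
	vunmap(base);
	return reason;
}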
Code Example #2
/* Allocate a scatterlist for a vmalloc block. The scatterlist is allocated
   with kmalloc. Buffers of arbitrary alignment are supported.
   This function is derived from other vmalloc_to_sg functions in the kernel
   tree, but note that its second argument is a size in bytes, not in pages.
 */
static struct scatterlist *vmalloc_to_sg(unsigned char *const buf,
					 size_t const bytes)
{
	struct scatterlist *sg_array = NULL;
	struct page *pg;
	/* Allow non-page-aligned pointers, so the first and last page may
	   both be partial. */
	unsigned const page_count = bytes / PAGE_SIZE + 2;
	unsigned char *ptr;
	unsigned i;

	sg_array = kcalloc(page_count, sizeof(*sg_array), GFP_KERNEL);
	if (sg_array == NULL)
		goto abort;
	sg_init_table(sg_array, page_count);
	for (i = 0, ptr = (void *)((unsigned long)buf & PAGE_MASK);
	     ptr < buf + bytes;
	     i++, ptr += PAGE_SIZE) {
		pg = vmalloc_to_page(ptr);
		if (pg == NULL)
			goto abort;
		sg_set_page(&sg_array[i], pg, PAGE_SIZE, 0);
	}
	/* Rectify the first page which may be partial. The last page may
	   also be partial but its offset is correct so it doesn't matter. */
	sg_array[0].offset = offset_in_page(buf);
	sg_array[0].length = PAGE_SIZE - offset_in_page(buf);
	return sg_array;
abort:
	if (sg_array != NULL)
		kfree(sg_array);
	return NULL;
}
Code Example #3
static int cpu_set(struct drm_i915_gem_object *obj,
		   unsigned long offset,
		   u32 v)
{
	unsigned int needs_clflush;
	struct page *page;
	u32 *map;
	int err;

	err = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
	if (err)
		return err;

	page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
	map = kmap_atomic(page);
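	/* offset is a byte offset into the object; keep only its in-page
	 * part and scale it to a u32 index into the page mapped above. */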
	if (needs_clflush & CLFLUSH_BEFORE)
		clflush(map+offset_in_page(offset) / sizeof(*map));
	map[offset_in_page(offset) / sizeof(*map)] = v;
	if (needs_clflush & CLFLUSH_AFTER)
		clflush(map+offset_in_page(offset) / sizeof(*map));
	kunmap_atomic(map);

	i915_gem_obj_finish_shmem_access(obj);
	return 0;
}
Code Example #4
File: netvsc_drv.c Project: ayiyaliing/lis-next
static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_page_buffer *pb)
{
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	if (hdr != NULL)
		slots_used += fill_pg_buf(virt_to_page(hdr),
					offset_in_page(hdr),
					len, &pb[slots_used]);

	slots_used += fill_pg_buf(virt_to_page(data),
				offset_in_page(data),
				skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					frag->page_offset,
					skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}
Code Example #5
File: hmac.c Project: kzlin129/tt-gpl
void crypto_hmac_final(struct crypto_tfm *tfm, u8 *key,
                       unsigned int *keylen, u8 *out)
{
	unsigned int i;
	struct scatterlist tmp;
	char *opad = tfm->crt_digest.dit_hmac_block;
	
	if (*keylen > crypto_tfm_alg_blocksize(tfm)) {
		hash_key(tfm, key, *keylen);
		*keylen = crypto_tfm_alg_digestsize(tfm);
	}

	crypto_digest_final(tfm, out);

	memset(opad, 0, crypto_tfm_alg_blocksize(tfm));
	memcpy(opad, key, *keylen);
		
	for (i = 0; i < crypto_tfm_alg_blocksize(tfm); i++)
		opad[i] ^= 0x5c;

	tmp.page = virt_to_page(opad);
	tmp.offset = offset_in_page(opad);
	tmp.length = crypto_tfm_alg_blocksize(tfm);

	crypto_digest_init(tfm);
	crypto_digest_update(tfm, &tmp, 1);
	
	tmp.page = virt_to_page(out);
	tmp.offset = offset_in_page(out);
	tmp.length = crypto_tfm_alg_digestsize(tfm);
	
	crypto_digest_update(tfm, &tmp, 1);
	crypto_digest_final(tfm, out);
}
Code Example #6
static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}
Code Example #7
File: vme_dma.c Project: bradomyn/vme_driver
/**
 * sgl_map_user_pages() - Pin user pages and put them into a scatter gather list
 * @sgl: Scatter gather list to fill
 * @nr_pages: Number of pages
 * @uaddr: User buffer address
 * @count: Length of user buffer
 * @rw: Direction (0=read from userspace / 1 = write to userspace)
 * @to_user: 1 - transfer is to/from a user-space buffer. 0 - kernel buffer.
 *
 *  This function pins the pages of the userspace buffer and fills in the
 * scatter gather list.
 */
static int sgl_map_user_pages(struct scatterlist *sgl,
			      const unsigned int nr_pages, unsigned long uaddr,
			      size_t length, int rw, int to_user)
{
	int rc;
	int i;
	struct page **pages;

	if ((pages = kmalloc(nr_pages * sizeof(struct page *),
			     GFP_KERNEL)) == NULL)
		return -ENOMEM;

	if (to_user) {
		rc = sgl_fill_user_pages(pages, uaddr, nr_pages, rw);
		if (rc >= 0 && rc < nr_pages) {
			/* Some pages were pinned, release these */
			for (i = 0; i < rc; i++)
				page_cache_release(pages[i]);
			rc = -ENOMEM;
			goto out_free;
		}
	} else {
		rc = sgl_fill_kernel_pages(pages, uaddr, nr_pages, rw);
	}

	if (rc < 0)
		/* We completely failed to get the pages */
		goto out_free;

	/* Populate the scatter/gather list */
	sg_init_table(sgl, nr_pages);

	/* Take a shortcut here when we only have a single page transfer */
	if (nr_pages > 1) {
		unsigned int off = offset_in_page(uaddr);
		unsigned int len = PAGE_SIZE - off;

		sg_set_page (&sgl[0], pages[0], len, off);
		length -= len;

		for (i = 1; i < nr_pages; i++) {
			sg_set_page (&sgl[i], pages[i],
				     (length < PAGE_SIZE) ? length : PAGE_SIZE,
				     0);
			length -= PAGE_SIZE;
		}
	} else
		sg_set_page (&sgl[0], pages[0], length, offset_in_page(uaddr));

out_free:
	/* We do not need the pages array anymore */
	kfree(pages);

	return nr_pages;
}
Code Example #8
File: user_fbmap.c Project: DavionKnight/H18CE-1604C
/* Releases a SG list from user space */
static int unmap_sglist_from_user(struct iovec *vector,
				  unsigned long user_vec, int size)
{
	int i, ret;
	for (i = 0; i < size; i++) {
		ret = do_munmap(current->mm,
			((unsigned long)vector[i].iov_base) & PAGE_MASK,
			vector[i].iov_len + offset_in_page(vector[i].iov_base));
		if (ret)
			return ret;
	}
	return do_munmap(current->mm, user_vec & PAGE_MASK,
			 size + offset_in_page(user_vec));
}
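
The pattern used twice above (mask the start address down with PAGE_MASK, grow the length by the offset_in_page() that was masked off) is the usual way to turn an arbitrary byte range into the page range that covers it; do_munmap() then rounds the length up to whole pages. A hypothetical helper making that arithmetic explicit (not part of the original file):

/* Hypothetical helper: page-aligned start and covering length for a range. */
static void covering_pages(unsigned long addr, size_t len,
			   unsigned long *start, size_t *span)
{
	*start = addr & PAGE_MASK;
	*span = len + offset_in_page(addr);	/* caller rounds up to pages */
}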
Code Example #9
File: dax.c Project: nikajia/linux
int dax_clear_blocks(struct inode *inode, sector_t block, long size)
{
	struct block_device *bdev = inode->i_sb->s_bdev;
	sector_t sector = block << (inode->i_blkbits - 9);

	might_sleep();
	do {
		void __pmem *addr;
		unsigned long pfn;
		long count;

		count = bdev_direct_access(bdev, sector, &addr, &pfn, size);
		if (count < 0)
			return count;
		BUG_ON(size < count);
		while (count > 0) {
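			/* Clear only up to the end of the current page; after
			 * this first, possibly partial, chunk addr is page
			 * aligned again. */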
			unsigned pgsz = PAGE_SIZE - offset_in_page(addr);
			if (pgsz > count)
				pgsz = count;
			clear_pmem(addr, pgsz);
			addr += pgsz;
			size -= pgsz;
			count -= pgsz;
			BUG_ON(pgsz & 511);
			sector += pgsz / 512;
			cond_resched();
		}
	} while (size);

	wmb_pmem();
	return 0;
}
Code Example #10
File: netback.c Project: xf739645524/kernel-rhel5
static void netbk_gop_skb(struct sk_buff *skb,
			  struct netrx_pending_operations *npo)
{
	netif_t *netif = netdev_priv(skb->dev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;
	int extra;
	struct netbk_rx_meta *head_meta, *meta;

	head_meta = npo->meta + npo->meta_prod++;
	head_meta->frag.page_offset = skb_shinfo(skb)->gso_type;
	head_meta->frag.size = skb_shinfo(skb)->gso_size;
	extra = !!head_meta->frag.size + 1;

	for (i = 0; i < nr_frags; i++) {
		meta = npo->meta + npo->meta_prod++;
		meta->frag = skb_shinfo(skb)->frags[i];
		meta->id = netbk_gop_frag(netif, meta, i + extra, npo,
					  meta->frag.page,
					  meta->frag.size,
					  meta->frag.page_offset);
	}

	/*
	 * This must occur at the end to ensure that we don't trash
	 * skb_shinfo until we're done.
	 */
	head_meta->id = netbk_gop_frag(netif, head_meta, 0, npo,
				       virt_to_page(skb->data),
				       skb_headlen(skb),
				       offset_in_page(skb->data));

	netif->rx.req_cons += nr_frags + extra;
}
Code Example #11
File: netback.c Project: kenkit/AndromadusMod-New
/*
 * Figure out how many ring slots we're going to need to send @skb to
 * the guest. This function is essentially a dry run of
 * netbk_gop_frag_copy.
 */
unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
{
	unsigned int count;
	int i, copy_off;

	count = DIV_ROUND_UP(
			offset_in_page(skb->data)+skb_headlen(skb), PAGE_SIZE);
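	/* e.g. with PAGE_SIZE 4096: 100 bytes of linear data starting at
	 * in-page offset 4060 span two pages, DIV_ROUND_UP(4060 + 100, 4096)
	 * == 2, while the same 100 bytes at offset 0 need only one slot. */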

	copy_off = skb_headlen(skb) % PAGE_SIZE;

	if (skb_shinfo(skb)->gso_size)
		count++;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		unsigned long size = skb_shinfo(skb)->frags[i].size;
		unsigned long bytes;
		while (size > 0) {
			BUG_ON(copy_off > MAX_BUFFER_OFFSET);

			if (start_new_rx_buffer(copy_off, size, 0)) {
				count++;
				copy_off = 0;
			}

			bytes = size;
			if (copy_off + bytes > MAX_BUFFER_OFFSET)
				bytes = MAX_BUFFER_OFFSET - copy_off;

			copy_off += bytes;
			size -= bytes;
		}
	}
	return count;
}
Code Example #12
File: etnaviv_drv.c Project: 020gzh/linux
static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_etnaviv_gem_userptr *args = data;
	int access;

	if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
	    args->flags == 0)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size) ||
	    (uintptr_t)args->user_ptr != args->user_ptr ||
	    (u32)args->user_size != args->user_size ||
	    args->user_ptr & ~PAGE_MASK)
		return -EINVAL;

	if (args->flags & ETNA_USERPTR_WRITE)
		access = VERIFY_WRITE;
	else
		access = VERIFY_READ;

	if (!access_ok(access, (void __user *)(unsigned long)args->user_ptr,
		       args->user_size))
		return -EFAULT;

	return etnaviv_gem_new_userptr(dev, file, args->user_ptr,
				       args->user_size, args->flags,
				       &args->handle);
}
Code Example #13
File: user_fbmap.c Project: DavionKnight/H18CE-1604C
/* Unmaps an fbchain from user space */
int pme_mem_fb_unmap(struct file *filep, unsigned long user_addr)
{
	int ret;
	struct pme_fb_vma *mem_node;
	struct vm_area_struct *vma;
	void *iovec_mem;

	down_write(&current->mm->mmap_sem);
	vma = find_vma(current->mm, user_addr);
	if (!vma) {
		ret = -EINVAL;
		goto done;
	}
	/* Get the type from the node */
	mem_node = vma->vm_private_data;
	if (mem_node) {
		if (!mem_node->iovec_pages)
			ret = do_munmap(current->mm, user_addr & PAGE_MASK,
				mem_node->mapped_size +
				offset_in_page(user_addr));
		else {
			iovec_mem = kmap(mem_node->iovec_pages);
			ret = unmap_sglist_from_user(iovec_mem,
					user_addr, mem_node->mapped_size);
			kunmap(mem_node->iovec_pages);
		}
	} else
		ret = -EINVAL;
done:
	up_write(&current->mm->mmap_sem);
	return ret;
}
Code Example #14
File: ppp_mppe.c Project: xf739645524/kernel-rhel5
static void
setup_sg(struct scatterlist *sg, const void *address, unsigned int length)
{
	sg[0].page = virt_to_page(address);
	sg[0].offset = offset_in_page(address);
	sg[0].length = length;
}
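
Worth noting (an addition, not from the original file): virt_to_page()/offset_in_page() as used here is only valid for linearly mapped memory such as kmalloc() buffers; a vmalloc() buffer has to be resolved page by page with vmalloc_to_page(), as in Code Examples #2 and #22. A minimal hypothetical caller:

/* Hypothetical caller: wrap a linearly mapped key buffer in a one-entry
 * list (old scatterlist layout, as above). */
static int describe_key(struct scatterlist *sg, u8 *key, unsigned int keylen)
{
	if (is_vmalloc_addr(key))
		return -EINVAL;	/* virt_to_page() would yield a bogus page */

	setup_sg(sg, key, keylen);
	return 0;
}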
Code Example #15
/**
 * pack_sg_list_p - Just like pack_sg_list. Instead of taking a buffer,
 * this takes a list of pages.
 * @sg: scatter/gather list to pack into
 * @start: which segment of the sg_list to start at
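 * @limit: index one past the last segment of @sg that may be used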
 * @pdata: a list of pages to add into sg.
 * @nr_pages: number of pages to pack into the scatter/gather list
 * @data: data to pack into scatter/gather list
 * @count: amount of data to pack into the scatter/gather list
 */
static int
pack_sg_list_p(struct scatterlist *sg, int start, int limit,
	       struct page **pdata, int nr_pages, char *data, int count)
{
	int i = 0, s;
	int data_off;
	int index = start;

	BUG_ON(nr_pages > (limit - start));
	/*
	 * If the first page doesn't start at a
	 * page boundary, find the offset.
	 */
	data_off = offset_in_page(data);
	while (nr_pages) {
		s = rest_of_page(data);
		if (s > count)
			s = count;
		/* Make sure we don't terminate early. */
		sg_unmark_end(&sg[index]);
		sg_set_page(&sg[index++], pdata[i++], s, data_off);
		data_off = 0;
		data += s;
		count -= s;
		nr_pages--;
	}

	if (index-start)
		sg_mark_end(&sg[index - 1]);
	return index - start;
}
Code Example #16
int hfsplus_submit_bio(struct block_device *bdev, sector_t sector,
		void *data, int rw)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct bio *bio;

	bio = bio_alloc(GFP_NOIO, 1);
	bio->bi_sector = sector;
	bio->bi_bdev = bdev;
	bio->bi_end_io = hfsplus_end_io_sync;
	bio->bi_private = &wait;

	/*
	 * We always submit one sector at a time, so bio_add_page must not fail.
	 */
	if (bio_add_page(bio, virt_to_page(data), HFSPLUS_SECTOR_SIZE,
			 offset_in_page(data)) != HFSPLUS_SECTOR_SIZE)
		BUG();

	submit_bio(rw, bio);
	wait_for_completion(&wait);

	if (!bio_flagged(bio, BIO_UPTODATE))
		return -EIO;
	return 0;
}
Code Example #17
File: scatterlist.c Project: haozhun/ucore_plus
/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		 SG entry
 * @buf:	 Virtual address for IO
 * @buflen:	 IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
  //pr_debug("### %s:%d SG %08x %d\n", __FILE__,__LINE__, buf, buflen);
	sg_init_table(sg, 1);
	sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
	//sg_set_buf(sg, buf, buflen);
}
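
A minimal usage sketch in the Linux style this helper mirrors (the device, buffer and direction are illustrative assumptions): because the entry is built with virt_to_page(), the buffer must come from the linear mapping (e.g. kmalloc()), not from vmalloc() or, with VMAP_STACK, the stack.

/* Hypothetical caller: map one kmalloc()ed buffer for a device write. */
static int map_one_buf(struct device *dev, void *buf, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, buf, len);
	if (dma_map_sg(dev, &sg, 1, DMA_TO_DEVICE) != 1)
		return -EIO;
	/* ... start the transfer, then dma_unmap_sg() on completion ... */
	return 0;
}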
Code Example #18
File: pgtable.c Project: AshishNamdev/linux
unsigned long vmalloc_to_phys(void *va)
{
	unsigned long pfn = vmalloc_to_pfn(va);

	BUG_ON(!pfn);
	return __pa(pfn_to_kaddr(pfn)) + offset_in_page(va);
}
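
A small hypothetical check of the arithmetic: vmalloc_to_pfn() only identifies the page, so the byte position within the page has to be restored with offset_in_page().

/* Hypothetical sanity check: the low bits of the physical address equal
 * the in-page offset of the virtual address. */
static void vmalloc_to_phys_demo(void)
{
	u8 *buf = vmalloc(2 * PAGE_SIZE);

	if (!buf)
		return;
	WARN_ON(offset_in_page(vmalloc_to_phys(buf + 100)) !=
		offset_in_page(buf + 100));
	vfree(buf);
}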
Code Example #19
File: mtdram.c Project: ReneNyffenegger/linux
static int ram_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	*virt = mtd->priv + from;
	*retlen = len;

	if (phys) {
		/* limit retlen to the number of contiguous physical pages */
		unsigned long page_ofs = offset_in_page(*virt);
		void *addr = *virt - page_ofs;
		unsigned long pfn1, pfn0 = vmalloc_to_pfn(addr);

		*phys = __pfn_to_phys(pfn0) + page_ofs;
		len += page_ofs;
		while (len > PAGE_SIZE) {
			len -= PAGE_SIZE;
			addr += PAGE_SIZE;
			pfn0++;
			pfn1 = vmalloc_to_pfn(addr);
			if (pfn1 != pfn0) {
				*retlen = addr - *virt;
				break;
			}
		}
	}

	return 0;
}
Code Example #20
static void xen_vcpu_setup(int cpu)
{
	struct vcpu_register_vcpu_info info;
	int err;
	struct vcpu_info *vcpup;

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	if (cpu < MAX_VIRT_CPUS)
		per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];

	if (!have_vcpu_info_placement) {
		if (cpu >= MAX_VIRT_CPUS)
			clamp_max_cpus();
		return;
	}

	vcpup = &per_cpu(xen_vcpu_info, cpu);
	info.mfn = arbitrary_virt_to_mfn(vcpup);
	info.offset = offset_in_page(vcpup);

	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);

	if (err) {
		printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
		have_vcpu_info_placement = 0;
		clamp_max_cpus();
	} else {
		per_cpu(xen_vcpu, cpu) = vcpup;
	}
}
Code Example #21
__be32
nfs4_make_rec_clidname(char *dname, struct xdr_netobj *clname)
{
	struct xdr_netobj cksum;
	struct hash_desc desc;
	struct scatterlist sg[1];
	__be32 status = nfserr_resource;

	dprintk("NFSD: nfs4_make_rec_clidname for %.*s\n",
			clname->len, clname->data);
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	desc.tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm))
		goto out_no_tfm;
	cksum.len = crypto_hash_digestsize(desc.tfm);
	cksum.data = kmalloc(cksum.len, GFP_KERNEL);
	if (cksum.data == NULL)
		goto out;

	sg[0].page = virt_to_page(clname->data);
	sg[0].offset = offset_in_page(clname->data);
	sg[0].length = clname->len;

	if (crypto_hash_digest(&desc, sg, sg->length, cksum.data))
		goto out;

	md5_to_hex(dname, cksum.data);

	kfree(cksum.data);
	status = nfs_ok;
out:
	crypto_free_hash(desc.tfm);
out_no_tfm:
	return status;
}
Code Example #22
File: util.c Project: Anjali05/linux
/*
 * Generally it isn't good to access .bi_io_vec and .bi_vcnt directly,
 * the preferred way is bio_add_page, but in this case, bch_bio_map()
 * supposes that the bvec table is empty, so it is safe to access
 * .bi_vcnt & .bi_io_vec in this way even after multipage bvec is
 * supported.
 */
void bch_bio_map(struct bio *bio, void *base)
{
	size_t size = bio->bi_iter.bi_size;
	struct bio_vec *bv = bio->bi_io_vec;

	BUG_ON(!bio->bi_iter.bi_size);
	BUG_ON(bio->bi_vcnt);

	bv->bv_offset = base ? offset_in_page(base) : 0;
	goto start;

	for (; size; bio->bi_vcnt++, bv++) {
		bv->bv_offset	= 0;
start:		bv->bv_len	= min_t(size_t, PAGE_SIZE - bv->bv_offset,
					size);
		if (base) {
			bv->bv_page = is_vmalloc_addr(base)
				? vmalloc_to_page(base)
				: virt_to_page(base);

			base += bv->bv_len;
		}

		size -= bv->bv_len;
	}
}
Code Example #23
File: crypto.c Project: AshishNamdev/linux
int ubifs_encrypt(const struct inode *inode, struct ubifs_data_node *dn,
		  unsigned int in_len, unsigned int *out_len, int block)
{
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	void *p = &dn->data;
	struct page *ret;
	unsigned int pad_len = round_up(in_len, UBIFS_CIPHER_BLOCK_SIZE);

	ubifs_assert(pad_len <= *out_len);
	dn->compr_size = cpu_to_le16(in_len);

	/* pad to full block cipher length */
	if (pad_len != in_len)
		memset(p + in_len, 0, pad_len - in_len);

	ret = fscrypt_encrypt_page(inode, virt_to_page(&dn->data), pad_len,
			offset_in_page(&dn->data), block, GFP_NOFS);
	if (IS_ERR(ret)) {
		ubifs_err(c, "fscrypt_encrypt_page failed: %ld", PTR_ERR(ret));
		return PTR_ERR(ret);
	}
	*out_len = pad_len;

	return 0;
}
Code Example #24
File: queue.c Project: cilynx/dd-wrt
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

	mq->bounce_sg_len = sg_len;

	/*
	 * Shortcut in the event we only get a single entry.
	 */
	if (sg_len == 1) {
		memcpy(mq->sg, mq->bounce_sg, sizeof(struct scatterlist));
		return 1;
	}

	mq->sg[0].page = virt_to_page(mq->bounce_buf);
	mq->sg[0].offset = offset_in_page(mq->bounce_buf);
	mq->sg[0].length = 0;

	while (sg_len) {
		mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
		sg_len--;
	}

	return 1;
}
Code Example #25
File: coresight-stm.c Project: Anjali05/linux
static phys_addr_t
stm_mmio_addr(struct stm_data *stm_data, unsigned int master,
	      unsigned int channel, unsigned int nr_chans)
{
	struct stm_drvdata *drvdata = container_of(stm_data,
						   struct stm_drvdata, stm);
	phys_addr_t addr;

	addr = drvdata->chs.phys + channel * BYTES_PER_CHANNEL;

	if (offset_in_page(addr) ||
	    offset_in_page(nr_chans * BYTES_PER_CHANNEL))
		return 0;

	return addr;
}
Code Example #26
/*
 * Prepare an SKB to be transmitted to the frontend.
 *
 * This function is responsible for allocating grant operations, meta
 * structures, etc.
 *
 * It returns the number of meta structures consumed. The number of
 * ring slots used is always equal to the number of meta slots used
 * plus the number of GSO descriptors used. Currently, we use either
 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
 * frontend-side LRO).
 */
static int netbk_gop_skb(struct sk_buff *skb,
			 struct netrx_pending_operations *npo)
{
	struct xenvif *vif = netdev_priv(skb->dev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;
	struct xen_netif_rx_request *req;
	struct netbk_rx_meta *meta;
	unsigned char *data;
	int head = 1;
	int old_meta_prod;

	old_meta_prod = npo->meta_prod;

	/* Set up a GSO prefix descriptor, if necessary */
	if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
		meta = npo->meta + npo->meta_prod++;
		meta->gso_size = skb_shinfo(skb)->gso_size;
		meta->size = 0;
		meta->id = req->id;
	}

	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
	meta = npo->meta + npo->meta_prod++;

	if (!vif->gso_prefix)
		meta->gso_size = skb_shinfo(skb)->gso_size;
	else
		meta->gso_size = 0;

	meta->size = 0;
	meta->id = req->id;
	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	data = skb->data;
	while (data < skb_tail_pointer(skb)) {
		unsigned int offset = offset_in_page(data);
		unsigned int len = PAGE_SIZE - offset;

		if (data + len > skb_tail_pointer(skb))
			len = skb_tail_pointer(skb) - data;

		netbk_gop_frag_copy(vif, skb, npo,
				    virt_to_page(data), len, offset, &head);
		data += len;
	}

	for (i = 0; i < nr_frags; i++) {
		netbk_gop_frag_copy(vif, skb, npo,
				    skb_frag_page(&skb_shinfo(skb)->frags[i]),
				    skb_frag_size(&skb_shinfo(skb)->frags[i]),
				    skb_shinfo(skb)->frags[i].page_offset,
				    &head);
	}

	return npo->meta_prod - old_meta_prod;
}
Code Example #27
File: nx-842.c Project: 24hours/linux
static inline unsigned long nx842_get_pa(void *addr)
{
	if (is_vmalloc_addr(addr))
		return page_to_phys(vmalloc_to_page(addr))
		       + offset_in_page(addr);
	else
		return __pa(addr);
}
Code Example #28
File: xio_sg_scatter.c Project: xiaom/accelio
/*---------------------------------------------------------------------------*/
static inline void xio_sg_set_buf(struct scatterlist *sg, const void *buf,
				  uint32_t buflen, void *mr)
{
#ifdef XIO_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
}
Code Example #29
File: xio_sg_scatter.c Project: xiaom/accelio
/*---------------------------------------------------------------------------*/
static inline void xio_sg_set_addr(struct scatterlist *sg, void *addr)
{
	/* keep the length */
#ifdef XIO_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	sg_set_page(sg, virt_to_page(addr), sg->length, offset_in_page(addr));
}
Code Example #30
static bool ux500_configure_channel(struct dma_channel *channel,
                                    u16 packet_sz, u8 mode,
                                    dma_addr_t dma_addr, u32 len)
{
    struct ux500_dma_channel *ux500_channel = channel->private_data;
    struct musb_hw_ep *hw_ep = ux500_channel->hw_ep;
    struct dma_chan *dma_chan = ux500_channel->dma_chan;
    struct dma_async_tx_descriptor *dma_desc;
    enum dma_transfer_direction direction;
    struct scatterlist sg;
    struct dma_slave_config slave_conf;
    enum dma_slave_buswidth addr_width;
    dma_addr_t usb_fifo_addr = (MUSB_FIFO_OFFSET(hw_ep->epnum) +
                                ux500_channel->controller->phy_base);
    struct musb *musb = ux500_channel->controller->private_data;

    dev_dbg(musb->controller,
            "packet_sz=%d, mode=%d, dma_addr=0x%llu, len=%d is_tx=%d\n",
            packet_sz, mode, (unsigned long long) dma_addr,
            len, ux500_channel->is_tx);

    ux500_channel->cur_len = len;

    sg_init_table(&sg, 1);
    sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_addr)), len,
                offset_in_page(dma_addr));
    sg_dma_address(&sg) = dma_addr;
    sg_dma_len(&sg) = len;

    direction = ux500_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
    addr_width = (len & 0x3) ? DMA_SLAVE_BUSWIDTH_1_BYTE :
                 DMA_SLAVE_BUSWIDTH_4_BYTES;

    slave_conf.direction = direction;
    slave_conf.src_addr = usb_fifo_addr;
    slave_conf.src_addr_width = addr_width;
    slave_conf.src_maxburst = 16;
    slave_conf.dst_addr = usb_fifo_addr;
    slave_conf.dst_addr_width = addr_width;
    slave_conf.dst_maxburst = 16;
    slave_conf.device_fc = false;

    dma_chan->device->device_control(dma_chan, DMA_SLAVE_CONFIG,
                                     (unsigned long) &slave_conf);

    dma_desc = dmaengine_prep_slave_sg(dma_chan, &sg, 1, direction,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    if (!dma_desc)
        return false;

    dma_desc->callback = ux500_dma_callback;
    dma_desc->callback_param = channel;
    ux500_channel->cookie = dma_desc->tx_submit(dma_desc);

    dma_async_issue_pending(dma_chan);

    return true;
}