Example #1
/*---------------------------------------------------------------------------*/
static inline void xio_tbl_set_nents(struct sg_table *tbl, uint32_t nents)
{
	struct scatterlist *sg;
	int i;

	if (!tbl || tbl->orig_nents < nents)
		return;

#ifdef XIO_DEBUG_SG
	verify_tbl(tbl);
#endif

	sg = tbl->sgl;
	/* tbl->nents is unsigned, so if it is zero, tbl->nents - 1
	 * underflows to a huge number; guard against that here.
	 */
	if (tbl->nents && (tbl->nents < tbl->orig_nents)) {
		for (i = 0; i < tbl->nents - 1; i++)
			sg = sg_next(sg);
		sg_unmark_end(sg);
	}

	if (!nents) {
		tbl->nents = nents;
		return;
	}

	sg = tbl->sgl;
	for (i = 0; i < nents - 1; i++)
		sg = sg_next(sg);

	sg_mark_end(sg);

	tbl->nents = nents;
}
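A minimal usage sketch (hypothetical, not part of the original source) of the helper above, assuming the table was allocated with at least two entries:

#include <linux/scatterlist.h>

static void demo_shrink_and_restore(struct sg_table *tbl)
{
	/* Terminate the list after the first two entries; the helper
	 * clears the stale end marker first, so the chain stays
	 * walkable. */
	xio_tbl_set_nents(tbl, 2);

	/* Grow back to the full allocation; legal because nents is
	 * never allowed to exceed orig_nents. */
	xio_tbl_set_nents(tbl, tbl->orig_nents);
}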
Example #2
/**
 * pack_sg_list_p - Just like pack_sg_list, but takes a list of pages
 * instead of a buffer.
 * @sg: scatter/gather list to pack into
 * @start: which segment of the sg_list to start at
 * @limit: upper bound on the sg entries that may be used
 * @pdata: a list of pages to add into sg
 * @nr_pages: number of pages to pack into the scatter/gather list
 * @data: data to pack into the scatter/gather list
 * @count: amount of data to pack into the scatter/gather list
 */
static int
pack_sg_list_p(struct scatterlist *sg, int start, int limit,
	       struct page **pdata, int nr_pages, char *data, int count)
{
	int i = 0, s;
	int data_off;
	int index = start;

	BUG_ON(nr_pages > (limit - start));
	/*
	 * If the data doesn't start on a page boundary,
	 * record its offset into the first page.
	 */
	data_off = offset_in_page(data);
	while (nr_pages) {
		s = rest_of_page(data);
		if (s > count)
			s = count;
		/* Make sure we don't terminate early. */
		sg_unmark_end(&sg[index]);
		sg_set_page(&sg[index++], pdata[i++], s, data_off);
		data_off = 0;
		data += s;
		count -= s;
		nr_pages--;
	}

	if (index - start)
		sg_mark_end(&sg[index - 1]);
	return index - start;
}
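Both pack helpers in this collection call rest_of_page(), which the excerpts do not show. In net/9p/trans_virtio.c it is simply the number of bytes from data to the end of its page:

static unsigned int rest_of_page(void *data)
{
	return PAGE_SIZE - offset_in_page(data);
}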
Example #3
static int alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size,
		    int first_coalesce)
{
	struct page_frag *pfrag;
	unsigned int size = *sg_size;
	int num_elem = *sg_num_elem, use = 0, rc = 0;
	struct scatterlist *sge;
	unsigned int orig_offset;

	len -= size;
	pfrag = sk_page_frag(sk);

	while (len > 0) {
		if (!sk_page_frag_refill(sk, pfrag)) {
			rc = -ENOMEM;
			goto out;
		}

		use = min_t(int, len, pfrag->size - pfrag->offset);

		if (!sk_wmem_schedule(sk, use)) {
			rc = -ENOMEM;
			goto out;
		}

		sk_mem_charge(sk, use);
		size += use;
		orig_offset = pfrag->offset;
		pfrag->offset += use;

		sge = sg + num_elem - 1;
		if (num_elem > first_coalesce && sg_page(sge) == pfrag->page &&
		    sge->offset + sge->length == orig_offset) {
			sge->length += use;
		} else {
			sge++;
			sg_unmark_end(sge);
			sg_set_page(sge, pfrag->page, use, orig_offset);
			get_page(pfrag->page);
			++num_elem;
			if (num_elem == MAX_SKB_FRAGS) {
				rc = -ENOSPC;
				break;
			}
		}

		len -= use;
	}
out:
	*sg_size = size;
	*sg_num_elem = num_elem;
	return rc;
}
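A usage sketch modelled on alloc_plaintext_sg() from the same file (field names follow the tls_sw code shown in the other examples; treat the exact signature as an assumption):

static int demo_alloc_plaintext_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);

	/* Entries at index < first_coalesce are never extended, so
	 * frags that belong to a previous pending record stay intact. */
	return alloc_sg(sk, len, ctx->sg_plaintext_data,
			&ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
			tls_ctx->pending_open_record_frags);
}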
Example #4
static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      int length)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	struct page *pages[MAX_SKB_FRAGS];

	size_t offset;
	ssize_t copied, use;
	int i = 0;
	unsigned int size = ctx->sg_plaintext_size;
	int num_elem = ctx->sg_plaintext_num_elem;
	int rc = 0;
	int maxpages;

	while (length > 0) {
		i = 0;
		maxpages = ARRAY_SIZE(ctx->sg_plaintext_data) - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages(from, pages,
					    length,
					    maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&ctx->sg_plaintext_data[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&ctx->sg_plaintext_data[num_elem]);
			sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;

			++i;
			++num_elem;
		}
	}

out:
	ctx->sg_plaintext_size = size;
	ctx->sg_plaintext_num_elem = num_elem;
	return rc;
}
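A hypothetical sendmsg-path sketch: map up to try_to_copy bytes of the user iovec straight into the plaintext scatterlist instead of copying. The pages pinned by iov_iter_get_pages() must later be released with put_page() once the record has been encrypted and sent.

static int demo_zerocopy_send(struct sock *sk, struct msghdr *msg,
			      int try_to_copy)
{
	/* On failure the caller would fall back to a copying path. */
	return zerocopy_from_iter(sk, &msg->msg_iter, try_to_copy);
}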
Example #5
/**
 * blk_rq_map_integrity_sg - Map integrity metadata into a scatterlist
 * @q:		request queue
 * @bio:	bio with integrity metadata attached
 * @sglist:	target scatterlist
 *
 * Description: Map the integrity vectors of the request into a
 * scatterlist.  The scatterlist must be large enough to hold all
 * elements, i.e. sized using blk_rq_count_integrity_sg().
 */
int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
			    struct scatterlist *sglist)
{
	struct bio_vec iv, ivprv = { NULL };
	struct scatterlist *sg = NULL;
	unsigned int segments = 0;
	struct bvec_iter iter;
	int prev = 0;

	bio_for_each_integrity_vec(iv, bio, iter) {

		if (prev) {
			if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv))
				goto new_segment;

			if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv))
				goto new_segment;

			if (sg->length + iv.bv_len > queue_max_segment_size(q))
				goto new_segment;

			sg->length += iv.bv_len;
		} else {
new_segment:
			if (!sg)
				sg = sglist;
			else {
				sg_unmark_end(sg);
				sg = sg_next(sg);
			}

			sg_set_page(sg, iv.bv_page, iv.bv_len, iv.bv_offset);
			segments++;
		}

		prev = 1;
		ivprv = iv;
	}

	if (sg)
		sg_mark_end(sg);

	return segments;
}
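A caller sketch for the contract stated in the description: size the target list with blk_rq_count_integrity_sg() before mapping (max_segs and the error convention are assumptions for illustration):

static int demo_map_integrity(struct request_queue *q, struct bio *bio,
			      struct scatterlist *sglist, int max_segs)
{
	int nsegs = blk_rq_count_integrity_sg(q, bio);

	if (nsegs > max_segs)
		return -EINVAL;	/* target scatterlist is too small */

	return blk_rq_map_integrity_sg(q, bio, sglist);
}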
Example #6
static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec *bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{
	int nbytes = bvec->bv_len;

	if (*sg && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			sg_unmark_end(*sg);
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = *bvec;
}
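The comment above is the heart of the sg_unmark_end() idiom. A contrived, self-contained sketch of the same trick (ZERO_PAGE() stands in for a real data page), assuming the array was fully initialised once with sg_init_table():

#include <linux/mm.h>
#include <linux/scatterlist.h>

static void demo_reuse_without_reinit(struct scatterlist *sg, int prev_nents)
{
	struct scatterlist *last = &sg[prev_nents - 1];

	/* A previous, shorter mapping left its end marker here; clear
	 * it so sg_next() can step past ... */
	sg_unmark_end(last);
	last = sg_next(last);

	/* ... then terminate the longer mapping on its real last entry,
	 * without re-running sg_init_table(). */
	sg_set_page(last, ZERO_PAGE(0), PAGE_SIZE, 0);
	sg_mark_end(last);
}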
Example #7
static int pack_sg_list(struct scatterlist *sg, int start,
			int limit, char *data, int count)
{
	int s;
	int index = start;

	while (count) {
		s = rest_of_page(data);
		if (s > count)
			s = count;
		BUG_ON(index > limit);
		/* Make sure we don't terminate early. */
		sg_unmark_end(&sg[index]);
		sg_set_buf(&sg[index++], data, s);
		count -= s;
		data += s;
	}
	if (index - start)
		sg_mark_end(&sg[index - 1]);
	return index - start;
}
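A combined usage sketch modelled on the request-building path in net/9p/trans_virtio.c (DEMO_LIMIT stands in for VIRTQUEUE_NUM; the two-list split mirrors how the driver hands separate lists to virtqueue_add_sgs()):

#define DEMO_LIMIT 128

static void demo_build_out_sgs(struct scatterlist *sg,
			       struct scatterlist *sgs[2],
			       char *hdr, int hdr_len,
			       struct page **pages, int nr_pages,
			       char *payload, int payload_len)
{
	int out;

	/* The protocol header lives in kernel memory: pack by buffer. */
	out = pack_sg_list(sg, 0, DEMO_LIMIT, hdr, hdr_len);
	sgs[0] = sg;

	/* The payload pages are already pinned: pack by page, starting
	 * at the first free index. Each helper end-marks its own last
	 * entry, so the two lists terminate independently. */
	pack_sg_list_p(sg, out, DEMO_LIMIT, pages, nr_pages,
		       payload, payload_len);
	sgs[1] = sg + out;
}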
Example #8
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx)
{
	char keyval[TLS_CIPHER_AES_GCM_128_KEY_SIZE];
	struct tls_crypto_info *crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
	struct tls_sw_context *sw_ctx;
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	char *iv, *rec_seq;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	if (ctx->priv_ctx) {
		rc = -EEXIST;
		goto out;
	}

	sw_ctx = kzalloc(sizeof(*sw_ctx), GFP_KERNEL);
	if (!sw_ctx) {
		rc = -ENOMEM;
		goto out;
	}

	ctx->priv_ctx = (struct tls_offload_context *)sw_ctx;

	crypto_info = &ctx->crypto_send;
	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		gcm_128_info =
			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		break;
	}
	default:
		rc = -EINVAL;
		goto free_sw_ctx;
	}

	ctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
	ctx->tag_size = tag_size;
	ctx->overhead_size = ctx->prepend_size + ctx->tag_size;
	ctx->iv_size = iv_size;
	ctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			  GFP_KERNEL);
	if (!ctx->iv) {
		rc = -ENOMEM;
		goto out;
	}
	memcpy(ctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
	ctx->rec_seq_size = rec_seq_size;
	ctx->rec_seq = kmalloc(rec_seq_size, GFP_KERNEL);
	if (!ctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}
	memcpy(ctx->rec_seq, rec_seq, rec_seq_size);

	sg_init_table(sw_ctx->sg_encrypted_data,
		      ARRAY_SIZE(sw_ctx->sg_encrypted_data));
	sg_init_table(sw_ctx->sg_plaintext_data,
		      ARRAY_SIZE(sw_ctx->sg_plaintext_data));

	sg_init_table(sw_ctx->sg_aead_in, 2);
	sg_set_buf(&sw_ctx->sg_aead_in[0], sw_ctx->aad_space,
		   sizeof(sw_ctx->aad_space));
	sg_unmark_end(&sw_ctx->sg_aead_in[1]);
	sg_chain(sw_ctx->sg_aead_in, 2, sw_ctx->sg_plaintext_data);
	sg_init_table(sw_ctx->sg_aead_out, 2);
	sg_set_buf(&sw_ctx->sg_aead_out[0], sw_ctx->aad_space,
		   sizeof(sw_ctx->aad_space));
	sg_unmark_end(&sw_ctx->sg_aead_out[1]);
	sg_chain(sw_ctx->sg_aead_out, 2, sw_ctx->sg_encrypted_data);

	if (!sw_ctx->aead_send) {
		sw_ctx->aead_send = crypto_alloc_aead("gcm(aes)", 0, 0);
		if (IS_ERR(sw_ctx->aead_send)) {
			rc = PTR_ERR(sw_ctx->aead_send);
			sw_ctx->aead_send = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	memcpy(keyval, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);

	rc = crypto_aead_setkey(sw_ctx->aead_send, keyval,
				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(sw_ctx->aead_send, ctx->tag_size);
	if (!rc)
		goto out;

free_aead:
	crypto_free_aead(sw_ctx->aead_send);
	sw_ctx->aead_send = NULL;
free_rec_seq:
	kfree(ctx->rec_seq);
	ctx->rec_seq = NULL;
free_iv:
	kfree(ctx->iv);
	ctx->iv = NULL;
free_sw_ctx:
	/* Also undo the sw_ctx allocation so a failed setup does not
	 * leak it through ctx->priv_ctx. */
	kfree(sw_ctx);
	ctx->priv_ctx = NULL;
out:
	return rc;
}
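A sketch of how the chained AAD + data lists built above are consumed, modelled on tls_do_encryption() in the same file (the request is assumed to be allocated with room for crypto_aead_reqsize()):

#include <crypto/aead.h>

static int demo_encrypt(struct tls_context *tls_ctx,
			struct tls_sw_context *ctx,
			struct aead_request *req, size_t data_len)
{
	aead_request_set_tfm(req, ctx->aead_send);
	aead_request_set_ad(req, sizeof(ctx->aad_space));

	/* sg_aead_in chains into sg_plaintext_data and sg_aead_out into
	 * sg_encrypted_data, so one src/dst pair covers AAD + payload. */
	aead_request_set_crypt(req, ctx->sg_aead_in, ctx->sg_aead_out,
			       data_len, tls_ctx->iv);
	return crypto_aead_encrypt(req);
}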
Example #9
static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, dif_verify dif_verify)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *prot_table;
	bool need_to_release = false;
	struct scatterlist *prot_sg;
	u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
	u32 prot_offset, prot_page;
	u32 prot_npages __maybe_unused;
	u64 tmp;
	sense_reason_t rc = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	tmp = cmd->t_task_lba * se_dev->prot_length;
	prot_offset = do_div(tmp, PAGE_SIZE);
	prot_page = tmp;

	prot_table = rd_get_prot_table(dev, prot_page);
	if (!prot_table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	prot_sg = &prot_table->sg_table[prot_page -
					prot_table->page_start_offset];

#ifndef CONFIG_ARCH_HAS_SG_CHAIN

	prot_npages = DIV_ROUND_UP(prot_offset + sectors * se_dev->prot_length,
				   PAGE_SIZE);

	/*
	 * Allocate temporary contiguous scatterlist entries if the prot
	 * pages straddle multiple scatterlist tables.
	 */
	if (prot_table->page_end_offset < prot_page + prot_npages - 1) {
		int i;

		prot_sg = kcalloc(prot_npages, sizeof(*prot_sg), GFP_KERNEL);
		if (!prot_sg)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

		need_to_release = true;
		sg_init_table(prot_sg, prot_npages);

		for (i = 0; i < prot_npages; i++) {
			if (prot_page + i > prot_table->page_end_offset) {
				prot_table = rd_get_prot_table(dev,
								prot_page + i);
				if (!prot_table) {
					kfree(prot_sg);
					return rc;
				}
				sg_unmark_end(&prot_sg[i - 1]);
			}
			prot_sg[i] = prot_table->sg_table[prot_page + i -
						prot_table->page_start_offset];
		}
	}

#endif /* !CONFIG_ARCH_HAS_SG_CHAIN */

	rc = dif_verify(cmd, cmd->t_task_lba, sectors, 0, prot_sg, prot_offset);
	if (need_to_release)
		kfree(prot_sg);

	return rc;
}
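A worked example of the offset arithmetic at the top of the function (the numbers are illustrative): with 4 KiB pages, 8-byte PI tuples (se_dev->prot_length == 8) and t_task_lba == 1000, the protection data starts at byte 8000, i.e. prot_page == 1 and prot_offset == 3904.

#include <asm/div64.h>

static void demo_prot_position(u64 lba, u32 prot_length,
			       u32 *page, u32 *offset)
{
	u64 tmp = lba * prot_length;

	*offset = do_div(tmp, PAGE_SIZE);	/* remainder: offset in page */
	*page = tmp;				/* quotient: page index */
}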