Example #1
/* This is the main crypto function - zero-copy edition */
static int
__crypto_run_zc(struct csession *ses_ptr, struct kernel_crypt_op *kcop)
{
	struct scatterlist *src_sg, *dst_sg;
	struct crypt_op *cop = &kcop->cop;
	int ret = 0;

	ret = get_userbuf(ses_ptr, cop->src, cop->len, cop->dst, cop->len,
	                  kcop->task, kcop->mm, &src_sg, &dst_sg);
	if (unlikely(ret)) {
		derr(1, "Error getting user pages. Falling back to non-zero-copy.");
		return __crypto_run_std(ses_ptr, cop);
	}

	ret = hash_n_crypt(ses_ptr, cop, src_sg, dst_sg, cop->len);

	release_user_pages(ses_ptr);
	return ret;
}
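The zero-copy path above stands or falls with get_userbuf() and release_user_pages(), which are not shown here. As a rough orientation only, the following is a minimal sketch of that pinning pattern, assuming recent get_user_pages_fast() semantics; pin_user_range() and unpin_user_range() are hypothetical names, not the cryptodev helpers, and the caller is assumed to supply a pages array and scatterlist large enough for the buffer.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Hypothetical sketch, not the cryptodev implementation: pin a user
 * buffer and describe it with a scatterlist. The gup signature below
 * is the modern one (gup_flags); older kernels differ. */
static int pin_user_range(void __user *uaddr, size_t len, int write,
			  struct page **pages, struct scatterlist *sg)
{
	unsigned long addr = (unsigned long)uaddr;
	unsigned int offset = offset_in_page(addr);
	int nr_pages = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int i, pinned;

	pinned = get_user_pages_fast(addr, nr_pages,
				     write ? FOLL_WRITE : 0, pages);
	if (pinned < 0)
		return pinned;
	if (pinned != nr_pages) {
		/* Partial pin: drop what we got and report failure. */
		for (i = 0; i < pinned; i++)
			put_page(pages[i]);
		return -EFAULT;
	}

	sg_init_table(sg, nr_pages);
	for (i = 0; i < nr_pages; i++) {
		unsigned int plen = min_t(size_t, len, PAGE_SIZE - offset);

		sg_set_page(&sg[i], pages[i], plen, offset);
		len -= plen;
		offset = 0;	/* only the first page starts mid-page */
	}
	return nr_pages;
}

/* Hypothetical counterpart of release_user_pages(): mark written pages
 * dirty and drop the references taken by pin_user_range(). */
static void unpin_user_range(struct page **pages, int nr_pages, int dirty)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (dirty)
			set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
}

Pinning both src and dst this way is what lets hash_n_crypt() operate directly on the user pages instead of bouncing the data through kernel buffers, which is the point of the zero-copy path.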
Example #2
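/* Pin the user pages backing [address, address_end), describe them to the
 * host via the pipe command buffer, issue a single PIPE_CMD_READ/WRITE
 * and unpin the pages again. On success returns 0 with *status and
 * *consumed_size filled in; otherwise returns a negative errno.
 */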
static int transfer_max_buffers(struct goldfish_pipe *pipe,
				unsigned long address,
				unsigned long address_end,
				int is_write,
				unsigned long last_page,
				unsigned int last_page_size,
				s32 *consumed_size,
				int *status)
{
	unsigned long first_page = address & PAGE_MASK;
	unsigned int iter_last_page_size;
	int pages_count;

	/* Serialize access to the pipe command buffers */
	if (mutex_lock_interruptible(&pipe->lock))
		return -ERESTARTSYS;

	pages_count = pin_user_pages(first_page, last_page,
				     last_page_size, is_write,
				     pipe->pages, &iter_last_page_size);
	if (pages_count < 0) {
		mutex_unlock(&pipe->lock);
		return pages_count;
	}

	populate_rw_params(pipe->pages, pages_count, address, address_end,
			   first_page, last_page, iter_last_page_size, is_write,
			   pipe->command_buffer);

	/* Transfer the data */
	*status = goldfish_pipe_cmd_locked(pipe,
				is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ);

	*consumed_size = pipe->command_buffer->rw_params.consumed_size;

	release_user_pages(pipe->pages, pages_count, is_write, *consumed_size);

	mutex_unlock(&pipe->lock);
	return 0;
}
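For orientation, here is a hedged sketch of how a read/write path could drive transfer_max_buffers() over a larger user range. It is not the goldfish driver's own loop; pipe_rw_sketch() is an invented name, and the status handling (waiting for the host when it asks the guest to retry, mapping host errors to errnos) is deliberately simplified. It only illustrates how address/address_end, last_page/last_page_size and the consumed_size/status out-parameters fit together.

/* Hypothetical caller, assuming the driver's internal definitions
 * (struct goldfish_pipe, transfer_max_buffers) are in scope. */
static ssize_t pipe_rw_sketch(struct goldfish_pipe *pipe,
			      char __user *buf, size_t len, int is_write)
{
	unsigned long address = (unsigned long)buf;
	unsigned long address_end = address + len;
	/* The last page and its used size are properties of the whole
	 * range, so they are computed once up front. */
	unsigned long last_page = (address_end - 1) & PAGE_MASK;
	unsigned int last_page_size = ((address_end - 1) & ~PAGE_MASK) + 1;
	ssize_t done = 0;

	while (address < address_end) {
		s32 consumed = 0;
		int status, ret;

		ret = transfer_max_buffers(pipe, address, address_end,
					   is_write, last_page,
					   last_page_size, &consumed,
					   &status);
		if (ret < 0)
			return done ? done : ret;

		address += consumed;
		done += consumed;

		if (status <= 0 || consumed == 0) {
			/* No progress or a host-reported error. A full
			 * implementation would wait and retry where the
			 * host says "try again"; this sketch stops and
			 * returns whatever was transferred. */
			break;
		}
	}
	return done;
}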
Example #3
/* This is the main crypto function for authenticated (AEAD) operations - zero-copy edition */
static int
__crypto_auth_run_zc(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop)
{
	struct scatterlist *dst_sg, *auth_sg, *src_sg;
	struct crypt_auth_op *caop = &kcaop->caop;
	int ret = 0;

	if (caop->flags & COP_FLAG_AEAD_SRTP_TYPE) {
		if (unlikely(ses_ptr->cdata.init != 0 &&
			     (ses_ptr->cdata.stream == 0 || ses_ptr->cdata.aead != 0))) {
			dprintk(0, KERN_ERR, "Only stream ciphers (not AEAD) are allowed in SRTP mode\n");
			return -EINVAL;
		}

		ret = get_userbuf_srtp(ses_ptr, kcaop, &auth_sg, &dst_sg);
		if (unlikely(ret)) {
			dprintk(1, KERN_ERR, "get_userbuf_srtp(): Error getting user pages.\n");
			return ret;
		}

		ret = srtp_auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
			   dst_sg, caop->len);

		release_user_pages(ses_ptr);
	} else { /* TLS and normal cases. Here auth data are usually small
	          * so we just copy them to a free page, instead of trying
	          * to map them.
	          */
		unsigned char* auth_buf = NULL;
		struct scatterlist tmp;

		if (unlikely(caop->auth_len > PAGE_SIZE)) {
			dprintk(1, KERN_ERR, "auth data len is excessive.\n");
			return -EINVAL;
		}

		auth_buf = (unsigned char *)__get_free_page(GFP_KERNEL);
		if (unlikely(!auth_buf)) {
			dprintk(1, KERN_ERR, "unable to get a free page.\n");
			return -ENOMEM;
		}

		if (caop->auth_src && caop->auth_len > 0) {
			if (unlikely(copy_from_user(auth_buf, caop->auth_src, caop->auth_len))) {
				dprintk(1, KERN_ERR, "unable to copy auth data from userspace.\n");
				ret = -EFAULT;
				goto free_auth_buf;
			}

			sg_init_one(&tmp, auth_buf, caop->auth_len);
			auth_sg = &tmp;
		} else {
			auth_sg = NULL;
		}

		if (caop->flags & COP_FLAG_AEAD_TLS_TYPE && ses_ptr->cdata.aead == 0) {
			ret = get_userbuf_tls(ses_ptr, kcaop, &dst_sg);
			if (unlikely(ret)) {
				dprintk(1, KERN_ERR, "get_userbuf_tls(): Error getting user pages.\n");
				goto free_auth_buf;
			}

			ret = tls_auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
				   dst_sg, caop->len);
		} else {
			int dst_len;

			if (unlikely(ses_ptr->cdata.init == 0 ||
				     ses_ptr->cdata.stream == 0 ||
				     ses_ptr->cdata.aead == 0)) {
				dprintk(0, KERN_ERR, "Only stream and AEAD ciphers are allowed for authenc\n");
				ret = -EINVAL;
				goto free_auth_buf;
			}

			if (caop->op == COP_ENCRYPT)
				dst_len = caop->len + cryptodev_cipher_get_tag_size(&ses_ptr->cdata);
			else
				dst_len = caop->len;

			ret = get_userbuf(ses_ptr, caop->src, caop->len, caop->dst, dst_len,
					  kcaop->task, kcaop->mm, &src_sg, &dst_sg);
			if (unlikely(ret)) {
				dprintk(1, KERN_ERR, "get_userbuf(): Error getting user pages.\n");
				goto free_auth_buf;
			}

			ret = auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
					   src_sg, dst_sg, caop->len);
		}

		release_user_pages(ses_ptr);

free_auth_buf:
		free_page((unsigned long)auth_buf);
	}

	return ret;
}
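To show how the non-TLS, non-SRTP branch above is normally reached from userspace, here is a hedged sketch built around the CIOCAUTHCRYPT ioctl. aead_encrypt() is an invented helper; the struct crypt_auth_op fields follow crypto/cryptodev.h as shipped with cryptodev-linux, but verify them against your installed header. Because the kernel maps dst with room for caop->len plus the cipher's tag size on COP_ENCRYPT, the output buffer must be allocated that large.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <crypto/cryptodev.h>

/* Hypothetical userspace helper (not part of cryptodev-linux):
 * encrypt 'len' bytes with a previously opened AEAD session 'ses'.
 * 'out' must provide room for len plus the authentication tag. */
static int aead_encrypt(int cfd, uint32_t ses,
			const void *aad, size_t aad_len,
			const void *plain, size_t len,
			void *out, void *iv, size_t iv_len)
{
	struct crypt_auth_op cao;

	memset(&cao, 0, sizeof(cao));
	cao.ses = ses;
	cao.op = COP_ENCRYPT;
	cao.flags = 0;			/* plain AEAD: neither TLS nor SRTP */
	cao.auth_src = (void *)aad;	/* small AAD is copied, not mapped */
	cao.auth_len = aad_len;
	cao.src = (void *)plain;	/* pinned zero-copy by get_userbuf() */
	cao.dst = out;			/* ciphertext plus appended tag */
	cao.len = len;
	cao.iv = iv;
	cao.iv_len = iv_len;

	return ioctl(cfd, CIOCAUTHCRYPT, &cao);
}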
Example #4
/* Makes caop->auth_src available as a scatterlist.
 * It also provides a pointer to caop->dst, which is assumed to lie
 * within the caop->auth_src buffer. If it does not (i.e. their
 * difference exceeds MAX_SRTP_AUTH_DATA_DIFF), an error is returned.
 */
static int get_userbuf_srtp(struct csession *ses, struct kernel_crypt_auth_op *kcaop,
			struct scatterlist **auth_sg, struct scatterlist **dst_sg)
{
	int pagecount, diff;
	int auth_pagecount = 0;
	struct crypt_auth_op *caop = &kcaop->caop;
	int rc;

	if (caop->dst == NULL && caop->auth_src == NULL) {
		dprintk(1, KERN_ERR, "dst and auth_src cannot be both null\n");
		return -EINVAL;
	}

	if (ses->alignmask) {
		if (!IS_ALIGNED((unsigned long)caop->dst, ses->alignmask + 1))
			dprintk(2, KERN_WARNING, "careful - destination address %lx is not %d byte aligned\n",
				(unsigned long)caop->dst, ses->alignmask + 1);
		if (!IS_ALIGNED((unsigned long)caop->auth_src, ses->alignmask + 1))
			dprintk(2, KERN_WARNING, "careful - auth source address %lx is not %d byte aligned\n",
				(unsigned long)caop->auth_src, ses->alignmask + 1);
	}

	if (unlikely(kcaop->dst_len == 0 || caop->auth_len == 0)) {
		dprintk(1, KERN_WARNING, "Destination and auth data lengths cannot be zero\n");
		return -EINVAL;
	}

	/* Note that in SRTP the auth data overlap with the data to be encrypted (dst). */

	auth_pagecount = PAGECOUNT(caop->auth_src, caop->auth_len);
	diff = (int)(caop->src - caop->auth_src);
	if (diff > MAX_SRTP_AUTH_DATA_DIFF || diff < 0) {
		dprintk(1, KERN_WARNING, "auth_src must overlap with src (diff: %d).\n", diff);
		return -EINVAL;
	}

	pagecount = auth_pagecount;

	rc = adjust_sg_array(ses, pagecount * 2); /* double the pages to also cover dst (= auth_src) */
	if (rc) {
		dprintk(1, KERN_ERR, "cannot adjust sg array\n");
		return rc;
	}

	rc = __get_userbuf(caop->auth_src, caop->auth_len, 1, auth_pagecount,
			   ses->pages, ses->sg, kcaop->task, kcaop->mm);
	if (unlikely(rc)) {
		dprintk(1, KERN_ERR,
			"failed to get user pages for data input\n");
		return -EINVAL;
	}

	ses->used_pages = pagecount;
	ses->readonly_pages = 0;

	(*auth_sg) = ses->sg;

	(*dst_sg) = ses->sg + auth_pagecount;
	sg_init_table(*dst_sg, auth_pagecount);
	sg_copy(ses->sg, (*dst_sg), caop->auth_len);
	(*dst_sg) = sg_advance(*dst_sg, diff);
	if (*dst_sg == NULL) {
		release_user_pages(ses);
		dprintk(1, KERN_ERR,
			"failed to get enough pages for auth data\n");
		return -EINVAL;
	}

	return 0;
}
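The interesting part above is the tail: dst_sg starts as a copy of the auth_src scatterlist and is then advanced by diff bytes so that it points at the encrypted region inside the same mapping. The cryptodev sg_advance() is not shown here; the following is a hedged sketch of what such a helper could look like (sg_advance_sketch() is an invented name, and it trims the first remaining entry in place).

#include <linux/scatterlist.h>

/* Hypothetical sketch, not the cryptodev implementation: skip
 * 'consumed' bytes at the head of a scatterlist and return the first
 * entry that still carries data, or NULL if the list is too short. */
static struct scatterlist *sg_advance_sketch(struct scatterlist *sg,
					     unsigned int consumed)
{
	while (sg && consumed >= sg->length) {
		consumed -= sg->length;
		sg = sg_next(sg);
	}
	if (!sg)
		return NULL;	/* ran past the end of the list */

	/* Trim the first remaining entry so it begins at the new start. */
	sg->offset += consumed;
	sg->length -= consumed;
	return sg;
}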