Example #1
static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	const struct rsa_asn1_template *digest_info;
	unsigned int pos;

	if (err == -EOVERFLOW)
		/* Decrypted value had no leading 0 byte */
		err = -EINVAL;

	if (err)
		goto done;

	if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
		err = -EINVAL;
		goto done;
	}

	err = -EBADMSG;
	if (req_ctx->out_buf[0] != 0x01)
		goto done;

	/* skip the 0xff padding string (PS) that follows the 0x01 marker */
	for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
		if (req_ctx->out_buf[pos] != 0xff)
			break;

	if (pos < 9 || pos == req_ctx->child_req.dst_len ||
	    req_ctx->out_buf[pos] != 0x00)
		goto done;
	pos++;

	if (ctx->hash_name) {
		digest_info = rsa_lookup_asn1(ctx->hash_name);
		if (!digest_info)
			goto done;

		if (memcmp(req_ctx->out_buf + pos, digest_info->data,
			   digest_info->size))
			goto done;

		pos += digest_info->size;
	}

	err = 0;

	if (req->dst_len < req_ctx->child_req.dst_len - pos)
		err = -EOVERFLOW;
	req->dst_len = req_ctx->child_req.dst_len - pos;

	if (!err)
		sg_copy_from_buffer(req->dst,
				sg_nents_for_len(req->dst, req->dst_len),
				req_ctx->out_buf + pos, req->dst_len);
done:
	kzfree(req_ctx->out_buf);

	return err;
}
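The body above hand-rolls an EMSA-PKCS1-v1_5 (block type 01) unpad: the RSA child op has already stripped the leading 0x00, so the buffer must read 0x01, then at least eight 0xff padding bytes (PS), then a 0x00 separator, then the optional DigestInfo and the hash. A standalone sketch of the same check, assuming the RFC 8017 §9.2 layout (hypothetical helper, not part of the file above):

static int emsa_pkcs1_v15_unpad(const u8 *buf, size_t len, size_t *payload)
{
	size_t pos;

	if (!len || buf[0] != 0x01)
		return -EBADMSG;

	/* skip the 0xff padding string */
	for (pos = 1; pos < len; pos++)
		if (buf[pos] != 0xff)
			break;

	/* PS must be at least 8 bytes and end with a 0x00 separator */
	if (pos < 9 || pos == len || buf[pos] != 0x00)
		return -EBADMSG;

	*payload = pos + 1;
	return 0;
}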
Example #2
static inline void wbsd_dma_to_sg(struct wbsd_host *host, struct mmc_data *data)
{
	size_t len = 0;
	int i;

	for (i = 0; i < data->sg_len; i++)
		len += data->sg[i].length;
	sg_copy_from_buffer(data->sg, data->sg_len, host->dma_buffer, len);
}
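sg_copy_from_buffer() takes the scatterlist, the number of entries, a linear source buffer, and a byte count, and returns the number of bytes actually copied. A minimal single-entry sketch (hypothetical buffers):

static void demo_scatter(void *dst, void *bounce, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, dst, len);
	/* scatter the linear bounce buffer into the one-entry list */
	sg_copy_from_buffer(&sg, 1, bounce, len);
}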
Example #3
static int
qla4xxx_get_acb_state(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t status[MBOX_REG_COUNT];
	uint32_t acb_idx;
	uint32_t ip_idx;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 4022 and above adapters are supported */
	if (is_qla4010(ha))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	if (bsg_job->reply_payload.payload_len < sizeof(status)) {
		ql4_printk(KERN_ERR, ha, "%s: invalid payload len %d\n",
			   __func__, bsg_job->reply_payload.payload_len);
		rval = -EINVAL;
		goto leave;
	}

	acb_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	ip_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[2];

	rval = qla4xxx_get_ip_state(ha, acb_idx, ip_idx, status);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get ip state failed\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    status, sizeof(status));
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
leave:
	return rval;
}
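The tail of this handler shows the vendor-BSG reply pattern repeated throughout these examples: copy a status block into the reply payload sg list, record the copied length, and complete the job. A condensed sketch (hypothetical helper over the same iscsi_bsg structures):

static void bsg_reply_with(struct bsg_job *job, struct iscsi_bsg_reply *reply,
			   const void *blob, size_t len)
{
	reply->reply_payload_rcv_len =
		sg_copy_from_buffer(job->reply_payload.sg_list,
				    job->reply_payload.sg_cnt, blob, len);
	reply->result = DID_OK << 16;
	bsg_job_done(job, reply->result, reply->reply_payload_rcv_len);
}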
Example #4
/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}
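The driver pairs this with a write-direction helper that runs before the request; a sketch of what that counterpart looks like under the same assumptions (the real _pre helper is not shown in this example):

static void mmc_queue_bounce_pre_sketch(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	/* gather the request's sg list into the linear bounce buffer */
	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}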
Example #5
static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	unsigned int dst_len;
	unsigned int pos;
	u8 *out_buf;

	if (err)
		goto done;

	err = -EINVAL;
	dst_len = req_ctx->child_req.dst_len;
	if (dst_len < ctx->key_size - 1)
		goto done;

	out_buf = req_ctx->out_buf;
	if (dst_len == ctx->key_size) {
		if (out_buf[0] != 0x00)
			/* Decrypted value had no leading 0 byte */
			goto done;

		dst_len--;
		out_buf++;
	}

	if (out_buf[0] != 0x02)
		goto done;

	for (pos = 1; pos < dst_len; pos++)
		if (out_buf[pos] == 0x00)
			break;
	if (pos < 9 || pos == dst_len)
		goto done;
	pos++;

	err = 0;

	if (req->dst_len < dst_len - pos)
		err = -EOVERFLOW;
	req->dst_len = dst_len - pos;

	if (!err)
		sg_copy_from_buffer(req->dst,
				sg_nents_for_len(req->dst, req->dst_len),
				out_buf + pos, req->dst_len);

done:
	kzfree(req_ctx->out_buf);

	return err;
}
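This is the RSAES-PKCS1-v1_5 (block type 02) counterpart of Example #1: after the optional leading 0x00, the block reads 0x02, at least eight nonzero random padding bytes, a 0x00 separator, then the message. Standalone sketch under the same layout assumptions (hypothetical helper); note that, like the code above, the scan is not constant-time:

static int rsaes_pkcs1_v15_unpad(const u8 *buf, size_t len, size_t *msg_off)
{
	size_t pos;

	if (!len || buf[0] != 0x02)
		return -EINVAL;

	/* scan past the nonzero padding string for the 0x00 separator */
	for (pos = 1; pos < len; pos++)
		if (buf[pos] == 0x00)
			break;

	if (pos < 9 || pos == len)
		return -EINVAL;

	*msg_off = pos + 1;
	return 0;
}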
Example #6
/* Copy sg data, from to_skip to end, to dest (or the reverse, depending on direct) */
void dx_sg_copy_part(u8 *dest, struct scatterlist *sg,
	      int to_skip, unsigned int end, enum dx_sg_cpy_direct direct)
{
	struct scatterlist t_sg;
	struct scatterlist *current_sg = sg;
	int sg_index, cpy_index;
	int nents;
	int lbytes;

	nents = sg_count_ents(sg, end, &lbytes);
	sg_index = current_sg->length;
	/* advance to the sg entry that contains the to_skip offset */
	while (sg_index <= to_skip) {
		current_sg = scatterwalk_sg_next(current_sg);
		sg_index += current_sg->length;
		nents--;
	}
	cpy_index = sg_index - to_skip;
	/* copy current sg to temporary */
	t_sg = *current_sg;
	/*update the offset in the sg entry*/
	t_sg.offset += current_sg->length - cpy_index;
	/*copy the data*/
	if (direct == DX_SG_TO_BUF) {
		sg_copy_to_buffer(&t_sg, 1, dest, cpy_index);
	} else {
		sg_copy_from_buffer(&t_sg, 1, dest, cpy_index);
	}
	current_sg = scatterwalk_sg_next(current_sg);
	nents--;

	if (end > sg_index) {
		if (direct == DX_SG_TO_BUF) {
			sg_copy_to_buffer(current_sg, nents,
					  &dest[cpy_index], end - sg_index);
		} else {
			sg_copy_from_buffer(current_sg, nents,
					    &dest[cpy_index], end - sg_index);
		}
	}
}
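Usage sketch (hypothetical offsets, using the DX_SG_TO_BUF direction seen above): copy bytes [16, 64) of a source sg list into a small linear buffer:

static void demo_copy_part(struct scatterlist *src)
{
	u8 tmp[48];

	/* to_skip = 16, end = 64: copies 48 bytes into tmp */
	dx_sg_copy_part(tmp, src, 16, 64, DX_SG_TO_BUF);
}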
Example #7
/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != READ)
		return;

	local_irq_save(flags);
	sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}
Example #8
/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
static void card_queue_bounce_post(struct card_queue *cq)
{
	unsigned long flags;

	if (!cq->bounce_buf)
		return;

	if (rq_data_dir(cq->req) != READ)
		return;
	
	local_irq_save(flags);
	
	sg_copy_from_buffer(cq->bounce_sg, cq->bounce_sg_len,
		cq->bounce_buf, cq->sg[0].length);

	local_irq_restore(flags);
	
	bio_flush_dcache_pages(cq->req->bio);
}
Example #9
/* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = scsi_in(scp);

	if (!sdb->length)
		return 0;
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
		return DID_ERROR << 16;

	/* copies as much as the data-in sg list can hold; act_len may be
	 * less than arr_len, which the resid bookkeeping below absorbs */
	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
					arr, arr_len);
	if (sdb->resid)
		sdb->resid -= act_len;
	else
		sdb->resid = scsi_bufflen(scp) - act_len;

	return 0;
}
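Usage sketch (hypothetical INQUIRY-style responder): build the response in a local array, then let fill_from_dev_buffer() push it into the command's data-in scatterlist and account for any shortfall via resid:

static int fake_inquiry(struct scsi_cmnd *scp)
{
	unsigned char arr[36] = { 0 };	/* zeroed standard INQUIRY data */

	arr[4] = sizeof(arr) - 5;	/* ADDITIONAL LENGTH field */
	return fill_from_dev_buffer(scp, arr, sizeof(arr));
}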
Example #10
static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	unsigned int pad_len;
	unsigned int len;
	u8 *out_buf;

	if (err)
		goto out;

	len = req_ctx->child_req.dst_len;
	pad_len = ctx->key_size - len;

	/* Four billion to one */
	if (likely(!pad_len))
		goto out;

	out_buf = kzalloc(ctx->key_size, GFP_KERNEL);
	err = -ENOMEM;
	if (!out_buf)
		goto out;

	/*
	 * Shift the result right by pad_len within the same sg list:
	 * gather the child output into the tail of the zeroed linear
	 * buffer, then scatter all key_size bytes back over req->dst.
	 */
	sg_copy_to_buffer(req->dst, sg_nents_for_len(req->dst, len),
			  out_buf + pad_len, len);
	sg_copy_from_buffer(req->dst,
			    sg_nents_for_len(req->dst, ctx->key_size),
			    out_buf, ctx->key_size);
	kzfree(out_buf);

out:
	req->dst_len = ctx->key_size;

	kfree(req_ctx->in_buf);

	return err;
}
Example #11
static void sd_normal_rw(struct realtek_pci_sdmmc *host,
		struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	u8 _cmd[5], *buf;

	_cmd[0] = 0x40 | (u8)cmd->opcode;
	put_unaligned_be32(cmd->arg, (u32 *)(&_cmd[1]));

	buf = kzalloc(data->blksz, GFP_NOIO);
	if (!buf) {
		cmd->error = -ENOMEM;
		return;
	}

	if (data->flags & MMC_DATA_READ) {
		if (host->initial_mode)
			sd_disable_initial_mode(host);

		cmd->error = sd_read_data(host, _cmd, (u16)data->blksz, buf,
				data->blksz, 200);

		if (host->initial_mode)
			sd_enable_initial_mode(host);

		sg_copy_from_buffer(data->sg, data->sg_len, buf, data->blksz);
	} else {
		sg_copy_to_buffer(data->sg, data->sg_len, buf, data->blksz);

		cmd->error = sd_write_data(host, _cmd, (u16)data->blksz, buf,
				data->blksz, 200);
	}

	kfree(buf);
}
Example #12
static int mmc_rpmb_send_command(struct mmc_card *card, u8 *buf, __u16 blks,
		__u16 type, u8 req_type)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_command sbc = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	u8 *transfer_buf = NULL;

	mrq.sbc = &sbc;
	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = NULL;
	transfer_buf = kzalloc(512 * blks, GFP_KERNEL);
	if (!transfer_buf)
		return -ENOMEM;

	/*
	 * set CMD23
	 */
	sbc.opcode = MMC_SET_BLOCK_COUNT;
	sbc.arg = blks;
	if ((req_type == RPMB_REQ) && type == RPMB_WRITE_DATA)
		sbc.arg |= 1U << 31;
	sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * set CMD25/18
	 */
	sg_init_one(&sg, transfer_buf, 512 * blks);
	if (req_type == RPMB_REQ) {
		cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
		sg_copy_from_buffer(&sg, 1, buf, 512 * blks);
		data.flags |= MMC_DATA_WRITE;
	} else {
		cmd.opcode = MMC_READ_MULTIPLE_BLOCK;
		data.flags |= MMC_DATA_READ;
	}
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
	data.blksz = 512;
	data.blocks = blks;
	data.sg = &sg;
	data.sg_len = 1;

	mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(card->host, &mrq);

	if (req_type != RPMB_REQ)
		sg_copy_to_buffer(&sg, 1, buf, 512 * blks);

	kfree(transfer_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;
	return 0;
}
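Usage sketch of the helper above for a data read. RPMB_REQ and RPMB_WRITE_DATA appear in the function itself; the response-phase constants here (RPMB_RESP, RPMB_READ_DATA) are assumed counterparts and may be named differently in the real driver:

static int rpmb_read_sketch(struct mmc_card *card, u8 *frames, u16 blks)
{
	int ret;

	/* phase 1: write the read-request frame (the CMD25 path above) */
	ret = mmc_rpmb_send_command(card, frames, 1, RPMB_READ_DATA,
				    RPMB_REQ);
	if (ret)
		return ret;

	/* phase 2: read the result frames back (the CMD18 path above) */
	return mmc_rpmb_send_command(card, frames, blks, RPMB_READ_DATA,
				     RPMB_RESP);
}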
Example #13
static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	struct akcipher_instance *inst = akcipher_alg_instance(tfm);
	struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
	const struct rsa_asn1_template *digest_info = ictx->digest_info;
	unsigned int dst_len;
	unsigned int pos;
	u8 *out_buf;

	if (err)
		goto done;

	err = -EINVAL;
	dst_len = req_ctx->child_req.dst_len;
	if (dst_len < ctx->key_size - 1)
		goto done;

	out_buf = req_ctx->out_buf;
	if (dst_len == ctx->key_size) {
		if (out_buf[0] != 0x00)
			/* Decrypted value had no leading 0 byte */
			goto done;

		dst_len--;
		out_buf++;
	}

	err = -EBADMSG;
	if (out_buf[0] != 0x01)
		goto done;

	for (pos = 1; pos < dst_len; pos++)
		if (out_buf[pos] != 0xff)
			break;

	if (pos < 9 || pos == dst_len || out_buf[pos] != 0x00)
		goto done;
	pos++;

	if (digest_info) {
		if (crypto_memneq(out_buf + pos, digest_info->data,
				  digest_info->size))
			goto done;

		pos += digest_info->size;
	}

	err = 0;

	if (req->dst_len < dst_len - pos)
		err = -EOVERFLOW;
	req->dst_len = dst_len - pos;

	if (!err)
		sg_copy_from_buffer(req->dst,
				sg_nents_for_len(req->dst, req->dst_len),
				out_buf + pos, req->dst_len);
done:
	kzfree(req_ctx->out_buf);

	return err;
}
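Compared with Example #1, this revision resolves digest_info once at instantiation time and replaces memcmp() with crypto_memneq(), which does not short-circuit on the first differing byte. Minimal sketch of the idiom (hypothetical helper):

static int check_digest_info(const u8 *got, const u8 *want, unsigned int len)
{
	/* constant-time inequality: nonzero iff the buffers differ */
	if (crypto_memneq(got, want, len))
		return -EBADMSG;
	return 0;
}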
Example #14
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	bsg_job->reply->reply_payload_rcv_len = 0;

	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
		test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
		test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		ret = -EBUSY;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}
	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
				~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_job->reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_job->reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_job->reply->result = DID_OK;
			} else {
				ret = -EINVAL;
				bsg_job->reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_job->reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_job->reply->result = DID_OK;
		bsg_job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_job->reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				qla_printk(KERN_WARNING, ha,
					"Unable to allocate memory "
					"for fcp prio config data (%x).\n",
					FCP_PRIO_CFG_SIZE);
				bsg_job->reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
			FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */
		if (!qla24xx_fcp_prio_cfg_valid(
			(struct qla_fcp_prio_cfg *)
			ha->fcp_prio_cfg, 1)) {
			bsg_job->reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer was invalid, then
			 * fcp_prio_cfg is of no use
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_job->reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	bsg_job->job_done(bsg_job);
	return ret;
}
Example #15
/*
 * @part: GPP partition part number
 * @addr: GPP write group
 */
int mmc_wp_status(struct mmc_card *card, unsigned int part,
		unsigned int addr, u8 *wp_status)
{
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct mmc_request mrq = {0};
	struct scatterlist sg;
	u32 status = 0;
	int err = 0;
	u8 *rbuf = NULL;

	if (!card)
		return -ENODEV;

	if (!card->ext_csd.gpp_sz[part - EXT_CSD_PART_CONFIG_ACC_GP0]) {
		pr_err("%s: doesn't have GPP%d\n", __func__,
				part - 3);
		return -ENODEV;
	}

	rbuf = kzalloc(8, GFP_KERNEL);
	if (rbuf == NULL) {
		pr_err("%s: no memory\n", __func__);
		return -ENOMEM;
	}

	cmd.opcode = MMC_SEND_WRITE_PROT_TYPE;
	cmd.arg = addr * card->ext_csd.wpg_sz;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.sg = &sg;
	data.sg_len = 1;
	data.blksz = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	sg_init_one(data.sg, rbuf, 8);
	mrq.data = &data;
	mrq.cmd = &cmd;

	mmc_claim_host(card->host);

	mmc_set_data_timeout(&data, card);

	err = mmc_switch_part(card, part);
	if (err) {
		mmc_release_host(card->host);
		dev_err(mmc_dev(card->host), "%s: switch error %d\n",
				__func__, err);
		goto out;
	}

	mmc_wait_for_req(card->host, &mrq);
	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
				__func__, cmd.error);
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
				__func__, data.error);
	}

	/* Must check status to be sure of no errors */
	do {
		err = mmc_send_status(card, &status);
		if (err) {
			pr_err("%s: get card status err %d, status 0x%x\n",
					__func__, err, status);
			goto out;
		}
		if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
			break;
		if (mmc_host_is_spi(card->host))
			break;
	} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);

	if (mmc_host_is_spi(card->host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND) {
			pr_err("%s: error card status 0x%x\n",
					__func__, status);
			goto out;
		}
	} else {
		if (status & 0xFDFFA000)
			pr_warn("%s: unexpected status %#x after switch\n",
					__func__, status);
		if (status & R1_SWITCH_ERROR) {
			pr_err("%s: card switch error, status 0x%x\n",
					__func__, status);
		}
		if (status & R1_OUT_OF_RANGE) {
			pr_err("%s: addr out of range, status 0x%x\n",
					__func__, status);
			goto out;
		}
	}

	mmc_switch_part(card, EXT_CSD_PART_CONFIG_ACC_USER);

	mmc_release_host(card->host);

	sg_copy_from_buffer(data.sg, 1, rbuf, 8);

	/*
	 * the first write protect group type is in the last two
	 * bits in the last byte read from the device.
	 */
	*wp_status = rbuf[7] & 0x3;

	kfree(rbuf);

	return 0;
out:
	kfree(rbuf);

	return -EPERM;
}
Example #16
static int
qla4xxx_read_flash(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	uint32_t offset = 0;
	uint32_t length = 0;
	dma_addr_t flash_dma;
	uint8_t *flash = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	if (ha->flash_state != QLFLASH_WAITING) {
		ql4_printk(KERN_ERR, ha, "%s: another flash operation "
			   "active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	ha->flash_state = QLFLASH_READING;
	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	length = bsg_job->reply_payload.payload_len;

	flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
				   GFP_KERNEL);
	if (!flash) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	rval = qla4xxx_get_flash(ha, flash_dma, offset, length);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get flash failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    flash, length);
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
leave:
	ha->flash_state = QLFLASH_WAITING;
	return rval;
}
Example #17
static int ufs_bsg_request(struct bsg_job *job)
{
	struct ufs_bsg_request *bsg_request = job->request;
	struct ufs_bsg_reply *bsg_reply = job->reply;
	struct ufs_hba *hba = shost_priv(dev_to_shost(job->dev->parent));
	unsigned int req_len = job->request_len;
	unsigned int reply_len = job->reply_len;
	struct uic_command uc = {};
	int msgcode;
	uint8_t *desc_buff = NULL;
	int desc_len = 0;
	enum query_opcode desc_op = UPIU_QUERY_OPCODE_NOP;
	int ret;

	ret = ufs_bsg_verify_query_size(hba, req_len, reply_len);
	if (ret)
		goto out;

	bsg_reply->reply_payload_rcv_len = 0;

	msgcode = bsg_request->msgcode;
	switch (msgcode) {
	case UPIU_TRANSACTION_QUERY_REQ:
		desc_op = bsg_request->upiu_req.qr.opcode;
		ret = ufs_bsg_alloc_desc_buffer(hba, job, &desc_buff,
						&desc_len, desc_op);
		if (ret)
			goto out;

		/* fall through */
	case UPIU_TRANSACTION_NOP_OUT:
	case UPIU_TRANSACTION_TASK_REQ:
		ret = ufshcd_exec_raw_upiu_cmd(hba, &bsg_request->upiu_req,
					       &bsg_reply->upiu_rsp, msgcode,
					       desc_buff, &desc_len, desc_op);
		if (ret)
			dev_err(hba->dev,
				"exe raw upiu: error code %d\n", ret);

		break;
	case UPIU_TRANSACTION_UIC_CMD:
		memcpy(&uc, &bsg_request->upiu_req.uc, UIC_CMD_SIZE);
		ret = ufshcd_send_uic_cmd(hba, &uc);
		if (ret)
			dev_dbg(hba->dev,
				"send uic cmd: error code %d\n", ret);

		memcpy(&bsg_reply->upiu_rsp.uc, &uc, UIC_CMD_SIZE);

		break;
	default:
		ret = -ENOTSUPP;
		dev_err(hba->dev, "unsupported msgcode 0x%x\n", msgcode);

		break;
	}

	if (!desc_buff)
		goto out;

	if (desc_op == UPIU_QUERY_OPCODE_READ_DESC && desc_len)
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->request_payload.sg_list,
					    job->request_payload.sg_cnt,
					    desc_buff, desc_len);

	kfree(desc_buff);

out:
	bsg_reply->result = ret;
	job->reply_len = sizeof(struct ufs_bsg_reply);
	bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);

	return ret;
}
Example #18
static int mmc_test_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	int ret, i;
	unsigned long flags;

	if (write) {
		for (i = 0; i < blocks * blksz; i++)
			test->scratch[i] = i;
	} else {
		memset(test->scratch, 0, BUFFER_SIZE);
	}
	local_irq_save(flags);
	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
	local_irq_restore(flags);

	ret = mmc_test_set_blksize(test, blksz);
	if (ret)
		return ret;

	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
		blocks, blksz, write);
	if (ret)
		return ret;

	if (write) {
		int sectors;

		ret = mmc_test_set_blksize(test, 512);
		if (ret)
			return ret;

		sectors = (blocks * blksz + 511) / 512;
		if ((sectors * 512) == (blocks * blksz))
			sectors++;

		if ((sectors * 512) > BUFFER_SIZE)
			return -EINVAL;

		memset(test->buffer, 0, sectors * 512);

		for (i = 0; i < sectors; i++) {
			ret = mmc_test_buffer_transfer(test,
				test->buffer + i * 512,
				dev_addr + i, 512, 0);
			if (ret)
				return ret;
		}

		for (i = 0; i < blocks * blksz; i++) {
			if (test->buffer[i] != (u8)i)
				return RESULT_FAIL;
		}

		for (; i < sectors * 512; i++) {
			if (test->buffer[i] != 0xDF)
				return RESULT_FAIL;
		}
	} else {
		local_irq_save(flags);
		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
		local_irq_restore(flags);
		for (i = 0; i < blocks * blksz; i++) {
			if (test->scratch[i] != (u8)i)
				return RESULT_FAIL;
		}
	}

	return 0;
}
Example #19
static int CPRM_CMD_SecureMultiRW(struct mmc_card *card,
	unsigned int command,
	unsigned int dir,
	unsigned long arg,
	unsigned char *buff,
	unsigned int length)
{

	int err;

	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_command stop;
	struct mmc_data data;
	unsigned long flags;

	struct scatterlist sg;

	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&stop, 0, sizeof(struct mmc_command));

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return (u32)-1;

	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	printk(KERN_DEBUG "%s: 1\n", __func__);

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = command;

	if (command == SD_ACMD43_GET_MKB)
		cmd.arg = arg;
	else
		cmd.arg = 0;

	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	memset(&data, 0, sizeof(struct mmc_data));

	data.timeout_ns = 100000000;
	data.timeout_clks = 0;

#if defined(CONFIG_TARGET_LOCALE_NTT)
	data.timeout_ns = 100000000;
	data.timeout_clks = 0;
#endif

	data.blksz = 512;
	data.blocks = (length + 511) / 512;

	data.flags = dir;
	data.sg = &sg;
	data.sg_len = 1;

	stop.opcode = MMC_STOP_TRANSMISSION;
	stop.arg = 0;
	stop.flags = MMC_RSP_R1B | MMC_CMD_AC;

	memset(&mrq, 0, sizeof(struct mmc_request));

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;


	printk(KERN_DEBUG "%s: 2\n", __func__);

	sg_init_one(&sg, buff, length);

	if (dir == MMC_DATA_WRITE) {
		local_irq_save(flags);
		/* the sg entry initialized above already maps buff, so
		 * this bounce copy is effectively a self-copy */
		sg_copy_from_buffer(&sg, data.sg_len, buff, length);
		local_irq_restore(flags);
	}
	printk(KERN_DEBUG "%s: 3\n", __func__);

	mmc_wait_for_req(card->host, &mrq);

	printk(KERN_DEBUG "%s: 4\n", __func__);

	if (cmd.error) {
		printk(KERN_DEBUG "%s: cmd.error=%d\n", __func__, cmd.error);
		return cmd.error;
	}

	if (data.error) {
		printk(KERN_DEBUG "%s: data.error=%d\n", __func__, data.error);
		return data.error;
	}

	err = mmc_wait_busy(card);
	printk(KERN_DEBUG "%s: 5\n", __func__);

	if (dir == MMC_DATA_READ) {
		local_irq_save(flags);
		sg_copy_to_buffer(&sg, data.sg_len, buff, length);
		local_irq_restore(flags);
	}

	if (err)
		return err;

	return 0;
}
Example #20
static int
qla4xxx_read_nvram(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t offset = 0;
	uint32_t len = 0;
	uint32_t total_len = 0;
	dma_addr_t nvram_dma;
	uint8_t *nvram = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 40xx adapters are supported */
	if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->reply_payload.payload_len;
	total_len = offset + len;

	/* total len should not be greater than max NVRAM size */
	if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
	    ((is_qla4022(ha) || is_qla4032(ha)) &&
	     total_len > QL40X2_NVRAM_SIZE)) {
		ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
			   " nvram size, offset=%d len=%d\n",
			   __func__, offset, len);
		goto leave;
	}

	nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
				   GFP_KERNEL);
	if (!nvram) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	rval = qla4xxx_get_nvram(ha, nvram_dma, offset, len);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get nvram failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    nvram, len);
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
leave:
	return rval;
}
Example #21
static int
qla4xxx_bsg_get_acb(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t acb_type = 0;
	uint32_t len = 0;
	dma_addr_t acb_dma;
	uint8_t *acb = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 4022 and above adapters are supported */
	if (is_qla4010(ha))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	acb_type = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->reply_payload.payload_len;
	if (len < sizeof(struct addr_ctrl_blk)) {
		ql4_printk(KERN_ERR, ha, "%s: invalid acb len %d\n",
			   __func__, len);
		rval = -EINVAL;
		goto leave;
	}

	acb = dma_alloc_coherent(&ha->pdev->dev, len, &acb_dma, GFP_KERNEL);
	if (!acb) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for acb "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	rval = qla4xxx_get_acb(ha, acb_dma, acb_type, len);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get acb failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    acb, len);
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, len, acb, acb_dma);
leave:
	return rval;
}
Example #22
static int ecdh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
	u64 *public_key;
	u64 *shared_secret = NULL;
	void *buf;
	size_t copied, nbytes, public_key_sz;
	int ret = -ENOMEM;

	nbytes = ctx->ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
	/* Public part is a point thus it has both coordinates */
	public_key_sz = 2 * nbytes;

	public_key = kmalloc(public_key_sz, GFP_KERNEL);
	if (!public_key)
		return -ENOMEM;

	if (req->src) {
		shared_secret = kmalloc(nbytes, GFP_KERNEL);
		if (!shared_secret)
			goto free_pubkey;

		/* from here on it's invalid parameters */
		ret = -EINVAL;

		/* must have exactly two points to be on the curve */
		if (public_key_sz != req->src_len)
			goto free_all;

		copied = sg_copy_to_buffer(req->src,
					   sg_nents_for_len(req->src,
							    public_key_sz),
					   public_key, public_key_sz);
		if (copied != public_key_sz)
			goto free_all;

		ret = crypto_ecdh_shared_secret(ctx->curve_id, ctx->ndigits,
						ctx->private_key, public_key,
						shared_secret);

		buf = shared_secret;
	} else {
		ret = ecc_make_pub_key(ctx->curve_id, ctx->ndigits,
				       ctx->private_key, public_key);
		buf = public_key;
		nbytes = public_key_sz;
	}

	if (ret < 0)
		goto free_all;

	/* might want less than we've got */
	nbytes = min_t(size_t, nbytes, req->dst_len);
	copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst,
								nbytes),
				     buf, nbytes);
	if (copied != nbytes)
		ret = -EINVAL;

	/* fall through */
free_all:
	kzfree(shared_secret);
free_pubkey:
	kfree(public_key);
	return ret;
}
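The copy-out at the end is defensive: sg_copy_from_buffer() reports how many bytes actually landed in the destination list, and a short copy becomes -EINVAL. Minimal sketch of that idiom (hypothetical helper):

static int copy_out_exact(struct scatterlist *dst, const void *src,
			  size_t nbytes)
{
	size_t copied;

	copied = sg_copy_from_buffer(dst, sg_nents_for_len(dst, nbytes),
				     src, nbytes);
	/* a destination sg shorter than nbytes yields a short copy */
	return copied == nbytes ? 0 : -EINVAL;
}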