static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
{
    size_t len = 0;
    int i;

    for (i = 0; i < data->sg_len; i++)
        len += data->sg[i].length;

    sg_copy_to_buffer(data->sg, data->sg_len, host->dma_buffer, len);
}
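/*
 * Read-side counterpart, sketched for symmetry (an assumption: the in-tree
 * wbsd driver has an equivalent helper, but this body is reconstructed from
 * the fields used above, not quoted): total the scatterlist length, then
 * scatter the DMA bounce buffer back into it.
 */
static inline void wbsd_dma_to_sg(struct wbsd_host *host, struct mmc_data *data)
{
    size_t len = 0;
    int i;

    for (i = 0; i < data->sg_len; i++)
        len += data->sg[i].length;

    sg_copy_from_buffer(data->sg, data->sg_len, host->dma_buffer, len);
}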
/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver.
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
    if (!mqrq->bounce_buf)
        return;
    if (rq_data_dir(mqrq->req) != WRITE)
        return;

    sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
                      mqrq->bounce_buf, mqrq->sg[0].length);
}
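/*
 * Sketch of the matching read-side hook (an assumption: the real queue code
 * pairs the pre-hook above with an mmc_queue_bounce_post() of this shape;
 * reconstructed, not quoted): after a READ completes, scatter the bounced
 * data back into the request's scatterlist.
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
    if (!mqrq->bounce_buf)
        return;
    if (rq_data_dir(mqrq->req) != READ)
        return;

    sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
                        mqrq->bounce_buf, mqrq->sg[0].length);
}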
/* Copy sg data, from to_skip to end, to dest, or vice versa. */
void dx_sg_copy_part(u8 *dest, struct scatterlist *sg,
                     int to_skip, unsigned int end,
                     enum dx_sg_cpy_direct direct)
{
    struct scatterlist t_sg;
    struct scatterlist *current_sg = sg;
    int sg_index, cpy_index;
    int nents;
    int lbytes;

    nents = sg_count_ents(sg, end, &lbytes);
    sg_index = current_sg->length;

    /* Walk the list until the entry that contains to_skip. */
    while (sg_index <= to_skip) {
        current_sg = scatterwalk_sg_next(current_sg);
        sg_index += current_sg->length;
        nents--;
    }
    /* Bytes of the current entry that lie past to_skip. */
    cpy_index = sg_index - to_skip;

    /* copy current sg to temporary */
    t_sg = *current_sg;
    /* update the offset in the sg entry to skip the consumed bytes */
    t_sg.offset += current_sg->length - cpy_index;

    /* copy the partial first entry */
    if (direct == DX_SG_TO_BUF)
        sg_copy_to_buffer(&t_sg, 1, dest, cpy_index);
    else
        sg_copy_from_buffer(&t_sg, 1, dest, cpy_index);

    current_sg = scatterwalk_sg_next(current_sg);
    nents--;

    /* copy the remaining, fully covered entries */
    if (end > sg_index) {
        if (direct == DX_SG_TO_BUF)
            sg_copy_to_buffer(current_sg, nents,
                              &dest[cpy_index], end - sg_index);
        else
            sg_copy_from_buffer(current_sg, nents,
                                &dest[cpy_index], end - sg_index);
    }
}
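/*
 * Hypothetical usage sketch for dx_sg_copy_part() (the function name
 * dx_sg_copy_part_example and the DX_SG_FROM_BUF direction value are
 * assumptions for illustration; only DX_SG_TO_BUF appears above): gather
 * bytes [16, 64) of a scatterlist into a linear buffer, transform them,
 * and scatter them back.
 */
static void dx_sg_copy_part_example(struct scatterlist *sg)
{
    u8 tmp[48];

    dx_sg_copy_part(tmp, sg, 16, 64, DX_SG_TO_BUF);   /* sg[16..63] -> tmp */
    /* ... transform tmp in place ... */
    dx_sg_copy_part(tmp, sg, 16, 64, DX_SG_FROM_BUF); /* tmp -> sg[16..63] */
}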
/**
 * fc_lport_ct_request() - Send CT Passthrough request
 * @job:   The BSG Passthrough job
 * @lport: The local port sending the request
 * @did:   The destination FC-ID
 * @tov:   The timeout period to wait for the response
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static int fc_lport_ct_request(struct fc_bsg_job *job,
                               struct fc_lport *lport, u32 did, u32 tov)
{
    struct fc_bsg_info *info;
    struct fc_frame *fp;
    struct fc_frame_header *fh;
    struct fc_ct_req *ct;
    size_t len;

    fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
                        job->request_payload.payload_len);
    if (!fp)
        return -ENOMEM;

    len = job->request_payload.payload_len;
    ct = fc_frame_payload_get(fp, len);

    sg_copy_to_buffer(job->request_payload.sg_list,
                      job->request_payload.sg_cnt,
                      ct, len);

    fh = fc_frame_header_get(fp);
    fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL;
    hton24(fh->fh_d_id, did);
    hton24(fh->fh_s_id, lport->port_id);
    fh->fh_type = FC_TYPE_CT;
    hton24(fh->fh_f_ctl, FC_FCTL_REQ);
    fh->fh_cs_ctl = 0;
    fh->fh_df_ctl = 0;
    fh->fh_parm_offset = 0;

    info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
    if (!info) {
        fc_frame_free(fp);
        return -ENOMEM;
    }

    info->job = job;
    info->lport = lport;
    info->rsp_code = FC_FS_ACC;
    info->nents = job->reply_payload.sg_cnt;
    info->sg = job->reply_payload.sg_list;

    if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
                                 NULL, info, tov)) {
        kfree(info);
        return -ECOMM;
    }
    return 0;
}
static int fc_lport_els_request(struct fc_bsg_job *job,
                                struct fc_lport *lport, u32 did, u32 tov)
{
    struct fc_bsg_info *info;
    struct fc_frame *fp;
    struct fc_frame_header *fh;
    char *pp;
    int len;

    fp = fc_frame_alloc(lport, job->request_payload.payload_len);
    if (!fp)
        return -ENOMEM;

    len = job->request_payload.payload_len;
    pp = fc_frame_payload_get(fp, len);

    sg_copy_to_buffer(job->request_payload.sg_list,
                      job->request_payload.sg_cnt,
                      pp, len);

    fh = fc_frame_header_get(fp);
    fh->fh_r_ctl = FC_RCTL_ELS_REQ;
    hton24(fh->fh_d_id, did);
    hton24(fh->fh_s_id, lport->port_id);
    fh->fh_type = FC_TYPE_ELS;
    hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT);
    fh->fh_cs_ctl = 0;
    fh->fh_df_ctl = 0;
    fh->fh_parm_offset = 0;

    info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
    if (!info) {
        fc_frame_free(fp);
        return -ENOMEM;
    }

    info->job = job;
    info->lport = lport;
    info->rsp_code = ELS_LS_ACC;
    info->nents = job->reply_payload.sg_cnt;
    info->sg = job->reply_payload.sg_list;

    if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
                                 NULL, info, tov)) {
        /* don't leak the response context, as the CT variant above */
        kfree(info);
        return -ECOMM;
    }
    return 0;
}
/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver.
 */
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
    unsigned long flags;

    if (!mq->bounce_buf)
        return;
    if (rq_data_dir(mq->req) != WRITE)
        return;

    local_irq_save(flags);
    sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
                      mq->bounce_buf, mq->sg[0].length);
    local_irq_restore(flags);
}
/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver.
 */
static void card_queue_bounce_pre(struct card_queue *cq)
{
    unsigned long flags;

    if (!cq->bounce_buf)
        return;
    if (rq_data_dir(cq->req) != WRITE)
        return;

    local_irq_save(flags);
    sg_copy_to_buffer(cq->bounce_sg, cq->bounce_sg_len,
                      cq->bounce_buf, cq->sg[0].length);
    local_irq_restore(flags);
}
static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
{
    struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
    struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
    struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
    unsigned int pad_len;
    unsigned int len;
    u8 *out_buf;

    if (err)
        goto out;

    len = req_ctx->child_req.dst_len;
    pad_len = ctx->key_size - len;

    /* Four billion to one */
    if (likely(!pad_len))
        goto out;

    out_buf = kzalloc(ctx->key_size, GFP_KERNEL);
    err = -ENOMEM;
    if (!out_buf)
        goto out;

    /*
     * The child op may strip leading zeros, leaving the result shorter
     * than key_size; rebuild the fixed-size output by copying the payload
     * into the tail of a zeroed buffer and scattering it back.
     */
    sg_copy_to_buffer(req->dst, sg_nents_for_len(req->dst, len),
                      out_buf + pad_len, len);
    sg_copy_from_buffer(req->dst,
                        sg_nents_for_len(req->dst, ctx->key_size),
                        out_buf, ctx->key_size);
    kzfree(out_buf);
    /* success: clear the -ENOMEM staged above */
    err = 0;

out:
    req->dst_len = ctx->key_size;

    kfree(req_ctx->in_buf);

    return err;
}
static int ufs_bsg_alloc_desc_buffer(struct ufs_hba *hba, struct bsg_job *job,
                                     uint8_t **desc_buff, int *desc_len,
                                     enum query_opcode desc_op)
{
    struct ufs_bsg_request *bsg_request = job->request;
    struct utp_upiu_query *qr;
    u8 *descp;

    if (desc_op != UPIU_QUERY_OPCODE_WRITE_DESC &&
        desc_op != UPIU_QUERY_OPCODE_READ_DESC)
        goto out;

    qr = &bsg_request->upiu_req.qr;
    if (ufs_bsg_get_query_desc_size(hba, desc_len, qr)) {
        dev_err(hba->dev, "Illegal desc size\n");
        return -EINVAL;
    }

    if (*desc_len > job->request_payload.payload_len) {
        dev_err(hba->dev, "Illegal desc size\n");
        return -EINVAL;
    }

    descp = kzalloc(*desc_len, GFP_KERNEL);
    if (!descp)
        return -ENOMEM;

    if (desc_op == UPIU_QUERY_OPCODE_WRITE_DESC)
        sg_copy_to_buffer(job->request_payload.sg_list,
                          job->request_payload.sg_cnt, descp,
                          *desc_len);

    *desc_buff = descp;

out:
    return 0;
}
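/*
 * Completion-side sketch (an assumption: reconstructed from the usual
 * bsg_job/ufs_bsg_reply layout, not quoted from ufs_bsg.c): once a
 * READ_DESC query finishes, the descriptor allocated above is scattered
 * back into the job's payload and the received length is recorded.
 */
static void ufs_bsg_copy_desc_to_reply(struct bsg_job *job, u8 *desc_buff,
                                       int desc_len)
{
    struct ufs_bsg_reply *bsg_reply = job->reply;

    bsg_reply->reply_payload_rcv_len =
        sg_copy_from_buffer(job->request_payload.sg_list,
                            job->request_payload.sg_cnt,
                            desc_buff, desc_len);
}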
static void sd_normal_rw(struct realtek_pci_sdmmc *host,
                         struct mmc_request *mrq)
{
    struct mmc_command *cmd = mrq->cmd;
    struct mmc_data *data = mrq->data;
    u8 _cmd[5], *buf;

    _cmd[0] = 0x40 | (u8)cmd->opcode;
    put_unaligned_be32(cmd->arg, (u32 *)(&_cmd[1]));

    buf = kzalloc(data->blksz, GFP_NOIO);
    if (!buf) {
        cmd->error = -ENOMEM;
        return;
    }

    if (data->flags & MMC_DATA_READ) {
        if (host->initial_mode)
            sd_disable_initial_mode(host);

        cmd->error = sd_read_data(host, _cmd, (u16)data->blksz, buf,
                                  data->blksz, 200);

        if (host->initial_mode)
            sd_enable_initial_mode(host);

        sg_copy_from_buffer(data->sg, data->sg_len, buf, data->blksz);
    } else {
        sg_copy_to_buffer(data->sg, data->sg_len, buf, data->blksz);

        cmd->error = sd_write_data(host, _cmd, (u16)data->blksz, buf,
                                   data->blksz, 200);
    }

    kfree(buf);
}
unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
                                         struct beiscsi_hba *phba,
                                         struct bsg_job *job,
                                         struct be_dma_mem *nonemb_cmd)
{
    struct be_cmd_resp_hdr *resp;
    struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
    struct be_sge *mcc_sge = nonembedded_sgl(wrb);
    unsigned int tag = 0;
    struct iscsi_bsg_request *bsg_req = job->request;
    struct be_bsg_vendor_cmd *req = nonemb_cmd->va;
    unsigned short region, sector_size, sector, offset;

    nonemb_cmd->size = job->request_payload.payload_len;
    memset(nonemb_cmd->va, 0, nonemb_cmd->size);
    resp = nonemb_cmd->va;
    region = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
    sector_size = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
    sector = bsg_req->rqst_data.h_vendor.vendor_cmd[3];
    offset = bsg_req->rqst_data.h_vendor.vendor_cmd[4];
    req->region = region;
    req->sector = sector;
    req->offset = offset;
    spin_lock(&ctrl->mbox_lock);
    memset(wrb, 0, sizeof(*wrb));

    switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
    case BEISCSI_WRITE_FLASH:
        offset = sector * sector_size + offset;
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
                           OPCODE_COMMON_WRITE_FLASH, sizeof(*req));
        sg_copy_to_buffer(job->request_payload.sg_list,
                          job->request_payload.sg_cnt,
                          nonemb_cmd->va + offset, job->request_len);
        break;
    case BEISCSI_READ_FLASH:
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
                           OPCODE_COMMON_READ_FLASH, sizeof(*req));
        break;
    default:
        beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
                    "BG_%d : Unsupported cmd = 0x%x\n",
                    bsg_req->rqst_data.h_vendor.vendor_cmd[0]);
        spin_unlock(&ctrl->mbox_lock);
        return -ENOSYS;
    }

    tag = alloc_mcc_tag(phba);
    if (!tag) {
        spin_unlock(&ctrl->mbox_lock);
        return tag;
    }

    be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false,
                       job->request_payload.sg_cnt);
    mcc_sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
    mcc_sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
    mcc_sge->len = cpu_to_le32(nonemb_cmd->size);
    wrb->tag0 |= tag;

    be_mcc_notify(phba);

    spin_unlock(&ctrl->mbox_lock);
    return tag;
}
static int qla4xxx_update_flash(struct bsg_job *bsg_job)
{
    struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
    struct scsi_qla_host *ha = to_qla_host(host);
    struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
    struct iscsi_bsg_request *bsg_req = bsg_job->request;
    uint32_t length = 0;
    uint32_t offset = 0;
    uint32_t options = 0;
    dma_addr_t flash_dma;
    uint8_t *flash = NULL;
    int rval = -EINVAL;

    bsg_reply->reply_payload_rcv_len = 0;

    if (unlikely(pci_channel_offline(ha->pdev)))
        goto leave;

    if (ql4xxx_reset_active(ha)) {
        ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
        rval = -EBUSY;
        goto leave;
    }

    if (ha->flash_state != QLFLASH_WAITING) {
        ql4_printk(KERN_ERR, ha, "%s: another flash operation active\n",
                   __func__);
        rval = -EBUSY;
        goto leave;
    }

    ha->flash_state = QLFLASH_WRITING;
    length = bsg_job->request_payload.payload_len;
    offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
    options = bsg_req->rqst_data.h_vendor.vendor_cmd[2];

    flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
                               GFP_KERNEL);
    if (!flash) {
        ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash data\n",
                   __func__);
        rval = -ENOMEM;
        goto leave;
    }

    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
                      bsg_job->request_payload.sg_cnt, flash, length);

    rval = qla4xxx_set_flash(ha, flash_dma, offset, length, options);
    if (rval) {
        ql4_printk(KERN_ERR, ha, "%s: set flash failed\n", __func__);
        bsg_reply->result = DID_ERROR << 16;
        rval = -EIO;
    } else {
        bsg_reply->result = DID_OK << 16;
    }

    bsg_job_done(bsg_job, bsg_reply->result,
                 bsg_reply->reply_payload_rcv_len);
    dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
leave:
    ha->flash_state = QLFLASH_WAITING;
    return rval;
}
int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
                               struct scatterlist *src, unsigned int nbytes,
                               unsigned int block_size, gfp_t flags)
{
    struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
    struct device *dev = drvdata_to_dev(drvdata);
    u8 *curr_buff = cc_hash_buf(areq_ctx);
    u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
    u8 *next_buff = cc_next_buf(areq_ctx);
    u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
    struct mlli_params *mlli_params = &areq_ctx->mlli_params;
    unsigned int update_data_len;
    u32 total_in_len = nbytes + *curr_buff_cnt;
    struct buffer_array sg_data;
    struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
    unsigned int swap_index = 0;
    int rc = 0;
    u32 dummy = 0;
    u32 mapped_nents = 0;

    dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
            curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
    /* Init the type of the dma buffer */
    areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
    mlli_params->curr_pool = NULL;
    areq_ctx->curr_sg = NULL;
    sg_data.num_of_buffers = 0;
    areq_ctx->in_nents = 0;

    if (total_in_len < block_size) {
        dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
                curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
        areq_ctx->in_nents = cc_get_sgl_nents(dev, src, nbytes,
                                              &dummy, NULL);
        sg_copy_to_buffer(src, areq_ctx->in_nents,
                          &curr_buff[*curr_buff_cnt], nbytes);
        *curr_buff_cnt += nbytes;
        return 1;
    }

    /* Calculate the residue size */
    *next_buff_cnt = total_in_len & (block_size - 1);
    /* update data len */
    update_data_len = total_in_len - *next_buff_cnt;

    dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
            *next_buff_cnt, update_data_len);

    /* Copy the new residue to next buffer */
    if (*next_buff_cnt) {
        dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
                next_buff, (update_data_len - *curr_buff_cnt),
                *next_buff_cnt);
        cc_copy_sg_portion(dev, next_buff, src,
                           (update_data_len - *curr_buff_cnt),
                           nbytes, CC_SG_TO_BUF);
        /* change the buffer index for next operation */
        swap_index = 1;
    }

    if (*curr_buff_cnt) {
        rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
                             &sg_data);
        if (rc)
            return rc;
        /* change the buffer index for next operation */
        swap_index = 1;
    }

    if (update_data_len > *curr_buff_cnt) {
        rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
                       DMA_TO_DEVICE, &areq_ctx->in_nents,
                       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
                       &mapped_nents);
        if (rc)
            goto unmap_curr_buff;
        if (mapped_nents == 1 &&
            areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
            /* only one entry in the SG and no previous data */
            memcpy(areq_ctx->buff_sg, src,
                   sizeof(struct scatterlist));
            areq_ctx->buff_sg->length = update_data_len;
            areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
            areq_ctx->curr_sg = areq_ctx->buff_sg;
        } else {
            areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
        }
    }

    if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
        mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
        /* add the src data to the sg_data */
        cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
                        (update_data_len - *curr_buff_cnt), 0, true,
                        &areq_ctx->mlli_nents);
        rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
        if (rc)
            goto fail_unmap_din;
    }

    areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);

    return 0;

fail_unmap_din:
    dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
    if (*curr_buff_cnt)
        dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

    return rc;
}
static int mmc_rpmb_send_command(struct mmc_card *card, u8 *buf, __u16 blks,
                                 __u16 type, u8 req_type)
{
    struct mmc_request mrq = {NULL};
    struct mmc_command cmd = {0};
    struct mmc_command sbc = {0};
    struct mmc_data data = {0};
    struct scatterlist sg;
    u8 *transfer_buf = NULL;

    mrq.sbc = &sbc;
    mrq.cmd = &cmd;
    mrq.data = &data;
    mrq.stop = NULL;
    transfer_buf = kzalloc(512 * blks, GFP_KERNEL);
    if (!transfer_buf)
        return -ENOMEM;

    /*
     * set CMD23
     */
    sbc.opcode = MMC_SET_BLOCK_COUNT;
    sbc.arg = blks;
    if ((req_type == RPMB_REQ) && type == RPMB_WRITE_DATA)
        sbc.arg |= 1 << 31;
    sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;

    /*
     * set CMD25/18
     */
    sg_init_one(&sg, transfer_buf, 512 * blks);
    if (req_type == RPMB_REQ) {
        cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
        sg_copy_from_buffer(&sg, 1, buf, 512 * blks);
        data.flags |= MMC_DATA_WRITE;
    } else {
        cmd.opcode = MMC_READ_MULTIPLE_BLOCK;
        data.flags |= MMC_DATA_READ;
    }
    cmd.arg = 0;
    cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
    data.blksz = 512;
    data.blocks = blks;
    data.sg = &sg;
    data.sg_len = 1;

    mmc_set_data_timeout(&data, card);

    mmc_wait_for_req(card->host, &mrq);

    if (req_type != RPMB_REQ)
        sg_copy_to_buffer(&sg, 1, buf, 512 * blks);

    kfree(transfer_buf);

    if (cmd.error)
        return cmd.error;
    if (data.error)
        return data.error;
    return 0;
}
static int qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int ret = 0;
    uint32_t len;
    uint32_t oper;

    bsg_job->reply->reply_payload_rcv_len = 0;

    if (!IS_QLA24XX_TYPE(ha) || !IS_QLA25XX(ha)) {
        ret = -EINVAL;
        goto exit_fcp_prio_cfg;
    }

    if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
        test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
        test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
        ret = -EBUSY;
        goto exit_fcp_prio_cfg;
    }

    /* Get the sub command */
    oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

    /* Only set config is allowed if config memory is not allocated */
    if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
        ret = -EINVAL;
        goto exit_fcp_prio_cfg;
    }

    switch (oper) {
    case QLFC_FCP_PRIO_DISABLE:
        if (ha->flags.fcp_prio_enabled) {
            ha->flags.fcp_prio_enabled = 0;
            ha->fcp_prio_cfg->attributes &= ~FCP_PRIO_ATTR_ENABLE;
            qla24xx_update_all_fcp_prio(vha);
            bsg_job->reply->result = DID_OK;
        } else {
            ret = -EINVAL;
            bsg_job->reply->result = (DID_ERROR << 16);
            goto exit_fcp_prio_cfg;
        }
        break;

    case QLFC_FCP_PRIO_ENABLE:
        if (!ha->flags.fcp_prio_enabled) {
            if (ha->fcp_prio_cfg) {
                ha->flags.fcp_prio_enabled = 1;
                ha->fcp_prio_cfg->attributes |= FCP_PRIO_ATTR_ENABLE;
                qla24xx_update_all_fcp_prio(vha);
                bsg_job->reply->result = DID_OK;
            } else {
                ret = -EINVAL;
                bsg_job->reply->result = (DID_ERROR << 16);
                goto exit_fcp_prio_cfg;
            }
        }
        break;

    case QLFC_FCP_PRIO_GET_CONFIG:
        len = bsg_job->reply_payload.payload_len;
        if (!len || len > FCP_PRIO_CFG_SIZE) {
            ret = -EINVAL;
            bsg_job->reply->result = (DID_ERROR << 16);
            goto exit_fcp_prio_cfg;
        }

        bsg_job->reply->result = DID_OK;
        bsg_job->reply->reply_payload_rcv_len = sg_copy_from_buffer(
            bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg, len);
        break;

    case QLFC_FCP_PRIO_SET_CONFIG:
        len = bsg_job->request_payload.payload_len;
        if (!len || len > FCP_PRIO_CFG_SIZE) {
            bsg_job->reply->result = (DID_ERROR << 16);
            ret = -EINVAL;
            goto exit_fcp_prio_cfg;
        }

        if (!ha->fcp_prio_cfg) {
            ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
            if (!ha->fcp_prio_cfg) {
                qla_printk(KERN_WARNING, ha,
                           "Unable to allocate memory for fcp prio config data (%x).\n",
                           FCP_PRIO_CFG_SIZE);
                bsg_job->reply->result = (DID_ERROR << 16);
                ret = -ENOMEM;
                goto exit_fcp_prio_cfg;
            }
        }

        memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
                          bsg_job->request_payload.sg_cnt,
                          ha->fcp_prio_cfg, FCP_PRIO_CFG_SIZE);

        /* validate fcp priority data */
        if (!qla24xx_fcp_prio_cfg_valid(
            (struct qla_fcp_prio_cfg *)ha->fcp_prio_cfg, 1)) {
            bsg_job->reply->result = (DID_ERROR << 16);
            ret = -EINVAL;
            /*
             * If the buffer was invalid, fcp_prio_cfg is of no use.
             */
            vfree(ha->fcp_prio_cfg);
            ha->fcp_prio_cfg = NULL;
            goto exit_fcp_prio_cfg;
        }

        ha->flags.fcp_prio_enabled = 0;
        if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
            ha->flags.fcp_prio_enabled = 1;
        qla24xx_update_all_fcp_prio(vha);
        bsg_job->reply->result = DID_OK;
        break;

    default:
        ret = -EINVAL;
        break;
    }

exit_fcp_prio_cfg:
    bsg_job->job_done(bsg_job);
    return ret;
}
void mmc_encrypt_req(struct mmc_host *host, struct mmc_request *mrq)
{
    struct crypto_ablkcipher *tfm;
    struct ablkcipher_request *req;
    struct mmc_tcrypt_result result;
    struct scatterlist *in_sg = mrq->data->sg;
    struct scatterlist *out_sg = NULL;
    u8 *dst_data = NULL;
    unsigned long data_len = 0;
    uint32_t bytes = 0;
    int rc = 0;
    u8 IV[MMC_AES_XTS_IV_LEN];
    sector_t sector = mrq->data->sector;

    tfm = crypto_alloc_ablkcipher("xts(aes)", 0, 0);
    if (IS_ERR(tfm)) {
        pr_err("%s:%s ablkcipher tfm allocation failed : error = %lu\n",
               mmc_hostname(host), __func__, PTR_ERR(tfm));
        return;
    }

    req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
    if (!req) {
        pr_err("%s:%s ablkcipher request allocation failed\n",
               mmc_hostname(host), __func__);
        goto ablkcipher_req_alloc_failure;
    }

    ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                    mmc_crypto_cipher_complete, &result);

    init_completion(&result.completion);
    qcrypto_cipher_set_flag(req,
                            QCRYPTO_CTX_USE_PIPE_KEY |
                            QCRYPTO_CTX_XTS_DU_SIZE_512B);
    crypto_ablkcipher_clear_flags(tfm, ~0);
    crypto_ablkcipher_setkey(tfm, NULL, MMC_KEY_SIZE_XTS);

    data_len = mrq->data->blksz * mrq->data->blocks;
    if (data_len > MMC_512_KB) {
        pr_err("%s:%s Encryption operation aborted: req size > 512K\n",
               mmc_hostname(host), __func__);
        goto crypto_operation_failure;
    }

    if (mmc_crypto_buf_idx != MAX_ENCRYPTION_BUFFERS) {
        dst_data = mmc_crypto_bufs[mmc_crypto_buf_idx];
        out_sg = mmc_crypto_out_sg[mmc_crypto_buf_idx];
        mmc_crypto_buf_idx = 1 - mmc_crypto_buf_idx;
    } else {
        pr_err("%s:%s encryption buffers not available\n",
               mmc_hostname(host), __func__);
        goto crypto_operation_failure;
    }

    bytes = sg_copy_to_buffer(in_sg, mrq->data->sg_len,
                              dst_data, data_len);
    if (bytes != data_len) {
        pr_err("%s:%s error in copying data from sglist to buffer\n",
               mmc_hostname(host), __func__);
        goto crypto_operation_failure;
    }

    if (!mmc_copy_sglist(in_sg, mrq->data->sg_len, out_sg, dst_data)) {
        pr_err("%s:%s could not create dst sglist from in sglist\n",
               mmc_hostname(host), __func__);
        goto crypto_operation_failure;
    }

    memset(IV, 0, MMC_AES_XTS_IV_LEN);
    memcpy(IV, &sector, sizeof(sector_t));

    ablkcipher_request_set_crypt(req, in_sg, out_sg, data_len,
                                 (void *)IV);

    rc = crypto_ablkcipher_encrypt(req);

    switch (rc) {
    case 0:
        break;
    case -EBUSY:
        /*
         * Lets make this synchronous request by waiting on
         * in progress as well
         */
    case -EINPROGRESS:
        wait_for_completion_interruptible(&result.completion);
        if (result.err)
            pr_err("%s:%s error = %d encrypting the request\n",
                   mmc_hostname(host), __func__, result.err);
        break;
    default:
        goto crypto_operation_failure;
    }

    mrq->data->sg = out_sg;
    mrq->data->sg_len = mmc_count_sg(out_sg, data_len);

crypto_operation_failure:
    ablkcipher_request_free(req);

ablkcipher_req_alloc_failure:
    crypto_free_ablkcipher(tfm);

    return;
}
int map_ahash_request_update(struct device *dev, struct ahash_request *req)
{
    struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
    struct ahash_req_ctx *areq_ctx = ahash_request_ctx(req);
    uint8_t *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
                                                areq_ctx->buff0;
    uint32_t *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
                                                     &areq_ctx->buff0_cnt;
    uint8_t *next_buff = areq_ctx->buff_index ? areq_ctx->buff0 :
                                                areq_ctx->buff1;
    uint32_t *next_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
                                                     &areq_ctx->buff1_cnt;
    unsigned int block_size = crypto_tfm_alg_blocksize(&ahash->base);
    struct mlli_params *mlli_params = &areq_ctx->mlli_params;
    unsigned int update_data_len;
    int total_in_len = req->nbytes + *curr_buff_cnt;
    struct sg_data_array sg_data;
    struct buff_mgr_handle *buff_mgr = crypto_drvdata->buff_mgr_handle;
    unsigned int swap_index = 0;
    int dummy = 0;

    DX_LOG_DEBUG(" update params : curr_buff =0x%X curr_buff_cnt=0x%X "
                 "req->nbytes = 0x%X req->src = 0x%X curr_index = %d\n",
                 (uint32_t)curr_buff, *curr_buff_cnt, req->nbytes,
                 (uint32_t)req->src, areq_ctx->buff_index);

    /* Init the type of the dma buffer */
    areq_ctx->data_dma_buf_type = DX_DMA_BUF_NULL;
    mlli_params->curr_pool = NULL;
    areq_ctx->curr_sg = NULL;
    sg_data.num_of_sg = 0;
    areq_ctx->in_nents = 0;

    if (unlikely(total_in_len < block_size)) {
        DX_LOG_DEBUG(" less than one block: curr_buff =0x%X "
                     "*curr_buff_cnt =0x%x copy_to =0x%X\n",
                     (uint32_t)curr_buff, (uint32_t)*curr_buff_cnt,
                     (uint32_t)&curr_buff[*curr_buff_cnt]);
        areq_ctx->in_nents = sg_count_ents(req->src, req->nbytes, &dummy);
        sg_copy_to_buffer(req->src, areq_ctx->in_nents,
                          &curr_buff[*curr_buff_cnt], req->nbytes);
        *curr_buff_cnt += req->nbytes;
        return 1;
    }

    /* Calculate the residue size */
    *next_buff_cnt = total_in_len & (block_size - 1);
    /* update data len */
    update_data_len = total_in_len - *next_buff_cnt;
    DX_LOG_DEBUG(" temp length : *next_buff_cnt =0x%X update_data_len=0x%X\n",
                 (uint32_t)*next_buff_cnt, update_data_len);

    /* Copy the new residue to next buffer */
    if (*next_buff_cnt != 0) {
        DX_LOG_DEBUG(" handle residue: next buff %x skip data %x residue %x\n",
                     (unsigned int)next_buff,
                     (update_data_len - *curr_buff_cnt), *next_buff_cnt);
        dx_sg_copy_part(next_buff, req->src,
                        (update_data_len - *curr_buff_cnt),
                        req->nbytes, DX_SG_TO_BUF);
        /* change the buffer index for next operation */
        swap_index = 1;
    }

    if (*curr_buff_cnt != 0) {
        if (dx_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
                                     *curr_buff_cnt, &sg_data))
            return -ENOMEM;
        /* change the buffer index for next operation */
        swap_index = 1;
    }

    if (update_data_len > *curr_buff_cnt) {
        if (unlikely(dx_map_sg(dev, req->src,
                               (update_data_len - *curr_buff_cnt),
                               DMA_TO_DEVICE, &areq_ctx->in_nents,
                               LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy)))
            goto unmap_curr_buff;
        if ((areq_ctx->in_nents == 1) &&
            (areq_ctx->data_dma_buf_type == DX_DMA_BUF_NULL)) {
            /* only one entry in the SG and no previous data */
            memcpy(areq_ctx->buff_sg, req->src,
                   sizeof(struct scatterlist));
            areq_ctx->buff_sg->length = update_data_len;
            areq_ctx->data_dma_buf_type = DX_DMA_BUF_DLLI;
            areq_ctx->curr_sg = areq_ctx->buff_sg;
        } else {
            areq_ctx->data_dma_buf_type = DX_DMA_BUF_MLLI;
        }
    }

    if (unlikely(areq_ctx->data_dma_buf_type == DX_DMA_BUF_MLLI)) {
        mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
        /* add the src data to the sg_data */
        buffer_mgr_set_sg_entry(&sg_data, areq_ctx->in_nents, req->src,
                                (update_data_len - *curr_buff_cnt), true);
        if (unlikely(buffer_mgr_build_mlli(dev, &sg_data, mlli_params)))
            goto fail_unmap_din;
    }

    areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);

    DX_LOG_DEBUG(" buf type = %s\n",
                 dx_get_buff_type(areq_ctx->data_dma_buf_type));
    return 0;

fail_unmap_din:
    dma_unmap_sg(dev, req->src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
    if (*curr_buff_cnt != 0)
        dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

    return -ENOMEM;
}
static int ecdh_compute_value(struct kpp_request *req)
{
    struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
    struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
    u64 *public_key;
    u64 *shared_secret = NULL;
    void *buf;
    size_t copied, nbytes, public_key_sz;
    int ret = -ENOMEM;

    nbytes = ctx->ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
    /* Public part is a point thus it has both coordinates */
    public_key_sz = 2 * nbytes;

    public_key = kmalloc(public_key_sz, GFP_KERNEL);
    if (!public_key)
        return -ENOMEM;

    if (req->src) {
        shared_secret = kmalloc(nbytes, GFP_KERNEL);
        if (!shared_secret)
            goto free_pubkey;

        /* from here on it's invalid parameters */
        ret = -EINVAL;

        /* must have exactly two points to be on the curve */
        if (public_key_sz != req->src_len)
            goto free_all;

        copied = sg_copy_to_buffer(req->src,
                                   sg_nents_for_len(req->src,
                                                    public_key_sz),
                                   public_key, public_key_sz);
        if (copied != public_key_sz)
            goto free_all;

        ret = crypto_ecdh_shared_secret(ctx->curve_id, ctx->ndigits,
                                        ctx->private_key, public_key,
                                        shared_secret);
        buf = shared_secret;
    } else {
        ret = ecc_make_pub_key(ctx->curve_id, ctx->ndigits,
                               ctx->private_key, public_key);
        buf = public_key;
        nbytes = public_key_sz;
    }

    if (ret < 0)
        goto free_all;

    /* might want less than we've got */
    nbytes = min_t(size_t, nbytes, req->dst_len);
    copied = sg_copy_from_buffer(req->dst,
                                 sg_nents_for_len(req->dst, nbytes),
                                 buf, nbytes);
    if (copied != nbytes)
        ret = -EINVAL;

    /* fall through */
free_all:
    kzfree(shared_secret);
free_pubkey:
    kfree(public_key);
    return ret;
}
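/*
 * Minimal, self-contained sketch of the two helpers every function in this
 * file leans on (demo_sg_roundtrip is a hypothetical name, not from any
 * driver): sg_copy_to_buffer() gathers a scatterlist into a linear kernel
 * buffer, sg_copy_from_buffer() scatters it back, and both return the
 * number of bytes actually copied, which is why callers above compare the
 * return value against the expected length.
 */
static int demo_sg_roundtrip(void)
{
    struct scatterlist sg;
    u8 *backing, *linear;
    size_t copied;
    int ret = -ENOMEM;

    backing = kmalloc(PAGE_SIZE, GFP_KERNEL);
    linear = kmalloc(PAGE_SIZE, GFP_KERNEL);
    if (!backing || !linear)
        goto out;

    sg_init_one(&sg, backing, PAGE_SIZE);

    /* gather: scatterlist -> linear buffer */
    copied = sg_copy_to_buffer(&sg, 1, linear, PAGE_SIZE);
    if (copied != PAGE_SIZE) {
        ret = -EIO;
        goto out;
    }

    /* ... transform the linear copy ... */

    /* scatter: linear buffer -> scatterlist */
    copied = sg_copy_from_buffer(&sg, 1, linear, PAGE_SIZE);
    ret = (copied == PAGE_SIZE) ? 0 : -EIO;

out:
    kfree(backing);
    kfree(linear);
    return ret;
}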
static int qla4xxx_update_nvram(struct bsg_job *bsg_job)
{
    struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
    struct scsi_qla_host *ha = to_qla_host(host);
    struct iscsi_bsg_request *bsg_req = bsg_job->request;
    struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
    uint32_t offset = 0;
    uint32_t len = 0;
    uint32_t total_len = 0;
    dma_addr_t nvram_dma;
    uint8_t *nvram = NULL;
    int rval = -EINVAL;

    bsg_reply->reply_payload_rcv_len = 0;

    if (unlikely(pci_channel_offline(ha->pdev)))
        goto leave;

    if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
        goto leave;

    if (ql4xxx_reset_active(ha)) {
        ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
        rval = -EBUSY;
        goto leave;
    }

    offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
    len = bsg_job->request_payload.payload_len;
    total_len = offset + len;

    /* total len should not be greater than max NVRAM size */
    if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
        ((is_qla4022(ha) || is_qla4032(ha)) &&
         total_len > QL40X2_NVRAM_SIZE)) {
        ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max nvram size, offset=%d len=%d\n",
                   __func__, offset, len);
        goto leave;
    }

    nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
                               GFP_KERNEL);
    if (!nvram) {
        ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram data\n",
                   __func__);
        rval = -ENOMEM;
        goto leave;
    }

    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
                      bsg_job->request_payload.sg_cnt, nvram, len);

    rval = qla4xxx_set_nvram(ha, nvram_dma, offset, len);
    if (rval) {
        ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__);
        bsg_reply->result = DID_ERROR << 16;
        rval = -EIO;
    } else {
        bsg_reply->result = DID_OK << 16;
    }

    bsg_job_done(bsg_job, bsg_reply->result,
                 bsg_reply->reply_payload_rcv_len);
    dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
leave:
    return rval;
}
static int mmc_test_transfer(struct mmc_test_card *test,
                             struct scatterlist *sg, unsigned sg_len,
                             unsigned dev_addr, unsigned blocks,
                             unsigned blksz, int write)
{
    int ret, i;
    unsigned long flags;

    if (write) {
        for (i = 0; i < blocks * blksz; i++)
            test->scratch[i] = i;
    } else {
        memset(test->scratch, 0, BUFFER_SIZE);
    }
    local_irq_save(flags);
    sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
    local_irq_restore(flags);

    ret = mmc_test_set_blksize(test, blksz);
    if (ret)
        return ret;

    ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
                                   blocks, blksz, write);
    if (ret)
        return ret;

    if (write) {
        int sectors;

        ret = mmc_test_set_blksize(test, 512);
        if (ret)
            return ret;

        sectors = (blocks * blksz + 511) / 512;
        if ((sectors * 512) == (blocks * blksz))
            sectors++;

        if ((sectors * 512) > BUFFER_SIZE)
            return -EINVAL;

        memset(test->buffer, 0, sectors * 512);

        for (i = 0; i < sectors; i++) {
            ret = mmc_test_buffer_transfer(test,
                                           test->buffer + i * 512,
                                           dev_addr + i, 512, 0);
            if (ret)
                return ret;
        }

        for (i = 0; i < blocks * blksz; i++) {
            if (test->buffer[i] != (u8)i)
                return RESULT_FAIL;
        }

        for (; i < sectors * 512; i++) {
            if (test->buffer[i] != 0xDF)
                return RESULT_FAIL;
        }
    } else {
        local_irq_save(flags);
        sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
        local_irq_restore(flags);
        for (i = 0; i < blocks * blksz; i++) {
            if (test->scratch[i] != (u8)i)
                return RESULT_FAIL;
        }
    }

    return 0;
}
static int CPRM_CMD_SecureMultiRW(struct mmc_card *card,
                                  unsigned int command,
                                  unsigned int dir,
                                  unsigned long arg,
                                  unsigned char *buff,
                                  unsigned int length)
{
    int err;
    struct mmc_request mrq;
    struct mmc_command cmd;
    struct mmc_command stop;
    struct mmc_data data;
    unsigned long flags;
    struct scatterlist sg;

    memset(&cmd, 0, sizeof(struct mmc_command));
    memset(&stop, 0, sizeof(struct mmc_command));

    cmd.opcode = MMC_APP_CMD;
    cmd.arg = card->rca << 16;
    cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

    err = mmc_wait_for_cmd(card->host, &cmd, 0);
    if (err)
        return (u32)-1;
    if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
        return (u32)-1;

    printk(KERN_DEBUG "CPRM_CMD_SecureRW: 1\n");

    memset(&cmd, 0, sizeof(struct mmc_command));

    cmd.opcode = command;
    if (command == SD_ACMD43_GET_MKB)
        cmd.arg = arg;
    else
        cmd.arg = 0;
    cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

    memset(&data, 0, sizeof(struct mmc_data));

    data.timeout_ns = 100000000;
    data.timeout_clks = 0;
#if defined(CONFIG_TARGET_LOCALE_NTT)
    data.timeout_ns = 100000000;
    data.timeout_clks = 0;
#endif
    data.blksz = 512;
    data.blocks = (length + 511) / 512;
    data.flags = dir;
    data.sg = &sg;
    data.sg_len = 1;

    stop.opcode = MMC_STOP_TRANSMISSION;
    stop.arg = 0;
    stop.flags = MMC_RSP_R1B | MMC_CMD_AC;

    memset(&mrq, 0, sizeof(struct mmc_request));

    mrq.cmd = &cmd;
    mrq.data = &data;
    mrq.stop = &stop;

    printk(KERN_DEBUG "CPRM_CMD_SecureRW: 2\n");

    sg_init_one(&sg, buff, length);

    if (dir == MMC_DATA_WRITE) {
        local_irq_save(flags);
        sg_copy_from_buffer(&sg, data.sg_len, buff, length);
        local_irq_restore(flags);
    }

    printk(KERN_DEBUG "CPRM_CMD_SecureRW: 3\n");
    mmc_wait_for_req(card->host, &mrq);
    printk(KERN_DEBUG "CPRM_CMD_SecureRW: 4\n");

    if (cmd.error) {
        printk(KERN_DEBUG "%s]cmd.error=%d\n", __func__, cmd.error);
        return cmd.error;
    }
    if (data.error) {
        printk(KERN_DEBUG "%s]data.error=%d\n", __func__, data.error);
        return data.error;
    }

    err = mmc_wait_busy(card);
    printk(KERN_DEBUG "CPRM_CMD_SecureRW: 5\n");

    if (dir == MMC_DATA_READ) {
        local_irq_save(flags);
        sg_copy_to_buffer(&sg, data.sg_len, buff, length);
        local_irq_restore(flags);
    }

    if (err)
        return err;

    return 0;
}