/*
 * srp_direct_data() - perform RDMA described by one SRP direct buffer
 *	descriptor.
 * @sc:      SCST command being processed.
 * @md:      SRP direct data buffer descriptor; @md->len is big-endian.
 * @dir:     transfer direction from the target's point of view
 *	     (DMA_TO_DEVICE means the initiator is writing to us).
 * @rdma_io: callback that performs the actual RDMA transfer.
 * @dma_map: non-zero if the scatterlist must be DMA-mapped around the I/O.
 *
 * Returns the rdma_io() result, or -ENOMEM if DMA mapping fails.
 */
static int srp_direct_data(struct scst_cmd *sc, struct srp_direct_buf *md,
			   enum dma_data_direction dir, srp_rdma_t rdma_io,
			   int dma_map)
{
	struct iu_entry *iue = NULL;
	struct scatterlist *sg = NULL;
	int err, nsg = 0, len, sg_cnt;
	u32 tsize;
	enum dma_data_direction dma_dir;

	iue = scst_cmd_get_tgt_priv(sc);

	if (dir == DMA_TO_DEVICE) {
		/* Initiator write: data arrives from the wire into memory. */
		scst_cmd_get_write_fields(sc, &sg, &sg_cnt);
		tsize = scst_cmd_get_bufflen(sc);
		dma_dir = DMA_FROM_DEVICE;
	} else {
		/* Initiator read: data leaves memory toward the wire. */
		sg = scst_cmd_get_sg(sc);
		sg_cnt = scst_cmd_get_sg_cnt(sc);
		tsize = scst_cmd_get_adjusted_resp_data_len(sc);
		dma_dir = DMA_TO_DEVICE;
	}

	dprintk("%p %u %u %d\n", iue, tsize, be32_to_cpu(md->len), sg_cnt);

	/* Transfer no more than both the local buffer and the remote
	 * descriptor can hold. */
	len = min(tsize, be32_to_cpu(md->len));

	if (dma_map) {
		nsg = dma_map_sg(iue->target->dev, sg, sg_cnt, dma_dir);
		if (!nsg) {
			/*
			 * Fix: do not pass KERN_ERR into eprintk() — the
			 * eprintk() wrapper supplies its own level/prefix, so
			 * the marker would be embedded mid-message.  This also
			 * matches the eprintk() calls in srp_indirect_data().
			 */
			eprintk("fail to map %p %d\n", iue, sg_cnt);
			return -ENOMEM;
		}
	}

	err = rdma_io(sc, sg, nsg, md, 1, dir, len);

	if (dma_map)
		dma_unmap_sg(iue->target->dev, sg, nsg, dma_dir);

	return err;
}
/*
 * Debug: dump command.
 *
 * Emits a multi-line summary of an SCST command to the kernel log for I/O
 * debugging.  Every line is tagged with a per-call rolling 8-bit serial
 * number so lines of one dump can be correlated in an interleaved log.
 * No-op unless the FT_DEBUG_IO bit is set in ft_debug_logging.
 */
void ft_cmd_dump(struct scst_cmd *cmd, const char *caller)
{
	static atomic_t serial;		/* rolling dump serial, shared by all callers */
	struct ft_cmd *fcmd;
	struct fc_frame_header *fh;
	char prefix[30];		/* "<module>: cmd NN" tag for every line */
	char buf[150];			/* reused: flag-name list, then hex-dump prefix */

	if (!(ft_debug_logging & FT_DEBUG_IO))
		return;

	fcmd = scst_cmd_get_tgt_priv(cmd);
	fh = fc_frame_header_get(fcmd->req_frame);

	/* & 0xff keeps the serial to two hex digits as formatted by %2x. */
	snprintf(prefix, sizeof(prefix), FT_MODULE ": cmd %2x",
		 atomic_inc_return(&serial) & 0xff);

	/* FC exchange identity and response length. */
	pr_info("%s %s oid %x oxid %x resp_len %u\n",
		prefix, caller, ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id),
		scst_cmd_get_resp_data_len(cmd));
	/* Per-command transport progress counters. */
	pr_info("%s scst_cmd %p wlen %u rlen %u\n",
		prefix, cmd, fcmd->write_data_len, fcmd->read_data_len);
	/* Expected (from CDB/initiator) vs. actual data-transfer geometry. */
	pr_info("%s exp_dir %x exp_xfer_len %d exp_in_len %d\n",
		prefix, cmd->expected_data_direction,
		cmd->expected_transfer_len, cmd->expected_out_transfer_len);
	pr_info("%s dir %x data_len %lld bufflen %d out_bufflen %d\n",
		prefix, cmd->data_direction, cmd->data_len,
		cmd->bufflen, cmd->out_bufflen);
	pr_info("%s sg_cnt reg %d in %d tgt %d tgt_in %d\n",
		prefix, cmd->sg_cnt, cmd->out_sg_cnt,
		cmd->tgt_i_sg_cnt, cmd->tgt_out_sg_cnt);

	/*
	 * Collect the names of all set one-bit state flags into buf and
	 * print them on a single line.  Names must match the scst_cmd
	 * bitfield declarations exactly.
	 */
	buf[0] = '\0';
	if (cmd->sent_for_exec)
		ft_cmd_flag(buf, sizeof(buf), "sent");
	if (cmd->completed)
		ft_cmd_flag(buf, sizeof(buf), "comp");
	if (cmd->ua_ignore)
		ft_cmd_flag(buf, sizeof(buf), "ua_ign");
	if (cmd->atomic)
		ft_cmd_flag(buf, sizeof(buf), "atom");
	if (cmd->double_ua_possible)
		ft_cmd_flag(buf, sizeof(buf), "dbl_ua_poss");
	if (cmd->is_send_status)
		ft_cmd_flag(buf, sizeof(buf), "send_stat");
	if (cmd->retry)
		ft_cmd_flag(buf, sizeof(buf), "retry");
	if (cmd->internal)
		ft_cmd_flag(buf, sizeof(buf), "internal");
	if (cmd->unblock_dev)
		ft_cmd_flag(buf, sizeof(buf), "unblock_dev");
	if (cmd->cmd_hw_pending)
		ft_cmd_flag(buf, sizeof(buf), "hw_pend");
	if (cmd->tgt_need_alloc_data_buf)
		ft_cmd_flag(buf, sizeof(buf), "tgt_need_alloc");
	if (cmd->tgt_i_data_buf_alloced)
		ft_cmd_flag(buf, sizeof(buf), "tgt_i_alloced");
	if (cmd->dh_data_buf_alloced)
		ft_cmd_flag(buf, sizeof(buf), "dh_alloced");
	if (cmd->expected_values_set)
		ft_cmd_flag(buf, sizeof(buf), "exp_val");
	if (cmd->sg_buff_modified)
		ft_cmd_flag(buf, sizeof(buf), "sg_buf_mod");
	if (cmd->preprocessing_only)
		ft_cmd_flag(buf, sizeof(buf), "pre_only");
	if (cmd->sn_set)
		ft_cmd_flag(buf, sizeof(buf), "sn_set");
	if (cmd->hq_cmd_inced)
		ft_cmd_flag(buf, sizeof(buf), "hq_cmd_inc");
	if (cmd->set_sn_on_restart_cmd)
		ft_cmd_flag(buf, sizeof(buf), "set_sn_on_restart");
	if (cmd->no_sgv)
		ft_cmd_flag(buf, sizeof(buf), "no_sgv");
	if (cmd->may_need_dma_sync)
		ft_cmd_flag(buf, sizeof(buf), "dma_sync");
	if (cmd->out_of_sn)
		ft_cmd_flag(buf, sizeof(buf), "oo_sn");
	if (cmd->inc_expected_sn_on_done)
		ft_cmd_flag(buf, sizeof(buf), "inc_sn_exp");
	if (cmd->done)
		ft_cmd_flag(buf, sizeof(buf), "done");
	if (cmd->finished)
		ft_cmd_flag(buf, sizeof(buf), "fin");
	pr_info("%s flags %s\n", prefix, buf);

	/* Task identity, sequencing, and completion status. */
	pr_info("%s lun %lld sn %d tag %lld cmd_flags %lx\n",
		prefix, cmd->lun, cmd->sn, cmd->tag, cmd->cmd_flags);
	pr_info("%s tgt_sn %d op_flags %x op %s\n",
		prefix, cmd->tgt_sn, cmd->op_flags, cmd->op_name);
	pr_info("%s status %x msg_status %x "
		"host_status %x driver_status %x\n",
		prefix, cmd->status, cmd->msg_status,
		cmd->host_status, cmd->driver_status);
	pr_info("%s cdb_len %d\n", prefix, cmd->cdb_len);

	/*
	 * Hex-dump the full CDB buffer.  Note this dumps SCST_MAX_CDB_SIZE
	 * bytes, not just cmd->cdb_len; cdb_len is printed above so the
	 * reader can ignore the trailing bytes.
	 */
	snprintf(buf, sizeof(buf), "%s cdb ", prefix);
	print_hex_dump(KERN_INFO, buf, DUMP_PREFIX_NONE,
		       16, 4, cmd->cdb, SCST_MAX_CDB_SIZE, 0);
}
/*
 * srp_indirect_data() - perform RDMA described by an SRP indirect buffer
 *	descriptor, fetching the descriptor table from the initiator if it
 *	is not fully embedded in the command IU.
 * @sc:       SCST command being processed.
 * @cmd:      the SRP command IU (for the in/out descriptor counts).
 * @id:       indirect descriptor; lengths are big-endian on the wire.
 * @dir:      transfer direction (DMA_TO_DEVICE = initiator write).
 * @rdma_io:  callback that performs the actual RDMA transfer.
 * @dma_map:  non-zero if the scatterlist must be DMA-mapped around the I/O.
 * @ext_desc: non-zero if fetching an external descriptor table is allowed.
 *
 * Returns 0 on success or a negative errno.
 */
static int srp_indirect_data(struct scst_cmd *sc, struct srp_cmd *cmd,
			     struct srp_indirect_buf *id,
			     enum dma_data_direction dir, srp_rdma_t rdma_io,
			     int dma_map, int ext_desc)
{
	struct iu_entry *iue = NULL;
	struct srp_direct_buf *md = NULL;
	struct scatterlist dummy, *sg = NULL;
	dma_addr_t token = 0;	/* non-zero iff an external table was allocated */
	int err = 0;
	int nmd, nsg = 0, len, sg_cnt = 0;
	u32 tsize = 0;
	enum dma_data_direction dma_dir;

	iue = scst_cmd_get_tgt_priv(sc);

	if (dir == DMA_TO_DEVICE) {
		/* Initiator write: data arrives from the wire into memory. */
		scst_cmd_get_write_fields(sc, &sg, &sg_cnt);
		tsize = scst_cmd_get_bufflen(sc);
		dma_dir = DMA_FROM_DEVICE;
	} else {
		/* Initiator read: data leaves memory toward the wire. */
		sg = scst_cmd_get_sg(sc);
		sg_cnt = scst_cmd_get_sg_cnt(sc);
		tsize = scst_cmd_get_adjusted_resp_data_len(sc);
		dma_dir = DMA_TO_DEVICE;
	}

	dprintk("%p %u %u %d %d\n", iue, tsize, be32_to_cpu(id->len),
		be32_to_cpu(cmd->data_in_desc_cnt),
		be32_to_cpu(cmd->data_out_desc_cnt));

	len = min(tsize, be32_to_cpu(id->len));

	/* Number of direct descriptors in the (possibly external) table. */
	nmd = be32_to_cpu(id->table_desc.len) / sizeof(struct srp_direct_buf);

	/*
	 * If the whole descriptor table is embedded in the command IU, use
	 * it in place and skip the fetch.
	 * NOTE(review): nmd (CPU order) is compared to cmd->data_*_desc_cnt
	 * without byte-order conversion — confirm the field width/endianness
	 * against struct srp_cmd.
	 */
	if ((dir == DMA_FROM_DEVICE && nmd == cmd->data_in_desc_cnt) ||
	    (dir == DMA_TO_DEVICE && nmd == cmd->data_out_desc_cnt)) {
		md = &id->desc_list[0];
		goto rdma;
	}

	if (ext_desc && dma_map) {
		/* Fetch the external descriptor table from the initiator. */
		md = dma_alloc_coherent(iue->target->dev,
					be32_to_cpu(id->table_desc.len),
					&token, GFP_KERNEL);
		if (!md) {
			/*
			 * Fix: convert the big-endian length before printing;
			 * every other use of table_desc.len in this function
			 * goes through be32_to_cpu().
			 */
			eprintk("Can't get dma memory %u\n",
				be32_to_cpu(id->table_desc.len));
			return -ENOMEM;
		}

		sg_init_one(&dummy, md, be32_to_cpu(id->table_desc.len));
		sg_dma_address(&dummy) = token;
		sg_dma_len(&dummy) = be32_to_cpu(id->table_desc.len);

		err = rdma_io(sc, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
			      be32_to_cpu(id->table_desc.len));
		if (err) {
			eprintk("Error copying indirect table %d\n", err);
			goto free_mem;
		}
	} else {
		/* External tables are unsupported without ext_desc+dma_map. */
		eprintk("This command uses external indirect buffer\n");
		return -EINVAL;
	}

rdma:
	if (dma_map) {
		nsg = dma_map_sg(iue->target->dev, sg, sg_cnt, dma_dir);
		if (!nsg) {
			eprintk("fail to map %p %d\n", iue, sg_cnt);
			err = -ENOMEM;
			goto free_mem;
		}
	}

	err = rdma_io(sc, sg, nsg, md, nmd, dir, len);

	if (dma_map)
		dma_unmap_sg(iue->target->dev, sg, nsg, dma_dir);

free_mem:
	/* token is only set when the external table path allocated md. */
	if (token && dma_map)
		dma_free_coherent(iue->target->dev,
				  be32_to_cpu(id->table_desc.len), md, token);

	return err;
}
/*
 * Receive write data frame.
 *
 * Copies the payload of one FCP write-data frame into the command's data
 * buffer at the frame's relative offset, walking the buffer in segments via
 * scst_get_buf_first()/scst_get_buf_next() (or the _out_ variants for the
 * write half of a BIDI command).  When the accumulated write data reaches
 * cmd->data_len, hands the command back to SCST.  The frame is always freed.
 */
void ft_recv_write_data(struct scst_cmd *cmd, struct fc_frame *fp)
{
	struct ft_cmd *fcmd;
	struct fc_frame_header *fh;
	unsigned int bufflen;	/* total destination buffer length */
	u32 rel_off;		/* frame's relative offset into the buffer */
	size_t frame_len;	/* payload bytes remaining to copy */
	size_t mem_len;		/* bytes left in the current buffer segment */
	size_t tlen;		/* bytes copied this iteration */
	void *from;
	void *to;
	int dir;
	u8 *buf;

	/* BIDI commands receive write data into the "out" buffer. */
	dir = scst_cmd_get_data_direction(cmd);
	if (dir == SCST_DATA_BIDI) {
		/*
		 * NOTE(review): scst_get_out_buf_first() presumably returns a
		 * negative value on error, which would wrap when stored into
		 * the size_t mem_len — confirm against the SCST API.
		 */
		mem_len = scst_get_out_buf_first(cmd, &buf);
		bufflen = scst_cmd_get_out_bufflen(cmd);
	} else {
		mem_len = scst_get_buf_first(cmd, &buf);
		bufflen = scst_cmd_get_bufflen(cmd);
	}
	to = buf;

	fcmd = scst_cmd_get_tgt_priv(cmd);
	fh = fc_frame_header_get(fp);
	frame_len = fr_len(fp);
	rel_off = ntohl(fh->fh_parm_offset);

	FT_IO_DBG("sid %x oxid %x payload_len %zd rel_off %x\n",
		  ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id),
		  frame_len - sizeof(*fh), rel_off);

	/* Frames without a valid relative offset cannot be placed. */
	if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
		goto drop;
	/* Must contain at least one payload byte past the FC header. */
	if (frame_len <= sizeof(*fh))
		goto drop;
	frame_len -= sizeof(*fh);
	from = fc_frame_payload_get(fp, 0);
	/* Clamp the copy to the destination buffer bounds. */
	if (rel_off >= bufflen)
		goto drop;
	if (frame_len + rel_off > bufflen)
		frame_len = bufflen - rel_off;

	while (frame_len) {
		/* Advance to the next buffer segment when this one is spent. */
		if (!mem_len) {
			if (dir == SCST_DATA_BIDI) {
				scst_put_out_buf(cmd, buf);
				mem_len = scst_get_out_buf_next(cmd, &buf);
			} else {
				scst_put_buf(cmd, buf);
				mem_len = scst_get_buf_next(cmd, &buf);
			}
			to = buf;
			if (!mem_len)
				break;
		}
		/* Burn rel_off down by skipping whole or partial segments. */
		if (rel_off) {
			if (rel_off >= mem_len) {
				rel_off -= mem_len;
				mem_len = 0;
				continue;
			}
			mem_len -= rel_off;
			to += rel_off;
			rel_off = 0;
		}
		tlen = min(mem_len, frame_len);
		memcpy(to, from, tlen);

		from += tlen;
		frame_len -= tlen;
		mem_len -= tlen;
		to += tlen;
		fcmd->write_data_len += tlen;
	}
	/* Release the last segment if it was not fully consumed. */
	if (mem_len) {
		if (dir == SCST_DATA_BIDI)
			scst_put_out_buf(cmd, buf);
		else
			scst_put_buf(cmd, buf);
	}
	/* All expected write data has arrived: resume command processing. */
	if (fcmd->write_data_len == cmd->data_len)
		scst_rx_data(cmd, SCST_RX_STATUS_SUCCESS, SCST_CONTEXT_THREAD);
drop:
	fc_frame_free(fp);
}
/*
 * Send read data back to initiator.
 *
 * Streams the command's response data to the initiator as a series of FCP
 * solicited-data frames, resuming from fcmd->read_data_len if a previous
 * call already sent part of the data.  When the link supports it and the
 * total remainder is word-aligned, buffer pages are attached to the skb as
 * fragments (zero-copy); otherwise the payload is memcpy'd into each frame.
 *
 * Returns SCST_TGT_RES_SUCCESS when everything (or nothing sendable) was
 * queued, or SCST_TGT_RES_QUEUE_FULL if data remains so SCST retries later.
 */
int ft_send_read_data(struct scst_cmd *cmd)
{
	struct ft_cmd *fcmd;
	struct fc_frame *fp = NULL;
	struct fc_exch *ep;
	struct fc_lport *lport;
	size_t remaining;	/* response bytes still to send */
	u32 fh_off = 0;		/* relative offset stamped into frame header */
	u32 frame_off;		/* offset of next byte to send (resume point) */
	size_t frame_len = 0;	/* bytes still to place into current frame */
	size_t mem_len;		/* bytes left in current buffer segment */
	u32 mem_off;		/* offset into current buffer segment */
	size_t tlen;		/* bytes handled this iteration */
	struct page *page;
	int use_sg;		/* non-zero: attach pages as skb fragments */
	int error;
	void *to = NULL;	/* write cursor inside frame payload (memcpy path) */
	u8 *from = NULL;	/* current buffer segment base */
	int loop_limit = 10000;	/* safety valve against a stuck loop */

	fcmd = scst_cmd_get_tgt_priv(cmd);
	ep = fc_seq_exch(fcmd->seq);
	lport = ep->lp;

	frame_off = fcmd->read_data_len;
	tlen = scst_cmd_get_resp_data_len(cmd);
	FT_IO_DBG("oid %x oxid %x resp_len %zd frame_off %u\n",
		  ep->oid, ep->oxid, tlen, frame_off);
	/* Everything already sent (or nothing to send). */
	if (tlen <= frame_off)
		return SCST_TGT_RES_SUCCESS;
	remaining = tlen - frame_off;
	/* Only logged, not treated as fatal. */
	if (remaining > UINT_MAX)
		FT_ERR("oid %x oxid %x resp_len %zd frame_off %u\n",
		       ep->oid, ep->oxid, tlen, frame_off);

	mem_len = scst_get_buf_first(cmd, &from);
	mem_off = 0;
	if (!mem_len) {
		FT_IO_DBG("mem_len 0\n");
		return SCST_TGT_RES_SUCCESS;
	}
	FT_IO_DBG("sid %x oxid %x mem_len %zd frame_off %u remaining %zd\n",
		  ep->sid, ep->oxid, mem_len, frame_off, remaining);

	/*
	 * If we've already transferred some of the data, skip through
	 * the buffer over the data already sent and continue with the
	 * same sequence. Otherwise, get a new sequence for the data.
	 */
	if (frame_off) {
		/* tlen is reused here as "bytes left to skip". */
		tlen = frame_off;
		while (mem_len <= tlen) {
			tlen -= mem_len;
			scst_put_buf(cmd, from);
			mem_len = scst_get_buf_next(cmd, &from);
			if (!mem_len)
				return SCST_TGT_RES_SUCCESS;
		}
		mem_len -= tlen;
		mem_off = tlen;
	} else
		fcmd->seq = lport->tt.seq_start_next(fcmd->seq);

	/* no scatter/gather in skb for odd word length due to fc_seq_send() */
	use_sg = !(remaining % 4) && lport->sg_supp;

	while (remaining) {
		if (!loop_limit) {
			FT_ERR("hit loop limit. remaining %zx mem_len %zx "
			       "frame_len %zx tlen %zx\n",
			       remaining, mem_len, frame_len, tlen);
			break;
		}
		loop_limit--;
		/* Advance to the next buffer segment when exhausted. */
		if (!mem_len) {
			scst_put_buf(cmd, from);
			mem_len = scst_get_buf_next(cmd, &from);
			mem_off = 0;
			if (!mem_len) {
				FT_ERR("mem_len 0 from get_buf_next\n");
				break;
			}
		}
		/* Start a new frame when the previous one was sent/filled. */
		if (!frame_len) {
			frame_len = fcmd->max_lso_payload;
			frame_len = min(frame_len, remaining);
			/* sg path allocates a headerless skb; pages attach below. */
			fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len);
			if (!fp) {
				FT_IO_DBG("frame_alloc failed. "
					  "use_sg %d frame_len %zd\n",
					  use_sg, frame_len);
				break;
			}
			fr_max_payload(fp) = fcmd->max_payload;
			to = fc_frame_payload_get(fp, 0);
			fh_off = frame_off;
		}
		tlen = min(mem_len, frame_len);
		BUG_ON(!tlen);
		BUG_ON(tlen > remaining);
		BUG_ON(tlen > mem_len);
		BUG_ON(tlen > frame_len);

		if (use_sg) {
			/* Zero-copy: reference the buffer page from the skb. */
			page = virt_to_page(from + mem_off);
			get_page(page);
			/* Fragments cannot cross a page boundary. */
			tlen = min_t(size_t, tlen,
				     PAGE_SIZE - (mem_off & ~PAGE_MASK));
			skb_fill_page_desc(fp_skb(fp),
					   skb_shinfo(fp_skb(fp))->nr_frags,
					   page, offset_in_page(from + mem_off),
					   tlen);
			fr_len(fp) += tlen;
			fp_skb(fp)->data_len += tlen;
			fp_skb(fp)->truesize +=
					PAGE_SIZE << compound_order(page);
			frame_len -= tlen;
			/* Out of fragment slots: force the frame out now. */
			if (skb_shinfo(fp_skb(fp))->nr_frags >= FC_FRAME_SG_LEN)
				frame_len = 0;
		} else {
			memcpy(to, from + mem_off, tlen);
			to += tlen;
			frame_len -= tlen;
		}

		mem_len -= tlen;
		mem_off += tlen;
		remaining -= tlen;
		frame_off += tlen;
		/* Keep filling the current frame until it is full. */
		if (frame_len)
			continue;
		/* END_SEQ only on the final frame of the data. */
		fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
			       FC_TYPE_FCP,
			       remaining ? (FC_FC_EX_CTX | FC_FC_REL_OFF) :
			       (FC_FC_EX_CTX | FC_FC_REL_OFF | FC_FC_END_SEQ),
			       fh_off);
		error = lport->tt.seq_send(lport, fcmd->seq, fp);
		if (error) {
			WARN_ON(1);
			/* XXX For now, initiator will retry */
		} else
			/* Record progress only for frames actually queued. */
			fcmd->read_data_len = frame_off;
	}
	if (mem_len)
		scst_put_buf(cmd, from);
	/* Unsent data remains: ask SCST to call us again. */
	if (remaining) {
		FT_IO_DBG("remaining read data %zd\n", remaining);
		return SCST_TGT_RES_QUEUE_FULL;
	}
	return SCST_TGT_RES_SUCCESS;
}