/** * zfcp_qdio_sbal_chain - chain SBALs if more than one SBAL is needed for a * request * @fsf_req: zfcp_fsf_req to be processed * @sbtype: SBAL flags which have to be set in first SBALE of new SBAL * * This function changes sbal_curr, sbale_curr, sbal_number of fsf_req. */ static inline volatile struct qdio_buffer_element * zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) { volatile struct qdio_buffer_element *sbale; /* set last entry flag in current SBALE of current SBAL */ sbale = zfcp_qdio_sbale_curr(fsf_req); sbale->flags |= SBAL_FLAGS_LAST_ENTRY; /* don't exceed last allowed SBAL */ if (fsf_req->sbal_curr == fsf_req->sbal_last) return NULL; /* set chaining flag in first SBALE of current SBAL */ sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); sbale->flags |= SBAL_FLAGS0_MORE_SBALS; /* calculate index of next SBAL */ fsf_req->sbal_curr++; fsf_req->sbal_curr %= QDIO_MAX_BUFFERS_PER_Q; /* keep this requests number of SBALs up-to-date */ fsf_req->sbal_number++; /* start at first SBALE of new SBAL */ fsf_req->sbale_curr = 0; /* set storage-block type for new SBAL */ sbale = zfcp_qdio_sbale_curr(fsf_req); sbale->flags |= sbtype; return sbale; }
/**
 * zfcp_qdio_sbal_chain - close the current SBAL and chain in the next one
 * @qdio: pointer to the zfcp_qdio structure owning the queues
 * @q_req: per-request QDIO state (SBAL/SBALE cursors and counters)
 *
 * Marks the current SBALE as the last entry of its SBAL, flags the SBAL as
 * continued, then advances @q_req to the first SBALE of the following SBAL
 * (updating sbal_last, sbal_number and sbale_curr).
 *
 * Returns: pointer to the first SBALE of the new SBAL with the request's
 * storage-block type set, or NULL if the request already sits on its last
 * allowed SBAL (sbal_limit).
 */
static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	struct qdio_buffer_element *sbale;

	/* set last entry flag in current SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;

	/* don't exceed last allowed SBAL */
	if (q_req->sbal_last == q_req->sbal_limit)
		return NULL;

	/* set chaining flag in first SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->sflags |= SBAL_SFLAGS0_MORE_SBALS;

	/* calculate index of next SBAL (wraps around the circular queue) */
	q_req->sbal_last++;
	q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;

	/* keep this request's number of SBALs up-to-date */
	q_req->sbal_number++;
	BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ);

	/* start at first SBALE of new SBAL */
	q_req->sbale_curr = 0;

	/* set storage-block type for new SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->sflags |= q_req->sbtype;

	return sbale;
}
/**
 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
 * @qdio: pointer to the zfcp_qdio structure owning the queues
 * @q_req: request to be processed
 * @sbtype: SBALE flags
 * @sg: scatter-gather list
 * @max_sbals: upper bound for number of SBALs to be used
 * Returns: number of bytes, or error (negative)
 */
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio,
			    struct zfcp_queue_req *q_req,
			    unsigned long sbtype, struct scatterlist *sg,
			    int max_sbals)
{
	struct qdio_buffer_element *sbale;
	int retval, bytes = 0;

	/* figure out last allowed SBAL */
	zfcp_qdio_sbal_limit(qdio, q_req, max_sbals);

	/* set storage-block type for this request */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->flags |= sbtype;

	/* map every scatter-gather segment into SBAL entries */
	for (; sg; sg = sg_next(sg)) {
		retval = zfcp_qdio_fill_sbals(qdio, q_req, sbtype,
					      sg_virt(sg), sg->length);
		if (retval < 0)
			return retval;
		bytes += sg->length;
	}

	/* assume that no other SBALEs are to follow in the same SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->flags |= SBAL_FLAGS_LAST_ENTRY;

	return bytes;
}
/*
 * Return the next free SBALE of the request, chaining in a new SBAL
 * (which may fail and yield NULL) once the current SBAL is full.
 */
static struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	/* room left in the current SBAL? just step to the next entry */
	if (q_req->sbale_curr != qdio->max_sbale_per_sbal - 1) {
		q_req->sbale_curr++;
		return zfcp_qdio_sbale_curr(qdio, q_req);
	}

	/* current SBAL exhausted: continue the request in a fresh SBAL */
	return zfcp_qdio_sbal_chain(qdio, q_req);
}
/*
 * Advance to the following SBALE of the request. When the fixed last
 * entry of the SBAL has been reached, a new SBAL is chained in instead
 * (NULL when the request may not grow any further).
 */
static struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	if (q_req->sbale_curr != ZFCP_QDIO_LAST_SBALE_PER_SBAL) {
		++q_req->sbale_curr;
		return zfcp_qdio_sbale_curr(qdio, q_req);
	}

	return zfcp_qdio_sbal_chain(qdio, q_req);
}
/*
 * Hand out the next SBALE of @fsf_req. Once the current SBAL is fully
 * used, chain another SBAL whose first SBALE is tagged with @sbtype
 * (zfcp_qdio_sbal_chain returns NULL when no further SBAL is allowed).
 */
static struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
{
	if (fsf_req->sbale_curr != ZFCP_LAST_SBALE_PER_SBAL) {
		fsf_req->sbale_curr++;
		return zfcp_qdio_sbale_curr(fsf_req);
	}

	return zfcp_qdio_sbal_chain(fsf_req, sbtype);
}
/*
 * Step to the next SBALE of the request, chaining a new SBAL of type
 * @sbtype when the current SBAL has no entries left; may return NULL
 * if the request has hit its SBAL limit.
 */
static struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req,
		     unsigned int sbtype)
{
	if (q_req->sbale_curr != ZFCP_LAST_SBALE_PER_SBAL) {
		q_req->sbale_curr++;
		return zfcp_qdio_sbale_curr(qdio, q_req);
	}

	return zfcp_qdio_sbal_chain(qdio, q_req, sbtype);
}
/**
 * zfcp_qdio_sbale_fill - set address and length in current SBALE
 *	on request_queue
 * @fsf_req: request whose current SBALE is to be filled
 * @sbtype: SBAL flags; not used by this variant — NOTE(review): kept,
 *	presumably for a uniform call signature; confirm against callers
 * @addr: buffer address stored into the SBALE
 * @length: buffer length stored into the SBALE
 */
static inline void
zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
		     void *addr, int length)
{
	volatile struct qdio_buffer_element *sbale;

	sbale = zfcp_qdio_sbale_curr(fsf_req);
	sbale->addr = addr;
	sbale->length = length;
}
/**
 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
 * @fsf_req: request to be processed
 * @sbtype: SBALE flags
 * @sg: scatter-gather list
 * @sg_count: number of elements in scatter-gather list
 * @max_sbals: upper bound for number of SBALs to be used
 *
 * Returns: total number of bytes mapped, or the negative value returned
 * by zfcp_qdio_sbals_from_segment() on failure.
 */
inline int
zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
			struct scatterlist *sg, int sg_count, int max_sbals)
{
	int sg_index;
	struct scatterlist *sg_segment;
	int retval;
	volatile struct qdio_buffer_element *sbale;
	int bytes = 0;

	/* figure out last allowed SBAL */
	zfcp_qdio_sbal_limit(fsf_req, max_sbals);

	/* set storage-block type for current SBAL */
	sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
	sbale->flags |= sbtype;

	/* process all segments of scatter-gather list */
	for (sg_index = 0, sg_segment = sg, bytes = 0;
	     sg_index < sg_count;
	     sg_index++, sg_segment++) {
		retval = zfcp_qdio_sbals_from_segment(
				fsf_req,
				sbtype,
				zfcp_sg_to_address(sg_segment),
				sg_segment->length);
		if (retval < 0) {
			/* propagate the error code as the byte count */
			bytes = retval;
			goto out;
		} else
			bytes += retval;
	}
	/* assume that no other SBALEs are to follow in the same SBAL */
	sbale = zfcp_qdio_sbale_curr(fsf_req);
	sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
out:
#ifdef ZFCP_STAT_REQSIZES
	/* optional build-time bookkeeping: per-direction SG-usage and
	 * request-size statistics (also run on the error path) */
	if (sbtype == SBAL_FLAGS0_TYPE_READ) {
		zfcp_statistics_inc(&zfcp_data.read_sguse_head, sg_count);
		zfcp_statistics_inc(&zfcp_data.read_req_head, bytes);
	} else {
		zfcp_statistics_inc(&zfcp_data.write_sguse_head, sg_count);
		zfcp_statistics_inc(&zfcp_data.write_req_head, bytes);
	}
#endif
	return bytes;
}
/**
 * zfcp_qdio_sbale_fill - set address and length in current SBALE
 *	on request_queue
 * @fsf_req: request whose current SBALE is to be filled
 * @sbtype: SBAL flags; only consulted for the optional statistics
 *	bookkeeping below, not written into the SBALE
 * @addr: buffer address stored into the SBALE
 * @length: buffer length stored into the SBALE
 */
static inline void
zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
		     void *addr, int length)
{
	volatile struct qdio_buffer_element *sbale;

	sbale = zfcp_qdio_sbale_curr(fsf_req);
	sbale->addr = addr;
	sbale->length = length;
#ifdef ZFCP_STAT_REQSIZES
	/* optional build-time bookkeeping: per-direction segment sizes */
	if (sbtype == SBAL_FLAGS0_TYPE_READ)
		zfcp_statistics_inc(&zfcp_data.read_sg_head, length);
	else
		zfcp_statistics_inc(&zfcp_data.write_sg_head, length);
#endif
}
/* process all segements of scatter-gather list */ for_each_sg(sgl, sg_segment, sg_count, sg_index) { retval = zfcp_qdio_sbals_from_segment( fsf_req, sbtype, zfcp_sg_to_address(sg_segment), sg_segment->length); if (retval < 0) { bytes = retval; goto out; } else bytes += retval; } /* assume that no other SBALEs are to follow in the same SBAL */ sbale = zfcp_qdio_sbale_curr(fsf_req); sbale->flags |= SBAL_FLAGS_LAST_ENTRY; out: return bytes; } /** * zfcp_qdio_sbals_from_scsicmnd - fill SBALs from scsi command * @fsf_req: request to be processed * @sbtype: SBALE flags * @scsi_cmnd: either scatter-gather list or buffer contained herein is used * to fill SBALs */ int zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req,