/**
 * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
 * @fsf_req: pointer to struct zfcp_fsf_req
 * Returns: 0 on success, error otherwise
 */
int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
{
	struct zfcp_adapter *adapter = fsf_req->adapter;
	struct zfcp_qdio_queue *req_q = &adapter->req_q;
	int first = fsf_req->sbal_first;
	int count = fsf_req->sbal_number;
	int retval, pci, pci_batch;
	struct qdio_buffer_element *sbale;

	/* acknowledgements for transferred buffers */
	pci_batch = adapter->req_q_pci_batch + count;
	if (unlikely(pci_batch >= ZFCP_QDIO_PCI_INTERVAL)) {
		pci_batch %= ZFCP_QDIO_PCI_INTERVAL;
		pci = first + count - (pci_batch + 1);
		pci %= QDIO_MAX_BUFFERS_PER_Q;
		sbale = zfcp_qdio_sbale(req_q, pci, 0);
		sbale->flags |= SBAL_FLAGS0_PCI;
	}

	retval = do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, first,
			 count);
	if (unlikely(retval)) {
		zfcp_qdio_zero_sbals(req_q->sbal, first, count);
		return retval;
	}

	/* account for transferred buffers */
	atomic_sub(count, &req_q->count);
	req_q->first += count;
	req_q->first %= QDIO_MAX_BUFFERS_PER_Q;
	adapter->req_q_pci_batch = pci_batch;
	return 0;
}
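/*
 * Hypothetical caller fragment, not taken from the zfcp sources (the helper
 * name is illustrative): once a request's SBALs have been filled, the send
 * path only needs to check the return value. On failure zfcp_qdio_send()
 * has already zeroed the SBALs it was handed, so the caller just unwinds
 * its own bookkeeping.
 */
static int zfcp_example_issue_req(struct zfcp_fsf_req *fsf_req)
{
	int retval;

	retval = zfcp_qdio_send(fsf_req);
	if (unlikely(retval)) {
		/* request was not passed to the hardware */
		return retval;
	}
	return 0;
}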
static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
			       int queue_no, int first, int count,
			       unsigned long parm)
{
	struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
	struct zfcp_qdio_queue *queue = &adapter->resp_q;
	struct qdio_buffer_element *sbale;
	int sbal_idx, sbale_idx, sbal_no;

	if (unlikely(qdio_err)) {
		zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
		zfcp_qdio_handler_error(adapter, 147);
		return;
	}

	/*
	 * go through all SBALs from input queue currently
	 * returned by QDIO layer
	 */
	for (sbal_no = 0; sbal_no < count; sbal_no++) {
		sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;

		/* go through all SBALEs of SBAL */
		for (sbale_idx = 0; sbale_idx < QDIO_MAX_ELEMENTS_PER_BUFFER;
		     sbale_idx++) {
			sbale = zfcp_qdio_sbale(queue, sbal_idx, sbale_idx);
			zfcp_qdio_reqid_check(adapter,
					      (unsigned long) sbale->addr,
					      sbal_idx);
			if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY))
				break;
		}

		if (unlikely(!(sbale->flags & SBAL_FLAGS_LAST_ENTRY)))
			dev_warn(&adapter->ccw_device->dev,
				 "A QDIO protocol error occurred, "
				 "operations continue\n");
	}

	/*
	 * put range of SBALs back to the response queue
	 * (including SBALs which have already been freed before)
	 */
	zfcp_qdio_resp_put_back(adapter, count);
}
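/*
 * Sketch of how this response handler is typically wired up through the
 * s390 qdio layer. The field names follow struct qdio_initialize as used
 * in this era of the qdio driver; the helper itself is illustrative and
 * not the full zfcp queue setup (qib parameters, SBAL address arrays and
 * the output handler are omitted). Note that int_parm carries the adapter
 * pointer, which matches the cast at the top of zfcp_qdio_int_resp().
 */
static void zfcp_example_init_resp_handler(struct zfcp_adapter *adapter,
					   struct qdio_initialize *init_data)
{
	init_data->cdev = adapter->ccw_device;
	init_data->no_input_qs = 1;
	init_data->no_output_qs = 1;
	init_data->input_handler = zfcp_qdio_int_resp;
	init_data->int_parm = (unsigned long) adapter;
}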
/**
 * zfcp_qdio_sbale_curr - return curr SBALE on req_q for a struct zfcp_fsf_req
 * @req: pointer to struct zfcp_fsf_req
 * Returns: pointer to qdio_buffer_element (SBALE) structure
 */
struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_fsf_req *req)
{
	return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last,
			       req->sbale_curr);
}
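/*
 * Illustrative fragment (the helper name is hypothetical): the usual
 * pattern is to fetch the request's current SBALE and point it at a data
 * buffer before advancing to the next element.
 */
static void zfcp_example_fill_curr_sbale(struct zfcp_fsf_req *req,
					 void *data, u32 length)
{
	struct qdio_buffer_element *sbale = zfcp_qdio_sbale_curr(req);

	sbale->addr = data;
	sbale->length = length;
}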