/**
 * zfcp_qdio_close - close qdio queues for an adapter
 * @qdio: pointer to structure zfcp_qdio
 */
void zfcp_qdio_close(struct zfcp_qdio *qdio)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	int idx, count;

	/* nothing to do if the qdio queues were never brought up */
	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return;

	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
	spin_lock_irq(&qdio->req_q_lock);
	atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
	spin_unlock_irq(&qdio->req_q_lock);

	/* wake anyone sleeping for free request-queue entries */
	wake_up(&qdio->req_q_wq);

	qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);

	/* cleanup used outbound sbals */
	count = atomic_read(&qdio->req_q_free);
	if (count < QDIO_MAX_BUFFERS_PER_Q) {
		/* scrub the slots still in flight on the circular queue */
		idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q;
		count = QDIO_MAX_BUFFERS_PER_Q - count;
		zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
	}
	/* reset the request queue to its initial empty state */
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, 0);
}
/**
 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * @sg: scatter-gather list
 * Returns: number of bytes, or error (negative)
 */
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			    struct scatterlist *sg)
{
	struct qdio_buffer_element *sbale;
	int bytes = 0;

	/* set storage-block type for this request */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->sflags |= q_req->sbtype;

	for (; sg; sg = sg_next(sg)) {
		sbale = zfcp_qdio_sbale_next(qdio, q_req);
		if (!sbale) {
			/* out of SBALEs: count the congestion event and
			 * hand the already-used SBALs back clean */
			atomic_inc(&qdio->req_q_full);
			zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
					     q_req->sbal_number);
			return -EINVAL;
		}
		/* one SBALE per scatter-gather segment */
		sbale->addr = sg_virt(sg);
		sbale->length = sg->length;
		bytes += sg->length;
	}
	return bytes;
}
/** * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO * @qdio: pointer to struct zfcp_qdio * @q_req: pointer to struct zfcp_qdio_req * Returns: 0 on success, error otherwise */ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) { int retval; u8 sbal_number = q_req->sbal_number; spin_lock(&qdio->stat_lock); zfcp_qdio_account(qdio); spin_unlock(&qdio->stat_lock); retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, q_req->sbal_first, sbal_number); if (unlikely(retval)) { zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first, sbal_number); return retval; } /* account for transferred buffers */ atomic_sub(sbal_number, &qdio->req_q_free); qdio->req_q_idx += sbal_number; qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q; return 0; }
/**
 * zfcp_qdio_close - close qdio queues for an adapter
 * @qdio: pointer to structure zfcp_qdio
 */
void zfcp_qdio_close(struct zfcp_qdio *qdio)
{
	struct zfcp_qdio_queue *req_q;
	int first, count;

	/* nothing to do if the qdio queues were never brought up */
	if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return;

	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
	req_q = &qdio->req_q;
	spin_lock_bh(&qdio->req_q_lock);
	atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
	spin_unlock_bh(&qdio->req_q_lock);

	/* wake anyone sleeping for free request-queue entries */
	wake_up(&qdio->req_q_wq);

	qdio_shutdown(qdio->adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);

	/* cleanup used outbound sbals */
	count = atomic_read(&req_q->count);
	if (count < QDIO_MAX_BUFFERS_PER_Q) {
		/* scrub the slots still in flight on the circular queue */
		first = (req_q->first + count) % QDIO_MAX_BUFFERS_PER_Q;
		count = QDIO_MAX_BUFFERS_PER_Q - count;
		zfcp_qdio_zero_sbals(req_q->sbal, first, count);
	}
	/* reset both queues to their initial empty state */
	req_q->first = 0;
	atomic_set(&req_q->count, 0);
	qdio->resp_q.first = 0;
	atomic_set(&qdio->resp_q.count, 0);
}
/**
 * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
 * @fsf_req: pointer to struct zfcp_fsf_req
 * Returns: 0 on success, error otherwise
 */
int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
{
	struct zfcp_adapter *adapter = fsf_req->adapter;
	struct zfcp_qdio_queue *req_q = &adapter->req_q;
	int first = fsf_req->sbal_first;
	int count = fsf_req->sbal_number;
	int retval, pci, pci_batch;
	struct qdio_buffer_element *sbale;

	/*
	 * acknowledgements for transferred buffers: req_q_pci_batch counts
	 * buffers submitted since the last PCI-flagged SBALE; once the total
	 * crosses ZFCP_QDIO_PCI_INTERVAL, flag the SBAL that crosses the
	 * boundary so the hardware raises a PCI interrupt there.
	 */
	pci_batch = adapter->req_q_pci_batch + count;
	if (unlikely(pci_batch >= ZFCP_QDIO_PCI_INTERVAL)) {
		/* wrap the counter; pci is the index (mod queue size) of the
		 * SBAL that completes the interval */
		pci_batch %= ZFCP_QDIO_PCI_INTERVAL;
		pci = first + count - (pci_batch + 1);
		pci %= QDIO_MAX_BUFFERS_PER_Q;
		sbale = zfcp_qdio_sbale(req_q, pci, 0);
		sbale->flags |= SBAL_FLAGS0_PCI;
	}

	retval = do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, first,
			 count);
	if (unlikely(retval)) {
		/* submission failed: hand the SBALs back in a clean state */
		zfcp_qdio_zero_sbals(req_q->sbal, first, count);
		return retval;
	}

	/* account for transferred buffers */
	atomic_sub(count, &req_q->count);
	req_q->first += count;
	req_q->first %= QDIO_MAX_BUFFERS_PER_Q;
	/* only commit the new batch counter after a successful submit */
	adapter->req_q_pci_batch = pci_batch;
	return 0;
}
/* Scrub the SBALs occupied by an aborted request so they can be reused. */
static void zfcp_qdio_undo_sbals(struct zfcp_fsf_req *fsf_req)
{
	int start = fsf_req->sbal_first;
	int end = fsf_req->sbal_last;
	/* inclusive distance from start to end on the circular queue */
	int num = (end - start + QDIO_MAX_BUFFERS_PER_Q)
		  % QDIO_MAX_BUFFERS_PER_Q + 1;

	zfcp_qdio_zero_sbals(fsf_req->adapter->req_q.sbal, start, num);
}
/* Scrub the SBALs occupied by an aborted request so they can be reused. */
static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
				 struct zfcp_queue_req *q_req)
{
	int start = q_req->sbal_first;
	int end = q_req->sbal_last;
	/* inclusive distance from start to end on the circular queue */
	int num = (end - start + QDIO_MAX_BUFFERS_PER_Q)
		  % QDIO_MAX_BUFFERS_PER_Q + 1;

	zfcp_qdio_zero_sbals(qdio->req_q.sbal, start, num);
}
/* Request-queue interrupt handler: reclaim SBALs completed by QDIO. */
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
			      int queue_no, int first, int count,
			      unsigned long parm)
{
	struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
	struct zfcp_qdio_queue *req_q = &adapter->req_q;

	if (unlikely(qdio_err)) {
		/* record the error in the trace and trigger recovery */
		zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
		zfcp_qdio_handler_error(adapter, 140);
		return;
	}

	/* the SBALs are program-owned again; scrub them for reuse */
	zfcp_qdio_zero_sbals(req_q->sbal, first, count);

	/* make the freed slots visible and wake anyone waiting for space */
	atomic_add(count, &req_q->count);
	wake_up(&adapter->request_wq);
}
/* zfcp_qdio_int_req - interrupt handler for the outbound (request) queue */
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
			      int queue_no, int idx, int count,
			      unsigned long parm)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;

	if (unlikely(qdio_err)) {
		/* report the error and trigger adapter recovery */
		zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
		return;
	}

	/* cleanup all SBALs being program-owned now */
	zfcp_qdio_zero_sbals(qdio->req_q, idx, count);

	/* update queue-utilization statistics under the stat lock */
	spin_lock_irq(&qdio->stat_lock);
	zfcp_qdio_account(qdio);
	spin_unlock_irq(&qdio->stat_lock);

	/* completed SBALs are free again; wake anyone waiting for space */
	atomic_add(count, &qdio->req_q_free);
	wake_up(&qdio->req_q_wq);
}
/*
 * function: zfcp_qdio_request_handler
 *
 * purpose: is called by QDIO layer for completed SBALs in request queue
 *
 * returns: (void)
 */
static void zfcp_qdio_request_handler(struct ccw_device *ccw_device,
				      unsigned int status,
				      unsigned int qdio_error,
				      unsigned int siga_error,
				      unsigned int queue_number,
				      int first_element,
				      int elements_processed,
				      unsigned long int_parm)
{
	struct zfcp_adapter *adapter;
	struct zfcp_qdio_queue *queue;

	/*
	 * we stored address of struct zfcp_adapter data structure
	 * associated with irq in int_parm
	 */
	adapter = (struct zfcp_adapter *) int_parm;
	queue = &adapter->request_queue;

	ZFCP_LOG_DEBUG("adapter %s, first=%d, elements_processed=%d\n",
		       zfcp_get_busid_by_adapter(adapter), first_element,
		       elements_processed);

	/* bail out if the QDIO layer reported an error condition */
	if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error,
						   siga_error, first_element,
						   elements_processed)))
		goto out;

	/* cleanup all SBALs being program-owned now */
	zfcp_qdio_zero_sbals(queue->buffer, first_element, elements_processed);

	/* increase free space in outbound queue */
	atomic_add(elements_processed, &queue->free_count);
	ZFCP_LOG_DEBUG("free_count=%d\n", atomic_read(&queue->free_count));

	/* wake anyone waiting for free request-queue SBALs */
	wake_up(&adapter->request_wq);
	ZFCP_LOG_DEBUG("elements_processed=%d, free count=%d\n",
		       elements_processed, atomic_read(&queue->free_count));
 out:
	return;
}
/* zfcp_qdio_int_req - interrupt handler for the outbound (request) queue */
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
			      int queue_no, int first, int count,
			      unsigned long parm)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
	struct zfcp_qdio_queue *queue = &qdio->req_q;

	if (unlikely(qdio_err)) {
		/* trace the error, then trigger adapter recovery */
		zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first, count);
		zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
		return;
	}

	/* cleanup all SBALs being program-owned now */
	zfcp_qdio_zero_sbals(queue->sbal, first, count);

	/* update queue-utilization statistics */
	zfcp_qdio_account(qdio);

	/* completed SBALs are free again; wake anyone waiting for space */
	atomic_add(count, &queue->count);
	wake_up(&qdio->req_q_wq);
}
/** * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO * @qdio: pointer to struct zfcp_qdio * @q_req: pointer to struct zfcp_queue_req * Returns: 0 on success, error otherwise */ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req) { struct zfcp_qdio_queue *req_q = &qdio->req_q; int first = q_req->sbal_first; int count = q_req->sbal_number; int retval; unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT; zfcp_qdio_account(qdio); retval = do_QDIO(qdio->adapter->ccw_device, qdio_flags, 0, first, count); if (unlikely(retval)) { zfcp_qdio_zero_sbals(req_q->sbal, first, count); return retval; } /* account for transferred buffers */ atomic_sub(count, &req_q->count); req_q->first += count; req_q->first %= QDIO_MAX_BUFFERS_PER_Q; return 0; }