/*
 * Release callback for the connection's kref: tears down and frees the
 * connection once the last reference is dropped.
 *
 * Teardown order matters here: connection resources and the QP are
 * released first, then the device-level accounting is updated under
 * dev_list_mutex, and only afterwards are the CM id and the connection
 * object itself destroyed.
 */
static void isert_kref_free(struct kref *kref)
{
	struct isert_connection *isert_conn = container_of(kref,
							   struct isert_connection,
							   kref);
	struct isert_device *isert_dev = isert_conn->isert_dev;
	/* CQ context was set up to point back at our wrapper struct */
	struct isert_cq *cq = isert_conn->qp->recv_cq->cq_context;

	TRACE_ENTRY();

	pr_info("isert_conn_free conn:%p\n", isert_conn);

	isert_free_conn_resources(isert_conn);

	isert_conn_qp_destroy(isert_conn);

	/*
	 * dev_list_mutex protects the per-CQ QP accounting, the portal's
	 * connection list and the device reference count as one unit.
	 */
	mutex_lock(&dev_list_mutex);
	isert_dev->cq_qps[cq->idx]--;
	list_del(&isert_conn->portal_node);
	isert_deref_device(isert_dev);
	/*
	 * If the portal was already marked inactive, this connection was
	 * the last thing keeping it alive — free it now, still under the
	 * same lock.
	 */
	if (unlikely(isert_conn->portal->state == ISERT_PORTAL_INACTIVE))
		isert_portal_free(isert_conn->portal);
	mutex_unlock(&dev_list_mutex);

	/* CM id must outlive the QP it owns; destroy it only after QP teardown */
	rdma_destroy_id(isert_conn->cm_id);

	isert_conn_kfree(isert_conn);

	/* Balance the module reference taken when the connection was created */
	module_put(THIS_MODULE);

	TRACE_EXIT();
}
int isert_alloc_conn_resources(struct isert_connection *isert_conn) { struct isert_cmnd *pdu, *prev_pdu = NULL, *first_pdu = NULL; int t_datasz = 512; /* RFC states that minimum receive data size is 512 */ int i_datasz = ISER_HDRS_SZ + SCST_SENSE_BUFFERSIZE; int i, err = 0; int to_alloc; TRACE_ENTRY(); isert_conn->repost_threshold = 32; to_alloc = isert_conn->queue_depth * 2 + isert_conn->repost_threshold; if (unlikely(to_alloc > ISER_MAX_WCE)) { pr_err("QueuedCommands larger than %d not supported\n", (ISER_MAX_WCE - isert_conn->repost_threshold) / 2); err = -EINVAL; goto out; } for (i = 0; i < to_alloc; i++) { pdu = isert_rx_pdu_alloc(isert_conn, t_datasz); if (unlikely(!pdu)) { err = -ENOMEM; goto clean_pdus; } if (unlikely(first_pdu == NULL)) first_pdu = pdu; else isert_link_recv_pdu_wrs(prev_pdu, pdu); prev_pdu = pdu; pdu = isert_tx_pdu_alloc(isert_conn, i_datasz); if (unlikely(!pdu)) { err = -ENOMEM; goto clean_pdus; } } err = isert_post_recv(isert_conn, &first_pdu->wr[0], to_alloc); if (unlikely(err)) { pr_err("Failed to post recv err:%d\n", err); goto clean_pdus; } out: TRACE_EXIT_RES(err); return err; clean_pdus: isert_free_conn_resources(isert_conn); goto out; }