void isert_free_connection(struct iscsi_conn *iscsi_conn)
{
	struct isert_connection *isert_conn = container_of(iscsi_conn,
					struct isert_connection, iscsi);

	isert_post_drain(isert_conn);
	isert_conn_free(isert_conn);
}
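/*
 * Note on the teardown path: isert_conn_free() is assumed to be a thin
 * kref_put() wrapper, so the call above only drops this caller's reference.
 * The connection memory goes away once the drain completion posted by
 * isert_post_drain() releases the last reference.  A minimal sketch of that
 * assumed pattern (the release helper below is illustrative, not the
 * driver's actual implementation):
 */
#if 0	/* illustrative sketch only */
static void isert_kref_release(struct kref *kref)
{
	struct isert_connection *isert_conn =
		container_of(kref, struct isert_connection, kref);

	/* hypothetical final teardown: release QP/CQ resources, then free */
	kfree(isert_conn);
}

static void isert_conn_free(struct isert_connection *isert_conn)
{
	kref_put(&isert_conn->kref, isert_kref_release);
}
#endif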
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static void isert_conn_drained_do_work(void *ctx)
#else
static void isert_conn_drained_do_work(struct work_struct *work)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
	struct isert_connection *isert_conn = ctx;
#else
	struct isert_connection *isert_conn = container_of(work,
					struct isert_connection, drain_work);
#endif

	isert_conn_free(isert_conn);
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static void isert_conn_closed_do_work(void *ctx)
#else
static void isert_conn_closed_do_work(struct work_struct *work)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
	struct isert_connection *isert_conn = ctx;
#else
	struct isert_connection *isert_conn = container_of(work,
					struct isert_connection, close_work);
#endif

	if (!test_bit(ISERT_CONNECTION_ABORTED, &isert_conn->flags))
		if (!test_and_set_bit(ISERT_DISCON_CALLED, &isert_conn->flags))
			isert_connection_closed(&isert_conn->iscsi);

	isert_conn_free(isert_conn);
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static void isert_conn_drained_do_work(void *ctx)
#else
static void isert_conn_drained_do_work(struct work_struct *work)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
	struct isert_connection *isert_conn = ctx;
#else
	struct isert_connection *isert_conn = container_of(work,
					struct isert_connection, drain_work);
#endif

	/* notify upper layer */
	if (!test_bit(ISERT_CONNECTION_ABORTED, &isert_conn->flags))
		isert_connection_closed(&isert_conn->iscsi);

	isert_conn_free(isert_conn);
}
void isert_post_drain(struct isert_connection *isert_conn)
{
	if (!test_and_set_bit(ISERT_DRAIN_POSTED, &isert_conn->flags)) {
		struct ib_send_wr *bad_wr;
		int err;

		isert_wr_set_fields(&isert_conn->drain_wr, isert_conn, NULL);
		isert_conn->drain_wr.wr_op = ISER_WR_SEND;
		isert_conn->drain_wr.send_wr.wr_id = _ptr_to_u64(&isert_conn->drain_wr);
		isert_conn->drain_wr.send_wr.opcode = IB_WR_SEND;
		err = ib_post_send(isert_conn->qp, &isert_conn->drain_wr.send_wr, &bad_wr);
		if (unlikely(err)) {
			pr_err("Failed to post drain wr, err:%d\n", err);
			/*
			 * We need to decrement isert_conn->kref in order to
			 * be able to clean up the connection.
			 */
			set_bit(ISERT_DRAIN_FAILED, &isert_conn->flags);
			isert_conn_free(isert_conn);
		}
	}
}
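/*
 * The drain WR posted above is a zero-length IB_WR_SEND whose wr_id points
 * back at &isert_conn->drain_wr.  Once its completion (or flush error, if
 * the QP has already moved to the error state) is reaped, every WR posted
 * before it is known to have completed, so the reference held for the
 * connection can finally be dropped.  Below is a minimal sketch of how a
 * send-completion path might recognize the drain WR; the handler and the
 * isert_sched_conn_drained() helper are assumptions for illustration, not
 * the driver's actual symbols.
 */
#if 0	/* illustrative sketch only */
static void isert_handle_send_wc(struct isert_connection *isert_conn,
				 const struct ib_wc *wc)
{
	/* hypothetical inverse of the _ptr_to_u64() used when posting */
	struct isert_wr *wr = (struct isert_wr *)(unsigned long)wc->wr_id;

	if (wr == &isert_conn->drain_wr) {
		/* last WR on this QP; schedule drain_work to free the conn */
		isert_sched_conn_drained(isert_conn);
		return;
	}

	/* ... regular tx completion handling ... */
}
#endif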
static int isert_cm_connect_handler(struct rdma_cm_id *cm_id,
				    struct rdma_cm_event *event)
{
	struct isert_connection *isert_conn = cm_id->qp->qp_context;
	int push_saved_pdu = 0;
	int ret;

	TRACE_ENTRY();

	if (isert_conn->state == ISER_CONN_HANDSHAKE)
		isert_conn->state = ISER_CONN_ACTIVE;
	else if (isert_conn->state == ISER_CONN_ACTIVE)
		push_saved_pdu = 1;

	ret = isert_get_addr_size((struct sockaddr *)&isert_conn->peer_addr,
				  &isert_conn->peer_addrsz);
	if (unlikely(ret))
		goto out;

	kref_get(&isert_conn->kref);

	/* notify upper layer */
	ret = isert_conn_established(&isert_conn->iscsi,
				     (struct sockaddr *)&isert_conn->peer_addr,
				     isert_conn->peer_addrsz);
	if (unlikely(ret)) {
		set_bit(ISERT_CONNECTION_ABORTED, &isert_conn->flags);
		isert_post_drain(isert_conn);
		isert_conn_free(isert_conn);
		goto out;
	}

	if (push_saved_pdu) {
		pr_info("iser push saved rx pdu\n");
		isert_recv_completion_handler(isert_conn->saved_wr);
		isert_conn->saved_wr = NULL;
	}

out:
	TRACE_EXIT_RES(ret);
	return ret;
}
static int isert_cm_evt_handler(struct rdma_cm_id *cm_id,
				struct rdma_cm_event *cm_ev)
{
	enum rdma_cm_event_type ev_type;
	struct isert_portal *portal;
	int err = -EINVAL;

	TRACE_ENTRY();

	ev_type = cm_ev->event;
	portal = cm_id->context;
	pr_info("isert_cm_evt:%s(%d) status:%d portal:%p cm_id:%p\n",
		cm_event_type_str(ev_type), ev_type, cm_ev->status,
		portal, cm_id);

	if (portal->cm_id == cm_id) {
		err = isert_cm_evt_listener_handler(cm_id, cm_ev);
		goto out;
	}

	switch (ev_type) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		err = isert_cm_conn_req_handler(cm_id, cm_ev);
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		err = isert_cm_connect_handler(cm_id, cm_ev);
		if (unlikely(err))
			err = isert_handle_failure(cm_id->qp->qp_context);
		break;

	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_REJECTED:
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_DISCONNECTED:
		err = isert_cm_disconnect_handler(cm_id, cm_ev);
		break;

	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		isert_cm_disconnect_handler(cm_id, cm_ev);
		/* fallthrough */
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		err = isert_cm_timewait_exit_handler(cm_id, cm_ev);
		break;

	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		pr_err("UD-related event:%d, ignored\n", ev_type);
		break;

	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_RESPONSE:
		pr_err("Active side event:%d, ignored\n", ev_type);
		break;

	/* We can receive this instead of RDMA_CM_EVENT_ESTABLISHED */
	case RDMA_CM_EVENT_UNREACHABLE:
	{
		struct isert_connection *isert_conn;

		isert_conn = cm_id->qp->qp_context;
		set_bit(ISERT_CONNECTION_ABORTED, &isert_conn->flags);
		/*
		 * Reaching here must be with the isert_conn refcount of 2,
		 * one from the init and one from the connect request,
		 * thus it is safe to deref directly before the
		 * sched_conn_closed.
		 */
		isert_conn_free(isert_conn);
		isert_sched_conn_closed(isert_conn);
		err = 0;
	}
	break;

	default:
		pr_err("Illegal event:%d, ignored\n", ev_type);
		break;
	}

	if (unlikely(err))
		pr_err("Failed to handle rdma cm evt:%d, err:%d\n",
		       ev_type, err);

out:
	TRACE_EXIT_RES(err);
	return err;
}