static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) { struct zfcp_adapter *adapter = act->adapter; struct zfcp_port *port = act->port; struct zfcp_unit *unit = act->unit; switch (act->action) { case ZFCP_ERP_ACTION_REOPEN_UNIT: if ((result == ZFCP_ERP_SUCCEEDED) && !unit->device) { zfcp_unit_get(unit); if (scsi_queue_work(unit->port->adapter->scsi_host, &unit->scsi_work) <= 0) zfcp_unit_put(unit); } zfcp_unit_put(unit); break; case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: case ZFCP_ERP_ACTION_REOPEN_PORT: if (result == ZFCP_ERP_SUCCEEDED) zfcp_scsi_schedule_rport_register(port); zfcp_port_put(port); break; case ZFCP_ERP_ACTION_REOPEN_ADAPTER: if (result == ZFCP_ERP_SUCCEEDED) { register_service_level(&adapter->service_level); schedule_work(&adapter->scan_work); } else unregister_service_level(&adapter->service_level); zfcp_adapter_put(adapter); break; } }
void iser_snd_completion(struct iser_desc *tx_desc) { struct iser_dto *dto = &tx_desc->dto; struct iser_conn *ib_conn = dto->ib_conn; struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn; struct iscsi_conn *conn = iser_conn->iscsi_conn; struct iscsi_task *task; int resume_tx = 0; iser_dbg("Initiator, Data sent dto=0x%p\n", dto); iser_dto_buffs_release(dto); if (tx_desc->type == ISCSI_TX_DATAOUT) kmem_cache_free(ig.desc_cache, tx_desc); if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) == ISER_QP_MAX_REQ_DTOS) resume_tx = 1; atomic_dec(&ib_conn->post_send_buf_count); if (resume_tx) { iser_dbg("%ld resuming tx\n",jiffies); scsi_queue_work(conn->session->host, &conn->xmitwork); } if (tx_desc->type == ISCSI_TX_CONTROL) { /* this arithmetic is legal by libiscsi dd_data allocation */ task = (void *) ((long)(void *)tx_desc - sizeof(struct iscsi_task)); if (task->hdr->itt == RESERVED_ITT) iscsi_put_task(task); } }
/*
 * cxgb3i_conn_tx_open - TX-window-open callback from the offload layer
 * @c3cn: the offloaded TCP connection that can accept more data
 *
 * Wakes the iSCSI transmit worker for the connection associated with
 * @c3cn, if one is attached via user_data.
 */
void cxgb3i_conn_tx_open(struct s3_conn *c3cn)
{
	struct iscsi_conn *conn = c3cn->user_data;

	cxgb3i_tx_debug("cn 0x%p.\n", c3cn);
	if (!conn)
		return;

	cxgb3i_tx_debug("cn 0x%p, cid %d.\n", c3cn, conn->id);
	scsi_queue_work(conn->session->host, &conn->xmitwork);
}
/**
 * iscsi_sw_tcp_write_space - Called when more output buffer space is available
 * @sk: socket space is available for
 *
 * Installed as the socket's sk_write_space callback; chains to the saved
 * original callback, then wakes the iSCSI xmit worker so queued PDUs can
 * be sent. (Fixed: the kernel-doc header previously named the wrong
 * function, "iscsi_write_space".)
 **/
static void iscsi_sw_tcp_write_space(struct sock *sk)
{
	struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	/* chain to the networking core's original write-space handler */
	tcp_sw_conn->old_write_space(sk);
	debug_tcp("iscsi_write_space: cid %d\n", conn->id);
	scsi_queue_work(conn->session->host, &conn->xmitwork);
}
void iser_snd_completion(struct iser_desc *tx_desc) { struct iser_dto *dto = &tx_desc->dto; struct iser_conn *ib_conn = dto->ib_conn; struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn; struct iscsi_conn *conn = iser_conn->iscsi_conn; struct iscsi_mgmt_task *mtask; int resume_tx = 0; iser_dbg("Initiator, Data sent dto=0x%p\n", dto); iser_dto_buffs_release(dto); if (tx_desc->type == ISCSI_TX_DATAOUT) kmem_cache_free(ig.desc_cache, tx_desc); if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) == ISER_QP_MAX_REQ_DTOS) resume_tx = 1; atomic_dec(&ib_conn->post_send_buf_count); if (resume_tx) { iser_dbg("%ld resuming tx\n",jiffies); scsi_queue_work(conn->session->host, &conn->xmitwork); } if (tx_desc->type == ISCSI_TX_CONTROL) { /* this arithmetic is legal by libiscsi dd_data allocation */ mtask = (void *) ((long)(void *)tx_desc - sizeof(struct iscsi_mgmt_task)); if (mtask->hdr->itt == cpu_to_be32(ISCSI_RESERVED_TAG)) { struct iscsi_session *session = conn->session; spin_lock(&conn->session->lock); list_del(&mtask->running); __kfifo_put(session->mgmtpool.queue, (void*)&mtask, sizeof(void*)); spin_unlock(&session->lock); } } }