static int modisk_perf_exec(struct scst_cmd *cmd) { int res = SCST_EXEC_NOT_COMPLETED; int opcode = cmd->cdb[0]; TRACE_ENTRY(); cmd->status = 0; cmd->msg_status = 0; cmd->host_status = DID_OK; cmd->driver_status = 0; switch (opcode) { case WRITE_6: case WRITE_10: case WRITE_12: case WRITE_16: case READ_6: case READ_10: case READ_12: case READ_16: cmd->completed = 1; goto out_done; } out: TRACE_EXIT_RES(res); return res; out_done: res = SCST_EXEC_COMPLETED; cmd->scst_cmd_done(cmd, SCST_CMD_STATE_DEFAULT, SCST_CONTEXT_SAME); goto out; }
/*
 * Completion callback invoked once a response PDU has been sent to the
 * initiator.  Performs any connection teardown that was deferred onto the
 * PDU, then drops the references taken for the send.  Always returns 0.
 */
int isert_pdu_sent(struct iscsi_cmnd *pdu)
{
	struct iscsi_conn *conn = pdu->conn;
	int res = 0;

	TRACE_ENTRY();

	if (unlikely(pdu->should_close_conn)) {
		if (pdu->should_close_all_conn) {
			/* Initiator asked to drop every session on the target */
			struct iscsi_target *target =
				pdu->conn->session->target;

			PRINT_INFO("Closing all connections for target %x at "
				   "initiator's %s request", target->tid,
				   conn->session->initiator_name);
			mutex_lock(&target->target_mutex);
			target_del_all_sess(target, 0);
			mutex_unlock(&target->target_mutex);
		} else {
			PRINT_INFO("Closing connection %p at initiator's %s "
				   "request", conn,
				   conn->session->initiator_name);
			mark_conn_closed(conn);
		}
	}

	/* we may get NULL parent req for login response */
	if (likely(pdu->parent_req)) {
		rsp_cmnd_release(pdu);
		conn_put(conn);
	}

	TRACE_EXIT_RES(res);
	return res;
}
/*
 * Module init: registers the SCST target template and, on old kernels that
 * define SCST_REGISTER_INITIATOR_DRIVER, also registers the legacy SCSI
 * host template, failing with -ENODEV if no host came up.
 */
static int __init init_this_scst_driver(void)
{
	int rc;

	TRACE_ENTRY();

	rc = scst_register_target_template(&driver_target_template);
	TRACE_DBG("scst_register_target_template() returned %d", rc);

#ifdef SCST_REGISTER_INITIATOR_DRIVER
	if (rc >= 0) {
		driver_template.module = THIS_MODULE;
		scsi_register_module(MODULE_SCSI_HA, &driver_template);
		TRACE_DBG("driver_template.present=%d",
			  driver_template.present);
		if (driver_template.present == 0) {
			rc = -ENODEV;
			MOD_DEC_USE_COUNT;
		}
	}
#endif

	TRACE_EXIT_RES(rc);
	return rc;
}
/*
 * Called when the underlying RDMA connection has been closed.
 *
 * If the iSCSI read machinery was already running (rd_state set), teardown
 * is delegated to isert_handle_close_connection().  Otherwise the
 * connection is still in the login phase: cancel its timeout timer, abort
 * any pending login request and release the char-device resources here.
 */
int isert_connection_closed(struct iscsi_conn *iscsi_conn)
{
	int res = 0;

	TRACE_ENTRY();

	if (iscsi_conn->rd_state) {
		res = isert_handle_close_connection(iscsi_conn);
	} else {
		struct isert_conn_dev *dev = isert_get_priv(iscsi_conn);

		if (dev) {
			isert_del_timer(dev);
			dev->state = CS_DISCONNECTED;
			if (dev->login_req) {
				res = isert_task_abort(dev->login_req);
				/* clear under pdu_lock so readers never see a stale PDU */
				spin_lock(&dev->pdu_lock);
				dev->login_req = NULL;
				spin_unlock(&dev->pdu_lock);
			}
			/* unblock any user-space reader waiting on this device */
			wake_up(&dev->waitqueue);
			isert_dev_release(dev);
		}
		isert_free_connection(iscsi_conn);
	}

	TRACE_EXIT_RES(res);
	return res;
}
/*
 * open() handler for a per-connection isert char device.
 *
 * Fails with -ENODEV if the device slot is no longer occupied (connection
 * already closed) and with -EBUSY if it is already open; otherwise takes a
 * reference and stores the device in filp->private_data for the other
 * file operations.
 */
static int isert_open(struct inode *inode, struct file *filp)
{
	struct isert_conn_dev *dev; /* device information */
	int res = 0;

	TRACE_ENTRY();

	dev = container_of(inode->i_cdev, struct isert_conn_dev, cdev);

	spin_lock(&isert_listen_dev.conn_lock);
	if (unlikely(dev->occupied == 0)) {
		spin_unlock(&isert_listen_dev.conn_lock);
		res = -ENODEV; /* already closed */
		goto out;
	}
	spin_unlock(&isert_listen_dev.conn_lock);

	/* 'available' acts as a try-lock: only one opener may win */
	if (unlikely(!atomic_dec_and_test(&dev->available))) {
		atomic_inc(&dev->available);
		res = -EBUSY; /* already open */
		goto out;
	}

	spin_lock(&isert_listen_dev.conn_lock);
	kref_get(&dev->kref);
	spin_unlock(&isert_listen_dev.conn_lock);

	filp->private_data = dev; /* for other methods */

out:
	TRACE_EXIT_RES(res);
	return res;
}
int isert_pdu_send(struct isert_connection *isert_conn, struct isert_cmnd *tx_pdu) { int err; struct isert_wr *wr; TRACE_ENTRY(); #ifdef CONFIG_SCST_EXTRACHECKS EXTRACHECKS_BUG_ON(!isert_conn); EXTRACHECKS_BUG_ON(!tx_pdu); #endif wr = &tx_pdu->wr[0]; wr->send_wr.num_sge = isert_pdu_prepare_send(isert_conn, tx_pdu); err = isert_post_send(isert_conn, wr, 1); if (unlikely(err)) { pr_err("Failed to send pdu conn:%p pdu:%p err:%d\n", isert_conn, tx_pdu, err); } TRACE_EXIT_RES(err); return err; }
/*
 * Attach a freshly accepted iSCSI connection to a free per-connection
 * char device, arm a 60-second login timeout and wake any user-space
 * listener blocked on the listener device.
 */
static int add_new_connection(struct isert_listener_dev *dev,
			      struct iscsi_conn *conn)
{
	struct isert_conn_dev *conn_dev = get_available_dev(dev, conn);
	int res = 0;

	TRACE_ENTRY();

	if (!conn_dev) {
		PRINT_WARNING("%s", "Unable to allocate new connection");
		res = -ENOSPC;
		goto out;
	}

	/* old kernels pass the data pointer through INIT_WORK itself */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
	INIT_WORK(&conn->close_work, isert_close_conn_fn, conn);
#else
	INIT_WORK(&conn->close_work, isert_close_conn_fn);
#endif

	/* abort the login if user space does not pick it up within 60 s */
	init_timer(&conn_dev->tmo_timer);
	conn_dev->tmo_timer.function = isert_conn_timer_fn;
	conn_dev->tmo_timer.expires = jiffies + 60 * HZ;
	conn_dev->tmo_timer.data = (unsigned long)conn_dev;
	add_timer(&conn_dev->tmo_timer);
	conn_dev->timer_active = 1;
	wake_up(&dev->waitqueue);

out:
	TRACE_EXIT_RES(res);
	return res;
}
/*
 * Called when an RX PDU has been fully processed and can be recycled.
 *
 * Re-initializes the PDU and appends its recv WR to the connection's
 * pending chain; once more than repost_threshold WRs have accumulated,
 * the whole chain is posted to the receive queue in one call (batching
 * doorbell writes).
 */
int isert_rx_pdu_done(struct isert_cmnd *pdu)
{
	int err;
	struct isert_connection *isert_conn =
		(struct isert_connection *)pdu->iscsi.conn;

	TRACE_ENTRY();

	err = isert_reinit_rx_pdu(pdu);
	if (unlikely(err))
		goto out;

	spin_lock(&isert_conn->post_recv_lock);
	/* link this PDU's recv WR onto the to-be-posted chain */
	if (unlikely(isert_conn->to_post_recv == 0))
		isert_conn->post_recv_first = &pdu->wr[0];
	else
		isert_link_recv_wrs(isert_conn->post_recv_curr, &pdu->wr[0]);
	isert_conn->post_recv_curr = &pdu->wr[0];

	/* flush the batch once it exceeds the repost threshold */
	if (++isert_conn->to_post_recv > isert_conn->repost_threshold) {
		err = isert_post_recv(isert_conn, isert_conn->post_recv_first,
				      isert_conn->to_post_recv);
		isert_conn->to_post_recv = 0;
	}
	spin_unlock(&isert_conn->post_recv_lock);

out:
	TRACE_EXIT_RES(err);
	return err;
}
/*
 * Poll for termination of @pid for up to @deadline seconds.
 *
 * Returns 0 if the process is still running when the deadline expires,
 * its waitpid() result (> 0) if it has exited, or a negative errno on
 * waitpid() failure.  @status receives the raw wait status; @child is
 * used only for diagnostics.
 */
int wait_until_finished(pid_t pid, unsigned long deadline, int *status,
			int child)
{
	int res;
	time_t start, end;
	double elapsed;
	/*
	 * BUG FIX: the original called sleep(0.1); sleep() takes an unsigned
	 * int, so 0.1 was truncated to 0 and the loop busy-waited at full
	 * speed.  Sleep 100 ms per iteration via nanosleep() instead.
	 */
	const struct timespec poll_interval = { 0, 100000000L };

	TRACE_ENTRY();

	time(&start);
	do {
		res = waitpid(pid, status, WNOHANG);
		if (res != 0) {
			if (res < 0) {
				res = -errno;
				PRINT_ERROR("Waitpid for pid %d (child %d) "
					"failed: %d (%s)", pid, child, errno,
					strerror(errno));
			}
			break;
		}
		nanosleep(&poll_interval, NULL);
		time(&end);
		elapsed = difftime(end, start);
	} while (elapsed < deadline);

	TRACE_EXIT_RES(res);
	return res;
}
static int isert_conn_qp_create(struct isert_connection *isert_conn) { struct rdma_cm_id *cm_id = isert_conn->cm_id; struct isert_device *isert_dev = isert_conn->isert_dev; struct ib_qp_init_attr qp_attr; int err; int cq_idx; int max_wr = ISER_MAX_WCE; TRACE_ENTRY(); cq_idx = isert_get_cq_idx(isert_dev); memset(&qp_attr, 0, sizeof(qp_attr)); qp_attr.event_handler = isert_async_evt_handler; qp_attr.qp_context = isert_conn; qp_attr.send_cq = isert_dev->cq_desc[cq_idx].cq; qp_attr.recv_cq = isert_dev->cq_desc[cq_idx].cq; isert_conn->cq_desc = &isert_dev->cq_desc[cq_idx]; qp_attr.cap.max_send_sge = isert_conn->max_sge; qp_attr.cap.max_recv_sge = 3; qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR; qp_attr.qp_type = IB_QPT_RC; do { if (max_wr < ISER_MIN_SQ_SIZE) { pr_err("Failed to create qp, not enough memory\n"); goto fail_create_qp; } qp_attr.cap.max_send_wr = max_wr; qp_attr.cap.max_recv_wr = max_wr; err = rdma_create_qp(cm_id, isert_dev->pd, &qp_attr); if (err && err != -ENOMEM) { pr_err("Failed to create qp, err:%d\n", err); goto fail_create_qp; } max_wr /= 2; } while (err == -ENOMEM); isert_conn->qp = cm_id->qp; pr_info("iser created cm_id:%p qp:0x%X\n", cm_id, cm_id->qp->qp_num); out: TRACE_EXIT_RES(err); return err; fail_create_qp: mutex_lock(&dev_list_mutex); isert_dev->cq_qps[cq_idx]--; mutex_unlock(&dev_list_mutex); goto out; }
/*
 * Prepare the RDMA work requests for transferring @isert_pdu's data.
 *
 * Maps the SCST scatter-gather list for DMA and carves it into as many
 * RDMA WRs as needed (at most max_sge entries each), then chains the WRs
 * so they can be posted as a single list.
 *
 * Returns the number of prepared WRs, or a negative error code.
 */
int isert_prepare_rdma(struct isert_cmnd *isert_pdu,
		       struct isert_connection *isert_conn,
		       enum isert_wr_op op)
{
	struct isert_buf *isert_buf = &isert_pdu->rdma_buf;
	struct isert_device *isert_dev = isert_conn->isert_dev;
	struct ib_device *ib_dev = isert_dev->ib_dev;
	int err;
	int buff_offset;
	int sg_offset, sg_cnt;
	int wr_cnt, i;

	isert_buf_init_sg(isert_buf, isert_pdu->iscsi.sg,
			  isert_pdu->iscsi.sg_cnt, isert_pdu->iscsi.bufflen);

	/* RDMA write = data flows to the initiator, i.e. out of our memory */
	if (op == ISER_WR_RDMA_WRITE)
		isert_buf->dma_dir = DMA_TO_DEVICE;
	else
		isert_buf->dma_dir = DMA_FROM_DEVICE;

	/* grow the per-PDU WR/SGE arrays if the s/g list is larger than before */
	if (unlikely(isert_buf->sg_cnt > isert_pdu->n_sge)) {
		wr_cnt = isert_alloc_for_rdma(isert_pdu, isert_buf->sg_cnt,
					      isert_conn);
		if (unlikely(wr_cnt))
			goto out;
	}

	err = ib_dma_map_sg(ib_dev, isert_buf->sg, isert_buf->sg_cnt,
			    isert_buf->dma_dir);
	if (unlikely(!err)) {
		/* ib_dma_map_sg() returns 0 on mapping failure */
		pr_err("Failed to DMA map iser sg:%p len:%d\n",
		       isert_buf->sg, isert_buf->sg_cnt);
		wr_cnt = -EFAULT;
		goto out;
	}

	buff_offset = 0;
	sg_cnt = 0;
	for (wr_cnt = 0, sg_offset = 0; sg_offset < isert_buf->sg_cnt;
	     ++wr_cnt) {
		sg_cnt = min((int)isert_conn->max_sge,
			     isert_buf->sg_cnt - sg_offset);
		err = isert_wr_init(&isert_pdu->wr[wr_cnt], op, isert_buf,
				    isert_conn, isert_pdu, isert_pdu->sg_pool,
				    sg_offset, sg_cnt, buff_offset);
		if (unlikely(err < 0)) {
			wr_cnt = err;
			goto out;
		}
		/* isert_wr_init() returns the next buffer offset on success */
		buff_offset = err;
		sg_offset += sg_cnt;
	}

	for (i = 1; i < wr_cnt; ++i)
		isert_link_send_wrs(&isert_pdu->wr[i - 1], &isert_pdu->wr[i]);

out:
	TRACE_EXIT_RES(wr_cnt);
	return wr_cnt;
}
/*
 * Bind @portal's CM id to @sa and start listening for iSER connections,
 * then log the bound address.
 *
 * NOTE(review): the address family is validated only after listening has
 * started; an unknown family returns -EINVAL but leaves the id bound and
 * listening - confirm whether that is intended.
 */
int isert_portal_listen(struct isert_portal *portal,
			struct sockaddr *sa,
			size_t addr_len)
{
	int err;

	TRACE_ENTRY();

	err = rdma_bind_addr(portal->cm_id, sa);
	if (err) {
		pr_warn("Failed to bind rdma addr, err:%d\n", err);
		goto out;
	}
	err = rdma_listen(portal->cm_id, ISER_LISTEN_BACKLOG);
	if (err) {
		pr_err("Failed rdma listen, err:%d\n", err);
		goto out;
	}
	/* remember the bound address for later lookups */
	memcpy(&portal->addr, sa, addr_len);

	switch (sa->sa_family) {
	case AF_INET:
		/* old kernels lack the %pI4 printk extension */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
		pr_info("iser portal cm_id:%p listens on: "
			NIPQUAD_FMT ":%d\n", portal->cm_id,
			NIPQUAD(((struct sockaddr_in *)sa)->sin_addr.s_addr),
			(int)ntohs(((struct sockaddr_in *)sa)->sin_port));
#else
		pr_info("iser portal cm_id:%p listens on: "
			"%pI4:%d\n", portal->cm_id,
			&((struct sockaddr_in *)sa)->sin_addr.s_addr,
			(int)ntohs(((struct sockaddr_in *)sa)->sin_port));
#endif
		break;
	case AF_INET6:
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
		pr_info("iser portal cm_id:%p listens on: "
			NIP6_FMT " %d\n", portal->cm_id,
			NIP6(((struct sockaddr_in6 *)sa)->sin6_addr),
			(int)ntohs(((struct sockaddr_in6 *)sa)->sin6_port));
#else
		pr_info("iser portal cm_id:%p listens on: "
			"%pI6 %d\n", portal->cm_id,
			&((struct sockaddr_in6 *)sa)->sin6_addr,
			(int)ntohs(((struct sockaddr_in6 *)sa)->sin6_port));
#endif
		break;
	default:
		pr_err("Unknown address family\n");
		err = -EINVAL;
		goto out;
	}

out:
	TRACE_EXIT_RES(err);
	return err;
}
/*
 * Called by the iSCSI layer when a login request PDU arrives on a
 * connection still in the login phase.  Validates the per-connection
 * state machine, then publishes the PDU's BHS to the user-space login
 * handler via the connection device and wakes any waiting reader.
 */
int isert_login_req_rx(struct iscsi_cmnd *login_req)
{
	struct isert_conn_dev *dev = isert_get_priv(login_req->conn);
	int res = 0;

	TRACE_ENTRY();

	if (!dev) {
		PRINT_ERROR("Received PDU %p on invalid connection",
			    login_req);
		res = -EINVAL;
		goto out;
	}

	switch (dev->state) {
	case CS_INIT:
	case CS_RSP_FINISHED:
		/* idle states: there must be no request already in flight */
		if (unlikely(dev->login_req != NULL)) {
			sBUG();
			res = -EINVAL;
			goto out;
		}
		break;
	case CS_REQ_BHS: /* Got login request before done handling old one */
		break;
	case CS_REQ_DATA:
	case CS_REQ_FINISHED:
	case CS_RSP_BHS:
	case CS_RSP_DATA:
		PRINT_WARNING("Received login PDU while handling previous one. State:%d",
			      dev->state);
		res = -EINVAL;
		goto out;
	default:
		sBUG();
		res = -EINVAL;
		goto out;
	}

	/* expose the BHS to the user-space reader under pdu_lock */
	spin_lock(&dev->pdu_lock);
	dev->login_req = login_req;
	dev->read_len = sizeof(login_req->pdu.bhs);
	dev->read_buf = (char *)&login_req->pdu.bhs;
	dev->state = CS_REQ_BHS;
	spin_unlock(&dev->pdu_lock);

	wake_up(&dev->waitqueue);

out:
	TRACE_EXIT_RES(res);
	return res;
}
/*
 * Allocate and post the RX/TX PDU pools for a new iSER connection.
 *
 * Allocates 2*queue_depth + repost_threshold RX PDUs (chained so they can
 * be posted in one call) and one TX PDU per iteration, then posts the
 * whole RX chain.  On any failure everything is freed again.
 */
int isert_alloc_conn_resources(struct isert_connection *isert_conn)
{
	struct isert_cmnd *pdu, *prev_pdu = NULL, *first_pdu = NULL;
	int t_datasz = 512; /* RFC states that minimum receive data size is 512 */
	int i_datasz = ISER_HDRS_SZ + SCST_SENSE_BUFFERSIZE;
	int i, err = 0;
	int to_alloc;

	TRACE_ENTRY();

	isert_conn->repost_threshold = 32;
	to_alloc = isert_conn->queue_depth * 2 + isert_conn->repost_threshold;

	/* the shared CQ can absorb at most ISER_MAX_WCE entries per conn */
	if (unlikely(to_alloc > ISER_MAX_WCE)) {
		pr_err("QueuedCommands larger than %d not supported\n",
		       (ISER_MAX_WCE - isert_conn->repost_threshold) / 2);
		err = -EINVAL;
		goto out;
	}

	for (i = 0; i < to_alloc; i++) {
		pdu = isert_rx_pdu_alloc(isert_conn, t_datasz);
		if (unlikely(!pdu)) {
			err = -ENOMEM;
			goto clean_pdus;
		}

		/* chain RX work requests so they can be posted as one list */
		if (unlikely(first_pdu == NULL))
			first_pdu = pdu;
		else
			isert_link_recv_pdu_wrs(prev_pdu, pdu);

		prev_pdu = pdu;

		/*
		 * NOTE(review): the TX PDU pointer is only NULL-checked;
		 * presumably isert_tx_pdu_alloc() links it into a per-conn
		 * pool internally - confirm.
		 */
		pdu = isert_tx_pdu_alloc(isert_conn, i_datasz);
		if (unlikely(!pdu)) {
			err = -ENOMEM;
			goto clean_pdus;
		}
	}

	err = isert_post_recv(isert_conn, &first_pdu->wr[0], to_alloc);
	if (unlikely(err)) {
		pr_err("Failed to post recv err:%d\n", err);
		goto clean_pdus;
	}

out:
	TRACE_EXIT_RES(err);
	return err;

clean_pdus:
	isert_free_conn_resources(isert_conn);
	goto out;
}
/*
 * Command-completion handler for CD-ROM devices: delegates to the SCST
 * generic block-device completion code, which also refreshes the cached
 * block size via cdrom_set_block_shift().
 */
static int cdrom_done(struct scst_cmd *cmd)
{
	int res;

	TRACE_ENTRY();

	/*
	 * The result comes entirely from the generic handler; the previous
	 * SCST_CMD_STATE_DEFAULT initializer was a dead store (sibling
	 * modisk_done() already omits it).
	 */
	res = scst_block_generic_dev_done(cmd, cdrom_set_block_shift);

	TRACE_EXIT_RES(res);
	return res;
}
/*
 * Command-completion handler for MO disk devices: delegates to the SCST
 * generic block-device completion code, which also refreshes the cached
 * block size via modisk_set_block_shift().
 */
static int modisk_done(struct scst_cmd *cmd)
{
	int rc;

	TRACE_ENTRY();
	rc = scst_block_generic_dev_done(cmd, modisk_set_block_shift);
	TRACE_EXIT_RES(rc);

	return rc;
}
/*
 * ioctl handler for the isert listener device.
 *
 * SET_LISTEN_ADDR: copies a sockaddr from user space, validates the
 * portal count and address length, then creates a new listening RDMA
 * portal and records its handle.
 */
static long isert_listen_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	struct isert_listener_dev *dev = filp->private_data;
	int res = 0, rc;
	void __user *ptr = (void __user *)arg;
	void *portal;

	TRACE_ENTRY();

	switch (cmd) {
	case SET_LISTEN_ADDR:
		rc = copy_from_user(&dev->info, ptr, sizeof(dev->info));
		if (unlikely(rc != 0)) {
			/* rc = number of bytes copy_from_user could NOT copy */
			PRINT_ERROR("Failed to copy %d user's bytes\n", rc);
			res = -EFAULT;
			goto out;
		}

		if (unlikely(dev->free_portal_idx >= ISERT_MAX_PORTALS)) {
			PRINT_ERROR("Maximum number of portals exceeded: %d\n",
				    ISERT_MAX_PORTALS);
			res = -EINVAL;
			goto out;
		}

		if (unlikely(dev->info.addr_len > sizeof(dev->info.addr))) {
			/* NOTE(review): %zd is the signed form; %zu would fit a size_t - confirm addr_len's type */
			PRINT_ERROR("Invalid address length %zd > %zd",
				    dev->info.addr_len,
				    sizeof(dev->info.addr));
			res = -EINVAL;
			goto out;
		}

		portal = isert_portal_add((struct sockaddr *)&dev->info.addr,
					  dev->info.addr_len);
		if (IS_ERR(portal)) {
			PRINT_ERROR("Unable to add portal of size %zu\n",
				    dev->info.addr_len);
			res = PTR_ERR(portal);
			goto out;
		}
		dev->portal_h[dev->free_portal_idx++] = portal;
		break;
	default:
		PRINT_ERROR("Invalid ioctl cmd %x", cmd);
		res = -EINVAL;
	}

out:
	TRACE_EXIT_RES(res);
	return res;
}
/*
 * Complete @req locally (no data phase with the initiator) and flush any
 * responses queued on its connection.  @cmd_count is accepted for
 * signature compatibility and is not used here.
 */
static int isert_send_locally(struct iscsi_cmnd *req, unsigned int cmd_count)
{
	int res;

	TRACE_ENTRY();

	/* the request itself is done; release it before pushing responses */
	req_cmnd_pre_release(req);
	res = isert_process_all_writes(req->conn);
	cmnd_put(req);

	TRACE_EXIT_RES(res);
	return res;
}
/*
 * Called when all Data-Out data for @cmnd has arrived via RDMA; finishes
 * the receive phase of the command.  Always returns 0.
 */
int isert_data_out_ready(struct iscsi_cmnd *cmnd)
{
	int res = 0;

	TRACE_ENTRY();

#ifdef CONFIG_SCST_EXTRACHECKS
	/* record the reading task for the iSCSI core's sanity checks */
	cmnd->conn->rd_task = current;
#endif
	cmnd_rx_end(cmnd);

	TRACE_EXIT_RES(res);
	return res;
}
/*
 * Module-init helper: allocates the char-device region, the
 * per-connection device array and the device class, sets up the listener
 * plus @ndevs connection cdevs, then initializes the RDMA datamover.
 */
int __init isert_init_login_devs(unsigned int ndevs)
{
	int res;
	unsigned int i;

	TRACE_ENTRY();

	n_devs = ndevs;

	res = alloc_chrdev_region(&devno, 0, n_devs, "isert_scst");
	isert_major = MAJOR(devno);
	if (unlikely(res < 0)) {
		PRINT_ERROR("isert: can't get major %d\n", isert_major);
		goto out;
	}

	/*
	 * allocate the devices -- we can't have them static, as the number
	 * can be specified at load time
	 */
	isert_conn_devices = kzalloc(n_devs * sizeof(struct isert_conn_dev),
				     GFP_KERNEL);
	if (unlikely(!isert_conn_devices)) {
		res = -ENOMEM;
		goto fail; /* Make this more graceful */
	}

	/* NOTE(review): class_create() may return ERR_PTR(); its result is not checked here - confirm */
	isert_class = class_create(THIS_MODULE, "isert_scst");

	isert_setup_listener_cdev(&isert_listen_dev);

	/* Initialize each device. */
	for (i = 0; i < n_devs; i++)
		isert_setup_cdev(&isert_conn_devices[i], i);

	res = isert_datamover_init();
	if (unlikely(res)) {
		PRINT_ERROR("Unable to initialize datamover: %d\n", res);
		goto fail;
	}

out:
	TRACE_EXIT_RES(res);
	return res;

fail:
	isert_cleanup_login_devs();
	goto out;
}
/*
 * Post the chain of @wr_cnt RDMA-read work requests (starting at wr[0])
 * that fetch write data from the initiator for @isert_cmd.
 */
int isert_pdu_post_rdma_read(struct isert_connection *isert_conn,
			     struct isert_cmnd *isert_cmd, int wr_cnt)
{
	int res;

	TRACE_ENTRY();

	res = isert_post_send(isert_conn, &isert_cmd->wr[0], wr_cnt);
	if (unlikely(res))
		pr_err("Failed to send pdu conn:%p pdu:%p err:%d\n",
		       isert_conn, isert_cmd, res);

	TRACE_EXIT_RES(res);
	return res;
}
/*
 * Drain the connection's send queue: transmit every response PDU that is
 * ready, taking a connection reference per PDU for the in-flight send.
 * Always returns 0.
 */
static int isert_process_all_writes(struct iscsi_conn *conn)
{
	struct iscsi_cmnd *send_pdu;
	int res = 0;

	TRACE_ENTRY();

	for (;;) {
		send_pdu = iscsi_get_send_cmnd(conn);
		if (send_pdu == NULL)
			break;
		isert_update_len_sn(send_pdu);
		conn_get(conn);
		isert_pdu_tx(send_pdu);
	}

	TRACE_EXIT_RES(res);
	return res;
}
/*
 * read() handler for the listener device: blocks (unless O_NONBLOCK)
 * until a new connection arrives, moves it to the current-connection
 * list and returns its "/dev/<prefix><idx>" device path (including the
 * trailing NUL) to user space.
 */
static ssize_t isert_listen_read(struct file *filp, char __user *buf,
				 size_t count, loff_t *f_pos)
{
	struct isert_listener_dev *dev = filp->private_data;
	struct isert_conn_dev *conn_dev;
	int res = 0;
	char k_buff[sizeof("/dev/") + sizeof(ISER_CONN_DEV_PREFIX) + 3 + 1];
	size_t to_write;

	TRACE_ENTRY();

	if (!have_new_connection(dev)) {

wait_for_connection:
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		/*
		 * BUG FIX: the wait condition was inverted
		 * (!have_new_connection), so the wait returned immediately
		 * while no connection was pending.  Sleep until one arrives.
		 */
		res = wait_event_freezable(dev->waitqueue,
					   have_new_connection(dev));
		if (res < 0)
			goto out;
	}

	spin_lock(&dev->conn_lock);
	if (list_empty(&dev->new_conn_list)) {
		/* could happen if we got disconnect */
		spin_unlock(&dev->conn_lock);
		goto wait_for_connection;
	}
	conn_dev = list_first_entry(&dev->new_conn_list,
				    struct isert_conn_dev, conn_list_entry);
	list_move(&conn_dev->conn_list_entry, &dev->curr_conn_list);
	spin_unlock(&dev->conn_lock);

	to_write = min_t(size_t, sizeof(k_buff), count);
	res = scnprintf(k_buff, to_write, "/dev/"ISER_CONN_DEV_PREFIX"%d",
			conn_dev->idx);
	++res; /* copy trailing \0 as well */

	if (unlikely(copy_to_user(buf, k_buff, res)))
		res = -EFAULT;

out:
	TRACE_EXIT_RES(res);
	return res;
}
/*
 * Module init for the MO disk handler: registers the normal and
 * performance-mode device handlers and (with CONFIG_SCST_PROC) their proc
 * entries, unwinding in reverse order on any failure.
 */
static int __init init_scst_modisk_driver(void)
{
	int res = 0;

	TRACE_ENTRY();

	modisk_devtype.module = THIS_MODULE;

	res = scst_register_dev_driver(&modisk_devtype);
	if (res < 0)
		goto out;

	modisk_devtype_perf.module = THIS_MODULE;

	res = scst_register_dev_driver(&modisk_devtype_perf);
	if (res < 0)
		goto out_unreg;

#ifdef CONFIG_SCST_PROC
	res = scst_dev_handler_build_std_proc(&modisk_devtype);
	if (res != 0)
		goto out_unreg1;

	res = scst_dev_handler_build_std_proc(&modisk_devtype_perf);
	if (res != 0)
		goto out_unreg2;
#endif

out:
	TRACE_EXIT_RES(res);
	return res;

	/* error unwinding: each label undoes the step above its goto */
#ifdef CONFIG_SCST_PROC
out_unreg2:
	scst_dev_handler_destroy_std_proc(&modisk_devtype);

out_unreg1:
	scst_unregister_dev_driver(&modisk_devtype_perf);
#endif

out_unreg:
	scst_unregister_dev_driver(&modisk_devtype);
	goto out;
}
int isert_pdu_rx(struct iscsi_cmnd *cmnd) { int res = 0; scst_data_direction dir; TRACE_ENTRY(); #ifdef CONFIG_SCST_EXTRACHECKS cmnd->conn->rd_task = current; #endif iscsi_cmnd_init(cmnd->conn, cmnd, NULL); cmnd_rx_start(cmnd); if (unlikely(!cmnd->scst_cmd)) { cmnd_rx_end(cmnd); goto out; } if (unlikely(scst_cmd_prelim_completed(cmnd->scst_cmd) || unlikely(cmnd->prelim_compl_flags != 0))) { set_bit(ISCSI_CMD_PRELIM_COMPLETED, &cmnd->prelim_compl_flags); cmnd_rx_end(cmnd); goto out; } dir = scst_cmd_get_data_direction(cmnd->scst_cmd); if (dir & SCST_DATA_WRITE) { res = iscsi_cmnd_set_write_buf(cmnd); if (unlikely(res)) goto out; res = isert_request_data_out(cmnd); cmnd->r2t_len_to_receive = 0; cmnd->r2t_len_to_send = 0; cmnd->outstanding_r2t = 0; } else { cmnd_rx_end(cmnd); } out: TRACE_EXIT_RES(res); return res; }
static ssize_t isert_get_initiator_ip(struct iscsi_conn *conn, char *buf, int size) { int pos; struct sockaddr_storage ss; size_t addr_len; TRACE_ENTRY(); isert_get_peer_addr(conn, (struct sockaddr *)&ss, &addr_len); switch (ss.ss_family) { case AF_INET: #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33) pos = scnprintf(buf, size, "%u.%u.%u.%u", NIPQUAD(((struct sockaddr_in *)&ss)->sin_addr.s_addr)); #else pos = scnprintf(buf, size, "%pI4", &((struct sockaddr_in *)&ss)->sin_addr.s_addr); #endif break; #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case AF_INET6: #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29) pos = scnprintf(buf, size, "[%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x]", NIP6(((struct sockaddr_in6 *)&ss)->sin6_addr)); #else pos = scnprintf(buf, size, "[%p6]", &((struct sockaddr_in6 *)&ss)->sin6_addr); #endif break; #endif default: pos = scnprintf(buf, size, "Unknown family %d", ss.ss_family); break; } TRACE_EXIT_RES(pos); return pos; }
/*
 * RDMA CM "connection established" handler.
 *
 * Moves the connection from handshake to active state, records the peer
 * address size, takes a reference for the upper layer and notifies it.
 * If an RX PDU arrived before the transition completed (connection was
 * already active), that saved PDU is replayed afterwards.
 */
static int isert_cm_connect_handler(struct rdma_cm_id *cm_id,
				    struct rdma_cm_event *event)
{
	struct isert_connection *isert_conn = cm_id->qp->qp_context;
	int push_saved_pdu = 0;
	int ret;

	TRACE_ENTRY();

	if (isert_conn->state == ISER_CONN_HANDSHAKE)
		isert_conn->state = ISER_CONN_ACTIVE;
	else if (isert_conn->state == ISER_CONN_ACTIVE)
		push_saved_pdu = 1;

	ret = isert_get_addr_size((struct sockaddr *)&isert_conn->peer_addr,
				  &isert_conn->peer_addrsz);
	if (unlikely(ret))
		goto out;

	/* reference held on behalf of the iSCSI layer */
	kref_get(&isert_conn->kref);
	/* notify upper layer */
	ret = isert_conn_established(&isert_conn->iscsi,
				     (struct sockaddr *)&isert_conn->peer_addr,
				     isert_conn->peer_addrsz);
	if (unlikely(ret)) {
		/* upper layer rejected the connection: tear it down */
		set_bit(ISERT_CONNECTION_ABORTED, &isert_conn->flags);
		isert_post_drain(isert_conn);
		isert_conn_free(isert_conn);
		goto out;
	}

	if (push_saved_pdu) {
		pr_info("iser push saved rx pdu\n");
		isert_recv_completion_handler(isert_conn->saved_wr);
		isert_conn->saved_wr = NULL;
	}

out:
	TRACE_EXIT_RES(ret);
	return ret;
}
int isert_pdu_post_rdma_write(struct isert_connection *isert_conn, struct isert_cmnd *isert_cmd, struct isert_cmnd *isert_rsp, int wr_cnt) { int err; TRACE_ENTRY(); isert_rsp->wr[0].send_wr.num_sge = isert_pdu_prepare_send(isert_conn, isert_rsp); isert_link_send_pdu_wrs(isert_cmd, isert_rsp, wr_cnt); err = isert_post_send(isert_conn, &isert_cmd->wr[0], wr_cnt + 1); if (unlikely(err)) { pr_err("Failed to send pdu conn:%p pdu:%p err:%d\n", isert_conn, isert_cmd, err); } TRACE_EXIT_RES(err); return err; }
/*
 * Send one event message to the user-space daemon over netlink.
 * event_mutex supposed to be held (it protects 'seq').
 */
static int __event_send(const void *buf, int buf_len)
{
	int res = 0, len;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	static u32 seq; /* protected by event_mutex */

	TRACE_ENTRY();

	/* silently drop events while no user-space listener is connected */
	if (ctr_open_state != ISCSI_CTR_OPEN_STATE_OPEN)
		goto out;

	/*
	 * NOTE(review): len already includes the netlink header and
	 * alignment, yet the skb is sized NLMSG_SPACE(len), applying that
	 * overhead twice.  Harmless over-allocation, but confirm whether
	 * alloc_skb(len, ...) was intended.
	 */
	len = NLMSG_SPACE(buf_len);
	skb = alloc_skb(NLMSG_SPACE(len), GFP_KERNEL);
	if (skb == NULL) {
		PRINT_ERROR("alloc_skb() failed (len %d)", len);
		res = -ENOMEM;
		goto out;
	}

	nlh = __nlmsg_put(skb, iscsid_pid, seq++, NLMSG_DONE,
			  len - sizeof(*nlh), 0);
	memcpy(NLMSG_DATA(nlh), buf, buf_len);
	/* netlink_unicast() consumes the skb regardless of the outcome */
	res = netlink_unicast(nl, skb, iscsid_pid, 0);
	if (res <= 0) {
		if (res != -ECONNREFUSED)
			PRINT_ERROR("netlink_unicast() failed: %d", res);
		else
			TRACE(TRACE_MINOR, "netlink_unicast() failed: %s. "
			      "Not functioning user space?",
			      "Connection refused");
		goto out;
	}

out:
	TRACE_EXIT_RES(res);
	return res;
}
/*
 * Drain the completion queue: repeatedly poll up to ARRAY_SIZE(cq->wc)
 * completions and dispatch each to the success or error handler, until a
 * poll returns no entries.  Returns the last ib_poll_cq() result
 * (0 when drained, negative on poll error).
 */
static int isert_poll_cq(struct isert_cq *cq)
{
	int err;
	int i;

	TRACE_ENTRY();

	do {
		err = ib_poll_cq(cq->cq, ARRAY_SIZE(cq->wc), cq->wc);
		/*
		 * Fix: the original computed &cq->wc[err] before checking the
		 * sign; pointer arithmetic outside the array is undefined
		 * when ib_poll_cq() returns a negative error.  Iterating by
		 * index never forms an out-of-range pointer.
		 */
		for (i = 0; i < err; ++i) {
			struct ib_wc *wc = &cq->wc[i];

			if (likely(wc->status == IB_WC_SUCCESS))
				isert_handle_wc(wc);
			else
				isert_handle_wc_error(wc);
		}
	} while (err > 0);

	TRACE_EXIT_RES(err);
	return err;
}