static void
nvmf_deactive_tx_desc(struct nvme_qp_tx_desc *tx_desc)
{
    struct spdk_nvmf_conn *conn;

    RTE_VERIFY(tx_desc != NULL);
    conn = tx_desc->conn;
    RTE_VERIFY(conn != NULL);

    STAILQ_REMOVE(&conn->qp_tx_active_desc, tx_desc, nvme_qp_tx_desc, link);
    STAILQ_INSERT_TAIL(&conn->qp_tx_desc, tx_desc, link);
}
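/*
 * For context, a hedged sketch of the matching activation step (the body is
 * inferred from the two lists used above, not quoted from the source): move
 * a free descriptor onto the active list before posting a send.
 */
static void
nvmf_active_tx_desc(struct nvme_qp_tx_desc *tx_desc)
{
    struct spdk_nvmf_conn *conn;

    RTE_VERIFY(tx_desc != NULL);
    conn = tx_desc->conn;
    RTE_VERIFY(conn != NULL);

    STAILQ_REMOVE(&conn->qp_tx_desc, tx_desc, nvme_qp_tx_desc, link);
    STAILQ_INSERT_TAIL(&conn->qp_tx_active_desc, tx_desc, link);
}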
/* Release ethdev TX queue */
static void
dpdk_ethdev_tx_queue_release(unsigned lcore_id, struct vr_interface *vif)
{
    int i;
    struct vr_dpdk_lcore *lcore = vr_dpdk.lcores[lcore_id];
    struct vr_dpdk_queue *tx_queue = &lcore->lcore_tx_queues[vif->vif_idx];
    struct vr_dpdk_queue_params *tx_queue_params =
        &lcore->lcore_tx_queue_params[vif->vif_idx];

    /* remove queue params from the list of bonds to TX */
    for (i = 0; i < lcore->lcore_nb_bonds_to_tx; i++) {
        if (likely(lcore->lcore_bonds_to_tx[i] == tx_queue_params)) {
            lcore->lcore_bonds_to_tx[i] = NULL;
            lcore->lcore_nb_bonds_to_tx--;
            RTE_VERIFY(lcore->lcore_nb_bonds_to_tx <= VR_DPDK_MAX_BONDS);
            /* copy the last element to the empty spot */
            lcore->lcore_bonds_to_tx[i] =
                lcore->lcore_bonds_to_tx[lcore->lcore_nb_bonds_to_tx];
            break;
        }
    }

    tx_queue->txq_ops.f_tx = NULL;
    rte_wmb();

    /* flush and free the queue */
    if (tx_queue->txq_ops.f_free(tx_queue->q_queue_h)) {
        RTE_LOG(ERR, VROUTER, "    error freeing lcore %u eth device TX queue\n",
            lcore_id);
    }

    /* reset the queue */
    vrouter_put_interface(tx_queue->q_vif);
    memset(tx_queue, 0, sizeof(*tx_queue));
    memset(tx_queue_params, 0, sizeof(*tx_queue_params));
}
void
activate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
{
    struct bond_dev_private *internals = eth_dev->data->dev_private;
    uint8_t active_count = internals->active_slave_count;

    if (internals->mode == BONDING_MODE_8023AD)
        bond_mode_8023ad_activate_slave(eth_dev, port_id);

    if (internals->mode == BONDING_MODE_TLB
            || internals->mode == BONDING_MODE_ALB) {
        internals->tlb_slaves_order[active_count] = port_id;
    }

    RTE_VERIFY(internals->active_slave_count <
            (RTE_DIM(internals->active_slaves) - 1));

    internals->active_slaves[internals->active_slave_count] = port_id;
    internals->active_slave_count++;

    if (internals->mode == BONDING_MODE_TLB)
        bond_tlb_activate_slave(internals);
    if (internals->mode == BONDING_MODE_ALB)
        bond_mode_alb_client_list_upd(eth_dev);
}
void
deactivate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
{
    uint8_t slave_pos;
    struct bond_dev_private *internals = eth_dev->data->dev_private;
    uint8_t active_count = internals->active_slave_count;

    if (internals->mode == BONDING_MODE_8023AD) {
        bond_mode_8023ad_stop(eth_dev);
        bond_mode_8023ad_deactivate_slave(eth_dev, port_id);
    }

    slave_pos = find_slave_by_id(internals->active_slaves, active_count,
            port_id);

    /*
     * If slave was not at the end of the list,
     * shift active slaves up in the array.
     */
    if (slave_pos < active_count) {
        active_count--;
        memmove(internals->active_slaves + slave_pos,
                internals->active_slaves + slave_pos + 1,
                (active_count - slave_pos) *
                    sizeof(internals->active_slaves[0]));
    }

    RTE_VERIFY(active_count < RTE_DIM(internals->active_slaves));
    internals->active_slave_count = active_count;

    if (eth_dev->data->dev_started && internals->mode == BONDING_MODE_8023AD)
        bond_mode_8023ad_start(eth_dev);
}
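/*
 * Illustrative helper (a sketch under the usual DPDK bonding semantics, not
 * quoted from this source): find_slave_by_id() returns the position of
 * port_id within the first slave_count entries, or slave_count when absent,
 * which is why deactivate_slave() only shifts when slave_pos < active_count.
 */
static inline uint8_t
find_slave_by_id(uint8_t *slaves, uint8_t slave_count, uint8_t port_id)
{
    uint8_t pos;

    for (pos = 0; pos < slave_count; pos++) {
        if (slaves[pos] == port_id)
            break;
    }
    return pos;
}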
/* Init eth TX queue */
struct vr_dpdk_queue *
vr_dpdk_ethdev_tx_queue_init(unsigned lcore_id, struct vr_interface *vif,
    unsigned queue_or_lcore_id)
{
    uint8_t port_id;
    uint16_t tx_queue_id = queue_or_lcore_id;
    unsigned int vif_idx = vif->vif_idx, dpdk_queue_index;
    const unsigned int socket_id = rte_lcore_to_socket_id(lcore_id);

    struct vr_dpdk_ethdev *ethdev;
    struct vr_dpdk_lcore *lcore = vr_dpdk.lcores[lcore_id];
    struct vr_dpdk_queue *tx_queue;
    struct vr_dpdk_queue_params *tx_queue_params;

    ethdev = (struct vr_dpdk_ethdev *)vif->vif_os;
    port_id = ethdev->ethdev_port_id;

    if (lcore->lcore_hw_queue_to_dpdk_index[vif->vif_idx]) {
        dpdk_queue_index =
            lcore->lcore_hw_queue_to_dpdk_index[vif->vif_idx][tx_queue_id];
    } else {
        dpdk_queue_index = 0;
    }

    tx_queue = &lcore->lcore_tx_queues[vif_idx][dpdk_queue_index];
    tx_queue_params = &lcore->lcore_tx_queue_params[vif_idx][dpdk_queue_index];

    /* init queue */
    tx_queue->txq_ops = rte_port_ethdev_writer_ops;
    tx_queue->q_queue_h = NULL;
    tx_queue->q_vif = vrouter_get_interface(vif->vif_rid, vif_idx);

    /* create the queue */
    struct rte_port_ethdev_writer_params writer_params = {
        .port_id = port_id,
        .queue_id = tx_queue_id,
        .tx_burst_sz = VR_DPDK_TX_BURST_SZ,
    };
    tx_queue->q_queue_h = tx_queue->txq_ops.f_create(&writer_params, socket_id);
    if (tx_queue->q_queue_h == NULL) {
        RTE_LOG(ERR, VROUTER, "    error creating eth device %" PRIu8
            " TX queue %" PRIu16 "\n", port_id, tx_queue_id);
        return NULL;
    }

    /* store queue params */
    tx_queue_params->qp_release_op = &dpdk_ethdev_tx_queue_release;
    tx_queue_params->qp_ethdev.queue_id = tx_queue_id;
    tx_queue_params->qp_ethdev.port_id = port_id;

    /* for the queue 0 add queue params to the list of bonds to TX */
    if (ethdev->ethdev_nb_slaves > 0 && tx_queue_id == 0) {
        /* make sure queue params have been stored */
        rte_wmb();
        lcore->lcore_bonds_to_tx[lcore->lcore_nb_bonds_to_tx++] =
            tx_queue_params;
        RTE_VERIFY(lcore->lcore_nb_bonds_to_tx <= VR_DPDK_MAX_BONDS);
    }

    return tx_queue;
}
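/*
 * Hedged usage sketch ("vr_dpdk_lcore_tx_one" is a hypothetical name): after
 * the init above, the lcore TX loop hands mbufs to the rte_port ethdev
 * writer via the stored ops; the writer buffers up to tx_burst_sz packets
 * before calling rte_eth_tx_burst(). The NULL check pairs with the release
 * path above, which clears f_tx before freeing the queue.
 */
static inline void
vr_dpdk_lcore_tx_one(struct vr_dpdk_queue *tx_queue, struct rte_mbuf *mbuf)
{
    if (likely(tx_queue->txq_ops.f_tx != NULL))
        tx_queue->txq_ops.f_tx(tx_queue->q_queue_h, mbuf);
    else
        rte_pktmbuf_free(mbuf); /* queue already released; drop the packet */
}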
static void
spdk_nvmf_conn_check_shutdown(struct rte_timer *timer, void *arg)
{
    if (spdk_nvmf_get_active_conns() == 0) {
        RTE_VERIFY(timer == &g_shutdown_timer);
        rte_timer_stop(timer);
        spdk_nvmf_cleanup_conns();
        spdk_app_stop(0);
    }
}
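/*
 * Hedged sketch of how the check above could be armed (the arming code is
 * not in this excerpt; "spdk_nvmf_arm_shutdown_timer" is a hypothetical
 * name): a PERIODICAL rte_timer re-runs the poll once per second until all
 * connections drain and the callback stops it.
 */
static void
spdk_nvmf_arm_shutdown_timer(void)
{
    rte_timer_init(&g_shutdown_timer);
    rte_timer_reset(&g_shutdown_timer, rte_get_timer_hz(), PERIODICAL,
                    rte_lcore_id(), spdk_nvmf_conn_check_shutdown, NULL);
}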
void
log_ratelimit_state_init(unsigned lcore_id, uint32_t interval, uint32_t burst)
{
    struct log_ratelimit_state *lrs;

    RTE_VERIFY(lcore_id < RTE_MAX_LCORE);

    lrs = &log_ratelimit_states[lcore_id];
    lrs->interval_cycles = interval * cycles_per_ms;
    lrs->burst = burst;
    log_ratelimit_reset(lrs, rte_rdtsc());
}
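/*
 * Illustrative counterpart (a sketch only; the field names "end_of_interval",
 * "printed", and "suppressed" are assumptions, not the project's code): a
 * token-bucket style check against the state initialized above, refilled
 * once per interval_cycles.
 */
static bool
log_ratelimit_allow(struct log_ratelimit_state *lrs)
{
    uint64_t now = rte_rdtsc();

    if (now >= lrs->end_of_interval)
        log_ratelimit_reset(lrs, now); /* new interval: refill the burst */

    if (lrs->printed < lrs->burst) {
        lrs->printed++;
        return true;
    }

    lrs->suppressed++;
    return false;
}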
void
activate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
{
    struct bond_dev_private *internals = eth_dev->data->dev_private;

    if (internals->mode == BONDING_MODE_8023AD)
        bond_mode_8023ad_activate_slave(eth_dev, port_id);

    RTE_VERIFY(internals->active_slave_count <
            (RTE_DIM(internals->active_slaves) - 1));

    internals->active_slaves[internals->active_slave_count] = port_id;
    internals->active_slave_count++;
}
int
set_lua_path(lua_State *l, const char *path)
{
    int ret;
    char new_path[1024];

    lua_getglobal(l, "package");
    lua_getfield(l, -1, "path");
    ret = snprintf(new_path, sizeof(new_path), "%s;%s/?.lua",
        lua_tostring(l, -1), path);
    RTE_VERIFY(ret > 0 && ret < (int)sizeof(new_path));

    lua_pop(l, 1);
    lua_pushstring(l, new_path);
    lua_setfield(l, -2, "path");
    lua_pop(l, 1);

    return ret;
}
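/*
 * Worked example (illustrative values): with path = "/etc/gatekeeper" and a
 * current package.path of "./?.lua", set_lua_path() rewrites package.path to
 * "./?.lua;/etc/gatekeeper/?.lua", so require("aux") also searches
 * /etc/gatekeeper/aux.lua. config_gatekeeper() below relies on this before
 * loading its entry script.
 */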
int
config_gatekeeper(const char *lua_base_dir, const char *gatekeeper_config_file)
{
    int ret;
    char lua_entry_path[128];
    lua_State *lua_state;

    ret = snprintf(lua_entry_path, sizeof(lua_entry_path), "%s/%s",
        lua_base_dir, gatekeeper_config_file);
    RTE_VERIFY(ret > 0 && ret < (int)sizeof(lua_entry_path));

    lua_state = luaL_newstate();
    if (!lua_state) {
        G_LOG(ERR, "config: failed to create new Lua state\n");
        return -1;
    }

    luaL_openlibs(lua_state);
    luaL_register(lua_state, "staticlib", staticlib);
    set_lua_path(lua_state, lua_base_dir);

    ret = luaL_loadfile(lua_state, lua_entry_path);
    if (ret != 0) {
        G_LOG(ERR, "config: %s\n", lua_tostring(lua_state, -1));
        ret = -1;
        goto out;
    }

    /*
     * Calls a function in protected mode.
     * int lua_pcall (lua_State *L, int nargs, int nresults, int errfunc);
     * @nargs: the number of arguments that you pushed onto the stack.
     * @nresults: the number of results that the function will push onto
     *            the stack.
     * @errfunc: if 0, the error message returned on the stack is exactly
     *           the original error message. Otherwise, it is the stack
     *           index of the error handling function.
     */
    ret = lua_pcall(lua_state, 0, 0, 0);
    if (ret != 0) {
        G_LOG(ERR, "config: %s\n", lua_tostring(lua_state, -1));
        ret = -1;
        goto out;
    }

    /* Function to be called. */
    lua_getglobal(lua_state, "gatekeeper_init");
    ret = lua_pcall(lua_state, 0, 1, 0);
    if (ret != 0) {
        G_LOG(ERR, "config: %s\n", lua_tostring(lua_state, -1));
        ret = -1;
        goto out;
    }

    ret = luaL_checkinteger(lua_state, -1);
    if (ret < 0)
        G_LOG(ERR, "config: gatekeeper_init() return value is %d\n", ret);

out:
    lua_close(lua_state);
    return ret;
}
int
process_arp(struct lls_config *lls_conf, struct gatekeeper_if *iface,
    uint16_t tx_queue, struct rte_mbuf *buf, struct ether_hdr *eth_hdr,
    struct arp_hdr *arp_hdr)
{
    struct ipaddr addr = {
        .proto = ETHER_TYPE_IPv4,
        .ip.v4.s_addr = arp_hdr->arp_data.arp_sip,
    };
    struct lls_mod_req mod_req;
    uint16_t pkt_len = rte_pktmbuf_data_len(buf);
    /* pkt_in_skip_l2() already called by LLS. */
    size_t l2_len = pkt_in_l2_hdr_len(buf);
    int ret;

    if (pkt_len < l2_len + sizeof(*arp_hdr)) {
        LLS_LOG(ERR, "%s interface received ARP packet of size %hu bytes, but it should be at least %zu bytes\n",
            iface->name, pkt_len, l2_len + sizeof(*arp_hdr));
        return -1;
    }

    ret = verify_l2_hdr(iface, eth_hdr, buf->l2_type, "ARP");
    if (ret < 0)
        return ret;

    if (unlikely(arp_hdr->arp_hrd != rte_cpu_to_be_16(ARP_HRD_ETHER) ||
            arp_hdr->arp_pro != rte_cpu_to_be_16(ETHER_TYPE_IPv4) ||
            arp_hdr->arp_hln != ETHER_ADDR_LEN ||
            arp_hdr->arp_pln != sizeof(struct in_addr)))
        return -1;

    /* If sip is not in the same subnet as our IP address, drop. */
    if (!ipv4_in_subnet(iface, &addr))
        return -1;

    /* Update cache with source resolution, regardless of operation. */
    mod_req.cache = &lls_conf->arp_cache;
    mod_req.addr = addr;
    ether_addr_copy(&arp_hdr->arp_data.arp_sha, &mod_req.ha);
    mod_req.port_id = iface->id;
    mod_req.ts = time(NULL);
    RTE_VERIFY(mod_req.ts >= 0);
    lls_process_mod(lls_conf, &mod_req);

    /*
     * If it's a Gratuitous ARP or if the target address
     * is not us, then no response is needed.
     */
    if (is_garp_pkt(arp_hdr) ||
            (iface->ip4_addr.s_addr != arp_hdr->arp_data.arp_tip))
        return -1;

    switch (rte_be_to_cpu_16(arp_hdr->arp_op)) {
    case ARP_OP_REQUEST: {
        uint16_t num_tx;

        /*
         * We are reusing the frame, but an ARP reply always goes out
         * the same interface that received it. Therefore, the L2
         * space of the frame is the same. If needed, the correct
         * VLAN tag was set in verify_l2_hdr().
         */

        /* Set-up Ethernet header. */
        ether_addr_copy(&eth_hdr->s_addr, &eth_hdr->d_addr);
        ether_addr_copy(&iface->eth_addr, &eth_hdr->s_addr);

        /* Set-up ARP header. */
        arp_hdr->arp_op = rte_cpu_to_be_16(ARP_OP_REPLY);
        ether_addr_copy(&arp_hdr->arp_data.arp_sha,
            &arp_hdr->arp_data.arp_tha);
        arp_hdr->arp_data.arp_tip = arp_hdr->arp_data.arp_sip;
        ether_addr_copy(&iface->eth_addr, &arp_hdr->arp_data.arp_sha);
        arp_hdr->arp_data.arp_sip = iface->ip4_addr.s_addr;

        /* Need to transmit reply. */
        num_tx = rte_eth_tx_burst(iface->id, tx_queue, &buf, 1);
        if (unlikely(num_tx != 1)) {
            LLS_LOG(NOTICE, "ARP reply failed\n");
            return -1;
        }
        return 0;
    }
    case ARP_OP_REPLY:
        /*
         * No further action required. Could check to make sure
         * arp_hdr->arp_data.arp_tha is equal to arp->ether_addr,
         * but there's nothing that can be done if it's wrong anyway.
         */
        return -1;
    default:
        LLS_LOG(NOTICE, "%s received an ARP packet with an unknown operation (%hu)\n",
            __func__, rte_be_to_cpu_16(arp_hdr->arp_op));
        return -1;
    }
}
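/*
 * Hedged sketch of the gratuitous-ARP test used above (not necessarily the
 * project's verbatim helper): a GARP announces the sender's own binding, so
 * the sender and target protocol addresses match and no reply is owed.
 */
static inline bool
is_garp_pkt(const struct arp_hdr *arp_hdr)
{
    return arp_hdr->arp_data.arp_sip == arp_hdr->arp_data.arp_tip;
}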
void
spdk_app_init(struct spdk_app_opts *opts)
{
    struct spdk_conf *config;
    struct spdk_conf_section *sp;
    struct sigaction sigact;
    sigset_t signew;
    char shm_name[64];
    int rc;
    uint64_t tpoint_group_mask;
    char *end;

    if (opts->enable_coredump) {
        struct rlimit core_limits;

        core_limits.rlim_cur = core_limits.rlim_max = RLIM_INFINITY;
        setrlimit(RLIMIT_CORE, &core_limits);
    }

    config = spdk_conf_allocate();
    RTE_VERIFY(config != NULL);
    if (opts->config_file) {
        rc = spdk_conf_read(config, opts->config_file);
        if (rc != 0) {
            fprintf(stderr, "Could not read config file %s\n", opts->config_file);
            exit(EXIT_FAILURE);
        }
        if (config->section == NULL) {
            fprintf(stderr, "Invalid config file %s\n", opts->config_file);
            exit(EXIT_FAILURE);
        }
    }
    spdk_conf_set_as_default(config);

    if (opts->instance_id == -1) {
        sp = spdk_conf_find_section(config, "Global");
        if (sp != NULL) {
            opts->instance_id = spdk_conf_section_get_intval(sp, "InstanceID");
        }
    }
    if (opts->instance_id < 0) {
        opts->instance_id = 0;
    }

    memset(&g_spdk_app, 0, sizeof(g_spdk_app));
    g_spdk_app.config = config;
    g_spdk_app.instance_id = opts->instance_id;
    g_spdk_app.shutdown_cb = opts->shutdown_cb;
    snprintf(g_spdk_app.pidfile, sizeof(g_spdk_app.pidfile), "%s/%s.pid.%d",
        SPDK_APP_PIDFILE_PREFIX, opts->name, opts->instance_id);
    spdk_app_write_pidfile();

    /* open log files */
    if (opts->log_facility == NULL) {
        opts->log_facility = spdk_get_log_facility(g_spdk_app.config);
        if (opts->log_facility == NULL) {
            fprintf(stderr, "NULL logfacility\n");
            spdk_conf_free(g_spdk_app.config);
            exit(EXIT_FAILURE);
        }
    }
    rc = spdk_set_log_facility(opts->log_facility);
    if (rc < 0) {
        fprintf(stderr, "log facility error\n");
        spdk_conf_free(g_spdk_app.config);
        exit(EXIT_FAILURE);
    }

    rc = spdk_set_log_priority(SPDK_APP_DEFAULT_LOG_PRIORITY);
    if (rc < 0) {
        fprintf(stderr, "log priority error\n");
        spdk_conf_free(g_spdk_app.config);
        exit(EXIT_FAILURE);
    }
    spdk_open_log();

    if (opts->reactor_mask == NULL) {
        sp = spdk_conf_find_section(g_spdk_app.config, "Global");
        if (sp != NULL) {
            if (spdk_conf_section_get_val(sp, "ReactorMask")) {
                opts->reactor_mask = spdk_conf_section_get_val(sp, "ReactorMask");
            } else {
                opts->reactor_mask = SPDK_APP_DPDK_DEFAULT_CORE_MASK;
            }
        } else {
            opts->reactor_mask = SPDK_APP_DPDK_DEFAULT_CORE_MASK;
        }
    }

    spdk_dpdk_framework_init(opts);

    /*
     * If mask not specified on command line or in configuration file,
     * reactor_mask will be NULL which will enable all cores to run
     * reactors.
     */
    if (spdk_reactors_init(opts->reactor_mask)) {
        fprintf(stderr, "Invalid reactor mask.\n");
        exit(EXIT_FAILURE);
    }

    /* setup signal handler thread */
    pthread_sigmask(SIG_SETMASK, NULL, &signew);

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = SIG_IGN;
    sigemptyset(&sigact.sa_mask);
    rc = sigaction(SIGPIPE, &sigact, NULL);
    if (rc < 0) {
        SPDK_ERRLOG("sigaction(SIGPIPE) failed\n");
        exit(EXIT_FAILURE);
    }

    if (opts->shutdown_cb != NULL) {
        g_shutdown_event = spdk_event_allocate(rte_lcore_id(),
            __shutdown_event_cb, NULL, NULL, NULL);

        sigact.sa_handler = __shutdown_signal;
        sigemptyset(&sigact.sa_mask);
        rc = sigaction(SIGINT, &sigact, NULL);
        if (rc < 0) {
            SPDK_ERRLOG("sigaction(SIGINT) failed\n");
            exit(EXIT_FAILURE);
        }
        sigaddset(&signew, SIGINT);

        sigact.sa_handler = __shutdown_signal;
        sigemptyset(&sigact.sa_mask);
        rc = sigaction(SIGTERM, &sigact, NULL);
        if (rc < 0) {
            SPDK_ERRLOG("sigaction(SIGTERM) failed\n");
            exit(EXIT_FAILURE);
        }
        sigaddset(&signew, SIGTERM);
    }

    if (opts->usr1_handler != NULL) {
        sigact.sa_handler = opts->usr1_handler;
        sigemptyset(&sigact.sa_mask);
        rc = sigaction(SIGUSR1, &sigact, NULL);
        if (rc < 0) {
            SPDK_ERRLOG("sigaction(SIGUSR1) failed\n");
            exit(EXIT_FAILURE);
        }
        sigaddset(&signew, SIGUSR1);
    }

    sigaddset(&signew, SIGQUIT);
    sigaddset(&signew, SIGHUP);
    pthread_sigmask(SIG_SETMASK, &signew, NULL);

    snprintf(shm_name, sizeof(shm_name), "/%s_trace.%d", opts->name,
        opts->instance_id);
    spdk_trace_init(shm_name);

    if (opts->tpoint_group_mask == NULL) {
        sp = spdk_conf_find_section(g_spdk_app.config, "Global");
        if (sp != NULL) {
            opts->tpoint_group_mask =
                spdk_conf_section_get_val(sp, "TpointGroupMask");
        }
    }

    if (opts->tpoint_group_mask != NULL) {
        errno = 0;
        tpoint_group_mask = strtoull(opts->tpoint_group_mask, &end, 16);
        if (*end != '\0' || errno) {
            SPDK_ERRLOG("invalid tpoint mask %s\n", opts->tpoint_group_mask);
        } else {
            spdk_trace_set_tpoint_group_mask(tpoint_group_mask);
        }
    }

    rc = spdk_subsystem_init();
    if (rc < 0) {
        SPDK_ERRLOG("spdk_subsystem_init() failed\n");
        exit(EXIT_FAILURE);
    }
}
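/*
 * Minimal caller sketch for spdk_app_init() (illustrative only; it sets just
 * the option fields the function above actually reads, and the config path
 * is a placeholder, not a documented default):
 */
static void
start_nvmf_app(void)
{
    struct spdk_app_opts opts;

    memset(&opts, 0, sizeof(opts));
    opts.name = "nvmf";
    opts.config_file = "/usr/local/etc/nvmf.conf"; /* placeholder path */
    opts.instance_id = -1; /* let the [Global] InstanceID setting decide */
    opts.enable_coredump = 1;

    spdk_app_init(&opts); /* NULL reactor_mask etc. fall back to the config */
}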
static int
parse_args(int argc, char **argv)
{
    int ch;
    struct net_port *port;
    struct vxlan_peer peer;

#ifdef LWIP_DEBUG
    while ((ch = getopt(argc, argv, "P:V:e:k:d")) != -1) {
#else
    while ((ch = getopt(argc, argv, "P:V:e:k:")) != -1) {
#endif
        switch (ch) {
        case 'P':
            port = &BR0.plug.net_port;
            if (parse_port(&port->net, optarg))
                return -1;
            port->rte_port_type = RTE_PORT_TYPE_PLUG;
            break;
        case 'V':
            memset(&peer, 0, sizeof(peer));
            if (parse_vxlan(&peer, optarg))
                return -1;
            if (bridge_add_vxlan(&BR0, &peer) != 0)
                return -1;
            break;
        case 'e':
            if (nr_ports >= PORT_MAX)
                break;
            port = &ports[nr_ports];
            if (parse_port(&port->net, optarg))
                return -1;
            port->rte_port_type = RTE_PORT_TYPE_ETH;
            nr_ports++;
            break;
        case 'k':
            if (nr_ports >= PORT_MAX)
                break;
            port = &ports[nr_ports];
            if (parse_port(&port->net, optarg))
                return -1;
            port->rte_port_type = RTE_PORT_TYPE_KNI;
            nr_ports++;
            break;
#ifdef LWIP_DEBUG
        case 'd':
            debug_flags |= (LWIP_DBG_ON |
                            LWIP_DBG_TRACE |
                            LWIP_DBG_STATE |
                            LWIP_DBG_FRESH |
                            LWIP_DBG_HALT);
            break;
#endif
        default:
            return -1;
        }
    }
    argc -= optind;
    argv += optind;

    return 0;
}

#define IP4_OR_NULL(ip_addr) ((ip_addr).addr == IPADDR_ANY ? 0 : &(ip_addr))

static int
create_eth_port(struct net_port *net_port, int socket_id)
{
    RTE_VERIFY(net_port->rte_port_type == RTE_PORT_TYPE_ETH);

    struct net *net = &net_port->net;
    struct rte_port_eth_params params = {
        .port_id = net->port_id,
        .nb_rx_desc = RTE_TEST_RX_DESC_DEFAULT,
        .nb_tx_desc = RTE_TEST_TX_DESC_DEFAULT,
        .mempool = pktmbuf_pool,
    };

    if (!IP4_OR_NULL(net_port->net.ip_addr)) {
        struct rte_port_eth *eth_port;

        eth_port = rte_port_eth_create(&params, socket_id, net_port);
        if (!eth_port)
            rte_exit(EXIT_FAILURE, "Cannot alloc eth port\n");
        bridge_add_port(&BR0, net_port);
    } else {
        struct ethif *ethif;
        struct netif *netif;

        ethif = ethif_alloc(socket_id);
        if (ethif == NULL)
            rte_exit(EXIT_FAILURE, "Cannot alloc eth port\n");
        if (ethif_init(ethif, &params, socket_id, net_port) != ERR_OK)
            rte_exit(EXIT_FAILURE, "Cannot init eth port\n");

        netif = &ethif->netif;
        netif_add(netif, IP4_OR_NULL(net->ip_addr), IP4_OR_NULL(net->netmask),
            IP4_OR_NULL(net->gw), ethif, ethif_added_cb, ethernet_input);
        netif_set_up(netif);
    }
    return 0;
}

static int
create_kni_port(struct net_port *net_port, int socket_id)
{
    RTE_VERIFY(net_port->rte_port_type == RTE_PORT_TYPE_KNI);

    struct net *net = &net_port->net;
    struct rte_port_kni_params params = {
        .name = net->name,
        .mbuf_size = MAX_PACKET_SZ,
        .mempool = pktmbuf_pool,
    };

    if (!IP4_OR_NULL(net_port->net.ip_addr)) {
        struct rte_port_kni *kni_port;

        kni_port = rte_port_kni_create(&params, socket_id, net_port);
        if (!kni_port)
            rte_exit(EXIT_FAILURE, "Cannot alloc kni port\n");
        bridge_add_port(&BR0, net_port);
    } else {
        struct kniif *kniif;
        struct netif *netif;

        kniif = kniif_alloc(socket_id);
        if (kniif == NULL)
            rte_exit(EXIT_FAILURE, "Cannot alloc kni interface\n");
        if (kniif_init(kniif, &params, socket_id, net_port) != ERR_OK)
            rte_exit(EXIT_FAILURE, "Cannot init kni interface\n");

        netif = &kniif->netif;
        netif_add(netif, IP4_OR_NULL(net->ip_addr), IP4_OR_NULL(net->netmask),
            IP4_OR_NULL(net->gw), kniif, kniif_added_cb, ethernet_input);
        netif_set_up(netif);
    }
    return 0;
}

static int
create_plug_port(struct net_port *net_port, int socket_id)
{
    RTE_VERIFY(net_port->rte_port_type == RTE_PORT_TYPE_PLUG);

    struct net *net = &net_port->net;

    if (!IP4_OR_NULL(net_port->net.ip_addr)) {
        struct rte_port_plug *plug_port;
        struct rte_port_plug_params params = {
            .tx_burst = bridge_tx_vxlan_burst,
            .private_data = &BR0,
        };

        plug_port = rte_port_plug_create(&params, socket_id, net_port);
        if (!plug_port)
            rte_exit(EXIT_FAILURE, "Cannot alloc plug port\n");
        if (bridge_add_port(&BR0, net_port) != 0)
            rte_exit(EXIT_FAILURE, "Cannot add bridge port\n");
    } else {
int
spdk_nvmf_request_prep_data(struct spdk_nvmf_request *req,
    void *in_cap_data, uint32_t in_cap_len,
    void *bb, uint32_t bb_len)
{
    struct spdk_nvmf_conn *conn = req->conn;
    struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
    struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
    enum spdk_nvme_data_transfer xfer;
    int ret;

    nvmf_trace_command(req->cmd, conn->type);

    req->length = 0;
    req->xfer = SPDK_NVME_DATA_NONE;
    req->data = NULL;

    if (cmd->opc == SPDK_NVME_OPC_FABRIC) {
        xfer = spdk_nvme_opc_get_data_transfer(req->cmd->nvmf_cmd.fctype);
    } else {
        xfer = spdk_nvme_opc_get_data_transfer(cmd->opc);
    }

    if (xfer != SPDK_NVME_DATA_NONE) {
        struct spdk_nvme_sgl_descriptor *sgl =
            (struct spdk_nvme_sgl_descriptor *)&cmd->dptr.sgl1;

        if (sgl->generic.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK &&
            (sgl->keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS ||
             sgl->keyed.subtype == SPDK_NVME_SGL_SUBTYPE_INVALIDATE_KEY)) {
            if (sgl->keyed.length > bb_len) {
                SPDK_ERRLOG("SGL length 0x%x exceeds BB length 0x%x\n",
                    sgl->keyed.length, bb_len);
                rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
                return -1;
            }

            req->data = bb;
            req->length = sgl->keyed.length;
        } else if (sgl->generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK &&
                   sgl->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET) {
            uint64_t offset = sgl->address;
            uint32_t max_len = in_cap_len;

            SPDK_TRACELOG(SPDK_TRACE_NVMF,
                "In-capsule data: offset 0x%" PRIx64 ", length 0x%x\n",
                offset, sgl->unkeyed.length);

            if (conn->type == CONN_TYPE_AQ) {
                SPDK_ERRLOG("In-capsule data not allowed for admin queue\n");
                return -1;
            }

            if (offset > max_len) {
                SPDK_ERRLOG("In-capsule offset 0x%" PRIx64 " exceeds capsule length 0x%x\n",
                    offset, max_len);
                rsp->status.sc = SPDK_NVME_SC_INVALID_SGL_OFFSET;
                return -1;
            }
            max_len -= (uint32_t)offset;

            if (sgl->unkeyed.length > max_len) {
                SPDK_ERRLOG("In-capsule data length 0x%x exceeds capsule length 0x%x\n",
                    sgl->unkeyed.length, max_len);
                rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
                return -1;
            }

            req->data = in_cap_data + offset;
            req->length = sgl->unkeyed.length;
        } else {
            SPDK_ERRLOG("Invalid NVMf I/O Command SGL: Type 0x%x, Subtype 0x%x\n",
                sgl->generic.type, sgl->generic.subtype);
            rsp->status.sc = SPDK_NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID;
            return -1;
        }

        if (req->length == 0) {
            xfer = SPDK_NVME_DATA_NONE;
            req->data = NULL;
        }

        req->xfer = xfer;

        /*
         * For any I/O that requires data to be
         * pulled into target BB before processing by
         * the backend NVMe device
         */
        if (xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
            if (sgl->generic.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK) {
                SPDK_TRACELOG(SPDK_TRACE_NVMF,
                    "Initiating Host to Controller data transfer\n");
                ret = nvmf_post_rdma_read(conn, req);
                if (ret) {
                    SPDK_ERRLOG("Unable to post rdma read tx descriptor\n");
                    rsp->status.sc = SPDK_NVME_SC_DATA_TRANSFER_ERROR;
                    return -1;
                }
                /* Wait for transfer to complete before executing command. */
                return 1;
            }
        }
    }

    if (xfer == SPDK_NVME_DATA_NONE) {
        SPDK_TRACELOG(SPDK_TRACE_NVMF, "No data to transfer\n");
        RTE_VERIFY(req->data == NULL);
        RTE_VERIFY(req->length == 0);
    } else {
        RTE_VERIFY(req->data != NULL);
        RTE_VERIFY(req->length != 0);
        SPDK_TRACELOG(SPDK_TRACE_NVMF, "%s data ready\n",
            xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER ?
            "Host to Controller" : "Controller to Host");
    }

    return 0;
}
static bool
nvmf_process_connect(struct spdk_nvmf_request *req)
{
    struct spdk_nvmf_fabric_connect_cmd *connect;
    struct spdk_nvmf_fabric_connect_data *connect_data;
    struct spdk_nvmf_fabric_connect_rsp *response;
    struct spdk_nvmf_conn *conn = req->conn;
    struct nvmf_session *session;

    if (req->length < sizeof(struct spdk_nvmf_fabric_connect_data)) {
        SPDK_ERRLOG("Connect command data length 0x%x too small\n", req->length);
        req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_FIELD;
        return true;
    }

    connect = &req->cmd->connect_cmd;
    connect_data = (struct spdk_nvmf_fabric_connect_data *)req->data;
    RTE_VERIFY(connect_data != NULL);

    SPDK_TRACELOG(SPDK_TRACE_NVMF, "Connect cmd: cid 0x%x recfmt 0x%x qid %u sqsize %u\n",
        connect->cid, connect->recfmt, connect->qid, connect->sqsize);
    SPDK_TRACELOG(SPDK_TRACE_NVMF, "Connect data:\n");
    SPDK_TRACELOG(SPDK_TRACE_NVMF, "  cntlid: 0x%04x\n", connect_data->cntlid);
    SPDK_TRACELOG(SPDK_TRACE_NVMF, "  hostid: %08x-%04x-%04x-%02x%02x-%04x%08x ***\n",
        ntohl(*(uint32_t *)&connect_data->hostid[0]),
        ntohs(*(uint16_t *)&connect_data->hostid[4]),
        ntohs(*(uint16_t *)&connect_data->hostid[6]),
        connect_data->hostid[8],
        connect_data->hostid[9],
        ntohs(*(uint16_t *)&connect_data->hostid[10]),
        ntohl(*(uint32_t *)&connect_data->hostid[12]));
    SPDK_TRACELOG(SPDK_TRACE_NVMF, "  subnqn: \"%s\"\n",
        (char *)&connect_data->subnqn[0]);
    SPDK_TRACELOG(SPDK_TRACE_NVMF, "  hostnqn: \"%s\"\n",
        (char *)&connect_data->hostnqn[0]);

    response = &req->rsp->connect_rsp;

    session = nvmf_connect((void *)conn, connect, connect_data, response);
    if (session != NULL) {
        conn->sess = session;
        conn->qid = connect->qid;
        if (connect->qid > 0) {
            conn->type = CONN_TYPE_IOQ; /* I/O Connection */
        } else {
            /* When session first created, set some attributes */
            nvmf_init_conn_properites(conn, session, response);
        }
    }

    /* Allocate RDMA reqs according to the queue depth and conn type */
    if (spdk_nvmf_rdma_alloc_reqs(conn)) {
        SPDK_ERRLOG("Unable to allocate sufficient RDMA work requests\n");
        req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
        return true;
    }

    SPDK_TRACELOG(SPDK_TRACE_NVMF, "connect capsule response: cntlid = 0x%04x\n",
        response->status_code_specific.success.cntlid);
    return true;
}