void sigalrm_handler(int signo)
{
	int res, i;

	TRACE_ENTRY();

	TRACE_DBG("%s", "Flushing cache...");

	for (i = 0; i < num_devs; i++) {
		res = ioctl(devs[i].scst_usr_fd, SCST_USER_FLUSH_CACHE, NULL);
		if (res != 0) {
			res = errno;
			PRINT_ERROR("Unable to flush cache: %s", strerror(res));
			goto out;
		}
	}

	TRACE_DBG("%s", "Flushing cache done.");

	res = alarm(flush_interval);
	if (res != 0) {
		res = errno;
		PRINT_ERROR("alarm() failed: %s", strerror(res));
		goto out;
	}

out:
	TRACE_EXIT();
	return;
}
static int __init init_this_scst_driver(void)
{
	int res;

	TRACE_ENTRY();

	res = scst_register_target_template(&driver_target_template);
	TRACE_DBG("scst_register_target_template() returned %d", res);
	if (res < 0)
		goto out;

#ifdef SCST_REGISTER_INITIATOR_DRIVER
	driver_template.module = THIS_MODULE;
	scsi_register_module(MODULE_SCSI_HA, &driver_template);
	TRACE_DBG("driver_template.present=%d", driver_template.present);
	if (driver_template.present == 0) {
		res = -ENODEV;
		MOD_DEC_USE_COUNT;
		goto out;
	}
#endif

out:
	TRACE_EXIT_RES(res);
	return res;
}
/*----------------------------------------------------------------------------*/
static inline void
NotifyConnectionReset(mtcp_manager_t mtcp, tcp_stream *cur_stream)
{
	TRACE_DBG("Stream %d: Notifying connection reset.\n", cur_stream->id);
	/* TODO: implement this function */
	/* signal to user "connection reset" */
}
void TrackPINList::TRACE()
{
	for (const_iterator it = begin(); it != end(); ++it) {
		TRACE_DBG(" PIN %s ID %" PRId32 " Comment %s",
			  (*it)->getPIN(), (*it)->getID(), (*it)->getComment());
	}
}
void sig_chld(int signal)
{
	/* Check just in case */
	if (signal == SIGCHLD) {
		TRACE_DBG("Cleanup zombie (pid %d)", getpid());
		wait(NULL);
	}
}
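/*
 * Illustrative sketch (not part of the original sources): one way the
 * sig_chld() handler above could be installed so exited children are
 * reaped instead of lingering as zombies. Uses only standard POSIX
 * sigaction(); error handling is intentionally minimal.
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>

static int install_sigchld_handler(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = sig_chld;	/* handler defined above */
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_RESTART;	/* don't break interrupted syscalls */

	if (sigaction(SIGCHLD, &sa, NULL) < 0) {
		perror("sigaction(SIGCHLD)");
		return -1;
	}
	return 0;
}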
/*----------------------------------------------------------------------------*/
static inline int
ProcessRST(mtcp_manager_t mtcp, tcp_stream *cur_stream, uint32_t ack_seq)
{
	/* TODO: we need reset validation logic */
	/* the sequence number of a RST should be inside the window */
	/* (in SYN_SENT state, it should ack the previous SYN) */

	TRACE_DBG("Stream %d: TCP RESET (%s)\n",
		  cur_stream->id, TCPStateToString(cur_stream));
#if DUMP_STREAM
	DumpStream(mtcp, cur_stream);
#endif

	if (cur_stream->state <= TCP_ST_SYN_SENT) {
		/* not handled here */
		return FALSE;
	}

	if (cur_stream->state == TCP_ST_SYN_RCVD) {
		if (ack_seq == cur_stream->rcv_nxt) {
			cur_stream->state = TCP_ST_CLOSED;
			cur_stream->close_reason = TCP_RESET;
			DestroyTCPStream(mtcp, cur_stream);
		}
		return TRUE;
	}

	/* if the application has already closed the connection, just destroy it */
	if (cur_stream->state == TCP_ST_FIN_WAIT_1 ||
	    cur_stream->state == TCP_ST_FIN_WAIT_2 ||
	    cur_stream->state == TCP_ST_LAST_ACK ||
	    cur_stream->state == TCP_ST_CLOSING ||
	    cur_stream->state == TCP_ST_TIME_WAIT) {
		cur_stream->state = TCP_ST_CLOSED;
		cur_stream->close_reason = TCP_ACTIVE_CLOSE;
		DestroyTCPStream(mtcp, cur_stream);
		return TRUE;
	}

	if (cur_stream->state >= TCP_ST_ESTABLISHED &&
	    cur_stream->state <= TCP_ST_CLOSE_WAIT) {
		/* ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT */
		/* TODO: flush all the segment queues */
		//NotifyConnectionReset(mtcp, cur_stream);
	}

	if (!(cur_stream->sndvar->on_closeq || cur_stream->sndvar->on_closeq_int ||
	      cur_stream->sndvar->on_resetq || cur_stream->sndvar->on_resetq_int)) {
		//cur_stream->state = TCP_ST_CLOSED;
		//DestroyTCPStream(mtcp, cur_stream);
		cur_stream->state = TCP_ST_CLOSE_WAIT;
		cur_stream->close_reason = TCP_RESET;
		RaiseCloseEvent(mtcp, cur_stream);
	}

	return TRUE;
}
static int isert_receive_cmnd_data(struct iscsi_cmnd *cmnd)
{
#ifdef CONFIG_SCST_EXTRACHECKS
	if (cmnd->scst_state == ISCSI_CMD_STATE_RX_CMD)
		TRACE_DBG("cmnd %p is still in RX_CMD state", cmnd);
#endif
	EXTRACHECKS_BUG_ON(cmnd->scst_state != ISCSI_CMD_STATE_AFTER_PREPROC);

	return 0;
}
/**
 * returns max numa ID while probing for rte devices
 */
static int
probe_all_rte_devices(char **argv, int *argc, char *dev_name_list)
{
	PciDevice pd;
	int fd, numa_id = -1;
	static char end[] = "";
	static const char delim[] = " \t";
	static char *dev_tokenizer;
	char *dev_token, *saveptr;

	dev_tokenizer = strdup(dev_name_list);
	if (dev_tokenizer == NULL) {
		TRACE_ERROR("Can't allocate memory for dev_tokenizer!\n");
		exit(EXIT_FAILURE);
	}
	fd = open(DEV_PATH, O_RDONLY);
	if (fd == -1) {
		TRACE_ERROR("Error opening dpdk-iface!\n");
		exit(EXIT_FAILURE);
	}

	dev_token = strtok_r(dev_tokenizer, delim, &saveptr);
	while (dev_token != NULL) {
		strcpy(pd.ifname, dev_token);
		if (ioctl(fd, FETCH_PCI_ADDRESS, &pd) == -1) {
			TRACE_DBG("Could not find pci info on dpdk "
				  "device: %s. Is it a dpdk-attached "
				  "interface?\n", dev_token);
			goto loop_over;
		}
		argv[*argc] = strdup("-w");
		argv[*argc + 1] = calloc(PCI_LENGTH, 1);
		if (argv[*argc] == NULL || argv[*argc + 1] == NULL) {
			TRACE_ERROR("Memory allocation error!\n");
			exit(EXIT_FAILURE);
		}
		sprintf(argv[*argc + 1], PCI_DOM":"PCI_BUS":"
			PCI_DEVICE"."PCI_FUNC, pd.pa.domain,
			pd.pa.bus, pd.pa.device, pd.pa.function);
		*argc += 2;
		if (pd.numa_socket > numa_id)
			numa_id = pd.numa_socket;
loop_over:
		dev_token = strtok_r(NULL, delim, &saveptr);
	}

	/* add the terminating "" sequence */
	argv[*argc] = end;

	close(fd);
	free(dev_tokenizer);

	return numa_id;
}
/*----------------------------------------------------------------------------*/
uint8_t *
EthernetOutput(struct mtcp_manager *mtcp, struct pkt_ctx *pctx,
	       uint16_t h_proto, int nif, unsigned char *dst_haddr,
	       uint16_t iplen, uint32_t cur_ts)
{
	uint8_t *buf;
	struct ethhdr *ethh;
	int i;

#if E_PSIO || USE_CHUNK_BUF
	if (!mtcp->iom->get_wptr) {
		TRACE_INFO("get_wptr() in io_module is undefined.");
		return NULL;
	}
	buf = mtcp->iom->get_wptr(mtcp->ctx, nif, iplen + ETHERNET_HEADER_LEN);
#else
	buf = GetWriteBuffer(mtcp->ctx, BUF_RET_MAYBE, nif,
			     iplen + ETHERNET_HEADER_LEN);
#endif
	if (!buf) {
		TRACE_DBG("Failed to get available write buffer\n");
		return NULL;
	}

	TRACE_DBG("dst_hwaddr: %02X:%02X:%02X:%02X:%02X:%02X\n",
		  dst_haddr[0], dst_haddr[1], dst_haddr[2],
		  dst_haddr[3], dst_haddr[4], dst_haddr[5]);

	ethh = (struct ethhdr *)buf;
	for (i = 0; i < ETH_ALEN; i++) {
		ethh->h_source[i] = g_config.mos->netdev_table->ent[nif]->haddr[i];
		ethh->h_dest[i] = dst_haddr[i];
	}
	ethh->h_proto = htons(h_proto);

	if (pctx)
		FillOutPacketEthContext(pctx, cur_ts, nif, ethh,
					iplen + ETHERNET_HEADER_LEN);

	return (uint8_t *)(ethh + 1);
}
static int tape_attach(struct scst_device *dev)
{
	int res, rc;
	int retries;
	struct scsi_mode_data data;
	const int buffer_size = 512;
	uint8_t *buffer = NULL;

	TRACE_ENTRY();

	if (dev->scsi_dev == NULL || dev->scsi_dev->type != dev->type) {
		PRINT_ERROR("%s", "SCSI device not defined or illegal type");
		res = -ENODEV;
		goto out;
	}

	dev->block_size = TAPE_DEF_BLOCK_SIZE;
	dev->block_shift = -1; /* not used */

	buffer = kmalloc(buffer_size, GFP_KERNEL);
	if (!buffer) {
		PRINT_ERROR("Buffer memory allocation (size %d) failure",
			    buffer_size);
		res = -ENOMEM;
		goto out;
	}

	retries = SCST_DEV_RETRIES_ON_UA;
	do {
		TRACE_DBG("%s", "Doing TEST_UNIT_READY");
		rc = scsi_test_unit_ready(dev->scsi_dev,
			SCST_GENERIC_TAPE_SMALL_TIMEOUT, TAPE_RETRIES
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)
			);
#else
			, NULL);
#endif
		TRACE_DBG("TEST_UNIT_READY done: %x", rc);
	} while ((--retries > 0) && rc);
/*----------------------------------------------------------------------------*/
void
ForwardEthernetFrame(struct mtcp_manager *mtcp, struct pkt_ctx *pctx)
{
	uint8_t *buf;

	if (g_config.mos->nic_forward_table != NULL) {
		pctx->out_ifidx =
			g_config.mos->nic_forward_table->nic_fwd_table[pctx->in_ifidx];
		if (pctx->out_ifidx == -1) {
			TRACE_DBG("Could not find outgoing index for input index %d!\n",
				  pctx->in_ifidx);
			return;
		}
		if (!mtcp->iom->get_wptr) {
			TRACE_INFO("get_wptr() in io_module is undefined.");
			return;
		}
		buf = mtcp->iom->get_wptr(mtcp->ctx, pctx->out_ifidx, pctx->p.eth_len);
		if (!buf) {
			TRACE_DBG("Failed to get available write buffer\n");
			return;
		}

		TRACE_DBG("dst_hwaddr: %02X:%02X:%02X:%02X:%02X:%02X\n",
			  pctx->p.ethh->h_dest[0], pctx->p.ethh->h_dest[1],
			  pctx->p.ethh->h_dest[2], pctx->p.ethh->h_dest[3],
			  pctx->p.ethh->h_dest[4], pctx->p.ethh->h_dest[5]);

		memcpy(buf, pctx->p.ethh, pctx->p.eth_len);
	} else {
		TRACE_DBG("Ethernet forwarding table entry does not exist.\n");
	}
}
static int raid_attach(struct scst_device *dev)
{
	int res, rc;
	int retries;

	TRACE_ENTRY();

	if (dev->scsi_dev == NULL || dev->scsi_dev->type != dev->type) {
		PRINT_ERROR("%s", "SCSI device not defined or illegal type");
		res = -ENODEV;
		goto out;
	}

	/*
	 * If the device is offline, don't try to read capacity or any
	 * of the other stuff
	 */
	if (dev->scsi_dev->sdev_state == SDEV_OFFLINE) {
		TRACE_DBG("%s", "Device is offline");
		res = -ENODEV;
		goto out;
	}

	retries = SCST_DEV_UA_RETRIES;
	do {
		TRACE_DBG("%s", "Doing TEST_UNIT_READY");
		rc = scsi_test_unit_ready(dev->scsi_dev,
			SCST_GENERIC_RAID_TIMEOUT, RAID_RETRIES
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)
			);
#else
			, NULL);
#endif
		TRACE_DBG("TEST_UNIT_READY done: %x", rc);
	} while ((--retries > 0) && rc);
/*----------------------------------------------------------------------------*/
void *
kvs_search(kvs_t *ht, _key_t const key)
{
#ifdef DBGMSG
	__PREPARE_DBGLOGGING();
#endif
	TRACE_DBG("look for %lX from 0x%p..\n", key, ht);

	struct kvs_entry *walk;
	kvs_bucket_head *head;

	head = &ht->kvs_table[key % ht->num_buckets];
	TAILQ_FOREACH(walk, head, link) {
		if (key == walk->key)
			return walk->value;
	}

	return NULL;
}
/*----------------------------------------------------------------------------*/
int
kvs_insert(kvs_t *ht, _key_t const key, void * const value)
{
#ifdef DBGMSG
	__PREPARE_DBGLOGGING();
#endif
	/* create an entry */
	int idx;

	assert(value);
	assert(ht);
	assert(ht->kvs_count <= ht->num_entries);

	if (kvs_search(ht, key))
		return -1;

	idx = key % ht->num_buckets;
	assert(idx >= 0 && idx < ht->num_buckets);

	/* get a container */
	struct kvs_entry *ent;
	if (!(ent = TAILQ_FIRST(&ht->kvs_free)))
		return -1;
	TAILQ_REMOVE(&ht->kvs_free, ent, link);

	/* put the value to the container */
	ent->key = key;
	ent->value = value;

	/* insert the container */
	TAILQ_INSERT_TAIL(&ht->kvs_table[idx], ent, link);
	ht->kvs_count++;

	TRACE_DBG("%lX inserted to 0x%p\n", key, ht);

	return 0;
}
/*----------------------------------------------------------------------------*/
void *
kvs_remove(kvs_t *ht, _key_t const key)
{
#ifdef DBGMSG
	__PREPARE_DBGLOGGING();
#endif
	struct kvs_entry *walk;
	kvs_bucket_head *head;

	head = &ht->kvs_table[key % ht->num_buckets];
	TAILQ_FOREACH(walk, head, link) {
		if (key == walk->key) {
			TAILQ_REMOVE(head, walk, link);
			TAILQ_INSERT_TAIL(&ht->kvs_free, walk, link);
			ht->kvs_count--;
			TRACE_DBG("%lX removed from 0x%p\n", key, ht);
			return walk->value;
		}
	}

	return NULL;
}
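/*
 * Illustrative usage sketch (not from the original sources) for the kvs_*
 * hash table above: insert a value under a key, look it up, then remove it.
 * Entries hash into bucket `key % num_buckets`, are taken from the
 * preallocated kvs_free list on insert, and are returned to it on remove.
 * kvs_create() is a hypothetical constructor; the real allocation and
 * teardown routines are not shown in this section.
 */
static void kvs_usage_example(void)
{
	kvs_t *ht = kvs_create(/* num_buckets */ 1024, /* num_entries */ 4096);
	int value = 42;

	if (kvs_insert(ht, 0xdeadbeefUL, &value) != 0)
		return;				/* duplicate key or free list empty */

	int *found = kvs_search(ht, 0xdeadbeefUL);	/* -> &value */
	if (found)
		TRACE_DBG("found %d\n", *found);

	int *removed = kvs_remove(ht, 0xdeadbeefUL);	/* entry goes back to kvs_free */
	(void)removed;
}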
void sigusr1_handler(int signo)
{
	int res, i;

	TRACE_ENTRY();

	TRACE_MGMT_DBG("%s", "Capacity data changed...");

	for (i = 0; i < num_devs; i++) {
		res = ioctl(devs[i].scst_usr_fd,
			    SCST_USER_DEVICE_CAPACITY_CHANGED, NULL);
		if (res != 0) {
			res = errno;
			PRINT_ERROR("Capacity data changed notification failed: %s",
				    strerror(res));
			goto out;
		}
	}

	TRACE_DBG("%s", "Capacity data changed done.");

out:
	TRACE_EXIT();
	return;
}
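/*
 * Illustrative sketch (not from the original sources): how sigalrm_handler()
 * and sigusr1_handler() above could be wired up so the cache is flushed every
 * flush_interval seconds and SIGUSR1 triggers a capacity-changed notification.
 * Uses only standard signal()/alarm(); the daemon's real setup code is not
 * shown in this section, and flush_interval is the global used by the handler.
 */
#include <signal.h>
#include <unistd.h>

static void install_dev_signal_handlers(void)
{
	signal(SIGALRM, sigalrm_handler);	/* periodic SCST_USER_FLUSH_CACHE */
	signal(SIGUSR1, sigusr1_handler);	/* SCST_USER_DEVICE_CAPACITY_CHANGED */

	if (flush_interval > 0)
		alarm(flush_interval);		/* arm first flush; handler re-arms */
}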
/*----------------------------------------------------------------------------*/
int
psio_link_devices(struct mtcp_thread_context *ctxt)
{
	struct psio_private_context *ppc;
	int ret;
	int i, working;

	ppc = (struct psio_private_context *)ctxt->io_private_context;
	working = -1;

	/* attaching (device, queue) */
	for (i = 0; i < num_devices_attached; i++) {
		struct ps_queue queue;
		queue.ifindex = devices_attached[i];

		if (devices[devices_attached[i]].num_rx_queues <= ctxt->cpu) {
			continue;
		}

		working = 0;
		queue.ifindex = devices_attached[i];
		queue.qidx = ctxt->cpu;
#if 0
		TRACE_DBG("attaching RX queue xge%d:%d to CPU%d\n",
			  queue.ifindex, queue.qidx, mtcp->ctxt->cpu);
#endif
		ret = ps_attach_rx_device(&ppc->handle, &queue);
		if (ret != 0) {
			perror("ps_attach_rx_device");
			exit(1);
		}
	}

	return working;
}
/*----------------------------------------------------------------------------*/
int
netmap_send_pkts(struct mtcp_thread_context *ctxt, int nif)
{
	int pkt_size, idx;
	struct netmap_private_context *npc;
	mtcp_manager_t mtcp;

	npc = (struct netmap_private_context *)ctxt->io_private_context;
	idx = nif;
	pkt_size = npc->snd_pkt_size[idx];
	mtcp = ctxt->mtcp_manager;

	/* assert-type statement */
	if (pkt_size == 0)
		return 0;

#ifdef NETSTAT
	mtcp->nstat.tx_packets[nif]++;
	mtcp->nstat.tx_bytes[nif] += pkt_size + 24;
#endif

tx_again:
	if (nm_inject(npc->local_nmd[idx], npc->snd_pktbuf[idx], pkt_size) == 0) {
		TRACE_DBG("Failed to send pkt of size %d on interface: %d\n",
			  pkt_size, idx);
		ioctl(npc->local_nmd[idx]->fd, NIOCTXSYNC, NULL);
		goto tx_again;
	}

#ifdef NETSTAT
	// mtcp->nstat.rx_errors[idx]++;
#endif

	npc->snd_pkt_size[idx] = 0;

	return 1;
}
static int stpg_event_loop(void) { int res = 0, status; int event_fd; uint8_t event_user_buf[1024*1024]; pid_t c_pid = 0; struct pollfd pl; struct scst_event_user *event_user = (struct scst_event_user *)event_user_buf; struct scst_event e1; bool first_error = true; event_fd = open(SCST_EVENT_DEV, O_RDWR); if (event_fd < 0) { res = -errno; PRINT_ERROR("Unable to open SCST event device %s (%s)", SCST_EVENT_DEV, strerror(-res)); goto out; } close(stpg_init_report_pipe[0]); if (log_daemon) res = write(stpg_init_report_pipe[1], &res, sizeof(res)); close(stpg_init_report_pipe[1]); memset(&pl, 0, sizeof(pl)); pl.fd = event_fd; pl.events = POLLIN; memset(&e1, 0, sizeof(e1)); e1.event_code = SCST_EVENT_STPG_USER_INVOKE; strncpy(e1.issuer_name, SCST_EVENT_SCST_CORE_ISSUER, sizeof(e1.issuer_name)); e1.issuer_name[sizeof(e1.issuer_name)-1] = '\0'; PRINT_INFO("Setting allowed event code %d, issuer_name %s", e1.event_code, e1.issuer_name); res = ioctl(event_fd, SCST_EVENT_ALLOW_EVENT, &e1); if (res != 0) { res = -errno; PRINT_ERROR("SCST_EVENT_ALLOW_EVENT failed: %d (%s)", res, strerror(-res)); goto out; } e1.event_code = SCST_EVENT_TM_FN_RECEIVED; strncpy(e1.issuer_name, SCST_EVENT_SCST_CORE_ISSUER, sizeof(e1.issuer_name)); e1.issuer_name[sizeof(e1.issuer_name)-1] = '\0'; PRINT_INFO("Setting allowed event code %d, issuer_name %s", e1.event_code, e1.issuer_name); res = ioctl(event_fd, SCST_EVENT_ALLOW_EVENT, &e1); if (res != 0) { res = -errno; PRINT_ERROR("SCST_EVENT_ALLOW_EVENT failed: %d (%s)", res, strerror(-res)); goto out; } while (1) { memset(event_user_buf, 0, sizeof(event_user_buf)); event_user->max_event_size = sizeof(event_user_buf); res = ioctl(event_fd, SCST_EVENT_GET_NEXT_EVENT, event_user); if (res != 0) { res = -errno; switch (-res) { case ESRCH: case EBUSY: TRACE_MGMT_DBG("SCST_EVENT_GET_NEXT_EVENT " "returned %d (%s)", res, strerror(res)); /* go through */ case EINTR: continue; case EAGAIN: TRACE_DBG("SCST_EVENT_GET_NEXT_EVENT, " "returned EAGAIN (%d)", -res); continue; default: PRINT_ERROR("SCST_EVENT_GET_NEXT_EVENT " "failed: %d (%s)", res, strerror(-res)); if (!first_error) goto out; first_error = false; continue; } first_error = true; again_poll: res = poll(&pl, 1, c_pid > 0 ? 
1 : 0); if (res > 0) continue; else if (res == 0) goto again_poll; else { res = -errno; switch (res) { case ESRCH: case EBUSY: case EAGAIN: TRACE_MGMT_DBG("poll() returned %d " "(%s)", res, strerror(-res)); case EINTR: goto again_poll; default: PRINT_ERROR("poll() failed: %d (%s)", res, strerror(-res)); goto again_poll; } } } first_error = true; #ifdef DEBUG PRINT_INFO("event_code %d, issuer_name %s", event_user->out_event.event_code, event_user->out_event.issuer_name); #endif if (event_user->out_event.payload_len != 0) TRACE_BUFFER("payload", event_user->out_event.payload, event_user->out_event.payload_len); if (event_user->out_event.event_code == SCST_EVENT_STPG_USER_INVOKE) { c_pid = fork(); if (c_pid == -1) PRINT_ERROR("Failed to fork: %d", c_pid); else if (c_pid == 0) { struct scst_event_notify_done d; signal(SIGCHLD, SIG_DFL); status = handle_stpg_received(event_user); memset(&d, 0, sizeof(d)); d.event_id = event_user->out_event.event_id; d.status = status; res = ioctl(event_fd, SCST_EVENT_NOTIFY_DONE, &d); if (res != 0) { res = -errno; PRINT_ERROR("SCST_EVENT_NOTIFY_DONE " "failed: %s (res %d)", strerror(-res), res); } else PRINT_INFO("STPG event completed with status %d", status); exit(res); } } else if (event_user->out_event.event_code == SCST_EVENT_TM_FN_RECEIVED) stpg_handle_tm_received(event_user); else PRINT_ERROR("Unknown event %d received", event_user->out_event.event_code); } out: return res; }
int handle_stpg_received(struct scst_event_user *event_user)
{
	const struct scst_event_stpg_payload *p =
		(struct scst_event_stpg_payload *)event_user->out_event.payload;
	int num, k;
	int res = 0;
	pid_t pids[p->stpg_descriptors_cnt];

	TRACE_DBG("device name %s, stpg_descriptors_cnt %d",
		  p->device_name, p->stpg_descriptors_cnt);

	for (num = 0; num < p->stpg_descriptors_cnt; num++) {
		res = invoke_stpg(p->device_name, &p->stpg_descriptors[num], &pids[num]);
		TRACE_DBG("num %d, res %d, pid %d", num, res, pids[num]);
		if (res != 0)
			break;
	}

	TRACE_DBG("num %d", num);

	for (k = 0; k < num; k++) {
		int status = 0, rc;

		TRACE_DBG("k %d, pid %d", k, pids[k]);

		rc = wait_until_finished(pids[k], transition_timeout, &status, k);
		TRACE_DBG("rc %d, status %d", rc, WEXITSTATUS(status));
		if (rc > 0) {
			if (res == 0)
				res = WEXITSTATUS(status);
			continue;
		} else if (rc < 0) {
			if (res == 0)
				res = rc;
			continue;
		}

		PRINT_WARNING("on_stpg %d (pid %d) did not finish on time - "
			      "sending SIGTERM", k, pids[k]);
		if (res == 0)
			res = -ETIMEDOUT;

		rc = killpg(pids[k], SIGTERM);
		if (rc < 0)
			PRINT_ERROR("Failed to send SIGTERM to child %d (pid %d): %d/%s",
				    k, pids[k], errno, strerror(errno));

		rc = wait_until_finished(pids[k], 1, &status, k);
		if (rc != 0)
			continue;

		while (1) {
			PRINT_WARNING("on_stpg %d (pid %d) did not finish on time - "
				      "sending SIGKILL", k, pids[k]);
			rc = killpg(pids[k], SIGKILL);
			if (rc < 0) {
				PRINT_ERROR("Failed to send SIGKILL to child %d "
					    "(pid %d): %d/%s", k, pids[k],
					    errno, strerror(errno));
				break;
			}
			rc = wait_until_finished(pids[k], 1, &status, k);
			if (rc != 0)
				break;
		}
	}

	TRACE_EXIT_RES(res);
	return res;
}
/**
 * XXX - TODO: This is an ugly function.. I will improve it on 2nd iteration
 */
socket_map_t
AllocateSocket(mctx_t mctx, int socktype)
{
	mtcp_manager_t mtcp = g_mtcp[mctx->cpu];
	socket_map_t socket = NULL;

	switch (socktype) {
	case MOS_SOCK_MONITOR_STREAM:
		mtcp->num_msp++;
		/* fall through */
	case MOS_SOCK_MONITOR_STREAM_ACTIVE:
	case MOS_SOCK_MONITOR_RAW:
		socket = TAILQ_FIRST(&mtcp->free_msmap);
		if (!socket)
			goto alloc_error;
		TAILQ_REMOVE(&mtcp->free_msmap, socket, link);

		/* if there are still uninvalidated events, insert the socket
		 * at the end and find another socket in the free smap list */
		if (socket->events) {
			TRACE_INFO("There are still uninvalidated events remaining.\n");
			TRACE_DBG("There are still uninvalidated events remaining.\n");
			TAILQ_INSERT_TAIL(&mtcp->free_msmap, socket, link);
			socket = NULL;
			goto alloc_error;
		}
		break;
	default:
		mtcp->num_esp++;
		socket = TAILQ_FIRST(&mtcp->free_smap);
		if (!socket)
			goto alloc_error;
		TAILQ_REMOVE(&mtcp->free_smap, socket, link);

		/* if there are still uninvalidated events, insert the socket
		 * at the end and find another socket in the free smap list */
		if (socket->events) {
			TRACE_INFO("There are still uninvalidated events remaining.\n");
			TRACE_DBG("There are still uninvalidated events remaining.\n");
			TAILQ_INSERT_TAIL(&mtcp->free_smap, socket, link);
			socket = NULL;
			goto alloc_error;
		}

		socket->stream = NULL;

		/*
		 * reset a few fields (needed for client socket)
		 * addr = INADDR_ANY, port = INPORT_ANY
		 */
		memset(&socket->saddr, 0, sizeof(struct sockaddr_in));
		memset(&socket->ep_data, 0, sizeof(mtcp_epoll_data_t));
	}

	socket->socktype = socktype;
	socket->opts = 0;
	socket->epoll = 0;
	socket->events = 0;

	return socket;

alloc_error:
	TRACE_ERROR("The concurrent sockets are at maximum.\n");
	return NULL;
}
/*----------------------------------------------------------------------------*/
static inline int
ValidateSequence(mtcp_manager_t mtcp, tcp_stream *cur_stream, uint32_t cur_ts,
		 struct tcphdr *tcph, uint32_t seq, uint32_t ack_seq, int payloadlen)
{
	/* Protect Against Wrapped Sequence number (PAWS) */
	if (!tcph->rst && cur_stream->saw_timestamp) {
		struct tcp_timestamp ts;

		if (!ParseTCPTimestamp(cur_stream, &ts,
				       (uint8_t *)tcph + TCP_HEADER_LEN,
				       (tcph->doff << 2) - TCP_HEADER_LEN)) {
			/* if there is no timestamp */
			/* TODO: implement here */
			TRACE_DBG("No timestamp found.\n");
			return FALSE;
		}

		/* RFC1323: if SEG.TSval < TS.Recent, drop and send ack */
		if (TCP_SEQ_LT(ts.ts_val, cur_stream->rcvvar->ts_recent)) {
			/* TODO: ts_recent should be invalidated
			   before timestamp wraparound for long idle flow */
			TRACE_DBG("PAWS Detect wrong timestamp. "
				  "seq: %u, ts_val: %u, prev: %u\n",
				  seq, ts.ts_val, cur_stream->rcvvar->ts_recent);
			EnqueueACK(mtcp, cur_stream, cur_ts, ACK_OPT_NOW);
			return FALSE;
		} else {
			/* valid timestamp */
			if (TCP_SEQ_GT(ts.ts_val, cur_stream->rcvvar->ts_recent)) {
				TRACE_TSTAMP("Timestamp update. cur: %u, prior: %u "
					     "(time diff: %uus)\n",
					     ts.ts_val, cur_stream->rcvvar->ts_recent,
					     TS_TO_USEC(cur_ts - cur_stream->rcvvar->ts_last_ts_upd));
				cur_stream->rcvvar->ts_last_ts_upd = cur_ts;
			}

			cur_stream->rcvvar->ts_recent = ts.ts_val;
			cur_stream->rcvvar->ts_lastack_rcvd = ts.ts_ref;
		}
	}

	/* TCP sequence validation */
	if (!TCP_SEQ_BETWEEN(seq + payloadlen, cur_stream->rcv_nxt,
			     cur_stream->rcv_nxt + cur_stream->rcvvar->rcv_wnd)) {

		/* if RST bit is set, ignore the segment */
		if (tcph->rst)
			return FALSE;

		if (cur_stream->state == TCP_ST_ESTABLISHED) {
			/* check if it is to get window advertisement */
			if (seq + 1 == cur_stream->rcv_nxt) {
#if 0
				TRACE_DBG("Window update request. (seq: %u, rcv_wnd: %u)\n",
					  seq, cur_stream->rcvvar->rcv_wnd);
#endif
				EnqueueACK(mtcp, cur_stream, cur_ts, ACK_OPT_AGGREGATE);
				return FALSE;
			}

			if (TCP_SEQ_LEQ(seq, cur_stream->rcv_nxt)) {
				EnqueueACK(mtcp, cur_stream, cur_ts, ACK_OPT_AGGREGATE);
			} else {
				EnqueueACK(mtcp, cur_stream, cur_ts, ACK_OPT_NOW);
			}
		} else {
			if (cur_stream->state == TCP_ST_TIME_WAIT) {
				TRACE_DBG("Stream %d: tw expire update to %u\n",
					  cur_stream->id,
					  cur_stream->rcvvar->ts_tw_expire);
				AddtoTimewaitList(mtcp, cur_stream, cur_ts);
			}
			AddtoControlList(mtcp, cur_stream, cur_ts);
		}

		return FALSE;
	}

	return TRUE;
}
int start(int argc, char **argv) { int res = 0; int fd; int i, rc; void *rc1; static struct scst_user_dev_desc desc; pthread_t thread[MAX_VDEVS][threads]; memset(thread, 0, sizeof(thread)); i = 0; optind -= 2; while (1) { int j; optind += 2; if (optind > (argc-2)) break; devs[i].block_size = block_size; devs[i].block_shift = block_shift; devs[i].alloc_fn = alloc_fn; devs[i].rd_only_flag = rd_only_flag; devs[i].wt_flag = wt_flag; devs[i].nv_cache = nv_cache; devs[i].o_direct_flag = o_direct_flag; devs[i].nullio = nullio; devs[i].non_blocking = non_blocking; #if defined(DEBUG_TM_IGNORE) || defined(DEBUG_TM_IGNORE_ALL) devs[i].debug_tm_ignore = debug_tm_ignore; #endif devs[i].type = TYPE_DISK; devs[i].name = argv[optind]; devs[i].file_name = argv[optind+1]; TRACE_DBG("Opening file %s", devs[i].file_name); fd = open(devs[i].file_name, O_RDONLY|O_LARGEFILE); if (fd < 0) { res = -errno; PRINT_ERROR("Unable to open file %s (%s)", devs[i].file_name, strerror(-res)); continue; } devs[i].file_size = lseek64(fd, 0, SEEK_END); devs[i].nblocks = devs[i].file_size >> devs[i].block_shift; close(fd); PRINT_INFO("%s", " "); PRINT_INFO("Virtual device \"%s\", path \"%s\", size %"PRId64"MB, " "block size %d, nblocks %"PRId64", options:", devs[i].name, devs[i].file_name, (uint64_t)devs[i].file_size/1024/1024, devs[i].block_size, (uint64_t)devs[i].nblocks); snprintf(devs[i].usn, sizeof(devs[i].usn), "%"PRIx64, gen_dev_id_num(&devs[i])); TRACE_DBG("usn %s", devs[i].usn); devs[i].scst_usr_fd = open(DEV_USER_PATH DEV_USER_NAME, O_RDWR | (devs[i].non_blocking ? O_NONBLOCK : 0)); if (devs[i].scst_usr_fd < 0) { res = -errno; PRINT_ERROR("Unable to open SCST device %s (%s)", DEV_USER_PATH DEV_USER_NAME, strerror(-res)); goto out_unreg; } memset(&desc, 0, sizeof(desc)); desc.license_str = (unsigned long)"GPL"; desc.version_str = (unsigned long)DEV_USER_VERSION; strncpy(desc.name, devs[i].name, sizeof(desc.name)-1); desc.name[sizeof(desc.name)-1] = '\0'; if (sgv_shared) { desc.sgv_shared = 1; strncpy(desc.sgv_name, devs[0].name, sizeof(desc.sgv_name)-1); desc.sgv_name[sizeof(desc.sgv_name)-1] = '\0'; } desc.sgv_single_alloc_pages = sgv_single_alloc_pages; desc.sgv_purge_interval = sgv_purge_interval; desc.sgv_disable_clustered_pool = sgv_disable_clustered_pool; desc.type = devs[i].type; desc.block_size = devs[i].block_size; desc.opt.parse_type = parse_type; desc.opt.on_free_cmd_type = on_free_cmd_type; desc.opt.memory_reuse_type = memory_reuse_type; desc.opt.tst = SCST_CONTR_MODE_SEP_TASK_SETS; desc.opt.queue_alg = SCST_CONTR_MODE_QUEUE_ALG_UNRESTRICTED_REORDER; desc.opt.d_sense = SCST_CONTR_MODE_FIXED_SENSE; res = ioctl(devs[i].scst_usr_fd, SCST_USER_REGISTER_DEVICE, &desc); if (res != 0) { res = errno; PRINT_ERROR("Unable to register device: %s", strerror(res)); goto out_unreg; } if ((prealloc_buffers_num > 0) && (prealloc_buffer_size > 0)) { res = prealloc_buffers(&devs[i]); if (res != 0) goto out_unreg; } #if 1 { /* Not needed, added here only as a test */ struct scst_user_opt opt; res = ioctl(devs[i].scst_usr_fd, SCST_USER_GET_OPTIONS, &opt); if (res != 0) { res = errno; PRINT_ERROR("Unable to get options: %s", strerror(res)); goto out_unreg; } opt.parse_type = parse_type; opt.on_free_cmd_type = on_free_cmd_type; opt.memory_reuse_type = memory_reuse_type; res = ioctl(devs[i].scst_usr_fd, SCST_USER_SET_OPTIONS, &opt); if (res != 0) { res = errno; PRINT_ERROR("Unable to set options: %s", strerror(res)); goto out_unreg; } } #endif res = pthread_mutex_init(&devs[i].dev_mutex, NULL); if (res != 0) { res = errno; 
PRINT_ERROR("pthread_mutex_init() failed: %s", strerror(res)); goto out_unreg; } for (j = 0; j < threads; j++) { rc = pthread_create(&thread[i][j], NULL, main_loop, &devs[i]); if (rc != 0) { res = errno; PRINT_ERROR("pthread_create() failed: %s", strerror(res)); break; } } i++; num_devs++; if (num_devs >= MAX_VDEVS) { PRINT_INFO("Max devices limit %d reached", i); break; } } for (i = 0; i < num_devs; i++) { int j = 0; while (thread[i][j] != 0) { rc = pthread_join(thread[i][j], &rc1); if (rc != 0) { res = errno; PRINT_ERROR("pthread_join() failed: %s", strerror(res)); } else if (rc1 != NULL) { res = (long)rc1; PRINT_INFO("Thread %d exited (dev %s), res %lx", j, devs[i].name, (long)rc1); } else PRINT_INFO("Thread %d exited (dev %s)", j, devs[i].name); j++; } pthread_mutex_destroy(&devs[i].dev_mutex); } out_unreg: alarm(0); for (i = 0; i < num_devs; i++) { if (unreg_before_close) { res = ioctl(devs[i].scst_usr_fd, SCST_USER_UNREGISTER_DEVICE, NULL); if (res != 0) { res = errno; PRINT_ERROR("Unable to unregister device: %s", strerror(res)); /* go through */ } } close(devs[i].scst_usr_fd); } return res; }
/*----------------------------------------------------------------------------*/
static inline int
ProcessRST(mtcp_manager_t mtcp, tcp_stream *cur_stream, struct pkt_ctx *pctx)
{
	/* TODO: we need reset validation logic */
	/* the sequence number of a RST should be inside the window */
	/* (in SYN_SENT state, it should ack the previous SYN) */

	TRACE_DBG("Stream %d: TCP RESET (%s)\n",
		  cur_stream->id, TCPStateToString(cur_stream));
#if DUMP_STREAM
	DumpStream(mtcp, cur_stream);
#endif

	if (cur_stream->state <= TCP_ST_SYN_SENT) {
		/* not handled here */
		return FALSE;
	}

	if (cur_stream->state == TCP_ST_SYN_RCVD) {
		/* ACK number of last sent ACK packet == rcv_nxt + 1 */
		if (pctx->p.seq == 0 ||
#ifdef BE_RESILIENT_TO_PACKET_DROP
		    pctx->p.seq == cur_stream->rcv_nxt + 1 ||
#endif
		    pctx->p.ack_seq == cur_stream->rcv_nxt + 1) {
			cur_stream->state = TCP_ST_CLOSED_RSVD;
			cur_stream->cb_events |= MOS_ON_TCP_STATE_CHANGE;
			cur_stream->close_reason = TCP_RESET;
			cur_stream->actions |= MOS_ACT_DESTROY;
		} else {
			RAISE_DEBUG_EVENT(mtcp, cur_stream,
					  "(SYN_RCVD): Ignore invalid RST. "
					  "ack_seq expected: %u, ack_seq rcvd: %u\n",
					  cur_stream->rcv_nxt + 1, pctx->p.ack_seq);
		}
		return TRUE;
	}

	/* if the application has already closed the connection, just destroy it */
	if (cur_stream->state == TCP_ST_FIN_WAIT_1 ||
	    cur_stream->state == TCP_ST_FIN_WAIT_2 ||
	    cur_stream->state == TCP_ST_LAST_ACK ||
	    cur_stream->state == TCP_ST_CLOSING ||
	    cur_stream->state == TCP_ST_TIME_WAIT) {
		cur_stream->state = TCP_ST_CLOSED_RSVD;
		cur_stream->close_reason = TCP_ACTIVE_CLOSE;
		cur_stream->cb_events |= MOS_ON_TCP_STATE_CHANGE;
		cur_stream->actions |= MOS_ACT_DESTROY;
		return TRUE;
	}

	if (cur_stream->state >= TCP_ST_ESTABLISHED &&
	    cur_stream->state <= TCP_ST_CLOSE_WAIT) {
		/* ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT */
		/* TODO: flush all the segment queues */
		//NotifyConnectionReset(mtcp, cur_stream);
	}

	if (!(cur_stream->sndvar->on_closeq || cur_stream->sndvar->on_closeq_int ||
	      cur_stream->sndvar->on_resetq || cur_stream->sndvar->on_resetq_int)) {
		//cur_stream->state = TCP_ST_CLOSED_RSVD;
		//cur_stream->actions |= MOS_ACT_DESTROY;
		cur_stream->state = TCP_ST_CLOSED_RSVD;
		cur_stream->cb_events |= MOS_ON_TCP_STATE_CHANGE;
		cur_stream->close_reason = TCP_RESET;
		if (HAS_STREAM_TYPE(cur_stream, MOS_SOCK_STREAM))
			RaiseCloseEvent(mtcp, cur_stream);
	}

	return TRUE;
}
/*----------------------------------------------------------------------------*/ int SetNetEnv(char *dev_name_list, char *port_stat_list) { int eidx = 0; int i, j; int set_all_inf = (strncmp(dev_name_list, ALL_STRING, sizeof(ALL_STRING))==0); TRACE_CONFIG("Loading interface setting\n"); CONFIG.eths = (struct eth_table *) calloc(MAX_DEVICES, sizeof(struct eth_table)); if (!CONFIG.eths) { TRACE_ERROR("Can't allocate space for CONFIG.eths\n"); exit(EXIT_FAILURE); } if (current_iomodule_func == &ps_module_func) { #ifndef DISABLE_PSIO struct ifreq ifr; /* calculate num_devices now! */ num_devices = ps_list_devices(devices); if (num_devices == -1) { perror("ps_list_devices"); exit(EXIT_FAILURE); } /* Create socket */ int sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP); if (sock == -1) { TRACE_ERROR("socket"); exit(EXIT_FAILURE); } /* To Do: Parse dev_name_list rather than use strstr */ for (i = 0; i < num_devices; i++) { strcpy(ifr.ifr_name, devices[i].name); /* getting interface information */ if (ioctl(sock, SIOCGIFFLAGS, &ifr) == 0) { if (!set_all_inf && strstr(dev_name_list, ifr.ifr_name) == NULL) continue; /* Setting informations */ eidx = CONFIG.eths_num++; strcpy(CONFIG.eths[eidx].dev_name, ifr.ifr_name); CONFIG.eths[eidx].ifindex = devices[i].ifindex; /* getting address */ if (ioctl(sock, SIOCGIFADDR, &ifr) == 0 ) { struct in_addr sin = ((struct sockaddr_in *)&ifr.ifr_addr)->sin_addr; CONFIG.eths[eidx].ip_addr = *(uint32_t *)&sin; } if (ioctl(sock, SIOCGIFHWADDR, &ifr) == 0 ) { for (j = 0; j < ETH_ALEN; j ++) { CONFIG.eths[eidx].haddr[j] = ifr.ifr_addr.sa_data[j]; } } /* Net MASK */ if (ioctl(sock, SIOCGIFNETMASK, &ifr) == 0) { struct in_addr sin = ((struct sockaddr_in *)&ifr.ifr_addr)->sin_addr; CONFIG.eths[eidx].netmask = *(uint32_t *)&sin; } /* add to attached devices */ for (j = 0; j < num_devices_attached; j++) { if (devices_attached[j] == devices[i].ifindex) { break; } } devices_attached[num_devices_attached] = devices[i].ifindex; num_devices_attached++; } else { perror("SIOCGIFFLAGS"); } } num_queues = GetNumQueues(); if (num_queues <= 0) { TRACE_CONFIG("Failed to find NIC queues!\n"); close(sock); return -1; } if (num_queues > num_cpus) { TRACE_CONFIG("Too many NIC queues available.\n"); close(sock); return -1; } close(sock); #endif /* !PSIO_MODULE */ } else if (current_iomodule_func == &dpdk_module_func) { #ifndef DISABLE_DPDK int cpu = CONFIG.num_cores; mpz_t _cpumask; char cpumaskbuf[32] = ""; char mem_channels[8] = ""; char socket_mem_str[32] = ""; // int i; int ret, socket_mem; static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS]; /* STEP 1: first determine CPU mask */ mpz_init(_cpumask); if (!mpz_cmp(_cpumask, CONFIG._cpumask)) { /* get the cpu mask */ for (ret = 0; ret < cpu; ret++) mpz_setbit(_cpumask, ret); gmp_sprintf(cpumaskbuf, "%ZX", _cpumask); } else gmp_sprintf(cpumaskbuf, "%ZX", CONFIG._cpumask); mpz_clear(_cpumask); /* STEP 2: determine memory channels per socket */ /* get the mem channels per socket */ if (CONFIG.num_mem_ch == 0) { TRACE_ERROR("DPDK module requires # of memory channels " "per socket parameter!\n"); exit(EXIT_FAILURE); } sprintf(mem_channels, "%d", CONFIG.num_mem_ch); /* STEP 3: determine socket memory */ /* get socket memory threshold (in MB) */ socket_mem = RTE_ALIGN_CEIL((unsigned long)ceil((CONFIG.num_cores * (CONFIG.rcvbuf_size + CONFIG.sndbuf_size + sizeof(struct tcp_stream) + sizeof(struct tcp_recv_vars) + sizeof(struct tcp_send_vars) + sizeof(struct fragment_ctx)) * CONFIG.max_concurrency)/RTE_SOCKET_MEM_SHIFT), RTE_CACHE_LINE_SIZE); 
/* initialize the rte env, what a waste of implementation effort! */ int argc = 6;//8; char *argv[RTE_ARGC_MAX] = {"", "-c", cpumaskbuf, "-n", mem_channels, #if 0 "--socket-mem", socket_mem_str, #endif "--proc-type=auto" }; ret = probe_all_rte_devices(argv, &argc, dev_name_list); /* STEP 4: build up socket mem parameter */ sprintf(socket_mem_str, "%d", socket_mem); #if 0 char *smsptr = socket_mem_str + strlen(socket_mem_str); for (i = 1; i < ret + 1; i++) { sprintf(smsptr, ",%d", socket_mem); smsptr += strlen(smsptr); } TRACE_DBG("socket_mem: %s\n", socket_mem_str); #endif /* * re-set getopt extern variable optind. * this issue was a bitch to debug * rte_eal_init() internally uses getopt() syscall * mtcp applications that also use an `external' getopt * will cause a violent crash if optind is not reset to zero * prior to calling the func below... * see man getopt(3) for more details */ optind = 0; #ifdef DEBUG /* print argv's */ for (i = 0; i < argc; i++) TRACE_INFO("argv[%d]: %s\n", i, argv[i]); #endif /* initialize the dpdk eal env */ ret = rte_eal_init(argc, argv); if (ret < 0) { TRACE_ERROR("Invalid EAL args!\n"); exit(EXIT_FAILURE); } /* give me the count of 'detected' ethernet ports */ #if RTE_VERSION < RTE_VERSION_NUM(18, 5, 0, 0) num_devices = rte_eth_dev_count(); #else num_devices = rte_eth_dev_count_avail(); #endif if (num_devices == 0) { TRACE_ERROR("No Ethernet port!\n"); exit(EXIT_FAILURE); } /* get mac addr entries of 'detected' dpdk ports */ for (ret = 0; ret < num_devices; ret++) rte_eth_macaddr_get(ret, &ports_eth_addr[ret]); num_queues = MIN(CONFIG.num_cores, MAX_CPUS); struct ifaddrs *ifap; struct ifaddrs *iter_if; char *seek; if (getifaddrs(&ifap) != 0) { perror("getifaddrs: "); exit(EXIT_FAILURE); } iter_if = ifap; do { if (iter_if->ifa_addr && iter_if->ifa_addr->sa_family == AF_INET && !set_all_inf && (seek=strstr(dev_name_list, iter_if->ifa_name)) != NULL && /* check if the interface was not aliased */ *(seek + strlen(iter_if->ifa_name)) != ':') { struct ifreq ifr; /* Setting informations */ eidx = CONFIG.eths_num++; strcpy(CONFIG.eths[eidx].dev_name, iter_if->ifa_name); strcpy(ifr.ifr_name, iter_if->ifa_name); /* Create socket */ int sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP); if (sock == -1) { perror("socket"); exit(EXIT_FAILURE); } /* getting address */ if (ioctl(sock, SIOCGIFADDR, &ifr) == 0 ) { struct in_addr sin = ((struct sockaddr_in *)&ifr.ifr_addr)->sin_addr; CONFIG.eths[eidx].ip_addr = *(uint32_t *)&sin; } if (ioctl(sock, SIOCGIFHWADDR, &ifr) == 0 ) { for (j = 0; j < ETH_ALEN; j ++) { CONFIG.eths[eidx].haddr[j] = ifr.ifr_addr.sa_data[j]; } } /* Net MASK */ if (ioctl(sock, SIOCGIFNETMASK, &ifr) == 0) { struct in_addr sin = ((struct sockaddr_in *)&ifr.ifr_addr)->sin_addr; CONFIG.eths[eidx].netmask = *(uint32_t *)&sin; } close(sock); for (j = 0; j < num_devices; j++) { if (!memcmp(&CONFIG.eths[eidx].haddr[0], &ports_eth_addr[j], ETH_ALEN)) CONFIG.eths[eidx].ifindex = j; } /* add to attached devices */ for (j = 0; j < num_devices_attached; j++) { if (devices_attached[j] == CONFIG.eths[eidx].ifindex) { break; } } devices_attached[num_devices_attached] = CONFIG.eths[eidx].ifindex; num_devices_attached++; fprintf(stderr, "Total number of attached devices: %d\n", num_devices_attached); fprintf(stderr, "Interface name: %s\n", iter_if->ifa_name); } iter_if = iter_if->ifa_next; } while (iter_if != NULL); freeifaddrs(ifap); #if 0 /* * XXX: It seems that there is a bug in the RTE SDK. * The dynamically allocated rte_argv params are left * as dangling pointers. 
Freeing them causes program * to crash. */ /* free up all resources */ for (; rte_argc >= 9; rte_argc--) { if (rte_argv[rte_argc] != NULL) { fprintf(stderr, "Cleaning up rte_argv[%d]: %s (%p)\n", rte_argc, rte_argv[rte_argc], rte_argv[rte_argc]); free(rte_argv[rte_argc]); rte_argv[rte_argc] = NULL; } } #endif /* check if process is primary or secondary */ CONFIG.multi_process_is_master = (eal_proc_type_detect() == RTE_PROC_PRIMARY) ? 1 : 0; #endif /* !DISABLE_DPDK */ } else if (current_iomodule_func == &netmap_module_func) { #ifndef DISABLE_NETMAP struct ifaddrs *ifap; struct ifaddrs *iter_if; char *seek; num_queues = MIN(CONFIG.num_cores, MAX_CPUS); if (getifaddrs(&ifap) != 0) { perror("getifaddrs: "); exit(EXIT_FAILURE); } iter_if = ifap; do { if (iter_if->ifa_addr && iter_if->ifa_addr->sa_family == AF_INET && !set_all_inf && (seek=strstr(dev_name_list, iter_if->ifa_name)) != NULL && /* check if the interface was not aliased */ *(seek + strlen(iter_if->ifa_name)) != ':') { struct ifreq ifr; /* Setting informations */ eidx = CONFIG.eths_num++; strcpy(CONFIG.eths[eidx].dev_name, iter_if->ifa_name); strcpy(ifr.ifr_name, iter_if->ifa_name); /* Create socket */ int sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP); if (sock == -1) { perror("socket"); exit(EXIT_FAILURE); } /* getting address */ if (ioctl(sock, SIOCGIFADDR, &ifr) == 0 ) { struct in_addr sin = ((struct sockaddr_in *)&ifr.ifr_addr)->sin_addr; CONFIG.eths[eidx].ip_addr = *(uint32_t *)&sin; } if (ioctl(sock, SIOCGIFHWADDR, &ifr) == 0 ) { for (j = 0; j < ETH_ALEN; j ++) { CONFIG.eths[eidx].haddr[j] = ifr.ifr_addr.sa_data[j]; } } /* Net MASK */ if (ioctl(sock, SIOCGIFNETMASK, &ifr) == 0) { struct in_addr sin = ((struct sockaddr_in *)&ifr.ifr_addr)->sin_addr; CONFIG.eths[eidx].netmask = *(uint32_t *)&sin; } close(sock); #if 0 for (j = 0; j < num_devices; j++) { if (!memcmp(&CONFIG.eths[eidx].haddr[0], &ports_eth_addr[j], ETH_ALEN)) CONFIG.eths[eidx].ifindex = ifr.ifr_ifindex; #endif CONFIG.eths[eidx].ifindex = eidx; TRACE_INFO("Ifindex of interface %s is: %d\n", ifr.ifr_name, CONFIG.eths[eidx].ifindex); #if 0 } #endif /* add to attached devices */ for (j = 0; j < num_devices_attached; j++) { if (devices_attached[j] == CONFIG.eths[eidx].ifindex) { break; } } devices_attached[num_devices_attached] = if_nametoindex(ifr.ifr_name); num_devices_attached++; fprintf(stderr, "Total number of attached devices: %d\n", num_devices_attached); fprintf(stderr, "Interface name: %s\n", iter_if->ifa_name); } iter_if = iter_if->ifa_next; } while (iter_if != NULL); freeifaddrs(ifap); #endif /* !DISABLE_NETMAP */ } else if (current_iomodule_func == &onvm_module_func) { #ifdef ENABLE_ONVM int cpu = CONFIG.num_cores; mpz_t cpumask; char cpumaskbuf[32]; char mem_channels[8]; char service[6]; char instance[6]; int ret; mpz_init(cpumask); /* get the cpu mask */ for (ret = 0; ret < cpu; ret++) mpz_setbit(cpumask, ret); gmp_sprintf(cpumaskbuf, "%ZX", cpumask); mpz_clear(cpumask); /* get the mem channels per socket */ if (CONFIG.num_mem_ch == 0) { TRACE_ERROR("DPDK module requires # of memory channels " "per socket parameter!\n"); exit(EXIT_FAILURE); } sprintf(mem_channels, "%d", CONFIG.num_mem_ch); sprintf(service, "%d", CONFIG.onvm_serv); sprintf(instance, "%d", CONFIG.onvm_inst); /* initialize the rte env first, what a waste of implementation effort! */ char *argv[] = {"", "-c", cpumaskbuf, "-n", mem_channels, "--proc-type=secondary", "--", "-r", service, instance, "" }; const int argc = 10; /* * re-set getopt extern variable optind. 
* this issue was a bitch to debug * rte_eal_init() internally uses getopt() syscall * mtcp applications that also use an `external' getopt * will cause a violent crash if optind is not reset to zero * prior to calling the func below... * see man getopt(3) for more details */ optind = 0; /* initialize the dpdk eal env */ ret = onvm_nflib_init(argc, argv, "mtcp_nf", &CONFIG.nf_info); if (ret < 0) { TRACE_ERROR("Invalid EAL args!\n"); exit(EXIT_FAILURE); } /* give me the count of 'detected' ethernet ports */ num_devices = ports->num_ports; if (num_devices == 0) { TRACE_ERROR("No Ethernet port!\n"); exit(EXIT_FAILURE); } num_queues = MIN(CONFIG.num_cores, MAX_CPUS); struct ifaddrs *ifap; struct ifaddrs *iter_if; char *seek; if (getifaddrs(&ifap) != 0) { perror("getifaddrs: "); exit(EXIT_FAILURE); } iter_if = ifap; do { if (iter_if->ifa_addr && iter_if->ifa_addr->sa_family == AF_INET && !set_all_inf && (seek=strstr(dev_name_list, iter_if->ifa_name)) != NULL && /* check if the interface was not aliased */ *(seek + strlen(iter_if->ifa_name)) != ':') { struct ifreq ifr; /* Setting informations */ eidx = CONFIG.eths_num++; strcpy(CONFIG.eths[eidx].dev_name, iter_if->ifa_name); strcpy(ifr.ifr_name, iter_if->ifa_name); /* Create socket */ int sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP); if (sock == -1) { perror("socket"); } /* getting address */ if (ioctl(sock, SIOCGIFADDR, &ifr) == 0 ) { struct in_addr sin = ((struct sockaddr_in *)&ifr.ifr_addr)->sin_addr; CONFIG.eths[eidx].ip_addr = *(uint32_t *)&sin; } for (j = 0; j < ETH_ALEN; j ++) { CONFIG.eths[eidx].haddr[j] = ports->mac[eidx].addr_bytes[j]; }; /* Net MASK */ if (ioctl(sock, SIOCGIFNETMASK, &ifr) == 0) { struct in_addr sin = ((struct sockaddr_in *)&ifr.ifr_addr)->sin_addr; CONFIG.eths[eidx].netmask = *(uint32_t *)&sin; } close(sock); CONFIG.eths[eidx].ifindex = ports->id[eidx]; devices_attached[num_devices_attached] = CONFIG.eths[eidx].ifindex; num_devices_attached++; fprintf(stderr, "Total number of attached devices: %d\n", num_devices_attached); fprintf(stderr, "Interface name: %s\n", iter_if->ifa_name); } iter_if = iter_if->ifa_next; } while (iter_if != NULL); freeifaddrs(ifap); #endif /* ENABLE_ONVM */ } CONFIG.nif_to_eidx = (int*)calloc(MAX_DEVICES, sizeof(int)); if (!CONFIG.nif_to_eidx) { exit(EXIT_FAILURE); } for (i = 0; i < MAX_DEVICES; ++i) { CONFIG.nif_to_eidx[i] = -1; } for (i = 0; i < CONFIG.eths_num; ++i) { j = CONFIG.eths[i].ifindex; if (j >= MAX_DEVICES) { TRACE_ERROR("ifindex of eths_%d exceed the limit: %d\n", i, j); exit(EXIT_FAILURE); } /* the physic port index of the i-th port listed in the config file is j*/ CONFIG.nif_to_eidx[j] = i; /* finally set the port stats option `on' */ if (strcmp(CONFIG.eths[i].dev_name, port_stat_list) == 0) CONFIG.eths[i].stat_print = TRUE; } return 0; }
int invoke_stpg(const uint8_t *device_name,
		const struct scst_event_stpg_descr *descr, pid_t *out_pid)
{
	char *args[7], *env[7];
	int res = 0, ret, i;
	pid_t c_pid;

	args[0] = stpg_path;
	args[1] = (char *)device_name;
	args[2] = (char *)descr->prev_state;
	args[3] = (char *)descr->new_state;
	args[4] = (char *)descr->dg_name;
	args[5] = (char *)descr->tg_name;
	args[6] = NULL;

	env[0] = "PATH=/bin:/usr/bin:/sbin:/usr/sbin";
	ret = asprintf(&env[1], "SCST_DEVICE_NAME=%s", device_name);
	if (ret < 0) {
		res = -errno;
		PRINT_ERROR("asprintf() failed: %d (%s)", res, strerror(-res));
		goto out;
	}
	ret = asprintf(&env[2], "SCST_PREV_ALUA_STATE=%s", descr->prev_state);
	if (ret < 0) {
		res = -errno;
		PRINT_ERROR("asprintf() failed: %d (%s)", res, strerror(-res));
		goto out;
	}
	ret = asprintf(&env[3], "SCST_ALUA_STATE=%s", descr->new_state);
	if (ret < 0) {
		res = -errno;
		PRINT_ERROR("asprintf() failed: %d (%s)", res, strerror(-res));
		goto out;
	}
	ret = asprintf(&env[4], "SCST_DEVICE_GROUP=%s", descr->dg_name);
	if (ret < 0) {
		res = -errno;
		PRINT_ERROR("asprintf() failed: %d (%s)", res, strerror(-res));
		goto out;
	}
	ret = asprintf(&env[5], "SCST_TARGET_GROUP=%s", descr->tg_name);
	if (ret < 0) {
		res = -errno;
		PRINT_ERROR("asprintf() failed: %d (%s)", res, strerror(-res));
		goto out;
	}
	env[6] = NULL;

	PRINT_INFO("Invoking script %s with parameters: %s %s %s %s %s and environment: "
		   "%s %s %s %s %s", stpg_path, args[1], args[2], args[3], args[4],
		   args[5], env[1], env[2], env[3], env[4], env[5]);

	c_pid = fork();
	if (c_pid == 0) {
		ret = setpgid(getpid(), getpid());
		if (ret < 0) {
			res = -errno;
			PRINT_ERROR("setpgid() failed %d (%s)", ret, strerror(-res));
		}
		TRACE_DBG("pgid %d (pid %d)", getpgid(getpid()), getpid());

		ret = execve(stpg_path, args, env);
		if (ret < 0) {
			res = -errno;
			PRINT_ERROR("EXEC failed %d (%s)", ret, strerror(-res));
		}
		exit(0);
	} else if (c_pid < 0) {
		res = -errno;
		PRINT_ERROR("fork() failed: %d (%s)", res, strerror(-res));
	}

	*out_pid = c_pid;

	for (i = 1; i < (signed)ARRAY_SIZE(env); i++)
		free(env[i]);

out:
	return res;
}
static int modisk_attach(struct scst_device *dev)
{
	int res, rc;
	uint8_t cmd[10];
	const int buffer_size = 512;
	uint8_t *buffer = NULL;
	int retries;
	unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];
	enum dma_data_direction data_dir;

	TRACE_ENTRY();

	if (dev->scsi_dev == NULL || dev->scsi_dev->type != dev->type) {
		PRINT_ERROR("%s", "SCSI device not defined or illegal type");
		res = -ENODEV;
		goto out;
	}

	dev->block_shift = MODISK_DEF_BLOCK_SHIFT;
	dev->block_size = 1 << dev->block_shift;

	/*
	 * If the device is offline, don't try to read capacity or any
	 * of the other stuff
	 */
	if (dev->scsi_dev->sdev_state == SDEV_OFFLINE) {
		TRACE_DBG("%s", "Device is offline");
		res = -ENODEV;
		goto out;
	}

	buffer = kmalloc(buffer_size, GFP_KERNEL);
	if (!buffer) {
		PRINT_ERROR("Buffer memory allocation (size %d) failure",
			    buffer_size);
		res = -ENOMEM;
		goto out;
	}

	/*
	 * Clear any existing UA's and get modisk capacity (modisk block
	 * size).
	 */
	memset(cmd, 0, sizeof(cmd));
	cmd[0] = READ_CAPACITY;
	cmd[1] = (dev->scsi_dev->scsi_level <= SCSI_2) ?
		((dev->scsi_dev->lun << 5) & 0xe0) : 0;
	retries = SCST_DEV_RETRIES_ON_UA;
	while (1) {
		memset(buffer, 0, buffer_size);
		memset(sense_buffer, 0, sizeof(sense_buffer));
		data_dir = SCST_DATA_READ;

		TRACE_DBG("%s", "Doing READ_CAPACITY");
		rc = scsi_execute(dev->scsi_dev, cmd, data_dir, buffer,
				  buffer_size, sense_buffer,
				  SCST_GENERIC_MODISK_REG_TIMEOUT, 3, 0
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
				  , NULL
#endif
				  );
		TRACE_DBG("READ_CAPACITY done: %x", rc);

		if (!rc || !scst_analyze_sense(sense_buffer, sizeof(sense_buffer),
				SCST_SENSE_KEY_VALID, UNIT_ATTENTION, 0, 0))
			break;

		if (!--retries) {
			PRINT_ERROR("UA not cleared after %d retries",
				    SCST_DEV_RETRIES_ON_UA);
			res = -ENODEV;
			goto out_free_buf;
		}
	}

	if (rc == 0) {
		uint32_t sector_size = get_unaligned_be32(&buffer[4]);

		if (sector_size == 0)
			dev->block_shift = MODISK_DEF_BLOCK_SHIFT;
		else
			dev->block_shift = scst_calc_block_shift(sector_size);
		TRACE_DBG("Sector size is %i scsi_level %d(SCSI_2 %d)",
			  sector_size, dev->scsi_dev->scsi_level, SCSI_2);
		if (dev->block_shift < 9) {
			PRINT_ERROR("READ CAPACITY reported an invalid sector size: %d",
				    sector_size);
			res = -EINVAL;
			goto out_free_buf;
		}
	} else {
		dev->block_shift = MODISK_DEF_BLOCK_SHIFT;
		TRACE(TRACE_MINOR, "Read capacity failed: %x, using default "
		      "sector size %d", rc, dev->block_shift);
		PRINT_BUFF_FLAG(TRACE_MINOR, "Returned sense", sense_buffer,
				sizeof(sense_buffer));
	}
	dev->block_size = 1 << dev->block_shift;

	res = scst_obtain_device_parameters(dev, NULL);
	if (res != 0) {
		PRINT_ERROR("Failed to obtain control parameters for device "
			    "%s: %x", dev->virt_name, res);
		goto out_free_buf;
	}

out_free_buf:
	kfree(buffer);

out:
	TRACE_EXIT_RES(res);
	return res;
}