/**
 * Run one iteration of service @id on the calling application lcore.
 *
 * An all-ones lookup mask is passed to service_run() so the call is not
 * filtered by the calling core's service mapping.
 *
 * @param id                  service id to execute
 * @param serialize_mt_unsafe when non-zero, account this core in
 *                            num_mapped_cores so concurrent callers of an
 *                            MT-unsafe service are rejected with -EBUSY
 * @return 0 or service_run() result on success, -EINVAL for a bad id,
 *         -EBUSY if an MT-unsafe service is already mapped/being run elsewhere
 */
int32_t rte_service_run_iter_on_app_lcore(uint32_t id,
		uint32_t serialize_mt_unsafe)
{
	if (!service_valid(id))
		return -EINVAL;

	struct core_state *calling_cs = &lcore_states[rte_lcore_id()];
	struct rte_service_spec_impl *spec = &rte_services[id];

	/* Register this core in the mapped count *before* examining it.
	 * Incrementing first closes the window between reading the count
	 * and adding ourselves, which would otherwise be a race. */
	if (serialize_mt_unsafe)
		rte_atomic32_inc(&spec->num_mapped_cores);

	const int contended = (service_mt_safe(spec) == 0) &&
			(rte_atomic32_read(&spec->num_mapped_cores) > 1);
	if (contended) {
		if (serialize_mt_unsafe)
			rte_atomic32_dec(&spec->num_mapped_cores);
		return -EBUSY;
	}

	int ret = service_run(id, calling_cs, UINT64_MAX);

	if (serialize_mt_unsafe)
		rte_atomic32_dec(&spec->num_mapped_cores);

	return ret;
}
static void spdk_nvmf_conn_destruct(struct spdk_nvmf_conn *conn) { struct spdk_event *event; SPDK_TRACELOG(SPDK_TRACE_DEBUG, "conn %p\n", conn); conn->state = CONN_STATE_INVALID; event = spdk_event_allocate(rte_lcore_id(), _conn_destruct, conn, NULL, NULL); spdk_poller_unregister(&conn->poller, event); rte_atomic32_dec(&g_num_connections[rte_lcore_id()]); }
static int32_t service_update(struct rte_service_spec *service, uint32_t lcore, uint32_t *set, uint32_t *enabled) { uint32_t i; int32_t sid = -1; for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) { if ((struct rte_service_spec *)&rte_services[i] == service && service_valid(i)) { sid = i; break; } } if (sid == -1 || lcore >= RTE_MAX_LCORE) return -EINVAL; if (!lcore_states[lcore].is_service_core) return -EINVAL; uint64_t sid_mask = UINT64_C(1) << sid; if (set) { if (*set) { lcore_states[lcore].service_mask |= sid_mask; rte_atomic32_inc(&rte_services[sid].num_mapped_cores); } else { lcore_states[lcore].service_mask &= ~(sid_mask); rte_atomic32_dec(&rte_services[sid].num_mapped_cores); } } if (enabled) *enabled = !!(lcore_states[lcore].service_mask & (sid_mask)); rte_smp_wmb(); return 0; }
// reconnect to server end perictly void *reconnect_thread(void *arg) { int i; pthread_detach(pthread_self()); rte_atomic32_inc(&thread_num); char ip[INET_ADDRSTRLEN] = {0}; struct timespec req = {20, 0}; struct in_addr addr4 = {0}; while(rte_atomic32_read(&keep_running) && client_num > 0) { for(i=0; i<client_num; ++i) { slot_t *slot = &svr_hash.slots[i]; svr_t *svr = (svr_t*)slot->data; pthread_spin_lock(&slot->lock); if(svr != NULL && svr->connected == 0) { // get server ip string from uint32_t addr4.s_addr = svr->ip; inet_ntop(AF_INET, &addr4, ip, INET_ADDRSTRLEN); // create socket int fd = socket(AF_INET, SOCK_STREAM, 0); if(fd < 0) { #ifdef DEBUG_STDOUT printf("Failed to create socket for %s:%d, %s, %s, %d\n", ip, svr->port, __FUNCTION__, __FILE__, __LINE__); #else #endif pthread_spin_unlock(&slot->lock); continue; } if(fd >= DESCRIPTOR_MAX) { #ifdef DEBUG_STDOUT printf("Too many connections %d/%d, %s, %s, %d\n", fd, DESCRIPTOR_MAX, __FUNCTION__, __FILE__, __LINE__); #else #endif close(fd); pthread_spin_unlock(&slot->lock); exit(EXIT_FAILURE); } // connect to server struct sockaddr_in addr; memset(&addr, 0, sizeof addr); addr.sin_family = AF_INET; addr.sin_port = htons(svr->port); addr.sin_addr.s_addr = svr->ip; if(connect(fd, (struct sockaddr*)&addr, sizeof addr) < 0) { if(errno != EINPROGRESS) { #ifdef DEBUG_STDOUT printf("Failed to connect to %s:%d, %s, %s, %d\n", ip, svr->port, __FUNCTION__, __FILE__, __LINE__); #endif close(fd); pthread_spin_unlock(&slot->lock); continue; } } svr->connected = 1; // add to fd manager sockinfo[fd].fd = fd; sockinfo[fd].ip = svr->ip; sockinfo[fd].type = TYPE_SERVER; } pthread_spin_unlock(&slot->lock); } nanosleep(&req, NULL); } rte_atomic32_dec(&thread_num); return NULL; }