Example #1
0
/**
 * Run a service once on the calling application lcore.
 *
 * Returns -EINVAL for an invalid service id, -EBUSY when a
 * multi-thread-unsafe service is already mapped on another core,
 * otherwise the result of service_run().
 */
int32_t rte_service_run_iter_on_app_lcore(uint32_t id,
		uint32_t serialize_mt_unsafe)
{
	/* run service on calling core, using all-ones as the service mask */
	if (!service_valid(id))
		return -EINVAL;

	struct core_state *calling_cs = &lcore_states[rte_lcore_id()];
	struct rte_service_spec_impl *spec = &rte_services[id];

	/* Register this core in the mapped count *before* examining it.
	 * Incrementing first closes the race window between reading the
	 * count and adding ourselves to it.
	 */
	if (serialize_mt_unsafe)
		rte_atomic32_inc(&spec->num_mapped_cores);

	const int mt_unsafe_busy = (service_mt_safe(spec) == 0 &&
			rte_atomic32_read(&spec->num_mapped_cores) > 1);
	if (mt_unsafe_busy) {
		/* Undo our registration before bailing out. */
		if (serialize_mt_unsafe)
			rte_atomic32_dec(&spec->num_mapped_cores);
		return -EBUSY;
	}

	int ret = service_run(id, calling_cs, UINT64_MAX);

	if (serialize_mt_unsafe)
		rte_atomic32_dec(&spec->num_mapped_cores);

	return ret;
}
Example #2
0
/**

\brief Create an NVMf fabric connection from the given parameters and schedule it
       on a reactor thread.

\code

# identify reactor where the new connection's work item will be scheduled
reactor = nvmf_allocate_reactor()
schedule fabric connection work item on reactor

\endcode

*/
int
spdk_nvmf_startup_conn(struct spdk_nvmf_conn *conn)
{
	uint64_t reactor_mask = spdk_app_get_core_mask();
	struct spdk_nvmf_conn *admin_conn;
	int selected_core;

	/*
	 * For an I/O queue connection, try to co-locate it with the core
	 * already serving the controller's admin queue.  The NVMf session
	 * may not exist yet at fabric-connect time, so look the admin
	 * connection up by controller id instead.
	 */
	if (conn->type == CONN_TYPE_IOQ && conn->cntlid != 0) {
		admin_conn = spdk_find_nvmf_conn_by_cntlid(conn->cntlid);
		if (admin_conn != NULL) {
			SPDK_TRACELOG(SPDK_TRACE_DEBUG, "Located admin conn session core %d\n",
				      admin_conn->poller.lcore);
			reactor_mask = 1ULL << admin_conn->poller.lcore;
		}
	}

	selected_core = nvmf_allocate_reactor(reactor_mask);
	if (selected_core < 0) {
		/* No usable core: release the connection and report failure. */
		SPDK_ERRLOG("Unable to find core to launch connection.\n");
		free_conn(conn);
		return -1;
	}

	conn->state = CONN_STATE_RUNNING;
	SPDK_NOTICELOG("Launching nvmf connection[qid=%d] on core: %d\n",
		       conn->qid, selected_core);

	/* Hand the connection's work function to the chosen reactor. */
	conn->poller.fn = spdk_nvmf_conn_do_work;
	conn->poller.arg = conn;

	rte_atomic32_inc(&g_num_connections[selected_core]);
	spdk_poller_register(&conn->poller, selected_core, NULL);

	return 0;
}
Example #3
0
static int32_t
service_update(struct rte_service_spec *service, uint32_t lcore,
		uint32_t *set, uint32_t *enabled)
{
	uint32_t i;
	int32_t sid = -1;

	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if ((struct rte_service_spec *)&rte_services[i] == service &&
				service_valid(i)) {
			sid = i;
			break;
		}
	}

	if (sid == -1 || lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	if (!lcore_states[lcore].is_service_core)
		return -EINVAL;

	uint64_t sid_mask = UINT64_C(1) << sid;
	if (set) {
		if (*set) {
			lcore_states[lcore].service_mask |= sid_mask;
			rte_atomic32_inc(&rte_services[sid].num_mapped_cores);
		} else {
			lcore_states[lcore].service_mask &= ~(sid_mask);
			rte_atomic32_dec(&rte_services[sid].num_mapped_cores);
		}
	}

	if (enabled)
		*enabled = !!(lcore_states[lcore].service_mask & (sid_mask));

	rte_smp_wmb();

	return 0;
}
Example #4
0
/* Dataplane thread stop.  Signals all dataplane threads to stop and
 * returns the first error encountered (or LAGOPUS_RESULT_OK). */
lagopus_result_t
dataplane_stop(void) {
  lagopus_result_t first_status, latest;

#ifdef HAVE_DPDK
  /* Raise the shared stop flag before joining the DPDK thread. */
  rte_atomic32_inc(&dpdk_stop);
#endif /* HAVE_DPDK */

  first_status = dp_thread_stop(&timer_thread, &timer_run);
  latest = dp_thread_stop(&sock_thread, &sock_run);
  /* Keep the earliest failure; later results only fill in if OK so far. */
  if (first_status == LAGOPUS_RESULT_OK) {
    first_status = latest;
  }
#ifdef HAVE_DPDK
  if (rawsocket_only_mode != true) {
    latest = dp_thread_stop(&dpdk_thread, &dpdk_run);
    if (first_status == LAGOPUS_RESULT_OK) {
      first_status = latest;
    }
  }
#endif /* HAVE_DPDK */
  return first_status;
}
Example #5
0
/*
 * Request termination of the KNI loop.
 *
 * Bumps the shared kni_stop counter; presumably the KNI main loop polls
 * this atomic and exits once it is non-zero -- confirm against the loop
 * implementation elsewhere in the file.
 */
void
kni_stop_loop(void)
{
	rte_atomic32_inc(&kni_stop);
}
Example #6
0
// Periodically reconnect to servers whose connection has dropped.
// Runs detached; registers itself in thread_num so shutdown can wait
// for worker threads to drain.  Sweeps every slot in svr_hash every
// 20 seconds while keep_running is set.
void *reconnect_thread(void *arg) {
	int i;
	pthread_detach(pthread_self());
	rte_atomic32_inc(&thread_num);
	char ip[INET_ADDRSTRLEN] = {0};
	struct timespec req = {20, 0};	/* 20s pause between sweeps */
	struct in_addr addr4 = {0};
	while(rte_atomic32_read(&keep_running) && client_num > 0) {
		for(i=0; i<client_num; ++i) {
			slot_t *slot = &svr_hash.slots[i];
			pthread_spin_lock(&slot->lock);
			/* BUGFIX: read slot->data only while holding the slot lock.
			 * The previous code loaded the pointer before locking, racing
			 * with concurrent writers of slot->data. */
			svr_t *svr = (svr_t*)slot->data;
			if(svr != NULL && svr->connected == 0) {
				// get server ip string from uint32_t
				addr4.s_addr = svr->ip;
				inet_ntop(AF_INET, &addr4, ip, INET_ADDRSTRLEN);
				// create socket
				int fd = socket(AF_INET, SOCK_STREAM, 0);
				if(fd < 0) {
#ifdef DEBUG_STDOUT
					printf("Failed to create socket for %s:%d, %s, %s, %d\n", ip, svr->port, __FUNCTION__, __FILE__, __LINE__);
#endif
					pthread_spin_unlock(&slot->lock);
					continue;
				}
				if(fd >= DESCRIPTOR_MAX) {
					/* fd would overflow the fixed-size sockinfo table. */
#ifdef DEBUG_STDOUT
					printf("Too many connections %d/%d, %s, %s, %d\n", fd, DESCRIPTOR_MAX, __FUNCTION__, __FILE__, __LINE__);
#endif
					close(fd);
					pthread_spin_unlock(&slot->lock);
					exit(EXIT_FAILURE);
				}
				// connect to server
				struct sockaddr_in addr;
				memset(&addr, 0, sizeof addr);
				addr.sin_family = AF_INET;
				addr.sin_port = htons(svr->port);
				addr.sin_addr.s_addr = svr->ip;
				if(connect(fd, (struct sockaddr*)&addr, sizeof addr) < 0) {
					/* NOTE(review): EINPROGRESS is only reported for
					 * non-blocking sockets, but this fd is blocking --
					 * confirm whether O_NONBLOCK was intended here. */
					if(errno != EINPROGRESS) {
#ifdef DEBUG_STDOUT
						printf("Failed to connect to %s:%d, %s, %s, %d\n", ip, svr->port, __FUNCTION__, __FILE__, __LINE__);
#endif
						close(fd);
						pthread_spin_unlock(&slot->lock);
						continue;
					}
				}
				svr->connected = 1;
				// add to fd manager
				sockinfo[fd].fd = fd;
				sockinfo[fd].ip = svr->ip;
				sockinfo[fd].type = TYPE_SERVER;
			}
			pthread_spin_unlock(&slot->lock);
		}
		nanosleep(&req, NULL);
	}
	rte_atomic32_dec(&thread_num);
	return NULL;
}
Example #7
0
/* Callback used by the multi-callback test: counts each invocation in
 * the shared cb_count atomic and logs the argument it was called with. */
static void
test_multi_cb(void *arg)
{
	rte_atomic32_inc(&cb_count);
	printf("In %s - arg = %p\n", __func__, arg);
}