Example #1
File: reset.c Project: ceph/spdk
static int
work_fn(void *arg)
{
	uint64_t tsc_end = rte_get_timer_cycles() + g_time_in_sec * g_tsc_rate;
	struct worker_thread *worker = (struct worker_thread *)arg;
	struct ns_worker_ctx *ns_ctx = NULL;

	printf("Starting thread on core %u\n", worker->lcore);

	if (spdk_nvme_register_io_thread() != 0) {
		fprintf(stderr, "spdk_nvme_register_io_thread() failed on core %u\n", worker->lcore);
		return -1;
	}

	/* Submit initial I/O for each namespace. */
	ns_ctx = worker->ns_ctx;
	while (ns_ctx != NULL) {
		submit_io(ns_ctx, g_queue_depth);
		ns_ctx = ns_ctx->next;
	}

	while (1) {
		/*
		 * Check for completed I/O for each controller. A new
		 * I/O will be submitted in the io_complete callback
		 * to replace each I/O that is completed.
		 */
		ns_ctx = worker->ns_ctx;
		while (ns_ctx != NULL) {
			check_io(ns_ctx);
			ns_ctx = ns_ctx->next;
		}

		if (((tsc_end - rte_get_timer_cycles()) / g_tsc_rate) > (uint64_t)g_time_in_sec / 5 &&
		    ((tsc_end - rte_get_timer_cycles()) / g_tsc_rate) < (uint64_t)(g_time_in_sec / 5 + 10)) {
			ns_ctx = worker->ns_ctx;
			while (ns_ctx != NULL) {
				if (spdk_nvme_ctrlr_reset(ns_ctx->ctr_entry->ctrlr) < 0) {
					fprintf(stderr, "nvme reset failed.\n");
					return -1;
				}
				ns_ctx = ns_ctx->next;
			}
		}

		if (rte_get_timer_cycles() > tsc_end) {
			break;
		}
	}

	ns_ctx = worker->ns_ctx;
	while (ns_ctx != NULL) {
		drain_io(ns_ctx);
		ns_ctx = ns_ctx->next;
	}

	spdk_nvme_unregister_io_thread();

	return 0;
}
Example #2
void
rte_delay_us_block(unsigned int us)
{
	const uint64_t start = rte_get_timer_cycles();
	const uint64_t ticks = (uint64_t)us * rte_get_timer_hz() / 1E6;
	while ((rte_get_timer_cycles() - start) < ticks)
		rte_pause();
}
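A quick way to sanity-check this busy-wait is to time it with the same counter. A minimal sketch, assuming the EAL has already been initialized (the helper name is illustrative):

#include <stdio.h>
#include <stdint.h>
#include <rte_cycles.h>

/* Illustrative check: measure how long rte_delay_us_block() actually
 * blocks, using the same TSC-based counter as the examples above. */
static void
measure_block_delay(unsigned int us)
{
	const uint64_t hz = rte_get_timer_hz();        /* cycles per second */
	const uint64_t start = rte_get_timer_cycles();
	uint64_t elapsed;

	rte_delay_us_block(us);

	elapsed = rte_get_timer_cycles() - start;
	printf("requested %u us, measured %.1f us\n",
	       us, (double)elapsed * 1e6 / (double)hz);
}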
Example #3
File: perf.c Project: gongchuang/spdk
static int
work_fn(void *arg)
{
	uint64_t tsc_end;
	struct worker_thread *worker = (struct worker_thread *)arg;
	struct ns_worker_ctx *ns_ctx = NULL;

	printf("Starting thread on core %u\n", worker->lcore);

	/* Allocate a queue pair for each namespace. */
	ns_ctx = worker->ns_ctx;
	while (ns_ctx != NULL) {
		if (init_ns_worker_ctx(ns_ctx) != 0) {
			printf("ERROR: init_ns_worker_ctx() failed\n");
			return 1;
		}
		ns_ctx = ns_ctx->next;
	}

	tsc_end = rte_get_timer_cycles() + g_time_in_sec * g_tsc_rate;

	/* Submit initial I/O for each namespace. */
	ns_ctx = worker->ns_ctx;
	while (ns_ctx != NULL) {
		submit_io(ns_ctx, g_queue_depth);
		ns_ctx = ns_ctx->next;
	}

	while (1) {
		/*
		 * Check for completed I/O for each controller. A new
		 * I/O will be submitted in the io_complete callback
		 * to replace each I/O that is completed.
		 */
		ns_ctx = worker->ns_ctx;
		while (ns_ctx != NULL) {
			check_io(ns_ctx);
			ns_ctx = ns_ctx->next;
		}

		if (rte_get_timer_cycles() > tsc_end) {
			break;
		}
	}

	ns_ctx = worker->ns_ctx;
	while (ns_ctx != NULL) {
		drain_io(ns_ctx);
		cleanup_ns_worker_ctx(ns_ctx);
		ns_ctx = ns_ctx->next;
	}

	return 0;
}
Example #4
File: perf.c Project: gongchuang/spdk
static void
task_complete(struct perf_task *task)
{
	struct ns_worker_ctx	*ns_ctx;
	uint64_t		tsc_diff;

	ns_ctx = task->ns_ctx;
	ns_ctx->current_queue_depth--;
	ns_ctx->io_completed++;
	tsc_diff = rte_get_timer_cycles() - task->submit_tsc;
	ns_ctx->total_tsc += tsc_diff;
	if (ns_ctx->min_tsc > tsc_diff) {
		ns_ctx->min_tsc = tsc_diff;
	}
	if (ns_ctx->max_tsc < tsc_diff) {
		ns_ctx->max_tsc = tsc_diff;
	}

	rte_mempool_put(task_pool, task);

	/*
	 * is_draining indicates when time has expired for the test run
	 * and we are just waiting for the previously submitted I/O
	 * to complete.  In this case, do not submit a new I/O to replace
	 * the one just completed.
	 */
	if (!ns_ctx->is_draining) {
		submit_single_io(ns_ctx);
	}
}
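A small reporting helper is the natural companion to the counters task_complete() maintains. The sketch below is illustrative: it uses a stand-in struct rather than the real ns_worker_ctx, and converts the accumulated TSC values to microseconds with rte_get_timer_hz().

#include <stdio.h>
#include <inttypes.h>
#include <rte_cycles.h>

/* Minimal stand-in for the per-namespace statistics kept by
 * task_complete(); the real struct ns_worker_ctx lives elsewhere in perf.c. */
struct lat_stats {
	uint64_t io_completed;
	uint64_t total_tsc;
	uint64_t min_tsc;
	uint64_t max_tsc;
};

/* Hypothetical reporting helper: turns TSC counters into microseconds. */
static void
print_latency_stats(const struct lat_stats *s)
{
	const double us_per_cycle = 1e6 / (double)rte_get_timer_hz();

	if (s->io_completed == 0)
		return;

	printf("avg %.2f us, min %.2f us, max %.2f us over %" PRIu64 " I/Os\n",
	       (double)s->total_tsc / s->io_completed * us_per_cycle,
	       (double)s->min_tsc * us_per_cycle,
	       (double)s->max_tsc * us_per_cycle,
	       s->io_completed);
}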
Example #5
File: perf.c Project: gongchuang/spdk
static void
submit_single_io(struct ns_worker_ctx *ns_ctx)
{
	struct perf_task	*task = NULL;
	uint64_t		offset_in_ios;
	int			rc;
	struct ns_entry		*entry = ns_ctx->entry;

	if (rte_mempool_get(task_pool, (void **)&task) != 0) {
		fprintf(stderr, "task_pool rte_mempool_get failed\n");
		exit(1);
	}

	task->ns_ctx = ns_ctx;

	if (g_is_random) {
		offset_in_ios = rand_r(&seed) % entry->size_in_ios;
	} else {
		offset_in_ios = ns_ctx->offset_in_ios++;
		if (ns_ctx->offset_in_ios == entry->size_in_ios) {
			ns_ctx->offset_in_ios = 0;
		}
	}

	task->submit_tsc = rte_get_timer_cycles();

	if ((g_rw_percentage == 100) ||
	    (g_rw_percentage != 0 && ((rand_r(&seed) % 100) < g_rw_percentage))) {
#if HAVE_LIBAIO
		if (entry->type == ENTRY_TYPE_AIO_FILE) {
			rc = aio_submit(ns_ctx->u.aio.ctx, &task->iocb, entry->u.aio.fd, IO_CMD_PREAD, task->buf,
					g_io_size_bytes, offset_in_ios * g_io_size_bytes, task);
		} else
#endif
		{
			rc = spdk_nvme_ns_cmd_read(entry->u.nvme.ns, ns_ctx->u.nvme.qpair, task->buf,
						   offset_in_ios * entry->io_size_blocks,
						   entry->io_size_blocks, io_complete, task, 0);
		}
	} else {
#if HAVE_LIBAIO
		if (entry->type == ENTRY_TYPE_AIO_FILE) {
			rc = aio_submit(ns_ctx->u.aio.ctx, &task->iocb, entry->u.aio.fd, IO_CMD_PWRITE, task->buf,
					g_io_size_bytes, offset_in_ios * g_io_size_bytes, task);
		} else
#endif
		{
			rc = spdk_nvme_ns_cmd_write(entry->u.nvme.ns, ns_ctx->u.nvme.qpair, task->buf,
						    offset_in_ios * entry->io_size_blocks,
						    entry->io_size_blocks, io_complete, task, 0);
		}
	}

	if (rc != 0) {
		fprintf(stderr, "starting I/O failed\n");
	}

	ns_ctx->current_queue_depth++;
}
Example #6
static int __axgbe_phy_config_aneg(struct axgbe_port *pdata)
{
	int ret;

	axgbe_set_bit(AXGBE_LINK_INIT, &pdata->dev_state);
	pdata->link_check = rte_get_timer_cycles();

	ret = pdata->phy_if.phy_impl.an_config(pdata);
	if (ret)
		return ret;

	if (pdata->phy.autoneg != AUTONEG_ENABLE) {
		ret = axgbe_phy_config_fixed(pdata);
		if (ret || !pdata->kr_redrv)
			return ret;
	}

	/* Disable auto-negotiation interrupt */
	rte_intr_disable(&pdata->pci_dev->intr_handle);

	/* Start auto-negotiation in a supported mode */
	if (axgbe_use_mode(pdata, AXGBE_MODE_KR)) {
		axgbe_set_mode(pdata, AXGBE_MODE_KR);
	} else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_2500)) {
		axgbe_set_mode(pdata, AXGBE_MODE_KX_2500);
	} else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_1000)) {
		axgbe_set_mode(pdata, AXGBE_MODE_KX_1000);
	} else if (axgbe_use_mode(pdata, AXGBE_MODE_SFI)) {
		axgbe_set_mode(pdata, AXGBE_MODE_SFI);
	} else if (axgbe_use_mode(pdata, AXGBE_MODE_X)) {
		axgbe_set_mode(pdata, AXGBE_MODE_X);
	} else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_1000)) {
		axgbe_set_mode(pdata, AXGBE_MODE_SGMII_1000);
	} else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_100)) {
		axgbe_set_mode(pdata, AXGBE_MODE_SGMII_100);
	} else {
		rte_intr_enable(&pdata->pci_dev->intr_handle);
		return -EINVAL;
	}

	/* Disable and stop any in progress auto-negotiation */
	axgbe_an_disable_all(pdata);

	/* Clear any auto-negotiation interrupts */
	axgbe_an_clear_interrupts_all(pdata);

	pdata->an_result = AXGBE_AN_READY;
	pdata->an_state = AXGBE_AN_READY;
	pdata->kr_state = AXGBE_RX_BPA;
	pdata->kx_state = AXGBE_RX_BPA;

	/* Re-enable auto-negotiation interrupt */
	rte_intr_enable(&pdata->pci_dev->intr_handle);

	axgbe_an_init(pdata);
	axgbe_an_restart(pdata);

	return 0;
}
Example #7
File: perf.c Project: jupiturliu/spdk
static int
work_fn(void *arg)
{
	uint64_t tsc_end = rte_get_timer_cycles() + g_time_in_sec * g_tsc_rate;
	struct worker_thread *worker = (struct worker_thread *)arg;
	struct ns_entry *entry = NULL;

	printf("Starting thread on core %u\n", worker->lcore);

	nvme_register_io_thread();

	/* Submit initial I/O for each namespace. */
	entry = worker->namespaces;
	while (entry != NULL) {

		submit_io(entry, g_queue_depth);
		entry = entry->next;
	}

	while (1) {
		/*
		 * Check for completed I/O for each controller. A new
		 * I/O will be submitted in the io_complete callback
		 * to replace each I/O that is completed.
		 */
		entry = worker->namespaces;
		while (entry != NULL) {
			check_io(entry);
			entry = entry->next;
		}

		if (rte_get_timer_cycles() > tsc_end) {
			break;
		}
	}

	entry = worker->namespaces;
	while (entry != NULL) {
		drain_io(entry);
		entry = entry->next;
	}

	nvme_unregister_io_thread();

	return 0;
}
Example #8
static inline int
perf_producer(void *arg)
{
	struct prod_data *p  = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	struct rte_mempool *pool = t->pool;
	const uint64_t nb_pkts = t->nb_pkts;
	const uint32_t nb_flows = t->nb_flows;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	struct perf_elt *m;
	struct rte_event ev;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
				rte_lcore_id(), dev_id, port, p->queue_id);

	ev.event = 0;
	ev.op = RTE_EVENT_OP_NEW;
	ev.queue_id = p->queue_id;
	ev.sched_type = t->opt->sched_type_list[0];
	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	ev.event_type =  RTE_EVENT_TYPE_CPU;
	ev.sub_event_type = 0; /* stage 0 */

	while (count < nb_pkts && t->done == false) {
		if (rte_mempool_get(pool, (void **)&m) < 0)
			continue;

		ev.flow_id = flow_counter++ % nb_flows;
		ev.event_ptr = m;
		m->timestamp = rte_get_timer_cycles();
		while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
			if (t->done)
				break;
			rte_pause();
			m->timestamp = rte_get_timer_cycles();
		}
		count++;
	}

	return 0;
}
Example #9
File: test_perf_atq.c Project: btw616/dpdk
static inline __attribute__((always_inline)) void
atq_mark_fwd_latency(struct rte_event *const ev)
{
	if (unlikely(ev->sub_event_type == 0)) {
		struct perf_elt *const m = ev->event_ptr;

		m->timestamp = rte_get_timer_cycles();
	}
}
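The timestamp written at stage 0 is consumed by whichever worker sees the event at its final stage. A sketch of that consuming side, with a stand-in perf_elt and illustrative parameter names:

#include <stdint.h>
#include <rte_cycles.h>
#include <rte_eventdev.h>

/* Minimal stand-in for the event payload; the real struct perf_elt in
 * test-eventdev carries more fields. */
struct perf_elt {
	uint64_t timestamp;
};

/* Sketch: at the final stage, the stage-0 timestamp yields the forward
 * latency in TSC cycles, accumulated into latency_acc. */
static inline __attribute__((always_inline)) void
atq_measure_fwd_latency(struct rte_event *const ev, const uint8_t last_stage,
			uint64_t *latency_acc)
{
	if (ev->sub_event_type == last_stage) {
		struct perf_elt *const m = ev->event_ptr;

		*latency_acc += rte_get_timer_cycles() - m->timestamp;
	}
}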
Example #10
static void finish_send(struct iperf_state *is, struct tcp_pcb *tpcb)
{
    tcp_sent(tpcb, NULL);
    is->send_end_ticks = rte_get_timer_cycles();

    print_result("tx", is->send_start_ticks, is->send_end_ticks,
                 is->sent_bytes);

    disconnect(is, tpcb);

}
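print_result() is not shown in this snippet; a hypothetical version might derive throughput from the two rte_get_timer_cycles() readings as below. The signature is guessed from the call site, and the original presumably uses its own cached tick rate (ticks_sec) rather than calling rte_get_timer_hz() directly.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <rte_cycles.h>

/* Hypothetical version of print_result(): converts a byte count and a
 * pair of cycle readings into elapsed time and throughput. */
static void
print_result(const char *dir, uint64_t start_ticks, uint64_t end_ticks,
             uint64_t bytes)
{
    double sec = (double)(end_ticks - start_ticks) /
                 (double)rte_get_timer_hz();

    if (sec <= 0)
        return;

    printf("%s: %" PRIu64 " bytes in %.3f s (%.2f Mbit/s)\n",
           dir, bytes, sec, (double)bytes * 8 / 1e6 / sec);
}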
Example #11
static void axgbe_check_link_timeout(struct axgbe_port *pdata)
{
	unsigned long link_timeout;
	unsigned long ticks;

	link_timeout = pdata->link_check + (AXGBE_LINK_TIMEOUT *
					    2 *  rte_get_timer_hz());
	ticks = rte_get_timer_cycles();
	if (time_after(ticks, link_timeout))
		axgbe_phy_config_aneg(pdata);
}
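time_after() here is a driver-local macro modeled on the Linux kernel's, not a DPDK API. A minimal sketch of the same deadline pattern built directly on the cycle counter (names are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <rte_cycles.h>

/* Illustrative deadline helper built on rte_get_timer_cycles(). The
 * signed comparison keeps the check well behaved even if the counter
 * were ever to wrap. */
struct cycle_deadline {
	uint64_t expires;
};

static inline void
deadline_arm(struct cycle_deadline *d, unsigned int timeout_ms)
{
	d->expires = rte_get_timer_cycles() +
		     (uint64_t)timeout_ms * rte_get_timer_hz() / 1000;
}

static inline bool
deadline_expired(const struct cycle_deadline *d)
{
	return (int64_t)(rte_get_timer_cycles() - d->expires) >= 0;
}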
Example #12
static void reverse_connect(struct iperf_state *is, struct ip_addr *remote_ip)
{
    is->client_pcb = tcp_new();
    if (is->client_pcb != NULL) {
        tcp_arg(is->client_pcb, is);
        tcp_err(is->client_pcb, connect_error);
        tcp_connect(is->client_pcb, remote_ip, IPERF_SERVER_PORT,
                    connected);
    }

    is->send_start_ticks = rte_get_timer_cycles();
}
Example #13
static enum axgbe_an axgbe_an73_page_received(struct axgbe_port *pdata)
{
	enum axgbe_rx *state;
	unsigned long an_timeout;
	enum axgbe_an ret;
	unsigned long ticks;

	if (!pdata->an_start) {
		pdata->an_start = rte_get_timer_cycles();
	} else {
		an_timeout = pdata->an_start +
			msecs_to_timer_cycles(AXGBE_AN_MS_TIMEOUT);
		ticks = rte_get_timer_cycles();
		if (time_after(ticks, an_timeout)) {
			/* Auto-negotiation timed out, reset state */
			pdata->kr_state = AXGBE_RX_BPA;
			pdata->kx_state = AXGBE_RX_BPA;

			pdata->an_start = rte_get_timer_cycles();
		}
	}

	state = axgbe_in_kr_mode(pdata) ? &pdata->kr_state
		: &pdata->kx_state;

	switch (*state) {
	case AXGBE_RX_BPA:
		ret = axgbe_an73_rx_bpa(pdata, state);
		break;
	case AXGBE_RX_XNP:
		ret = axgbe_an73_rx_xnp(pdata, state);
		break;
	default:
		ret = AXGBE_AN_ERROR;
	}

	return ret;
}
Example #14
static void parse_header(struct iperf_state *is, struct tcp_pcb *tpcb, void *data)
{
    struct client_hdr *chdr = (struct client_hdr *) data;
    is->flags = ntohl(chdr->flags);
    is->amount = ntohl(chdr->mAmount);

    is->amount *= ticks_sec;
    is->amount /= 100;

    is->recv_start_ticks = rte_get_timer_cycles();
    is->valid_hdr = 1;

    if (is->flags & RUN_NOW)
        reverse_connect(is, &tpcb->remote_ip);
}
Example #15
static err_t recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
{
    struct iperf_state *is = (struct iperf_state *) arg;

    if (p == NULL) {
        is->recv_end_ticks = rte_get_timer_cycles();
        return disconnect(is, tpcb);
    }

    if (!is->valid_hdr)
        parse_header(is, tpcb, p->payload);

    is->recv_bytes += p->tot_len;
    tcp_recved(tpcb, p->tot_len);
    pbuf_free(p);

    return ERR_OK;
}
Example #16
static err_t sent(void *arg, struct tcp_pcb *tpcb, u16_t len)
{
    struct iperf_state *is = (struct iperf_state *) arg;

    is->sent_bytes += len;

    if (is->amount > 0 && is->sent_bytes > is->amount) {
        finish_send(is, tpcb);
        return ERR_OK;
    } else if (is->amount < 0 && (rte_get_timer_cycles()-is->send_start_ticks) > -is->amount) {
        finish_send(is, tpcb);
        return ERR_OK;
    }

    int space;
    while ((space = tcp_sndbuf(tpcb)) >= sizeof(send_data)) {
        tcp_write(tpcb, send_data, sizeof(send_data), 0);
    }

    return ERR_OK;
}
Example #17
File: test_ring.c Project: YBorn/OVDK-QoS
#define	TEST_RING_FULL_EMTPY_ITER	8

static int
check_live_watermark_change(__attribute__((unused)) void *dummy)
{
	uint64_t hz = rte_get_timer_hz();
	void *obj_table[MAX_BULK];
	unsigned watermark, watermark_old = 16;
	uint64_t cur_time, end_time;
	int64_t diff = 0;
	int i, ret;
	unsigned count = 4;

	/* init the object table */
	memset(obj_table, 0, sizeof(obj_table));
	end_time = rte_get_timer_cycles() + (hz * 2);

	/* check that bulk and watermark are 4 and 32 (respectively) */
	while (diff >= 0) {

		/* add in ring until we reach watermark */
		ret = 0;
		for (i = 0; i < 16; i ++) {
			if (ret != 0)
				break;
			ret = rte_ring_enqueue_bulk(r, obj_table, count);
		}

		if (ret != -EDQUOT) {
			printf("Cannot enqueue objects, or watermark not "
			       "reached (ret=%d)\n", ret);
Example #18
File: env.c Project: spdk/spdk
uint64_t spdk_get_ticks(void)
{
	return rte_get_timer_cycles();
}
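Applications normally pair this wrapper with spdk_get_ticks_hz(), which the same env layer maps to rte_get_timer_hz(). A minimal sketch of measuring elapsed time through the SPDK API:

#include <stdint.h>
#include "spdk/env.h"

/* Sketch: elapsed wall-clock time via the SPDK tick API, which the
 * wrapper above implements on top of rte_get_timer_cycles(). */
static double
elapsed_seconds(uint64_t start_ticks)
{
	return (double)(spdk_get_ticks() - start_ticks) /
	       (double)spdk_get_ticks_hz();
}

A caller would record spdk_get_ticks() before the measured region and pass that value in afterwards.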
Example #19
int
perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *))
{
	int ret, lcore_id;
	struct test_perf *t = evt_test_priv(test);

	int port_idx = 0;
	/* launch workers */
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (!(opt->wlcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(worker,
				 &t->worker[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch worker %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	/* launch producers */
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (!(opt->plcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(perf_producer_wrapper,
				&t->prod[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch perf_producer %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	const uint64_t total_pkts = opt->nb_pkts *
			evt_nr_active_lcores(opt->plcores);

	uint64_t dead_lock_cycles = rte_get_timer_cycles();
	int64_t dead_lock_remaining  =  total_pkts;
	const uint64_t dead_lock_sample = rte_get_timer_hz() * 5;

	uint64_t perf_cycles = rte_get_timer_cycles();
	int64_t perf_remaining  = total_pkts;
	const uint64_t perf_sample = rte_get_timer_hz();

	static float total_mpps;
	static uint64_t samples;

	const uint64_t freq_mhz = rte_get_timer_hz() / 1000000;
	int64_t remaining = t->outstand_pkts - processed_pkts(t);

	while (t->done == false) {
		const uint64_t new_cycles = rte_get_timer_cycles();

		if ((new_cycles - perf_cycles) > perf_sample) {
			const uint64_t latency = total_latency(t);
			const uint64_t pkts = processed_pkts(t);

			remaining = t->outstand_pkts - pkts;
			float mpps = (float)(perf_remaining-remaining)/1000000;

			perf_remaining = remaining;
			perf_cycles = new_cycles;
			total_mpps += mpps;
			++samples;
			if (opt->fwd_latency && pkts > 0) {
				printf(CLGRN"\r%.3f mpps avg %.3f mpps [avg fwd latency %.3f us] "CLNRM,
					mpps, total_mpps/samples,
					(float)(latency/pkts)/freq_mhz);
			} else {
				printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
					mpps, total_mpps/samples);
			}
			fflush(stdout);

			if (remaining <= 0) {
				t->result = EVT_TEST_SUCCESS;
				if (opt->prod_type == EVT_PROD_TYPE_SYNT) {
					t->done = true;
					rte_smp_wmb();
					break;
				}
			}
		}

		if (new_cycles - dead_lock_cycles > dead_lock_sample &&
				opt->prod_type == EVT_PROD_TYPE_SYNT) {
			remaining = t->outstand_pkts - processed_pkts(t);
			if (dead_lock_remaining == remaining) {
				rte_event_dev_dump(opt->dev_id, stdout);
				evt_err("No schedules for seconds, deadlock");
				t->done = true;
				rte_smp_wmb();
				break;
			}
			dead_lock_remaining = remaining;
			dead_lock_cycles = new_cycles;
		}
	}
	printf("\n");
	return 0;
}
Example #20
// Master's main loop.
// Each iteration services the pending queue first,
// then handles the next new command.
static void master_fn(void){

	nvme_register_io_thread();

	// Init pending task
	master_pending.head = NULL;
	master_pending.tail = NULL;
	master_pending.cnt = 0;
	
	// Init task buffer resource
	for (int i = 0; i < QUEUE_NUM; i++){
		tasks[i] = rte_malloc(NULL, sizeof(struct perf_task), 0x200);
		tasks[i]->buf = rte_malloc(NULL, (f_maxsize+1000)*512, 0x200);	
		for (int j = 0; j < ISSUE_BUF_SIZE; j++){
			issue_buf[i].issue_queue[j].io_completed = 1;
			issue_buf[i].issue_queue[j].qid = i;
		}
	}

	//Begin timing.
	uint64_t tsc_start = rte_get_timer_cycles();
	
	uint64_t pos = 0;
	while (pos < f_len){
		//printf("%lu\n", pos);
		clear_issue(-1);		
		clear_pending();
		int target = scheduler(pos);
		if (target >= 0)
		  master_issue(pos, target);
	
		if (target != -2){
			pos += 1;	
			if ((pos % 100000) == 0)
				printf("Master has (allocated && (issued || pending)) %lu commands\n", pos);
		}
	
	}
	printf("Master has issued all of the I/O commands\n");
	
	// Clear all the pending instructions
	while (master_pending.cnt != 0) {
		clear_issue(-1);
		clear_pending();
	}
	
	// Check out all the issued commands
	int flag = 1;
	while (flag){
		flag = 0;
		clear_issue(-1);
		for (int i = 0; i < QUEUE_NUM; i++){
			if (g_master->queue_depth[i]) flag = 1;
		}
	}

	//Stop timing.
	uint64_t tsc_end = rte_get_timer_cycles();
	
	//Get the total time.
	double sec = (tsc_end - tsc_start) / (double)g_tsc_rate;
	
	printf("Stat of pending count: %lu\n", pending_count);

	//Output the result information.
	printf("Time: %lf seconds\n", sec);
	printf("Throughput: %lf MB/S\n", (double)f_totalblocks/2048/sec);
	printf("IOPS: %lf /S\n", (double)f_len/sec);
	for (int i = 0; i < QUEUE_NUM; i++){
		rte_free(tasks[i]->buf);
		rte_free(tasks[i]);
	}

	nvme_unregister_io_thread();
}
Example #21
File: recv_server.c Project: neimanra/LwIp
int main()
{
  sys_sem_t sem;
	
  sys_init();

  if(sys_sem_new(&sem, 0) != ERR_OK) {
    LWIP_ASSERT("failed to create semaphore", 0);
  }
  tcpip_init(tcpip_init_done, &sem);

  sys_sem_wait(&sem);
  sys_sem_free(&sem);

///////////////////////////////////////////////////////////////////////////////////////////////////

  struct netconn *conn, *newconn;
  err_t err;

  /* Create a new connection identifier. */
  conn = netconn_new(NETCONN_TCP);

  netconn_set_noautorecved(conn, 0);
  tcp_nagle_disable(conn->pcb.tcp);

  /* Bind connection to well known port number 7. */
  netconn_bind(conn, NULL, 80);

  /* Tell connection to go into listening mode. */
  netconn_listen(conn);

  while (1) {

    /* Grab new connection. */
    err = netconn_accept(conn, &newconn);
    printf("accepted new connection %p\n", newconn);
    /* Process the new connection. */
    if (err == ERR_OK) {
      struct netbuf *buf;
      void *data;
      u16_t len;
      u64_t total_rcvd = 0;
      u64_t eal_tsc_resolution_hz = rte_get_timer_hz();
      u64_t  end = rte_get_timer_cycles() + eal_tsc_resolution_hz;

      while ((err = netconn_recv(newconn, &buf)) == ERR_OK) {
          
             netbuf_data(buf, &data, &len);

             if (len > 0) 
             {
                 total_rcvd += len;
             }
             
             if (rte_get_timer_cycles() >= end) 
             {
                    printf("%llu \n", (unsigned long long)total_rcvd);
                    total_rcvd = 0;
                    end = rte_get_timer_cycles() + eal_tsc_resolution_hz;
             }
             
#if 0
            if (err != ERR_OK) {
              printf("tcpecho: netconn_write: error \"%s\"\n", lwip_strerr(err));
            }
#endif
        //} while (netbuf_next(buf) >= 0);
        netbuf_delete(buf);
      }
      /*printf("Got EOF, looping\n");*/ 
      /* Close connection and discard connection identifier. */
      netconn_close(newconn);
      netconn_delete(newconn);
    }
  }

  while (1); 
}
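The once-per-second reporting window in the receive loop generalizes to a small helper. An illustrative sketch:

#include <stdbool.h>
#include <stdint.h>
#include <rte_cycles.h>

/* Illustrative helper for the "report once per interval" pattern above:
 * returns true when the deadline has passed and re-arms it. */
static bool
interval_elapsed(uint64_t *next_deadline, uint64_t interval_cycles)
{
    uint64_t now = rte_get_timer_cycles();

    if (now < *next_deadline)
        return false;

    *next_deadline = now + interval_cycles;
    return true;
}

Seeded with rte_get_timer_cycles() + rte_get_timer_hz() and an interval of rte_get_timer_hz(), this reproduces the loop's once-per-second throughput report.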
Example #22
File: nvme_lat.c Project: qzan9/jni-nvm
static int
u2_lat_bench(void)
{
	int i, rc;
	//int rc;

	void *buf;

	uint32_t io_size_blocks;
	uint64_t offset_in_ios, size_in_ios;

	uint64_t tsc_rate;
	uint64_t tsc_start, tsc_elapsed;
	//uint64_t tsc_end;

	buf = rte_malloc(NULL, io_size, U2_BUFFER_ALIGN);
	if (buf == NULL) {
		fprintf(stderr, "failed to rte_malloc buffer!\n");
		return 1;
	}
	memset(buf, 0xff, io_size);

	//io_num = 0;
	//io_depth = 0;

	io_size_blocks = io_size / u2_ns_sector;
	offset_in_ios = -1;
	size_in_ios = u2_ns_size / io_size;

	tsc_rate = rte_get_tsc_hz();
	tsc_elapsed = 0;
	tsc_start = rte_get_timer_cycles();
	//tsc_end = rte_get_timer_cycles() + time_in_sec * tsc_rate;
	for (i = 0; i < io_num; i++) {
	//while (1) {
		if (is_random) {
			offset_in_ios = rand_r(&seed) % size_in_ios;
		} else {
			if (++offset_in_ios >= size_in_ios) {
				offset_in_ios = 0;
			}
		}

		if (is_rw) {
			rc = spdk_nvme_ns_cmd_read (u2_ns, u2_qpair, buf, offset_in_ios * io_size_blocks, io_size_blocks, u2_io_complete, NULL, 0);
		} else {
			rc = spdk_nvme_ns_cmd_write(u2_ns, u2_qpair, buf, offset_in_ios * io_size_blocks, io_size_blocks, u2_io_complete, NULL, 0);
		}
		if (rc) {
			fprintf(stderr, "failed to submit request %d!\n", i);
			//fprintf(stderr, "failed to submit request %d!\n", io_num);
			return rc;
		}
		io_depth++;    // for latency benchmarking, queue depth stays at 1.

		while (io_depth > 0) {
			spdk_nvme_qpair_process_completions(u2_qpair, 0);
		}

		//if (rte_get_timer_cycles() > tsc_end) {
		//	break;
		//}
	}
	tsc_elapsed = rte_get_timer_cycles() - tsc_start;

	printf("\t\t%9.1f us", (float) (tsc_elapsed * 1000000) / (io_num * tsc_rate));
	printf("\t\t%10.1f s", (float) tsc_elapsed / tsc_rate);
	printf("\n");

	//printf("\t\t%9.1f us", (float) (time_in_sec * 1000000) / io_num);
	//printf("\t\t%12"PRIu64"\n", io_num);

	rte_free(buf);

	return 0;
}