Example #1
int queue_enq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr)
{
	int sched = 0;

	LOCK(&queue->s.lock);
	if (odp_unlikely(queue->s.status < QUEUE_STATUS_READY)) {
		UNLOCK(&queue->s.lock);
		ODP_ERR("Bad queue status\n");
		return -1;
	}

	if (queue->s.head == NULL) {
		/* Empty queue */
		queue->s.head = buf_hdr;
		queue->s.tail = buf_hdr;
		buf_hdr->next = NULL;
	} else {
		queue->s.tail->next = buf_hdr;
		queue->s.tail = buf_hdr;
		buf_hdr->next = NULL;
	}

	if (queue->s.status == QUEUE_STATUS_NOTSCHED) {
		queue->s.status = QUEUE_STATUS_SCHED;
		sched = 1; /* retval: schedule queue */
	}
	UNLOCK(&queue->s.lock);

	/* Add queue to scheduling */
	if (sched && schedule_queue(queue))
		ODP_ABORT("schedule_queue failed\n");

	return 0;
}
Example #2
int schedule_pktio_start(odp_pktio_t pktio, int prio)
{
	odp_buffer_t buf;
	sched_cmd_t *sched_cmd;
	odp_queue_t pri_queue;

	buf = odp_buffer_alloc(sched->pool);

	if (buf == ODP_BUFFER_INVALID)
		return -1;

	sched_cmd        = odp_buffer_addr(buf);
	sched_cmd->cmd   = SCHED_CMD_POLL_PKTIN;
	sched_cmd->pktio = pktio;
	sched_cmd->pe    = get_pktio_entry(pktio);
	sched_cmd->prio  = prio;

	pri_queue = pri_set_pktio(pktio, prio);

	if (odp_queue_enq(pri_queue, odp_buffer_to_event(buf)))
		ODP_ABORT("schedule_pktio_start failed\n");

	return 0;
}
Example #3
void _odp_timer_fini(odp_timer_pool *tp)
{
	if (timer_delete(tp->timerid) != 0)
		ODP_ABORT("timer_delete() returned error %s\n",
			  strerror(errno));
	if (_odp_timer_pool_global == tp)
		_odp_timer_pool_global = NULL;
}
Example #4
void odp_schedule_release_atomic(void)
{
	if (sched_local.pri_queue != ODP_QUEUE_INVALID &&
	    sched_local.num       == 0) {
		/* Release current atomic queue */
		if (odp_queue_enq(sched_local.pri_queue, sched_local.cmd_ev))
			ODP_ABORT("odp_schedule_release_atomic failed\n");
		sched_local.pri_queue = ODP_QUEUE_INVALID;
	}
}
Example #5
void _odp_timer_init(odp_timer_pool *tp)
{
	struct sigevent   sigev;
	struct itimerspec ispec;
	uint64_t res, sec, nsec;

	if (_odp_timer_pool_global != NULL)
		ODP_ABORT("Cannot have more than one timer at once\n");
	ODP_DBG("Creating POSIX timer for timer pool %s, period %"
		PRIu64" ns\n", tp->name, tp->param.res_ns);

	memset(&sigev, 0, sizeof(sigev));
	memset(&ispec, 0, sizeof(ispec));

	_odp_timer_pool_global = tp;
	/* Deliver each expiration by calling timer_notify() in a thread */
	sigev.sigev_notify          = SIGEV_THREAD;
	sigev.sigev_notify_function = timer_notify;
	sigev.sigev_value.sival_ptr = tp;

	if (timer_create(CLOCK_MONOTONIC, &sigev, &tp->timerid))
		ODP_ABORT("timer_create() returned error %s\n",
			  strerror(errno));

	res  = tp->param.res_ns;
	sec  = res / ODP_TIME_SEC_IN_NS;
	nsec = res - sec * ODP_TIME_SEC_IN_NS;

	ispec.it_interval.tv_sec  = (time_t)sec;
	ispec.it_interval.tv_nsec = (long)nsec;
	ispec.it_value.tv_sec     = (time_t)sec;
	ispec.it_value.tv_nsec    = (long)nsec;

	if (timer_settime(tp->timerid, 0, &ispec, NULL))
		ODP_ABORT("timer_settime() returned error %s\n",
			  strerror(errno));
}
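For reference, a minimal standalone sketch of the same POSIX timer pattern: a thread-notified periodic timer on CLOCK_MONOTONIC with the period split into seconds and nanoseconds. The 100 ms period and the notify() callback are assumptions for illustration, not part of ODP; link with -lrt on older glibc.

#include <errno.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

#define PERIOD_NS UINT64_C(100000000) /* 100 ms, assumed for the example */

static void notify(union sigval sv)
{
	(void)sv;
	printf("tick\n"); /* stands in for timer_notify() */
}

int main(void)
{
	timer_t timerid;
	struct sigevent sigev;
	struct itimerspec ispec;
	uint64_t sec  = PERIOD_NS / UINT64_C(1000000000);
	uint64_t nsec = PERIOD_NS - sec * UINT64_C(1000000000);

	memset(&sigev, 0, sizeof(sigev));
	memset(&ispec, 0, sizeof(ispec));

	/* Deliver each expiration by calling notify() in a new thread */
	sigev.sigev_notify          = SIGEV_THREAD;
	sigev.sigev_notify_function = notify;

	if (timer_create(CLOCK_MONOTONIC, &sigev, &timerid)) {
		fprintf(stderr, "timer_create: %s\n", strerror(errno));
		return EXIT_FAILURE;
	}

	/* Same split as above: first expiration and period use sec/nsec */
	ispec.it_interval.tv_sec  = (time_t)sec;
	ispec.it_interval.tv_nsec = (long)nsec;
	ispec.it_value            = ispec.it_interval;

	if (timer_settime(timerid, 0, &ispec, NULL)) {
		fprintf(stderr, "timer_settime: %s\n", strerror(errno));
		return EXIT_FAILURE;
	}

	sleep(1);              /* let a few expirations fire */
	timer_delete(timerid); /* mirrors _odp_timer_fini() */
	return 0;
}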
Example #6
int odp_pktio_inq_setdef(odp_pktio_t id, odp_queue_t queue)
{
	pktio_entry_t *pktio_entry = get_pktio_entry(id);
	queue_entry_t *qentry;

	if (pktio_entry == NULL || queue == ODP_QUEUE_INVALID)
		return -1;

	qentry = queue_to_qentry(queue);

	if (qentry->s.type != ODP_QUEUE_TYPE_PKTIN)
		return -1;

	lock_entry(pktio_entry);
	pktio_entry->s.inq_default = queue;
	unlock_entry(pktio_entry);

	switch (qentry->s.type) {
	/* Change to ODP_QUEUE_TYPE_POLL when ODP_QUEUE_TYPE_PKTIN is removed */
	case ODP_QUEUE_TYPE_PKTIN:

		/* User polls the input queue */
		queue_lock(qentry);
		qentry->s.pktin = id;
		queue_unlock(qentry);

		/* Uncomment when ODP_QUEUE_TYPE_PKTIN is removed
		        break;
		   case ODP_QUEUE_TYPE_SCHED:
		 */

		/* Packet input through the scheduler */
		if (schedule_pktio_start(id, ODP_SCHED_PRIO_LOWEST)) {
			ODP_ERR("Schedule pktio start failed\n");
			return -1;
		}

		break;
	default:
		ODP_ABORT("Bad queue type\n");
	}

	return 0;
}
Example #7
void odp_event_free(odp_event_t event)
{
	switch (odp_event_type(event)) {
	case ODP_EVENT_BUFFER:
		odp_buffer_free(odp_buffer_from_event(event));
		break;
	case ODP_EVENT_PACKET:
		odp_packet_free(odp_packet_from_event(event));
		break;
	case ODP_EVENT_TIMEOUT:
		odp_timeout_free(odp_timeout_from_event(event));
		break;
	case ODP_EVENT_CRYPTO_COMPL:
		odp_crypto_compl_free(odp_crypto_compl_from_event(event));
		break;
	default:
		ODP_ABORT("Invalid event type: %d\n", odp_event_type(event));
	}
}
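A hedged sketch of how an application might drive this dispatcher with events returned by the scheduler. It assumes the public odp_schedule() API with ODP_SCHED_NO_WAIT; the header name and the initialization sequence vary by ODP version and are omitted here.

#include <odp.h>

/* Sketch only: free every event currently available from the scheduler */
static void drain_scheduled_events(void)
{
	odp_event_t ev;

	while ((ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT)) !=
	       ODP_EVENT_INVALID)
		odp_event_free(ev); /* dispatches on odp_event_type() above */
}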
Example #8
int odp_queue_destroy(odp_queue_t handle)
{
	queue_entry_t *queue;
	queue = queue_to_qentry(handle);

	LOCK(queue);
	INVALIDATE(queue);
	if (queue->s.status == QUEUE_STATUS_FREE) {
		UNLOCK(queue);
		ODP_ERR("queue \"%s\" already free\n", queue->s.name);
		return -1;
	}
	if (queue->s.status == QUEUE_STATUS_DESTROYED) {
		UNLOCK(queue);
		ODP_ERR("queue \"%s\" already destroyed\n", queue->s.name);
		return -1;
	}
	if (queue->s.head != NULL) {
		UNLOCK(queue);
		ODP_ERR("queue \"%s\" not empty\n", queue->s.name);
		return -1;
	}

	switch (queue->s.status) {
	case QUEUE_STATUS_READY:
		queue->s.status = QUEUE_STATUS_FREE;
		break;
	case QUEUE_STATUS_NOTSCHED:
		queue->s.status = QUEUE_STATUS_FREE;
		schedule_queue_destroy(queue);
		break;
	case QUEUE_STATUS_SCHED:
		/* Queue is still in scheduling */
		queue->s.status = QUEUE_STATUS_DESTROYED;
		break;
	default:
		ODP_ABORT("Unexpected queue status\n");
	}
	UNLOCK(queue);

	return 0;
}
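Because the destroy path above refuses a non-empty queue, callers normally drain the queue first. A minimal sketch, assuming a plain (polled) queue and the public odp_queue_deq() API; it is not taken from this file.

#include <odp.h>

/* Sketch only: empty a polled queue, then destroy it */
static int drain_and_destroy(odp_queue_t queue)
{
	odp_event_t ev;

	while ((ev = odp_queue_deq(queue)) != ODP_EVENT_INVALID)
		odp_event_free(ev);

	return odp_queue_destroy(queue);
}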
Example #9
static void mmap_fill_ring(struct ring *ring, odp_pool_t pool_hdl, int fanout)
{
	/* @todo add Huge Pages support */
	int pz = getpagesize();
	uint32_t pool_id;
	pool_entry_t *pool_entry;

	if (pool_hdl == ODP_POOL_INVALID)
		ODP_ABORT("Invalid pool handle\n");

	pool_id = pool_handle_to_index(pool_hdl);
	pool_entry = get_pool_entry(pool_id);

	/* The frame has to hold the largest packet that fits in a pool block. */
	ring->req.tp_frame_size = (pool_entry->s.blk_size +
				   TPACKET_HDRLEN + TPACKET_ALIGNMENT +
				   (pz - 1)) & (-pz);

	/* Calculate how many pages are needed to hold all pool packets
	 * and align the size to a page boundary.
	 */
	ring->req.tp_block_size = (ring->req.tp_frame_size *
				   pool_entry->s.buf_num + (pz - 1)) & (-pz);

	if (!fanout) {
		/* Single socket is in use. Use 1 block with buf_num frames. */
		ring->req.tp_block_nr = 1;
	} else {
		/* Fanout is in use; traffic is most likely split according
		 * to the number of CPU threads. Use one block per CPU and
		 * buf_num frames. */
		ring->req.tp_block_nr = odp_cpu_count();
	}

	ring->req.tp_frame_nr = ring->req.tp_block_size /
				ring->req.tp_frame_size * ring->req.tp_block_nr;

	ring->mm_len = ring->req.tp_block_size * ring->req.tp_block_nr;
	ring->rd_num = ring->req.tp_frame_nr;
	ring->flen = ring->req.tp_frame_size;
}
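The (size + pz - 1) & (-pz) expressions above round a size up to the next page boundary, which works because the page size is a power of two. A tiny standalone illustration with assumed sizes:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int pz       = getpagesize();  /* typically 4096 */
	int blk_size = 2048 + 82;      /* assumed block + header bytes */

	/* Round up to the next multiple of the page size */
	int frame_size = (blk_size + (pz - 1)) & (-pz);

	printf("%d bytes rounds up to %d (page size %d)\n",
	       blk_size, frame_size, pz);
	return 0;
}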
Example #10
int queue_enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num)
{
	int sched = 0;
	int i;
	odp_buffer_hdr_t *tail;

	for (i = 0; i < num - 1; i++)
		buf_hdr[i]->next = buf_hdr[i+1];

	tail = buf_hdr[num-1];
	buf_hdr[num-1]->next = NULL;

	LOCK(&queue->s.lock);
	if (odp_unlikely(queue->s.status < QUEUE_STATUS_READY)) {
		UNLOCK(&queue->s.lock);
		ODP_ERR("Bad queue status\n");
		return -1;
	}

	/* Empty queue */
	if (queue->s.head == NULL)
		queue->s.head = buf_hdr[0];
	else
		queue->s.tail->next = buf_hdr[0];

	queue->s.tail = tail;

	if (queue->s.status == QUEUE_STATUS_NOTSCHED) {
		queue->s.status = QUEUE_STATUS_SCHED;
		sched = 1; /* retval: schedule queue */
	}
	UNLOCK(&queue->s.lock);

	/* Add queue to scheduling */
	if (sched && schedule_queue(queue))
		ODP_ABORT("schedule_queue failed\n");

	return num; /* All events enqueued */
}
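Note how the chain of buffer headers is linked up before LOCK() is taken, so only the head/tail splice happens inside the critical section. A stripped-down sketch of that idiom, using a hypothetical node type rather than odp_buffer_hdr_t:

#include <stddef.h>

typedef struct node_t {
	struct node_t *next;
} node_t;

/* Link an array of nodes into a NULL-terminated chain; return the tail.
 * Done outside any lock, exactly like the buf_hdr[] loop above. */
static node_t *chain_nodes(node_t *nodes[], int num)
{
	int i;

	for (i = 0; i < num - 1; i++)
		nodes[i]->next = nodes[i + 1];

	nodes[num - 1]->next = NULL;
	return nodes[num - 1];
}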
Example #11
/*
 * Schedule queues
 *
 * TODO: SYNC_ORDERED not implemented yet
 */
static int schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
		    unsigned int max_num, unsigned int max_deq)
{
	int i, j;
	int thr;
	int ret;

	if (sched_local.num) {
		ret = copy_events(out_ev, max_num);

		if (out_queue)
			*out_queue = queue_handle(sched_local.qe);

		return ret;
	}

	odp_schedule_release_atomic();

	if (odp_unlikely(sched_local.pause))
		return 0;

	thr = odp_thread_id();

	for (i = 0; i < ODP_CONFIG_SCHED_PRIOS; i++) {
		int id;

		if (sched->pri_mask[i] == 0)
			continue;

		id = thr & (QUEUES_PER_PRIO-1);

		for (j = 0; j < QUEUES_PER_PRIO; j++, id++) {
			odp_queue_t  pri_q;
			odp_event_t  ev;
			odp_buffer_t buf;
			sched_cmd_t *sched_cmd;
			queue_entry_t *qe;
			int num;

			if (id >= QUEUES_PER_PRIO)
				id = 0;

			if (odp_unlikely((sched->pri_mask[i] & (1 << id)) == 0))
				continue;

			pri_q = sched->pri_queue[i][id];
			ev    = odp_queue_deq(pri_q);
			buf   = odp_buffer_from_event(ev);

			if (buf == ODP_BUFFER_INVALID)
				continue;

			sched_cmd = odp_buffer_addr(buf);

			if (sched_cmd->cmd == SCHED_CMD_POLL_PKTIN) {
				/* Poll packet input */
				if (pktin_poll(sched_cmd->pe)) {
					/* Stop scheduling the pktio */
					pri_clr_pktio(sched_cmd->pktio,
						      sched_cmd->prio);
					odp_buffer_free(buf);
				} else {
					/* Continue scheduling the pktio */
					if (odp_queue_enq(pri_q, ev))
						ODP_ABORT("schedule failed\n");
				}

				continue;
			}

			qe  = sched_cmd->qe;
			num = queue_deq_multi(qe, sched_local.buf_hdr, max_deq);

			if (num < 0) {
				/* Destroyed queue */
				queue_destroy_finalize(qe);
				continue;
			}

			if (num == 0) {
				/* Remove empty queue from scheduling */
				continue;
			}

			sched_local.num   = num;
			sched_local.index = 0;
			sched_local.qe    = qe;
			ret = copy_events(out_ev, max_num);

			if (queue_is_atomic(qe)) {
				/* Hold queue during atomic access */
				sched_local.pri_queue = pri_q;
				sched_local.cmd_ev    = ev;
			} else {
				/* Continue scheduling the queue */
				if (odp_queue_enq(pri_q, ev))
					ODP_ABORT("schedule failed\n");
			}

			/* Output the source queue handle */
			if (out_queue)
				*out_queue = queue_handle(qe);

			return ret;
		}
	}

	return 0;
}
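For context, a hedged sketch of the application-side loop this internal schedule() function serves. It assumes the public odp_schedule_multi() API and ODP_SCHED_WAIT; queue and thread setup are omitted.

#include <odp.h>

#define MAX_EVENTS 8

/* Sketch only: receive bursts of scheduled events and release them */
static void worker_loop(int rounds)
{
	odp_event_t ev[MAX_EVENTS];
	odp_queue_t from;
	int num, i;

	while (rounds-- > 0) {
		num = odp_schedule_multi(&from, ODP_SCHED_WAIT,
					 ev, MAX_EVENTS);

		for (i = 0; i < num; i++)
			odp_event_free(ev[i]);
	}
}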