Example #1
struct queue *service_dispatch(struct monitor *monitor, struct queue *q) {
	if (!q) {
		q = worker_queue_pop();
		if (!q) return 0;
	}

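	/* Each message queue is tagged with the handle of its owning service. */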
	uint32_t handle = queue_handle(q);
	struct service *s = service_grab(handle);
	if (!s) {
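		/* The service has already exited: drop its pending messages. */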
		queue_release(q, queue_message_dtor, (void *)(uintptr_t)handle);
		return worker_queue_pop();
	}
	struct message m;
	if (!queue_pop(q, &m)) {
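		/* Queue is empty: release the service and try the next queue. */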
		service_release(handle);
		return worker_queue_pop();
	}
	int overload = queue_overload(q);
	if (overload)
		service_log(handle, "service may overload, message queue length = %d\n", overload);
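	/* Arm the monitor so a dispatch that never returns can be detected. */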
	monitor_trigger(monitor, m.source, handle);
	if (s->logfile)
		log_output(s->logfile, &m);
	s->module.dispatch(s->handle, s->ud, &m);
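	/* service_alloc() with size 0 frees the dispatched message payload. */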
	service_alloc(m.data, 0);
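	/* Dispatch finished: disarm the monitor. */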
	monitor_trigger(monitor, 0, 0);
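	/* Round-robin: push the current queue back if another one is waiting. */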
	struct queue *next = worker_queue_pop();
	if (next) {
		worker_queue_push(q);
		q = next;
	}
	service_release(handle);
	return q;
}
Example #2
void schedule_queue_destroy(queue_entry_t *qe)
{
	odp_buffer_t buf;

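	/* Free the scheduler command buffer attached to this queue. */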
	buf = odp_buffer_from_event(qe->s.cmd_ev);
	odp_buffer_free(buf);

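	/* Remove the queue from its priority's scheduling mask. */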
	pri_clr_queue(queue_handle(qe), queue_prio(qe));

	qe->s.cmd_ev    = ODP_EVENT_INVALID;
	qe->s.pri_queue = ODP_QUEUE_INVALID;
}
Example #3
int schedule_queue_init(queue_entry_t *qe)
{
	odp_buffer_t buf;
	sched_cmd_t *sched_cmd;

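	/* Allocate a command buffer from the scheduler's pool. */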
	buf = odp_buffer_alloc(sched->pool);

	if (buf == ODP_BUFFER_INVALID)
		return -1;

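	/* Build a dequeue command that points back at this queue entry. */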
	sched_cmd      = odp_buffer_addr(buf);
	sched_cmd->cmd = SCHED_CMD_DEQUEUE;
	sched_cmd->qe  = qe;

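	/* Record the command event and register the queue at its priority. */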
	qe->s.cmd_ev    = odp_buffer_to_event(buf);
	qe->s.pri_queue = pri_set_queue(queue_handle(qe), queue_prio(qe));

	return 0;
}
Example #4
/*
 * Schedule queues
 *
 * TODO: SYNC_ORDERED not implemented yet
 */
static int schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
		    unsigned int max_num, unsigned int max_deq)
{
	int i, j;
	int thr;
	int ret;

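	/* Serve events cached from the previous scheduling round first. */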
	if (sched_local.num) {
		ret = copy_events(out_ev, max_num);

		if (out_queue)
			*out_queue = queue_handle(sched_local.qe);

		return ret;
	}

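	/* Release the atomic context held from the previous round, if any. */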
	odp_schedule_release_atomic();

	if (odp_unlikely(sched_local.pause))
		return 0;

	thr = odp_thread_id();

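	/* Scan the priority levels, skipping levels with no queues. */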
	for (i = 0; i < ODP_CONFIG_SCHED_PRIOS; i++) {
		int id;

		if (sched->pri_mask[i] == 0)
			continue;

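		/* Start at a thread-specific offset to spread threads across
		 * the queues of this priority level. */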
		id = thr & (QUEUES_PER_PRIO-1);

		for (j = 0; j < QUEUES_PER_PRIO; j++, id++) {
			odp_queue_t  pri_q;
			odp_event_t  ev;
			odp_buffer_t buf;
			sched_cmd_t *sched_cmd;
			queue_entry_t *qe;
			int num;

			if (id >= QUEUES_PER_PRIO)
				id = 0;

			if (odp_unlikely((sched->pri_mask[i] & (1 << id)) == 0))
				continue;

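			/* Dequeue one scheduler command from this priority queue. */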
			pri_q = sched->pri_queue[i][id];
			ev    = odp_queue_deq(pri_q);
			buf   = odp_buffer_from_event(ev);

			if (buf == ODP_BUFFER_INVALID)
				continue;

			sched_cmd = odp_buffer_addr(buf);

			if (sched_cmd->cmd == SCHED_CMD_POLL_PKTIN) {
				/* Poll packet input */
				if (pktin_poll(sched_cmd->pe)) {
					/* Stop scheduling the pktio */
					pri_clr_pktio(sched_cmd->pktio,
						      sched_cmd->prio);
					odp_buffer_free(buf);
				} else {
					/* Continue scheduling the pktio */
					if (odp_queue_enq(pri_q, ev))
						ODP_ABORT("schedule failed\n");
				}

				continue;
			}

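			/* A dequeue command: pull a burst of events from the queue. */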
			qe  = sched_cmd->qe;
			num = queue_deq_multi(qe, sched_local.buf_hdr, max_deq);

			if (num < 0) {
				/* Destroyed queue */
				queue_destroy_finalize(qe);
				continue;
			}

			if (num == 0) {
				/* Remove empty queue from scheduling */
				continue;
			}

			sched_local.num   = num;
			sched_local.index = 0;
			sched_local.qe    = qe;
			ret = copy_events(out_ev, max_num);

			if (queue_is_atomic(qe)) {
				/* Hold queue during atomic access */
				sched_local.pri_queue = pri_q;
				sched_local.cmd_ev    = ev;
			} else {
				/* Continue scheduling the queue */
				if (odp_queue_enq(pri_q, ev))
					ODP_ABORT("schedule failed\n");
			}

			/* Output the source queue handle */
			if (out_queue)
				*out_queue = queue_handle(qe);

			return ret;
		}
	}

	return 0;
}