Example #1
0
/*
 * Configure one event queue on an event device.
 *
 * @param dev_id     Event device identifier; must name a valid device.
 * @param queue_id   Index of the queue to set up; must be a valid queue on
 *                   the device.
 * @param queue_conf Queue configuration, or NULL to request the driver's
 *                   default configuration.
 *
 * @return 0 (or the driver's queue_setup() result) on success,
 *         -EINVAL for an invalid device/queue id or out-of-range limits,
 *         -EBUSY if the device is started,
 *         -ENOTSUP if the driver lacks a required operation.
 */
int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		      const struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_queue_conf def_conf;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	/* Check nb_atomic_flows limit. The explicit NULL guard makes it
	 * obvious that a NULL queue_conf (request for defaults) skips the
	 * limit checks, rather than relying on the helper to reject NULL. */
	if (queue_conf != NULL && is_valid_atomic_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_flows == 0 ||
		    queue_conf->nb_atomic_flows >
			dev->data->dev_conf.nb_event_queue_flows) {
			/* nb_atomic_flows and nb_event_queue_flows are
			 * uint32_t; use PRIu32 instead of %d so the format
			 * specifier matches the argument type. */
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_flows=%" PRIu32
		" max_flows=%" PRIu32,
			dev_id, queue_id, queue_conf->nb_atomic_flows,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	/* Check nb_atomic_order_sequences limit (same NULL guard as above). */
	if (queue_conf != NULL && is_valid_ordered_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_order_sequences == 0 ||
		    queue_conf->nb_atomic_order_sequences >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_order_seq=%" PRIu32
		" max_flows=%" PRIu32,
			dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	/* Queues may only be reconfigured while the device is stopped. */
	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow queue setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);

	/* NULL config means "use the driver's defaults": fetch them into a
	 * local and point queue_conf at it. */
	if (queue_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
		queue_conf = &def_conf;
	}

	/* Remember the effective configuration, then hand off to the driver. */
	dev->data->queues_cfg[queue_id] = *queue_conf;
	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}
Example #2
0
/*
 * Read a single attribute of an event queue.
 *
 * @param dev_id     Event device identifier; must name a valid device.
 * @param queue_id   Queue whose attribute is queried; must be valid.
 * @param attr_id    One of the RTE_EVENT_QUEUE_ATTR_* identifiers.
 * @param attr_value Output pointer for the attribute value; must be non-NULL.
 *
 * @return 0 on success, -EINVAL for a bad device/queue/attribute or NULL
 *         output pointer, -EOVERFLOW when SCHEDULE_TYPE is requested on an
 *         all-types queue (which has no single schedule type).
 */
int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_event_queue_conf *conf;
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	/* Attributes are answered from the stored per-queue configuration. */
	conf = &dev->data->queues_cfg[queue_id];

	switch (attr_id) {
	case RTE_EVENT_QUEUE_ATTR_PRIORITY:
		/* Priority is only meaningful when the device supports
		 * queue QoS; otherwise report the normal priority. */
		*attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
			*attr_value = conf->priority;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
		*attr_value = conf->nb_atomic_flows;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
		*attr_value = conf->nb_atomic_order_sequences;
		break;
	case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
		*attr_value = conf->event_queue_cfg;
		break;
	case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
		/* An all-types queue accepts events of every schedule type,
		 * so a single schedule type cannot be reported. */
		if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
			return -EOVERFLOW;

		*attr_value = conf->schedule_type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
Example #3
0
/*
 * Fetch the driver's default configuration for an event queue.
 *
 * @param dev_id     Event device identifier; must name a valid device.
 * @param queue_id   Queue whose defaults are requested; must be valid.
 * @param queue_conf Output buffer for the default configuration; non-NULL.
 *
 * @return 0 on success, -EINVAL for an invalid device/queue id or NULL
 *         output pointer, -ENOTSUP when the driver does not implement
 *         queue_def_conf.
 */
int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	/* Reject a missing output buffer before touching the queue. */
	if (queue_conf == NULL)
		return -EINVAL;

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);

	/* Hand the driver a zeroed config to fill in. */
	memset(queue_conf, 0, sizeof(*queue_conf));
	(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);

	return 0;
}
Example #4
0
/* Drain the active (callee) box's "done" result queue: for every finished RPC
 * result message, copy the result into the originating caller box's outgoing
 * message slot, mark that slot done, free the callee-side slot, and wake the
 * caller waiting on the result semaphore. Invalid or inconsistent messages
 * are asserted on and skipped rather than bringing the system down. */
static void drain_result_queue(void)
{
    /* Context of the currently active box; its done_queue holds results it
     * has finished producing. */
    UvisorBoxIndex * callee_index = (UvisorBoxIndex *) *__uvisor_config.uvisor_box_context;
    uvisor_pool_queue_t * callee_queue = &callee_index->rpc_incoming_message_queue->done_queue;
    uvisor_rpc_message_t * callee_array = (uvisor_rpc_message_t *) callee_queue->pool->array;

    /* The active box is the callee: the one that produced the results. */
    int callee_box = g_active_box;

    /* Verify that the callee queue is entirely in caller box BSS. We check the
     * entire queue instead of just the message we are interested in, because
     * we want to validate the queue before we attempt any operations on it,
     * like dequeing. */
    if (!is_valid_queue(callee_queue, callee_box))
    {
        /* The callee's done queue is not valid. This shouldn't happen in a
         * non-malicious system. */
        assert(false);
        return;
    }

    /* For each message in the queue: */
    do {
        uvisor_pool_slot_t callee_slot;

        /* Dequeue the first result message from the queue. */
        callee_slot = uvisor_pool_queue_try_dequeue_first(callee_queue);
        if (callee_slot >= callee_queue->pool->num) {
            /* The queue is empty or busy. */
            break;
        }

        uvisor_rpc_message_t * callee_msg = &callee_array[callee_slot];

        /* Look up the origin message. This should have been remembered
         * by uVisor when it did the initial delivery. */
        /* NOTE(review): caller_slot is derived from the callee-held
         * match_cookie and is used to index caller_array below without a
         * range check — confirm uvisor_result_slot() cannot yield an
         * out-of-bounds slot for a forged cookie. */
        uvisor_pool_slot_t caller_slot = uvisor_result_slot(callee_msg->match_cookie);


        /* Based on the origin message, look up the box to return the result to
         * (caller box). */
        /* NOTE(review): other_box_id indexes g_context_current_states below
         * without validation here — presumably uVisor set it at delivery time
         * and it cannot be forged; verify. */
        const int caller_box = callee_msg->other_box_id;

        UvisorBoxIndex * caller_index = (UvisorBoxIndex *) g_context_current_states[caller_box].bss;
        uvisor_pool_queue_t * caller_queue = &caller_index->rpc_outgoing_message_queue->queue;
        uvisor_rpc_message_t * caller_array = (uvisor_rpc_message_t *) caller_queue->pool->array;

        /* Verify that the caller queue is entirely in caller box BSS. We check the
         * entire queue instead of just the message we are interested in, because
         * we want to validate the queue before we attempt any operations on it. */
        if (!is_valid_queue(caller_queue, caller_box))
        {
            /* The caller's outgoing queue is not valid. The caller queue is
             * messed up. This shouldn't happen in a non-malicious system.
             * Discard the result message (not retrying later), because the
             * caller is malicious. */
            assert(false);
            continue;
        }

        uvisor_rpc_message_t * caller_msg = &caller_array[caller_slot];

        /* Verify that the caller box is waiting for the callee box to complete
         * the RPC in this slot. */

        /* Other box ID must be same. */
        if (caller_msg->other_box_id != callee_box) {
            /* The caller isn't waiting for this box to complete it. This
             * shouldn't happen in a non-malicious system. */
            assert(false);
            continue;
        }

        /* The caller must be waiting for a box to complete this slot. */
        if (caller_msg->state != UVISOR_RPC_MESSAGE_STATE_SENT)
        {
            /* The caller isn't waiting for any box to complete it. This
             * shouldn't happen in a non-malicious system. */
            assert(false);
            continue;
        }

        /* The match_cookie must be same. */
        if (caller_msg->match_cookie != callee_msg->match_cookie) {
            /* The match cookies didn't match. This shouldn't happen in a
             * non-malicious system. */
            assert(false);
            continue;
        }

        /* Copy the result to the message in the caller box outgoing message
         * queue. */
        caller_msg->result = callee_msg->result;
        callee_msg->state = UVISOR_RPC_MESSAGE_STATE_IDLE;
        caller_msg->state = UVISOR_RPC_MESSAGE_STATE_DONE;

        /* Now that we've copied the result, we can free the message from the
         * callee queue. The callee (the one sending result messages) doesn't
         * care about the message after they post it to their outgoing result
         * queue. */
        callee_slot = uvisor_pool_queue_try_free(callee_queue, callee_slot);
        if (callee_slot >= callee_queue->pool->num) {
            /* The queue is empty or busy. This should never happen. We were
             * able to dequeue a result message, but weren't able to free the
             * result message. It is bad to take down the entire system. It is
             * also bad to never free slots in the outgoing result queue.
             * However, if we could dequeue the slot we should have no trouble
             * freeing the slot here. */
            assert(false);
            break;
        }

        /* Post to the result semaphore, ignoring errors. */
        int status;
        status = semaphore_post(&caller_msg->semaphore);
        if (status) {
            /* We couldn't post to the result semaphore. We shouldn't really
             * bring down the entire system if one box messes up its own
             * semaphore. In a non-malicious system, this should never happen.
             * */
            assert(false);
        }
    } while (1);
}
Example #5
0
/* Drain the active (caller) box's outgoing RPC message queue: validate each
 * message's gateway, copy it into the target (callee) box's todo queue, mark
 * both sides SENT, and wake any handler waiting on the target function.
 * Messages that cannot be delivered right now are either put back (callee
 * queue full) or dropped (invalid gateway / malicious callee queue). */
static void drain_message_queue(void)
{
    /* Context of the currently active box; it is the caller whose outgoing
     * messages we deliver. */
    UvisorBoxIndex * caller_index = (UvisorBoxIndex *) *__uvisor_config.uvisor_box_context;
    uvisor_pool_queue_t * caller_queue = &caller_index->rpc_outgoing_message_queue->queue;
    uvisor_rpc_message_t * caller_array = (uvisor_rpc_message_t *) caller_queue->pool->array;
    int caller_box = g_active_box;
    /* Sentinel for "no slot seen yet"; used to detect that we have looped
     * around to a message we already put back this pass.
     * NOTE(review): first_slot is a signed int compared against the unsigned
     * uvisor_pool_slot_t — confirm slot values stay well below INT_MAX so the
     * mixed-sign comparison at the loop check is safe. */
    int first_slot = -1;

    /* Verify that the caller queue is entirely in caller box BSS. We check the
     * entire queue instead of just the message we are interested in, because
     * we want to validate the queue before we attempt any operations on it,
     * like dequeing. */
    if (!is_valid_queue(caller_queue, caller_box))
    {
        /* The caller's outgoing queue is not valid. This shouldn't happen in a
         * non-malicious system. */
        assert(false);
        return;
    }

    /* For each message in the queue: */
    do {
        uvisor_pool_slot_t caller_slot;

        /* NOTE: We only dequeue the message from the queue. We don't free
         * the message from the pool. The caller will free the message from the
         * pool after finish waiting for the RPC to finish. */
        caller_slot = uvisor_pool_queue_try_dequeue_first(caller_queue);
        if (caller_slot >= caller_queue->pool->num) {
            /* The queue is empty or busy. */
            break;
        }

        /* If we have seen this slot before, stop processing the queue. */
        if (first_slot == -1) {
            first_slot = caller_slot;
        } else if (caller_slot == first_slot) {
            put_it_back(caller_queue, caller_slot);

            /* Stop looping, because the system needs to continue running so
             * the callee messages can get processed to free up more room.
             * */
            break;
        }

        /* We would like to finish processing all messages in the queue, even
         * if one can't be delivered now. We currently just stop when we can't
         * deliver one message and never attempt the rest. */

        uvisor_rpc_message_t * caller_msg = &caller_array[caller_slot];

        /* Validate the gateway */
        const TRPCGateway * const gateway = caller_msg->gateway;
        if (!is_valid_rpc_gateway(gateway)) {
            /* The RPC gateway is not valid. Don't put the message back onto
             * the queue. Move on to next items. On a non-malicious system, the
             * gateway should always be valid here. */
            assert(false);
            continue;
        }

        /* Look up the callee box. */
        const int callee_box = callee_box_id(gateway);
        if (callee_box < 0) {
            /* This shouldn't happen, because the gateway was already verified.
             * */
            assert(false);
            continue;
        }

        /* Resolve the callee box's incoming (todo) queue from its BSS. */
        UvisorBoxIndex * callee_index = (UvisorBoxIndex *) g_context_current_states[callee_box].bss;
        uvisor_pool_queue_t * callee_queue = &callee_index->rpc_incoming_message_queue->todo_queue;
        uvisor_rpc_message_t * callee_array = (uvisor_rpc_message_t *) callee_queue->pool->array;

        /* Verify that the callee queue is entirely in callee box BSS. We check the
         * entire queue instead of just the message we are interested in, because
         * we want to validate the queue before we attempt any operations on it,
         * like allocating. */
        if (!is_valid_queue(callee_queue, callee_box))
        {
            /* The callee's todo queue is not valid. This shouldn't happen in a
             * non-malicious system. Don't put the caller's message back into
             * the queue; this is the same behavior (from the caller's
             * perspective) as a malicious box never completing RPCs. */
            assert(false);
            continue;
        }

        /* Place the message into the callee box queue. */
        uvisor_pool_slot_t callee_slot = uvisor_pool_queue_try_allocate(callee_queue);

        /* If the queue is not busy and there is space in the callee queue: */
        if (callee_slot < callee_queue->pool->num)
        {
            int status;
            uvisor_rpc_message_t * callee_msg = &callee_array[callee_slot];

            /* Deliver the message. */
            callee_msg->p0 = caller_msg->p0;
            callee_msg->p1 = caller_msg->p1;
            callee_msg->p2 = caller_msg->p2;
            callee_msg->p3 = caller_msg->p3;
            callee_msg->gateway = caller_msg->gateway;
            /* Set the ID of the calling box in the message. */
            callee_msg->other_box_id = caller_box;
            callee_msg->match_cookie = caller_msg->match_cookie;
            callee_msg->state = UVISOR_RPC_MESSAGE_STATE_SENT;

            /* Mirror the delivery on the caller side so the result drain can
             * later match this slot against the callee's reply. */
            caller_msg->other_box_id = callee_box;
            caller_msg->state = UVISOR_RPC_MESSAGE_STATE_SENT;

            /* Enqueue the message */
            status = uvisor_pool_queue_try_enqueue(callee_queue, callee_slot);
            /* We should always be able to enqueue, since we were able to
             * allocate the slot. Nobody else should have been able to run and
             * take the spin lock. */
            if (status) {
                /* We were able to get the callee RPC slot allocated, but
                 * couldn't enqueue the message. It is bad to take down the
                 * entire system. It is also bad to keep the allocated slot
                 * around. However, if we couldn't enqueue the slot, we'll have
                 * a hard time freeing it, since that requires the same lock.
                 * */
                assert(false);

                /* Put the message back into the queue, as we may be able to
                 * enqueue the message when we try again later. This is likely
                 * to fail as well, if we couldn't enqueue the message. However,
                 * if we can't put it back now, there is nothing we can do and
                 * the message must be lost. */
                put_it_back(caller_queue, caller_slot);
                continue;
            }

            /* Poke anybody waiting on calls to this target function. If nobody
             * is waiting, the item will remain in the incoming queue. The
             * first time a rpc_fncall_waitfor is called for a function group,
             * rpc_fncall_waitfor will check to see if there are any messages
             * it can handle from before the function group existed. */
            wake_up_handlers_for_target((TFN_Ptr)gateway->target, callee_box);
        }

        /* If there was no room in the callee queue: */
        if (callee_slot >= callee_queue->pool->num)
        {
            /* Put the message back into the caller queue. This applies
             * backpressure on the caller when the callee is too busy. Note
             * that no data needs to be copied; only the caller queue's
             * management array is modified. */
            put_it_back(caller_queue, caller_slot);
        }
    } while (1);
}