Example #1
static void handle_outbound(struct aura_node *node, struct aura_object *o, struct aura_buffer *buf)
{
	int ret = -EIO;

	if (OPCODE("export")) {
		int gpio = aura_buffer_get_u32(buf);
		slog(4, SLOG_DEBUG, "gpio: export %d", gpio);
		ret = gpio_export(gpio);
	} else if (OPCODE("write")) {
		int gpio = aura_buffer_get_u32(buf);
		int value = aura_buffer_get_u32(buf);
		slog(4, SLOG_DEBUG, "gpio: write gpio %d value %d", gpio, value);
		ret = gpio_write(gpio, value);
	} else if (OPCODE("in")) {
		int gpio = aura_buffer_get_u32(buf);
		ret = gpio_in(gpio);
	} else if (OPCODE("out")) {
		int gpio = aura_buffer_get_u32(buf);
		ret = gpio_out(gpio);
	} else if (OPCODE("read")) {
		int gpio = aura_buffer_get_u32(buf);
		ret = gpio_read(gpio, &gpio);
		aura_buffer_rewind(buf);
		aura_buffer_put_u32(buf, gpio);
	}
	slog(0, SLOG_DEBUG, "gpio ret = %d", ret);
	if (ret) {
		aura_call_fail(node, o);
		return;
	}
	aura_queue_buffer(&node->inbound_buffers, buf);
}
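The OPCODE() macro is not part of this excerpt. A minimal sketch of what it could look like, assuming it simply matches the current object's name against the opcode string (the real definition may differ):

#include <string.h>

/* Hypothetical helper: true when the object being handled (o) carries
 * the given opcode name. Assumed for illustration; not from this excerpt. */
#define OPCODE(_name) (!strcmp(o->name, (_name)))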
Example #2
/**
 * Dequeue the next buffer from a queue and return it.
 * This function sets the buffer's internal data pointer
 * to the beginning of the serialized data by calling aura_buffer_rewind() internally.
 * @param head the queue to dequeue from
 * @return the dequeued aura_buffer, or NULL if the queue is empty
 */
struct aura_buffer *aura_dequeue_buffer(struct list_head *head)
{
	struct aura_buffer *ret;
	ret = aura_peek_buffer(head);
	if (ret) {
		list_del(head->next);
		aura_buffer_rewind(ret);
	}
	return ret;
}
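A minimal consumer sketch built on this helper; the queue field and the get/release calls are the ones used in Examples #1 and #4:

struct aura_buffer *buf;

/* Drain the queue: aura_dequeue_buffer() returns NULL when empty, and
 * every returned buffer is already rewound to the start of its payload. */
while ((buf = aura_dequeue_buffer(&node->inbound_buffers)) != NULL) {
	int first = aura_buffer_get_u32(buf);
	/* ... process the payload ... */
	aura_buffer_release(node, buf);
}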
Example #3
/**
 * Request an aura_buffer for this node big enough to contain at least size bytes of data.
 *
 * The buffer's owner will be set to point to the node that issued the request.
 *
 * If the node's transport overrides buffer allocation, the transport-specific
 * allocation function will be called instead of the internal allocator.
 *
 * @param nd the node requesting the buffer
 * @param size the minimum payload size, in bytes
 * @return the requested aura_buffer
 */
struct aura_buffer *aura_buffer_request(struct aura_node *nd, int size)
{
	struct aura_buffer *ret;
	/* Account for the buffer header and the transport's per-buffer overhead */
	int act_size = sizeof(struct aura_buffer) + size;

	act_size += nd->tr->buffer_overhead;

	/* Let the transport allocate the buffer if it overrides allocation */
	if (!nd->tr->buffer_request)
		ret = aura_buffer_internal_request(act_size);
	else
		ret = nd->tr->buffer_request(nd, act_size);

	ret->size = act_size - sizeof(struct aura_buffer);
	ret->owner = nd;
	aura_buffer_rewind(ret);
	return ret;
}
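A sketch of a typical caller, assuming an outbound_buffers queue on the node by analogy with the inbound_buffers field from Example #1; the put/queue calls are the ones shown elsewhere on this page:

/* Allocate room for two u32 arguments and hand the buffer to the node.
 * node->outbound_buffers is an assumed field name, mirroring
 * node->inbound_buffers from Example #1. */
struct aura_buffer *buf = aura_buffer_request(node, 2 * sizeof(uint32_t));
aura_buffer_put_u32(buf, gpio);   /* hypothetical argument values */
aura_buffer_put_u32(buf, value);
aura_queue_buffer(&node->outbound_buffers, buf);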
Example #4
/* This one is small, but tricky */
static void aura_handle_inbound(struct aura_node *node)
{
    while(1) {
        struct aura_buffer *buf;
        struct aura_object *o;

        buf = aura_dequeue_buffer(&node->inbound_buffers);
        if (!buf)
            break;

        o = buf->object;
        node->current_object = o;
        aura_buffer_rewind(buf);

        slog(4, SLOG_DEBUG, "Handling %s id %d (%s) sync_call_running=%d",
             object_is_method(o) ? "response" : "event",
             o->id, o->name, node->sync_call_running);

        if (object_is_method(o) && !o->pending) {
            slog(0, SLOG_WARN, "Dropping orphan call result %d (%s)",
                 o->id, o->name);
            aura_buffer_release(node, buf);
        } else if (o->calldonecb) {
            slog(4, SLOG_DEBUG, "Callback for method/event %d (%s)",
                 o->id, o->name);
            o->calldonecb(node, AURA_CALL_COMPLETED, buf, o->arg);
            aura_buffer_release(node, buf);
        } else if (object_is_method(o) && (node->sync_call_running)) {
            slog(4, SLOG_DEBUG, "Completing call for method %d (%s)",
                 o->id, o->name);
            node->sync_call_result = AURA_CALL_COMPLETED;
            node->sync_ret_buf = buf;
            o->pending--;
            if (o->pending < 0)
                BUG(node, "Internal BUG: pending evt count less than zero");
        } else {
            /* This one is tricky. We have an event with no callback */
            if (node->sync_event_max > 0) { /* Queue it up into event_queue if it's enabled */
                /* If we have an overrun - drop the oldest event to free up space first*/
                if (node->sync_event_max <= node->sync_event_count) {
                    struct aura_buffer *todrop;
                    const struct aura_object *dummy;
                    int ret = aura_get_next_event(node, &dummy, &todrop);
                    if (ret != 0)
                        BUG(node, "Internal bug, no next event");
                    aura_buffer_release(node, todrop);
                }

                /* Now just queue the next one */
                aura_queue_buffer(&node->event_buffers, buf);
                node->sync_event_count++;
                slog(4, SLOG_DEBUG, "Queued event %d (%s) for sync readout",
                     o->id, o->name);
            } else {
                /* Last resort - try the catch-all event callback */
                if (node->unhandled_evt_cb)
                    node->unhandled_evt_cb(node, buf, node->unhandled_evt_arg);
                else /* Or just drop it with a warning */
                    slog(0, SLOG_WARN, "Dropping event %d (%s)",
                         o->id, o->name);
                aura_buffer_release(node, buf);
            }
        }
    }

    node->current_object = NULL;
}
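The catch-all path above implies a callback taking the node, the buffer, and a user argument. A hedged sketch of wiring one up by direct field assignment (the library may provide a dedicated setter instead):

/* Matches the call site node->unhandled_evt_cb(node, buf, arg) above.
 * The dispatch loop releases buf after the callback returns, so the
 * handler must not free it itself. */
static void catch_all_events(struct aura_node *node, struct aura_buffer *buf, void *arg)
{
	(void)arg;
	slog(4, SLOG_DEBUG, "catch-all: event %d (%s)",
	     node->current_object->id, node->current_object->name);
	(void)buf; /* payload could be read here with aura_buffer_get_u32() */
}

/* Somewhere during setup: */
node->unhandled_evt_cb  = catch_all_events;
node->unhandled_evt_arg = NULL;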
Example #5
/**
 * Add an aura_buffer to a queue.
 * This function sets the buffer's internal data pointer
 * to the beginning of the serialized data by calling aura_buffer_rewind() internally.
 *
 * @param queue the queue to add the buffer to
 * @param buf the buffer to enqueue
 */
void aura_queue_buffer(struct list_head *queue, struct aura_buffer *buf)
{ 
	list_add_tail(&buf->qentry, queue);
	aura_buffer_rewind(buf);
}
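Since both helpers rewind the buffer, a queue/dequeue roundtrip leaves the consumer positioned at the start of the payload. A small sketch, assuming the kernel-style LIST_HEAD() initializer that matches the list_add_tail()/list_del() calls used here:

LIST_HEAD(staging);                 /* empty, self-linked list head */

aura_queue_buffer(&staging, buf);   /* append and rewind */
/* ... */
struct aura_buffer *next = aura_dequeue_buffer(&staging);
/* next == buf, already rewound; read with aura_buffer_get_u32(next) etc. */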