/*
 * rpc_recv - receive an RPC reply over the control channel (and/or bulk).
 *
 * out:    destination buffer for control-channel payload (RPC_RECV_FLAG_CTRL).
 * len_io: in/out length; may be NULL, in which case a local is substituted.
 * flags:  combination of RPC_RECV_FLAG_{RES,CTRL,LEN,BULK}.
 *
 * The control message layout decoded here is: optional length word
 * (FLAG_LEN), optional result word (FLAG_RES), then optional padded
 * payload (FLAG_CTRL) - the trailing assert checks the whole header was
 * consumed. CTRL and BULK are mutually exclusive for user data (asserted).
 *
 * NOTE(review): this definition is truncated at the end of this chunk -
 * the bulk-receive path and the final return are not visible here.
 */
uint32_t rpc_recv(void *out, uint32_t *len_io, RPC_RECV_FLAG_T flags) { CLIENT_THREAD_STATE_T *thread = CLIENT_GET_THREAD_STATE(); uint32_t res = 0; uint32_t len; bool recv_ctrl; if (!len_io) { len_io = &len; } recv_ctrl = flags & (RPC_RECV_FLAG_RES | RPC_RECV_FLAG_CTRL | RPC_RECV_FLAG_LEN); /* do we want to receive anything in the control channel at all? */ assert(recv_ctrl || (flags & RPC_RECV_FLAG_BULK)); /* must receive something... */ assert(!(flags & RPC_RECV_FLAG_CTRL) || !(flags & RPC_RECV_FLAG_BULK)); /* can't receive user data over both bulk and control... */ if (recv_ctrl || len_io[0]) { /* do nothing if we're just receiving bulk of length 0 */ merge_flush(thread); if (recv_ctrl) { VCHIQ_HEADER_T *header = vchiu_queue_pop(get_queue(thread)); uint32_t *ctrl = (uint32_t *)header->data; assert(header->size == rpc_pad_ctrl(header->size)); if (flags & RPC_RECV_FLAG_LEN) { len_io[0] = *(ctrl++); } if (flags & RPC_RECV_FLAG_RES) { res = *(ctrl++); } if (flags & RPC_RECV_FLAG_CTRL) { memcpy(out, ctrl, len_io[0]); ctrl += rpc_pad_ctrl(len_io[0]) >> 2; } assert((uint8_t *)ctrl == ((uint8_t *)header->data + header->size)); vchiq_release_message(get_handle(thread), header); }
/*
 * shim_callback - VCHIQ -> VCHI callback adaptor for shim services.
 *
 * Translates VCHIQ callback reasons into the equivalent VCHI callback
 * codes and forwards them to the service's registered callback.
 * Incoming messages (VCHIQ_MESSAGE_AVAILABLE) are pushed onto the
 * service's queue so the client can retrieve them later via
 * vchi_msg_dequeue()/vchi_msg_remove(); that path must NOT release the
 * header here.
 *
 * Returns VCHIQ_SUCCESS always.
 */
static VCHIQ_STATUS_T shim_callback(VCHIQ_REASON_T reason,
	VCHIQ_HEADER_T *header, VCHIQ_SERVICE_HANDLE_T handle, void *bulk_user)
{
	struct shim_service *service =
		(struct shim_service *)VCHIQ_GET_SERVICE_USERDATA(handle);

	if (!service->callback)
		goto release;

	switch (reason) {
	case VCHIQ_MESSAGE_AVAILABLE:
		/* Queued for later consumption - skip the release below. */
		vchiu_queue_push(&service->queue, header);
		service->callback(service->callback_param,
				  VCHI_CALLBACK_MSG_AVAILABLE, NULL);
		goto done;

	case VCHIQ_BULK_TRANSMIT_DONE:
		service->callback(service->callback_param,
				  VCHI_CALLBACK_BULK_SENT, bulk_user);
		break;

	case VCHIQ_BULK_RECEIVE_DONE:
		service->callback(service->callback_param,
				  VCHI_CALLBACK_BULK_RECEIVED, bulk_user);
		break;

	case VCHIQ_SERVICE_CLOSED:
		service->callback(service->callback_param,
				  VCHI_CALLBACK_SERVICE_CLOSED, NULL);
		break;

	case VCHIQ_SERVICE_OPENED:
		/* No equivalent VCHI reason */
		break;

	case VCHIQ_BULK_TRANSMIT_ABORTED:
		service->callback(service->callback_param,
				  VCHI_CALLBACK_BULK_TRANSMIT_ABORTED, bulk_user);
		break;

	case VCHIQ_BULK_RECEIVE_ABORTED:
		service->callback(service->callback_param,
				  VCHI_CALLBACK_BULK_RECEIVE_ABORTED, bulk_user);
		break;

	default:
		WARN(1, "not supported\n");
		break;
	}

release:
	/*
	 * NOTE(review): only VCHIQ_MESSAGE_AVAILABLE delivers a message
	 * header; bulk-done/aborted and close notifications reach this
	 * label with a NULL header, which must not be passed to
	 * vchiq_release_message() (it dereferences the header). Guard
	 * added - confirm against the vchiq core's callback contract.
	 */
	if (header)
		vchiq_release_message(service->handle, header);
done:
	return VCHIQ_SUCCESS;
}
/***********************************************************
 * Name: vchi_msg_dequeue
 *
 * Arguments:  VCHI_SERVICE_HANDLE_T handle,
 *             void *data,
 *             uint32_t max_data_size_to_read,
 *             uint32_t *actual_msg_size
 *             VCHI_FLAGS_T flags
 *
 * Description: Routine to dequeue a message into the supplied buffer
 *
 * Returns: int32_t - success == 0
 *
 ***********************************************************/
int32_t vchi_msg_dequeue(VCHI_SERVICE_HANDLE_T handle, void *data,
	uint32_t max_data_size_to_read, uint32_t *actual_msg_size,
	VCHI_FLAGS_T flags)
{
	struct shim_service *service = (struct shim_service *)handle;
	VCHIQ_HEADER_T *header;
	uint32_t copy_len;

	WARN_ON((flags != VCHI_FLAGS_NONE) &&
		(flags != VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE));

	/* Non-blocking mode: fail immediately when nothing is queued. */
	if ((flags == VCHI_FLAGS_NONE) && vchiu_queue_is_empty(&service->queue))
		return -1;

	header = vchiu_queue_pop(&service->queue);

	/* Copy no more than the caller's buffer can hold. */
	copy_len = header->size;
	if (copy_len > max_data_size_to_read)
		copy_len = max_data_size_to_read;
	memcpy(data, header->data, copy_len);

	/* Report the full message size even when it was truncated. */
	*actual_msg_size = header->size;

	vchiq_release_message(service->handle, header);

	return 0;
}
/******************************************************************************
NAME
   gpuserv_callback

SYNOPSIS
   void gpuserv_callback( VCHIQ_REASON_T reason,
                          VCHIQ_HEADER_T *header,
                          VCHIQ_SERVICE_HANDLE_T service,
                          void *bulk_userdata )

FUNCTION
   VCHIQ callback. On VCHIQ_MESSAGE_AVAILABLE the message payload is a
   struct gpu_callback_s; its function pointer (if set) is invoked with
   its cookie, and the message is released. All other reasons are ignored.

RETURNS
   zero on success
******************************************************************************/
static VCHIQ_STATUS_T gpuserv_callback(VCHIQ_REASON_T reason,
	VCHIQ_HEADER_T *header, VCHIQ_SERVICE_HANDLE_T service,
	void *bulk_userdata)
{
	/* bulk_userdata is unused on the message path handled here */
	(void)bulk_userdata;

	switch (reason) {
	case VCHIQ_MESSAGE_AVAILABLE:
	{
		struct gpu_callback_s *c = (struct gpu_callback_s *)header->data;
		if (c->func)
			c->func(c->cookie);
		/* Release the command message once dispatched */
		vchiq_release_message(service, header);
		break;
	}
	default:
		/* VCHIQ_BULK_TRANSMIT_DONE / VCHIQ_BULK_RECEIVE_DONE etc.:
		 * nothing to do */
		break;
	}
	return 0;
}
/*
 * VCHIQ callback for the VC watchdog service: completes the ping-response
 * completion when the expected WDOG_PING_RESPONSE word arrives, logs and
 * ignores anything else, and always releases the message. Non-message
 * reasons are ignored.
 */
static VCHIQ_STATUS_T vc_watchdog_vchiq_callback(VCHIQ_REASON_T reason,
	VCHIQ_HEADER_T *header, VCHIQ_SERVICE_HANDLE_T service_user,
	void *bulk_user)
{
	if (reason == VCHIQ_MESSAGE_AVAILABLE) {
		unsigned long *response = (unsigned long *)header->data;

		if (response && *response == WDOG_PING_RESPONSE) {
			complete(&vc_wdog_state->wdog_ping_response);
			LOG_DBG("%s received ping response", __func__);
		} else {
			LOG_ERR("%s received unexpected message - ignoring",
				__func__);
		}
		vchiq_release_message(service_user, header);
	}
	return VCHIQ_SUCCESS;
}
/***********************************************************
 * Name: vchi_msg_remove
 *
 * Arguments:  const VCHI_SERVICE_HANDLE_T handle,
 *
 * Description: Routine to remove a message (after it has been read with
 *              vchi_msg_peek)
 *
 * Returns: int32_t - success == 0
 *
 ***********************************************************/
int32_t vchi_msg_remove(VCHI_SERVICE_HANDLE_T handle)
{
	struct shim_service *service = (struct shim_service *)handle;

	/* Pop the message previously observed by vchi_msg_peek and hand
	 * it back to VCHIQ. */
	VCHIQ_HEADER_T *header = vchiu_queue_pop(&service->queue);

	vchiq_release_message(service->handle, header);

	return 0;
}
/*
 * VCHIQ callback for the khan service: incoming messages carry no payload
 * that needs handling here, so they are released immediately; every other
 * reason is ignored.
 */
VCHIQ_STATUS_T khan_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
	VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
{
	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		vchiq_release_message(handle, header);

	return VCHIQ_SUCCESS;
}
/*
 * recv_bulk - receive 'len' bytes of bulk RPC data into 'out'.
 *
 * Small transfers (<= CTRL_THRESHOLD) arrive as ordinary control-channel
 * messages and are simply copied out; larger ones use a VCHIQ bulk
 * receive, waiting on bulk_event for completion.
 */
static void recv_bulk(CLIENT_THREAD_STATE_T *thread, void *out, uint32_t len)
{
	if (len <= CTRL_THRESHOLD) {
		/* Payload was sent inline over the control channel. */
		VCHIQ_HEADER_T *header = vchiu_queue_pop(get_queue(thread));
		assert(header->size == len);
		memcpy(out, header->data, len);
		vchiq_release_message(get_handle(thread), header);
	} else {
		VCHIQ_STATUS_T vchiq_status = vchiq_queue_bulk_receive(
			get_handle(thread), out, rpc_pad_bulk(len), NULL);
		assert(vchiq_status == VCHIQ_SUCCESS);
		/* Keep the status referenced when NDEBUG strips the assert,
		 * avoiding a set-but-unused warning. */
		(void)vchiq_status;

		VCOS_STATUS_T vcos_status = vcos_event_wait(&bulk_event);
		assert(vcos_status == VCOS_SUCCESS);
		(void)vcos_status;
	}
}
/***********************************************************
 * Name: vchi_held_msg_release
 *
 * Arguments:  VCHI_HELD_MSG_T *message
 *
 * Description: Routine to release a held message (after it has been read with
 *              vchi_msg_hold)
 *
 * Returns: int32_t - success == 0
 *
 ***********************************************************/
int32_t vchi_held_msg_release(VCHI_HELD_MSG_T *message)
{
	/*
	 * The service field is an opaque pointer that vchi_msg_hold built
	 * by upcasting the integer VCHIQ_SERVICE_HANDLE_T; undo that here
	 * (via long, to keep the pointer->int narrowing well-defined on
	 * LP64) before handing the header back to VCHIQ.
	 */
	VCHIQ_SERVICE_HANDLE_T service_handle =
		(VCHIQ_SERVICE_HANDLE_T)(long)message->service;

	vchiq_release_message(service_handle,
			      (VCHIQ_HEADER_T *)message->message);

	return 0;
}
/*
 * mmal_vc_vchiq_callback - central VCHIQ callback for the MMAL VC client.
 *
 * Handles, by reason:
 *  - VCHIQ_MESSAGE_AVAILABLE:
 *      MMAL_WORKER_BUFFER_TO_HOST: a buffer coming back from VideoCore.
 *        Replicates reference buffers, sanity-checks sizes, then either
 *        (a) queues a word-aligned bulk receive (passing vchiq_header as the
 *        bulk context so it is released on completion), (b) copies "short"
 *        payloads embedded in the message and calls the client callback, or
 *        (c) for empty/zero-copy buffers calls the client callback directly.
 *        On error paths the length is zeroed, the callback is still invoked,
 *        and the message is released here.
 *      MMAL_WORKER_EVENT_TO_HOST: delegated to mmal_vc_handle_event_msg().
 *      Otherwise: a reply for a waiter - copies the payload into the
 *        waiter's buffer (clamped to destlen), releases the message and
 *        posts the waiter's semaphore.
 *  - VCHIQ_BULK_TRANSMIT_DONE: log only; buffer recycling waits for the
 *      copro's explicit notification.
 *  - VCHIQ_BULK_RECEIVE_DONE / _ABORTED: context is the original
 *      vchiq_header; dispatches the buffer or delayed event buffer to the
 *      appropriate client callback (flagging TRANSMISSION_FAILED on abort)
 *      and releases the header.
 *  - VCHIQ_BULK_TRANSMIT_ABORTED: log/assert only; VC releases the buffer.
 *
 * NOTE(review): release/callback ordering differs deliberately between the
 * bulk and short-data paths; kept byte-identical - do not reorder.
 */
/** Callback invoked by VCHIQ */ static VCHIQ_STATUS_T mmal_vc_vchiq_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *vchiq_header, VCHIQ_SERVICE_HANDLE_T service, void *context) { LOG_TRACE("reason %d", reason); switch (reason) { case VCHIQ_MESSAGE_AVAILABLE: { mmal_worker_msg_header *msg = (mmal_worker_msg_header*)vchiq_header->data; vcos_assert(msg->magic == MMAL_MAGIC); if (msg->msgid == MMAL_WORKER_BUFFER_TO_HOST) { LOG_TRACE("buffer to host"); mmal_worker_buffer_from_host *msg = (mmal_worker_buffer_from_host *)vchiq_header->data; LOG_TRACE("len %d context %p", msg->buffer_header.length, msg->drvbuf.client_context); vcos_assert(msg->drvbuf.client_context); vcos_assert(msg->drvbuf.client_context->magic == MMAL_MAGIC); /* If the buffer is referencing another, need to replicate it here * in order to use the reference buffer's payload and ensure the * reference is not released prematurely */ if (msg->has_reference) mmal_buffer_header_replicate(msg->drvbuf.client_context->buffer, msg->drvbuf_ref.client_context->buffer); /* Sanity check the size of the transfer so we don't overrun our buffer */ if (!vcos_verify(msg->buffer_header.offset + msg->buffer_header.length <= msg->drvbuf.client_context->buffer->alloc_size)) { LOG_TRACE("buffer too small (%i, %i)", msg->buffer_header.offset + msg->buffer_header.length, msg->drvbuf.client_context->buffer->alloc_size); msg->buffer_header.length = 0; /* FIXME: set a buffer flag to signal error */ msg->drvbuf.client_context->callback(msg); vchiq_release_message(service, vchiq_header); break; } /*To handle VC to HOST filled buffer callback of EOS buffer to receive in sync with data buffers*/ if (!msg->is_zero_copy && (msg->buffer_header.length != 0 || (msg->buffer_header.flags & MMAL_BUFFER_HEADER_FLAG_EOS))) { /* a buffer full of data for us to process */ VCHIQ_STATUS_T vst = VCHIQ_SUCCESS; LOG_TRACE("queue bulk rx: %p, %d", msg->drvbuf.client_context->buffer->data + msg->buffer_header.offset, msg->buffer_header.length); int len = 
msg->buffer_header.length; len = (len+3) & (~3); if (!len && (msg->buffer_header.flags & MMAL_BUFFER_HEADER_FLAG_EOS)) { len = 8; } if (!msg->payload_in_message) { /* buffer transferred using vchiq bulk xfer */ vst = vchiq_queue_bulk_receive(service, msg->drvbuf.client_context->buffer->data + msg->buffer_header.offset, len, vchiq_header); if (vst != VCHIQ_SUCCESS) { LOG_TRACE("queue bulk rx len %d failed to start", msg->buffer_header.length); msg->buffer_header.length = 0; /* FIXME: set a buffer flag to signal error */ msg->drvbuf.client_context->callback(msg); vchiq_release_message(service, vchiq_header); } } else if (msg->payload_in_message <= MMAL_VC_SHORT_DATA) { /* we have already received the buffer data in the message! */ MMAL_BUFFER_HEADER_T *dst = msg->drvbuf.client_context->buffer; LOG_TRACE("short data: dst = %p, dst->data = %p, len %d short len %d", dst, dst? dst->data : 0, msg->buffer_header.length, msg->payload_in_message); memcpy(dst->data, msg->short_data, msg->payload_in_message); dst->offset = 0; dst->length = msg->payload_in_message; vchiq_release_message(service, vchiq_header); msg->drvbuf.client_context->callback(msg); } else { /* impossible short data length */ LOG_ERROR("Message with invalid short payload length %d", msg->payload_in_message); vcos_assert(0); } } else { /* Message received from videocore; the client_context should have * been passed all the way through by videocore back to us, and will * be picked up in the callback to complete the sequence. */ LOG_TRACE("doing cb (%p) context %p", msg->drvbuf.client_context, msg->drvbuf.client_context ? 
msg->drvbuf.client_context->callback : 0); msg->drvbuf.client_context->callback(msg); LOG_TRACE("done callback back to client"); vchiq_release_message(service, vchiq_header); } } else if (msg->msgid == MMAL_WORKER_EVENT_TO_HOST) { mmal_vc_handle_event_msg(vchiq_header, service, context); } else { MMAL_WAITER_T *waiter = msg->u.waiter; LOG_TRACE("waking up waiter at %p", waiter); vcos_assert(waiter->inuse); int len = vcos_min(waiter->destlen, vchiq_header->size); waiter->destlen = len; LOG_TRACE("copying payload @%p to %p len %d", waiter->dest, msg, len); memcpy(waiter->dest, msg, len); vchiq_release_message(service, vchiq_header); vcos_semaphore_post(&waiter->sem); } } break; case VCHIQ_BULK_TRANSMIT_DONE: { /* nothing to do here, need to wait for the copro to tell us it * has emptied the buffer before we can recycle it, otherwise we * end up feeding the copro with buffers it cannot handle. */ #ifdef VCOS_LOGGING_ENABLED mmal_worker_buffer_from_host *msg = (mmal_worker_buffer_from_host *)context; #endif LOG_TRACE("bulk tx done: %p, %d", msg->buffer_header.data, msg->buffer_header.length); } break; case VCHIQ_BULK_RECEIVE_DONE: { VCHIQ_HEADER_T *header = (VCHIQ_HEADER_T *)context; mmal_worker_msg_header *msg_hdr = (mmal_worker_msg_header*)header->data; if (msg_hdr->msgid == MMAL_WORKER_BUFFER_TO_HOST) { mmal_worker_buffer_from_host *msg = (mmal_worker_buffer_from_host *)msg_hdr; vcos_assert(msg->drvbuf.client_context->magic == MMAL_MAGIC); msg->drvbuf.client_context->callback(msg); LOG_TRACE("bulk rx done: %p, %d", msg->buffer_header.data, msg->buffer_header.length); } else { mmal_worker_event_to_host *msg = (mmal_worker_event_to_host *)msg_hdr; MMAL_PORT_T *port = mmal_vc_port_by_number(msg->client_component, msg->port_type, msg->port_num); vcos_assert(port); mmal_buffer_header_driver_data(msg->delayed_buffer)-> client_context->callback_event(port, msg->delayed_buffer); LOG_DEBUG("event bulk rx done, length %d", msg->length); } vchiq_release_message(service, 
header); } break; case VCHIQ_BULK_RECEIVE_ABORTED: { VCHIQ_HEADER_T *header = (VCHIQ_HEADER_T *)context; mmal_worker_msg_header *msg_hdr = (mmal_worker_msg_header*)header->data; if (msg_hdr->msgid == MMAL_WORKER_BUFFER_TO_HOST) { mmal_worker_buffer_from_host *msg = (mmal_worker_buffer_from_host *)msg_hdr; LOG_TRACE("bulk rx aborted: %p, %d", msg->buffer_header.data, msg->buffer_header.length); vcos_assert(msg->drvbuf.client_context->magic == MMAL_MAGIC); msg->buffer_header.flags |= MMAL_BUFFER_HEADER_FLAG_TRANSMISSION_FAILED; msg->drvbuf.client_context->callback(msg); } else { mmal_worker_event_to_host *msg = (mmal_worker_event_to_host *)msg_hdr; MMAL_PORT_T *port = mmal_vc_port_by_number(msg->client_component, msg->port_type, msg->port_num); vcos_assert(port); LOG_DEBUG("event bulk rx aborted"); msg->delayed_buffer->flags |= MMAL_BUFFER_HEADER_FLAG_TRANSMISSION_FAILED; mmal_buffer_header_driver_data(msg->delayed_buffer)-> client_context->callback_event(port, msg->delayed_buffer); } vchiq_release_message(service, header); } break; case VCHIQ_BULK_TRANSMIT_ABORTED: { mmal_worker_buffer_from_host *msg = (mmal_worker_buffer_from_host *)context; LOG_INFO("bulk tx aborted: %p, %d", msg->buffer_header.data, msg->buffer_header.length); vcos_assert(msg->drvbuf.client_context->magic == MMAL_MAGIC); /* Nothing to do as the VC side will release the buffer and notify us of the error */ } break; default: break; } return VCHIQ_SUCCESS; }
/*
 * mmal_vc_handle_event_msg - deliver a VideoCore event message to the client.
 *
 * Looks up the destination port from the component/port numbers in the
 * message, claims an event buffer for the event command, and validates
 * both the message length against the buffer and the buffer's driver
 * context (magic values and callback_event presence).
 *
 * Delivery has two paths:
 *  - length > MMAL_WORKER_EVENT_SPACE: the payload follows via a bulk
 *    transfer, so a word-aligned bulk receive is queued with vchiq_header
 *    as the context (the VCHIQ_BULK_RECEIVE_DONE handler completes
 *    delivery and releases the header); the buffer is stashed in
 *    msg->delayed_buffer.
 *  - otherwise the payload is embedded in the message: it is copied into
 *    the event buffer, callback_event is invoked, and the message is
 *    released here.
 *
 * On any error the message length is zeroed and the message released
 * (FIXME in code: no buffer error flag is set, and an already-queued bulk
 * receive cannot be aborted).
 */
static void mmal_vc_handle_event_msg(VCHIQ_HEADER_T *vchiq_header, VCHIQ_SERVICE_HANDLE_T service, void *context) { mmal_worker_event_to_host *msg = (mmal_worker_event_to_host *)vchiq_header->data; MMAL_COMPONENT_T *component = msg->client_component; MMAL_BUFFER_HEADER_T *buffer; MMAL_STATUS_T status; MMAL_PORT_T *port; LOG_DEBUG("event to host, cmd 0x%08x len %d to component %p port (%d,%d)", msg->cmd, msg->length, msg->client_component, msg->port_type, msg->port_num); (void)context; port = mmal_vc_port_by_number(component, msg->port_type, msg->port_num); if (!vcos_verify(port)) { LOG_ERROR("port (%i,%i) doesn't exist", (int)msg->port_type, (int)msg->port_num); goto error; } status = mmal_port_event_get(port, &buffer, msg->cmd); if (status != MMAL_SUCCESS) { LOG_ERROR("no event buffer available to receive event (%i)", (int)status); goto error; } if (!vcos_verify(msg->length <= buffer->alloc_size)) { LOG_ERROR("event buffer to small to receive event (%i/%i)", (int)buffer->alloc_size, (int)msg->length); goto error; } buffer->length = msg->length; /* Sanity check that the event buffers have the proper vc client context */ if (!vcos_verify(mmal_buffer_header_driver_data(buffer)->magic == MMAL_MAGIC && mmal_buffer_header_driver_data(buffer)->client_context && mmal_buffer_header_driver_data(buffer)->client_context->magic == MMAL_MAGIC && mmal_buffer_header_driver_data(buffer)->client_context->callback_event)) { LOG_ERROR("event buffers not configured properly by component"); goto error; } if (buffer->length > MMAL_WORKER_EVENT_SPACE) { /* a buffer full of data for us to process */ int len = buffer->length; len = (len+3) & (~3); LOG_DEBUG("queue event bulk rx: %p, %d", buffer->data, buffer->length); msg->delayed_buffer = buffer; VCHIQ_STATUS_T vst = vchiq_queue_bulk_receive(service, buffer->data, len, vchiq_header); if (vst != VCHIQ_SUCCESS) { LOG_TRACE("queue event bulk rx len %d failed to start", buffer->length); mmal_buffer_header_release(buffer); goto error; } } else 
{ if (msg->length) memcpy(buffer->data, msg->data, msg->length); mmal_buffer_header_driver_data(buffer)->client_context->callback_event(port, buffer); LOG_DEBUG("done callback back to client"); vchiq_release_message(service, vchiq_header); } return; error: /* FIXME: How to abort bulk receive if necessary? */ msg->length = 0; /* FIXME: set a buffer flag to signal error */ vchiq_release_message(service, vchiq_header); }