/***********************************************************
 * Name: os_cond_signal
 *
 * Arguments: OS_COND_T *cond
 *            bool_t sem_claimed
 *
 * Description: Routine to signal one thread (the first
 *              waiter, if any) waiting on a condition
 *              variable. The caller must say whether they
 *              have already obtained the semaphore.
 *
 * Returns: int32_t - success == 0
 *
 ***********************************************************/
int32_t os_cond_signal( OS_COND_T *cond, bool_t sem_claimed )
{
   int32_t success = -1;

   if (cond)
   {
      COND_WAITER_T *w;

      // Ensure the condvar's semaphore is claimed for thread-safe access
      if (!sem_claimed)
      {
         success = os_semaphore_obtain((*cond)->semaphore);
         os_assert(success == 0);
      }

      w = (*cond)->waiters;
      if (w)
      {
         // Wake the first waiter and unlink it from the list
         vcos_event_signal(&w->latch);
         (*cond)->waiters = w->next;
      }

      success = 0;
      if (!sem_claimed)
      {
         success = os_semaphore_release((*cond)->semaphore);
      }
      os_assert(success == 0);
   }

   return success;
}
/***********************************************************
 * Name: os_cond_broadcast
 *
 * Arguments: OS_COND_T *cond
 *            bool_t sem_claimed
 *
 * Description: Routine to signal all threads waiting on
 *              a condition variable. The caller must
 *              say whether they have obtained the semaphore.
 *
 * Returns: int32_t - success == 0
 *
 ***********************************************************/
int32_t os_cond_broadcast( OS_COND_T *cond, bool_t sem_claimed )
{
   int32_t success = -1;

   if (cond)
   {
      COND_WAITER_T *w;

      // Ensure the condvar's semaphore is claimed for thread-safe access
      if (!sem_claimed)
      {
         success = os_semaphore_obtain((*cond)->semaphore);
         os_assert(success == 0);
      }

      // Wake every waiter, then empty the list
      for (w = (*cond)->waiters; w; w = w->next)
      {
         vcos_event_signal(&w->latch);
      }
      (*cond)->waiters = NULL;

      success = 0;
      if (!sem_claimed)
      {
         success = os_semaphore_release((*cond)->semaphore);
      }
      os_assert(success == 0);
   }

   return success;
}
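/* A minimal usage sketch for the signal/broadcast pair above, assuming a
   matching os_cond_wait helper (which would release the semaphore while
   blocked and re-obtain it on wake-up). The work_sem, work_cond and
   work_pending names are hypothetical, for illustration only; creation
   of the semaphore and condvar is omitted. */
static OS_SEMAPHORE_T work_sem;
static OS_COND_T work_cond;
static int work_pending;

static void post_work_sketch(void)
{
   os_semaphore_obtain(&work_sem);
   work_pending = 1;
   /* The semaphore is already held, so pass sem_claimed = 1 and let the
      condvar skip re-obtaining it. */
   os_cond_signal(&work_cond, 1);
   os_semaphore_release(&work_sem);
}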
/***********************************************************
 * Name: vc_dispmanx_stop
 *
 * Arguments:
 *       -
 *
 * Description: Stops the Host side part of dispmanx
 *
 * Returns: -
 *
 ***********************************************************/
VCHPRE_ void VCHPOST_ vc_dispmanx_stop( void )
{
   // Wait for the current lock-holder to finish before zapping dispmanx.
   void *dummy;
   uint32_t i;

   if (!dispmanx_client.initialised)
      return;

   lock_obtain();
   for (i = 0; i < dispmanx_client.num_connections; i++)
   {
      int32_t result;
      result = vchi_service_close(dispmanx_client.client_handle[i]);
      vcos_assert( result == 0 );
      result = vchi_service_close(dispmanx_client.notify_handle[i]);
      vcos_assert( result == 0 );
   }
   lock_release();

   // Mark the client dead, wake the notifier task so it can observe the
   // shutdown, then join it before tearing down the synchronisation objects.
   dispmanx_client.initialised = 0;
   vcos_event_signal(&dispmanx_notify_available_event);
   vcos_thread_join(&dispmanx_notify_task, &dummy);
   vcos_mutex_delete(&dispmanx_client.lock);
   vcos_event_delete(&dispmanx_message_available_event);
   vcos_event_delete(&dispmanx_notify_available_event);
}
static void control_c( int signum )
{
   (void)signum;

   LOG_STD( "Shutting down..." );

   vcos_event_signal( &quit_event );
}
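/* Registration sketch: the handler above only raises quit_event, leaving
   the main thread to wait on it and do the actual teardown. Wiring it up
   is assumed to happen in main() or similar via the standard signal()
   call from <signal.h>. */
#include <signal.h>

static void install_control_c_sketch( void )
{
   signal( SIGINT, control_c );   /* Ctrl-C now requests a clean shutdown */
}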
void khrn_worker_notify(void)
{
#ifdef KHRN_WORKER_USE_LLAT
   khrn_llat_notify(llat_i);
#else
   vcos_event_signal(&event);
#endif
}
static void __inline remote_event_signal_local(REMOTE_EVENT_T *event)
{
#ifdef VCHIQ_LOCAL
   vcos_event_signal(&event->event);
#else
   // Consume any pending remote signal, then wake the local waiter
   // directly - no doorbell to the other side is needed.
   event->clr_count = event->set_count;
   local_event_signal(&event->local);
#endif
}
void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header)
{
   // The ring is full when the free-running write index is a whole
   // buffer ahead of the read index.
   while (queue->write == queue->read + queue->size)
      vcos_event_wait(&queue->pop);

   // size is a power of two, so masking wraps the index into the ring.
   queue->storage[queue->write & (queue->size - 1)] = header;

   queue->write++;

   vcos_event_signal(&queue->push);
}
void *CComponent::EventThreadProc(VCOS_EVENT_FLAGS_T& EventFlags, VCOS_EVENT_T& InitializedEvent)
{
   vcos_event_signal(&InitializedEvent);

   VCOS_UNSIGNED nEvents;

   try
   {
      for (;;)
      {
         CHECK_VCOS(vcos_event_flags_get(&EventFlags, CThread::s_nTerminationFlag | s_nNewEventFlag, VCOS_CONSUME, VCOS_SUSPEND, &nEvents), "failed to wait for events");

         if (nEvents & CThread::s_nTerminationFlag)
         {
            // Component is being destroyed
            break;
         }
         else if (nEvents & s_nNewEventFlag)
         {
            // New event that cannot be handled in the notification callback
            bool bEventsPending = false;

            do
            {
               CComponentEvent Event;

               {
                  // Hold the mutex only long enough to pop one event.
                  CHECK_VCOS(vcos_mutex_lock(&m_EventQueueMutex), "failed to lock event queue mutex");
                  Event = m_EventQueue.front();
                  m_EventQueue.pop_front();
                  bEventsPending = !m_EventQueue.empty();
                  vcos_mutex_unlock(&m_EventQueueMutex);
               }

               switch (Event.m_EventType)
               {
               case OMX_EventPortSettingsChanged:
                  {
                     if (m_pGraph)
                     {
                        m_pGraph->OnPortSettingsChanged(this, Event.m_nData1);
                     }
                  }
                  break;

               default:
                  break;
               }
            } while (bEventsPending);
         }
      }
   }
   catch (std::exception& Exception)
   {
      std::cerr << "Error: " << Exception.what() << std::endl;
   }

   return NULL;
}
/******************************************************************************
NAME
   gencmd_callback

SYNOPSIS
   void gencmd_callback( void *callback_param,
                         const VCHI_CALLBACK_REASON_T reason,
                         void *msg_handle )

FUNCTION
   VCHI callback

RETURNS
   void
******************************************************************************/
static void gencmd_callback( void *callback_param,
                             const VCHI_CALLBACK_REASON_T reason,
                             void *msg_handle )
{
   VCOS_EVENT_T *event = (VCOS_EVENT_T *)callback_param;

   (void)msg_handle;

   if ( reason != VCHI_CALLBACK_MSG_AVAILABLE || !event )
      return;

   vcos_event_signal(event);
}
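/* How the event is typically consumed on the waiting side (a sketch;
   "handle" is assumed to be a VCHI service that was opened with
   gencmd_callback and this event as its callback_param): */
static int32_t gencmd_wait_for_reply_sketch( VCHI_SERVICE_HANDLE_T handle,
                                             VCOS_EVENT_T *event,
                                             void *buffer, uint32_t buffer_size,
                                             uint32_t *actual_size )
{
   /* Block until the callback reports VCHI_CALLBACK_MSG_AVAILABLE... */
   vcos_event_wait(event);
   /* ...then pull the reply off the service queue. */
   return vchi_msg_dequeue( handle, buffer, buffer_size, actual_size,
                            VCHI_FLAGS_NONE );
}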
VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue)
{
   VCHIQ_HEADER_T *header;

   // The ring is empty when the two free-running indices are equal.
   while (queue->write == queue->read)
      vcos_event_wait(&queue->push);

   header = queue->storage[queue->read & (queue->size - 1)];

   queue->read++;

   vcos_event_signal(&queue->pop);

   return header;
}
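/* The push/pop pair above relies on a few invariants worth spelling out:
   read and write are free-running counters (empty when equal, full when
   write == read + size), and size must be a power of two so the
   "index & (size - 1)" masking wraps correctly. A hedged initialisation
   sketch under those assumptions (the field names come from the code
   above; the function itself is hypothetical): */
static int vchiu_queue_init_sketch(VCHIU_QUEUE_T *queue,
                                   VCHIQ_HEADER_T **storage, int size)
{
   vcos_assert((size & (size - 1)) == 0);  /* power of two required */
   queue->storage = storage;
   queue->size = size;
   queue->read = 0;
   queue->write = 0;
   /* One event per direction: "push" wakes poppers, "pop" wakes pushers. */
   if (vcos_event_create(&queue->push, "push") != VCOS_SUCCESS)
      return -1;
   if (vcos_event_create(&queue->pop, "pop") != VCOS_SUCCESS)
      return -1;
   return 0;
}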
static void dispmanx_notify_callback( void *callback_param,
                                      const VCHI_CALLBACK_REASON_T reason,
                                      void *msg_handle )
{
   VCOS_EVENT_T *event = (VCOS_EVENT_T *)callback_param;

   (void)msg_handle;

   if ( reason != VCHI_CALLBACK_MSG_AVAILABLE )
      return;

   if ( event == NULL )
      return;

   vcos_event_signal(event);
}
VCHIQ_STATUS_T khhn_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
                             VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
{
   switch (reason)
   {
   case VCHIQ_MESSAGE_AVAILABLE:
      vchiu_queue_push(&khhn_queue, header);
      break;
   case VCHIQ_BULK_TRANSMIT_DONE:
   case VCHIQ_BULK_RECEIVE_DONE:
      vcos_event_signal(&bulk_event);
      break;
   default:
      break;
   }
   return VCHIQ_SUCCESS;
}
static void __inline remote_event_signal(REMOTE_EVENT_T *event)
{
#ifdef VCHIQ_LOCAL
   vcos_event_signal(&event->event);
#else
   event->set_count++;

#ifdef __VIDEOCORE__
   /* Force a stall until the write completes */
   _vasm("mov %D, %r", *(volatile int *)&event->set_count);
#endif

   /* Notify the other side */
   vchiq_ring_doorbell();
#endif
}
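/* For context, a hedged sketch of the wait side this signal pairs with
   (the real remote_event_wait lives elsewhere; local_event_wait is
   assumed to be the counterpart of local_event_signal): the waiter
   sleeps until set_count moves past clr_count, then records what it has
   seen. remote_event_signal_local above short-circuits this handshake
   for same-side wake-ups. */
static void remote_event_wait_sketch(REMOTE_EVENT_T *event)
{
   while (event->clr_count == event->set_count)
      local_event_wait(&event->local);   /* assumed local wait primitive */
   event->clr_count = event->set_count;  /* acknowledge the signal(s) seen */
}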
void khrn_worker_term(void)
{
   vcos_assert(inited);

   vcos_assert(khrn_worker_msg.done_it == khrn_worker_msg.post); /* should have called khrn_worker_wait or equivalent before this */
   vcos_assert(khrn_worker_msg.cleanup == khrn_worker_msg.post);

#ifdef KHRN_WORKER_USE_LLAT
   khrn_llat_unregister(llat_i);
#else
   // Ask the worker thread to exit, wake it, and reap it.
   exit_thread = true;
   vcos_event_signal(&event);
   vcos_thread_join(&thread, NULL);
   vcos_event_delete(&event);
#endif

   inited = false;
}
VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instance)
{
   VCHIQ_INSTANCE_T inst = NULL;
   int i;

   vcos_global_lock();

#ifdef VCHIQ_LOCAL
   if (vchiq_num_instances < 2)
   {
      vchiq_init_state(&vchiq_instances[vchiq_num_instances].state,
                       &vchiq_channels[vchiq_num_instances],
                       &vchiq_channels[vchiq_num_instances ^ 1]);

      if (vchiq_num_instances == 1)
      {
         /* This state initialisation may have erased a signal - signal
            anyway to be sure. This is a bit of a hack, caused by the
            desire for the server threads to be started on the same core
            as the calling thread. */
         vcos_event_signal(&vchiq_channels[vchiq_num_instances].trigger.event);
      }

      vchiq_num_instances++;
   }
#endif /* VCHIQ_LOCAL */

   // Hand out the first uninitialised instance slot.
   for (i = 0; i < vchiq_num_instances; i++)
   {
      if (!vchiq_instances[i].state.initialised)
      {
         inst = &vchiq_instances[i];
         inst->connected = 0;
         inst->state.id = i;
         inst->state.initialised = 1;
         break;
      }
   }

   vcos_global_unlock();

   *instance = inst;

   return (inst != NULL) ? VCHIQ_SUCCESS : VCHIQ_ERROR;
}
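/* Typical caller sequence (a sketch; vchiq_connect and vchiq_shutdown are
   assumed to be the usual companions of vchiq_initialise in this API): */
static VCHIQ_STATUS_T vchiq_startup_sketch(void)
{
   VCHIQ_INSTANCE_T instance;
   VCHIQ_STATUS_T status = vchiq_initialise(&instance);
   if (status != VCHIQ_SUCCESS)
      return status;   /* no free instance slot */

   status = vchiq_connect(instance);   /* complete the handshake */
   if (status != VCHIQ_SUCCESS)
      vchiq_shutdown(instance);
   return status;
}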
OMX_ERRORTYPE CComponent::EventHandler(OMX_EVENTTYPE eEvent, OMX_U32 nData1, OMX_U32 nData2, OMX_PTR pEventData)
{
   switch (eEvent)
   {
   case OMX_EventCmdComplete:
      {
         switch (nData1)
         {
         case OMX_CommandStateSet:
            vcos_event_signal(&m_StateChangedEvent);
            break;

         case OMX_CommandPortEnable:
         case OMX_CommandPortDisable:
            {
               CPortMap::iterator PortIterator = m_Ports.find(nData2);
               if (PortIterator != m_Ports.end())
               {
                  if (nData1 == OMX_CommandPortEnable)
                  {
                     PortIterator->second->OnEnabled();
                  }
                  else if (nData1 == OMX_CommandPortDisable)
                  {
                     PortIterator->second->OnDisabled();
                  }
               }
            }
            break;

         case OMX_CommandFlush:
            {
               CPortMap::iterator PortIterator = m_Ports.find(nData2);
               if (PortIterator != m_Ports.end())
               {
                  PortIterator->second->OnFlushed();
               }
            }
            break;

         default:
            break;
         }
      }
      break;

   case OMX_EventPortSettingsChanged:
      if (m_pGraph)
      {
         // Defer to the event thread - this callback must not block.
         CComponentEvent Event;
         Event.m_EventType = OMX_EventPortSettingsChanged;
         Event.m_nData1 = nData1;
         Event.m_nData2 = nData2;
         Event.m_pEventData = pEventData;

         {
            CHECK_VCOS(vcos_mutex_lock(&m_EventQueueMutex), "failed to lock event queue mutex");
            m_EventQueue.push_back(Event);
            vcos_mutex_unlock(&m_EventQueueMutex);
         }

         vcos_event_flags_set(&m_EventThread.GetEventFlags(), s_nNewEventFlag, VCOS_OR);
      }
      break;

   case OMX_EventError:
      std::cerr << "Error: Component " << m_sName << " generated error "
                << Omx::OmxError(static_cast<OMX_ERRORTYPE>(nData1)) << std::endl;
      break;

   default:
      break;
   }

   return OMX_ErrorNone;
}
static void __inline local_event_signal(LOCAL_EVENT_T *event)
{
   vcos_event_signal(&event->event);
}
static void parse_rx_slots(VCHIQ_STATE_T *state)
{
   VCHIQ_CHANNEL_T *remote = state->remote;
   VCHIQ_CHANNEL_T *local = state->local;

   while (remote->ctrl.process != remote->ctrl.insert)
   {
      VCHIQ_HEADER_T *header = (VCHIQ_HEADER_T *)(remote->ctrl.data +
         (remote->ctrl.process & VCHIQ_CHANNEL_MASK));
      VCHIQ_SERVICE_T *service = NULL;
      unsigned int stride = calc_stride(header->size);
      int type = VCHIQ_MSG_TYPE(header->fourcc);

      VCHIQ_TRACE("%d: prs %d (%d,%d)", state->id, type,
         VCHIQ_MSG_DSTPORT(header->fourcc),
         VCHIQ_MSG_SRCPORT(header->fourcc));

      switch (type)
      {
      case VCHIQ_MSG_OPEN:
         vcos_assert(VCHIQ_MSG_DSTPORT(header->fourcc) == 0);
         if (vcos_verify(header->size == 4))
         {
            VCHIQ_HEADER_T *reply;
            unsigned short remoteport = VCHIQ_MSG_SRCPORT(header->fourcc);
            int target;

            service = get_listening_service(local, *(int *)header->data);

            local_mutex_acquire(&local->ctrl.mutex);

            target = local->ctrl.insert + sizeof(VCHIQ_HEADER_T);
            reply = reserve_space(local, target);
            if (!reply)
            {
               local_mutex_release(&local->ctrl.mutex);
               return;  /* Bail out */
            }

            if (service && (service->srvstate == VCHIQ_SRVSTATE_LISTENING))
            {
               /* A matching, listening service exists - attempt the OPEN */
               VCHIQ_STATUS_T status;
               vchiq_set_service_state(service, VCHIQ_SRVSTATE_OPEN);

               /* Proceed as if the connection will be accepted */
               status = service->base.callback(VCHIQ_SERVICE_OPENED, NULL,
                  &service->base, NULL);
               if (status == VCHIQ_SUCCESS)
               {
                  /* The open was accepted - acknowledge it */
                  reply->fourcc = VCHIQ_MAKE_MSG(VCHIQ_MSG_OPENACK,
                     service->localport, remoteport);
                  service->remoteport = remoteport;
               }
               else
               {
                  vchiq_set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
                  if (status == VCHIQ_RETRY)
                     return;  /* Bail out if not ready */

                  /* The open was rejected - send a close */
                  reply->fourcc = VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE, 0, remoteport);
               }
            }
            else
            {
               /* No matching, available service - send a CLOSE */
               reply->fourcc = VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE, 0, remoteport);
            }

            reply->size = 0;
            local->ctrl.insert = target;

            local_mutex_release(&local->ctrl.mutex);

            remote_event_signal(&remote->trigger);
         }
         break;

      case VCHIQ_MSG_OPENACK:
         {
            unsigned int localport = VCHIQ_MSG_DSTPORT(header->fourcc);
            unsigned int remoteport = VCHIQ_MSG_SRCPORT(header->fourcc);
            service = &local->services[localport];
            if (vcos_verify((localport < VCHIQ_MAX_SERVICES) &&
               (service->srvstate == VCHIQ_SRVSTATE_OPENING)))
            {
               service->remoteport = remoteport;
               vchiq_set_service_state(service, VCHIQ_SRVSTATE_OPEN);
               local_event_signal(&service->remove_event);
            }
         }
         break;

      case VCHIQ_MSG_CLOSE:
         {
            unsigned int localport = VCHIQ_MSG_DSTPORT(header->fourcc);
            unsigned int remoteport = VCHIQ_MSG_SRCPORT(header->fourcc);
            service = &local->services[localport];

            vcos_assert(header->size == 0);  /* There should be no data */

            if (vcos_verify(localport < VCHIQ_MAX_SERVICES))
            {
               switch (service->srvstate)
               {
               case VCHIQ_SRVSTATE_OPEN:
                  if (service->remoteport != remoteport)
                     break;
                  /* Return the close */
                  if (queue_message(state,
                     VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE,
                     service->localport, service->remoteport),
                     NULL, 0, 0) == VCHIQ_RETRY)
                     return;  /* Bail out if not ready */
                  vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
                  /* Drop through... */

               case VCHIQ_SRVSTATE_CLOSESENT:
                  if (service->remoteport != remoteport)
                     break;
                  vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSING);
                  /* Drop through... */

               case VCHIQ_SRVSTATE_CLOSING:
                  if (service->remoteport == remoteport)
                  {
                     /* Start the close procedure */
                     if (vchiq_close_service_internal(service) == VCHIQ_RETRY)
                        return;  /* Bail out if not ready */
                  }
                  break;

               case VCHIQ_SRVSTATE_OPENING:
                  /* A client is mid-open - this is a rejection, so just fail the open */
                  vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSEWAIT);
                  local_event_signal(&service->remove_event);
                  break;

               default:
                  break;
               }
            }
         }
         break;

      case VCHIQ_MSG_DATA:
         {
            unsigned int localport = VCHIQ_MSG_DSTPORT(header->fourcc);
            unsigned int remoteport = VCHIQ_MSG_SRCPORT(header->fourcc);
            service = &local->services[localport];
            if (vcos_verify((localport < VCHIQ_MAX_SERVICES) &&
               (service->remoteport == remoteport)) &&
               (service->srvstate == VCHIQ_SRVSTATE_OPEN))
            {
               if (service->base.callback(VCHIQ_MESSAGE_AVAILABLE, header,
                  &service->base, NULL) == VCHIQ_RETRY)
                  return;  /* Bail out if not ready */
               header = NULL;  /* Don't invalidate this message - defer till vchiq_release */
            }
         }
         break;

      case VCHIQ_MSG_CONNECT:
         vcos_event_signal(&state->connect);
         break;

      case VCHIQ_MSG_INVALID:
      default:
         break;
      }

      remote->ctrl.process += stride;

      if (header != NULL)
      {
         /* Invalidate it */
         header->fourcc = VCHIQ_FOURCC_INVALID;
         /* Notify the other end there is some space */
         remote_event_signal(&remote->ctrl.remove_event);
      }
   }
}
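/* For reference, a hedged sketch of the stride calculation assumed by the
   loop above (the real calc_stride is defined elsewhere; the 8-byte
   alignment here is an assumption): each message occupies its header plus
   payload, rounded up so the next header in the ring stays aligned. */
static unsigned int calc_stride_sketch(unsigned int size)
{
   return (unsigned int)(sizeof(VCHIQ_HEADER_T) + size + 7) & ~7u;
}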