/*
 * User defined transfer function.
 *
 * Submits the transfer in dual-DMA mode and blocks until the
 * transfer-complete event is signaled (or times out). Returns
 * kStatus_Success on success, kStatus_Fail otherwise.
 */
static status_t SDIF_TransferFunction(SDIF_Type *base, sdif_transfer_t *content)
{
    sdif_dma_config_t dma;
    status_t status = kStatus_Success;

    /* clear the descriptor table and the DMA configuration */
    memset(g_sdifDmaTable, 0, sizeof(g_sdifDmaTable));
    memset(&dma, 0, sizeof(dma));

    /* a data phase is present — describe the DMA descriptor buffer */
    if (content->data != NULL) {
        dma.enableFixBurstLen     = false;
        dma.mode                  = kSDIF_DualDMAMode;
        dma.dmaDesBufferStartAddr = g_sdifDmaTable;
        dma.dmaDesBufferLen       = SDIF_DMA_TABLE_WORDS;
        dma.dmaDesSkipLen         = 0U;
    }

    /* keep retrying while the controller reports a command-sync timeout */
    for (;;) {
        status = SDIF_TransferNonBlocking(base, &g_sdifHandle, &dma, content);
        if (status != kStatus_SDIF_SyncCmdTimeout) {
            break;
        }
    }

    if (status == kStatus_Success) {
        /* wait for the ISR to flag completion, then check the outcome */
        if (false == EVENT_Wait(kEVENT_TransferComplete, EVENT_TIMEOUT_TRANSFER_COMPLETE) ||
            !g_sdifTransferSuccessFlag) {
            status = kStatus_Fail;
        }
    } else {
        status = kStatus_Fail;
    }

    EVENT_Delete(kEVENT_TransferComplete);
    return status;
}
/**
 * Waits for the work item to complete.
 *
 * Attaches a waiter to the item and blocks on its event; if no waiter
 * could be attached (WKI_AttachWaiter returned NULL), returns at once.
 */
void WKI_Wait(WorkItem * w)
{
    Waiter * wt = WKI_AttachWaiter(w);
    if (!wt) {
        /* nothing to block on */
        return;
    }
    EVENT_Wait(&wt->event);
    WKQ_ReleaseWaiter(&WKQ, wt);
}
/**
 * Wait for event to become signaled.
 *
 * A negative timeout means wait forever (delegates to EVENT_Wait).
 * Returns WAIT_STATE_OK, WAIT_STATE_TIMEOUT or WAIT_STATE_ERROR.
 */
WaitState EVENT_TimeWait(Event * e, long ms)
{
    DWORD status;

    if (ms < 0) {
        /* negative timeout == infinite wait */
        return EVENT_Wait(e);
    }

    status = WaitForSingleObject(e->handle, ms);
    if (status == WAIT_OBJECT_0) {
        return WAIT_STATE_OK;
    }
    if (status == WAIT_TIMEOUT) {
        return WAIT_STATE_TIMEOUT;
    }

    /* anything else (e.g. WAIT_FAILED) is treated as an error */
    TRACE1("WaitForSingleObject() status %08lX\n",status);
    ASSMSG1("WaitForSingleObject() failed, error %d",GetLastError());
    return WAIT_STATE_ERROR;
}
/**
 * Waits for this work queue to stop.
 *
 * Blocks on the queue's stop event; once it fires, the queue must have
 * been marked dead by the worker thread.
 */
void WKQ_Wait(WorkQueue * q)
{
    Event * stopped = &q->stopEvent;
    EVENT_Wait(stopped);
    ASSERT(q->flags & WKQ_DEAD);
}
/** * The worker thread */ STATIC void WKQ_Thread(void * par) { WorkQueue * q = (WorkQueue *)par; TRACE("WKQ: starting\n"); /* start the loop */ MUTEX_Lock(&q->mutex); q->lastActivity = TIME_Now(); while ((q->flags & WKQ_ACTIVE) || !QUEUE_IsEmpty(&q->submit)) { QEntry * e; while ((e = QUEUE_RemoveHead(&q->submit)) != NULL) { WorkItem * w = QCAST(e,WorkItem,submitQ); ASSERT(!(w->flags & (WKI_DONE|WKI_CANCELED))); /* * NULL callback may be used by dummy work items whose purpose * is to wait until all pending work items have been processed */ if (w->proc) { /* update flags */ w->flags |= WKI_CALL; /* invoke the handler */ MUTEX_Unlock(&q->mutex); w->proc(w, w->param); MUTEX_Lock(&q->mutex); q->lastActivity = TIME_Now(); if (w->flags & WKI_DETACHED) { /* put the work item to the pool or deallocate it */ ASSERT(!w->waiters); QUEUE_RemoveEntry(&w->itemsQ); WKQ_ReleaseWorkItem(&WKQ, w); } else { /* * update flags. Note that we released the mutex when * were invoking the callback. Therefore, this work * item could be re-submitted to the queue. Or it could * be re-submitted and then canceled. In such cases we * don't need to set the WKI_DONE flag. */ w->flags &= ~WKI_CALL; if (!(w->flags & WKI_CANCELED) && !w->submitQ.queue) { w->flags |= WKI_DONE; } /* signal the events associated with the work item */ WKI_Signal(w); } } else { /* it's a dummy work item. 
Just release the waiters */ WKI_Signal(w); } } /* wait for a signal */ if (q->flags & WKQ_ACTIVE) { EVENT_Reset(&q->event); if (q->idleProc) { /* we have an idle timeout */ IdleProc idle = q->idleProc; void * param = q->idleParam; Time now = TIME_Now(); Time deadline = q->lastActivity + q->idleTimeout; if (deadline > now) { MUTEX_Unlock(&q->mutex); switch (EVENT_TimeWait(&q->event,(long)(deadline-now))) { case WAIT_STATE_OK: /* don't invoke idle callback */ MUTEX_Lock(&q->mutex); break; case WAIT_STATE_TIMEOUT: /* invoke idle callback */ MUTEX_Lock(&q->mutex); now = TIME_Now(); deadline = q->lastActivity + q->idleTimeout; if (deadline <= now) { MUTEX_Unlock(&q->mutex); q->lastActivity = now; idle(q, param); MUTEX_Lock(&q->mutex); } break; default: case WAIT_STATE_ERROR: /* terminate the thread on error */ MUTEX_Lock(&q->mutex); q->flags &= ~WKQ_ACTIVE; break; } } else { q->lastActivity = now; MUTEX_Unlock(&q->mutex); idle(q, param); MUTEX_Lock(&q->mutex); } } else { /* wait forever */ MUTEX_Unlock(&q->mutex); EVENT_Wait(&q->event); MUTEX_Lock(&q->mutex); } } } /* cleanup */ MUTEX_Unlock(&q->mutex); TRACE("WKQ: done\n"); if (q->flags & WKQ_KILLME) { TRACE1("WKQ: killing WorkQueue %p\n",q); WKQ_Free(q); } }
/** * The program entry point */ int main(int argc, char * argv[]) { int mask = ECMTGW_LISTEN_DEFAULT_MASK; Bool traceReceive = False; Bool traceSend = False; Str host = NULL; Str file = NULL; CmdLine* c; GwTrace trace; XRpcRegistry * r; /* Initialize the XRPC library */ XRPC_Init(); /* First step of initializing GwTrace context */ memset(&trace, 0, sizeof(trace)); VECTOR_Init(&trace.includeUid, 0, NULL, NULL); VECTOR_Init(&trace.excludeUid, 0, NULL, NULL); /* Parse command line */ c = CMDLINE_Create(pname); if (c) { CmdOpt* includeOpt; CmdOpt* excludeOpt; Bool done = False; Bool help = False; CMDLINE_SetMaxArgs(c, 1); CMDLINE_AddTrueOpt(c,'h',"help", "print this help and exit",&help); CMDLINE_AddTrueOpt(c,'s',"sent", "trace packets sent to the handset",&traceSend); CMDLINE_AddTrueOpt(c,'r',"receive", "trace packets received from the handset",&traceReceive); includeOpt = CMDLINE_AddOpt(c,'u',"include", "include this UID in the trace (repeatable)", GWTRACE_ParseUidOpt, &trace.includeUid, "UID"); excludeOpt = CMDLINE_AddOpt(c,'x',"exclude", "exclude this UID from the trace (repeatable)", GWTRACE_ParseUidOpt, &trace.excludeUid, "UID"); CMDLINE_SetParamName(CMDLINE_AddStrOpt(c,'o',"output", "write binary Ecmt messages into a file",&file),"FILE"); CMDLINE_SetRepeatable(includeOpt); CMDLINE_SetRepeatable(excludeOpt); CMDLINE_Exclude(includeOpt, excludeOpt); if (!CMDLINE_Parse1(c,argv+1,argc-1,0,&host) || help) { CMDLINE_Usage(c, "[HOST]", 0); CMDLINE_Delete(c); VECTOR_Destroy(&trace.includeUid); VECTOR_Destroy(&trace.excludeUid); XRPC_Deinit(); return 0; } CMDLINE_Delete(c); } if (traceReceive || traceSend) { mask = 0; if (traceReceive) mask |= ECMTGW_LISTEN_MASK_RECEIVE; if (traceSend) mask |= ECMTGW_LISTEN_MASK_SEND; } /* connect to the registry */ r = XREG_ConnectRegistry(host, XREG_DEFAULT_PORT); if (r) { /* find the server port */ XRpcPort gwPort = 0; XREG_List(r, ECMTGW_PROTOCOL, GWTRACE_ListCB, &gwPort); XREG_FreeRegistry(r); if (gwPort) { if 
(EVENT_Init(&exitEvent)) { if (GWTRACE_Init(&trace, host, gwPort, file)) { /* Install signal handlers */ #ifndef _WIN32 signal(SIGPIPE, GWTRACE_Interrupt); #endif /* _WIN32 */ signal(SIGINT, GWTRACE_Interrupt); /* Enable notifications */ XRPC_FormatNotify(XRPC_GetClient(trace.session), ECMTGW_PROTOCOL, ECMTGW_REGISTER_LISTENER_METHOD,"%" ECMTGW_LISTENER_PROTOCOL_PARAM"!s%" ECMTGW_LISTENER_MASK_PARAM"!i", ECMTGW_LISTENER_PROTOCOL, mask); /* Wait */ EVENT_Wait(&exitEvent); /* cleanup */ XRPC_FreeSession(trace.session); XRPC_FreeServer(trace.server); if (trace.file) FILE_Close(trace.file); } EVENT_Destroy(&exitEvent); } } else { PRINT_Error("%s: Ecmt Gateway is not running.\n",pname); } } else if (host) { PRINT_Verbose("%s: XRPC registry is not running on %s\n",pname,host); PRINT_Error("%s: Ecmt Gateway is not running on %s.\n",pname,host); } else { PRINT_Verbose("%s: XRPC registry is not running\n",pname); PRINT_Error("%s: Ecmt Gateway is not running.\n",pname); } VECTOR_Destroy(&trace.includeUid); VECTOR_Destroy(&trace.excludeUid); /* Deinitialize the XRPC library */ XRPC_Deinit(); return 0; }
/**
 * Locks resource for non-exclusive use, waits if necessary. Returns True
 * if lock has been successfully acquired, otherwise False.
 *
 * ms < 0 waits forever, ms == 0 is a try-lock, ms > 0 waits up to ms.
 * The lock is re-entrant for the calling thread: if this thread already
 * holds the resource (shared OR exclusive), only its entry count is
 * incremented.
 */
Bool RWLOCK_TimeReadLock(RWLock * lock, long ms)
{
    Bool ok = True;
    Bool success = False;
    RWLockWaiter * waiter = NULL;
    Time deadline = 0;

    /*
     * this flag is False if we have found that current thread is NOT
     * an owner of the resource, so that we don't scan the lock entries
     * more than once.
     */
    Bool maybeOwner = True;
    RWEntry * entry = NULL;

    /* calculate the deadline if it's a wait with timeout */
    if (ms > 0) {
        deadline = TIME_Now() + ms;
    }

    MUTEX_Lock(&lock->mutex);
    while (ok) {
        Time now = 0;

        /*
         * if this thread already owns this resource either exclusively
         * or shared, we are all set. All we need is to increment entry
         * count. NOTE that we don't touch the "exclusive" flag, meaning
         * that if resource has been acquired exclusively, it remains
         * this way.
         */
        if (maybeOwner) {
            entry = RWLOCK_FindEntry(lock);
            if (entry) {
                success = True;
                if (lock->flags & RWLOCK_FLAG_EXCLUSIVE_LOCK) {
                    ASSERT(entry->write > 0);
                    entry->write++;
                } else {
                    ASSERT(entry->write == 0);
                    entry->read++;
                }
                break;
            } else {
                maybeOwner = False; /* don't scan entry table again */
            }
        }

        /* if resource is not owned and no one is waiting, we can have it */
        if (lock->locks <= 0 &&
            QUEUE_Size(&lock->shareWaiters) == 0 &&
            QUEUE_Size(&lock->exclusiveWaiters) == 0) {
            /*
             * note that it's quite possible that resource is not owned
             * but the wait queue is not empty. this can happen for example
             * if this thread just released the resource and waiters didn't
             * yet have the chance to run. in such case, this thread should
             * be place into the queue to avoid starving the waiters
             */
            entry = RWLOCK_GetEntry(lock);
            if (entry) {
                success = True;
                lock->flags &= ~RWLOCK_FLAG_EXCLUSIVE_LOCK;
                entry->read++;
            }
            /* break even if no entry was available (allocation failure) */
            break;
        }

        /*
         * if resource is owned in shared mode, there's a good chance that
         * we can have it immediately. Some restrictions apply (see below)
         */
        if (!(lock->flags & RWLOCK_FLAG_EXCLUSIVE_LOCK)) {
            /*
             * normally we allow this thread to access the resource
             * in readonly mode even if there's an exclusive waiter.
             * However, if we always did that, the exclusive waiter
             * might end up waiting forever if new readonly waiters
             * keep coming. To prevent this from happening, we count
             * the number of times an exclusive waiter has been bypassed
             * by a lucky late-coming reader. If this number exceeds
             * the limit, everyone has to stay in the line.
             */
            if (QUEUE_Size(&lock->exclusiveWaiters) == 0 ||
                lock->bypassCount < RWLOCK_MAX_BYPASS_COUNT) {
                entry = RWLOCK_GetEntry(lock);
                if (entry) {
                    if (QUEUE_Size(&lock->exclusiveWaiters) > 0) {
                        lock->bypassCount++;
                    }
                    ASSERT(entry->write == 0);
                    success = True;
                    entry->read++;
                }
                break;
            }
        }

        /*
         * resource cannot be acquired immediately for exclusive access.
         * If we cannot wait (any longer), break the loop.
         */
        if (ms == 0) {
            break;
        } else if (ms > 0) {
            /* check for timeout */
            now = TIME_Now();
            if (now >= deadline) {
                break;
            }
        }

        /*
         * release the mutex and wait for event to be signalled, then
         * start it all over again.
         */
        lock->contentions++;
        if (!waiter) {
            waiter = RWLOCK_GetShareWaiter(lock);
            if (!waiter) break;
        }
        /* NOTE(review): resets the shared event while exclusive waiters
         * reset their own per-waiter event (see RWLOCK_TimeWriteLock);
         * presumably share waiters all share lock->shareEvent — confirm. */
        EVENT_Reset(&lock->shareEvent);
        MUTEX_Unlock(&lock->mutex);

        /* wait */
        if (ms > 0) {
            long tmo = (long)(deadline - now);
            if (EVENT_TimeWait(waiter->event, tmo) == WAIT_STATE_ERROR) {
                ok = False;
            }
        } else {
            ok = BoolValue(EVENT_Wait(waiter->event) == WAIT_STATE_OK);
        }
        MUTEX_Lock(&lock->mutex);
    }

    if (success) lock->locks++;
    if (waiter) RWLOCK_ReleaseWaiter(lock, waiter);
    MUTEX_Unlock(&lock->mutex);
    return success;
}
/**
 * Locks resource for exclusive use, waits if necessary. Returns True if lock
 * has been successfully acquired, otherwise False.
 *
 * ms < 0 waits forever, ms == 0 is a try-lock, ms > 0 waits up to ms.
 * If the calling thread is the sole user of the resource, a shared hold
 * is upgraded to exclusive in place.
 */
Bool RWLOCK_TimeWriteLock(RWLock * lock, long ms)
{
    Bool ok = True;
    Bool success = False;
    RWLockWaiter * waiter = NULL;
    Time deadline = 0;

    /*
     * this flag is False if we have found that current thread is NOT
     * an owner of the resource, so that we don't scan the lock entries
     * more than once.
     */
    Bool maybeOwner = True;
    RWEntry * entry = NULL;

    /* calculate the deadline if it's a wait with timeout */
    if (ms > 0) {
        deadline = TIME_Now() + ms;
    }

    /*
     * we can acquire the resource immediately if
     * 1. resource is unowned and no one is waiting; or
     * 2. this thread is the only one that is using the resource, either
     *    shared or exclusively
     */
    MUTEX_Lock(&lock->mutex);
    while (ok) {
        Time now = 0;

        /*
         * if this thread already owns this resource exclusively,
         * we are all set. All we need is to increment entry count.
         */
        if (lock->entriesActive == 1 && maybeOwner) {
            if (!entry) {
                entry = RWLOCK_FindEntry(lock);
            }
            if (entry) {
                success = True;
                lock->flags |= RWLOCK_FLAG_EXCLUSIVE_LOCK;
                entry->write++; /* convert shared to exclusive */
                break;
            } else {
                maybeOwner = False;
            }
        }

        /* if resource is not owned and no one is waiting, we can have it */
        if (lock->locks <= 0) {
            Bool gotIt = False;
            if (waiter) {
                /* we are queued: only proceed if we are first in line */
                gotIt = BoolValue(lock->exclusiveWaiters.head.next == &waiter->entry);
            } else {
                gotIt = BoolValue(QUEUE_Size(&lock->shareWaiters) == 0 &&
                                  QUEUE_Size(&lock->exclusiveWaiters) == 0);
            }
            /*
             * note that it's quite possible that resource is not owned
             * but the wait queue is not empty. this can happen for example
             * if this thread just released the resource and waiters didn't
             * yet have the chance to run. in such case, this thread should
             * be place into the queue to avoid starving the waiters
             */
            if (gotIt) {
                if (!entry) {
                    entry = RWLOCK_GetEntry(lock);
                }
                if (entry) {
                    success = True;
                    lock->flags |= RWLOCK_FLAG_EXCLUSIVE_LOCK;
                    entry->write++;
                }
                /* break even if no entry was available */
                break;
            }
        }

        /*
         * resource cannot be acquired immediately for exclusive access.
         * If we cannot wait (any longer), break the loop.
         */
        if (ms == 0) {
            break;
        } else if (ms > 0) {
            /* check for timeout */
            now = TIME_Now();
            if (now >= deadline) {
                break;
            }
        }

        /*
         * release the mutex and wait for event to be signaled, then
         * start it all over again.
         */
        lock->contentions++;
        if (!waiter) {
            waiter = RWLOCK_GetExclusiveWaiter(lock);
            if (!waiter) break;
        }
        EVENT_Reset(waiter->event);
        MUTEX_Unlock(&lock->mutex);

        /* wait */
        if (ms > 0) {
            long tmo = (long)(deadline - now);
            if (EVENT_TimeWait(waiter->event,tmo) == WAIT_STATE_ERROR) {
                ok = False;
            }
        } else {
            ok = BoolValue(EVENT_Wait(waiter->event) == WAIT_STATE_OK);
        }
        MUTEX_Lock(&lock->mutex);
    }

    if (success) lock->locks++;
    if (waiter) RWLOCK_ReleaseExclusiveWaiter(lock, waiter);
    /* a writer came through (or gave up) — readers start bypassing again */
    lock->bypassCount = 0;
    MUTEX_Unlock(&lock->mutex);
    return success;
}