/**
 * Stops the work queue's worker threads.
 *
 * Clears WKQ_ACTIVE (so workers exit their loop) and wakes them via the
 * queue event. If canWait is True, the caller is blocked until all worker
 * threads have terminated; otherwise the function only initiates shutdown.
 *
 * If the calling thread is itself one of the workers, it cannot join
 * itself: it is detached instead and WKQ_KILLME is set so that the worker
 * loop destroys the WorkQueue when it unwinds.
 */
void WKQ_Stop(WorkQueue * q, Bool canWait)
{
    int nthreads = 0;
    if (MUTEX_Lock(&q->mutex)) {
        /* deactivate the queue and wake up the workers */
        if (q->flags & WKQ_ACTIVE) {
            q->flags &= ~WKQ_ACTIVE;
            EVENT_Set(&q->event);
        }
        /* claim the thread handles so only one caller joins them */
        if (canWait && q->nthreads) {
            ASSERT(!(q->flags & WKQ_DEAD));
            nthreads = q->nthreads;
            q->nthreads = 0;
        }
        MUTEX_Unlock(&q->mutex);
    }
    if (canWait) {
        if (nthreads > 0) {
            int i, flags = WKQ_DEAD;
            for (i=0; i<nthreads; i++) {
                if (THREAD_IsSelf(q->threads[i])) {
                    /* We can handle this, but it's a bit awkward and
                     * usually unnecessary. Let's issue a warning in
                     * the debug build, just in case */
                    TRACE("WARNING! WKQ_Stop called on its own thread\n");
                    /* Setting the WKQ_KILLME flag will cause WKQ_Thread to
                     * kill the associated WorkQueue. Obviously, we have
                     * to set this flag after all other threads have
                     * terminated or they all will try to do the same */
                    flags |= WKQ_KILLME;
                    /* it's a bit weird that a thread is detaching itself
                     * but it seems to work */
                    THREAD_Detach(q->threads[i]);
                } else {
                    THREAD_Join(q->threads[i]);
                }
            }
            ASSERT(!(q->flags & WKQ_DEAD));
            /* publish the final flags and release anyone in WKQ_Wait */
            MUTEX_Lock(&q->mutex);
            q->flags |= flags;
            EVENT_Set(&q->stopEvent);
            MUTEX_Unlock(&q->mutex);
        } else {
            /* another caller owns the join; just wait for completion */
            WKQ_Wait(q);
        }
    }
}
/**
 * XRPC session thread entry point. Runs the session to completion and
 * then signals the global exit event so the main loop can shut down.
 */
static void GWTRACE_Session(XRpcSession* session, void* arg)
{
    GwTrace* ctx = arg;
    /* the context must refer back to the session we were handed */
    ASSERT(ctx->session == session);
    XRPC_RunSession(session);
    EVENT_Set(&exitEvent);
}
/**
 * Signals the events of everyone waiting on the work item and clears the
 * waiter list. The caller must hold the queue mutex. Note that the event
 * associated with each waiter is deallocated by the thread that was
 * waiting for this work item to complete.
 */
STATIC void WKI_Signal(WorkItem * w)
{
    Waiter * current = w->waiters;
    if (current) {
        while (current) {
            EVENT_Set(&current->event);
            current = current->next;
        }
        /* the waiters will release their context */
        w->waiters = NULL;
    }
}
/** * Signal handler */ static void GWTRACE_Interrupt(int sig) { switch(sig) { #ifndef _WIN32 case SIGPIPE: break; #endif case SIGINT: default: EVENT_Set(&exitEvent); break; } signal(sig, GWTRACE_Interrupt); }
/**
 * Converts exclusive lock to non-exclusive without releasing it. The caller
 * must own the lock exclusively. Returns True on success, False if the
 * calling thread has no entry in the lock (a programming error).
 */
Bool RWLOCK_DropWrite(RWLock * lock)
{
    Bool success = False;
    RWEntry * entry;
    MUTEX_Lock(&lock->mutex);
    entry = RWLOCK_FindEntry(lock);
    /* current thread must have the lock */
    ASSERT(entry);
    if (entry) {
        /* and it must be the write lock */
        ASSERT(entry->write > 0);
        if (entry->write > 0) {
            QEntry * e;
            RWLockWaiter * shareWaiter = NULL;
            RWLockWaiter * exclusiveWaiter = NULL;
            /* convert write lock to read lock (keep recursion count) */
            entry->read += entry->write;
            entry->write = 0;
            /* lock is no longer owned exclusively */
            ASSERT(lock->flags & RWLOCK_FLAG_EXCLUSIVE_LOCK);
            lock->flags &= ~RWLOCK_FLAG_EXCLUSIVE_LOCK;
            /*
             * wake up shared waiters only unless the exclusive
             * waiter is first in the line (waiters are released in
             * arrival order, tracked by their index)
             */
            e = QUEUE_First(&lock->shareWaiters);
            if (e) shareWaiter = QCAST(e,RWLockWaiter,entry);
            e = QUEUE_First(&lock->exclusiveWaiters);
            if (e) exclusiveWaiter = QCAST(e,RWLockWaiter,entry);
            if (shareWaiter && (!exclusiveWaiter ||
                shareWaiter->index < exclusiveWaiter->index)) {
                EVENT_Set(shareWaiter->event);
            }
            /* success */
            success = True;
        }
    }
    MUTEX_Unlock(&lock->mutex);
    return success;
}
/**
 * Destroys thread local variables, sets exit event and dereferences
 * the thread handle.
 *
 * Destructors are run in passes (bounded by MAX_DESTRUCTOR_ITERATIONS)
 * because a destructor may itself store new thread-local values; the loop
 * repeats until a pass finds no non-NULL values left. The TLS slot is
 * cleared BEFORE invoking the destructor so a misbehaving destructor
 * cannot cause an infinite loop on the same value.
 */
STATIC void THREAD_Cleanup(ThrData* thr)
{
    /* invoke destructors for thread local variables */
    int i, n = 1;
    for (i=0; i<MAX_DESTRUCTOR_ITERATIONS && n > 0; i++) {
        int k;
        n = 0;  /* number of values destroyed in this pass */
        for (k=0; k<VECTOR_Size(&thr->cleanupList); k++) {
            ThrKey key = VECTOR_Get(&thr->cleanupList, k);
            void * value = TlsGetValue(key->index);
            if (value) {
                /* clear the slot first to guarantee forward progress */
                TlsSetValue(key->index, NULL);
#ifdef _USE_EXCEPTION_HANDLING
                __try {
#endif /* _USE_EXCEPTION_HANDLING */
                    key->clean(value);
                    /* the destructor must not re-store into its own slot */
                    ASSERT(!TlsGetValue(key->index));
#ifdef _USE_EXCEPTION_HANDLING
                } __except(EXCEPTION_EXECUTE_HANDLER) {
                    /* contain a crashing destructor; keep cleaning up */
                    TlsSetValue(key->index,NULL);
                    ASSMSG1("EXCEPTION %08lX in cleanup proc!",
                        GetExceptionCode());
                }
#endif /* _USE_EXCEPTION_HANDLING */
                n++;
            }
        }
    }
    ASSERT(i<MAX_DESTRUCTOR_ITERATIONS);
    /* Dereference ThrKey structures */
    while ((n = VECTOR_Size(&thr->cleanupList)) > 0) {
        ThrKey key = VECTOR_Remove(&thr->cleanupList, n-1);
        if (InterlockedDecrement(&key->ref) == 0) {
            /* last reference: key must have been deleted already */
            ASSERT(key->index == TLS_OUT_OF_INDEXES);
            MEM_Free(key);
        }
    }
    InterlockedDecrement(&WIN32_ThreadCount);
    VECTOR_Destroy(&thr->cleanupList);
    /* wake up anyone joining this thread, then drop our own reference */
    EVENT_Set(&thr->exitEvent);
    THREAD_Deref(thr);
}
/**
 * Sets the idle timeout function for the work queue. If the cb parameter
 * is NULL, idle timeouts are disabled and the other parameters (ms, param)
 * are ignored. The timeout must be positive.
 *
 * NOTE: currently, the old callback may still be invoked (but no more than
 * once) after this function has returned.
 */
void WKQ_SetIdle(WorkQueue * q, long ms, IdleProc cb, void * param)
{
    if (MUTEX_Lock(&q->mutex)) {
        if (cb) {
            ASSERT(ms > 0);
            /* clamp to at least 1 ms in the release build */
            q->idleTimeout = MAX(ms,1);
            q->idleProc = cb;
            q->idleParam = param;
        } else {
            /* disable idle timeouts */
            q->idleTimeout = 0;
            q->idleProc = NULL;
            q->idleParam = NULL;
        }
        /* notify the worker thread so it picks up the new timeout */
        EVENT_Set(&q->event);
        MUTEX_Unlock(&q->mutex);
    }
}
/**
 * Submits a work item to the specified work queue. Re-submitting the same
 * work before it has been executed just moves it to the tail of the work
 * queue. It does NOT schedule it to run twice. Returns True if the item
 * was queued, False if the queue is inactive or the mutex wasn't acquired.
 */
Bool WKI_Submit(WorkItem * w)
{
    Bool queued = False;
    WorkQueue * q = WKI_GetQueue(w);
    ASSERT(q);
    ASSERT(!(w->flags & WKI_DETACHED));
    if (q && MUTEX_Lock(&q->mutex)) {
        if (q->flags & WKQ_ACTIVE) {
            /* reset completion state and (re)queue at the tail */
            w->flags &= ~(WKI_DONE | WKI_CANCELED);
            QUEUE_RemoveEntry(&w->submitQ);
            QUEUE_InsertTail(&q->submit, &w->submitQ);
            /* wake up a worker thread */
            EVENT_Set(&q->event);
            queued = True;
        }
        MUTEX_Unlock(&q->mutex);
    }
    return queued;
}
static void AppTask(void* param) { //CLS1_SendStr("INFO: Application startup!\r\n", CLS1_GetStdio()->stdOut); EVENT_Set(EVENT_Start); ///*Sets the start up event while(1) { EVENT_HandleEvent(APP_HandleEvents); #if PL_HAS_KEYS && PL_NUM_KEYS > 0 KEY_Scan(); ///*Scans the Joystick shield for input #endif #if PL_HAS_MEALY MEALY_Step(); #endif LED1_TOGGLE; FRTOS1_vTaskDelay(10 / portTICK_RATE_MS); } }
/**
 * Dumps one sniffed Ecmt message to the console and, if a trace file is
 * open, appends it to the file. On a file write error, prints an error
 * message and signals the global exit event to terminate the program.
 */
static void GWTRACE_Dump(GwTrace* trace, Str what, const XRpcContainer* param)
{
    /* decode parameters */
    const XRpcIntElement* uidParam =
        XRPC_GetIntElementByName(param, ECMTGW_LISTENER_UID_PARAM);
    const XRpcBinaryElement* dataParam =
        XRPC_GetBinaryElementByName(param, ECMTGW_LISTENER_DATA_PARAM);
    if (uidParam && dataParam) {
        int uid = XRPC_GetInt(uidParam);
        /* apply the UID filter */
        if (GWTRACE_AcceptMessage(trace, uid)) {
            int len = XRPC_GetBinaryDataSize(dataParam);
            const XRpcByte * data = XRPC_GetBinaryData(dataParam);
            /* dump to the console */
            PRINT_Output("%s: UID=0x%08x, %d byte(s)\n",what,uid,len);
            PRINT_Dump(PRINT_Output, data, len, 0);
            /* write to a file */
            if (trace->file) {
                int total = len + ECMT_MSG_HEADER_SIZE;
                /* NOTE(review): the header is written as the text
                 * "0x%08X0x%08X" with no separator between the two
                 * fields — presumably this matches the Ecmt message
                 * header format the file consumer expects; verify
                 * against the reader of these trace files. */
                if (FILE_Printf(trace->file,"0x%08X0x%08X",total,uid) > 0 &&
                    FILE_Write(trace->file, data, len) > 0) {
                    FILE_Flush(trace->file);
                } else {
                    /* I/O error. print an error message and exit */
                    Str fname = FILE_Name(trace->file);
                    if (fname) {
                        PRINT_Error("%s: error writing %s\n",pname,fname);
                    }
                    EVENT_Set(&exitEvent);
                }
            }
        }
    }
}
/**
 * Release n recursively acquired locks.
 *
 * Write locks are released before read locks. When the calling thread's
 * usage count drops to zero its entry is recycled and the longest-waiting
 * waiter (shared or exclusive, by arrival index) is released. When the
 * caller keeps read locks but has released all write locks, the lock
 * degrades from exclusive to shared mode and shared waiters may be woken.
 */
void RWLOCK_UnlockMany(RWLock * lock, int n)
{
    if (n > 0) {
        RWEntry * entry;
        MUTEX_Lock(&lock->mutex);
        entry = RWLOCK_FindEntry(lock);
        /*
         * if we cannot find the entry, it means that current thread
         * does not own the lock. It's a programming error.
         */
        ASSERT(entry);
        if (entry) {
            lock->locks--;
            /* first release write locks */
            if (entry->write > 0) {
                if (entry->write >= n) {
                    entry->write -= n;
                    n = 0;
                } else {
                    n -= entry->write;
                    entry->write = 0;
                }
            }
            /* then read locks */
            if (n > 0) {
                entry->read -= n;
            }
            /*
             * ASSERT that current thread does not release more locks than
             * it has acquired
             */
            ASSERT(lock->locks >= 0);
            ASSERT(entry->read >= 0);
            ASSERT(entry->write >= 0);
            /*
             * no more work to do unless calling thread has released the
             * resource (i.e. usage count came down to zero)
             */
            if ((entry->read + entry->write) <= 0) {
                int i;
                int inUse;
                QEntry * e;
                RWLockWaiter * shareWaiter = NULL;
                RWLockWaiter * exclusiveWaiter = NULL;
                /* recycle this thread's entry */
                entry->id = 0;
                lock->entriesActive--;
                ASSERT(lock->entriesActive >= 0);
                /*
                 * update lock->entriesInUse
                 * NOTE that RWLOCK_FindStaticEntry() may access it without
                 * synchronization.
                 */
                i = lock->entriesInUse - 1;
                inUse = 0;
                /* shrink entriesInUse down to the last live entry */
                while (i >= 0) {
                    RWEntry * lockEntry = GET_ENTRY(lock,i);
                    if (lockEntry->id) {
                        inUse = i + 1;
                        break;
                    }
                    i--;
                }
                lock->entriesInUse = inUse;
                /*
                 * if resource was acquired exclusively, it must be free now
                 */
                if (lock->flags & RWLOCK_FLAG_EXCLUSIVE_LOCK) {
                    ASSERT(!lock->locks);
                    lock->flags &= ~RWLOCK_FLAG_EXCLUSIVE_LOCK;
                }
                /*
                 * release the waiters in the order they have arrived
                 */
                e = QUEUE_First(&lock->shareWaiters);
                if (e) shareWaiter = QCAST(e,RWLockWaiter,entry);
                e = QUEUE_First(&lock->exclusiveWaiters);
                if (e) exclusiveWaiter = QCAST(e,RWLockWaiter,entry);
                if (exclusiveWaiter && (!shareWaiter ||
                    exclusiveWaiter->index < shareWaiter->index)) {
                    EVENT_Set(exclusiveWaiter->event);
                } else if (shareWaiter) {
                    /* this should unblock all shared waiters */
                    EVENT_Set(shareWaiter->event);
                }
            } else if (lock->flags & RWLOCK_FLAG_EXCLUSIVE_LOCK) {
                /*
                 * if the owner of the lock has released all its WRITE locks
                 * but still have some READ locks, switch to shared mode.
                 */
                if (!entry->write) {
                    QEntry * e;
                    RWLockWaiter * shareWaiter = NULL;
                    RWLockWaiter * exclusiveWaiter = NULL;
                    ASSERT(entry->read > 0);
                    lock->flags &= ~RWLOCK_FLAG_EXCLUSIVE_LOCK;
                    /*
                     * wake up shared waiters only unless the exclusive
                     * waiter is first in the line
                     */
                    e = QUEUE_First(&lock->shareWaiters);
                    if (e) shareWaiter = QCAST(e,RWLockWaiter,entry);
                    e = QUEUE_First(&lock->exclusiveWaiters);
                    if (e) exclusiveWaiter = QCAST(e,RWLockWaiter,entry);
                    if (shareWaiter && (!exclusiveWaiter ||
                        shareWaiter->index < exclusiveWaiter->index)) {
                        EVENT_Set(shareWaiter->event);
                    }
                }
            }
        }
        MUTEX_Unlock(&lock->mutex);
    }
}