/** * [GetIndex] * Returns the readingIndex in a thread-safe manner, this way we know that we will never have 2 threads * reading the same value * @return [readinIndex incremented] */ int GetIndex() { sem_wait(&IndexMutex); // Entering critical section int index = readingIndex; // Temporary saving to return later IncIndex(&readingIndex); sem_post(&IndexMutex); // Exiting critical section return index; }
// Worker-thread loop: drains SD slot events queued by PostEvent and hands
// each one to SlotStatusChangeProcessing, until m_bTerminated is set.
// Return: FALSE (thread exit code).
// Thread-safety: m_dwReadIndex is written only by this thread (the single
// consumer), so reading the queue needs no lock; the producer side is
// serialized inside PostEvent under Lock()/Unlock().
DWORD CSDWorkItem::ThreadRun()
{
    while (!m_bTerminated)
    {
        PREFAST_ASSERT(m_psdSlotEvent!=NULL);
        if (!IsEmpty())
        {
            // We don't need take lock since only this thread modify the Read Index.
            SD_SLOT_EVENT sdEvent = m_psdSlotEvent [m_dwReadIndex];
            m_dwReadIndex = IncIndex(m_dwReadIndex);
            // Return the consumed slot to the pool so a blocked PostEvent can proceed.
            BOOL fRet = ReleaseSemaphore(m_hEmptySlotSem,1,NULL);
            ASSERT(fRet);
            // Dispatch the event AFTER freeing the slot; sdEvent is a local copy.
            SlotStatusChangeProcessing(sdEvent);
        }
        else
            // Queue empty: sleep until PostEvent signals that a new entry exists.
            ::WaitForSingleObject( m_hWakeupEvent, INFINITE );
    }
    return FALSE;
}
/////////////////////////////////////////////////////////////////////////////// // PostMessage - post a message // Input: pMessage - message to post // Output: // Return: // Notes: // /////////////////////////////////////////////////////////////////////////////// BOOL CSDWorkItem::PostEvent(SD_SLOT_EVENT sdEvent, DWORD dwWaitTick) { PREFAST_ASSERT(m_psdSlotEvent!=NULL); BOOL fRet = FALSE; if (::WaitForSingleObject( m_hEmptySlotSem, dwWaitTick ) == WAIT_OBJECT_0 ) { Lock(); if (!IsFull()) { m_psdSlotEvent[m_dwWriteIndex] = sdEvent; m_dwWriteIndex = IncIndex(m_dwWriteIndex); SetEvent(m_hWakeupEvent); fRet = TRUE; } else ASSERT(FALSE); Unlock(); } return fRet; }
int main(int argc, char *arg[]) { int taskList[TASKLIST_SIZE] = {3,6,1,7,5,4,8,5,2,1}; readingIndex = 0; // Queue index init. writingIndex = 0; // Queue index init. sem_init(&queueEmptySpots, 0, SPOTS); // Init with the number of available spots in our array and we will decrement the semaphoe until there's no more spots avaialble sem_init(&queueFullSpots, 0, 0); // This semaphore will do the oposite of the queueEmptySpots and will be incremented when ever we add value to our array sem_init(&IndexMutex, 0, 1); // Simply to prevent a double access from 2 thread to the readingIndex value pthread_t threadId[THREAD_COUNT]; for(int i=0; i<THREAD_COUNT; i++) { int pthreadReturn = pthread_create(&threadId[i], NULL, ThreadRoutine, (void *) i); if (pthreadReturn != 0) { printf("Error creation failed with error: %i .\n", pthreadReturn); break; } } //Adding items to the queue (stops if there no more room aka thread are all busy) int processedItems = TASKLIST_SIZE-1; while(processedItems > 0) { sem_wait(&queueEmptySpots); // Entering critical section printf("Main adding value %i to processing-buffer .\n", taskList[processedItems]); queue[writingIndex] = taskList[processedItems]; IncIndex(&writingIndex); processedItems--; sem_post(&queueFullSpots); // Exiting critical section } // We send a negative number to all threads to inform them that everything was processed int threadToKill = THREAD_COUNT; while(threadToKill > 0) { sem_wait(&queueEmptySpots); // Entering critical section printf("Main informing threads to terminate .\n"); queue[writingIndex] = -1; // Sending value to the thread by adding it to the processing-queue IncIndex(&writingIndex); // Increment the writing index threadToKill--; sem_post(&queueFullSpots); // Exiting critical section } printf("Waiting for threads to be done... \n"); // Waiting for threads to exit for(int j=0; j<THREAD_COUNT; j++) { pthread_join(threadId[j], NULL); printf("Thread %i done... 
\n", j); } sem_destroy(&queueEmptySpots); // Clean up, desalocate the memory pointed by mutex sem_destroy(&queueFullSpots); // Clean up, desalocate the memory pointed by mutex printf("All threads exited... \n"); return 0; }
// Reports whether the ring buffer has no free slot left: the buffer is full
// when advancing the write index would make it collide with the read index.
inline BOOL IsFull()
{
    const DWORD dwNextWrite = IncIndex(m_dwWriteIndex);
    return (dwNextWrite == m_dwReadIndex);
};