void VFS_int_Select_RemThread(tVFS_SelectList *List, tThread *Thread)
{
    int i;
    tVFS_SelectListEnt *block, *prev = NULL;

    ENTER("pList pThread", List, Thread);

    // Lock to avoid concurrency issues
    Mutex_Acquire(&List->Lock);

    block = &List->FirstEnt;

    // Look for the thread
    do
    {
        for( i = 0; i < NUM_THREADS_PER_ALLOC; i ++ )
        {
            if( block->Threads[i] == Thread )
            {
                block->Threads[i] = NULL;

                // Check if this block is empty
                if( block != &List->FirstEnt )
                {
                    for( i = 0; i < NUM_THREADS_PER_ALLOC; i ++ )
                        if( block->Threads[i] )
                            break;
                    // If empty, free it
                    if( i == NUM_THREADS_PER_ALLOC ) {
                        LOG("Deleting block");
                        prev->Next = block->Next;
                        free(block);
                    }
                    //TODO: If not empty, check if it can be merged downwards
                }

                Mutex_Release(&List->Lock);
                LEAVE('-');
                return ;
            }
        }

        prev = block;
        block = block->Next;
    } while(block);

    // Not on list, is this an error?
    Mutex_Release(&List->Lock);

    LOG("Not on list");
    LEAVE('-');
}
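/*
 * Rough shape of the select-list types that the select helpers in this
 * listing operate on, reconstructed only from how the fields are used
 * (List->Lock, List->FirstEnt, block->Threads[], block->Next).  The exact
 * mutex type name, the value of NUM_THREADS_PER_ALLOC and any additional
 * members are assumptions, not taken from the original headers.
 */
#define NUM_THREADS_PER_ALLOC   4   // assumed: thread slots per allocation block

typedef struct sVFS_SelectListEnt tVFS_SelectListEnt;
typedef struct sVFS_SelectList    tVFS_SelectList;

struct sVFS_SelectListEnt
{
    tVFS_SelectListEnt *Next;                        // Singly linked chain of overflow blocks
    tThread            *Threads[NUM_THREADS_PER_ALLOC]; // Waiting threads; NULL marks a free slot
};

struct sVFS_SelectList
{
    tMutex             Lock;     // Assumed mutex type; guards all traversal/modification
    tVFS_SelectListEnt FirstEnt; // First block is embedded in the list, so it is never freed
};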
//---------------------------------------------------------------------------
static void Mutex_Profiling()
{
    K_USHORT i;
    Mutex_t stMutex;

    for (i = 0; i < 10; i++)
    {
        ProfileTimer_Start( &stMutexInitTimer );
        Mutex_Init( &stMutex );
        Mutex_Init( &stMutex );
        Mutex_Init( &stMutex );
        Mutex_Init( &stMutex );
        Mutex_Init( &stMutex );
        Mutex_Init( &stMutex );
        Mutex_Init( &stMutex );
        Mutex_Init( &stMutex );
        Mutex_Init( &stMutex );
        Mutex_Init( &stMutex );
        ProfileTimer_Stop( &stMutexInitTimer );
    }

    for (i = 0; i < 100; i++)
    {
        ProfileTimer_Start( &stMutexClaimTimer );
        Mutex_Claim( &stMutex );
        ProfileTimer_Stop( &stMutexClaimTimer );

        ProfileTimer_Start( &stMutexReleaseTimer );
        Mutex_Release( &stMutex );
        ProfileTimer_Stop( &stMutexReleaseTimer );
    }
}
/**
 * \brief Signal all threads on a list
 */
void VFS_int_Select_SignalAll(tVFS_SelectList *List)
{
    int i;
    tVFS_SelectListEnt *block;

    if( !List ) return ;

    ENTER("pList", List);

    // Lock to avoid concurrency issues
    Mutex_Acquire(&List->Lock);

    block = &List->FirstEnt;

    // Signal every registered thread
    do
    {
        for( i = 0; i < NUM_THREADS_PER_ALLOC; i ++ )
        {
            if( block->Threads[i] )
            {
                LOG("block(%p)->Threads[%i] = %p", block, i, block->Threads[i]);
                Threads_PostEvent( block->Threads[i], THREAD_EVENT_VFS );
            }
        }

        block = block->Next;
    } while(block);

    Mutex_Release(&List->Lock);

    LEAVE('-');
}
int Mutex_Cleanup(PVOID pVoid)
{
    if (!pVoid)
        return ERROR_INVALID_ARGUMENT;

    return Mutex_Release(pVoid);
}
//---------------------------------------------------------------------------
void App1Main(void *unused_)
{
    while (1)
    {
        // Claim the mutex. This will prevent any other thread from claiming
        // this lock simultaneously. As a result, the other thread has to
        // wait until we're done before it can do its work. You will notice
        // that the Start/Done prints for the thread will come as a pair (i.e.
        // you won't see "Thread2: Start" then "Thread1: Start").
        Mutex_Claim( &stMyMutex );

        // Start our work (incrementing a counter). Notice that the Start and
        // Done prints wind up as a pair when simulated with flAVR.
        KernelAware_Print("Thread1: Start\n");
        ulCounter++;
        while (ulCounter <= 10000)
        {
            ulCounter++;
        }
        ulCounter = 0;
        KernelAware_Print("Thread1: Done\n");

        // Release the lock, allowing the other thread to do its thing.
        Mutex_Release( &stMyMutex );
    }
}
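/*
 * The companion thread ("Thread2") referred to in the comments above is not
 * shown in this listing.  A minimal sketch of what it presumably looks like,
 * assuming it mirrors App1Main and shares the same stMyMutex and ulCounter
 * globals; the function name App2Main and its structure are assumptions.
 */
void App2Main(void *unused_)
{
    while (1)
    {
        // Contend for the same lock; this blocks until Thread1 releases it,
        // which is what keeps the Start/Done prints paired per thread.
        Mutex_Claim( &stMyMutex );

        KernelAware_Print("Thread2: Start\n");

        // Do the same dummy work as Thread1.
        ulCounter++;
        while (ulCounter <= 10000)
        {
            ulCounter++;
        }
        ulCounter = 0;

        KernelAware_Print("Thread2: Done\n");

        // Let the other thread run its critical section.
        Mutex_Release( &stMyMutex );
    }
}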
void MT_AddTask(Task *pt, gboolean lock)
{
    if (lock) {
        Mutex_Lock(td.queueLock, "add task");
    }
    if (td.addedTasks == 0)
        td.result = 0;          /* Reset result for new tasks */
    td.addedTasks++;
    td.tasks = g_list_append(td.tasks, pt);
    if (g_list_length(td.tasks) == 1) { /* New tasks */
        SetManualEvent(td.activity);
    }
    if (lock) {
        multi_debug("add task: release");
        Mutex_Release(td.queueLock);
    }
}
static Task *MT_GetTask(void)
{
    Task *task = NULL;

    Mutex_Lock(td.queueLock, "get task");
    if (g_list_length(td.tasks) > 0) {
        task = (Task *) g_list_first(td.tasks)->data;
        td.tasks = g_list_delete_link(td.tasks, g_list_first(td.tasks));
        if (g_list_length(td.tasks) == 0) {
            ResetManualEvent(td.activity);
        }
    }

    multi_debug("get task: release");
    Mutex_Release(td.queueLock);

    return task;
}
/**
 * \fn void Ext2_CloseFile(tVFS_Node *Node)
 * \brief Close a file (Remove it from the cache)
 */
void Ext2_CloseFile(tVFS_Node *Node)
{
    tExt2_Disk *disk = Node->ImplPtr;
    ENTER("pNode", Node);

    if( Mutex_Acquire(&Node->Lock) != 0 ) {
        LEAVE('-');
        return ;
    }

    if( Node->Flags & VFS_FFLAG_DIRTY )
    {
        // Commit changes
        Ext2_int_WritebackNode(disk, Node);
        Node->Flags &= ~VFS_FFLAG_DIRTY;
    }

    int was_not_referenced = (Node->ImplInt == 0);
    tVFS_ACL *acls = Node->ACLs;
    if( Inode_UncacheNode(disk->CacheID, Node->Inode) == 1 )
    {
        if( was_not_referenced )
        {
            LOG("Removing inode");
            // Remove inode
            Log_Warning("Ext2", "TODO: Remove inode when not referenced (%x)", (Uint32)Node->Inode);
        }
        if( acls != &gVFS_ACL_EveryoneRW ) {
            free(acls);
        }
        LOG("Node cleaned");
    }
    else {
        LOG("Still referenced, releasing lock");
        Mutex_Release(&Node->Lock);
    }
    LEAVE('-');
    return ;
}
void mt_add_tasks(unsigned int num_tasks, AsyncFun pFun, void *taskData, gpointer linked)
{
    unsigned int i;
    {
#ifdef DEBUG_MULTITHREADED
        char buf[32];           /* large enough for "add %u tasks" with any 32-bit value */
        sprintf(buf, "add %u tasks", num_tasks);
        Mutex_Lock(td.queueLock, buf);
#else
        Mutex_Lock(td.queueLock, NULL);
#endif
    }
    for (i = 0; i < num_tasks; i++) {
        Task *pt = (Task *) malloc(sizeof(Task));
        pt->fun = pFun;
        pt->data = taskData;
        pt->pLinkedTask = linked;
        MT_AddTask(pt, FALSE);
    }
    multi_debug("add many tasks: release");
    Mutex_Release(td.queueLock);
}
/**
 * \return Boolean failure
 */
int VFS_int_Select_AddThread(tVFS_SelectList *List, tThread *Thread, int MaxAllowed)
{
    int i, count = 0;
    tVFS_SelectListEnt *block, *prev;

    ENTER("pList pThread iMaxAllowed", List, Thread, MaxAllowed);

    // Lock to avoid concurrency issues
    Mutex_Acquire(&List->Lock);

    block = &List->FirstEnt;

    // Look for free space
    do
    {
        for( i = 0; i < NUM_THREADS_PER_ALLOC; i ++ )
        {
            if( block->Threads[i] == NULL )
            {
                block->Threads[i] = Thread;
                Mutex_Release(&List->Lock);
                LEAVE('i', 0);
                return 0;
            }
            count ++;
            if( MaxAllowed && count >= MaxAllowed ) {
                Mutex_Release(&List->Lock);
                LEAVE('i', 1);
                return 1;
            }
        }

        prev = block;
        block = block->Next;
    } while(block);

    LOG("New block");

    // Create new block
    block = malloc( sizeof(tVFS_SelectListEnt) );
    if( !block ) {
        Log_Warning("VFS", "VFS_int_Select_AddThread: malloc() failed");
        Mutex_Release(&List->Lock);
        LEAVE('i', -1);
        return -1;
    }
    block->Next = NULL;
    block->Threads[0] = Thread;
    for( i = 1; i < NUM_THREADS_PER_ALLOC; i ++ ) {
        block->Threads[i] = NULL;
    }

    // Add to list
    prev->Next = block;

    // Release
    Mutex_Release(&List->Lock);
    LEAVE('i', 0);
    return 0;
}
extern void MT_Release(void)
{
    Mutex_Release(td.multiLock);
}
void IPStack_Buffer_UnlockBuffer(tIPStackBuffer *Buffer)
{
    ASSERT(Buffer);
    Mutex_Release(&Buffer->lBufferLock);
}