/**
 * Advances to the next checklist, clamped at the last one.
 * If a next checklist exists and a sounds handler is available, its sound file
 * (when non-empty) is queued on the "other pilot" sound channel first.
 */
void ChecklistManager::incChecklistIndex()
{
    if (!isAtLastChecklist() &&
        m_fmc_sounds_handler != 0 &&
        m_fmc_sounds_handler->fmcSounds() != 0 &&
        !checklist(m_current_checklist_index+1).checklistSoundfile().isEmpty())
    {
        m_fmc_sounds_handler->fmcSounds()->addSoundToQueueDirectly(
            checklist(m_current_checklist_index+1).checklistSoundfile(),
            FMCSoundBase::SOUND_SOURCE_OTHER_PILOT, true);
    }

    // Advance the index, clamped to the last valid checklist.
    // FIX: the original expression "qMin(count-1, ++m_current_checklist_index)"
    // both modified and read m_current_checklist_index within one full
    // expression, which is unsequenced (undefined behavior) prior to C++17.
    m_current_checklist_index = qMin(m_checklist_list.count() - 1,
                                     m_current_checklist_index + 1);
}
/**
 * A runtime guard block may have monitor stores and privarg stores along with the guard
 * itself. This method will rearrange these stores and split the block, managing any
 * uncommoning necessary for eventual block order.
 *
 * The provided block will become the privarg block, containing any privarg stores and additional
 * temps for uncommoning. It must be evaluated first. The returned block will contain monitor
 * stores and the guard. If no split is required, the provided block will be returned.
 *
 * @param comp Compilation object
 * @param block Block to manipulate
 * @param cfg Current CFG
 * @return The block containing the guard.
 */
static TR::Block* splitRuntimeGuardBlock(TR::Compilation *comp, TR::Block* block, TR::CFG *cfg)
   {
   TR::NodeChecklist checklist(comp);
   TR::TreeTop *start = block->getFirstRealTreeTop();
   TR::TreeTop *guard = block->getLastRealTreeTop();
   TR::TreeTop *firstPrivArg = NULL;
   TR::TreeTop *firstMonitor = NULL;

   // Manage the unexpected case that monitors and priv args are reversed
   bool privThenMonitor = false;

   TR_ASSERT(isMergeableGuard(guard->getNode()), "last node must be guard %p", guard->getNode());

   // Search for privarg and monitor stores.
   // Only commoned nodes under the guard are required to be anchored, due to the guard being
   // evaluated before the monitor stores later on.
   bool anchoredTemps = false;
   for (TR::TreeTop *tt = start; tt && tt->getNode()->getOpCodeValue() != TR::BBEnd; tt = tt->getNextTreeTop())
      {
      TR::Node * node = tt->getNode();

      // Remember only the FIRST treetop of each kind; later ones keep their
      // relative order and are carried along by the join()/split() below.
      if (node->getOpCode().hasSymbolReference() && node->getSymbol()->holdsMonitoredObject())
         firstMonitor = firstMonitor == NULL ? tt : firstMonitor;
      else if (node->chkIsPrivatizedInlinerArg())
         {
         if (firstPrivArg == NULL)
            {
            firstPrivArg = tt;
            // Record whether privargs came before any monitor store
            privThenMonitor = (firstMonitor == NULL);
            }
         }
      else if (isMergeableGuard(node))
         // Anchor nodes commoned under the guard so they survive the split
         anchoredTemps |= anchorCommonNodes(comp, node, start, checklist);
      else
         TR_ASSERT(0, "Node other than monitor or privarg store %p before runtime guard", node);
      }

   // If there are monitors then privargs, they must be swapped around, such that all privargs are
   // evaluated first. The three join() calls splice the monitor run out of the
   // treetop chain and re-insert it after the privarg run, just before the guard.
   if (firstPrivArg && firstMonitor && !privThenMonitor)
      {
      TR::TreeTop *monitorEnd = firstPrivArg->getPrevTreeTop();
      firstMonitor->getPrevTreeTop()->join(firstPrivArg);
      guard->getPrevTreeTop()->join(firstMonitor);
      monitorEnd->join(guard);
      }

   // If there were temps created or privargs in the block, perform a split:
   //  - with privargs, split at the first monitor (or the guard if no monitors),
   //    leaving the privargs in the original (first-evaluated) block;
   //  - with only anchored temps, split at the first real treetop.
   TR::TreeTop *split = NULL;
   if (firstPrivArg)
      split = firstMonitor ? firstMonitor : guard;
   else if (anchoredTemps)
      split = start;

   if (split)
      return block->split(split, cfg, true /* fixupCommoning */, false /* copyExceptionSuccessors */);
   return block;
   }
int main(int argc, char *argv[]) { OptNode *o; Node *n; xiargs *args; int verbose = 0, check=0; binary = " "; key1 = (unsigned char *)calloc(1, 40); if(!key1) { fprintf(stderr, "Memory allocation failed!\n"); return 2; } key2 = (unsigned char *)calloc(1, 40); if(!key2) { free(key1); fprintf(stderr, "Memory allocation failed!\n"); return 2; } args = xi_getopts(argc, argv, "MD5 summer (Xiqual example program)", "[-b] [-v] [-c] | [file...]", opt); if(!args) return 0; o = (OptNode *)args->opts.head; while(o) { switch(o->name[0]) { case 'b': binary = "*"; break; case 'v': verbose = 1; break; case 'c': check = 1; } o = o->next; } n = args->leftovers.head; if(check) { while(n) { checklist(n->data); n = n->next; } } else { while(n) { sumfile(n->data); n = n->next; } } free(key1); free(key2); xi_freeopts(args); return 0; }
/*===========================================================================*
 *				void slab_sanitycheck			     *
 *===========================================================================*/
/* Run checklist() over every free list of every slab size class, reporting
 * the caller's file/line so failures can be traced to the call site.
 */
void slab_sanitycheck(char *file, int line)
{
	int size_index, list_index;

	for (size_index = 0; size_index < SLABSIZES; size_index++)
		for (list_index = 0; list_index < LIST_NUMBER; list_index++)
			checklist(file, line, &slabs[size_index], list_index,
				size_index + MINSIZE);
}
// Returns 0 if no errors were found, otherwise returns the error.
// Checking is skipped entirely (reporting success) when verbose is 0.
int mm_checkheap(int verbose)
{
    if (verbose == 0)
        return 0;

    // Bitwise-OR the two results so an error from either check is reported.
    // FIX: removed unreachable leftover code after the return (a stray
    // commented-out brace "//}" followed by a dead "return 0;").
    return check_my_heap(verbose) | checklist(0);
}
/*
 * taken the code from ExitOneClient() for this and placed it here.
 * - avalon
 * remove client **AND** _related structures_ from lists,
 * *free* them too. -krys
 *
 * Unlinks cptr from the global doubly-linked client list, updates the
 * connection statistics, releases the attached user/server/service
 * structures, and finally frees the client itself.
 */
void	remove_client_from_list(aClient *cptr)
{
	checklist(); /* is there another way, at this point? */
	/* servers directly connected have hopcount=1, but so do their
	 * users, hence the check for IsServer --B. */
	if (cptr->hopcount == 0 || (cptr->hopcount == 1 && IsServer(cptr)))
		istat.is_localc--;
	else
		istat.is_remc--;
	/* Unlink from the doubly-linked client list; when cptr has no prev
	 * it is the list head, so advance the global head pointer.
	 * NOTE(review): the head branch dereferences client->prev without a
	 * NULL check -- presumably the list is never emptied here; confirm. */
	if (cptr->prev)
		cptr->prev->next = cptr->next;
	else
	    {
		client = cptr->next;
		client->prev = NULL;
	    }
	if (cptr->next)
		cptr->next->prev = cptr->prev;
	if (cptr->user)
	    {
		istat.is_users--;
		/* decrement reference counter, and eventually free it */
		cptr->user->bcptr = NULL;
		(void)free_user(cptr->user);
	    }
	if (cptr->serv)
	    {
		cptr->serv->bcptr = NULL;
		free_server(cptr->serv);
	    }
	if (cptr->service)
		/*
		** has to be removed from the list of aService structures,
		** no reference counter for services, thus this part of the
		** code can safely be included in free_service()
		*/
		free_service(cptr);
#ifdef DEBUGMODE
	/* fd == -2 marks a local (in-use) connection slot in debug stats */
	if (cptr->fd == -2)
		cloc.inuse--;
	else
		crem.inuse--;
#endif
	(void)free_client(cptr);
	numclients--;
	return;
}
/* Each independent thread fills in its own
 * list. This stresses clock_gettime() lock contention.
 */
void *independent_thread(void *arg)
{
	struct timespec my_list[LISTSIZE];
	int i;

	while (!done) {
		/* capture LISTSIZE consecutive monotonic timestamps */
		i = 0;
		while (i < LISTSIZE) {
			clock_gettime(CLOCK_MONOTONIC, &my_list[i]);
			i++;
		}
		/* hand the filled list off for validation */
		checklist(my_list, LISTSIZE);
	}
	return NULL;
}
/*
 * checkheap - Check the heap for consistency
 * (iterates all the blocks starting from prologue to epilogue)
 *
 * Verifies the prologue header, each block's alignment/header/footer
 * via checkblock(), the consistency of every block's "previous block
 * allocated" bit against the previous block's actual allocate bit,
 * the epilogue header, and finally the free list via checklist().
 * Exits the process on a prev-allocate-bit mismatch.
 */
void _checkheap(void)
{
	char *bp = heap_listp;
	size_t prev_alloc, curr_alloc;

	dbg1("\n[CHECK HEAP]\n");
	dbg1("\n[verbose=%d]\n", verbose);

	if (verbose) {
		printf("Heap (starting address:%p):\n", heap_listp);
		printf("-prologue-");
		printblock(bp);
	}

	/* checking prologue block (size, allocate bit) */
	if ((GET_SIZE(HDRP(heap_listp)) != DSIZE) || !GET_ALLOC(HDRP(heap_listp))) {
		printf("Bad prologue header\n");
		printf("-prologue-");
		printblock(bp);
	}
	checkblock(heap_listp); /* alignment, header/footer */

	/* seed with the prologue's allocate bit before walking the blocks */
	prev_alloc = GET_ALLOC(HDRP(bp));

	/* checking allocated/free blocks; a size of 0 marks the epilogue */
	for (bp = NEXT_BLKP(heap_listp); GET_SIZE(HDRP(bp)) > 0; bp = NEXT_BLKP(bp)) {
		curr_alloc = GET_PREV_ALLOC(HDRP(bp));
		if (verbose)
			printblock(bp);
		/* normalize both to booleans before comparing */
		if (!prev_alloc != !curr_alloc) {
			/* previous block's allocate bit should match current block's prev allocate bit */
			printf("prev allocate bit mismatch\n");
			printblock(bp);
			exit(0);
		}
		prev_alloc = GET_ALLOC(HDRP(bp));
		checkblock(bp);
	}
	printf("done\n");

	/* checking epilogue block (size must be 0, allocate bit set) */
	if ((GET_SIZE(HDRP(bp)) != 0) || !(GET_ALLOC(HDRP(bp)))) {
		printf("Bad epilogue header\n");
		printf("-epilogue-");
		printblock(bp);
	}
	checklist();
	dbg1("[CHECK DONE]\n\n");
}
/* The shared thread shares a global list
 * that each thread fills while holding the lock.
 * This stresses clock syncronization across cpus.
 */
void *shared_thread(void *arg)
{
	while (!done) {
		/* protect the list */
		pthread_mutex_lock(&list_lock);

		/* once the list is full, validate it and start over */
		if (listcount >= LISTSIZE) {
			checklist(global_list, LISTSIZE);
			listcount = 0;
		}

		/* append one timestamp at the current slot */
		clock_gettime(CLOCK_MONOTONIC, &global_list[listcount]);
		listcount++;

		pthread_mutex_unlock(&list_lock);
	}
	return NULL;
}
/**
 * Search for direct loads in the taken side of a guard
 *
 * @param firstBlock The guard's branch destination
 * @param coldPathLoads BitVector of symbol reference numbers for any direct loads seen until the merge back to mainline
 */
static void collectColdPathLoads(TR::Block* firstBlock, TR_BitVector &coldPathLoads)
   {
   TR_Stack<TR::Block*> worklist(TR::comp()->trMemory(), 8, false, stackAlloc);
   TR::NodeChecklist visited(TR::comp());

   coldPathLoads.empty();
   worklist.push(firstBlock);
   while (!worklist.isEmpty())
      {
      TR::Block *current = worklist.pop();

      // Collect direct loads from every real treetop in this block
      TR::TreeTop *tt = current->getFirstRealTreeTop();
      while (tt->getNode()->getOpCodeValue() != TR::BBEnd)
         {
         collectDirectLoads(tt->getNode(), coldPathLoads, visited);
         tt = tt->getNextTreeTop();
         }

      // Search for any successors that have not merged with the mainline:
      // a block with more than one predecessor is a merge point, so stop there.
      for (auto itr = current->getSuccessors().begin(); itr != current->getSuccessors().end(); ++itr)
         {
         TR::Block *succ = (*itr)->getTo()->asBlock();
         if (succ != TR::comp()->getFlowGraph()->getEnd() && succ->getPredecessors().size() == 1)
            worklist.push(succ);
         }
      }
   }
/**
 * Stores the given sounds handler and propagates it to every managed checklist.
 */
void ChecklistManager::setSoundHandler(FMCSoundsHandler* fmc_sounds_handler)
{
    m_fmc_sounds_handler = fmc_sounds_handler;

    // Hand the new handler to each checklist in turn.
    for (int i = 0; i < count(); ++i)
    {
        checklist(i).setSoundHandler(fmc_sounds_handler);
    }
}