/* Fallback stub used when memory (NUMA) affinity is unavailable in this
 * build.  It still consumes the affinity-related command-line options so
 * they do not leak through to the user program, and warns once (on PE 0)
 * if +maffinity was explicitly requested. */
void CmiInitMemAffinity(char **argv)
{
  char *ignored = NULL;
  const int requested = CmiGetArgFlagDesc(argv, "+maffinity", "memory affinity");
  if (requested && CmiMyPe() == 0) {
    CmiPrintf("memory affinity is not supported, +maffinity flag disabled.\n");
  }
  /* consume the remaining possible arguments */
  CmiGetArgStringDesc(argv, "+memnodemap", &ignored, "define memory node mapping");
  CmiGetArgStringDesc(argv, "+mempol", &ignored, "define memory policy {bind, preferred or interleave} ");
}
/* Fallback stub used when sched_setaffinity() support is unavailable.
 * Consumes every CPU-affinity option (so they are not reported as unknown
 * arguments) and warns once on PE 0 if +setcpuaffinity was requested. */
void CmiInitCPUAffinity(char **argv)
{
  char *peMapSpec = NULL;
  char *peMapFileSpec = NULL;
  char *commMapSpec = NULL;
  int skippedCore = -1;
  const int wantAffinity = CmiGetArgFlagDesc(argv, "+setcpuaffinity", "set cpu affinity");
  /* +excludecore may appear multiple times; swallow every occurrence */
  while (CmiGetArgIntDesc(argv, "+excludecore", &skippedCore, "avoid core when setting cpuaffinity")) {
    /* nothing to do -- the option has no effect in this build */
  }
  CmiGetArgStringDesc(argv, "+pemap", &peMapSpec, "define pe to core mapping");
  CmiGetArgStringDesc(argv, "+pemapfile", &peMapFileSpec, "define pe to core mapping file");
  CmiGetArgStringDesc(argv, "+commap", &commMapSpec, "define comm threads to core mapping");
  if (wantAffinity && CmiMyPe() == 0)
    CmiPrintf("sched_setaffinity() is not supported, +setcpuaffinity disabled.\n");
}
//! process command line arguments! void TraceCounter::traceInit(char **argv) { CpvInitialize(CountLogPool*, _logPool); CpvInitialize(char*, _logName); CpvInitialize(double, version); CpvInitialize(char**, _counterNames); CpvInitialize(char**, _counterDesc); CpvInitialize(int, _numCounters); CpvInitialize(int, _reductionID); CpvAccess(_logName) = (char *) malloc(strlen(argv[0])+1); _MEMCHECK(CpvAccess(_logName)); strcpy(CpvAccess(_logName), argv[0]); CpvAccess(version) = VER; int i; // parse command line args char* counters = NULL; commandLine_ = NULL; bool badArg = false; int numCounters = 0; if (CmiGetArgStringDesc(argv, "+counters", &counters, "Measure these performance counters")) { if (CmiMyPe()==0) { CmiPrintf("Counters: %s\n", counters); } int offset = 0; int limit = strlen(counters); char* ptr = counters; while (offset < limit && (ptr = strtok(&counters[offset], ",")) != NULL) { offset += strlen(ptr)+1; ptr = &ptr[strlen(ptr)+1]; numCounters++; } if (CmiMyPe()==0) { CmiPrintf("There are %d counters\n", numCounters); } commandLine_ = new CounterArg[numCounters]; ptr = counters; for (i=0; i<numCounters; i++) { commandLine_[i].arg = ptr; if (!matchArg(&commandLine_[i])) { if (CmiMyPe()==0) { CmiPrintf("Bad arg: [%s]\n", ptr); } badArg = true; } ptr = &ptr[strlen(ptr)+1]; } } commandLineSz_ = numCounters; // check to see if args are valid, output if not if (badArg || CmiGetArgFlagDesc(argv, "+count-help", "List available performance counters")) { if (CmiMyPe() == 0) { printHelp(); } ConverseExit(); return; } else if (counters == NULL) { if (CmiMyPe() == 0) { usage(); } ConverseExit(); return; } // get optional command line args overview_ = CmiGetArgFlag(argv, "+count-overview"); switchRandom_ = CmiGetArgFlag(argv, "+count-switchrandom"); switchByPhase_ = CmiGetArgFlag(argv, "+count-switchbyphase"); noLog_ = CmiGetArgFlag(argv, "+count-nolog"); writeByPhase_ = CmiGetArgFlag(argv, "+count-writebyphase"); char* logName = NULL; if (CmiGetArgString(argv, 
"+count-logname", &logName)) { CpvAccess(_logName) = logName; if (noLog_) { if (CkMyPe()==0) { CmiPrintf("+count-logname and +count-nolog are MUTUALLY EXCLUSIVE\n"); usage(); CmiAbort(""); } } } if (switchByPhase_ && overview_) { if (CkMyPe()==0) { CmiPrintf( "+count-switchbyphase and +count-overview are MUTUALLY EXCLUSIVE\n" "+count-overview automatically switches by phase.\n"); usage(); CmiAbort(""); } } if (writeByPhase_ && noLog_) { if (CkMyPe()==0) { CmiPrintf("+count-writebyphase and +count-nolog are MUTUALLY EXCLUSIVE\n"); usage(); CmiAbort(""); } } // parse through commandLine_, figure out which belongs on which list (1 vs 2) CounterArg* last1 = NULL; CounterArg* last2 = NULL; CounterArg* tmp = NULL; counter1Sz_ = counter2Sz_ = 0; for (i=0; i<commandLineSz_; i++) { tmp = &commandLine_[i]; if (tmp->code < NUM_COUNTER_ARGS/2) { if (counter1_ == NULL) { counter1_ = tmp; last1 = counter1_; } else { last1->next = tmp; last1 = tmp; } counter1Sz_++; } else { if (counter2_ == NULL) { counter2_ = tmp; last2 = counter2_; } else { last2->next = tmp; last2 = tmp; } counter2Sz_++; } } if (counter1_ == NULL) { printHelp(); if (CmiMyPe()==0) { CmiPrintf("\nMust specify some counters with code < %d\n", NUM_COUNTER_ARGS/2); } ConverseExit(); } if (counter2_ == NULL) { printHelp(); if (CmiMyPe()==0) { CmiPrintf("\nMust specify some counters with code >= %d\n", NUM_COUNTER_ARGS/2); } ConverseExit(); } last1->next = counter1_; last2->next = counter2_; // all args valid, now set up logging if (CmiMyPe() == 0) { CmiPrintf("Running with tracemode=counter and args:\n"); // print out counter1 set tmp = counter1_; i = 0; do { CmiPrintf(" <counter1-%d>=%d %s %s\n", i, tmp->code, tmp->arg, tmp->desc); tmp = tmp->next; i++; } while (tmp != counter1_); // print out counter2 set tmp = counter2_; i = 0; do { CmiPrintf(" <counter2-%d>=%d %s %s\n", i, tmp->code, tmp->arg, tmp->desc); tmp = tmp->next; i++; } while (tmp != counter2_); CmiPrintf( "+count-overview %d\n+count-switchrandom %d\n" 
"+count-switchbyphase %d\n+count-nolog %d\n" "+count-logname %s\n+count-writebyphase %d\n", overview_, switchRandom_, switchByPhase_, noLog_, logName, writeByPhase_); } // DEBUGF((" DEBUG: Counter1=%d Counter2=%d\n", counter1_, counter2_)); CpvAccess(_logPool) = new CountLogPool(); // allocate names so can do reduction/analysis on the fly char** counterNames = new char*[counter1Sz_+counter2Sz_]; char** counterDesc = new char*[counter1Sz_+counter2Sz_]; tmp = counter1_; for (i=0; i<counter1Sz_; i++) { tmp->index = i; counterNames[i] = tmp->arg; counterDesc[i] = tmp->desc; tmp = tmp->next; } tmp = counter2_; for (i=0; i<counter2Sz_; i++) { tmp->index = counter1Sz_+i; counterNames[counter1Sz_+i] = tmp->arg; counterDesc[counter1Sz_+i] = tmp->desc; tmp = tmp->next; } CpvAccess(_counterNames) = counterNames; CpvAccess(_counterDesc) = counterDesc; CpvAccess(_numCounters) = numCounters; // don't erase counterNames or counterDesc, // the reduction client will do it on the final reduction _MEMCHECK(CpvAccess(_logPool)); CpvAccess(_logPool)->init(numCounters); DEBUGF(("%d/%d DEBUG: Created _logPool at %08x\n", CmiMyPe(), CmiNumPes(), CpvAccess(_logPool))); }
/**
 This is the main charm setup routine.  It's called
 on all processors after Converse initialization.
 This routine gets passed to Converse from "main.C".

 The main purpose of this routine is to set up the objects
 and Ckpv's used during a regular Charm run.  See the comment
 at the top of the file for overall flow.
*/
void _initCharm(int unused_argc, char **argv)
{
	/* In SMP builds the extra rank (== node size) is the communication thread */
	int inCommThread = (CmiMyRank() == CmiMyNodeSize());

	DEBUGF(("[%d,%.6lf ] _initCharm started\n",CmiMyPe(),CmiWallTimer()));

	/* ---- per-PE (Ckpv) variable registration ---- */
	CkpvInitialize(size_t *, _offsets);
	CkpvAccess(_offsets) = new size_t[32];
	CkpvInitialize(PtrQ*,_buffQ);
	CkpvInitialize(PtrVec*,_bocInitVec);
	CkpvInitialize(void*, _currentChare);
	CkpvInitialize(int,   _currentChareType);
	CkpvInitialize(CkGroupID, _currentGroup);
	CkpvInitialize(void *, _currentNodeGroupObj);
	CkpvInitialize(CkGroupID, _currentGroupRednMgr);
	CkpvInitialize(GroupTable*, _groupTable);
	CkpvInitialize(GroupIDTable*, _groupIDTable);
	CkpvInitialize(CmiImmediateLockType, _groupTableImmLock);
	CkpvInitialize(bool, _destroyingNodeGroup);
	CkpvAccess(_destroyingNodeGroup) = false;
	CkpvInitialize(UInt, _numGroups);
	CkpvInitialize(int, _numInitsRecd);
	CkpvInitialize(int, _initdone);
	CkpvInitialize(char**, Ck_argv); CkpvAccess(Ck_argv)=argv;
	CkpvInitialize(MsgPool*, _msgPool);
	CkpvInitialize(CkCoreState *, _coreState);
	/* Added for evacuation-sayantan */
#ifndef __BIGSIM__
	CpvInitialize(char *,_validProcessors);
#endif
	CkpvInitialize(char ,startedEvac);
	CpvInitialize(int,serializer);

	_initChareTables();            // for checkpointable plain chares

	/* ---- per-node (Cksv) variable registration ---- */
	CksvInitialize(UInt, _numNodeGroups);
	CksvInitialize(GroupTable*, _nodeGroupTable);
	CksvInitialize(GroupIDTable, _nodeGroupIDTable);
	CksvInitialize(CmiImmediateLockType, _nodeGroupTableImmLock);
	CksvInitialize(CmiNodeLock, _nodeLock);
	CksvInitialize(PtrVec*,_nodeBocInitVec);
	CksvInitialize(UInt,_numInitNodeMsgs);
	CkpvInitialize(int,_charmEpoch);
	CkpvAccess(_charmEpoch)=0;
	CksvInitialize(int, _triggersSent);
	CksvAccess(_triggersSent) = 0;
	CkpvInitialize(_CkOutStream*, _ckout);
	CkpvInitialize(_CkErrStream*, _ckerr);
	CkpvInitialize(Stats*, _myStats);

	/* ---- per-PE tables and queues ---- */
	CkpvAccess(_groupIDTable) = new GroupIDTable(0);
	CkpvAccess(_groupTable) = new GroupTable;
	CkpvAccess(_groupTable)->init();
	CkpvAccess(_groupTableImmLock) = CmiCreateImmediateLock();
	CkpvAccess(_numGroups) = 1; // make 0 an invalid group number
	CkpvAccess(_buffQ) = new PtrQ();
	CkpvAccess(_bocInitVec) = new PtrVec();
	CkpvAccess(_currentNodeGroupObj) = NULL;

	/* rank 0 of each node sets up the node-shared tables */
	if(CkMyRank()==0)
	{
		CksvAccess(_numNodeGroups) = 1; //make 0 an invalid group number
		CksvAccess(_numInitNodeMsgs) = 0;
		CksvAccess(_nodeLock) = CmiCreateLock();
		CksvAccess(_nodeGroupTable) = new GroupTable();
		CksvAccess(_nodeGroupTable)->init();
		CksvAccess(_nodeGroupTableImmLock) = CmiCreateImmediateLock();
		CksvAccess(_nodeBocInitVec) = new PtrVec();
	}

	CkCallbackInit();

	CmiNodeAllBarrier();

#if ! CMK_BIGSIM_CHARM
	initQd(argv);         // bigsim calls it in ConverseCommonInit
#endif

	CkpvAccess(_coreState)=new CkCoreState();

	CkpvAccess(_numInitsRecd) = 0;
	CkpvAccess(_initdone) = 0;

	CkpvAccess(_ckout) = new _CkOutStream();
	CkpvAccess(_ckerr) = new _CkErrStream();

	/* ---- Converse message-handler registration.  Order matters: the
	 * handler indices are broadcast in messages, so every PE must register
	 * them in the same order to get the same indices. ---- */
	_charmHandlerIdx = CkRegisterHandler((CmiHandler)_bufferHandler);
	_initHandlerIdx = CkRegisterHandler((CmiHandler)_initHandler);
	CkNumberHandlerEx(_initHandlerIdx, (CmiHandlerEx)_initHandler, CkpvAccess(_coreState));
	_roRestartHandlerIdx = CkRegisterHandler((CmiHandler)_roRestartHandler);
	_exitHandlerIdx = CkRegisterHandler((CmiHandler)_exitHandler);
	//added for interoperability
	_libExitHandlerIdx = CkRegisterHandler((CmiHandler)_libExitHandler);
	_bocHandlerIdx = CkRegisterHandler((CmiHandler)_initHandler);
	CkNumberHandlerEx(_bocHandlerIdx, (CmiHandlerEx)_initHandler, CkpvAccess(_coreState));

#ifdef __BIGSIM__
	if(BgNodeRank()==0)
#endif
	_infoIdx = CldRegisterInfoFn((CldInfoFn)_infoFn);

	_triggerHandlerIdx = CkRegisterHandler((CmiHandler)_triggerHandler);
	_ckModuleInit();

	CldRegisterEstimator((CldEstimator)_charmLoadEstimator);

	_futuresModuleInit(); // part of futures implementation is a converse module
	_loadbalancerInit();
	_metabalancerInit();

#if CMK_MEM_CHECKPOINT
	init_memcheckpt(argv);
#endif

	initCharmProjections();
#if CMK_TRACE_IN_CHARM
	// initialize trace module in ck
	traceCharmInit(argv);
#endif

	CkpvInitialize(int, envelopeEventID);
	CkpvAccess(envelopeEventID) = 0;
	CkMessageWatcherInit(argv,CkpvAccess(_coreState));

	/**
	  The rank-0 processor of each node calls the
	  translator-generated "_register" routines.

	  _register routines call the charm.h "CkRegister*" routines,
	  which record function pointers and class information for
	  all Charm entities, like Chares, Arrays, and readonlies.

	  There's one _register routine generated for each
	  .ci file.  _register routines *must* be called in the
	  same order on every node, and *must not* be called by
	  multiple threads simultaniously.
	*/
#ifdef __BIGSIM__
	if(BgNodeRank()==0)
#else
	if(CkMyRank()==0)
#endif
	{
		SDAG::registerPUPables();
		CmiArgGroup("Charm++",NULL);
		_parseCommandLineOpts(argv);
		_registerInit();
		CkRegisterMsg("System", 0, 0, CkFreeMsg, sizeof(int));
		CkRegisterChareInCharm(CkRegisterChare("null", 0, TypeChare));
		CkIndex_Chare::__idx=CkRegisterChare("Chare", sizeof(Chare), TypeChare);
		CkRegisterChareInCharm(CkIndex_Chare::__idx);
		CkIndex_Group::__idx=CkRegisterChare("Group", sizeof(Group), TypeGroup);
		CkRegisterChareInCharm(CkIndex_Group::__idx);
		CkRegisterEp("null", (CkCallFnPtr)_nullFn, 0, 0, 0+CK_EP_INTRINSIC);

		/**
		  These _register calls are for the built-in
		  Charm .ci files, like arrays and load balancing.
		  If you add a .ci file to charm, you'll have to
		  add a call to the _register routine here, or make
		  your library into a "-module".
		*/
		_registerCkFutures();
		_registerCkArray();
		_registerLBDatabase();
		_registerMetaBalancer();
		_registerCkCallback();
		_registertempo();
		_registerwaitqd();
		_registerCkCheckpoint();
#if CMK_MEM_CHECKPOINT
		_registerCkMemCheckpoint();
#endif
		/*
		  Setup Control Point Automatic Tuning Framework.

		  By default it is enabled as a part of charm, however it won't enable
		  its tracing module unless a +CPEnableMeasurements command line argument
		  is specified. See trace-common.C for more info

		  Thus there should be no noticable overhead to always having the control
		  point framework linked in.
		*/
#if CMK_WITH_CONTROLPOINT
		_registerPathHistory();
		_registerControlPoints();
		_registerTraceControlPoints();
#endif

		/**
		  CkRegisterMainModule is generated by the (unique)
		  "mainmodule" .ci file.  It will include calls to
		  register all the .ci files.
		*/
		CkRegisterMainModule();

		/**
		  _registerExternalModules is actually generated by
		  charmc at link time (as "moduleinit<pid>.C").

		  This generated routine calls the _register functions
		  for the .ci files of libraries linked using "-module".
		  This funny initialization is most useful for AMPI/FEM
		  programs, which don't have a .ci file and hence have
		  no other way to control the _register process.
		*/
		_registerExternalModules(argv);

		_registerDone();
	}
	/* The following will happen on every virtual processor in BigEmulator, not just on once per real processor */
	if (CkMyRank() == 0) {
		CpdBreakPointInit();
	}
	CmiNodeAllBarrier();

	// Execute the initcalls registered in modules
	_initCallTable.enumerateInitCalls();

#if CMK_CHARMDEBUG
	CpdFinishInitialization();
#endif

	//CmiNodeAllBarrier();

	CkpvAccess(_myStats) = new Stats();
	CkpvAccess(_msgPool) = new MsgPool();

	CmiNodeAllBarrier();

#if !(__FAULT__)
	CmiBarrier();
	CmiBarrier();
	CmiBarrier();
#endif
#if CMK_SMP_TRACE_COMMTHREAD
	_TRACE_BEGIN_COMPUTATION();
#else
	/* only worker threads mark the start of computation */
	if (!inCommThread) {
		_TRACE_BEGIN_COMPUTATION();
	}
#endif

#ifdef ADAPT_SCHED_MEM
	/* collect the indices of entry methods flagged memory-critical */
	if(CkMyRank()==0){
		memCriticalEntries = new int[numMemCriticalEntries];
		int memcnt=0;
		for(int i=0; i<_entryTable.size(); i++){
			if(_entryTable[i]->isMemCritical){
				memCriticalEntries[memcnt++] = i;
			}
		}
	}
#endif

#if (defined(_FAULT_MLOG_) || defined(_FAULT_CAUSAL_))
	_messageLoggingInit();
#endif

#ifndef __BIGSIM__
	/*
		FAULT_EVAC: mark all processors as valid
	*/
	CpvAccess(_validProcessors) = new char[CkNumPes()];
	for(int vProc=0;vProc<CkNumPes();vProc++){
		CpvAccess(_validProcessors)[vProc]=1;
	}
	_ckEvacBcastIdx = CkRegisterHandler((CmiHandler)_ckEvacBcast);
	_ckAckEvacIdx = CkRegisterHandler((CmiHandler)_ckAckEvac);
#endif
	CkpvAccess(startedEvac) = 0;
	CpvAccess(serializer) = 0;

	evacuate = 0;
	CcdCallOnCondition(CcdSIGUSR1,(CcdVoidFn)CkDecideEvacPe,0);
#if (defined(_FAULT_MLOG_) || defined(_FAULT_CAUSAL_))
	CcdCallOnCondition(CcdSIGUSR2,(CcdVoidFn)CkMlogRestart,0);
#endif

	if(_raiseEvac){
		processRaiseEvacFile(_raiseEvacFile);
		/*
		if(CkMyPe() == 2){
		//	CcdCallOnConditionKeep(CcdPERIODIC_10s,(CcdVoidFn)CkDecideEvacPe,0);
			CcdCallFnAfter((CcdVoidFn)CkDecideEvacPe, 0, 10000);
		}
		if(CkMyPe() == 3){
			CcdCallFnAfter((CcdVoidFn)CkDecideEvacPe, 0, 10000);
		}*/
	}

	if (CkMyRank() == 0) {
	  TopoManager_init();
	}
	CmiNodeAllBarrier();

	if (!_replaySystem) {
		CkFtFn  faultFunc_restart = CkRestartMain;
		if (faultFunc == NULL || faultFunc == faultFunc_restart) {         // this is not restart from memory
			// these two are blocking calls for non-bigsim
#if ! CMK_BIGSIM_CHARM
			CmiInitCPUAffinity(argv);
			CmiInitMemAffinity(argv);
#endif
		}
		CmiInitCPUTopology(argv);
#if CMK_SHARED_VARS_POSIX_THREADS_SMP
		if (CmiCpuTopologyEnabled()) {
			int *pelist;
			int num;
			CmiGetPesOnPhysicalNode(0, &pelist, &num);
#if !CMK_MULTICORE && !CMK_SMP_NO_COMMTHD
			// Count communication threads, if present
			// XXX: Assuming uniformity of node size here
			num += num/CmiMyNodeSize();
#endif
			if (!_Cmi_forceSpinOnIdle && num > CmiNumCores()) {
				if (CmiMyPe() == 0)
					CmiPrintf("\nCharm++> Warning: the number of SMP threads (%d) is greater than the number of physical cores (%d), so threads will sleep while idling. Use +CmiSpinOnIdle or +CmiSleepOnIdle to control this directly.\n\n", num, CmiNumCores());
				/* lock because rank 0 of every node may race on this node-global */
				CmiLock(CksvAccess(_nodeLock));
				if (! _Cmi_sleepOnIdle) _Cmi_sleepOnIdle = 1;
				CmiUnlock(CksvAccess(_nodeLock));
			}
		}
#endif
	}

	if(CmiMyPe() == 0) {
	    char *topoFilename;
	    if(CmiGetArgStringDesc(argv,"+printTopo",&topoFilename,"topo file name"))
	    {
		std::stringstream sstm;
		sstm << topoFilename << "." << CmiMyPartition();
		std::string result = sstm.str();
		FILE *fp;
		fp = fopen(result.c_str(), "w");
		if (fp == NULL) {
		  CkPrintf("Error opening %s file, writing to stdout\n", topoFilename);
		  fp = stdout;
		}
		/* NOTE(review): if fopen failed, fp==stdout here and the fclose
		 * below closes stdout -- consider guarding the fclose. */
		TopoManager_printAllocation(fp);
		fclose(fp);
	    }
	}

#if CMK_USE_PXSHM && ( CMK_CRAYXE || CMK_CRAYXC ) && CMK_SMP
	// for SMP on Cray XE6 (hopper) it seems pxshm has to be initialized
	// again after cpuaffinity is done
	if (CkMyRank() == 0) {
	  CmiInitPxshm(argv);
	}
	CmiNodeAllBarrier();
#endif

	//CldCallback();
#if CMK_BIGSIM_CHARM && CMK_CHARMDEBUG
	// Register the BG handler for CCS. Notice that this is put into a variable shared by
	// the whole real processor. This because converse needs to find it. We check that all
	// virtual processors register the same index for this handler.
	CpdBgInit();
#endif

	if (faultFunc) {
#if CMK_WITH_STATS
		if (CkMyPe()==0) _allStats = new Stats*[CkNumPes()];
#endif
		/* restart path: hand the command line to the fault-recovery entry */
		if (!inCommThread) {
			CkArgMsg *msg = (CkArgMsg *)CkAllocMsg(0, sizeof(CkArgMsg), 0);
			msg->argc = CmiGetArgc(argv);
			msg->argv = argv;
			faultFunc(_restartDir, msg);
			CkFreeMsg(msg);
		}
	}else if(CkMyPe()==0){
#if CMK_WITH_STATS
		_allStats = new Stats*[CkNumPes()];
#endif
		register size_t i, nMains=_mainTable.size();
		/* Create all mainchares (only on PE 0) */
		for(i=0;i<nMains;i++)
		{
			register int size = _chareTable[_mainTable[i]->chareIdx]->size;
			register void *obj = malloc(size);
			_MEMCHECK(obj);
			_mainTable[i]->setObj(obj);
			CkpvAccess(_currentChare) = obj;
			CkpvAccess(_currentChareType) = _mainTable[i]->chareIdx;
			register CkArgMsg *msg = (CkArgMsg *)CkAllocMsg(0, sizeof(CkArgMsg), 0);
			msg->argc = CmiGetArgc(argv);
			msg->argv = argv;
			_entryTable[_mainTable[i]->entryIdx]->call(msg, obj);
#if (defined(_FAULT_MLOG_) || defined(_FAULT_CAUSAL_))
			CpvAccess(_currentObj) = (Chare *)obj;
#endif
		}
		_mainDone = 1;

		_STATS_RECORD_CREATE_CHARE_N(nMains);
		_STATS_RECORD_PROCESS_CHARE_N(nMains);

		/* Send out readonly messages */
		for(i=0;i<_readonlyMsgs.size();i++)
		{
			register void *roMsg = (void *) *((char **)(_readonlyMsgs[i]->pMsg));
			if(roMsg==0)
				continue;
			//Pack the message and send it to all other processors
			register envelope *env = UsrToEnv(roMsg);
			env->setSrcPe(CkMyPe());
			env->setMsgtype(ROMsgMsg);
			env->setRoIdx(i);
			CmiSetHandler(env, _initHandlerIdx);
			CkPackMessage(&env);
			CmiSyncBroadcast(env->getTotalsize(), (char *)env);
			CpvAccess(_qd)->create(CkNumPes()-1);

			//For processor 0, unpack and re-set the global
			CkUnpackMessage(&env);
			_processROMsgMsg(env);
			_numInitMsgs++;
		}

		//Determine the size of the RODataMessage
		PUP::sizer ps;
		for(i=0;i<_readonlyTable.size();i++) _readonlyTable[i]->pupData(ps);

		//Allocate and fill out the RODataMessage
		envelope *env = _allocEnv(RODataMsg, ps.size());
		PUP::toMem pp((char *)EnvToUsr(env));
		for(i=0;i<_readonlyTable.size();i++) _readonlyTable[i]->pupData(pp);

		env->setCount(++_numInitMsgs);
		env->setSrcPe(CkMyPe());
		CmiSetHandler(env, _initHandlerIdx);
		DEBUGF(("[%d,%.6lf] RODataMsg being sent of size %d \n",CmiMyPe(),CmiWallTimer(),env->getTotalsize()));
		CmiSyncBroadcastAndFree(env->getTotalsize(), (char *)env);
		CpvAccess(_qd)->create(CkNumPes()-1);
		_initDone();
	}

	DEBUGF(("[%d,%d%.6lf] inCommThread %d\n",CmiMyPe(),CmiMyRank(),CmiWallTimer(),inCommThread));
	// when I am a communication thread, I don't participate initDone.
	if (inCommThread) {
		CkNumberHandlerEx(_bocHandlerIdx,(CmiHandlerEx)_processHandler,
					CkpvAccess(_coreState));
		CkNumberHandlerEx(_charmHandlerIdx,(CmiHandlerEx)_processHandler
					, CkpvAccess(_coreState));
		_processBufferedMsgs();
	}

#if CMK_CHARMDEBUG
	// Should not use CpdFreeze inside a thread (since this processor is really a user-level thread)
	if (CpvAccess(cpdSuspendStartup))
	{
		//CmiPrintf("In Parallel Debugging mode .....\n");
		CpdFreeze();
	}
#endif

#if __FAULT__
	if(killFlag){
		readKillFile();
	}
#endif
}
static inline void _parseCommandLineOpts(char **argv) { if (CmiGetArgFlagDesc(argv,"+cs", "Print extensive statistics at shutdown")) _STATS_ON(_printCS); if (CmiGetArgFlagDesc(argv,"+ss", "Print summary statistics at shutdown")) _STATS_ON(_printSS); if (CmiGetArgFlagDesc(argv,"+fifo", "Default to FIFO queuing")) _defaultQueueing = CK_QUEUEING_FIFO; if (CmiGetArgFlagDesc(argv,"+lifo", "Default to LIFO queuing")) _defaultQueueing = CK_QUEUEING_LIFO; if (CmiGetArgFlagDesc(argv,"+ififo", "Default to integer-prioritized FIFO queuing")) _defaultQueueing = CK_QUEUEING_IFIFO; if (CmiGetArgFlagDesc(argv,"+ilifo", "Default to integer-prioritized LIFO queuing")) _defaultQueueing = CK_QUEUEING_ILIFO; if (CmiGetArgFlagDesc(argv,"+bfifo", "Default to bitvector-prioritized FIFO queuing")) _defaultQueueing = CK_QUEUEING_BFIFO; if (CmiGetArgFlagDesc(argv,"+blifo", "Default to bitvector-prioritized LIFO queuing")) _defaultQueueing = CK_QUEUEING_BLIFO; if (CmiGetArgFlagDesc(argv,"+objq", "Default to use object queue for every obejct")) { #if CMK_OBJECT_QUEUE_AVAILABLE _defaultObjectQ = 1; if (CkMyPe()==0) CmiPrintf("Charm++> Create object queue for every Charm object.\n"); #else CmiAbort("Charm++> Object queue not enabled, recompile Charm++ with CMK_OBJECT_QUEUE_AVAILABLE defined to 1."); #endif } if(CmiGetArgString(argv,"+restart",&_restartDir)) faultFunc = CkRestartMain; #if __FAULT__ if (CmiGetArgIntDesc(argv,"+restartaftercrash",&CpvAccess(_curRestartPhase),"restarting this processor after a crash")){ # if CMK_MEM_CHECKPOINT faultFunc = CkMemRestart; # endif #if (defined(_FAULT_MLOG_) || defined(_FAULT_CAUSAL_)) faultFunc = CkMlogRestart; #endif CmiPrintf("[%d] Restarting after crash \n",CmiMyPe()); } #if CMK_MESSAGE_LOGGING // reading +ftc_disk flag if (CmiGetArgFlagDesc(argv, "+ftc_disk", "Disk Checkpointing")) { diskCkptFlag = 1; } #endif // reading the killFile if(CmiGetArgStringDesc(argv,"+killFile", &killFile,"Generates SIGKILL on specified processors")){ if(faultFunc == 
NULL){ //do not read the killfile if this is a restarting processor killFlag = 1; if(CmiMyPe() == 0){ printf("[%d] killFlag set to 1 for file %s\n",CkMyPe(),killFile); } } } #endif // shut down program in ring fashion to allow projections output w/o IO error if (CmiGetArgIntDesc(argv,"+ringexit",&_ringtoken, "Program exits in a ring fashion")) { _ringexit = 1; if (CkMyPe()==0) CkPrintf("Charm++> Program shutdown in token ring (%d).\n", _ringtoken); if (_ringtoken > CkNumPes()) _ringtoken = CkNumPes(); } /* FAULT_EVAC if the argument +raiseevac is present then cause faults */ if(CmiGetArgStringDesc(argv,"+raiseevac", &_raiseEvacFile,"Generates processor evacuation on random processors")){ _raiseEvac = 1; } #if (defined(_FAULT_MLOG_) || defined(_FAULT_CAUSAL_)) if(!CmiGetArgIntDesc(argv,"+teamSize",&teamSize,"Set the team size for message logging")){ teamSize = 1; } if(!CmiGetArgIntDesc(argv,"+chkptPeriod",&chkptPeriod,"Set the checkpoint period for the message logging fault tolerance algorithm in seconds")){ chkptPeriod = 100; } if(CmiGetArgIntDesc(argv,"+fastRecovery", ¶llelRecovery, "Parallel recovery with message logging protocol")){ fastRecovery = true; } #endif /* Anytime migration flag */ _isAnytimeMigration = true; if (CmiGetArgFlagDesc(argv,"+noAnytimeMigration","The program does not require support for anytime migration")) { _isAnytimeMigration = false; } _isNotifyChildInRed = true; if (CmiGetArgFlagDesc(argv,"+noNotifyChildInReduction","The program has at least one element per processor for each charm array created")) { _isNotifyChildInRed = false; } _isStaticInsertion = false; if (CmiGetArgFlagDesc(argv,"+staticInsertion","Array elements are only inserted at construction")) { _isStaticInsertion = true; } useNodeBlkMapping = false; if (CmiGetArgFlagDesc(argv,"+useNodeBlkMapping","Array elements are block-mapped in SMP-node level")) { useNodeBlkMapping = true; } #if ! 
CMK_WITH_CONTROLPOINT // Display a warning if charm++ wasn't compiled with control point support but user is expecting it if( CmiGetArgFlag(argv,"+CPSamplePeriod") || CmiGetArgFlag(argv,"+CPSamplePeriodMs") || CmiGetArgFlag(argv,"+CPSchemeRandom") || CmiGetArgFlag(argv,"+CPExhaustiveSearch") || CmiGetArgFlag(argv,"+CPAlwaysUseDefaults") || CmiGetArgFlag(argv,"+CPSimulAnneal") || CmiGetArgFlag(argv,"+CPCriticalPathPrio") || CmiGetArgFlag(argv,"+CPBestKnown") || CmiGetArgFlag(argv,"+CPSteering") || CmiGetArgFlag(argv,"+CPMemoryAware") || CmiGetArgFlag(argv,"+CPSimplex") || CmiGetArgFlag(argv,"+CPDivideConquer") || CmiGetArgFlag(argv,"+CPLDBPeriod") || CmiGetArgFlag(argv,"+CPLDBPeriodLinear") || CmiGetArgFlag(argv,"+CPLDBPeriodQuadratic") || CmiGetArgFlag(argv,"+CPLDBPeriodOptimal") || CmiGetArgFlag(argv,"+CPDefaultValues") || CmiGetArgFlag(argv,"+CPGatherAll") || CmiGetArgFlag(argv,"+CPGatherMemoryUsage") || CmiGetArgFlag(argv,"+CPGatherUtilization") || CmiGetArgFlag(argv,"+CPSaveData") || CmiGetArgFlag(argv,"+CPNoFilterData") || CmiGetArgFlag(argv,"+CPLoadData") || CmiGetArgFlag(argv,"+CPDataFilename") ) { CkAbort("You specified a control point command line argument, but compiled charm++ without control point support.\n"); } #endif }
/**
 * Initialize the Converse Client-Server (CCS) subsystem on this PE:
 * creates the handler hash table, registers the request/reply forwarding
 * handlers, optionally opens the CCS TCP server port (on PE 0), and
 * processes the parallel-debugger (+cpd) options.
 */
void CcsInit(char **argv)
{
  CpvInitialize(CkHashtable_c, ccsTab);
  CpvAccess(ccsTab) = CkCreateHashtable_string(sizeof(CcsHandlerRec),5);
  CpvInitialize(CcsImplHeader *, ccsReq);
  CpvAccess(ccsReq) = NULL;
  _ccsHandlerIdx = CmiRegisterHandler((CmiHandler)req_fw_handler);
#if CMK_BIGSIM_CHARM
  CpvInitialize(int, _bgCcsHandlerIdx);
  CpvAccess(_bgCcsHandlerIdx) = 0;
  CpvInitialize(int, _bgCcsAck);
  CpvAccess(_bgCcsAck) = 0;
#endif
  CpvInitialize(int, cmiArgDebugFlag);
  CpvInitialize(char *, displayArgument);
  CpvInitialize(int, cpdSuspendStartup);
  CpvAccess(cmiArgDebugFlag) = 0;
  CpvAccess(displayArgument) = NULL;
  CpvAccess(cpdSuspendStartup) = 0;

  CcsBuiltinsInit(argv);

  rep_fw_handler_idx = CmiRegisterHandler((CmiHandler)rep_fw_handler);
#if NODE_0_IS_CONVHOST
#if ! CMK_CMIPRINTF_IS_A_BUILTIN
  print_fw_handler_idx = CmiRegisterHandler((CmiHandler)print_fw_handler);
#endif
  {
    int ccs_serverPort=0;
    char *ccs_serverAuth=NULL;

    /* NOTE: bitwise | is deliberate (not ||): all three options must be
     * consumed from argv on every PE, so none of the calls may be
     * short-circuited away. */
    if (CmiGetArgFlagDesc(argv,"++server", "Create a CCS server port") |
        CmiGetArgIntDesc(argv,"++server-port",&ccs_serverPort, "Listen on this TCP/IP port number") |
        CmiGetArgStringDesc(argv,"++server-auth",&ccs_serverAuth, "Use this CCS authentication file"))
      if (CmiMyPe()==0)
      {/*Create and occasionally poll on a CCS server port*/
        CcsServer_new(NULL,&ccs_serverPort,ccs_serverAuth);
        CcdCallOnConditionKeep(CcdPERIODIC,(CcdVoidFn)CcsServerCheck,NULL);
      }
  }
#endif
  /* if in parallel debug mode i.e ++cpd, freeze */
  if (CmiGetArgFlagDesc(argv, "+cpd", "Used *only* in conjunction with parallel debugger"))
  {
     CpvAccess(cmiArgDebugFlag) = 1;
     if (CmiGetArgStringDesc(argv, "+DebugDisplay",&(CpvAccess(displayArgument)), "X display for gdb used only in cpd mode"))
     {
        if (CpvAccess(displayArgument) == NULL)
            CmiPrintf("WARNING> NULL parameter for +DebugDisplay\n***");
     }
     else if (CmiMyPe() == 0)
     {
        /* only one processor prints the warning */
        CmiPrintf("WARNING> x term for gdb needs to be specified as +DebugDisplay by debugger\n***\n");
     }

     if (CmiGetArgFlagDesc(argv, "+DebugSuspend", "Suspend execution at beginning of program"))
     {
        /* deliver the freeze request later in startup (see _initCharm) */
        CpvAccess(cpdSuspendStartup) = 1;
     }
  }

  CcsReleaseMessages();
}
// called from init.C void _loadbalancerInit() { CkpvInitialize(int, lbdatabaseInited); CkpvAccess(lbdatabaseInited) = 0; CkpvInitialize(int, numLoadBalancers); CkpvAccess(numLoadBalancers) = 0; CkpvInitialize(int, hasNullLB); CkpvAccess(hasNullLB) = 0; char **argv = CkGetArgv(); char *balancer = NULL; CmiArgGroup("Charm++","Load Balancer"); while (CmiGetArgStringDesc(argv, "+balancer", &balancer, "Use this load balancer")) { if (CkMyRank() == 0) lbRegistry.addRuntimeBalancer(balancer); /* lbRegistry is a static */ } // set up init value for LBPeriod time in seconds // it can also be set by calling LDSetLBPeriod() CmiGetArgDoubleDesc(argv,"+LBPeriod", &_lb_args.lbperiod(),"the minimum time period in seconds allowed for two consecutive automatic load balancing"); _lb_args.loop() = CmiGetArgFlagDesc(argv, "+LBLoop", "Use multiple load balancing strategies in loop"); // now called in cldb.c: CldModuleGeneralInit() // registerLBTopos(); CmiGetArgStringDesc(argv, "+LBTopo", &_lbtopo, "define load balancing topology"); //Read the K parameter for RefineKLB CmiGetArgIntDesc(argv, "+LBNumMoves", &_lb_args.percentMovesAllowed() , "Percentage of chares to be moved (used by RefineKLB) [0-100]"); /**************** FUTURE PREDICTOR ****************/ _lb_predict = CmiGetArgFlagDesc(argv, "+LBPredictor", "Turn on LB future predictor"); CmiGetArgIntDesc(argv, "+LBPredictorDelay", &_lb_predict_delay, "Number of balance steps before learning a model"); CmiGetArgIntDesc(argv, "+LBPredictorWindow", &_lb_predict_window, "Number of steps to use to learn a model"); if (_lb_predict_window < _lb_predict_delay) { CmiPrintf("LB> [%d] Argument LBPredictorWindow (%d) less than LBPredictorDelay (%d) , fixing\n", CkMyPe(), _lb_predict_window, _lb_predict_delay); _lb_predict_delay = _lb_predict_window; } /******************* SIMULATION *******************/ // get the step number at which to dump the LB database CmiGetArgIntDesc(argv, "+LBVersion", &_lb_args.lbversion(), "LB database file version 
number"); CmiGetArgIntDesc(argv, "+LBCentPE", &_lb_args.central_pe(), "CentralLB processor"); int _lb_dump_activated = 0; if (CmiGetArgIntDesc(argv, "+LBDump", &LBSimulation::dumpStep, "Dump the LB state from this step")) _lb_dump_activated = 1; if (_lb_dump_activated && LBSimulation::dumpStep < 0) { CmiPrintf("LB> Argument LBDump (%d) negative, setting to 0\n",LBSimulation::dumpStep); LBSimulation::dumpStep = 0; } CmiGetArgIntDesc(argv, "+LBDumpSteps", &LBSimulation::dumpStepSize, "Dump the LB state for this amount of steps"); if (LBSimulation::dumpStepSize <= 0) { CmiPrintf("LB> Argument LBDumpSteps (%d) too small, setting to 1\n",LBSimulation::dumpStepSize); LBSimulation::dumpStepSize = 1; } CmiGetArgStringDesc(argv, "+LBDumpFile", &LBSimulation::dumpFile, "Set the LB state file name"); // get the simulation flag and number. Now the flag can also be avoided by the presence of the number LBSimulation::doSimulation = CmiGetArgIntDesc(argv, "+LBSim", &LBSimulation::simStep, "Read LB state from LBDumpFile since this step"); // check for stupid LBSim parameter if (LBSimulation::doSimulation && LBSimulation::simStep < 0) { CmiPrintf("LB> Argument LBSim (%d) invalid, should be >= 0\n"); CkExit(); return; } CmiGetArgIntDesc(argv, "+LBSimSteps", &LBSimulation::simStepSize, "Read LB state for this number of steps"); if (LBSimulation::simStepSize <= 0) { CmiPrintf("LB> Argument LBSimSteps (%d) too small, setting to 1\n",LBSimulation::simStepSize); LBSimulation::simStepSize = 1; } LBSimulation::simProcs = 0; CmiGetArgIntDesc(argv, "+LBSimProcs", &LBSimulation::simProcs, "Number of target processors."); LBSimulation::showDecisionsOnly = CmiGetArgFlagDesc(argv, "+LBShowDecisions", "Write to File: Load Balancing Object to Processor Map decisions during LB Simulation"); // force a global barrier after migration done _lb_args.syncResume() = CmiGetArgFlagDesc(argv, "+LBSyncResume", "LB performs a barrier after migration is finished"); // both +LBDebug and +LBDebug level should 
work if (!CmiGetArgIntDesc(argv, "+LBDebug", &_lb_args.debug(), "Turn on LB debugging printouts")) _lb_args.debug() = CmiGetArgFlagDesc(argv, "+LBDebug", "Turn on LB debugging printouts"); // getting the size of the team with +teamSize if (!CmiGetArgIntDesc(argv, "+teamSize", &_lb_args.teamSize(), "Team size")) _lb_args.teamSize() = 1; // ask to print summary/quality of load balancer _lb_args.printSummary() = CmiGetArgFlagDesc(argv, "+LBPrintSummary", "Print load balancing result summary"); // to ignore baclground load _lb_args.ignoreBgLoad() = CmiGetArgFlagDesc(argv, "+LBNoBackground", "Load balancer ignores the background load."); #ifdef __BIGSIM__ _lb_args.ignoreBgLoad() = 1; #endif _lb_args.migObjOnly() = CmiGetArgFlagDesc(argv, "+LBObjOnly", "Only load balancing migratable objects, ignoring all others."); if (_lb_args.migObjOnly()) _lb_args.ignoreBgLoad() = 1; // assume all CPUs are identical _lb_args.testPeSpeed() = CmiGetArgFlagDesc(argv, "+LBTestPESpeed", "Load balancer test all CPUs speed."); _lb_args.samePeSpeed() = CmiGetArgFlagDesc(argv, "+LBSameCpus", "Load balancer assumes all CPUs are of same speed."); if (!_lb_args.testPeSpeed()) _lb_args.samePeSpeed() = 1; _lb_args.useCpuTime() = CmiGetArgFlagDesc(argv, "+LBUseCpuTime", "Load balancer uses CPU time instead of wallclock time."); // turn instrumentation off at startup _lb_args.statsOn() = !CmiGetArgFlagDesc(argv, "+LBOff", "Turn load balancer instrumentation off"); // turn instrumentation of communicatin off at startup _lb_args.traceComm() = !CmiGetArgFlagDesc(argv, "+LBCommOff", "Turn load balancer instrumentation of communication off"); // set alpha and beeta _lb_args.alpha() = PER_MESSAGE_SEND_OVERHEAD_DEFAULT; _lb_args.beeta() = PER_BYTE_SEND_OVERHEAD_DEFAULT; CmiGetArgDoubleDesc(argv,"+LBAlpha", &_lb_args.alpha(), "per message send overhead"); CmiGetArgDoubleDesc(argv,"+LBBeta", &_lb_args.beeta(), "per byte send overhead"); if (CkMyPe() == 0) { if (_lb_args.debug()) { CmiPrintf("CharmLB> Verbose 
level %d, load balancing period: %g seconds\n", _lb_args.debug(), _lb_args.lbperiod()); } if (_lb_args.debug() > 1) { CmiPrintf("CharmLB> Topology %s alpha: %es beta: %es.\n", _lbtopo, _lb_args.alpha(), _lb_args.beeta()); } if (_lb_args.printSummary()) CmiPrintf("CharmLB> Load balancer print summary of load balancing result.\n"); if (_lb_args.ignoreBgLoad()) CmiPrintf("CharmLB> Load balancer ignores processor background load.\n"); if (_lb_args.samePeSpeed()) CmiPrintf("CharmLB> Load balancer assumes all CPUs are same.\n"); if (_lb_args.useCpuTime()) CmiPrintf("CharmLB> Load balancer uses CPU time instead of wallclock time.\n"); if (LBSimulation::doSimulation) CmiPrintf("CharmLB> Load balancer running in simulation mode on file '%s' version %d.\n", LBSimulation::dumpFile, _lb_args.lbversion()); if (_lb_args.statsOn()==0) CkPrintf("CharmLB> Load balancing instrumentation is off.\n"); if (_lb_args.traceComm()==0) CkPrintf("CharmLB> Load balancing instrumentation for communication is off.\n"); if (_lb_args.migObjOnly()) CkPrintf("LB> Load balancing strategy ignores non-migratable objects.\n"); } }
void CmiInitMemAffinity(char **argv) { int i; int policy=-1; /*step1: parsing args maffinity, mempol and nodemap (nodemap is optional)*/ int maffinity_flag = CmiGetArgFlagDesc(argv, "+maffinity", "memory affinity"); /*the node here refers to the nodes that are seen by libnuma on a phy node*/ /*nodemap is a string of ints separated by ","*/ char *nodemap = NULL; char *mpol = NULL; CmiGetArgStringDesc(argv, "+memnodemap", &nodemap, "define memory node mapping"); CmiGetArgStringDesc(argv, "+mempol", &mpol, "define memory policy {bind, preferred or interleave} "); if (!maffinity_flag) return; /*Currently skip the communication thread*/ /** * Note: the cpu affinity of comm thread may not be set * if "commap" is not specified. This is why the following * code regarding the comm thd needs to be put before * the codes that checks whether cpu affinity is set * or not */ if (CmiMyPe() >= CmiNumPes()) { CmiNodeAllBarrier(); return; } /*step2: checking whether the required cpu affinity has been set*/ if (CpvInitialized(myCPUAffToCore) && CpvAccess(myCPUAffToCore)==-1) { if (CmiMyPe()==0) CmiPrintf("Charm++> memory affinity disabled because cpu affinity is not enabled!\n"); CmiNodeAllBarrier(); return; } if (CmiMyPe()==0) { CmiPrintf("Charm++> memory affinity enabled! \n"); } /*Select memory policy*/ if (mpol==NULL) { CmiAbort("Memory policy must be specified!\n"); } if (strcmp(mpol, "interleave")==0) policy = MPOL_INTERLEAVE; else if (strcmp(mpol, "preferred")==0) policy = MPOL_PREFERRED; else if (strcmp(mpol, "bind")==0) policy = MPOL_BIND; else { CmiPrintf("Error> Invalid memory policy :%s\n", mpol); CmiAbort("Invalid memory policy!"); } /** * step3: check whether nodemap is NULL or not * step 3a): nodemap is not NULL * step 3b): nodemap is NULL, set memory policy according to the result * of cpu affinity settings. 
*/ if (nodemap!=NULL) { int *nodemapArr = NULL; int nodemapArrSize = 1; int prevIntStart,j; int curnid; for (i=0; i<strlen((const char *)nodemap); i++) { if (nodemap[i]==',') nodemapArrSize++; } nodemapArr = malloc(nodemapArrSize*sizeof(int)); prevIntStart=j=0; for (i=0; i<strlen((const char *)nodemap); i++) { if (nodemap[i]==',') { curnid = atoi(nodemap+prevIntStart); if (curnid >= CmiNumNUMANodes()) { CmiPrintf("Error> Invalid node number %d, only have %d nodes (0-%d) on the machine. \n", curnid, CmiNumNUMANodes(), CmiNumNUMANodes()-1); CmiAbort("Invalid node number!"); } nodemapArr[j++] = curnid; prevIntStart=i+1; } } /*record the last nid after the last comma*/ curnid = atoi(nodemap+prevIntStart); if (curnid >= CmiNumNUMANodes()) { CmiPrintf("Error> Invalid node number %d, only have %d nodes (0-%d) on the machine. \n", curnid, CmiNumNUMANodes(), CmiNumNUMANodes()-1); CmiAbort("Invalid node number!"); } nodemapArr[j] = curnid; int myPhyRank = CpvAccess(myCPUAffToCore); int myMemNid = nodemapArr[myPhyRank%nodemapArrSize]; int retval = -1; if (policy==MPOL_INTERLEAVE) { retval = CmiSetMemAffinity(policy, nodemapArr, nodemapArrSize); } else { retval = CmiSetMemAffinity(policy, &myMemNid, 1); } if (retval<0) { CmiAbort("set_mempolicy error w/ mem nodemap"); } } else { /*use the affinity map set by the cpu affinity*/ int myPhyRank = CpvAccess(myCPUAffToCore); /*get the NUMA node id from myPhyRank (a core id)*/ int myMemNid = getNUMANidByRank(myPhyRank); int retval=-1; if (policy==MPOL_INTERLEAVE) { int totalNUMANodes = CmiNumNUMANodes(); int *nids = (int *)malloc(totalNUMANodes*sizeof(int)); for (i=0; i<totalNUMANodes; i++) nids[i] = i; retval = CmiSetMemAffinity(policy, nids, totalNUMANodes); free(nids); } else { retval = CmiSetMemAffinity(policy, &myMemNid, 1); } if (retval<0) { CmiAbort("set_mempolicy error w/o mem nodemap"); } } /*print_mem_affinity();*/ CmiNodeAllBarrier(); }
void CmiInitCPUAffinity(char **argv) { static skt_ip_t myip; int ret, i, exclude; hostnameMsg *msg; char *pemap = NULL; char *commap = NULL; char *pemapfile = NULL; int show_affinity_flag; int affinity_flag = CmiGetArgFlagDesc(argv,"+setcpuaffinity", "set cpu affinity"); while (CmiGetArgIntDesc(argv,"+excludecore", &exclude, "avoid core when setting cpuaffinity")) { if (CmiMyRank() == 0) add_exclude(exclude); affinity_flag = 1; } if (CmiGetArgStringDesc(argv, "+pemapfile", &pemapfile, "define pe to core mapping file")) { FILE *fp; char buf[128]; pemap = (char*)malloc(1024); fp = fopen(pemapfile, "r"); if (fp == NULL) CmiAbort("pemapfile does not exist"); while (!feof(fp)) { if (fgets(buf, 128, fp)) { if (buf[strlen(buf)-1] == '\n') buf[strlen(buf)-1] = 0; strcat(pemap, buf); } } fclose(fp); if (CmiMyPe()==0) CmiPrintf("Charm++> read from pemap file '%s': %s\n", pemapfile, pemap); } CmiGetArgStringDesc(argv, "+pemap", &pemap, "define pe to core mapping"); if (pemap!=NULL && excludecount>0) CmiAbort("Charm++> +pemap can not be used with +excludecore.\n"); CmiGetArgStringDesc(argv, "+commap", &commap, "define comm threads to core mapping"); if (pemap!=NULL || commap!=NULL) affinity_flag = 1; show_affinity_flag = CmiGetArgFlagDesc(argv,"+showcpuaffinity", "print cpu affinity"); cpuAffinityHandlerIdx = CmiRegisterHandler((CmiHandler)cpuAffinityHandler); cpuAffinityRecvHandlerIdx = CmiRegisterHandler((CmiHandler)cpuAffinityRecvHandler); if (CmiMyRank() ==0) { affLock = CmiCreateLock(); } #if CMK_BLUEGENEP || CMK_BLUEGENEQ if(affinity_flag){ affinity_flag = 0; if(CmiMyPe()==0) CmiPrintf("Charm++> cpu affinity setting is not needed on Blue Gene, thus ignored.\n"); } if(show_affinity_flag){ show_affinity_flag = 0; if(CmiMyPe()==0) CmiPrintf("Charm++> printing cpu affinity is not supported on Blue Gene.\n"); } #endif if (!affinity_flag) { if (show_affinity_flag) CmiPrintCPUAffinity(); return; } if (CmiMyPe() == 0) { CmiPrintf("Charm++> cpu affinity enabled. 
\n"); if (excludecount > 0) { CmiPrintf("Charm++> cpuaffinity excludes core: %d", excludecore[0]); for (i=1; i<excludecount; i++) CmiPrintf(" %d", excludecore[i]); CmiPrintf(".\n"); } if (pemap!=NULL) CmiPrintf("Charm++> cpuaffinity PE-core map : %s\n", pemap); } if (CmiMyPe() >= CmiNumPes()) { /* this is comm thread */ /* comm thread either can float around, or pin down to the last rank. however it seems to be reportedly slower if it is floating */ CmiNodeAllBarrier(); if (commap != NULL) { int mycore = search_pemap(commap, CmiMyPeGlobal()-CmiNumPesGlobal()); if(CmiMyPe()-CmiNumPes()==0) printf("Charm++> set comm %d on node %d to core #%d\n", CmiMyPe()-CmiNumPes(), CmiMyNode(), mycore); if (-1 == CmiSetCPUAffinity(mycore)) CmiAbort("set_cpu_affinity abort!"); CmiNodeAllBarrier(); if (show_affinity_flag) CmiPrintCPUAffinity(); return; /* comm thread return */ } else { /* if (CmiSetCPUAffinity(CmiNumCores()-1) == -1) CmiAbort("set_cpu_affinity abort!"); */ #if !CMK_CRAYXT && !CMK_CRAYXE && !CMK_CRAYXC && !CMK_BLUEGENEQ if (pemap == NULL) { #if CMK_MACHINE_PROGRESS_DEFINED while (affinity_doneflag < CmiMyNodeSize()) CmiNetworkProgress(); #else #if CMK_SMP #error "Machine progress call needs to be implemented for cpu affinity!" #endif #endif } #endif #if CMK_CRAYXT || CMK_CRAYXE || CMK_CRAYXC /* if both pemap and commmap are NULL, will compute one */ if (pemap != NULL) #endif { CmiNodeAllBarrier(); if (show_affinity_flag) CmiPrintCPUAffinity(); return; /* comm thread return */ } } } if (pemap != NULL && CmiMyPe()<CmiNumPes()) { /* work thread */ int mycore = search_pemap(pemap, CmiMyPeGlobal()); if(show_affinity_flag) CmiPrintf("Charm++> set PE %d on node %d to core #%d\n", CmiMyPe(), CmiMyNode(), mycore); if (mycore >= CmiNumCores()) { CmiPrintf("Error> Invalid core number %d, only have %d cores (0-%d) on the node. 
\n", mycore, CmiNumCores(), CmiNumCores()-1); CmiAbort("Invalid core number"); } if (CmiSetCPUAffinity(mycore) == -1) CmiAbort("set_cpu_affinity abort!"); CmiNodeAllBarrier(); CmiNodeAllBarrier(); /* if (show_affinity_flag) CmiPrintCPUAffinity(); */ return; } #if CMK_CRAYXT || CMK_CRAYXE || CMK_CRAYXC { int numCores = CmiNumCores(); int myid = getXTNodeID(CmiMyNodeGlobal(), CmiNumNodesGlobal()); int myrank; int pe, mype = CmiMyPeGlobal(); int node = CmiMyNodeGlobal(); int nnodes = 0; #if CMK_SMP if (CmiMyPe() >= CmiNumPes()) { /* this is comm thread */ int node = CmiMyPe() - CmiNumPes(); mype = CmiGetPeGlobal(CmiNodeFirst(node) + CmiMyNodeSize() - 1, CmiMyPartition()); /* last pe on SMP node */ node = CmiGetNodeGlobal(node, CmiMyPartition()); } #endif pe = mype - 1; while (pe >= 0) { int n = CmiNodeOf(pe); if (n != node) { nnodes++; node = n; } if (getXTNodeID(n, CmiNumNodesGlobal()) != myid) break; pe --; } CmiAssert(numCores > 0); myrank = (mype - pe - 1 + nnodes)%numCores; #if CMK_SMP if (CmiMyPe() >= CmiNumPes()) myrank = (myrank + 1)%numCores; #endif if (-1 != CmiSetCPUAffinity(myrank)) { DEBUGP(("Processor %d is bound to core #%d on node #%d\n", CmiMyPe(), myrank, mynode)); } else{ CmiPrintf("Processor %d set affinity failed!\n", CmiMyPe()); CmiAbort("set cpu affinity abort!\n"); } } if (CmiMyPe() < CmiNumPes()) CmiNodeAllBarrier(); CmiNodeAllBarrier(); #else /* get my ip address */ if (CmiMyRank() == 0) { #if CMK_HAS_GETHOSTNAME myip = skt_my_ip(); /* not thread safe, so only calls on rank 0 */ #else CmiAbort("Can not get unique name for the compute nodes. \n"); #endif } CmiNodeAllBarrier(); /* prepare a msg to send */ msg = (hostnameMsg *)CmiAlloc(sizeof(hostnameMsg)); CmiSetHandler((char *)msg, cpuAffinityHandlerIdx); msg->pe = CmiMyPe(); msg->ip = myip; msg->ncores = CmiNumCores(); DEBUGP(("PE %d's node has %d number of cores. 
\n", CmiMyPe(), msg->ncores)); msg->rank = 0; CmiSyncSendAndFree(0, sizeof(hostnameMsg), (void *)msg); if (CmiMyPe() == 0) { int i; hostTable = CmmNew(); rankmsg = (rankMsg *)CmiAlloc(sizeof(rankMsg)+CmiNumPes()*sizeof(int)*2); CmiSetHandler((char *)rankmsg, cpuAffinityRecvHandlerIdx); rankmsg->ranks = (int *)((char*)rankmsg + sizeof(rankMsg)); rankmsg->nodes = (int *)((char*)rankmsg + sizeof(rankMsg) + CmiNumPes()*sizeof(int)); for (i=0; i<CmiNumPes(); i++) { rankmsg->ranks[i] = 0; rankmsg->nodes[i] = -1; } for (i=0; i<CmiNumPes(); i++) CmiDeliverSpecificMsg(cpuAffinityHandlerIdx); } /* receive broadcast from PE 0 */ CmiDeliverSpecificMsg(cpuAffinityRecvHandlerIdx); CmiLock(affLock); affinity_doneflag++; CmiUnlock(affLock); CmiNodeAllBarrier(); #endif if (show_affinity_flag) CmiPrintCPUAffinity(); }