/* Copy every queued entry -- negative-priority, then zero-priority, then
 * positive-priority -- into one freshly CmiAlloc'd array returned through
 * *resp.  The caller owns (and must free) the returned buffer. */
void CqsEnumerateQueue(Queue q, void ***resp)
{
  void **section;
  int count, k;
  int filled = 0;

  *resp = (void **)CmiAlloc(q->length * sizeof(void *));

  /* Entries drain in this same order, so enumerate in it too. */
  section = CqsEnumeratePrioq(&(q->negprioq), &count);
  for (k = 0; k < count; k++) (*resp)[filled++] = section[k];
  CmiFree(section);

  section = CqsEnumerateDeq(&(q->zeroprio), &count);
  for (k = 0; k < count; k++) (*resp)[filled++] = section[k];
  CmiFree(section);

  section = CqsEnumeratePrioq(&(q->posprioq), &count);
  for (k = 0; k < count; k++) (*resp)[filled++] = section[k];
  CmiFree(section);
}
/* Called on PE 0 only: receives one hostnameMsg from every PE, groups PEs
 * by IP address (one CmmTable record per physical node), assigns each PE a
 * core rank on its node while skipping excluded cores, and -- once all PEs
 * have reported -- sorts ranks per node by PE number and broadcasts the
 * completed rankmsg to everyone. */
static void cpuAffinityHandler(void *m)
{
  static int count = 0;        /* PEs heard from so far */
  static int nodecount = 0;    /* distinct nodes seen so far */
  hostnameMsg *rec;
  hostnameMsg *msg = (hostnameMsg *)m;
  hostnameMsg *tmpm;
  int tag, tag1, pe, myrank;
  int npes = CmiNumPes();
/* for debug
  char str[128];
  skt_print_ip(str, msg->ip);
  printf("hostname: %d %s\n", msg->pe, str);
*/
  CmiAssert(CmiMyPe()==0 && rankmsg != NULL);
  /* The node's IP address is the lookup tag into hostTable. */
  tag = *(int*)&msg->ip;
  pe = msg->pe;
  if ((rec = (hostnameMsg *)CmmProbe(hostTable, 1, &tag, &tag1)) != NULL) {
    /* Node already known: keep its record, drop the duplicate message. */
    CmiFree(msg);
  }
  else {
    rec = msg;
    rec->seq = nodecount;
    nodecount++;                          /* a new node record */
    CmmPut(hostTable, 1, &tag, msg);
  }
  /* Next free core on this node, wrapping around ncores. */
  myrank = rec->rank%rec->ncores;
  while (in_exclude(myrank)) {             /* skip excluded core */
    myrank = (myrank+1)%rec->ncores;
    rec->rank ++;
  }
  rankmsg->ranks[pe] = myrank;             /* core rank */
  rankmsg->nodes[pe] = rec->seq;           /* on which node */
  rec->rank ++;
  count ++;
  if (count == CmiNumPes()) {
    /* CmiPrintf("Cpuaffinity> %d unique compute nodes detected! \n", CmmEntries(hostTable)); */
    /* All PEs reported: drain and free the host table. */
    tag = CmmWildCard;
    while (tmpm = CmmGet(hostTable, 1, &tag, &tag1)) CmiFree(tmpm);
    CmmFree(hostTable);
#if 1
    /* bubble sort ranks on each node according to the PE number */
    {
      int i,j;
      for (i=0; i<npes-1; i++)
        for(j=i+1; j<npes; j++) {
          if (rankmsg->nodes[i] == rankmsg->nodes[j] &&
              rankmsg->ranks[i] > rankmsg->ranks[j])
          {
            int tmp = rankmsg->ranks[i];
            rankmsg->ranks[i] = rankmsg->ranks[j];
            rankmsg->ranks[j] = tmp;
          }
        }
    }
#endif
    /* rankmsg carries two int arrays (ranks, nodes) of CmiNumPes() each. */
    CmiSyncBroadcastAllAndFree(sizeof(rankMsg)+CmiNumPes()*sizeof(int)*2, (void *)rankmsg);
  }
}
/* Begin timing for the current size: allocate a message of the active
 * test size, stamp the start time, and bounce it to ourselves through the
 * iteration handler.  The incoming trigger message is released. */
static void startMessage(Message *msg)
{
#if 0
  EmptyMsg m;
  CmiFree(msg);
  CmiSetHandler(&m, pva(iterHandler));
  //fillMessage(mm);
  pva(starttime) = CmiWallTimer();
  CmiSyncSend(CmiMyPe(), sizeof(EmptyMsg), &m);
#else
  int bytes = sizeof(Message)+sizes[pva(nextSize)].size;
  Message *out = (Message *) CmiAlloc(bytes);
  out->srcpe = CmiMyPe();
  out->idx = pva(nextSize);
  CmiSetHandler(out, pva(iterHandler));
  //fillMessage(mm);
  pva(starttime) = CmiWallTimer();
  CmiSyncSendAndFree(out->srcpe, bytes, out);
  CmiFree(msg);
#endif
}
/* Tcl command "replicaAtomSendrecv": exchange atoms with partition `dest`
 * (optionally receiving from `source`).  Swaps the lattice before the
 * exchange and the controller state after it, bracketing the
 * SCRIPT_ATOMSENDRECV controller phase. */
int ScriptTcl::Tcl_replicaAtomSendrecv(ClientData clientData,
                                       Tcl_Interp *interp, int argc, char **argv) {
  ScriptTcl *script = (ScriptTcl *)clientData;
  script->initcheck();
  /* Atom exchange requires identical patch grids on every replica. */
  if ( ! Node::Object()->simParameters->replicaUniformPatchGrids ) {
    Tcl_SetResult(interp,"replicaUniformPatchGrids is required for atom exchange",TCL_VOLATILE);
    return TCL_ERROR;
  }
  if ( argc < 2 || argc > 3 ) {
    Tcl_SetResult(interp,"bad arg count; args: dest ?source?",TCL_VOLATILE);
    return TCL_ERROR;
  }
  int dest = -1;
  if ( sscanf(argv[1], "%d", &dest) != 1 ) {
    Tcl_SetResult(interp,"bad dest; args: dest ?source?",TCL_VOLATILE);
    return TCL_ERROR;
  }
  int source = -1;
  if ( argc == 3 ) {
    if ( sscanf(argv[2], "%d", &source) != 1 ) {
      Tcl_SetResult(interp,"bad source; args: dest ?source?",TCL_VOLATILE);
      return TCL_ERROR;
    }
  }
#if CMK_HAS_PARTITION
  /* First swap lattices with the peer partition (skip self-exchange). */
  if (dest != CmiMyPartition()) {
    DataMessage *recvMsg = NULL;
    replica_sendRecv((char*)&(script->state->lattice), sizeof(Lattice), dest, CkMyPe(), &recvMsg, source, CkMyPe());
    CmiAssert(recvMsg != NULL);
    memcpy(&(script->state->lattice), recvMsg->data, recvMsg->size);
    CmiFree(recvMsg);
  }
#endif
  /* Pass dest/source down to the controller via script parameters. */
  char str[40];
  sprintf(str, "%d", dest);
  script->setParameter("scriptArg1", str);
  sprintf(str, "%d", source);
  script->setParameter("scriptArg2", str);
  /* Expect two quiescence-detection events per patch -- TODO confirm. */
  CkpvAccess(_qd)->create(2 * PatchMap::Object()->numPatches());
  script->runController(SCRIPT_ATOMSENDRECV);
#if CMK_HAS_PARTITION
  /* Then swap controller state so accumulated state follows the atoms. */
  if (dest != CmiMyPartition()) {
    DataMessage *recvMsg = NULL;
    ControllerState *cstate = script->state->controller;
    replica_sendRecv((char*)cstate, sizeof(ControllerState), dest, CkMyPe(), &recvMsg, source, CkMyPe());
    CmiAssert(recvMsg != NULL);
    memcpy(cstate, recvMsg->data, recvMsg->size);
    CmiFree(recvMsg);
  }
#endif
  return TCL_OK;
}
/* Destroy a priority queue: release both priority heaps, the optional
 * STL-backed message queue, then the queue header itself. */
void CqsDelete(Queue q)
{
  CmiFree(q->posprioq.heap);
  CmiFree(q->negprioq.heap);
#if CMK_USE_STL_MSGQ
  if (q->stlQ != NULL)
    delete (conv::msgQ<prio_t>*)(q->stlQ);
#endif
  CmiFree(q);
}
int ScriptTcl::Tcl_replicaSendrecv(ClientData, Tcl_Interp *interp, int argc, char **argv) { if ( argc < 3 || argc > 4 ) { Tcl_SetResult(interp,"args: data dest ?source?",TCL_VOLATILE); return TCL_ERROR; } Tcl_DString recvstr; Tcl_DStringInit(&recvstr); int sendcount = strlen(argv[1]); int recvcount = 0; int dest = atoi(argv[2]); int source = -1; if ( argc > 3 ) source = atoi(argv[3]); #if CMK_HAS_PARTITION if (dest == CmiMyPartition()) { Tcl_DStringSetLength(&recvstr,sendcount); memcpy(Tcl_DStringValue(&recvstr),argv[1],sendcount); } else { DataMessage *recvMsg = NULL; replica_sendRecv(argv[1], sendcount, dest, CkMyPe(), &recvMsg, source, CkMyPe()); CmiAssert(recvMsg != NULL); Tcl_DStringAppend(&recvstr, recvMsg->data, recvMsg->size); CmiFree(recvMsg); } #endif Tcl_DStringResult(interp, &recvstr); Tcl_DStringFree(&recvstr); return TCL_OK; }
/* CCS handler: build the memory allocation tree for one PE (forPE >= 0)
 * or, when forPE == -1, have PE 0 re-broadcast the request so every PE
 * contributes its tree through a reduction before the reply is sent. */
static void CpdDebugCallAllocationTree(char *msg)
{
  int numNodes;
  int forPE;
  void *tree;
  /* The tree walker comes from the memory-charmdebug module; if it was
   * never linked in, answer with an empty CCS reply and bail out. */
  if (CpdDebugGetAllocationTree == NULL) {
    CmiPrintf("Error> Invoked CpdDebugCalloAllocationTree but no function initialized.\nDid you forget to link in memory charmdebug?\n");
    CcsSendReply(0, NULL);
    return;
  }
  sscanf(msg+CmiMsgHeaderSizeBytes, "%d", &forPE);
  /* Delay the CCS reply until the (possibly reduced) tree is ready. */
  if (CmiMyPe() == forPE) CpvAccess(allocationTreeDelayedReply) = CcsDelayReply();
  if (forPE == -1 && CmiMyPe()==0) {
    /* All-PE request: forward this same message to every PE. */
    CpvAccess(allocationTreeDelayedReply) = CcsDelayReply();
    CmiSetXHandler(msg, CpvAccess(CpdDebugCallAllocationTree_Index));
    CmiSetHandler(msg, _debugHandlerIdx);
    CmiSyncBroadcast(CmiMsgHeaderSizeBytes+strlen(msg+CmiMsgHeaderSizeBytes)+1, msg);
  }
  tree = CpdDebugGetAllocationTree(&numNodes);
  if (forPE == CmiMyPe()) CpdDebugReturnAllocationTree(tree);
  else if (forPE == -1)
    /* Merge all PEs' trees; the reduction root sends the reply. */
    CmiReduceStruct(tree, CpdDebug_pupAllocationPoint, CpdDebug_MergeAllocationTree,
                    CpdDebugReturnAllocationTree, CpdDebug_deleteAllocationPoint);
  else CmiAbort("Received allocationTree request for another PE!");
  CmiFree(msg);
}
// For source not in rectangle case, forward to corner of rectangle // void RectMulticastStrategy::forwardMulticast(envelope *env, ComlibRectSectionHashObject *obj) { ComlibPrintf("[%d] forwardMulticast \n", CkMyPe()); int *pelist = obj->pelist; int npes = obj->npes; if(npes == 0) { CmiFree(env); return; } // handler is changed to special root handler CmiSetHandler(env, handlerId); ((CmiMsgHeaderExt *) env)->stratid = getInstance(); //Collect Multicast Statistics RECORD_SENDM_STATS(getInstance(), env->getTotalsize(), pelist, npes); CkPackMessage(&env); //Sending a remote multicast ComlibMulticastMsg *cbmsg = (ComlibMulticastMsg *)EnvToUsr(env); int sectionID=cbmsg->_cookie.sInfo.cInfo.id; // CmiSyncListSendAndFree(npes, pelist, env->getTotalsize(), (char*)env); CmiSyncSendAndFree(obj->cornerRoot, env->getTotalsize(), (char*)env); //CmiSyncBroadcastAndFree(env->getTotalsize(), (char*)env); }
/* CCS handler: serialize the items of the requested debug list in "fmt"
 * layout and send the bytes back as the CCS reply. */
static void CpdList_ccs_list_items_fmt(char *msg)
{
  CpdListItemsRequest req;
  CpdListAccessor *acc=CpdListHeader_ccs_list_items(msg,req);
  if (acc!=NULL) {
    int bufLen;
    { /* First pass: size-only pup to learn the required buffer length. */
      PUP_toNetwork_sizer ps;
      PUP_fmt p(ps);
      pupCpd(p,acc,req);
      bufLen=ps.size();
    }
    char *buf=new char[bufLen];
    { /* Second pass: pack for real; both passes must agree on length. */
      PUP_toNetwork_pack pp(buf);
      PUP_fmt p(pp);
      pupCpd(p,acc,req);
      if (pp.size()!=bufLen)
        CmiError("ERROR! Sizing/packing length mismatch for %s list pup function (%d sizing, %d packing)\n",
          acc->getPath(),bufLen,pp.size());
    }
    CcsSendReply(bufLen,(void *)buf);
    delete[] buf;
  }
  CmiFree(msg);
}
/* Handler run on the neighbor PE: pre-allocate one message per iteration
 * of the upcoming size (stashed in the global buffer_msgs array), then
 * send a start message back to the initiating PE. */
static void setupMessage(Message *msg)
{
  Message *mm;
  int i, size;
  int nextSize = msg->idx;
  size = sizeof(Message)+sizes[nextSize].size;
  /* buffer_msgs holds numiter Message* slots and is indexed as Message**
   * below.  NOTE(review): the malloc result is unchecked and presumably
   * freed elsewhere once the size round ends -- confirm. */
  buffer_msgs = (char*)malloc((sizes[nextSize].numiter) * sizeof(Message*));
  for(i=0; i<sizes[nextSize].numiter; i++) {
    mm = (Message*)CmiAlloc(size);
    mm->srcpe = CmiMyPe();
    CmiSetHandler(mm, pva(iterHandler));
    //mm->idx = pva(nextSize);
    //*((Message**)buffer_msgs+i*sizeof(char*)) = mm;
    ((Message**)buffer_msgs)[i] = mm;
  }
  /* Tell the source PE that this side is ready to start timing. */
  mm = (Message *) CmiAlloc(size);
  mm->srcpe = CmiMyPe();
  mm->idx = nextSize;
  CmiSetHandler(mm, pva(startHandler));
  //fillMessage(mm);
  //CmiAssert(msg->srcpe == 0);
  CmiSyncSendAndFree(msg->srcpe, size, mm);
  CmiFree(msg);
}
/** Double the size of a Priority Queue's hash table and rehash every
 *  element into it; the old table is freed.  Each element's back-handle
 *  (ht_handle) is kept consistent so later unlinks stay O(1).
 *  Fix: removed the redundant second assignment of pq->hash_key_size,
 *  which was already set before the rehash loop. */
void CqsPrioqRehash(_prioq pq)
{
  int oldHsize = pq->hash_key_size;
  int newHsize = oldHsize * 2;
  unsigned int hashval;
  _prioqelt pe, pe1, pe2;
  int i,j;
  _prioqelt *ohashtab = pq->hashtab;
  _prioqelt *nhashtab = (_prioqelt *)CmiAlloc(newHsize*sizeof(_prioqelt));
  pq->hash_key_size = newHsize;
  for(i=0; i<newHsize; i++) nhashtab[i] = 0;
  for(i=0; i<oldHsize; i++) {
    for(pe=ohashtab[i]; pe; ) {
      pe2 = pe->ht_next;   /* save successor before relinking */
      /* Recompute the hash from the priority bits and data words. */
      hashval = pe->pri.bits;
      for (j=0; j<pe->pri.ints; j++) hashval ^= pe->pri.data[j];
      hashval = (hashval&0x7FFFFFFF)%newHsize;
      /* Push onto the head of the new bucket, fixing back-handles. */
      pe1=nhashtab[hashval];
      pe->ht_next = pe1;
      pe->ht_handle = (nhashtab+hashval);
      if (pe1) pe1->ht_handle = &(pe->ht_next);
      nhashtab[hashval]=pe;
      pe = pe2;
    }
  }
  pq->hashtab = nhashtab;
  CmiFree(ohashtab);
}
/* Advance to the next message size.  When every size is done, reset and
 * start the neighbor phase; otherwise pre-allocate the per-iteration
 * bounce messages and ask the current neighbor to set up its side.
 * Fix: added the missing (Message *) cast on CmiAlloc -- void* does not
 * implicitly convert to Message* in C++. */
static void startNextSize(EmptyMsg *msg)
{
  EmptyMsg m;
  Message *mm;
  int i;
  //CmiAssert(CmiMyPe()==0);
  pva(nextSize)++;
  if(pva(nextSize) == pva(numSizes)) {
    /* All sizes timed against this neighbor: move to the next one. */
    pva(nextSize) = -1;
    CmiSetHandler(&m, pva(nbrHandler));
    CmiSyncSend(CmiMyPe(), sizeof(EmptyMsg), &m);
  }
  else {
    int size = sizeof(Message)+sizes[pva(nextSize)].size;
    /* buffer_msgs holds numiter Message* slots, indexed as Message**. */
    buffer_msgs = (char*)malloc((sizes[pva(nextSize)].numiter) * sizeof(Message*));
    for(i=0; i<sizes[pva(nextSize)].numiter; i++) {
      mm = (Message *) CmiAlloc(size);
      mm->srcpe = CmiMyPe();
      mm->idx = pva(nextSize);
      CmiSetHandler(mm, pva(bounceHandler));
      //*((Message**)(buffer_msgs+i*sizeof(char*))) = mm;
      ((Message**)buffer_msgs)[i] = mm;
    }
    /* Ask the neighbor to prepare its side for this size. */
    mm = (Message *) CmiAlloc(size);
    mm->srcpe = CmiMyPe();
    mm->idx = pva(nextSize);
    CmiSetHandler(mm, pva(setupHandler));
    //fillMessage(mm);
    CmiSyncSendAndFree(pva(nextNbr), size, mm);
  }
  CmiFree(msg);
}
/* Advance to the next neighbor node.  Skips our own node; when every
 * neighbor has been timed, resets the counter, hands control to the node
 * handler, and ships the collected timing matrix to node 0; otherwise
 * restarts the per-size loop for the new neighbor. */
static void startNextNbr(EmptyMsg *msg)
{
  EmptyMsg m;
  TimeMessage *tm;
  int i, size;
  //CmiAssert(CmiMyPe()==0);
  CmiFree(msg);
  pva(nextNbr)++;
  /* Never time against ourselves: skip past our own node. */
  if(pva(nextNbr) == CmiMyNode()) {
    CmiSetHandler(&m, pva(nbrHandler));
    CmiSyncSend(CmiMyPe(), sizeof(EmptyMsg), &m);
    return;
  }
  if(pva(nextNbr) == CmiNumNodes()) {
    /* All neighbors done: start the node phase and send the per-node
     * timing table (numSizes doubles per node) to node 0. */
    pva(nextNbr) = -1;
    CmiSetHandler(&m, pva(nodeHandler));
    CmiSyncSend(CmiMyPe(), sizeof(EmptyMsg), &m);
    size = sizeof(TimeMessage)+pva(numSizes)*CmiNumNodes()*sizeof(double);
    tm = (TimeMessage *) CmiAlloc(size);
    for(i=0;i<CmiNumNodes();i++)
      memcpy(tm->data+i*pva(numSizes),pva(times)[i], sizeof(double)*pva(numSizes));
    tm->srcNode = CmiMyNode();
    CmiSetHandler(tm, pva(timeHandler));
    CmiSyncSendAndFree(0, size, tm);
  } else {
    /* More neighbors remain: run the size loop against the next one. */
    CmiSetHandler(&m, pva(sizeHandler));
    CmiSyncSend(CmiMyPe(), sizeof(EmptyMsg), &m);
  }
}
/* Converse handler that fans a CCS request out to its destination(s).
 *   destPE >= 0 : deliver locally only.
 *   destPE == -1: PE 0 broadcasts to all PEs, then also handles it.
 *   destPE < -1 : -destPE is the length of an explicit PE list stored
 *                 after the header; forward along a 4-ary spanning tree
 *                 over that list, then handle the local copy. */
extern "C" void req_fw_handler(char *msg)
{
  int offset = CmiReservedHeaderSize + sizeof(CcsImplHeader);
  CcsImplHeader *hdr = (CcsImplHeader *)(msg+CmiReservedHeaderSize);
  int destPE = (int)ChMessageInt(hdr->pe);
  if (CmiMyPe() == 0 && destPE == -1) {
    /* Broadcast message to all other processors */
    int len=CmiReservedHeaderSize+sizeof(CcsImplHeader)+ChMessageInt(hdr->len);
    CmiSyncBroadcast(len, msg);
  }
  else if (destPE < -1) {
    /* Multicast the message to your children */
    int len=CmiReservedHeaderSize+sizeof(CcsImplHeader)+ChMessageInt(hdr->len)-destPE*sizeof(ChMessageInt_t);
    int index, child, i;
    int *pes = (int*)(msg+CmiReservedHeaderSize+sizeof(CcsImplHeader));
    ChMessageInt_t *pes_nbo = (ChMessageInt_t *)pes;
    /* The user payload sits after the PE list, so shift the offset. */
    offset -= destPE * sizeof(ChMessageInt_t);
    /* The tree root converts the PE list out of network byte order in
     * place, once, before any forwarding happens. */
    if (ChMessageInt(pes_nbo[0]) == CmiMyPe()) {
      for (index=0; index<-destPE; ++index) pes[index] = ChMessageInt(pes_nbo[index]);
    }
    /* Locate our own slot in the list... */
    for (index=0; index<-destPE; ++index) {
      if (pes[index] == CmiMyPe()) break;
    }
    /* ...and forward to our up-to-four children in the implied tree. */
    child = (index << 2) + 1;
    for (i=0; i<4; ++i) {
      if (child+i < -destPE) {
        CmiSyncSend(pes[child+i], len, msg);
      }
    }
  }
  CcsHandleRequest(hdr, msg+offset);
  CmiFree(msg);
}
/* Runs after a broadcast round completes: records the elapsed time for
 * the current size, then either launches the broadcast for the next size
 * (the sizes table is terminated by size == -1) or prints the results,
 * resets all timings, and hands off to the sync_reply phase. */
static void reduction_handler(void *msg)
{
  int i=0;
  int idx = CpvAccess(nextidx);
  EmptyMsg emsg;
  sizes[idx].time = CmiWallTimer() - CpvAccess(starttime);
  CmiFree(msg);
  CpvAccess(numiter) = 0;
  idx++;
  if(sizes[idx].size == (-1)) {
    /* Sentinel reached: all sizes timed.  Report and reset. */
    print_results("Consecutive CmiSyncBroadcastAllAndFree");
    CpvAccess(nextidx) = 0;
    CpvAccess(numiter) = 0;
    while(sizes[i].size != (-1)) {
      sizes[i].time = 0;
      i++;
    }
    /* Start the next benchmark phase on the current PE. */
    CmiSetHandler(&emsg, CpvAccess(sync_reply));
    CpvAccess(lasttime) = CmiWallTimer();
    CmiSyncSend(CpvAccess(currentPe), sizeof(EmptyMsg), &emsg);
    return;
  }
  else {
    /* Launch the broadcast round for the next message size. */
    CpvAccess(nextidx) = idx;
    msg = CmiAlloc(CmiMsgHeaderSizeBytes+sizes[idx].size);
    CmiSetHandler(msg, CpvAccess(bcast_handler));
    CpvAccess(starttime) = CmiWallTimer();
    CmiSyncBroadcastAllAndFree(CmiMsgHeaderSizeBytes+sizes[idx].size, msg);
  }
}
/* Tear down the PE table: free each PE's list, the list array itself,
 * the bookkeeping arrays, and any pooled PTinfo chunks remaining on the
 * free-chunk list after garbage collection. */
PeTable :: ~PeTable()
{
  int i;
  for (i=0;i<NumPes;i++) CmiFree(PeList[i]);
  CmiFree(PeList);
  /* NOTE(review): if msgnum/MaxSize were allocated with new[], these
   * should be delete[] -- confirm against the constructor. */
  delete msgnum;
  delete MaxSize;
  GarbageCollect();
  //CmiFree(ptrlist);
  PTinfo *tmp;
  /* Drain the pooled-chunk free list. */
  while (PTFreeChunks) {
    tmp=PTFreeChunks;
    PTFreeChunks=PTNEXTCHUNK(tmp);
    CmiFree(tmp);
  }
  // delete FreeList;
}
static void sync_reply(void *msg) { ptimemsg tmsg = (ptimemsg)CmiAlloc(sizeof(timemsg)); tmsg->time = CmiWallTimer(); CmiSetHandler(tmsg, CpvAccess(sync_starter)); CmiSyncSendAndFree(0, sizeof(timemsg), tmsg); CmiFree(msg); }
/* Library-exit protocol handler.
 *  StartExitMsg / ExitMsg (PE 0 only): the first arrival sets
 *  _libExitStarted and re-sends the same envelope as a ReqStatMsg to all
 *  PEs -- by broadcast, or (with _ringexit) to one PE per ring stride.
 *  ReqStatMsg: with _ringexit, forward along the ring segment; every PE
 *  then marks the library exit complete. */
void _libExitHandler(envelope *env)
{
  switch(env->getMsgtype()) {
    case StartExitMsg:
      CkAssert(CkMyPe()==0);
      // else: deliberate fallthrough into ExitMsg
    case ExitMsg:
      CkAssert(CkMyPe()==0);
      if(_libExitStarted) {
        /* Duplicate exit request: drop it. */
        CmiFree(env);
        return;
      }
      _libExitStarted = 1;
      env->setMsgtype(ReqStatMsg);
      env->setSrcPe(CkMyPe());
      // if exit in ring, instead of broadcasting, send in ring
      if (_ringexit){
        const int stride = CkNumPes()/_ringtoken;
        int pe = 0;
        while (pe<CkNumPes()) {
          CmiSyncSend(pe, env->getTotalsize(), (char *)env);
          pe += stride;
        }
        CmiFree(env);
      }else{
        CmiSyncBroadcastAllAndFree(env->getTotalsize(), (char *)env);
      }
      break;
    case ReqStatMsg:
      if (_ringexit) {
        /* Forward to the next PE in this ring segment, if any. */
        int stride = CkNumPes()/_ringtoken;
        int pe = CkMyPe()+1;
        if (pe < CkNumPes() && pe % stride != 0)
          CmiSyncSendAndFree(pe, env->getTotalsize(), (char *)env);
        else
          CmiFree(env);
      }
      else
        CmiFree(env);
      //everyone exits here - there may be issues with leftover messages in the queue
      _libExitStarted = 0;
      CpvAccess(charmLibExitFlag) = 1;
      break;
    default:
      CmiAbort("Internal Error(_libExitHandler): Unknown-msg-type. Contact Developers.\n");
  }
}
extern "C" void immediatering_finishHandler(void *msg) { CmiFree(msg); waitFor ++; // only send one megatest_finish when all tests finish if (waitFor%numTests == 0) { megatest_finish(); // Not safe from inside immediate } }
/**
 * Converse handler receiving a signal from another processor in the same
 * node (see _sendTrigger for why this exists).  Checks whether, counting
 * the NodeGroup inits processed by other ranks, the expected init-message
 * count has been reached; if so, finishes initialization.  Only reachable
 * before _initDone: afterwards this handler's messages go to
 * _discardHandler instead.
 */
static void _triggerHandler(envelope *env)
{
  const int expected = _numExpectInitMsgs;
  if (expected &&
      CkpvAccess(_numInitsRecd) + CksvAccess(_numInitNodeMsgs) == expected) {
    DEBUGF(("Calling Init Done from _triggerHandler\n"));
    _initDone();
  }
  if (env != NULL) CmiFree(env);
}
static void bcast_reply(void *msg) { ptimemsg tmsg = (ptimemsg)CmiAlloc(sizeof(timemsg)); tmsg->time = CmiWallTimer(); tmsg->srcpe = CmiMyPe(); CmiSetHandler(tmsg, CpvAccess(bcast_central)); CmiSyncSendAndFree(0, sizeof(timemsg), tmsg); CmiFree(msg); }
static void CpdDebugHandlerStatus(char *msg) { #if ! CMK_NO_SOCKETS ChMessageInt_t reply[2]; reply[0] = ChMessageInt_new(CmiMyPe()); reply[1] = ChMessageInt_new(CpdIsFrozen() ? 0 : 1); CcsSendReply(2*sizeof(ChMessageInt_t), reply); #endif CmiFree(msg); }
/* Kick off the neighbor-timing phase on the next node, if one remains. */
static void startNextNode(EmptyMsg *msg)
{
  CmiFree(msg);
  int nextNode = CmiMyNode() + 1;
  if (nextNode != CmiNumNodes()) {
    EmptyMsg m;
    CmiSetHandler(&m, pva(nbrHandler));
    CmiSyncSend(pva(nodeList)[nextNode], sizeof(EmptyMsg), &m);
  }
}
//Get the length of the given list: static void CpdList_ccs_list_len(char *msg) { const ChMessageInt_t *req=(const ChMessageInt_t *)(msg+CmiReservedHeaderSize); CpdListAccessor *acc=CpdListLookup(req); if (acc!=NULL) { ChMessageInt_t reply=ChMessageInt_new(acc->getLength()); CcsSendReply(sizeof(reply),(void *)&reply); } CmiFree(msg); }
/* Echo: pull the pre-allocated message for this iteration out of the
 * global buffer and return it to the sender via the iteration handler. */
static void bounceMessage(Message *msg)
{
  int iter = msg->iter;
  Message *reply = ((Message **)buffer_msgs)[iter];
  CmiSetHandler(reply, pva(iterHandler));
  //CmiAssert(msg->srcpe==0);
  CmiSyncSendAndFree(msg->srcpe, sizeof(Message)+sizes[msg->idx].size, reply);
  CmiFree(msg);
}
/* Receives reply messages passed up from converse to node 0: peel the
 * CcsImplHeader off the payload and hand both to CcsImpl_reply. */
static void rep_fw_handler(char *msg)
{
  char *body = msg + CmiReservedHeaderSize;
  CcsImplHeader *hdr = (CcsImplHeader *)body;
  body += sizeof(CcsImplHeader);
  int len = ChMessageInt(hdr->len);
  CcsImpl_reply(hdr, len, body);
  CmiFree(msg);
}
/* Unpack each registered readonly from the message payload -- only on
 * rank 0 of the node -- then release the envelope. */
static inline void _processRODataMsg(envelope *env)
{
  if (CmiMyRank() == 0) {
    PUP::fromMem pu((char *)EnvToUsr(env));
    for (size_t idx = 0; idx < _readonlyTable.size(); ++idx) {
      _readonlyTable[idx]->pupData(pu);
    }
  }
  CmiFree(env);
}
/* Sink handler: swallows messages that arrive outside their phase.
 * During a memory-checkpoint restart the dropped message is still
 * counted toward quiescence detection before being freed. */
static void _discardHandler(envelope *env)
{
//  MESSAGE_PHASE_CHECK(env);
  DEBUGF(("[%d] _discardHandler called.\n", CkMyPe()));
#if CMK_MEM_CHECKPOINT
  //CkPrintf("[%d] _discardHandler called!\n", CkMyPe());
  if (CkInRestarting()) CpvAccess(_qd)->process();
#endif
  CmiFree(env);
}
/* Count acknowledgements; once every expected PE has acked, broadcast
 * the exit message.  In two-way mode all PEs ack, otherwise half do. */
CmiHandler ackHandlerFunc(char *msg)
{
  CmiFree(msg);
  const int expected = CpvAccess(twoway) ? CmiNumPes() : CmiNumPes()/2;
  if (++CpvAccess(ackCount) == expected) {
    void *exitmsg = CmiAlloc(CmiMsgHeaderSizeBytes);
    CmiSetHandler(exitmsg, CpvAccess(exitHandler));
    CmiSyncBroadcastAllAndFree(CmiMsgHeaderSizeBytes, exitmsg);
  }
  return 0;
}
void GarbageCollectMsg(OutgoingMsg ogm) { MACHSTATE2(3,"GarbageCollectMsg called on ogm %p refcount %d",ogm,ogm->refcount); if (ogm->refcount == 0) { if (ogm->freemode == 'A') { ogm->freemode = 'X'; } else { if (ogm->freemode != 'G') CmiFree(ogm->data); FreeOutgoingMsg(ogm); } } }