void ProxyPatch::boxClosed(int box) {
  ProxyGBISP1ResultMsg *msg1;
  ProxyGBISP2ResultMsg *msg2;
  if (box == 1) { // force Box
    // Note: delay the deletion of proxyDataMsg (of the
    // current step) until the next step.  This is done
    // for the sake of atom migration (ProxyDataMsg),
    // as the ProxyPatch has to unregister the atoms
    // of the previous step in the AtomMap data structure.
    // Also denotes the end of gbis phase 3.
    sendResults();
  } else if (box == 5) { // end of phase 1
    // this msg should only have nonzero atoms if flags.doNonbonded
    int msgAtoms = (flags.doNonbonded) ? numAtoms : 0;
    msg1 = new (msgAtoms, PRIORITY_SIZE) ProxyGBISP1ResultMsg;
    for (int i = 0; i < msgAtoms; i++) {
      msg1->psiSum[i] = psiSum[i];
    }
    msg1->patch = patchID;
    msg1->psiSumLen = msgAtoms;
    msg1->origPe = CkMyPe();
    SET_PRIORITY(msg1, flags.sequence,
                 GB1_PROXY_RESULTS_PRIORITY + PATCH_PRIORITY(patchID));
    ProxyMgr::Object()->sendResult(msg1);
  } else if (box == 8) { // end of phase 2
    // this msg should only have nonzero atoms if flags.doFullElectrostatics
    int msgAtoms = (flags.doFullElectrostatics) ? numAtoms : 0;
    msg2 = new (msgAtoms, PRIORITY_SIZE) ProxyGBISP2ResultMsg;
    for (int i = 0; i < msgAtoms; i++) {
      msg2->dEdaSum[i] = dEdaSum[i];
    }
    msg2->patch = patchID;
    msg2->dEdaSumLen = msgAtoms;
    msg2->origPe = CkMyPe();
    SET_PRIORITY(msg2, flags.sequence,
                 GB2_PROXY_RESULTS_PRIORITY + PATCH_PRIORITY(patchID));
    ProxyMgr::Object()->sendResult(msg2);
  } else if (box == 9) {
    // nothing
  } else if (box == 10) {
    // LCPO: do nothing
  }
  if (!--boxesOpen) {
    DebugM(2, patchID << ": " << "Checking message buffer.\n");
    if (proxyMsgBufferStatus == PROXYALLMSGBUFFERED) {
      CmiAssert(curProxyMsg != NULL);
      DebugM(3, "Patch " << patchID << " processing buffered proxy ALL data.\n");
      receiveAll(curProxyMsg);
    } else if (proxyMsgBufferStatus == PROXYDATAMSGBUFFERED) {
      CmiAssert(curProxyMsg != NULL);
      DebugM(3, "Patch " << patchID << " processing buffered proxy data.\n");
      receiveData(curProxyMsg);
    }
  } else {
    DebugM(3, "ProxyPatch " << patchID << ": " << boxesOpen
              << " boxes left to close.\n");
  }
}
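// The SET_PRIORITY calls above tag each Charm++ message with a timestep
// sequence number and a priority before it is sent.  Below is a minimal
// sketch of what such a macro does, using only the stock Charm++ queueing
// API (CkSetQueueing, CkPriorityPtr, CK_QUEUEING_IFIFO); the bit layout is
// illustrative, and NAMD's real SET_PRIORITY may pack the fields differently.
#include "charm++.h"

static void setMsgPriority(void *msg, int seq, int prio) {
  CkSetQueueing(msg, CK_QUEUEING_IFIFO);  // integer-priority FIFO queueing
  // Smaller integers are scheduled first; putting the sequence number in
  // the high bits lets older timesteps drain before newer ones.
  *(int *)CkPriorityPtr(msg) = (seq << 16) | (prio & 0xffff);
}
// The message must have been allocated with room for the priority word,
// as in the "new (msgAtoms, PRIORITY_SIZE) ProxyGBISP1ResultMsg" calls
// above, where the final placement argument reserves the priority bits.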
void ProxyPatch::sendResults(void) {
  DebugM(3, "sendResults(" << patchID << ")\n");

  register int i = 0;
  register ForceList::iterator f_i, f_e, f2_i;

  // Fold every mergeable force array into the normal-force array so only
  // one copy travels back to the HomePatch, then release the merged arrays.
  for (i = Results::normal + 1; i <= flags.maxForceMerged; ++i) {
    f_i = f[Results::normal].begin();
    f_e = f[Results::normal].end();
    f2_i = f[i].begin();
    for (; f_i != f_e; ++f_i, ++f2_i) {
      *f_i += *f2_i;
    }
    f[i].resize(0);
  }
  for (i = flags.maxForceUsed + 1; i < Results::maxNumForces; ++i)
    f[i].resize(0);

#if CMK_PERSISTENT_COMM && USE_PERSISTENT_TREE
  CmiUsePersistentHandle(&localphs, 1);
#endif
  if (proxyRecvSpanning == 0) {
#ifdef REMOVE_PROXYRESULTMSG_EXTRACOPY
    ProxyResultVarsizeMsg *msg =
        ProxyResultVarsizeMsg::getANewMsg(CkMyPe(), patchID, PRIORITY_SIZE, f);
#else
    ProxyResultMsg *msg = new (PRIORITY_SIZE) ProxyResultMsg;
    msg->node = CkMyPe();
    msg->patch = patchID;
    for (i = 0; i < Results::maxNumForces; ++i)
      msg->forceList[i] = &(f[i]);
#endif
    SET_PRIORITY(msg, flags.sequence,
                 PROXY_RESULTS_PRIORITY + PATCH_PRIORITY(patchID));
    // sending results to HomePatch
    ProxyMgr::Object()->sendResults(msg);
  } else {
    ProxyCombinedResultMsg *msg = new (PRIORITY_SIZE) ProxyCombinedResultMsg;
    SET_PRIORITY(msg, flags.sequence,
                 PROXY_RESULTS_PRIORITY + PATCH_PRIORITY(patchID));
    msg->nodes.add(CkMyPe());
    msg->patch = patchID;
    for (i = 0; i < Results::maxNumForces; ++i)
      msg->forceList[i] = &(f[i]);
    // sending results to HomePatch
    ProxyMgr::Object()->sendResults(msg);
  }
#if CMK_PERSISTENT_COMM && USE_PERSISTENT_TREE
  CmiUsePersistentHandle(NULL, 0);
#endif
}
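// The merge loop above reduces several per-kind force arrays into the
// Results::normal slot before sending.  A self-contained sketch of the
// same reduction, using std::vector in place of NAMD's ForceList (the
// names here are illustrative, not NAMD's):
#include <vector>

using Force = double;  // stand-in for NAMD's per-atom force type

void mergeForces(std::vector<std::vector<Force> > &f, int maxMerged) {
  for (int i = 1; i <= maxMerged; ++i) {   // slot 0 plays Results::normal
    for (size_t j = 0; j < f[0].size(); ++j)
      f[0][j] += f[i][j];                  // fold array i into the normal array
    f[i].clear();                          // merged arrays need not be sent
  }
}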
void ComputeMgr::sendNonbondedMICSlaveEnqueue(
    ComputeNonbondedMIC *c, int pe, int seq, int prio, int ws) {
  if (ws == 2 && c->localHostedPatches.size() == 0) return;
  LocalWorkMsg *msg = (ws == 1 ? c->localWorkMsg : c->localWorkMsg2);
  msg->compute = c;
  int type = c->type();
  int cid = c->cid;
  SET_PRIORITY(msg, seq, prio);
  CProxy_WorkDistrib wdProxy(CkpvAccess(BOCclass_group).workDistrib);
  wdProxy[pe].enqueueMIC(msg);
}
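// wdProxy[pe].enqueueMIC(msg) above is the standard Charm++ idiom for
// invoking an entry method on one branch of a group: indexing the proxy
// selects the instance on processor pe, and the call ships the message
// there asynchronously.  A generic sketch of the same idiom, assuming a
// group and entry method declared in a .ci file (CProxy_Worker and
// doWork are placeholder names, not NAMD's):
void sendWork(CkGroupID workers, int pe, LocalWorkMsg *msg) {
  CProxy_Worker proxy(workers);  // rebuild a typed proxy from the group ID
  proxy[pe].doWork(msg);         // asynchronous invocation on PE `pe`
}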
void Setup_Adc() {
  RCC->CFGR |= 0x0000C000;                  // ADC prescaler bits (ADCPRE)
  RCC->APB2ENR |= __ENABLE_CLOCK_ADC;       // enable the ADC peripheral clock
  ADC1->CR1 |= __AWDEN_MASQUE_OR;           // analog watchdog enable
  ADC1->CR1 |= __EOCIE_MASQUE_OR;           // end-of-conversion interrupt enable
  ADC1->CR1 |= __AWDCH_MASQUE_OR;           // watchdog channel selection
  // ADC1->CR2 |= ~__CONT__MASQUE_AND;      // disabled; "&=" was likely intended,
                                            // to clear CONT (continuous mode)
  ADC1->CR2 |= __EXTSEL__MASQUE_OR;         // external trigger source selection
  ADC1->CR2 |= __EXTTRIG__MASQUE_OR;        // enable conversion on external trigger
  ADC1->SMPR1 = __SMP12;                    // sample time for channel 12
  ADC1->SQR1 = __SQR1;                      // regular sequence length
  ADC1->SQR3 |= __SQR3__MASQUE_OR;          // first conversion in the sequence
  NVIC->ISER[0] |= __ADC_GLOBAL_INTERRUPT;  // enable the ADC interrupt in the NVIC
  // NVIC->IPR[8] |= (SPEED_IT_LVL) << (0);
  SET_PRIORITY(18, ADC_IT_LVL);             // IRQ 18: ADC global interrupt
}
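// The last two NVIC steps can also be written with the standard CMSIS
// helpers instead of raw register writes.  A sketch assuming an STM32F1-
// class part, where IRQ 18 is ADC1_2_IRQn (the device header name and
// part family are assumptions; ADC_IT_LVL is the priority level used above):
#include "stm32f1xx.h"  // CMSIS device header for the assumed part family

static void setupAdcIrq(void) {
  NVIC_SetPriority(ADC1_2_IRQn, ADC_IT_LVL);  // same effect as SET_PRIORITY(18, ...)
  NVIC_EnableIRQ(ADC1_2_IRQn);                // same effect as NVIC->ISER[0] |= ...
}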
}

#define UNBLOCK(index, cpu0, cpu1) \
  { \
    KIND_UNBLOCK, \
    index, \
    { 0 }, \
    { cpu0, cpu1 } \
  }

static const test_action test_actions[] = {
  RESET,
  UNBLOCK( 0,          0, IDLE),
  UNBLOCK( 1,          0,    1),
  UNBLOCK( 3,          0,    1),
  SET_PRIORITY( 1, P(2),   0, 1),
  SET_PRIORITY( 3, P(1),   0, 3),
  BLOCK( 3,            0,    1),
  SET_AFFINITY( 1, A(1, 1), 0, 1),
  SET_AFFINITY( 1, A(1, 0), 1, 0),
  SET_AFFINITY( 1, A(1, 1), 1, 0),
  SET_AFFINITY( 1, A(1, 0), 1, 0),
  SET_AFFINITY( 1, A(0, 1), 0, 1),
  BLOCK( 0,         IDLE,    1),
  UNBLOCK( 0,          0,    1),
  BLOCK( 1,            0, IDLE),
  UNBLOCK( 1,          0,    1),
  RESET,

  /*
   * Show that FIFO order is honoured across all threads of the same
   * priority.
   */
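  /*
   * Judging from the UNBLOCK macro above and the four-argument uses in
   * the table, the companion SET_PRIORITY macro plausibly fills the third
   * slot of the test_action initializer with the new priority.  A sketch
   * under that assumption (KIND_SET_PRIORITY is inferred from the naming
   * pattern, not taken from the source):
   *
   *   #define SET_PRIORITY(index, prio, cpu0, cpu1) \
   *     { \
   *       KIND_SET_PRIORITY, \
   *       index, \
   *       { prio }, \
   *       { cpu0, cpu1 } \
   *     }
   */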