// Returns true when the remote transaction is older than the local one:
// the smaller timestamp is older, and processor/thread IDs break exact ties.
bool TransactionConflictManager::isRemoteOlder(int thread, int remote_thread, uint64 local_timestamp, uint64 remote_timestamp, MachineID remote_id){
  assert(local_timestamp != 0);
  assert(remote_timestamp != 0);
  /*
  if(remote_timestamp == 0){
    int remote_processor = L1CacheMachIDToProcessorNum(remote_id);
    int remote_chip      = remote_processor / RubyConfig::numberOfProcsPerChip();
    int remote_chip_ver  = remote_processor % RubyConfig::numberOfProcsPerChip();       
    cout << " " << g_eventQueue_ptr->getTime() << " " << getLogicalProcID(thread) << " [" << getProcID() << "," << thread << "] REMOTE_TIMESTAMP is 0 remote_id: " << remote_processor << endl;
    TransactionInterfaceManager* remote_mgr = g_system_ptr->getChip(remote_chip)->getTransactionInterfaceManager(remote_chip_ver);
    for (int i = 0; i < RubyConfig::numberofSMTThreads(); i++)
      cout << "[ " << i << " XACT_LEVEL: " << remote_mgr->getTransactionLevel(i) << " TIMESTAMP: " << remote_mgr->getXactConflictManager()->getTimestamp(i) << "] ";         
    cout << " current time: " << g_eventQueue_ptr->getTime() << endl;
    assert(0);
  }          
  */

  bool older = false;
  
  if (local_timestamp == remote_timestamp){        
    if (getProcID() == (int) L1CacheMachIDToProcessorNum(remote_id)){   
      older = (remote_thread < thread);
    } else {
      older = (int) L1CacheMachIDToProcessorNum(remote_id) < getProcID();
    }
  } else {
    older = (remote_timestamp < local_timestamp);  
  }
  return older;                                                  
}          
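A minimal standalone sketch of the priority rule above, assuming the usual convention that the smaller (older) timestamp wins and that processor/thread IDs only break exact timestamp ties (names here are illustrative, not the Ruby API):

#include <cassert>
#include <cstdint>

// Distilled form of isRemoteOlder(): true when the remote transaction is older.
bool remoteOlder(int local_proc, int local_thread, uint64_t local_ts,
                 int remote_proc, int remote_thread, uint64_t remote_ts) {
  if (local_ts != remote_ts)
    return remote_ts < local_ts;          // smaller timestamp is older
  if (local_proc == remote_proc)
    return remote_thread < local_thread;  // same processor: lower thread ID wins
  return remote_proc < local_proc;        // tie across processors: lower proc ID wins
}

int main() {
  assert(remoteOlder(1, 0, 200, 0, 0, 100));   // remote timestamp is older
  assert(!remoteOlder(1, 0, 100, 0, 0, 200));  // local timestamp is older
  assert(remoteOlder(1, 1, 100, 1, 0, 100));   // exact tie, same proc: lower thread
  assert(!remoteOlder(0, 0, 100, 1, 0, 100));  // exact tie: lower proc ID wins
  return 0;
}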
Example #2
/* getProcID(const char *) comes from a different project than the Ruby code
   above: it looks up a process ID by name. The declaration below is an
   assumed signature, added so the snippet compiles standalone. */
#include <stdio.h>

int getProcID(const char *name);

int main()
{
	printf(" CHSMD = %d\n", getProcID("CHSMD"));
	printf(" ALMD = %d\n", getProcID("ALMD"));
	printf(" COND = %d\n", getProcID("COND"));
	printf(" COND1 = %d\n", getProcID("COND1"));
	printf(" COND2 = %d\n", getProcID("COND2"));
	return 0;
}
bool TransactionIsolationManager::isInWriteSetFilter(int thread, Address physicalAddr){
  physicalAddr.makeLineAddress();
  if(PERFECT_FILTER){
    // use perfect filters
    bool in_write_set = isInWriteSetPerfectFilter(thread, physicalAddr);
    g_system_ptr->getProfiler()->profileWriteSet(physicalAddr, in_write_set, in_write_set, getProcID(), thread);
    return in_write_set;
  }
  else{
    // use Bloom filters, cross-checked against the perfect filter
    bool result = m_writeSetFilter[thread]->isSet(physicalAddr);
    bool in_write_set = isInWriteSetPerfectFilter(thread, physicalAddr);
    assert( result || !in_write_set);  // NO FALSE NEGATIVES
    g_system_ptr->getProfiler()->profileWriteSet(physicalAddr, result, in_write_set, getProcID(), thread);
    return result;
  }
}
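The assert above captures the defining guarantee of a Bloom-style signature: false positives are allowed, false negatives are not. A toy single-hash filter demonstrating the same invariant (a sketch, not the actual Ruby filter implementation):

#include <bitset>
#include <cassert>
#include <set>

// Toy single-hash Bloom filter over line addresses; real signatures use
// several hash functions, but the invariant checked below is the same.
struct ToyFilter {
  std::bitset<64> bits;
  void insert(unsigned long addr) { bits.set((addr >> 6) & 63); }
  bool isSet(unsigned long addr) const { return bits.test((addr >> 6) & 63); }
};

int main() {
  ToyFilter filter;
  std::set<unsigned long> perfect; // stands in for the perfect filter
  for (unsigned long addr : {0x40UL, 0x80UL, 0x1240UL}) {
    filter.insert(addr);
    perfect.insert(addr);
  }
  for (unsigned long addr = 0; addr < 0x2000; addr += 0x40) {
    bool result = filter.isSet(addr);      // may be a false positive...
    bool in_set = perfect.count(addr) > 0;
    assert(result || !in_set);             // ...but NO FALSE NEGATIVES
  }
  return 0;
}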
Example #4
int getCellScore (ProcessData * pData, ScoringData * sData, WavesData * wData, MOATypeShape * cellIndex, MOATypeElmVal * score, int * inSearchSpace, int NeighborSearch, MOATypeInd NeighbIndex) {
    int ret = 0;
    MOATypeDimn k;
    MOATypeInd NeighbFlatIndex;
    /*Check if cellIndex is found in the current scoring partition*/
    if ((NeighborSearch == 1) && (IsCellInPart(cellIndex, sData->p_index, sData->seqNum, sData->seqLen, pData->partitionSize) == 0) && 
        (getLocalIndex (cellIndex, sData->p_index, sData->seqNum, sData->seqLen, pData->partitionSize, &sData->neighbor) == 0)) {
        NeighbFlatIndex = Gamma(sData->neighbor, sData->msaAlgn->dimn, sData->msaAlgn->shape, sData->msaAlgn->dimn, 1);
        (*score) = sData->msaAlgn->elements[NeighbFlatIndex].val;
        if (sData->msaAlgn->elements[NeighbFlatIndex].prev != NULL && sData->msaAlgn->elements[NeighbFlatIndex].prev_ub > 0 &&
            sData->NghbMOA != NULL && NeighbIndex >= 0 && NeighbIndex < sData->NghbMOA->elements_ub) {
            sData->NghbMOA->elements[NeighbIndex].prev = mmalloc(sizeof *sData->NghbMOA->elements[NeighbIndex].prev);
            sData->NghbMOA->elements[NeighbIndex].prev_ub = 1;
            sData->NghbMOA->elements[NeighbIndex].prev[0] = mmalloc(sData->seqNum * sizeof *sData->NghbMOA->elements[NeighbIndex].prev[0]);
            for (k = 0; k < sData->seqNum; k++)
                sData->NghbMOA->elements[NeighbIndex].prev[0][k] = sData->msaAlgn->elements[NeighbFlatIndex].prev[0][k];
        }
    }
    else {
        /*check if neighbor's partition is included in search space*/
        MOATypeShape * partIndex = mmalloc (pData->seqNum * sizeof *partIndex);
        if  (getPartitionIndex (cellIndex, pData->seqNum, pData->seqLen, wData->partitionSize, &partIndex) == 0) {
            /* parenthesized for clarity: store the in-search-space flag, then branch on it */
            if (((*inSearchSpace) = (isPartInSearchSpace(partIndex, wData) == 0))) {
                long waveNo, partNo;
                getPartitionPosition (wData, partIndex, &waveNo, &partNo);
                if (partNo >= 0) {
                    if (myProcid == getProcID (wData, waveNo, partNo)) {                        
                        /*Check if Neighbor is found in other local partitions OCout Buffer*/
                        if(checkPrevPartitions(pData, cellIndex, score) != 0) {
                            /*average the neighboring (up to 2 strides) cell scores*/
                            (*score) = averageNeighborsScore(pData, sData, wData, cellIndex);
                        }
                    }
                    /*Check if Neighbor is already received from other processors in OCin Buffer*/
                    else if (checkRecvOC(pData, wData, cellIndex, score, 0) != 0)    
                        ret = -1;
                }
            }
        }
        free (partIndex);
    }
    return ret;
}
Example #5
/* Takes an MPI-process LP id or a torus-node LP id and returns the process
   (PE) on which that LP is mapped. */
tw_peid
mapping( tw_lpid gid )
{
    int rank;
    int offset;
    int is_rank = 0;

    if(gid < N_nodes) {
        rank = gid / nlp_nodes_per_pe;
    }
    else {
        rank = getProcID( gid ) / nlp_mpi_procs_per_pe;
        is_rank = N_nodes;
    }

    if(nlp_nodes_per_pe == (N_nodes/tw_nnodes()))
        offset = is_rank + (nlp_nodes_per_pe + 1) * node_rem;
    else
        offset = is_rank + nlp_nodes_per_pe * node_rem;

    if(node_rem)
    {
        if( g_tw_mynode >= node_rem )
        {
            if(gid < offset)
                rank = gid / (nlp_nodes_per_pe + 1);
            else
                rank = node_rem + ((gid - offset)/nlp_nodes_per_pe);
        }
        else
        {
            if(gid >= offset)
                rank = node_rem + ((gid - offset)/(nlp_nodes_per_pe - 1));
        }
    }
    return rank;
}
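The node_rem handling above is a balanced block mapping: when the LP count does not divide evenly across PEs, the first node_rem PEs each take one extra LP. A simplified standalone check of that idea, covering only the node-LP half with illustrative names:

#include <cstdio>

// Balanced block mapping: the first `rem` PEs hold (per+1) LPs, the rest hold per.
// A simplified mirror of the node-LP arithmetic in mapping() above.
int lp_to_pe(long gid, long n_lps, int n_pes) {
  long per = n_lps / n_pes;
  long rem = n_lps % n_pes;
  long boundary = (per + 1) * rem;            // LPs owned by the "big" PEs
  if (gid < boundary) return (int)(gid / (per + 1));
  return (int)(rem + (gid - boundary) / per);
}

int main() {
  // 10 LPs over 4 PEs -> PEs 0,1 get 3 LPs; PEs 2,3 get 2 LPs.
  for (long gid = 0; gid < 10; gid++)
    printf("lp %ld -> pe %d\n", gid, lp_to_pe(gid, 10, 4));
  return 0;
}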
// used by Opal for SMT (intra-processor) conflicts
int TransactionConflictManager::getOldestThreadExcludingThread(int thread){
  uint64 currentTime = g_eventQueue_ptr->getTime();
  uint64 oldestTime = currentTime;
  int oldest_thread = -1;

  for (int i = 0; i < RubyConfig::numberofSMTThreads(); i++){
    if ((thread != i) && (XACT_MGR->getTransactionLevel(i) > 0) && (m_timestamp[i] < oldestTime)){
        oldestTime = m_timestamp[i];
        oldest_thread = i;
    }
  }  
  assert(oldestTime > 0);
  if(oldest_thread == -1){
    //error
    cout << "[ " << getProcID() << "] getOldestThreadExcludingThread ERROR NACKING SMT THREAD NOT FOUND thread = " << thread << " currentTime = " << currentTime << endl;
    for (int i = 0; i < RubyConfig::numberofSMTThreads(); i++){
      int read_count = XACT_MGR->getXactIsolationManager()->getTotalReadSetCount(i);
      int write_count = XACT_MGR->getXactIsolationManager()->getTotalWriteSetCount(i);
      cout << "\tThread " << i << " timestamp = " << m_timestamp[i] << " Level = " << XACT_MGR->getTransactionLevel(i) << " read_set_count = " << read_count << " write_set_count = " << write_count << endl;
    }
  }
  //assert(oldest_thread != -1);
  return oldest_thread;
}
void TransactionIsolationManager::setFiltersToXactLevel(int thread, int new_xact_level, int old_xact_level){
  assert((new_xact_level >= 0) && (new_xact_level <= m_readSet[thread].size()) && (new_xact_level <= m_writeSet[thread].size()));

  clearReadSetFilter(thread);
  clearWriteSetFilter(thread);

  for (int i = 0; i < new_xact_level; i++){
    Vector<Address> readSet = m_readSet[thread][i].keys();
    Vector<Address> writeSet = m_writeSet[thread][i].keys(); 

    for (int j = 0; j < readSet.size(); j++)
      addToReadSetFilter(thread, readSet[j]);
    for (int j = 0; j < writeSet.size(); j++)
      addToWriteSetFilter(thread, writeSet[j]);      
  }
  
  if (XACT_EAGER_CD) {
    for (int i = old_xact_level; i > new_xact_level; i--){
      g_system_ptr->getXactIsolationChecker()->clearReadSet(getLogicalProcID(thread), i);        
      g_system_ptr->getXactIsolationChecker()->clearWriteSet(getLogicalProcID(thread), i);
    }
  }          
  
  if (XACT_DEBUG && XACT_DEBUG_LEVEL > 0){
    cout << g_eventQueue_ptr->getTime() << " " << getLogicalProcID(thread) << " [" << getProcID() << "," << thread << "] SETTING FILTERS to XACT LEVEL: " << new_xact_level << endl;
  }
}                      
void TransactionIsolationManager::releaseReadIsolation(int thread){
  
  int levels = XACT_MGR->getTransactionLevel(thread);
  for (int i = 0; i < levels; i++){
    clearReadSetPerfectFilter(thread, i + 1);
  }

  if(!PERFECT_FILTER){
    m_readSetFilter[thread]->clear();
  }

  if (XACT_DEBUG && XACT_DEBUG_LEVEL > 0){
    cout << g_eventQueue_ptr->getTime() << " " << getLogicalProcID(thread) << " [" << getProcID() << "," << thread << "] ABORT RELEASING READ ISOLATION " << endl;
  }
}
int TransactionIsolationManager::getLogicalProcID(int thread) const{
  return getProcID() * RubyConfig::numberofSMTThreads() + thread;
}
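getLogicalProcID() packs a (processor, SMT thread) pair into a single integer; magicWait() below inverts it with division and modulo. A quick round-trip check, with the thread count fixed at 4 as an illustrative stand-in for RubyConfig::numberofSMTThreads():

#include <cassert>

const int kSMTThreads = 4; // stand-in for RubyConfig::numberofSMTThreads()

int logicalProcID(int proc, int thread) { return proc * kSMTThreads + thread; }

int main() {
  for (int proc = 0; proc < 8; proc++)
    for (int thread = 0; thread < kSMTThreads; thread++) {
      int logical = logicalProcID(proc, thread);
      assert(logical / kSMTThreads == proc);   // inverse used in magicWait()
      assert(logical % kSMTThreads == thread);
    }
  return 0;
}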
bool TransactionConflictManager::shouldNackStore(Address addr, uint64 remote_timestamp, MachineID remote_id){
  bool existConflict = false;      
  int remote_proc = L1CacheMachIDToProcessorNum(remote_id);
  int remote_thread = 0;  // conflicts are attributed to SMT thread 0 on the remote processor
  int remote_logical_proc_num = remote_proc * RubyConfig::numberofSMTThreads() + remote_thread;     
  string conflict_res_policy(XACT_CONFLICT_RES);   
  if (XACT_EAGER_CD && !XACT_LAZY_VM){
    if (XACT_MGR->isInWriteFilterSummary(addr)){
      for (int i = 0; i < RubyConfig::numberofSMTThreads(); i++){
        setLowestConflictLevel(i, addr, true);
      }  
      if (conflict_res_policy == "TIMESTAMP"){
        assert(!OpalInterface::isOpalLoaded());      
        if (getTimestamp(0) >= remote_timestamp){
          XACT_MGR->setAbortFlag(0, addr);                        
          if (ATMTP_ENABLED) {
            XACT_MGR->setCPS_coh(0);
          }
          setEnemyProcessor(0, remote_logical_proc_num);
          m_magicWaitingOn[0] = remote_logical_proc_num;
          m_magicWaitingTime[0] = remote_timestamp;
          m_needMagicWait[0] = true;
          // remove this addr from isolation checker
          if(XACT_ISOLATION_CHECK){
            //cout << "REMOVING ADDR " << addr << " FROM READ SET OF " << getProcID() << endl;
            Address temp = addr;
            temp.makeLineAddress();
            int transactionLevel = XACT_MGR->getTransactionLevel(0);
            for(int level=1; level<=transactionLevel; ++level){
              g_system_ptr->getXactIsolationChecker()->removeFromWriteSet(getProcID(), temp, level);
            }
          }
        }
      } 
      return true;
    } else if (XACT_MGR->isInReadFilterSummary(addr)){  
      for (int i = 0; i < RubyConfig::numberofSMTThreads(); i++){
        setLowestConflictLevel(i, addr, true);
      }  
      if (conflict_res_policy == "HYBRID" || conflict_res_policy == "TIMESTAMP"){
        assert(!OpalInterface::isOpalLoaded());      
        if (getTimestamp(0) >= remote_timestamp){
          XACT_MGR->setAbortFlag(0, addr);                        
          if (ATMTP_ENABLED) {
            XACT_MGR->setCPS_coh(0);
          }
          setEnemyProcessor(0, remote_logical_proc_num);
          // remove this addr from isolation checker
          if(XACT_ISOLATION_CHECK){
            //cout << "REMOVING ADDR " << addr << " FROM READ SET OF " << getProcID() << endl;
            Address temp = addr;
            temp.makeLineAddress();
            int transactionLevel = XACT_MGR->getTransactionLevel(0);
            for(int level=1; level<=transactionLevel; ++level){
              g_system_ptr->getXactIsolationChecker()->removeFromReadSet(getProcID(), temp, level);
            }
          }
          return false;
        } else {
          return true;
        }
      } 
      return true;
    }                        
    return false;            
  } else if (XACT_EAGER_CD && XACT_LAZY_VM){
    if (XACT_MGR->isInWriteFilterSummary(addr) || 
            XACT_MGR->isInReadFilterSummary(addr)){        
      assert(!OpalInterface::isOpalLoaded());      
      if (XACT_MGR->isTokenOwner(0)) return true;
      if (conflict_res_policy == "BASE"){        
        int proc = getLogicalProcID(0);
        //cout << "Proc " << proc << " STORE SETTING ABORT FLAG PC = " << SIMICS_get_program_counter(proc) << " Remote Proc = " << remote_logical_proc_num << " RemotePC = " << SIMICS_get_program_counter(remote_logical_proc_num) << " addr = " << addr << endl;
        XACT_MGR->setAbortFlag(0, addr);                        
        if (ATMTP_ENABLED) {
          XACT_MGR->setCPS_coh(0);
        }
        setEnemyProcessor(0, remote_logical_proc_num);
        // remove this addr from isolation checker
        if(XACT_ISOLATION_CHECK){
          //cout << "REMOVING ADDR " << addr << " FROM READ SET OF " << getProcID() << endl;
          Address temp = addr;
          temp.makeLineAddress();
          if(XACT_MGR->isInWriteFilterSummary(addr)){
            int transactionLevel = XACT_MGR->getTransactionLevel(0);
            for(int level=1; level<=transactionLevel; ++level){
              g_system_ptr->getXactIsolationChecker()->removeFromWriteSet(getProcID(), temp, level);
            }
          }
          if(XACT_MGR->isInReadFilterSummary(addr)){
            int transactionLevel = XACT_MGR->getTransactionLevel(0);
            for(int level=1; level<=transactionLevel; ++level){
              g_system_ptr->getXactIsolationChecker()->removeFromReadSet(getProcID(), temp, level);
            }
          }
        }
        return false;
      } else if (conflict_res_policy == "TIMESTAMP"){
        if (getTimestamp(0) >= remote_timestamp){          
          XACT_MGR->setAbortFlag(0, addr);                        
          if (ATMTP_ENABLED) {
            XACT_MGR->setCPS_coh(0);
          }
          setEnemyProcessor(0, remote_logical_proc_num);
          // remove this addr from isolation checker
          if(XACT_ISOLATION_CHECK){
            //cout << "REMOVING ADDR " << addr << " FROM READ SET OF " << getProcID() << endl;
            Address temp = addr;
            temp.makeLineAddress();
            if(XACT_MGR->isInWriteFilterSummary(addr)){
              int transactionLevel = XACT_MGR->getTransactionLevel(0);
              for(int level=1; level<=transactionLevel; ++level){
                g_system_ptr->getXactIsolationChecker()->removeFromWriteSet(getProcID(), temp, level);
              }
            }
            if(XACT_MGR->isInReadFilterSummary(addr)){
              int transactionLevel = XACT_MGR->getTransactionLevel(0);
              for(int level=1; level<=transactionLevel; ++level){
                g_system_ptr->getXactIsolationChecker()->removeFromReadSet(getProcID(), temp, level);
              }
            }
          }
          return false;
        } 
        return true;
      } else if (conflict_res_policy == "CYCLE"){
        return true;
      } else {
        assert(0);
      }                     
    }
    return false;
  } else {
    // for LL systems
    if (XACT_MGR->isInReadFilterSummary(addr) || 
            XACT_MGR->isInWriteFilterSummary(addr)){
      if (conflict_res_policy != "BASE"){
        cout << "XACT_CONFLICT_RES: " << conflict_res_policy << endl;
        assert(0);
      }  
      assert(!OpalInterface::isOpalLoaded());      
      if (XACT_MGR->isTokenOwner(0)) return true;
      if (XACT_MGR->isInReadFilterSummary(addr)){
        XACT_MGR->setAbortFlag(0, addr);                        
        if (ATMTP_ENABLED) {
          XACT_MGR->setCPS_coh(0);
        }
        setEnemyProcessor(0, remote_logical_proc_num);
      }
      return false;
    }
    return false;                   
  }        
  assert(0);
  return false;       
}   
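Stripped of the filter and isolation-checker bookkeeping, the TIMESTAMP branches above reduce to one rule: the transaction with the older (smaller) timestamp wins, and an exact tie goes to the remote requester because the comparison is >=. A hedged sketch of the eager-CD/lazy-VM store case (types and names are illustrative):

#include <cassert>
#include <cstdint>

// Sketch of the XACT_EAGER_CD + XACT_LAZY_VM "TIMESTAMP" store case above:
// the older (smaller) timestamp wins; ties go to the remote requester.
struct Decision { bool nackRemote; bool abortLocal; };

Decision resolveStoreConflict(uint64_t local_ts, uint64_t remote_ts) {
  if (local_ts >= remote_ts)
    return { false, true };  // remote is older (or tied): abort locally, no NACK
  return { true, false };    // local is older: NACK the remote store
}

int main() {
  assert(resolveStoreConflict(200, 100).abortLocal);
  assert(resolveStoreConflict(100, 200).nackRemote);
  assert(resolveStoreConflict(100, 100).abortLocal); // tie: requester wins
  return 0;
}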
bool TransactionConflictManager::magicWait(int thread){
  if (m_enableMagicWait[thread]){
    int remote_logical_processor = m_magicWaitingOn[thread];
    int remote_thread            = remote_logical_processor % RubyConfig::numberofSMTThreads();                
    int remote_processor         = remote_logical_processor / RubyConfig::numberofSMTThreads();
    Time remote_timestamp        = g_system_ptr->getChip(remote_processor / RubyConfig::numberOfProcsPerChip())->getTransactionInterfaceManager(remote_processor % RubyConfig::numberOfProcsPerChip())->getTimestamp(remote_thread);
    /* MAGIC WAIT - WAIT FOR XACT COMMIT */
    //if (remote_timestamp > m_magicWaitingTime[thread]){
    //  m_enableMagicWait[thread] = false;
    /* MAGIC WAIT - WAIT FOR XACT NON-STALL */
    if (!g_system_ptr->getXactVisualizer()->isStalled(m_magicWaitingOn[thread]) || (remote_timestamp > m_magicWaitingTime[thread])){
      m_enableMagicWait[thread] = false;
      m_needMagicWait[thread]   = false;
      if (XACT_DEBUG && (XACT_DEBUG_LEVEL > 1)){
        cout << " " << g_eventQueue_ptr->getTime() << " " << getLogicalProcID(thread) << " [" << getProcID() << "," << thread << "] DISABLE MAGIC WAIT ON " << m_magicWaitingOn[thread]  << " WAITING ON TIMESTAMP: " << m_magicWaitingTime[thread] << " CURR_TIMESTAMP: " << remote_timestamp;
        cout << " SYSTEM XACT STATE: " << g_system_ptr->getXactVisualizer()->getTransactionStateVector();
        cout << endl;
      }
    }  
  }
  return m_enableMagicWait[thread];
}
void TransactionConflictManager::notifyReceiveNackFinal ( int thread, Address physicalAddr){
  
  if (XACT_DEBUG && (XACT_DEBUG_LEVEL > 1)){
    if (!g_system_ptr->getXactVisualizer()->isStalled(getLogicalProcID(thread))){      
      cout << " " << g_eventQueue_ptr->getTime() << " " << getLogicalProcID(thread) << " [" << getProcID() << "," << thread << "] CONFLICTING REQUEST " << physicalAddr << " possibleCycle: " << m_possible_cycle[thread] << " shouldTrap: " << m_shouldTrap[thread] << endl;
    }  
  }
  
  for (int i = 0; i < RubyConfig::numberOfProcessors(); i++){
    if (m_nackedBy[thread][i]){
      if (m_magicWaitingTime[thread] > m_nackedByTimestamp[thread][i]){
          m_magicWaitingTime[thread] = m_nackedByTimestamp[thread][i];
          m_magicWaitingOn[thread]   = i;
      }           
      m_nackedBy[thread][i] = false;
    }                            
  }
  
  if (m_shouldTrap[thread]){
     // Call sequencer requesting abort     
    // if(SEQUENCER->isPrefetchRequest(line_address(physicalAddr))){
    if (0) { // prefetch check above is disabled, so the abort path below is always taken
      // reset state for aborted prefetches
      m_enemyProc[thread] = 0;
      clearPossibleCycle(thread);
      if (XACT_DEBUG && (XACT_DEBUG_LEVEL > 1)){
        cout << " " << g_eventQueue_ptr->getTime() << " " << getLogicalProcID(thread) << " [" << getProcID() << "," << thread << "] PREFETCH REQUEST " << physicalAddr << " possibleCycle: " << m_possible_cycle[thread] << " shouldTrap: " << m_shouldTrap[thread] << endl;
      }
    } else {
      // mark this request as "aborted"
      XACT_MGR->setAbortFlag(thread, line_address(physicalAddr));
      if (ATMTP_ENABLED) {
        XACT_MGR->setCPS_coh(thread);
      }
      setEnemyProcessor(thread, m_magicWaitingOn[thread]);
      m_needMagicWait[thread] = true;
    }
  }
  m_nacked[thread]     = true;
  m_shouldTrap[thread] = false;                        
} 
void TransactionConflictManager::notifyReceiveNack(int thread, Address physicalAddr, uint64 local_timestamp, uint64 remote_timestamp, MachineID remote_id){
  
  bool   possible_cycle  = m_possible_cycle[thread];
  // get the remote thread that NACKed us
  int remote_procnum = L1CacheMachIDToProcessorNum(remote_id);
  // NOTE: indexes the local chip with the raw processor number, which assumes
  // the NACKing processor lives on the same chip as this one.
  TransactionConflictManager * remote_conflict_mgr = g_system_ptr->getChip(m_chip_ptr->getID())->getTransactionInterfaceManager(remote_procnum)->getXactConflictManager();
  int    remote_thread   = remote_conflict_mgr->getOldestThread(); 
  int remote_logicalprocnum = remote_procnum*RubyConfig::numberofSMTThreads()+remote_thread;

  int my_logicalprocnum = getLogicalProcID(thread);
  Address myPC = SIMICS_get_program_counter(my_logicalprocnum);
  Address remotePC = SIMICS_get_program_counter(remote_logicalprocnum);
  //const char * my_instruction = SIMICS_disassemble_physical( my_logicalprocnum, SIMICS_translate_address( my_logicalprocnum, myPC ));
  //const char * remote_instruction = SIMICS_disassemble_physical( remote_logicalprocnum, SIMICS_translate_address( remote_logicalprocnum, remotePC ));
  m_nackedBy[thread][remote_logicalprocnum] = true;
  m_nackedByTimestamp[thread][remote_logicalprocnum] = remote_timestamp;
  m_magicWaitingTime[thread] = remote_timestamp;
  m_magicWaitingOn[thread] = remote_logicalprocnum;
    
  if (possible_cycle && isRemoteOlder(thread, remote_thread, local_timestamp, remote_timestamp, remote_id)){
    m_shouldTrap[thread] = true;
  }                
  
  if (XACT_DEBUG && (XACT_DEBUG_LEVEL > 2)){
    cout << " " << g_eventQueue_ptr->getTime() << " " << getLogicalProcID(thread) << " [" << getProcID() << "," << thread << "] RECEIVED NACK " << physicalAddr << " remote_id: " << remote_logicalprocnum << " [" << remote_procnum << "," << remote_thread << "] myPC: " << myPC << " remotePC: " << remotePC << " local_timestamp: " << local_timestamp << " remote_timestamp: " << remote_timestamp << " shouldTrap " << m_shouldTrap[thread] << endl;
  }
}
void TransactionConflictManager::setLowestConflictLevel(int thread, Address addr, bool nackedStore){
  int transactionLevel = XACT_MGR->getTransactionLevel(thread);
  bool changeLowestConflictLevel = false;
  int i;

  for (i = 1; i <= transactionLevel; i++){
    bool inReadSet = XACT_MGR->getXactIsolationManager()->isInReadSetPerfectFilter(thread, addr, i);        
    bool inWriteSet = XACT_MGR->getXactIsolationManager()->isInWriteSetPerfectFilter(thread, addr, i);        
    if (inWriteSet){
      if ((i < m_lowestConflictLevel[thread]) || m_lowestConflictLevel[thread] == -1){      
        m_lowestConflictLevel[thread] = i;
        changeLowestConflictLevel = true;
        break;
      }
    }
    if (nackedStore && inReadSet){
      if ((i < m_lowestConflictLevel[thread]) || m_lowestConflictLevel[thread] == -1){      
        m_lowestConflictLevel[thread] = i;
        changeLowestConflictLevel = true;
        break;
      }
    }
  }  
  if (changeLowestConflictLevel){
    if (XACT_DEBUG && XACT_DEBUG_LEVEL > 1){
      cout << " " << g_eventQueue_ptr->getTime() << " " << getLogicalProcID(thread) << " [" << getProcID() << "," << thread << "] SETTING LOWEST CONFLICT LEVEL " << m_lowestConflictLevel[thread] << " CONFLICT ADDR: " << addr << endl;
    }
  }  
}      
Example #15
void * tbMaster (ProcessData * pData, WavesData * wData) {
    int MPI_return, foundproc, done = 0; /*currProc*/
    MOATypeDimn i, k;
    long long receivedLongLong;
    
    char msg[MID_MESSAGE_SIZE];
    TracebackData * tbData;
    tbData = NULL;
    if (initTBData(&tbData, pData->seqNum, pData->seqLen) != 0) {
        printf ("Failed to allocate memory for trace back structure. Exiting\n");
        return NULL;
    }
    if (AlignmentType == Local) 
        getmaxCellScore (pData, tbData);    
    else {
        //tbData->maxCellScore = getLocalMaxCellScore(pData, wData, tbData, 0);
        pData->waveNo = wData->wavesTotal - 1;
        pData->partNo = wData->partsInWave[pData->waveNo]-1;
        tbData->maxCellScore = 0;
        if (getProcID(wData, pData->waveNo, pData->partNo) != myProcid) 
            getPrevPartition (wData, &pData->waveNo, &pData->partNo);
        if (pData->partNo != -1) {
            if (restorePartitionCheckPoint(pData, wData, pData->waveNo, pData->partNo) != 0) {
                printf ("Error Retrieving partition file. Exitiing.\n");
                return NULL;
            }
            for (k=0;k<pData->seqNum;k++)
                tbData->maxCellIndex[k] = pData->msaAlgn->indexes[pData->msaAlgn->elements_ub-1][k];
        }
        else {
            printf ("Error Retrieving part No. Exiting.\n");
            return NULL;
        }
        
        tbData->currProc = 0;
    }
    
    //currProc = tbData->currProc;
    while (done == 0) {
        printf ("currProc = %d done = %d maxCellIndex { %lld", tbData->currProc, done, tbData->maxCellIndex[0]);
        for (k=1;k<tbData->seqNum;k++)
            printf (", %lld", tbData->maxCellIndex[k]);
        printf ("} in proc %d\n", tbData->currProc);
        /*send to processor containing the maxCellScore to trace back*/
        myProcid = tbData->currProc;
        doPartitionTraceBack(pData, wData, tbData);

        /*test if end of aligned sequence is zero to exit */
        done = 2;
        for (k=0;k<tbData->seqNum;k++)
            if (tbData->maxCellIndex[k] != 0)
                done = 0;
        /* else, determine the next tracing Processor*/
        if ((done == 0) && (tbData->currProc < 0)) { 
            /*if other slave processes don't have this index, check the master*/
            if (getPartitionDetails (pData, wData, &tbData->partIndex, tbData->maxCellIndex, &pData->waveNo, &pData->partNo, &tbData->currProc) == 0) { 
#ifndef NDEBUG
                sprintf(msg, "DMTB max cell {%lld", tbData->maxCellIndex[0]);  
                for (k=1;k<tbData->seqNum;k++)
                    sprintf (msg, "%s, %lld", msg, tbData->maxCellIndex[k]);
                sprintf (msg, "%s} in proc %d\n", msg, tbData->currProc);
                mprintf(3, msg, 1);
#endif
                done = 0;
            }
            else /*otherwise, end of tracing*/
                done = 2;
        }
    } /* End While*/
    /*send to all processes to exit tracing*/
    
    assemblePathParts (tbData);
    calcAlignmentSPScore(pData, tbData);
    outputAlignment(pData, tbData, 1);
    editAlignment (pData, tbData);
    calcAlignmentSPScore (pData, tbData);
    outputAlignment(pData, tbData, 1);
    outputFastaAlignment(pData, tbData);    
    outputMSFAlignment(pData, tbData);    
    if (tbData != NULL)
        freetbData(&tbData);
    return NULL;
}
Example #16
/* Each MPI LP in this model generates MPI messages until a certain message
   count is reached. This method
          (i) keeps generating MPI messages,
         (ii) breaks an MPI message down into torus packets, and
        (iii) sends those packets to the underlying torus node LP. */
void mpi_msg_send(mpi_process * p,
                  tw_bf * bf,
                  nodes_message * msg,
                  tw_lp * lp)
{
    tw_stime ts;
    tw_event *e;
    nodes_message *m;
    tw_lpid final_dst;
    int i;
    bf->c3 = 0;
    bf->c4 = 0;

    if(p->message_counter >= injection_limit)
    {
        bf->c4 = 1;
        return;
    }

    switch(TRAFFIC)
    {
        /* UR traffic picks a random destination from the network */
        case UNIFORM_RANDOM:
        {
            bf->c3 = 1;

            final_dst = tw_rand_integer( lp->rng, N_nodes, 2 * N_nodes - 1);

            /* If the randomly generated final destination is the same as the
               current LP ID, the next randomly generated destination could be
               the same again. So whenever the destination equals the source,
               we use the following calculation to make sure they differ. */
            if( final_dst == lp->gid )
            {
                final_dst = N_nodes + (lp->gid + N_nodes/2) % N_nodes;
            }
        }
        break;

        /* The nearest-neighbor traffic works in a round-robin fashion: the
           first message is sent to the first nearest neighbor of the node, the
           second message to the second nearest neighbor, and so on. That's why
           we use the message counter to calculate the destination neighbor
           number (between 0 and 2*N_dims_sim - 1). The packet_generate
           function calculates the torus coordinates of the destination
           neighbor. */
        case NEAREST_NEIGHBOR:
        {
            final_dst = p->message_counter % (2*N_dims_sim);
        }
        break;

        /* The diagonal traffic pattern sends a message to the mirror torus
           coordinates of the current torus node LP. The torus coordinates are
           not available at the MPI-process LP level; that's why we calculate
           the destination for this pattern in the packet_generate function. */
        case DIAGONAL:
        {
            final_dst = -1;
        }
        break;
    }

    tw_stime base_time = MEAN_PROCESS;

    for( i=0; i < num_packets; i++ )
    {
        // Send the packet out
        ts = 0.1 + tw_rand_exponential(lp->rng, MEAN_INTERVAL/200);
        msg->saved_available_time = p->available_time;
        p->available_time = max( p->available_time, tw_now(lp) );
        p->available_time += ts;

        e = tw_event_new( getProcID(lp->gid), p->available_time - tw_now(lp), lp );

        m = tw_event_data( e );
        m->type = GENERATE;
        m->packet_ID = packet_offset * ( lp->gid * num_mpi_msgs * num_packets ) + p->message_counter;

        p->message_counter++;
        m->travel_start_time = tw_now( lp ) + ts;

        if(TRAFFIC == NEAREST_NEIGHBOR || TRAFFIC == DIAGONAL)
            m->dest_lp = final_dst;
        else
        {
            m->dest_lp = getProcID( final_dst );
        }

        m->next_stop = -1;
        tw_event_send( e );
    }
    ts = 0.1 + tw_rand_exponential( lp->rng, MEAN_INTERVAL);
    e = tw_event_new( lp->gid, ts, lp );
    m = tw_event_data( e );
    m->type = MPI_SEND;
    tw_event_send( e );
}
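The per-packet loop above uses a common PDES idiom: p->available_time remembers when the sender is next free, so packets scheduled in one call still depart serially and never in the past. A self-contained sketch of that pattern (names and constants are illustrative, not the ROSS API):

#include <algorithm>
#include <cstdio>
#include <random>

// Toy illustration of the available_time idiom from mpi_msg_send():
// each packet departs when the sender is free, and never earlier than "now".
int main() {
  std::mt19937 rng(42);
  std::exponential_distribution<double> gap(200.0); // arbitrary mean; the model uses MEAN_INTERVAL/200
  double now = 0.0;            // stands in for tw_now(lp)
  double available_time = 0.0; // stands in for p->available_time
  for (int i = 0; i < 5; i++) {
    double ts = 0.1 + gap(rng);
    available_time = std::max(available_time, now) + ts;
    // The event is scheduled with an offset, analogous to
    // tw_event_new(dest, available_time - now, lp) in the model.
    printf("packet %d departs at offset %.4f\n", i, available_time - now);
  }
  return 0;
}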
void TransactionConflictManager::setMagicWait(int thread){
  if (m_needMagicWait[thread]){      
    m_enableMagicWait[thread] = true;
    if (XACT_DEBUG && (XACT_DEBUG_LEVEL > 1)){
      cout << " " << g_eventQueue_ptr->getTime() << " " << getLogicalProcID(thread) << " [" << getProcID() << "," << thread << "] ENABLE MAGIC WAIT ON " << m_magicWaitingOn[thread]  << endl;
    }
    m_needMagicWait[thread] = false;
  }  
}            
Example #18
File: mtb.c Project: mhelal/mmDST
void * tbMaster (ProcessData * pData, WavesData * wData) {
    int MPI_return, foundproc, done = 0; /*currProc*/
    MOATypeDimn i, k;
    long long receivedLongLong;
    
    char msg[MID_MESSAGE_SIZE];
    MPI_Request request;
    MPI_Status status;
    TracebackData * tbData;
    tbData = NULL;
    if (initTBData(&tbData, pData->seqNum, pData->seqLen) != 0) {
        printf ("Failed to allocate memory for trace back structure. Exiting\n");
        return NULL;
    }
    if (AlignmentType == Local) 
        getmaxCellScore (pData, tbData);    
    else {
        //tbData->maxCellScore = getLocalMaxCellScore(pData, wData, tbData, 0);
        pData->waveNo = wData->wavesTotal - 1;
        pData->partNo = wData->partsInWave[pData->waveNo]-1;
        tbData->maxCellScore = 0;
        if (getProcID(wData, pData->waveNo, pData->partNo) != myProcid) 
            getPrevPartition (wData, &pData->waveNo, &pData->partNo);
        if (pData->partNo != -1) {
            if (restorePartitionCheckPoint(pData, wData, pData->waveNo, pData->partNo) != 0) {
                printf ("Error Retrieving partition file. Exitiing.\n");
                return NULL;
            }
            for (k=0;k<pData->seqNum;k++)
                tbData->maxCellIndex[k] = pData->msaAlgn->indexes[pData->msaAlgn->elements_ub-1][k];
        }
        else {
            printf ("Error Retrieving part No. Exitiing.\n");
            return NULL;
        }
        
        tbData->currProc = 0;
    }
    
    //currProc = tbData->currProc;
    while (done == 0) {
        printf ("currProc = %d done = %d maxCellIndex { %lld", tbData->currProc, done, tbData->maxCellIndex[0]);
        for (k=1;k<tbData->seqNum;k++)
            printf (", %lld", tbData->maxCellIndex[k]);
        printf ("} in proc %d\n", tbData->currProc);
        /*send to processor containing the maxCellScore to trace back*/
        if (tbData->currProc == 0) {
                /*Perform Partition trace back*/
                tbData->pathParts ++;
                if (tbData->pathParts == 1) {
                    tbData->aSeqLen = mmalloc (((MOATypeInd) sizeof *(tbData->aSeqLen)));
                    if (tbData->aSeqLen == NULL) {
                        printf ("Failed to reallocate memory for %ld partial alignments Lengths. Exiting.\n", tbData->pathParts);
                        fflush (stdout);
                        return NULL;
                    }
                    tbData->algnseq = mmalloc (((MOATypeInd) sizeof *(tbData->algnseq)));
                    if (tbData->aSeqLen == NULL) {
                        printf ("Failed to reallocate memory for %ld partial alignments. Exiting.\n", tbData->pathParts);
                        fflush (stdout);
                        return NULL;
                    }
                }
                else {
                    tbData->aSeqLen = realloc (tbData->aSeqLen, ((MOATypeInd) tbData->pathParts) * ((MOATypeInd) sizeof *(tbData->aSeqLen)));
                    if (tbData->aSeqLen == NULL) {
                        printf ("Failed to reallocate memory for %ld partial alignments Lengths. Exiting.\n", tbData->pathParts);
                        fflush (stdout);
                        return NULL;
                    }
                    tbData->algnseq = realloc (tbData->algnseq, ((MOATypeInd) tbData->pathParts) * ((MOATypeInd) sizeof *(tbData->algnseq)));
                    if (tbData->algnseq == NULL) {
                        printf ("Failed to reallocate memory for %ld partial alignments. Exiting.\n", tbData->pathParts);
                        fflush (stdout);
                        return NULL;
                    }
                }
                tbData->algnseq[tbData->pathParts-1] = NULL;
                tbData->algnseq[tbData->pathParts-1] = mmalloc (((MOATypeInd) tbData->seqNum) * ((MOATypeInd) sizeof *(tbData->algnseq[tbData->pathParts-1])));    
                if (tbData->algnseq[tbData->pathParts-1] == NULL) {
                    printf ("Failed to reallocate memory for %ld partial alignments sequences. Exiting.\n", tbData->pathParts);
                    fflush (stdout);
                    return NULL;
                }
                tbData->aSeqLen[tbData->pathParts-1] = traceBack(pData, wData, tbData);
                sprintf(msg, "DMTB returned Local path of length %ld\n", tbData->aSeqLen[tbData->pathParts-1]);  
                mprintf(3, msg, 1);
        }
        else {
                /*1. Send Tracing flag (done = 0) to the processor where the maximum score was found*/
                MPI_return = MPI_Send (&done, 1, MPI_INT, tbData->currProc, 2, MOAMSA_COMM_WORLD);
#ifndef NDEBUG
                sprintf(msg, "DMTB sent flag %d to proc %d first\n", done, tbData->currProc);  
                mprintf(3, msg, 1);
#endif
                /*2. send starting global index*/
                MPI_return = MPI_Send (tbData->maxCellIndex, tbData->seqNum, MPI_LONG_LONG, tbData->currProc, 3, MOAMSA_COMM_WORLD);
#ifndef NDEBUG
                sprintf(msg, "DMTB sent maxCellIndex to proc %d\n", tbData->currProc);  
                mprintf(3, msg, 1);
#endif


                tbData->pathParts ++;
                if (tbData->pathParts == 1) {
                    tbData->aSeqLen = mmalloc (((MOATypeInd) sizeof *tbData->aSeqLen));
                    if (tbData->aSeqLen == NULL) {
                        printf ("Failed to reallocate memory for %ld partial alignments Lengths. Exiting.\n", tbData->pathParts);
                        fflush (stdout);
                        return NULL;
                    }
                    tbData->algnseq = mmalloc (((MOATypeInd) sizeof *tbData->algnseq));
                    if (tbData->aSeqLen == NULL) {
                        printf ("Failed to reallocate memory for %ld partial alignments. Exiting.\n", tbData->pathParts);
                        fflush (stdout);
                        return NULL;
                    }
                }
                else {
                    tbData->aSeqLen = realloc (tbData->aSeqLen, ((MOATypeInd) tbData->pathParts) * ((MOATypeInd) sizeof *tbData->aSeqLen));
                    if (tbData->aSeqLen == NULL) {
                        printf ("Failed to reallocate memory for %ld partial alignments Lengths. Exiting.\n", tbData->pathParts);
                        fflush (stdout);
                        return NULL;
                    }
                    tbData->algnseq = realloc (tbData->algnseq, ((MOATypeInd) tbData->pathParts) * ((MOATypeInd) sizeof *tbData->algnseq));
                    if (tbData->algnseq == NULL) {
                        printf ("Failed to reallocate memory for %ld partial alignments. Exiting.\n", tbData->pathParts);
                        fflush (stdout);
                        return NULL;
                    }
                }
                tbData->algnseq[tbData->pathParts-1] = NULL;
                tbData->algnseq[tbData->pathParts-1] = mmalloc (((MOATypeInd) tbData->seqNum) * ((MOATypeInd) sizeof *(tbData->algnseq[tbData->pathParts-1])));  
                if (tbData->algnseq[tbData->pathParts-1] == NULL) {
                    printf ("Failed to reallocate memory for %ld partial alignments sequences. Exiting.\n", tbData->pathParts);
                    fflush (stdout);
                    return NULL;
                }
                tbData->aSeqLen[tbData->pathParts-1] = 2;
                /*3. receive partial alignment length*/
                MPI_return = MPI_Recv (&receviedLongLong, 1, MPI_LONG_LONG, tbData->currProc, 4, MOAMSA_COMM_WORLD, &status);
                tbData->aSeqLen[tbData->pathParts-1] = receviedLongLong;
#ifndef NDEBUG
                printf("Master received length %ld  MPI_return %d \n", tbData->aSeqLen[tbData->pathParts-1], MPI_return);  
                fflush(stdout);
                sprintf(msg, "DMTB received Remote path of length %ld :", tbData->aSeqLen[tbData->pathParts-1]);  
                mprintf(3, msg, 1);
#endif
                /*4. receive the partial alignment itself*/
                for (i=0;i<tbData->seqNum && tbData->aSeqLen[tbData->pathParts-1] > 0;i++) {
                    tbData->algnseq[tbData->pathParts-1][i] = NULL;
                    tbData->algnseq[tbData->pathParts-1][i] = mmalloc (((MOATypeInd) (tbData->aSeqLen[tbData->pathParts-1] +1)) * ((MOATypeInd) sizeof *(tbData->algnseq[tbData->pathParts-1][i] )));    
                    if (tbData->algnseq[tbData->pathParts-1][i] == NULL) {
                        printf ("Failed to reallocate memory for %ld partial alignments sequences %lld residues. Exiting.\n", tbData->pathParts, tbData->aSeqLen[tbData->pathParts-1]);
                        fflush (stdout);
                        return NULL;
                    }
                    MPI_return = MPI_Recv (tbData->algnseq[tbData->pathParts-1][i], tbData->aSeqLen[tbData->pathParts-1], MPI_CHAR, tbData->currProc, 5, MOAMSA_COMM_WORLD, &status);
                    tbData->algnseq[tbData->pathParts-1][i][tbData->aSeqLen[tbData->pathParts-1]] = '\0';
#ifndef NDEBUG
                    printf("Master received aligned seq for seq %lld MPI_return %d = %s\n", i, MPI_return, tbData->algnseq[tbData->pathParts-1][i]);  
                    fflush(stdout);
                    sprintf(msg, " %s ", tbData->algnseq[tbData->pathParts-1][i]);  
                    mprintf(3, msg, 1);
#endif
                }
                /*5. receive the last global index in this partial alignment*/
                MPI_return = MPI_Recv (tbData->maxCellIndex, tbData->seqNum, MPI_LONG_LONG, tbData->currProc, 7, MOAMSA_COMM_WORLD, &status);
#ifndef NDEBUG
                printf ("Master received MPI_return %d maxCellIndex { %lld", MPI_return, tbData->maxCellIndex[0]);
                for (i=1;i<tbData->seqNum;i++)
                    printf (", %lld", tbData->maxCellIndex[i]);
                printf ("}\n");
                fflush(stdout);
#endif
                /*6. receive the next Processor where a next remote score was found from this partial alignment*/
                MPI_return = MPI_Recv (&tbData->currProc, 1, MPI_INT, tbData->currProc, 8, MOAMSA_COMM_WORLD, &status);
                //tbData->currProc = currProc;
#ifndef NDEBUG
                sprintf(msg, "DMTB received currProc %d, maxCellIndex {%lld", tbData->currProc, tbData->maxCellIndex[0]);  
                for (k=1;k<tbData->seqNum;k++)
                    sprintf(msg, "%s, %lld", msg, tbData->maxCellIndex[k]);
                sprintf(msg, "%s}\n", msg);
                mprintf(3, msg, 1);
#endif
        }
        /*test if end of aligned sequence is zero to exit */
        done = 2;
        for (k=0;k<tbData->seqNum;k++)
            if (tbData->maxCellIndex[k] != 0)
                done = 0;
        /* else, determine the next tracing Processor*/
        if ((done == 0) && (tbData->currProc < 0)) {
            /*if other slave processes don't have this index, check the master*/
            if (getPartitionDetails (pData, wData, &tbData->partIndex, tbData->maxCellIndex, &pData->waveNo, &pData->partNo, &tbData->currProc) == 0) {
#ifndef NDEBUG
                sprintf(msg, "DMTB max cell {%lld", tbData->maxCellIndex[0]);
                for (k=1;k<tbData->seqNum;k++)
                    sprintf (msg + strlen(msg), ", %lld", tbData->maxCellIndex[k]);
                sprintf (msg + strlen(msg), "} in proc %d\n", tbData->currProc);
                mprintf(3, msg, 1);
#endif
                done = 0;
            }
            else /*otherwise, end of tracing*/
                done = 2;
        }
    } /* End While*/
    /*send to all processes to exit tracing*/
    done = 2;
    for (i=1;i<ClusterSize;i++) {
        MPI_return = MPI_Send (&done, 1, MPI_INT, i, 2, MOAMSA_COMM_WORLD);
        sprintf(msg, "DMTB sent flag %d to proc %ld\n", done, i);  
        mprintf(3, msg, 1);
    }
    assemblePathParts (tbData);
    calcAlignmentSPScore(pData, tbData);
    outputAlignment(pData, tbData, 1);
    editAlignment (pData, tbData);
    calcAlignmentSPScore (pData, tbData);
    outputAlignment(pData, tbData, 1);
    outputFastaAlignment(pData, tbData);    
    outputMSFAlignment(outputfilename, pData, tbData);    
    if (tbData != NULL)
        freetbData(&tbData);
    return NULL;
}