/*
 * Computes and caches the reaching definitions visible at the given term for
 * a single memory location.
 *
 * \param term            Term being analyzed.
 * \param memoryLocation  Memory location accessed by the term.
 * \param definitions     Reaching definitions at this program point.
 *
 * \return Reference to the per-term definitions stored in dataflow():
 *         the projection of \p definitions onto \p memoryLocation if the
 *         location is tracked, empty otherwise.
 */
const ReachingDefinitions &DataflowAnalyzer::computeReachingDefinitions(
    const Term *term, const MemoryLocation &memoryLocation,
    const ReachingDefinitions &definitions)
{
    auto &result = dataflow().getDefinitions(term);

    if (!isTracked(memoryLocation)) {
        // Untracked locations carry no definitions.
        result.clear();
    } else {
        // Keep only the definitions relevant to this memory location.
        definitions.project(memoryLocation, result);
    }

    return result;
}
// Applies the given chunk change documents to the tracked ranges and to the
// cached collection/shard versions.
//
// Works in two passes:
//  1. For every changed chunk, bump the per-shard and per-collection maximum
//     versions and drop any currently tracked ranges that conflict with it.
//  2. For every changed chunk this tracker cares about (all of them for
//     mongos, just this shard's chunks for mongod), add its range to the
//     tracked map, bailing out if an overlap (inconsistent read) is found.
//
// Returns the number of valid diffs processed, or -1 if an invalid chunk
// version or an overlapping read was detected.
int ConfigDiffTracker<ValType, ShardType>::calculateConfigDiff(
    const std::vector<ChunkType>& chunks) {
    _assertAttached();

    std::vector<ChunkType> interestingChunks;

    // Remember the epoch up front; it must stay fixed while _maxVersion changes.
    const OID epoch = _maxVersion->epoch();

    _validDiffs = 0;

    // Pass 1: validate each diff and fold its version into the maxima.
    for (const ChunkType& chunk : chunks) {
        ChunkVersion version =
            ChunkVersion::fromBSON(chunk.toBSON(), ChunkType::DEPRECATED_lastmod());

        if (!version.isSet() || !version.hasEqualEpoch(epoch)) {
            warning() << "got invalid chunk version " << version << " in document "
                      << chunk.toString()
                      << " when trying to load differing chunks at version "
                      << ChunkVersion(
                             _maxVersion->majorVersion(), _maxVersion->minorVersion(), epoch);

            // Don't keep loading, since we know we'll be broken here
            return -1;
        }

        _validDiffs++;

        // Fold into the collection-wide maximum version.
        if (version > *_maxVersion) {
            *_maxVersion = version;
        }

        // Fold into the per-shard maximum version.
        ShardType shard = shardFor(chunk.getShard());

        typename MaxChunkVersionMap::const_iterator it = _maxShardVersions->find(shard);
        if (it == _maxShardVersions->end() || it->second < version) {
            (*_maxShardVersions)[shard] = version;
        }

        // Drop any currently tracked chunks this change conflicts with.
        removeOverlapping(chunk.getMin(), chunk.getMax());

        // Remember the chunks we actually need to track. Important - we need
        // to actually own this doc, in case the cursor decides to getMore or
        // unbuffer.
        if (isTracked(chunk)) {
            interestingChunks.push_back(chunk);
        }
    }

    LOG(3) << "found " << _validDiffs << " new chunks for collection " << _ns << " (tracking "
           << interestingChunks.size() << "), new version is " << *_maxVersion;

    // Pass 2: install the interesting chunks, detecting inconsistent reads.
    for (const ChunkType& chunk : interestingChunks) {
        // Invariant enforced by sharding - it's possible to read inconsistent state due to
        // getMore and yielding, so we want to detect it as early as possible.
        //
        // TODO: This checks for overlap, we also should check for holes here iff we're
        // tracking all chunks.
        if (isOverlapping(chunk.getMin(), chunk.getMax())) {
            return -1;
        }

        _currMap->insert(rangeFor(chunk));
    }

    return _validDiffs;
}
// Applies chunk change documents read from the given cursor to the tracked
// ranges and versions (legacy raw-BSON flavor of calculateConfigDiff).
//
// Works in two steps:
//  1. For every diff document, bump the per-shard and per-collection maximum
//     versions and drop any conflicting tracked ranges.
//  2. For every diff document we're interested in (all of them for mongos,
//     just this shard's chunks for mongod), add its range to the tracked map.
//
// Returns the number of valid diffs applied, or -1 on an invalid chunk
// version or an overlapping (inconsistent) read. Malformed documents are
// skipped with a warning rather than aborting the whole load.
int ConfigDiffTracker<ValType,ShardType>::calculateConfigDiff( DBClientCursorInterface& diffCursor ) {
    verifyAttached();

    vector<BSONObj> docsToTrack;

    // Remember the epoch up front; it must stay fixed while _maxVersion changes.
    OID epoch = _maxVersion->epoch();

    _validDiffs = 0;

    while (diffCursor.more()) {
        BSONObj doc = diffCursor.next();

        ChunkVersion version = ChunkVersion::fromBSON(doc, ChunkType::DEPRECATED_lastmod());

        // Skip (but warn about) documents missing the expected min/max/shard fields.
        if (doc[ChunkType::min()].type() != Object ||
            doc[ChunkType::max()].type() != Object ||
            doc[ChunkType::shard()].type() != String) {
            warning() << "got invalid chunk document " << doc
                      << " when trying to load differing chunks" << endl;
            continue;
        }

        if (!version.isSet() || !version.hasCompatibleEpoch(epoch)) {
            warning() << "got invalid chunk version " << version << " in document " << doc
                      << " when trying to load differing chunks at version "
                      << ChunkVersion(_maxVersion->toLong(), epoch) << endl;

            // Don't keep loading, since we know we'll be broken here
            return -1;
        }

        _validDiffs++;

        // Fold into the collection-wide maximum version.
        if (version > *_maxVersion) {
            *_maxVersion = version;
        }

        // Fold into the per-shard maximum version.
        ShardType shard = shardFor(doc[ChunkType::shard()].String());

        typename map<ShardType, ChunkVersion>::iterator it = _maxShardVersions->find(shard);
        if (it == _maxShardVersions->end() || it->second < version) {
            (*_maxShardVersions)[shard] = version;
        }

        // See if we need to remove any chunks we are currently tracking b/c of this chunk's changes
        removeOverlapping(doc[ChunkType::min()].Obj(), doc[ChunkType::max()].Obj());

        // Figure out which of the new chunks we need to track
        // Important - we need to actually own this doc, in case the cursor decides to getMore or unbuffer
        if (isTracked(doc)) {
            docsToTrack.push_back(doc.getOwned());
        }
    }

    LOG(3) << "found " << _validDiffs << " new chunks for collection " << _ns << " (tracking "
           << docsToTrack.size() << "), new version is " << *_maxVersion << endl;

    for (vector<BSONObj>::iterator it = docsToTrack.begin(); it != docsToTrack.end(); ++it) {
        BSONObj chunkDoc = *it;

        // Important - we need to make sure we actually own the min and max here
        BSONObj min = chunkDoc[ChunkType::min()].Obj().getOwned();
        BSONObj max = chunkDoc[ChunkType::max()].Obj().getOwned();

        // Invariant enforced by sharding
        // It's possible to read inconsistent state b/c of getMore() and yielding, so we want
        // to detect as early as possible.
        // TODO: This checks for overlap, we also should check for holes here iff we're tracking
        // all chunks
        if (isOverlapping(min, max)) {
            return -1;
        }

        _currMap->insert(rangeFor(chunkDoc, min, max));
    }

    return _validDiffs;
}
// Evaluates solution `ss` on the moving-peaks landscape: the objective is the
// maximum over all peak functions (plus the optional basis function), with
// optional noise added to the decision vector first. When `rFlag` is set, the
// evaluation counter advances and a dynamic change may be triggered every
// m_changeFre evaluations.
//
// \param ss     Solution to evaluate; must actually be a CodeVReal
//               (dynamic_cast throws std::bad_cast otherwise).
// \param rFlag  True for a "real" evaluation that counts towards the change
//               schedule; false for a side evaluation.
// \param mode   Program mode (unused here).
// \param flag2  Unused here.
// \return Return_Normal, or a change/termination flag when rFlag is set.
ReturnFlag MovingPeak::evaluate_(VirtualEncoding &ss, bool rFlag, ProgramMode mode, bool flag2) {
    CodeVReal &s = dynamic_cast<CodeVReal &>(ss);

    double *x = new double[m_numDim];
    copy(s.m_x.begin(), s.m_x.end(), x);
    if (this->m_noiseFlag) addNoise(x);

    // Objective = max over all peak functions.
    // NOTE(review): LONG_MIN is used as "minus infinity" for a double;
    // -std::numeric_limits<double>::max() would be the cleaner sentinel,
    // kept as-is to preserve behavior.
    double maximum = LONG_MIN, dummy;
    for (int i = 0; i < m_numPeaks; i++) {
        //if(maximum>mp_height[i]) continue; //optimization on the obj evaluation
        dummy = functionSelection(x, i);
        if (dummy > maximum) maximum = dummy;
    }

    if (m_useBasisFunction) {
        dummy = functionSelection(x, -1);
        /* If value of basis function is higher return it */
        if (maximum < dummy) maximum = dummy;
    }
    s.m_obj[0] = maximum;

    if (rFlag && m_evals % m_changeFre == 0) {
        Solution<CodeVReal>::initilizeWB(s);
    }

    if (rFlag && isTracked(x, s.m_obj)) updatePeakQaulity();
    if (rFlag) m_evals++;

    // BUG FIX: `flag` was previously left uninitialized when neither
    // OFEC_CONSOLE nor OFEC_DEMON is defined, making the read below undefined
    // behavior. Default to true, matching the OFEC_DEMON / null-algorithm cases.
    bool flag = true;
#ifdef OFEC_CONSOLE
    if (Global::msp_global->mp_algorithm != nullptr)
        flag = !Global::msp_global->mp_algorithm->ifTerminating();
    else
        flag = true;
#endif
#ifdef OFEC_DEMON
    flag = true;
#endif

    // Trigger the environment change on schedule, unless we are terminating.
    if (rFlag && m_evals % m_changeFre == 0 && flag) {
        DynamicProblem::change();
    }

    delete[] x;
    x = 0;

    // Translate the current dynamic-problem state into a return flag.
    ReturnFlag rf = Return_Normal;
    if (rFlag) {
        if (Global::msp_global->mp_algorithm != nullptr) {
            if (Global::msp_global->mp_algorithm->ifTerminating()) {
                rf = Return_Terminate;
            } else if (Global::msp_global->mp_problem->isProTag(DOP)) {
                if (CAST_PROBLEM_DYN->getFlagTimeLinkage() && CAST_PROBLEM_DYN->getTriggerTimelinkage()) {
                    rf = Return_Change_Timelinkage;
                }
                if ((Global::msp_global->mp_problem->getEvaluations() + 1) % (CAST_PROBLEM_DYN->getChangeFre()) == 0) {
                    rf = Return_ChangeNextEval;
                }
                if (Global::msp_global->mp_problem->getEvaluations() % (CAST_PROBLEM_DYN->getChangeFre()) == 0) {
                    // NOTE(review): Return_Change_Dim is immediately overwritten
                    // by Return_Change below; this looks suspicious but is
                    // preserved as-is (same pattern as CompositionDBG::evaluate_).
                    if (CAST_PROBLEM_DYN->getFlagDimensionChange()) {
                        rf = Return_Change_Dim;
                    }
                    rf = Return_Change;
                }
            }
        }
    }
    return rf;
}
void UnitBase::engageTarget() { if(target && (target.getObjPointer() == NULL)) { // the target does not exist anymore releaseTarget(); return; } if(target && (target.getObjPointer()->isActive() == false)) { // the target changed its state to inactive releaseTarget(); return; } if(target && !targetFriendly && !canAttack(target.getObjPointer())) { // the (non-friendly) target cannot be attacked anymore releaseTarget(); return; } if(target && !targetFriendly && !forced && !isInAttackRange(target.getObjPointer())) { // the (non-friendly) target left the attack mode range (and we were not forced to attack it) releaseTarget(); return; } if(target) { // we have a target unit or structure Coord targetLocation = target.getObjPointer()->getClosestPoint(location); if(destination != targetLocation) { // the location of the target has moved // => recalculate path clearPath(); } targetDistance = blockDistance(location, targetLocation); Sint8 newTargetAngle = lround(8.0f/256.0f*destinationAngle(location, targetLocation)); if(newTargetAngle == 8) { newTargetAngle = 0; } if(bFollow) { // we are following someone setDestination(targetLocation); return; } if(targetDistance > getWeaponRange()) { // we are not in attack range // => follow the target setDestination(targetLocation); return; } // we are in attack range if(targetFriendly && !forced) { // the target is friendly and we only attack these if were forced to do so return; } if(goingToRepairYard) { // we are going to the repair yard // => we do not need to change the destination targetAngle = INVALID; } else if(attackMode == CAPTURE) { // we want to capture the target building setDestination(targetLocation); targetAngle = INVALID; } else if(isTracked() && target.getObjPointer()->isInfantry() && currentGameMap->tileExists(targetLocation) && !currentGameMap->getTile(targetLocation)->isMountain() && forced) { // we squash the infantry unit because we are forced to setDestination(targetLocation); targetAngle = INVALID; } else { // we 
decide to fire on the target thus we can stop moving setDestination(location); targetAngle = newTargetAngle; } if(getCurrentAttackAngle() == newTargetAngle) { attack(); } } else if(attackPos) { // we attack a position targetDistance = blockDistance(location, attackPos); Sint8 newTargetAngle = lround(8.0f/256.0f*destinationAngle(location, attackPos)); if(newTargetAngle == 8) { newTargetAngle = 0; } if(targetDistance <= getWeaponRange()) { // we are in weapon range thus we can stop moving setDestination(location); targetAngle = newTargetAngle; if(getCurrentAttackAngle() == newTargetAngle) { attack(); } } else { targetAngle = INVALID; } } }
// Evaluates solution `ss` on the composition dynamic benchmark: the objective
// is a weighted sum of m_numPeaks shifted/stretched/rotated basic functions,
// with weights derived from the distance of x to each peak center. When
// `rFlag` is set, the evaluation counter advances and a dynamic change may be
// triggered every m_changeFre evaluations.
//
// \param ss     Solution to evaluate; must actually be a CodeVReal
//               (dynamic_cast throws std::bad_cast otherwise).
// \param rFlag  True for a "real" evaluation that counts towards the change
//               schedule; false for a side evaluation.
// \param mode   Program mode (unused here).
// \param flag_  Unused here.
// \return Return_Normal, or a change/termination flag when rFlag is set.
ReturnFlag CompositionDBG::evaluate_(VirtualEncoding &ss, bool rFlag, ProgramMode mode, bool flag_) {
    CodeVReal &s = dynamic_cast<CodeVReal &>(ss);

    double *x = new double[m_numDim];
    copy(s.m_x.begin(), s.m_x.end(), x);
    if (this->m_noiseFlag) addNoise(x);

    // Weight of component i decays with the distance of x to peak i.
    vector<double> width(m_numPeaks, 0), fit(m_numPeaks);
    for (int i = 0; i < m_numPeaks; i++) {
        for (int j = 0; j < m_numDim; j++)
            width[i] += (x[j] - mpp_peak[i][j]) * (x[j] - mpp_peak[i][j]);
        if (width[i] != 0)
            width[i] = exp(-sqrt(width[i] / (2 * m_numDim * mp_convergeSeverity[i] * mp_convergeSeverity[i])));
    }

    // Objective value of each component: shift by the peak, stretch, rotate,
    // then evaluate the basic function and normalize by its estimated maximum.
    for (int i = 0; i < m_numPeaks; i++) {
        for (int j = 0; j < m_numDim; j++)
            x[j] = (x[j] - mpp_peak[i][j]) / mp_stretchSeverity[i];
        Matrix m(m_numDim, 1);
        m.setDataRow(x, m_numDim);
        m *= mp_rotationMatrix[i];
        copy(m[0].begin(), m[0].end(), x);
        correctSolution(mp_comFunction[i], x);
        fit[i] = selectFun(mp_comFunction[i], x);
        fit[i] = m_heightNormalizeSeverity * fit[i] / fabs(mp_fmax[i]);
        copy(s.m_x.begin(), s.m_x.end(), x);  // restore x for the next component
    }

    // Normalize weights; every component except the best one is damped.
    double sumw = 0, wmax;
    wmax = *max_element(width.begin(), width.end());
    for (int i = 0; i < m_numPeaks; i++)
        if (width[i] != wmax)
            width[i] = width[i] * (1 - pow(wmax, 10));
    for (int i = 0; i < m_numPeaks; i++)
        sumw += width[i];
    for (int i = 0; i < m_numPeaks; i++)
        width[i] /= sumw;

    double obj = 0;
    for (int i = 0; i < m_numPeaks; i++)
        obj += width[i] * (fit[i] + mp_height[i]);
    s.m_obj[0] = obj;

    if (rFlag && m_evals % m_changeFre == 0)
        Solution<CodeVReal>::initilizeWB(s);
    if (rFlag) {
        isTracked(x, s.m_obj);
        m_evals++;
    }

    // BUG FIX: `flag` was previously left uninitialized when neither
    // OFEC_CONSOLE nor OFEC_DEMON is defined, making the read below undefined
    // behavior. Default to true, matching the OFEC_DEMON / null-algorithm cases.
    bool flag = true;
#ifdef OFEC_CONSOLE
    if (Global::msp_global->mp_algorithm != nullptr)
        flag = !Global::msp_global->mp_algorithm->ifTerminating();
    else
        flag = true;
#endif
#ifdef OFEC_DEMON
    flag = true;
#endif

    // Trigger the environment change on schedule, unless we are terminating.
    if (rFlag && m_evals % m_changeFre == 0 && flag) {
        DynamicProblem::change();
        if (m_timeLinkageFlag)
            updateTimeLinkage();
    }

    delete[] x;
    x = 0;

    // Translate the current dynamic-problem state into a return flag.
    ReturnFlag rf = Return_Normal;
    if (rFlag) {
        if (Global::msp_global->mp_algorithm != nullptr) {
            if (Global::msp_global->mp_algorithm->ifTerminating()) {
                rf = Return_Terminate;
            } else if (Global::msp_global->mp_problem->isProTag(DOP)) {
                if (CAST_PROBLEM_DYN->getFlagTimeLinkage() && CAST_PROBLEM_DYN->getTriggerTimelinkage()) {
                    rf = Return_Change_Timelinkage;
                }
                if ((Global::msp_global->mp_problem->getEvaluations() + 1) % (CAST_PROBLEM_DYN->getChangeFre()) == 0) {
                    rf = Return_ChangeNextEval;
                }
                if (Global::msp_global->mp_problem->getEvaluations() % (CAST_PROBLEM_DYN->getChangeFre()) == 0) {
                    // NOTE(review): Return_Change_Dim is immediately overwritten
                    // by Return_Change below; this looks suspicious but is
                    // preserved as-is (same pattern as MovingPeak::evaluate_).
                    if (CAST_PROBLEM_DYN->getFlagDimensionChange()) {
                        rf = Return_Change_Dim;
                    }
                    rf = Return_Change;
                }
            }
        }
    }
    return rf;
}