/*! The main planning function. Simply takes the next input grasp from the list, tries it and then places is in the output depending on the quality. */ void ListPlanner::mainLoop() { //check if we are done if (mPlanningIterator==mInputList.end()) { mCurrentStep = mMaxSteps+1; return; } //analyze the current state //we don't allow it to leave the hand in the analysis posture //so that after dynamics object gets put back bool legal; double energy; PRINT_STAT(mOut, mCurrentStep); mEnergyCalculator->analyzeState(legal, energy, *mPlanningIterator, true); //for rendering purposes; will see later if it's needed mCurrentState->copyFrom(*mPlanningIterator); mCurrentState->setLegal(legal); //put a copy of the result in list if it's legal and there's room or it's //better than the worst solution so far //this whole thing could go into a higher level fctn in EGPlanner if (legal) { double worstEnergy; if ((int)mBestList.size() < BEST_LIST_SIZE) worstEnergy = 1.0e5; else worstEnergy = mBestList.back()->getEnergy(); if (energy < worstEnergy) { GraspPlanningState *insertState = new GraspPlanningState(*mPlanningIterator); insertState->setEnergy(energy); insertState->setItNumber(mCurrentStep); DBGP("Solution at step " << mCurrentStep); mBestList.push_back(insertState); mBestList.sort(GraspPlanningState::compareStates); while ((int)mBestList.size() > BEST_LIST_SIZE) { delete(mBestList.back()); mBestList.pop_back(); } } } //advance the planning iterator mPlanningIterator++; mCurrentStep++; emit update(); PRINT_STAT(mOut, std::endl); }
void OnLinePlanner::graspLoop() { //DBGP("Grasp loop started"); //prepare input GraspPlanningState *input = NULL; if ( processInput() ) { input = mTargetState; } //call simulated annealing SimAnn::Result r = mSimAnn->iterate(mCurrentState,mEnergyCalculator,input); mCurrentStep = mSimAnn->getCurrentStep(); if ( r == SimAnn::JUMP ) { assert(mCurrentState->isLegal()); //we have a new state from the SimAnn if (mCurrentState->getEnergy() < 0 || mCurrentState->getEnergy() < mCurrentBest->getEnergy()) { DBGP("New candidate"); GraspPlanningState *insertState = new GraspPlanningState(mCurrentState); //make solution independent of reference hand position insertState->setPositionType(SPACE_COMPLETE,true); insertState->setRefTran( mCurrentState->getObject()->getTran(), true); insertState->setItNumber( mCurrentStep ); if (insertState->getEnergy() < mCurrentBest->getEnergy()) { mCurrentBest->copyFrom( insertState ); } if (!addToListOfUniqueSolutions(insertState, &mCandidateList,0.4)) { DBGP("Similar to old candidate"); delete insertState; } else { //graspItGUI->getIVmgr()->emitAnalyzeGrasp(mCandidateList.back()); mCandidateList.sort(GraspPlanningState::compareStates);//CHANGED! was compareStates while (mCandidateList.size() > CANDIDATE_BUFFER_SIZE) { delete mCandidateList.back(); mCandidateList.pop_back(); } } DBGP("Added candidate"); } } if (mCurrentStep % 100 == 0) emit update(); render(); //DBGP("Grasp loop done"); }
/*! The caller passes it a HandObjectState and a calculator that can be used
    to compute the quality (or in annealing terms "energy") of a
    HandObjectState. This function computes the next state in the annealing
    schedule. See SimAnn::Result declaration for possible return values.

    \param currentState     the state the annealer starts from; overwritten
                            in place when a jump is performed
    \param energyCalculator used to test neighbor legality and compute energy
    \param targetState      optional bias for neighbor generation; may be
                            NULL (callers pass NULL when no input is
                            available)
    \return FAIL if no legal neighbor could be generated, JUMP if the
            annealer moved to the neighbor, KEEP otherwise
*/
SimAnn::Result
SimAnn::iterate(GraspPlanningState *currentState, SearchEnergy *energyCalculator,
                GraspPlanningState *targetState)
{
  //using different cooling constants for probs and neighbors
  double T = cooling(mT0, mParams.YC, mCurrentStep, mParams.YDIMS);

  //attempt to compute a legal neighbor of the current state
  GraspPlanningState *newState = NULL;
  double energy = 0.0;
  bool legal = false;
  int attempts = 0;
  int maxAttempts = 10;
  DBGP("Ngbr gen loop");
  while (!legal && attempts <= maxAttempts) {
    newState = stateNeighbor(currentState, T * mParams.NBR_ADJ, targetState);
    DBGP("Analyze state...");
    energyCalculator->analyzeState(legal, energy, newState);
    DBGP("Analysis done.");
    if (!legal) {
      delete newState;
      newState = NULL;
    }
    attempts++;
  }
  if (!legal) {
    DBGP("Failed to compute a legal neighbor");
    //we have failed to compute a legal neighbor.
    //weather the SimAnn should advance a step and cool down the temperature even when it fails to compute a
    //legal neighbor is debatable. Might be more interactive (especially for the online planner) if it does.
    //mCurrentStep += 1;
    return FAIL;
  }

  //we have a neighbor. Now we decide if we jump to it.
  DBGP("Legal neighbor computed; energy: " << energy);
  newState->setEnergy(energy);
  newState->setLegal(true);
  newState->setItNumber(mCurrentStep);

  //using different cooling constants for probs and neighbors
  T = cooling(mT0, mParams.HC, mCurrentStep, mParams.HDIMS);
  double P = prob(mParams.ERR_ADJ * currentState->getEnergy(),
                  mParams.ERR_ADJ * newState->getEnergy(), T);
  double U = ((double)rand()) / RAND_MAX;
  Result r = KEEP;
  if (P > U) {
    DBGP("Jump performed");
    currentState->copyFrom(newState);
    r = JUMP;
  } else {
    DBGP("Jump rejected");
  }

  mCurrentStep += 1;
  mTotalSteps += 1;
  DBGP("Main iteration done.");
  delete newState;

  if (mWriteResults && mCurrentStep % 2 == 0) {
    assert(mFile);
    //BUGFIX: targetState is optional and NULL whenever the caller has no
    //input (e.g. OnLinePlanner::graspLoop); dereferencing it here crashed
    //when result writing was enabled. Log 0.0 in that case instead.
    double targetTx = 0.0;
    if (targetState) {
      targetTx = targetState->readPosition()->readVariable("Tx");
    }
    fprintf(mFile, "%ld %d %f %f %f %f\n", mCurrentStep, mTotalSteps, T,
            currentState->getEnergy(),
            currentState->readPosition()->readVariable("Tx"),
            targetTx);
    //currentState->writeToFile(mFile);
  }
  return r;
}