bool DynamicMovementPrimitive::learnFromTrajectory(const Trajectory &trajectory)
{
    if (!initialized_) {
        printf("ERROR: DMP motion unit is not initialized, not learning from trajectory.\n");
        params_.isLearned_ = false;
        return params_.isLearned_;
    }

    int numRows = trajectory.getLength();
    if (numRows < MIN_NUM_DATA_POINTS) {
        printf("ERROR: Trajectory has %i rows, but should have at least %i.\n", numRows, MIN_NUM_DATA_POINTS);
        params_.isLearned_ = false;
        return params_.isLearned_;
    }

    double samplingFrequency = trajectory.getSamplingFrequency();
    if (samplingFrequency <= 0) {
        printf("ERROR: Sampling frequency %f [Hz] of the trajectory is not valid.\n", samplingFrequency);
        params_.isLearned_ = false;
        return params_.isLearned_;
    }

    // set the teaching duration to the duration of the trajectory
    params_.teachingDuration_ = static_cast<double>(numRows) / static_cast<double>(samplingFrequency);

    params_.deltaT_ = static_cast<double>(1.0) / samplingFrequency;
    params_.initialDeltaT_ = params_.deltaT_;
    params_.tau_ = params_.teachingDuration_;
    params_.initialTau_ = params_.tau_;

    // compute alpha_x such that the canonical system drops below the cutoff when the trajectory has finished;
    // alpha_x is the time constant of the phase system (second-order asymptotically stable system)
    params_.alphaX_ = -log(params_.canSysCutoff_);

    double mseTotal = 0.0;
    double normalizedMseTotal = 0.0;
    for (int i = 0; i < params_.numTransformationSystems_; i++) {
        transformationSystems_[i].trajectoryTarget_.clear();
        transformationSystems_[i].resetMSE();
    }
    trajectoryTargetFunctionInput_.clear();

    // reset the canonical system
    resetCanonicalState();

    // obtain the start and goal positions
    VectorXd start = VectorXd::Zero(params_.numTransformationSystems_);
    if (!trajectory.getStartPosition(start)) {
        printf("ERROR: Could not get the start position of the trajectory.\n");
        params_.isLearned_ = false;
        return params_.isLearned_;
    }
    VectorXd goal = VectorXd::Zero(params_.numTransformationSystems_);
    if (!trajectory.getEndPosition(goal)) {
        printf("ERROR: Could not get the goal position of the trajectory.\n");
        params_.isLearned_ = false;
        return params_.isLearned_;
    }

    // set y0 to the start state of the trajectory and the goal to its end state
    for (int i = 0; i < params_.numTransformationSystems_; i++) {
        // check whether all this is necessary (I don't think so...)
        transformationSystems_[i].reset();

        // set start and goal
        transformationSystems_[i].setStart(start(i));
        transformationSystems_[i].setGoal(goal(i));

        // set the current state to the start state (position and velocity)
        transformationSystems_[i].setState(start(i), 0.0);
    }

    for (int i = 0; i < params_.numTransformationSystems_; i++) {
        transformationSystems_[i].setInitialStart(transformationSystems_[i].y0_);
        transformationSystems_[i].setInitialGoal(transformationSystems_[i].goal_);
    }

    // For each time step and each dimension, perform supervised learning of the input trajectory.
    // Strictly speaking this is not a "classical" learning problem: the task is to encode the
    // target trajectory in the DMP by representing it as a second-order system modulated by
    // a nonlinear function f.
    for (int rowIndex = 0; rowIndex < numRows; rowIndex++) {
        // Set the transformation target:
        // t_, td_ and tdd_ are the current position, velocity and acceleration we want
        // to learn through supervised learning. f_ holds the current value of
        // the nonlinear function used to modulate the DMP behaviour, while ft_ is the
        // target value for that nonlinear function.
        // NOTE: is f_ actually used anywhere?
        for (int i = 0; i < params_.numTransformationSystems_; i++) {
            transformationSystems_[i].t_ = trajectory.getTrajectoryPosition(rowIndex, i);
            transformationSystems_[i].td_ = trajectory.getTrajectoryVelocity(rowIndex, i);
            transformationSystems_[i].tdd_ = trajectory.getTrajectoryAcceleration(rowIndex, i);
            transformationSystems_[i].f_ = 0.0;
            transformationSystems_[i].ft_ = 0.0;
        }

        // Fit the target function:
        // compute the ideal value of f_ (i.e. ft_) that makes the DMP reproduce the trajectory exactly.
        if (!integrateAndFit()) {
            printf("ERROR: Could not integrate the system and fit the target function.\n");
            params_.isLearned_ = false;
            return params_.isLearned_;
        }
    }

    if (!writeVectorToFile(trajectoryTargetFunctionInput_, "data/trajectory_target_function_input_.txt"))
        return false;
    if (!transformationSystems_[0].writeTrajectoryTargetToFile("data/trajectory_target_.txt"))
        return false;

    if (!learnTransformationTarget()) {
        printf("ERROR: Could not learn transformation target.\n");
        params_.isLearned_ = false;
        return params_.isLearned_;
    }

    mseTotal = 0.0;
    normalizedMseTotal = 0.0;
    for (int i = 0; i < params_.numTransformationSystems_; i++) {
        double mse;
        double normalizedMse;
        if (transformationSystems_[i].getMSE(mse)) {
            mseTotal += mse;
        }
        if (transformationSystems_[i].getNormalizedMSE(normalizedMse)) {
            normalizedMseTotal += normalizedMse;
        }
        transformationSystems_[i].resetMSE();
    }

    printf("Successfully learned DMP from trajectory.\n");
    params_.isLearned_ = true;
    return params_.isLearned_;
}
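A note on the alpha_x choice above: assuming the canonical system is the usual first-order phase equation x' = -(alpha_x / tau) * x with x(0) = 1, the phase decays to exp(-alpha_x) at t = tau, so setting alpha_x = -log(canSysCutoff_) makes the phase reach the cutoff exactly when the taught movement ends. The standalone sketch below only illustrates that relationship; the cutoff, tau and deltaT values are assumed for the example and are not the actual params_ members.

/* Standalone check (illustration only) that alpha_x = -log(cutoff) drives the
 * canonical phase from 1.0 down to roughly the cutoff at t = tau; all values
 * below are assumed, not taken from the DMP class. */
#include <math.h>
#include <stdio.h>

int main(void)
{
    const double cutoff = 0.001;        /* assumed canSysCutoff_ */
    const double tau    = 2.0;          /* assumed movement duration [s] */
    const double deltaT = 1.0 / 100.0;  /* assumed sampling period [s] */
    const double alphaX = -log(cutoff); /* same formula as params_.alphaX_ above */

    double x = 1.0;                     /* canonical phase starts at 1 */
    double t;
    for (t = 0.0; t < tau; t += deltaT)
        x += -(alphaX / tau) * x * deltaT;   /* Euler step of x' = -(alpha_x / tau) x */

    /* x(tau) is close to the cutoff, up to Euler discretization error */
    printf("x(tau) = %g (cutoff = %g)\n", x, cutoff);
    return 0;
}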
int main(int argc, char** argv)
{
    pvector_t v, tmp = NULL, samples = NULL;
    index_t i, length, step;
    unit_t min, max;
    MPI_Status status;
    MPI_Datatype sampleDatatype;

    if (initMPI(&argc, &argv) != MPI_SUCCESS)
        return AbortAndExit(ERRORCODE_MPI_ERROR, "Cannot initialize MPI.");

    if (argc < 3) {
        fprintf(stderr, "MPI Parallel Sorting by Regular Sampling implementation.\nUsage:\n\t%s <data set (to read)> <result file (to write)>\n", argv[0]);
        MPI_Finalize();
        return 1;
    }

    if (ID == ROOT_ID) {
        tmp = openVectorFile(ARGV_FILE_NAME);
        printf("Data set size: %d, process number: %d\n", tmp->length, PROCESS_NUMBER);
        if ((tmp->length / PROCESS_NUMBER) <= PROCESS_NUMBER)
            AbortAndExit(ERRORCODE_SIZE_DONT_MATCH, "Processor number is too big or size of data set is too small for correct calculation.\n");
        ELEMENTS_NUMBER = tmp->length;
    }

    if (MPI_Bcast(tableOfConstants, TABLE_OF_CONSTANTS_SIZE, MPI_INT, ROOT_ID, MPI_COMM_WORLD) != MPI_SUCCESS)
        return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Bcast error.");

    ELEMENTS_PER_PROCESS = listLength(ID);
    initVector(&v, ELEMENTS_PER_PROCESS);

    if (ID == ROOT_ID) {
        /* Distribute the data set to all processes */
        copyVector(tmp, v, v->length);
        for (i = 1, step = ELEMENTS_PER_PROCESS; i < PROCESS_NUMBER; i++) {
            if (MPI_Send(&(tmp->vector[step]), listLength(i), MPI_UNIT, i, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
                return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Send error.");
            step += listLength(i);
        }
    } else if (MPI_Recv(v->vector, ELEMENTS_PER_PROCESS, MPI_UNIT, ROOT_ID, 0, MPI_COMM_WORLD, &status) != MPI_SUCCESS)
        return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Recv error.");

    quicksortVector(v);

    if (initVector(&samples, PROCESS_NUMBER - 1) == NULL)
        return AbortAndExit(ERRORCODE_CANT_MALLOC, "Cannot allocate memory for samples vector.");

    MPI_Type_vector(PROCESS_NUMBER, 1, ELEMENTS_NUMBER / SQR_PROCESS_NUMBER, MPI_UNIT, &sampleDatatype);
    MPI_Type_commit(&sampleDatatype);

    if (ID != ROOT_ID) {
        /* Send samples to the root process */
        if (MPI_Send(v->vector, 1, sampleDatatype, ROOT_ID, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
            return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Send error.");
        if (initVector(&tmp, listLength(PROCESS_NUMBER - 1)) == NULL)
            return AbortAndExit(ERRORCODE_CANT_MALLOC, "Cannot allocate memory for temporary vector.");
    } else {
        /* Receive samples */
        copySampleToVector(v, tmp, (v->length) / PROCESS_NUMBER, PROCESS_NUMBER);
        for (step = PROCESS_NUMBER, i = 1; i < PROCESS_NUMBER; i++, step += PROCESS_NUMBER)
            if (MPI_Recv(&(tmp->vector[step]), PROCESS_NUMBER, MPI_UNIT, i, 0, MPI_COMM_WORLD, &status) != MPI_SUCCESS)
                return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Recv error.");
        quicksort(tmp->vector, 0, SQR_PROCESS_NUMBER);
        copySampleToVector(tmp, samples, SQR_PROCESS_NUMBER / (PROCESS_NUMBER - 1), PROCESS_NUMBER - 1);
    }

    /* Broadcast the selected samples (pivots) to all processes */
    if (MPI_Bcast(samples->vector, PROCESS_NUMBER - 1, MPI_UNIT, ROOT_ID, MPI_COMM_WORLD) != MPI_SUCCESS)
        return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Bcast error.");

    if ((i = dataExchange((ID == 0) ? UNITT_MIN : getFromVector(samples, ID - 1),
                          (ID == (PROCESS_NUMBER - 1)) ? UNITT_MAX : getFromVector(samples, ID),
                          &v, tmp)) != ERRORCODE_NOERRORS)
        return AbortAndExit(i, "Error during data exchange.");

    /* Sort the newly received data */
    quicksortVector(v);

    if (ID != ROOT_ID) {
        /* Send the sorted data */
        if (MPI_Send(&(v->length), 1, MPI_INT, ROOT_ID, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
            return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Send (sending size of data) error.");
        if (MPI_Send(v->vector, v->length, MPI_UNIT, ROOT_ID, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
            return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Send error.");
    } else {
        /* Receive the sorted data */
        copyVector(v, tmp, v->length);
        for (step = v->length, i = 1; i < PROCESS_NUMBER; i++) {
            if (MPI_Recv(&length, 1, MPI_INT, i, 0, MPI_COMM_WORLD, &status) != MPI_SUCCESS)
                return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Recv (receiving size of data) error.");
            if (MPI_Recv(&(tmp->vector[step]), length, MPI_UNIT, i, 0, MPI_COMM_WORLD, &status) != MPI_SUCCESS)
                return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Recv error.");
            step += length;
        }
        writeVectorToFile(tmp, ARGV_RESULT_NAME);
        freeVector(&tmp);
    }

    freeVector(&v);
    MPI_Finalize();
    return 0;
}
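For reference, here is a minimal single-process sketch of the regular-sampling step that the program above performs with MPI_Type_vector and copySampleToVector: each of p processes contributes p evenly spaced samples from its locally sorted block, the root sorts the p*p samples, and every p-th one becomes a global pivot. The block contents, sizes and names below are illustrative only and are not taken from the program.

/* Single-process illustration of PSRS pivot selection; p = 3 and 9 elements per
 * block are assumed, and none of these names come from the program above. */
#include <stdio.h>
#include <stdlib.h>

static int cmp(const void *a, const void *b)
{
    return (*(const int *)a > *(const int *)b) - (*(const int *)a < *(const int *)b);
}

int main(void)
{
    enum { P = 3, N = 9 };                 /* p "processes", n elements per local block */
    int blocks[P][N] = {                   /* already locally sorted, as after quicksortVector() */
        { 1,  4,  7, 10, 13, 16, 19, 22, 25},
        { 2,  5,  8, 11, 14, 17, 20, 23, 26},
        { 3,  6,  9, 12, 15, 18, 21, 24, 27}
    };
    int samples[P * P], pivots[P - 1];
    int i, j;

    /* Each "process" takes P evenly spaced samples from its sorted block
     * (stride n/p, analogous to the MPI_Type_vector stride above). */
    for (i = 0; i < P; i++)
        for (j = 0; j < P; j++)
            samples[i * P + j] = blocks[i][j * (N / P)];

    /* The root sorts the p*p collected samples. */
    qsort(samples, P * P, sizeof(int), cmp);

    /* Every p-th sample becomes a global pivot: p-1 pivots split the value
     * range into p buckets, one bucket per process. */
    for (i = 0; i < P - 1; i++)
        pivots[i] = samples[(i + 1) * P];

    for (i = 0; i < P - 1; i++)
        printf("pivot %d: %d\n", i, pivots[i]);
    return 0;
}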