void DTW::smoothData(VectorDouble &data,UINT smoothFactor,VectorDouble &resultsData){

    const UINT M = (UINT)data.size();
    const UINT N = (UINT)floor( double(M)/double(smoothFactor) );
    resultsData.resize(N,0);
    for(UINT i=0; i<N; i++) resultsData[i] = 0.0;

    //If no smoothing is required, or there is not enough data to fill one window, just copy the input
    if( smoothFactor == 1 || M < smoothFactor ){
        resultsData = data;
        return;
    }

    //Average the data within each non-overlapping window of smoothFactor samples
    for(UINT i=0; i<N; i++){
        double mean = 0.0;
        UINT index = i*smoothFactor;
        for(UINT x=0; x<smoothFactor; x++){
            mean += data[index+x];
        }
        resultsData[i] = mean/smoothFactor;
    }

    //Add on the data that does not fit into the window
    if( M % smoothFactor != 0 ){
        double mean = 0.0;
        for(UINT i=N*smoothFactor; i<M; i++) mean += data[i];
        mean /= M-(N*smoothFactor);

        //Add one extra value to the end of the vector
        VectorDouble tempVector(N+1);
        for(UINT i=0; i<N; i++) tempVector[i] = resultsData[i];
        tempVector[N] = mean;
        resultsData = tempVector;
    }
}
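//A minimal usage sketch for the smoothing above: a 10-sample ramp smoothed by a
//factor of 3 yields floor(10/3)=3 window means plus one extra element averaging
//the 10%3=1 leftover sample. Note that smoothData may be protected in some GRT
//versions, in which case it is only callable from within DTW or a subclass.
void exampleSmoothData(){ //hypothetical helper, not part of the GRT API
    VectorDouble raw(10);
    for(UINT i=0; i<10; i++) raw[i] = double(i);

    DTW dtw;
    VectorDouble smoothed;
    dtw.smoothData( raw, 3, smoothed ); //smoothed = {1, 4, 7, 9}
}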
//Compute the regression data that will be stored at this node
bool RegressionTree::computeNodeRegressionData( const RegressionData &trainingData, VectorDouble &regressionData ){

    const UINT M = trainingData.getNumSamples();
    const UINT T = trainingData.getNumTargetDimensions();

    if( M == 0 ){
        Regressifier::errorLog << "computeNodeRegressionData(...) - Failed to compute regression data, there are zero training samples!" << endl;
        return false;
    }

    //Make sure the regression data is the correct size
    regressionData.clear();
    regressionData.resize( T, 0 );

    //The regression data at this node is simply the average of the target vectors over all the training data at this node.
    //Note that the loop must run over the T target dimensions, not the number of input dimensions, as regressionData has T elements.
    for(UINT j=0; j<T; j++){
        for(UINT i=0; i<M; i++){
            regressionData[j] += trainingData[i].getTargetVector()[j];
        }
        regressionData[j] /= M;
    }

    return true;
}
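//A standalone sketch of the same per-dimension averaging, restated with plain
//std::vector targets instead of RegressionData; the helper name is illustrative only.
#include <vector>

std::vector< double > averageTargets( const std::vector< std::vector< double > > &targets ){
    const size_t M = targets.size();
    const size_t T = M > 0 ? targets[0].size() : 0;
    std::vector< double > mean( T, 0.0 );
    for(size_t j=0; j<T; j++){
        for(size_t i=0; i<M; i++) mean[j] += targets[i][j];
        mean[j] /= double( M );
    }
    return mean;
}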
bool BernoulliRBM::predict_(VectorDouble &inputData,VectorDouble &outputData){

    if( !trained ){
        errorLog << "predict_(VectorDouble &inputData,VectorDouble &outputData) - Failed to run prediction - the model has not been trained." << endl;
        return false;
    }

    if( inputData.size() != numVisibleUnits ){
        errorLog << "predict_(VectorDouble &inputData,VectorDouble &outputData) - Failed to run prediction - the input data size (" << inputData.size() << ")";
        errorLog << " does not match the number of visible units (" << numVisibleUnits << "). " << endl;
        return false;
    }

    if( outputData.size() != numHiddenUnits ){
        outputData.resize( numHiddenUnits );
    }

    //Scale the data if needed
    if( useScaling ){
        for(UINT i=0; i<numVisibleUnits; i++){
            inputData[i] = scale(inputData[i],ranges[i].minValue,ranges[i].maxValue,0,1);
        }
    }

    //Propagate the data up through the RBM
    for(UINT i=0; i<numHiddenUnits; i++){
        //The weighted sum must be reset for each hidden unit, otherwise the
        //activations of the previous units accumulate into the current one
        double x = 0.0;
        for(UINT j=0; j<numVisibleUnits; j++){
            x += weightsMatrix[i][j] * inputData[j];
        }
        outputData[i] = sigmoid( x + hiddenLayerBias[i] );
    }

    return true;
}
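//A minimal usage sketch: push one visible vector through a trained BernoulliRBM.
//Assumes the model was trained elsewhere and that the getNumVisibleUnits()
//accessor exists under that name in your GRT version.
void exampleRBMPredict( BernoulliRBM &rbm ){ //hypothetical helper
    VectorDouble visible( rbm.getNumVisibleUnits(), 0.5 );
    VectorDouble hidden;
    if( rbm.predict_( visible, hidden ) ){
        //hidden[i] now holds the sigmoid activation of hidden unit i
    }
}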
bool SVM::predictSVM(VectorDouble &inputVector,double &maxProbability, VectorDouble &probabilites){

    if( !trained || param.probability == 0 || inputVector.size() != numInputDimensions ) return false;

    double *prob_estimates = NULL;
    svm_node *x = NULL;

    //Setup the memory for the probability estimates
    prob_estimates = new double[ model->nr_class ];

    //Copy the input data into the LIBSVM format
    x = new svm_node[numInputDimensions+1];
    for(UINT j=0; j<numInputDimensions; j++){
        x[j].index = (int)j+1;
        x[j].value = inputVector[j];
    }
    //The index of the last node must be set to -1 to terminate the sparse vector
    x[numInputDimensions].index = -1;
    x[numInputDimensions].value = 0;

    //Scale the input data if required
    if( useScaling ){
        for(UINT j=0; j<numInputDimensions; j++)
            x[j].value = scale(x[j].value,ranges[j].minValue,ranges[j].maxValue,SVM_MIN_SCALE_RANGE,SVM_MAX_SCALE_RANGE);
    }

    //Perform the SVM prediction
    double predict_label = svm_predict_probability(model,x,prob_estimates);

    predictedClassLabel = 0;
    maxProbability = 0;
    probabilites.resize(model->nr_class);
    for(int k=0; k<model->nr_class; k++){
        if( maxProbability < prob_estimates[k] ){
            maxProbability = prob_estimates[k];
            predictedClassLabel = k+1;
            maxLikelihood = maxProbability;
        }
        probabilites[k] = prob_estimates[k];
    }

    if( !useNullRejection ) predictedClassLabel = (UINT)predict_label;
    else{
        if( maxProbability >= classificationThreshold ){
            predictedClassLabel = (UINT)predict_label;
        }else predictedClassLabel = GRT_DEFAULT_NULL_CLASS_LABEL;
    }

    //Clean up the memory
    delete[] prob_estimates;
    delete[] x;

    return true;
}
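//predictSVM(...) is an internal helper; callers normally go through the public
//predict path. A minimal sketch, assuming a GRT build where these accessors
//exist under these names and the model was trained with probability estimates enabled.
void exampleSVMPredict( SVM &svm, const VectorDouble &sample ){ //hypothetical helper
    if( svm.predict( sample ) ){
        UINT label = svm.getPredictedClassLabel();
        double likelihood = svm.getMaximumLikelihood();
        //label is GRT_DEFAULT_NULL_CLASS_LABEL if null rejection rejected the sample
    }
}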
bool KMeansFeatures::projectDataThroughLayer( const VectorDouble &input, VectorDouble &output, const UINT layer ){

    if( layer >= clusters.size() ){
        errorLog << "projectDataThroughLayer(...) - Layer out of bounds! It should be less than: " << clusters.size() << endl;
        return false;
    }

    const UINT M = clusters[ layer ].getNumRows();
    const UINT N = clusters[ layer ].getNumCols();

    if( input.size() != N ){
        errorLog << "projectDataThroughLayer(...) - The size of the input vector (" << input.size() << ") does not match the size: " << N << endl;
        return false;
    }

    //Make sure the output vector size is OK
    if( output.size() != M ){
        output.resize( M );
    }

    //The output of the layer is the L2 (Euclidean) distance between the input and each of the M cluster centers
    UINT i,j;
    for(i=0; i<M; i++){
        output[i] = 0;
        for(j=0; j<N; j++){
            output[i] += SQR( input[j] - clusters[layer][i][j] );
        }
        output[i] = sqrt( output[i] ); //L2 norm
    }

    return true;
}
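//A standalone sketch of the projection above, restated with plain std::vector
//types; the function name is illustrative only.
#include <vector>
#include <cmath>

std::vector< double > l2DistancesToClusters( const std::vector< double > &input,
                                             const std::vector< std::vector< double > > &clusters ){
    std::vector< double > output( clusters.size(), 0.0 );
    for(size_t i=0; i<clusters.size(); i++){
        for(size_t j=0; j<input.size(); j++){
            const double d = input[j] - clusters[i][j];
            output[i] += d*d;
        }
        output[i] = std::sqrt( output[i] );
    }
    return output;
}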
VectorDouble MovementTrajectoryFeatures::update(const VectorDouble &x){

#ifdef GRT_SAFE_CHECKING
    if( !initialized ){
        errorLog << "update(const VectorDouble &x) - Not Initialized!" << endl;
        return vector<double>();
    }

    if( x.size() != numInputDimensions ){
        errorLog << "update(const VectorDouble &x) - The Number Of Input Dimensions (" << numInputDimensions << ") does not match the size of the input vector (" << x.size() << ")!" << endl;
        return vector<double>();
    }
#endif

    //Add the new data to the trajectory data buffer
    trajectoryDataBuffer.push_back( x );

    //Only flag that the feature data is ready if the trajectory data is full
    featureDataReady = trajectoryDataBuffer.getBufferFilled();

    //Compute the centroids
    centroids.setAllValues(0);

    UINT dataBufferIndex = 0;
    UINT numValuesPerCentroid = trajectoryLength / numCentroids; //integer division intentionally floors
    for(UINT n=0; n<numInputDimensions; n++){
        dataBufferIndex = 0;
        for(UINT i=0; i<numCentroids; i++){
            for(UINT j=0; j<numValuesPerCentroid; j++){
                centroids[i][n] += trajectoryDataBuffer[dataBufferIndex++][n];
            }
            centroids[i][n] /= double(numValuesPerCentroid);
        }
    }

    //Compute the features
    UINT featureIndex = 0;
    vector< MinMax > centroidNormValues(numInputDimensions);
    VectorDouble histSumValues;
    vector< vector< AngleMagnitude > > angleMagnitudeValues;
    switch( featureMode ){
        case CENTROID_VALUE:
            //Simply set the feature vector as the list of centroids
            for(UINT n=0; n<numInputDimensions; n++){
                for(UINT i=0; i<numCentroids; i++){
                    featureVector[ featureIndex++ ] = centroids[i][n];
                }
            }
            break;
        case NORMALIZED_CENTROID_VALUE:
            for(UINT n=0; n<numInputDimensions; n++){
                //Find the min and max values
                for(UINT i=0; i<numCentroids; i++){
                    centroidNormValues[n].updateMinMax( centroids[i][n] );
                }

                //Use the normalized centroids as the features
                for(UINT i=0; i<numCentroids; i++){
                    if( centroidNormValues[n].maxValue != centroidNormValues[n].minValue ){
                        featureVector[ featureIndex++ ] = Util::scale(centroids[i][n],centroidNormValues[n].minValue,centroidNormValues[n].maxValue,0,1);
                    }else featureVector[ featureIndex++ ] = 0;
                }

                //Add the start and end centroid values if needed
                if( useTrajStartAndEndValues ){
                    featureVector[ featureIndex++ ] = centroids[0][n];
                    featureVector[ featureIndex++ ] = centroids[numCentroids-1][n];
                }
            }
            break;
        case CENTROID_DERIVATIVE:
            for(UINT n=0; n<numInputDimensions; n++){
                //Compute the derivative between centroid i and centroid i+1
                for(UINT i=0; i<numCentroids-1; i++){
                    featureVector[ featureIndex++ ] = centroids[i+1][n]-centroids[i][n];
                }

                //Add the start and end centroid values if needed
                if( useTrajStartAndEndValues ){
                    featureVector[ featureIndex++ ] = centroids[0][n];
                    featureVector[ featureIndex++ ] = centroids[numCentroids-1][n];
                }
            }
            break;
        case CENTROID_ANGLE_2D:
            {
            histSumValues.resize( numInputDimensions/2, 0);
            angleMagnitudeValues.resize( numInputDimensions/2 );

            //Zero the feature vector
            fill(featureVector.begin(),featureVector.end(),0);

            const double degreesPerBin = 360.0/numHistogramBins;

            //Compute the angle and magnitude between each pair of consecutive centroids, for each 2D point
            for(UINT n=0; n<numInputDimensions/2; n++){
                //Resize the nth buffer to hold the values for each centroid
                angleMagnitudeValues[n].resize(numCentroids-1);
                for(UINT i=0; i<numCentroids-1; i++){
                    Util::cartToPolar(centroids[i+1][n*2]-centroids[i][n*2], centroids[i+1][n*2+1]-centroids[i][n*2+1], angleMagnitudeValues[n][i].magnitude, angleMagnitudeValues[n][i].angle);
                }

                //Add the angles to the histogram
                for(UINT i=0; i<numCentroids-1; i++){
                    if( angleMagnitudeValues[n][i].angle < 0 || angleMagnitudeValues[n][i].angle > 360.0 ){
                        warningLog << "The angle of a point is not between [0 360]. Angle: " << angleMagnitudeValues[n][i].angle << endl;
                        return vector<double>();
                    }

                    //Find which histogram bin the current angle falls in, clamping the edge case where the angle is exactly 360 degrees
                    UINT histBin = (UINT)floor( angleMagnitudeValues[n][i].angle / degreesPerBin );
                    if( histBin >= numHistogramBins ) histBin = numHistogramBins-1;

                    histSumValues[ n ] += useWeightedMagnitudeValues ? angleMagnitudeValues[n][i].magnitude : 1;
                    featureVector[ n*numHistogramBins + histBin ] += useWeightedMagnitudeValues ? angleMagnitudeValues[n][i].magnitude : 1;
                }
            }

            //Normalize the histogram bins. This must run after the main loop above has
            //finished, otherwise the bins of earlier point pairs are normalized more than once
            for(UINT n=0; n<numInputDimensions/2; n++){
                if( histSumValues[ n ] > 0 ){
                    for(UINT i=0; i<numHistogramBins; i++){
                        featureVector[ n*numHistogramBins + i ] /= histSumValues[ n ];
                    }
                }
            }
            }
            break;
        default:
            errorLog << "update(const VectorDouble &x) - Unknown featureMode!" << endl;
            return featureVector;
    }

    return featureVector;
}
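//A minimal usage sketch: stream 2D samples through the feature module. The
//constructor argument order (trajectoryLength, numCentroids, featureMode,
//numHistogramBins, numDimensions) and the getFeatureDataReady() accessor name
//are assumed; check the header of your GRT version.
#include <cmath>

void exampleTrajectoryFeatures(){ //hypothetical helper
    MovementTrajectoryFeatures traj( 100, 10, MovementTrajectoryFeatures::CENTROID_VALUE, 10, 2 );

    VectorDouble sample( 2, 0.0 );
    for(UINT t=0; t<200; t++){
        sample[0] = sin( t * 0.1 );
        sample[1] = cos( t * 0.1 );
        VectorDouble features = traj.update( sample );
        if( traj.getFeatureDataReady() ){
            //features now holds numCentroids centroid values per input dimension
        }
    }
}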