bool MatrixFloat::subtract(const MatrixFloat &a,const MatrixFloat &b){
    
    const unsigned int M = a.getNumRows();
    const unsigned int N = a.getNumCols();
    
    if( M != b.getNumRows() ){
        errorLog << "subtract(const MatrixFloat &a,const MatrixFloat &b) - Failed to subtract matrix! The rows do not match!";
        errorLog << " a rows: " << M << " b rows: " << b.getNumRows() << std::endl;
        return false;
    }
    
    if( N != b.getNumCols() ){
        errorLog << "subtract(const MatrixFloat &a,const MatrixFloat &b) - Failed to subtract matrix! The columns do not match!";
        errorLog << " a cols: " << N << " b cols: " << b.getNumCols() << std::endl;
        return false;
    }
    
    if( !resize( M, N ) ){
        errorLog << "subtract(const MatrixFloat &a,const MatrixFloat &b) - Failed to resize matrix!" << std::endl;
        return false;
    }
    
    UINT i,j;
    
    //Using direct pointers really helps speed up the computation time
    Float **pa = a.getDataPointer();
    Float **pb = b.getDataPointer();
    
    for(i=0; i<M; i++){
        for(j=0; j<N; j++){
            dataPtr[i*cols+j] = pa[i][j] - pb[i][j];
        }
    }
    
    return true;
}
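/*
 Usage sketch (a minimal example, not part of the library; assumes the GRT
 headers are installed as <GRT/GRT.h> and the snippet is linked against GRT):
 
     #include <GRT/GRT.h>
     using namespace GRT;
     
     int main(){
         MatrixFloat a(2,3), b(2,3), c;
         a.setAllValues( 3.0 );
         b.setAllValues( 1.0 );
         if( !c.subtract( a, b ) ) return 1; //c is resized to 2x3, every c[i][j] == 2.0
         return 0;
     }
*/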
bool MatrixFloat::multiple(const MatrixFloat &a,const MatrixFloat &b,const bool aTranspose){
    
    const unsigned int M = !aTranspose ? a.getNumRows() : a.getNumCols();
    const unsigned int N = !aTranspose ? a.getNumCols() : a.getNumRows();
    const unsigned int K = b.getNumRows();
    const unsigned int L = b.getNumCols();
    
    if( N != K ) {
        errorLog << "multiple(const MatrixFloat &a,const MatrixFloat &b,const bool aTranspose) - The number of columns in matrix a (" << N << ") does not match the number of rows in matrix b (" << K << ")" << std::endl;
        return false;
    }
    
    if( !resize( M, L ) ){
        errorLog << "multiple(const MatrixFloat &a,const MatrixFloat &b,const bool aTranspose) - Failed to resize matrix!" << std::endl;
        return false;
    }
    
    unsigned int i, j, k = 0;
    
    //Using direct pointers really helps speed up the computation time
    Float **pa = a.getDataPointer();
    Float **pb = b.getDataPointer();
    
    if( aTranspose ){
        
        for(j=0; j<L; j++){
            for(i=0; i<M; i++){
                dataPtr[i*cols+j] = 0;
                for(k=0; k<K; k++){
                    dataPtr[i*cols+j] += pa[k][i] * pb[k][j];
                }
            }
        }
        
    }else{
        
        for(j=0; j<L; j++){
            for(i=0; i<M; i++){
                dataPtr[i*cols+j] = 0;
                for(k=0; k<K; k++){
                    dataPtr[i*cols+j] += pa[i][k] * pb[k][j];
                }
            }
        }
        
    }
    
    return true;
}
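/*
 Usage sketch for the aTranspose flag (a minimal example, not part of the
 library): with aTranspose == true the method computes this = a' * b without
 explicitly transposing a, which is what BernoulliRBM::train_ below relies on
 when it forms c1 = h1' * v1.
 
     MatrixFloat a(4,2), b(4,3), c;
     //...fill a and b...
     c.multiple( a, b, true );  //c is 2x3, c[i][j] = sum_k a[k][i] * b[k][j]
*/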
MatrixFloat MatrixFloat::multiple(const MatrixFloat &b) const{
    
    const unsigned int M = rows;
    const unsigned int N = cols;
    const unsigned int K = b.getNumRows();
    const unsigned int L = b.getNumCols();
    
    if( N != K ) {
        errorLog << "multiple(MatrixFloat b) - The number of rows in b (" << K << ") does not match the number of columns in this matrix (" << N << ")" << std::endl;
        return MatrixFloat();
    }
    
    MatrixFloat c(M,L);
    Float **pb = b.getDataPointer();
    Float **pc = c.getDataPointer();
    
    unsigned int i,j,k = 0;
    for(i=0; i<M; i++){
        for(j=0; j<L; j++){
            pc[i][j] = 0;
            for(k=0; k<K; k++){
                pc[i][j] += dataPtr[i*cols+k] * pb[k][j];
            }
        }
    }
    
    return c;
}
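/*
 Usage sketch (a minimal example, not part of the library): unlike the
 in-place overload above, this version returns the product by value and
 returns an empty MatrixFloat on a dimension mismatch.
 
     MatrixFloat a(2,3), b(3,4);
     //...fill a and b...
     MatrixFloat c = a.multiple( b );     //c is 2x4
     if( c.getNumRows() == 0 ) return 1;  //dimension mismatch
*/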
bool BernoulliRBM::train_(MatrixFloat &data){
    
    const UINT numTrainingSamples = data.getNumRows();
    numInputDimensions = data.getNumCols();
    numOutputDimensions = numHiddenUnits;
    numVisibleUnits = numInputDimensions;
    
    trainingLog << "NumInputDimensions: " << numInputDimensions << std::endl;
    trainingLog << "NumOutputDimensions: " << numOutputDimensions << std::endl;
    
    if( randomizeWeightsForTraining ){
        
        //Init the weights matrix
        weightsMatrix.resize(numHiddenUnits, numVisibleUnits);
        
        Float a = 1.0 / numVisibleUnits;
        for(UINT i=0; i<numHiddenUnits; i++) {
            for(UINT j=0; j<numVisibleUnits; j++) {
                weightsMatrix[i][j] = rand.getRandomNumberUniform(-a, a);
            }
        }
        
        //Init the bias units
        visibleLayerBias.resize( numVisibleUnits );
        hiddenLayerBias.resize( numHiddenUnits );
        std::fill(visibleLayerBias.begin(),visibleLayerBias.end(),0);
        std::fill(hiddenLayerBias.begin(),hiddenLayerBias.end(),0);
        
    }else{
        if( weightsMatrix.getNumRows() != numHiddenUnits ){
            errorLog << "train_(MatrixFloat &data) - Weights matrix row size does not match the number of hidden units!" << std::endl;
            return false;
        }
        if( weightsMatrix.getNumCols() != numVisibleUnits ){
            errorLog << "train_(MatrixFloat &data) - Weights matrix column size does not match the number of visible units!" << std::endl;
            return false;
        }
        if( visibleLayerBias.size() != numVisibleUnits ){
            errorLog << "train_(MatrixFloat &data) - Visible layer bias size does not match the number of visible units!" << std::endl;
            return false;
        }
        if( hiddenLayerBias.size() != numHiddenUnits ){
            errorLog << "train_(MatrixFloat &data) - Hidden layer bias size does not match the number of hidden units!" << std::endl;
            return false;
        }
    }
    
    //Flag that the model has been trained in case the user wants to save the model during a training iteration using an observer
    trained = true;
    
    //Make sure the data is scaled between [0 1]
    ranges = data.getRanges();
    if( useScaling ){
        for(UINT i=0; i<numTrainingSamples; i++){
            for(UINT j=0; j<numInputDimensions; j++){
                data[i][j] = grt_scale(data[i][j], ranges[j].minValue, ranges[j].maxValue, 0.0, 1.0);
            }
        }
    }
    
    const UINT numBatches = static_cast<UINT>( ceil( Float(numTrainingSamples)/batchSize ) );
    
    //Set up the batch indices
    Vector< BatchIndexs > batchIndexs( numBatches );
    UINT startIndex = 0;
    for(UINT i=0; i<numBatches; i++){
        batchIndexs[i].startIndex = startIndex;
        batchIndexs[i].endIndex = startIndex + batchSize;
        
        //Make sure the last batch end index is not larger than the number of training examples
        if( batchIndexs[i].endIndex >= numTrainingSamples ){
            batchIndexs[i].endIndex = numTrainingSamples;
        }
        
        //Get the batch size
        batchIndexs[i].batchSize = batchIndexs[i].endIndex - batchIndexs[i].startIndex;
        
        //Set the start index for the next batch
        startIndex = batchIndexs[i].endIndex;
    }
    
    Timer timer;
    UINT i,j,n,epoch,noChangeCounter = 0;
    Float startTime = 0;
    Float alpha = learningRate;
    Float error = 0;
    Float err = 0;
    Float delta = 0;
    Float lastError = 0;
    Vector< UINT > indexList(numTrainingSamples);
    TrainingResult trainingResult;
    MatrixFloat wT( numVisibleUnits, numHiddenUnits );       //Stores a transposed copy of the weights matrix
    MatrixFloat vW( numHiddenUnits, numVisibleUnits );       //Stores the weight velocity updates
    MatrixFloat tmpW( numHiddenUnits, numVisibleUnits );     //Stores the weight values that will be used to update the main weights matrix at each batch update
    MatrixFloat v1( batchSize, numVisibleUnits );            //Stores the real batch data during a batch update
    MatrixFloat v2( batchSize, numVisibleUnits );            //Stores the sampled batch data during a batch update
    MatrixFloat h1( batchSize, numHiddenUnits );             //Stores the hidden states given v1 and the current weightsMatrix
    MatrixFloat h2( batchSize, numHiddenUnits );             //Stores the sampled hidden states given v2 and the current weightsMatrix
    MatrixFloat c1( numHiddenUnits, numVisibleUnits );       //Stores h1' * v1
    MatrixFloat c2( numHiddenUnits, numVisibleUnits );       //Stores h2' * v2
    MatrixFloat vDiff( batchSize, numVisibleUnits );         //Stores the difference between v1-v2
    MatrixFloat hDiff( batchSize, numHiddenUnits );          //Stores the difference between h1-h2
    MatrixFloat cDiff( numHiddenUnits, numVisibleUnits );    //Stores the difference between c1-c2
    VectorFloat vDiffSum( numVisibleUnits );                 //Stores the column sum of vDiff
    VectorFloat hDiffSum( numHiddenUnits );                  //Stores the column sum of hDiff
    VectorFloat visibleLayerBiasVelocity( numVisibleUnits ); //Stores the velocity update of the visibleLayerBias
    VectorFloat hiddenLayerBiasVelocity( numHiddenUnits );   //Stores the velocity update of the hiddenLayerBias
    
    //Set all the velocity weights to zero
    vW.setAllValues( 0 );
    std::fill(visibleLayerBiasVelocity.begin(),visibleLayerBiasVelocity.end(),0);
    std::fill(hiddenLayerBiasVelocity.begin(),hiddenLayerBiasVelocity.end(),0);
    
    //Randomize the order that the training samples will be used in
    for(UINT i=0; i<numTrainingSamples; i++) indexList[i] = i;
    if( randomiseTrainingOrder ){
        std::random_shuffle(indexList.begin(), indexList.end());
    }
    
    //Start the main training loop
    timer.start();
    for(epoch=0; epoch<maxNumEpochs; epoch++) {
        startTime = timer.getMilliSeconds();
        error = 0;
        
        //Randomize the batch order
        std::random_shuffle(batchIndexs.begin(),batchIndexs.end());
        
        //Run each of the batch updates
        for(UINT k=0; k<numBatches; k+=batchStepSize){
            
            //Resize the data matrices, the matrices will only be resized if the rows/cols are different
            v1.resize( batchIndexs[k].batchSize, numVisibleUnits );
            h1.resize( batchIndexs[k].batchSize, numHiddenUnits );
            v2.resize( batchIndexs[k].batchSize, numVisibleUnits );
            h2.resize( batchIndexs[k].batchSize, numHiddenUnits );
            
            //Setup the data pointers, using data pointers saves a few ms on large matrix updates
            Float **w_p = weightsMatrix.getDataPointer();
            Float **wT_p = wT.getDataPointer();
            Float **vW_p = vW.getDataPointer();
            Float **data_p = data.getDataPointer();
            Float **v1_p = v1.getDataPointer();
            Float **v2_p = v2.getDataPointer();
            Float **h1_p = h1.getDataPointer();
            Float **h2_p = h2.getDataPointer();
            Float *vlb_p = &visibleLayerBias[0];
            Float *hlb_p = &hiddenLayerBias[0];
            
            //Get the batch data
            UINT index = 0;
            for(i=batchIndexs[k].startIndex; i<batchIndexs[k].endIndex; i++){
                for(j=0; j<numVisibleUnits; j++){
                    v1_p[index][j] = data_p[ indexList[i] ][j];
                }
                index++;
            }
            
            //Copy a transposed version of the weights matrix, this is used to compute h1 and h2
            for(i=0; i<numHiddenUnits; i++)
                for(j=0; j<numVisibleUnits; j++)
                    wT_p[j][i] = w_p[i][j];
            
            //Compute h1
            h1.multiple(v1, wT);
            for(n=0; n<batchIndexs[k].batchSize; n++){
                for(i=0; i<numHiddenUnits; i++){
                    h1_p[n][i] = sigmoidRandom( h1_p[n][i] + hlb_p[i] );
                }
            }
            
            //Compute v2
            v2.multiple(h1, weightsMatrix);
            for(n=0; n<batchIndexs[k].batchSize; n++){
                for(i=0; i<numVisibleUnits; i++){
                    v2_p[n][i] = sigmoidRandom( v2_p[n][i] + vlb_p[i] );
                }
            }
            
            //Compute h2
            h2.multiple(v2,wT);
            for(n=0; n<batchIndexs[k].batchSize; n++){
                for(i=0; i<numHiddenUnits; i++){
                    h2_p[n][i] = grt_sigmoid( h2_p[n][i] + hlb_p[i] );
                }
            }
            
            //Compute c1, c2 and the difference between v1-v2
            c1.multiple(h1,v1,true);
            c2.multiple(h2,v2,true);
            vDiff.subtract(v1, v2);
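            //At this point one contrastive-divergence (CD-1) step is complete:
            //v1 holds the batch data, h1 is a stochastic sample from p(h|v1),
            //v2 is a stochastic sample from p(v|h1), and h2 holds the sigmoid
            //probabilities p(h|v2). The weight gradient is approximated by
            //(h1'*v1 - h2'*v2) / batchSize, which is exactly what c1, c2 and
            //cDiff (computed below) represent.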
            //Compute the column sum of vDiff
            for(j=0; j<numVisibleUnits; j++){
                vDiffSum[j] = 0;
                for(i=0; i<batchIndexs[k].batchSize; i++){
                    vDiffSum[j] += vDiff[i][j];
                }
            }
            
            //Compute the difference between h1 and h2
            hDiff.subtract(h1, h2);
            for(j=0; j<numHiddenUnits; j++){
                hDiffSum[j] = 0;
                for(i=0; i<batchIndexs[k].batchSize; i++){
                    hDiffSum[j] += hDiff[i][j];
                }
            }
            
            //Compute the difference between c1 and c2
            cDiff.subtract(c1,c2);
            
            //Update the weight velocities
            for(i=0; i<numHiddenUnits; i++){
                for(j=0; j<numVisibleUnits; j++){
                    vW_p[i][j] = ((momentum * vW_p[i][j]) + (alpha * cDiff[i][j])) / batchIndexs[k].batchSize;
                }
            }
            for(i=0; i<numVisibleUnits; i++){
                visibleLayerBiasVelocity[i] = ((momentum * visibleLayerBiasVelocity[i]) + (alpha * vDiffSum[i])) / batchIndexs[k].batchSize;
            }
            for(i=0; i<numHiddenUnits; i++){
                hiddenLayerBiasVelocity[i] = ((momentum * hiddenLayerBiasVelocity[i]) + (alpha * hDiffSum[i])) / batchIndexs[k].batchSize;
            }
            
            //Update the weights
            weightsMatrix.add( vW );
            
            //Update the bias for the visible layer
            for(i=0; i<numVisibleUnits; i++){
                visibleLayerBias[i] += visibleLayerBiasVelocity[i];
            }
            
            //Update the bias for the hidden layer
            for(i=0; i<numHiddenUnits; i++){
                hiddenLayerBias[i] += hiddenLayerBiasVelocity[i];
            }
            
            //Compute the reconstruction error
            err = 0;
            for(i=0; i<batchIndexs[k].batchSize; i++){
                for(j=0; j<numVisibleUnits; j++){
                    err += SQR( v1[i][j] - v2[i][j] );
                }
            }
            error += err / batchIndexs[k].batchSize;
        }
        error /= numBatches;
        delta = lastError - error;
        lastError = error;
        
        trainingLog << "Epoch: " << epoch+1 << "/" << maxNumEpochs;
        trainingLog << " Epoch time: " << (timer.getMilliSeconds()-startTime)/1000.0 << " seconds";
        trainingLog << " Learning rate: " << alpha;
        trainingLog << " Momentum: " << momentum;
        trainingLog << " Average reconstruction error: " << error;
        trainingLog << " Delta: " << delta << std::endl;
        
        //Update the learning rate
        alpha *= learningRateUpdate;
        
        trainingResult.setClassificationResult(epoch, error, this);
        trainingResults.push_back(trainingResult);
        trainingResultsObserverManager.notifyObservers( trainingResult );
        
        //Check for convergence
        if( fabs(delta) < minChange ){
            if( ++noChangeCounter >= minNumEpochs ){
                trainingLog << "Stopping training. MinChange limit reached!" << std::endl;
                break;
            }
        }else noChangeCounter = 0;
    }
    
    trainingLog << "Training complete after " << epoch << " epochs. Total training time: " << timer.getMilliSeconds()/1000.0 << " seconds" << std::endl;
    
    trained = true;
    
    return true;
}
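/*
 Usage sketch (a minimal example, not part of the library; assumes the GRT
 headers are installed as <GRT/GRT.h> and the standard GRT setters shown below):
 
     #include <GRT/GRT.h>
     using namespace GRT;
     
     int main(){
         MatrixFloat data( 100, 8 );                 //100 samples, 8 visible units
         for(UINT i=0; i<100; i++)
             for(UINT j=0; j<8; j++)
                 data[i][j] = (i+j) % 2;             //toy binary data
         
         BernoulliRBM rbm;
         rbm.setNumHiddenUnits( 4 );
         rbm.setLearningRate( 0.1 );
         rbm.setMaxNumEpochs( 100 );
         if( !rbm.train_( data ) ) return 1;         //note: data may be rescaled in place
         return 0;
     }
*/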