Example #1
0
static void softmaxGradientForWholeBatch(
    DataSet training_set,
    FeatureType* gradient) {

    // Zero the gradient accumulator: LABEL_CLASS rows of num_features each.
    memset(gradient, 0, training_set.num_features * sizeof(FeatureType) * LABEL_CLASS);

    // Nothing to accumulate for an empty batch; also avoids the 1/0 below.
    if (training_set.num_data_points == 0)
        return;

    // Compute the softmax class probabilities for every data point and
    // concatenate them into one flat buffer laid out as [point][class].
    std::vector<float> probabilities_of_each;
    // Each data point contributes LABEL_CLASS probabilities (the ground-truth
    // matrix below is sized the same way), so reserve once up front.
    probabilities_of_each.reserve(training_set.num_data_points * LABEL_CLASS);
    for (size_t i = 0; i < training_set.num_data_points; i++) {
        size_t idx = i * training_set.num_features;
        std::vector<float> point_probs = softmaxFunctionFloat(
                                                training_set.parameter_vector,
                                                &training_set.data_points[idx],
                                                training_set.num_features);
        // std::vector has no operator+=; insert() is the standard way to
        // concatenate two vectors.
        probabilities_of_each.insert(probabilities_of_each.end(),
                                     point_probs.begin(),
                                     point_probs.end());
    }
    // Vector storage is contiguous; .data() yields a plain array view
    // (well-defined even for an empty vector, unlike &v[0]).
    float* probabilities_array = probabilities_of_each.data();

    // Ground-truth indicator matrix, zero-initialized by the vector ctor:
    // groundTruth[k + i * LABEL_CLASS] == 1 iff data point i has label k
    // (see the softmax exercise in the UFLDL tutorial).
    std::vector<float> groundTruth(LABEL_CLASS * training_set.num_data_points);
    for (size_t i = 0; i < training_set.num_data_points; i++) {
        int label = training_set.labels[i];
        groundTruth[label + i * LABEL_CLASS] = 1.0f;
    }
    float* groundTruth_array = groundTruth.data();

    // In place: probabilities -= groundTruth, i.e. P(y=k|x,theta) - 1{y==k}.
    addVectors(probabilities_array,
               groundTruth_array,
               training_set.num_data_points * LABEL_CLASS,
               -1);

    // Average over the batch: gradient = (1/m) * X^T * (P - 1{y==k}).
    float factor = 1.0f / training_set.num_data_points;
    matrixMatrixMultiply(probabilities_array,
                         training_set.data_points,
                         factor,
                         training_set.num_data_points,
                         training_set.num_features,
                         gradient);

}
char checkLU(){
	/*
	 Tests LU factorization: for each of three condition numbers and five
	 matrix types, builds a test matrix, factorizes it, reconstructs L*U,
	 and checks the max elementwise difference against a tolerance.

	 Test configuration (hard-coded locals, not parameters):
	 int n            Matrix dimension.
	 double tolMax    Tolerance for max difference |L*U - A|.

	 Output:
	 char pass (returned) Whether all residuals are under the tolerance.
	 */

	int j;
	int type;
	int n = 100;
	double tolMax = 1e-10;
	char pass = 1;

	// Condition numbers: well-conditioned, borderline, and near the limit
	// of double precision.
	double cond[3] = {10.0, 1.0 / sqrt(1e-14), 1.0 / 1e-14};
	int numConds = 3;

	double startTime, endTime;

	matrix A = allocAndInitRandomizedMatrix(n, n);

	matrix L = allocAndInitZeroMatrix(n, n);
	matrix U = allocAndInitZeroMatrix(n, n);

	// Holds the reconstruction L*U for the residual check.
	matrix product = allocAndInitZeroMatrix(n, n);

	for(j = 0; j < numConds; j++){

		printf("Tests for cond = %e :\n\n", cond[j]);

		for(type = 0; type < 5; type++){

			// Rebuild A with the requested condition number and type,
			// then perform (and time) the factorization.
			getCondNumberMatrix(&A, cond[j], type);

			startTime = read_timer();
			LUFactorize(A, &L, &U);
			endTime = read_timer();

			printf("Elapsed time = %f seconds.\n", endTime - startTime);

			matrixMatrixMultiply(L, U, &product);

			char output = 0;  // debug toggle: set to 1 to dump the matrices
			if(output){
				printf("Input matrix:\n");
				printMatrix(A);
				printf("U = \n");
				printMatrix(U);
				printf("L = \n");
				printMatrix(L);
				printMatrix(product);
			}

			// Residual: largest elementwise difference between L*U and A.
			double maxDiff = maxDiffMatrix(product, A);

			printf("maxDiff = %e\n", maxDiff);

			if(maxDiff < tolMax){
				printf("LU factorization test passed on matrix of type %d.\n\n", type);
			}
			else{
				printf("LU factorization test failed on matrix of type %d.\n\n", type);
				pass = 0;
			}
		}
	}

	if(pass)
		printf("LU factorization test passed.\n\n");
	else
		printf("LU factorization test failed.\n\n");


	// free resources
	freeMatrix(A);
	freeMatrix(L);
	freeMatrix(U);
	freeMatrix(product);

	return pass;
}