Example #1
void mexFunctionTest(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    if (nrhs != 2)
    {
        mexPrintf("Usage: [score] = LDARegTreePredict( model, feats )\n");
        mexPrintf("\tfeats must be of type SINGLE\n");
        mexErrMsgTxt("Incorrect input format.\n");
    }

    if (nlhs != 1)
        mexErrMsgTxt("One output arg expected");

    #define mFeats (prhs[1])
    #define mModel (prhs[0])

    MatlabInputMatrix<FeatsType> pFeats( mFeats, 0, 0, "feats" );

    RegTreeType tree;
    tree.loadFromMatlab( mModel );

    // map the input features directly (no copy)
    Eigen::Map< const gFeatArrayType >	feats( pFeats.data(), pFeats.rows(), pFeats.cols() );

    gWeightsArrayType pred( pFeats.rows() );

    tree.predict(   MatrixSampleIndexListType(feats),
                    MatrixFeatureValueObjectType(feats),
                    pred );

    MatlabOutputMatrix<double>   outMatrix( &plhs[0], feats.rows(), 1 );
    for (unsigned i=0; i < feats.rows(); i++)
        outMatrix.data()[i] = pred.coeff(i);
}
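
All four listings lean on a MatlabInputMatrix helper whose definition is not shown. As a minimal sketch only (the real class in this codebase may well differ), the constructor presumably verifies the element type and, when the expected row/column counts are nonzero, the dimensions; a count of 0 appears to mean "any size", judging from the call sites. The MxClass trait and the error identifiers below are hypothetical names introduced just for illustration.

#include "mex.h"

// Hypothetical sketch of the MatlabInputMatrix contract assumed above:
// element-type check plus optional dimension check (0 means "any size").
template <typename T> struct MxClass;                 // maps C++ type -> mxClassID
template <> struct MxClass<float>        { static const mxClassID id = mxSINGLE_CLASS; };
template <> struct MxClass<double>       { static const mxClassID id = mxDOUBLE_CLASS; };
template <> struct MxClass<unsigned int> { static const mxClassID id = mxUINT32_CLASS; };

template <typename T>
class MatlabInputMatrix
{
public:
    MatlabInputMatrix(const mxArray *arr, unsigned expectedRows,
                      unsigned expectedCols, const char *name)
    {
        if (mxGetClassID(arr) != MxClass<T>::id)
            mexErrMsgIdAndTxt("sketch:type", "%s has the wrong element type", name);
        mRows = (unsigned) mxGetM(arr);
        mCols = (unsigned) mxGetN(arr);
        if ((expectedRows != 0 && mRows != expectedRows) ||
            (expectedCols != 0 && mCols != expectedCols))
            mexErrMsgIdAndTxt("sketch:size", "%s has the wrong size", name);
        mData = (const T *) mxGetData(arr);
    }

    const T *data() const { return mData; }
    unsigned rows() const { return mRows; }
    unsigned cols() const { return mCols; }

private:
    const T *mData;
    unsigned mRows, mCols;
};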
Example #2
void mexFunctionTest(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    if (nrhs < 2 || nrhs > 3)
    {
        mexPrintf("Usage: [score] = SQBTreesPredict( model, feats, [maxIters] )\n");
        mexPrintf("\tfeats must be of type SINGLE\n");
        mexPrintf("\tmaxIters is optional and limits the amount of weak learners evaluated for classification.");
        mexErrMsgTxt("Incorrect input format.\n");
    }

    if (nlhs != 1)
        mexErrMsgTxt("One output arg expected");

#define mFeats (prhs[1])
#define mModel (prhs[0])
#define mMaxIters (prhs[2])

    MatlabInputMatrix<FeatsType> pFeats( mFeats, 0, 0, "feats" );

    TreeBoosterType TB;

    // load model
    TB.loadFromMatlab( mModel );

    unsigned maxIters = TB.numWeakLearners();
    if (nrhs >= 3)
    {
        MatlabInputMatrix<unsigned int> pMaxIters( mMaxIters, 1, 1, "maxiters" );
        unsigned inputMaxIters = pMaxIters.data()[0];

        if (inputMaxIters == 0)
            mexErrMsgTxt("maxIters must be greater than zero.");

        if (inputMaxIters > maxIters)
            mexPrintf("-- WARNING: maxIters is greater than the number of weaklearners used!\n");
        else
        {
            maxIters = inputMaxIters;
            mexPrintf("Limiting number of weak learners to %d\n", (int)maxIters);
        }
    }

    // for now just copy the values
    gFeatArrayType feats = Eigen::Map< const gFeatArrayType >( pFeats.data(), pFeats.rows(), pFeats.cols() );

    TreeBoosterType::ResponseArrayType newScores;
    TB.predict( TreeBoosterType::SampleListType(feats),
                TreeBoosterType::FeatureValueObjectType(feats),
                newScores,
                maxIters );

    MatlabOutputMatrix<double>   outMatrix( &plhs[0], feats.rows(), 1 );
    for (unsigned i=0; i < feats.rows(); i++)
        outMatrix.data()[i] = newScores.coeff(i);
}
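
The MatlabOutputMatrix counterpart is also not shown. A minimal sketch, again with hypothetical details: it presumably allocates the output mxArray in the requested shape and exposes a raw pointer for writing. Only the double case used by these listings is handled here; a full version would map T to an mxClassID as for inputs.

#include "mex.h"

// Hypothetical sketch of the output helper: allocate plhs[k], expose data().
class MatlabOutputMatrixSketch
{
public:
    MatlabOutputMatrixSketch(mxArray **dest, unsigned rows, unsigned cols)
    {
        *dest = mxCreateNumericMatrix(rows, cols, mxDOUBLE_CLASS, mxREAL);
        mData = (double *) mxGetData(*dest);
    }
    double *data() const { return mData; }
private:
    double *mData;
};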
Example #3
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    if (nrhs != 4)
    {
        mexPrintf("Usage: alpha = LineSearch(prevScores, newScores, labels, lossType)\n");
        mexErrMsgTxt("Incorrect input format\n");
    }

#define mPrevScores (prhs[0])
#define mNewScores (prhs[1])
#define mLabels (prhs[2])
#define mLossType (prhs[3])

    if (nlhs != 1)
        mexErrMsgTxt("One output arg expected");

    char lossName[40];
    if ( mxGetString(mLossType, lossName, sizeof(lossName)) != 0 )
        mexErrMsgTxt("Error reading options.loss");

    SQB::LossType sqbLoss = SQB::ExpLoss;

    if (strcmp(lossName, "exploss") == 0)
        sqbLoss = SQB::ExpLoss;
    else if ( strcmp(lossName, "logloss") == 0 )
        sqbLoss = SQB::LogLoss;
    else if ( strcmp(lossName, "squaredloss") == 0 )
        sqbLoss = SQB::SquaredLoss;
    else
        mexErrMsgTxt("options.loss contains an invalid value");


    MatlabInputMatrix<WeightsType> pPrev( mPrevScores, 0, 1, "prevScores" );
    MatlabInputMatrix<WeightsType> pNew( mNewScores, pPrev.rows(), 1, "newScores" );
    MatlabInputMatrix<WeightsType> pLabels( mLabels, pPrev.rows(), 1, "labels" );

    // create mappings
    ArrayMapType prevMap( pPrev.data(), pPrev.rows(), pPrev.cols() );
    ArrayMapType newMap( pNew.data(), pNew.rows(), pNew.cols() );
    ArrayMapType labelsMap( pLabels.data(), pLabels.rows(), pLabels.cols() );

    SQB::LineSearch< ArrayType, ArrayMapType >  LS( prevMap, newMap, labelsMap, sqbLoss );

    WeightsType alpha = LS.run();

    MatlabOutputMatrix<WeightsType>   outMatrix( &plhs[0], 1, 1 );
    outMatrix.data()[0] = alpha;
}
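
SQB::LineSearch itself is a black box in this listing. As a rough illustration only, not SQB's actual implementation: a scalar line search picks the step length alpha that minimizes the total loss of prevScores + alpha * newScores against the labels. The sketch below does this for the exponential loss with a ternary search, assuming labels in {-1, +1} and an assumed search bracket of [0, 4]; the objective is a sum of exponentials of affine functions of alpha, hence convex, so ternary search converges.

#include <cstddef>
#include <cmath>
#include <vector>

// Total exponential loss of (prev + alpha * dir) against labels in {-1, +1}.
static double expLossAt(double alpha,
                        const std::vector<double> &prev,
                        const std::vector<double> &dir,
                        const std::vector<double> &labels)
{
    double total = 0.0;
    for (std::size_t i = 0; i < prev.size(); ++i)
        total += std::exp(-labels[i] * (prev[i] + alpha * dir[i]));
    return total;
}

// Ternary search for the alpha minimizing the (convex) loss on [0, 4].
double lineSearchAlpha(const std::vector<double> &prev,
                       const std::vector<double> &dir,
                       const std::vector<double> &labels)
{
    double lo = 0.0, hi = 4.0;
    for (int it = 0; it < 100; ++it)
    {
        const double m1 = lo + (hi - lo) / 3.0;
        const double m2 = hi - (hi - lo) / 3.0;
        if (expLossAt(m1, prev, dir, labels) < expLossAt(m2, prev, dir, labels))
            hi = m2;
        else
            lo = m1;
    }
    return 0.5 * (lo + hi);
}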
Example #4
int Alpha(TPoint *ray){
	/* 0. Subinitialization: reset auxiliary state - mark all input axes as available and clear previously selected features */
	properties.resize(d); for (unsigned int i = 0; i < d; i++){properties[i] = i;} // initialize properties: all available
	features.clear();

	outMatrix(x); // debug output of the data matrix

	/* 1. Null-cycle */
	if (numStartFeatures == 2){ // start with two features?
		Feature optFeatureX;
		Feature optFeatureY;
		for (unsigned int i = 0; i < properties.size() - 1; i++){
			for (unsigned int j = i + 1; j < properties.size(); j++){
				/* Calculating minimal error on the plane of the i-th and the j-th properties */
				Feature tmpFeature;
				curFeature = x[properties[i]];
				unsigned int error = DGetMinError(properties[j], &tmpFeature);
#ifdef DEF_OUT_ALPHA
				if (OUT_ALPHA){
					Rcout << properties[i] << ", " << properties[j] << ", " << tmpFeature.angle << ", " << error << ", " << endl;
				}
#endif
				if (error < optFeatureY.error){optFeatureX.number = properties[i]; optFeatureY = tmpFeature;}
			}
		}
		features.push_back(optFeatureX);
		features.push_back(optFeatureY);
		for (unsigned int i = 0; i < properties.size(); i++){ // delete recently found X and Y properties
			if (properties[i] == optFeatureX.number){properties.erase(properties.begin() + i);}
			if (properties[i] == optFeatureY.number){properties.erase(properties.begin() + i);}
		}
		curFeature = x[features[0].number];
		UpdateCurFeature();
		outString("Feature 1:");
		outVector(curFeature);
	}

	/* 2. Main cycle */
	/* Search for an optimal feature subspace while the empirical error rate decreases */
	while (features[features.size() - 1].error > 0 && properties.size() > 0){
		Feature optFeature;
		for (unsigned int i = 0; i < properties.size(); i++){
			/* Calculating minimal error on the plane of the current feature and the i-th property */
			Feature tmpFeature;
			unsigned int error = DGetMinError(properties[i], &tmpFeature);
#ifdef DEF_OUT_ALPHA
			if (OUT_ALPHA){
				Rcout << properties[i] << ", " << tmpFeature.angle << ", " << error << ", " << endl;
			}
#endif
			if (error < optFeature.error){optFeature = tmpFeature;}
		}		
		if (optFeature.error < features[features.size() - 1].error){
			features.push_back(optFeature);
			for (unsigned int i = 0; i < properties.size(); i++){ // delete recently found property
				if (properties[i] == optFeature.number){properties.erase(properties.begin() + i);}
			}
			UpdateCurFeature();
			outString("Feature :");
			outVector(curFeature);
		}else{break;}
	}
	
	outString("Features:");
	outFeatures(features);

	/* Restoring the projection vector */
	GetRay(ray);
	return features[features.size() - 1].error;
}
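
Stripped of the debug output and the two-feature start-up, Alpha() is a greedy forward selection: repeatedly add the remaining input axis that most reduces the empirical error, and stop once no candidate improves on the current error or the error reaches zero. The skeleton below restates just that pattern; greedySelect and the evalErrorWith callback are hypothetical names introduced here for illustration.

#include <cstddef>
#include <functional>
#include <limits>
#include <vector>

// Greedy forward selection over d axes. evalErrorWith(chosen, f) must return
// the empirical error of the set `chosen` extended by axis f.
std::vector<unsigned> greedySelect(unsigned d,
    const std::function<unsigned (const std::vector<unsigned> &, unsigned)> &evalErrorWith)
{
    std::vector<unsigned> remaining, chosen;
    for (unsigned f = 0; f < d; ++f) remaining.push_back(f);

    unsigned bestErr = std::numeric_limits<unsigned>::max();
    while (!remaining.empty() && bestErr > 0)
    {
        std::size_t bestIdx = 0;
        unsigned candErr = std::numeric_limits<unsigned>::max();
        for (std::size_t i = 0; i < remaining.size(); ++i)
        {
            const unsigned e = evalErrorWith(chosen, remaining[i]);
            if (e < candErr) { candErr = e; bestIdx = i; }
        }
        if (candErr >= bestErr) break;   // no improvement: stop
        bestErr = candErr;
        chosen.push_back(remaining[bestIdx]);
        remaining.erase(remaining.begin() + bestIdx);
    }
    return chosen;
}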