bool SVM::train_(ClassificationData &trainingData){
    
    //Reset any model trained previously
    clear();
    
    //Refuse to train on an empty dataset
    if( trainingData.getNumSamples() == 0 ){
        errorLog << "train_(ClassificationData &trainingData) - Training data has zero samples!" << endl;
        return false;
    }
    
    //LIBSVM expects its own data layout, so convert the dataset first
    if( !convertClassificationDataToLIBSVMFormat(trainingData) ){
        errorLog << "train_(ClassificationData &trainingData) - Failed To Convert Labelled Classification Data To LIBSVM Format!" << endl;
        return false;
    }
    
    //Common heuristic: gamma = 1 / number of input dimensions
    if( useAutoGamma ){
        param.gamma = 1.0/numInputDimensions;
    }
    
    //Run the actual LIBSVM training and report any failure
    if( !trainSVM() ){
        errorLog << "train_(ClassificationData &trainingData) - Failed To Train SVM Model!" << endl;
        return false;
    }
    
    return true;
}
// Convenience driver: train an SVM from the given file, print the trained
// model's details, then evaluate it (on the same file, so this measures
// training accuracy, not generalization).
void mySVM::trainTestSVM(const string& fileName)
{
	printf("training SVM, please wait");
	trainSVM(fileName);
	printf("\r"); // carriage return so the next output overwrites the "please wait" line
	showSVMInfo();
	testSVM(fileName);
}
Ejemplo n.º 3
0
// Stage-I training: learn the first-stage template weights from the cached
// positive/negative feature matrices and persist them to disk.
void Objectness::trainStageI()
{
    // Load the pre-computed positive (.xP) and negative (.xN) features
    Mat xP1f, xN1f;
    CV_Assert(matRead(_modelName + ".xP", xP1f) && matRead(_modelName + ".xN", xN1f));

    // Split each feature matrix into per-sample row vectors
    vecM pX, nX;
    pX.reserve(200000);
    nX.reserve(200000);
    for (int r = 0; r < xP1f.rows; r++)
        pX.push_back(xP1f.row(r));
    for (int r = 0; r < xN1f.rows; r++)
        nX.push_back(xN1f.row(r));

    // Train a linear SVM, drop the trailing bias column, and reshape the
    // remaining weights into a _W x _W template
    Mat crntW = trainSVM(pX, nX, L1R_L2LOSS_SVC, 10, 1);
    crntW = crntW.colRange(0, crntW.cols - 1).reshape(1, _W);
    CV_Assert(crntW.size() == Size(_W, _W));
    matWrite(_modelName + ".wS1", crntW);
}
Ejemplo n.º 4
0
int main() {
	cout << "load from file.."<<endl;
	map<string,Mat> classes_training_data;
	FileStorage fs("training_samples.yml",FileStorage::READ);
	vector<string> classes_names;
	fs["classes"] >> classes_names;
	for (vector<string>::iterator it = classes_names.begin(); it != classes_names.end(); ++it) {
		fs[(*it)] >> classes_training_data[*it];
	}
	
	cout << "train SVM.." <<endl;
	string file_postfix = "with_colors";
	Mat& one_class = (*(classes_training_data.begin())).second;
	trainSVM(classes_training_data, file_postfix, one_class.cols, one_class.type());
}
Ejemplo n.º 5
0
// pX1f, nX1f are positive and negative training samples, each is a row vector.
// Packs up to maxTrainNum samples into one feature matrix with +1/-1 labels,
// randomly subsampling the negatives when the total exceeds maxTrainNum,
// then delegates to the matrix-based trainSVM overload.
Mat Objectness::trainSVM(const vector<Mat> &pX1f, const vector<Mat> &nX1f, int sT, double C, double bias, double eps, int maxTrainNum)
{
    // Index permutation over the negatives; shuffled only when we must
    // subsample, so the kept negatives form a random subset.
    vecI ind(nX1f.size());
    for (size_t i = 0; i < ind.size(); i++)
        ind[i] = (int)i;
    int numP = (int)pX1f.size(), feaDim = pX1f[0].cols;
    int totalSample = numP + (int)nX1f.size();
    if (totalSample > maxTrainNum)
        random_shuffle(ind.begin(), ind.end());
    totalSample = min(totalSample, maxTrainNum);

    // BUGFIX: if numP alone exceeds maxTrainNum, the original filled rows
    // [0, numP) of a matrix that only has totalSample (= maxTrainNum) rows,
    // writing out of bounds. Clamp the number of positives copied.
    int keptP = min(numP, totalSample);
    Mat X1f(totalSample, feaDim, CV_32F);
    vecI Y(totalSample);
    for (int i = 0; i < keptP; i++) {
        pX1f[i].copyTo(X1f.row(i));
        Y[i] = 1;
    }
    // Remaining rows are filled with (possibly shuffled) negatives
    for (int i = keptP; i < totalSample; i++) {
        nX1f[ind[i - keptP]].copyTo(X1f.row(i));
        Y[i] = -1;
    }
    return trainSVM(X1f, Y, sT, C, bias, eps);
}
Ejemplo n.º 6
0
// Stage-II training: with the stage-I model loaded, score proposals on every
// training image, label each proposal +1/-1 by IoU (>= 0.5) against the
// ground-truth boxes, then learn a per-quantized-size linear calibration
// (2 weights: scale + bias) over the 1-D stage-I scores.
// numPerSz: number of proposals kept per window size during prediction.
void Objectness::trainStateII(int numPerSz)
{
    loadTrainedModel();
    const int NUM_TRAIN = _voc.trainNum;
    // Per-image collections: quantized size index, +1/-1 label, stage-I score
    vector<vecI> SZ(NUM_TRAIN), Y(NUM_TRAIN);
    vector<vecF> VAL(NUM_TRAIN);

    // Images are independent, so score them in parallel
    #pragma omp parallel for
    for (int i = 0; i < _voc.trainNum; i++)	{
        const vector<Vec4i> &bbgts = _voc.gtTrainBoxes[i];
        ValStructVec<float, Vec4i> valBoxes;
        vecI &sz = SZ[i], &y = Y[i];
        vecF &val = VAL[i];
        CStr imgPath = format(_S(_voc.imgPathW), _S(_voc.trainSet[i]));
        predictBBoxSI(imread(imgPath), valBoxes, sz, numPerSz, false);
        const int num = valBoxes.size();
        CV_Assert(sz.size() == num);
        y.resize(num), val.resize(num);
        for (int j = 0; j < num; j++) {
            Vec4i bb = valBoxes[j];
            val[j] = valBoxes(j);
            // Positive iff the proposal overlaps some ground truth by >= 0.5
            y[j] = maxIntUnion(bb, bbgts) >= 0.5 ? 1 : -1;
        }
    }

    // Regroup the scores by quantized size, split into positives/negatives.
    // Each sample is a 1x1 Mat wrapping the score in VAL (VAL stays alive
    // until training below, so the wrapped pointers remain valid).
    const int NUM_SZ = _svmSzIdxs.size();
    const int maxTrainNum = 100000;
    vector<vecM> rXP(NUM_SZ), rXN(NUM_SZ);
    for (int r = 0; r < NUM_SZ; r++) {
        rXP[r].reserve(maxTrainNum);
        rXN[r].reserve(1000000);
    }
    for (int i = 0; i < NUM_TRAIN; i++) {
        const vecI &sz = SZ[i], &y = Y[i];
        vecF &val = VAL[i];
        int num = sz.size();
        for (int j = 0; j < num; j++) {
            int r = sz[j];
            CV_Assert(r >= 0 && r < NUM_SZ);
            if (y[j] == 1)
                rXP[r].push_back(Mat(1, 1, CV_32F, &val[j]));
            else
                rXN[r].push_back(Mat(1, 1, CV_32F, &val[j]));
        }
    }

    // Train one tiny (scale, bias) SVM per window size; stack into wMat
    Mat wMat(NUM_SZ, 2, CV_32F);
    for (int i = 0; i < NUM_SZ; i++) {
        const vecM &xP = rXP[i], &xN = rXN[i];
        if (xP.size() < 10 || xN.size() < 10)
            // BUGFIX: size() is size_t; passing it to %d is undefined
            // behavior on LP64 — cast to int to match the format specifier.
            printf("Warning %s:%d not enough training sample for r[%d] = %d. P = %d, N = %d\n", __FILE__, __LINE__, i, _svmSzIdxs[i], (int)xP.size(), (int)xN.size());
        for (size_t k = 0; k < xP.size(); k++)
            CV_Assert(xP[k].size() == Size(1, 1) && xP[k].type() == CV_32F);

        Mat wr = trainSVM(xP, xN, L1R_L2LOSS_SVC, 100, 1);
        CV_Assert(wr.size() == Size(2, 1));
        wr.copyTo(wMat.row(i));
    }
    matWrite(_modelName + ".wS2", wMat);
    _svmReW1f = wMat;
}