/* --------------------------------------------------------------
  Finds the second Lagrange multiplier to be optimized.

  Given the index i1 of the first multiplier, checks whether i1
  violates the KKT conditions (within 'tolerance') and, if so,
  tries to find a partner i2 and jointly optimize the pair via
  takeStep().  Returns 1 if some pair was changed, 0 otherwise.

  Uses file-level globals defined elsewhere in this file:
  target[], alpha[], error_cache[], N, tolerance, the per-example
  upper bound C(i), learned_func(i) and takeStep(i1,i2).
 -------------------------------------------------------------- */
long examineExample( long i1 )
{
   double y1, alpha1, E1, r1;
   double tmax;
   double E2, temp;
   long k, i2;
   long k0;

   y1 = target[i1];
   alpha1 = alpha[i1];

   /* Error on example i1: cached for non-bound multipliers,
      otherwise recomputed from the current decision function. */
   if( alpha1 > 0 && alpha1 < C(i1) )
      E1 = error_cache[i1];
   else
      E1 = learned_func(i1) - y1;

   r1 = y1 * E1;

   /* KKT violation check: proceed only if i1 can be improved. */
   if(( r1 < -tolerance && alpha1 < C(i1) )
      || (r1 > tolerance && alpha1 > 0)) {
      /* Try i2 by three ways; if successful, then immediately return 1; */

      /* 1st heuristic: among non-bound examples pick the i2 that
         maximizes |E1 - E2| (largest expected step). */
      for( i2 = (-1), tmax = 0, k = 0; k < N; k++ ) {
         if( alpha[k] > 0 && alpha[k] < C(k) ) {
            E2 = error_cache[k];
            temp = fabs(E1 - E2);
            if( temp > tmax ) {
               tmax = temp;
               i2 = k;
            }
         }
      }
      if( i2 >= 0 ) {
         if( takeStep(i1,i2) )
            return( 1 );
      }

      /* 2nd heuristic: sweep all non-bound examples.  With RANDOM
         defined the sweep starts at a random offset (the two #ifdef
         branches provide alternative loop headers for the SAME loop
         body and closing brace below). */
#ifdef RANDOM
      for( k0 = rand(), k = k0; k < N + k0; k++ ) {
         i2 = k % N;
#else
      for( k = 0; k < N; k++) {
         i2 = k;
#endif
         if( alpha[i2] > 0 && alpha[i2] < C(i2) ) {
            if( takeStep(i1,i2) )
               return( 1 );
         }
      }

      /* 3rd heuristic: sweep the entire training set (again with an
         optional random starting offset). */
#ifdef RANDOM
      for( k0 = rand(), k = k0; k < N + k0; k++ ) {
         i2 = k % N;
#else
      for( k = 0; k < N; k++) {
         i2 = k;
#endif
         if( takeStep(i1,i2) )
            return( 1 );
      }
   } /* if( ... ) */

   /* No pair could be improved starting from i1. */
   return( 0 );
}

/* --------------------------------------------------------------
  Main SMO optimization cycle.

  Alternates between full sweeps over all examples and sweeps over
  the non-bound examples only (0 < alpha < C), until a full sweep
  changes nothing — the standard outer loop of Platt's SMO.
 -------------------------------------------------------------- */
void runSMO( void )
{
   long numChanged = 0;
   long examineAll = 1;
   long k;

   while( numChanged > 0 || examineAll ) {
      numChanged = 0;

      if( examineAll ) {
         /* Full sweep over the whole training set. */
         for( k = 0; k < N; k++ ) {
            numChanged += examineExample( k );
         }
      }
      else {
         /* Sweep only examples whose multiplier is non-bound. */
         for( k = 0; k < N; k++ ) {
            if( alpha[k] != 0 && alpha[k] != C(k) )
               numChanged += examineExample( k );
         }
      }

      /* After a full sweep switch to non-bound sweeps; fall back to
         a full sweep when a non-bound sweep makes no progress. */
      if( examineAll == 1 )
         examineAll = 0;
      else if( numChanged == 0 )
         examineAll = 1;
   }
}

/* ==============================================================
  Main MEX function - interface to Matlab.
   ============================================================== */
/* MEX gateway for the kernel SMO trainer.
 *
 * Inputs (prhs):
 *   [0] X      real matrix [dim x N] of training patterns
 *   [1] I      real vector of labels coded as 1/2
 *   [2] ker    kernel identifier (decoded by kernel_id())
 *   [3] arg    kernel argument(s)
 *   [4] C      trade-off constant: scalar, pair [C1;C2] (per class),
 *              or a length-N vector (one bound per example)
 *   [5] eps    (optional) scalar eps
 *   [6] tol    (optional) scalar tolerance
 *   [7] alpha0 (optional) initial Lagrange multipliers
 *   [8] bias0  (optional) initial bias
 *
 * Outputs (plhs):
 *   [0] alpha, [1] bias, [2] #SV, [3] kernel-evaluation count,
 *   [4] training error rate, [5] margin 1/||w||.
 *
 * Fills the file-level globals (dataA, dataB, dim, N, ker, arg1,
 * const_C, target, alpha, b, error_cache, ...) and then calls runSMO().
 */
void mexFunction( int nlhs, mxArray *plhs[],
                  int nrhs, const mxArray*prhs[] )
{
   long i,j ;
   double *labels12, *initAlpha, *nsv, *tmp, *trn_err, *margin;
   double nerr;
   double C1, C2;

   /* ---- get input arguments ----------------------- */
   if(nrhs < 5) mexErrMsgTxt("Not enough input arguments.");

   /* data matrix [dim x N ] */
   if( !mxIsNumeric(prhs[0]) || !mxIsDouble(prhs[0]) ||
       mxIsEmpty(prhs[0]) || mxIsComplex(prhs[0]) )
      mexErrMsgTxt("Input X must be a real matrix.");

   /* vector of labels (1,2) */
   if( !mxIsNumeric(prhs[1]) || !mxIsDouble(prhs[1]) ||
       mxIsEmpty(prhs[1]) || mxIsComplex(prhs[1]) ||
       (mxGetN(prhs[1]) != 1 && mxGetM(prhs[1]) != 1))
      mexErrMsgTxt("Input I must be a real vector.");
   /* NOTE(review): the length of I is never checked against N — a
      shorter label vector would be read out of bounds below; confirm
      callers guarantee length(I) == N. */

   labels12 = mxGetPr(prhs[1]);   /* labels (1,2) */
   dataA = mxGetPr(prhs[0]);      /* pointer at patterns */
   dataB = dataA;                 /* both kernel operands point at X */
   dim = mxGetM(prhs[0]);         /* data dimension */
   N = mxGetN(prhs[0]);           /* number of data */

   /* kernel identifier */
   ker = kernel_id( prhs[2] );
   if( ker == -1 )
      mexErrMsgTxt("Improper kernel identifier.");

   /* get pointer to arguments */
   arg1 = mxGetPr(prhs[3]);

   /* one or two real trade-off constant(s) */
   if( !mxIsNumeric(prhs[4]) || !mxIsDouble(prhs[4]) ||
       mxIsEmpty(prhs[4]) || mxIsComplex(prhs[4]) ||
       (mxGetN(prhs[4]) != 1 && mxGetM(prhs[4]) != 1 ))
      mexErrMsgTxt("Improper input argument C.");
   else {
      /* allocate memory for constant C */
      if( (const_C = mxCalloc(N, sizeof(double) )) == NULL) {
         mexErrMsgTxt("Not enough memory.");
      }

      /* C given as a single scalar: same bound for every example. */
      if( MAX( mxGetN(prhs[4]), mxGetM(prhs[4])) == 1 ) {
         C1 = mxGetScalar(prhs[4]);
         for( i=0; i < N; i++ ) const_C[i] = C1;
      }
      /* C given as a pair: C1 for class 1, C2 for class 2. */
      else if( MAX( mxGetN(prhs[4]), mxGetM(prhs[4])) == 2 ) {
         tmp = mxGetPr(prhs[4]);
         C1 = tmp[0];
         C2 = tmp[1];
         for( i=0; i < N; i++ ) {
            if( labels12[i]==1) const_C[i] = C1; else const_C[i] = C2;
         }
      }
      /* C given per example. */
      else if( MAX( mxGetN(prhs[4]), mxGetM(prhs[4])) == N ) {
         tmp = mxGetPr(prhs[4]);
         for( i=0; i < N; i++ ) const_C[i] = tmp[i];
      }
      else {
         mexErrMsgTxt("Improper argument C.");
      }
   }

   /* real parameter eps */
   if( nrhs >= 6 ) {
      if( !mxIsNumeric(prhs[5]) || !mxIsDouble(prhs[5]) ||
          mxIsEmpty(prhs[5]) || mxIsComplex(prhs[5]) ||
          mxGetN(prhs[5]) != 1 || mxGetM(prhs[5]) != 1 )
         mexErrMsgTxt("Input eps must be a scalar.");
      else
         eps = mxGetScalar(prhs[5]);       /* take eps argument */
   }

   /* real parameter tol */
   if(nrhs >= 7) {
      if( !mxIsNumeric(prhs[6]) || !mxIsDouble(prhs[6]) ||
          mxIsEmpty(prhs[6]) || mxIsComplex(prhs[6]) ||
          mxGetN(prhs[6]) != 1 || mxGetM(prhs[6]) != 1 )
         mexErrMsgTxt("Input tol must be a scalar.");
      else
         tolerance = mxGetScalar(prhs[6]); /* take tolerance argument */
   }

   /* real vector of Lagrangeian multipliers */
   if(nrhs >= 8) {
      if( !mxIsNumeric(prhs[7]) || !mxIsDouble(prhs[7]) ||
          mxIsEmpty(prhs[7]) || mxIsComplex(prhs[7]) ||
          (mxGetN(prhs[7]) != 1 && mxGetM(prhs[7]) != 1 ))
         mexErrMsgTxt("Input Alpha must be a vector.");
      /* NOTE(review): length of Alpha is not checked against N either —
         TODO confirm callers pass exactly N multipliers. */
   }

   /* real scalar - bias */
   if( nrhs >= 9 ) {
      if( !mxIsNumeric(prhs[8]) || !mxIsDouble(prhs[8]) ||
          mxIsEmpty(prhs[8]) || mxIsComplex(prhs[8]) ||
          mxGetN(prhs[8]) != 1 || mxGetM(prhs[8]) != 1 )
         mexErrMsgTxt("Input bias must be a scalar.");
   }

   /* ---- init variables ------------------------------- */
   ker_cnt = 0;   /* reset kernel-evaluation counter */

   /* allocate memory for targets (labels) (1,-1) */
   if( (target = mxCalloc(N, sizeof(double) )) == NULL) {
      mexErrMsgTxt("Not enough memory.");
   }

   /* transform labels12 (1,2) to targets (1,-1): 1 -> +1, 2 -> -1 */
   for( i = 0; i < N; i++ ) {
      target[i] = - labels12[i]*2 + 3;
   }

   /* create output variable for bias */
   plhs[1] = mxCreateDoubleMatrix(1,1,mxREAL);
   b = mxGetPr(plhs[1]);

   /* take init value of bias if given; sign is flipped here and
      flipped back after runSMO() — SMO internally uses f(x)-b. */
   if( nrhs >= 9 ) {
      *b = -mxGetScalar(prhs[8]);
   }

   /* allocate memory for error_cache */
   if( (error_cache = mxCalloc(N, sizeof(double) )) == NULL) {
      mexErrMsgTxt("Not enough memory for error cache.");
   }

   /* create vector for Lagrangeians */
   plhs[0] = mxCreateDoubleMatrix(N,1,mxREAL);
   alpha = mxGetPr(plhs[0]);

   /* if Lagrangeians given then use them as initial values */
   if( nrhs >= 8 ) {
      initAlpha = mxGetPr(prhs[7]);
      for( i = 0; i < N; i++ ) {
         alpha[i] = initAlpha[i];
      }

      /* Init error cache for non-bound multipliers. */
      for( i = 0; i < N; i++ ) {
         if( alpha[i] != 0 && alpha[i] != C(i) ) {
            error_cache[i] = learned_func(i) - target[i];
         }
      }
   }

   /* ---- run SMO ------------------------------------------- */
   runSMO();

   /* ---- outputs --------------------------------- */
   if( nlhs >= 3 ) {
      /* count number of support vectors; multipliers below ZERO_LIM
         are clamped to exactly zero as a side effect */
      plhs[2] = mxCreateDoubleMatrix(1,1,mxREAL);
      nsv = mxGetPr(plhs[2]);
      *nsv = 0;
      for( i = 0; i < N; i++ ) {
         if( alpha[i] > ZERO_LIM ) (*nsv)++; else alpha[i] = 0;
      }
   }

   if( nlhs >= 4 ) {
      /* number of kernel evaluations performed */
      plhs[3] = mxCreateDoubleMatrix(1,1,mxREAL);
      (*mxGetPr(plhs[3])) = (double)ker_cnt;
   }

   if( nlhs >= 5) {
      /* evaluates classification error on training patterns */
      plhs[4] = mxCreateDoubleMatrix(1,1,mxREAL);
      trn_err = mxGetPr(plhs[4]);
      nerr = 0;
      for( i = 0; i < N; i++ ) {
         if( target[i] == 1 ) {
            if( learned_func(i) < 0 ) nerr++;
         }
         else if( learned_func(i) >= 0 ) nerr++;
      }
      *trn_err = nerr/N;
   }

   if( nlhs >= 6) {
      /* compute margin = 1/||w|| from the support-vector expansion */
      plhs[5] = mxCreateDoubleMatrix(1,1,mxREAL);
      margin = mxGetPr(plhs[5]);
      *margin = 0;
      for( i = 0; i < N; i++ ) {
         for( j = 0; j < N; j++ ) {
            if( alpha[i] > 0 && alpha[j] > 0 )
               *margin += alpha[i]*alpha[j]*target[i]*target[j]*kernel(i,j);
         }
      }
      *margin = 1/sqrt(*margin);
   }

   /* decision function of type <w,x>+b is used */
   *b = -*b;

   /* ----- free memory --------------------------------------- */
   mxFree( error_cache );
   mxFree( target );
}
/* --------------------------------------------------------------
  Finds the second Lagrange multiplier to be optimized
  (linear-kernel variant).

  Same structure as the kernel version above, but the error is
  computed directly as w*data[i] - b - target[i] instead of via a
  cached kernel expansion, and the trade-off constant C is a single
  scalar shared by all examples.

  NOTE(review): w*data[i1] multiplies the scalar weight w by one
  array element, i.e. it treats each pattern as 1-dimensional —
  presumably this variant assumes dim == 1; confirm against callers.

  Returns 1 if a pair of multipliers was changed, 0 otherwise.
 -------------------------------------------------------------- */
long examineExample( long i1 )
{
   double y1, alpha1, E1, r1;
   double tmax;
   double E2, temp;
   long k, i2;
   long k0;

   y1 = target[i1];
   alpha1 = alpha[i1];

   /* Error on example i1 under the current linear model. */
   E1 = w*data[i1] - *b - y1;

   r1 = y1 * E1;

   /* KKT violation check: proceed only if i1 can be improved. */
   if(( r1 < -tolerance && alpha1 < C )
      || (r1 > tolerance && alpha1 > 0)) {
      /* Try i2 by three ways; if successful, then immediately return 1; */

      /* 1st heuristic: among non-bound examples pick the i2 that
         maximizes |E1 - E2|. */
      for( i2 = (-1), tmax = 0, k = 0; k < num_data; k++ ) {
         if( alpha[k] > 0 && alpha[k] < C ) {
            E2 = w*data[k] - *b - target[k];
            temp = fabs(E1 - E2);
            if( temp > tmax ) {
               tmax = temp;
               i2 = k;
            }
         }
      }
      if( i2 >= 0 ) {
         if( takeStep(i1,i2) )
            return( 1 );
      }

      /* 2nd heuristic: sweep all non-bound examples; with RANDOM
         defined the sweep starts at a random offset (the #ifdef
         branches are alternative headers for the SAME loop body). */
#ifdef RANDOM
      for( k0 = rand(), k = k0; k < num_data + k0; k++ ) {
         i2 = k % num_data;
#else
      for( k = 0; k < num_data; k++) {
         i2 = k;
#endif
         if( alpha[i2] > 0 && alpha[i2] < C ) {
            if( takeStep(i1,i2) )
               return( 1 );
         }
      }

      /* 3rd heuristic: sweep the entire training set. */
#ifdef RANDOM
      for( k0 = rand(), k = k0; k < num_data + k0; k++ ) {
         i2 = k % num_data;
#else
      for( k = 0; k < num_data; k++) {
         i2 = k;
#endif
         if( takeStep(i1,i2) )
            return( 1 );
      }
   } /* if( ... ) */

   /* No pair could be improved starting from i1. */
   return( 0 );
}

/* --------------------------------------------------------------
  Main SMO optimization cycle (linear-kernel variant).

  Alternates full sweeps with sweeps over non-bound examples
  (0 < alpha < C) until a full sweep changes nothing.
 -------------------------------------------------------------- */
void runSMO( void )
{
   long numChanged = 0;
   long examineAll = 1;
   long k;

   while( numChanged > 0 || examineAll ) {
      numChanged = 0;

      if( examineAll ) {
         /* Full sweep over the whole training set. */
         for( k = 0; k < num_data; k++ ) {
            numChanged += examineExample( k );
         }
      }
      else {
         /* Sweep only examples whose multiplier is non-bound. */
         for( k = 0; k < num_data; k++ ) {
            if( alpha[k] != 0 && alpha[k] != C )
               numChanged += examineExample( k );
         }
      }

      /* Alternate between full and non-bound sweeps. */
      if( examineAll == 1 )
         examineAll = 0;
      else if( numChanged == 0 )
         examineAll = 1;
   }
}

/* ==============================================================
  Main MEX function - interface to Matlab.
============================================================== */ void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray*prhs[] ) { long i,j ; double *labels12, *nsv, *trn_err, *margin; double nerr; /* ---- check number of input arguments ------------- */ if(nrhs != 5 ) mexErrMsgTxt("Incorrect number of input arguments."); if(nlhs < 2) mexErrMsgTxt("Not enough output arguments."); /* ---- get input arguments ----------------------- */ labels12 = mxGetPr(prhs[1]); /* labels (1,2) */ data = mxGetPr(prhs[0]); /* pointer at data */ dim = mxGetM(prhs[0]); /* data dimension */ num_data = mxGetN(prhs[0]); /* number of data */ C = mxGetScalar( prhs[2] ); eps = mxGetScalar( prhs[3] ); tolerance = mxGetScalar( prhs[4] ); /* ---- init variables ------------------------------- */ ker_cnt=0; /* num of dot product evaluations */ /* allocate memory for targets (labels) (1,-1) */ if( (target = (double*)mxCalloc(num_data, sizeof(double) )) == NULL) { mexErrMsgTxt("Not enough memory."); } /* transform labels12 (1,2) from to targets (1,-1) */ for( i = 0; i < num_data; i++ ) { target[i] = - labels12[i]*2 + 3; } /* create output variable for bias */ plhs[1] = mxCreateDoubleMatrix(1,1,mxREAL); b = mxGetPr(plhs[1]); *b= 0; /* create vector for Lagrangeians */ plhs[0] = mxCreateDoubleMatrix(num_data,1,mxREAL); alpha = mxGetPr(plhs[0]); /* inicialize alpha */ for( i = 0; i < num_data; i++ ) { alpha[i] = 0; } w=0; /* ---- run SMO ------------------------------------------- */ runSMO(); /* ---- outputs ---------------------------------- */ if( nlhs >= 3 ) { /* count number of support vectors */ plhs[2] = mxCreateDoubleMatrix(1,1,mxREAL); nsv = mxGetPr(plhs[2]); *nsv = 0; for( i = 0; i < num_data; i++ ) { if( alpha[i] > 0) (*nsv)++; } } if( nlhs >= 4 ) { /* number of used iterations */ plhs[3] = mxCreateDoubleMatrix(1,1,mxREAL); (*mxGetPr(plhs[3])) = (double)ker_cnt; } if( nlhs >= 5) { /* evaluates classification error on traning patterns */ plhs[4] = 
mxCreateDoubleMatrix(1,1,mxREAL); trn_err = mxGetPr(plhs[4]); *trn_err = 0; for( i = 0; i < num_data; i++ ) { if( target[i]*(w*data[i]-*b) < 0) (*trn_err)++; } *trn_err = (*trn_err)/(double)num_data; } if( nlhs >= 6) { /* compute margin */ plhs[5] = mxCreateDoubleMatrix(1,1,mxREAL); margin = mxGetPr(plhs[5]); *margin = 1/sqrt(w*w); } /* decision function of type <w,x>+b is used */ *b = -*b; /* ----- free memory --------------------------------------- */ mxFree( target ); }