Example #1
    void prepare_to_train( const Mat& inputs, const Mat& outputs,
                           Mat& sample_weights, int flags )
    {
        if( layer_sizes.empty() )
            CV_Error( CV_StsError,
                     "The network has not been created. Use method create or the appropriate constructor" );

        if( (inputs.type() != CV_32F && inputs.type() != CV_64F) ||
            inputs.cols != layer_sizes[0] )
            CV_Error( CV_StsBadArg,
                     "input training data should be a floating-point matrix with "
                     "the number of rows equal to the number of training samples and "
                     "the number of columns equal to the size of 0-th (input) layer" );

        if( (outputs.type() != CV_32F && outputs.type() != CV_64F) ||
            outputs.cols != layer_sizes.back() )
            CV_Error( CV_StsBadArg,
                     "output training data should be a floating-point matrix with "
                     "the number of rows equal to the number of training samples and "
                     "the number of columns equal to the size of last (output) layer" );

        if( inputs.rows != outputs.rows )
            CV_Error( CV_StsUnmatchedSizes, "The numbers of input and output samples do not match" );

        // normalize the sample weights so that they sum to 1
        Mat temp;
        double s = sum(sample_weights)[0];
        sample_weights.convertTo(temp, CV_64F, 1./s);
        sample_weights = temp;

        calc_input_scale( inputs, flags );
        calc_output_scale( outputs, flags );
    }
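
A minimal caller-side sketch for context (not part of the example above): with the C++ ml API, a validation/normalization helper like prepare_to_train() runs when the model is trained, so the caller only has to make sure the sample and response matrices match the input and output layer sizes it checks. The layer sizes and the toy XOR data below are assumptions made up for illustration.

    #include <opencv2/ml.hpp>
    using namespace cv;
    using namespace cv::ml;

    int main()
    {
        // 2 inputs, 4 hidden units, 1 output -- must match the training matrices below
        Mat layers = (Mat_<int>(3, 1) << 2, 4, 1);

        Ptr<ANN_MLP> mlp = ANN_MLP::create();
        mlp->setLayerSizes(layers);
        mlp->setActivationFunction(ANN_MLP::SIGMOID_SYM);
        mlp->setTrainMethod(ANN_MLP::BACKPROP);

        // one row per sample; column counts equal the sizes of the 0-th (input)
        // and last (output) layers, exactly what prepare_to_train() verifies
        Mat samples   = (Mat_<float>(4, 2) << 0,0,  0,1,  1,0,  1,1);
        Mat responses = (Mat_<float>(4, 1) << 0, 1, 1, 0);

        mlp->train(TrainData::create(samples, ROW_SAMPLE, responses));
        return 0;
    }
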
Example #2
bool CvANN_MLP::prepare_to_train( const CvMat* _inputs, const CvMat* _outputs,
            const CvMat* _sample_weights, const CvMat* _sample_idx,
            CvVectors* _ivecs, CvVectors* _ovecs, double** _sw, int _flags )
{
    bool ok = false;
    CvMat* sample_idx = 0;
    CvVectors ivecs, ovecs;
    double* sw = 0;
    int count = 0;

    CV_FUNCNAME( "CvANN_MLP::prepare_to_train" );

    ivecs.data.ptr = ovecs.data.ptr = 0;
    assert( _ivecs && _ovecs );

    __BEGIN__;

    const int* sidx = 0;
    int i, sw_type = 0, sw_count = 0;
    int sw_step = 0;
    double sw_sum = 0;

    if( !layer_sizes )
        CV_ERROR( CV_StsError,
        "The network has not been created. Use method create or the appropriate constructor" );

    if( !CV_IS_MAT(_inputs) || (CV_MAT_TYPE(_inputs->type) != CV_32FC1 &&
        CV_MAT_TYPE(_inputs->type) != CV_64FC1) || _inputs->cols != layer_sizes->data.i[0] )
        CV_ERROR( CV_StsBadArg,
        "input training data should be a floating-point matrix with "
        "the number of rows equal to the number of training samples and "
        "the number of columns equal to the size of 0-th (input) layer" );

    if( !CV_IS_MAT(_outputs) || (CV_MAT_TYPE(_outputs->type) != CV_32FC1 &&
        CV_MAT_TYPE(_outputs->type) != CV_64FC1) ||
        _outputs->cols != layer_sizes->data.i[layer_sizes->cols - 1] )
        CV_ERROR( CV_StsBadArg,
        "output training data should be a floating-point matrix with "
        "the number of rows equal to the number of training samples and "
        "the number of columns equal to the size of last (output) layer" );

    if( _inputs->rows != _outputs->rows )
        CV_ERROR( CV_StsUnmatchedSizes, "The numbers of input and output samples do not match" );

    // an optional sample index array selects a subset of the training rows
    if( _sample_idx )
    {
        CV_CALL( sample_idx = cvPreprocessIndexArray( _sample_idx, _inputs->rows ));
        sidx = sample_idx->data.i;
        count = sample_idx->cols + sample_idx->rows - 1;
    }
    else
        count = _inputs->rows;

    if( _sample_weights )
    {
        if( !CV_IS_MAT(_sample_weights) )
            CV_ERROR( CV_StsBadArg, "sample_weights (if passed) must be a valid matrix" );

        sw_type = CV_MAT_TYPE(_sample_weights->type);
        sw_count = _sample_weights->cols + _sample_weights->rows - 1;

        if( (sw_type != CV_32FC1 && sw_type != CV_64FC1) ||
            (_sample_weights->cols != 1 && _sample_weights->rows != 1) ||
            (sw_count != count && sw_count != _inputs->rows) )
            CV_ERROR( CV_StsBadArg,
            "sample_weights must be 1d floating-point vector containing weights "
            "of all or selected training samples" );

        sw_step = CV_IS_MAT_CONT(_sample_weights->type) ? 1 :
            _sample_weights->step/CV_ELEM_SIZE(sw_type);
        
        CV_CALL( sw = (double*)cvAlloc( count*sizeof(sw[0]) ));
    }

    CV_CALL( ivecs.data.ptr = (uchar**)cvAlloc( count*sizeof(ivecs.data.ptr[0]) ));
    CV_CALL( ovecs.data.ptr = (uchar**)cvAlloc( count*sizeof(ovecs.data.ptr[0]) ));
    
    ivecs.type = CV_MAT_TYPE(_inputs->type);
    ovecs.type = CV_MAT_TYPE(_outputs->type);
    ivecs.count = ovecs.count = count;

    // gather row pointers (and per-sample weights) for the selected samples
    for( i = 0; i < count; i++ )
    {
        int idx = sidx ? sidx[i] : i;
        ivecs.data.ptr[i] = _inputs->data.ptr + idx*_inputs->step;
        ovecs.data.ptr[i] = _outputs->data.ptr + idx*_outputs->step;
        if( sw )
        {
            int si = sw_count == count ? i : idx;
            double w = sw_type == CV_32FC1 ?
                (double)_sample_weights->data.fl[si*sw_step] :
                _sample_weights->data.db[si*sw_step];
            sw[i] = w;
            if( w < 0 )
                CV_ERROR( CV_StsOutOfRange, "some of sample weights are negative" );
            sw_sum += w;
        }
    }

    // normalize weights
    if( sw )
    {
        sw_sum = sw_sum > DBL_EPSILON ? 1./sw_sum : 0;
        for( i = 0; i < count; i++ )
            sw[i] *= sw_sum;
    }

    calc_input_scale( &ivecs, _flags );
    CV_CALL( calc_output_scale( &ovecs, _flags ));

    ok = true;

    __END__;

    if( !ok )
    {
        cvFree( &ivecs.data.ptr );
        cvFree( &ovecs.data.ptr );
        cvFree( &sw );
    }

    cvReleaseMat( &sample_idx );
    *_ivecs = ivecs;
    *_ovecs = ovecs;
    *_sw = sw;

    return ok;
}
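
For comparison, a minimal caller-side sketch for the legacy C-based API of Example #2, under the assumption that the default CvANN_MLP_TrainParams are acceptable for a demo; the layer sizes and toy XOR data are again made up for illustration. train() uses prepare_to_train() above to validate and repack the data, and passing 0 for the sample weights and sample indices takes the "use all samples with equal weights" branches.

    #include <opencv2/ml/ml.hpp>

    int main()
    {
        // 1 x 3 integer matrix of layer sizes: 2 inputs, 4 hidden, 1 output
        int layer_sz[] = { 2, 4, 1 };
        CvMat layer_sizes = cvMat( 1, 3, CV_32SC1, layer_sz );

        // one row per training sample; column counts equal the input/output layer sizes
        float in[]  = { 0,0,  0,1,  1,0,  1,1 };
        float out[] = { 0, 1, 1, 0 };
        CvMat inputs  = cvMat( 4, 2, CV_32FC1, in );
        CvMat outputs = cvMat( 4, 1, CV_32FC1, out );

        CvANN_MLP mlp( &layer_sizes, CvANN_MLP::SIGMOID_SYM );

        // sample weights and sample indices are optional; passing 0 for both
        // skips the corresponding branches in prepare_to_train()
        mlp.train( &inputs, &outputs, 0, 0 );
        return 0;
    }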