/* Draw the ball as a rough filled circle of radius 3 pixels, built from
   horizontal bands of rectangles centred on the scaled (x, y) position. */
void draw_ball(pixbuf_t *pixbuf, int x, int y, int color)
{
	x = scale_input(x);
	y = scale_input(y);
	graphics_draw_rectangle(pixbuf, x-1, y+3, x+1, y+3, color);
	graphics_draw_rectangle(pixbuf, x-2, y+2, x+2, y+2, color);
	graphics_draw_rectangle(pixbuf, x-3, y-1, x+3, y+1, color);
	graphics_draw_rectangle(pixbuf, x-2, y-2, x+2, y-2, color);
	graphics_draw_rectangle(pixbuf, x-1, y-3, x+1, y-3, color);
}
Example #2
    float predict( InputArray _inputs, OutputArray _outputs, int ) const
    {
        if( !trained )
            CV_Error( CV_StsError, "The network has not been trained or loaded" );

        Mat inputs = _inputs.getMat();
        int type = inputs.type(), l_count = layer_count();
        int n = inputs.rows, dn0 = n;

        CV_Assert( (type == CV_32F || type == CV_64F) && inputs.cols == layer_sizes[0] );
        int noutputs = layer_sizes[l_count-1];
        Mat outputs;

        // every sample needs 2*max_lsize doubles of scratch space (two ping-pong layer buffers)
        int min_buf_sz = 2*max_lsize;
        int buf_sz = n*min_buf_sz;

        if( buf_sz > max_buf_sz )
        {
            // too many rows to process at once: cap the chunk size dn0 so the buffer fits
            dn0 = max_buf_sz/min_buf_sz;
            dn0 = std::max( dn0, 1 );
            buf_sz = dn0*min_buf_sz;
        }

        // scratch storage, plus room for the outputs when the caller does not request an output array
        cv::AutoBuffer<double> _buf(buf_sz+noutputs);
        double* buf = _buf;

        if( !_outputs.needed() )
        {
            CV_Assert( n == 1 );
            outputs = Mat(n, noutputs, type, buf + buf_sz);
        }
        else
        {
            _outputs.create(n, noutputs, type);
            outputs = _outputs.getMat();
        }

        // run the samples through the network in chunks of at most dn0 rows
        int dn = 0;
        for( int i = 0; i < n; i += dn )
        {
            dn = std::min( dn0, n - i );

            Mat layer_in = inputs.rowRange(i, i + dn);
            Mat layer_out( dn, layer_in.cols, CV_64F, buf);

            scale_input( layer_in, layer_out );
            layer_in = layer_out;

            // forward pass through the remaining layers, ping-ponging
            // between the two halves of the scratch buffer
            for( int j = 1; j < l_count; j++ )
            {
                double* data = buf + ((j&1) ? max_lsize*dn0 : 0);
                int cols = layer_sizes[j];

                layer_out = Mat(dn, cols, CV_64F, data);
                // weights[j] stores the weight rows followed by a bias row; only the weights feed the GEMM
                Mat w = weights[j].rowRange(0, layer_in.cols);
                gemm(layer_in, w, 1, noArray(), 0, layer_out);
                calc_activ_func( layer_out, weights[j] );

                layer_in = layer_out;
            }

            layer_out = outputs.rowRange(i, i + dn);
            scale_output( layer_in, layer_out );
        }

        if( n == 1 )
        {
            // single sample: return the column index of the strongest output (the predicted class)
            int maxIdx[] = {0, 0};
            minMaxIdx(outputs, 0, 0, 0, maxIdx);
            return (float)(maxIdx[0] + maxIdx[1]);
        }

        return 0.f;
    }
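
This predict() is the internal implementation reached through the public cv::ml::ANN_MLP interface. Below is a minimal sketch of how user code typically ends up calling it; the layer sizes and the tiny XOR-style training set are made-up placeholders, not taken from the source above.

#include <opencv2/core.hpp>
#include <opencv2/ml.hpp>
#include <iostream>

int main()
{
    using namespace cv;

    // made-up toy network: 2 inputs, one hidden layer of 4 neurons, 2 outputs
    Mat layerSizes = (Mat_<int>(1, 3) << 2, 4, 2);

    Ptr<ml::ANN_MLP> mlp = ml::ANN_MLP::create();
    mlp->setLayerSizes(layerSizes);
    mlp->setActivationFunction(ml::ANN_MLP::SIGMOID_SYM);
    mlp->setTrainMethod(ml::ANN_MLP::BACKPROP, 0.1, 0.1);

    // tiny XOR-style training set, purely as a placeholder
    Mat samples   = (Mat_<float>(4, 2) << 0,0,  0,1,  1,0,  1,1);
    Mat responses = (Mat_<float>(4, 2) << 1,0,  0,1,  0,1,  1,0);
    mlp->train(ml::TrainData::create(samples, ml::ROW_SAMPLE, responses));

    // predict() fills 'out' with one row of network outputs per input row
    Mat out;
    mlp->predict(samples, out);
    std::cout << out << std::endl;
    return 0;
}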
Example #3
float CvANN_MLP::predict( const CvMat* _inputs, CvMat* _outputs ) const
{
    CV_FUNCNAME( "CvANN_MLP::predict" );

    __BEGIN__;

    double* buf;
    int i, j, n, dn = 0, l_count, dn0, buf_sz, min_buf_sz;

    if( !layer_sizes )
        CV_ERROR( CV_StsError, "The network has not been initialized" );

    if( !CV_IS_MAT(_inputs) || !CV_IS_MAT(_outputs) ||
        !CV_ARE_TYPES_EQ(_inputs,_outputs) ||
        (CV_MAT_TYPE(_inputs->type) != CV_32FC1 &&
         CV_MAT_TYPE(_inputs->type) != CV_64FC1) ||
        _inputs->rows != _outputs->rows )
        CV_ERROR( CV_StsBadArg, "Both input and output must be floating-point matrices "
                                "of the same type and have the same number of rows" );

    if( _inputs->cols != layer_sizes->data.i[0] )
        CV_ERROR( CV_StsBadSize, "input matrix must have the same number of columns as "
                                 "the number of neurons in the input layer" );

    if( _outputs->cols != layer_sizes->data.i[layer_sizes->cols - 1] )
        CV_ERROR( CV_StsBadSize, "output matrix must have the same number of columns as "
                                 "the number of neurons in the output layer" );

    // every sample needs 2*max_count doubles of scratch space (two ping-pong layer buffers)
    n = dn0 = _inputs->rows;
    min_buf_sz = 2*max_count;
    buf_sz = n*min_buf_sz;

    if( buf_sz > max_buf_sz )
    {
        // cap the chunk size dn0 so the stack-allocated buffer stays within max_buf_sz
        dn0 = max_buf_sz/min_buf_sz;
        dn0 = MAX( dn0, 1 );
        buf_sz = dn0*min_buf_sz;
    }

    buf = (double*)cvStackAlloc( buf_sz*sizeof(buf[0]) );
    l_count = layer_sizes->cols;

    for( i = 0; i < n; i += dn )
    {
        CvMat hdr[2], _w, *layer_in = &hdr[0], *layer_out = &hdr[1], *temp;
        dn = MIN( dn0, n - i );

        cvGetRows( _inputs, layer_in, i, i + dn );
        cvInitMatHeader( layer_out, dn, layer_in->cols, CV_64F, buf );

        scale_input( layer_in, layer_out );
        CV_SWAP( layer_in, layer_out, temp );

        // forward pass: alternate between the two halves of the scratch buffer
        for( j = 1; j < l_count; j++ )
        {
            double* data = buf + (j&1 ? max_count*dn0 : 0);
            int cols = layer_sizes->data.i[j];

            cvInitMatHeader( layer_out, dn, cols, CV_64F, data );
            cvInitMatHeader( &_w, layer_in->cols, layer_out->cols, CV_64F, weights[j] );
            cvGEMM( layer_in, &_w, 1, 0, 0, layer_out );
            // the bias terms are stored immediately after the weight matrix
            calc_activ_func( layer_out, _w.data.db + _w.rows*_w.cols );

            CV_SWAP( layer_in, layer_out, temp );
        }

        cvGetRows( _outputs, layer_out, i, i + dn );
        scale_output( layer_in, layer_out );
    }

    __END__;

    return 0.f;
}
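
This older version belongs to the pre-3.0 CvANN_MLP class. A minimal usage sketch under that legacy 2.x API follows; the network shape and training data are invented for illustration only.

#include <opencv2/ml/ml.hpp>   // legacy 2.x machine-learning API

int main()
{
    // made-up toy network: 2 inputs, 3 hidden neurons, 1 output
    int sizes[] = { 2, 3, 1 };
    CvMat layer_sizes = cvMat( 1, 3, CV_32SC1, sizes );
    CvANN_MLP mlp( &layer_sizes, CvANN_MLP::SIGMOID_SYM );

    // tiny XOR-style training set, purely as a placeholder
    float train_data[] = { 0,0,  0,1,  1,0,  1,1 };
    float train_resp[] = { 0, 1, 1, 0 };
    CvMat inputs  = cvMat( 4, 2, CV_32FC1, train_data );
    CvMat outputs = cvMat( 4, 1, CV_32FC1, train_resp );
    mlp.train( &inputs, &outputs, 0 );

    // predict() writes one row of responses per input row into 'result'
    float result_data[4];
    CvMat result = cvMat( 4, 1, CV_32FC1, result_data );
    mlp.predict( &inputs, &result );
    return 0;
}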
/* Draw the paddle as a single black rectangle, 11 pixels wide and 37 pixels
   tall, centred on the scaled (x, y) position. */
void draw_paddle(pixbuf_t *pixbuf, int x, int y)
{
	x = scale_input(x);
	y = scale_input(y);
	graphics_draw_rectangle(pixbuf, x-5, y-18, x+5, y+18, 0x00);
}
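
A sketch of how these two drawing helpers might be tied together once per frame. The game_state_t struct, the graphics_fill()/graphics_flush() helpers and the colour values are invented placeholders; only draw_ball() and draw_paddle() come from the code above.

/* Hypothetical game state: just the positions the render pass needs. */
typedef struct {
	int ball_x, ball_y;
	int left_paddle_x, left_paddle_y;
	int right_paddle_x, right_paddle_y;
} game_state_t;

/* Clear the buffer, draw both paddles and the ball, then present the frame. */
static void render_frame(pixbuf_t *pixbuf, const game_state_t *state)
{
	graphics_fill(pixbuf, 0xFF);                              /* assumed helper: white background */
	draw_paddle(pixbuf, state->left_paddle_x, state->left_paddle_y);
	draw_paddle(pixbuf, state->right_paddle_x, state->right_paddle_y);
	draw_ball(pixbuf, state->ball_x, state->ball_y, 0x00);    /* black ball */
	graphics_flush(pixbuf);                                   /* assumed helper: push to display */
}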