void MultilayerPerceptron::initMLP(int ninputs, int noutputs, const vector<int> &hiddenLayerSizes, const TransferFunctionType &tf)
{
	// Fresh training-result container, then network topology and transfer function
	tres = new MLPTrainingResult();
	nInputs = ninputs;
	setAlfa(1);
	setLayerSizes(hiddenLayerSizes);
	setInputSize(ninputs);
	setOutputSize(noutputs);
	setTransferFunctionType(tf);
	//	setOutputType(Continuous);

	// Start from random weights; route the finished() signal to the local finished() slot
	randomizeWeights();
	connect(this, SIGNAL(finished()), SLOT(finished()));
}
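/*
 * Minimal usage sketch for the initialiser above (assumptions: initMLP is reachable
 * from a public constructor or directly callable, and TransferFunctionType exposes a
 * Sigmoid value; those names are illustrative, not taken from this file):
 *
 *   std::vector<int> hidden = {16, 8};   // two hidden layers
 *   MultilayerPerceptron mlp;
 *   mlp.initMLP(4, 3, hidden, Sigmoid);  // 4 inputs, 3 outputs, sigmoid activations
 */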
Image *BarrelCorrection::apply(Image *image)
{
	LOG_TRACE("BarrelCorrection::apply()");
	
	if(m_outputWidth == 0 || m_outputHeight == 0)
	{
		setOutputSize(image->getWidth(), image->getHeight());
	}
	
	if(m_correctedImage == NULL || m_pixelMapping == NULL)
	{
		LOG_ERROR("BarrelCorrection::apply(): Output image or pixel mapping array is not set. The filter is not ready to be applied");
		throw CameraException("Output image or pixel mapping array is not set. The filter is not ready to be applied");
	}
	
	if(image->getFormat() != Image::FORMAT_RGB32)
	{
		LOG_ERROR("BarrelCorrection::apply(): Input image is not in RGB32 format");
		throw CameraException("Input image is not in RGB32 format");
	}
	
	int numPixels                = m_correctedImage->getWidth() * m_correctedImage->getHeight();
	int *mapping                 = m_pixelMapping;
	ImageBuffer *cBufferAddr     = m_correctedImage->getBufferAddress();
	ImageBuffer *dBufferBaseAddr = image->getBufferAddress();
	ImageBuffer *dBufferAddr     = NULL;
	
	if(m_pixelMappingMin < 0 || m_pixelMappingMax * 4 > image->getBufferSize())
	{
		LOG_ERROR("BarrelCorrection::apply(): Input image is not too small to contain the specified distorted rectangle");
		throw CameraException("Input image is not too small to contain the specified distorted rectangle");
	}
	
	for(int i = 0; i < numPixels; i++)
	{
		// Calculate which pixel to use from the distorted image based on the pixel mapping
		dBufferAddr = dBufferBaseAddr + (*mapping) * 4; // 4 bytes per pixel (BGRX)
		
		// Copy the pixel color from the distorted to the corrected image.
		*(cBufferAddr++) = *(dBufferAddr++); // B (Blue)
		*(cBufferAddr++) = *(dBufferAddr++); // G (Green)
		*(cBufferAddr++) = *(dBufferAddr++); // R (Red)
		*(cBufferAddr++) = *(dBufferAddr++); // X (Not used)
		mapping++;
		
		//memcpy(cBufferAddr, dBufferAddr, 4);
		//mapping += 4;
	}
	
	return m_correctedImage;
}
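/*
 * Sketch of the m_pixelMapping layout apply() expects: one int per corrected
 * (output) pixel, holding the index of the source pixel in the distorted image;
 * the loop above multiplies that index by 4 to address BGRX bytes. An identity
 * mapping for a width x height image could be built like this (illustrative only;
 * the real mapping comes from the barrel-distortion calibration):
 *
 *   std::vector<int> mapping(width * height);
 *   for (int y = 0; y < height; ++y)
 *       for (int x = 0; x < width; ++x)
 *           mapping[y * width + x] = y * width + x;
 */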
CFFmpegPlayer::CFFmpegPlayer(const std::string& fileName): m_formatCtx(nullptr, close_av_input),
														   m_codecCtx(nullptr, avcodec_close),
														   m_frame(avcodec_alloc_frame(), free_av_frame),
														   m_swsCtx(nullptr), m_videoStream(-1)
{
    AVDictionary* optionsDict = nullptr;
    AVFormatContext* format = nullptr;

    int error = avformat_open_input(&format, fileName.c_str(), nullptr, nullptr);

    CHECK_FFMPEG_RETURN_CODE(error, "avformat_open_input");

    m_formatCtx.reset(format);

    error = avformat_find_stream_info(m_formatCtx.get(), nullptr);

    CHECK_FFMPEG_RETURN_CODE(error, "avformat_find_stream_info");

    AVCodec* codec = nullptr;

    m_videoStream = av_find_best_stream(m_formatCtx.get(), AVMEDIA_TYPE_VIDEO, -1, -1, &codec, 0);

    // av_find_best_stream() returns the stream index or a negative AVERROR,
    // so check its return value rather than the stale 'error' from above.
    CHECK_FFMPEG_RETURN_CODE(m_videoStream, "av_find_best_stream");

    assert(m_videoStream >= 0);

    m_codecCtx.reset(m_formatCtx->streams[m_videoStream]->codec);

    // Decode single-threaded with no non-compliant speed-ups
    m_codecCtx->flags2 = 0; // CODEC_FLAG2_FAST;
    m_codecCtx->thread_count = 1;
    m_codecCtx->thread_type = 0;

    error = avcodec_open2(m_codecCtx.get(), codec, &optionsDict);

    CHECK_FFMPEG_RETURN_CODE(error, "avcodec_open2");

    m_outputWidth = 0;
    m_outputHeight = 0;
    m_pixelSize = 0;
    setOutputSize(m_codecCtx->width, m_codecCtx->height);
}
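/*
 * Sketch of the decode loop that would typically follow this setup, using the
 * same FFmpeg API generation as the constructor (av_read_frame /
 * avcodec_decode_video2). Only members initialised above are referenced;
 * everything else about the class is an assumption:
 *
 *   AVPacket packet;
 *   while (av_read_frame(m_formatCtx.get(), &packet) >= 0) {
 *       if (packet.stream_index == m_videoStream) {
 *           int gotFrame = 0;
 *           avcodec_decode_video2(m_codecCtx.get(), m_frame.get(), &gotFrame, &packet);
 *           if (gotFrame) {
 *               // scale/convert m_frame via m_swsCtx into the output buffer here
 *           }
 *       }
 *       av_free_packet(&packet);
 *   }
 */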