cv::Mat findFace::drawOutput(cv::Mat mouthOverlay, cv::Mat eyeOverlay){
    
    // Buffer the webcam frame
    captureFrame.copyTo(outputFrame);
    
    // Convert the mouth and eye frames to BGR
    cv::cvtColor(eyeOverlay, eyeOverlay, cv::COLOR_GRAY2BGR);
    cv::cvtColor(mouthOverlay, mouthOverlay, cv::COLOR_GRAY2BGR);
    
    // Create rectangles at the mouth and eye positions
    cv::Rect eyeRoi( eyePos, cv::Size( eyeWidth, eyeHeight ));
    cv::Rect mouthRoi( mouthPos, cv::Size( mouthWidth, mouthHeight ));
    
    //Bereich auf Ausgabeframe den Rechtecken zuweisen
    cv::Mat mouthDestinationROI = outputFrame( mouthRoi );
    cv::Mat eyeDestinationROI = outputFrame( eyeRoi );
    
    // Invert the colors of the mouth overlay
    cv::subtract(cv::Scalar(255),mouthOverlay, mouthOverlay);
    
    // Fill the rectangles with the mouth and eyes
    eyeOverlay.copyTo( eyeDestinationROI );
    mouthOverlay.copyTo( mouthDestinationROI );
    
    
    return outputFrame;

}
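A minimal standalone sketch of the same ROI paste pattern (not part of the findFace class; the sizes and file name here are made up): convert a grayscale overlay to BGR, invert it, and copy it into a rectangle of the output frame.

#include <opencv2/opencv.hpp>

int main()
{
    cv::Mat frame(480, 640, CV_8UC3, cv::Scalar(40, 40, 40));  // stand-in for the webcam frame
    cv::Mat overlay(60, 120, CV_8UC1, cv::Scalar(200));        // stand-in grayscale overlay

    cv::cvtColor(overlay, overlay, cv::COLOR_GRAY2BGR);        // match the frame's channel count
    cv::subtract(cv::Scalar::all(255), overlay, overlay);      // invert the overlay

    cv::Rect roi(cv::Point(100, 300), overlay.size());         // destination rectangle inside the frame
    overlay.copyTo(frame(roi));                                // paste the overlay into the ROI

    cv::imwrite("output.png", frame);
    return 0;
}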
Example No. 2
bool MediaConverter::imgs2media(LPRImage *pRawImages[], size_t imgCount, EventMedia &eventMedia)
{
	if (imgCount == 0)
	{
		printf("Input is empty.\n");
		return true;
	}
	//////////////////////////////////////////////////////////////////////////
	if(!initialize(pRawImages[0], "temp.avi"))
	{
		printf("Failed to initialize.\n");
		return false;
	}
	//////////////////////////////////////////////////////////////////////////
	if (!(mOutputFormatCtxPtr->oformat->flags & AVFMT_NOFILE))
	{
		/*if (avio_open(&mOutputFormatCtxPtr->pb, mediaName.c_str(), AVIO_FLAG_WRITE) < 0)
		{
		printf("Could not open %s.\n", mediaName.c_str());
		return false;
		}*/
		if (avio_open_dyn_buf(&mOutputFormatCtxPtr->pb) < 0)
		{
			printf("Could not open avio buff.\n");
			return false;
		}
	}
	//////////////////////////////////////////////////////////////////////////
	// Output
	avformat_write_header(mOutputFormatCtxPtr, NULL);
	for (size_t i = 0; i < imgCount; ++ i)
		outputFrame(pRawImages[i]);
	flushFrames();
	av_write_trailer(mOutputFormatCtxPtr);
	//////////////////////////////////////////////////////////////////////////
	if (!(mOutputFormatCtxPtr->oformat->flags & AVFMT_NOFILE))
	{
		//avio_close(mOutputFormatCtxPtr->pb);
		eventMedia.mBufferSize = avio_close_dyn_buf(mOutputFormatCtxPtr->pb, &eventMedia.mBufferPtr);
	}
	//////////////////////////////////////////////////////////////////////////
	// Clean up the environment
	uninitialize();

	return true;
}
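The in-memory output above relies on libavformat's dynamic AVIO buffer. A hedged standalone sketch of just that pattern, without the muxing around it (assumes FFmpeg's libavformat/libavutil are installed):

#include <cstdint>
#include <cstdio>
extern "C" {
#include <libavformat/avio.h>
#include <libavutil/mem.h>
}

int main()
{
    AVIOContext *pb = nullptr;
    if (avio_open_dyn_buf(&pb) < 0)              // grow an in-memory buffer instead of opening a file
        return 1;

    const unsigned char payload[] = "demo bytes";
    avio_write(pb, payload, sizeof(payload) - 1);

    uint8_t *buffer = nullptr;
    int size = avio_close_dyn_buf(pb, &buffer);  // returns the byte count and hands over the buffer
    std::printf("captured %d bytes in memory\n", size);

    av_free(buffer);                             // the caller owns, and must free, the buffer
    return 0;
}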
Example No. 3
static int decode_write_frame(const char *outfilename, AVCodecContext *avctx,
                              AVFrame *frame, int *frame_count, AVPacket *pkt, int last)
{
    int len, got_frame;
    char buf[1024];
    ANN_DEBUG
    len = avcodec_decode_video2(avctx, frame, &got_frame, pkt);

    if (len < 0) {
        //av_err2str(len);
        //sprintf(msg, "Error decoding video frame (%s)\n", );
        av_make_error_string(msg, 80, len);
        __android_log_write(ANDROID_LOG_DEBUG, "jni_test", msg);
        fprintf(stderr, "Error while decoding frame %d\n", *frame_count);
        return len;
    }
    sprintf(msg, "%d len = %d\n",__LINE__, len);
    __android_log_write(ANDROID_LOG_DEBUG, "jni_test", msg);

    if (got_frame) {

        sprintf(msg, "%d Saving %sframe %3d\n", __LINE__, last ? "last " : "", *frame_count);
        __android_log_write(ANDROID_LOG_DEBUG, "jni_test", msg);
        fflush(stdout);
        /* the picture is allocated by the decoder, no need to free it */
        snprintf(buf, sizeof(buf), outfilename, *frame_count);
        //pgm_save(frame->data[0], frame->linesize[0],
        //         avctx->width, avctx->height, buf);
        outputFrame(frame->data[0], frame->linesize[0],
                    avctx->width, avctx->height, outfilename);
        (*frame_count)++;
    }
    if (pkt->data) {
        pkt->size -= len;
        pkt->data += len;
    }

    sprintf(msg, "%d pkt->size = %d\n",__LINE__, pkt->size);
    __android_log_write(ANDROID_LOG_DEBUG, "jni_test", msg);
    return 0;
}
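A small standalone sketch of the error-to-string helper used above, outside the Android logging context (assumes FFmpeg's libavutil is installed):

#include <cstdio>
extern "C" {
#include <libavutil/error.h>
}

int main()
{
    char msg[AV_ERROR_MAX_STRING_SIZE];
    av_make_error_string(msg, sizeof(msg), AVERROR(EINVAL));  // fill msg with a readable description
    std::printf("%s\n", msg);
    return 0;
}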
Example No. 4
ICPWidget::ICPWidget(QWidget *parent) :
    QFrame(parent),
    ui(new Ui::ICPWidget)
{
    ui->setupUi(this);

    v.initCameraParameters();
    v.setBackgroundColor(1.0,1.0,1.0);
    v.addCoordinateSystem(0.3);
    v.registerPointPickingCallback<ICPWidget>(&ICPWidget::pick,*this,NULL);
    v.registerKeyboardCallback<ICPWidget>(&ICPWidget::key,*this,NULL);

    widget.SetRenderWindow(v.getRenderWindow());

    ui->frameView->layout()->addWidget(&widget);
    ui->tools->setCurrentIndex(0);

    QString dataInfo;
    Pipe::loadData(_FrameKeyList,dataInfo,Pipe::_FrameListKey);
    Pipe::loadData(_IdMapKeyList,dataInfo,Pipe::_IdMapListKey);
    frameCloud = FullPointCloud::Ptr(new FullPointCloud);
    segCloud = FullPointCloud::Ptr(new FullPointCloud);
    currentFrame = 0;
    currentState = PICK_FRAME;
    currentObjIndex = -1;

    connect(ui->nextFrame,SIGNAL(clicked()),this,SLOT(nextFrame()));
    connect(ui->lastFrame,SIGNAL(clicked()),this,SLOT(lastFrame()));
    connect(ui->loadFrame,SIGNAL(clicked()),this,SLOT(reLoadFrameWithView()));
    connect(ui->addObj,SIGNAL(clicked()),this,SLOT(addObj()));
    connect(ui->delObj,SIGNAL(clicked()),this,SLOT(delObj()));
    connect(ui->icpObj,SIGNAL(clicked()),this,SLOT(icpObj()));

    connect(ui->tools,SIGNAL(currentChanged(int)),this,SLOT(changeState(int)));
    connect(ui->outObj,SIGNAL(clicked()),this,SLOT(outputObj()));
    connect(ui->outFrame,SIGNAL(clicked()),this,SLOT(outputFrame()));
}
Example No. 5
bool NcpSpi::SpiTransactionComplete(uint8_t *aOutputBuf,
                                    uint16_t aOutputLen,
                                    uint8_t *aInputBuf,
                                    uint16_t aInputLen,
                                    uint16_t aTransLen)
{
    // This can be executed from an interrupt context, therefore we cannot
    // use any OpenThread APIs here. If further processing is needed, the
    // returned value `shouldProcess` is set to `true` to tell the platform
    // SPI slave driver to invoke the `SpiTransactionProcess()` callback,
    // which, unlike this callback, must be called from the same OS context
    // in which OpenThread APIs/callbacks are executed.

    uint16_t transDataLen;
    bool     shouldProcess = false;
    SpiFrame outputFrame(aOutputBuf);
    SpiFrame inputFrame(aInputBuf);
    SpiFrame sendFrame(mSendFrame);

    VerifyOrExit((aTransLen >= kSpiHeaderSize) && (aInputLen >= kSpiHeaderSize) && (aOutputLen >= kSpiHeaderSize));
    VerifyOrExit(inputFrame.IsValid() && outputFrame.IsValid());

    transDataLen = aTransLen - kSpiHeaderSize;

    if (!mHandlingRxFrame)
    {
        uint16_t rxDataLen = inputFrame.GetHeaderDataLen();

        // A new frame is successfully received if the input frame
        // indicates that there is data and the "data len" is not
        // larger than the "accept len" we provided in the
        // exchanged output frame.

        if ((rxDataLen > 0) && (rxDataLen <= transDataLen) && (rxDataLen <= outputFrame.GetHeaderAcceptLen()))
        {
            mHandlingRxFrame = true;
            shouldProcess    = true;
        }
    }

    if (mTxState == kTxStateSending)
    {
        uint16_t txDataLen = outputFrame.GetHeaderDataLen();

        // Frame transmission is successful if the master indicates
        // in the input frame that it could accept the frame
        // length that was exchanged, i.e., the "data len" in
        // the output frame is smaller than or equal to the "accept
        // len" in the input frame received from the master.

        if ((txDataLen > 0) && (txDataLen <= transDataLen) && (txDataLen <= inputFrame.GetHeaderAcceptLen()))
        {
            mTxState      = kTxStateHandlingSendDone;
            shouldProcess = true;
        }
    }

exit:
    // Determine the input and output frames to prepare a new transaction.

    if (mResetFlag && (aTransLen > 0) && (aOutputLen > 0))
    {
        mResetFlag = false;
        sendFrame.SetHeaderFlagByte(/*aResetFlag */ false);
        SpiFrame(mEmptySendFrameFullAccept).SetHeaderFlagByte(/*aResetFlag */ false);
        SpiFrame(mEmptySendFrameZeroAccept).SetHeaderFlagByte(/*aResetFlag */ false);
    }

    if (mTxState == kTxStateSending)
    {
        aOutputBuf = mSendFrame;
        aOutputLen = mSendFrameLength;
    }
    else
    {
        aOutputBuf = mHandlingRxFrame ? mEmptySendFrameZeroAccept : mEmptySendFrameFullAccept;
        aOutputLen = kSpiHeaderSize;
    }

    if (mHandlingRxFrame)
    {
        aInputBuf = mEmptyReceiveFrame;
        aInputLen = kSpiHeaderSize;
    }
    else
    {
        aInputBuf = mReceiveFrame;
        aInputLen = kSpiBufferSize;
    }

    sendFrame.SetHeaderAcceptLen(aInputLen - kSpiHeaderSize);

    otPlatSpiSlavePrepareTransaction(aOutputBuf, aOutputLen, aInputBuf, aInputLen, (mTxState == kTxStateSending));

    return shouldProcess;
}
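The acceptance rule applied twice above can be isolated into a small standalone sketch (frameAccepted is a hypothetical helper, not part of OpenThread): a frame only counts as exchanged when its "data len" fits both the actual transfer length and the "accept len" advertised by the peer in the exchanged header.

#include <cstdint>
#include <iostream>

// Hypothetical helper mirroring the checks in SpiTransactionComplete().
static bool frameAccepted(uint16_t dataLen, uint16_t transDataLen, uint16_t acceptLen)
{
    return (dataLen > 0) && (dataLen <= transDataLen) && (dataLen <= acceptLen);
}

int main()
{
    std::cout << frameAccepted(10, 32, 64) << "\n";  // 1: fits the transfer and the accept window
    std::cout << frameAccepted(80, 32, 64) << "\n";  // 0: more data than was actually clocked out
    std::cout << frameAccepted(0, 32, 64) << "\n";   // 0: empty frame, nothing to process
    return 0;
}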
Example No. 6
bool MediaConverter::outputFrame(LPRImage *pRawImage)
{
	// Parse the input media information
	AVIOContext *pIOCtx = avio_alloc_context(pRawImage->pData, pRawImage->imageSize, 0, NULL, NULL, NULL, NULL);
	AVFormatContext *pInputFormatCtx = avformat_alloc_context();
	pInputFormatCtx->pb = pIOCtx;
	if (avformat_open_input(&pInputFormatCtx, "fake.jpg", mInputFmtPtr, NULL) != 0)
	{
		printf("Failed to open %s.\n", "fake.jpg");
		return false;
	}
	if (avformat_find_stream_info(pInputFormatCtx, NULL) < 0)
	{
		printf("Failed to parse %s.\n", "fake.jpg");
		return false;
	}
	//av_dump_format(pInputFormatCtx, 0, "fake.jpg", 0);
	int videoStreamIndex = -1;
	for (size_t i = 0; i < pInputFormatCtx->nb_streams; ++ i)
	{
		if (pInputFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
		{
			videoStreamIndex = i;
			break;
		}
	}
	if (videoStreamIndex == -1)
	{
		printf("Could not find a video stream.\n");
		return false;
	}
	AVCodecContext *pInputCodecCtx = pInputFormatCtx->streams[videoStreamIndex]->codec;
	// Find the decoder for the video stream
	AVCodec *pCodec = avcodec_find_decoder(pInputCodecCtx->codec_id);
	if (NULL == pCodec)
	{
		printf("Could not find decoder.\n");
		return false;
	}
	// Open codec
	if (avcodec_open2(pInputCodecCtx, pCodec, NULL) != 0)
	{
		printf("Could not open decoder.\n");
		return false;
	}
	//////////////////////////////////////////////////////////////////////////
	// Read media content
	AVPacket packet;
	int frameFinished = 0;
	while (av_read_frame(pInputFormatCtx, &packet) >= 0)
	{
		if (packet.stream_index == videoStreamIndex)
		{
			avcodec_decode_video2(pInputCodecCtx, mInputFramePtr, &frameFinished, &packet);
			if (frameFinished)
			{
				mInputPixFmt = pInputCodecCtx->pix_fmt;
				if (!outputFrame())
					return false;
			}
		}
		av_free_packet(&packet);
	}

	avcodec_close(pInputCodecCtx);
	av_free(pIOCtx);
	avformat_close_input(&pInputFormatCtx);

	return true;
}
Example No. 7
void denoise_test(const QString& signalWithNoiseFileName, const QString& noiseFileName, const QString& outputFileName)
{
    WavFile signalFile(signalWithNoiseFileName);
    signalFile.open(WavFile::ReadOnly);
    WavFile noiseFile(noiseFileName);
    noiseFile.open(WavFile::ReadOnly);

    if (signalFile.getHeader() != noiseFile.getHeader())
    {
        qDebug() << "Signal and noise files have the different headers!";
        return;
    }

    WavFile outputFile(outputFileName);
    outputFile.open(WavFile::WriteOnly, signalFile.getHeader());

    const int frameSize = 1024;
    int minSize = qMin(signalFile.size(), noiseFile.size());
    int frameNum = minSize / frameSize;

    float adaptation_rate = 1.65;
    float error = 0.1;

    float* signal_with_noise = new float[frameSize];
    float* noise = new float[frameSize];
    float* signal = new float[frameSize];
    float* filter = new float[frameSize];
    memset(signal, 0, frameSize * sizeof(float));
    memset(filter, 0, frameSize * sizeof(float));

    for (int i = 0; i < frameNum; i++)
    {
        qDebug() << "Frame #" << i;

        Signal signalFrame = signalFile.read(frameSize);
        Signal noiseFrame = noiseFile.read(frameSize);

        for (int j = 0; j < frameSize; j++)
        {
            signal_with_noise[j] = static_cast<float>(signalFrame[j].toInt()) * pow(2, -15);
            noise[j] = static_cast<float>(noiseFrame[j].toInt()) * pow(2, -15);
        }
        memset(filter, 0, frameSize * sizeof(float));
        float final_err = error;
        final_err = denoise(signal_with_noise, noise, filter, signal, frameSize, final_err, adaptation_rate);

        qDebug() << "Adapt error: " << final_err;

        Signal outputFrame(frameSize, signalFile.getHeader());
        for (int j = 0; j < frameSize; j++)
        {
            try
            {
                outputFrame[j] = static_cast<int>(signal[j] * pow(2, 15));
            }
            catch (const Sample::OutOfRangeValue &)
            {
                int tmp = static_cast<int>(signal[j] * pow(2, 15));
                if (tmp > 0)
                {
                    outputFrame[j] = 32767;
                }
                else
                {
                    outputFrame[j] = -32768;
                }
            }
        }
        outputFile.write(outputFrame);
    }

    delete[] signal_with_noise;
    delete[] noise;
    delete[] signal;
    delete[] filter;
}
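The try/catch above saturates samples that overflow the 16-bit range. A standalone sketch of the same float-to-PCM conversion with explicit clamping (floatToPcm16 is a hypothetical helper and does not use the project's Signal/Sample classes):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

static int16_t floatToPcm16(float sample)
{
    // Scale [-1.0, 1.0) to the 16-bit range and clamp anything that overflows.
    long scaled = std::lround(sample * 32768.0f);
    return static_cast<int16_t>(std::clamp(scaled, -32768L, 32767L));
}

int main()
{
    std::cout << floatToPcm16(0.5f) << "\n";   // 16384
    std::cout << floatToPcm16(1.5f) << "\n";   // clamped to 32767
    std::cout << floatToPcm16(-2.0f) << "\n";  // clamped to -32768
    return 0;
}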