예제 #1
0
/**
   \brief Constructor - opens directly a video
**/
QVideoDecoder::QVideoDecoder(QString file)
{
   InitVars();
   initCodec();

   // Keep the converted string in a named local so the lifetime of the
   // buffer behind c_str() is obvious at a glance.
   const std::string path = file.toStdString();
   ok = openFile(path.c_str());
}
예제 #2
0
// Lazily resolve and cache the codec for this format; subsequent calls
// return the cached pointer without touching the plug-in registry.
amci_codec_t* AmAudioFormat::getCodec()
{
  if (codec)
    return codec;

  // First use: look the codec up by id and run its initialization hook.
  codec = AmPlugIn::instance()->codec(codec_id);
  initCodec();
  return codec;
}
예제 #3
0
// Ensure a QApplication exists for this process, then optionally run codec
// initialization. Safe to call more than once: the application object is
// created only if none is present yet.
void auxQt::initQt(int argc, char **argv, bool needInitcodec)
{
	if (qApp == nullptr)
	{
		new QApplication(argc, argv);
	}

	if (needInitcodec)
		initCodec();
}
// Thread body: repeatedly (re)initialize the codec and play the stream
// until a stop is requested.
void VideoPlayer::run()
{
    stopPlayback = false;

    // NOTE(review): stopPlayback appears to be written from another thread
    // (a stop request) while this loop reads it; it should be an atomic
    // flag — confirm its declared type.
    while (!stopPlayback)   // was `while(true && !stopPlayback)`; `true &&` was redundant
    {
        // Recycle buffers and give the codec a clean restart each pass.
        garbageCollection();
        deinitCodec();
        initCodec();

        // Inner loop: spin playback() until it reports completion or a
        // stop is requested.
        while (playback() && !stopPlayback)
        {
        }
    }
}
예제 #5
0
int main( int argc, char *argv[] )
{
    initCodec("UTF-8");
    QApplication app(argc, argv);

    DataBaseManager *db;
    db = DataBaseManager::getInstance();
    Q_UNUSED(db);

    MainForm* mainWindow = new MainForm();
    MainPresenter* presenter = new MainPresenter(mainWindow);
    Q_UNUSED(presenter);
    mainWindow->show();

    return app.exec();
}
예제 #6
0
// Ensure a Qt application object exists using synthetic command-line
// arguments, then optionally run codec initialization.
//
// \param guiAllowed    create a QApplication (GUI) instead of a bare
//                      QCoreApplication (headless event loop only)
// \param needInitcodec run initCodec() after the application object exists
void auxQt::initQtDefault(bool guiAllowed, bool needInitcodec)
{
	if (!qApp)
	{
		// Q(Core)Application keeps references to argc/argv for the lifetime
		// of the application, so both must outlive this call — hence static.
		// Passing argv == NULL with argc == 1 is a bug: Qt dereferences
		// argv[0] for the application name. Supply a persistent dummy argv.
		static int defArgc = 1;
		static char defAppName[] = "app";
		static char* defArgv[] = { defAppName, NULL };

		if (guiAllowed)
		{
			// Full GUI application object.
			new QApplication(defArgc, defArgv);
		}
		else
		{
			// Headless: core event loop only.
			new QCoreApplication(defArgc, defArgv);
		}
	}
	//
	if (needInitcodec)
	{
		initCodec();
	}
}
예제 #7
0
/**
   \brief Constructor - opens a video on later openFile call
**/
QVideoDecoder::QVideoDecoder()
{
   InitVars();    // reset all member state to known defaults
   initCodec();   // prepare the codec layer (presumably global library init — confirm)
}
예제 #8
0
// Decode the entire audio file attached to this player into one large
// in-memory PCM buffer (S32 interleaved), then zero the output port buffer
// and register the playback trigger callback.
//
// Uses the legacy libav decode API (av_init_packet / avcodec_decode_audio4 /
// avcodec_alloc_frame), so this code targets an old FFmpeg version.
void SamplePlayer_SW::init() {



    AVPacket avpkt;
    ReSampleContext* resmplCtx;                  // NOTE(review): never assigned or used — the resampler setup below is commented out
    int compressed_bytes_consumed    = 0;
    int compressed_file_size         = 0;        // NOTE(review): never written after this init
    uint8_t* l_ResampleBuffer;                   // NOTE(review): unused — leftover from the disabled resampling path

    initCodec();                /* Initialize audio codec   */
    av_init_packet(&avpkt);     /* Initialize packet        */
//    resmplCtx = av_audio_resample_init(m_CodecCtx->channels,
//                                        m_CodecCtx->channels,
//                                        Synthesizer::config::samplerate,
//                                        m_CodecCtx->sample_rate,
//                                        AV_SAMPLE_FMT_S32,
//                                        m_CodecCtx->sample_fmt,
//                                        16, 10, 0, 1.0);

    // Fixed-size destination buffer for the whole decoded file.
    // NOTE(review): no bounds check below — a file longer than
    // MAX_PLAYBACK_LENGTH_SEC overruns this buffer; confirm upstream limits.
    m_DecompressedDataBuffer = new uint8_t[AVCODEC_MAX_AUDIO_FRAME_SIZE * MAX_PLAYBACK_LENGTH_SEC];
    memset(m_DecompressedDataBuffer, 0 ,AVCODEC_MAX_AUDIO_FRAME_SIZE * MAX_PLAYBACK_LENGTH_SEC);

    LOG_DEBUG("Decoding audio file");
    LOG_DEBUG("Sample format:" << av_get_sample_fmt_name(m_CodecCtx->sample_fmt));

    // av_read_frame() returns 0 on success; loop until EOF/error.
    // NOTE(review): avpkt is never released (av_free_packet/av_packet_unref)
    // after each iteration — this leaks the packet payload per frame.
    while (!av_read_frame(m_ContainerCtx, &avpkt)) {

        int got_frame = 0;

        if (!m_Decoded_Frame) {

            if (!(m_Decoded_Frame = avcodec_alloc_frame())) { /* allocate once */

                LOG_ERROR("Could not allocate audio frame");
                exit(1);
            }
        }else{
            // Reuse the single frame: reset its fields before each decode.
            avcodec_get_frame_defaults(m_Decoded_Frame);
        }

        compressed_bytes_consumed = avcodec_decode_audio4(m_CodecCtx, m_Decoded_Frame, &got_frame, &avpkt);

        if (compressed_bytes_consumed < 0) {
            LOG_ERROR("Error while decoding audio frame");
            exit(1);
        }

        if (got_frame) {
            int samples_written = 0;         // NOTE(review): unused
            int data_size = 0;
            size_t resampled_Bytes = 0;      // NOTE(review): unused

            /* if a frame has been decoded, output it */
            data_size = av_samples_get_buffer_size(NULL, m_CodecCtx->channels, m_Decoded_Frame->nb_samples, m_CodecCtx->sample_fmt, 1);

            // Write cursor into the S32 destination buffer.
            int32_t* buf  = (int32_t*) (m_DecompressedDataBuffer + m_DecompressedDataSize);

            // Convert the decoded frame to interleaved S32 by source format.
            switch(m_CodecCtx->sample_fmt){
            case AV_SAMPLE_FMT_NONE:
            case AV_SAMPLE_FMT_U8:
                // NOTE(review): with the U8 conversion commented out (and no
                // break), these cases FALL THROUGH to the S16 path, which
                // misinterprets U8/unknown data as 16-bit samples — confirm
                // whether U8 input can ever reach here.
//                for(int i = 0; i <  m_Decoded_Frame->nb_samples * m_CodecCtx->channels; i++){
//                    buf[i] = (int32_t)( ((int8_t*)  m_Decoded_Frame->data[0])[i] << 24 );
//                }
//                break;

            //FIXME: convert other sample formats
            case AV_SAMPLE_FMT_S16:
                // Widen each 16-bit sample to 32 bits (shift into the high word).
                for(int i = 0; i <  m_Decoded_Frame->nb_samples * m_CodecCtx->channels; i++){
                    buf[i] = *(((short*)m_Decoded_Frame->data[0]) + i) << 16;
                }
                m_DecompressedDataSize += m_Decoded_Frame->nb_samples * m_CodecCtx->channels * av_get_bytes_per_sample(AV_SAMPLE_FMT_S32);
                break;

            case AV_SAMPLE_FMT_S32:

                // Already the target format: raw copy.
                memcpy(m_DecompressedDataBuffer + m_DecompressedDataSize, m_Decoded_Frame->data[0], data_size);

                m_DecompressedDataSize += data_size;

                break;

            case AV_SAMPLE_FMT_FLT:
            case AV_SAMPLE_FMT_DBL:

            case AV_SAMPLE_FMT_U8P:
            case AV_SAMPLE_FMT_S16P:
            case AV_SAMPLE_FMT_S32P:
            case AV_SAMPLE_FMT_FLTP:
            case AV_SAMPLE_FMT_DBLP:

            default:
                // Float/double and all planar layouts are not converted;
                // the frame is silently dropped after this log line.
                LOG_ERROR("Sample format is not supported");
            }

        }
    }


    // Clear the (mono) output port buffer; right channel handling disabled.
    char* writeLeftPortPtr  = m_SoundOut_Left_1_Port->getWriteBuffer();
//    char* writeRightPortPtr = m_SoundOut_Right_2_Port->getWriteBuffer();

    memset(writeLeftPortPtr,  0, Synthesizer::config::bytesPerBlock);
//    memset(writeRightPortPtr, 0, Synthesizer::config::bytesPerBlock);

    // Start/stop playback when the trigger port value changes.
    m_Trigger_1_Port->registerCallback(ICallbackPtr(new OnValueChange<int, ControlPortPtr>(m_DoPlayback, m_Trigger_1_Port)));
//   audio_resample_close(resmplCtx);
//   av_free(m_CodecCtx);
//   av_free(m_Decoded_Frame);
//  avformat_free_context(m_ContainerCtx);
}
예제 #9
0
/**
   \brief Default constructor - initializes members and the codec layer.

   NOTE(review): the original comment here documented a 'gop' parameter
   ("maximal interval in frames between keyframes") that this constructor
   does not take — it was likely copied from another overload; confirm.
**/
QVideoEncoder::QVideoEncoder()
{
   initVars();    // reset all member state to known defaults
   initCodec();   // prepare the codec layer (presumably global library init — confirm)
}