Example no. 1
0
static tbool tsmf_ffmpeg_init_context(ITSMFDecoder* decoder)
{
	TSMFFFmpegDecoder* mdecoder = (TSMFFFmpegDecoder*) decoder;

	mdecoder->codec_context = avcodec_alloc_context();
	if (!mdecoder->codec_context)
	{
		DEBUG_WARN("avcodec_alloc_context failed.");
		return false;
	}

	return true;
}
Example no. 2
0
void
fa_imageloader_init(void)
{
  hts_mutex_init(&image_from_video_mutex);

  AVCodec *c = avcodec_find_encoder(CODEC_ID_PNG);
  if(c != NULL) {
    AVCodecContext *ctx = avcodec_alloc_context();
    if(avcodec_open(ctx, c))
      return;
    pngencoder = ctx;
  }
}
Example no. 3
0
		bool Init(VirtualSink* sink, AVCodec* avCodec, const char* sprops){
			//if it has been initialized before, we should do cleanup first
			Cleanup();

			avCodecContext = avcodec_alloc_context();
			if (!avCodecContext) {
				//failed to allocate codec context
				Cleanup();
				return false;
			}
			uint8_t startCode[] = {0x00, 0x00, 0x01};
			if(sprops != NULL){
				unsigned spropCount;
				SPropRecord* spropRecords = parseSPropParameterSets(sprops, spropCount);
				try{
					for (unsigned i = 0; i < spropCount; ++i) {
						AddExtraData(startCode, sizeof(startCode));
						AddExtraData(spropRecords[i].sPropBytes, spropRecords[i].sPropLength);
					}
				}catch(void*){
					//extradata exceeds size limit
					delete[] spropRecords;
					Cleanup();
					return false;
				}
				delete[] spropRecords;
					
				avCodecContext->extradata = extraDataBuffer;
				avCodecContext->extradata_size = extraDataSize;
			}
			AddExtraData(startCode, sizeof(startCode));
			avCodecContext->flags = 0;

			if (avcodec_open(avCodecContext, avCodec) < 0) {
				//failed to open codec
				Cleanup();
				return false;
			}
			if (avCodecContext->codec_id == CODEC_ID_H264){
				avCodecContext->flags2 |= CODEC_FLAG2_CHUNKS;
				//avCodecContext->flags2 |= CODEC_FLAG2_SHOW_ALL;
			}
			avFrame = avcodec_alloc_frame();
			if (!avFrame){
				//failed to allocate frame
				Cleanup();
				return false;
			}
			return true;
		}
Example no. 4
0
static int
tsmf_ffmpeg_init_context(ITSMFDecoder * decoder)
{
	TSMFFFmpegDecoder * mdecoder = (TSMFFFmpegDecoder *) decoder;

	mdecoder->codec_context = avcodec_alloc_context();
	if (!mdecoder->codec_context)
	{
		LLOGLN(0, ("tsmf_ffmpeg_init_context: avcodec_alloc_context failed."));
		return 1;
	}

	return 0;
}
/** It initializes the FFmpeg framework and opens an FFmpeg video encoder of the type specified by the IL client
  */
OMX_ERRORTYPE omx_videoenc_component_ffmpegLibInit(omx_videoenc_component_PrivateType* omx_videoenc_component_Private) {

  omx_base_video_PortType *inPort = (omx_base_video_PortType *)omx_videoenc_component_Private->ports[OMX_BASE_FILTER_INPUTPORT_INDEX];
  OMX_U32 target_codecID;
  avcodec_init();
  av_register_all();

  DEBUG(DEB_LEV_SIMPLE_SEQ, "FFmpeg library/encoder initialized\n");

  switch(omx_videoenc_component_Private->video_encoding_type) {
    case OMX_VIDEO_CodingMPEG4 :
      target_codecID = CODEC_ID_MPEG4;
      break;
    default :
      DEBUG(DEB_LEV_ERR, "\n encoders other than MPEG-4 are not supported -- encoder not found\n");
      return OMX_ErrorComponentNotFound;
  }

  /** Find the  encoder corresponding to the video type specified by IL client*/
  omx_videoenc_component_Private->avCodec = avcodec_find_encoder(target_codecID);
  if (omx_videoenc_component_Private->avCodec == NULL) {
    DEBUG(DEB_LEV_ERR, "Encoder Not found\n");
    return OMX_ErrorInsufficientResources;
  }

  omx_videoenc_component_Private->avCodecContext = avcodec_alloc_context();
  omx_videoenc_component_Private->picture = avcodec_alloc_frame ();

  /* put sample parameters */
  omx_videoenc_component_Private->avCodecContext->bit_rate = 400000; /* bit per second */
  omx_videoenc_component_Private->avCodecContext->width  = inPort->sPortParam.format.video.nFrameWidth;  
  omx_videoenc_component_Private->avCodecContext->height = inPort->sPortParam.format.video.nFrameHeight;

  /* frames per second */
  DEBUG(DEB_LEV_SIMPLE_SEQ, "Frame Rate=%d\n",(int)inPort->sPortParam.format.video.xFramerate);
  omx_videoenc_component_Private->avCodecContext->time_base= (AVRational){1,inPort->sPortParam.format.video.xFramerate};
  omx_videoenc_component_Private->avCodecContext->gop_size = 5; /* emit one intra frame every five frames */
  omx_videoenc_component_Private->avCodecContext->max_b_frames=1;
  omx_videoenc_component_Private->avCodecContext->pix_fmt = PIX_FMT_YUV420P;

  if (avcodec_open(omx_videoenc_component_Private->avCodecContext, omx_videoenc_component_Private->avCodec) < 0) {
    DEBUG(DEB_LEV_ERR, "Could not open encoder\n");
    return OMX_ErrorInsufficientResources;
  }
  tsem_up(omx_videoenc_component_Private->avCodecSyncSem);
  DEBUG(DEB_LEV_SIMPLE_SEQ, "done\n");

  return OMX_ErrorNone;
}
Example no. 6
0
int tdav_codec_mp4ves_open_decoder(tdav_codec_mp4ves_t* self)
{
	int ret, size;

	if(!self->decoder.codec  && !(self->decoder.codec = avcodec_find_decoder(CODEC_ID_MPEG4))){
		TSK_DEBUG_ERROR("Failed to find MP4V-ES decoder");
		return -1;
	}

	if(self->decoder.context){
		TSK_DEBUG_ERROR("Decoder already opened");
		return -1;
	}

	self->decoder.context = avcodec_alloc_context();
	avcodec_get_context_defaults(self->decoder.context);
	
	self->decoder.context->pix_fmt = PIX_FMT_YUV420P;
	self->decoder.context->width = TMEDIA_CODEC_VIDEO(self)->out.width;
	self->decoder.context->height = TMEDIA_CODEC_VIDEO(self)->out.height;

	// Picture (YUV 420)
	if(!(self->decoder.picture = avcodec_alloc_frame())){
		TSK_DEBUG_ERROR("Failed to create decoder picture");
		return -2;
	}
	avcodec_get_frame_defaults(self->decoder.picture);

	size = avpicture_get_size(PIX_FMT_YUV420P, self->decoder.context->width, self->decoder.context->height);
	if(!(self->decoder.accumulator = tsk_calloc((size + FF_INPUT_BUFFER_PADDING_SIZE), sizeof(uint8_t)))){
		TSK_DEBUG_ERROR("Failed to allocate decoder buffer");
		return -2;
	}

	// Open decoder
	if((ret = avcodec_open(self->decoder.context, self->decoder.codec)) < 0){
		TSK_DEBUG_ERROR("Failed to open MP4V-ES decoder");
		return ret;
	}
    
    self->decoder.last_seq = 0;

	return ret;
}
Example no. 7
0
static void
gst_ffmpegenc_init (GstFFMpegEnc * ffmpegenc)
{
  GstFFMpegEncClass *oclass =
      (GstFFMpegEncClass *) (G_OBJECT_GET_CLASS (ffmpegenc));

  /* setup pads */
  ffmpegenc->sinkpad = gst_pad_new_from_template (oclass->sinktempl, "sink");
  gst_pad_set_setcaps_function (ffmpegenc->sinkpad, gst_ffmpegenc_setcaps);
  gst_pad_set_getcaps_function (ffmpegenc->sinkpad, gst_ffmpegenc_getcaps);
  ffmpegenc->srcpad = gst_pad_new_from_template (oclass->srctempl, "src");
  gst_pad_use_fixed_caps (ffmpegenc->srcpad);

  /* ffmpeg objects */
  ffmpegenc->context = avcodec_alloc_context ();
  ffmpegenc->picture = avcodec_alloc_frame ();
  ffmpegenc->opened = FALSE;

  ffmpegenc->file = NULL;
  ffmpegenc->delay = g_queue_new ();

  if (oclass->in_plugin->type == CODEC_TYPE_VIDEO) {
    gst_pad_set_chain_function (ffmpegenc->sinkpad, gst_ffmpegenc_chain_video);
    /* so we know when to flush the buffers on EOS */
    gst_pad_set_event_function (ffmpegenc->sinkpad, gst_ffmpegenc_event_video);
    gst_pad_set_event_function (ffmpegenc->srcpad, gst_ffmpegenc_event_src);

    ffmpegenc->bitrate = DEFAULT_VIDEO_BITRATE;
    ffmpegenc->me_method = ME_EPZS;
    ffmpegenc->buffer_size = 512 * 1024;
    ffmpegenc->gop_size = DEFAULT_VIDEO_GOP_SIZE;
    ffmpegenc->rtp_payload_size = 0;

    ffmpegenc->lmin = 2;
    ffmpegenc->lmax = 31;
    ffmpegenc->max_key_interval = 0;

    gst_ffmpeg_cfg_set_defaults (ffmpegenc);
  } else if (oclass->in_plugin->type == CODEC_TYPE_AUDIO) {
    gst_pad_set_chain_function (ffmpegenc->sinkpad, gst_ffmpegenc_chain_audio);

    ffmpegenc->bitrate = DEFAULT_AUDIO_BITRATE;
  }

  gst_element_add_pad (GST_ELEMENT (ffmpegenc), ffmpegenc->sinkpad);
  gst_element_add_pad (GST_ELEMENT (ffmpegenc), ffmpegenc->srcpad);

  ffmpegenc->adapter = gst_adapter_new ();
}
Example no. 8
0
ShowObject::ShowObject(const DisplayPara myDispara){
//    hwnd = myDispara.my_hwnd;
    ww = myDispara.my_width;
    hh = myDispara.my_height;

    pFrame = avcodec_alloc_frame(); // allocate a video frame to hold the decoded picture



    mutex_avcodec.lock();

    bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height, SDL_YV12_OVERLAY, screen);
    img_convert_ctx = sws_getContext(pCodecCtx->width,pCodecCtx->height,pCodecCtx->pix_fmt,pCodecCtx->width,pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
    if (img_convert_ctx == NULL){
        cout<<"img_convert_ctx == NULL\n";
        exit(-1);
    }


    pCodecCtxT=avcodec_alloc_context();
    pCodecCtxT->width = pCodecCtx->width;
    pCodecCtxT->height = pCodecCtx->height;
    pCodecCtxT->time_base.num = pCodecCtx->time_base.num;
    pCodecCtxT->time_base.den = pCodecCtx->time_base.den;
    pCodecCtxT->codec_type = pCodecCtx->codec_type;
    pCodecCtxT->bit_rate = pCodecCtx->bit_rate;
    pCodecCtxT->frame_number = pCodecCtx->frame_number;
    pCodecCtxT->coded_frame = pCodecCtx->coded_frame;
    pCodecCtxT->extradata = pCodecCtx->extradata;
    pCodecCtxT->extradata_size = pCodecCtx->extradata_size;

    if(avcodec_open(pCodecCtxT, pCodec)<0){
        cout<<"avcodec_open failure\n";
        exit(-1);
    }

    mutex_avcodec.unlock();

    fileNum = 0;
    pFrameNoNext = 1;
    pFrameNoNow = -1;

    yuv_file = NULL;

    connect(&timer,SIGNAL(timeout()),this,SLOT(deal_timeout()));

//    qDebug()<<"ShwoObject-ID = "<<QThread::currentThreadId();

}
Example no. 9
0
static int preinit(const char *arg)
{
    z_compression = 0;
    use_alpha = 0;
    if (subopt_parse(arg, subopts) != 0) {
        return -1;
    }
    avctx = avcodec_alloc_context();
    if (avcodec_open(avctx, avcodec_find_encoder(CODEC_ID_PNG)) < 0) {
        uninit();
        return -1;
    }
    avctx->compression_level = z_compression;
    return 0;
}
Example no. 10
0
/*
 * Class:     h264_com_VView
 * Method:    InitDecoder
 * Signature: ()I
 */
jint Java_h264_com_VView_InitDecoder(JNIEnv* env, jobject thiz, jint width, jint height)
{
	iWidth = width;
	iHeight = height;

	CreateYUVTab_16();
	
	c = avcodec_alloc_context();

	// avcodec_open() needs the codec as its second argument; this assumes the
	// codecs were registered (avcodec_register_all()) elsewhere in the library
	avcodec_open(c, avcodec_find_decoder(CODEC_ID_H264));

	picture  = avcodec_alloc_frame();//picture= malloc(sizeof(AVFrame));
		
	return 1;
}
bool OStreamVideoDecoder::decoder_initialize ()
{
	// register all codecs
	av_register_all();

	switch ( _decoding_type ) {
		case OSTREAM_DECODING_MJPEG:
			_codec_decode = avcodec_find_decoder( CODEC_ID_MJPEG );
			break;

		case OSTREAM_DECODING_H263P:
			// decoder for h263+ is identified with CODEC_ID_H263
			_codec_decode = avcodec_find_decoder( CODEC_ID_H263 );
			break;

		default:
			//			DEBUG_PRINT ("could not find known OSTREAM_DECODING type.\n" );
			break;
	}

	if ( !_codec_decode ) {
		//    	DEBUG_PRINT( "codec for decoding not found\n" );
		return false;
	}

	_codec_context = avcodec_alloc_context();
	
	/*_codec_context->qmin =2;
	_codec_context->bit_rate = (float)500000*0.7;  
	_codec_context->bit_rate_tolerance=(float)_codec_context->bit_rate/(14);
	_codec_context->time_base.den = 15; 
	_codec_context->time_base.num =1; 
	_codec_context->gop_size = 15*5; */

	_codec_context->width=176;
	_codec_context->height=144;
	_decoded_frame = avcodec_alloc_frame();


	// open it
	if ( avcodec_open( _codec_context, _codec_decode ) < 0 ) {
		//        DEBUG_PRINT( "could not open codec\n" );
		return false;
	}


	return true;
}
Example no. 12
0
//////////////////////////////////////////////////////////////////////
// Init - initializes or reinitializes the encoder SDK with the given input
// and output settings
//
// NOTE: these should all be replaced with calls to the API functions
// lame_set_*().  Applications should not directly access the 'pgf'
// data structure. 
//
//////////////////////////////////////////////////////////////////////
HRESULT CEncoder2::Init()
{
	CAutoLock l(this);

  	/* Initialize avcodec lib */
	avcodec_init();
	
	/* Our "all" is only the mpeg 1 layer 2 audio anyway... */
	avcodec_register_all();

	codec = avcodec_find_encoder(CODEC_ID_MP2);
	if (!codec) 
	{
//		fprintf(logfile,"Couldn't find codec\n");
//	    fclose(logfile);
		return E_OUTOFMEMORY;
    }

//	fprintf(logfile,"Allocating context\n");
	c= avcodec_alloc_context();

	/* put sample parameters */
	if (m_mabsi.dwBitrate > 384)
	{
		m_mabsi.dwBitrate = 128;
	}
	c->bit_rate = m_mabsi.dwBitrate * 1000;
	c->sample_rate = m_wfex.nSamplesPerSec;
	c->channels = m_wfex.nChannels;
	DbgLog((LOG_TRACE, 1, TEXT("Using bitrate=%d sampling=%d"), (LONG)c->bit_rate, (LONG)c->sample_rate));

	/* open it */
	if (avcodec_open(c, codec) < 0) 
	{
//		fprintf(logfile,"Could not open codec\n");
//	    fclose(logfile);
        return E_OUTOFMEMORY;
	}

	/* the codec gives us the frame size, in samples */
	frame_size = c->frame_size;
	DbgLog((LOG_TRACE, 1, TEXT("FrameSize=%d\r\n"), (LONG)frame_size));
	samples = (UINT8 *) malloc(frame_size * 2 * c->channels);
	outbuf_size = 10000;
	outbuf = (UINT8 *) malloc(outbuf_size);
	filled=0;
	return S_OK;
}
Example no. 13
0
bool VideoIO::openOutputCodec(int _width, int _height)
{
	pOutputFile = fopen(outputFilename, "wb");
	if(pOutputFile == NULL)
	{
		fprintf(stderr, "could not open %s\n", outputFilename);
		exit(EXIT_FAILURE);
	}

	pOutputCodec = avcodec_find_encoder(CODEC_ID_MPEG1VIDEO);

	if(pOutputCodec == NULL)
	{
		fprintf(stderr, "could not find encoder\n");
		exit(EXIT_FAILURE);
	}

	pOutputCodecCtx = avcodec_alloc_context();

	//configure variable
	//put sample parameters
	pOutputCodecCtx->bit_rate = 400000;
	//resolution must be a multiple of two
	pOutputCodecCtx->width = _width;
	pOutputCodecCtx->height = _height;
	//frames per second
	pOutputCodecCtx->time_base = (AVRational){1,25};
	pOutputCodecCtx->gop_size = 10;	//emit one intra frame every ten frames
	//	pOutputCodecCtx->time_base = pInputCodecCtx->time_base;
	//	pOutputCodecCtx->gop_size = pInputCodecCtx->gop_size;
	pOutputCodecCtx->max_b_frames = 1;
	pOutputCodecCtx->pix_fmt = PIX_FMT_YUV420P;

	//open it
	if(avcodec_open(pOutputCodecCtx, pOutputCodec) < 0)
	{
		fprintf(stderr, "could not open codec\n");
		exit(EXIT_FAILURE);
	}

	//prepare out buffer
	outSize = 0;
	outBufferSize = (_width) * (_height);
	pOutBuffer = (uint8_t *) malloc (sizeof(uint8_t) * outBufferSize);

	return true;
}
//________________________________________________
//   Init lavcodec encoder
//_______________________________________________
uint8_t AUDMEncoder_Lavcodec::initialize(void)
{
  int ret;
  _context=( void *)avcodec_alloc_context();
  _wavheader->byterate=(lavConfig.bitrate*1000)>>3;

#ifdef ADM_LAV_MP2      
  if( _incoming->getInfo()->channels>2)
  {
    printf("[Lavcodec]Too many channels\n");
    return 0; 
  }
#endif
  _wavheader->byterate=(lavConfig.bitrate*1000)>>3;         
      
#ifdef ADM_LAV_MP2 
    _chunk = 1152*_wavheader->channels;
#else
    _chunk = 1536*_wavheader->channels; // AC3
#endif
  printf("[Lavcodec]Incoming : fq : %lu, channel : %lu bitrate: %lu \n",
         _wavheader->frequency,_wavheader->channels,lavConfig.bitrate);
  
  
  CONTEXT->channels     =  _wavheader->channels;
  CONTEXT->sample_rate  =  _wavheader->frequency;
  CONTEXT->bit_rate     = (lavConfig.bitrate*1000); // kbits -> bits

  AVCodec *codec;
  CodecID codecID;

  
  codecID=makeName(CODEC_ID);
  codec = avcodec_find_encoder(codecID);
  ADM_assert(codec);
  
  ret = avcodec_open(CONTEXT, codec);
  if (0> ret) 
  {
    printf("[Lavcodec] init failed err : %d!\n",ret);
    return 0;
  }


  printf("[Lavcodec]Lavcodec successfully initialized,wavTag : 0x%x\n",makeName(WAV));
  return 1;       
}
Example no. 15
0
long FFmpegVideo::Save2jpeg (uint8_t *buffer, int width, int height, char *fileName)
{ 
		AVFrame *pFrame = 0 ;

		pFrame = avcodec_alloc_frame();
		if(pFrame==0)
			return -1;

		avpicture_fill((AVPicture *)pFrame, buffer, PIX_FMT_BGR24, width,  height);//
		pFrame->width = width;
		pFrame->height = height;
		pFrame->format = PIX_FMT_BGR24;

		AVCodec  *pCodec = 0;
		// find the MJPEG encoder for the video stream
		pCodec = avcodec_find_encoder ( CODEC_ID_MJPEG );

		AVCodecContext *pCodeContext = 0 ;
		// allocate a codec context for the encoder
		pCodeContext = avcodec_alloc_context();
		if(pCodeContext==NULL)
			return -1;

		if(pCodecCtx!=0){
			avcodec_copy_context(pCodeContext, pCodecCtx);
		}
		
		pCodeContext->pix_fmt       = PIX_FMT_BGR24; 
		pCodeContext->codec_id      = CODEC_ID_MJPEG; 
		pCodeContext->codec_type    = AVMEDIA_TYPE_VIDEO;//CODEC_TYPE_VIDEO; 

		pCodeContext->width			= width;
		pCodeContext->height		= height;

		// open the encoder
		if(avcodec_open(pCodeContext, pCodec)<0){
		//	return -1; // handle_error(); // failed to open the encoder
		}

		int BufSizActual =  SaveFrame2jpeg( pCodeContext, pFrame, fileName);

		av_free(pFrame);
		// Close and free the temporary encoder context (not the member pCodecCtx)
		avcodec_close(pCodeContext);
		av_free(pCodeContext);

		return BufSizActual;
} 
Example no. 16
0
static gboolean
gst_ffmpegscale_set_caps (GstBaseTransform * trans, GstCaps * incaps,
    GstCaps * outcaps)
{
  GstFFMpegScale *scale = GST_FFMPEGSCALE (trans);
  GstStructure *instructure = gst_caps_get_structure (incaps, 0);
  GstStructure *outstructure = gst_caps_get_structure (outcaps, 0);
  gint par_num, par_den;
  AVCodecContext *ctx;

  if (!gst_structure_get_int (instructure, "width", &scale->in_width))
    return FALSE;
  if (!gst_structure_get_int (instructure, "height", &scale->in_height))
    return FALSE;

  if (!gst_structure_get_int (outstructure, "width", &scale->out_width))
    return FALSE;
  if (!gst_structure_get_int (outstructure, "height", &scale->out_height))
    return FALSE;

  if (gst_structure_get_fraction (instructure, "pixel-aspect-ratio",
          &par_num, &par_den)) {
    gst_structure_set (outstructure,
        "pixel-aspect-ratio", GST_TYPE_FRACTION,
        par_num * scale->in_width / scale->out_width,
        par_den * scale->in_height / scale->out_height, NULL);
  }

  ctx = avcodec_alloc_context ();
  ctx->width = scale->in_width;
  ctx->height = scale->in_height;
  ctx->pix_fmt = PIX_FMT_NB;
  gst_ffmpeg_caps_with_codectype (CODEC_TYPE_VIDEO, incaps, ctx);
  if (ctx->pix_fmt == PIX_FMT_NB) {
    av_free (ctx);
    return FALSE;
  }

  scale->pixfmt = ctx->pix_fmt;

  av_free (ctx);

  scale->res = img_resample_init (scale->out_width, scale->out_height,
      scale->in_width, scale->in_height);

  return TRUE;
}
int main(int argc, char **argv)
{
	AVCodecContext *ctx;
	int c;
	DSPContext cctx, mmxctx;
	int flags[2] = { AV_CPU_FLAG_MMX, AV_CPU_FLAG_MMX2 };
	int flags_size = HAVE_MMX2 ? 2 : 1;

	for(;;)
	{
		c = getopt(argc, argv, "h");
		if (c == -1)
			break;
		switch(c)
		{
		case 'h':
			help();
			break;
		}
	}

	printf("ffmpeg motion test\n");

	ctx = avcodec_alloc_context();
	ctx->dsp_mask = AV_CPU_FLAG_FORCE;
	dsputil_init(&cctx, ctx);
	for (c = 0; c < flags_size; c++)
	{
		int x;
		ctx->dsp_mask = AV_CPU_FLAG_FORCE | flags[c];
		dsputil_init(&mmxctx, ctx);

		for (x = 0; x < 2; x++)
		{
			printf("%s for %dx%d pixels\n", c ? "mmx2" : "mmx",
			       x ? 8 : 16, x ? 8 : 16);
			test_motion("mmx",     mmxctx.pix_abs[x][0], cctx.pix_abs[x][0]);
			test_motion("mmx_x2",  mmxctx.pix_abs[x][1], cctx.pix_abs[x][1]);
			test_motion("mmx_y2",  mmxctx.pix_abs[x][2], cctx.pix_abs[x][2]);
			test_motion("mmx_xy2", mmxctx.pix_abs[x][3], cctx.pix_abs[x][3]);
		}
	}
	av_free(ctx);

	return 0;
}
Example no. 18
0
FFMpegVideoDecoder::FFMpegVideoDecoder(char* codecName)
{
	codec =avcodec_find_decoder_by_name(codecName);
	if (!codec)
	{
		fprintf(stderr,"failed to find decoder: %s\n",codecName);
	}

	ctx = avcodec_alloc_context();
    decodedFrame = avcodec_alloc_frame();
	avcodec_open(ctx,codec);

	AVPacket avpkt;

    av_init_packet(&avpkt);

}
Example no. 19
0
static int lavc_open(const char *name, AVCodecContext *params,
                     struct frame_format *ff)
{
    int x_off, y_off;
    int edge_width;
    AVCodec *codec;
    int err;

    codec = avcodec_find_decoder(params->codec_id);
    if (!codec) {
        fprintf(stderr, "Can't find codec %x\n", params->codec_id);
        return -1;
    }

    avc = avcodec_alloc_context();

    avc->width          = params->width;
    avc->height         = params->height;
    avc->time_base      = params->time_base;
    avc->extradata      = params->extradata;
    avc->extradata_size = params->extradata_size;

    avc->get_buffer     = get_buffer;
    avc->release_buffer = release_buffer;
    avc->reget_buffer   = reget_buffer;

    err = avcodec_open(avc, codec);
    if (err) {
        fprintf(stderr, "avcodec_open: %d\n", err);
        return err;
    }

    edge_width = avcodec_get_edge_width();
    x_off      = ALIGN(edge_width, 32);
    y_off      = edge_width;

    ff->width  = ALIGN(params->width  + 2 * x_off, 32);
    ff->height = ALIGN(params->height + 2 * y_off, 32);
    ff->disp_x = x_off;
    ff->disp_y = y_off;
    ff->disp_w = params->width;
    ff->disp_h = params->height;
    ff->pixfmt = params->pix_fmt;

    return 0;
}
Example no. 20
0
bool Java_se_forskningsavd_automatonbrain_Decoder_init(JNIEnv* env, jobject thiz) {
    avcodec_init();
    avcodec_register_all();
    pCodecCtx = avcodec_alloc_context();
    pCodec = avcodec_find_decoder( CODEC_ID_H264 );
    av_init_packet( &avpkt );
    if( !pCodec ) {
        return false;
        //printf( "RoboCortex [error]: Unable to initialize decoder\n" );
        //exit( EXIT_DECODER );
    }
    avcodec_open( pCodecCtx, pCodec );

    // Allocate decoder frame
    pFrame = avcodec_alloc_frame();
    return true;
}
//________________________________________________
//   Init lavcodec encoder
//_______________________________________________
uint8_t AUDMEncoder_Lavcodec::init(ADM_audioEncoderDescriptor *config)
{
  int ret;
  _context=( void *)avcodec_alloc_context();
  _wavheader->byterate=(config->bitrate*1000)>>3;

      
  if(_fourcc==WAV_MP2 && _incoming->getInfo()->channels>2)
  {
    printf("[Lavcodec]Too many channels\n");
    return 0; 
  }
  _wavheader->byterate=(config->bitrate*1000)>>3;         
      
  if(_fourcc==WAV_MP2)
    _chunk = 1152*_wavheader->channels;
  else
    _chunk = 1536*_wavheader->channels; // AC3

  printf("[Lavcodec]Incoming : fq : %lu, channel : %lu bitrate: %lu \n",
         _wavheader->frequency,_wavheader->channels,config->bitrate);
  
  
  CONTEXT->channels     =  _wavheader->channels;
  CONTEXT->sample_rate  =  _wavheader->frequency;
  CONTEXT->bit_rate     = (config->bitrate*1000); // kbits -> bits

  AVCodec *codec;
  CodecID codecID;
  
  if(_fourcc==WAV_MP2) codecID=CODEC_ID_MP2;
        else codecID=CODEC_ID_AC3;
  codec = avcodec_find_encoder(codecID);
  ADM_assert(codec);
  
  ret = avcodec_open(CONTEXT, codec);
  if (0> ret) 
  {
    printf("[Lavcodec] init failed err : %d!\n",ret);
    return 0;
  }


  printf("[Lavcodec]Lavcodec successfully initialized\n");
  return 1;       
}
Example no. 22
0
static bool ffemu_init_audio(struct ff_audio_info *audio, struct ffemu_params *param)
{
   AVCodec *codec = avcodec_find_encoder_by_name("flac");
   if (!codec)
      return false;

   audio->encoder = codec;

   // FFmpeg just loves to deprecate stuff :)
#ifdef HAVE_FFMPEG_ALLOC_CONTEXT3
   audio->codec = avcodec_alloc_context3(codec);
#else
   audio->codec = avcodec_alloc_context();
   avcodec_get_context_defaults(audio->codec);
#endif

   audio->codec->sample_rate = (int)roundf(param->samplerate);
   audio->codec->time_base = av_d2q(1.0 / param->samplerate, 1000000);
   audio->codec->channels = param->channels;
   audio->codec->sample_fmt = AV_SAMPLE_FMT_S16;

#ifdef HAVE_FFMPEG_AVCODEC_OPEN2
   if (avcodec_open2(audio->codec, codec, NULL) != 0)
#else
   if (avcodec_open(audio->codec, codec) != 0)
#endif
   {
      return false;
   }

   audio->buffer = (int16_t*)av_malloc(
         audio->codec->frame_size *
         audio->codec->channels *
         sizeof(int16_t));

   if (!audio->buffer)
      return false;

   audio->outbuf_size = FF_MIN_BUFFER_SIZE;
   audio->outbuf = (uint8_t*)av_malloc(audio->outbuf_size);
   if (!audio->outbuf)
      return false;

   return true;
}
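The #ifdef guards in Example no. 22 are the only place in this collection where the legacy avcodec_alloc_context()/avcodec_open() pair sits next to the newer avcodec_alloc_context3()/avcodec_open2() calls. As a rough, hedged companion sketch (not taken from any of the projects quoted here), the same switch can be keyed on the libavcodec version macros instead of a build-system define; the 53.6.0 cutoff and the helper name alloc_and_open() are assumptions for illustration only.

/* Sketch only: allocate and open a codec context across both API generations.
 * The version cutoff below is an assumption; adjust it to the releases you target. */
#include <libavcodec/avcodec.h>

static AVCodecContext *alloc_and_open(AVCodec *codec)
{
    AVCodecContext *ctx;
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(53, 6, 0)
    ctx = avcodec_alloc_context3(codec);        /* takes its defaults from the codec */
    if (!ctx)
        return NULL;
    if (avcodec_open2(ctx, codec, NULL) < 0) {  /* third argument: AVDictionary options */
        av_free(ctx);
        return NULL;
    }
#else
    ctx = avcodec_alloc_context();              /* legacy allocation with generic defaults */
    if (!ctx)
        return NULL;
    if (avcodec_open(ctx, codec) < 0) {
        av_free(ctx);
        return NULL;
    }
#endif
    return ctx;
}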
Example no. 23
0
static gboolean
gst_ffmpegdeinterlace_sink_setcaps (GstPad * pad, GstCaps * caps)
{
  GstFFMpegDeinterlace *deinterlace =
      GST_FFMPEGDEINTERLACE (gst_pad_get_parent (pad));
  GstStructure *structure = gst_caps_get_structure (caps, 0);
  AVCodecContext *ctx;
  GstCaps *src_caps;
  gboolean ret;

  if (!gst_structure_get_int (structure, "width", &deinterlace->width))
    return FALSE;
  if (!gst_structure_get_int (structure, "height", &deinterlace->height))
    return FALSE;

  deinterlace->interlaced = FALSE;
  gst_structure_get_boolean (structure, "interlaced", &deinterlace->interlaced);
  gst_ffmpegdeinterlace_update_passthrough (deinterlace);

  ctx = avcodec_alloc_context ();
  ctx->width = deinterlace->width;
  ctx->height = deinterlace->height;
  ctx->pix_fmt = PIX_FMT_NB;
  gst_ffmpeg_caps_with_codectype (AVMEDIA_TYPE_VIDEO, caps, ctx);
  if (ctx->pix_fmt == PIX_FMT_NB) {
    av_free (ctx);
    return FALSE;
  }

  deinterlace->pixfmt = ctx->pix_fmt;

  av_free (ctx);

  deinterlace->to_size =
      avpicture_get_size (deinterlace->pixfmt, deinterlace->width,
      deinterlace->height);

  src_caps = gst_caps_copy (caps);
  gst_caps_set_simple (src_caps, "interlaced", G_TYPE_BOOLEAN,
      deinterlace->interlaced, NULL);
  ret = gst_pad_set_caps (deinterlace->srcpad, src_caps);
  gst_caps_unref (src_caps);

  return ret;
}
static gboolean
gst_ffmpegdeinterlace_sink_setcaps (GstPad * pad, GstCaps * caps)
{
  GstFFMpegDeinterlace *deinterlace =
      GST_FFMPEGDEINTERLACE (gst_pad_get_parent (pad));
  GstStructure *structure = gst_caps_get_structure (caps, 0);
  AVCodecContext *ctx;
  GValue interlaced = { 0 };
  GstCaps *srcCaps;
  gboolean ret;

  if (!gst_structure_get_int (structure, "width", &deinterlace->width))
    return FALSE;
  if (!gst_structure_get_int (structure, "height", &deinterlace->height))
    return FALSE;

  ctx = avcodec_alloc_context ();
  ctx->width = deinterlace->width;
  ctx->height = deinterlace->height;
  ctx->pix_fmt = PIX_FMT_NB;
  gst_ffmpeg_caps_with_codectype (CODEC_TYPE_VIDEO, caps, ctx);
  if (ctx->pix_fmt == PIX_FMT_NB) {
    av_free (ctx);
    return FALSE;
  }

  deinterlace->pixfmt = ctx->pix_fmt;

  av_free (ctx);

  deinterlace->to_size =
      avpicture_get_size (deinterlace->pixfmt, deinterlace->width,
      deinterlace->height);

  srcCaps = gst_caps_copy (caps);
  g_value_init (&interlaced, G_TYPE_BOOLEAN);
  g_value_set_boolean (&interlaced, FALSE);
  gst_caps_set_value (srcCaps, "interlaced", &interlaced);
  g_value_unset (&interlaced);

  ret = gst_pad_set_caps (deinterlace->srcpad, srcCaps);
  gst_caps_unref (srcCaps);
  return ret;
}
Example no. 25
0
void CFFMPEGLoader::SaveFrame(int iFrame, const char *add) {
    if(pFrame->linesize[0]==0) return;
    FILE *pFile;
    char szFilename[128];
    int  y;


    UINT numBytes=avpicture_get_size(PIX_FMT_RGB24, pVCodecCon->width,
                                     pVCodecCon->height)+100;
    uint8_t *buffer2=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

    AVCodec* bmpCodec = avcodec_find_encoder(CODEC_ID_BMP);

    AVCodecContext* bmpCodecContext = avcodec_alloc_context();

    // set the frame geometry before opening the encoder
    bmpCodecContext->height = pVCodecCon->height;
    bmpCodecContext->width = pVCodecCon->width;

    avcodec_open(bmpCodecContext, bmpCodec);

    int encoded = avcodec_encode_video(bmpCodecContext, buffer2, numBytes,
                                       pFrame);
    avcodec_close(bmpCodecContext);
    av_free(bmpCodecContext);

    // Open file
    sprintf(szFilename, "fr00000.bmp");
    UINT mul=10000,pos=2;
    while(mul>0) {
        szFilename[pos++]=iFrame/mul+'0';
        iFrame%=mul;
        mul/=10;
    }
    string s=add;
    s+=szFilename;
    pFile=fopen(s.c_str(), "wb");
    if(pFile==NULL)
        return;

    fwrite(buffer2, 1, encoded,pFile);

    // Close file
    fclose(pFile);
    av_free(buffer2);
}
AVCodecDecoder::AVCodecDecoder()
	:
	fHeader(),
	fInputFormat(),
	fOutputVideoFormat(),
	fFrame(0),
	fIsAudio(false),
	fCodec(NULL),
	fContext(avcodec_alloc_context()),
	fInputPicture(avcodec_alloc_frame()),
	fOutputPicture(avcodec_alloc_frame()),

	fCodecInitDone(false),

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	fSwsContext(NULL),
#else
	fFormatConversionFunc(NULL),
#endif

	fExtraData(NULL),
	fExtraDataSize(0),
	fBlockAlign(0),

	fStartTime(0),
	fOutputFrameCount(0),
	fOutputFrameRate(1.0),
	fOutputFrameSize(0),

	fChunkBuffer(NULL),
	fChunkBufferOffset(0),
	fChunkBufferSize(0),
	fAudioDecodeError(false),

	fOutputBuffer(NULL),
	fOutputBufferOffset(0),
	fOutputBufferSize(0)
{
	TRACE("AVCodecDecoder::AVCodecDecoder()\n");

	fContext->error_recognition = FF_ER_CAREFUL;
	fContext->error_concealment = 3;
	avcodec_thread_init(fContext, 1);
}
Example no. 27
0
int FileAC3::open_file(int rd, int wr)
{
	int result = 0;

	if(rd)
	{
		if( !mpg_file )
			mpg_file = new FileMPEG(file->asset, file);
		result = mpg_file->open_file(1, 0);
	}

	if( !result && wr )
	{
  		avcodec_init();
		avcodec_register_all();
		codec = avcodec_find_encoder(CODEC_ID_AC3);
		if(!codec)
		{
			eprintf("FileAC3::open_file codec not found.\n");
			result = 1;
		}
		if( !result && !(fd = fopen(asset->path, "w")))
		{
			perror("FileAC3::open_file");
			result = 1;
		}
		if( !result ) {
			codec_context = avcodec_alloc_context();
			((AVCodecContext*)codec_context)->bit_rate = asset->ac3_bitrate * 1000;
			((AVCodecContext*)codec_context)->sample_rate = asset->sample_rate;
			((AVCodecContext*)codec_context)->channels = asset->channels;
			((AVCodecContext*)codec_context)->channel_layout =
				get_channel_layout(asset->channels);
			if(avcodec_open(((AVCodecContext*)codec_context), ((AVCodec*)codec)))
			{
				eprintf("FileAC3::open_file failed to open codec.\n");
				result = 1;
			}
		}
	}

	return result;
}
Example no. 28
0
int audio_decode_init(AVCodecContext **c)
{
	AVCodec *codec;

	avcodec_init();
    avcodec_register_all();
    codec = avcodec_find_decoder(CODEC_ID_ADPCM_IMA_WAV);
    if (!codec) {
        printf( "codec not found\n");
        return 0;
    }
    *c= avcodec_alloc_context();
    if (avcodec_open(*c, codec) < 0) {
        printf( "could not open codec\n");
        return 0;
    }

	return 1;
}
AVCodecEncoder::AVCodecEncoder(uint32 codecID, int bitRateScale)
	:
	Encoder(),
	fBitRateScale(bitRateScale),
	fCodecID((enum CodecID)codecID),
	fCodec(NULL),
	fOwnContext(avcodec_alloc_context()),
	fContext(fOwnContext),
	fCodecInitStatus(CODEC_INIT_NEEDED),

	fFrame(avcodec_alloc_frame()),
	fSwsContext(NULL),

	fFramesWritten(0),

	fChunkBuffer(new(std::nothrow) uint8[kDefaultChunkBufferSize])
{
	TRACE("AVCodecEncoder::AVCodecEncoder()\n");

	if (fCodecID > 0) {
		fCodec = avcodec_find_encoder(fCodecID);
		TRACE("  found AVCodec for %u: %p\n", fCodecID, fCodec);
	}

	memset(&fInputFormat, 0, sizeof(media_format));

	fAudioFifo = av_fifo_alloc(0);

	fDstFrame.data[0] = NULL;
	fDstFrame.data[1] = NULL;
	fDstFrame.data[2] = NULL;
	fDstFrame.data[3] = NULL;

	fDstFrame.linesize[0] = 0;
	fDstFrame.linesize[1] = 0;
	fDstFrame.linesize[2] = 0;
	fDstFrame.linesize[3] = 0;

	// Initial parameters, so we know if the user changed them
	fEncodeParameters.avg_field_size = 0;
	fEncodeParameters.max_field_size = 0;
	fEncodeParameters.quality = 1.0f;
}
Example no. 30
0
static int config(struct vf_instance *vf,
                  int width, int height, int d_width, int d_height,
                  unsigned int flags, unsigned int outfmt) {
    int i;
    AVCodec *enc= avcodec_find_encoder(CODEC_ID_SNOW);

    for(i=0; i<3; i++) {
        int is_chroma= !!i;
        int w= ((width  + 4*BLOCK-1) & (~(2*BLOCK-1)))>>is_chroma;
        int h= ((height + 4*BLOCK-1) & (~(2*BLOCK-1)))>>is_chroma;

        vf->priv->temp_stride[i]= w;
        vf->priv->temp[i]= malloc(vf->priv->temp_stride[i]*h*sizeof(int16_t));
        vf->priv->src [i]= malloc(vf->priv->temp_stride[i]*h*sizeof(uint8_t));
    }
    for(i=0; i< (1<<vf->priv->log2_count); i++) {
        AVCodecContext *avctx_enc;

        avctx_enc=
            vf->priv->avctx_enc[i]= avcodec_alloc_context();
        avctx_enc->width = width + BLOCK;
        avctx_enc->height = height + BLOCK;
        avctx_enc->time_base= (AVRational) {
            1,25
        };  // meaningless
        avctx_enc->gop_size = 300;
        avctx_enc->max_b_frames= 0;
        avctx_enc->pix_fmt = PIX_FMT_YUV420P;
        avctx_enc->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_LOW_DELAY;
        avctx_enc->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
        avctx_enc->global_quality= 123;
        avcodec_open(avctx_enc, enc);
        assert(avctx_enc->codec);
    }
    vf->priv->frame= avcodec_alloc_frame();
    vf->priv->frame_dec= avcodec_alloc_frame();

    vf->priv->outbuf_size= (width + BLOCK)*(height + BLOCK)*10;
    vf->priv->outbuf= malloc(vf->priv->outbuf_size);

    return vf_next_config(vf,width,height,d_width,d_height,flags,outfmt);
}