Example #1
FFmpegAudioDecoder<LIBAV_VER>::FFmpegAudioDecoder(FFmpegLibWrapper* aLib,
  FlushableTaskQueue* aTaskQueue, MediaDataDecoderCallback* aCallback,
  const AudioInfo& aConfig)
  : FFmpegDataDecoder(aLib, aTaskQueue, aCallback, GetCodecId(aConfig.mMimeType))
{
  MOZ_COUNT_CTOR(FFmpegAudioDecoder);
  // Use a new MediaByteBuffer as the object will be modified during initialization.
  mExtraData = new MediaByteBuffer;
  mExtraData->AppendElements(*aConfig.mCodecSpecificConfig);
}
Example #2
FFmpegAudioDecoder<LIBAV_VER>::FFmpegAudioDecoder(FFmpegLibWrapper* aLib,
  TaskQueue* aTaskQueue, const AudioInfo& aConfig)
  : FFmpegDataDecoder(aLib, aTaskQueue, GetCodecId(aConfig.mMimeType))
{
  MOZ_COUNT_CTOR(FFmpegAudioDecoder);
  // Use a new MediaByteBuffer as the object will be modified during
  // initialization.
  if (aConfig.mCodecSpecificConfig && aConfig.mCodecSpecificConfig->Length()) {
    mExtraData = new MediaByteBuffer;
    mExtraData->AppendElements(*aConfig.mCodecSpecificConfig);
  }
}
Example #3
FFmpegH264Decoder<LIBAV_VER>::FFmpegH264Decoder(
  FlushableTaskQueue* aTaskQueue, MediaDataDecoderCallback* aCallback,
  const VideoInfo& aConfig,
  ImageContainer* aImageContainer)
  : FFmpegDataDecoder(aTaskQueue, aCallback, GetCodecId(aConfig.mMimeType))
  , mImageContainer(aImageContainer)
  , mDisplay(aConfig.mDisplay)
  , mImage(aConfig.mImage)
{
  MOZ_COUNT_CTOR(FFmpegH264Decoder);
  // Use a new MediaByteBuffer as the object will be modified during initialization.
  mExtraData = new MediaByteBuffer;
  mExtraData->AppendElements(*aConfig.mExtraData);
}
Example #4
FFmpegVideoDecoder<LIBAV_VER>::FFmpegVideoDecoder(FFmpegLibWrapper* aLib,
  FlushableTaskQueue* aTaskQueue, MediaDataDecoderCallback* aCallback,
  const VideoInfo& aConfig,
  ImageContainer* aImageContainer)
  : FFmpegDataDecoder(aLib, aTaskQueue, aCallback, GetCodecId(aConfig.mMimeType))
  , mImageContainer(aImageContainer)
  , mInfo(aConfig)
  , mCodecParser(nullptr)
{
  MOZ_COUNT_CTOR(FFmpegVideoDecoder);
  // Use a new MediaByteBuffer as the object will be modified during initialization.
  mExtraData = new MediaByteBuffer;
  mExtraData->AppendElements(*aConfig.mExtraData);
}
Example #5
FFmpegVideoDecoder<LIBAV_VER>::FFmpegVideoDecoder(
  FFmpegLibWrapper* aLib, TaskQueue* aTaskQueue, const VideoInfo& aConfig,
  ImageContainer* aImageContainer, bool aLowLatency)
  : FFmpegDataDecoder(aLib, aTaskQueue, GetCodecId(aConfig.mMimeType))
  , mImageContainer(aImageContainer)
  , mInfo(aConfig)
  , mCodecParser(nullptr)
  , mLastInputDts(INT64_MIN)
  , mLowLatency(aLowLatency)
{
  MOZ_COUNT_CTOR(FFmpegVideoDecoder);
  // Use a new MediaByteBuffer as the object will be modified during
  // initialization.
  mExtraData = new MediaByteBuffer;
  mExtraData->AppendElements(*aConfig.mExtraData);
}
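All five constructors share one pattern: the codec-specific bytes from the incoming config are copied into a freshly allocated MediaByteBuffer, because that buffer may be rewritten while the codec is being opened. A minimal standalone sketch of that copy-before-mutation idea, using std::vector as a hypothetical stand-in for MediaByteBuffer and the config types above:

#include <cstdint>
#include <vector>

// Hypothetical stand-in for the AudioInfo/VideoInfo configs used above.
struct Config {
  std::vector<uint8_t> mCodecSpecificConfig;
};

class Decoder {
public:
  explicit Decoder(const Config& aConfig)
  {
    // Deep-copy the extradata so that any in-place edits made while the
    // codec is initialized never touch the caller's buffer.
    if (!aConfig.mCodecSpecificConfig.empty()) {
      mExtraData = aConfig.mCodecSpecificConfig;
    }
  }

private:
  std::vector<uint8_t> mExtraData;
};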
	// initialization taken from plugin h263-1998 in opal-2.8.0
	bool VideoEncoderFfmpeg::InitEncoder(int bitrate, int fps, int width, int height,
			int fragsize, int *fragcount, const char *enc_params)
	{
		if (!VideoEncoder::InitEncoder(bitrate, fps, width, height, fragsize, fragcount, enc_params))
			goto InitEncoder_ErrInitParent;

		avcodec_init();
		avcodec_register_all();

		// codec
		codec_ = avcodec_find_encoder(GetCodecId());
		if (NULL == codec_)
			goto InitEncoder_ErrFindEncoder;

		// frame
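		// YUV420P layout: one full-resolution Y plane plus quarter-resolution U and V
		// planes, hence width * height * 3 / 2 bytes in total.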
		input_buffer_size_ = width * height * 3 / 2;
		input_buffer_ = static_cast<unsigned char *>(
			_aligned_malloc(input_buffer_size_, kMemAlign));
		if (NULL == input_buffer_)
			goto InitEncoder_ErrAllocFrame;
		frame_ = avcodec_alloc_frame();
		if (NULL == frame_)
			goto InitEncoder_ErrAllocFrame;
		frame_->data[0] = input_buffer_;
		frame_->data[1] = frame_->data[0] + width * height;
		frame_->data[2] = frame_->data[1] + width * height / 4;
		frame_->linesize[0] = width;
		frame_->linesize[1] =
		frame_->linesize[2] = width / 2;

		// context
		context_ = avcodec_alloc_context3(codec_);
		if (NULL == context_)
			goto InitEncoder_ErrAllocContext;
		context_->pix_fmt = PIX_FMT_YUV420P;
		context_->width = width;
		context_->height = height;
		context_->time_base.num = 1;
		context_->time_base.den = fps;
		context_->gop_size = param_video()->GetGopSize();

		context_->flags = CODEC_FLAG_INPUT_PRESERVED
			| CODEC_FLAG_EMU_EDGE
			| CODEC_FLAG_PASS1
			| GetFlags();
		context_->mb_decision = FF_MB_DECISION_SIMPLE;
		context_->me_method = ME_EPZS;
		context_->max_b_frames = 0;
		
		// target bitrate
		context_->bit_rate = bitrate * 3 / 4;
		context_->bit_rate_tolerance = bitrate / 2;
		context_->rc_min_rate = 0;
		context_->rc_max_rate = bitrate;
		context_->rc_buffer_size = bitrate / 1000;   // note: overwritten below with bitrate * 64

		/* rate control qmin/qmax limiting method:
		   0 -> clipping, 1 -> use a nice continuous function to limit qscale within qmin/qmax.
		*/
		context_->rc_qsquish = 0;            // limit q by clipping 
		context_->rc_eq = (char*) "1";       // rate control equation
		context_->rc_buffer_size = bitrate * 64;

		// temporal spatial trade off
		context_->max_qdiff = 10;                // max q difference between frames (was 3)
		context_->qcompress = 0.5;               // qscale factor between easy & hard scenes (0.0-1.0)
		context_->i_quant_factor = (float)-0.6;  // qscale factor between p and i frames
		context_->i_quant_offset = (float)0.0;   // qscale offset between p and i frames
		context_->me_subpel_quality = 8;

		context_->qmin = MIN_QUANT;
		context_->qmax = static_cast<int>(round ( (31.0 - MIN_QUANT) / 31.0 * GetTsto() + MIN_QUANT));
		context_->qmax = min(context_->qmax, 31);
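		// Worked example (hypothetical values): MIN_QUANT == 2 and GetTsto() == 15 give
		// qmax = round((31 - 2) / 31 * 15 + 2) = round(16.03) = 16, which the min()
		// above leaves unchanged.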

		// TODO: see how to map these in ffmpeg 0.10.3
		//context_->mb_qmin = context_->qmin;
		//context_->mb_qmax = context_->qmax;

		// Lagrange multipliers - this is how the context defaults do it:
		context_->lmin = context_->qmin * FF_QP2LAMBDA;
		context_->lmax = context_->qmax * FF_QP2LAMBDA; 

		context_->debug = FF_DEBUG_RC | FF_DEBUG_PICT_INFO | FF_DEBUG_MV;

		// fragmentation
		if (fragsize > 0) {
			context_->opaque = GetOpaque();
			context_->rtp_payload_size = 1; // with 0 ffmpeg does not split at GOBs; with a
				// large value it aggregates GOBs without inserting GOB headers
		}

		if (0 != avcodec_open2(context_, codec_, &opts_)) {
			goto InitEncoder_ErrOpenCodec;
		}
		return true;

InitEncoder_ErrOpenCodec:
		avcodec_close(context_);
		av_free(context_);
		context_ = NULL;
InitEncoder_ErrAllocContext:
		av_free(frame_);	
		frame_ = NULL;
InitEncoder_ErrAllocFrame:
		codec_ = NULL;
		_aligned_free(input_buffer_);
		input_buffer_ = NULL;
		input_buffer_size_ = 0;
InitEncoder_ErrFindEncoder:
		VideoEncoder::DestroyEncoder();
InitEncoder_ErrInitParent:
		return false;
	}
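
The error paths above unwind with a goto ladder: each label releases exactly the resources acquired before the corresponding failure point, in reverse order of acquisition, so a single return-false exit serves every failure. A self-contained sketch of the same pattern with hypothetical resources (plain malloc/free instead of the encoder's real members):

#include <cstdlib>

bool InitThreeResources(void** a, void** b, void** c)
{
	*a = malloc(16);
	if (NULL == *a)
		goto Err_A;

	*b = malloc(32);
	if (NULL == *b)
		goto Err_B;

	*c = malloc(64);
	if (NULL == *c)
		goto Err_C;

	return true;        // success: the caller now owns a, b and c

Err_C:                  // undo in reverse order of acquisition
	free(*b);
	*b = NULL;
Err_B:
	free(*a);
	*a = NULL;
Err_A:
	return false;
}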