// Builds a subtitle stream on top of the generic Stream and validates that
// the underlying codec produces bitmap subtitles (the only kind supported).
SubtitleStream::SubtitleStream(AVFormatContext*& formatCtx, AVStream*& stream, DataSource& dataSource, std::shared_ptr<Timer> timer, Delegate& delegate) :
 Stream(formatCtx, stream, dataSource, timer), m_delegate(delegate)
 {
     const AVCodecDescriptor* descriptor = av_codec_get_codec_descriptor(m_stream->codec);
     CHECK(descriptor != NULL, "Could not get the codec descriptor!");

     // Text-based subtitle codecs are rejected up front.
     const bool providesBitmapSubtitles = (descriptor->props & AV_CODEC_PROP_BITMAP_SUB) != 0;
     CHECK(providesBitmapSubtitles,
           "Subtitle stream doesn't provide bitmap subtitles, this is not supported yet!"
           "\nSee https://github.com/Yalir/sfeMovie/issues/7");
 }
Example #2
0
/**
 * Update the next thread's AVCodecContext with values from the reference thread's context.
 *
 * @param dst The destination context.
 * @param src The source context.
 * @param for_user 0 if the destination is a codec thread, 1 if the destination is the user's thread
 * @return 0 on success, negative error code on failure
 */
static int update_context_from_thread(AVCodecContext *dst, AVCodecContext *src, int for_user)
{
    int err = 0;

    /* Stream parameters are copied either for the user's context, or for a
     * worker-thread context when the codec is not intra-only (intra-only
     * codecs don't need inter-frame state propagated between threads). */
    if (dst != src && (for_user || !(av_codec_get_codec_descriptor(src)->props & AV_CODEC_PROP_INTRA_ONLY))) {
        /* Timing and geometry. */
        dst->time_base = src->time_base;
        dst->framerate = src->framerate;
        dst->width     = src->width;
        dst->height    = src->height;
        dst->pix_fmt   = src->pix_fmt;
        dst->sw_pix_fmt = src->sw_pix_fmt;

        dst->coded_width  = src->coded_width;
        dst->coded_height = src->coded_height;

        dst->has_b_frames = src->has_b_frames;
        dst->idct_algo    = src->idct_algo;

        dst->bits_per_coded_sample = src->bits_per_coded_sample;
        dst->sample_aspect_ratio   = src->sample_aspect_ratio;
#if FF_API_AFD
FF_DISABLE_DEPRECATION_WARNINGS
        /* Deprecated Active Format Description field, kept under its
         * compatibility guard. */
        dst->dtg_active_format     = src->dtg_active_format;
FF_ENABLE_DEPRECATION_WARNINGS
#endif /* FF_API_AFD */

        dst->profile = src->profile;
        dst->level   = src->level;

        dst->bits_per_raw_sample = src->bits_per_raw_sample;
        dst->ticks_per_frame     = src->ticks_per_frame;
        /* Color description metadata. */
        dst->color_primaries     = src->color_primaries;

        dst->color_trc   = src->color_trc;
        dst->colorspace  = src->colorspace;
        dst->color_range = src->color_range;
        dst->chroma_sample_location = src->chroma_sample_location;

        /* Hardware acceleration state is shared by pointer, not duplicated. */
        dst->hwaccel = src->hwaccel;
        dst->hwaccel_context = src->hwaccel_context;

        /* Audio parameters (the same helper serves audio decoders too). */
        dst->channels       = src->channels;
        dst->sample_rate    = src->sample_rate;
        dst->sample_fmt     = src->sample_fmt;
        dst->channel_layout = src->channel_layout;
        dst->internal->hwaccel_priv_data = src->internal->hwaccel_priv_data;

        /* Re-reference the hw frames context only when it actually changed
         * (presence differs, or the two refs point at different buffers). */
        if (!!dst->hw_frames_ctx != !!src->hw_frames_ctx ||
            (dst->hw_frames_ctx && dst->hw_frames_ctx->data != src->hw_frames_ctx->data)) {
            av_buffer_unref(&dst->hw_frames_ctx);

            if (src->hw_frames_ctx) {
                dst->hw_frames_ctx = av_buffer_ref(src->hw_frames_ctx);
                if (!dst->hw_frames_ctx)
                    return AVERROR(ENOMEM);
            }
        }

        dst->hwaccel_flags = src->hwaccel_flags;
    }

    if (for_user) {
        /* Frame threading introduces one frame of delay per extra thread. */
        dst->delay       = src->thread_count - 1;
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
        dst->coded_frame = src->coded_frame;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    } else {
        if (dst->codec->update_thread_context)
        /* NOTE(review): this snippet is truncated here — the body of this
         * branch (the update_thread_context call assigning err) and the
         * function's closing brace are not visible in this chunk. */
void Utility::VideoLoader::loadP() {
	isRunning_=true;
	target_->setIsComplete(false);
	// Contains information about the stream
	AVFormatContext *formatContext = NULL;

	// Contains information about the codex
	AVCodecContext *codecContext = NULL;

	// The coder with wich to decode the video
	AVCodec *codec = NULL;

	// Open video file
	// avformat_open_input(context, path, format, options)
	// format = NULL means autodetect
	if(!path_.isEmpty()
	        && avformat_open_input(&formatContext, path_.toUtf8(), NULL, NULL)!=0) {
		target_->setIsComplete(true);
		return;
	}

	// Retrieve stream information
	if(avformat_find_stream_info(formatContext, NULL)<0) {
		target_->setIsComplete(true);
		return;
	}

	// Print stream information
	// av_dump_format(formatContext, 0, path_.toUtf8(), 0);


	// Find the best video stream in context
	int videoStreamIndex = av_find_best_stream(formatContext, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
	if(videoStreamIndex == -1) {
		target_->setIsComplete(true);
		return;
	}

	// Get a pointer to the codec context for the video stream
	codecContext = formatContext->streams[videoStreamIndex]->codec;

	// Find the decoder for the video stream
	codec = avcodec_find_decoder(codecContext->codec_id);
	if(codec == NULL) {
		target_->setIsComplete(true);
		return;
	}

	// Open codec
	if(avcodec_open2(codecContext, codec, &dict_) < 0) {
		target_->setIsComplete(true);
		return;
	}

	struct SwsContext      *sws_ctx = NULL;

	averageBitrate_=codecContext->bit_rate;
	codec_=QString(av_codec_get_codec_descriptor(codecContext)->name);
	if(codec_=="")
		codec_="N/A";

	sws_ctx =
	    sws_getContext
	    (
	        codecContext->width,
	        codecContext->height,
	        codecContext->pix_fmt,
	        codecContext->width,
	        codecContext->height,
	        AV_PIX_FMT_RGB24,
	        0,
	        0,
	        0,
	        0
	    );

	AVPacket packet;
	AVFrame *frame = NULL;
	frame = av_frame_alloc();

	AVFrame* rgbframe=NULL;
	uint8_t* buffer = NULL;
	int numbytes=avpicture_get_size(AV_PIX_FMT_RGB24, codecContext->width,codecContext->height);

	target_->setFps(codecContext->framerate.num);
	av_init_packet(&packet);
	packet.data = NULL;
	packet.size = 0;
	int gotPicture = 0;
	while(av_read_frame(formatContext, &packet) >= 0&&isRunning_) {
		avcodec_decode_video2(codecContext, frame, &gotPicture, &packet);

		if(gotPicture != 0) {
			rgbframe=av_frame_alloc();

			buffer=(uint8_t *)av_malloc(numbytes*sizeof(uint8_t));
			avpicture_fill((AVPicture *)rgbframe, buffer, AV_PIX_FMT_RGB24,codecContext->width,
			               codecContext->height);
			rgbframe->width=codecContext->width;
			rgbframe->height=codecContext->height;
			rgbframe->format=AV_PIX_FMT_RGB24;
			rgbframe->pkt_size=frame->pkt_size;

			sws_scale
			(
			    sws_ctx,
			    frame->data,
			    frame->linesize,
			    0,
			    codecContext->height,
			    rgbframe->data,
			    rgbframe->linesize
			);

			target_->appendFrame(rgbframe);
		}
	}

	packet.data=NULL;
	packet.size=0;

	while(isRunning_) {
		avcodec_decode_video2(codecContext, frame, &gotPicture, &packet);

		if(gotPicture == 0)
			break;

		rgbframe=av_frame_alloc();

		buffer=(uint8_t *)av_malloc(numbytes*sizeof(uint8_t));
		avpicture_fill((AVPicture *)rgbframe, buffer, AV_PIX_FMT_RGB24,codecContext->width,
		               codecContext->height);
		rgbframe->width=codecContext->width;
		rgbframe->height=codecContext->height;
		rgbframe->format=AV_PIX_FMT_RGB24;
		rgbframe->pkt_size=frame->pkt_size;

		sws_scale
		(
		    sws_ctx,
		    frame->data,
		    frame->linesize,
		    0,
		    codecContext->height,
		    rgbframe->data,
		    rgbframe->linesize
		);

		target_->appendFrame(rgbframe);
	}
	av_frame_unref(frame);
	av_frame_free(&frame);
	avcodec_close(codecContext);
	avformat_close_input(&formatContext);
	isRunning_=false;
	if(dict_) {
		free(dict_);
	}
	target_->setIsComplete(true);
}