Example #1
static int open_codec_context(int *stream_idx, AVFormatContext *fmt_ctx, enum AVMediaType type)
{
    int ret;
    AVStream *st;
    AVCodecContext *dec_ctx = NULL;
    AVCodec *dec = NULL;
    ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
    if (ret < 0) {
		MOTION_LOG(ERR, TYPE_NETCAM, NO_ERRNO, "%s: Could not find stream %s in input!", av_get_media_type_string(type));
        return ret;
    } else {
        *stream_idx = ret;
        st = fmt_ctx->streams[*stream_idx];
        /* find decoder for the stream */
        dec_ctx = st->codec;
        dec = avcodec_find_decoder(dec_ctx->codec_id);
        if (!dec) {
    		MOTION_LOG(ERR, TYPE_NETCAM, NO_ERRNO, "%s: Failed to find %s codec!", av_get_media_type_string(type));
            return AVERROR(EINVAL); /* ret still holds the stream index here, not an error code */
        }
        if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
    		MOTION_LOG(ERR, TYPE_NETCAM, NO_ERRNO, "%s: Failed to open %s codec!", av_get_media_type_string(type));
            return ret;
        }
    }
    return 0;
}
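For reference, a helper like the one above is normally driven by a small amount of setup code. The sketch below is a hedged illustration rather than code from the project; src_filename and video_stream_idx are placeholder names, and error handling is reduced to early exits.
#include <libavformat/avformat.h>

/* Hedged usage sketch (not from the project above). */
static int open_input_sketch(const char *src_filename)
{
    AVFormatContext *fmt_ctx = NULL;
    int video_stream_idx = -1;
    int ret;

    if ((ret = avformat_open_input(&fmt_ctx, src_filename, NULL, NULL)) < 0)
        return ret;                              /* could not open the input */
    if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0)
        goto end;                                /* could not read stream info */

    ret = open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO);
    /* ... demux/decode loop would go here ... */

end:
    avformat_close_input(&fmt_ctx);
    return ret;
}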
/**
 * Check the validity of the graph.
 *
 * A graph is considered valid if all its input and output pads are
 * connected.
 *
 * @return 0 in case of success, a negative value otherwise
 */
static int graph_check_validity(AVFilterGraph *graph, AVClass *log_ctx)
{
    AVFilterContext *filt;
    int i, j;

    for (i = 0; i < graph->filter_count; i++) {
        const AVFilterPad *pad;
        filt = graph->filters[i];

        for (j = 0; j < filt->nb_inputs; j++) {
            if (!filt->inputs[j] || !filt->inputs[j]->src) {
                pad = &filt->input_pads[j];
                av_log(log_ctx, AV_LOG_ERROR,
                       "Input pad \"%s\" with type %s of the filter instance \"%s\" of %s not connected to any source\n",
                       pad->name, av_get_media_type_string(pad->type), filt->name, filt->filter->name);
                return AVERROR(EINVAL);
            }
        }

        for (j = 0; j < filt->nb_outputs; j++) {
            if (!filt->outputs[j] || !filt->outputs[j]->dst) {
                pad = &filt->output_pads[j];
                av_log(log_ctx, AV_LOG_ERROR,
                       "Output pad \"%s\" with type %s of the filter instance \"%s\" of %s not connected to any destination\n",
                       pad->name, av_get_media_type_string(pad->type), filt->name, filt->filter->name);
                return AVERROR(EINVAL);
            }
        }
    }

    return 0;
}
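graph_check_validity() is an internal libavfilter helper; from the public API, this kind of validation runs when avfilter_graph_config() is called, once every filter has been created and linked. A hedged sketch of the caller side:
#include <libavfilter/avfilter.h>

/* Hedged sketch: configuring the graph triggers validity checks (among other
 * steps such as format negotiation). */
static int configure_graph_sketch(AVFilterGraph *filter_graph)
{
    int ret = avfilter_graph_config(filter_graph, NULL);
    if (ret < 0)
        av_log(NULL, AV_LOG_ERROR, "Failed to configure the filter graph\n");
    return ret;
}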
Example #3
int open_codec_context(int *stream_idx, AVFormatContext *fmt_ctx, enum AVMediaType type) {
    int ret;
    AVStream *st;
    AVCodecContext *dec_ctx = NULL;
    AVCodec *dec = NULL;
    AVDictionary *opts = NULL;
    
    ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
    if (ret < 0) {
        LOGE("can't find %s stream in input file\n", av_get_media_type_string(type));
        return ret;
    } else {
        *stream_idx = ret;
        st = fmt_ctx->streams[*stream_idx];
        
        dec_ctx = st->codec;
        dec = avcodec_find_decoder(dec_ctx->codec_id);
        if (!dec) {
            LOGE("failed to find %s codec\n", av_get_media_type_string(type));
            return AVERROR(EINVAL);
        }
        
        av_dict_set(&opts, "refcounted_frames", "1", 0);
        if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
            LOGE("failed to open %s codec\n", av_get_media_type_string(type));
            return ret;
        }
    }
    
    return 0;
}
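Because this variant opens the decoder with refcounted_frames set to "1", the decode loop that uses it has to release every frame it has consumed. A hedged sketch of that obligation (the decode call itself is elided):
#include <libavutil/frame.h>
#include <libavutil/error.h>

/* Hedged sketch: with "refcounted_frames" = "1", each decoded frame owns
 * buffer references that the caller must drop once the data has been used. */
static int frame_lifetime_sketch(void)
{
    AVFrame *frame = av_frame_alloc();
    if (!frame)
        return AVERROR(ENOMEM);

    /* ... per iteration: decode into `frame`, consume its data, then ... */
    av_frame_unref(frame);   /* mandatory with refcounted frames */

    av_frame_free(&frame);
    return 0;
}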
Example #4
int open_codec_context(int *stream_idx, AVFormatContext *fmt_ctx, enum AVMediaType type, const char *src_filename) {
  int ret;
  AVStream *st;
  AVCodecContext *dec_ctx = NULL;
  AVCodec *dec = NULL;
  AVDictionary *opts = NULL;

  ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
  if (ret < 0) {
    fprintf(stderr, "Could not find %s stream in input file '%s'\n", av_get_media_type_string(type), src_filename);
    return ret;
  } else {
    *stream_idx = ret;
    st = fmt_ctx->streams[*stream_idx];

    /* find decoder for the stream */
    dec_ctx = st->codec;
    dec = avcodec_find_decoder(dec_ctx->codec_id);
    if (!dec) {
      fprintf(stderr, "Failed to find %s codec\n", av_get_media_type_string(type));
      return AVERROR(EINVAL);
    }

    /* Init the decoders, with or without reference counting */
//     if (api_mode == API_MODE_NEW_API_REF_COUNT)
//       av_dict_set(&opts, "refcounted_frames", "1", 0);
    if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
      fprintf(stderr, "Failed to open %s codec\n", av_get_media_type_string(type));
      return ret;
    }
  }

  return 0;
}
Example #5
/*----------------------------------------------------------------------
|    open_codec_context
+---------------------------------------------------------------------*/
static int open_codec_context(int* stream_idx, AVFormatContext* fmt_ctx, AVMediaType type)
{
    int ret;
    AVStream* st;
    AVCodecContext* dec_ctx = NULL;
    AVCodec* dec = NULL;

    ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
    if (ret < 0) {
        LOG_ERROR(LOG_TAG, "Could not find %s stream in input file.", av_get_media_type_string(type));
        return ret;
    }

    *stream_idx = ret;
    st = fmt_ctx->streams[*stream_idx];

    // Find decoder for the stream.
    dec_ctx = st->codec;
    dec = avcodec_find_decoder(dec_ctx->codec_id);
    if (!dec) {
        LOG_ERROR(LOG_TAG, "Failed to find %s codec.", av_get_media_type_string(type));
        return AVERROR(EINVAL);
    }

    if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
        LOG_ERROR(LOG_TAG, "Failed to open %s codec.", av_get_media_type_string(type));
        return ret;
    }

    return ret;
}
Example #6
static int open_codec_context(int *stream_idx,
                              AVFormatContext *fmt_ctx, enum AVMediaType type)
{
    int ret;
    AVStream *st;
    AVCodecContext *dec_ctx = NULL;
    AVCodec *dec = NULL;

    ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not find %s stream in input file '%s'\n",
                av_get_media_type_string(type), src_filename);
        return ret;
    } else {
        *stream_idx = ret;
        st = fmt_ctx->streams[*stream_idx];

        /* find decoder for the stream */
        dec_ctx = st->codec;
        dec = avcodec_find_decoder(dec_ctx->codec_id);
        if (!dec) {
            fprintf(stderr, "Failed to find %s codec\n",
                    av_get_media_type_string(type));
            return AVERROR(EINVAL);
        }

        if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
            fprintf(stderr, "Failed to open %s codec\n",
                    av_get_media_type_string(type));
            return ret;
        }
    }

    return 0;
}
static int open_codec_context(int *stream_idx,
                              AVFormatContext *fmt_ctx, enum AVMediaType type)
{
    int ret, stream_index;
    AVStream *st;
    AVCodecContext *dec_ctx = NULL;
    AVCodec *dec = NULL;
    AVDictionary *opts = NULL;
    ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not find %s stream in input file '%s'\n",
                av_get_media_type_string(type), src_filename);
        return ret;
    } else {
        stream_index = ret;
        st = fmt_ctx->streams[stream_index];
        /* find decoder for the stream */
        dec_ctx = st->codec;
        dec = avcodec_find_decoder(dec_ctx->codec_id);
        if (!dec) {
            fprintf(stderr, "Failed to find %s codec\n",
                    av_get_media_type_string(type));
            return AVERROR(EINVAL);
        }
        /* Init the decoders, with or without reference counting */
        av_dict_set(&opts, "refcounted_frames", refcount ? "1" : "0", 0);
        if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
            fprintf(stderr, "Failed to open %s codec\n",
                    av_get_media_type_string(type));
            return ret;
        }
        *stream_idx = stream_index;
    }
    return 0;
}
Example #8
int avfilter_link(AVFilterContext *src, unsigned srcpad,
                  AVFilterContext *dst, unsigned dstpad)
{
    AVFilterLink *link;

    if (src->nb_outputs <= srcpad || dst->nb_inputs <= dstpad ||
        src->outputs[srcpad]      || dst->inputs[dstpad])
        return AVERROR(EINVAL);

    if (src->output_pads[srcpad].type != dst->input_pads[dstpad].type) {
        av_log(src, AV_LOG_ERROR,
               "Media type mismatch between the '%s' filter output pad %d (%s) and the '%s' filter input pad %d (%s)\n",
               src->name, srcpad, (char *)av_x_if_null(av_get_media_type_string(src->output_pads[srcpad].type), "?"),
               dst->name, dstpad, (char *)av_x_if_null(av_get_media_type_string(dst-> input_pads[dstpad].type), "?"));
        return AVERROR(EINVAL);
    }

    link = av_mallocz(sizeof(*link));
    if (!link)
        return AVERROR(ENOMEM);

    src->outputs[srcpad] = dst->inputs[dstpad] = link;

    link->src     = src;
    link->dst     = dst;
    link->srcpad  = &src->output_pads[srcpad];
    link->dstpad  = &dst->input_pads[dstpad];
    link->type    = src->output_pads[srcpad].type;
    av_assert0(AV_PIX_FMT_NONE == -1 && AV_SAMPLE_FMT_NONE == -1);
    link->format  = -1;

    return 0;
}
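For context, a hedged sketch of how avfilter_link() is usually reached from user code: two filter instances are created in the same graph, then the first one's output pad is wired to the second one's input pad. The "null" and "nullsink" filters and the instance names are chosen only to keep the illustration self-contained.
#include <libavfilter/avfilter.h>

/* Hedged sketch: create two trivial video filters and link pad 0 to pad 0. */
static int link_sketch(AVFilterGraph *graph)
{
    const AVFilter *f_pass = avfilter_get_by_name("null");
    const AVFilter *f_sink = avfilter_get_by_name("nullsink");
    AVFilterContext *pass = NULL, *sink = NULL;
    int ret;

    if (!f_pass || !f_sink)
        return AVERROR_FILTER_NOT_FOUND;

    ret = avfilter_graph_create_filter(&pass, f_pass, "pass", NULL, NULL, graph);
    if (ret < 0)
        return ret;
    ret = avfilter_graph_create_filter(&sink, f_sink, "sink", NULL, NULL, graph);
    if (ret < 0)
        return ret;

    /* Media types match (both video), so the check in avfilter_link() passes. */
    return avfilter_link(pass, 0, sink, 0);
}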
Example #9
static int open_codec_context(AVFormatContext* fmt_ctx, int *stream_idx, AVMediaType media_type)
{
    int ret;
    AVStream *st;
    AVCodecContext *dec_ctx = NULL;
    AVCodec *dec = NULL;
    AVDictionary *opts = NULL;

    ret = av_find_best_stream(fmt_ctx, media_type, -1, -1, NULL, 0);
    if (ret < 0) {
		LOGE("Could not find best %s stream in input file",
			av_get_media_type_string(media_type));
        return ret;
    } else {
        *stream_idx = ret;
        st = fmt_ctx->streams[*stream_idx];

        /* find decoder for the stream */
        dec_ctx = st->codec;
        dec = avcodec_find_decoder(dec_ctx->codec_id);
        if (!dec) {
            LOGE("Failed to find %s codec", av_get_media_type_string(media_type));
            return AVERROR(EINVAL);
        }

        /* Init the decoders, with or without reference counting */
        av_dict_set(&opts, "refcounted_frames", "1", 0);
        if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
            LOGE("Failed to open %s codec", av_get_media_type_string(media_type));
            return ret;
        }
    }

    return 0;
}
Example #10
/**
 * Opens the given stream, i.e. sets up a decoder.
 *
 * @param env JNIEnv
 * @param stream AVStream
 */
int ff_open_stream(JNIEnv *env, AVStream *stream, AVCodecContext **context) {
#ifdef DEBUG
    fprintf(stderr, "Opening stream...\n");
#endif

    int res = 0;
    AVCodec *decoder = NULL;
    AVDictionary *opts = NULL;
    int refcount = 0; // is this correct?

    decoder = avcodec_find_decoder(stream->codecpar->codec_id);
    if (!decoder) {
        fprintf(stderr, "Failed to find %s codec\n", av_get_media_type_string(AVMEDIA_TYPE_AUDIO));
        res = AVERROR(EINVAL);
        throwUnsupportedAudioFileExceptionIfError(env, res, "Failed to find codec.");
        goto bail;
    }

    *context = avcodec_alloc_context3(decoder);
    if (!*context) { /* check the allocated context, not the output parameter */
        fprintf(stderr, "Failed to allocate context\n");
        res = AVERROR(EINVAL);
        throwUnsupportedAudioFileExceptionIfError(env, res, "Failed to allocate codec context.");
        goto bail;
    }

    /* Copy codec parameters from input stream to output codec context */
    if ((res = avcodec_parameters_to_context(*context, stream->codecpar)) < 0) {
        fprintf(stderr, "Failed to copy %s codec parameters to decoder context\n", av_get_media_type_string(AVMEDIA_TYPE_AUDIO));
        throwUnsupportedAudioFileExceptionIfError(env, res, "Failed to copy codec parameters.");
        goto bail;
    }

    /* Init the decoders, with or without reference counting */
    av_dict_set(&opts, "refcounted_frames", refcount ? "1" : "0", 0);
    if ((res = avcodec_open2(*context, decoder, &opts)) < 0) {
        fprintf(stderr, "Failed to open %s codec\n", av_get_media_type_string(AVMEDIA_TYPE_AUDIO));
        throwUnsupportedAudioFileExceptionIfError(env, res, "Failed to open codec.");
        goto bail;
    }

#ifdef DEBUG
    fprintf(stderr, "Stream was opened.\n");
#endif
    return res;

bail:
    return res;
}
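Unlike the st->codec based examples above, this one allocates its own AVCodecContext from the stream's codecpar, so the matching teardown has to free that context. A hedged sketch of the release side (the function name is a placeholder):
#include <libavcodec/avcodec.h>

/* Hedged sketch: a context obtained from avcodec_alloc_context3() and opened
 * with avcodec_open2() is released with avcodec_free_context(), which also
 * writes NULL back through the pointer. */
static void close_stream_sketch(AVCodecContext **context)
{
    if (context && *context)
        avcodec_free_context(context);
}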
Example #11
static int write_frame(struct AVFormatContext *s, int stream_index,
                       AVFrame **frame, unsigned flags)
{
    AVBPrint bp;
    int ret = 0;
    enum AVMediaType type;
    const char *type_name;

    if ((flags & AV_WRITE_UNCODED_FRAME_QUERY))
        return 0;

    av_bprint_init(&bp, 0, AV_BPRINT_SIZE_UNLIMITED);
    av_bprintf(&bp, "%d, %10"PRId64"",
               stream_index, (*frame)->pts);
    type = s->streams[stream_index]->codec->codec_type;
    type_name = av_get_media_type_string(type);
    av_bprintf(&bp, ", %s", type_name ? type_name : "unknown");
    switch (type) {
        case AVMEDIA_TYPE_VIDEO:
            video_frame_cksum(&bp, *frame);
            break;
        case AVMEDIA_TYPE_AUDIO:
            audio_frame_cksum(&bp, *frame);
            break;
    }

    av_bprint_chars(&bp, '\n', 1);
    if (av_bprint_is_complete(&bp))
        avio_write(s->pb, bp.str, bp.len);
    else
        ret = AVERROR(ENOMEM);
    av_bprint_finalize(&bp, NULL);
    return ret;
}
//------------------------------------------------------------------------------
bool openCodecContext(  AVFormatContext* p_formatContext, AVMediaType p_type, VideoInfo& p_videoInfo, 
                        int& p_outStreamIndex)
{
    AVStream* stream;
    AVCodecContext* decodeCodecContext = NULL;
    AVCodec* decodeCodec = NULL;
    
    // Find the stream
    p_outStreamIndex = av_find_best_stream(p_formatContext, p_type, -1, -1, NULL, 0);
    if (p_outStreamIndex < 0) 
    {
        p_videoInfo.error = "Could not find stream of type: ";
        p_videoInfo.error.append(av_get_media_type_string(p_type));
        return false;
    } 
    else 
    {
        stream = p_formatContext->streams[p_outStreamIndex];
        
        // Find decoder codec
        decodeCodecContext = stream->codec;
        decodeCodec = avcodec_find_decoder(decodeCodecContext->codec_id);
        if (!decodeCodec) 
        {
            p_videoInfo.error = "Failed to find codec: ";
            p_videoInfo.error.append(av_get_media_type_string(p_type));
            return false;
        }
        
//        if (p_type == AVMEDIA_TYPE_AUDIO)
//        {
//            decodeCodecContext->request_sample_fmt = AV_SAMPLE_FMT_S16;
//        }
        
        // Open decode codec & context
        if (avcodec_open2(decodeCodecContext, decodeCodec, NULL) < 0) 
        {
            p_videoInfo.error = "Failed to open codec: ";
            p_videoInfo.error.append(av_get_media_type_string(p_type));
            return false;
        }
    }
    
    return true;
}
Example #13
AVStream     *Identify::_openStream(AVFormatContext *format, AVCodecContext *&context, enum AVMediaType type)
{
    int      result;
    AVStream *stream = NULL;
    AVCodec  *decoder = NULL;

    if ((result = av_find_best_stream(format, type, -1, -1, &decoder, 0)) < 0)
    {
        LOG_TRACE(QString("Could not find %1 stream in input file").arg(av_get_media_type_string(type)), "Identify", "_openStream");
        return (NULL);
    }
    stream = format->streams[result];
    if ((result = Plugin::avcodec_open2(stream->codec, decoder, NULL)))
    {
        LOG_TRACE(QString("Failed to open %1 codec").arg(av_get_media_type_string(type)), Properties("error", result).toMap(), "Identify", "_openStream");
        return (NULL);
    }
    context = stream->codec;
    return (stream);
}
Example #14
static int open_codec_context(int *stream_idx,
                              AVFormatContext *fmt_ctx, enum AVMediaType type)
{
    int ret;
    AVStream *st;
    AVCodecContext *dec_ctx = NULL;
    AVCodec *dec = NULL;
    AVDictionary *opts = NULL;

    ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not find %s stream in input file '%s'\n",
                av_get_media_type_string(type), src_filename);
        return ret;
    } else {
        *stream_idx = ret;
        st = fmt_ctx->streams[*stream_idx];

        /* find decoder for the stream */
        dec_ctx = st->codec;
        dec = avcodec_find_decoder(dec_ctx->codec_id);
        if (!dec) {
            fprintf(stderr, "Failed to find %s codec\n",
                    av_get_media_type_string(type));
            return AVERROR(EINVAL);
        }

        /* Init the video decoder */
        av_dict_set(&opts, "flags2", "+export_mvs", 0);
        if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
            fprintf(stderr, "Failed to open %s codec\n",
                    av_get_media_type_string(type));
            return ret;
        }
    }

    return 0;
}
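This variant opens the decoder with flags2=+export_mvs, which only pays off if decoded frames are then queried for motion-vector side data. A hedged sketch of that query (decoding and frame ownership are handled elsewhere):
#include <stdio.h>
#include <libavutil/frame.h>
#include <libavutil/motion_vector.h>

/* Hedged sketch: with "+export_mvs" enabled, decoded frames may carry
 * AV_FRAME_DATA_MOTION_VECTORS side data. */
static void dump_motion_vectors_sketch(const AVFrame *frame)
{
    const AVFrameSideData *sd =
        av_frame_get_side_data(frame, AV_FRAME_DATA_MOTION_VECTORS);
    if (!sd)
        return;

    const AVMotionVector *mvs = (const AVMotionVector *)sd->data;
    size_t nb_mvs = sd->size / sizeof(*mvs);
    for (size_t i = 0; i < nb_mvs; i++)
        printf("mv %zu: (%d,%d) -> (%d,%d)\n", i,
               mvs[i].src_x, mvs[i].src_y, mvs[i].dst_x, mvs[i].dst_y);
}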
Example #15
void plex_report_stream(const AVStream *st)
{
    if (plexContext.progress_url &&
        (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ||
         st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) &&
        !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
        char url[4096];
        snprintf(url, sizeof(url), "%s/stream?index=%i&id=%i&codec=%s&type=%s",
                 plexContext.progress_url, st->index, st->id,
                 avcodec_get_name(st->codecpar->codec_id),
                 av_get_media_type_string(st->codecpar->codec_type));
        av_free(PMS_IssueHttpRequest(url, "PUT"));
    }
}
void set_codec(AVFormatContext *ic, int i) {
    const char *codec_type = av_get_media_type_string(ic->streams[i]->codec->codec_type);

	if (!codec_type) {
		return;
	}

    const char *codec_name = avcodec_get_name(ic->streams[i]->codec->codec_id);

	if (strcmp(codec_type, "audio") == 0) {
		av_dict_set(&ic->metadata, AUDIO_CODEC, codec_name, 0);
    } else if (codec_type && strcmp(codec_type, "video") == 0) {
	   	av_dict_set(&ic->metadata, VIDEO_CODEC, codec_name, 0);
	}
}
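A small design note on set_codec() above: av_get_media_type_string() is only needed when a human-readable name is wanted, so the branching itself can compare the AVMediaType enum directly and skip the strcmp() calls. A hedged alternative using the same variables as the function above:
/* Hedged alternative: branch on the enum instead of on its string form. */
enum AVMediaType mt = ic->streams[i]->codec->codec_type;
if (mt == AVMEDIA_TYPE_AUDIO)
    av_dict_set(&ic->metadata, AUDIO_CODEC, codec_name, 0);
else if (mt == AVMEDIA_TYPE_VIDEO)
    av_dict_set(&ic->metadata, VIDEO_CODEC, codec_name, 0);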
Example #17
static int decode_packet(struct decoder_ctx *ctx, const AVPacket *pkt, int *got_frame)
{
    int ret;
    int decoded = pkt->size;
    AVCodecContext *avctx = ctx->avctx;
    AVFrame *dec_frame = av_frame_alloc();

    *got_frame = 0;

    TRACE(ctx, "decode packet of size %d", pkt->size);

    if (!dec_frame)
        return AVERROR(ENOMEM);

    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        ret = avcodec_decode_video2(avctx, dec_frame, got_frame, pkt);
        break;
    case AVMEDIA_TYPE_AUDIO:
        ret = avcodec_decode_audio4(avctx, dec_frame, got_frame, pkt);
        break;
    default:
        av_assert0(0);
    }

    if (ret < 0) {
        LOG(ctx, ERROR, "Error decoding %s packet: %s",
            av_get_media_type_string(avctx->codec_type),
            av_err2str(ret));
        av_frame_free(&dec_frame);
        return pkt->size;
    }

    TRACE(ctx, "decoded %d/%d bytes from packet -> got_frame=%d", ret, pkt->size, *got_frame);
    decoded = FFMIN(ret, pkt->size);

    if (*got_frame) {
        ret = sxpi_decoding_queue_frame(ctx->decoding_ctx, dec_frame);
        if (ret < 0) {
            TRACE(ctx, "could not queue frame: %s", av_err2str(ret));
            av_frame_free(&dec_frame);
            return ret;
        }
    } else {
        av_frame_free(&dec_frame);
    }
    return decoded;
}
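decode_packet() above still uses avcodec_decode_video2()/avcodec_decode_audio4(), which were deprecated upstream in favour of the send/receive API (introduced around FFmpeg 3.1). A hedged, media-type-independent sketch of the newer pattern (queueing of the frame is left out):
#include <libavcodec/avcodec.h>

/* Hedged sketch: the send/receive pattern that replaces the deprecated
 * avcodec_decode_video2()/avcodec_decode_audio4() calls. */
static int decode_packet_sketch(AVCodecContext *avctx, const AVPacket *pkt)
{
    AVFrame *frame = av_frame_alloc();
    int ret;

    if (!frame)
        return AVERROR(ENOMEM);

    ret = avcodec_send_packet(avctx, pkt);
    while (ret >= 0) {
        ret = avcodec_receive_frame(avctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            ret = 0;                 /* no more frames for this packet */
            break;
        }
        if (ret < 0)
            break;                   /* a real decoding error */
        /* ... hand the frame's data to the consumer here ... */
        av_frame_unref(frame);
    }
    av_frame_free(&frame);
    return ret;
}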
Example #18
static void log_slave(TeeSlave *slave, void *log_ctx, int log_level)
{
    int i;
    av_log(log_ctx, log_level, "filename:'%s' format:%s\n",
           slave->avf->filename, slave->avf->oformat->name);
    for (i = 0; i < slave->avf->nb_streams; i++) {
        AVStream *st = slave->avf->streams[i];
        AVBSFContext *bsf = slave->bsfs[i];
        const char *bsf_name;

        av_log(log_ctx, log_level, "    stream:%d codec:%s type:%s",
               i, avcodec_get_name(st->codecpar->codec_id),
               av_get_media_type_string(st->codecpar->codec_type));

        bsf_name = bsf->filter->priv_class ?
                   bsf->filter->priv_class->item_name(bsf) : bsf->filter->name;
        av_log(log_ctx, log_level, " bsfs: %s\n", bsf_name);
    }
}
Example #19
static void log_slave(TeeSlave *slave, void *log_ctx, int log_level)
{
    int i;
    av_log(log_ctx, log_level, "filename:'%s' format:%s\n",
           slave->avf->filename, slave->avf->oformat->name);
    for (i = 0; i < slave->avf->nb_streams; i++) {
        AVStream *st = slave->avf->streams[i];
        AVBitStreamFilterContext *bsf = slave->bsfs[i];

        av_log(log_ctx, log_level, "    stream:%d codec:%s type:%s",
               i, avcodec_get_name(st->codec->codec_id),
               av_get_media_type_string(st->codec->codec_type));
        if (bsf) {
            av_log(log_ctx, log_level, " bsfs:");
            while (bsf) {
                av_log(log_ctx, log_level, "%s%s",
                       bsf->filter->name, bsf->next ? "," : "");
                bsf = bsf->next;
            }
        }
        av_log(log_ctx, log_level, "\n");
    }
}
Example #20
bool mp_decode_init(mp_media_t *m, enum AVMediaType type, bool hw)
{
	struct mp_decode *d = type == AVMEDIA_TYPE_VIDEO ? &m->v : &m->a;
	enum AVCodecID id;
	AVStream *stream;
	int ret;

	memset(d, 0, sizeof(*d));
	d->m = m;
	d->audio = type == AVMEDIA_TYPE_AUDIO;

	ret = av_find_best_stream(m->fmt, type, -1, -1, NULL, 0);
	if (ret < 0)
		return false;
	stream = d->stream = m->fmt->streams[ret];

#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(57, 40, 101)
	id = stream->codecpar->codec_id;
#else
	id = stream->codec->codec_id;
#endif

	if (hw) {
		d->codec = find_hardware_decoder(id);
		if (d->codec) {
			ret = mp_open_codec(d);
			if (ret < 0)
				d->codec = NULL;
		}
	}

	if (!d->codec) {
		if (id == AV_CODEC_ID_VP8)
			d->codec = avcodec_find_decoder_by_name("libvpx");
		else if (id == AV_CODEC_ID_VP9)
			d->codec = avcodec_find_decoder_by_name("libvpx-vp9");

		if (!d->codec)
			d->codec = avcodec_find_decoder(id);
		if (!d->codec) {
			blog(LOG_WARNING, "MP: Failed to find %s codec",
					av_get_media_type_string(type));
			return false;
		}

		ret = mp_open_codec(d);
		if (ret < 0) {
			blog(LOG_WARNING, "MP: Failed to open %s decoder: %s",
					av_get_media_type_string(type),
					av_err2str(ret));
			return false;
		}
	}

	d->frame = av_frame_alloc();
	if (!d->frame) {
		blog(LOG_WARNING, "MP: Failed to allocate %s frame",
				av_get_media_type_string(type));
		return false;
	}

	if (d->codec->capabilities & AV_CODEC_CAP_TRUNCATED)
		d->decoder->flags |= AV_CODEC_FLAG_TRUNCATED;
	return true;
}
Example #21
static int avi_write_header(AVFormatContext *s)
{
    AVIContext *avi = s->priv_data;
    AVIOContext *pb = s->pb;
    int bitrate, n, i, nb_frames, au_byterate, au_ssize, au_scale;
    AVCodecContext *stream, *video_enc;
    int64_t list1, list2, strh, strf;
    AVDictionaryEntry *t = NULL;

    if (s->nb_streams > AVI_MAX_STREAM_COUNT) {
        av_log(s, AV_LOG_ERROR, "AVI does not support >%d streams\n",
               AVI_MAX_STREAM_COUNT);
        return AVERROR(EINVAL);
    }

    for(n=0;n<s->nb_streams;n++) {
        s->streams[n]->priv_data= av_mallocz(sizeof(AVIStream));
        if(!s->streams[n]->priv_data)
            return AVERROR(ENOMEM);
    }

    /* header list */
    avi->riff_id = 0;
    list1 = avi_start_new_riff(s, pb, "AVI ", "hdrl");

    /* avi header */
    ffio_wfourcc(pb, "avih");
    avio_wl32(pb, 14 * 4);
    bitrate = 0;

    video_enc = NULL;
    for(n=0;n<s->nb_streams;n++) {
        stream = s->streams[n]->codec;
        bitrate += stream->bit_rate;
        if (stream->codec_type == AVMEDIA_TYPE_VIDEO)
            video_enc = stream;
    }

    nb_frames = 0;

    if(video_enc){
        avio_wl32(pb, (uint32_t)(INT64_C(1000000) * video_enc->time_base.num / video_enc->time_base.den));
    } else {
        avio_wl32(pb, 0);
    }
    avio_wl32(pb, bitrate / 8); /* XXX: not quite exact */
    avio_wl32(pb, 0); /* padding */
    if (!pb->seekable)
        avio_wl32(pb, AVIF_TRUSTCKTYPE | AVIF_ISINTERLEAVED); /* flags */
    else
        avio_wl32(pb, AVIF_TRUSTCKTYPE | AVIF_HASINDEX | AVIF_ISINTERLEAVED); /* flags */
    avi->frames_hdr_all = avio_tell(pb); /* remember this offset to fill later */
    avio_wl32(pb, nb_frames); /* nb frames, filled later */
    avio_wl32(pb, 0); /* initial frame */
    avio_wl32(pb, s->nb_streams); /* nb streams */
    avio_wl32(pb, 1024 * 1024); /* suggested buffer size */
    if(video_enc){
        avio_wl32(pb, video_enc->width);
        avio_wl32(pb, video_enc->height);
    } else {
        avio_wl32(pb, 0);
        avio_wl32(pb, 0);
    }
    avio_wl32(pb, 0); /* reserved */
    avio_wl32(pb, 0); /* reserved */
    avio_wl32(pb, 0); /* reserved */
    avio_wl32(pb, 0); /* reserved */

    /* stream list */
    for(i=0;i<n;i++) {
        AVIStream *avist= s->streams[i]->priv_data;
        list2 = ff_start_tag(pb, "LIST");
        ffio_wfourcc(pb, "strl");

        stream = s->streams[i]->codec;

        /* stream generic header */
        strh = ff_start_tag(pb, "strh");
        switch(stream->codec_type) {
        case AVMEDIA_TYPE_SUBTITLE:
            // XSUB subtitles behave like video tracks, other subtitles
            // are not (yet) supported.
            if (stream->codec_id != AV_CODEC_ID_XSUB) {
                av_log(s, AV_LOG_ERROR, "Subtitle streams other than DivX XSUB are not supported by the AVI muxer.\n");
                return AVERROR_PATCHWELCOME;
            }
        case AVMEDIA_TYPE_VIDEO: ffio_wfourcc(pb, "vids"); break;
        case AVMEDIA_TYPE_AUDIO: ffio_wfourcc(pb, "auds"); break;
//      case AVMEDIA_TYPE_TEXT : ffio_wfourcc(pb, "txts"); break;
        case AVMEDIA_TYPE_DATA : ffio_wfourcc(pb, "dats"); break;
        }
        if(stream->codec_type == AVMEDIA_TYPE_VIDEO ||
           stream->codec_id == AV_CODEC_ID_XSUB)
            avio_wl32(pb, stream->codec_tag);
        else
            avio_wl32(pb, 1);
        avio_wl32(pb, 0); /* flags */
        avio_wl16(pb, 0); /* priority */
        avio_wl16(pb, 0); /* language */
        avio_wl32(pb, 0); /* initial frame */

        ff_parse_specific_params(stream, &au_byterate, &au_ssize, &au_scale);

        if (   stream->codec_type == AVMEDIA_TYPE_VIDEO
            && stream->codec_id != AV_CODEC_ID_XSUB
            && au_byterate > 1000LL*au_scale) {
            au_byterate = 600;
            au_scale    = 1;
        }
        avpriv_set_pts_info(s->streams[i], 64, au_scale, au_byterate);
        if(stream->codec_id == AV_CODEC_ID_XSUB)
            au_scale = au_byterate = 0;

        avio_wl32(pb, au_scale); /* scale */
        avio_wl32(pb, au_byterate); /* rate */

        avio_wl32(pb, 0); /* start */
        avist->frames_hdr_strm = avio_tell(pb); /* remember this offset to fill later */
        if (!pb->seekable)
            avio_wl32(pb, AVI_MAX_RIFF_SIZE); /* FIXME: this may be broken, but who cares */
        else
            avio_wl32(pb, 0); /* length, XXX: filled later */

        /* suggested buffer size */ //FIXME set at the end to largest chunk
        if(stream->codec_type == AVMEDIA_TYPE_VIDEO)
            avio_wl32(pb, 1024 * 1024);
        else if(stream->codec_type == AVMEDIA_TYPE_AUDIO)
            avio_wl32(pb, 12 * 1024);
        else
            avio_wl32(pb, 0);
        avio_wl32(pb, -1); /* quality */
        avio_wl32(pb, au_ssize); /* sample size */
        avio_wl32(pb, 0);
        avio_wl16(pb, stream->width);
        avio_wl16(pb, stream->height);
        ff_end_tag(pb, strh);

      if(stream->codec_type != AVMEDIA_TYPE_DATA){
          int ret;

        strf = ff_start_tag(pb, "strf");
        switch(stream->codec_type) {
        case AVMEDIA_TYPE_SUBTITLE:
            // XSUB subtitles behave like video tracks, other subtitles
            // are not (yet) supported.
            if (stream->codec_id != AV_CODEC_ID_XSUB) break;
        case AVMEDIA_TYPE_VIDEO:
            ff_put_bmp_header(pb, stream, ff_codec_bmp_tags, 0, 0);
            break;
        case AVMEDIA_TYPE_AUDIO:
            if ((ret = ff_put_wav_header(pb, stream)) < 0) {
                return ret;
            }
            break;
        default:
            av_log(s, AV_LOG_ERROR,
                   "Invalid or not supported codec type '%s' found in the input\n",
                   (char *)av_x_if_null(av_get_media_type_string(stream->codec_type), "?"));
            return AVERROR(EINVAL);
        }
        ff_end_tag(pb, strf);
        if ((t = av_dict_get(s->streams[i]->metadata, "title", NULL, 0))) {
            ff_riff_write_info_tag(s->pb, "strn", t->value);
            t = NULL;
        }
        if(stream->codec_id == AV_CODEC_ID_XSUB
           && (t = av_dict_get(s->streams[i]->metadata, "language", NULL, 0))) {
            const char* langstr = av_convert_lang_to(t->value, AV_LANG_ISO639_1);
            t = NULL;
            if (langstr) {
                char* str = av_asprintf("Subtitle - %s-xx;02", langstr);
                ff_riff_write_info_tag(s->pb, "strn", str);
                av_free(str);
            }
        }
      }

        if (pb->seekable) {
            unsigned char tag[5];
            int j;

            /* Starting to lay out AVI OpenDML master index.
             * We want to make it JUNK entry for now, since we'd
             * like to get away without making AVI an OpenDML one
             * for compatibility reasons.
             */
            avist->indexes.entry = avist->indexes.ents_allocated = 0;
            avist->indexes.indx_start = ff_start_tag(pb, "JUNK");
            avio_wl16(pb, 4);        /* wLongsPerEntry */
            avio_w8(pb, 0);          /* bIndexSubType (0 == frame index) */
            avio_w8(pb, 0);          /* bIndexType (0 == AVI_INDEX_OF_INDEXES) */
            avio_wl32(pb, 0);        /* nEntriesInUse (will fill out later on) */
            ffio_wfourcc(pb, avi_stream2fourcc(tag, i, stream->codec_type));
                                    /* dwChunkId */
            avio_wl64(pb, 0);        /* dwReserved[3]
            avio_wl32(pb, 0);           Must be 0.    */
            for (j=0; j < AVI_MASTER_INDEX_SIZE * 2; j++)
                 avio_wl64(pb, 0);
            ff_end_tag(pb, avist->indexes.indx_start);
        }

        if(   stream->codec_type == AVMEDIA_TYPE_VIDEO
           && s->streams[i]->sample_aspect_ratio.num>0
           && s->streams[i]->sample_aspect_ratio.den>0){
            int vprp= ff_start_tag(pb, "vprp");
            AVRational dar = av_mul_q(s->streams[i]->sample_aspect_ratio,
                                      (AVRational){stream->width, stream->height});
            int num, den;
            av_reduce(&num, &den, dar.num, dar.den, 0xFFFF);

            avio_wl32(pb, 0); //video format  = unknown
            avio_wl32(pb, 0); //video standard= unknown
            avio_wl32(pb, lrintf(1.0/av_q2d(stream->time_base)));
            avio_wl32(pb, stream->width );
            avio_wl32(pb, stream->height);
            avio_wl16(pb, den);
            avio_wl16(pb, num);
            avio_wl32(pb, stream->width );
            avio_wl32(pb, stream->height);
            avio_wl32(pb, 1); //progressive FIXME

            avio_wl32(pb, stream->height);
            avio_wl32(pb, stream->width );
            avio_wl32(pb, stream->height);
            avio_wl32(pb, stream->width );
            avio_wl32(pb, 0);
            avio_wl32(pb, 0);

            avio_wl32(pb, 0);
            avio_wl32(pb, 0);
            ff_end_tag(pb, vprp);
        }

        ff_end_tag(pb, list2);
    }

    if (pb->seekable) {
        /* AVI could become an OpenDML one, if it grows beyond 2Gb range */
        avi->odml_list = ff_start_tag(pb, "JUNK");
        ffio_wfourcc(pb, "odml");
        ffio_wfourcc(pb, "dmlh");
        avio_wl32(pb, 248);
        for (i = 0; i < 248; i+= 4)
             avio_wl32(pb, 0);
        ff_end_tag(pb, avi->odml_list);
    }

    ff_end_tag(pb, list1);

    ff_riff_write_info(s);

    /* some padding for easier tag editing */
    list2 = ff_start_tag(pb, "JUNK");
    for (i = 0; i < 1016; i += 4)
        avio_wl32(pb, 0);
    ff_end_tag(pb, list2);

    avi->movi_list = ff_start_tag(pb, "LIST");
    ffio_wfourcc(pb, "movi");

    avio_flush(pb);

    return 0;
}
/*
 * Return
 * - 0 -- one packet was read and processed
 * - AVERROR(EAGAIN) -- no packets were available for selected file,
 *   this function should be called again
 * - AVERROR_EOF -- this function should not be called again
 */
static int process_input(int file_index)
{
    InputFile *ifile = input_files[file_index];
    AVFormatContext *is;
    InputStream *ist;
    AVPacket pkt;
    int ret, i, j;

    is  = ifile->ctx;
    // ****<<  Capture a frame: audio/video/subtitle
    ret = get_input_packet(ifile, &pkt); // ****<<  Call stack: av_read_frame() --> read_frame_internal() --> ff_read_packet() --> s->iformat->read_packet(s, pkt); --> dshow_read_frame();

    if (ret == AVERROR(EAGAIN)) {
        ifile->eagain = 1;
        return ret;
    }
    if (ret < 0) {
        if (ret != AVERROR_EOF) {
            print_error(is->filename, ret);
            if (exit_on_error)
                exit_program(1);
        }
        ifile->eof_reached = 1;

        for (i = 0; i < ifile->nb_streams; i++) {
            ist = input_streams[ifile->ist_index + i];
            if (ist->decoding_needed)
                output_packet(ist, NULL);

            /* mark all outputs that don't go through lavfi as finished */
            for (j = 0; j < nb_output_streams; j++) {
                OutputStream *ost = output_streams[j];

                if (ost->source_index == ifile->ist_index + i &&
                        (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
                    close_output_stream(ost);
            }
        }

        return AVERROR(EAGAIN);
    }

    reset_eagain();

    if (do_pkt_dump) {
        av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
                         is->streams[pkt.stream_index]);
    }
    /* the following test is needed in case new streams appear
       dynamically in stream : we ignore them */
    if (pkt.stream_index >= ifile->nb_streams) {
        report_new_stream(file_index, &pkt);
        goto discard_packet;
    }

    ist = input_streams[ifile->ist_index + pkt.stream_index];

    ist->data_size += pkt.size;
    ist->nb_packets++;

    if (ist->discard)
        goto discard_packet;

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
               "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
               ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
               av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
               av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
               av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64) {
        int64_t stime, stime2;
        // Correcting starttime based on the enabled streams
        // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
        //       so we instead do it here as part of discontinuity handling
        if (   ist->next_dts == AV_NOPTS_VALUE
                && ifile->ts_offset == -is->start_time
                && (is->iformat->flags & AVFMT_TS_DISCONT)) {
            int64_t new_start_time = INT64_MAX;
            for (i=0; i<is->nb_streams; i++) {
                AVStream *st = is->streams[i];
                if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
                    continue;
                new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
            }
            if (new_start_time > is->start_time) {
                av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
                ifile->ts_offset = -new_start_time;
            }
        }

        stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
        stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
        ist->wrap_correction_done = 1;

        if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
        if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
    }

    /* add the stream-global side data to the first packet */
    if (ist->nb_packets == 1)
        if (ist->st->nb_side_data)
            av_packet_split_side_data(&pkt);
    for (i = 0; i < ist->st->nb_side_data; i++) {
        AVPacketSideData *src_sd = &ist->st->side_data[i];
        uint8_t *dst_data;

        if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
            continue;

        dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
        if (!dst_data)
            exit_program(1);

        memcpy(dst_data, src_sd->data, src_sd->size);
    }

    // ****<<  pkt.pts:    capture timestamp (in milliseconds; usually the system time)
    // ****<<  ts_offset:  start-time offset (in milliseconds; usually the negative of the first frame's capture timestamp)
    // ****<<  final pts:  relative timestamp (in milliseconds; elapsed time from the first frame to now)
    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);

    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts *= ist->ts_scale;
    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts *= ist->ts_scale;

    if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
            && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
        int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
        int64_t delta   = pkt_dts - ifile->last_ts;
        if(delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
                (delta > 1LL*dts_delta_threshold*AV_TIME_BASE &&
                 ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE)) {
            ifile->ts_offset -= delta;
            av_log(NULL, AV_LOG_DEBUG,
                   "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                   delta, ifile->ts_offset);
            pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            if (pkt.pts != AV_NOPTS_VALUE)
                pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
        }
    }

    if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
            !copy_ts) {
        int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
        int64_t delta   = pkt_dts - ist->next_dts;
        if (is->iformat->flags & AVFMT_TS_DISCONT) {
            if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
                    (delta > 1LL*dts_delta_threshold*AV_TIME_BASE &&
                     ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE) ||
                    pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
                ifile->ts_offset -= delta;
                av_log(NULL, AV_LOG_DEBUG,
                       "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                       delta, ifile->ts_offset);
                pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
                if (pkt.pts != AV_NOPTS_VALUE)
                    pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            }
        } else {
            if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                    (delta > 1LL*dts_error_threshold*AV_TIME_BASE && ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE)) {
                av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
                pkt.dts = AV_NOPTS_VALUE;
            }
            if (pkt.pts != AV_NOPTS_VALUE) {
                int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
                delta   = pkt_pts - ist->next_dts;
                if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                        (delta > 1LL*dts_error_threshold*AV_TIME_BASE && ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE)) {
                    av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
                    pkt.pts = AV_NOPTS_VALUE;
                }
            }
        }
    }

    if (pkt.dts != AV_NOPTS_VALUE)
        ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
               ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
               av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    sub2video_heartbeat(ist, pkt.pts);

    ret = output_packet(ist, &pkt); // ****<<  see output_packet.c
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
               ist->file_index, ist->st->index, av_err2str(ret));
        if (exit_on_error)
            exit_program(1);
    }

discard_packet:
    av_free_packet(&pkt);

    return 0;
}
Example #23
void plex_report_stream_detail(const AVStream *st)
{
    if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
         st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
        st->codec_info_nb_frames == 0)
        return; // Unparsed stream; will be skipped in output

    if (plexContext.progress_url &&
        (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ||
         st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
         st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) &&
        !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
        char url[4096];
        AVBPrint dstbuf;
        AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
        const char *profile = avcodec_profile_name(st->codecpar->codec_id, st->codecpar->profile);

        av_bprint_init_for_buffer(&dstbuf, url, sizeof(url));

        av_bprintf(&dstbuf, "%s/streamDetail?index=%i&id=%i&codec=%s&type=%s",
                   plexContext.progress_url, st->index, st->id,
                   avcodec_get_name(st->codecpar->codec_id),
                   av_get_media_type_string(st->codecpar->codec_type));

        if (st->codecpar->bit_rate)
            av_bprintf(&dstbuf, "&bitrate=%"PRId64, st->codecpar->bit_rate);

        if (profile) {
            av_bprintf(&dstbuf, "&profile=");
            av_bprint_escape(&dstbuf, profile, NULL, AV_ESCAPE_MODE_URL, 0);
        }

        if (lang && lang->value && *lang->value) {
            av_bprintf(&dstbuf, "&language=");
            av_bprint_escape(&dstbuf, lang->value, NULL, AV_ESCAPE_MODE_URL, 0);
        }

        if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            av_bprintf(&dstbuf, "&width=%i&height=%i",
                        st->codecpar->width, st->codecpar->height);
            av_bprintf(&dstbuf, "&interlaced=%i",
                        (st->codecpar->field_order != AV_FIELD_PROGRESSIVE &&
                         st->codecpar->field_order != AV_FIELD_UNKNOWN));
            if (st->codecpar->separate_fields)
                av_bprintf(&dstbuf, "&separateFields=1");
            if (st->codecpar->sample_aspect_ratio.num && st->codecpar->sample_aspect_ratio.den)
                av_bprintf(&dstbuf, "&sar=%d:%d",
                            st->codecpar->sample_aspect_ratio.num,
                            st->codecpar->sample_aspect_ratio.den);
            if (st->codecpar->level != FF_LEVEL_UNKNOWN)
                av_bprintf(&dstbuf, "&level=%d", st->codecpar->level);
            if (st->avg_frame_rate.num && st->avg_frame_rate.den)
                av_bprintf(&dstbuf, "&frameRate=%.3f",
                            av_q2d(st->avg_frame_rate));
            if (st->internal && st->internal->avctx &&
                st->internal->avctx->properties & FF_CODEC_PROPERTY_CLOSED_CAPTIONS)
                av_bprintf(&dstbuf, "&closedCaptions=1");
        } else if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            char layout[256];
            char *layoutP = layout;
            char *l, *r;
            av_bprintf(&dstbuf, "&channels=%i", st->codecpar->channels);
            av_get_channel_layout_string(layout, sizeof(layout), st->codecpar->channels,
                                         st->codecpar->channel_layout);
            l = strchr(layout, '(');
            r = strrchr(layout, ')');
            if (l && r && l > layoutP + 8) {
                layoutP = l + 1;
                *r = 0;
            }
            av_bprintf(&dstbuf, "&layout=");
            av_bprint_escape(&dstbuf, layout, NULL, AV_ESCAPE_MODE_URL, 0);
            av_bprintf(&dstbuf, "&sampleRate=%i", st->codecpar->sample_rate);
            if (st->codecpar->bits_per_raw_sample)
                av_bprintf(&dstbuf, "&bitDepth=%i", st->codecpar->bits_per_raw_sample);
        }

#define SEND_DISPOSITION(flagname, name) if (st->disposition & AV_DISPOSITION_##flagname) \
    av_bprintf(&dstbuf, "&disp_" name "=1");

        SEND_DISPOSITION(DEFAULT,          "default");
        SEND_DISPOSITION(DUB,              "dub");
        SEND_DISPOSITION(ORIGINAL,         "original");
        SEND_DISPOSITION(COMMENT,          "comment");
        SEND_DISPOSITION(LYRICS,           "lyrics");
        SEND_DISPOSITION(KARAOKE,          "karaoke");
        SEND_DISPOSITION(FORCED,           "forced");
        SEND_DISPOSITION(HEARING_IMPAIRED, "hearing_impaired");
        SEND_DISPOSITION(VISUAL_IMPAIRED,  "visual_impaired");
        SEND_DISPOSITION(CLEAN_EFFECTS,    "clean_effects");
        SEND_DISPOSITION(ATTACHED_PIC,     "attached_pic");
        SEND_DISPOSITION(TIMED_THUMBNAILS, "timed_thumbnails");


        av_free(PMS_IssueHttpRequest(url, "PUT"));
    }
}
Example #24
static int flv_write_header(AVFormatContext *s)
{
    int i;
    AVIOContext *pb = s->pb;
    FLVContext *flv = s->priv_data;
    int64_t data_size;

    for (i = 0; i < s->nb_streams; i++) {
        AVCodecContext *enc = s->streams[i]->codec;
        FLVStreamContext *sc;
        switch (enc->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            if (s->streams[i]->avg_frame_rate.den &&
                s->streams[i]->avg_frame_rate.num) {
                flv->framerate = av_q2d(s->streams[i]->avg_frame_rate);
            }
            if (flv->video_enc) {
                av_log(s, AV_LOG_ERROR,
                       "at most one video stream is supported in flv\n");
                return AVERROR(EINVAL);
            }
            flv->video_enc = enc;
            if (enc->codec_tag == 0) {
                av_log(s, AV_LOG_ERROR, "Video codec '%s' for stream %d is not compatible with FLV\n",
                       avcodec_get_name(enc->codec_id), i);
                return AVERROR(EINVAL);
            }
            if (enc->codec_id == AV_CODEC_ID_MPEG4 ||
                enc->codec_id == AV_CODEC_ID_H263) {
                int error = s->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL;
                av_log(s, error ? AV_LOG_ERROR : AV_LOG_WARNING,
                       "Codec %s is not supported in the official FLV specification,\n", avcodec_get_name(enc->codec_id));

                if (error) {
                    av_log(s, AV_LOG_ERROR,
                           "use vstrict=-1 / -strict -1 to use it anyway.\n");
                    return AVERROR(EINVAL);
                }
            } else if (enc->codec_id == AV_CODEC_ID_VP6) {
                av_log(s, AV_LOG_WARNING,
                       "Muxing VP6 in flv will produce flipped video on playback.\n");
            }
            break;
        case AVMEDIA_TYPE_AUDIO:
            if (flv->audio_enc) {
                av_log(s, AV_LOG_ERROR,
                       "at most one audio stream is supported in flv\n");
                return AVERROR(EINVAL);
            }
            flv->audio_enc = enc;
            if (get_audio_flags(s, enc) < 0)
                return AVERROR_INVALIDDATA;
            if (enc->codec_id == AV_CODEC_ID_PCM_S16BE)
                av_log(s, AV_LOG_WARNING,
                       "16-bit big-endian audio in flv is valid but most likely unplayable (hardware dependent); use s16le\n");
            break;
        case AVMEDIA_TYPE_DATA:
            if (enc->codec_id != AV_CODEC_ID_TEXT && enc->codec_id != AV_CODEC_ID_NONE) {
                av_log(s, AV_LOG_ERROR, "Data codec '%s' for stream %d is not compatible with FLV\n",
                       avcodec_get_name(enc->codec_id), i);
                return AVERROR_INVALIDDATA;
            }
            flv->data_enc = enc;
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            if (enc->codec_id != AV_CODEC_ID_TEXT) {
                av_log(s, AV_LOG_ERROR, "Subtitle codec '%s' for stream %d is not compatible with FLV\n",
                       avcodec_get_name(enc->codec_id), i);
                return AVERROR_INVALIDDATA;
            }
            flv->data_enc = enc;
            break;
        default:
            av_log(s, AV_LOG_ERROR, "Codec type '%s' for stream %d is not compatible with FLV\n",
                   av_get_media_type_string(enc->codec_type), i);
            return AVERROR(EINVAL);
        }
        avpriv_set_pts_info(s->streams[i], 32, 1, 1000); /* 32 bit pts in ms */

        sc = av_mallocz(sizeof(FLVStreamContext));
        if (!sc)
            return AVERROR(ENOMEM);
        s->streams[i]->priv_data = sc;
        sc->last_ts = -1;
    }

    flv->delay = AV_NOPTS_VALUE;

    avio_write(pb, "FLV", 3);
    avio_w8(pb, 1);
    avio_w8(pb, FLV_HEADER_FLAG_HASAUDIO * !!flv->audio_enc +
                FLV_HEADER_FLAG_HASVIDEO * !!flv->video_enc);
    avio_wb32(pb, 9);
    avio_wb32(pb, 0);

    for (i = 0; i < s->nb_streams; i++)
        if (s->streams[i]->codec->codec_tag == 5) {
            avio_w8(pb, 8);     // message type
            avio_wb24(pb, 0);   // include flags
            avio_wb24(pb, 0);   // time stamp
            avio_wb32(pb, 0);   // reserved
            avio_wb32(pb, 11);  // size
            flv->reserved = 5;
        }

    write_metadata(s, 0);

    for (i = 0; i < s->nb_streams; i++) {
        AVCodecContext *enc = s->streams[i]->codec;
        if (enc->codec_id == AV_CODEC_ID_AAC || enc->codec_id == AV_CODEC_ID_H264 || enc->codec_id == AV_CODEC_ID_MPEG4) {
            int64_t pos;
            avio_w8(pb, enc->codec_type == AVMEDIA_TYPE_VIDEO ?
                    FLV_TAG_TYPE_VIDEO : FLV_TAG_TYPE_AUDIO);
            avio_wb24(pb, 0); // size patched later
            avio_wb24(pb, 0); // ts
            avio_w8(pb, 0);   // ts ext
            avio_wb24(pb, 0); // streamid
            pos = avio_tell(pb);
            if (enc->codec_id == AV_CODEC_ID_AAC) {
                avio_w8(pb, get_audio_flags(s, enc));
                avio_w8(pb, 0); // AAC sequence header
                avio_write(pb, enc->extradata, enc->extradata_size);
            } else {
                avio_w8(pb, enc->codec_tag | FLV_FRAME_KEY); // flags
                avio_w8(pb, 0); // AVC sequence header
                avio_wb24(pb, 0); // composition time
                ff_isom_write_avcc(pb, enc->extradata, enc->extradata_size);
            }
            data_size = avio_tell(pb) - pos;
            avio_seek(pb, -data_size - 10, SEEK_CUR);
            avio_wb24(pb, data_size);
            avio_skip(pb, data_size + 10 - 3);
            avio_wb32(pb, data_size + 11); // previous tag size
        }
    }

    return 0;
}
Example #25
int Capture::openCodeContext(AVFormatContext *fmt_ctx, enum AVMediaType type, int & streamIndex)
{
	int ret;
	AVStream *st;
	AVCodecContext *dec_ctx = nullptr;
	AVCodec *dec = nullptr;
	AVDictionary *opts = nullptr;

	ret = av_find_best_stream(fmt_ctx, type, -1, -1, nullptr, 0);
	if (ret < 0)
	{
		sgct::MessageHandler::instance()->print(sgct::MessageHandler::NOTIFY_ERROR, "Could not find %s stream!\n", av_get_media_type_string(type));
		return ret;
	}
	else
	{
		streamIndex = ret;
		st = fmt_ctx->streams[streamIndex];

		/* find decoder for the stream */
		dec_ctx = st->codec;
		dec_ctx->thread_count = 0; //auto number of threads
		dec_ctx->flags |= CODEC_FLAG_LOW_DELAY;
		dec_ctx->flags2 |= CODEC_FLAG2_FAST;

		dec = avcodec_find_decoder(dec_ctx->codec_id);

		if (!dec)
		{
			sgct::MessageHandler::instance()->print(sgct::MessageHandler::NOTIFY_ERROR, "Could not find %s codec!\n", av_get_media_type_string(type));
			return AVERROR(EINVAL);
		}

		/* Init the decoders, with or without reference counting */
#if USE_REF_COUNTER
		av_dict_set(&opts, "refcounted_frames", "1", 0);
#else
		av_dict_set(&opts, "refcounted_frames", "0", 0);
#endif
		if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0)
		{
			sgct::MessageHandler::instance()->print(sgct::MessageHandler::NOTIFY_ERROR, "Could not open %s codec!\n", av_get_media_type_string(type));
			return ret;
		}
	}

	return 0;
}
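CODEC_FLAG_LOW_DELAY and CODEC_FLAG2_FAST above are the old unprefixed macro names; current FFmpeg headers only ship the AV_-prefixed forms. A hedged equivalent of those two lines for newer headers:
/* Hedged note: on newer FFmpeg the unprefixed CODEC_FLAG_* macros are gone. */
dec_ctx->flags  |= AV_CODEC_FLAG_LOW_DELAY;
dec_ctx->flags2 |= AV_CODEC_FLAG2_FAST;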
Example #26
void Libav::initMeta(AVFormatContext *fmt_ctx) {
    
    AVDictionaryEntry *tag = NULL;
    char container[4];

	container[0] = '\0';
    while ((tag = av_dict_get(fmt_ctx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
        meta[std::string(tag->key)] =  std::string(tag->value);
		if (strcmp(tag->key,"major_brand")==0) {
            // mp4 or mov
			if (strcmp(tag->value,"qt")==0) {
				strcpy(container,"mov");
			}
			if (strcmp(tag->value,"mp42")==0) {
				strcpy(container,"mp4");
			}
		}
	}

    if (container[0] == '\0') {
		meta[std::string("FORMAT_NAME")] = std::string(fmt_ctx->iformat->name);
	} else {
		meta[std::string("FORMAT_NAME")] = std::string(container);
	}

    std::stringstream numberStreams;
    numberStreams << fmt_ctx->nb_streams;
    
    meta[std::string("STREAMS")] = numberStreams.str();

    for(int  i=0; i<fmt_ctx->nb_streams; i++) {
        
		AVCodecContext *codec_ctx = fmt_ctx->streams[i]->codec;
        
		const char * stream_type = av_get_media_type_string(fmt_ctx->streams[i]->codec->codec_type);

        char key[128]; // possible overflow
        char val[128]; // as above
		
		sprintf (key,"STREAM_%d_TYPE",i);
        meta [std::string(key)]= std::string(stream_type);
        
		sprintf (key,"STREAM_%s_CODEC_ID",stream_type);
        meta[std::string(key)]=std::string(av_get_codecid(codec_ctx->codec_id));
        
		if (fmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            
			sprintf(key,"STREAM_%s_AFPS_RATIO",stream_type);
            sprintf(val,"%d:%d",fmt_ctx->streams[i]->avg_frame_rate.num,fmt_ctx->streams[i]->avg_frame_rate.den);
            meta[std::string(key)]=std::string(val);
            
			sprintf(key,"STREAM_%s_FPS_RATIO",stream_type);
            sprintf(val,"%d:%d",fmt_ctx->streams[i]->r_frame_rate.num,fmt_ctx->streams[i]->r_frame_rate.den);
            meta[std::string(key)]=std::string(val);

            
			sprintf(key,"STREAM_%s_FPS",stream_type);
            sprintf(val,"%f",1.0*fmt_ctx->streams[i]->r_frame_rate.num / fmt_ctx->streams[i]->r_frame_rate.den);
            meta[std::string(key)]=std::string(val);
            
			sprintf (key,"STREAM_%s_WIDTH",stream_type);
            sprintf (val,"%d",codec_ctx->width);
            meta[std::string(key)]=std::string(val);

			sprintf (key,"STREAM_%s_HEIGHT",stream_type);
            sprintf (val,"%d",codec_ctx->height);
            meta[std::string(key)]=std::string(val);

			sprintf (key,"STREAM_%s_PIX_FMT",stream_type);
            
           // std::cerr << codec_ctx->pix_fmt << "\n";
            
            const char * pixFmtName = av_get_pix_fmt_name(codec_ctx->pix_fmt);
            
     //       std::cerr << av_get_pix_fmt_name(codec_ctx->pix_fmt) << "\n";
            
            if (pixFmtName)
                meta[std::string(key)]=std::string(pixFmtName);
            
			sprintf(key,"STREAM_%s_SAR",stream_type);
            sprintf(val,"%d:%d",codec_ctx->sample_aspect_ratio.num,codec_ctx->sample_aspect_ratio.den);
            meta[std::string(key)]=std::string(val);

			sprintf (key,"STREAM_%s_REFFRAMES",stream_type);
            sprintf (val,"%d",codec_ctx->refs);
            meta[std::string(key)]=std::string(val);

			sprintf (key,"STREAM_%s_COLORSPACE",stream_type);
            meta[std::string(key)] = std::string(av_get_colorspace(codec_ctx->colorspace));
            
			sprintf (key,"STREAM_%s_COLORRANGE",stream_type);
            meta[std::string(key)] = std::string(av_get_colorrange(codec_ctx->color_range));
			
            sprintf (key,"STREAM_%s_FIELDORDER",stream_type);
            meta[std::string(key)] = std::string(av_get_field_order(codec_ctx->field_order));
		}
		if (fmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
			sprintf (key,"STREAM_%s_SAMPLERATE",stream_type);
            sprintf(val,"%d",codec_ctx->sample_rate);
            meta[std::string(key)]=std::string(val);

            
			sprintf (key,"STREAM_%s_CHANNELS",stream_type);
            sprintf(val,"%d",codec_ctx->channels);
            meta[std::string(key)]=std::string(val);

			sprintf (key,"STREAM_%s_SAMPLEFORMAT",stream_type);
            
            const char * smpFmtName = av_get_sample_fmt_name(codec_ctx->sample_fmt);

            if (smpFmtName)
                meta[std::string(key)]=std::string(smpFmtName);
		}

		while ((tag = av_dict_get(fmt_ctx->streams[i]->metadata, "", tag, AV_DICT_IGNORE_SUFFIX)))
        {
			sprintf(key,"STREAM_%s_%s", stream_type,tag->key);
            meta[std::string(key)]=std::string(tag->value);
        }
	}
    
}
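
The author flags the key/val buffers above as a possible overflow. A small sketch, under the assumption that bounded formatting is acceptable here, of how the same keys could be built with snprintf; make_key() is a hypothetical helper, not part of the original class:

#include <cstdio>
#include <string>

/* Hypothetical helper: builds a "STREAM_<type>_<field>" key without the
 * unbounded sprintf calls used in the example above. */
static std::string make_key(const char *stream_type, const char *field)
{
    char key[128];
    snprintf(key, sizeof(key), "STREAM_%s_%s", stream_type, field);
    return std::string(key);
}

/* e.g. meta[make_key(stream_type, "WIDTH")] = std::to_string(codec_ctx->width); */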
Example #27
int sxpi_demuxing_init(void *log_ctx,
                       struct demuxing_ctx *ctx,
                       AVThreadMessageQueue *src_queue,
                       AVThreadMessageQueue *pkt_queue,
                       const char *filename,
                       const struct sxplayer_opts *opts)
{
    int ret;
    enum AVMediaType media_type;

    ctx->log_ctx = log_ctx;

    ctx->src_queue = src_queue;
    ctx->pkt_queue = pkt_queue;
    ctx->pkt_skip_mod = opts->pkt_skip_mod;

    switch (opts->avselect) {
    case SXPLAYER_SELECT_VIDEO: media_type = AVMEDIA_TYPE_VIDEO; break;
    case SXPLAYER_SELECT_AUDIO: media_type = AVMEDIA_TYPE_AUDIO; break;
    default:
        av_assert0(0);
    }

    TRACE(ctx, "opening %s", filename);
    ret = avformat_open_input(&ctx->fmt_ctx, filename, NULL, NULL);
    if (ret < 0) {
        LOG(ctx, ERROR, "Unable to open input file '%s'", filename);
        return ret;
    }

    /* Evil hack: we make sure avformat_find_stream_info() doesn't decode any
     * video, because it will use the software decoder, which will be slow and
     * use an indecent amount of memory (15-20MB). It slows down startup
     * significantly on embedded platforms and risks being killed by the OOM
     * killer because of the large amount of memory allocated in a short time.
     *
     * The max_analyze_duration is accessible through avoptions, but a negative
     * value isn't actually in the allowed range, so we directly mess with the
     * field. Unfortunately we can't set it to 0 because, at the time of
     * writing, 0 (the default value) means "auto", which is then turned into
     * something like 5 seconds for video. */
    ctx->fmt_ctx->max_analyze_duration = -1;

    TRACE(ctx, "find stream info");
    ret = avformat_find_stream_info(ctx->fmt_ctx, NULL);
    if (ret < 0) {
        LOG(ctx, ERROR, "Unable to find input stream information");
        return ret;
    }

    TRACE(ctx, "find best stream");
    ret = av_find_best_stream(ctx->fmt_ctx, media_type, -1, -1, NULL, 0);
    if (ret < 0) {
        LOG(ctx, ERROR, "Unable to find a %s stream in the input file",
            av_get_media_type_string(media_type));
        return ret;
    }
    ctx->stream_idx = ret;
    ctx->stream = ctx->fmt_ctx->streams[ctx->stream_idx];
    ctx->is_image = strstr(ctx->fmt_ctx->iformat->name, "image2") ||
                    strstr(ctx->fmt_ctx->iformat->name, "_pipe");

    av_dump_format(ctx->fmt_ctx, 0, filename, 0);

    return 0;
}
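
The comment above explains why this code writes max_analyze_duration directly: a true negative value is not reachable through avoptions. If a small positive ceiling is acceptable instead, a similar effect can be approximated with the public "probesize"/"analyzeduration" options passed to avformat_open_input(); the values below are illustrative only, and too small a probe can mis-detect streams:

    AVDictionary *opts = NULL;
    av_dict_set(&opts, "probesize",       "32768",  0);   /* bytes to probe; illustrative value */
    av_dict_set(&opts, "analyzeduration", "100000", 0);   /* microseconds; 0 would mean "auto", as noted above */
    ret = avformat_open_input(&ctx->fmt_ctx, filename, NULL, &opts);
    av_dict_free(&opts);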
Example #28
static int flv_write_header(AVFormatContext *s)
{
    int i;
    AVIOContext *pb = s->pb;
    FLVContext *flv = s->priv_data;

    for (i = 0; i < s->nb_streams; i++) {
        AVCodecParameters *par = s->streams[i]->codecpar;
        FLVStreamContext *sc;
        switch (par->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            if (s->streams[i]->avg_frame_rate.den &&
                s->streams[i]->avg_frame_rate.num) {
                flv->framerate = av_q2d(s->streams[i]->avg_frame_rate);
            }
            if (flv->video_par) {
                av_log(s, AV_LOG_ERROR,
                       "at most one video stream is supported in flv\n");
                return AVERROR(EINVAL);
            }
            flv->video_par = par;
            if (!ff_codec_get_tag(flv_video_codec_ids, par->codec_id))
                return unsupported_codec(s, "Video", par->codec_id);

            if (par->codec_id == AV_CODEC_ID_MPEG4 ||
                par->codec_id == AV_CODEC_ID_H263) {
                int error = s->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL;
                av_log(s, error ? AV_LOG_ERROR : AV_LOG_WARNING,
                       "Codec %s is not supported in the official FLV specification,\n", avcodec_get_name(par->codec_id));

                if (error) {
                    av_log(s, AV_LOG_ERROR,
                           "use vstrict=-1 / -strict -1 to use it anyway.\n");
                    return AVERROR(EINVAL);
                }
            } else if (par->codec_id == AV_CODEC_ID_VP6) {
                av_log(s, AV_LOG_WARNING,
                       "Muxing VP6 in flv will produce flipped video on playback.\n");
            }
            break;
        case AVMEDIA_TYPE_AUDIO:
            if (flv->audio_par) {
                av_log(s, AV_LOG_ERROR,
                       "at most one audio stream is supported in flv\n");
                return AVERROR(EINVAL);
            }
            flv->audio_par = par;
            if (get_audio_flags(s, par) < 0)
                return unsupported_codec(s, "Audio", par->codec_id);
            if (par->codec_id == AV_CODEC_ID_PCM_S16BE)
                av_log(s, AV_LOG_WARNING,
                       "16-bit big-endian audio in flv is valid but most likely unplayable (hardware dependent); use s16le\n");
            break;
        case AVMEDIA_TYPE_DATA:
            if (par->codec_id != AV_CODEC_ID_TEXT && par->codec_id != AV_CODEC_ID_NONE)
                return unsupported_codec(s, "Data", par->codec_id);
            flv->data_par = par;
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            if (par->codec_id != AV_CODEC_ID_TEXT) {
                av_log(s, AV_LOG_ERROR, "Subtitle codec '%s' for stream %d is not compatible with FLV\n",
                       avcodec_get_name(par->codec_id), i);
                return AVERROR_INVALIDDATA;
            }
            flv->data_par = par;
            break;
        default:
            av_log(s, AV_LOG_ERROR, "Codec type '%s' for stream %d is not compatible with FLV\n",
                   av_get_media_type_string(par->codec_type), i);
            return AVERROR(EINVAL);
        }
        avpriv_set_pts_info(s->streams[i], 32, 1, 1000); /* 32 bit pts in ms */

        sc = av_mallocz(sizeof(FLVStreamContext));
        if (!sc)
            return AVERROR(ENOMEM);
        s->streams[i]->priv_data = sc;
        sc->last_ts = -1;
    }

    flv->delay = AV_NOPTS_VALUE;

    avio_write(pb, "FLV", 3);
    avio_w8(pb, 1);
    avio_w8(pb, FLV_HEADER_FLAG_HASAUDIO * !!flv->audio_par +
                FLV_HEADER_FLAG_HASVIDEO * !!flv->video_par);
    avio_wb32(pb, 9);
    avio_wb32(pb, 0);

    for (i = 0; i < s->nb_streams; i++)
        if (s->streams[i]->codecpar->codec_tag == 5) {
            avio_w8(pb, 8);     // message type
            avio_wb24(pb, 0);   // include flags
            avio_wb24(pb, 0);   // time stamp
            avio_wb32(pb, 0);   // reserved
            avio_wb32(pb, 11);  // size
            flv->reserved = 5;
        }

    if (flv->flags & FLV_NO_METADATA) {
        pb->seekable = 0;
    } else {
        write_metadata(s, 0);
    }

    for (i = 0; i < s->nb_streams; i++) {
        flv_write_codec_header(s, s->streams[i]->codecpar);
    }

    flv->datastart_offset = avio_tell(pb);
    return 0;
}
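
For context, a hedged sketch of how a caller reaches a muxer write_header callback like the one above through the public API; the output name and the elided codecpar setup are placeholders, and error handling is omitted:

    AVFormatContext *oc = NULL;

    avformat_alloc_output_context2(&oc, NULL, "flv", "out.flv");   /* picks the FLV muxer */
    AVStream *st = avformat_new_stream(oc, NULL);
    /* ... fill st->codecpar (codec_type, codec_id, width/height or sample_rate/channels) ... */
    avio_open(&oc->pb, "out.flv", AVIO_FLAG_WRITE);
    avformat_write_header(oc, NULL);   /* this call ends up in the muxer's write_header */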
Example #29
int open_input_file(int check_yuv)
{
	AVStream *stream = NULL;
	AVCodecContext *codecCtx = NULL;
	AVCodec *dec = NULL;
	int ret;
	int streamIdx = 0;
	unsigned int i;

	/*
	 * Reads the file header and stores information about the file
	 * format in the AVFormatContext structure.
	 * @param fmt, options: if NULL, auto-detect the file format,
	 *                      buffer size, and format options.
	 */

	ret = avformat_open_input(&inFmtCtx, file_input, NULL, NULL);
	if (ret < 0)
	{
		av_log(NULL, AV_LOG_ERROR, "Cannot open input file [%s]\n", file_input);
		return ret;
	}

	if (check_yuv == 1) {
		printf("Input is a raw YUV file, so the frame size must be entered manually.\n");
		printf("width : ");
		scanf("%d", &inFmtCtx->streams[streamIdx]->codec->width);
		printf("height : ");
		scanf("%d", &inFmtCtx->streams[streamIdx]->codec->height);
	}

	av_log(NULL, AV_LOG_INFO, "File [%s] Open Success\n", file_input);
	av_log(NULL, AV_LOG_DEBUG, "Format: %s\n", inFmtCtx->iformat->name);


	//retrieve stream information
	ret = avformat_find_stream_info(inFmtCtx, NULL);
	if (ret < 0)
	{
		av_log(NULL, AV_LOG_ERROR, "Cannot find stream information");
		return ret;
	}

	av_log(NULL, AV_LOG_INFO, "Get Stream Information Success\n");

	for (i = 0; i < inFmtCtx->nb_streams; i++)
	{
		stream = inFmtCtx->streams[i];
		codecCtx = stream->codec;

		if (codecCtx->codec_type == AVMEDIA_TYPE_VIDEO)
		{
			streamIdx = i;
			//find decoder
			dec = avcodec_find_decoder(codecCtx->codec_id);
			if (!dec)  /* avcodec_find_decoder() returns NULL on failure, not a negative value */
			{
				av_log(NULL, AV_LOG_ERROR, "Could not find [%s] codec\n", av_get_media_type_string(codecCtx->codec_type));
				return AVERROR(EINVAL);
			}

			ret = avcodec_open2(codecCtx, dec, NULL);
			if (ret < 0)
			{
				av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
				return ret;
			}
		}
	}

	clip_width = inFmtCtx->streams[streamIdx]->codec->width;
	clip_height = inFmtCtx->streams[streamIdx]->codec->height;
	time_base_den = inFmtCtx->streams[streamIdx]->codec->time_base.den;
	time_base_num = inFmtCtx->streams[streamIdx]->codec->time_base.num;

	//debugging function
	av_dump_format(inFmtCtx, 0, file_input, 0);

	return 0;
}
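
Instead of prompting for the frame size after the file is opened, raw YUV input can also be described up front through the rawvideo demuxer options; a sketch with placeholder geometry and pixel format:

	AVDictionary *opts = NULL;
	av_dict_set(&opts, "video_size",   "1920x1080", 0);   /* placeholder geometry */
	av_dict_set(&opts, "pixel_format", "yuv420p",   0);   /* placeholder format   */
	ret = avformat_open_input(&inFmtCtx, file_input, av_find_input_format("rawvideo"), &opts);
	av_dict_free(&opts);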
Example #30
static int flv_write_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    FLVContext *flv = s->priv_data;
    AVCodecContext *audio_enc = NULL, *video_enc = NULL, *data_enc = NULL;
    int i, metadata_count = 0;
    double framerate = 0.0;
    int64_t metadata_size_pos, data_size, metadata_count_pos;
    AVDictionaryEntry *tag = NULL;

    for (i = 0; i < s->nb_streams; i++) {
        AVCodecContext *enc = s->streams[i]->codec;
        FLVStreamContext *sc;
        switch (enc->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            if (s->streams[i]->avg_frame_rate.den &&
                s->streams[i]->avg_frame_rate.num) {
                framerate = av_q2d(s->streams[i]->avg_frame_rate);
            } else {
                framerate = 1 / av_q2d(s->streams[i]->codec->time_base);
            }
            if (video_enc) {
                av_log(s, AV_LOG_ERROR,
                       "at most one video stream is supported in flv\n");
                return AVERROR(EINVAL);
            }
            video_enc = enc;
            if (enc->codec_tag == 0) {
                av_log(s, AV_LOG_ERROR, "Video codec '%s' for stream %d is not compatible with FLV\n",
                       avcodec_get_name(enc->codec_id), i);
                return AVERROR(EINVAL);
            }
            break;
        case AVMEDIA_TYPE_AUDIO:
            if (audio_enc) {
                av_log(s, AV_LOG_ERROR,
                       "at most one audio stream is supported in flv\n");
                return AVERROR(EINVAL);
            }
            audio_enc = enc;
            if (get_audio_flags(s, enc) < 0)
                return AVERROR_INVALIDDATA;
            break;
        case AVMEDIA_TYPE_DATA:
            if (enc->codec_id != AV_CODEC_ID_TEXT) {
                av_log(s, AV_LOG_ERROR, "Data codec '%s' for stream %d is not compatible with FLV\n",
                       avcodec_get_name(enc->codec_id), i);
                return AVERROR_INVALIDDATA;
            }
            data_enc = enc;
            break;
        default:
            av_log(s, AV_LOG_ERROR, "Codec type '%s' for stream %d is not compatible with FLV\n",
                   av_get_media_type_string(enc->codec_type), i);
            return AVERROR(EINVAL);
        }
        avpriv_set_pts_info(s->streams[i], 32, 1, 1000); /* 32 bit pts in ms */

        sc = av_mallocz(sizeof(FLVStreamContext));
        if (!sc)
            return AVERROR(ENOMEM);
        s->streams[i]->priv_data = sc;
        sc->last_ts = -1;
    }

    flv->delay = AV_NOPTS_VALUE;

    avio_write(pb, "FLV", 3);
    avio_w8(pb, 1);
    avio_w8(pb, FLV_HEADER_FLAG_HASAUDIO * !!audio_enc +
                FLV_HEADER_FLAG_HASVIDEO * !!video_enc);
    avio_wb32(pb, 9);
    avio_wb32(pb, 0);

    for (i = 0; i < s->nb_streams; i++)
        if (s->streams[i]->codec->codec_tag == 5) {
            avio_w8(pb, 8);     // message type
            avio_wb24(pb, 0);   // include flags
            avio_wb24(pb, 0);   // time stamp
            avio_wb32(pb, 0);   // reserved
            avio_wb32(pb, 11);  // size
            flv->reserved = 5;
        }

    /* write meta_tag */
    avio_w8(pb, 18);            // tag type META
    metadata_size_pos = avio_tell(pb);
    avio_wb24(pb, 0);           // size of data part (sum of all parts below)
    avio_wb24(pb, 0);           // timestamp
    avio_wb32(pb, 0);           // reserved

    /* now data of data_size size */

    /* first event name as a string */
    avio_w8(pb, AMF_DATA_TYPE_STRING);
    put_amf_string(pb, "onMetaData"); // 12 bytes

    /* mixed array (hash) with size and string/type/data tuples */
    avio_w8(pb, AMF_DATA_TYPE_MIXEDARRAY);
    metadata_count_pos = avio_tell(pb);
    metadata_count = 5 * !!video_enc +
                     5 * !!audio_enc +
                     1 * !!data_enc  +
                     2; // +2 for duration and file size

    avio_wb32(pb, metadata_count);

    put_amf_string(pb, "duration");
    flv->duration_offset= avio_tell(pb);

    // fill in the guessed duration, it'll be corrected later if incorrect
    put_amf_double(pb, s->duration / AV_TIME_BASE);

    if (video_enc) {
        put_amf_string(pb, "width");
        put_amf_double(pb, video_enc->width);

        put_amf_string(pb, "height");
        put_amf_double(pb, video_enc->height);

        put_amf_string(pb, "videodatarate");
        put_amf_double(pb, video_enc->bit_rate / 1024.0);

        put_amf_string(pb, "framerate");
        put_amf_double(pb, framerate);

        put_amf_string(pb, "videocodecid");
        put_amf_double(pb, video_enc->codec_tag);
    }

    if (audio_enc) {
        put_amf_string(pb, "audiodatarate");
        put_amf_double(pb, audio_enc->bit_rate / 1024.0);

        put_amf_string(pb, "audiosamplerate");
        put_amf_double(pb, audio_enc->sample_rate);

        put_amf_string(pb, "audiosamplesize");
        put_amf_double(pb, audio_enc->codec_id == AV_CODEC_ID_PCM_U8 ? 8 : 16);

        put_amf_string(pb, "stereo");
        put_amf_bool(pb, audio_enc->channels == 2);

        put_amf_string(pb, "audiocodecid");
        put_amf_double(pb, audio_enc->codec_tag);
    }

    if (data_enc) {
        put_amf_string(pb, "datastream");
        put_amf_double(pb, 0.0);
    }

    while ((tag = av_dict_get(s->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
        if(   !strcmp(tag->key, "width")
            ||!strcmp(tag->key, "height")
            ||!strcmp(tag->key, "videodatarate")
            ||!strcmp(tag->key, "framerate")
            ||!strcmp(tag->key, "videocodecid")
            ||!strcmp(tag->key, "audiodatarate")
            ||!strcmp(tag->key, "audiosamplerate")
            ||!strcmp(tag->key, "audiosamplesize")
            ||!strcmp(tag->key, "stereo")
            ||!strcmp(tag->key, "audiocodecid")
            ||!strcmp(tag->key, "duration")
            ||!strcmp(tag->key, "onMetaData")
        ){
            av_log(s, AV_LOG_DEBUG, "Ignoring metadata for %s\n", tag->key);
            continue;
        }
        put_amf_string(pb, tag->key);
        avio_w8(pb, AMF_DATA_TYPE_STRING);
        put_amf_string(pb, tag->value);
        metadata_count++;
    }

    put_amf_string(pb, "filesize");
    flv->filesize_offset = avio_tell(pb);
    put_amf_double(pb, 0); // delayed write

    put_amf_string(pb, "");
    avio_w8(pb, AMF_END_OF_OBJECT);

    /* write total size of tag */
    data_size = avio_tell(pb) - metadata_size_pos - 10;

    avio_seek(pb, metadata_count_pos, SEEK_SET);
    avio_wb32(pb, metadata_count);

    avio_seek(pb, metadata_size_pos, SEEK_SET);
    avio_wb24(pb, data_size);
    avio_skip(pb, data_size + 10 - 3);
    avio_wb32(pb, data_size + 11);

    for (i = 0; i < s->nb_streams; i++) {
        AVCodecContext *enc = s->streams[i]->codec;
        if (enc->codec_id == AV_CODEC_ID_AAC || enc->codec_id == AV_CODEC_ID_H264 || enc->codec_id == AV_CODEC_ID_MPEG4) {
            int64_t pos;
            avio_w8(pb, enc->codec_type == AVMEDIA_TYPE_VIDEO ?
                    FLV_TAG_TYPE_VIDEO : FLV_TAG_TYPE_AUDIO);
            avio_wb24(pb, 0); // size patched later
            avio_wb24(pb, 0); // ts
            avio_w8(pb, 0);   // ts ext
            avio_wb24(pb, 0); // streamid
            pos = avio_tell(pb);
            if (enc->codec_id == AV_CODEC_ID_AAC) {
                avio_w8(pb, get_audio_flags(s, enc));
                avio_w8(pb, 0); // AAC sequence header
                avio_write(pb, enc->extradata, enc->extradata_size);
            } else {
                avio_w8(pb, enc->codec_tag | FLV_FRAME_KEY); // flags
                avio_w8(pb, 0); // AVC sequence header
                avio_wb24(pb, 0); // composition time
                ff_isom_write_avcc(pb, enc->extradata, enc->extradata_size);
            }
            data_size = avio_tell(pb) - pos;
            avio_seek(pb, -data_size - 10, SEEK_CUR);
            avio_wb24(pb, data_size);
            avio_skip(pb, data_size + 10 - 3);
            avio_wb32(pb, data_size + 11); // previous tag size
        }
    }

    return 0;
}
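
A hedged sketch of what the put_amf_* helpers used above encode, following the AMF0 wire format; the function names are illustrative, not the muxer's own source:

#include <string.h>
#include "libavformat/avio.h"
#include "libavutil/intfloat.h"

/* AMF0 string as used for keys/values here: 16-bit big-endian length plus the
 * UTF-8 bytes, with no type marker (any marker is written separately). */
static void amf0_put_string(AVIOContext *pb, const char *s)
{
    avio_wb16(pb, (uint16_t)strlen(s));
    avio_write(pb, (const uint8_t *)s, (int)strlen(s));
}

/* AMF0 number: one marker byte (0x00) followed by the value as a
 * big-endian IEEE-754 double. */
static void amf0_put_double(AVIOContext *pb, double d)
{
    avio_w8(pb, 0x00);
    avio_wb64(pb, av_double2int(d));
}

/* An object/mixed array ends with an empty key followed by the 0x09 end marker,
 * which is what the put_amf_string(pb, "") / AMF_END_OF_OBJECT pair above emits. */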