// Create an h264_mp4toannexb bitstream filter and smoke-test it against the
// codec context before committing to it. Returns true only if the filter
// initializes AND produces output for a dummy NAL.
bool PrivateDecoderCrystalHD::CreateFilter(AVCodecContext *avctx)
{
    // AVCC extradata: byte 4, low 2 bits = (NAL length-prefix size - 1),
    // so nalsize is always in [1,4] here.
    // NOTE(review): extradata/extradata_size are not checked before the
    // [4] access — presumably the caller guarantees AVCC extradata exists.
    int nalsize = (avctx->extradata[4] & 0x3) + 1;
    // Only 1-, 2- and 4-byte NAL length prefixes are acceptable; 3 is
    // rejected explicitly (the !nalsize / >4 arms are defensive).
    if (!nalsize || nalsize == 3 || nalsize > 4)
    {
        LOG(VB_PLAYBACK, LOG_ERR, LOC + QString("Invalid nal size (%1)")
            .arg(nalsize));
        return false;
    }

    // Dummy NAL: a 4-byte big-endian length prefix of 2 followed by two
    // payload bytes — just enough for the filter to chew on.
    static const uint8_t testnal[] = { 0,0,0,2,0,0 };
    AVBitStreamFilterContext *bsfc =
        av_bitstream_filter_init("h264_mp4toannexb");
    if (!bsfc)
        return false;
    m_filter = bsfc;

    // and test extradata
    const uint8_t *test = testnal;
    int testsize = 6;
    int outbuf_size = 0;
    uint8_t *outbuf = NULL;
    int res = av_bitstream_filter_filter(m_filter, avctx, NULL,
                                         &outbuf, &outbuf_size,
                                         test, testsize, 0);
    // The filter allocates the output buffer; free it — only the return
    // code matters for this probe.
    av_freep(&outbuf);
    // res > 0 means the filter emitted converted data, i.e. it works.
    return res > 0;
}
/**
 * Parse a comma-separated list of bitstream filter names and append the
 * corresponding contexts to the list pointed to by bsfs.
 *
 * The list must be specified in the form:
 * BSFS ::= BSF[,BSFS]
 *
 * @param log_ctx   logging context for av_log() (may be NULL)
 * @param bsfs_spec specification string, e.g. "h264_mp4toannexb,dump_extra"
 * @param bsfs      pointer to the head of the filter list to append to;
 *                  entries already present are preserved
 * @return 0 on success, a negative AVERROR code on failure (on failure,
 *         filters created so far remain linked into the list)
 */
static int parse_bsfs(void *log_ctx, const char *bsfs_spec,
                      AVBitStreamFilterContext **bsfs)
{
    char *bsf_name, *buf, *dup, *saveptr;
    int ret = 0;

    /* av_strtok mutates its input, so work on a private copy */
    if (!(dup = buf = av_strdup(bsfs_spec)))
        return AVERROR(ENOMEM);

    /* Advance to the tail so we really *append*: the previous code
     * assigned the first new filter to *bsfs directly, which dropped
     * (and leaked) any filters already present in the caller's list. */
    while (*bsfs)
        bsfs = &(*bsfs)->next;

    while ((bsf_name = av_strtok(buf, ",", &saveptr))) {
        AVBitStreamFilterContext *bsf = av_bitstream_filter_init(bsf_name);
        if (!bsf) {
            av_log(log_ctx, AV_LOG_ERROR,
                   "Cannot initialize bitstream filter with name '%s', "
                   "unknown filter or internal error happened\n",
                   bsf_name);
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        /* append bsf context to the list of bsf contexts */
        *bsfs = bsf;
        bsfs  = &bsf->next;

        /* subsequent av_strtok calls continue from saveptr */
        buf = NULL;
    }

end:
    av_free(dup);
    return ret;
}
/**
 * Init the muxer with streams.
 *
 * Sets container metadata, creates the h264_mp4toannexb bitstream filter
 * when muxing to MPEG-TS (which requires Annex B H.264), adds one libav
 * stream per supported input component, and writes the container header.
 *
 * @param m    muxer handle (actually a lav_muxer_t)
 * @param ss   streaming start message describing the components
 * @param name service/program name used for metadata
 * @return 0 on success, -1 on error (m_errors is incremented)
 */
static int lav_muxer_init(muxer_t* m, const struct streaming_start *ss,
                          const char *name)
{
    int i;
    const streaming_start_component_t *ssc;
    AVFormatContext *oc;
    lav_muxer_t *lm = (lav_muxer_t*)m;
    char app[128];

    snprintf(app, sizeof(app), "Tvheadend %s", tvheadend_version);

    oc = lm->lm_oc;

    /* Global container metadata */
    av_dict_set(&oc->metadata, "title", name, 0);
    av_dict_set(&oc->metadata, "service_name", name, 0);
    av_dict_set(&oc->metadata, "service_provider", app, 0);

    /* MPEG-TS carries Annex B H.264, so AVCC packets must be converted.
     * Previously a failed filter init was silently ignored; log it so a
     * missing filter is at least diagnosable. */
    if(lm->m_container == MC_MPEGTS) {
        lm->lm_h264_filter = av_bitstream_filter_init("h264_mp4toannexb");
        if(!lm->lm_h264_filter)
            tvhlog(LOG_WARNING, "libav",
                   "Failed to initialize h264_mp4toannexb bitstream filter");
    }

    oc->max_delay = 0.7 * AV_TIME_BASE;

    for(i=0; i < ss->ss_num_components; i++) {
        ssc = &ss->ss_components[i];

        if(ssc->ssc_disabled)
            continue;

        if(!lav_muxer_support_stream(lm->m_container, ssc->ssc_type)) {
            tvhlog(LOG_WARNING, "libav", "%s is not supported in %s",
                   streaming_component_type2txt(ssc->ssc_type),
                   muxer_container_type2txt(lm->m_container));
            continue;
        }

        if(lav_muxer_add_stream(lm, ssc)) {
            tvhlog(LOG_ERR, "libav", "Failed to add %s stream",
                   streaming_component_type2txt(ssc->ssc_type));
            continue;
        }
    }

    /* No stream at all -> nothing to mux */
    if(!lm->lm_oc->nb_streams) {
        tvhlog(LOG_ERR, "libav", "No supported streams available");
        lm->m_errors++;
        return -1;
    } else if(avformat_write_header(lm->lm_oc, NULL) < 0) {
        tvhlog(LOG_ERR, "libav", "Failed to write %s header",
               muxer_container_type2txt(lm->m_container));
        lm->m_errors++;
        return -1;
    }

    lm->lm_init = 1;
    return 0;
}
/**
 * @brief allocate segmenter context and set default values
 * @param context receives the newly allocated segmenter context
 * @return 0 on success, negative error code on failure
 */
int segmenter_alloc_context(SegmenterContext** context) {
    /* calloc zero-initializes the whole struct (pointers NULL, counters
     * and doubles 0) — including any members that the old field-by-field
     * initialization might have missed — so nothing is ever read
     * uninitialized. */
    SegmenterContext *_context =
        (SegmenterContext*)calloc(1, sizeof(SegmenterContext));

    if (!_context) {
        return SGERROR(SGERROR_MEM_ALLOC);
    }

    /* Ring of recent segment durations used for averaging */
    _context->durations_size = kAvgSegmentsCount;
    _context->durations =
        (double*)malloc(sizeof(double) * _context->durations_size);

    if (!_context->durations) {
        free(_context);
        return SGERROR(SGERROR_MEM_ALLOC);
    }

    /* May be NULL if the filter is unavailable; callers must cope with
     * that, exactly as before (init failure here is not treated as fatal). */
    _context->bfilter = av_bitstream_filter_init("h264_mp4toannexb");

    *context = _context;
    return 0;
}
/**
 * Probe a packet and create an h264_mp4toannexb bitstream filter if the
 * data is not already in Annex B form (i.e. does not begin with the
 * 00 00 00 01 start code).
 *
 * @param rp packet to probe; must contain at least 4 bytes of data
 * @return the filter context, or NULL if no conversion is needed or the
 *         filter could not be initialized
 */
static AVBitStreamFilterContext *dofiltertest(AVPacket *rp)
{
    /* Bug fix: bsfc was previously uninitialized, so returning it when the
     * packet already carried a start code was undefined behavior. */
    AVBitStreamFilterContext *bsfc = NULL;

    if (!(rp->data[0] == 0x00 && rp->data[1] == 0x00 &&
          rp->data[2] == 0x00 && rp->data[3] == 0x01)) {
        bsfc = av_bitstream_filter_init("h264_mp4toannexb");
        if (!bsfc) {
            printf("Failed to open filter. This is bad.\n");
        } else {
            /* %p requires a void* argument */
            printf("Have a filter at %p\n", (void *)bsfc);
        }
    }

    return bsfc;
}
bool VideoDecoderCUDA::prepare() { //TODO: destroy decoder DPTR_D(VideoDecoderCUDA); if (!d.codec_ctx) { qWarning("AVCodecContext not ready"); return false; } // d.available is true if cuda decoder is ready if (!d.can_load) { qWarning("VideoDecoderCUDA::prepare(): CUVID library not available"); return false; } if (!d.isLoaded()) //cuda_api return false; if (!d.cuctx) d.initCuda(); d.bitstream_filter_ctx = av_bitstream_filter_init("h264_mp4toannexb"); Q_ASSERT_X(d.bitstream_filter_ctx, "av_bitstream_filter_init", "Unknown bitstream filter"); // max decoder surfaces is computed in createCUVIDDecoder. createCUVIDParser use the value return d.createCUVIDDecoder(mapCodecFromFFmpeg(d.codec_ctx->codec_id), d.codec_ctx->coded_width, d.codec_ctx->coded_height) && d.createCUVIDParser(); }
bool FFmpegEncoderOpen(struct FFmpegEncoder* encoder, const char* outfile) { AVCodec* acodec = avcodec_find_encoder_by_name(encoder->audioCodec); AVCodec* vcodec = avcodec_find_encoder_by_name(encoder->videoCodec); if ((encoder->audioCodec && !acodec) || !vcodec || !FFmpegEncoderVerifyContainer(encoder)) { return false; } encoder->currentAudioSample = 0; encoder->currentAudioFrame = 0; encoder->currentVideoFrame = 0; encoder->nextAudioPts = 0; AVOutputFormat* oformat = av_guess_format(encoder->containerFormat, 0, 0); #ifndef USE_LIBAV avformat_alloc_output_context2(&encoder->context, oformat, 0, outfile); #else encoder->context = avformat_alloc_context(); strncpy(encoder->context->filename, outfile, sizeof(encoder->context->filename) - 1); encoder->context->filename[sizeof(encoder->context->filename) - 1] = '\0'; encoder->context->oformat = oformat; #endif if (acodec) { #ifdef FFMPEG_USE_CODECPAR encoder->audioStream = avformat_new_stream(encoder->context, NULL); encoder->audio = avcodec_alloc_context3(acodec); #else encoder->audioStream = avformat_new_stream(encoder->context, acodec); encoder->audio = encoder->audioStream->codec; #endif encoder->audio->bit_rate = encoder->audioBitrate; encoder->audio->channels = 2; encoder->audio->channel_layout = AV_CH_LAYOUT_STEREO; encoder->audio->sample_rate = encoder->sampleRate; encoder->audio->sample_fmt = encoder->sampleFormat; AVDictionary* opts = 0; av_dict_set(&opts, "strict", "-2", 0); if (encoder->context->oformat->flags & AVFMT_GLOBALHEADER) { #ifdef AV_CODEC_FLAG_GLOBAL_HEADER encoder->audio->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; #else encoder->audio->flags |= CODEC_FLAG_GLOBAL_HEADER; #endif } avcodec_open2(encoder->audio, acodec, &opts); av_dict_free(&opts); #if LIBAVCODEC_VERSION_MAJOR >= 55 encoder->audioFrame = av_frame_alloc(); #else encoder->audioFrame = avcodec_alloc_frame(); #endif if (!encoder->audio->frame_size) { encoder->audio->frame_size = 1; } encoder->audioFrame->nb_samples = 
encoder->audio->frame_size; encoder->audioFrame->format = encoder->audio->sample_fmt; encoder->audioFrame->pts = 0; #ifdef USE_LIBAVRESAMPLE encoder->resampleContext = avresample_alloc_context(); av_opt_set_int(encoder->resampleContext, "in_channel_layout", AV_CH_LAYOUT_STEREO, 0); av_opt_set_int(encoder->resampleContext, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0); av_opt_set_int(encoder->resampleContext, "in_sample_rate", PREFERRED_SAMPLE_RATE, 0); av_opt_set_int(encoder->resampleContext, "out_sample_rate", encoder->sampleRate, 0); av_opt_set_int(encoder->resampleContext, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0); av_opt_set_int(encoder->resampleContext, "out_sample_fmt", encoder->sampleFormat, 0); avresample_open(encoder->resampleContext); #else encoder->resampleContext = swr_alloc_set_opts(NULL, AV_CH_LAYOUT_STEREO, encoder->sampleFormat, encoder->sampleRate, AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, PREFERRED_SAMPLE_RATE, 0, NULL); swr_init(encoder->resampleContext); #endif encoder->audioBufferSize = (encoder->audioFrame->nb_samples * PREFERRED_SAMPLE_RATE / encoder->sampleRate) * 4; encoder->audioBuffer = av_malloc(encoder->audioBufferSize); encoder->postaudioBufferSize = av_samples_get_buffer_size(0, encoder->audio->channels, encoder->audio->frame_size, encoder->audio->sample_fmt, 0); encoder->postaudioBuffer = av_malloc(encoder->postaudioBufferSize); avcodec_fill_audio_frame(encoder->audioFrame, encoder->audio->channels, encoder->audio->sample_fmt, (const uint8_t*) encoder->postaudioBuffer, encoder->postaudioBufferSize, 0); if (encoder->audio->codec->id == AV_CODEC_ID_AAC && (strcasecmp(encoder->containerFormat, "mp4") || strcasecmp(encoder->containerFormat, "m4v") || strcasecmp(encoder->containerFormat, "mov"))) { // MP4 container doesn't support the raw ADTS AAC format that the encoder spits out #ifdef FFMPEG_USE_NEW_BSF av_bsf_alloc(av_bsf_get_by_name("aac_adtstoasc"), &encoder->absf); avcodec_parameters_from_context(encoder->absf->par_in, encoder->audio); 
av_bsf_init(encoder->absf); #else encoder->absf = av_bitstream_filter_init("aac_adtstoasc"); #endif } #ifdef FFMPEG_USE_CODECPAR avcodec_parameters_from_context(encoder->audioStream->codecpar, encoder->audio); #endif } #ifdef FFMPEG_USE_CODECPAR encoder->videoStream = avformat_new_stream(encoder->context, NULL); encoder->video = avcodec_alloc_context3(vcodec); #else encoder->videoStream = avformat_new_stream(encoder->context, vcodec); encoder->video = encoder->videoStream->codec; #endif encoder->video->bit_rate = encoder->videoBitrate; encoder->video->width = encoder->width; encoder->video->height = encoder->height; encoder->video->time_base = (AVRational) { VIDEO_TOTAL_LENGTH, GBA_ARM7TDMI_FREQUENCY }; encoder->video->pix_fmt = encoder->pixFormat; encoder->video->gop_size = 60; encoder->video->max_b_frames = 3; if (encoder->context->oformat->flags & AVFMT_GLOBALHEADER) { #ifdef AV_CODEC_FLAG_GLOBAL_HEADER encoder->video->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; #else encoder->video->flags |= CODEC_FLAG_GLOBAL_HEADER; #endif } if (encoder->video->codec->id == AV_CODEC_ID_H264 && (strcasecmp(encoder->containerFormat, "mp4") || strcasecmp(encoder->containerFormat, "m4v") || strcasecmp(encoder->containerFormat, "mov"))) { // QuickTime and a few other things require YUV420 encoder->video->pix_fmt = AV_PIX_FMT_YUV420P; } if (strcmp(vcodec->name, "libx264") == 0) { // Try to adaptively figure out when you can use a slower encoder if (encoder->width * encoder->height > 1000000) { av_opt_set(encoder->video->priv_data, "preset", "superfast", 0); } else if (encoder->width * encoder->height > 500000) { av_opt_set(encoder->video->priv_data, "preset", "veryfast", 0); } else { av_opt_set(encoder->video->priv_data, "preset", "faster", 0); } if (encoder->videoBitrate == 0) { av_opt_set(encoder->video->priv_data, "crf", "0", 0); encoder->video->pix_fmt = AV_PIX_FMT_YUV444P; } } avcodec_open2(encoder->video, vcodec, 0); #if LIBAVCODEC_VERSION_MAJOR >= 55 encoder->videoFrame = 
av_frame_alloc(); #else encoder->videoFrame = avcodec_alloc_frame(); #endif encoder->videoFrame->format = encoder->video->pix_fmt; encoder->videoFrame->width = encoder->video->width; encoder->videoFrame->height = encoder->video->height; encoder->videoFrame->pts = 0; _ffmpegSetVideoDimensions(&encoder->d, encoder->iwidth, encoder->iheight); av_image_alloc(encoder->videoFrame->data, encoder->videoFrame->linesize, encoder->video->width, encoder->video->height, encoder->video->pix_fmt, 32); #ifdef FFMPEG_USE_CODECPAR avcodec_parameters_from_context(encoder->videoStream->codecpar, encoder->video); #endif if (avio_open(&encoder->context->pb, outfile, AVIO_FLAG_WRITE) < 0) { return false; } return avformat_write_header(encoder->context, 0) >= 0; }
/**
 * Initialize the Broadcom MMAL hardware H.264 decoder component.
 *
 * Brings up the VideoCore driver, negotiates the output pixel format,
 * creates the MMAL decoder component, configures its input port from the
 * AVCodecContext (converting AVCC extradata to Annex B if necessary),
 * allocates input buffer pool and decoded-frame queue, and enables all
 * ports. On any failure the partially constructed decoder is torn down.
 *
 * @param avctx codec context; priv_data must be an MMALDecodeContext
 * @return 0 on success, a negative AVERROR code on failure
 */
static av_cold int ffmmal_init_decoder(AVCodecContext *avctx)
{
    MMALDecodeContext *ctx = avctx->priv_data;
    MMAL_STATUS_T status;
    MMAL_ES_FORMAT_T *format_in;
    MMAL_COMPONENT_T *decoder;
    int ret = 0;

    bcm_host_init();

    if (mmal_vc_init()) {
        av_log(avctx, AV_LOG_ERROR, "Cannot initialize MMAL VC driver!\n");
        return AVERROR(ENOSYS);
    }

    if ((ret = ff_get_format(avctx, avctx->codec->pix_fmts)) < 0)
        return ret;
    avctx->pix_fmt = ret;

    if ((status = mmal_component_create(MMAL_COMPONENT_DEFAULT_VIDEO_DECODER, &ctx->decoder)))
        goto fail;

    decoder = ctx->decoder;

    /* Describe the incoming H.264 elementary stream to the input port.
     * Width/height are aligned to the hardware's 32x16 macroblock tiling;
     * the crop rectangle preserves the true display size. */
    format_in = decoder->input[0]->format;
    format_in->type = MMAL_ES_TYPE_VIDEO;
    format_in->encoding = MMAL_ENCODING_H264;
    format_in->es->video.width = FFALIGN(avctx->width, 32);
    format_in->es->video.height = FFALIGN(avctx->height, 16);
    format_in->es->video.crop.width = avctx->width;
    format_in->es->video.crop.height = avctx->height;
    /* Placeholder frame rate (23.976); NOTE(review): presumably the real
     * timing comes from packet timestamps — confirm against callers. */
    format_in->es->video.frame_rate.num = 24000;
    format_in->es->video.frame_rate.den = 1001;
    format_in->es->video.par.num = avctx->sample_aspect_ratio.num;
    format_in->es->video.par.den = avctx->sample_aspect_ratio.den;
    format_in->flags = MMAL_ES_FORMAT_FLAG_FRAMED;

    /* extradata[0] == 1 marks AVCC (MP4-style) H.264; set up the
     * h264_mp4toannexb filter and run it once with empty input so it
     * rewrites avctx->extradata into Annex B form for the port below. */
    if (avctx->codec->id == AV_CODEC_ID_H264 && avctx->extradata && avctx->extradata[0] == 1) {
        uint8_t *dummy_p;
        int dummy_int;
        ctx->bsfc = av_bitstream_filter_init("h264_mp4toannexb");
        if (!ctx->bsfc) {
            av_log(avctx, AV_LOG_ERROR, "Cannot open the h264_mp4toannexb BSF!\n");
            ret = AVERROR(ENOSYS);
            goto fail;
        }
        av_bitstream_filter_filter(ctx->bsfc, avctx, NULL, &dummy_p, &dummy_int, NULL, 0, 0);
    }

    if (avctx->extradata_size) {
        if ((status = mmal_format_extradata_alloc(format_in, avctx->extradata_size)))
            goto fail;
        format_in->extradata_size = avctx->extradata_size;
        memcpy(format_in->extradata, avctx->extradata, format_in->extradata_size);
    }

    if ((status = mmal_port_format_commit(decoder->input[0])))
        goto fail;

    /* Generous input buffering: at least 20 buffers of >= 512 KiB each */
    decoder->input[0]->buffer_num = FFMAX(decoder->input[0]->buffer_num_min, 20);
    decoder->input[0]->buffer_size = FFMAX(decoder->input[0]->buffer_size_min, 512 * 1024);
    ctx->pool_in = mmal_pool_create(decoder->input[0]->buffer_num, 0);
    if (!ctx->pool_in) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if ((ret = ffmal_update_format(avctx)) < 0)
        goto fail;

    ctx->queue_decoded_frames = mmal_queue_create();
    if (!ctx->queue_decoded_frames)
        goto fail;

    /* Callbacks recover the AVCodecContext via port userdata */
    decoder->input[0]->userdata = (void*)avctx;
    decoder->output[0]->userdata = (void*)avctx;
    decoder->control->userdata = (void*)avctx;

    if ((status = mmal_port_enable(decoder->control, control_port_cb)))
        goto fail;
    if ((status = mmal_port_enable(decoder->input[0], input_callback)))
        goto fail;
    if ((status = mmal_port_enable(decoder->output[0], output_callback)))
        goto fail;

    if ((status = mmal_component_enable(decoder)))
        goto fail;

    return 0;

fail:
    /* MMAL failures set status but leave ret == 0, hence the fallback */
    ffmmal_close_decoder(avctx);
    return ret < 0 ? ret : AVERROR_UNKNOWN;
}
/********************************************************************** * avformatInit ********************************************************************** * Allocates hb_mux_data_t structures, create file and write headers *********************************************************************/ static int avformatInit( hb_mux_object_t * m ) { hb_job_t * job = m->job; hb_audio_t * audio; hb_mux_data_t * track; int meta_mux; int max_tracks; int ii, jj, ret; int clock_min, clock_max, clock; hb_video_framerate_get_limits(&clock_min, &clock_max, &clock); const char *muxer_name = NULL; uint8_t default_track_flag = 1; uint8_t need_fonts = 0; char *lang; max_tracks = 1 + hb_list_count( job->list_audio ) + hb_list_count( job->list_subtitle ); m->tracks = calloc(max_tracks, sizeof(hb_mux_data_t*)); m->oc = avformat_alloc_context(); if (m->oc == NULL) { hb_error( "Could not initialize avformat context." ); goto error; } AVDictionary * av_opts = NULL; switch (job->mux) { case HB_MUX_AV_MP4: m->time_base.num = 1; m->time_base.den = 90000; if( job->ipod_atom ) muxer_name = "ipod"; else muxer_name = "mp4"; meta_mux = META_MUX_MP4; av_dict_set(&av_opts, "brand", "mp42", 0); if (job->mp4_optimize) av_dict_set(&av_opts, "movflags", "faststart+disable_chpl", 0); else av_dict_set(&av_opts, "movflags", "+disable_chpl", 0); break; case HB_MUX_AV_MKV: // libavformat is essentially hard coded such that it only // works with a timebase of 1/1000 m->time_base.num = 1; m->time_base.den = 1000; muxer_name = "matroska"; meta_mux = META_MUX_MKV; break; default: { hb_error("Invalid Mux %x", job->mux); goto error; } } m->oc->oformat = av_guess_format(muxer_name, NULL, NULL); if(m->oc->oformat == NULL) { hb_error("Could not guess output format %s", muxer_name); goto error; } av_strlcpy(m->oc->filename, job->file, sizeof(m->oc->filename)); ret = avio_open2(&m->oc->pb, job->file, AVIO_FLAG_WRITE, &m->oc->interrupt_callback, NULL); if( ret < 0 ) { hb_error( "avio_open2 failed, errno %d", ret); goto 
error; } /* Video track */ track = m->tracks[m->ntracks++] = calloc(1, sizeof( hb_mux_data_t ) ); job->mux_data = track; track->type = MUX_TYPE_VIDEO; track->prev_chapter_tc = AV_NOPTS_VALUE; track->st = avformat_new_stream(m->oc, NULL); if (track->st == NULL) { hb_error("Could not initialize video stream"); goto error; } track->st->time_base = m->time_base; avcodec_get_context_defaults3(track->st->codec, NULL); track->st->codec->codec_type = AVMEDIA_TYPE_VIDEO; track->st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; uint8_t *priv_data = NULL; int priv_size = 0; switch (job->vcodec) { case HB_VCODEC_X264_8BIT: case HB_VCODEC_X264_10BIT: case HB_VCODEC_QSV_H264: track->st->codec->codec_id = AV_CODEC_ID_H264; /* Taken from x264 muxers.c */ priv_size = 5 + 1 + 2 + job->config.h264.sps_length + 1 + 2 + job->config.h264.pps_length; priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE); if (priv_data == NULL) { hb_error("H.264 extradata: malloc failure"); goto error; } priv_data[0] = 1; priv_data[1] = job->config.h264.sps[1]; /* AVCProfileIndication */ priv_data[2] = job->config.h264.sps[2]; /* profile_compat */ priv_data[3] = job->config.h264.sps[3]; /* AVCLevelIndication */ priv_data[4] = 0xff; // nalu size length is four bytes priv_data[5] = 0xe1; // one sps priv_data[6] = job->config.h264.sps_length >> 8; priv_data[7] = job->config.h264.sps_length; memcpy(priv_data+8, job->config.h264.sps, job->config.h264.sps_length); priv_data[8+job->config.h264.sps_length] = 1; // one pps priv_data[9+job->config.h264.sps_length] = job->config.h264.pps_length >> 8; priv_data[10+job->config.h264.sps_length] = job->config.h264.pps_length; memcpy(priv_data+11+job->config.h264.sps_length, job->config.h264.pps, job->config.h264.pps_length ); break; case HB_VCODEC_FFMPEG_MPEG4: track->st->codec->codec_id = AV_CODEC_ID_MPEG4; if (job->config.mpeg4.length != 0) { priv_size = job->config.mpeg4.length; priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE); if (priv_data == 
NULL) { hb_error("MPEG4 extradata: malloc failure"); goto error; } memcpy(priv_data, job->config.mpeg4.bytes, priv_size); } break; case HB_VCODEC_FFMPEG_MPEG2: track->st->codec->codec_id = AV_CODEC_ID_MPEG2VIDEO; if (job->config.mpeg4.length != 0) { priv_size = job->config.mpeg4.length; priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE); if (priv_data == NULL) { hb_error("MPEG2 extradata: malloc failure"); goto error; } memcpy(priv_data, job->config.mpeg4.bytes, priv_size); } break; case HB_VCODEC_FFMPEG_VP8: track->st->codec->codec_id = AV_CODEC_ID_VP8; priv_data = NULL; priv_size = 0; break; case HB_VCODEC_FFMPEG_VP9: track->st->codec->codec_id = AV_CODEC_ID_VP9; priv_data = NULL; priv_size = 0; break; case HB_VCODEC_THEORA: { track->st->codec->codec_id = AV_CODEC_ID_THEORA; int size = 0; ogg_packet *ogg_headers[3]; for (ii = 0; ii < 3; ii++) { ogg_headers[ii] = (ogg_packet *)job->config.theora.headers[ii]; size += ogg_headers[ii]->bytes + 2; } priv_size = size; priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE); if (priv_data == NULL) { hb_error("Theora extradata: malloc failure"); goto error; } size = 0; for(ii = 0; ii < 3; ii++) { AV_WB16(priv_data + size, ogg_headers[ii]->bytes); size += 2; memcpy(priv_data+size, ogg_headers[ii]->packet, ogg_headers[ii]->bytes); size += ogg_headers[ii]->bytes; } } break; case HB_VCODEC_X265_8BIT: case HB_VCODEC_X265_10BIT: case HB_VCODEC_X265_12BIT: case HB_VCODEC_X265_16BIT: case HB_VCODEC_QSV_H265: track->st->codec->codec_id = AV_CODEC_ID_HEVC; if (job->config.h265.headers_length > 0) { priv_size = job->config.h265.headers_length; priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE); if (priv_data == NULL) { hb_error("H.265 extradata: malloc failure"); goto error; } memcpy(priv_data, job->config.h265.headers, priv_size); } break; default: hb_error("muxavformat: Unknown video codec: %x", job->vcodec); goto error; } track->st->codec->extradata = priv_data; track->st->codec->extradata_size 
= priv_size; track->st->sample_aspect_ratio.num = job->par.num; track->st->sample_aspect_ratio.den = job->par.den; track->st->codec->sample_aspect_ratio.num = job->par.num; track->st->codec->sample_aspect_ratio.den = job->par.den; track->st->codec->width = job->width; track->st->codec->height = job->height; track->st->disposition |= AV_DISPOSITION_DEFAULT; hb_rational_t vrate = job->vrate; // If the vrate is the internal clock rate, there's a good chance // this is a standard rate that we have in our hb_video_rates table. // Because of rounding errors and approximations made while // measuring framerate, the actual value may not be exact. So // we look for rates that are "close" and make an adjustment // to fps.den. if (vrate.num == clock) { const hb_rate_t *video_framerate = NULL; while ((video_framerate = hb_video_framerate_get_next(video_framerate)) != NULL) { if (abs(vrate.den - video_framerate->rate) < 10) { vrate.den = video_framerate->rate; break; } } } hb_reduce(&vrate.num, &vrate.den, vrate.num, vrate.den); if (job->mux == HB_MUX_AV_MP4) { // libavformat mp4 muxer requires that the codec time_base have the // same denominator as the stream time_base, it uses it for the // mdhd timescale. 
double scale = (double)track->st->time_base.den / vrate.num; track->st->codec->time_base.den = track->st->time_base.den; track->st->codec->time_base.num = vrate.den * scale; } else { track->st->codec->time_base.num = vrate.den; track->st->codec->time_base.den = vrate.num; } track->st->avg_frame_rate.num = vrate.num; track->st->avg_frame_rate.den = vrate.den; /* add the audio tracks */ for(ii = 0; ii < hb_list_count( job->list_audio ); ii++ ) { audio = hb_list_item( job->list_audio, ii ); track = m->tracks[m->ntracks++] = calloc(1, sizeof( hb_mux_data_t ) ); audio->priv.mux_data = track; track->type = MUX_TYPE_AUDIO; track->st = avformat_new_stream(m->oc, NULL); if (track->st == NULL) { hb_error("Could not initialize audio stream"); goto error; } avcodec_get_context_defaults3(track->st->codec, NULL); track->st->codec->codec_type = AVMEDIA_TYPE_AUDIO; track->st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; if (job->mux == HB_MUX_AV_MP4) { track->st->codec->time_base.num = audio->config.out.samples_per_frame; track->st->codec->time_base.den = audio->config.out.samplerate; track->st->time_base.num = 1; track->st->time_base.den = audio->config.out.samplerate; } else { track->st->codec->time_base = m->time_base; track->st->time_base = m->time_base; } priv_data = NULL; priv_size = 0; switch (audio->config.out.codec & HB_ACODEC_MASK) { case HB_ACODEC_DCA: case HB_ACODEC_DCA_HD: track->st->codec->codec_id = AV_CODEC_ID_DTS; break; case HB_ACODEC_AC3: track->st->codec->codec_id = AV_CODEC_ID_AC3; break; case HB_ACODEC_FFEAC3: track->st->codec->codec_id = AV_CODEC_ID_EAC3; break; case HB_ACODEC_FFTRUEHD: track->st->codec->codec_id = AV_CODEC_ID_TRUEHD; break; case HB_ACODEC_LAME: case HB_ACODEC_MP3: track->st->codec->codec_id = AV_CODEC_ID_MP3; break; case HB_ACODEC_VORBIS: { track->st->codec->codec_id = AV_CODEC_ID_VORBIS; int jj, size = 0; ogg_packet *ogg_headers[3]; for (jj = 0; jj < 3; jj++) { ogg_headers[jj] = (ogg_packet *)audio->priv.config.vorbis.headers[jj]; size += 
ogg_headers[jj]->bytes + 2; } priv_size = size; priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE); if (priv_data == NULL) { hb_error("Vorbis extradata: malloc failure"); goto error; } size = 0; for(jj = 0; jj < 3; jj++) { AV_WB16(priv_data + size, ogg_headers[jj]->bytes); size += 2; memcpy(priv_data+size, ogg_headers[jj]->packet, ogg_headers[jj]->bytes); size += ogg_headers[jj]->bytes; } } break; case HB_ACODEC_FFFLAC: case HB_ACODEC_FFFLAC24: track->st->codec->codec_id = AV_CODEC_ID_FLAC; if (audio->priv.config.extradata.length) { priv_size = audio->priv.config.extradata.length; priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE); if (priv_data == NULL) { hb_error("FLAC extradata: malloc failure"); goto error; } memcpy(priv_data, audio->priv.config.extradata.bytes, audio->priv.config.extradata.length); } break; case HB_ACODEC_FFAAC: case HB_ACODEC_CA_AAC: case HB_ACODEC_CA_HAAC: case HB_ACODEC_FDK_AAC: case HB_ACODEC_FDK_HAAC: track->st->codec->codec_id = AV_CODEC_ID_AAC; // libav mkv muxer expects there to be extradata for // AAC and will crash if it is NULL. // // Also, libav can over-read the buffer by up to 8 bytes // when it fills it's get_bits cache. // // So allocate extra bytes priv_size = audio->priv.config.extradata.length; priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE); if (priv_data == NULL) { hb_error("AAC extradata: malloc failure"); goto error; } memcpy(priv_data, audio->priv.config.extradata.bytes, audio->priv.config.extradata.length); // AAC from pass-through source may be ADTS. // Therefore inserting "aac_adtstoasc" bitstream filter is // preferred. // The filter does nothing for non-ADTS bitstream. 
if (audio->config.out.codec == HB_ACODEC_AAC_PASS) { track->bitstream_filter = av_bitstream_filter_init("aac_adtstoasc"); } break; default: hb_error("muxavformat: Unknown audio codec: %x", audio->config.out.codec); goto error; } track->st->codec->extradata = priv_data; track->st->codec->extradata_size = priv_size; if( default_track_flag ) { track->st->disposition |= AV_DISPOSITION_DEFAULT; default_track_flag = 0; } lang = lookup_lang_code(job->mux, audio->config.lang.iso639_2 ); if (lang != NULL) { av_dict_set(&track->st->metadata, "language", lang, 0); } track->st->codec->sample_rate = audio->config.out.samplerate; if (audio->config.out.codec & HB_ACODEC_PASS_FLAG) { track->st->codec->channels = av_get_channel_layout_nb_channels(audio->config.in.channel_layout); track->st->codec->channel_layout = audio->config.in.channel_layout; } else { track->st->codec->channels = hb_mixdown_get_discrete_channel_count(audio->config.out.mixdown); track->st->codec->channel_layout = hb_ff_mixdown_xlat(audio->config.out.mixdown, NULL); } char *name; if (audio->config.out.name == NULL) { switch (track->st->codec->channels) { case 1: name = "Mono"; break; case 2: name = "Stereo"; break; default: name = "Surround"; break; } } else { name = audio->config.out.name; } // Set audio track title av_dict_set(&track->st->metadata, "title", name, 0); if (job->mux == HB_MUX_AV_MP4) { // Some software (MPC, mediainfo) use hdlr description // for track title av_dict_set(&track->st->metadata, "handler", name, 0); } } // Check for audio track associations for (ii = 0; ii < hb_list_count(job->list_audio); ii++) { audio = hb_list_item(job->list_audio, ii); switch (audio->config.out.codec & HB_ACODEC_MASK) { case HB_ACODEC_FFAAC: case HB_ACODEC_CA_AAC: case HB_ACODEC_CA_HAAC: case HB_ACODEC_FDK_AAC: case HB_ACODEC_FDK_HAAC: break; default: { // Mark associated fallback audio tracks for any non-aac track for(jj = 0; jj < hb_list_count( job->list_audio ); jj++ ) { hb_audio_t * fallback; int codec; if (ii 
== jj) continue; fallback = hb_list_item( job->list_audio, jj ); codec = fallback->config.out.codec & HB_ACODEC_MASK; if (fallback->config.in.track == audio->config.in.track && (codec == HB_ACODEC_FFAAC || codec == HB_ACODEC_CA_AAC || codec == HB_ACODEC_CA_HAAC || codec == HB_ACODEC_FDK_AAC || codec == HB_ACODEC_FDK_HAAC)) { hb_mux_data_t * fallback_track; int * sd; track = audio->priv.mux_data; fallback_track = fallback->priv.mux_data; sd = (int*)av_stream_new_side_data(track->st, AV_PKT_DATA_FALLBACK_TRACK, sizeof(int)); if (sd != NULL) { *sd = fallback_track->st->index; } } } } break; } } char * subidx_fmt = "size: %dx%d\n" "org: %d, %d\n" "scale: 100%%, 100%%\n" "alpha: 100%%\n" "smooth: OFF\n" "fadein/out: 50, 50\n" "align: OFF at LEFT TOP\n" "time offset: 0\n" "forced subs: %s\n" "palette: %06x, %06x, %06x, %06x, %06x, %06x, " "%06x, %06x, %06x, %06x, %06x, %06x, %06x, %06x, %06x, %06x\n" "custom colors: OFF, tridx: 0000, " "colors: 000000, 000000, 000000, 000000\n"; int subtitle_default = -1; for( ii = 0; ii < hb_list_count( job->list_subtitle ); ii++ ) { hb_subtitle_t *subtitle = hb_list_item( job->list_subtitle, ii ); if( subtitle->config.dest == PASSTHRUSUB ) { if ( subtitle->config.default_track ) subtitle_default = ii; } } // Quicktime requires that at least one subtitle is enabled, // else it doesn't show any of the subtitles. // So check to see if any of the subtitles are flagged to be // the defualt. The default will the the enabled track, else // enable the first track. 
if (job->mux == HB_MUX_AV_MP4 && subtitle_default == -1) { subtitle_default = 0; } for( ii = 0; ii < hb_list_count( job->list_subtitle ); ii++ ) { hb_subtitle_t * subtitle; uint32_t rgb[16]; char subidx[2048]; int len; subtitle = hb_list_item( job->list_subtitle, ii ); if (subtitle->config.dest != PASSTHRUSUB) continue; track = m->tracks[m->ntracks++] = calloc(1, sizeof( hb_mux_data_t ) ); subtitle->mux_data = track; track->type = MUX_TYPE_SUBTITLE; track->st = avformat_new_stream(m->oc, NULL); if (track->st == NULL) { hb_error("Could not initialize subtitle stream"); goto error; } avcodec_get_context_defaults3(track->st->codec, NULL); track->st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE; track->st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; track->st->time_base = m->time_base; track->st->codec->time_base = m->time_base; track->st->codec->width = subtitle->width; track->st->codec->height = subtitle->height; priv_data = NULL; priv_size = 0; switch (subtitle->source) { case VOBSUB: { int jj; track->st->codec->codec_id = AV_CODEC_ID_DVD_SUBTITLE; for (jj = 0; jj < 16; jj++) rgb[jj] = hb_yuv2rgb(subtitle->palette[jj]); len = snprintf(subidx, 2048, subidx_fmt, subtitle->width, subtitle->height, 0, 0, "OFF", rgb[0], rgb[1], rgb[2], rgb[3], rgb[4], rgb[5], rgb[6], rgb[7], rgb[8], rgb[9], rgb[10], rgb[11], rgb[12], rgb[13], rgb[14], rgb[15]); priv_size = len + 1; priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE); if (priv_data == NULL) { hb_error("VOBSUB extradata: malloc failure"); goto error; } memcpy(priv_data, subidx, priv_size); } break; case PGSSUB: { track->st->codec->codec_id = AV_CODEC_ID_HDMV_PGS_SUBTITLE; } break; case CC608SUB: case CC708SUB: case TX3GSUB: case SRTSUB: case UTF8SUB: case SSASUB: { if (job->mux == HB_MUX_AV_MP4) { track->st->codec->codec_id = AV_CODEC_ID_MOV_TEXT; } else { track->st->codec->codec_id = AV_CODEC_ID_SSA; need_fonts = 1; if (subtitle->extradata_size) { priv_size = subtitle->extradata_size; priv_data = 
av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE); if (priv_data == NULL) { hb_error("SSA extradata: malloc failure"); goto error; } memcpy(priv_data, subtitle->extradata, priv_size); } } } break; default: continue; } if (track->st->codec->codec_id == AV_CODEC_ID_MOV_TEXT) { // Build codec extradata for tx3g. // If we were using a libav codec to generate this data // this would (or should) be done for us. uint8_t properties[] = { 0x00, 0x00, 0x00, 0x00, // Display Flags 0x01, // Horiz. Justification 0xff, // Vert. Justification 0x00, 0x00, 0x00, 0xff, // Bg color 0x00, 0x00, 0x00, 0x00, // Default text box 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Reserved 0x00, 0x01, // Font ID 0x00, // Font face 0x18, // Font size 0xff, 0xff, 0xff, 0xff, // Fg color // Font table: 0x00, 0x00, 0x00, 0x12, // Font table size 'f','t','a','b', // Tag 0x00, 0x01, // Count 0x00, 0x01, // Font ID 0x05, // Font name length 'A','r','i','a','l' // Font name }; int width, height = 60; width = job->width * job->par.num / job->par.den; track->st->codec->width = width; track->st->codec->height = height; properties[14] = height >> 8; properties[15] = height & 0xff; properties[16] = width >> 8; properties[17] = width & 0xff; priv_size = sizeof(properties); priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE); if (priv_data == NULL) { hb_error("TX3G extradata: malloc failure"); goto error; } memcpy(priv_data, properties, priv_size); } track->st->codec->extradata = priv_data; track->st->codec->extradata_size = priv_size; if (ii == subtitle_default) { track->st->disposition |= AV_DISPOSITION_DEFAULT; } if (subtitle->config.default_track) { track->st->disposition |= AV_DISPOSITION_FORCED; } lang = lookup_lang_code(job->mux, subtitle->iso639_2 ); if (lang != NULL) { av_dict_set(&track->st->metadata, "language", lang, 0); } }
/** * play the media * * @param channel opaque handle returned by WTSVirtualChannelOpenEx * @param stream_id unique identification number for this stream * @param filename media file to play * * @return 0 on success, -1 on error *****************************************************************************/ int xrdpvr_play_media(void *channel, int stream_id, char *filename) { int i; printf("$$$$$$ xrdpvr_play_media: setting audioTimeout & " "videoTimeout to -1\n"); g_psi.videoTimeout = -1; g_psi.audioTimeout = -1; /* register all available fileformats and codecs */ av_register_all(); /* open media file - this will read just the header */ //if (avformat_open_input(&g_psi.p_format_ctx, filename, NULL, NULL)) if (av_open_input_file(&g_psi.p_format_ctx, filename, NULL, 0, NULL)) { printf("ERROR opening %s\n", filename); return -1; } /* now get the real stream info */ //if (avformat_find_stream_info(g_psi.p_format_ctx, NULL) < 0) if (av_find_stream_info(g_psi.p_format_ctx) < 0) { printf("ERROR reading stream info\n"); return -1; } #if 1 /* print media info to standard out */ av_dump_format(g_psi.p_format_ctx, 0, filename, 0); #endif printf("nb_streams %d\n", g_psi.p_format_ctx->nb_streams); g_audio_index = -1; g_video_index = -1; /* find first audio / video stream */ for (i = 0; i < g_psi.p_format_ctx->nb_streams; i++) { if (g_psi.p_format_ctx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO && g_psi.p_format_ctx->streams[i]->codec->codec_id == CODEC_ID_H264 && g_video_index < 0) { g_video_index = i; } if (g_psi.p_format_ctx->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO && g_psi.p_format_ctx->streams[i]->codec->codec_id == CODEC_ID_AAC && g_audio_index < 0) { g_audio_index = i; } } if ((g_audio_index < 0) || (g_video_index < 0)) { /* close file and return with error */ printf("ERROR: no audio/video stream found in %s\n", filename); //avformat_close_input(&g_psi.p_format_ctx); av_close_input_file(g_psi.p_format_ctx); return -1; } g_psi.audio_stream_index = 
g_audio_index; g_psi.video_stream_index = g_video_index; /* get pointers to codex contexts for both streams */ g_psi.p_audio_codec_ctx = g_psi.p_format_ctx->streams[g_audio_index]->codec; g_psi.p_video_codec_ctx = g_psi.p_format_ctx->streams[g_video_index]->codec; /* find decoder for audio stream */ g_psi.p_audio_codec = avcodec_find_decoder(g_psi.p_audio_codec_ctx->codec_id); if (g_psi.p_audio_codec == NULL) { printf("ERROR: audio codec not supported\n"); } /* find decoder for video stream */ g_psi.p_video_codec = avcodec_find_decoder(g_psi.p_video_codec_ctx->codec_id); if (g_psi.p_video_codec == NULL) { printf("ERROR: video codec not supported\n"); } /* open decoder for audio stream */ //if (avcodec_open2(g_psi.p_audio_codec_ctx, g_psi.p_audio_codec, // NULL) < 0) if (avcodec_open(g_psi.p_audio_codec_ctx, g_psi.p_audio_codec) < 0) { printf("ERROR: could not open audio decoder\n"); return -1; } printf("%d\n", g_psi.p_audio_codec_ctx->extradata_size); hexdump(g_psi.p_audio_codec_ctx->extradata, g_psi.p_audio_codec_ctx->extradata_size); printf("%d %d %d %d\n", g_psi.p_audio_codec_ctx->sample_rate, g_psi.p_audio_codec_ctx->bit_rate, g_psi.p_audio_codec_ctx->channels, g_psi.p_audio_codec_ctx->block_align); /* open decoder for video stream */ //if (avcodec_open2(g_psi.p_video_codec_ctx, g_psi.p_video_codec, // NULL) < 0) if (avcodec_open(g_psi.p_video_codec_ctx, g_psi.p_video_codec) < 0) { printf("ERROR: could not open video decoder\n"); return -1; } g_psi.bsfc = av_bitstream_filter_init("h264_mp4toannexb"); printf("g_psi.bsfc %p\n", g_psi.bsfc); if (xrdpvr_set_video_format(channel, 101, 0, g_psi.p_video_codec_ctx->width, g_psi.p_video_codec_ctx->height)) { printf("xrdpvr_set_video_format() failed\n"); return -1; } printf("xrdpvr_play_media: calling xrdpvr_set_audio_format\n"); if (xrdpvr_set_audio_format(channel, 101, 0, g_psi.p_audio_codec_ctx->extradata, g_psi.p_audio_codec_ctx->extradata_size, g_psi.p_audio_codec_ctx->sample_rate, g_psi.p_audio_codec_ctx->bit_rate, 
g_psi.p_audio_codec_ctx->channels, g_psi.p_audio_codec_ctx->block_align)) { printf("xrdpvr_set_audio_format() failed\n"); return 1; } return 0; }
bool FFmpegEncoderOpen(struct FFmpegEncoder* encoder, const char* outfile) { AVCodec* acodec = avcodec_find_encoder_by_name(encoder->audioCodec); AVCodec* vcodec = avcodec_find_encoder_by_name(encoder->videoCodec); if ((encoder->audioCodec && !acodec) || !vcodec || !FFmpegEncoderVerifyContainer(encoder)) { return false; } encoder->currentAudioSample = 0; encoder->currentAudioFrame = 0; encoder->currentVideoFrame = 0; encoder->nextAudioPts = 0; AVOutputFormat* oformat = av_guess_format(encoder->containerFormat, 0, 0); #ifndef USE_LIBAV avformat_alloc_output_context2(&encoder->context, oformat, 0, outfile); #else encoder->context = avformat_alloc_context(); strncpy(encoder->context->filename, outfile, sizeof(encoder->context->filename)); encoder->context->oformat = oformat; #endif if (acodec) { encoder->audioStream = avformat_new_stream(encoder->context, acodec); encoder->audio = encoder->audioStream->codec; encoder->audio->bit_rate = encoder->audioBitrate; encoder->audio->channels = 2; encoder->audio->channel_layout = AV_CH_LAYOUT_STEREO; encoder->audio->sample_rate = encoder->sampleRate; encoder->audio->sample_fmt = encoder->sampleFormat; AVDictionary* opts = 0; av_dict_set(&opts, "strict", "-2", 0); if (encoder->context->oformat->flags & AVFMT_GLOBALHEADER) { encoder->audio->flags |= CODEC_FLAG_GLOBAL_HEADER; } avcodec_open2(encoder->audio, acodec, &opts); av_dict_free(&opts); #if LIBAVCODEC_VERSION_MAJOR >= 55 encoder->audioFrame = av_frame_alloc(); #else encoder->audioFrame = avcodec_alloc_frame(); #endif if (!encoder->audio->frame_size) { encoder->audio->frame_size = 1; } encoder->audioFrame->nb_samples = encoder->audio->frame_size; encoder->audioFrame->format = encoder->audio->sample_fmt; encoder->audioFrame->pts = 0; encoder->resampleContext = avresample_alloc_context(); av_opt_set_int(encoder->resampleContext, "in_channel_layout", AV_CH_LAYOUT_STEREO, 0); av_opt_set_int(encoder->resampleContext, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0); 
av_opt_set_int(encoder->resampleContext, "in_sample_rate", PREFERRED_SAMPLE_RATE, 0); av_opt_set_int(encoder->resampleContext, "out_sample_rate", encoder->sampleRate, 0); av_opt_set_int(encoder->resampleContext, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0); av_opt_set_int(encoder->resampleContext, "out_sample_fmt", encoder->sampleFormat, 0); avresample_open(encoder->resampleContext); encoder->audioBufferSize = (encoder->audioFrame->nb_samples * PREFERRED_SAMPLE_RATE / encoder->sampleRate) * 4; encoder->audioBuffer = av_malloc(encoder->audioBufferSize); encoder->postaudioBufferSize = av_samples_get_buffer_size(0, encoder->audio->channels, encoder->audio->frame_size, encoder->audio->sample_fmt, 0); encoder->postaudioBuffer = av_malloc(encoder->postaudioBufferSize); avcodec_fill_audio_frame(encoder->audioFrame, encoder->audio->channels, encoder->audio->sample_fmt, (const uint8_t*) encoder->postaudioBuffer, encoder->postaudioBufferSize, 0); if (encoder->audio->codec->id == AV_CODEC_ID_AAC && (strcasecmp(encoder->containerFormat, "mp4") || strcasecmp(encoder->containerFormat, "m4v") || strcasecmp(encoder->containerFormat, "mov"))) { // MP4 container doesn't support the raw ADTS AAC format that the encoder spits out encoder->absf = av_bitstream_filter_init("aac_adtstoasc"); } } encoder->videoStream = avformat_new_stream(encoder->context, vcodec); encoder->video = encoder->videoStream->codec; encoder->video->bit_rate = encoder->videoBitrate; encoder->video->width = encoder->width; encoder->video->height = encoder->height; encoder->video->time_base = (AVRational) { VIDEO_TOTAL_LENGTH, GBA_ARM7TDMI_FREQUENCY }; encoder->video->pix_fmt = encoder->pixFormat; encoder->video->gop_size = 60; encoder->video->max_b_frames = 3; if (encoder->context->oformat->flags & AVFMT_GLOBALHEADER) { encoder->video->flags |= CODEC_FLAG_GLOBAL_HEADER; } if (strcmp(vcodec->name, "libx264") == 0) { // Try to adaptively figure out when you can use a slower encoder if (encoder->width * encoder->height > 
1000000) { av_opt_set(encoder->video->priv_data, "preset", "superfast", 0); } else if (encoder->width * encoder->height > 500000) { av_opt_set(encoder->video->priv_data, "preset", "veryfast", 0); } else { av_opt_set(encoder->video->priv_data, "preset", "faster", 0); } av_opt_set(encoder->video->priv_data, "tune", "zerolatency", 0); } avcodec_open2(encoder->video, vcodec, 0); #if LIBAVCODEC_VERSION_MAJOR >= 55 encoder->videoFrame = av_frame_alloc(); #else encoder->videoFrame = avcodec_alloc_frame(); #endif encoder->videoFrame->format = encoder->video->pix_fmt; encoder->videoFrame->width = encoder->video->width; encoder->videoFrame->height = encoder->video->height; encoder->videoFrame->pts = 0; encoder->scaleContext = sws_getContext(VIDEO_HORIZONTAL_PIXELS, VIDEO_VERTICAL_PIXELS, #ifndef USE_LIBAV AV_PIX_FMT_0BGR32, #else AV_PIX_FMT_BGR32, #endif encoder->videoFrame->width, encoder->videoFrame->height, encoder->video->pix_fmt, SWS_POINT, 0, 0, 0); av_image_alloc(encoder->videoFrame->data, encoder->videoFrame->linesize, encoder->video->width, encoder->video->height, encoder->video->pix_fmt, 32); avio_open(&encoder->context->pb, outfile, AVIO_FLAG_WRITE); avformat_write_header(encoder->context, 0); return true; }
int _tmain(int argc, _TCHAR* argv[]) { if (argc != 4) { printf("Usage: %s in_fname_v in_fname_a out_fname\n"); return -1; } AVOutputFormat *p_ofmt = NULL; ///< Input AVFormatContext and Output AVFormatContext AVFormatContext *p_ifmt_ctx_v = NULL, *p_ifmt_ctx_a = NULL, *p_ofmt_ctx = NULL; AVPacket pkt; int ret, i; int video_idx_v = -1, video_idx_out = -1; int audio_idx_a = -1, audio_idx_out = -1; int frame_idx = 0; int64_t cur_pts_v = 0, cur_pts_a = 0; const char *p_in_fname_v = argv[1], *p_in_fname_a = argv[2], *p_out_fname = argv[3]; av_register_all(); ///< Input if ((ret = avformat_open_input(&p_ifmt_ctx_v, p_in_fname_v, NULL, NULL)) < 0) { printf("Could not open input file(: %s).\n", p_in_fname_v); goto end; } if ((ret = avformat_find_stream_info(p_ifmt_ctx_v, NULL)) < 0) { printf("Failed to retrieve input stream information.\n"); goto end; } if ((ret = avformat_open_input(&p_ifmt_ctx_a, p_in_fname_a, NULL, NULL)) < 0) { printf("Could not open input file.\n"); goto end; } if ((ret = avformat_find_stream_info(p_ifmt_ctx_a, NULL)) < 0) { printf("Failed to retrieve input stream information.\n"); goto end; } printf("=========Input Information=========\n"); av_dump_format(p_ifmt_ctx_v, 0, p_in_fname_v, 0); av_dump_format(p_ifmt_ctx_a, 0, p_in_fname_a, 0); printf("===================================\n"); ///< Output avformat_alloc_output_context2(&p_ofmt_ctx, NULL, NULL, p_out_fname); if (NULL == p_ofmt_ctx) { printf("Could not create output context.\n"); ret = AVERROR_UNKNOWN; goto end; } p_ofmt = p_ofmt_ctx->oformat; for (i = 0; i < (int)p_ifmt_ctx_v->nb_streams; ++i) { ///< Create output AVStream according to input AVStream if (AVMEDIA_TYPE_VIDEO == p_ifmt_ctx_v->streams[i]->codec->codec_type) { AVStream *p_in_strm = p_ifmt_ctx_v->streams[i]; AVStream *p_out_strm = avformat_new_stream(p_ofmt_ctx, p_in_strm->codec->codec); video_idx_v = i; if (NULL == p_out_strm) { printf("Failed allocating output stream.\n"); ret = AVERROR_UNKNOWN; goto end; } video_idx_out = 
p_out_strm->index; ///< Copy the settings of AVCodecContext if (avcodec_copy_context(p_out_strm->codec, p_in_strm->codec) < 0) { printf("Failed to copy context from input to output" " stream codec context.\n"); goto end; } p_out_strm->codec->codec_tag = 0; if (p_ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) { p_out_strm->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; } break; } } for (i = 0; i < (int)p_ifmt_ctx_a->nb_streams; ++i) { ///< Create output AVStream according to input AVStream if (AVMEDIA_TYPE_AUDIO == p_ifmt_ctx_a->streams[i]->codec->codec_type) { AVStream *p_in_strm = p_ifmt_ctx_a->streams[i]; AVStream *p_out_strm = avformat_new_stream(p_ofmt_ctx, p_in_strm->codec->codec); audio_idx_a = i; if (NULL == p_out_strm) { printf("Failed allocating output stream.\n"); ret = AVERROR_UNKNOWN; goto end; } audio_idx_out = p_out_strm->index; ///< Copy the settings of AVCodecContext if (avcodec_copy_context(p_out_strm->codec, p_in_strm->codec) < 0) { printf("Failed to copy context from intput to " "output stream codec context.\n"); goto end; } p_out_strm->codec->codec_tag = 0; if (p_ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) { p_out_strm->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; } break; } } printf("=========Output Information=========\n"); av_dump_format(p_ofmt_ctx, 0, p_out_fname, 1); printf("====================================\n"); ///< Open output file if (!(p_ofmt->flags & AVFMT_NOFILE)) { if (avio_open(&p_ofmt_ctx->pb, p_out_fname, AVIO_FLAG_WRITE) < 0) { printf("Could not open output file '%s'", p_out_fname); goto end; } } ///< Write file header if ((ret = avformat_write_header(p_ofmt_ctx, NULL)) < 0) { printf("Error occurred when opening output file.\n"); goto end; } ///< FIX #if USE_H264BSF AVBitStreamFilterContext *p_h264bsfc = av_bitstream_filter_init("h264_mp4toannexb"); #endif #if USE_AACBSF AVBitStreamFilterContext *p_aacbsfc = av_bitstream_filter_init("aac_adtstoasc"); #endif while (true) { AVFormatContext *p_ifmt_ctx; int strm_idx = 0; AVStream 
*p_in_strm, *p_out_strm; ///< Get an AVPacket if (av_compare_ts(cur_pts_v, p_ifmt_ctx_v->streams[video_idx_v]->time_base, cur_pts_a, p_ifmt_ctx_a->streams[audio_idx_a]->time_base) <= 0) { p_ifmt_ctx = p_ifmt_ctx_v; strm_idx = video_idx_out; if (av_read_frame(p_ifmt_ctx, &pkt) >= 0) { do { p_in_strm = p_ifmt_ctx->streams[pkt.stream_index]; p_out_strm = p_ofmt_ctx->streams[strm_idx]; if (pkt.stream_index == video_idx_v) { ///< FIX: No PTS (Example: Raw H.264) ///< Simple Write PTS if (pkt.pts == AV_NOPTS_VALUE) { ///< Write PTS AVRational time_base1 = p_in_strm->time_base; ///< Duration between 2 frames (us) int64_t calc_duration = (int64_t)((double)AV_TIME_BASE / av_q2d(p_in_strm->r_frame_rate)); ///< Parameters pkt.pts = (int64_t)((double)(frame_idx * calc_duration) / (double)(av_q2d(time_base1) * AV_TIME_BASE)); pkt.dts = pkt.pts; pkt.duration = (int)((double)calc_duration / (double)(av_q2d(time_base1) * AV_TIME_BASE)); ++frame_idx; } cur_pts_v = pkt.pts; break; } } while (av_read_frame(p_ifmt_ctx, &pkt)); } else { break; } } else { p_ifmt_ctx = p_ifmt_ctx_a; strm_idx = audio_idx_out; if (av_read_frame(p_ifmt_ctx, &pkt) >= 0) { do { p_in_strm = p_ifmt_ctx->streams[pkt.stream_index]; p_out_strm = p_ofmt_ctx->streams[strm_idx]; if (pkt.stream_index == audio_idx_a) { ///< FIX: No PTS ///< Simple Write PTS if (pkt.pts == AV_NOPTS_VALUE) { ///< Write PTS AVRational time_base1 = p_in_strm->time_base; ///< Duration between 2 frames (us) int64_t calc_duration = (int64_t)((double)AV_TIME_BASE / av_q2d(p_in_strm->r_frame_rate)); ///< Parameters pkt.dts = (int64_t)((double)(frame_idx * calc_duration) / (double)(av_q2d(time_base1) * AV_TIME_BASE)); pkt.dts = pkt.pts; pkt.duration = (int)((double)calc_duration / (double)(av_q2d(time_base1)* AV_TIME_BASE)); ++frame_idx; } cur_pts_a = pkt.pts; break; } } while (av_read_frame(p_ifmt_ctx, &pkt)); } else { break; } } ///< FIX: Bitstream Filter #if USE_H264BSF av_bitstream_filter_filter(p_h264bsfc, p_in_strm->codec, NULL, &pkt.data, 
&pkt.size, pkt.data, pkt.size, 0); #endif #if USE_AACBSF av_bitstream_filter_filter(p_aacbsfc, p_out_strm->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0); #endif ///< Convert PTS/DTS pkt.pts = av_rescale_q_rnd(pkt.pts, p_in_strm->time_base, p_out_strm->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX)); pkt.dts = av_rescale_q_rnd(pkt.dts, p_in_strm->time_base, p_out_strm->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX)); pkt.duration = (int)av_rescale_q(pkt.duration, p_in_strm->time_base, p_out_strm->time_base); pkt.pos = -1; pkt.stream_index = strm_idx; printf("Write 1 Packet. size: %5d\tpts: %11d\n", pkt.size, pkt.pts); ///< Write if (av_interleaved_write_frame(p_ofmt_ctx, &pkt) < 0) { printf("Error muxing packet.\n"); break; } av_free_packet(&pkt); } ///< Write file trailer av_write_trailer(p_ofmt_ctx); #if USE_H264BSF av_bitstream_filter_close(p_h264bsfc); #endif #if USE_AACBSF av_bitstream_filter_close(p_aacbsfc); #endif end: avformat_close_input(&p_ifmt_ctx_v); avformat_close_input(&p_ifmt_ctx_a); ///< close output if (p_ofmt_ctx && !(p_ofmt->flags & AVFMT_NOFILE)) { avio_close(p_ofmt_ctx->pb); } avformat_free_context(p_ofmt_ctx); if (ret < 0 && ret != AVERROR_EOF) { printf("Error occurred.\n"); return -1; } return 0; }
int Testffmpeg() { AVOutputFormat *ofmt = NULL; //Input AVFormatContext and Output AVFormatContext AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL; AVPacket pkt; const char *in_filename, *out_filename; int ret, i; int videoindex=-1; int frame_index=0; in_filename = "rtmp://live.hkstv.hk.lxdns.com/live/hks"; out_filename = "receive.flv"; av_register_all(); //Network avformat_network_init(); //Input if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) { printf( "Could not open input file."); goto end; } if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) { printf( "Failed to retrieve input stream information"); goto end; } for(i=0; i<ifmt_ctx->nb_streams; i++) if(ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){ videoindex=i; break; } av_dump_format(ifmt_ctx, 0, in_filename, 0); //Output avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename); //RTMP if (!ofmt_ctx) { printf( "Could not create output context\n"); ret = AVERROR_UNKNOWN; goto end; } ofmt = ofmt_ctx->oformat; for (i = 0; i < ifmt_ctx->nb_streams; i++) { //Create output AVStream according to input AVStream AVStream *in_stream = ifmt_ctx->streams[i]; AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec); if (!out_stream) { printf( "Failed allocating output stream\n"); ret = AVERROR_UNKNOWN; goto end; } //Copy the settings of AVCodecContext ret = avcodec_copy_context(out_stream->codec, in_stream->codec); if (ret < 0) { printf( "Failed to copy context from input to output stream codec context\n"); goto end; } out_stream->codec->codec_tag = 0; if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; } //Dump Format------------------ av_dump_format(ofmt_ctx, 0, out_filename, 1); //Open output URL if (!(ofmt->flags & AVFMT_NOFILE)) { ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE); if (ret < 0) { printf( "Could not open output URL '%s'", out_filename); goto end; } } //Write file 
header ret = avformat_write_header(ofmt_ctx, NULL); if (ret < 0) { printf( "Error occurred when opening output URL\n"); goto end; } #if USE_H264BSF AVBitStreamFilterContext* h264bsfc = av_bitstream_filter_init("h264_mp4toannexb"); #endif while (1) { AVStream *in_stream, *out_stream; //Get an AVPacket ret = av_read_frame(ifmt_ctx, &pkt); if (ret < 0) break; in_stream = ifmt_ctx->streams[pkt.stream_index]; out_stream = ofmt_ctx->streams[pkt.stream_index]; /* copy packet */ //Convert PTS/DTS pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX)); pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX)); pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base); pkt.pos = -1; //Print to Screen if(pkt.stream_index==videoindex){ printf("Receive %8d video frames from input URL\n",frame_index); frame_index++; #if USE_H264BSF av_bitstream_filter_filter(h264bsfc, in_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0); #endif } //ret = av_write_frame(ofmt_ctx, &pkt); ret = av_interleaved_write_frame(ofmt_ctx, &pkt); if (ret < 0) { printf( "Error muxing packet\n"); break; } av_free_packet(&pkt); } #if USE_H264BSF av_bitstream_filter_close(h264bsfc); #endif //Write file trailer av_write_trailer(ofmt_ctx); end: avformat_close_input(&ifmt_ctx); /* close output */ if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE)) avio_close(ofmt_ctx->pb); avformat_free_context(ofmt_ctx); if (ret < 0 && ret != AVERROR_EOF) { printf( "Error occurred.\n"); return -1; } }
int main(int argc, char *argv[]) { AVFormatContext *pFormatCtx = NULL; int i, videoStream; AVCodecContext *pCodecCtx = NULL; AVCodec *pCodec = NULL; AVFrame *pFrame = NULL; AVFrame *pFrameRGB = NULL; AVPacket packet; int frameFinished; int numBytes; uint8_t *buffer = NULL; AVDictionary *optionsDict = NULL; struct SwsContext *sws_ctx = NULL; if(argc < 2) { printf("Please provide a movie file\n"); return -1; } char out_file[1024]={0}; sprintf(out_file,"%s.nalu",argv[1]); static FILE *fp = fopen(out_file,"wb"); if(!fp){ printf("can't open output file:%s\n",out_file); } // Register all formats and codecs av_register_all(); // Open video file if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0) return -1; // Couldn't open file // Retrieve stream information if(avformat_find_stream_info(pFormatCtx, NULL)<0) return -1; // Couldn't find stream information // Dump information about file onto standard error av_dump_format(pFormatCtx, 0, argv[1], 0); // Find the first video stream videoStream=-1; for(i=0; i<pFormatCtx->nb_streams; i++) if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) { videoStream=i; break; } if(videoStream==-1) return -1; // Didn't find a video stream // Get a pointer to the codec context for the video stream pCodecCtx=pFormatCtx->streams[videoStream]->codec; // Find the decoder for the video stream pCodec=avcodec_find_decoder(pCodecCtx->codec_id); if(pCodec==NULL) { fprintf(stderr, "Unsupported codec!\n"); return -1; // Codec not found } // Open codec if(avcodec_open2(pCodecCtx, pCodec, &optionsDict)<0) return -1; // Could not open codec // Allocate video frame pFrame=av_frame_alloc(); unsigned char *dummy=NULL; //输入的指针 int dummy_len; AVBitStreamFilterContext* bsfc = av_bitstream_filter_init("h264_mp4toannexb"); av_bitstream_filter_filter(bsfc, pCodecCtx, NULL, &dummy, &dummy_len, NULL, 0, 0); fwrite(pCodecCtx->extradata,pCodecCtx->extradata_size,1,fp); av_bitstream_filter_close(bsfc); free(dummy); // Read frames and save first five 
frames to disk i=0; while(av_read_frame(pFormatCtx, &packet)>=0) { // Is this a packet from the video stream? if(packet.stream_index==videoStream) { // Decode video frame avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet); // Did we get a video frame? if(frameFinished) { static bool find_i = false; if(!find_i){ static unsigned char i_tag[] = {0x65}; if(memcmp(i_tag,(packet.data)+4,1) ==0) { find_i = true; printf("find i frame\n"); } else { continue; } } char nal_start[]={0,0,0,1}; fwrite(nal_start,4,1,fp); fwrite(packet.data+4,packet.size-4,1,fp); printf("write packet size:%d\n",packet.size-4); } } } // Free the packet that was allocated by av_read_frame av_free_packet(&packet); // Free the RGB image av_free(buffer); // Free the YUV frame av_free(pFrame); // Close the codec avcodec_close(pCodecCtx); // Close the video file avformat_close_input(&pFormatCtx); fclose(fp); return 0; }
/**
 * Create an Android MediaCodec based video decoder pipeline node.
 *
 * Verifies that hardware decoding is enabled for the codec and that the
 * H.264 profile is supported, prepares the input AMediaFormat (converting
 * avcC/hvcC extradata to Annex B "csd-0" when needed), applies rotation,
 * selects a concrete MediaCodec and configures it against the pipeline's
 * surface.
 *
 * @param ffp      player instance (must have a valid VideoState)
 * @param pipeline pipeline that owns the decoder selection and surface
 * @param vout     video output target
 * @return new node on success; NULL if MediaCodec cannot be used (caller
 *         falls back to the software decoder)
 */
IJKFF_Pipenode *ffpipenode_create_video_decoder_from_android_mediacodec(FFPlayer *ffp, IJKFF_Pipeline *pipeline, SDL_Vout *vout)
{
    ALOGD("ffpipenode_create_video_decoder_from_android_mediacodec()\n");
    if (SDL_Android_GetApiLevel() < IJK_API_16_JELLY_BEAN)
        return NULL;

    if (!ffp || !ffp->is)
        return NULL;

    IJKFF_Pipenode *node = ffpipenode_alloc(sizeof(IJKFF_Pipenode_Opaque));
    if (!node)
        return node;

    VideoState            *is     = ffp->is;
    IJKFF_Pipenode_Opaque *opaque = node->opaque;
    JNIEnv                *env    = NULL;
    int                    ret    = 0;
    int                    rotate_degrees = 0;
    jobject                jsurface = NULL;

    node->func_destroy  = func_destroy;
    node->func_run_sync = func_run_sync;
    node->func_flush    = func_flush;
    opaque->pipeline    = pipeline;
    opaque->ffp         = ffp;
    opaque->decoder     = &is->viddec;
    opaque->weak_vout   = vout;

    opaque->avctx = opaque->decoder->avctx;
    switch (opaque->avctx->codec_id) {
    case AV_CODEC_ID_H264:
        if (!ffp->mediacodec_avc && !ffp->mediacodec_all_videos) {
            ALOGE("%s: MediaCodec: AVC/H264 is disabled. codec_id:%d \n", __func__, opaque->avctx->codec_id);
            goto fail;
        }
        /* only baseline..high profiles are accepted; everything above is
           rejected so playback falls back to software decoding */
        switch (opaque->avctx->profile) {
        case FF_PROFILE_H264_BASELINE:
            ALOGI("%s: MediaCodec: H264_BASELINE: enabled\n", __func__);
            break;
        case FF_PROFILE_H264_CONSTRAINED_BASELINE:
            ALOGI("%s: MediaCodec: H264_CONSTRAINED_BASELINE: enabled\n", __func__);
            break;
        case FF_PROFILE_H264_MAIN:
            ALOGI("%s: MediaCodec: H264_MAIN: enabled\n", __func__);
            break;
        case FF_PROFILE_H264_EXTENDED:
            ALOGI("%s: MediaCodec: H264_EXTENDED: enabled\n", __func__);
            break;
        case FF_PROFILE_H264_HIGH:
            ALOGI("%s: MediaCodec: H264_HIGH: enabled\n", __func__);
            break;
        case FF_PROFILE_H264_HIGH_10:
            ALOGW("%s: MediaCodec: H264_HIGH_10: disabled\n", __func__);
            goto fail;
        case FF_PROFILE_H264_HIGH_10_INTRA:
            ALOGW("%s: MediaCodec: H264_HIGH_10_INTRA: disabled\n", __func__);
            goto fail;
        case FF_PROFILE_H264_HIGH_422:
            /* BUGFIX: log label said H264_HIGH_10_422 */
            ALOGW("%s: MediaCodec: H264_HIGH_422: disabled\n", __func__);
            goto fail;
        case FF_PROFILE_H264_HIGH_422_INTRA:
            /* BUGFIX: log label said H264_HIGH_10_INTRA */
            ALOGW("%s: MediaCodec: H264_HIGH_422_INTRA: disabled\n", __func__);
            goto fail;
        case FF_PROFILE_H264_HIGH_444:
            /* BUGFIX: log label said H264_HIGH_10_444 */
            ALOGW("%s: MediaCodec: H264_HIGH_444: disabled\n", __func__);
            goto fail;
        case FF_PROFILE_H264_HIGH_444_PREDICTIVE:
            ALOGW("%s: MediaCodec: H264_HIGH_444_PREDICTIVE: disabled\n", __func__);
            goto fail;
        case FF_PROFILE_H264_HIGH_444_INTRA:
            ALOGW("%s: MediaCodec: H264_HIGH_444_INTRA: disabled\n", __func__);
            goto fail;
        case FF_PROFILE_H264_CAVLC_444:
            ALOGW("%s: MediaCodec: H264_CAVLC_444: disabled\n", __func__);
            goto fail;
        default:
            ALOGW("%s: MediaCodec: (%d) unknown profile: disabled\n", __func__, opaque->avctx->profile);
            goto fail;
        }
        strcpy(opaque->mcc.mime_type, SDL_AMIME_VIDEO_AVC);
        opaque->mcc.profile = opaque->avctx->profile;
        opaque->mcc.level   = opaque->avctx->level;
        break;
    case AV_CODEC_ID_HEVC:
        if (!ffp->mediacodec_hevc && !ffp->mediacodec_all_videos) {
            ALOGE("%s: MediaCodec/HEVC is disabled. codec_id:%d \n", __func__, opaque->avctx->codec_id);
            goto fail;
        }
        strcpy(opaque->mcc.mime_type, SDL_AMIME_VIDEO_HEVC);
        opaque->mcc.profile = opaque->avctx->profile;
        opaque->mcc.level   = opaque->avctx->level;
        break;
    case AV_CODEC_ID_MPEG2VIDEO:
        if (!ffp->mediacodec_mpeg2 && !ffp->mediacodec_all_videos) {
            ALOGE("%s: MediaCodec/MPEG2VIDEO is disabled. codec_id:%d \n", __func__, opaque->avctx->codec_id);
            goto fail;
        }
        strcpy(opaque->mcc.mime_type, SDL_AMIME_VIDEO_MPEG2VIDEO);
        opaque->mcc.profile = opaque->avctx->profile;
        opaque->mcc.level   = opaque->avctx->level;
        break;
    default:
        ALOGE("%s:create: not H264 or H265/HEVC, codec_id:%d \n", __func__, opaque->avctx->codec_id);
        goto fail;
    }

    if (JNI_OK != SDL_JNI_SetupThreadEnv(&env)) {
        ALOGE("%s:create: SetupThreadEnv failed\n", __func__);
        goto fail;
    }

    opaque->acodec_mutex                      = SDL_CreateMutex();
    opaque->acodec_cond                       = SDL_CreateCond();
    opaque->acodec_first_dequeue_output_mutex = SDL_CreateMutex();
    opaque->acodec_first_dequeue_output_cond  = SDL_CreateCond();
    opaque->any_input_mutex                   = SDL_CreateMutex();
    opaque->any_input_cond                    = SDL_CreateCond();
    /* BUGFIX: the original tested acodec_cond twice and never checked
       acodec_mutex or the any_input pair; verify every primitive created */
    if (!opaque->acodec_mutex ||
        !opaque->acodec_cond ||
        !opaque->acodec_first_dequeue_output_mutex ||
        !opaque->acodec_first_dequeue_output_cond ||
        !opaque->any_input_mutex ||
        !opaque->any_input_cond) {
        ALOGE("%s:open_video_decoder: SDL_CreateCond() failed\n", __func__);
        goto fail;
    }

    ALOGI("AMediaFormat: %s, %dx%d\n", opaque->mcc.mime_type, opaque->avctx->width, opaque->avctx->height);
    opaque->input_aformat = SDL_AMediaFormatJava_createVideoFormat(env, opaque->mcc.mime_type, opaque->avctx->width, opaque->avctx->height);
    if (opaque->avctx->extradata && opaque->avctx->extradata_size > 0) {
        /* extradata[0] == 1 marks avcC/hvcC (length-prefixed) packaging,
           which must be converted to Annex B for MediaCodec */
        if ((opaque->avctx->codec_id == AV_CODEC_ID_H264 || opaque->avctx->codec_id == AV_CODEC_ID_HEVC)
            && opaque->avctx->extradata[0] == 1) {
#if AMC_USE_AVBITSTREAM_FILTER
            if (opaque->avctx->codec_id == AV_CODEC_ID_H264) {
                opaque->bsfc = av_bitstream_filter_init("h264_mp4toannexb");
                if (!opaque->bsfc) {
                    ALOGE("Cannot open the h264_mp4toannexb BSF!\n");
                    goto fail;
                }
            } else {
                opaque->bsfc = av_bitstream_filter_init("hevc_mp4toannexb");
                if (!opaque->bsfc) {
                    ALOGE("Cannot open the hevc_mp4toannexb BSF!\n");
                    goto fail;
                }
            }
            /* keep a copy of the untouched extradata so it can be restored
               when the node is destroyed */
            opaque->orig_extradata_size = opaque->avctx->extradata_size;
            opaque->orig_extradata = (uint8_t*) av_mallocz(opaque->avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!opaque->orig_extradata) {
                goto fail;
            }
            memcpy(opaque->orig_extradata, opaque->avctx->extradata, opaque->avctx->extradata_size);
            for (int i = 0; i < opaque->avctx->extradata_size; i += 4) {
                ALOGE("csd-0[%d]: %02x%02x%02x%02x\n", opaque->avctx->extradata_size,
                      (int)opaque->avctx->extradata[i+0],
                      (int)opaque->avctx->extradata[i+1],
                      (int)opaque->avctx->extradata[i+2],
                      (int)opaque->avctx->extradata[i+3]);
            }
            SDL_AMediaFormat_setBuffer(opaque->input_aformat, "csd-0", opaque->avctx->extradata, opaque->avctx->extradata_size);
#else
            size_t   sps_pps_size = 0;
            size_t   convert_size = opaque->avctx->extradata_size + 20;
            uint8_t *convert_buffer = (uint8_t *)calloc(1, convert_size);
            if (!convert_buffer) {
                ALOGE("%s:sps_pps_buffer: alloc failed\n", __func__);
                goto fail;
            }
            if (opaque->avctx->codec_id == AV_CODEC_ID_H264) {
                if (0 != convert_sps_pps(opaque->avctx->extradata, opaque->avctx->extradata_size,
                                         convert_buffer, convert_size,
                                         &sps_pps_size, &opaque->nal_size)) {
                    ALOGE("%s:convert_sps_pps: failed\n", __func__);
                    free(convert_buffer); /* BUGFIX: was leaked on failure */
                    goto fail;
                }
            } else {
                if (0 != convert_hevc_nal_units(opaque->avctx->extradata, opaque->avctx->extradata_size,
                                                convert_buffer, convert_size,
                                                &sps_pps_size, &opaque->nal_size)) {
                    ALOGE("%s:convert_hevc_nal_units: failed\n", __func__);
                    free(convert_buffer); /* BUGFIX: was leaked on failure */
                    goto fail;
                }
            }
            SDL_AMediaFormat_setBuffer(opaque->input_aformat, "csd-0", convert_buffer, sps_pps_size);
            for (int i = 0; i < sps_pps_size; i += 4) {
                ALOGE("csd-0[%d]: %02x%02x%02x%02x\n", (int)sps_pps_size,
                      (int)convert_buffer[i+0],
                      (int)convert_buffer[i+1],
                      (int)convert_buffer[i+2],
                      (int)convert_buffer[i+3]);
            }
            free(convert_buffer);
#endif
        } else {
            // Codec specific data
            // SDL_AMediaFormat_setBuffer(opaque->aformat, "csd-0", opaque->avctx->extradata, opaque->avctx->extradata_size);
            ALOGE("csd-0: naked\n");
        }
    } else {
        ALOGE("no buffer(%d)\n", opaque->avctx->extradata_size);
    }

    rotate_degrees = ffp_get_video_rotate_degrees(ffp);
    if (ffp->mediacodec_auto_rotate &&
        rotate_degrees != 0 &&
        SDL_Android_GetApiLevel() >= IJK_API_21_LOLLIPOP) {
        /* API >= 21: the codec rotates; report 0 degrees to the renderer */
        ALOGI("amc: rotate in decoder: %d\n", rotate_degrees);
        opaque->frame_rotate_degrees = rotate_degrees;
        SDL_AMediaFormat_setInt32(opaque->input_aformat, "rotation-degrees", rotate_degrees);
        ffp_notify_msg2(ffp, FFP_MSG_VIDEO_ROTATION_CHANGED, 0);
    } else {
        ALOGI("amc: rotate notify: %d\n", rotate_degrees);
        ffp_notify_msg2(ffp, FFP_MSG_VIDEO_ROTATION_CHANGED, rotate_degrees);
    }

    if (!ffpipeline_select_mediacodec_l(pipeline, &opaque->mcc) || !opaque->mcc.codec_name[0]) {
        ALOGE("amc: no suitable codec\n");
        goto fail;
    }

    jsurface = ffpipeline_get_surface_as_global_ref(env, pipeline);
    ret = reconfigure_codec_l(env, node, jsurface);
    J4A_DeleteGlobalRef__p(env, &jsurface);
    if (ret != 0)
        goto fail;

    ffp_set_video_codec_info(ffp, MEDIACODEC_MODULE_NAME, opaque->mcc.codec_name);

    opaque->off_buf_out = 0;
    if (opaque->n_buf_out) {
        int i;
        opaque->amc_buf_out = calloc(opaque->n_buf_out, sizeof(*opaque->amc_buf_out));
        assert(opaque->amc_buf_out != NULL);
        for (i = 0; i < opaque->n_buf_out; i++)
            opaque->amc_buf_out[i].pts = AV_NOPTS_VALUE;
    }

    SDL_SpeedSamplerReset(&opaque->sampler);
    ffp->stat.vdec_type = FFP_PROPV_DECODER_MEDIACODEC;
    return node;

fail:
    ffpipenode_free_p(&node);
    return NULL;
}
/**
 * Open the input container and locate the streams to demux.
 *
 * Remembers the first video stream (and, with PROCESS_AUDIO, the audio
 * stream), records decode time bases, and for AVC content prepares the
 * h264_mp4toannexb bitstream filter required by the decoder.
 *
 * @param strFileName path of the input container (must not be NULL)
 * @param videoType   expected video codec (e.g. MFX_CODEC_AVC)
 * @return MFX_ERR_NONE on success, MFX_ERR_UNKNOWN otherwise
 */
mfxStatus FFmpeg_Reader_Init(const char *strFileName, mfxU32 videoType)
{
    MSDK_CHECK_POINTER(strFileName, MFX_ERR_NULL_PTR);

    g_videoType = videoType;

    // Initialize libavcodec, and register all codecs and formats
    av_register_all();

    // Open input container; a non-zero result means the open failed.
    if (avformat_open_input(&g_pFormatCtx, strFileName, NULL, NULL)) {
        printf("FFMPEG: Could not open input container\n");
        return MFX_ERR_UNKNOWN;
    }

    // Probe the container for per-stream codec parameters.
    if (avformat_find_stream_info(g_pFormatCtx, NULL) < 0) {
        printf("FFMPEG: Couldn't find stream information\n");
        return MFX_ERR_UNKNOWN;
    }

    // Dump container info to console
    av_dump_format(g_pFormatCtx, 0, strFileName, 0);

    // Walk the streams, latching onto the first video stream found.
    g_videoStreamIdx = -1;
    for (unsigned int streamIdx = 0; streamIdx < g_pFormatCtx->nb_streams; streamIdx++) {
        AVStream *pStream = g_pFormatCtx->streams[streamIdx];

        if (pStream->codec->codec_type == AVMEDIA_TYPE_VIDEO && g_videoStreamIdx == -1) {
            g_videoStreamIdx = streamIdx;

            // save decoded stream timestamp time base
            g_dec_time_base = pStream->time_base;

            if (videoType == MFX_CODEC_AVC) {
                // Retrieve required h264_mp4toannexb filter
                g_pBsfc = av_bitstream_filter_init("h264_mp4toannexb");
                if (!g_pBsfc) {
                    printf("FFMPEG: Could not aquire h264_mp4toannexb filter\n");
                    return MFX_ERR_UNKNOWN;
                }
            }
        }
#ifdef PROCESS_AUDIO
        else if (pStream->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            g_audioStreamIdx = streamIdx;
            g_pAudioStream = pStream;
            g_audio_dec_time_base = pStream->time_base;
        }
#endif
    }

    if (g_videoStreamIdx == -1)
        return MFX_ERR_UNKNOWN; // Didn't find any video streams in container

    return MFX_ERR_NONE;
}
// Open an "ffmpeg://<path>" URL. The input is probed through a custom AVIO
// context (inputpb / cbuffer); check_transcode() then decides whether the
// stream can be passed through as-is (plain fopen) or must be remuxed /
// partially transcoded into MPEG-TS (output context `oc`). Returns 0 on
// success, -1 on error. Uses many class/file-scope members (infmt_ctx, oc,
// vst, ast1/ast2, video/audio1/audio2 indices, bsfc, ...).
int CFfmpeg::Open(const char* pszUrl)
{
    unsigned int i;
    m_sUrl = pszUrl;
    // Strip the "ffmpeg://" scheme prefix; the remainder is the real path.
    m_sUrl.erase(0, strlen("ffmpeg://"));
    //+
    infmt_ctx = avformat_alloc_context();
    //ring_buffer_write(&cbuffer.ringbuffer, inputbuffer, sizeof(inputbuffer));
    //unsigned char* inputbuffer = NULL;
    //inputbuffer = (unsigned char*)malloc(MAIN_BUFFER_SIZE);
    // Custom (pre-avio_alloc_context era) I/O: reads come from `cbuffer`
    // via i_read_data / i_seek_data rather than directly from a file.
    init_put_byte(&inputpb, inputbuffer, MAIN_BUFFER_SIZE, 0, &cbuffer, i_read_data, NULL, i_seek_data );
    //inputpb.buf_end = inputpb.buf_ptr;
    infmt_ctx->pb = &inputpb;
    //av_read_frame(infmt_ctx, &pkt);
    //+
    // NOTE(review): the return code of avformat_open_input() is ignored;
    // only infmt_ctx being non-NULL is checked below.
    avformat_open_input(&infmt_ctx, m_sUrl.c_str(), NULL, NULL);
    if(!infmt_ctx) {
        FFMPEG_ERROR("unknown url: %s", pszUrl);
        return -1;
    }
    av_find_stream_info(infmt_ctx);
    av_dump_format(infmt_ctx, 0, m_sUrl.c_str(), 0);
    filesize = avio_size(infmt_ctx->pb);
    printf("filesize = %d\n", filesize);
    // Decide pass-through vs. remux/transcode; sets the `transcode` flag.
    check_transcode();
    if(!transcode) {
        // Pass-through path: drop the demuxer and read the file directly.
        if(infmt_ctx) {
            av_close_input_file(infmt_ctx);
            infmt_ctx = NULL;
        }
        m_pFp = fopen(m_sUrl.c_str(), "rb");
        if(!m_pFp) {
            //perror("fopen");
            FFMPEG_ERROR("error fopen: %s", strerror(errno));
            return -1;
        }
    } else {
        FFMPEG_DEBUG("transcode or remux");
        // Build an MPEG-TS output mirroring the input: first video stream
        // plus up to two audio streams.
        avformat_alloc_output_context2(&oc, NULL, "mpegts", NULL);
        unsigned int pid = 0x100;
        for(i=0; i<infmt_ctx->nb_streams; i++) {
            AVStream *stream = infmt_ctx->streams[i];
            if(stream->codec->codec_type==AVMEDIA_TYPE_VIDEO && video==-1) {
                // Video is always stream-copied; replicate all timing and
                // display metadata from the input stream.
                video = i;
                FFMPEG_DEBUG("video index: %d, pid: 0x%x", i, pid++);
                vst = av_new_stream(oc, 0);
                avcodec_copy_context(vst->codec, infmt_ctx->streams[video]->codec);
                //vst->codec->time_base = infmt_ctx->streams[video]->time_base;
                vst->codec->sample_aspect_ratio = vst->sample_aspect_ratio = infmt_ctx->streams[video]->codec->sample_aspect_ratio;
                vst->stream_copy = 1;
                vst->avg_frame_rate = infmt_ctx->streams[video]->avg_frame_rate;
                vst->discard = AVDISCARD_NONE;
                vst->disposition = infmt_ctx->streams[video]->disposition;
                vst->duration = infmt_ctx->streams[video]->duration;
                vst->first_dts = infmt_ctx->streams[video]->first_dts;
                vst->r_frame_rate = infmt_ctx->streams[video]->r_frame_rate;
                vst->time_base = infmt_ctx->streams[video]->time_base;
                vst->quality = infmt_ctx->streams[video]->quality;
                vst->start_time = infmt_ctx->streams[video]->start_time;
            } else if(stream->codec->codec_type==AVMEDIA_TYPE_AUDIO && audio1==-1) {
                audio1 = i;
                FFMPEG_DEBUG("audio1 index: %d, pid: 0x%x", i, pid++);
                ast1 = av_new_stream(oc, 0);
                // AC3/DTS/PCM cannot be carried as-is here: open a decoder on
                // the input stream and an MP2 encoder (128 kbps stereo) on the
                // output; the actual conversion happens elsewhere via
                // adecrbuffer1. Everything else is stream-copied.
                if(stream->codec->codec_id == CODEC_ID_AC3
                   || stream->codec->codec_id == CODEC_ID_DTS
                   || stream->codec->codec_id == CODEC_ID_PCM_S16BE
                   || stream->codec->codec_id == CODEC_ID_PCM_S16LE) {
                    acodec1 = stream->codec;
                    AVCodec *inAcodec = avcodec_find_decoder(stream->codec->codec_id);
                    avcodec_open(stream->codec, inAcodec);
                    AVCodec *outAcodec = avcodec_find_encoder(CODEC_ID_MP2);
                    //ast1->codec = avcodec_alloc_context3(outAcodec);
                    ast1->codec->bit_rate = 128000;
                    ast1->codec->sample_rate = stream->codec->sample_rate;
                    if(stream->codec->channels > 2) {
                        // Ask the decoder to downmix to stereo.
                        stream->codec->request_channels = 2;
                    }
                    ast1->codec->channels = 2;
                    ast1->codec->sample_fmt = AV_SAMPLE_FMT_S16;
                    avcodec_open(ast1->codec, outAcodec);
                    ast1->codec->time_base = infmt_ctx->streams[audio1]->time_base;
                    ring_buffer_init(&adecrbuffer1, 524288);
                } else {
                    avcodec_copy_context(ast1->codec, infmt_ctx->streams[audio1]->codec);
                    //ast1->codec->time_base = infmt_ctx->streams[audio1]->time_base;
                    ast1->stream_copy = 1;
                    ast1->first_dts = infmt_ctx->streams[audio1]->first_dts;
                    ast1->r_frame_rate = infmt_ctx->streams[audio1]->r_frame_rate;
                    ast1->time_base = infmt_ctx->streams[audio1]->time_base;
                    ast1->quality = infmt_ctx->streams[audio1]->quality;
                    ast1->start_time = infmt_ctx->streams[audio1]->start_time;
                    ast1->duration = infmt_ctx->streams[audio1]->duration;
                }
            } else if(stream->codec->codec_type==AVMEDIA_TYPE_AUDIO && audio1!=i && audio2==-1) {
                // Second audio stream: identical handling to audio1.
                audio2 = i;
                FFMPEG_DEBUG("audio2 index: %d, pid: 0x%x", i, pid++);
                ast2 = av_new_stream(oc, 0);
                if(stream->codec->codec_id == CODEC_ID_AC3
                   || stream->codec->codec_id == CODEC_ID_DTS
                   || stream->codec->codec_id == CODEC_ID_PCM_S16BE
                   || stream->codec->codec_id == CODEC_ID_PCM_S16LE) {
                    acodec2 = stream->codec;
                    AVCodec *inAcodec = avcodec_find_decoder(stream->codec->codec_id);
                    avcodec_open(stream->codec, inAcodec);
                    AVCodec *outAcodec = avcodec_find_encoder(CODEC_ID_MP2);
                    //ast2->codec = avcodec_alloc_context3(outAcodec);
                    ast2->codec->bit_rate = 128000;
                    ast2->codec->sample_rate = stream->codec->sample_rate;
                    if(stream->codec->channels > 2) {
                        stream->codec->request_channels = 2;
                    }
                    ast2->codec->channels = 2;
                    ast2->codec->sample_fmt = AV_SAMPLE_FMT_S16;
                    avcodec_open(ast2->codec, outAcodec);
                    ast2->codec->time_base = infmt_ctx->streams[audio2]->time_base;
                    ring_buffer_init(&adecrbuffer2, 524288);
                } else {
                    avcodec_copy_context(ast2->codec, infmt_ctx->streams[audio2]->codec);
                    //ast2->codec->time_base = infmt_ctx->streams[audio2]->time_base;
                    ast2->stream_copy = 1;
                    ast2->first_dts = infmt_ctx->streams[audio2]->first_dts;
                    ast2->r_frame_rate = infmt_ctx->streams[audio2]->r_frame_rate;
                    ast2->time_base = infmt_ctx->streams[audio2]->time_base;
                    ast2->quality = infmt_ctx->streams[audio2]->quality;
                    ast2->start_time = infmt_ctx->streams[audio2]->start_time;
                    ast2->duration = infmt_ctx->streams[audio2]->duration;
                }
            }
        }
        // Custom write-side AVIO: muxed TS bytes go into outputringbuffer
        // through write_data.
        init_put_byte(&outputpb, outputbuffer, MAIN_BUFFER_SIZE, 1, &outputringbuffer, NULL, write_data, NULL );
        oc->pb = &outputpb;
        avformat_write_header(oc, NULL);
        //av_dump_format(oc, 0, "output.ts", 1);
        if(infmt_ctx->streams[video]->codec->codec_id == CODEC_ID_H264) {
            // H.264 from MP4-like containers must be converted to Annex-B
            // before it can be muxed into MPEG-TS.
            FFMPEG_DEBUG("open h264_mp4toannexb filter");
            bsfc = av_bitstream_filter_init("h264_mp4toannexb");
            if (!bsfc) {
                FFMPEG_ERROR("Cannot open the h264_mp4toannexb BSF!");
                return -1;
            }
        }
    }
    return 0;
}
int main(int argc, char* argv[]) { AVOutputFormat *ofmt_a = NULL,*ofmt_v = NULL; //(Input AVFormatContext and Output AVFormatContext) AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx_a = NULL, *ofmt_ctx_v = NULL; AVPacket pkt; int ret, i; int videoindex=-1,audioindex=-1; int frame_index=0; const char *in_filename = "cuc_ieschool.ts";//Input file URL //char *in_filename = "cuc_ieschool.mkv"; const char *out_filename_v = "cuc_ieschool.h264";//Output file URL //char *out_filename_a = "cuc_ieschool.mp3"; const char *out_filename_a = "cuc_ieschool.aac"; av_register_all(); //Input if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) { printf( "Could not open input file."); goto end; } if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) { printf( "Failed to retrieve input stream information"); goto end; } //Output avformat_alloc_output_context2(&ofmt_ctx_v, NULL, NULL, out_filename_v); if (!ofmt_ctx_v) { printf( "Could not create output context\n"); ret = AVERROR_UNKNOWN; goto end; } ofmt_v = ofmt_ctx_v->oformat; avformat_alloc_output_context2(&ofmt_ctx_a, NULL, NULL, out_filename_a); if (!ofmt_ctx_a) { printf( "Could not create output context\n"); ret = AVERROR_UNKNOWN; goto end; } ofmt_a = ofmt_ctx_a->oformat; for (i = 0; i < ifmt_ctx->nb_streams; i++) { //Create output AVStream according to input AVStream AVFormatContext *ofmt_ctx; AVStream *in_stream = ifmt_ctx->streams[i]; AVStream *out_stream = NULL; if(ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){ videoindex=i; out_stream=avformat_new_stream(ofmt_ctx_v, in_stream->codec->codec); ofmt_ctx=ofmt_ctx_v; }else if(ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO){ audioindex=i; out_stream=avformat_new_stream(ofmt_ctx_a, in_stream->codec->codec); ofmt_ctx=ofmt_ctx_a; }else{ break; } if (!out_stream) { printf( "Failed allocating output stream\n"); ret = AVERROR_UNKNOWN; goto end; } //Copy the settings of AVCodecContext if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 
0) { printf( "Failed to copy context from input to output stream codec context\n"); goto end; } out_stream->codec->codec_tag = 0; if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; } //Dump Format------------------ printf("\n==============Input Video=============\n"); av_dump_format(ifmt_ctx, 0, in_filename, 0); printf("\n==============Output Video============\n"); av_dump_format(ofmt_ctx_v, 0, out_filename_v, 1); printf("\n==============Output Audio============\n"); av_dump_format(ofmt_ctx_a, 0, out_filename_a, 1); printf("\n======================================\n"); //Open output file if (!(ofmt_v->flags & AVFMT_NOFILE)) { if (avio_open(&ofmt_ctx_v->pb, out_filename_v, AVIO_FLAG_WRITE) < 0) { printf( "Could not open output file '%s'", out_filename_v); goto end; } } if (!(ofmt_a->flags & AVFMT_NOFILE)) { if (avio_open(&ofmt_ctx_a->pb, out_filename_a, AVIO_FLAG_WRITE) < 0) { printf( "Could not open output file '%s'", out_filename_a); goto end; } } //Write file header if (avformat_write_header(ofmt_ctx_v, NULL) < 0) { printf( "Error occurred when opening video output file\n"); goto end; } if (avformat_write_header(ofmt_ctx_a, NULL) < 0) { printf( "Error occurred when opening audio output file\n"); goto end; } #if USE_H264BSF AVBitStreamFilterContext* h264bsfc = av_bitstream_filter_init("h264_mp4toannexb"); #endif while (1) { AVFormatContext *ofmt_ctx; AVStream *in_stream, *out_stream; //Get an AVPacket if (av_read_frame(ifmt_ctx, &pkt) < 0) break; in_stream = ifmt_ctx->streams[pkt.stream_index]; if(pkt.stream_index==videoindex){ out_stream = ofmt_ctx_v->streams[0]; ofmt_ctx=ofmt_ctx_v; printf("Write Video Packet. size:%d\tpts:%lld\n",pkt.size,pkt.pts); #if USE_H264BSF av_bitstream_filter_filter(h264bsfc, in_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0); #endif }else if(pkt.stream_index==audioindex){ out_stream = ofmt_ctx_a->streams[0]; ofmt_ctx=ofmt_ctx_a; printf("Write Audio Packet. 
size:%d\tpts:%lld\n",pkt.size,pkt.pts); }else{ continue; } //Convert PTS/DTS pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX)); pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX)); pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base); pkt.pos = -1; pkt.stream_index=0; //Write if (av_interleaved_write_frame(ofmt_ctx, &pkt) < 0) { printf( "Error muxing packet\n"); break; } //printf("Write %8d frames to output file\n",frame_index); av_free_packet(&pkt); frame_index++; } #if USE_H264BSF av_bitstream_filter_close(h264bsfc); #endif //Write file trailer av_write_trailer(ofmt_ctx_a); av_write_trailer(ofmt_ctx_v); end: avformat_close_input(&ifmt_ctx); /* close output */ if (ofmt_ctx_a && !(ofmt_a->flags & AVFMT_NOFILE)) avio_close(ofmt_ctx_a->pb); if (ofmt_ctx_v && !(ofmt_v->flags & AVFMT_NOFILE)) avio_close(ofmt_ctx_v->pb); avformat_free_context(ofmt_ctx_a); avformat_free_context(ofmt_ctx_v); if (ret < 0 && ret != AVERROR_EOF) { printf( "Error occurred.\n"); return -1; } return 0; }
/**
 * Create an IJKFF_Pipenode that decodes H.264 through Android MediaCodec.
 *
 * Requires API level >= 16 (Jelly Bean) and a supported (8-bit 4:2:0) H.264
 * profile; on any failure the node is freed and NULL is returned so the
 * caller can fall back to the software decoder. AVCC extradata is converted
 * to Annex-B (either via the h264_mp4toannexb bitstream filter or via
 * convert_sps_pps()) and attached to the MediaCodec input format as "csd-0".
 */
IJKFF_Pipenode *ffpipenode_create_video_decoder_from_android_mediacodec(FFPlayer *ffp, IJKFF_Pipeline *pipeline, SDL_Vout *vout)
{
    ALOGD("ffpipenode_create_video_decoder_from_android_mediacodec()\n");
    if (SDL_Android_GetApiLevel() < IJK_API_16_JELLY_BEAN)
        return NULL;

    if (!ffp || !ffp->is)
        return NULL;

    IJKFF_Pipenode *node = ffpipenode_alloc(sizeof(IJKFF_Pipenode_Opaque));
    if (!node)
        return node;

    VideoState            *is     = ffp->is;
    IJKFF_Pipenode_Opaque *opaque = node->opaque;
    JNIEnv                *env    = NULL;
    int ret = 0;

    node->func_destroy  = func_destroy;
    node->func_run_sync = func_run_sync;
    node->func_flush    = func_flush;
    opaque->pipeline    = pipeline;
    opaque->ffp         = ffp;
    opaque->decoder     = &is->viddec;
    opaque->weak_vout   = vout;

    opaque->avctx = opaque->decoder->avctx;

    // MediaCodec does not handle 10-bit / 4:2:2 / 4:4:4 H.264 profiles;
    // reject them up front.
    switch (opaque->avctx->profile) {
    case FF_PROFILE_H264_HIGH_10:
    case FF_PROFILE_H264_HIGH_10_INTRA:
    case FF_PROFILE_H264_HIGH_422:
    case FF_PROFILE_H264_HIGH_422_INTRA:
    case FF_PROFILE_H264_HIGH_444_PREDICTIVE:
    case FF_PROFILE_H264_HIGH_444_INTRA:
    case FF_PROFILE_H264_CAVLC_444:
        goto fail;
    }

    switch (opaque->avctx->codec_id) {
    case AV_CODEC_ID_H264:
        strcpy(opaque->mcc.mime_type, SDL_AMIME_VIDEO_AVC);
        opaque->mcc.profile = opaque->avctx->profile;
        opaque->mcc.level   = opaque->avctx->level;
        break;
    default:
        ALOGE("%s:create: not H264\n", __func__);
        goto fail;
    }

    if (JNI_OK != SDL_JNI_SetupThreadEnv(&env)) {
        ALOGE("%s:create: SetupThreadEnv failed\n", __func__);
        goto fail;
    }

    opaque->acodec_mutex                      = SDL_CreateMutex();
    opaque->acodec_cond                       = SDL_CreateCond();
    opaque->acodec_first_dequeue_output_mutex = SDL_CreateMutex();
    opaque->acodec_first_dequeue_output_cond  = SDL_CreateCond();
    ffp_packet_queue_init(&opaque->fake_pictq);
    ffp_packet_queue_start(&opaque->fake_pictq);

    // FIX: the original tested acodec_cond twice and never acodec_mutex,
    // so a failed SDL_CreateMutex() for acodec_mutex went undetected.
    if (!opaque->acodec_mutex || !opaque->acodec_cond ||
        !opaque->acodec_first_dequeue_output_mutex ||
        !opaque->acodec_first_dequeue_output_cond) {
        ALOGE("%s:open_video_decoder: SDL_CreateCond() failed\n", __func__);
        goto fail;
    }

    ALOGI("AMediaFormat: %s, %dx%d\n", opaque->mcc.mime_type, opaque->avctx->width, opaque->avctx->height);
    opaque->input_aformat = SDL_AMediaFormatJava_createVideoFormat(env, opaque->mcc.mime_type, opaque->avctx->width, opaque->avctx->height);
    if (opaque->avctx->extradata && opaque->avctx->extradata_size > 0) {
        // extradata[0] == 1 identifies AVCC (length-prefixed) extradata,
        // which must be converted to Annex-B for MediaCodec.
        if (opaque->avctx->codec_id == AV_CODEC_ID_H264 && opaque->avctx->extradata[0] == 1) {
#if AMC_USE_AVBITSTREAM_FILTER
            opaque->bsfc = av_bitstream_filter_init("h264_mp4toannexb");
            if (!opaque->bsfc) {
                ALOGE("Cannot open the h264_mp4toannexb BSF!\n");
                goto fail;
            }
            // Keep a padded copy of the original extradata so it can be
            // restored when this node is destroyed.
            opaque->orig_extradata_size = opaque->avctx->extradata_size;
            opaque->orig_extradata = (uint8_t*) av_mallocz(opaque->avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!opaque->orig_extradata) {
                goto fail;
            }
            memcpy(opaque->orig_extradata, opaque->avctx->extradata, opaque->avctx->extradata_size);
            for (int i = 0; i < opaque->avctx->extradata_size; i += 4) {
                // Debug dump of the codec-specific data, 4 bytes per line.
                // NOTE(review): may read up to 3 bytes past extradata_size when
                // the size is not a multiple of 4; FFmpeg's input padding makes
                // the access safe, but padding bytes can appear in the log.
                ALOGE("csd-0[%d]: %02x%02x%02x%02x\n", opaque->avctx->extradata_size, (int)opaque->avctx->extradata[i+0], (int)opaque->avctx->extradata[i+1], (int)opaque->avctx->extradata[i+2], (int)opaque->avctx->extradata[i+3]);
            }
            SDL_AMediaFormat_setBuffer(opaque->input_aformat, "csd-0", opaque->avctx->extradata, opaque->avctx->extradata_size);
#else
            size_t sps_pps_size   = 0;
            size_t convert_size   = opaque->avctx->extradata_size + 20;
            uint8_t *convert_buffer = (uint8_t *)calloc(1, convert_size);
            if (!convert_buffer) {
                ALOGE("%s:sps_pps_buffer: alloc failed\n", __func__);
                goto fail;
            }
            if (0 != convert_sps_pps(opaque->avctx->extradata, opaque->avctx->extradata_size,
                                     convert_buffer, convert_size,
                                     &sps_pps_size, &opaque->nal_size)) {
                ALOGE("%s:convert_sps_pps: failed\n", __func__);
                free(convert_buffer);   // FIX: buffer leaked on this error path
                goto fail;
            }
            SDL_AMediaFormat_setBuffer(opaque->input_aformat, "csd-0", convert_buffer, sps_pps_size);
            for (int i = 0; i < sps_pps_size; i += 4) {
                ALOGE("csd-0[%d]: %02x%02x%02x%02x\n", (int)sps_pps_size, (int)convert_buffer[i+0], (int)convert_buffer[i+1], (int)convert_buffer[i+2], (int)convert_buffer[i+3]);
            }
            free(convert_buffer);
#endif
        } else {
            // Codec specific data
            // SDL_AMediaFormat_setBuffer(opaque->aformat, "csd-0", opaque->avctx->extradata, opaque->avctx->extradata_size);
            ALOGE("csd-0: naked\n");
        }
    } else {
        ALOGE("no buffer(%d)\n", opaque->avctx->extradata_size);
    }

    ret = reconfigure_codec_l(env, node);
    if (ret != 0)
        goto fail;

    ffp_set_video_codec_info(ffp, MEDIACODEC_MODULE_NAME, opaque->mcc.codec_name);

    opaque->off_buf_out = 0;
    if (opaque->n_buf_out) {
        int i;
        opaque->amc_buf_out = calloc(opaque->n_buf_out, sizeof(*opaque->amc_buf_out));
        assert(opaque->amc_buf_out != NULL);
        for (i = 0; i < opaque->n_buf_out; i++)
            opaque->amc_buf_out[i].pts = AV_NOPTS_VALUE;
    }

    return node;
fail:
    ffpipenode_free_p(&node);
    return NULL;
}
/*
 * HLS-style segmenter: remux `input_file` into a rolling set of MPEG-TS
 * segments of roughly `segmentLength` seconds under `base_dirpath`, keeping
 * at most `listlen` segments on disk and (re)writing an index/playlist file
 * after every split. Segments are cut only on video keyframes. Returns 0 on
 * success; calls exit(1) on fatal setup errors.
 */
int segment(char *input_file, char* base_dirpath, char* output_index_file, char *base_file_name, char* base_file_extension, int segmentLength, int listlen)
{
    unsigned int output_index = 1;          // 1-based suffix of the segment file being written
    AVOutputFormat *ofmt = NULL;
    AVFormatContext *ic = NULL;             // input (demuxer) context
    AVFormatContext *oc;                    // output (MPEG-TS muxer) context
    AVStream *in_video_st = NULL;
    AVStream *in_audio_st = NULL;
    AVStream *out_video_st = NULL;
    AVStream *out_audio_st = NULL;
    AVCodec *codec;
    int ret;
    int quiet = 0;
    char currentOutputFileName[MAX_FILENAME_LENGTH] = {0};
    double segment_start_time = 0.0;        // PTS (seconds) of the current segment's first keyframe
    char tmp_output_index_file[MAX_FILENAME_LENGTH] = {0};
    unsigned int actual_segment_durations[MAX_SEGMENTS+1];
    double packet_time = 0;                 // PTS (seconds) of the most recent video packet

    sprintf(tmp_output_index_file, "%s.tmp", output_index_file);

    av_register_all();
    avformat_network_init();
    //just to be safe with later version and be able to handle all kind of input urls

    AVStream *in_stream = NULL;
    AVStream *out_stream = NULL;

    ret = avformat_open_input(&ic, input_file, NULL, NULL);
    if (ret != 0) {
        fprintf(stderr, "Could not open input file %s. Error %d.\n", input_file, ret);
        exit(1);
    }
    if (avformat_find_stream_info(ic, NULL) < 0) {
        fprintf(stderr, "Could not read stream information.\n");
        avformat_close_input(&ic);
        exit(1);
    }
    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "Could not allocate output context.");
        avformat_close_input(&ic);
        exit(1);
    }

    int in_video_index = -1;
    int in_audio_index = -1;
    int out_video_index = -1;
    int out_audio_index = -1;
    int i;
    int listofs = 1;    // index of the oldest segment still kept on disk

    // Map the first video and first audio stream to the output; discard
    // everything else.
    for (i = 0; i < ic->nb_streams; i++) {
        switch (ic->streams[i]->codec->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            if (!out_video_st) {
                in_video_st=ic->streams[i];
                in_video_index = i;
                in_video_st->discard = AVDISCARD_NONE;
                out_video_st = add_output_stream(oc, in_video_st);
                out_video_index=out_video_st->index;
            }
            break;
        case AVMEDIA_TYPE_AUDIO:
            if (!out_audio_st) {
                in_audio_st=ic->streams[i];
                in_audio_index = i;
                in_audio_st->discard = AVDISCARD_NONE;
                out_audio_st = add_output_stream(oc, in_audio_st);
                out_audio_index=out_audio_st->index;
            }
            break;
        default:
            ic->streams[i]->discard = AVDISCARD_ALL;
            break;
        }
    }
    if (in_video_index == -1) {
        fprintf(stderr, "Source stream must have video component.\n");
        avformat_close_input(&ic);
        avformat_free_context(oc);
        exit(1);
    }

    if (!ofmt)
        ofmt = av_guess_format("mpegts", NULL, NULL);
    if (!ofmt) {
        fprintf(stderr, "Could not find MPEG-TS muxer.\n");
        exit(1);
    }
    oc->oformat = ofmt;
    // NOTE(review): sets a codec flag (CODEC_FLAG_GLOBAL_HEADER) on the
    // format context's flags field — possibly intended for the codec
    // contexts instead; kept as-is.
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        oc->flags |= CODEC_FLAG_GLOBAL_HEADER;
    av_dump_format(oc, 0, base_file_name, 1);

    // A video decoder is only needed to recognise keyframes; failure is
    // non-fatal (splits just won't honour keyframes).
    codec = avcodec_find_decoder(in_video_st->codec->codec_id);
    if (!codec) {
        fprintf(stderr, "Could not find video decoder, key frames will not be honored.\n");
    }
    ret = avcodec_open2(in_video_st->codec, codec, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open video decoder, key frames will not be honored.\n");
    }

    // Open the first segment file.
    snprintf(currentOutputFileName, MAX_FILENAME_LENGTH, "%s/%s-%u%s", base_dirpath, base_file_name, output_index, base_file_extension);
    if (avio_open(&oc->pb, currentOutputFileName,AVIO_FLAG_WRITE) < 0) {
        fprintf(stderr, "Could not open '%s'.\n", currentOutputFileName);
        exit(1);
    } else if (!quiet) {
        fprintf(stderr, "Starting segment '%s'\n", currentOutputFileName);
    }

    int r = avformat_write_header(oc, NULL);
    if (r) {
        fprintf(stderr, "Could not write mpegts header to first output file.\n");
        exit(1);
    }

    unsigned int num_segments = 0;
    int decode_done;
    int waitfirstpacket = 1;    // drop everything until the first video keyframe
    time_t first_frame_sec = time(NULL);
    int iskeyframe = 0;
    // factor converting video PTS ticks to seconds
    double vid_pts2time = (double)in_video_st->time_base.num / in_video_st->time_base.den;

#if USE_H264BSF
    // Convert AVCC (length-prefixed) H.264 to Annex-B for MPEG-TS.
    AVBitStreamFilterContext* h264bsfc = av_bitstream_filter_init("h264_mp4toannexb");
#endif

    double prev_packet_time = 0;
    do {
        AVPacket packet;
        decode_done = av_read_frame(ic, &packet);
        if (decode_done < 0) {
            break;
        }
        //get the most recent packet time
        //this time is used when the time for the final segment is printed. It may not be on the edge of
        //of a keyframe!
        if (packet.stream_index == in_video_index) {
#if USE_H264BSF
            // NOTE(review): the filtered buffer replaces packet.data but is
            // never freed separately — a known leak pattern with this
            // deprecated BSF API; kept byte-identical here.
            av_bitstream_filter_filter(h264bsfc, ic->streams[in_video_index]->codec, NULL, &packet.data, &packet.size, packet.data, packet.size, 0);
#endif
            packet.stream_index = out_video_index;
            packet_time = (double) packet.pts * vid_pts2time;
            iskeyframe = packet.flags & AV_PKT_FLAG_KEY;
            if (iskeyframe && waitfirstpacket) {
                // First keyframe seen: segmentation clock starts here.
                waitfirstpacket = 0;
                prev_packet_time = packet_time;
                segment_start_time = packet_time;
                first_frame_sec = time(NULL);
            }
        } else if (packet.stream_index == in_audio_index){
            packet.stream_index = out_audio_index;
            iskeyframe=0;
        } else {
            //how this got here?!
            av_free_packet(&packet);
            continue;
        }
        if (waitfirstpacket) {
            av_free_packet(&packet);
            continue;
        }

        //start looking for segment splits for videos one half second before segment duration expires. This is because the
        //segments are split on key frames so we cannot expect all segments to be split exactly equally.
        if (iskeyframe && ((packet_time - segment_start_time) >= (segmentLength - 0.25)) ) {
            //a keyframe near or past segmentLength -> SPLIT
            printf("key frame packet time=%f, start time=%f\n", packet_time, segment_start_time);
            avio_flush(oc->pb);
            avio_close(oc->pb);

            actual_segment_durations[num_segments] = (unsigned int) rint(prev_packet_time - segment_start_time);
            num_segments++;

            if (num_segments > listlen) {
                //move list to exclude last:
                // delete the oldest segment file and slide the duration
                // window down by one.
                snprintf(currentOutputFileName, MAX_FILENAME_LENGTH, "%s/%s-%u%s", base_dirpath, base_file_name, listofs, base_file_extension);
                unlink (currentOutputFileName);
                listofs++;
                num_segments--;
                memmove(actual_segment_durations,actual_segment_durations+1,num_segments*sizeof(actual_segment_durations[0]));
            }

            // Refresh the playlist after every completed segment.
            write_index_file(output_index_file, tmp_output_index_file, segmentLength, num_segments, actual_segment_durations, listofs, base_file_name, base_file_extension, (num_segments >= MAX_SEGMENTS));

            if (num_segments == MAX_SEGMENTS) {
                fprintf(stderr, "Reached \"hard\" max segment number %u. If this is not live stream increase segment duration. If live segmenting set max list lenth (-m ...)\n", MAX_SEGMENTS);
                break;
            }

            // Open the next segment file.
            output_index++;
            snprintf(currentOutputFileName, MAX_FILENAME_LENGTH, "%s/%s-%u%s", base_dirpath, base_file_name, output_index, base_file_extension);
            if (avio_open(&oc->pb, currentOutputFileName, AVIO_FLAG_WRITE) < 0) {
                fprintf(stderr, "Could not open '%s'\n", currentOutputFileName);
                break;
            } else if (!quiet) {
                fprintf(stderr, "Starting segment '%s'\n", currentOutputFileName);
            }
            fflush(stderr);
            segment_start_time = packet_time;
            first_frame_sec=time(NULL);
        }

        if (packet.stream_index == out_video_index) {
            prev_packet_time = packet_time;
        }

        // Rescale timestamps into the output stream's time base and mux.
        in_stream = ic->streams[packet.stream_index];
        out_stream = oc->streams[packet.stream_index];
        packet.pts = av_rescale_q_rnd(packet.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
        packet.dts = av_rescale_q_rnd(packet.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
        packet.duration = av_rescale_q(packet.duration, in_stream->time_base, out_stream->time_base);
        packet.pos = -1;

        ret = av_write_frame(oc, &packet);
        if (ret < 0) {
            fprintf(stderr, "Warning: Could not write frame of stream.\n");
        } else if (ret > 0) {
            fprintf(stderr, "End of stream requested.\n");
            av_free_packet(&packet);
            break;
        }
        av_free_packet(&packet);
    } while (!decode_done);

    if (in_video_st->codec->codec !=NULL)
        avcodec_close(in_video_st->codec);

    if (num_segments < MAX_SEGMENTS) {
        //make sure all packets are written and then close the last file.
        avio_flush(oc->pb);
        av_write_trailer(oc);
        // NOTE(review): frees the codec context structs directly with
        // av_freep(); kept byte-identical to the original cleanup.
        for (i = 0; i < oc->nb_streams; i++) {
            av_freep(&oc->streams[i]->codec);
            av_freep(&oc->streams[i]);
        }
        avio_close(oc->pb);
        av_free(oc);
        if (num_segments>0){
            // Account for the final (possibly short) segment; clamp to >= 1s.
            actual_segment_durations[num_segments] = (unsigned int) rint(packet_time - segment_start_time);
            if (actual_segment_durations[num_segments] == 0)
                actual_segment_durations[num_segments] = 1;
            num_segments++;
            write_index_file(output_index_file, tmp_output_index_file, segmentLength, num_segments, actual_segment_durations, listofs, base_file_name, base_file_extension, 1);
        }
    }
    avformat_close_input(&ic);
    return 0;
}
int main(int argc, char* argv[]) { AVFormatContext *ifmt_ctx = NULL; AVPacket pkt; int ret, i; int videoindex=-1,audioindex=-1; const char *in_filename = "cuc_ieschool.flv";//Input file URL const char *out_filename_v = "cuc_ieschool.h264";//Output file URL const char *out_filename_a = "cuc_ieschool.mp3"; av_register_all(); //Input if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) { printf( "Could not open input file."); return -1; } if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) { printf( "Failed to retrieve input stream information"); return -1; } videoindex=-1; for(i=0; i<ifmt_ctx->nb_streams; i++) { if(ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){ videoindex=i; }else if(ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO){ audioindex=i; } } //Dump Format------------------ printf("\nInput Video===========================\n"); av_dump_format(ifmt_ctx, 0, in_filename, 0); printf("\n======================================\n"); FILE *fp_audio=fopen(out_filename_a,"wb+"); FILE *fp_video=fopen(out_filename_v,"wb+"); /* FIX: H.264 in some container format (FLV, MP4, MKV etc.) need "h264_mp4toannexb" bitstream filter (BSF) *Add SPS,PPS in front of IDR frame *Add start code ("0,0,0,1") in front of NALU H.264 in some container (MPEG2TS) don't need this BSF. */ #if USE_H264BSF AVBitStreamFilterContext* h264bsfc = av_bitstream_filter_init("h264_mp4toannexb"); #endif while(av_read_frame(ifmt_ctx, &pkt)>=0){ if(pkt.stream_index==videoindex){ #if USE_H264BSF av_bitstream_filter_filter(h264bsfc, ifmt_ctx->streams[videoindex]->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0); #endif printf("Write Video Packet. size:%d\tpts:%lld\n",pkt.size,pkt.pts); fwrite(pkt.data,1,pkt.size,fp_video); }else if(pkt.stream_index==audioindex){ /* AAC in some container format (FLV, MP4, MKV etc.) need to add 7 Bytes ADTS Header in front of AVPacket data manually. Other Audio Codec (MP3...) works well. */ printf("Write Audio Packet. 
size:%d\tpts:%lld\n",pkt.size,pkt.pts); fwrite(pkt.data,1,pkt.size,fp_audio); } av_free_packet(&pkt); } #if USE_H264BSF av_bitstream_filter_close(h264bsfc); #endif fclose(fp_video); fclose(fp_audio); avformat_close_input(&ifmt_ctx); if (ret < 0 && ret != AVERROR_EOF) { printf( "Error occurred.\n"); return -1; } return 0; }
/**
 * Init the muxer with streams.
 *
 * Sets container metadata, creates the mp4toannexb bitstream filters for
 * MPEG-TS output, adds one lavf stream per supported input component and
 * writes the container header.
 *
 * @return 0 on success, -1 on error (lm->m_errors is incremented).
 */
static int
lav_muxer_init(muxer_t* m, struct streaming_start *ss, const char *name)
{
  int i;
  streaming_start_component_t *ssc;
  AVFormatContext *oc;
  AVDictionary *opts = NULL;
  lav_muxer_t *lm = (lav_muxer_t*)m;
  char app[128];

  snprintf(app, sizeof(app), "Tvheadend %s", tvheadend_version);

  oc = lm->lm_oc;

  av_dict_set(&oc->metadata, "title", name, 0);
  av_dict_set(&oc->metadata, "service_name", name, 0);
  av_dict_set(&oc->metadata, "service_provider", app, 0);

  if(lm->m_config.m_type == MC_MPEGTS) {
    // MPEG-TS carries H.264/HEVC in Annex-B form; MP4-style input needs
    // these filters to convert length-prefixed NALs.
    lm->lm_h264_filter = av_bitstream_filter_init("h264_mp4toannexb");
    lm->lm_hevc_filter = av_bitstream_filter_init("hevc_mp4toannexb");
  }

  oc->max_delay = 0.7 * AV_TIME_BASE;

  for(i=0; i < ss->ss_num_components; i++) {
    ssc = &ss->ss_components[i];

    if(ssc->ssc_disabled)
      continue;

    if(!lav_muxer_support_stream(lm->m_config.m_type, ssc->ssc_type)) {
      tvhwarn(LS_LIBAV,  "%s is not supported in %s", 
	      streaming_component_type2txt(ssc->ssc_type), 
	      muxer_container_type2txt(lm->m_config.m_type));
      ssc->ssc_muxer_disabled = 1;
      continue;
    }

    if(lav_muxer_add_stream(lm, ssc)) {
      tvherror(LS_LIBAV,  "Failed to add %s stream", 
	       streaming_component_type2txt(ssc->ssc_type));
      ssc->ssc_muxer_disabled = 1;
      continue;
    }
  }

  if(lm->m_config.m_type == MC_AVMP4) {
    // Fragmented MP4 so output is streamable without a trailing moov.
    av_dict_set(&opts, "frag_duration", "1", 0);
    av_dict_set(&opts, "ism_lookahead", "0", 0);
  }

  if(!lm->lm_oc->nb_streams) {
    tvherror(LS_LIBAV, "No supported streams available");
    lm->m_errors++;
    av_dict_free(&opts);   /* FIX: opts leaked on this error path */
    return -1;
  } else if(avformat_write_header(lm->lm_oc, &opts) < 0) {
    tvherror(LS_LIBAV, "Failed to write %s header", 
	     muxer_container_type2txt(lm->m_config.m_type));
    lm->m_errors++;
    av_dict_free(&opts);   /* FIX: opts leaked on this error path */
    return -1;
  }

  /* av_dict_free() is NULL-safe, so no guard is needed. */
  av_dict_free(&opts);

  lm->lm_init = 1;

  return 0;
}
// Construct a Haali-splitter-backed video source: open the file through the
// Haali media splitter COM interface, locate track `Track`, build a codec
// context from the track's property bag, open the lavc decoder, and decode
// one frame to discover the remaining video properties (FPS estimate, SAR).
// Throws FFMS_Exception on any setup failure.
FFHaaliVideo::FFHaaliVideo(const char *SourceFile, int Track,
	FFMS_Index &Index, int Threads, FFMS_Sources SourceMode)
: Res(FFSourceResources<FFMS_VideoSource>(this)), FFMS_VideoSource(SourceFile, Index, Track, Threads) {
	BitStreamFilter = NULL;

	pMMC = HaaliOpenFile(SourceFile, SourceMode);

	CComPtr<IEnumUnknown> pEU;
	if (!SUCCEEDED(pMMC->EnumTracks(&pEU)))
		throw FFMS_Exception(FFMS_ERROR_DECODING, FFMS_ERROR_CODEC,
			"Failed to enumerate tracks");

	// Walk the track enumerator until the requested track index is reached.
	CComPtr<IUnknown> pU;
	int CurrentTrack = -1;
	while (pEU->Next(1, &pU, NULL) == S_OK && ++CurrentTrack != Track) pU = NULL;
	CComQIPtr<IPropertyBag> pBag = pU;

	if (CurrentTrack != Track || !pBag)
		throw FFMS_Exception(FFMS_ERROR_DECODING, FFMS_ERROR_CODEC,
			"Failed to find track");

	HCodecContext = InitializeCodecContextFromHaaliInfo(pBag);
	CodecContext = HCodecContext;

	// Detach the codec pointer from the context so avcodec_open2 gets it as
	// a separate argument.
	const AVCodec *Codec = NULL;
	std::swap(Codec, CodecContext->codec);
	if (avcodec_open2(CodecContext, Codec, NULL) < 0)
		throw FFMS_Exception(FFMS_ERROR_DECODING, FFMS_ERROR_CODEC,
			"Could not open video codec");

	CodecContext->thread_count = DecodingThreads;

	// Haali delivers AVCC (length-prefixed) H.264; convert to Annex-B.
	if (CodecContext->codec->id == FFMS_ID(H264) && SourceMode == FFMS_SOURCE_HAALIMPEG)
		BitStreamFilter = av_bitstream_filter_init("h264_mp4toannexb");

	Res.CloseCodec(true);

	// Always try to decode a frame to make sure all required parameters are known
	int64_t Dummy;
	DecodeNextFrame(&Dummy);

	VP.FPSDenominator = 1;
	VP.FPSNumerator = 30;

	// Calculate the average framerate
	if (Frames.size() >= 2) {
		double PTSDiff = (double)(Frames.back().PTS - Frames.front().PTS);
		// PTS values here are in milliseconds, hence the /1000 against a
		// numerator of 1,000,000.
		VP.FPSDenominator = (unsigned int)(PTSDiff  / (double)1000 / (double)(Frames.size() - 1) + 0.5);
		VP.FPSNumerator = 1000000;
	}

	// Set the video properties from the codec context
	SetVideoProperties();

	// Output the already decoded frame so it isn't wasted
	OutputFrame(DecodeFrame);

	// Set AR variables: derive sample aspect ratio from the container's
	// display dimensions versus the encoded dimensions.
	CComVariant pV;
	USHORT Num = 0, Den = 0;

	pV.Clear();
	if (SUCCEEDED(pBag->Read(L"Video.DisplayWidth", &pV, NULL)) && SUCCEEDED(pV.ChangeType(VT_UI4)))
		Num = pV.uiVal;
	pV.Clear();
	if (SUCCEEDED(pBag->Read(L"Video.DisplayHeight", &pV, NULL)) && SUCCEEDED(pV.ChangeType(VT_UI4)))
		Den = pV.uiVal;

	if (Num && Den) {
		VP.SARNum = LocalFrame.EncodedHeight * Num;
		VP.SARDen = LocalFrame.EncodedWidth * Den;
	}
}
/**
 * Demux @in_filename, re-encoding the video stream to H.264 into
 * @out_filename_v and (nominally) copying audio to @out_filename_a.
 *
 * Returns 0 on success, -1 (or a negative AVERROR from early exits) on error.
 *
 * NOTE(review): this function has several structural problems, flagged inline:
 * per-packet decoder/encoder open/close, an unchecked got_frame, leaks on the
 * early `return` paths (contexts are not freed), and the audio header/trailer
 * handling is inconsistent (header write commented out, trailer still written).
 */
int demux(const char *in_filename, const char *out_filename_v, const char *out_filename_a) {
	AVOutputFormat *ofmt_a = NULL, *ofmt_v = NULL;
	// Input AVFormatContext and Output AVFormatContext
	AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx_a = NULL, *ofmt_ctx_v = NULL;
	AVPacket pkt, enc_pkt;
	int ret, i;
	int video_index = -1, audio_index = -1;
	int frame_index = 0;

	av_register_all();
	// Input
	if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
		printf("Could not open input file.");
		goto end;
	}
	if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
		printf("Failed to retrieve input stream information");
		goto end;
	}
	// Output: one muxer context per elementary stream.
	avformat_alloc_output_context2(&ofmt_ctx_v, NULL, NULL, out_filename_v);
	if (!ofmt_ctx_v) {
		printf("Could not create output context.\n");
		ret = AVERROR_UNKNOWN;
		goto end;
	}
	ofmt_v = ofmt_ctx_v->oformat;
	avformat_alloc_output_context2(&ofmt_ctx_a, NULL, NULL, out_filename_a);
	if (!ofmt_ctx_a) {
		printf("Could not create output context\n");
		ret = AVERROR_UNKNOWN;
		goto end;
	}
	ofmt_a = ofmt_ctx_a->oformat;

	for (i = 0; i < ifmt_ctx->nb_streams; i++) {
		// Create output AVStream according to input AVStream
		AVFormatContext *ofmt_ctx;
		AVStream *in_stream = ifmt_ctx->streams[i];
		AVStream *out_stream = NULL;
		if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			video_index = i;
			out_stream = avformat_new_stream(ofmt_ctx_v, in_stream->codec->codec);
			ofmt_ctx = ofmt_ctx_v;
		} else if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
			audio_index = i;
			out_stream = avformat_new_stream(ofmt_ctx_a, in_stream->codec->codec);
			ofmt_ctx = ofmt_ctx_a;
		} else {
			// NOTE(review): `break` stops scanning at the first stream that is
			// neither audio nor video (e.g. subtitles), skipping later A/V
			// streams; `continue` is almost certainly what was intended.
			break;
		}
		if (!out_stream) {
			printf("Failed allocating output stream\n");
			ret = AVERROR_UNKNOWN;
			goto end;
		}
		// Copy the settings of AVCodecContext
		if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0) {
			printf(
					"Failed to copy context from input to output stream codec context\n");
			goto end;
		}
		out_stream->codec->codec_tag = 0;
		if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
			out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}

	// Open output file
	if (!(ofmt_v->flags & AVFMT_NOFILE)) {
		if (avio_open(&ofmt_ctx_v->pb, out_filename_v, AVIO_FLAG_WRITE) < 0) {
			printf("Could not open output file '%s'", out_filename_v);
			goto end;
		}
	}
	if (!(ofmt_a->flags & AVFMT_NOFILE)) {
		if (avio_open(&ofmt_ctx_a->pb, out_filename_a, AVIO_FLAG_WRITE) < 0) {
			printf("Could not open output file '%s'", out_filename_a);
			goto end;
		}
	}

	// Write file header
	// NOTE(review): audio header write is commented out below, yet
	// av_write_trailer(ofmt_ctx_a) is still called at the end — inconsistent.
	if (avformat_write_header(ofmt_ctx_v, NULL) < 0) {
		printf("Error occurred when opening video output file\n");
		goto end;
	}
	// if (avformat_write_header(ofmt_ctx_a, NULL) < 0) {
	// printf("Error occurred when opening audio output file\n");
	// goto end;
	// }

#if USE_H264BSF
	AVBitStreamFilterContext* h264bsfc = av_bitstream_filter_init("h264_mp4toannexb");
#endif

	while (1) {
		AVFormatContext *ofmt_ctx;
		AVStream *in_stream, *out_stream;
		AVCodecContext *dec_ctx = NULL, *enc_ctx = NULL;
		AVCodec *dec = NULL, *encoder = NULL;
		AVFrame *frame = NULL;
		int got_frame;
		// Get an AVPacket
		if (av_read_frame(ifmt_ctx, &pkt) < 0)
			break;
		in_stream = ifmt_ctx->streams[pkt.stream_index];
		if (pkt.stream_index == video_index) {
			ofmt_ctx = ofmt_ctx_v;
			// NOTE(review): a new output stream is created for EVERY video
			// packet, after the header was already written — this should be
			// the stream created in the setup loop above.
			out_stream = avformat_new_stream(ofmt_ctx, NULL);
			/* find decoder for the stream */
			dec_ctx = in_stream->codec;
			dec = avcodec_find_decoder(dec_ctx->codec_id);
			if (!dec) {
				fprintf(stderr, "Failed to find %s codec\n",
						av_get_media_type_string(AVMEDIA_TYPE_VIDEO));
				return AVERROR(EINVAL);
			}
			/* Open decoder */
			// NOTE(review): decoder is re-opened for every packet and closed
			// again below; it should be opened once before the loop. The inner
			// `int ret` also shadows the outer `ret`.
			int ret = avcodec_open2(dec_ctx, dec, NULL);
			if (ret < 0) {
				av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
				return ret;
			}
			// decoder is MPEG-4 part 2
			printf("decoder is %s\n", dec->long_name);
			// NOTE
			frame = av_frame_alloc();
			ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, &pkt);
			if (ret < 0) {
				av_frame_free(&frame);
				av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
				break;
			}
			// printf("frame duration is %d\n", frame->pkt_duration);
			// encode
			encoder = avcodec_find_encoder(AV_CODEC_ID_H264);
			// avcodec_copy_context(enc_ctx, dec_ctx);
			// NOTE(review): enc_ctx is allocated from `encoder` BEFORE the
			// NULL check on `encoder` below — the check comes too late.
			enc_ctx = avcodec_alloc_context3(encoder);
			if (!encoder) {
				av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
				return AVERROR_INVALIDDATA;
			}
			enc_ctx->height = dec_ctx->height;
			enc_ctx->width = dec_ctx->width;
			enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
			enc_ctx->pix_fmt = encoder->pix_fmts[0];
			enc_ctx->time_base = dec_ctx->time_base;
			//enc_ctx->time_base.num = 1;
			//enc_ctx->time_base.den = 25;
			// Required options for H.264; encoding fails without them.
			enc_ctx->me_range = 16;
			enc_ctx->max_qdiff = 4;
			enc_ctx->qmin = 10;
			enc_ctx->qmax = 51;
			enc_ctx->qcompress = 0.6;
			enc_ctx->refs = 3;
			// NOTE(review): 1500 is bits/sec, i.e. 1.5 kbps — almost certainly
			// meant to be 1500000 (1.5 Mbps).
			enc_ctx->bit_rate = 1500;
			ret = avcodec_open2(enc_ctx, encoder, NULL);
			if (ret < 0) {
				av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
				return ret;
			}
			// NOTE(review): setting "preset" after avcodec_open2 has no
			// effect; options must be set before opening the codec.
			av_opt_set(enc_ctx->priv_data, "preset", "slow", 0);
			// AVOutputFormat *formatOut = av_guess_format(NULL, out_filename_v, NULL);
			enc_pkt.data = NULL;
			enc_pkt.size = 0;
			av_init_packet(&enc_pkt);
			// NOTE(review): second avcodec_open2 on an already-open context;
			// this duplicate open should be removed.
			ret = avcodec_open2(enc_ctx, encoder, NULL);
			if (ret < 0) {
				av_log(NULL, AV_LOG_ERROR, "Failed to open encoder for stream #%u\n", i);
				return ret;
			}
			// NOTE(review): got_frame is never checked — when the encoder
			// buffers the frame, enc_pkt below is written while empty.
			ret = avcodec_encode_video2(enc_ctx, &enc_pkt, frame, &got_frame);
			printf("demo is %s\n", "hello");
			av_frame_free(&frame);
			avcodec_close(enc_ctx);
			avcodec_close(dec_ctx);
			// printf("Write Video Packet. size:%d\tpts:%lld\n", pkt.size, pkt.pts);
#if USE_H264BSF
			av_bitstream_filter_filter(h264bsfc, in_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif
		} else {
			continue;
		}
		// Convert PTS/DTS from input to output time base.
		enc_pkt.pts = av_rescale_q_rnd(enc_pkt.pts, in_stream->time_base,
				out_stream->time_base,
				(AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		enc_pkt.dts = av_rescale_q_rnd(enc_pkt.dts, in_stream->time_base,
				out_stream->time_base,
				(AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		enc_pkt.duration = av_rescale_q(enc_pkt.duration, in_stream->time_base,
				out_stream->time_base);
		// enc_pkt.pos = -1;
		enc_pkt.stream_index = video_index;
		if (av_interleaved_write_frame(ofmt_ctx, &enc_pkt) < 0) {
			printf("Error muxing packet\n");
			break;
		}
		// NOTE(review): av_interleaved_write_frame takes ownership of the
		// packet; freeing enc_pkt afterwards is redundant at best.
		av_free_packet(&enc_pkt);
		av_free_packet(&pkt);
		frame_index++;
	}

#if USE_H264BSF
	av_bitstream_filter_close(h264bsfc);
#endif

	// Write file trailer
	av_write_trailer(ofmt_ctx_a);
	av_write_trailer(ofmt_ctx_v);
end:
	avformat_close_input(&ifmt_ctx);
	/* close output */
	if (ofmt_ctx_a && !(ofmt_a->flags & AVFMT_NOFILE))
		avio_close(ofmt_ctx_a->pb);
	if (ofmt_ctx_v && !(ofmt_v->flags & AVFMT_NOFILE))
		avio_close(ofmt_ctx_v->pb);
	avformat_free_context(ofmt_ctx_a);
	avformat_free_context(ofmt_ctx_v);
	if (ret < 0 && ret != AVERROR_EOF) {
		printf("Error occurred.\n");
		return -1;
	}
	return 0;
}
/**
 * Open and configure the CrystalHD hardware decoder for this codec context.
 *
 * Maps the lavc codec id to a CrystalHD media subtype, converts AVC1
 * (length-prefixed) H.264 extradata to Annex B via the h264_mp4toannexb BSF,
 * then opens the device, sets the input format/colour space and starts
 * decode + capture. Returns 0 on success, negative AVERROR on failure.
 */
static av_cold int init(AVCodecContext *avctx)
{
    CHDContext* priv;
    BC_STATUS ret;
    BC_INFO_CRYSTAL version;
    BC_INPUT_FORMAT format = {
        .FGTEnable   = FALSE,
        .Progressive = TRUE,
        .OptFlags    = 0x80000000 | vdecFrameRate59_94 | 0x40,
        .width       = avctx->width,
        .height      = avctx->height,
    };
    BC_MEDIA_SUBTYPE subtype;

    uint32_t mode = DTS_PLAYBACK_MODE |
                    DTS_LOAD_FILE_PLAY_FW |
                    DTS_SKIP_TX_CHK_CPB |
                    DTS_PLAYBACK_DROP_RPT_MODE |
                    DTS_SINGLE_THREADED_MODE |
                    DTS_DFLT_RESOLUTION(vdecRESOLUTION_1080p23_976);

    av_log(avctx, AV_LOG_VERBOSE, "CrystalHD Init for %s\n",
           avctx->codec->name);

    // The hardware outputs packed 4:2:2.
    avctx->pix_fmt = PIX_FMT_YUYV422;

    /* Initialize the library */
    priv               = avctx->priv_data;
    priv->avctx        = avctx;
    // extradata starting with 1 => AVCC (length-prefixed NALs), not Annex B.
    priv->is_nal       = avctx->extradata_size > 0 && *(avctx->extradata) == 1;
    priv->last_picture = -1;
    priv->decode_wait  = BASE_WAIT;

    subtype = id2subtype(priv, avctx->codec->id);
    switch (subtype) {
    case BC_MSUBTYPE_AVC1:
        {
            uint8_t *dummy_p;
            int dummy_int;

            // The BSF rewrites avctx->extradata in place to Annex B as a side
            // effect of this zero-length filter call; the output buffer itself
            // is discarded. NOTE(review): the return value is not checked.
            priv->bsfc = av_bitstream_filter_init("h264_mp4toannexb");
            if (!priv->bsfc) {
                av_log(avctx, AV_LOG_ERROR,
                       "Cannot open the h264_mp4toannexb BSF!\n");
                return AVERROR_BSF_NOT_FOUND;
            }
            av_bitstream_filter_filter(priv->bsfc, avctx, NULL, &dummy_p,
                                       &dummy_int, NULL, 0, 0);
        }
        subtype = BC_MSUBTYPE_H264;
        // Fall-through
    case BC_MSUBTYPE_H264:
        format.startCodeSz = 4;
        // Fall-through
    case BC_MSUBTYPE_VC1:
    case BC_MSUBTYPE_WVC1:
    case BC_MSUBTYPE_WMV3:
    case BC_MSUBTYPE_WMVA:
    case BC_MSUBTYPE_MPEG2VIDEO:
    case BC_MSUBTYPE_DIVX:
    case BC_MSUBTYPE_DIVX311:
        format.pMetaData  = avctx->extradata;
        format.metaDataSz = avctx->extradata_size;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: Unknown codec name\n");
        return AVERROR(EINVAL);
    }
    format.mSubtype = subtype;

    if (priv->sWidth) {
        format.bEnableScaling = 1;
        format.ScalingParams.sWidth = priv->sWidth;
    }

    /* Get a decoder instance */
    av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: starting up\n");
    // Initialize the Link and Decoder devices
    ret = DtsDeviceOpen(&priv->dev, mode);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: DtsDeviceOpen failed\n");
        goto fail;
    }

    ret = DtsCrystalHDVersion(priv->dev, &version);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_VERBOSE,
               "CrystalHD: DtsCrystalHDVersion failed\n");
        goto fail;
    }
    // device id 0 identifies the older BCM70012 board.
    priv->is_70012 = version.device == 0;

    if (priv->is_70012 &&
        (subtype == BC_MSUBTYPE_DIVX || subtype == BC_MSUBTYPE_DIVX311)) {
        av_log(avctx, AV_LOG_VERBOSE,
               "CrystalHD: BCM70012 doesn't support MPEG4-ASP/DivX/Xvid\n");
        goto fail;
    }

    ret = DtsSetInputFormat(priv->dev, &format);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: SetInputFormat failed\n");
        goto fail;
    }

    ret = DtsOpenDecoder(priv->dev, BC_STREAM_TYPE_ES);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsOpenDecoder failed\n");
        goto fail;
    }

    ret = DtsSetColorSpace(priv->dev, OUTPUT_MODE422_YUY2);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsSetColorSpace failed\n");
        goto fail;
    }

    ret = DtsStartDecoder(priv->dev);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsStartDecoder failed\n");
        goto fail;
    }

    ret = DtsStartCapture(priv->dev);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsStartCapture failed\n");
        goto fail;
    }

    if (avctx->codec->id == CODEC_ID_H264) {
        priv->parser = av_parser_init(avctx->codec->id);
        if (!priv->parser)
            av_log(avctx, AV_LOG_WARNING,
                   "Cannot open the h.264 parser! Interlaced h.264 content "
                   "will not be detected reliably.\n");
        // NOTE(review): priv->parser is dereferenced here even when the
        // warning above fired because it is NULL — potential crash.
        priv->parser->flags = PARSER_FLAG_COMPLETE_FRAMES;
    }

    av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Init complete.\n");

    return 0;

 fail:
    uninit(avctx);
    return -1;
}

/**
 * Copy one decoded picture (or field) out of the hardware into priv->pic.
 *
 * Pops the packet pts queued at decode time, decides whether the picture is
 * interlaced (the hardware flag is not always trustworthy for H.264),
 * copies the luma/chroma-packed plane (field-interleaving when interlaced),
 * and reports the frame to the caller once both fields have arrived.
 *
 * Returns RET_OK, RET_ERROR, RET_SKIP_NEXT_COPY or RET_COPY_NEXT_FIELD.
 */
static inline CopyRet copy_frame(AVCodecContext *avctx,
                                 BC_DTS_PROC_OUT *output,
                                 void *data, int *data_size)
{
    BC_STATUS ret;
    BC_DTS_STATUS decoder_status;
    uint8_t trust_interlaced;
    uint8_t interlaced;

    CHDContext *priv = avctx->priv_data;
    int64_t pkt_pts  = AV_NOPTS_VALUE;
    uint8_t pic_type = 0;

    uint8_t bottom_field = (output->PicInfo.flags & VDEC_FLAG_BOTTOMFIELD) ==
                           VDEC_FLAG_BOTTOMFIELD;
    uint8_t bottom_first = !!(output->PicInfo.flags & VDEC_FLAG_BOTTOM_FIRST);

    int width    = output->PicInfo.width;
    int height   = output->PicInfo.height;
    int bwidth;
    uint8_t *src = output->Ybuff;
    int sStride;
    uint8_t *dst;
    int dStride;

    if (output->PicInfo.timeStamp != 0) {
        OpaqueList *node = opaque_list_pop(priv, output->PicInfo.timeStamp);
        if (node) {
            pkt_pts = node->reordered_opaque;
            pic_type = node->pic_type;
            av_free(node);
        } else {
            /*
             * We will encounter a situation where a timestamp cannot be
             * popped if a second field is being returned. In this case,
             * each field has the same timestamp and the first one will
             * cause it to be popped. To keep subsequent calculations
             * simple, pic_type should be set a FIELD value - doesn't
             * matter which, but I chose BOTTOM.
             */
            pic_type = PICT_BOTTOM_FIELD;
        }
        av_log(avctx, AV_LOG_VERBOSE, "output \"pts\": %"PRIu64"\n",
               output->PicInfo.timeStamp);
        av_log(avctx, AV_LOG_VERBOSE, "output picture type %d\n",
               pic_type);
    }

    ret = DtsGetDriverStatus(priv->dev, &decoder_status);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR,
               "CrystalHD: GetDriverStatus failed: %u\n", ret);
        return RET_ERROR;
    }

    /*
     * For most content, we can trust the interlaced flag returned
     * by the hardware, but sometimes we can't. These are the
     * conditions under which we can trust the flag:
     *
     * 1) It's not h.264 content
     * 2) The UNKNOWN_SRC flag is not set
     * 3) We know we're expecting a second field
     * 4) The hardware reports this picture and the next picture
     *    have the same picture number.
     *
     * Note that there can still be interlaced content that will
     * fail this check, if the hardware hasn't decoded the next
     * picture or if there is a corruption in the stream. (In either
     * case a 0 will be returned for the next picture number)
     */
    trust_interlaced = avctx->codec->id != CODEC_ID_H264 ||
                       !(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) ||
                       priv->need_second_field ||
                       (decoder_status.picNumFlags & ~0x40000000) ==
                       output->PicInfo.picture_number;

    /*
     * If we got a false negative for trust_interlaced on the first field,
     * we will realise our mistake here when we see that the picture number is that
     * of the previous picture. We cannot recover the frame and should discard the
     * second field to keep the correct number of output frames.
     */
    if (output->PicInfo.picture_number == priv->last_picture && !priv->need_second_field) {
        av_log(avctx, AV_LOG_WARNING,
               "Incorrectly guessed progressive frame. Discarding second field\n");
        /* Returning without providing a picture. */
        return RET_OK;
    }

    interlaced = (output->PicInfo.flags & VDEC_FLAG_INTERLACED_SRC) &&
                 trust_interlaced;

    if (!trust_interlaced && (decoder_status.picNumFlags & ~0x40000000) == 0) {
        av_log(avctx, AV_LOG_VERBOSE,
               "Next picture number unknown. Assuming progressive frame.\n");
    }

    av_log(avctx, AV_LOG_VERBOSE, "Interlaced state: %d | trust_interlaced %d\n",
           interlaced, trust_interlaced);

    // Release the previous frame's buffer only once both its fields are done.
    if (priv->pic.data[0] && !priv->need_second_field)
        avctx->release_buffer(avctx, &priv->pic);

    priv->need_second_field = interlaced && !priv->need_second_field;

    priv->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
                             FF_BUFFER_HINTS_REUSABLE;
    if (!priv->pic.data[0]) {
        if (avctx->get_buffer(avctx, &priv->pic) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return RET_ERROR;
        }
    }

    bwidth = av_image_get_linesize(avctx->pix_fmt, width, 0);
    if (priv->is_70012) {
        // The BCM70012 pads lines out to fixed strides.
        // NOTE(review): the `width <= 1080` branch is unreachable after
        // `width <= 1280`, and pStride is left uninitialized for
        // width > 1280 — this selection looks buggy; confirm intended
        // strides against the 70012 datasheet.
        int pStride;

        if (width <= 720)
            pStride = 720;
        else if (width <= 1280)
            pStride = 1280;
        else if (width <= 1080)
            pStride = 1080;
        sStride = av_image_get_linesize(avctx->pix_fmt, pStride, 0);
    } else {
        sStride = bwidth;
    }

    dStride = priv->pic.linesize[0];
    dst     = priv->pic.data[0];

    av_log(priv->avctx, AV_LOG_VERBOSE, "CrystalHD: Copying out frame\n");

    if (interlaced) {
        int dY = 0;
        int sY = 0;

        height /= 2;
        if (bottom_field) {
            av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: bottom field\n");
            dY = 1;
        } else {
            av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: top field\n");
            dY = 0;
        }

        // Write every other destination line, interleaving the two fields.
        for (sY = 0; sY < height; dY++, sY++) {
            memcpy(&(dst[dY * dStride]), &(src[sY * sStride]), bwidth);
            dY++;
        }
    } else {
        av_image_copy_plane(dst, dStride, src, sStride, bwidth, height);
    }

    priv->pic.interlaced_frame = interlaced;
    if (interlaced)
        priv->pic.top_field_first = !bottom_first;

    priv->pic.pkt_pts = pkt_pts;

    if (!priv->need_second_field) {
        *data_size       = sizeof(AVFrame);
        *(AVFrame *)data = priv->pic;
    }

    /*
     * Two types of PAFF content have been observed. One form causes the
     * hardware to return a field pair and the other individual fields,
     * even though the input is always individual fields. We must skip
     * copying on the next decode() call to maintain pipeline length in
     * the first case.
     */
    if (!interlaced && (output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) &&
        (pic_type == PICT_TOP_FIELD || pic_type == PICT_BOTTOM_FIELD)) {
        av_log(priv->avctx, AV_LOG_VERBOSE, "Fieldpair from two packets.\n");
        return RET_SKIP_NEXT_COPY;
    }

    /*
     * Testing has shown that in all cases where we don't want to return the
     * full frame immediately, VDEC_FLAG_UNKNOWN_SRC is set.
     */
    return priv->need_second_field &&
           !(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) ?
           RET_COPY_NEXT_FIELD : RET_OK;
}
bool Remux::executeRemux() { AVPacket readPkt; int ret; if ((ret = avformat_open_input(&ifmt_ctx, in_filename.c_str(), 0, 0)) < 0) { return false; } if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) { return false; } string::size_type pos = out_filename.find_last_of("."); if (pos == string::npos) out_filename.append(".mp4"); avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename.c_str()); if (!writeHeader()) return false; int frame_index = 0; AVBitStreamFilterContext* h264bsfc = av_bitstream_filter_init("h264_mp4toannexb"); int startFlag = 1; int64_t pts_start_time = 0; int64_t dts_start_time = 0; int64_t pre_pts = 0; int64_t pre_dts = 0; while (1) { ret = av_read_frame(ifmt_ctx, &readPkt); if (ret < 0) { break; } if (readPkt.stream_index == videoIndex) { ++frame_index; //过滤掉前面的非I帧 if (frame_index == startFlag&&readPkt.flags != AV_PKT_FLAG_KEY){ ++startFlag; continue; } if (frame_index == startFlag){ pts_start_time = readPkt.pts>0? readPkt.pts:0; dts_start_time = readPkt.dts>0? 
readPkt.dts:0; pre_dts = dts_start_time; pre_pts = pts_start_time; } //过滤得到h264数据包 if (isMp4) av_bitstream_filter_filter(h264bsfc, ifmt_ctx->streams[videoIndex]->codec, NULL, &readPkt.data, &readPkt.size, readPkt.data, readPkt.size, 0); if (readPkt.pts != AV_NOPTS_VALUE){ readPkt.pts = readPkt.pts - pts_start_time; } if (readPkt.dts != AV_NOPTS_VALUE){ if (readPkt.dts <= pre_dts&&frame_index != startFlag){ //保证 dts 单调递增 int64_t delta = av_rescale_q(1, ofmt_ctx->streams[0]->time_base, ifmt_ctx->streams[videoIndex]->time_base); readPkt.dts = pre_dts + delta + 1; } else{ //initDts(&readPkt.dts, dts_start_time); readPkt.dts = readPkt.dts - dts_start_time; } } pre_dts = readPkt.dts; pre_pts = readPkt.pts; av_packet_rescale_ts(&readPkt, ifmt_ctx->streams[videoIndex]->time_base, ofmt_ctx->streams[0]->time_base); if (readPkt.duration < 0) { readPkt.duration = 0; } if (readPkt.pts < readPkt.dts) { readPkt.pts = readPkt.dts + 1; } readPkt.stream_index = 0; //这里如果使用av_interleaved_write_frame 会导致有时候写的视频文件没有数据。 ret =av_write_frame(ofmt_ctx, &readPkt); if (ret < 0) { //break; std::cout << "write failed" << std::endl; } } av_packet_unref(&readPkt); } av_bitstream_filter_close(h264bsfc); av_packet_unref(&readPkt); av_write_trailer(ofmt_ctx); return true; }
// Concatenate a list of H.264 / AAC elementary-stream file pairs into one MP4.
/**
 * @param h264file  array of @length H.264 input paths (each up to 400 chars)
 * @param aacfile   array of @length AAC input paths
 * @param mp4       output MP4 path
 * @param length    number of input pairs to join
 * @param usefilter non-zero to run AAC through the aac_adtstoasc BSF
 *                  (required when muxing ADTS AAC into MP4)
 *
 * @return 0 on success, a negative step-specific code on failure
 *         (-1/-2 open audio/video, -3/-4 stream info, -5 output ctx,
 *          -6/-7 stream alloc/copy, -10 avio/filter, -11 header).
 *
 * Control flow: the `joinone` label re-runs the whole open/mux loop once per
 * input pair; only the first iteration (join_index == 0) creates the output
 * streams and writes the header. end_*_pts carry the accumulated timestamp
 * offset so each segment is appended after the previous one.
 */
int joinmp4(char (*h264file)[400], char (*aacfile)[400], char *mp4, int length, int usefilter)
{
	//AVOutputFormat *ofmt = NULL;
	AVPacket pkt;
	AVStream *out_vstream = NULL;
	AVStream *out_astream = NULL;
	AVFormatContext *ofmt_ctx = NULL;
	int join_index = 0;                      // which input pair we are on
	AVBitStreamFilterContext* aacbsfc = NULL;
	long last_video_pts = 0;                 // running end timestamps of the
	long last_audio_pts = 0;                 // segment currently being written
	long end_video_pts = 0;                  // offsets applied to the next
	long end_audio_pts = 0;                  // segment's packets
	int videoindex_out = -1;
	int audioindex_out = -1;
	//Input AVFormatContext and Output AVFormatContext
	AVFormatContext * ifmt_ctx_v = NULL, *ifmt_ctx_a = NULL;
	int ret, i, retu = 0, filter_ret = 0;
	// int fps;
	int videoindex_v = -1;
	int audioindex_a = -1;
	int frame_index = 0;
	int64_t cur_pts_v = 0, cur_pts_a = 0;
	//set file path
	char *in_filename_v = h264file[join_index];
	char *in_filename_a = aacfile[join_index];
	char *out_filename = mp4;

joinone:
	// Reset all per-segment state; we come back here once per input pair.
	//Input AVFormatContext and Output AVFormatContext
	ifmt_ctx_v = NULL;
	ifmt_ctx_a = NULL;
	ret = 0;
	i = 0; retu = 0; filter_ret = 0;
	// int fps;
	videoindex_v = -1;
	audioindex_a = -1;
	frame_index = 0;
	cur_pts_v = 0; cur_pts_a = 0;
	//set file path
	in_filename_v = h264file[join_index];
	in_filename_a = aacfile[join_index];
	out_filename = mp4;

	//register before use
	av_register_all();
	//open Input and set avformatcontext
	if ((ret = avformat_open_input(&ifmt_ctx_a, in_filename_a, 0, 0)) < 0) {
		retu = -1;//-1 mean audio file opened failed
		goto end;
	}
	if ((ret = avformat_open_input(&ifmt_ctx_v, in_filename_v, 0, 0)) < 0) {
		retu = -2; //-2 mean video file opened failed
		goto end;
	}
	if ((ret = avformat_find_stream_info(ifmt_ctx_v, 0)) < 0) {
		retu = -3; //-3 mean get video info failed
		goto end;
	}
	if ((ret = avformat_find_stream_info(ifmt_ctx_a, 0)) < 0) {
		retu = -4;//-4 mean get audio info failed
		goto end;
	}

	//open Output (only once, on the first segment)
	if (join_index == 0) {
		avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
		if (!ofmt_ctx) {
			retu = -5;
			goto end;
		}
	}
	//ofmt = ofmt_ctx->oformat;

	//find all video stream input type
	for (i = 0; i < ifmt_ctx_v->nb_streams; i++) {
		//Create output AVStream according to input AVStream
		if (ifmt_ctx_v->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			AVStream *in_stream = ifmt_ctx_v->streams[i];
			videoindex_v = i;
			if (join_index == 0) {
				// NOTE(review): out_vstream is dereferenced before the
				// !out_vstream check below — the check comes too late to
				// protect against avformat_new_stream failure.
				out_vstream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
				videoindex_out = out_vstream->index;
				//Copy the settings of AVCodecContext
				if (avcodec_copy_context(out_vstream->codec, in_stream->codec) < 0) {
					retu = -7;
					goto end;
				}
				out_vstream->codec->codec_tag = 0;
				if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
					out_vstream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
			} else {
				// Later segments only extend the declared duration.
				out_vstream->duration += in_stream->duration;
				//printf("duration = %ld\n",out_vstream->duration);
			}
			if (!out_vstream) {
				retu = -6;
				goto end;
			}
			break;
		}
	}

	//find all audio stream input type
	for (i = 0; i < ifmt_ctx_a->nb_streams; i++) {
		//Create output AVStream according to input AVStream
		if (ifmt_ctx_a->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
			AVStream *in_stream = ifmt_ctx_a->streams[i];
			audioindex_a = i;
			if (join_index == 0) {
				out_astream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
				audioindex_out = out_astream->index;
				//Copy the settings of AVCodecContext
				if (avcodec_copy_context(out_astream->codec, in_stream->codec) < 0) {
					retu = -7;
					goto end;
				}
				out_astream->codec->codec_tag = 0;
				if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
					out_astream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
			} else {
				out_astream->duration += in_stream->duration;
				//printf("duration = %ld\n",out_astream->duration);
			}
			if (!out_astream) {
				retu = -6;
				goto end;
			}
			break;
		}
	}

	if (join_index == 0) {
		//Open output file
		if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
			if (avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE) < 0) {
				retu = -10;
				goto end;
			}
		}
		//Write file header
		if (avformat_write_header(ofmt_ctx, NULL) < 0) {
			retu = -11;
			goto end;
		}
	}

	// ADTS->ASC conversion is mandatory for AAC-in-MP4; init once.
	if (usefilter && aacbsfc == NULL)
		aacbsfc = av_bitstream_filter_init("aac_adtstoasc");

	while (true) {
		AVFormatContext *ifmt_ctx;
		int stream_index = 0;
		AVStream *in_stream, *out_stream;

		//Get an AVPacket: pick whichever input is earlier in time.
		if (av_compare_ts(cur_pts_v, ifmt_ctx_v->streams[videoindex_v]->time_base,
			cur_pts_a, ifmt_ctx_a->streams[audioindex_a]->time_base) <= 0) {
			ifmt_ctx = ifmt_ctx_v;
			stream_index = videoindex_out;
			if (av_read_frame(ifmt_ctx, &pkt) >= 0) {
				do {
					in_stream = ifmt_ctx->streams[pkt.stream_index];
					out_stream = out_vstream;
					if (pkt.stream_index == videoindex_v) {
						//Simple Write PTS: raw H.264 has no timestamps, so
						//synthesize them from the stream frame rate.
						if (pkt.pts == AV_NOPTS_VALUE) {
							//Write PTS
							AVRational time_base1 = in_stream->time_base;
							//Duration between 2 frames (us)
							int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
							//Parameters
							pkt.pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE);
							pkt.dts = pkt.pts;
							pkt.duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE);
							frame_index++;
						}
						cur_pts_v = pkt.pts;
						break;
					}
				} while (av_read_frame(ifmt_ctx, &pkt) >= 0);
			} else {
				//printf("pkt.duration = %ld\n",pkt.duration);
				// Video input exhausted: advance to the next segment and
				// latch the timestamp offsets for it.
				join_index++;
				end_video_pts = last_video_pts;
				end_audio_pts = last_audio_pts;
				break;
			}
		} else {
			ifmt_ctx = ifmt_ctx_a;
			stream_index = audioindex_out;
			if (av_read_frame(ifmt_ctx, &pkt) >= 0) {
				do {
					in_stream = ifmt_ctx->streams[pkt.stream_index];
					out_stream = out_astream;
					if (pkt.stream_index == audioindex_a) {
						//Simple Write PTS
						if (pkt.pts == AV_NOPTS_VALUE) {
							//Write PTS
							AVRational time_base1 = in_stream->time_base;
							//Duration between 2 frames (us)
							int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
							//Parameters
							pkt.pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE);
							pkt.dts = pkt.pts;
							pkt.duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE);
							frame_index++;
						}
						cur_pts_a = pkt.pts;
						break;
					}
				} while (av_read_frame(ifmt_ctx, &pkt) >= 0);
			} else {
				join_index++;
				end_video_pts = last_video_pts;
				end_audio_pts = last_audio_pts;
				break;
			}
		}

		// NOTE(review): the filter is applied to every packet, including
		// video, and its output buffer is never freed — leak per packet.
		if (usefilter)
			filter_ret = av_bitstream_filter_filter(aacbsfc, out_stream->codec, NULL,
				&pkt.data, &pkt.size, pkt.data, pkt.size, 0);
		if (filter_ret) {
			retu = -10;
			goto end;
		}

		//Convert PTS/DTS into the output stream time base.
		pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base,
			(enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
		pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base,
			(enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
		pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
		pkt.pos = -1;
		pkt.stream_index = stream_index;

		// Offset this segment's timestamps past the end of the previous one.
		if (pkt.stream_index == audioindex_out) {
			pkt.pts += end_audio_pts;
			pkt.dts += end_audio_pts;
			last_audio_pts = pkt.pts + pkt.duration;
			// printf("audio pts = %lld ,audio dts = %lld\n",pkt.pts,pkt.dts);
		} else {
			pkt.pts += end_video_pts;
			pkt.dts += end_video_pts;
			last_video_pts = pkt.pts + pkt.duration;
		}

		//Write
		if (av_interleaved_write_frame(ofmt_ctx, &pkt) < 0) {
			av_free_packet(&pkt);
			break;
		}
		//av_packet_unref(&pkt);
		//av_interleaved_write_frame(ofmt_ctx, &pkt);
		av_free_packet(&pkt);
	}

end:
	// avformat_close_input NULLs the pointers, so the free calls below are
	// harmless no-ops rather than double-frees.
	avformat_close_input(&ifmt_ctx_v);
	avformat_close_input(&ifmt_ctx_a);
	avformat_free_context(ifmt_ctx_v);
	avformat_free_context(ifmt_ctx_a);
	if (ret < 0 && ret != AVERROR_EOF) {
	}
	// More segments to append? Loop back with join_index advanced.
	if (join_index < length)
		goto joinone;
	av_write_trailer(ofmt_ctx);
	if (usefilter)
		av_bitstream_filter_close(aacbsfc);
	/* close output */
	if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
		avio_close(ofmt_ctx->pb);
	avformat_free_context(ofmt_ctx);
	return retu;
}
/**
 * Initialise the global AAC ADTS->ASC bitstream filter context (aacbsf).
 *
 * @param type  currently unused; kept so existing callers keep compiling.
 * @return 0 on success, -1 if the aac_adtstoasc filter is unavailable.
 *
 * Fix: the previous revision ignored av_bitstream_filter_init() failure and
 * always returned 0, leaving aacbsf NULL for later filter calls.
 */
int ff_filter_init(int type)
{
    (void)type; /* unused for now */

    aacbsf = av_bitstream_filter_init("aac_adtstoasc");
    if (!aacbsf)
        return -1;
    return 0;
}
/**
 * Open and configure the CrystalHD hardware decoder for this codec context.
 *
 * For AVC1 (length-prefixed H.264) the h264_mp4toannexb BSF is run once with
 * empty input purely for its side effect of rewriting avctx->extradata into
 * Annex B form (SPS/PPS with start codes); the converted bytes are kept in
 * priv->sps_pps_buf while the original extradata is restored on the context.
 *
 * Returns 0 on success, negative AVERROR on failure.
 */
static av_cold int init(AVCodecContext *avctx)
{
    CHDContext* priv;
    BC_STATUS ret;
    BC_INFO_CRYSTAL version;
    BC_INPUT_FORMAT format = {
        .FGTEnable   = FALSE,
        .Progressive = TRUE,
        .OptFlags    = 0x80000000 | vdecFrameRate59_94 | 0x40,
        .width       = avctx->width,
        .height      = avctx->height,
    };
    BC_MEDIA_SUBTYPE subtype;

    uint32_t mode = DTS_PLAYBACK_MODE |
                    DTS_LOAD_FILE_PLAY_FW |
                    DTS_SKIP_TX_CHK_CPB |
                    DTS_PLAYBACK_DROP_RPT_MODE |
                    DTS_SINGLE_THREADED_MODE |
                    DTS_DFLT_RESOLUTION(vdecRESOLUTION_1080p23_976);

    av_log(avctx, AV_LOG_VERBOSE, "CrystalHD Init for %s\n",
           avctx->codec->name);

    // The hardware outputs packed 4:2:2.
    avctx->pix_fmt = PIX_FMT_YUYV422;

    /* Initialize the library */
    priv               = avctx->priv_data;
    priv->avctx        = avctx;
    // extradata starting with 1 => AVCC (length-prefixed NALs), not Annex B.
    priv->is_nal       = avctx->extradata_size > 0 && *(avctx->extradata) == 1;
    priv->last_picture = -1;
    priv->decode_wait  = BASE_WAIT;

    subtype = id2subtype(priv, avctx->codec->id);
    switch (subtype) {
    case BC_MSUBTYPE_AVC1:
        {
            uint8_t *dummy_p;
            int dummy_int;
            AVBitStreamFilterContext *bsfc;

            // Preserve the original AVCC extradata; the BSF below rewrites
            // avctx->extradata in place.
            uint32_t orig_data_size = avctx->extradata_size;
            uint8_t *orig_data = av_malloc(orig_data_size);
            if (!orig_data) {
                av_log(avctx, AV_LOG_ERROR,
                       "Failed to allocate copy of extradata\n");
                return AVERROR(ENOMEM);
            }
            memcpy(orig_data, avctx->extradata, orig_data_size);

            bsfc = av_bitstream_filter_init("h264_mp4toannexb");
            if (!bsfc) {
                av_log(avctx, AV_LOG_ERROR,
                       "Cannot open the h264_mp4toannexb BSF!\n");
                av_free(orig_data);
                return AVERROR_BSF_NOT_FOUND;
            }
            // Zero-length filter call: only the extradata side effect is
            // wanted. NOTE(review): the return value is not checked.
            av_bitstream_filter_filter(bsfc, avctx, NULL, &dummy_p,
                                       &dummy_int, NULL, 0, 0);
            av_bitstream_filter_close(bsfc);

            // Keep the Annex-B SPS/PPS for the hardware; give the context its
            // original extradata back.
            priv->sps_pps_buf     = avctx->extradata;
            priv->sps_pps_size    = avctx->extradata_size;
            avctx->extradata      = orig_data;
            avctx->extradata_size = orig_data_size;

            format.pMetaData   = priv->sps_pps_buf;
            format.metaDataSz  = priv->sps_pps_size;
            // NAL length-prefix size encoded in the AVCC header low bits.
            format.startCodeSz = (avctx->extradata[4] & 0x03) + 1;
        }
        break;
    case BC_MSUBTYPE_H264:
        format.startCodeSz = 4;
        // Fall-through
    case BC_MSUBTYPE_VC1:
    case BC_MSUBTYPE_WVC1:
    case BC_MSUBTYPE_WMV3:
    case BC_MSUBTYPE_WMVA:
    case BC_MSUBTYPE_MPEG2VIDEO:
    case BC_MSUBTYPE_DIVX:
    case BC_MSUBTYPE_DIVX311:
        format.pMetaData  = avctx->extradata;
        format.metaDataSz = avctx->extradata_size;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: Unknown codec name\n");
        return AVERROR(EINVAL);
    }
    format.mSubtype = subtype;

    /* Get a decoder instance */
    av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: starting up\n");
    // Initialize the Link and Decoder devices
    ret = DtsDeviceOpen(&priv->dev, mode);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: DtsDeviceOpen failed\n");
        goto fail;
    }

    ret = DtsCrystalHDVersion(priv->dev, &version);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_VERBOSE,
               "CrystalHD: DtsCrystalHDVersion failed\n");
        goto fail;
    }
    // device id 0 identifies the older BCM70012 board.
    priv->is_70012 = version.device == 0;

    if (priv->is_70012 &&
        (subtype == BC_MSUBTYPE_DIVX || subtype == BC_MSUBTYPE_DIVX311)) {
        av_log(avctx, AV_LOG_VERBOSE,
               "CrystalHD: BCM70012 doesn't support MPEG4-ASP/DivX/Xvid\n");
        goto fail;
    }

    ret = DtsSetInputFormat(priv->dev, &format);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: SetInputFormat failed\n");
        goto fail;
    }

    ret = DtsOpenDecoder(priv->dev, BC_STREAM_TYPE_ES);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsOpenDecoder failed\n");
        goto fail;
    }

    ret = DtsSetColorSpace(priv->dev, OUTPUT_MODE422_YUY2);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsSetColorSpace failed\n");
        goto fail;
    }

    ret = DtsStartDecoder(priv->dev);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsStartDecoder failed\n");
        goto fail;
    }

    ret = DtsStartCapture(priv->dev);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsStartCapture failed\n");
        goto fail;
    }

    av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Init complete.\n");

    return 0;

 fail:
    uninit(avctx);
    return -1;
}

/*
 * The CrystalHD doesn't report interlaced H.264 content in a way that allows
 * us to distinguish between specific cases that require different handling.
 * So, for now, we have to hard-code the behaviour we want.
 *
 * The default behaviour is to assume MBAFF with input and output fieldpairs.
 *
 * Define ASSUME_PAFF_OVER_MBAFF to treat input as PAFF with separate input
 * and output fields.
 *
 * Define ASSUME_TWO_INPUTS_ONE_OUTPUT to treat input as separate fields but
 * output as a single fieldpair.
 *
 * Define both to mess up your playback.
 */
#define ASSUME_PAFF_OVER_MBAFF 0
#define ASSUME_TWO_INPUTS_ONE_OUTPUT 0

/**
 * Copy one decoded picture (or field) out of the hardware into priv->pic.
 *
 * Decides the interlacing mode from the hardware flags plus the PAFF/MBAFF
 * policy macros above, copies the packed plane (field-interleaved when
 * interlaced), attaches the pts popped from the opaque list, and reports the
 * frame once both fields have arrived.
 *
 * Returns RET_OK, RET_ERROR or RET_SKIP_NEXT_COPY.
 */
static inline CopyRet copy_frame(AVCodecContext *avctx,
                                 BC_DTS_PROC_OUT *output,
                                 void *data, int *data_size,
                                 uint8_t second_field)
{
    BC_STATUS ret;
    BC_DTS_STATUS decoder_status;
    uint8_t is_paff;
    uint8_t next_frame_same;
    uint8_t interlaced;

    CHDContext *priv = avctx->priv_data;

    uint8_t bottom_field = (output->PicInfo.flags & VDEC_FLAG_BOTTOMFIELD) ==
                           VDEC_FLAG_BOTTOMFIELD;
    uint8_t bottom_first = !!(output->PicInfo.flags & VDEC_FLAG_BOTTOM_FIRST);

    int width    = output->PicInfo.width;
    int height   = output->PicInfo.height;
    int bwidth;
    uint8_t *src = output->Ybuff;
    int sStride;
    uint8_t *dst;
    int dStride;

    ret = DtsGetDriverStatus(priv->dev, &decoder_status);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR,
               "CrystalHD: GetDriverStatus failed: %u\n", ret);
        return RET_ERROR;
    }

    // PAFF detection: either forced by policy, or the hardware knows the
    // source type. The 0x40000000 bit is masked off the next-picture number.
    is_paff           = ASSUME_PAFF_OVER_MBAFF ||
                        !(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC);
    next_frame_same   = output->PicInfo.picture_number ==
                        (decoder_status.picNumFlags & ~0x40000000);
    interlaced        = ((output->PicInfo.flags &
                          VDEC_FLAG_INTERLACED_SRC) && is_paff) ||
                         next_frame_same || bottom_field || second_field;

    av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: next_frame_same: %u | %u | %u\n",
           next_frame_same, output->PicInfo.picture_number,
           decoder_status.picNumFlags & ~0x40000000);

    // Release the previous frame's buffer only once both its fields are done.
    if (priv->pic.data[0] && !priv->need_second_field)
        avctx->release_buffer(avctx, &priv->pic);

    priv->need_second_field = interlaced && !priv->need_second_field;

    priv->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
                             FF_BUFFER_HINTS_REUSABLE;
    if (!priv->pic.data[0]) {
        if (avctx->get_buffer(avctx, &priv->pic) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return RET_ERROR;
        }
    }

    bwidth = av_image_get_linesize(avctx->pix_fmt, width, 0);
    if (priv->is_70012) {
        // The BCM70012 pads lines out to fixed strides.
        // NOTE(review): the `width <= 1080` branch is unreachable after
        // `width <= 1280`, and pStride is left uninitialized for
        // width > 1280 — confirm intended strides.
        int pStride;

        if (width <= 720)
            pStride = 720;
        else if (width <= 1280)
            pStride = 1280;
        else if (width <= 1080)
            pStride = 1080;
        sStride = av_image_get_linesize(avctx->pix_fmt, pStride, 0);
    } else {
        sStride = bwidth;
    }

    dStride = priv->pic.linesize[0];
    dst     = priv->pic.data[0];

    av_log(priv->avctx, AV_LOG_VERBOSE, "CrystalHD: Copying out frame\n");

    if (interlaced) {
        int dY = 0;
        int sY = 0;

        height /= 2;
        if (bottom_field) {
            av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: bottom field\n");
            dY = 1;
        } else {
            av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: top field\n");
            dY = 0;
        }

        // Write every other destination line, interleaving the two fields.
        for (sY = 0; sY < height; dY++, sY++) {
            memcpy(&(dst[dY * dStride]), &(src[sY * sStride]), bwidth);
            dY++;
        }
    } else {
        av_image_copy_plane(dst, dStride, src, sStride, bwidth, height);
    }

    priv->pic.interlaced_frame = interlaced;
    if (interlaced)
        priv->pic.top_field_first = !bottom_first;

    if (output->PicInfo.timeStamp != 0) {
        priv->pic.pkt_pts = opaque_list_pop(priv, output->PicInfo.timeStamp);
        av_log(avctx, AV_LOG_VERBOSE, "output \"pts\": %"PRIu64"\n",
               priv->pic.pkt_pts);
    }

    if (!priv->need_second_field) {
        *data_size       = sizeof(AVFrame);
        *(AVFrame *)data = priv->pic;
    }

    if (ASSUME_TWO_INPUTS_ONE_OUTPUT &&
        output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) {
        av_log(priv->avctx, AV_LOG_VERBOSE, "Fieldpair from two packets.\n");
        return RET_SKIP_NEXT_COPY;
    }

    return RET_OK;
}
int muxer_mp4(void* noUse) { AVOutputFormat *ofmt = NULL; //Input AVFormatContext and Output AVFormatContext AVFormatContext *ifmt_ctx_v = NULL, *ifmt_ctx_a = NULL, *ofmt_ctx = NULL; AVPacket pkt; int ret, i; int videoindex_v = -1, videoindex_out = -1; int audioindex_a = -1, audioindex_out = -1; int frame_index = 0; int64_t cur_pts_v = 0, cur_pts_a = 0; //const char *in_filename_v = "cuc_ieschool.ts";//Input file URL const char *in_filename_v = "../testResource/bigbuckbunny_480x272.h264"; //const char *in_filename_a = "cuc_ieschool.mp3"; //const char *in_filename_a = "gowest.m4a"; //const char *in_filename_a = "gowest.aac"; const char *in_filename_a = "../testResource/WavinFlag.aac"; const char *out_filename = "bigbuckbunny.mp4";//Output file URL av_register_all(); //Input if ((ret = avformat_open_input(&ifmt_ctx_v, in_filename_v, 0, 0)) < 0) { printf("Could not open input file."); goto end; } if ((ret = avformat_find_stream_info(ifmt_ctx_v, 0)) < 0) { printf("Failed to retrieve input stream information"); goto end; } if ((ret = avformat_open_input(&ifmt_ctx_a, in_filename_a, 0, 0)) < 0) { printf("Could not open input file."); goto end; } if ((ret = avformat_find_stream_info(ifmt_ctx_a, 0)) < 0) { printf("Failed to retrieve input stream information"); goto end; } printf("===========Input Information==========\n"); av_dump_format(ifmt_ctx_v, 0, in_filename_v, 0); av_dump_format(ifmt_ctx_a, 0, in_filename_a, 0); printf("======================================\n"); //Output avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename); if (!ofmt_ctx) { printf("Could not create output context\n"); ret = AVERROR_UNKNOWN; goto end; } ofmt = ofmt_ctx->oformat; unsigned char* outbuffer = NULL; outbuffer = (unsigned char*)av_malloc(32768); AVIOContext *avio_out = avio_alloc_context(outbuffer, 32768, 0, NULL, NULL, write_buffer, NULL); if (avio_out == NULL) goto end; ofmt_ctx->pb = avio_out; ofmt_ctx->flags = AVFMT_FLAG_CUSTOM_IO; for (i = 0; i < ifmt_ctx_v->nb_streams; 
i++) { //Create output AVStream according to input AVStream if (ifmt_ctx_v->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) { AVStream *in_stream = ifmt_ctx_v->streams[i]; AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec); videoindex_v = i; if (!out_stream) { printf("Failed allocating output stream\n"); ret = AVERROR_UNKNOWN; goto end; } videoindex_out = out_stream->index; //Copy the settings of AVCodecContext if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0) { printf("Failed to copy context from input to output stream codec context\n"); goto end; } out_stream->codec->codec_tag = 0; if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; break; } } for (i = 0; i < ifmt_ctx_a->nb_streams; i++) { //Create output AVStream according to input AVStream if (ifmt_ctx_a->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) { AVStream *in_stream = ifmt_ctx_a->streams[i]; AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec); audioindex_a = i; if (!out_stream) { printf("Failed allocating output stream\n"); ret = AVERROR_UNKNOWN; goto end; } audioindex_out = out_stream->index; //Copy the settings of AVCodecContext if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0) { printf("Failed to copy context from input to output stream codec context\n"); goto end; } out_stream->codec->codec_tag = 0; if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; break; } } printf("==========Output Information==========\n"); av_dump_format(ofmt_ctx, 0, out_filename, 1); printf("======================================\n"); //Open output file if (!(ofmt->flags & AVFMT_NOFILE)) { if (avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE) < 0) { printf("Could not open output file '%s'", out_filename); goto end; } } //Write file header if (avformat_write_header(ofmt_ctx, NULL) < 0) { printf("Error occurred when 
opening output file\n"); goto end; } //FIX #if USE_H264BSF AVBitStreamFilterContext* h264bsfc = av_bitstream_filter_init("h264_mp4toannexb"); #endif #if USE_AACBSF AVBitStreamFilterContext* aacbsfc = av_bitstream_filter_init("aac_adtstoasc"); #endif while (1) { AVFormatContext *ifmt_ctx; int stream_index = 0; AVStream *in_stream, *out_stream; //Get an AVPacket if (av_compare_ts(cur_pts_v, ifmt_ctx_v->streams[videoindex_v]->time_base, cur_pts_a, ifmt_ctx_a->streams[audioindex_a]->time_base) <= 0) { ifmt_ctx = ifmt_ctx_v; stream_index = videoindex_out; if (av_read_frame(ifmt_ctx, &pkt) >= 0) { do { in_stream = ifmt_ctx->streams[pkt.stream_index]; out_stream = ofmt_ctx->streams[stream_index]; if (pkt.stream_index == videoindex_v) { //FIX£ºNo PTS (Example: Raw H.264) //Simple Write PTS if (pkt.pts == AV_NOPTS_VALUE) { //Write PTS AVRational time_base1 = in_stream->time_base; //Duration between 2 frames (us) int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate); //Parameters pkt.pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE); pkt.dts = pkt.pts; pkt.duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE); frame_index++; } cur_pts_v = pkt.pts; break; } } while (av_read_frame(ifmt_ctx, &pkt) >= 0); } else { break; } } else { ifmt_ctx = ifmt_ctx_a; stream_index = audioindex_out; if (av_read_frame(ifmt_ctx, &pkt) >= 0) { do { in_stream = ifmt_ctx->streams[pkt.stream_index]; out_stream = ofmt_ctx->streams[stream_index]; if (pkt.stream_index == audioindex_a) { //FIX£ºNo PTS //Simple Write PTS if (pkt.pts == AV_NOPTS_VALUE) { //Write PTS AVRational time_base1 = in_stream->time_base; //Duration between 2 frames (us) int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate); //Parameters pkt.pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE); pkt.dts = pkt.pts; pkt.duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE); 
frame_index++; } cur_pts_a = pkt.pts; break; } } while (av_read_frame(ifmt_ctx, &pkt) >= 0); } else { break; } } //FIX:Bitstream Filter #if USE_H264BSF av_bitstream_filter_filter(h264bsfc, in_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0); #endif #if USE_AACBSF av_bitstream_filter_filter(aacbsfc, out_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0); #endif //Convert PTS/DTS pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX)); pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX)); pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base); pkt.pos = -1; pkt.stream_index = stream_index; printf("Write 1 Packet. size:%5d\tpts:%lld\n", pkt.size, pkt.pts); //Write if (av_interleaved_write_frame(ofmt_ctx, &pkt) < 0) { printf("Error muxing packet\n"); break; } av_free_packet(&pkt); } //Write file trailer av_write_trailer(ofmt_ctx); #if USE_H264BSF av_bitstream_filter_close(h264bsfc); #endif #if USE_AACBSF av_bitstream_filter_close(aacbsfc); #endif end: avformat_close_input(&ifmt_ctx_v); avformat_close_input(&ifmt_ctx_a); /* close output */ if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE)) avio_close(ofmt_ctx->pb); avformat_free_context(ofmt_ctx); if (ret < 0 && ret != AVERROR_EOF) { printf("Error occurred.\n"); return -1; } return 0; }