int main(int argc, char* argv[])
{
    AVOutputFormat *ofmt = NULL;
    // Input AVFormatContext and Output AVFormatContext
    AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
    AVPacket pkt;
    const char *in_filename, *out_filename;
    int ret, i;
    int videoindex = -1;
    int frame_index = 0;
    int64_t start_time = 0;

    //in_filename = "cuc_ieschool.mov";
    //in_filename = "cuc_ieschool.mkv";
    //in_filename = "cuc_ieschool.ts";
    //in_filename = "cuc_ieschool.mp4";
    //in_filename = "cuc_ieschool.h264";
    in_filename = "cuc_ieschool.flv"; // Input file URL
    //in_filename = "shanghai03_p.h264";

    //out_filename = "rtmp://localhost/publishlive/livestream"; // Output URL [RTMP]
    out_filename = "http://192.168.0.53:10000"; // Output URL [HTTP]
    //out_filename = "rtp://233.233.233.233:6666"; // Output URL [UDP]

    av_register_all();
    // Network
    avformat_network_init();

    // Input
    if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
        printf("Could not open input file.");
        goto end;
    }
    if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
        printf("Failed to retrieve input stream information");
        goto end;
    }

    for (i = 0; i < ifmt_ctx->nb_streams; i++)
        if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoindex = i;
            break;
        }

    av_dump_format(ifmt_ctx, 0, in_filename, 0);

    // Output
    avformat_alloc_output_context2(&ofmt_ctx, NULL, "flv", out_filename); // RTMP
    //avformat_alloc_output_context2(&ofmt_ctx, NULL, "mpegts", out_filename); // UDP
    if (!ofmt_ctx) {
        printf("Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }
    ofmt = ofmt_ctx->oformat;
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        // Create output AVStream according to input AVStream
        AVStream *in_stream = ifmt_ctx->streams[i];
        AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
        if (!out_stream) {
            printf("Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }
        // Copy the settings of AVCodecContext
        ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
        if (ret < 0) {
            printf("Failed to copy context from input to output stream codec context\n");
            goto end;
        }
        out_stream->codec->codec_tag = 0;
        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
            out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }

    // Dump format
    av_dump_format(ofmt_ctx, 0, out_filename, 1);

    // Open output URL
    if (!(ofmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            printf("Could not open output URL '%s'", out_filename);
            goto end;
        }
    }

    // Write file header
    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        printf("Error occurred when opening output URL\n");
        goto end;
    }

    start_time = av_gettime();
    while (1) {
        AVStream *in_stream, *out_stream;
        // Get an AVPacket
        ret = av_read_frame(ifmt_ctx, &pkt);
        if (ret < 0)
            break;

        // FIX: no PTS (example: raw H.264)
        // Simple write PTS
        if (pkt.pts == AV_NOPTS_VALUE) {
            // Write PTS
            AVRational time_base1 = ifmt_ctx->streams[videoindex]->time_base;
            // Duration between two frames (us)
            int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(ifmt_ctx->streams[videoindex]->r_frame_rate);
            // Parameters
            pkt.pts = (double)(frame_index * calc_duration) / (double)(av_q2d(time_base1) * AV_TIME_BASE);
            pkt.dts = pkt.pts;
            pkt.duration = (double)calc_duration / (double)(av_q2d(time_base1) * AV_TIME_BASE);
        }

        // Important: delay so packets are sent at the pace of their timestamps
        if (pkt.stream_index == videoindex) {
            AVRational time_base = ifmt_ctx->streams[videoindex]->time_base;
            AVRational time_base_q = {1, AV_TIME_BASE};
            int64_t pts_time = av_rescale_q(pkt.dts, time_base, time_base_q);
            int64_t now_time = av_gettime() - start_time;
            if (pts_time > now_time)
                av_usleep(pts_time - now_time);
        }

        in_stream  = ifmt_ctx->streams[pkt.stream_index];
        out_stream = ofmt_ctx->streams[pkt.stream_index];
        /* copy packet */
        // Convert PTS/DTS
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base,
                                   (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base,
                                   (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;

        // Print to screen
        if (pkt.stream_index == videoindex) {
            printf("Send %8d video frames to output URL\n", frame_index);
            frame_index++;
        }

        //ret = av_write_frame(ofmt_ctx, &pkt);
        ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
        if (ret < 0) {
            char err[1024] = { 0 };
            av_strerror(ret, err, 1024);
            printf("Error muxing packet: %s\n", err);
            break;
        }
        av_free_packet(&pkt);
    }
    // Write file trailer
    av_write_trailer(ofmt_ctx);

end:
    avformat_close_input(&ifmt_ctx);
    /* close output */
    if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
        avio_close(ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);
    if (ret < 0 && ret != AVERROR_EOF) {
        printf("Error occurred.\n");
        return -1;
    }
    return 0;
}
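Several error paths above only print a fixed string, while the av_strerror()-based snippets later in this collection show how much more useful the real message is. A minimal sketch of a helper that could replace those bare printf() calls (the helper name is illustrative, not from the original source):

#include <stdio.h>
#include <libavutil/error.h>

/* Log a descriptive message for an FFmpeg error code. */
static void log_av_error(const char *what, int errnum)
{
    char errbuf[AV_ERROR_MAX_STRING_SIZE] = {0};
    if (av_strerror(errnum, errbuf, sizeof(errbuf)) < 0)
        snprintf(errbuf, sizeof(errbuf), "unknown error %d", errnum);
    printf("%s: %s\n", what, errbuf);
}

/* Usage: log_av_error("Could not open input file", ret); */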
/**
 * ffmpeg_put_frame
 *      Encodes and writes a video frame using the av_write_frame API. This is
 *      a helper function for ffmpeg_put_image and ffmpeg_put_other_image.
 *
 * Returns
 *      Number of bytes written or -1 if any error happens.
 */
int ffmpeg_put_frame(struct ffmpeg *ffmpeg, AVFrame *pic)
{
/**
 * Since the logic, return values and conditions changed so
 * dramatically between versions, the encoding of the frame
 * is 100% blocked based upon Libav/FFmpeg version
 */
#if (LIBAVFORMAT_VERSION_MAJOR >= 55) || ((LIBAVFORMAT_VERSION_MAJOR == 54) && (LIBAVFORMAT_VERSION_MINOR > 6))
    int retcd;
    int got_packet_ptr;
    AVPacket pkt;
    char errstr[128];

    av_init_packet(&pkt); /* Init static structure. */
    if (ffmpeg->oc->oformat->flags & AVFMT_RAWPICTURE) {
        pkt.stream_index = ffmpeg->video_st->index;
        pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.data = (uint8_t *)pic;
        pkt.size = sizeof(AVPicture);
    } else {
        pkt.data = NULL;
        pkt.size = 0;
        retcd = avcodec_encode_video2(AVSTREAM_CODEC_PTR(ffmpeg->video_st),
                                      &pkt, pic, &got_packet_ptr);
        if (retcd < 0) {
            av_strerror(retcd, errstr, sizeof(errstr));
            MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO,
                       "%s: Error encoding video:%s", errstr);
            // Packet is freed upon failure of encoding
            return -1;
        }
        if (got_packet_ptr == 0) {
            // Buffered packet. Throw special return code
            av_free_packet(&pkt);
            return -2;
        }
        if (pkt.pts != AV_NOPTS_VALUE)
            pkt.pts = av_rescale_q(pkt.pts,
                                   ffmpeg->video_st->codec->time_base,
                                   ffmpeg->video_st->time_base);
        if (pkt.dts != AV_NOPTS_VALUE)
            pkt.dts = av_rescale_q(pkt.dts,
                                   ffmpeg->video_st->codec->time_base,
                                   ffmpeg->video_st->time_base);
    }

    if (ffmpeg->tlapse == TIMELAPSE_APPEND) {
        retcd = timelapse_append(ffmpeg, pkt);
    } else {
        retcd = av_write_frame(ffmpeg->oc, &pkt);
    }
    av_free_packet(&pkt);

    if (retcd != 0) {
        MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO,
                   "%s: Error while writing video frame");
        ffmpeg_cleanups(ffmpeg);
        return -1;
    }
    return retcd;

#else // Old versions of Libav/FFmpeg
    int retcd;
    AVPacket pkt;

    av_init_packet(&pkt); /* Init static structure. */
    pkt.stream_index = ffmpeg->video_st->index;
    if (ffmpeg->oc->oformat->flags & AVFMT_RAWPICTURE) {
        // Raw video case.
        pkt.size = sizeof(AVPicture);
        pkt.data = (uint8_t *)pic;
        pkt.flags |= AV_PKT_FLAG_KEY;
    } else {
        retcd = avcodec_encode_video(AVSTREAM_CODEC_PTR(ffmpeg->video_st),
                                     ffmpeg->video_outbuf,
                                     ffmpeg->video_outbuf_size, pic);
        if (retcd < 0) {
            MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: Error encoding video");
            av_free_packet(&pkt);
            return -1;
        }
        if (retcd == 0) {
            // No bytes encoded => buffered => special handling
            av_free_packet(&pkt);
            return -2;
        }
        pkt.size = retcd;
        pkt.data = ffmpeg->video_outbuf;
        pkt.pts = AVSTREAM_CODEC_PTR(ffmpeg->video_st)->coded_frame->pts;
        if (AVSTREAM_CODEC_PTR(ffmpeg->video_st)->coded_frame->key_frame)
            pkt.flags |= AV_PKT_FLAG_KEY;
    }

    if (ffmpeg->tlapse == TIMELAPSE_APPEND) {
        retcd = timelapse_append(ffmpeg, pkt);
    } else {
        retcd = av_write_frame(ffmpeg->oc, &pkt);
    }

    if (retcd != 0) {
        MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: Error while writing video frame");
        ffmpeg_cleanups(ffmpeg);
        return -1;
    }
    return retcd;
#endif
}
// abuffer -> volume -> asplit for each audio format
//                   -> aformat -> abuffersink
// if the volume gain is > 1.0, we use a compand filter instead
// for soft limiting.
static int init_filter_graph(struct GroovePlaylist *playlist, struct GrooveFile *file)
{
    struct GroovePlaylistPrivate *p = (struct GroovePlaylistPrivate *) playlist;
    struct GrooveFilePrivate *f = (struct GrooveFilePrivate *) file;

    // destruct old graph
    avfilter_graph_free(&p->filter_graph);

    // create new graph
    p->filter_graph = avfilter_graph_alloc();
    if (!p->filter_graph) {
        av_log(NULL, AV_LOG_ERROR, "unable to create filter graph: out of memory\n");
        return -1;
    }

    AVFilter *abuffer = avfilter_get_by_name("abuffer");
    AVFilter *volume = avfilter_get_by_name("volume");
    AVFilter *compand = avfilter_get_by_name("compand");
    AVFilter *asplit = avfilter_get_by_name("asplit");
    AVFilter *aformat = avfilter_get_by_name("aformat");
    AVFilter *abuffersink = avfilter_get_by_name("abuffersink");

    int err;
    // create abuffer filter
    AVCodecContext *avctx = f->audio_st->codec;
    AVRational time_base = f->audio_st->time_base;
    snprintf(p->strbuf, sizeof(p->strbuf),
             "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
             time_base.num, time_base.den, avctx->sample_rate,
             av_get_sample_fmt_name(avctx->sample_fmt),
             avctx->channel_layout);
    av_log(NULL, AV_LOG_INFO, "abuffer: %s\n", p->strbuf);
    // save these values so we can compare later and check
    // whether we have to reconstruct the graph
    p->in_sample_rate = avctx->sample_rate;
    p->in_channel_layout = avctx->channel_layout;
    p->in_sample_fmt = avctx->sample_fmt;
    p->in_time_base = time_base;
    err = avfilter_graph_create_filter(&p->abuffer_ctx, abuffer,
                                       NULL, p->strbuf, NULL, p->filter_graph);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "error initializing abuffer filter\n");
        return err;
    }
    // as we create filters, this points the next source to link to
    AVFilterContext *audio_src_ctx = p->abuffer_ctx;

    // save the volume value so we can compare later and check
    // whether we have to reconstruct the graph
    p->filter_volume = p->volume;
    // if volume is  < 1.0, create volume filter
    //              == 1.0, do not create a filter
    //               > 1.0, create a compand filter (for soft limiting)
    double vol = p->volume;
    if (vol < 0.0) vol = 0.0;
    if (vol < 1.0) {
        snprintf(p->strbuf, sizeof(p->strbuf), "volume=%f", vol);
        av_log(NULL, AV_LOG_INFO, "volume: %s\n", p->strbuf);
        err = avfilter_graph_create_filter(&p->volume_ctx, volume, NULL,
                                           p->strbuf, NULL, p->filter_graph);
        if (err < 0) {
            av_log(NULL, AV_LOG_ERROR, "error initializing volume filter\n");
            return err;
        }
        err = avfilter_link(audio_src_ctx, 0, p->volume_ctx, 0);
        if (err < 0) {
            av_log(NULL, AV_LOG_ERROR, "unable to link filters\n");
            return err;
        }
        audio_src_ctx = p->volume_ctx;
    } else if (vol > 1.0) {
        double attack = 0.1;
        double decay = 0.2;
        const char *points = "-2/-2";
        double soft_knee = 0.02;
        double gain = gain_to_dB(vol);
        double volume_param = 0.0;
        double delay = 0.2;
        snprintf(p->strbuf, sizeof(p->strbuf), "%f:%f:%s:%f:%f:%f:%f",
                 attack, decay, points, soft_knee, gain, volume_param, delay);
        av_log(NULL, AV_LOG_INFO, "compand: %s\n", p->strbuf);
        err = avfilter_graph_create_filter(&p->compand_ctx, compand, NULL,
                                           p->strbuf, NULL, p->filter_graph);
        if (err < 0) {
            av_log(NULL, AV_LOG_ERROR, "error initializing compand filter\n");
            return err;
        }
        err = avfilter_link(audio_src_ctx, 0, p->compand_ctx, 0);
        if (err < 0) {
            av_log(NULL, AV_LOG_ERROR, "unable to link filters\n");
            return err;
        }
        audio_src_ctx = p->compand_ctx;
    } else {
        p->volume_ctx = NULL;
    }

    // if only one sink, no need for asplit
    if (p->sink_map_count < 2) {
        p->asplit_ctx = NULL;
    } else {
        snprintf(p->strbuf, sizeof(p->strbuf), "%d", p->sink_map_count);
        av_log(NULL, AV_LOG_INFO, "asplit: %s\n", p->strbuf);
        err = avfilter_graph_create_filter(&p->asplit_ctx, asplit, NULL,
                                           p->strbuf, NULL, p->filter_graph);
        if (err < 0) {
            av_log(NULL, AV_LOG_ERROR, "unable to create asplit filter\n");
            return err;
        }
        err = avfilter_link(audio_src_ctx, 0, p->asplit_ctx, 0);
        if (err < 0) {
            av_log(NULL, AV_LOG_ERROR, "unable to link to asplit\n");
            return err;
        }
        audio_src_ctx = p->asplit_ctx;
    }

    // for each audio format, create aformat and abuffersink filters
    struct SinkMap *map_item = p->sink_map;
    int pad_index = 0;
    while (map_item) {
        struct GrooveSink *example_sink = map_item->stack_head->sink;
        struct GrooveAudioFormat *audio_format = &example_sink->audio_format;

        AVFilterContext *inner_audio_src_ctx = audio_src_ctx;
        if (example_sink->disable_resample) {
            map_item->aformat_ctx = NULL;
        } else {
            // create aformat filter
            snprintf(p->strbuf, sizeof(p->strbuf),
                     "sample_fmts=%s:sample_rates=%d:channel_layouts=0x%"PRIx64,
                     av_get_sample_fmt_name((enum AVSampleFormat)audio_format->sample_fmt),
                     audio_format->sample_rate, audio_format->channel_layout);
            av_log(NULL, AV_LOG_INFO, "aformat: %s\n", p->strbuf);
            err = avfilter_graph_create_filter(&map_item->aformat_ctx, aformat,
                                               NULL, p->strbuf, NULL, p->filter_graph);
            if (err < 0) {
                av_strerror(err, p->strbuf, sizeof(p->strbuf));
                av_log(NULL, AV_LOG_ERROR, "unable to create aformat filter: %s\n", p->strbuf);
                return err;
            }
            err = avfilter_link(audio_src_ctx, pad_index, map_item->aformat_ctx, 0);
            if (err < 0) {
                av_log(NULL, AV_LOG_ERROR, "unable to link filters\n");
                return err;
            }
            inner_audio_src_ctx = map_item->aformat_ctx;
        }

        // create abuffersink filter
        err = avfilter_graph_create_filter(&map_item->abuffersink_ctx, abuffersink,
                                           NULL, NULL, NULL, p->filter_graph);
        if (err < 0) {
            av_log(NULL, AV_LOG_ERROR, "unable to create abuffersink filter\n");
            return err;
        }
        err = avfilter_link(inner_audio_src_ctx, 0, map_item->abuffersink_ctx, 0);
        if (err < 0) {
            av_log(NULL, AV_LOG_ERROR, "unable to link filters\n");
            return err;
        }

        pad_index += 1;
        map_item = map_item->next;
    }

    err = avfilter_graph_config(p->filter_graph, NULL);
    if (err < 0) {
        av_strerror(err, p->strbuf, sizeof(p->strbuf));
        av_log(NULL, AV_LOG_ERROR, "error configuring the filter graph: %s\n", p->strbuf);
        return err;
    }

    p->rebuild_filter_graph_flag = 0;

    return 0;
}
int main(int argc, char **argv)
{
    int ret;
    AVPacket packet;
    AVFrame *frame = av_frame_alloc();
    AVFrame *filt_frame = av_frame_alloc();
    int got_frame;

    if (!frame || !filt_frame) {
        perror("Could not allocate frame");
        exit(1);
    }
    if (argc != 2) {
        fprintf(stderr, "Usage: %s file | %s\n", argv[0], player);
        exit(1);
    }

    avcodec_register_all();
    av_register_all();
    avfilter_register_all();

    if ((ret = open_input_file(argv[1])) < 0)
        goto end;
    if ((ret = init_filters(filter_descr)) < 0)
        goto end;

    /* read all packets */
    while (1) {
        if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
            break;

        if (packet.stream_index == audio_stream_index) {
            avcodec_get_frame_defaults(frame);
            got_frame = 0;
            ret = avcodec_decode_audio4(dec_ctx, frame, &got_frame, &packet);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error decoding audio\n");
                continue;
            }

            if (got_frame) {
                /* push the audio data from decoded frame into the filtergraph */
                if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error while feeding the audio filtergraph\n");
                    break;
                }

                /* pull filtered audio from the filtergraph */
                while (1) {
                    ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
                    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                        break;
                    if (ret < 0)
                        goto end;
                    print_frame(filt_frame);
                    av_frame_unref(filt_frame);
                }
            }
        }
        av_free_packet(&packet);
    }

end:
    avfilter_graph_free(&filter_graph);
    if (dec_ctx)
        avcodec_close(dec_ctx);
    avformat_close_input(&fmt_ctx);
    av_frame_free(&frame);
    av_frame_free(&filt_frame);

    if (ret < 0 && ret != AVERROR_EOF) {
        char buf[1024];
        av_strerror(ret, buf, sizeof(buf));
        fprintf(stderr, "Error occurred: %s\n", buf);
        exit(1);
    }
    exit(0);
}
int main(int argc, char **argv)
{
    int bps = 0, ret, i;
    const char *input_url = NULL, *output_url = NULL;
    int64_t stream_pos = 0;
    int64_t start_time;
    char errbuf[50];
    AVIOContext *input, *output;

    av_register_all();
    avformat_network_init();

    for (i = 1; i < argc; i++) {
        if (!strcmp(argv[i], "-b")) {
            bps = atoi(argv[i + 1]);
            i++;
        } else if (!input_url) {
            input_url = argv[i];
        } else if (!output_url) {
            output_url = argv[i];
        } else {
            return usage(argv[0], 1);
        }
    }
    if (!output_url)
        return usage(argv[0], 1);

    ret = avio_open2(&input, input_url, AVIO_FLAG_READ, NULL, NULL);
    if (ret) {
        av_strerror(ret, errbuf, sizeof(errbuf));
        fprintf(stderr, "Unable to open %s: %s\n", input_url, errbuf);
        return 1;
    }
    ret = avio_open2(&output, output_url, AVIO_FLAG_WRITE, NULL, NULL);
    if (ret) {
        av_strerror(ret, errbuf, sizeof(errbuf));
        fprintf(stderr, "Unable to open %s: %s\n", output_url, errbuf);
        goto fail;
    }

    start_time = av_gettime();
    while (1) {
        uint8_t buf[1024];
        int n;
        n = avio_read(input, buf, sizeof(buf));
        if (n <= 0)
            break;
        avio_write(output, buf, n);
        stream_pos += n;
        if (bps) {
            avio_flush(output);
            /* Throttle: sleep while we are ahead of schedule, i.e. until
             * wall-clock time catches up with the bytes already written
             * at the requested rate (-b). */
            while ((av_gettime() - start_time) * bps / AV_TIME_BASE < stream_pos)
                usleep(50 * 1000);
        }
    }

    avio_flush(output);
    avio_close(output);

fail:
    avio_close(input);
    avformat_network_deinit();
    return ret ? 1 : 0;
}
int FfmpegCamera::Capture( Image &image )
{
    if (!mCanCapture){
        return -1;
    }

    // If the reopen thread has a value, but mCanCapture != 0, then we have just
    // reopened the connection to the ffmpeg device, and we can clean up the thread.
    if (mReopenThread != 0) {
        void *retval = 0;
        int ret;

        ret = pthread_tryjoin_np(mReopenThread, &retval);
        if (ret != 0){
            Error("Could not join reopen thread.");
        }

        Info( "Successfully reopened stream." );
        mReopenThread = 0;
    }

    AVPacket packet;
    uint8_t* directbuffer;

    /* Request a writeable buffer of the target image */
    directbuffer = image.WriteBuffer(width, height, colours, subpixelorder);
    if(directbuffer == NULL) {
        Error("Failed requesting writeable buffer for the captured image.");
        return (-1);
    }

    int frameComplete = false;
    while ( !frameComplete )
    {
        int avResult = av_read_frame( mFormatContext, &packet );
        if ( avResult < 0 )
        {
            char errbuf[AV_ERROR_MAX_STRING_SIZE];
            av_strerror(avResult, errbuf, AV_ERROR_MAX_STRING_SIZE);
            if (
                // Check if EOF.
                (avResult == AVERROR_EOF || (mFormatContext->pb && mFormatContext->pb->eof_reached)) ||
                // Check for Connection failure.
                (avResult == -110)
            )
            {
                Info( "av_read_frame returned \"%s\". Reopening stream.", errbuf );
                ReopenFfmpeg();
            }

            Error( "Unable to read packet from stream %d: error %d \"%s\".",
                   packet.stream_index, avResult, errbuf );
            return( -1 );
        }
        Debug( 5, "Got packet from stream %d", packet.stream_index );
        if ( packet.stream_index == mVideoStreamId )
        {
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(52, 25, 0)
            if ( avcodec_decode_video2( mCodecContext, mRawFrame, &frameComplete, &packet ) < 0 )
#else
            if ( avcodec_decode_video( mCodecContext, mRawFrame, &frameComplete, packet.data, packet.size ) < 0 )
#endif
                Fatal( "Unable to decode frame at frame %d", frameCount );

            Debug( 4, "Decoded video packet at frame %d", frameCount );

            if ( frameComplete )
            {
                Debug( 3, "Got frame %d", frameCount );

                avpicture_fill( (AVPicture *)mFrame, directbuffer, imagePixFormat, width, height );

#if HAVE_LIBSWSCALE
                if(mConvertContext == NULL) {
                    if(config.cpu_extensions && sseversion >= 20) {
                        mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height,
                                                          mCodecContext->pix_fmt, width, height, imagePixFormat,
                                                          SWS_BICUBIC | SWS_CPU_CAPS_SSE2, NULL, NULL, NULL );
                    } else {
                        mConvertContext = sws_getContext( mCodecContext->width, mCodecContext->height,
                                                          mCodecContext->pix_fmt, width, height, imagePixFormat,
                                                          SWS_BICUBIC, NULL, NULL, NULL );
                    }
                    if(mConvertContext == NULL)
                        Fatal( "Unable to create conversion context for %s", mPath.c_str() );
                }

                if ( sws_scale( mConvertContext, mRawFrame->data, mRawFrame->linesize, 0,
                                mCodecContext->height, mFrame->data, mFrame->linesize ) < 0 )
                    Fatal( "Unable to convert raw format %u to target format %u at frame %d",
                           mCodecContext->pix_fmt, imagePixFormat, frameCount );
#else // HAVE_LIBSWSCALE
                Fatal( "You must compile ffmpeg with the --enable-swscale option to use ffmpeg cameras" );
#endif // HAVE_LIBSWSCALE

                frameCount++;
            }
        }
        av_free_packet( &packet );
    }
    return (0);
}
void print_av_error(int err)
{
    char buffer[50];
    av_strerror(err, buffer, 50);
    printf("ffmpeg-mr Return Code: %d (%s)\n", err, buffer);
}
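print_av_error() truncates any message longer than its 50-byte buffer. FFmpeg ships AV_ERROR_MAX_STRING_SIZE in libavutil/error.h for sizing exactly this kind of buffer; a variant using it (a sketch, not from the ffmpeg-mr source):

#include <stdio.h>
#include <libavutil/error.h>

void print_av_error_sized(int err)
{
    char buffer[AV_ERROR_MAX_STRING_SIZE]; /* the library's own sizing (64 bytes) */
    av_strerror(err, buffer, sizeof(buffer));
    printf("ffmpeg-mr Return Code: %d (%s)\n", err, buffer);
}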
static std::string AvStrError(int errnum)
{
    char buf[128];
    av_strerror(errnum, buf, sizeof(buf));
    return std::string(buf);
}
int hb_audio_resample_update(hb_audio_resample_t *resample)
{
    if (resample == NULL)
    {
        hb_error("hb_audio_resample_update: resample is NULL");
        return 1;
    }

    int ret, resample_changed;

    resample->resample_needed =
        (resample->out.sample_fmt != resample->in.sample_fmt ||
         resample->out.channel_layout != resample->in.channel_layout);

    resample_changed =
        (resample->resample_needed &&
         (resample->resample.sample_fmt != resample->in.sample_fmt ||
          resample->resample.channel_layout != resample->in.channel_layout ||
          resample->resample.lfe_mix_level != resample->in.lfe_mix_level ||
          resample->resample.center_mix_level != resample->in.center_mix_level ||
          resample->resample.surround_mix_level != resample->in.surround_mix_level));

    if (resample_changed || (resample->resample_needed &&
                             resample->avresample == NULL))
    {
        if (resample->avresample == NULL)
        {
            resample->avresample = avresample_alloc_context();
            if (resample->avresample == NULL)
            {
                hb_error("hb_audio_resample_update: avresample_alloc_context() failed");
                return 1;
            }

            av_opt_set_int(resample->avresample, "out_sample_fmt",
                           resample->out.sample_fmt, 0);
            av_opt_set_int(resample->avresample, "out_channel_layout",
                           resample->out.channel_layout, 0);
            av_opt_set_int(resample->avresample, "matrix_encoding",
                           resample->out.matrix_encoding, 0);
            av_opt_set_int(resample->avresample, "normalize_mix_level",
                           resample->out.normalize_mix_level, 0);
        }
        else if (resample_changed)
        {
            avresample_close(resample->avresample);
        }

        av_opt_set_int(resample->avresample, "in_sample_fmt",
                       resample->in.sample_fmt, 0);
        av_opt_set_int(resample->avresample, "in_channel_layout",
                       resample->in.channel_layout, 0);
        av_opt_set_double(resample->avresample, "lfe_mix_level",
                          resample->in.lfe_mix_level, 0);
        av_opt_set_double(resample->avresample, "center_mix_level",
                          resample->in.center_mix_level, 0);
        av_opt_set_double(resample->avresample, "surround_mix_level",
                          resample->in.surround_mix_level, 0);

        if ((ret = avresample_open(resample->avresample)))
        {
            char err_desc[64];
            av_strerror(ret, err_desc, 63);
            hb_error("hb_audio_resample_update: avresample_open() failed (%s)",
                     err_desc);
            // avresample won't open, start over
            avresample_free(&resample->avresample);
            return ret;
        }

        resample->resample.sample_fmt = resample->in.sample_fmt;
        resample->resample.channel_layout = resample->in.channel_layout;
        resample->resample.channels =
            av_get_channel_layout_nb_channels(resample->in.channel_layout);
        resample->resample.lfe_mix_level = resample->in.lfe_mix_level;
        resample->resample.center_mix_level = resample->in.center_mix_level;
        resample->resample.surround_mix_level = resample->in.surround_mix_level;
    }

    return 0;
}
QString RtspStreamWorker::errorMessageFromCode(int errorCode)
{
    char error[512];
    av_strerror(errorCode, error, sizeof(error));
    return QString::fromLatin1(error);
}
static void printError (const BarSettings_t * const settings,
                        const char * const msg, int ret)
{
    char avmsg[128];
    av_strerror (ret, avmsg, sizeof (avmsg));
    BarUiMsg (settings, MSG_ERR, "%s (%s)\n", msg, avmsg);
}
/*
 * Class:     com_example_testffmpeg_CFFmpegJni
 * Method:    IPlay
 * Signature: ()I
 */
jint Java_com_example_testffmpeg_CFFmpegJni_IPlay(JNIEnv *env, jobject thiz)
{
    /* Return value */
    int nRet = -1;

    /* Close any previously opened input */
    if(NULL != m_pFormatCtx) {
        avformat_close_input(&m_pFormatCtx);
        /* Free the context */
        av_free(m_pFormatCtx);
        m_pFormatCtx = NULL;
    }

    if(NULL == m_pFormatCtx) {
        /* Open the file */
        if(0 != (nRet = avformat_open_input(&m_pFormatCtx, m_szURLPath, 0, NULL/*&m_pDictOptions*/))) {
            char szTemp[256];
            memset(szTemp, 0x00, sizeof(szTemp));
            av_strerror(nRet, szTemp, 255);
            /* Print the error message */
            LOGD("Failed to open %s, Error Code = %d, Error = %s", m_szURLPath, nRet, szTemp);
            return nRet;
        }
    }

    // m_pFormatCtx->max_analyze_duration = 1000;
    // m_pFormatCtx->probesize = 2048;
    if(0 > avformat_find_stream_info(m_pFormatCtx, NULL)) {
        LOGD("Couldn't find stream information.");
        return -1;
    }

    int nVideoIndex = -1;
    for(int i = 0; i < m_pFormatCtx->nb_streams; i++) {
        if(AVMEDIA_TYPE_VIDEO == m_pFormatCtx->streams[i]->codec->codec_type) {
            nVideoIndex = i;
            break;
        }
    }
    if(-1 == nVideoIndex) {
        LOGD("Didn't find a video stream.");
        return -1;
    }

    AVCodecContext* pCodecCtx = m_pFormatCtx->streams[nVideoIndex]->codec;
    AVCodec* pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if(NULL == pCodec) {
        LOGD("Codec not found.");
        return -1;
    }

    if(pCodec->capabilities & CODEC_CAP_TRUNCATED) {
        pCodecCtx->flags |= CODEC_FLAG_TRUNCATED;
    }

    if(0 > avcodec_open2(pCodecCtx, pCodec, NULL)) {
        LOGD("Could not open codec.");
        return -1;
    }

    /* Allocate the frames */
    AVFrame *pFrame = NULL, *pFrameYUV = NULL;
    pFrame = avcodec_alloc_frame();
    pFrameYUV = avcodec_alloc_frame();

    /* Create the conversion buffer */
    int nConvertSize = avpicture_get_size(PIX_FMT_RGB565, iWidth, iHeight);
    uint8_t* pConvertbuffer = new uint8_t[nConvertSize];
    avpicture_fill((AVPicture *)pFrameYUV, pConvertbuffer, PIX_FMT_RGB565, iWidth, iHeight);

    /* Decoding state */
    int nCodecRet, nHasGetPicture;

    /* Packet used while decoding */
    int nPackgeSize = pCodecCtx->width * pCodecCtx->height;
    AVPacket* pAVPacket = (AVPacket *)malloc(sizeof(AVPacket));
    av_new_packet(pAVPacket, nPackgeSize);

    /* Dump the stream information */
    av_dump_format(m_pFormatCtx, 0, m_szURLPath, 0);

    /* Set the playing state */
    m_bIsPlaying = true;

    /* Scaling context: convert the decoded pixel format to RGB565 */
    struct SwsContext* img_convert_ctx = NULL;
    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                                     iWidth, iHeight, PIX_FMT_RGB565, SWS_BICUBIC, NULL, NULL, NULL);

    /* Read frames */
    while(0 <= av_read_frame(m_pFormatCtx, pAVPacket) && true == m_bIsPlaying) {
        /* Is this a packet from the video stream? */
        if(nVideoIndex == pAVPacket->stream_index) {
            /* Decode the packet */
            nCodecRet = avcodec_decode_video2(pCodecCtx, pFrame, &nHasGetPicture, pAVPacket);
            if(0 < nHasGetPicture) {
                /* Convert the frame to RGB565 */
                sws_scale(img_convert_ctx, (const uint8_t* const* )pFrame->data, pFrame->linesize,
                          0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
                /* Hand the converted data to the display callback */
                e_DisplayCallBack(env, pConvertbuffer, nConvertSize);
            }
        }
        /* Free the packet that was allocated by av_read_frame */
        av_free_packet(pAVPacket);
    }

    /* Free the scaling context */
    sws_freeContext(img_convert_ctx);

    /* Free the conversion buffer */
    delete[] pConvertbuffer;
    pConvertbuffer = NULL;

    /* Free the frames */
    av_free(pFrame);
    pFrame = NULL;
    av_free(pFrameYUV);
    pFrameYUV = NULL;

    /* Close the codec and the input */
    avcodec_close(pCodecCtx);
    pCodecCtx = NULL;
    avformat_close_input(&m_pFormatCtx);
    av_free(m_pFormatCtx);
    m_pFormatCtx = NULL;

    return nRet;
}
int main(int argc, char *argv[])
{
    // char filename[]="/test400x240-mpeg4-witch.mp4";
    // char filename[]="/test400x240-witch.mp4";
    // char filename[]="/test800x400-witch-900kbps.mp4";
    // char filename[]="/test800x400-witch-1pass.mp4";
    // char filename[]="/test800x400-witch.mp4";
    // char filename[]="/test800x480-witch-mpeg4.mp4";
    // char filename[]="/test320x176-karanokyoukai.mp4";
    char filename[] = "/test.mp4";
    MovieState mvS;

    initServices();

    // Register all formats and codecs
    av_register_all();
    av_log_set_level(AV_LOG_INFO);

    printf("Press start to open the file\n");
    waitForStart();

    int ret = setup(&mvS, filename);
    if (ret) {
        waitForStartAndExit();
        return -1;
    }

    printf("Press start to decompress\n");
    waitForStart();

    // Read frames and save first five frames to disk
    int i = 0;
    int frameFinished;
    u64 timeBeginning, timeEnd;
    u64 timeBefore, timeAfter;
    u64 timeDecodeTotal = 0, timeScaleTotal = 0, timeDisplayTotal = 0;
    timeBefore = osGetTime();
    timeBeginning = timeBefore;
    bool stop = false;

    while (av_read_frame(mvS.pFormatCtx, &mvS.packet) >= 0 && !stop) {
        // Is this a packet from the video stream?
        if (mvS.packet.stream_index == mvS.videoStream) {
            /*********************
             * Decode video frame
             *********************/
            int err = avcodec_decode_video2(mvS.pCodecCtx, mvS.pFrame, &frameFinished, &mvS.packet);
            if (err <= 0) printf("decode error\n");
            // Did we get a video frame?
            if (frameFinished) {
                err = av_frame_get_decode_error_flags(mvS.pFrame);
                if (err) {
                    char buf[100];
                    av_strerror(err, buf, 100);
                }
                timeAfter = osGetTime();
                timeDecodeTotal += timeAfter - timeBefore;

                /*******************************
                 * Conversion of decoded frame
                 *******************************/
                timeBefore = osGetTime();
                colorConvert(&mvS);
                timeAfter = osGetTime();

                /***********************
                 * Display of the frame
                 ***********************/
                timeScaleTotal += timeAfter - timeBefore;
                timeBefore = osGetTime();
                if (mvS.renderGpu) {
                    gpuRenderFrame(&mvS);
                    gpuEndFrame();
                } else
                    display(mvS.outFrame);
                timeAfter = osGetTime();
                timeDisplayTotal += timeAfter - timeBefore;

                ++i; // New frame
                hidScanInput();
                u32 kDown = hidKeysDown();
                if (kDown & KEY_START)
                    stop = true; // break in order to return to hbmenu
                if (i % 50 == 0) printf("frame %d\n", i);
                timeBefore = osGetTime();
            }
        }
        // Free the packet that was allocated by av_read_frame
        av_free_packet(&mvS.packet);
    }
    timeEnd = timeBefore;

    tearup(&mvS);

    printf("Played %d frames in %f s (%f fps)\n",
           i, (timeEnd - timeBeginning) / 1000.0,
           i / ((timeEnd - timeBeginning) / 1000.0));
    printf("\tdecode:\t%llu\t%f perframe"
           "\n\tscale:\t%llu\t%f perframe"
           "\n\tdisplay:\t%llu\t%f perframe\n",
           timeDecodeTotal, timeDecodeTotal / (double) i,
           timeScaleTotal, timeScaleTotal / (double) i,
           timeDisplayTotal, timeDisplayTotal / (double) i);

    waitForStartAndExit();
    return 0;
}
static inline char *err2str(int errnum)
{
    av_strerror(errnum, errbuf, sizeof(errbuf));
    return errbuf;
}
const char *get_error_text(const int error)
{
    static char error_buffer[255];
    av_strerror(error, error_buffer, sizeof(error_buffer));
    return error_buffer;
}
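err2str() and get_error_text() above both return a pointer to shared static storage, so two threads reporting errors at once can clobber each other's message. A minimal reentrant sketch (the name and signature are illustrative, not from either original source) makes the caller own the buffer:

#include <libavutil/error.h>

/* The caller supplies the buffer, so no shared state is involved. */
static const char *err2str_r(int errnum, char *buf, size_t buf_size)
{
    if (av_strerror(errnum, buf, buf_size) < 0 && buf_size > 0)
        buf[0] = '\0'; /* unknown error code: yield an empty string */
    return buf;
}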
/**
    \fn initialize
*/
bool AUDMEncoder_Lavcodec::initialize(void)
{
  int ret;
  _context = (void *)avcodec_alloc_context();
  _useFloat = true;

  if( _incoming->getInfo()->channels>ADM_LAV_MAX_CHANNEL)
  {
    ADM_error("[Lavcodec]Too many channels\n");
    return 0;
  }
  wavheader.byterate=(_config.bitrate*1000)>>3;

  _chunk = ADM_LAV_SAMPLE_PER_P*wavheader.channels; // AC3

  ADM_info("[Lavcodec]Incoming : fq : %"PRIu32", channel : %"PRIu32" bitrate: %"PRIu32" \n",
           wavheader.frequency,wavheader.channels,_config.bitrate);

  CONTEXT->channels              = wavheader.channels;
  CONTEXT->sample_rate           = wavheader.frequency;
  CONTEXT->bit_rate              = (_config.bitrate*1000); // bits -> kbits
  CONTEXT->sample_fmt            = AV_SAMPLE_FMT_FLT;
  CONTEXT->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;

  if(true==_globalHeader)
  {
    ADM_info("Configuring audio codec to use global headers\n");
    CONTEXT->flags|=CODEC_FLAG_GLOBAL_HEADER;
  }

  AVCodec *codec;
  CodecID codecID;
  codecID = makeName(CODEC_ID);
  codec = avcodec_find_encoder(codecID);
  ADM_assert(codec);

  // Try float...
  ret = avcodec_open(CONTEXT, codec);
  if (0 > ret)
  {
    char er[256]={0};
    av_strerror(ret, er, sizeof(er));
    ADM_info("[Lavcodec] init failed err : %d %s!\n",ret,er);
    ADM_info("Float failed, retrying with int16\n");
    CONTEXT->sample_fmt = AV_SAMPLE_FMT_S16;
    ret = avcodec_open(CONTEXT, codec);
    if (0 > ret)
    {
      char er[256]={0};
      av_strerror(ret, er, sizeof(er));
      ADM_error("[Lavcodec] init failed err : %d %s!\n",ret,er);
      ADM_info("s16 failed\n");
      return 0;
    }
    _useFloat = false;
    ADM_info("Using int16 samples\n");
  }else
  {
    _useFloat = true;
    ADM_info("Using float samples\n");
  }
  ADM_info("[Lavcodec]Lavcodec successfully initialized,wavTag : 0x%x\n",makeName(WAV));
  return 1;
}
static const std::string av_make_error_string(int errnum)
{
    char errbuf[AV_ERROR_MAX_STRING_SIZE];
    av_strerror(errnum, errbuf, AV_ERROR_MAX_STRING_SIZE);
    return (std::string)errbuf;
}
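A possible usage sketch for the wrapper above (the calling code is illustrative, not from the original source). Copying the message into a std::string keeps it valid after the stack buffer is gone; note that libavutil/error.h also defines its own inline av_make_error_string() with a different three-argument signature, which in C++ simply coexists with this one as an overload.

#include <iostream>
extern "C" {
#include <libavformat/avformat.h>
}

// Sketch: open an input and report a readable message on failure.
static bool open_or_report(const char *url)
{
    AVFormatContext *fmt_ctx = nullptr;
    int err = avformat_open_input(&fmt_ctx, url, nullptr, nullptr);
    if (err < 0) {
        std::cerr << "open failed: " << av_make_error_string(err) << "\n";
        return false;
    }
    avformat_close_input(&fmt_ctx);
    return true;
}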
static GstFlowReturn
gst_ffmpegaudenc_encode_audio (GstFFMpegAudEnc * ffmpegaudenc,
    GstBuffer * buffer, gint * have_data)
{
  GstAudioEncoder *enc;
  AVCodecContext *ctx;
  gint res;
  GstFlowReturn ret;
  GstAudioInfo *info;
  AVPacket *pkt;
  AVFrame *frame = ffmpegaudenc->frame;
  gboolean planar;
  gint nsamples = -1;

  enc = GST_AUDIO_ENCODER (ffmpegaudenc);

  ctx = ffmpegaudenc->context;

  pkt = g_slice_new0 (AVPacket);

  if (buffer != NULL) {
    BufferInfo *buffer_info = g_slice_new0 (BufferInfo);
    guint8 *audio_in;
    guint in_size;

    buffer_info->buffer = buffer;
    gst_buffer_map (buffer, &buffer_info->map, GST_MAP_READ);
    audio_in = buffer_info->map.data;
    in_size = buffer_info->map.size;

    GST_LOG_OBJECT (ffmpegaudenc, "encoding buffer %p size:%u", audio_in,
        in_size);

    info = gst_audio_encoder_get_audio_info (enc);
    planar = av_sample_fmt_is_planar (ffmpegaudenc->context->sample_fmt);
    frame->format = ffmpegaudenc->context->sample_fmt;
    frame->sample_rate = ffmpegaudenc->context->sample_rate;
    frame->channels = ffmpegaudenc->context->channels;
    frame->channel_layout = ffmpegaudenc->context->channel_layout;

    if (planar && info->channels > 1) {
      gint channels;
      gint i, j;

      nsamples = frame->nb_samples = in_size / info->bpf;
      channels = info->channels;

      frame->buf[0] =
          av_buffer_create (NULL, 0, buffer_info_free, buffer_info, 0);

      if (info->channels > AV_NUM_DATA_POINTERS) {
        buffer_info->ext_data_array = frame->extended_data =
            av_malloc_array (info->channels, sizeof (uint8_t *));
      } else {
        frame->extended_data = frame->data;
      }

      buffer_info->ext_data = frame->extended_data[0] = av_malloc (in_size);
      frame->linesize[0] = in_size / channels;
      for (i = 1; i < channels; i++)
        frame->extended_data[i] =
            frame->extended_data[i - 1] + frame->linesize[0];

      switch (info->finfo->width) {
        case 8:{
          const guint8 *idata = (const guint8 *) audio_in;

          for (i = 0; i < nsamples; i++) {
            for (j = 0; j < channels; j++) {
              ((guint8 *) frame->extended_data[j])[i] = idata[j];
            }
            idata += channels;
          }
          break;
        }
        case 16:{
          const guint16 *idata = (const guint16 *) audio_in;

          for (i = 0; i < nsamples; i++) {
            for (j = 0; j < channels; j++) {
              ((guint16 *) frame->extended_data[j])[i] = idata[j];
            }
            idata += channels;
          }
          break;
        }
        case 32:{
          const guint32 *idata = (const guint32 *) audio_in;

          for (i = 0; i < nsamples; i++) {
            for (j = 0; j < channels; j++) {
              ((guint32 *) frame->extended_data[j])[i] = idata[j];
            }
            idata += channels;
          }
          break;
        }
        case 64:{
          const guint64 *idata = (const guint64 *) audio_in;

          for (i = 0; i < nsamples; i++) {
            for (j = 0; j < channels; j++) {
              ((guint64 *) frame->extended_data[j])[i] = idata[j];
            }
            idata += channels;
          }
          break;
        }
        default:
          g_assert_not_reached ();
          break;
      }

      gst_buffer_unmap (buffer, &buffer_info->map);
      gst_buffer_unref (buffer);
      buffer_info->buffer = NULL;
    } else {
      frame->data[0] = audio_in;
      frame->extended_data = frame->data;
      frame->linesize[0] = in_size;
      frame->nb_samples = nsamples = in_size / info->bpf;
      frame->buf[0] =
          av_buffer_create (NULL, 0, buffer_info_free, buffer_info, 0);
    }

    /* we have a frame to feed the encoder */
    res = avcodec_encode_audio2 (ctx, pkt, frame, have_data);

    av_frame_unref (frame);
  } else {
    GST_LOG_OBJECT (ffmpegaudenc, "draining");
    /* flushing the encoder */
    res = avcodec_encode_audio2 (ctx, pkt, NULL, have_data);
  }

  if (res < 0) {
    char error_str[128] = { 0, };

    g_slice_free (AVPacket, pkt);
    av_strerror (res, error_str, sizeof (error_str));
    GST_ERROR_OBJECT (enc, "Failed to encode buffer: %d - %s", res, error_str);
    return GST_FLOW_OK;
  }
  GST_LOG_OBJECT (ffmpegaudenc, "got output size %d", res);

  if (*have_data) {
    GstBuffer *outbuf;
    const AVCodec *codec;

    GST_LOG_OBJECT (ffmpegaudenc, "pushing size %d", pkt->size);

    outbuf =
        gst_buffer_new_wrapped_full (GST_MEMORY_FLAG_READONLY, pkt->data,
        pkt->size, 0, pkt->size, pkt, gst_ffmpegaudenc_free_avpacket);

    codec = ffmpegaudenc->context->codec;
    if ((codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE) || !buffer) {
      /* FIXME: Not really correct, as -1 means "all the samples we got
         given so far", which may not be true depending on the codec,
         but we have no way to know AFAICT */
      ret = gst_audio_encoder_finish_frame (enc, outbuf, -1);
    } else {
      ret = gst_audio_encoder_finish_frame (enc, outbuf, nsamples);
    }
  } else {
    GST_LOG_OBJECT (ffmpegaudenc, "no output produced");
    g_slice_free (AVPacket, pkt);
    ret = GST_FLOW_OK;
  }

  return ret;
}
int main(int argc, char *argv[])
{
    struct AVMD5 *md5;
    AVFilterGraph *graph;
    AVFilterContext *src, *sink;
    AVFrame *frame;
    char errstr[1024];
    float duration;
    int err, nb_frames, i;

    if (argc < 2) {
        fprintf(stderr, "Usage: %s <duration>\n", argv[0]);
        return 1;
    }

    duration  = atof(argv[1]);
    nb_frames = duration * INPUT_SAMPLERATE / FRAME_SIZE;
    if (nb_frames <= 0) {
        fprintf(stderr, "Invalid duration: %s\n", argv[1]);
        return 1;
    }

    avfilter_register_all();

    /* Allocate the frame we will be using to store the data. */
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Error allocating the frame\n");
        return 1;
    }

    md5 = av_md5_alloc();
    if (!md5) {
        fprintf(stderr, "Error allocating the MD5 context\n");
        return 1;
    }

    /* Set up the filtergraph. */
    err = init_filter_graph(&graph, &src, &sink);
    if (err < 0) {
        fprintf(stderr, "Unable to init filter graph:");
        goto fail;
    }

    /* the main filtering loop */
    for (i = 0; i < nb_frames; i++) {
        /* get an input frame to be filtered */
        err = get_input(frame, i);
        if (err < 0) {
            fprintf(stderr, "Error generating input frame:");
            goto fail;
        }

        /* Send the frame to the input of the filtergraph. */
        err = av_buffersrc_add_frame(src, frame);
        if (err < 0) {
            av_frame_unref(frame);
            fprintf(stderr, "Error submitting the frame to the filtergraph:");
            goto fail;
        }

        /* Get all the filtered output that is available. */
        while ((err = av_buffersink_get_frame(sink, frame)) >= 0) {
            /* now do something with our filtered frame */
            err = process_output(md5, frame);
            if (err < 0) {
                fprintf(stderr, "Error processing the filtered frame:");
                goto fail;
            }
            av_frame_unref(frame);
        }

        if (err == AVERROR(EAGAIN)) {
            /* Need to feed more frames in. */
            continue;
        } else if (err == AVERROR_EOF) {
            /* Nothing more to do, finish. */
            break;
        } else if (err < 0) {
            /* An error occurred. */
            fprintf(stderr, "Error filtering the data:");
            goto fail;
        }
    }

    avfilter_graph_free(&graph);
    av_frame_free(&frame);
    av_freep(&md5);

    return 0;

fail:
    av_strerror(err, errstr, sizeof(errstr));
    fprintf(stderr, "%s\n", errstr);
    return 1;
}
/**
 * buffer is a float array like:
 *
 *  - float buffer[nsamples * nchannels];
 */
bool AV::addAudioFrame(unsigned char* buffer, int nsamples, int nchannels) {
  if(!use_audio) {
    printf("Cannot add audio stream, we're not using audio.\n");
    return false;
  }

  AVCodecContext* c = ct.as->codec;

  // BUFFER HANDLING
  int samples_stored = av_audio_fifo_write(ct.afifo, (void**)&buffer, nsamples);
  if(samples_stored != nsamples) {
    return false;
  }
  int nstored = av_audio_fifo_size(ct.afifo);
  if(nstored < c->frame_size) {
    return false;
  }

  AVPacket packet = {0}; // data and size must be '0' (allocation is done for you :> )
  AVFrame* frame = avcodec_alloc_frame();
  int got_packet = 0;

  av_init_packet(&packet);
  packet.data = NULL;
  packet.size = 0;

  int use_nsamples = c->frame_size;
  frame->nb_samples = use_nsamples; // <-- important, must be set before avcodec_fill_audio_frame

  // GET DATA FROM BUFFER
  int num_bytes = av_samples_get_buffer_size(NULL, c->channels, use_nsamples, c->sample_fmt, 0);
  uint8_t* my_buffer = (uint8_t*)av_malloc(num_bytes);
  uint8_t** my_ptr = &my_buffer;
  int nread = av_audio_fifo_read(ct.afifo, (void**)my_ptr, use_nsamples);
  if(nread != use_nsamples) {
    printf("We only read: %d but we wanted to read %d samples.\n", nread, use_nsamples);
    av_free(my_buffer);
    return false;
  }

  // FILL
  int fill_result = avcodec_fill_audio_frame(
     frame
    ,c->channels
    ,c->sample_fmt
    ,(uint8_t*)my_buffer
    ,num_bytes
    ,1
  );

  if(fill_result != 0) {
    char buf[1024];
    av_strerror(fill_result, buf, 1024);
    printf("av error: %s\n", buf);
    av_free(my_buffer);
    return false;
  }

  // ENCODE
  int64_t now = av_gettime();
  AVRational my_time_base = (AVRational){1,1e6};
  AVRational stream_time_base = ct.as->time_base;       // stream time base
  AVRational codec_time_base = ct.as->codec->time_base; // codec time base
  int64_t now_frame_pts = av_rescale_q(now, my_time_base, codec_time_base);

  if(frame->pts == AV_NOPTS_VALUE) {
    frame->pts = ct.acounter;
  }
  ct.acounter = frame->pts + use_nsamples;
  printf("frame->nb_samples: %d, counter: %d\n", frame->nb_samples, ct.acounter);

  int enc_result = avcodec_encode_audio2(c, &packet, frame, &got_packet);
  packet.stream_index = ct.as->index;

  if(!got_packet) {
    av_free(my_buffer);
    return false;
  }
  if(enc_result < 0) {
    char buf[1024];
    av_strerror(enc_result, buf, 1024);
    printf("av error: %s\n", buf);
  }

  // CORRECT THE PTS, FROM VIDEO_CODEC.time_base TO STREAM.time_base
  packet.pts = av_rescale_q(packet.pts, codec_time_base, stream_time_base);
  packet.dts = av_rescale_q(packet.dts, codec_time_base, stream_time_base);
  //packet.duration = av_rescale_q(packet.duration, codec_time_base, stream_time_base);
  //packet.dts = packet.pts; // just a wild guess
  packet.duration = 0;

  /*
  printf("Audio: stream: %d\n", packet.stream_index);
  printf("Audio: ct.acounter: %d\n", ct.acounter);
  printf("Audio: packet.duration: %d\n", packet.duration);
  printf("Audio: stream.time_base, num=%d, den=%d\n", stream_time_base.num, stream_time_base.den);
  printf("Audio: codec.time_base, num=%d, den=%d\n", codec_time_base.num, codec_time_base.den);
  printf("Audio: coded_frame.pts: %lld\n", ct.as->codec->coded_frame->pts);
  printf("Audio: packet.pts: %lld\n", packet.pts);
  printf("-------------------\n");
  */

  // WRITE
  if(av_interleaved_write_frame(ct.c, &packet) != 0) {
    printf("Cannot write audio frame.\n");
    av_free(my_buffer);
    return false;
  }

  av_free(my_buffer);
  return true;
}
bool FeMedia::onGetData( Chunk &data )
{
    int offset=0;

    data.samples = NULL;
    data.sampleCount = 0;

    if ( (!m_audio) || end_of_file() )
        return false;

    while ( offset < m_audio->codec_ctx->sample_rate )
    {
        AVPacket *packet = m_audio->pop_packet();
        while (( packet == NULL ) && ( !end_of_file() ))
        {
            read_packet();
            packet = m_audio->pop_packet();
        }

        if ( packet == NULL )
        {
            m_audio->at_end=true;
            if ( offset > 0 )
                return true;
            return false;
        }

#if (LIBAVCODEC_VERSION_INT < AV_VERSION_INT( 53, 25, 0 ))
        {
            sf::Lock l( m_audio->buffer_mutex );

            int bsize = MAX_AUDIO_FRAME_SIZE;
            if ( avcodec_decode_audio3(
                        m_audio->codec_ctx,
                        (m_audio->buffer + offset),
                        &bsize, packet ) < 0 )
            {
                std::cerr << "Error decoding audio." << std::endl;
                FeBaseStream::free_packet( packet );
                return false;
            }
            else
            {
                offset += bsize / sizeof( sf::Int16 );
                data.sampleCount += bsize / sizeof(sf::Int16);
                data.samples = m_audio->buffer;
            }
        }
#else
 #if (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT( 55, 45, 0 ))
        AVFrame *frame = av_frame_alloc();
        m_audio->codec_ctx->refcounted_frames = 1;
 #else
        AVFrame *frame = avcodec_alloc_frame();
 #endif
        //
        // TODO: avcodec_decode_audio4() can return multiple frames per packet
        // depending on the codec.  We don't deal with this appropriately...
        //
        int got_frame( 0 );
        int len = avcodec_decode_audio4( m_audio->codec_ctx, frame, &got_frame, packet );
        if ( len < 0 )
        {
#ifdef FE_DEBUG
            char buff[256];
            av_strerror( len, buff, 256 );
            std::cerr << "Error decoding audio: " << buff << std::endl;
#endif
        }

        if ( got_frame )
        {
            int data_size = av_samples_get_buffer_size(
                NULL,
                m_audio->codec_ctx->channels,
                frame->nb_samples,
                m_audio->codec_ctx->sample_fmt, 1 );

#ifdef DO_RESAMPLE
            if ( m_audio->codec_ctx->sample_fmt == AV_SAMPLE_FMT_S16 )
#endif
            {
                sf::Lock l( m_audio->buffer_mutex );

                memcpy( (m_audio->buffer + offset), frame->data[0], data_size );
                offset += data_size / sizeof( sf::Int16 );
                data.sampleCount += data_size / sizeof(sf::Int16);
                data.samples = m_audio->buffer;
            }
#ifdef DO_RESAMPLE
            else
            {
                sf::Lock l( m_audio->buffer_mutex );

                if ( !m_audio->resample_ctx )
                {
                    m_audio->resample_ctx = resample_alloc();
                    if ( !m_audio->resample_ctx )
                    {
                        std::cerr << "Error allocating audio format converter." << std::endl;
                        FeBaseStream::free_packet( packet );
                        FeBaseStream::free_frame( frame );
                        return false;
                    }

                    int64_t channel_layout = frame->channel_layout;
                    if ( !channel_layout )
                    {
                        channel_layout = av_get_default_channel_layout(
                                m_audio->codec_ctx->channels );
                    }

                    av_opt_set_int( m_audio->resample_ctx, "in_channel_layout", channel_layout, 0 );
                    av_opt_set_int( m_audio->resample_ctx, "in_sample_fmt", frame->format, 0 );
                    av_opt_set_int( m_audio->resample_ctx, "in_sample_rate", frame->sample_rate, 0 );
                    av_opt_set_int( m_audio->resample_ctx, "out_channel_layout", channel_layout, 0 );
                    av_opt_set_int( m_audio->resample_ctx, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0 );
                    av_opt_set_int( m_audio->resample_ctx, "out_sample_rate", frame->sample_rate, 0 );

#ifdef FE_DEBUG
                    std::cout << "Initializing resampler: in_sample_fmt="
                        << av_get_sample_fmt_name( (AVSampleFormat)frame->format )
                        << ", in_sample_rate=" << frame->sample_rate
                        << ", out_sample_fmt=" << av_get_sample_fmt_name( AV_SAMPLE_FMT_S16 )
                        << ", out_sample_rate=" << frame->sample_rate << std::endl;
#endif
                    if ( resample_init( m_audio->resample_ctx ) < 0 )
                    {
                        std::cerr << "Error initializing audio format converter, input format="
                            << av_get_sample_fmt_name( (AVSampleFormat)frame->format )
                            << ", input sample rate=" << frame->sample_rate << std::endl;
                        FeBaseStream::free_packet( packet );
                        FeBaseStream::free_frame( frame );
                        resample_free( &m_audio->resample_ctx );
                        m_audio->resample_ctx = NULL;
                        return false;
                    }
                }
                if ( m_audio->resample_ctx )
                {
                    int out_linesize;
                    av_samples_get_buffer_size(
                        &out_linesize,
                        m_audio->codec_ctx->channels,
                        frame->nb_samples,
                        AV_SAMPLE_FMT_S16, 0 );

                    uint8_t *tmp_ptr = (uint8_t *)(m_audio->buffer + offset);

#ifdef USE_SWRESAMPLE
                    int out_samples = swr_convert(
                                m_audio->resample_ctx,
                                &tmp_ptr,
                                frame->nb_samples,
                                (const uint8_t **)frame->data,
                                frame->nb_samples );
#else // USE_AVRESAMPLE
                    int out_samples = avresample_convert(
                                m_audio->resample_ctx,
                                &tmp_ptr,
                                out_linesize,
                                frame->nb_samples,
                                frame->data,
                                frame->linesize[0],
                                frame->nb_samples );
#endif
                    if ( out_samples < 0 )
                    {
                        std::cerr << "Error performing audio conversion." << std::endl;
                        FeBaseStream::free_packet( packet );
                        FeBaseStream::free_frame( frame );
                        break;
                    }
                    offset += out_samples * m_audio->codec_ctx->channels;
                    data.sampleCount += out_samples * m_audio->codec_ctx->channels;
                    data.samples = m_audio->buffer;
                }
            }
#endif
        }
        FeBaseStream::free_frame( frame );
#endif

        FeBaseStream::free_packet( packet );
    }

    return true;
}
void *avhelper_get_decoder( const char *filename, int dst_pixfmt, int dst_width, int dst_height )
{
    char errbuf[512];
    el_decoder_t *x = (el_decoder_t*) vj_calloc( sizeof( el_decoder_t ));
    if(!x) {
        return NULL;
    }

#if LIBAVCODEC_BUILD > 5400
    int err = avformat_open_input( &(x->avformat_ctx), filename, NULL, NULL );
#else
    int err = av_open_input_file( &(x->avformat_ctx), filename, NULL, 0, NULL );
#endif

    if( err < 0 ) {
        av_strerror( err, errbuf, sizeof(errbuf) );
        veejay_msg(VEEJAY_MSG_DEBUG, "%s: %s", filename, errbuf );
        free(x);
        return NULL;
    }

#if LIBAVCODEC_BUILD > 5400
    /* avformat_find_stream_info leaks memory */
    err = avformat_find_stream_info( x->avformat_ctx, NULL );
#else
    err = av_find_stream_info( x->avformat_ctx );
#endif
    if( err < 0 ) {
        av_strerror( err, errbuf, sizeof(errbuf) );
        veejay_msg(VEEJAY_MSG_DEBUG, "%s: %s", filename, errbuf );
    }

    if( err < 0 ) {
        avhelper_close_input_file( x->avformat_ctx );
        free(x);
        return NULL;
    }

    unsigned int i,j;
    unsigned int n = x->avformat_ctx->nb_streams;
    int vi = -1;

    for( i = 0; i < n; i ++ )
    {
        if( !x->avformat_ctx->streams[i]->codec )
            continue;

        if( x->avformat_ctx->streams[i]->codec->codec_type > CODEC_ID_FIRST_SUBTITLE )
            continue;

        if( x->avformat_ctx->streams[i]->codec->codec_type < CODEC_ID_FIRST_AUDIO )
        {
            int sup_codec = 0;
            for( j = 0; _supported_codecs[j].name != NULL; j ++ ) {
                if( x->avformat_ctx->streams[i]->codec->codec_id == _supported_codecs[j].id ) {
                    sup_codec = 1;
                    goto further;
                }
            }
further:
            if( !sup_codec ) {
                avhelper_close_input_file( x->avformat_ctx );
                free(x);
                return NULL;
            }

            x->codec = avcodec_find_decoder( x->avformat_ctx->streams[i]->codec->codec_id );
            if( x->codec == NULL )
            {
                avhelper_close_input_file( x->avformat_ctx );
                free(x);
                return NULL;
            }

            vi = i;

            veejay_msg(VEEJAY_MSG_DEBUG, "FFmpeg: video stream %d, codec_id %d", vi,
                       x->avformat_ctx->streams[i]->codec->codec_id );
            break;
        }
    }

    if( vi == -1 ) {
        veejay_msg(VEEJAY_MSG_DEBUG, "FFmpeg: No video streams found");
        avhelper_close_input_file( x->avformat_ctx );
        free(x);
        return NULL;
    }

    x->codec_ctx = x->avformat_ctx->streams[vi]->codec;

    int wid = dst_width;
    int hei = dst_height;

    if( wid == -1 && hei == -1 ) {
        wid = x->codec_ctx->width;
        hei = x->codec_ctx->height;
    }

#if LIBAVCODEC_BUILD > 5400
    if ( avcodec_open2( x->codec_ctx, x->codec, NULL ) < 0 )
#else
    if ( avcodec_open( x->codec_ctx, x->codec ) < 0 )
#endif
    {
        avhelper_close_input_file( x->avformat_ctx );
        free(x);
        return NULL;
    }

    veejay_memset( &(x->pkt), 0, sizeof(AVPacket) );
    AVFrame *f = avcodec_alloc_frame();
    x->output = yuv_yuv_template( NULL, NULL, NULL, wid, hei, dst_pixfmt );

    int got_picture = 0;
    while(1) {
        int ret = av_read_frame( x->avformat_ctx, &(x->pkt) );
        if( ret < 0 )
            break;

        if ( x->pkt.stream_index == vi ) {
            avcodec_decode_video( x->codec_ctx, f, &got_picture, x->pkt.data, x->pkt.size );
            avhelper_frame_unref( f );
        }

        av_free_packet( &(x->pkt) );

        if( got_picture )
            break;
    }
    av_free(f);

    if( !got_picture ) {
        veejay_msg(VEEJAY_MSG_ERROR, "FFmpeg: Unable to get whole picture from %s", filename );
        avcodec_close( x->codec_ctx );
        avhelper_close_input_file( x->avformat_ctx );
        free(x->output);
        free(x);
        return NULL;
    }

    x->pixfmt = x->codec_ctx->pix_fmt;
    x->codec_id = x->codec_ctx->codec_id;
    x->frame = avcodec_alloc_frame();
    x->input = yuv_yuv_template( NULL, NULL, NULL,
                                 x->codec_ctx->width, x->codec_ctx->height, x->pixfmt );

    sws_template sws_tem;
    veejay_memset( &sws_tem, 0, sizeof(sws_template) );
    sws_tem.flags = yuv_which_scaler();

    x->scaler = yuv_init_swscaler( x->input, x->output, &sws_tem, yuv_sws_get_cpu_flags() );
    if( x->scaler == NULL ) {
        veejay_msg(VEEJAY_MSG_ERROR, "FFmpeg: Failed to get scaler context for %dx%d in %d to %dx%d in %d",
                   x->codec_ctx->width, x->codec_ctx->height, x->pixfmt, wid, hei, dst_pixfmt );
        /* note: the temporary frame f was already freed above, so it must not
         * be freed again here */
        avcodec_close( x->codec_ctx );
        avhelper_close_input_file( x->avformat_ctx );
        free(x->output);
        free(x->input);
        free(x);
        return NULL;
    }

    return (void*) x;
}
static void log_net_error(void *ctx, int level, const char* prefix)
{
    char errbuf[100];
    av_strerror(ff_neterrno(), errbuf, sizeof(errbuf));
    av_log(ctx, level, "%s: %s\n", prefix, errbuf);
}
static int64_t crypto_seek(URLContext *h, int64_t pos, int whence)
{
    CryptoContext *c = h->priv_data;
    int64_t block;
    int64_t newpos;

    if (c->flags & AVIO_FLAG_WRITE) {
        av_log(h, AV_LOG_ERROR,
            "Crypto: seek not supported for write\r\n");
        /* seems the most appropriate error to return */
        return AVERROR(ESPIPE);
    }

    // reset eof, else we won't read it correctly if we already hit eof.
    c->eof = 0;

    switch (whence) {
    case SEEK_SET:
        break;
    case SEEK_CUR:
        pos = pos + c->position;
        break;
    case SEEK_END: {
        int64_t newpos = ffurl_seek( c->hd, pos, AVSEEK_SIZE );
        if (newpos < 0) {
            av_log(h, AV_LOG_ERROR,
                "Crypto: seek_end - can't get file size (pos=%lld)\r\n", (long long int)pos);
            return newpos;
        }
        pos = newpos - pos;
        }
        break;
    case AVSEEK_SIZE: {
        int64_t newpos = ffurl_seek( c->hd, pos, AVSEEK_SIZE );
        return newpos;
        }
        break;
    default:
        av_log(h, AV_LOG_ERROR,
            "Crypto: no support for seek where 'whence' is %d\r\n", whence);
        return AVERROR(EINVAL);
    }

    c->outdata = 0;
    c->indata = 0;
    c->indata_used = 0;
    c->outptr = c->outbuffer;

    // identify the block containing the IV for the
    // next block we will decrypt
    block = pos / BLOCKSIZE;
    if (block == 0) {
        // restore the iv to the seed one - this is the iv for the FIRST block
        memcpy( c->decrypt_iv, c->iv, c->ivlen );
        c->position = 0;
    } else {
        // else, go back one block - we will get av_crypt to read this block,
        // which it will then store and use as the iv.
        // note that the DECRYPTED result will not be correct,
        // but will be discarded
        block--;
        c->position = (block * BLOCKSIZE);
    }

    newpos = ffurl_seek( c->hd, c->position, SEEK_SET );
    if (newpos < 0) {
        av_log(h, AV_LOG_ERROR,
            "Crypto: nested protocol no support for seek or seek failed\n");
        return newpos;
    }

    // read and discard from here up to required position
    // (which will set the iv correctly to it).
    if (pos - c->position) {
        uint8_t buff[BLOCKSIZE*2]; // maximum size of pos-c->position
        int len = pos - c->position;
        int res;

        while (len > 0) {
            // note: this may not return all the bytes first time
            res = crypto_read(h, buff, len);
            if (res < 0)
                break;
            len -= res;
        }

        // if we did not get all the bytes
        if (len != 0) {
            char errbuf[100] = "unknown error";
            av_strerror(res, errbuf, sizeof(errbuf));
            av_log(h, AV_LOG_ERROR,
                "Crypto: discard read did not get all the bytes (%d remain) - read returned (%d)-%s\n",
                len, res, errbuf);
            return AVERROR(EINVAL);
        }
    }

    return c->position;
}
/**
 * ffmpeg_open
 *      Opens an mpeg file using the new libavformat method. Both mpeg1
 *      and mpeg4 are supported. However, if the current ffmpeg version doesn't allow
 *      mpeg1 with non-standard framerate, the open will fail. Timelapse is a special
 *      case and is tested separately.
 *
 * Returns
 *      A new allocated ffmpeg struct or NULL if any error happens.
 */
struct ffmpeg *ffmpeg_open(const char *ffmpeg_video_codec, char *filename,
                           unsigned char *y, unsigned char *u, unsigned char *v,
                           int width, int height, int rate, int bps, int vbr, int tlapse)
{
    AVCodecContext *c;
    AVCodec *codec;
    struct ffmpeg *ffmpeg;
    int ret;
    char errstr[128];
    AVDictionary *opts = 0;

    /*
     * Allocate space for our ffmpeg structure. This structure contains all the
     * codec and image information we need to generate movies.
     */
    ffmpeg = mymalloc(sizeof(struct ffmpeg));
    memset(ffmpeg, 0, sizeof(struct ffmpeg));

    ffmpeg->vbr = vbr;
    ffmpeg->tlapse = tlapse;

    /* Store codec name in ffmpeg->codec, with buffer overflow check. */
    snprintf(ffmpeg->codec, sizeof(ffmpeg->codec), "%s", ffmpeg_video_codec);

    /* Allocation the output media context. */
    ffmpeg->oc = avformat_alloc_context();
    if (!ffmpeg->oc) {
        MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: Could not allocate output context");
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    /* Setup output format */
    if (ffmpeg->tlapse == TIMELAPSE_APPEND){
        ffmpeg->oc->oformat = get_oformat("tlapse", filename);
    } else {
        ffmpeg->oc->oformat = get_oformat(ffmpeg_video_codec, filename);
    }
    if (!ffmpeg->oc->oformat) {
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    snprintf(ffmpeg->oc->filename, sizeof(ffmpeg->oc->filename), "%s", filename);

    /* Create a new video stream and initialize the codecs. */
    ffmpeg->video_st = NULL;
    if (ffmpeg->oc->oformat->video_codec != MY_CODEC_ID_NONE) {
        codec = avcodec_find_encoder(ffmpeg->oc->oformat->video_codec);
        if (!codec) {
            MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: Codec %s not found", ffmpeg_video_codec);
            ffmpeg_cleanups(ffmpeg);
            return NULL;
        }

        ffmpeg->video_st = avformat_new_stream(ffmpeg->oc, codec);
        if (!ffmpeg->video_st) {
            MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: Could not alloc stream");
            ffmpeg_cleanups(ffmpeg);
            return NULL;
        }
    } else {
        /* We did not get a proper video codec. */
        MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: Could not get the codec");
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    ffmpeg->c = c = AVSTREAM_CODEC_PTR(ffmpeg->video_st);
    c->codec_id = ffmpeg->oc->oformat->video_codec;
    c->codec_type = AVMEDIA_TYPE_VIDEO;
    c->bit_rate = bps;
    c->width = width;
    c->height = height;
    c->time_base.num = 1;
    c->time_base.den = rate;
    c->gop_size = 12;
    c->pix_fmt = MY_PIX_FMT_YUV420P;
    c->max_b_frames = 0;

    if (c->codec_id == MY_CODEC_ID_H264 || c->codec_id == MY_CODEC_ID_HEVC){
        av_dict_set(&opts, "preset", "ultrafast", 0);
        av_dict_set(&opts, "crf", "18", 0);
        av_dict_set(&opts, "tune", "zerolatency", 0);
    }

    if (strcmp(ffmpeg_video_codec, "ffv1") == 0)
        c->strict_std_compliance = -2;

    if (vbr)
        c->flags |= CODEC_FLAG_QSCALE;

    if (!strcmp(ffmpeg->oc->oformat->name, "mp4") ||
        !strcmp(ffmpeg->oc->oformat->name, "mov") ||
        !strcmp(ffmpeg->oc->oformat->name, "3gp")) {
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }

    /* Get a mutex lock. */
    pthread_mutex_lock(&global_lock);
    ret = avcodec_open2(c, codec, &opts);
    pthread_mutex_unlock(&global_lock);

    if (ret < 0) {
        if (codec->supported_framerates) {
            const AVRational *fps = codec->supported_framerates;
            while (fps->num) {
                MOTION_LOG(NTC, TYPE_ENCODER, NO_ERRNO,
                           "%s Reported FPS Supported %d/%d", fps->num, fps->den);
                fps++;
            }
        }
        int chkrate = 1;
        pthread_mutex_lock(&global_lock);
        while ((chkrate < 36) && (ret != 0)) {
            c->time_base.den = chkrate;
            ret = avcodec_open2(c, codec, &opts);
            chkrate++;
        }
        pthread_mutex_unlock(&global_lock);
        if (ret < 0) {
            av_strerror(ret, errstr, sizeof(errstr));
            MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: Could not open codec %s", errstr);
            av_dict_free(&opts);
            ffmpeg_cleanups(ffmpeg);
            return NULL;
        }
    }
    av_dict_free(&opts);
    MOTION_LOG(NTC, TYPE_ENCODER, NO_ERRNO, "%s Selected Output FPS %d", c->time_base.den);

    ffmpeg->video_outbuf = NULL;
    if (!(ffmpeg->oc->oformat->flags & AVFMT_RAWPICTURE)) {
        ffmpeg->video_outbuf_size = ffmpeg->c->width * 512;
        ffmpeg->video_outbuf = mymalloc(ffmpeg->video_outbuf_size);
    }

    ffmpeg->picture = my_frame_alloc();
    if (!ffmpeg->picture) {
        MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: could not alloc frame");
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    /* Set variable bitrate if requested. */
    if (ffmpeg->vbr)
        ffmpeg->picture->quality = ffmpeg->vbr;

    /* Set the frame data. */
    ffmpeg->picture->data[0] = y;
    ffmpeg->picture->data[1] = u;
    ffmpeg->picture->data[2] = v;
    ffmpeg->picture->linesize[0] = ffmpeg->c->width;
    ffmpeg->picture->linesize[1] = ffmpeg->c->width / 2;
    ffmpeg->picture->linesize[2] = ffmpeg->c->width / 2;

    /* Open the output file, if needed. */
    if ((access(filename, W_OK) == 0) || (ffmpeg->tlapse != TIMELAPSE_APPEND)) {
        if (!(ffmpeg->oc->oformat->flags & AVFMT_NOFILE)) {
            if (avio_open(&ffmpeg->oc->pb, filename, MY_FLAG_WRITE) < 0) {
                if (errno == ENOENT) {
                    if (create_path(filename) == -1) {
                        ffmpeg_cleanups(ffmpeg);
                        return NULL;
                    }
                    if (avio_open(&ffmpeg->oc->pb, filename, MY_FLAG_WRITE) < 0) {
                        MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO,
                                   "%s: error opening file %s", filename);
                        ffmpeg_cleanups(ffmpeg);
                        return NULL;
                    }
                /* Permission denied */
                } else if (errno == EACCES) {
                    MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO,
                               "%s: Permission denied. %s", filename);
                    ffmpeg_cleanups(ffmpeg);
                    return NULL;
                } else {
                    MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO,
                               "%s: Error opening file %s", filename);
                    ffmpeg_cleanups(ffmpeg);
                    return NULL;
                }
            }
        }

        /* Write the stream header. For the TIMELAPSE_APPEND
         * we write the data via standard file I/O so we close the
         * items here.
         */
        avformat_write_header(ffmpeg->oc, NULL);
        if (ffmpeg->tlapse == TIMELAPSE_APPEND) {
            av_write_trailer(ffmpeg->oc);
            avio_close(ffmpeg->oc->pb);
        }
    }

    return ffmpeg;
}
static av_cold int init(AVFilterContext *ctx)
{
    MetadataContext *s = ctx->priv;
    int ret;

    if (!s->key && s->mode != METADATA_PRINT && s->mode != METADATA_DELETE) {
        av_log(ctx, AV_LOG_WARNING, "Metadata key must be set\n");
        return AVERROR(EINVAL);
    }

    if ((s->mode == METADATA_MODIFY || s->mode == METADATA_ADD) && !s->value) {
        av_log(ctx, AV_LOG_WARNING, "Missing metadata value\n");
        return AVERROR(EINVAL);
    }

    switch (s->function) {
    case METADATAF_SAME_STR:
        s->compare = same_str;
        break;
    case METADATAF_STARTS_WITH:
        s->compare = starts_with;
        break;
    case METADATAF_LESS:
        s->compare = less;
        break;
    case METADATAF_EQUAL:
        s->compare = equal;
        break;
    case METADATAF_GREATER:
        s->compare = greater;
        break;
    case METADATAF_EXPR:
        s->compare = parse_expr;
        break;
    default:
        av_assert0(0);
    }

    if (s->function == METADATAF_EXPR) {
        if (!s->expr_str) {
            av_log(ctx, AV_LOG_WARNING, "expr option not set\n");
            return AVERROR(EINVAL);
        }
        if ((ret = av_expr_parse(&s->expr, s->expr_str,
                                 var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
            av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n", s->expr_str);
            return ret;
        }
    }

    if (s->mode == METADATA_PRINT && s->file_str) {
        s->print = print_file;
    } else {
        s->print = print_log;
    }

    s->avio_context = NULL;
    if (s->file_str) {
        if (!strcmp("-", s->file_str)) {
            ret = avio_open(&s->avio_context, "pipe:1", AVIO_FLAG_WRITE);
        } else {
            ret = avio_open(&s->avio_context, s->file_str, AVIO_FLAG_WRITE);
        }

        if (ret < 0) {
            char buf[128];

            av_strerror(ret, buf, sizeof(buf));
            av_log(ctx, AV_LOG_ERROR, "Could not open %s: %s\n",
                   s->file_str, buf);
            return ret;
        }
    }

    return 0;
}
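/*
 * For reference, this init() backs FFmpeg's metadata/ametadata filters. A
 * typical command-line invocation (standard FFmpeg filter syntax) that
 * exercises the METADATA_PRINT path, with "file=-" mapping to "pipe:1"
 * (stdout) as handled above, would be:
 *
 *     ffmpeg -i input.mkv -vf "metadata=mode=print:file=-" -f null -
 */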
// decode one audio packet and return its uncompressed size
static int audio_decode_frame(struct GroovePlaylist *playlist, struct GrooveFile *file) {
    struct GroovePlaylistPrivate *p = (struct GroovePlaylistPrivate *) playlist;
    struct GrooveFilePrivate *f = (struct GrooveFilePrivate *) file;

    AVPacket *pkt = &f->audio_pkt;
    AVCodecContext *dec = f->audio_st->codec;

    AVPacket *pkt_temp = &p->audio_pkt_temp;
    *pkt_temp = *pkt;

    // update the audio clock with the pts if we can
    if (pkt->pts != AV_NOPTS_VALUE)
        f->audio_clock = av_q2d(f->audio_st->time_base) * pkt->pts;

    int max_data_size = 0;
    int len1, got_frame;
    int new_packet = 1;
    AVFrame *in_frame = p->in_frame;

    // NOTE: the audio packet can contain several frames
    while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
        new_packet = 0;

        len1 = avcodec_decode_audio4(dec, in_frame, &got_frame, pkt_temp);
        if (len1 < 0) {
            // if error, we skip the frame
            pkt_temp->size = 0;
            return -1;
        }

        pkt_temp->data += len1;
        pkt_temp->size -= len1;

        if (!got_frame) {
            // stop sending empty packets if the decoder is finished
            if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
                return 0;
            continue;
        }

        // push the audio data from decoded frame into the filtergraph
        int err = av_buffersrc_write_frame(p->abuffer_ctx, in_frame);
        if (err < 0) {
            av_strerror(err, p->strbuf, sizeof(p->strbuf));
            av_log(NULL, AV_LOG_ERROR, "error writing frame to buffersrc: %s\n",
                    p->strbuf);
            return -1;
        }

        // for each data format in the sink map, pull filtered audio from its
        // buffersink, turn it into a GrooveBuffer and then increment the ref
        // count for each sink in that stack.
        struct SinkMap *map_item = p->sink_map;
        double clock_adjustment = 0;
        while (map_item) {
            struct GrooveSink *example_sink = map_item->stack_head->sink;
            int data_size = 0;
            for (;;) {
                AVFrame *oframe = av_frame_alloc();
                int err = example_sink->buffer_sample_count == 0 ?
                    av_buffersink_get_frame(map_item->abuffersink_ctx, oframe) :
                    av_buffersink_get_samples(map_item->abuffersink_ctx, oframe,
                            example_sink->buffer_sample_count);
                if (err == AVERROR_EOF || err == AVERROR(EAGAIN)) {
                    av_frame_free(&oframe);
                    break;
                }
                if (err < 0) {
                    av_frame_free(&oframe);
                    av_log(NULL, AV_LOG_ERROR, "error reading buffer from buffersink\n");
                    return -1;
                }
                struct GrooveBuffer *buffer = frame_to_groove_buffer(playlist, example_sink, oframe);
                if (!buffer) {
                    av_frame_free(&oframe);
                    return -1;
                }
                data_size += buffer->size;

                struct SinkStack *stack_item = map_item->stack_head;
                // we hold this reference to avoid cleanups until at least this loop
                // is done and we call unref after it.
                groove_buffer_ref(buffer);
                while (stack_item) {
                    struct GrooveSink *sink = stack_item->sink;
                    struct GrooveSinkPrivate *s = (struct GrooveSinkPrivate *) sink;
                    // as soon as we call groove_queue_put, this buffer could be unref'd.
                    // so we ref before putting it in the queue, and unref if it failed.
                    groove_buffer_ref(buffer);
                    if (groove_queue_put(s->audioq, buffer) < 0) {
                        av_log(NULL, AV_LOG_ERROR, "unable to put buffer in queue\n");
                        groove_buffer_unref(buffer);
                    }
                    stack_item = stack_item->next;
                }
                groove_buffer_unref(buffer);
            }
            if (data_size > max_data_size) {
                max_data_size = data_size;
                clock_adjustment = data_size / (double)example_sink->bytes_per_sec;
            }
            map_item = map_item->next;
        }

        // if no pts, then estimate it
        if (pkt->pts == AV_NOPTS_VALUE)
            f->audio_clock += clock_adjustment;
        return max_data_size;
    }
    return max_data_size;
}
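/*
 * Minimal sketch (not from the original source) of the legacy
 * avcodec_decode_audio4() consume loop that audio_decode_frame() above is
 * built around: a single AVPacket may contain several audio frames, so the
 * decoder is called repeatedly until every byte of the packet is consumed.
 * Assumes an FFmpeg version old enough to still provide this API.
 */
static int drain_audio_packet(AVCodecContext *dec, AVPacket *pkt, AVFrame *frame) {
    while (pkt->size > 0) {
        int got_frame = 0;
        int used = avcodec_decode_audio4(dec, frame, &got_frame, pkt);
        if (used < 0)
            return used;        // decode error: caller drops the packet
        pkt->data += used;
        pkt->size -= used;
        if (got_frame) {
            // hand `frame` to a filtergraph or sink here
        }
    }
    return 0;
}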
void AudioLoader::openAudioFile(const string& filename) {
    E_DEBUG(EAlgorithm, "AudioLoader: opening file: " << parameter("filename").toString());

    // Open file
    if (avformat_open_input(&_demuxCtx, filename.c_str(), NULL, NULL) != 0) {
        throw EssentiaException("AudioLoader: Could not open file \"", filename, "\"");
    }

    // Retrieve stream information
    int errnum;
    if ((errnum = avformat_find_stream_info(_demuxCtx, NULL)) < 0) {
        char errorstr[128];
        string error = "Unknown error";
        if (av_strerror(errnum, errorstr, 128) == 0) error = errorstr;
        avformat_close_input(&_demuxCtx);
        _demuxCtx = 0;
        throw EssentiaException("AudioLoader: Could not find stream information, error = ", error);
    }

    // Dump information about file onto standard error
    //dump_format(_demuxCtx, 0, filename.c_str(), 0);

    // Check that we have only 1 audio stream in the file
    int nAudioStreams = 0;
    for (int i=0; i<(int)_demuxCtx->nb_streams; i++) {
        if (_demuxCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            _streamIdx = i;
            nAudioStreams++;
        }
    }
    if (nAudioStreams != 1) {
        throw EssentiaException("AudioLoader ERROR: found ", nAudioStreams,
                                " streams in the file, expecting only one audio stream");
    }

    // Load corresponding audio codec
    _audioCtx = _demuxCtx->streams[_streamIdx]->codec;
    _audioCodec = avcodec_find_decoder(_audioCtx->codec_id);
    if (!_audioCodec) {
        throw EssentiaException("AudioLoader: Unsupported codec!");
    }
    if (avcodec_open2(_audioCtx, _audioCodec, NULL) < 0) {
        throw EssentiaException("AudioLoader: Unable to instantiate codec...");
    }

    if (_audioCtx->sample_fmt != AV_SAMPLE_FMT_S16) {
#if HAVE_SWRESAMPLE
        E_DEBUG(EAlgorithm, "AudioLoader: using sample format conversion from libswresample");

        // No samplerate conversion yet, only format
        int64_t layout = av_get_default_channel_layout(_audioCtx->channels);
        _convertCtx = swr_alloc_set_opts(_convertCtx,
                                         layout, AV_SAMPLE_FMT_S16,    _audioCtx->sample_rate,
                                         layout, _audioCtx->sample_fmt, _audioCtx->sample_rate,
                                         0, NULL);
        if (swr_init(_convertCtx) < 0) {
            throw EssentiaException("Could not initialize swresample context");
        }
        /*
        const char* fmt = 0;
        get_format_from_sample_fmt(&fmt, _audioCtx->sample_fmt);
        E_DEBUG(EAlgorithm, "AudioLoader: converting from " << (fmt ? fmt : "unknown") << " to S16");
        */
#else
        E_DEBUG(EAlgorithm, "AudioLoader: using sample format conversion from "
                            "deprecated audioconvert");
        _audioConvert = av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1, _audioCtx->sample_fmt, 1, NULL, 0);

        // reserve some more space
        _buff1 = (int16_t*)av_malloc(MAX_AUDIO_FRAME_SIZE * 3);
        _buff2 = (int16_t*)av_malloc(MAX_AUDIO_FRAME_SIZE * 3);
#endif
    }
    else {
        E_DEBUG(EAlgorithm, "AudioLoader: no sample format conversion, using direct copy");
    }

    av_init_packet(&_packet);

#if LIBAVCODEC_VERSION_INT >= AVCODEC_AUDIO_DECODE4
    _decodedFrame = avcodec_alloc_frame();
    if (!_decodedFrame) {
        throw EssentiaException("Could not allocate audio frame");
    }
#endif

#if LIBAVCODEC_VERSION_INT < AVCODEC_51_28_0
    E_DEBUG(EAlgorithm, "AudioLoader: using ffmpeg avcodec_decode_audio() function");
#elif LIBAVCODEC_VERSION_INT < AVCODEC_52_47_0
    E_DEBUG(EAlgorithm, "AudioLoader: using ffmpeg avcodec_decode_audio2() function");
#elif LIBAVCODEC_VERSION_INT < AVCODEC_AUDIO_DECODE4
    E_DEBUG(EAlgorithm, "AudioLoader: using ffmpeg avcodec_decode_audio3() function");
#else
    E_DEBUG(EAlgorithm, "AudioLoader: using ffmpeg avcodec_decode_audio4() function");
#endif
}
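/*
 * Complementary sketch (hypothetical, not from the original source): once the
 * swr context above has been initialized, each decoded frame would be
 * converted to interleaved S16 along these lines. The function name and the
 * caller-provided output buffer are assumptions for illustration.
 */
static int convertToS16(SwrContext* ctx, const AVFrame* in,
                        int16_t* out, int maxOutSamples) {
    uint8_t* outPlanes[1] = { (uint8_t*)out };
    // swr_convert returns the number of samples written per channel,
    // or a negative AVERROR code on failure
    return swr_convert(ctx, outPlanes, maxOutSamples,
                       (const uint8_t**)in->extended_data, in->nb_samples);
}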
int64_t
GetVideoMetadata(const char *path, char *name)
{
    struct stat file;
    int ret, i;
    struct tm *modtime;
    AVFormatContext *ctx = NULL;
    AVCodecContext *ac = NULL, *vc = NULL;
    int audio_stream = -1, video_stream = -1;
    enum audio_profiles audio_profile = PROFILE_AUDIO_UNKNOWN;
    char fourcc[4];
    int64_t album_art = 0;
    char nfo[MAXPATHLEN], *ext;
    struct song_metadata video;
    metadata_t m;
    uint32_t free_flags = 0xFFFFFFFF;
    char *path_cpy, *basepath;

    memset(&m, '\0', sizeof(m));
    memset(&video, '\0', sizeof(video));

    //DEBUG DPRINTF(E_DEBUG, L_METADATA, "Parsing video %s...\n", name);
    if( stat(path, &file) != 0 )
        return 0;
    strip_ext(name);
    //DEBUG DPRINTF(E_DEBUG, L_METADATA, " * size: %jd\n", file.st_size);

    ret = lav_open(&ctx, path);
    if( ret != 0 )
    {
        char err[128];
        av_strerror(ret, err, sizeof(err));
        DPRINTF(E_WARN, L_METADATA, "Opening %s failed! [%s]\n", path, err);
        return 0;
    }
    //dump_format(ctx, 0, NULL, 0);
    for( i=0; i<ctx->nb_streams; i++)
    {
        if( audio_stream == -1 &&
            ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO )
        {
            audio_stream = i;
            ac = ctx->streams[audio_stream]->codec;
            continue;
        }
        else if( video_stream == -1 &&
                 !lav_is_thumbnail_stream(ctx->streams[i], &m.thumb_data, &m.thumb_size) &&
                 ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
        {
            video_stream = i;
            vc = ctx->streams[video_stream]->codec;
            continue;
        }
    }
    path_cpy = strdup(path);
    basepath = basename(path_cpy);
    if( !vc )
    {
        /* This must not be a video file. */
        lav_close(ctx);
        if( !is_audio(path) )
            DPRINTF(E_DEBUG, L_METADATA, "File %s does not contain a video stream.\n", basepath);
        free(path_cpy);
        return 0;
    }

    if( ac )
    {
        aac_object_type_t aac_type = AAC_INVALID;
        switch( ac->codec_id )
        {
            case AV_CODEC_ID_MP3:
                audio_profile = PROFILE_AUDIO_MP3;
                break;
            case AV_CODEC_ID_AAC:
                if( !ac->extradata_size ||
                    !ac->extradata )
                {
                    DPRINTF(E_DEBUG, L_METADATA, "No AAC type\n");
                }
                else
                {
                    uint8_t data;
                    memcpy(&data, ac->extradata, 1);
                    aac_type = data >> 3;
                }
                switch( aac_type )
                {
                    /* AAC Low Complexity variants */
                    case AAC_LC:
                    case AAC_LC_ER:
                        if( ac->sample_rate < 8000 ||
                            ac->sample_rate > 48000 )
                        {
                            DPRINTF(E_DEBUG, L_METADATA, "Unsupported AAC: sample rate is not 8000 < %d < 48000\n",
                                    ac->sample_rate);
                            break;
                        }
                        /* AAC @ Level 1/2 */
                        if( ac->channels <= 2 &&
                            ac->bit_rate <= 576000 )
                            audio_profile = PROFILE_AUDIO_AAC;
                        else if( ac->channels <= 6 &&
                                 ac->bit_rate <= 1440000 )
                            audio_profile = PROFILE_AUDIO_AAC_MULT5;
                        else
                            DPRINTF(E_DEBUG, L_METADATA, "Unhandled AAC: %d channels, %d bitrate\n",
                                    ac->channels, ac->bit_rate);
                        break;
                    default:
                        DPRINTF(E_DEBUG, L_METADATA, "Unhandled AAC type [%d]\n", aac_type);
                        break;
                }
                break;
            case AV_CODEC_ID_AC3:
            case AV_CODEC_ID_DTS:
                audio_profile = PROFILE_AUDIO_AC3;
                break;
            case AV_CODEC_ID_WMAV1:
            case AV_CODEC_ID_WMAV2:
                /* WMA Baseline: stereo, up to 48 KHz, up to 192,999 bps */
                if ( ac->bit_rate <= 193000 )
                    audio_profile = PROFILE_AUDIO_WMA_BASE;
                /* WMA Full: stereo, up to 48 KHz, up to 385 Kbps */
                else if ( ac->bit_rate <= 385000 )
                    audio_profile = PROFILE_AUDIO_WMA_FULL;
                break;
            case AV_CODEC_ID_WMAPRO:
                audio_profile = PROFILE_AUDIO_WMA_PRO;
                break;
            case AV_CODEC_ID_MP2:
                audio_profile = PROFILE_AUDIO_MP2;
                break;
            case AV_CODEC_ID_AMR_NB:
                audio_profile = PROFILE_AUDIO_AMR;
                break;
            default:
                if( (ac->codec_id >= AV_CODEC_ID_PCM_S16LE) &&
                    (ac->codec_id < AV_CODEC_ID_ADPCM_IMA_QT) )
                    audio_profile = PROFILE_AUDIO_PCM;
                else
                    DPRINTF(E_DEBUG, L_METADATA, "Unhandled audio codec [0x%X]\n", ac->codec_id);
                break;
        }
        m.frequency = ac->sample_rate;
        m.channels = ac->channels;
    }
    if( vc )
    {
        int off;
        int duration, hours, min, sec, ms;
        ts_timestamp_t ts_timestamp = NONE;
        DPRINTF(E_DEBUG, L_METADATA, "Container: '%s' [%s]\n", ctx->iformat->name, basepath);
        xasprintf(&m.resolution, "%dx%d", vc->width, vc->height);
        if( ctx->bit_rate > 8 )
            m.bitrate = ctx->bit_rate / 8;
        if( ctx->duration > 0 )
        {
            duration = (int)(ctx->duration / AV_TIME_BASE);
            hours = (int)(duration / 3600);
            min = (int)(duration / 60 % 60);
            sec = (int)(duration % 60);
            ms = (int)(ctx->duration / (AV_TIME_BASE/1000) % 1000);
            xasprintf(&m.duration, "%d:%02d:%02d.%03d", hours, min, sec, ms);
        }

        /* NOTE: The DLNA spec only provides for ASF (WMV), TS, PS, and MP4 containers.
         * Skip DLNA parsing for everything else. */
        if( strcmp(ctx->iformat->name, "avi") == 0 )
        {
            xasprintf(&m.mime, "video/x-msvideo");
            if( vc->codec_id == AV_CODEC_ID_MPEG4 )
            {
                fourcc[0] = vc->codec_tag     & 0xff;
                fourcc[1] = vc->codec_tag>>8  & 0xff;
                fourcc[2] = vc->codec_tag>>16 & 0xff;
                fourcc[3] = vc->codec_tag>>24 & 0xff;
                if( memcmp(fourcc, "XVID", 4) == 0 ||
                    memcmp(fourcc, "DX50", 4) == 0 ||
                    memcmp(fourcc, "DIVX", 4) == 0 )
                    xasprintf(&m.creator, "DiVX");
            }
        }
static QString fferror(int av_err)
{
    char buff[1024];
    return av_strerror(av_err, buff, sizeof(buff)) == 0 ? buff
                                                        : "Unknown ffmpeg error";
}
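/*
 * Usage sketch (hypothetical, not from the original source): surfacing a
 * libavformat failure through Qt logging with fferror(). Assumes the usual
 * <QDebug> and libavformat includes are available.
 */
static bool openDemuxer(AVFormatContext **ctx, const QString &path)
{
    int err = avformat_open_input(ctx, path.toUtf8().constData(), NULL, NULL);
    if (err < 0) {
        qWarning() << "avformat_open_input failed:" << fferror(err);
        return false;
    }
    return true;
}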