/**
 * Runs a single frame through the configured libavfilter graph.
 *
 * @param source frame fed to the buffer source; AV_BUFFERSRC_FLAG_KEEP_REF
 *               makes the graph take its own reference, so the caller keeps
 *               ownership of @p source.
 * @return newly allocated filtered frame; ownership passes to the caller,
 *         who must release it with av_frame_free().
 * @throws std::runtime_error on allocation failure, or when the graph cannot
 *         accept the input frame or produce a filtered frame.
 */
AVFrame* Utility::FilterApplier::applyToFrame(AVFrame &source) {
    auto frame = av_frame_alloc();
    // Fix: the original passed a potentially null frame to the buffersink.
    if (!frame) {
        throw std::runtime_error("Could not allocate the output frame.");
    }
    if (av_buffersrc_add_frame_flags(buffersourceContext_, &source, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
        // av_frame_free() already unrefs; no separate av_frame_unref() needed.
        av_frame_free(&frame);
        throw std::runtime_error("Could not feed the frame into the filtergraph.");
    }
    if (av_buffersink_get_frame(buffersinkContext_, frame) < 0) {
        av_frame_free(&frame);
        throw std::runtime_error("Could not pull the filtered frame from the filtergraph.");
    }
    return frame;
}
// Pushes one video frame into the libavfilter graph.
// The graph is (re)built via setup() whenever the frame geometry or pixel
// format changes, or when the filter options changed; if setup() fails the
// filter is disabled so playback continues without it.
// avframe borrows the VideoFrame's plane pointers directly (zero-copy);
// AV_BUFFERSRC_FLAG_KEEP_REF tells the graph to take its own reference.
// Returns true when the frame was accepted by the buffer source.
bool LibAVFilterPrivate::push(Frame *frame, qreal pts)
{
    VideoFrame *vf = static_cast<VideoFrame*>(frame);
    // Rebuild the graph if the input parameters no longer match.
    if (width != vf->width() || height != vf->height()
            || pixfmt != vf->pixelFormatFFmpeg() || options_changed) {
        width = vf->width();
        height = vf->height();
        pixfmt = (AVPixelFormat)vf->pixelFormatFFmpeg();
        options_changed = false;
        if (!setup()) {
            qWarning("setup filter graph error");
            enabled = false; // skip this filter and avoid crash
            return false;
        }
    }
    Q_ASSERT(avframe);
    avframe->pts = pts * 1000000.0; // time_base is 1/1000000
    avframe->width = vf->width();
    avframe->height = vf->height();
    avframe->format = pixfmt = (AVPixelFormat)vf->pixelFormatFFmpeg();
    // Borrow the plane pointers from the source frame — no pixel copy here.
    for (int i = 0; i < vf->planeCount(); ++i) {
        avframe->data[i] = vf->bits(i);
        avframe->linesize[i] = vf->bytesPerLine(i);
    }
    int ret = av_buffersrc_add_frame_flags(in_filter_ctx, avframe, AV_BUFFERSRC_FLAG_KEEP_REF);
    if (ret != 0) {
        qWarning("av_buffersrc_add_frame error: %s", av_err2str(ret));
        return false;
    }
    return true;
}
// Runs the decoder's current frame (mFramePtr) through the filter graph and
// returns the filtered frame, or nullptr on any failure.
// A positive (w, h) updates the scale filter before filtering.
AVFrame* VideoDecoder::getFrame(int w, int h)
{
    int ret = -1;
    if (w > 0 && h > 0) {
        mFilters.setScale(w, h);
    }
    // Lazily initialise the filter graph on first use.
    if (!mFilters.hasInited() && mFilters.init(this) < 0) {
        LOGE("Init filters failed");
        goto failed;
    }
    // KEEP_REF: the graph takes its own reference, mFramePtr stays owned here.
    if (av_buffersrc_add_frame_flags(mFilters.mBufSrcCtxPtr, mFramePtr, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
        LOGE("Buffer src add frame failed");
        goto failed;
    }
    ret = av_buffersink_get_frame(mFilters.mBufSinkCtxPtr, mFilterFramePtr);
    if (ret < 0) {
        LOGE("Get frame failed");
        goto failed;
    }
    // NOTE(review): mFilterFramePtr appears to be a reused member frame —
    // confirm it is av_frame_unref()'d elsewhere before the next pull.
    return mFilterFramePtr;
failed:
    return nullptr;
}
/**
 * Runs one processing step of the filter graph: feeds one frame per input
 * into its buffer-source filter, then pulls the filtered result(s) into
 * @p output from the last filter (the buffer sink).
 *
 * When audio inputs carry unequal frame sizes, frames are first staged in
 * _inputAudioFrameBuffers and each buffersrc is fed the same (minimum)
 * number of samples; otherwise the staging buffers are bypassed.
 *
 * @throws std::runtime_error if a frame cannot be pushed to a source or
 *         pulled from the sink (EAGAIN/EOF from the sink are not errors).
 */
void FilterGraph::process(const std::vector<IFrame*>& inputs, IFrame& output)
{
    // Init the filter graph
    if(!_isInit)
        init(inputs, output);

    // Check whether we can bypass the input audio buffers
    const bool bypassBuffers = _inputAudioFrameBuffers.empty() ||
                               (areInputFrameSizesEqual(inputs) && areFrameBuffersEmpty());
    size_t minInputFrameSamplesNb = 0;
    if(!bypassBuffers)
    {
        // Fill the frame buffer with inputs
        for(size_t index = 0; index < inputs.size(); ++index)
        {
            if(!inputs.at(index)->getDataSize())
            {
                LOG_DEBUG("Empty frame from filter graph input " << index
                          << ". Remaining audio frames in buffer: "
                          << _inputAudioFrameBuffers.at(index).getBufferSize());
                continue;
            }
            _inputAudioFrameBuffers.at(index).addFrame(inputs.at(index));
        }

        // Get the minimum input frames size
        minInputFrameSamplesNb = getMinInputFrameSamplesNb(inputs);
    }

    // Setup input frames into the filter graph
    for(size_t index = 0; index < inputs.size(); ++index)
    {
        // Retrieve frame from buffer or directly from input
        IFrame* inputFrame = (bypassBuffers) ? inputs.at(index)
                : _inputAudioFrameBuffers.at(index).getFrameSampleNb(minInputFrameSamplesNb);
        // AV_BUFFERSRC_FLAG_PUSH: run the graph immediately after adding.
        const int ret = av_buffersrc_add_frame_flags(_filters.at(index)->getAVFilterContext(),
                                                     &inputFrame->getAVFrame(), AV_BUFFERSRC_FLAG_PUSH);
        if(ret < 0)
        {
            throw std::runtime_error("Error when adding a frame to the source buffer used to start to process filters: " + getDescriptionFromErrorCode(ret));
        }
    }

    // Pull filtered data from the filter graph
    // (EOF/EAGAIN simply terminate the pull loop; anything else is fatal)
    for(;;)
    {
        const int ret = av_buffersink_get_frame(_filters.at(_filters.size() - 1)->getAVFilterContext(), &output.getAVFrame());
        if(ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
            break;
        if(ret < 0)
        {
            throw std::runtime_error("Error reading buffer from buffersink: " + getDescriptionFromErrorCode(ret));
        }
    }
}
/* Feed one decoded frame into the per-stream filter graph and encode/write
 * every filtered frame the graph yields.  Normal graph drain (EAGAIN/EOF
 * from the sink) is reported as success (0); any other error code is
 * returned unchanged. */
static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
{
    AVFrame *out;
    int ret;

    av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");

    /* push the decoded frame into the filtergraph */
    ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
                                       frame, 0);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
        return ret;
    }

    /* pull filtered frames from the filtergraph */
    for (;;) {
        out = av_frame_alloc();
        if (!out) {
            ret = AVERROR(ENOMEM);
            break;
        }
        av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
        ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx, out);
        if (ret < 0) {
            /* if no more frames for output - returns AVERROR(EAGAIN)
             * if flushed and no more frames for output - returns AVERROR_EOF
             * rewrite retcode to 0 to show it as normal procedure completion */
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                ret = 0;
            av_frame_free(&out);
            break;
        }
        /* let the encoder choose the picture type */
        out->pict_type = AV_PICTURE_TYPE_NONE;
        ret = encode_write_frame(out, stream_index, NULL);
        if (ret < 0)
            break; /* out is presumably owned/freed by encode_write_frame */
    }
    return ret;
}
EXPORT int start_capture(void *actx) { struct liveStream *ctx = (struct liveStream *)actx; int got_frame; int ret; AVPacket packet; AVFormatContext *ic; long long start_time; struct lsInput* input = NULL; AVRational av_time_base_q = {1, AV_TIME_BASE}; if(!ctx) { ret = -1; goto end; } while(1) { AVCodecContext *dec_ctx = NULL; input = get_best_input(ctx); if(!input) { continue; } dec_ctx = input->dec_ctx; ic = input->ic; if (ic->start_time != AV_NOPTS_VALUE) start_time = ic->start_time; ret = get_input_packet(input,&packet); if (ret == AVERROR(EAGAIN)) { continue; } else if (ret == AVERROR_EOF) { output_packet(input,NULL); input->eof_reached = 1; continue; } if(ret < 0) { av_log(NULL,AV_LOG_ERROR,"No Input packet %x\n",ret); break; } if(input->id != 1) { if (packet.pts != AV_NOPTS_VALUE) { packet.pts -= av_rescale_q(start_time, av_time_base_q, ic->streams[0]->time_base); } if (packet.dts != AV_NOPTS_VALUE) packet.dts -= av_rescale_q(start_time, av_time_base_q, ic->streams[0]->time_base); } if(packet.stream_index == 0) { ret = avcodec_decode_video2(dec_ctx, input->InFrame, &got_frame, &packet); if (ret < 0) { av_log(NULL, AV_LOG_ERROR, "Error decoding video\n"); goto end; } if(!got_frame) continue; } else { continue; } av_free_packet(&packet); input->InFrame->pts = av_frame_get_best_effort_timestamp(input->InFrame); take_filter_lock(&ctx->filter_lock); if (av_buffersrc_add_frame_flags(input->in_filter, input->InFrame, AV_BUFFERSRC_FLAG_PUSH) < 0) { av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n"); } give_filter_lock(&ctx->filter_lock); reap_filter(ctx); } av_frame_unref(input->InFrame); end: return ret; }
/* Demo entry point: decodes the video stream of the given file, runs every
 * decoded frame through the filtergraph built from filter_descr, and hands
 * each filtered frame to display_frame().
 * NOTE(review): written against a pre-3.x FFmpeg — avcodec_get_frame_defaults,
 * avcodec_decode_video2 and av_free_packet are deprecated/removed in modern
 * releases. */
int main(int argc, char **argv)
{
    int ret;
    AVPacket packet;
    AVFrame *frame = av_frame_alloc();
    AVFrame *filt_frame = av_frame_alloc();
    int got_frame;

    if (!frame || !filt_frame) {
        perror("Could not allocate frame");
        exit(1);
    }
    if (argc != 2) {
        fprintf(stderr, "Usage: %s file\n", argv[0]);
        exit(1);
    }

    /* one-time global registration (required before FFmpeg 4.0) */
    avcodec_register_all();
    av_register_all();
    avfilter_register_all();

    if ((ret = open_input_file(argv[1])) < 0)
        goto end;
    if ((ret = init_filters(filter_descr)) < 0)
        goto end;

    /* read all packets */
    while (1) {
        if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
            break;

        if (packet.stream_index == video_stream_index) {
            avcodec_get_frame_defaults(frame);
            got_frame = 0;
            ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, &packet);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error decoding video\n");
                break;
            }

            if (got_frame) {
                /* best-effort timestamp keeps graph timing usable when pts
                 * is missing */
                frame->pts = av_frame_get_best_effort_timestamp(frame);

                /* push the decoded frame into the filtergraph */
                if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
                    break;
                }

                /* pull filtered frames from the filtergraph */
                while (1) {
                    ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
                    /* EAGAIN: graph needs more input; EOF: fully flushed */
                    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                        break;
                    if (ret < 0)
                        goto end;
                    display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base);
                    av_frame_unref(filt_frame);
                }
                av_frame_unref(frame);
            }
        }
        av_free_packet(&packet);
    }
end:
    avfilter_graph_free(&filter_graph);
    if (dec_ctx)
        avcodec_close(dec_ctx);
    avformat_close_input(&fmt_ctx);
    av_frame_free(&frame);
    av_frame_free(&filt_frame);

    if (ret < 0 && ret != AVERROR_EOF) {
        char buf[1024];
        av_strerror(ret, buf, sizeof(buf));
        fprintf(stderr, "Error occurred: %s\n", buf);
        exit(1);
    }
    exit(0);
}
/* Demo entry point: decodes the audio stream of the given file, runs the
 * decoded samples through the filtergraph built from filter_descr, and
 * prints each filtered frame.
 * packet0 keeps the ORIGINAL packet (for freeing), while packet.data/size
 * are advanced over partially consumed data — avcodec_decode_audio4 may
 * decode only part of a packet, so the same packet is re-submitted until
 * packet.size reaches 0.
 * NOTE(review): avcodec_decode_audio4 / av_free_packet are deprecated in
 * modern FFmpeg. */
int main(int argc, char **argv)
{
    int ret;
    AVPacket packet0, packet;
    AVFrame *frame = av_frame_alloc();
    AVFrame *filt_frame = av_frame_alloc();
    int got_frame;

    if (!frame || !filt_frame) {
        perror("Could not allocate frame");
        exit(1);
    }
    if (argc != 2) {
        fprintf(stderr, "Usage: %s file | %s\n", argv[0], player);
        exit(1);
    }

    /* one-time global registration (required before FFmpeg 4.0) */
    avcodec_register_all();
    av_register_all();
    avfilter_register_all();

    if ((ret = open_input_file(argv[1])) < 0)
        goto end;
    if ((ret = init_filters(filter_descr)) < 0)
        goto end;

    /* read all packets */
    packet0.data = NULL;
    packet.data = NULL;
    while (1) {
        /* only read a new packet once the previous one is fully consumed */
        if (!packet0.data) {
            if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
                break;
            packet0 = packet;
        }

        if (packet.stream_index == audio_stream_index) {
            got_frame = 0;
            ret = avcodec_decode_audio4(dec_ctx, frame, &got_frame, &packet);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error decoding audio\n");
                continue;
            }
            /* advance past the bytes the decoder consumed */
            packet.size -= ret;
            packet.data += ret;

            if (got_frame) {
                /* push the audio data from decoded frame into the filtergraph */
                if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, 0) < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error while feeding the audio filtergraph\n");
                    break;
                }

                /* pull filtered audio from the filtergraph */
                while (1) {
                    ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
                    /* EAGAIN: graph needs more input; EOF: fully flushed */
                    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                        break;
                    if (ret < 0)
                        goto end;
                    print_frame(filt_frame);
                    av_frame_unref(filt_frame);
                }
            }

            if (packet.size <= 0)
                av_free_packet(&packet0); /* also resets packet0.data to NULL */
        } else {
            /* discard non-wanted packets */
            av_free_packet(&packet0);
        }
    }
end:
    avfilter_graph_free(&filter_graph);
    avcodec_close(dec_ctx);
    avformat_close_input(&fmt_ctx);
    av_frame_free(&frame);
    av_frame_free(&filt_frame);

    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        exit(1);
    }
    exit(0);
}
int main(int argc, char **argv) { int ret; AVPacket packet; AVFrame *frame = av_frame_alloc(); AVFrame *filt_frame = av_frame_alloc(); if (!frame || !filt_frame) { perror("Could not allocate frame"); exit(1); } if (argc != 2) { fprintf(stderr, "Usage: %s file | %s\n", argv[0], player); exit(1); } if ((ret = open_input_file(argv[1])) < 0) goto end; if ((ret = init_filters(filter_descr)) < 0) goto end; /* read all packets */ while (1) { if ((ret = av_read_frame(fmt_ctx, &packet)) < 0) break; if (packet.stream_index == audio_stream_index) { ret = avcodec_send_packet(dec_ctx, &packet); if (ret < 0) { av_log(NULL, AV_LOG_ERROR, "Error while sending a packet to the decoder\n"); break; } while (ret >= 0) { ret = avcodec_receive_frame(dec_ctx, frame); if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) { break; } else if (ret < 0) { av_log(NULL, AV_LOG_ERROR, "Error while receiving a frame from the decoder\n"); goto end; } if (ret >= 0) { /* push the audio data from decoded frame into the filtergraph */ if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) { av_log(NULL, AV_LOG_ERROR, "Error while feeding the audio filtergraph\n"); break; } /* pull filtered audio from the filtergraph */ while (1) { ret = av_buffersink_get_frame(buffersink_ctx, filt_frame); if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) break; if (ret < 0) goto end; print_frame(filt_frame); av_frame_unref(filt_frame); } av_frame_unref(frame); } } } av_packet_unref(&packet); } end: avfilter_graph_free(&filter_graph); avcodec_free_context(&dec_ctx); avformat_close_input(&fmt_ctx); av_frame_free(&frame); av_frame_free(&filt_frame); if (ret < 0 && ret != AVERROR_EOF) { fprintf(stderr, "Error occurred: %s\n", av_err2str(ret)); exit(1); } exit(0); }
/* Convenience wrapper: submit a frame to the buffer source with default
 * flags (0), i.e. the filter takes over the frame's references.
 * See av_buffersrc_add_frame_flags() for the full semantics. */
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
{
    return av_buffersrc_add_frame_flags(ctx, frame, 0);
}
/* Convenience wrapper: submit a frame while the caller keeps ownership.
 * AV_BUFFERSRC_FLAG_KEEP_REF makes the filter take a new reference instead
 * of stealing the caller's, which is why casting away const here is safe:
 * the caller's frame is not modified. */
int attribute_align_arg av_buffersrc_write_frame(AVFilterContext *ctx, const AVFrame *frame)
{
    return av_buffersrc_add_frame_flags(ctx, (AVFrame *)frame, AV_BUFFERSRC_FLAG_KEEP_REF);
}
/*
 * Decodes one compressed audio frame from the current source and, when the
 * decoder emits output, pushes the PCM frame into the graph's buffer source
 * and drains the sink.
 *
 * Returns VOD_OK when the packet produced no output yet, VOD_BAD_DATA on
 * decode errors, VOD_ALLOC_FAILED when the graph rejects the frame, or the
 * result of audio_filter_read_filter_sink() otherwise.
 */
static vod_status_t
audio_filter_process_frame(audio_filter_state_t* state, u_char* buffer)
{
    audio_filter_source_t* source = state->cur_source;
    input_frame_t* cur_frame = source->cur_frame;
    AVPacket input_packet;
    int got_frame = 0;
    int err;
#ifdef AUDIO_FILTER_DEBUG
    size_t data_size;

    audio_filter_append_debug_data("input", "aac", buffer, cur_frame->size);
#endif // AUDIO_FILTER_DEBUG

    /* wrap the caller's buffer in a keyframe packet and advance the dts */
    vod_memzero(&input_packet, sizeof(input_packet));
    input_packet.data = buffer;
    input_packet.size = cur_frame->size;
    input_packet.dts = state->dts;
    input_packet.pts = state->dts + cur_frame->pts_delay;
    input_packet.duration = cur_frame->duration;
    input_packet.flags = AV_PKT_FLAG_KEY;
    state->dts += cur_frame->duration;

    av_frame_unref(state->decoded_frame);

    err = avcodec_decode_audio4(source->decoder, state->decoded_frame, &got_frame, &input_packet);
    if (err < 0)
    {
        vod_log_error(VOD_LOG_ERR, state->request_context->log, 0,
            "audio_filter_process_frame: avcodec_decode_audio4 failed %d", err);
        return VOD_BAD_DATA;
    }

    /* the decoder may buffer input and emit nothing for this packet */
    if (!got_frame)
    {
        return VOD_OK;
    }

#ifdef AUDIO_FILTER_DEBUG
    data_size = av_samples_get_buffer_size(
        NULL,
        source->decoder->channels,
        state->decoded_frame->nb_samples,
        source->decoder->sample_fmt,
        1);
    audio_filter_append_debug_data(source->buffer_src->name, "pcm", state->decoded_frame->data[0], data_size);
#endif // AUDIO_FILTER_DEBUG

    /* PUSH: run the graph immediately so the sink can be drained below */
    err = av_buffersrc_add_frame_flags(source->buffer_src, state->decoded_frame, AV_BUFFERSRC_FLAG_PUSH);
    if (err < 0)
    {
        vod_log_error(VOD_LOG_ERR, state->request_context->log, 0,
            "audio_filter_process_frame: av_buffersrc_add_frame_flags failed %d", err);
        return VOD_ALLOC_FAILED;
    }

    return audio_filter_read_filter_sink(state);
}
/* Decode one compressed audio frame, push the decoded samples through the
 * filter graph, encode every filtered frame and hand the resulting packets
 * to audio_filter_write_frame().  A NULL frame flushes the encoder.
 * NOTE(review): avcodec_get_frame_defaults / avcodec_decode_audio4 /
 * avcodec_encode_audio2 / av_free_packet are deprecated in modern FFmpeg. */
vod_status_t
audio_filter_process_frame(void* context, input_frame_t* frame, u_char* buffer)
{
    audio_filter_state_t* state = (audio_filter_state_t*)context;
    vod_status_t rc;
    AVPacket output_packet;
    AVPacket input_packet;
    int got_packet;
    int got_frame;
    int ret;
#ifdef AUDIO_FILTER_DEBUG
    size_t data_size;
#endif // AUDIO_FILTER_DEBUG

    /* NULL frame is the end-of-stream signal: drain the encoder */
    if (frame == NULL)
    {
        return audio_filter_flush_encoder(state);
    }

#ifdef AUDIO_FILTER_DEBUG
    audio_filter_append_debug_data(AUDIO_FILTER_DEBUG_FILENAME_INPUT, buffer, frame->size);
#endif // AUDIO_FILTER_DEBUG

    /* wrap the caller's buffer in a keyframe packet and advance the dts */
    vod_memzero(&input_packet, sizeof(input_packet));
    input_packet.data = buffer;
    input_packet.size = frame->size;
    input_packet.dts = state->dts;
    input_packet.pts = (state->dts + frame->pts_delay);
    input_packet.duration = frame->duration;
    input_packet.flags = AV_PKT_FLAG_KEY;
    state->dts += frame->duration;

    avcodec_get_frame_defaults(state->decoded_frame);

    got_frame = 0;
    ret = avcodec_decode_audio4(state->decoder, state->decoded_frame, &got_frame, &input_packet);
    if (ret < 0)
    {
        vod_log_error(VOD_LOG_ERR, state->request_context->log, 0,
            "audio_filter_process_frame: avcodec_decode_audio4 failed %d", ret);
        return VOD_BAD_DATA;
    }

    /* the decoder may buffer input and emit nothing for this packet */
    if (!got_frame)
    {
        return VOD_OK;
    }

#ifdef AUDIO_FILTER_DEBUG
    data_size = av_samples_get_buffer_size(
        NULL,
        state->decoder->channels,
        state->decoded_frame->nb_samples,
        state->decoder->sample_fmt,
        1);
    audio_filter_append_debug_data(AUDIO_FILTER_DEBUG_FILENAME_DECODED, state->decoded_frame->data[0], data_size);
#endif // AUDIO_FILTER_DEBUG

    /* PUSH: run the graph immediately so the sink can be drained below */
    ret = av_buffersrc_add_frame_flags(state->buffer_src, state->decoded_frame, AV_BUFFERSRC_FLAG_PUSH);
    if (ret < 0)
    {
        vod_log_error(VOD_LOG_ERR, state->request_context->log, 0,
            "audio_filter_process_frame: av_buffersrc_add_frame_flags failed %d", ret);
        return VOD_ALLOC_FAILED;
    }

    /* drain every filtered frame currently available (NO_REQUEST: don't
     * ask upstream filters for more input) and encode it */
    for (;;)
    {
        ret = av_buffersink_get_frame_flags(state->buffer_sink, state->filtered_frame, AV_BUFFERSINK_FLAG_NO_REQUEST);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        {
            break;
        }

        if (ret < 0)
        {
            vod_log_error(VOD_LOG_ERR, state->request_context->log, 0,
                "audio_filter_process_frame: av_buffersink_get_frame_flags failed %d", ret);
            return VOD_UNEXPECTED;
        }

#ifdef AUDIO_FILTER_DEBUG
        data_size = av_samples_get_buffer_size(
            NULL,
            state->encoder->channels,
            state->filtered_frame->nb_samples,
            state->encoder->sample_fmt,
            1);
        audio_filter_append_debug_data(AUDIO_FILTER_DEBUG_FILENAME_FILTERED, state->filtered_frame->data[0], data_size);
#endif // AUDIO_FILTER_DEBUG

        av_init_packet(&output_packet);
        output_packet.data = NULL; // packet data will be allocated by the encoder
        output_packet.size = 0;

        got_packet = 0;
        ret = avcodec_encode_audio2(state->encoder, &output_packet, state->filtered_frame, &got_packet);
        if (ret < 0)
        {
            vod_log_error(VOD_LOG_ERR, state->request_context->log, 0,
                "audio_filter_process_frame: avcodec_encode_audio2 failed %d", ret);
            return VOD_ALLOC_FAILED;
        }

        /* the encoder may buffer samples and emit no packet yet */
        if (got_packet)
        {
            rc = audio_filter_write_frame(state, &output_packet);

            av_free_packet(&output_packet);

            if (rc != VOD_OK)
            {
                return rc;
            }
        }

        av_frame_unref(state->filtered_frame);
    }

    return VOD_OK;
}
/* JNI entry point: plays the given file to an Android Surface while running
 * every decoded video frame through the filtergraph described by
 * filterDescr.  Audio packets are forwarded to play_audio() when enabled.
 * Control flow: the `again` flag (set elsewhere when the user switches
 * filters) jumps to the `again:` label, SKIPPING the `end:` cleanup so the
 * decoder state survives for the next filter; normal termination falls
 * through `end:` into `again:`. */
JNIEXPORT jint JNICALL Java_com_frank_ffmpeg_VideoPlayer_filter
        (JNIEnv * env, jclass clazz, jstring filePath, jobject surface, jstring filterDescr)
{
    int ret;
    const char * file_name = (*env)->GetStringUTFChars(env, filePath, JNI_FALSE);
    const char *filter_descr = (*env)->GetStringUTFChars(env, filterDescr, JNI_FALSE);
    // open the input file (only on first call; skipped when already playing)
    if(!is_playing){
        LOGI("open_input...");
        if((ret = open_input(env, file_name, surface)) < 0){
            LOGE("Couldn't allocate video frame.");
            goto end;
        }
        // register all filters (required before FFmpeg 4.0)
        avfilter_register_all();
        filter_frame = av_frame_alloc();
        if(filter_frame == NULL) {
            LOGE("Couldn't allocate filter frame.");
            ret = -1;
            goto end;
        }
        // initialise the audio decoder
        if ((ret = init_audio(env, clazz)) < 0){
            LOGE("Couldn't init_audio.");
            goto end;
        }
    }
    // (re)build the filter graph from the requested description
    if ((ret = init_filters(filter_descr)) < 0){
        LOGE("init_filter error, ret=%d\n", ret);
        goto end;
    }
    is_playing = 1;
    int frameFinished;
    AVPacket packet;
    while(av_read_frame(pFormatCtx, &packet)>=0 && !release) {
        // filter switched: leave this playback loop (skips cleanup, see above)
        if(again){
            goto again;
        }
        // is this a video packet?
        if(packet.stream_index == video_stream_index) {
            // decode the frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
            if (frameFinished) {
                // push the decoded frame into filter_graph
                if (av_buffersrc_add_frame_flags(buffersrc_ctx, pFrame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
                    LOGE("Error while feeding the filter_graph\n");
                    break;
                }
                // pull the filtered frame back out of the filter graph
                ret = av_buffersink_get_frame(buffersink_ctx, filter_frame);
                if (ret >= 0){
                    // lock native window
                    ANativeWindow_lock(nativeWindow, &windowBuffer, 0);
                    // pixel format conversion to RGBA
                    sws_scale(sws_ctx, (uint8_t const * const *)filter_frame->data,
                              filter_frame->linesize, 0, pCodecCtx->height,
                              pFrameRGBA->data, pFrameRGBA->linesize);
                    // fetch the strides
                    uint8_t * dst = windowBuffer.bits;
                    int dstStride = windowBuffer.stride * 4;
                    uint8_t * src = pFrameRGBA->data[0];
                    int srcStride = pFrameRGBA->linesize[0];
                    // window stride and frame stride differ, so copy row by row
                    int h;
                    for (h = 0; h < pCodecCtx->height; h++) {
                        memcpy(dst + h * dstStride, src + h * srcStride, (size_t) srcStride);
                    }
                    ANativeWindow_unlockAndPost(nativeWindow);
                }
                av_frame_unref(filter_frame);
            }
            // pace playback when there is no audio clock to follow
            if (!playAudio){
                usleep((unsigned long) (1000 * 40));//1000 * 40
            }
        } else if(packet.stream_index == audio_stream_index){// audio packet
            if (playAudio){
                play_audio(env, &packet, pFrame);
            }
        }
        av_packet_unref(&packet);
    }
end:
    is_playing = 0;
    // free memory and close the files
    // NOTE(review): this cleanup block has several suspicious calls —
    //  * `buffer` is released twice (av_free below, then free further down);
    //  * `free(&windowBuffer)` passes the address of a non-heap object to
    //    free(), which is undefined behavior;
    //  * `sws_ctx`/`audio_swr_ctx` should go through sws_freeContext()/
    //    swr_free(), and `filter_frame`/`pFrame` through av_frame_free();
    //  * freeing buffersrc_ctx/buffersink_ctx individually and then
    //    avfilter_graph_free() risks a double free (the graph owns them).
    // Fixing these needs the allocation sites, which are outside this view.
    av_free(buffer);
    av_free(pFrameRGBA);
    av_free(filter_frame);
    av_free(pFrame);
    avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx);
    avfilter_free(buffersrc_ctx);
    avfilter_free(buffersink_ctx);
    avfilter_graph_free(&filter_graph);
    avcodec_close(audioCodecCtx);
    free(buffer);
    free(sws_ctx);
    free(&windowBuffer);
    free(out_buffer);
    free(audio_swr_ctx);
    free(audio_track);
    free(audio_track_write_mid);
    ANativeWindow_release(nativeWindow);
    (*env)->ReleaseStringUTFChars(env, filePath, file_name);
    (*env)->ReleaseStringUTFChars(env, filterDescr, filter_descr);
    LOGE("do release...");
again:
    again = 0;
    LOGE("play again...");
    return ret;
}