AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_frame(const AVFrame *frame, int perms) { AVFilterBufferRef *samplesref = avfilter_get_audio_buffer_ref_from_arrays((uint8_t **)frame->data, frame->linesize[0], perms, frame->nb_samples, frame->format, av_frame_get_channel_layout(frame)); if (!samplesref) return NULL; if (avfilter_copy_frame_props(samplesref, frame) < 0) { samplesref->buf->data[0] = NULL; avfilter_unref_bufferp(&samplesref); } return samplesref; }
/*
 * hqdn3d: denoise one incoming frame and forward the result.
 *
 * FIX: `direct` was left uninitialized when the input buffer was not
 * writable, so `if (!direct)` later read an indeterminate value
 * (undefined behavior; could skip the unref or double-free `in`).
 * It now defaults to 0 and is only set to 1 on the in-place path.
 */
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
{
    HQDN3DContext *hqdn3d = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *out;
    int direct = 0, c;

    if ((in->perms & AV_PERM_WRITE) && !(in->perms & AV_PERM_PRESERVE)) {
        /* input is writable and need not be preserved: filter in place */
        direct = 1;
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
        if (!out) {
            avfilter_unref_bufferp(&in);
            return AVERROR(ENOMEM);
        }
        avfilter_copy_buffer_ref_props(out, in);
        out->video->w = outlink->w;
        out->video->h = outlink->h;
    }

    /* plane 0 = luma (coef pair 0/1), planes 1-2 = chroma (pair 2/3),
     * chroma dimensions are shifted by the subsampling factors */
    for (c = 0; c < 3; c++) {
        denoise(hqdn3d, in->data[c], out->data[c],
                hqdn3d->line, &hqdn3d->frame_prev[c],
                in->video->w >> (!!c * hqdn3d->hsub),
                in->video->h >> (!!c * hqdn3d->vsub),
                in->linesize[c], out->linesize[c],
                hqdn3d->coefs[c ? 2 : 0], hqdn3d->coefs[c ? 3 : 1]);
    }

    if (!direct)
        avfilter_unref_bufferp(&in);

    return ff_filter_frame(outlink, out);
}
AVFilterBufferRef *avfilter_get_video_buffer_ref_from_frame(const AVFrame *frame, int perms) { AVFilterBufferRef *picref = avfilter_get_video_buffer_ref_from_arrays(frame->data, frame->linesize, perms, frame->width, frame->height, frame->format); if (!picref) return NULL; if (avfilter_copy_frame_props(picref, frame) < 0) { picref->buf->data[0] = NULL; avfilter_unref_bufferp(&picref); } return picref; }
/*
 * pixdesctest: allocate a cleared output frame for the incoming picture
 * and kick off the frame on the output link.
 *
 * FIX: the palette copy read from outpicref->data[1] — i.e. it copied the
 * (just-zeroed) destination onto itself, so paletted formats lost their
 * palette.  The source must be the INPUT frame's palette, picref->data[1].
 */
static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
{
    PixdescTestContext *priv = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *outpicref, *for_next_filter;
    int i, ret = 0;

    outpicref = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
    if (!outpicref)
        return AVERROR(ENOMEM);
    avfilter_copy_buffer_ref_props(outpicref, picref);

    /* zero every plane so pixels the filter never writes are deterministic;
     * negative linesizes mean the plane pointer refers to the last row */
    for (i = 0; i < 4; i++) {
        int h = outlink->h;
        h = i == 1 || i == 2 ? h >> priv->pix_desc->log2_chroma_h : h;
        if (outpicref->data[i]) {
            uint8_t *data = outpicref->data[i] +
                (outpicref->linesize[i] > 0 ? 0 : outpicref->linesize[i] * (h - 1));
            memset(data, 0, FFABS(outpicref->linesize[i]) * h);
        }
    }

    /* copy palette (256 RGBA entries) from the input frame */
    if (priv->pix_desc->flags & PIX_FMT_PAL ||
        priv->pix_desc->flags & PIX_FMT_PSEUDOPAL)
        memcpy(outpicref->data[1], picref->data[1], 256*4);

    /* forward a second reference; keep ours in outlink->out_buf */
    for_next_filter = avfilter_ref_buffer(outpicref, ~0);
    if (for_next_filter)
        ret = ff_start_frame(outlink, for_next_filter);
    else
        ret = AVERROR(ENOMEM);

    if (ret < 0) {
        avfilter_unref_bufferp(&outpicref);
        return ret;
    }

    outlink->out_buf = outpicref;
    return 0;
}
/*
 * select filter teardown: free the select expression, drain and release
 * any queued frames, and shut down the scene-detection decoder if one
 * was opened.
 */
static av_cold void uninit(AVFilterContext *ctx)
{
    SelectContext *select = ctx->priv;
    AVFilterBufferRef *queued;

    av_expr_free(select->expr);
    select->expr = NULL;

    /* drop every frame still queued in the pending-frames FIFO */
    if (select->pending_frames) {
        while (av_fifo_generic_read(select->pending_frames, &queued,
                                    sizeof(queued), NULL) == sizeof(queued))
            avfilter_unref_buffer(queued);
    }
    av_fifo_free(select->pending_frames);
    select->pending_frames = NULL;

    if (select->do_scene_detect) {
        avfilter_unref_bufferp(&select->prev_picref);
        avcodec_close(select->avctx);
        av_freep(&select->avctx);
    }
}
AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_frame(const AVFrame *frame, int perms) { AVFilterBufferRef *samplesref; int channels = av_frame_get_channels(frame); int64_t layout = av_frame_get_channel_layout(frame); if (layout && av_get_channel_layout_nb_channels(layout) != av_frame_get_channels(frame)) { av_log(0, AV_LOG_ERROR, "Layout indicates a different number of channels than actually present\n"); return NULL; } samplesref = avfilter_get_audio_buffer_ref_from_arrays_channels( (uint8_t **)frame->extended_data, frame->linesize[0], perms, frame->nb_samples, frame->format, channels, layout); if (!samplesref) return NULL; if (avfilter_copy_frame_props(samplesref, frame) < 0) { samplesref->buf->data[0] = NULL; avfilter_unref_bufferp(&samplesref); } return samplesref; }
int main(int argc, char **argv) { //curses int color_pair; if(initscr() == NULL){ fprintf(stderr, "init failure\n"); exit(EXIT_FAILURE); } /* start_colorは色属性を使用するときは最初に必ず実行する. initscrの直後に実行するのがよい習慣らしい. */ if(has_colors() == FALSE || start_color() == ERR){ endwin(); fprintf(stderr, "This term seems not to having Color\n"); exit(EXIT_FAILURE); } if(signal(SIGINT, sig_handler) == SIG_ERR || signal(SIGQUIT, sig_handler) == SIG_ERR){ fprintf(stderr, "signal failure\n"); exit(EXIT_FAILURE); } curs_set(0); /* 色のペアを作る */ color_pair = 1; for(color_pair = 1; color_pair < 256; color_pair++){ init_pair(color_pair, color_pair, color_pair); } refresh(); char filter_descr[10000]; int w, h; w = 80;//COLS; h = 25;//LINES; bak = malloc(sizeof (int) * LINES*COLS+1); sprintf(filter_descr, "scale=%d:%d", w, h); int ret; AVPacket packet; AVFrame frame; int got_frame; if (argc != 2) { fprintf(stderr, "Usage: %s file\n", argv[0]); exit(1); } avcodec_register_all(); av_register_all(); avfilter_register_all(); if ((ret = open_input_file(argv[1])) < 0) goto end; if ((ret = init_filters(filter_descr)) < 0) goto end; /* read all packets */ while (1) { AVFilterBufferRef *picref; if ((ret = av_read_frame(fmt_ctx, &packet)) < 0) break; if (packet.stream_index == video_stream_index) { avcodec_get_frame_defaults(&frame); got_frame = 0; ret = avcodec_decode_video2(dec_ctx, &frame, &got_frame, &packet); if (ret < 0) { av_log(NULL, AV_LOG_ERROR, "Error decoding video\n"); break; } if (got_frame) { frame.pts = av_frame_get_best_effort_timestamp(&frame); /* push the decoded frame into the filtergraph */ if (av_buffersrc_add_frame(buffersrc_ctx, &frame, 0) < 0) { av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n"); break; } /* pull filtered pictures from the filtergraph */ while (repeat_flag) { ret = av_buffersink_get_buffer_ref(buffersink_ctx, &picref, 0); if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) break; if (ret < 0) goto end; if (picref) { display_picref(picref, 
buffersink_ctx->inputs[0]->time_base); avfilter_unref_bufferp(&picref); } } } } av_free_packet(&packet); } end: endwin(); avfilter_graph_free(&filter_graph); if (dec_ctx) avcodec_close(dec_ctx); avformat_close_input(&fmt_ctx); if (ret < 0 && ret != AVERROR_EOF) { char buf[1024]; av_strerror(ret, buf, sizeof(buf)); fprintf(stderr, "Error occurred: %s\n", buf); exit(1); } exit(0); }
/*
 * Entry point: open the input file given on the command line, decode its
 * video stream packet by packet, run every decoded frame through the
 * filtergraph built by init_filters(), and hand each filtered picture to
 * display_picref().  Exits 0 on success/EOF, 1 on error.
 */
int main(int argc, char **argv)
{
    int ret;
    AVPacket packet;
    AVFrame *frame = avcodec_alloc_frame();
    int got_frame;

    if (!frame) {
        perror("Could not allocate frame");
        exit(1);
    }
    if (argc != 2) {
        fprintf(stderr, "Usage: %s file\n", argv[0]);
        exit(1);
    }

    avcodec_register_all();
    av_register_all();
    avfilter_register_all();

    if ((ret = open_input_file(argv[1])) < 0)
        goto end;
    if ((ret = init_filters(filter_descr)) < 0)
        goto end;

    /* read all packets */
    while (1) {
        AVFilterBufferRef *picref;
        if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
            break; /* EOF or read error; inspected after the loop */

        if (packet.stream_index == video_stream_index) {
            avcodec_get_frame_defaults(frame);
            got_frame = 0;
            ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, &packet);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error decoding video\n");
                break;
            }

            if (got_frame) {
                frame->pts = av_frame_get_best_effort_timestamp(frame);

                /* push the decoded frame into the filtergraph */
                if (av_buffersrc_add_frame(buffersrc_ctx, frame) < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
                    break;
                }

                /* pull filtered pictures from the filtergraph */
                while (1) {
                    ret = av_buffersink_get_buffer_ref(buffersink_ctx, &picref, 0);
                    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                        break; /* no more filtered frames right now */
                    if (ret < 0)
                        goto end;

                    if (picref) {
                        display_picref(picref, buffersink_ctx->inputs[0]->time_base);
                        avfilter_unref_bufferp(&picref);
                    }
                }
            }
        }
        av_free_packet(&packet);
    }
end:
    avfilter_graph_free(&filter_graph);
    if (dec_ctx)
        avcodec_close(dec_ctx);
    avformat_close_input(&fmt_ctx);
    av_freep(&frame);

    /* EOF is the normal termination condition, not an error */
    if (ret < 0 && ret != AVERROR_EOF) {
        char buf[1024];
        av_strerror(ret, buf, sizeof(buf));
        fprintf(stderr, "Error occurred: %s\n", buf);
        exit(1);
    }
    exit(0);
}
/*
 * Null sink callback: accept the incoming audio buffer, release the
 * reference, and report success.  Nothing is forwarded.
 */
static int null_filter_frame(AVFilterLink *link, AVFilterBufferRef *samplesref)
{
    avfilter_unref_bufferp(&samplesref);

    return 0;
}
/* decode and play stream. returns 0 or av error code.
 *
 * Loop: read packets from player->fctx, skip non-audio streams, honor
 * pause requests, decode each packet (possibly in several calls), push
 * decoded frames through the filter graph, and hand filtered samples to
 * libao.  Runs until EOF, a read error, or player->doQuit is set. */
static int play (player_t * const player) {
	assert (player != NULL);

	AVPacket pkt;
	av_init_packet (&pkt);
	pkt.data = NULL;
	pkt.size = 0;

	AVFrame *frame = NULL, *filteredFrame = NULL;
	frame = avcodec_alloc_frame ();
	assert (frame != NULL);
	filteredFrame = avcodec_alloc_frame ();
	assert (filteredFrame != NULL);

	while (!player->doQuit) {
		ping ();
		int ret = av_read_frame (player->fctx, &pkt);
		if (ret < 0) {
			/* EOF or read error: return the code to the caller */
			av_free_packet (&pkt);
			return ret;
		} else if (pkt.stream_index != player->streamIdx) {
			/* not the stream we are playing; drop the packet */
			av_free_packet (&pkt);
			continue;
		}

		/* keep the original packet: pkt.data/pkt.size are advanced below
		 * as the decoder consumes bytes, but the free must use the
		 * original pointers */
		AVPacket pkt_orig = pkt;

		/* pausing */
		pthread_mutex_lock (&player->pauseMutex);
		while (true) {
			if (!player->doPause) {
				av_read_play (player->fctx);
				break;
			} else {
				av_read_pause (player->fctx);
			}
			/* sleep until the pause state is toggled */
			pthread_cond_wait (&player->pauseCond, &player->pauseMutex);
		}
		pthread_mutex_unlock (&player->pauseMutex);

		/* one packet may decode into several frames */
		do {
			int got_frame = 0;

			const int decoded = avcodec_decode_audio4 (player->st->codec,
					frame, &got_frame, &pkt);
			if (decoded < 0) {
				/* skip this one */
				break;
			}

			if (got_frame != 0) {
				/* XXX: suppresses warning from resample filter */
				if (frame->pts == (int64_t) AV_NOPTS_VALUE) {
					frame->pts = 0;
				}
				ret = av_buffersrc_write_frame (player->fabuf, frame);
				assert (ret >= 0);

				/* drain everything the filter graph has ready */
				while (true) {
					AVFilterBufferRef *audioref = NULL;
#ifdef HAVE_AV_BUFFERSINK_GET_BUFFER_REF
					/* ffmpeg's compatibility layer is broken in some releases */
					if (av_buffersink_get_buffer_ref (player->fbufsink,
							&audioref, 0) < 0) {
#else
					if (av_buffersink_read (player->fbufsink, &audioref) < 0) {
#endif
						/* try again next frame */
						break;
					}

					ret = avfilter_copy_buf_props (filteredFrame, audioref);
					assert (ret >= 0);

					const int numChannels = av_get_channel_layout_nb_channels (
							filteredFrame->channel_layout);
					const int bps = av_get_bytes_per_sample(filteredFrame->format);
					ao_play (player->aoDev, (char *) filteredFrame->data[0],
							filteredFrame->nb_samples * numChannels * bps);

					avfilter_unref_bufferp (&audioref);
				}
			}

			pkt.data += decoded;
			pkt.size -= decoded;
		} while (pkt.size > 0);

		av_free_packet (&pkt_orig);

		/* update playback position from the last packet's timestamp */
		player->songPlayed = av_q2d (player->st->time_base) * (double) pkt.pts;
		player->lastTimestamp = pkt.pts;
	}

	avcodec_free_frame (&filteredFrame);
	avcodec_free_frame (&frame);

	return 0;
}

/* Tear down playback resources in reverse order of creation: audio
 * device, filter graph, codec, and finally the format context. */
static void finish (player_t * const player) {
	ao_close (player->aoDev);
	player->aoDev = NULL;
	if (player->fgraph != NULL) {
		avfilter_graph_free (&player->fgraph);
		player->fgraph = NULL;
	}
	if (player->st != NULL && player->st->codec != NULL) {
		avcodec_close (player->st->codec);
		player->st = NULL;
	}
	if (player->fctx != NULL) {
		avformat_close_input (&player->fctx);
	}
}
/*
 * overlay filter teardown: drop the cached overlay picture reference,
 * if any (the unref helper tolerates a NULL pointer and nulls it out).
 */
static av_cold void uninit(AVFilterContext *ctx)
{
    OverlayContext *over = ctx->priv;

    avfilter_unref_bufferp(&over->overpicref);
}
/*
 * transpose filter: write each input column as an output row, with the
 * direction bits in trans->dir selecting vertical flips of the source
 * and/or destination (giving the four 90-degree transpose variants).
 */
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
{
    AVFilterLink *outlink = inlink->dst->outputs[0];
    TransContext *trans = inlink->dst->priv;
    AVFilterBufferRef *out;
    int plane;

    out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
    if (!out) {
        avfilter_unref_bufferp(&in);
        return AVERROR(ENOMEM);
    }

    out->pts = in->pts;

    /* transposing swaps the pixel aspect ratio's num and den;
     * 0/den means "unknown" and is passed through unchanged */
    if (in->video->pixel_aspect.num == 0) {
        out->video->pixel_aspect = in->video->pixel_aspect;
    } else {
        out->video->pixel_aspect.num = in->video->pixel_aspect.den;
        out->video->pixel_aspect.den = in->video->pixel_aspect.num;
    }

    for (plane = 0; out->data[plane]; plane++) {
        /* planes 1 and 2 (chroma) are subsampled by hsub/vsub */
        int hsub = plane == 1 || plane == 2 ? trans->hsub : 0;
        int vsub = plane == 1 || plane == 2 ? trans->vsub : 0;
        int pixstep = trans->pixsteps[plane];   /* bytes per pixel in this plane */
        int inh  = in->video->h>>vsub;
        int outw = out->video->w>>hsub;
        int outh = out->video->h>>vsub;
        uint8_t *dst, *src;
        int dstlinesize, srclinesize;
        int x, y;

        dst = out->data[plane];
        dstlinesize = out->linesize[plane];
        src = in->data[plane];
        srclinesize = in->linesize[plane];

        /* dir bit 0: walk the source bottom-up (negative stride) */
        if (trans->dir&1) {
            src += in->linesize[plane] * (inh-1);
            srclinesize *= -1;
        }

        /* dir bit 1: write the destination bottom-up */
        if (trans->dir&2) {
            dst += out->linesize[plane] * (outh-1);
            dstlinesize *= -1;
        }

        /* output row y is built from input column y, one pixel-sized
         * element at a time (src advances by srclinesize per x) */
        for (y = 0; y < outh; y++) {
            switch (pixstep) {
            case 1:
                for (x = 0; x < outw; x++)
                    dst[x] = src[x*srclinesize + y];
                break;
            case 2:
                for (x = 0; x < outw; x++)
                    *((uint16_t *)(dst + 2*x)) =
                        *((uint16_t *)(src + x*srclinesize + y*2));
                break;
            case 3:
                /* 24-bit pixels have no native type: move via a 32-bit
                 * temporary with explicit big-endian 24-bit load/store */
                for (x = 0; x < outw; x++) {
                    int32_t v = AV_RB24(src + x*srclinesize + y*3);
                    AV_WB24(dst + 3*x, v);
                }
                break;
            case 4:
                for (x = 0; x < outw; x++)
                    *((uint32_t *)(dst + 4*x)) =
                        *((uint32_t *)(src + x*srclinesize + y*4));
                break;
            }
            dst += dstlinesize;
        }
    }

    avfilter_unref_bufferp(&in);
    return ff_filter_frame(outlink, out);
}
int main(int argc, char *argv[]) { int ret; AVPacket packet; AVFrame frame; int got_frame; avcodec_register_all(); av_register_all(); avfilter_register_all(); if ((ret = open_input_file("cuc_ieschool.flv")) < 0) goto end; if ((ret = init_filters(filter_descr)) < 0) goto end; #if ENABLE_YUVFILE FILE *fp_yuv = fopen("test.yuv", "wb+"); #endif #if ENABLE_SDL SDL_Surface *screen; SDL_Overlay *bmp; SDL_Rect rect; if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) { printf( "Could not initialize SDL - %s\n", SDL_GetError()); return -1; } screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0); if (!screen) { printf("SDL: could not set video mode - exiting\n"); return -1; } bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height, SDL_YV12_OVERLAY, screen); SDL_WM_SetCaption("Simplest FFmpeg Video Filter", NULL); #endif /* read all packets */ while (1) { AVFilterBufferRef *picref; if ((ret = av_read_frame(pFormatCtx, &packet)) < 0) break; if (packet.stream_index == video_stream_index) { avcodec_get_frame_defaults(&frame); got_frame = 0; ret = avcodec_decode_video2(pCodecCtx, &frame, &got_frame, &packet); if (ret < 0) { printf( "Error decoding video\n"); break; } if (got_frame) { frame.pts = av_frame_get_best_effort_timestamp(&frame); /* push the decoded frame into the filtergraph */ if (av_buffersrc_add_frame(buffersrc_ctx, &frame) < 0) { printf( "Error while feeding the filtergraph\n"); break; } /* pull filtered pictures from the filtergraph */ while (1) { ret = av_buffersink_get_buffer_ref(buffersink_ctx, &picref, 0); if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) break; if (ret < 0) goto end; if (picref) { #if ENABLE_YUVFILE int y_size = picref->video->w * picref->video->h; fwrite(picref->data[0], 1, y_size, fp_yuv); //Y fwrite(picref->data[1], 1, y_size / 4, fp_yuv); //U fwrite(picref->data[2], 1, y_size / 4, fp_yuv); //V #endif #if ENABLE_SDL SDL_LockYUVOverlay(bmp); int y_size = picref->video->w * picref->video->h; 
memcpy(bmp->pixels[0], picref->data[0], y_size); //Y memcpy(bmp->pixels[2], picref->data[1], y_size / 4); //U memcpy(bmp->pixels[1], picref->data[2], y_size / 4); //V bmp->pitches[0] = picref->linesize[0]; bmp->pitches[2] = picref->linesize[1]; bmp->pitches[1] = picref->linesize[2]; SDL_UnlockYUVOverlay(bmp); rect.x = 0; rect.y = 0; rect.w = picref->video->w; rect.h = picref->video->h; SDL_DisplayYUVOverlay(bmp, &rect); //Delay 40ms SDL_Delay(40); #endif avfilter_unref_bufferp(&picref); } } } } av_free_packet(&packet); } #if ENABLE_YUVFILE fclose(fp_yuv); #endif end: avfilter_graph_free(&filter_graph); if (pCodecCtx) avcodec_close(pCodecCtx); avformat_close_input(&pFormatCtx); if (ret < 0 && ret != AVERROR_EOF) { char buf[1024]; av_strerror(ret, buf, sizeof(buf)); printf("Error occurred: %s\n", buf); return -1; } return 0; }