/**
 * Modify the bitrate curve from pass1 for one frame.
 *
 * Evaluates the user rate-control equation (rcc->rc_eq_eval) against a
 * positional table of per-frame statistics, scales the result by
 * rate_factor, applies any matching RcOverride entries, converts bits to
 * a qscale, and finally applies the I/B quant factor/offset tweaks.
 *
 * @param s           encoder context (provides avctx, mb_num, rc state)
 * @param rce         pass-1 statistics entry for this frame
 * @param rate_factor global scale applied to the evaluated bit budget
 * @param frame_num   frame index, matched against rc_override ranges
 * @return the quantizer scale (clamped to >= 1), or -1 if the rc
 *         equation evaluates to NaN
 */
static double get_qscale(MpegEncContext *s, RateControlEntry *rce,
                         double rate_factor, int frame_num)
{
    RateControlContext *rcc = &s->rc_context;
    AVCodecContext *a       = s->avctx;
    const int pict_type     = rce->new_pict_type;
    const double mb_num     = s->mb_num;
    double q, bits;
    int i;

    /* Positional constant table consumed by av_expr_eval(); the order must
     * match the variable-name list the expression was parsed with
     * (presumably "PI, E, iTex, pTex, tex, mv, fCode, iCount, mcVar, var,
     * isI, isP, isB, avgQP, qComp, avgIITex, avgPITex, avgPPTex, avgBPTex,
     * avgTex" — confirm against the parser setup elsewhere in the file). */
    double const_values[] = {
        M_PI,
        M_E,
        rce->i_tex_bits * rce->qscale,
        rce->p_tex_bits * rce->qscale,
        (rce->i_tex_bits + rce->p_tex_bits) * (double)rce->qscale,
        rce->mv_bits / mb_num,
        /* B frames use the mean of forward/backward f_code */
        rce->pict_type == AV_PICTURE_TYPE_B ? (rce->f_code + rce->b_code) * 0.5 : rce->f_code,
        rce->i_count / mb_num,
        rce->mc_mb_var_sum / mb_num,
        rce->mb_var_sum / mb_num,
        rce->pict_type == AV_PICTURE_TYPE_I,
        rce->pict_type == AV_PICTURE_TYPE_P,
        rce->pict_type == AV_PICTURE_TYPE_B,
        rcc->qscale_sum[pict_type] / (double)rcc->frame_count[pict_type],
        a->qcompress,
#if 0
        rcc->last_qscale_for[AV_PICTURE_TYPE_I],
        rcc->last_qscale_for[AV_PICTURE_TYPE_P],
        rcc->last_qscale_for[AV_PICTURE_TYPE_B],
        rcc->next_non_b_qscale,
#endif
        /* per-picture-type average complexity sums */
        rcc->i_cplx_sum[AV_PICTURE_TYPE_I] / (double)rcc->frame_count[AV_PICTURE_TYPE_I],
        rcc->i_cplx_sum[AV_PICTURE_TYPE_P] / (double)rcc->frame_count[AV_PICTURE_TYPE_P],
        rcc->p_cplx_sum[AV_PICTURE_TYPE_P] / (double)rcc->frame_count[AV_PICTURE_TYPE_P],
        rcc->p_cplx_sum[AV_PICTURE_TYPE_B] / (double)rcc->frame_count[AV_PICTURE_TYPE_B],
        (rcc->i_cplx_sum[pict_type] + rcc->p_cplx_sum[pict_type]) / (double)rcc->frame_count[pict_type],
        0
    };

    bits = av_expr_eval(rcc->rc_eq_eval, const_values, rce);
    if (isnan(bits)) {
        av_log(s->avctx, AV_LOG_ERROR, "Error evaluating rc_eq \"%s\"\n", s->rc_eq);
        return -1;
    }

    rcc->pass1_rc_eq_output_sum += bits;
    bits *= rate_factor;
    if (bits < 0.0)
        bits = 0.0;
    bits += 1.0; // avoid 1/0 issues

    /* user override: any RcOverride range containing frame_num either
     * forces an absolute qscale or scales the bit budget */
    for (i = 0; i < s->avctx->rc_override_count; i++) {
        RcOverride *rco = s->avctx->rc_override;
        if (rco[i].start_frame > frame_num)
            continue;
        if (rco[i].end_frame < frame_num)
            continue;

        if (rco[i].qscale)
            bits = qp2bits(rce, rco[i].qscale); // FIXME move at end to really force it?
        else
            bits *= rco[i].quality_factor;
    }

    q = bits2qp(rce, bits);

    /* I/B difference: negative i/b_quant_factor means "derive from q"
     * via q = -q*factor + offset (the negation makes the factor positive) */
    if (pict_type == AV_PICTURE_TYPE_I && s->avctx->i_quant_factor < 0.0)
        q = -q * s->avctx->i_quant_factor + s->avctx->i_quant_offset;
    else if (pict_type == AV_PICTURE_TYPE_B && s->avctx->b_quant_factor < 0.0)
        q = -q * s->avctx->b_quant_factor + s->avctx->b_quant_offset;
    if (q < 1)
        q = 1;

    return q;
}
/**
 * Deliver one frame to the destination pad of @p link.
 *
 * If the destination pad requires writable data and the frame is shared,
 * the frame is copied into a freshly allocated buffer first. Pending
 * filter commands whose time has arrived are executed, the timeline
 * "enable" expression (if any) is evaluated, and the frame is handed to
 * the pad's filter_frame callback (or default_filter_frame).
 *
 * Ownership: @p frame is always consumed (passed on or freed), including
 * on every error path.
 *
 * Fix vs. original: both "default:" branches in the copy path returned
 * AVERROR(EINVAL) without releasing the input frame (and, in the second
 * switch, the newly allocated output frame), leaking them.
 *
 * @return the filter_frame callback's return value, AVERROR_EOF if the
 *         link is closed, or a negative AVERROR on copy failure
 */
static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
{
    int (*filter_frame)(AVFilterLink *, AVFrame *);
    AVFilterContext *dstctx = link->dst;
    AVFilterPad *dst = link->dstpad;
    AVFrame *out;
    int ret;
    AVFilterCommand *cmd = link->dst->command_queue;
    int64_t pts;

    if (link->closed) {
        av_frame_free(&frame);
        return AVERROR_EOF;
    }

    if (!(filter_frame = dst->filter_frame))
        filter_frame = default_filter_frame;

    /* copy the frame if needed */
    if (dst->needs_writable && !av_frame_is_writable(frame)) {
        av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");

        /* Maybe use ff_copy_buffer_ref instead? */
        switch (link->type) {
        case AVMEDIA_TYPE_VIDEO:
            out = ff_get_video_buffer(link, link->w, link->h);
            break;
        case AVMEDIA_TYPE_AUDIO:
            out = ff_get_audio_buffer(link, frame->nb_samples);
            break;
        default:
            av_frame_free(&frame);   /* fix: do not leak the input frame */
            return AVERROR(EINVAL);
        }
        if (!out) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, frame);

        switch (link->type) {
        case AVMEDIA_TYPE_VIDEO:
            av_image_copy(out->data, out->linesize,
                          (const uint8_t **)frame->data, frame->linesize,
                          frame->format, frame->width, frame->height);
            break;
        case AVMEDIA_TYPE_AUDIO:
            av_samples_copy(out->extended_data, frame->extended_data,
                            0, 0, frame->nb_samples,
                            av_get_channel_layout_nb_channels(frame->channel_layout),
                            frame->format);
            break;
        default:
            av_frame_free(&out);     /* fix: release both frames on */
            av_frame_free(&frame);   /* unsupported media type      */
            return AVERROR(EINVAL);
        }

        av_frame_free(&frame);
    } else
        out = frame;

    /* run all queued commands whose trigger time has been reached */
    while (cmd && cmd->time <= out->pts * av_q2d(link->time_base)) {
        av_log(link->dst, AV_LOG_DEBUG,
               "Processing command time:%f command:%s arg:%s\n",
               cmd->time, cmd->command, cmd->arg);
        avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
        ff_command_queue_pop(link->dst);
        cmd = link->dst->command_queue;
    }

    pts = out->pts;
    if (dstctx->enable_str) {
        /* timeline support: disable the filter when the enable
         * expression evaluates to 0 */
        int64_t pos = av_frame_get_pkt_pos(out);
        dstctx->var_values[VAR_N] = link->frame_count;
        dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
        dstctx->var_values[VAR_POS] = pos == -1 ? NAN : pos;

        dstctx->is_disabled = !av_expr_eval(dstctx->enable, dstctx->var_values, NULL);
        if (dstctx->is_disabled &&
            (dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
            filter_frame = default_filter_frame;
    }
    ret = filter_frame(link, out);
    link->frame_count++;
    link->frame_requested = 0;
    ff_update_link_current_pts(link, pts);
    return ret;
}
/**
 * Rotate one input frame into @p out.
 *
 * The rotation angle is re-evaluated per frame from the angle expression
 * (variables: frame index N and time T — T is currently hard-coded to 0),
 * converted to fixed point, and each plane is rotated by filter_slice()
 * using the precomputed integer sin/cos.
 *
 * NOTE(review): `out` is a C++ reference to a pointer (this file is C++
 * despite the C-style API); out must already be allocated by the caller.
 *
 * @param out  destination frame (read for dimensions, written by slices)
 * @return 0 (always; slice errors are not propagated)
 */
int rotate_filter_frame(RotContext *rot, AVFrame *in,int frame_count_out, AVFrame* &out)
{
    int angle_int, s, c, plane;
    double res;

    rot->var_values[ROTATE_VAR_N] = frame_count_out;
    rot->var_values[ROTATE_VAR_T] = 0;//TS2T(in->pts, inlink->time_base);
    rot->angle = res = av_expr_eval(rot->angle_expr, rot->var_values, rot);

    av_log(NULL, AV_LOG_DEBUG, "n:%f time:%f angle:%f/PI\n",
           rot->var_values[ROTATE_VAR_N], rot->var_values[ROTATE_VAR_T],
           rot->angle/M_PI);
    qDebug()<<"rotate_filter_frame n:"<<rot->var_values[ROTATE_VAR_N]
        <<" time:"<< rot->var_values[ROTATE_VAR_T]
        <<" angle(/PI): "<<(rot->angle/M_PI);

    /* convert the angle (radians) into the fixed-point domain used by
     * int_sin(); cos(a) is computed as sin(a + pi/2) */
    angle_int = res * FIXP * 16;
    s = int_sin(angle_int);
    c = int_sin(angle_int + INT_PI/2);

    /* fill background */
    if (rot->fillcolor_enable)
        ff_fill_rectangle(&rot->draw, &rot->color, out->data, out->linesize,
                          0, 0, out->width, out->height);

    for (plane = 0; plane < rot->nb_planes; plane++) {
        /* planes 1 and 2 are chroma and may be subsampled */
        int hsub = plane == 1 || plane == 2 ? rot->hsub : 0;
        int vsub = plane == 1 || plane == 2 ? rot->vsub : 0;
        const int outw = AV_CEIL_RSHIFT(out->width, hsub);
        const int outh = AV_CEIL_RSHIFT(out->height, vsub);
#if 0
        ThreadData td = { .in = in,   .out = out,
                          .inw  = AV_CEIL_RSHIFT(in->width, hsub),
                          .inh  = AV_CEIL_RSHIFT(in->height, vsub),
                          .outh = outh, .outw = outw,
                          .xi = -(outw-1) * c / 2, .yi =  (outw-1) * s / 2,
                          .xprime = -(outh-1) * s / 2,
                          .yprime = -(outh-1) * c / 2,
                          .plane = plane, .c = c, .s = s };
#endif
        /* field-by-field assignment instead of the designated initializer
         * above (kept in #if 0), presumably for C++ compiler compatibility */
        ThreadData td;
        td.in = in;
        td.out = out;
        td.inw = AV_CEIL_RSHIFT(in->width, hsub);
        td.inh = AV_CEIL_RSHIFT(in->height, vsub);
        td.outh = outh;
        td.outw = outw;
        /* xi/yi/xprime/yprime: rotation center offsets in fixed point */
        td.xi = -(outw-1) * c / 2;
        td.yi = (outw-1) * s / 2;
        td.xprime = -(outh-1) * s / 2;
        td.yprime = -(outh-1) * c / 2;
        td.plane = plane;
        td.c = c;
        td.s = s;

        /* single-threaded: process the whole plane as one slice */
        filter_slice(rot, &td, 0, 1);
    }

    return 0;
}

/**
 * Configure the rotation context for the format/size of @p in.
 *
 * Initializes drawing/fill color, chroma subsampling shifts, selects the
 * 8- or 16-bit bilinear interpolator, (re)parses the angle expression and
 * evaluates the output width/height expressions (width is evaluated twice
 * because it may reference the evaluated output height).
 *
 * @return 0 on success, a negative AVERROR on expression parse/eval failure
 */
int rotate_config_props(RotContext *rot, AVFrame *in)
{
    const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get((AVPixelFormat)in->format);
    int ret;
    double res;
    char *expr;

    /* NOTE(review): "foramt" typo is in a runtime log string; left as-is here */
    qDebug()<<"rotate_config_props foramt: "<<in->format;

    ff_draw_init(&rot->draw, (AVPixelFormat)in->format, 0);
    ff_draw_color(&rot->draw, &rot->color, rot->fillcolor);

    rot->hsub = pixdesc->log2_chroma_w;
    rot->vsub = pixdesc->log2_chroma_h;

    /* pick the interpolator matching the component bit depth */
    if (pixdesc->comp[0].depth == 8)
        rot->interpolate_bilinear = interpolate_bilinear8;
    else
        rot->interpolate_bilinear = interpolate_bilinear16;

    rot->var_values[ROTATE_VAR_IN_W] = rot->var_values[ROTATE_VAR_IW] = in->width;
    rot->var_values[ROTATE_VAR_IN_H] = rot->var_values[ROTATE_VAR_IH] = in->height;
    rot->var_values[ROTATE_VAR_HSUB] = 1<<rot->hsub;
    rot->var_values[ROTATE_VAR_VSUB] = 1<<rot->vsub;
    /* N/T and output sizes are unknown until filtering/evaluation below */
    rot->var_values[ROTATE_VAR_N] = NAN;
    rot->var_values[ROTATE_VAR_T] = NAN;
    rot->var_values[ROTATE_VAR_OUT_W] = rot->var_values[ROTATE_VAR_OW] = NAN;
    rot->var_values[ROTATE_VAR_OUT_H] = rot->var_values[ROTATE_VAR_OH] = NAN;

    /* re-parse the angle expression (config may be called more than once) */
    av_expr_free(rot->angle_expr);
    rot->angle_expr = NULL;
    if ((ret = av_expr_parse(&rot->angle_expr, expr = rot->angle_expr_str, var_names,
                             func1_names, func1, NULL, NULL, 0, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR,
               "Error occurred parsing angle expression '%s'\n", rot->angle_expr_str);
        return ret;
    }

/* Parse+evaluate a size expression into `res`; returns from the enclosing
 * function on error or on a non-positive/indefinite result. */
#define SET_SIZE_EXPR(name, opt_name) do {                                             \
    ret = av_expr_parse_and_eval(&res, expr = rot->name##_expr_str,                    \
                                 var_names, rot->var_values,                           \
                                 func1_names, func1, NULL, NULL, rot, 0, NULL);        \
    if (ret < 0 || isnan(res) || isinf(res) || res <= 0) {                             \
        av_log(NULL, AV_LOG_ERROR,                                                     \
               "Error parsing or evaluating expression for option %s: "                \
               "invalid expression '%s' or non-positive or indefinite value %f\n",     \
               opt_name, expr, res);                                                   \
        return ret;                                                                    \
    }                                                                                  \
} while (0)

    /* evaluate width and height */
    /* NOTE(review): this first width evaluation deliberately skips the
     * SET_SIZE_EXPR error check — the width may reference out_h, which is
     * still NAN here; the checked re-evaluation happens below */
    av_expr_parse_and_eval(&res, expr = rot->outw_expr_str, var_names, rot->var_values,
                           func1_names, func1, NULL, NULL, rot, 0, NULL);
    rot->var_values[ROTATE_VAR_OUT_W] = rot->var_values[ROTATE_VAR_OW] = res;
    rot->outw = res + 0.5;
    SET_SIZE_EXPR(outh, "out_h");
    rot->var_values[ROTATE_VAR_OUT_H] = rot->var_values[ROTATE_VAR_OH] = res;
    rot->outh = res + 0.5;

    /* evaluate the width again, as it may depend on the evaluated output height */
    SET_SIZE_EXPR(outw, "out_w");
    rot->var_values[ROTATE_VAR_OUT_W] = rot->var_values[ROTATE_VAR_OW] = res;
    rot->outw = res + 0.5;

    /* compute number of planes */
    rot->nb_planes = av_pix_fmt_count_planes((AVPixelFormat)in->format);
    ////out->width = rot->outw;
    ////outlink->h = rot->outh;
    return 0;
}
/**
 * Read one packet from the modplug demuxer.
 *
 * When a video stream is enabled, video and audio packets strictly
 * alternate (video_switch toggles each call). A video packet is a
 * zero-filled frame optionally overlaid with printed tracker info and/or
 * a per-pixel expression rendered into the blue channel's high nibble.
 * Otherwise an audio packet of AUDIO_PKT_SIZE is read via ModPlug_Read().
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure,
 *         AVERROR_EOF at end of stream, AVERROR(EIO) on read error
 */
static int modplug_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    ModPlugContext *modplug = s->priv_data;

    if (modplug->video_stream) {
        modplug->video_switch ^= 1; // one video packet for one audio packet
        if (modplug->video_switch) {
            double var_values[VAR_VARS_NB];

            /* snapshot of the current tracker state, exposed to the
             * pixel expression and the text overlay */
            var_values[VAR_W      ] = modplug->w;
            var_values[VAR_H      ] = modplug->h;
            var_values[VAR_TIME   ] = modplug->packet_count * modplug->ts_per_packet;
            var_values[VAR_SPEED  ] = ModPlug_GetCurrentSpeed  (modplug->f);
            var_values[VAR_TEMPO  ] = ModPlug_GetCurrentTempo  (modplug->f);
            var_values[VAR_ORDER  ] = ModPlug_GetCurrentOrder  (modplug->f);
            var_values[VAR_PATTERN] = ModPlug_GetCurrentPattern(modplug->f);
            var_values[VAR_ROW    ] = ModPlug_GetCurrentRow    (modplug->f);

            if (av_new_packet(pkt, modplug->fsize) < 0)
                return AVERROR(ENOMEM);
            pkt->stream_index = 1;          /* stream 1 is the video stream */
            memset(pkt->data, 0, modplug->fsize);   /* start from a black frame */

            if (modplug->print_textinfo) {
                char intbuf[32];            /* scratch buffer used by PRINT_INFO */
                PRINT_INFO(0, "speed",   VAR_SPEED);
                PRINT_INFO(1, "tempo",   VAR_TEMPO);
                PRINT_INFO(2, "order",   VAR_ORDER);
                PRINT_INFO(3, "pattern", VAR_PATTERN);
                PRINT_INFO(4, "row",     VAR_ROW);
                PRINT_INFO(5, "ts",      VAR_TIME);
            }

            if (modplug->expr) {
                int x, y;
                /* evaluate the user expression per pixel; the clipped
                 * 4-bit result is OR-ed into the high nibble of the
                 * third byte of each 3-byte pixel */
                for (y = 0; y < modplug->h; y++) {
                    for (x = 0; x < modplug->w; x++) {
                        double color;
                        var_values[VAR_X] = x;
                        var_values[VAR_Y] = y;
                        color = av_expr_eval(modplug->expr, var_values, NULL);
                        pkt->data[y*modplug->linesize + x*3 + 2] |= av_clip((int)color, 0, 0xf)<<4;
                    }
                }
            }
            pkt->pts = pkt->dts = var_values[VAR_TIME];
            pkt->flags |= AV_PKT_FLAG_KEY;  /* every video frame is a keyframe */
            return 0;
        }
    }

    /* audio packet path */
    if (av_new_packet(pkt, AUDIO_PKT_SIZE) < 0)
        return AVERROR(ENOMEM);

    if (modplug->video_stream)
        /* keep audio timestamps in lockstep with the video packets */
        pkt->pts = pkt->dts = modplug->packet_count++ * modplug->ts_per_packet;

    pkt->size = ModPlug_Read(modplug->f, pkt->data, AUDIO_PKT_SIZE);
    if (pkt->size <= 0) {
        av_packet_unref(pkt);
        return pkt->size == 0 ? AVERROR_EOF : AVERROR(EIO);
    }
    return 0;
}
/**
 * Evaluate the select expression for one frame and decide its routing.
 *
 * Fills the expression variables (frame index, pts, time, position,
 * keyframe flag, plus media-type-specific extras), evaluates the select
 * expression and stores the chosen output index in select->select_out:
 *   -1  -> drop the frame
 *    0  -> first output (also for NaN / negative results)
 *    k  -> output min(ceil(res)-1, nb_outputs-1)
 * Finally updates the "previous"/"selected" bookkeeping variables used by
 * subsequent evaluations. No return value; all results go into *select.
 */
static void select_frame(AVFilterContext *ctx, AVFrame *frame)
{
    SelectContext *select = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    double res;

    /* latch start pts/time on the first frame only */
    if (isnan(select->var_values[VAR_START_PTS]))
        select->var_values[VAR_START_PTS] = TS2D(frame->pts);
    if (isnan(select->var_values[VAR_START_T]))
        select->var_values[VAR_START_T] = TS2D(frame->pts) * av_q2d(inlink->time_base);

    select->var_values[VAR_N  ] = inlink->frame_count;
    select->var_values[VAR_PTS] = TS2D(frame->pts);
    select->var_values[VAR_T  ] = TS2D(frame->pts) * av_q2d(inlink->time_base);
    /* byte position in the input file; NAN when unknown (-1) */
    select->var_values[VAR_POS] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame);
    select->var_values[VAR_KEY] = frame->key_frame;

    switch (inlink->type) {
    case AVMEDIA_TYPE_AUDIO:
        select->var_values[VAR_SAMPLES_N] = frame->nb_samples;
        break;

    case AVMEDIA_TYPE_VIDEO:
        /* P = progressive, T = interlaced top-field-first, B = bottom-first */
        select->var_values[VAR_INTERLACE_TYPE] =
            !frame->interlaced_frame ? INTERLACE_TYPE_P :
            frame->top_field_first   ? INTERLACE_TYPE_T : INTERLACE_TYPE_B;
        select->var_values[VAR_PICT_TYPE] = frame->pict_type;
        if (select->do_scene_detect) {
            char buf[32];
            select->var_values[VAR_SCENE] = get_scene_score(ctx, frame);
            // TODO: document metadata
            snprintf(buf, sizeof(buf), "%f", select->var_values[VAR_SCENE]);
            av_dict_set(avpriv_frame_get_metadatap(frame), "lavfi.scene_score", buf, 0);
        }
        break;
    }

    select->select = res = av_expr_eval(select->expr, select->var_values, NULL);
    av_log(inlink->dst, AV_LOG_DEBUG,
           "n:%f pts:%f t:%f key:%d",
           select->var_values[VAR_N],
           select->var_values[VAR_PTS],
           select->var_values[VAR_T],
           frame->key_frame);

    switch (inlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        av_log(inlink->dst, AV_LOG_DEBUG, " interlace_type:%c pict_type:%c scene:%f",
               (!frame->interlaced_frame) ? 'P' :
               frame->top_field_first ? 'T' : 'B',
               av_get_picture_type_char(frame->pict_type),
               select->var_values[VAR_SCENE]);
        break;
    case AVMEDIA_TYPE_AUDIO:
        av_log(inlink->dst, AV_LOG_DEBUG, " samples_n:%d consumed_samples_n:%f",
               frame->nb_samples,
               select->var_values[VAR_CONSUMED_SAMPLES_N]);
        break;
    }

    /* map the expression result to an output index */
    if (res == 0) {
        select->select_out = -1; /* drop */
    } else if (isnan(res) || res < 0) {
        select->select_out = 0; /* first output */
    } else {
        select->select_out = FFMIN(ceilf(res)-1, select->nb_outputs-1); /* other outputs */
    }

    av_log(inlink->dst, AV_LOG_DEBUG, " -> select:%f select_out:%d\n", res, select->select_out);

    if (res) {
        /* the frame was selected: remember it for prev_selected_* vars */
        select->var_values[VAR_PREV_SELECTED_N]   = select->var_values[VAR_N];
        select->var_values[VAR_PREV_SELECTED_PTS] = select->var_values[VAR_PTS];
        select->var_values[VAR_PREV_SELECTED_T]   = select->var_values[VAR_T];
        select->var_values[VAR_SELECTED_N] += 1.0;
        if (inlink->type == AVMEDIA_TYPE_AUDIO)
            select->var_values[VAR_CONSUMED_SAMPLES_N] += frame->nb_samples;
    }

    select->var_values[VAR_PREV_PTS] = select->var_values[VAR_PTS];
    select->var_values[VAR_PREV_T]   = select->var_values[VAR_T];
}
/**
 * Produce one frame of the zoom/pan sequence from the cached input frame.
 *
 * Evaluates the zoom/x/y expressions for step @p i, clamps the results
 * (zoom to [1,10], x/y into the picture), aligns the crop origin to the
 * chroma grid, and scales the cropped region to the output size with a
 * temporary swscale context (freed again before returning).
 *
 * Outputs: *zoom, *dx, *dy receive the clamped expression results.
 *
 * Fixes vs. original:
 *  - the error path now frees the swscale context, which leaked whenever
 *    sws_init_context() failed;
 *  - the per-plane pointer array is zero-initialized so entries for
 *    missing planes are not passed to sws_scale() uninitialized;
 *  - the redundant pointer-to-array cast (&input) was dropped.
 *
 * @return the ff_filter_frame() result, or a negative AVERROR on failure
 */
static int output_single_frame(AVFilterContext *ctx, AVFrame *in, double *var_values,
                               int i, double *zoom, double *dx, double *dy)
{
    ZPContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int64_t pts = s->frame_count;
    int k, x, y, w, h, ret = 0;
    uint8_t *input[4] = { NULL };   /* per-plane pointers into the crop region */
    int px[4], py[4];
    AVFrame *out;

    /* expose previous-step state to the expressions */
    var_values[VAR_PX]        = s->x;
    var_values[VAR_PY]        = s->y;
    var_values[VAR_PZOOM]     = s->prev_zoom;
    var_values[VAR_PDURATION] = s->prev_nb_frames;
    var_values[VAR_TIME]      = pts * av_q2d(outlink->time_base);
    var_values[VAR_FRAME]     = i;
    var_values[VAR_ON]        = outlink->frame_count_in + 1;

    *zoom = av_expr_eval(s->zoom_expr, var_values, NULL);
    *zoom = av_clipd(*zoom, 1, 10);       /* zoom factor limited to [1, 10] */
    var_values[VAR_ZOOM] = *zoom;
    w = in->width  * (1.0 / *zoom);       /* crop size shrinks as zoom grows */
    h = in->height * (1.0 / *zoom);

    *dx = av_expr_eval(s->x_expr, var_values, NULL);
    x = *dx = av_clipd(*dx, 0, FFMAX(in->width - w, 0));
    var_values[VAR_X] = *dx;
    x &= ~((1 << s->desc->log2_chroma_w) - 1);   /* align origin to chroma grid */

    *dy = av_expr_eval(s->y_expr, var_values, NULL);
    y = *dy = av_clipd(*dy, 0, FFMAX(in->height - h, 0));
    var_values[VAR_Y] = *dy;
    y &= ~((1 << s->desc->log2_chroma_h) - 1);

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        ret = AVERROR(ENOMEM);
        return ret;
    }

    /* crop offsets per plane: planes 1/2 (chroma) are subsampled */
    px[1] = px[2] = AV_CEIL_RSHIFT(x, s->desc->log2_chroma_w);
    px[0] = px[3] = x;
    py[1] = py[2] = AV_CEIL_RSHIFT(y, s->desc->log2_chroma_h);
    py[0] = py[3] = y;

    s->sws = sws_alloc_context();
    if (!s->sws) {
        ret = AVERROR(ENOMEM);
        goto error;
    }

    for (k = 0; in->data[k]; k++)
        input[k] = in->data[k] + py[k] * in->linesize[k] + px[k];

    av_opt_set_int(s->sws, "srcw", w, 0);
    av_opt_set_int(s->sws, "srch", h, 0);
    av_opt_set_int(s->sws, "src_format", in->format, 0);
    av_opt_set_int(s->sws, "dstw", outlink->w, 0);
    av_opt_set_int(s->sws, "dsth", outlink->h, 0);
    av_opt_set_int(s->sws, "dst_format", outlink->format, 0);
    av_opt_set_int(s->sws, "sws_flags", SWS_BICUBIC, 0);

    if ((ret = sws_init_context(s->sws, NULL, NULL)) < 0)
        goto error;

    sws_scale(s->sws, (const uint8_t *const *)input, in->linesize,
              0, h, out->data, out->linesize);

    out->pts = pts;
    s->frame_count++;

    ret = ff_filter_frame(outlink, out);
    sws_freeContext(s->sws);
    s->sws = NULL;
    s->current_frame++;

    if (s->current_frame >= s->nb_frames) {
        /* sequence done: latch the final state for the next zoompan run;
         * -1 marks "no value" and leaves the previous state untouched */
        if (*dx != -1)
            s->x = *dx;
        if (*dy != -1)
            s->y = *dy;
        if (*zoom != -1)
            s->prev_zoom = *zoom;
        s->prev_nb_frames = s->nb_frames;
        s->nb_frames      = 0;
        s->current_frame  = 0;
        av_frame_free(&s->in);
        s->finished = 1;
    }
    return ret;
error:
    sws_freeContext(s->sws);   /* fix: was leaked when sws_init_context failed */
    s->sws = NULL;
    av_frame_free(&out);
    return ret;
}