/**
 * Encode one frame of audio as a CNG (comfort noise) SID payload.
 *
 * The packet layout is one byte of quantized noise level followed by
 * p->order bytes of quantized reflection coefficients.
 *
 * @param avctx          codec context (priv_data is a CNGContext)
 * @param avpkt          output packet, sized to 1 + p->order bytes
 * @param frame          input audio frame (s16 samples in data[0])
 * @param got_packet_ptr set to 1 when a packet was produced
 * @return 0 on success, a negative error code on allocation failure
 */
static int cng_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
{
    CNGContext *p = avctx->priv_data;
    int16_t *pcm = (int16_t *)frame->data[0];
    double power = 0.0;
    int err, n, qdbov;

    /* One byte for the level plus one byte per reflection coefficient. */
    err = ff_alloc_packet2(avctx, avpkt, 1 + p->order, 1 + p->order);
    if (err) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
        return err;
    }

    /* Widen the samples for the LPC analysis and accumulate the energy
     * (max per-term product 32767^2 fits comfortably in int). */
    for (n = 0; n < frame->nb_samples; n++) {
        p->samples32[n] = pcm[n];
        power += pcm[n] * pcm[n];
    }
    power /= frame->nb_samples;

    if (power > 0) {
        /* Mean power relative to the overload point, in dB; the quantized
         * value is the (negated, floored) dBov clipped to 7 bits. */
        double dbov = 10 * log10(power / 1081109975);
        qdbov = av_clip_uintp2(-floor(dbov), 7);
    } else {
        qdbov = 127; /* silence: lowest representable level */
    }

    ff_lpc_calc_ref_coefs(&p->lpc, p->samples32, p->order, p->ref_coef);

    avpkt->data[0] = qdbov;
    /* Map each reflection coefficient from [-1, 1] onto the byte range. */
    for (n = 0; n < p->order; n++)
        avpkt->data[1 + n] = p->ref_coef[n] * 127 + 127;

    *got_packet_ptr = 1;
    av_assert1(avpkt->size == 1 + p->order);
    return 0;
}
/*
 * Decode one frame: first reconstruct the YUV predictor for column 0 of
 * every row, then decode all remaining pixels line by line (even rows
 * first, then odd rows).
 */
static void mp_decode_frame_helper(MotionPixelsContext *mp, GetBitContext *gb)
{
    YuvPixel p;
    int y, y0;

    /* The caller guarantees the top-left pixel is marked as changed;
     * otherwise p would be read below before ever being written. */
    av_assert1(mp->changes_map[0]);
    for (y = 0; y < mp->avctx->height; ++y) {
        if (mp->changes_map[y * mp->avctx->width] != 0) {
            /* Changed pixel at column 0: reset the adaptive gradient
             * scales and reload the predictor from the RGB frame data. */
            memset(mp->gradient_scale, 1, sizeof(mp->gradient_scale));
            p = mp_get_yuv_from_rgb(mp, 0, y);
        } else {
            /* Unchanged pixel: apply coded deltas to the running predictor.
             * Luma is coded for every row; Y is clipped to 5 unsigned bits. */
            p.y += mp_gradient(mp, 0, mp_get_vlc(mp, gb));
            p.y = av_clip_uintp2(p.y, 5);
            if ((y & 3) == 0) {
                /* Chroma deltas are only present every 4th row; U/V are
                 * signed and clipped to the 5-bit signed range. */
                p.v += mp_gradient(mp, 1, mp_get_vlc(mp, gb));
                p.v = av_clip_intp2(p.v, 5);
                p.u += mp_gradient(mp, 2, mp_get_vlc(mp, gb));
                p.u = av_clip_intp2(p.u, 5);
            }
            /* Save the column-0 predictor for this row (used by
             * mp_decode_line) and write the pixel back as RGB. */
            mp->vpt[y] = p;
            mp_set_rgb_from_yuv(mp, 0, y, &p);
        }
    }
    /* Decode the rest of each line; even rows in the first pass, odd rows
     * in the second — presumably matching the bitstream's coding order. */
    for (y0 = 0; y0 < 2; ++y0)
        for (y = y0; y < mp->avctx->height; y += 2)
            mp_decode_line(mp, gb, y);
}
/*
 * Decode one line of the frame starting from the column-0 predictor saved
 * in mp->vpt[y].  changes_map[] holds, at the start of each changed run,
 * the run length in pixels; zero marks an unchanged (delta-coded) pixel.
 */
static void mp_decode_line(MotionPixelsContext *mp, GetBitContext *gb, int y)
{
    YuvPixel p;
    const int y0 = y * mp->avctx->width;
    int w, i, x = 0;

    p = mp->vpt[y];
    /* Column 0 was already handled by mp_decode_frame_helper; if it was
     * unchanged there, reset the gradient scales and skip past it. */
    if (mp->changes_map[y0 + x] == 0) {
        memset(mp->gradient_scale, 1, sizeof(mp->gradient_scale));
        ++x;
    }
    while (x < mp->avctx->width) {
        w = mp->changes_map[y0 + x];
        if (w != 0) {
            /* A run of w changed pixels starts here. */
            if ((y & 3) == 0) {
                /* On chroma rows: if the run does not cover all of the next
                 * three rows too, refresh the horizontal chroma predictors
                 * (hpt, one entry per 4x4 block) from the frame data for
                 * every 4-aligned column inside the run. */
                if (mp->changes_map[y0 + x + mp->avctx->width] < w ||
                    mp->changes_map[y0 + x + mp->avctx->width * 2] < w ||
                    mp->changes_map[y0 + x + mp->avctx->width * 3] < w) {
                    for (i = (x + 3) & ~3; i < x + w; i += 4) {
                        mp->hpt[((y / 4) * mp->avctx->width + i) / 4] = mp_get_yuv_from_rgb(mp, i, y);
                    }
                }
            }
            /* Skip over the changed run, then restart prediction from the
             * last pixel of the run with fresh gradient scales. */
            x += w;
            memset(mp->gradient_scale, 1, sizeof(mp->gradient_scale));
            p = mp_get_yuv_from_rgb(mp, x - 1, y);
        } else {
            /* Unchanged pixel: luma delta every pixel, clipped to 5 bits. */
            p.y += mp_gradient(mp, 0, mp_get_vlc(mp, gb));
            p.y = av_clip_uintp2(p.y, 5);
            if ((x & 3) == 0) {
                if ((y & 3) == 0) {
                    /* 4x4-block anchor: decode chroma deltas and store the
                     * updated predictor for the rows below. */
                    p.v += mp_gradient(mp, 1, mp_get_vlc(mp, gb));
                    p.v = av_clip_intp2(p.v, 5);
                    p.u += mp_gradient(mp, 2, mp_get_vlc(mp, gb));
                    p.u = av_clip_intp2(p.u, 5);
                    mp->hpt[((y / 4) * mp->avctx->width + x) / 4] = p;
                } else {
                    /* Non-anchor row: reuse the chroma stored by the
                     * anchor row of this 4x4 block. */
                    p.v = mp->hpt[((y / 4) * mp->avctx->width + x) / 4].v;
                    p.u = mp->hpt[((y / 4) * mp->avctx->width + x) / 4].u;
                }
            }
            mp_set_rgb_from_yuv(mp, x, y, &p);
            ++x;
        }
    }
}
/**
 * Calculate rate distortion cost for quantizing with given codebook
 *
 * Template shared by all codebook classes; the BT_* flags are compile-time
 * constants selected by the per-codebook wrappers.  When pb is non-NULL the
 * chosen codewords (plus sign and escape bits) are also written out.
 *
 * @return quantization distortion
 */
static av_always_inline float quantize_and_encode_band_cost_template(
                                struct AACEncContext *s,
                                PutBitContext *pb, const float *in,
                                const float *scaled, int size, int scale_idx,
                                int cb, const float lambda, const float uplim,
                                int *bits, int BT_ZERO, int BT_UNSIGNED,
                                int BT_PAIR, int BT_ESC)
{
    /* Quantization and inverse-quantization step sizes derived from the
     * scalefactor index via the shared power tables. */
    const int q_idx = POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512;
    const float Q   = ff_aac_pow2sf_tab [q_idx];
    const float Q34 = ff_aac_pow34sf_tab[q_idx];
    const float IQ  = ff_aac_pow2sf_tab [POW_SF2_ZERO + scale_idx - SCALE_ONE_POS + SCALE_DIV_512];
    /* Largest representable escape value after dequantization. */
    const float CLIPPED_ESCAPE = 165140.0f*IQ;
    int i, j;
    float cost = 0;
    /* Pair codebooks code 2 coefficients per codeword, quads code 4. */
    const int dim = BT_PAIR ? 2 : 4;
    int resbits = 0;
    const int range  = aac_cb_range[cb];
    const int maxval = aac_cb_maxval[cb];
    int off;

    if (BT_ZERO) {
        /* Zero codebook: everything is quantized to 0, so the distortion
         * is simply the energy of the band and no bits are spent. */
        for (i = 0; i < size; i++)
            cost += in[i]*in[i];
        if (bits)
            *bits = 0;
        return cost * lambda;
    }
    if (!scaled) {
        /* Caller did not supply |x|^(3/4) values; compute them here. */
        abs_pow34_v(s->scoefs, in, size);
        scaled = s->scoefs;
    }
    quantize_bands(s->qcoefs, in, scaled, size, Q34, !BT_UNSIGNED, maxval);
    if (BT_UNSIGNED) {
        off = 0;
    } else {
        /* Signed codebooks: bias quantized values into [0, 2*maxval]. */
        off = maxval;
    }
    for (i = 0; i < size; i += dim) {
        const float *vec;
        int *quants = s->qcoefs + i;
        int curidx = 0;
        int curbits;
        float rd = 0.0f;
        /* Fold the dim quantized values into a single codebook index. */
        for (j = 0; j < dim; j++) {
            curidx *= range;
            curidx += quants[j] + off;
        }
        curbits = ff_aac_spectral_bits[cb-1][curidx];
        vec     = &ff_aac_codebook_vectors[cb-1][curidx*dim];
        if (BT_UNSIGNED) {
            for (j = 0; j < dim; j++) {
                float t = fabsf(in[i+j]);
                float di;
                if (BT_ESC && vec[j] == 64.0f) { //FIXME: slow
                    /* Escape value: re-quantize and account for the extra
                     * escape-sequence bits. */
                    if (t >= CLIPPED_ESCAPE) {
                        di = t - CLIPPED_ESCAPE;
                        curbits += 21;
                    } else {
                        int c = av_clip_uintp2(quant(t, Q), 13);
                        di = t - c*cbrtf(c)*IQ;
                        curbits += av_log2(c)*2 - 4 + 1;
                    }
                } else {
                    di = t - vec[j]*IQ;
                }
                /* Nonzero entries carry an explicit sign bit. */
                if (vec[j] != 0.0f)
                    curbits++;
                rd += di*di;
            }
        } else {
            for (j = 0; j < dim; j++) {
                float di = in[i+j] - vec[j]*IQ;
                rd += di*di;
            }
        }
        cost    += rd * lambda + curbits;
        resbits += curbits;
        /* Early exit once we can no longer beat the caller's bound. */
        if (cost >= uplim)
            return uplim;
        if (pb) {
            /* Emit the codeword, then sign bits for nonzero entries, then
             * any escape sequences (escape codebooks are pairs). */
            put_bits(pb, ff_aac_spectral_bits[cb-1][curidx], ff_aac_spectral_codes[cb-1][curidx]);
            if (BT_UNSIGNED)
                for (j = 0; j < dim; j++)
                    if (ff_aac_codebook_vectors[cb-1][curidx*dim+j] != 0.0f)
                        put_bits(pb, 1, in[i+j] < 0.0f);
            if (BT_ESC) {
                for (j = 0; j < 2; j++) {
                    if (ff_aac_codebook_vectors[cb-1][curidx*2+j] == 64.0f) {
                        int coef = av_clip_uintp2(quant(fabsf(in[i+j]), Q), 13);
                        int len = av_log2(coef);
                        /* Unary prefix (len-4 ones, terminating zero) followed
                         * by the low len bits of the coefficient. */
                        put_bits(pb, len - 4 + 1, (1 << (len - 4 + 1)) - 2);
                        put_bits(pb, len, coef & ((1 << len) - 1));
                    }
                }
            }
        }
    }

    if (bits)
        *bits = resbits;
    return cost;
}
static int flac_write_header(struct AVFormatContext *s) { int ret; int padding = s->metadata_header_padding; AVCodecContext *codec = s->streams[0]->codec; FlacMuxerContext *c = s->priv_data; if (!c->write_header) return 0; if (s->nb_streams > 1) { av_log(s, AV_LOG_ERROR, "only one stream is supported\n"); return AVERROR(EINVAL); } if (codec->codec_id != AV_CODEC_ID_FLAC) { av_log(s, AV_LOG_ERROR, "unsupported codec\n"); return AVERROR(EINVAL); } if (padding < 0) padding = 8192; /* The FLAC specification states that 24 bits are used to represent the * size of a metadata block so we must clip this value to 2^24-1. */ padding = av_clip_uintp2(padding, 24); ret = ff_flac_write_header(s->pb, codec->extradata, codec->extradata_size, 0); if (ret) return ret; /* add the channel layout tag */ if (codec->channel_layout && !(codec->channel_layout & ~0x3ffffULL) && !ff_flac_is_native_layout(codec->channel_layout)) { AVDictionaryEntry *chmask = av_dict_get(s->metadata, "WAVEFORMATEXTENSIBLE_CHANNEL_MASK", NULL, 0); if (chmask) { av_log(s, AV_LOG_WARNING, "A WAVEFORMATEXTENSIBLE_CHANNEL_MASK is " "already present, this muxer will not overwrite it.\n"); } else { uint8_t buf[32]; snprintf(buf, sizeof(buf), "0x%"PRIx64, codec->channel_layout); av_dict_set(&s->metadata, "WAVEFORMATEXTENSIBLE_CHANNEL_MASK", buf, 0); } } ret = flac_write_block_comment(s->pb, &s->metadata, !padding, s->flags & AVFMT_FLAG_BITEXACT); if (ret) return ret; /* The command line flac encoder defaults to placing a seekpoint * every 10s. So one might add padding to allow that later * but there seems to be no simple way to get the duration here. * So just add the amount requested by the user. */ if (padding) flac_write_block_padding(s->pb, padding, 1); return ret; }
static int vaapi_encode_h264_init_sequence_params(AVCodecContext *avctx) { VAAPIEncodeContext *ctx = avctx->priv_data; VAEncSequenceParameterBufferH264 *vseq = ctx->codec_sequence_params; VAEncPictureParameterBufferH264 *vpic = ctx->codec_picture_params; VAAPIEncodeH264Context *priv = ctx->priv_data; VAAPIEncodeH264MiscSequenceParams *mseq = &priv->misc_sequence_params; int i; { vseq->seq_parameter_set_id = 0; vseq->level_idc = avctx->level; vseq->max_num_ref_frames = 2; vseq->picture_width_in_mbs = priv->mb_width; vseq->picture_height_in_mbs = priv->mb_height; vseq->seq_fields.bits.chroma_format_idc = 1; vseq->seq_fields.bits.frame_mbs_only_flag = 1; vseq->seq_fields.bits.direct_8x8_inference_flag = 1; vseq->seq_fields.bits.log2_max_frame_num_minus4 = 4; vseq->seq_fields.bits.pic_order_cnt_type = 0; if (ctx->input_width != ctx->aligned_width || ctx->input_height != ctx->aligned_height) { vseq->frame_cropping_flag = 1; vseq->frame_crop_left_offset = 0; vseq->frame_crop_right_offset = (ctx->aligned_width - ctx->input_width) / 2; vseq->frame_crop_top_offset = 0; vseq->frame_crop_bottom_offset = (ctx->aligned_height - ctx->input_height) / 2; } else { vseq->frame_cropping_flag = 0; } vseq->vui_parameters_present_flag = 1; if (avctx->sample_aspect_ratio.num != 0) { vseq->vui_fields.bits.aspect_ratio_info_present_flag = 1; // There is a large enum of these which we could support // individually rather than using the generic X/Y form? if (avctx->sample_aspect_ratio.num == avctx->sample_aspect_ratio.den) { vseq->aspect_ratio_idc = 1; } else { vseq->aspect_ratio_idc = 255; // Extended SAR. vseq->sar_width = avctx->sample_aspect_ratio.num; vseq->sar_height = avctx->sample_aspect_ratio.den; } } if (avctx->color_primaries != AVCOL_PRI_UNSPECIFIED || avctx->color_trc != AVCOL_TRC_UNSPECIFIED || avctx->colorspace != AVCOL_SPC_UNSPECIFIED) { mseq->video_signal_type_present_flag = 1; mseq->video_format = 5; // Unspecified. 
mseq->video_full_range_flag = 0; mseq->colour_description_present_flag = 1; // These enums are derived from the standard and hence // we can just use the values directly. mseq->colour_primaries = avctx->color_primaries; mseq->transfer_characteristics = avctx->color_trc; mseq->matrix_coefficients = avctx->colorspace; } vseq->bits_per_second = avctx->bit_rate; vseq->vui_fields.bits.timing_info_present_flag = 1; if (avctx->framerate.num > 0 && avctx->framerate.den > 0) { vseq->num_units_in_tick = avctx->framerate.num; vseq->time_scale = 2 * avctx->framerate.den; mseq->fixed_frame_rate_flag = 1; } else { vseq->num_units_in_tick = avctx->time_base.num; vseq->time_scale = 2 * avctx->time_base.den; mseq->fixed_frame_rate_flag = 0; } if (ctx->va_rc_mode == VA_RC_CBR) { priv->send_timing_sei = 1; mseq->nal_hrd_parameters_present_flag = 1; mseq->cpb_cnt_minus1 = 0; // Try to scale these to a sensible range so that the // golomb encode of the value is not overlong. mseq->bit_rate_scale = av_clip_uintp2(av_log2(avctx->bit_rate) - 15, 4); mseq->bit_rate_value_minus1[0] = (avctx->bit_rate >> mseq->bit_rate_scale) - 1; mseq->cpb_size_scale = av_clip_uintp2(av_log2(priv->hrd_params.hrd.buffer_size) - 15, 4); mseq->cpb_size_value_minus1[0] = (priv->hrd_params.hrd.buffer_size >> mseq->cpb_size_scale) - 1; // CBR mode isn't actually available here, despite naming. mseq->cbr_flag[0] = 0; mseq->initial_cpb_removal_delay_length_minus1 = 23; mseq->cpb_removal_delay_length_minus1 = 23; mseq->dpb_output_delay_length_minus1 = 7; mseq->time_offset_length = 0; // This calculation can easily overflow 32 bits. mseq->initial_cpb_removal_delay = 90000 * (uint64_t)priv->hrd_params.hrd.initial_buffer_fullness / priv->hrd_params.hrd.buffer_size; mseq->initial_cpb_removal_delay_offset = 0; } else {
/*
 * Begin VA-API decoding of the current VP8 frame: fill and submit the
 * picture-parameter, coefficient-probability and quantization buffers.
 * Returns 0 on success or a negative error code (the in-flight picture is
 * cancelled on failure).
 */
static int vaapi_vp8_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
{
    const VP8Context *s = avctx->priv_data;
    VAAPIDecodePicture *pic = s->framep[VP56_FRAME_CURRENT]->hwaccel_picture_private;
    VAPictureParameterBufferVP8 pp;
    VAProbabilityDataBufferVP8 prob;
    VAIQMatrixBufferVP8 quant;
    int err, i, j, k;

    pic->output_surface = vaapi_vp8_surface_id(s->framep[VP56_FRAME_CURRENT]);

    pp = (VAPictureParameterBufferVP8) {
        .frame_width  = avctx->width,
        .frame_height = avctx->height,

        .last_ref_frame    = vaapi_vp8_surface_id(s->framep[VP56_FRAME_PREVIOUS]),
        .golden_ref_frame  = vaapi_vp8_surface_id(s->framep[VP56_FRAME_GOLDEN]),
        .alt_ref_frame     = vaapi_vp8_surface_id(s->framep[VP56_FRAME_GOLDEN2]),
        .out_of_loop_frame = VA_INVALID_SURFACE,

        .pic_fields.bits = {
            /* Deliberately inverted: the VA-API field follows the VP8
             * bitstream frame_type convention where 0 means key frame. */
            .key_frame                   = !s->keyframe,
            .version                     = s->profile,

            .segmentation_enabled        = s->segmentation.enabled,
            .update_mb_segmentation_map  = s->segmentation.update_map,
            .update_segment_feature_data = s->segmentation.update_feature_data,
            .filter_type                 = s->filter.simple,
            .sharpness_level             = s->filter.sharpness,
            .loop_filter_adj_enable      = s->lf_delta.enabled,
            .mode_ref_lf_delta_update    = s->lf_delta.update,
            .sign_bias_golden            = s->sign_bias[VP56_FRAME_GOLDEN],
            .sign_bias_alternate         = s->sign_bias[VP56_FRAME_GOLDEN2],
            .mb_no_coeff_skip            = s->mbskip_enabled,
            .loop_filter_disable         = s->filter.level == 0,
        },

        .prob_skip_false = s->prob->mbskip,
        .prob_intra      = s->prob->intra,
        .prob_last       = s->prob->last,
        .prob_gf         = s->prob->golden,
    };

    for (i = 0; i < 3; i++)
        pp.mb_segment_tree_probs[i] = s->prob->segmentid[i];

    /* Per-segment loop filter levels; segment values may be absolute or
     * deltas on top of the frame-level filter strength. */
    for (i = 0; i < 4; i++) {
        if (s->segmentation.enabled) {
            pp.loop_filter_level[i] = s->segmentation.filter_level[i];
            if (!s->segmentation.absolute_vals)
                pp.loop_filter_level[i] += s->filter.level;
        } else {
            pp.loop_filter_level[i] = s->filter.level;
        }
        /* Filter level is a 6-bit quantity (0..63). */
        pp.loop_filter_level[i] = av_clip_uintp2(pp.loop_filter_level[i], 6);
    }

    for (i = 0; i < 4; i++) {
        pp.loop_filter_deltas_ref_frame[i] = s->lf_delta.ref[i];
        pp.loop_filter_deltas_mode[i]      = s->lf_delta.mode[i + 4];
    }

    if (s->keyframe) {
        /* Key frames use the fixed mode probabilities from the VP8 spec. */
        static const uint8_t keyframe_y_mode_probs[4] = {
            145, 156, 163, 128
        };
        static const uint8_t keyframe_uv_mode_probs[3] = {
            142, 114, 183
        };
        memcpy(pp.y_mode_probs,  keyframe_y_mode_probs,  4);
        memcpy(pp.uv_mode_probs, keyframe_uv_mode_probs, 3);
    } else {
        for (i = 0; i < 4; i++)
            pp.y_mode_probs[i] = s->prob->pred16x16[i];
        for (i = 0; i < 3; i++)
            pp.uv_mode_probs[i] = s->prob->pred8x8c[i];
    }

    for (i = 0; i < 2; i++)
        for (j = 0; j < 19; j++)
            pp.mv_probs[i][j] = s->prob->mvc[i][j];

    /* Hand the driver the boolean-coder state as it stood right after the
     * frame header, so it can continue decoding the partitions. */
    pp.bool_coder_ctx.range = s->coder_state_at_header_end.range;
    pp.bool_coder_ctx.value = s->coder_state_at_header_end.value;
    pp.bool_coder_ctx.count = s->coder_state_at_header_end.bit_count;

    err = ff_vaapi_decode_make_param_buffer(avctx, pic, VAPictureParameterBufferType, &pp, sizeof(pp));
    if (err < 0)
        goto fail;

    for (i = 0; i < 4; i++) {
        for (j = 0; j < 8; j++) {
            /* VA-API orders token probabilities by coefficient band;
             * coeff_bands_inverse maps its band index back to the
             * bitstream position used by s->prob->token. */
            static const int coeff_bands_inverse[8] = {
                0, 1, 2, 3, 5, 6, 4, 15
            };
            int coeff_pos = coeff_bands_inverse[j];

            for (k = 0; k < 3; k++) {
                memcpy(prob.dct_coeff_probs[i][j][k],
                       s->prob->token[i][coeff_pos][k], 11);
            }
        }
    }

    err = ff_vaapi_decode_make_param_buffer(avctx, pic, VAProbabilityBufferType, &prob, sizeof(prob));
    if (err < 0)
        goto fail;

    /* Per-segment quantizer indices: base AC index (absolute or delta)
     * plus the per-plane deltas, each clipped to 7 bits (0..127). */
    for (i = 0; i < 4; i++) {
        int base_qi = s->segmentation.base_quant[i];
        if (!s->segmentation.absolute_vals)
            base_qi += s->quant.yac_qi;

        quant.quantization_index[i][0] = av_clip_uintp2(base_qi, 7);
        quant.quantization_index[i][1] = av_clip_uintp2(base_qi + s->quant.ydc_delta, 7);
        quant.quantization_index[i][2] = av_clip_uintp2(base_qi + s->quant.y2dc_delta, 7);
        quant.quantization_index[i][3] = av_clip_uintp2(base_qi + s->quant.y2ac_delta, 7);
        quant.quantization_index[i][4] = av_clip_uintp2(base_qi + s->quant.uvdc_delta, 7);
        quant.quantization_index[i][5] = av_clip_uintp2(base_qi + s->quant.uvac_delta, 7);
    }

    err = ff_vaapi_decode_make_param_buffer(avctx, pic, VAIQMatrixBufferType, &quant, sizeof(quant));
    if (err < 0)
        goto fail;

    return 0;

fail:
    ff_vaapi_decode_cancel(avctx, pic);
    return err;
}