Example #1
uint32_t av_q2intfloat(AVRational q) {
    int64_t n;
    int shift;
    int sign = 0;

    if (q.den < 0) {
        q.den *= -1;
        q.num *= -1;
    }
    if (q.num < 0) {
        q.num *= -1;
        sign = 1;
    }

    if (!q.num && !q.den) return 0xFFC00000;
    if (!q.num) return 0;
    if (!q.den) return 0x7F800000 | (q.num & 0x80000000);

    shift = 23 + av_log2(q.den) - av_log2(q.num);
    if (shift >= 0) n = av_rescale(q.num, 1LL<<shift, q.den);
    else            n = av_rescale(q.num, 1, ((int64_t)q.den) << -shift);

    shift -= n >= (1<<24);
    shift += n <  (1<<23);

    if (shift >= 0) n = av_rescale(q.num, 1LL<<shift, q.den);
    else            n = av_rescale(q.num, 1, ((int64_t)q.den) << -shift);

    av_assert1(n <  (1<<24));
    av_assert1(n >= (1<<23));

    return sign<<31 | (150-shift)<<23 | (n - (1<<23));
}
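The function above packs a rational into the bit layout of an IEEE-754 single: sign bit, 8-bit exponent biased by 127, 23-bit mantissa, which is why the normal path normalizes n into [2^23, 2^24) and stores the exponent field as 150 - shift. A minimal self-check sketch, assuming av_q2intfloat() and AVRational as declared in libavutil/rational.h; bits_to_float() and the main() driver are only illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reinterpret the returned bit pattern as a float (bit-exact copy). */
static float bits_to_float(uint32_t bits)
{
    float f;
    memcpy(&f, &bits, sizeof(f));
    return f;
}

int main(void)
{
    AVRational q = { 1, 3 };                              /* assumes libavutil/rational.h */
    printf("%f\n", bits_to_float(av_q2intfloat(q)));      /* expected output: ~0.333333 */
    return 0;
}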
Example #2
static int init_cook_mlt(COOKContext *q) {
    int j;
    float alpha;

    /* Allocate the buffers, could be replaced with a static [512]
       array if needed. */
    q->mlt_size = q->samples_per_channel;
    q->mlt_window = av_malloc(sizeof(float)*q->mlt_size);
    q->mlt_precos = av_malloc(sizeof(float)*q->mlt_size/2);
    q->mlt_presin = av_malloc(sizeof(float)*q->mlt_size/2);
    q->mlt_postcos = av_malloc(sizeof(float)*q->mlt_size/2);

    /* Initialize the MLT window: simple sine window. */
    alpha = M_PI / (2.0 * (float)q->mlt_size);
    for(j=0 ; j<q->mlt_size ; j++) {
        q->mlt_window[j] = sin((j + 512.0/(float)q->mlt_size) * alpha);
    }

    /* pre/post twiddle factors */
    for (j=0 ; j<q->mlt_size/2 ; j++){
        q->mlt_precos[j] = cos( ((j+0.25)*M_PI)/q->mlt_size);
        q->mlt_presin[j] = sin( ((j+0.25)*M_PI)/q->mlt_size);
        q->mlt_postcos[j] = (float)sqrt(2.0/(float)q->mlt_size)*cos( ((float)j*M_PI) /q->mlt_size); //sqrt(2/MLT_size) = scalefactor
    }

    /* Initialize the FFT. */
    ff_fft_init(&q->fft_ctx, av_log2(q->mlt_size)-1, 0);
    av_log(NULL,AV_LOG_DEBUG,"FFT initialized, order = %d.\n",
           av_log2(q->samples_per_channel)-1);

    return (int)(q->mlt_window && q->mlt_precos && q->mlt_presin && q->mlt_postcos);
}
Example #3
/**
 * Round v to nearest power of two
 */
static int
make_powerof2(int v)
{
  int m;
  m = ((1 << (av_log2(v))) + (1 << (av_log2(v) + 1))) / 2;
  return 1 << (av_log2(v) + (v > m));
}
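Here m is the midpoint between the two powers of two bracketing v (1 << p and 1 << (p + 1) with p = av_log2(v)), and v is rounded up only when it lies strictly above that midpoint. A few illustrative values, assuming av_log2() and make_powerof2() as above; the main() driver is just a sketch:

#include <assert.h>

int main(void)
{
    assert(make_powerof2(5) == 4);   /* midpoint of 4 and 8 is 6; 5 <= 6 rounds down */
    assert(make_powerof2(6) == 4);   /* the exact midpoint rounds down */
    assert(make_powerof2(7) == 8);   /* 7 > 6 rounds up */
    assert(make_powerof2(8) == 8);   /* powers of two map to themselves */
    return 0;
}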
Example #4
File: ralf.c Project: kx/FFmpeg
static int decode_channel(RALFContext *ctx, GetBitContext *gb, int ch,
                          int length, int mode, int bits)
{
    int i, t;
    int code_params;
    VLCSet *set = ctx->sets + mode;
    VLC *code_vlc; int range, range2, add_bits;
    int *dst = ctx->channel_data[ch];

    ctx->filter_params = get_vlc2(gb, set->filter_params.table, 9, 2);
    ctx->filter_bits   = (ctx->filter_params - 2) >> 6;
    ctx->filter_length = ctx->filter_params - (ctx->filter_bits << 6) - 1;

    if (ctx->filter_params == FILTER_RAW) {
        for (i = 0; i < length; i++)
            dst[i] = get_bits(gb, bits);
        ctx->bias[ch] = 0;
        return 0;
    }

    ctx->bias[ch] = get_vlc2(gb, set->bias.table, 9, 2);
    ctx->bias[ch] = extend_code(gb, ctx->bias[ch], 127, 4);

    if (ctx->filter_params == FILTER_NONE) {
        memset(dst, 0, sizeof(*dst) * length);
        return 0;
    }

    if (ctx->filter_params > 1) {
        int cmode = 0, coeff = 0;
        VLC *vlc = set->filter_coeffs[ctx->filter_bits] + 5;

        add_bits = ctx->filter_bits;

        for (i = 0; i < ctx->filter_length; i++) {
            t = get_vlc2(gb, vlc[cmode].table, vlc[cmode].bits, 2);
            t = extend_code(gb, t, 21, add_bits);
            if (!cmode)
                coeff -= 12 << add_bits;
            coeff = t - coeff;
            ctx->filter[i] = coeff;

            cmode = coeff >> add_bits;
            if (cmode < 0) {
                cmode = -1 - av_log2(-cmode);
                if (cmode < -5)
                    cmode = -5;
            } else if (cmode > 0) {
                cmode =  1 + av_log2(cmode);
                if (cmode > 5)
                    cmode = 5;
            }
        }
    }
Example #5
/**
 * Init IMDCT and windowing tables
 */
static av_cold int init_mdct_win(TwinVQContext *tctx)
{
    int i, j, ret;
    const TwinVQModeTab *mtab = tctx->mtab;
    int size_s = mtab->size / mtab->fmode[TWINVQ_FT_SHORT].sub;
    int size_m = mtab->size / mtab->fmode[TWINVQ_FT_MEDIUM].sub;
    int channels = tctx->avctx->channels;
    float norm = channels == 1 ? 2.0 : 1.0;

    for (i = 0; i < 3; i++) {
        int bsize = tctx->mtab->size / tctx->mtab->fmode[i].sub;
        if ((ret = ff_mdct_init(&tctx->mdct_ctx[i], av_log2(bsize) + 1, 1,
                                -sqrt(norm / bsize) / (1 << 15))))
            return ret;
    }

    FF_ALLOC_OR_GOTO(tctx->avctx, tctx->tmp_buf,
                     mtab->size * sizeof(*tctx->tmp_buf), alloc_fail);

    FF_ALLOC_OR_GOTO(tctx->avctx, tctx->spectrum,
                     2 * mtab->size * channels * sizeof(*tctx->spectrum),
                     alloc_fail);
    FF_ALLOC_OR_GOTO(tctx->avctx, tctx->curr_frame,
                     2 * mtab->size * channels * sizeof(*tctx->curr_frame),
                     alloc_fail);
    FF_ALLOC_OR_GOTO(tctx->avctx, tctx->prev_frame,
                     2 * mtab->size * channels * sizeof(*tctx->prev_frame),
                     alloc_fail);

    for (i = 0; i < 3; i++) {
        int m       = 4 * mtab->size / mtab->fmode[i].sub;
        double freq = 2 * M_PI / m;
        FF_ALLOC_OR_GOTO(tctx->avctx, tctx->cos_tabs[i],
                         (m / 4) * sizeof(*tctx->cos_tabs[i]), alloc_fail);

        for (j = 0; j <= m / 8; j++)
            tctx->cos_tabs[i][j] = cos((2 * j + 1) * freq);
        for (j = 1; j < m / 8; j++)
            tctx->cos_tabs[i][m / 4 - j] = tctx->cos_tabs[i][j];
    }

    ff_init_ff_sine_windows(av_log2(size_m));
    ff_init_ff_sine_windows(av_log2(size_s / 2));
    ff_init_ff_sine_windows(av_log2(mtab->size));

    return 0;

alloc_fail:
    return AVERROR(ENOMEM);
}
Example #6
static av_cold int mp_decode_init(AVCodecContext *avctx)
{
    MotionPixelsContext *mp = avctx->priv_data;
    int w4 = (avctx->width  + 3) & ~3;
    int h4 = (avctx->height + 3) & ~3;

    if(avctx->extradata_size < 2){
        av_log(avctx, AV_LOG_ERROR, "extradata too small\n");
        return AVERROR_INVALIDDATA;
    }

    motionpixels_tableinit();
    mp->avctx = avctx;
    ff_dsputil_init(&mp->dsp, avctx);
    mp->changes_map = av_mallocz(avctx->width * h4);
    mp->offset_bits_len = av_log2(avctx->width * avctx->height) + 1;
    mp->vpt = av_mallocz(avctx->height * sizeof(YuvPixel));
    mp->hpt = av_mallocz(h4 * w4 / 16 * sizeof(YuvPixel));
    if (!mp->changes_map || !mp->vpt || !mp->hpt) {
        av_freep(&mp->changes_map);
        av_freep(&mp->vpt);
        av_freep(&mp->hpt);
        return AVERROR(ENOMEM);
    }
    avctx->pix_fmt = AV_PIX_FMT_RGB555;
    avcodec_get_frame_defaults(&mp->frame);
    return 0;
}
Example #7
static int cbs_write_ue_golomb(CodedBitstreamContext *ctx, PutBitContext *pbc,
                               const char *name, uint32_t value,
                               uint32_t range_min, uint32_t range_max)
{
    int len;

    if (value < range_min || value > range_max) {
        av_log(ctx->log_ctx, AV_LOG_ERROR, "%s out of range: "
               "%"PRIu32", but must be in [%"PRIu32",%"PRIu32"].\n",
               name, value, range_min, range_max);
        return AVERROR_INVALIDDATA;
    }
    av_assert0(value != UINT32_MAX);

    len = av_log2(value + 1);
    if (put_bits_left(pbc) < 2 * len + 1)
        return AVERROR(ENOSPC);

    if (ctx->trace_enable) {
        char bits[65];
        int i;

        for (i = 0; i < len; i++)
            bits[i] = '0';
        bits[len] = '1';
        for (i = 0; i < len; i++)
            bits[len + i + 1] = (value + 1) >> (len - i - 1) & 1 ? '1' : '0';
        bits[len + len + 1] = 0;

        ff_cbs_trace_syntax_element(ctx, put_bits_count(pbc), name, bits, value);
    }
Example #8
static av_cold int mp_decode_init(AVCodecContext *avctx)
{
    MotionPixelsContext *mp = avctx->priv_data;
    int w4 = (avctx->width  + 3) & ~3;
    int h4 = (avctx->height + 3) & ~3;

    if(avctx->extradata_size < 2){
        av_log(avctx, AV_LOG_ERROR, "extradata too small\n");
        return AVERROR_INVALIDDATA;
    }

    motionpixels_tableinit();
    mp->avctx = avctx;
    ff_bswapdsp_init(&mp->bdsp);
    mp->changes_map = av_mallocz_array(avctx->width, h4);
    mp->offset_bits_len = av_log2(avctx->width * avctx->height) + 1;
    mp->vpt = av_mallocz_array(avctx->height, sizeof(YuvPixel));
    mp->hpt = av_mallocz_array(h4 / 4, w4 / 4 * sizeof(YuvPixel));
    if (!mp->changes_map || !mp->vpt || !mp->hpt) {
        av_freep(&mp->changes_map);
        av_freep(&mp->vpt);
        av_freep(&mp->hpt);
        return AVERROR(ENOMEM);
    }
    avctx->pix_fmt = AV_PIX_FMT_RGB555;

    mp->frame = av_frame_alloc();
    if (!mp->frame) {
        mp_decode_end(avctx);
        return AVERROR(ENOMEM);
    }

    return 0;
}
Example #9
void ff_dca_downmix_to_stereo_fixed(DCADSPContext *dcadsp, int32_t **samples,
                                    int *coeff_l, int nsamples, int ch_mask)
{
    int pos, spkr, max_spkr = av_log2(ch_mask);
    int *coeff_r = coeff_l + av_popcount(ch_mask);

    av_assert0(DCA_HAS_STEREO(ch_mask));

    // Scale left and right channels
    pos = (ch_mask & DCA_SPEAKER_MASK_C);
    dcadsp->dmix_scale(samples[DCA_SPEAKER_L], coeff_l[pos    ], nsamples);
    dcadsp->dmix_scale(samples[DCA_SPEAKER_R], coeff_r[pos + 1], nsamples);

    // Downmix remaining channels
    for (spkr = 0; spkr <= max_spkr; spkr++) {
        if (!(ch_mask & (1U << spkr)))
            continue;

        if (*coeff_l && spkr != DCA_SPEAKER_L)
            dcadsp->dmix_add(samples[DCA_SPEAKER_L], samples[spkr],
                             *coeff_l, nsamples);

        if (*coeff_r && spkr != DCA_SPEAKER_R)
            dcadsp->dmix_add(samples[DCA_SPEAKER_R], samples[spkr],
                             *coeff_r, nsamples);

        coeff_l++;
        coeff_r++;
    }
}
Example #10
int bstr_parse_utf8_code_length(unsigned char b)
{
    if (b < 128)
        return 1;
    int bytes = 7 - av_log2(b ^ 255);
    return (bytes >= 2 && bytes <= 4) ? bytes : -1;
}
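Since av_log2() returns the index of the highest set bit, 7 - av_log2(b ^ 255) counts the leading one bits of b, which for a UTF-8 lead byte equals the length of the encoded sequence; anything outside 2..4 (continuation bytes, overlong 5- and 6-byte forms, 0xFE/0xFF) is rejected. Some illustrative values, assuming the function above; the main() driver is only a sketch:

#include <assert.h>

int main(void)
{
    assert(bstr_parse_utf8_code_length('A')  ==  1);   /* ASCII byte */
    assert(bstr_parse_utf8_code_length(0xC3) ==  2);   /* 110xxxxx lead byte */
    assert(bstr_parse_utf8_code_length(0xE2) ==  3);   /* 1110xxxx lead byte */
    assert(bstr_parse_utf8_code_length(0xF0) ==  4);   /* 11110xxx lead byte */
    assert(bstr_parse_utf8_code_length(0x80) == -1);   /* continuation byte, invalid as a lead */
    return 0;
}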
Example #11
void ff_dca_downmix_to_stereo_float(AVFloatDSPContext *fdsp, float **samples,
                                    int *coeff_l, int nsamples, int ch_mask)
{
    int pos, spkr, max_spkr = av_log2(ch_mask);
    int *coeff_r = coeff_l + av_popcount(ch_mask);
    const float scale = 1.0f / (1 << 15);

    av_assert0(DCA_HAS_STEREO(ch_mask));

    // Scale left and right channels
    pos = (ch_mask & DCA_SPEAKER_MASK_C);
    fdsp->vector_fmul_scalar(samples[DCA_SPEAKER_L], samples[DCA_SPEAKER_L],
                             coeff_l[pos    ] * scale, nsamples);
    fdsp->vector_fmul_scalar(samples[DCA_SPEAKER_R], samples[DCA_SPEAKER_R],
                             coeff_r[pos + 1] * scale, nsamples);

    // Downmix remaining channels
    for (spkr = 0; spkr <= max_spkr; spkr++) {
        if (!(ch_mask & (1U << spkr)))
            continue;

        if (*coeff_l && spkr != DCA_SPEAKER_L)
            fdsp->vector_fmac_scalar(samples[DCA_SPEAKER_L], samples[spkr],
                                     *coeff_l * scale, nsamples);

        if (*coeff_r && spkr != DCA_SPEAKER_R)
            fdsp->vector_fmac_scalar(samples[DCA_SPEAKER_R], samples[spkr],
                                     *coeff_r * scale, nsamples);

        coeff_l++;
        coeff_r++;
    }
}
Example #12
static inline void put_symbol_inline(RangeCoder *c, uint8_t *state, int v, int is_signed){
    int i;

    if(v){
        const int a= FFABS(v);
        const int e= av_log2(a);
        put_rac(c, state+0, 0);
        if(e<=9){
            for(i=0; i<e; i++){
                put_rac(c, state+1+i, 1);  //1..10
            }
            put_rac(c, state+1+i, 0);

            for(i=e-1; i>=0; i--){
                put_rac(c, state+22+i, (a>>i)&1); //22..31
            }

            if(is_signed)
                put_rac(c, state+11 + e, v < 0); //11..21
        }else{
            for(i=0; i<e; i++){
                put_rac(c, state+1+FFMIN(i,9), 1);  //1..10
            }
            put_rac(c, state+1+9, 0);

            for(i=e-1; i>=0; i--){
                put_rac(c, state+22+FFMIN(i,9), (a>>i)&1); //22..31
            }

            if(is_signed)
                put_rac(c, state+11 + 10, v < 0); //11..21
        }
    }else{
Example #13
/**
 * Normalize the input samples to use the maximum available precision.
 * This assumes signed 16-bit input samples.
 *
 * @return exponent shift
 */
static int normalize_samples(AC3EncodeContext *s)
{
    int v = s->ac3dsp.ac3_max_msb_abs_int16(s->windowed_samples, AC3_WINDOW_SIZE);
    v = 14 - av_log2(v);
    if (v > 0)
        s->ac3dsp.ac3_lshift_int16(s->windowed_samples, AC3_WINDOW_SIZE, v);
    /* +6 to right-shift from 31-bit to 25-bit */
    return v + 6;
}
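Worked example of the shift: if the loudest windowed sample has its most significant bit at position 12, av_log2() returns 12, v = 14 - 12 = 2, the samples are shifted left by 2 so the peak sits just below the sign bit of the 16-bit range, and the reported exponent shift is 2 + 6 = 8.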
Example #14
static void encode_block(MpegEncContext *s, int16_t *block, int n)
{
    int mant, nbits, code, i, j;
    int component, dc, run, last_index, val;
    MJpegContext *m = s->mjpeg_ctx;
    uint8_t *huff_size_ac;
    uint16_t *huff_code_ac;

    /* DC coef */
    component = (n <= 3 ? 0 : (n&1) + 1);
    dc = block[0]; /* overflow is impossible */
    val = dc - s->last_dc[component];
    if (n < 4) {
        ff_mjpeg_encode_dc(&s->pb, val, m->huff_size_dc_luminance, m->huff_code_dc_luminance);
        huff_size_ac = m->huff_size_ac_luminance;
        huff_code_ac = m->huff_code_ac_luminance;
    } else {
        ff_mjpeg_encode_dc(&s->pb, val, m->huff_size_dc_chrominance, m->huff_code_dc_chrominance);
        huff_size_ac = m->huff_size_ac_chrominance;
        huff_code_ac = m->huff_code_ac_chrominance;
    }
    s->last_dc[component] = dc;

    /* AC coefs */

    run = 0;
    last_index = s->block_last_index[n];
    for(i=1;i<=last_index;i++) {
        j = s->intra_scantable.permutated[i];
        val = block[j];
        if (val == 0) {
            run++;
        } else {
            while (run >= 16) {
                put_bits(&s->pb, huff_size_ac[0xf0], huff_code_ac[0xf0]);
                run -= 16;
            }
            mant = val;
            if (val < 0) {
                val = -val;
                mant--;
            }

            nbits= av_log2(val) + 1;
            code = (run << 4) | nbits;

            put_bits(&s->pb, huff_size_ac[code], huff_code_ac[code]);

            put_sbits(&s->pb, nbits, mant);
            run = 0;
        }
    }

    /* output EOB only if not already 64 values */
    if (last_index < 63 || run != 0)
        put_bits(&s->pb, huff_size_ac[0], huff_code_ac[0]);
}
Example #15
static int init_segment(AVFilterContext *ctx, AudioFIRSegment *seg,
                        int offset, int nb_partitions, int part_size)
{
    AudioFIRContext *s = ctx->priv;

    seg->rdft  = av_calloc(ctx->inputs[0]->channels, sizeof(*seg->rdft));
    seg->irdft = av_calloc(ctx->inputs[0]->channels, sizeof(*seg->irdft));
    if (!seg->rdft || !seg->irdft)
        return AVERROR(ENOMEM);

    seg->fft_length    = part_size * 2 + 1;
    seg->part_size     = part_size;
    seg->block_size    = FFALIGN(seg->fft_length, 32);
    seg->coeff_size    = FFALIGN(seg->part_size + 1, 32);
    seg->nb_partitions = nb_partitions;
    seg->input_size    = offset + s->min_part_size;
    seg->input_offset  = offset;

    seg->part_index    = av_calloc(ctx->inputs[0]->channels, sizeof(*seg->part_index));
    seg->output_offset = av_calloc(ctx->inputs[0]->channels, sizeof(*seg->output_offset));
    if (!seg->part_index || !seg->output_offset)
        return AVERROR(ENOMEM);

    for (int ch = 0; ch < ctx->inputs[0]->channels; ch++) {
        seg->rdft[ch]  = av_rdft_init(av_log2(2 * part_size), DFT_R2C);
        seg->irdft[ch] = av_rdft_init(av_log2(2 * part_size), IDFT_C2R);
        if (!seg->rdft[ch] || !seg->irdft[ch])
            return AVERROR(ENOMEM);
    }

    seg->sum    = ff_get_audio_buffer(ctx->inputs[0], seg->fft_length);
    seg->block  = ff_get_audio_buffer(ctx->inputs[0], seg->nb_partitions * seg->block_size);
    seg->buffer = ff_get_audio_buffer(ctx->inputs[0], seg->part_size);
    seg->coeff  = ff_get_audio_buffer(ctx->inputs[1], seg->nb_partitions * seg->coeff_size * 2);
    seg->input  = ff_get_audio_buffer(ctx->inputs[0], seg->input_size);
    seg->output = ff_get_audio_buffer(ctx->inputs[0], seg->part_size);
    if (!seg->buffer || !seg->sum || !seg->block || !seg->coeff || !seg->input || !seg->output)
        return AVERROR(ENOMEM);

    return 0;
}
Example #16
static int decode_subframe_lpc(FLACContext *s, int32_t* decoded, int pred_order)
{
    int sum, i, j;
    int64_t wsum;
    int coeff_prec, qlevel;
    int coeffs[pred_order];

    /* warm up samples */
    for (i = 0; i < pred_order; i++)
    {
        decoded[i] = get_sbits(&s->gb, s->curr_bps);
    }
    
    coeff_prec = get_bits(&s->gb, 4) + 1;
    if (coeff_prec == 16)
    {
        //fprintf(stderr,"invalid coeff precision\n");
        return -6;
    }
    qlevel = get_sbits(&s->gb, 5);
    if (qlevel < 0) 
    {
        //fprintf(stderr,"qlevel %d not supported, maybe buggy stream\n", qlevel);
        return -7;
    }

    for (i = 0; i < pred_order; i++)
    {
        coeffs[i] = get_sbits(&s->gb, coeff_prec);
    }
    
    if (decode_residuals(s, decoded, pred_order) < 0)
        return -8;

    if ((s->bps + coeff_prec + av_log2(pred_order)) <= 32) {
        #if defined(CPU_COLDFIRE)
        (void)sum;
        lpc_decode_emac(s->blocksize - pred_order, qlevel, pred_order,
                        decoded + pred_order, coeffs);
        #elif defined(CPU_ARM)
        (void)sum;
        lpc_decode_arm(s->blocksize - pred_order, qlevel, pred_order,
                       decoded + pred_order, coeffs);
        #else
        for (i = pred_order; i < s->blocksize; i++)
        {
            sum = 0;
            for (j = 0; j < pred_order; j++)
                sum += coeffs[j] * decoded[i-j-1];
            decoded[i] += sum >> qlevel;
        }
        #endif
    } else {
Example #17
File: cook.c Project: Arcen/FFmpeg
static av_cold int init_cook_mlt(COOKContext *q) {
    int j, ret;
    int mlt_size = q->samples_per_channel;

    if ((q->mlt_window = av_malloc(mlt_size * sizeof(*q->mlt_window))) == 0)
        return AVERROR(ENOMEM);

    /* Initialize the MLT window: simple sine window. */
    ff_sine_window_init(q->mlt_window, mlt_size);
    for(j=0 ; j<mlt_size ; j++)
        q->mlt_window[j] *= sqrt(2.0 / q->samples_per_channel);

    /* Initialize the MDCT. */
    if ((ret = ff_mdct_init(&q->mdct_ctx, av_log2(mlt_size)+1, 1, 1.0/32768.0))) {
        av_free(q->mlt_window);
        return ret;
    }
    av_log(q->avctx,AV_LOG_DEBUG,"MDCT initialized, order = %d.\n",
           av_log2(mlt_size)+1);

    return 0;
}
Example #18
static int init_cook_mlt(COOKContext *q) {
    int j;
    int mlt_size = q->samples_per_channel;

    if ((q->mlt_window = av_malloc(sizeof(float)*mlt_size)) == 0)
      return -1;

    /* Initialize the MLT window: simple sine window. */
    ff_sine_window_init(q->mlt_window, mlt_size);
    for(j=0 ; j<mlt_size ; j++)
        q->mlt_window[j] *= sqrt(2.0 / q->samples_per_channel);

    /* Initialize the MDCT. */
    if (ff_mdct_init(&q->mdct_ctx, av_log2(mlt_size)+1, 1)) {
      av_free(q->mlt_window);
      return -1;
    }
    av_log(NULL,AV_LOG_DEBUG,"MDCT initialized, order = %d.\n",
           av_log2(mlt_size)+1);

    return 0;
}
Example #19
static av_always_inline int get_tail(GetBitContext *gb, int k)
{
    int p, e, res;

    if (k < 1)
        return 0;
    p   = av_log2(k);
    e   = (1 << (p + 1)) - k - 1;
    res = p ? get_bits(gb, p) : 0;
    if (res >= e)
        res = (res << 1) - e + get_bits1(gb);
    return res;
}
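get_tail() reads the tail of a truncated binary (economy) code for values 0..k: with p = av_log2(k), the first e = 2^(p+1) - k - 1 values take p bits and the remaining ones take p + 1 bits. For illustration, a hypothetical encoder counterpart built on FFmpeg's PutBitContext; put_tail() is not part of the original source, just a sketch of the inverse of the decoder above:

static av_always_inline void put_tail(PutBitContext *pb, int k, int res)
{
    int p, e;

    if (k < 1)
        return;
    p = av_log2(k);
    e = (1 << (p + 1)) - k - 1;
    if (res < e) {
        if (p)
            put_bits(pb, p, res);         /* short codeword: p bits */
    } else {
        put_bits(pb, p + 1, res + e);     /* long codeword: p + 1 bits */
    }
}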
Example #20
static av_cold int mp_decode_init(AVCodecContext *avctx)
{
    MotionPixelsContext *mp = avctx->priv_data;

    motionpixels_tableinit();
    mp->avctx = avctx;
    dsputil_init(&mp->dsp, avctx);
    mp->changes_map = av_mallocz(avctx->width * avctx->height);
    mp->offset_bits_len = av_log2(avctx->width * avctx->height) + 1;
    mp->vpt = av_mallocz(avctx->height * sizeof(YuvPixel));
    mp->hpt = av_mallocz(avctx->height * avctx->width / 16 * sizeof(YuvPixel));
    avctx->pix_fmt = PIX_FMT_RGB555;
    return 0;
}
Example #21
static void imdct_and_window(TwinVQContext *tctx, enum TwinVQFrameType ftype,
                             int wtype, float *in, float *prev, int ch)
{
    FFTContext *mdct = &tctx->mdct_ctx[ftype];
    const TwinVQModeTab *mtab = tctx->mtab;
    int bsize = mtab->size / mtab->fmode[ftype].sub;
    int size  = mtab->size;
    float *buf1 = tctx->tmp_buf;
    int j, first_wsize, wsize; // Window size
    float *out  = tctx->curr_frame + 2 * ch * mtab->size;
    float *out2 = out;
    float *prev_buf;
    int types_sizes[] = {
        mtab->size /  mtab->fmode[TWINVQ_FT_LONG].sub,
        mtab->size /  mtab->fmode[TWINVQ_FT_MEDIUM].sub,
        mtab->size / (mtab->fmode[TWINVQ_FT_SHORT].sub * 2),
    };

    wsize       = types_sizes[wtype_to_wsize[wtype]];
    first_wsize = wsize;
    prev_buf    = prev + (size - bsize) / 2;

    for (j = 0; j < mtab->fmode[ftype].sub; j++) {
        int sub_wtype = ftype == TWINVQ_FT_MEDIUM ? 8 : wtype;

        if (!j && wtype == 4)
            sub_wtype = 4;
        else if (j == mtab->fmode[ftype].sub - 1 && wtype == 7)
            sub_wtype = 7;

        wsize = types_sizes[wtype_to_wsize[sub_wtype]];

        mdct->imdct_half(mdct, buf1 + bsize * j, in + bsize * j);

        tctx->fdsp.vector_fmul_window(out2, prev_buf + (bsize - wsize) / 2,
                                      buf1 + bsize * j,
                                      ff_sine_windows[av_log2(wsize)],
                                      wsize / 2);
        out2 += wsize;

        memcpy(out2, buf1 + bsize * j + wsize / 2,
               (bsize - wsize / 2) * sizeof(float));

        out2 += ftype == TWINVQ_FT_MEDIUM ? (bsize - wsize) / 2 : bsize - wsize;

        prev_buf = buf1 + bsize * j + bsize / 2;
    }

    tctx->last_block_pos[ch] = (size + first_wsize) / 2;
}
Example #22
void ff_jpegls_init_state(JLSState *state){
    int i;

    state->twonear = state->near * 2 + 1;
    state->range = ((state->maxval + state->twonear - 1) / state->twonear) + 1;

    // QBPP = ceil(log2(RANGE))
    for(state->qbpp = 0; (1 << state->qbpp) < state->range; state->qbpp++);

    state->bpp = FFMAX(av_log2(state->maxval)+1, 2);
    state->limit = 2*(state->bpp + FFMAX(state->bpp, 8)) - state->qbpp;

    for(i = 0; i < 367; i++) {
        state->A[i] = FFMAX((state->range + 32) >> 6, 2);
        state->N[i] = 1;
    }

}
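The qbpp loop computes ceil(log2(RANGE)) by trial shifting; for range > 1 the same value is av_log2(range - 1) + 1. bpp, by contrast, is av_log2(maxval) + 1, i.e. the number of bits needed to represent maxval, clamped to a minimum of 2.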
Example #23
static av_always_inline av_flatten void put_symbol_inline(RangeCoder *c,
                                                          uint8_t *state, int v,
                                                          int is_signed,
                                                          uint64_t rc_stat[256][2],
                                                          uint64_t rc_stat2[32][2])
{
    int i;

#define put_rac(C, S, B)                        \
    do {                                        \
        if (rc_stat) {                          \
            rc_stat[*(S)][B]++;                 \
            rc_stat2[(S) - state][B]++;         \
        }                                       \
        put_rac(C, S, B);                       \
    } while (0)

    if (v) {
        const int a = FFABS(v);
        const int e = av_log2(a);
        put_rac(c, state + 0, 0);
        if (e <= 9) {
            for (i = 0; i < e; i++)
                put_rac(c, state + 1 + i, 1);  // 1..10
            put_rac(c, state + 1 + i, 0);

            for (i = e - 1; i >= 0; i--)
                put_rac(c, state + 22 + i, (a >> i) & 1);  // 22..31

            if (is_signed)
                put_rac(c, state + 11 + e, v < 0);  // 11..21
        } else {
            for (i = 0; i < e; i++)
                put_rac(c, state + 1 + FFMIN(i, 9), 1);  // 1..10
            put_rac(c, state + 1 + 9, 0);

            for (i = e - 1; i >= 0; i--)
                put_rac(c, state + 22 + FFMIN(i, 9), (a >> i) & 1);  // 22..31

            if (is_signed)
                put_rac(c, state + 11 + 10, v < 0);  // 11..21
        }
    } else {
Example #24
static void ac3_extract_exponents_c(uint8_t *exp, int32_t *coef, int nb_coefs)
{
    int i;

    for (i = 0; i < nb_coefs; i++) {
        int e;
        int v = abs(coef[i]);
        if (v == 0)
            e = 24;
        else {
            e = 23 - av_log2(v);
            if (e >= 24) {
                e = 24;
                coef[i] = 0;
            }
            av_assert2(e >= 0);
        }
        exp[i] = e;
    }
}
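Here e = 23 - av_log2(v) is the number of leading zeros of the 24-bit magnitude, which serves as the AC-3 exponent: for coef[i] = 0x200000 (bit 21 set) the result is e = 23 - 21 = 2. Magnitudes of 2^24 or more would make e negative, which this variant only asserts against; the variant in Example #25 below clamps such coefficients instead.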
Example #25
static void ac3_extract_exponents_c(uint8_t *exp, int32_t *coef, int nb_coefs)
{
    int i;

    for (i = 0; i < nb_coefs; i++) {
        int e;
        int v = abs(coef[i]);
        if (v == 0)
            e = 24;
        else {
            e = 23 - av_log2(v);
            if (e >= 24) {
                e = 24;
                coef[i] = 0;
            } else if (e < 0) {
                e = 0;
                coef[i] = av_clip(coef[i], -16777215, 16777215);
            }
        }
        exp[i] = e;
    }
}
Example #26
static av_cold int mp_decode_init(AVCodecContext *avctx)
{
    MotionPixelsContext *mp = avctx->priv_data;
    int w4 = (avctx->width  + 3) & ~3;
    int h4 = (avctx->height + 3) & ~3;

    motionpixels_tableinit();
    mp->avctx = avctx;
    ff_bswapdsp_init(&mp->bdsp);
    mp->changes_map = av_mallocz(avctx->width * h4);
    mp->offset_bits_len = av_log2(avctx->width * avctx->height) + 1;
    mp->vpt = av_mallocz(avctx->height * sizeof(YuvPixel));
    mp->hpt = av_mallocz(h4 * w4 / 16 * sizeof(YuvPixel));
    avctx->pix_fmt = AV_PIX_FMT_RGB555;

    mp->frame = av_frame_alloc();
    if (!mp->frame) {
        mp_decode_end(avctx);
        return AVERROR(ENOMEM);
    }

    return 0;
}
Example #27
/**
 * Encode a symbol: unary-coded exponent, then mantissa bits, then optional sign.
 */
static inline void put_symbol(CABACContext *c, uint8_t *state, int v, int is_signed, int max_exp){
    int i;

    if(v){
        const int a= ABS(v);
        const int e= av_log2(a);

        put_cabac(c, state+0, 0);
        
        for(i=0; i<e; i++){
            put_cabac(c, state+1+i, 1);  //1..8
        }

        if(e<max_exp){
            put_cabac(c, state+1+i, 0);      //1..8

            for(i=e-1; i>=0; i--){
                put_cabac(c, state+16+e+i, (a>>i)&1); //17..29
            }
            if(is_signed)
                put_cabac(c, state+9 + e, v < 0); //9..16
        }
    }else{
Example #28
static int rv20_decode_picture_header(MpegEncContext *s)
{
    int seq, mb_pos, i;

#if 0
    GetBitContext gb= s->gb;
    for(i=0; i<64; i++){
        av_log(s->avctx, AV_LOG_DEBUG, "%d", get_bits1(&gb));
        if(i%4==3) av_log(s->avctx, AV_LOG_DEBUG, " ");
    }
    av_log(s->avctx, AV_LOG_DEBUG, "\n");
#endif
#if 0
    av_log(s->avctx, AV_LOG_DEBUG, "%3dx%03d/%02Xx%02X ", s->width, s->height, s->width/4, s->height/4);
    for(i=0; i<s->avctx->extradata_size; i++){
        av_log(s->avctx, AV_LOG_DEBUG, "%02X ", ((uint8_t*)s->avctx->extradata)[i]);
        if(i%4==3) av_log(s->avctx, AV_LOG_DEBUG, " ");
    }
    av_log(s->avctx, AV_LOG_DEBUG, "\n");
#endif

    if(s->avctx->sub_id == 0x30202002 || s->avctx->sub_id == 0x30203002){
        if (get_bits(&s->gb, 3)){
            av_log(s->avctx, AV_LOG_ERROR, "unknown triplet set\n");
            return -1;
        }
    }

    i= get_bits(&s->gb, 2);
    switch(i){
    case 0: s->pict_type= FF_I_TYPE; break;
    case 1: s->pict_type= FF_I_TYPE; break; //hmm ...
    case 2: s->pict_type= FF_P_TYPE; break;
    case 3: s->pict_type= FF_B_TYPE; break;
    default:
        av_log(s->avctx, AV_LOG_ERROR, "unknown frame type\n");
        return -1;
    }

    if(s->last_picture_ptr==NULL && s->pict_type==FF_B_TYPE){
        av_log(s->avctx, AV_LOG_ERROR, "early B pix\n");
        return -1;
    }

    if (get_bits1(&s->gb)){
        av_log(s->avctx, AV_LOG_ERROR, "unknown bit set\n");
        return -1;
    }

    s->qscale = get_bits(&s->gb, 5);
    if(s->qscale==0){
        av_log(s->avctx, AV_LOG_ERROR, "error, qscale:0\n");
        return -1;
    }
    if(s->avctx->sub_id == 0x30203002){
        if (get_bits1(&s->gb)){
            av_log(s->avctx, AV_LOG_ERROR, "unknown bit2 set\n");
            return -1;
        }
    }

    if(s->avctx->has_b_frames){
        int f, new_w, new_h;
        int v= s->avctx->extradata_size >= 4 ? 7&((uint8_t*)s->avctx->extradata)[1] : 0;

        if (get_bits1(&s->gb)){
            av_log(s->avctx, AV_LOG_ERROR, "unknown bit3 set\n");
//            return -1;
        }
        seq= get_bits(&s->gb, 13)<<2;

        f= get_bits(&s->gb, av_log2(v)+1);

        if(f){
            new_w= 4*((uint8_t*)s->avctx->extradata)[6+2*f];
            new_h= 4*((uint8_t*)s->avctx->extradata)[7+2*f];
        }else{
            new_w= s->width; //FIXME wrong we of course must save the original in the context
            new_h= s->height;
        }
        if(new_w != s->width || new_h != s->height){
            av_log(s->avctx, AV_LOG_DEBUG, "attempting to change resolution to %dx%d\n", new_w, new_h);
            if (avcodec_check_dimensions(s->avctx, new_h, new_w) < 0)
                return -1;
            MPV_common_end(s);
            s->width  = s->avctx->width = new_w;
            s->height = s->avctx->height= new_h;
            if (MPV_common_init(s) < 0)
                return -1;
        }

        if(s->avctx->debug & FF_DEBUG_PICT_INFO){
            av_log(s->avctx, AV_LOG_DEBUG, "F %d/%d\n", f, v);
        }
    }else{
        seq= get_bits(&s->gb, 8)*128;
    }

//     if(s->avctx->sub_id <= 0x20201002){ //0x20201002 definitely needs this
    mb_pos= ff_h263_decode_mba(s);
/*    }else{
        mb_pos= get_bits(&s->gb, av_log2(s->mb_num-1)+1);
        s->mb_x= mb_pos % s->mb_width;
        s->mb_y= mb_pos / s->mb_width;
    }*/
//av_log(s->avctx, AV_LOG_DEBUG, "%d\n", seq);
    seq |= s->time &~0x7FFF;
    if(seq - s->time >  0x4000) seq -= 0x8000;
    if(seq - s->time < -0x4000) seq += 0x8000;
    if(seq != s->time){
        if(s->pict_type!=FF_B_TYPE){
            s->time= seq;
            s->pp_time= s->time - s->last_non_b_time;
            s->last_non_b_time= s->time;
        }else{
            s->time= seq;
            s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
            if(s->pp_time <=s->pb_time || s->pp_time <= s->pp_time - s->pb_time || s->pp_time<=0){
                av_log(s->avctx, AV_LOG_DEBUG, "messed up order, possible from seeking? skipping current b frame\n");
                return FRAME_SKIPPED;
            }
            ff_mpeg4_init_direct_mv(s);
        }
    }
//    printf("%d %d %d %d %d\n", seq, (int)s->time, (int)s->last_non_b_time, s->pp_time, s->pb_time);
/*for(i=0; i<32; i++){
    av_log(s->avctx, AV_LOG_DEBUG, "%d", get_bits1(&s->gb));
}
av_log(s->avctx, AV_LOG_DEBUG, "\n");*/
    s->no_rounding= get_bits1(&s->gb);

    s->f_code = 1;
    s->unrestricted_mv = 1;
    s->h263_aic= s->pict_type == FF_I_TYPE;
//    s->alt_inter_vlc=1;
//    s->obmc=1;
//    s->umvplus=1;
    s->modified_quant=1;
    s->loop_filter=1;

    if(s->avctx->debug & FF_DEBUG_PICT_INFO){
            av_log(s->avctx, AV_LOG_INFO, "num:%5d x:%2d y:%2d type:%d qscale:%2d rnd:%d\n",
                   seq, s->mb_x, s->mb_y, s->pict_type, s->qscale, s->no_rounding);
    }

    assert(s->pict_type != FF_B_TYPE || !s->low_delay);

    return s->mb_width*s->mb_height - mb_pos;
}
Example #29
/**
 * Prepare filter for processing audio data of given format,
 * sample rate and number of channels.
 */
static int yae_reset(ATempoContext *atempo,
                     enum AVSampleFormat format,
                     int sample_rate,
                     int channels)
{
    const int sample_size = av_get_bytes_per_sample(format);
    uint32_t nlevels  = 0;
    uint32_t pot;
    int i;

    atempo->format   = format;
    atempo->channels = channels;
    atempo->stride   = sample_size * channels;

    // pick a segment window size:
    atempo->window = sample_rate / 24;

    // adjust window size to be a power-of-two integer:
    nlevels = av_log2(atempo->window);
    pot = 1 << nlevels;
    av_assert0(pot <= atempo->window);

    if (pot < atempo->window) {
        atempo->window = pot * 2;
        nlevels++;
    }

    // initialize audio fragment buffers:
    REALLOC_OR_FAIL(atempo->frag[0].data, atempo->window * atempo->stride);
    REALLOC_OR_FAIL(atempo->frag[1].data, atempo->window * atempo->stride);
    REALLOC_OR_FAIL(atempo->frag[0].xdat, atempo->window * sizeof(FFTComplex));
    REALLOC_OR_FAIL(atempo->frag[1].xdat, atempo->window * sizeof(FFTComplex));

    // initialize rDFT contexts:
    av_rdft_end(atempo->real_to_complex);
    atempo->real_to_complex = NULL;

    av_rdft_end(atempo->complex_to_real);
    atempo->complex_to_real = NULL;

    atempo->real_to_complex = av_rdft_init(nlevels + 1, DFT_R2C);
    if (!atempo->real_to_complex) {
        yae_release_buffers(atempo);
        return AVERROR(ENOMEM);
    }

    atempo->complex_to_real = av_rdft_init(nlevels + 1, IDFT_C2R);
    if (!atempo->complex_to_real) {
        yae_release_buffers(atempo);
        return AVERROR(ENOMEM);
    }

    REALLOC_OR_FAIL(atempo->correlation, atempo->window * sizeof(FFTComplex));

    atempo->ring = atempo->window * 3;
    REALLOC_OR_FAIL(atempo->buffer, atempo->ring * atempo->stride);

    // initialize the Hann window function:
    REALLOC_OR_FAIL(atempo->hann, atempo->window * sizeof(float));

    for (i = 0; i < atempo->window; i++) {
        double t = (double)i / (double)(atempo->window - 1);
        double h = 0.5 * (1.0 - cos(2.0 * M_PI * t));
        atempo->hann[i] = (float)h;
    }

    yae_clear(atempo);
    return 0;
}
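Worked example of the window sizing: at 44100 Hz the initial window is 44100 / 24 = 1837 samples, av_log2(1837) = 10 and 1 << 10 = 1024 < 1837, so the window is rounded up to 2048 and nlevels becomes 11; the rDFT contexts are then created with order nlevels + 1 = 12.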
Example #30
static av_cold int decode_init(AVCodecContext *avctx)
{
    BinkAudioContext *s = avctx->priv_data;
    int sample_rate = avctx->sample_rate;
    int sample_rate_half;
    int i;
    int frame_len_bits;

    /* determine frame length */
    if (avctx->sample_rate < 22050) {
        frame_len_bits = 9;
    } else if (avctx->sample_rate < 44100) {
        frame_len_bits = 10;
    } else {
        frame_len_bits = 11;
    }

    if (avctx->channels > MAX_CHANNELS) {
        av_log(avctx, AV_LOG_ERROR, "too many channels: %d\n", avctx->channels);
        return -1;
    }
    avctx->channel_layout = avctx->channels == 1 ? AV_CH_LAYOUT_MONO :
                            AV_CH_LAYOUT_STEREO;

    s->version_b = avctx->extradata && avctx->extradata[3] == 'b';

    if (avctx->codec->id == AV_CODEC_ID_BINKAUDIO_RDFT) {
        // audio is already interleaved for the RDFT format variant
        avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
        sample_rate  *= avctx->channels;
        s->channels = 1;
        if (!s->version_b)
            frame_len_bits += av_log2(avctx->channels);
    } else {
        s->channels = avctx->channels;
        avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
    }

    s->frame_len     = 1 << frame_len_bits;
    s->overlap_len   = s->frame_len / 16;
    s->block_size    = (s->frame_len - s->overlap_len) * s->channels;
    sample_rate_half = (sample_rate + 1) / 2;
    if (avctx->codec->id == AV_CODEC_ID_BINKAUDIO_RDFT)
        s->root = 2.0 / (sqrt(s->frame_len) * 32768.0);
    else
        s->root = s->frame_len / (sqrt(s->frame_len) * 32768.0);
    for (i = 0; i < 96; i++) {
        /* constant is result of 0.066399999/log10(M_E) */
        quant_table[i] = expf(i * 0.15289164787221953823f) * s->root;
    }

    /* calculate number of bands */
    for (s->num_bands = 1; s->num_bands < 25; s->num_bands++)
        if (sample_rate_half <= ff_wma_critical_freqs[s->num_bands - 1])
            break;

    s->bands = av_malloc((s->num_bands + 1) * sizeof(*s->bands));
    if (!s->bands)
        return AVERROR(ENOMEM);

    /* populate bands data */
    s->bands[0] = 2;
    for (i = 1; i < s->num_bands; i++)
        s->bands[i] = (ff_wma_critical_freqs[i - 1] * s->frame_len / sample_rate_half) & ~1;
    s->bands[s->num_bands] = s->frame_len;

    s->first = 1;

    if (CONFIG_BINKAUDIO_RDFT_DECODER && avctx->codec->id == AV_CODEC_ID_BINKAUDIO_RDFT)
        ff_rdft_init(&s->trans.rdft, frame_len_bits, DFT_C2R);
    else if (CONFIG_BINKAUDIO_DCT_DECODER)
        ff_dct_init(&s->trans.dct, frame_len_bits, DCT_III);
    else
        return -1;

    return 0;
}
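Worked example of the frame-length selection: for 44100 Hz stereo with the RDFT codec variant, frame_len_bits starts at 11, sample_rate is scaled to 88200, and (unless version_b is set) frame_len_bits grows by av_log2(2) = 1, giving frame_len = 1 << 12 = 4096 interleaved samples, overlap_len = 4096 / 16 = 256 and block_size = (4096 - 256) * 1 = 3840.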