static int generate_codebook(RoqContext *enc, RoqTempdata *tempdata,
                             int *points, int inputCount, roq_cell *results,
                             int size, int cbsize)
{
    int i, j, k, ret = 0;
    int c_size = size*size/4;
    int *buf;
    int *codebook = av_malloc_array(6*c_size, cbsize*sizeof(int));
    int *closest_cb;

    if (!codebook)
        return AVERROR(ENOMEM);

    if (size == 4) {
        closest_cb = av_malloc_array(6*c_size, inputCount*sizeof(int));
        if (!closest_cb) {
            ret = AVERROR(ENOMEM);
            goto out;
        }
    } else
        closest_cb = tempdata->closest_cb2;

    ret = avpriv_init_elbg(points, 6 * c_size, inputCount, codebook,
                           cbsize, 1, closest_cb, &enc->randctx);
    if (ret < 0)
        goto out;
    ret = avpriv_do_elbg(points, 6 * c_size, inputCount, codebook,
                         cbsize, 1, closest_cb, &enc->randctx);
    if (ret < 0)
        goto out;

    buf = codebook;
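    /* Unpack the ELBG codebook into roq_cell entries; chroma is scaled back down by CHROMA_BIAS with rounding. */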
    for (i=0; i<cbsize; i++)
        for (k=0; k<c_size; k++) {
            for(j=0; j<4; j++)
                results->y[j] = *buf++;

            results->u =    (*buf++ + CHROMA_BIAS/2)/CHROMA_BIAS;
            results->v =    (*buf++ + CHROMA_BIAS/2)/CHROMA_BIAS;
            results++;
        }
out:
    if (size == 4)
        av_free(closest_cb);
    av_free(codebook);
    return ret;
}
Example #2
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ConvolutionContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int p;

    s->depth = desc->comp[0].depth;

    s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0] = s->planewidth[3] = inlink->w;
    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    s->nb_threads = ff_filter_get_nb_threads(ctx);
    s->bptrs = av_calloc(s->nb_threads, sizeof(*s->bptrs));
    if (!s->bptrs)
        return AVERROR(ENOMEM);

    s->bstride = s->planewidth[0] + 64;
    s->bpc = (s->depth + 7) / 8;
    s->buffer = av_malloc_array(7 * s->bstride * s->nb_threads, s->bpc);
    if (!s->buffer)
        return AVERROR(ENOMEM);

    for (p = 0; p < s->nb_threads; p++) {
        s->bptrs[p] = s->buffer + 7 * s->bstride * s->bpc * p;
    }

    if (!strcmp(ctx->filter->name, "convolution")) {
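        /* For formats deeper than 8 bits, pick the 16-bit kernel routine matching each plane's matrix size. */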
        if (s->depth > 8) {
            for (p = 0; p < s->nb_planes; p++) {
                if (s->size[p] == 3)
                    s->filter[p] = filter16_3x3;
                else if (s->size[p] == 5)
                    s->filter[p] = filter16_5x5;
                else if (s->size[p] == 7)
                    s->filter[p] = filter16_7x7;
            }
        }
    } else if (!strcmp(ctx->filter->name, "prewitt")) {
        if (s->depth > 8)
            for (p = 0; p < s->nb_planes; p++)
                s->filter[p] = filter16_prewitt;
    } else if (!strcmp(ctx->filter->name, "roberts")) {
        if (s->depth > 8)
            for (p = 0; p < s->nb_planes; p++)
                s->filter[p] = filter16_roberts;
    } else if (!strcmp(ctx->filter->name, "sobel")) {
        if (s->depth > 8)
            for (p = 0; p < s->nb_planes; p++)
                s->filter[p] = filter16_sobel;
    }

    return 0;
}
Example #3
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    SilenceRemoveContext *s = ctx->priv;

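    /* RMS analysis window covering 1/50 s (20 ms) of interleaved samples across all channels. */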
    s->window_size = (inlink->sample_rate / 50) * inlink->channels;
    s->window = av_malloc_array(s->window_size, sizeof(*s->window));
    if (!s->window)
        return AVERROR(ENOMEM);

    clear_rms(s);

    s->start_duration = av_rescale(s->start_duration, inlink->sample_rate,
                                   AV_TIME_BASE);
    s->stop_duration  = av_rescale(s->stop_duration, inlink->sample_rate,
                                   AV_TIME_BASE);

    s->start_holdoff = av_malloc_array(FFMAX(s->start_duration, 1),
                                       sizeof(*s->start_holdoff) *
                                       inlink->channels);
    if (!s->start_holdoff)
        return AVERROR(ENOMEM);

    s->start_holdoff_offset = 0;
    s->start_holdoff_end    = 0;
    s->start_found_periods  = 0;

    s->stop_holdoff = av_malloc_array(FFMAX(s->stop_duration, 1),
                                      sizeof(*s->stop_holdoff) *
                                      inlink->channels);
    if (!s->stop_holdoff)
        return AVERROR(ENOMEM);

    s->stop_holdoff_offset = 0;
    s->stop_holdoff_end    = 0;
    s->stop_found_periods  = 0;

    if (s->start_periods)
        s->mode = SILENCE_TRIM;
    else
        s->mode = SILENCE_COPY;

    return 0;
}
Example #4
STDMETHODIMP CDecD3D11::FindVideoServiceConversion(AVCodecID codec, int profile, DXGI_FORMAT surface_format, GUID *input )
{
  AVD3D11VADeviceContext *pDeviceContext = (AVD3D11VADeviceContext *)((AVHWDeviceContext *)m_pDevCtx->data)->hwctx;
  HRESULT hr = S_OK;

  UINT nProfiles = pDeviceContext->video_device->GetVideoDecoderProfileCount();
  GUID *guid_list = (GUID *)av_malloc_array(nProfiles, sizeof(*guid_list));
  if (!guid_list)
    return E_OUTOFMEMORY;

  DbgLog((LOG_TRACE, 10, L"-> Enumerating supported D3D11 modes (count: %d)", nProfiles));
  for (UINT i = 0; i < nProfiles; i++)
  {
    hr = pDeviceContext->video_device->GetVideoDecoderProfile(i, &guid_list[i]);
    if (FAILED(hr)) {
      DbgLog((LOG_ERROR, 10, L"Error retrieving decoder profile"));
      av_free(guid_list);
      return hr;
    }

#ifdef DEBUG
    const dxva_mode_t *mode = get_dxva_mode_from_guid(&guid_list[i]);
    if (mode) {
      DbgLog((LOG_TRACE, 10, L"  -> %S", mode->name));
    }
    else {
      DbgLog((LOG_TRACE, 10, L"  -> Unknown GUID (%s)", WStringFromGUID(guid_list[i]).c_str()));
    }
#endif
  }

  /* Iterate over our priority list */
  for (unsigned i = 0; dxva_modes[i].name; i++) {
    const dxva_mode_t *mode = &dxva_modes[i];
    if (!check_dxva_mode_compatibility(mode, codec, profile))
      continue;

    BOOL supported = FALSE;
    for (UINT g = 0; !supported && g < nProfiles; g++) {
      supported = IsEqualGUID(*mode->guid, guid_list[g]);
    }
    if (!supported)
      continue;

    DbgLog((LOG_TRACE, 10, L"-> Trying to use '%S'", mode->name));
    hr = pDeviceContext->video_device->CheckVideoDecoderFormat(mode->guid, surface_format, &supported);
    if (SUCCEEDED(hr) && supported) {
      *input = *mode->guid;

      av_free(guid_list);
      return S_OK;
    }
  }

  av_free(guid_list);
  return E_FAIL;
}
Example #5
av_cold int ff_psy_init(FFPsyContext *ctx, AVCodecContext *avctx, int num_lens,
                        const uint8_t **bands, const int* num_bands,
                        int num_groups, const uint8_t *group_map)
{
    int i, j, k = 0;

    ctx->avctx = avctx;
    ctx->ch        = av_mallocz_array(sizeof(ctx->ch[0]), avctx->channels * 2);
    ctx->group     = av_mallocz_array(sizeof(ctx->group[0]), num_groups);
    ctx->bands     = av_malloc_array (sizeof(ctx->bands[0]),      num_lens);
    ctx->num_bands = av_malloc_array (sizeof(ctx->num_bands[0]),  num_lens);
    ctx->cutoff    = avctx->cutoff;

    if (!ctx->ch || !ctx->group || !ctx->bands || !ctx->num_bands) {
        ff_psy_end(ctx);
        return AVERROR(ENOMEM);
    }

    memcpy(ctx->bands,     bands,     sizeof(ctx->bands[0])     *  num_lens);
    memcpy(ctx->num_bands, num_bands, sizeof(ctx->num_bands[0]) *  num_lens);

    /* assign channels to groups (with virtual channels for coupling) */
    for (i = 0; i < num_groups; i++) {
        /* NOTE: Add 1 to handle the AAC chan_config without modification.
         *       This has the side effect of allowing an array of 0s to map
         *       to one channel per group.
         */
        ctx->group[i].num_ch = group_map[i] + 1;
        for (j = 0; j < ctx->group[i].num_ch * 2; j++)
            ctx->group[i].ch[j]  = &ctx->ch[k++];
    }

    switch (ctx->avctx->codec_id) {
    case AV_CODEC_ID_AAC:
        ctx->model = &ff_aac_psy_model;
        break;
    }
    if (ctx->model->init)
        return ctx->model->init(ctx);
    return 0;
}
Example #6
static int filter_units_make_type_list(const char *list_string,
                                       CodedBitstreamUnitType **type_list,
                                       int *nb_types)
{
    CodedBitstreamUnitType *list = NULL;
    int pass, count;

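    /* Two passes over the string: the first only counts entries, the second fills the list allocated after pass 1. */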
    for (pass = 1; pass <= 2; pass++) {
        long value, range_start, range_end;
        const char *str;
        char *value_end;

        count = 0;
        for (str = list_string; *str;) {
            value = strtol(str, &value_end, 0);
            if (str == value_end)
                goto invalid;
            str = (const char *)value_end;
            if (*str == '-') {
                ++str;
                range_start = value;
                range_end   = strtol(str, &value_end, 0);
                if (str == value_end)
                    goto invalid;

                for (value = range_start; value < range_end; value++) {
                    if (pass == 2)
                        list[count] = value;
                    ++count;
                }
            } else {
                if (pass == 2)
                    list[count] = value;
                ++count;
            }
            if (*str == '|')
                ++str;
        }
        if (pass == 1) {
            list = av_malloc_array(count, sizeof(*list));
            if (!list)
                return AVERROR(ENOMEM);
        }
    }

    *type_list = list;
    *nb_types  = count;
    return 0;

invalid:
    av_freep(&list);
    return AVERROR(EINVAL);
}
Example #7
static int config_props(AVFilterLink *inlink)
{
    PixdescTestContext *priv = inlink->dst->priv;

    priv->pix_desc = av_pix_fmt_desc_get(inlink->format);

    av_freep(&priv->line);
    if (!(priv->line = av_malloc_array(sizeof(*priv->line), inlink->w)))
        return AVERROR(ENOMEM);

    return 0;
}
Example #8
int swri_get_dither(SwrContext *s, void *dst, int len, unsigned seed, enum AVSampleFormat noise_fmt) {
    double scale = s->dither.noise_scale;
#define TMP_EXTRA 2
    double *tmp = av_malloc_array(len + TMP_EXTRA, sizeof(double));
    int i;

    if (!tmp)
        return AVERROR(ENOMEM);

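    /* Generate raw noise with a 32-bit LCG; the default path (triangular dither) takes the difference of two uniform samples. */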
    for(i=0; i<len + TMP_EXTRA; i++){
        double v;
        seed = seed* 1664525 + 1013904223;

        switch(s->dither.method){
            case SWR_DITHER_RECTANGULAR: v= ((double)seed) / UINT_MAX - 0.5; break;
            default:
                av_assert0(s->dither.method < SWR_DITHER_NB);
                v = ((double)seed) / UINT_MAX;
                seed = seed*1664525 + 1013904223;
                v-= ((double)seed) / UINT_MAX;
                break;
        }
        tmp[i] = v;
    }

    for(i=0; i<len; i++){
        double v;

        switch(s->dither.method){
            default:
                av_assert0(s->dither.method < SWR_DITHER_NB);
                v = tmp[i];
                break;
            case SWR_DITHER_TRIANGULAR_HIGHPASS :
                v = (- tmp[i] + 2*tmp[i+1] - tmp[i+2]) / sqrt(6);
                break;
        }

        v*= scale;

        switch(noise_fmt){
            case AV_SAMPLE_FMT_S16P: ((int16_t*)dst)[i] = v; break;
            case AV_SAMPLE_FMT_S32P: ((int32_t*)dst)[i] = v; break;
            case AV_SAMPLE_FMT_FLTP: ((float  *)dst)[i] = v; break;
            case AV_SAMPLE_FMT_DBLP: ((double *)dst)[i] = v; break;
            default: av_assert0(0);
        }
    }

    av_free(tmp);
    return 0;
}
Example #9
int ff_ffv1_allocate_initial_states(FFV1Context *f)
{
    int i;

    for (i = 0; i < f->quant_table_count; i++) {
        f->initial_states[i] = av_malloc_array(f->context_count[i],
                                         sizeof(*f->initial_states[i]));
        if (!f->initial_states[i])
            return AVERROR(ENOMEM);
        memset(f->initial_states[i], 128,
               f->context_count[i] * sizeof(*f->initial_states[i]));
    }
    return 0;
}
Example #10
PCA *ff_pca_init(int n){
    PCA *pca;
    if(n<=0)
        return NULL;

    pca= av_mallocz(sizeof(*pca));
    if(!pca)
        return NULL;
    pca->n= n;
    pca->z = av_malloc_array(n, sizeof(*pca->z));
    pca->count=0;
    pca->covariance= av_calloc(n*n, sizeof(double));
    pca->mean= av_calloc(n, sizeof(double));

    return pca;
}
Example #11
/**
 * init MDCT or IMDCT computation.
 */
av_cold int ff_mdct_init(FFTContext *s, int nbits, int inverse, double scale)
{
    int n, n4, i;
    double alpha, theta;
    int tstep;

    memset(s, 0, sizeof(*s));
    n = 1 << nbits;
    s->mdct_bits = nbits;
    s->mdct_size = n;
    n4 = n >> 2;
    s->mdct_permutation = FF_MDCT_PERM_NONE;

    if (ff_fft_init(s, s->mdct_bits - 2, inverse) < 0)
        goto fail;

    s->tcos = av_malloc_array(n/2, sizeof(FFTSample));
    if (!s->tcos)
        goto fail;

    switch (s->mdct_permutation) {
    case FF_MDCT_PERM_NONE:
        s->tsin = s->tcos + n4;
        tstep = 1;
        break;
    case FF_MDCT_PERM_INTERLEAVE:
        s->tsin = s->tcos + 1;
        tstep = 2;
        break;
    default:
        goto fail;
    }

    theta = 1.0 / 8.0 + (scale < 0 ? n4 : 0);
    scale = sqrt(fabs(scale));
    for(i=0;i<n4;i++) {
        alpha = 2 * M_PI * (i + theta) / n;
#if FFT_FIXED_32
        s->tcos[i*tstep] = lrint(-cos(alpha) * 2147483648.0);
        s->tsin[i*tstep] = lrint(-sin(alpha) * 2147483648.0);
#else
        s->tcos[i*tstep] = FIX15(-cos(alpha) * scale);
        s->tsin[i*tstep] = FIX15(-sin(alpha) * scale);
#endif
    }
    return 0;
 fail:
    ff_mdct_end(s);
    return -1;
}
Example #12
/**
 * Initialize cel evaluators and set their source coordinates
 */
static void create_cel_evals(RoqContext *enc, RoqTempdata *tempData)
{
    int n=0, x, y, i;

    tempData->cel_evals = av_malloc_array(enc->width*enc->height/64, sizeof(CelEvaluation));

    /* Map to the ROQ quadtree order */
    for (y=0; y<enc->height; y+=16)
        for (x=0; x<enc->width; x+=16)
            for(i=0; i<4; i++) {
                tempData->cel_evals[n  ].sourceX = x + (i&1)*8;
                tempData->cel_evals[n++].sourceY = y + (i&2)*4;
            }
}
Example #13
File: cngenc.c  Project: BtbN/FFmpeg
static av_cold int cng_encode_init(AVCodecContext *avctx)
{
    CNGContext *p = avctx->priv_data;
    int ret;

    if (avctx->channels != 1) {
        av_log(avctx, AV_LOG_ERROR, "Only mono supported\n");
        return AVERROR(EINVAL);
    }

    avctx->frame_size = 640;
    p->order = 10;
    if ((ret = ff_lpc_init(&p->lpc, avctx->frame_size, p->order, FF_LPC_TYPE_LEVINSON)) < 0)
        return ret;
    p->samples32 = av_malloc_array(avctx->frame_size, sizeof(*p->samples32));
    p->ref_coef = av_malloc_array(p->order, sizeof(*p->ref_coef));
    if (!p->samples32 || !p->ref_coef) {
        cng_encode_close(avctx);
        return AVERROR(ENOMEM);
    }

    return 0;
}
Example #14
static int config_output(AVFilterLink *outlink)
{
    AudioPhaserContext *p = outlink->src->priv;
    AVFilterLink *inlink = outlink->src->inputs[0];

    p->delay_buffer_length = p->delay * 0.001 * inlink->sample_rate + 0.5;
    p->delay_buffer = av_calloc(p->delay_buffer_length, sizeof(*p->delay_buffer) * inlink->channels);
    p->modulation_buffer_length = inlink->sample_rate / p->speed + 0.5;
    p->modulation_buffer = av_malloc_array(p->modulation_buffer_length, sizeof(*p->modulation_buffer));

    if (!p->modulation_buffer || !p->delay_buffer)
        return AVERROR(ENOMEM);

    ff_generate_wave_table(p->type, AV_SAMPLE_FMT_S32,
                           p->modulation_buffer, p->modulation_buffer_length,
                           1., p->delay_buffer_length, M_PI / 2.0);

    p->delay_pos = p->modulation_pos = 0;

    switch (inlink->format) {
    case AV_SAMPLE_FMT_DBL:
        p->phaser = phaser_dbl;
        break;
    case AV_SAMPLE_FMT_DBLP:
        p->phaser = phaser_dblp;
        break;
    case AV_SAMPLE_FMT_FLT:
        p->phaser = phaser_flt;
        break;
    case AV_SAMPLE_FMT_FLTP:
        p->phaser = phaser_fltp;
        break;
    case AV_SAMPLE_FMT_S16:
        p->phaser = phaser_s16;
        break;
    case AV_SAMPLE_FMT_S16P:
        p->phaser = phaser_s16p;
        break;
    case AV_SAMPLE_FMT_S32:
        p->phaser = phaser_s32;
        break;
    case AV_SAMPLE_FMT_S32P:
        p->phaser = phaser_s32p;
        break;
    default:
        av_assert0(0);
    }

    return 0;
}
Example #15
static int config_input(AVFilterLink *inlink)
{
    VagueDenoiserContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int nsteps_width, nsteps_height, nsteps_max;

    s->depth = desc->comp[0].depth;
    s->nb_planes = desc->nb_components;

    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;
    s->planewidth[1]  = s->planewidth[2]  = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0]  = s->planewidth[3]  = inlink->w;

    s->block = av_malloc_array(inlink->w * inlink->h, sizeof(*s->block));
    s->in    = av_malloc_array(32 + FFMAX(inlink->w, inlink->h), sizeof(*s->in));
    s->out   = av_malloc_array(32 + FFMAX(inlink->w, inlink->h), sizeof(*s->out));
    s->tmp   = av_malloc_array(32 + FFMAX(inlink->w, inlink->h), sizeof(*s->tmp));

    if (!s->block || !s->in || !s->out || !s->tmp)
        return AVERROR(ENOMEM);

    s->threshold *= 1 << (s->depth - 8);
    s->peak = (1 << s->depth) - 1;

    nsteps_width  = ((s->planes & 2 || s->planes & 4) && s->nb_planes > 1) ? s->planewidth[1] : s->planewidth[0];
    nsteps_height = ((s->planes & 2 || s->planes & 4) && s->nb_planes > 1) ? s->planeheight[1] : s->planeheight[0];

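    /* Cap the requested number of wavelet steps so each level still fits within the smallest filtered plane. */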
    for (nsteps_max = 1; nsteps_max < 15; nsteps_max++) {
        if (pow(2, nsteps_max) >= nsteps_width || pow(2, nsteps_max) >= nsteps_height)
            break;
    }

    s->nsteps = FFMIN(s->nsteps, nsteps_max - 2);

    return 0;
}
Example #16
int ff_alloc_entries(AVCodecContext *avctx, int count)
{
    int i;

    if (avctx->active_thread_type & FF_THREAD_SLICE)  {
        SliceThreadContext *p = avctx->internal->thread_ctx;

        if (p->entries) {
            av_assert0(p->thread_count == avctx->thread_count);
            av_freep(&p->entries);
        }

        p->thread_count  = avctx->thread_count;
        p->entries       = av_mallocz_array(count, sizeof(int));

        if (!p->progress_mutex) {
            p->progress_mutex = av_malloc_array(p->thread_count, sizeof(pthread_mutex_t));
            p->progress_cond  = av_malloc_array(p->thread_count, sizeof(pthread_cond_t));
        }

        if (!p->entries || !p->progress_mutex || !p->progress_cond) {
            av_freep(&p->entries);
            av_freep(&p->progress_mutex);
            av_freep(&p->progress_cond);
            return AVERROR(ENOMEM);
        }
        p->entries_count  = count;

        for (i = 0; i < p->thread_count; i++) {
            pthread_mutex_init(&p->progress_mutex[i], NULL);
            pthread_cond_init(&p->progress_cond[i], NULL);
        }
    }

    return 0;
}
Example #17
static int cuda_frames_get_constraints(AVHWDeviceContext *ctx,
                                       const void *hwconfig,
                                       AVHWFramesConstraints *constraints)
{
    int i;

    constraints->valid_sw_formats = av_malloc_array(FF_ARRAY_ELEMS(supported_formats) + 1,
                                                    sizeof(*constraints->valid_sw_formats));
    if (!constraints->valid_sw_formats)
        return AVERROR(ENOMEM);

    for (i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++)
        constraints->valid_sw_formats[i] = supported_formats[i];
    constraints->valid_sw_formats[FF_ARRAY_ELEMS(supported_formats)] = AV_PIX_FMT_NONE;

    constraints->valid_hw_formats = av_malloc_array(2, sizeof(*constraints->valid_hw_formats));
    if (!constraints->valid_hw_formats)
        return AVERROR(ENOMEM);

    constraints->valid_hw_formats[0] = AV_PIX_FMT_CUDA;
    constraints->valid_hw_formats[1] = AV_PIX_FMT_NONE;

    return 0;
}
Example #18
AVParserState *ff_store_parser_state(AVFormatContext *s)
{
    int i;
    AVStream *st;
    AVParserStreamState *ss;
    AVParserState *state = av_malloc(sizeof(AVParserState));
    if (!state)
        return NULL;

    state->stream_states = av_malloc_array(s->nb_streams, sizeof(AVParserStreamState));
    if (!state->stream_states) {
        av_free(state);
        return NULL;
    }

    state->fpos = avio_tell(s->pb);

    // copy context structures
    state->packet_buffer                    = s->packet_buffer;
    state->parse_queue                      = s->parse_queue;
    state->raw_packet_buffer                = s->raw_packet_buffer;
    state->raw_packet_buffer_remaining_size = s->raw_packet_buffer_remaining_size;

    s->packet_buffer                        = NULL;
    s->parse_queue                          = NULL;
    s->raw_packet_buffer                    = NULL;
    s->raw_packet_buffer_remaining_size     = RAW_PACKET_BUFFER_SIZE;

    // copy stream structures
    state->nb_streams = s->nb_streams;
    for (i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];
        ss = &state->stream_states[i];

        ss->parser        = st->parser;
        ss->last_IP_pts   = st->last_IP_pts;
        ss->cur_dts       = st->cur_dts;
        ss->probe_packets = st->probe_packets;

        st->parser        = NULL;
        st->last_IP_pts   = AV_NOPTS_VALUE;
        st->cur_dts       = AV_NOPTS_VALUE;
        st->probe_packets = MAX_PROBE_PACKETS;
    }

    return state;
}
Example #19
static int dxva2_transfer_get_formats(AVHWFramesContext *ctx,
                                      enum AVHWFrameTransferDirection dir,
                                      enum AVPixelFormat **formats)
{
    enum AVPixelFormat *fmts;

    fmts = av_malloc_array(2, sizeof(*fmts));
    if (!fmts)
        return AVERROR(ENOMEM);

    fmts[0] = ctx->sw_format;
    fmts[1] = AV_PIX_FMT_NONE;

    *formats = fmts;

    return 0;
}
Example #20
int av_packet_split_side_data(AVPacket *pkt){
    if (!pkt->side_data_elems && pkt->size >12 && AV_RB64(pkt->data + pkt->size - 8) == FF_MERGE_MARKER){
        int i;
        unsigned int size;
        uint8_t *p;

        p = pkt->data + pkt->size - 8 - 5;
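        /* First pass: walk the merged side data backwards to count the elements; bit 7 of the type byte marks where to stop. */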
        for (i=1; ; i++){
            size = AV_RB32(p);
            if (size>INT_MAX - 5 || p - pkt->data < size)
                return 0;
            if (p[4]&128)
                break;
            if (p - pkt->data < size + 5)
                return 0;
            p-= size+5;
        }

        if (i > AV_PKT_DATA_NB)
            return AVERROR(ERANGE);

        pkt->side_data = av_malloc_array(i, sizeof(*pkt->side_data));
        if (!pkt->side_data)
            return AVERROR(ENOMEM);

        p= pkt->data + pkt->size - 8 - 5;
        for (i=0; ; i++){
            size= AV_RB32(p);
            av_assert0(size<=INT_MAX - 5 && p - pkt->data >= size);
            pkt->side_data[i].data = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE);
            pkt->side_data[i].size = size;
            pkt->side_data[i].type = p[4]&127;
            if (!pkt->side_data[i].data)
                return AVERROR(ENOMEM);
            memcpy(pkt->side_data[i].data, p-size, size);
            pkt->size -= size + 5;
            if(p[4]&128)
                break;
            p-= size+5;
        }
        pkt->size -= 8;
        pkt->side_data_elems = i+1;
        return 1;
    }
    return 0;
}
Example #21
static void fft_ref_init(int nbits, int inverse)
{
    int n, i;
    double c1, s1, alpha;

    n = 1 << nbits;
    exptab = av_malloc_array((n / 2), sizeof(*exptab));

    for (i = 0; i < (n/2); i++) {
        alpha = 2 * M_PI * (float)i / (float)n;
        c1 = cos(alpha);
        s1 = sin(alpha);
        if (!inverse)
            s1 = -s1;
        exptab[i].re = c1;
        exptab[i].im = s1;
    }
}
Example #22
static int fft_ref_init(int nbits, int inverse)
{
    int i, n = 1 << nbits;

    exptab = av_malloc_array((n / 2), sizeof(*exptab));
    if (!exptab)
        return AVERROR(ENOMEM);

    for (i = 0; i < (n / 2); i++) {
        double alpha = 2 * M_PI * (float) i / (float) n;
        double c1 = cos(alpha), s1 = sin(alpha);
        if (!inverse)
            s1 = -s1;
        exptab[i].re = c1;
        exptab[i].im = s1;
    }
    return 0;
}
Example #23
av_cold int ff_ffv1_init_slice_contexts(FFV1Context *f)
{
    int i;

    f->slice_count = f->num_h_slices * f->num_v_slices;
    av_assert0(f->slice_count > 0);

    for (i = 0; i < f->slice_count; i++) {
        int sx          = i % f->num_h_slices;
        int sy          = i / f->num_h_slices;
        int sxs         = f->avctx->width  *  sx      / f->num_h_slices;
        int sxe         = f->avctx->width  * (sx + 1) / f->num_h_slices;
        int sys         = f->avctx->height *  sy      / f->num_v_slices;
        int sye         = f->avctx->height * (sy + 1) / f->num_v_slices;
        FFV1Context *fs = av_mallocz(sizeof(*fs));

        if (!fs)
            goto memfail;

        f->slice_context[i] = fs;
        memcpy(fs, f, sizeof(*fs));
        memset(fs->rc_stat2, 0, sizeof(fs->rc_stat2));

        fs->slice_width  = sxe - sxs;
        fs->slice_height = sye - sys;
        fs->slice_x      = sxs;
        fs->slice_y      = sys;

        fs->sample_buffer = av_malloc_array((fs->width + 6), 3 * MAX_PLANES *
                                      sizeof(*fs->sample_buffer));
        if (!fs->sample_buffer) {
            av_freep(&f->slice_context[i]);
            goto memfail;
        }
    }
    return 0;

memfail:
    while(--i >= 0) {
        av_freep(&f->slice_context[i]->sample_buffer);
        av_freep(&f->slice_context[i]);
    }
    return AVERROR(ENOMEM);
}
Example #24
/**
 * Initialize MDCT tables.
 *
 * @param s  AC-3 encoder private context
 * @return   0 on success, negative error code on failure
 */
av_cold int ff_ac3_float_mdct_init(AC3EncodeContext *s)
{
    float *window;
    int i, n, n2;

    n  = 1 << 9;
    n2 = n >> 1;

    window = av_malloc_array(n, sizeof(*window));
    if (!window) {
        av_log(s->avctx, AV_LOG_ERROR, "Cannot allocate memory.\n");
        return AVERROR(ENOMEM);
    }
    ff_kbd_window_init(window, 5.0, n2);
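    /* ff_kbd_window_init() fills the first half of the window; mirror it to build the full window. */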
    for (i = 0; i < n2; i++)
        window[n-1-i] = window[i];
    s->mdct_window = window;

    return ff_mdct_init(&s->mdct, 9, 0, -2.0 / n);
}
Example #25
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    SwapRectContext *s = ctx->priv;

    if (!s->w  || !s->h  ||
        !s->x1 || !s->y1 ||
        !s->x2 || !s->y2)
        return AVERROR(EINVAL);

    s->desc = av_pix_fmt_desc_get(inlink->format);
    av_image_fill_max_pixsteps(s->pixsteps, NULL, s->desc);
    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    s->temp = av_malloc_array(inlink->w, s->pixsteps[0]);
    if (!s->temp)
        return AVERROR(ENOMEM);

    return 0;
}
Example #26
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AudioSurroundContext *s = ctx->priv;
    int ch;

    s->rdft = av_calloc(inlink->channels, sizeof(*s->rdft));
    if (!s->rdft)
        return AVERROR(ENOMEM);

    for (ch = 0; ch < inlink->channels; ch++) {
        s->rdft[ch]  = av_rdft_init(ff_log2(s->buf_size), DFT_R2C);
        if (!s->rdft[ch])
            return AVERROR(ENOMEM);
    }
    s->nb_in_channels = inlink->channels;
    s->input_levels = av_malloc_array(s->nb_in_channels, sizeof(*s->input_levels));
    if (!s->input_levels)
        return AVERROR(ENOMEM);
    for (ch = 0;  ch < s->nb_in_channels; ch++)
        s->input_levels[ch] = s->level_in;
    ch = av_get_channel_layout_channel_index(inlink->channel_layout, AV_CH_FRONT_CENTER);
    if (ch >= 0)
        s->input_levels[ch] *= s->fc_in;
    ch = av_get_channel_layout_channel_index(inlink->channel_layout, AV_CH_LOW_FREQUENCY);
    if (ch >= 0)
        s->input_levels[ch] *= s->lfe_in;

    s->input = ff_get_audio_buffer(inlink, s->buf_size * 2);
    if (!s->input)
        return AVERROR(ENOMEM);

    s->fifo = av_audio_fifo_alloc(inlink->format, inlink->channels, s->buf_size);
    if (!s->fifo)
        return AVERROR(ENOMEM);

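    /* Convert the cut-off frequencies from Hz into RDFT bin indices (buf_size / 2 bins up to Nyquist). */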
    s->lowcut = 1.f * s->lowcutf / (inlink->sample_rate * 0.5) * (s->buf_size / 2);
    s->highcut = 1.f * s->highcutf / (inlink->sample_rate * 0.5) * (s->buf_size / 2);

    return 0;
}
Example #27
static av_cold int init_cook_mlt(COOKContext *q)
{
    int j, ret;
    int mlt_size = q->samples_per_channel;

    if ((q->mlt_window = av_malloc_array(mlt_size, sizeof(*q->mlt_window))) == 0)
        return AVERROR(ENOMEM);

    /* Initialize the MLT window: simple sine window. */
    ff_sine_window_init(q->mlt_window, mlt_size);
    for (j = 0; j < mlt_size; j++)
        q->mlt_window[j] *= sqrt(2.0 / q->samples_per_channel);

    /* Initialize the MDCT. */
    if ((ret = ff_mdct_init(&q->mdct_ctx, av_log2(mlt_size) + 1, 1, 1.0 / 32768.0))) {
        av_freep(&q->mlt_window);
        return ret;
    }
    av_log(q->avctx, AV_LOG_DEBUG, "MDCT initialized, order = %d.\n",
           av_log2(mlt_size) + 1);

    return 0;
}
Example #28
static int vdpau_transfer_get_formats(AVHWFramesContext *ctx,
                                      enum AVHWFrameTransferDirection dir,
                                      enum AVPixelFormat **formats)
{
    VDPAUFramesContext *priv  = ctx->internal->priv;

    enum AVPixelFormat *fmts;

    if (priv->nb_pix_fmts == 1) {
        av_log(ctx, AV_LOG_ERROR,
               "No target formats are supported for this chroma type\n");
        return AVERROR(ENOSYS);
    }

    fmts = av_malloc_array(priv->nb_pix_fmts, sizeof(*fmts));
    if (!fmts)
        return AVERROR(ENOMEM);

    memcpy(fmts, priv->pix_fmts, sizeof(*fmts) * (priv->nb_pix_fmts));
    *formats = fmts;

    return 0;
}
Example #29
static int config_input(AVFilterLink *inlink)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    GBlurContext *s = inlink->dst->priv;

    s->depth = desc->comp[0].depth;
    s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0] = s->planewidth[3] = inlink->w;
    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    s->buffer = av_malloc_array(inlink->w, inlink->h * sizeof(*s->buffer));
    if (!s->buffer)
        return AVERROR(ENOMEM);

    if (s->sigmaV < 0) {
        s->sigmaV = s->sigma;
    }

    return 0;
}
Example #30
static int init_pass2(MpegEncContext *s)
{
    RateControlContext *rcc = &s->rc_context;
    AVCodecContext *a       = s->avctx;
    int i, toobig;
    double fps             = get_fps(s->avctx);
    double complexity[5]   = { 0 }; // approximate bits at quant=1
    uint64_t const_bits[5] = { 0 }; // quantizer independent bits
    uint64_t all_const_bits;
    uint64_t all_available_bits = (uint64_t)(s->bit_rate *
                                             (double)rcc->num_entries / fps);
    double rate_factor          = 0;
    double step;
    const int filter_size = (int)(a->qblur * 4) | 1;
    double expected_bits = 0; // init to silence gcc warning
    double *qscale, *blurred_qscale, qscale_sum;

    /* find complexity & const_bits & decide the pict_types */
    for (i = 0; i < rcc->num_entries; i++) {
        RateControlEntry *rce = &rcc->entry[i];

        rce->new_pict_type                = rce->pict_type;
        rcc->i_cplx_sum[rce->pict_type]  += rce->i_tex_bits * rce->qscale;
        rcc->p_cplx_sum[rce->pict_type]  += rce->p_tex_bits * rce->qscale;
        rcc->mv_bits_sum[rce->pict_type] += rce->mv_bits;
        rcc->frame_count[rce->pict_type]++;

        complexity[rce->new_pict_type] += (rce->i_tex_bits + rce->p_tex_bits) *
                                          (double)rce->qscale;
        const_bits[rce->new_pict_type] += rce->mv_bits + rce->misc_bits;
    }

    all_const_bits = const_bits[AV_PICTURE_TYPE_I] +
                     const_bits[AV_PICTURE_TYPE_P] +
                     const_bits[AV_PICTURE_TYPE_B];

    if (all_available_bits < all_const_bits) {
        av_log(s->avctx, AV_LOG_ERROR, "requested bitrate is too low\n");
        return -1;
    }

    qscale         = av_malloc_array(rcc->num_entries, sizeof(double));
    blurred_qscale = av_malloc_array(rcc->num_entries, sizeof(double));
    if (!qscale || !blurred_qscale) {
        av_free(qscale);
        av_free(blurred_qscale);
        return AVERROR(ENOMEM);
    }
    toobig = 0;

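    /* Search for a rate_factor that uses up the available bits, halving the step size each iteration. */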
    for (step = 256 * 256; step > 0.0000001; step *= 0.5) {
        expected_bits = 0;
        rate_factor  += step;

        rcc->buffer_index = s->avctx->rc_buffer_size / 2;

        /* find qscale */
        for (i = 0; i < rcc->num_entries; i++) {
            RateControlEntry *rce = &rcc->entry[i];

            qscale[i] = get_qscale(s, &rcc->entry[i], rate_factor, i);
            rcc->last_qscale_for[rce->pict_type] = qscale[i];
        }
        assert(filter_size % 2 == 1);

        /* fixed I/B QP relative to P mode */
        for (i = FFMAX(0, rcc->num_entries - 300); i < rcc->num_entries; i++) {
            RateControlEntry *rce = &rcc->entry[i];

            qscale[i] = get_diff_limited_q(s, rce, qscale[i]);
        }

        for (i = rcc->num_entries - 1; i >= 0; i--) {
            RateControlEntry *rce = &rcc->entry[i];

            qscale[i] = get_diff_limited_q(s, rce, qscale[i]);
        }

        /* smooth curve */
        for (i = 0; i < rcc->num_entries; i++) {
            RateControlEntry *rce = &rcc->entry[i];
            const int pict_type   = rce->new_pict_type;
            int j;
            double q = 0.0, sum = 0.0;

            for (j = 0; j < filter_size; j++) {
                int index    = i + j - filter_size / 2;
                double d     = index - i;
                double coeff = a->qblur == 0 ? 1.0 : exp(-d * d / (a->qblur * a->qblur));

                if (index < 0 || index >= rcc->num_entries)
                    continue;
                if (pict_type != rcc->entry[index].new_pict_type)
                    continue;
                q   += qscale[index] * coeff;
                sum += coeff;
            }
            blurred_qscale[i] = q / sum;
        }

        /* find expected bits */
        for (i = 0; i < rcc->num_entries; i++) {
            RateControlEntry *rce = &rcc->entry[i];
            double bits;

            rce->new_qscale = modify_qscale(s, rce, blurred_qscale[i], i);

            bits  = qp2bits(rce, rce->new_qscale) + rce->mv_bits + rce->misc_bits;
            bits += 8 * ff_vbv_update(s, bits);

            rce->expected_bits = expected_bits;
            expected_bits     += bits;
        }

        av_dlog(s->avctx,
                "expected_bits: %f all_available_bits: %d rate_factor: %f\n",
                expected_bits, (int)all_available_bits, rate_factor);
        if (expected_bits > all_available_bits) {
            rate_factor -= step;
            ++toobig;
        }
    }
    av_free(qscale);
    av_free(blurred_qscale);

    /* check bitrate calculations and print info */
    qscale_sum = 0.0;
    for (i = 0; i < rcc->num_entries; i++) {
        av_dlog(s, "[lavc rc] entry[%d].new_qscale = %.3f  qp = %.3f\n",
                i,
                rcc->entry[i].new_qscale,
                rcc->entry[i].new_qscale / FF_QP2LAMBDA);
        qscale_sum += av_clip(rcc->entry[i].new_qscale / FF_QP2LAMBDA,
                              s->avctx->qmin, s->avctx->qmax);
    }
    assert(toobig <= 40);
    av_log(s->avctx, AV_LOG_DEBUG,
           "[lavc rc] requested bitrate: %d bps  expected bitrate: %d bps\n",
           s->bit_rate,
           (int)(expected_bits / ((double)all_available_bits / s->bit_rate)));
    av_log(s->avctx, AV_LOG_DEBUG,
           "[lavc rc] estimated target average qp: %.3f\n",
           (float)qscale_sum / rcc->num_entries);
    if (toobig == 0) {
        av_log(s->avctx, AV_LOG_INFO,
               "[lavc rc] Using all of requested bitrate is not "
               "necessary for this video with these parameters.\n");
    } else if (toobig == 40) {
        av_log(s->avctx, AV_LOG_ERROR,
               "[lavc rc] Error: bitrate too low for this video "
               "with these parameters.\n");
        return -1;
    } else if (fabs(expected_bits / all_available_bits - 1.0) > 0.01) {
        av_log(s->avctx, AV_LOG_ERROR,
               "[lavc rc] Error: 2pass curve failed to converge\n");
        return -1;
    }

    return 0;
}