/**
 * Initialize the psychoacoustic model context.
 *
 * Allocates per-channel state (doubled for coupling "virtual" channels),
 * channel groups, and copies the band layout tables.
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure, or the
 *         model's own init() return value.
 */
av_cold int ff_psy_init(FFPsyContext *ctx, AVCodecContext *avctx, int num_lens,
                        const uint8_t **bands, const int *num_bands,
                        int num_groups, const uint8_t *group_map)
{
    int i, j, k = 0;

    ctx->avctx     = avctx;
    ctx->ch        = av_mallocz_array(sizeof(ctx->ch[0]), avctx->channels * 2);
    ctx->group     = av_mallocz_array(sizeof(ctx->group[0]), num_groups);
    ctx->bands     = av_malloc_array (sizeof(ctx->bands[0]),     num_lens);
    ctx->num_bands = av_malloc_array (sizeof(ctx->num_bands[0]), num_lens);

    /* Fail before the memcpy()s below dereference NULL on OOM. */
    if (!ctx->ch || !ctx->group || !ctx->bands || !ctx->num_bands) {
        av_freep(&ctx->ch);
        av_freep(&ctx->group);
        av_freep(&ctx->bands);
        av_freep(&ctx->num_bands);
        return AVERROR(ENOMEM);
    }

    memcpy(ctx->bands,     bands,     sizeof(ctx->bands[0])     * num_lens);
    memcpy(ctx->num_bands, num_bands, sizeof(ctx->num_bands[0]) * num_lens);

    /* assign channels to groups (with virtual channels for coupling) */
    for (i = 0; i < num_groups; i++) {
        /* NOTE: Add 1 to handle the AAC chan_config without modification.
         * This has the side effect of allowing an array of 0s to map
         * to one channel per group.
         */
        ctx->group[i].num_ch = group_map[i] + 1;
        for (j = 0; j < ctx->group[i].num_ch * 2; j++)
            ctx->group[i].ch[j] = &ctx->ch[k++];
    }

    switch (ctx->avctx->codec_id) {
    case AV_CODEC_ID_AAC:
        ctx->model = &ff_aac_psy_model;
        break;
    }
    if (ctx->model->init)
        return ctx->model->init(ctx);
    return 0;
}
/* Decoder init: fixed 8 kHz mono S16 output with an order-12 synthesis
 * filter; allocates all per-instance buffers up front. */
static av_cold int cng_decode_init(AVCodecContext *avctx)
{
    CNGContext *cng = avctx->priv_data;

    avctx->sample_fmt  = AV_SAMPLE_FMT_S16;
    avctx->channels    = 1;
    avctx->sample_rate = 8000;

    cng->order        = 12;
    avctx->frame_size = 640;

    cng->refl_coef        = av_mallocz_array(cng->order, sizeof(*cng->refl_coef));
    cng->target_refl_coef = av_mallocz_array(cng->order, sizeof(*cng->target_refl_coef));
    cng->lpc_coef         = av_mallocz_array(cng->order, sizeof(*cng->lpc_coef));
    cng->filter_out       = av_mallocz_array(avctx->frame_size + cng->order,
                                             sizeof(*cng->filter_out));
    cng->excitation       = av_mallocz_array(avctx->frame_size,
                                             sizeof(*cng->excitation));

    if (!cng->refl_coef || !cng->target_refl_coef || !cng->lpc_coef ||
        !cng->filter_out || !cng->excitation) {
        cng_decode_close(avctx);
        return AVERROR(ENOMEM);
    }

    av_lfg_init(&cng->lfg, 0);

    return 0;
}
/* Decoder init: validates extradata, builds lookup tables and allocates
 * the per-frame change map and pixel tables. */
static av_cold int mp_decode_init(AVCodecContext *avctx)
{
    MotionPixelsContext *mp = avctx->priv_data;
    int aligned_w = (avctx->width  + 3) & ~3;
    int aligned_h = (avctx->height + 3) & ~3;

    if (avctx->extradata_size < 2) {
        av_log(avctx, AV_LOG_ERROR, "extradata too small\n");
        return AVERROR_INVALIDDATA;
    }

    motionpixels_tableinit();
    mp->avctx = avctx;
    ff_bswapdsp_init(&mp->bdsp);

    mp->changes_map     = av_mallocz_array(avctx->width, aligned_h);
    mp->offset_bits_len = av_log2(avctx->width * avctx->height) + 1;
    mp->vpt             = av_mallocz_array(avctx->height, sizeof(YuvPixel));
    mp->hpt             = av_mallocz_array(aligned_h / 4, aligned_w / 4 * sizeof(YuvPixel));
    if (!mp->changes_map || !mp->vpt || !mp->hpt) {
        av_freep(&mp->changes_map);
        av_freep(&mp->vpt);
        av_freep(&mp->hpt);
        return AVERROR(ENOMEM);
    }

    avctx->pix_fmt = AV_PIX_FMT_RGB555;

    mp->frame = av_frame_alloc();
    if (!mp->frame) {
        mp_decode_end(avctx);
        return AVERROR(ENOMEM);
    }

    return 0;
}
/**
 * Initialize the RoQ encoder: validate dimensions, allocate frames and
 * motion-vector tables.
 *
 * @return 0 on success, AVERROR(EINVAL) on bad dimensions,
 *         AVERROR(ENOMEM) on allocation failure.
 */
static av_cold int roq_encode_init(AVCodecContext *avctx)
{
    RoqContext *enc = avctx->priv_data;
    int max_dim;

    av_lfg_init(&enc->randctx, 1);

    enc->avctx = avctx;
    enc->framesSinceKeyframe = 0;

    if ((avctx->width & 0xf) || (avctx->height & 0xf)) {
        av_log(avctx, AV_LOG_ERROR, "Dimensions must be divisible by 16\n");
        return AVERROR(EINVAL);
    }

    /* Enforce the limit the message actually reports: Quake 3 compatible
     * output only supports dimensions up to 32768 (previously the check
     * allowed up to 65535 regardless of quake3_compat). */
    max_dim = enc->quake3_compat ? 32768 : 65535;
    if (avctx->width > max_dim || avctx->height > max_dim) {
        av_log(avctx, AV_LOG_ERROR, "Dimensions are max %d\n", max_dim);
        return AVERROR(EINVAL);
    }

    if (((avctx->width) & (avctx->width - 1)) ||
        ((avctx->height) & (avctx->height - 1)))
        av_log(avctx, AV_LOG_ERROR,
               "Warning: dimensions not power of two, this is not supported by quake\n");

    enc->width  = avctx->width;
    enc->height = avctx->height;

    enc->framesSinceKeyframe = 0;
    enc->first_frame = 1;

    enc->last_frame    = av_frame_alloc();
    enc->current_frame = av_frame_alloc();
    avctx->coded_frame = av_frame_alloc();
    if (!enc->last_frame || !enc->current_frame || !avctx->coded_frame) {
        roq_encode_end(avctx);
        return AVERROR(ENOMEM);
    }

    enc->tmpData = av_malloc(sizeof(RoqTempdata));

    enc->this_motion4 =
        av_mallocz_array((enc->width * enc->height / 16), sizeof(motion_vect));
    enc->last_motion4 =
        av_malloc_array ((enc->width * enc->height / 16), sizeof(motion_vect));
    enc->this_motion8 =
        av_mallocz_array((enc->width * enc->height / 64), sizeof(motion_vect));
    enc->last_motion8 =
        av_malloc_array ((enc->width * enc->height / 64), sizeof(motion_vect));

    if (!enc->tmpData || !enc->this_motion4 || !enc->last_motion4 ||
        !enc->this_motion8 || !enc->last_motion8) {
        roq_encode_end(avctx);
        return AVERROR(ENOMEM);
    }

    return 0;
}
/**
 * Allocate the sample buffers for an audio frame.
 *
 * Derives the channel count from the layout if unset, computes the
 * per-plane buffer size, and allocates one AVBuffer per plane; planes
 * beyond AV_NUM_DATA_POINTERS spill into extended_data/extended_buf.
 *
 * @param frame frame with format, nb_samples and channel layout set
 * @param align buffer alignment passed to av_samples_get_buffer_size()
 * @return 0 on success, a negative AVERROR on failure (frame is unref'd
 *         if a plane allocation fails)
 */
static int get_audio_buffer(AVFrame *frame, int align)
{
    int channels;
    int planar = av_sample_fmt_is_planar(frame->format);
    int planes;
    int ret, i;

    if (!frame->channels)
        frame->channels = av_get_channel_layout_nb_channels(frame->channel_layout);

    channels = frame->channels;
    /* planar formats keep each channel in its own plane */
    planes = planar ? channels : 1;

    CHECK_CHANNELS_CONSISTENCY(frame);
    if (!frame->linesize[0]) {
        ret = av_samples_get_buffer_size(&frame->linesize[0], channels,
                                         frame->nb_samples, frame->format,
                                         align);
        if (ret < 0)
            return ret;
    }

    if (planes > AV_NUM_DATA_POINTERS) {
        /* more planes than data[] slots: allocate a full pointer array plus
         * extra AVBufferRef slots for the overflow planes */
        frame->extended_data = av_mallocz_array(planes,
                                                sizeof(*frame->extended_data));
        frame->extended_buf  = av_mallocz_array((planes - AV_NUM_DATA_POINTERS),
                                                sizeof(*frame->extended_buf));
        if (!frame->extended_data || !frame->extended_buf) {
            av_freep(&frame->extended_data);
            av_freep(&frame->extended_buf);
            return AVERROR(ENOMEM);
        }
        frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
    } else
        frame->extended_data = frame->data;

    /* first AV_NUM_DATA_POINTERS planes live in buf[]/data[] */
    for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
        frame->buf[i] = av_buffer_alloc(frame->linesize[0]);
        if (!frame->buf[i]) {
            av_frame_unref(frame);
            return AVERROR(ENOMEM);
        }
        frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
    }
    /* remaining planes live in extended_buf[]/extended_data[] */
    for (i = 0; i < planes - AV_NUM_DATA_POINTERS; i++) {
        frame->extended_buf[i] = av_buffer_alloc(frame->linesize[0]);
        if (!frame->extended_buf[i]) {
            av_frame_unref(frame);
            return AVERROR(ENOMEM);
        }
        frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
    }
    return 0;
}
/**
 * Derive a QSV frames context from a VAAPI or DXVA2 one: wrap each source
 * surface in an mfxFrameSurface1 whose MemId refers back to the native
 * surface handle.
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure,
 *         AVERROR(ENOSYS) for unsupported source device types.
 */
static int qsv_frames_derive_to(AVHWFramesContext *dst_ctx,
                                AVHWFramesContext *src_ctx, int flags)
{
    QSVFramesContext *s = dst_ctx->internal->priv;
    AVQSVFramesContext *dst_hwctx = dst_ctx->hwctx;
    int i;

    switch (src_ctx->device_ctx->type) {
#if CONFIG_VAAPI
    case AV_HWDEVICE_TYPE_VAAPI:
        {
        AVVAAPIFramesContext *src_hwctx = src_ctx->hwctx;
        s->surfaces_internal = av_mallocz_array(src_hwctx->nb_surfaces,
                                                sizeof(*s->surfaces_internal));
        if (!s->surfaces_internal)
            return AVERROR(ENOMEM);
        for (i = 0; i < src_hwctx->nb_surfaces; i++) {
            qsv_init_surface(dst_ctx, &s->surfaces_internal[i]);
            /* MemId points into the source VASurfaceID array */
            s->surfaces_internal[i].Data.MemId = src_hwctx->surface_ids + i;
        }
        dst_hwctx->nb_surfaces = src_hwctx->nb_surfaces;
        dst_hwctx->frame_type  = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
        }
        break;
#endif
#if CONFIG_DXVA2
    case AV_HWDEVICE_TYPE_DXVA2:
        {
        AVDXVA2FramesContext *src_hwctx = src_ctx->hwctx;
        s->surfaces_internal = av_mallocz_array(src_hwctx->nb_surfaces,
                                                sizeof(*s->surfaces_internal));
        if (!s->surfaces_internal)
            return AVERROR(ENOMEM);
        for (i = 0; i < src_hwctx->nb_surfaces; i++) {
            qsv_init_surface(dst_ctx, &s->surfaces_internal[i]);
            /* MemId is the IDirect3DSurface9 handle itself */
            s->surfaces_internal[i].Data.MemId = (mfxMemId)src_hwctx->surfaces[i];
        }
        dst_hwctx->nb_surfaces = src_hwctx->nb_surfaces;
        if (src_hwctx->surface_type == DXVA2_VideoProcessorRenderTarget)
            dst_hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET;
        else
            dst_hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
        }
        break;
#endif
    default:
        return AVERROR(ENOSYS);
    }

    dst_hwctx->surfaces = s->surfaces_internal;

    return 0;
}
/* Configure the mixer output: allocate the frame list, one FIFO per input,
 * the input state array and the scaling tables. */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    MixContext *s = ctx->priv;
    char layout_name[64];
    int in;

    s->planar      = av_sample_fmt_is_planar(outlink->format);
    s->sample_rate = outlink->sample_rate;
    outlink->time_base = (AVRational){ 1, outlink->sample_rate };
    s->next_pts    = AV_NOPTS_VALUE;

    s->frame_list = av_mallocz(sizeof(*s->frame_list));
    if (!s->frame_list)
        return AVERROR(ENOMEM);

    s->fifos = av_mallocz_array(s->nb_inputs, sizeof(*s->fifos));
    if (!s->fifos)
        return AVERROR(ENOMEM);

    s->nb_channels = outlink->channels;
    for (in = 0; in < s->nb_inputs; in++) {
        s->fifos[in] = av_audio_fifo_alloc(outlink->format, s->nb_channels, 1024);
        if (!s->fifos[in])
            return AVERROR(ENOMEM);
    }

    s->input_state = av_malloc(s->nb_inputs);
    if (!s->input_state)
        return AVERROR(ENOMEM);
    memset(s->input_state, INPUT_ON, s->nb_inputs);
    s->active_inputs = s->nb_inputs;

    s->input_scale = av_mallocz_array(s->nb_inputs, sizeof(*s->input_scale));
    s->scale_norm  = av_mallocz_array(s->nb_inputs, sizeof(*s->scale_norm));
    if (!s->input_scale || !s->scale_norm)
        return AVERROR(ENOMEM);
    for (in = 0; in < s->nb_inputs; in++)
        s->scale_norm[in] = s->weight_sum / s->weights[in];
    calculate_scales(s, 0);

    av_get_channel_layout_string(layout_name, sizeof(layout_name), -1,
                                 outlink->channel_layout);

    av_log(ctx, AV_LOG_VERBOSE,
           "inputs:%d fmt:%s srate:%d cl:%s\n", s->nb_inputs,
           av_get_sample_fmt_name(outlink->format),
           outlink->sample_rate, layout_name);

    return 0;
}
/**
 * Allocate the line-pointer arrays of a slice.
 *
 * Ring slices get three times the line count; their tmp pointers alias the
 * upper third of the same array.
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure.
 */
static int alloc_slice(SwsSlice *s, enum AVPixelFormat fmt, int lumLines,
                       int chrLines, int h_sub_sample, int v_sub_sample, int ring)
{
    int i;
    int size[4] = { lumLines, chrLines, chrLines, lumLines };

    s->h_chr_sub_sample = h_sub_sample;
    s->v_chr_sub_sample = v_sub_sample;
    s->fmt = fmt;
    s->is_ring = ring;
    s->should_free_lines = 0;

    for (i = 0; i < 4; ++i) {
        int n = size[i] * (ring == 0 ? 1 : 3);
        /* arguments in the conventional (nmemb, size) order; the previous
         * swapped order produced the same total but defeated the intent of
         * av_mallocz_array()'s element-count overflow check */
        s->plane[i].line = av_mallocz_array(n, sizeof(uint8_t *));
        if (!s->plane[i].line)
            return AVERROR(ENOMEM);

        s->plane[i].tmp = ring ? s->plane[i].line + size[i] * 2 : NULL;
        s->plane[i].available_lines = size[i];
        s->plane[i].sliceY = 0;
        s->plane[i].sliceH = 0;
    }
    return 0;
}
/**
 * Populate an mfxFrameAllocResponse from an existing QSVMid array.
 *
 * Two extra slots past the surface entries hold buffer references that keep
 * the frames context and the QSVMid array alive while the response is used.
 *
 * @return 0 on success, AVERROR(ENOMEM) on failure (partial state freed).
 */
static int qsv_setup_mids(mfxFrameAllocResponse *resp, AVBufferRef *hw_frames_ref,
                          AVBufferRef *mids_buf)
{
    AVHWFramesContext *frames_ctx = (AVHWFramesContext*)hw_frames_ref->data;
    AVQSVFramesContext *frames_hwctx = frames_ctx->hwctx;
    QSVMid *mids = (QSVMid*)mids_buf->data;
    int nb_surfaces = frames_hwctx->nb_surfaces;
    int i;

    // the allocated size of the array is two larger than the number of
    // surfaces, we store the references to the frames context and the
    // QSVMid array there
    resp->mids = av_mallocz_array(nb_surfaces + 2, sizeof(*resp->mids));
    if (!resp->mids)
        return AVERROR(ENOMEM);

    for (i = 0; i < nb_surfaces; i++)
        resp->mids[i] = &mids[i];
    resp->NumFrameActual = nb_surfaces;

    /* slot [NumFrameActual]: reference keeping the frames context alive */
    resp->mids[resp->NumFrameActual] = (mfxMemId)av_buffer_ref(hw_frames_ref);
    if (!resp->mids[resp->NumFrameActual]) {
        av_freep(&resp->mids);
        return AVERROR(ENOMEM);
    }

    /* slot [NumFrameActual + 1]: reference keeping the QSVMid array alive */
    resp->mids[resp->NumFrameActual + 1] = av_buffer_ref(mids_buf);
    if (!resp->mids[resp->NumFrameActual + 1]) {
        av_buffer_unref((AVBufferRef**)&resp->mids[resp->NumFrameActual]);
        av_freep(&resp->mids);
        return AVERROR(ENOMEM);
    }

    return 0;
}
/**
 * Write the ICONDIR header and reserve space for the per-image directory
 * entries, which are filled in by the trailer.
 *
 * @return 0 on success, AVERROR(EINVAL) for non-seekable output or bad
 *         stream attributes, AVERROR(ENOMEM) on allocation failure.
 */
static int ico_write_header(AVFormatContext *s)
{
    IcoMuxContext *ico = s->priv_data;
    AVIOContext *pb = s->pb;
    int ret;
    int i;

    if (!pb->seekable) {
        av_log(s, AV_LOG_ERROR, "Output is not seekable\n");
        return AVERROR(EINVAL);
    }

    ico->current_image = 0;
    ico->nb_images = s->nb_streams;

    avio_wl16(pb, 0); // reserved
    avio_wl16(pb, 1); // 1 == icon
    avio_skip(pb, 2); // skip the number of images

    for (i = 0; i < s->nb_streams; i++) {
        if ((ret = ico_check_attributes(s, s->streams[i]->codec)))
            return ret;

        // Fill in later when writing trailer...
        avio_skip(pb, 16);
    }

    /* Allocate per-image bookkeeping; previously this used
     * sizeof(IcoMuxContext), the wrong element type (over-allocation). */
    ico->images = av_mallocz_array(ico->nb_images, sizeof(*ico->images));
    if (!ico->images)
        return AVERROR(ENOMEM);

    avio_flush(pb);

    return 0;
}
static int win32_open(const char *filename_utf8, int oflag, int pmode) { int fd; int num_chars; wchar_t *filename_w; /* convert UTF-8 to wide chars */ num_chars = MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, filename_utf8, -1, NULL, 0); if (num_chars <= 0) goto fallback; filename_w = av_mallocz_array(num_chars, sizeof(wchar_t)); if (!filename_w) { errno = ENOMEM; return -1; } MultiByteToWideChar(CP_UTF8, 0, filename_utf8, -1, filename_w, num_chars); fd = _wsopen(filename_w, oflag, SH_DENYNO, pmode); av_freep(&filename_w); if (fd != -1 || (oflag & O_CREAT)) return fd; fallback: /* filename may be in CP_ACP */ return _sopen(filename_utf8, oflag, SH_DENYNO, pmode); }
/* Create the psychoacoustic preprocessing context: set up an optional
 * Butterworth lowpass (from the explicit cutoff, or the AAC default) and
 * per-channel filter state. Returns NULL on allocation failure. */
av_cold struct FFPsyPreprocessContext* ff_psy_preprocess_init(AVCodecContext *avctx)
{
    FFPsyPreprocessContext *pre;
    float cutoff_coeff = 0;
    int ch;

    pre = av_mallocz(sizeof(FFPsyPreprocessContext));
    if (!pre)
        return NULL;
    pre->avctx = avctx;

    if (avctx->cutoff > 0)
        cutoff_coeff = 2.0 * avctx->cutoff / avctx->sample_rate;
    if (!cutoff_coeff && avctx->codec_id == AV_CODEC_ID_AAC)
        cutoff_coeff = 2.0 * AAC_CUTOFF(avctx) / avctx->sample_rate;

    if (cutoff_coeff && cutoff_coeff < 0.98)
        pre->fcoeffs = ff_iir_filter_init_coeffs(avctx, FF_FILTER_TYPE_BUTTERWORTH,
                                                 FF_FILTER_MODE_LOWPASS, FILT_ORDER,
                                                 cutoff_coeff, 0.0, 0.0);
    if (pre->fcoeffs) {
        pre->fstate = av_mallocz_array(sizeof(pre->fstate[0]), avctx->channels);
        if (!pre->fstate) {
            av_free(pre);
            return NULL;
        }
        for (ch = 0; ch < avctx->channels; ch++)
            pre->fstate[ch] = ff_iir_filter_init_state(FILT_ORDER);
    }

    ff_iir_filter_init(&pre->fiir);

    return pre;
}
/**
 * Initialize an audio resampling context.
 *
 * Builds a polyphase filter bank with phase_count = 1 << phase_shift phases
 * plus one duplicated wrap-around phase for linear interpolation.
 *
 * @param out_rate    output sample rate
 * @param in_rate     input sample rate
 * @param filter_size length of each filter phase
 * @param phase_shift log2 of the number of phases
 * @param linear      enable linear interpolation between phases
 * @param cutoff      cutoff frequency relative to Nyquist (0..1)
 * @return allocated context, or NULL on failure
 */
AVResampleContext *av_resample_init(int out_rate, int in_rate, int filter_size,
                                    int phase_shift, int linear, double cutoff)
{
    AVResampleContext *c= av_mallocz(sizeof(AVResampleContext));
    /* when downsampling, scale the cutoff down by the rate ratio */
    double factor= FFMIN(out_rate * cutoff / in_rate, 1.0);
    int phase_count= 1<<phase_shift;

    if (!c)
        return NULL;

    c->phase_shift= phase_shift;
    c->phase_mask= phase_count-1;
    c->linear= linear;

    /* longer filters are needed for stronger downsampling */
    c->filter_length= FFMAX((int)ceil(filter_size/factor), 1);
    /* one extra phase holds a copy of phase 0 for interpolation wrap-around */
    c->filter_bank= av_mallocz_array(c->filter_length, (phase_count+1)*sizeof(FELEM));
    if (!c->filter_bank)
        goto error;
    if (build_filter(c->filter_bank, factor, c->filter_length, phase_count,
                     1<<FILTER_SHIFT, WINDOW_TYPE))
        goto error;
    /* replicate the first phase (shifted by one tap) into the extra slot */
    memcpy(&c->filter_bank[c->filter_length*phase_count+1], c->filter_bank,
           (c->filter_length-1)*sizeof(FELEM));
    c->filter_bank[c->filter_length*phase_count]= c->filter_bank[c->filter_length - 1];

    /* reduce the increment ratio to avoid overflow in the fixed-point stepper */
    if(!av_reduce(&c->src_incr, &c->dst_incr, out_rate,
                  in_rate * (int64_t)phase_count, INT32_MAX/2))
        goto error;
    c->ideal_dst_incr= c->dst_incr;

    /* start centered on the filter */
    c->index= -phase_count*((c->filter_length-1)/2);

    return c;
error:
    av_free(c->filter_bank);
    av_free(c);
    return NULL;
}
/**
 * Grow the buffers of an AudioData to hold at least count samples,
 * preserving existing content.
 *
 * Doubles the requested count to amortize future growth.
 *
 * @return 1 if reallocated, 0 if already large enough,
 *         AVERROR(EINVAL)/AVERROR(ENOMEM) on failure.
 */
int swri_realloc_audio(AudioData *a, int count){
    int i, countb;
    AudioData old;

    /* reject negative counts and sizes whose byte total would overflow */
    if(count < 0 || count > INT_MAX/2/a->bps/a->ch_count)
        return AVERROR(EINVAL);

    if(a->count >= count)
        return 0;

    count*=2;  /* over-allocate so repeated growth is amortized */

    countb= FFALIGN(count*a->bps, ALIGN);
    old= *a;

    av_assert0(a->bps);
    av_assert0(a->ch_count);

    a->data= av_mallocz_array(countb, a->ch_count);
    if(!a->data)
        return AVERROR(ENOMEM);

    /* re-point the channel pointers; planar data is copied per channel,
     * interleaved data is copied in one block below */
    for(i=0; i<a->ch_count; i++){
        a->ch[i]= a->data + i*(a->planar ? countb : a->bps);
        if(a->planar) memcpy(a->ch[i], old.ch[i], a->count*a->bps);
    }
    if(!a->planar) memcpy(a->ch[0], old.ch[0], a->count*a->ch_count*a->bps);
    av_freep(&old.data);
    a->count= count;

    return 1;
}
/**
 * Pre-allocate every frame of a fixed-size hardware frame pool, then
 * release the temporary references so the pool holds them.
 *
 * @return 0 on success, a negative AVERROR on failure.
 */
static int hwframe_pool_prealloc(AVBufferRef *ref)
{
    AVHWFramesContext *ctx = (AVHWFramesContext*)ref->data;
    AVFrame **frames;
    int i, ret = 0;

    frames = av_mallocz_array(ctx->initial_pool_size, sizeof(*frames));
    if (!frames)
        return AVERROR(ENOMEM);

    for (i = 0; i < ctx->initial_pool_size; i++) {
        frames[i] = av_frame_alloc();
        if (!frames[i]) {
            /* previously fell through with ret still 0 (or the last
             * success), silently reporting success on OOM */
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        ret = av_hwframe_get_buffer(ref, frames[i], 0);
        if (ret < 0)
            goto fail;
    }

fail:
    /* entries past a failure are NULL (zeroed array); av_frame_free() on
     * a pointer to NULL is a no-op */
    for (i = 0; i < ctx->initial_pool_size; i++)
        av_frame_free(&frames[i]);
    av_freep(&frames);

    return ret;
}
/**
 * Derive a VAAPI or DXVA2 frames context from a QSV one by unwrapping the
 * native surface handle stored in each mfxFrameSurface1's MemId.
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure,
 *         AVERROR(ENOSYS) for unsupported destination device types.
 */
static int qsv_frames_derive_from(AVHWFramesContext *dst_ctx,
                                  AVHWFramesContext *src_ctx, int flags)
{
    AVQSVFramesContext *src_hwctx = src_ctx->hwctx;
    int i;

    switch (dst_ctx->device_ctx->type) {
#if CONFIG_VAAPI
    case AV_HWDEVICE_TYPE_VAAPI:
        {
        AVVAAPIFramesContext *dst_hwctx = dst_ctx->hwctx;
        dst_hwctx->surface_ids = av_mallocz_array(src_hwctx->nb_surfaces,
                                                  sizeof(*dst_hwctx->surface_ids));
        if (!dst_hwctx->surface_ids)
            return AVERROR(ENOMEM);
        /* MemId of a VAAPI-backed QSV surface points at its VASurfaceID */
        for (i = 0; i < src_hwctx->nb_surfaces; i++)
            dst_hwctx->surface_ids[i] =
                *(VASurfaceID*)src_hwctx->surfaces[i].Data.MemId;
        dst_hwctx->nb_surfaces = src_hwctx->nb_surfaces;
        }
        break;
#endif
#if CONFIG_DXVA2
    case AV_HWDEVICE_TYPE_DXVA2:
        {
        AVDXVA2FramesContext *dst_hwctx = dst_ctx->hwctx;
        dst_hwctx->surfaces = av_mallocz_array(src_hwctx->nb_surfaces,
                                               sizeof(*dst_hwctx->surfaces));
        if (!dst_hwctx->surfaces)
            return AVERROR(ENOMEM);
        /* MemId of a DXVA2-backed QSV surface is the IDirect3DSurface9 */
        for (i = 0; i < src_hwctx->nb_surfaces; i++)
            dst_hwctx->surfaces[i] =
                (IDirect3DSurface9*)src_hwctx->surfaces[i].Data.MemId;
        dst_hwctx->nb_surfaces = src_hwctx->nb_surfaces;
        if (src_hwctx->frame_type == MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)
            dst_hwctx->surface_type = DXVA2_VideoDecoderRenderTarget;
        else
            dst_hwctx->surface_type = DXVA2_VideoProcessorRenderTarget;
        }
        break;
#endif
    default:
        return AVERROR(ENOSYS);
    }

    return 0;
}
/**
 * Open all slave muxers listed in the tee filename.
 *
 * Splits the filename on the slave delimiter, opens each slave (a failing
 * slave may be tolerated by tee_process_slave_failure()), and warns about
 * input streams mapped to no surviving slave.
 *
 * @return 0 on success, a negative AVERROR on failure (slaves closed).
 */
static int tee_write_header(AVFormatContext *avf)
{
    TeeContext *tee = avf->priv_data;
    unsigned nb_slaves = 0, i;
    const char *filename = avf->filename;
    char **slaves = NULL;
    int ret;

    /* split the filename into a dynamic array of slave specifications */
    while (*filename) {
        char *slave = av_get_token(&filename, slave_delim);
        if (!slave) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        ret = av_dynarray_add_nofree(&slaves, &nb_slaves, slave);
        if (ret < 0) {
            av_free(slave);
            goto fail;
        }
        if (strspn(filename, slave_delim))
            filename++;
    }

    if (!(tee->slaves = av_mallocz_array(nb_slaves, sizeof(*tee->slaves)))) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    tee->nb_slaves = tee->nb_alive = nb_slaves;

    for (i = 0; i < nb_slaves; i++) {
        if ((ret = open_slave(avf, slaves[i], &tee->slaves[i])) < 0) {
            /* the failure policy decides whether one dead slave is fatal */
            ret = tee_process_slave_failure(avf, i, ret);
            if (ret < 0)
                goto fail;
        } else {
            log_slave(&tee->slaves[i], avf, AV_LOG_VERBOSE);
        }
        av_freep(&slaves[i]);
    }

    /* warn about input streams that ended up unmapped in every slave */
    for (i = 0; i < avf->nb_streams; i++) {
        int j, mapped = 0;
        for (j = 0; j < tee->nb_slaves; j++)
            if (tee->slaves[j].avf)
                mapped += tee->slaves[j].stream_map[i] >= 0;
        if (!mapped)
            av_log(avf, AV_LOG_WARNING, "Input stream #%d is not mapped "
                   "to any slave.\n", i);
    }
    av_free(slaves);
    return 0;

fail:
    for (i = 0; i < nb_slaves; i++)
        av_freep(&slaves[i]);
    close_slaves(avf);
    av_free(slaves);
    return ret;
}
/**
 * Initialize the QT RLE encoder: derive the pixel size from the pixel
 * format and allocate the RLE work tables.
 *
 * @return 0 on success, AVERROR(EINVAL) on invalid dimensions or pixel
 *         format, AVERROR(ENOMEM) on allocation failure.
 */
static av_cold int qtrle_encode_init(AVCodecContext *avctx)
{
    QtrleEncContext *s = avctx->priv_data;

    if (av_image_check_size(avctx->width, avctx->height, 0, avctx) < 0) {
        return AVERROR(EINVAL);
    }
    s->avctx = avctx;
    s->logical_width = avctx->width;

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_GRAY8:
        if (avctx->width % 4) {
            av_log(avctx, AV_LOG_ERROR, "Width not being a multiple of 4 is not supported\n");
            return AVERROR(EINVAL);
        }
        /* grayscale is packed four pixels per logical unit */
        s->logical_width = avctx->width / 4;
        s->pixel_size = 4;
        break;
    case AV_PIX_FMT_RGB555BE:
        s->pixel_size = 2;
        break;
    case AV_PIX_FMT_RGB24:
        s->pixel_size = 3;
        break;
    case AV_PIX_FMT_ARGB:
        s->pixel_size = 4;
        break;
    default:
        /* previously only logged and fell through with pixel_size unset,
         * leading to zero-sized allocations below; fail explicitly */
        av_log(avctx, AV_LOG_ERROR, "Unsupported colorspace.\n");
        return AVERROR(EINVAL);
    }
    avctx->bits_per_coded_sample = avctx->pix_fmt == AV_PIX_FMT_GRAY8 ? 40 : s->pixel_size*8;

    s->rlecode_table = av_mallocz(s->logical_width);
    s->skip_table    = av_mallocz(s->logical_width);
    s->length_table  = av_mallocz_array(s->logical_width + 1, sizeof(int));
    if (!s->skip_table || !s->length_table || !s->rlecode_table) {
        av_log(avctx, AV_LOG_ERROR, "Error allocating memory.\n");
        return AVERROR(ENOMEM);
    }
    s->previous_frame = av_frame_alloc();
    if (!s->previous_frame) {
        av_log(avctx, AV_LOG_ERROR, "Error allocating picture\n");
        return AVERROR(ENOMEM);
    }

    s->max_buf_size = s->logical_width*s->avctx->height*s->pixel_size*2 /* image base material */
                      + 15                                              /* header + footer */
                      + s->avctx->height*2                              /* skip code+rle end */
                      + s->logical_width/MAX_RLE_BULK + 1               /* rle codes */;

    return 0;
}
/**
 * Initialize an MLZ decompression dictionary to its starting state.
 *
 * NOTE(review): the av_mallocz_array() result is not checked here; with a
 * void return there is no way to report OOM — callers must verify
 * mlz->dict before use. TODO confirm all call sites do so.
 */
av_cold void ff_mlz_init_dict(void* context, MLZ *mlz) {
    mlz->dict = av_mallocz_array(TABLE_SIZE, sizeof(*mlz->dict));

    mlz->flush_code            = FLUSH_CODE;
    mlz->current_dic_index_max = DIC_INDEX_INIT;
    mlz->dic_code_bit          = CODE_BIT_INIT;
    mlz->bump_code             = (DIC_INDEX_INIT - 1);
    mlz->next_code             = FIRST_CODE;
    mlz->freeze_flag           = 0;
    mlz->context               = context;
}
/**
 * Initialize the ATRAC3+ decoder: VLC tables, transforms, gain
 * compensation, and the per-channel-unit state with its double-buffered
 * history slots.
 *
 * @return 0 on success, a negative AVERROR on failure.
 */
static av_cold int atrac3p_decode_init(AVCodecContext *avctx)
{
    ATRAC3PContext *ctx = avctx->priv_data;
    int i, ch, ret;

    if (!avctx->block_align) {
        av_log(avctx, AV_LOG_ERROR, "block_align is not set\n");
        return AVERROR(EINVAL);
    }

    ff_atrac3p_init_vlcs();

    /* initialize IPQF */
    ff_mdct_init(&ctx->ipqf_dct_ctx, 5, 1, 32.0 / 32768.0);

    ff_atrac3p_init_imdct(avctx, &ctx->mdct_ctx);

    ff_atrac_init_gain_compensation(&ctx->gainc_ctx, 6, 2);

    ff_atrac3p_init_wave_synth();

    if ((ret = set_channel_params(ctx, avctx)) < 0)
        return ret;

    ctx->my_channel_layout = avctx->channel_layout;

    ctx->ch_units = av_mallocz_array(ctx->num_channel_blocks, sizeof(*ctx->ch_units));
    ctx->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);

    if (!ctx->ch_units || !ctx->fdsp) {
        atrac3p_decode_close(avctx);
        return AVERROR(ENOMEM);
    }

    /* wire each channel's current/previous pointers to the two entries of
     * its history arrays; decoding swaps between them frame to frame */
    for (i = 0; i < ctx->num_channel_blocks; i++) {
        for (ch = 0; ch < 2; ch++) {
            ctx->ch_units[i].channels[ch].ch_num          = ch;
            ctx->ch_units[i].channels[ch].wnd_shape       = &ctx->ch_units[i].channels[ch].wnd_shape_hist[0][0];
            ctx->ch_units[i].channels[ch].wnd_shape_prev  = &ctx->ch_units[i].channels[ch].wnd_shape_hist[1][0];
            ctx->ch_units[i].channels[ch].gain_data       = &ctx->ch_units[i].channels[ch].gain_data_hist[0][0];
            ctx->ch_units[i].channels[ch].gain_data_prev  = &ctx->ch_units[i].channels[ch].gain_data_hist[1][0];
            ctx->ch_units[i].channels[ch].tones_info      = &ctx->ch_units[i].channels[ch].tones_info_hist[0][0];
            ctx->ch_units[i].channels[ch].tones_info_prev = &ctx->ch_units[i].channels[ch].tones_info_hist[1][0];
        }

        ctx->ch_units[i].waves_info      = &ctx->ch_units[i].wave_synth_hist[0];
        ctx->ch_units[i].waves_info_prev = &ctx->ch_units[i].wave_synth_hist[1];
    }

    avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;

    return 0;
}
/**
 * Copy the properties of an AVFilterBufferRef into an AVFrame
 * (legacy compatibility API).
 *
 * @return 0 on success, AVERROR(EINVAL) for a NULL dst or unknown media
 *         type, AVERROR(ENOMEM) on allocation failure.
 */
int avfilter_copy_buf_props(AVFrame *dst, const AVFilterBufferRef *src)
{
    int planes, nb_channels;

    if (!dst)
        return AVERROR(EINVAL);
    /* abort in case the src is NULL and dst is not, avoid inconsistent state in dst */
    av_assert0(src);

    memcpy(dst->data, src->data, sizeof(dst->data));
    memcpy(dst->linesize, src->linesize, sizeof(dst->linesize));

    dst->pts    = src->pts;
    dst->format = src->format;
    av_frame_set_pkt_pos(dst, src->pos);

    switch (src->type) {
    case AVMEDIA_TYPE_VIDEO:
        av_assert0(src->video);
        dst->width               = src->video->w;
        dst->height              = src->video->h;
        dst->sample_aspect_ratio = src->video->sample_aspect_ratio;
        dst->interlaced_frame    = src->video->interlaced;
        dst->top_field_first     = src->video->top_field_first;
        dst->key_frame           = src->video->key_frame;
        dst->pict_type           = src->video->pict_type;
        break;
    case AVMEDIA_TYPE_AUDIO:
        av_assert0(src->audio);
        nb_channels = av_get_channel_layout_nb_channels(src->audio->channel_layout);
        planes      = av_sample_fmt_is_planar(src->format) ? nb_channels : 1;

        /* planar audio with more channels than data[] slots needs its own
         * extended_data array; copy the source plane pointers over */
        if (planes > FF_ARRAY_ELEMS(dst->data)) {
            dst->extended_data = av_mallocz_array(planes,
                                                  sizeof(*dst->extended_data));
            if (!dst->extended_data)
                return AVERROR(ENOMEM);
            memcpy(dst->extended_data, src->extended_data,
                   planes * sizeof(*dst->extended_data));
        } else
            dst->extended_data = dst->data;
        dst->nb_samples = src->audio->nb_samples;
        av_frame_set_sample_rate   (dst, src->audio->sample_rate);
        av_frame_set_channel_layout(dst, src->audio->channel_layout);
        av_frame_set_channels      (dst, src->audio->channels);
        break;
    default:
        return AVERROR(EINVAL);
    }

    return 0;
}
/**
 * Build the 2-D unsharp mask matrix as the outer product of two 1-D masks
 * and upload it to the OpenCL buffer.
 *
 * @return 0 on success, a negative AVERROR on failure.
 */
static int compute_mask_matrix(cl_mem cl_mask_matrix, int step_x, int step_y)
{
    int i, j, ret = 0;
    /* NULL-initialize: the cleanup path below runs on every exit and
     * previously called av_freep() on uninitialized pointers when an
     * early allocation failed (undefined behavior) */
    uint32_t *mask_matrix = NULL, *mask_x = NULL, *mask_y = NULL;
    size_t size_matrix = sizeof(uint32_t) * (2 * step_x + 1) * (2 * step_y + 1);

    mask_x = av_mallocz_array(2 * step_x + 1, sizeof(uint32_t));
    if (!mask_x) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    mask_y = av_mallocz_array(2 * step_y + 1, sizeof(uint32_t));
    if (!mask_y) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    mask_matrix = av_mallocz(size_matrix);
    if (!mask_matrix) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    ret = compute_mask(step_x, mask_x);
    if (ret < 0)
        goto end;
    ret = compute_mask(step_y, mask_y);
    if (ret < 0)
        goto end;
    /* outer product of the two 1-D masks */
    for (j = 0; j < 2 * step_y + 1; j++) {
        for (i = 0; i < 2 * step_x + 1; i++) {
            mask_matrix[i + j * (2 * step_x + 1)] = mask_y[j] * mask_x[i];
        }
    }
    ret = av_opencl_buffer_write(cl_mask_matrix, (uint8_t *)mask_matrix,
                                 size_matrix);
end:
    av_freep(&mask_x);
    av_freep(&mask_y);
    av_freep(&mask_matrix);
    return ret;
}
/* Set up the peak-chunk writer: only plain 8/16-bit PCM is supported;
 * allocates the per-channel peak trackers and the output buffer. */
static av_cold int peak_init_writer(AVFormatContext *s)
{
    WAVMuxContext *wav = s->priv_data;
    AVCodecContext *enc = s->streams[0]->codec;

    switch (enc->codec_id) {
    case AV_CODEC_ID_PCM_S8:
    case AV_CODEC_ID_PCM_S16LE:
    case AV_CODEC_ID_PCM_U8:
    case AV_CODEC_ID_PCM_U16LE:
        break;
    default:
        av_log(s, AV_LOG_ERROR, "%s codec not supported for Peak Chunk\n",
               s->streams[0]->codec->codec ? s->streams[0]->codec->codec->name : "NONE");
        return -1;
    }

    wav->peak_bps = av_get_bits_per_sample(enc->codec_id) / 8;

    if (wav->peak_bps == 1 && wav->peak_format == PEAK_FORMAT_UINT16) {
        av_log(s, AV_LOG_ERROR,
               "Writing 16 bit peak for 8 bit audio does not make sense\n");
        return AVERROR(EINVAL);
    }

    wav->peak_maxpos = av_mallocz_array(enc->channels, sizeof(*wav->peak_maxpos));
    wav->peak_maxneg = av_mallocz_array(enc->channels, sizeof(*wav->peak_maxneg));
    wav->peak_output = av_malloc(PEAK_BUFFER_SIZE);
    if (!wav->peak_maxpos || !wav->peak_maxneg || !wav->peak_output) {
        av_log(s, AV_LOG_ERROR, "Out of memory\n");
        peak_free_buffers(s);
        return AVERROR(ENOMEM);
    }

    wav->peak_outbuf_size = PEAK_BUFFER_SIZE;

    return 0;
}
/* Filter init: a pure source ("openclsrc") takes no inputs and a fixed
 * output geometry/format; otherwise create one video input pad per
 * configured input. */
static av_cold int program_opencl_init(AVFilterContext *avctx)
{
    ProgramOpenCLContext *ctx = avctx->priv;
    int err;

    /* NOTE(review): the return value of ff_opencl_filter_init() is not
     * checked here — confirm it cannot fail in this configuration. */
    ff_opencl_filter_init(avctx);

    ctx->ocf.output_width  = ctx->width;
    ctx->ocf.output_height = ctx->height;

    if (!strcmp(avctx->filter->name, "openclsrc")) {
        if (!ctx->ocf.output_width || !ctx->ocf.output_height) {
            av_log(avctx, AV_LOG_ERROR, "OpenCL source requires output "
                   "dimensions to be specified.\n");
            return AVERROR(EINVAL);
        }

        ctx->nb_inputs = 0;
        ctx->ocf.output_format = ctx->source_format;
    } else {
        int in;

        ctx->frames = av_mallocz_array(ctx->nb_inputs, sizeof(*ctx->frames));
        if (!ctx->frames)
            return AVERROR(ENOMEM);

        for (in = 0; in < ctx->nb_inputs; in++) {
            AVFilterPad pad = { 0 };

            pad.type = AVMEDIA_TYPE_VIDEO;
            pad.name = av_asprintf("input%d", in);
            if (!pad.name)
                return AVERROR(ENOMEM);
            pad.config_props = &ff_opencl_filter_config_input;

            err = ff_insert_inpad(avctx, in, &pad);
            if (err < 0) {
                av_freep(&pad.name);
                return err;
            }
        }
    }

    return 0;
}
/**
 * Compute a 1-D smoothing mask of half-width step by iterated pairwise
 * accumulation (add_mask_counter) over scratch counter rows.
 *
 * @param step half-width of the mask
 * @param mask output array of 2*step+1 entries
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure.
 */
static int compute_mask(int step, uint32_t *mask)
{
    int i, z, ret = 0;
    int counter_size = sizeof(uint32_t) * (2 * step + 1);
    /* NULL-initialize: the cleanup path runs on every exit and previously
     * dereferenced/freed uninitialized pointers when an early allocation
     * failed (undefined behavior) */
    uint32_t *temp1_counter = NULL, *temp2_counter = NULL, **counter = NULL;

    temp1_counter = av_mallocz(counter_size);
    if (!temp1_counter) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    temp2_counter = av_mallocz(counter_size);
    if (!temp2_counter) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    counter = av_mallocz_array(2 * step + 1, sizeof(uint32_t *));
    if (!counter) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    for (i = 0; i < 2 * step + 1; i++) {
        counter[i] = av_mallocz(counter_size);
        if (!counter[i]) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
    }
    for (i = 0; i < 2 * step + 1; i++) {
        memset(temp1_counter, 0, counter_size);
        temp1_counter[i] = 1;
        for (z = 0; z < step * 2; z += 2) {
            add_mask_counter(temp2_counter, counter[z], temp1_counter, step * 2);
            memcpy(counter[z], temp1_counter, counter_size);
            add_mask_counter(temp1_counter, counter[z + 1], temp2_counter, step * 2);
            memcpy(counter[z + 1], temp2_counter, counter_size);
        }
    }
    memcpy(mask, temp1_counter, counter_size);
end:
    av_freep(&temp1_counter);
    av_freep(&temp2_counter);
    /* guard: counter may still be NULL if an early allocation failed */
    if (counter) {
        for (i = 0; i < 2 * step + 1; i++) {
            av_freep(&counter[i]);
        }
        av_freep(&counter);
    }
    return ret;
}
/* Configure the SSIM reference input: validate that both inputs agree in
 * geometry and format, derive plane sizes and per-plane score weights, and
 * select the bit-depth specific implementations. */
static int config_input_ref(AVFilterLink *inlink)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    AVFilterContext *ctx = inlink->dst;
    SSIMContext *s = ctx->priv;
    int total = 0, c;

    s->nb_components = desc->nb_components;

    if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
        ctx->inputs[0]->h != ctx->inputs[1]->h) {
        av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be same.\n");
        return AVERROR(EINVAL);
    }
    if (ctx->inputs[0]->format != ctx->inputs[1]->format) {
        av_log(ctx, AV_LOG_ERROR, "Inputs must be of same pixel format.\n");
        return AVERROR(EINVAL);
    }

    s->is_rgb = ff_fill_rgba_map(s->rgba_map, inlink->format) >= 0;
    s->comps[0] = s->is_rgb ? 'R' : 'Y';
    s->comps[1] = s->is_rgb ? 'G' : 'U';
    s->comps[2] = s->is_rgb ? 'B' : 'V';
    s->comps[3] = 'A';

    s->planeheight[0] = s->planeheight[3] = inlink->h;
    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planewidth[0]  = s->planewidth[3]  = inlink->w;
    s->planewidth[1]  = s->planewidth[2]  = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);

    /* each plane's weight in the combined score is its share of the pixels */
    for (c = 0; c < s->nb_components; c++)
        total += s->planeheight[c] * s->planewidth[c];
    for (c = 0; c < s->nb_components; c++)
        s->coefs[c] = (double)s->planeheight[c] * s->planewidth[c] / total;

    s->temp = av_mallocz_array(2 * SUM_LEN(inlink->w),
                               (desc->comp[0].depth > 8) ? sizeof(int64_t[4])
                                                         : sizeof(int[4]));
    if (!s->temp)
        return AVERROR(ENOMEM);
    s->max = (1 << desc->comp[0].depth) - 1;

    s->ssim_plane = desc->comp[0].depth > 8 ? ssim_plane_16bit : ssim_plane;
    s->dsp.ssim_4x4_line = ssim_4x4xn_8bit;
    s->dsp.ssim_end_line = ssim_endn_8bit;
    if (ARCH_X86)
        ff_ssim_init_x86(&s->dsp);

    return 0;
}
/* Filter init: create one audio input pad per configured input and parse
 * the space-separated weight string; missing weights repeat the last one. */
static av_cold int init(AVFilterContext *ctx)
{
    MixContext *s = ctx->priv;
    char *cur, *token, *state = NULL;
    float last_weight = 1.f;
    int i, ret;

    for (i = 0; i < s->nb_inputs; i++) {
        AVFilterPad pad = { 0 };

        pad.type = AVMEDIA_TYPE_AUDIO;
        pad.name = av_asprintf("input%d", i);
        if (!pad.name)
            return AVERROR(ENOMEM);

        if ((ret = ff_insert_inpad(ctx, i, &pad)) < 0) {
            av_freep(&pad.name);
            return ret;
        }
    }

    s->fdsp = avpriv_float_dsp_alloc(0);
    if (!s->fdsp)
        return AVERROR(ENOMEM);

    s->weights = av_mallocz_array(s->nb_inputs, sizeof(*s->weights));
    if (!s->weights)
        return AVERROR(ENOMEM);

    cur = s->weights_str;
    for (i = 0; i < s->nb_inputs; i++) {
        token = av_strtok(cur, " ", &state);
        if (!token)
            break;
        cur = NULL;
        sscanf(token, "%f", &last_weight);
        s->weights[i]  = last_weight;
        s->weight_sum += last_weight;
    }
    for (; i < s->nb_inputs; i++) {
        s->weights[i]  = last_weight;
        s->weight_sum += last_weight;
    }

    return 0;
}
/* Create the internal QSV surface pool: requires a fixed pool size,
 * initializes every surface descriptor, and sets up a child (VAAPI/DXVA2)
 * frames context unless the pool uses opaque memory. */
static int qsv_init_pool(AVHWFramesContext *ctx, uint32_t fourcc)
{
    QSVFramesContext *s = ctx->internal->priv;
    AVQSVFramesContext *frames_hwctx = ctx->hwctx;
    int i, ret;

    if (ctx->initial_pool_size <= 0) {
        av_log(ctx, AV_LOG_ERROR, "QSV requires a fixed frame pool size\n");
        return AVERROR(EINVAL);
    }

    s->surfaces_internal = av_mallocz_array(ctx->initial_pool_size,
                                            sizeof(*s->surfaces_internal));
    if (!s->surfaces_internal)
        return AVERROR(ENOMEM);

    for (i = 0; i < ctx->initial_pool_size; i++) {
        ret = qsv_init_surface(ctx, &s->surfaces_internal[i]);
        if (ret < 0)
            return ret;
    }

    /* opaque memory needs no child frames context */
    if (!(frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME)) {
        ret = qsv_init_child_ctx(ctx);
        if (ret < 0)
            return ret;
    }

    ctx->internal->pool_internal = av_buffer_pool_init2(sizeof(mfxFrameSurface1),
                                                        ctx, qsv_pool_alloc, NULL);
    if (!ctx->internal->pool_internal)
        return AVERROR(ENOMEM);

    frames_hwctx->surfaces    = s->surfaces_internal;
    frames_hwctx->nb_surfaces = ctx->initial_pool_size;

    return 0;
}
/* Configure the motion-estimation input: derive the macroblock grid from
 * the link dimensions and allocate one motion-vector table per frame type. */
static int config_input(AVFilterLink *inlink)
{
    MEContext *s = inlink->dst->priv;
    int t;

    s->log2_mb_size = av_ceil_log2_c(s->mb_size);
    s->mb_size      = 1 << s->log2_mb_size;

    s->b_width  = inlink->w >> s->log2_mb_size;
    s->b_height = inlink->h >> s->log2_mb_size;
    s->b_count  = s->b_width * s->b_height;

    for (t = 0; t < 3; t++) {
        s->mv_table[t] = av_mallocz_array(s->b_count, sizeof(*s->mv_table[0]));
        if (!s->mv_table[t])
            return AVERROR(ENOMEM);
    }

    ff_me_init_context(&s->me_ctx, s->mb_size, s->search_param,
                       inlink->w, inlink->h,
                       0, (s->b_width  - 1) << s->log2_mb_size,
                       0, (s->b_height - 1) << s->log2_mb_size);

    return 0;
}
AVEncryptionInfo *av_encryption_info_alloc(uint32_t subsample_count, uint32_t key_id_size, uint32_t iv_size) { AVEncryptionInfo *info; info = av_mallocz(sizeof(*info)); if (!info) return NULL; info->key_id = av_mallocz(key_id_size); info->key_id_size = key_id_size; info->iv = av_mallocz(iv_size); info->iv_size = iv_size; info->subsamples = av_mallocz_array(subsample_count, sizeof(*info->subsamples)); info->subsample_count = subsample_count; // Allow info->subsamples to be NULL if there are no subsamples. if (!info->key_id || !info->iv || (!info->subsamples && subsample_count)) { av_encryption_info_free(info); return NULL; } return info; }