/* Finish decoding the current H.264 frame through the old VDA API.
 * The CVPixelBuffer produced by the decoder is stored in frame->data[3];
 * when use_ref_buffer is set, ownership of the pixel buffer is additionally
 * transferred to an AVBufferRef in frame->buf[3] so it is released when the
 * frame is unreferenced.
 * Returns 0 on success, -1 / the decoder status on failure. */
static int vda_old_h264_end_frame(AVCodecContext *avctx)
{
    H264Context *h              = avctx->priv_data;
    VDAContext *vda             = avctx->internal->hwaccel_priv_data;
    struct vda_context *vda_ctx = avctx->hwaccel_context;
    AVFrame *frame              = &h->cur_pic_ptr->f;
    struct vda_buffer *context;
    AVBufferRef *buffer;
    int status;

    if (!vda_ctx->decoder || !vda->bitstream)
        return -1;

    status = vda_sync_decode(vda, vda_ctx);
    frame->data[3] = (void*)vda_ctx->cv_buffer;

    if (status)
        av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%d)\n", status);

    if (!vda_ctx->use_ref_buffer || status)
        return status;

    /* Allocate the release context before creating the wrapper: the original
     * created the AVBufferRef unconditionally and, when only the context
     * allocation failed, leaked the buffer (whose free callback would then
     * have run with a NULL context). */
    context = av_mallocz(sizeof(*context));
    if (!context) {
        CVPixelBufferRelease(vda_ctx->cv_buffer);
        return -1;
    }

    buffer = av_buffer_create(NULL, 0, vda_h264_release_buffer, context, 0);
    if (!buffer) {
        CVPixelBufferRelease(vda_ctx->cv_buffer);
        av_free(context);
        return -1;
    }

    context->cv_buffer = vda_ctx->cv_buffer;
    frame->buf[3]      = buffer;

    return status;
}
// Setup frame with a new reference to buffer. The buffer must have been // allocated from the given pool. static int ffmmal_set_ref(AVFrame *frame, FFPoolRef *pool, MMAL_BUFFER_HEADER_T *buffer) { FFBufferRef *ref = av_mallocz(sizeof(*ref)); if (!ref) return AVERROR(ENOMEM); ref->pool = pool; ref->buffer = buffer; frame->buf[0] = av_buffer_create((void *)ref, sizeof(*ref), ffmmal_release_frame, NULL, AV_BUFFER_FLAG_READONLY); if (!frame->buf[0]) { av_free(ref); return AVERROR(ENOMEM); } avpriv_atomic_int_add_and_fetch(&ref->pool->refcount, 1); mmal_buffer_header_acquire(buffer); frame->format = AV_PIX_FMT_MMAL; frame->data[3] = (uint8_t *)ref->buffer; return 0; }
/* Wrap one plane of the VapourSynth frame held by vs_vbhp into av_frame as
 * plane av_plane. A fresh reference to vs_buffer_handler keeps the
 * underlying VS frame alive for as long as the AVFrame plane buffer exists;
 * vs_video_unref_buffer_handler drops it when the plane is freed.
 * Returns 0 on success, -1 on failure. */
static inline int vs_create_plane_buffer
(
    vs_video_buffer_handler_t *vs_vbhp,
    AVBufferRef               *vs_buffer_handler,
    AVFrame                   *av_frame,
    int                        av_plane,
    int                        vs_plane
)
{
    AVBufferRef *vs_buffer_ref = av_buffer_ref( vs_buffer_handler );
    if( !vs_buffer_ref )
    {
        av_buffer_unref( &vs_buffer_handler );
        return -1;
    }
    av_frame->linesize[av_plane] = vs_vbhp->vsapi->getStride( vs_vbhp->vs_frame_buffer, vs_plane );
    int vs_plane_size = vs_vbhp->vsapi->getFrameHeight( vs_vbhp->vs_frame_buffer, vs_plane )
                      * av_frame->linesize[av_plane];
    av_frame->buf[av_plane] = av_buffer_create( vs_vbhp->vsapi->getWritePtr( vs_vbhp->vs_frame_buffer, vs_plane ),
                                                vs_plane_size,
                                                vs_video_unref_buffer_handler,
                                                vs_buffer_ref,
                                                0 );
    if( !av_frame->buf[av_plane] )
    {
        /* The original leaked vs_buffer_ref here: av_buffer_create() does
         * not take ownership of its opaque argument on failure. */
        av_buffer_unref( &vs_buffer_ref );
        return -1;
    }
    av_frame->data[av_plane] = av_frame->buf[av_plane]->data;
    return 0;
}
static AVBufferRef *vdpau_pool_alloc(void *opaque, int size) { AVHWFramesContext *ctx = opaque; VDPAUFramesContext *priv = ctx->internal->priv; AVVDPAUDeviceContext *device_hwctx = ctx->device_ctx->hwctx; VDPAUDeviceContext *device_priv = ctx->device_ctx->internal->priv; AVBufferRef *ret; VdpVideoSurface surf; VdpStatus err; err = device_priv->surf_create(device_hwctx->device, priv->chroma_type, ctx->width, ctx->height, &surf); if (err != VDP_STATUS_OK) { av_log(ctx, AV_LOG_ERROR, "Error allocating a VDPAU video surface\n"); return NULL; } ret = av_buffer_create((uint8_t*)(uintptr_t)surf, sizeof(surf), vdpau_buffer_free, ctx, AV_BUFFER_FLAG_READONLY); if (!ret) { device_priv->surf_destroy(surf); return NULL; } return ret; }
/* get_buffer2() callback for hardware-accelerated decoding (QtAV).
 * Clears the frame's plane pointers, (re)runs the HW decoder setup,
 * obtains a HW surface via va->getBuffer(), and wraps it in an AVBufferRef
 * whose free callback returns the surface to the decoder.
 * Returns 0 on success, -1 on failure. */
static int ffmpeg_get_va_buffer2(struct AVCodecContext *ctx, AVFrame *frame, int flags)
{
    Q_UNUSED(flags);
    /* start from a clean frame: no planes, no buffers */
    for (unsigned i = 0; i < AV_NUM_DATA_POINTERS; i++) {
        frame->data[i] = NULL;
        frame->linesize[i] = 0;
        frame->buf[i] = NULL;
    }
    //frame->reordered_opaque = ctx->reordered_opaque; //?? xbmc
    VideoDecoderFFmpegHWPrivate *va = (VideoDecoderFFmpegHWPrivate*)ctx->opaque;
    /* hwaccel_context is not present in old ffmpeg version */
    // not coded_width. assume coded_width is 6 aligned of width. ??
    if (!va->setup(ctx)) {
        qWarning("va Setup failed");
        return -1;
    }
    if (!va->getBuffer(&frame->opaque, &frame->data[0])) {
        qWarning("va->getBuffer failed");
        return -1;
    }
    /* ref owns the (va, opaque) pair and is handed to the AVBufferRef's
     * free callback, which returns the surface to the decoder */
    ffmpeg_va_ref_t *ref = new ffmpeg_va_ref_t;
    ref->va = va;
    ref->opaque = frame->opaque;
    /* data[0] must be non-NULL for libavcodec internal checks.
     * data[3] actually contains the format-specific surface handle. */
    frame->data[3] = frame->data[0];
    frame->buf[0] = av_buffer_create(frame->data[0], 0, ffmpeg_release_va_buffer2, ref, 0);
    if (Q_UNLIKELY(!frame->buf[0])) {
        /* release immediately: the surface would otherwise never return */
        ffmpeg_release_va_buffer2(ref, frame->data[0]);
        return -1;
    }
    Q_ASSERT(frame->data[0] != NULL); // FIXME: VDA may crash in debug mode
    return 0;
}
/* get_buffer2()-style callback (VLC): set up the hardware decoder if
 * needed, obtain a HW surface for the frame and wrap it in an AVBufferRef
 * released through va->release. Returns 0 on success, -1 on failure. */
static int lavc_va_GetFrame(struct AVCodecContext *ctx, AVFrame *frame,
                            int flags)
{
    decoder_t *dec = ctx->opaque;
    decoder_sys_t *sys = dec->p_sys;
    vlc_va_t *va = sys->p_va;

    if (vlc_va_Setup(va, &ctx->hwaccel_context, &dec->fmt_out.video.i_chroma,
                     ctx->coded_width, ctx->coded_height))
    {
        msg_Err(dec, "hardware acceleration setup failed");
        return -1;
    }
    if (vlc_va_Get(va, &frame->opaque, &frame->data[0]))
    {
        msg_Err(dec, "hardware acceleration picture allocation failed");
        return -1;
    }
    /* data[0] must be non-NULL for libavcodec internal checks.
     * data[3] actually contains the format-specific surface handle. */
    frame->data[3] = frame->data[0];

    frame->buf[0] = av_buffer_create(frame->data[0], 0, va->release,
                                     frame->opaque, 0);
    if (unlikely(frame->buf[0] == NULL))
    {
        /* give the surface back right away: nothing will ever free it */
        vlc_va_Release(va, frame->opaque, frame->data[0]);
        return -1;
    }
    assert(frame->data[0] != NULL);
    (void) flags;
    return 0;
}
/* get_buffer2()-style callback (VLC): obtain a hardware surface backed by
 * the VLC picture `pic` and attach it to the lavc frame. Ownership of pic
 * passes to the frame via the AVBufferRef's release callback; on failure
 * pic is released here. Returns 0 on success, -1 on failure. */
static int lavc_va_GetFrame(struct AVCodecContext *ctx, AVFrame *frame,
                            picture_t *pic)
{
    decoder_t *dec = ctx->opaque;
    vlc_va_t *va = dec->p_sys->p_va;

    if (vlc_va_Get(va, pic, &frame->data[0]))
    {
        msg_Err(dec, "hardware acceleration picture allocation failed");
        picture_Release(pic);
        return -1;
    }
    /* data[0] must be non-NULL for libavcodec internal checks.
     * data[3] actually contains the format-specific surface handle. */
    frame->data[3] = frame->data[0];

    /* fall back to the generic release when the VA module has none */
    void (*release)(void *, uint8_t *) = va->release;
    if (va->release == NULL)
        release = lavc_ReleaseFrame;

    frame->buf[0] = av_buffer_create(frame->data[0], 0, release, pic, 0);
    if (unlikely(frame->buf[0] == NULL))
    {
        release(pic, frame->data[0]);
        return -1;
    }

    frame->opaque = pic;
    assert(frame->data[0] != NULL);
    return 0;
}
/* get_buffer2() callback for hardware-accelerated decoding (QtAV).
 * Unlike the variant that calls va->setup(), this one assumes the HW
 * decoder is already initialized. Clears the frame's plane pointers,
 * obtains a HW surface via va->getBuffer(), and wraps it in an AVBufferRef
 * whose free callback returns the surface to the decoder.
 * Returns 0 on success, -1 on failure. */
static int ffmpeg_get_va_buffer2(struct AVCodecContext *ctx, AVFrame *frame, int flags)
{
    Q_UNUSED(flags);
    /* start from a clean frame: no planes, no buffers */
    for (unsigned i = 0; i < AV_NUM_DATA_POINTERS; i++) {
        frame->data[i] = NULL;
        frame->linesize[i] = 0;
        frame->buf[i] = NULL;
    }
    //frame->reordered_opaque = ctx->reordered_opaque; //?? xbmc
    // va must be available here
    VideoDecoderFFmpegHWPrivate *va = (VideoDecoderFFmpegHWPrivate*)ctx->opaque;
    if (!va->getBuffer(&frame->opaque, &frame->data[0])) {
        qWarning("va->getBuffer failed");
        return -1;
    }
    /* ref owns the (va, opaque) pair and is handed to the AVBufferRef's
     * free callback, which returns the surface to the decoder */
    ffmpeg_va_ref_t *ref = new ffmpeg_va_ref_t;
    ref->va = va;
    ref->opaque = frame->opaque;
    /* data[0] must be non-NULL for libavcodec internal checks.
     * data[3] actually contains the format-specific surface handle. */
    frame->data[3] = frame->data[0];
    frame->buf[0] = av_buffer_create(frame->data[0], 0, ffmpeg_release_va_buffer2, ref, 0);
    if (Q_UNLIKELY(!frame->buf[0])) {
        /* release immediately: the surface would otherwise never return */
        ffmpeg_release_va_buffer2(ref, frame->data[0]);
        return -1;
    }
    Q_ASSERT(frame->data[0] != NULL);
    return 0;
}
/* Buffer-pool allocator for CUDA frames: allocates `size` bytes of device
 * memory within the context's CUDA context and wraps the device pointer in
 * an AVBufferRef freed by cuda_buffer_free. Returns NULL on error. */
static AVBufferRef *cuda_pool_alloc(void *opaque, int size)
{
    AVHWFramesContext *ctx = opaque;
    AVCUDADeviceContext *hwctx = ctx->device_ctx->hwctx;
    CudaFunctions *cu = hwctx->internal->cuda_dl;

    AVBufferRef *ret = NULL;
    CUcontext dummy = NULL;
    CUdeviceptr data;
    CUresult err;

    err = cu->cuCtxPushCurrent(hwctx->cuda_ctx);
    if (err != CUDA_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "Error setting current CUDA context\n");
        return NULL;
    }

    err = cu->cuMemAlloc(&data, size);
    if (err != CUDA_SUCCESS)
        goto fail;

    ret = av_buffer_create((uint8_t*)data, size, cuda_buffer_free, ctx, 0);
    if (!ret) {
        cu->cuMemFree(data);
        goto fail;
    }

fail:
    /* NOTE: the success path falls through here on purpose -- the pushed
     * CUDA context must be popped in every case; ret carries the result. */
    cu->cuCtxPopCurrent(&dummy);
    return ret;
}
/* Push one stereo sample pair into the encoder's staging buffer and, once
 * the buffer is full, resample and encode a complete audio frame and write
 * the resulting packet (optionally bitstream-filtered) to the output. */
void _ffmpegPostAudioFrame(struct GBAAVStream* stream, int32_t left, int32_t right) {
	struct FFmpegEncoder* encoder = (struct FFmpegEncoder*) stream;
	if (!encoder->context || !encoder->audioCodec) {
		return;
	}
	/* interleaved L/R slots in the staging buffer */
	encoder->audioBuffer[encoder->currentAudioSample * 2] = left;
	encoder->audioBuffer[encoder->currentAudioSample * 2 + 1] = right;
	++encoder->currentAudioFrame;
	++encoder->currentAudioSample;
	/* 4 bytes per stereo pair; keep accumulating until the buffer is full */
	if ((encoder->currentAudioSample * 4) < encoder->audioBufferSize) {
		return;
	}
	encoder->currentAudioSample = 0;
	int channelSize = 2 * av_get_bytes_per_sample(encoder->audio->sample_fmt);
	avresample_convert(encoder->resampleContext, 0, 0, 0, (uint8_t**) &encoder->audioBuffer, 0, encoder->audioBufferSize / 4);
	if (avresample_available(encoder->resampleContext) < encoder->audioFrame->nb_samples) {
		return;
	}
#if LIBAVCODEC_VERSION_MAJOR >= 55
	av_frame_make_writable(encoder->audioFrame);
#endif
	/* NOTE(review): reads postaudioBufferSize here but audioBufferSize
	 * above -- presumably the post-resample buffer size; confirm against
	 * the FFmpegEncoder struct definition. */
	avresample_read(encoder->resampleContext, encoder->audioFrame->data, encoder->postaudioBufferSize / channelSize);
	AVRational timeBase = { 1, PREFERRED_SAMPLE_RATE };
	encoder->audioFrame->pts = encoder->nextAudioPts;
	encoder->nextAudioPts = av_rescale_q(encoder->currentAudioFrame, timeBase, encoder->audioStream->time_base);
	AVPacket packet;
	av_init_packet(&packet);
	packet.data = 0;
	packet.size = 0;
	int gotData;
	avcodec_encode_audio2(encoder->audio, &packet, encoder->audioFrame, &gotData);
	if (gotData) {
		if (encoder->absf) {
			/* run the audio bitstream filter; on success the filtered data
			 * replaces the packet's payload */
			AVPacket tempPacket = packet;
			int success = av_bitstream_filter_filter(encoder->absf, encoder->audio, 0, &tempPacket.data, &tempPacket.size, packet.data, packet.size, 0);
			if (success > 0) {
#if LIBAVUTIL_VERSION_MAJOR >= 53
				/* give the filtered payload refcounted ownership */
				tempPacket.buf = av_buffer_create(tempPacket.data, tempPacket.size, av_buffer_default_free, 0, 0);
#endif
				av_free_packet(&packet);
			}
			packet = tempPacket;
		}
		packet.stream_index = encoder->audioStream->index;
		av_interleaved_write_frame(encoder->context, &packet);
	}
	av_free_packet(&packet);
}
/* Allocate an AVHWDeviceContext for the given device type.
 * The context and its type-specific priv/hwctx blocks are owned by the
 * returned AVBufferRef; unreferencing it runs hwdevice_ctx_free.
 * Returns NULL if the type is unknown or on allocation failure. */
AVBufferRef *av_hwdevice_ctx_alloc(enum AVHWDeviceType type)
{
    AVHWDeviceContext *ctx;
    AVBufferRef *buf;
    const HWContextType *hw_type = NULL;
    int i;

    /* look up the backend implementation for this device type */
    for (i = 0; hw_table[i]; i++) {
        if (hw_table[i]->type == type) {
            hw_type = hw_table[i];
            break;
        }
    }
    if (!hw_type)
        return NULL;

    ctx = av_mallocz(sizeof(*ctx));
    if (!ctx)
        return NULL;

    ctx->internal = av_mallocz(sizeof(*ctx->internal));
    if (!ctx->internal)
        goto fail;

    if (hw_type->device_priv_size) {
        ctx->internal->priv = av_mallocz(hw_type->device_priv_size);
        if (!ctx->internal->priv)
            goto fail;
    }

    if (hw_type->device_hwctx_size) {
        ctx->hwctx = av_mallocz(hw_type->device_hwctx_size);
        if (!ctx->hwctx)
            goto fail;
    }

    /* hand ownership of ctx to the buffer; freed via hwdevice_ctx_free */
    buf = av_buffer_create((uint8_t*)ctx, sizeof(*ctx),
                           hwdevice_ctx_free, NULL,
                           AV_BUFFER_FLAG_READONLY);
    if (!buf)
        goto fail;

    ctx->type     = type;
    ctx->av_class = &hwdevice_ctx_class;

    ctx->internal->hw_type = hw_type;

    return buf;

fail:
    if (ctx->internal)
        av_freep(&ctx->internal->priv);
    av_freep(&ctx->internal);
    av_freep(&ctx->hwctx);
    av_freep(&ctx);
    return NULL;
}
/* Allocate an AVHWFramesContext tied to the given device context.
 * Takes a new reference on device_ref_in; the frames context and its
 * type-specific priv/hwctx blocks are owned by the returned AVBufferRef
 * (freed via hwframe_ctx_free). Returns NULL on allocation failure. */
AVBufferRef *av_hwframe_ctx_alloc(AVBufferRef *device_ref_in)
{
    AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)device_ref_in->data;
    const HWContextType  *hw_type = device_ctx->internal->hw_type;
    AVHWFramesContext *ctx;
    AVBufferRef *buf, *device_ref = NULL; /* was followed by a stray ';;' */

    ctx = av_mallocz(sizeof(*ctx));
    if (!ctx)
        return NULL;

    ctx->internal = av_mallocz(sizeof(*ctx->internal));
    if (!ctx->internal)
        goto fail;

    if (hw_type->frames_priv_size) {
        ctx->internal->priv = av_mallocz(hw_type->frames_priv_size);
        if (!ctx->internal->priv)
            goto fail;
    }

    if (hw_type->frames_hwctx_size) {
        ctx->hwctx = av_mallocz(hw_type->frames_hwctx_size);
        if (!ctx->hwctx)
            goto fail;
    }

    /* keep the device alive for the lifetime of the frames context */
    device_ref = av_buffer_ref(device_ref_in);
    if (!device_ref)
        goto fail;

    buf = av_buffer_create((uint8_t*)ctx, sizeof(*ctx),
                           hwframe_ctx_free, NULL,
                           AV_BUFFER_FLAG_READONLY);
    if (!buf)
        goto fail;

    ctx->av_class   = &hwframe_ctx_class;
    ctx->device_ref = device_ref;
    ctx->device_ctx = device_ctx;
    ctx->format     = AV_PIX_FMT_NONE;
    ctx->sw_format  = AV_PIX_FMT_NONE;

    ctx->internal->hw_type = hw_type;

    return buf;

fail:
    if (device_ref)
        av_buffer_unref(&device_ref);
    if (ctx->internal)
        av_freep(&ctx->internal->priv);
    av_freep(&ctx->internal);
    av_freep(&ctx->hwctx);
    av_freep(&ctx);
    return NULL;
}
/* get_buffer2() callback: attaches a free hardware surface to the frame.
 * data[3] carries the surface ID (libavcodec hwaccel convention); buf[0]
 * returns the surface to the pool through release_buffer.
 * Returns 0 on success, -1 when no surface is available or on OOM. */
int HWAccelHelper::get_buffer(AVCodecContext *codec_ctx, AVFrame *frame, int /*flags*/)
{
    const QMPlay2SurfaceID surface_id = ((HWAccelHelper *)codec_ctx->opaque)->getSurface();
    if (surface_id != QMPlay2InvalidSurfaceID)
    {
        frame->data[3] = (uint8_t *)(uintptr_t)surface_id;
        frame->buf[0] = av_buffer_create(frame->data[3], 0, (ReleaseBufferProc)release_buffer, codec_ctx->opaque, AV_BUFFER_FLAG_READONLY);
        if (!frame->buf[0])
        {
            /* The original ignored allocation failure and returned 0 with a
             * NULL buf[0]; hand the surface back and report the error. */
            ((ReleaseBufferProc)release_buffer)(codec_ctx->opaque, frame->data[3]);
            frame->data[3] = NULL;
            return -1;
        }
        return 0;
    }
    /* This should never happen */
    fprintf(stderr, "Surface queue is empty!\n");
    return -1;
}
/* get_buffer2() callback for VDA decoding: libavcodec does not manage the
 * hardware surface itself, so attach an empty AVBufferRef whose free
 * callback (release_buffer) disposes of the per-frame VDABufferContext.
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure. */
static int get_buffer2(AVCodecContext *avctx, AVFrame *pic, int flag)
{
    VDABufferContext *context = av_mallocz(sizeof(VDABufferContext));
    if (!context)
        return AVERROR(ENOMEM);

    /* Create the wrapper only after the context allocation succeeded: the
     * original checked both afterwards and leaked the AVBufferRef when
     * only the context allocation had failed. */
    AVBufferRef *buffer = av_buffer_create(NULL, 0, release_buffer, context, 0);
    if (!buffer) {
        av_free(context);
        return AVERROR(ENOMEM);
    }

    pic->buf[0] = buffer;
    /* dummy non-NULL pointer to satisfy libavcodec's sanity checks */
    pic->data[0] = (void *)1;
    return 0;
}
/* Create a mapping descriptor for dst that keeps references to src and to
 * the hardware frames context, and arranges for `unmap` to run (through
 * ff_hwframe_unmap) when dst's buffer is released.
 * Returns 0 on success, a negative AVERROR on failure (nothing attached). */
int ff_hwframe_map_create(AVBufferRef *hwframe_ref, AVFrame *dst,
                          const AVFrame *src,
                          void (*unmap)(AVHWFramesContext *ctx,
                                        HWMapDescriptor *hwmap),
                          void *priv)
{
    AVHWFramesContext *ctx = (AVHWFramesContext*)hwframe_ref->data;
    HWMapDescriptor *hwmap;
    int ret;

    hwmap = av_mallocz(sizeof(*hwmap));
    if (!hwmap) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    /* keep the source frame alive for the lifetime of the mapping */
    hwmap->source = av_frame_alloc();
    if (!hwmap->source) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    ret = av_frame_ref(hwmap->source, src);
    if (ret < 0)
        goto fail;

    /* and the frames context too */
    hwmap->hw_frames_ctx = av_buffer_ref(hwframe_ref);
    if (!hwmap->hw_frames_ctx) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    hwmap->unmap = unmap;
    hwmap->priv  = priv;

    /* ownership of hwmap passes to dst's buffer */
    dst->buf[0] = av_buffer_create((uint8_t*)hwmap, sizeof(*hwmap),
                                   &ff_hwframe_unmap, ctx, 0);
    if (!dst->buf[0]) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    return 0;

fail:
    if (hwmap) {
        av_buffer_unref(&hwmap->hw_frames_ctx);
        av_frame_free(&hwmap->source);
    }
    av_free(hwmap);
    return ret;
}
static AVBufferRef *dxva2_pool_alloc(void *opaque, int size) { AVHWFramesContext *ctx = (AVHWFramesContext*)opaque; DXVA2FramesContext *s = ctx->internal->priv; AVDXVA2FramesContext *hwctx = ctx->hwctx; if (s->nb_surfaces_used < hwctx->nb_surfaces) { s->nb_surfaces_used++; return av_buffer_create((uint8_t*)s->surfaces_internal[s->nb_surfaces_used - 1], sizeof(*hwctx->surfaces), dxva2_pool_release_dummy, 0, 0); } return NULL; }
static GstFlowReturn gst_ffmpegaudenc_encode_audio (GstFFMpegAudEnc * ffmpegaudenc, GstBuffer * buffer, gint * have_data) { GstAudioEncoder *enc; AVCodecContext *ctx; gint res; GstFlowReturn ret; GstAudioInfo *info; AVPacket *pkt; AVFrame *frame = ffmpegaudenc->frame; gboolean planar; gint nsamples = -1; enc = GST_AUDIO_ENCODER (ffmpegaudenc); ctx = ffmpegaudenc->context; pkt = g_slice_new0 (AVPacket); if (buffer != NULL) { BufferInfo *buffer_info = g_slice_new0 (BufferInfo); guint8 *audio_in; guint in_size; buffer_info->buffer = buffer; gst_buffer_map (buffer, &buffer_info->map, GST_MAP_READ); audio_in = buffer_info->map.data; in_size = buffer_info->map.size; GST_LOG_OBJECT (ffmpegaudenc, "encoding buffer %p size:%u", audio_in, in_size); info = gst_audio_encoder_get_audio_info (enc); planar = av_sample_fmt_is_planar (ffmpegaudenc->context->sample_fmt); if (planar && info->channels > 1) { gint channels; gint i, j; nsamples = frame->nb_samples = in_size / info->bpf; channels = info->channels; frame->buf[0] = av_buffer_create (NULL, 0, buffer_info_free, buffer_info, 0); if (info->channels > AV_NUM_DATA_POINTERS) { buffer_info->ext_data_array = frame->extended_data = g_new (uint8_t *, info->channels); } else {
static AVBufferRef *qsv_pool_alloc(void *opaque, int size) { AVHWFramesContext *ctx = (AVHWFramesContext*)opaque; QSVFramesContext *s = ctx->internal->priv; AVQSVFramesContext *hwctx = ctx->hwctx; if (s->nb_surfaces_used < hwctx->nb_surfaces) { s->nb_surfaces_used++; return av_buffer_create((uint8_t*)(s->surfaces_internal + s->nb_surfaces_used - 1), sizeof(*hwctx->surfaces), qsv_pool_release_dummy, NULL, 0); } return NULL; }
/* get_buffer2() callback for DXVA2: picks the least-recently-used free
 * surface, wraps it in an AVBufferRef whose free callback
 * (dxva2_release_buffer) drops the surface/decoder references, and stores
 * the surface in data[3] per the DXVA2 hwaccel convention.
 * Returns 0 on success, AVERROR(ENOMEM) on failure. */
static int dxva2_get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
{
    HwAccelContext  *hac = s->opaque;
    DXVA2Context *ctx = hac->hwaccel_ctx;
    int i, old_unused = -1;
    LPDIRECT3DSURFACE9 surface;
    DXVA2SurfaceWrapper *w = NULL;

    av_assert0(frame->format == AV_PIX_FMT_DXVA2_VLD);

    /* find the free surface with the smallest age */
    for (i = 0; i < ctx->num_surfaces; i++) {
        surface_info *info = &ctx->surface_infos[i];
        if (!info->used && (old_unused == -1 || info->age < ctx->surface_infos[old_unused].age))
            old_unused = i;
    }
    if (old_unused == -1) {
        /* log with the codec context instead of NULL so the message keeps
         * its context prefix (the original passed NULL) */
        av_log(s, AV_LOG_ERROR, "No free DXVA2 surface!\n");
        return AVERROR(ENOMEM);
    }
    i = old_unused;
    av_log(s, AV_LOG_DEBUG, "dxva2_get_buffer:%d\n", i);

    surface = ctx->surfaces[i];
    w = av_mallocz(sizeof(*w));
    if (!w)
        return AVERROR(ENOMEM);
    frame->buf[0] = av_buffer_create((uint8_t*)surface, 0,
                                     dxva2_release_buffer, w,
                                     AV_BUFFER_FLAG_READONLY);
    if (!frame->buf[0]) {
        av_free(w);
        return AVERROR(ENOMEM);
    }
    /* take references on the D3D surface and decoder; both are released
     * together with the wrapper in dxva2_release_buffer */
    w->ctx     = ctx;
    w->surface = surface;
    IDirect3DSurface9_AddRef(w->surface);
    w->decoder = ctx->decoder;
    IDirectXVideoDecoder_AddRef(w->decoder);

    ctx->surface_infos[i].used = 1;
    ctx->surface_infos[i].age  = ctx->surface_age++;

    frame->data[3] = (uint8_t *)surface;

    return 0;
}
/* Initialize pkt as a refcounted packet wrapping caller-allocated data.
 * data must have AV_INPUT_BUFFER_PADDING_SIZE bytes of padding and have
 * been allocated with the av_malloc() family; ownership transfers to the
 * packet on success. Returns 0, AVERROR(EINVAL) if the padded size would
 * overflow, or AVERROR(ENOMEM). */
int av_packet_from_data(AVPacket *pkt, uint8_t *data, int size)
{
    /* reject sizes whose padded length would overflow int */
    if (size >= INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE)
        return AVERROR(EINVAL);

    pkt->buf = av_buffer_create(data, size + AV_INPUT_BUFFER_PADDING_SIZE,
                                av_buffer_default_free, NULL, 0);
    if (!pkt->buf)
        return AVERROR(ENOMEM);

    pkt->data = data;
    pkt->size = size;

    return 0;
}
/* get_buffer2() callback: let libavcodec allocate the frame normally, then
 * tag it with the PTS of the packet currently being decoded (stored in
 * picture->opaque) so it can be recovered after frame reordering.
 * A wrapper AVBufferRef is chained in front of the default buf[0];
 * freeBuffer releases the pts allocation and the wrapped buffer. */
int FFmpegDecoderVideo::getBuffer(AVCodecContext * const context, AVFrame * const picture, int flags)
{
    const FFmpegDecoderVideo * const this_ = reinterpret_cast<const FFmpegDecoderVideo*>(context->opaque);

    const int result = avcodec_default_get_buffer2(context, picture, flags);
    if (result < 0)
        return result; // original kept going and wrapped an unallocated buffer

    int64_t * p_pts = reinterpret_cast<int64_t*>( av_malloc(sizeof(int64_t)) );
    if (!p_pts)
        return AVERROR(ENOMEM); // original dereferenced a NULL allocation here
    *p_pts = this_->m_packet_pts;
    picture->opaque = p_pts;

    AVBufferRef *ref = av_buffer_create((uint8_t *)picture->opaque, sizeof(int64_t),
                                        FFmpegDecoderVideo::freeBuffer,
                                        picture->buf[0], flags);
    if (!ref)
    {
        /* keep picture->buf[0] (still owned by the frame); just drop the
         * pts allocation the wrapper would have owned */
        av_free(p_pts);
        picture->opaque = NULL;
        return AVERROR(ENOMEM);
    }
    picture->buf[0] = ref;

    return result;
}
/* hwaccel end_frame callback for VDA: submits the accumulated bitstream to
 * the VDA decoder, flushes so the decoded frame is emitted synchronously,
 * and attaches the resulting CVPixelBuffer to the output frame (raw handle
 * in data[3], refcounted wrapper in buf[0]). */
static int vda_h264_end_frame(AVCodecContext *avctx)
{
    H264Context *h = avctx->priv_data;
    VDAContext *vda = avctx->internal->hwaccel_priv_data;
    AVVDAContext *vda_ctx = avctx->hwaccel_context;
    AVFrame *frame = h->cur_pic_ptr->f;
    uint32_t flush_flags = 1 << 0; ///< kVDADecoderFlush_emitFrames
    CFDataRef coded_frame;
    OSStatus status;

    if (!vda->bitstream_size)
        return AVERROR_INVALIDDATA;

    coded_frame = CFDataCreate(kCFAllocatorDefault,
                               vda->bitstream,
                               vda->bitstream_size);

    status = VDADecoderDecode(vda_ctx->decoder, 0, coded_frame, NULL);

    /* flush with emitFrames so the decoded frame is available right away */
    if (status == kVDADecoderNoErr)
        status = VDADecoderFlush(vda_ctx->decoder, flush_flags);

    CFRelease(coded_frame);

    if (!vda->frame)
        return AVERROR_UNKNOWN;

    if (status != kVDADecoderNoErr) {
        av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%d)\n", status);
        return AVERROR_UNKNOWN;
    }

    /* drop any previously attached buffer before installing the new one */
    av_buffer_unref(&frame->buf[0]);
    frame->buf[0] = av_buffer_create((uint8_t*)vda->frame,
                                     sizeof(vda->frame),
                                     release_buffer, NULL,
                                     AV_BUFFER_FLAG_READONLY);
    if (!frame->buf[0])
        return AVERROR(ENOMEM);

    frame->data[3] = (uint8_t*)vda->frame;
    /* ownership transferred to the frame */
    vda->frame = NULL;

    return 0;
}
int av_buffersrc_add_ref(AVFilterContext *ctx, AVFilterBufferRef *buf, int flags) { BufferSourceContext *s = ctx->priv; AVFrame *frame = NULL; AVBufferRef *dummy_buf = NULL; int ret = 0, planes, i; if (!buf) { s->eof = 1; return 0; } else if (s->eof) return AVERROR(EINVAL); frame = av_frame_alloc(); if (!frame) return AVERROR(ENOMEM); dummy_buf = av_buffer_create(NULL, 0, compat_free_buffer, buf, (buf->perms & AV_PERM_WRITE) ? 0 : AV_BUFFER_FLAG_READONLY); if (!dummy_buf) { ret = AVERROR(ENOMEM); goto fail; } AV_NOWARN_DEPRECATED( if ((ret = avfilter_copy_buf_props(frame, buf)) < 0) goto fail; ) #define WRAP_PLANE(ref_out, data, data_size) \ do { \ AVBufferRef *dummy_ref = av_buffer_ref(dummy_buf); \ if (!dummy_ref) { \ ret = AVERROR(ENOMEM); \ goto fail; \ } \ ref_out = av_buffer_create(data, data_size, compat_unref_buffer, \ dummy_ref, (buf->perms & AV_PERM_WRITE) ? 0 : AV_BUFFER_FLAG_READONLY); \ if (!ref_out) { \ av_frame_unref(frame); \ ret = AVERROR(ENOMEM); \ goto fail; \ } \ } while (0) if (ctx->outputs[0]->type == AVMEDIA_TYPE_VIDEO) {
/* Initialize pkt as a refcounted packet wrapping caller-allocated data.
 * data must have FF_INPUT_BUFFER_PADDING_SIZE bytes of padding and have
 * been allocated with the av_malloc() family; ownership transfers to the
 * packet on success. Returns 0, AVERROR(EINVAL) if the padded size would
 * overflow, or AVERROR(ENOMEM). */
int av_packet_from_data(AVPacket *pkt, uint8_t *data, int size)
{
    /* reject sizes whose padded length would overflow int */
    if (size >= INT_MAX - FF_INPUT_BUFFER_PADDING_SIZE)
        return AVERROR(EINVAL);

    pkt->buf = av_buffer_create(data, size + FF_INPUT_BUFFER_PADDING_SIZE,
                                av_buffer_default_free, NULL, 0);
    if (!pkt->buf)
        return AVERROR(ENOMEM);

    pkt->data = data;
    pkt->size = size;
#if FF_API_DESTRUCT_PACKET
    /* keep the deprecated destruct field consistent for old-API users */
    pkt->destruct = dummy_destruct_packet;
#endif

    return 0;
}
/* get_buffer2() callback: backs the AVFrame with a newly allocated
 * VapourSynth video frame so decoded data lands directly in VS memory.
 * The empty AVBufferRef's free callback (VSReleaseBuffer) disposes of the
 * VS frame through the userdata it owns.
 * Returns 0 on success, -1 on error. */
int VSGetBuffer(AVCodecContext *avctx, AVFrame *pic, int flag)
{
    VSData *userdata;
    d2vData *data = (d2vData *) avctx->opaque;
    int i;

    /* lazily resolve the VS format preset from the decoder pixel format */
    if (!data->format_set) {
        switch(avctx->pix_fmt) {
        case PIX_FMT_YUV420P:
            data->vi.format = data->api->getFormatPreset(pfYUV420P8, data->core);
            break;
        case PIX_FMT_YUV422P:
            data->vi.format = data->api->getFormatPreset(pfYUV422P8, data->core);
            break;
        default:
            return -1;
        }
        data->format_set = true;
    }

    userdata = new VSData;
    userdata->d2v = (d2vData *) avctx->opaque;
    userdata->vs_frame = data->api->newVideoFrame(data->vi.format,
                                                  data->aligned_width,
                                                  data->aligned_height,
                                                  NULL, data->core);

    pic->buf[0] = av_buffer_create(NULL, 0, VSReleaseBuffer, userdata, 0);
    if (!pic->buf[0]) {
        /* the original leaked both the VS frame and userdata here */
        data->api->freeFrame(userdata->vs_frame);
        delete userdata;
        return -1;
    }

    pic->opaque = (void *) userdata->vs_frame;

    pic->extended_data = pic->data;
    pic->width  = data->aligned_width;
    pic->height = data->aligned_height;
    pic->format = avctx->pix_fmt;
    pic->sample_aspect_ratio = avctx->sample_aspect_ratio;

    /* point the AVFrame planes straight at the VS frame's memory */
    for(i = 0; i < data->vi.format->numPlanes; i++) {
        pic->data[i]     = data->api->getWritePtr(userdata->vs_frame, i);
        pic->linesize[i] = data->api->getStride(userdata->vs_frame, i);
    }

    return 0;
}
/* Attach a FrameDecodeData descriptor to the frame's opaque_ref, keeping
 * any user-supplied opaque_ref alive by chaining it inside the new
 * descriptor. Returns 0 or AVERROR(ENOMEM). */
static int attach_decode_data(AVFrame *frame)
{
    FrameDecodeData *fdd = av_mallocz(sizeof(*fdd));
    AVBufferRef *buf;

    if (!fdd)
        return AVERROR(ENOMEM);

    /* ownership of fdd passes to the buffer (decode_data_free) */
    buf = av_buffer_create((uint8_t*)fdd, sizeof(*fdd), decode_data_free,
                           NULL, AV_BUFFER_FLAG_READONLY);
    if (!buf) {
        av_freep(&fdd);
        return AVERROR(ENOMEM);
    }

    fdd->user_opaque_ref = frame->opaque_ref;
    frame->opaque_ref    = buf;

    return 0;
}
/* Buffer-pool allocator for VAAPI frames: creates one VASurface using the
 * context's render-target format and attributes and wraps the surface id
 * in a read-only AVBufferRef (vaapi_buffer_free destroys it). For
 * fixed-size pools the id is also recorded in the AVVAAPIFramesContext
 * surface list. Returns NULL on error. */
static AVBufferRef *vaapi_pool_alloc(void *opaque, int size)
{
    AVHWFramesContext     *hwfc = opaque;
    VAAPIFramesContext     *ctx = hwfc->internal->priv;
    AVVAAPIDeviceContext *hwctx = hwfc->device_ctx->hwctx;
    AVVAAPIFramesContext  *avfc = hwfc->hwctx;
    VASurfaceID surface_id;
    VAStatus vas;
    AVBufferRef *ref;

    vas = vaCreateSurfaces(hwctx->display, ctx->rt_format,
                           hwfc->width, hwfc->height,
                           &surface_id, 1,
                           ctx->attributes, ctx->nb_attributes);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(hwfc, AV_LOG_ERROR, "Failed to create surface: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        return NULL;
    }
    av_log(hwfc, AV_LOG_DEBUG, "Created surface %#x.\n", surface_id);

    /* the surface id itself is smuggled through the data pointer */
    ref = av_buffer_create((uint8_t*)(uintptr_t)surface_id,
                           sizeof(surface_id), &vaapi_buffer_free,
                           hwfc, AV_BUFFER_FLAG_READONLY);
    if (!ref) {
        vaDestroySurfaces(hwctx->display, &surface_id, 1);
        return NULL;
    }

    if (hwfc->initial_pool_size > 0) {
        // This is a fixed-size pool, so we must still be in the initial
        // allocation sequence.
        av_assert0(avfc->nb_surfaces < hwfc->initial_pool_size);
        avfc->surface_ids[avfc->nb_surfaces] = surface_id;
        ++avfc->nb_surfaces;
    }

    return ref;
}
/* Run pkt through the chain of bitstream filters starting at bsf_ctx (the
 * chain may be empty). On success the filtered payload replaces the
 * packet's data with proper refcounted ownership.
 * Returns the last filter's result (>= 0) on success, a negative AVERROR
 * on failure. */
static int filter_packet(void *log_ctx, AVPacket *pkt,
                         AVFormatContext *fmt_ctx, AVBitStreamFilterContext *bsf_ctx)
{
    AVCodecContext *enc_ctx = fmt_ctx->streams[pkt->stream_index]->codec;
    int ret = 0;

    while (bsf_ctx) {
        AVPacket new_pkt = *pkt;

        ret = av_bitstream_filter_filter(bsf_ctx, enc_ctx, NULL,
                                         &new_pkt.data, &new_pkt.size,
                                         pkt->data, pkt->size,
                                         pkt->flags & AV_PKT_FLAG_KEY);
        /* Some filters return 0 but still allocate a new payload; copy the
         * packet so the new data gets proper ownership. */
        if (ret == 0 && new_pkt.data != pkt->data && new_pkt.destruct) {
            if ((ret = av_copy_packet(&new_pkt, pkt)) < 0)
                break;
            ret = 1;
        }

        if (ret > 0) {
            av_free_packet(pkt);
            new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
                                           av_buffer_default_free, NULL, 0);
            if (!new_pkt.buf) {
                /* the original broke out with a positive ret here, silently
                 * returning success with a half-freed packet */
                ret = AVERROR(ENOMEM);
                break;
            }
        }
        /* stop at the first failing filter; breaking before advancing keeps
         * bsf_ctx valid for the error message below (the original advanced
         * past the failure and could dereference a NULL bsf_ctx) */
        if (ret < 0)
            break;

        *pkt = new_pkt;

        bsf_ctx = bsf_ctx->next;
    }

    if (ret < 0) {
        av_log(log_ctx, AV_LOG_ERROR,
               "Failed to filter bitstream with filter %s for stream %d in file '%s' with codec %s\n",
               bsf_ctx->filter->name, pkt->stream_index, fmt_ctx->filename,
               avcodec_get_name(enc_ctx->codec_id));
    }

    return ret;
}
/**
 * \brief libavcodec get-buffer callback: attach a VA surface from the pool
 *        (allocating a new one if the free queue is empty) to the frame.
 *
 * data[0] carries the ADM_vaSurface pointer and data[3] the raw surface id
 * (libavcodec hwaccel convention); buf[0] returns the surface through
 * ADM_LIBVAreleaseBuffer when the frame is unreferenced.
 *
 * @param avctx codec context (surface allocation + reordered_opaque)
 * @param pic   frame to fill
 * @return 0 on success, -1 if no surface could be allocated
 */
int decoderFFLIBVA::getBuffer(AVCodecContext *avctx, AVFrame *pic)
{
    imageMutex.lock();
    if(vaPool.freeSurfaceQueue.empty())
    {
        aprintf("Allocating new vaSurface\n");
        ADM_vaSurface *img=allocateADMVaSurface(avctx);
        if(!img)
        {
            imageMutex.unlock();
            ADM_warning("Cannot allocate new vaSurface!\n");
            return -1;
        }
        vaPool.freeSurfaceQueue.append(img);
        vaPool.allSurfaceQueue.append(img);
    }else
    {
        aprintf("Reusing vaSurface from pool\n");
    }
    /* pop the first free surface; safe because the pool is non-empty here */
    ADM_vaSurface *s= vaPool.freeSurfaceQueue[0];
    vaPool.freeSurfaceQueue.popFront();
    imageMutex.unlock();
    s->refCount=0;
    markSurfaceUsed(s); // 1 ref taken by lavcodec
    // NOTE(review): the av_buffer_create() result is not checked; a NULL
    // here would leave the surface marked used forever -- confirm/handle.
    pic->buf[0]=av_buffer_create((uint8_t *)&(s->surface), // Maybe a memleak here...
                                 sizeof(s->surface),
                                 ADM_LIBVAreleaseBuffer,
                                 (void *)this,
                                 AV_BUFFER_FLAG_READONLY);
    aprintf("Alloc Buffer : 0x%llx, surfaceid=%x\n",s,(int)s->surface);
    pic->data[0]=(uint8_t *)s;
    pic->data[3]=(uint8_t *)(uintptr_t)s->surface;
    pic->reordered_opaque= avctx->reordered_opaque;
    return 0;
}
/* get_buffer2() callback: picks a free VA surface from the decoder's fixed
 * pool and stores its id in data[0]/data[3] (libavcodec hwaccel
 * convention). With refcounted buffers, buf[0] returns the surface via
 * release_buffer2. Returns 0 on success, -1 when no surface is free. */
static int get_buffer2(struct AVCodecContext *s, AVFrame *pic, int flags)
{
    struct pp_video_decoder_s *vd = s->opaque;
    VASurfaceID surface = VA_INVALID_SURFACE;
    int idx = -1;

    for (int k = 0; k < MAX_VIDEO_SURFACES; k ++) {
        if (!vd->surface_used[k]) {
            surface = vd->surfaces[k];
            vd->surface_used[k] = 1;
            idx = k;
            break;
        }
    }

    /* check before touching the frame (the original wrote stale data into
     * pic first and only then failed) */
    if (surface == VA_INVALID_SURFACE)
        return -1;

    pic->data[0] = GSIZE_TO_POINTER(surface);
    pic->data[1] = NULL;
    pic->data[2] = NULL;
    pic->data[3] = GSIZE_TO_POINTER(surface);

#if AVCTX_HAVE_REFCOUNTED_BUFFERS == 0
    pic->type = FF_BUFFER_TYPE_USER;
    pic->pkt_pts = s->pkt->pts;
#endif

#if AVCTX_HAVE_REFCOUNTED_BUFFERS
    AVBufferRef *buf = av_buffer_create(pic->data[3], 0, release_buffer2, vd, 0);
    if (!buf) {
        /* the original leaked the pool slot on this path */
        vd->surface_used[idx] = 0;
        return -1;
    }
    pic->buf[0] = buf;
    pic->reordered_opaque = s->reordered_opaque;
#endif

    return 0;
}