/**
 * Route a VAPictureParameterBuffer to the codec-specific handler, then
 * lazily create the hardware decoder once max_references is known.
 *
 * Returns VA_STATUS_SUCCESS, or an error if the decoder cannot be created
 * (no render target bound, no reference frames declared, or allocation
 * failure in the pipe driver).
 */
static VAStatus
handlePictureParameterBuffer(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
{
   enum pipe_video_format format = u_reduce_video_profile(context->templat.profile);

   switch (format) {
   case PIPE_VIDEO_FORMAT_MPEG12:
      vlVaHandlePictureParameterBufferMPEG12(drv, context, buf);
      break;
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      vlVaHandlePictureParameterBufferH264(drv, context, buf);
      break;
   case PIPE_VIDEO_FORMAT_VC1:
      vlVaHandlePictureParameterBufferVC1(drv, context, buf);
      break;
   case PIPE_VIDEO_FORMAT_MPEG4:
      vlVaHandlePictureParameterBufferMPEG4(drv, context, buf);
      break;
   case PIPE_VIDEO_FORMAT_HEVC:
      vlVaHandlePictureParameterBufferHEVC(drv, context, buf);
      break;
   default:
      /* Unrecognized formats are silently ignored. */
      break;
   }

   /* Nothing more to do once the decoder exists. */
   if (context->decoder)
      return VA_STATUS_SUCCESS;

   /* The decoder is created here rather than at context creation because
    * max_references is only known after the picture parameters arrive. */
   if (!context->target)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   if (context->templat.max_references == 0)
      return VA_STATUS_ERROR_INVALID_BUFFER;

   if (format == PIPE_VIDEO_FORMAT_MPEG4_AVC)
      context->templat.level = u_get_h264_level(context->templat.width,
                                                context->templat.height,
                                                &context->templat.max_references);

   context->decoder = drv->pipe->create_video_codec(drv->pipe, &context->templat);
   if (!context->decoder)
      return VA_STATUS_ERROR_ALLOCATION_FAILED;

   context->decoder->begin_frame(context->decoder, context->target,
                                 &context->desc.base);

   return VA_STATUS_SUCCESS;
}
/** * Create a VdpDecoder. */ VdpStatus vlVdpDecoderCreate(VdpDevice device, VdpDecoderProfile profile, uint32_t width, uint32_t height, uint32_t max_references, VdpDecoder *decoder) { struct pipe_video_codec templat = {}; struct pipe_context *pipe; struct pipe_screen *screen; vlVdpDevice *dev; vlVdpDecoder *vldecoder; VdpStatus ret; bool supported; uint32_t maxwidth, maxheight; if (!decoder) return VDP_STATUS_INVALID_POINTER; *decoder = 0; if (!(width && height)) return VDP_STATUS_INVALID_VALUE; templat.profile = ProfileToPipe(profile); if (templat.profile == PIPE_VIDEO_PROFILE_UNKNOWN) return VDP_STATUS_INVALID_DECODER_PROFILE; dev = vlGetDataHTAB(device); if (!dev) return VDP_STATUS_INVALID_HANDLE; pipe = dev->context; screen = dev->vscreen->pscreen; pipe_mutex_lock(dev->mutex); supported = screen->get_video_param ( screen, templat.profile, PIPE_VIDEO_ENTRYPOINT_BITSTREAM, PIPE_VIDEO_CAP_SUPPORTED ); if (!supported) { pipe_mutex_unlock(dev->mutex); return VDP_STATUS_INVALID_DECODER_PROFILE; } maxwidth = screen->get_video_param ( screen, templat.profile, PIPE_VIDEO_ENTRYPOINT_BITSTREAM, PIPE_VIDEO_CAP_MAX_WIDTH ); maxheight = screen->get_video_param ( screen, templat.profile, PIPE_VIDEO_ENTRYPOINT_BITSTREAM, PIPE_VIDEO_CAP_MAX_HEIGHT ); if (width > maxwidth || height > maxheight) { pipe_mutex_unlock(dev->mutex); return VDP_STATUS_INVALID_SIZE; } vldecoder = CALLOC(1,sizeof(vlVdpDecoder)); if (!vldecoder) { pipe_mutex_unlock(dev->mutex); return VDP_STATUS_RESOURCES; } DeviceReference(&vldecoder->device, dev); templat.entrypoint = PIPE_VIDEO_ENTRYPOINT_BITSTREAM; templat.chroma_format = PIPE_VIDEO_CHROMA_FORMAT_420; templat.width = width; templat.height = height; templat.max_references = max_references; if (u_reduce_video_profile(templat.profile) == PIPE_VIDEO_FORMAT_MPEG4_AVC) templat.level = u_get_h264_level(templat.width, templat.height, &templat.max_references); vldecoder->decoder = pipe->create_video_codec(pipe, &templat); if (!vldecoder->decoder) { ret = 
VDP_STATUS_ERROR; goto error_decoder; } *decoder = vlAddDataHTAB(vldecoder); if (*decoder == 0) { ret = VDP_STATUS_ERROR; goto error_handle; } pipe_mutex_init(vldecoder->mutex); pipe_mutex_unlock(dev->mutex); return VDP_STATUS_OK; error_handle: vldecoder->decoder->destroy(vldecoder->decoder); error_decoder: pipe_mutex_unlock(dev->mutex); DeviceReference(&vldecoder->device, NULL); FREE(vldecoder); return ret; }