/* Release all per-frame mapping state: the per-plane EGLImages, the acquired
 * DRM buffer handle, the derived VAImage, and our mp_image reference.
 *
 * Note the release order in this function: EGLImages are destroyed first,
 * then the buffer handle is released, then the VAImage is destroyed —
 * presumably each later object backs the earlier ones (TODO confirm against
 * the VAAPI/EGL interop docs).
 */
static void unref_image(struct gl_hwdec *hw)
{
    struct priv *p = hw->priv;
    VAStatus status;
    /* Destroy the EGLImage wrapper for every plane slot (fixed max of 4). */
    for (int n = 0; n < 4; n++) {
        if (p->images[n])
            p->DestroyImageKHR(eglGetCurrentDisplay(), p->images[n]);
        p->images[n] = 0;
    }
    /* The libva calls below are serialized with the context lock. */
    va_lock(p->ctx);
    if (p->buffer_acquired) {
        /* Give the DRM PRIME handle back to the driver. Failure is only
         * logged (via CHECK_VA_STATUS); cleanup continues regardless. */
        status = vaReleaseBufferHandle(p->display, p->current_image.buf);
        CHECK_VA_STATUS(p, "vaReleaseBufferHandle()");
        p->buffer_acquired = false;
    }
    if (p->current_image.image_id != VA_INVALID_ID) {
        status = vaDestroyImage(p->display, p->current_image.image_id);
        CHECK_VA_STATUS(p, "vaDestroyImage()");
        /* Mark as destroyed so a repeated unref_image() is a no-op. */
        p->current_image.image_id = VA_INVALID_ID;
    }
    /* Drop our reference to the decoded hardware frame. */
    mp_image_unrefp(&p->current_ref);
    va_unlock(p->ctx);
}
// Query the driver for the maximum picture size supported by the VP8 VLD
// entrypoint and verify the clip fits within it.
// Returns DECODE_SUCCESS if the clip is decodable, DECODE_DRIVER_FAIL if the
// clip exceeds the hardware limits (or an error from vaGetConfigAttributes).
Decode_Status VideoDecoderVP8::checkHardwareCapability() {
    VAStatus vaStatus;
    VAConfigAttrib cfgAttribs[2];
    cfgAttribs[0].type = VAConfigAttribMaxPictureWidth;
    cfgAttribs[1].type = VAConfigAttribMaxPictureHeight;
    vaStatus = vaGetConfigAttributes(mVADisplay, VAProfileVP8Version0_3,
            VAEntrypointVLD, cfgAttribs, 2);
    CHECK_VA_STATUS("vaGetConfigAttributes");
    // Compare each dimension independently. The previous check compared the
    // area products (maxW * maxH vs clipW * clipH), which wrongly accepted
    // clips where a single dimension exceeds the hardware maximum (e.g. a
    // very wide but short video).
    if (cfgAttribs[0].value < (uint32_t)mVideoFormatInfo.width ||
        cfgAttribs[1].value < (uint32_t)mVideoFormatInfo.height) {
        ETRACE("hardware supports resolution %d * %d smaller than the clip resolution %d * %d",
                cfgAttribs[0].value, cfgAttribs[1].value,
                mVideoFormatInfo.width, mVideoFormatInfo.height);
        return DECODE_DRIVER_FAIL;
    }
    return DECODE_SUCCESS;
}
/* Tear down the GL output texture and, if one exists, the VA GLX surface
 * that renders into it. Safe to call when nothing was created yet. */
static void destroy_texture(struct gl_hwdec *hw)
{
    struct priv *priv = hw->priv;
    VAStatus status;

    if (priv->vaglx_surface) {
        /* Destroy the GLX surface under the VA context lock; the result is
         * only logged, since there is nothing useful to do on failure. */
        va_lock(priv->ctx);
        status = vaDestroySurfaceGLX(priv->display, priv->vaglx_surface);
        va_unlock(priv->ctx);
        CHECK_VA_STATUS(priv, "vaDestroySurfaceGLX()");
        priv->vaglx_surface = NULL;
    }

    /* Deleting texture 0 is a GL no-op, so this is unconditional. */
    glDeleteTextures(1, &priv->gl_texture);
    priv->gl_texture = 0;
}
/* Copy the decoded hardware surface into our GL texture via VA-GLX and
 * report that texture to the caller.
 * Returns 0 on success, -1 on failure. */
static int map_image(struct gl_hwdec *hw, struct mp_image *hw_image,
                     GLuint *out_textures)
{
    struct priv *priv = hw->priv;
    VAStatus status;

    if (!priv->vaglx_surface)
        return -1;

    va_lock(priv->ctx);
    status = vaCopySurfaceGLX(priv->display, priv->vaglx_surface,
                              va_surface_id(hw_image),
                              va_get_colorspace_flag(hw_image->params.colorspace));
    va_unlock(priv->ctx);

    if (!CHECK_VA_STATUS(priv, "vaCopySurfaceGLX()"))
        return -1;

    out_textures[0] = priv->gl_texture;
    return 0;
}
/* Render the decoded hardware surface into our X11 pixmap with vaPutSurface
 * and report the GL texture bound to that pixmap.
 * Returns 0 on success, -1 on failure. */
static int map_image(struct gl_hwdec *hw, struct mp_image *hw_image,
                     GLuint *out_textures)
{
    struct priv *p = hw->priv;
    VAStatus status;

    if (!p->pixmap)
        return -1;

    va_lock(p->ctx);
    status = vaPutSurface(p->display, va_surface_id(hw_image), p->pixmap,
                          0, 0, hw_image->w, hw_image->h,
                          0, 0, hw_image->w, hw_image->h,
                          NULL, 0,
                          va_get_colorspace_flag(hw_image->params.colorspace));
    va_unlock(p->ctx);
    // Previously the status was logged but ignored and the function returned
    // success anyway. Propagate the failure (as the GLX map_image variant
    // does) so the caller does not sample a texture that was never updated.
    if (!CHECK_VA_STATUS(p, "vaPutSurface()"))
        return -1;

    out_textures[0] = p->gl_texture;
    return 0;
}
static int reinit(struct gl_hwdec *hw, const struct mp_image_params *params) { struct priv *p = hw->priv; GL *gl = hw->mpgl->gl; VAStatus status; destroy_texture(hw); gl->GenTextures(1, &p->gl_texture); gl->BindTexture(GL_TEXTURE_2D, p->gl_texture); gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); gl->TexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, params->w, params->h, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL); gl->BindTexture(GL_TEXTURE_2D, 0); va_lock(p->ctx); status = vaCreateSurfaceGLX(p->display, GL_TEXTURE_2D, p->gl_texture, &p->vaglx_surface); va_unlock(p->ctx); return CHECK_VA_STATUS(p, "vaCreateSurfaceGLX()") ? 0 : -1; }
// Submit one VP8 picture to the hardware: picture parameters, probability
// table, IQ matrix and the slice parameter/data buffers, then render and end
// the picture.
// Returns DECODE_SUCCESS, or propagates an error via CHECK_STATUS /
// CHECK_VA_STATUS (which return early; in that case mDecodingFrame may be
// left true so the caller can end/drop the frame).
Decode_Status VideoDecoderVP8::decodePicture(vbp_data_vp8 *data, int32_t picIndex) {
    VAStatus vaStatus = VA_STATUS_SUCCESS;
    Decode_Status status;
    uint32_t bufferIDCount = 0;
    // 3 fixed buffers plus one slice-parameter/slice-data pair; the parser
    // always emits a single slice for VP8 (see the loop comment below).
    VABufferID bufferIDs[5];

    vbp_picture_data_vp8 *picData = &(data->pic_data[picIndex]);
    VAPictureParameterBufferVP8 *picParams = picData->pic_parms;

    status = setReference(picParams);
    CHECK_STATUS("setReference");

    vaStatus = vaBeginPicture(mVADisplay, mVAContext, mAcquiredBuffer->renderBuffer.surface);
    CHECK_VA_STATUS("vaBeginPicture");

    // setting mDecodingFrame to true so vaEndPicture will be invoked to end the picture decoding.
    mDecodingFrame = true;

    vaStatus = vaCreateBuffer(
        mVADisplay,
        mVAContext,
        VAPictureParameterBufferType,
        sizeof(VAPictureParameterBufferVP8),
        1,
        picParams,
        &bufferIDs[bufferIDCount]);
    CHECK_VA_STATUS("vaCreatePictureParameterBuffer");
    bufferIDCount++;

    vaStatus = vaCreateBuffer(
        mVADisplay,
        mVAContext,
        VAProbabilityBufferType,
        sizeof(VAProbabilityDataBufferVP8),
        1,
        data->prob_data,
        &bufferIDs[bufferIDCount]);
    CHECK_VA_STATUS("vaCreateProbabilityBuffer");
    bufferIDCount++;

    vaStatus = vaCreateBuffer(
        mVADisplay,
        mVAContext,
        VAIQMatrixBufferType,
        sizeof(VAIQMatrixBufferVP8),
        1,
        data->IQ_matrix_buf,
        &bufferIDs[bufferIDCount]);
    CHECK_VA_STATUS("vaCreateIQMatrixBuffer");
    bufferIDCount++;

    /* Here picData->num_slices is always equal to 1 */
    for (uint32_t i = 0; i < picData->num_slices; i++) {
        // Defensive bound check: bufferIDs has room for exactly one
        // slice-parameter/slice-data pair beyond the three fixed buffers.
        // Without this, num_slices > 1 would overflow the stack array.
        if (bufferIDCount + 2 > sizeof(bufferIDs) / sizeof(bufferIDs[0])) {
            ETRACE("unexpected number of slices (%d) would overflow the buffer ID list",
                    picData->num_slices);
            return DECODE_DRIVER_FAIL;
        }

        vaStatus = vaCreateBuffer(
            mVADisplay,
            mVAContext,
            VASliceParameterBufferType,
            sizeof(VASliceParameterBufferVP8),
            1,
            &(picData->slc_data[i].slc_parms),
            &bufferIDs[bufferIDCount]);
        CHECK_VA_STATUS("vaCreateSliceParameterBuffer");
        bufferIDCount++;

        vaStatus = vaCreateBuffer(
            mVADisplay,
            mVAContext,
            VASliceDataBufferType,
            picData->slc_data[i].slice_size, //size
            1, //num_elements
            picData->slc_data[i].buffer_addr + picData->slc_data[i].slice_offset,
            &bufferIDs[bufferIDCount]);
        CHECK_VA_STATUS("vaCreateSliceDataBuffer");
        bufferIDCount++;
    }

    vaStatus = vaRenderPicture(
        mVADisplay,
        mVAContext,
        bufferIDs,
        bufferIDCount);
    CHECK_VA_STATUS("vaRenderPicture");

    vaStatus = vaEndPicture(mVADisplay, mVAContext);
    // Clear mDecodingFrame before checking the status so an error return does
    // not leave the "picture open" flag set after vaEndPicture was issued.
    mDecodingFrame = false;
    CHECK_VA_STATUS("vaEndPicture");

    return DECODE_SUCCESS;
}
// Decode one H.264 slice of a secure (protected) stream.
//
// On the first slice of a frame (first_mb_in_slice == 0, or no frame is
// currently being decoded) this ends any in-progress field, updates the DPB /
// reference frames, begins a new VA picture and submits the picture-parameter
// and IQ-matrix buffers. For every slice it then locates the matching NAL
// unit in mMetadata, fixes up the slice offsets/sizes to the driver's 16-byte
// alignment requirement (assumed from the `% 16` / `& ~0xF` math — TODO
// confirm against the driver contract), and submits the slice parameter and
// slice data buffers. For protected content (mInputBuffer == NULL) the slice
// "data" is an IMR offset passed in place of a pointer.
//
// Returns DECODE_SUCCESS, or propagates an error via CHECK_STATUS /
// CHECK_VA_STATUS.
//
// Only change from the original: the WTRACE typo "lenght" -> "length".
Decode_Status VideoDecoderAVCSecure::decodeSlice(vbp_data_h264 *data, uint32_t picIndex, uint32_t sliceIndex) {
    Decode_Status status;
    VAStatus vaStatus;
    uint32_t bufferIDCount = 0;
    // maximum 4 buffers to render a slice: picture parameter, IQMatrix, slice parameter, slice data
    VABufferID bufferIDs[4];

    vbp_picture_data_h264 *picData = &(data->pic_data[picIndex]);
    vbp_slice_data_h264 *sliceData = &(picData->slc_data[sliceIndex]);
    VAPictureParameterBufferH264 *picParam = picData->pic_parms;
    VASliceParameterBufferH264 *sliceParam = &(sliceData->slc_parms);

    if (sliceParam->first_mb_in_slice == 0 || mDecodingFrame == false) {
        // either condition indicates start of a new frame
        if (sliceParam->first_mb_in_slice != 0) {
            WTRACE("The first slice is lost.");
            // TODO: handle the first slice lost
        }
        if (mDecodingFrame) {
            // interlace content, complete decoding the first field
            vaStatus = vaEndPicture(mVADisplay, mVAContext);
            CHECK_VA_STATUS("vaEndPicture");

            // for interlace content, top field may be valid only after the second field is parsed
            mAcquiredBuffer->pictureOrder= picParam->CurrPic.TopFieldOrderCnt;
        }

        // Check there is no reference frame loss before decoding a frame
        // Update the reference frames and surface IDs for DPB and current frame
        status = updateDPB(picParam);
        CHECK_STATUS("updateDPB");

        //We have to provide a hacked DPB rather than complete DPB for libva as workaround
        status = updateReferenceFrames(picData);
        CHECK_STATUS("updateReferenceFrames");

        vaStatus = vaBeginPicture(mVADisplay, mVAContext, mAcquiredBuffer->renderBuffer.surface);
        CHECK_VA_STATUS("vaBeginPicture");

        // start decoding a frame
        mDecodingFrame = true;

        vaStatus = vaCreateBuffer(
            mVADisplay,
            mVAContext,
            VAPictureParameterBufferType,
            sizeof(VAPictureParameterBufferH264),
            1,
            picParam,
            &bufferIDs[bufferIDCount]);
        CHECK_VA_STATUS("vaCreatePictureParameterBuffer");
        bufferIDCount++;

        vaStatus = vaCreateBuffer(
            mVADisplay,
            mVAContext,
            VAIQMatrixBufferType,
            sizeof(VAIQMatrixBufferH264),
            1,
            data->IQ_matrix_buf,
            &bufferIDs[bufferIDCount]);
        CHECK_VA_STATUS("vaCreateIQMatrixBuffer");
        bufferIDCount++;
    }

    status = setReference(sliceParam);
    CHECK_STATUS("setReference");

    // find which naluinfo is correlated to current slice
    int naluIndex = 0;
    uint32_t accumulatedHeaderLen = 0;
    uint32_t headerLen = 0;
    for (; naluIndex < mMetadata.naluNumber; naluIndex++) {
        headerLen = mMetadata.naluInfo[naluIndex].naluHeaderLen;
        if (headerLen == 0) {
            // (typo fix: was "lenght")
            WTRACE("length of current NAL unit is 0.");
            continue;
        }
        accumulatedHeaderLen += STARTCODE_PREFIX_LEN;
        if (accumulatedHeaderLen + headerLen > sliceData->slice_offset) {
            break;
        }
        accumulatedHeaderLen += headerLen;
    }

    if (sliceData->slice_offset != accumulatedHeaderLen) {
        WTRACE("unexpected slice offset %d, accumulatedHeaderLen = %d", sliceData->slice_offset, accumulatedHeaderLen);
    }

    sliceParam->slice_data_size = mMetadata.naluInfo[naluIndex].naluLen;
    uint32_t sliceOffset = mMetadata.naluInfo[naluIndex].naluOffset;
    // Keep the submitted buffer 16-byte aligned: shift the in-buffer offset by
    // the misalignment of the NAL offset, and round the total size up to 16.
    uint32_t slice_offset_shift = sliceOffset % 16;
    sliceParam->slice_data_offset += slice_offset_shift;
    sliceData->slice_size = (sliceParam->slice_data_size + slice_offset_shift + 0xF) & ~0xF;

    vaStatus = vaCreateBuffer(
        mVADisplay,
        mVAContext,
        VASliceParameterBufferType,
        sizeof(VASliceParameterBufferH264),
        1,
        sliceParam,
        &bufferIDs[bufferIDCount]);
    CHECK_VA_STATUS("vaCreateSliceParameterBuffer");
    bufferIDCount++;

    // sliceData->slice_offset - accumulatedHeaderLen is the absolute offset to start codes of current NAL unit
    // offset points to first byte of NAL unit
    if (mInputBuffer != NULL) {
        vaStatus = vaCreateBuffer(
            mVADisplay,
            mVAContext,
            VASliceDataBufferType,
            sliceData->slice_size, //Slice size
            1, // num_elements
            mInputBuffer + sliceOffset - slice_offset_shift,
            &bufferIDs[bufferIDCount]);
    } else {
        // Protected content: no CPU-visible buffer; the driver interprets the
        // "pointer" as an offset into the isolated memory region (IMR).
        vaStatus = vaCreateBuffer(
            mVADisplay,
            mVAContext,
            VAProtectedSliceDataBufferType,
            sliceData->slice_size, //size
            1, //num_elements
            (uint8_t*)sliceOffset, // IMR offset
            &bufferIDs[bufferIDCount]);
    }
    CHECK_VA_STATUS("vaCreateSliceDataBuffer");
    bufferIDCount++;

    vaStatus = vaRenderPicture(
        mVADisplay,
        mVAContext,
        bufferIDs,
        bufferIDCount);
    CHECK_VA_STATUS("vaRenderPicture");

    return DECODE_SUCCESS;
}
/* Map a decoded VAAPI surface as GL textures via EGL dmabuf import.
 *
 * Derives a VAImage from the surface, acquires its buffer as a DRM PRIME
 * handle, wraps each plane in an EGLImage and binds it to one of the
 * pre-created GL textures, which are returned in out_textures.
 *
 * Returns 0 on success, -1 on failure; on failure all partially created
 * state is released via unref_image().
 */
static int map_image(struct gl_hwdec *hw, struct mp_image *hw_image,
                     GLuint *out_textures)
{
    struct priv *p = hw->priv;
    GL *gl = hw->gl;
    VAStatus status;
    VAImage *va_image = &p->current_image;

    /* Drop whatever frame was mapped before, then hold a ref to the new one
     * so the surface stays alive while it is mapped. */
    unref_image(hw);
    mp_image_setrefp(&p->current_ref, hw_image);

    va_lock(p->ctx);

    status = vaDeriveImage(p->display, va_surface_id(hw_image), va_image);
    if (!CHECK_VA_STATUS(p, "vaDeriveImage()"))
        goto err;

    int mpfmt = va_fourcc_to_imgfmt(va_image->format.fourcc);
    /* Only NV12 and planar 4:2:0 layouts are handled below. */
    if (mpfmt != IMGFMT_NV12 && mpfmt != IMGFMT_420P) {
        MP_FATAL(p, "unsupported VA image format %s\n",
                 VA_STR_FOURCC(va_image->format.fourcc));
        goto err;
    }

    /* Latch the format on first use ... */
    if (!hw->converted_imgfmt) {
        MP_VERBOSE(p, "format: %s %s\n",
                   VA_STR_FOURCC(va_image->format.fourcc),
                   mp_imgfmt_to_name(mpfmt));
        hw->converted_imgfmt = mpfmt;
    }

    /* ... and refuse to continue if the decoder changes it mid-stream. */
    if (hw->converted_imgfmt != mpfmt) {
        MP_FATAL(p, "mid-stream hwdec format change (%s -> %s) not supported\n",
                 mp_imgfmt_to_name(hw->converted_imgfmt),
                 mp_imgfmt_to_name(mpfmt));
        goto err;
    }

    /* Export the image's backing buffer as a DRM PRIME (dmabuf) handle. */
    VABufferInfo buffer_info = {.mem_type = VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME};
    status = vaAcquireBufferHandle(p->display, va_image->buf, &buffer_info);
    if (!CHECK_VA_STATUS(p, "vaAcquireBufferHandle()"))
        goto err;
    /* Remember to release the handle in unref_image(). */
    p->buffer_acquired = true;

    /* Build a dummy mp_image only to get per-plane sizes/bytes-per-pixel. */
    struct mp_image layout = {0};
    mp_image_set_params(&layout, &hw_image->params);
    mp_image_setfmt(&layout, mpfmt);

    /* DRM formats indexed by bytes-per-pixel of the plane (1..4).
     * (it would be nice if we could use EGL_IMAGE_INTERNAL_FORMAT_EXT) */
    int drm_fmts[4] = {MP_FOURCC('R', '8', ' ', ' '),   // DRM_FORMAT_R8
                       MP_FOURCC('G', 'R', '8', '8'),   // DRM_FORMAT_GR88
                       MP_FOURCC('R', 'G', '2', '4'),   // DRM_FORMAT_RGB888
                       MP_FOURCC('R', 'A', '2', '4')};  // DRM_FORMAT_RGBA8888

    /* Wrap each plane of the single dmabuf in its own EGLImage, using the
     * plane's offset/pitch from the derived VAImage. */
    for (int n = 0; n < layout.num_planes; n++) {
        int attribs[20] = {EGL_NONE};
        int num_attribs = 0;

        ADD_ATTRIB(EGL_LINUX_DRM_FOURCC_EXT, drm_fmts[layout.fmt.bytes[n] - 1]);
        ADD_ATTRIB(EGL_WIDTH, mp_image_plane_w(&layout, n));
        ADD_ATTRIB(EGL_HEIGHT, mp_image_plane_h(&layout, n));
        ADD_ATTRIB(EGL_DMA_BUF_PLANE0_FD_EXT, buffer_info.handle);
        ADD_ATTRIB(EGL_DMA_BUF_PLANE0_OFFSET_EXT, va_image->offsets[n]);
        ADD_ATTRIB(EGL_DMA_BUF_PLANE0_PITCH_EXT, va_image->pitches[n]);

        p->images[n] = p->CreateImageKHR(eglGetCurrentDisplay(),
                                         EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT,
                                         NULL, attribs);
        if (!p->images[n])
            goto err;

        /* Attach the EGLImage as the texture's storage. */
        gl->BindTexture(GL_TEXTURE_2D, p->gl_textures[n]);
        p->EGLImageTargetTexture2DOES(GL_TEXTURE_2D, p->images[n]);

        out_textures[n] = p->gl_textures[n];
    }
    gl->BindTexture(GL_TEXTURE_2D, 0);

    /* YV12 stores V before U; swap the chroma textures so the caller always
     * sees U then V. */
    if (va_image->format.fourcc == VA_FOURCC_YV12)
        MPSWAP(GLuint, out_textures[1], out_textures[2]);

    va_unlock(p->ctx);
    return 0;

err:
    va_unlock(p->ctx);
    MP_FATAL(p, "mapping VAAPI EGL image failed\n");
    /* Releases the VAImage, buffer handle, EGLImages and the frame ref. */
    unref_image(hw);
    return -1;
}