Example #1
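This callback appears to come from Mesa's gallium OpenMAX (st/omx) video decoder. When a frame has been decoded it attaches the resulting pipe_video_buffer to the output buffer header: on a tunneled output it swaps buffers with the output port, first weave-deinterlacing into a freshly allocated progressive buffer if needed; otherwise it copies the frame out through vid_dec_FillOutput.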
static void vid_dec_FrameDecoded(OMX_COMPONENTTYPE *comp, OMX_BUFFERHEADERTYPE* input,
                                 OMX_BUFFERHEADERTYPE* output)
{
   vid_dec_PrivateType *priv = comp->pComponentPrivate;
   bool eos = !!(input->nFlags & OMX_BUFFERFLAG_EOS);
   OMX_TICKS timestamp;

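   /* No decoded buffer attached yet: drain the decoder and pick up its timestamp. */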
   if (!input->pInputPortPrivate) {
      input->pInputPortPrivate = priv->Flush(priv, &timestamp);
      if (timestamp != OMX_VID_DEC_TIMESTAMP_INVALID)
         input->nTimeStamp = timestamp;
   }

   if (input->pInputPortPrivate) {
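      /* Tunneled path: swap video buffers with the output port instead of copying. */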
      if (output->pInputPortPrivate && !priv->disable_tunnel) {
         struct pipe_video_buffer *tmp, *vbuf, *new_vbuf;

         tmp = output->pOutputPortPrivate;
         vbuf = input->pInputPortPrivate;
         if (vbuf->interlaced) {
            /* allocate a progressive buffer to replace the interlaced one */
            omx_base_video_PortType *port;
            struct pipe_video_buffer templat = {};
            struct u_rect src_rect, dst_rect;

            port = (omx_base_video_PortType *)
                    priv->ports[OMX_BASE_FILTER_INPUTPORT_INDEX];
            templat.chroma_format = PIPE_VIDEO_CHROMA_FORMAT_420;
            templat.width = port->sPortParam.format.video.nFrameWidth;
            templat.height = port->sPortParam.format.video.nFrameHeight;
            templat.buffer_format = PIPE_FORMAT_NV12;
            templat.interlaced = false;
            new_vbuf = priv->pipe->create_video_buffer(priv->pipe, &templat);

            /* convert the interlaced buffer to a progressive one */
            src_rect.x0 = dst_rect.x0 = 0;
            src_rect.x1 = dst_rect.x1 = templat.width;
            src_rect.y0 = dst_rect.y0 = 0;
            src_rect.y1 = dst_rect.y1 = templat.height;

            vl_compositor_yuv_deint_full(&priv->cstate, &priv->compositor,
                                         input->pInputPortPrivate, new_vbuf,
                                         &src_rect, &dst_rect, VL_COMPOSITOR_WEAVE);

            /* set the progressive buffer for the next round */
            vbuf->destroy(vbuf);
            input->pInputPortPrivate = new_vbuf;
         }
         output->pOutputPortPrivate = input->pInputPortPrivate;
         input->pInputPortPrivate = tmp;
      } else {
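         /* Untunneled path: copy the decoded frame into the application-visible buffer. */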
         vid_dec_FillOutput(priv, input->pInputPortPrivate, output);
      }
      output->nFilledLen = output->nAllocLen;
      output->nTimeStamp = input->nTimeStamp;
   }

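   /* On EOS release the decode buffer; otherwise mark the input as fully consumed. */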
   if (eos && input->pInputPortPrivate)
      vid_dec_FreeInputPortPrivate(input);
   else
      input->nFilledLen = 0;
}
Example #2
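vlVaEndPicture appears to be Mesa's VA-API (st/va) implementation of vaEndPicture. It finishes the current frame: it re-allocates the target surface when the driver wants a different interlacing mode or buffer format, runs the bitstream encoder for encode contexts, and maintains the H.264/HEVC frame counters and flush cadence.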
VAStatus
vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
{
   vlVaDriver *drv;
   vlVaContext *context;
   vlVaBuffer *coded_buf;
   vlVaSurface *surf;
   void *feedback;
   struct pipe_screen *screen;
   bool supported;
   bool realloc = false;
   enum pipe_format format;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   drv = VL_VA_DRIVER(ctx);
   if (!drv)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   mtx_lock(&drv->mutex);
   context = handle_table_get(drv->htab, context_id);
   mtx_unlock(&drv->mutex);
   if (!context)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   if (!context->decoder) {
      if (context->templat.profile != PIPE_VIDEO_PROFILE_UNKNOWN)
         return VA_STATUS_ERROR_INVALID_CONTEXT;

      /* no decoder: this is a video post-processing (VPP) context, nothing to submit */
      return VA_STATUS_SUCCESS;
   }

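   /* Look up the render target and decide whether its buffer must be re-allocated. */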
   mtx_lock(&drv->mutex);
   surf = handle_table_get(drv->htab, context->target_id);
   if (!surf || !surf->buffer) {
      /* stale or already destroyed target surface */
      mtx_unlock(&drv->mutex);
      return VA_STATUS_ERROR_INVALID_SURFACE;
   }
   context->mpeg4.frame_num++;

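   /* Check whether the decoder supports the surface's current interlaced/progressive layout. */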
   screen = context->decoder->context->screen;
   supported = screen->get_video_param(screen, context->decoder->profile,
                                       context->decoder->entrypoint,
                                       surf->buffer->interlaced ?
                                       PIPE_VIDEO_CAP_SUPPORTS_INTERLACED :
                                       PIPE_VIDEO_CAP_SUPPORTS_PROGRESSIVE);

   if (!supported) {
      surf->templat.interlaced = screen->get_video_param(screen,
                                       context->decoder->profile,
                                       context->decoder->entrypoint,
                                       PIPE_VIDEO_CAP_PREFERS_INTERLACED);
      realloc = true;
   }

   format = screen->get_video_param(screen, context->decoder->profile,
                                    PIPE_VIDEO_ENTRYPOINT_BITSTREAM,
                                    PIPE_VIDEO_CAP_PREFERED_FORMAT);

   if (surf->buffer->buffer_format != format &&
       surf->buffer->buffer_format == PIPE_FORMAT_NV12) {
      /* surfaces start out as NV12; switch to the driver's preferred format */
      surf->templat.buffer_format = format;
      realloc = true;
   }

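   /* MJPEG: derive the buffer format from the stream's chroma sampling factors. */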
   if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_JPEG &&
       surf->buffer->buffer_format == PIPE_FORMAT_NV12) {
      if (context->mjpeg.sampling_factor == 0x211111 ||
          context->mjpeg.sampling_factor == 0x221212) {
         surf->templat.buffer_format = PIPE_FORMAT_YUYV;
         realloc = true;
      } else if (context->mjpeg.sampling_factor != 0x221111) {
         /* only 4:2:0 (0x221111) can stay NV12; reject other sampling factors */
         mtx_unlock(&drv->mutex);
         return VA_STATUS_ERROR_INVALID_SURFACE;
      }
   }

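   /* Re-allocate the surface; for encode contexts, carry the old content over by deinterlacing it into the new buffer. */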
   if (realloc) {
      struct pipe_video_buffer *old_buf = surf->buffer;

      if (vlVaHandleSurfaceAllocate(drv, surf, &surf->templat) != VA_STATUS_SUCCESS) {
         mtx_unlock(&drv->mutex);
         return VA_STATUS_ERROR_ALLOCATION_FAILED;
      }

      if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
         if (old_buf->interlaced) {
            struct u_rect src_rect, dst_rect;

            dst_rect.x0 = src_rect.x0 = 0;
            dst_rect.y0 = src_rect.y0 = 0;
            dst_rect.x1 = src_rect.x1 = surf->templat.width;
            dst_rect.y1 = src_rect.y1 = surf->templat.height;
            vl_compositor_yuv_deint_full(&drv->cstate, &drv->compositor,
                                         old_buf, surf->buffer,
                                         &src_rect, &dst_rect, VL_COMPOSITOR_WEAVE);
         } else {
            /* Can't convert from progressive to interlaced yet */
            mtx_unlock(&drv->mutex);
            return VA_STATUS_ERROR_INVALID_SURFACE;
         }
      }

      old_buf->destroy(old_buf);
      context->target = surf->buffer;
   }

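   /* Encode path: apply the per-codec preset and submit the frame to the encoder. */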
   if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
      coded_buf = context->coded_buf;
      if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG4_AVC) {
         getEncParamPresetH264(context);
         context->desc.h264enc.frame_num_cnt++;
      } else if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_HEVC)
         getEncParamPresetH265(context);
      context->decoder->begin_frame(context->decoder, context->target, &context->desc.base);
      context->decoder->encode_bitstream(context->decoder, context->target,
                                         coded_buf->derived_surface.resource, &feedback);
      surf->feedback = feedback;
      surf->coded_buf = coded_buf;
   }

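   /* Finish the frame; for H.264 encode, manage encoder flushes around IDR boundaries. */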
   context->decoder->end_frame(context->decoder, context->target, &context->desc.base);
   if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE &&
      u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG4_AVC) {
      int idr_period = context->desc.h264enc.gop_size / context->gop_coeff;
      int p_remain_in_idr = idr_period - context->desc.h264enc.frame_num;
      surf->frame_num_cnt = context->desc.h264enc.frame_num_cnt;
      surf->force_flushed = false;
      if (context->first_single_submitted) {
         context->decoder->flush(context->decoder);
         context->first_single_submitted = false;
         surf->force_flushed = true;
      }
      if (p_remain_in_idr == 1) {
         if ((context->desc.h264enc.frame_num_cnt % 2) != 0) {
            context->decoder->flush(context->decoder);
            context->first_single_submitted = true;
         }
         else
            context->first_single_submitted = false;
         surf->force_flushed = true;
      }
   } else if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE &&
              u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_HEVC)
      context->desc.h265enc.frame_num++;
   mtx_unlock(&drv->mutex);
   return VA_STATUS_SUCCESS;
}