static GstBuffer *
create_output_buffer (GstMfxPostproc * vpp)
{
  GstBuffer *outbuf = NULL;
  GstFlowReturn ret;

  GstBufferPool *const pool =
      GST_MFX_PLUGIN_BASE (vpp)->srcpad_buffer_pool;

  g_return_val_if_fail (pool != NULL, NULL);

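  /* The pool must be active before buffers can be acquired from it */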
  if (!gst_buffer_pool_set_active (pool, TRUE))
    goto error_activate_pool;

  ret = gst_buffer_pool_acquire_buffer (pool, &outbuf, NULL);
  if (GST_FLOW_OK != ret || !outbuf)
    goto error_create_buffer;

  return outbuf;
  /* Errors */
error_activate_pool:
  {
    GST_ERROR ("failed to activate output video buffer pool");
    return NULL;
  }
error_create_buffer:
  {
    GST_ERROR ("failed to create output video buffer");
    return NULL;
  }
}
Example #2
static GstBuffer *
create_output_buffer (GstMsdkVPP * thiz)
{
  GstBuffer *outbuf;
  GstFlowReturn ret;
  GstBufferPool *pool = thiz->srcpad_buffer_pool;

  g_return_val_if_fail (pool != NULL, NULL);

  if (!gst_buffer_pool_is_active (pool) &&
      !gst_buffer_pool_set_active (pool, TRUE))
    goto error_activate_pool;

  outbuf = NULL;
  ret = gst_buffer_pool_acquire_buffer (pool, &outbuf, NULL);
  if (ret != GST_FLOW_OK || !outbuf)
    goto error_create_buffer;

  return outbuf;

  /* ERRORS */
error_activate_pool:
  {
    GST_ERROR_OBJECT (thiz, "failed to activate output video buffer pool");
    return NULL;
  }
error_create_buffer:
  {
    GST_ERROR_OBJECT (thiz, "failed to create output video buffer");
    return NULL;
  }
}
Example #3
static MsdkSurface *
get_surface_from_pool (GstMsdkVPP * thiz, GstBufferPool * pool,
    GstBufferPoolAcquireParams * params)
{
  GstBuffer *new_buffer;
  mfxFrameSurface1 *new_surface;
  MsdkSurface *msdk_surface;

  if (!gst_buffer_pool_is_active (pool) &&
      !gst_buffer_pool_set_active (pool, TRUE)) {
    GST_ERROR_OBJECT (pool, "failed to activate buffer pool");
    return NULL;
  }

  if (gst_buffer_pool_acquire_buffer (pool, &new_buffer, params) != GST_FLOW_OK) {
    GST_ERROR_OBJECT (pool, "failed to acquire a buffer from pool");
    return NULL;
  }

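  /* Only a buffer backed by MSDK memory carries an mfxFrameSurface1 */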
  if (gst_msdk_is_msdk_buffer (new_buffer))
    new_surface = gst_msdk_get_surface_from_buffer (new_buffer);
  else {
    GST_ERROR_OBJECT (pool, "the acquired memory is not MSDK memory");
    gst_buffer_unref (new_buffer);
    return NULL;
  }

  msdk_surface = g_slice_new0 (MsdkSurface);
  msdk_surface->surface = new_surface;
  msdk_surface->buf = new_buffer;

  return msdk_surface;
}
Example #4
static GstFlowReturn
gst_mpeg2dec_crop_buffer (GstMpeg2dec * dec, GstVideoCodecFrame * in_frame,
    GstVideoFrame * input_vframe)
{
  GstVideoCodecState *state;
  GstVideoInfo *info;
  GstVideoInfo *dinfo;
  GstVideoFrame output_frame;
  GstFlowReturn ret;
  GstBuffer *buffer = NULL;

  state = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (dec));
  info = &state->info;
  dinfo = &dec->decoded_info;

  GST_CAT_LOG_OBJECT (GST_CAT_PERFORMANCE, dec,
      "Copying input buffer %ux%u (%" G_GSIZE_FORMAT ") to output buffer "
      "%ux%u (%" G_GSIZE_FORMAT ")", dinfo->width, dinfo->height,
      dinfo->size, info->width, info->height, info->size);

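  /* Acquire a buffer matching the (cropped) output info from the downstream pool */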
  ret = gst_buffer_pool_acquire_buffer (dec->downstream_pool, &buffer, NULL);
  if (ret != GST_FLOW_OK)
    goto beach;

  if (!gst_video_frame_map (&output_frame, info, buffer, GST_MAP_WRITE))
    goto map_fail;

  if (in_frame->output_buffer)
    gst_buffer_unref (in_frame->output_buffer);
  in_frame->output_buffer = buffer;

  if (!gst_video_frame_copy (&output_frame, input_vframe))
    goto copy_failed;

  gst_video_frame_unmap (&output_frame);

  GST_BUFFER_FLAGS (in_frame->output_buffer) =
      GST_BUFFER_FLAGS (input_vframe->buffer);

beach:
  gst_video_codec_state_unref (state);

  return ret;

map_fail:
  {
    GST_ERROR_OBJECT (dec, "Failed to map output frame");
    gst_buffer_unref (buffer);
    gst_video_codec_state_unref (state);
    return GST_FLOW_ERROR;
  }

copy_failed:
  {
    GST_ERROR_OBJECT (dec, "Failed to copy output frame");
    gst_video_codec_state_unref (state);
    return GST_FLOW_ERROR;
  }
}
Example #5
static GstFlowReturn gst_imx_compositor_get_output_buffer(GstImxBPVideoAggregator *videoaggregator, GstBuffer **outbuffer)
{
	GstImxCompositor *compositor = GST_IMX_COMPOSITOR(videoaggregator);
	GstBufferPool *pool = compositor->dma_bufferpool;

	/* Return a DMA buffer from the pool. The video aggregator base class
	 * calls this function to allocate its output buffers. */

	if (!gst_buffer_pool_is_active(pool) &&
	    !gst_buffer_pool_set_active(pool, TRUE))
	{
		GST_ERROR_OBJECT(compositor, "failed to activate DMA bufferpool");
		return GST_FLOW_ERROR;
	}

	return gst_buffer_pool_acquire_buffer(pool, outbuffer, NULL);
}
Example #6
static MsdkSurface *
get_surface (GstMsdkDec * thiz, GstBuffer * buffer)
{
  MsdkSurface *i;

  i = g_slice_new0 (MsdkSurface);

  if (gst_msdk_is_msdk_buffer (buffer)) {
    i->surface = gst_msdk_get_surface_from_buffer (buffer);
    i->buf = buffer;
  } else {
    /* Make sure the side pool is active before acquiring from it */
    if (!gst_buffer_pool_is_active (thiz->pool) &&
        !gst_buffer_pool_set_active (thiz->pool, TRUE)) {
      g_slice_free (MsdkSurface, i);
      return NULL;
    }

    if (!gst_video_frame_map (&i->copy, &thiz->non_msdk_pool_info, buffer,
            GST_MAP_WRITE))
      goto failed_unref_buffer;

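    /* Acquire an MSDK-backed buffer from the side pool to decode into; the
     * caller's buffer stays mapped in i->copy so the decoded frame can be
     * copied out to it later */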
    if (gst_buffer_pool_acquire_buffer (thiz->pool, &buffer,
            NULL) != GST_FLOW_OK)
      goto failed_unmap_copy;

    i->surface = gst_msdk_get_surface_from_buffer (buffer);
    i->buf = buffer;

    if (!gst_video_frame_map (&i->data, &thiz->output_info, buffer,
            GST_MAP_READWRITE))
      goto failed_unref_buffer2;
  }

  thiz->decoded_msdk_surfaces = g_list_append (thiz->decoded_msdk_surfaces, i);
  return i;

failed_unref_buffer2:
  gst_buffer_unref (buffer);
  buffer = i->data.buffer;
failed_unmap_copy:
  gst_video_frame_unmap (&i->copy);
failed_unref_buffer:
  gst_buffer_unref (buffer);
  g_slice_free (MsdkSurface, i);

  GST_ERROR_OBJECT (thiz, "failed to handle buffer");
  return NULL;
}
Example #7
static void
gst_v4l2_buffer_pool_group_released (GstV4l2BufferPool * pool)
{
  GstBufferPoolAcquireParams params = { 0 };
  GstBuffer *buffer = NULL;
  GstFlowReturn ret;

  GST_DEBUG_OBJECT (pool, "A buffer was lost, reallocating it");

  params.flags =
      (GstBufferPoolAcquireFlags) GST_V4L2_BUFFER_POOL_ACQUIRE_FLAG_RESURRECT;
  ret =
      gst_buffer_pool_acquire_buffer (GST_BUFFER_POOL (pool), &buffer, &params);

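  /* The acquire call reallocated the lost buffer; releasing our ref simply
   * returns it to the pool for use */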
  if (ret == GST_FLOW_OK)
    gst_buffer_unref (buffer);
}
Example #8
static GstFlowReturn
gst_gl_mixer_get_output_buffer (GstVideoAggregator * videoaggregator,
    GstBuffer ** outbuf)
{
  GstGLMixer *mix = GST_GL_MIXER (videoaggregator);

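  /* Activate the GL buffer pool the first time an output buffer is requested */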
  if (!mix->priv->pool_active) {
    if (!gst_buffer_pool_set_active (mix->priv->pool, TRUE)) {
      GST_ELEMENT_ERROR (mix, RESOURCE, SETTINGS,
          ("failed to activate bufferpool"), ("failed to activate bufferpool"));
      return GST_FLOW_ERROR;
    }
    mix->priv->pool_active = TRUE;
  }

  return gst_buffer_pool_acquire_buffer (mix->priv->pool, outbuf, NULL);
}
Example #9
static GstFlowReturn
gst_v4l2_buffer_pool_prepare_buffer (GstV4l2BufferPool * pool,
    GstBuffer * dest, GstBuffer * src)
{
  GstFlowReturn ret = GST_FLOW_OK;
  gboolean own_src = FALSE;

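  /* Without a source buffer we acquire one from the downstream pool, so that
   * there is still data to copy or import below */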
  if (src == NULL) {
    if (pool->other_pool == NULL) {
      GST_ERROR_OBJECT (pool, "can't prepare buffer, source buffer missing");
      return GST_FLOW_ERROR;
    }

    ret = gst_buffer_pool_acquire_buffer (pool->other_pool, &src, NULL);
    if (ret != GST_FLOW_OK) {
      GST_ERROR_OBJECT (pool, "failed to acquire buffer from downstream pool");
      goto done;
    }

    own_src = TRUE;
  }

  switch (pool->obj->mode) {
    case GST_V4L2_IO_MMAP:
    case GST_V4L2_IO_DMABUF:
      ret = gst_v4l2_buffer_pool_copy_buffer (pool, dest, src);
      break;
    case GST_V4L2_IO_USERPTR:
      ret = gst_v4l2_buffer_pool_import_userptr (pool, dest, src);
      break;
    case GST_V4L2_IO_DMABUF_IMPORT:
      ret = gst_v4l2_buffer_pool_import_dmabuf (pool, dest, src);
      break;
    default:
      break;
  }

  if (own_src)
    gst_buffer_unref (src);

done:
  return ret;
}
Example #10
static GstFlowReturn
gst_gl_stereo_mix_get_output_buffer (GstVideoAggregator * videoaggregator,
    GstBuffer ** outbuf)
{
  GstGLStereoMix *mix = GST_GL_STEREO_MIX (videoaggregator);
  GstFlowReturn ret = GST_FLOW_OK;

#if 0

  if (!mix->priv->pool_active) {
    if (!gst_buffer_pool_set_active (mix->priv->pool, TRUE)) {
      GST_ELEMENT_ERROR (mix, RESOURCE, SETTINGS,
          ("failed to activate bufferpool"), ("failed to activate bufferpool"));
      return GST_FLOW_ERROR;
    }
    mix->priv->pool_active = TRUE;
  }

  return gst_buffer_pool_acquire_buffer (mix->priv->pool, outbuf, NULL);
#endif

  if (!gst_gl_stereo_mix_make_output (mix)) {
    gst_buffer_replace (&mix->primary_out, NULL);
    gst_buffer_replace (&mix->auxilliary_out, NULL);
    GST_ELEMENT_ERROR (mix, RESOURCE, SETTINGS,
        ("Failed to generate output"), ("failed to generate output"));
    ret = GST_FLOW_ERROR;
  }

  if (mix->auxilliary_out) {
    *outbuf = mix->auxilliary_out;
    mix->auxilliary_out = NULL;
  } else {
    *outbuf = mix->primary_out;
    mix->primary_out = NULL;
  }
  return ret;
}
Example #11
static GstFlowReturn
gst_dvdec_chain (GstPad * pad, GstObject * parent, GstBuffer * buf)
{
  GstDVDec *dvdec;
  guint8 *inframe;
  guint8 *outframe_ptrs[3];
  gint outframe_pitches[3];
  GstMapInfo map;
  GstVideoFrame frame;
  GstBuffer *outbuf;
  GstFlowReturn ret = GST_FLOW_OK;
  guint length;
  guint64 cstart = GST_CLOCK_TIME_NONE, cstop = GST_CLOCK_TIME_NONE;
  gboolean PAL, wide;

  dvdec = GST_DVDEC (parent);

  gst_buffer_map (buf, &map, GST_MAP_READ);
  inframe = map.data;

  /* buffer should be at least the size of one NTSC frame, this should
   * be enough to decode the header. */
  if (G_UNLIKELY (map.size < NTSC_BUFFER))
    goto wrong_size;

  /* preliminary dropping. unref and return if outside of configured segment */
  if ((dvdec->segment.format == GST_FORMAT_TIME) &&
      (!(gst_segment_clip (&dvdec->segment, GST_FORMAT_TIME,
                  GST_BUFFER_TIMESTAMP (buf),
                  GST_BUFFER_TIMESTAMP (buf) + GST_BUFFER_DURATION (buf),
                  &cstart, &cstop))))
    goto dropping;

  if (G_UNLIKELY (dv_parse_header (dvdec->decoder, inframe) < 0))
    goto parse_header_error;

  /* get size */
  PAL = dv_system_50_fields (dvdec->decoder);
  wide = dv_format_wide (dvdec->decoder);

  /* check the buffer is of right size after we know if we are
   * dealing with PAL or NTSC */
  length = (PAL ? PAL_BUFFER : NTSC_BUFFER);
  if (G_UNLIKELY (map.size < length))
    goto wrong_size;

  dv_parse_packs (dvdec->decoder, inframe);

  if (dvdec->video_offset % dvdec->drop_factor != 0)
    goto skip;

  /* renegotiate on change */
  if (PAL != dvdec->PAL || wide != dvdec->wide) {
    dvdec->src_negotiated = FALSE;
    dvdec->PAL = PAL;
    dvdec->wide = wide;
  }

  dvdec->height = (dvdec->PAL ? PAL_HEIGHT : NTSC_HEIGHT);

  dvdec->interlaced = !dv_is_progressive (dvdec->decoder);

  /* negotiate if not done yet */
  if (!dvdec->src_negotiated) {
    if (!gst_dvdec_src_negotiate (dvdec))
      goto not_negotiated;
  }

  if (gst_pad_check_reconfigure (dvdec->srcpad)) {
    GstCaps *caps;

    caps = gst_pad_get_current_caps (dvdec->srcpad);
    if (!caps)
      goto not_negotiated;

    gst_dvdec_negotiate_pool (dvdec, caps, &dvdec->vinfo);
    gst_caps_unref (caps);
  }

  if (dvdec->need_segment) {
    gst_pad_push_event (dvdec->srcpad, gst_event_new_segment (&dvdec->segment));
    dvdec->need_segment = FALSE;
  }

  ret = gst_buffer_pool_acquire_buffer (dvdec->pool, &outbuf, NULL);
  if (G_UNLIKELY (ret != GST_FLOW_OK))
    goto no_buffer;

  gst_video_frame_map (&frame, &dvdec->vinfo, outbuf, GST_MAP_WRITE);

  outframe_ptrs[0] = GST_VIDEO_FRAME_COMP_DATA (&frame, 0);
  outframe_pitches[0] = GST_VIDEO_FRAME_COMP_STRIDE (&frame, 0);

  /* the rest only matters for YUY2 */
  if (dvdec->bpp < 3) {
    outframe_ptrs[1] = GST_VIDEO_FRAME_COMP_DATA (&frame, 1);
    outframe_ptrs[2] = GST_VIDEO_FRAME_COMP_DATA (&frame, 2);

    outframe_pitches[1] = GST_VIDEO_FRAME_COMP_STRIDE (&frame, 1);
    outframe_pitches[2] = GST_VIDEO_FRAME_COMP_STRIDE (&frame, 2);
  }

  GST_DEBUG_OBJECT (dvdec, "decoding and pushing buffer");
  dv_decode_full_frame (dvdec->decoder, inframe,
      e_dv_color_yuv, outframe_ptrs, outframe_pitches);

  gst_video_frame_unmap (&frame);

  GST_BUFFER_FLAG_UNSET (outbuf, GST_VIDEO_BUFFER_FLAG_TFF);

  GST_BUFFER_OFFSET (outbuf) = GST_BUFFER_OFFSET (buf);
  GST_BUFFER_OFFSET_END (outbuf) = GST_BUFFER_OFFSET_END (buf);

  /* FIXME : Compute values when using non-TIME segments,
   * but for the moment make sure we at least don't set bogus values
   */
  if (GST_CLOCK_TIME_IS_VALID (cstart)) {
    GST_BUFFER_TIMESTAMP (outbuf) = cstart;
    if (GST_CLOCK_TIME_IS_VALID (cstop))
      GST_BUFFER_DURATION (outbuf) = cstop - cstart;
  }

  ret = gst_pad_push (dvdec->srcpad, outbuf);

skip:
  dvdec->video_offset++;

done:
  gst_buffer_unmap (buf, &map);
  gst_buffer_unref (buf);

  return ret;

  /* ERRORS */
wrong_size:
  {
    GST_ELEMENT_ERROR (dvdec, STREAM, DECODE,
        (NULL), ("Input buffer too small"));
    ret = GST_FLOW_ERROR;
    goto done;
  }
parse_header_error:
  {
    GST_ELEMENT_ERROR (dvdec, STREAM, DECODE,
        (NULL), ("Error parsing DV header"));
    ret = GST_FLOW_ERROR;
    goto done;
  }
not_negotiated:
  {
    GST_DEBUG_OBJECT (dvdec, "could not negotiate output");
    ret = GST_FLOW_NOT_NEGOTIATED;
    goto done;
  }
no_buffer:
  {
    GST_DEBUG_OBJECT (dvdec, "could not allocate buffer");
    goto done;
  }

dropping:
  {
    GST_DEBUG_OBJECT (dvdec,
        "dropping buffer since it's out of the configured segment");
    goto done;
  }
}
Example #12
static GstBuffer *
gst_rtp_vraw_depay_process (GstRTPBaseDepayload * depayload, GstBuffer * buf)
{
  GstRtpVRawDepay *rtpvrawdepay;
  guint8 *payload, *p0, *yp, *up, *vp, *headers;
  guint32 timestamp;
  guint cont, ystride, uvstride, pgroup, payload_len;
  gint width, height, xinc, yinc;
  GstRTPBuffer rtp = { NULL };
  GstVideoFrame *frame;
  gboolean marker;
  GstBuffer *outbuf = NULL;

  rtpvrawdepay = GST_RTP_VRAW_DEPAY (depayload);

  gst_rtp_buffer_map (buf, GST_MAP_READ, &rtp);

  timestamp = gst_rtp_buffer_get_timestamp (&rtp);

  if (timestamp != rtpvrawdepay->timestamp || rtpvrawdepay->outbuf == NULL) {
    GstBuffer *new_buffer;
    GstFlowReturn ret;

    GST_LOG_OBJECT (depayload, "new frame with timestamp %u", timestamp);
    /* new timestamp, flush old buffer and create new output buffer */
    if (rtpvrawdepay->outbuf) {
      gst_video_frame_unmap (&rtpvrawdepay->frame);
      gst_rtp_base_depayload_push (depayload, rtpvrawdepay->outbuf);
      rtpvrawdepay->outbuf = NULL;
    }

    if (gst_pad_check_reconfigure (GST_RTP_BASE_DEPAYLOAD_SRCPAD (depayload))) {
      GstCaps *caps;

      caps =
          gst_pad_get_current_caps (GST_RTP_BASE_DEPAYLOAD_SRCPAD (depayload));
      gst_rtp_vraw_depay_negotiate_pool (rtpvrawdepay, caps,
          &rtpvrawdepay->vinfo);
      gst_caps_unref (caps);
    }

    ret =
        gst_buffer_pool_acquire_buffer (rtpvrawdepay->pool, &new_buffer, NULL);

    if (G_UNLIKELY (ret != GST_FLOW_OK))
      goto alloc_failed;

    /* clear timestamp from alloc... */
    GST_BUFFER_TIMESTAMP (new_buffer) = -1;

    if (!gst_video_frame_map (&rtpvrawdepay->frame, &rtpvrawdepay->vinfo,
            new_buffer, GST_MAP_WRITE | GST_VIDEO_FRAME_MAP_FLAG_NO_REF)) {
      gst_buffer_unref (new_buffer);
      goto invalid_frame;
    }

    rtpvrawdepay->outbuf = new_buffer;
    rtpvrawdepay->timestamp = timestamp;
  }

  frame = &rtpvrawdepay->frame;

  g_assert (frame->buffer != NULL);

  /* get pointer and strides of the planes */
  p0 = GST_VIDEO_FRAME_PLANE_DATA (frame, 0);
  yp = GST_VIDEO_FRAME_COMP_DATA (frame, 0);
  up = GST_VIDEO_FRAME_COMP_DATA (frame, 1);
  vp = GST_VIDEO_FRAME_COMP_DATA (frame, 2);

  ystride = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0);
  uvstride = GST_VIDEO_FRAME_COMP_STRIDE (frame, 1);

  pgroup = rtpvrawdepay->pgroup;
  width = GST_VIDEO_INFO_WIDTH (&rtpvrawdepay->vinfo);
  height = GST_VIDEO_INFO_HEIGHT (&rtpvrawdepay->vinfo);
  xinc = rtpvrawdepay->xinc;
  yinc = rtpvrawdepay->yinc;

  payload = gst_rtp_buffer_get_payload (&rtp);
  payload_len = gst_rtp_buffer_get_payload_len (&rtp);

  if (payload_len < 3)
    goto short_packet;

  /* skip extended seqnum */
  payload += 2;
  payload_len -= 2;

  /* remember header position */
  headers = payload;

  /* find data start */
  do {
    if (payload_len < 6)
      goto short_packet;

    cont = payload[4] & 0x80;

    payload += 6;
    payload_len -= 6;
  } while (cont);

  while (TRUE) {
    guint length, line, offs, plen;
    guint8 *datap;

    /* stop when we run out of data */
    if (payload_len == 0)
      break;

    /* read length and cont. This should work because we iterated the headers
     * above. */
    length = (headers[0] << 8) | headers[1];
    line = ((headers[2] & 0x7f) << 8) | headers[3];
    offs = ((headers[4] & 0x7f) << 8) | headers[5];
    cont = headers[4] & 0x80;
    headers += 6;

    /* length must be a multiple of pgroup */
    if (length % pgroup != 0)
      goto wrong_length;

    if (length > payload_len)
      length = payload_len;

    /* sanity check */
    if (line > (height - yinc)) {
      GST_WARNING_OBJECT (depayload, "skipping line %d: out of range", line);
      goto next;
    }
    if (offs > (width - xinc)) {
      GST_WARNING_OBJECT (depayload, "skipping offset %d: out of range", offs);
      goto next;
    }

    /* calculate the maximum amount of bytes we can use per line */
    if (offs + ((length / pgroup) * xinc) > width) {
      plen = ((width - offs) * pgroup) / xinc;
      GST_WARNING_OBJECT (depayload, "clipping length %d, offset %d, plen %d",
          length, offs, plen);
    } else
      plen = length;

    GST_LOG_OBJECT (depayload,
        "writing length %u/%u, line %u, offset %u, remaining %u", plen, length,
        line, offs, payload_len);

    switch (GST_VIDEO_INFO_FORMAT (&rtpvrawdepay->vinfo)) {
      case GST_VIDEO_FORMAT_RGB:
      case GST_VIDEO_FORMAT_RGBA:
      case GST_VIDEO_FORMAT_BGR:
      case GST_VIDEO_FORMAT_BGRA:
      case GST_VIDEO_FORMAT_UYVY:
      case GST_VIDEO_FORMAT_UYVP:
        /* samples are packed just like gstreamer packs them */
        offs /= xinc;
        datap = p0 + (line * ystride) + (offs * pgroup);

        memcpy (datap, payload, plen);
        break;
      case GST_VIDEO_FORMAT_AYUV:
      {
        gint i;
        guint8 *p;

        datap = p0 + (line * ystride) + (offs * 4);
        p = payload;

        /* samples are packed in order Cb-Y-Cr for both interlaced and
         * progressive frames */
        for (i = 0; i < plen; i += pgroup) {
          *datap++ = 0;
          *datap++ = p[1];
          *datap++ = p[0];
          *datap++ = p[2];
          p += pgroup;
        }
        break;
      }
      case GST_VIDEO_FORMAT_I420:
      {
        gint i;
        guint uvoff;
        guint8 *yd1p, *yd2p, *udp, *vdp, *p;

        yd1p = yp + (line * ystride) + (offs);
        yd2p = yd1p + ystride;
        uvoff = (line / yinc * uvstride) + (offs / xinc);

        udp = up + uvoff;
        vdp = vp + uvoff;
        p = payload;

        /* line 0/1: Y00-Y01-Y10-Y11-Cb00-Cr00 Y02-Y03-Y12-Y13-Cb01-Cr01 ...  */
        for (i = 0; i < plen; i += pgroup) {
          *yd1p++ = p[0];
          *yd1p++ = p[1];
          *yd2p++ = p[2];
          *yd2p++ = p[3];
          *udp++ = p[4];
          *vdp++ = p[5];
          p += pgroup;
        }
        break;
      }
      case GST_VIDEO_FORMAT_Y41B:
      {
        gint i;
        guint uvoff;
        guint8 *ydp, *udp, *vdp, *p;

        ydp = yp + (line * ystride) + (offs);
        uvoff = (line / yinc * uvstride) + (offs / xinc);

        udp = up + uvoff;
        vdp = vp + uvoff;
        p = payload;

        /* Samples are packed in order Cb0-Y0-Y1-Cr0-Y2-Y3 for both interlaced
         * and progressive scan lines */
        for (i = 0; i < plen; i += pgroup) {
          *udp++ = p[0];
          *ydp++ = p[1];
          *ydp++ = p[2];
          *vdp++ = p[3];
          *ydp++ = p[4];
          *ydp++ = p[5];
          p += pgroup;
        }
        break;
      }
      default:
        goto unknown_sampling;
    }

  next:
    if (!cont)
      break;

    payload += length;
    payload_len -= length;
  }

  marker = gst_rtp_buffer_get_marker (&rtp);
  gst_rtp_buffer_unmap (&rtp);

  if (marker) {
    GST_LOG_OBJECT (depayload, "marker, flushing frame");
    gst_video_frame_unmap (&rtpvrawdepay->frame);
    outbuf = rtpvrawdepay->outbuf;
    rtpvrawdepay->outbuf = NULL;
    rtpvrawdepay->timestamp = -1;
  }
  return outbuf;

  /* ERRORS */
unknown_sampling:
  {
    GST_ELEMENT_ERROR (depayload, STREAM, FORMAT,
        (NULL), ("unimplemented sampling"));
    gst_rtp_buffer_unmap (&rtp);
    return NULL;
  }
alloc_failed:
  {
    GST_WARNING_OBJECT (depayload, "failed to alloc output buffer");
    gst_rtp_buffer_unmap (&rtp);
    return NULL;
  }
invalid_frame:
  {
    GST_ERROR_OBJECT (depayload, "could not map video frame");
    gst_rtp_buffer_unmap (&rtp);
    return NULL;
  }
wrong_length:
  {
    GST_WARNING_OBJECT (depayload, "length not multiple of pgroup");
    gst_rtp_buffer_unmap (&rtp);
    return NULL;
  }
short_packet:
  {
    GST_WARNING_OBJECT (depayload, "short packet");
    gst_rtp_buffer_unmap (&rtp);
    return NULL;
  }
}
Example #13
static GstFlowReturn
gst_wayland_sink_render (GstBaseSink * bsink, GstBuffer * buffer)
{
  GstWaylandSink *sink = GST_WAYLAND_SINK (bsink);
  GstBuffer *to_render;
  GstWlBuffer *wlbuffer;
  GstFlowReturn ret = GST_FLOW_OK;

  g_mutex_lock (&sink->render_lock);

  GST_LOG_OBJECT (sink, "render buffer %p", buffer);

  if (G_UNLIKELY (!sink->window)) {
    /* ask for window handle. Unlock render_lock while doing that because
     * set_window_handle & friends will lock it in this context */
    g_mutex_unlock (&sink->render_lock);
    gst_video_overlay_prepare_window_handle (GST_VIDEO_OVERLAY (sink));
    g_mutex_lock (&sink->render_lock);

    if (!sink->window) {
      /* if we were not provided a window, create one ourselves */
      sink->window =
          gst_wl_window_new_toplevel (sink->display, &sink->video_info);
    }
  }

  /* drop buffers until we get a frame callback */
  if (g_atomic_int_get (&sink->redraw_pending) == TRUE)
    goto done;

  /* make sure that the application has called set_render_rectangle() */
  if (G_UNLIKELY (sink->window->render_rectangle.w == 0))
    goto no_window_size;

  wlbuffer = gst_buffer_get_wl_buffer (buffer);

  if (G_LIKELY (wlbuffer && wlbuffer->display == sink->display)) {
    GST_LOG_OBJECT (sink, "buffer %p has a wl_buffer from our display, "
        "writing directly", buffer);
    to_render = buffer;
  } else {
    GstMemory *mem;
    struct wl_buffer *wbuf = NULL;

    GST_LOG_OBJECT (sink, "buffer %p does not have a wl_buffer from our "
        "display, creating it", buffer);

    mem = gst_buffer_peek_memory (buffer, 0);

    if (gst_is_wl_shm_memory (mem)) {
      wbuf = gst_wl_shm_memory_construct_wl_buffer (mem, sink->display,
          &sink->video_info);
    }

    if (wbuf) {
      gst_buffer_add_wl_buffer (buffer, wbuf, sink->display);
      to_render = buffer;
    } else {
      GstMapInfo src;
      /* we don't know how to create a wl_buffer directly from the provided
       * memory, so we have to copy the data to a memory that we know how
       * to handle... */

      GST_LOG_OBJECT (sink, "buffer %p cannot have a wl_buffer, "
          "copying to wl_shm memory", buffer);

      /* sink->pool always exists (created in set_caps), but it may not
       * be active if upstream is not using it */
      if (!gst_buffer_pool_is_active (sink->pool) &&
          !gst_buffer_pool_set_active (sink->pool, TRUE))
        goto activate_failed;

      ret = gst_buffer_pool_acquire_buffer (sink->pool, &to_render, NULL);
      if (ret != GST_FLOW_OK)
        goto no_buffer;

      /* the first time we acquire a buffer,
       * we need to attach a wl_buffer to it */
      wlbuffer = gst_buffer_get_wl_buffer (to_render);
      if (G_UNLIKELY (!wlbuffer)) {
        mem = gst_buffer_peek_memory (to_render, 0);
        wbuf = gst_wl_shm_memory_construct_wl_buffer (mem, sink->display,
            &sink->video_info);
        if (G_UNLIKELY (!wbuf))
          goto no_wl_buffer;

        gst_buffer_add_wl_buffer (to_render, wbuf, sink->display);
      }

      gst_buffer_map (buffer, &src, GST_MAP_READ);
      gst_buffer_fill (to_render, 0, src.data, src.size);
      gst_buffer_unmap (buffer, &src);
    }
  }

  /* drop double rendering */
  if (G_UNLIKELY (to_render == sink->last_buffer)) {
    GST_LOG_OBJECT (sink, "Buffer already being rendered");
    goto done;
  }

  gst_buffer_replace (&sink->last_buffer, to_render);
  render_last_buffer (sink);

  if (buffer != to_render)
    gst_buffer_unref (to_render);
  goto done;

no_window_size:
  {
    GST_ELEMENT_ERROR (sink, RESOURCE, WRITE,
        ("Window has no size set"),
        ("Make sure you set the size after calling set_window_handle"));
    ret = GST_FLOW_ERROR;
    goto done;
  }
no_buffer:
  {
    GST_WARNING_OBJECT (sink, "could not create buffer");
    goto done;
  }
no_wl_buffer:
  {
    GST_ERROR_OBJECT (sink, "could not create wl_buffer out of wl_shm memory");
    ret = GST_FLOW_ERROR;
    goto done;
  }
activate_failed:
  {
    GST_ERROR_OBJECT (sink, "failed to activate bufferpool.");
    ret = GST_FLOW_ERROR;
    goto done;
  }
done:
  {
    g_mutex_unlock (&sink->render_lock);
    return ret;
  }
}
Example #14
gboolean gst_imx_ipu_blitter_set_input_buffer(GstImxIpuBlitter *ipu_blitter, GstBuffer *input_buffer)
{
	GstImxPhysMemMeta *phys_mem_meta;

	g_assert(input_buffer != NULL);

	phys_mem_meta = GST_IMX_PHYS_MEM_META_GET(input_buffer);

	/* Test if the input buffer uses DMA memory */
	if ((phys_mem_meta != NULL) && (phys_mem_meta->phys_addr != 0))
	{
		/* DMA memory present - the input buffer can be used as an actual input buffer */
		gst_imx_ipu_blitter_set_actual_input_buffer(ipu_blitter, gst_buffer_ref(input_buffer));

		GST_TRACE_OBJECT(ipu_blitter, "input buffer uses DMA memory - setting it as actual input buffer");
	}
	else
	{
		/* No DMA memory present; the input buffer needs to be copied to an internal
		 * temporary input buffer */

		GstBuffer *temp_input_buffer;
		GstFlowReturn flow_ret;

		GST_TRACE_OBJECT(ipu_blitter, "input buffer does not use DMA memory - need to copy it to an internal input DMA buffer");

		{
			/* The internal input buffer is the temp input frame's DMA memory.
			 * If it does not exist yet, it needs to be created here. The temp input
			 * frame is then mapped. */

			if (ipu_blitter->internal_bufferpool == NULL)
			{
				/* Internal bufferpool does not exist yet - create it now,
				 * so that it can in turn create the internal input buffer */

				GstCaps *caps = gst_video_info_to_caps(&(ipu_blitter->input_video_info));

				ipu_blitter->internal_bufferpool = gst_imx_ipu_blitter_create_bufferpool(
					ipu_blitter,
					caps,
					ipu_blitter->input_video_info.size,
					2, 0,
					NULL,
					NULL
				);

				gst_caps_unref(caps);

				if (ipu_blitter->internal_bufferpool == NULL)
				{
					GST_ERROR_OBJECT(ipu_blitter, "failed to create internal bufferpool");
					return FALSE;
				}
			}

			/* Future versions of this code may propose the internal bufferpool upstream;
			 * hence the is_active check */
			if (!gst_buffer_pool_is_active(ipu_blitter->internal_bufferpool))
				gst_buffer_pool_set_active(ipu_blitter->internal_bufferpool, TRUE);
		}

		/* Create the internal input buffer */
		flow_ret = gst_buffer_pool_acquire_buffer(ipu_blitter->internal_bufferpool, &temp_input_buffer, NULL);
		if (flow_ret != GST_FLOW_OK)
		{
			GST_ERROR_OBJECT(ipu_blitter, "error acquiring input frame buffer: %s", gst_pad_mode_get_name(flow_ret));
			return FALSE;
		}

		{
			GstVideoFrame input_frame, temp_input_frame;

			gst_video_frame_map(&input_frame, &(ipu_blitter->input_video_info), input_buffer, GST_MAP_READ);
			gst_video_frame_map(&temp_input_frame, &(ipu_blitter->input_video_info), temp_input_buffer, GST_MAP_WRITE);

			/* Copy the input buffer's pixels to the temp input frame
			 * The gst_video_frame_copy() makes sure stride and plane offset values from both
			 * frames are respected */
			gst_video_frame_copy(&temp_input_frame, &input_frame);

			gst_video_frame_unmap(&temp_input_frame);
			gst_video_frame_unmap(&input_frame);
		}

		/* Finally, set the temp input buffer as the actual input buffer */
		gst_imx_ipu_blitter_set_actual_input_buffer(ipu_blitter, temp_input_buffer);
	}

	/* Configure interlacing */
	ipu_blitter->priv->task.input.deinterlace.enable = 0;
	if (ipu_blitter->deinterlace_mode != GST_IMX_IPU_BLITTER_DEINTERLACE_NONE)
	{
		switch (ipu_blitter->input_video_info.interlace_mode)
		{
			case GST_VIDEO_INTERLACE_MODE_INTERLEAVED:
				GST_TRACE_OBJECT(ipu_blitter, "input stream uses interlacing -> deinterlacing enabled");
				ipu_blitter->priv->task.input.deinterlace.enable = 1;
				break;
			case GST_VIDEO_INTERLACE_MODE_MIXED:
			{
				GstVideoMeta *video_meta;

				GST_TRACE_OBJECT(ipu_blitter, "input stream uses mixed interlacing -> need to check video metadata deinterlacing flag");

				video_meta = gst_buffer_get_video_meta(input_buffer);
				if (video_meta != NULL)
				{
					if (video_meta->flags & GST_VIDEO_FRAME_FLAG_INTERLACED)
					{
						GST_TRACE_OBJECT(ipu_blitter, "frame has video metadata and deinterlacing flag");
						ipu_blitter->priv->task.input.deinterlace.enable = 1;
					}
					else
						GST_TRACE_OBJECT(ipu_blitter, "frame has video metadata but no deinterlacing flag");
				}
				else
					GST_TRACE_OBJECT(ipu_blitter, "frame has no video metadata -> no deinterlacing done");

				break;
			}
			case GST_VIDEO_INTERLACE_MODE_PROGRESSIVE:
			{
				GST_TRACE_OBJECT(ipu_blitter, "input stream is progressive -> no deinterlacing necessary");
				break;
			}
			case GST_VIDEO_INTERLACE_MODE_FIELDS:
				GST_FIXME_OBJECT(ipu_blitter, "2-fields deinterlacing not supported (yet)");
				break;
			default:
				break;
		}
	}

	return TRUE;
}
Example #15
static GstFlowReturn gst_imx_vpu_base_enc_handle_frame(GstVideoEncoder *encoder, GstVideoCodecFrame *frame)
{
	VpuEncRetCode enc_ret;
	VpuEncEncParam enc_enc_param;
	GstImxPhysMemMeta *phys_mem_meta;
	GstImxVpuBaseEncClass *klass;
	GstImxVpuBaseEnc *vpu_base_enc;
	VpuFrameBuffer input_framebuf;
	GstBuffer *input_buffer;
	gint src_stride;

	vpu_base_enc = GST_IMX_VPU_BASE_ENC(encoder);
	klass = GST_IMX_VPU_BASE_ENC_CLASS(G_OBJECT_GET_CLASS(vpu_base_enc));

	g_assert(klass->set_frame_enc_params != NULL);

	memset(&enc_enc_param, 0, sizeof(enc_enc_param));
	memset(&input_framebuf, 0, sizeof(input_framebuf));

	phys_mem_meta = GST_IMX_PHYS_MEM_META_GET(frame->input_buffer);

	/* If the incoming frame's buffer is not using physically contiguous memory,
	 * it needs to be copied to the internal input buffer, otherwise the VPU
	 * encoder cannot read the frame */
	if (phys_mem_meta == NULL)
	{
		/* No physical memory metadata found -> buffer is not physically contiguous */

		GstVideoFrame temp_input_video_frame, temp_incoming_video_frame;

		GST_LOG_OBJECT(vpu_base_enc, "input buffer not physically contiguous - frame copy is necessary");

		if (vpu_base_enc->internal_input_buffer == NULL)
		{
			/* The internal input buffer is the temp input frame's DMA memory.
			 * If it does not exist yet, it needs to be created here. The temp input
			 * frame is then mapped. */

			GstFlowReturn flow_ret;

			if (vpu_base_enc->internal_bufferpool == NULL)
			{
				/* Internal bufferpool does not exist yet - create it now,
				 * so that it can in turn create the internal input buffer */

				GstStructure *config;
				GstCaps *caps;
				GstAllocator *allocator;

				GST_DEBUG_OBJECT(vpu_base_enc, "creating internal bufferpool");

				caps = gst_video_info_to_caps(&(vpu_base_enc->video_info));
				vpu_base_enc->internal_bufferpool = gst_imx_phys_mem_buffer_pool_new(FALSE);
				allocator = gst_imx_vpu_enc_allocator_obtain();

				config = gst_buffer_pool_get_config(vpu_base_enc->internal_bufferpool);
				gst_buffer_pool_config_set_params(config, caps, vpu_base_enc->video_info.size, 2, 0);
				gst_buffer_pool_config_set_allocator(config, allocator, NULL);
				gst_buffer_pool_config_add_option(config, GST_BUFFER_POOL_OPTION_IMX_PHYS_MEM);
				gst_buffer_pool_config_add_option(config, GST_BUFFER_POOL_OPTION_VIDEO_META);
				gst_buffer_pool_set_config(vpu_base_enc->internal_bufferpool, config);

				gst_caps_unref(caps);

				if (vpu_base_enc->internal_bufferpool == NULL)
				{
					GST_ERROR_OBJECT(vpu_base_enc, "failed to create internal bufferpool");
					return GST_FLOW_ERROR;
				}
			}

			/* Future versions of this code may propose the internal bufferpool upstream;
			 * hence the is_active check */
			if (!gst_buffer_pool_is_active(vpu_base_enc->internal_bufferpool))
				gst_buffer_pool_set_active(vpu_base_enc->internal_bufferpool, TRUE);

			/* Create the internal input buffer */
			flow_ret = gst_buffer_pool_acquire_buffer(vpu_base_enc->internal_bufferpool, &(vpu_base_enc->internal_input_buffer), NULL);
			if (flow_ret != GST_FLOW_OK)
			{
				GST_ERROR_OBJECT(vpu_base_enc, "error acquiring input frame buffer: %s", gst_pad_mode_get_name(flow_ret));
				return flow_ret;
			}
		}

		/* The internal input buffer exists at this point. Since the incoming frame
		 * is not stored in physical memory, copy its pixels to the internal
		 * input buffer, so the encoder can read them. */

		gst_video_frame_map(&temp_incoming_video_frame, &(vpu_base_enc->video_info), frame->input_buffer, GST_MAP_READ);
		gst_video_frame_map(&temp_input_video_frame, &(vpu_base_enc->video_info), vpu_base_enc->internal_input_buffer, GST_MAP_WRITE);

		gst_video_frame_copy(&temp_input_video_frame, &temp_incoming_video_frame);

		gst_video_frame_unmap(&temp_incoming_video_frame);
		gst_video_frame_unmap(&temp_input_video_frame);

		/* Set the internal input buffer as the encoder's input */
		input_buffer = vpu_base_enc->internal_input_buffer;
		/* And use the internal input buffer's physical memory metadata */
		phys_mem_meta = GST_IMX_PHYS_MEM_META_GET(vpu_base_enc->internal_input_buffer);
	}
	else
	{
		/* Physical memory metadata found -> buffer is physically contiguous
		 * It can be used directly as input for the VPU encoder */
		input_buffer = frame->input_buffer;
	}

	/* Set up physical addresses for the input framebuffer */
	{
		gsize *plane_offsets;
		gint *plane_strides;
		GstVideoMeta *video_meta;
		unsigned char *phys_ptr;

		/* Try to use plane offset and stride information from the video
		 * metadata if present, since these can be more accurate than
		 * the information from the video info */
		video_meta = gst_buffer_get_video_meta(input_buffer);
		if (video_meta != NULL)
		{
			plane_offsets = video_meta->offset;
			plane_strides = video_meta->stride;
		}
		else
		{
			plane_offsets = vpu_base_enc->video_info.offset;
			plane_strides = vpu_base_enc->video_info.stride;
		}

		phys_ptr = (unsigned char*)(phys_mem_meta->phys_addr);

		input_framebuf.pbufY = phys_ptr;
		input_framebuf.pbufCb = phys_ptr + plane_offsets[1];
		input_framebuf.pbufCr = phys_ptr + plane_offsets[2];
		input_framebuf.pbufMvCol = NULL; /* not used by the VPU encoder */
		input_framebuf.nStrideY = plane_strides[0];
		input_framebuf.nStrideC = plane_strides[1];

		/* this is needed for framebuffers registration below */
		src_stride = plane_strides[0];

		GST_TRACE_OBJECT(vpu_base_enc, "width: %d   height: %d   stride 0: %d   stride 1: %d   offset 0: %d   offset 1: %d   offset 2: %d", GST_VIDEO_INFO_WIDTH(&(vpu_base_enc->video_info)), GST_VIDEO_INFO_HEIGHT(&(vpu_base_enc->video_info)), plane_strides[0], plane_strides[1], plane_offsets[0], plane_offsets[1], plane_offsets[2]);
	}

	/* Create framebuffers structure (if not already present) */
	if (vpu_base_enc->framebuffers == NULL)
	{
		GstImxVpuFramebufferParams fbparams;
		gst_imx_vpu_framebuffers_enc_init_info_to_params(&(vpu_base_enc->init_info), &fbparams);
		fbparams.pic_width = vpu_base_enc->open_param.nPicWidth;
		fbparams.pic_height = vpu_base_enc->open_param.nPicHeight;

		vpu_base_enc->framebuffers = gst_imx_vpu_framebuffers_new(&fbparams, gst_imx_vpu_enc_allocator_obtain());
		if (vpu_base_enc->framebuffers == NULL)
		{
			GST_ELEMENT_ERROR(vpu_base_enc, RESOURCE, NO_SPACE_LEFT, ("could not create framebuffers structure"), (NULL));
			return GST_FLOW_ERROR;
		}

		gst_imx_vpu_framebuffers_register_with_encoder(vpu_base_enc->framebuffers, vpu_base_enc->handle, src_stride);
	}

	/* Allocate physical buffer for output data (if not already present) */
	if (vpu_base_enc->output_phys_buffer == NULL)
	{
		vpu_base_enc->output_phys_buffer = (GstImxPhysMemory *)gst_allocator_alloc(gst_imx_vpu_enc_allocator_obtain(), vpu_base_enc->framebuffers->total_size, NULL);

		if (vpu_base_enc->output_phys_buffer == NULL)
		{
			GST_ERROR_OBJECT(vpu_base_enc, "could not allocate physical buffer for output data");
			return GST_FLOW_ERROR;
		}
	}

	/* Set up encoding parameters */
	enc_enc_param.nInVirtOutput = (unsigned int)(vpu_base_enc->output_phys_buffer->mapped_virt_addr); /* TODO */
	enc_enc_param.nInPhyOutput = (unsigned int)(vpu_base_enc->output_phys_buffer->phys_addr);
	enc_enc_param.nInOutputBufLen = vpu_base_enc->output_phys_buffer->mem.size;
	enc_enc_param.nPicWidth = vpu_base_enc->framebuffers->pic_width;
	enc_enc_param.nPicHeight = vpu_base_enc->framebuffers->pic_height;
	enc_enc_param.nFrameRate = vpu_base_enc->open_param.nFrameRate;
	enc_enc_param.pInFrame = &input_framebuf;
	enc_enc_param.nForceIPicture = 0;

	/* Force I-frame if either IS_FORCE_KEYFRAME or IS_FORCE_KEYFRAME_HEADERS is set for the current frame. */
	if (GST_VIDEO_CODEC_FRAME_IS_FORCE_KEYFRAME(frame) || GST_VIDEO_CODEC_FRAME_IS_FORCE_KEYFRAME_HEADERS(frame))
	{
		enc_enc_param.nForceIPicture = 1;
		GST_LOG_OBJECT(vpu_base_enc, "got request to make this a keyframe - forcing I frame");
		GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT(frame);
	}

	/* Give the derived class a chance to set up encoding parameters too */
	if (!klass->set_frame_enc_params(vpu_base_enc, &enc_enc_param, &(vpu_base_enc->open_param)))
	{
		GST_ERROR_OBJECT(vpu_base_enc, "derived class could not frame enc params");
		return GST_FLOW_ERROR;
	}

	/* Main encoding block */
	{
		GstBuffer *output_buffer = NULL;
		gsize output_buffer_offset = 0;
		gboolean frame_finished = FALSE;

		frame->output_buffer = NULL;

		/* Run in a loop until the VPU reports the input as used */
		do
		{
			/* Feed input data */
			enc_ret = VPU_EncEncodeFrame(vpu_base_enc->handle, &enc_enc_param);
			if (enc_ret != VPU_ENC_RET_SUCCESS)
			{
				GST_ERROR_OBJECT(vpu_base_enc, "failed to encode frame: %s", gst_imx_vpu_strerror(enc_ret));
				VPU_EncReset(vpu_base_enc->handle);
				return GST_FLOW_ERROR;
			}

			if (frame_finished)
			{
				GST_WARNING_OBJECT(vpu_base_enc, "frame was already finished for the current input, but input not yet marked as used");
				continue;
			}

			if (enc_enc_param.eOutRetCode & (VPU_ENC_OUTPUT_DIS | VPU_ENC_OUTPUT_SEQHEADER))
			{
				/* Create an output buffer on demand */
				if (output_buffer == NULL)
				{
					output_buffer = gst_video_encoder_allocate_output_buffer(
						encoder,
						vpu_base_enc->output_phys_buffer->mem.size
					);
					frame->output_buffer = output_buffer;
				}

				GST_LOG_OBJECT(vpu_base_enc, "processing output data: %u bytes, output buffer offset %" G_GSIZE_FORMAT, enc_enc_param.nOutOutputSize, output_buffer_offset);

				if (klass->fill_output_buffer != NULL)
				{
					/* Derived class fills data on its own */

					gsize cur_offset = output_buffer_offset;
					output_buffer_offset += klass->fill_output_buffer(
						vpu_base_enc,
						frame,
						cur_offset,
						vpu_base_enc->output_phys_buffer->mapped_virt_addr,
						enc_enc_param.nOutOutputSize,
						enc_enc_param.eOutRetCode & VPU_ENC_OUTPUT_SEQHEADER
					);
				}
				else
				{
					/* Use default data filling (= copy input to output) */

					gst_buffer_fill(
						output_buffer,
						output_buffer_offset,
						vpu_base_enc->output_phys_buffer->mapped_virt_addr,
						enc_enc_param.nOutOutputSize
					);
					output_buffer_offset += enc_enc_param.nOutOutputSize;
				}

				if (enc_enc_param.eOutRetCode & VPU_ENC_OUTPUT_DIS)
				{
					g_assert(output_buffer != NULL);

					/* Set the output buffer's size to the actual number of bytes
					 * filled by the derived class */
					gst_buffer_set_size(output_buffer, output_buffer_offset);

					/* Set the frame DTS */
					frame->dts = frame->pts;

					/* And finish the frame, handing the output data over to the base class */
					gst_video_encoder_finish_frame(encoder, frame);

					output_buffer = NULL;
					frame_finished = TRUE;

					if (!(enc_enc_param.eOutRetCode & VPU_ENC_INPUT_USED))
						GST_WARNING_OBJECT(vpu_base_enc, "frame finished, but VPU did not report the input as used");

					break;
				}
			}
		}
		while (!(enc_enc_param.eOutRetCode & VPU_ENC_INPUT_USED)); /* VPU_ENC_INPUT_NOT_USED has value 0x0 - cannot use it for flag checks */

		/* If output_buffer is NULL at this point, it means VPU_ENC_OUTPUT_DIS was never communicated
		 * by the VPU, and the buffer is unfinished. -> Drop it. */
		if (output_buffer != NULL)
		{
			GST_WARNING_OBJECT(vpu_base_enc, "frame unfinished; dropping");
			gst_buffer_unref(output_buffer);
			frame->output_buffer = NULL; /* necessary to make finish_frame() drop the frame */
			gst_video_encoder_finish_frame(encoder, frame);
		}
	}

	return GST_FLOW_OK;
}
Example #16
static GstFlowReturn
gst_monoscope_chain (GstPad * pad, GstObject * parent, GstBuffer * inbuf)
{
  GstFlowReturn flow_ret = GST_FLOW_OK;
  GstMonoscope *monoscope;

  monoscope = GST_MONOSCOPE (parent);

  if (monoscope->rate == 0) {
    gst_buffer_unref (inbuf);
    flow_ret = GST_FLOW_NOT_NEGOTIATED;
    goto out;
  }

  /* Make sure we have an output format */
  flow_ret = ensure_negotiated (monoscope);
  if (flow_ret != GST_FLOW_OK) {
    gst_buffer_unref (inbuf);
    goto out;
  }

  /* don't try to combine samples from discont buffer */
  if (GST_BUFFER_FLAG_IS_SET (inbuf, GST_BUFFER_FLAG_DISCONT)) {
    gst_adapter_clear (monoscope->adapter);
    monoscope->next_ts = GST_CLOCK_TIME_NONE;
  }

  /* Match timestamps from the incoming audio */
  if (GST_BUFFER_TIMESTAMP (inbuf) != GST_CLOCK_TIME_NONE)
    monoscope->next_ts = GST_BUFFER_TIMESTAMP (inbuf);

  GST_LOG_OBJECT (monoscope, "in buffer has %" G_GSIZE_FORMAT " samples, "
      "ts=%" GST_TIME_FORMAT, gst_buffer_get_size (inbuf) / monoscope->bps,
      GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (inbuf)));

  gst_adapter_push (monoscope->adapter, inbuf);
  inbuf = NULL;

  /* Collect samples until we have enough for an output frame */
  while (flow_ret == GST_FLOW_OK) {
    gint16 *samples;
    GstBuffer *outbuf = NULL;
    guint32 *pixels, avail, bytesperframe;

    avail = gst_adapter_available (monoscope->adapter);
    GST_LOG_OBJECT (monoscope, "bytes avail now %u", avail);

    bytesperframe = monoscope->spf * monoscope->bps;
    if (avail < bytesperframe)
      break;

    /* FIXME: something is wrong with QoS, we are skipping way too much
     * stuff even with very low CPU loads */
#if 0
    if (monoscope->next_ts != -1) {
      gboolean need_skip;
      gint64 qostime;

      qostime = gst_segment_to_running_time (&monoscope->segment,
          GST_FORMAT_TIME, monoscope->next_ts);

      GST_OBJECT_LOCK (monoscope);
      /* check for QoS, don't compute buffers that are known to be late */
      need_skip =
          GST_CLOCK_TIME_IS_VALID (monoscope->earliest_time) &&
          qostime <= monoscope->earliest_time;
      GST_OBJECT_UNLOCK (monoscope);

      if (need_skip) {
        GST_WARNING_OBJECT (monoscope,
            "QoS: skip ts: %" GST_TIME_FORMAT ", earliest: %" GST_TIME_FORMAT,
            GST_TIME_ARGS (qostime), GST_TIME_ARGS (monoscope->earliest_time));
        goto skip;
      }
    }
#endif

    samples = (gint16 *) gst_adapter_map (monoscope->adapter, bytesperframe);

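    /* monoscope_update () consumes 512 samples; if a frame carries fewer,
     * stretch what we have by nearest-neighbour resampling */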
    if (monoscope->spf < 512) {
      gint16 in_data[512], i;

      for (i = 0; i < 512; ++i) {
        gdouble off;

        off = ((gdouble) i * (gdouble) monoscope->spf) / 512.0;
        in_data[i] = samples[MIN ((guint) off, monoscope->spf)];
      }
      pixels = monoscope_update (monoscope->visstate, in_data);
    } else {
      /* not really correct, but looks much prettier */
      pixels = monoscope_update (monoscope->visstate, samples);
    }

    GST_LOG_OBJECT (monoscope, "allocating output buffer");
    flow_ret = gst_buffer_pool_acquire_buffer (monoscope->pool, &outbuf, NULL);
    if (flow_ret != GST_FLOW_OK) {
      gst_adapter_unmap (monoscope->adapter);
      goto out;
    }

    gst_buffer_fill (outbuf, 0, pixels, monoscope->outsize);

    GST_BUFFER_TIMESTAMP (outbuf) = monoscope->next_ts;
    GST_BUFFER_DURATION (outbuf) = monoscope->frame_duration;

    flow_ret = gst_pad_push (monoscope->srcpad, outbuf);

#if 0
  skip:
#endif

    if (GST_CLOCK_TIME_IS_VALID (monoscope->next_ts))
      monoscope->next_ts += monoscope->frame_duration;

    gst_adapter_flush (monoscope->adapter, bytesperframe);
  }

out:

  return flow_ret;
}
Example #17
static GstFlowReturn
gst_v4l2_transform_prepare_output_buffer (GstBaseTransform * trans,
    GstBuffer * inbuf, GstBuffer ** outbuf)
{
  GstV4l2Transform *self = GST_V4L2_TRANSFORM (trans);
  GstBufferPool *pool = GST_BUFFER_POOL (self->v4l2output->pool);
  GstFlowReturn ret = GST_FLOW_OK;
  GstBaseTransformClass *bclass = GST_BASE_TRANSFORM_CLASS (parent_class);

  if (gst_base_transform_is_passthrough (trans)) {
    GST_DEBUG_OBJECT (self, "Passthrough, no need to do anything");
    *outbuf = inbuf;
    goto beach;
  }

  /* Ensure input internal pool is active */
  if (!gst_buffer_pool_is_active (pool)) {
    GstStructure *config = gst_buffer_pool_get_config (pool);
    gst_buffer_pool_config_set_params (config, self->incaps,
        self->v4l2output->info.size, 2, 2);

    /* There is no reason to refuse this config */
    if (!gst_buffer_pool_set_config (pool, config))
      goto activate_failed;

    if (!gst_buffer_pool_set_active (pool, TRUE))
      goto activate_failed;
  }

  GST_DEBUG_OBJECT (self, "Queue input buffer");
  ret = gst_v4l2_buffer_pool_process (GST_V4L2_BUFFER_POOL (pool), &inbuf);
  if (G_UNLIKELY (ret != GST_FLOW_OK))
    goto beach;

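  /* Dequeue a converted buffer, retrying for as long as the device flags the
   * output as corrupted */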
  do {
    pool = gst_base_transform_get_buffer_pool (trans);

    if (!gst_buffer_pool_set_active (pool, TRUE))
      goto activate_failed;

    GST_DEBUG_OBJECT (self, "Dequeue output buffer");
    ret = gst_buffer_pool_acquire_buffer (pool, outbuf, NULL);
    g_object_unref (pool);

    if (ret != GST_FLOW_OK)
      goto alloc_failed;

    pool = self->v4l2capture->pool;
    ret = gst_v4l2_buffer_pool_process (GST_V4L2_BUFFER_POOL (pool), outbuf);

  } while (ret == GST_V4L2_FLOW_CORRUPTED_BUFFER);

  if (ret != GST_FLOW_OK) {
    gst_buffer_unref (*outbuf);
    *outbuf = NULL;
    goto beach;
  }

  if (bclass->copy_metadata)
    if (!bclass->copy_metadata (trans, inbuf, *outbuf)) {
      /* something failed, post a warning */
      GST_ELEMENT_WARNING (self, STREAM, NOT_IMPLEMENTED,
          ("could not copy metadata"), (NULL));
    }

beach:
  return ret;

activate_failed:
  GST_ELEMENT_ERROR (self, RESOURCE, SETTINGS,
      ("failed to activate bufferpool"), ("failed to activate bufferpool"));
  g_object_unref (pool);
  return GST_FLOW_ERROR;

alloc_failed:
  GST_DEBUG_OBJECT (self, "could not allocate buffer from pool");
  return ret;
}
Example #18
static GstFlowReturn
gst_gdk_pixbuf_dec_flush (GstGdkPixbufDec * filter)
{
  GstBuffer *outbuf;
  GdkPixbuf *pixbuf;
  int y;
  guint8 *out_pix;
  guint8 *in_pix;
  int in_rowstride, out_rowstride;
  GstFlowReturn ret;
  GstCaps *caps = NULL;
  gint width, height;
  gint n_channels;
  GstVideoFrame frame;

  pixbuf = gdk_pixbuf_loader_get_pixbuf (filter->pixbuf_loader);
  if (pixbuf == NULL)
    goto no_pixbuf;

  width = gdk_pixbuf_get_width (pixbuf);
  height = gdk_pixbuf_get_height (pixbuf);

  if (GST_VIDEO_INFO_FORMAT (&filter->info) == GST_VIDEO_FORMAT_UNKNOWN) {
    GstVideoInfo info;
    GstVideoFormat fmt;

    GST_DEBUG ("Set size to %dx%d", width, height);

    n_channels = gdk_pixbuf_get_n_channels (pixbuf);
    switch (n_channels) {
      case 3:
        fmt = GST_VIDEO_FORMAT_RGB;
        break;
      case 4:
        fmt = GST_VIDEO_FORMAT_RGBA;
        break;
      default:
        goto channels_not_supported;
    }


    gst_video_info_init (&info);
    gst_video_info_set_format (&info, fmt, width, height);
    info.fps_n = filter->in_fps_n;
    info.fps_d = filter->in_fps_d;
    caps = gst_video_info_to_caps (&info);

    filter->info = info;

    gst_pad_set_caps (filter->srcpad, caps);
    gst_caps_unref (caps);

    gst_gdk_pixbuf_dec_setup_pool (filter, &info);
  }

  ret = gst_buffer_pool_acquire_buffer (filter->pool, &outbuf, NULL);
  if (ret != GST_FLOW_OK)
    goto no_buffer;

  GST_BUFFER_TIMESTAMP (outbuf) = filter->last_timestamp;
  GST_BUFFER_DURATION (outbuf) = GST_CLOCK_TIME_NONE;

  in_pix = gdk_pixbuf_get_pixels (pixbuf);
  in_rowstride = gdk_pixbuf_get_rowstride (pixbuf);

  gst_video_frame_map (&frame, &filter->info, outbuf, GST_MAP_WRITE);
  out_pix = GST_VIDEO_FRAME_PLANE_DATA (&frame, 0);
  out_rowstride = GST_VIDEO_FRAME_PLANE_STRIDE (&frame, 0);

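  /* Copy line by line: the pixbuf and the video frame may use different
   * row strides */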
  for (y = 0; y < height; y++) {
    memcpy (out_pix, in_pix, width * GST_VIDEO_FRAME_COMP_PSTRIDE (&frame, 0));
    in_pix += in_rowstride;
    out_pix += out_rowstride;
  }

  gst_video_frame_unmap (&frame);

  GST_DEBUG ("pushing... %" G_GSIZE_FORMAT " bytes",
      gst_buffer_get_size (outbuf));
  ret = gst_pad_push (filter->srcpad, outbuf);

  if (ret != GST_FLOW_OK)
    GST_DEBUG_OBJECT (filter, "flow: %s", gst_flow_get_name (ret));

  return ret;

  /* ERRORS */
no_pixbuf:
  {
    GST_ELEMENT_ERROR (filter, STREAM, DECODE, (NULL), ("error getting pixbuf"));
    return GST_FLOW_ERROR;
  }
channels_not_supported:
  {
    GST_ELEMENT_ERROR (filter, STREAM, DECODE, (NULL),
        ("%d channels not supported", n_channels));
    return GST_FLOW_ERROR;
  }
no_buffer:
  {
    GST_DEBUG ("Failed to create outbuffer - %s", gst_flow_get_name (ret));
    return ret;
  }
}
Example #19
static void
gst_v4l2_video_dec_loop (GstVideoDecoder * decoder)
{
  GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);
  GstV4l2BufferPool *v4l2_pool = GST_V4L2_BUFFER_POOL (self->v4l2capture->pool);
  GstBufferPool *pool;
  GstVideoCodecFrame *frame;
  GstBuffer *buffer = NULL;
  GstFlowReturn ret;

  GST_LOG_OBJECT (decoder, "Allocate output buffer");

  do {
    /* We cannot use the base class allocate helper since it takes the internal
     * stream lock. We know that the acquire may need to poll until more frames
     * come in, and holding this lock would prevent that.
     */
    pool = gst_video_decoder_get_buffer_pool (decoder);

    /* Pool may be NULL if we started going to READY state */
    if (pool == NULL) {
      ret = GST_FLOW_FLUSHING;
      goto beach;
    }

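    /* May block or poll here until the device produces more frames */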
    ret = gst_buffer_pool_acquire_buffer (pool, &buffer, NULL);
    g_object_unref (pool);

    if (ret != GST_FLOW_OK)
      goto beach;

    GST_LOG_OBJECT (decoder, "Process output buffer");
    ret = gst_v4l2_buffer_pool_process (v4l2_pool, &buffer);

  } while (ret == GST_V4L2_FLOW_CORRUPTED_BUFFER);

  if (ret != GST_FLOW_OK)
    goto beach;

  frame = gst_v4l2_video_dec_get_oldest_frame (decoder);

  if (frame) {
    frame->output_buffer = buffer;
    buffer = NULL;
    ret = gst_video_decoder_finish_frame (decoder, frame);

    if (ret != GST_FLOW_OK)
      goto beach;
  } else {
    GST_WARNING_OBJECT (decoder, "Decoder is producing too many buffers");
    gst_buffer_unref (buffer);
  }

  return;

beach:
  GST_DEBUG_OBJECT (decoder, "Leaving output thread: %s",
      gst_flow_get_name (ret));

  gst_buffer_replace (&buffer, NULL);
  self->output_flow = ret;
  g_atomic_int_set (&self->processing, FALSE);
  gst_v4l2_object_unlock (self->v4l2output);
  gst_pad_pause_task (decoder->srcpad);
}
Example #20
/**
 * gst_v4l2_buffer_pool_process:
 * @bpool: a #GstBufferPool
 * @buf: a #GstBuffer, may be replaced
 *
 * Process @buf in @bpool. For capture devices, this function fills @buf with
 * data from the device. For output devices, this function sends the contents
 * of @buf to the device for playback.
 *
 * Returns: %GST_FLOW_OK on success.
 */
GstFlowReturn
gst_v4l2_buffer_pool_process (GstV4l2BufferPool * pool, GstBuffer ** buf)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstBufferPool *bpool = GST_BUFFER_POOL_CAST (pool);
  GstV4l2Object *obj = pool->obj;

  GST_DEBUG_OBJECT (pool, "process buffer %p", buf);

  g_return_val_if_fail (gst_buffer_pool_is_active (bpool), GST_FLOW_ERROR);

  if (GST_BUFFER_POOL_IS_FLUSHING (pool))
    return GST_FLOW_FLUSHING;

  switch (obj->type) {
    case V4L2_BUF_TYPE_VIDEO_CAPTURE:
    case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
      /* capture */
      switch (obj->mode) {
        case GST_V4L2_IO_RW:
          /* capture into the buffer */
          ret = gst_v4l2_do_read (pool, *buf);
          break;

        case GST_V4L2_IO_MMAP:
        case GST_V4L2_IO_DMABUF:
        {
          GstBuffer *tmp;

          if ((*buf)->pool == bpool) {
            if (gst_buffer_get_size (*buf) == 0)
              goto eos;

            /* start copying buffers when we are running low on buffers */
            if (g_atomic_int_get (&pool->num_queued) < pool->copy_threshold) {
              GstBuffer *copy;

              if (GST_V4L2_ALLOCATOR_CAN_ALLOCATE (pool->vallocator, MMAP)) {

                if (gst_buffer_pool_acquire_buffer (bpool, &copy,
                        NULL) == GST_FLOW_OK) {
                  gst_v4l2_buffer_pool_release_buffer (bpool, copy);
                  goto done;
                }
              }

              /* copy the buffer */
              copy = gst_buffer_copy_region (*buf,
                  GST_BUFFER_COPY_ALL | GST_BUFFER_COPY_DEEP, 0, -1);
              GST_LOG_OBJECT (pool, "copy buffer %p->%p", *buf, copy);

              /* and requeue so that we can continue capturing */
              gst_buffer_unref (*buf);
              *buf = copy;
            }

            /* nothing, data was inside the buffer when we did _acquire() */
            goto done;
          }

          /* buffer not from our pool, grab a frame and copy it into the target */
          if ((ret = gst_v4l2_buffer_pool_dqbuf (pool, &tmp)) != GST_FLOW_OK)
            goto done;

          /* An empty buffer on capture indicates the end of stream */
          if (gst_buffer_get_size (tmp) == 0) {
            gst_v4l2_buffer_pool_release_buffer (bpool, tmp);
            goto eos;
          }

          ret = gst_v4l2_buffer_pool_copy_buffer (pool, *buf, tmp);

          /* and queue the buffer again after the copy */
          gst_v4l2_buffer_pool_release_buffer (bpool, tmp);

          if (ret != GST_FLOW_OK)
            goto copy_failed;
          break;
        }

        case GST_V4L2_IO_USERPTR:
        {
          struct UserPtrData *data;

          /* Replace our buffer with downstream allocated buffer */
          data = gst_mini_object_steal_qdata (GST_MINI_OBJECT (*buf),
              GST_V4L2_IMPORT_QUARK);
          gst_buffer_replace (buf, data->buffer);
          _unmap_userptr_frame (data);
          break;
        }

        case GST_V4L2_IO_DMABUF_IMPORT:
        {
          GstBuffer *tmp;

          /* Replace our buffer with downstream allocated buffer */
          tmp = gst_mini_object_steal_qdata (GST_MINI_OBJECT (*buf),
              GST_V4L2_IMPORT_QUARK);
          gst_buffer_replace (buf, tmp);
          gst_buffer_unref (tmp);
          break;
        }

        default:
          g_assert_not_reached ();
          break;
      }
      break;

    case V4L2_BUF_TYPE_VIDEO_OUTPUT:
    case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
      /* playback */
      switch (obj->mode) {
        case GST_V4L2_IO_RW:
          /* FIXME, do write() */
          GST_WARNING_OBJECT (pool, "implement write()");
          break;

        case GST_V4L2_IO_USERPTR:
        case GST_V4L2_IO_DMABUF_IMPORT:
        case GST_V4L2_IO_DMABUF:
        case GST_V4L2_IO_MMAP:
        {
          GstBuffer *to_queue = NULL;
          GstV4l2MemoryGroup *group;
          gint index;

          if ((*buf)->pool != bpool)
            goto copying;

          if (!gst_v4l2_is_buffer_valid (*buf, &group))
            goto copying;

          index = group->buffer.index;

          GST_LOG_OBJECT (pool, "processing buffer %i from our pool", index);

          if (pool->buffers[index] != NULL) {
            GST_LOG_OBJECT (pool, "buffer %i already queued, copying", index);
            goto copying;
          }

          /* we can queue directly */
          to_queue = gst_buffer_ref (*buf);

        copying:
          if (to_queue == NULL) {
            GstBufferPoolAcquireParams params = { 0 };

            GST_LOG_OBJECT (pool, "alloc buffer from our pool");

            /* this can return EOS if all buffers are outstanding which would
             * be strange because we would expect the upstream element to have
             * allocated them and returned them to us. */
            params.flags = GST_BUFFER_POOL_ACQUIRE_FLAG_DONTWAIT;
            ret = gst_buffer_pool_acquire_buffer (bpool, &to_queue, &params);
            if (ret != GST_FLOW_OK)
              goto acquire_failed;

            ret = gst_v4l2_buffer_pool_prepare_buffer (pool, to_queue, *buf);
            if (ret != GST_FLOW_OK) {
              gst_buffer_unref (to_queue);
              goto prepare_failed;
            }
          }

          if ((ret = gst_v4l2_buffer_pool_qbuf (pool, to_queue)) != GST_FLOW_OK)
            goto queue_failed;

          /* if we are not streaming yet (this is the first buffer), start
           * streaming now */
          if (!gst_v4l2_buffer_pool_streamon (pool)) {
            /* don't check return value because qbuf would have failed */
            gst_v4l2_is_buffer_valid (to_queue, &group);

            /* qbuf has taken the ref of the to_queue buffer but we are not in
             * streaming state, so the flush logic won't be performed.
             * To avoid leaks, flush the allocator and restore the queued
             * buffer as non-queued */
            gst_v4l2_allocator_flush (pool->vallocator);

            pool->buffers[group->buffer.index] = NULL;

            gst_mini_object_set_qdata (GST_MINI_OBJECT (to_queue),
                GST_V4L2_IMPORT_QUARK, NULL, NULL);
            gst_buffer_unref (to_queue);
            g_atomic_int_add (&pool->num_queued, -1);
            goto start_failed;
          }

          if (g_atomic_int_get (&pool->num_queued) >= pool->min_latency) {
            GstBuffer *out;
            /* all buffers are queued, try to dequeue one and release it back
             * into the pool so that _acquire can get to it again. */
            ret = gst_v4l2_buffer_pool_dqbuf (pool, &out);
            if (ret == GST_FLOW_OK)
              /* release the rendered buffer back into the pool. This wakes up any
               * thread waiting for a buffer in _acquire(). */
              gst_buffer_unref (out);
          }
          break;
        }
        default:
          g_assert_not_reached ();
          break;
      }
      break;
    default:
      g_assert_not_reached ();
      break;
  }
done:
  return ret;

  /* ERRORS */
copy_failed:
  {
    GST_ERROR_OBJECT (pool, "failed to copy buffer");
    return ret;
  }
eos:
  {
    GST_DEBUG_OBJECT (pool, "end of stream reached");
    return GST_FLOW_EOS;
  }
acquire_failed:
  {
    if (ret == GST_FLOW_FLUSHING)
      GST_DEBUG_OBJECT (pool, "flushing");
    else
      GST_WARNING_OBJECT (pool, "failed to acquire a buffer: %s",
          gst_flow_get_name (ret));
    return ret;
  }
prepare_failed:
  {
    GST_ERROR_OBJECT (pool, "failed to prepare data");
    return ret;
  }
queue_failed:
  {
    GST_ERROR_OBJECT (pool, "failed to queue buffer");
    return ret;
  }
start_failed:
  {
    GST_ERROR_OBJECT (pool, "failed to start streaming");
    return GST_FLOW_ERROR;
  }
}
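
Every branch above ends up following the same acquire contract: make sure the pool is active, call gst_buffer_pool_acquire_buffer(), check the GstFlowReturn, and eventually give the buffer back with gst_buffer_unref(). A minimal sketch of that contract, assuming `pool` is an already-configured GstBufferPool (the helper name is hypothetical, not part of any example here):

#include <gst/gst.h>

/* Hypothetical helper distilling the acquire pattern used throughout
 * these examples; `pool` is assumed to be already configured. */
static GstBuffer *
acquire_from_pool (GstBufferPool * pool, gboolean dont_wait)
{
  GstBufferPoolAcquireParams params = { 0, };
  GstBuffer *buf = NULL;
  GstFlowReturn ret;

  /* skip activation when the pool is already active, as several of the
   * examples in this collection do */
  if (!gst_buffer_pool_is_active (pool) &&
      !gst_buffer_pool_set_active (pool, TRUE))
    return NULL;

  if (dont_wait)
    params.flags = GST_BUFFER_POOL_ACQUIRE_FLAG_DONTWAIT;

  ret = gst_buffer_pool_acquire_buffer (pool, &buf,
      dont_wait ? &params : NULL);
  if (ret != GST_FLOW_OK || buf == NULL)
    return NULL;                /* GST_FLOW_FLUSHING is a normal shutdown path */

  return buf;                   /* hand back to the pool with gst_buffer_unref () */
}
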
Example #21
static GstBuffer *
gst_kms_sink_get_input_buffer (GstKMSSink * self, GstBuffer * inbuf)
{
  GstMemory *mem;
  GstBuffer *buf;
  GstFlowReturn ret;
  GstVideoFrame inframe, outframe;
  gboolean success;

  mem = gst_buffer_peek_memory (inbuf, 0);
  if (!mem)
    return NULL;

  if (gst_is_kms_memory (mem))
    return gst_buffer_ref (inbuf);

  buf = NULL;
  if (gst_kms_sink_import_dmabuf (self, inbuf, &buf))
    return buf;

  GST_CAT_INFO_OBJECT (CAT_PERFORMANCE, self, "frame copy");

  if (!gst_buffer_pool_set_active (self->pool, TRUE))
    goto activate_pool_failed;

  ret = gst_buffer_pool_acquire_buffer (self->pool, &buf, NULL);
  if (ret != GST_FLOW_OK)
    goto create_buffer_failed;

  if (!gst_video_frame_map (&inframe, &self->vinfo, inbuf, GST_MAP_READ))
    goto error_map_src_buffer;

  if (!gst_video_frame_map (&outframe, &self->vinfo, buf, GST_MAP_WRITE))
    goto error_map_dst_buffer;

  success = gst_video_frame_copy (&outframe, &inframe);
  gst_video_frame_unmap (&outframe);
  gst_video_frame_unmap (&inframe);
  if (!success)
    goto error_copy_buffer;

  return buf;

bail:
  {
    if (buf)
      gst_buffer_unref (buf);
    return NULL;
  }

  /* ERRORS */
activate_pool_failed:
  {
    GST_ELEMENT_ERROR (self, STREAM, FAILED, ("failed to activate buffer pool"),
        ("failed to activate buffer pool"));
    goto bail;
  }
create_buffer_failed:
  {
    GST_ELEMENT_ERROR (self, STREAM, FAILED, ("allocation failed"),
        ("failed to create buffer"));
    goto bail;
  }
error_copy_buffer:
  {
    GST_WARNING_OBJECT (self, "failed to upload buffer");
    goto bail;
  }
error_map_dst_buffer:
  {
    gst_video_frame_unmap (&inframe);
    /* fall-through */
  }
error_map_src_buffer:
  {
    GST_WARNING_OBJECT (self, "failed to map buffer");
    goto bail;
  }
}
Example #22
static GstFlowReturn
gst_mir_sink_render (GstBaseSink * bsink, GstBuffer * buffer)
{
    GstMirSink *sink = GST_MIR_SINK (bsink);
    //GstVideoRectangle src, dst, res;
    GstBuffer *to_render;
    GstMirMeta *meta;
    //GstFlowReturn ret;

    GST_DEBUG_OBJECT (sink, "render buffer %p", buffer);

    meta = gst_buffer_get_mir_meta (buffer);

    if (meta && meta->sink == sink) {
        GST_LOG_OBJECT (sink, "buffer %p from our pool, writing directly", buffer);
        to_render = buffer;
    } else {
        //GstMapInfo src;
        GST_LOG_OBJECT (sink, "buffer %p not from our pool, copying", buffer);
        to_render = buffer;

#if 0
        if (!sink->pool)
            goto no_pool;

        if (!gst_buffer_pool_set_active (sink->pool, TRUE))
            goto activate_failed;

        ret = gst_buffer_pool_acquire_buffer (sink->pool, &to_render, NULL);
        if (ret != GST_FLOW_OK)
            goto no_buffer;

        gst_buffer_map (buffer, &src, GST_MAP_READ);
        gst_buffer_fill (to_render, 0, src.data, src.size);
        gst_buffer_unmap (buffer, &src);

        meta = gst_buffer_get_mir_meta (to_render);
#endif
    }

    g_signal_emit (G_OBJECT (bsink), frame_ready_signal, 0);

#if 0
    src.w = sink->video_width;
    src.h = sink->video_height;
    dst.w = sink->window->width;
    dst.h = sink->window->height;

    gst_video_sink_center_rect (src, dst, &res, FALSE);
#endif

    if (buffer != to_render)
        gst_buffer_unref (to_render);
    return GST_FLOW_OK;

#if 0
no_buffer:
    {
        GST_WARNING_OBJECT (sink, "could not create image");
        return ret;
    }
no_pool:
    {
        GST_ELEMENT_ERROR (sink, RESOURCE, WRITE,
                           ("Internal error: can't allocate images"),
                           ("We don't have a bufferpool negotiated"));
        return GST_FLOW_ERROR;
    }
activate_failed:
    {
        GST_ERROR_OBJECT (sink, "failed to activate bufferpool.");
        ret = GST_FLOW_ERROR;
        return ret;
    }
#endif
}
static GstFlowReturn
gst_y4m_dec_chain (GstPad * pad, GstObject * parent, GstBuffer * buffer)
{
  GstY4mDec *y4mdec;
  int n_avail;
  GstFlowReturn flow_ret = GST_FLOW_OK;
#define MAX_HEADER_LENGTH 80
  char header[MAX_HEADER_LENGTH];
  int i;
  int len;

  y4mdec = GST_Y4M_DEC (parent);

  GST_DEBUG_OBJECT (y4mdec, "chain");

  if (GST_BUFFER_IS_DISCONT (buffer)) {
    GST_DEBUG ("got discont");
    gst_adapter_clear (y4mdec->adapter);
  }

  gst_adapter_push (y4mdec->adapter, buffer);
  n_avail = gst_adapter_available (y4mdec->adapter);

  if (!y4mdec->have_header) {
    gboolean ret;
    GstCaps *caps;
    GstQuery *query;

    if (n_avail < MAX_HEADER_LENGTH)
      return GST_FLOW_OK;

    gst_adapter_copy (y4mdec->adapter, (guint8 *) header, 0, MAX_HEADER_LENGTH);

    header[MAX_HEADER_LENGTH - 1] = 0;
    for (i = 0; i < MAX_HEADER_LENGTH; i++) {
      if (header[i] == 0x0a)
        header[i] = 0;
    }

    ret = gst_y4m_dec_parse_header (y4mdec, header);
    if (!ret) {
      GST_ELEMENT_ERROR (y4mdec, STREAM, DECODE,
          ("Failed to parse YUV4MPEG header"), (NULL));
      return GST_FLOW_ERROR;
    }

    y4mdec->header_size = strlen (header) + 1;
    gst_adapter_flush (y4mdec->adapter, y4mdec->header_size);

    caps = gst_video_info_to_caps (&y4mdec->info);
    ret = gst_pad_set_caps (y4mdec->srcpad, caps);

    query = gst_query_new_allocation (caps, FALSE);
    y4mdec->video_meta = FALSE;

    if (y4mdec->pool) {
      gst_buffer_pool_set_active (y4mdec->pool, FALSE);
      gst_object_unref (y4mdec->pool);
    }
    y4mdec->pool = NULL;

    if (gst_pad_peer_query (y4mdec->srcpad, query)) {
      y4mdec->video_meta =
          gst_query_find_allocation_meta (query, GST_VIDEO_META_API_TYPE, NULL);

      /* We only need a pool if we need to do stride conversion for downstream */
      if (!y4mdec->video_meta && memcmp (&y4mdec->info, &y4mdec->out_info,
              sizeof (y4mdec->info)) != 0) {
        GstBufferPool *pool = NULL;
        GstAllocator *allocator = NULL;
        GstAllocationParams params;
        GstStructure *config;
        guint size, min, max;

        if (gst_query_get_n_allocation_params (query) > 0) {
          gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
        } else {
          allocator = NULL;
          gst_allocation_params_init (&params);
        }

        if (gst_query_get_n_allocation_pools (query) > 0) {
          gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, &min,
              &max);
          size = MAX (size, y4mdec->out_info.size);
        } else {
          pool = NULL;
          size = y4mdec->out_info.size;
          min = max = 0;
        }

        if (pool == NULL) {
          pool = gst_video_buffer_pool_new ();
        }

        config = gst_buffer_pool_get_config (pool);
        gst_buffer_pool_config_set_params (config, caps, size, min, max);
        gst_buffer_pool_config_set_allocator (config, allocator, &params);
        gst_buffer_pool_set_config (pool, config);

        if (allocator)
          gst_object_unref (allocator);

        y4mdec->pool = pool;
      }
    } else if (memcmp (&y4mdec->info, &y4mdec->out_info,
            sizeof (y4mdec->info)) != 0) {
      GstBufferPool *pool;
      GstStructure *config;

      /* No pool, create our own if we need to do stride conversion */
      pool = gst_video_buffer_pool_new ();
      config = gst_buffer_pool_get_config (pool);
      gst_buffer_pool_config_set_params (config, caps, y4mdec->out_info.size, 0,
          0);
      gst_buffer_pool_set_config (pool, config);
      y4mdec->pool = pool;
    }
    if (y4mdec->pool) {
      gst_buffer_pool_set_active (y4mdec->pool, TRUE);
    }
    gst_query_unref (query);
    gst_caps_unref (caps);
    if (!ret) {
      GST_DEBUG_OBJECT (y4mdec, "Couldn't set caps on src pad");
      return GST_FLOW_ERROR;
    }

    y4mdec->have_header = TRUE;
  }

  if (y4mdec->have_new_segment) {
    GstEvent *event;
    GstClockTime start = gst_y4m_dec_bytes_to_timestamp (y4mdec,
        y4mdec->segment.start);
    GstClockTime stop = gst_y4m_dec_bytes_to_timestamp (y4mdec,
        y4mdec->segment.stop);
    GstClockTime time = gst_y4m_dec_bytes_to_timestamp (y4mdec,
        y4mdec->segment.time);
    GstSegment seg;

    gst_segment_init (&seg, GST_FORMAT_TIME);
    seg.start = start;
    seg.stop = stop;
    seg.time = time;
    event = gst_event_new_segment (&seg);

    gst_pad_push_event (y4mdec->srcpad, event);
    //gst_event_unref (event);

    y4mdec->have_new_segment = FALSE;
    y4mdec->frame_index = gst_y4m_dec_bytes_to_frames (y4mdec,
        y4mdec->segment.time);
    GST_DEBUG ("new frame_index %d", y4mdec->frame_index);

  }

  while (1) {
    n_avail = gst_adapter_available (y4mdec->adapter);
    if (n_avail < MAX_HEADER_LENGTH)
      break;

    gst_adapter_copy (y4mdec->adapter, (guint8 *) header, 0, MAX_HEADER_LENGTH);
    header[MAX_HEADER_LENGTH - 1] = 0;
    for (i = 0; i < MAX_HEADER_LENGTH; i++) {
      if (header[i] == 0x0a)
        header[i] = 0;
    }
    if (memcmp (header, "FRAME", 5) != 0) {
      GST_ELEMENT_ERROR (y4mdec, STREAM, DECODE,
          ("Failed to parse YUV4MPEG frame"), (NULL));
      flow_ret = GST_FLOW_ERROR;
      break;
    }

    len = strlen (header);
    if (n_avail < y4mdec->info.size + len + 1) {
      /* not enough data */
      GST_DEBUG ("not enough data for frame %d < %" G_GSIZE_FORMAT,
          n_avail, y4mdec->info.size + len + 1);
      break;
    }

    gst_adapter_flush (y4mdec->adapter, len + 1);

    buffer = gst_adapter_take_buffer (y4mdec->adapter, y4mdec->info.size);

    GST_BUFFER_TIMESTAMP (buffer) =
        gst_y4m_dec_frames_to_timestamp (y4mdec, y4mdec->frame_index);
    GST_BUFFER_DURATION (buffer) =
        gst_y4m_dec_frames_to_timestamp (y4mdec, y4mdec->frame_index + 1) -
        GST_BUFFER_TIMESTAMP (buffer);

    y4mdec->frame_index++;

    if (y4mdec->video_meta) {
      gst_buffer_add_video_meta_full (buffer, 0, y4mdec->info.finfo->format,
          y4mdec->info.width, y4mdec->info.height, y4mdec->info.finfo->n_planes,
          y4mdec->info.offset, y4mdec->info.stride);
    } else if (memcmp (&y4mdec->info, &y4mdec->out_info,
            sizeof (y4mdec->info)) != 0) {
      GstBuffer *outbuf;
      GstVideoFrame iframe, oframe;
      gint i, j;
      gint w, h, istride, ostride;
      guint8 *src, *dest;

      /* Allocate a new buffer and do stride conversion */
      g_assert (y4mdec->pool != NULL);

      flow_ret = gst_buffer_pool_acquire_buffer (y4mdec->pool, &outbuf, NULL);
      if (flow_ret != GST_FLOW_OK) {
        gst_buffer_unref (buffer);
        break;
      }

      gst_video_frame_map (&iframe, &y4mdec->info, buffer, GST_MAP_READ);
      gst_video_frame_map (&oframe, &y4mdec->out_info, outbuf, GST_MAP_WRITE);

      for (i = 0; i < 3; i++) {
        w = GST_VIDEO_FRAME_COMP_WIDTH (&iframe, i);
        h = GST_VIDEO_FRAME_COMP_HEIGHT (&iframe, i);
        istride = GST_VIDEO_FRAME_COMP_STRIDE (&iframe, i);
        ostride = GST_VIDEO_FRAME_COMP_STRIDE (&oframe, i);
        src = GST_VIDEO_FRAME_COMP_DATA (&iframe, i);
        dest = GST_VIDEO_FRAME_COMP_DATA (&oframe, i);

        for (j = 0; j < h; j++) {
          memcpy (dest, src, w);

          dest += ostride;
          src += istride;
        }
      }

      gst_video_frame_unmap (&iframe);
      gst_video_frame_unmap (&oframe);
      gst_buffer_copy_into (outbuf, buffer, GST_BUFFER_COPY_TIMESTAMPS, 0, -1);
      gst_buffer_unref (buffer);
      buffer = outbuf;
    }

    flow_ret = gst_pad_push (y4mdec->srcpad, buffer);
    if (flow_ret != GST_FLOW_OK)
      break;
  }

  GST_DEBUG ("returning %d", flow_ret);

  return flow_ret;
}
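The per-plane memcpy() loop above does by hand what gst_video_frame_copy() does internally: walk each plane row by row while honoring both frames' strides and offsets. A shorter sketch of the same stride conversion using that helper (the function name is hypothetical; both buffers are assumed to hold the same video format):

#include <gst/video/video.h>

/* Hypothetical stride-converting copy equivalent to the manual loop
 * above; in_info / out_info are assumed to describe the same format. */
static gboolean
copy_with_stride_conversion (GstVideoInfo * in_info, GstBuffer * inbuf,
    GstVideoInfo * out_info, GstBuffer * outbuf)
{
  GstVideoFrame iframe, oframe;
  gboolean ok;

  if (!gst_video_frame_map (&iframe, in_info, inbuf, GST_MAP_READ))
    return FALSE;

  if (!gst_video_frame_map (&oframe, out_info, outbuf, GST_MAP_WRITE)) {
    gst_video_frame_unmap (&iframe);
    return FALSE;
  }

  /* gst_video_frame_copy () respects per-plane strides and offsets */
  ok = gst_video_frame_copy (&oframe, &iframe);

  gst_video_frame_unmap (&oframe);
  gst_video_frame_unmap (&iframe);
  return ok;
}
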
static int
gst_droidcamsrc_stream_window_dequeue_buffer (struct preview_stream_ops *w,
    buffer_handle_t ** buffer, int *stride)
{
  GstDroidCamSrcStreamWindow *win;
  GstBuffer *buff;
  GstFlowReturn ret;
  int trials;
  GstBufferPoolAcquireParams params = { 0, };
  GstMemory *mem;
  struct ANativeWindowBuffer *native;
  int res;

  GST_DEBUG ("dequeue buffer %p", buffer);

  win = container_of (w, GstDroidCamSrcStreamWindow, window);

  g_mutex_lock (&win->lock);

retry:
  GST_DEBUG ("needs reconfigure? %d", win->needs_reconfigure);

  if (!win->pool || win->needs_reconfigure) {
    /* create and re/configure the pool */
    gst_droidcamsrc_stream_window_reset_buffer_pool_locked (win);
  }

  if (!win->pool) {
    GST_ERROR ("failed to create buffer pool");
    res = -1;
    goto unlock_and_exit;
  }

  buff = NULL;
  mem = NULL;
  trials = ACQUIRE_BUFFER_TRIALS;
  params.flags = GST_BUFFER_POOL_ACQUIRE_FLAG_DONTWAIT;

  while (trials > 0) {
    ret =
        gst_buffer_pool_acquire_buffer (GST_BUFFER_POOL (win->pool), &buff,
        &params);
    if (ret == GST_FLOW_OK) {
      /* we have our buffer */
      break;
    } else if (ret == GST_FLOW_ERROR || ret == GST_FLOW_FLUSHING) {
      /* no point in waiting */
      break;
    }

    /* we need to unlock here to allow buffers to be returned back */
    g_mutex_unlock (&win->lock);
    usleep (ACQUIRE_BUFFER_TIMEOUT);
    g_mutex_lock (&win->lock);
    if (win->needs_reconfigure) {
      /* pool was reconfigured; retry from the top */
      goto retry;
    }

    --trials;
  }

  if (buff) {
    /* handover */
    mem = gst_buffer_peek_memory (buff, 0);
  } else if (ret == GST_FLOW_FLUSHING) {
    GST_INFO ("pool is flushing");
  } else {
    GST_WARNING ("failed to get a buffer");
  }

  if (!mem) {
    GST_ERROR ("no buffer memory found");

    res = -1;
    goto unlock_and_exit;
  }

  native = gst_memory_get_native_buffer (mem);
  if (!native) {
    GST_ERROR ("invalid buffer");
    gst_buffer_unref (buff);

    res = -1;
    goto unlock_and_exit;
  }

  *buffer = &native->handle;
  *stride = native->stride;

  GST_LOG ("dequeue buffer done %p", *buffer);

  res = 0;

unlock_and_exit:
  g_mutex_unlock (&win->lock);
  return res;
}
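The dequeue function above pairs GST_BUFFER_POOL_ACQUIRE_FLAG_DONTWAIT with a bounded retry loop, releasing its lock between attempts so in-flight buffers can be returned to the pool. A condensed sketch of that idea (the helper name, MAX_TRIALS, and RETRY_DELAY_US are assumptions for illustration, not part of the original code):

#include <gst/gst.h>

#define MAX_TRIALS 4            /* assumed retry budget */
#define RETRY_DELAY_US 10000    /* assumed delay between attempts */

/* Hypothetical non-blocking acquire with retries, modeled on the
 * dequeue_buffer loop above. */
static GstFlowReturn
acquire_with_retries (GstBufferPool * pool, GMutex * lock, GstBuffer ** buf)
{
  GstBufferPoolAcquireParams params = { 0, };
  GstFlowReturn ret = GST_FLOW_ERROR;
  int trials = MAX_TRIALS;

  params.flags = GST_BUFFER_POOL_ACQUIRE_FLAG_DONTWAIT;

  while (trials-- > 0) {
    ret = gst_buffer_pool_acquire_buffer (pool, buf, &params);
    if (ret == GST_FLOW_OK || ret == GST_FLOW_ERROR || ret == GST_FLOW_FLUSHING)
      break;                    /* success, or no point in waiting */

    /* unlock so buffers held elsewhere can be returned to the pool */
    g_mutex_unlock (lock);
    g_usleep (RETRY_DELAY_US);
    g_mutex_lock (lock);
  }

  return ret;
}
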
/**
 * gst_vaapi_plugin_base_get_input_buffer:
 * @plugin: a #GstVaapiPluginBase
 * @inbuf: the sink pad (input) buffer
 * @outbuf_ptr: pointer to the location that receives the VA surface backed buffer
 *
 * Acquires the sink pad (input) buffer as a VA surface backed
 * buffer. This is mostly useful for raw YUV buffers, as source
 * buffers that are already backed as a VA surface are passed
 * verbatim.
 *
 * Returns: #GST_FLOW_OK if the buffer could be acquired
 */
GstFlowReturn
gst_vaapi_plugin_base_get_input_buffer (GstVaapiPluginBase * plugin,
    GstBuffer * inbuf, GstBuffer ** outbuf_ptr)
{
  GstVaapiVideoMeta *meta;
  GstBuffer *outbuf;
  GstVideoFrame src_frame, out_frame;
  gboolean success;

  g_return_val_if_fail (inbuf != NULL, GST_FLOW_ERROR);
  g_return_val_if_fail (outbuf_ptr != NULL, GST_FLOW_ERROR);

  meta = gst_buffer_get_vaapi_video_meta (inbuf);
  if (meta) {
    *outbuf_ptr = gst_buffer_ref (inbuf);
    return GST_FLOW_OK;
  }

  if (!plugin->sinkpad_caps_is_raw)
    goto error_invalid_buffer;

  if (!plugin->sinkpad_buffer_pool)
    goto error_no_pool;

  if (!gst_buffer_pool_set_active (plugin->sinkpad_buffer_pool, TRUE))
    goto error_active_pool;

  outbuf = NULL;
  if (gst_buffer_pool_acquire_buffer (plugin->sinkpad_buffer_pool,
          &outbuf, NULL) != GST_FLOW_OK)
    goto error_create_buffer;

  if (is_dma_buffer (inbuf)) {
    if (!plugin_bind_dma_to_vaapi_buffer (plugin, inbuf, outbuf))
      goto error_bind_dma_buffer;
    goto done;
  }

  if (!gst_video_frame_map (&src_frame, &plugin->sinkpad_info, inbuf,
          GST_MAP_READ))
    goto error_map_src_buffer;

  if (!gst_video_frame_map (&out_frame, &plugin->sinkpad_info, outbuf,
          GST_MAP_WRITE))
    goto error_map_dst_buffer;

  success = gst_video_frame_copy (&out_frame, &src_frame);
  gst_video_frame_unmap (&out_frame);
  gst_video_frame_unmap (&src_frame);
  if (!success)
    goto error_copy_buffer;

done:
  gst_buffer_copy_into (outbuf, inbuf, GST_BUFFER_COPY_FLAGS |
      GST_BUFFER_COPY_TIMESTAMPS, 0, -1);
  *outbuf_ptr = outbuf;
  return GST_FLOW_OK;

  /* ERRORS */
error_no_pool:
  {
    GST_ELEMENT_ERROR (plugin, STREAM, FAILED,
        ("no buffer pool was negotiated"), ("no buffer pool was negotiated"));
    return GST_FLOW_ERROR;
  }
error_active_pool:
  {
    GST_ELEMENT_ERROR (plugin, STREAM, FAILED,
        ("failed to activate buffer pool"), ("failed to activate buffer pool"));
    return GST_FLOW_ERROR;
  }
error_map_dst_buffer:
  {
    gst_video_frame_unmap (&src_frame);
    /* fall-through */
  }
error_map_src_buffer:
  {
    GST_WARNING ("failed to map buffer");
    gst_buffer_unref (outbuf);
    return GST_FLOW_NOT_SUPPORTED;
  }

error_invalid_buffer:
  {
    GST_ELEMENT_ERROR (plugin, STREAM, FAILED,
        ("failed to validate source buffer"),
        ("failed to validate source buffer"));
    return GST_FLOW_ERROR;
  }
error_create_buffer:
  {
    GST_ELEMENT_ERROR (plugin, STREAM, FAILED, ("Allocation failed"),
        ("failed to create buffer"));
    return GST_FLOW_ERROR;
  }
error_bind_dma_buffer:
  {
    GST_ELEMENT_ERROR (plugin, STREAM, FAILED, ("Allocation failed"),
        ("failed to bind dma_buf to VA surface buffer"));
    gst_buffer_unref (outbuf);
    return GST_FLOW_ERROR;
  }
error_copy_buffer:
  {
    GST_WARNING ("failed to upload buffer to VA surface");
    gst_buffer_unref (outbuf);
    return GST_FLOW_NOT_SUPPORTED;
  }
}
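Given the contract documented above, a caller only needs to check the flow return and unref the buffer it receives; VA surface backed input costs nothing but an extra ref. A hedged caller-side sketch (the wrapper function name is hypothetical, and the gstvaapipluginbase.h declarations are assumed to be available):

/* Hypothetical caller of gst_vaapi_plugin_base_get_input_buffer (),
 * relying only on the contract documented above. */
static GstFlowReturn
process_input (GstVaapiPluginBase * plugin, GstBuffer * inbuf)
{
  GstBuffer *outbuf = NULL;
  GstFlowReturn ret;

  ret = gst_vaapi_plugin_base_get_input_buffer (plugin, inbuf, &outbuf);
  if (ret != GST_FLOW_OK)
    return ret;                 /* e.g. GST_FLOW_NOT_SUPPORTED on a map/copy failure */

  /* ... use outbuf as a VA surface backed buffer here ... */

  gst_buffer_unref (outbuf);
  return GST_FLOW_OK;
}
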
Example #26
gboolean gst_imx_blitter_set_input_frame(GstImxBlitter *blitter, GstBuffer *frame)
{
	gboolean ret;
	GstImxPhysMemMeta *phys_mem_meta;
	GstImxBlitterClass *klass;

	g_assert(blitter != NULL);
	klass = GST_IMX_BLITTER_CLASS(G_OBJECT_GET_CLASS(blitter));
	g_assert(klass->set_input_frame != NULL);

	if (frame == NULL)
		return klass->set_input_frame(blitter, NULL);

	phys_mem_meta = GST_IMX_PHYS_MEM_META_GET(frame);

	if ((phys_mem_meta == NULL) || (phys_mem_meta->phys_addr == 0))
	{
		GstFlowReturn flow_ret;
		GstBuffer *internal_input_frame;

		/* No DMA memory present; the input frame needs to be copied to an internal input frame */

		GST_TRACE_OBJECT(blitter, "input frame does not use DMA memory - copying input frame to internal frame");

		{
			if (blitter->dma_bufferpool == NULL)
			{
				GST_TRACE_OBJECT(blitter, "need to create internal bufferpool");

				/* DMA bufferpool does not exist yet - create it now,
				 * so that it can in turn create the internal input frame */

				GstCaps *caps = gst_video_info_to_caps(&(blitter->input_video_info));

				blitter->dma_bufferpool = gst_imx_blitter_create_bufferpool(
					blitter,
					caps,
					blitter->input_video_info.size,
					0, 0,
					NULL,
					NULL
				);

				gst_caps_unref(caps);

				if (blitter->dma_bufferpool == NULL)
				{
					GST_ERROR_OBJECT(blitter, "failed to create internal bufferpool");
					return FALSE;
				}
			}

			/* Future versions of this code may propose the internal bufferpool upstream;
			 * hence the is_active check */
			if (!gst_buffer_pool_is_active(blitter->dma_bufferpool))
				gst_buffer_pool_set_active(blitter->dma_bufferpool, TRUE);
		}

		/* Create new internal input frame */
		GST_TRACE_OBJECT(blitter, "acquiring buffer for internal input frame");
		internal_input_frame = NULL;
		flow_ret = gst_buffer_pool_acquire_buffer(blitter->dma_bufferpool, &internal_input_frame, NULL);
		if (flow_ret != GST_FLOW_OK)
		{
			if (internal_input_frame != NULL)
				gst_buffer_unref(internal_input_frame);

			GST_ERROR_OBJECT(blitter, "error acquiring input frame buffer: %s", gst_flow_get_name(flow_ret));
			return FALSE;
		}

		/* Copy the input buffer's pixels to the internal input frame */
		{
			GstVideoFrame input_vidframe, internal_input_vidframe;

			gst_video_frame_map(&input_vidframe, &(blitter->input_video_info), frame, GST_MAP_READ);
			gst_video_frame_map(&internal_input_vidframe, &(blitter->input_video_info), internal_input_frame, GST_MAP_WRITE);

			/* gst_video_frame_copy() makes sure stride and plane offset values from both frames are respected */
			gst_video_frame_copy(&internal_input_vidframe, &input_vidframe);

			/* copy interlace flags */
			GST_BUFFER_FLAGS(internal_input_frame) |= (GST_BUFFER_FLAGS(frame) & (GST_VIDEO_BUFFER_FLAG_INTERLACED | GST_VIDEO_BUFFER_FLAG_TFF | GST_VIDEO_BUFFER_FLAG_RFF | GST_VIDEO_BUFFER_FLAG_ONEFIELD));

			gst_video_frame_unmap(&internal_input_vidframe);
			gst_video_frame_unmap(&input_vidframe);
		}

		ret = klass->set_input_frame(blitter, internal_input_frame);

		gst_buffer_unref(internal_input_frame);
	}
	else
	{
		GST_TRACE_OBJECT(blitter, "input frame uses DMA memory - setting it directly as input frame");
		ret = klass->set_input_frame(blitter, frame);
	}

	return ret;
}
Example #27
static GstFlowReturn
gst_wayland_sink_render (GstBaseSink * bsink, GstBuffer * buffer)
{
  GstWaylandSink *sink = GST_WAYLAND_SINK (bsink);
  GstVideoRectangle src, dst, res;
  GstBuffer *to_render;
  GstWlMeta *meta;
  GstFlowReturn ret;
  struct window *window;
  struct display *display;

  GST_LOG_OBJECT (sink, "render buffer %p", buffer);
  if (!sink->window)
    create_window (sink, sink->display, sink->video_width, sink->video_height);

  window = sink->window;
  display = sink->display;

  meta = gst_buffer_get_wl_meta (buffer);

  if (window->redraw_pending) {
    wl_display_dispatch (display->display);
  }

  if (meta && meta->sink == sink) {
    GST_LOG_OBJECT (sink, "buffer %p from our pool, writing directly", buffer);
    to_render = buffer;
  } else {
    GstMapInfo src;
    GST_LOG_OBJECT (sink, "buffer %p not from our pool, copying", buffer);

    if (!sink->pool)
      goto no_pool;

    if (!gst_buffer_pool_set_active (sink->pool, TRUE))
      goto activate_failed;

    ret = gst_buffer_pool_acquire_buffer (sink->pool, &to_render, NULL);
    if (ret != GST_FLOW_OK)
      goto no_buffer;

    gst_buffer_map (buffer, &src, GST_MAP_READ);
    gst_buffer_fill (to_render, 0, src.data, src.size);
    gst_buffer_unmap (buffer, &src);

    meta = gst_buffer_get_wl_meta (to_render);
  }

  src.w = sink->video_width;
  src.h = sink->video_height;
  dst.w = sink->window->width;
  dst.h = sink->window->height;

  gst_video_sink_center_rect (src, dst, &res, FALSE);

  wl_surface_attach (sink->window->surface, meta->wbuffer, 0, 0);
  wl_surface_damage (sink->window->surface, 0, 0, res.w, res.h);
  window->redraw_pending = TRUE;
  window->callback = wl_surface_frame (window->surface);
  wl_callback_add_listener (window->callback, &frame_callback_listener, window);
  wl_surface_commit (window->surface);
  wl_display_dispatch (display->display);

  if (buffer != to_render)
    gst_buffer_unref (to_render);
  return GST_FLOW_OK;

no_buffer:
  {
    GST_WARNING_OBJECT (sink, "could not create image");
    return ret;
  }
no_pool:
  {
    GST_ELEMENT_ERROR (sink, RESOURCE, WRITE,
        ("Internal error: can't allocate images"),
        ("We don't have a bufferpool negotiated"));
    return GST_FLOW_ERROR;
  }
activate_failed:
  {
    GST_ERROR_OBJECT (sink, "failed to activate bufferpool.");
    ret = GST_FLOW_ERROR;
    return ret;
  }
}
Example #28
static GstFlowReturn
gst_goom_chain (GstPad * pad, GstObject * parent, GstBuffer * buffer)
{
  GstGoom *goom;
  GstFlowReturn ret;
  GstBuffer *outbuf = NULL;

  goom = GST_GOOM (parent);
  if (goom->bps == 0) {
    gst_buffer_unref (buffer);
    ret = GST_FLOW_NOT_NEGOTIATED;
    goto beach;
  }

  /* Make sure have an output format */
  ret = ensure_negotiated (goom);
  if (ret != GST_FLOW_OK) {
    gst_buffer_unref (buffer);
    goto beach;
  }

  /* don't try to combine samples from discont buffer */
  if (GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DISCONT)) {
    gst_adapter_clear (goom->adapter);
  }

  GST_DEBUG_OBJECT (goom,
      "Input buffer has %" G_GSIZE_FORMAT " samples, time=%" G_GUINT64_FORMAT,
      gst_buffer_get_size (buffer) / goom->bps, GST_BUFFER_TIMESTAMP (buffer));

  /* Collect samples until we have enough for an output frame */
  gst_adapter_push (goom->adapter, buffer);

  ret = GST_FLOW_OK;

  while (TRUE) {
    const guint16 *data;
    guchar *out_frame;
    gint i;
    guint avail, to_flush;
    guint64 dist, timestamp;

    avail = gst_adapter_available (goom->adapter);
    GST_DEBUG_OBJECT (goom, "avail now %u", avail);

    /* we need GOOM_SAMPLES to get a meaningful result from goom. */
    if (avail < (GOOM_SAMPLES * goom->bps))
      break;

    /* we also need at least enough samples to produce one frame */
    if (avail < goom->bpf)
      break;

    GST_DEBUG_OBJECT (goom, "processing buffer");

    /* get timestamp of the current adapter byte */
    timestamp = gst_adapter_prev_timestamp (goom->adapter, &dist);
    if (GST_CLOCK_TIME_IS_VALID (timestamp)) {
      /* convert bytes to time */
      dist /= goom->bps;
      timestamp += gst_util_uint64_scale_int (dist, GST_SECOND, goom->rate);
    }

    if (GST_CLOCK_TIME_IS_VALID (timestamp)) {
      gint64 qostime;
      gboolean need_skip;

      qostime = gst_segment_to_running_time (&goom->segment, GST_FORMAT_TIME,
          timestamp) + goom->duration;

      GST_OBJECT_LOCK (goom);
      /* check for QoS, don't compute buffers that are known to be late */
      need_skip = goom->earliest_time != -1 && qostime <= goom->earliest_time;
      GST_OBJECT_UNLOCK (goom);

      if (need_skip) {
        GST_WARNING_OBJECT (goom,
            "QoS: skip ts: %" GST_TIME_FORMAT ", earliest: %" GST_TIME_FORMAT,
            GST_TIME_ARGS (qostime), GST_TIME_ARGS (goom->earliest_time));
        goto skip;
      }
    }

    /* get next GOOM_SAMPLES, we have at least this amount of samples */
    data =
        (const guint16 *) gst_adapter_map (goom->adapter,
        GOOM_SAMPLES * goom->bps);

    if (goom->channels == 2) {
      for (i = 0; i < GOOM_SAMPLES; i++) {
        goom->datain[0][i] = *data++;
        goom->datain[1][i] = *data++;
      }
    } else {
      for (i = 0; i < GOOM_SAMPLES; i++) {
        goom->datain[0][i] = *data;
        goom->datain[1][i] = *data++;
      }
    }

    /* alloc a buffer if we don't have one yet, this happens
     * when we pushed a buffer in this while loop before */
    if (outbuf == NULL) {
      GST_DEBUG_OBJECT (goom, "allocating output buffer");
      ret = gst_buffer_pool_acquire_buffer (goom->pool, &outbuf, NULL);
      if (ret != GST_FLOW_OK) {
        gst_adapter_unmap (goom->adapter);
        goto beach;
      }
    }

    GST_BUFFER_TIMESTAMP (outbuf) = timestamp;
    GST_BUFFER_DURATION (outbuf) = goom->duration;

    out_frame = (guchar *) goom_update (goom->plugin, goom->datain, 0, 0);
    gst_buffer_fill (outbuf, 0, out_frame, goom->outsize);

    gst_adapter_unmap (goom->adapter);

    GST_DEBUG ("Pushing frame with time=%" GST_TIME_FORMAT ", duration=%"
        GST_TIME_FORMAT, GST_TIME_ARGS (timestamp),
        GST_TIME_ARGS (goom->duration));

    ret = gst_pad_push (goom->srcpad, outbuf);
    outbuf = NULL;

  skip:
    /* Now flush the samples we needed for this frame, which might be more than
     * the samples we used (GOOM_SAMPLES). */
    to_flush = goom->bpf;

    GST_DEBUG_OBJECT (goom, "finished frame, flushing %u bytes from input",
        to_flush);
    gst_adapter_flush (goom->adapter, to_flush);

    if (ret != GST_FLOW_OK)
      break;
  }

  if (outbuf != NULL)
    gst_buffer_unref (outbuf);

beach:

  return ret;
}
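A detail worth isolating from the loop above: gst_pad_push() takes ownership of outbuf, which is why the code clears the pointer after pushing and keeps a trailing unref for a buffer that was acquired but skipped. A minimal sketch of the per-frame acquire-fill-push cycle under that ownership rule (have_frame_data() and fill_frame() are hypothetical stand-ins for the element's real adapter logic):

#include <gst/gst.h>

/* Assumed stand-ins for the element's adapter logic */
extern gboolean have_frame_data (void);
extern void fill_frame (GstBuffer * buf);

static GstFlowReturn
push_frames (GstPad * srcpad, GstBufferPool * pool)
{
  GstFlowReturn ret = GST_FLOW_OK;

  while (have_frame_data ()) {
    GstBuffer *outbuf = NULL;

    ret = gst_buffer_pool_acquire_buffer (pool, &outbuf, NULL);
    if (ret != GST_FLOW_OK)
      break;

    fill_frame (outbuf);

    /* gst_pad_push () takes ownership of outbuf; do not unref it here */
    ret = gst_pad_push (srcpad, outbuf);
    if (ret != GST_FLOW_OK)
      break;
  }

  return ret;
}

The goom version keeps a single outbuf alive across QoS-skipped iterations, so it additionally NULLs the pointer after each push and unrefs any leftover buffer on exit.
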
Example #29
static GstFlowReturn gst_fsl_vpu_base_enc_handle_frame(GstVideoEncoder *encoder, GstVideoCodecFrame *frame)
{
	VpuEncRetCode enc_ret;
	VpuEncEncParam enc_enc_param;
	GstFslPhysMemMeta *phys_mem_meta;
	GstFslVpuBaseEncClass *klass;
	GstFslVpuBaseEnc *vpu_base_enc;
	VpuFrameBuffer input_framebuf;
	GstBuffer *input_buffer;

	vpu_base_enc = GST_FSL_VPU_BASE_ENC(encoder);
	klass = GST_FSL_VPU_BASE_ENC_CLASS(G_OBJECT_GET_CLASS(vpu_base_enc));

	g_assert(klass->set_frame_enc_params != NULL);

	memset(&enc_enc_param, 0, sizeof(enc_enc_param));
	memset(&input_framebuf, 0, sizeof(input_framebuf));

	phys_mem_meta = GST_FSL_PHYS_MEM_META_GET(frame->input_buffer);

	if (phys_mem_meta == NULL)
	{
		GstVideoFrame temp_input_video_frame, temp_incoming_video_frame;

		if (vpu_base_enc->internal_input_buffer == NULL)
		{
			/* The internal input buffer is the temp input frame's DMA memory.
			 * If it does not exist yet, it needs to be created here. The temp input
			 * frame is then mapped. */

			GstFlowReturn flow_ret;

			if (vpu_base_enc->internal_bufferpool == NULL)
			{
				/* Internal bufferpool does not exist yet - create it now,
				 * so that it can in turn create the internal input buffer */

				GstStructure *config;
				GstCaps *caps;
				GstAllocator *allocator;

				caps = gst_video_info_to_caps(&(vpu_base_enc->video_info));
				vpu_base_enc->internal_bufferpool = gst_fsl_phys_mem_buffer_pool_new(FALSE);
				allocator = gst_fsl_vpu_enc_allocator_obtain();

				config = gst_buffer_pool_get_config(vpu_base_enc->internal_bufferpool);
				gst_buffer_pool_config_set_params(config, caps, vpu_base_enc->video_info.size, 2, 0);
				gst_buffer_pool_config_set_allocator(config, allocator, NULL);
				gst_buffer_pool_config_add_option(config, GST_BUFFER_POOL_OPTION_FSL_PHYS_MEM);
				gst_buffer_pool_config_add_option(config, GST_BUFFER_POOL_OPTION_VIDEO_META);
				gst_buffer_pool_set_config(vpu_base_enc->internal_bufferpool, config);

				gst_caps_unref(caps);

				if (vpu_base_enc->internal_bufferpool == NULL)
				{
					GST_ERROR_OBJECT(vpu_base_enc, "failed to create internal bufferpool");
					return GST_FLOW_ERROR;
				}
			}

			/* Future versions of this code may propose the internal bufferpool upstream;
			 * hence the is_active check */
			if (!gst_buffer_pool_is_active(vpu_base_enc->internal_bufferpool))
				gst_buffer_pool_set_active(vpu_base_enc->internal_bufferpool, TRUE);

			/* Create the internal input buffer */
			flow_ret = gst_buffer_pool_acquire_buffer(vpu_base_enc->internal_bufferpool, &(vpu_base_enc->internal_input_buffer), NULL);
			if (flow_ret != GST_FLOW_OK)
			{
				GST_ERROR_OBJECT(vpu_base_enc, "error acquiring input frame buffer: %s", gst_flow_get_name(flow_ret));
				return GST_FLOW_ERROR;
			}
		}

		gst_video_frame_map(&temp_incoming_video_frame, &(vpu_base_enc->video_info), frame->input_buffer, GST_MAP_READ);
		gst_video_frame_map(&temp_input_video_frame, &(vpu_base_enc->video_info), vpu_base_enc->internal_input_buffer, GST_MAP_WRITE);

		gst_video_frame_copy(&temp_input_video_frame, &temp_incoming_video_frame);

		gst_video_frame_unmap(&temp_incoming_video_frame);
		gst_video_frame_unmap(&temp_input_video_frame);

		input_buffer = vpu_base_enc->internal_input_buffer;
		phys_mem_meta = GST_FSL_PHYS_MEM_META_GET(vpu_base_enc->internal_input_buffer);
	}
	else
		input_buffer = frame->input_buffer;

	{
		gsize *plane_offsets;
		gint *plane_strides;
		GstVideoMeta *video_meta;
		unsigned char *phys_ptr;

		video_meta = gst_buffer_get_video_meta(input_buffer);
		if (video_meta != NULL)
		{
			plane_offsets = video_meta->offset;
			plane_strides = video_meta->stride;
		}
		else
		{
			plane_offsets = vpu_base_enc->video_info.offset;
			plane_strides = vpu_base_enc->video_info.stride;
		}

		phys_ptr = (unsigned char*)(phys_mem_meta->phys_addr);

		input_framebuf.pbufY = phys_ptr;
		input_framebuf.pbufCb = phys_ptr + plane_offsets[1];
		input_framebuf.pbufCr = phys_ptr + plane_offsets[2];
		input_framebuf.pbufMvCol = NULL;
		input_framebuf.nStrideY = plane_strides[0];
		input_framebuf.nStrideC = plane_strides[1];

		GST_TRACE_OBJECT(vpu_base_enc, "width: %d   height: %d   stride 0: %d   stride 1: %d   offset 0: %d   offset 1: %d   offset 2: %d", GST_VIDEO_INFO_WIDTH(&(vpu_base_enc->video_info)), GST_VIDEO_INFO_HEIGHT(&(vpu_base_enc->video_info)), plane_strides[0], plane_strides[1], plane_offsets[0], plane_offsets[1], plane_offsets[2]);

		if (vpu_base_enc->framebuffers == NULL)
		{
			GstFslVpuFramebufferParams fbparams;
			gst_fsl_vpu_framebuffers_enc_init_info_to_params(&(vpu_base_enc->init_info), &fbparams);
			fbparams.pic_width = vpu_base_enc->open_param.nPicWidth;
			fbparams.pic_height = vpu_base_enc->open_param.nPicHeight;
			vpu_base_enc->framebuffers = gst_fsl_vpu_framebuffers_new(&fbparams, gst_fsl_vpu_enc_allocator_obtain());
			gst_fsl_vpu_framebuffers_register_with_encoder(vpu_base_enc->framebuffers, vpu_base_enc->handle, plane_strides[0]);
		}

		if (vpu_base_enc->output_phys_buffer == NULL)
		{
			vpu_base_enc->output_phys_buffer = (GstFslPhysMemory *)gst_allocator_alloc(gst_fsl_vpu_enc_allocator_obtain(), vpu_base_enc->framebuffers->total_size, NULL);

			if (vpu_base_enc->output_phys_buffer == NULL)
			{
				GST_ERROR_OBJECT(vpu_base_enc, "could not allocate physical buffer for output data");
				return GST_FLOW_ERROR;
			}
		}
	}

	enc_enc_param.nInVirtOutput = (unsigned int)(vpu_base_enc->output_phys_buffer->mapped_virt_addr); /* TODO */
	enc_enc_param.nInPhyOutput = (unsigned int)(vpu_base_enc->output_phys_buffer->phys_addr);
	enc_enc_param.nInOutputBufLen = vpu_base_enc->output_phys_buffer->mem.size;
	enc_enc_param.nPicWidth = vpu_base_enc->framebuffers->pic_width;
	enc_enc_param.nPicHeight = vpu_base_enc->framebuffers->pic_height;
	enc_enc_param.nFrameRate = vpu_base_enc->open_param.nFrameRate;
	enc_enc_param.pInFrame = &input_framebuf;

	if (!klass->set_frame_enc_params(vpu_base_enc, &enc_enc_param, &(vpu_base_enc->open_param)))
	{
		GST_ERROR_OBJECT(vpu_base_enc, "derived class could not set frame enc params");
		return GST_FLOW_ERROR;
	}

	enc_ret = VPU_EncEncodeFrame(vpu_base_enc->handle, &enc_enc_param);
	if (enc_ret != VPU_ENC_RET_SUCCESS)
	{
		GST_ERROR_OBJECT(vpu_base_enc, "failed to encode frame: %s", gst_fsl_vpu_strerror(enc_ret));
		VPU_EncReset(vpu_base_enc->handle);
		return GST_FLOW_ERROR;
	}

	GST_LOG_OBJECT(vpu_base_enc, "out ret code: 0x%x  out size: %u", enc_enc_param.eOutRetCode, enc_enc_param.nOutOutputSize);

	if ((enc_enc_param.eOutRetCode & VPU_ENC_OUTPUT_DIS) || (enc_enc_param.eOutRetCode & VPU_ENC_OUTPUT_SEQHEADER))
	{
		gst_video_encoder_allocate_output_frame(encoder, frame, enc_enc_param.nOutOutputSize);
		gst_buffer_fill(frame->output_buffer, 0, vpu_base_enc->output_phys_buffer->mapped_virt_addr, enc_enc_param.nOutOutputSize);
		gst_video_encoder_finish_frame(encoder, frame);
	}

	return GST_FLOW_OK;
}
Example #30
static gboolean
gst_mpeg2dec_decide_allocation (GstVideoDecoder * decoder, GstQuery * query)
{
  GstMpeg2dec *dec = GST_MPEG2DEC (decoder);
  GstBufferPool *pool;
  guint size, min, max;
  GstStructure *config, *down_config = NULL;
  GstAllocator *allocator;
  GstAllocationParams params;
  gboolean update_allocator;
  gboolean has_videometa = FALSE;
  GstCaps *caps;

  /* Get rid of ancient pool */
  if (dec->downstream_pool) {
    gst_buffer_pool_set_active (dec->downstream_pool, FALSE);
    gst_object_unref (dec->downstream_pool);
    dec->downstream_pool = NULL;
  }

  /* Get negotiated allocation caps */
  gst_query_parse_allocation (query, &caps, NULL);

  /* Set allocation parameters to guarantee 16-byte aligned output buffers */
  if (gst_query_get_n_allocation_params (query) > 0) {
    gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
    update_allocator = TRUE;
  } else {
    allocator = NULL;
    gst_allocation_params_init (&params);
    update_allocator = FALSE;
  }

  params.align = MAX (params.align, 15);

  if (update_allocator)
    gst_query_set_nth_allocation_param (query, 0, allocator, &params);
  else
    gst_query_add_allocation_param (query, allocator, &params);

  /* Now chain up to the parent class to guarantee that we can
   * get a buffer pool from the query */
  if (!GST_VIDEO_DECODER_CLASS (parent_class)->decide_allocation (decoder,
          query)) {
    if (allocator)
      gst_object_unref (allocator);
    return FALSE;
  }

  gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, &min, &max);

  config = gst_buffer_pool_get_config (pool);
  if (gst_query_find_allocation_meta (query, GST_VIDEO_META_API_TYPE, NULL)) {
    gst_buffer_pool_config_add_option (config,
        GST_BUFFER_POOL_OPTION_VIDEO_META);
    has_videometa = TRUE;
  }

  if (dec->need_alignment) {
    /* If downstream does not support video meta, we will have to copy; keep
     * the downstream pool to avoid double copying */
    if (!has_videometa) {
      dec->downstream_pool = pool;
      pool = NULL;
      down_config = config;
      config = NULL;
      min = 2;
      max = 0;
    }

    /* In case downstream support video meta, but the downstream pool does not
     * have alignment support, discard downstream pool and use video pool */
    else if (!gst_buffer_pool_has_option (pool,
            GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT)) {
      gst_object_unref (pool);
      pool = NULL;
      gst_structure_free (config);
      config = NULL;
    }

    if (!pool)
      pool = gst_mpeg2dec_create_generic_pool (allocator, &params, caps, size,
          min, max, &config);

    gst_buffer_pool_config_add_option (config,
        GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT);
    gst_buffer_pool_config_set_video_alignment (config, &dec->valign);
  }

  if (allocator)
    gst_object_unref (allocator);

  /* If we are copying out, we'll need to setup and activate the other pool */
  if (dec->downstream_pool) {
    if (!gst_buffer_pool_set_config (dec->downstream_pool, down_config)) {
      down_config = gst_buffer_pool_get_config (dec->downstream_pool);
      if (!gst_buffer_pool_config_validate_params (down_config, caps, size, min,
              max)) {
        gst_structure_free (down_config);
        goto config_failed;
      }

      if (!gst_buffer_pool_set_config (dec->downstream_pool, down_config))
        goto config_failed;
    }

    if (!gst_buffer_pool_set_active (dec->downstream_pool, TRUE))
      goto activate_failed;
  }

  /* Now configure the pool, if the pool had made some changes, it will
   * return FALSE. Validate the changes ...*/
  if (!gst_buffer_pool_set_config (pool, config)) {
    config = gst_buffer_pool_get_config (pool);

    /* Check basic params */
    if (!gst_buffer_pool_config_validate_params (config, caps, size, min, max)) {
      gst_structure_free (config);
      goto config_failed;
    }

    /* If needed, check that resulting alignment is still valid */
    if (dec->need_alignment) {
      GstVideoAlignment valign;

      if (!gst_buffer_pool_config_get_video_alignment (config, &valign)) {
        gst_structure_free (config);
        goto config_failed;
      }

      if (valign.padding_left != 0 || valign.padding_top != 0
          || valign.padding_right < dec->valign.padding_right
          || valign.padding_bottom < dec->valign.padding_bottom) {
        gst_structure_free (config);
        goto config_failed;
      }
    }

    if (!gst_buffer_pool_set_config (pool, config))
      goto config_failed;
  }

  /* For external pools, we need to check strides */
  if (!GST_IS_VIDEO_BUFFER_POOL (pool) && has_videometa) {
    GstBuffer *buffer;
    const GstVideoFormatInfo *finfo;
    GstVideoMeta *vmeta;
    gint uv_stride;

    if (!gst_buffer_pool_set_active (pool, TRUE))
      goto activate_failed;

    if (gst_buffer_pool_acquire_buffer (pool, &buffer, NULL) != GST_FLOW_OK) {
      gst_buffer_pool_set_active (pool, FALSE);
      goto acquire_failed;
    }

    vmeta = gst_buffer_get_video_meta (buffer);
    finfo = gst_video_format_get_info (vmeta->format);

    /* Check that strides are compatible. In this case, we can scale the
     * stride directly since all the pixel strides for the formats we support
     * are 1 */
    uv_stride = GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (finfo, 1, vmeta->stride[0]);
    if (uv_stride != vmeta->stride[1] || uv_stride != vmeta->stride[2]) {
      gst_buffer_pool_set_active (pool, FALSE);
      gst_object_unref (pool);

      pool = gst_mpeg2dec_create_generic_pool (allocator, &params, caps, size,
          min, max, &config);

      if (dec->need_alignment) {
        gst_buffer_pool_config_add_option (config,
            GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT);
        gst_buffer_pool_config_set_video_alignment (config, &dec->valign);
      }

      /* The generic pool doesn't fail on _set_config() */
      gst_buffer_pool_set_config (pool, config);
    }

    gst_buffer_unref (buffer);
  }

  gst_query_set_nth_allocation_pool (query, 0, pool, size, min, max);
  gst_object_unref (pool);

  return TRUE;

config_failed:
  gst_object_unref (pool);
  GST_ELEMENT_ERROR (dec, RESOURCE, SETTINGS,
      ("Failed to configure buffer pool"),
      ("Configuration is most likely invalid, please report this issue."));
  return FALSE;

activate_failed:
  gst_object_unref (pool);
  GST_ELEMENT_ERROR (dec, RESOURCE, SETTINGS,
      ("Failed to activate buffer pool"), (NULL));
  return FALSE;

acquire_failed:
  gst_object_unref (pool);
  GST_ELEMENT_ERROR (dec, RESOURCE, SETTINGS,
      ("Failed to acquire a buffer"), (NULL));
  return FALSE;
}