/* GstImxBPVideoAggregator aggregate_frames vfunc implementation.
 *
 * Draws the current frame of every sink pad onto @outbuffer, honoring each
 * pad's canvas, crop metadata, and alpha. Delegates the actual pixel work to
 * the subclass vfuncs set_output_frame / fill_region / draw_frame, all of
 * which must be provided (asserted below).
 *
 * videoaggregator: the aggregator instance (also a GstImxCompositor)
 * outbuffer: output buffer the input frames are composed into
 *
 * Returns: GST_FLOW_OK on success, GST_FLOW_ERROR if the output frame could
 * not be set or a draw_frame call failed.
 */
static GstFlowReturn gst_imx_compositor_aggregate_frames(GstImxBPVideoAggregator *videoaggregator, GstBuffer *outbuffer)
{
	GstFlowReturn ret = GST_FLOW_OK;
	GList *walk;
	GstImxCompositor *compositor = GST_IMX_COMPOSITOR(videoaggregator);
	GstImxCompositorClass *klass = GST_IMX_COMPOSITOR_CLASS(G_OBJECT_GET_CLASS(videoaggregator));

	/* These vfuncs are mandatory for subclasses */
	g_assert(klass->set_output_frame != NULL);
	g_assert(klass->fill_region != NULL);
	g_assert(klass->draw_frame != NULL);

	/* This function is the heart of the compositor. Here, input frames
	 * are drawn on the output frame, with their specific parameters. */

	/* Set the output buffer */
	if (!(klass->set_output_frame(compositor, outbuffer)))
	{
		GST_ERROR_OBJECT(compositor, "could not set the output frame");
		return GST_FLOW_ERROR;
	}

	/* TODO: are the update_overall_region calls here necessary?
	 * If the video aggregator calls update_caps when a pad is added/removed,
	 * there is no need for these calls */

	/* Update the overall region first if necessary to ensure that it is valid
	 * and that the region_fill_necessary flag is set to the proper value */
	gst_imx_compositor_update_overall_region(compositor);

	GST_LOG_OBJECT(compositor, "aggregating frames, region_fill_necessary: %d", (gint)(compositor->region_fill_necessary));

	/* Check if the overall region needs to be filled. This is the case if none
	 * of the input frames completely cover the overall region with 100% alpha
	 * (this is determined by gst_imx_compositor_update_overall_region() )
	 *
	 * NOTE(review): if fill_region() fails, the whole composition below is
	 * skipped, yet GST_FLOW_OK is still returned - confirm this silent
	 * best-effort behavior is intended */
	if (!(compositor->region_fill_necessary) || klass->fill_region(compositor, &(compositor->overall_region), compositor->background_color))
	{
		/* Lock object to ensure nothing is changed during composition */
		GST_OBJECT_LOCK(compositor);

		/* First walk: check if there is a new pad. If so, recompute the
		 * overall region, since it might need to be expanded to encompass
		 * the new additional input frames */
		walk = GST_ELEMENT(videoaggregator)->sinkpads;
		while (walk != NULL)
		{
			GstImxCompositorPad *compositor_pad = GST_IMX_COMPOSITOR_PAD_CAST(walk->data);

			if (compositor_pad->pad_is_new)
			{
				GST_DEBUG_OBJECT(compositor, "there is a new pad; invalidate overall region");

				compositor_pad->pad_is_new = FALSE;
				compositor->overall_region_valid = FALSE;
				/* While this call might seem redundant, there is one
				 * benefit in calling this function apparently twice
				 * (once above, and once here): the earlier call
				 * happens outside of the object lock. New pads are less
				 * common than overall region changes, so it is good
				 * if most update calls happen outside of the object
				 * lock (the overall_region_valid flag ensures redundant
				 * calls don't compute anything). */
				gst_imx_compositor_update_overall_region(compositor);
				break;
			}

			/* Move to next pad */
			walk = g_list_next(walk);
		}

		/* Second walk: draw the input frames on the output frame */
		walk = GST_ELEMENT(videoaggregator)->sinkpads;
		while (walk != NULL)
		{
			GstImxBPVideoAggregatorPad *videoaggregator_pad = walk->data;
			GstImxCompositorPad *compositor_pad = GST_IMX_COMPOSITOR_PAD_CAST(videoaggregator_pad);

			/* If there actually is a buffer, draw it
			 * Sometimes, pads don't deliver data right from the start;
			 * in these cases, their buffers will be NULL
			 * Just skip to the next pad in that case */
			if (videoaggregator_pad->buffer != NULL)
			{
				GstVideoCropMeta *video_crop_meta;
				if (compositor_pad->input_crop && ((video_crop_meta = gst_buffer_get_video_crop_meta(videoaggregator_pad->buffer)) != NULL))
				{
					/* Crop metadata present. Reconfigure canvas. */
					GstVideoInfo *info = &(videoaggregator_pad->info);

					GstImxRegion source_region;
					source_region.x1 = video_crop_meta->x;
					source_region.y1 = video_crop_meta->y;
					source_region.x2 = video_crop_meta->x + video_crop_meta->width;
					source_region.y2 = video_crop_meta->y + video_crop_meta->height;

					/* Make sure the source region does not exceed valid bounds */
					source_region.x1 = MAX(0, source_region.x1);
					source_region.y1 = MAX(0, source_region.y1);
					source_region.x2 = MIN(GST_VIDEO_INFO_WIDTH(info), source_region.x2);
					source_region.y2 = MIN(GST_VIDEO_INFO_HEIGHT(info), source_region.y2);

					GST_LOG_OBJECT(compositor, "retrieved crop rectangle %" GST_IMX_REGION_FORMAT, GST_IMX_REGION_ARGS(&source_region));

					/* Canvas needs to be updated if either one of these applies:
					 * - the current frame has crop metadata, the last one didn't
					 * - the new crop rectangle and the last are different */
					if (!(compositor_pad->last_frame_with_cropdata) || !gst_imx_region_equal(&source_region, &(compositor_pad->last_source_region)))
					{
						GST_LOG_OBJECT(compositor, "using new crop rectangle %" GST_IMX_REGION_FORMAT, GST_IMX_REGION_ARGS(&source_region));
						compositor_pad->last_source_region = source_region;
						compositor_pad->canvas_needs_update = TRUE;
					}

					compositor_pad->last_frame_with_cropdata = TRUE;

					/* Update canvas and input region if necessary */
					if (compositor_pad->canvas_needs_update)
						gst_imx_compositor_pad_update_canvas(compositor_pad, &(compositor_pad->last_source_region));
				}
				else
				{
					/* Force an update if this frame has no crop metadata but the last one did */
					if (compositor_pad->last_frame_with_cropdata)
						compositor_pad->canvas_needs_update = TRUE;
					compositor_pad->last_frame_with_cropdata = FALSE;

					/* Update the pad's canvas if necessary,
					 * to ensure there is a valid canvas to draw to */
					gst_imx_compositor_pad_update_canvas(compositor_pad, NULL);
				}

				GST_LOG_OBJECT(
					compositor,
					"pad %p frame %p format: %s width/height: %d/%d regions: outer %" GST_IMX_REGION_FORMAT " inner %" GST_IMX_REGION_FORMAT " source subset %" GST_IMX_REGION_FORMAT,
					(gpointer)(videoaggregator_pad),
					(gpointer)(videoaggregator_pad->buffer),
					gst_video_format_to_string(GST_VIDEO_INFO_FORMAT(&(videoaggregator_pad->info))),
					GST_VIDEO_INFO_WIDTH(&(videoaggregator_pad->info)),
					GST_VIDEO_INFO_HEIGHT(&(videoaggregator_pad->info)),
					GST_IMX_REGION_ARGS(&(compositor_pad->canvas.outer_region)),
					GST_IMX_REGION_ARGS(&(compositor_pad->canvas.inner_region)),
					GST_IMX_REGION_ARGS(&(compositor_pad->source_subset))
				);

				/* Draw the pad's frame onto the output; alpha is scaled
				 * from the pad's 0.0..1.0 double to a 0..255 byte */
				if (!klass->draw_frame(
					compositor,
					&(videoaggregator_pad->info),
					&(compositor_pad->source_subset),
					&(compositor_pad->canvas),
					videoaggregator_pad->buffer,
					(guint8)(compositor_pad->alpha * 255.0)
				))
				{
					GST_ERROR_OBJECT(compositor, "error while drawing composition frame");
					ret = GST_FLOW_ERROR;
					break;
				}
			}
			else
			{
				GST_LOG_OBJECT(compositor, "pad %p buffer is NULL, no frame to aggregate - skipping to next pad", (gpointer)(videoaggregator_pad));
			}

			/* Move to next pad */
			walk = g_list_next(walk);
		}

		GST_OBJECT_UNLOCK(compositor);
	}

	/* Release the output buffer, since we don't need it anymore, and
	 * there is no reason to retain it */
	klass->set_output_frame(compositor, NULL);

	return ret;
}
/* GstVideoSink::show_frame implementation.
 *
 * Reconfigures the canvas if the buffer's crop metadata changed, then blits
 * @buf to the framebuffer via the sink's blitter. With vsync enabled, a
 * triple-buffered page-flip scheme is used; otherwise the blit goes straight
 * to the visible framebuffer.
 *
 * Returns: always GST_FLOW_OK (a frame with visibility mask 0 is simply
 * skipped; blit errors are not propagated here).
 */
static GstFlowReturn gst_imx_blitter_video_sink_show_frame(GstVideoSink *video_sink, GstBuffer *buf)
{
	GstImxBlitterVideoSink *blitter_video_sink = GST_IMX_BLITTER_VIDEO_SINK_CAST(video_sink);
	GstVideoCropMeta *video_crop_meta;

	/* Everything below touches sink state shared with property setters
	 * and reconfiguration, so hold the sink lock throughout */
	GST_IMX_BLITTER_VIDEO_SINK_LOCK(blitter_video_sink);

	if (blitter_video_sink->input_crop && ((video_crop_meta = gst_buffer_get_video_crop_meta(buf)) != NULL))
	{
		/* Crop metadata present. Reconfigure canvas. */
		GstImxRegion source_region;
		source_region.x1 = video_crop_meta->x;
		source_region.y1 = video_crop_meta->y;
		source_region.x2 = video_crop_meta->x + video_crop_meta->width;
		source_region.y2 = video_crop_meta->y + video_crop_meta->height;

		/* Make sure the source region does not exceed valid bounds */
		source_region.x1 = MAX(0, source_region.x1);
		source_region.y1 = MAX(0, source_region.y1);
		source_region.x2 = MIN(GST_VIDEO_INFO_WIDTH(&(blitter_video_sink->input_video_info)), source_region.x2);
		source_region.y2 = MIN(GST_VIDEO_INFO_HEIGHT(&(blitter_video_sink->input_video_info)), source_region.y2);

		GST_LOG_OBJECT(blitter_video_sink, "retrieved crop rectangle %" GST_IMX_REGION_FORMAT, GST_IMX_REGION_ARGS(&source_region));

		/* Canvas needs to be updated if either one of these applies:
		 * - the current frame has crop metadata, the last one didn't
		 * - the new crop rectangle and the last are different */
		if (!(blitter_video_sink->last_frame_with_cropdata) || !gst_imx_region_equal(&source_region, &(blitter_video_sink->last_source_region)))
		{
			GST_LOG_OBJECT(blitter_video_sink, "using new crop rectangle %" GST_IMX_REGION_FORMAT, GST_IMX_REGION_ARGS(&source_region));
			blitter_video_sink->last_source_region = source_region;
			blitter_video_sink->canvas_needs_update = TRUE;
		}

		blitter_video_sink->last_frame_with_cropdata = TRUE;

		/* Update canvas and input region if necessary */
		if (blitter_video_sink->canvas_needs_update)
			gst_imx_blitter_video_sink_update_canvas(blitter_video_sink, &(blitter_video_sink->last_source_region));
	}
	else
	{
		/* Force an update if this frame has no crop metadata but the last one did */
		if (blitter_video_sink->last_frame_with_cropdata)
			blitter_video_sink->canvas_needs_update = TRUE;
		blitter_video_sink->last_frame_with_cropdata = FALSE;

		/* Update canvas and input region if necessary */
		if (blitter_video_sink->canvas_needs_update)
			gst_imx_blitter_video_sink_update_canvas(blitter_video_sink, NULL);
	}

	if (blitter_video_sink->canvas.visibility_mask == 0)
	{
		/* Visibility mask 0 -> nothing to blit */
		GST_IMX_BLITTER_VIDEO_SINK_UNLOCK(blitter_video_sink);
		return GST_FLOW_OK;
	}

	gst_imx_blitter_set_input_frame(blitter_video_sink->blitter, buf);

	/* If using vsync, blit to the backbuffer, and flip
	 * The flipping is done by scrolling in Y direction
	 * by the same number of rows as there are on screen
	 * The scrolling is implicitly vsync'ed */
	if (blitter_video_sink->use_vsync)
	{
		/* Select which page to write/blit to */
		++blitter_video_sink->old_fb_page;
		blitter_video_sink->old_fb_page %= 3;
		gst_imx_blitter_video_sink_select_fb_page(blitter_video_sink, blitter_video_sink->old_fb_page);

		/* The actual blitting */
		gst_imx_blitter_blit(blitter_video_sink->blitter, 255);

		/* Flush the blitter to make sure it does not use any cached output
		 * information (for example, the physical address of the previously
		 * selected fb page) */
		gst_imx_blitter_flush(blitter_video_sink->blitter);

		/* Move the current_fb_page index to the next page. See the explanation
		 * at the set_property PROP_USE_VSYNC block for the reason why three
		 * pages are expected instead of 2. */
		blitter_video_sink->current_fb_page++;
		blitter_video_sink->current_fb_page %= 3;

		/* Flip pages now */
		gst_imx_blitter_video_sink_flip_to_selected_fb_page(blitter_video_sink);
	}
	else
	{
		/* No vsync: blit directly at full (255) global alpha */
		gst_imx_blitter_blit(blitter_video_sink->blitter, 255);
	}

	GST_IMX_BLITTER_VIDEO_SINK_UNLOCK(blitter_video_sink);

	return GST_FLOW_OK;
}
/* GstVideoSink::show_frame implementation for the KMS sink.
 *
 * Resolves @buf to a DRM framebuffer id, centers it on the display and
 * scans it out via drmModeSetPlane(), then waits for the previous frame's
 * redraw to finish before keeping a ref to the displayed buffer.
 *
 * Returns: GST_FLOW_OK on success, GST_FLOW_ERROR if the buffer has no fb
 * id, the plane could not be set, or sync failed.
 */
static GstFlowReturn
gst_kms_sink_show_frame (GstVideoSink * vsink, GstBuffer * buf)
{
  gint ret;
  GstBuffer *buffer;
  guint32 fb_id;
  GstKMSSink *self;
  GstVideoCropMeta *crop;
  GstVideoRectangle src = { 0, };
  GstVideoRectangle dst = { 0, };
  GstVideoRectangle result;
  GstFlowReturn res;

  self = GST_KMS_SINK (vsink);

  res = GST_FLOW_ERROR;

  /* May return a copy into KMS-accessible memory; we own the ref and
   * release it at bail: */
  buffer = gst_kms_sink_get_input_buffer (self, buf);
  if (!buffer)
    return GST_FLOW_ERROR;
  fb_id = gst_kms_memory_get_fb_id (gst_buffer_peek_memory (buffer, 0));
  if (fb_id == 0)
    goto buffer_invalid;

  GST_TRACE_OBJECT (self, "displaying fb %d", fb_id);

  {
    /* Source rectangle: crop meta when present, else the full sink size */
    if ((crop = gst_buffer_get_video_crop_meta (buffer))) {
      src.x = crop->x;
      src.y = crop->y;
      src.w = crop->width;
      src.h = crop->height;
    } else {
      src.w = GST_VIDEO_SINK_WIDTH (self);
      src.h = GST_VIDEO_SINK_HEIGHT (self);
    }
  }

  dst.w = self->hdisplay;
  dst.h = self->vdisplay;

  /* Center without scaling (FALSE) into the display rectangle */
  gst_video_sink_center_rect (src, dst, &result, FALSE);

  /* if the frame size is bigger than the display size, the source
   * must be the display size */
  src.w = MIN (src.w, self->hdisplay);
  src.h = MIN (src.h, self->vdisplay);

  ret = drmModeSetPlane (self->fd, self->plane_id, self->crtc_id, fb_id, 0,
      result.x, result.y, result.w, result.h,
      /* source/cropping coordinates are given in Q16 */
      src.x << 16, src.y << 16, src.w << 16, src.h << 16);
  if (ret)
    goto set_plane_failed;

  /* Wait for the previous frame to complete redraw */
  if (!gst_kms_sink_sync (self))
    goto bail;

  /* Keep the scanned-out buffer alive until the next frame replaces it */
  gst_buffer_replace (&self->last_buffer, buffer);

  res = GST_FLOW_OK;

bail:
  gst_buffer_unref (buffer);
  return res;

  /* ERRORS */
buffer_invalid:
  {
    GST_ERROR_OBJECT (self, "invalid buffer: it doesn't have a fb id");
    goto bail;
  }
set_plane_failed:
  {
    GST_DEBUG_OBJECT (self, "result = { %d, %d, %d, %d} / "
        "src = { %d, %d, %d %d } / dst = { %d, %d, %d %d }", result.x,
        result.y, result.w, result.h, src.x, src.y, src.w, src.h, dst.x,
        dst.y, dst.w, dst.h);
    /* drmModeSetPlane returns a negative errno, hence strerror (-ret) */
    GST_ELEMENT_ERROR (self, RESOURCE, FAILED,
        (NULL), ("drmModeSetPlane failed: %s (%d)", strerror (-ret), ret));
    goto bail;
  }
}
/* GstBaseTransform::prepare_output_buffer implementation.
 *
 * Decides whether the element can run in passthrough mode (output buffer ==
 * input buffer) or must allocate a real output buffer via the base class.
 * Passthrough requires: a non-NULL input, equal in/out video info, a crop
 * rectangle (if any) covering the whole frame, no inner rotation, and the
 * subclass reporting that no transforms are necessary.
 *
 * Also tracks crop metadata changes between frames and reconfigures the
 * blitter's input region and output canvas when they change.
 *
 * Returns: GST_FLOW_OK in passthrough, otherwise whatever the base class'
 * prepare_output_buffer returns.
 */
static GstFlowReturn gst_imx_blitter_video_transform_prepare_output_buffer(GstBaseTransform *transform, GstBuffer *input, GstBuffer **outbuf)
{
	gboolean passthrough;
	GstImxBlitterVideoTransform *blitter_video_transform = GST_IMX_BLITTER_VIDEO_TRANSFORM(transform);
	GstImxBlitterVideoTransformClass *klass = GST_IMX_BLITTER_VIDEO_TRANSFORM_CLASS(G_OBJECT_GET_CLASS(transform));
	GstVideoCropMeta *video_crop_meta;
	gboolean update_canvas = FALSE;

	/* If either there is no input buffer or in- and output info are not equal,
	 * it is clear there can be no passthrough mode */
	passthrough = (input != NULL) && blitter_video_transform->inout_info_equal;

	GST_IMX_BLITTER_VIDEO_TRANSFORM_LOCK(blitter_video_transform);

	/* Check if cropping needs to be done */
	if ((input != NULL) && blitter_video_transform->input_crop && ((video_crop_meta = gst_buffer_get_video_crop_meta(input)) != NULL))
	{
		GstImxRegion source_region;
		gint in_width, in_height;

		source_region.x1 = video_crop_meta->x;
		source_region.y1 = video_crop_meta->y;
		source_region.x2 = video_crop_meta->x + video_crop_meta->width;
		source_region.y2 = video_crop_meta->y + video_crop_meta->height;

		in_width = GST_VIDEO_INFO_WIDTH(&(blitter_video_transform->input_video_info));
		in_height = GST_VIDEO_INFO_HEIGHT(&(blitter_video_transform->input_video_info));

		/* Make sure the source region does not exceed valid bounds */
		source_region.x1 = MAX(0, source_region.x1);
		source_region.y1 = MAX(0, source_region.y1);
		source_region.x2 = MIN(in_width, source_region.x2);
		source_region.y2 = MIN(in_height, source_region.y2);

		/* If the crop rectangle encompasses the entire frame, cropping is
		 * effectively a no-op, so make it passthrough in that case,
		 * unless passthrough is already FALSE */
		passthrough = passthrough && (source_region.x1 == 0) && (source_region.y1 == 0) && (source_region.x2 == in_width) && (source_region.y2 == in_height);

		GST_LOG_OBJECT(blitter_video_transform, "retrieved crop rectangle %" GST_IMX_REGION_FORMAT, GST_IMX_REGION_ARGS(&source_region));

		/* Canvas needs to be updated if either one of these applies:
		 * - the current frame has crop metadata, the last one didn't
		 * - the new crop rectangle and the last are different */
		if (!(blitter_video_transform->last_frame_with_cropdata) || !gst_imx_region_equal(&source_region, &(blitter_video_transform->last_source_region)))
		{
			GST_LOG_OBJECT(blitter_video_transform, "using new crop rectangle %" GST_IMX_REGION_FORMAT, GST_IMX_REGION_ARGS(&source_region));
			blitter_video_transform->last_source_region = source_region;
			update_canvas = TRUE;
		}

		blitter_video_transform->last_frame_with_cropdata = TRUE;
	}
	else
	{
		/* Force a canvas update if this frame has no crop metadata but the last one did */
		if (blitter_video_transform->last_frame_with_cropdata)
			update_canvas = TRUE;
		blitter_video_transform->last_frame_with_cropdata = FALSE;
	}

	if (update_canvas)
	{
		GstImxRegion source_subset;
		GstImxCanvas *canvas = &(blitter_video_transform->canvas);

		/* Recompute the visible source subset; the crop rectangle is only
		 * passed when the current frame actually carried crop metadata */
		gst_imx_canvas_clip(
			canvas,
			&(canvas->outer_region),
			&(blitter_video_transform->input_video_info),
			blitter_video_transform->last_frame_with_cropdata ? &(blitter_video_transform->last_source_region) : NULL,
			&source_subset
		);

		gst_imx_blitter_set_input_region(blitter_video_transform->blitter, &source_subset);
		gst_imx_blitter_set_output_canvas(blitter_video_transform->blitter, canvas);
	}

	if ((input != NULL) && passthrough)
	{
		/* Test for additional special cases in which passthrough must not be
		 * enabled; such cases are transforms like rotation, deinterlacing ... */
		passthrough = passthrough && (blitter_video_transform->canvas.inner_rotation == GST_IMX_CANVAS_INNER_ROTATION_NONE) && (klass->are_transforms_necessary != NULL) && !(klass->are_transforms_necessary(blitter_video_transform, input));
	}
	else if (!blitter_video_transform->inout_info_equal)
		GST_LOG_OBJECT(transform, "input and output caps are not equal");
	else if (blitter_video_transform->last_frame_with_cropdata && !passthrough)
		GST_LOG_OBJECT(transform, "cropping is performed");
	else if (input == NULL)
		GST_LOG_OBJECT(transform, "input buffer is NULL");

	GST_IMX_BLITTER_VIDEO_TRANSFORM_UNLOCK(blitter_video_transform);

	GST_LOG_OBJECT(transform, "passthrough: %s", passthrough ? "yes" : "no");

	if (passthrough)
	{
		/* This instructs the base class to not allocate a new buffer for
		 * the output, and instead pass the input buffer as the output
		 * (this is used in the transform_frame function below) */
		*outbuf = input;
		return GST_FLOW_OK;
	}
	else
		return GST_BASE_TRANSFORM_CLASS(gst_imx_blitter_video_transform_parent_class)->prepare_output_buffer(transform, input, outbuf);
}
/* GstVideoSink::show_frame implementation for the KMS sink
 * (variant with modesetting support).
 *
 * Resolves @buf to a DRM framebuffer id. When modesetting is enabled the fb
 * is displayed by the CRTC directly (no plane setup); otherwise the frame
 * is centered with aspect-ratio scaling and scanned out via
 * drmModeSetPlane(). A ref to the displayed buffer is kept so the scanout
 * memory stays alive until the next frame.
 *
 * Returns: GST_FLOW_OK on success, GST_FLOW_ERROR on a missing fb id,
 * display-ratio calculation failure, plane setup failure, or sync failure.
 */
static GstFlowReturn
gst_kms_sink_show_frame (GstVideoSink * vsink, GstBuffer * buf)
{
  gint ret;
  GstBuffer *buffer;
  guint32 fb_id;
  GstKMSSink *self;
  GstVideoCropMeta *crop;
  GstVideoRectangle src = { 0, };
  GstVideoRectangle dst = { 0, };
  GstVideoRectangle result;
  GstFlowReturn res;

  self = GST_KMS_SINK (vsink);

  res = GST_FLOW_ERROR;

  /* May return a copy into KMS-accessible memory; we own the ref and
   * release it at bail: */
  buffer = gst_kms_sink_get_input_buffer (self, buf);
  if (!buffer)
    return GST_FLOW_ERROR;
  fb_id = gst_kms_memory_get_fb_id (gst_buffer_peek_memory (buffer, 0));
  if (fb_id == 0)
    goto buffer_invalid;

  GST_TRACE_OBJECT (self, "displaying fb %d", fb_id);

  if (self->modesetting_enabled) {
    /* CRTC scans out the fb directly; skip plane configuration */
    self->buffer_id = fb_id;
    goto sync_frame;
  }

  if ((crop = gst_buffer_get_video_crop_meta (buffer))) {
    /* Recompute the display ratio for the cropped dimensions */
    GstVideoInfo vinfo = self->vinfo;
    vinfo.width = crop->width;
    vinfo.height = crop->height;

    if (!gst_kms_sink_calculate_display_ratio (self, &vinfo))
      goto no_disp_ratio;

    src.x = crop->x;
    src.y = crop->y;
  }

  src.w = GST_VIDEO_SINK_WIDTH (self);
  src.h = GST_VIDEO_SINK_HEIGHT (self);

  dst.w = self->hdisplay;
  dst.h = self->vdisplay;

  /* Center with scaling (TRUE) into the display rectangle */
  gst_video_sink_center_rect (src, dst, &result, TRUE);

  /* The actual source rectangle read from the frame: crop size when
   * cropping, full video size otherwise */
  if (crop) {
    src.w = crop->width;
    src.h = crop->height;
  } else {
    src.w = GST_VIDEO_INFO_WIDTH (&self->vinfo);
    src.h = GST_VIDEO_INFO_HEIGHT (&self->vinfo);
  }

  GST_TRACE_OBJECT (self,
      "drmModeSetPlane at (%i,%i) %ix%i sourcing at (%i,%i) %ix%i",
      result.x, result.y, result.w, result.h, src.x, src.y, src.w, src.h);

  ret = drmModeSetPlane (self->fd, self->plane_id, self->crtc_id, fb_id, 0,
      result.x, result.y, result.w, result.h,
      /* source/cropping coordinates are given in Q16 */
      src.x << 16, src.y << 16, src.w << 16, src.h << 16);
  if (ret)
    goto set_plane_failed;

sync_frame:
  /* Wait for the previous frame to complete redraw */
  if (!gst_kms_sink_sync (self))
    goto bail;

  /* Keep the scanned-out buffer alive until the next frame replaces it */
  gst_buffer_replace (&self->last_buffer, buffer);
  g_clear_pointer (&self->tmp_kmsmem, gst_memory_unref);

  res = GST_FLOW_OK;

bail:
  gst_buffer_unref (buffer);
  return res;

  /* ERRORS */
buffer_invalid:
  {
    GST_ERROR_OBJECT (self, "invalid buffer: it doesn't have a fb id");
    goto bail;
  }
set_plane_failed:
  {
    GST_DEBUG_OBJECT (self, "result = { %d, %d, %d, %d} / "
        "src = { %d, %d, %d %d } / dst = { %d, %d, %d %d }", result.x,
        result.y, result.w, result.h, src.x, src.y, src.w, src.h, dst.x,
        dst.y, dst.w, dst.h);
    /* drmModeSetPlane returns a negative errno, hence strerror (-ret) */
    GST_ELEMENT_ERROR (self, RESOURCE, FAILED,
        (NULL), ("drmModeSetPlane failed: %s (%d)", strerror (-ret), ret));
    goto bail;
  }
no_disp_ratio:
  {
    GST_ELEMENT_ERROR (self, CORE, NEGOTIATION, (NULL),
        ("Error calculating the output display ratio of the video."));
    goto bail;
  }
}
/* Blits @buffer (or, when @buffer is NULL, the last displayed buffer — used
 * for expose/redraw) to the X window through the PVR2D 3D blit engine, then
 * swaps the WSEGL drawable and optionally redraws the window borders.
 *
 * Locking: flow_lock is held for the whole function; x_lock is additionally
 * held around the X/PVR2D drawing section.
 *
 * FIX: the original code jumped to done: on a PVR2DBlt3DExt() failure while
 * dcontext->x_lock was still held, leaving the X lock permanently locked
 * (deadlock on the next frame or expose event). The error path now releases
 * x_lock before bailing out.
 */
static void
gst_pvrvideosink_blit (GstPVRVideoSink * pvrvideosink, GstBuffer * buffer)
{
  PVR2DERROR pvr_error;
  GstDrawContext *dcontext = pvrvideosink->dcontext;
  gint video_width;
  gint video_height;
  gboolean draw_border = FALSE;
  PPVR2D_3DBLT_EXT p_blt_3d;
  PVR2DMEMINFO *src_mem;
  PVR2DFORMAT pvr_format;
  GstVideoRectangle result;
  GstPVRMeta *meta;
  GstVideoCropMeta *cropmeta;

  GST_DEBUG_OBJECT (pvrvideosink, "buffer %p", buffer);

  /* NV12 is blitted as two-plane YUV; everything else as ARGB8888 */
  pvr_format =
      GST_VIDEO_INFO_FORMAT (&pvrvideosink->info) == GST_VIDEO_FORMAT_NV12 ?
      PVR2D_YUV420_2PLANE : PVR2D_ARGB8888;

  g_mutex_lock (pvrvideosink->flow_lock);
  if (buffer == NULL)
    buffer = pvrvideosink->current_buffer;

  if (buffer == NULL)
    goto done;

  meta = gst_buffer_get_pvr_meta (buffer);
  if (G_UNLIKELY (meta == NULL))
    goto no_pvr_meta;

  src_mem = meta->src_mem;
  p_blt_3d = dcontext->p_blt_info;

  video_width = GST_VIDEO_SINK_WIDTH (pvrvideosink);
  video_height = GST_VIDEO_SINK_HEIGHT (pvrvideosink);

  g_mutex_lock (pvrvideosink->dcontext->x_lock);

  /* Draw borders when displaying the first frame. After this
     draw borders only on expose event or after a size change. */
  if (!(pvrvideosink->current_buffer) || pvrvideosink->redraw_borders) {
    draw_border = TRUE;
  }

  /* Store a reference to the last image we put, lose the previous one */
  if (buffer && pvrvideosink->current_buffer != buffer) {
    if (pvrvideosink->current_buffer) {
      GST_LOG_OBJECT (pvrvideosink, "unreffing %p",
          pvrvideosink->current_buffer);
      gst_buffer_unref (GST_BUFFER_CAST (pvrvideosink->current_buffer));
    }
    GST_LOG_OBJECT (pvrvideosink, "reffing %p as our current buffer", buffer);
    pvrvideosink->current_buffer = gst_buffer_ref (buffer);
  }

  /* Destination rectangle: letterboxed into the render rect when keeping
   * the aspect ratio, the full render rect otherwise */
  if (pvrvideosink->keep_aspect) {
    GstVideoRectangle src, dst;

    src.w = GST_VIDEO_SINK_WIDTH (pvrvideosink);
    src.h = GST_VIDEO_SINK_HEIGHT (pvrvideosink);
    dst.w = pvrvideosink->render_rect.w;
    dst.h = pvrvideosink->render_rect.h;
    gst_video_sink_center_rect (src, dst, &result, TRUE);
    result.x += pvrvideosink->render_rect.x;
    result.y += pvrvideosink->render_rect.y;
  } else {
    memcpy (&result, &pvrvideosink->render_rect, sizeof (GstVideoRectangle));
  }

  /* Destination surface: the window's ARGB backbuffer */
  p_blt_3d->sDst.pSurfMemInfo = &dcontext->dst_mem;
  p_blt_3d->sDst.SurfOffset = 0;
  p_blt_3d->sDst.Stride = 4 * pvrvideosink->render_params.ui32Stride;
  p_blt_3d->sDst.Format = PVR2D_ARGB8888;
  p_blt_3d->sDst.SurfWidth = pvrvideosink->xwindow->width;
  p_blt_3d->sDst.SurfHeight = pvrvideosink->xwindow->height;

  p_blt_3d->rcDest.left = result.x;
  p_blt_3d->rcDest.top = result.y;
  p_blt_3d->rcDest.right = result.w + result.x;
  p_blt_3d->rcDest.bottom = result.h + result.y;

  /* Source surface: the buffer's PVR2D mapping */
  p_blt_3d->sSrc.pSurfMemInfo = src_mem;
  p_blt_3d->sSrc.SurfOffset = 0;
  p_blt_3d->sSrc.Stride = GST_VIDEO_INFO_COMP_STRIDE (&pvrvideosink->info, 0);
  p_blt_3d->sSrc.Format = pvr_format;
  p_blt_3d->sSrc.SurfWidth = video_width;
  p_blt_3d->sSrc.SurfHeight = video_height;

  /* If buffer has crop information, use that */
  if ((cropmeta = gst_buffer_get_video_crop_meta (buffer))) {
    p_blt_3d->rcSource.left = cropmeta->x;
    p_blt_3d->rcSource.top = cropmeta->y;
    p_blt_3d->rcSource.right = cropmeta->x + cropmeta->width;
    p_blt_3d->rcSource.bottom = cropmeta->y + cropmeta->height;
  } else {
    p_blt_3d->rcSource.left = 0;
    p_blt_3d->rcSource.top = 0;
    p_blt_3d->rcSource.right = video_width;
    p_blt_3d->rcSource.bottom = video_height;
  }

  p_blt_3d->hUseCode = NULL;

  if (GST_VIDEO_INFO_FORMAT (&pvrvideosink->info) == GST_VIDEO_FORMAT_NV12)
    p_blt_3d->bDisableDestInput = TRUE;
  else
    /* blit fails for RGB without this... not sure why yet... */
    p_blt_3d->bDisableDestInput = FALSE;

  GST_DEBUG_OBJECT (pvrvideosink, "about to blit");

  pvr_error = PVR2DBlt3DExt (pvrvideosink->dcontext->pvr_context,
      dcontext->p_blt_info);

  if (pvr_error != PVR2D_OK) {
    GST_ERROR_OBJECT (pvrvideosink, "Failed to blit. Error : %s",
        gst_pvr2d_error_get_string (pvr_error));
    /* Release the X lock before bailing out; done: only drops flow_lock */
    g_mutex_unlock (pvrvideosink->dcontext->x_lock);
    goto done;
  }
  dcontext->wsegl_table->pfnWSEGL_SwapDrawable (dcontext->drawable_handle, 1);

  if (draw_border) {
    gst_pvrvideosink_xwindow_draw_borders (pvrvideosink, pvrvideosink->xwindow,
        result);
    pvrvideosink->redraw_borders = FALSE;
  }
  g_mutex_unlock (pvrvideosink->dcontext->x_lock);

done:
  GST_DEBUG_OBJECT (pvrvideosink, "end");
  g_mutex_unlock (pvrvideosink->flow_lock);
  return;

  /* Error cases */
no_pvr_meta:
  {
    g_mutex_unlock (pvrvideosink->flow_lock);
    GST_ERROR_OBJECT (pvrvideosink, "Got a buffer without GstPVRMeta");
    return;
  }
}
/**
 * gst_video_convert_sample_async:
 * @sample: a #GstSample
 * @to_caps: the #GstCaps to convert to
 * @timeout: the maximum amount of time allowed for the processing.
 * @callback: #GstVideoConvertSampleCallback that will be called after conversion.
 * @user_data: extra data that will be passed to the @callback
 * @destroy_notify: #GDestroyNotify to be called after @user_data is not needed anymore
 *
 * Converts a raw video buffer into the specified output caps.
 *
 * The output caps can be any raw video formats or any image formats (jpeg, png, ...).
 *
 * The width, height and pixel-aspect-ratio can also be specified in the output caps.
 *
 * @callback will be called after conversion, when an error occurred or if conversion didn't
 * finish after @timeout. @callback will always be called from the thread default
 * #GMainContext, see g_main_context_get_thread_default(). If GLib before 2.22 is used,
 * this will always be the global default main context.
 *
 * @destroy_notify will be called after the callback was called and @user_data is not needed
 * anymore.
 */
void
gst_video_convert_sample_async (GstSample * sample,
    const GstCaps * to_caps, GstClockTime timeout,
    GstVideoConvertSampleCallback callback, gpointer user_data,
    GDestroyNotify destroy_notify)
{
  GMainContext *context = NULL;
  GError *error = NULL;
  GstBus *bus;
  GstBuffer *buf;
  GstCaps *from_caps, *to_caps_copy = NULL;
  GstElement *pipeline, *src, *sink;
  guint i, n;
  GSource *source;
  GstVideoConvertSampleContext *ctx;

  g_return_if_fail (sample != NULL);
  buf = gst_sample_get_buffer (sample);
  g_return_if_fail (buf != NULL);

  g_return_if_fail (to_caps != NULL);

  from_caps = gst_sample_get_caps (sample);
  g_return_if_fail (from_caps != NULL);
  g_return_if_fail (callback != NULL);

  /* The callback is dispatched in the caller's thread-default context;
   * fall back to the global default context when there is none */
  context = g_main_context_get_thread_default ();

  if (!context)
    context = g_main_context_default ();

  /* Remove "framerate" from the target caps: the conversion pipeline
   * produces a single frame, so any framerate restriction is meaningless */
  to_caps_copy = gst_caps_new_empty ();
  n = gst_caps_get_size (to_caps);
  for (i = 0; i < n; i++) {
    GstStructure *s = gst_caps_get_structure (to_caps, i);

    s = gst_structure_copy (s);
    gst_structure_remove_field (s, "framerate");
    gst_caps_append_structure (to_caps_copy, s);
  }

  pipeline =
      build_convert_frame_pipeline (&src, &sink, from_caps,
      gst_buffer_get_video_crop_meta (buf), to_caps_copy, &error);
  if (!pipeline)
    goto no_pipeline;

  bus = gst_element_get_bus (pipeline);

  /* The context owns refs to the sample, main context and pipeline; it is
   * released by the bus/timeout callbacks once conversion finishes */
  ctx = g_slice_new0 (GstVideoConvertSampleContext);
  g_mutex_init (&ctx->mutex);
  ctx->sample = gst_sample_ref (sample);
  ctx->callback = callback;
  ctx->user_data = user_data;
  ctx->destroy_notify = destroy_notify;
  ctx->context = g_main_context_ref (context);
  ctx->finished = FALSE;
  ctx->pipeline = pipeline;

  if (timeout != GST_CLOCK_TIME_NONE) {
    ctx->timeout_source = g_timeout_source_new (timeout / GST_MSECOND);
    g_source_set_callback (ctx->timeout_source,
        (GSourceFunc) convert_frame_timeout_callback, ctx, NULL);
    g_source_attach (ctx->timeout_source, context);
  }

  g_signal_connect (src, "need-data",
      G_CALLBACK (convert_frame_need_data_callback), ctx);
  g_signal_connect (sink, "new-preroll",
      G_CALLBACK (convert_frame_new_preroll_callback), ctx);

  /* Watch the pipeline bus from the caller's context */
  source = gst_bus_create_watch (bus);
  g_source_set_callback (source, (GSourceFunc) convert_frame_bus_callback,
      ctx, NULL);
  g_source_attach (source, context);
  g_source_unref (source);

  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  gst_object_unref (bus);
  gst_caps_unref (to_caps_copy);

  return;

  /* ERRORS */
no_pipeline:
  {
    GstVideoConvertSampleCallbackContext *ctx;
    GSource *source;

    gst_caps_unref (to_caps_copy);

    /* Still deliver the error to the callback, asynchronously, via an
     * idle-style (0 ms timeout) source in the caller's context */
    ctx = g_slice_new0 (GstVideoConvertSampleCallbackContext);
    ctx->callback = callback;
    ctx->user_data = user_data;
    ctx->destroy_notify = destroy_notify;
    ctx->sample = NULL;
    ctx->error = error;

    source = g_timeout_source_new (0);
    g_source_set_callback (source,
        (GSourceFunc) convert_frame_dispatch_callback, ctx,
        (GDestroyNotify) gst_video_convert_frame_callback_context_free);
    g_source_attach (source, context);
    g_source_unref (source);
  }
}
/**
 * gst_video_convert_sample:
 * @sample: a #GstSample
 * @to_caps: the #GstCaps to convert to
 * @timeout: the maximum amount of time allowed for the processing.
 * @error: pointer to a #GError. Can be %NULL.
 *
 * Converts a raw video buffer into the specified output caps.
 *
 * The output caps can be any raw video formats or any image formats (jpeg, png, ...).
 *
 * The width, height and pixel-aspect-ratio can also be specified in the output caps.
 *
 * Returns: The converted #GstSample, or %NULL if an error happened (in which case @err
 * will point to the #GError).
 */
GstSample *
gst_video_convert_sample (GstSample * sample, const GstCaps * to_caps,
    GstClockTime timeout, GError ** error)
{
  GstMessage *msg;
  GstBuffer *buf;
  GstSample *result = NULL;
  GError *err = NULL;
  GstBus *bus;
  GstCaps *from_caps, *to_caps_copy = NULL;
  GstFlowReturn ret;
  GstElement *pipeline, *src, *sink;
  guint i, n;

  g_return_val_if_fail (sample != NULL, NULL);
  g_return_val_if_fail (to_caps != NULL, NULL);

  buf = gst_sample_get_buffer (sample);
  g_return_val_if_fail (buf != NULL, NULL);

  from_caps = gst_sample_get_caps (sample);
  g_return_val_if_fail (from_caps != NULL, NULL);

  /* Remove "framerate" from the target caps: the conversion pipeline
   * produces a single frame, so any framerate restriction is meaningless */
  to_caps_copy = gst_caps_new_empty ();
  n = gst_caps_get_size (to_caps);
  for (i = 0; i < n; i++) {
    GstStructure *s = gst_caps_get_structure (to_caps, i);

    s = gst_structure_copy (s);
    gst_structure_remove_field (s, "framerate");
    gst_caps_append_structure (to_caps_copy, s);
  }

  pipeline =
      build_convert_frame_pipeline (&src, &sink, from_caps,
      gst_buffer_get_video_crop_meta (buf), to_caps_copy, &err);
  if (!pipeline)
    goto no_pipeline;

  /* now set the pipeline to the paused state, after we push the buffer into
   * appsrc, this should preroll the converted buffer in appsink */
  GST_DEBUG ("running conversion pipeline to caps %" GST_PTR_FORMAT,
      to_caps_copy);
  gst_element_set_state (pipeline, GST_STATE_PAUSED);

  /* feed buffer in appsrc */
  GST_DEBUG ("feeding buffer %p, size %" G_GSIZE_FORMAT ", caps %"
      GST_PTR_FORMAT, buf, gst_buffer_get_size (buf), from_caps);
  g_signal_emit_by_name (src, "push-buffer", buf, &ret);

  /* now see what happens. We either got an error somewhere or the pipeline
   * prerolled */
  bus = gst_element_get_bus (pipeline);
  msg = gst_bus_timed_pop_filtered (bus, timeout,
      GST_MESSAGE_ERROR | GST_MESSAGE_ASYNC_DONE);

  if (msg) {
    switch (GST_MESSAGE_TYPE (msg)) {
      case GST_MESSAGE_ASYNC_DONE:
      {
        /* we're prerolled, get the frame from appsink */
        g_signal_emit_by_name (sink, "pull-preroll", &result);

        if (result) {
          GST_DEBUG ("conversion successful: result = %p", result);
        } else {
          GST_ERROR ("prerolled but no result frame?!");
        }
        break;
      }
      case GST_MESSAGE_ERROR:{
        gchar *dbg = NULL;

        gst_message_parse_error (msg, &err, &dbg);
        if (err) {
          GST_ERROR ("Could not convert video frame: %s", err->message);
          GST_DEBUG ("%s [debug: %s]", err->message, GST_STR_NULL (dbg));
          /* Transfer the parsed error to the caller, or drop it */
          if (error)
            *error = err;
          else
            g_error_free (err);
        }
        g_free (dbg);
        break;
      }
      default:{
        /* Only ERROR and ASYNC_DONE were requested from the bus */
        g_return_val_if_reached (NULL);
      }
    }
    gst_message_unref (msg);
  } else {
    GST_ERROR ("Could not convert video frame: timeout during conversion");
    if (error)
      *error = g_error_new (GST_CORE_ERROR, GST_CORE_ERROR_FAILED,
          "Could not convert video frame: timeout during conversion");
  }

  gst_element_set_state (pipeline, GST_STATE_NULL);
  gst_object_unref (bus);
  gst_object_unref (pipeline);
  gst_caps_unref (to_caps_copy);

  return result;

  /* ERRORS */
no_pipeline:
  {
    gst_caps_unref (to_caps_copy);

    if (error)
      *error = err;
    else
      g_error_free (err);

    return NULL;
  }
}
/* Camera HAL preview_stream_ops enqueue_buffer callback.
 *
 * Maps the HAL @buffer handle back to its GstBuffer, updates the buffer's
 * crop metadata from the window's crop rectangle, timestamps it, and pushes
 * it onto the pad queue (waking the pad task).
 *
 * Returns: 0 on success (including the benign "old pool" / "pad not
 * running" cases, where the buffer is simply released), -1 if the handle
 * has no corresponding GstBuffer.
 *
 * FIXES over the previous version:
 * - The "!win->pad->running" path used to jump to unlock_pad_and_out,
 *   unlocking win->pad->queue_lock even though it was never locked on that
 *   path — undefined behaviour for GMutex. It now returns directly.
 * - gst_buffer_get_video_crop_meta() may return NULL; the meta is now
 *   checked before being dereferenced.
 */
static int
gst_droidcamsrc_stream_window_enqueue_buffer (struct preview_stream_ops *w,
    buffer_handle_t * buffer)
{
  GstDroidCamSrcStreamWindow *win;
  GstDroidCamSrc *src;
  GstBuffer *buff;
  int ret;
  GstVideoCropMeta *meta;

  GST_DEBUG ("enqueue buffer %p", buffer);

  win = container_of (w, GstDroidCamSrcStreamWindow, window);

  g_mutex_lock (&win->lock);

  src = GST_DROIDCAMSRC (GST_PAD_PARENT (win->pad->pad));

  buff = gst_droidcamsrc_stream_window_get_buffer (buffer);

  if (!buff) {
    GST_ERROR ("no buffer corresponding to handle %p", buffer);
    ret = -1;
    goto unlock_and_out;
  }

  /* if the buffer pool is not our current pool then just release it */
  if (buff->pool != GST_BUFFER_POOL (win->pool)) {
    GST_DEBUG ("releasing old buffer %p", buffer);
    gst_buffer_unref (buff);
    ret = 0;
    goto unlock_and_out;
  }

  /* now update crop meta (defensively: the meta may be absent) */
  meta = gst_buffer_get_video_crop_meta (buff);
  if (meta != NULL) {
    meta->x = win->left;
    meta->y = win->top;
    meta->width = win->right - win->left;
    meta->height = win->bottom - win->top;
  } else {
    GST_WARNING ("buffer %p has no video crop meta", buff);
  }

  GST_LOG
      ("window width = %d, height = %d, crop info: left = %d, top = %d, right = %d, bottom = %d",
      win->width, win->height, win->left, win->top, win->right, win->bottom);

  g_mutex_unlock (&win->lock);

  /* it should be safe to access that variable without locking.
   * pad gets activated during READY_TO_PAUSED and deactivated during
   * PAUSED_TO_READY while we start the preview during PAUSED_TO_PLAYING
   * and stop it during PLAYING_TO_PAUSED. */
  if (!win->pad->running) {
    gst_buffer_unref (buff);
    GST_DEBUG ("unreffing buffer because pad task is not running");
    /* Neither win->lock nor queue_lock is held here, so return directly
     * (the old goto unlock_pad_and_out unlocked an unheld mutex) */
    return 0;
  }
  // TODO: duration, offset, offset_end ...
  gst_droidcamsrc_timestamp (src, buff);

  g_mutex_lock (&win->pad->queue_lock);
  g_queue_push_tail (win->pad->queue, buff);
  g_cond_signal (&win->pad->cond);
  ret = 0;
  goto unlock_pad_and_out;

unlock_and_out:
  g_mutex_unlock (&win->lock);
  return ret;

unlock_pad_and_out:
  g_mutex_unlock (&win->pad->queue_lock);
  return ret;
}