GstFlowReturn on_new_preroll(GstAppSink *appsink, gpointer user_data)
{
  GstSample *sample = NULL;
  GstBuffer *buffer;
  GstMemory *memory;
  GstMapInfo info;
  GstClockTime clocktime;

  g_debug("on_new_preroll");

  sample = gst_app_sink_pull_sample (appsink);
  if (sample) {
    g_debug("pulled sample\n");
    buffer = gst_sample_get_buffer(sample);
    clocktime = GST_BUFFER_PTS(buffer);

    memory = gst_buffer_get_memory(buffer, 0);
    gst_memory_map(memory, &info, GST_MAP_READ);

    /* You can access raw memory at info.data */
    if (app.output_callback)
      app.output_callback(info.data, info.size);
    //fwrite(info.data, 1, info.size, app.outfile);

    gst_memory_unmap(memory, &info);
    gst_memory_unref(memory);
    gst_sample_unref(sample);
  }

  return GST_FLOW_OK;
}
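For context, a hedged sketch of how a callback like on_new_preroll above might be registered on an appsink; the helper name and the surrounding setup are assumptions, not part of the example (requires <gst/app/gstappsink.h>):

static void
hookup_preroll_callback (GstElement * appsink, gpointer user_data)
{
  GstAppSinkCallbacks callbacks = { 0, };

  /* only the new_preroll hook is set here; eos/new_sample stay NULL */
  callbacks.new_preroll = on_new_preroll;
  gst_app_sink_set_callbacks (GST_APP_SINK (appsink), &callbacks,
      user_data, NULL);
}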
static CMBlockBufferRef
cm_block_buffer_from_gst_buffer (GstBuffer * buf, GstMapFlags flags)
{
  OSStatus status;
  CMBlockBufferRef bbuf;
  CMBlockBufferCustomBlockSource blockSource;
  guint memcount, i;

  /* Initialize custom block source structure */
  blockSource.version = kCMBlockBufferCustomBlockSourceVersion;
  blockSource.AllocateBlock = NULL;
  blockSource.FreeBlock = cm_block_buffer_freeblock;

  /* Determine number of memory blocks */
  memcount = gst_buffer_n_memory (buf);
  status = CMBlockBufferCreateEmpty (NULL, memcount, 0, &bbuf);
  if (status != kCMBlockBufferNoErr) {
    GST_ERROR ("CMBlockBufferCreateEmpty returned %d", (int) status);
    return NULL;
  }

  /* Go over all GstMemory objects and add them to the CMBlockBuffer */
  for (i = 0; i < memcount; ++i) {
    GstMemory *mem;
    GstMapInfo *info;

    mem = gst_buffer_get_memory (buf, i);
    info = g_slice_new (GstMapInfo);
    if (!gst_memory_map (mem, info, flags)) {
      GST_ERROR ("failed mapping memory");
      g_slice_free (GstMapInfo, info);
      gst_memory_unref (mem);
      CFRelease (bbuf);
      return NULL;
    }

    blockSource.refCon = info;
    status = CMBlockBufferAppendMemoryBlock (bbuf, info->data, info->size,
        NULL, &blockSource, 0, info->size, 0);
    if (status != kCMBlockBufferNoErr) {
      GST_ERROR ("CMBlockBufferAppendMemoryBlock returned %d", (int) status);
      gst_memory_unmap (mem, info);
      g_slice_free (GstMapInfo, info);
      gst_memory_unref (mem);
      CFRelease (bbuf);
      return NULL;
    }
  }

  return bbuf;
}
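The FreeBlock callback referenced above is not shown; a plausible sketch (an assumption, not necessarily the project's exact code) would unmap and release the GstMemory stored in the refCon when CoreMedia frees the block:

static void
cm_block_buffer_freeblock (void *refCon, void *doomedMemoryBlock,
    size_t sizeInBytes)
{
  GstMapInfo *info = (GstMapInfo *) refCon;

  /* undo the gst_memory_map()/gst_buffer_get_memory() done when the block
   * was appended, then free the map-info slice */
  gst_memory_unmap (info->memory, info);
  gst_memory_unref (info->memory);
  g_slice_free (GstMapInfo, info);
}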
gboolean
gst_vdp_video_memory_unmap (GstVideoMeta * meta, guint plane, GstMapInfo * info)
{
  GstVdpVideoMemory *vmem =
      (GstVdpVideoMemory *) gst_buffer_get_memory (meta->buffer, 0);

  GST_DEBUG ("plane:%d", plane);

  GST_FIXME ("implement unmap (and potential upload on last unmap)");

  release_data (vmem);

  return TRUE;
}
static gboolean
buffer_list_copy_data (GstBuffer ** buf, guint idx, gpointer data)
{
  GstBuffer *dest = data;
  guint num, i;

  if (idx == 0)
    gst_buffer_copy_into (dest, *buf, GST_BUFFER_COPY_METADATA, 0, -1);

  num = gst_buffer_n_memory (*buf);
  for (i = 0; i < num; ++i) {
    GstMemory *mem;

    mem = gst_buffer_get_memory (*buf, i);
    gst_buffer_append_memory (dest, mem);
  }

  return TRUE;
}
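buffer_list_copy_data matches the GstBufferListFunc signature; a minimal usage sketch (the helper name is hypothetical) that flattens a whole GstBufferList into a single buffer could look like this:

static GstBuffer *
merge_buffer_list (GstBufferList * list)
{
  GstBuffer *merged = gst_buffer_new ();

  /* copies metadata from the first buffer and appends every memory block */
  gst_buffer_list_foreach (list, buffer_list_copy_data, merged);

  return merged;
}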
gboolean
gst_vdp_video_memory_map (GstVideoMeta * meta, guint plane, GstMapInfo * info,
    gpointer * data, gint * stride, GstMapFlags flags)
{
  GstBuffer *buffer = meta->buffer;
  GstVdpVideoMemory *vmem =
      (GstVdpVideoMemory *) gst_buffer_get_memory (buffer, 0);

  /* Only handle GstVdpVideoMemory */
  g_return_val_if_fail (((GstMemory *) vmem)->allocator ==
      _vdp_video_allocator, FALSE);

  GST_DEBUG ("plane:%d", plane);

  /* download if not already done */
  if (!ensure_data (vmem))
    return FALSE;

  *data = vmem->cached_data[plane];
  *stride = vmem->destination_pitches[plane];

  return TRUE;
}
/* called with the object lock held */
static gboolean
gst_gl_stereo_mix_process_frames (GstGLStereoMix * mixer)
{
  GstVideoAggregator *vagg = GST_VIDEO_AGGREGATOR (mixer);
  GstBuffer *converted_buffer, *inbuf;
  GstVideoInfo *out_info = &vagg->info;
#ifndef G_DISABLE_ASSERT
  gint n;
#endif
  gint v, views;
  gint valid_views = 0;
  GList *walk;

  inbuf = gst_buffer_new ();
  walk = GST_ELEMENT (mixer)->sinkpads;
  while (walk) {
    GstGLStereoMixPad *pad = walk->data;
    GstMemory *in_mem;

    GST_LOG_OBJECT (mixer, "Handling frame %d", valid_views);
    if (!pad || !pad->current_buffer) {
      GST_DEBUG ("skipping texture, null frame");
      walk = g_list_next (walk);
      continue;
    }

    in_mem = gst_buffer_get_memory (pad->current_buffer, 0);

    GST_LOG_OBJECT (mixer,
        "Appending memory %" GST_PTR_FORMAT " to intermediate buffer", in_mem);
    /* Appending the memory to a 2nd buffer locks it
     * exclusive a 2nd time, which will mark it for
     * copy-on-write. The ref will keep the memory
     * alive but we add a parent_buffer_meta to also
     * prevent the input buffer from returning to any buffer
     * pool it might belong to */
    gst_buffer_append_memory (inbuf, in_mem);
    /* Use parent buffer meta to keep input buffer alive */
    gst_buffer_add_parent_buffer_meta (inbuf, pad->current_buffer);

    valid_views++;
    walk = g_list_next (walk);
  }

  if (mixer->mix_info.views != valid_views) {
    GST_WARNING_OBJECT (mixer, "Not enough input views to process");
    return FALSE;
  }

  if (GST_VIDEO_INFO_MULTIVIEW_MODE (out_info) ==
      GST_VIDEO_MULTIVIEW_MODE_SEPARATED)
    views = out_info->views;
  else
    views = 1;

  if (gst_gl_view_convert_submit_input_buffer (mixer->viewconvert,
          FALSE, inbuf) != GST_FLOW_OK)
    return FALSE;

  /* Clear any existing buffers, just in case */
  gst_buffer_replace (&mixer->primary_out, NULL);
  gst_buffer_replace (&mixer->auxilliary_out, NULL);

  if (gst_gl_view_convert_get_output (mixer->viewconvert,
          &mixer->primary_out) != GST_FLOW_OK)
    return FALSE;

  if (GST_VIDEO_INFO_MULTIVIEW_MODE (out_info) ==
      GST_VIDEO_MULTIVIEW_MODE_FRAME_BY_FRAME) {
    if (gst_gl_view_convert_get_output (mixer->viewconvert,
            &mixer->auxilliary_out) != GST_FLOW_OK)
      return FALSE;
  }

  if (mixer->primary_out == NULL)
    return FALSE;

  converted_buffer = mixer->primary_out;

#ifndef G_DISABLE_ASSERT
  n = gst_buffer_n_memory (converted_buffer);
  g_assert (n == GST_VIDEO_INFO_N_PLANES (out_info) * views);
#endif

  for (v = 0; v < views; v++) {
    gst_buffer_add_video_meta_full (converted_buffer, v,
        GST_VIDEO_INFO_FORMAT (out_info),
        GST_VIDEO_INFO_WIDTH (out_info),
        GST_VIDEO_INFO_HEIGHT (out_info),
        GST_VIDEO_INFO_N_PLANES (out_info), out_info->offset,
        out_info->stride);
    if (mixer->auxilliary_out) {
      gst_buffer_add_video_meta_full (mixer->auxilliary_out, v,
          GST_VIDEO_INFO_FORMAT (out_info),
          GST_VIDEO_INFO_WIDTH (out_info),
          GST_VIDEO_INFO_HEIGHT (out_info),
          GST_VIDEO_INFO_N_PLANES (out_info), out_info->offset,
          out_info->stride);
    }
  }

  return TRUE;
}
gboolean
_gst_playbin_get_current_frame (GstElement *playbin, int video_fps_n,
    int video_fps_d, FrameReadyCallback cb, gpointer user_data)
{
  ScreenshotData *data;
  GstCaps *to_caps;
  GstSample *sample;
  GstCaps *sample_caps;
  GstStructure *s;
  int outwidth;
  int outheight;

  data = g_new0 (ScreenshotData, 1);
  data->cb = cb;
  data->user_data = user_data;

  /* our desired output format (RGB24) */
  to_caps = gst_caps_new_simple ("video/x-raw",
      "format", G_TYPE_STRING, "RGB",
      /* Note: we don't ask for a specific width/height here, so that
       * videoscale can adjust dimensions from a non-1/1 pixel aspect
       * ratio to a 1/1 pixel-aspect-ratio. We also don't ask for a
       * specific framerate, because the input framerate won't
       * necessarily match the output framerate if there's a deinterlacer
       * in the pipeline. */
      "pixel-aspect-ratio", GST_TYPE_FRACTION, 1, 1, NULL);

  /* get frame */
  sample = NULL;
  g_signal_emit_by_name (playbin, "convert-sample", to_caps, &sample);
  gst_caps_unref (to_caps);

  if (sample == NULL) {
    g_warning ("Could not take screenshot: %s",
        "failed to retrieve or convert video frame");
    screenshot_data_finalize (data);
    return FALSE;
  }

  sample_caps = gst_sample_get_caps (sample);
  if (sample_caps == NULL) {
    g_warning ("Could not take screenshot: %s", "no caps on output buffer");
    /* release the sample and the screenshot data so this error path does
     * not leak them */
    gst_sample_unref (sample);
    screenshot_data_finalize (data);
    return FALSE;
  }

  s = gst_caps_get_structure (sample_caps, 0);
  gst_structure_get_int (s, "width", &outwidth);
  gst_structure_get_int (s, "height", &outheight);
  if ((outwidth > 0) && (outheight > 0)) {
    GstMemory *memory;
    GstMapInfo info;

    memory = gst_buffer_get_memory (gst_sample_get_buffer (sample), 0);
    gst_memory_map (memory, &info, GST_MAP_READ);
    /* the sample passed as destroy-notify data keeps the mapped frame
     * data alive for the lifetime of the pixbuf */
    data->pixbuf = gdk_pixbuf_new_from_data (info.data,
        GDK_COLORSPACE_RGB, FALSE, 8, outwidth, outheight,
        GST_ROUND_UP_4 (outwidth * 3), destroy_pixbuf, sample);
    gst_memory_unmap (memory, &info);
    /* drop the extra reference returned by gst_buffer_get_memory(); the
     * buffer inside the sample still holds the memory */
    gst_memory_unref (memory);
  }

  if (data->pixbuf == NULL)
    g_warning ("Could not take screenshot: %s", "could not create pixbuf");

  screenshot_data_finalize (data);

  return TRUE;
}
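The destroy_pixbuf helper passed to gdk_pixbuf_new_from_data above is not shown; a minimal sketch, under the assumption that it only has to release the sample keeping the frame data alive, would be:

static void
destroy_pixbuf (guchar * pixels, gpointer data)
{
  /* the sample owns the buffer whose mapped data backed the pixbuf */
  gst_sample_unref ((GstSample *) data);
}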
static GstFlowReturn
stereosplit_chain (GstPad * pad, GstGLStereoSplit * split, GstBuffer * buf)
{
  GstBuffer *uploaded_buffer, *converted_buffer, *left, *right;
  GstBuffer *split_buffer = NULL;
  GstFlowReturn ret;
  gint i, n_planes;

  if (!split->upload)
    _init_upload (split);

  n_planes = GST_VIDEO_INFO_N_PLANES (&split->viewconvert->out_info);

  GST_LOG_OBJECT (split, "chaining buffer %" GST_PTR_FORMAT, buf);

  if (GST_GL_UPLOAD_DONE != gst_gl_upload_perform_with_buffer (split->upload,
          buf, &uploaded_buffer)) {
    gst_buffer_unref (buf);
    GST_ELEMENT_ERROR (split, RESOURCE, NOT_FOUND,
        ("%s", "Failed to upload buffer"), (NULL));
    return GST_FLOW_ERROR;
  }
  gst_buffer_unref (buf);

  if (!(converted_buffer =
          gst_gl_color_convert_perform (split->convert, uploaded_buffer))) {
    GST_ELEMENT_ERROR (split, RESOURCE, NOT_FOUND,
        ("%s", "Failed to convert buffer"), (NULL));
    gst_buffer_unref (uploaded_buffer);
    return GST_FLOW_ERROR;
  }
  gst_buffer_unref (uploaded_buffer);

  if (gst_gl_view_convert_submit_input_buffer (split->viewconvert,
          GST_BUFFER_IS_DISCONT (converted_buffer),
          converted_buffer) != GST_FLOW_OK) {
    GST_ELEMENT_ERROR (split, RESOURCE, NOT_FOUND,
        ("%s", "Failed to 3d convert buffer"),
        ("Could not get submit input buffer"));
    return GST_FLOW_ERROR;
  }

  ret = gst_gl_view_convert_get_output (split->viewconvert, &split_buffer);
  if (ret != GST_FLOW_OK) {
    GST_ELEMENT_ERROR (split, RESOURCE, NOT_FOUND,
        ("%s", "Failed to 3d convert buffer"),
        ("Could not get output buffer"));
    return GST_FLOW_ERROR;
  }
  if (split_buffer == NULL)
    return GST_FLOW_OK;         /* Need another input buffer */

  left = gst_buffer_new ();
  gst_buffer_copy_into (left, buf,
      GST_BUFFER_COPY_FLAGS | GST_BUFFER_COPY_TIMESTAMPS, 0, -1);
  GST_BUFFER_FLAG_UNSET (left, GST_VIDEO_BUFFER_FLAG_FIRST_IN_BUNDLE);

  gst_buffer_add_parent_buffer_meta (left, split_buffer);
  for (i = 0; i < n_planes; i++) {
    GstMemory *mem = gst_buffer_get_memory (split_buffer, i);
    gst_buffer_append_memory (left, mem);
  }

  ret = gst_pad_push (split->left_pad, gst_buffer_ref (left));
  /* Allow unlinked on the first pad - as long as the 2nd isn't unlinked */
  gst_buffer_unref (left);
  if (G_UNLIKELY (ret != GST_FLOW_OK && ret != GST_FLOW_NOT_LINKED)) {
    gst_buffer_unref (split_buffer);
    return ret;
  }

  right = gst_buffer_new ();
  gst_buffer_copy_into (right, buf,
      GST_BUFFER_COPY_FLAGS | GST_BUFFER_COPY_TIMESTAMPS, 0, -1);
  GST_BUFFER_FLAG_UNSET (right, GST_VIDEO_BUFFER_FLAG_FIRST_IN_BUNDLE);
  gst_buffer_add_parent_buffer_meta (right, split_buffer);
  for (i = n_planes; i < n_planes * 2; i++) {
    GstMemory *mem = gst_buffer_get_memory (split_buffer, i);
    gst_buffer_append_memory (right, mem);
  }

  ret = gst_pad_push (split->right_pad, gst_buffer_ref (right));
  gst_buffer_unref (right);
  gst_buffer_unref (split_buffer);
  return ret;
}
static GstFlowReturn
gst_multiudpsink_render (GstBaseSink * bsink, GstBuffer * buffer)
{
  GstMultiUDPSink *sink;
  GList *clients;
  GOutputVector *vec;
  GstMapInfo *map;
  guint n_mem, i;
  gsize size;
  GstMemory *mem;
  gint num, no_clients;
  GError *err = NULL;

  sink = GST_MULTIUDPSINK (bsink);

  n_mem = gst_buffer_n_memory (buffer);
  if (n_mem == 0)
    goto no_data;

  /* use the arrays pre-allocated in the sink; the max number of memory
   * blocks per buffer is limited, so these are always large enough */
  vec = sink->vec;
  map = sink->map;

  size = 0;
  for (i = 0; i < n_mem; i++) {
    mem = gst_buffer_get_memory (buffer, i);
    gst_memory_map (mem, &map[i], GST_MAP_READ);

    vec[i].buffer = map[i].data;
    vec[i].size = map[i].size;

    size += map[i].size;
  }
  sink->bytes_to_serve += size;

  /* grab lock while iterating and sending to clients, this should be
   * fast as UDP never blocks */
  g_mutex_lock (&sink->client_lock);
  GST_LOG_OBJECT (bsink, "about to send %" G_GSIZE_FORMAT " bytes in %u blocks",
      size, n_mem);

  no_clients = 0;
  num = 0;
  for (clients = sink->clients; clients; clients = g_list_next (clients)) {
    GstUDPClient *client;
    GSocket *socket;
    GSocketFamily family;
    gint count;

    client = (GstUDPClient *) clients->data;
    no_clients++;
    GST_LOG_OBJECT (sink, "sending %" G_GSIZE_FORMAT " bytes to client %p",
        size, client);

    family = g_socket_address_get_family (G_SOCKET_ADDRESS (client->addr));
    /* Select socket to send from for this address */
    if (family == G_SOCKET_FAMILY_IPV6 || !sink->used_socket)
      socket = sink->used_socket_v6;
    else
      socket = sink->used_socket;

    count = sink->send_duplicates ? client->refcount : 1;

    while (count--) {
      gssize ret;

      ret = g_socket_send_message (socket, client->addr, vec, n_mem,
          NULL, 0, 0, sink->cancellable, &err);

      if (G_UNLIKELY (ret < 0)) {
        if (g_error_matches (err, G_IO_ERROR, G_IO_ERROR_CANCELLED))
          goto flushing;

        /* we continue after posting a warning, next packets might be ok
         * again */
        if (size > UDP_MAX_SIZE) {
          GST_ELEMENT_WARNING (sink, RESOURCE, WRITE,
              ("Attempting to send a UDP packet larger than maximum size "
                  "(%" G_GSIZE_FORMAT " > %d)", size, UDP_MAX_SIZE),
              ("Reason: %s", err ? err->message : "unknown reason"));
        } else {
          GST_ELEMENT_WARNING (sink, RESOURCE, WRITE,
              ("Error sending UDP packet"),
              ("Reason: %s", err ? err->message : "unknown reason"));
        }
        g_clear_error (&err);
      } else {
        num++;
        client->bytes_sent += ret;
        client->packets_sent++;
        sink->bytes_served += ret;
      }
    }
  }
  g_mutex_unlock (&sink->client_lock);

  /* unmap all memory again */
  for (i = 0; i < n_mem; i++) {
    gst_memory_unmap (map[i].memory, &map[i]);
    gst_memory_unref (map[i].memory);
  }

  GST_LOG_OBJECT (sink, "sent %" G_GSIZE_FORMAT " bytes to %d (of %d) clients",
      size, num, no_clients);

  return GST_FLOW_OK;

no_data:
  {
    return GST_FLOW_OK;
  }
flushing:
  {
    GST_DEBUG ("we are flushing");
    g_mutex_unlock (&sink->client_lock);
    g_clear_error (&err);

    /* unmap all memory */
    for (i = 0; i < n_mem; i++) {
      gst_memory_unmap (map[i].memory, &map[i]);
      gst_memory_unref (map[i].memory);
    }

    return GST_FLOW_FLUSHING;
  }
}
static GstFlowReturn
gst_multiudpsink_render (GstBaseSink * bsink, GstBuffer * buffer)
{
  GstMultiUDPSink *sink;
  GList *clients;
  GOutputVector *vec;
  GstMapInfo *map;
  guint n_mem, i;
  gsize size;
  GstMemory *mem;
  gint num, no_clients;
  GError *err = NULL;

  sink = GST_MULTIUDPSINK (bsink);

  n_mem = gst_buffer_n_memory (buffer);
  if (n_mem == 0)
    goto no_data;

  vec = g_new (GOutputVector, n_mem);
  map = g_new (GstMapInfo, n_mem);

  size = 0;
  for (i = 0; i < n_mem; i++) {
    mem = gst_buffer_get_memory (buffer, i);
    gst_memory_map (mem, &map[i], GST_MAP_READ);

    if (map[i].size > UDP_MAX_SIZE) {
      GST_WARNING ("Attempting to send a UDP packet larger than maximum "
          "size (%" G_GSIZE_FORMAT " > %d)", map[i].size, UDP_MAX_SIZE);
    }

    vec[i].buffer = map[i].data;
    vec[i].size = map[i].size;

    size += map[i].size;
  }
  sink->bytes_to_serve += size;

  /* grab lock while iterating and sending to clients, this should be
   * fast as UDP never blocks */
  g_mutex_lock (&sink->client_lock);
  GST_LOG_OBJECT (bsink, "about to send %" G_GSIZE_FORMAT " bytes", size);

  no_clients = 0;
  num = 0;
  for (clients = sink->clients; clients; clients = g_list_next (clients)) {
    GstUDPClient *client;
    gint count;

    client = (GstUDPClient *) clients->data;
    no_clients++;
    GST_LOG_OBJECT (sink, "sending %" G_GSIZE_FORMAT " bytes to client %p",
        size, client);

    count = sink->send_duplicates ? client->refcount : 1;

    while (count--) {
      gssize ret;

      ret = g_socket_send_message (sink->used_socket, client->addr, vec, n_mem,
          NULL, 0, 0, sink->cancellable, &err);

      if (ret < 0)
        goto send_error;

      num++;
      client->bytes_sent += ret;
      client->packets_sent++;
      sink->bytes_served += ret;
    }
  }
  g_mutex_unlock (&sink->client_lock);

  /* unmap all memory again */
  for (i = 0; i < n_mem; i++) {
    gst_memory_unmap (map[i].memory, &map[i]);
    gst_memory_unref (map[i].memory);
  }
  g_free (vec);
  g_free (map);

  GST_LOG_OBJECT (sink, "sent %" G_GSIZE_FORMAT " bytes to %d (of %d) clients",
      size, num, no_clients);

  return GST_FLOW_OK;

no_data:
  {
    return GST_FLOW_OK;
  }
send_error:
  {
    g_mutex_unlock (&sink->client_lock);
    GST_DEBUG ("got send error %s", err->message);
    g_clear_error (&err);

    /* also unmap and free on the error path so the mapped memories and the
     * scratch arrays are not leaked */
    for (i = 0; i < n_mem; i++) {
      gst_memory_unmap (map[i].memory, &map[i]);
      gst_memory_unref (map[i].memory);
    }
    g_free (vec);
    g_free (map);

    return GST_FLOW_ERROR;
  }
}
static gboolean gst_imx_phys_meta_transform(GstBuffer *dest, GstMeta *meta, GstBuffer *buffer, GQuark type, gpointer data)
{
    GstImxPhysMemMeta *dmeta, *smeta;

    smeta = (GstImxPhysMemMeta *)meta;

    if (GST_META_TRANSFORM_IS_COPY(type))
    {
        GstMetaTransformCopy *copy = data;
        gboolean do_copy = FALSE;

        if (copy->region)
        {
            GST_LOG("not copying metadata: only a region is being copied (not the entire block)");
        }
        else
        {
            guint n_mem_buffer, n_mem_dest;

            n_mem_buffer = gst_buffer_n_memory(buffer);
            n_mem_dest = gst_buffer_n_memory(dest);

            /* only copy if both buffers have 1 identical memory */
            if ((n_mem_buffer == n_mem_dest) && (n_mem_dest == 1))
            {
                GstMemory *mem1, *mem2;

                mem1 = gst_buffer_get_memory(dest, 0);
                mem2 = gst_buffer_get_memory(buffer, 0);

                if (mem1 == mem2)
                {
                    GST_LOG("copying physmem metadata: memory blocks identical");
                    do_copy = TRUE;
                }
                else
                    GST_LOG("not copying physmem metadata: memory blocks not identical");

                gst_memory_unref(mem1);
                gst_memory_unref(mem2);
            }
            else
                GST_LOG("not copying physmem metadata: num memory blocks in source/dest: %u/%u", n_mem_buffer, n_mem_dest);
        }

        if (do_copy)
        {
            /* only copy if the complete data is copied as well */
            dmeta = (GstImxPhysMemMeta *)gst_buffer_add_meta(dest, gst_imx_phys_mem_meta_get_info(), NULL);

            if (!dmeta)
            {
                GST_ERROR("could not add physmem metadata to the dest buffer");
                return FALSE;
            }

            dmeta->phys_addr = smeta->phys_addr;
            dmeta->x_padding = smeta->x_padding;
            dmeta->y_padding = smeta->y_padding;
            if (smeta->parent)
                dmeta->parent = gst_buffer_ref(smeta->parent);
            else
                dmeta->parent = gst_buffer_ref(buffer);
        }
    }

    return TRUE;
}
/* Called by the idle function in the gl thread */
void
_do_convert (GstGLContext * context, GstGLColorConvert * convert)
{
  guint in_width, in_height, out_width, out_height;
  struct ConvertInfo *c_info = &convert->priv->convert_info;
  GstMapInfo out_info[GST_VIDEO_MAX_PLANES], in_info[GST_VIDEO_MAX_PLANES];
  gboolean res = TRUE;
  gint i, j = 0;

  out_width = GST_VIDEO_INFO_WIDTH (&convert->out_info);
  out_height = GST_VIDEO_INFO_HEIGHT (&convert->out_info);
  in_width = GST_VIDEO_INFO_WIDTH (&convert->in_info);
  in_height = GST_VIDEO_INFO_HEIGHT (&convert->in_info);

  convert->outbuf = NULL;

  if (!_init_convert (convert)) {
    convert->priv->result = FALSE;
    return;
  }

  convert->outbuf = gst_buffer_new ();
  if (!gst_gl_memory_setup_buffer (convert->context, &convert->out_info,
          convert->outbuf)) {
    convert->priv->result = FALSE;
    return;
  }

  gst_buffer_add_video_meta_full (convert->outbuf, 0,
      GST_VIDEO_INFO_FORMAT (&convert->out_info),
      GST_VIDEO_INFO_WIDTH (&convert->out_info),
      GST_VIDEO_INFO_HEIGHT (&convert->out_info),
      GST_VIDEO_INFO_N_PLANES (&convert->out_info),
      convert->out_info.offset, convert->out_info.stride);

  for (i = 0; i < c_info->in_n_textures; i++) {
    convert->priv->in_tex[i] =
        (GstGLMemory *) gst_buffer_peek_memory (convert->inbuf, i);
    if (!gst_is_gl_memory ((GstMemory *) convert->priv->in_tex[i])) {
      GST_ERROR_OBJECT (convert, "input must be GstGLMemory");
      res = FALSE;
      goto out;
    }
    if (!gst_memory_map ((GstMemory *) convert->priv->in_tex[i], &in_info[i],
            GST_MAP_READ | GST_MAP_GL)) {
      GST_ERROR_OBJECT (convert, "failed to map input memory %p",
          convert->priv->in_tex[i]);
      res = FALSE;
      goto out;
    }
  }

  for (j = 0; j < c_info->out_n_textures; j++) {
    GstGLMemory *out_tex =
        (GstGLMemory *) gst_buffer_peek_memory (convert->outbuf, j);
    gint mem_width, mem_height;

    if (!gst_is_gl_memory ((GstMemory *) out_tex)) {
      GST_ERROR_OBJECT (convert, "output must be GstGLMemory");
      res = FALSE;
      goto out;
    }

    mem_width = gst_gl_memory_get_texture_width (out_tex);
    mem_height = gst_gl_memory_get_texture_height (out_tex);

    if (out_tex->tex_type == GST_VIDEO_GL_TEXTURE_TYPE_LUMINANCE
        || out_tex->tex_type == GST_VIDEO_GL_TEXTURE_TYPE_LUMINANCE_ALPHA
        || out_width != mem_width || out_height != mem_height) {
      /* Luminance formats are not color renderable */
      /* rendering to a framebuffer only renders the intersection of all
       * the attachments i.e. the smallest attachment size */
      GstVideoInfo temp_info;

      gst_video_info_set_format (&temp_info, GST_VIDEO_FORMAT_RGBA, out_width,
          out_height);

      if (!convert->priv->out_tex[j])
        convert->priv->out_tex[j] =
            (GstGLMemory *) gst_gl_memory_alloc (context, &temp_info, 0);
    } else {
      convert->priv->out_tex[j] = out_tex;
    }

    if (!gst_memory_map ((GstMemory *) convert->priv->out_tex[j], &out_info[j],
            GST_MAP_WRITE | GST_MAP_GL)) {
      GST_ERROR_OBJECT (convert, "failed to map output memory %p",
          convert->priv->out_tex[j]);
      res = FALSE;
      goto out;
    }
  }

  GST_LOG_OBJECT (convert, "converting to textures:%p,%p,%p,%p "
      "dimensions:%ux%u, from textures:%p,%p,%p,%p dimensions:%ux%u",
      convert->priv->out_tex[0], convert->priv->out_tex[1],
      convert->priv->out_tex[2], convert->priv->out_tex[3], out_width,
      out_height, convert->priv->in_tex[0], convert->priv->in_tex[1],
      convert->priv->in_tex[2], convert->priv->in_tex[3], in_width, in_height);

  if (!_do_convert_draw (context, convert))
    res = FALSE;

out:
  for (j--; j >= 0; j--) {
    GstGLMemory *out_tex =
        (GstGLMemory *) gst_buffer_peek_memory (convert->outbuf, j);
    gint mem_width, mem_height;

    gst_memory_unmap ((GstMemory *) convert->priv->out_tex[j], &out_info[j]);

    mem_width = gst_gl_memory_get_texture_width (out_tex);
    mem_height = gst_gl_memory_get_texture_height (out_tex);

    if (out_tex->tex_type == GST_VIDEO_GL_TEXTURE_TYPE_LUMINANCE
        || out_tex->tex_type == GST_VIDEO_GL_TEXTURE_TYPE_LUMINANCE_ALPHA
        || out_width != mem_width || out_height != mem_height) {
      GstMapInfo to_info, from_info;

      if (!gst_memory_map ((GstMemory *) convert->priv->out_tex[j], &from_info,
              GST_MAP_READ | GST_MAP_GL)) {
        gst_gl_context_set_error (convert->context, "Failed to map "
            "intermediate memory");
        res = FALSE;
        continue;
      }
      if (!gst_memory_map ((GstMemory *) out_tex, &to_info,
              GST_MAP_WRITE | GST_MAP_GL)) {
        gst_gl_context_set_error (convert->context, "Failed to map "
            "intermediate memory");
        res = FALSE;
        continue;
      }
      gst_gl_memory_copy_into_texture (convert->priv->out_tex[j],
          out_tex->tex_id, out_tex->tex_type, mem_width, mem_height,
          GST_VIDEO_INFO_PLANE_STRIDE (&out_tex->info, out_tex->plane), FALSE);
      gst_memory_unmap ((GstMemory *) convert->priv->out_tex[j], &from_info);
      gst_memory_unmap ((GstMemory *) out_tex, &to_info);
    } else {
      convert->priv->out_tex[j] = NULL;
    }
  }

  /* YV12 the same as I420 except planes 1+2 swapped */
  if (GST_VIDEO_INFO_FORMAT (&convert->out_info) == GST_VIDEO_FORMAT_YV12) {
    GstMemory *mem1 = gst_buffer_get_memory (convert->outbuf, 1);
    GstMemory *mem2 = gst_buffer_get_memory (convert->outbuf, 2);

    gst_buffer_replace_memory (convert->outbuf, 1, mem2);
    gst_buffer_replace_memory (convert->outbuf, 2, mem1);
  }

  for (i--; i >= 0; i--) {
    gst_memory_unmap ((GstMemory *) convert->priv->in_tex[i], &in_info[i]);
  }

  if (!res) {
    gst_buffer_unref (convert->outbuf);
    convert->outbuf = NULL;
  }

  convert->priv->result = res;
  return;
}
GdkPixbuf *
xplayer_gst_playbin_get_frame (GstElement *play)
{
  GstStructure *s;
  GstSample *sample = NULL;
  GdkPixbuf *pixbuf = NULL;
  GstCaps *to_caps, *sample_caps;
  gint outwidth = 0;
  gint outheight = 0;
  GstMemory *memory;
  GstMapInfo info;
  GdkPixbufRotation rotation = GDK_PIXBUF_ROTATE_NONE;

  g_return_val_if_fail (play != NULL, NULL);
  g_return_val_if_fail (GST_IS_ELEMENT (play), NULL);

  /* our desired output format (RGB24) */
  to_caps = gst_caps_new_simple ("video/x-raw",
      "format", G_TYPE_STRING, "RGB",
      /* Note: we don't ask for a specific width/height here, so that
       * videoscale can adjust dimensions from a non-1/1 pixel aspect
       * ratio to a 1/1 pixel-aspect-ratio. We also don't ask for a
       * specific framerate, because the input framerate won't
       * necessarily match the output framerate if there's a deinterlacer
       * in the pipeline. */
      "pixel-aspect-ratio", GST_TYPE_FRACTION, 1, 1,
      NULL);

  /* get frame */
  g_signal_emit_by_name (play, "convert-sample", to_caps, &sample);
  gst_caps_unref (to_caps);

  if (!sample) {
    GST_DEBUG ("Could not take screenshot: %s",
        "failed to retrieve or convert video frame");
    g_warning ("Could not take screenshot: %s",
        "failed to retrieve or convert video frame");
    return NULL;
  }

  sample_caps = gst_sample_get_caps (sample);
  if (!sample_caps) {
    GST_DEBUG ("Could not take screenshot: %s", "no caps on output buffer");
    g_warning ("Could not take screenshot: %s", "no caps on output buffer");
    /* release the sample to avoid leaking it on this error path */
    gst_sample_unref (sample);
    return NULL;
  }

  GST_DEBUG ("frame caps: %" GST_PTR_FORMAT, sample_caps);

  s = gst_caps_get_structure (sample_caps, 0);
  gst_structure_get_int (s, "width", &outwidth);
  gst_structure_get_int (s, "height", &outheight);
  if (outwidth <= 0 || outheight <= 0)
    goto done;

  memory = gst_buffer_get_memory (gst_sample_get_buffer (sample), 0);
  gst_memory_map (memory, &info, GST_MAP_READ);

  /* create pixbuf from that - use our own destroy function */
  pixbuf = gdk_pixbuf_new_from_data (info.data,
      GDK_COLORSPACE_RGB, FALSE, 8, outwidth, outheight,
      GST_ROUND_UP_4 (outwidth * 3), destroy_pixbuf, sample);
  gst_memory_unmap (memory, &info);
  /* drop the extra reference from gst_buffer_get_memory(); the buffer in
   * the sample still keeps the memory alive for the pixbuf */
  gst_memory_unref (memory);

done:
  if (!pixbuf) {
    GST_DEBUG ("Could not take screenshot: %s", "could not create pixbuf");
    g_warning ("Could not take screenshot: %s", "could not create pixbuf");
    gst_sample_unref (sample);
    return NULL;
  }

  /* Did we check whether we need to rotate the video? */
  if (g_object_get_data (G_OBJECT (play), "orientation-checked") == NULL) {
    GstTagList *tags = NULL;

    g_signal_emit_by_name (G_OBJECT (play), "get-video-tags", 0, &tags);
    if (tags) {
      char *orientation_str;
      gboolean ret;

      ret = gst_tag_list_get_string_index (tags, GST_TAG_IMAGE_ORIENTATION,
          0, &orientation_str);
      if (!ret || !orientation_str)
        rotation = GDK_PIXBUF_ROTATE_NONE;
      else if (g_str_equal (orientation_str, "rotate-90"))
        rotation = GDK_PIXBUF_ROTATE_CLOCKWISE;
      else if (g_str_equal (orientation_str, "rotate-180"))
        rotation = GDK_PIXBUF_ROTATE_UPSIDEDOWN;
      else if (g_str_equal (orientation_str, "rotate-270"))
        rotation = GDK_PIXBUF_ROTATE_COUNTERCLOCKWISE;
      gst_tag_list_unref (tags);
    }

    g_object_set_data (G_OBJECT (play), "orientation-checked",
        GINT_TO_POINTER (1));
    g_object_set_data (G_OBJECT (play), "orientation",
        GINT_TO_POINTER (rotation));
  }

  rotation =
      GPOINTER_TO_INT (g_object_get_data (G_OBJECT (play), "orientation"));
  if (rotation != GDK_PIXBUF_ROTATE_NONE) {
    GdkPixbuf *rotated;

    rotated = gdk_pixbuf_rotate_simple (pixbuf, rotation);
    if (rotated) {
      g_object_unref (pixbuf);
      pixbuf = rotated;
    }
  }

  return pixbuf;
}
static GdkPixbuf *
gst_thumbnailer_capture_frame (GstElement *play, gint width)
{
  GstCaps *to_caps;
  GstSample *sample = NULL;
  GdkPixbuf *pixbuf = NULL;
  GstStructure *s;
  GstCaps *sample_caps;
  gint outwidth = 0, outheight = 0;
  GstMemory *memory;
  GstMapInfo info;

  /* desired output format (RGB24) */
  to_caps = gst_caps_new_simple ("video/x-raw",
      "format", G_TYPE_STRING, "RGB",
      "pixel-aspect-ratio", GST_TYPE_FRACTION, 1, 1,
      "width", G_TYPE_INT, width,
      NULL);

  /* get the frame */
  g_signal_emit_by_name (play, "convert-sample", to_caps, &sample);
  gst_caps_unref (to_caps);

  if (sample == NULL)
    return NULL;

  sample_caps = gst_sample_get_caps (sample);
  if (sample_caps == NULL) {
    /* no caps on output buffer */
    gst_sample_unref (sample);
    return NULL;
  }

  /* size of the frame */
  s = gst_caps_get_structure (sample_caps, 0);
  gst_structure_get_int (s, "width", &outwidth);
  gst_structure_get_int (s, "height", &outheight);
  if (outwidth <= 0 || outheight <= 0) {
    /* invalid size */
    gst_sample_unref (sample);
    return NULL;
  }

  /* get the memory block of the buffer */
  memory = gst_buffer_get_memory (gst_sample_get_buffer (sample), 0);
  if (gst_memory_map (memory, &info, GST_MAP_READ)) {
    /* create pixmap for the data */
    pixbuf = gdk_pixbuf_new_from_data (info.data,
        GDK_COLORSPACE_RGB, FALSE, 8, outwidth, outheight,
        GST_ROUND_UP_4 (width * 3),
        gst_thumbnailer_destroy_pixbuf, sample);

    /* release memory */
    gst_memory_unmap (memory, &info);
  }
  gst_memory_unref (memory);

  /* release sample if pixbuf failed */
  if (pixbuf == NULL)
    gst_sample_unref (sample);

  return pixbuf;
}