/* Wrap the bytes of a GstBuffer into a CMSampleBuffer tagged with the
 * decoder's current format description (self->fmt_desc).
 *
 * Returns a new CMSampleBufferRef owned by the caller, or NULL on failure
 * (the OSStatus error is logged). The backing CMBlockBuffer is released
 * here in both paths; CMSampleBufferCreate retains it internally.
 */
static CMSampleBufferRef
gst_vtdec_sample_buffer_from (GstVTDec * self, GstBuffer * buf)
{
  GstCMApi *cm = self->ctx->cm;
  OSStatus status;
  CMBlockBufferRef bbuf = NULL;
  CMSampleBufferRef sbuf = NULL;
  GstMapInfo map;

  /* caller must have negotiated a format description first */
  g_assert (self->fmt_desc != NULL);

  gst_buffer_map (buf, &map, GST_MAP_READ);
  /* kCFAllocatorNull => the block buffer wraps map.data without copying
   * or taking ownership of the memory. */
  status = cm->CMBlockBufferCreateWithMemoryBlock (NULL, map.data,
      (gint64) map.size, kCFAllocatorNull, NULL, 0, (gint64) map.size,
      FALSE, &bbuf);
  /* NOTE(review): the block buffer keeps pointing at map.data after this
   * unmap — assumes the GstBuffer's memory stays resident and at the same
   * address for the lifetime of the sample buffer; TODO confirm. */
  gst_buffer_unmap (buf, &map);
  if (status != noErr)
    goto error;

  status = cm->CMSampleBufferCreate (NULL, bbuf, TRUE, 0, 0,
      self->fmt_desc, 1, 0, NULL, 0, NULL, &sbuf);
  if (status != noErr)
    goto error;

beach:
  /* NOTE(review): on the first error path bbuf may still be NULL —
   * presumably FigBlockBufferRelease tolerates NULL; verify. */
  cm->FigBlockBufferRelease (bbuf);
  return sbuf;

error:
  GST_ERROR_OBJECT (self, "err %d", status);
  goto beach;
}
/* GstPushSrc::create implementation: hand out the next captured frame.
 *
 * Blocks on the frame queue until a buffer arrives or capture stops.
 * Flags a DISCONT on the first buffer and whenever the buffer offset is
 * not exactly one past the previous offset. Errors out if the device's
 * format description changes mid-stream.
 *
 * Returns GST_FLOW_OK with *buf set, GST_FLOW_FLUSHING on shutdown, or
 * GST_FLOW_ERROR on an unexpected format change.
 */
static GstFlowReturn
gst_mio_video_src_create (GstPushSrc * pushsrc, GstBuffer ** buf)
{
  GstMIOVideoSrc *self = GST_MIO_VIDEO_SRC_CAST (pushsrc);
  GstCMApi *cm = self->ctx->cm;
  CMFormatDescriptionRef cur_format;

  /* wait for a frame (or for shutdown) */
  FRAME_QUEUE_LOCK (self);
  for (;;) {
    if (!self->running || !g_queue_is_empty (self->queue))
      break;
    FRAME_QUEUE_WAIT (self);
  }
  *buf = g_queue_pop_tail (self->queue);
  FRAME_QUEUE_UNLOCK (self);

  if (G_UNLIKELY (!self->running))
    goto shutting_down;

  /* bail out if another application reconfigured the device */
  cur_format = cm->CMSampleBufferGetFormatDescription
      (GST_CORE_MEDIA_BUFFER (*buf)->sample_buf);
  if (self->prev_format != NULL &&
      !cm->CMFormatDescriptionEqual (cur_format, self->prev_format))
    goto unexpected_format;
  cm->FigFormatDescriptionRelease (self->prev_format);
  self->prev_format = cm->FigFormatDescriptionRetain (cur_format);

  /* mark discontinuities: first buffer, or a gap in the offset sequence */
  if (self->prev_offset == GST_BUFFER_OFFSET_NONE ||
      GST_BUFFER_OFFSET (*buf) - self->prev_offset != 1)
    GST_BUFFER_FLAG_SET (*buf, GST_BUFFER_FLAG_DISCONT);
  self->prev_offset = GST_BUFFER_OFFSET (*buf);

  return GST_FLOW_OK;

  /* ERRORS */
shutting_down:
  {
    if (*buf != NULL) {
      gst_buffer_unref (*buf);
      *buf = NULL;
    }
    return GST_FLOW_FLUSHING;
  }
unexpected_format:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, READ,
        ("capture format changed unexpectedly"),
        ("another application likely reconfigured the device"));
    if (*buf != NULL) {
      gst_buffer_unref (*buf);
      *buf = NULL;
    }
    return GST_FLOW_ERROR;
  }
}
/* (Re)negotiate source caps with downstream when the negotiated video
 * geometry or framerate differs from the caps currently advertised.
 *
 * For H.264 the avcC atom is extracted from the sample buffer's format
 * description and attached as codec_data.
 *
 * Returns TRUE if downstream accepted the caps (or nothing changed).
 *
 * FIX: the original filled codec_data via the GStreamer 0.10-only
 * GST_BUFFER_DATA macro, which does not exist in the 1.0 API used by the
 * rest of this file (gst_buffer_map, gst_buffer_add_meta, ...); use
 * gst_buffer_fill instead. Also guard against a missing atoms dictionary
 * or avcC entry, which previously dereferenced NULL.
 */
static gboolean
gst_vtenc_negotiate_downstream (GstVTEnc * self, CMSampleBufferRef sbuf)
{
  gboolean result;
  GstCMApi *cm = self->ctx->cm;
  GstCaps *caps;
  GstStructure *s;

  /* nothing to do when caps already match the negotiated settings */
  if (self->caps_width == self->negotiated_width &&
      self->caps_height == self->negotiated_height &&
      self->caps_fps_n == self->negotiated_fps_n &&
      self->caps_fps_d == self->negotiated_fps_d) {
    return TRUE;
  }

  caps = gst_caps_copy (gst_pad_get_pad_template_caps (self->srcpad));
  s = gst_caps_get_structure (caps, 0);
  gst_structure_set (s,
      "width", G_TYPE_INT, self->negotiated_width,
      "height", G_TYPE_INT, self->negotiated_height,
      "framerate", GST_TYPE_FRACTION,
      self->negotiated_fps_n, self->negotiated_fps_d, NULL);

  if (self->details->format_id == kVTFormatH264) {
    CMFormatDescriptionRef fmt;
    CFDictionaryRef atoms;
    CFStringRef avccKey;
    CFDataRef avcc = NULL;

    fmt = cm->CMSampleBufferGetFormatDescription (sbuf);
    atoms = cm->CMFormatDescriptionGetExtension (fmt,
        *(cm->kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms));
    if (atoms != NULL) {
      avccKey = CFStringCreateWithCString (NULL, "avcC",
          kCFStringEncodingUTF8);
      avcc = CFDictionaryGetValue (atoms, avccKey);
      CFRelease (avccKey);
    }

    if (avcc != NULL) {
      GstBuffer *codec_data;
      CFIndex len = CFDataGetLength (avcc);

      codec_data = gst_buffer_new_and_alloc (len);
      /* copy the avcC payload into the buffer (1.0 API; GST_BUFFER_DATA
       * is gone) */
      gst_buffer_fill (codec_data, 0, CFDataGetBytePtr (avcc), len);
      gst_structure_set (s, "codec_data", GST_TYPE_BUFFER, codec_data, NULL);
      gst_buffer_unref (codec_data);
    }
  }

  result = gst_pad_set_caps (self->srcpad, caps);
  gst_caps_unref (caps);

  /* remember what we advertised so we can skip renegotiation next time */
  self->caps_width = self->negotiated_width;
  self->caps_height = self->negotiated_height;
  self->caps_fps_n = self->negotiated_fps_n;
  self->caps_fps_d = self->negotiated_fps_d;

  return result;
}
/* Translate one Celestial stream-format dictionary into a
 * GstCelVideoFormat (index, dimensions, pixel format and max framerate).
 *
 * Returns TRUE on success, FALSE when the media subtype is not one we
 * can map to a GStreamer video format.
 */
static gboolean
gst_cel_video_src_parse_stream_format (GstCelVideoSrc * self,
    guint index, CFDictionaryRef stream_format, GstCelVideoFormat * format)
{
  GstCMApi *cm = self->ctx->cm;
  GstMTApi *mt = self->ctx->mt;
  CMFormatDescriptionRef fmt_desc;
  CMVideoDimensions dimensions;
  UInt32 media_subtype;
  CFNumberRef max_fps_value;
  SInt32 max_fps;

  format->index = index;

  fmt_desc = CFDictionaryGetValue (stream_format,
      *(mt->kFigSupportedFormat_FormatDescription));

  dimensions = cm->CMVideoFormatDescriptionGetDimensions (fmt_desc);
  format->width = dimensions.width;
  format->height = dimensions.height;

  /* only packed YUY2 and biplanar NV12 are supported */
  media_subtype = cm->CMFormatDescriptionGetMediaSubType (fmt_desc);
  if (media_subtype == kComponentVideoUnsigned) {
    format->video_format = GST_VIDEO_FORMAT_YUY2;
    format->fourcc = GST_MAKE_FOURCC ('Y', 'U', 'Y', '2');
  } else if (media_subtype == kYUV420vCodecType) {
    format->video_format = GST_VIDEO_FORMAT_NV12;
    format->fourcc = GST_MAKE_FOURCC ('N', 'V', '1', '2');
  } else {
    return FALSE;
  }

  /* the framerate is reported as an integer maximum; expose it as N/1 */
  max_fps_value = CFDictionaryGetValue (stream_format,
      *(mt->kFigSupportedFormat_VideoMaxFrameRate));
  CFNumberGetValue (max_fps_value, kCFNumberSInt32Type, &max_fps);
  format->fps_n = max_fps;
  format->fps_d = 1;

  return TRUE;
}
/* Invoke func once per video format description exposed by the device's
 * input stream(s), passing a filled-in GstMIOVideoFormat each time.
 * Non-video format descriptions are skipped.
 */
static void
gst_mio_video_device_formats_foreach (GstMIOVideoDevice * self,
    GstMIOVideoDeviceEachFormatFunc func, gpointer user_data)
{
  GstCMApi *cm = self->ctx->cm;
  GstMIOApi *mio = self->ctx->mio;
  TundraTargetSpec spec = { 0, };
  GArray *streams;
  guint i;

  spec.name = kTundraDevicePropertyStreams;
  spec.scope = kTundraScopeInput;
  streams = gst_mio_object_get_array (self->handle, &spec,
      sizeof (TundraObjectID), mio);

  /* TODO: We only consider the first stream for now */
  for (i = 0; i < MIN (streams->len, 1); i++) {
    TundraObjectID stream_id;
    CFArrayRef descriptions;
    CFIndex count, j;

    stream_id = g_array_index (streams, TundraObjectID, i);

    spec.name = kTundraStreamPropertyFormatDescriptions;
    spec.scope = kTundraScopeInput;
    descriptions = gst_mio_object_get_pointer (stream_id, &spec, mio);

    count = CFArrayGetCount (descriptions);
    for (j = 0; j < count; j++) {
      GstMIOVideoFormat vfmt;

      vfmt.stream = stream_id;
      vfmt.desc = (CMFormatDescriptionRef)
          CFArrayGetValueAtIndex (descriptions, j);
      /* only report video formats */
      if (cm->CMFormatDescriptionGetMediaType (vfmt.desc) !=
          kFigMediaTypeVideo)
        continue;

      vfmt.type = cm->CMFormatDescriptionGetMediaSubType (vfmt.desc);
      vfmt.dim = cm->CMVideoFormatDescriptionGetDimensions (vfmt.desc);

      func (self, &vfmt, user_data);
    }
  }

  g_array_free (streams, TRUE);
}
/* GstPushSrc::create implementation: dequeue the next captured sample
 * buffer, wrap it in a GstBuffer and timestamp it.
 *
 * Blocks on the queue-ready condition until a sample arrives or capture
 * stops. Returns GST_FLOW_OK, GST_FLOW_FLUSHING on shutdown, or
 * GST_FLOW_ERROR if the sample could not be wrapped.
 *
 * FIX: the shutting_down path used to call FigSampleBufferRelease on
 * sbuf unconditionally, but sbuf is NULL when we arrive there from
 * inside the wait loop — CF-style release functions generally crash on
 * NULL. Also check gst_core_media_buffer_new for failure (it returns
 * NULL on error) instead of passing NULL to the timestamp helper.
 */
static GstFlowReturn
gst_cel_video_src_create (GstPushSrc * pushsrc, GstBuffer ** buf)
{
  GstCelVideoSrc *self = GST_CEL_VIDEO_SRC_CAST (pushsrc);
  GstCMApi *cm = self->ctx->cm;
  CMSampleBufferRef sbuf;

  sbuf = cm->CMBufferQueueDequeueAndRetain (self->queue);
  while (sbuf == NULL) {
    /* wait until the queue-ready trigger fires or capture stops */
    QUEUE_READY_LOCK (self);
    while (!self->queue_is_ready && g_atomic_int_get (&self->is_running))
      QUEUE_READY_WAIT (self);
    self->queue_is_ready = FALSE;
    QUEUE_READY_UNLOCK (self);

    if (G_UNLIKELY (!g_atomic_int_get (&self->is_running)))
      goto shutting_down;

    sbuf = cm->CMBufferQueueDequeueAndRetain (self->queue);
  }

  if (G_UNLIKELY (!g_atomic_int_get (&self->is_running)))
    goto shutting_down;

  *buf = gst_core_media_buffer_new (self->ctx, sbuf);
  if (G_UNLIKELY (*buf == NULL))
    goto wrap_failed;
  gst_cel_video_src_timestamp_buffer (self, *buf);
  cm->FigSampleBufferRelease (sbuf);

  if (self->do_stats)
    gst_cel_video_src_update_statistics (self);

  return GST_FLOW_OK;

  /* ERRORS */
shutting_down:
  {
    /* sbuf is NULL when we get here from inside the wait loop */
    if (sbuf != NULL)
      cm->FigSampleBufferRelease (sbuf);
    return GST_FLOW_FLUSHING;
  }
wrap_failed:
  {
    cm->FigSampleBufferRelease (sbuf);
    return GST_FLOW_ERROR;
  }
}
/* Wrap a CMSampleBuffer in a GstBuffer without copying the payload.
 *
 * If the sample carries a CVPixelBuffer image, its base address is
 * locked (read-only) and exposed; otherwise the sample's CMBlockBuffer
 * data pointer is exposed. A GstCoreMediaMeta holding retained refs to
 * the sample/image/pixel/block buffers is attached so the underlying
 * CoreMedia objects outlive the GstBuffer's memory.
 *
 * Returns a new GstBuffer, or NULL on failure.
 */
GstBuffer *
gst_core_media_buffer_new (GstCoreMediaCtx * ctx, CMSampleBufferRef sample_buf)
{
  GstCVApi *cv = ctx->cv;
  GstCMApi *cm = ctx->cm;
  CVImageBufferRef image_buf;
  CVPixelBufferRef pixel_buf;
  CMBlockBufferRef block_buf;
  Byte *data = NULL;
  UInt32 size;
  OSStatus status;
  GstBuffer *buf;
  GstCoreMediaMeta *meta;

  image_buf = cm->CMSampleBufferGetImageBuffer (sample_buf);
  pixel_buf = NULL;
  block_buf = cm->CMSampleBufferGetDataBuffer (sample_buf);

  if (image_buf != NULL &&
      CFGetTypeID (image_buf) == cv->CVPixelBufferGetTypeID ()) {
    pixel_buf = (CVPixelBufferRef) image_buf;

    /* NOTE(review): the base address stays locked here — presumably it
     * is unlocked when the meta is freed elsewhere in this file; confirm. */
    if (cv->CVPixelBufferLockBaseAddress (pixel_buf,
            kCVPixelBufferLock_ReadOnly) != kCVReturnSuccess) {
      goto error;
    }

    if (cv->CVPixelBufferIsPlanar (pixel_buf)) {
      gint plane_count, plane_idx;

      /* NOTE(review): total size is the sum of per-plane sizes measured
       * from plane 0's base address — assumes the planes are laid out
       * contiguously; TODO confirm for all pixel formats in use. */
      data = cv->CVPixelBufferGetBaseAddressOfPlane (pixel_buf, 0);

      size = 0;
      plane_count = cv->CVPixelBufferGetPlaneCount (pixel_buf);
      for (plane_idx = 0; plane_idx != plane_count; plane_idx++) {
        size += cv->CVPixelBufferGetBytesPerRowOfPlane (pixel_buf, plane_idx)
            * cv->CVPixelBufferGetHeightOfPlane (pixel_buf, plane_idx);
      }
    } else {
      data = cv->CVPixelBufferGetBaseAddress (pixel_buf);
      size = cv->CVPixelBufferGetBytesPerRow (pixel_buf) *
          cv->CVPixelBufferGetHeight (pixel_buf);
    }
  } else if (block_buf != NULL) {
    /* non-image payload: expose the block buffer's bytes directly */
    status = cm->CMBlockBufferGetDataPointer (block_buf, 0, 0, 0, &data);
    if (status != noErr)
      goto error;
    size = cm->CMBlockBufferGetDataLength (block_buf);
  } else {
    goto error;
  }

  buf = gst_buffer_new ();

  /* the meta keeps the CoreMedia objects alive for the buffer's lifetime */
  meta = (GstCoreMediaMeta *) gst_buffer_add_meta (buf,
      gst_core_media_meta_get_info (), NULL);
  meta->ctx = g_object_ref (ctx);
  meta->sample_buf = cm->FigSampleBufferRetain (sample_buf);
  meta->image_buf = image_buf;
  meta->pixel_buf = pixel_buf;
  meta->block_buf = block_buf;

  /* zero-copy: wrap the CoreMedia-owned memory, no free function */
  gst_buffer_append_memory (buf,
      gst_memory_new_wrapped (GST_MEMORY_FLAG_NO_SHARE, data,
          size, 0, size, NULL, NULL));

  return buf;

error:
  return NULL;
}
/* Open the Celestial video capture device selected by self->device_index
 * and wire up its sample-buffer queue.
 *
 * Acquires, in order: the CoreMedia API context, the capture device and
 * a default stream, the device's stream array (from which the requested
 * stream is picked), the stream's buffer queue, and a data-ready trigger
 * on that queue. On success all handles/vtable pointers are stored on
 * self and TRUE is returned; on any failure an element error is posted,
 * everything acquired so far is released via the goto-cleanup chain, and
 * FALSE is returned.
 */
static gboolean
gst_cel_video_src_open_device (GstCelVideoSrc * self)
{
  GstCoreMediaCtx *ctx = NULL;
  GError *error = NULL;
  GstCMApi *cm = NULL;
  GstMTApi *mt = NULL;
  GstCelApi *cel = NULL;
  OSStatus status;
  FigCaptureDeviceRef device = NULL;
  FigBaseObjectRef device_base;
  FigBaseVTable *device_vt;
  CFArrayRef stream_array = NULL;
  CFIndex stream_index;
  FigCaptureStreamRef stream = NULL;
  FigBaseObjectRef stream_base;
  FigBaseVTable *stream_vt;
  CMBufferQueueRef queue = NULL;
  CMTime ignored_time;

  ctx = gst_core_media_ctx_new (GST_API_CORE_VIDEO | GST_API_CORE_MEDIA |
      GST_API_MEDIA_TOOLBOX | GST_API_CELESTIAL, &error);
  if (error != NULL)
    goto api_error;
  cm = ctx->cm;
  mt = ctx->mt;
  cel = ctx->cel;

  /* create the device plus a preliminary stream; the stream is swapped
   * below for the one matching device_index */
  status = cel->FigCreateCaptureDevicesAndStreamsForPreset (NULL,
      *(cel->kFigRecorderCapturePreset_VideoRecording), NULL,
      &device, &stream, NULL, NULL);
  if (status == kCelError_ResourceBusy)
    goto device_busy;
  else if (status != noErr)
    goto unexpected_error;

  device_base = mt->FigCaptureDeviceGetFigBaseObject (device);
  device_vt = cm->FigBaseObjectGetVTable (device_base);

  status = device_vt->base->CopyProperty (device_base,
      *(mt->kFigCaptureDeviceProperty_StreamArray), NULL,
      (CFTypeRef *) & stream_array);
  if (status != noErr)
    goto unexpected_error;

  /* a negative device_index means "use the first stream" */
  if (self->device_index >= 0)
    stream_index = self->device_index;
  else
    stream_index = 0;
  if (stream_index >= CFArrayGetCount (stream_array))
    goto invalid_device_index;

  /* drop the preset-created stream and retain the selected one */
  CFRelease (stream);
  stream = (FigCaptureStreamRef) CFArrayGetValueAtIndex (stream_array,
      stream_index);
  CFRetain (stream);

  stream_base = mt->FigCaptureStreamGetFigBaseObject (stream);
  stream_vt = cm->FigBaseObjectGetVTable (stream_base);

  status = stream_vt->base->CopyProperty (stream_base,
      *(mt->kFigCaptureStreamProperty_BufferQueue), NULL, &queue);
  if (status != noErr)
    goto unexpected_error;

  self->queue_is_ready = FALSE;
  /* the trigger fires when data becomes ready; the time argument is
   * not used for this trigger type */
  ignored_time = cm->CMTimeMake (1, 1);
  status = cm->CMBufferQueueInstallTrigger (queue,
      gst_cel_video_src_on_queue_ready, self,
      kCMBufferQueueTrigger_WhenDataBecomesReady, ignored_time,
      &self->ready_trigger);
  if (status != noErr)
    goto unexpected_error;

  self->ctx = ctx;

  self->device = device;
  self->device_iface = device_vt->derived;
  self->device_base = device_base;
  self->device_base_iface = device_vt->base;

  self->stream = stream;
  self->stream_iface = stream_vt->derived;
  self->stream_base = stream_base;
  self->stream_base_iface = stream_vt->base;

  self->queue = queue;

  self->duration = GST_CLOCK_TIME_NONE;

  CFRelease (stream_array);

  return TRUE;

  /* ERRORS */
api_error:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, FAILED, ("API error"),
        ("%s", error->message));
    g_clear_error (&error);
    goto any_error;
  }
device_busy:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, BUSY,
        ("device is already in use"), (NULL));
    goto any_error;
  }
invalid_device_index:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, NOT_FOUND,
        ("invalid video capture device index"), (NULL));
    goto any_error;
  }
unexpected_error:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, FAILED,
        ("unexpected error while opening device (%d)", (gint) status),
        (NULL));
    goto any_error;
  }
any_error:
  {
    if (stream != NULL)
      CFRelease (stream);
    if (stream_array != NULL)
      CFRelease (stream_array);
    if (device != NULL)
      CFRelease (device);
    /* NOTE(review): assumes ctx != NULL implies cm was assigned — if
     * gst_core_media_ctx_new could return a non-NULL ctx while also
     * setting error, cm would still be NULL here; verify its contract. */
    if (ctx != NULL) {
      cm->FigBufferQueueRelease (queue);
      g_object_unref (ctx);
    }
    return FALSE;
  }
}
/* Dump a human-readable description of the device to stdout: object
 * class, creator, model UID, transport type, and for each input stream
 * every format description with its dimensions and supported framerates.
 * Debugging aid only; has no effect on device state.
 */
void
gst_mio_video_device_print_debug_info (GstMIOVideoDevice * self)
{
  GstCMApi *cm = self->ctx->cm;
  GstMIOApi *mio = self->ctx->mio;
  TundraTargetSpec spec = { 0, };
  gchar *str;
  GArray *streams;
  guint i;

  g_print ("Device %p with handle %d\n", self, self->handle);

  /* global-scope object properties */
  spec.scope = kTundraScopeGlobal;

  spec.name = kTundraObjectPropertyClass;
  str = gst_mio_object_get_fourcc (self->handle, &spec, mio);
  g_print ("  Class: '%s'\n", str);
  g_free (str);

  spec.name = kTundraObjectPropertyCreator;
  str = gst_mio_object_get_string (self->handle, &spec, mio);
  g_print ("  Creator: \"%s\"\n", str);
  g_free (str);

  spec.name = kTundraDevicePropertyModelUID;
  str = gst_mio_object_get_string (self->handle, &spec, mio);
  g_print ("  Model UID: \"%s\"\n", str);
  g_free (str);

  spec.name = kTundraDevicePropertyTransportType;
  str = gst_mio_object_get_fourcc (self->handle, &spec, mio);
  g_print ("  Transport Type: '%s'\n", str);
  g_free (str);

  g_print ("  Streams:\n");
  spec.name = kTundraDevicePropertyStreams;
  spec.scope = kTundraScopeInput;
  streams = gst_mio_object_get_array (self->handle, &spec,
      sizeof (TundraObjectID), mio);
  for (i = 0; i < streams->len; i++) {
    TundraObjectID stream_id;
    CFArrayRef descriptions;
    CFIndex count, j;

    stream_id = g_array_index (streams, TundraObjectID, i);
    g_print ("    stream[%u] = %d\n", i, stream_id);

    spec.scope = kTundraScopeInput;
    spec.name = kTundraStreamPropertyFormatDescriptions;
    descriptions = gst_mio_object_get_pointer (stream_id, &spec, mio);
    count = CFArrayGetCount (descriptions);
    g_print ("      <%u formats>\n", (guint) count);

    for (j = 0; j < count; j++) {
      CMFormatDescriptionRef desc;
      gchar *type_str;
      gchar *subtype_str;
      CMVideoDimensions dims;
      GArray *rates;
      guint k;

      desc = CFArrayGetValueAtIndex (descriptions, j);
      type_str = gst_mio_fourcc_to_string
          (cm->CMFormatDescriptionGetMediaType (desc));
      subtype_str = gst_mio_fourcc_to_string
          (cm->CMFormatDescriptionGetMediaSubType (desc));
      dims = cm->CMVideoFormatDescriptionGetDimensions (desc);
      g_print ("      format[%u]: MediaType='%s' MediaSubType='%s' %ux%u\n",
          (guint) j, type_str, subtype_str,
          (guint) dims.width, (guint) dims.height);

      /* framerates are queried per format description */
      spec.name = kTundraStreamPropertyFrameRates;
      rates = gst_mio_object_get_array_full (stream_id, &spec,
          sizeof (desc), &desc, sizeof (TundraFramerate), mio);
      for (k = 0; k < rates->len; k++) {
        TundraFramerate *rate = &g_array_index (rates, TundraFramerate, k);
        g_print ("        %f\n", rate->value);
      }
      g_array_free (rates, TRUE);

      g_free (subtype_str);
      g_free (type_str);
    }
  }
  g_array_free (streams, TRUE);
}