CrowdDetectorFilterImpl::CrowdDetectorFilterImpl (
    const std::vector<std::shared_ptr<RegionOfInterest>> &rois,
    std::shared_ptr<MediaObjectImpl> parent) : FilterImpl (parent)
{
  GstBus *bus;
  std::shared_ptr<MediaPipelineImpl> pipe;
  GstStructure *roisStructure;

  pipe = std::dynamic_pointer_cast<MediaPipelineImpl> (getMediaPipeline() );

  g_object_set (element, "filter-factory", "crowddetector", NULL);

  bus = gst_pipeline_get_bus (GST_PIPELINE (pipe->getPipeline() ) );

  g_object_get (G_OBJECT (element), "filter", &crowdDetector, NULL);

  if (crowdDetector == NULL) {
    g_object_unref (bus);
    throw KurentoException (MEDIA_OBJECT_NOT_AVAILABLE,
                            "Media Object not available");
  }

  roisStructure = gst_structure_new_empty ("Rois");

  for (auto roi : rois) {
    GstStructure *roiStructureAux = get_structure_from_roi (roi);

    gst_structure_set (roisStructure, roi->getId().c_str(),
                       GST_TYPE_STRUCTURE, roiStructureAux, NULL);
    gst_structure_free (roiStructureAux);
  }

  g_object_set (G_OBJECT (crowdDetector), ROIS_PARAM, roisStructure, NULL);
  gst_structure_free (roisStructure);

  busMessageLambda = [&] (GstMessage *message) {
    const GstStructure *st;
    gchar *roiID;
    const gchar *type;
    std::string roiIDStr, typeStr;

    if (GST_MESSAGE_SRC (message) != GST_OBJECT (crowdDetector) ||
        GST_MESSAGE_TYPE (message) != GST_MESSAGE_ELEMENT) {
      return;
    }

    st = gst_message_get_structure (message);
    type = gst_structure_get_name (st);

    if (!gst_structure_get (st, "roi", G_TYPE_STRING, &roiID, NULL) ) {
      GST_WARNING ("The message does not contain the roi ID");
      return;
    }

    roiIDStr = roiID;
    typeStr = type;
    g_free (roiID);

    if (typeStr == "fluidity-event") {
      double fluidity_percentage;
      int fluidity_level;

      if (!gst_structure_get (st, "fluidity_percentage", G_TYPE_DOUBLE,
                              &fluidity_percentage, NULL) ) {
        GST_WARNING ("The message does not contain the fluidity percentage");
        return;
      }

      if (!gst_structure_get (st, "fluidity_level", G_TYPE_INT,
                              &fluidity_level, NULL) ) {
        GST_WARNING ("The message does not contain the fluidity level");
        return;
      }

      try {
        CrowdDetectorFluidity event (fluidity_percentage, fluidity_level,
                                     roiIDStr, shared_from_this(),
                                     CrowdDetectorFluidity::getName() );
        signalCrowdDetectorFluidity (event);
      } catch (std::bad_weak_ptr &e) {
      }
    } else if (typeStr == "occupancy-event") {
      double occupancy_percentage;
      int occupancy_level;

      if (!gst_structure_get (st, "occupancy_percentage", G_TYPE_DOUBLE,
                              &occupancy_percentage, NULL) ) {
        GST_WARNING ("The message does not contain the occupancy percentage");
        return;
      }

      if (!gst_structure_get (st, "occupancy_level", G_TYPE_INT,
                              &occupancy_level, NULL) ) {
        GST_WARNING ("The message does not contain the occupancy level");
        return;
      }

      try {
        CrowdDetectorOccupancy event (occupancy_level, occupancy_percentage,
                                      roiIDStr, shared_from_this(),
                                      CrowdDetectorOccupancy::getName() );
        signalCrowdDetectorOccupancy (event);
      } catch (std::bad_weak_ptr &e) {
      }
    } else if (typeStr == "direction-event") {
      double direction_angle;

      if (!gst_structure_get (st, "direction_angle", G_TYPE_DOUBLE,
                              &direction_angle, NULL) ) {
        GST_WARNING ("The message does not contain the direction angle");
        return;
      }

      try {
        CrowdDetectorDirection event (direction_angle, roiIDStr,
                                      shared_from_this(),
                                      CrowdDetectorDirection::getName() );
        signalCrowdDetectorDirection (event);
      } catch (std::bad_weak_ptr &e) {
      }
    } else {
      GST_WARNING ("The message does not have the correct name");
    }
  };

  bus_handler_id = g_signal_connect (bus, "message",
                                     G_CALLBACK (bus_message_adaptor),
                                     &busMessageLambda);
  g_object_unref (bus);
  // There is no need to reference crowddetector because its life cycle is
  // the same as the filter life cycle
  g_object_unref (crowdDetector);
}
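/* The "message" signal above is routed through bus_message_adaptor, a plain C
 * callback defined elsewhere in the code base. A minimal sketch of that glue,
 * assuming the adaptor simply forwards every GstMessage to the std::function
 * whose address was registered as user data: */
static void
bus_message_adaptor (GstBus *bus, GstMessage *message, gpointer data)
{
  auto handler = reinterpret_cast<std::function<void (GstMessage *)> *> (data);

  (*handler) (message);
}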
static gboolean
gst_osx_audio_sink_allowed_caps (GstOsxAudioSink * osxsink)
{
  gint i, channels;
  gboolean spdif_allowed;
  AudioChannelLayout *layout;
  GstElementClass *element_class;
  GstPadTemplate *pad_template;
  GstCaps *caps, *in_caps;
  guint64 channel_mask = 0;
  GstAudioChannelPosition *pos = osxsink->channel_positions;

  /* First collect info about the HW capabilities and preferences */
  spdif_allowed =
      gst_core_audio_audio_device_is_spdif_avail (osxsink->device_id);
  layout = gst_core_audio_audio_device_get_channel_layout (osxsink->device_id);

  GST_DEBUG_OBJECT (osxsink, "Selected device ID: %u SPDIF allowed: %d",
      (unsigned) osxsink->device_id, spdif_allowed);

  if (layout) {
    channels = MIN (layout->mNumberChannelDescriptions,
        GST_OSX_AUDIO_MAX_CHANNEL);
  } else {
    GST_WARNING_OBJECT (osxsink, "This driver does not support "
        "kAudioDevicePropertyPreferredChannelLayout.");
    channels = 2;
  }

  switch (channels) {
    case 0:
      pos[0] = GST_AUDIO_CHANNEL_POSITION_NONE;
      break;
    case 1:
      pos[0] = GST_AUDIO_CHANNEL_POSITION_MONO;
      break;
    case 2:
      pos[0] = GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT;
      pos[1] = GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT;
      channel_mask |= GST_AUDIO_CHANNEL_POSITION_MASK (FRONT_LEFT);
      channel_mask |= GST_AUDIO_CHANNEL_POSITION_MASK (FRONT_RIGHT);
      break;
    default:
      channels = MIN (layout->mNumberChannelDescriptions,
          GST_OSX_AUDIO_MAX_CHANNEL);
      for (i = 0; i < channels; i++) {
        switch (layout->mChannelDescriptions[i].mChannelLabel) {
          case kAudioChannelLabel_Left:
            pos[i] = GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT;
            break;
          case kAudioChannelLabel_Right:
            pos[i] = GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT;
            break;
          case kAudioChannelLabel_Center:
            pos[i] = GST_AUDIO_CHANNEL_POSITION_FRONT_CENTER;
            break;
          case kAudioChannelLabel_LFEScreen:
            pos[i] = GST_AUDIO_CHANNEL_POSITION_LFE1;
            break;
          case kAudioChannelLabel_LeftSurround:
            pos[i] = GST_AUDIO_CHANNEL_POSITION_REAR_LEFT;
            break;
          case kAudioChannelLabel_RightSurround:
            pos[i] = GST_AUDIO_CHANNEL_POSITION_REAR_RIGHT;
            break;
          case kAudioChannelLabel_RearSurroundLeft:
            pos[i] = GST_AUDIO_CHANNEL_POSITION_SIDE_LEFT;
            break;
          case kAudioChannelLabel_RearSurroundRight:
            pos[i] = GST_AUDIO_CHANNEL_POSITION_SIDE_RIGHT;
            break;
          case kAudioChannelLabel_CenterSurround:
            pos[i] = GST_AUDIO_CHANNEL_POSITION_REAR_CENTER;
            break;
          default:
            GST_WARNING_OBJECT (osxsink, "unrecognized channel: %d",
                (int) layout->mChannelDescriptions[i].mChannelLabel);
            channel_mask = 0;
            channels = 2;
            break;
        }
      }
  }
  g_free (layout);

  /* Recover the template caps */
  element_class = GST_ELEMENT_GET_CLASS (osxsink);
  pad_template = gst_element_class_get_pad_template (element_class, "sink");
  in_caps = gst_pad_template_get_caps (pad_template);

  /* Create the allowed subset */
  caps = gst_caps_new_empty ();
  for (i = 0; i < gst_caps_get_size (in_caps); i++) {
    GstStructure *in_s, *out_s;

    in_s = gst_caps_get_structure (in_caps, i);

    if (gst_structure_has_name (in_s, "audio/x-ac3") ||
        gst_structure_has_name (in_s, "audio/x-dts")) {
      if (spdif_allowed) {
        gst_caps_append_structure (caps, gst_structure_copy (in_s));
      }
    }
    gst_audio_channel_positions_to_mask (pos, channels, FALSE, &channel_mask);
    out_s = gst_structure_copy (in_s);
    gst_structure_remove_fields (out_s, "channels", "channel-mask", NULL);
    gst_structure_set (out_s, "channels", G_TYPE_INT, channels,
        "channel-mask", GST_TYPE_BITMASK, channel_mask, NULL);
    gst_caps_append_structure (caps, out_s);
  }

  if (osxsink->cached_caps) {
    gst_caps_unref (osxsink->cached_caps);
  }

  osxsink->cached_caps = caps;
  osxsink->channels = channels;

  return TRUE;
}
static gboolean
sink_setcaps(GstPad *pad, GstCaps *caps)
{
	GstDspVEnc *self;
	GstDspBase *base;
	GstStructure *in_struc;
	GstCaps *out_caps;
	GstStructure *out_struc;
	gint width = 0, height = 0;
	GstCaps *allowed_caps;
	gint tgt_level = -1;
	struct td_codec *codec;

	self = GST_DSP_VENC(GST_PAD_PARENT(pad));
	base = GST_DSP_BASE(self);
	codec = base->codec;

	if (!codec)
		return FALSE;

#ifdef DEBUG
	{
		gchar *str = gst_caps_to_string(caps);
		pr_info(self, "sink caps: %s", str);
		g_free(str);
	}
#endif

	in_struc = gst_caps_get_structure(caps, 0);

	out_caps = gst_caps_new_empty();

	switch (base->alg) {
	case GSTDSP_JPEGENC:
		out_struc = gst_structure_new("image/jpeg", NULL);
		break;
	case GSTDSP_H263ENC:
		out_struc = gst_structure_new("video/x-h263",
					      "variant", G_TYPE_STRING, "itu",
					      NULL);
		break;
	case GSTDSP_MP4VENC:
		out_struc = gst_structure_new("video/mpeg",
					      "mpegversion", G_TYPE_INT, 4,
					      "systemstream", G_TYPE_BOOLEAN, FALSE,
					      NULL);
		break;
	case GSTDSP_H264ENC:
		out_struc = gst_structure_new("video/x-h264",
					      "alignment", G_TYPE_STRING, "au",
					      NULL);
		break;
	default:
		return FALSE;
	}

	if (gst_structure_get_int(in_struc, "width", &width))
		gst_structure_set(out_struc, "width", G_TYPE_INT, width, NULL);
	if (gst_structure_get_int(in_struc, "height", &height))
		gst_structure_set(out_struc, "height", G_TYPE_INT, height, NULL);
	gst_structure_get_fourcc(in_struc, "format", &self->color_format);

	switch (base->alg) {
	case GSTDSP_H263ENC:
	case GSTDSP_MP4VENC:
	case GSTDSP_H264ENC:
		base->output_buffer_size = width * height / 2;
		break;
	case GSTDSP_JPEGENC:
		if (width % 2 || height % 2)
			return FALSE;
		if (self->color_format == GST_MAKE_FOURCC('I', '4', '2', '0'))
			base->input_buffer_size =
				ROUND_UP(width, 16) * ROUND_UP(height, 16) * 3 / 2;
		else
			base->input_buffer_size =
				ROUND_UP(width, 16) * ROUND_UP(height, 16) * 2;
		base->output_buffer_size = width * height;
		if (self->quality < 10)
			base->output_buffer_size /= 10;
		else if (self->quality < 100)
			base->output_buffer_size /= (100 / self->quality);
		break;
	default:
		break;
	}

	if (base->node)
		goto skip_setup;

	switch (base->alg) {
	case GSTDSP_JPEGENC:
		du_port_alloc_buffers(base->ports[0], 1);
#if SN_API > 1
		du_port_alloc_buffers(base->ports[1], 2);
#else
		/* old versions of the sn can't handle 2 buffers */
		/*
		 * Some constrained pipelines might starve because of this. You
		 * might want to try enable-last-buffer=false on some sinks.
		 * TODO Is there any way around this?
		 */
		du_port_alloc_buffers(base->ports[1], 1);
#endif
		break;
	default:
		du_port_alloc_buffers(base->ports[0], 2);
		du_port_alloc_buffers(base->ports[1], 4);
		break;
	}

skip_setup:
	self->width = width;
	self->height = height;

	{
		const GValue *framerate = NULL;
		framerate = gst_structure_get_value(in_struc, "framerate");
		if (framerate) {
			gst_structure_set_value(out_struc, "framerate", framerate);
			/* calculate nearest integer */
			self->framerate =
				(gst_value_get_fraction_numerator(framerate) * 2 /
				 gst_value_get_fraction_denominator(framerate) + 1) / 2;
		}
	}

	/* see if downstream caps express something */
	allowed_caps = gst_pad_get_allowed_caps(base->srcpad);
	if (allowed_caps) {
		if (gst_caps_get_size(allowed_caps) > 0) {
			GstStructure *s;
			s = gst_caps_get_structure(allowed_caps, 0);
			gst_structure_get_int(s, "level", &tgt_level);
			if (base->alg == GSTDSP_H264ENC) {
				const char *stream_format;
				stream_format = gst_structure_get_string(s, "stream-format");
				if (stream_format && !strcmp(stream_format, "avc"))
					self->priv.h264.bytestream = false;
				else
					stream_format = "byte-stream";
				gst_structure_set(out_struc, "stream-format",
						  G_TYPE_STRING, stream_format, NULL);
			}
		}
		gst_caps_unref(allowed_caps);
	}

	check_supported_levels(self, tgt_level);

	if (self->bitrate == 0)
		self->bitrate = self->max_bitrate;
	else if (self->bitrate > self->max_bitrate)
		self->bitrate = self->max_bitrate;

	gst_caps_append_structure(out_caps, out_struc);

#ifdef DEBUG
	{
		gchar *str = gst_caps_to_string(out_caps);
		pr_info(self, "src caps: %s", str);
		g_free(str);
	}
#endif

	if (!gst_pad_take_caps(base->srcpad, out_caps))
		return FALSE;

	if (base->node)
		return TRUE;

	base->node = create_node(self);
	if (!base->node) {
		pr_err(self, "dsp node creation failed");
		return FALSE;
	}

	if (codec->setup_params)
		codec->setup_params(base);

	if (!gstdsp_start(base)) {
		pr_err(self, "dsp start failed");
		return FALSE;
	}

	if (codec->send_params)
		codec->send_params(base, base->node);

	return TRUE;
}
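/* The JPEG input-buffer computation above rounds frame dimensions up to
 * multiples of 16 (one macroblock). ROUND_UP is defined elsewhere in the
 * tree; a minimal sketch, assuming the usual power-of-two alignment idiom: */
#define ROUND_UP(num, scale) (((num) + ((scale) - 1)) & ~((scale) - 1))
/* e.g. ROUND_UP(354, 16) == 368, so a 354x288 I420 frame would need
 * 368 * 288 * 3 / 2 bytes of input buffer. */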
static gboolean
gst_video_flip_src_event (GstBaseTransform * trans, GstEvent * event)
{
  GstVideoFlip *vf = GST_VIDEO_FLIP (trans);
  gdouble new_x, new_y, x, y;
  GstStructure *structure;
  gboolean ret;

  GST_DEBUG_OBJECT (vf, "handling %s event", GST_EVENT_TYPE_NAME (event));

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_NAVIGATION:
      event =
          GST_EVENT (gst_mini_object_make_writable (GST_MINI_OBJECT (event)));

      structure = (GstStructure *) gst_event_get_structure (event);
      if (gst_structure_get_double (structure, "pointer_x", &x) &&
          gst_structure_get_double (structure, "pointer_y", &y)) {
        GST_DEBUG_OBJECT (vf, "converting %fx%f", x, y);
        switch (vf->method) {
          case GST_VIDEO_FLIP_METHOD_90R:
            new_x = y;
            new_y = vf->to_width - x;
            break;
          case GST_VIDEO_FLIP_METHOD_90L:
            new_x = vf->to_height - y;
            new_y = x;
            break;
          case GST_VIDEO_FLIP_METHOD_OTHER:
            new_x = vf->to_height - y;
            new_y = vf->to_width - x;
            break;
          case GST_VIDEO_FLIP_METHOD_TRANS:
            new_x = y;
            new_y = x;
            break;
          case GST_VIDEO_FLIP_METHOD_180:
            new_x = vf->to_width - x;
            new_y = vf->to_height - y;
            break;
          case GST_VIDEO_FLIP_METHOD_HORIZ:
            new_x = vf->to_width - x;
            new_y = y;
            break;
          case GST_VIDEO_FLIP_METHOD_VERT:
            new_x = x;
            new_y = vf->to_height - y;
            break;
          default:
            new_x = x;
            new_y = y;
            break;
        }
        GST_DEBUG_OBJECT (vf, "to %fx%f", new_x, new_y);
        gst_structure_set (structure, "pointer_x", G_TYPE_DOUBLE, new_x,
            "pointer_y", G_TYPE_DOUBLE, new_y, NULL);
      }
      break;
    default:
      break;
  }

  ret = GST_BASE_TRANSFORM_CLASS (parent_class)->src_event (trans, event);

  return ret;
}
/**
 * gst_base_video_encoder_finish_frame:
 * @base_video_encoder: a #GstBaseVideoEncoder
 * @frame: an encoded #GstVideoFrame
 *
 * @frame must have a valid encoded data buffer, whose metadata fields
 * are then appropriately set according to frame data, or no buffer at
 * all if the frame should be dropped.
 * The buffer is subsequently pushed downstream or provided to @shape_output.
 * In any case, the frame is considered finished and released.
 *
 * Returns: a #GstFlowReturn resulting from sending data downstream
 */
GstFlowReturn
gst_base_video_encoder_finish_frame (GstBaseVideoEncoder * base_video_encoder,
    GstVideoFrame * frame)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstBaseVideoEncoderClass *base_video_encoder_class;
  GList *l;

  base_video_encoder_class =
      GST_BASE_VIDEO_ENCODER_GET_CLASS (base_video_encoder);

  GST_LOG_OBJECT (base_video_encoder, "finish frame fpn %d",
      frame->presentation_frame_number);

  /* FIXME get rid of this ?
   * seems a roundabout way that adds little benefit to simply get
   * and subsequently set. subclass is adult enough to set_caps itself ...
   * so simply check/ensure/assert that src pad caps are set by now */
  if (!base_video_encoder->set_output_caps) {
    if (!GST_PAD_CAPS (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder))) {
      GstCaps *caps;

      if (base_video_encoder_class->get_caps) {
        caps = base_video_encoder_class->get_caps (base_video_encoder);
      } else {
        caps = gst_caps_new_simple ("video/unknown", NULL);
      }
      GST_DEBUG_OBJECT (base_video_encoder, "src caps %" GST_PTR_FORMAT, caps);
      gst_pad_set_caps (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder),
          caps);
      gst_caps_unref (caps);
    }
    base_video_encoder->set_output_caps = TRUE;
  }

  /* Push all pending events that arrived before this frame */
  for (l = base_video_encoder->base_video_codec.frames; l; l = l->next) {
    GstVideoFrame *tmp = l->data;

    if (tmp->events) {
      GList *k;

      for (k = g_list_last (tmp->events); k; k = k->prev)
        gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder),
            k->data);
      g_list_free (tmp->events);
      tmp->events = NULL;
    }

    if (tmp == frame)
      break;
  }

  if (frame->force_keyframe) {
    GstClockTime stream_time;
    GstClockTime running_time;
    GstEvent *ev;

    running_time =
        gst_segment_to_running_time (&GST_BASE_VIDEO_CODEC
        (base_video_encoder)->segment, GST_FORMAT_TIME,
        frame->presentation_timestamp);
    stream_time =
        gst_segment_to_stream_time (&GST_BASE_VIDEO_CODEC
        (base_video_encoder)->segment, GST_FORMAT_TIME,
        frame->presentation_timestamp);

    /* re-use upstream event if any so it also conveys any additional
     * info upstream arranged in there */
    GST_OBJECT_LOCK (base_video_encoder);
    if (base_video_encoder->force_keyunit_event) {
      ev = base_video_encoder->force_keyunit_event;
      base_video_encoder->force_keyunit_event = NULL;
    } else {
      ev = gst_event_new_custom (GST_EVENT_CUSTOM_DOWNSTREAM,
          gst_structure_new ("GstForceKeyUnit", NULL));
    }
    GST_OBJECT_UNLOCK (base_video_encoder);

    gst_structure_set (ev->structure,
        "timestamp", G_TYPE_UINT64, frame->presentation_timestamp,
        "stream-time", G_TYPE_UINT64, stream_time,
        "running-time", G_TYPE_UINT64, running_time, NULL);

    gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder), ev);
  }

  /* no buffer data means this frame is skipped/dropped */
  if (!frame->src_buffer) {
    GST_DEBUG_OBJECT (base_video_encoder, "skipping frame %" GST_TIME_FORMAT,
        GST_TIME_ARGS (frame->presentation_timestamp));
    goto done;
  }

  if (frame->is_sync_point) {
    GST_LOG_OBJECT (base_video_encoder, "key frame");
    base_video_encoder->distance_from_sync = 0;
    GST_BUFFER_FLAG_UNSET (frame->src_buffer, GST_BUFFER_FLAG_DELTA_UNIT);
  } else {
    GST_BUFFER_FLAG_SET (frame->src_buffer, GST_BUFFER_FLAG_DELTA_UNIT);
  }

  frame->distance_from_sync = base_video_encoder->distance_from_sync;
  base_video_encoder->distance_from_sync++;

  frame->decode_frame_number = frame->system_frame_number - 1;
  if (frame->decode_frame_number < 0) {
    frame->decode_timestamp = 0;
  } else {
    frame->decode_timestamp =
        gst_util_uint64_scale (frame->decode_frame_number,
        GST_SECOND * GST_BASE_VIDEO_CODEC (base_video_encoder)->state.fps_d,
        GST_BASE_VIDEO_CODEC (base_video_encoder)->state.fps_n);
  }

  GST_BUFFER_TIMESTAMP (frame->src_buffer) = frame->presentation_timestamp;
  GST_BUFFER_DURATION (frame->src_buffer) = frame->presentation_duration;
  GST_BUFFER_OFFSET (frame->src_buffer) = frame->decode_timestamp;

  /* update rate estimate */
  GST_BASE_VIDEO_CODEC (base_video_encoder)->bytes +=
      GST_BUFFER_SIZE (frame->src_buffer);
  if (GST_CLOCK_TIME_IS_VALID (frame->presentation_duration)) {
    GST_BASE_VIDEO_CODEC (base_video_encoder)->time +=
        frame->presentation_duration;
  } else {
    /* better none than nothing valid */
    GST_BASE_VIDEO_CODEC (base_video_encoder)->time = GST_CLOCK_TIME_NONE;
  }

  if (G_UNLIKELY (GST_BASE_VIDEO_CODEC (base_video_encoder)->discont)) {
    GST_LOG_OBJECT (base_video_encoder, "marking discont");
    GST_BUFFER_FLAG_SET (frame->src_buffer, GST_BUFFER_FLAG_DISCONT);
    GST_BASE_VIDEO_CODEC (base_video_encoder)->discont = FALSE;
  }

  gst_buffer_set_caps (GST_BUFFER (frame->src_buffer),
      GST_PAD_CAPS (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder)));

  if (base_video_encoder_class->shape_output) {
    ret = base_video_encoder_class->shape_output (base_video_encoder, frame);
  } else {
    ret = gst_pad_push (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder),
        frame->src_buffer);
  }
  frame->src_buffer = NULL;

done:
  /* handed out */
  GST_BASE_VIDEO_CODEC (base_video_encoder)->frames =
      g_list_remove (GST_BASE_VIDEO_CODEC (base_video_encoder)->frames, frame);

  gst_base_video_codec_free_frame (frame);

  return ret;
}
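/* Typical call site, as a hedged sketch: a subclass's handle_frame vmethod
 * encodes the input, attaches the output buffer to the frame and hands it
 * back to the base class. GstFooEnc, gst_foo_enc_encode and
 * gst_foo_enc_last_was_keyframe are illustrative names, not part of the
 * library. */
static GstFlowReturn
gst_foo_enc_handle_frame (GstBaseVideoEncoder * encoder, GstVideoFrame * frame)
{
  GstFooEnc *enc = GST_FOO_ENC (encoder);

  /* run the actual encoder on the input buffer */
  frame->src_buffer = gst_foo_enc_encode (enc, frame->sink_buffer);
  frame->is_sync_point = gst_foo_enc_last_was_keyframe (enc);

  /* leaving src_buffer as NULL would drop/skip the frame instead */
  return gst_base_video_encoder_finish_frame (encoder, frame);
}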
static gboolean
gst_aravis_set_caps (GstBaseSrc *src, GstCaps *caps)
{
  GstAravis* gst_aravis = GST_ARAVIS(src);
  GstStructure *structure;
  ArvPixelFormat pixel_format;
  int height, width;
  int bpp, depth;
  const GValue *frame_rate;
  const char *caps_string;
  unsigned int i;
  guint32 fourcc;

  GST_LOG_OBJECT (gst_aravis, "Requested caps = %" GST_PTR_FORMAT, caps);

  arv_camera_stop_acquisition (gst_aravis->camera);

  if (gst_aravis->stream != NULL)
    g_object_unref (gst_aravis->stream);

  structure = gst_caps_get_structure (caps, 0);

  gst_structure_get_int (structure, "width", &width);
  gst_structure_get_int (structure, "height", &height);
  frame_rate = gst_structure_get_value (structure, "framerate");
  gst_structure_get_int (structure, "bpp", &bpp);
  gst_structure_get_int (structure, "depth", &depth);

  if (gst_structure_get_field_type (structure, "format") == G_TYPE_STRING) {
    const char *string;

    string = gst_structure_get_string (structure, "format");
    fourcc = GST_STR_FOURCC (string);
  } else if (gst_structure_get_field_type (structure, "format") ==
             GST_TYPE_FOURCC) {
    gst_structure_get_fourcc (structure, "format", &fourcc);
  } else
    fourcc = 0;

  pixel_format = arv_pixel_format_from_gst_caps
      (gst_structure_get_name (structure), bpp, depth, fourcc);

  arv_camera_set_region (gst_aravis->camera, gst_aravis->offset_x,
                         gst_aravis->offset_y, width, height);
  arv_camera_set_binning (gst_aravis->camera, gst_aravis->h_binning,
                          gst_aravis->v_binning);
  arv_camera_set_pixel_format (gst_aravis->camera, pixel_format);

  if (frame_rate != NULL) {
    double dbl_frame_rate;

    dbl_frame_rate = (double) gst_value_get_fraction_numerator (frame_rate) /
        (double) gst_value_get_fraction_denominator (frame_rate);

    GST_DEBUG_OBJECT (gst_aravis, "Frame rate = %g Hz", dbl_frame_rate);
    arv_camera_set_frame_rate (gst_aravis->camera, dbl_frame_rate);

    if (dbl_frame_rate > 0.0)
      gst_aravis->buffer_timeout_us = MAX (GST_ARAVIS_BUFFER_TIMEOUT_DEFAULT,
                                           3e6 / dbl_frame_rate);
    else
      gst_aravis->buffer_timeout_us = GST_ARAVIS_BUFFER_TIMEOUT_DEFAULT;
  } else
    gst_aravis->buffer_timeout_us = GST_ARAVIS_BUFFER_TIMEOUT_DEFAULT;

  GST_DEBUG_OBJECT (gst_aravis, "Buffer timeout = %" G_GUINT64_FORMAT " µs",
                    gst_aravis->buffer_timeout_us);

  GST_DEBUG_OBJECT (gst_aravis, "Actual frame rate = %g Hz",
                    arv_camera_get_frame_rate (gst_aravis->camera));

  if (gst_aravis->gain_auto) {
    arv_camera_set_gain_auto (gst_aravis->camera, ARV_AUTO_CONTINUOUS);
    GST_DEBUG_OBJECT (gst_aravis, "Auto Gain = continuous");
  } else {
    if (gst_aravis->gain >= 0) {
      GST_DEBUG_OBJECT (gst_aravis, "Gain = %d", gst_aravis->gain);
      arv_camera_set_gain_auto (gst_aravis->camera, ARV_AUTO_OFF);
      arv_camera_set_gain (gst_aravis->camera, gst_aravis->gain);
    }
    GST_DEBUG_OBJECT (gst_aravis, "Actual gain = %d",
                      arv_camera_get_gain (gst_aravis->camera));
  }

  if (gst_aravis->exposure_auto) {
    arv_camera_set_exposure_time_auto (gst_aravis->camera,
                                       ARV_AUTO_CONTINUOUS);
    GST_DEBUG_OBJECT (gst_aravis, "Auto Exposure = continuous");
  } else {
    if (gst_aravis->exposure_time_us > 0.0) {
      GST_DEBUG_OBJECT (gst_aravis, "Exposure = %g µs",
                        gst_aravis->exposure_time_us);
      arv_camera_set_exposure_time_auto (gst_aravis->camera, ARV_AUTO_OFF);
      arv_camera_set_exposure_time (gst_aravis->camera,
                                    gst_aravis->exposure_time_us);
    }
    GST_DEBUG_OBJECT (gst_aravis, "Actual exposure = %g µs",
                      arv_camera_get_exposure_time (gst_aravis->camera));
  }

  if (gst_aravis->fixed_caps != NULL)
    gst_caps_unref (gst_aravis->fixed_caps);

  caps_string = arv_pixel_format_to_gst_caps_string (pixel_format);
  if (caps_string != NULL) {
    GstStructure *structure;
    GstCaps *caps;

    caps = gst_caps_new_empty ();
    structure = gst_structure_from_string (caps_string, NULL);
    gst_structure_set (structure,
                       "width", G_TYPE_INT, width,
                       "height", G_TYPE_INT, height,
                       NULL);

    if (frame_rate != NULL)
      gst_structure_set_value (structure, "framerate", frame_rate);

    gst_caps_append_structure (caps, structure);

    gst_aravis->fixed_caps = caps;
  } else
    gst_aravis->fixed_caps = NULL;

  gst_aravis->payload = arv_camera_get_payload (gst_aravis->camera);
  gst_aravis->stream = arv_camera_create_stream (gst_aravis->camera,
                                                 NULL, NULL);

  for (i = 0; i < GST_ARAVIS_N_BUFFERS; i++)
    arv_stream_push_buffer (gst_aravis->stream,
                            arv_buffer_new (gst_aravis->payload, NULL));

  GST_LOG_OBJECT (gst_aravis, "Start acquisition");
  arv_camera_start_acquisition (gst_aravis->camera);

  gst_aravis->timestamp_offset = 0;
  gst_aravis->last_timestamp = 0;

  return TRUE;
}
GstCaps *
gst_dshow_new_video_caps (GstVideoFormat video_format, const gchar * name,
    GstCapturePinMediaType * pin_mediatype)
{
  GstCaps *video_caps = NULL;
  GstStructure *video_structure = NULL;
  gint min_w, max_w;
  gint min_h, max_h;
  gint min_fr, max_fr;

  /* raw video format */
  switch (video_format) {
    case GST_VIDEO_FORMAT_BGR:
      video_caps = gst_caps_from_string (GST_VIDEO_CAPS_BGR);
      break;
    case GST_VIDEO_FORMAT_I420:
      video_caps = gst_caps_from_string (GST_VIDEO_CAPS_YUV ("I420"));
      break;
    case GST_VIDEO_FORMAT_YUY2:
      video_caps = gst_caps_from_string (GST_VIDEO_CAPS_YUV ("YUY2"));
      break;
    default:
      break;
  }

  /* other video format */
  if (!video_caps) {
    if (g_ascii_strncasecmp (name, "video/x-dv, systemstream=FALSE", 31) == 0) {
      video_caps = gst_caps_new_simple ("video/x-dv",
          "systemstream", G_TYPE_BOOLEAN, FALSE,
          "format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('d', 'v', 's', 'd'),
          NULL);
    } else if (g_ascii_strncasecmp (name, "video/x-dv, systemstream=TRUE",
            31) == 0) {
      video_caps = gst_caps_new_simple ("video/x-dv",
          "systemstream", G_TYPE_BOOLEAN, TRUE, NULL);
      return video_caps;
    }
  }

  if (!video_caps)
    return NULL;

  video_structure = gst_caps_get_structure (video_caps, 0);

  /* Hope GST_TYPE_INT_RANGE_STEP will exist in future gstreamer releases,
   * because then we could use:
   *   "width", GST_TYPE_INT_RANGE_STEP, video_default->minWidth,
   *       video_default->maxWidth, video_default->granularityWidth
   * instead of:
   *   "width", GST_TYPE_INT_RANGE, video_default->minWidth,
   *       video_default->maxWidth
   * For framerate we do not need a step (granularity) because
   * "The IAMStreamConfig::SetFormat method will set the frame rate to the
   * closest value that the filter supports", as stated in the
   * VIDEO_STREAM_CONFIG_CAPS dshow documentation. */
  min_w = pin_mediatype->vscc.MinOutputSize.cx;
  max_w = pin_mediatype->vscc.MaxOutputSize.cx;
  min_h = pin_mediatype->vscc.MinOutputSize.cy;
  max_h = pin_mediatype->vscc.MaxOutputSize.cy;
  min_fr = (gint) (10000000 / pin_mediatype->vscc.MaxFrameInterval);
  max_fr = (gint) (10000000 / pin_mediatype->vscc.MinFrameInterval);

  if (min_w == max_w)
    gst_structure_set (video_structure, "width", G_TYPE_INT, min_w, NULL);
  else
    gst_structure_set (video_structure,
        "width", GST_TYPE_INT_RANGE, min_w, max_w, NULL);

  if (min_h == max_h)
    gst_structure_set (video_structure, "height", G_TYPE_INT, min_h, NULL);
  else
    gst_structure_set (video_structure,
        "height", GST_TYPE_INT_RANGE, min_h, max_h, NULL);

  if (min_fr == max_fr)
    gst_structure_set (video_structure, "framerate",
        GST_TYPE_FRACTION, min_fr, 1, NULL);
  else
    gst_structure_set (video_structure, "framerate",
        GST_TYPE_FRACTION_RANGE, min_fr, 1, max_fr, 1, NULL);

  return video_caps;
}
static GstCaps *
gst_rtp_h263p_pay_sink_getcaps (GstRTPBasePayload * payload, GstPad * pad,
    GstCaps * filter)
{
  GstRtpH263PPay *rtph263ppay;
  GstCaps *caps = NULL, *templ;
  GstCaps *peercaps = NULL;
  GstCaps *intersect = NULL;
  guint i;

  rtph263ppay = GST_RTP_H263P_PAY (payload);

  peercaps =
      gst_pad_peer_query_caps (GST_RTP_BASE_PAYLOAD_SRCPAD (payload), filter);
  if (!peercaps)
    return
        gst_pad_get_pad_template_caps (GST_RTP_BASE_PAYLOAD_SINKPAD (payload));

  templ = gst_pad_get_pad_template_caps (GST_RTP_BASE_PAYLOAD_SRCPAD (payload));
  intersect = gst_caps_intersect (peercaps, templ);
  gst_caps_unref (peercaps);
  gst_caps_unref (templ);

  if (gst_caps_is_empty (intersect))
    return intersect;

  caps = gst_caps_new_empty ();
  for (i = 0; i < gst_caps_get_size (intersect); i++) {
    GstStructure *s = gst_caps_get_structure (intersect, i);
    const gchar *encoding_name = gst_structure_get_string (s, "encoding-name");

    if (encoding_name && !strcmp (encoding_name, "H263-2000")) {
      const gchar *profile_str = gst_structure_get_string (s, "profile");
      const gchar *level_str = gst_structure_get_string (s, "level");
      int profile = 0;
      int level = 0;

      if (profile_str && level_str) {
        gboolean i = FALSE, j = FALSE, l = FALSE, t = FALSE, f = FALSE,
            v = FALSE;
        GstStructure *new_s = gst_structure_new ("video/x-h263",
            "variant", G_TYPE_STRING, "itu",
            NULL);

        profile = atoi (profile_str);
        level = atoi (level_str);

        /* These profiles are defined in the H.263 Annex X */
        switch (profile) {
          case 0:
            /* The Baseline Profile (Profile 0) */
            break;
          case 1:
            /* H.320 Coding Efficiency Version 2 Backward-Compatibility
             * Profile (Profile 1)
             * Baseline + Annexes I, J, L.4 and T
             */
            i = j = l = t = TRUE;
            break;
          case 2:
            /* Version 1 Backward-Compatibility Profile (Profile 2)
             * Baseline + Annex F
             */
            i = j = l = t = f = TRUE;
            break;
          case 3:
            /* Version 2 Interactive and Streaming Wireless Profile
             * Baseline + Annexes I, J, T
             */
            i = j = t = TRUE;
            break;
          case 4:
            /* Version 3 Interactive and Streaming Wireless Profile (Profile 4)
             * Baseline + Annexes I, J, T, V, W.6.3.8
             */
            /* Missing W.6.3.8 */
            i = j = t = v = TRUE;
            break;
          case 5:
            /* Conversational High Compression Profile (Profile 5)
             * Baseline + Annexes F, I, J, L.4, T, D, U
             */
            /* Missing D, U */
            f = i = j = l = t = TRUE;
            break;
          case 6:
            /* Conversational Internet Profile (Profile 6)
             * Baseline + Annexes F, I, J, L.4, T, D, U and
             * K with arbitrary slice ordering
             */
            /* Missing D, U, K with arbitrary slice ordering */
            f = i = j = l = t = TRUE;
            break;
          case 7:
            /* Conversational Interlace Profile (Profile 7)
             * Baseline + Annexes F, I, J, L.4, T, D, U, W.6.3.11
             */
            /* Missing D, U, W.6.3.11 */
            f = i = j = l = t = TRUE;
            break;
          case 8:
            /* High Latency Profile (Profile 8)
             * Baseline + Annexes F, I, J, L.4, T, D, U, P.5, O.1.1 and
             * K with arbitrary slice ordering
             */
            /* Missing D, U, P.5, O.1.1 */
            f = i = j = l = t = TRUE;
            break;
        }

        if (f || i || j || t || l || v) {
          GValue list = { 0 };
          GValue vstr = { 0 };

          g_value_init (&list, GST_TYPE_LIST);
          g_value_init (&vstr, G_TYPE_STRING);

          g_value_set_static_string (&vstr, "h263");
          gst_value_list_append_value (&list, &vstr);
          g_value_set_static_string (&vstr, "h263p");
          gst_value_list_append_value (&list, &vstr);

          if (l || v) {
            g_value_set_static_string (&vstr, "h263pp");
            gst_value_list_append_value (&list, &vstr);
          }
          g_value_unset (&vstr);

          gst_structure_set_value (new_s, "h263version", &list);
          g_value_unset (&list);
        } else {
          gst_structure_set (new_s, "h263version", G_TYPE_STRING, "h263",
              NULL);
        }

        if (!f)
          gst_structure_set (new_s, "annex-f", G_TYPE_BOOLEAN, FALSE, NULL);
        if (!i)
          gst_structure_set (new_s, "annex-i", G_TYPE_BOOLEAN, FALSE, NULL);
        if (!j)
          gst_structure_set (new_s, "annex-j", G_TYPE_BOOLEAN, FALSE, NULL);
        if (!t)
          gst_structure_set (new_s, "annex-t", G_TYPE_BOOLEAN, FALSE, NULL);
        if (!l)
          gst_structure_set (new_s, "annex-l", G_TYPE_BOOLEAN, FALSE, NULL);
        if (!v)
          gst_structure_set (new_s, "annex-v", G_TYPE_BOOLEAN, FALSE, NULL);

        if (level <= 10 || level == 45) {
          gst_structure_set (new_s,
              "width", GST_TYPE_INT_RANGE, 1, 176,
              "height", GST_TYPE_INT_RANGE, 1, 144,
              "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, 30000, 2002, NULL);
          caps = gst_caps_merge_structure (caps, new_s);
        } else if (level <= 20) {
          GstStructure *s_copy = gst_structure_copy (new_s);

          gst_structure_set (new_s,
              "width", GST_TYPE_INT_RANGE, 1, 352,
              "height", GST_TYPE_INT_RANGE, 1, 288,
              "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, 30000, 2002, NULL);
          caps = gst_caps_merge_structure (caps, new_s);

          gst_structure_set (s_copy,
              "width", GST_TYPE_INT_RANGE, 1, 176,
              "height", GST_TYPE_INT_RANGE, 1, 144,
              "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, 30000, 1001, NULL);
          caps = gst_caps_merge_structure (caps, s_copy);
        } else if (level <= 40) {
          gst_structure_set (new_s,
              "width", GST_TYPE_INT_RANGE, 1, 352,
              "height", GST_TYPE_INT_RANGE, 1, 288,
              "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, 30000, 1001, NULL);
          caps = gst_caps_merge_structure (caps, new_s);
        } else if (level <= 50) {
          GstStructure *s_copy = gst_structure_copy (new_s);

          gst_structure_set (new_s,
              "width", GST_TYPE_INT_RANGE, 1, 352,
              "height", GST_TYPE_INT_RANGE, 1, 288,
              "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, 50, 1, NULL);
          caps = gst_caps_merge_structure (caps, new_s);

          gst_structure_set (s_copy,
              "width", GST_TYPE_INT_RANGE, 1, 352,
              "height", GST_TYPE_INT_RANGE, 1, 240,
              "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, 60000, 1001, NULL);
          caps = gst_caps_merge_structure (caps, s_copy);
        } else if (level <= 60) {
          GstStructure *s_copy = gst_structure_copy (new_s);

          gst_structure_set (new_s,
              "width", GST_TYPE_INT_RANGE, 1, 720,
              "height", GST_TYPE_INT_RANGE, 1, 288,
              "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, 50, 1, NULL);
          caps = gst_caps_merge_structure (caps, new_s);

          gst_structure_set (s_copy,
              "width", GST_TYPE_INT_RANGE, 1, 720,
              "height", GST_TYPE_INT_RANGE, 1, 240,
              "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, 60000, 1001, NULL);
          caps = gst_caps_merge_structure (caps, s_copy);
        } else if (level <= 70) {
          GstStructure *s_copy = gst_structure_copy (new_s);

          gst_structure_set (new_s,
              "width", GST_TYPE_INT_RANGE, 1, 720,
              "height", GST_TYPE_INT_RANGE, 1, 576,
              "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, 50, 1, NULL);
          caps = gst_caps_merge_structure (caps, new_s);

          gst_structure_set (s_copy,
              "width", GST_TYPE_INT_RANGE, 1, 720,
              "height", GST_TYPE_INT_RANGE, 1, 480,
              "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, 60000, 1001, NULL);
          caps = gst_caps_merge_structure (caps, s_copy);
        } else {
          caps = gst_caps_merge_structure (caps, new_s);
        }
      } else {
        GstStructure *new_s = gst_structure_new ("video/x-h263",
            "variant", G_TYPE_STRING, "itu",
            "h263version", G_TYPE_STRING, "h263",
            NULL);

        GST_DEBUG_OBJECT (rtph263ppay, "No profile or level specified"
            " for H263-2000, defaulting to baseline H263");

        caps = gst_caps_merge_structure (caps, new_s);
      }
    } else {
      gboolean f = FALSE, i = FALSE, j = FALSE, t = FALSE;
      /* FIXME: ffmpeg supports Appendix K too, how do we express it?
       * guint k;
       */
      const gchar *str;
      GstStructure *new_s = gst_structure_new ("video/x-h263",
          "variant", G_TYPE_STRING, "itu",
          NULL);
      gboolean added = FALSE;

      str = gst_structure_get_string (s, "f");
      if (str && !strcmp (str, "1"))
        f = TRUE;

      str = gst_structure_get_string (s, "i");
      if (str && !strcmp (str, "1"))
        i = TRUE;

      str = gst_structure_get_string (s, "j");
      if (str && !strcmp (str, "1"))
        j = TRUE;

      str = gst_structure_get_string (s, "t");
      if (str && !strcmp (str, "1"))
        t = TRUE;

      if (f || i || j || t) {
        GValue list = { 0 };
        GValue vstr = { 0 };

        g_value_init (&list, GST_TYPE_LIST);
        g_value_init (&vstr, G_TYPE_STRING);

        g_value_set_static_string (&vstr, "h263");
        gst_value_list_append_value (&list, &vstr);
        g_value_set_static_string (&vstr, "h263p");
        gst_value_list_append_value (&list, &vstr);
        g_value_unset (&vstr);

        gst_structure_set_value (new_s, "h263version", &list);
        g_value_unset (&list);
      } else {
        gst_structure_set (new_s, "h263version", G_TYPE_STRING, "h263", NULL);
      }

      if (!f)
        gst_structure_set (new_s, "annex-f", G_TYPE_BOOLEAN, FALSE, NULL);
      if (!i)
        gst_structure_set (new_s, "annex-i", G_TYPE_BOOLEAN, FALSE, NULL);
      if (!j)
        gst_structure_set (new_s, "annex-j", G_TYPE_BOOLEAN, FALSE, NULL);
      if (!t)
        gst_structure_set (new_s, "annex-t", G_TYPE_BOOLEAN, FALSE, NULL);

      str = gst_structure_get_string (s, "custom");
      if (str) {
        unsigned int xmax, ymax, mpi;

        if (sscanf (str, "%u,%u,%u", &xmax, &ymax, &mpi) == 3) {
          /* custom picture dimensions must be divisible by 4 */
          if (xmax % 4 == 0 && ymax % 4 == 0 && mpi >= 1 && mpi <= 32) {
            caps = caps_append (caps, new_s, xmax, ymax, mpi);
            added = TRUE;
          } else {
            GST_WARNING_OBJECT (rtph263ppay, "Invalid custom framesize/MPI"
                " %u x %u at %u, ignoring", xmax, ymax, mpi);
          }
        } else {
          GST_WARNING_OBJECT (rtph263ppay, "Invalid custom framesize/MPI: %s,"
              " ignoring", str);
        }
      }

      str = gst_structure_get_string (s, "16cif");
      if (str) {
        int mpi = atoi (str);

        caps = caps_append (caps, new_s, 1408, 1152, mpi);
        added = TRUE;
      }

      str = gst_structure_get_string (s, "4cif");
      if (str) {
        int mpi = atoi (str);

        caps = caps_append (caps, new_s, 704, 576, mpi);
        added = TRUE;
      }

      str = gst_structure_get_string (s, "cif");
      if (str) {
        int mpi = atoi (str);

        caps = caps_append (caps, new_s, 352, 288, mpi);
        added = TRUE;
      }

      str = gst_structure_get_string (s, "qcif");
      if (str) {
        int mpi = atoi (str);

        caps = caps_append (caps, new_s, 176, 144, mpi);
        added = TRUE;
      }

      str = gst_structure_get_string (s, "sqcif");
      if (str) {
        int mpi = atoi (str);

        caps = caps_append (caps, new_s, 128, 96, mpi);
        added = TRUE;
      }

      if (added)
        gst_structure_free (new_s);
      else
        caps = gst_caps_merge_structure (caps, new_s);
    }
  }

  gst_caps_unref (intersect);

  return caps;
}
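/* caps_append is a helper defined elsewhere in this file. A minimal sketch of
 * what the call sites above rely on, assuming it merges a copy of the
 * structure bounded by the given maximum picture size, with the maximum frame
 * rate derived from the MPI (minimum picture interval, in units of
 * 1001/30000 s): */
static GstCaps *
caps_append (GstCaps * caps, GstStructure * in_s, guint x, guint y, guint mpi)
{
  GstStructure *s;

  if (!in_s)
    return caps;
  if (mpi < 1 || mpi > 32)
    return caps;

  s = gst_structure_copy (in_s);
  gst_structure_set (s,
      "width", GST_TYPE_INT_RANGE, 1, x,
      "height", GST_TYPE_INT_RANGE, 1, y,
      "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, 30000, 1001 * mpi, NULL);

  return gst_caps_merge_structure (caps, s);
}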
gboolean
gst_gl_handle_context_query (GstElement * element, GstQuery * query,
    GstGLDisplay ** display, GstGLContext ** other_context)
{
  gboolean res = FALSE;
  const gchar *context_type;
  GstContext *context, *old_context;

  g_return_val_if_fail (element != NULL, FALSE);
  g_return_val_if_fail (query != NULL, FALSE);
  g_return_val_if_fail (display != NULL, FALSE);
  g_return_val_if_fail (other_context != NULL, FALSE);

  gst_query_parse_context_type (query, &context_type);

  if (g_strcmp0 (context_type, GST_GL_DISPLAY_CONTEXT_TYPE) == 0) {
    gst_query_parse_context (query, &old_context);

    if (old_context)
      context = gst_context_copy (old_context);
    else
      context = gst_context_new (GST_GL_DISPLAY_CONTEXT_TYPE, TRUE);

    gst_context_set_gl_display (context, *display);
    gst_query_set_context (query, context);
    gst_context_unref (context);

    res = *display != NULL;
  }
#if GST_GL_HAVE_WINDOW_X11
  else if (g_strcmp0 (context_type, "gst.x11.display.handle") == 0) {
    GstStructure *s;
    Display *x11_display = NULL;

    gst_query_parse_context (query, &old_context);

    if (old_context)
      context = gst_context_copy (old_context);
    else
      context = gst_context_new ("gst.x11.display.handle", TRUE);

    if (*display
        && ((*display)->type & GST_GL_DISPLAY_TYPE_X11) ==
        GST_GL_DISPLAY_TYPE_X11)
      x11_display = (Display *) gst_gl_display_get_handle (*display);

    s = gst_context_writable_structure (context);
    gst_structure_set (s, "display", G_TYPE_POINTER, x11_display, NULL);

    gst_query_set_context (query, context);
    gst_context_unref (context);

    res = x11_display != NULL;
  }
#endif
#if GST_GL_HAVE_WINDOW_WAYLAND
  else if (g_strcmp0 (context_type, "GstWaylandDisplayHandleContextType") == 0) {
    GstStructure *s;
    struct wl_display *wayland_display = NULL;

    gst_query_parse_context (query, &old_context);

    if (old_context)
      context = gst_context_copy (old_context);
    else
      context = gst_context_new ("GstWaylandDisplayHandleContextType", TRUE);

    if (*display
        && ((*display)->type & GST_GL_DISPLAY_TYPE_WAYLAND) ==
        GST_GL_DISPLAY_TYPE_WAYLAND)
      wayland_display =
          (struct wl_display *) gst_gl_display_get_handle (*display);

    s = gst_context_writable_structure (context);
    gst_structure_set (s, "display", G_TYPE_POINTER, wayland_display, NULL);

    gst_query_set_context (query, context);
    gst_context_unref (context);

    res = wayland_display != NULL;
  }
#endif
  else if (g_strcmp0 (context_type, "gst.gl.app_context") == 0) {
    GstStructure *s;

    gst_query_parse_context (query, &old_context);

    if (old_context)
      context = gst_context_copy (old_context);
    else
      context = gst_context_new ("gst.gl.app_context", TRUE);

    s = gst_context_writable_structure (context);
    gst_structure_set (s, "context", GST_GL_TYPE_CONTEXT, *other_context, NULL);
    gst_query_set_context (query, context);
    gst_context_unref (context);

    res = *other_context != NULL;
  }

  return res;
}
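/* Hedged usage sketch: an element holding a GstGLDisplay and a possible
 * application-provided GstGLContext would answer GST_QUERY_CONTEXT from its
 * pad query handler roughly like this. GstFoo, gst_foo_src_query and the two
 * struct fields are illustrative names, not part of the library. */
static gboolean
gst_foo_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
{
  GstFoo *foo = GST_FOO (parent);

  switch (GST_QUERY_TYPE (query)) {
    case GST_QUERY_CONTEXT:
      if (gst_gl_handle_context_query (GST_ELEMENT (foo), query,
              &foo->display, &foo->other_context))
        return TRUE;
      break;
    default:
      break;
  }

  return gst_pad_query_default (pad, parent, query);
}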
static GstCaps *
gst_rtp_h264_pay_getcaps (GstRTPBasePayload * payload, GstPad * pad,
    GstCaps * filter)
{
  GstCaps *template_caps;
  GstCaps *allowed_caps;
  GstCaps *caps, *icaps;
  gboolean append_unrestricted;
  guint i;

  allowed_caps =
      gst_pad_peer_query_caps (GST_RTP_BASE_PAYLOAD_SRCPAD (payload), filter);

  if (allowed_caps == NULL)
    return NULL;

  template_caps =
      gst_static_pad_template_get_caps (&gst_rtp_h264_pay_sink_template);

  if (gst_caps_is_any (allowed_caps)) {
    caps = gst_caps_ref (template_caps);
    goto done;
  }

  if (gst_caps_is_empty (allowed_caps)) {
    caps = gst_caps_ref (allowed_caps);
    goto done;
  }

  caps = gst_caps_new_empty ();

  append_unrestricted = FALSE;
  for (i = 0; i < gst_caps_get_size (allowed_caps); i++) {
    GstStructure *s = gst_caps_get_structure (allowed_caps, i);
    GstStructure *new_s = gst_structure_new_empty ("video/x-h264");
    const gchar *profile_level_id;

    profile_level_id = gst_structure_get_string (s, "profile-level-id");

    if (profile_level_id && strlen (profile_level_id) == 6) {
      const gchar *profile;
      const gchar *level;
      long int spsint;
      guint8 sps[3];

      spsint = strtol (profile_level_id, NULL, 16);
      sps[0] = spsint >> 16;
      sps[1] = spsint >> 8;
      sps[2] = spsint;

      profile = gst_codec_utils_h264_get_profile (sps, 3);
      level = gst_codec_utils_h264_get_level (sps, 3);

      if (profile && level) {
        GST_LOG_OBJECT (payload, "In caps, have profile %s and level %s",
            profile, level);

        if (!strcmp (profile, "constrained-baseline"))
          gst_structure_set (new_s, "profile", G_TYPE_STRING, profile, NULL);
        else {
          GValue val = { 0, };
          GValue profiles = { 0, };

          g_value_init (&profiles, GST_TYPE_LIST);
          g_value_init (&val, G_TYPE_STRING);

          g_value_set_static_string (&val, profile);
          gst_value_list_append_value (&profiles, &val);

          g_value_set_static_string (&val, "constrained-baseline");
          gst_value_list_append_value (&profiles, &val);

          gst_structure_take_value (new_s, "profile", &profiles);
        }

        if (!strcmp (level, "1"))
          gst_structure_set (new_s, "level", G_TYPE_STRING, level, NULL);
        else {
          GValue levels = { 0, };
          GValue val = { 0, };
          int j;

          g_value_init (&levels, GST_TYPE_LIST);
          g_value_init (&val, G_TYPE_STRING);

          for (j = 0; j < G_N_ELEMENTS (all_levels); j++) {
            g_value_set_static_string (&val, all_levels[j]);
            gst_value_list_prepend_value (&levels, &val);
            if (!strcmp (level, all_levels[j]))
              break;
          }
          gst_structure_take_value (new_s, "level", &levels);
        }
      } else {
        /* Invalid profile-level-id means baseline */
        gst_structure_set (new_s,
            "profile", G_TYPE_STRING, "constrained-baseline", NULL);
      }
    } else {
      /* No profile-level-id means baseline or unrestricted */
      gst_structure_set (new_s,
          "profile", G_TYPE_STRING, "constrained-baseline", NULL);
      append_unrestricted = TRUE;
    }

    caps = gst_caps_merge_structure (caps, new_s);
  }

  if (append_unrestricted) {
    caps = gst_caps_merge_structure (caps,
        gst_structure_new_empty ("video/x-h264"));
  }

  icaps = gst_caps_intersect (caps, template_caps);
  gst_caps_unref (caps);
  caps = icaps;

done:
  gst_caps_unref (template_caps);
  gst_caps_unref (allowed_caps);

  return caps;
}
static void *
create_node(GstDspBase *base)
{
	GstDspVpp *self;
	struct td_codec *codec;
	int dsp_handle;
	struct dsp_node *node;

	const struct dsp_uuid usn_uuid = { 0x79A3C8B3, 0x95F2, 0x403F, 0x9A, 0x4B,
		{ 0xCF, 0x80, 0x57, 0x73, 0x05, 0x41 } };

	self = GST_DSP_VPP(base);
	dsp_handle = base->dsp_handle;

	if (!gstdsp_register(dsp_handle, &usn_uuid, DSP_DCD_LIBRARYTYPE, "usn.dll64P")) {
		pr_err(self, "failed to register usn node library");
		return NULL;
	}

	codec = base->codec;
	if (!codec) {
		pr_err(self, "unknown algorithm");
		return NULL;
	}

	pr_info(base, "algo=%s", codec->filename);

	if (!gstdsp_register(dsp_handle, codec->uuid, DSP_DCD_LIBRARYTYPE, codec->filename)) {
		pr_err(self, "failed to register algo node library");
		return NULL;
	}

	if (!gstdsp_register(dsp_handle, codec->uuid, DSP_DCD_NODETYPE, codec->filename)) {
		pr_err(self, "failed to register algo node");
		return NULL;
	}

	{
		struct dsp_node_attr_in attrs = {
			.cb = sizeof(attrs),
			.priority = 5,
			.timeout = 1000,
		};
		void *arg_data;

		codec->create_args(base, &attrs.profile_id, &arg_data);

		if (!arg_data)
			return NULL;

		if (!dsp_node_allocate(dsp_handle, base->proc, codec->uuid, arg_data, &attrs, &node)) {
			pr_err(self, "dsp node allocate failed");
			free(arg_data);
			return NULL;
		}
		free(arg_data);
	}

	if (!dsp_node_create(dsp_handle, node)) {
		pr_err(self, "dsp node create failed");
		dsp_node_free(dsp_handle, node);
		return NULL;
	}

	pr_info(self, "dsp node created");

	if (codec->setup_params)
		codec->setup_params(base);

	if (codec->send_params)
		codec->send_params(base, node);

	return node;
}

static inline bool
destroy_node(GstDspVpp *self, int dsp_handle, struct dsp_node *node)
{
	if (node) {
		if (!dsp_node_free(dsp_handle, node)) {
			pr_err(self, "dsp node free failed");
			return false;
		}
		pr_info(self, "dsp node deleted");
	}
	return true;
}

static inline void
configure_caps(GstDspVpp *self, GstCaps *in, GstCaps *out)
{
	GstDspBase *base;
	GstStructure *out_struc, *in_struc;
	const GValue *aspect_ratio;
	const GValue *framerate;
	GstCaps *allowed_caps;

	base = GST_DSP_BASE(self);

	in_struc = gst_caps_get_structure(in, 0);
	out_struc = gst_structure_new("video/x-raw-yuv",
				      "format", GST_TYPE_FOURCC,
				      GST_MAKE_FOURCC('I', '4', '2', '0'),
				      NULL);

	if (gst_structure_get_int(in_struc, "width", &self->width))
		self->out_width = self->width;
	if (gst_structure_get_int(in_struc, "height", &self->height))
		self->out_height = self->height;

	allowed_caps = gst_pad_get_allowed_caps(base->srcpad);
	if (allowed_caps) {
		if (gst_caps_get_size(allowed_caps) > 0) {
			GstStructure *s;
			s = gst_caps_get_structure(allowed_caps, 0);
			gst_structure_get_int(s, "width", &self->out_width);
			gst_structure_get_int(s, "height", &self->out_height);
		}
		gst_caps_unref(allowed_caps);
	}

	gst_structure_set(out_struc, "width", G_TYPE_INT, self->out_width, NULL);
	gst_structure_set(out_struc, "height", G_TYPE_INT, self->out_height, NULL);

	aspect_ratio = gst_structure_get_value(in_struc, "pixel-aspect-ratio");
	if (aspect_ratio)
		gst_structure_set_value(out_struc, "pixel-aspect-ratio", aspect_ratio);

	framerate = gst_structure_get_value(in_struc, "framerate");
	if (framerate)
		gst_structure_set_value(out_struc, "framerate", framerate);

	base->output_buffer_size = self->out_width * self->out_height * 1.5;

	gst_caps_append_structure(out, out_struc);
}
static void *
create_node(GstDspBase *base)
{
	GstDspADec *self;
	struct td_codec *codec;
	int dsp_handle;
	struct dsp_node *node;

	const struct dsp_uuid usn_uuid = { 0x79A3C8B3, 0x95F2, 0x403F, 0x9A, 0x4B,
		{ 0xCF, 0x80, 0x57, 0x73, 0x05, 0x41 } };

	self = GST_DSP_ADEC(base);
	dsp_handle = base->dsp_handle;

	if (!gstdsp_register(dsp_handle, &usn_uuid, DSP_DCD_LIBRARYTYPE, "usn.dll64P")) {
		pr_err(self, "failed to register usn node library");
		return NULL;
	}

	codec = base->codec;
	if (!codec) {
		pr_err(self, "unknown algorithm");
		return NULL;
	}

	pr_info(base, "algo=%s", codec->filename);

	if (!gstdsp_register(dsp_handle, codec->uuid, DSP_DCD_LIBRARYTYPE, codec->filename)) {
		pr_err(self, "failed to register algo node library");
		return NULL;
	}

	if (!gstdsp_register(dsp_handle, codec->uuid, DSP_DCD_NODETYPE, codec->filename)) {
		pr_err(self, "failed to register algo node");
		return NULL;
	}

	{
		struct dsp_node_attr_in attrs = {
			.cb = sizeof(attrs),
			.priority = 10,
			.timeout = 10000,
		};
		void *arg_data;

		codec->create_args(base, &attrs.profile_id, &arg_data);

		if (!arg_data)
			return NULL;

		if (!dsp_node_allocate(dsp_handle, base->proc, codec->uuid, arg_data, &attrs, &node)) {
			pr_err(self, "dsp node allocate failed");
			free(arg_data);
			return NULL;
		}
		free(arg_data);
	}

	if (!dsp_node_create(dsp_handle, node)) {
		pr_err(self, "dsp node create failed");
		dsp_node_free(dsp_handle, node);
		return NULL;
	}

	pr_info(self, "dsp node created");

	if (codec->send_params)
		codec->send_params(base, node);

	if (codec->setup_params)
		codec->setup_params(base);

	base->flush_buffer = codec->flush_buffer;

	return node;
}

static inline void
configure_caps(GstDspADec *self, GstCaps *in, GstCaps *out)
{
	GstDspBase *base;
	GstStructure *out_struc, *in_struc;
	int channels;

	base = GST_DSP_BASE(self);

	in_struc = gst_caps_get_structure(in, 0);

	out_struc = gst_structure_new("audio/x-raw-int",
				      "endianness", G_TYPE_INT, G_BYTE_ORDER,
				      "signed", G_TYPE_BOOLEAN, TRUE,
				      "width", G_TYPE_INT, 16,
				      "depth", G_TYPE_INT, 16,
				      NULL);

	if (gst_structure_get_int(in_struc, "channels", &channels))
		gst_structure_set(out_struc, "channels", G_TYPE_INT, channels, NULL);

	if (gst_structure_get_int(in_struc, "rate", &self->samplerate))
		gst_structure_set(out_struc, "rate", G_TYPE_INT, self->samplerate, NULL);

	if (base->alg == GSTDSP_AACDEC) {
		const char *fmt;
		gboolean tmp = FALSE;

		gst_structure_get_boolean(in_struc, "framed", &tmp);
		self->packetized = tmp;
		fmt = gst_structure_get_string(in_struc, "stream-format");
		/* guard against a missing stream-format field */
		self->raw = fmt && strcmp(fmt, "raw") == 0;
	}

	base->output_buffer_size = 4 * 1024;

	gst_caps_append_structure(out, out_struc);
}
int
main (int argc, char **argv)
{
  static const GOptionEntry test_goptions[] = {
    {"videosink", '\0', 0, G_OPTION_ARG_STRING, &opt_videosink_str,
        "videosink to use (default: " DEFAULT_VIDEOSINK ")", NULL},
    {"caps", '\0', 0, G_OPTION_ARG_STRING, &opt_filtercaps_str,
        "filter caps to narrow down formats to test", NULL},
    {"with-ffmpegcolorspace", '\0', 0, G_OPTION_ARG_NONE,
        &opt_with_ffmpegcolorspace,
        "whether to add an ffmpegcolorspace element in front of the sink",
        NULL},
    {NULL, '\0', 0, 0, NULL, NULL, NULL}
  };
  GOptionContext *ctx;
  GError *opt_err = NULL;

  GstElement *pipeline, *src, *filter1, *crop, *scale, *filter2, *csp, *sink;
  GstCaps *filter_caps = NULL;
  GList *caps_list, *l;

#if !GLIB_CHECK_VERSION (2, 31, 0)
  if (!g_thread_supported ())
    g_thread_init (NULL);
#endif

  /* command line option parsing */
  ctx = g_option_context_new ("");
  g_option_context_add_group (ctx, gst_init_get_option_group ());
  g_option_context_add_main_entries (ctx, test_goptions, NULL);

  if (!g_option_context_parse (ctx, &argc, &argv, &opt_err)) {
    g_error ("Error parsing command line options: %s", opt_err->message);
    return -1;
  }

  GST_DEBUG_CATEGORY_INIT (videocrop_test_debug, "videocroptest", 0, "vctest");

  pipeline = gst_pipeline_new ("pipeline");
  src = gst_element_factory_make ("videotestsrc", "videotestsrc");
  g_assert (src != NULL);
  filter1 = gst_element_factory_make ("capsfilter", "capsfilter1");
  g_assert (filter1 != NULL);
  crop = gst_element_factory_make ("videocrop", "videocrop");
  g_assert (crop != NULL);
  scale = gst_element_factory_make ("videoscale", "videoscale");
  g_assert (scale != NULL);
  filter2 = gst_element_factory_make ("capsfilter", "capsfilter2");
  g_assert (filter2 != NULL);

  if (opt_with_ffmpegcolorspace) {
    g_print ("Adding ffmpegcolorspace\n");
    csp = gst_element_factory_make ("ffmpegcolorspace", "colorspace");
  } else {
    csp = gst_element_factory_make ("identity", "colorspace");
  }
  g_assert (csp != NULL);

  if (opt_filtercaps_str) {
    filter_caps = gst_caps_from_string (opt_filtercaps_str);
    if (filter_caps == NULL) {
      g_error ("Invalid filter caps string '%s'", opt_filtercaps_str);
    } else {
      g_print ("Using filter caps '%s'\n", opt_filtercaps_str);
    }
  }

  if (opt_videosink_str) {
    g_print ("Trying videosink '%s' ...", opt_videosink_str);
    sink = gst_element_factory_make (opt_videosink_str, "sink");
    g_print ("%s\n", (sink) ? "ok" : "element couldn't be created");
  } else {
    sink = NULL;
  }

  if (sink == NULL) {
    g_print ("Trying videosink '%s' ...", DEFAULT_VIDEOSINK);
    sink = gst_element_factory_make (DEFAULT_VIDEOSINK, "sink");
    g_print ("%s\n", (sink) ? "ok" : "element couldn't be created");
  }
  if (sink == NULL) {
    g_print ("Trying videosink '%s' ...", "xvimagesink");
    sink = gst_element_factory_make ("xvimagesink", "sink");
    g_print ("%s\n", (sink) ? "ok" : "element couldn't be created");
  }
  if (sink == NULL) {
    g_print ("Trying videosink '%s' ...", "ximagesink");
    sink = gst_element_factory_make ("ximagesink", "sink");
    g_print ("%s\n", (sink) ? "ok" : "element couldn't be created");
  }
  g_assert (sink != NULL);

  gst_bin_add_many (GST_BIN (pipeline), src, filter1, crop, scale, filter2,
      csp, sink, NULL);

  if (!gst_element_link (src, filter1))
    g_error ("Failed to link videotestsrc to capsfilter1");
  if (!gst_element_link (filter1, crop))
    g_error ("Failed to link capsfilter1 to videocrop");
  if (!gst_element_link (crop, scale))
    g_error ("Failed to link videocrop to videoscale");
  if (!gst_element_link (scale, filter2))
    g_error ("Failed to link videoscale to capsfilter2");
  if (!gst_element_link (filter2, csp))
    g_error ("Failed to link capsfilter2 to ffmpegcolorspace");
  if (!gst_element_link (csp, sink))
    g_error ("Failed to link ffmpegcolorspace to video sink");

  caps_list = video_crop_get_test_caps (crop);
  for (l = caps_list; l != NULL; l = l->next) {
    GstStateChangeReturn ret;
    GstCaps *caps, *out_caps;
    gboolean skip = FALSE;
    gchar *s;

    if (filter_caps) {
      GstCaps *icaps;

      icaps = gst_caps_intersect (filter_caps, GST_CAPS (l->data));
      skip = gst_caps_is_empty (icaps);
      gst_caps_unref (icaps);
    }

    /* this is the size of our window (stays fixed) */
    out_caps = gst_caps_copy (GST_CAPS (l->data));
    gst_structure_set (gst_caps_get_structure (out_caps, 0), "width",
        G_TYPE_INT, OUT_WIDTH, "height", G_TYPE_INT, OUT_HEIGHT, NULL);

    g_object_set (filter2, "caps", out_caps, NULL);

    /* filter1 gets these too to prevent videotestsrc from renegotiating */
    g_object_set (filter1, "caps", out_caps, NULL);
    gst_caps_unref (out_caps);

    caps = gst_caps_copy (GST_CAPS (l->data));
    GST_INFO ("testing format: %" GST_PTR_FORMAT, caps);

    s = gst_caps_to_string (caps);

    if (skip) {
      g_print ("Skipping format: %s\n", s);
      g_free (s);
      continue;
    }

    g_print ("Format: %s\n", s);

    caps = gst_caps_make_writable (caps);

    /* FIXME: check return values */
    ret = gst_element_set_state (pipeline, GST_STATE_PLAYING);
    if (ret != GST_STATE_CHANGE_FAILURE) {
      ret = gst_element_get_state (pipeline, NULL, NULL, -1);
      if (ret != GST_STATE_CHANGE_FAILURE) {
        test_with_caps (src, crop, caps);
      } else {
        g_print ("Format: %s not supported (failed to go to PLAYING)\n", s);
      }
    } else {
      g_print ("Format: %s not supported\n", s);
    }

    gst_element_set_state (pipeline, GST_STATE_NULL);

    gst_caps_unref (caps);
    g_free (s);
  }

  g_list_foreach (caps_list, (GFunc) gst_caps_unref, NULL);
  g_list_free (caps_list);

  gst_element_set_state (pipeline, GST_STATE_NULL);
  gst_object_unref (pipeline);

  return 0;
}
static GstFlowReturn
pad_chain(GstPad *pad, GstBuffer *buf)
{
	struct obj *self;
	GstFlowReturn ret = GST_FLOW_OK;
	AVCodecContext *ctx;
	AVFrame *frame = NULL;
	int got_pic;
	AVPacket pkt;
	int read;

	self = (struct obj *)((GstObject *)pad)->parent;
	GST_DEBUG_OBJECT(self, "pad chain");
	ctx = self->av_ctx;

	if (G_UNLIKELY(!self->initialized)) {
		GstCaps *new_caps;
		GstStructure *struc;

		self->initialized = true;
		if (gst_av_codec_open(ctx, self->codec) < 0) {
			ret = GST_FLOW_ERROR;
			goto leave;
		}

		if (self->parse_func)
			self->parse_func(self, buf);

		new_caps = gst_caps_new_empty();

		struc = gst_structure_new("video/x-raw-yuv",
					  "width", G_TYPE_INT, ctx->width,
					  "height", G_TYPE_INT, ctx->height,
					  "format", GST_TYPE_FOURCC,
					  GST_MAKE_FOURCC('I', '4', '2', '0'),
					  NULL);

		if (ctx->time_base.num)
			gst_structure_set(struc,
					  "framerate", GST_TYPE_FRACTION,
					  ctx->time_base.den,
					  ctx->time_base.num * ctx->ticks_per_frame,
					  NULL);

		if (ctx->sample_aspect_ratio.num)
			gst_structure_set(struc,
					  "pixel-aspect-ratio", GST_TYPE_FRACTION,
					  ctx->sample_aspect_ratio.num,
					  ctx->sample_aspect_ratio.den,
					  NULL);

		gst_caps_append_structure(new_caps, struc);

		GST_INFO_OBJECT(self, "caps are: %" GST_PTR_FORMAT, new_caps);
		gst_pad_set_caps(self->srcpad, new_caps);
		gst_caps_unref(new_caps);
	}

	av_new_packet(&pkt, buf->size);
	memcpy(pkt.data, buf->data, buf->size);
	memset(pkt.data + pkt.size, 0, FF_INPUT_BUFFER_PADDING_SIZE);

	frame = avcodec_alloc_frame();

	pkt.dts = pkt.pts = gstav_timestamp_to_pts(ctx, buf->timestamp);
#if LIBAVCODEC_VERSION_MAJOR < 53
	ctx->reordered_opaque = pkt.dts;
#endif

	g_mutex_lock(&self->mutex);
	read = avcodec_decode_video2(ctx, frame, &got_pic, &pkt);
	av_free_packet(&pkt);
	if (read < 0) {
		GST_WARNING_OBJECT(self, "error: %i", read);
		g_mutex_unlock(&self->mutex);
		goto leave;
	}

	if (got_pic) {
		GstBuffer *out_buf;
		out_buf = convert_frame(self, frame);
		g_mutex_unlock(&self->mutex);
		ret = gst_pad_push(self->srcpad, out_buf);
	} else
		g_mutex_unlock(&self->mutex);

leave:
	av_free(frame);
	gst_buffer_unref(buf);
	GST_DEBUG_OBJECT(self, "pad chain returning %s", gst_flow_get_name(ret));
	return ret;
}
/**
 * adapt_image_capture:
 * @self: camerasrc object
 * @in_caps: caps object that describes incoming image format
 *
 * Adjust capsfilters and crop according to the image capture caps if
 * necessary. The captured image format from the video source might be
 * different from what the application requested, so we can try to fix
 * that in camerabin.
 *
 */
static void
adapt_image_capture (GstWrapperCameraBinSrc * self, GstCaps * in_caps)
{
  GstBaseCameraBinSrc *bcamsrc = GST_BASE_CAMERA_SRC (self);
  GstStructure *in_st, *new_st, *req_st;
  gint in_width = 0, in_height = 0, req_width = 0, req_height = 0, crop = 0;
  gdouble ratio_w, ratio_h;
  GstCaps *filter_caps = NULL;

  GST_LOG_OBJECT (self, "in caps: %" GST_PTR_FORMAT, in_caps);
  GST_LOG_OBJECT (self, "requested caps: %" GST_PTR_FORMAT,
      self->image_capture_caps);

  in_st = gst_caps_get_structure (in_caps, 0);
  gst_structure_get_int (in_st, "width", &in_width);
  gst_structure_get_int (in_st, "height", &in_height);

  req_st = gst_caps_get_structure (self->image_capture_caps, 0);
  gst_structure_get_int (req_st, "width", &req_width);
  gst_structure_get_int (req_st, "height", &req_height);

  GST_INFO_OBJECT (self, "we requested %dx%d, and got %dx%d", req_width,
      req_height, in_width, in_height);

  new_st = gst_structure_copy (req_st);
  /* If new fields have been added, we need to copy them */
  gst_structure_foreach (in_st, copy_missing_fields, new_st);

  gst_structure_set (new_st, "width", G_TYPE_INT, in_width, "height",
      G_TYPE_INT, in_height, NULL);

  GST_LOG_OBJECT (self, "new image capture caps: %" GST_PTR_FORMAT, new_st);

  /* Crop if requested aspect ratio differs from incoming frame aspect ratio */
  if (self->src_zoom_crop) {
    ratio_w = (gdouble) in_width / req_width;
    ratio_h = (gdouble) in_height / req_height;

    if (ratio_w < ratio_h) {
      crop = in_height - (req_height * ratio_w);
      self->base_crop_top = crop / 2;
      self->base_crop_bottom = crop / 2;
    } else {
      crop = in_width - (req_width * ratio_h);
      self->base_crop_left = crop / 2;
      self->base_crop_right = crop / 2;
    }

    GST_INFO_OBJECT (self,
        "setting base crop: left:%d, right:%d, top:%d, bottom:%d",
        self->base_crop_left, self->base_crop_right, self->base_crop_top,
        self->base_crop_bottom);
    g_object_set (G_OBJECT (self->src_zoom_crop),
        "top", self->base_crop_top,
        "bottom", self->base_crop_bottom,
        "left", self->base_crop_left,
        "right", self->base_crop_right, NULL);
  }

  /* Update capsfilters */
  if (self->image_capture_caps) {
    gst_caps_unref (self->image_capture_caps);
  }
  self->image_capture_caps = gst_caps_new_full (new_st, NULL);
  set_capsfilter_caps (self, self->image_capture_caps);

  /* Adjust the capsfilter before crop and videoscale elements if necessary */
  if (in_width == bcamsrc->width && in_height == bcamsrc->height) {
    GST_DEBUG_OBJECT (self, "no adaptation with resolution needed");
  } else {
    GST_DEBUG_OBJECT (self,
        "changing %" GST_PTR_FORMAT " from %dx%d to %dx%d", self->src_filter,
        bcamsrc->width, bcamsrc->height, in_width, in_height);
    /* Apply the width and height to filter caps */
    g_object_get (G_OBJECT (self->src_filter), "caps", &filter_caps, NULL);
    filter_caps = gst_caps_make_writable (filter_caps);
    gst_caps_set_simple (filter_caps, "width", G_TYPE_INT, in_width,
        "height", G_TYPE_INT, in_height, NULL);
    g_object_set (G_OBJECT (self->src_filter), "caps", filter_caps, NULL);
    gst_caps_unref (filter_caps);
  }
}
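/* copy_missing_fields is the GstStructureForeachFunc used above; a minimal
 * sketch, assuming it copies each field from the incoming structure into the
 * user-data structure only when that field is not already present: */
static gboolean
copy_missing_fields (GQuark field_id, const GValue * value, gpointer user_data)
{
  GstStructure *st = (GstStructure *) user_data;

  if (!gst_structure_id_has_field (st, field_id))
    gst_structure_id_set_value (st, field_id, value);

  return TRUE;
}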
/**
 * gst_basertppayload_set_outcaps:
 * @payload: a #GstBaseRTPPayload
 * @fieldname: the first field name or %NULL
 * @...: field values
 *
 * Configure the output caps with the optional parameters.
 *
 * Variable arguments should be in the form field name, field type
 * (as a GType), value(s). The last variable argument should be NULL.
 *
 * Returns: %TRUE if the caps could be set.
 */
gboolean
gst_basertppayload_set_outcaps (GstBaseRTPPayload * payload, gchar * fieldname,
    ...)
{
  GstCaps *srccaps, *peercaps;
  gboolean res;

  /* fill in the defaults, these properties cannot be negotiated. */
  srccaps = gst_caps_new_simple ("application/x-rtp",
      "media", G_TYPE_STRING, payload->media,
      "clock-rate", G_TYPE_INT, payload->clock_rate,
      "encoding-name", G_TYPE_STRING, payload->encoding_name, NULL);

  GST_DEBUG_OBJECT (payload, "defaults: %" GST_PTR_FORMAT, srccaps);

  if (fieldname) {
    va_list varargs;

    /* override with custom properties */
    va_start (varargs, fieldname);
    gst_caps_set_simple_valist (srccaps, fieldname, varargs);
    va_end (varargs);

    GST_DEBUG_OBJECT (payload, "custom added: %" GST_PTR_FORMAT, srccaps);
  }

  /* the peer caps can override some of the defaults */
  peercaps = gst_pad_peer_get_caps (payload->srcpad);
  if (peercaps == NULL) {
    /* no peer caps, just add the other properties */
    gst_caps_set_simple (srccaps,
        "payload", G_TYPE_INT, GST_BASE_RTP_PAYLOAD_PT (payload),
        "ssrc", G_TYPE_UINT, payload->current_ssrc,
        "clock-base", G_TYPE_UINT, payload->ts_base,
        "seqnum-base", G_TYPE_UINT, payload->seqnum_base, NULL);

    GST_DEBUG_OBJECT (payload, "no peer caps: %" GST_PTR_FORMAT, srccaps);
  } else {
    GstCaps *temp;
    GstStructure *s, *d;
    const GValue *value;
    gint pt;

    /* peer provides caps we can use to fixate, intersect. This always
     * returns a writable caps. */
    temp = gst_caps_intersect (srccaps, peercaps);
    gst_caps_unref (srccaps);
    gst_caps_unref (peercaps);

    /* now fixate, start by taking the first caps */
    gst_caps_truncate (temp);

    /* get first structure */
    s = gst_caps_get_structure (temp, 0);

    if (gst_structure_get_int (s, "payload", &pt)) {
      /* use peer pt */
      GST_BASE_RTP_PAYLOAD_PT (payload) = pt;
      GST_LOG_OBJECT (payload, "using peer pt %d", pt);
    } else {
      if (gst_structure_has_field (s, "payload")) {
        /* can only fixate if there is a field */
        gst_structure_fixate_field_nearest_int (s, "payload",
            GST_BASE_RTP_PAYLOAD_PT (payload));
        gst_structure_get_int (s, "payload", &pt);
        GST_LOG_OBJECT (payload, "using peer pt %d", pt);
      } else {
        /* no pt field, use the internal pt */
        pt = GST_BASE_RTP_PAYLOAD_PT (payload);
        gst_structure_set (s, "payload", G_TYPE_INT, pt, NULL);
        GST_LOG_OBJECT (payload, "using internal pt %d", pt);
      }
    }

    if (gst_structure_has_field_typed (s, "ssrc", G_TYPE_UINT)) {
      value = gst_structure_get_value (s, "ssrc");
      payload->current_ssrc = g_value_get_uint (value);
      GST_LOG_OBJECT (payload, "using peer ssrc %08x", payload->current_ssrc);
    } else {
      /* FIXME, fixate_nearest_uint would be even better */
      gst_structure_set (s, "ssrc", G_TYPE_UINT, payload->current_ssrc, NULL);
      GST_LOG_OBJECT (payload, "using internal ssrc %08x",
          payload->current_ssrc);
    }

    if (gst_structure_has_field_typed (s, "clock-base", G_TYPE_UINT)) {
      value = gst_structure_get_value (s, "clock-base");
      payload->ts_base = g_value_get_uint (value);
      GST_LOG_OBJECT (payload, "using peer clock-base %u", payload->ts_base);
    } else {
      /* FIXME, fixate_nearest_uint would be even better */
      gst_structure_set (s, "clock-base", G_TYPE_UINT, payload->ts_base, NULL);
      GST_LOG_OBJECT (payload, "using internal clock-base %u",
          payload->ts_base);
    }

    if (gst_structure_has_field_typed (s, "seqnum-base", G_TYPE_UINT)) {
      value = gst_structure_get_value (s, "seqnum-base");
      payload->seqnum_base = g_value_get_uint (value);
      GST_LOG_OBJECT (payload, "using peer seqnum-base %u",
          payload->seqnum_base);
    } else {
      /* FIXME, fixate_nearest_uint would be even better */
      gst_structure_set (s, "seqnum-base", G_TYPE_UINT, payload->seqnum_base,
          NULL);
      GST_LOG_OBJECT (payload, "using internal seqnum-base %u",
          payload->seqnum_base);
    }

    /* make the target caps by copying over all the fixed caps, removing the
     * unfixed caps. */
    srccaps = gst_caps_new_simple (gst_structure_get_name (s), NULL);
    d = gst_caps_get_structure (srccaps, 0);
    gst_structure_foreach (s, (GstStructureForeachFunc) copy_fixed, d);

    gst_caps_unref (temp);

    GST_DEBUG_OBJECT (payload, "with peer caps: %" GST_PTR_FORMAT, srccaps);
  }

  res = gst_pad_set_caps (GST_BASE_RTP_PAYLOAD_SRCPAD (payload), srccaps);
  gst_caps_unref (srccaps);

  return res;
}
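/* For context, a payloader subclass would usually call
 * gst_basertppayload_set_outcaps() from its setcaps handler after filling
 * in the defaults with gst_basertppayload_set_options(). A minimal sketch;
 * the subclass name, encoding name and the extra field are hypothetical. */
static gboolean
my_payloader_setcaps (GstBaseRTPPayload * payload, GstCaps * caps)
{
  /* defaults consumed by gst_basertppayload_set_outcaps() above */
  gst_basertppayload_set_options (payload, "video", TRUE, "MYCODEC", 90000);

  /* negotiate output caps, adding one codec-specific field */
  return gst_basertppayload_set_outcaps (payload,
      "some-extra-field", G_TYPE_STRING, "some-value", NULL);
}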
static GstFlowReturn
gst_skor_sink_transform_frame_ip (GstVideoFilter * vfilter,
    GstVideoFrame * frame)
{
  GstSkorSink *sink = GST_SKORSINK (vfilter);
  gpointer data;
  gint stride, height;
  zbar_image_t *image;
  const zbar_symbol_t *symbol;
  int n;

  image = zbar_image_create ();

  /* all formats we support start with an 8-bit Y plane. zbar doesn't need
   * to know about the chroma plane(s) */
  data = GST_VIDEO_FRAME_COMP_DATA (frame, 0);
  stride = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0);
  height = GST_VIDEO_FRAME_HEIGHT (frame);

  zbar_image_set_format (image, GST_MAKE_FOURCC ('Y', '8', '0', '0'));
  zbar_image_set_size (image, stride, height);
  zbar_image_set_data (image, (gpointer) data, stride * height, NULL);

  /* scan the image for barcodes */
  n = zbar_scan_image (sink->scanner, image);
  if (G_UNLIKELY (n == -1)) {
    GST_WARNING_OBJECT (sink, "Error trying to scan frame. Skipping");
    goto out;
  }
  if (n == 0)
    goto out;

  /* extract results */
  symbol = zbar_image_first_symbol (image);
  for (; symbol; symbol = zbar_symbol_next (symbol)) {
    zbar_symbol_type_t typ = zbar_symbol_get_type (symbol);
    const char *data = zbar_symbol_get_data (symbol);
    gint quality = zbar_symbol_get_quality (symbol);

    GST_DEBUG_OBJECT (sink, "decoded %s symbol \"%s\" at quality %d",
        zbar_get_symbol_name (typ), data, quality);

    if (sink->cache && zbar_symbol_get_count (symbol) != 0)
      continue;

    if (sink->data_consumer)
      sink->data_consumer (data);

    if (sink->message) {
      GstMessage *m;
      GstStructure *s;
      GstSample *sample;
      GstCaps *sample_caps;

      s = gst_structure_new ("barcode",
          "timestamp", G_TYPE_UINT64, GST_BUFFER_TIMESTAMP (frame->buffer),
          "type", G_TYPE_STRING, zbar_get_symbol_name (typ),
          "symbol", G_TYPE_STRING, data,
          "quality", G_TYPE_INT, quality, NULL);

      if (sink->attach_frame) {
        /* create a sample from image */
        sample_caps = gst_video_info_to_caps (&frame->info);
        sample = gst_sample_new (frame->buffer, sample_caps, NULL, NULL);
        gst_caps_unref (sample_caps);
        gst_structure_set (s, "frame", GST_TYPE_SAMPLE, sample, NULL);
        gst_sample_unref (sample);
      }

      m = gst_message_new_element (GST_OBJECT (sink), s);
      gst_element_post_message (GST_ELEMENT (sink), m);
    } else if (sink->attach_frame)
      GST_WARNING_OBJECT (sink,
          "attach-frame=true has no effect if message=false");
  }

out:
  /* clean up */
  zbar_image_scanner_recycle_image (sink->scanner, image);
  zbar_image_destroy (image);

  return GST_FLOW_OK;
}
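/* On the application side, the "barcode" element message posted above can
 * be consumed from a bus watch installed with gst_bus_add_watch(). A
 * minimal sketch; the callback name is an assumption. */
static gboolean
on_bus_message (GstBus * bus, GstMessage * msg, gpointer user_data)
{
  if (GST_MESSAGE_TYPE (msg) == GST_MESSAGE_ELEMENT) {
    const GstStructure *s = gst_message_get_structure (msg);

    /* match the structure name used by the transform function */
    if (s && gst_structure_has_name (s, "barcode"))
      g_print ("decoded %s: %s\n",
          gst_structure_get_string (s, "type"),
          gst_structure_get_string (s, "symbol"));
  }

  return TRUE;                  /* keep the watch installed */
}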
static GstStructure *
vlc_to_gst_fmt( const es_format_t *p_fmt )
{
    const video_format_t *p_vfmt = &p_fmt->video;
    GstStructure *p_str = NULL;

    switch( p_fmt->i_codec ){
    case VLC_CODEC_H264:
        p_str = gst_structure_new_empty( "video/x-h264" );
        gst_structure_set( p_str, "alignment", G_TYPE_STRING, "au", NULL );
        if( p_fmt->i_extra )
            gst_structure_set( p_str, "stream-format", G_TYPE_STRING, "avc",
                    NULL );
        else
            gst_structure_set( p_str, "stream-format", G_TYPE_STRING,
                    "byte-stream", NULL );
        break;
    case VLC_CODEC_MP4V:
        p_str = gst_structure_new_empty( "video/mpeg" );
        gst_structure_set( p_str, "mpegversion", G_TYPE_INT, 4,
                "systemstream", G_TYPE_BOOLEAN, FALSE, NULL );
        break;
    case VLC_CODEC_VP8:
        p_str = gst_structure_new_empty( "video/x-vp8" );
        break;
    case VLC_CODEC_MPGV:
        p_str = gst_structure_new_empty( "video/mpeg" );
        gst_structure_set( p_str, "mpegversion", G_TYPE_INT, 2,
                "systemstream", G_TYPE_BOOLEAN, FALSE, NULL );
        break;
    case VLC_CODEC_FLV1:
        p_str = gst_structure_new_empty( "video/x-flash-video" );
        gst_structure_set( p_str, "flvversion", G_TYPE_INT, 1, NULL );
        break;
    case VLC_CODEC_WMV1:
        p_str = gst_structure_new_empty( "video/x-wmv" );
        gst_structure_set( p_str, "wmvversion", G_TYPE_INT, 1,
                "format", G_TYPE_STRING, "WMV1", NULL );
        break;
    case VLC_CODEC_WMV2:
        p_str = gst_structure_new_empty( "video/x-wmv" );
        gst_structure_set( p_str, "wmvversion", G_TYPE_INT, 2,
                "format", G_TYPE_STRING, "WMV2", NULL );
        break;
    case VLC_CODEC_WMV3:
        p_str = gst_structure_new_empty( "video/x-wmv" );
        gst_structure_set( p_str, "wmvversion", G_TYPE_INT, 3,
                "format", G_TYPE_STRING, "WMV3", NULL );
        break;
    case VLC_CODEC_VC1:
        p_str = gst_structure_new_empty( "video/x-wmv" );
        gst_structure_set( p_str, "wmvversion", G_TYPE_INT, 3,
                "format", G_TYPE_STRING, "WVC1", NULL );
        break;
    default:
        /* unsupported codec */
        return NULL;
    }

    if( p_vfmt->i_width && p_vfmt->i_height )
        gst_structure_set( p_str,
                "width", G_TYPE_INT, p_vfmt->i_width,
                "height", G_TYPE_INT, p_vfmt->i_height, NULL );

    if( p_vfmt->i_frame_rate && p_vfmt->i_frame_rate_base )
        gst_structure_set( p_str, "framerate", GST_TYPE_FRACTION,
                p_vfmt->i_frame_rate, p_vfmt->i_frame_rate_base, NULL );

    if( p_vfmt->i_sar_num && p_vfmt->i_sar_den )
        gst_structure_set( p_str, "pixel-aspect-ratio", GST_TYPE_FRACTION,
                p_vfmt->i_sar_num, p_vfmt->i_sar_den, NULL );

    if( p_fmt->i_extra )
    {
        GstBuffer *p_buf;

        p_buf = gst_buffer_new_wrapped_full( GST_MEMORY_FLAG_READONLY,
                p_fmt->p_extra, p_fmt->i_extra, 0, p_fmt->i_extra,
                NULL, NULL );
        if( p_buf == NULL )
        {
            gst_structure_free( p_str );
            return NULL;
        }

        gst_structure_set( p_str, "codec_data", GST_TYPE_BUFFER, p_buf, NULL );
        gst_buffer_unref( p_buf );
    }

    return p_str;
}
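/* A caller would typically wrap the returned structure into full caps
 * before handing it to a GStreamer element. A minimal sketch, with error
 * handling elided; the vlc_fmt_to_caps name is hypothetical. Note that
 * gst_caps_append_structure() takes ownership of the structure. */
static GstCaps *
vlc_fmt_to_caps( const es_format_t *p_fmt )
{
    GstCaps *p_caps = NULL;
    GstStructure *p_str = vlc_to_gst_fmt( p_fmt );

    if( p_str )
    {
        p_caps = gst_caps_new_empty();
        gst_caps_append_structure( p_caps, p_str );
    }

    return p_caps;
}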
static GstCaps *
gst_video_rate_transform_caps (GstBaseTransform * trans,
    GstPadDirection direction, GstCaps * caps, GstCaps * filter)
{
  GstVideoRate *videorate = GST_VIDEO_RATE (trans);
  GstCaps *ret;
  GstStructure *s, *s1, *s2, *s3 = NULL;
  int maxrate = g_atomic_int_get (&videorate->max_rate);
  gint i;

  ret = gst_caps_new_empty ();

  for (i = 0; i < gst_caps_get_size (caps); i++) {
    s = gst_caps_get_structure (caps, i);

    s1 = gst_structure_copy (s);
    s2 = gst_structure_copy (s);

    if (videorate->drop_only) {
      gint min_num = 0, min_denom = 1;
      gint max_num = G_MAXINT, max_denom = 1;

      /* Clamp the caps to our maximum rate as the first caps if possible */
      if (!gst_video_max_rate_clamp_structure (s1, maxrate,
              &min_num, &min_denom, &max_num, &max_denom)) {
        min_num = 0;
        min_denom = 1;
        max_num = maxrate;
        max_denom = 1;

        /* clamp wouldn't be a real subset of 1..maxrate, in this case the sink
         * caps should become [1..maxrate], [1..maxint] and the src caps just
         * [1..maxrate]. In case there was a caps incompatibility things will
         * explode later as appropriate :)
         *
         * In case [X..maxrate] == [X..maxint], skip as we'll set it later */
        if (direction == GST_PAD_SRC && maxrate != G_MAXINT)
          gst_structure_set (s1, "framerate", GST_TYPE_FRACTION_RANGE,
              min_num, min_denom, maxrate, 1, NULL);
        else {
          gst_structure_free (s1);
          s1 = NULL;
        }
      }

      if (direction == GST_PAD_SRC) {
        /* We can accept anything as long as it's at least the minimal
         * framerate that the sink needs */
        gst_structure_set (s2, "framerate", GST_TYPE_FRACTION_RANGE,
            min_num, min_denom, G_MAXINT, 1, NULL);

        /* Also allow unknown framerate, if it isn't already */
        if (min_num != 0 || min_denom != 1) {
          s3 = gst_structure_copy (s);
          gst_structure_set (s3, "framerate", GST_TYPE_FRACTION, 0, 1, NULL);
        }
      } else if (max_num != 0 || max_denom != 1) {
        /* We can provide everything up to the maximum framerate at the src */
        gst_structure_set (s2, "framerate", GST_TYPE_FRACTION_RANGE,
            0, 1, max_num, max_denom, NULL);
      }
    } else if (direction == GST_PAD_SINK) {
      gint min_num = 0, min_denom = 1;
      gint max_num = G_MAXINT, max_denom = 1;

      if (!gst_video_max_rate_clamp_structure (s1, maxrate,
              &min_num, &min_denom, &max_num, &max_denom)) {
        gst_structure_free (s1);
        s1 = NULL;
      }
      gst_structure_set (s2, "framerate", GST_TYPE_FRACTION_RANGE,
          0, 1, maxrate, 1, NULL);
    } else {
      /* set the framerate as a range */
      gst_structure_set (s2, "framerate", GST_TYPE_FRACTION_RANGE,
          0, 1, G_MAXINT, 1, NULL);
    }

    if (s1 != NULL)
      ret = gst_caps_merge_structure (ret, s1);
    ret = gst_caps_merge_structure (ret, s2);
    if (s3 != NULL)
      ret = gst_caps_merge_structure (ret, s3);
  }

  if (filter) {
    GstCaps *intersection;

    intersection =
        gst_caps_intersect_full (filter, ret, GST_CAPS_INTERSECT_FIRST);
    gst_caps_unref (ret);
    ret = intersection;
  }

  return ret;
}
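/* Behavioural note: with max-rate=30 and drop-only=true, sink-pad caps with
 * framerate=60/1 are clamped so the source pad only offers rates up to
 * 30/1, and videorate should then drop roughly every other frame. A quick
 * way to observe this (a sketch; exact caps depend on negotiation):
 *
 *   gst-launch-1.0 videotestsrc ! video/x-raw,framerate=60/1 ! \
 *       videorate drop-only=true max-rate=30 ! fakesink
 */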