static GstCaps *
gst_dshowvideosrc_src_fixate (GstBaseSrc * bsrc, GstCaps * caps)
{
  /* When the requested caps leave size/framerate open, nudge them towards
   * the device's preferred (default) values before the base class fixates. */
  GstDshowVideoSrc *src = GST_DSHOWVIDEOSRC (bsrc);
  GstStructure *structure = gst_caps_get_structure (caps, 0);
  gint match_idx = -1;
  guint idx;

  /* Locate the first entry of the device caps list that covers @caps;
   * its index also identifies the matching pin/mediatype entry. */
  for (idx = 0; idx < gst_caps_get_size (src->caps); idx++) {
    GstCaps *candidate = gst_caps_copy_nth (src->caps, idx);
    gboolean covers = gst_caps_is_subset (caps, candidate);

    gst_caps_unref (candidate);
    if (covers) {
      match_idx = idx;
      break;
    }
  }

  if (match_idx != -1) {
    GList *entry = g_list_nth (src->pins_mediatypes, match_idx);

    if (entry) {
      GstCapturePinMediaType *pin_mediatype =
          (GstCapturePinMediaType *) entry->data;

      /* Pull width/height/framerate towards the device defaults */
      gst_structure_fixate_field_nearest_int (structure, "width",
          pin_mediatype->defaultWidth);
      gst_structure_fixate_field_nearest_int (structure, "height",
          pin_mediatype->defaultHeight);
      gst_structure_fixate_field_nearest_fraction (structure, "framerate",
          pin_mediatype->defaultFPS, 1);
    }
  }

  /* Let the base class finish fixating whatever is still open */
  return GST_BASE_SRC_CLASS (gst_dshowvideosrc_parent_class)->fixate (bsrc,
      caps);
}
static GstStateChangeReturn
gst_smart_encoder_find_elements (GstSmartEncoder * smart_encoder)
{
  GstStateChangeReturn result = GST_STATE_CHANGE_SUCCESS;
  GstCaps *template_caps, *usable;
  guint idx, n_structs;

  /* Already computed on a previous run — nothing to do */
  if (G_UNLIKELY (smart_encoder->available_caps))
    goto beach;

  /* Collect every media type from the source pad template for which both
   * a decoder and an encoder element factory are available. */
  template_caps = gst_static_pad_template_get_caps (&src_template);
  usable = gst_caps_new_empty ();

  n_structs = gst_caps_get_size (template_caps);
  for (idx = 0; idx < n_structs; idx++) {
    GstElementFactory *factory;
    GstCaps *media = gst_caps_copy_nth (template_caps, idx);

    GST_DEBUG_OBJECT (smart_encoder,
        "Checking for available decoder and encoder for %" GST_PTR_FORMAT,
        media);

    factory = get_decoder_factory (media);
    if (factory == NULL) {
      gst_caps_unref (media);
      continue;
    }
    gst_object_unref (factory);

    factory = get_encoder_factory (media);
    if (factory == NULL) {
      gst_caps_unref (media);
      continue;
    }
    gst_object_unref (factory);

    GST_DEBUG_OBJECT (smart_encoder, "OK");
    /* gst_caps_append takes ownership of @media */
    gst_caps_append (usable, media);
  }
  gst_caps_unref (template_caps);

  if (gst_caps_is_empty (usable)) {
    gst_caps_unref (usable);
    result = GST_STATE_CHANGE_FAILURE;
  } else {
    smart_encoder->available_caps = usable;
  }

  GST_DEBUG_OBJECT (smart_encoder, "Done, available_caps:%" GST_PTR_FORMAT,
      smart_encoder->available_caps);

beach:
  return result;
}
static gboolean
gst_vis_src_negotiate (GstVisual * visual)
{
  GstStructure *structure;
  GstCaps *target;
  GstCaps *ourcaps, *peercaps, *common;

  ourcaps = gst_pad_get_caps (visual->srcpad);

  /* see what the peer can do */
  peercaps = gst_pad_peer_get_caps (visual->srcpad);
  if (peercaps != NULL) {
    common = gst_caps_intersect (peercaps, ourcaps);
    gst_caps_unref (peercaps);
    gst_caps_unref (ourcaps);

    if (gst_caps_is_empty (common))
      goto no_format;

    /* take the first structure of the intersection as our target */
    target = gst_caps_copy_nth (common, 0);
    gst_caps_unref (common);
  } else {
    /* no peer: work on our own caps; copy because fixating below
     * modifies the caps in place */
    target = gst_caps_copy (ourcaps);
    gst_caps_unref (ourcaps);
  }

  /* fixate in case something is not fixed. This does nothing if the value
   * is already fixed. For video we always try to fixate to something like
   * 320x240x30 by convention. */
  structure = gst_caps_get_structure (target, 0);
  gst_structure_fixate_field_nearest_int (structure, "width", 320);
  gst_structure_fixate_field_nearest_int (structure, "height", 240);
  gst_structure_fixate_field_nearest_fraction (structure, "framerate", 30, 1);

  gst_pad_set_caps (visual->srcpad, target);
  gst_caps_unref (target);

  return TRUE;

  /* ERRORS */
no_format:
  {
    GST_ELEMENT_ERROR (visual, STREAM, FORMAT, (NULL),
        ("could not negotiate output format"));
    gst_caps_unref (common);
    return FALSE;
  }
}
/* Negotiate the source pad caps against the peer (or fall back to the pad
 * template), fixating size/framerate to the conventional 320x240 @ 25/1.
 * Returns TRUE on success, FALSE when the intersection with the peer is
 * empty. */
static gboolean
gst_monoscope_src_negotiate (GstMonoscope * monoscope)
{
  GstCaps *othercaps, *target, *intersect;
  GstStructure *structure;
  const GstCaps *templ;

  templ = gst_pad_get_pad_template_caps (monoscope->srcpad);

  GST_DEBUG_OBJECT (monoscope, "performing negotiation");

  /* see what the peer can do */
  othercaps = gst_pad_peer_get_caps (monoscope->srcpad);
  if (othercaps) {
    intersect = gst_caps_intersect (othercaps, templ);
    gst_caps_unref (othercaps);

    if (gst_caps_is_empty (intersect))
      goto no_format;

    target = gst_caps_copy_nth (intersect, 0);
    gst_caps_unref (intersect);
  } else {
    /* FIX: copy the template caps instead of taking a reference; the
     * fixate calls below modify the caps in place, which must never be
     * done to the shared pad template caps. */
    target = gst_caps_copy (templ);
  }

  structure = gst_caps_get_structure (target, 0);
  gst_structure_fixate_field_nearest_int (structure, "width", 320);
  gst_structure_fixate_field_nearest_int (structure, "height", 240);
  gst_structure_fixate_field_nearest_fraction (structure, "framerate", 25, 1);

  gst_pad_set_caps (monoscope->srcpad, target);
  gst_caps_unref (target);

  return TRUE;

no_format:
  {
    gst_caps_unref (intersect);
    return FALSE;
  }
}
/* Re-negotiate the source pad caps from a freshly decoded JasPer image.
 *
 * Validates image geometry and component layout, detects whether anything
 * changed since the last frame, then walks the downstream-allowed caps to
 * find the first video format whose component layout matches the decoded
 * image, and caches per-component offsets/strides for the copy-out path.
 *
 * Returns GST_FLOW_OK when caps are (still) valid, GST_FLOW_NOT_NEGOTIATED
 * when no usable downstream format was found or the image is malformed,
 * GST_FLOW_ERROR for unsupported component depth/colour space. */
static GstFlowReturn
gst_jasper_dec_negotiate (GstJasperDec * dec, jas_image_t * image)
{
  GstFlowReturn flow_ret = GST_FLOW_OK;
  gint width, height, channels;
  gint i, j;
  gboolean negotiate = FALSE;
  jas_clrspc_t clrspc;
  GstCaps *allowed_caps, *caps;

  width = jas_image_width (image);
  height = jas_image_height (image);
  channels = jas_image_numcmpts (image);

  GST_LOG_OBJECT (dec, "%d x %d, %d components", width, height, channels);

  /* jp2c bitstream has no real colour space info (kept in container),
   * so decoder may only pretend to know, where it really does not */
  if (!jas_clrspc_isunknown (dec->clrspc)) {
    clrspc = dec->clrspc;
    GST_DEBUG_OBJECT (dec, "forcing container supplied colour space %d",
        clrspc);
    jas_image_setclrspc (image, clrspc);
  } else
    clrspc = jas_image_clrspc (image);

  /* reject degenerate images outright */
  if (!width || !height || !channels || jas_clrspc_isunknown (clrspc))
    goto fail_image;

  /* any change in geometry/layout/colour space forces re-negotiation */
  if (dec->width != width || dec->height != height ||
      dec->channels != channels || dec->clrspc != clrspc)
    negotiate = TRUE;

  /* only 3-component (RGB/YUV) images are supported */
  if (channels != 3)
    goto not_supported;

  /* validate each component: 8-bit unsigned only; track per-component
   * dimensions (subsampled planes may differ from the image size) */
  for (i = 0; i < channels; i++) {
    gint cheight, cwidth, depth, sgnd;

    cheight = jas_image_cmptheight (image, i);
    cwidth = jas_image_cmptwidth (image, i);
    depth = jas_image_cmptprec (image, i);
    sgnd = jas_image_cmptsgnd (image, i);

    GST_LOG_OBJECT (dec, "image component %d, %dx%d, depth %d, sgnd %d", i,
        cwidth, cheight, depth, sgnd);

    if (depth != 8 || sgnd)
      goto not_supported;

    if (dec->cheight[i] != cheight || dec->cwidth[i] != cwidth) {
      dec->cheight[i] = cheight;
      dec->cwidth[i] = cwidth;
      negotiate = TRUE;
    }
  }

  /* nothing changed and we already have a format: keep current caps */
  if (!negotiate && dec->format != GST_VIDEO_FORMAT_UNKNOWN)
    goto done;

  /* clear and refresh to new state */
  flow_ret = GST_FLOW_NOT_NEGOTIATED;
  dec->format = GST_VIDEO_FORMAT_UNKNOWN;
  dec->width = width;
  dec->height = height;
  dec->channels = channels;

  /* retrieve allowed caps, and find the first one that reasonably maps
   * to the parameters of the colourspace */
  caps = gst_pad_get_allowed_caps (dec->srcpad);
  if (!caps) {
    GST_DEBUG_OBJECT (dec, "... but no peer, using template caps");
    /* need to copy because get_allowed_caps returns a ref,
     * and get_pad_template_caps doesn't */
    caps = gst_caps_copy (gst_pad_get_pad_template_caps (dec->srcpad));
  }
  /* avoid lists of fourcc, etc */
  allowed_caps = gst_caps_normalize (caps);
  caps = NULL;
  GST_LOG_OBJECT (dec, "allowed source caps %" GST_PTR_FORMAT, allowed_caps);

  /* try each allowed (normalized) structure in turn until one matches the
   * decoded image's colour family and per-component geometry */
  for (i = 0; i < gst_caps_get_size (allowed_caps); i++) {
    GstVideoFormat format;
    gboolean ok;

    if (caps)
      gst_caps_unref (caps);
    caps = gst_caps_copy_nth (allowed_caps, i);
    /* sigh, ds and _parse_caps need fixed caps for parsing, fixate */
    gst_pad_fixate_caps (dec->srcpad, caps);
    GST_LOG_OBJECT (dec, "checking caps %" GST_PTR_FORMAT, caps);

    if (!gst_video_format_parse_caps (caps, &format, NULL, NULL))
      continue;
    if (gst_video_format_is_rgb (format) &&
        jas_clrspc_fam (clrspc) == JAS_CLRSPC_FAM_RGB) {
      GST_DEBUG_OBJECT (dec, "trying RGB");
      /* map R/G/B channels to JasPer component indices; all must exist */
      if ((dec->cmpt[0] = jas_image_getcmptbytype (image,
                  JAS_IMAGE_CT_COLOR (JAS_CLRSPC_CHANIND_RGB_R))) < 0 ||
          (dec->cmpt[1] = jas_image_getcmptbytype (image,
                  JAS_IMAGE_CT_COLOR (JAS_CLRSPC_CHANIND_RGB_G))) < 0 ||
          (dec->cmpt[2] = jas_image_getcmptbytype (image,
                  JAS_IMAGE_CT_COLOR (JAS_CLRSPC_CHANIND_RGB_B))) < 0) {
        GST_DEBUG_OBJECT (dec, "missing RGB color component");
        continue;
      }
    } else if (gst_video_format_is_yuv (format) &&
        jas_clrspc_fam (clrspc) == JAS_CLRSPC_FAM_YCBCR) {
      GST_DEBUG_OBJECT (dec, "trying YUV");
      /* map Y/Cb/Cr channels to JasPer component indices; all must exist */
      if ((dec->cmpt[0] = jas_image_getcmptbytype (image,
                  JAS_IMAGE_CT_COLOR (JAS_CLRSPC_CHANIND_YCBCR_Y))) < 0 ||
          (dec->cmpt[1] = jas_image_getcmptbytype (image,
                  JAS_IMAGE_CT_COLOR (JAS_CLRSPC_CHANIND_YCBCR_CB))) < 0 ||
          (dec->cmpt[2] = jas_image_getcmptbytype (image,
                  JAS_IMAGE_CT_COLOR (JAS_CLRSPC_CHANIND_YCBCR_CR))) < 0) {
        GST_DEBUG_OBJECT (dec, "missing YUV color component");
        continue;
      }
    } else
      continue;
    /* match format with validity checks */
    ok = TRUE;
    for (j = 0; j < channels; j++) {
      gint cmpt;

      cmpt = dec->cmpt[j];
      if (dec->cwidth[cmpt] != gst_video_format_get_component_width (format,
              j, width) ||
          dec->cheight[cmpt] != gst_video_format_get_component_height (format,
              j, height))
        ok = FALSE;
    }
    /* commit to this format */
    if (ok) {
      dec->format = format;
      break;
    }
  }

  if (caps)
    gst_caps_unref (caps);
  gst_caps_unref (allowed_caps);

  if (dec->format != GST_VIDEO_FORMAT_UNKNOWN) {
    /* cache some video format properties */
    for (j = 0; j < channels; ++j) {
      dec->offset[j] =
          gst_video_format_get_component_offset (dec->format, j, dec->width,
          dec->height);
      dec->inc[j] = gst_video_format_get_pixel_stride (dec->format, j);
      dec->stride[j] =
          gst_video_format_get_row_stride (dec->format, j, dec->width);
    }
    dec->image_size = gst_video_format_get_size (dec->format, width, height);
    dec->alpha = gst_video_format_has_alpha (dec->format);
    /* scratch row buffer used by the copy-out path */
    if (dec->buf)
      g_free (dec->buf);
    dec->buf = g_new0 (glong, dec->width);
    caps =
        gst_video_format_new_caps (dec->format, dec->width, dec->height,
        dec->framerate_numerator, dec->framerate_denominator, 1, 1);
    GST_DEBUG_OBJECT (dec, "Set format to %d, size to %dx%d", dec->format,
        dec->width, dec->height);
    if (!gst_pad_set_caps (dec->srcpad, caps))
      flow_ret = GST_FLOW_NOT_NEGOTIATED;
    else
      flow_ret = GST_FLOW_OK;
    gst_caps_unref (caps);
  }

done:
  return flow_ret;

  /* ERRORS */
fail_image:
  {
    GST_DEBUG_OBJECT (dec, "Failed to process decoded image.");
    flow_ret = GST_FLOW_NOT_NEGOTIATED;
    goto done;
  }
not_supported:
  {
    GST_DEBUG_OBJECT (dec, "Decoded image has unsupported colour space.");
    GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL), ("Unsupported colorspace"));
    flow_ret = GST_FLOW_ERROR;
    goto done;
  }
}
static gboolean gst_dshowvideosrc_set_caps (GstBaseSrc * bsrc, GstCaps * caps) { HRESULT hres; IPin *input_pin = NULL; IPin *output_pin = NULL; GstDshowVideoSrc *src = GST_DSHOWVIDEOSRC (bsrc); GstStructure *s = gst_caps_get_structure (caps, 0); OAFilterState ds_graph_state; GstCaps *current_caps; /* search the negociated caps in our caps list to get its index and the corresponding mediatype */ if (gst_caps_is_subset (caps, src->caps)) { guint i = 0; gint res = -1; hres = src->media_control->GetState(0, &ds_graph_state); if(ds_graph_state == State_Running) { GST_INFO("Setting caps while DirectShow graph is already running"); current_caps = gst_pad_get_current_caps(GST_BASE_SRC_PAD(src)); if(gst_caps_is_equal(current_caps, caps)) { /* no need to set caps, just return */ GST_INFO("Not resetting caps"); gst_caps_unref(current_caps); return TRUE; } else { /* stop graph and disconnect filters so new caps can be set */ GST_INFO("Different caps, stopping DirectShow graph"); hres = src->media_control->Stop(); hres = src->media_control->GetState(2000, &ds_graph_state); if(hres != S_OK) { GST_ERROR("Could not stop DirectShow graph. 
Cannot renegoiate pins."); goto error; } gst_dshow_get_pin_from_filter (src->dshow_fakesink, PINDIR_INPUT, &input_pin); if (!input_pin) { input_pin->Release(); GST_ERROR ("Can't get input pin from our dshow fakesink"); goto error; } input_pin->ConnectedTo(&output_pin); hres = input_pin->Disconnect(); hres = output_pin->Disconnect(); input_pin->Release(); output_pin->Release(); } gst_caps_unref(current_caps); } for (; i < gst_caps_get_size (src->caps) && res == -1; i++) { GstCaps *capstmp = gst_caps_copy_nth (src->caps, i); if (gst_caps_is_subset (caps, capstmp)) { res = i; } gst_caps_unref (capstmp); } if (res != -1 && src->pins_mediatypes) { /* get the corresponding media type and build the dshow graph */ GList *type_pin_mediatype = g_list_nth (src->pins_mediatypes, res); if (type_pin_mediatype) { GstCapturePinMediaType *pin_mediatype = (GstCapturePinMediaType *) type_pin_mediatype->data; gchar *src_caps_string = NULL; const gchar *format_string = NULL; /* retrieve the desired video size */ VIDEOINFOHEADER *video_info = NULL; gint width = 0; gint height = 0; gint numerator = 0; gint denominator = 0; gst_structure_get_int (s, "width", &width); gst_structure_get_int (s, "height", &height); gst_structure_get_fraction (s, "framerate", &numerator, &denominator); /* check if the desired video size is valid about granularity */ /* This check will be removed when GST_TYPE_INT_RANGE_STEP exits */ /* See remarks in gst_dshow_new_video_caps function */ if (pin_mediatype->granularityWidth != 0 && width % pin_mediatype->granularityWidth != 0) g_warning ("your desired video size is not valid : %d mod %d !=0\n", width, pin_mediatype->granularityWidth); if (pin_mediatype->granularityHeight != 0 && height % pin_mediatype->granularityHeight != 0) g_warning ("your desired video size is not valid : %d mod %d !=0\n", height, pin_mediatype->granularityHeight); /* update mediatype */ video_info = (VIDEOINFOHEADER *) pin_mediatype->mediatype->pbFormat; video_info->bmiHeader.biWidth = 
width; video_info->bmiHeader.biHeight = height; video_info->AvgTimePerFrame = (LONGLONG) (10000000 * denominator / (double) numerator); video_info->bmiHeader.biSizeImage = DIBSIZE (video_info->bmiHeader); pin_mediatype->mediatype->lSampleSize = DIBSIZE (video_info->bmiHeader); src->dshow_fakesink->gst_set_media_type (pin_mediatype->mediatype); src->dshow_fakesink->gst_set_buffer_callback ( (push_buffer_func) gst_dshowvideosrc_push_buffer, src); gst_dshow_get_pin_from_filter (src->dshow_fakesink, PINDIR_INPUT, &input_pin); if (!input_pin) { GST_ERROR ("Can't get input pin from our dshow fakesink"); goto error; } hres = src->filter_graph->ConnectDirect (pin_mediatype->capture_pin, input_pin, pin_mediatype->mediatype); input_pin->Release (); if (hres != S_OK) { GST_ERROR ("Can't connect capture filter with fakesink filter (error=0x%x)", hres); goto error; } /* save width and height negociated */ gst_structure_get_int (s, "width", &src->width); gst_structure_get_int (s, "height", &src->height); src->is_rgb = FALSE; format_string = gst_structure_get_string (s, "format"); if(format_string) { if(!strcmp(format_string, "BGR")) { src->is_rgb = TRUE; } else { src->is_rgb = FALSE; } } hres = src->media_control->Run(); hres = src->media_control->GetState(5000, &ds_graph_state); if(hres != S_OK || ds_graph_state != State_Running) { GST_ERROR("Could not run graph"); goto error; } } } } return TRUE; error: return FALSE; }
/* Prepare the DirectShow audio capture graph for the negotiated ring
 * buffer spec (GStreamer 1.0 variant).
 *
 * If the current pad caps already equal spec->caps, nothing is done.
 * Otherwise the media filter is stopped if running, the matching device
 * mediatype is looked up by index in src->caps, the fakesink is wired to
 * the capture pin, segment size/count are derived from latency_time and
 * buffer_time, and the filter is restarted if it was running before. */
static gboolean
gst_dshowaudiosrc_prepare (GstAudioSrc * asrc, GstAudioRingBufferSpec * spec)
{
  HRESULT hres;
  IPin *input_pin = NULL;
  GstDshowAudioSrc *src = GST_DSHOWAUDIOSRC (asrc);
  GstCaps *current_caps = gst_pad_get_current_caps (GST_BASE_SRC_PAD (asrc));

  /* already prepared with identical caps: nothing to do */
  if (current_caps) {
    if (gst_caps_is_equal (spec->caps, current_caps)) {
      gst_caps_unref (current_caps);
      return TRUE;
    }
    gst_caps_unref (current_caps);
  }

  /* In 1.0, prepare() seems to be called in the PLAYING state. Most
     of the time you can't do much on a running graph. */
  gboolean was_running = src->is_running;
  if (was_running) {
    HRESULT hres = src->media_filter->Stop ();
    if (hres != S_OK) {
      GST_ERROR
          ("Can't STOP the directshow capture graph for preparing (error=0x%x)",
          hres);
      return FALSE;
    }
    src->is_running = FALSE;
  }

  /* search the negociated caps in our caps list to get its index and the
     corresponding mediatype */
  if (gst_caps_is_subset (spec->caps, src->caps)) {
    guint i = 0;
    gint res = -1;

    /* find the index of the first device caps entry covering spec->caps */
    for (; i < gst_caps_get_size (src->caps) && res == -1; i++) {
      GstCaps *capstmp = gst_caps_copy_nth (src->caps, i);

      if (gst_caps_is_subset (spec->caps, capstmp)) {
        res = i;
      }
      gst_caps_unref (capstmp);
    }

    if (res != -1 && src->pins_mediatypes) {
      /*get the corresponding media type and build the dshow graph */
      GstCapturePinMediaType *pin_mediatype = NULL;
      GList *type = g_list_nth (src->pins_mediatypes, res);

      if (type) {
        pin_mediatype = (GstCapturePinMediaType *) type->data;

        src->dshow_fakesink->gst_set_media_type (pin_mediatype->mediatype);
        src->dshow_fakesink->gst_set_buffer_callback (
            (push_buffer_func) gst_dshowaudiosrc_push_buffer, src);

        gst_dshow_get_pin_from_filter (src->dshow_fakesink, PINDIR_INPUT,
            &input_pin);
        if (!input_pin) {
          GST_ERROR ("Can't get input pin from our directshow fakesink filter");
          goto error;
        }

        /* derive segment size from bytes-per-frame * rate * latency;
         * segment count from buffer_time / latency_time (rounded) */
        spec->segsize =
            (gint) (spec->info.bpf * spec->info.rate * spec->latency_time /
            GST_MSECOND);
        spec->segtotal =
            (gint) ((gfloat) spec->buffer_time /
            (gfloat) spec->latency_time + 0.5);
        /* fall back to a conservative default if the device refuses the
         * requested latency */
        if (!gst_dshow_configure_latency (pin_mediatype->capture_pin,
                spec->segsize)) {
          GST_WARNING ("Could not change capture latency");
          spec->segsize = spec->info.rate * spec->info.channels;
          spec->segtotal = 2;
        };
        GST_INFO ("Configuring with segsize:%d segtotal:%d", spec->segsize,
            spec->segtotal);

        /* disconnect any stale connections before re-wiring */
        if (gst_dshow_is_pin_connected (pin_mediatype->capture_pin)) {
          GST_DEBUG_OBJECT (src,
              "capture_pin already connected, disconnecting");
          src->filter_graph->Disconnect (pin_mediatype->capture_pin);
        }

        if (gst_dshow_is_pin_connected (input_pin)) {
          GST_DEBUG_OBJECT (src, "input_pin already connected, disconnecting");
          src->filter_graph->Disconnect (input_pin);
        }

        hres = src->filter_graph->ConnectDirect (pin_mediatype->capture_pin,
            input_pin, NULL);
        input_pin->Release ();

        if (hres != S_OK) {
          GST_ERROR
              ("Can't connect capture filter with fakesink filter (error=0x%x)",
              hres);
          goto error;
        }
      }
    }
  }

  /* restore the previous run state */
  if (was_running) {
    HRESULT hres = src->media_filter->Run (0);
    if (hres != S_OK) {
      GST_ERROR
          ("Can't RUN the directshow capture graph after prepare (error=0x%x)",
          hres);
      return FALSE;
    }
    src->is_running = TRUE;
  }

  return TRUE;

error:
  /* Don't restart the graph, we're out anyway. */
  return FALSE;
}
/* Prepare the DirectShow audio capture graph for the negotiated ring
 * buffer spec (GStreamer 0.10 / C COM-macro variant).
 *
 * Looks up the index of spec->caps in the device caps list, configures the
 * fakesink filter via its IGstDshowInterface, and connects the capture pin
 * to the fakesink input pin.
 *
 * Fix vs. previous revision: srcinterface is set to NULL immediately after
 * being released mid-function, so the error path can no longer Release()
 * the same interface a second time (double release of a COM object). */
static gboolean
gst_dshowaudiosrc_prepare (GstAudioSrc * asrc, GstRingBufferSpec * spec)
{
  HRESULT hres;
  IGstDshowInterface *srcinterface = NULL;
  IPin *input_pin = NULL;
  GstDshowAudioSrc *src = GST_DSHOWAUDIOSRC (asrc);

  /* search the negociated caps in our caps list to get its index and
     the corresponding mediatype */
  if (gst_caps_is_subset (spec->caps, src->caps)) {
    guint i = 0;
    gint res = -1;

    for (; i < gst_caps_get_size (src->caps) && res == -1; i++) {
      GstCaps *capstmp = gst_caps_copy_nth (src->caps, i);

      if (gst_caps_is_subset (spec->caps, capstmp)) {
        res = i;
      }
      gst_caps_unref (capstmp);
    }

    if (res != -1 && src->pins_mediatypes) {
      /*get the corresponding media type and build the dshow graph */
      GstCapturePinMediaType *pin_mediatype = NULL;
      GList *type = g_list_nth (src->pins_mediatypes, res);

      if (type) {
        pin_mediatype = (GstCapturePinMediaType *) type->data;

        hres = IBaseFilter_QueryInterface (src->dshow_fakesink,
            &IID_IGstDshowInterface, (void **) &srcinterface);
        if (hres != S_OK || !srcinterface) {
          GST_CAT_ERROR (dshowaudiosrc_debug,
              "Can't get IGstDshowInterface interface from our dshow fakesink filter (error=%d)",
              hres);
          goto error;
        }

        IGstDshowInterface_gst_set_media_type (srcinterface,
            pin_mediatype->mediatype);
        IGstDshowInterface_gst_set_buffer_callback (srcinterface,
            (byte *) gst_dshowaudiosrc_push_buffer, (byte *) src);

        /* done with the interface: release our reference and clear the
         * pointer so the error path cannot release it a second time */
        IGstDshowInterface_Release (srcinterface);
        srcinterface = NULL;

        gst_dshow_get_pin_from_filter (src->dshow_fakesink, PINDIR_INPUT,
            &input_pin);
        if (!input_pin) {
          GST_CAT_ERROR (dshowaudiosrc_debug,
              "Can't get input pin from our directshow fakesink filter");
          goto error;
        }

        hres = IFilterGraph_ConnectDirect (src->filter_graph,
            pin_mediatype->capture_pin, input_pin, NULL);
        IPin_Release (input_pin);

        if (hres != S_OK) {
          GST_CAT_ERROR (dshowaudiosrc_debug,
              "Can't connect capture filter with fakesink filter (error=%d)",
              hres);
          goto error;
        }

        spec->segsize = spec->rate * spec->channels;
        spec->segtotal = 1;
      }
    }
  }

  return TRUE;

error:
  if (srcinterface) {
    IGstDshowInterface_Release (srcinterface);
  }
  return FALSE;
}
GstCaps* tcam_gst_find_largest_caps (const GstCaps* incoming)
{
    /**
     * find_largest_caps tries to find the largest caps
     * according to the following rules:
     *
     * 1. determine the preferred format
     *       prefer bayer 8-bit over everything else
     *       if bayer 8-bit does not exist order according to the following list:
     *       color formats like BGR
     *       formats like MJPEG
     *       GRAY16
     *       GRAY8
     *       bayer12/16
     *
     * 2. find the largest resolution
     * 3. for the format with the largest resolution take the highest framerate
     *
     * Returns a new, fixed caps object the caller must unref, or nullptr
     * when @incoming is null/empty or no usable entry could be found.
     *
     * Fixes vs. previous revision: empty-caps check no longer leaks the
     * gst_caps_to_string() result (uses gst_caps_is_empty instead); the
     * info log string is freed; largest_index == -1 is guarded before
     * gst_caps_copy_nth; the intermediate caps are unreffed on the
     * fixate-failure path and before returning the rebuilt caps.
     */
    if (incoming == nullptr || gst_caps_is_empty(incoming))
    {
        return nullptr;
    }

    std::vector<uint32_t> format_fourccs = index_format_fourccs(incoming);
    uint32_t preferred_fourcc = find_preferred_format(format_fourccs);

    int largest_index = -1;
    int largest_width = -1;
    int largest_height = -1;

    for (guint i = 0; i < gst_caps_get_size(incoming); ++i)
    {
        GstStructure* struc = gst_caps_get_structure(incoming, i);

        const char* format = gst_structure_get_string(struc, "format");

        uint32_t fourcc =
            tcam_fourcc_from_gst_1_0_caps_string(gst_structure_get_name(struc), format);

        // TODO: what about video/x-raw, format={GRAY8, GRAY16_LE}
        if (fourcc != preferred_fourcc)
        {
            continue;
        }

        int width = -1;
        int height = -1;
        bool new_width = false;
        bool new_height = false;

        // will fail if width is a range so we only handle
        // halfway fixated caps
        if (gst_structure_get_int(struc, "width", &width))
        {
            if (largest_width <= width)
            {
                largest_width = width;
                new_width = true;
            }
        }
        else
        {
            tcam_warning("Field 'width' does not have the type 'int'");
        }

        if (gst_structure_get_int(struc, "height", &height))
        {
            if (largest_height <= height)
            {
                largest_height = height;
                new_height = true;
            }
        }
        else
        {
            tcam_warning("Field 'height' does not have the type 'int'");
        }

        if (new_width && new_height)
        {
            largest_index = i;
        }
    }

    // no structure matched the preferred format / had usable dimensions
    if (largest_index < 0)
    {
        tcam_error("Unable to determine largest caps. Returning NULL");
        return nullptr;
    }

    GstCaps* largest_caps = gst_caps_copy_nth(incoming, largest_index);

    gchar* caps_str = gst_caps_to_string(largest_caps);
    tcam_info("Fixating assumed largest caps: %s", caps_str);
    g_free(caps_str);

    if (!tcam_gst_fixate_caps(largest_caps))
    {
        gst_caps_unref(largest_caps);
        tcam_error("Cannot fixate largest caps. Returning NULL");
        return nullptr;
    }

    GstStructure* s = gst_caps_get_structure(largest_caps, 0);

    int h;
    gst_structure_get_int(s, "height", &h);
    int w;
    gst_structure_get_int(s, "width", &w);

    int num;
    int den;
    gst_structure_get_fraction(s, "framerate", &num, &den);

    // rebuild a minimal, fully fixed caps from the fixated values
    GstCaps* result = gst_caps_new_simple(gst_structure_get_name(s),
                                          "framerate", GST_TYPE_FRACTION, num, den,
                                          "width", G_TYPE_INT, w,
                                          "height", G_TYPE_INT, h,
                                          NULL);

    if (gst_structure_has_field(s, "format"))
    {
        gst_caps_set_value(result, "format", gst_structure_get_value(s, "format"));
    }

    gst_caps_unref(largest_caps);

    return result;
}
/* Decode and push one DTS frame.
 *
 * Updates cached stream properties (sample rate, channel flags, bit rate),
 * resolves the requested output channel layout on first use by fixating
 * against downstream caps, runs libdts on the frame, renegotiates the
 * source pad if anything changed, then interleaves each 256-sample block
 * into an output buffer and pushes it downstream.
 *
 * @flags carries the libdts channel flags of the incoming frame; it is
 * reused below as the in/out flags argument for dts_frame(). */
static GstFlowReturn
gst_dtsdec_handle_frame (GstDtsDec * dts, guint8 * data,
    guint length, gint flags, gint sample_rate, gint bit_rate)
{
  gboolean need_renegotiation = FALSE;
  gint channels, num_blocks;
  GstBuffer *out;
  gint i, s, c, num_c;
  sample_t *samples;
  GstFlowReturn result = GST_FLOW_OK;

  /* go over stream properties, update caps/streaminfo if needed */
  if (dts->sample_rate != sample_rate) {
    need_renegotiation = TRUE;
    dts->sample_rate = sample_rate;
  }

  dts->stream_channels = flags;

  if (bit_rate != dts->bit_rate) {
    dts->bit_rate = bit_rate;
    gst_dtsdec_update_streaminfo (dts);
  }

  /* request_channels still unset: pick an output layout, preferring what
   * downstream's first caps structure fixates to */
  if (dts->request_channels == DTS_CHANNEL) {
    GstCaps *caps;

    caps = gst_pad_get_allowed_caps (dts->srcpad);
    if (caps && gst_caps_get_size (caps) > 0) {
      GstCaps *copy = gst_caps_copy_nth (caps, 0);
      GstStructure *structure = gst_caps_get_structure (copy, 0);
      gint channels;
      /* libdts layout to use for each channel count 1..6 */
      const int dts_channels[6] = {
        DTS_MONO,
        DTS_STEREO,
        DTS_STEREO | DTS_LFE,
        DTS_2F2R,
        DTS_2F2R | DTS_LFE,
        DTS_3F2R | DTS_LFE,
      };

      /* Prefer the original number of channels, but fixate to something
       * preferred (first in the caps) downstream if possible. */
      gst_structure_fixate_field_nearest_int (structure, "channels",
          flags ? gst_dtsdec_channels (flags, NULL) : 6);
      gst_structure_get_int (structure, "channels", &channels);
      if (channels <= 6)
        dts->request_channels = dts_channels[channels - 1];
      else
        dts->request_channels = dts_channels[5];

      gst_caps_unref (copy);
    } else if (flags) {
      dts->request_channels = dts->stream_channels;
    } else {
      dts->request_channels = DTS_3F2R | DTS_LFE;
    }

    if (caps)
      gst_caps_unref (caps);
  }

  /* process */
  flags = dts->request_channels | DTS_ADJUST_LEVEL;
  dts->level = 1;
  if (dts_frame (dts->state, data, &flags, &dts->level, dts->bias)) {
    /* bad frame: skip it rather than erroring the stream out */
    GST_WARNING ("dts_frame error");
    return GST_FLOW_OK;
  }

  channels = flags & (DTS_CHANNEL_MASK | DTS_LFE);
  if (dts->using_channels != channels) {
    need_renegotiation = TRUE;
    dts->using_channels = channels;
  }

  if (need_renegotiation == TRUE) {
    GST_DEBUG ("dtsdec: sample_rate:%d stream_chans:0x%x using_chans:0x%x",
        dts->sample_rate, dts->stream_channels, dts->using_channels);
    if (!gst_dtsdec_renegotiate (dts)) {
      GST_ELEMENT_ERROR (dts, CORE, NEGOTIATION, (NULL), (NULL));
      return GST_FLOW_ERROR;
    }
  }

  /* NULL callbacks disable libdts dynamic range compression */
  if (dts->dynamic_range_compression == FALSE) {
    dts_dynrng (dts->state, NULL, NULL);
  }

  /* handle decoded data, one block is 256 samples */
  num_blocks = dts_blocks_num (dts->state);
  for (i = 0; i < num_blocks; i++) {
    if (dts_block (dts->state)) {
      GST_WARNING ("dts_block error %d", i);
      continue;
    }

    samples = dts_samples (dts->state);
    num_c = gst_dtsdec_channels (dts->using_channels, NULL);

    result = gst_pad_alloc_buffer_and_set_caps (dts->srcpad, 0,
        (SAMPLE_WIDTH / 8) * 256 * num_c, GST_PAD_CAPS (dts->srcpad), &out);
    if (result != GST_FLOW_OK)
      break;

    GST_BUFFER_TIMESTAMP (out) = dts->current_ts;
    GST_BUFFER_DURATION (out) = GST_SECOND * 256 / dts->sample_rate;
    dts->current_ts += GST_BUFFER_DURATION (out);

    /* libdts returns buffers in 256-sample-blocks per channel,
     * we want interleaved. And we need to copy anyway... */
    /* note: @data is reused here as the output write cursor */
    data = GST_BUFFER_DATA (out);
    for (s = 0; s < 256; s++) {
      for (c = 0; c < num_c; c++) {
        *(sample_t *) data = samples[s + c * 256];
        data += (SAMPLE_WIDTH / 8);
      }
    }

    /* push on */
    result = gst_pad_push (dts->srcpad, out);
    if (result != GST_FLOW_OK)
      break;
  }

  return result;
}
/* Sink pad setcaps handler: (re)configure the xvid encoder from the video
 * caps, then construct and set matching source pad caps (including the
 * profile/level the encoder actually selected).
 *
 * Returns FALSE when mandatory fields are missing, the colourspace is not
 * supported, encoder setup fails, or downstream rejects the new caps. */
static gboolean
gst_xvidenc_setcaps (GstPad * pad, GstCaps * vscaps)
{
  GstXvidEnc *xvidenc;
  GstStructure *structure;
  gint w, h;
  const GValue *fps, *par;
  gint xvid_cs = -1;

  xvidenc = GST_XVIDENC (GST_PAD_PARENT (pad));

  /* if there's something old around, remove it */
  if (xvidenc->handle) {
    gst_xvidenc_flush_buffers (xvidenc, TRUE);
    xvid_encore (xvidenc->handle, XVID_ENC_DESTROY, NULL, NULL);
    xvidenc->handle = NULL;
  }

  structure = gst_caps_get_structure (vscaps, 0);

  /* width, height and framerate are mandatory */
  if (!gst_structure_get_int (structure, "width", &w) ||
      !gst_structure_get_int (structure, "height", &h)) {
    return FALSE;
  }

  fps = gst_structure_get_value (structure, "framerate");
  if (fps == NULL || !GST_VALUE_HOLDS_FRACTION (fps)) {
    GST_WARNING_OBJECT (pad, "no framerate specified, or not a GstFraction");
    return FALSE;
  }

  /* optional par info */
  par = gst_structure_get_value (structure, "pixel-aspect-ratio");

  /* translate the caps colourspace into an xvid csp id */
  xvid_cs = gst_xvid_structure_to_csp (structure);
  if (xvid_cs == -1) {
    gchar *sstr;

    sstr = gst_structure_to_string (structure);
    GST_DEBUG_OBJECT (xvidenc, "Did not find xvid colourspace for caps %s",
        sstr);
    g_free (sstr);
    return FALSE;
  }

  xvidenc->csp = xvid_cs;
  xvidenc->width = w;
  xvidenc->height = h;
  xvidenc->fbase = gst_value_get_fraction_numerator (fps);
  xvidenc->fincr = gst_value_get_fraction_denominator (fps);

  if ((par != NULL) && GST_VALUE_HOLDS_FRACTION (par)) {
    xvidenc->par_width = gst_value_get_fraction_numerator (par);
    xvidenc->par_height = gst_value_get_fraction_denominator (par);
  } else {
    /* default to square pixels when no PAR given */
    xvidenc->par_width = 1;
    xvidenc->par_height = 1;
  }

  /* wipe xframe cache given possible change caps properties */
  g_free (xvidenc->xframe_cache);
  xvidenc->xframe_cache = NULL;

  if (gst_xvidenc_setup (xvidenc)) {
    gboolean ret = FALSE;
    GstCaps *new_caps = NULL, *allowed_caps;

    /* please downstream with preferred caps */
    allowed_caps = gst_pad_get_allowed_caps (xvidenc->srcpad);
    GST_DEBUG_OBJECT (xvidenc, "allowed caps: %" GST_PTR_FORMAT, allowed_caps);

    if (allowed_caps && !gst_caps_is_empty (allowed_caps)) {
      new_caps = gst_caps_copy_nth (allowed_caps, 0);
    } else {
      new_caps = gst_caps_new_simple ("video/x-xvid", NULL);
    }
    if (allowed_caps)
      gst_caps_unref (allowed_caps);

    gst_caps_set_simple (new_caps,
        "width", G_TYPE_INT, w,
        "height", G_TYPE_INT, h,
        "framerate", GST_TYPE_FRACTION, xvidenc->fbase, xvidenc->fincr,
        "pixel-aspect-ratio", GST_TYPE_FRACTION,
        xvidenc->par_width, xvidenc->par_height, NULL);
    /* just to be sure */
    gst_pad_fixate_caps (xvidenc->srcpad, new_caps);

    /* advertise the profile/level the setup step actually chose;
     * 0 means "unrestricted", so no field is set then */
    if (xvidenc->used_profile != 0) {
      switch (xvidenc->used_profile) {
        case XVID_PROFILE_S_L0:
          gst_caps_set_simple (new_caps, "profile", G_TYPE_STRING, "simple",
              "level", G_TYPE_STRING, "0", NULL);
          break;
        case XVID_PROFILE_S_L1:
          gst_caps_set_simple (new_caps, "profile", G_TYPE_STRING, "simple",
              "level", G_TYPE_STRING, "1", NULL);
          break;
        case XVID_PROFILE_S_L2:
          gst_caps_set_simple (new_caps, "profile", G_TYPE_STRING, "simple",
              "level", G_TYPE_STRING, "2", NULL);
          break;
        case XVID_PROFILE_S_L3:
          gst_caps_set_simple (new_caps, "profile", G_TYPE_STRING, "simple",
              "level", G_TYPE_STRING, "3", NULL);
          break;
          /* levels 4a/5/6 intentionally not advertised (kept disabled):
             case XVID_PROFILE_S_L4a:
             gst_caps_set_simple (new_caps, "profile", G_TYPE_STRING, "simple",
             "level", G_TYPE_STRING, "4a", NULL);
             break;
             case XVID_PROFILE_S_L5:
             gst_caps_set_simple (new_caps, "profile", G_TYPE_STRING, "simple",
             "level", G_TYPE_STRING, "5", NULL);
             break;
             case XVID_PROFILE_S_L6:
             gst_caps_set_simple (new_caps, "profile", G_TYPE_STRING, "simple",
             "level", G_TYPE_STRING, "6", NULL);
             break; */
        case XVID_PROFILE_ARTS_L1:
          gst_caps_set_simple (new_caps, "profile", G_TYPE_STRING,
              "advanced-real-time-simple", "level", G_TYPE_STRING, "1", NULL);
          break;
        case XVID_PROFILE_ARTS_L2:
          gst_caps_set_simple (new_caps, "profile", G_TYPE_STRING,
              "advanced-real-time-simple", "level", G_TYPE_STRING, "2", NULL);
          break;
        case XVID_PROFILE_ARTS_L3:
          gst_caps_set_simple (new_caps, "profile", G_TYPE_STRING,
              "advanced-real-time-simple", "level", G_TYPE_STRING, "3", NULL);
          break;
        case XVID_PROFILE_ARTS_L4:
          gst_caps_set_simple (new_caps, "profile", G_TYPE_STRING,
              "advanced-real-time-simple", "level", G_TYPE_STRING, "4", NULL);
          break;
        case XVID_PROFILE_AS_L0:
          gst_caps_set_simple (new_caps, "profile", G_TYPE_STRING,
              "advanced-simple", "level", G_TYPE_STRING, "0", NULL);
          break;
        case XVID_PROFILE_AS_L1:
          gst_caps_set_simple (new_caps, "profile", G_TYPE_STRING,
              "advanced-simple", "level", G_TYPE_STRING, "1", NULL);
          break;
        case XVID_PROFILE_AS_L2:
          gst_caps_set_simple (new_caps, "profile", G_TYPE_STRING,
              "advanced-simple", "level", G_TYPE_STRING, "2", NULL);
          break;
        case XVID_PROFILE_AS_L3:
          gst_caps_set_simple (new_caps, "profile", G_TYPE_STRING,
              "advanced-simple", "level", G_TYPE_STRING, "3", NULL);
          break;
        case XVID_PROFILE_AS_L4:
          gst_caps_set_simple (new_caps, "profile", G_TYPE_STRING,
              "advanced-simple", "level", G_TYPE_STRING, "4", NULL);
          break;
        default:
          g_assert_not_reached ();
          break;
      }
    }

    /* src pad should accept anyway */
    ret = gst_pad_set_caps (xvidenc->srcpad, new_caps);
    gst_caps_unref (new_caps);

    /* downstream refused: tear the encoder back down */
    if (!ret && xvidenc->handle) {
      xvid_encore (xvidenc->handle, XVID_ENC_DESTROY, NULL, NULL);
      xvidenc->handle = NULL;
    }
    return ret;
  } else                        /* setup did not work out */
    return FALSE;
}
/* compare output with ffmpegcolorspace */
/* Build a pipeline that runs videotestsrc through colorspace (and, when
 * @comp is TRUE, through ffmpegcolorspace in parallel with a `compare`
 * element checking both outputs via SSIM), then exercise every pairwise
 * input/output format combination the involved elements support at the
 * given @width x @height. */
static void
colorspace_compare (gint width, gint height, gboolean comp)
{
  GstBus *bus;
  GstElement *pipeline, *src, *filter1, *filter2, *csp, *fcsp, *fakesink;
  GstElement *queue1, *queue2, *tee, *compare;
  GstCaps *caps, *tcaps, *rcaps, *fcaps;
  GstCaps *ccaps;
  GstPad *pad;

  gint i, j;

  /* create elements */
  pipeline = gst_pipeline_new ("pipeline");
  src = gst_element_factory_make ("videotestsrc", "videotestsrc");
  fail_unless (src != NULL);
  filter1 = gst_element_factory_make ("capsfilter", "capsfilter1");
  fail_unless (filter1 != NULL);
  csp = gst_element_factory_make ("colorspace", "colorspace");
  fail_unless (csp != NULL);
  filter2 = gst_element_factory_make ("capsfilter", "capsfilter2");
  fail_unless (filter2 != NULL);

  /* the comparison branch is only built when @comp is set */
  if (comp) {
    fcsp = gst_element_factory_make ("ffmpegcolorspace", "ffmpegcolorspace");
    fail_unless (fcsp != NULL);
    tee = gst_element_factory_make ("tee", "tee");
    fail_unless (tee != NULL);
    queue1 = gst_element_factory_make ("queue", "queue1");
    fail_unless (queue1 != NULL);
    queue2 = gst_element_factory_make ("queue", "queue2");
    fail_unless (queue2 != NULL);
    compare = gst_element_factory_make ("compare", "compare");
    fail_unless (compare != NULL);
  } else {
    fcsp = tee = queue1 = queue2 = compare = NULL;
  }

  fakesink = gst_element_factory_make ("fakesink", "fakesink");
  fail_unless (fakesink != NULL);

  /* add and link */
  /* NOTE: when @comp is FALSE, tee..compare are all NULL, so the varargs
   * list is terminated early at `tee` — only src..fakesink get added.
   * This relies on NULL-termination of gst_bin_add_many. */
  gst_bin_add_many (GST_BIN (pipeline), src, filter1, filter2, csp, fakesink,
      tee, queue1, queue2, fcsp, compare, NULL);
  fail_unless (gst_element_link (src, filter1));

  if (comp) {
    /* tee into the ffmpegcolorspace reference branch ("sink" input of
     * compare) and the colorspace branch under test ("check" input) */
    fail_unless (gst_element_link (filter1, tee));

    fail_unless (gst_element_link (tee, queue1));
    fail_unless (gst_element_link (queue1, fcsp));
    fail_unless (gst_element_link_pads (fcsp, NULL, compare, "sink"));

    fail_unless (gst_element_link (tee, queue2));
    fail_unless (gst_element_link (queue2, csp));
    fail_unless (gst_element_link_pads (csp, NULL, compare, "check"));

    fail_unless (gst_element_link (compare, filter2));
  } else {
    fail_unless (gst_element_link (filter1, csp));
    fail_unless (gst_element_link (csp, filter2));
  }
  fail_unless (gst_element_link (filter2, fakesink));

  /* obtain possible caps combinations */
  if (comp) {
    pad = gst_element_get_static_pad (fcsp, "sink");
    fail_unless (pad != NULL);
    ccaps = gst_pad_get_pad_template_caps (pad);
    fail_unless (ccaps != NULL);
    fcaps = ccaps;
    gst_object_unref (pad);
  } else {
    fcaps = gst_caps_new_any ();
  }

  pad = gst_element_get_static_pad (csp, "sink");
  fail_unless (pad != NULL);
  ccaps = gst_pad_get_pad_template_caps (pad);
  fail_unless (ccaps != NULL);
  gst_object_unref (pad);

  /* handle videotestsrc limitations */
  pad = gst_element_get_static_pad (src, "src");
  fail_unless (pad != NULL);
  caps = (GstCaps *) gst_pad_get_pad_template_caps (pad);
  fail_unless (caps != NULL);
  gst_object_unref (pad);

  /* restrict to fixed size/rate, sdtv matrix, mpeg2 chroma siting */
  rcaps = gst_caps_new_simple ("video/x-raw-yuv",
      "width", G_TYPE_INT, width, "height", G_TYPE_INT, height,
      "framerate", GST_TYPE_FRACTION, 25, 1,
      "color-matrix", G_TYPE_STRING, "sdtv",
      "chroma-site", G_TYPE_STRING, "mpeg2", NULL);
  gst_caps_append (rcaps, gst_caps_new_simple ("video/x-raw-rgb",
          "width", G_TYPE_INT, width, "height", G_TYPE_INT, height,
          "framerate", GST_TYPE_FRACTION, 25, 1,
          "depth", G_TYPE_INT, 32, NULL));
  /* FIXME also allow x-raw-gray if/when colorspace actually handles those */

  /* limit to supported compare types */
  if (comp) {
    gst_caps_append (rcaps, gst_caps_new_simple ("video/x-raw-rgb",
            "width", G_TYPE_INT, width, "height", G_TYPE_INT, height,
            "framerate", GST_TYPE_FRACTION, 25, 1,
            "depth", G_TYPE_INT, 24, NULL));
  }

  /* intersect all constraints down to the set of testable formats */
  tcaps = gst_caps_intersect (fcaps, ccaps);
  gst_caps_unref (fcaps);
  gst_caps_unref (ccaps);
  caps = gst_caps_intersect (tcaps, caps);
  gst_caps_unref (tcaps);
  tcaps = caps;
  caps = gst_caps_intersect (tcaps, rcaps);
  gst_caps_unref (tcaps);
  gst_caps_unref (rcaps);

  /* normalize to finally have a list of acceptable fixed formats */
  caps = gst_caps_simplify (caps);
  caps = gst_caps_normalize (caps);

  /* set up for running stuff */
  loop = g_main_loop_new (NULL, FALSE);
  bus = gst_element_get_bus (pipeline);
  gst_bus_add_signal_watch (bus);
  g_signal_connect (bus, "message::eos", (GCallback) message_cb, NULL);
  gst_object_unref (bus);

  g_object_set (src, "num-buffers", 5, NULL);
  if (comp) {
    /* set lower bound for ssim comparison, and allow slightly different caps */
    g_object_set (compare, "method", 2, NULL);
    g_object_set (compare, "meta", 3, NULL);
    g_object_set (compare, "threshold", 0.90, NULL);
    g_object_set (compare, "upper", FALSE, NULL);
  }

  GST_INFO ("possible caps to check %d", gst_caps_get_size (caps));

  /* loop over all input and output combinations */
  for (i = 0; i < gst_caps_get_size (caps); i++) {
    for (j = 0; j < gst_caps_get_size (caps); j++) {
      GstCaps *in_caps, *out_caps;
      GstStructure *s;
      const gchar *fourcc;

      in_caps = gst_caps_copy_nth (caps, i);
      out_caps = gst_caps_copy_nth (caps, j);

      /* FIXME remove if videotestsrc and video format handle these properly */
      s = gst_caps_get_structure (in_caps, 0);
      if ((fourcc = gst_structure_get_string (s, "format"))) {
        if (!strcmp (fourcc, "YUV9") ||
            !strcmp (fourcc, "YVU9") || !strcmp (fourcc, "v216")) {
          gst_caps_unref (in_caps);
          gst_caps_unref (out_caps);
          continue;
        }
      }

      GST_INFO ("checking conversion from %" GST_PTR_FORMAT " (%d)"
          " to %" GST_PTR_FORMAT " (%d)", in_caps, i, out_caps, j);

      g_object_set (filter1, "caps", in_caps, NULL);
      g_object_set (filter2, "caps", out_caps, NULL);

      /* run the pipeline to EOS for this combination */
      fail_unless (gst_element_set_state (pipeline, GST_STATE_PLAYING)
          != GST_STATE_CHANGE_FAILURE);
      g_main_loop_run (loop);
      fail_unless (gst_element_set_state (pipeline, GST_STATE_NULL)
          == GST_STATE_CHANGE_SUCCESS);

      gst_caps_unref (in_caps);
      gst_caps_unref (out_caps);
    }
  }

  gst_caps_unref (caps);
  gst_object_unref (pipeline);
  g_main_loop_unref (loop);
}
int main (int argc, char *argv[]) { gst_init (&argc, &argv); GstElementFactory *factory = gst_element_factory_find("ffmpegcolorspace"); const GList *list = gst_element_factory_get_static_pad_templates(factory); while (NULL != list) { GstStaticPadTemplate *templ = (GstStaticPadTemplate *)list->data; // name g_print("+++ template name %s\n", templ->name_template); // direction g_print ("direction: "); switch (templ->direction) { case GST_PAD_UNKNOWN: g_print ("unknown\n"); break; case GST_PAD_SRC: g_print ("src\n"); break; case GST_PAD_SINK: g_print ("sink\n"); break; default: g_print ("this is a bug\n"); break; } // presence g_print ("presence: "); switch (templ->presence) { case GST_PAD_ALWAYS: g_print ("always\n"); break; case GST_PAD_SOMETIMES: g_print ("sometimes\n"); break; case GST_PAD_REQUEST: g_print ("request\n"); break; default: g_print ("this is a bug\n"); break; } // caps GstCaps *caps = gst_static_caps_get(&templ->static_caps); // copying for removing fields in struture GstCaps *copy = gst_caps_copy(caps); gst_caps_unref(caps); guint size = gst_caps_get_size(copy); guint i = 0; g_print("size %u\n", size); for (; i < size; i++) { GstStructure *structure = gst_caps_get_structure(copy, i); gst_structure_remove_fields(structure, "format", "width", "height", "framerate", NULL); GstCaps *copy_nth = gst_caps_copy_nth(copy, i); gchar *caps_str = gst_caps_to_string(copy_nth); g_print(" caps num %u is %s\n", i, caps_str); g_free(caps_str); gst_caps_unref(copy_nth); } gst_caps_unref(copy); list = g_list_next(list); } gst_object_unref(factory); gst_deinit(); // for memory testing return 0; }
/* gst_a52dec_handle_frame:
 * GstAudioDecoder::handle_frame vfunc — decode one already-parsed AC-3
 * frame from @buffer and push the PCM result downstream via
 * gst_audio_decoder_finish_frame().
 *
 * Returns: GST_FLOW_OK, or an error flow from negotiation/decode failure.
 */
static GstFlowReturn
gst_a52dec_handle_frame (GstAudioDecoder * bdec, GstBuffer * buffer)
{
  GstA52Dec *a52dec;
  gint channels, i;
  gboolean need_reneg = FALSE;
  gint chans;
  gint length = 0, flags, sample_rate, bit_rate;
  GstMapInfo map;
  GstFlowReturn result = GST_FLOW_OK;
  GstBuffer *outbuf;
  const gint num_blocks = 6;    /* an AC-3 frame holds 6 blocks of 256 samples */

  a52dec = GST_A52DEC (bdec);

  /* no fancy draining */
  if (G_UNLIKELY (!buffer))
    return GST_FLOW_OK;

  /* parsed stuff already, so this should work out fine */
  gst_buffer_map (buffer, &map, GST_MAP_READ);
  g_assert (map.size >= 7);

  /* re-obtain some sync header info,
   * should be same as during _parse and could also be cached there,
   * but anyway ... */
  bit_rate = a52dec->bit_rate;
  sample_rate = a52dec->sample_rate;
  flags = 0;
  length = a52_syncinfo (map.data, &flags, &sample_rate, &bit_rate);
  /* the base class hands us exactly one parsed frame */
  g_assert (length == map.size);

  /* update stream information, renegotiate or re-streaminfo if needed */
  need_reneg = FALSE;
  if (a52dec->sample_rate != sample_rate) {
    GST_DEBUG_OBJECT (a52dec, "sample rate changed");
    need_reneg = TRUE;
    a52dec->sample_rate = sample_rate;
  }

  if (flags) {
    if (a52dec->stream_channels != (flags & (A52_CHANNEL_MASK | A52_LFE))) {
      GST_DEBUG_OBJECT (a52dec, "stream channel flags changed, marking update");
      a52dec->flag_update = TRUE;
    }
    a52dec->stream_channels = flags & (A52_CHANNEL_MASK | A52_LFE);
  }

  if (bit_rate != a52dec->bit_rate) {
    a52dec->bit_rate = bit_rate;
    gst_a52dec_update_streaminfo (a52dec);
  }

  /* If we haven't had an explicit number of channels chosen through properties
   * at this point, choose what to downmix to now, based on what the peer will
   * accept - this allows a52dec to do downmixing in preference to a
   * downstream element such as audioconvert.
   */
  if (a52dec->request_channels != A52_CHANNEL) {
    flags = a52dec->request_channels;
  } else if (a52dec->flag_update) {
    GstCaps *caps;

    a52dec->flag_update = FALSE;

    caps = gst_pad_get_allowed_caps (GST_AUDIO_DECODER_SRC_PAD (a52dec));
    if (caps && gst_caps_get_size (caps) > 0) {
      GstCaps *copy = gst_caps_copy_nth (caps, 0);
      GstStructure *structure = gst_caps_get_structure (copy, 0);
      gint orig_channels = flags ? gst_a52dec_channels (flags, NULL) : 6;
      gint fixed_channels = 0;
      /* a52 output modes indexed by (channel count - 1) */
      const int a52_channels[6] = {
        A52_MONO,
        A52_STEREO,
        A52_STEREO | A52_LFE,
        A52_2F2R,
        A52_2F2R | A52_LFE,
        A52_3F2R | A52_LFE,
      };

      /* Prefer the original number of channels, but fixate to something
       * preferred (first in the caps) downstream if possible. */
      gst_structure_fixate_field_nearest_int (structure, "channels",
          orig_channels);
      /* NOTE(review): assumes fixed_channels >= 1 when the lookup succeeds;
       * a zero value would index a52_channels[-1] — confirm caps can't
       * carry channels=0 */
      if (gst_structure_get_int (structure, "channels", &fixed_channels)
          && fixed_channels <= 6) {
        /* only downmix; never upmix beyond the stream's channel count */
        if (fixed_channels < orig_channels)
          flags = a52_channels[fixed_channels - 1];
      } else {
        flags = a52_channels[5];
      }

      gst_caps_unref (copy);
    } else if (flags)
      flags = a52dec->stream_channels;
    else
      flags = A52_3F2R | A52_LFE;

    if (caps)
      gst_caps_unref (caps);
  } else {
    flags = a52dec->using_channels;
  }

  /* process */
  flags |= A52_ADJUST_LEVEL;
  a52dec->level = 1;
  if (a52_frame (a52dec->state, map.data, &flags, &a52dec->level, a52dec->bias)) {
    gst_buffer_unmap (buffer, &map);
    GST_AUDIO_DECODER_ERROR (a52dec, 1, STREAM, DECODE, (NULL),
        ("a52_frame error"), result);
    goto exit;
  }
  gst_buffer_unmap (buffer, &map);

  /* a52_frame() may have reduced the channel layout (downmix) */
  channels = flags & (A52_CHANNEL_MASK | A52_LFE);
  if (a52dec->using_channels != channels) {
    need_reneg = TRUE;
    a52dec->using_channels = channels;
  }

  /* negotiate if required */
  if (need_reneg) {
    GST_DEBUG_OBJECT (a52dec,
        "a52dec reneg: sample_rate:%d stream_chans:%d using_chans:%d",
        a52dec->sample_rate, a52dec->stream_channels, a52dec->using_channels);
    if (!gst_a52dec_reneg (a52dec))
      goto failed_negotiation;
  }

  if (a52dec->dynamic_range_compression == FALSE) {
    /* NULL callback disables liba52's dynamic range compression */
    a52_dynrng (a52dec->state, NULL, NULL);
  }

  flags &= (A52_CHANNEL_MASK | A52_LFE);
  chans = gst_a52dec_channels (flags, NULL);
  if (!chans)
    goto invalid_flags;

  /* handle decoded data;
   * each frame has 6 blocks, one block is 256 samples, ea */
  outbuf =
      gst_buffer_new_and_alloc (256 * chans * (SAMPLE_WIDTH / 8) * num_blocks);

  gst_buffer_map (outbuf, &map, GST_MAP_WRITE);
  {
    guint8 *ptr = map.data;
    for (i = 0; i < num_blocks; i++) {
      if (a52_block (a52dec->state)) {
        /* also marks discont */
        GST_AUDIO_DECODER_ERROR (a52dec, 1, STREAM, DECODE, (NULL),
            ("error decoding block %d", i), result);
        if (result != GST_FLOW_OK) {
          gst_buffer_unmap (outbuf, &map);
          goto exit;
        }
      } else {
        gint n, c;
        gint *reorder_map = a52dec->channel_reorder_map;

        /* interleave liba52's planar samples into GStreamer channel order */
        for (n = 0; n < 256; n++) {
          for (c = 0; c < chans; c++) {
            ((sample_t *) ptr)[n * chans + reorder_map[c]] =
                a52dec->samples[c * 256 + n];
          }
        }
      }
      ptr += 256 * chans * (SAMPLE_WIDTH / 8);
    }
  }
  gst_buffer_unmap (outbuf, &map);

  result = gst_audio_decoder_finish_frame (bdec, outbuf, 1);

exit:
  return result;

  /* ERRORS */
failed_negotiation:
  {
    GST_ELEMENT_ERROR (a52dec, CORE, NEGOTIATION, (NULL), (NULL));
    return GST_FLOW_ERROR;
  }
invalid_flags:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (a52dec), STREAM, DECODE, (NULL),
        ("Invalid channel flags: %d", flags));
    return GST_FLOW_ERROR;
  }
}
/* gst_dtsdec_handle_frame:
 * GstAudioDecoder::handle_frame vfunc — decode one already-parsed DTS
 * frame from @buffer and push the PCM result downstream via
 * gst_audio_decoder_finish_frame().
 *
 * Returns: GST_FLOW_OK, or an error flow from negotiation/decode failure.
 */
static GstFlowReturn
gst_dtsdec_handle_frame (GstAudioDecoder * bdec, GstBuffer * buffer)
{
  GstDtsDec *dts;
  gint channels, i, num_blocks;
  gboolean need_renegotiation = FALSE;
  guint8 *data;
  gsize size;
  GstMapInfo map;
  gint chans;
  gint length = 0, flags, sample_rate, bit_rate, frame_length;
  GstFlowReturn result = GST_FLOW_OK;
  GstBuffer *outbuf;

  dts = GST_DTSDEC (bdec);

  /* no fancy draining */
  if (G_UNLIKELY (!buffer))
    return GST_FLOW_OK;

  /* parsed stuff already, so this should work out fine */
  gst_buffer_map (buffer, &map, GST_MAP_READ);
  data = map.data;
  size = map.size;
  g_assert (size >= 7);

  /* re-obtain the sync header info */
  bit_rate = dts->bit_rate;
  sample_rate = dts->sample_rate;
  flags = 0;
  length = dca_syncinfo (dts->state, data, &flags, &sample_rate, &bit_rate,
      &frame_length);
  /* the base class hands us exactly one parsed frame */
  g_assert (length == size);

  if (flags != dts->prev_flags) {
    dts->prev_flags = flags;
    dts->flag_update = TRUE;
  }

  /* go over stream properties, renegotiate or update streaminfo if needed */
  if (dts->sample_rate != sample_rate) {
    need_renegotiation = TRUE;
    dts->sample_rate = sample_rate;
  }

  if (flags) {
    dts->stream_channels = flags & (DCA_CHANNEL_MASK | DCA_LFE);
  }

  if (bit_rate != dts->bit_rate) {
    dts->bit_rate = bit_rate;
    gst_dtsdec_update_streaminfo (dts);
  }

  /* If we haven't had an explicit number of channels chosen through properties
   * at this point, choose what to downmix to now, based on what the peer will
   * accept - this allows a52dec to do downmixing in preference to a
   * downstream element such as audioconvert.
   * FIXME: Add the property back in for forcing output channels.
   */
  if (dts->request_channels != DCA_CHANNEL) {
    flags = dts->request_channels;
  } else if (dts->flag_update) {
    GstCaps *caps;

    dts->flag_update = FALSE;

    caps = gst_pad_get_allowed_caps (GST_AUDIO_DECODER_SRC_PAD (dts));
    if (caps && gst_caps_get_size (caps) > 0) {
      GstCaps *copy = gst_caps_copy_nth (caps, 0);
      GstStructure *structure = gst_caps_get_structure (copy, 0);
      /* FIX: was an uninitialized "channels" shadowing the outer variable;
       * gst_structure_get_int()'s return value was ignored, so on failure
       * an indeterminate value was read (UB) and channels == 0 would have
       * indexed dts_channels[-1]. */
      gint out_channels = 0;
      /* dca output modes indexed by (channel count - 1) */
      const int dts_channels[6] = {
        DCA_MONO,
        DCA_STEREO,
        DCA_STEREO | DCA_LFE,
        DCA_2F2R,
        DCA_2F2R | DCA_LFE,
        DCA_3F2R | DCA_LFE,
      };

      /* Prefer the original number of channels, but fixate to something
       * preferred (first in the caps) downstream if possible. */
      gst_structure_fixate_field_nearest_int (structure, "channels",
          flags ? gst_dtsdec_channels (flags, NULL) : 6);
      if (gst_structure_get_int (structure, "channels", &out_channels)
          && out_channels >= 1 && out_channels <= 6)
        flags = dts_channels[out_channels - 1];
      else
        flags = dts_channels[5];

      gst_caps_unref (copy);
    } else if (flags) {
      flags = dts->stream_channels;
    } else {
      flags = DCA_3F2R | DCA_LFE;
    }

    if (caps)
      gst_caps_unref (caps);
  } else {
    flags = dts->using_channels;
  }

  /* process */
  flags |= DCA_ADJUST_LEVEL;
  dts->level = 1;
  if (dca_frame (dts->state, data, &flags, &dts->level, dts->bias)) {
    gst_buffer_unmap (buffer, &map);
    GST_AUDIO_DECODER_ERROR (dts, 1, STREAM, DECODE, (NULL),
        ("dts_frame error"), result);
    goto exit;
  }
  gst_buffer_unmap (buffer, &map);

  /* dca_frame() may have reduced the channel layout (downmix) */
  channels = flags & (DCA_CHANNEL_MASK | DCA_LFE);
  if (dts->using_channels != channels) {
    need_renegotiation = TRUE;
    dts->using_channels = channels;
  }

  /* negotiate if required */
  if (need_renegotiation) {
    GST_DEBUG_OBJECT (dts,
        "dtsdec: sample_rate:%d stream_chans:0x%x using_chans:0x%x",
        dts->sample_rate, dts->stream_channels, dts->using_channels);
    if (!gst_dtsdec_renegotiate (dts))
      goto failed_negotiation;
  }

  if (dts->dynamic_range_compression == FALSE) {
    /* NULL callback disables libdca's dynamic range compression */
    dca_dynrng (dts->state, NULL, NULL);
  }

  flags &= (DCA_CHANNEL_MASK | DCA_LFE);
  chans = gst_dtsdec_channels (flags, NULL);
  if (!chans)
    goto invalid_flags;

  /* handle decoded data, one block is 256 samples */
  num_blocks = dca_blocks_num (dts->state);
  outbuf =
      gst_buffer_new_and_alloc (256 * chans * (SAMPLE_WIDTH / 8) * num_blocks);

  gst_buffer_map (outbuf, &map, GST_MAP_WRITE);
  data = map.data;
  size = map.size;
  {
    guint8 *ptr = data;
    for (i = 0; i < num_blocks; i++) {
      if (dca_block (dts->state)) {
        /* also marks discont */
        GST_AUDIO_DECODER_ERROR (dts, 1, STREAM, DECODE, (NULL),
            ("error decoding block %d", i), result);
        if (result != GST_FLOW_OK)
          goto exit;
      } else {
        gint n, c;
        gint *reorder_map = dts->channel_reorder_map;

        /* interleave libdca's planar samples into GStreamer channel order */
        for (n = 0; n < 256; n++) {
          for (c = 0; c < chans; c++) {
            ((sample_t *) ptr)[n * chans + reorder_map[c]] =
                dts->samples[c * 256 + n];
          }
        }
      }
      ptr += 256 * chans * (SAMPLE_WIDTH / 8);
    }
  }
  gst_buffer_unmap (outbuf, &map);

  result = gst_audio_decoder_finish_frame (bdec, outbuf, 1);

exit:
  return result;

  /* ERRORS */
failed_negotiation:
  {
    GST_ELEMENT_ERROR (dts, CORE, NEGOTIATION, (NULL), (NULL));
    return GST_FLOW_ERROR;
  }
invalid_flags:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (dts), STREAM, DECODE, (NULL),
        ("Invalid channel flags: %d", flags));
    return GST_FLOW_ERROR;
  }
}
CapsPtr Caps::copyNth(uint index) const { return CapsPtr::wrap(gst_caps_copy_nth(object<GstCaps>(), index), false); }
/*
 * Get Caps
 *
 * As can be seen this method violates the API between the GST element
 * and the Android device. Should be fixed... (FIXME)
 *
 * Builds one video/x-raw caps structure per format the device reports,
 * each spanning the device's min..max resolution and fps range, then
 * simplifies the list to drop the duplicates some devices report.
 * Returns NULL (no caps known yet) while the element is in NULL state or
 * when any device query fails; otherwise returns a caller-owned GstCaps.
 */
static GstCaps *
gst_android_video_source_get_caps(GstBaseSrc * p_basesrc, GstCaps * p_filter)
{
    int i;
    int minFps;
    int maxFps;
    int fmtPos;
    int minWidth, minHeight;
    int maxWidth, maxHeight;
    GstCaps *caps;
    GstCaps *capsVec = NULL;

    GA_LOGTRACE("ENTER %s --xx--> thread(%ld)", __FUNCTION__, pthread_self());

    GstAndroidVideoSource *p_src = GST_ANDROIDVIDEOSOURCE(p_basesrc);

    /* (the "== GST_STATE_NULL" half of the original test was redundant;
     * "<=" already covers it) */
    if (GST_STATE(p_src) <= GST_STATE_NULL) {
        GA_LOGINFO("%s: Called in state %s. Don't know device support yet. Will return NULL caps.", __FUNCTION__, gst_element_state_get_name(GST_STATE(p_src)));
        return NULL;
    }

    if (VCD_GetWidestFpsRange(p_src->m_devHandle, &minFps, &maxFps) != VCD_NO_ERROR) {
        return NULL;
    }
    if (VCD_NO_ERROR != VCD_GetMinResolution(p_src->m_devHandle, &minWidth, &minHeight)) {
        return NULL;
    }
    if (VCD_NO_ERROR != VCD_GetMaxResolution(p_src->m_devHandle, &maxWidth, &maxHeight)) {
        return NULL;
    }

    capsVec = gst_caps_new_empty();
    for (fmtPos = 0; fmtPos < VCD_getMediaSupportFmtLen(p_src->m_devHandle); fmtPos++) {
        int fmt = VCD_getMediaSupportFmt(p_src->m_devHandle)[fmtPos];
        GstVideoFormat gstVideoFmt = vcd_int_to_gst_video_format(fmt);
        if (gstVideoFmt != GST_VIDEO_FORMAT_UNKNOWN) {
            caps = gst_caps_new_simple(
                "video/x-raw",
                "format", G_TYPE_STRING, gst_video_format_to_string(gstVideoFmt),
                "width", GST_TYPE_INT_RANGE, minWidth, maxWidth,
                "height", GST_TYPE_INT_RANGE, minHeight, maxHeight,
#ifdef ACCEPT_FPS_CAPS_DOWN_TO_1FPS
                "framerate", GST_TYPE_FRACTION_RANGE, 1000, ANDROID_FPS_DENOMINATOR, maxFps, ANDROID_FPS_DENOMINATOR,
#else
                "framerate", GST_TYPE_FRACTION_RANGE, minFps, ANDROID_FPS_DENOMINATOR, maxFps, ANDROID_FPS_DENOMINATOR,
#endif
                "pixel-aspect-ratio", GST_TYPE_FRACTION, 1, 1,
                NULL);
            gst_caps_append(capsVec, caps);
        }
    }

    // Some Android devices report one or more supported formats (or other stuff)
    // more than once, which gives caps duplicates. Those are removed by doing
    // gst_caps_do_simplify()...
    capsVec = gst_caps_simplify(capsVec);

    GA_LOGINFO("%s: By Android video device supported CAPS:", __FUNCTION__);
    GA_LOGINFO("%s:-----------------------------------------------------------", __FUNCTION__);
    for (i = 0; i < (gint) gst_caps_get_size(capsVec); i++) {
        // Android log cannot print that long messages so we need to take one caps at a time
        GstCaps *capsCopy = gst_caps_copy_nth(capsVec, i);
        /* FIX: gst_caps_to_string() returns a newly allocated string
         * (transfer full); the original passed it straight to the log
         * macro and leaked it on every iteration. */
        gchar *capsStr = gst_caps_to_string(capsCopy);
        GA_LOGINFO("CAPS%d: %s", i+1, capsStr);
        g_free(capsStr);
        gst_caps_unref(capsCopy);
    }
    GA_LOGINFO("%s:-----------------------------------------------------------", __FUNCTION__);

    GA_LOGTRACE("EXIT %s", __FUNCTION__);

    return capsVec;
}
/* gst_dshowvideosrc_set_caps:
 * GstBaseSrc::set_caps vfunc — reconfigure the DirectShow capture graph for
 * the newly negotiated @caps: stop the graph if running, patch the matching
 * pin's media type with the negotiated width/height/framerate, reconnect
 * the capture pin to our fakesink, then restart the graph.
 * Returns TRUE on success (or no-op), FALSE on any DirectShow failure.  */
static gboolean
gst_dshowvideosrc_set_caps (GstBaseSrc * bsrc, GstCaps * caps)
{
  HRESULT hres;
  IPin *input_pin = NULL;
  GstDshowVideoSrc *src = GST_DSHOWVIDEOSRC (bsrc);
  GstStructure *s = gst_caps_get_structure (caps, 0);
  GstCaps *current_caps = gst_pad_get_current_caps (GST_BASE_SRC_PAD (bsrc));

  /* setting identical caps again is a no-op */
  if (current_caps) {
    if (gst_caps_is_equal (caps, current_caps)) {
      gst_caps_unref (current_caps);
      return TRUE;
    }
    gst_caps_unref (current_caps);
  }

  /* Same remark as in gstdshowaudiosrc. */
  gboolean was_running = src->is_running;
  if (was_running) {
    /* NOTE(review): this inner hres intentionally(?) shadows the outer one */
    HRESULT hres = src->media_filter->Stop ();
    if (hres != S_OK) {
      GST_ERROR ("Can't STOP the directshow capture graph (error=0x%x)", hres);
      return FALSE;
    }
    src->is_running = FALSE;
  }

  /* search the negociated caps in our caps list to get its index and the corresponding mediatype */
  if (gst_caps_is_subset (caps, src->caps)) {
    guint i = 0;
    gint res = -1;

    /* linear scan for the first structure of src->caps that covers @caps */
    for (; i < gst_caps_get_size (src->caps) && res == -1; i++) {
      GstCaps *capstmp = gst_caps_copy_nth (src->caps, i);

      if (gst_caps_is_subset (caps, capstmp)) {
        res = i;
      }
      gst_caps_unref (capstmp);
    }

    if (res != -1 && src->pins_mediatypes) {
      /* get the corresponding media type and build the dshow graph */
      GList *type_pin_mediatype = g_list_nth (src->pins_mediatypes, res);

      if (type_pin_mediatype) {
        GstCapturePinMediaType *pin_mediatype =
            (GstCapturePinMediaType *) type_pin_mediatype->data;
        gchar *caps_string = NULL;
        gchar *src_caps_string = NULL;

        /* retrieve the desired video size */
        VIDEOINFOHEADER *video_info = NULL;
        gint width = 0;
        gint height = 0;
        gint numerator = 0;
        gint denominator = 0;
        gst_structure_get_int (s, "width", &width);
        gst_structure_get_int (s, "height", &height);
        gst_structure_get_fraction (s, "framerate", &numerator, &denominator);

        /* check if the desired video size is valid about granularity */
        /* This check will be removed when GST_TYPE_INT_RANGE_STEP exits */
        /* See remarks in gst_dshow_new_video_caps function */
        if (pin_mediatype->granularityWidth != 0
            && width % pin_mediatype->granularityWidth != 0)
          g_warning ("your desired video size is not valid : %d mod %d !=0\n",
              width, pin_mediatype->granularityWidth);
        if (pin_mediatype->granularityHeight != 0
            && height % pin_mediatype->granularityHeight != 0)
          g_warning ("your desired video size is not valid : %d mod %d !=0\n",
              height, pin_mediatype->granularityHeight);

        /* update mediatype */
        video_info = (VIDEOINFOHEADER *) pin_mediatype->mediatype->pbFormat;
        video_info->bmiHeader.biWidth = width;
        video_info->bmiHeader.biHeight = height;
        /* AvgTimePerFrame is in 100ns units, hence the 10000000 factor */
        video_info->AvgTimePerFrame =
            (LONGLONG) (10000000 * denominator / (double) numerator);
        video_info->bmiHeader.biSizeImage = DIBSIZE (video_info->bmiHeader);
        pin_mediatype->mediatype->lSampleSize = DIBSIZE (video_info->bmiHeader);

        src->dshow_fakesink->gst_set_media_type (pin_mediatype->mediatype);
        src->dshow_fakesink->gst_set_buffer_callback (
            (push_buffer_func) gst_dshowvideosrc_push_buffer, src);

        gst_dshow_get_pin_from_filter (src->dshow_fakesink, PINDIR_INPUT,
            &input_pin);
        if (!input_pin) {
          GST_ERROR ("Can't get input pin from our dshow fakesink");
          goto error;
        }

        /* drop any stale connections before reconnecting with the new type */
        if (gst_dshow_is_pin_connected (pin_mediatype->capture_pin)) {
          GST_DEBUG_OBJECT (src,
              "capture_pin already connected, disconnecting");
          src->filter_graph->Disconnect (pin_mediatype->capture_pin);
        }

        if (gst_dshow_is_pin_connected (input_pin)) {
          GST_DEBUG_OBJECT (src, "input_pin already connected, disconnecting");
          src->filter_graph->Disconnect (input_pin);
        }

        hres = src->filter_graph->ConnectDirect (pin_mediatype->capture_pin,
            input_pin, pin_mediatype->mediatype);
        input_pin->Release ();

        if (hres != S_OK) {
          GST_ERROR
              ("Can't connect capture filter with fakesink filter (error=0x%x)",
              hres);
          goto error;
        }

        /* save width and height negociated */
        gst_structure_get_int (s, "width", &src->width);
        gst_structure_get_int (s, "height", &src->height);

        GstVideoInfo info;
        gst_video_info_from_caps(&info, caps);
        switch (GST_VIDEO_INFO_FORMAT(&info)) {
          case GST_VIDEO_FORMAT_RGB:
          case GST_VIDEO_FORMAT_BGR:
            src->is_rgb = TRUE;
            break;
          default:
            src->is_rgb = FALSE;
            break;
        }
      }
    }
  }

  /* restart the graph if we stopped it above */
  if (was_running) {
    HRESULT hres = src->media_filter->Run (0);
    if (hres != S_OK) {
      GST_ERROR ("Can't RUN the directshow capture graph (error=0x%x)", hres);
      return FALSE;
    }
    src->is_running = TRUE;
  }

  return TRUE;

error:
  /* NOTE(review): on this path the graph is left stopped even if
   * was_running was TRUE — possibly intentional, verify */
  return FALSE;
}
/* gst_v4l2src_negotiate:
 * GstBaseSrc::negotiate vfunc — intersect our possible caps with the peer's
 * preferences, pick the best-matching resolution, fixate and set the result.
 * Renegotiation is refused once the v4l2 object is active.
 * Returns TRUE if caps were set (or none were needed), FALSE on failure.  */
static gboolean
gst_v4l2src_negotiate (GstBaseSrc * basesrc)
{
  GstV4l2Src *v4l2src;
  GstV4l2Object *obj;
  GstCaps *thiscaps;
  GstCaps *caps = NULL;
  GstCaps *peercaps = NULL;
  gboolean result = FALSE;

  v4l2src = GST_V4L2SRC (basesrc);
  obj = v4l2src->v4l2object;

  /* We don't allow renegotiation, just return TRUE in that case */
  if (GST_V4L2_IS_ACTIVE (obj))
    return TRUE;

  /* first see what is possible on our source pad */
  thiscaps = gst_pad_query_caps (GST_BASE_SRC_PAD (basesrc), NULL);
  GST_DEBUG_OBJECT (basesrc, "caps of src: %" GST_PTR_FORMAT, thiscaps);
  LOG_CAPS (basesrc, thiscaps);

  /* nothing or anything is allowed, we're done */
  if (thiscaps == NULL || gst_caps_is_any (thiscaps))
    goto no_nego_needed;

  /* get the peer caps without a filter as we'll filter ourselves later on */
  peercaps = gst_pad_peer_query_caps (GST_BASE_SRC_PAD (basesrc), NULL);
  GST_DEBUG_OBJECT (basesrc, "caps of peer: %" GST_PTR_FORMAT, peercaps);
  LOG_CAPS (basesrc, peercaps);
  if (peercaps && !gst_caps_is_any (peercaps)) {
    GstCaps *icaps = NULL;
    int i;

    /* Prefer the first caps we are compatible with that the peer proposed */
    for (i = 0; i < gst_caps_get_size (peercaps); i++) {
      /* get intersection */
      GstCaps *ipcaps = gst_caps_copy_nth (peercaps, i);

      GST_DEBUG_OBJECT (basesrc, "peer: %" GST_PTR_FORMAT, ipcaps);
      LOG_CAPS (basesrc, ipcaps);

      icaps = gst_caps_intersect (thiscaps, ipcaps);
      gst_caps_unref (ipcaps);

      if (!gst_caps_is_empty (icaps))
        break;

      /* empty intersection: drop it and try the peer's next structure */
      gst_caps_unref (icaps);
      icaps = NULL;
    }

    GST_DEBUG_OBJECT (basesrc, "intersect: %" GST_PTR_FORMAT, icaps);
    LOG_CAPS (basesrc, icaps);
    if (icaps) {
      /* If there are multiple intersections pick the one with the smallest
       * resolution strictly bigger then the first peer caps */
      if (gst_caps_get_size (icaps) > 1) {
        GstStructure *s = gst_caps_get_structure (peercaps, 0);
        int best = 0;
        int twidth, theight;
        int width = G_MAXINT, height = G_MAXINT;

        if (gst_structure_get_int (s, "width", &twidth)
            && gst_structure_get_int (s, "height", &theight)) {

          /* Walk the structure backwards to get the first entry of the
           * smallest resolution bigger (or equal to) the preferred resolution)
           */
          for (i = gst_caps_get_size (icaps) - 1; i >= 0; i--) {
            GstStructure *is = gst_caps_get_structure (icaps, i);
            int w, h;

            if (gst_structure_get_int (is, "width", &w)
                && gst_structure_get_int (is, "height", &h)) {
              if (w >= twidth && w <= width && h >= theight && h <= height) {
                width = w;
                height = h;
                best = i;
              }
            }
          }
        }

        caps = gst_caps_copy_nth (icaps, best);
        gst_caps_unref (icaps);
      } else {
        /* single intersection: take it as-is (ownership moves to caps) */
        caps = icaps;
      }
    }
    gst_caps_unref (thiscaps);
  } else {
    /* no peer or peer have ANY caps, work with our own caps then */
    caps = thiscaps;
  }
  if (peercaps)
    gst_caps_unref (peercaps);
  if (caps) {
    caps = gst_caps_truncate (caps);

    /* now fixate */
    if (!gst_caps_is_empty (caps)) {
      caps = gst_v4l2src_fixate (basesrc, caps);
      GST_DEBUG_OBJECT (basesrc, "fixated to: %" GST_PTR_FORMAT, caps);
      LOG_CAPS (basesrc, caps);

      if (gst_caps_is_any (caps)) {
        /* hmm, still anything, so element can do anything and
         * nego is not needed */
        result = TRUE;
      } else if (gst_caps_is_fixed (caps)) {
        /* yay, fixed caps, use those then */
        result = gst_base_src_set_caps (basesrc, caps);
      }
    }
    gst_caps_unref (caps);
  }
  return result;

no_nego_needed:
  {
    GST_DEBUG_OBJECT (basesrc, "no negotiation needed");
    if (thiscaps)
      gst_caps_unref (thiscaps);
    return TRUE;
  }
}
/* gst_a52dec_handle_frame:
 * Decode one AC-3 frame (0.10-style pad push path): update stream info,
 * pick an output channel layout from downstream's allowed caps, decode
 * the frame's 6 blocks and push each block via gst_a52dec_push().
 *
 * @data/@length: one complete, already-synced AC-3 frame
 * @flags/@sample_rate/@bit_rate: values from a52_syncinfo() for this frame
 * Returns: flow result of the pushes; decode errors only mark discont.
 */
static GstFlowReturn
gst_a52dec_handle_frame (GstA52Dec * a52dec, guint8 * data,
    guint length, gint flags, gint sample_rate, gint bit_rate)
{
  gint channels, i;
  gboolean need_reneg = FALSE;

  /* update stream information, renegotiate or re-streaminfo if needed */
  need_reneg = FALSE;
  if (a52dec->sample_rate != sample_rate) {
    need_reneg = TRUE;
    a52dec->sample_rate = sample_rate;
  }

  if (flags) {
    a52dec->stream_channels = flags & (A52_CHANNEL_MASK | A52_LFE);
  }

  if (bit_rate != a52dec->bit_rate) {
    a52dec->bit_rate = bit_rate;
    gst_a52dec_update_streaminfo (a52dec);
  }

  /* If we haven't had an explicit number of channels chosen through properties
   * at this point, choose what to downmix to now, based on what the peer will
   * accept - this allows a52dec to do downmixing in preference to a
   * downstream element such as audioconvert.
   */
  if (a52dec->request_channels != A52_CHANNEL) {
    flags = a52dec->request_channels;
  } else if (a52dec->flag_update) {
    GstCaps *caps;

    a52dec->flag_update = FALSE;

    caps = gst_pad_get_allowed_caps (a52dec->srcpad);
    if (caps && gst_caps_get_size (caps) > 0) {
      GstCaps *copy = gst_caps_copy_nth (caps, 0);
      GstStructure *structure = gst_caps_get_structure (copy, 0);
      /* FIX: was an uninitialized "channels" shadowing the outer variable;
       * gst_structure_get_int()'s return value was ignored, so on failure
       * an indeterminate value was read (UB) and channels == 0 would have
       * indexed a52_channels[-1]. */
      gint out_channels = 0;
      /* a52 output modes indexed by (channel count - 1) */
      const int a52_channels[6] = {
        A52_MONO,
        A52_STEREO,
        A52_STEREO | A52_LFE,
        A52_2F2R,
        A52_2F2R | A52_LFE,
        A52_3F2R | A52_LFE,
      };

      /* Prefer the original number of channels, but fixate to something
       * preferred (first in the caps) downstream if possible. */
      gst_structure_fixate_field_nearest_int (structure, "channels",
          flags ? gst_a52dec_channels (flags, NULL) : 6);
      if (gst_structure_get_int (structure, "channels", &out_channels)
          && out_channels >= 1 && out_channels <= 6)
        flags = a52_channels[out_channels - 1];
      else
        flags = a52_channels[5];

      gst_caps_unref (copy);
    } else if (flags)
      flags = a52dec->stream_channels;
    else
      flags = A52_3F2R | A52_LFE;

    if (caps)
      gst_caps_unref (caps);
  } else {
    flags = a52dec->using_channels;
  }

  /* process */
  flags |= A52_ADJUST_LEVEL;
  a52dec->level = 1;
  if (a52_frame (a52dec->state, data, &flags, &a52dec->level, a52dec->bias)) {
    /* decode error: skip the frame but keep the stream going */
    GST_WARNING ("a52_frame error");
    a52dec->discont = TRUE;
    return GST_FLOW_OK;
  }

  /* a52_frame() may have reduced the channel layout (downmix) */
  channels = flags & (A52_CHANNEL_MASK | A52_LFE);
  if (a52dec->using_channels != channels) {
    need_reneg = TRUE;
    a52dec->using_channels = channels;
  }

  /* negotiate if required */
  if (need_reneg) {
    GST_DEBUG ("a52dec reneg: sample_rate:%d stream_chans:%d using_chans:%d",
        a52dec->sample_rate, a52dec->stream_channels, a52dec->using_channels);
    if (!gst_a52dec_reneg (a52dec, a52dec->srcpad)) {
      GST_ELEMENT_ERROR (a52dec, CORE, NEGOTIATION, (NULL), (NULL));
      return GST_FLOW_ERROR;
    }
  }

  if (a52dec->dynamic_range_compression == FALSE) {
    /* NULL callback disables liba52's dynamic range compression */
    a52_dynrng (a52dec->state, NULL, NULL);
  }

  /* each frame consists of 6 blocks */
  for (i = 0; i < 6; i++) {
    if (a52_block (a52dec->state)) {
      /* ignore errors but mark a discont */
      GST_WARNING ("a52_block error %d", i);
      a52dec->discont = TRUE;
    } else {
      GstFlowReturn ret;

      /* push on */
      ret = gst_a52dec_push (a52dec, a52dec->srcpad, a52dec->using_channels,
          a52dec->samples, a52dec->time);
      if (ret != GST_FLOW_OK)
        return ret;
    }
    /* one block is 256 samples */
    a52dec->time += 256 * GST_SECOND / a52dec->sample_rate;
  }

  return GST_FLOW_OK;
}