/* GObject property setter for the LADSPA source element.
 * Handles the base-source related properties locally and delegates every
 * other property id to the shared LADSPA object helper. */
static void
gst_ladspa_source_type_set_property (GObject * object, guint prop_id,
    const GValue * value, GParamSpec * pspec)
{
  GstLADSPASource *src = GST_LADSPA_SOURCE (object);

  switch (prop_id) {
    case GST_LADSPA_SOURCE_PROP_SAMPLES_PER_BUFFER:
      src->samples_per_buffer = g_value_get_int (value);
      /* keep the basesrc blocksize in sync with the per-buffer sample count */
      gst_base_src_set_blocksize (GST_BASE_SRC (src),
          GST_AUDIO_INFO_BPF (&src->info) * src->samples_per_buffer);
      break;
    case GST_LADSPA_SOURCE_PROP_IS_LIVE:
      gst_base_src_set_live (GST_BASE_SRC (src), g_value_get_boolean (value));
      break;
    case GST_LADSPA_SOURCE_PROP_TIMESTAMP_OFFSET:
      src->timestamp_offset = g_value_get_int64 (value);
      break;
    case GST_LADSPA_SOURCE_PROP_CAN_ACTIVATE_PUSH:
      GST_BASE_SRC (src)->can_activate_push = g_value_get_boolean (value);
      break;
    case GST_LADSPA_SOURCE_PROP_CAN_ACTIVATE_PULL:
      src->can_activate_pull = g_value_get_boolean (value);
      break;
    default:
      /* not a source-specific property: forward to the LADSPA wrapper */
      gst_ladspa_object_set_property (&src->ladspa, object, prop_id, value,
          pspec);
      break;
  }
}
/* Caps negotiation: parse the negotiated audio format, remember it,
 * resize the basesrc blocksize accordingly and (re)select the wave
 * generator function for the new format. */
static gboolean
gst_audio_test_src_setcaps (GstBaseSrc * basesrc, GstCaps * caps)
{
  GstAudioTestSrc *testsrc = GST_AUDIO_TEST_SRC (basesrc);
  GstAudioInfo audio_info;

  if (!gst_audio_info_from_caps (&audio_info, caps)) {
    GST_ERROR_OBJECT (basesrc, "received invalid caps");
    return FALSE;
  }

  GST_DEBUG_OBJECT (testsrc, "negotiated to caps %" GST_PTR_FORMAT, caps);

  testsrc->info = audio_info;

  /* one blocksize == samples_per_buffer frames at the negotiated format */
  gst_base_src_set_blocksize (basesrc,
      GST_AUDIO_INFO_BPF (&audio_info) * testsrc->samples_per_buffer);
  gst_audio_test_src_change_wave (testsrc);

  return TRUE;
}
/* Caps negotiation: store the negotiated audio info, update the basesrc
 * blocksize and instantiate the LV2 plugin at the negotiated sample rate. */
static gboolean
gst_lv2_source_set_caps (GstBaseSrc * base, GstCaps * caps)
{
  GstLV2Source *self = (GstLV2Source *) base;
  GstAudioInfo audio_info;

  if (!gst_audio_info_from_caps (&audio_info, caps)) {
    GST_ERROR_OBJECT (base, "received invalid caps");
    return FALSE;
  }

  GST_DEBUG_OBJECT (self, "negotiated to caps %" GST_PTR_FORMAT, caps);

  self->info = audio_info;
  gst_base_src_set_blocksize (base,
      GST_AUDIO_INFO_BPF (&audio_info) * self->samples_per_buffer);

  /* the LV2 instance depends on the sample rate, so (re)create it now */
  if (!gst_lv2_setup (&self->lv2, GST_AUDIO_INFO_RATE (&audio_info))) {
    GST_ERROR_OBJECT (self, "could not create instance");
    return FALSE;
  }

  return TRUE;
}
/* GObject vmethods implementation */

/* Property setter: base-source properties are handled here, everything
 * else (plugin ports etc.) goes to the generic LV2 property handler. */
static void
gst_lv2_source_set_property (GObject * object, guint prop_id,
    const GValue * value, GParamSpec * pspec)
{
  GstLV2Source *src = (GstLV2Source *) object;

  switch (prop_id) {
    case GST_LV2_SOURCE_PROP_SAMPLES_PER_BUFFER:
      src->samples_per_buffer = g_value_get_int (value);
      /* blocksize follows the configured number of samples per buffer */
      gst_base_src_set_blocksize (GST_BASE_SRC (src),
          GST_AUDIO_INFO_BPF (&src->info) * src->samples_per_buffer);
      break;
    case GST_LV2_SOURCE_PROP_IS_LIVE:
      gst_base_src_set_live (GST_BASE_SRC (src), g_value_get_boolean (value));
      break;
    case GST_LV2_SOURCE_PROP_TIMESTAMP_OFFSET:
      src->timestamp_offset = g_value_get_int64 (value);
      break;
    case GST_LV2_SOURCE_PROP_CAN_ACTIVATE_PUSH:
      GST_BASE_SRC (src)->can_activate_push = g_value_get_boolean (value);
      break;
    case GST_LV2_SOURCE_PROP_CAN_ACTIVATE_PULL:
      src->can_activate_pull = g_value_get_boolean (value);
      break;
    default:
      gst_lv2_object_set_property (&src->lv2, object, prop_id, value, pspec);
      break;
  }
}
/* Caps negotiation: cache format/width/height, use the video-info frame
 * size (which already includes row padding) as the basesrc blocksize, and
 * derive the padded row width in pixels for the NI-IMAQ driver. */
gboolean
gst_niimaqsrc_set_caps (GstBaseSrc * bsrc, GstCaps * caps)
{
  GstNiImaqSrc *src = GST_NIIMAQSRC (bsrc);
  GstVideoInfo vinfo;
  int depth, ncomps;

  if (!gst_video_info_from_caps (&vinfo, caps)) {
    GST_WARNING_OBJECT (src, "Unable to parse video info from caps");
    return FALSE;
  }

  src->format = GST_VIDEO_INFO_FORMAT (&vinfo);
  src->width = GST_VIDEO_INFO_WIDTH (&vinfo);
  src->height = GST_VIDEO_INFO_HEIGHT (&vinfo);

  /* this will handle byte alignment (i.e. row multiple of 4 bytes) */
  src->framesize = GST_VIDEO_INFO_SIZE (&vinfo);
  gst_base_src_set_blocksize (bsrc, src->framesize);

  ncomps = GST_VIDEO_INFO_N_COMPONENTS (&vinfo);
  depth = GST_VIDEO_INFO_COMP_DEPTH (&vinfo, 0);

  /* use this so NI can give us proper byte alignment */
  src->rowpixels =
      GST_VIDEO_INFO_COMP_STRIDE (&vinfo, 0) / (ncomps * depth / 8);

  GST_LOG_OBJECT (src, "Caps set, framesize=%d, rowpixels=%d",
      src->framesize, src->rowpixels);

  return TRUE;
}
/* Instance init (GStreamer 0.10 variant): install the caps fixate
 * function and seed all properties with their defaults. */
static void
gst_audio_test_src_init (GstAudioTestSrc * src, GstAudioTestSrcClass * g_class)
{
  GstPad *srcpad = GST_BASE_SRC_PAD (src);

  gst_pad_set_fixatecaps_function (srcpad, gst_audio_test_src_src_fixate);

  src->samplerate = 44100;
  src->format = GST_AUDIO_TEST_SRC_FORMAT_NONE;
  src->volume = DEFAULT_VOLUME;
  src->freq = DEFAULT_FREQ;

  /* we operate in time */
  gst_base_src_set_format (GST_BASE_SRC (src), GST_FORMAT_TIME);
  gst_base_src_set_live (GST_BASE_SRC (src), DEFAULT_IS_LIVE);

  src->samples_per_buffer = DEFAULT_SAMPLES_PER_BUFFER;
  src->generate_samples_per_buffer = src->samples_per_buffer;
  src->timestamp_offset = DEFAULT_TIMESTAMP_OFFSET;
  src->can_activate_pull = DEFAULT_CAN_ACTIVATE_PULL;

  src->gen = NULL;
  src->wave = DEFAULT_WAVE;

  /* blocksize is recomputed once caps are known */
  gst_base_src_set_blocksize (GST_BASE_SRC (src), -1);
}
/* Start vmethod: bring up the AVDTP transport, size the basesrc reads to
 * the link MTU, query the device caps and arm the read poll set.
 * On any failure after acquisition the connection is released again. */
static gboolean
gst_avdtp_src_start (GstBaseSrc * bsrc)
{
  GstAvdtpSrc *avdtpsrc = GST_AVDTP_SRC (bsrc);

  /* None of this can go into prepare() since we need to set up the
   * connection to figure out what format the device is going to send us.
   */

  if (!gst_avdtp_connection_acquire (&avdtpsrc->conn, FALSE)) {
    GST_ERROR_OBJECT (avdtpsrc, "Failed to acquire connection");
    return FALSE;
  }

  if (!gst_avdtp_connection_get_properties (&avdtpsrc->conn)) {
    GST_ERROR_OBJECT (avdtpsrc, "Failed to get transport properties");
    goto fail;
  }

  if (!gst_avdtp_connection_conf_recv_stream_fd (&avdtpsrc->conn)) {
    GST_ERROR_OBJECT (avdtpsrc, "Failed to configure stream fd");
    goto fail;
  }

  /* read exactly one link-MTU worth of data per buffer */
  GST_DEBUG_OBJECT (avdtpsrc, "Setting block size to link MTU (%d)",
      avdtpsrc->conn.data.link_mtu);
  gst_base_src_set_blocksize (GST_BASE_SRC (avdtpsrc),
      avdtpsrc->conn.data.link_mtu);

  avdtpsrc->dev_caps = gst_avdtp_connection_get_caps (&avdtpsrc->conn);
  if (!avdtpsrc->dev_caps) {
    GST_ERROR_OBJECT (avdtpsrc, "Failed to get device caps");
    goto fail;
  }

  /* watch the stream fd for readable data */
  gst_poll_fd_init (&avdtpsrc->pfd);
  avdtpsrc->pfd.fd = g_io_channel_unix_get_fd (avdtpsrc->conn.stream);

  gst_poll_add_fd (avdtpsrc->poll, &avdtpsrc->pfd);
  gst_poll_fd_ctl_read (avdtpsrc->poll, &avdtpsrc->pfd, TRUE);
  gst_poll_set_flushing (avdtpsrc->poll, FALSE);

  g_atomic_int_set (&avdtpsrc->unlocked, FALSE);

  /* The life time of the connection is shorter than the src object, so we
   * don't need to worry about memory management */
  gst_avdtp_connection_notify_volume (&avdtpsrc->conn, G_OBJECT (avdtpsrc),
      "transport-volume");

  gst_avdtp_src_start_avrcp (avdtpsrc);

  return TRUE;

fail:
  gst_avdtp_connection_release (&avdtpsrc->conn);
  return FALSE;
}
/* Instance init: default buffer size, time format, deferred blocksize. */
static void
gst_flite_test_src_init (GstFliteTestSrc * src)
{
  src->samples_per_buffer = DEFAULT_SAMPLES_PER_BUFFER;

  /* we operate in time */
  gst_base_src_set_format (GST_BASE_SRC (src), GST_FORMAT_TIME);

  /* real blocksize is set once the audio format is negotiated */
  gst_base_src_set_blocksize (GST_BASE_SRC (src), -1);
}
/* Instance init: hook up the LV2 wrapper and seed source defaults. */
static void
gst_lv2_source_init (GstLV2Source * self, GstLV2SourceClass * klass)
{
  gst_lv2_init (&self->lv2, &klass->lv2);

  /* we produce timed audio; blocksize is fixed after caps negotiation */
  gst_base_src_set_format (GST_BASE_SRC (self), GST_FORMAT_TIME);
  gst_base_src_set_blocksize (GST_BASE_SRC (self), -1);

  self->samples_per_buffer = 1024;
  self->generate_samples_per_buffer = self->samples_per_buffer;
}
/* Instance init: clear file state and use the default read blocksize. */
static void
gst_flite_src_init (GstFliteSrc * src)
{
  src->filename = NULL;
  src->fd = 0;
  src->uri = NULL;
  src->is_regular = FALSE;

  gst_base_src_set_blocksize (GST_BASE_SRC (src), DEFAULT_BLOCKSIZE);
}
/* Instance init: live time-based source with default channel/timing. */
static void
gst_inter_audio_src_init (GstInterAudioSrc * interaudiosrc)
{
  GstBaseSrc *basesrc = GST_BASE_SRC (interaudiosrc);

  gst_base_src_set_format (basesrc, GST_FORMAT_TIME);
  gst_base_src_set_live (basesrc, TRUE);
  /* blocksize is determined later from the negotiated format */
  gst_base_src_set_blocksize (basesrc, -1);

  interaudiosrc->channel = g_strdup (DEFAULT_CHANNEL);
  interaudiosrc->buffer_time = DEFAULT_AUDIO_BUFFER_TIME;
  interaudiosrc->latency_time = DEFAULT_AUDIO_LATENCY_TIME;
  interaudiosrc->period_time = DEFAULT_AUDIO_PERIOD_TIME;
}
/* Instance init: live SDI capture source, defaulting to one NTSC frame
 * (720x480, 2 bytes/pixel) per read. */
static void
gst_linsys_sdi_src_init (GstLinsysSdiSrc * linsyssdisrc,
    GstLinsysSdiSrcClass * linsyssdisrc_class)
{
  gst_base_src_set_live (GST_BASE_SRC (linsyssdisrc), TRUE);
  gst_base_src_set_blocksize (GST_BASE_SRC (linsyssdisrc), 720 * 480 * 2);

  linsyssdisrc->device = g_strdup (DEFAULT_DEVICE);
  linsyssdisrc->is_625 = FALSE;   /* NTSC (525-line) until told otherwise */
  linsyssdisrc->fd = -1;          /* device not opened yet */
}
/* Caps negotiation: size basesrc reads to one full video frame.
 *
 * BUG FIX: the return value of gst_video_info_from_caps() was previously
 * ignored; on malformed caps the uninitialized GstVideoInfo was read
 * (undefined behavior) and a garbage blocksize installed.  Now the parse
 * failure is reported and the caps are rejected. */
static gboolean
gst_euresys_set_caps (GstBaseSrc * bsrc, GstCaps * caps)
{
  GstVideoInfo vinfo;

  GST_DEBUG_OBJECT (bsrc, "set_caps with caps=%" GST_PTR_FORMAT, caps);

  if (!gst_video_info_from_caps (&vinfo, caps)) {
    GST_ERROR_OBJECT (bsrc, "invalid caps");
    return FALSE;
  }

  /* TODO: check stride alignment */
  gst_base_src_set_blocksize (bsrc, GST_VIDEO_INFO_SIZE (&vinfo));

  return TRUE;
}
/* Caps negotiation: store the audio info, let the subclass react via its
 * setup() hook, and recompute the basesrc blocksize for the new format. */
static gboolean
gstbt_audio_synth_set_caps (GstBaseSrc * basesrc, GstCaps * caps)
{
  GstBtAudioSynth *self = GSTBT_AUDIO_SYNTH (basesrc);
  GstBtAudioSynthClass *klass = GSTBT_AUDIO_SYNTH_GET_CLASS (self);
  gboolean parsed;

  GST_INFO_OBJECT (self, "set_caps");

  parsed = gst_audio_info_from_caps (&self->info, caps);
  if (parsed) {
    /* give the subclass a chance to adapt to the new format */
    if (klass->setup) {
      klass->setup (self, &self->info);
    }
    gst_base_src_set_blocksize (basesrc,
        gstbt_audio_synth_calculate_buffer_size (self));
  }
  return parsed;
}
/* Instance init for the test HTTP source: zero all request/response state
 * and optionally honour a test-controlled global blocksize override. */
static void
gst_test_http_src_init (GstTestHTTPSrc * src)
{
  g_mutex_init (&src->mutex);

  src->uri = NULL;
  memset (&src->input, 0, sizeof (src->input));

  /* request options */
  src->compress = FALSE;
  src->keep_alive = FALSE;
  src->http_method_name = NULL;
  src->http_method = METHOD_GET;
  src->user_agent = NULL;

  /* streaming state */
  src->position = 0;
  src->segment_end = 0;
  src->http_headers_event = NULL;
  src->duration_changed = FALSE;

  /* tests may force a specific read size via this file-scope variable */
  if (gst_test_http_src_blocksize)
    gst_base_src_set_blocksize (GST_BASE_SRC (src),
        gst_test_http_src_blocksize);
}
/* Instance init: wire up the LADSPA wrapper and seed all source
 * properties with their defaults. */
static void
gst_ladspa_source_type_init (GstLADSPASource * ladspa,
    LADSPA_Descriptor * desc)
{
  GstLADSPASourceClass *ladspa_class = GST_LADSPA_SOURCE_GET_CLASS (ladspa);

  gst_ladspa_init (&ladspa->ladspa, &ladspa_class->ladspa);

  /* we operate in time */
  gst_base_src_set_format (GST_BASE_SRC (ladspa), GST_FORMAT_TIME);
  gst_base_src_set_live (GST_BASE_SRC (ladspa),
      GST_LADSPA_SOURCE_DEFAULT_IS_LIVE);

  ladspa->samples_per_buffer = GST_LADSPA_SOURCE_DEFAULT_SAMPLES_PER_BUFFER;
  ladspa->generate_samples_per_buffer = ladspa->samples_per_buffer;
  ladspa->timestamp_offset = GST_LADSPA_SOURCE_DEFAULT_TIMESTAMP_OFFSET;
  ladspa->can_activate_pull = GST_LADSPA_SOURCE_DEFAULT_CAN_ACTIVATE_PULL;

  /* real blocksize is computed during caps negotiation */
  gst_base_src_set_blocksize (GST_BASE_SRC (ladspa), -1);
}
/* Instance init (GStreamer 1.0 variant): seed defaults; the generator
 * and blocksize are finalized during caps negotiation. */
static void
gst_audio_test_src_init (GstAudioTestSrc * src)
{
  GstBaseSrc *basesrc = GST_BASE_SRC (src);

  src->volume = DEFAULT_VOLUME;
  src->freq = DEFAULT_FREQ;

  /* we operate in time */
  gst_base_src_set_format (basesrc, GST_FORMAT_TIME);
  gst_base_src_set_live (basesrc, DEFAULT_IS_LIVE);

  src->samples_per_buffer = DEFAULT_SAMPLES_PER_BUFFER;
  src->generate_samples_per_buffer = src->samples_per_buffer;
  src->timestamp_offset = DEFAULT_TIMESTAMP_OFFSET;
  src->can_activate_pull = DEFAULT_CAN_ACTIVATE_PULL;

  src->gen = NULL;
  src->wave = DEFAULT_WAVE;

  gst_base_src_set_blocksize (basesrc, -1);
}
/* Caps negotiation: store the audio format, update the blocksize and
 * set up the LADSPA plugin for the negotiated sample rate. */
static gboolean
gst_ladspa_source_type_set_caps (GstBaseSrc * base, GstCaps * caps)
{
  GstLADSPASource *src = GST_LADSPA_SOURCE (base);
  GstAudioInfo audio_info;

  if (!gst_audio_info_from_caps (&audio_info, caps)) {
    GST_ERROR_OBJECT (base, "received invalid caps");
    return FALSE;
  }

  GST_DEBUG_OBJECT (src, "negotiated to caps %" GST_PTR_FORMAT, caps);

  src->info = audio_info;
  gst_base_src_set_blocksize (base,
      GST_AUDIO_INFO_BPF (&audio_info) * src->samples_per_buffer);

  return gst_ladspa_setup (&src->ladspa, GST_AUDIO_INFO_RATE (&audio_info));
}
/* Recompute per-tick timing and the number of audio frames produced per
 * buffer from the current tempo settings (beats_per_minute, ticks_per_beat,
 * subticks_per_beat) and the negotiated sample rate.
 *
 * Side effects: updates self->ticktime, self->samples_per_buffer,
 * self->generate_samples_per_buffer, self->ticktime_err and the basesrc
 * blocksize.  NOTE(review): assumes self->info.rate is already valid,
 * i.e. caps have been negotiated — confirm against callers. */
static void
gstbt_audio_synth_calculate_buffer_frames (GstBtAudioSynth * self)
{
  /* ticks per minute at the current tempo */
  const gdouble ticks_per_minute =
      (gdouble) (self->beats_per_minute * self->ticks_per_beat);
  /* seconds-per-minute scaled down to one subtick's share of a tick */
  const gdouble div = 60.0 / self->subticks_per_beat;
  /* duration of one full tick, rounded to the nearest nanosecond */
  const GstClockTime ticktime =
      (GstClockTime) (0.5 + ((GST_SECOND * 60.0) / ticks_per_minute));

  /* stored ticktime is the (rounded) duration of one subtick */
  self->ticktime =
      (GstClockTime) (0.5 + ((GST_SECOND * div) / ticks_per_minute));
  /* frames per buffer kept as a double; rounded copy below drives I/O */
  self->samples_per_buffer = ((self->info.rate * div) / ticks_per_minute);
  GST_DEBUG ("samples_per_buffer=%lf", self->samples_per_buffer);
  self->generate_samples_per_buffer = (guint) (0.5 + self->samples_per_buffer);
  gst_base_src_set_blocksize (GST_BASE_SRC (self),
      gstbt_audio_synth_calculate_buffer_size (self));
  // the sequence is quantized to ticks and not subticks
  // we need to compensate for the rounding errors :/
  self->ticktime_err =
      ((gdouble) ticktime -
      (gdouble) (self->subticks_per_beat * self->ticktime)) /
      (gdouble) self->subticks_per_beat;
  GST_DEBUG ("ticktime err=%lf", self->ticktime_err);
}
/* Set Caps */
/* Validate and apply negotiated caps on the Android camera device:
 * parse format/size/framerate, check them against the device via
 * accept_caps(), fixate the media type on the device, size basesrc reads
 * to the device buffer size, compute the static per-buffer duration and
 * finally start capture (once).  Returns FALSE on any failure; error
 * paths use the labelled blocks at the end of the function. */
static gboolean
gst_android_video_source_set_caps(GstBaseSrc * p_basesrc, GstCaps * p_caps)
{
    GstAndroidVideoSource *p_src;
    GstStructure *p_structure;
    GstVideoFormat format;
    gint width;
    gint height;
    const GValue* p_framerate;
    gint framerate_num;
    gint framerate_den;
    gboolean caps_status = FALSE;
    GstVideoInfo video_info;
    int vcd_ret;

    GA_LOGTRACE("ENTER %s --xx--> thread(%ld)", __FUNCTION__, pthread_self());

    p_src = GST_ANDROIDVIDEOSOURCE(p_basesrc);

    /* only simple caps (exactly one structure) are supported */
    if (gst_caps_get_size(p_caps) != 1) {
        goto set_caps_err_caps_not_simple;
    }
    if (!gst_video_info_from_caps(&video_info, p_caps)) {
        goto set_caps_err_parsing_caps;
    }
    format = GST_VIDEO_INFO_FORMAT(&video_info);
    width = GST_VIDEO_INFO_WIDTH(&video_info);
    height = GST_VIDEO_INFO_HEIGHT(&video_info);
    /* framerate is read from the raw structure since it is required here */
    p_structure = gst_caps_get_structure(p_caps, 0);
    if (!p_structure) {
        goto set_caps_err_get_structure;
    }
    p_framerate = gst_structure_get_value(p_structure, "framerate");
    if (!p_framerate) {
        goto set_caps_err_no_framerate;
    }
    framerate_num = gst_value_get_fraction_numerator(p_framerate);
    framerate_den = gst_value_get_fraction_denominator(p_framerate);
    /* ask the device layer whether it can produce this configuration */
    if (accept_caps(p_src, format, width, height, framerate_num, framerate_den)) {
        gchar *caps_str = gst_caps_to_string(p_caps);
        GA_LOGINFO("%s: Caps are accepted! - caps are: %s", __FUNCTION__, caps_str ?
            caps_str : "[cannot print caps, gst_caps_to_string() failed]");
        g_free(caps_str);
        caps_str = NULL;
        caps_status = TRUE;
    } else {
        goto set_caps_err_caps_not_accepted;
    }

    // Caps are ok so fixate them on the device
    vcd_ret = VCD_fixateMediaType(
        p_src->m_devHandle,
        gst_video_fmt_to_vcd_fmt(format),
        width,
        height,
        (framerate_num * ANDROID_FPS_DENOMINATOR) / framerate_den);
    if (vcd_ret != VCD_NO_ERROR) {
        return FALSE;
    }
    /* basesrc reads exactly one device buffer per GstBuffer */
    p_src->m_bufSize = (gint) VCD_getBufferSize(
        p_src->m_devHandle,
        gst_video_fmt_to_vcd_fmt(format),
        width,
        height);
    if (p_src->m_bufSize <= 0) {
        return FALSE;
    }
    gst_base_src_set_blocksize(p_basesrc, (guint) p_src->m_bufSize);
    /* fixed per-frame duration derived from the negotiated framerate */
    p_src->m_duration = gst_util_uint64_scale(GST_SECOND, framerate_den, framerate_num);
    GA_LOGINFO("%s: setting static duration (GstBuffer) to: %"G_GUINT64_FORMAT, __FUNCTION__, p_src->m_duration);

    /* start capture only once; subsequent renegotiations skip this */
    if (!p_src->vcdStarted) {
        AV_CHECK_ERR(VCD_start(p_src->m_devHandle), set_caps_err_vcd_start);
        p_src->vcdStarted = TRUE;
    }

    GA_LOGTRACE("EXIT %s", __FUNCTION__);
    return caps_status;

    /*
     * propagate unhandled errors
     */
set_caps_err_caps_not_simple:
    {
        GA_LOGERROR("%s: ERROR: Got empty caps or caps list (not a simple caps (one caps))", __FUNCTION__);
        return FALSE;
    }
set_caps_err_parsing_caps:
    {
        GA_LOGERROR("%s: ERROR: Unknown error when parsning caps", __FUNCTION__);
        return FALSE;
    }
set_caps_err_get_structure:
    {
        GA_LOGERROR("%s: ERROR: Could not get GstStructure from caps", __FUNCTION__);
        return FALSE;
    }
set_caps_err_no_framerate:
    {
        GA_LOGERROR("%s: ERROR: Caps does not contain framerate", __FUNCTION__);
        return FALSE;
    }
set_caps_err_caps_not_accepted:
    {
        GA_LOGINFO("%s: Caps are NOT accepted. FORMAT=%d, width=%d, height=%d, framerate num=%d, framerate den=%d", __FUNCTION__, format, width, height, framerate_num, framerate_den);
        return FALSE;
    }
set_caps_err_vcd_start:
    {
        GA_LOGERROR("%s: ERROR: Could not start the video device", __FUNCTION__);
        return FALSE;
    }
}
/* Create vmethod for the Symbian DevSound capture source.
 *
 * Fills a freshly allocated buffer of `size` bytes by draining buffers
 * that a separate DevSound thread pushes onto the file-scope `dataqueue`.
 * Partially consumed queue buffers are carried across calls in the
 * file-scope `popBuffer` / `dataCopied` / `dataleft` state.  When the
 * queue runs dry, this thread signals the DevSound thread to record more
 * (via create_mutex1/create_condition1) and blocks until data arrives.
 *
 * Returns GST_FLOW_OK on success; GST_FLOW_UNEXPECTED on EOS, allocation
 * failure or a Symbian error (reported through post_symbian_error()).
 *
 * NOTE(review): `offset` is unused — this source only streams forward. */
static GstFlowReturn
gst_devsound_src_create(GstBaseSrc *src, guint64 offset, guint size, GstBuffer **buf)
{
    GstDevsoundSrc *dsrc= GST_DEVSOUND_SRC(src);
    int bufferpos=0;
    int ret = KErrNone;

    /* EOS with nothing queued: wake the DevSound thread so it can shut
     * down, then report cancellation upstream */
    if(!g_queue_get_length(dataqueue) && (dsrc->eosreceived == TRUE))
    {
        pthread_mutex_lock(&(create_mutex1));
        pthread_cond_signal(&(create_condition1));
        pthread_mutex_unlock(&(create_mutex1));
        post_symbian_error( src,KErrCancel );
        return GST_FLOW_UNEXPECTED;
    }

    //gst_debug_log(devsound_debug, GST_LEVEL_LOG, "", "", 0, (GObject *) dsrc, "gst_devsound_src_create ENTER ");

    //gst_debug_log(devsound_debug, GST_LEVEL_LOG, "", "", 0, (GObject *) dsrc, "Before Buffer Alloc in CREATE ",NULL);
    *buf = gst_buffer_try_new_and_alloc(size);
    //gst_debug_log(devsound_debug, GST_LEVEL_LOG, "", "", 0, (GObject *) dsrc, "AFter Buffer Alloc in CREATE ",NULL);
    if(*buf == NULL)
    {
        post_symbian_error( src,KErrNoMemory );
        return GST_FLOW_UNEXPECTED;
    }

    /* keep copying until the output buffer is full (size counts down) */
    while (size > 0)
    {
        if (dataleft >= size)
        {
            // if there is some data left in the popped buffer previously whose size
            // is more then the buffer which is incoming fresh to get filled, fill it
            //here. and if the data left in the popped buffer is 0, then unref it
            //gst_debug_log(devsound_debug, GST_LEVEL_LOG, "", "", 0, (GObject *) dsrc, "dataleft >=size in CREATE ", NULL);
            memcpy(GST_BUFFER_DATA(*buf)+bufferpos,GST_BUFFER_DATA(popBuffer)+dataCopied,size);
            bufferpos+=size;
            dataCopied += size;
            dataleft = GST_BUFFER_SIZE(popBuffer) - dataCopied;
            size = 0;
            if (dataleft == 0)
            {
                dataCopied = 0;
                gst_buffer_unref(popBuffer);
                popBuffer = NULL;
            }
        }
        else
        {
            // if the dataleft in the popped buffer is greater then 0 and less then
            // the size of data needed for the fresh buffer. copy the remaining data
            // from the popped buffer and then unref it.
            if (dataleft > 0)
            {
                //gst_debug_log(devsound_debug, GST_LEVEL_LOG, "", "", 0, (GObject *) dsrc, "dataleft >0 in CREATE ",NULL);
                memcpy(GST_BUFFER_DATA(*buf)+bufferpos,GST_BUFFER_DATA(popBuffer)+dataCopied,dataleft);
                size -= dataleft;
                bufferpos += dataleft;
                dataCopied = 0;
                dataleft = 0;
                gst_buffer_unref(popBuffer);
                popBuffer = NULL;
            }

            // we wait here if the dataqueue length is 0 and we need data
            // to be filled in the queue from the DevSound Thread
            if (!g_queue_get_length(dataqueue))
            {
                //gst_debug_log(devsound_debug, GST_LEVEL_LOG, "", "", 0, (GObject *) dsrc, "Before WAIT in CREATE ",NULL);
                if(dsrc->eosreceived == TRUE)
                {
                    post_symbian_error( src,KErrCancel );
                    return GST_FLOW_UNEXPECTED;
                }
                else
                {
                    /* ask the DevSound thread to record, then block until
                     * it signals data (or an error) back */
                    cmd = RECORDING;
                    return_error = KErrNone;
                    pthread_mutex_lock(&(create_mutex1));
                    pthread_cond_signal(&(create_condition1));
                    pthread_mutex_unlock(&(create_mutex1));

                    pthread_mutex_lock(&(create_mutex1));
                    pthread_cond_wait(&(create_condition1), &(create_mutex1));
                    ret = return_error;
                    pthread_mutex_unlock(&(create_mutex1));
                }
                //gst_debug_log(devsound_debug, GST_LEVEL_LOG, "", "", 0, (GObject *) dsrc, "AFTER WAIT in CREATE ",NULL);
            }

            /* DevSound thread reported an error while we were waiting */
            if( ret )
            {
                post_symbian_error( src,ret );
                return GST_FLOW_UNEXPECTED;
            }

            //gst_debug_log(devsound_debug, GST_LEVEL_LOG, "", "", 0, (GObject *) dsrc, "Before POP in CREATE ",NULL);
            GST_OBJECT_LOCK(dsrc);
            popBuffer = (GstBuffer*)g_queue_pop_tail(dataqueue);
            GST_OBJECT_UNLOCK(dsrc);

            if(!popBuffer )
            {
                post_symbian_error( src,KErrNoMemory );
                return GST_FLOW_UNEXPECTED;
            }

            /* first buffer(s) after (re)start: shrink this GstBuffer to the
             * device buffer size and remember the old blocksize so it can
             * be restored below once pre-roll is done */
            if(dsrc->firstTimeInit != kPlayed)
            {
                dsrc->prevbuffersize = gst_base_src_get_blocksize(src);
                gst_base_src_set_blocksize (src, GST_BUFFER_SIZE(popBuffer));
                (*buf)->size = GST_BUFFER_SIZE(popBuffer);
            }

            // copy the data from the popped buffer based on how much of the incoming
            //buffer size is left to fill. we might have filled the fresh buffer somewhat
            // where the size of the fresh buffer is more then the data remaining in the
            // popped buffer.
            if (size < GST_BUFFER_SIZE(popBuffer))
            {
                //gst_debug_log(devsound_debug, GST_LEVEL_LOG, "", "", 0, (GObject *) dsrc, "SIZE < POPBUFFER CREATE ",NULL);
                memcpy(GST_BUFFER_DATA(*buf)+ bufferpos,GST_BUFFER_DATA(popBuffer),size);
                bufferpos+=size;
                dataCopied = size;
                dataleft = GST_BUFFER_SIZE(popBuffer) - dataCopied;
                size = 0;
            }
            else
            {
                //gst_debug_log(devsound_debug, GST_LEVEL_LOG, "", "", 0, (GObject *) dsrc, "SIZE >= POPBUFFER CREATE ",NULL);
                memcpy(GST_BUFFER_DATA(*buf)+ bufferpos,GST_BUFFER_DATA(popBuffer),GST_BUFFER_SIZE(popBuffer));
                bufferpos+=GST_BUFFER_SIZE(popBuffer);
                dataCopied = 0;
                dataleft = 0;
                size = size - GST_BUFFER_SIZE(popBuffer);
            }

            if (!dataleft)
            {
                gst_buffer_unref(popBuffer);
                popBuffer = NULL;
            }

            /* pre-roll state machine: after the pre-roll buffer, restore
             * the caller's original blocksize and mark playback started */
            if (dsrc->firstTimeInit == kPlayBufferPreRoll)
            {
                gst_base_src_set_blocksize (src, dsrc->prevbuffersize);
                dsrc->firstTimeInit = kPlayed;
                return GST_FLOW_OK;
            }

            if (dsrc->firstTimeInit == kPausedToPlaying)
            {
                dsrc->firstTimeInit = kPlayBufferPreRoll;
                return GST_FLOW_OK;
            }
        }
    }
    //gst_debug_log(devsound_debug, GST_LEVEL_LOG, "", "", 0, (GObject *) dsrc, "gst_devsound_src_create EXIT ",NULL);
    return GST_FLOW_OK;
}