/* Render one video frame from VISUAL_SAMPLES samples of input audio.
 * The audio is deinterleaved (stereo) or duplicated (mono) into two
 * per-channel libvisual buffers, pushed into the actor's sample pool,
 * and the actor then draws into the mapped video frame.
 *
 * Returns FALSE (without rendering) if the sample rate is not one of
 * the rates libvisual supports. The audio buffer is unmapped on all
 * paths. */
static gboolean
gst_visual_render (GstAudioVisualizer * bscope, GstBuffer * audio,
    GstVideoFrame * video)
{
  GstVisual *visual = GST_VISUAL (bscope);
  GstMapInfo audio_map;
  const guint16 *in;
  gint s, num_channels;
  gboolean result = TRUE;
  VisBuffer *left_buf, *right_buf;
  guint16 left[VISUAL_SAMPLES], right[VISUAL_SAMPLES];
  VisAudioSampleRateType sample_rate;

  /* point the libvisual video object at the frame memory we were handed */
  visual_video_set_buffer (visual->video,
      GST_VIDEO_FRAME_PLANE_DATA (video, 0));
  visual_video_set_pitch (visual->video,
      GST_VIDEO_FRAME_PLANE_STRIDE (video, 0));

  num_channels = GST_AUDIO_INFO_CHANNELS (&bscope->ainfo);
  gst_buffer_map (audio, &audio_map, GST_MAP_READ);
  in = (const guint16 *) audio_map.data;

  /* wrap our stack arrays so libvisual can consume them */
  left_buf = visual_buffer_new_with_buffer (left, sizeof (left), NULL);
  right_buf = visual_buffer_new_with_buffer (right, sizeof (right), NULL);

  if (num_channels == 2) {
    /* interleaved stereo: alternating samples go left, right, left, ... */
    for (s = 0; s < VISUAL_SAMPLES; s++) {
      left[s] = *in++;
      right[s] = *in++;
    }
  } else {
    /* mono: copy each sample into both channels */
    for (s = 0; s < VISUAL_SAMPLES; s++) {
      left[s] = *in;
      right[s] = *in++;
    }
  }

  /* TODO(ensonic): move to setup */
  switch (bscope->ainfo.rate) {
    case 8000:
      sample_rate = VISUAL_AUDIO_SAMPLE_RATE_8000;
      break;
    case 11250:
      sample_rate = VISUAL_AUDIO_SAMPLE_RATE_11250;
      break;
    case 22500:
      sample_rate = VISUAL_AUDIO_SAMPLE_RATE_22500;
      break;
    case 32000:
      sample_rate = VISUAL_AUDIO_SAMPLE_RATE_32000;
      break;
    case 44100:
      sample_rate = VISUAL_AUDIO_SAMPLE_RATE_44100;
      break;
    case 48000:
      sample_rate = VISUAL_AUDIO_SAMPLE_RATE_48000;
      break;
    case 96000:
      sample_rate = VISUAL_AUDIO_SAMPLE_RATE_96000;
      break;
    default:
      /* drop our buffer wrappers before bailing out */
      visual_object_unref (VISUAL_OBJECT (left_buf));
      visual_object_unref (VISUAL_OBJECT (right_buf));
      GST_ERROR_OBJECT (visual, "unsupported rate %d", bscope->ainfo.rate);
      result = FALSE;
      goto done;
  }

  visual_audio_samplepool_input_channel (visual->audio->samplepool,
      left_buf, sample_rate, VISUAL_AUDIO_SAMPLE_FORMAT_S16,
      (char *) VISUAL_AUDIO_CHANNEL_LEFT);
  visual_audio_samplepool_input_channel (visual->audio->samplepool,
      right_buf, sample_rate, VISUAL_AUDIO_SAMPLE_FORMAT_S16,
      (char *) VISUAL_AUDIO_CHANNEL_RIGHT);

  visual_object_unref (VISUAL_OBJECT (left_buf));
  visual_object_unref (VISUAL_OBJECT (right_buf));

  visual_audio_analyze (visual->audio);
  visual_actor_run (visual->actor, visual->audio);

  /* detach the frame memory again; it belongs to the caller */
  visual_video_set_buffer (visual->video, NULL);

  GST_DEBUG_OBJECT (visual, "rendered one frame");

done:
  gst_buffer_unmap (audio, &audio_map);

  return result;
}
/* Sink-pad chain function: accumulate incoming audio in the adapter and,
 * whenever at least one frame's worth (and at least 512 samples) is
 * available, feed 512 samples per channel into libvisual, render a video
 * frame and push it downstream.
 *
 * Frames whose running time falls before the QoS earliest-time are skipped
 * (their samples are still flushed from the adapter). Returns the flow
 * result of the last push, or an error on unsupported sample rates /
 * failed buffer allocation.
 *
 * Fix over previous revision: removed the unreachable `break;` that
 * followed `goto beach;` in the switch default case. */
static GstFlowReturn
gst_visual_chain (GstPad * pad, GstBuffer * buffer)
{
  GstBuffer *outbuf = NULL;
  guint i;
  GstVisual *visual = GST_VISUAL (gst_pad_get_parent (pad));
  GstFlowReturn ret = GST_FLOW_OK;
  guint avail;

  GST_DEBUG_OBJECT (visual, "chain function called");

  /* If we don't have an output format yet, preallocate a buffer to try and
   * set one */
  if (GST_PAD_CAPS (visual->srcpad) == NULL) {
    ret = get_buffer (visual, &outbuf);
    if (ret != GST_FLOW_OK) {
      gst_buffer_unref (buffer);
      goto beach;
    }
  }

  /* resync on DISCONT */
  if (GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DISCONT)) {
    gst_adapter_clear (visual->adapter);
  }

  GST_DEBUG_OBJECT (visual,
      "Input buffer has %d samples, time=%" G_GUINT64_FORMAT,
      GST_BUFFER_SIZE (buffer) / visual->bps, GST_BUFFER_TIMESTAMP (buffer));

  /* adapter takes ownership of the input buffer */
  gst_adapter_push (visual->adapter, buffer);

  while (TRUE) {
    gboolean need_skip;
    const guint16 *data;
    guint64 dist, timestamp;

    GST_DEBUG_OBJECT (visual, "processing buffer");

    avail = gst_adapter_available (visual->adapter);
    GST_DEBUG_OBJECT (visual, "avail now %u", avail);

    /* we need at least 512 samples */
    if (avail < 512 * visual->bps)
      break;

    /* we need at least enough samples to make one frame */
    if (avail < visual->spf * visual->bps)
      break;

    /* get timestamp of the current adapter byte */
    timestamp = gst_adapter_prev_timestamp (visual->adapter, &dist);
    if (GST_CLOCK_TIME_IS_VALID (timestamp)) {
      /* convert bytes to time */
      dist /= visual->bps;
      timestamp += gst_util_uint64_scale_int (dist, GST_SECOND, visual->rate);
    }

    if (timestamp != -1) {
      gint64 qostime;

      /* QoS is done on running time */
      qostime = gst_segment_to_running_time (&visual->segment,
          GST_FORMAT_TIME, timestamp);

      GST_OBJECT_LOCK (visual);
      /* check for QoS, don't compute buffers that are known to be late */
      need_skip = visual->earliest_time != -1 &&
          qostime <= visual->earliest_time;
      GST_OBJECT_UNLOCK (visual);

      if (need_skip) {
        GST_WARNING_OBJECT (visual,
            "QoS: skip ts: %" GST_TIME_FORMAT ", earliest: %" GST_TIME_FORMAT,
            GST_TIME_ARGS (qostime), GST_TIME_ARGS (visual->earliest_time));
        goto skip;
      }
    }

    /* Read 512 samples per channel */
    data = (const guint16 *) gst_adapter_peek (visual->adapter,
        512 * visual->bps);

#if defined(VISUAL_API_VERSION) && VISUAL_API_VERSION >= 4000 && VISUAL_API_VERSION < 5000
    {
      VisBuffer *lbuf, *rbuf;
      guint16 ldata[512], rdata[512];
      VisAudioSampleRateType rate;

      lbuf = visual_buffer_new_with_buffer (ldata, sizeof (ldata), NULL);
      rbuf = visual_buffer_new_with_buffer (rdata, sizeof (rdata), NULL);

      if (visual->channels == 2) {
        /* deinterleave stereo samples */
        for (i = 0; i < 512; i++) {
          ldata[i] = *data++;
          rdata[i] = *data++;
        }
      } else {
        /* mono: duplicate each sample into both channels */
        for (i = 0; i < 512; i++) {
          ldata[i] = *data;
          rdata[i] = *data++;
        }
      }

      switch (visual->rate) {
        case 8000:
          rate = VISUAL_AUDIO_SAMPLE_RATE_8000;
          break;
        case 11250:
          rate = VISUAL_AUDIO_SAMPLE_RATE_11250;
          break;
        case 22500:
          rate = VISUAL_AUDIO_SAMPLE_RATE_22500;
          break;
        case 32000:
          rate = VISUAL_AUDIO_SAMPLE_RATE_32000;
          break;
        case 44100:
          rate = VISUAL_AUDIO_SAMPLE_RATE_44100;
          break;
        case 48000:
          rate = VISUAL_AUDIO_SAMPLE_RATE_48000;
          break;
        case 96000:
          rate = VISUAL_AUDIO_SAMPLE_RATE_96000;
          break;
        default:
          visual_object_unref (VISUAL_OBJECT (lbuf));
          visual_object_unref (VISUAL_OBJECT (rbuf));
          GST_ERROR_OBJECT (visual, "unsupported rate %d", visual->rate);
          ret = GST_FLOW_ERROR;
          goto beach;
      }

      visual_audio_samplepool_input_channel (visual->audio->samplepool,
          lbuf, rate, VISUAL_AUDIO_SAMPLE_FORMAT_S16,
          (char *) VISUAL_AUDIO_CHANNEL_LEFT);
      visual_audio_samplepool_input_channel (visual->audio->samplepool,
          rbuf, rate, VISUAL_AUDIO_SAMPLE_FORMAT_S16,
          (char *) VISUAL_AUDIO_CHANNEL_RIGHT);

      visual_object_unref (VISUAL_OBJECT (lbuf));
      visual_object_unref (VISUAL_OBJECT (rbuf));
    }
#else
    if (visual->channels == 2) {
      for (i = 0; i < 512; i++) {
        visual->audio->plugpcm[0][i] = *data++;
        visual->audio->plugpcm[1][i] = *data++;
      }
    } else {
      for (i = 0; i < 512; i++) {
        visual->audio->plugpcm[0][i] = *data;
        visual->audio->plugpcm[1][i] = *data++;
      }
    }
#endif

    /* alloc a buffer if we don't have one yet, this happens
     * when we pushed a buffer in this while loop before */
    if (outbuf == NULL) {
      ret = get_buffer (visual, &outbuf);
      if (ret != GST_FLOW_OK) {
        goto beach;
      }
    }
    visual_video_set_buffer (visual->video, GST_BUFFER_DATA (outbuf));
    visual_audio_analyze (visual->audio);
    visual_actor_run (visual->actor, visual->audio);
    visual_video_set_buffer (visual->video, NULL);
    GST_DEBUG_OBJECT (visual, "rendered one frame");

    GST_BUFFER_TIMESTAMP (outbuf) = timestamp;
    GST_BUFFER_DURATION (outbuf) = visual->duration;

    ret = gst_pad_push (visual->srcpad, outbuf);
    outbuf = NULL;

  skip:
    GST_DEBUG_OBJECT (visual,
        "finished frame, flushing %u samples from input", visual->spf);

    /* Flush out the number of samples per frame */
    gst_adapter_flush (visual->adapter, visual->spf * visual->bps);

    /* quit the loop if something was wrong */
    if (ret != GST_FLOW_OK)
      break;
  }

beach:
  if (outbuf != NULL)
    gst_buffer_unref (outbuf);

  gst_object_unref (visual);

  return ret;
}
/* Render one frame with a GL-based libvisual actor.
 *
 * Peeks VISUAL_SAMPLES samples per channel from the adapter (without
 * flushing), feeds them into the libvisual sample pool, then runs the
 * actor inside a saved/restored GL matrix and attrib state so the actor
 * cannot leak GL state into the rest of the pipeline.
 *
 * Fix over previous revision: the local was declared `gcahr *name;`
 * (typo) — corrected to `gchar *name;`. */
static void
render_frame (GstVisualGL * visual)
{
  const guint16 *data;
  VisBuffer *lbuf, *rbuf;
  guint16 ldata[VISUAL_SAMPLES], rdata[VISUAL_SAMPLES];
  guint i;
  gchar *name;

  /* Read VISUAL_SAMPLES samples per channel */
  data = (const guint16 *) gst_adapter_peek (visual->adapter,
      VISUAL_SAMPLES * visual->bps);

  lbuf = visual_buffer_new_with_buffer (ldata, sizeof (ldata), NULL);
  rbuf = visual_buffer_new_with_buffer (rdata, sizeof (rdata), NULL);

  if (visual->channels == 2) {
    /* deinterleave stereo samples */
    for (i = 0; i < VISUAL_SAMPLES; i++) {
      ldata[i] = *data++;
      rdata[i] = *data++;
    }
  } else {
    /* mono: duplicate each sample into both channels */
    for (i = 0; i < VISUAL_SAMPLES; i++) {
      ldata[i] = *data;
      rdata[i] = *data++;
    }
  }

  visual_audio_samplepool_input_channel (visual->audio->samplepool, lbuf,
      visual->libvisual_rate, VISUAL_AUDIO_SAMPLE_FORMAT_S16,
      VISUAL_AUDIO_CHANNEL_LEFT);
  visual_audio_samplepool_input_channel (visual->audio->samplepool, rbuf,
      visual->libvisual_rate, VISUAL_AUDIO_SAMPLE_FORMAT_S16,
      VISUAL_AUDIO_CHANNEL_RIGHT);

  visual_object_unref (VISUAL_OBJECT (lbuf));
  visual_object_unref (VISUAL_OBJECT (rbuf));

  visual_audio_analyze (visual->audio);

  /* apply the matrices that the actor set up */
  glPushAttrib (GL_ALL_ATTRIB_BITS);

  glMatrixMode (GL_PROJECTION);
  glPushMatrix ();
  glLoadMatrixd (visual->actor_projection_matrix);

  glMatrixMode (GL_MODELVIEW);
  glPushMatrix ();
  glLoadMatrixd (visual->actor_modelview_matrix);

  /* This line try to hacks compatiblity with libprojectM
   * If libprojectM version <= 2.0.0 then we have to unbind our current
   * fbo to see something. But it's incorrect and we cannot use fbo chainning
   * (append other glfilters after libvisual_gl_projectM will not work)
   * To have full compatibility, libprojectM needs to take care of our fbo.
   * Indeed libprojectM has to unbind it before the first rendering pass
   * and then rebind it before the final pass. It's done from 2.0.1 */
  name = gst_element_get_name (GST_ELEMENT (visual));
  if (g_ascii_strncasecmp (name, "visualglprojectm", 16) == 0
      && !HAVE_PROJECTM_TAKING_CARE_OF_EXTERNAL_FBO)
    glBindFramebufferEXT (GL_FRAMEBUFFER_EXT, 0);
  g_free (name);

  actor_negotiate (visual->display, visual);

  if (visual->is_enabled_gl_depth_test) {
    glEnable (GL_DEPTH_TEST);
    glDepthFunc (visual->gl_depth_func);
  }

  if (visual->is_enabled_gl_blend) {
    glEnable (GL_BLEND);
    glBlendFunc (visual->gl_blend_src_alpha, GL_ZERO);
  }

  visual_actor_run (visual->actor, visual->audio);

  check_gl_matrix ();

  glMatrixMode (GL_PROJECTION);
  glPopMatrix ();

  glMatrixMode (GL_MODELVIEW);
  glPopMatrix ();

  glPopAttrib ();

  glDisable (GL_DEPTH_TEST);
  glDisable (GL_BLEND);

  /*glDisable (GL_LIGHT0);
     glDisable (GL_LIGHTING);
     glDisable (GL_POLYGON_OFFSET_FILL);
     glDisable (GL_COLOR_MATERIAL);
     glDisable (GL_CULL_FACE); */

  GST_DEBUG_OBJECT (visual, "rendered one frame");
}