/*
 * Loop to skip the first few samples of a stream.
 *
 * Peeks fragments until the stream time has advanced past the first
 * observed timestamp plus pulse_start_delay, dropping everything seen
 * along the way.
 *
 * Returns 0 once the delay has elapsed, or -1 if the event was signaled
 * (shutdown) before that happened.
 */
static int pulse_skip(struct pulse_data *data)
{
	/* skip == 1 is a sentinel meaning "no baseline timestamp yet";
	 * replaced by the first non-zero stream time we observe */
	uint64_t skip = 1;
	const void *frames;
	size_t bytes;
	uint64_t pa_time;

	/* run until the shutdown event fires */
	while (os_event_try(data->event) == EAGAIN) {
		pulse_iterate(data);
		pa_stream_peek(data->stream, &frames, &bytes);

		/* nothing readable yet */
		if (!bytes)
			continue;

		/* hole in the stream, or no usable timestamp: discard */
		if (!frames || pa_stream_get_time(data->stream, &pa_time) < 0) {
			pa_stream_drop(data->stream);
			continue;
		}

		/* capture the baseline timestamp on first valid fragment */
		if (skip == 1 && pa_time)
			skip = pa_time;
		/* startup delay elapsed: stop skipping (fragment left
		 * peeked for the caller's loop to consume) */
		if (skip + pulse_start_delay < pa_time)
			return 0;
		pa_stream_drop(data->stream);
	}

	return -1;
}
/*
 * Synchronously read exactly `length` bytes of captured audio into `data`.
 *
 * Blocks on the threaded mainloop until the full request is satisfied.
 * Holes in the record stream are skipped (not filled with silence, since
 * that would corrupt compressed streams).
 *
 * Returns 0 on success, -1 on failure; on failure *rerror (if non-NULL)
 * receives a PA_ERR_* code via the CHECK_* macros.
 */
int pa_simple_read(pa_simple *p, void*data, size_t length, int *rerror) {
    pa_assert(p);

    CHECK_VALIDITY_RETURN_ANY(rerror, p->direction == PA_STREAM_RECORD, PA_ERR_BADSTATE, -1);
    CHECK_VALIDITY_RETURN_ANY(rerror, data, PA_ERR_INVALID, -1);
    CHECK_VALIDITY_RETURN_ANY(rerror, length > 0, PA_ERR_INVALID, -1);

    pa_threaded_mainloop_lock(p->mainloop);

    CHECK_DEAD_GOTO(p, rerror, unlock_and_fail);

    while (length > 0) {
        size_t l;

        /* Acquire the next fragment if we do not hold one already;
         * p->read_data/read_length/read_index carry leftover state
         * between calls. */
        while (!p->read_data) {
            int r;

            r = pa_stream_peek(p->stream, &p->read_data, &p->read_length);
            CHECK_SUCCESS_GOTO(p, rerror, r == 0, unlock_and_fail);

            if (p->read_length <= 0) {
                /* No data buffered: wait for the read callback to
                 * signal the mainloop. */
                pa_threaded_mainloop_wait(p->mainloop);
                CHECK_DEAD_GOTO(p, rerror, unlock_and_fail);
            } else if (!p->read_data) {
                /* There's a hole in the stream, skip it. We could generate
                 * silence, but that wouldn't work for compressed streams. */
                r = pa_stream_drop(p->stream);
                CHECK_SUCCESS_GOTO(p, rerror, r == 0, unlock_and_fail);
            } else
                p->read_index = 0;
        }

        /* Copy as much of the current fragment as the caller wants. */
        l = p->read_length < length ? p->read_length : length;
        memcpy(data, (const uint8_t*) p->read_data+p->read_index, l);

        data = (uint8_t*) data + l;
        length -= l;

        p->read_index += l;
        p->read_length -= l;

        /* Fragment fully consumed: release it and reset leftover state. */
        if (!p->read_length) {
            int r;

            r = pa_stream_drop(p->stream);
            p->read_data = NULL;
            p->read_length = 0;
            p->read_index = 0;

            CHECK_SUCCESS_GOTO(p, rerror, r == 0, unlock_and_fail);
        }
    }

    pa_threaded_mainloop_unlock(p->mainloop);
    return 0;

unlock_and_fail:
    pa_threaded_mainloop_unlock(p->mainloop);
    return -1;
}
/**
 * Callback for pulse which gets executed when new audio data is available
 *
 * Peeks one fragment, timestamps it with the current time minus the
 * stream latency, and forwards it to the OBS source.
 *
 * @warning The function may be called even after disconnecting the stream
 */
static void pulse_stream_read(pa_stream *p, size_t nbytes, void *userdata)
{
	UNUSED_PARAMETER(p);
	UNUSED_PARAMETER(nbytes);
	PULSE_DATA(userdata);

	const void *frames;
	size_t bytes;
	int64_t latency;

	/* stream may already be torn down when this fires */
	if (!data->stream)
		goto exit;

	pa_stream_peek(data->stream, &frames, &bytes);

	// check if we got data
	if (!bytes)
		goto exit;

	/* NULL frames with non-zero bytes marks a hole: discard it */
	if (!frames) {
		blog(LOG_ERROR, "pulse-input: Got audio hole of %u bytes",
			(unsigned int) bytes);
		pa_stream_drop(data->stream);
		goto exit;
	}

	if (pulse_get_stream_latency(data->stream, &latency) < 0) {
		blog(LOG_ERROR, "pulse-input: Failed to get timing info !");
		pa_stream_drop(data->stream);
		goto exit;
	}

	struct source_audio out;
	out.speakers = data->speakers;
	out.samples_per_sec = data->samples_per_sec;
	out.format = pulse_to_obs_audio_format(data->format);
	out.data[0] = (uint8_t *) frames;
	out.frames = bytes / data->bytes_per_frame;
	/* latency is in microseconds; timestamp is in nanoseconds */
	out.timestamp = os_gettime_ns() - (latency * 1000ULL);
	obs_source_output_audio(data->source, &out);

	data->packets++;
	data->frames += out.frames;

	pa_stream_drop(data->stream);
exit:
	/* always signal the iterating thread, even on early exit */
	pulse_signal(0);
}
/*
 * Monitor-stream read callback: peek the newest fragment, report the last
 * float sample as the current level (clamped at 0 below), store it in the
 * global `ret`, and quit the main loop.
 */
void on_monitor_read_callback(pa_stream *p, size_t length, void *userdata)
{
	const void *data;
	double v;

	/* FIX: size_t must be printed with %zu and uint32_t with %u;
	 * the original "%d" conversions were undefined behavior on
	 * platforms where these types are not int-sized. */
	printf("read callback length: %zu\n", length);
	printf("\tget_device_index: %u\n", pa_stream_get_device_index(p));
	printf("\tget_device_name: %s\n", pa_stream_get_device_name(p));
	printf("\tget_monitor_stream: %u\n", pa_stream_get_monitor_stream(p));

	if (pa_stream_peek(p, &data, &length) < 0) {
		printf("Failed to read data from stream\n");
		return;
	}

	assert(length > 0);
	assert(length % sizeof(float) == 0);

	/* last sample of the fragment is the most recent level value */
	v = ((const float*) data)[length / sizeof(float) -1];

	pa_stream_drop(p);

	if (v < 0) v = 0;
	//if (v > 1) v = 1;

	printf("\tread callback peek: %f\n", v);

	ret = v;
	g_main_loop_quit(mainloop);
}
/*
 * PulseAudio read callback: forward captured PCM to the Guacamole audio
 * stream, flushing the encoder periodically.
 */
static void __stream_read_callback(pa_stream* stream, size_t length,
        void* data) {

    guac_client* client = (guac_client*) data;
    vnc_guac_client_data* client_data = (vnc_guac_client_data*) client->data;
    guac_audio_stream* audio = client_data->audio;

    const void* buffer;

    /* Read data; bail out if the peek itself failed */
    if (pa_stream_peek(stream, &buffer, &length) < 0)
        return;

    /* Nothing buffered: there is no fragment to drop */
    if (length == 0)
        return;

    /* A NULL buffer with non-zero length is a hole in the stream; skip it
     * instead of passing NULL to the PCM writer (the original did not
     * check for this case) */
    if (buffer != NULL) {

        /* Write data */
        guac_audio_stream_write_pcm(audio, buffer, length);

        /* Flush occasionally */
        if (audio->pcm_bytes_written > GUAC_VNC_PCM_WRITE_RATE) {
            guac_audio_stream_end(audio);
            guac_audio_stream_begin(client_data->audio,
                    GUAC_VNC_AUDIO_RATE,
                    GUAC_VNC_AUDIO_CHANNELS,
                    GUAC_VNC_AUDIO_BPS);
        }

    }

    /* Advance buffer */
    pa_stream_drop(stream);

}
/*
 * Set the threshold in the recorder_context structure.
 *
 * Accumulates the squared-sum and sample count across invocations
 * (static locals) and stores the running RMS as the detection threshold.
 */
static void detect_threshold_cb(pa_stream *stream, size_t length, void *userdata)
{
	//TODO: Calculate a dynamic threshold both when the environment has changed
	//      and when a SIGPIPE signal has been received.
	const void *data;
	size_t size = 0;
	static size_t acc_size = 0;
	double sum;
	static double acc_sqsum = 0;
	recorder_context_t *rctx = (recorder_context_t *) userdata;

	pa_stream_peek(stream, &data, &size);
#ifdef DEBUG
	if (data)
		fwrite(data, sizeof(uint8_t), size, threshold_file);
#endif
	if (data) {
		/* We are sampling with 16 bits but size is measured in bytes */
		size >>= 1;

		/* FIX: accumulate BEFORE dropping; the original called
		 * pa_stream_drop() first and then read the fragment via
		 * sqsum(), i.e. a use-after-drop of the peeked buffer. */
		sum = sqsum((const uint8_t *) data, size);
		acc_sqsum += sum;
		acc_size += size;
	}

	/* Drop both real fragments and holes (data == NULL, size > 0);
	 * the original never dropped holes, stalling the stream. */
	if (data || size > 0)
		pa_stream_drop(stream);

	if (acc_size > 0)
		rctx->threshold = sqrt(acc_sqsum / acc_size);
}
/*
 * PulseAudio read callback: forward captured PCM to the Guacamole audio
 * stream, suppressing leading silence and flushing the encoder and socket
 * periodically.
 */
static void __stream_read_callback(pa_stream* stream, size_t length,
        void* data) {

    guac_client* client = (guac_client*) data;
    vnc_guac_client_data* client_data = (vnc_guac_client_data*) client->data;
    guac_audio_stream* audio = client_data->audio;

    const void* buffer;

    /* Read data */
    /* NOTE(review): return value is not checked and `buffer` may be NULL
     * for a hole in the stream — verify guac_pa_is_silence() and
     * guac_audio_stream_write_pcm() tolerate NULL */
    pa_stream_peek(stream, &buffer, &length);

    /* Avoid sending silence unless data is waiting to be flushed */
    if (audio->pcm_bytes_written != 0 || !guac_pa_is_silence(buffer, length)) {

        /* Write data */
        guac_audio_stream_write_pcm(audio, buffer, length);

        /* Flush occasionally */
        if (audio->pcm_bytes_written > GUAC_VNC_PCM_WRITE_RATE) {
            guac_audio_stream_end(audio);
            guac_audio_stream_begin(client_data->audio,
                    GUAC_VNC_AUDIO_RATE,
                    GUAC_VNC_AUDIO_CHANNELS,
                    GUAC_VNC_AUDIO_BPS);
            guac_socket_flush(client->socket);
        }

    }

    /* Advance buffer */
    pa_stream_drop(stream);

}
/*
 * Drain everything PulseAudio has buffered into the ring buffer and
 * return the number of sample frames now available in the ring.
 *
 * Returns 0 frames drained (ring size only) when the device is
 * disconnected.
 */
static ALCuint pulse_available_samples(ALCdevice *device) //{{{
{
    pulse_data *data = device->ExtraData;
    size_t samples;

    pa_threaded_mainloop_lock(data->loop);
    /* Capture is done in fragment-sized chunks, so we loop until we get all
     * that's available */
    samples = (device->Connected ? pa_stream_readable_size(data->stream) : 0);
    while(samples > 0)
    {
        const void *buf;
        size_t length;

        if(pa_stream_peek(data->stream, &buf, &length) < 0)
        {
            ERR("pa_stream_peek() failed: %s\n",
                pa_strerror(pa_context_errno(data->context)));
            break;
        }

        /* NOTE(review): a hole (buf == NULL, length > 0) would pass NULL
         * to WriteRingBuffer here — confirm holes cannot occur on this
         * stream or that WriteRingBuffer tolerates NULL */
        WriteRingBuffer(data->ring, buf, length/data->frame_size);
        /* samples is actually a byte count here (readable_size is in
         * bytes), so subtracting length is consistent */
        samples -= length;

        pa_stream_drop(data->stream);
    }
    pa_threaded_mainloop_unlock(data->loop);

    return RingBufferSize(data->ring);
} //}}}
/**
 * Record callback.
 *   @stream: The stream.
 *   @nbytes: The number of bytes available.
 *   @arg: The argument.
 *
 * De-interleaves the captured floats into per-channel ring buffers of
 * length 2*lat, zero-filling channels beyond the configured input count.
 * On overrun (write pointer catches the read pointer) both reset flags
 * are raised so the two sides resynchronize.
 */
static void conn_record(pa_stream *stream, size_t nbytes, void *arg)
{
	struct pulse_conn_t *conn = arg;
	unsigned int i, j, cnt;
	const float *data;

	pa_stream_peek(stream, (const void **)&data, &nbytes);
	/* number of complete input frames in the fragment */
	cnt = nbytes / (conn->conf.in * sizeof(float));

	/* resync: jump the write pointer one latency ahead of the reader */
	if(conn->reset[0])
		conn->wr = conn->lat, conn->reset[0] = 0;

	for(i = 0; i < cnt; i++) {
		/* real input channels, then zero-pad up to full width */
		for(j = 0; j < conn->conf.in; j++)
			conn->buf[j][conn->wr] = *data++;
		for(; j < conn->width; j++)
			conn->buf[j][conn->wr] = 0.0f;

		conn->wr = (conn->wr + 1) % (2 * conn->lat);
		/* writer lapped the reader: flag overrun on both sides */
		if(conn->wr == conn->rd)
			conn->reset[0] = conn->reset[1] = 1;
	}

	pa_stream_drop(stream);
}
/*
 * PulseAudio read callback: peek the captured fragment and hand it to the
 * sink's samples_callback (when active), then release the fragment.
 */
void AudioSinksManager::InternalAudioSink::stream_read_callback(pa_stream* /*stream*/,
                                                                size_t /*nbytes*/,
                                                                void* userdata) {
    AudioSinksManager::InternalAudioSink* sink =
            static_cast<AudioSinksManager::InternalAudioSink*>(userdata);
    const void* data;
    size_t data_size;
    if (pa_stream_peek(sink->stream, &data, &data_size) < 0) {
        sink->manager->logger->error("(AudioSink '{}') Failed to read data from stream: {}",
                                     sink->name, sink->manager->get_pa_error());
        return;
    }
    if (data_size % sizeof(AudioSample) != 0) {
        // FIX: the '{}' placeholder had no matching argument
        sink->manager->logger->warn("(AudioSink '{}') Not rounded sample data in buffer",
                                    sink->name);
    }
    if (data_size == 0) {
        // Nothing peeked; per the PulseAudio API there is nothing to drop
        return;
    } else if (data == NULL) {
        // Hole in the record stream (NULL data with non-zero size)
        // FIX: the '{}' placeholder had no matching argument
        sink->manager->logger->trace("(AudioSink '{}') There is a hole in a record stream!",
                                     sink->name);
    }
    // NOTE(review): on a hole, `data` is NULL and is still passed to the
    // callback below, matching the original behavior — confirm consumers
    // handle a NULL sample pointer
    if (sink->samples_callback && sink->activated) {
        sink->samples_callback(static_cast<const AudioSample*>(data),
                               data_size / sizeof(AudioSample));
    }
    if (pa_stream_drop(sink->stream) < 0) {
        sink->manager->logger->error("(AudioSink '{}') Failed to drop data from stream: {}",
                                     sink->name, sink->manager->get_pa_error());
    }
}
/**
 * Callback for pulse which gets executed when new audio data is available
 *
 * Peeks one fragment, timestamps it from the running sample count, and
 * forwards it to the OBS source once the startup timeout has elapsed.
 *
 * @warning The function may be called even after disconnecting the stream
 */
static void pulse_stream_read(pa_stream *p, size_t nbytes, void *userdata)
{
	UNUSED_PARAMETER(p);
	UNUSED_PARAMETER(nbytes);
	PULSE_DATA(userdata);

	const void *frames;
	size_t bytes;

	/* stream may already be torn down when this fires */
	if (!data->stream)
		goto exit;

	pa_stream_peek(data->stream, &frames, &bytes);

	// check if we got data
	if (!bytes)
		goto exit;

	/* NULL frames with non-zero bytes marks a hole: discard it */
	if (!frames) {
		blog(LOG_ERROR, "Got audio hole of %u bytes",
			(unsigned int) bytes);
		pa_stream_drop(data->stream);
		goto exit;
	}

	struct obs_source_audio out;
	out.speakers        = data->speakers;
	out.samples_per_sec = data->samples_per_sec;
	out.format          = pulse_to_obs_audio_format(data->format);
	out.data[0]         = (uint8_t *) frames;
	out.frames          = bytes / data->bytes_per_frame;
	out.timestamp       = get_sample_time(out.frames,
	                                      out.samples_per_sec);

	/* suppress output during the startup window to avoid glitches */
	if (!data->first_ts)
		data->first_ts = out.timestamp + STARTUP_TIMEOUT_NS;

	if (out.timestamp > data->first_ts)
		obs_source_output_audio(data->source, &out);

	data->packets++;
	data->frames += out.frames;

	pa_stream_drop(data->stream);
exit:
	/* always signal the iterating thread, even on early exit */
	pulse_signal(0);
}
/*
 * Copy `samples` captured frames into `buffer`, consuming PulseAudio
 * fragments as needed.  Partially-consumed fragments persist in
 * data->cap_store/cap_len/cap_remain between calls.  On disconnect or
 * peek failure the remainder of the buffer is filled with silence.
 *
 * Always returns ALC_NO_ERROR; errors are reported via device disconnect.
 */
static ALCenum pulse_capture_samples(ALCdevice *device, ALCvoid *buffer, ALCuint samples)
{
    pulse_data *data = device->ExtraData;
    ALCuint todo = samples * pa_frame_size(&data->spec);

    pa_threaded_mainloop_lock(data->loop);
    /* Capture is done in fragment-sized chunks, so we loop until we get all
     * that's available */
    data->last_readable -= todo;
    while(todo > 0)
    {
        size_t rem = todo;

        /* need a fresh fragment */
        if(data->cap_len == 0)
        {
            pa_stream_state_t state;

            state = pa_stream_get_state(data->stream);
            if(!PA_STREAM_IS_GOOD(state))
            {
                aluHandleDisconnect(device);
                break;
            }
            if(pa_stream_peek(data->stream, &data->cap_store, &data->cap_len) < 0)
            {
                ERR("pa_stream_peek() failed: %s\n",
                    pa_strerror(pa_context_errno(data->context)));
                aluHandleDisconnect(device);
                break;
            }
            data->cap_remain = data->cap_len;
        }
        /* copy no more than what is left of the current fragment */
        if(rem > data->cap_remain)
            rem = data->cap_remain;

        memcpy(buffer, data->cap_store, rem);

        buffer = (ALbyte*)buffer + rem;
        todo -= rem;

        data->cap_store = (ALbyte*)data->cap_store + rem;
        data->cap_remain -= rem;
        /* fragment fully consumed: release it */
        if(data->cap_remain == 0)
        {
            pa_stream_drop(data->stream);
            data->cap_len = 0;
        }
    }
    /* pad any shortfall with silence (0x80 is the unsigned-byte midpoint) */
    if(todo > 0)
        memset(buffer, ((device->FmtType==DevFmtUByte) ? 0x80 : 0), todo);
    pa_threaded_mainloop_unlock(data->loop);

    return ALC_NO_ERROR;
}
/*
 * PulseAudio read callback for audio input redirection.
 *
 * Accumulates captured frames into pulse->buffer until a full packet
 * (frames_per_packet) is available, then encodes it (IMA ADPCM when
 * format == 0x11, raw PCM otherwise) and delivers it via pulse->receive.
 */
static void audin_pulse_stream_request_callback(pa_stream* stream, size_t length, void* userdata)
{
	int frames;
	int cframes;
	tbool ret;
	const void* data;
	const uint8* src;
	int encoded_size;
	uint8* encoded_data;
	AudinPulseDevice* pulse = (AudinPulseDevice*) userdata;

	/* NOTE(review): peek result unchecked; a hole would leave data NULL
	 * with length > 0 and memcpy below would read from NULL — confirm
	 * holes cannot occur here */
	pa_stream_peek(stream, &data, &length);
	frames = length / pulse->bytes_per_frame;

	DEBUG_DVC("length %d frames %d", (int) length, frames);

	src = (const uint8*) data;
	while (frames > 0)
	{
		/* frames still needed to complete the current packet */
		cframes = pulse->frames_per_packet - pulse->buffer_frames;
		if (cframes > frames)
			cframes = frames;
		memcpy(pulse->buffer + pulse->buffer_frames * pulse->bytes_per_frame,
			src, cframes * pulse->bytes_per_frame);
		pulse->buffer_frames += cframes;
		if (pulse->buffer_frames >= pulse->frames_per_packet)
		{
			if (pulse->format == 0x11)
			{
				/* 0x11 = WAVE_FORMAT_DVI_ADPCM (IMA ADPCM) */
				encoded_data = dsp_encode_ima_adpcm(&pulse->adpcm,
					pulse->buffer, pulse->buffer_frames * pulse->bytes_per_frame,
					pulse->sample_spec.channels, pulse->block_size, &encoded_size);
				DEBUG_DVC("encoded %d to %d",
					pulse->buffer_frames * pulse->bytes_per_frame, encoded_size);
			}
			else
			{
				/* raw PCM: hand the accumulation buffer over directly */
				encoded_data = pulse->buffer;
				encoded_size = pulse->buffer_frames * pulse->bytes_per_frame;
			}

			ret = pulse->receive(encoded_data, encoded_size, pulse->user_data);
			pulse->buffer_frames = 0;
			/* only free if the encoder allocated a new buffer */
			if (encoded_data != pulse->buffer)
				xfree(encoded_data);
			if (!ret)
				break;
		}
		src += cframes * pulse->bytes_per_frame;
		frames -= cframes;
	}
	pa_stream_drop(stream);
}
/*
 * Blocking read of exactly `length` bytes of captured audio into `data`.
 *
 * Mirrors pa_simple_read(): holds the threaded mainloop lock, waits for
 * fragments, and keeps leftover fragment state in p->read_data /
 * p->read_length / p->read_index between calls.
 *
 * Returns 0 on success, -1 on failure (*rerror receives the error code
 * via the CHECK_* macros).
 */
static int qpa_simple_read (PAVoiceIn *p, void *data, size_t length, int *rerror)
{
    paaudio *g = p->g;

    pa_threaded_mainloop_lock (g->mainloop);

    CHECK_DEAD_GOTO (g, p->stream, rerror, unlock_and_fail);

    while (length > 0) {
        size_t l;

        /* acquire a fragment if none is held over from a prior call */
        while (!p->read_data) {
            int r;

            r = pa_stream_peek (p->stream, &p->read_data, &p->read_length);
            CHECK_SUCCESS_GOTO (g, rerror, r == 0, unlock_and_fail);

            if (!p->read_data) {
                /* nothing buffered yet: wait for the read callback
                 * (NOTE(review): a hole would also land here and wait
                 * instead of being dropped — confirm holes cannot occur) */
                pa_threaded_mainloop_wait (g->mainloop);
                CHECK_DEAD_GOTO (g, p->stream, rerror, unlock_and_fail);
            } else {
                p->read_index = 0;
            }
        }

        /* copy as much of the fragment as the caller still wants */
        l = p->read_length < length ? p->read_length : length;
        memcpy (data, (const uint8_t *) p->read_data+p->read_index, l);

        data = (uint8_t *) data + l;
        length -= l;

        p->read_index += l;
        p->read_length -= l;

        /* fragment fully consumed: release it and clear leftover state */
        if (!p->read_length) {
            int r;

            r = pa_stream_drop (p->stream);
            p->read_data = NULL;
            p->read_length = 0;
            p->read_index = 0;

            CHECK_SUCCESS_GOTO (g, rerror, r == 0, unlock_and_fail);
        }
    }

    pa_threaded_mainloop_unlock (g->mainloop);
    return 0;

 unlock_and_fail:
    pa_threaded_mainloop_unlock (g->mainloop);
    return -1;
}
/* This is called whenever new data may is available.
 *
 * Peeks the captured fragment and feeds it as float PCM to the projectM
 * main window.  The legacy intermediate-buffer path is commented out; the
 * `buffer` overrun check remains from that scheme. */
void QPulseAudioThread::stream_read_callback ( pa_stream *s, size_t length, void *userdata )
{

	const void *data;
	assert ( s && length );

	if ( stdio_event )
		mainloop_api->io_enable ( stdio_event, PA_IO_EVENT_OUTPUT );

	if ( pa_stream_peek ( s, &data, &length ) < 0 )
	{
		fprintf ( stderr, "pa_stream_peek() failed: %s\n",
			pa_strerror ( pa_context_errno ( context ) ) );
		pulseQuit ( 1 );
		return ;
	}

	/* window gone: nothing to feed (fragment intentionally left peeked?
	 * NOTE(review): this path returns without pa_stream_drop — verify) */
	if ((!s_qprojectM_MainWindowPtr) || (!*s_qprojectM_MainWindowPtr))
		return;

	/* NOTE(review): a hole (data == NULL with length > 0) would trip
	 * this assert — confirm holes cannot occur on this stream */
	assert ( data && length );

	if ( buffer )
	{
		fprintf ( stderr, "Buffer overrun, dropping incoming data\n" );
		if ( pa_stream_drop ( s ) < 0 )
		{
			fprintf ( stderr, "pa_stream_drop() failed: %s\n",
				pa_strerror ( pa_context_errno ( context ) ) );
			pulseQuit ( 1 );
		}
		return;
	}

	(*s_qprojectM_MainWindowPtr)->addPCM( (float*)data,
		length / ( sizeof ( float ) ) );

	//buffer = ( float* ) pa_xmalloc ( buffer_length = length );
	//memcpy ( buffer, data, length );

	buffer_index = 0;
	pa_stream_drop ( s );
}
/* Read callback used by the test: discard every readable fragment.
 * We never look at the payload — peek and immediately drop until the
 * stream reports nothing left to read. */
static void stream_read_cb(pa_stream *p, size_t nbytes, void *userdata) {
    const void *chunk;

    for (;;) {
        pa_assert_se((nbytes = pa_stream_readable_size(p)) != (size_t) -1);

        if (nbytes == 0)
            break;

        fail_unless(pa_stream_peek(p, &chunk, &nbytes) == 0);
        fail_unless(pa_stream_drop(p) == 0);
    }
}
static int instream_end_read_pa(struct SoundIoPrivate *si, struct SoundIoInStreamPrivate *is) { struct SoundIoInStream *instream = &is->pub; struct SoundIoInStreamPulseAudio *ispa = &is->backend_data.pulseaudio; pa_stream *stream = ispa->stream; // hole if (!ispa->peek_buf) { if (pa_stream_drop(stream)) return SoundIoErrorStreaming; return 0; } size_t advance_bytes = ispa->read_frame_count * instream->bytes_per_frame; ispa->peek_buf_index += advance_bytes; ispa->peek_buf_frames_left -= ispa->read_frame_count; if (ispa->peek_buf_index >= ispa->peek_buf_size) { if (pa_stream_drop(stream)) return SoundIoErrorStreaming; ispa->peek_buf = NULL; } return 0; }
/**
 * Pulseaudio callback when new data is available.
 *
 * Appends the captured fragment to the global transmit_buffer (growing it
 * with pa_xrealloc) and invokes packetizer() to flush complete packets.
 */
static void
stream_read_callback (pa_stream * s,
		      size_t length,
		      void *userdata)
{
  const void *data;

  /* FIX: size_t arguments must not be passed for "%u"; cast explicitly
   * (the original was undefined behavior where size_t != unsigned int) */
  GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
	      "Got %u/%u bytes of PCM data\n",
	      (unsigned int) length,
	      (unsigned int) pcm_length);

  GNUNET_assert (NULL != s);
  GNUNET_assert (length > 0);
  if (stdio_event)
    mainloop_api->io_enable (stdio_event, PA_IO_EVENT_OUTPUT);

  if (pa_stream_peek (s, (const void **) &data, &length) < 0)
  {
    GNUNET_log (GNUNET_ERROR_TYPE_ERROR,
		_("pa_stream_peek() failed: %s\n"),
		pa_strerror (pa_context_errno (context)));
    quit (1);
    return;
  }
  /* NOTE(review): a hole in the stream (NULL data, length > 0) aborts
   * here — confirm holes cannot occur on this record stream */
  GNUNET_assert (NULL != data);
  GNUNET_assert (length > 0);
  if (NULL != transmit_buffer)
  {
    /* grow the pending buffer and append */
    transmit_buffer = pa_xrealloc (transmit_buffer,
				   transmit_buffer_length + length);
    memcpy (&transmit_buffer[transmit_buffer_length],
	    data,
	    length);
    transmit_buffer_length += length;
  }
  else
  {
    /* first fragment: allocate fresh */
    transmit_buffer = pa_xmalloc (length);
    memcpy (transmit_buffer, data, length);
    transmit_buffer_length = length;
    transmit_buffer_index = 0;
  }
  pa_stream_drop (s);
  packetizer ();
}
/*
 * PulseAudio read callback: wrap the peeked fragment into a VLC block,
 * timestamp it with the current time corrected by stream latency, and
 * send it downstream.
 */
static void stream_read_cb(pa_stream *s, size_t length, void *userdata)
{
    demux_t *demux = userdata;
    demux_sys_t *sys = demux->p_sys;
    const void *ptr;

    if (pa_stream_peek(s, &ptr, &length) < 0) {
        vlc_pa_error(demux, "cannot peek stream", sys->context);
        return;
    }

    /* FIX: compute the sample count from the length actually peeked;
     * the original divided the pre-peek callback length, which may not
     * match the fragment returned by pa_stream_peek() */
    unsigned samples = length / sys->framesize;

    /* FIX: a hole (NULL ptr with non-zero length) must be skipped, not
     * memcpy'd from NULL; flag a discontinuity for the next block */
    if (ptr == NULL) {
        if (length > 0) {
            sys->discontinuity = true;
            pa_stream_drop(s);
        }
        return;
    }

    mtime_t pts = mdate();

    pa_usec_t latency;
    int negative;
    if (pa_stream_get_latency(s, &latency, &negative) < 0) {
        vlc_pa_error(demux, "cannot determine latency", sys->context);
        return;
    }
    /* record latency: data was captured `latency` ago (or ahead if negative) */
    if (negative)
        pts += latency;
    else
        pts -= latency;

    es_out_Control(demux->out, ES_OUT_SET_PCR, pts);
    if (unlikely(sys->es == NULL))
        goto race;

    block_t *block = block_Alloc(length);
    if (likely(block != NULL)) {
        memcpy(block->p_buffer, ptr, length);
        block->i_nb_samples = samples;
        block->i_dts = block->i_pts = pts;
        if (sys->discontinuity) {
            block->i_flags |= BLOCK_FLAG_DISCONTINUITY;
            sys->discontinuity = false;
        }
        es_out_Send(demux->out, sys->es, block);
    } else
        /* allocation failed: mark the gap so the next block carries it */
        sys->discontinuity = true;

race:
    pa_stream_drop(s);
}
/*
 * PulseAudio capture callback: de-interleave the stereo float fragment
 * into the two per-channel ring buffers, update peak meters, and invoke
 * the user's process callback.
 *
 * Sample math assumes 32-bit float stereo: length/4 = total samples,
 * length/4/2 = frames, length/2 = bytes per de-interleaved channel.
 */
static void kradpulse_capture_cb(pa_stream *stream, size_t length, void *userdata) {

	krad_pulse_t *kradpulse = (krad_pulse_t *)userdata;

	pa_usec_t usec;
	int neg;

	const void *samples;
	int c, s;

	pa_stream_get_latency(stream, &usec, &neg);
	//printf(" latency %8d us wanted %d frames\n", (int)usec, length / 4 / 2 );

	pa_stream_peek(stream, &samples, &length);

	/* only consume the fragment when both channel rings have room;
	 * NOTE(review): otherwise the fragment is never dropped and will be
	 * re-presented — confirm this backpressure behavior is intended */
	if ((krad_ringbuffer_write_space (kradpulse->kradaudio->input_ringbuffer[1]) >= length / 2 ) &&
	    (krad_ringbuffer_write_space (kradpulse->kradaudio->input_ringbuffer[0]) >= length / 2 )) {

		memcpy(kradpulse->capture_interleaved_samples, samples, length);
		pa_stream_drop(stream);

		/* de-interleave L/R into per-channel arrays */
		for (s = 0; s < length / 4 / 2; s++) {
			for (c = 0; c < 2; c++) {
				kradpulse->capture_samples[c][s] = kradpulse->capture_interleaved_samples[s * 2 + c];
			}
		}

		for (c = 0; c < 2; c++) {
			krad_ringbuffer_write (kradpulse->kradaudio->input_ringbuffer[c],
				(char *)kradpulse->capture_samples[c], (length / 2) );
		}

		/* NOTE(review): peak metering indexes the *interleaved* buffer
		 * at [c] — looks like it should use capture_samples[c]; verify */
		for (c = 0; c < 2; c++) {
			compute_peak(kradpulse->kradaudio, KINPUT,
				&kradpulse->capture_interleaved_samples[c], c, length / 4 / 2 , 1);
		}
	}

	if (kradpulse->kradaudio->process_callback != NULL) {
		kradpulse->kradaudio->process_callback(length / 4 / 2, kradpulse->kradaudio->userdata);
	}
}
/*
 * PulseAudio read callback bridging to Java: peek the newest fragment,
 * take the last float sample as the level (clamped to [0, 1]), and invoke
 * the stored Runnable-like Java callback with it.
 *
 * FIX: the function must *match* pa_stream_request_cb_t — i.e. return
 * void — not be declared as returning that function-pointer type (the
 * original also used bare `return;` in a non-void function, which is
 * invalid C).
 */
void read_cb(pa_stream *stream, size_t nbytes, void* userdata) {
	JNIEnv *env;
	jclass cls;
	jmethodID mid;
	jenv_status_t status;

	const void *data;
	double v;

	jni_pa_cb_info_t *cbdata = (jni_pa_cb_info_t*)userdata;

	if (cbdata->cb_runnable == NULL) {
		return;
	}

	if ((status = get_jnienv(&env)) == JENV_UNSUCCESSFUL) {
		return;
	}

	if (pa_stream_peek(stream, &data, &nbytes) < 0) {
		LOGE("Peek error.");
		/* FIX: the original leaked the attached JNIEnv on this path */
		detach_jnienv(status);
		return;
	}

	/* FIX: the original asserted on an undeclared identifier `length`;
	 * the callback's size parameter is `nbytes` */
	assert(nbytes > 0);
	assert(nbytes % sizeof(float) == 0);

	/* most recent sample in the fragment */
	v = ((const float*) data)[nbytes / sizeof(float) -1];

	pa_stream_drop(stream);

	if (v < 0) v = 0;
	if (v > 1) v = 1;

	if ((cls = (*env)->GetObjectClass(env, cbdata->cb_runnable))) {
		if ((mid = (*env)->GetMethodID(env, cls, "run", "(D)V"))) {
			// Run the actual Java callback method
			(*env)->CallVoidMethod(env, cbdata->cb_runnable, mid, v);
		}
	}

	detach_jnienv(status);
}
/*
 * Filter process hook: drain all available capture fragments from the
 * PulseAudio stream and push each one downstream as an mblk_t.
 */
static void pulse_read_process(MSFilter *f){
	PulseReadState *s=(PulseReadState *)f->data;
	const void *buffer=NULL;
	size_t nbytes=0;

	if (s->stream!=NULL){
		/* the stream runs on the shared threaded mainloop */
		pa_threaded_mainloop_lock(pa_loop);
		while (pa_stream_peek(s->stream,&buffer,&nbytes)==0 && nbytes>0){
			mblk_t *om;
			om=allocb(nbytes,0);
			memcpy(om->b_wptr,buffer,nbytes);
			om->b_wptr+=nbytes;
			ms_queue_put(f->outputs[0],om);
			/* reset so the loop condition reflects the next peek */
			nbytes=0;
			pa_stream_drop(s->stream);
		}
		pa_threaded_mainloop_unlock(pa_loop);
	}
}
/*
 * Periodic execution hook: block (under the mainloop lock) until a capture
 * fragment is available, copy it into the output port data, timestamp it,
 * and write it out.
 *
 * Always returns RTC::RTC_OK.
 */
RTC::ReturnCode_t PulseAudioInput::onExecute(RTC::UniqueId ec_id)
{
  RTC_DEBUG(("onExecute start"));
  m_mutex.lock();
  RTC_DEBUG(("onExecute:mutex lock"));
  if( m_simple ) {
    int r;
    simple_recast *psimple = (simple_recast *)m_simple;
    pa_threaded_mainloop_lock( psimple->mainloop );
    RTC_DEBUG(("pa_threaded_mainloop_lock()"));

    /* wait until pa_stream_peek yields a fragment */
    while ( !psimple->read_data ) {
      r = pa_stream_peek( psimple->stream, &psimple->read_data, &psimple->read_length );
      if ( !psimple->read_data ) {
        RTC_DEBUG(("pa_stream_peek():no readable data. wait start."));
        pa_threaded_mainloop_wait(psimple->mainloop);
      }
    }

    m_out_data.data.length( psimple->read_length );  //!< set outport data length
    memcpy((void *)&(m_out_data.data[0]), (const uint8_t*) psimple->read_data, psimple->read_length);

    /* fragment copied out: release it and clear the leftover state */
    r = pa_stream_drop( psimple->stream );
    if ( r < 0 ) {
      RTC_WARN(("pa_stream_drop():capture stream drop failed."));
    }
    psimple->read_data = NULL;
    psimple->read_length = 0;
    psimple->read_index = 0;

    setTimestamp( m_out_data );
    m_out_dataOut.write();
    RTC_DEBUG(("AudioDataOut port:ON_BUFFER_WRITE"));

    pa_threaded_mainloop_unlock( psimple->mainloop );
    RTC_DEBUG(("pa_threaded_mainloop_unlock()"));
  }
  m_mutex.unlock();
  RTC_DEBUG(("onExecute:mutex unlock"));
  RTC_DEBUG(("onExecute finish"));
  return RTC::RTC_OK;
}
/*
 * Monitor-stream read callback: peek the fragment, report the most recent
 * float sample as the current level, clamped to [0, 1].
 */
void on_monitor_read_callback(pa_stream *p, size_t length, void *userdata)
{
	const void *data;
	double v;

	/* FIX: length is size_t and must be printed with %zu; the
	 * original "%d" was undefined behavior where size_t != int */
	printf("read callback length: %zu\n", length);

	if (pa_stream_peek(p, &data, &length) < 0) {
		printf("Failed to read data from stream\n");
		return;
	}

	assert(length > 0);
	assert(length % sizeof(float) == 0);

	/* last sample of the fragment is the most recent level value */
	v = ((const float*) data)[length / sizeof(float) -1];

	pa_stream_drop(p);

	if (v < 0) v = 0;
	if (v > 1) v = 1;

	printf("\tread callback peek: %f\n", v);
}
/*
 * Read exactly `length` bytes of captured audio into `data`, blocking on
 * the threaded mainloop as needed.  Leftover fragment state is carried in
 * pulsesrc->read_buffer / read_buffer_length across calls.
 *
 * Returns the number of bytes copied, or (guint)-1 on pause/death/error.
 */
static guint
gst_pulsesrc_read (GstAudioSrc * asrc, gpointer data, guint length)
{
  GstPulseSrc *pulsesrc = GST_PULSESRC_CAST (asrc);
  size_t sum = 0;

  pa_threaded_mainloop_lock (pulsesrc->mainloop);
  pulsesrc->in_read = TRUE;

  if (pulsesrc->paused)
    goto was_paused;

  while (length > 0) {
    size_t l;

    GST_LOG_OBJECT (pulsesrc, "reading %u bytes", length);

    /*check if we have a leftover buffer */
    if (!pulsesrc->read_buffer) {
      for (;;) {
        if (gst_pulsesrc_is_dead (pulsesrc, TRUE))
          goto unlock_and_fail;

        /* read all available data, we keep a pointer to the data and the length
         * and take from it what we need. */
        if (pa_stream_peek (pulsesrc->stream, &pulsesrc->read_buffer,
                &pulsesrc->read_buffer_length) < 0)
          goto peek_failed;

        GST_LOG_OBJECT (pulsesrc, "have data of %" G_GSIZE_FORMAT " bytes",
            pulsesrc->read_buffer_length);

        /* if we have data, process if */
        if (pulsesrc->read_buffer && pulsesrc->read_buffer_length)
          break;

        /* now wait for more data to become available */
        GST_LOG_OBJECT (pulsesrc, "waiting for data");
        pa_threaded_mainloop_wait (pulsesrc->mainloop);

        if (pulsesrc->paused)
          goto was_paused;
      }
    }

    /* copy no more than the caller asked for */
    l = pulsesrc->read_buffer_length >
        length ? length : pulsesrc->read_buffer_length;

    memcpy (data, pulsesrc->read_buffer, l);

    pulsesrc->read_buffer = (const guint8 *) pulsesrc->read_buffer + l;
    pulsesrc->read_buffer_length -= l;

    data = (guint8 *) data + l;
    length -= l;
    sum += l;

    if (pulsesrc->read_buffer_length <= 0) {
      /* we copied all of the data, drop it now */
      if (pa_stream_drop (pulsesrc->stream) < 0)
        goto drop_failed;

      /* reset pointer to data */
      pulsesrc->read_buffer = NULL;
      pulsesrc->read_buffer_length = 0;
    }
  }

  pulsesrc->in_read = FALSE;
  pa_threaded_mainloop_unlock (pulsesrc->mainloop);

  return sum;

  /* ERRORS */
was_paused:
  {
    GST_LOG_OBJECT (pulsesrc, "we are paused");
    goto unlock_and_fail;
  }
peek_failed:
  {
    GST_ELEMENT_ERROR (pulsesrc, RESOURCE, FAILED,
        ("pa_stream_peek() failed: %s",
            pa_strerror (pa_context_errno (pulsesrc->context))), (NULL));
    goto unlock_and_fail;
  }
drop_failed:
  {
    GST_ELEMENT_ERROR (pulsesrc, RESOURCE, FAILED,
        ("pa_stream_drop() failed: %s",
            pa_strerror (pa_context_errno (pulsesrc->context))), (NULL));
    goto unlock_and_fail;
  }
unlock_and_fail:
  {
    pulsesrc->in_read = FALSE;
    pa_threaded_mainloop_unlock (pulsesrc->mainloop);
    return (guint) - 1;
  }
}
/*
 * The callback will record when mute isn't activated and:
 * - it'll always record the first SILENCE_BREAKPOINTS events that are
 *   above the low_point but below the high_point (these are normally the
 *   trailing of a utterance).
 * - it'll always record if the event is well below the high_level
 *   (these are normally the utterances)
 *
 * The counter_activity (which is one of the responsible to record after all,
 * see the above comment) is reseted mainly by a detected high_point utterance
 * or when we have detected a long streak of idle.
 *
 * The callback will split an utterance when nothing interesting has been
 * said in the last HOT_ZONE seconds.
 */
static void stream_request_cb(pa_stream *stream, size_t length, void *userdata)
{
	const void *data;
	size_t size = 0;
	double power, low_point, high_point;
	int retval, retries;
	time_t current_time;
	recorder_context_t *rctx = (recorder_context_t *) userdata;

	/* output files were rotated elsewhere: reopen, retrying a few times */
	if (rctx->dirty_filename){
		fclose(rctx->recording_file);
		fclose(rctx->length_file);

		retries = 0;
		do{
			retval = init_filenames(rctx);
			retries++;
		} while(retval != 0 && retries < 20);

		if (retries == 20){
			Log(LOG_ERR, "There was some nasty problems with the opening of %s file.\n",
			    rctx->filename);
			stop_recording(rctx, false);
		}
		rctx->dirty_filename = false;
	}

	if (!rctx->mute){
		pa_stream_peek(stream, &data, &size);

		rctx->is_recording = false;
		/* RMS of this fragment vs. thresholds derived from the
		 * adaptive baseline */
		power = calculate_rms_power(data, size);
		low_point = rctx->threshold * LOW_BREAKPOINT;
		high_point = rctx->threshold * HIGH_BREAKPOINT;
		rctx->total_activity++;

		if (data){
			if (power >= low_point){
				/* record trailing silence up to the breakpoint,
				 * or anything loud enough */
				if (rctx->counter_silence < SILENCE_BREAKPOINT || power > high_point){
					rctx->counter_idle = 0;

					/* split the utterance when quiet for HOT_ZONE */
					current_time = time(NULL);
					if (difftime(current_time, rctx->timestamp) >= HOT_ZONE){
						if (is_interesting(rctx)){
							dump(rctx);
							rctx->high_activity = rctx->total_activity = 0;
						}
						rctx->timestamp = current_time;
					}

					if (power <= high_point){
						rctx->counter_silence++;
					}else{
						rctx->counter_silence = 0;
						rctx->high_activity++;
					}

					rctx->is_recording = true;
					buffer(rctx, data, size);

					Log(LOG_DEBUG, "-> power: %12.6f[%f, %f] threshold: %f silence: %d idle: %d\n",
					    power, low_point, high_point, rctx->threshold,
					    rctx->counter_silence, rctx->counter_idle);
				}else{
					Log(LOG_DEBUG, "SS power: %12.6f[%f, %f] threshold: %f silence: %d idle: %d\n",
					    power, low_point, high_point, rctx->threshold,
					    rctx->counter_silence, rctx->counter_idle);
				}
			}else{
				/* quiet fragment: count idle; a long idle streak
				 * resets the silence counter */
				rctx->counter_idle = fmin(++rctx->counter_idle, IDLE_BREAKPOINT);
				if (rctx->counter_idle == IDLE_BREAKPOINT)
					rctx->counter_silence = 0;

				Log(LOG_DEBUG, "   power: %12.6f[%f, %f] threshold: %f silence: %d idle: %d\n",
				    power, low_point, high_point, rctx->threshold,
				    rctx->counter_silence, rctx->counter_idle);
			}

			pa_stream_drop(stream);
		}
	}
}
/*
 * PulseAudio read callback for both the microphone and echo (speaker
 * monitor) streams.  Peeks one fragment, re-initializes the mixer when
 * the server-side sample spec changed, and routes the samples to
 * addMic()/addEcho() depending on which stream fired.
 */
void PulseAudioSystem::read_callback(pa_stream *s, size_t bytes, void *userdata) {
	PulseAudioSystem *pas = reinterpret_cast<PulseAudioSystem *>(userdata);

	size_t length = bytes;
	const void *data = NULL;
	pa_stream_peek(s, &data, &length);

	/* classify the peek result: hole, empty queue, or inconsistent state */
	if (data == NULL && length > 0) {
		qWarning("PulseAudio: pa_stream_peek reports no data at current read index.");
	} else if (data == NULL && length == 0) {
		qWarning("PulseAudio: pa_stream_peek reports empty memblockq.");
	} else if (data == NULL || length == 0) {
		/* NOTE(review): given the two branches above, this one can only
		 * trigger for data != NULL && length == 0 — verify intent */
		qWarning("PulseAudio: invalid pa_stream_peek state encountered.");
		return;
	}

	AudioInputPtr ai = g.ai;
	PulseAudioInput *pai = dynamic_cast<PulseAudioInput *>(ai.get());
	if (! pai) {
		/* no input engine: discard the fragment and nudge the loop */
		if (length > 0) {
			pa_stream_drop(s);
		}
		pas->wakeup();
		return;
	}

	const pa_sample_spec *pss = pa_stream_get_sample_spec(s);

	if (s == pas->pasInput) {
		/* re-derive mic format from the server's current sample spec */
		if (!pa_sample_spec_equal(pss, &pai->pssMic)) {
			pai->pssMic = *pss;
			pai->iMicFreq = pss->rate;
			pai->iMicChannels = pss->channels;
			if (pss->format == PA_SAMPLE_FLOAT32NE)
				pai->eMicFormat = PulseAudioInput::SampleFloat;
			else
				pai->eMicFormat = PulseAudioInput::SampleShort;
			pai->initializeMixer();
		}
		if (data != NULL) {
			pai->addMic(data, static_cast<unsigned int>(length) / pai->iMicSampleSize);
		}
	} else if (s == pas->pasSpeaker) {
		/* same for the echo-cancellation (speaker monitor) stream */
		if (!pa_sample_spec_equal(pss, &pai->pssEcho)) {
			pai->pssEcho = *pss;
			pai->iEchoFreq = pss->rate;
			pai->iEchoChannels = pss->channels;
			if (pss->format == PA_SAMPLE_FLOAT32NE)
				pai->eEchoFormat = PulseAudioInput::SampleFloat;
			else
				pai->eEchoFormat = PulseAudioInput::SampleShort;
			pai->initializeMixer();
		}
		if (data != NULL) {
			pai->addEcho(data, static_cast<unsigned int>(length) / pai->iEchoSampleSize);
		}
	}

	/* drop both real fragments and holes (length > 0) */
	if (length > 0) {
		pa_stream_drop(s);
	}
}
/* This is called whenever new data may is available.
 *
 * Raw mode appends fragments to the in-memory `buffer` (holes become
 * silence except for passthrough streams, where they are skipped).
 * File mode writes fragments straight to the sndfile, substituting a
 * cached silence buffer for holes on non-passthrough streams. */
static void stream_read_callback(pa_stream *s, size_t length, void *userdata) {
    pa_assert(s);
    pa_assert(length > 0);

    if (raw) {
        pa_assert(!sndfile);

        if (stdio_event)
            mainloop_api->io_enable(stdio_event, PA_IO_EVENT_OUTPUT);

        /* drain every readable fragment, not just the first */
        while (pa_stream_readable_size(s) > 0) {
            const void *data;

            if (pa_stream_peek(s, &data, &length) < 0) {
                pa_log(_("pa_stream_peek() failed: %s"), pa_strerror(pa_context_errno(context)));
                quit(1);
                return;
            }

            pa_assert(length > 0);

            /* If there is a hole in the stream, we generate silence, except
             * if it's a passthrough stream in which case we skip the hole. */
            if (data || !(flags & PA_STREAM_PASSTHROUGH)) {
                buffer = pa_xrealloc(buffer, buffer_length + length);
                if (data)
                    memcpy((uint8_t *) buffer + buffer_length, data, length);
                else
                    pa_silence_memory((uint8_t *) buffer + buffer_length, length, &sample_spec);

                buffer_length += length;
            }

            pa_stream_drop(s);
        }
    } else {
        pa_assert(sndfile);

        while (pa_stream_readable_size(s) > 0) {
            sf_count_t bytes;
            const void *data;

            if (pa_stream_peek(s, &data, &length) < 0) {
                pa_log(_("pa_stream_peek() failed: %s"), pa_strerror(pa_context_errno(context)));
                quit(1);
                return;
            }

            pa_assert(length > 0);

            /* passthrough: skip holes entirely */
            if (!data && (flags & PA_STREAM_PASSTHROUGH)) {
                pa_stream_drop(s);
                continue;
            }

            /* grow the cached silence buffer if this hole is bigger
             * than any seen so far */
            if (!data && length > silence_buffer_length) {
                silence_buffer = pa_xrealloc(silence_buffer, length);
                pa_silence_memory((uint8_t *) silence_buffer + silence_buffer_length,
                                  length - silence_buffer_length, &sample_spec);
                silence_buffer_length = length;
            }

            if (writef_function) {
                /* frame-oriented writer: convert bytes to frames */
                size_t k = pa_frame_size(&sample_spec);

                if ((bytes = writef_function(sndfile, data ? data : silence_buffer, (sf_count_t) (length/k))) > 0)
                    bytes *= (sf_count_t) k;

            } else
                bytes = sf_write_raw(sndfile, data ? data : silence_buffer, (sf_count_t) length);

            if (bytes < (sf_count_t) length)
                quit(1);

            pa_stream_drop(s);
        }
    }
}
/* This is called whenever new data may is available.
 *
 * Older parec variant: raw mode appends fragments to the in-memory
 * `buffer`; file mode writes straight to the sndfile.  Unlike the newer
 * version, holes are not tolerated (pa_assert(data) aborts on one). */
static void stream_read_callback(pa_stream *s, size_t length, void *userdata) {
    pa_assert(s);
    pa_assert(length > 0);

    if (raw) {
        pa_assert(!sndfile);

        if (stdio_event)
            mainloop_api->io_enable(stdio_event, PA_IO_EVENT_OUTPUT);

        /* drain every readable fragment, not just the first */
        while (pa_stream_readable_size(s) > 0) {
            const void *data;

            if (pa_stream_peek(s, &data, &length) < 0) {
                pa_log(_("pa_stream_peek() failed: %s"), pa_strerror(pa_context_errno(context)));
                quit(1);
                return;
            }

            /* NOTE(review): a hole (NULL data) aborts here — confirm
             * holes cannot occur on this record stream */
            pa_assert(data);
            pa_assert(length > 0);

            if (buffer) {
                /* append to pending output */
                buffer = pa_xrealloc(buffer, buffer_length + length);
                memcpy((uint8_t*) buffer + buffer_length, data, length);
                buffer_length += length;
            } else {
                /* first fragment: allocate fresh */
                buffer = pa_xmalloc(length);
                memcpy(buffer, data, length);
                buffer_length = length;
                buffer_index = 0;
            }

            pa_stream_drop(s);
        }
    } else {
        pa_assert(sndfile);

        while (pa_stream_readable_size(s) > 0) {
            sf_count_t bytes;
            const void *data;

            if (pa_stream_peek(s, &data, &length) < 0) {
                pa_log(_("pa_stream_peek() failed: %s"), pa_strerror(pa_context_errno(context)));
                quit(1);
                return;
            }

            pa_assert(data);
            pa_assert(length > 0);

            if (writef_function) {
                /* frame-oriented writer: convert bytes to frames */
                size_t k = pa_frame_size(&sample_spec);

                if ((bytes = writef_function(sndfile, data, (sf_count_t) (length/k))) > 0)
                    bytes *= (sf_count_t) k;

            } else
                bytes = sf_write_raw(sndfile, data, (sf_count_t) length);

            if (bytes < (sf_count_t) length)
                quit(1);

            pa_stream_drop(s);
        }
    }
}
/*
 * Worker thread to get audio data
 *
 * Connects to the server, sets up the record stream, skips the startup
 * transient, then forwards every peeked fragment to the OBS source until
 * the shutdown event fires.
 *
 * Will run until signaled
 */
static void *pulse_thread(void *vptr)
{
	PULSE_DATA(vptr);

	if (pulse_connect(data) < 0)
		return NULL;
	if (pulse_get_server_info(data) < 0)
		return NULL;
	if (pulse_connect_stream(data) < 0)
		return NULL;

	if (pulse_skip(data) < 0)
		return NULL;

	blog(LOG_DEBUG, "pulse-input: Start recording");

	const void *frames;
	size_t bytes;
	uint64_t pa_time;
	int64_t pa_latency;

	/* constant fields of every output packet */
	struct source_audio out;
	out.speakers = data->speakers;
	out.samples_per_sec = data->samples_per_sec;
	out.format = pulse_to_obs_audio_format(data->format);

	while (os_event_try(data->event) == EAGAIN) {
		pulse_iterate(data);

		pa_stream_peek(data->stream, &frames, &bytes);

		// check if we got data
		if (!bytes)
			continue;

		/* hole in the stream: discard */
		if (!frames) {
			blog(LOG_DEBUG,
				"pulse-input: Got audio hole of %u bytes",
				(unsigned int) bytes);
			pa_stream_drop(data->stream);
			continue;
		}

		if (pa_stream_get_time(data->stream, &pa_time) < 0) {
			blog(LOG_ERROR,
				"pulse-input: Failed to get timing info !");
			pa_stream_drop(data->stream);
			continue;
		}

		pulse_get_stream_latency(data->stream, &pa_latency);

		out.data[0] = (uint8_t *) frames;
		/* NOTE(review): despite the name, frames_to_bytes() is fed a
		 * byte count here and the result is stored as a frame count —
		 * verify the helper actually converts bytes to frames */
		out.frames = frames_to_bytes(data, bytes);

		/* pa_time/pa_latency are in microseconds; timestamp in ns */
		out.timestamp = (pa_time - pa_latency) * 1000;
		obs_source_output_audio(data->source, &out);

		pa_stream_drop(data->stream);
	}

	pulse_diconnect_stream(data);
	pulse_disconnect(data);

	return NULL;
}