static bool audio_frame(struct ff_frame *frame, void *opaque)
{
        struct ffmpeg_source *s = opaque;

        struct obs_source_audio audio_data = {0};
        uint64_t pts;

        // Media ended
        if (frame == NULL)
                return true;

        pts = (uint64_t)(frame->pts * 1000000000.0L);

        int channels = av_frame_get_channels(frame->frame);

        for (int i = 0; i < channels; i++)
                audio_data.data[i] = frame->frame->data[i];

        audio_data.samples_per_sec = frame->frame->sample_rate;
        audio_data.frames = frame->frame->nb_samples;
        audio_data.timestamp = pts;
        audio_data.format = convert_ffmpeg_sample_format(frame->frame->format);
        audio_data.speakers = channels;

        obs_source_output_audio(s->source, &audio_data);

        return true;
}
static OSStatus input_callback(void *data,
                AudioUnitRenderActionFlags *action_flags,
                const AudioTimeStamp *ts_data, UInt32 bus_num, UInt32 frames,
                AudioBufferList *ignored_buffers)
{
        struct coreaudio_data *ca = data;
        OSStatus stat;
        struct source_audio audio;

        stat = AudioUnitRender(ca->unit, action_flags, ts_data, bus_num,
                        frames, ca->buf_list);
        if (!ca_success(stat, ca, "input_callback", "audio retrieval"))
                return noErr;

        for (UInt32 i = 0; i < ca->buf_list->mNumberBuffers; i++)
                audio.data[i] = ca->buf_list->mBuffers[i].mData;

        audio.frames = frames;
        audio.speakers = ca->speakers;
        audio.format = ca->format;
        audio.samples_per_sec = ca->sample_rate;
        audio.timestamp = ts_data->mHostTime;

        obs_source_output_audio(ca->source, &audio);

        UNUSED_PARAMETER(ignored_buffers);
        return noErr;
}
static void *sinewave_thread(void *pdata)
{
        struct sinewave_data *swd = pdata;
        uint64_t last_time = os_gettime_ns();
        uint64_t ts = 0;
        double cos_val = 0.0;
        uint8_t bytes[480];

        while (event_try(swd->event) == EAGAIN) {
                if (!os_sleepto_ns(last_time += 10000000))
                        last_time = os_gettime_ns();

                for (size_t i = 0; i < 480; i++) {
                        cos_val += rate * M_PI_X2;
                        if (cos_val > M_PI_X2)
                                cos_val -= M_PI_X2;

                        double wave = cos(cos_val) * 0.5;
                        bytes[i] = (uint8_t)((wave + 1.0) * 0.5 * 255.0);
                }

                struct source_audio data;
                data.data[0] = bytes;
                data.frames = 480;
                data.speakers = SPEAKERS_MONO;
                data.samples_per_sec = 48000;
                data.timestamp = ts;
                data.format = AUDIO_FORMAT_U8BIT;
                obs_source_output_audio(swd->source, &data);

                ts += 10000000;
        }

        return NULL;
}
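The thread above references rate and M_PI_X2 without defining them; both live elsewhere in the original file. A minimal sketch of what those definitions could look like, assuming a 440 Hz test tone at the 48000 Hz sample rate used above (the actual frequency in the original source may differ):

/* Assumed file-scope definitions for the sinewave thread above; the
 * concrete values are illustrative, not taken from the original file. */
#define M_PI_X2 (M_PI * 2.0)

/* Phase advance per sample: tone frequency divided by sample rate. */
static const double rate = 440.0 / 48000.0;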
int jack_process_callback(jack_nframes_t nframes, void *arg)
{
        struct jack_data *data = (struct jack_data *)arg;
        if (data == 0)
                return 0;

        pthread_mutex_lock(&data->jack_mutex);

        struct obs_source_audio out;
        out.speakers = jack_channels_to_obs_speakers(data->channels);
        out.samples_per_sec = jack_get_sample_rate(data->jack_client);
        /* format is always 32 bit float for jack */
        out.format = AUDIO_FORMAT_FLOAT_PLANAR;

        for (unsigned int i = 0; i < data->channels; ++i) {
                jack_default_audio_sample_t *jack_buffer =
                        (jack_default_audio_sample_t *)jack_port_get_buffer(
                                data->jack_ports[i], nframes);
                out.data[i] = (uint8_t *)jack_buffer;
        }

        out.frames = nframes;
        out.timestamp = os_gettime_ns() -
                        jack_frames_to_time(data->jack_client, nframes);

        obs_source_output_audio(data->source, &out);
        pthread_mutex_unlock(&data->jack_mutex);
        return 0;
}
/**
 * Callback for pulse which gets executed when new audio data is available
 *
 * @warning The function may be called even after disconnecting the stream
 */
static void pulse_stream_read(pa_stream *p, size_t nbytes, void *userdata)
{
        UNUSED_PARAMETER(p);
        UNUSED_PARAMETER(nbytes);
        PULSE_DATA(userdata);

        const void *frames;
        size_t bytes;
        int64_t latency;

        if (!data->stream)
                goto exit;

        pa_stream_peek(data->stream, &frames, &bytes);

        // check if we got data
        if (!bytes)
                goto exit;

        if (!frames) {
                blog(LOG_ERROR, "pulse-input: Got audio hole of %u bytes",
                        (unsigned int) bytes);
                pa_stream_drop(data->stream);
                goto exit;
        }

        if (pulse_get_stream_latency(data->stream, &latency) < 0) {
                blog(LOG_ERROR, "pulse-input: Failed to get timing info !");
                pa_stream_drop(data->stream);
                goto exit;
        }

        struct source_audio out;
        out.speakers = data->speakers;
        out.samples_per_sec = data->samples_per_sec;
        out.format = pulse_to_obs_audio_format(data->format);
        out.data[0] = (uint8_t *) frames;
        out.frames = bytes / data->bytes_per_frame;
        out.timestamp = os_gettime_ns() - (latency * 1000ULL);
        obs_source_output_audio(data->source, &out);

        data->packets++;
        data->frames += out.frames;

        pa_stream_drop(data->stream);

exit:
        pulse_signal(0);
}
/**
 * Callback for pulse which gets executed when new audio data is available
 *
 * @warning The function may be called even after disconnecting the stream
 */
static void pulse_stream_read(pa_stream *p, size_t nbytes, void *userdata)
{
        UNUSED_PARAMETER(p);
        UNUSED_PARAMETER(nbytes);
        PULSE_DATA(userdata);

        const void *frames;
        size_t bytes;

        if (!data->stream)
                goto exit;

        pa_stream_peek(data->stream, &frames, &bytes);

        // check if we got data
        if (!bytes)
                goto exit;

        if (!frames) {
                blog(LOG_ERROR, "Got audio hole of %u bytes",
                        (unsigned int) bytes);
                pa_stream_drop(data->stream);
                goto exit;
        }

        struct obs_source_audio out;
        out.speakers = data->speakers;
        out.samples_per_sec = data->samples_per_sec;
        out.format = pulse_to_obs_audio_format(data->format);
        out.data[0] = (uint8_t *) frames;
        out.frames = bytes / data->bytes_per_frame;
        out.timestamp = get_sample_time(out.frames, out.samples_per_sec);

        if (!data->first_ts)
                data->first_ts = out.timestamp + STARTUP_TIMEOUT_NS;

        if (out.timestamp > data->first_ts)
                obs_source_output_audio(data->source, &out);

        data->packets++;
        data->frames += out.frames;

        pa_stream_drop(data->stream);

exit:
        pulse_signal(0);
}
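get_sample_time() is not shown in the snippet above; it derives the timestamp from the chunk's sample count rather than taking the wall clock at the moment of the callback directly. A hypothetical sketch of such a helper, assuming it anchors the chunk's first sample relative to the current monotonic clock (the plugin's real helper may be implemented differently):

/* Hypothetical stand-in for get_sample_time(): timestamp of the first
 * sample of a chunk = current clock minus the chunk duration.
 * This is an illustrative assumption, not the plugin's actual helper. */
static uint64_t get_sample_time(size_t frames, uint_fast32_t rate)
{
        uint64_t chunk_ns = (uint64_t)frames * 1000000000ULL / rate;

        return os_gettime_ns() - chunk_ns;
}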
void DeckLinkDeviceInstance::HandleAudioPacket(
                IDeckLinkAudioInputPacket *audioPacket,
                const uint64_t timestamp)
{
        if (audioPacket == nullptr)
                return;

        void *bytes;
        if (audioPacket->GetBytes(&bytes) != S_OK) {
                LOG(LOG_WARNING, "Failed to get audio packet data");
                return;
        }

        currentPacket.data[0]   = (uint8_t *)bytes;
        currentPacket.frames    = (uint32_t)audioPacket->GetSampleFrameCount();
        currentPacket.timestamp = timestamp;

        obs_source_output_audio(decklink->GetSource(), &currentPacket);
}
/*
 * Worker thread to get audio data
 *
 * Will run until signaled
 */
static void *pulse_thread(void *vptr)
{
        PULSE_DATA(vptr);

        if (pulse_connect(data) < 0)
                return NULL;
        if (pulse_get_server_info(data) < 0)
                return NULL;
        if (pulse_connect_stream(data) < 0)
                return NULL;
        if (pulse_skip(data) < 0)
                return NULL;

        blog(LOG_DEBUG, "pulse-input: Start recording");

        const void *frames;
        size_t bytes;
        uint64_t pa_time;
        int64_t pa_latency;

        struct source_audio out;
        out.speakers = data->speakers;
        out.samples_per_sec = data->samples_per_sec;
        out.format = pulse_to_obs_audio_format(data->format);

        while (os_event_try(data->event) == EAGAIN) {
                pulse_iterate(data);

                pa_stream_peek(data->stream, &frames, &bytes);

                // check if we got data
                if (!bytes)
                        continue;
                if (!frames) {
                        blog(LOG_DEBUG,
                                "pulse-input: Got audio hole of %u bytes",
                                (unsigned int) bytes);
                        pa_stream_drop(data->stream);
                        continue;
                }

                if (pa_stream_get_time(data->stream, &pa_time) < 0) {
                        blog(LOG_ERROR,
                                "pulse-input: Failed to get timing info !");
                        pa_stream_drop(data->stream);
                        continue;
                }
                pulse_get_stream_latency(data->stream, &pa_latency);

                out.data[0] = (uint8_t *) frames;
                out.frames = frames_to_bytes(data, bytes);
                out.timestamp = (pa_time - pa_latency) * 1000;

                obs_source_output_audio(data->source, &out);

                pa_stream_drop(data->stream);
        }

        pulse_diconnect_stream(data);
        pulse_disconnect(data);

        return NULL;
}
static void get_audio(void *opaque, struct obs_source_audio *a)
{
        struct ffmpeg_source *s = opaque;
        obs_source_output_audio(s->source, a);
}
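Every callback above follows the same pattern: fill a struct obs_source_audio with plane pointers, the frame count, sample rate, sample format, speaker layout, and a nanosecond timestamp, then hand it to obs_source_output_audio(). A minimal sketch of that pattern for a hypothetical source pushing interleaved 16-bit stereo PCM (the helper name and the hard-coded parameters are assumptions, not taken from any plugin above):

/* Hypothetical helper showing the common obs_source_output_audio() pattern.
 * The fixed rate, format and speaker layout are illustrative assumptions. */
static void push_pcm_chunk(obs_source_t *source, int16_t *samples,
                uint32_t frames, uint64_t timestamp_ns)
{
        struct obs_source_audio out = {0};

        out.data[0]         = (uint8_t *)samples;  /* interleaved samples */
        out.frames          = frames;              /* frames per channel  */
        out.samples_per_sec = 48000;
        out.format          = AUDIO_FORMAT_16BIT;
        out.speakers        = SPEAKERS_STEREO;
        out.timestamp       = timestamp_ns;        /* in nanoseconds      */

        obs_source_output_audio(source, &out);
}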