virtual boost::unique_future<bool> send(const safe_ptr<core::read_frame>& frame) override
{
    CASPAR_VERIFY(format_desc_.height * format_desc_.width * 4 == static_cast<unsigned>(frame->image_data().size()));

    return executor_.begin_invoke([=]() -> bool
    {
        graph_->set_value("tick-time", tick_timer_.elapsed() * format_desc_.fps * 0.5);
        tick_timer_.restart();
        frame_timer_.restart();

        // AUDIO

        std::vector<int16_t, tbb::cache_aligned_allocator<int16_t>> audio_buffer;

        if (core::needs_rearranging(
                frame->multichannel_view(),
                channel_layout_,
                channel_layout_.num_channels))
        {
            // Rearrange/downmix the incoming audio into this consumer's channel
            // layout before narrowing the samples from 32-bit to 16-bit.
            core::audio_buffer downmixed;
            downmixed.resize(
                    frame->multichannel_view().num_samples() * channel_layout_.num_channels,
                    0);

            auto dest_view = core::make_multichannel_view<int32_t>(
                    downmixed.begin(),
                    downmixed.end(),
                    channel_layout_);

            core::rearrange_or_rearrange_and_mix(
                    frame->multichannel_view(),
                    dest_view,
                    core::default_mix_config_repository());

            audio_buffer = core::audio_32_to_16(downmixed);
        }
        else
        {
            audio_buffer = core::audio_32_to_16(frame->audio_data());
        }

        airsend::add_audio(
                air_send_.get(),
                audio_buffer.data(),
                audio_buffer.size() / channel_layout_.num_channels);

        // VIDEO

        connected_ = airsend::add_frame_bgra(air_send_.get(), frame->image_data().begin());

        graph_->set_text(print());
        graph_->set_value("frame-time", frame_timer_.elapsed() * format_desc_.fps * 0.5);

        return true;
    });
}
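// Illustrative sketch only, not the CasparCG implementation: core::audio_32_to_16
// above presumably narrows 32-bit signed samples to 16-bit by keeping the most
// significant bits. A minimal stand-alone version of that conversion could look
// like this; the function name audio_32_to_16_sketch and the plain std::vector
// return type are assumptions made for the example.
#include <cstdint>
#include <vector>

static std::vector<int16_t> audio_32_to_16_sketch(const std::vector<int32_t>& input)
{
    std::vector<int16_t> result;
    result.reserve(input.size());

    for (int32_t sample : input)
        result.push_back(static_cast<int16_t>(sample >> 16)); // keep the top 16 bits of each sample

    return result;
}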
virtual boost::unique_future<bool> send(const safe_ptr<core::read_frame>& frame) override
{
    // Rearrange/mix the frame's audio into this consumer's channel layout and
    // convert it to 16-bit samples before queueing it for the sound stream.
    auto buffer = std::make_shared<audio_buffer_16>(
            core::audio_32_to_16(core::get_rearranged_and_mixed(
                    frame->multichannel_view(),
                    channel_layout_,
                    channel_layout_.num_channels)));

    // Non-blocking push; if the queue is full the frame is dropped and reported on the graph.
    if (!input_.try_push(std::make_pair(frame, buffer)))
        graph_->set_tag("dropped-frame");

    // Lazily start the SFML sound stream on the first frame.
    if (Status() != Playing && !started_)
    {
        sf::SoundStream::Initialize(2, format_desc_.audio_sample_rate);
        Play();
        started_ = true;
    }

    return wrap_as_future(is_running_.load());
}
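// Illustrative sketch only: the drop-on-full behaviour above relies on a bounded
// queue whose try_push fails instead of blocking when the playback side falls
// behind. Assuming input_ is something along the lines of a
// tbb::concurrent_bounded_queue, the pattern looks roughly like this; frame_t,
// bounded_frame_queue and the choice of capacity are made up for the example.
#include <tbb/concurrent_queue.h>

#include <atomic>
#include <cstddef>

struct frame_t {}; // placeholder for the queued frame/audio pair

class bounded_frame_queue
{
    tbb::concurrent_bounded_queue<frame_t> queue_;
    std::atomic<std::size_t>               dropped_{0};

public:
    explicit bounded_frame_queue(std::size_t capacity)
    {
        queue_.set_capacity(capacity); // try_push fails once this many items are queued
    }

    // Producer side: never block the caller; count and drop on overflow.
    void push(const frame_t& frame)
    {
        if (!queue_.try_push(frame))
            dropped_.fetch_add(1);
    }

    // Consumer side: non-blocking pop, returns false when the queue is empty.
    bool pop(frame_t& frame)
    {
        return queue_.try_pop(frame);
    }

    std::size_t dropped() const { return dropped_.load(); }
};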