std::future&lt;bool&gt; send(core::const_frame frame) override
{
    CASPAR_VERIFY(format_desc_.height * format_desc_.width * 4 == frame.image_data(0).size());

    graph_->set_value("tick-time", tick_timer_.elapsed() * format_desc_.fps * 0.5);
    tick_timer_.restart();

    caspar::timer frame_timer;

    // Audio
    {
        auto audio_buffer = core::audio_32_to_16(frame.audio_data());

        airsend::add_audio(air_send_.get(),
                           audio_buffer.data(),
                           static_cast&lt;int&gt;(audio_buffer.size()) / format_desc_.audio_channels);
    }

    // Video
    {
        connected_ = airsend::add_frame_bgra(air_send_.get(), frame.image_data(0).begin());
    }

    graph_->set_text(print());
    graph_->set_value("frame-time", frame_timer.elapsed() * format_desc_.fps * 0.5);

    return make_ready_future(true);
}
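// send() above hands airsend 16-bit PCM produced by core::audio_32_to_16. That helper is not
// part of this excerpt; the following is only a sketch of such a conversion, assuming the
// mixer's 32-bit samples carry the signal in their high 16 bits.
#include &lt;cstdint&gt;
#include &lt;iterator&gt;
#include &lt;vector&gt;

// Hypothetical stand-in for core::audio_32_to_16 (not the project's actual definition).
template &lt;typename SampleRange&gt;
std::vector&lt;int16_t&gt; audio_32_to_16_sketch(const SampleRange& samples)
{
    std::vector&lt;int16_t&gt; result;
    result.reserve(std::distance(std::begin(samples), std::end(samples)));

    for (int32_t sample : samples)
        result.push_back(static_cast&lt;int16_t&gt;(sample >> 16)); // keep the most significant 16 bits

    return result;
}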
void configure_filtergraph(
        AVFilterGraph& graph,
        const std::string& filtergraph,
        AVFilterContext& source_ctx,
        AVFilterContext& sink_ctx)
{
    AVFilterInOut* outputs = nullptr;
    AVFilterInOut* inputs  = nullptr;

    try
    {
        if (!filtergraph.empty())
        {
            outputs = avfilter_inout_alloc();
            inputs  = avfilter_inout_alloc();

            CASPAR_VERIFY(outputs && inputs);

            outputs->name       = av_strdup("in");
            outputs->filter_ctx = &source_ctx;
            outputs->pad_idx    = 0;
            outputs->next       = nullptr;

            inputs->name        = av_strdup("out");
            inputs->filter_ctx  = &sink_ctx;
            inputs->pad_idx     = 0;
            inputs->next        = nullptr;

            FF(avfilter_graph_parse(
                    &graph,
                    filtergraph.c_str(),
                    inputs,
                    outputs,
                    nullptr));
        }
        else
        {
            FF(avfilter_link(&source_ctx, 0, &sink_ctx, 0));
        }

        FF(avfilter_graph_config(&graph, nullptr));
    }
    catch (...)
    {
        avfilter_inout_free(&outputs);
        avfilter_inout_free(&inputs);
        throw;
    }
}
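// FF() above is a project error-check wrapper that is not shown in this excerpt. Below is a
// rough sketch of such a macro, assuming it simply throws when an FFmpeg call returns a
// negative error code; the real wrapper may capture file/line and richer context.
#include &lt;stdexcept&gt;
#include &lt;string&gt;
extern "C" {
#include &lt;libavutil/error.h&gt;
}

// Hypothetical FFmpeg return-code check in the spirit of FF() above.
#define FF_SKETCH(call)                                                            \
    do {                                                                           \
        const int ff_ret__ = (call);                                               \
        if (ff_ret__ < 0) {                                                        \
            char ff_buf__[AV_ERROR_MAX_STRING_SIZE] = {0};                         \
            av_strerror(ff_ret__, ff_buf__, sizeof(ff_buf__));                     \
            throw std::runtime_error(std::string(#call) + " failed: " + ff_buf__); \
        }                                                                          \
    } while (false)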
virtual boost::unique_future&lt;bool&gt; send(const safe_ptr&lt;core::read_frame&gt;& frame) override
{
    CASPAR_VERIFY(format_desc_.height * format_desc_.width * 4 ==
                  static_cast&lt;unsigned&gt;(frame->image_data().size()));

    return executor_.begin_invoke([=]() -> bool
    {
        graph_->set_value("tick-time", tick_timer_.elapsed() * format_desc_.fps * 0.5);
        tick_timer_.restart();
        frame_timer_.restart();

        // AUDIO
        std::vector&lt;int16_t, tbb::cache_aligned_allocator&lt;int16_t&gt;&gt; audio_buffer;

        if (core::needs_rearranging(
                frame->multichannel_view(),
                channel_layout_,
                channel_layout_.num_channels))
        {
            core::audio_buffer downmixed;
            downmixed.resize(
                    frame->multichannel_view().num_samples() * channel_layout_.num_channels,
                    0);

            auto dest_view = core::make_multichannel_view&lt;int32_t&gt;(
                    downmixed.begin(),
                    downmixed.end(),
                    channel_layout_);

            core::rearrange_or_rearrange_and_mix(
                    frame->multichannel_view(),
                    dest_view,
                    core::default_mix_config_repository());

            audio_buffer = core::audio_32_to_16(downmixed);
        }
        else
        {
            audio_buffer = core::audio_32_to_16(frame->audio_data());
        }

        airsend::add_audio(air_send_.get(),
                           audio_buffer.data(),
                           audio_buffer.size() / channel_layout_.num_channels);

        // VIDEO
        connected_ = airsend::add_frame_bgra(air_send_.get(), frame->image_data().begin());

        graph_->set_text(print());
        graph_->set_value("frame-time", frame_timer_.elapsed() * format_desc_.fps * 0.5);

        return true;
    });
}
void initialize(const core::video_format_desc& format_desc, int channel_index) override
{
    format_desc_ = format_desc;

    air_send_.reset(
            airsend::create(
                    format_desc.width,
                    format_desc.height,
                    format_desc.time_scale,
                    format_desc.duration,
                    true,
                    static_cast&lt;float&gt;(format_desc.square_width) /
                            static_cast&lt;float&gt;(format_desc.square_height),
                    true,
                    format_desc.audio_channels,
                    format_desc.audio_sample_rate),
            airsend::destroy);

    CASPAR_VERIFY(air_send_);
}
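// The airsend wrapper called throughout these consumers is not included in this excerpt. The
// declarations below are a hypothetical reconstruction of the interface the call sites assume;
// argument names and exact types are inferred from usage, not taken from the NewTek SDK.
#include &lt;cstdint&gt;

namespace airsend {

// Creates a sender instance; returns an opaque handle (assumed nullptr on failure).
void* create(int width,
             int height,
             int time_scale,
             int duration,
             bool progressive,
             float aspect_ratio,
             bool audio_enabled,
             int num_channels,
             int sample_rate);

// Releases a handle obtained from create(); used as the shared_ptr deleter above.
void destroy(void* instance);

// Pushes one BGRA video frame; appears to return whether a receiver is connected.
bool add_frame_bgra(void* instance, const uint8_t* data);

// Pushes interleaved 16-bit PCM; num_samples is per channel at the call sites above.
void add_audio(void* instance, const int16_t* samples, int num_samples);

} // namespace airsend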
virtual void initialize(const core::video_format_desc& format_desc, int channel_index) override
{
    air_send_.reset(
            airsend::create(
                    format_desc.width,
                    format_desc.height,
                    format_desc.time_scale,
                    format_desc.duration,
                    format_desc.field_mode == core::field_mode::progressive,
                    static_cast&lt;float&gt;(format_desc.square_width) /
                            static_cast&lt;float&gt;(format_desc.square_height),
                    true,
                    channel_layout_.num_channels,
                    format_desc.audio_sample_rate),
            airsend::destroy);

    CASPAR_VERIFY(air_send_);

    format_desc_ = format_desc;

    CASPAR_LOG(info) << print() << L" Successfully Initialized.";
}
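// Both initialize() variants pass airsend::destroy as a custom deleter, which implies
// air_send_ is a shared_ptr to an opaque handle. The snippet below is a minimal,
// self-contained illustration of that ownership pattern; the stand-in create/destroy
// functions are hypothetical and only mimic a C-style API such as the airsend wrapper.
#include &lt;iostream&gt;
#include &lt;memory&gt;

// Hypothetical opaque C-style API standing in for airsend::create / airsend::destroy.
void* opaque_create()         { return new int(42); }
void  opaque_destroy(void* p) { delete static_cast&lt;int*&gt;(p); std::cout << "destroyed\n"; }

int main()
{
    std::shared_ptr&lt;void&gt; handle;                  // plays the role of air_send_
    handle.reset(opaque_create(), opaque_destroy); // deleter runs when the last copy is released

    std::cout << "handle in use\n";
    return 0;                                      // opaque_destroy is invoked here
}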