~oal_consumer()
{
    is_running_ = false;
    // Push dummy pairs before and after Stop() so a consumer blocked in pop() wakes up
    // and the sound stream can shut down cleanly.
    input_.try_push(std::make_pair(std::shared_ptr<core::read_frame>(), std::make_shared<audio_buffer_16>()));
    input_.try_push(std::make_pair(std::shared_ptr<core::read_frame>(), std::make_shared<audio_buffer_16>()));
    Stop();
    input_.try_push(std::make_pair(std::shared_ptr<core::read_frame>(), std::make_shared<audio_buffer_16>()));
    input_.try_push(std::make_pair(std::shared_ptr<core::read_frame>(), std::make_shared<audio_buffer_16>()));

    CASPAR_LOG(info) << print() << L" Successfully Uninitialized.";
}
virtual boost::unique_future<bool> send(const safe_ptr<read_frame>& frame) override
{
    bool pushed = frame_buffer_.try_push(frame);

    if (pushed && !first_frame_reported_)
    {
        first_frame_promise_.set_value();
        first_frame_reported_ = true;
    }

    return caspar::wrap_as_future(is_running_.load());
}
virtual void send(const safe_ptr<basic_frame>& src_frame) override
{
    bool pushed = frame_buffer_.try_push(src_frame);
    // frame_buffer_.push(src_frame);

    if (pushed && !first_frame_reported_) //changed to fix compilation
    //if (!first_frame_reported_)
    {
        first_frame_promise_.set_value();
        first_frame_reported_ = true;
    }
}
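The first-frame signalling in the two send() overloads above can be reduced to a small helper. The sketch below is illustrative only: it swaps CasparCG's boost promise for std::promise, and the class and member names are invented for the example.

#include <future>

// Fulfils a promise exactly once, the first time a frame is actually pushed.
class first_frame_reporter
{
    std::promise<void> first_frame_promise_;
    bool               first_frame_reported_ = false;

public:
    std::future<void> first_frame() { return first_frame_promise_.get_future(); }

    void on_push_result(bool pushed)
    {
        if (pushed && !first_frame_reported_)
        {
            first_frame_promise_.set_value(); // calling set_value() twice would throw
            first_frame_reported_ = true;
        }
    }
};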
virtual boost::unique_future<bool> send(const safe_ptr<core::read_frame>& frame) override
{
    auto buffer = std::make_shared<audio_buffer_16>(
        core::audio_32_to_16(core::get_rearranged_and_mixed(frame->multichannel_view(), channel_layout_, channel_layout_.num_channels)));

    if (!input_.try_push(std::make_pair(frame, buffer)))
        graph_->set_tag("dropped-frame");

    if (Status() != Playing && !started_)
    {
        sf::SoundStream::Initialize(2, format_desc_.audio_sample_rate);
        Play();
        started_ = true;
    }

    return wrap_as_future(is_running_.load());
}
route_producer(std::shared_ptr<route> route, int buffer)
    : route_(route)
    , connection_(route_->signal.connect([this](const core::draw_frame& frame) {
        if (!buffer_.try_push(frame)) {
            graph_->set_tag(diagnostics::tag_severity::WARNING, "dropped-frame");
        }
        graph_->set_value("produce-time", produce_timer_.elapsed() * route_->format_desc.fps * 0.5);
        produce_timer_.restart();
    }))
{
    buffer_.set_capacity(buffer > 0 ? buffer : route->format_desc.field_count);

    graph_->set_color("late-frame", diagnostics::color(0.6f, 0.3f, 0.3f));
    graph_->set_color("produce-time", caspar::diagnostics::color(0.0f, 1.0f, 0.0f));
    graph_->set_color("consume-time", caspar::diagnostics::color(1.0f, 0.4f, 0.0f, 0.8f));
    graph_->set_color("dropped-frame", diagnostics::color(0.3f, 0.6f, 0.3f));
    graph_->set_text(print());
    diagnostics::register_graph(graph_);

    CASPAR_LOG(debug) << print() << L" Initialized";
}
void stop()
{
    is_running_ = false;
    frame_buffer_.try_push(make_safe<read_frame>()); // dummy frame unblocks a consumer waiting in pop()
}
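Both the oal_consumer destructor above and this stop() rely on the same idea: a consumer blocked in a blocking pop() only wakes up when something is pushed, so shutdown clears the running flag first and then pushes a dummy item. Below is a minimal, self-contained sketch of that pattern with tbb::concurrent_bounded_queue; the frame type and names are illustrative, not CasparCG code.

#include <tbb/concurrent_queue.h>
#include <atomic>
#include <memory>
#include <thread>

struct frame {};

int main()
{
    tbb::concurrent_bounded_queue<std::shared_ptr<frame>> input;
    std::atomic<bool> is_running(true);

    std::thread consumer([&]
    {
        std::shared_ptr<frame> item;
        while (is_running)
        {
            input.pop(item);    // blocks until a real frame or a sentinel arrives
            if (!item)
                continue;       // empty sentinel: loop around and re-check is_running
            // ... render/play the frame ...
        }
    });

    is_running = false;
    input.try_push(std::shared_ptr<frame>());   // sentinel wakes the blocked pop()
    consumer.join();
}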
virtual HRESULT STDMETHODCALLTYPE VideoInputFrameArrived(IDeckLinkVideoInputFrame* video, IDeckLinkAudioInputPacket* audio)
{
    if(!video)
        return S_OK;

    try
    {
        graph_->set_value("tick-time", tick_timer_.elapsed()*format_desc_.fps*0.5);
        tick_timer_.restart();

        frame_timer_.restart();

        // PUSH

        void* bytes = nullptr;
        if(FAILED(video->GetBytes(&bytes)) || !bytes)
            return S_OK;

        safe_ptr<AVFrame> av_frame(avcodec_alloc_frame(), av_free);
        avcodec_get_frame_defaults(av_frame.get());

        av_frame->data[0]           = reinterpret_cast<uint8_t*>(bytes);
        av_frame->linesize[0]       = video->GetRowBytes();
        av_frame->format            = PIX_FMT_UYVY422;
        av_frame->width             = video->GetWidth();
        av_frame->height            = video->GetHeight();
        av_frame->interlaced_frame  = format_desc_.field_mode != core::field_mode::progressive;
        av_frame->top_field_first   = format_desc_.field_mode == core::field_mode::upper ? 1 : 0;

        std::shared_ptr<core::audio_buffer> audio_buffer;

        // It is assumed that audio is always equal or ahead of video.
        if(audio && SUCCEEDED(audio->GetBytes(&bytes)) && bytes)
        {
            auto sample_frame_count = audio->GetSampleFrameCount();
            auto audio_data = reinterpret_cast<int32_t*>(bytes);
            audio_buffer = std::make_shared<core::audio_buffer>(audio_data, audio_data + sample_frame_count*format_desc_.audio_channels);
        }
        else
            audio_buffer = std::make_shared<core::audio_buffer>(audio_cadence_.front(), 0);

        // Note: Uses 1 step rotated cadence for 1001 modes (1602, 1602, 1601, 1602, 1601)
        // This cadence fills the audio mixer most optimally.

        sync_buffer_.push_back(audio_buffer->size());
        if(!boost::range::equal(sync_buffer_, audio_cadence_))
        {
            CASPAR_LOG(trace) << print() << L" Syncing audio.";
            return S_OK;
        }

        muxer_.push(audio_buffer);
        muxer_.push(av_frame, hints_);

        boost::range::rotate(audio_cadence_, std::begin(audio_cadence_)+1);

        // POLL

        for(auto frame = muxer_.poll(); frame; frame = muxer_.poll())
        {
            if(!frame_buffer_.try_push(make_safe_ptr(frame)))
            {
                auto dummy = core::basic_frame::empty();
                frame_buffer_.try_pop(dummy);
                frame_buffer_.try_push(make_safe_ptr(frame));
                graph_->set_tag("dropped-frame");
            }
        }

        graph_->set_value("frame-time", frame_timer_.elapsed()*format_desc_.fps*0.5);
        graph_->set_value("output-buffer", static_cast<float>(frame_buffer_.size())/static_cast<float>(frame_buffer_.capacity()));
    }
    catch(...)
    {
        exception_ = std::current_exception();
        return E_FAIL;
    }

    return S_OK;
}
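The POLL section above drops the oldest queued frame when the bounded output buffer is full so that the newest frame can still be pushed. The helper below is a minimal sketch of that drop-oldest policy on a tbb::concurrent_bounded_queue; the function name is made up for illustration and is not part of CasparCG.

#include <tbb/concurrent_queue.h>

template <typename T>
bool push_drop_oldest(tbb::concurrent_bounded_queue<T>& queue, const T& item)
{
    if (queue.try_push(item))
        return true;                // there was room; nothing dropped

    T dropped;
    queue.try_pop(dropped);         // discard the oldest queued element
    return queue.try_push(item);    // retry; can still fail if another producer raced us
}

A producer would typically bound the queue once up front with queue.set_capacity(n), as route_producer does with buffer_.set_capacity() above, so the drop path is only taken when the consumer falls behind.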