void FFmpegDecoderAudio::decodeLoop() { const bool skip_audio = ! validContext() || ! m_audio_sink.valid(); if (! skip_audio && ! m_audio_sink->playing()) { m_clocks.audioSetDelay(m_audio_sink->getDelay()); m_audio_sink->play(); } else { m_clocks.audioDisable(); } while (! m_exit) { if(m_paused) { m_clocks.pause(true); m_pause_timer.setStartTick(); while(m_paused && !m_exit) { microSleep(10000); } m_clocks.setPauseTime(m_pause_timer.time_s()); m_clocks.pause(false); } // If skipping audio, make sure the audio stream is still consumed. if (skip_audio) { bool is_empty; FFmpegPacket packet = m_packets.timedPop(is_empty, 10); if (packet.valid()) packet.clear(); } // Else, just idle in this thread. // Note: If m_audio_sink has an audio callback, this thread will still be awaken // from time to time to refill the audio buffer. else { OpenThreads::Thread::microSleep(10000); } } }
// Video decoding thread body: pulls packets from the queue, decodes them
// with avcodec_decode_video2(), reconstructs a presentation timestamp for
// each completed frame, and hands the frame to the clock/publish machinery.
void FFmpegDecoderVideo::decodeLoop()
{
    FFmpegPacket packet;
    double pts;

    while (! m_exit)
    {
        // Work on the current packet until we have decoded all of it.
        // NOTE(review): m_bytes_remaining is assumed to start at 0 so this
        // inner loop is skipped until the first PACKET_DATA arrives — confirm
        // against the constructor/flush paths.
        while (m_bytes_remaining > 0)
        {
            // Save global PTS to be stored in m_frame via getBuffer()
            m_packet_pts = packet.packet.pts;

            // Decode video frame
            int frame_finished = 0;

            // We want to use the entire packet since some codecs will require extra information for decoding
            const int bytes_decoded = avcodec_decode_video2(m_context, m_frame.get(), &frame_finished, &(packet.packet));

            if (bytes_decoded < 0)
                throw std::runtime_error("avcodec_decode_video failed()");

            m_bytes_remaining -= bytes_decoded;
            m_packet_data += bytes_decoded;

            // Publish the frame if we have decoded a complete frame
            if (frame_finished)
            {
                AVRational timebase;

                // Find out the frame pts. Fallback chain: frame's own pts,
                // then the buffer-opaque pts captured at getBuffer() time,
                // then the packet dts, and finally 0.
                if (m_frame->pts != int64_t(AV_NOPTS_VALUE))
                {
                    pts = m_frame->pts;
                    timebase = m_context->time_base;
                }
                else if (packet.packet.dts == int64_t(AV_NOPTS_VALUE) &&
                         m_frame->opaque != 0 &&
                         *reinterpret_cast<const int64_t*>(m_frame->opaque) != int64_t(AV_NOPTS_VALUE))
                {
                    pts = *reinterpret_cast<const int64_t*>(m_frame->opaque);
                    timebase = m_stream->time_base;
                }
                else if (packet.packet.dts != int64_t(AV_NOPTS_VALUE))
                {
                    pts = packet.packet.dts;
                    timebase = m_stream->time_base;
                }
                else
                {
                    pts = 0;
                    timebase = m_context->time_base;
                }

                // Convert pts from timebase units to seconds.
                pts *= av_q2d(timebase);

                const double synched_pts = m_clocks.videoSynchClock(m_frame.get(), av_q2d(timebase), pts);
                const double frame_delay = m_clocks.videoRefreshSchedule(synched_pts);

                publishFrame(frame_delay, m_clocks.audioDisabled());
            }
        }

        // While paused, just idle; the clocks are handled by the audio thread.
        while (m_paused && !m_exit)
        {
            microSleep(10000);
        }

        // Get the next packet
        pts = 0;

        if (packet.valid())
            packet.clear();

        bool is_empty = true;
        packet = m_packets.timedPop(is_empty, 10);

        if (! is_empty)
        {
            if (packet.type == FFmpegPacket::PACKET_DATA)
            {
                // Start consuming this packet on the next inner-loop pass.
                m_bytes_remaining = packet.packet.size;
                m_packet_data = packet.packet.data;
            }
            else if (packet.type == FFmpegPacket::PACKET_FLUSH)
            {
                // Seek/flush request: drop the codec's internal buffers.
                avcodec_flush_buffers(m_context);
            }
        }
    }
}
// Functor used to dispose of a queued packet: releases the data it owns.
void operator ()(FFmpegPacket &packet) const
{
    packet.clear();
}