bool Packetizer::putPacket(PacketPtr ptr) {
          bool result = false;
          // if the stream is not mapped through a stream_data element, discard the packet
          if (_streams.find(ptr->getStreamIndex()) == _streams.end()) {
            return false;
          }

          if (_codec_overlap.find(_streams[ptr->getStreamIndex()].decoder->getCodecId()) == _codec_overlap.end()) {
            _codec_overlap[_streams[ptr->getStreamIndex()].decoder->getCodecId()] = 0;
          }
          result = processPacket(ptr);
          return result;
        }
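
A hypothetical sketch (not part of the sources above) of how putPacket() could be driven from a packet-pulling loop. The only contract taken from the code is that putPacket() returns true once processPacket() has queued a complete chunk in _packet_list; the packet source and chunk consumer are passed in as std::function callbacks because no such helpers appear in the excerpt:

#include <functional>

// Hypothetical glue code: pull packets from an assumed source and notify a
// callback whenever the packetizer reports that a full GOP chunk was queued.
void drainPackets(Packetizer & packetizer,
                  const std::function<PacketPtr()> & nextPacket,       // assumed packet source
                  const std::function<void(Packetizer &)> & onChunk) { // assumed chunk consumer
  while (PacketPtr pkt = nextPacket()) {   // assumes PacketPtr is a shared_ptr-like handle
    if (packetizer.putPacket(pkt)) {
      // a complete chunk is now available inside the packetizer
      onChunk(packetizer);
    }
  }
}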
void formatWriter(const ContainerPtr& container, const PacketPtr& packet)
{
    packetSync.doTimeSync(packet);

    //if (packet->getPts() != av::NoPts)
    //    packet->setDts(packet->getPts());

    clog << "Write FRAME: " << packet->getStreamIndex()
         << ", PTS: "       << packet->getPts()
         << ", DTS: "       << packet->getDts()
         << ", timeBase: "  << packet->getTimeBase()
         << ", time: "      << packet->getTimeBase().getDouble() * packet->getPts()
         << endl;

    container->writePacket(packet, true);
}
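
For reference, the time value printed above is just the PTS scaled by the stream time base; the avcpp-style wrappers do it via getTimeBase().getDouble() * getPts(). With raw FFmpeg types the same conversion is commonly written with av_q2d(), as in this minimal sketch:

#include <cstdint>
extern "C" {
#include <libavutil/rational.h>
}

// Convert a timestamp expressed in time-base ticks into seconds.
static double ptsToSeconds(int64_t pts, AVRational timeBase) {
  // av_q2d() turns the rational time base (num/den) into a double,
  // so pts * av_q2d(timeBase) is the presentation time in seconds.
  return pts * av_q2d(timeBase);
}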
  boost::shared_ptr<org::esb::hive::job::ProcessUnit> ProcessUnitBuilder::build(org::esb::av::PacketListPtr list) {
    boost::shared_ptr<org::esb::hive::job::ProcessUnit> u(new org::esb::hive::job::ProcessUnit());
    if (list.size() == 0) return u;
    PacketPtr p = list.front();
    int idx = p->getStreamIndex();
    u->_source_stream = _map_data[idx].instream;
    u->_target_stream = _map_data[idx].outstream;
    u->_deinterlace = _map_data[idx].deinterlace;
    u->_decoder = _map_data[idx].decoder;
    u->_encoder = _map_data[idx].encoder;
    u->_2passdecoder = _map_data[idx].pass2decoder;
    u->_2passencoder = _map_data[idx].pass2encoder;
    u->_input_packets = std::list<boost::shared_ptr<Packet> >(list.begin(), list.end());
    u->_gop_size = list.size(); //- _map_data[idx].b_frame_offset;
    int cou = u->_gop_size;
    u->_frameRateCompensateBase = _map_data[idx].frameRateCompensateBase;



    if (u->_decoder->getCodecType() == AVMEDIA_TYPE_VIDEO) {
      AVRational input_framerate = u->_decoder->getFrameRate();
      AVRational output_framerate = u->_encoder->getFrameRate();
      //
      double in = (((double) u->_gop_size) / input_framerate.num * input_framerate.den * output_framerate.num / output_framerate.den);
      //double in = ((((((double) u->_gop_size)*u->_decoder->getFrameRate().den)/u->_decoder->getFrameRate().num)/u->_encoder->getFrameRate().den)*u->_encoder->getFrameRate().num);
      in += _map_data[idx].frameRateCompensateBase;

      /* this guards against rounding issues */
      in += 0.000000001;
      LOGDEBUG("Decimal input:"<<in);
      /* splitting the double value into integral and fractional parts */
      org::esb::util::Decimal dec(in);
      org::esb::util::Decimal::MantissaType i;
      org::esb::util::Decimal::MantissaType f;
      org::esb::util::Decimal::ExponentType exp = 0;
      dec.getIntegralFractionalExponent<org::esb::util::Decimal::MantissaType > (i, f, exp, dec.getExponent());

      /* when the decoder produces B-frames, the calculated frame count is one
       * too high, because there is one I-frame too many at the end
       *
       * e.g. IBBPBBPBBPBBPBBIBBP
       *                     |
       *            this I-frame is one too many
       *    it is always the last I-frame in a ProcessUnit
       *
       * this only happens when the decoder has B-frames
       */
      if (u->_decoder->getCodecId() == CODEC_ID_MPEG2VIDEO && u->_decoder->getCodecOption("has_b_frames") == "1")
        i -= 1;

      u->_expected_frame_count = static_cast<int> (i);

      _map_data[idx].frameRateCompensateBase = f / pow((double) 10, dec.getExponent()*-1);
    }
    if (u->_decoder->getCodecType() == AVMEDIA_TYPE_AUDIO) {
      AVRational input_timebase = u->_decoder->getTimeBase();
      AVRational output_timebase = u->_encoder->getTimeBase();

      int64_t input_samplerate = u->_decoder->getSampleRate();
      int64_t output_samplerate = u->_encoder->getSampleRate();

      int64_t output_framesize = u->_encoder->getFrameBytes();
      /*
       * the decoder does not return a correct frame size, so it is
       * calculated from packet->duration and the packet time base
       */
      int64_t input_framesize = av_rescale_q(p->getDuration(), p->getTimeBase(), input_timebase);
      int osize = av_get_bytes_per_sample(u->_decoder->getSampleFormat());
      input_framesize *= osize;
      input_framesize *= u->_decoder->ctx->request_channel_layout;

      int64_t raw_in_samples = input_framesize * list.size();
      int64_t raw_out_samples = av_rescale_q(raw_in_samples, input_timebase, output_timebase);
      raw_out_samples += _map_data[idx].frameRateCompensateBase;
      int rest = raw_out_samples % (output_framesize > 0 ? output_framesize : 1);
      double out;
      double delta = modf(raw_out_samples / (output_framesize > 0 ? output_framesize : 1), &out);
      u->_expected_frame_count = -1; //static_cast<int> (out);
      _map_data[idx].frameRateCompensateBase = rest;
      //            LOGDEBUG(rest);
    }

    return u;
  }
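
A worked example of the video branch above, using plain doubles and std::modf() instead of org::esb::util::Decimal (a sketch only, the values are made up): a 15-packet GOP decoded at 25/1 fps and re-encoded at 30000/1001 fps gives 15 / 25 * 1 * 30000 / 1001 ≈ 17.982 output frames, so 17 frames are expected from this ProcessUnit and the remaining ~0.982 is carried into frameRateCompensateBase for the next one.

#include <cmath>
#include <iostream>

int main() {
  double gop_size = 15.0;                        // packets in this ProcessUnit
  double in_num  = 25.0,    in_den  = 1.0;       // input frame rate 25/1
  double out_num = 30000.0, out_den = 1001.0;    // output frame rate 30000/1001
  double carry   = 0.0;                          // frameRateCompensateBase from the previous unit

  double frames = gop_size / in_num * in_den * out_num / out_den + carry;
  frames += 0.000000001;                         // same guard against rounding issues as above

  double integral = 0.0;
  double fractional = std::modf(frames, &integral);

  std::cout << "expected_frame_count = " << static_cast<int>(integral)  // 17
            << ", carried remainder = " << fractional << std::endl;     // ~0.982
  return 0;
}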
        /***
         * this function packetizes the GOPs into chunks between the I-frames
         * in the normal case each group of pictures is simply cut at the
         * position of an I-frame:
         * IPPPPPPPPIPPPPPPPPIPPPPPPPP
         * IPPPPPPPP
         *          IPPPPPPPP
         *                   IPPPPPPPP
         *
         * in an MPEG stream with B-frames the pictures are reordered; each
         * group is still cut at the position of the I-frame, but to compensate
         * for the decoder delay we keep appending the following packets until
         * a P-frame arrives.
         * IPBBPBBPBBIBBPBBPBBIBBPBBPBBIBBP
         * IPBBPBBPBBIBB
         *           I  PBBPBBIBB
         *                    I  PBBPBBIBB
         *
         * (a standalone sketch of the simple I-frame chunking follows
         * processPacket below)
         */
        bool Packetizer::processPacket(PacketPtr ptr) {
          bool result = false;
          int stream_idx = ptr->getStreamIndex();

          if (_streams[stream_idx].state == STATE_NOP && (ptr->isKeyFrame() || _streams[stream_idx].decoder->getCodecType() == AVMEDIA_TYPE_AUDIO)) {
            _streams[stream_idx].state = STATE_START_I_FRAME;
          }

          if (_streams[stream_idx].state == STATE_START_I_FRAME && _streams[stream_idx].packets.size() >= _streams[stream_idx].min_packet_count && ptr->isKeyFrame()) {
            _streams[stream_idx].state = STATE_END_I_FRAME;
          }

          if (_streams[stream_idx].state == STATE_START_I_FRAME) {
            _streams[stream_idx].packets.push_back(ptr);
          }

          if (_streams[stream_idx].state == STATE_END_I_FRAME) {
            _overlap_queue[stream_idx].push_back(ptr);
          }
          /**
           * case handling for MPEG-2 video packets (i.e. streams with B-frames)
           */
          if (_streams[stream_idx].state == STATE_END_I_FRAME && (_streams[stream_idx].decoder->getCodecId() == CODEC_ID_MPEG2VIDEO && ptr->_pict_type == AV_PICTURE_TYPE_P)) {
            /* on the first round trip the stream packets look like this; the leading B-frames have not been removed yet */
            /*_streams[stream_idx].packets =IBBPBBPBBPBB*/
            /* on the following round trips the stream packets look like this */
            /*_streams[stream_idx].packets =IPBBPBBPBB*/
            /*_overlap_queue[stream_idx]   =IBBP*/
            _streams[stream_idx].state = STATE_START_I_FRAME;
            /**
             * append the next IBB from the IBBP sequence to the current packet list
             */
            _streams[stream_idx].packets.insert(_streams[stream_idx].packets.end(), _overlap_queue[stream_idx].begin(), _overlap_queue[stream_idx].end() - 1);
            /*_streams[stream_idx].packets =IBBPBBPBBPBBIBB*/
            /*_overlap_queue[stream_idx]   =IBBP*/

            _packet_list.push_back(_streams[stream_idx].packets);
            _streams[stream_idx].packets.clear();
            /*_streams[stream_idx].packets =    */
            /*_overlap_queue[stream_idx]   =IBBP*/


            /**
             * append the I and P frames from the IBBP sequence to the current packet
             * list; they are the first and the last entries in the overlap queue
             */
            _streams[stream_idx].packets.insert(_streams[stream_idx].packets.end(), _overlap_queue[stream_idx].begin(), _overlap_queue[stream_idx].begin() + 1);
            _streams[stream_idx].packets.insert(_streams[stream_idx].packets.end(), _overlap_queue[stream_idx].end() - 1, _overlap_queue[stream_idx].end());
            //            _streams[stream_idx].packets.insert(_streams[stream_idx].packets.end(), _overlap_queue[stream_idx].begin(), _overlap_queue[stream_idx].end());
            /*_streams[stream_idx].packets =IP  */
            /*_overlap_queue[stream_idx]   =IBBP*/


            _overlap_queue[stream_idx].clear();
            /*_streams[stream_idx].packets =IP */
            /*_overlap_queue[stream_idx]   =   */
            result = true;

          } else if (_streams[stream_idx].state == STATE_END_I_FRAME && _streams[stream_idx].decoder->getCodecType() == AVMEDIA_TYPE_AUDIO) {
            /**************************************
             * this is used for all audio streams
             **************************************/
            _streams[stream_idx].state = STATE_START_I_FRAME;
            /** copy all packets into the current ProcessUnit */
            _packet_list.push_back(_streams[stream_idx].packets);
            /** clear the packet list, because its packets now live in the ProcessUnit */
            _streams[stream_idx].packets.clear();
            /**bring in the first I-Frame for the next Process Unit*/
            _streams[stream_idx].packets.insert(_streams[stream_idx].packets.end(), _overlap_queue[stream_idx].begin(), _overlap_queue[stream_idx].end());
            /** clear the overlap queue; it held only an I-frame packet, which is now part of the next ProcessUnit */
            _overlap_queue[stream_idx].clear();
            result = true;
          } else if (_streams[stream_idx].state == STATE_END_I_FRAME && _streams[stream_idx].decoder->getCodecId() != CODEC_ID_MPEG2VIDEO) {
            /**********************************************************************
             * this is used for all video stream types except MPEG2 Video Streams
             **********************************************************************/

            _streams[stream_idx].state = STATE_START_I_FRAME;
            /** copy all packets into the current ProcessUnit */
            _packet_list.push_back(_streams[stream_idx].packets);
            /** clear the packet list, because its packets now live in the ProcessUnit */
            _streams[stream_idx].packets.clear();
            /**bring in the first I-Frame for the next Process Unit*/
            _streams[stream_idx].packets.insert(_streams[stream_idx].packets.end(), _overlap_queue[stream_idx].begin(), _overlap_queue[stream_idx].end());
            /** clear the overlap queue; it held only an I-frame packet, which is now part of the next ProcessUnit */
            _overlap_queue[stream_idx].clear();
            result = true;
          }
          return result;
        }
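
A standalone sketch of the I-frame chunking described in the comment block before processPacket(), covering only the simple case without B-frames (the MPEG-2 overlap handling is deliberately not modelled); frame types are represented as characters purely for illustration:

#include <iostream>
#include <string>
#include <vector>

// Split a sequence of picture types into GOP chunks, starting a new chunk at
// every I-frame once at least min_packet_count frames have been collected.
// Frames before the first I-frame are dropped, mirroring STATE_NOP.
std::vector<std::string> chunkAtIFrames(const std::string & frames, size_t min_packet_count) {
  std::vector<std::string> chunks;
  std::string current;
  bool started = false;
  for (char f : frames) {
    if (!started) {
      if (f != 'I') continue;                    // STATE_NOP: wait for the first key frame
      started = true;
    }
    if (f == 'I' && current.size() >= min_packet_count) {
      chunks.push_back(current);                 // STATE_END_I_FRAME: emit the finished chunk
      current.clear();                           // the new I-frame starts the next chunk
    }
    current += f;
  }
  if (!current.empty()) chunks.push_back(current);
  return chunks;
}

int main() {
  for (const std::string & chunk : chunkAtIFrames("IPPPPPPPPIPPPPPPPPIPPPPPPPP", 1))
    std::cout << chunk << std::endl;             // prints IPPPPPPPP three times
  return 0;
}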