Example #1
0
void SyntheticInput::tick() {
  // Called periodically to emit synthetic audio/video frames. May recurse
  // (bounded by kMaxConsecutiveTicks) to catch up when frames are overdue.
  time_point now = clock_->now();
  if (now >= next_audio_frame_time_) {
    sendAudioFrame(audio_frame_size_);
    next_audio_frame_time_ += audio_period_;
  }
  if (now >= next_video_frame_time_) {
    bool is_keyframe = false;
    size_t frame_size = getRandomValue(video_avg_frame_size_, video_dev_frame_size_);
    if (now - last_video_keyframe_time_ > kDefaultVideoKeyframePeriod || keyframe_requested_) {
      is_keyframe = true;
      frame_size = getRandomValue(video_avg_keyframe_size_, video_dev_keyframe_size_);
      // Bug fix: record when this keyframe was sent and clear any pending
      // request. Without these updates, once the first keyframe period
      // elapsed (or a request arrived) every subsequent frame would be a
      // keyframe forever.
      last_video_keyframe_time_ = now;
      keyframe_requested_ = false;
    }
    // Fragment oversized frames into kMaxPacketSize chunks; only the first
    // fragment carries the keyframe flag and only the last one carries the
    // end-of-frame marker.
    while (frame_size > kMaxPacketSize) {
      sendVideoframe(is_keyframe, false, kMaxPacketSize);
      is_keyframe = false;
      frame_size = frame_size - kMaxPacketSize;
    }
    sendVideoframe(is_keyframe, true, frame_size);

    next_video_frame_time_ += video_period_;
  }
  now = clock_->now();
  // If we are still behind schedule, tick again immediately; the counter
  // bounds the recursion depth when the clock jumps far ahead.
  if ((next_video_frame_time_ <= now || next_audio_frame_time_ <= now) && consecutive_ticks_ < kMaxConsecutiveTicks) {
    consecutive_ticks_++;
    tick();
  } else {
    consecutive_ticks_ = 0;
  }
}
Example #2
0
void AudioInput::flushCheck(const QByteArray &frame, bool terminator) {
	// Queue an encoded audio frame and, once enough frames are buffered (or a
	// terminator arrives), pack them into one UDP voice packet and send it.
	qlFrames << frame;

	// Keep buffering until we have a full packet's worth of frames, unless
	// this frame ends the transmission.
	if (! terminator && iBufferedFrames < iAudioFrames)
		return;

	// Low 5 bits of the header byte: voice target. A terminator packet is
	// addressed to the target that was active when transmission started.
	int flags = g.iTarget;
	if (terminator)
		flags = g.iPrevTarget;

	if (g.s.lmLoopMode == Settings::Server)
		flags = 0x1f; // Server loopback

	// High 3 bits: codec/message type.
	flags |= (umtType << 5);

	char data[1024];
	data[0] = static_cast<unsigned char>(flags);

	int frames = iBufferedFrames;
	iBufferedFrames = 0;

	PacketDataStream pds(data + 1, 1023);
	// Sequence number
	pds << iFrameCounter - frames;

	if (umtType == MessageHandler::UDPVoiceOpus) {
		// Opus: a single variable-length frame; bit 13 of the size field
		// marks the end of the transmission.
		const QByteArray &qba = qlFrames.takeFirst();
		int size = qba.size();
		if (terminator)
			size |= 1 << 13;
		pds << size;
		pds.append(qba.constData(), qba.size());
	} else {
		// CELT/Speex: the terminator is represented as an extra empty frame.
		if (terminator) {
			qlFrames << QByteArray();
			++frames;
		}

		// Each frame is prefixed with a one-byte header: the frame length,
		// with bit 0x80 set when more frames follow in this packet.
		for (int i = 0; i < frames; ++i) {
			const QByteArray &qba = qlFrames.takeFirst();
			unsigned char head = static_cast<unsigned char>(qba.size());
			if (i < frames - 1)
				head |= 0x80;
			pds.append(head);
			pds.append(qba.constData(), qba.size());
		}
	}

	// Optionally append the speaker's 3D position for positional audio.
	if (g.s.bTransmitPosition && g.p && ! g.bCenterPosition && g.p->fetch()) {
		pds << g.p->fPosition[0];
		pds << g.p->fPosition[1];
		pds << g.p->fPosition[2];
	}

	sendAudioFrame(data, pds);

	Q_ASSERT(qlFrames.isEmpty());
}
void CConferenceInfo::doAudioProc(void)
{
	// Worker loop: drains per-member audio data from the main queue
	// (m_datasa), collecting at most one packet per member into
	// toSendMembers; when a member would appear twice, the accumulated
	// batch is flushed via sendAudioFrame(). Packets that arrive "too
	// early" for the current batch are parked in memberTemps and replayed
	// on later iterations.
	bool bGetTempMember = false;
	CLockMap<CConferenceMember*, CMemberData::pointer> toSendMembers;
	CLockList<CMemberData::pointer> memberTemps;

	while (!m_killed)
	{
		CMemberData::pointer memberData;
		if (bGetTempMember)
		{
			// Prefer previously deferred data from the temp list.
			if (!memberTemps.front(memberData, true))
			{
				bGetTempMember = false;
				continue;
			}

		}else if (!m_datasa.front(memberData, true))
		{
			// No pending data; sleep ~20ms to avoid busy-waiting.
#ifdef WIN32
			Sleep(20);
#else
			usleep(20000);
#endif
			continue;
		}

		// Drop data for members that are closed or not fully initialized.
		CConferenceMember * pDataConferenceMember = (CConferenceMember*)memberData->getRtpParam();
		if (pDataConferenceMember->getClosed() || pDataConferenceMember->getAudioHandler() == 0 || pDataConferenceMember->getVideoHandler() == 0)
		{
			continue;
		}

		BOOST_ASSERT (pDataConferenceMember->getAudioHandler().get() == memberData->getDoRtpHandler().get());

		if (toSendMembers.exist(pDataConferenceMember))
		{
			// This member already has a packet in the current batch.
			if (toSendMembers.size() < (unsigned int)countAudioMember())
			{
				// Batch not yet complete: defer this packet and keep
				// collecting from the main queue.
				// NOTE(review): bGetTempMember is forced to false right
				// after pushing to memberTemps — verify this is intentional
				// and the deferred data cannot be starved.
				setToMemberFront(memberTemps, memberData);
				bGetTempMember = false;
				continue;
			}

			// Batch complete: send the audio for all collected members.
			sendAudioFrame(toSendMembers);
			toSendMembers.clear(false);
		}

		toSendMembers.insert(pDataConferenceMember, memberData);
		bGetTempMember = !memberTemps.empty();
	}

}
Example #4
0
/*****************************************************************************
 Function:    handleMediaFrame
 Description: After a complete PS frame has been reassembled from its RTP
              packets, copy the payloads into the frame cache, convert the
              PS data to ES data and forward the resulting video/audio
              frames.
 Input:       rtpFrameList  list of RTP packets that make up one PS frame
 Output:      
 Return:      N/A
*****************************************************************************/
void CPs2EsProcessor::handleMediaFrame(RTP_FRAME_LIST_T &rtpFrameList)
{
    // Precondition checks: both caches must exist and the packet list must
    // be non-empty and within the configured packet-count limit.
    if (NULL == m_pExtendHeader)
    {
        ERROR_LOG("m_pExtendHeader is NULL");
        return;
    }
    if (NULL == m_pRtpFrameCache)
    {
        ERROR_LOG("m_pRtpFrameCache is NULL");
        return;
    }
    if (rtpFrameList.empty())
    {
        ERROR_LOG("Handle PS media frame abnormal , the frame list is empty");
        return;
    }

    if (MAX_RTP_PACKET_COUNT < rtpFrameList.size())
    {
        ERROR_LOG("Handle PS media frame abnormal , the frame list exceeds the Threshold[1024], the rtp packet count: %d",rtpFrameList.size());
        return;
    }

    // A new frame is starting, so the cache should be empty; reset the
    // write position defensively if it is not.
    if (m_pWritePos != m_pRtpFrameCache)
    {
        m_pWritePos = m_pRtpFrameCache;
    }

    // Copy the payload of every RTP packet of this frame into the cache.
    ACE_Message_Block* pRtpBlock = NULL;
    bool bFirst = true;
    CRtpPacket rtpPacket;
    unsigned int unCacheSize = RTP_FRAME_CACHE_SIZE;
    int iRet = IVS_SUCCEED;
    for (RTP_FRAME_LIST_T_ITER iter = rtpFrameList.begin(); iter != rtpFrameList.end(); ++iter)
    {
        pRtpBlock = *iter;
        iRet = rtpPacket.ParsePacket(pRtpBlock->rd_ptr(), pRtpBlock->length());
        if (IVS_SUCCEED != iRet)
        {
            // Discard the whole frame on any parse failure.
            m_pWritePos = m_pRtpFrameCache;
            m_pExtendHeader->reset();
            ERROR_LOG("Parse rtp packet fail ,retcode:%d",iRet);
            return;
        }

        if (bFirst)
        {
            // The first packet may carry an RTP header extension with
            // stream metadata; cache whichever variant is present.
            if (1 == rtpPacket.GetExtension())
            {
                if (NULL != rtpPacket.GetMuExtData())
                {
                    m_pExtendHeader->copy((char *)rtpPacket.GetMuExtData(), sizeof(RTP_EXTENSION_DATA_MU_S));
                }
                else if (NULL != rtpPacket.GetExtData())
                {
                    m_pExtendHeader->copy((char *)rtpPacket.GetExtData(), sizeof(RTP_EXTENSION_DATA_S));
                }
                else
                {
                    ERROR_LOG("Error extension label");
                }
            }

            if (m_bAppendExtInfo)
            {
                // Extra info (record timestamp + stream rate) is expected
                // at the base of the first message block.
                REAL_RECORD_TIME* realRecordTime = (REAL_RECORD_TIME*)(pRtpBlock->base());
                if (NULL != realRecordTime)
                {
                    m_uiRealRecordSecond = realRecordTime->uiSecond;
                    m_uiRealRecordMSecond = realRecordTime->uiMSecond;
                    uint32_t* pStreamRate = (uint32_t*)(pRtpBlock->base() + sizeof(REAL_RECORD_TIME));
                    m_uiReserved = *pStreamRate;
                }
                else
                {
                    ERROR_LOG("Error real record time info");
                }
            }

            bFirst = false;
        }

        // Advance past the RTP header so only the payload is copied.
        pRtpBlock->rd_ptr(rtpPacket.GetHeadLen());

        if (unCacheSize >= pRtpBlock->length())
        {
            memcpy(m_pWritePos, pRtpBlock->rd_ptr(), pRtpBlock->length());
            m_pWritePos += pRtpBlock->length();
            unCacheSize -= pRtpBlock->length();
        }
        else
        {
            // The cache cannot hold this frame; drop it entirely.
            // Bug fix: "&d" was an invalid printf conversion; use "%d".
            ERROR_LOG("Current frame is too big exceed cache size 1.5M , will discard part data,  rtp package list size=%d,curr rtp package length %d",rtpFrameList.size(),rtpPacket.GetPacketLen());
            m_pWritePos = m_pRtpFrameCache;
            m_pExtendHeader->reset();
            return;
        }
    }

    // Convert the buffered PS data into ES data.
    int iVideoLen = 0;
    int iAudioLen = 0;
    int iTotalLen = m_pWritePos - m_pRtpFrameCache;
    unsigned char *pFrame = NULL;
    if (NRU_ZERO > iTotalLen || RTP_FRAME_CACHE_SIZE < iTotalLen)
    {
        m_pWritePos = m_pRtpFrameCache;
        m_pExtendHeader->reset();
        ERROR_LOG("Parse PS packet to ES fail and discard curr frame , ulVideoTimeTick:%d,iTotalLen:%d",m_ulVideoTimeTick,iTotalLen);
        return;
    }

    iRet = HSPspkt2ESFrm((unsigned char*)m_pRtpFrameCache, iTotalLen,
                                         pFrame, iVideoLen, iAudioLen);
    if (IVS_SUCCEED != iRet)
    {
        m_pWritePos = m_pRtpFrameCache;
        m_pExtendHeader->reset();
        ERROR_LOG("Parse PS packet to ES fail and discard curr frame,  ulVideoTimeTick:%d",m_ulVideoTimeTick);
        return;
    }

    // Sanity-check the converted lengths before forwarding.
    if ((NRU_ZERO > iVideoLen || RTP_FRAME_CACHE_SIZE < iVideoLen)
        || (NRU_ZERO > iAudioLen || MAX_AUDIO_LENGTH < iAudioLen))
    {
        m_pWritePos = m_pRtpFrameCache;
        m_pExtendHeader->reset();
        ERROR_LOG("Parse PS packet to ES fail and discard curr frame, ulVideoTimeTick: %d,iVideoLen:%d,iAudioLen:%d",m_ulVideoTimeTick,iVideoLen,iAudioLen);
        return;
    }

    // The ES buffer holds video first, then audio.
    if (0 != iVideoLen)
    {
        (void)sendVideoFrame((char *)pFrame, iVideoLen);
    }

    if (0 != iAudioLen)
    {
        (void)sendAudioFrame((char *)(pFrame + iVideoLen), iAudioLen);
    }

    // Reset the cache write position for the next frame.
    m_pWritePos = m_pRtpFrameCache;

    m_pExtendHeader->reset();
    return;
}