//
// Block until new (unread) data is available for this A/V stream half.
//
// Returns 1 immediately when either this stream's packet queue, or its
// complement stream's queue (video<->audio), has advanced past the last
// observed write index; otherwise falls through to a blocking wait on this
// stream's queue and returns pktqueue_waitforunreaddata()'s result.
//
// pData - the audio or video half of a STREAM_PES_T pair
//
static int waitfordata(STREAM_AV_DATA_T *pData) {
  PKTQUEUE_T *pQ = (PKTQUEUE_T *) pData->pDataSrc->pCbData;
  STREAM_AV_DATA_T *pDataComplement = NULL;
  uint32_t qWrIdx;
  uint32_t qWrIdxComplement = 0;
  int rc = 1;

  qWrIdx = pktqueue_havepkt(pQ);
  //fprintf(stderr, "waitfordata called for %s qwridx:%d/%d\n", pData == &((STREAM_PES_T *)pData->pPes)->vid ? "vid" : "aud", qWrIdx, pData->lastQWrIdx);

  // Our own queue has a packet beyond what we last consumed - no need to wait
  if(qWrIdx != 0 && qWrIdx != pData->lastQWrIdx) {
    return 1;
  }

  // Check the complement (other half of the A/V pair) queue, but only if it
  // has an active reader attached
  pDataComplement = getComplement(pData);
  if(pDataComplement && ((PKTQUEUE_T *) pDataComplement->pDataSrc->pCbData)->haveRdr) {

    qWrIdxComplement = pktqueue_havepkt((PKTQUEUE_T *) pDataComplement->pDataSrc->pCbData);
    //fprintf(stderr, "waitfordata complement %s qwridx:%d/%d\n", pDataComplement == &pPes->vid ? "vid" : "aud", qWrIdxComplement, pDataComplement->lastQWrIdx);

    if(qWrIdxComplement != 0 && qWrIdxComplement != pDataComplement->lastQWrIdx) {
      // Remember how far the complement writer had gotten so we only wake for
      // genuinely new complement data next time
      pDataComplement->lastQWrIdx = qWrIdxComplement;
      return 1;
    }
  }

  VSX_DEBUGLOG3("waiting for data %s qwridx:%d/%d, comp qwridx:%d\n",
      pData == &((STREAM_PES_T *)pData->pPes)->vid ? "vid" : "aud",
      qWrIdx, pData->lastQWrIdx, qWrIdxComplement);

  // Nothing new anywhere - block until this queue signals unread data
  rc = pktqueue_waitforunreaddata(pQ);

  // Fix: the original log passed '&((STREAM_PES_T *)pData->pPes)->vid ? ...'
  // (address-of is always non-NULL), so it unconditionally printed "vid".
  // Compare against pData as the other log statements do.
  VSX_DEBUGLOG3("waiting for data %s done:%d\n",
      pData == &((STREAM_PES_T *)pData->pPes)->vid ? "vid" : "aud", rc);

  return rc;
}
//
// Main UDP/RTP transmit loop.
//
// Iterates the linked list of transmit nodes, and for each node that reports
// it can send (cbCanSend > 0): optionally transmits the prepared RTP packet to
// every destination (raw queue or RTP w/ periodic RTCP sender reports), feeds
// the RTP payload to any live-queue subscribers, invokes the record and
// post-processing (e.g. http live segmentation) callbacks, then asks the node
// to prepare its next packet.  Tracks per-interval byte/packet counters for
// the optional bandwidth descriptor and enforces duration / license time
// limits.  Loops while *pList->prunning == STREAMER_STATE_RUNNING.
//
// pList       - head of the transmit node list (also carries shared state:
//               run flag, sleep queue, bw descriptor, license)
// durationSec - if > 0, stop streaming after this many seconds
//
// Returns -1 on error or when a sender reports end-of-stream.
//
int stream_udp(STREAM_XMIT_NODE_T *pList, double durationSec) {
  STREAM_XMIT_NODE_T *pStream;
  COLLECT_STREAM_PKTDATA_T collectPkt;
  int triedXmit;                      // set when any work was done this pass; gates the idle sleep
  unsigned int idxDest;
  TIME_VAL tmstart, tm1, tmnow;
  TIME_VAL tmprevbwdescr;             // last time the bw descriptor was updated
  int rc;
  int sz;
  int szPkt;
  unsigned int szData;
  const unsigned char *pData;         // full RTP packet (incl. header)
  unsigned int szDataPayload;
  const unsigned char *pDataPayload;  // payload past the RTP header
  uint64_t totBytes = 0;
  unsigned int totPkts = 0;
  unsigned int bytes = 0;
  unsigned int pkts = 0;
  unsigned int bytes2 = 0;            // per-bw-descriptor-interval counters
  unsigned int pkts2 = 0;
  //char dstStr[64];
#ifndef WIN32
  unsigned int countIterations = 0;
#endif // WIN32

  if(!pList
#if defined(VSX_HAVE_LICENSE)
     || !pList->pLic
#endif // VSX_HAVE_LICENSE
    ) {
    return -1;
  }

  //snprintf(dstStr, sizeof(dstStr), "%s:%d", inet_ntoa(pList->pRtpMulti->pdests[0].saDsts.sin_addr),
  //         ntohs(pList->pRtpMulti->pdests[0].saDsts.sin_port));

  tmstart = tmnow = timer_GetTime();
  tm1 = tmstart;
  tmprevbwdescr = tmstart;

  while(*pList->prunning == STREAMER_STATE_RUNNING && !g_proc_exit) {

    pStream = pList;
    triedXmit = 0;

    while(pStream) {

      if((rc = pStream->cbCanSend(pStream->pCbData)) > 0) {

        pData = stream_rtp_data(pStream->pRtpMulti);
        szData = stream_rtp_datalen(pStream->pRtpMulti);
        if(szData >= RTP_HEADER_LEN) {
          pDataPayload = pData + RTP_HEADER_LEN;
          szDataPayload = szData - RTP_HEADER_LEN;
        } else {
          pDataPayload = NULL;
          szDataPayload = 0;
        }

        if(pStream->pXmitAction->do_stream) {

          triedXmit = 1;
          totPkts++;
          pkts++;
          pkts2++;

          if(pStream->pXmitAction->do_output) {

            szPkt = 0;
            for(idxDest = 0; idxDest < pStream->pRtpMulti->numDests; idxDest++) {

              // Skip destinations that previously failed
              if(pStream->pXmitDestRc[idxDest] != 0) {
                continue;
              }

              if(pStream->rawSend) {
                if(pktgen_Queue(pData, szData) != 0) {
                  pStream->pXmitDestRc[idxDest] = -1;
                  sz = -1;
                } else {
                  sz = szData;
                }
              } else {

                //
                // Check and send an rtcp sender report
                //
                if(*pStream->pfrtcp_sr_intervalsec > 0 &&
                   (tmnow - pStream->pRtpMulti->pdests[idxDest].tmLastRtcpSr) / TIME_VAL_MS >
                   *pStream->pfrtcp_sr_intervalsec * 1000) {
                  sendRtcpSr(pStream, idxDest);
                  pStream->pRtpMulti->pdests[idxDest].tmLastRtcpSr = tmnow;
                }

                if((sz = sendPktUdpRtp(pStream, idxDest, pData, szData)) < 0) {
                  pStream->pXmitDestRc[idxDest] = sz;
                }
              }

              // Remember the first successful send size for byte accounting
              if(szPkt == 0 && sz > 0) {
                szPkt = sz;
              }

            } // end of for

            // Exit if there are no good transmitters in the list
            if(pStream->pXmitAction->do_output && szPkt == 0) {
              return -1;
            }

          } else { // if do_output
            szPkt = szData;
          }

          if(!pStream->pXmitAction->do_output_rtphdr && szPkt > RTP_HEADER_LEN) {
            szPkt -= RTP_HEADER_LEN;
          }
          totBytes += szPkt;
          bytes += szPkt;

          //
          // Add the packet data to any outbound avclive subscribers
          //
          // TODO: do not hardcode szData > RTP_HEADER_LEN rtp hdr len
          if(pStream->pLiveQ && pStream->pLiveQ->numActive > 0 && szDataPayload > 0) {
            pthread_mutex_lock(&pStream->pLiveQ->mtx);
            // NOTE: sz is reused here as a loop index over the subscriber queues
            for(sz = 0; (unsigned int) sz < pStream->pLiveQ->max; sz++) {
              if(pStream->pLiveQ->pQs[sz]) {
                pktqueue_addpkt(pStream->pLiveQ->pQs[sz], pDataPayload, szDataPayload, NULL, 0);
              }
            }
            pthread_mutex_unlock(&pStream->pLiveQ->mtx);
          }

          bytes2 += szPkt;

        } else {
          // preserve rtp sequence number during 'live pause'
          //pStream->pRtp->m_pRtp->sequence_num =
          //  htons(htons(pStream->pRtp->m_pRtp->sequence_num) - 1);

          //During 'live pause', update the last seq #
          //if(pStream->pXmitAction->prior_do_stream && pStream->prtp_sequence_at_end) {
          //  *pStream->prtp_sequence_at_end = pStream->pRtp->m_pRtp->sequence_num;
          //}
          //fprintf(stderr, "not streaming\n");
        }

        //
        // Record output stream
        //
        if(pStream->pXmitAction->do_record_post && pStream->pXmitCbs->cbRecord &&
           pStream->pXmitCbs->pCbRecordData) {

          memset(&collectPkt, 0, sizeof(collectPkt));
          collectPkt.payload.pData = (unsigned char *) pDataPayload;
          PKTCAPLEN(collectPkt.payload) = szDataPayload;

          if((sz = pStream->pXmitCbs->cbRecord(pStream->pXmitCbs->pCbRecordData,
                                               &collectPkt)) < 0) {
            return -1;
          }
          if(triedXmit == 0) {
            triedXmit = 1;
          }
        }

        //
        // Call post processing function, such as http live streaming
        // callback to segment and package output ts files
        //
        if(pStream->pXmitAction->do_httplive &&
           pStream->pXmitCbs->cbPostProc && pStream->pXmitCbs->pCbPostProcData &&
           pStream->pXmitAction->do_stream) {

          if((sz = pStream->pXmitCbs->cbPostProc(pStream->pXmitCbs->pCbPostProcData,
                                                 pDataPayload, szDataPayload)) < 0) {
            return -1;
          }
          if(triedXmit == 0) {
            triedXmit = 1;
          }
        }

        //if(pStream->pXmitAction->do_stream != pStream->pXmitAction->prior_do_stream) {
        //  pStream->pXmitAction->prior_do_stream = pStream->pXmitAction->do_stream;
        //}

        // Packet consumed - have the node build its next one
        pStream->pRtpMulti->payloadLen = 0;
        if((rc = pStream->cbPreparePkt(pStream->pCbData)) < 0) {
          return -1;
        }
        //fprintf(stderr, "streamer prepare pkt returned %d\n", rc);

      } else if(rc < 0) {
        LOG(X_DEBUG("Stream ending, sent: %"LL64"u bytes %u pkts"), totBytes, totPkts);
        return -1;
      } else {
        //fprintf(stderr, "streamer cansend rc:%d\n", rc);
      }

      pStream = pStream->pNext;
    } // while(pStream)

    pktgen_SendQueued();

    // Idle / throttle handling.  NOTE(review): the if/else braces below
    // deliberately straddle the #ifdef WIN32 boundary - both branches of the
    // preprocessor supply matching 'if(triedXmit == 0) { ... } else { ... }'
    // arms; keep the structure intact when editing.
    if(triedXmit == 0) {
#ifdef WIN32
      //sl1 = timer_GetTime();
      //Sleep(1) may sleep past its short bedtime on win32, even from a thread w/ SCHED_RR
      //However, sleep(0) uses alot more cpu slices
      //TODO: make this a WaitForSingleObject for any registered pktqueue writers
      if(pList->pSleepQ) {
        //TODO: this does not return if a pkt has been queued and no subsequent pkt arrives
        pktqueue_waitforunreaddata(pList->pSleepQ);
        //pthread_cond_wait(pList->pCond, pList->pMtxCond);
      } else {
        Sleep(1);
      }
    } else {
      // On windows Sleep 0 relinquishes execution for any waiting threads
      Sleep(0);
#else // WIN32
      VSX_DEBUG2(tmnow = timer_GetTime())
      usleep(1000);
      countIterations = 0;
      // NOTE(review): elsewhere timer_GetTime() deltas are scaled w/ TIME_VAL_US,
      // so the "ns" in this debug label may actually be microseconds - confirm
      VSX_DEBUGLOG3("stream_udp slept for %lld ns\n", timer_GetTime() - tmnow);
    } else {
      if(countIterations++ > 10000) {
        // During continuous xmit, sleep to prevent unresponsive system
        usleep(1);
        countIterations = 0;
      }
#endif // WIN32
    }

    tmnow = timer_GetTime();

    //
    // Publish per-interval bandwidth stats roughly every 2 seconds
    //
    if(pList->pBwDescr && (tmnow / TIME_VAL_US) > (tmprevbwdescr / TIME_VAL_US) + 1) {
      pList->pBwDescr->intervalMs = (float)(tmnow - tmprevbwdescr)/ TIME_VAL_MS;
      pList->pBwDescr->pkts = pkts2;
      pList->pBwDescr->bytes = bytes2;
      TV_FROM_TIMEVAL(pList->pBwDescr->updateTv, tmnow);
      //pList->pBwDescr->updateTv.tv_sec = tmnow / TIME_VAL_US;
      //pList->pBwDescr->updateTv.tv_usec = tmnow % TIME_VAL_US;
      bytes2 = 0;
      pkts2 = 0;
      tmprevbwdescr = tmnow;
    }

    // Caller-requested duration limit
    if(durationSec > 0 && tmnow > tmstart + (durationSec * TIME_VAL_US)) {
      LOG(X_DEBUG("Stream duration %.1f sec limit reached"), durationSec);
      *pList->prunning = STREAMER_STATE_FINISHED;
    }

#if defined (VSX_HAVE_LICENSE)
    // Check if stream time is limited
    if(!(pList->pLic->capabilities & LIC_CAP_STREAM_TIME_UNLIMITED)) {
      if(tmnow > tmstart + (STREAM_LIMIT_SEC * TIME_VAL_US)) {
        LOG(X_INFO("Stream time limited. Stopping stream transmission after %d sec"),
            (int) (tmnow - tmstart) / TIME_VAL_US);
        *pList->prunning = STREAMER_STATE_FINISHED;
        if(!(g_proc_exit_flags & PROC_EXIT_FLAG_NO_EXIT_ON_STREAM_TIME_LIMITED)) {
          g_proc_exit = 1;
        }
      }
    }
#endif // VSX_HAVE_LICENSE

#if defined(LITE_VERSION)
    if(tmnow > tmstart + (STREAM_LIMIT_LITE_SEC * TIME_VAL_US)) {
      LOG(X_INFO("Stream time limited. Stopping stream transmission after %d sec"),
          (int) (tmnow - tmstart) / TIME_VAL_US);
      *pList->prunning = STREAMER_STATE_FINISHED;
      if(!(g_proc_exit_flags & PROC_EXIT_FLAG_NO_EXIT_ON_STREAM_TIME_LIMITED)) {
        g_proc_exit = 1;
      }
    }
#endif // (LITE_VERSION)

    /*
    if(0 && pList->verbosity > 1 && tv2.tv_sec > tv1.tv_sec+3) {
      elapsedMs0 = ((tv2.tv_sec - tv0.tv_sec) * 1000) + ((tv2.tv_usec - tv0.tv_usec) /1000);
      elapsedMs1 = ((tv2.tv_sec - tv1.tv_sec) * 1000) + ((tv2.tv_usec - tv1.tv_usec) /1000);
      fprintf(stdout, "%u", elapsedMs0/1000);
      if(durationSec != 0) {
        fprintf(stdout, "/%.1f", durationSec);
      }
      fprintf(stdout, " sec, %s %.1fKb/s %.1fpkts/s (total: %u pkts, %.1fKB, %.1fKb/s)",
        dstStr,
        (double)(bytes / 128.0f / ((double)elapsedMs1/1000.0f)),
        (double)(pkts/ ((double)elapsedMs1/1000.0f)),
        totPkts,
        (double)totBytes/1024.0f,
        (double)(totBytes / 128.0f / ((double)elapsedMs0/1000.0f)));
      fprintf(stdout, "\n");
      bytes = 0;
      pkts = 0;
      tv1.tv_sec = tv2.tv_sec;
      tv1.tv_usec = tv2.tv_usec;
    }
    */

  }
//
// Advance to the next frame of an audio or video stream half read from a
// packet queue.
//
// Waits (via waitfordata) for new queue data, pulls the next packet with the
// zero-copy swap-slot protocol, shallow-copies its descriptor into
// pXcodeData->curFrame, and fills in the caller's out-params: *plen, *pPts,
// *pkeyframeIn, pArg->isvid and pArg->codecType.
//
// Returns STREAM_NET_ADVFR_RC_OK (or a reset code from checktiming),
// _RC_NOCONTENT when no data source is attached, _RC_NOTAVAIL when the queue
// has no packet, _RC_ERROR on a slot-swap failure.
//
enum STREAM_NET_ADVFR_RC stream_net_av_advanceFrame(STREAM_NET_ADVFR_DATA_T *pArg) {
  STREAM_AV_DATA_T *pData = (STREAM_AV_DATA_T *) pArg->pArgIn;
  STREAM_PES_T *pPes = (STREAM_PES_T *) pData->pPes;
  enum STREAM_NET_ADVFR_RC rc = STREAM_NET_ADVFR_RC_OK;
  PKTQUEUE_T *pQ;
  const PKTQUEUE_PKT_T *pQPkt;

  // No data source attached - report no content
  if(!pData->pDataSrc || !pData->pDataSrc->pCbData) {
    if(pArg->plen) {
      *pArg->plen = 0;
    }
    return STREAM_NET_ADVFR_RC_NOCONTENT;
  }

  if(pArg->pkeyframeIn) {
    *pArg->pkeyframeIn = 0;
  }

  //fprintf(stderr, "av_advanceFrame called for %s stype:0x%x\n", pData == &pPes->vid ? "vid" : "aud", pData->pXcodeData->inStreamType);

  // Block until this queue (or its A/V complement) has unread data
  waitfordata(pData);

  pQ = (PKTQUEUE_T *) pData->pDataSrc->pCbData;

  if(!pktqueue_havepkt(pQ) || !(pQPkt = pktqueue_readpktdirect(pQ))) {
    //fprintf(stderr, "ad_advanceFrame NOTAVAIL qid:%d haveData:%d wr-1:%d\n", pQ->cfg.id, pQ->haveData, pQ->uniqueWrIdx - 1);
    pktqueue_readpktdirect_done(pQ);
    return STREAM_NET_ADVFR_RC_NOTAVAIL;
  }

  //
  // Avoid memcpy of the frame data
  //
  if(pktqueue_swapreadslots(pQ, &pData->pXcodeData->curFrame.pSwappedSlot) < 0) {
    LOG(X_ERROR("Failed to swap slot in queue id:%d"), pQ->cfg.id);
    pktqueue_readpktdirect_done(pQ);
    return STREAM_NET_ADVFR_RC_ERROR;
  } else {
    pQPkt = pData->pXcodeData->curFrame.pSwappedSlot;
  }

  //fprintf(stderr, "stream_net_av advanceFr pQ[%d] len:%d\n", pQ->cfg.id, pQPkt->len);

  // Note this is just a shallow copy, not of the frame data contents
  memcpy(&pData->pXcodeData->curFrame.pkt, pQPkt, sizeof(pData->pXcodeData->curFrame.pkt));

  //fprintf(stderr, "ad_advanceFrame got fr len:%d wrIdx:%d pQ->userDataType:0x%x\n", pQPkt->len,pData->pXcodeData->curFrame.pkt.idx, pQ->cfg.userDataType);

  pData->pXcodeData->curFrame.idxReadInFrame = 0;
  pData->pXcodeData->curFrame.idxReadFrame = pQ->idxRd;

  //fprintf(stderr, "AVREAD PKT FROM Q pts:%.3f dts:%.3f\n", PTSF(pData->pXcodeData->curFrame.pkt.xtra.tm.pts), PTSF(pData->pXcodeData->curFrame.pkt.xtra.tm.dts));
  pktqueue_readpktdirect_done(pQ);

#if 1
  //
  // If we're reading video & audio from live capture, and the input video
  // sequence headers have not yet been set, then just keep advancing the
  // audio queue rdr position, otherwise, we keep on queing audio frames,
  // then when the vid seq start has been detected, the audio frames will be
  // read, but by then it's possible the audio queue has been reset,
  // overwriting some 'to-be-played' audio frames
  //
  //fprintf(stderr, "TRY... vid:%d, xcodevid:%d, vid in-seq:%d, complement:0x%x\n", IS_STREAM_PES_DATA_VID(pData), pData->pXcodeData->piXcode->vid.common.cfgDo_xcode, ((STREAM_XCODE_VID_UDATA_T *) pData->pXcodeData->piXcode->vid.pUserData)->haveSeqStart, getComplement(pData));

  STREAM_AV_DATA_T *pDataComplement;
  PKTQUEUE_T *pQComplement;

  if(IS_STREAM_PES_DATA_VID(pData) && pData->pXcodeData->piXcode->vid.common.cfgDo_xcode &&
     !((STREAM_XCODE_VID_UDATA_T *) pData->pXcodeData->piXcode->vid.pUserData)->haveSeqStart &&
     (pDataComplement = getComplement(pData)) &&
     (pQComplement = (PKTQUEUE_T *) pDataComplement->pDataSrc->pCbData)) {

    // Only retain the last 'x' elements in the queue... to prevent any overwrite / reset
    //fprintf(stderr, "Setting av complement reader to catchup. haveFrameTmOffset:%d %.3f\n", pDataComplement->pXcodeData->haveFrameTmStartOffset, PTSF(pDataComplement->pXcodeData->frameTmStartOffset));
    pktqueue_setrdr(pQComplement, 1);

    if(!pDataComplement->pXcodeData->haveFrameTmStartOffset) {

      // Take the complement queue's mutex while reading its rd/wr indexes and
      // adopting the pts of its current read position as the start offset
      pthread_mutex_lock(&pQComplement->mtx);

      if(pQComplement->idxRd != pQComplement->idxWr) {

        LOG(X_DEBUG("Setting av %s stream "MP2PES_STREAMTYPE_FMT_STR" pts start offset from %.3f to %.3f"),
            IS_STREAM_PES_DATA_VID(pData) ? "audio" : "video",
            MP2PES_STREAMTYPE_FMT_ARGS(pData->pXcodeData->inStreamType),
            PTSF(pData->pXcodeData->frameTmStartOffset),
            PTSF(pQComplement->pkts[pQComplement->idxRd].xtra.tm.pts));

        LOG(X_DEBUG("Setting av complement to idxRd:%d, idxWr:%d, rd:%.3f, wr:%.3f, wr-1:%.3f"),
            pQComplement->idxRd, pQComplement->idxWr,
            PTSF(pQComplement->pkts[pQComplement->idxRd].xtra.tm.pts),
            PTSF(pQComplement->pkts[pQComplement->idxWr].xtra.tm.pts),
            PTSF(pQComplement->pkts[pQComplement->idxWr == 0 ? pQComplement->cfg.maxPkts-1 : pQComplement->idxWr -1].xtra.tm.pts));

        pDataComplement->pXcodeData->frameTmStartOffset =
                         pQComplement->pkts[pQComplement->idxRd].xtra.tm.pts;
        pDataComplement->pXcodeData->haveFrameTmStartOffset = 1;
      }

      pthread_mutex_unlock(&pQComplement->mtx);
    }
    //pDataComplement->pXcodeData->frameTmStartOffset = pData->pXcodeData->frameTmStartOffset;
  }
#endif // 1

  VSX_DEBUGLOG3("lastQWrIdx now:%d\n", pData->lastQWrIdx);

  memcpy(&pData->curPesTm.qtm, &pData->pXcodeData->curFrame.pkt.xtra.tm,
         sizeof(pData->curPesTm.qtm));

  // Do not use PKTQUEUE_T pkt contents directly to allow for any
  // prebuf contents such as SPS / PPS packaged w/ each I-frame
  pData->pXcodeData->curFrame.pData = pData->pXcodeData->curFrame.pkt.pData;
  pData->pXcodeData->curFrame.lenData = pData->pXcodeData->curFrame.pkt.len;

  pArg->isvid = (pData == &pPes->vid) ? 1 : 0;
  if(pArg->plen) {
    *pArg->plen = pData->pXcodeData->curFrame.lenData;
  }

  // Reset the A/V pair on a timing gap or a backwards timestamp jump
  //if((rc = checktiming(pData)) == STREAM_NET_ADVFR_RC_RESET_TMGAP) {
  if((rc = checktiming(pData)) == STREAM_NET_ADVFR_RC_RESET_TMGAP ||
     rc == STREAM_NET_ADVFR_RC_RESET_TMBKWD) {
    stream_net_av_reset(pData->pPes);
  }

  if(pArg->pPts) {
    *pArg->pPts = xcode_getFrameTm(pData->pXcodeData, 0, 0);
    //fprintf(stderr, "AV curF: pts:%.3f dts:%.3f start:%.3f, fr:%.3f (%llu)\n", PTSF(pData->pXcodeData->curFrame.pkt.xtra.tm.pts), PTSF(pData->pXcodeData->curFrame.pkt.xtra.tm.dts), PTSF(pData->pXcodeData->frameTmStartOffset), PTSF(*pArg->pPts), *pArg->pPts);
  }

  if(pArg->pkeyframeIn && (pData->pXcodeData->curFrame.pkt.flags & PKTQUEUE_FLAG_KEYFRAME)) {
    //k(pData->pXcodeData->curFrame.pkt.xtra.flags & CAPTURE_SP_FLAG_KEYFRAME)) {
    *pArg->pkeyframeIn = 1;
  } else {
    *pArg->pkeyframeIn = 0;
  }

  pArg->codecType = pQ->cfg.userDataType;

  //LOG(X_DEBUG("av_advanceFrame %s key:%d rc:%d pts:%.3f (%.3f) (dts:%.3f) start:%.3f len:%u, Q[rd:%d,wr:%d/%d]"), pData == &pPes->vid ? "vid" : "aud", *pArg->pkeyframeIn, rc, PTSF(*pArg->pPts), PTSF(pData->pXcodeData->curFrame.pkt.xtra.tm.pts), PTSF(pData->pXcodeData->curFrame.pkt.xtra.tm.dts), PTSF(pData->pXcodeData->frameTmStartOffset), pData->pXcodeData->curFrame.lenData, pQ->idxRd, pQ->idxWr, pQ->cfg.maxPkts);

  return rc;
}