Example #1
void DeviceSource::deliverFrame() {
  // This would be called when new frame data is available from the device.
  // This function should deliver the next frame of data from the device,
  // using the following parameters (class members):
  // 'in' parameters (these should *not* be modified by this function):
  //     fTo: The frame data is copied to this address.
  //         (Note that the variable "fTo" is *not* modified.  Instead,
  //          the frame data is copied to the address pointed to by "fTo".)
  //     fMaxSize: This is the maximum number of bytes that can be copied
  //         (If the actual frame is larger than this, then it should
  //          be truncated, and "fNumTruncatedBytes" set accordingly.)
  // 'out' parameters (these are modified by this function):
  //     fFrameSize: Should be set to the delivered frame size (<= fMaxSize).
  //     fNumTruncatedBytes: Should be set iff the delivered frame would have been
  //         bigger than "fMaxSize", in which case it's set to the number of bytes
  //         that have been omitted.
  //     fPresentationTime: Should be set to the frame's presentation time
  //         (seconds, microseconds).
  //     fDurationInMicroseconds: Should be set to the frame's duration, if known.
  if (!isCurrentlyAwaitingData()) return; // we're not ready for the data yet

  // Deliver the data here:

  // After delivering the data, inform the reader that it is now available:
  FramedSource::afterGetting(this);
}
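For reference, here is a minimal sketch of the delivery step that the skeleton above leaves blank, following the contract spelled out in its comments (truncate against fMaxSize, set the out-parameters, copy to the address in fTo, then signal completion). The names newFrameDataStart and newFrameSize are hypothetical stand-ins for however your device exposes its current frame:

// Hypothetical fill-in for "Deliver the data here:" above; a sketch, not
// a complete implementation.
u_int8_t* newFrameDataStart = /* device-specific */ NULL;
unsigned newFrameSize = /* device-specific */ 0;

if (newFrameSize > fMaxSize) {
  fFrameSize = fMaxSize;
  fNumTruncatedBytes = newFrameSize - fMaxSize;
} else {
  fFrameSize = newFrameSize;
  fNumTruncatedBytes = 0;
}
gettimeofday(&fPresentationTime, NULL); // or a more accurate encoder timestamp
memmove(fTo, newFrameDataStart, fFrameSize);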
Example #2
void LiveAudioStreamSource::streamData(StreamData &data)
{
    // check if we're ready for data
    if (!isCurrentlyAwaitingData()) {
        envir() << "WARN: LiveAudioStreamSource is not ready for data yet\n";
        return;
    }

    fPresentationTime = data.tstamp;
    fFrameSize = 0;
    fNumTruncatedBytes = 0;
    for (int i = 0; i < (int)data.pack_count; i++) {
        uint8_t *ptr = data.pack[i].addr;
        uint32_t len = data.pack[i].len;

        if ((ptr == NULL) || (len == 0))
            break;

        if (fNumTruncatedBytes == 0 && fFrameSize + len <= fMaxSize) {
            memmove(&fTo[fFrameSize], ptr, len);
            fFrameSize += len;
        }
        else {
            // Once a pack has been dropped, count all remaining bytes as
            // truncated rather than delivering them out of order:
            fNumTruncatedBytes += len;
        }
    }

    // After delivering the data, inform the reader that it is now available:
    FramedSource::afterGetting(this);
}
Example #3
void BasicUDPSource::incomingPacketHandler1() {
  if (!isCurrentlyAwaitingData()) return; // we're not ready for the data yet

  // Read the packet into our desired destination:
  struct sockaddr_in fromAddress;
  if (!fInputGS->handleRead(fTo, fMaxSize, fFrameSize, fromAddress)) return;

  // Tell our client that we have new data:
  afterGetting(this); // we're preceded by a net read; no infinite recursion
}
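A note on that last comment: FramedSource::afterGetting() may synchronously re-invoke doGetNextFrame(), so a source that can always satisfy the request immediately risks unbounded recursion. Here the read is driven by a network event, so the direct call is safe; when it is not, the callback can be deferred through the task scheduler, as Example #4 below does:

// Sketch: defer the completion callback so the stack unwinds before the
// next doGetNextFrame() call (the same call Example #4 uses):
nextTask() = envir().taskScheduler().scheduleDelayedTask(
    0, (TaskFunc*)FramedSource::afterGetting, this);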
Example #4
void StreamDeviceSource::deliverFrame() {
  // This function is called when new frame data is available from the device.
  // We deliver this data by copying it to the 'downstream' object, using the following parameters (class members):
  // 'in' parameters (these should *not* be modified by this function):
  //     fTo: The frame data is copied to this address.
  //         (Note that the variable "fTo" is *not* modified.  Instead,
  //          the frame data is copied to the address pointed to by "fTo".)
  //     fMaxSize: This is the maximum number of bytes that can be copied
  //         (If the actual frame is larger than this, then it should
  //          be truncated, and "fNumTruncatedBytes" set accordingly.)
  // 'out' parameters (these are modified by this function):
  //     fFrameSize: Should be set to the delivered frame size (<= fMaxSize).
  //     fNumTruncatedBytes: Should be set iff the delivered frame would have been
  //         bigger than "fMaxSize", in which case it's set to the number of bytes
  //         that have been omitted.
  //     fPresentationTime: Should be set to the frame's presentation time
  //         (seconds, microseconds).  This time must be aligned with 'wall-clock time' - i.e., the time that you would get
  //         by calling "gettimeofday()".
  //     fDurationInMicroseconds: Should be set to the frame's duration, if known.
  //         If, however, the device is a 'live source' (e.g., encoded from a camera or microphone), then we probably don't need
  //         to set this variable, because - in this case - data will never arrive 'early'.
  // Note the code below.
    
  if (!isCurrentlyAwaitingData()) return; // we're not ready for the data yet

  // Allocate a temporary buffer (once) for reading from the circular buffer:
  static void *buffer = NULL;
  if (buffer == NULL) {
    buffer = malloc(8192);
  }
  // By specifying length = 8192 we tell discardBytesFromGlobalCircularBuffer
  // that it may copy at most 8192 bytes:
  int32_t length = 8192;
  discardBytesFromGlobalCircularBuffer(&buffer, &length);

  u_int8_t* newFrameDataStart = (u_int8_t*)buffer;
  unsigned newFrameSize = length;

  // Deliver the data here:
  if (newFrameSize > fMaxSize) {
    fFrameSize = fMaxSize;
    fNumTruncatedBytes = newFrameSize - fMaxSize;
  } else {
    fFrameSize = newFrameSize;
  }
  gettimeofday(&fPresentationTime, NULL); // If you have a more accurate time - e.g., from an encoder - then use that instead.
  // If the device is *not* a 'live source' (e.g., it comes instead from a file or buffer), then set "fDurationInMicroseconds" here.
  memmove(fTo, newFrameDataStart, fFrameSize);

  // After delivering the data, inform the reader that it is now available:
  nextTask() = envir().taskScheduler().scheduleDelayedTask(0, (TaskFunc*)FramedSource::afterGetting, this);
}
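A deliverFrame() like this is normally paired with a signalling hook that the capture thread fires when new data arrives. A minimal sketch in the spirit of live555's DeviceSource template, assuming a static eventTriggerId previously created with createEventTrigger() and a handler that calls deliverFrame():

// Hypothetical companion to deliverFrame(): called (possibly from another
// thread) when the device has produced a new frame. triggerEvent() is
// documented as safe to call from outside the event loop's thread.
void StreamDeviceSource::signalNewFrameData(UsageEnvironment* env,
                                            StreamDeviceSource* source) {
  if (env != NULL && source != NULL) {
    env->taskScheduler().triggerEvent(StreamDeviceSource::eventTriggerId, source);
  }
}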
Example #5
void T140IdleFilter::afterGettingFrame(unsigned frameSize,
				       unsigned numTruncatedBytes,
				       struct timeval presentationTime,
				       unsigned durationInMicroseconds) {
  // First, cancel any pending idle timer:
  envir().taskScheduler().unscheduleDelayedTask(fIdleTimerTask);

  // Then note the new data that we have in our buffer:
  fNumBufferedBytes = frameSize;
  fBufferedNumTruncatedBytes = numTruncatedBytes;
  fBufferedDataPresentationTime = presentationTime;
  fBufferedDataDurationInMicroseconds = durationInMicroseconds;

  // Then, attempt to deliver this data.  (If we can't deliver it now, we'll do so the next time the reader asks for data.)
  if (isCurrentlyAwaitingData()) (void)deliverFromBuffer();
}
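The arm side of the idle timer cancelled above is not shown. A sketch of how such a timer is typically (re)armed when the reader asks for data; only scheduleDelayedTask()/unscheduleDelayedTask() and TaskToken are live555 API here, the other names are illustrative:

// Hypothetical arm-side of the idle timer cancelled in afterGettingFrame():
static void handleIdleTimeout(void* clientData) {
  // On timeout, a T.140 filter would deliver an empty/idle frame downstream.
}

void armIdleTimer(UsageEnvironment& env, TaskToken& idleTimerTask,
                  unsigned idleTimeoutMicroseconds, void* clientData) {
  idleTimerTask = env.taskScheduler().scheduleDelayedTask(
      idleTimeoutMicroseconds, handleIdleTimeout, clientData);
}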
Example #6
void LiveAudioStreamSource::deliverFrame()
{
    if (!isCurrentlyAwaitingData()) return; // we're not ready for the data yet

    AUDIO_STREAM_S stStream;
    HI_S32 s32Ret;

    s32Ret = HI_MPI_AENC_GetStream(fChannelNo, &stStream, HI_FALSE);
    if (HI_SUCCESS != s32Ret)
    {
        g_critical("HI_MPI_AENC_GetStream failed with %#x!\n", s32Ret);
        return;
    }

    fPresentationTime.tv_sec = stStream.u64TimeStamp / 1000000UL;
    fPresentationTime.tv_usec = stStream.u64TimeStamp % 1000000UL;

    unsigned payloadSize = stStream.u32Len - 4; // skip the 4-byte stream header
    if (payloadSize <= fMaxSize) {
        fFrameSize = payloadSize;
        fNumTruncatedBytes = 0;
    }
    else {
        fFrameSize = fMaxSize;
        fNumTruncatedBytes = payloadSize - fMaxSize;
    }
    memmove(fTo, stStream.pStream + 4, fFrameSize);

    s32Ret = HI_MPI_AENC_ReleaseStream(fChannelNo, &stStream);
    if (HI_SUCCESS != s32Ret)
    {
        g_critical("HI_MPI_AENC_ReleaseStream failed with %#x!\n", s32Ret);
    }

    // After delivering the data, inform the reader that it is now available:
    FramedSource::afterGetting(this);
}
Example #7
	virtual void doGetNextFrame()
	{
		if (!_isInitialised)
		{
			_isInitialised = true;
			if (!initialise())
			{
				printf("Video device initialisation failed, stopping.");
				return;
			}
		}

		if (!isCurrentlyAwaitingData()) return;

		DWORD processOutputStatus = 0;
		IMFSample *videoSample = NULL;
		DWORD streamIndex, flags;
		LONGLONG llVideoTimeStamp, llSampleDuration;
		HRESULT mftProcessInput = S_OK;
		HRESULT mftProcessOutput = S_OK;
		MFT_OUTPUT_STREAM_INFO StreamInfo;
		IMFMediaBuffer *pBuffer = NULL;
		IMFSample *mftOutSample = NULL;
		DWORD mftOutFlags;
		bool frameSent = false;

		CHECK_HR(_videoReader->ReadSample(
			MF_SOURCE_READER_FIRST_VIDEO_STREAM,
			0,                              // Flags.
			&streamIndex,                   // Receives the actual stream index. 
			&flags,                         // Receives status flags.
			&llVideoTimeStamp,              // Receives the time stamp.
			&videoSample                    // Receives the sample or NULL.
			), "Error reading video sample.");

		if (videoSample)
		{
			_frameCount++;

			CHECK_HR(videoSample->SetSampleTime(llVideoTimeStamp), "Error setting the video sample time.\n");
			CHECK_HR(videoSample->GetSampleDuration(&llSampleDuration), "Error getting video sample duration.\n");

			// Pass the video sample to the H.264 transform.

			CHECK_HR(_pTransform->ProcessInput(0, videoSample, 0), "The resampler H264 ProcessInput call failed.\n");

			CHECK_HR(_pTransform->GetOutputStatus(&mftOutFlags), "H264 MFT GetOutputStatus failed.\n");

			if (mftOutFlags == MFT_OUTPUT_STATUS_SAMPLE_READY)
			{
				printf("Sample ready.\n");

				CHECK_HR(_pTransform->GetOutputStreamInfo(0, &StreamInfo), "Failed to get output stream info from H264 MFT.\n");

				CHECK_HR(MFCreateSample(&mftOutSample), "Failed to create MF sample.\n");
				CHECK_HR(MFCreateMemoryBuffer(StreamInfo.cbSize, &pBuffer), "Failed to create memory buffer.\n");
				CHECK_HR(mftOutSample->AddBuffer(pBuffer), "Failed to add sample to buffer.\n");

				while (true)
				{
					_outputDataBuffer.dwStreamID = 0;
					_outputDataBuffer.dwStatus = 0;
					_outputDataBuffer.pEvents = NULL;
					_outputDataBuffer.pSample = mftOutSample;

					mftProcessOutput = _pTransform->ProcessOutput(0, 1, &_outputDataBuffer, &processOutputStatus);

					if (mftProcessOutput != MF_E_TRANSFORM_NEED_MORE_INPUT)
					{
						CHECK_HR(_outputDataBuffer.pSample->SetSampleTime(llVideoTimeStamp), "Error setting MFT sample time.\n");
						CHECK_HR(_outputDataBuffer.pSample->SetSampleDuration(llSampleDuration), "Error setting MFT sample duration.\n");

						IMFMediaBuffer *buf = NULL;
						DWORD bufLength;
						CHECK_HR(_outputDataBuffer.pSample->ConvertToContiguousBuffer(&buf), "ConvertToContiguousBuffer failed.\n");
						CHECK_HR(buf->GetCurrentLength(&bufLength), "Get buffer length failed.\n");
						BYTE * rawBuffer = NULL;

						auto now = GetTickCount();

						printf("Writing sample %i, spacing %I64dms, sample time %I64d, sample duration %I64d, sample size %i.\n", _frameCount, now - _lastSendAt, llVideoTimeStamp, llSampleDuration, bufLength);

						// Clamp to the downstream buffer size so the memmove below cannot overrun it:
						fFrameSize = (bufLength <= fMaxSize) ? bufLength : fMaxSize;
						fNumTruncatedBytes = bufLength - fFrameSize;
						fDurationInMicroseconds = 0;
						gettimeofday(&fPresentationTime, NULL);

						buf->Lock(&rawBuffer, NULL, NULL);
						memmove(fTo, rawBuffer, fFrameSize);

						FramedSource::afterGetting(this);

						buf->Unlock();
						SafeRelease(&buf);

						frameSent = true;
						_lastSendAt = GetTickCount();
					}

					SafeRelease(&pBuffer);
					SafeRelease(&mftOutSample);

					break;
				}
			}
			else {
				printf("No sample.\n");
			}

			SafeRelease(&videoSample);
		}

		if (!frameSent)
		{
			envir().taskScheduler().triggerEvent(eventTriggerId, this);
		}

		return;

	done:

		printf("MediaFoundationH264LiveSource doGetNextFrame failed.\n");
	}
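Example #7 relies on a CHECK_HR macro (not shown) that jumps to the done: label on failure. A common definition in Media Foundation sample code looks like the following; this is an assumption, not necessarily the author's exact macro:

// Assumed definition of CHECK_HR: on any failed HRESULT, log and bail out
// to the 'done:' label in the enclosing function.
#define CHECK_HR(hr, msg) if (FAILED(hr)) { printf(msg); goto done; }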
Example #8
void RawPixelSource::deliverFrame()
{
	// This function is called when new frame data is available from the device.
	// We deliver this data by copying it to the 'downstream' object, using the following parameters (class members):
	// 'in' parameters (these should *not* be modified by this function):
	//     fTo: The frame data is copied to this address.
	//         (Note that the variable "fTo" is *not* modified.  Instead,
	//          the frame data is copied to the address pointed to by "fTo".)
	//     fMaxSize: This is the maximum number of bytes that can be copied
	//         (If the actual frame is larger than this, then it should
	//          be truncated, and "fNumTruncatedBytes" set accordingly.)
	// 'out' parameters (these are modified by this function):
	//     fFrameSize: Should be set to the delivered frame size (<= fMaxSize).
	//     fNumTruncatedBytes: Should be set iff the delivered frame would have been
	//         bigger than "fMaxSize", in which case it's set to the number of bytes
	//         that have been omitted.
	//     fPresentationTime: Should be set to the frame's presentation time
	//         (seconds, microseconds).  This time must be aligned with 'wall-clock time' - i.e., the time that you would get
	//         by calling "gettimeofday()".
	//     fDurationInMicroseconds: Should be set to the frame's duration, if known.
	//         If, however, the device is a 'live source' (e.g., encoded from a camera or microphone), then we probably don't need
	//         to set this variable, because - in this case - data will never arrive 'early'.
	// Note the code below.

	//std::cout << "available size: " << fMaxSize << std::endl;

	if (!isCurrentlyAwaitingData())
	{
		//std::cout << this << ": deliver skipped" << std::endl;
		return; // we're not ready for the data yet
	}


	int64_t thisTime = av_gettime();

	//fprintf(myfile, "fMaxSize at beginning of function: %i \n", fMaxSize);
	//fflush(myfile);

	// set the duration of this frame since we have variable frame rate
	// %% Time has to be fixed
	//this->fDurationInMicroseconds = 1000000 / 70;// thisTime - lastFrameTime;

	
	//gettimeofday(&fPresentationTime, NULL); // If you have a more accurate time - e.g., from an encoder - then use that instead.

	//std::cout << this << ": pktBuffer size: " << pktBuffer.size() << std::endl;

	AVPacket pkt;
	if (!pktBuffer.wait_and_pop(pkt))
	{
		// the queue was closed
		return;
	}

	if (pktBuffer.size() == 0)
	{
		AVPacket dummy;
		pktPool.push(dummy);
	}
    
    //std::cout << this << " send" << std::endl;

	// Set the presentation time of this frame (pkt.pts is treated as microseconds here)
	fPresentationTime.tv_sec = pkt.pts / 1000000;
	fPresentationTime.tv_usec = pkt.pts % 1000000;

	//std::cout << fPresentationTime.tv_sec << " " << fPresentationTime.tv_usec << std::endl;

	// live555 does not like start codes, so we remove the start code that
	// precedes every NAL unit. It may be 0x00000001 or 0x000001; detect
	// which form is present and remove it.
	int truncateBytes = 0;
	if (pkt.size >= 4 &&
		pkt.data[0] == 0 &&
		pkt.data[1] == 0 &&
		pkt.data[2] == 0 &&
		pkt.data[3] == 1)
	{
		truncateBytes = 4;
	}
	else if (pkt.size >= 3 &&
		pkt.data[0] == 0 &&
		pkt.data[1] == 0 &&
		pkt.data[2] == 1)
	{
		truncateBytes = 3;
	}

	u_int8_t* newFrameDataStart = (u_int8_t*)(pkt.data + truncateBytes);
	unsigned newFrameSize = pkt.size - truncateBytes;


	u_int8_t nal_unit_type = newFrameDataStart[0] & 0x1F;

	//std::cout << "sent NALU type " << (int)nal_unit_type << " (" << newFrameSize << ")" << std::endl;

	//if (nal_unit_type == 8) // PPS
	//{
	//	envir() << "PPS seen\n";
	//}
	//else if (nal_unit_type == 7) // SPS
	//{
	//	envir() << "SPS seen; siz\n";
	//}
	//else
	//{
	//	//envir() << nal_unit_type << " seen; size: " << frameSize << "\n";
	//}
	

	


	// Deliver the data here:
	if (newFrameSize > fMaxSize)
	{
		fFrameSize = fMaxSize;
		fNumTruncatedBytes = newFrameSize - fMaxSize;
	}
	else
	{
		fFrameSize = newFrameSize;
		fNumTruncatedBytes = 0;
	}

	memmove(fTo, newFrameDataStart, fFrameSize);


	av_free_packet(&pkt);
	//pktPool.push(pkt);

	if (fNumTruncatedBytes > 0)
	{
		std::cout << this << ": truncated " << fNumTruncatedBytes << " bytes" << std::endl;
	}

	//std::cout << fFrameSize << std::endl;

	// Tell live555 that a new frame is available
	FramedSource::afterGetting(this);

	//std::cout << pkt.pts << std::endl;

	if (onSentNALU) onSentNALU(this, nal_unit_type, fFrameSize);

	//std::cout << "sent frame" << std::endl;

	lastFrameTime = thisTime;

	//std::cout << this << ": delivered" << std::endl;

	//boost::this_thread::sleep_for(boost::chrono::microseconds((size_t)(1 * fFrameSize)));
}
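Example #8 treats pkt.pts as if it were already in microseconds. When the pts is in the codec or stream time base instead (what its commented-out av_rescale_q() calls hint at), the conversion would look roughly like this sketch; 'timeBase' is an assumed input taken from the codec/stream context, and wall-clock alignment would additionally need an offset such as the one Example #11 maintains:

// Sketch: rescale an AVPacket pts into the timeval live555 expects.
extern "C" {
#include <libavutil/mathematics.h>
}
#include <sys/time.h>

static void ptsToPresentationTime(int64_t pts, AVRational timeBase,
                                  struct timeval* presentationTime) {
	AVRational microSecBase = { 1, 1000000 };
	int64_t us = av_rescale_q(pts, timeBase, microSecBase);
	presentationTime->tv_sec = us / 1000000;
	presentationTime->tv_usec = us % 1000000;
}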
Example #9
void GAVideoLiveSource
::deliverFrame() {
	// This function is called when new frame data is available from the device.
	// We deliver this data by copying it to the 'downstream' object, using the following parameters (class members):
	// 'in' parameters (these should *not* be modified by this function):
	//     fTo: The frame data is copied to this address.
	//         (Note that the variable "fTo" is *not* modified.  Instead,
	//          the frame data is copied to the address pointed to by "fTo".)
	//     fMaxSize: This is the maximum number of bytes that can be copied
	//         (If the actual frame is larger than this, then it should
	//          be truncated, and "fNumTruncatedBytes" set accordingly.)
	// 'out' parameters (these are modified by this function):
	//     fFrameSize: Should be set to the delivered frame size (<= fMaxSize).
	//     fNumTruncatedBytes: Should be set iff the delivered frame would have been
	//         bigger than "fMaxSize", in which case it's set to the number of bytes
	//         that have been omitted.
	//     fPresentationTime: Should be set to the frame's presentation time
	//         (seconds, microseconds).  This time must be aligned with 'wall-clock time' - i.e., the time that you would get
	//         by calling "gettimeofday()".
	//     fDurationInMicroseconds: Should be set to the frame's duration, if known.
	//         If, however, the device is a 'live source' (e.g., encoded from a camera or microphone), then we probably don't need
	//         to set this variable, because - in this case - data will never arrive 'early'.
	// Note the code below.

	if (!isCurrentlyAwaitingData()) return; // we're not ready for the data yet

	encoder_packet_t pkt;
	u_int8_t* newFrameDataStart = NULL;
	unsigned newFrameSize = 0;

	newFrameDataStart = (u_int8_t*) encoder_pktqueue_front(this->channelId, &pkt);
	if(newFrameDataStart == NULL)
		return;
	newFrameSize = pkt.size;
#ifdef DISCRETE_FRAMER	// special handling for packets with a start code
	if(remove_startcode != 0 && newFrameSize >= 4) {
		if(newFrameDataStart[0] == 0
		&& newFrameDataStart[1] == 0) {
			if(newFrameDataStart[2] == 0
			&& newFrameDataStart[3] == 1) {
				newFrameDataStart += 4;
				newFrameSize -= 4;
			} else if(newFrameDataStart[2] == 1) {
				newFrameDataStart += 3;
				newFrameSize -= 3;
			}
		}
	}
#endif
	// Deliver the data here:
	if (newFrameSize > fMaxSize) {
		fFrameSize = fMaxSize;
#ifdef DISCRETE_FRAMER
		fNumTruncatedBytes = newFrameSize - fMaxSize;
		ga_error("video encoder: packet truncated (%d > %d).\n", newFrameSize, fMaxSize);
#else		// for regular H264Framer
		encoder_pktqueue_split_packet(this->channelId, (char*) newFrameDataStart + fMaxSize);
#endif
	} else {
		fFrameSize = newFrameSize;
	}
	//gettimeofday(&fPresentationTime, NULL); // If you have a more accurate time - e.g., from an encoder - then use that instead.
	fPresentationTime = pkt.pts_tv;
	// If the device is *not* a 'live source' (e.g., it comes instead from a file or buffer), then set "fDurationInMicroseconds" here.
	memmove(fTo, newFrameDataStart, fFrameSize);

	encoder_pktqueue_pop_front(channelId);

	// After delivering the data, inform the reader that it is now available:
	FramedSource::afterGetting(this);
}
Example #10
void LiveVideoStreamSource::deliverFrame() {
    // This function is called when new frame data is available from the device.
    // We deliver this data by copying it to the 'downstream' object, using the following parameters (class members):
    // 'in' parameters (these should *not* be modified by this function):
    //     fTo: The frame data is copied to this address.
    //         (Note that the variable "fTo" is *not* modified.  Instead,
    //          the frame data is copied to the address pointed to by "fTo".)
    //     fMaxSize: This is the maximum number of bytes that can be copied
    //         (If the actual frame is larger than this, then it should
    //          be truncated, and "fNumTruncatedBytes" set accordingly.)
    // 'out' parameters (these are modified by this function):
    //     fFrameSize: Should be set to the delivered frame size (<= fMaxSize).
    //     fNumTruncatedBytes: Should be set iff the delivered frame would have been
    //         bigger than "fMaxSize", in which case it's set to the number of bytes
    //         that have been omitted.
    //     fPresentationTime: Should be set to the frame's presentation time
    //         (seconds, microseconds).  This time must be aligned with 'wall-clock time' - i.e., the time that you would get
    //         by calling "gettimeofday()".
    //     fDurationInMicroseconds: Should be set to the frame's duration, if known.
    //         If, however, the device is a 'live source' (e.g., encoded from a camera or microphone), then we probably don't need
    //         to set this variable, because - in this case - data will never arrive 'early'.
    // Note the code below.

    // we're not ready for the data yet
    if (!isCurrentlyAwaitingData()) {
        printf("Frame LOSS!!!!!!!!\n");
        return;
    }

    VENC_CHN_STAT_S stStat;
    VENC_STREAM_S stStream;
    HI_S32 s32Ret;

    s32Ret = HI_MPI_VENC_Query(fChannelNo, &stStat);
    if (HI_SUCCESS != s32Ret)
    {
        return;
    }

    if (stStat.u32CurPacks <= 0) {
        return;
    }

    stStream.pstPack = (VENC_PACK_S *)alloca(sizeof(VENC_PACK_S) * stStat.u32CurPacks);
    stStream.u32PackCount = stStat.u32CurPacks;
    stStream.u32Seq = 0;
    memset(&stStream.stH264Info, 0, sizeof(VENC_STREAM_INFO_H264_S));
    s32Ret = HI_MPI_VENC_GetStream(fChannelNo, &stStream, HI_FALSE);
    if (HI_SUCCESS != s32Ret)
    {
        g_critical("HI_MPI_VENC_GetStream failed with %#x!\n", s32Ret);
        return;
    }

    fPresentationTime.tv_sec = stStream.pstPack[0].u64PTS / 1000000UL;
    fPresentationTime.tv_usec = stStream.pstPack[0].u64PTS % 1000000UL;

    fFrameSize = 0;
    fNumTruncatedBytes = 0;
    for (unsigned i = 0; i < stStream.u32PackCount; i++) {
        for (int j = 0; j < ARRAY_SIZE(stStream.pstPack[i].pu8Addr); j++) {
            HI_U8 *p = stStream.pstPack[i].pu8Addr[j];
            HI_U32 len = stStream.pstPack[i].u32Len[j];

            if (len == 0)
                continue;

            if (len >= 3 && p[0] == 0x00 && p[1] == 0x00 && p[2] == 0x01) {
                p += 3;
                len -= 3;
            }
            if (len >= 4 && p[0] == 0x00 && p[1] == 0x00 && p[2] == 0x00 && p[3] == 0x01) {
                p += 4;
                len -= 4;
            }

            if (fNumTruncatedBytes > 0 || fFrameSize + len > fMaxSize) {
                if (fNumTruncatedBytes == 0)
                    g_critical("Packet length exceeds fMaxSize\n");
                // Count the remaining bytes as truncated rather than
                // delivering them out of order:
                fNumTruncatedBytes += len;
                continue;
            }

            memmove(&fTo[fFrameSize], p, len);
            fFrameSize += len;
        }
    }

    s32Ret = HI_MPI_VENC_ReleaseStream(fChannelNo, &stStream);
    if (HI_SUCCESS != s32Ret)
    {
        g_critical("HI_MPI_VENC_ReleaseStream failed with %#x!\n", s32Ret);
    }

    doStopGettingFrames();

    // After delivering the data, inform the reader that it is now available:
    FramedSource::afterGetting(this);
}
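Example #10 uses an ARRAY_SIZE macro that is not shown in the snippet; presumably it is the usual element-count idiom:

// Assumed definition of ARRAY_SIZE (element count of a fixed-size array):
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))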
Example #11
void LiveDeviceSource::deliverFrame()
{
  //  VLOG(2) << "LiveDeviceSource::deliverFrame()";

  // This would be called when new frame data is available from the device.
  // This function should deliver the next frame of data from the device,
  // using the following parameters (class members):
  // 'in' parameters (these should *not* be modified by this function):
  //     fTo: The frame data is copied to this address.
  //         (Note that the variable "fTo" is *not* modified.  Instead,
  //          the frame data is copied to the address pointed to by "fTo".)
  //     fMaxSize: This is the maximum number of bytes that can be copied
  //         (If the actual frame is larger than this, then it should
  //          be truncated, and "fNumTruncatedBytes" set accordingly.)
  // 'out' parameters (these are modified by this function):
  //     fFrameSize: Should be set to the delivered frame size (<= fMaxSize).
  //     fNumTruncatedBytes: Should be set iff the delivered frame would have been
  //         bigger than "fMaxSize", in which case it's set to the number of bytes
  //         that have been omitted.
  //     fPresentationTime: Should be set to the frame's presentation time
  //         (seconds, microseconds).
  //     fDurationInMicroseconds: Should be set to the frame's duration, if known.
  if (!isCurrentlyAwaitingData()) return; // we're not ready for the data yet

  assert( !m_qMediaSamples.empty() );

  // Deliver the data here:
  MediaSample mediaSample = m_qMediaSamples.front();
  m_qMediaSamples.pop_front();

  double dStartTime = mediaSample.getPresentationTime();
  int nSize = mediaSample.getMediaSize();
  const BYTE* pBuffer = mediaSample.getDataBuffer().data();
  //  VLOG(2) << "LiveDeviceSource::deliverFrame() Sample size: " << nSize;
  // The start time of the first sample is stored as a reference start time for the media samples.
  // Similarly, we store the current time obtained via gettimeofday() in m_tOffsetTime.
  // We do this because the samples must be timestamped relative to gettimeofday():
  // the RTCP implementation runs on gettimeofday() time, and this mapping is what
  // allows the original timestamps to be recovered on the RTP client side. Without
  // it, samples on the receiver side end up with very strange timestamps.
  if (!m_bOffsetSet)
  {
    // Set initial values
    gettimeofday(&m_tOffsetTime, NULL);
    m_dOffsetTime = dStartTime;
    m_bOffsetSet = true;

    // Set the presentation time of the first sample
    gettimeofday(&fPresentationTime, NULL);

    //    VLOG(2) << "Delivering first media frame";
  }
  else
  {
    //    VLOG(2) << "Delivering next media frame";

    // Calculate the difference between this sample's start time and the initial sample's start time
    double dDifference = dStartTime - m_dOffsetTime;
    long lDiffSecs = (long)dDifference;
    long lDiffUSecs = static_cast<long>((dDifference - lDiffSecs) * 1000000);
    // Now add these offsets to the initial presentation time obtained through gettimeofday,
    // carrying any microsecond overflow into the seconds field:
    fPresentationTime.tv_sec = m_tOffsetTime.tv_sec + lDiffSecs;
    fPresentationTime.tv_usec = m_tOffsetTime.tv_usec + lDiffUSecs;
    if (fPresentationTime.tv_usec >= 1000000)
    {
      fPresentationTime.tv_sec += fPresentationTime.tv_usec / 1000000;
      fPresentationTime.tv_usec %= 1000000;
    }
  }

  if (nSize > (int)fMaxSize)
  {
    // The sample is too big for the downstream buffer: deliver what fits and
    // record how many bytes were dropped.
    fNumTruncatedBytes = nSize - fMaxSize;
    fFrameSize = fMaxSize;
    //TODO How do we send the rest in the following packet???
    //TODO How do we delete the frame??? Unless we store extra attributes in the MediaFrame class
    LOG(WARNING) << "TODO: Truncated packet";
  }
  else
  {
    fFrameSize = nSize;
    fNumTruncatedBytes = 0;
  }
  // Copy the (possibly truncated) sample to the downstream buffer:
  memcpy(fTo, pBuffer, fFrameSize);
  // 04/04/2008 RG: http://lists.live555.com/pipermail/live-devel/2008-April/008395.html
  // 'live' sources don't need a per-frame duration, so leave it at zero:
  fDurationInMicroseconds = 0;

  // After delivering the data, inform the reader that it is now available:
  FramedSource::afterGetting(this);
}