Code example #1
File: WISInput.cpp Project: epheatt/wis-streamer
void WISOpenFileSource::incomingDataHandler1() {
  // Read the data from our file into the client's buffer:
  readFromFile();

  // Stop handling any more input, until we're ready again:
  envir().taskScheduler().turnOffBackgroundReadHandling(fFileNo);

  // Tell our client that we have new data:
  afterGetting(this);
}
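The example above is the simplest form of the pattern shared by all of the snippets on this page: a FramedSource subclass fills the client's buffer (fTo, at most fMaxSize bytes), sets fFrameSize, fPresentationTime and fDurationInMicroseconds, and then signals completion with afterGetting(). Below is a minimal sketch of that contract, assuming only the standard live555 FramedSource interface; the DummySource class and its one-byte payload are hypothetical and not taken from any project listed here.

#include "FramedSource.hh"
#include <sys/time.h>

class DummySource: public FramedSource {
public:
  DummySource(UsageEnvironment& env): FramedSource(env) {}

protected:
  virtual void doGetNextFrame() {
    // 1. Produce at most fMaxSize bytes into the client's buffer fTo:
    fTo[0] = 0xAA;        // placeholder payload byte (hypothetical)
    fFrameSize = 1;       // must never exceed fMaxSize
    fNumTruncatedBytes = 0;

    // 2. Stamp the frame:
    gettimeofday(&fPresentationTime, NULL);
    fDurationInMicroseconds = 0;

    // 3. Hand the frame to the downstream object.  Scheduling the call via the
    //    task scheduler (rather than calling afterGetting(this) directly) avoids
    //    unbounded recursion if the sink immediately asks for more data:
    nextTask() = envir().taskScheduler().scheduleDelayedTask(0,
                   (TaskFunc*)FramedSource::afterGetting, this);
  }
};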
Code example #2
void BasicUDPSource::incomingPacketHandler1() {
  if (!isCurrentlyAwaitingData()) return; // we're not ready for the data yet

	// Read the packet into our desired destination:
  struct sockaddr_in fromAddress;
  if (!fInputGS->handleRead(fTo, fMaxSize, fFrameSize, fromAddress)) return;

	// Tell our client that we have new data:
  afterGetting(this); // we're preceded by a net read; no infinite recursion
}
Code example #3
void MPEG2TransportStreamFramer::afterGettingFrame1(unsigned frameSize,
						    struct timeval presentationTime) {
  fFrameSize += frameSize;
  unsigned const numTSPackets = fFrameSize/TRANSPORT_PACKET_SIZE;
  fNumTSPacketsToStream -= numTSPackets;
  fFrameSize = numTSPackets*TRANSPORT_PACKET_SIZE; // an integral # of TS packets
  if (fFrameSize == 0) {
    // We didn't read a complete TS packet; assume that the input source has closed.
    handleClosure();
    return;
  }

  // Make sure the data begins with a sync byte:
  unsigned syncBytePosition;
  for (syncBytePosition = 0; syncBytePosition < fFrameSize; ++syncBytePosition) {
    if (fTo[syncBytePosition] == TRANSPORT_SYNC_BYTE) break;
  }
  if (syncBytePosition == fFrameSize) {
    envir() << "No Transport Stream sync byte in data.";
    handleClosure();
    return;
  } else if (syncBytePosition > 0) {
    // There's a sync byte, but not at the start of the data.  Move the good data
    // to the start of the buffer, then read more to fill it up again:
    memmove(fTo, &fTo[syncBytePosition], fFrameSize - syncBytePosition);
    fFrameSize -= syncBytePosition;
    fInputSource->getNextFrame(&fTo[fFrameSize], syncBytePosition,
			       afterGettingFrame, this,
			       FramedSource::handleClosure, this);
    return;
  } // else normal case: the data begins with a sync byte

  fPresentationTime = presentationTime;

  // Scan through the TS packets that we read, and update our estimate of
  // the duration of each packet:
  struct timeval tvNow;
  gettimeofday(&tvNow, NULL);
  double timeNow = tvNow.tv_sec + tvNow.tv_usec/1000000.0;
  for (unsigned i = 0; i < numTSPackets; ++i) {
    if (!updateTSPacketDurationEstimate(&fTo[i*TRANSPORT_PACKET_SIZE], timeNow)) {
      // We hit a preset limit (based on PCR) within the stream.  Handle this as if the input source has closed:
      handleClosure();
      return;
    }
  }

  fDurationInMicroseconds
    = numTSPackets * (unsigned)(fTSPacketDurationEstimate*1000000);

  // Complete the delivery to our client:
  afterGetting(this);
}
Code example #4
void MPEG2TransportStreamAccumulator::doGetNextFrame() {
  if (fNumBytesGathered >= fDesiredPacketSize) {
    // Complete the delivery to the client:
    fFrameSize = fNumBytesGathered;
    fNumBytesGathered = 0;
    afterGetting(this);
  } else {
    // Ask for more data (delivered directly to the client's buffer);
    fInputSource->getNextFrame(fTo, fMaxSize, afterGettingFrame, this,
			       FramedSource::handleClosure, this);
  }
}
Code example #5
void WindowsAudioInputDevice_common::onceAudioIsReady() {
  fFrameSize = readFromBuffers(fTo, fMaxSize, fPresentationTime);
  if (fFrameSize == 0) {
    // The source is no longer readable
    handleClosure(this);
    return;
  }
  fDurationInMicroseconds = 1000000/fSamplingFrequency;

  // Call our own 'after getting' function.  Because we sometimes get here
  // after returning from a delay, we can call this directly, without risking
  // infinite recursion
  afterGetting(this);
}
Code example #6
void H264VideoStreamFramer::continueReadProcessing()
{
   unsigned acquiredFrameSize; 
//     std::cout << "H264VideoStreamFramer: in continueReadProcessing" << std::endl;
   u_int64_t frameDuration;  // in ms


   acquiredFrameSize = fParser->parse();
//   fprintf(stderr, "acquiredFrameSize = %d\n",acquiredFrameSize);


   if (acquiredFrameSize > 0) {

        check++;
      // We were able to acquire a frame from the input.
      // It has already been copied to the reader's space.
      fFrameSize = acquiredFrameSize;
//    fNumTruncatedBytes = fParser->numTruncatedBytes(); // not needed so far
      frameDuration = 17;    //1000/60
      fFrameRate = frameDuration == 0 ? 0.0 : 1000./(long)frameDuration;

      // Compute "fPresentationTime" 
      if (acquiredFrameSize == 5) // first frame
         fPresentationTime = fPresentationTimeBase;
      else 
         fPresentationTime.tv_usec += (long) frameDuration*1000;

      while (fPresentationTime.tv_usec >= 1000000) {
         fPresentationTime.tv_usec -= 1000000;
         ++fPresentationTime.tv_sec;
      }

      // Compute "fDurationInMicroseconds" 
      fDurationInMicroseconds = (unsigned int) frameDuration*1000;
      printf(" Compute fDurationInMicroseconds \n");

      // Call our own 'after getting' function.  Because we're not a 'leaf'
      // source, we can call this directly, without risking infinite recursion.
      afterGetting(this);
   } else {

      // We were unable to parse a complete frame from the input, because:
      // - we had to read more data from the source stream, or
      // - the source stream has ended.
   }
}
Code example #7
File: MP3ADUinterleaving.cpp Project: LiYX/live555
void MP3ADUdeinterleaver::doGetNextFrame() {
  // If there's a frame immediately available, deliver it, otherwise get new
  // frames from the source until one's available:
  if (fFrames->haveReleaseableFrame()) {
    releaseOutgoingFrame();

    // Call our own 'after getting' function.  Because we're not a 'leaf'
    // source, we can call this directly, without risking infinite recursion.
    afterGetting(this);
  } else {
#ifdef TEST_LOSS
  NOTE: This code no longer works, because it uses synchronous reads,
  which are no longer supported. 
    static unsigned const framesPerPacket = 3;
    static unsigned const frameCount = 0;
    static Boolean packetIsLost;
    while (1) {
      unsigned packetCount = frameCount/framesPerPacket;
      if ((frameCount++)%framesPerPacket == 0) {
	packetIsLost = (our_random()%10 == 0); // simulate 10% packet loss #####
      }

      if (packetIsLost) {
	// Read and discard the next input frame (that would be part of
	// a lost packet):
	unsigned char dummyBuf[2000];
	unsigned numBytesRead;
	struct timeval presentationTime;
	// (this works only if the source can be read synchronously)
	fInputSource->syncGetNextFrame(dummyBuf, sizeof dummyBuf,
				       numBytesRead, presentationTime);
      } else {
	break; // from while (1)
      }
    }
#endif
    unsigned char* dataPtr;
    unsigned bytesAvailable;
    fFrames->getIncomingFrameParams(dataPtr, bytesAvailable);

    // Read the next incoming frame (asynchronously)
    fInputSource->getNextFrame(dataPtr, bytesAvailable,
			       &MP3ADUinterleaverBase::afterGettingFrame, this,
			       handleClosure, this);
  }
}
Code example #8
// Note: We should change the following to use asynchronous file reading, #####
// as we now do with ByteStreamFileSource. #####
void G711AudioStreamSource::doGetNextFrame() {
    // Try to read as many bytes as will fit in the buffer provided (or "fPreferredFrameSize" if less)
    if (fLimitNumBytesToStream && fNumBytesToStream < fMaxSize) {
        fMaxSize = fNumBytesToStream;
    }
    if (fPreferredFrameSize < fMaxSize) {
        fMaxSize = fPreferredFrameSize;
    }
    unsigned bytesPerSample = (fNumChannels*fBitsPerSample)/8;
    if (bytesPerSample == 0) bytesPerSample = 1; // because we can't read less than a byte at a time
    //unsigned bytesToRead = fMaxSize - fMaxSize%bytesPerSample;

    //fFrameSize : 1000
    audioGetOneFrame(fTo, &fFrameSize);

    // Set the 'presentation time' and 'duration' of this frame:
    if (fPresentationTime.tv_sec == 0 && fPresentationTime.tv_usec == 0) {
        // This is the first frame, so use the current time:
        gettimeofday(&fPresentationTime, NULL);
    } else {
        // Increment by the play time of the previous data:
        unsigned uSeconds	= fPresentationTime.tv_usec + fLastPlayTime;
        fPresentationTime.tv_sec += uSeconds/1000000;
        fPresentationTime.tv_usec = uSeconds%1000000;
    }

    // Remember the play time of this data:
    fDurationInMicroseconds = fLastPlayTime
        = (unsigned)((fPlayTimePerSample*fFrameSize)/bytesPerSample);

    // Switch to another task, and inform the reader that he has data:
#if defined(__WIN32__) || defined(_WIN32)
    // HACK: One of our applications that uses this source uses an
    // implementation of scheduleDelayedTask() that performs very badly
    // (chewing up lots of CPU time, apparently polling) on Windows.
    // Until this is fixed, we just call our "afterGetting()" function
    // directly.  This avoids infinite recursion, as long as our sink
    // is discontinuous, which is the case for the RTP sink that
    // this application uses. #####
    afterGetting(this);
#else
    nextTask() = envir().taskScheduler().scheduleDelayedTask(0,
            (TaskFunc*)FramedSource::afterGetting, this);
#endif
}
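The presentation-time bookkeeping above (and in the WAVAudioFileSource example later on this page) simply adds the previous frame's play time to tv_usec and carries any whole seconds into tv_sec. A small sketch of the same arithmetic follows; the helper name is illustrative and does not appear in the source.

#include <sys/time.h>

// Advance a presentation time by the play time (in microseconds) of the previous frame:
static void advancePresentationTime(struct timeval& pt, unsigned lastPlayTimeUs) {
  unsigned uSeconds = pt.tv_usec + lastPlayTimeUs;
  pt.tv_sec  += uSeconds / 1000000; // carry whole seconds
  pt.tv_usec  = uSeconds % 1000000; // keep the microsecond remainder
}

// For example: pt = {10 s, 900000 us} advanced by 250000 us becomes {11 s, 150000 us}.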
Code example #9
void MP3ADUTranscoder::afterGettingFrame1(unsigned numBytesRead,
					  unsigned numTruncatedBytes,
					  struct timeval presentationTime,
					  unsigned durationInMicroseconds) {
  fNumTruncatedBytes = numTruncatedBytes; // but can we handle this being >0? #####
  fPresentationTime = presentationTime;
  fDurationInMicroseconds = durationInMicroseconds;
  fFrameSize = TranscodeMP3ADU(fOrigADU, numBytesRead, fOutBitrate,
			    fTo, fMaxSize, fAvailableBytesForBackpointer);
  if (fFrameSize == 0) { // internal error - bad ADU data?
    handleClosure();
    return;
  }

  // Call our own 'after getting' function.  Because we're not a 'leaf'
  // source, we can call this directly, without risking infinite recursion.
  afterGetting(this);
}
Code example #10
void MPEG2TransportStreamTrickModeFilter::attemptDeliveryToClient() {
  if (fCurrentTSPacketNum == fDesiredTSPacketNum) {
    //    fprintf(stderr, "\t\tdelivering ts %d:%d, %d bytes, PCR %f\n", fCurrentTSPacketNum, fDesiredDataOffset, fDesiredDataSize, fDesiredDataPCR);//#####
    // We already have the Transport Packet that we want.  Deliver its data:
    memmove(fTo, &fInputBuffer[fDesiredDataOffset], fDesiredDataSize);
    fFrameSize = fDesiredDataSize;
    float deliveryPCR = fDirection*(fDesiredDataPCR - fFirstPCR)/fScale;
    if (deliveryPCR < 0.0) deliveryPCR = 0.0;
    fPresentationTime.tv_sec = (unsigned long)deliveryPCR;
    fPresentationTime.tv_usec
      = (unsigned long)((deliveryPCR - fPresentationTime.tv_sec)*1000000.0f);
    //    fprintf(stderr, "#####DGNF9\n");

    afterGetting(this);
  } else {
    // Arrange to read the Transport Packet that we want:
    readTransportPacket(fDesiredTSPacketNum);
  }
}
Code example #11
void EncoderAudioSource::doGetNextFrame()
{
	// Copy an encoded audio frame into buffer `fTo'
	// Read a new audio frame into fBuffer
	unsigned char frame[100];
	int size = fFrameLength;
	int audioType;
	Debug(ckite_log_message, "EncoderAudioSource::doGetNextFrame.\n");
	computeAudioPresentationTime(); 
	if (strcmp(mediaType, "store") == 0)
	{
//		getFileAudioFrame( fp, 0, (unsigned char*)fBuffer, &size, &audioType);
	}
	else
	{
		audioGetFrameInfo(fp, mediaType,(char *)fBuffer, &size, &audioType);
	}
	if (size <= 0)
		return ;
	// Encode the frame
	int ret = 0; 
	if(audioType == AUDIO_AMRNB || audioType == AUDIO_AMRWB) 
	{
		fFrameSize = size;
	} 
	else if(audioType == AUDIO_RAW)
	{
		if (fAudioCodec != NULL)
		{
#ifndef __WIN32__
			ret = Encoder_Interface_Encode(fAudioCodec, MR122, (const short int*)fBuffer, frame, 0);
#endif
		}
		if (ret > 0)
		{
			fLastFrameHeader = frame[0];
			fFrameSize = ret-1;
			memcpy(fTo, frame+1, ret-1);
		}
	}
	afterGetting(this);
}
Code example #12
File: MP3FileSource.cpp Project: 3660628/live555
void MP3FileSource::doGetNextFrame() {
  if (!doGetNextFrame1()) {
    handleClosure();
    return;
  }

  // Switch to another task:
#if defined(__WIN32__) || defined(_WIN32)
  // HACK: liveCaster/lc uses an implementation of scheduleDelayedTask()
  // that performs very badly (chewing up lots of CPU time, apparently polling)
  // on Windows.  Until this is fixed, we just call our "afterGetting()"
  // function directly.  This avoids infinite recursion, as long as our sink
  // is discontinuous, which is the case for the RTP sink that liveCaster/lc
  // uses. #####
  afterGetting(this);
#else
  nextTask() = envir().taskScheduler().scheduleDelayedTask(0,
				(TaskFunc*)afterGetting, this);
#endif
}
Code example #13
File: zk_v4l2_x264_source.cpp Project: gozfree/src
	void getNextFrame1 ()
	{
		// capture:
		Picture pic;
		if (capture_get_picture(mp_capture, &pic) < 0) {
			fprintf(stderr, "==== %s: capture_get_picture err\n", __func__);
			m_started = 0;
			return;
		}

		// compress
		const void *outbuf;
		int outlen;
		if (vc_compress(mp_compress, pic.data, pic.stride, &outbuf, &outlen) < 0) {
			fprintf(stderr, "==== %s: vc_compress err\n", __func__);
			m_started = 0;
			return;
		}

		int64_t pts, dts;
		int key;
		vc_get_last_frame_info(mp_compress, &key, &pts, &dts);

		// save outbuf
		gettimeofday(&fPresentationTime, 0);
		fFrameSize = outlen;
		if (fFrameSize > fMaxSize) {
			fNumTruncatedBytes = fFrameSize - fMaxSize;
			fFrameSize = fMaxSize;
		}
		else {
			fNumTruncatedBytes = 0;
		}

		memmove(fTo, outbuf, fFrameSize);

		// notify
		afterGetting(this);

		m_started = 0;
	}
Code example #14
void H263plusVideoStreamFramer::continueReadProcessing()
{
   unsigned acquiredFrameSize; 
 
   u_int64_t frameDuration;  // in ms

   acquiredFrameSize = fParser->parse(frameDuration);   
// Calculate some average bitrate information (to be adapted)	
//	avgBitrate = (totalBytes * 8 * H263_TIMESCALE) / totalDuration;

   if (acquiredFrameSize > 0) {
      // We were able to acquire a frame from the input.
      // It has already been copied to the reader's space.
      fFrameSize = acquiredFrameSize;
//    fNumTruncatedBytes = fParser->numTruncatedBytes(); // not needed so far

      fFrameRate = frameDuration == 0 ? 0.0 : 1000./(long)frameDuration;

      // Compute "fPresentationTime" 
      if (acquiredFrameSize == 5) // first frame
         fPresentationTime = fPresentationTimeBase;
      else 
         fPresentationTime.tv_usec += (long) frameDuration*1000;

      while (fPresentationTime.tv_usec >= 1000000) {
         fPresentationTime.tv_usec -= 1000000;
         ++fPresentationTime.tv_sec;
      }

      // Compute "fDurationInMicroseconds" 
      fDurationInMicroseconds = (unsigned int) frameDuration*1000;

      // Call our own 'after getting' function.  Because we're not a 'leaf'
      // source, we can call this directly, without risking infinite recursion.
      afterGetting(this);
   } else {
      // We were unable to parse a complete frame from the input, because:
      // - we had to read more data from the source stream, or
      // - the source stream has ended.
   }
}
Code example #15
void AMRAudioEncoder::doGetNextFrame() {
  // If we have enough samples in order to encode a frame, do this now:
  if (fInputSampleBufferBytesFull >= fInputSampleBufferBytesDesired) {
    if (fMaxSize < AMR_MAX_CODED_FRAME_SIZE) {
      // Our sink hasn't given us enough space for a frame.  We can't encode.
      fFrameSize = 0;
      fNumTruncatedBytes = AMR_MAX_CODED_FRAME_SIZE;
    } else {
      enum Mode ourAMRMode = MR122; // the only mode that we support
      fFrameSize = Encoder_Interface_Encode(fEncoderState, ourAMRMode,
					    (short*)fInputSampleBuffer, fTo,
					    0/*disable DTX*/);
      // Note the 1-byte AMR frame header (which wasn't included in the encoded data):
      fLastFrameHeader = toc_byte[ourAMRMode];

      fNumTruncatedBytes = 0;

      // Shift any remaining samples down to the start of the buffer:
      fInputSampleBufferBytesFull -= fInputSampleBufferBytesDesired;
      memmove(fInputSampleBuffer,
	      &fInputSampleBuffer[fInputSampleBufferBytesDesired],
	      fInputSampleBufferBytesFull);
    }

    // Complete delivery to the client:
    fPresentationTime = fLastInputDataPresentationTime;
    //fDurationInMicroseconds = AMR_MICROSECONDS_PER_FRAME;
    fDurationInMicroseconds = 0; // because audio capture is bursty, check for it ASAP
    afterGetting(this);
  } else {
    // Read more samples from our source, then try again:
    unsigned maxBytesToRead
      = fInputSampleBufferSize - fInputSampleBufferBytesFull;
    fInputPCMSource
      ->getNextFrame(&fInputSampleBuffer[fInputSampleBufferBytesFull],
		     maxBytesToRead,
		     afterGettingFrame, this,
		     FramedSource::handleClosure, this);
  }
}
Code example #16
void EncoderAudioSource::doGetNextFrame()
{
	// Copy an encoded audio frame into buffer `fTo'
	// Read a new audio frame into fBuffer
	unsigned char frame[100];
	int size = fFrameLength;
	int audioType;
	Debug(ckite_log_message, "EncoderAudioSource::doGetNextFrame.\n");
	computeAudioPresentationTime(); 
	if (strcmp(mediaType, "store") == 0)
	{
		getStoreAudioFrame( fp, 0, (unsigned char*)fBuffer, &size, &audioType);
	}
	else
	{
		getRealAudioFrame(fp, mediaType,(char *)fBuffer, &size, &audioType);
	}
	if (size <= 0)
		size = 0;
	// Encode the frame
	
#ifdef ENC_SOURCE
	int ret = 0; 
	if (fAudioCodec != NULL)
	{
		ret = Encoder_Interface_Encode(fAudioCodec, MR122, (const short int*)fBuffer, frame, 0);
	}
	if (ret > 0)
	{
		fLastFrameHeader = frame[0];
		fFrameSize = ret-1;
		memcpy(fTo, frame+1, ret-1);
	}
#else
	if (size != 0)
		memcpy(fTo, fBuffer, size);
	fFrameSize = size;
#endif
	afterGetting(this);
}
Code example #17
File: MP3ADUinterleaving.cpp Project: LiYX/live555
void MP3ADUinterleaver::doGetNextFrame() {
  // If there's a frame immediately available, deliver it, otherwise get new
  // frames from the source until one's available:
  if (fFrames->haveReleaseableFrame()) {
    releaseOutgoingFrame();

    // Call our own 'after getting' function.  Because we're not a 'leaf'
    // source, we can call this directly, without risking infinite recursion.
    afterGetting(this);
  } else {
    fPositionOfNextIncomingFrame = fInterleaving.lookupInverseCycle(fII);
    unsigned char* dataPtr;
    unsigned bytesAvailable;
    fFrames->getIncomingFrameParams(fPositionOfNextIncomingFrame,
				    dataPtr, bytesAvailable);

    // Read the next incoming frame (asynchronously)
    fInputSource->getNextFrame(dataPtr, bytesAvailable,
			       &MP3ADUinterleaverBase::afterGettingFrame, this,
			       handleClosure, this);
  }
}
Code example #18
void DVVideoStreamFramer::doGetNextFrame() {
  fFrameSize = 0; // initially, until we deliver data

  // If we have saved initial blocks (and won't be seeking back to re-read this data), use this data first.
  if (fInitialBlocksPresent && !fSourceIsSeekable) {
    // For simplicity, we require the downstream object's buffer to be >= this data's size:
    if (fMaxSize < DV_SAVED_INITIAL_BLOCKS_SIZE) {
      fNumTruncatedBytes = fMaxSize;
      afterGetting(this);
      return;
    }

    memmove(fTo, fSavedInitialBlocks, DV_SAVED_INITIAL_BLOCKS_SIZE);
    fFrameSize = DV_SAVED_INITIAL_BLOCKS_SIZE;
    fTo += DV_SAVED_INITIAL_BLOCKS_SIZE;
    fInitialBlocksPresent = False; // for the future
  }
    
  // Arrange to read the (rest of the) requested data.
  // (But first, make sure that we read an integral multiple of the DV block size.)
  fMaxSize -= fMaxSize%DV_DIF_BLOCK_SIZE;
  getAndDeliverData();
}
Code example #19
void TsMPEG2TransportStreamFramer::afterGettingFrame1(unsigned frameSize,
struct timeval presentationTime) {
	fFrameSize += frameSize;
	unsigned const numTSPackets = fFrameSize/TRANSPORT_PACKET_SIZE;
  unsigned const dataGoingToBeLost=fFrameSize % TRANSPORT_PACKET_SIZE;
	fFrameSize = numTSPackets*TRANSPORT_PACKET_SIZE; // an integral # of TS packets
	if (fFrameSize == 0) {
		// We didn't read a complete TS packet; assume that the input source has closed.
		handleClosure(this);
		return;
	}
  if (dataGoingToBeLost>0)
  {
    //need to handle a mid buffer
  }
	// Make sure the data begins with a sync byte:
	unsigned syncBytePosition;
	for (syncBytePosition = 0; syncBytePosition < fFrameSize; ++syncBytePosition) {
		if (fTo[syncBytePosition] == TRANSPORT_SYNC_BYTE) break;
	}
	if (syncBytePosition == fFrameSize) {
		envir() << "No Transport Stream sync byte in data.";
		handleClosure(this);
		return;
	} else if (syncBytePosition > 0) {
		// There's a sync byte, but not at the start of the data.  Move the good data
		// to the start of the buffer, then read more to fill it up again:
		memmove(fTo, &fTo[syncBytePosition], frameSize - syncBytePosition);
		fFrameSize -= syncBytePosition-dataGoingToBeLost;
		fInputSource->getNextFrame(&fTo[fFrameSize], syncBytePosition,
			afterGettingFrame, this,
			FramedSource::handleClosure, this);
		return;
	}
  else if (dataGoingToBeLost>0)// there is a problem in the buffer somewhere
  {
    unsigned badPacket = 0;
    for (badPacket=0;badPacket<numTSPackets;badPacket++)
    {
      if (fTo[badPacket*TRANSPORT_PACKET_SIZE]!=TRANSPORT_SYNC_BYTE && badPacket*TRANSPORT_PACKET_SIZE<frameSize) break;
    }
    //we know it's the previous one...
    if (badPacket!=0)
    {
	    for (syncBytePosition = 1; syncBytePosition < TRANSPORT_PACKET_SIZE; ++syncBytePosition) {
		    if (fTo[badPacket*TRANSPORT_PACKET_SIZE-syncBytePosition] == TRANSPORT_SYNC_BYTE) break;
	    }
   		memmove(&fTo[(badPacket-1)*TRANSPORT_PACKET_SIZE], &fTo[badPacket*TRANSPORT_PACKET_SIZE-syncBytePosition], frameSize - (badPacket*TRANSPORT_PACKET_SIZE-syncBytePosition));
		  fFrameSize -= TRANSPORT_PACKET_SIZE-syncBytePosition-dataGoingToBeLost;
		  fInputSource->getNextFrame(&fTo[fFrameSize], syncBytePosition,
			  afterGettingFrame, this,
			  FramedSource::handleClosure, this);
		  return;
    }
  }// else normal case: the data begins with a sync byte


	fPresentationTime = presentationTime;

	// Scan through the TS packets that we read, and update our estimate of
	// the duration of each packet:
	struct timeval tvNow;
	gettimeofday(&tvNow, NULL);
	double timeNow = tvNow.tv_sec + tvNow.tv_usec/1000000.0;
	for (unsigned i = 0; i < numTSPackets; ++i) {
		updateTSPacketDurationEstimate(&fTo[i*TRANSPORT_PACKET_SIZE], timeNow);
	}

	fDurationInMicroseconds
		= numTSPackets * (unsigned)(fTSPacketDurationEstimate*1000000);

	// Complete the delivery to our client:
	afterGetting(this);
}
Code example #20
void EncoderVideoSource::H264_doGetNextFrame()
{
#ifdef SDKH264 
	int size = (fWidth*fHeight *3/2);
	int videoType;

	Debug(ckite_log_message, "EncoderVideoSource::H264_doGetNextFrame ENTRY\n");
	Debug(ckite_log_message, "fMaxSize = %d\n", fMaxSize);
	if (fp == NULL)
	{
		Debug(ckite_log_message, "video fp is NULL\n");
		return;
	}
	// handle per of nal
	for(int i = 0; i < 4; i++)
	{
		if(more_nal[i] != NULL)
		{
			Debug(ckite_log_message, "more_nal address %p\n", more_nal[i]);
			Debug(ckite_log_message, "more_nal len  %d\n", more_nal_len[i]);
			memcpy(fTo, more_nal[i], more_nal_len[i]);
			fFrameSize = more_nal_len[i];
			if(more_nal[i] != NULL)
			{
				delete [] more_nal[i];
				more_nal[i] = NULL;
				more_nal_len[i] = 0;
			}
			fPictureEndMarker = True;
			afterGetting(this);
			return ;
		}
	}
	computePresentationTime();
	if (strcmp(mediaType, "store") == 0)	
	{
		if (fWidth == 720 && fHeight == 576)
		{
			videoType = getLivehdFrame();
		}
		else
		{
//			getFileVideoFrame( fp, 0, (unsigned char *)fBuffer, &size, &videoType, false);
		}
	}
	else
	{
		if (fWidth == 720 && fHeight == 576)
		{
			videoType = getLivehdFrame();
		}
		else
		{
			videoGetFrameInfo(fChannel, fp, mediaType, fBuffer, &size, &videoType);
		}
	}
	if(size <= 0) return ;
	if (videoType == VIDEO_MPEG4 || videoType == VIDEO_H264)
	{
		fFrameSize = size;
	}
	else if (videoType == VIDEO_RAW)
	{
		if( x264_picture_alloc(&m_pic, m_param.i_csp, m_param.i_width, m_param.i_height) < 0)
		{
			Debug(ckite_log_message, "x264_picture_alloc is failed \n");
			return;
		}
		memcpy(m_pic.img.plane[0], fBuffer, m_param.i_width * m_param.i_height);
		memcpy(m_pic.img.plane[1], fBuffer + m_param.i_width * m_param.i_height, m_param.i_width * m_param.i_height / 4);
		memcpy(m_pic.img.plane[2], fBuffer + m_param.i_width * m_param.i_height * 5 / 4, m_param.i_width * m_param.i_height / 4);   

		static x264_picture_t pic_out;
		x264_nal_t *nal = NULL;
		int i_nal, i;

		if(x264_handle != NULL)
		{
			if( x264_encoder_encode( x264_handle, &nal, &i_nal, &m_pic, &pic_out ) < 0 )
			{
				return;
			}
		}
		int offset = 0;
		static int t = 0;
		FILE *fout;

		//unsigned char nal_type;
		Debug(ckite_log_message, "i_nal = %d\n", i_nal);
		for ( i = 0; i < i_nal; i++ )
		{
			if (t < 4)
			{
				char name[100] = {0};
				t++;
				snprintf(name, sizeof name, "nal%d.dat", t);
				fout = fopen(name, "wb+");
				size = fwrite(nal[i].p_payload,1,nal[i].i_payload,fout);
				fclose(fout);
				Debug(ckite_log_message, "size = %d\n",size);

			}
			if(nal[i].p_payload[2] == 1)
			{
				offset = 3;
				//nal_type = nal[i].p_payload[3];
			}
			else if (nal[i].p_payload[3] == 1)
			{
				offset = 4;
				//nal_type = nal[i].p_payload[4];
			}
			if(i >= 1)
			{
				if(more_nal[i-1] == NULL)
				{
					more_nal_len[i-1] = nal[i].i_payload - offset;
					more_nal[i-1] = new char [more_nal_len[i-1] + 1];
					if (more_nal[i-1] != NULL)
					{
						memset(more_nal[i-1], 0x0, nal[i].i_payload - offset + 1);
						memcpy(more_nal[i-1], nal[i].p_payload + offset, nal[i].i_payload - offset);
						//Debug(ckite_log_message, "new sucess more_nal[%d], nal size %d\n", i-1, more_nal_len[i-1]);
					}
					else
					{
						Debug(ckite_log_message, "new failed with %d nal\n", i);
					}
				}
			}
			else 
			{
				memcpy(fTo, nal[i].p_payload + offset, nal[i].i_payload - offset);
				fFrameSize = nal[i].i_payload - offset;
			}
		}
	}
	//Debug(ckite_log_message, "Deliver nal type %d with %d bytes.\n", nal_type, fFrameSize);
	fPictureEndMarker = True;
	afterGetting(this);
	x264_picture_clean(&m_pic);
#endif

}
Code example #21
File: MP3ADU.cpp Project: 3660628/live555
Boolean ADUFromMP3Source::doGetNextFrame1() {
  // First, check whether we have enough previously-read data to output an
  // ADU for the last-read MP3 frame:
  unsigned tailIndex;
  Segment* tailSeg;
  Boolean needMoreData;

  if (fSegments->isEmpty()) {
    needMoreData = True;
    tailSeg = NULL; tailIndex = 0; // unneeded, but stops compiler warnings
  } else {
    tailIndex = SegmentQueue::prevIndex(fSegments->nextFreeIndex());
    tailSeg = &(fSegments->s[tailIndex]);

    needMoreData
	  = fTotalDataSizeBeforePreviousRead < tailSeg->backpointer // bp points back too far
      || tailSeg->backpointer + tailSeg->dataHere() < tailSeg->aduSize; // not enough data
  }

  if (needMoreData) {
    // We don't have enough data to output an ADU from the last-read MP3
    // frame, so need to read another one and try again:
    doGetNextFrame();
    return True;
  }

  // Output an ADU from the tail segment:
  fFrameSize = tailSeg->headerSize+tailSeg->sideInfoSize+tailSeg->aduSize;
  fPresentationTime = tailSeg->presentationTime;
  fDurationInMicroseconds = tailSeg->durationInMicroseconds;
  unsigned descriptorSize
    = fIncludeADUdescriptors ? ADUdescriptor::computeSize(fFrameSize) : 0;
#ifdef DEBUG
  fprintf(stderr, "m->a:outputting ADU %d<-%d, nbr:%d, sis:%d, dh:%d, (descriptor size: %d)\n", tailSeg->aduSize, tailSeg->backpointer, fFrameSize, tailSeg->sideInfoSize, tailSeg->dataHere(), descriptorSize);
#endif
  if (descriptorSize + fFrameSize > fMaxSize) {
    envir() << "ADUFromMP3Source::doGetNextFrame1(): not enough room ("
	    << descriptorSize + fFrameSize << ">"
	    << fMaxSize << ")\n";
    fFrameSize = 0;
    return False;
  }

  unsigned char* toPtr = fTo;
  // output the ADU descriptor:
  if (fIncludeADUdescriptors) {
    fFrameSize += ADUdescriptor::generateDescriptor(toPtr, fFrameSize);
  }

  // output header and side info:
  memmove(toPtr, tailSeg->dataStart(),
	  tailSeg->headerSize + tailSeg->sideInfoSize);
  toPtr += tailSeg->headerSize + tailSeg->sideInfoSize;

  // go back to the frame that contains the start of our data:
  unsigned offset = 0;
  unsigned i = tailIndex;
  unsigned prevBytes = tailSeg->backpointer;
  while (prevBytes > 0) {
    i = SegmentQueue::prevIndex(i);
    unsigned dataHere = fSegments->s[i].dataHere();
    if (dataHere < prevBytes) {
      prevBytes -= dataHere;
    } else {
      offset = dataHere - prevBytes;
      break;
    }
  }

  // dequeue any segments that we no longer need:
  while (fSegments->headIndex() != i) {
    fSegments->dequeue(); // we're done with it
  }

  unsigned bytesToUse = tailSeg->aduSize;
  while (bytesToUse > 0) {
    Segment& seg = fSegments->s[i];
    unsigned char* fromPtr
      = &seg.dataStart()[seg.headerSize + seg.sideInfoSize + offset];
    unsigned dataHere = seg.dataHere() - offset;
    unsigned bytesUsedHere = dataHere < bytesToUse ? dataHere : bytesToUse;
    memmove(toPtr, fromPtr, bytesUsedHere);
    bytesToUse -= bytesUsedHere;
    toPtr += bytesUsedHere;
    offset = 0;
    i = SegmentQueue::nextIndex(i);
  }


  if (fFrameCounter++%fScale == 0) {
    // Call our own 'after getting' function.  Because we're not a 'leaf'
    // source, we can call this directly, without risking infinite recursion.
    afterGetting(this);
  } else {
    // Don't use this frame; get another one:
    doGetNextFrame();
  }

  return True;
}
Code example #22
void H264RealTimeStreamFramer::doGetNextFrame() {
    struct timespec TimeSpec = {0, 0};

    if (fNeedNextFrame) {
        if (fFirstFrame) {
            nextTask() = envir().taskScheduler().scheduleDelayedTask(0, (TaskFunc *)&tryGetNextFrame, this);
            fFirstFrame = False;
            PRINT_LOG(ASSERT, "First H264 frame");
            return;
        }

        MediaFrame * nextFrame = fFrameCapture->GetNextFrame(fCurrentFrame);
        if (NULL == nextFrame) {
            nextTask() = envir().taskScheduler().scheduleDelayedTask(10000, (TaskFunc *)&tryGetNextFrame, this);
            return;
        }

        // ASSERT(nextFrame->IsH264Frame(), "nextFrame MUST be H264Frame");

        PRINT_LOG(ASSERT, "Get %c frame, size = %u", nextFrame->IsKeyFrame() ? 'I' : 'P', nextFrame->Length());

        clock_gettime(CLOCK_MONOTONIC, &TimeSpec);
        PRINT_LOG(VERBOSE, "Time to get frame :  %12ld.%9ld", TimeSpec.tv_sec, TimeSpec.tv_nsec);

        fCurrentFrame = dynamic_cast<H264Frame *>(nextFrame);
        fNeedNextFrame = False;
        fCurrentNaluUnitIndex = 0;
        fOffset = 1;

        // ASSERT(fCurrentFrame->NaluCount() > 0, "H264 frame MUST have at least 1 nalu unit");
    }

    const H264Frame::NaluUnit & Nalu     = fCurrentFrame->GetNaluUnit(fCurrentNaluUnitIndex);
    const uint8_t             * addr     = Nalu.s_Addr;
    uint32_t                    size     = Nalu.s_Length;
    uint8_t                     naluType = (addr[0] & 0x1F);

    if (naluType == NALU_TYPE_SPS && fNeedSPS) {
        // Save SPS(Sequence Parameter Set)
        saveCopyOfSPS((u_int8_t *)addr, size);
        fNeedSPS = False;
    } else if (naluType == NALU_TYPE_PPS && fNeedPPS) {
        // Save PPS(Picture Parameter Set)
        saveCopyOfPPS((u_int8_t *)addr, size);
        fNeedPPS = False;
    }

#ifdef USE_H264_VIDEO_RTP_SINK

    fFrameSize = (size > fMaxSize) ? fMaxSize : size;
    fNumTruncatedBytes = size - fFrameSize;

    memmove(fTo, addr, fFrameSize);

    fPresentationTime = fCurrentFrame->TimeStamp();

    fCurrentNaluUnitIndex ++;

#else // USE_H264_VIDEO_RTP_SINK

    // Make sure the max size of the data does not exceed (MAX_BYTES_PER_UDP_PACKET - RTP_HEADER_SIZE):
    if (fMaxSize > MAX_BYTES_PER_UDP_PACKET - RTP_HEADER_SIZE) {
        fMaxSize = MAX_BYTES_PER_UDP_PACKET - RTP_HEADER_SIZE;
    }

    if (size > fMaxSize) {
        fFrameSize = size - fOffset + 2;
        fFrameSize = (fFrameSize > fMaxSize) ? fMaxSize : fFrameSize;
        memmove(fTo + 2, addr + fOffset, fFrameSize - 2);

        fTo[0] = (addr[0] & 0xE0) | 0x1C;
        if (fOffset == 1) {
            fTo[1] = (addr[0] & 0x1F) | 0x80;
        } else if (fOffset + fFrameSize - 2 >= size) {
            fTo[1] = (addr[0] & 0x1F) | 0x40;
            fCurrentNaluUnitIndex ++;
            fOffset = 1;
        } else {
            fTo[1] = (addr[0] & 0x1F);
        }

        fOffset += fFrameSize - 2;

    } else {
        fFrameSize = size;
        memmove(fTo, addr, fFrameSize);
        fCurrentNaluUnitIndex ++;
    }

    fPresentationTime = fCurrentFrame->TimeStamp();
    fNumTruncatedBytes = 0;

#endif // USE_H264_VIDEO_RTP_SINK

    if (fCurrentNaluUnitIndex >= fCurrentFrame->NaluCount()) {
        fPictureEndMarker = True;
        fNeedNextFrame = True;
        fDurationInMicroseconds = fCurrentFrame->Duration();
    } else {
        fPictureEndMarker = False;
        fDurationInMicroseconds = 0;
    }

    afterGetting(this);

    if (fNeedNextFrame)
    {
        clock_gettime(CLOCK_MONOTONIC, &TimeSpec);
        PRINT_LOG(VERBOSE, "Time to sent frame : %12ld.%9ld", TimeSpec.tv_sec, TimeSpec.tv_nsec);
    }
}
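In the second branch above (when USE_H264_VIDEO_RTP_SINK is not defined), the framer fragments a large NAL unit into FU-A packets by hand: fTo[0] is the FU indicator and fTo[1] the FU header, both derived from the original NAL header byte as described in RFC 6184. A small sketch of that derivation follows; the helper name is illustrative and is not part of the source above.

#include <cstdint>

// Build the two-byte FU-A prefix from the original NAL header byte:
static void buildFuAPrefix(uint8_t nalHeader, bool firstFragment, bool lastFragment,
                           uint8_t& fuIndicator, uint8_t& fuHeader) {
  // FU indicator: keep the F and NRI bits, set the type field to 28 (FU-A):
  fuIndicator = (nalHeader & 0xE0) | 0x1C;

  // FU header: original NAL unit type plus the Start/End flags:
  fuHeader = nalHeader & 0x1F;
  if (firstFragment) fuHeader |= 0x80; // S bit
  if (lastFragment)  fuHeader |= 0x40; // E bit
}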
Code example #23
void MPEG2TransportStreamTrickModeFilter::doGetNextFrame() {
  //  fprintf(stderr, "#####DGNF1\n");
  // If our client's buffer size is too small, then deliver
  // a 0-byte 'frame', to tell it to process all of the data that it has
  // already read, before asking for more data from us:
  if (fMaxSize < TRANSPORT_PACKET_SIZE) {
    fFrameSize = 0;
    afterGetting(this);
    return;
  }

  while (1) {
    // Get the next record from our index file.
    // This tells us the type of frame this data is, which Transport Stream packet
    // (from the input source) the data comes from, and where in the Transport Stream
    // packet it comes from:
    u_int8_t recordType;
    float recordPCR;
    Boolean endOfIndexFile = False;
    if (!fIndexFile->readIndexRecordValues(fNextIndexRecordNum,
					   fDesiredTSPacketNum, fDesiredDataOffset,
					   fDesiredDataSize, recordPCR,
					   recordType)) {
      // We ran off the end of the index file.  If we're not delivering a
      // pre-saved frame, then handle this the same way as if the
      // input Transport Stream source ended.
      if (fState != DELIVERING_SAVED_FRAME) {
	onSourceClosure1();
	return;
      }
      endOfIndexFile = True;
    } else if (!fHaveStarted) {
      fFirstPCR = recordPCR;
      fHaveStarted = True;
    }
    //    fprintf(stderr, "#####read index record %ld: ts %ld: %c, PCR %f\n", fNextIndexRecordNum, fDesiredTSPacketNum, isIFrameStart(recordType) ? 'I' : isNonIFrameStart(recordType) ? 'j' : 'x', recordPCR);
    fNextIndexRecordNum
      += (fState == DELIVERING_SAVED_FRAME) ? 1 : fDirection;

    // Handle this index record, depending on the record type and our current state:
    switch (fState) {
    case SKIPPING_FRAME:
    case SAVING_AND_DELIVERING_FRAME: {
      //      if (fState == SKIPPING_FRAME) fprintf(stderr, "\tSKIPPING_FRAME\n"); else fprintf(stderr, "\tSAVING_AND_DELIVERING_FRAME\n");//#####
      if (isIFrameStart(recordType)) {
	// Save a record of this frame:
	fSavedFrameIndexRecordStart = fNextIndexRecordNum - fDirection;
	fUseSavedFrameNextTime = True;
	//	fprintf(stderr, "\trecording\n");//#####
	if ((fFrameCount++)%fScale == 0 && fUseSavedFrameNextTime) {
	  // A frame is due now.
	  fFrameCount = 1; // reset to avoid overflow
	  if (fDirection > 0) {
	    // Begin delivering this frame, as we're scanning it:
	    fState = SAVING_AND_DELIVERING_FRAME;
	    //	    fprintf(stderr, "\tdelivering\n");//#####
	    fDesiredDataPCR = recordPCR; // use this frame's PCR
	    attemptDeliveryToClient();
	    return;
	  } else {
	    // Deliver this frame, then resume normal scanning:
	    // (This relies on the index records having begun with an I-frame.)
	    fState = DELIVERING_SAVED_FRAME;
	    fSavedSequentialIndexRecordNum = fNextIndexRecordNum;
	    fDesiredDataPCR = recordPCR;
	    // use this frame's (not the saved frame's) PCR
	    fNextIndexRecordNum = fSavedFrameIndexRecordStart;
	    //	    fprintf(stderr, "\tbeginning delivery of saved frame\n");//#####
	  }
	} else {
	  // No frame is needed now:
	  fState = SKIPPING_FRAME;
	}
      } else if (isNonIFrameStart(recordType)) {
	if ((fFrameCount++)%fScale == 0 && fUseSavedFrameNextTime) {
	  // A frame is due now, so begin delivering the one that we had saved:
	  // (This relies on the index records having begun with an I-frame.)
	  fFrameCount = 1; // reset to avoid overflow
	  fState = DELIVERING_SAVED_FRAME;
	  fSavedSequentialIndexRecordNum = fNextIndexRecordNum;
	  fDesiredDataPCR = recordPCR;
	  // use this frame's (not the saved frame's) PCR
	  fNextIndexRecordNum = fSavedFrameIndexRecordStart;
	  //	  fprintf(stderr, "\tbeginning delivery of saved frame\n");//#####
	} else {
	  // No frame is needed now:
	  fState = SKIPPING_FRAME;
	}
      } else {
	// Not the start of a frame, but deliver it, if it's needed:
	if (fState == SAVING_AND_DELIVERING_FRAME) {
	  //	  fprintf(stderr, "\tdelivering\n");//#####
	  fDesiredDataPCR = recordPCR; // use this frame's PCR
	  attemptDeliveryToClient();
	  return;
	}
      }
      break;
    }
    case DELIVERING_SAVED_FRAME: {
      //      fprintf(stderr, "\tDELIVERING_SAVED_FRAME\n");//#####
      if (endOfIndexFile
	  || (isIFrameStart(recordType)
	      && fNextIndexRecordNum-1 != fSavedFrameIndexRecordStart)
	  || isNonIFrameStart(recordType)) {
	//	fprintf(stderr, "\tended delivery of saved frame\n");//#####
	// We've reached the end of the saved frame, so revert to the
	// original sequence of index records:
	fNextIndexRecordNum = fSavedSequentialIndexRecordNum;
	fUseSavedFrameNextTime = KEEP_ORIGINAL_FRAME_RATE;
	fState = SKIPPING_FRAME;
      } else {
	// Continue delivering:
	//	fprintf(stderr, "\tdelivering\n");//#####
	attemptDeliveryToClient();
	return;
      }
      break;
    }
    }
  }
}
Code example #24
void EncoderVideoSource::MPEG4_doGetNextFrame()
{
	// Read a new frame into fBuffer, YUV420 format is assumed
	int size = (fWidth*fHeight*3/2);
	int videoType;
	unsigned int intervalTime = 0;

	Debug(ckite_log_message, "fMaxSize = %d\n", fMaxSize);
	intervalTime = computePresentationTime();

	if (strcmp(mediaType, "store") == 0)
	{
		if (fWidth == 720 && fHeight == 576)
		{
			videoType = getLivehdFrame();
		}
		else
		{
		  getStoreVideoFrame( fp, 0, (unsigned char *)fBuffer, &size, &videoType, false);
		}
	}
	else
	{
		if (fWidth == 720 && fHeight == 576)
		{
			videoType = getLivehdFrame();
		}
		else
		{  
			Debug(ckite_log_message, "cif video is ready. size is of = %d\n", size);
			getRealVideoFrame(fChannel, fp, mediaType, fBuffer, &size, &videoType);
		}
	}
	if(size <= 0)
		 size = 0;

	// Encode the frame
	int ret = 0;
	
#if ENC_SOUCE
	if (fEncoderHandle != NULL)
	{
			Debug(ckite_log_message, "xvid_encode_frame fWidth = %d, fHeight = %d\n", fWidth, fHeight);
			ret = xvid_encode_frame(fEncoderHandle, fBuffer, fWidth, fHeight, fTo, intervalTime);
			if (ret > 0)
			{
				Debug(ckite_log_message, "Frame length %d, header %02x, %02x, %02x, %02x\n", ret, fTo[0], fTo[1], fTo[2], fTo[3]);
				fFrameSize = ret;
			}
	}
#else
	if (videoType == VIDEO_MPEG4 || videoType == VIDEO_H264)
	{
		if (size != 0)
			memcpy(fTo, fBuffer, size);
		fFrameSize = size;
	}
#endif
	fPictureEndMarker = True;
	afterGetting(this);
}
Code example #25
void DVVideoStreamFramer::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes) {
  if (fOurProfile == NULL && frameSize >= DV_SAVED_INITIAL_BLOCKS_SIZE) {
    // (Try to) parse this data enough to figure out its profile.
    // We assume that the data begins on a (80-byte) block boundary, but not necessarily on a (150-block) sequence boundary.
    // We therefore scan each 80-byte block, until we find the 6-block header that begins a sequence:
    u_int8_t const* data = (fTo == NULL) ? fSavedInitialBlocks : fTo;
    for (u_int8_t const* ptr = data; ptr + 6*DV_DIF_BLOCK_SIZE <= &data[DV_SAVED_INITIAL_BLOCKS_SIZE]; ptr += DV_DIF_BLOCK_SIZE) {
      // Check whether "ptr" points to an appropriate header:
      u_int8_t const sectionHeader = DVSectionId(0);
      u_int8_t const sectionVAUX = DVSectionId(5);
      u_int8_t const packHeaderNum = DVData(0,0);

      if (sectionHeader == DV_SECTION_HEADER
	  && (packHeaderNum == DV_PACK_HEADER_10 || packHeaderNum == DV_PACK_HEADER_12)
	  && (sectionVAUX >= DV_SECTION_VAUX_MIN && sectionVAUX <= DV_SECTION_VAUX_MAX)) {
	// This data begins a sequence; look up the DV profile from this:
	u_int8_t const apt = DVData(0,1)&0x07;
	u_int8_t const sType = DVData(5,48)&0x1F;
	u_int8_t const sequenceCount = (packHeaderNum == DV_PACK_HEADER_10) ? 10 : 12;

	// Use these three parameters (apt, sType, sequenceCount) to look up the DV profile:
	for (DVVideoProfile const* profile = profiles; profile->name != NULL; ++profile) {
	  if (profile->apt == apt && profile->sType == sType && profile->sequenceCount == sequenceCount) {
	    fOurProfile = profile;
	    break;
	  }
	}
	break; // because we found a correct sequence header (even if we don't happen to define a profile for it)
      }
    }
  }

  if (fTo != NULL) { // There is a downstream object; complete delivery to it (or read more data, if necessary)
    unsigned const totFrameSize
      = fOurProfile != NULL ? ((DVVideoProfile const*)fOurProfile)->dvFrameSize : DV_SMALLEST_POSSIBLE_FRAME_SIZE;
    fFrameSize += frameSize;
    fTo += frameSize;

    if (fFrameSize < totFrameSize && fFrameSize < fMaxSize && numTruncatedBytes == 0) {
      // We have more data to deliver; get it now:
      getAndDeliverData();
    } else {
      // We're done delivering this DV frame (but check for truncation):
      fNumTruncatedBytes = totFrameSize - fFrameSize;

      if (fOurProfile != NULL) {
	// Also set the presentation time, and increment it for next time,
	// based on the length of this frame:
	fPresentationTime = fNextFramePresentationTime;

	DVVideoProfile const* ourProfile =(DVVideoProfile const*)fOurProfile;
	double durationInMicroseconds = (fFrameSize*ourProfile->frameDuration)/ourProfile->dvFrameSize;
	fDurationInMicroseconds = (unsigned)durationInMicroseconds;
	fNextFramePresentationTime.tv_usec += fDurationInMicroseconds;
	fNextFramePresentationTime.tv_sec += fNextFramePresentationTime.tv_usec/MILLION;
	fNextFramePresentationTime.tv_usec %= MILLION;
      }

      afterGetting(this);
    }
  } else {
    // We read data into our special buffer; signal that it has arrived:
    fInitialBlocksPresent = True;
  }
}
Code example #26
void MultiFramedRTPSource::doGetNextFrame1() {
  while (fNeedDelivery) {
    // If we already have packet data available, then deliver it now.
    Boolean packetLossPrecededThis;
    BufferedPacket* nextPacket
      = fReorderingBuffer->getNextCompletedPacket(packetLossPrecededThis);
    if (nextPacket == NULL) break;

    fNeedDelivery = False;

    if (nextPacket->useCount() == 0) {
      // Before using the packet, check whether it has a special header
      // that needs to be processed:
      unsigned specialHeaderSize;
      if (!processSpecialHeader(nextPacket, specialHeaderSize)) {
	// Something's wrong with the header; reject the packet:
	fReorderingBuffer->releaseUsedPacket(nextPacket);
	fNeedDelivery = True;
	continue;
      }
      nextPacket->skip(specialHeaderSize);
    }

    // Check whether we're part of a multi-packet frame, and whether
    // there was packet loss that would render this packet unusable:
    if (fCurrentPacketBeginsFrame) {
      if (packetLossPrecededThis || fPacketLossInFragmentedFrame) {
	// We didn't get all of the previous frame.
	// Forget any data that we used from it:
	fTo = fSavedTo; fMaxSize = fSavedMaxSize;
	fFrameSize = 0;
      }
      fPacketLossInFragmentedFrame = False;
    } else if (packetLossPrecededThis) {
      // We're in a multi-packet frame, with preceding packet loss
      fPacketLossInFragmentedFrame = True;
    }
    if (fPacketLossInFragmentedFrame) {
      // This packet is unusable; reject it:
      fReorderingBuffer->releaseUsedPacket(nextPacket);
      fNeedDelivery = True;
      continue;
    }

    // The packet is usable. Deliver all or part of it to our caller:
    unsigned frameSize;
    nextPacket->use(fTo, fMaxSize, frameSize, fNumTruncatedBytes,
		    fCurPacketRTPSeqNum, fCurPacketRTPTimestamp,
		    fPresentationTime, fCurPacketHasBeenSynchronizedUsingRTCP,
		    fCurPacketMarkerBit);
    fFrameSize += frameSize;

    if (!nextPacket->hasUsableData()) {
      // We're completely done with this packet now
      fReorderingBuffer->releaseUsedPacket(nextPacket);
    }

    if (fCurrentPacketCompletesFrame && fFrameSize > 0) {
      // We have all the data that the client wants.
      if (fNumTruncatedBytes > 0) {
	envir() << "MultiFramedRTPSource::doGetNextFrame1(): The total received frame size exceeds the client's buffer size ("
		<< fSavedMaxSize << ").  "
		<< fNumTruncatedBytes << " bytes of trailing data will be dropped!\n";
      }
      // Call our own 'after getting' function, so that the downstream object can consume the data:
      if (fReorderingBuffer->isEmpty()) {
	// Common case optimization: There are no more queued incoming packets, so this code will not get
	// executed again without having first returned to the event loop.  Call our 'after getting' function
	// directly, because there's no risk of a long chain of recursion (and thus stack overflow):
	afterGetting(this);
      } else {
	// Special case: Call our 'after getting' function via the event loop.
	nextTask() = envir().taskScheduler().scheduleDelayedTask(0,
								 (TaskFunc*)FramedSource::afterGetting, this);
      }
    } else {
      // This packet contained fragmented data, and does not complete
      // the data that the client wants.  Keep getting data:
      fTo += frameSize; fMaxSize -= frameSize;
      fNeedDelivery = True;
    }
  }
}
Code example #27
// Note: We should change the following to use asynchronous file reading, #####
// as we now do with ByteStreamFileSource. #####
void WAVAudioFileSource::doGetNextFrame() {
  if (feof(fFid) || ferror(fFid) || (fLimitNumBytesToStream && fNumBytesToStream == 0)) {
    handleClosure(this);
    return;
  }

  // Try to read as many bytes as will fit in the buffer provided (or "fPreferredFrameSize" if less)
  if (fLimitNumBytesToStream && fNumBytesToStream < fMaxSize) {
    fMaxSize = fNumBytesToStream;
  }
  if (fPreferredFrameSize < fMaxSize) {
    fMaxSize = fPreferredFrameSize;
  }
  unsigned bytesPerSample = (fNumChannels*fBitsPerSample)/8;
  if (bytesPerSample == 0) bytesPerSample = 1; // because we can't read less than a byte at a time
  unsigned bytesToRead = fMaxSize - fMaxSize%bytesPerSample;
  if (fScaleFactor == 1) {
    // Common case - read samples in bulk:
    fFrameSize = fread(fTo, 1, bytesToRead, fFid);
    fNumBytesToStream -= fFrameSize;
  } else {
    // We read every 'fScaleFactor'th sample:
    fFrameSize = 0;
    while (bytesToRead > 0) {
      size_t bytesRead = fread(fTo, 1, bytesPerSample, fFid);
      if (bytesRead <= 0) break;
      fTo += bytesRead;
      fFrameSize += bytesRead;
      fNumBytesToStream -= bytesRead;
      bytesToRead -= bytesRead;

      // Seek to the appropriate place for the next sample:
      fseek(fFid, (fScaleFactor-1)*bytesPerSample, SEEK_CUR);
    }
  }

  // Set the 'presentation time' and 'duration' of this frame:
  if (fPresentationTime.tv_sec == 0 && fPresentationTime.tv_usec == 0) {
    // This is the first frame, so use the current time:
    gettimeofday(&fPresentationTime, NULL);
  } else {
    // Increment by the play time of the previous data:
    unsigned uSeconds	= fPresentationTime.tv_usec + fLastPlayTime;
    fPresentationTime.tv_sec += uSeconds/1000000;
    fPresentationTime.tv_usec = uSeconds%1000000;
  }

  // Remember the play time of this data:
  fDurationInMicroseconds = fLastPlayTime
    = (unsigned)((fPlayTimePerSample*fFrameSize)/bytesPerSample);

  // Switch to another task, and inform the reader that he has data:
#if defined(__WIN32__) || defined(_WIN32)
  // HACK: One of our applications that uses this source uses an
  // implementation of scheduleDelayedTask() that performs very badly
  // (chewing up lots of CPU time, apparently polling) on Windows.
  // Until this is fixed, we just call our "afterGetting()" function
  // directly.  This avoids infinite recursion, as long as our sink
  // is discontinuous, which is the case for the RTP sink that
  // this application uses. #####
  afterGetting(this);
#else
  nextTask() = envir().taskScheduler().scheduleDelayedTask(0,
			(TaskFunc*)FramedSource::afterGetting, this);
#endif
}