Example #1
void AMRAudioEncoder
::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes,
                     struct timeval presentationTime, unsigned durationInMicroseconds) {
  // Adjust presentationTime to allow for the data that's still in our buffer:
  int uSecondsAdjustment
    = (int)(fInputSampleBufferBytesFull*fMicrosecondsPerByte);
  presentationTime.tv_sec -= uSecondsAdjustment/MILLION;
  uSecondsAdjustment %= MILLION;
  if (presentationTime.tv_usec < uSecondsAdjustment) {
    --presentationTime.tv_sec;
    presentationTime.tv_usec += MILLION;
  }
  presentationTime.tv_usec -= uSecondsAdjustment;

  // Don't allow the presentation time to decrease:
  if (presentationTime.tv_sec > fLastInputDataPresentationTime.tv_sec ||
      (presentationTime.tv_sec == fLastInputDataPresentationTime.tv_sec &&
       presentationTime.tv_usec > fLastInputDataPresentationTime.tv_usec)) {
    fLastInputDataPresentationTime = presentationTime;
  }
  fInputSampleBufferBytesFull += frameSize;  

  // Try again to encode and deliver data to the sink:
  doGetNextFrame();
}
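The adjustment in Example #1 converts the data still sitting in the input buffer into microseconds and subtracts that amount from the presentation time, borrowing one second whenever the microsecond field would underflow. Below is a minimal standalone sketch of the same timeval arithmetic (the helper name is hypothetical; the code above performs these steps inline). For instance, subtracting 1,500,000 us from {tv_sec = 10, tv_usec = 200000} yields {tv_sec = 8, tv_usec = 700000}.

#include <sys/time.h>

#define MILLION 1000000

// Subtract uSeconds microseconds from *t, borrowing one second from tv_sec
// whenever tv_usec alone is too small -- the same steps performed inline in
// AMRAudioEncoder::afterGettingFrame1() above.
static void subtractMicroseconds(struct timeval* t, unsigned uSeconds) {
  t->tv_sec -= uSeconds / MILLION;
  uSeconds %= MILLION;
  if ((unsigned)t->tv_usec < uSeconds) {
    --t->tv_sec;
    t->tv_usec += MILLION;
  }
  t->tv_usec -= uSeconds;
}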
Example #2
void FramedSource::getNextFrame(unsigned char* to, unsigned maxSize,
				afterGettingFunc* afterGettingFunc,
				void* afterGettingClientData,
				onCloseFunc* onCloseFunc,
				void* onCloseClientData) {
  // Make sure we're not already being read:
  if (fIsCurrentlyAwaitingData) {
    envir() << "FramedSource[" << this << "]::getNextFrame(): attempting to read more than once at the same time!\n";
    envir().internalError();
  }

  liveLogInfo(" FramedSource::getNextFrame   \n");

  fTo = to;
  fMaxSize = maxSize;
  fNumTruncatedBytes = 0; // by default; could be changed by doGetNextFrame()
  fDurationInMicroseconds = 0; // by default; could be changed by doGetNextFrame()
  fAfterGettingFunc = afterGettingFunc;
  fAfterGettingClientData = afterGettingClientData;
  fOnCloseFunc = onCloseFunc;
  fOnCloseClientData = onCloseClientData;
  fIsCurrentlyAwaitingData = True;

  doGetNextFrame();
}
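Example #2 is the generic scheduling half of the contract: FramedSource::getNextFrame() records the client's buffer and callbacks, then invokes the pure virtual doGetNextFrame(), which every concrete source (including the classes in the other examples) overrides. A minimal sketch of such a subclass follows; the class name, the zero-filled payload, and the 20 ms duration are made-up illustrations, whereas fTo, fMaxSize, fFrameSize, fPresentationTime, fDurationInMicroseconds, and FramedSource::afterGetting() are the actual live555 members being set up above.

#include "FramedSource.hh"
#include <string.h>
#include <sys/time.h>

// Hypothetical leaf source that delivers fixed-size, zero-filled dummy frames.
class SilenceSource: public FramedSource {
public:
  SilenceSource(UsageEnvironment& env): FramedSource(env) {}

protected:
  virtual void doGetNextFrame() {
    // Deliver at most fMaxSize bytes into the buffer that getNextFrame() gave us:
    unsigned const frameBytes = 160;
    fFrameSize = frameBytes > fMaxSize ? fMaxSize : frameBytes;
    memset(fTo, 0, fFrameSize);
    gettimeofday(&fPresentationTime, NULL);
    fDurationInMicroseconds = 20000; // e.g. 20 ms per frame

    // Signal completion.  A source that delivers data synchronously like this
    // may instead schedule afterGetting() as a delayed task, to avoid deep
    // recursion if the reader immediately asks for the next frame.
    FramedSource::afterGetting(this);
  }
};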
Example #3
void AMRDeinterleaver
::afterGettingFrame1(unsigned frameSize, struct timeval presentationTime) {
  RawAMRRTPSource* source = (RawAMRRTPSource*)fInputSource;

  // First, put the frame into our deinterleaving buffer:
  fDeinterleavingBuffer->deliverIncomingFrame(frameSize, source, presentationTime);

  // Then, try delivering a frame to the client (if he wants one):
  if (fNeedAFrame) doGetNextFrame();
}
Example #4
void H264FUAFragmenter::afterGettingFrame1(unsigned frameSize,
					   unsigned numTruncatedBytes,
					   struct timeval presentationTime,
					   unsigned durationInMicroseconds) {
  fNumValidDataBytes += frameSize;
  fSaveNumTruncatedBytes = numTruncatedBytes;
  fPresentationTime = presentationTime;
  fDurationInMicroseconds = durationInMicroseconds;

  // Deliver data to the client:
  doGetNextFrame();
}
Example #5
void ByteStreamMultiFileSource::onSourceClosure1() {
  // This routine was called because the currently-read source was closed
  // (probably due to EOF).  Close this source down, and move to the
  // next one:
  ByteStreamFileSource*& source
    = fSourceArray[fCurrentlyReadSourceNumber++];
  Medium::close(source);
  source = NULL;

  // Try reading again:
  doGetNextFrame();
}
Example #6
void QCELPDeinterleaver
::afterGettingFrame1(unsigned frameSize, struct timeval presentationTime) {
  RawQCELPRTPSource* source = (RawQCELPRTPSource*)fInputSource;

  // First, put the frame into our deinterleaving buffer:
  fDeinterleavingBuffer
    ->deliverIncomingFrame(frameSize, source->interleaveL(),
			   source->interleaveN(), source->frameIndex(),
			   source->curPacketRTPSeqNum(),
			   presentationTime);

  // Then, try delivering a frame to the client (if he wants one):
  if (fNeedAFrame) doGetNextFrame();
}
Example #7
void MPEG2TransportStreamAccumulator
::afterGettingFrame1(unsigned frameSize,
		     unsigned numTruncatedBytes,
		     struct timeval presentationTime,
		     unsigned durationInMicroseconds) {
  if (fNumBytesGathered == 0) { // this is the first frame of the new chunk
    fPresentationTime = presentationTime;
    fDurationInMicroseconds = 0;
  }
  fNumBytesGathered += frameSize;
  fTo += frameSize;
  fMaxSize -= frameSize;
  fDurationInMicroseconds += durationInMicroseconds;

  // Try again to complete delivery:
  doGetNextFrame();
}
Example #8
File: MP3ADU.cpp  Project: 3660628/live555
Boolean ADUFromMP3Source::doGetNextFrame1() {
  // First, check whether we have enough previously-read data to output an
  // ADU for the last-read MP3 frame:
  unsigned tailIndex;
  Segment* tailSeg;
  Boolean needMoreData;

  if (fSegments->isEmpty()) {
    needMoreData = True;
    tailSeg = NULL; tailIndex = 0; // unneeded, but stops compiler warnings
  } else {
    tailIndex = SegmentQueue::prevIndex(fSegments->nextFreeIndex());
    tailSeg = &(fSegments->s[tailIndex]);

    needMoreData
	  = fTotalDataSizeBeforePreviousRead < tailSeg->backpointer // bp points back too far
      || tailSeg->backpointer + tailSeg->dataHere() < tailSeg->aduSize; // not enough data
  }

  if (needMoreData) {
    // We don't have enough data to output an ADU from the last-read MP3
    // frame, so need to read another one and try again:
    doGetNextFrame();
    return True;
  }

  // Output an ADU from the tail segment:
  fFrameSize = tailSeg->headerSize+tailSeg->sideInfoSize+tailSeg->aduSize;
  fPresentationTime = tailSeg->presentationTime;
  fDurationInMicroseconds = tailSeg->durationInMicroseconds;
  unsigned descriptorSize
    = fIncludeADUdescriptors ? ADUdescriptor::computeSize(fFrameSize) : 0;
#ifdef DEBUG
  fprintf(stderr, "m->a:outputting ADU %d<-%d, nbr:%d, sis:%d, dh:%d, (descriptor size: %d)\n", tailSeg->aduSize, tailSeg->backpointer, fFrameSize, tailSeg->sideInfoSize, tailSeg->dataHere(), descriptorSize);
#endif
  if (descriptorSize + fFrameSize > fMaxSize) {
    envir() << "ADUFromMP3Source::doGetNextFrame1(): not enough room ("
	    << descriptorSize + fFrameSize << ">"
	    << fMaxSize << ")\n";
    fFrameSize = 0;
    return False;
  }

  unsigned char* toPtr = fTo;
  // output the ADU descriptor:
  if (fIncludeADUdescriptors) {
    fFrameSize += ADUdescriptor::generateDescriptor(toPtr, fFrameSize);
  }

  // output header and side info:
  memmove(toPtr, tailSeg->dataStart(),
	  tailSeg->headerSize + tailSeg->sideInfoSize);
  toPtr += tailSeg->headerSize + tailSeg->sideInfoSize;

  // go back to the frame that contains the start of our data:
  unsigned offset = 0;
  unsigned i = tailIndex;
  unsigned prevBytes = tailSeg->backpointer;
  while (prevBytes > 0) {
    i = SegmentQueue::prevIndex(i);
    unsigned dataHere = fSegments->s[i].dataHere();
    if (dataHere < prevBytes) {
      prevBytes -= dataHere;
    } else {
      offset = dataHere - prevBytes;
      break;
    }
  }

  // dequeue any segments that we no longer need:
  while (fSegments->headIndex() != i) {
    fSegments->dequeue(); // we're done with it
  }

  unsigned bytesToUse = tailSeg->aduSize;
  while (bytesToUse > 0) {
    Segment& seg = fSegments->s[i];
    unsigned char* fromPtr
      = &seg.dataStart()[seg.headerSize + seg.sideInfoSize + offset];
    unsigned dataHere = seg.dataHere() - offset;
    unsigned bytesUsedHere = dataHere < bytesToUse ? dataHere : bytesToUse;
    memmove(toPtr, fromPtr, bytesUsedHere);
    bytesToUse -= bytesUsedHere;
    toPtr += bytesUsedHere;
    offset = 0;
    i = SegmentQueue::nextIndex(i);
  }


  if (fFrameCounter++%fScale == 0) {
    // Call our own 'after getting' function.  Because we're not a 'leaf'
    // source, we can call this directly, without risking infinite recursion.
    afterGetting(this);
  } else {
    // Don't use this frame; get another one:
    doGetNextFrame();
  }

  return True;
}
Example #9
void MPEG1or2VideoStreamDiscreteFramer
::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes,
                     struct timeval presentationTime,
                     unsigned durationInMicroseconds) {
  // Check that the first 4 bytes are a system code:
  if (frameSize >= 4 && fTo[0] == 0 && fTo[1] == 0 && fTo[2] == 1) {
    fPictureEndMarker = True; // Assume that we have a complete 'picture' here

    u_int8_t nextCode = fTo[3];
    if (nextCode == 0xB3) { // VIDEO_SEQUENCE_HEADER_START_CODE
      // Note the following 'frame rate' code:
      if (frameSize >= 8) {
	u_int8_t frame_rate_code = fTo[7]&0x0F;
	fFrameRate = frameRateFromCode[frame_rate_code];
      }

      // Also, save away this Video Sequence Header, in case we need it later:
      // First, figure out how big it is:
      unsigned vshSize;
      for (vshSize = 4; vshSize < frameSize-3; ++vshSize) {
	if (fTo[vshSize] == 0 && fTo[vshSize+1] == 0 && fTo[vshSize+2] == 1 &&
	    (fTo[vshSize+3] == 0xB8 || fTo[vshSize+3] == 0x00)) break;
      }
      if (vshSize == frameSize-3) vshSize = frameSize; // There was nothing else following it
      if (vshSize <= sizeof fSavedVSHBuffer) {
	memmove(fSavedVSHBuffer, fTo, vshSize);
	fSavedVSHSize = vshSize;
	fSavedVSHTimestamp
	  = presentationTime.tv_sec + presentationTime.tv_usec/(double)MILLION;
      }
    } else if (nextCode == 0xB8) { // GROUP_START_CODE
      // If necessary, insert a saved Video Sequence Header in front of this:
      double pts = presentationTime.tv_sec + presentationTime.tv_usec/(double)MILLION;
      if (pts > fSavedVSHTimestamp + fVSHPeriod &&
	  fSavedVSHSize + frameSize <= fMaxSize) {
	memmove(&fTo[fSavedVSHSize], &fTo[0], frameSize); // make room for the header
	memmove(&fTo[0], fSavedVSHBuffer, fSavedVSHSize); // insert it
	frameSize += fSavedVSHSize;
	fSavedVSHTimestamp = pts;
      }
    }

    unsigned i = 3;
    if (nextCode == 0xB3 /*VIDEO_SEQUENCE_HEADER_START_CODE*/ ||
	nextCode == 0xB8 /*GROUP_START_CODE*/) {
      // Skip to the following PICTURE_START_CODE (if any):
      for (i += 4; i < frameSize; ++i) {
	if (fTo[i] == 0x00 /*PICTURE_START_CODE*/
	    && fTo[i-1] == 1 && fTo[i-2] == 0 && fTo[i-3] == 0) {
	  nextCode = fTo[i];
	  break;
	}
      }
    }

    if (nextCode == 0x00 /*PICTURE_START_CODE*/ && i+2 < frameSize) {
      // Get the 'temporal_reference' and 'picture_coding_type' from the
      // following 2 bytes:
      ++i;
      unsigned short temporal_reference = (fTo[i]<<2)|(fTo[i+1]>>6);
      unsigned char picture_coding_type = (fTo[i+1]&0x38)>>3;

      // If this is not an "I" frame, but we were asked for "I" frames only, then try again:
      if (fIFramesOnly && picture_coding_type != 1) {
	doGetNextFrame();
	return;
      }

      // If this is a "B" frame, then we have to tweak "presentationTime":
      if (picture_coding_type == 3/*B*/
	  && (fLastNonBFramePresentationTime.tv_usec > 0 ||
	      fLastNonBFramePresentationTime.tv_sec > 0)) {
	int trIncrement
            = fLastNonBFrameTemporal_reference - temporal_reference;
	if (trIncrement < 0) trIncrement += 1024; // field is 10 bits in size

	unsigned usIncrement = fFrameRate == 0.0 ? 0
	  : (unsigned)((trIncrement*MILLION)/fFrameRate);
	unsigned secondsToSubtract = usIncrement/MILLION;
	unsigned uSecondsToSubtract = usIncrement%MILLION;

	presentationTime = fLastNonBFramePresentationTime;
	if ((unsigned)presentationTime.tv_usec < uSecondsToSubtract) {
	  presentationTime.tv_usec += MILLION;
	  if (presentationTime.tv_sec > 0) --presentationTime.tv_sec;
	}
	presentationTime.tv_usec -= uSecondsToSubtract;
	if ((unsigned)presentationTime.tv_sec > secondsToSubtract) {
	  presentationTime.tv_sec -= secondsToSubtract;
	} else {
	  presentationTime.tv_sec = presentationTime.tv_usec = 0;
	}
      } else {
	fLastNonBFramePresentationTime = presentationTime;
	fLastNonBFrameTemporal_reference = temporal_reference;
      }
    }