Example #1
void AMRAudioRTPSink::doSpecialFrameHandling(unsigned fragmentationOffset,
					     unsigned char* frameStart,
					     unsigned numBytesInFrame,
					     struct timeval framePresentationTime,
					     unsigned numRemainingBytes) {
  // If this is the 1st frame in the 1st packet, set the RTP 'M' (marker)
  // bit (because this is considered the start of a talk spurt):
  if (isFirstPacket() && isFirstFrameInPacket()) {
    setMarkerBit();
  }

  // If this is the first frame in the packet, set the 1-byte payload
  // header (using CMR 15)
  if (isFirstFrameInPacket()) {
    u_int8_t payloadHeader = 0xF0;
    setSpecialHeaderBytes(&payloadHeader, 1, 0);
  }

  // Set the TOC field for the current frame, based on the "FT" and "Q"
  // values from our source:
  AMRAudioSource* amrSource = (AMRAudioSource*)fSource;
  if (amrSource == NULL) return; // sanity check

  u_int8_t toc = amrSource->lastFrameHeader();
  // Clear the "F" bit, because we're the last frame in this packet: #####
  toc &= ~0x80;
  setSpecialHeaderBytes(&toc, 1, 1+numFramesUsedSoFar());

  // Important: Also call our base class's doSpecialFrameHandling(),
  // to set the packet's timestamp:
  MultiFramedRTPSink::doSpecialFrameHandling(fragmentationOffset,
                                             frameStart, numBytesInFrame,
                                             framePresentationTime,
                                             numRemainingBytes);
}
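
For reference, here is a minimal standalone sketch (not live555 code) of how the 1-byte payload header and the TOC byte written above are laid out in RFC 4867's octet-aligned mode: the CMR value occupies the top nibble (CMR 15, "no mode request", gives the 0xF0 used above), and each TOC entry packs the F, FT and Q bits. The helper names below are hypothetical and exist only to illustrate the bit layout.

#include <cstdint>
#include <cstdio>

// Hypothetical helper: CMR occupies the top 4 bits of the payload header;
// the low 4 bits are reserved (zero) in octet-aligned mode.
static uint8_t buildPayloadHeader(uint8_t cmr) {
  return uint8_t((cmr & 0x0F) << 4);
}

// Hypothetical helper: a TOC entry packs F (1 bit: "another frame follows"),
// FT (4 bits: frame type) and Q (1 bit: frame quality), then 2 padding bits.
static uint8_t buildTOC(bool followedByAnotherFrame, uint8_t frameType, bool frameOK) {
  return uint8_t((followedByAnotherFrame ? 0x80 : 0x00)
                 | ((frameType & 0x0F) << 3)
                 | (frameOK ? 0x04 : 0x00));
}

int main() {
  // CMR 15 ("no mode request") -> 0xF0, matching the payloadHeader set above.
  std::printf("payload header: 0x%02X\n", buildPayloadHeader(15));
  // A single 12.2 kb/s frame (FT 7), good quality, last in the packet (F = 0).
  std::printf("TOC entry:      0x%02X\n", buildTOC(false, 7, true));
  return 0;
}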
Example #2

void LiveAMRAudioRTPSink::doSpecialFrameHandling(unsigned fragmentationOffset,
                                                 unsigned char* frameStart,
                                                 unsigned numBytesInFrame,
                                                 struct timeval frameTimestamp,
                                                 unsigned numRemainingBytes)
{
#define AUDIO_FRAMES_PER_RTP_FRAME 5

  // If this is the 1st frame in the 1st packet, set the RTP 'M' (marker)
  // bit (because this is considered the start of a talk spurt):
  if (isFirstPacket() && isFirstFrameInPacket()) {
    setMarkerBit();
  }

  // If this is the first frame in the packet, set the 1-byte payload
  // header (using CMR 15)
  if (isFirstFrameInPacket()) {
    u_int8_t payloadHeader = 0xF0;
    setSpecialHeaderBytes(&payloadHeader, 1, 0);
  }

  // Set the TOC field for the current frame, based on the "FT" and "Q"
  // values from our source:
  LiveAMRAudioDeviceSource* amrSource = (LiveAMRAudioDeviceSource*)fSource;
  if (amrSource == NULL) return; // sanity check

  u_int8_t toc = amrSource->lastFrameHeader();
  if (numFramesUsedSoFar() < AUDIO_FRAMES_PER_RTP_FRAME - 1) {
    toc |= 0x80;  // more frames will follow in this packet, so set the "F" bit
  } else {
    toc &= ~0x80; // this is the last frame in the packet, so clear the "F" bit
  }
  setSpecialHeaderBytes(&toc, 1, 1+numFramesUsedSoFar());
  
  // Note: the TOC byte is removed in the custom AMR device source
  // ("LiveAMRAudioDeviceSource"); should the packet's TOC be updated here? (TODO)
  
  // Important: Also call our base class's doSpecialFrameHandling(),
  // to set the packet's timestamp:
  MultiFramedRTPSink::doSpecialFrameHandling(fragmentationOffset,
    frameStart, numBytesInFrame,
    frameTimestamp,
    numRemainingBytes);
}
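
As a side note, here is a minimal sketch (assuming the same RFC 4867 TOC layout as in Example #1) of the F-bit rule that the branch above implements when several frames are bundled into one RTP packet: every TOC entry except the last one has its F bit set. The function below is hypothetical and only demonstrates the bit manipulation on a list of TOC bytes.

#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical illustration: given the raw TOC bytes of the frames that will
// share one RTP packet, set the F bit (0x80) on every entry except the last.
// This has the same effect as the per-frame branch in the sink above.
static std::vector<uint8_t> markFollowBits(std::vector<uint8_t> tocs) {
  for (size_t i = 0; i < tocs.size(); ++i) {
    if (i + 1 < tocs.size()) tocs[i] |= 0x80;   // more frames follow
    else                     tocs[i] &= ~0x80;  // last frame in the packet
  }
  return tocs;
}

int main() {
  // Five identical 12.2 kb/s TOC entries (0x3C), as if bundling 5 frames:
  std::vector<uint8_t> tocs(5, 0x3C);
  for (uint8_t t : markFollowBits(tocs)) std::printf("0x%02X ", t);
  std::printf("\n"); // expected: 0xBC 0xBC 0xBC 0xBC 0x3C
  return 0;
}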
Example #3

void MPEG1or2AudioRTPSink::doSpecialFrameHandling(unsigned fragmentationOffset,
					      unsigned char* frameStart,
					      unsigned numBytesInFrame,
					      struct timeval frameTimestamp,
					      unsigned numRemainingBytes) {
  // If this is the 1st frame in the 1st packet, set the RTP 'M' (marker)
  // bit (because this is considered the start of a talk spurt):
  if (isFirstPacket() && isFirstFrameInPacket()) {
    setMarkerBit();
  }

  // If this is the first frame in the packet, set the lower half of the
  // audio-specific header (to the "fragmentationOffset"):
  if (isFirstFrameInPacket()) {
    setSpecialHeaderWord(fragmentationOffset&0xFFFF);
  }

  // Important: Also call our base class's doSpecialFrameHandling(),
  // to set the packet's timestamp:
  MultiFramedRTPSink::doSpecialFrameHandling(fragmentationOffset,
					     frameStart, numBytesInFrame,
					     frameTimestamp,
					     numRemainingBytes);
}
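
For context, a minimal sketch (not live555 code) of the 4-byte MPEG audio payload header from RFC 2250 that the setSpecialHeaderWord() call above fills in: the upper 16 bits are MBZ (must be zero) and the lower 16 bits carry the fragmentation offset of the first fragment in the packet. The helper below is hypothetical.

#include <cstdint>
#include <cstdio>

// Hypothetical illustration of the RFC 2250 MPEG audio payload header word:
// | MBZ (16 bits, zero) | fragmentation offset (16 bits) |
static uint32_t mpegAudioHeaderWord(unsigned fragmentationOffset) {
  return uint32_t(fragmentationOffset & 0xFFFF); // upper 16 bits remain zero (MBZ)
}

int main() {
  // A fragment that starts 1500 bytes into the original audio frame:
  std::printf("header word: 0x%08X\n", mpegAudioHeaderWord(1500)); // prints 0x000005DC
  return 0;
}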