Example #1
void TCPStreamSink::processBuffer() {
  // First, try writing data to our output socket, if we can:
  if (fOutputSocketIsWritable && numUnwrittenBytes() > 0) {
    int numBytesWritten
      = send(fOutputSocketNum, (const char*)&fBuffer[fUnwrittenBytesStart], numUnwrittenBytes(), 0);
    if (numBytesWritten < (int)numUnwrittenBytes()) {
      // The output socket is no longer writable.  Set a handler to be called when it becomes writable again.
      fOutputSocketIsWritable = False;
      envir().taskScheduler().setBackgroundHandling(fOutputSocketNum, SOCKET_WRITABLE, socketWritableHandler, this);
    }
    if (numBytesWritten > 0) {
      // We wrote at least some of our data.  Update our buffer pointers:
      fUnwrittenBytesStart += numBytesWritten;
      if (fUnwrittenBytesStart > fUnwrittenBytesEnd) fUnwrittenBytesStart = fUnwrittenBytesEnd; // sanity check
      if (fUnwrittenBytesStart == fUnwrittenBytesEnd && (!fInputSourceIsOpen || !fSource->isCurrentlyAwaitingData())) {
	fUnwrittenBytesStart = fUnwrittenBytesEnd = 0; // reset the buffer to empty
      }
    }
  }

  // Then, read from our input source, if we can (& we're not already reading from it):
  if (fInputSourceIsOpen && freeBufferSpace() >= TCP_STREAM_SINK_MIN_READ_SIZE && !fSource->isCurrentlyAwaitingData()) {
    fSource->getNextFrame(&fBuffer[fUnwrittenBytesEnd], freeBufferSpace(), afterGettingFrame, this, ourOnSourceClosure, this);
  }

  if (!fInputSourceIsOpen && numUnwrittenBytes() == 0) {
    // We're now done:
    onSourceClosure(this);
  }
}
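For orientation, here is a minimal sketch of how a sink like the one above is typically wired up so that onSourceClosure() ultimately reaches the caller. It assumes live555's TCPStreamSink::createNew() and MediaSink::startPlaying() interfaces; the names startStreaming, afterPlaying, env, inputSource and clientSocket are placeholders, not code taken from these examples.

// Hypothetical wiring: startPlaying() registers afterPlaying() as the callback that
// onSourceClosure() invokes once the input source has closed and all buffered data
// has been written to the TCP socket by processBuffer() above.
void afterPlaying(void* clientData) {
  TCPStreamSink* sink = (TCPStreamSink*)clientData;
  Medium::close(sink); // release the sink object
}

void startStreaming(UsageEnvironment& env, FramedSource* inputSource, int clientSocket) {
  TCPStreamSink* sink = TCPStreamSink::createNew(env, clientSocket);
  sink->startPlaying(*inputSource, afterPlaying, sink);
}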
Example #2
void HTTPSink::ourOnSourceClosure(void* clientData) {
  // No more input frames - we're done:
  HTTPSink* sink = (HTTPSink*) clientData;
  ::closeSocket(sink->fClientSocket);
  sink->fClientSocket = -1;
  onSourceClosure(sink);
}
Example #3
void FileSink::afterGettingFrame(unsigned frameSize,
				 unsigned numTruncatedBytes,
				 struct timeval presentationTime) {
  if (numTruncatedBytes > 0) {
    envir() << "FileSink::afterGettingFrame(): The input frame data was too large for our buffer size ("
	    << fBufferSize << ").  "
            << numTruncatedBytes << " bytes of trailing data were dropped!  Correct this by increasing the \"bufferSize\" parameter in the \"createNew()\" call to at least "
            << fBufferSize + numTruncatedBytes << "\n";
  }
  addData(fBuffer, frameSize, presentationTime);

  if (fOutFid == NULL || fflush(fOutFid) == EOF) {
    // The output file has closed.  Handle this the same way as if the input source had closed:
    if (fSource != NULL) fSource->stopGettingFrames();
    onSourceClosure();
    return;
  }

  if (fPerFrameFileNameBuffer != NULL) {
    if (fOutFid != NULL) { fclose(fOutFid); fOutFid = NULL; }
  }

  // Then try getting the next frame:
  continuePlaying();
}
Example #4
void MFSD_DummySink::afterGettingFrame1() {
  if (fReturnFirstSeenCode && fOurDemux.lastSeenSCR().isValid) {
    // We were asked to return the first SCR that we saw, and we've seen one,
    // so we're done.  (Handle this as if the input source had closed.)
    onSourceClosure(this);
    return;
  }

  continuePlaying();
}
Example #5
void DefaultSink::onAfterGettingFrame(unsigned frame_size,
                                      unsigned truncated_bytes,
                                      struct timeval const & presentation_time,
                                      unsigned UNUSED_PARAM(duration_in_microseconds))
{
    crLogIfD(_verbose, getFrameInfo(frame_size, truncated_bytes, presentation_time));

    if (!_have_written_first_frame) {
        // If we have NAL units encoded in "sprop parameter strings",
        // prepend these to the file:

        for (auto & param : _sprop_parameter_sets) {
            unsigned int sprop_records_size = 0;

            // Returns the binary value of each 'parameter set' specified in a "sprop-parameter-sets" string
            // (in the SDP description for a H.264/RTP stream).
            //
            // The value is returned as an array (length "numSPropRecords") of "SPropRecord"s.
            // This array is dynamically allocated by this routine, and must be delete[]d by the caller.
            SPropRecord * sprop_records = parseSPropParameterSets(param.data(), sprop_records_size);
            for (unsigned int i = 0; i < sprop_records_size; ++i) {
                write(NAL_START_CODE, sizeof(NAL_START_CODE), presentation_time);
                write(sprop_records[i].sPropBytes, sprop_records[i].sPropLength, presentation_time);
            }
            delete [] sprop_records;
        }
        _have_written_first_frame = true;
    }

    if (truncated_bytes > 0) {
        auto const BUFFER_SIZE = _receive_buffer.size();
        crLogW("DefaultSink::onAfterGettingFrame() The input frame data was too large for our buffer size ({})"
               "{}bytes of trailing data was dropped!"
               "Correct this by increasing the 'bufferSize' parameter in the 'createNew()' call to at least {}",
               BUFFER_SIZE, truncated_bytes, BUFFER_SIZE + truncated_bytes);
    }

    // Write the input data to the file, with the start code in front:
    write(NAL_START_CODE, sizeof(NAL_START_CODE), presentation_time);
    write(_receive_buffer.data(), frame_size, presentation_time);

    if (isClosed()) {
        // The output file has closed.
        // Handle this the same way as if the input source had closed:
        if (fSource != nullptr) {
            fSource->stopGettingFrames();
        }
        onSourceClosure();
        return;
    }

    // Then continue, to request the next frame of data:
    continuePlaying();
}
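As a side note, a hedged sketch of where the "sprop-parameter-sets" string passed to parseSPropParameterSets() above usually comes from: it assumes live555's MediaSubsession::fmtp_spropparametersets() accessor, and the helper name collectSPropParameterSets is hypothetical.

#include <string>
#include <vector>

// Hypothetical helper: collect the SDP "sprop-parameter-sets" attribute of an H.264
// subsession so that a sink like DefaultSink can prepend the decoded SPS/PPS NAL
// units (each preceded by a start code) before writing the first frame, as above.
std::vector<std::string> collectSPropParameterSets(MediaSubsession& subsession) {
    std::vector<std::string> sets;
    char const* sprop = subsession.fmtp_spropparametersets();
    if (sprop != nullptr && sprop[0] != '\0') {
        sets.emplace_back(sprop);
    }
    return sets;
}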
Example #6
void MultiFramedRTPSink::sendPacketIfNecessary() {
  if (fNumFramesUsedSoFar > 0) {
    // Send the packet:
#ifdef TEST_LOSS
    if ((our_random()%10) != 0) // simulate 10% packet loss #####
#endif
      if (!fRTPInterface.sendPacket(fOutBuf->packet(), fOutBuf->curPacketSize())) {
	// if failure handler has been specified, call it
	if (fOnSendErrorFunc != NULL) (*fOnSendErrorFunc)(fOnSendErrorData);
      }
    ++fPacketCount;
    fTotalOctetCount += fOutBuf->curPacketSize();
    fOctetCount += fOutBuf->curPacketSize()
      - rtpHeaderSize - fSpecialHeaderSize - fTotalFrameSpecificHeaderSizes;

    ++fSeqNo; // for next time
  }

  if (fOutBuf->haveOverflowData()
      && fOutBuf->totalBytesAvailable() > fOutBuf->totalBufferSize()/2) {
    // Efficiency hack: Reset the packet start pointer to just in front of
    // the overflow data (allowing for the RTP header and special headers),
    // so that we probably don't have to "memmove()" the overflow data
    // into place when building the next packet:
    unsigned newPacketStart = fOutBuf->curPacketSize()
      - (rtpHeaderSize + fSpecialHeaderSize + frameSpecificHeaderSize());
    fOutBuf->adjustPacketStart(newPacketStart);
  } else {
    // Normal case: Reset the packet start pointer back to the start:
    fOutBuf->resetPacketStart();
  }
  fOutBuf->resetOffset();
  fNumFramesUsedSoFar = 0;

  if (fNoFramesLeft) {
    // We're done:
    onSourceClosure();
  } else {
    // We have more frames left to send.  Figure out when the next frame
    // is due to start playing, then make sure that we wait this long before
    // sending the next packet.
    struct timeval timeNow;
    gettimeofday(&timeNow, NULL);
    int secsDiff = fNextSendTime.tv_sec - timeNow.tv_sec;
    int64_t uSecondsToGo = secsDiff*1000000 + (fNextSendTime.tv_usec - timeNow.tv_usec);
    if (uSecondsToGo < 0 || secsDiff < 0) { // sanity check: Make sure that the time-to-delay is non-negative:
      uSecondsToGo = 0;
    }

    // Delay this amount of time:
    nextTask() = envir().taskScheduler().scheduleDelayedTask(uSecondsToGo, (TaskFunc*)sendNext, this);
  }
}
Example #7
void FileSink::afterGettingFrame1(unsigned frameSize,
				  struct timeval presentationTime) {
  addData(fBuffer, frameSize, presentationTime);

  if (fOutFid == NULL || fflush(fOutFid) == EOF) {
    // The output file has closed.  Handle this the same way as if the
    // input source had closed:
    onSourceClosure(this);

    stopPlaying();
    return;
  }
 
  if (fPerFrameFileNameBuffer != NULL) {
    if (fOutFid != NULL) { fclose(fOutFid); fOutFid = NULL; }
  }

  // Then try getting the next frame:
  continuePlaying();
}
Example #8
void MultiFramedRTPSink::sendPacketIfNecessary() {
  if (fNumFramesUsedSoFar > 0) {
    if (fSeqNo < 10) {
      Debug(ckite_log_message, "fSeqNo = %d\n", fSeqNo);
    }

    // Send the packet:
    int sndIsSuccess = 0;
#ifdef TEST_LOSS
    if ((our_random()%10) != 0) // simulate 10% packet loss #####
#endif
      sndIsSuccess = fRTPInterface.sendPacket(fOutBuf->packet(), fOutBuf->curPacketSize());
    Debug(ckite_log_message, "sndIsSuccess = %d, fOwner = %x\n", sndIsSuccess, fOwner);
    // On success, note the client's liveness; on failure, tear down its HTTP socket:
    if (fOwner != NULL && sndIsSuccess == 1) {
      Debug(ckite_log_message, "sendPacketIfNecessary.\n");
      ((RTSPServer::RTSPClientSession*)fOwner)->noteLiveness();
    }
    if (fOwner != NULL && sndIsSuccess == -1) {
      ((RTSPServer::RTSPClientSession*)fOwner)->closeHttpSocketAndFreeResource();
    }
    //Debug(ckite_log_message, "[wayde]MultiFramedRTPSink::sendPacketIfNecessary packetSize = %d\n",fOutBuf->curPacketSize());
    ++fPacketCount;
    fTotalOctetCount += fOutBuf->curPacketSize();
    fOctetCount += fOutBuf->curPacketSize()
      - rtpHeaderSize - fSpecialHeaderSize - fTotalFrameSpecificHeaderSizes;

    ++fSeqNo; // for next time
  }

  if (fOutBuf->haveOverflowData()
      && fOutBuf->totalBytesAvailable() > fOutBuf->totalBufferSize()/2) {
    // Efficiency hack: Reset the packet start pointer to just in front of
    // the overflow data (allowing for the RTP header and special headers),
    // so that we probably don't have to "memmove()" the overflow data
    // into place when building the next packet:
    unsigned newPacketStart = fOutBuf->curPacketSize()
      - (rtpHeaderSize + fSpecialHeaderSize + frameSpecificHeaderSize());
    fOutBuf->adjustPacketStart(newPacketStart);
  } else {
    // Normal case: Reset the packet start pointer back to the start:
    fOutBuf->resetPacketStart();
  }
  fOutBuf->resetOffset();
  fNumFramesUsedSoFar = 0;

  if (fNoFramesLeft) {
    // We're done:
    onSourceClosure(this);
  } else {
    // We have more frames left to send.  Figure out when the next frame
    // is due to start playing, then make sure that we wait this long before
    // sending the next packet.
    struct timeval timeNow;
    gettimeofday(&timeNow, NULL);
    int uSecondsToGo;
    if (fNextSendTime.tv_sec < timeNow.tv_sec
	|| (fNextSendTime.tv_sec == timeNow.tv_sec && fNextSendTime.tv_usec < timeNow.tv_usec)) {
      uSecondsToGo = 0; // prevents integer underflow if too far behind
    } else {
      uSecondsToGo = (fNextSendTime.tv_sec - timeNow.tv_sec)*1000000 + (fNextSendTime.tv_usec - timeNow.tv_usec);
    }

    // Delay this amount of time:
    nextTask() = envir().taskScheduler().scheduleDelayedTask(uSecondsToGo,
						(TaskFunc*)sendNext, this);
  }
}