Code Example #1
File: demuxer.c Project: complynx/mplayer2
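The generic packet reader from mplayer2's demuxer core: it allocates a packet of the requested length, reads from the stream, trims the packet to the number of bytes actually read, fills in pts/pos/keyframe, and appends the packet to the demux stream.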
void ds_read_packet(demux_stream_t *ds, stream_t *stream, int len,
                    double pts, off_t pos, bool keyframe)
{
    demux_packet_t *dp = new_demux_packet(len);
    // stream_read() may deliver fewer bytes than requested (short read
    // or EOF), so shrink the packet to the length actually read:
    len = stream_read(stream, dp->buffer, len);
    resize_demux_packet(dp, len);
    dp->pts = pts;
    dp->pos = pos;
    dp->keyframe = keyframe;
    // append packet to DS stream:
    ds_add_packet(ds, dp);
}
Code Example #2
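A fill-buffer callback with no file header in the listing; the function name suggests the VQF (TwinVQ) audio demuxer. It reads one second of constant-bitrate audio per packet, using nAvgBytesPerSec both as the chunk size and to derive the timestamp from the byte offset. Note that pts/pos are stored on the demux stream here rather than on the packet.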
static int demux_vqf_fill_buffer(demuxer_t* demuxer, demux_stream_t *ds) {
  sh_audio_t* sh_audio = demuxer->audio->sh;
  int l = sh_audio->wf->nAvgBytesPerSec;
  loff_t spos = stream_tell(demuxer->stream);
  demux_packet_t *dp;

  if(stream_eof(demuxer->stream))
    return 0;

  dp = new_demux_packet(l);
  // For CBR audio the timestamp follows from the byte position:
  // pts = byte offset / average byte rate.
  ds->pts = spos / (float)(sh_audio->wf->nAvgBytesPerSec);
  ds->pos = spos;

  // Shrink the packet if the stream delivered fewer bytes than requested:
  l = stream_read(demuxer->stream, dp->buffer, l);
  resize_demux_packet(dp, l);
  ds_add_packet(ds, dp);

  return 1;
}
Code Example #3
File: demux_rawaudio.c Project: maletor/mpv
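mpv's raw-audio demuxer follows the same one-second-per-packet scheme, but stores pts/pos on the packet and makes both relative to movi_start, the offset where the audio data begins.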
static int demux_rawaudio_fill_buffer(demuxer_t* demuxer, demux_stream_t *ds) {
  sh_audio_t* sh_audio = demuxer->audio->sh;
  int l = sh_audio->wf->nAvgBytesPerSec;
  int64_t spos = stream_tell(demuxer->stream);
  demux_packet_t *dp;

  if(demuxer->stream->eof)
    return 0;

  dp = new_demux_packet(l);
  // Timestamps are relative to movi_start, the offset where the audio
  // data begins; for raw CBR audio, pts = data offset / average byte rate.
  dp->pts = (spos - demuxer->movi_start) / (float)(sh_audio->wf->nAvgBytesPerSec);
  dp->pos = (spos - demuxer->movi_start);

  // Shrink the packet if fewer bytes than requested were available:
  l = stream_read(demuxer->stream, dp->buffer, l);
  resize_demux_packet(dp, l);
  ds_add_packet(ds, dp);

  return 1;
}
Code Example #4
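A more involved caller, from the LIVE555-based RTP demuxer (demux_rtp.cpp, per the message in the code). Here the buffer is filled by the RTP read source before this completion callback runs, so resize_demux_packet() trims the pre-allocated packet to the received frame plus any header bytes prepended below; most of the remaining code derives a stable pts from RTCP synchronization state.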
static void afterReading(void* clientData, unsigned frameSize,
			 unsigned /*numTruncatedBytes*/,
			 struct timeval presentationTime,
			 unsigned /*durationInMicroseconds*/) {
  int headersize = 0;
  if (frameSize >= MAX_RTP_FRAME_SIZE) {
    fprintf(stderr, "Saw an input frame too large (>=%d).  Increase MAX_RTP_FRAME_SIZE in \"demux_rtp.cpp\".\n",
	    MAX_RTP_FRAME_SIZE);
  }
  ReadBufferQueue* bufferQueue = (ReadBufferQueue*)clientData;
  demuxer_t* demuxer = bufferQueue->ourDemuxer();
  RTPState* rtpState = (RTPState*)(demuxer->priv);

  if (frameSize > 0) demuxer->stream->eof = 0;

  demux_packet_t* dp = bufferQueue->dp;

  if (bufferQueue->readSource()->isAMRAudioSource())
    headersize = 1; // one byte reserved for the AMR frame header
  else if (bufferQueue == rtpState->videoBufferQueue &&
      ((sh_video_t*)demuxer->video->sh)->format == mmioFOURCC('H','2','6','4')) {
    // Prepend an Annex B start code so the decoder can locate the NAL unit:
    dp->buffer[0] = 0x00;
    dp->buffer[1] = 0x00;
    dp->buffer[2] = 0x01;
    headersize = 3;
  }

  resize_demux_packet(dp, frameSize + headersize);

  // Set the packet's presentation time stamp, depending on whether or
  // not our RTP source's timestamps have been synchronized yet:
  Boolean hasBeenSynchronized
    = bufferQueue->rtpSource()->hasBeenSynchronizedUsingRTCP();
  if (hasBeenSynchronized) {
    if (verbose > 0 && !bufferQueue->prevPacketWasSynchronized) {
      fprintf(stderr, "%s stream has been synchronized using RTCP \n",
	      bufferQueue->tag());
    }

    struct timeval* fst = &(rtpState->firstSyncTime); // abbrev
    if (fst->tv_sec == 0 && fst->tv_usec == 0) {
      *fst = presentationTime;
    }

    // For the "pts" field, use the time differential from the first
    // synchronized time, rather than absolute time, in order to avoid
    // round-off errors when converting to a float:
    dp->pts = presentationTime.tv_sec - fst->tv_sec
      + (presentationTime.tv_usec - fst->tv_usec)/1000000.0;
    bufferQueue->prevPacketPTS = dp->pts;
  } else {
    if (verbose > 0 && bufferQueue->prevPacketWasSynchronized) {
      fprintf(stderr, "%s stream is no longer RTCP-synchronized \n",
	      bufferQueue->tag());
    }

    // use the previous packet's "pts" once again:
    dp->pts = bufferQueue->prevPacketPTS;
  }
  bufferQueue->prevPacketWasSynchronized = hasBeenSynchronized;

  dp->pos = demuxer->filepos;
  demuxer->filepos += frameSize + headersize;

  // Signal any pending 'doEventLoop()' call on this queue:
  bufferQueue->blockingFlag = ~0;
}
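All four call sites share one pattern: allocate with new_demux_packet(), fill the buffer, trim with resize_demux_packet() to the size actually used, then queue with ds_add_packet(). Below is a minimal sketch of that pattern, assuming the same MPlayer demuxer API as in the examples above; CHUNK_SIZE and fill_buffer_sketch are hypothetical names for illustration only.

#define CHUNK_SIZE 4096  /* hypothetical read size for this sketch */

static int fill_buffer_sketch(demuxer_t *demuxer, demux_stream_t *ds)
{
    if (demuxer->stream->eof)
        return 0;                          /* nothing left to demux */

    /* 1. Allocate a packet sized for the largest expected read. */
    demux_packet_t *dp = new_demux_packet(CHUNK_SIZE);

    /* 2. Record the source position before reading advances the stream. */
    dp->pos = stream_tell(demuxer->stream);

    /* 3. Read, then shrink the packet to the bytes actually delivered. */
    int got = stream_read(demuxer->stream, dp->buffer, CHUNK_SIZE);
    resize_demux_packet(dp, got);

    /* 4. Hand the finished packet to the demux stream. */
    ds_add_packet(ds, dp);
    return 1;
}

Allocating large and shrinking afterwards is the cheaper order of operations here: the reader never knows in advance how many bytes a short read or a variable-size RTP frame will yield, and resize_demux_packet() lets the packet report exactly the bytes that are valid.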