示例#1
0
/*
 * seek_to - get close to where they want to go
 */
void CMpeg2tFile::seek_to (uint64_t ts_in_msec)
{
  lock_file_mutex();
  clearerr(m_ifile);
  // If we've already seeked, indicate that we haven't (this will not
  // work for 3 streams, but I'm lazy now
  if (m_ts_seeked_in_msec == ts_in_msec) {
    m_ts_seeked_in_msec = UINT64_MAX;
    unlock_file_mutex();
    return;
  }

  // clear the buffer on and buffer size - this will force a new
  // read
  m_buffer_on = m_buffer_size = 0;

  // If they are looking for < 1 second, just rewind
  m_ts_seeked_in_msec = ts_in_msec;
  if (ts_in_msec < TO_U64(1000)) {
    rewind(m_ifile);
    unlock_file_mutex();
    return;
  }
  
  uint64_t pts_seeked;
  // come 1 second or so earlier - this is so we don't need to track
  // pts vs dts, but can just get close
  ts_in_msec -= TO_U64(1000); 
  pts_seeked = ts_in_msec * TO_U64(90);
  pts_seeked += m_start_psts;

  const frame_file_pos_t *start_pos;

  start_pos = m_file_record.find_closest_point(pts_seeked);
  
  if (start_pos == NULL) {
    rewind(m_ifile);
    unlock_file_mutex();
    return;
  }
#ifdef DEBUG_MPEG2F_SEARCH
  mpeg2f_message(LOG_DEBUG, "Looking for pts "U64" found "U64, 
		       pts_seeked, start_pos->timestamp);
#endif

  fpos_t fpos;
  VAR_TO_FPOS(fpos, start_pos->file_position);
  fsetpos(m_ifile, &fpos);
  // Now, if we wanted, we could start reading frames, and read until 
  // we got an i frame close to where we wanted to be.  I'm lazy now, so
  // I won't
  unlock_file_mutex();
}
示例#2
0
/*
 * calculate_wallclock_offset_from_rtcp
 * Given certain information from an rtcp message, Calculate what the
 * wallclock time for rtp sequence number 0 would be.
 */
void
CRtpByteStreamBase::calculate_wallclock_offset_from_rtcp (uint32_t ntp_frac,
							  uint32_t ntp_sec,
							  uint32_t rtp_ts)
{
  // Fractional NTP seconds are in units of 1/2^32 sec - scale to msec.
  uint64_t wclock = ntp_frac;
  wclock = (wclock * TO_U64(1000)) / (TO_U64(1) << 32);

  // Whole NTP seconds -> unix epoch, then to milliseconds.
  uint64_t sec_msec = ntp_sec;
  sec_msec -= NTP_TO_UNIX_TIME;
  sec_msec *= TO_U64(1000);

  wclock += sec_msec;
#ifdef DEBUG_RTP_WCLOCK
  rtp_message(LOG_DEBUG, "%s RTCP data - sec %u frac %u value "U64" ts %u", 
	      m_name, ntp_sec, ntp_frac, wclock, rtp_ts);
#endif
  set_wallclock_offset(wclock, rtp_ts);
}
int CMpeg2fAudioByteStream::get_timestamp_for_frame (mpeg2t_frame_t *fptr,
						     frame_timestamp_t *ts)
{
  // all ts for audio are stored in msec, not in timescale
#ifdef DEBUG_MPEG2T_PSTS
  player_debug_message("audio frame len %d have  dts %d ts "U64, 
		       fptr->frame_len, fptr->have_dts, fptr->dts);
  player_debug_message("audio frame len %d have psts %d ts "U64" %d %d", 
		       fptr->frame_len, fptr->have_ps_ts, fptr->ps_ts,
		       m_es_pid->sample_per_frame, 
		       m_es_pid->sample_freq);
#endif
  if (fptr->have_ps_ts == 0 && fptr->have_dts == 0) {
    // No timestamp on this frame - reuse the last one seen, provided
    // we ever saw one and the elementary stream info is loaded.
    if (m_timestamp_loaded == 0) return -1;
    if (m_es_pid->info_loaded == 0) return -1;
    ts->msec_timestamp = m_last_timestamp;
    ts->audio_freq_timestamp = m_audio_last_freq_timestamp;
    ts->audio_freq = m_es_pid->sample_freq;
    return 0;
  }

  m_timestamp_loaded = 1;
  // Prefer dts over ps_ts; the value is in 90kHz ticks.
  uint64_t ticks_90khz = fptr->have_dts ? fptr->dts : fptr->ps_ts;

  // 90kHz ticks -> sample-frequency units.
  m_audio_last_freq_timestamp =
    (ticks_90khz * m_es_pid->sample_freq) / TO_U64(90000);
  ts->audio_freq_timestamp = m_audio_last_freq_timestamp;
  ts->audio_freq = m_es_pid->sample_freq;

  // 90kHz ticks -> milliseconds.
  m_last_timestamp = (ticks_90khz * TO_U64(1000)) / TO_U64(90000);
  ts->msec_timestamp = m_last_timestamp;
  m_frames_since_last_timestamp = 0;
  return 0;
}
示例#4
0
/*
 * check_buffering is called to check and see if we should be buffering.
 * Returns the buffering state; it flips to 1 once the queued packets
 * span at least m_rtp_buffer_time msec.
 */
int CRtpByteStreamBase::check_buffering (void)
{
  if (m_buffering == 0) {
    uint32_t head_ts, tail_ts;
    if (m_head != NULL) {
      /*
       * Payload type the same.  Make sure we have at least 2 seconds of
       * good data
       */
      if (check_rtp_frame_complete_for_payload_type()) {
	head_ts = m_head->rtp_pak_ts;
	tail_ts = m_tail->rtp_pak_ts;
	// Head after tail with both timestamps on the same side of the
	// 32-bit wrap point means the queue isn't usable yet - keep
	// buffering.  Use 1U << 31: shifting a signed 1 into the sign
	// bit is undefined behavior, and this matches the wrap test in
	// rtp_ts_to_msec.
	if (head_ts > tail_ts &&
	    ((head_ts & (1U << 31)) == (tail_ts & (1U << 31)))) {
	  return 0;
	}
	// Queued duration in msec: (tail - head) * 1000 / timescale.
	uint64_t calc;
	calc = tail_ts;
	calc -= head_ts;
	calc *= TO_U64(1000);
	calc /= m_timescale;
	if (calc >= m_rtp_buffer_time) {
	  if (m_base_ts_set == false) {
	    // No RTP-Info from the server - derive the base rtp
	    // timestamp from the first queued packet.
	    rtp_message(LOG_NOTICE, 
			"%s - Setting rtp seq and time from 1st pak",
			m_name);
	    set_rtp_base_ts(m_head->rtp_pak_ts);
	    m_rtpinfo_set_from_pak = 1;
	  } else {
	    m_rtpinfo_set_from_pak = 0;
	  }
	  m_buffering = 1;
#if 1
	  rtp_message(LOG_INFO, 
		      "%s buffering complete - head seq %d %u tail seq %d %u "D64, 
		      m_name, m_head->rtp_pak_seq, head_ts, 
		      m_tail->rtp_pak_seq,
		      tail_ts, calc);
#endif
	  m_next_seq = m_head->rtp_pak_seq - 1;
	  
	}
      }
    }
  }
  return m_buffering;
}
示例#5
0
/*
 * rtp_periodic - called from the player media rtp task.  This basically just
 * checks for the end of the range.
 */
void CRtpByteStreamBase::rtp_periodic (void)
{
  if (m_buffering == 0) return;

  if (m_recvd_pak) {
    // A packet arrived since the last tick - clear timeout tracking.
    m_recvd_pak = false;
    m_recvd_pak_timeout = false;
    return;
  }

  if (m_recvd_pak_timeout == false) {
    // First tick with no packet - start the timeout clock.
    m_recvd_pak_timeout_time = get_time_of_day();
  } else if (m_eof == 0) {
    uint64_t elapsed = get_time_of_day() - m_recvd_pak_timeout_time;
    if (m_stream_ondemand && get_max_playtime() != 0.0) {
      // On-demand: if the silence reaches the end of the range, the
      // stream is over.
      uint64_t range_end = (uint64_t)(get_max_playtime() * 1000.0);
      if (m_last_realtime + elapsed >= range_end) {
	rtp_message(LOG_DEBUG, 
		    "%s Timedout at range end - last "U64" range end "U64" "U64, 
		    m_name, m_last_realtime, range_end, elapsed);
	m_eof = 1;
      }
    } else {
      // broadcast - perhaps if we time out for a second or 2, we
      // should re-init rtp ?  We definately need to put some timing
      // checks here.
      session_desc_t *sptr = m_fmt->media->parent;
      if (sptr->time_desc != NULL &&
	  sptr->time_desc->end_time != 0) {
	// Past the advertised session end and quiet for >= 1 sec.
	time_t now_secs = time(NULL);
	if (now_secs > sptr->time_desc->end_time && 
	    elapsed >= TO_U64(1000)) {
	  m_eof = 1;
	}
      }
    }
  }
  m_recvd_pak_timeout = true;
}
示例#6
0
/*
 * rtp_ts_to_msec - convert a 32-bit RTP timestamp to a millisecond
 * play-time value.
 * rtp_ts      - RTP timestamp from the incoming packet
 * uts         - wallclock receive time of the packet (msec)
 * wrap_offset - in/out accumulator; bumped by 2^32 each time a
 *               32-bit timestamp wrap is detected
 * Returns the computed msec timestamp; also records it in
 * m_last_realtime and remembers rtp_ts in m_last_rtp_ts.
 */
uint64_t CRtpByteStreamBase::rtp_ts_to_msec (uint32_t rtp_ts,
					     uint64_t uts,
					     uint64_t &wrap_offset)
{
  uint64_t timetick;
  uint64_t adjusted_rtp_ts;
  uint64_t adjusted_wc_rtp_ts;
  bool have_wrap = false;
  uint32_t this_mask, last_mask;

  // Wrap detection: watch the top bit of the timestamp.  A 1 -> 0
  // transition between the last packet and this one means the 32-bit
  // counter wrapped.
  last_mask = m_last_rtp_ts & (1U << 31);
  this_mask = rtp_ts & (1U << 31);
  
  if (last_mask != this_mask) {
    if (this_mask == 0) {
      wrap_offset += (TO_U64(1) << 32);
      have_wrap = true;
      rtp_message(LOG_DEBUG, "%s - have wrap %x new %x", m_name, 
		  m_last_rtp_ts, rtp_ts);
    } else {
      // need to do something here
      // (0 -> 1 transition: possibly an out-of-order packet from
      // before the wrap; currently unhandled)
    }
  }

  if (m_stream_ondemand) {
    // On-demand: the server told us the rtp timestamp that maps to the
    // play-range start (m_base_rtp_ts), so express this packet's
    // timestamp relative to that and offset by m_play_start_time.
    adjusted_rtp_ts = wrap_offset;
    adjusted_rtp_ts += rtp_ts;
    adjusted_wc_rtp_ts = m_base_rtp_ts;

    if (adjusted_wc_rtp_ts > adjusted_rtp_ts) {
      // Packet is from before the range start - clamp at 0.
      timetick = adjusted_wc_rtp_ts - adjusted_rtp_ts;
      timetick *= TO_U64(1000);
      timetick /= m_timescale;
      if (timetick > m_play_start_time) {
	timetick = 0;
      } else {
	timetick = m_play_start_time - timetick;
      }
    } else {
      timetick = adjusted_rtp_ts - adjusted_wc_rtp_ts;
      timetick *= TO_U64(1000);
      timetick /= m_timescale;
      timetick += m_play_start_time;
    }
  } else {
    // We've got a broadcast scenario here...
    if (m_have_first_pak_ts == false) {
      // We haven't processed the first packet yet - we record
      // the data here.
      m_first_pak_rtp_ts = rtp_ts;
      m_first_pak_ts = uts;
      m_have_first_pak_ts = true;
      rtp_message(LOG_DEBUG, "%s first pak ts %u "U64, 
		  m_name, m_first_pak_rtp_ts, m_first_pak_ts);
      // if we have received RTCP, set the wallclock offset, which
      // triggers the synchronization effort.
      if (m_rtcp_received) {
	// calculate other stuff
	//rtp_message(LOG_DEBUG, "%s rtp_ts_to_msec calling wallclock", m_name);
	set_wallclock_offset(m_rtcp_ts, m_rtcp_rtp_ts);
      }
    }
    SDL_LockMutex(m_rtp_packet_mutex);
    // fairly simple calculation to calculate the timestamp
    // based on this rtp timestamp, the first pak rtp timestamp and
    // the first packet timestamp.
    int32_t adder;
    int64_t ts_adder;
    if (have_wrap) {
      adder = rtp_ts - m_first_pak_rtp_ts;
      // adjust once an hour, to keep errors low
      // we'll adjust the timestamp and rtp timestamp
      ts_adder = (int64_t)adder;
      ts_adder *= TO_D64(1000);
      ts_adder /= (int64_t)m_timescale;
      m_first_pak_ts += ts_adder;
      m_first_pak_rtp_ts = rtp_ts;
#ifdef DEBUG_RTP_BCAST
      rtp_message(LOG_DEBUG, "%s adjust for wrap - first pak ts is now "U64" rtp %u", 
		  m_name, m_first_pak_ts, m_first_pak_rtp_ts);
#endif
    }

    // adder could be negative here, based on the RTCP we receive
    adder = rtp_ts - m_first_pak_rtp_ts;
    ts_adder = (int64_t)adder;
    ts_adder *= TO_D64(1000);
    ts_adder /= (int64_t)m_timescale;
    timetick = m_first_pak_ts;
    timetick += ts_adder;
    SDL_UnlockMutex(m_rtp_packet_mutex);

#ifdef DEBUG_RTP_BCAST
    rtp_message(LOG_DEBUG, "%s ts %x base %x "U64" tp "U64" adder %d "D64,
		m_name, rtp_ts, m_first_pak_rtp_ts, m_first_pak_ts, 
		timetick, adder, ts_adder);
#endif
  }
#ifdef DEBUG_RTP_TS
  rtp_message(LOG_DEBUG,"%s time "U64" %u", m_name, timetick, rtp_ts);
#endif
  // record time
  m_last_rtp_ts = rtp_ts;
  m_last_realtime = timetick;
  return (timetick);
}
示例#7
0
/*
 * set_wallclock_offset - record the wallclock time (msec) that
 * corresponds to rtp timestamp rtp_ts, as reported by RTCP.  Also
 * performs the once-an-hour first-packet adjustment and kicks off
 * inter-stream synchronization.
 */
void CRtpByteStreamBase::set_wallclock_offset (uint64_t wclock, 
					       uint32_t rtp_ts) 
{
  int32_t rtp_ts_diff;
  int64_t wclock_diff;
  uint64_t wclock_calc;
  bool set = true;
  bool had_recvd_rtcp;
  if (m_rtcp_received == 1 /*&&
			     m_stream_ondemand == 0*/) {
    // We already have an RTCP mapping - project it forward to this
    // rtp_ts and compare with the new report.
    rtp_ts_diff = rtp_ts;
    rtp_ts_diff -= m_rtcp_rtp_ts;
    wclock_diff = (int64_t)rtp_ts_diff;
    wclock_diff *= TO_D64(1000);
    wclock_diff /= (int64_t)m_timescale;
    wclock_calc = m_rtcp_ts;
    wclock_calc += wclock_diff;
    set = false;
    if (wclock_calc != wclock) {
#ifdef DEBUG_RTP_WCLOCK
      rtp_message(LOG_DEBUG, 
		  "%s - set wallclock - wclock should be "U64" is "U64, 
		m_name, wclock_calc, wclock);
#endif
      // don't change wclock offset if it's > 100 msec - otherwise, 
      // it's annoying noise
      // NOTE(review): `set` is already false at this point, so the
      // assignment below is a no-op and `set` can never become true
      // again in this branch.  The comment above suggests the intent
      // was to accept the new offset when drift is outside 2..100
      // msec - confirm before "fixing".
      int64_t diff = wclock_calc - wclock;
      // NOTE(review): if only C's abs(int) is in scope this truncates
      // the 64-bit diff before comparison; std::abs(long long)
      // requires <cstdlib>/C++11 - verify which overload is selected.
      if (abs(diff) > 2 && abs(diff) < 100) {
	set = false;
	//	rtp_message(LOG_DEBUG, "not changing");
	// we'll allow a msec drift here or there to allow for rounding - 
	// we want this to change every so often
      }
    }
    
  }
  had_recvd_rtcp = m_rtcp_received;
  m_rtcp_received = true;
  SDL_LockMutex(m_rtp_packet_mutex);
  if (set) {
    // Adopt the new RTCP mapping.
    m_rtcp_ts = wclock;
    m_rtcp_rtp_ts = rtp_ts;
  }
  if (m_have_first_pak_ts) {
    // we only want positives here
    int32_t diff;
    diff = rtp_ts - m_first_pak_rtp_ts;
    // One hour expressed in rtp timescale ticks.
    int32_t compare = 3600 * m_timescale;
#ifdef DEBUG_RTP_WCLOCK
    rtp_message(LOG_DEBUG, "%s - 1st rtp ts %u rtp %u %u", 
		m_name, m_first_pak_rtp_ts, rtp_ts, diff);
    rtp_message(LOG_DEBUG, "%s - 1st ts "U64, m_name, m_first_pak_ts);
#endif
    if (diff > compare) {
      // adjust once an hour, to keep errors low
      // we'll adjust the timestamp and rtp timestamp
      int64_t ts_diff;
      ts_diff = (int64_t)diff;
      // NOTE(review): uses TO_U64 where the parallel code in
      // rtp_ts_to_msec uses TO_D64 for the same signed math; harmless
      // here since diff > compare > 0, but inconsistent.
      ts_diff *= TO_U64(1000);
      ts_diff /= (int64_t)m_timescale;
      m_first_pak_ts += ts_diff;
      m_first_pak_rtp_ts += diff;
#ifdef DEBUG_RTP_WCLOCK
      rtp_message(LOG_DEBUG, "CHANGE %s - first pak ts is now "U64" rtp %u", 
		  m_name, m_first_pak_ts, m_first_pak_rtp_ts);
#endif
    }
    // We've received an RTCP - see if we need to syncronize
    // the video streams.
    if (m_psptr != NULL) {
      rtcp_sync_t sync;
      sync.first_pak_ts = m_first_pak_ts;
      sync.first_pak_rtp_ts = m_first_pak_rtp_ts;
      sync.rtcp_ts = m_rtcp_ts;
      sync.rtcp_rtp_ts = m_rtcp_rtp_ts;
      sync.timescale = m_timescale;
      m_psptr->synchronize_rtp_bytestreams(&sync);
    } else {
      // if this is our first rtcp, try to synchronize
      if (!had_recvd_rtcp) synchronize(NULL);
    }
  }

  SDL_UnlockMutex(m_rtp_packet_mutex);
}
示例#8
0
/*
 * CRtpByteStreamBase constructor - takes ownership of the packet queue
 * passed via head/tail (the caller's pointers are NULLed) and applies
 * any base sequence/timestamp and RTCP data already known.
 */
CRtpByteStreamBase::CRtpByteStreamBase(const char *name,
				       format_list_t *fmt,
				       unsigned int rtp_pt,
				       int ondemand,
				       uint64_t tps,
				       rtp_packet **head, 
				       rtp_packet **tail,
				       int rtp_seq_set,
				       uint16_t rtp_base_seq,
				       int rtp_ts_set,
				       uint32_t rtp_base_ts,
				       int rtcp_received,
				       uint32_t ntp_frac,
				       uint32_t ntp_sec,
				       uint32_t rtp_ts) :
  COurInByteStream(name)
{
  m_fmt = fmt;
  // Steal the queued packets from the caller.
  m_head = *head;
  *head = NULL;
  m_tail = *tail;
  *tail = NULL;

  // Base rtp timestamp, if the caller already knows it.
  if (rtp_ts_set) {
    set_rtp_base_ts(rtp_base_ts);
  } else {
    m_base_ts_set = false;
  }

  // Base rtp sequence number, likewise.
  if (rtp_seq_set) {
    set_rtp_base_seq(rtp_base_seq);
  } else {
    m_rtp_base_seq_set = false;
  }

  m_have_first_pak_ts = false;
  m_rtp_pt = rtp_pt;

  // Buffering time comes from the config; fall back to 2 seconds.
  uint64_t cfg_msec = config.get_config_value(CONFIG_RTP_BUFFER_TIME_MSEC);
  m_rtp_buffer_time = (cfg_msec > 0) ? cfg_msec : TO_U64(2000);

  m_timescale = tps;

  reset();

  m_last_rtp_ts = 0;
  m_total = 0;
  m_skip_on_advance_bytes = 0;
  m_stream_ondemand = ondemand;
  m_rtcp_received = false;
  m_rtp_packet_mutex = SDL_CreateMutex();
  m_buffering = 0;
  m_eof = 0;
  m_psptr = NULL;
  m_have_sync_info = false;
  // If the caller already saw an RTCP sender report, apply it now.
  if (rtcp_received) {
    calculate_wallclock_offset_from_rtcp(ntp_frac, ntp_sec, rtp_ts);
  }
}
示例#9
0
/*
 * Decode task call for CELP.
 * ptr        - codec instance (celp_codec_t)
 * pts        - timestamps for this access unit
 * buffer/buflen - encoded CELP payload
 * Returns bytes consumed on success, -1 if the payload is too short.
 */
static int celp_decode (codec_data_t *ptr,
			frame_timestamp_t *pts,
			int from_rtp,
			int *sync_frame,
			uint8_t *buffer,
			uint32_t buflen,
			void *userdata)
{
  int usedNumBit;	
  celp_codec_t *celp = (celp_codec_t *)ptr;

  uint32_t freq_ts;

  // Convert the incoming frequency timestamp to the codec's clock.
  freq_ts = pts->audio_freq_timestamp;
  if (pts->audio_freq != celp->m_freq) {
    freq_ts = convert_timescale(freq_ts, pts->audio_freq, celp->m_freq);
  }
  if (celp->m_record_sync_time) {
    // First frame after a (re)sync - latch the timestamps.
    celp->m_current_frame = 0;
    celp->m_record_sync_time = 0;
    celp->m_current_time = pts->msec_timestamp;
    celp->m_last_rtp_ts = freq_ts;
    celp->m_current_freq_time = freq_ts;
  } else {
    // NOTE(review): this compares against the *unconverted*
    // audio_freq_timestamp while m_last_rtp_ts was stored from the
    // converted freq_ts - when audio_freq != m_freq the branch can
    // never match.  Confirm intended behavior before changing.
    if (celp->m_last_rtp_ts == pts->audio_freq_timestamp) {
      // Same rtp timestamp as last call - extrapolate the time from
      // the number of frames decoded since then.
      celp->m_current_frame++;
      celp->m_current_time = celp->m_last_rtp_ts;
      celp->m_current_time += 
	celp->m_samples_per_frame * celp->m_current_frame * TO_U64(1000) / 
	celp->m_freq;
      celp->m_current_freq_time += celp->m_samples_per_frame;
    } else {
      // New timestamp - restart extrapolation from it.
      celp->m_last_rtp_ts = celp->m_current_freq_time = freq_ts;
      celp->m_current_time = pts->msec_timestamp;
      celp->m_current_frame = 0;
    }

    // Note - here m_current_time should pretty much always be >= rtpts.  
    // If we're not, we most likely want to stop and resync.  We don't
    // need to keep decoding - just decode this frame and indicate we
    // need a resync... That should handle fast forwards...  We need
    // someway to handle reverses - perhaps if we're more than .5 seconds
    // later...
  }

  if (celp->m_celp_inited == 0) {
    /*
     * If not initialized, do so.  
     */
    celp->m_celp_inited = 1;
  }

  // Need a minimal payload to decode anything.
  if (buflen < 5) return (-1);
  
  // Wrap the input in a bit buffer for the reference decoder.
  BsBitBuffer local;
  local.data = (unsigned char *)buffer;
  local.numBit = buflen * 8;
  local.size = buflen * 8;
	
  DecLpcFrame(&local, celp->m_sampleBuf, &usedNumBit);
	
  int chan, sample;

  uint8_t *now = celp->m_vft->audio_get_buffer(celp->m_ifptr,
					       celp->m_current_freq_time,
					       celp->m_current_time);
  if (now != NULL) {
    uint16_t *buf = (uint16_t *)now;
    // Copy decoded samples into the output buffer, channel-major.
    for (chan = 0; chan < celp->m_chans; chan++) {
      for (sample = 0; sample < celp->m_output_frame_size; sample++) {
	buf[sample + (chan * celp->m_output_frame_size)] =
	  (uint16_t)celp->m_sampleBuf[chan][sample];
      }
    }
  }
	
#if DUMP_OUTPUT_TO_FILE
  // BUG FIX: this previously wrote `buff`, an undeclared identifier
  // (would not compile with dumping enabled).  Dump the buffer we
  // actually filled, and only when we got one.
  if (now != NULL) {
    fwrite(now, celp->m_output_frame_size * 4, 1, celp->m_outfile);
  }
#endif
  celp->m_vft->audio_filled_buffer(celp->m_ifptr);
      
  return bit2byte(usedNumBit);
}
示例#10
0
/*
 * Create CAACodec class.
 * Configuration priority: fmtp config string (streaming) > userdata
 * (decoder config blob) > audio_info_t > hard-coded defaults.
 * Returns NULL on allocation failure.
 */
static codec_data_t *aac_codec_create (const char *stream_type,
				       const char *compressor, 
				       int type, 
				       int profile, 
				       format_list_t *media_fmt,
				       audio_info_t *audio,
				       const uint8_t *userdata,
				       uint32_t userdata_size,
				       audio_vft_t *vft,
				       void *ifptr)

{
  aac_codec_t *aac;

  aac = (aac_codec_t *)malloc(sizeof(aac_codec_t));
  if (aac == NULL) {
    // robustness fix: malloc result was previously used unchecked
    return NULL;
  }
  memset(aac, 0, sizeof(aac_codec_t));

  aac->m_vft = vft;
  aac->m_ifptr = ifptr;
  fmtp_parse_t *fmtp = NULL;
  // Start setting up FAAC stuff...

  aac->m_resync_with_header = 1;
  aac->m_record_sync_time = 1;
  
  aac->m_faad_inited = 0;
  aac->m_audio_inited = 0;
  // Scratch output buffer used until the audio path is configured.
  aac->m_temp_buff = (uint8_t *)malloc(4096);
  if (aac->m_temp_buff == NULL) {
    // robustness fix: second unchecked malloc
    free(aac);
    return NULL;
  }

  // Use media_fmt to indicate that we're streaming.
  if (media_fmt != NULL) {
    // haven't checked for null buffer
    // This is not necessarilly right - it is, for the most part, but
    // we should be reading the fmtp statement, and looking at the config.
    // (like we do below in the userdata section...
    aac->m_freq = media_fmt->rtpmap->clock_rate;
    fmtp = parse_fmtp_for_mpeg4(media_fmt->fmt_param, vft->log_msg);
    if (fmtp != NULL) {
      userdata = fmtp->config_binary;
      userdata_size = fmtp->config_binary_len;
    }
  } else {
    if (audio != NULL) {
      aac->m_freq = audio->freq;
    } else {
      aac->m_freq = 44100;
    }
  }
  aac->m_chans = 2; // this may be wrong - the isma spec, Appendix A.1.1 of
  // Appendix H says the default is 1 channel...
  aac->m_output_frame_size = 1024;
  aac->m_object_type = AACMAIN;
  // Decoder-specific config overrides the defaults above.
  if (userdata != NULL || fmtp != NULL) {
    mpeg4_audio_config_t audio_config;
    decode_mpeg4_audio_config(userdata, userdata_size, &audio_config);
    aac->m_object_type = audio_config.audio_object_type;
    aac->m_freq = audio_config.frequency;
    aac->m_chans = audio_config.channels;
    if (audio_config.codec.aac.frame_len_1024 == 0) {
      aac->m_output_frame_size = 960;
    }
  }

  aa_message(LOG_INFO, aaclib,"AAC object type is %d", aac->m_object_type);
  aac->m_info = faacDecOpen();
  faacDecConfiguration config;
  config.defObjectType = aac->m_object_type;
  config.defSampleRate = aac->m_freq;
  faacDecSetConfiguration(aac->m_info, &config);
  // Frame duration in msec = frame_size * 1000 / freq.
  aac->m_msec_per_frame = aac->m_output_frame_size;
  aac->m_msec_per_frame *= TO_U64(1000);
  aac->m_msec_per_frame /= aac->m_freq;

  //  faad_init_bytestream(&m_info->ld, c_read_byte, c_bookmark, m_bytestream);

  aa_message(LOG_INFO, aaclib, "Setting freq to %d", aac->m_freq);
#if DUMP_OUTPUT_TO_FILE
  aac->m_outfile = fopen("temp.raw", "w");
#endif
  if (fmtp != NULL) {
    free_fmtp_parse(fmtp);
  }
  return (codec_data_t *)aac;
}
示例#11
0
/*
 * Decode task call for FAAC.
 * ptr    - codec instance (aac_codec_t)
 * ts     - timestamps for this access unit
 * buffer/buflen - encoded AAC payload
 * Returns bytes consumed, or 0 if no output buffer was available.
 * Lazily initializes the faad decoder from the first payload and the
 * audio output path from the first successful decode.
 */
static int aac_decode (codec_data_t *ptr,
		       frame_timestamp_t *ts,
		       int from_rtp,
		       int *sync_frame,
		       uint8_t *buffer,
		       uint32_t buflen, 
		       void *userdata)
{
  aac_codec_t *aac = (aac_codec_t *)ptr;
  unsigned long bytes_consummed;
  int bits = -1;
  //  struct timezone tz;
  uint32_t freq_timestamp;

  // Convert the frequency timestamp to the codec's sample clock.
  freq_timestamp = ts->audio_freq_timestamp;
  if (ts->audio_freq != aac->m_freq) {
    freq_timestamp = convert_timescale(freq_timestamp,
				       ts->audio_freq,
				       aac->m_freq);
  }
  if (aac->m_record_sync_time) {
    // First frame after a (re)sync - latch the timestamps.
    aac->m_current_frame = 0;
    aac->m_record_sync_time = 0;
    aac->m_current_time = ts->msec_timestamp;
    aac->m_last_rtp_ts = ts->msec_timestamp;
  } else {
    if (aac->m_last_rtp_ts == ts->msec_timestamp) {
      // Same timestamp as last call - extrapolate time from the
      // number of frames decoded since it arrived.
      aac->m_current_frame++;
      aac->m_current_time = aac->m_last_rtp_ts;
      aac->m_current_time += 
	aac->m_output_frame_size * aac->m_current_frame * 
	TO_U64(1000) / aac->m_freq;
      freq_timestamp += aac->m_output_frame_size * aac->m_current_frame;
    } else {
      // New timestamp - restart extrapolation from it.
      aac->m_last_rtp_ts = ts->msec_timestamp;
      aac->m_current_time = ts->msec_timestamp;
      aac->m_current_frame = 0;
    }

    // Note - here m_current_time should pretty much always be >= rtpts.  
    // If we're not, we most likely want to stop and resync.  We don't
    // need to keep decoding - just decode this frame and indicate we
    // need a resync... That should handle fast forwards...  We need
    // someway to handle reverses - perhaps if we're more than .5 seconds
    // later...
  }

    if (aac->m_faad_inited == 0) {
      /*
       * If not initialized, do so.  
     */
      // faad reads the actual freq/channels from the first payload.
      unsigned long freq, chans;

      faacDecInit(aac->m_info,
		  (unsigned char *)buffer,
		  &freq,
		  &chans);
      aac->m_freq = freq;
      aac->m_chans = chans;
      aac->m_faad_inited = 1;
    }

    uint8_t *buff;

    /* 
     * Get an audio buffer
     */
    if (aac->m_audio_inited == 0) {
      // Output path not configured yet - decode into the scratch
      // buffer; it gets copied to a real buffer below on FAAD_OK.
      buff = aac->m_temp_buff;
    } else {
      buff = aac->m_vft->audio_get_buffer(aac->m_ifptr,
					  freq_timestamp,
					  aac->m_current_time);
    }
    if (buff == NULL) {
      //player_debug_message("Can't get buffer in aa");
      return (0);
    }

    unsigned long samples;
    bytes_consummed = buflen;
    //aa_message(LOG_DEBUG, aaclib, "decoding %d bits", buflen * 8);
    bits = faacDecDecode(aac->m_info,
			 (unsigned char *)buffer, 
			 &bytes_consummed,
			 (short *)buff, 
			 &samples);
    switch (bits) {
    case FAAD_OK_CHUPDATE:
      // Channel configuration changed mid-stream; log and treat as OK.
      if (aac->m_audio_inited != 0) {
	int tempchans = faacDecGetProgConfig(aac->m_info, NULL);
	if (tempchans != aac->m_chans) {
	  aa_message(LOG_NOTICE, aaclib, "chupdate - chans from data is %d", 
			       tempchans);
	}
      }
      // fall through...
    case FAAD_OK:
      if (aac->m_audio_inited == 0) {
	// First successful decode - configure the audio output path.
	int tempchans = faacDecGetProgConfig(aac->m_info, NULL);
	if (tempchans == 0) {
	  // No channel config yet - ask for a resync and try again.
	  aac->m_resync_with_header = 1;
	  aac->m_record_sync_time = 1;
	  return bytes_consummed;
	}
	if (tempchans != aac->m_chans) {
	  aa_message(LOG_NOTICE, aaclib, "chans from data is %d conf %d", 
		     tempchans, aac->m_chans);
	  aac->m_chans = tempchans;
	}
	aac->m_vft->audio_configure(aac->m_ifptr,
				     aac->m_freq, 
				     aac->m_chans, 
				     AUDIO_FMT_S16, 
				     aac->m_output_frame_size);
	// Re-fetch a real output buffer and copy the scratch decode in.
	uint8_t *now = aac->m_vft->audio_get_buffer(aac->m_ifptr,
						    freq_timestamp,
						    aac->m_current_time);

	if (now != NULL) {
	  memcpy(now, buff, tempchans * aac->m_output_frame_size * sizeof(int16_t));
	}
	aac->m_audio_inited = 1;
      }
      /*
       * good result - give it to audio sync class
       */
#if DUMP_OUTPUT_TO_FILE
      fwrite(buff, aac->m_output_frame_size * 4, 1, aac->m_outfile);
#endif
      aac->m_vft->audio_filled_buffer(aac->m_ifptr);
      if (aac->m_resync_with_header == 1) {
	aac->m_resync_with_header = 0;
#ifdef DEBUG_SYNC
	aa_message(LOG_DEBUG, aaclib, "Back to good at "U64, aac->m_current_time);
#endif
      }
      break;
    default:
      // Decode error - request a resync from the next sync point.
      aa_message(LOG_ERR, aaclib, "Bits return is %d", bits);
      aac->m_resync_with_header = 1;
#ifdef DEBUG_SYNC
      aa_message(LOG_ERR, aaclib, "Audio decode problem - at "U64, 
		 aac->m_current_time);
#endif
      break;
    }
  return (bytes_consummed);
}
示例#12
0
/*
 * Create - will determine pids and psts ranges in file.  Will also
 * loop through the file and determine CFilePosRec points at percentages.
 * Three passes over the file:
 *   1) from the start, find the PIDs present and the earliest psts;
 *   2) sample the file at roughly perc-sized intervals to build a
 *      seek index (disabled if timestamps go backwards);
 *   3) scan the tail to find the maximum psts, giving a duration.
 * Returns 0 on success, -1 on error (psptr gets the error message).
 */
int CMpeg2tFile::create (CPlayerSession *psptr)
{
  m_mpeg2t = create_mpeg2_transport();
  if (m_mpeg2t == NULL) {
    psptr->set_message("Couldn't create mpeg2 transport");
    fclose(m_ifile);
    return -1;
  }
  // nice, large buffers to process
  m_buffer_size_max = 188 * 2000;
  m_buffer = (uint8_t *)malloc(m_buffer_size_max);

  // NOTE(review): unlike the branch above, this error path does not
  // fclose(m_ifile) - confirm who owns the file handle on failure.
  if (m_buffer == NULL) {
    psptr->set_message("Malloc error");
    return -1;
  }
  m_buffer[0] = MPEG2T_SYNC_BYTE;
  m_buffer_size = fread(&m_buffer[1], 1, m_buffer_size_max - 1, m_ifile) + 1;

  bool done = false;
  mpeg2t_pid_t *pidptr;
  uint32_t buflen_used;
  bool have_psts = false;
  uint64_t earliest_psts = 0;
  mpeg2t_es_t *es_pid;

  // Quiet the transport demuxer's logging while we churn through the
  // file, unless the user asked for full debug.
  int olddebuglevel;
  olddebuglevel = config.get_config_value(CONFIG_MPEG2T_DEBUG);
  if (olddebuglevel != LOG_DEBUG)
    mpeg2t_set_loglevel(LOG_CRIT);
  m_mpeg2t->save_frames_at_start = 1;
  /*
   * We need to determine which PIDs are present, and try to establish
   * a starting psts.  We also want to establish what type of video and
   * audio are in the mix.  Note: if we try to run this on a file that
   * we don't understand the video, this could take a while, because the
   * info never gets loaded.
   */
  do {
    m_buffer_on = 0;
    while (m_buffer_on + 188 < m_buffer_size && done == false) {
      
      pidptr = mpeg2t_process_buffer(m_mpeg2t, 
				     &m_buffer[m_buffer_on],
				     m_buffer_size - m_buffer_on,
				     &buflen_used);
      m_buffer_on += buflen_used;
      if (pidptr != NULL && pidptr->pak_type == MPEG2T_ES_PAK) {
	es_pid = (mpeg2t_es_t *)pidptr;
	mpeg2t_frame_t *fptr;

	// determine earliest PS_TS
	while ((fptr = mpeg2t_get_es_list_head(es_pid)) != NULL) {
	  if (fptr->have_ps_ts != 0 || fptr->have_dts != 0) {
	    uint64_t ps_ts = 0;
	    bool store_psts = true;
	    if (fptr->have_dts != 0) {
	      ps_ts = fptr->dts;
	    } else {
	      if (es_pid->is_video == 1) { // mpeg2
		// video - make sure we get the first I frame, then we can
		// get the real timestamp
		if (fptr->frame_type != 1) {
		  store_psts = false;
		} else {
		  // Back the I frame's psts up by its temporal
		  // reference so it reflects display order start.
		  ps_ts = fptr->ps_ts;
		  uint16_t temp_ref = MP4AV_Mpeg3PictHdrTempRef(fptr->frame + fptr->pict_header_offset);
		  ps_ts -= ((temp_ref + 1) * es_pid->tick_per_frame);
		}
	      } else {
		ps_ts = fptr->ps_ts;
	      }
	    }
	    if (store_psts) {
	      // when we have the first psts for a ES_PID, turn off
	      // parsing frames for that PID.
	      mpeg2t_set_frame_status(es_pid, MPEG2T_PID_NOTHING);
	      if (have_psts) {
		earliest_psts = MIN(earliest_psts, ps_ts);
	      } else {
		earliest_psts = ps_ts;
		have_psts = true;
	      }
	    }
	  }
	  mpeg2t_free_frame(fptr);
	}

	// Each time, search through and see if there are any ES_PIDs 
	// that have not returned a psts.  We're done when the info is
	// loaded for all the es pids.
	pidptr = m_mpeg2t->pas.pid.next_pid;
	bool finished = true;
	while (pidptr != NULL && finished) {
	  if (pidptr->pak_type == MPEG2T_ES_PAK) {
	    es_pid = (mpeg2t_es_t *)pidptr;
	    if (es_pid->info_loaded == 0) {
	      finished = false;
	    }
	  }
	  pidptr = pidptr->next_pid;
	}
	// NOTE(review): this stops as soon as *either* every pid's
	// info is loaded *or* any psts was found - slightly broader
	// than the comment above suggests; confirm intent.
	done = finished || have_psts;
      }
    }
    if (done == false) {
      m_buffer_size = fread(m_buffer, 1, m_buffer_size_max, m_ifile);
    }
  } while (m_buffer_size >=188 && done == false);

  if (done == false) {
    psptr->set_message("Could not find information in TS");
    mpeg2t_set_loglevel(olddebuglevel);
    return -1;
  }

#ifdef DEBUG_MPEG2F_SEARCH
  mpeg2f_message(LOG_DEBUG, "initial psts is "U64, earliest_psts);
#endif
  m_start_psts = earliest_psts;

  // Now, we'll try to build a rough index for the file
  // enable psts reading for the pid
  for (pidptr = m_mpeg2t->pas.pid.next_pid; pidptr != NULL; pidptr = pidptr->next_pid) {
    if (pidptr->pak_type == MPEG2T_ES_PAK) {
      es_pid = (mpeg2t_es_t *)pidptr;
      mpeg2t_set_frame_status(es_pid, MPEG2T_PID_REPORT_PSTS);
    }
  }
  m_file_record.record_point(0, earliest_psts, 0);
  fpos_t fpos;
  uint64_t end;
  uint64_t perc, cur;

  // find out the length of the file.
  struct stat filestat;
  if (fstat(fileno(m_ifile), &filestat) != 0) {
    return -1;
  }
  end = filestat.st_size;
  perc = end;
  // perc is what size of the file to skip through to get a rough
  // timetable.  We want to do 10% chunks, or 100Mb chunks, whichever is
  // less.
  while (perc > TO_U64(100000000)) {
    perc /= 2;
  }
  if (perc > (end / TO_U64(10))) {
    perc = end / TO_U64(10);
  }
  if (perc < (end / TO_U64(50))) {
    perc = end / TO_U64(50);
  }
#ifdef DEBUG_MPEG2F_SEARCH
  mpeg2f_message(LOG_DEBUG, "perc is "U64" "U64, perc, (perc * TO_U64(100)) / end );
#endif

  cur = perc;

  bool is_seekable = true;
  uint64_t last_psts, ts;
  last_psts = earliest_psts;

  // Now - skip to the next perc chunk, and try to find the next psts
  // we'll record this info.
  do {
#ifdef DEBUG_MPEG2F_SEARCH
    mpeg2f_message(LOG_DEBUG, "current "U64" end "U64, cur, end);
#endif
    VAR_TO_FPOS(fpos, cur);
    fsetpos(m_ifile, &fpos);
    done = false;
    uint64_t count = 0;
    m_buffer_on = 0;
    m_buffer_size = 0;
    do {
      // Refill (preserving any partial packet) when fewer than 188
      // bytes remain in the buffer.
      if (m_buffer_on + 188 > m_buffer_size) {
	if (m_buffer_on < m_buffer_size) {
	  memmove(m_buffer, m_buffer + m_buffer_on, 
		  m_buffer_size - m_buffer_on);
	  m_buffer_on = m_buffer_size - m_buffer_on;
	} else {
	  m_buffer_on = 0;
	}
	m_buffer_size = fread(m_buffer + m_buffer_on, 
			      1, 
			      (188 * 10) - m_buffer_on, 
			      m_ifile);

	count += m_buffer_size - m_buffer_on;
	m_buffer_size += m_buffer_on;
	m_buffer_on = 0;
	if (m_buffer_size < 188) {
	  m_buffer_size = 0;
	  done = true;
	}
      }

      pidptr = mpeg2t_process_buffer(m_mpeg2t,
				     m_buffer + m_buffer_on, 
				     m_buffer_size - m_buffer_on, 
				     &buflen_used);
      m_buffer_on += buflen_used;
      if (pidptr != NULL && pidptr->pak_type == MPEG2T_ES_PAK) {
	es_pid = (mpeg2t_es_t *)pidptr;
	// If we have a psts, record it.
	// If it is less than the previous one, we've got a discontinuity, so
	// we can't seek.
	if (es_pid->have_ps_ts || es_pid->have_dts) {
	  ts = es_pid->have_ps_ts ? es_pid->ps_ts : es_pid->dts;
	  if (ts < last_psts) {
	    player_error_message("pid %x psts "U64" is less than prev record point "U64, 
				 es_pid->pid.pid, ts, last_psts);
	    cur = end;
	    is_seekable = false;
	  } else {
#ifdef DEBUG_MPEG2F_SEARCH
	    mpeg2f_message(LOG_DEBUG, "pid %x psts "U64" %d", 
			       pidptr->pid, ts, 
			       es_pid->is_video);
#endif
	    m_file_record.record_point(cur, ts, 0);
	  }
	  done = true;
	}
      }

    } while (done == false && count < perc / 2);
    cur += perc;

    // NOTE(review): `end - (m_buffer_size_max * 2)` is unsigned math -
    // for a file smaller than 2 * m_buffer_size_max this underflows to
    // a huge value and the loop relies on the EOF handling above;
    // confirm small-file behavior.
  } while (cur < end - (m_buffer_size_max * 2));

  //mpeg2f_message(LOG_DEBUG, "starting end search");
  // Now, we'll go to close to the end of the file, and look for a 
  // final PSTS.  This gives us a rough estimate of the elapsed time
  long seek_offset;
  seek_offset = 0;
  seek_offset -= (m_buffer_size_max) * 2;
  fseek(m_ifile, seek_offset, SEEK_END);
  m_buffer_on = m_buffer_size = 0;
  uint64_t max_psts;
  max_psts = m_start_psts;
  do {
    while (m_buffer_on + 188 <= m_buffer_size) {
      
      pidptr = mpeg2t_process_buffer(m_mpeg2t, 
				     &m_buffer[m_buffer_on],
				     m_buffer_size - m_buffer_on,
				     &buflen_used);
      m_buffer_on += buflen_used;
      if (pidptr != NULL && pidptr->pak_type == MPEG2T_ES_PAK) {
	es_pid = (mpeg2t_es_t *)pidptr;
	// Track the largest timestamp seen near the end of file.
	if (es_pid->have_ps_ts) {
	  es_pid->have_ps_ts = 0;
	  max_psts = MAX(es_pid->ps_ts, max_psts);
	} else if (es_pid->have_dts) {
	  es_pid->have_dts = 0;
	  max_psts = MAX(es_pid->dts, max_psts);
	}
      }
    }
    // Shift any partial packet to the front and refill.
    if (m_buffer_size > m_buffer_on) {
      memmove(m_buffer, m_buffer + m_buffer_on, m_buffer_size - m_buffer_on);
    }
    m_buffer_on = m_buffer_size - m_buffer_on;
    m_buffer_size = fread(m_buffer + m_buffer_on, 1, 
			  m_buffer_size_max - m_buffer_on, m_ifile);
    m_buffer_size += m_buffer_on;
    m_buffer_on = 0;
    if (m_buffer_size < 188) m_buffer_size = 0;
  } while (m_buffer_size > 188) ;
  m_last_psts = max_psts;
  // Calculate the rough max time; hopefully it will be greater than the
  // initial...
  // Duration in seconds = (max_psts - start_psts) / 90000 (90kHz clock).
  m_max_time = max_psts;
  m_max_time -= m_start_psts;
  m_max_time /= 90000.0;
#ifdef DEBUG_MPEG2F_SEARCH
  player_debug_message("last psts is "U64" "U64" %g", max_psts,
		       (max_psts - m_start_psts) / TO_U64(90000),
		       m_max_time);
#endif
  mpeg2t_set_loglevel(olddebuglevel);

  if (is_seekable) {
    psptr->session_set_seekable(1);
  }
  // Mark "no seek pending" (see seek_to).
  m_ts_seeked_in_msec = UINT64_MAX;
  rewind(m_ifile);

  return 0;
}
示例#13
0
/*
 * DumpTrack - print per-sample information (id, size, timestamps and,
 * optionally, rendering offset) for one track of an mp4 file, and parse
 * the payload of mp4v / avc1 video samples for a deeper dump.
 *
 * mp4file   - open MP4v2 file handle
 * tid       - track to dump
 * dump_off  - passed through to the payload parsers (dump offsets)
 * dump_rend - also print the per-sample rendering offset
 */
static void DumpTrack (MP4FileHandle mp4file, MP4TrackId tid, 
		       bool dump_off, bool dump_rend)
{
  uint32_t numSamples;
  MP4SampleId sid;
  uint8_t *buffer;
  uint32_t max_frame_size;
  uint32_t timescale;
  uint64_t msectime;
  const char *media_data_name;
  uint32_t len_size = 0;
  uint8_t video_type = 0;
  numSamples = MP4GetTrackNumberOfSamples(mp4file, tid);
  // + 4 gives the payload parsers a little headroom past the largest sample
  max_frame_size = MP4GetTrackMaxSampleSize(mp4file, tid) + 4;
  media_data_name = MP4GetTrackMediaDataName(mp4file, tid);
  if (strcasecmp(media_data_name, "avc1") == 0) {
    // H.264 - need the NAL length-field size to walk the sample
    MP4GetTrackH264LengthSize(mp4file, tid, &len_size);
  } else if (strcasecmp(media_data_name, "mp4v") == 0) {
    video_type = MP4GetTrackEsdsObjectTypeId(mp4file, tid);
  }
  buffer = (uint8_t *)malloc(max_frame_size);
  if (buffer == NULL) {
    printf("couldn't get buffer\n");
    return;
  }

  timescale = MP4GetTrackTimeScale(mp4file, tid);
  printf("mp4file %s, track %d, samples %d, timescale %d\n", 
	 Mp4FileName, tid, numSamples, timescale);

  for (sid = 1; sid <= numSamples; sid++) {
    MP4Timestamp sampleTime;
    MP4Duration sampleDuration, sampleRenderingOffset;
    bool isSyncSample = FALSE;
    bool ret;
    u_int8_t *temp;
    uint32_t this_frame_size = max_frame_size;
    temp = buffer;
    ret = MP4ReadSample(mp4file, 
			tid,
			sid,
			&temp,
			&this_frame_size,
			&sampleTime,
			&sampleDuration,
			&sampleRenderingOffset,
			&isSyncSample);
    // NOTE(review): ret is not checked - on a read failure the fields
    // printed below are stale.  Preserved for output compatibility.

    // convert the media-timescale timestamp to milliseconds
    msectime = sampleTime;
    msectime *= TO_U64(1000);
    msectime /= timescale;

    printf("sampleId %6d, size %5u time "U64"("U64")",
	  sid,  MP4GetSampleSize(mp4file, tid, sid), 
	   sampleTime, msectime);
    if (dump_rend) printf(" %6"U64F, sampleRenderingOffset);
    if (strcasecmp(media_data_name, "mp4v") == 0) {
      if (MP4_IS_MPEG4_VIDEO_TYPE(video_type))
	ParseMpeg4(temp, this_frame_size, dump_off);
    } else if (strcasecmp(media_data_name, "avc1") == 0) {
      ParseH264(temp, this_frame_size, len_size, dump_off);
    }
    printf("\n");
  }
  // was leaked before - one max-sample-size buffer per call
  free(buffer);
}
示例#14
0
/*
 * celp_codec_create - allocate and initialize the CELP audio decoder.
 *
 * Configuration comes from one of three places, in priority order:
 *   - media_fmt (streaming case): clock rate from the rtpmap, and the
 *     mpeg4 audio config decoded from the fmtp "config" parameter
 *   - userdata (file case): raw mpeg4 audio config blob
 *   - audio / defaults: just a frequency
 *
 * Returns the codec handle as a codec_data_t*, or NULL on allocation
 * failure.
 */
static codec_data_t *celp_codec_create (const char *stream_type,
					const char *compressor, 
					int type, 
					int profile, 
					format_list_t *media_fmt,
					audio_info_t *audio,
					const uint8_t *userdata,
					uint32_t userdata_size,
					audio_vft_t *vft,
					void *ifptr)

{
  int i;
  celp_codec_t *celp;
  celp = (celp_codec_t *)malloc(sizeof(celp_codec_t));
  if (celp == NULL) {
    // previously fell straight into memset() and crashed on OOM
    return NULL;
  }
  memset(celp, 0, sizeof(celp_codec_t));
  
#if 1
  celp->m_vft = vft;
  celp->m_ifptr = ifptr;
  fmtp_parse_t *fmtp = NULL;

  BsInit(0, 0, 0);
  
  // Start setting up CELP stuff...
  
  celp->m_record_sync_time = 1;
  
  celp->m_celp_inited = 0;
  celp->m_audio_inited = 0;
  //celp->m_temp_buff = (float *)malloc(4096);

  
  // Use media_fmt to indicate that we're streaming.
  if (media_fmt != NULL) {
    // haven't checked for null buffer
    // This is not necessarilly right - it is, for the most part, but
    // we should be reading the fmtp statement, and looking at the config.
    // (like we do below in the userdata section...
    celp->m_freq = media_fmt->rtpmap->clock_rate;
    fmtp = parse_fmtp_for_mpeg4(media_fmt->fmt_param, vft->log_msg);
    if (fmtp != NULL) {
      // NOTE(review): config_binary may be NULL; the debug message and
      // memcpy below would then dereference NULL - verify upstream.
      userdata = fmtp->config_binary;
      userdata_size = fmtp->config_binary_len;
    }
  } else {
    if (audio != NULL) {
      celp->m_freq = audio->freq;
    } else {
      celp->m_freq = 44100;
    }
  }
  //celp->m_chans = 1; // this may be wrong - the isma spec, Appendix A.1.1 of
  // Appendix H says the default is 1 channel...
  //celp->m_output_frame_size = 2048;
  // celp->m_object_type = 8;CELP  AACMAIN;
  mpeg4_audio_config_t audio_config;
  // zero-init: audio_config.codec.celp is read unconditionally below even
  // when neither userdata nor fmtp supplied a config to decode
  memset(&audio_config, 0, sizeof(audio_config));
  if (userdata != NULL || fmtp != NULL) {
    
    celp_message(LOG_DEBUG, celplib, "config len %d %02x %02x %02x %02x", 
		 userdata_size, userdata[0], userdata[1], userdata[2], 
		 userdata[3]);
    decode_mpeg4_audio_config(userdata, userdata_size, &audio_config, false);
    celp->m_object_type = audio_config.audio_object_type;
    celp->m_freq = audio_config.frequency;
    celp->m_chans = audio_config.channels;
  }

  // Wrap the raw config bytes in a bitstream and skip down to the
  // CELP-specific portion, then hand that to the decoder init.
  BsBitBuffer *bitHeader;
  BsBitStream	*hdrStream;

  bitHeader=BsAllocBuffer(userdata_size * 8);

  //wmay removed
  bitHeader->numBit=userdata_size*8;
  bitHeader->size=userdata_size*8;

  memcpy(bitHeader->data,userdata,userdata_size);

  hdrStream = BsOpenBufferRead(bitHeader);

  BsGetSkip (hdrStream,userdata_size*8-audio_config.codec.celp.NumOfBitsInBuffer);
  BsBitBuffer *bBuffer=BsAllocBuffer(userdata_size*8);
  BsGetBuffer (hdrStream, bBuffer,audio_config.codec.celp.NumOfBitsInBuffer);
  int delayNumSample;

  DecLpcInit(celp->m_chans,celp->m_freq,0,NULL,
	     bBuffer ,&celp->m_output_frame_size,&delayNumSample);

  celp->m_samples_per_frame = celp->m_output_frame_size;
  // NOTE(review): m_msec_per_frame is zero (from memset) before these two
  // operations, so the result is always 0 - looks like a latent bug; left
  // unchanged to preserve behavior.
  celp->m_msec_per_frame *= TO_U64(1000);
  celp->m_msec_per_frame /= celp->m_freq;
  celp->m_last=userdata_size;
	
  BsFreeBuffer (bitHeader);

  BsFreeBuffer (bBuffer);

  // per-channel float sample buffers
  celp->m_sampleBuf=(float**)malloc(celp->m_chans*sizeof(float*));
  for(i=0;i<celp->m_chans;i++)
    // wmay - added 2 times - return for frame size was samples, not bytes
    celp->m_sampleBuf[i]=(float*)malloc(2*celp->m_output_frame_size*sizeof(float));

  celp->m_bufs = 
    (uint16_t *)malloc(sizeof(uint16_t) * 2 * celp->m_chans * celp->m_output_frame_size);
  //celp->audiFile = AudioOpenWrite("out1.au",".au",
  //		  celp->m_chans,celp->m_freq);

  celp_message(LOG_INFO, celplib,"CELP object type is %d", celp->m_object_type);
  //celp_message(LOG_INFO, celplib,"CELP channel are %d", celp->m_chans );
  celp_message(LOG_INFO, celplib, "Setting freq to %d", celp->m_freq);
  celp_message(LOG_INFO, celplib, "output frame size is %d", celp->m_output_frame_size);

#if DUMP_OUTPUT_TO_FILE
  celp->m_outfile = fopen("temp.raw", "w");
#endif
  if (fmtp != NULL) {
    free_fmtp_parse(fmtp);
  }
#endif	 
  // tell the renderer what we'll be handing it
  celp->m_vft->audio_configure(celp->m_ifptr,
			       celp->m_freq, 
			       celp->m_chans, 
			       AUDIO_FMT_S16,
			       celp->m_output_frame_size);

  return (codec_data_t *)celp;
}
/*
 * CMpeg2fVideoByteStream::get_timestamp_for_frame - convert the 90kHz
 * transport-stream timestamp carried by a frame (DTS preferred, else the
 * PS timestamp) into a millisecond timestamp for the player.
 * Returns 0 on success (this live path always succeeds).
 */
int CMpeg2fVideoByteStream::get_timestamp_for_frame (mpeg2t_frame_t *fptr,
						     frame_timestamp_t *ts)

{
#ifdef DEBUG_MPEG2T_PSTS
  // Debug trace only.  Note: "} if" (no else) is deliberate-looking but
  // means both messages print when a frame has both dts and ps_ts.
  if (fptr->have_dts) {
    player_debug_message("video frame len %d have  dts %d ts "U64,
			 fptr->frame_len, fptr->have_dts, fptr->dts);
  } if (fptr->have_ps_ts) {
    player_debug_message("video frame len %d have psts %d ts "U64, 
			 fptr->frame_len, fptr->have_ps_ts, fptr->ps_ts);
  }
#endif
#if 0
  // Disabled legacy path: tried to synthesize/reorder timestamps using the
  // frame rate, previous frame type and MPEG-2 temporal reference when a
  // frame had no PTS/DTS of its own.  Kept for reference only.
  if (m_es_pid->stream_type == MPEG2T_STREAM_H264) {
    if (fptr->have_dts || fptr->have_ps_ts) {
      if (fptr->have_dts)
	outts = fptr->dts;
      else
	outts = fptr->ps_ts;
      outts *= TO_U64(1000);
      outts /= TO_U64(90000); // get msec from 90000 timescale
      return 0;
    }
    return -1;
  }
  uint64_t ts;
  //  m_es_pid->frame_rate = 24;
  double value = 90000.0 / m_es_pid->frame_rate;
  uint64_t frame_time = (uint64_t)value;
  if (fptr->have_ps_ts == 0 && fptr->have_dts == 0) {
    // We don't have a timestamp on this - just increment from
    // the previous timestamp.
    if (m_timestamp_loaded == 0) return -1;
    if (m_es_pid->info_loaded == 0) return -1;

    outts = m_prev_ts + frame_time;
    m_have_prev_frame_type = 1;
    m_prev_frame_type = fptr->frame_type;
    m_prev_ts = outts;
    outts *= TO_U64(1000);
    outts /= TO_U64(90000); // get msec from 90000 timescale
    return 0;
  }
  m_timestamp_loaded = 1;
  if (fptr->have_dts != 0) {
    outts = fptr->dts;
  } else {
    ts = fptr->ps_ts;

    if (m_have_prev_frame_type) {
      if (fptr->frame_type == 3) {
	// B frame
	outts = ts;
      } else {
	outts = m_prev_ts + frame_time;
      }
    } else {
      if (fptr->frame_type == 1) {
	uint16_t temp_ref = MP4AV_Mpeg3PictHdrTempRef(fptr->frame + fptr->pict_header_offset);
	ts -= ((temp_ref + 1) * m_es_pid->tick_per_frame);
	outts = ts;
      } else {
	player_error_message( "no psts and no prev frame");
	outts = ts;
      }
    }
  }

  m_have_prev_frame_type = 1;
  m_prev_frame_type = fptr->frame_type;
  m_prev_ts = outts;

  outts *= TO_U64(1000);
  outts /= TO_U64(90000); // get msec from 90000 timescale
  
  return 0;
#endif
  // Live path: a DTS is a decode timestamp, so the result is only a PTS
  // when no DTS was present.
  ts->timestamp_is_pts = fptr->have_dts == false;
  uint64_t outts;
  if (fptr->have_dts)
    outts = fptr->dts;
  else
    outts = fptr->ps_ts;
  outts *= TO_U64(1000);
  outts /= TO_U64(90000); // get msec from 90000 timescale
  ts->msec_timestamp = outts;
  return 0;
}
示例#16
0
文件: mfrafix.c 项目: nevil/mfrafix
/*
 * mfrafix - repair the moof offsets stored in an mp4 file's mfra box.
 *
 * Maps the input file, locates the mfra via the trailing mfro, collects
 * all tfra entries, and if the first entry does not point at a 'moof'
 * box, rewrites every entry into a copy of the file (argv[2]) with the
 * detected offset correction applied.
 *
 * Returns 0 on success, 1 on any failure.
 */
int main(int argc, char* argv[])
{
    int retval = 1;
    int fdout = -1;
    uint8_t* pIn  = NULL;
    uint8_t* pOut = NULL;
    /* declared up here (initialized) so the early 'goto cleanup's below
     * never reach free()/munmap() with indeterminate values */
    TFRAEntry* tfraEntries = NULL;
    int numTFRAEntries = 0;
    off_t length = 0;

    if (argc != 3)
    {
        fprintf(stderr, "Usage: mfrafix infile outfile\n");
        return 1;
    }

    int fdin = open(argv[1], O_RDONLY);
    if (fdin < 0)
    {
        fprintf(stderr, "Error: failed to open '%s' for reading\n", argv[1]);
        goto cleanup;
    }

    length = lseek(fdin, 0, SEEK_END);

    lseek(fdin, 0, SEEK_SET);
    debug("Length of in file: %d\n", (int)length);

    pIn = mmap(NULL, length, PROT_READ, MAP_SHARED, fdin, 0);
    if (pIn == MAP_FAILED)  /* mmap reports failure as MAP_FAILED, not NULL */
    {
        pIn = NULL;         /* keep cleanup's munmap guard valid */
        fprintf(stderr, "Error: mmap failed with error %d - '%s'\n", errno, strerror(errno));
        goto cleanup;
    }

    // Look for the mfra by checking the last four bytes in the mfro
    // For now we assume that the mfra is at the end of the file...
    uint32_t mfro = U32_AT(pIn + length - sizeof(uint32_t));
    uint8_t* pMFRA = pIn + length - mfro;
    uint32_t mfraSize = U32_AT(pMFRA);
    uint8_t* pMFRAEnd = pMFRA + mfraSize;
    SKIP(pMFRA, 4); // Size processed

    debug("mfro offset %x\n", mfro);
    debug("MFRA size %x\n", mfraSize);
    debug("MFRA box '%08x'\n", U32_AT(pMFRA));

    if (FOURCC('m', 'f', 'r', 'a') != U32_AT(pMFRA))
    {
        fprintf(stderr, "Error: The mfra box was not found\n");
        goto cleanup;
    }
    SKIP(pMFRA, 4); // Type processed

    debug("Found mfra\n");

    while (pMFRA < pMFRAEnd)
    {
        uint32_t boxSize = U32_AT(pMFRA);
        SKIP(pMFRA, 4); // Size read

        debug("Box size %x\n", boxSize);
        debug("Box type '%08x'\n", U32_AT(pMFRA));
        switch (U32_AT(pMFRA))
        {
            case FOURCC('t', 'f', 'r', 'a'):
                debug("Found tfra\n");
                SKIP(pMFRA, 4); // Type processed
                SKIP(pMFRA, 4); // Ignore version and flags

                uint32_t trackId = U32_AT(pMFRA);
                SKIP(pMFRA, 4); // trackId processed

                /* packed 2-bit field sizes for the per-entry traf/trun/
                 * sample numbers (stored length minus one) */
                uint32_t tmp = U32LE_AT(pMFRA);
                int trafNum   = (tmp >> 4) & 0x3;
                int trunNum   = (tmp >> 2) & 0x3;
                int sampleNum = (tmp >> 0) & 0x3;
                SKIP(pMFRA, 4); // Traf, trun and samples processed
                debug("trackId %d\ntraf %d trun %d sample %d\n", trackId, trafNum, trunNum, sampleNum);

                uint32_t numEntry = U32_AT(pMFRA);
                SKIP(pMFRA, 4); // numEntry processed
                debug("numEntry: %d\n", numEntry);

                /* grow via a temporary so a failed realloc doesn't leak
                 * the existing array */
                TFRAEntry* grown = realloc(tfraEntries, sizeof(TFRAEntry) * (numEntry + numTFRAEntries));
                if (grown == NULL)
                {
                    fprintf(stderr, "Error: Failed to allocate memory for tfra entries\n");
                    goto cleanup;
                }
                tfraEntries = grown;

                for (int i = numTFRAEntries; i < (numEntry + numTFRAEntries); ++i)
                {
                    tfraEntries[i].trackId = trackId;

                    debug("time: %lx\n", U64_AT(pMFRA));
                    tfraEntries[i].time = U64_AT(pMFRA);
                    SKIP(pMFRA, 8); // Skip uint64, time

                    debug("moof: %lx\n", U64_AT(pMFRA));
                    tfraEntries[i].moof = U64_AT(pMFRA);
                    tfraEntries[i].moofOffset = (pMFRA - pIn);
                    SKIP(pMFRA, 8); // Skip uint64, moof offset

                    int skip = trafNum + 1 + trunNum + 1 + sampleNum + 1;
                    pMFRA = pMFRA + skip;
                }

                // Update here as we use numTFRAEntries as offset when indexing the array
                numTFRAEntries += numEntry; 

                break;
            case FOURCC('m', 'f', 'r', 'o'):
                debug("found mfro box\n");
                pMFRA = pMFRA + boxSize - sizeof(uint32_t);
                break;

            default:
                fprintf(stderr, "Error: Unknown box found '%08x'\n", U32_AT(pMFRA));
                goto cleanup;
        }

    }

    if (numTFRAEntries == 0)
    {
        fprintf(stderr, "Error: No TFRA entries found.\n");
        goto cleanup;
    }

    // Start by checking if the first entry points to a moof
    uint8_t* p1 = pIn + tfraEntries[0].moof;
    if (FOURCC('m', 'o', 'o', 'f') != U32_AT(p1 + 4))
    {
        uint32_t offset = U32_AT(p1);
        fprintf(stdout, "We found '%08x' instead of moof, try to correct with offset %x\n", U32_AT(p1 + 4), offset);

        /* verify the correction works for every entry before writing */
        for (int i = 0; i < numTFRAEntries; ++i)
        {
            debug("p = %x\n", tfraEntries[i].moof);
            uint8_t *p = pIn + tfraEntries[i].moof + offset;

            if (U32_AT(p + 4) == FOURCC('m', 'o', 'o', 'f'))
                debug("Found moof\n");
            else
            {
                debug("Found '%08x'\n", U32_AT(p + 4));
                goto cleanup;
            }
        }

        debug("Open output file\n");

        /* O_CREAT requires the mode argument - omitting it is undefined */
        fdout = open(argv[2], O_RDWR | O_CREAT | O_TRUNC, 0644);
        if (fdout < 0)
        {
            fprintf(stderr, "Error: failed to open '%s' for writing\n", argv[2]);
            goto cleanup;
        }

        debug("Set size of out file\n");

        /* extend the output to 'length' bytes so it can be mmap'd */
        lseek(fdout, length - 1, SEEK_SET);
        if (write(fdout, "", 1) != 1)
        {
            fprintf(stderr, "Error: failed to size '%s'\n", argv[2]);
            goto cleanup;
        }

        debug("Call mmap for output file\n");

        pOut = mmap(NULL, length, PROT_WRITE | PROT_READ, MAP_SHARED, fdout, 0);
        if (pOut == MAP_FAILED)
        {
            pOut = NULL;
            fprintf(stderr, "Error: mmap failed with error %d - '%s'\n", errno, strerror(errno));
            goto cleanup;
        }

        debug("Do memcpy\n");
        memcpy(pOut, pIn, length);

        debug("Update moof references\n");
        for (int i = 0; i < numTFRAEntries; ++i)
        {
            debug("p = %x\n", tfraEntries[i].moof);
            debug("offset = %x\n", offset);
            debug("out = %x\n", tfraEntries[i].moof + offset);
            uint64_t *p = (uint64_t*)(pOut + tfraEntries[i].moofOffset);
            (*p) = TO_U64(tfraEntries[i].moof + offset);
        }

        debug("Do msync\n");
        msync(pOut, length, MS_SYNC);
        fprintf(stdout, "Fixed all entries...\n");
    }
    else
    {
        fprintf(stdout, "The first moof reference is correct, assume the rest are as well and exit\n");
    }

    retval = 0;

cleanup:
    if (tfraEntries != NULL)
        free(tfraEntries);

    if (fdout >= 0)
        close(fdout);

    if (pOut != NULL)
        munmap(pOut, length);

    if (pIn != NULL)
        munmap(pIn, length);

    if (fdin >= 0)
        close(fdin);

    return retval;
}
/*
 * Main decode thread.
 *
 * Waits on m_decode_thread_sem for start/stop messages, lazily creates
 * the codec plugin the first time decoding starts, then loops pulling
 * frames from m_byte_stream and feeding them to the plugin.  For video,
 * if decode falls behind the session clock it asks the bytestream to
 * skip ahead to the next sync point.  On exit it logs skipped-frame,
 * fps and bitrate statistics and closes the plugin.  Returns 0.
 */
int CPlayerMedia::decode_thread (void) 
{
  //  uint32_t msec_per_frame = 0;
  int ret = 0;
  int thread_stop = 0, decoding = 0;
  uint32_t decode_skipped_frames = 0;
  frame_timestamp_t ourtime, lasttime;
      // Tell bytestream we're starting the next frame - they'll give us
      // the time.
  uint8_t *frame_buffer;
  uint32_t frame_len;
  void *ud = NULL;
  
  // statistics accumulated across the whole decode run
  uint32_t frames_decoded;
  uint64_t start_decode_time = 0;
  uint64_t last_decode_time = 0;
  bool have_start_time = false;
  bool have_frame_ts = false;
  bool found_out_of_range_ts = false;
  uint64_t bytes_decoded;

  lasttime.msec_timestamp = 0;
  frames_decoded = 0;
  bytes_decoded = 0;


  while (thread_stop == 0) {
    // waiting here for decoding or thread stop
    ret = SDL_SemWait(m_decode_thread_sem);
#ifdef DEBUG_DECODE
    media_message(LOG_DEBUG, "%s Decode thread awake",
		  get_name());
#endif
    // refresh thread_stop / decoding from the message queue
    parse_decode_message(thread_stop, decoding);

    if (decoding == 1) {
      // We've been told to start decoding - if we don't have a codec, 
      // create one
      m_sync->set_wait_sem(m_decode_thread_sem);
      if (m_plugin == NULL) {
	switch (m_sync_type) {
	case VIDEO_SYNC:
	  create_video_plugin(NULL, 
			      STREAM_TYPE_RTP,
			      NULL,
			      -1,
			      -1,
			      m_media_fmt,
			      NULL,
			      m_user_data,
			      m_user_data_size);
	  break;
	case AUDIO_SYNC:
	  create_audio_plugin(NULL,
			      STREAM_TYPE_RTP,
			      NULL,
			      -1, 
			      -1, 
			      m_media_fmt,
			      NULL,
			      m_user_data,
			      m_user_data_size);
	  break;
	case TIMED_TEXT_SYNC:
	  create_text_plugin(NULL,
			     STREAM_TYPE_RTP, 
			     NULL, 
			     m_media_fmt, 
			     m_user_data, 
			     m_user_data_size);
	  break;
	}
	// create_*_plugin sets m_plugin_data on success
	if (m_plugin_data == NULL) {
	  m_plugin = NULL;
	} else {
	  media_message(LOG_DEBUG, "Starting %s codec from decode thread",
			m_plugin->c_name);
	}
      }
      if (m_plugin != NULL) {
	m_plugin->c_do_pause(m_plugin_data);
      } else {
	// no codec could be created - drain RTP packets until told to stop
	while (thread_stop == 0 && decoding) {
	  SDL_Delay(100);
	  if (m_rtp_byte_stream) {
	    m_rtp_byte_stream->flush_rtp_packets();
	  }
	  parse_decode_message(thread_stop, decoding);
	}
      }
    }
    /*
     * this is our main decode loop
     */
#ifdef DEBUG_DECODE
    media_message(LOG_DEBUG, "%s Into decode loop", get_name());
#endif
    while ((thread_stop == 0) && decoding) {
      parse_decode_message(thread_stop, decoding);
      if (thread_stop != 0)
	continue;
      if (decoding == 0) {
	// paused: drop timestamp continuity so we don't skip on resume
	m_plugin->c_do_pause(m_plugin_data);
	have_frame_ts = false;
	continue;
      }
      if (m_byte_stream->eof()) {
	media_message(LOG_INFO, "%s hit eof", get_name());
	if (m_sync) m_sync->set_eof();
	decoding = 0;
	continue;
      }
      if (m_byte_stream->have_frame() == false) {
	// Indicate that we're waiting, and wait for a message from RTP
	// task.
	wait_on_bytestream();
	continue;
      }

      frame_buffer = NULL;
      bool have_frame;
      memset(&ourtime, 0, sizeof(ourtime));
      have_frame = m_byte_stream->start_next_frame(&frame_buffer, 
						   &frame_len,
						   &ourtime,
						   &ud);
      if (have_frame == false) continue;
      /*
       * If we're decoding video, see if we're playing - if so, check
       * if we've fallen significantly behind the audio
       */
      if (get_sync_type() == VIDEO_SYNC &&
	  (m_parent->get_session_state() == SESSION_PLAYING) &&
	  have_frame_ts) {
	int64_t ts_diff = ourtime.msec_timestamp - lasttime.msec_timestamp;

	if (ts_diff > TO_D64(1000) ||
	    ts_diff < TO_D64(-1000)) {
	  // out of range timestamp - we'll want to not skip here
	  found_out_of_range_ts = true;
	  media_message(LOG_INFO, "found out of range ts "U64" last "U64" "D64,
			ourtime.msec_timestamp, 
			lasttime.msec_timestamp,
			ts_diff);
	} else {
	  uint64_t current_time = m_parent->get_playing_time();
	  if (found_out_of_range_ts) {
	    // suppress skipping until the stream timestamps come back
	    // within 5 seconds of the playing clock
	    ts_diff = current_time - ourtime.msec_timestamp;
	    if (ts_diff > TO_D64(0) && ts_diff < TO_D64(5000)) {
	      found_out_of_range_ts = false;
	      media_message(LOG_INFO, 
			    "ts back in playing range "U64" "D64,
			    ourtime.msec_timestamp, ts_diff);
	    }
	  } else {
	    // regular time
	    if (current_time >= ourtime.msec_timestamp) {
	      media_message(LOG_INFO, 
			    "Candidate for skip decode "U64" our "U64, 
			    current_time, ourtime.msec_timestamp);
	      // If the bytestream can skip ahead, let's do so
	      if (m_byte_stream->can_skip_frame() != 0) {
		int ret;
		int hassync;
		int count;
		current_time += 200; 
		count = 0;
		// Skip up to the current time + 200 msec
		ud = NULL;
		do {
		  if (ud != NULL) free(ud);
		  frame_buffer = NULL;
		  ret = m_byte_stream->skip_next_frame(&ourtime, 
						       &hassync,
						       &frame_buffer, 
						       &frame_len,
						       &ud);
		  decode_skipped_frames++;
		} while (ret != 0 &&
			 !m_byte_stream->eof() && 
			 current_time > ourtime.msec_timestamp);
		if (m_byte_stream->eof() || ret == 0) continue;
		media_message(LOG_INFO, "Skipped ahead "U64 " to "U64, 
			      current_time - 200, ourtime.msec_timestamp);
		/*
		 * Ooh - fun - try to match to the next sync value - if not, 
		 * 15 frames
		 */
		do {
		  if (ud != NULL) free(ud);
		  ret = m_byte_stream->skip_next_frame(&ourtime, 
						       &hassync,
						       &frame_buffer, 
						       &frame_len,
						       &ud);
		  if (hassync < 0) {
		    // bytestream can't tell; give up after 200 msec past target
		    uint64_t diff = ourtime.msec_timestamp - current_time;
		    if (diff > TO_U64(200)) {
		      hassync = 1;
		    }
		  }
		  decode_skipped_frames++;
		  count++;
		} while (ret != 0 &&
			 hassync <= 0 &&
			 count < 30 &&
			 !m_byte_stream->eof());
		if (m_byte_stream->eof() || ret == 0) continue;
#ifdef DEBUG_DECODE
		media_message(LOG_INFO, 
			      "Matched ahead - count %d, sync %d time "U64,
			      count, hassync, ourtime.msec_timestamp);
#endif
	      }
	    }
	  } // end regular time
	}
      }
      // remember this frame's time for the next continuity check
      lasttime = ourtime;
      have_frame_ts = true;
#ifdef DEBUG_DECODE
      media_message(LOG_DEBUG, "Decoding %s frame " U64, get_name(),
		    ourtime.msec_timestamp);
#endif
      if (frame_buffer != NULL && frame_len != 0) {
	int sync_frame;
	ret = m_plugin->c_decode_frame(m_plugin_data,
				       &ourtime,
				       m_streaming,
				       &sync_frame,
				       frame_buffer, 
				       frame_len,
				       ud);
#ifdef DEBUG_DECODE
	media_message(LOG_DEBUG, "Decoding %s frame return %d", 
		      get_name(), ret);
#endif
	// ret > 0 is the number of bytes the codec consumed
	if (ret > 0) {
	  frames_decoded++;
	  if (have_start_time == false) {
	    have_start_time = true;
	    start_decode_time = ourtime.msec_timestamp;
	  }
	  last_decode_time = ourtime.msec_timestamp;
	  m_byte_stream->used_bytes_for_frame(ret);
	  bytes_decoded += ret;
	} else {
	  // decode failed - discard the whole frame
	  m_byte_stream->used_bytes_for_frame(frame_len);
	}

      }
    }
    // calculate frame rate for session
  }
  if (is_audio() == false)
    media_message(LOG_NOTICE, "Video decoder skipped %u frames", 
		  decode_skipped_frames);
  // report fps / bitrate over the span of decoded timestamps
  if (last_decode_time > start_decode_time) {
    double fps, bps;
    double secs;
    uint64_t total_time = last_decode_time - start_decode_time;
    secs = UINT64_TO_DOUBLE(total_time);
    secs /= 1000.0;
#if 0
    media_message(LOG_DEBUG, "last time "U64" first "U64, 
		  last_decode_time, start_decode_time);
#endif
    fps = frames_decoded;
    fps /= secs;
    bps = UINT64_TO_DOUBLE(bytes_decoded);
    bps *= 8.0 / secs;
    media_message(LOG_NOTICE, "%s - bytes "U64", seconds %g, fps %g bps %g",
		  get_name(),
		  bytes_decoded, secs, 
		  fps, bps);
  }
  if (m_plugin) {
    m_plugin->c_close(m_plugin_data);
    m_plugin_data = NULL;
  }
  return (0);
}