Example #1
File: tv.c Project: basinilya/mplayer
static int demux_tv_fill_buffer(demuxer_t *demux, demux_stream_t *ds)
{
    tvi_handle_t *tvh=(tvi_handle_t*)(demux->priv);
    demux_packet_t* dp;
    unsigned int len=0;

    /* ================== ADD AUDIO PACKET =================== */

    if (ds==demux->audio && tvh->tv_param->noaudio == 0 &&
            tvh->functions->control(tvh->priv,
                                    TVI_CONTROL_IS_AUDIO, 0) == TVI_CONTROL_TRUE)
    {
        len = tvh->functions->get_audio_framesize(tvh->priv);

        dp=new_demux_packet(len);
        dp->flags|=1; /* Keyframe */
        dp->pts=tvh->functions->grab_audio_frame(tvh->priv, dp->buffer,len);
        ds_add_packet(demux->audio,dp);
    }

    /* ================== ADD VIDEO PACKET =================== */

    if (ds==demux->video && tvh->functions->control(tvh->priv,
            TVI_CONTROL_IS_VIDEO, 0) == TVI_CONTROL_TRUE)
    {
        len = tvh->functions->get_video_framesize(tvh->priv);
        dp=new_demux_packet(len);
        dp->flags|=1; /* Keyframe */
        dp->pts=tvh->functions->grab_video_frame(tvh->priv, dp->buffer, len);
        ds_add_packet(demux->video,dp);
    }

    if (tvh->tv_param->scan) tv_scan(tvh);
    return 1;
}
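Nearly every fill-buffer callback in this collection follows the same sequence: allocate a packet with new_demux_packet(), read or copy the payload into dp->buffer, stamp dp->pts / dp->pos / dp->flags (dp->keyframe in the newer mpv API), and queue it with ds_add_packet(). The sketch below is a hypothetical distillation of that sequence, not code from any of the projects listed here; it uses only calls that appear in the examples (stream_read, resize_demux_packet, free_demux_packet) and is close to what the raw-audio reader in Example #16 does, with the short-read handling spelled out.
static int demux_sketch_fill_buffer(demuxer_t *demuxer, demux_stream_t *ds)
{
    sh_audio_t *sh_audio = demuxer->audio->sh;
    int len = sh_audio->wf->nAvgBytesPerSec;   /* one second of data per packet */
    demux_packet_t *dp;
    int64_t spos;

    if (demuxer->stream->eof)
        return 0;                              /* 0 = EOF / nothing queued */

    spos = stream_tell(demuxer->stream);
    dp = new_demux_packet(len);
    len = stream_read(demuxer->stream, dp->buffer, len);
    if (len <= 0) {
        free_demux_packet(dp);                 /* nothing read: drop the packet */
        return 0;
    }
    resize_demux_packet(dp, len);              /* trim the packet after a short read */

    dp->pts = (spos - demuxer->movi_start) / (double)sh_audio->wf->nAvgBytesPerSec;
    dp->pos = spos - demuxer->movi_start;
    dp->flags = 0;

    ds_add_packet(ds, dp);                     /* append the packet to the DS stream */
    return 1;                                  /* 1 = one packet queued */
}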
Example #2
static int demux_dshow_fill_buffer(demuxer_t *demuxer, demux_stream_t *ds)
{
	demux_packet_t *dp;
	sh_video_t *sh_video;
	float new_ftime, new_fps, diff;

	if(b_stopped) return 0;

    /* Video */
    if (ds == demuxer->video) {
		if(b_vfinished) return 0;
		if(WaitForSingleObject(hWaitVDec, 5000) == WAIT_TIMEOUT)
			return 0;
		if (!g_vpkg.pData || g_vpkg.iLen < 1) {
			SetEvent(hWaitVSend);
			return 0;
		}
		sh_video = demuxer->video->sh;
        dp = new_demux_packet(g_vpkg.iLen);
        dp->pts = (float)(g_vpkg.i_pts / 1E7);
        memcpy(dp->buffer, g_vpkg.pData, g_vpkg.iLen);
		SetEvent(hWaitVSend);
		new_ftime = dp->pts - last_vpts;
		if(last_vpts > 0 && new_ftime > 0) {
			new_fps = 1.0/ new_ftime;
			diff = new_fps - sh_video->fps;
			if(diff > 0.9 || diff < -0.9) {
				sh_video->frametime = new_ftime;
				sh_video->fps = new_fps;
			}
		}
		if(last_vpts < 0)
			last_vpts = 0;
		else
			stream_pos = last_vpts = dp->pts;
        ds_add_packet(ds, dp);
    }

    /* Audio */
	if (ds == demuxer->audio) {
		if(b_afinished) return 0;
		if(WaitForSingleObject(hWaitADec, wait_time) == WAIT_TIMEOUT)
			return 0;
		if (!g_apkg.pData || g_apkg.iLen < 1) {
			SetEvent(hWaitASend);
			return 0;
		}
        dp = new_demux_packet(g_apkg.iLen);
        dp->pts = (float)(g_apkg.i_pts / 1E7);
        memcpy(dp->buffer, g_apkg.pData, g_apkg.iLen);
		SetEvent(hWaitASend);
		if(!VideoInfo.haveVideo)
			stream_pos = dp->pts;
        ds_add_packet(ds, dp);
	}

    return 1;
}
Example #3
File: demux_tv.c Project: chyiz/mpv
static int demux_tv_fill_buffer(demuxer_t *demux)
{
    tvi_handle_t *tvh=(tvi_handle_t*)(demux->priv);
    demux_packet_t* dp;
    unsigned int len=0;
    struct sh_stream *want_audio = NULL, *want_video = NULL;

    int num_streams = demux_get_num_stream(demux);
    for (int n = 0; n < num_streams; n++) {
        struct sh_stream *sh = demux_get_stream(demux, n);
        if (!demux_has_packet(sh) && demux_stream_is_selected(sh)) {
            if (sh->type == STREAM_AUDIO)
                want_audio = sh;
            if (sh->type == STREAM_VIDEO)
                want_video = sh;
        }
    }

    /* ================== ADD AUDIO PACKET =================== */

    if (want_audio && tvh->tv_param->audio &&
        tvh->functions->control(tvh->priv,
                                TVI_CONTROL_IS_AUDIO, 0) == TVI_CONTROL_TRUE)
    {
        len = tvh->functions->get_audio_framesize(tvh->priv);

        dp=new_demux_packet(len);
        if (dp) {
            dp->keyframe = true;
            dp->pts=tvh->functions->grab_audio_frame(tvh->priv, dp->buffer,len);
            demux_add_packet(want_audio, dp);
        }
    }

    /* ================== ADD VIDEO PACKET =================== */

    if (want_video && tvh->functions->control(tvh->priv,
                            TVI_CONTROL_IS_VIDEO, 0) == TVI_CONTROL_TRUE)
    {
        len = tvh->functions->get_video_framesize(tvh->priv);
        dp=new_demux_packet(len);
        if (dp) {
            dp->keyframe = true;
            dp->pts=tvh->functions->grab_video_frame(tvh->priv, dp->buffer, len);
            demux_add_packet(want_video, dp);
        }
    }

    if (tvh->tv_param->scan) tv_scan(tvh);
    return 1;
}
Example #4
int demux_rtp_fill_buffer(demuxer_t* demuxer, demux_stream_t* ds)
{
    Nemesi_DemuxerStreamData * ndsd = demuxer->priv;
    Nemesi_SessionType stype;
    rtsp_ctrl * ctl = ndsd->rtsp;
    rtp_thread * rtp_th = rtsp_get_rtp_th(ctl);
    rtp_frame fr;

    demux_packet_t* dp;

    if ( (!ctl->rtsp_queue) || (demuxer->stream->eof) || (rtp_fill_buffers(rtp_th)) ) {
        mp_msg(MSGT_DEMUX, MSGL_INFO, "End of Stream...\n");
        demuxer->stream->eof = 1;
        return 0;
    }

    if (ds == demuxer->video)
        stype = NEMESI_SESSION_VIDEO;
    else if (ds == demuxer->audio)
        stype = NEMESI_SESSION_AUDIO;
    else
        return 0;

    if(!get_data_for_session(ndsd, stype, &fr)) {
        dp = new_demux_packet(fr.len);
        memcpy(dp->buffer, fr.data, fr.len);
        fr.time_sec += ndsd->seek;
        ndsd->time[stype] = dp->pts = fr.time_sec;
        ds_add_packet(ds, dp);
    }
    else {
        stype = (stype + 1) % 2;
        if (stype == NEMESI_SESSION_VIDEO)
            ds = demuxer->video;
        else
            ds = demuxer->audio;

        if(!get_data_for_session(ndsd, stype, &fr)) {
            dp = new_demux_packet(fr.len);
            memcpy(dp->buffer, fr.data, fr.len);
            fr.time_sec += ndsd->seek;
            ndsd->time[stype] = dp->pts = fr.time_sec;
            ds_add_packet(ds, dp);
        }
    }

    return 1;
}
Example #5
File: demux_mf.c Project: CrimsonVoid/mpv
// return value:
//     0 = EOF or no stream found
//     1 = successfully read a packet
static int demux_mf_fill_buffer(demuxer_t *demuxer)
{
    mf_t *mf = demuxer->priv;
    if (mf->curr_frame >= mf->nr_of_files)
        return 0;

    struct stream *entry_stream = NULL;
    if (mf->streams)
        entry_stream = mf->streams[mf->curr_frame];
    struct stream *stream = entry_stream;
    if (!stream) {
        char *filename = mf->names[mf->curr_frame];
        if (filename)
            stream = stream_open(filename, demuxer->opts);
    }

    if (stream) {
        stream_seek(stream, 0);
        bstr data = stream_read_complete(stream, NULL, MF_MAX_FILE_SIZE);
        if (data.len) {
            demux_packet_t *dp = new_demux_packet(data.len);
            memcpy(dp->buffer, data.start, data.len);
            dp->pts = mf->curr_frame / mf->sh->fps;
            dp->keyframe = true;
            demuxer_add_packet(demuxer, demuxer->streams[0], dp);
        }
        talloc_free(data.start);
    }

    if (stream && stream != entry_stream)
        free_stream(stream);

    mf->curr_frame++;
    return 1;
}
Example #6
// return value:
//     0 = EOF or no stream found
//     1 = successfully read a packet
static int demux_mf_fill_buffer(demuxer_t *demuxer, demux_stream_t *ds) {
    mf_t         * mf;
    struct stat    fs;
    FILE         * f;

    mf=(mf_t*)demuxer->priv;
    if ( mf->curr_frame >= mf->nr_of_files ) return 0;

    stat( mf->names[mf->curr_frame],&fs );
//  printf( "[demux_mf] frame: %d (%s,%d)\n",mf->curr_frame,mf->names[mf->curr_frame],fs.st_size );

    if ( !( f=fopen( mf->names[mf->curr_frame],"rb" ) ) ) return 0;
    {
        sh_video_t     * sh_video = demuxer->video->sh;
        demux_packet_t * dp = new_demux_packet( fs.st_size );
        if ( !fread( dp->buffer,fs.st_size,1,f ) ) return 0;
        dp->pts=mf->curr_frame / sh_video->fps;
        dp->pos=mf->curr_frame;
        dp->flags=0;
        // append packet to DS stream:
        ds_add_packet( demuxer->video,dp );
    }
    fclose( f );

    demuxer->filepos=mf->curr_frame++;
    return 1;
}
Example #7
static int demux_xmms_fill_buffer(demuxer_t* demuxer, demux_stream_t *ds) {
    sh_audio_t *sh_audio = demuxer->audio->sh;
    xmms_priv_t *priv=demuxer->priv;
    demux_packet_t*  dp;

    if (xmms_length<=0) demuxer->seekable=0;
    else demuxer->seekable=1;

    while (xmms_audiopos<XMMS_PACKETSIZE/2) {
        if((priv->ip->get_time()<0) || !xmms_playing)
            return 0;
        usleep(1000);
    }

    pthread_mutex_lock(&xmms_mutex);
    dp = new_demux_packet(XMMS_PACKETSIZE/2);
    dp->pts = priv->spos / sh_audio->wf->nAvgBytesPerSec;
    ds->pos = priv->spos;

    memcpy(dp->buffer,xmms_audiobuffer,XMMS_PACKETSIZE/2);
    memcpy(xmms_audiobuffer,&xmms_audiobuffer[XMMS_PACKETSIZE/2],
                                            xmms_audiopos-XMMS_PACKETSIZE/2);
    xmms_audiopos-=XMMS_PACKETSIZE/2;
    pthread_mutex_unlock(&xmms_mutex);

    ds_add_packet(ds,dp);

    return 1;
}
Example #8
// return value:
//     0 = EOF or no stream found
//     1 = successfully read a packet
static int demux_rawdv_fill_buffer(demuxer_t *demuxer, demux_stream_t *ds)
{
   rawdv_frames_t *frames = (rawdv_frames_t *)demuxer->priv;
   demux_packet_t* dp_video=NULL;
   sh_video_t *sh_video = demuxer->video->sh;
   int bytes_read=0;
//   fprintf(stderr,"demux_rawdv_fill_buffer() seek to %qu, size: %d\n",frames->current_filepos,frames->frame_size);
   // fetch the frame from the file
   // first, position the file properly since ds_read_packet() doesn't
   // seem to do it, even though it takes a file offset as a parameter
   stream_seek(demuxer->stream, frames->current_filepos);

   dp_video=new_demux_packet(frames->frame_size);
   bytes_read=stream_read(demuxer->stream,dp_video->buffer,frames->frame_size);
   if (bytes_read<frames->frame_size)
      return 0;
   dp_video->pts=frames->current_frame/sh_video->fps;
   dp_video->pos=frames->current_filepos;
   dp_video->flags=0;

   if (demuxer->audio && demuxer->audio->id>=-1)
   {
      demux_packet_t* dp_audio=clone_demux_packet(dp_video);
      ds_add_packet(demuxer->audio,dp_audio);
   }
   ds_add_packet(demuxer->video,dp_video);
   // get the next frame ready
   frames->current_filepos+=frames->frame_size;
   frames->current_frame++;
//   fprintf(stderr," audio->packs: %d , video->packs: %d \n",demuxer->audio->packs, demuxer->video->packs);
   return 1;
}
Example #9
static int demux_ppbox_fill_buffer(demuxer_t* demuxer, demux_stream_t *ds) {
  ppbox_demux_priv_t *priv = (ppbox_demux_priv_t *) demuxer->priv;
  PPBOX_SampleEx2 sample;
  PP_int32 ret;

  while (1) {
    ret = PPBOX_ReadSampleEx2()(&sample);
    if (ret == ppbox_success) {
      demux_packet_t *dp = new_demux_packet(sample.buffer_length);
      memcpy(dp->buffer, sample.buffer, sample.buffer_length);
      dp->pts = (double)(sample.start_time + sample.composite_time_delta) * 0.000001;
      ds_add_packet(priv->streams[sample.stream_index], dp);
      if (priv->streams[sample.stream_index] == ds)
        return 1;
    } else if (ret == ppbox_would_block) {
        if (mp_input_check_interrupt(100))
          return 0;
    } else if (ret == ppbox_stream_end) {
      demuxer->stream->eof = 1;
      return 0;
    } else {
      return 0;
    }
  };
  return 0;
}
Example #10
static int demux_lavf_fill_buffer(demuxer_t *demux, demux_stream_t *dsds){
    lavf_priv_t *priv= demux->priv;
    AVPacket pkt;
    demux_packet_t *dp;
    demux_stream_t *ds;
    int id;
    mp_msg(MSGT_DEMUX,MSGL_DBG2,"demux_lavf_fill_buffer()\n");

    demux->filepos=stream_tell(demux->stream);

    if(av_read_frame(priv->avfc, &pkt) < 0)
        return 0;
        
    id= pkt.stream_index;

    if(id==demux->audio->id){
        // audio
        ds=demux->audio;
        if(!ds->sh){
            ds->sh=demux->a_streams[id];
            mp_msg(MSGT_DEMUX,MSGL_V,"Auto-selected LAVF audio ID = %d\n",ds->id);
        }
    } else if(id==demux->video->id){
        // video
        ds=demux->video;
        if(!ds->sh){
            ds->sh=demux->v_streams[id];
            mp_msg(MSGT_DEMUX,MSGL_V,"Auto-selected LAVF video ID = %d\n",ds->id);
        }
    } else {
        av_free_packet(&pkt);
        return 1;
    }
        
    if(0/*pkt.destruct == av_destruct_packet*/){
        //ok kids, dont try this at home :)
        dp=malloc(sizeof(demux_packet_t));
        dp->len=pkt.size;
        dp->next=NULL;
        dp->refcount=1;
        dp->master=NULL;
        dp->buffer=pkt.data;
        pkt.destruct= NULL;
    }else{
        dp=new_demux_packet(pkt.size);
        memcpy(dp->buffer, pkt.data, pkt.size);
        av_free_packet(&pkt);
    }

    if(pkt.pts != AV_NOPTS_VALUE){
        dp->pts=pkt.pts * av_q2d(priv->avfc->streams[id]->time_base);
        priv->last_pts= dp->pts * AV_TIME_BASE;
    }
    dp->pos=demux->filepos;
    dp->flags= !!(pkt.flags&PKT_FLAG_KEY);
    // append packet to DS stream:
    ds_add_packet(ds,dp);
    return 1;
}
Example #11
// return value:
//     0 = EOF or no stream found
//     1 = successfully read a packet
static int demux_y4m_fill_buffer(demuxer_t *demux, demux_stream_t *dsds) {
  demux_stream_t *ds=demux->video;
  demux_packet_t *dp;
  y4m_priv_t *priv=demux->priv;
  y4m_frame_info_t fi;
  unsigned char *buf[3];
  int err, size;

  y4m_init_frame_info(&fi);

  demux->filepos=stream_tell(demux->stream);

  size = ((sh_video_t*)ds->sh)->disp_w*((sh_video_t*)ds->sh)->disp_h;

  dp = new_demux_packet(3*size/2);

  /* swap U and V components */
  buf[0] = dp->buffer;
  buf[1] = dp->buffer + 5*size/4;
  buf[2] = dp->buffer + size;

  if (priv->is_older)
  {
    int c;
    
    c = stream_read_char(demux->stream); /* F */
    if (c == -256)
	return 0; /* EOF */
    if (c != 'F')
    {
	mp_msg(MSGT_DEMUX, MSGL_V, "Bad frame at %d\n", (int)stream_tell(demux->stream)-1);
	return 0;
    }
    stream_skip(demux->stream, 5); /* RAME\n */
    stream_read(demux->stream, buf[0], size);
    stream_read(demux->stream, buf[1], size/4);
    stream_read(demux->stream, buf[2], size/4);
  }
  else
  {
    if ((err=y4m_read_frame(demux->stream, priv->si, &fi, buf)) != Y4M_OK) {
      mp_msg(MSGT_DEMUX, MSGL_V, "error reading frame %s\n", y4m_strerr(err));
      return 0;
    }
  }

  /* This seems to be the right way to calculate the presentation time stamp */
  dp->pts=(float)priv->framenum/((sh_video_t*)ds->sh)->fps;
  priv->framenum++;
  dp->pos=demux->filepos;
  dp->flags=0;
  ds_add_packet(ds, dp);

  return 1;
}
Example #12
static void demux_ty_CopyToDemuxPacket( demux_stream_t *ds,
       unsigned char *buffer, int size, loff_t pos, int64_t pts )
{
   demux_packet_t *dp = new_demux_packet( size );
   memcpy( dp->buffer, buffer, size );
   if (pts != MP_NOPTS_VALUE)
   dp->pts = pts / 90000.0;
   dp->pos = pos;
   dp->flags = 0;
   ds_add_packet( ds, dp );
}
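The 90000.0 divisor above converts a PTS expressed in 90 kHz MPEG system-clock ticks into seconds, the unit dp->pts carries throughout these examples. A small illustrative conversion (the input value is made up):
int64_t pts_90khz = 8100000;              /* hypothetical PTS from a PES header */
double  pts_sec   = pts_90khz / 90000.0;  /* 90.0 seconds */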
Example #13
static void stream_add_packet(Nemesi_DemuxerStreamData * ndsd,
                              Nemesi_SessionType stype,
                              demux_stream_t* ds, rtp_frame * fr)
{
    demux_packet_t* dp = new_demux_packet(fr->len);
    memcpy(dp->buffer, fr->data, fr->len);

    fr->time_sec += ndsd->seek;
    ndsd->time[stype] = dp->pts = fr->time_sec;

    ds_add_packet(ds, dp);
}
Example #14
File: demuxer.c Project: complynx/mplayer2
void ds_read_packet(demux_stream_t *ds, stream_t *stream, int len,
                    double pts, off_t pos, bool keyframe)
{
    demux_packet_t *dp = new_demux_packet(len);
    len = stream_read(stream, dp->buffer, len);
    resize_demux_packet(dp, len);
    dp->pts = pts;
    dp->pos = pos;
    dp->keyframe = keyframe;
    // append packet to DS stream:
    ds_add_packet(ds, dp);
}
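ds_read_packet() is the convenience path: it allocates the packet, reads from the stream, trims it after a short read, and queues it in one call; the FILM demuxer in Example #20 calls it this way for its non-CVID chunks. A hedged usage sketch, where the chunk_* variables and is_key are hypothetical stand-ins for values a real demuxer parses from its container:
ds_read_packet(demuxer->video, demuxer->stream,
               chunk_size,     /* bytes to read from the current stream position */
               chunk_pts,      /* presentation time stamp in seconds */
               chunk_offset,   /* byte offset, stored in dp->pos */
               is_key);        /* keyframe flag, stored in dp->keyframe */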
Example #15
static int demux_aac_adif_fill_buffer(demuxer_t *demuxer, demux_stream_t *ds)
{
  aac_adif_priv_t *priv = (aac_adif_priv_t *) demuxer->priv;
  demux_packet_t *dp;
  int c1, c2, len, srate, num;
  float tm = 0;

  if(demuxer->stream->eof || (demuxer->movi_end && stream_tell(demuxer->stream) >= demuxer->movi_end))
    return 0;

  len = READLEN;

  dp = new_demux_packet(len);
  if(! dp)
  {
    fprintf(stderr,"fill_buffer,NEW ADIF PACKET(%d)FAILED\n", len);
    mp_msg(MSGT_DEMUX, MSGL_ERR, "fill_buffer, NEW ADIF PACKET(%d)FAILED\n", len);
    return 0;
  }
					
  len = stream_read(demuxer->stream, dp->buffer, READLEN);

  /* A short read near EOF is fine; dp->len is set to the actual length below. */


  if(priv->bitrate)
  {
    tm = (len * 8);
    tm /= (priv->bitrate); // FIXME assumes CBR
  }

  priv->last_pts += tm;
  dp->len = len;
  dp->pts = priv->last_pts;
  //  fprintf(stderr, "\nPTS: %.3f\n", dp->pts);
  ds_add_packet(demuxer->audio, dp);
  priv->size += len;
  priv->time += tm;
	
  demuxer->filepos = stream_tell(demuxer->stream);
	
  return len;
}
Example #16
static int demux_rawaudio_fill_buffer(demuxer_t* demuxer, demux_stream_t *ds) {
  sh_audio_t* sh_audio = demuxer->audio->sh;
  int l = sh_audio->wf->nAvgBytesPerSec;
  int64_t spos = stream_tell(demuxer->stream);
  demux_packet_t*  dp;

  if(demuxer->stream->eof)
    return 0;

  dp = new_demux_packet(l);
  dp->pts = (spos - demuxer->movi_start)  / (float)(sh_audio->wf->nAvgBytesPerSec);
  dp->pos = (spos - demuxer->movi_start);

  l = stream_read(demuxer->stream,dp->buffer,l);
  resize_demux_packet(dp, l);
  ds_add_packet(ds,dp);

  return 1;
}
Example #17
static int demux_vqf_fill_buffer(demuxer_t* demuxer, demux_stream_t *ds) {
  sh_audio_t* sh_audio = demuxer->audio->sh;
  int l = sh_audio->wf->nAvgBytesPerSec;
  loff_t spos = stream_tell(demuxer->stream);
  demux_packet_t*  dp;

  if(stream_eof(demuxer->stream))
    return 0;

  dp = new_demux_packet(l);
  ds->pts = spos / (float)(sh_audio->wf->nAvgBytesPerSec);
  ds->pos = spos;

  l=stream_read(demuxer->stream,dp->buffer,l);
  resize_demux_packet(dp,l);
  ds_add_packet(ds,dp);

  return 1;
}
Example #18
void add_audio_packet(double ts, unsigned char *p, int len)
{
    //LOGE("a pts %f len:%d %x\n",ts,len,p);
    demux_packet_t* dp;

    dp = new_demux_packet(len);
    cur_ts = ts;
    //LOGE("end a pts %f len:%d %x\n",ts,len,p);
    if (dp)
    {
        memcpy(dp->buffer, p, len);
        mtv_pos += len;
        dp->pts = ts;
        dp->pos = mtv_pos;
        dp->len = len;
        dp->flags = 0;
        ds_add_packet(dsdemuxer->audio, dp);
    }
    //LOGE("AUD pts %f len:%d \n",ts,len);
}
Example #19
void add_video_packet(double ts, unsigned char *p, int len)
{
    //LOGE("v pts %f len:%d %x\n",ts,len,p);
    demux_packet_t* dp;

    dp = new_demux_packet(len);
    //cur_ts=ts;

    if (dp)
    {
        memcpy(dp->buffer, p, len);
        mtv_pos += len;
        dp->pts = ts;
        dp->pos = mtv_pos;
        dp->len = len;
        dp->flags = 0;
        ds_add_packet(dsdemuxer->video, dp);
    }
}
Example #20
// return value:
//     0 = EOF or no stream found
//     1 = successfully read a packet
static int demux_film_fill_buffer(demuxer_t *demuxer, demux_stream_t *ds)
{
  int i;
  unsigned char byte_swap;
  int cvid_size;
  sh_video_t *sh_video = demuxer->video->sh;
  sh_audio_t *sh_audio = demuxer->audio->sh;
  film_data_t *film_data = (film_data_t *)demuxer->priv;
  film_chunk_t film_chunk;
  int length_fix_bytes;
  demux_packet_t* dp;

  // see if the end has been reached
  if (film_data->current_chunk >= film_data->total_chunks)
    return 0;

  film_chunk = film_data->chunks[film_data->current_chunk];

  // position stream and fetch chunk
  stream_seek(demuxer->stream, film_chunk.chunk_offset);

  // load the chunks manually (instead of using ds_read_packet()), since
  // they require some adjustment
  // (all ones in syncinfo1 indicates an audio chunk)
  if (film_chunk.syncinfo1 == 0xFFFFFFFF)
  {
   if(demuxer->audio->id>=-1){   // audio not disabled
    dp = new_demux_packet(film_chunk.chunk_size);
    if (stream_read(demuxer->stream, dp->buffer, film_chunk.chunk_size) !=
      film_chunk.chunk_size)
      return 0;
    dp->pts = film_chunk.pts;
    dp->pos = film_chunk.chunk_offset;
    dp->flags = 0;

    // adjust the data before queuing it:
    //   8-bit: signed -> unsigned
    //  16-bit: big-endian -> little-endian
    if (sh_audio->wf->wBitsPerSample == 8)
      for (i = 0; i < film_chunk.chunk_size; i++)
        dp->buffer[i] += 128;
    else
      for (i = 0; i < film_chunk.chunk_size; i += 2)
      {
        byte_swap = dp->buffer[i];
        dp->buffer[i] = dp->buffer[i + 1];
        dp->buffer[i + 1] = byte_swap;
      }

    /* for SegaSaturn .cpk file, translate audio data if stereo */
    if (sh_audio->wf->nChannels == 2) {
      if (sh_audio->wf->wBitsPerSample == 8) {
        unsigned char* tmp = dp->buffer;
        unsigned char  buf[film_chunk.chunk_size];
        for(i = 0; i < film_chunk.chunk_size/2; i++) {
          buf[i*2] = tmp[i];
          buf[i*2+1] = tmp[film_chunk.chunk_size/2+i];
        }
        memcpy( tmp, buf, film_chunk.chunk_size );
      }
      else {/* for 16bit */
        unsigned short* tmp = dp->buffer;
        unsigned short  buf[film_chunk.chunk_size/2];
        for(i = 0; i < film_chunk.chunk_size/4; i++) {
          buf[i*2] = tmp[i];
          buf[i*2+1] = tmp[film_chunk.chunk_size/4+i];
        }
        memcpy( tmp, buf, film_chunk.chunk_size );
      }
    }

    // append packet to DS stream
    ds_add_packet(demuxer->audio, dp);
   }
  }
  else
  {
    // if the demuxer is dealing with CVID data, deal with it a special way
    if (sh_video->format == mmioFOURCC('c', 'v', 'i', 'd'))
    {
      if (film_data->film_version)
        length_fix_bytes = 2;
      else
        length_fix_bytes = 6;

      // account for the fix bytes when allocating the buffer
      dp = new_demux_packet(film_chunk.chunk_size - length_fix_bytes);

      // these CVID data chunks have a few extra bytes; skip them
      if (stream_read(demuxer->stream, dp->buffer, 10) != 10)
        return 0;
      stream_skip(demuxer->stream, length_fix_bytes);

      if (stream_read(demuxer->stream, dp->buffer + 10,
        film_chunk.chunk_size - (10 + length_fix_bytes)) !=
        (film_chunk.chunk_size - (10 + length_fix_bytes)))
        return 0;

      dp->pts = film_chunk.pts;
      dp->pos = film_chunk.chunk_offset;
      dp->flags = (film_chunk.syncinfo1 & 0x80000000) ? 1 : 0;

      // fix the CVID chunk size
      cvid_size = film_chunk.chunk_size - length_fix_bytes;
      dp->buffer[1] = (cvid_size >> 16) & 0xFF;
      dp->buffer[2] = (cvid_size >>  8) & 0xFF;
      dp->buffer[3] = (cvid_size >>  0) & 0xFF;

      // append packet to DS stream
      ds_add_packet(demuxer->video, dp);
    }
    else
    {
      ds_read_packet(demuxer->video, demuxer->stream, film_chunk.chunk_size,
        film_chunk.pts,
        film_chunk.chunk_offset, (film_chunk.syncinfo1 & 0x80000000) ? 1 : 0);
    }
  }

  // chunk processed; advance to the next one
  film_data->current_chunk++;

  return 1;
}
Example #21
static int demux_lavf_fill_buffer(demuxer_t *demux, demux_stream_t *dsds){
    lavf_priv_t *priv= demux->priv;
    AVPacket pkt;
    demux_packet_t *dp;
    demux_stream_t *ds;
    int id;

    mp_msg(MSGT_DEMUX,MSGL_DBG2,"demux_lavf_fill_buffer()\n");
    demux->filepos=stream_tell(demux->stream);
    
    if(demux->a_changed_id != -2){
      if(demux->a_changed_id!=demux->audio->id && demux->a_streams[demux->a_changed_id]){
	priv->avfc->streams[demux->audio->id]->discard = AVDISCARD_ALL;
	demux->audio->id = demux->a_changed_id;
	demux->audio->sh = demux->a_streams[demux->audio->id];
	priv->avfc->streams[demux->audio->id]->discard = AVDISCARD_NONE;
      }
      demux->a_changed_id = -2;
    }

    if(demux->s_changed_id){
      if(demux->s_changed_id!=demux->sub->id){
	priv->avfc->streams[demux->sub->id]->discard = AVDISCARD_ALL;
	demux->sub->id = demux->s_changed_id;
	demux->sub->sh = demux->s_streams[demux->sub->id];
	priv->avfc->streams[demux->sub->id]->discard = AVDISCARD_NONE;
      }
      demux->s_changed_id = 0;
    }

    if(av_read_frame(priv->avfc, &pkt) < 0)
        return 0;

    id= pkt.stream_index;
    
    if(id==demux->audio->id){
        // audio
        ds=demux->audio;
        if(!ds->sh){
            ds->sh=demux->a_streams[id];
            mp_msg(MSGT_DEMUX,MSGL_V,"Auto-selected LAVF audio ID = %d\n",ds->id);
        }
    } else if(id==demux->video->id){
        // video
        ds=demux->video;
        if(!ds->sh){
            ds->sh=demux->v_streams[id];
            mp_msg(MSGT_DEMUX,MSGL_V,"Auto-selected LAVF video ID = %d\n",ds->id);
        }
    } else if(id==demux->sub->id){
        // subtitle
        ds=demux->sub;
        sub_utf8=1;
    } else {
        av_free_packet(&pkt);
        return 1;
    }
    if(pkt.destruct == av_destruct_packet && !CONFIG_MEMALIGN_HACK){
        dp=new_demux_packet(0);
        dp->len=pkt.size;
        dp->buffer=pkt.data;
        pkt.destruct= NULL;
    }else{
        dp=new_demux_packet(pkt.size);
        memcpy(dp->buffer, pkt.data, pkt.size);
        av_free_packet(&pkt);
    }
    if(pkt.pts != AV_NOPTS_VALUE){
      dp->pts=pkt.pts * av_q2d(priv->avfc->streams[id]->time_base);

#if 0
      if(demux->filepos <= demux->movi_start + 1 && dp->pts > 10.0){
	start_time = dp->pts;
      }
      dp->pts -= start_time;
#endif
      priv->last_pts= dp->pts * AV_TIME_BASE;
      // always set endpts for subtitles, even if PKT_FLAG_KEY is not set,
      // otherwise they will stay on screen to long if e.g. ASS is demuxed from mkv
        if((ds == demux->sub || (pkt.flags & PKT_FLAG_KEY)) &&
           pkt.convergence_duration > 0)
            dp->endpts = dp->pts + pkt.convergence_duration * av_q2d(priv->avfc->streams[id]->time_base);
    }
    dp->pos=demux->filepos;
    dp->flags= !!(pkt.flags&PKT_FLAG_KEY);
    // append packet to DS stream:
    ds_add_packet(ds,dp);
    return 1;
}
Example #22
static int demux_avs_fill_buffer(demuxer_t *demuxer, demux_stream_t *ds)
{
    AVS_VideoFrame *curr_frame;
    demux_packet_t *dp = NULL;
    AVS_T *AVS = demuxer->priv;

    if (ds == demuxer->video)
    {
        sh_video_t *sh_video = demuxer->video->sh;
        char *dst;
        int w, h;
        if (AVS->video_info->num_frames <= AVS->frameno) return 0; // EOF

        curr_frame = AVS->avs_get_frame(AVS->clip, AVS->frameno);
        if (!curr_frame)
        {
            mp_msg(MSGT_DEMUX, MSGL_V, "AVS: error getting frame -- EOF??\n");
            return 0;
        }
        w = curr_frame->row_size;
        h = curr_frame->height;

        dp = new_demux_packet(w * h + 2 * (w / 2) * (h / 2));

        dp->pts=AVS->frameno / sh_video->fps;

        dst = dp->buffer;
        memcpy_pic(dst, curr_frame->vfb->data + curr_frame->offset,
                   w, h, w, curr_frame->pitch);
        dst += w * h;
        w /= 2; h /= 2;
        memcpy_pic(dst, curr_frame->vfb->data + curr_frame->offsetV,
                   w, h, w, curr_frame->pitchUV);
        dst += w * h;
        memcpy_pic(dst, curr_frame->vfb->data + curr_frame->offsetU,
                   w, h, w, curr_frame->pitchUV);
        ds_add_packet(demuxer->video, dp);

        AVS->frameno++;
        AVS->avs_release_video_frame(curr_frame);
    }

    /* Audio */
    if (ds == demuxer->audio)
    {
        sh_audio_t *sh_audio = ds->sh;
        int samples = sh_audio->samplerate;
        uint64_t l;
        samples = FFMIN(samples, AVS->video_info->num_audio_samples - AVS->sampleno);
        if (!samples) return 0;
        l = samples * sh_audio->channels * sh_audio->samplesize;
        if (l > INT_MAX) {
            mp_msg(MSGT_DEMUX, MSGL_FATAL, "AVS: audio packet too big\n");
            return 0;
        }
        dp = new_demux_packet(l);
        dp->pts = AVS->sampleno / sh_audio->samplerate;

        if (AVS->avs_get_audio(AVS->clip, dp->buffer, AVS->sampleno, samples))
        {
            mp_msg(MSGT_DEMUX, MSGL_V, "AVS: avs_get_audio() failed\n");
            return 0;
        }
        ds_add_packet(demuxer->audio, dp);

        AVS->sampleno += samples;
    }

    return 1;
}
Example #23
File: demux_mng.c Project: azuwis/mplayer
/**
 * \brief MPlayer callback: Fill buffer from MNG stream.
 * \param[in] demuxer demuxer structure
 * \param[in] ds demuxer stream
 * \return \p 1 on success, \p 0 on error
 */
static int demux_mng_fill_buffer(demuxer_t * demuxer,
                                 demux_stream_t * ds)
{
    mng_priv_t * mng_priv = demuxer->priv;
    mng_handle h_mng = mng_priv->h_mng;
    mng_retcode mng_ret;
    demux_packet_t * dp;

    // exit if animation is finished
    if (mng_priv->finished)
        return 0;

    // advance animation to requested next show time
    while (mng_priv->anim_cur_time_ms + mng_priv->anim_frame_duration_ms
           <= mng_priv->show_next_time_ms && !mng_priv->finished) {

        // advance global and animation time
        mng_priv->global_time_ms += mng_priv->anim_frame_duration_ms;
        mng_priv->anim_cur_time_ms += mng_priv->anim_frame_duration_ms;

        // Clear variable MNG library will write number of milliseconds to
        // (via settimer callback).
        mng_priv->timer_ms = 0;

        // get next image from MNG library
        if (mng_priv->displaying)
            mng_ret = mng_display_resume(h_mng); // resume displaying MNG data
                                                 // to canvas
        else
            mng_ret = mng_display(h_mng); // start displaying MNG data to canvas
        if (mng_ret && mng_ret != MNG_NEEDTIMERWAIT) {
            mp_msg(MSGT_DEMUX, MSGL_ERR,
                   "demux_mng: could not display MNG data to canvas: "
                   "mng_retcode %d\n", mng_ret);
            return 0;
        }
        mng_priv->displaying = 1; // mng_display() has been called now
        mng_priv->finished   = mng_ret == 0; // animation is finished iff
                                             // mng_display() returned 0

        // save current frame duration
        mng_priv->anim_frame_duration_ms = mng_priv->timer_ms < 1
                                           ? 1 : mng_priv->timer_ms;

    } // while (mng_priv->anim_cur_time_ms + ...

    // create a new demuxer packet
    dp = new_demux_packet(mng_priv->height * mng_priv->width * 4);

    // copy image data into demuxer packet
    memcpy(dp->buffer, mng_priv->canvas,
           mng_priv->height * mng_priv->width * 4);

    // set current show time to requested show time
    mng_priv->show_cur_time_ms = mng_priv->show_next_time_ms;

    // get time of next frame to show
    mng_priv->show_next_time_ms = mng_priv->anim_cur_time_ms
                                + mng_priv->anim_frame_duration_ms;

    // Set position and timing information in demuxer video and demuxer packet.
    //  - Time must be time of next frame and always be > 0 for the variable
    //    frame time mechanism (GIF, MATROSKA, MNG) in video.c to work.
    demuxer->video->dpos++;
    dp->pts = (float)mng_priv->show_next_time_ms / 1000.0f + MNG_START_PTS;
    dp->pos = stream_tell(demuxer->stream);
    ds_add_packet(demuxer->video, dp);

    return 1;
}
Example #24
static int demux_aac_fill_buffer(demuxer_t *demuxer, demux_stream_t *ds)
{
	aac_priv_t *priv = (aac_priv_t *) demuxer->priv;
	demux_packet_t *dp;
	int c1, c2, len, srate, num;
	float tm = 0;

	if(demuxer->stream->eof || (demuxer->movi_end && stream_tell(demuxer->stream) >= demuxer->movi_end))
        	return 0;

	while(! demuxer->stream->eof)
	{
		c1 = c2 = 0;
		while(c1 != 0xFF)
		{
			c1 = stream_read_char(demuxer->stream);
			if(c1 < 0)
				return 0;
		}
		c2 = stream_read_char(demuxer->stream);
		if(c2 < 0)
			return 0;
		if((c2 & 0xF6) != 0xF0)
			continue;

		priv->buf[0] = (unsigned char) c1;
		priv->buf[1] = (unsigned char) c2;
		if(stream_read(demuxer->stream, &(priv->buf[2]), 6) < 6)
			return 0;

		len = aac_parse_frame(priv->buf, &srate, &num);
		if(len > 0)
		{
			dp = new_demux_packet(len);
			if(! dp)
			{
				mp_msg(MSGT_DEMUX, MSGL_ERR, "fill_buffer, NEW_ADD_PACKET(%d)FAILED\n", len);
				return 0;
			}


			memcpy(dp->buffer, priv->buf, 8);
			stream_read(demuxer->stream, &(dp->buffer[8]), len-8);
			if(srate)
				tm = (float) (num * 1024.0/srate);
			priv->last_pts += tm;
			dp->pts = priv->last_pts;
			//fprintf(stderr, "\nPTS: %.3f\n", dp->pts);
			ds_add_packet(demuxer->audio, dp);
			priv->size += len;
			priv->time += tm;

			priv->bitrate = (int) (priv->size / priv->time);
			demuxer->filepos = stream_tell(demuxer->stream);

			return len;
		}
		else
			stream_skip(demuxer->stream, -6);
	}

	return 0;
}
Example #25
static demux_packet_t* getBuffer(demuxer_t* demuxer, demux_stream_t* ds,
				 Boolean mustGetNewData,
				 float& ptsBehind) {
  // Begin by finding the buffer queue that we want to read from:
  // (Get this from the RTP state, which we stored in
  //  the demuxer's 'priv' field)
  RTPState* rtpState = (RTPState*)(demuxer->priv);
  ReadBufferQueue* bufferQueue = NULL;
  int headersize = 0;
  TaskToken task;

  if (demuxer->stream->eof) return NULL;

  if (ds == demuxer->video) {
    bufferQueue = rtpState->videoBufferQueue;
    if (((sh_video_t*)ds->sh)->format == mmioFOURCC('H','2','6','4'))
      headersize = 3;
  } else if (ds == demuxer->audio) {
    bufferQueue = rtpState->audioBufferQueue;
    if (bufferQueue->readSource()->isAMRAudioSource())
      headersize = 1;
  } else {
    fprintf(stderr, "(demux_rtp)getBuffer: internal error: unknown stream\n");
    return NULL;
  }

  if (bufferQueue == NULL || bufferQueue->readSource() == NULL) {
    fprintf(stderr, "(demux_rtp)getBuffer failed: no appropriate RTP subsession has been set up\n");
    return NULL;
  }

  demux_packet_t* dp = NULL;
  if (!mustGetNewData) {
    // Check whether we have a previously-saved buffer that we can use:
    dp = bufferQueue->getPendingBuffer();
    if (dp != NULL) {
      ptsBehind = 0.0; // so that we always accept this data
      return dp;
    }
  }

  // Allocate a new packet buffer, and arrange to read into it:
    if (!bufferQueue->nextpacket) {
  dp = new_demux_packet(MAX_RTP_FRAME_SIZE);
  bufferQueue->dp = dp;
  if (dp == NULL) return NULL;
    }

#ifdef CONFIG_LIBAVCODEC
  extern AVCodecParserContext * h264parserctx;
  int consumed, poutbuf_size = 1;
  const uint8_t *poutbuf = NULL;
  float lastpts = 0.0;

  do {
    if (!bufferQueue->nextpacket) {
#endif
  // Schedule the read operation:
  bufferQueue->blockingFlag = 0;
  bufferQueue->readSource()->getNextFrame(&dp->buffer[headersize], MAX_RTP_FRAME_SIZE - headersize,
					  afterReading, bufferQueue,
					  onSourceClosure, bufferQueue);
  // Block ourselves until data becomes available:
  TaskScheduler& scheduler
    = bufferQueue->readSource()->envir().taskScheduler();
  int delay = 10000000;
  if (bufferQueue->prevPacketPTS * 1.05 > rtpState->mediaSession->playEndTime())
    delay /= 10;
  task = scheduler.scheduleDelayedTask(delay, onSourceClosure, bufferQueue);
  scheduler.doEventLoop(&bufferQueue->blockingFlag);
  scheduler.unscheduleDelayedTask(task);
  if (demuxer->stream->eof) {
    free_demux_packet(dp);
    return NULL;
  }

  if (headersize == 1) // amr
    dp->buffer[0] =
        ((AMRAudioSource*)bufferQueue->readSource())->lastFrameHeader();
#ifdef CONFIG_LIBAVCODEC
    } else {
      bufferQueue->dp = dp = bufferQueue->nextpacket;
      bufferQueue->nextpacket = NULL;
    }
    if (headersize == 3 && h264parserctx) { // h264
      consumed = h264parserctx->parser->parser_parse(h264parserctx,
                               avcctx,
                               &poutbuf, &poutbuf_size,
                               dp->buffer, dp->len);

      if (!consumed && !poutbuf_size)
        return NULL;

      if (!poutbuf_size) {
        lastpts=dp->pts;
        free_demux_packet(dp);
        bufferQueue->dp = dp = new_demux_packet(MAX_RTP_FRAME_SIZE);
      } else {
        bufferQueue->nextpacket = dp;
        bufferQueue->dp = dp = new_demux_packet(poutbuf_size);
        memcpy(dp->buffer, poutbuf, poutbuf_size);
        dp->pts=lastpts;
      }
    }
  } while (!poutbuf_size);
#endif

  // Set the "ptsBehind" result parameter:
  if (bufferQueue->prevPacketPTS != 0.0
      && bufferQueue->prevPacketWasSynchronized
      && *(bufferQueue->otherQueue) != NULL
      && (*(bufferQueue->otherQueue))->prevPacketPTS != 0.0
      && (*(bufferQueue->otherQueue))->prevPacketWasSynchronized) {
    ptsBehind = (*(bufferQueue->otherQueue))->prevPacketPTS
		 - bufferQueue->prevPacketPTS;
  } else {
    ptsBehind = 0.0;
  }

  if (mustGetNewData) {
    // Save this buffer for future reads:
    bufferQueue->savePendingBuffer(dp);
  }

  return dp;
}
Example #26
static int demux_lavf_fill_buffer(demuxer_t *demux, demux_stream_t *dsds){
    lavf_priv_t *priv= demux->priv;
    AVPacket pkt;
    demux_packet_t *dp;
    demux_stream_t *ds;
    int id;
    mp_msg(MSGT_DEMUX,MSGL_DBG2,"demux_lavf_fill_buffer()\n");

    demux->filepos=stream_tell(demux->stream);

    if(av_read_frame(priv->avfc, &pkt) < 0)
        return 0;

    // handle any new streams that might have been added
    for (id = priv->nb_streams_last; id < priv->avfc->nb_streams; id++)
        handle_stream(demux, priv->avfc, id);
    priv->nb_streams_last = priv->avfc->nb_streams;

    id= pkt.stream_index;

    if(id==demux->audio->id){
        // audio
        ds=demux->audio;
        if(!ds->sh){
            ds->sh=demux->a_streams[id];
            mp_msg(MSGT_DEMUX,MSGL_V,"Auto-selected LAVF audio ID = %d\n",ds->id);
        }
    } else if(id==demux->video->id){
        // video
        ds=demux->video;
        if(!ds->sh){
            ds->sh=demux->v_streams[id];
            mp_msg(MSGT_DEMUX,MSGL_V,"Auto-selected LAVF video ID = %d\n",ds->id);
        }
    } else if(id==demux->sub->id){
        // subtitle
        ds=demux->sub;
        sub_utf8=1;
    } else {
        av_free_packet(&pkt);
        return 1;
    }

    if(pkt.destruct == av_destruct_packet && !CONFIG_MEMALIGN_HACK){
        dp=new_demux_packet(0);
        dp->len=pkt.size;
        dp->buffer=pkt.data;
        pkt.destruct= NULL;
    }else{
        dp=new_demux_packet(pkt.size);
        memcpy(dp->buffer, pkt.data, pkt.size);
        av_free_packet(&pkt);
    }

    if(pkt.pts != AV_NOPTS_VALUE){
        dp->pts=pkt.pts * av_q2d(priv->avfc->streams[id]->time_base);
        priv->last_pts= dp->pts * AV_TIME_BASE;
        if(pkt.duration > 0)
            dp->endpts = dp->pts + pkt.duration * av_q2d(priv->avfc->streams[id]->time_base);
        /* subtitle durations are sometimes stored in convergence_duration */
        if(ds == demux->sub && pkt.convergence_duration > 0)
            dp->endpts = dp->pts + pkt.convergence_duration * av_q2d(priv->avfc->streams[id]->time_base);
    }
    dp->pos=demux->filepos;
    dp->flags= !!(pkt.flags&AV_PKT_FLAG_KEY);
    // append packet to DS stream:
    ds_add_packet(ds,dp);
    return 1;
}
Example #27
static int pva_get_payload(demuxer_t *d, pva_payload_t *payload)
{
	uint8_t flags,pes_head_len;
	uint16_t pack_size;
	off_t next_offset,pva_payload_start;
	unsigned char buffer[256];
#ifndef PVA_NEW_PREBYTES_CODE
	demux_packet_t * dp; 	//hack to deliver the preBytes (see PVA doc)
#endif
	pva_priv_t * priv;


	if(d==NULL)
	{
		mp_msg(MSGT_DEMUX,MSGL_ERR,"demux_pva: pva_get_payload got passed a NULL pointer!\n");
		return 0;
	}

	priv = (pva_priv_t *)d->priv;
	d->filepos=stream_tell(d->stream);




	if(d->stream->eof)
	{
		mp_msg(MSGT_DEMUX,MSGL_V,"demux_pva: pva_get_payload() detected stream->eof!!!\n");
		return 0;
	}

	//printf("priv->just_synced %s\n",priv->just_synced?"SET":"UNSET");

#ifdef PVA_NEW_PREBYTES_CODE
	if(priv->prebytes_delivered)
		/* The previous call to this fn has delivered the preBytes. Then we are already inside
		 * the payload. Let's just deliver the video along with its right PTS, the one we stored
		 * in the priv structure and was in the PVA header before the PreBytes.
		 */
	{
		//printf("prebytes_delivered=1. Resetting.\n");
		payload->size = priv->video_size_after_prebytes;
		payload->pts = priv->video_pts_after_prebytes;
		payload->is_packet_start = 1;
		payload->offset = stream_tell(d->stream);
		payload->type = VIDEOSTREAM;
		priv->prebytes_delivered = 0;
		return 1;
	}
#endif
	if(!priv->just_synced)
	{
		if(stream_read_word(d->stream) != (('A'<<8)|'V'))
		{
			mp_msg(MSGT_DEMUX,MSGL_V,"demux_pva: pva_get_payload() missed a SyncWord at %"PRId64"!! Trying to sync...\n",(int64_t)stream_tell(d->stream));
			if(!pva_sync(d))
			{
				if (!d->stream->eof)
				{
					mp_msg(MSGT_DEMUX,MSGL_ERR,"demux_pva: couldn't sync! (broken file?)");
				}
				return 0;
			}
		}
	}
	if(priv->just_synced)
	{
		payload->type=priv->synced_stream_id;
		priv->just_synced=0;
	}
	else
	{
		payload->type=stream_read_char(d->stream);
		stream_skip(d->stream,2); //counter and reserved
	}
	flags=stream_read_char(d->stream);
	payload->is_packet_start=flags & 0x10;
	pack_size=stream_read_word(d->stream);
	mp_msg(MSGT_DEMUX,MSGL_DBG2,"demux_pva::pva_get_payload(): pack_size=%u field read at offset %"PRIu64"\n",pack_size,(int64_t)stream_tell(d->stream)-2);
	pva_payload_start=stream_tell(d->stream);
	next_offset=pva_payload_start+pack_size;


	/*
	 * The code in the #ifdef directive below is a hack needed to get badly formatted PVA files
	 * such as the ones written by MultiDec played back correctly.
	 * Basically, it works like this: if the PVA packet does not signal a PES header, but the
	 * payload looks like one, let's assume it IS one. It has worked for me up to now.
	 * It can be disabled since it's quite an ugly hack and could potentially break things up
	 * if the PVA audio payload happens to start with 0x000001 even without being a non signalled
	 * PES header start.
	 * Though it's quite unlikely, it potentially could (AFAIK).
	 */
#ifdef DEMUX_PVA_MULTIDEC_HACK
	if(payload->type==MAINAUDIOSTREAM)
	{
		stream_read(d->stream,buffer,3);
		if(buffer[0]==0x00 && buffer[1]==0x00 && buffer[2]==0x01 && !payload->is_packet_start)
		{
			mp_msg(MSGT_DEMUX,MSGL_V,"demux_pva: suspecting non signaled audio PES packet start. Maybe file by MultiDec?\n");
			payload->is_packet_start=1;
		}
		stream_seek(d->stream,stream_tell(d->stream)-3);
	}
#endif


	if(!payload->is_packet_start)
	{
		payload->offset=stream_tell(d->stream);
		payload->size=pack_size;
	}
	else
	{	//here comes the good part...
		switch(payload->type)
		{
			case VIDEOSTREAM:
				payload->pts=(float)(stream_read_dword(d->stream))/90000;
				//printf("Video PTS: %f\n",payload->pts);
				if((flags&0x03)
#ifdef PVA_NEW_PREBYTES_CODE
						&& !priv->prebytes_delivered
#endif
						)
				{
#ifndef PVA_NEW_PREBYTES_CODE
					dp=new_demux_packet(flags&0x03);
					stream_read(d->stream,dp->buffer,flags & 0x03); //read PreBytes
					ds_add_packet(d->video,dp);
#else
					//printf("Delivering prebytes. Setting prebytes_delivered.");
					payload->offset=stream_tell(d->stream);
					payload->size = flags & 0x03;
					priv->video_pts_after_prebytes = payload->pts;
					priv->video_size_after_prebytes = pack_size - 4 - (flags & 0x03);
					payload->pts=priv->last_video_pts;
					payload->is_packet_start=0;
					priv->prebytes_delivered=1;
					return 1;
#endif
				}


				//now we are at real beginning of payload.
				payload->offset=stream_tell(d->stream);
				//size is pack_size minus PTS size minus PreBytes size.
				payload->size=pack_size - 4 - (flags & 0x03);
				break;
			case MAINAUDIOSTREAM:
				stream_skip(d->stream,3); //FIXME properly parse PES header.
				//printf("StreamID in audio PES header: 0x%2X\n",stream_read_char(d->stream));
				stream_skip(d->stream,4);

				buffer[255]=stream_read_char(d->stream);
				pes_head_len=stream_read_char(d->stream);
				stream_read(d->stream,buffer,pes_head_len);
				if(!(buffer[255]&0x80)) //PES header does not contain PTS.
				{
					mp_msg(MSGT_DEMUX,MSGL_V,"Audio PES packet does not contain PTS. (pes_head_len=%d)\n",pes_head_len);
					payload->pts=priv->last_audio_pts;
					break;
				}
				else		//PES header DOES contain PTS
				{
					if((buffer[0] & 0xf0)!=0x20) // PTS badly formatted
					{
						mp_msg(MSGT_DEMUX,MSGL_V,"demux_pva: expected audio PTS but badly formatted... (read 0x%02X). Falling back to previous PTS (hack).\n",buffer[0]);
						payload->pts=priv->last_audio_pts;
					//	return 0;
					}
					else
					{
						uint64_t temp_pts;

						temp_pts=0LL;
						temp_pts|=((uint64_t)(buffer[0] & 0x0e) << 29);
						temp_pts|=buffer[1]<<22;
						temp_pts|=(buffer[2] & 0xfe) << 14;
						temp_pts|=buffer[3]<<7;
						temp_pts|=(buffer[4] & 0xfe) >> 1;
						/*
					 	* PTS parsing is hopefully finished.
					 	*/
						payload->pts=(float)temp_pts/90000;
					}
				}
				payload->offset=stream_tell(d->stream);
				payload->size=pack_size-stream_tell(d->stream)+pva_payload_start;
				break;
		}
	}
	return 1;
}
Example #28
// 0 = EOF or no stream found
// 1 = successfully read a packet
static int demux_pva_fill_buffer (demuxer_t * demux, demux_stream_t *ds)
{
	uint8_t done=0;
	demux_packet_t * dp;
	pva_priv_t * priv=demux->priv;
	pva_payload_t current_payload;

	while(!done)
	{
		if(!pva_get_payload(demux,&current_payload)) return 0;
		switch(current_payload.type)
		{
			case VIDEOSTREAM:
				if(demux->video->id==-1) demux->video->id=0;
				if(!current_payload.is_packet_start && priv->last_video_pts==-1)
				{
					/* We should only be here at the beginning of a stream, when we have
					 * not yet encountered a valid Video PTS, or after a seek.
					 * So, skip these starting packets in order not to deliver the
					 * player a bogus PTS.
					 */
					done=0;
				}
				else
				{
					/*
					 * In every other condition, we are delivering the payload. Set this
					 * so that the following code knows whether to skip it or read it.
					 */
					done=1;
				}
				if(demux->video->id!=0) done=0;
				if(current_payload.is_packet_start)
				{
					priv->last_video_pts=current_payload.pts;
					//mp_msg(MSGT_DEMUXER,MSGL_DBG2,"demux_pva: Video PTS=%llu , delivered %f\n",current_payload.pts,priv->last_video_pts);
				}
				if(done)
				{
					dp=new_demux_packet(current_payload.size);
					dp->pts=priv->last_video_pts;
					stream_read(demux->stream,dp->buffer,current_payload.size);
					ds_add_packet(demux->video,dp);
				}
				else
				{
					//printf("Skipping %u video bytes\n",current_payload.size);
					stream_skip(demux->stream,current_payload.size);
				}
				break;
			case MAINAUDIOSTREAM:
				if(demux->audio->id==-1) demux->audio->id=0;
				if(!current_payload.is_packet_start && priv->last_audio_pts==-1)
				{
					/* Same as above for invalid video PTS, just for audio. */
					done=0;
				}
				else
				{
					done=1;
				}
				if(current_payload.is_packet_start)
				{
					priv->last_audio_pts=current_payload.pts;
				}
				if(demux->audio->id!=0) done=0;
				if(done)
				{
					dp=new_demux_packet(current_payload.size);
					dp->pts=priv->last_audio_pts;
					if(current_payload.offset != stream_tell(demux->stream))
						stream_seek(demux->stream,current_payload.offset);
					stream_read(demux->stream,dp->buffer,current_payload.size);
					ds_add_packet(demux->audio,dp);
				}
				else
				{
					stream_skip(demux->stream,current_payload.size);
				}
				break;
		}
	}
	return 1;
}
Example #29
// return value:
//     0 = EOF or no stream found
//     1 = successfully read a packet
static int demux_vivo_fill_buffer(demuxer_t *demux, demux_stream_t *dsds){
  demux_stream_t *ds=NULL;
  int c;
  int len=0;
  int seq;
  int prefix=0;
  demux->filepos=stream_tell(demux->stream);
  
  c=stream_read_char(demux->stream);
  if (c == -256) /* EOF */
    return 0;
//  printf("c=%x,%02X\n",c,c&0xf0);
  if (c == 0x82)
  {
      /* ok, this works, but pts calculating from header is required! */
#warning "Calculate PTS from picture header!"
      prefix = 1;
      c = stream_read_char(demux->stream);
      mp_msg(MSGT_DEMUX, MSGL_V, "packet 0x82(pos=%u) chunk=%x\n",
        (int)stream_tell(demux->stream), c);
  }
  switch(c&0xF0){
  case 0x00: // header - skip it!
  {
      len=stream_read_char(demux->stream);
      if(len>=0x80) len=0x80*(len-0x80)+stream_read_char(demux->stream);
      mp_msg(MSGT_DEMUX, MSGL_V, "vivo extra header: %d bytes\n",len);
#ifdef TEXTPARSE_ALL
{
      int pos;
      /* also try to parse all headers */
      pos = stream_tell(demux->stream);
      vivo_parse_text_header(demux, len);
      stream_seek(demux->stream, pos);
}
#endif
      break;
  }
  case 0x10:  // video packet
      if (prefix == 1)
        len = stream_read_char(demux->stream);
      else
        len=128;
      ds=demux->video;
      break;
  case 0x20:  // video packet
      len=stream_read_char(demux->stream);
      ds=demux->video;
      break;
  case 0x30:  // audio packet
      if (prefix == 1)
        len = stream_read_char(demux->stream);
      else
        len=40;	/* 40kbps */
      ds=demux->audio;
      audio_pos+=len;
      break;
  case 0x40:  // audio packet
      if (prefix == 1)
        len = stream_read_char(demux->stream);
      else
        len=24;	/* 24kbps */
      ds=demux->audio;
      audio_pos+=len;
      break;
  default:
      mp_msg(MSGT_DEMUX,MSGL_WARN,"VIVO - unknown ID found: %02X at pos %"PRIu64" contact author!\n",
        c, (int64_t)stream_tell(demux->stream));
      return 0;
  }

//  printf("chunk=%x, len=%d\n", c, len);
  
  if(!ds || ds->id<-1){
      if(len) stream_skip(demux->stream,len);
      return 1;
  }
  
  seq=c&0x0F;

    if(ds->asf_packet){
      if(ds->asf_seq!=seq){
        // closed segment, finalize packet:
        ds_add_packet(ds,ds->asf_packet);
        ds->asf_packet=NULL;
//	printf("packet!\n");
      } else {
        // append data to it!
        demux_packet_t* dp=ds->asf_packet;
        if(dp->len + len + FF_INPUT_BUFFER_PADDING_SIZE < 0)
	    return 0;
        dp->buffer=realloc(dp->buffer,dp->len+len+FF_INPUT_BUFFER_PADDING_SIZE);
        memset(dp->buffer+dp->len+len, 0, FF_INPUT_BUFFER_PADDING_SIZE);
        //memcpy(dp->buffer+dp->len,data,len);
	stream_read(demux->stream,dp->buffer+dp->len,len);
        mp_dbg(MSGT_DEMUX,MSGL_DBG4,"data appended! %d+%d\n",dp->len,len);
        dp->len+=len;
        // we are ready now.
	if((c&0xF0)==0x20) --ds->asf_seq; // hack!
        return 1;
      }
    }
    // create new packet:
    { demux_packet_t* dp;
      dp=new_demux_packet(len);
      //memcpy(dp->buffer,data,len);
      stream_read(demux->stream,dp->buffer,len);
      dp->pts=audio_rate?((float)audio_pos/(float)audio_rate):0;
//      dp->flags=keyframe;
//      if(ds==demux->video) printf("ASF time: %8d  dur: %5d  \n",time,dur);
      dp->pos=demux->filepos;
      ds->asf_packet=dp;
      ds->asf_seq=seq;
      // we are ready now.
      return 1;
    }

}
Example #30
static int
demux_mkv_open_audio (FileInfo *finfo, mkv_track_t *track)
{
  WAVEFORMATEX *finfowf = &finfo->wf;

  if (track->ms_compat && (track->private_size >= sizeof(WAVEFORMATEX)))
    {
      WAVEFORMATEX *wf = (WAVEFORMATEX *)track->private_data;
      finfowf->wFormatTag = le2me_16 (wf->wFormatTag);
      finfowf->nChannels = le2me_16 (wf->nChannels);
      finfowf->nSamplesPerSec = le2me_32 (wf->nSamplesPerSec);
      finfowf->nAvgBytesPerSec = le2me_32 (wf->nAvgBytesPerSec);
      finfowf->nBlockAlign = le2me_16 (wf->nBlockAlign);
      finfowf->wBitsPerSample = le2me_16 (wf->wBitsPerSample);
      if (track->a_sfreq == 0.0)
        track->a_sfreq = finfowf->nSamplesPerSec;
      if (track->a_channels == 0)
        track->a_channels = finfowf->nChannels;
      if (track->a_bps == 0)
        track->a_bps = finfowf->wBitsPerSample;
      track->a_formattag = finfowf->wFormatTag;
    }
  else
    {
      memset(finfowf, 0, sizeof (WAVEFORMATEX));
      if (!strcmp(track->codec_id, MKV_A_MP3) ||
          !strcmp(track->codec_id, MKV_A_MP2))
        track->a_formattag = 0x0055;
      else if (!strncmp(track->codec_id, MKV_A_AC3, strlen(MKV_A_AC3)))
        track->a_formattag = 0x2000;
      else if (!strncmp(track->codec_id, MKV_A_EAC3, strlen(MKV_A_EAC3)))
        track->a_formattag = AUDIO_EAC3;
      else if (!strcmp(track->codec_id, MKV_A_DTS))
	  {
        track->a_formattag = 0x2001;
        //dts_packet = 1;
	  }
      else if (!strcmp(track->codec_id, MKV_A_PCM) ||
               !strcmp(track->codec_id, MKV_A_PCM_BE))
        track->a_formattag = 0x0001;
      else if (!strcmp(track->codec_id, MKV_A_AAC_2MAIN) ||
               !strncmp(track->codec_id, MKV_A_AAC_2LC,
                        strlen(MKV_A_AAC_2LC)) ||
               !strcmp(track->codec_id, MKV_A_AAC_2SSR) ||
               !strcmp(track->codec_id, MKV_A_AAC_4MAIN) ||
               !strncmp(track->codec_id, MKV_A_AAC_4LC,
                        strlen(MKV_A_AAC_4LC)) ||
               !strcmp(track->codec_id, MKV_A_AAC_4SSR) ||
               !strcmp(track->codec_id, MKV_A_AAC_4LTP) ||
               !strcmp(track->codec_id, MKV_A_AAC))
        track->a_formattag = mmioFOURCC('M', 'P', '4', 'A');
      else if (!strcmp(track->codec_id, MKV_A_VORBIS))
        {
          if (track->private_data == NULL)
            return 1;
          track->a_formattag = mmioFOURCC('v', 'r', 'b', 's');
        }
      else if (!strcmp(track->codec_id, MKV_A_QDMC))
        track->a_formattag = mmioFOURCC('Q', 'D', 'M', 'C');
      else if (!strcmp(track->codec_id, MKV_A_QDMC2))
        track->a_formattag = mmioFOURCC('Q', 'D', 'M', '2');
      else if (!strcmp(track->codec_id, MKV_A_WAVPACK))
        track->a_formattag = mmioFOURCC('W', 'V', 'P', 'K');
      else if (!strcmp(track->codec_id, MKV_A_TRUEHD))
        track->a_formattag = mmioFOURCC('T', 'R', 'H', 'D');
      else if (!strcmp(track->codec_id, MKV_A_FLAC))
        {
          if (track->private_data == NULL || track->private_size == 0)
            {
              mp_msg (MSGT_DEMUX, MSGL_WARN,
                      MSGTR_MPDEMUX_MKV_FlacTrackDoesNotContainValidHeaders);
              return 1;
            }
          track->a_formattag = mmioFOURCC ('f', 'L', 'a', 'C');
        }
      else if (track->private_size >= RAPROPERTIES4_SIZE)
        {
          if (!strcmp(track->codec_id, MKV_A_REAL28))
            track->a_formattag = mmioFOURCC('2', '8', '_', '8');
          else if (!strcmp(track->codec_id, MKV_A_REALATRC))
            track->a_formattag = mmioFOURCC('a', 't', 'r', 'c');
          else if (!strcmp(track->codec_id, MKV_A_REALCOOK))
            track->a_formattag = mmioFOURCC('c', 'o', 'o', 'k');
          else if (!strcmp(track->codec_id, MKV_A_REALDNET))
            track->a_formattag = mmioFOURCC('d', 'n', 'e', 't');
          else if (!strcmp(track->codec_id, MKV_A_REALSIPR))
            track->a_formattag = mmioFOURCC('s', 'i', 'p', 'r');
        }
      else
        {
          mp_msg (MSGT_DEMUX, MSGL_WARN, MSGTR_MPDEMUX_MKV_UnknownAudioCodec,
                  track->codec_id, track->tnum);
          return 1;
        }
    }

  finfowf->wFormatTag = track->a_formattag;
  finfowf->nChannels = track->a_channels;
  finfowf->nSamplesPerSec = (uint32_t) track->a_sfreq;
  if (track->a_bps == 0)
    {
      finfowf->wBitsPerSample = 16;
    }
  else
    {
      finfowf->wBitsPerSample = track->a_bps;
    }
  if (track->a_formattag == 0x0055)  /* MP3 || MP2 */
    {
      finfowf->nAvgBytesPerSec = 16000;
      finfowf->nBlockAlign = 1152;
    }
  else if (track->a_formattag == 0x2000 )/* AC3 */
    {
    }
  else if (track->a_formattag == AUDIO_EAC3 )/* EAC3 */
    {
    }
  else if (track->a_formattag == 0x2001) /* DTS */
    {
      //dts_packet = 1;
    }
  else if (track->a_formattag == 0x0001)  /* PCM || PCM_BE */
    {
      finfowf->nAvgBytesPerSec = track->a_channels * track->a_sfreq * 2;
      finfowf->nBlockAlign = finfowf->nAvgBytesPerSec;
	  /*
      if (!strcmp(track->codec_id, MKV_A_PCM_BE))
        sh_a->format = mmioFOURCC('t', 'w', 'o', 's');
		*/
    }
  else if (!strcmp(track->codec_id, MKV_A_QDMC) ||
           !strcmp(track->codec_id, MKV_A_QDMC2))
    {
      finfowf->nAvgBytesPerSec = 16000;
      finfowf->nBlockAlign = 1486;
      track->fix_i_bps = 1;
      track->qt_last_a_pts = 0.0;
    }
  else if (track->a_formattag == mmioFOURCC('M', 'P', '4', 'A'))
    {
      int profile, srate_idx;

      finfowf->nAvgBytesPerSec = 16000;
      finfowf->nBlockAlign = 1024;

      if (!strcmp (track->codec_id, MKV_A_AAC) &&
          (NULL != track->private_data))
        {
          return 0;
        }

      /* Recreate the 'private data' */
      /* which faad2 uses in its initialization */
      srate_idx = aac_get_sample_rate_index (track->a_sfreq);
      if (!strncmp (&track->codec_id[12], "MAIN", 4))
        profile = 0;
      else if (!strncmp (&track->codec_id[12], "LC", 2))
        profile = 1;
      else if (!strncmp (&track->codec_id[12], "SSR", 3))
        profile = 2;
      else
        profile = 3;

      if (strstr(track->codec_id, "SBR") != NULL)
        {
          /* HE-AAC (aka SBR AAC) */
          track->default_duration = 1024.0 / finfowf->nSamplesPerSec;
          finfowf->nSamplesPerSec *= 2;
        }
      else
        {
          track->default_duration = 1024.0 / (float)finfowf->nSamplesPerSec;
        }
    }
  else if (track->a_formattag == mmioFOURCC('v', 'r', 'b', 's'))  /* VORBIS */
    {
      //finfowf->cbSize = track->private_size;
    }
  else if (track->private_size >= RAPROPERTIES4_SIZE
           && !strncmp (track->codec_id, MKV_A_REALATRC, 7))
    {
      /* Common initialization for all RealAudio codecs */
      unsigned char *src = (unsigned char *)track->private_data;
      int codecdata_length, version;
      int flavor;

      finfowf->nAvgBytesPerSec = 0;  /* FIXME !? */

      version = AV_RB16(src + 4);
      flavor = AV_RB16(src + 22);
      track->coded_framesize = AV_RB32(src + 24);
      track->sub_packet_h = AV_RB16(src + 40);
      finfowf->nBlockAlign =
      track->audiopk_size = AV_RB16(src + 42);
      track->sub_packet_size = AV_RB16(src + 44);
      if (version == 4)
        {
          src += RAPROPERTIES4_SIZE;
          src += src[0] + 1;
          src += src[0] + 1;
        }
      else
        src += RAPROPERTIES5_SIZE;

      src += 3;
      if (version == 5)
        src++;
      codecdata_length = AV_RB32(src);
      src += 4;

      switch (track->a_formattag) {
        case mmioFOURCC('a', 't', 'r', 'c'):
          finfowf->nAvgBytesPerSec = atrc_fl2bps[flavor];
          finfowf->nBlockAlign = track->sub_packet_size;
          break;
        case mmioFOURCC('c', 'o', 'o', 'k'):
          finfowf->nAvgBytesPerSec = cook_fl2bps[flavor];
          finfowf->nBlockAlign = track->sub_packet_size;
          break;
        case mmioFOURCC('s', 'i', 'p', 'r'):
          finfowf->nAvgBytesPerSec = sipr_fl2bps[flavor];
          finfowf->nBlockAlign = track->coded_framesize;
          break;
        case mmioFOURCC('2', '8', '_', '8'):
          finfowf->nAvgBytesPerSec = 3600;
          finfowf->nBlockAlign = track->coded_framesize;
          break;
      }

      track->realmedia = 1;
    }
  else if (!strcmp(track->codec_id, MKV_A_FLAC) ||
           (track->a_formattag == 0xf1ac))
    {
      unsigned char *ptr;
      int size;
#if 1	//Fuchun 2010.06.23
	if(track->a_formattag == mmioFOURCC('f', 'L', 'a', 'C'))
	{
		ptr = (unsigned char *)track->private_data;
		size = track->private_size;
	}
	else
	{
		//sh_a->format = mmioFOURCC('f', 'L', 'a', 'C');
		ptr = (unsigned char *) track->private_data + sizeof (WAVEFORMATEX);
		size = track->private_size - sizeof (WAVEFORMATEX);
	}
	if(size < 4 || ptr[0] != 'f' || ptr[1] != 'L' ||ptr[2] != 'a' || ptr[3] != 'C')
	{
		//finfowf->cbSize = 4;
	}
	else
	{
		//finfowf->cbSize = size;
	}
#else
      free(finfowf);
      finfowf = NULL;

      if (track->a_formattag == mmioFOURCC('f', 'L', 'a', 'C'))
        {
          ptr = (unsigned char *)track->private_data;
          size = track->private_size;
        }
      else
        {
          sh_a->format = mmioFOURCC('f', 'L', 'a', 'C');
          ptr = (unsigned char *) track->private_data
            + sizeof (WAVEFORMATEX);
          size = track->private_size - sizeof (WAVEFORMATEX);
        }
      if (size < 4 || ptr[0] != 'f' || ptr[1] != 'L' ||
          ptr[2] != 'a' || ptr[3] != 'C')
        {
          dp = new_demux_packet (4);
          memcpy (dp->buffer, "fLaC", 4);
        }
      else
        {
          dp = new_demux_packet (size);
          memcpy (dp->buffer, ptr, size);
        }
      dp->pts = 0;
      dp->flags = 0;
      ds_add_packet (demuxer->audio, dp);
#endif
    }
  else if (track->a_formattag == mmioFOURCC('W', 'V', 'P', 'K') ||
           track->a_formattag == mmioFOURCC('T', 'R', 'H', 'D'))
    {  /* do nothing, still works */  }
  else if (!track->ms_compat || (track->private_size < sizeof(WAVEFORMATEX)))
    {
      return 1;
    }

  return 0;
}