int libavsmash_video_find_first_valid_frame
(
    libavsmash_video_decode_handler_t *vdhp
)
{
    codec_configuration_t *config = &vdhp->config;
    for( uint32_t i = 1; i <= vdhp->sample_count + get_decoder_delay( config->ctx ); i++ )
    {
        AVPacket pkt = { 0 };
        get_sample( vdhp->root, vdhp->track_id, i, config, &pkt );
        av_frame_unref( vdhp->frame_buffer );
        int got_picture;
        if( decode_video_packet( config->ctx, vdhp->frame_buffer, &got_picture, &pkt ) >= 0 && got_picture )
        {
            vdhp->first_valid_frame_number = i - MIN( get_decoder_delay( config->ctx ), config->delay_count );
            if( vdhp->first_valid_frame_number > 1 || vdhp->sample_count == 1 )
            {
                vdhp->first_valid_frame = av_frame_clone( vdhp->frame_buffer );
                if( !vdhp->first_valid_frame )
                    return -1;
                av_frame_unref( vdhp->frame_buffer );
            }
            break;
        }
        else if( pkt.data )
            ++ config->delay_count;
    }
    return 0;
}
static int decode_video_sample
(
    libavsmash_video_decode_handler_t *vdhp,
    AVFrame                           *picture,
    int                               *got_picture,
    uint32_t                           sample_number
)
{
    codec_configuration_t *config = &vdhp->config;
    AVPacket pkt = { 0 };
    int ret = get_sample( vdhp->root, vdhp->track_id, sample_number, config, &pkt );
    if( ret )
        return ret;
    if( pkt.flags != ISOM_SAMPLE_RANDOM_ACCESS_FLAG_NONE )
    {
        pkt.flags = AV_PKT_FLAG_KEY;
        vdhp->last_rap_number = sample_number;
    }
    else
        pkt.flags = 0;
    av_frame_unref( picture );
    uint64_t cts = pkt.pts;
    ret = decode_video_packet( config->ctx, picture, got_picture, &pkt );
    picture->pts = cts;
    if( ret < 0 )
    {
        lw_log_show( &config->lh, LW_LOG_WARNING, "Failed to decode a video frame." );
        return -1;
    }
    return 0;
}
static int get_picture
(
    libavsmash_video_decode_handler_t *vdhp,
    AVFrame                           *picture,
    uint32_t                           current,
    uint32_t                           goal
)
{
    codec_configuration_t *config = &vdhp->config;
    int got_picture = (current > goal);
    while( current <= goal )
    {
        int ret = decode_video_sample( vdhp, picture, &got_picture, current );
        if( ret == -1 )
            return -1;
        else if( ret == 1 )
            /* Sample doesn't exist. */
            break;
        ++current;
        if( config->update_pending )
            /* A new decoder configuration is needed; stop getting the picture here. */
            break;
        if( !got_picture )
        {
            /* The fundamental seek operation after decoder initialization is already done,
             * but more input samples are required before any output, so the goal becomes more distant. */
            ++ config->delay_count;
            ++ goal;
        }
    }
    /* Flush the last frames. */
    if( current > vdhp->sample_count && get_decoder_delay( config->ctx ) )
        while( current <= goal )
        {
            AVPacket pkt = { 0 };
            av_init_packet( &pkt );
            pkt.data = NULL;
            pkt.size = 0;
            av_frame_unref( picture );
            if( decode_video_packet( config->ctx, picture, &got_picture, &pkt ) < 0 )
            {
                lw_log_show( &config->lh, LW_LOG_WARNING, "Failed to decode and flush a video frame." );
                return -1;
            }
            ++current;
        }
    return got_picture ? 0 : -1;
}
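
Note: the example above calls get_sample() and decode_video_packet() without showing their definitions. As a rough sketch of what a helper with that shape could look like on top of FFmpeg's send/receive decode API (the name, signature, and error handling below are assumptions for illustration, not the project's actual implementation):

#include <libavcodec/avcodec.h>

/* Hypothetical sketch only: built on avcodec_send_packet()/avcodec_receive_frame(),
 * with EAGAIN handling deliberately simplified. A NULL/empty packet acts as a flush packet. */
static int decode_video_packet( AVCodecContext *ctx, AVFrame *frame, int *got_picture, AVPacket *pkt )
{
    *got_picture = 0;
    int ret = avcodec_send_packet( ctx, pkt );
    if( ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF )
        return ret;                     /* Feeding the decoder failed. */
    ret = avcodec_receive_frame( ctx, frame );
    if( ret == 0 )
        *got_picture = 1;               /* One decoded frame is now available. */
    else if( ret != AVERROR(EAGAIN) && ret != AVERROR_EOF )
        return ret;                     /* Genuine decode error. */
    return 0;                           /* No frame yet, but not an error either. */
}

A caller such as get_picture() above only needs the got_picture flag and a non-negative return on success, which this sketch preserves.
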
Example no. 4
void *VideoLayer::feed() {
  int got_picture=0;
  int len1=0;
  int ret=0;
  bool got_it=false;

  double now = get_master_clock();


  if(paused)
    return rgba_picture->data[0];

  /**
   * follow user video loop
   */
  if(mark_in!=NO_MARK && mark_out!=NO_MARK && seekable) {
    if (now >= mark_out)
      seek((int64_t)mark_in * AV_TIME_BASE);
  }
  
  // operate seek if was requested
  if(to_seek>=0) {
    seek(to_seek);
    to_seek = -1;
  }
    
  got_it=false;
  
  while (!got_it) {
    
    
    if(packet_len<=0) {
      /**
       * Read one packet from the media and put it in pkt
       */
      while(1) {
#ifdef DEBUG
	func("av_read_frame ...");
#endif
	ret = av_read_frame(avformat_context, &pkt);

#ifdef DEBUG
	if(pkt.stream_index == video_index)
	  std::cout << "video read packet";
	else if(pkt.stream_index == audio_index)
	  std::cout << "audio read packet";
	std::cout << " pkt.data=" << pkt.data;
	std::cout << " pkt.size=" << pkt.size;
	std::cout << " pkt.pts/dts=" << pkt.pts << "/" << pkt.dts << std::endl;
	std::cout << "pkt.duration=" << pkt.duration;
	std::cout << " avformat_context->start_time=" << avformat_context->start_time;
	std::cout << " avformat_context->duration=" << avformat_context->duration/AV_TIME_BASE << std::endl;
	std::cout << "avformat_context->duration=" << avformat_context->duration << std::endl;
#endif
	
	/* TODO(shammash): this may be good for streams but breaks
	 * looping in files, needs fixing. */
	// 	      if(!pkt.duration) continue;
	
	// 	      if(!pkt.size || !pkt.data) {
	// 		return NULL;
	// 	      }
	
	
	/**
	 * check eof and loop
	 */
	if(ret != 0) {	// not entered while data is still available
	  eos->notify();
	  //	  eos->dispatcher->do_jobs(); /// XXX hack hack hack
	  ret = seek(avformat_context->start_time);
	  if (ret < 0) {
	    error("VideoLayer::could not loop file");
	    return rgba_picture->data[0];
	  }
	  continue;
	} else if( (pkt.stream_index == video_index) 
		   || (pkt.stream_index == audio_index) )
	  break; /* exit loop */
      }
    } // the inner loop breaks once a packet from a known stream index is found
    
    
    frame_number++;
    //std::cout << "frame_number :" << frame_number << std::endl;
    
    /**
     * Decode video
     */
    if(pkt.stream_index == video_index) {
      
      len1 = decode_video_packet(&got_picture);
      
      AVFrame *yuv_picture=&av_frame;
      if(len1<0) {
	//	  error("VideoLayer::Error while decoding frame");
	func("one frame only?");
	return NULL;
      }
      else if (len1 == 0) {
	packet_len=0;
	return NULL;
      }
      
      /**
       * We've found a picture
       */
      ptr += len1;
      packet_len -= len1;
      if (got_picture!=0) {
	got_it=true;
	avformat_stream=avformat_context->streams[video_index];
	
	/** Deinterlace input if requested */
	if(deinterlaced)
	  deinterlace((AVPicture *)yuv_picture);
	
#ifdef WITH_SWSCALE
	sws_scale(img_convert_ctx, yuv_picture->data, yuv_picture->linesize,
		  0, video_codec_ctx->height,
		  rgba_picture->data, rgba_picture->linesize);	  
#else
	/**
	 * yuv2rgb
	 */
	img_convert(rgba_picture, PIX_FMT_RGB32, (AVPicture *)yuv_picture,
		    video_codec_ctx->pix_fmt, 
		    //avformat_stream.codec->pix_fmt,
		    video_codec_ctx->width,
		    video_codec_ctx->height);
#endif
	// memcpy(frame_fifo.picture[fifo_position % FIFO_SIZE]->data[0],rgba_picture->data[0],geo.size);
	/* TODO move */
	if(fifo_position == FIFO_SIZE)
	  fifo_position=0;
	
	/* workaround: the sws_scale conversion from YUV
	   returns an RGBA buffer with alpha set to 0x0 */
	{
	  register int bufsize = ( rgba_picture->linesize[0] * video_codec_ctx->height ) /4;
	  int32_t *pbuf =  (int32_t*)rgba_picture->data[0];
	  
	  for(; bufsize>0; bufsize--) {
	    *pbuf = (*pbuf | alpha_bitmask);
	    pbuf++;
	  }
	} 
	
	jmemcpy(frame_fifo.picture[fifo_position]->data[0],
		rgba_picture->data[0],
		rgba_picture->linesize[0] * video_codec_ctx->height);
	
	//			    avpicture_get_size(PIX_FMT_RGBA32, enc->width, enc->height));
	fifo_position++;
      }
    } // end video packet decoding
    

    ////////////////////////
    // audio packet decoding
    else if(pkt.stream_index == audio_index) {
      // XXX(shammash): audio decoding seems to depend on screen properties, so
      //                we skip decoding audio frames if there's no screen
      //  long unsigned int m_SampleRate = screen->m_SampleRate?*(screen->m_SampleRate):48000;
      //  ringbuffer_write(screen->audio, (const char*)audio_float_buf,  samples*sizeof(float));
      //  ... and so on ...
      if(use_audio && screen) {
	int data_size;
	len1 = decode_audio_packet(&data_size);
	if (len1 > 0)  {
	  int samples = data_size/sizeof(uint16_t);
	  long unsigned int m_SampleRate = screen->m_SampleRate?*(screen->m_SampleRate):48000;
	  double m_ResampleRatio = (double)(m_SampleRate)/(double)audio_samplerate; 
	  long unsigned max_buf = ceil(AVCODEC_MAX_AUDIO_FRAME_SIZE * m_ResampleRatio * audio_channels);

	  if (audio_resampled_buf_len < max_buf) {
		if (audio_resampled_buf) free (audio_resampled_buf);
		audio_resampled_buf = (float*) malloc(max_buf * sizeof(float));
		audio_resampled_buf_len = max_buf;
	  }

	  src_short_to_float_array ((const short*) audio_buf, audio_float_buf, samples);
	  if (m_ResampleRatio == 1.0) 
	  {
	    ringbuffer_write(screen->audio, (const char*)audio_float_buf,  samples*sizeof(float));
	    // debug timestamp (disabled):
	    // time_t tm; time(&tm);
	    // std::cerr << "-- VL:" << asctime(localtime(&tm));
	  } 
	  else 
	  {
	    src_short_to_float_array ((const short*) audio_buf, audio_float_buf, samples);

	    SRC_DATA src_data;
	    int offset = 0;

            do {
	      src_data.input_frames  = samples/audio_channels;
	      src_data.output_frames = audio_resampled_buf_len/audio_channels - offset;
	      src_data.end_of_input  = 0;
	      src_data.src_ratio     =  m_ResampleRatio;
	      src_data.input_frames_used = 0;
	      src_data.output_frames_gen = 0;
	      src_data.data_in       = audio_float_buf + offset; 
	      src_data.data_out      = audio_resampled_buf + offset;

	      src_simple (&src_data, SRC_SINC_MEDIUM_QUALITY, audio_channels) ;
	      ringbuffer_write(screen->audio,
			       (const char*)audio_resampled_buf,
			       src_data.output_frames_gen * audio_channels *sizeof(float));

	      offset += src_data.input_frames_used * audio_channels;
	      samples -= src_data.input_frames_used * audio_channels;

	      if (samples>0)
		warning("resampling left: %i < %i",
			src_data.input_frames_used, samples/audio_channels);

	    } while (samples > audio_channels);
	  }
	}
      }
    }
    
    av_free_packet(&pkt); /* sun's good. love's bad */
    
  } // end of while(!got_it)
  
  return frame_fifo.picture[fifo_position-1]->data[0];
}