Example #1
void t_audio_tx::conceal(short num) {
	// Some codecs have a PLC.
	// Only use this PLC if the sound card sample rate equals the codec
	// sample rate. If they differ, then we should resample the codec
	// samples. As this should be a rare case, we are lazy here. In
	// this rare case, use Twinkle's low-tech PLC.
	if (map_audio_decoder[codec]->has_plc() && audio_sample_rate(codec) == sc_sample_rate) {
		short *sb = (short *)sample_buf;
		for (int i = 0; i < num; i++) {
			int nsamples;
			nsamples = map_audio_decoder[codec]->conceal(sb, SAMPLE_BUF_SIZE);
			if (nsamples > 0) {
				play_pcm(sample_buf, nsamples * 2);
			}
		}
		
		return;
	}

	// Replay previous packets for other codecs
	short i = (conceal_pos + (MAX_CONCEALMENT - num)) % MAX_CONCEALMENT;

	if (i >= conceal_pos) {
		for (int j = i; j < MAX_CONCEALMENT; j++) {
			play_pcm(conceal_buf[j], conceal_buflen[j]);
		}

		for (int j = 0; j < conceal_pos; j++) {
			play_pcm(conceal_buf[j], conceal_buflen[j]);
		}
	} else {
		for (int j = i; j < conceal_pos; j++) {
			play_pcm(conceal_buf[j], conceal_buflen[j]);
		}
	}
}
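
The replay branch above treats conceal_buf as a ring buffer of the most recently played packets, with conceal_pos marking the next slot to be overwritten. The sketch below shows how such a retention routine could work; the buffer sizes and the standalone form are assumptions made for illustration (Example #3 calls the real member function retain_for_concealment()), not Twinkle's actual implementation.

/* Illustrative sketch only: a ring buffer of recently played packets that
 * matches the replay logic in conceal() above. Sizes are assumed values. */
#include <string.h>

#define MAX_CONCEALMENT   8        /* assumed ring capacity */
#define CONCEAL_SLOT_SIZE 4096     /* assumed per-slot capacity in bytes */

static unsigned char conceal_buf[MAX_CONCEALMENT][CONCEAL_SLOT_SIZE];
static int   conceal_buflen[MAX_CONCEALMENT];
static short conceal_pos = 0;      /* next slot to overwrite */

/* Store a packet that was just played so conceal() can replay it on loss. */
static void retain_for_concealment(const unsigned char *pcm, int len)
{
	if (len > CONCEAL_SLOT_SIZE)
		len = CONCEAL_SLOT_SIZE;
	memcpy(conceal_buf[conceal_pos], pcm, len);
	conceal_buflen[conceal_pos] = len;
	conceal_pos = (conceal_pos + 1) % MAX_CONCEALMENT;   /* wrap around */
}
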
Example #2
int main(int argc, char *argv[]) {
  int rc = -1;

  void *buf=NULL;
  size_t sz;
  if (tpl_gather(TPL_GATHER_BLOCKING, STDIN_FILENO, &buf, &sz) <= 0) goto done;

  /* peek into the saved image to see how many samples it has in it */
  uint32_t num_fxlens, *fxlens;
  char *fmt = tpl_peek(TPL_MEM|TPL_FXLENS, buf, sz, &num_fxlens, &fxlens);
  if ((!fmt) || (num_fxlens<1)) {fprintf(stderr,"invalid buffer\n"); goto done;}
  cfg.nsamples = fxlens[0];
  free(fxlens);
  free(fmt);  /* tpl_peek allocates the returned format string */

  /* make a buffer to load the PCM data into */
  /* TODO assert that cfg.resolution matches the resolution stored in the image */
  size_t pcmlen = cfg.resolution * cfg.nsamples;
  int16_t *pcm;
  pcm = (int16_t*)malloc(pcmlen);
  if (!pcm) {fprintf(stderr,"out of memory\n"); goto done;}
  
  tpl_node *tn = tpl_map("iiij#", &cfg.sample_rate, &cfg.duration, &cfg.resolution, 
                         pcm, cfg.nsamples);
  tpl_load(tn, TPL_MEM, buf, sz);
  tpl_unpack(tn,0);
  tpl_free(tn);

  if (cfg.verbose) fprintf(stderr,"read the PCM file: "
                 "duration %u s, sample rate %u hz, resolution %u bits\n",
                 cfg.duration, cfg.sample_rate, cfg.resolution*8);

  play_pcm(argc, argv, pcm, pcmlen, cfg.sample_rate, cfg.verbose);

  /* TODO cycle if requested, reusing buf? */
  rc = 0;

 done:
  if (buf) free(buf);
  return rc;
}
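
Example #2 unpacks a tpl image whose root format is "iiij#": three 32-bit ints (sample rate, duration, resolution) followed by a fixed-length array of 16-bit samples. For clarity, here is a sketch of what the matching writer side could look like, based on the tpl library's documented tpl_map/tpl_pack/tpl_dump calls; write_pcm_image is a hypothetical helper, not necessarily the companion program this reader was written against.

/* Hypothetical writer counterpart (sketch): packs the header fields plus a
 * fixed-length int16_t sample array with the same "iiij#" format and writes
 * the resulting tpl image to stdout for the reader above to consume. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include "tpl.h"

int write_pcm_image(int32_t sample_rate, int32_t duration, int32_t resolution,
                    int16_t *pcm, int nsamples) {
  void *img = NULL;
  size_t len = 0;

  /* "iiij#": three int32 fields, then a fixed-length int16 array */
  tpl_node *tn = tpl_map("iiij#", &sample_rate, &duration, &resolution,
                         pcm, nsamples);
  if (!tn) return -1;

  tpl_pack(tn, 0);                    /* pack the (non-array) root */
  tpl_dump(tn, TPL_MEM, &img, &len);  /* serialize into a malloc'd buffer */
  tpl_free(tn);

  fwrite(img, len, 1, stdout);        /* emit the image on stdout */
  free(img);
  return 0;
}
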
Example #3
void t_audio_tx::run(void) {
	const AppDataUnit* adu;
	struct timespec sleeptimer;
	//struct timeval debug_timer, debug_timer_prev;
	int last_seqnum = -1; // seqnum of last received RTP packet
	
	// RTP packets with multiple SSRCs may be received. Each SSRC
	// represents an audio stream. Twinkle will only play 1 audio stream.
	// On reception of a new SSRC, Twinkle switches over to playing the
	// new stream. This supports devices that change SSRC during a call.
	uint32 ssrc_current = 0;
	
	bool recvd_dtmf = false; // indicates if the last RTP packet is a DTMF event

	// The running flag is set already in t_audio_session::run to prevent
	// a crash when the thread gets destroyed before it starts running.
	// is_running = true;

	uint32 rtp_timestamp = 0;
	
	// This thread may not take the lock on the transaction layer, to
	// prevent deadlocks.
	phone->add_prohibited_thread();
	ui->add_prohibited_thread();
	
	while (true) {
		do {
			adu = NULL;
			if (stop_running) break;
			rtp_timestamp = rtp_session->getFirstTimestamp();
			adu = rtp_session->getData(
					rtp_session->getFirstTimestamp());
			if (adu == NULL || adu->getSize() <= 0) {
				// There is no packet available. This may have
				// several reasons:
				// - the thread scheduling granularity does
				//   not match ptime
				// - packet lost
				// - packet delayed
				// Wait another cycle for a packet. The
				// jitter buffer will cope with this variation.
				if (adu) {
					delete adu;
					adu = NULL;
				}

				// If we are the mixer in a 3-way call and there
				// is enough media from the other far-end then
				// this must be sent to the dsp.
				if (is_3way && is_3way_mixer &&
				    media_3way_peer_tx->size_content() >=
				    	ptime * (audio_sample_rate(codec) / 1000) * 2)
				{
					// Fill the sample buffer with silence
					int len = ptime * (audio_sample_rate(codec) / 1000) * 2;
					memset(sample_buf, 0, len);
					play_pcm(sample_buf, len, true);
				}

				// Sleep ptime ms
				sleeptimer.tv_sec = 0;

				if (ptime >= 20) {
					sleeptimer.tv_nsec =
						ptime * 1000000 - 10000000;
				} else {
					// With a thread schedule of 10ms
					// granularity, this will schedule the
					// thread every 10ms.
					sleeptimer.tv_nsec = 5000000;
				}
				nanosleep(&sleeptimer, NULL);
			}
		} while (adu == NULL || (adu->getSize() <= 0));
		
		if (stop_running) {
			if (adu) delete adu;
			break;
		}

		if (adu) {
			// adu is created by ccRTP, but we have to delete it,
			// so report it to MEMMAN
			MEMMAN_NEW(const_cast<ost::AppDataUnit*>(adu));
		}

		// Check for a codec change
		map<unsigned short, t_audio_codec>::const_iterator it_codec;
		it_codec = payload2codec.find(adu->getType());
		t_audio_codec recvd_codec = CODEC_NULL;
		if (it_codec != payload2codec.end()) {
			recvd_codec = it_codec->second;
		}
		
		// Switch over to new SSRC
		if (last_seqnum == -1 || ssrc_current != adu->getSource().getID()) {
			if (recvd_codec != CODEC_NULL) {
				ssrc_current = adu->getSource().getID();
				
				// An SSRC defines a sequence number space. So a new
				// SSRC starts with a new random sequence number
				last_seqnum = -1;
				
				log_file->write_header("t_audio_tx::run", 
					LOG_NORMAL);
				log_file->write_raw("Audio tx line ");
				log_file->write_raw(get_line()->get_line_number()+1);
				log_file->write_raw(": play SSRC ");
				log_file->write_raw(ssrc_current);
				log_file->write_endl();
				log_file->write_footer();
			} else {
				// SSRC received had an unsupported codec
				// Discard.
				// KLUDGE: for now this supports a scenario where a
				// far-end starts ZRTP negotiation by sending CN
				// packets with a separate SSRC while ZRTP is disabled
				// in Twinkle. Twinkle will then receive the CN packets
				// and discard them here as CN is an unsupported codec.
				log_file->write_header("t_audio_tx::run", 
					LOG_NORMAL, LOG_DEBUG);
				log_file->write_raw("Audio tx line ");
				log_file->write_raw(get_line()->get_line_number()+1);
				log_file->write_raw(": SSRC received (");
				log_file->write_raw(adu->getSource().getID());
				log_file->write_raw(") has unsupported codec ");
				log_file->write_raw(adu->getType());
				log_file->write_endl();
				log_file->write_footer();
				
				MEMMAN_DELETE(const_cast<ost::AppDataUnit*>(adu));
				delete adu;
				continue;
			}
		}
		
		map<t_audio_codec, t_audio_decoder *>::const_iterator it_decoder;
		it_decoder = map_audio_decoder.find(recvd_codec);
		if (it_decoder != map_audio_decoder.end()) {
			if (codec != recvd_codec) {
				codec = recvd_codec;
				get_line()->ci_set_recv_codec(codec);
				ui->cb_async_recv_codec_changed(get_line()->get_line_number(),
					codec);

				log_file->write_header("t_audio_tx::run", 
					LOG_NORMAL, LOG_DEBUG);
				log_file->write_raw("Audio tx line ");
				log_file->write_raw(get_line()->get_line_number()+1);
				log_file->write_raw(": codec change to ");
				log_file->write_raw(ui->format_codec(codec));
				log_file->write_endl();
				log_file->write_footer();
			}
		} else {
			if (adu->getType() == pt_telephone_event ||
			    adu->getType() == pt_telephone_event_alt) 
			{
				recvd_dtmf = true;
			} else {
				if (codec != CODEC_UNSUPPORTED) {
					codec = CODEC_UNSUPPORTED;
					get_line()->ci_set_recv_codec(codec);
					ui->cb_async_recv_codec_changed(
						get_line()->get_line_number(), codec);
	
					log_file->write_header("t_audio_tx::run", 
						LOG_NORMAL, LOG_DEBUG);
					log_file->write_raw("Audio tx line ");
					log_file->write_raw(get_line()->get_line_number()+1);
					log_file->write_raw(": payload type ");
					log_file->write_raw(adu->getType());
					log_file->write_raw(" not supported\n");
					log_file->write_footer();
				}
	
				last_seqnum = adu->getSeqNum();
				MEMMAN_DELETE(const_cast<ost::AppDataUnit*>(adu));
				delete adu;
				continue;
			}
		}

		// DTMF event
		if (recvd_dtmf) {
			// NOTE: the DTMF tone will be detected here
			// while there might still be data in the jitter
			// buffer. If the jitter buffer was already sent
			// to the DSP, then the DSP will continue to play
			// out the buffer sound samples.

			if (dtmf_previous_timestamp != rtp_timestamp) {
				// A new DTMF tone has been received.
				dtmf_previous_timestamp = rtp_timestamp;
				t_rtp_telephone_event *e =
					(t_rtp_telephone_event *)adu->getData();
				ui->cb_async_dtmf_detected(get_line()->get_line_number(),
					e->get_event());

				// Log DTMF event
				log_file->write_header("t_audio_tx::run");
				log_file->write_raw("Audio tx line ");
				log_file->write_raw(get_line()->get_line_number()+1);
				log_file->write_raw(": detected DTMF event - ");
				log_file->write_raw(e->get_event());
				log_file->write_endl();
				log_file->write_footer();
			}

			recvd_dtmf = false;
			last_seqnum = adu->getSeqNum();
			MEMMAN_DELETE(const_cast<ost::AppDataUnit*>(adu));
			delete adu;
			continue;
		}

		// Discard invalid payload sizes
		if (!map_audio_decoder[codec]->valid_payload_size(
				adu->getSize(), SAMPLE_BUF_SIZE / 2))
		{
			log_file->write_header("t_audio_tx::run", LOG_NORMAL, LOG_DEBUG);
			log_file->write_raw("Audio tx line ");
			log_file->write_raw(get_line()->get_line_number()+1);
			log_file->write_raw(": RTP payload size (");
			log_file->write_raw((unsigned long)(adu->getSize()));
			log_file->write_raw(" bytes) invalid for ");
			log_file->write_raw(ui->format_codec(codec));
			log_file->write_endl();
			log_file->write_footer();
			last_seqnum = adu->getSeqNum();
			MEMMAN_DELETE(const_cast<ost::AppDataUnit*>(adu));
			delete adu;
			continue;
		}
		
		unsigned short recvd_ptime;
		recvd_ptime = map_audio_decoder[codec]->get_ptime(adu->getSize());

		// Log a change of ptime
		if (ptime != recvd_ptime) {
			log_file->write_header("t_audio_tx::run", LOG_NORMAL, LOG_DEBUG);
			log_file->write_raw("Audio tx line ");
			log_file->write_raw(get_line()->get_line_number()+1);
			log_file->write_raw(": ptime changed from ");
			log_file->write_raw(ptime);
			log_file->write_raw(" ms to ");
			log_file->write_raw(recvd_ptime);
			log_file->write_raw(" ms\n");
			log_file->write_footer();
			ptime = recvd_ptime;
		}
		
		// Check for lost packets.
		// This must be done before decoding the received samples, as the
		// Speex decoder has its own PLC algorithm that needs the decoder
		// state from before the new samples are decoded.
		seq16_t seq_recvd(adu->getSeqNum());
		seq16_t seq_last(static_cast<uint16>(last_seqnum));
		if (last_seqnum != -1 && seq_recvd - seq_last > 1) {
			// Packets have been lost
			uint16 num_lost = (seq_recvd - seq_last) - 1;
			log_file->write_header("t_audio_tx::run", LOG_NORMAL, LOG_DEBUG);
			log_file->write_raw("Audio tx line ");
			log_file->write_raw(get_line()->get_line_number()+1);
			log_file->write_raw(": ");
			log_file->write_raw(num_lost);
			log_file->write_raw(" RTP packets lost.\n");
			log_file->write_footer();

			if (num_lost <= conceal_num) {
				// Conceal packet loss
				conceal(num_lost);
			}
			clear_conceal_buf();
		}
		
		// Determine if resampling is needed due to a dynamic change to
		// a codec with a different sample rate.
		short downsample_factor = 1;
		short upsample_factor = 1;
		if (audio_sample_rate(codec) > sc_sample_rate) {
			downsample_factor = audio_sample_rate(codec) / sc_sample_rate;
		} else if (audio_sample_rate(codec) < sc_sample_rate) {
			upsample_factor = sc_sample_rate / audio_sample_rate(codec);
		}
		
		// Create sample buffer. If no resampling is needed, the sample
		// buffer from the audio_tx object can be used directly.
		// Otherwise a temporary sample buffer is created that will
		// be resampled into the object's sample buffer later.
		short *sb;
		int sb_size;
		if (downsample_factor > 1) {
			sb_size = SAMPLE_BUF_SIZE / 2 * downsample_factor;
			sb = new short[sb_size];
			MEMMAN_NEW_ARRAY(sb);
		} else if (upsample_factor > 1) {
			sb_size = SAMPLE_BUF_SIZE / 2;
			sb = new short[SAMPLE_BUF_SIZE / 2];
			MEMMAN_NEW_ARRAY(sb);
		} else {
			sb_size = SAMPLE_BUF_SIZE / 2;
			sb = (short *)sample_buf;
		}
				
				
		// Decode the audio
		unsigned char *payload = const_cast<uint8 *>(adu->getData());
		short sample_size; // size in bytes
		
		sample_size = 2 * map_audio_decoder[codec]->decode(payload, adu->getSize(), sb, sb_size);
				
		// Resample if needed
		if (downsample_factor > 1) {
			short *p = sb;
			sb = (short *)sample_buf;
			for (int i = 0; i < sample_size / 2; i += downsample_factor) {
				sb[i / downsample_factor] = p[i];
			}
			MEMMAN_DELETE_ARRAY(p);
			delete [] p;
			sample_size /= downsample_factor;
		} else if (upsample_factor > 1) {
			short *p = sb;
			sb = (short *)sample_buf;
			for (int i = 0; i < sample_size / 2; i++) {
				for (int j = 0; j < upsample_factor; j++) {
					sb[i * upsample_factor + j] = p[i];
				}
			}
			MEMMAN_DELETE_ARRAY(p);
			delete [] p;
			sample_size *= upsample_factor;
		}
		
		// If the decoder delivered 0 bytes, then it failed
		if (sample_size == 0) {
			last_seqnum = adu->getSeqNum();
			MEMMAN_DELETE(const_cast<ost::AppDataUnit*>(adu));
			delete adu;	
			continue;
		}
		
		// Discard the packet if we are lagging behind. This happens if the
		// sound card plays at a rate less than the requested sample rate.
		if (rtp_session->isWaiting(&(adu->getSource()))) {

			uint32 last_ts = rtp_session->getLastTimestamp(&(adu->getSource()));
			uint32 diff;
			
			diff = last_ts - rtp_timestamp;
			
			if (diff > (uint32_t)(JITTER_BUF_SIZE(sc_sample_rate) / AUDIO_SAMPLE_SIZE) * 8)
			{
				log_file->write_header("t_audio_tx::run", LOG_NORMAL, LOG_DEBUG);
				log_file->write_raw("Audio tx line ");
				log_file->write_raw(get_line()->get_line_number()+1);
				log_file->write_raw(": discard delayed packet.\n");
				log_file->write_raw("Timestamp: ");
				log_file->write_raw(rtp_timestamp);
				log_file->write_raw(", Last timestamp: ");
				log_file->write_raw((long unsigned int)last_ts);
				log_file->write_endl();
				log_file->write_footer();
					
				last_seqnum = adu->getSeqNum();
				MEMMAN_DELETE(const_cast<ost::AppDataUnit*>(adu));
				delete adu;
				continue;
			}
		}

		play_pcm(sample_buf, sample_size);
		retain_for_concealment(sample_buf, sample_size);
		last_seqnum = adu->getSeqNum();
		MEMMAN_DELETE(const_cast<ost::AppDataUnit*>(adu));
		delete adu;

		// No sleep is done here but in the loop waiting
		// for a new packet. If a packet is already available,
		// it can be sent to the sound card immediately so
		// the play-out buffer stays filled.
		// If the play-out buffer runs empty, you hear a
		// crack in the sound.


#ifdef HAVE_SPEEX		
		// store decoded output for (optional) echo cancellation 
		if (audio_session->get_do_echo_cancellation()) {
		    if (audio_session->get_echo_captured_last()) {
			speex_echo_playback(audio_session->get_speex_echo_state(), (spx_int16_t *) sb);
			audio_session->set_echo_captured_last(false);
		    }
		}
#endif

	}

	phone->remove_prohibited_thread();
	ui->remove_prohibited_thread();
	is_running = false;
}
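
The lost-packet check in this example (seq_recvd - seq_last > 1) works only because seq16_t does wrap-aware arithmetic on 16-bit RTP sequence numbers. The helper below is a minimal sketch of that technique in plain C, not Twinkle's actual seq16_t class: unsigned 16-bit subtraction wraps modulo 2^16, which is exactly the forward distance between two sequence numbers, even across the 65535 -> 0 rollover.

/* Sketch of wrap-aware RTP sequence-number arithmetic (the technique behind
 * seq16_t above, not the actual class). */
#include <stdint.h>
#include <stdio.h>

static uint16_t seq16_diff(uint16_t newer, uint16_t older)
{
	return (uint16_t)(newer - older);   /* modulo-2^16 distance */
}

int main(void)
{
	/* 65534, 65535, 0, 1: packet 1 arrives three steps after 65534 */
	printf("%u\n", (unsigned)seq16_diff(1, 65534));      /* prints 3 */
	/* receiving 10 right after 5 means packets 6..9 were lost */
	printf("%u\n", (unsigned)(seq16_diff(10, 5) - 1));   /* prints 4 */
	return 0;
}
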
Example #4
int main(int argc, char **argv)
{
    AVFormatContext* pCtx = 0;
    AVCodecContext *pCodecCtx = 0;
    AVCodec *pCodec = 0;
    AVPacket packet;
    AVFrame *pFrame = 0;
    FILE *fpo1 = NULL;
    FILE *fpo2 = NULL;
    int nframe;
    int err;
    int got_picture = -1;
    int picwidth, picheight, linesize;
    unsigned char *pBuf;
    int i;
    int64_t timestamp;
    struct options opt;
    int usefo = 0;
    struct audio_dsp dsp;
    int dusecs;
    float usecs1 = 0;
    float usecs2 = 0;
    struct timeval elapsed1, elapsed2;
    int decoded = 0;

	//taoanran add +++++++++
	int ret = -1;
	int videoStream = -1; //video streamID
	// ----------------------

	int flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
 #if 0
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
        exit(1);
    }
#endif
    av_register_all();

    av_log_set_callback(log_callback);
    av_log_set_level(50);

    if (Myparse_option(&opt, argc, argv) < 0 || (strlen(opt.finput) == 0))
    {
        Myshow_help(argv[0]);
        return 0;
    }

    err = avformat_open_input(&pCtx, opt.finput, 0, 0);
    if (err < 0)
    {
        printf("\n->(avformat_open_input)\tERROR:\t%d\n", err);
        goto fail;
    }
	printf("=========================\n");
    err = avformat_find_stream_info(pCtx, 0);

    if (err < 0)
    {
        printf("\n->(avformat_find_stream_info)\tERROR:\t%d\n", err);
        goto fail;
    }
	av_dump_format(pCtx, 0, opt.finput, 0);

	// check the video stream
	videoStream = find_video_stream(pCtx);
	if (videoStream < 0)
	{
		printf("no video stream found!\n");
		return -1;
	}

	pCodecCtx = pCtx->streams[videoStream]->codec;
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);//find the video decoder
 	if (!pCodec)
    {
        printf("\ncan't find the video decoder!\n");
        goto fail;
    }

	pFrame = avcodec_alloc_frame();

	// open the video decoder
	ret = avcodec_open2(pCodecCtx, pCodec, 0);

	if (ret < 0)
	{
		printf("avcodec_open2 error \n");
		return -1;
	}

#if 0
	//only for audio
	pFrame->nb_samples = pCodecCtx->frame_size;
	pFrame->format = pCodecCtx->sample_fmt;
	pFrame->channel_layout = pCodecCtx->channel_layout;
#endif
#if 0
	//set the param of SDL
	SDL_AudioSpec wanted_spec, spec; 
	wanted_spec.freq = pCodecCtx->sample_rate;  
	wanted_spec.format = AUDIO_S16SYS;  
	wanted_spec.channels = pCodecCtx->channels;  
	wanted_spec.silence = 0;  
	wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;  
	wanted_spec.callback = audio_callback;
	wanted_spec.userdata = pCodecCtx;
	if(SDL_OpenAudio(&wanted_spec, &spec) < 0)  
    {  
        fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
        return -1;  
    } 
#endif			

	printf(" bit_rate = %d \r\n", pCodecCtx->bit_rate);
	printf(" sample_rate = %d \r\n", pCodecCtx->sample_rate);
	printf(" channels = %d \r\n", pCodecCtx->channels);
	printf(" code_name = %s \r\n", pCodecCtx->codec->name);

	while(av_read_frame(pCtx, &packet) >= 0)
	{
		// found a video frame
		if (packet.stream_index == videoStream)
		{
			int got;
			int i;
		
			avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, &packet);
			if (!got_picture)
			{
				// no complete frame decoded yet (decoder delay); drop this packet
				av_free_packet(&packet);
				continue;
			}
			printf("pFrame->width = %d\n", pFrame->width);
			printf("pFrame->height = %d\n", pFrame->height);
			printf("pFrame->linesize[0] = %d\n", pFrame->linesize[0]);
			printf("pFrame->linesize[1] = %d\n", pFrame->linesize[1]);
			printf("pFrame->linesize[2] = %d\n", pFrame->linesize[2]);

			//catch the YUV420P data
			saveYUV420P(pFrame->data[0], pFrame->linesize[0], pCodecCtx->width, pCodecCtx->height);      //Y: 4
			saveYUV420P(pFrame->data[1], pFrame->linesize[1], pCodecCtx->width/2, pCodecCtx->height/2);    //U : 1
			saveYUV420P(pFrame->data[2], pFrame->linesize[2], pCodecCtx->width/2, pCodecCtx->height/2);    //V : 1
		}
		av_free_packet(&packet);
	}
	
	return 0;
#if 0	
	if (!opt.nodec)
    {
        
        pCodecCtx = pCtx->streams[opt.streamId]->codec;
 
        if (opt.thread_count <= 16 && opt.thread_count > 0 )
        {
            pCodecCtx->thread_count = opt.thread_count;
            pCodecCtx->thread_type = FF_THREAD_FRAME;
        }
        pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
        if (!pCodec)
        {
            printf("\n-> cannot find the codec!\n");
            goto fail;
        }
        err = avcodec_open2(pCodecCtx, pCodec, 0);
        if (err < 0)
        {
            printf("\n->(avcodec_open)\tERROR:\t%d\n", err);
            goto fail;
        }
        pFrame = avcodec_alloc_frame();
 
        if (opt.bplay)
        {
            dsp.audio_fd = open(OSS_DEVICE, O_WRONLY);
            if (dsp.audio_fd == -1)
            {
                printf("\n-> cannot open the audio device\n");
                goto fail;
            }
            dsp.channels = pCodecCtx->channels;
            dsp.speed = pCodecCtx->sample_rate;
            dsp.format = map_formats(pCodecCtx->sample_fmt);
            if (set_audio(&dsp) < 0)
            {
                printf("\n-> cannot configure the audio device\n");
                goto fail;
            }
        }
    }
    nframe = 0;
	printf("=========================444444\n");
    while(nframe < opt.frames || opt.frames == -1)
    {
        gettimeofday(&elapsed1, NULL);
        err = av_read_frame(pCtx, &packet);
        if (err < 0)
        {
            printf("\n->(av_read_frame)\tERROR:\t%d\n", err);
            break;
        }
        gettimeofday(&elapsed2, NULL);
        dusecs = (elapsed2.tv_sec - elapsed1.tv_sec)*1000000 + (elapsed2.tv_usec - elapsed1.tv_usec);
        usecs2 += dusecs;
        timestamp = av_rescale_q(packet.dts, pCtx->streams[packet.stream_index]->time_base, (AVRational){1, AV_TIME_BASE});
        printf("\nFrame No %5d] stream#%d\tsize %6dB, timestamp:%6lld, dts:%6lld, pts:%6lld, ", nframe++, packet.stream_index, packet.size,
               timestamp, packet.dts, packet.pts);
        if (packet.stream_index == opt.streamId)
        {
#if 0
            for (i = 0; i < 16; i++)
            {
                if (i == 0) printf("\n pktdata: ");
                printf("%2x ", packet.data[i]);
            }
            printf("\n");
#endif
            if (usefo)
            {
                fwrite(packet.data, packet.size, 1, fpo1);
                fflush(fpo1);
            }
            if (pCtx->streams[opt.streamId]->codec->codec_type == AVMEDIA_TYPE_VIDEO && !opt.nodec)
            {
                picheight = pCtx->streams[opt.streamId]->codec->height;
                picwidth = pCtx->streams[opt.streamId]->codec->width;
 
                gettimeofday(&elapsed1, NULL);
                avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, &packet);
                decoded++;
                gettimeofday(&elapsed2, NULL);
                dusecs = (elapsed2.tv_sec - elapsed1.tv_sec)*1000000 + (elapsed2.tv_usec - elapsed1.tv_usec);
                usecs1 += dusecs;
                if (got_picture)
                {
                    printf("[Video: type %d, ref %d, pts %lld, pkt_pts %lld, pkt_dts %lld]",
                            pFrame->pict_type, pFrame->reference, pFrame->pts, pFrame->pkt_pts, pFrame->pkt_dts);
 
                    if (pCtx->streams[opt.streamId]->codec->pix_fmt == PIX_FMT_YUV420P)
                    {
                        if (usefo)
                        {
                            linesize = pFrame->linesize[0];
                            pBuf = pFrame->data[0];
                            for (i = 0; i < picheight; i++)
                            {
                                fwrite(pBuf, picwidth, 1, fpo2);
                                pBuf += linesize;
                            }
                            linesize = pFrame->linesize[1];
                            pBuf = pFrame->data[1];
                            for (i = 0; i < picheight/2; i++)
                            {
                                fwrite(pBuf, picwidth/2, 1, fpo2);
                                pBuf += linesize;
                            }
                            linesize = pFrame->linesize[2];
                            pBuf = pFrame->data[2];
                            for (i = 0; i < picheight/2; i++)
                            {
                                fwrite(pBuf, picwidth/2, 1, fpo2);
                                pBuf += linesize;
                            }
                            fflush(fpo2);
                        }
 
                        if (opt.bplay)
                        {
                            
                        }
                    }
                }
                av_free_packet(&packet);
            }
            else if (pCtx->streams[opt.streamId]->codec->codec_type == AVMEDIA_TYPE_AUDIO && !opt.nodec)
            {
                int got;
                gettimeofday(&elapsed1, NULL);
                avcodec_decode_audio4(pCodecCtx, pFrame, &got, &packet);
                decoded++;
                gettimeofday(&elapsed2, NULL);
                dusecs = (elapsed2.tv_sec - elapsed1.tv_sec)*1000000 + (elapsed2.tv_usec - elapsed1.tv_usec);
                usecs1 += dusecs;
                                if (got)
                                {
                    printf("[Audio: %6dB raw data, decoding time: %d]", pFrame->linesize[0], dusecs);
                    if (usefo)
                    {
                        fwrite(pFrame->data[0], pFrame->linesize[0], 1, fpo2);
                        fflush(fpo2);
                    }
                    if (opt.bplay)
                    {
                        play_pcm(&dsp, pFrame->data[0], pFrame->linesize[0]);
                    }
                                }
            }
        }
    }
    if (!opt.nodec && pCodecCtx)
    {
        avcodec_close(pCodecCtx);
    }
    printf("\n%d frames parsed, average %.2f us per frame\n", nframe, usecs2/nframe);
    printf("%d frames decoded, average %.2f us per frame\n", decoded, usecs1/decoded);

#endif

fail:
    if (pCtx)
    {
        avformat_close_input(&pCtx);
    }


    return 0;
}
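
Example #4 is written against FFmpeg APIs that have since been deprecated or removed (av_register_all, avcodec_alloc_frame, avcodec_decode_video2, av_free_packet). For reference, here is a sketch of the same video decode loop on the current send/receive API; error handling is trimmed and the function name decode_all is made up for the illustration, so treat it as an outline rather than a drop-in replacement.

/* Sketch: decode all video frames of a file with avcodec_send_packet /
 * avcodec_receive_frame (FFmpeg 3.1+). Minimal error handling. */
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

static int decode_all(const char *filename)
{
    AVFormatContext *fmt = NULL;
    if (avformat_open_input(&fmt, filename, NULL, NULL) < 0)
        return -1;
    if (avformat_find_stream_info(fmt, NULL) < 0)
        goto fail;

    /* locate the video stream and open its decoder */
    int vid = av_find_best_stream(fmt, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    if (vid < 0)
        goto fail;
    const AVCodec *codec = avcodec_find_decoder(fmt->streams[vid]->codecpar->codec_id);
    AVCodecContext *dec = avcodec_alloc_context3(codec);
    avcodec_parameters_to_context(dec, fmt->streams[vid]->codecpar);
    if (avcodec_open2(dec, codec, NULL) < 0)
        goto fail;

    AVPacket *pkt = av_packet_alloc();
    AVFrame *frame = av_frame_alloc();

    while (av_read_frame(fmt, pkt) >= 0) {
        if (pkt->stream_index == vid) {
            /* feed one packet, then drain every frame it produced */
            avcodec_send_packet(dec, pkt);
            while (avcodec_receive_frame(dec, frame) == 0) {
                /* frame->data[0..2] hold the YUV planes, as used by
                 * saveYUV420P() in the example above */
            }
        }
        av_packet_unref(pkt);
    }

    av_frame_free(&frame);
    av_packet_free(&pkt);
    avcodec_free_context(&dec);
    avformat_close_input(&fmt);
    return 0;

fail:
    avformat_close_input(&fmt);
    return -1;
}
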