/*
 * Decode one chunk of audio from the global queue into audio_buf.
 * Returns the number of bytes written, or -1 when the queue is aborted.
 *
 * FIX: the original freed the packet only after the inner while loop, but
 * the early `return data_size` skipped that free, and the next call's
 * packet_queue_get() overwrote `pkt` — leaking every packet that produced
 * output.  The previous packet is now released before fetching a new one.
 */
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size) {
  static AVPacket pkt;                   /* current compressed packet */
  static uint8_t *audio_pkt_data = NULL; /* read cursor into pkt.data */
  static int audio_pkt_size = 0;         /* bytes remaining in pkt */
  int len1, data_size;

  for (;;) {
    /* Release the previous packet (if any) before fetching the next. */
    if (pkt.data)
      av_free_packet(&pkt);
    if (packet_queue_get(audioq, &pkt, 1) < 0) {
      return -1; /* queue aborted */
    }
    audio_pkt_data = pkt.data;
    audio_pkt_size = pkt.size;
    while (audio_pkt_size > 0) {
      data_size = buf_size; /* in: capacity, out: bytes produced */
      len1 = avcodec_decode_audio2(aCodecCtx, (int16_t *)audio_buf, &data_size,
                                   audio_pkt_data, audio_pkt_size);
      if (len1 < 0) {
        /* decode error: drop the remainder of this packet */
        audio_pkt_size = 0;
        break;
      }
      audio_pkt_data += len1;
      audio_pkt_size -= len1;
      if (data_size <= 0) {
        continue; /* no samples yet; feed more of the packet */
      }
      return data_size;
    }
  }
}
int video_thread(void *arg) { VideoState *is = (VideoState *) arg; AVPacket pkt1, *packet = &pkt1; //int len1; int frameFinished; AVFrame *pFrame; pFrame = av_frame_alloc(); for (;;) { if (packet_queue_get(&is->videoq, packet, 1) < 0) { // means we quit getting packets break; } // Decode video frame //len1 = avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished, packet); // Did we get a video frame? if (frameFinished) { if (queue_picture(is, pFrame) < 0) { break; } } av_free_packet(packet); } av_free(pFrame); return 0; }
/*
 * Decode one audio frame into is->audio_buf and report its pts.
 * Returns the number of bytes stored, or -1 on quit / queue abort.
 * Maintains is->audio_clock: seeded from packet pts, advanced by the
 * duration of the returned samples.
 */
int audio_decode_frame(VideoState *is, double *pts_ptr) {
    int len1, data_size = 0, n;
    AVPacket *pkt = &is->audio_pkt;
    double pts;

    for (; ;) {
        /* Drain the current packet; one packet can hold several frames. */
        while (is->audio_pkt_size > 0) {
            int got_frame = 0;
            len1 = avcodec_decode_audio4(is->audio_st->codec, &is->audio_frame, &got_frame, pkt);
            if (len1 < 0) {
                /* if error, skip frame */
                is->audio_pkt_size = 0;
                break;
            }
            if (got_frame) {
                /* NOTE(review): copies only data[0]; assumes a packed (non-planar)
                 * sample format — confirm against the decoder's sample_fmt. */
                data_size = av_samples_get_buffer_size (
                        NULL,
                        is->audio_st->codec->channels,
                        is->audio_frame.nb_samples,
                        is->audio_st->codec->sample_fmt,
                        1
                );
                memcpy(is->audio_buf, is->audio_frame.data[0], data_size);
            }
            is->audio_pkt_data += len1;
            is->audio_pkt_size -= len1;
            if (data_size <= 0) {
                /* No data yet, get more frames */
                continue;
            }
            pts = is->audio_clock;
            *pts_ptr = pts;
            /* 2 bytes per sample (s16 assumption) times channel count. */
            n = 2 * is->audio_st->codec->channels;
            is->audio_clock += (double) data_size /
                (double) (n * is->audio_st->codec->sample_rate);
            /* We have data, return it and come back for more later */
            return data_size;
        }
        if (pkt->data)
            av_free_packet(pkt);
        if (is->quit) {
            return -1;
        }
        /* next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0) {
            return -1;
        }
        is->audio_pkt_data = pkt->data;
        is->audio_pkt_size = pkt->size;
        /* if update, update the audio clock w/pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base) * pkt->pts;
        }
    }
}
// Schedule the next decoded video frame onto the DeckLink output, and
// forward any pending data-queue packet to the serial port first.
// `prerolling` is accepted but not used in this body.
void Player::ScheduleNextFrame(bool prerolling)
{
    AVPacket pkt;
    AVPicture picture;

    // Non-blocking poll of the data queue; payload goes to the serial port.
    if (serial_fd > 0 && packet_queue_get(&dataqueue, &pkt, 0)) {
        if (pkt.data[0] != ' '){
            fprintf(stderr,"written %.*s \n", pkt.size, pkt.data);
            // NOTE(review): write() result is ignored — short writes and
            // errors are silently dropped; confirm this is acceptable.
            write(serial_fd, pkt.data, pkt.size);
        }
        av_free_packet(&pkt);
    }

    // Blocking fetch of the next video packet; <0 means queue aborted.
    if (packet_queue_get(&videoqueue, &pkt, 1) < 0)
        return;

    IDeckLinkMutableVideoFrame *videoFrame;
    // NOTE(review): CreateVideoFrame's HRESULT is not checked; videoFrame
    // is used unconditionally below.
    m_deckLinkOutput->CreateVideoFrame(m_frameWidth, m_frameHeight,
            m_frameWidth * 2, pix, bmdFrameFlagDefault, &videoFrame);
    void *frame;
    int got_picture;
    videoFrame->GetBytes(&frame);

    avcodec_decode_video2(video_st->codec, avframe, &got_picture, &pkt);
    if (got_picture) {
        // Wrap the DeckLink frame memory and scale/convert directly into it.
        avpicture_fill(&picture, (uint8_t *)frame, pix_fmt,
                m_frameWidth, m_frameHeight);
        sws_scale(sws, avframe->data, avframe->linesize, 0, avframe->height,
                picture.data, picture.linesize);
        // Timestamps rescaled by the stream time base for the DeckLink clock.
        if (m_deckLinkOutput->ScheduleVideoFrame(videoFrame,
                pkt.pts * video_st->time_base.num,
                pkt.duration * video_st->time_base.num,
                video_st->time_base.den) != S_OK)
            fprintf(stderr, "Error scheduling frame\n");
    }
    videoFrame->Release();
    av_free_packet(&pkt);
}
/*
 * Allegro video decoding thread: drains is->videoq, decodes each packet
 * (two API paths depending on the FFmpeg version), computes a pts and
 * hands finished frames to queue_picture().  Returns NULL on exit.
 */
static void *video_thread(ALLEGRO_THREAD * t, void *arg)
{
    VideoState *is = (VideoState *) arg;
    AVPacket pkt1, *packet = &pkt1;
    int len1, frameFinished;
    AVFrame *pFrame;
    double pts;
    (void)t;  /* thread handle unused */

    pFrame = avcodec_alloc_frame();
    for (;;) {
        if (packet_queue_get(is, &is->videoq, packet, 1) < 0) {
            // means we quit getting packets
            break;
        }
        /* Flush packet signals a seek: reset decoder state. */
        if (packet->data == flush_pkt.data) {
            avcodec_flush_buffers(is->video_st->codec);
            continue;
        }
        pts = 0;
        // Save global pts to be stored in pFrame
        FIXME_global_video_pkt_pts = packet->pts;
        // Decode video frame
#ifdef FFMPEG_0_8
        len1 = avcodec_decode_video2(is->video_st->codec, pFrame,
                &frameFinished, packet);
#else
        len1 = avcodec_decode_video(is->video_st->codec, pFrame,
                &frameFinished, packet->data, packet->size);
#endif
        /* Best-available timestamp; the stashed-opaque branch is currently
         * forced to 0 (see commented expression). */
        if (packet->dts == NOPTS_VALUE && pFrame->opaque
                && *(int64_t *) pFrame->opaque != NOPTS_VALUE) {
            pts = 0;//*(uint64_t *) pFrame->opaque;
        } else if (packet->dts != NOPTS_VALUE) {
            pts = packet->dts;
        } else {
            pts = 0;
        }
        pts *= av_q2d(is->video_st->time_base);
        // Did we get a video frame?
        if (frameFinished) {
            //pts = synchronize_video(is, pFrame, pts);
            if (queue_picture(is, pFrame, pts) < 0) {
                break;
            }
        }
        av_free_packet(packet);
    }
    av_free(pFrame);
    return NULL;
}
void *video_thread(void *arg) { JNIEnv *env; if((*g_jvm)->AttachCurrentThread(g_jvm, &env, NULL) != JNI_OK) { LOGE(1, "### start video thead error"); return; } VideoState *is = (VideoState*)arg; AVPacket pkt1, *packet = &pkt1; int len1, frameFinished; AVFrame *pFrame; double pts; int numBytes; pFrame=avcodec_alloc_frame(); int ret; for(;;) { if(is->quit == 1 || is->quit == 2) { break; } if(packet_queue_get(&is->videoq, packet, 1) < 0) { if(debug) LOGI(10,"video_thread get packet exit"); break; } pts = 0; global_video_pkt_pts = packet->pts; len1 = avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished, packet); if(packet->dts == AV_NOPTS_VALUE && pFrame->opaque && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE) { pts = *(uint64_t*) pFrame->opaque; } else if (packet->dts != AV_NOPTS_VALUE) { pts = packet->dts; } else { pts = 0; } pts *= av_q2d(is->video_st->time_base); //pts *= av_q2d(pCodecCtx->time_base); if (frameFinished) { pts = synchronize_video(is, pFrame, pts); if (queue_picture(is, pFrame, pts) < 0) { break; } } av_free_packet(packet); } av_free(pFrame); if((*g_jvm)->DetachCurrentThread(g_jvm) != JNI_OK) { LOGE(1,"### detach video thread error"); } pthread_exit(0); if(debug) { LOGI(1,"### video_thread exit"); } return ((void *)0); }
/*
 * Decode one audio chunk into audio_buf, report its pts via *pts_ptr and
 * advance is->audio_clock.  Returns bytes decoded, or -1 on quit / abort.
 */
int audio_decode_frame(VideoState *is, uint8_t *audio_buf, int buf_size,
        double *pts_ptr) {
  int len1, data_size, n;
  AVPacket *pkt = &is->audio_pkt;
  double pts;

  for(;;) {
    while(is->audio_pkt_size > 0) {
      data_size = buf_size;  /* in: capacity, out: bytes produced */
      len1 = avcodec_decode_audio3(is->audio_ctx, (int16_t *)audio_buf,
            &data_size, pkt);
      //is->audio_pkt_data, is->audio_pkt_size);
      //len1 = avcodec_decode_audio2(aCodecCtx, (int16_t *)audio_buf, &data_size,
      //    audio_pkt_data, audio_pkt_size);
      if(len1 < 0) {
    // if error, skip frame
    is->audio_pkt_size = 0;
    break;
      }
      is->audio_pkt_data += len1;
      is->audio_pkt_size -= len1;
      if(data_size <= 0) {
    // No data yet, get more frames
    continue;
      }
      pts = is->audio_clock;
      *pts_ptr = pts;
      // NOTE(review): n is hard-coded to 2 (bytes per sample) rather than
      // 2 * channels (see commented line) — mono assumption? confirm.
      //n = 2 * is->audio_ctx->channels;
      n = 2;
      is->audio_clock += (double)data_size /
    (double)(n * is->audio_ctx->sample_rate);
      // We have data, return it and come back for more later
      return data_size;
    }
    if(pkt->data)
      av_free_packet(pkt);
    if(is->quit) {
      return -1;
    }
    // next packet
    if(packet_queue_get(&is->audioq, pkt, 1) < 0) {
      return -1;
    }
    is->audio_pkt_data = pkt->data;
    is->audio_pkt_size = pkt->size;
    // if update, update the audio clock w/pts
    // NOTE(review): uses the codec context time_base; stream time_base is
    // the usual reference for packet pts — verify.
    if(pkt->pts != AV_NOPTS_VALUE) {
      is->audio_clock = av_q2d(is->audio_ctx->time_base)*pkt->pts;
    }
  }
}
/*
 * Decode one audio frame from m_audio_q into audio_buf.
 * Returns bytes stored, or -1 on quit / queue abort.  Static decoder
 * state persists across calls because one packet can span several frames.
 */
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf,
        int buf_size) {
  static AVPacket pkt;                    /* current packet */
  static uint8_t *audio_pkt_data = NULL;  /* read cursor */
  static int audio_pkt_size = 0;          /* bytes remaining */
  int len1, data_size = 0;

  for(;;) {
    while(audio_pkt_size > 0) {
      int got_frame = 0;
      len1 = avcodec_decode_audio4 (aCodecCtx, m_audio_frame, &got_frame, &pkt);
      if(len1 < 0) {
        /* decode error: skip the rest of this packet */
        audio_pkt_size = 0;
        break;
      }
      audio_pkt_data += len1;
      audio_pkt_size -= len1;
      data_size = 0;
      if(got_frame) {
        data_size = av_samples_get_buffer_size(NULL,
                aCodecCtx->channels,
                m_audio_frame->nb_samples,
                aCodecCtx->sample_fmt,
                1);
        /* NOTE(review): no bound check against buf_size (assert is
         * commented out); copies data[0] only — packed format assumed. */
        //assert(data_size <= buf_size);
        memcpy(audio_buf, m_audio_frame->data[0], data_size);
      }
      if(data_size <= 0) {
        /* No data yet, get more frames */
        continue;
      }
      /* We have data, return it and come back for more later */
      return data_size;
    }
    if(pkt.data)
      av_packet_unref(&pkt);
    if(quit) {
      return -1;
    }
    if(packet_queue_get(&m_audio_q, &pkt, 1) < 0) {
      return -1;
    }
    audio_pkt_data = pkt.data;
    audio_pkt_size = pkt.size;
  }
}
int audio_decode_frame( void ) { //printf( "In Audio decode frame : Thread \n" ); SDL_Event quit_audio_event; quit_audio_event.type = QUIT_AUDIO_EVENT; PacketQueue *pAQueue = &( gMedia->audioQueue ) ; static AVPacket packet; AVFrame *pFrame = avcodec_alloc_frame() ; int pkt_bytes_decd = 0; int audio_data_size = 0; int frame_fin = 0; if ( packet.size == 0 ) { if ( !packet_queue_get( pAQueue, &packet ) ) { SDL_PushEvent( &quit_audio_event ); av_free( pFrame ); return -1; } } while ( packet.size > 0 ) { // printf("Size of packet is %d\n",packet.size); pkt_bytes_decd = avcodec_decode_audio4( gMedia->aCodecContext, pFrame, &frame_fin, &packet ); printf( "%d bytes from packet decoded\n", pkt_bytes_decd ); // printf("Format of Decoded frame is %d\n",pFrame->format); // printf("Format of audio is %d\n",pFrame->nb_samples); // aud_frame_pts = pFrame->pkt_pts ; //printf( " audio frame : pts is %" PRId64 "\n", aud_frame_pts ); if ( pkt_bytes_decd < 0 ) { /* if error, skip packet */ break; } if ( frame_fin ) { audio_data_size = create_channel_data( pFrame ); packet.size -= pkt_bytes_decd; av_free( pFrame ); return audio_data_size ; } } /* if ( pkt->pts != AV_NOPTS_VALUE ) { gMedia->audio_clock = av_q2d( gMedia->pFormatContext-> streams[aud_stream_index] ) * pkt->pts; } */ return 1; //Never comes here }
/*
 * Decode one audio chunk from the global audioq into audio_buf.
 * Returns bytes produced, or -1 on quit / queue abort.
 */
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size)
{
    //ffplay_info("Start.\n");
    static AVPacket pkt;                    /* current packet */
    static uint8_t *audio_pkt_data = NULL;  /* read cursor (bookkeeping) */
    static int audio_pkt_size = 0;          /* bytes remaining */
    int len1, data_size;

    for(;;) {
        while(audio_pkt_size > 0) {
            data_size = buf_size;  /* in: capacity, out: bytes produced */
            /* NOTE(review): decode_audio3 consumes from pkt.data, but only
             * audio_pkt_data/size are advanced below — pkt itself is never
             * advanced; verify multi-frame packets decode correctly. */
            len1 = avcodec_decode_audio3(aCodecCtx, (int16_t *)audio_buf,
                    &data_size, &pkt);
            ffplay_info("audio_buf = 0x%8x, data_size = %d, pkt = 0x%8x\n",audio_buf,data_size,&pkt);
            if(len1 < 0) {
                /* if error, skip frame */
                audio_pkt_size = 0;
                break;
            }
            audio_pkt_data += len1;
            audio_pkt_size -= len1;
            if(data_size <= 0) {
                /* No data yet, get more frames */
                continue;
            }
            /* We have data, return it and come back for more later */
            return data_size;
        }
        if(pkt.data) {
            ffplay_info("Here.\n");
            av_free_packet(&pkt);
        }
        if(quit) {
            ffplay_info("Here.\n");
            return -1;
        }
        if(packet_queue_get(&audioq, &pkt, 1) < 0) {
            ffplay_info("Here.\n");
            return -1;
        }
        audio_pkt_data = pkt.data;
        audio_pkt_size = pkt.size;
    }
    //ffplay_info("end.\n");
}
int video_thread(void *arg) { VideoState *is = (VideoState *)arg; AVPacket pkt1, *packet = &pkt1; int frameFinished; AVFrame *pFrame; double pts; pFrame = avcodec_alloc_frame(); for(;;) { if(packet_queue_get(&is->videoq, packet, 1) < 0) { // means we quit getting packets break; } if(packet->data == flush_pkt.data) { avcodec_flush_buffers(is->video_st->codec); continue; } pts = 0; // Save global pts to be stored in pFrame in first call global_video_pkt_pts = packet->pts; // Decode video frame avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished, packet); if(packet->dts == AV_NOPTS_VALUE && pFrame->opaque && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE) { pts = *(uint64_t *)pFrame->opaque; } else if(packet->dts != AV_NOPTS_VALUE) { pts = packet->dts; } else { pts = 0; } pts *= av_q2d(is->video_st->time_base); // Did we get a video frame? if(frameFinished) { pts = synchronize_video(is, pFrame, pts); if(queue_picture(is, pFrame, pts) < 0) { break; } } av_free_packet(packet); } av_free(pFrame); return 0; }
/*
 * Decode one audio frame into audio_buf.  pkt holds the packet fetched
 * from the queue; pkt_temp is the consumable working copy.  Returns the
 * number of bytes copied, -1 on quit/abort, or AVERROR(ENOMEM).
 *
 * FIX: the original memcpy'd frame->linesize[0] bytes but returned
 * data_size; linesize[0] is the padded allocation stride and need not
 * equal the payload size, so the copy and the reported length disagreed.
 * Both now use the av_samples_get_buffer_size() result.
 */
int audio_decode_frame(AVCodecContext *aCodecCtx, AVPacket *pkt,
        AVPacket *pkt_temp, AVFrame *frame, uint8_t *audio_buf)
{
    int len1, data_size;
    int got_frame = 0;
    int new_packet = 0;

    while(1) {
        while(pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
            if(!frame) {
                if(!(frame = avcodec_alloc_frame()))
                    return AVERROR(ENOMEM);
            } else {
                avcodec_get_frame_defaults(frame);
            }
            new_packet = 0;
            got_frame = 0;
            len1 = avcodec_decode_audio4(aCodecCtx, frame, &got_frame, pkt_temp);
            if(len1 < 0) {
                /* if error, skip frame */
                pkt_temp->size = 0;
                av_free_packet(pkt_temp);
                continue;
            }
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if(!got_frame) {
                /* stop sending empty packets if the decoder is finished */
                continue;
            }
            data_size = av_samples_get_buffer_size(NULL, aCodecCtx->channels,
                    frame->nb_samples, aCodecCtx->sample_fmt, 1);
            /* FIX: copy data_size bytes (was frame->linesize[0]) */
            memcpy(audio_buf, frame->data[0], data_size);
            return data_size;
        }
        if(pkt->data)
            av_free_packet(pkt);
        memset(pkt_temp, 0, sizeof(*pkt_temp));
        if(quit) {
            return -1;
        }
        if((new_packet = packet_queue_get(&audioq, pkt, 1)) < 0) {
            return -1;
        }
        *pkt_temp = *pkt;
    }
}
/*
 * Video decoding thread.  Also owns the RGBA conversion frame stored on
 * the VideoState (allocated here, freed on exit).  Returns 0.
 */
int video_thread(void *arg) {
  VideoState *is = (VideoState *)arg;
  AVPacket pkt1, *packet = &pkt1;
  int len1, frameFinished;
  AVFrame *pFrame;
  double pts;

  pFrame = avcodec_alloc_frame();
  /* Scratch frame for RGBA conversion, sized to the stream dimensions. */
  is->rgbaFrame = avcodec_alloc_frame();
  avpicture_alloc ((AVPicture *)is->rgbaFrame, PIX_FMT_RGBA,
      is->video_st->codec->width, is->video_st->codec->height);

  for(;;) {
    if(packet_queue_get(&is->videoq, packet, 1) < 0) {
      // means we quit getting packets
      break;
    }
    pts = 0;
    // Save global pts to be stored in pFrame
    global_video_pkt_pts = packet->pts;
    // Decode video frame
    len1 = avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished,
                packet);
    /* Timestamp preference: stashed opaque pts, then dts, else 0. */
    if(packet->dts == AV_NOPTS_VALUE
       && pFrame->opaque && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE) {
      pts = (double)(*(uint64_t *)pFrame->opaque);
    } else if(packet->dts != AV_NOPTS_VALUE) {
      pts = (double)packet->dts;
    } else {
      pts = 0;
    }
    pts *= av_q2d(is->video_st->time_base);

    // Did we get a video frame?
    if(frameFinished) {
      pts = synchronize_video(is, pFrame, pts);
      if(queue_picture(is, pFrame, pts) < 0) {
        break;
      }
    }
    av_free_packet(packet);
  }
  /* NOTE(review): closing SDL audio from the *video* thread is unusual —
   * confirm this is the intended shutdown ordering. */
  SDL_CloseAudio();
  av_free(pFrame);
  avpicture_free((AVPicture *)is->rgbaFrame);
  av_free(is->rgbaFrame);
  return 0;
}
/*
 * Decode one audio frame from ctx->audioq.  On success, points *buf at
 * the decoder-owned sample buffer (valid until the next call) and returns
 * its size in bytes; returns -1 on quit / queue abort.
 *
 * FIX: the original returned frame->data[0] without checking got_frame,
 * handing the caller stale/uninitialized frame contents whenever a decode
 * consumed input but completed no frame.  Such passes now loop instead.
 */
int decode_audio_frame (PlayerContext *ctx, uint8_t **buf)
{
  static AVPacket pkt, cur_pkt;   /* pkt: consumable view; cur_pkt: owner */
  static AVFrame *frame;
  int got_frame, decoded_bytes;

  if (!frame) {
    frame = avcodec_alloc_frame ();
    if (!frame)
      return AVERROR (ENOMEM);
  }

  for (;;) {
    while (pkt.size > 0) {
      avcodec_get_frame_defaults (frame);
      got_frame = 0;
      decoded_bytes = avcodec_decode_audio4 (ctx->audio_codec, frame,
                                             &got_frame, &pkt);
      if (decoded_bytes < 0) {
        // error, skip the frame
        pkt.size = 0;
        break;
      }
      pkt.data += decoded_bytes;
      pkt.size -= decoded_bytes;
      if (!got_frame)
        continue;   /* FIX: no frame completed yet — keep feeding */
      *buf = frame->data[0];
      return av_samples_get_buffer_size (NULL, frame->channels,
                                         frame->nb_samples,
                                         frame->format, 1);
    }

    // free the current packet
    if (cur_pkt.data)
      av_free_packet (&cur_pkt);
    memset (&pkt, 0, sizeof (pkt));

    if (quit)
      return -1;

    // read next packet
    if (packet_queue_get (&ctx->audioq, &cur_pkt, 1) < 0)
      return -1;
    pkt = cur_pkt;
  }
}
/*
 * Decode one audio frame from the global queue and resample it to packed
 * s16 stereo into audio_buf via swr_convert().  Returns the number of
 * resampled bytes, or -1 on quit / queue abort.
 *
 * FIX: the original returned resampled_data_size even when got_frame was
 * 0, handing the caller 0 (or a stale count from a previous iteration)
 * without writing audio_buf.  Frameless passes now continue decoding.
 */
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size) {
  static AVPacket pkt;                    /* current packet */
  static uint8_t *audio_pkt_data = NULL;  /* read cursor (bookkeeping) */
  static int audio_pkt_size = 0;          /* bytes remaining */
  static AVFrame frame;
  int len1, resampled_data_size = 0;

  for (;;) {
    while (audio_pkt_size > 0) {
      int got_frame = 0;
      len1 = avcodec_decode_audio4(aCodecCtx, &frame, &got_frame, &pkt);
      if (len1 < 0) {
        /* if error, skip frame */
        audio_pkt_size = 0;
        break;
      }
      audio_pkt_data += len1;
      audio_pkt_size -= len1;
      if (!got_frame) {
        continue;  /* FIX: nothing decoded yet — don't return */
      }
      /* Convert the decoded frame to packed s16; swr_ctx is configured
       * elsewhere.  Output size = samples * 2 channels * bytes/sample. */
      uint8_t **out = &audio_buf;
      const uint8_t **in = (const uint8_t **)frame.extended_data;
      int len2 = swr_convert(swr_ctx, out, frame.nb_samples, in, frame.nb_samples);
      resampled_data_size = len2 * 2 * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
      /* We have data, return it and come back for more later */
      return resampled_data_size;
    }
    if (pkt.data)
      av_free_packet(&pkt);
    if (quit) {
      return -1;
    }
    if (packet_queue_get(&audioq, &pkt, 1) < 0) {
      return -1;
    }
    audio_pkt_data = pkt.data;
    audio_pkt_size = pkt.size;
  }
}
int video_thread(void *arg) { VideoState *is = (VideoState *)arg; AVPacket pkt1, *packet = &pkt1; int frameFinished; AVFrame *pFrame; double pts; pFrame = av_frame_alloc(); for(;;) { if(packet_queue_get(&is->videoq, packet, 1) < 0) { // means we quit getting packets break; } if(packet_queue_get(&is->videoq, packet, 1) < 0) { // means we quit getting packets break; } pts = 0; // Decode video frame avcodec_decode_video2(is->video_ctx, pFrame, &frameFinished, packet); if((pts = av_frame_get_best_effort_timestamp(pFrame)) == AV_NOPTS_VALUE) { pts = av_frame_get_best_effort_timestamp(pFrame); } else { pts = 0; } pts *= av_q2d(is->video_st->time_base); // Did we get a video frame? if(frameFinished) { pts = synchronize_video(is, pFrame, pts); if(queue_picture(is, pFrame, pts) < 0) { break; } } av_free_packet(packet); } av_frame_free(&pFrame); return 0; }
/*
 * Decode one audio chunk from the global audio_queue into audio_buffer.
 * Advances the global audio_clock from packet pts / decoded duration.
 * Returns bytes produced, or -1 on stop / queue abort.
 */
int video_decode_audio_frame( AVCodecContext *context, uint8_t *buffer, int buffer_size )
{
    static AVPacket packet;   /* packet currently being drained */
    int used, data_size;

    for(;;) {
        while( audio_packet_size > 0 ) {
            data_size = buffer_size;  /* in: capacity, out: bytes produced */
            /* Wrap the remaining bytes in a temporary packet for the
             * packet-based decode_audio3 API. */
            AVPacket avp;
            av_init_packet( &avp );
            avp.data = audio_packet_data;
            avp.size = audio_packet_size;
            used = avcodec_decode_audio3( context, (int16_t *)audio_buffer,
                        &data_size, &avp );
            if( used < 0 ) {
                /* if error, skip frame */
                audio_packet_size = 0;
                break;
            }
            audio_packet_data += used;
            audio_packet_size -= used;
            if( data_size <= 0 ) {
                /* No data yet, get more frames */
                continue;
            }
            /* Advance the clock by the duration of the decoded samples
             * (s16 assumption: 2 bytes per sample per channel). */
            audio_clock += (double)data_size /
                (double)(format_context->streams[audio_stream]->codec->sample_rate *
                    (2 * format_context->streams[audio_stream]->codec->channels));
            /* We have data, return it and come back for more later */
            return data_size;
        }
        if( packet.data )
            av_free_packet( &packet );
        if( stop ) {
            audio_running = 0;
            return -1;
        }
        if( packet_queue_get( &audio_queue, &packet, 1 ) < 0 )
            return -1;
        audio_packet_data = packet.data;
        audio_packet_size = packet.size;
        /* Seed the audio clock from the packet pts when available. */
        if( packet.pts != AV_NOPTS_VALUE ) {
            audio_clock = packet.pts *
                av_q2d( format_context->streams[audio_stream]->time_base );
        }
    }
}
/*
 * Video decoding thread: drains is->videoq, handles flush packets from
 * seeks, decodes frames and queues them.  The pts-derivation logic is
 * currently commented out, so pts stays 0 before synchronize_video().
 */
int video_thread(void *arg) {
  VideoState *is = (VideoState*)arg;
  AVPacket pkt1, *packet = &pkt1;
  int len1, frameFinished;
  AVFrame *pFrame;
  double pts;

  pFrame = avcodec_alloc_frame();
  for(;;) {
    //printf("video_thread loop 1\n");
    if(packet_queue_get(&is->videoq, packet, 1) < 0) {
      fprintf(stderr, "%d: packet_queue_get errror\n", __LINE__);
      break;
    }
    //printf("video_thread loop 2\n");
    /* Flush packet marks a seek: reset the decoder. */
    if(packet->data == flush_pkt.data) {
      avcodec_flush_buffers(is->video_st->codec);
      continue;
    }
    //printf("video_thread loop 3\n");
    pts = 0;
    /* Publish the packet pts for the codec's buffer callback. */
    global_video_pkt_pts = packet->pts;
    //printf("video_thread loop 4\n");
    len1 = avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished,
                packet);
    /* NOTE(review): pts derivation disabled — frames are queued with
     * pts = 0 (as adjusted by synchronize_video). */
    /*
    if(packet->dts == AV_NOPTS_VALUE
       && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE) {
      pts = *(uint64_t*)pFrame->opaque;
    } else if(packet->dts != AV_NOPTS_VALUE) {
      pts = packet->dts;
    } else {
      pts = 0;
    }
    pts *= av_q2d(is->video_st->time_base);
    */
    //printf("video_thread loop 5\n");
    if(frameFinished) {
      //printf("video_thread loop 6\n");
      pts = synchronize_video(is, pFrame, pts);
      //printf("video_thread loop 7\n");
      if(queue_picture(is, pFrame, pts) < 0) {
        //printf("video_thread loop 8\n");
        break;
      }
    }
    //printf("video_thread loop 6\n");
    av_free_packet(packet);
  }
  av_free(pFrame);
  //printf("video_thread loop end\n");
  return 0;
}
/*
 * Decode one audio frame from is->audioq, resample it to s16/44100 via
 * AudioResampling() into audio_buf, report pts and advance the audio
 * clock.  Returns resampled byte count, or -1 on quit / queue abort.
 */
int audio_decode_frame(VideoState *is, uint8_t *audio_buf, int buf_size,double *pts_ptr)
{
    AVCodecContext *aCodecCtx = is->audio_st->codec;
    static AVPacket pkt;                    /* current packet */
    static uint8_t *audio_pkt_data = NULL;  /* read cursor (bookkeeping) */
    static int audio_pkt_size = 0;          /* bytes remaining */
    static AVFrame frame;
    int len1, data_size = 0;
    double pts;

    for(;;) {
        while(audio_pkt_size > 0) {
            int got_frame = 0;
            len1 = avcodec_decode_audio4(aCodecCtx, &frame, &got_frame, &pkt);
            if(len1 < 0) {
                /* if error, skip frame */
                audio_pkt_size = 0;
                break;
            }
            /* NOTE(review): AudioResampling is called even when got_frame
             * is 0 — confirm it tolerates an empty frame. */
            data_size = AudioResampling(aCodecCtx,&frame,
                    AV_SAMPLE_FMT_S16,aCodecCtx->channels, 44100, audio_buf);
            audio_pkt_data += len1;
            audio_pkt_size -= len1;
            if(data_size <= 0) {
                continue;
            }
            pts = is->audio_clock;
            *pts_ptr = pts;
            /* 2 bytes per sample (s16) times channel count. */
            int n = 2 * is->audio_st->codec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * is->audio_st->codec->sample_rate);
            /* We have data, return it and come back for more later */
            return data_size;
        }
        if(pkt.data)
            av_free_packet(&pkt);
        if(is->quit) {
            return -1;
        }
        if(packet_queue_get(&is->audioq, &pkt, 1) < 0) {
            return -1;
        }
        audio_pkt_data = pkt.data;
        audio_pkt_size = pkt.size;
        /* Seed the audio clock from the packet pts when available. */
        if(pkt.pts != AV_NOPTS_VALUE){
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt.pts;
        }
    }
}
/*
 * Decode one audio chunk from the global audioq into audio_buf using the
 * packet-based decode_audio3 API (the decode_audio4 path is compiled out).
 * Returns bytes produced, or -1 on quit / queue abort.
 */
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size)
{
    static AVPacket pkt;                    /* current packet */
    static uint8_t *audio_pkt_data = NULL;  /* read cursor (bookkeeping) */
    static int audio_pkt_size = 0;          /* bytes remaining */
    /* NOTE(review): pFrame is allocated on every call and only used by the
     * disabled #if 0 path — wasted per-call allocation. */
    AVFrame *pFrame = av_frame_alloc();
    int got_frame;
    int len1, data_size;

    for(;;) {
        while(audio_pkt_size > 0) {
            data_size = buf_size;  /* in: capacity, out: bytes produced */
#if 0
            len1 = avcodec_decode_audio4(aCodecCtx, pFrame, &got_frame, &pkt);
#else
            len1 = avcodec_decode_audio3(aCodecCtx, (int16_t *)audio_buf,
                    &data_size, &pkt);
#endif
            if(len1 < 0) {
                /* if error, skip frame */
                audio_pkt_size = 0;
                break;
            }
            audio_pkt_data += len1;
            audio_pkt_size -= len1;
            if(data_size <= 0) {
                /* No data yet, get more frames */
                continue;
            }
            /* We have data, return it and come back for more later */
            av_frame_free(&pFrame);
            return data_size;
        }
        if(pkt.data)
            av_free_packet(&pkt);
        if(quit) {
            av_frame_free(&pFrame);
            return -1;
        }
        if(packet_queue_get(&audioq, &pkt, 1) < 0) {
            av_frame_free(&pFrame);
            return -1;
        }
        audio_pkt_data = pkt.data;
        audio_pkt_size = pkt.size;
        printf("\r\n audio_pkt_size:%d!!\r\n",audio_pkt_size);
    }
}
/*
 * Video decoding thread.  Owns a YUV420P scratch frame (pFrameYUV plus
 * its out_buffer) used by queue_picture for conversion.  Handles flush
 * packets from seeks.  Returns 0.
 * NOTE(review): out_buffer is passed to avpicture_fill but never freed
 * here — confirm ownership/cleanup elsewhere.
 */
int video_thread(void* arg) {
    VideoState* is = (VideoState*) arg;
    AVPacket pkt1, *packet = &pkt1;
    int frameFinished;
    AVFrame* pFrame, *pFrameYUV;
    uint8_t *out_buffer;
    double pts;

    pFrame = av_frame_alloc();
    pFrameYUV = av_frame_alloc();
    /* Backing store for the YUV conversion target. */
    out_buffer = (uint8_t*) av_malloc(
            avpicture_get_size(AV_PIX_FMT_YUV420P,
                    is->video_ctx->width, is->video_ctx->height));
    avpicture_fill((AVPicture*) pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P,
            is->video_ctx->width, is->video_ctx->height);

    for (;;) {
        if (packet_queue_get(&is->videoq, packet, 1, 1) < 0) {
            break;
        }
        /* Flush packet marks a seek: reset the decoder. */
        if (packet->data == flush_pkt.data) {
            avcodec_flush_buffers(is->video_ctx);
            continue;
        }
        pts = 0;
        avcodec_decode_video2(is->video_ctx, pFrame, &frameFinished, packet);
        /* Best-effort timestamp, 0 when unset. */
        if ((pts = av_frame_get_best_effort_timestamp(pFrame)) == AV_NOPTS_VALUE) {
            pts = 0;
        }
        pts *= av_q2d(is->video_st->time_base);
        printf("queue_picture frameFinished=%d packet->size=%d pts=%lf\n",
                frameFinished, packet->size, pts);
        if (frameFinished) {
            pts = synchronize_video(is, pFrame, pts);
            if (queue_picture(is, pFrame, pFrameYUV, pts) < 0) {
                break;
            }
        }
        av_free_packet(packet);
    }
    av_frame_free(&pFrame);
    av_frame_free(&pFrameYUV);
    return 0;
}
/* decode one audio frame and returns its uncompressed size */
/*
 * Runs on the SDL audio callback thread.  Drains movie->audio_pkt,
 * decoding into audio_buf; maintains movie->audio_clock from packet pts
 * (or by accumulating sample durations when pts is absent) and reports
 * the clock via *pts_ptr.  Returns bytes decoded, or -1 on pause/abort.
 * Uses the pre-AVPacket decode API (codec embedded in the stream struct).
 */
static int audio_decode_frame(FFMovie *movie, uint8_t *audio_buf, double *pts_ptr)
{
    /*SDL AUDIO THREAD*/
    AVPacket *pkt = &movie->audio_pkt;
    int len1, data_size;
    double pts;

    for(;;) {
        /* Bail out promptly when paused or shutting down. */
        if (movie->paused || movie->audioq.abort_request || Global_abort_all) {
            return -1;
        }
        while (movie->audio_pkt_size > 0) {
            len1 = avcodec_decode_audio(&movie->audio_st->codec,
                                        (int16_t *)audio_buf, &data_size,
                                        movie->audio_pkt_data, movie->audio_pkt_size);
            if (len1 < 0)
                break;  /* decode error: abandon this packet */
            movie->audio_pkt_data += len1;
            movie->audio_pkt_size -= len1;
            if (data_size > 0) {
                pts = 0;
                if (movie->audio_pkt_ipts != AV_NOPTS_VALUE)
                    pts = (double)movie->audio_pkt_ipts *
                        movie->context->pts_num / movie->context->pts_den;
                /* if no pts, then compute it */
                if (pts != 0) {
                    movie->audio_clock = pts;
                } else {
                    int n;
                    /* 2 bytes per sample times channel count. */
                    n = 2 * movie->audio_st->codec.channels;
                    movie->audio_clock += (double)data_size /
                        (double)(n * movie->audio_st->codec.sample_rate);
                }
                *pts_ptr = movie->audio_clock;
                movie->audio_pkt_ipts = AV_NOPTS_VALUE;
                /* we got samples : we can exit now */
                return data_size;
            }
        }
        /* free previous packet if any */
        if (pkt->destruct)
            av_free_packet(pkt);
        /* read next packet */
        if (packet_queue_get(&movie->audioq, pkt, 1) < 0)
            return -1;
        movie->audio_pkt_data = pkt->data;
        movie->audio_pkt_size = pkt->size;
        movie->audio_pkt_ipts = pkt->pts;
    }
}
/*
 * Decode one audio frame from is->audioq into is->audio_buf.
 * Returns bytes stored, or -1 on quit / queue abort.
 *
 * FIX: data_size was taken from frame.linesize[0], which is the padded
 * allocation stride, not the payload size — the copy and the reported
 * length could both be wrong.  av_samples_get_buffer_size() (previously
 * commented out) gives the exact byte count for the decoded samples.
 */
int audio_decode_frame(VideoState *is)
{
    int len1, data_size = 0;
    AVPacket *pkt = &is->audio_pkt;

    for (;;) {
        while (is->audio_pkt_size > 0) {
            int got_frame = 0;
            len1 = avcodec_decode_audio4(is->audio_st->codec, &is->audio_frame,
                    &got_frame, pkt);
            if (len1 < 0) {
                /* if error, skip frame */
                is->audio_pkt_size = 0;
                break;
            }
            if (got_frame) {
                /* FIX: exact sample-buffer size instead of linesize[0]. */
                data_size = av_samples_get_buffer_size(NULL,
                        is->audio_st->codec->channels,
                        is->audio_frame.nb_samples,
                        is->audio_st->codec->sample_fmt, 1);
                memcpy(is->audio_buf, is->audio_frame.data[0], data_size);
            }
            is->audio_pkt_data += len1;
            is->audio_pkt_size -= len1;
            if (data_size <= 0) {
                /* No data yet, get more frames */
                continue;
            }
            /* We have data, return it and come back for more later */
            return data_size;
        }
        if (pkt->data)
            av_free_packet(pkt);
        if (is->quit) {
            return -1;
        }
        /* next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0) {
            return -1;
        }
        is->audio_pkt_data = pkt->data;
        is->audio_pkt_size = pkt->size;
    }
    return 0;
}
/*
 * Decode one audio frame from the global audioq into audio_buf.
 * Returns bytes stored, or -1 on quit / queue abort.
 *
 * FIX: data_size was declared uninitialized and only assigned when
 * got_frame was set; the first frameless decode read it uninitialized
 * (UB), and later frameless decodes could return a *stale* size without
 * refreshing audio_buf.  It is now zeroed before every decode.
 */
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size) {
  static AVPacket pkt;                    /* current packet */
  static uint8_t *audio_pkt_data = NULL;  /* read cursor (bookkeeping) */
  static int audio_pkt_size = 0;          /* bytes remaining */
  static AVFrame frame;
  int len1, data_size = 0;                /* FIX: initialized */

  for (;;) {
    while (audio_pkt_size > 0) {
      int got_frame = 0;
      data_size = 0;  /* FIX: reset so a frameless pass never returns stale size */
      len1 = avcodec_decode_audio4(aCodecCtx, &frame, &got_frame, &pkt);
      if (len1 < 0) {
        // if error, skip frame
        audio_pkt_size = 0;
        break;
      }
      audio_pkt_data += len1;
      audio_pkt_size -= len1;
      if (got_frame) {
        data_size = av_samples_get_buffer_size(NULL, aCodecCtx->channels,
                frame.nb_samples, aCodecCtx->sample_fmt, 1);
        memcpy(audio_buf, frame.data[0], data_size);
      }
      if (data_size <= 0) {
        // No data yet, get more frames
        continue;
      }
      // We have data, return it and come back for more later
      return data_size;
    }
    if (pkt.data) {
      av_free_packet(&pkt);
    }
    if (quit) {
      return -1;
    }
    if (packet_queue_get(&audioq, &pkt, 1) < 0) {
      return -1;
    }
    audio_pkt_data = pkt.data;
    audio_pkt_size = pkt.size;
  }
}
/*
 * Decode one audio chunk from the global audioq into audio_buf.
 * Returns bytes produced, or -1 on quit / queue abort.
 *
 * FIX: right after the decode call the original re-assigned
 * audio_pkt_data/size from pkt, undoing the `+= len1 / -= len1` consume
 * bookkeeping — any packet holding more than one frame made the inner
 * loop spin forever on the same data.  The reassignment is removed; the
 * cursors are set only when a new packet is fetched.
 */
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size) {
  static AVPacket pkt;                    /* current packet */
  static uint8_t *audio_pkt_data = NULL;  /* read cursor */
  static int audio_pkt_size = 0;          /* bytes remaining */
  int len1, data_size;

  for(;;) {
    while(audio_pkt_size > 0) {
      data_size = buf_size;  /* in: capacity, out: bytes produced */
      len1 = avcodec_decode_audio3(aCodecCtx, (int16_t *)audio_buf,
                                   &data_size, &pkt);
      if(len1 < 0) {
        /* if error, skip frame */
        audio_pkt_size = 0;
        break;
      }
      audio_pkt_data += len1;
      audio_pkt_size -= len1;
      if(data_size <= 0) {
        /* No data yet, get more frames */
        continue;
      }
      /* We have data, return it and come back for more later */
      return data_size;
    }
    if(pkt.data)
      av_free_packet(&pkt);
    if(quit) {
      return -1;
    }
    if(packet_queue_get(&audioq, &pkt, 1) < 0) {
      return -1;
    }
    audio_pkt_data = pkt.data;
    audio_pkt_size = pkt.size;
  }
}
/*
 * Video decoding thread: drains is->videoq, decodes each packet into
 * is->frames[0] and initializes the picture via init_picture().  The
 * transcode hand-off machinery is currently commented out.  Returns NULL.
 */
void *video_thread(void *arg)
{
    //fprintf(stdout, "[FFmpeg-decode thread] video_thread created\n");
    VideoState *is = (VideoState *)arg;
    AVPacket pkt1, *packet = &pkt1;
    int frameFinished=0;
    int index=0;
    //pthread_create(&is->trans_tid, NULL, transcode_thread, (void *)is);
    //fprintf(stdout, "[FFmpeg-video thread] decode frame\n");
    /* NOTE(review): index is assigned here and in the commented block but
     * never otherwise used. */
    index=get_frame_status(is,FRAME_WAIT_WRITE);
    for(;;) {
        if(is->quit) {
            break;
        }
        if(packet_queue_get(&is->videoq, packet, 1) < 0) {
            // means we quit getting packets
            break;
        }
        // Decode video frame
        avcodec_decode_video2(is->video_st->codec, is->frames[0],
                &frameFinished, packet);
        //fprintf(stdout,"Current packs in queue:%d\n",is->videoq.nb_packets);
        if(frameFinished) {
            init_picture(is, 0);
            // if(index!=FRAME_SIZE)
            // {
            //     pthread_mutex_lock(&is->mutex);
            //     pthread_cond_signal(&is->cond);
            //     set_frame_status(is, FRAME_WAIT_TRANSCODE);
            //     pthread_mutex_unlock(&is->mutex);
            // }
            // index=get_frame_status(is,FRAME_WAIT_WRITE);
        }
        av_free_packet(packet);
    }
    fprintf(stdout, "[FFmpeg-video thread] thread terminated\n");
    return NULL;
}
/*
 * Decode one audio chunk from is->audioq into audio_buf, report its pts
 * and advance is->audio_clock.  Returns bytes produced, or -1 on quit /
 * queue abort.
 *
 * FIX: the no-data check was `data_size < 0`; a decode that produced
 * exactly 0 bytes fell through and was returned to the caller as a
 * successful 0-byte read.  The canonical `<= 0` test is restored.
 */
int audio_decode_frame(VideoState*is, int16_t *audio_buf, int buf_size, double *pts_ptr)
{
    int len1, data_size, n;
    AVPacket *pkt = &is->audio_pkt;
    double pts;
    int index = 0;

    for (;;) {
        while (is->audio_pkt_size > 0) {
            /* NOTE(review): capacity is hard-coded; if the caller's buffer
             * is smaller than AVCODEC_MAX_AUDIO_FRAME_SIZE*3/2 this can
             * overflow audio_buf — confirm buf_size vs this constant. */
            data_size = AVCODEC_MAX_AUDIO_FRAME_SIZE*3/2;
            len1 = avcodec_decode_audio3(is->audio_st->codec, audio_buf,
                    &data_size, pkt);
            if (len1 < 0) {
                /* decode error: drop the rest of this packet */
                is->audio_pkt_size = 0;
                break;
            }
            is->audio_pkt_data += len1;
            is->audio_pkt_size -= len1;
            if (data_size <= 0) {  /* FIX: was `< 0` */
                continue;
            }
            index += data_size;
            pts = is->audio_clock;
            *pts_ptr = pts;
            /* 2 bytes per sample (s16) times channel count. */
            n = 2 * is->audio_st->codec->channels;
            is->audio_clock += (double)data_size /
                (double)(n*is->audio_st->codec->sample_rate);
            return data_size;
        }
        if (pkt->data) {
            av_free_packet(pkt);
        }
        if (is->quit) {
            return -1;
        }
        if (packet_queue_get(&is->audioq, pkt, 1) < 0) {
            return -1;
        }
        is->audio_pkt_data = pkt->data;
        is->audio_pkt_size = pkt->size;
        /* Seed the clock from the packet pts when present. */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
int video_thread(void *arg) { VideoState *is = (VideoState *)arg; AVPacket pkt1, *packet = &pkt1; int len1, frameFinished; AVFrame *pFrame; double pts; pFrame = avcodec_alloc_frame(); for(;;) { // if we stopped getting packets while( packet_queue_get(&is->videoq, packet, 1) == 0 ) { } pts = 0; // Save global pts to be stored in pFrame in first call global_video_pkt_pts = packet->pts; // Decode video frame //frameCount++; len1 = avcodec_decode_video2(is->video_ctx, pFrame, &frameFinished, packet ); if(packet->dts != AV_NOPTS_VALUE) { pts = packet->dts; } else { pts = 0; } pts *= av_q2d(is->video_ctx->time_base); // Frame is decoded, queue it to be played if(frameFinished) { pts = synchronize_video( is, pFrame, pts); if(queue_picture(is, pFrame, pts) < 0) { break; } } // Cleanup... av_free_packet(packet); } av_free(pFrame); return 0; }
/*
 * Pull compressed audio from the global queue and decode it into
 * audio_buf.  Returns the number of bytes written, or -1 once playback
 * is quitting or the queue has been shut down.  Decoder state lives in
 * statics because one packet can span several callback invocations.
 */
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size) {
  static AVPacket pkt;                    /* packet being drained */
  static uint8_t *audio_pkt_data = NULL;  /* read cursor into pkt.data */
  static int audio_pkt_size = 0;          /* bytes left to decode */

  for (;;) {
    /* Drain whatever remains of the current packet first. */
    while (audio_pkt_size > 0) {
      int out_size = buf_size;  /* in: capacity, out: bytes decoded */
      /* @see http://dranger.com/ffmpeg/functions.html#avcodec_decode_audio2 */
      int consumed = avcodec_decode_audio2(aCodecCtx, (int16_t *)audio_buf,
                                           &out_size,
                                           audio_pkt_data, audio_pkt_size);
      if (consumed < 0) {
        /* Decode error: abandon the remainder of this packet. */
        audio_pkt_size = 0;
        break;
      }
      audio_pkt_data += consumed;
      audio_pkt_size -= consumed;
      if (out_size > 0)
        return out_size;        /* got samples — hand them to the caller */
      /* Nothing produced yet; keep feeding the decoder. */
    }

    /* Previous packet fully consumed: release it and fetch the next. */
    if (pkt.data)
      av_free_packet(&pkt);
    if (quit)
      return -1;
    if (packet_queue_get(&audioq, &pkt, 1) < 0)
      return -1;
    audio_pkt_data = pkt.data;
    audio_pkt_size = pkt.size;
  }
}
/*
 * Video decoding thread: drains is->videoq, decodes packets, derives a
 * pts from the packet dts (opaque-pts path is commented out) and queues
 * finished frames.  Returns 0.
 */
int video_thread(void *arg) {
  VideoState *is = (VideoState *)arg;
  AVPacket pkt1, *packet = &pkt1;
  int frameFinished;
  AVFrame *pFrame;
  pFrame = av_frame_alloc();
  double pts;

  for(;;) {
    if(packet_queue_get(&is->videoq, packet, 1) < 0) {
      // means we quit getting packets
      break;
    }
    pts = 0;
    /* Publish the packet pts for the codec's buffer callback. */
    global_video_pkt_pts = packet->pts;
    // Decode video frame
    avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished, packet);
    /* if(packet->dts == AV_NOPTS_VALUE */
    /*    && pFrame->opaque && *(uint64_t*)pFrame->opaque != (uint64_t)AV_NOPTS_VALUE) { */
    /*   pts = *(uint64_t *)pFrame->opaque; */
    /* } else if(packet->dts != AV_NOPTS_VALUE) { */
    /*   pts = packet->dts; */
    /* } else { */
    /*   pts = 0; */
    /* } */
    if(packet->dts !=AV_NOPTS_VALUE)
      pts = packet->dts;
    //转化pts以秒显示  (convert pts to seconds)
    pts *= av_q2d(is->video_st->time_base);

    // Did we get a video frame?
    if(frameFinished) {
      /* NOTE(review): synchronize_video's return value is discarded here,
       * unlike sibling threads that assign it back to pts — verify. */
      synchronize_video(is,pFrame,pts);
      if(queue_picture(is, pFrame,pts) < 0) {
        break;
      }
    }
    av_free_packet(packet);
  }
  av_free(pFrame);
  return 0;
}