static double update_video_nocorrect_pts(struct MPContext *mpctx) { struct sh_video *sh_video = mpctx->sh_video; double frame_time = 0; while (1) { // In nocorrect-pts mode there is no way to properly time these frames if (load_next_vo_frame(mpctx, false)) break; frame_time = sh_video->next_frame_time; if (mpctx->restart_playback) frame_time = 0; struct demux_packet *pkt = video_read_frame(mpctx); if (!pkt) return -1; if (mpctx->sh_audio) mpctx->delay -= frame_time; // video_read_frame can change fps (e.g. for ASF video) update_fps(mpctx); int framedrop_type = check_framedrop(mpctx, frame_time); void *decoded_frame = decode_video(sh_video, pkt, framedrop_type, sh_video->pts); talloc_free(pkt); if (decoded_frame) { filter_video(mpctx, decoded_frame); } break; } return frame_time; }
// Reconfigure the video chain and the VO on a format change. This is separate, // because we wait with the reconfig until the currently buffered video has // finished displaying. Otherwise, we'd resize the window and then wait for the // video finishing, which would result in a black window for that frame. // Does nothing if there was no pending change. void video_execute_format_change(struct MPContext *mpctx) { struct dec_video *d_video = mpctx->d_video; struct mp_image *decoded_frame = d_video->waiting_decoded_mpi; d_video->waiting_decoded_mpi = NULL; if (decoded_frame) filter_video(mpctx, decoded_frame, true); }
static double update_video_attached_pic(struct MPContext *mpctx) { struct sh_video *sh_video = mpctx->sh_video; // Try to decode the picture multiple times, until it is displayed. if (mpctx->video_out->hasframe) return -1; struct mp_image *decoded_frame = decode_video(sh_video, sh_video->gsh->attached_picture, 0, 0); if (decoded_frame) filter_video(mpctx, decoded_frame); load_next_vo_frame(mpctx, true); mpctx->sh_video->pts = MP_NOPTS_VALUE; return 0; }
/* Advance video by (at most) one frame in correct-pts mode and compute how
 * far the displayed timestamp moved relative to the previous frame.
 *
 * Dispatches to update_video_nocorrect_pts() when --no-correct-pts is set,
 * and to update_video_attached_pic() for attached pictures; otherwise reads
 * one demuxer packet, decodes and filters it, and derives the frame duration
 * from consecutive pts values (with jitter/reset compensation).
 *
 * endpts: cutoff timestamp; frames at or after it are not recorded with
 *         add_frame_pts(). MP_NOPTS_VALUE disables the cutoff.
 * Returns: frame duration in seconds, 0 when no new frame is ready yet (or
 *          the frame was skipped for hr-seek), or -1 when draining produced
 *          nothing more (presumably EOF — confirm against callers).
 */
double update_video(struct MPContext *mpctx, double endpts)
{
    struct sh_video *sh_video = mpctx->sh_video;
    struct vo *video_out = mpctx->video_out;

    // Hand the OSD object to the filter chain before each step.
    sh_video->vfilter->control(sh_video->vfilter, VFCTRL_SET_OSD_OBJ,
                               mpctx->osd); // for vf_sub
    if (!mpctx->opts->correct_pts)
        return update_video_nocorrect_pts(mpctx);
    if (sh_video->gsh->attached_picture)
        return update_video_attached_pic(mpctx);

    double pts;
    // Single-iteration loop: "break" acts as a forward jump to the common
    // timing code below.
    while (1) {
        // If the VO already has the next frame queued, skip decoding.
        if (load_next_vo_frame(mpctx, false))
            break;
        pts = MP_NOPTS_VALUE;
        struct demux_packet *pkt = NULL;
        while (1) {
            pkt = demux_read_packet(mpctx->sh_video->gsh);
            if (!pkt || pkt->len)
                break;
            /* Packets with size 0 are assumed to not correspond to frames,
             * but to indicate the absence of a frame in formats like AVI
             * that must have packets at fixed timecode intervals. */
            talloc_free(pkt);
        }
        if (pkt)
            pts = pkt->pts;
        if (pts != MP_NOPTS_VALUE)
            pts += mpctx->video_offset;
        // Once the packet pts reaches the hr-seek target, stop framedropping
        // so the target frame is actually decoded and shown.
        // NOTE(review): pts may still be MP_NOPTS_VALUE here; whether this
        // comparison then fires depends on MP_NOPTS_VALUE's definition —
        // confirm against the header.
        if (pts >= mpctx->hrseek_pts - .005)
            mpctx->hrseek_framedrop = false;
        // During hr-seek with framedrop enabled, force framedrop type 1.
        int framedrop_type = mpctx->hrseek_active && mpctx->hrseek_framedrop ?
                             1 : check_framedrop(mpctx, -1);
        struct mp_image *decoded_frame =
            decode_video(sh_video, pkt, framedrop_type, pts);
        talloc_free(pkt);
        if (decoded_frame) {
            determine_frame_pts(mpctx);
            filter_video(mpctx, decoded_frame);
        } else if (!pkt) {
            // No packet and no decoded frame: drain filters/VO; if that
            // yields nothing either, report end of stream.
            if (!load_next_vo_frame(mpctx, true))
                return -1;
        }
        break;
    }

    // No frame ready at the VO yet — nothing to time.
    if (!video_out->frame_loaded)
        return 0;

    pts = video_out->next_pts;
    if (pts == MP_NOPTS_VALUE) {
        MP_ERR(mpctx, "Video pts after filters MISSING\n");
        // Try to use decoder pts from before filters
        pts = sh_video->pts;
        if (pts == MP_NOPTS_VALUE)
            pts = sh_video->last_pts;
    }
    if (endpts == MP_NOPTS_VALUE || pts < endpts)
        add_frame_pts(mpctx, pts);
    // Still before the hr-seek target: drop this frame at the VO.
    if (mpctx->hrseek_active && pts < mpctx->hrseek_pts - .005) {
        vo_skip_frame(video_out);
        return 0;
    }
    mpctx->hrseek_active = false;
    sh_video->pts = pts;
    if (sh_video->last_pts == MP_NOPTS_VALUE)
        sh_video->last_pts = sh_video->pts;
    else if (sh_video->last_pts > sh_video->pts) {
        MP_WARN(mpctx, "Decreasing video pts: %f < %f\n",
                sh_video->pts, sh_video->last_pts);
        /* If the difference in pts is small treat it as jitter around the
         * right value (possibly caused by incorrect timestamp ordering) and
         * just show this frame immediately after the last one.
         * Treat bigger differences as timestamp resets and start counting
         * timing of later frames from the position of this one.
         */
        if (sh_video->last_pts - sh_video->pts > 0.5)
            sh_video->last_pts = sh_video->pts;
        else
            sh_video->pts = sh_video->last_pts;
    } else if (sh_video->pts >= sh_video->last_pts + 60) {
        // Assume a PTS difference >= 60 seconds is a discontinuity.
        MP_WARN(mpctx, "Jump in video pts: %f -> %f\n",
                sh_video->last_pts, sh_video->pts);
        sh_video->last_pts = sh_video->pts;
    }
    double frame_time = sh_video->pts - sh_video->last_pts;
    sh_video->last_pts = sh_video->pts;
    // Keep A/V delay accounting in step with the displayed video time.
    if (mpctx->sh_audio)
        mpctx->delay -= frame_time;
    return frame_time;
}
/* Advance video by (at most) one frame and compute how far the displayed
 * timestamp moved relative to the previous frame.
 *
 * Order of attempts: use a frame already queued at the VO; otherwise drain a
 * frame buffered across a pending format change; otherwise read one demuxer
 * packet, decode and filter it.
 *
 * endpts: cutoff timestamp; frames at or after it are not recorded with
 *         add_frame_pts(). MP_NOPTS_VALUE disables the cutoff.
 * Returns: frame duration in seconds (forced to 0 on a pts discontinuity),
 *          0 when no new frame is ready yet (or it was skipped for hr-seek),
 *          or -1 when draining produced nothing more (presumably EOF —
 *          confirm against callers).
 */
double update_video(struct MPContext *mpctx, double endpts)
{
    struct dec_video *d_video = mpctx->d_video;
    struct vo *video_out = mpctx->video_out;
    if (d_video->header->attached_picture)
        return update_video_attached_pic(mpctx);
    if (load_next_vo_frame(mpctx, false)) {
        // Use currently queued VO frame
    } else if (d_video->waiting_decoded_mpi) {
        // Draining on reconfig
        if (!load_next_vo_frame(mpctx, true))
            return -1;
    } else {
        // Decode a new frame
        struct demux_packet *pkt = demux_read_packet(d_video->header);
        if (pkt && pkt->pts != MP_NOPTS_VALUE)
            pkt->pts += mpctx->video_offset;
        // Once the packet pts reaches the hr-seek target — or packet pts
        // can't be trusted at all — stop framedropping so the target frame
        // is actually decoded and shown.
        if ((pkt && pkt->pts >= mpctx->hrseek_pts - .005) ||
            d_video->has_broken_packet_pts)
        {
            mpctx->hrseek_framedrop = false;
        }
        // During hr-seek with framedrop enabled, force framedrop type 1.
        int framedrop_type = mpctx->hrseek_active && mpctx->hrseek_framedrop ?
                             1 : check_framedrop(mpctx, -1);
        struct mp_image *decoded_frame =
            video_decode(d_video, pkt, framedrop_type);
        talloc_free(pkt);
        if (decoded_frame) {
            filter_video(mpctx, decoded_frame, false);
        } else if (!pkt) {
            // No packet and no decoded frame: drain filters/VO; if that
            // yields nothing either, report end of stream.
            if (!load_next_vo_frame(mpctx, true))
                return -1;
        }
    }

    // Whether the VO has an image queued.
    // If it does, it will be used to time and display the next frame.
    if (!video_out->frame_loaded)
        return 0;

    double pts = video_out->next_pts;
    if (endpts == MP_NOPTS_VALUE || pts < endpts)
        add_frame_pts(mpctx, pts);
    // Still before the hr-seek target: drop this frame at the VO.
    if (mpctx->hrseek_active && pts < mpctx->hrseek_pts - .005) {
        vo_skip_frame(video_out);
        return 0;
    }
    mpctx->hrseek_active = false;
    double last_pts = mpctx->video_next_pts;
    if (last_pts == MP_NOPTS_VALUE)
        last_pts = pts;
    double frame_time = pts - last_pts;
    if (frame_time < 0 || frame_time >= 60) {
        // Assume a PTS difference >= 60 seconds is a discontinuity.
        MP_WARN(mpctx, "Jump in video pts: %f -> %f\n", last_pts, pts);
        frame_time = 0;
    }
    mpctx->video_next_pts = pts;
    // Keep A/V delay accounting in step with the displayed video time.
    if (mpctx->d_audio)
        mpctx->delay -= frame_time;
    return frame_time;
}
int main(int argc, char ** argv) { if (argc != 2) { fprintf(stderr, "Para rodar o programa, use: %s [video_path]\n", argv[0]); return -1; } filename = argv[1]; omp_set_num_threads(OMP_THREADS); //Registra todos os codecs e formatos de videos av_register_all(); //Abre o arquivo de midia; if (avformat_open_input(&pFormatCtx, filename, NULL, NULL)!=0) { fprintf(stderr, "Nao foi possivel abrir o arquivo %s\n", filename); return -1; } //Recupera a informacao do stream; if (avformat_find_stream_info(pFormatCtx, NULL) < 0) { fprintf(stderr, "Nao foi possivel encontrar a informacao do stream\n"); return -1; } //Informacao bruta sobre o arquivo de video; av_dump_format(pFormatCtx, 0, filename, 0); //Encontra o primeiro stream de video (video principal) int video_stream = -1; for (unsigned i = 0; i < pFormatCtx->nb_streams; i++) { if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) { video_stream = i; break; } } if (video_stream == -1) { fprintf(stderr, "Nao foi possivel encontrar o stream de video\n"); return -1; } //Captura o ponteiro referente ao codec do stream de video pCodecCtx = pFormatCtx->streams[video_stream]->codec; //Busca o decode do video if ((pCodec = avcodec_find_decoder(pCodecCtx->codec_id)) == NULL) { fprintf(stderr, "Codec nao suportado :(\n"); return -1; } //Abre o codec if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) { fprintf(stderr, "Nao foi possivel abrir o codec\n"); } // Aloca espaco de memoria para o frame de video (AVFrame) pDecodedFrame = avcodec_alloc_frame(); if ((pFrameRGB = avcodec_alloc_frame()) == NULL) { fprintf(stderr, "Nao foi possivel alocar memoria para o frame de video\n"); return -1; } if ((pOutputFrame = avcodec_alloc_frame()) == NULL) { fprintf(stderr, "Nao foi possivel alocar memoria para o frame de video\n"); return -1; } //Determina o tamanho necessario do buffer e aloca a memoria numBytesRGB = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height); bufferRGB = (uint8_t *) 
av_malloc(numBytesRGB*sizeof(uint8_t)); //Configura o contexto para o escalonamento sws_ctx = sws_getContext ( pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_RGB24, SWS_BILINEAR, NULL, NULL, NULL ); //Aplica para o buffer os frames no formato FMT_RGB24 (pacote RGB 8:8:8, 24bpp, RGBRGB...) avpicture_fill((AVPicture *) pFrameRGB, bufferRGB , PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height); //Preparando AVCodecContext de saida AVCodecContext * c = NULL; AVCodec * codec = avcodec_find_encoder(CODEC_ID_MPEG1VIDEO); if (!codec) { fprintf(stderr, "Codec nao encontrado\n"); return -1; } c = avcodec_alloc_context3(codec); //Configurando valores para o contexto do video de saida c->bit_rate = pCodecCtx->bit_rate; c->width = pCodecCtx->width; c->height = pCodecCtx->height; c->time_base = pCodecCtx->time_base; c->gop_size = pCodecCtx->gop_size; c->max_b_frames = pCodecCtx->max_b_frames; c->pix_fmt = PIX_FMT_YUV420P; if (avcodec_open2(c, codec, NULL) < 0) return -1; #ifdef SAVE_VIDEO FILE * pFile = fopen("out.mpg", "wb"); if (!pFile) { fprintf(stderr, "could not open out.mpg\n"); return -1; } int out_size; #endif int outbuf_size = 300000; uint8_t * outbuf = (uint8_t *) av_malloc(outbuf_size); //Criacao de contexto para converter um tipo RGB24 para YUV240P (preparacao para encoded) numBytesYUV = avpicture_get_size(PIX_FMT_YUV420P, c->width, c->height); bufferYUV = (uint8_t *) av_malloc(numBytesYUV*sizeof(uint8_t)); out_sws_ctx = sws_getContext ( c->width, c->height, PIX_FMT_RGB24, c->width, c->height, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL ); avpicture_fill((AVPicture *) pOutputFrame, bufferYUV , PIX_FMT_YUV420P, c->width, c->height); #ifdef SDL_INTERFACE bmp = init_sdl_window(pCodecCtx, bmp); if (bmp == NULL) { return -1; } play_original_video(argv[1]); #endif double start_time; while(av_read_frame(pFormatCtx, &packet) >= 0) { //Testa se e unm pacote com de stream de video if(packet.stream_index == 
video_stream) { start_time = get_clock_msec(); // Decode frame de video avcodec_decode_video2(pCodecCtx, pDecodedFrame, &frameFinished, &packet); //Testa se ja existe um quadro de video if (frameFinished) { #ifdef SDL_INTERFACE SDL_LockYUVOverlay(bmp); #endif //Converte a imagem de seu formato nativo para RGB sws_scale ( sws_ctx, (uint8_t const * const *) pDecodedFrame->data, pDecodedFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize ); filter_video(pFrameRGB, pCodecCtx->width, pCodecCtx->height); // for (int i = 0; i < 50; i++) // filter_average(pFrameRGB, pCodecCtx->width, pCodecCtx->height); #if defined (SAVE_VIDEO) || defined (SDL_INTERFACE) //Convertendo de RFB para YUV sws_scale ( out_sws_ctx, (uint8_t const * const *) pFrameRGB->data, pFrameRGB->linesize, 0, c->height, pOutputFrame->data, pOutputFrame->linesize ); #endif #ifdef SDL_INTERFACE pOutputFrame->data[0] = bmp->pixels[0]; pOutputFrame->data[1] = bmp->pixels[2]; pOutputFrame->data[2] = bmp->pixels[1]; pOutputFrame->linesize[0] = bmp->pitches[0]; pOutputFrame->linesize[1] = bmp->pitches[2]; pOutputFrame->linesize[2] = bmp->pitches[1]; SDL_UnlockYUVOverlay(bmp); rect.x = 0; rect.y = 0; rect.w = 1280; rect.h = 720; SDL_DisplayYUVOverlay(bmp, &rect); #endif #ifdef SAVE_VIDEO //codigo para salvar frames em uma saida fflush(stdout); out_size = avcodec_encode_video(c, outbuf, outbuf_size, pOutputFrame); std::cout << "write frame " << counter_frames << "(size = " << out_size << ")" << std::endl; fwrite(outbuf, 1, out_size, pFile); #endif std::cout << "Frame [" << counter_frames <<"] : " << get_clock_msec() - start_time<< " ms" << std::endl; counter_frames++; } } // Libera o pacote alocado pelo pacote av_free_packet(&packet); #ifdef SDL_INTERFACE SDL_PollEvent(&event); switch(event.type) { case SDL_QUIT: SDL_Quit(); return 0; break; default: break; } #endif } #ifdef SAVE_VIDEO //captura frames atrasados for(; out_size; counter_frames++) { fflush(stdout); out_size = 
avcodec_encode_video(c, outbuf, outbuf_size, NULL); std::cout << "write frame " << counter_frames << "(size = " << out_size << ")" << std::endl; // fwrite(outbuf, 1, outbuf_size, pFile); } // adiciona sequencia para um real mpeg outbuf[0] = 0x00; outbuf[1] = 0x00; outbuf[2] = 0x01; outbuf[3] = 0xb7; fwrite(outbuf, 1, 4, pFile); fclose(pFile); #endif free(outbuf); //Fecha o codec avcodec_close(pCodecCtx); //Fecha o arquivo de video avformat_close_input(&pFormatCtx); return 0; }
void decode_thread( void ) { int in_size,v_off,flags = 0,playsize; sh_video_t *sh_video; sh_audio_t *sh_audio; demux_stream_t *d_video,*d_audio; demuxer_t *demuxer; unsigned int * start; double pts,delay,speed; unsigned char * pcm_o; vd_ffmpeg_ctx *ctx; AVCodecContext *avctx; demuxer = mpctx->demuxer; sh_video = mpctx->sh_video; sh_audio = mpctx->sh_audio; d_video = mpctx->d_video; d_audio = mpctx->d_audio; v_off = 0; *(mts_p->err_degree) = 1; speed = 1.0; flags |= AOPLAY_FINAL_CHUNK; ctx = sh_video->context; avctx = ctx->avctx; avctx->thread_count = 1; // avctx->unused = 0x4d5456; while(1) { F("1\n"); if ( mts_p->get_first_pts(demuxer,&delay) == TYPE_AUDIO ) { //faacDecHandle faac_hdec; F("2\n"); faacDecFrameInfo faac_finfo; in_size = mts_p->get_packet(demuxer,d_audio, &start, &pts); //get a audio packet pcm_o = mtv_faacDecDecode_funcp(&faac_finfo, start, in_size); //decode a audio packet // pcm_o = faacDecDecode(faac_hdec, &faac_finfo, start, in_size); //decode a audio packet if ( faac_finfo.error > 0 ) { F("Fe!\n"); *(mts_p->packet_err) = 1; continue; } else if (faac_finfo.samples > 0 ) //&& speed < 0.6 ) { F("3\n"); playsize = sh_audio->samplesize*faac_finfo.samples; #if 0 //for external sync,do not delet! 
at -= GetTimer(); if ( at > 10000 & at < 40000 ) { delay_int = (int)at /20000; delay_int *= 10000; usleep(delay_int); } #endif while(mpctx->audio_out->get_space() < playsize) ; mpctx->audio_out->play(pcm_o,playsize,flags); //play a audio packet // at = GetTimer() + delay * 1000000; } else { continue; } } else //decode video { F("4\n"); in_size = mts_p->get_packet(demuxer,d_video, &start, &pts); //get a video packet mpi = mpvdec->decode(sh_video, start, in_size, 0); //decode a video packet if ( mpi ) { #ifdef GAOTEST at -= GetTimer(); if ( at > 10000 & at < 40000 ) { delay_int = (int)at /10000; delay_int *= 10000; usleep(delay_int); } #endif filter_video(sh_video, mpi, 0); //play a video packet // at = GetTimer() + delay * 1000000; } else { printf("He!\n"); // *(mts_p->packet_err) = 1; continue; } } } }