/*
 * decode_thread - demux thread entry point (SDL thread signature).
 *
 * Opens the file named in is->filename, locates the first video and audio
 * streams, opens both via stream_component_open(), then loops reading
 * packets and routing them to is->videoq / is->audioq.  All state lives in
 * the big VideoState struct passed as `arg`.
 * Returns 0 on normal shutdown, -1 on open/probe failure; always pushes
 * FF_QUIT_EVENT on the codec-open failure path.
 */
int decode_thread(void *arg) {
  VideoState *is = (VideoState *)arg;
  AVFormatContext *pFormatCtx = NULL;
  AVPacket pkt1, *packet = &pkt1;
  // AVDictionary is a simple key:value store for options/metadata.
  // http://www.ffmpeg.org/doxygen/3.1/group__lavu__dict.html#details
  AVDictionary *io_dict = NULL;
  // Callback for checking whether to abort blocking I/O functions.
  // http://www.ffmpeg.org/doxygen/3.1/structAVIOInterruptCB.html#details
  AVIOInterruptCB callback;
  int video_index = -1;
  int audio_index = -1;
  int i;

  is->videoStream=-1;
  is->audioStream=-1;

  global_video_state = is;
  // will interrupt blocking functions if we quit!
  callback.callback = decode_interrupt_cb;
  callback.opaque = is;
  // Initialize is->io_context from is->filename; used to manage file I/O.
  // http://www.ffmpeg.org/doxygen/3.1/aviobuf_8c.html#ae8589aae955d16ca228b6b9d66ced33d
  if (avio_open2(&is->io_context, is->filename, 0, &callback, &io_dict))
  {
    fprintf(stderr, "Unable to open I/O for %s\n", is->filename);
    return -1;
  }

  // Read the file header and store format information in pFormatCtx.
  // The last two args (forced format, options) are NULL so libavformat
  // auto-detects everything.
  if(avformat_open_input(&pFormatCtx, is->filename, NULL, NULL)!=0)
    return -1; // Couldn't open file

  is->pFormatCtx = pFormatCtx;

  // Retrieve stream information (fills pFormatCtx->streams)
  if(avformat_find_stream_info(pFormatCtx, NULL)<0)
    return -1; // Couldn't find stream information

  // Dump information about file onto standard error
  av_dump_format(pFormatCtx, 0, is->filename, 0);

  // Find the first video and the first audio stream
  for(i=0; i<pFormatCtx->nb_streams; i++) {
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO &&
       video_index < 0) {
      video_index=i;
    }
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO &&
       audio_index < 0) {
      audio_index=i;
    }
  }
  // stream_component_open() finds the decoder, sets audio parameters,
  // saves the important info into the big struct, and starts the audio
  // and video threads; shared by both stream types to avoid duplication.
  if(audio_index >= 0) {
    stream_component_open(is, audio_index);
  }
  if(video_index >= 0) {
    stream_component_open(is, video_index);
  }

  if(is->videoStream < 0 || is->audioStream < 0) {
    fprintf(stderr, "%s: could not open codecs\n", is->filename);
    goto fail;
  }

  // main decode loop: everything above was setup; this loop feeds the queues
  for(;;) {
    if(is->quit) {
      break; // controls loop exit
    }
    // seek stuff goes here
    // Throttle: stop reading while either queue holds enough data already.
    if(is->audioq.size > MAX_AUDIOQ_SIZE ||
       is->videoq.size > MAX_VIDEOQ_SIZE) {
      SDL_Delay(10);
      continue;
    }
    if(av_read_frame(is->pFormatCtx, packet) < 0) {
      if(is->pFormatCtx->pb->error == 0) {
        SDL_Delay(100); /* no error; wait for user input */
        continue;
      } else {
        break;
      }
    }
    // Is this a packet from the video stream?
    if(packet->stream_index == is->videoStream) {
      packet_queue_put(&is->videoq, packet);
    } else if(packet->stream_index == is->audioStream) {
      packet_queue_put(&is->audioq, packet);
    } else {
      av_free_packet(packet);
    }
  }
  /* all done - wait for it */
  while(!is->quit) {
    SDL_Delay(100);
  }

 fail:
  {
    SDL_Event event;
    event.type = FF_QUIT_EVENT;
    event.user.data1 = is;
    SDL_PushEvent(&event);
  }
  return 0;
}
void *fill_queues(void *unused) { AVPacket pkt; AVStream *st; int once = 0; while (fill_me) { int err = av_read_frame(ic, &pkt); if (err) { return NULL; } if (videoqueue.nb_packets > 1000) { if (!once++) fprintf(stderr, "Queue size %d problems ahead\n", videoqueue.size); } st = ic->streams[pkt.stream_index]; switch (st->codec->codec_type) { case AVMEDIA_TYPE_VIDEO: if (pkt.pts != AV_NOPTS_VALUE) { if (first_pts == AV_NOPTS_VALUE) { first_pts = first_video_pts = pkt.pts; first_audio_pts = av_rescale_q(pkt.pts, video_st->time_base, audio_st->time_base); } pkt.pts -= first_video_pts; } packet_queue_put(&videoqueue, &pkt); break; case AVMEDIA_TYPE_AUDIO: if (pkt.pts != AV_NOPTS_VALUE) { if (first_pts == AV_NOPTS_VALUE) { first_pts = first_audio_pts = pkt.pts; first_video_pts = av_rescale_q(pkt.pts, audio_st->time_base, video_st->time_base); } pkt.pts -= first_audio_pts; } packet_queue_put(&audioqueue, &pkt); break; case AVMEDIA_TYPE_DATA: packet_queue_put(&dataqueue, &pkt); break; } /* while (videoqueue.nb_packets>10) * usleep(30); */ //fprintf(stderr, "V %d A %d\n", videoqueue.nb_packets, audioqueue.nb_packets); //-----> } return NULL; }
/********* read data thread **************/
/*
 * read_thread - reads packets from the opened format context inside the
 * player handle and enqueues them on the audio/video packet queues until
 * EOF, a queue-put failure, or stop_mark is set.  On exit it marks the
 * stream as finished for both decoder sides.
 * Returns 0 on normal termination, or the result of call_java_onerror()
 * when the handle is NULL.
 */
int read_thread(PLAYER_HANDLE handle )
{
    log_chris(ANDROID_LOG_INFO ,TAG ," in read_thread");
    media_handle_union_t * media_handle = (media_handle_union_t *) handle;
    if (media_handle == NULL) {
        log_chris(ANDROID_LOG_ERROR ,TAG ,"media handle is null");
        return call_java_onerror(g_obj ,NULL_POINTER_EXCEPTION);
    }
    log_chris(ANDROID_LOG_INFO ,TAG ," after media_handle acquire");

    AVPacket packet;
    AVFormatContext *fmt_ctx_tmp = media_handle->ptr_format_ctx;
    int audio_stream = media_handle->decode_audio_var->audio_stream;
    int video_stream = media_handle->decode_video_var->video_stream;
    PacketQueue *video_queue_tmp = &media_handle->decode_video_var->video_queue;
    PacketQueue *audio_queue_tmp = &media_handle->decode_audio_var->audio_queue;

    /* start to read the packets from the media file */
    while (av_read_frame(fmt_ctx_tmp, &packet) >= 0) {
        if (media_handle->stop_mark == 1 ) {
            log_chris(ANDROID_LOG_INFO ,TAG ,"==press stop_mark button in av_read_frame");
            /* FIX: release the packet we just read before bailing out
             * (it was leaked), and drop the unreachable `break` that
             * followed this return. */
            av_free_packet(&packet);
            return 0;
        }
        /* NOTE(review): `(int)media_handle` truncates the pointer on 64-bit
         * builds — packet_queue_put's signature should take the handle type
         * or intptr_t; kept as-is since the callee is not visible here. */
        if (packet.stream_index == video_stream) {
            if (-1 == packet_queue_put(video_queue_tmp, &packet ,(int)media_handle) ) {
                log_chris(ANDROID_LOG_INFO ,TAG ,"==video packet_queue_put failed.");
                break;
            }
        } else if (packet.stream_index == audio_stream) {
            if (-1 == packet_queue_put( audio_queue_tmp, &packet ,(int)media_handle) ) {
                log_chris(ANDROID_LOG_INFO ,TAG ,"==audio packet_queue_put failed.");
                break;
            }
        } else {
            /* FIX: packets from any other stream were never released. */
            av_free_packet(&packet);
        }
    }

    /* EOF or queue failure: signal every consumer that the file is over. */
    media_handle->stop_mark = 1;
    media_handle->finish_mark = 1;
    media_handle->decode_video_var->finish_mark = 1;
    media_handle->decode_audio_var->finish_mark = 1;
    log_chris(ANDROID_LOG_INFO,TAG, ".....finish_mark = %d ,and player over" ,media_handle->finish_mark);
    return 0;
}
int video_reader_thread( void *data ) { static AVPacket packet; if( !format_context || !video_codec_context || !video_buffer ) return -1; reader_running = 1; while( !stop ) { if( video_queue.frames >= MAX_QUEUE_FRAMES || audio_queue.packets >= MAX_QUEUE_PACKETS ) { SDL_Delay( QUEUE_FULL_DELAY ); } else { if( av_read_frame( format_context, &packet ) >= 0 ) { if( packet.stream_index == video_stream ) { video_decode_video_frame( &packet ); } else if( packet.stream_index == audio_stream && audio_codec_context ) { packet_queue_put( &audio_queue, &packet ); } else { av_free_packet( &packet ); } } else { av_seek_frame( format_context, -1, 0, AVSEEK_FLAG_BACKWARD|AVSEEK_FLAG_BYTE ); } } } reader_running = 0; return 0; }
/* this thread gets the stream from the disk or the network */
/*
 * decode_thread - FFMovie demux/pacing thread.
 * Interleaves three duties per iteration: (1) read one packet when the
 * audio queue has room and no frame is pending display, (2) display the
 * pending video frame once the master clock reaches dest_showtime,
 * (3) busy-wait through pauses, accumulating the paused span into
 * movie->time_offset so the clock stays consistent.
 * Exits on abort flags or read failure; always runs ffmovie_cleanup().
 */
static int decode_thread(void *arg)
{
    /* DECODE THREAD */
    FFMovie *movie = arg;
    int status;
    AVPacket pkt1, *pkt = &pkt1;

    while(!movie->abort_request && !Global_abort_all) {
        /* read if the queues have room */
        if (movie->audioq.size < MAX_AUDIOQ_SIZE && !movie->dest_showtime) {
            if (av_read_packet(movie->context, pkt) < 0) {
                break; /* EOF or read error ends the loop */
            }
            if (movie->audio_st &&
                pkt->stream_index == movie->audio_st->index) {
                /* queue takes ownership of the packet */
                packet_queue_put(&movie->audioq, pkt);
            } else if (movie->video_st &&
                       pkt->stream_index == movie->video_st->index) {
                status = video_read_packet(movie, pkt);
                av_free_packet(pkt); /* video path copies; we free here */
                if(status < 0) {
                    break;
                }
            } else {
                av_free_packet(pkt);
            }
        }

        if(movie->dest_showtime) {
            double now = get_master_clock(movie);
            if(now >= movie->dest_showtime) {
                video_display(movie);
                movie->dest_showtime = 0;
            } else {
//                printf("showtime not ready, waiting... (%.2f,%.2f)\n",
//                            (float)now, (float)movie->dest_showtime);
                SDL_Delay(10);
            }
        }

        if(movie->paused) {
            /* measure pause duration so playback timing can be corrected */
            double endpause, startpause = SDL_GetTicks() / 1000.0;
            while(movie->paused && !movie->abort_request && !Global_abort_all) {
                SDL_Delay(100);
            }
            endpause = SDL_GetTicks() / 1000.0;
            movie->dest_showtime = 0;
            movie->time_offset += endpause - startpause;
        }
    }

    ffmovie_cleanup(movie);
    return 0;
}
bool ff_decoder_accept(struct ff_decoder *decoder, struct ff_packet *packet) { if (decoder && packet->base.stream_index == decoder->stream->index) { packet_queue_put(&decoder->packet_queue, packet); return true; } return false; }
void *dispatch_data_thread(void *arg) { JNIEnv *env; if((*g_jvm)->AttachCurrentThread(g_jvm, &env, NULL) != JNI_OK) { LOGE(1, "### start decode thead error"); return; } VideoState *is = (VideoState*)arg; AVPacket packet; while(1) { if(is->audioq.size > MAX_AUDIOQ_SIZE || is->videoq.size > MAX_VIDEOQ_SIZE) {// if(is->quit) { break; } usleep(5000); //5 ms continue; } if(av_read_frame(is->pFormatCtx, &packet)<0){ LOGE(10,"av_read_frame over !!! "); is->quit = 2; usleep(100000); break; } if(packet.stream_index==is->videoStream) { packet_queue_put(&is->videoq, &packet); } else if (packet.stream_index == is->audioStream) { packet_queue_put(&is->audioq, &packet); } else { av_free_packet(&packet); } } while (!is->quit) { usleep(10000); } LOGI(10,"dispatch_data_thread exit\n"); if((*g_jvm)->DetachCurrentThread(g_jvm) != JNI_OK) { LOGE(1,"### detach decode thread error"); } pthread_exit(0); if(debug) { LOGE(1, "### dispatch_data_thread exit"); } return ((void *)0); }
void ff_demuxer_reset(struct ff_demuxer *demuxer) { struct ff_packet packet = {0}; struct ff_clock *clock = ff_clock_init(); clock->sync_type = demuxer->clock.sync_type; clock->sync_clock = demuxer->clock.sync_clock; clock->opaque = demuxer->clock.opaque; packet.clock = clock; if (demuxer->audio_decoder != NULL) { packet_queue_put(&demuxer->audio_decoder->packet_queue, &packet); ff_clock_retain(clock); } if (demuxer->video_decoder != NULL) { packet_queue_put(&demuxer->video_decoder->packet_queue, &packet); ff_clock_retain(clock); } }
/*
 * readPacket - timer-driven pump: advances the playback clock by 10 units
 * per call and, once the clock reaches the pending packet's dts, hands the
 * packet to a free video decode thread (or the audio queue) and reads the
 * next packet.  Emits finished()/updateTime() when the file is exhausted.
 * All of this runs under `mutex` to serialize against the decode threads.
 */
void VideoPlayer::readPacket()
{
    if (pFormatCtx == NULL)
        return;

    SDL_LockMutex(mutex);

    currenttime+=10; // clock tick; units match packet dts scaling
    if (currenttime >= nextPacket.dts) {
        if(nextPacket.stream_index == videoStream) {
            // dispatch to the first idle decode thread
            if (!decodeVideoThread->isRunning()) {
                decodeVideoThread->setPacket(nextPacket);
                decodeVideoThread->start();
            } else if (!decodeVideoThread_2->isRunning()) {
                decodeVideoThread_2->setPacket(nextPacket);
                decodeVideoThread_2->start();
            } else if (!decodeVideoThread_3->isRunning()) {
                decodeVideoThread_3->setPacket(nextPacket);
                decodeVideoThread_3->start();
            } else {
                //qDebug()<<"running...";
                // all decode threads busy — add more threads here to
                // raise throughput (frame is dropped this tick)
            }
        } else if(nextPacket.stream_index==audioStream) {
            packet_queue_put(audioq, &nextPacket);
            emit updateTime(currenttime);
        }

        if(av_read_frame(pFormatCtx, &nextPacket) < 0) {
            // the whole file has been read — finish playback
            stop();
            currenttime = totaltime;
            emit updateTime(currenttime);
            emit finished();
        }
    }

    SDL_UnlockMutex(mutex);
}
/*
 * process_packet - route one demuxed packet.
 * Video packets are decoded and, when a full frame is produced, blitted
 * into the YUV overlay (note the U/V plane swap: overlay is YV12) and
 * displayed.  Audio packets are handed to the audio queue, which takes
 * ownership.  Everything else is freed.
 * Returns 0 on success, -1 on a video decode error.
 */
static int process_packet(AVPacket *pkt, PlayerContext *ctx)
{
    AVCodecContext *codec = ctx->video_codec;
    AVFrame *frame = ctx->frame;

    if (pkt->stream_index == ctx->video_stream_index) {
        if (avcodec_decode_video2(codec, frame, &ctx->got_frame, pkt) < 0) {
            av_log (NULL, AV_LOG_ERROR, "Error decoding video frame\n");
            av_free_packet (pkt);  /* FIX: packet was leaked on this path */
            return -1;
        }
        if (ctx->got_frame) {
            SDL_LockYUVOverlay (ctx->overlay);
            AVPicture pict;
            /* YV12 overlay stores planes as Y,V,U — swap 1 and 2 */
            pict.data[0] = ctx->overlay->pixels[0];
            pict.data[1] = ctx->overlay->pixels[2];
            pict.data[2] = ctx->overlay->pixels[1];

            pict.linesize[0] = ctx->overlay->pitches[0];
            pict.linesize[1] = ctx->overlay->pitches[2];
            pict.linesize[2] = ctx->overlay->pitches[1];

            av_image_copy(pict.data, pict.linesize,
                          (const uint8_t **)frame->data, frame->linesize,
                          codec->pix_fmt, codec->width, codec->height);

            SDL_UnlockYUVOverlay (ctx->overlay);
            SDL_DisplayYUVOverlay (ctx->overlay, &ctx->rect);
        }
        /* FIX: previously freed only when got_frame was set — packets the
         * decoder buffered (no output frame yet) were leaked. */
        av_free_packet (pkt);
    } else if (pkt->stream_index == ctx->audio_stream_index) {
        packet_queue_put (&ctx->audioq, pkt);  /* queue takes ownership */
    } else {
        av_free_packet (pkt);
    }
    return 0;
}
static int decode_from_thread(void *userdata) { int ret = -1; AVPacket pkt1, *packet = &pkt1; FFmpegState *st = (FFmpegState*)userdata; while (1) { if (st->quit) break; if (st->audioq.size > MAX_AUDIOQ_SIZE) { SDL_Delay(10); continue; } ret = av_read_frame(st->aFormatCtx, packet); if (ret < 0) { if (ret == AVERROR_EOF || url_feof(st->aFormatCtx->pb)) { break; } if (st->aFormatCtx->pb && st->aFormatCtx->pb->error) { break; } continue; } if (packet->stream_index == st->audioStream) { packet_queue_put(&st->audioq, packet); } else { av_free_packet(packet); } } while (!st->quit) { SDL_Delay(100); } return 0; }
/*
 * run - QThread entry point: opens m_filePath, locates and opens the audio
 * and video decoders, spawns the video decode thread, then loops reading
 * packets into is->videoq / is->audioq, handling seek requests and pause
 * along the way.  On EOF it waits for quit, then tears everything down.
 */
void ZWVideoThread::run()
{
    memset(&mVideoState,0,sizeof(VideoState)); // zero the state for safety
    // NOTE(review): this memset is duplicated — the second call is redundant.
    memset(&mVideoState,0,sizeof(VideoState));
    VideoState *is = &mVideoState;

    AVFormatContext *pFormatCtx;
    AVCodecContext *pCodecCtx;
    AVCodec *pCodec;

    AVCodecContext *aCodecCtx;
    AVCodec *aCodec;

    int audioStream ,videoStream, i;

    //Allocate an AVFormatContext.
    pFormatCtx = avformat_alloc_context();

    if (avformat_open_input(&pFormatCtx, m_filePath.toLocal8Bit().data(), NULL, NULL) != 0) {
        printf("can't open the file. \n");
        return;
    }

    if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
        printf("Could't find stream infomation.\n");
        return;
    }

    videoStream = -1;
    audioStream = -1;

    /// scan the streams contained in the file
    for (i = 0; i < pFormatCtx->nb_streams; i++) {
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoStream = i; // note: keeps the LAST video stream found
        }
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
            audioStream < 0) {
            audioStream = i; // keeps the FIRST audio stream
        }
    }

    /// videoStream == -1 means no video stream was found
    if (videoStream == -1) {
        printf("Didn't find a video stream.\n");
        return;
    }
    if (audioStream == -1) {
        printf("Didn't find a audio stream.\n");
        return;
    }

    is->ic = pFormatCtx;
    is->videoStream = videoStream;
    is->audioStream = audioStream;

//    emit sig_TotalTimeChanged(getTotalTime());

    if (audioStream >= 0) {
        /* every step that configures the SDL audio stream happens here */
        audio_stream_component_open(&mVideoState, audioStream);
    }

    /// find the audio decoder
    aCodecCtx = pFormatCtx->streams[audioStream]->codec;
    aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
    if (aCodec == NULL) {
        printf("ACodec not found.\n");
        return;
    }

    /// open the audio decoder
    if (avcodec_open2(aCodecCtx, aCodec, NULL) < 0) {
        printf("Could not open audio codec.\n");
        return;
    }

    is->audio_st = pFormatCtx->streams[audioStream];

    /// find the video decoder
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL) {
        printf("PCodec not found.\n");
        return;
    }

    /// open the video decoder
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        printf("Could not open video codec.\n");
        return;
    }

    is->video_st = pFormatCtx->streams[videoStream];
    packet_queue_init(&is->videoq);

    /// spawn a dedicated thread to decode video frames
    is->video_tid = SDL_CreateThread(video_thread, "video_thread", &mVideoState);
    is->player = this;

//    int y_size = pCodecCtx->width * pCodecCtx->height;

    AVPacket *packet = (AVPacket *) malloc(sizeof(AVPacket)); // packet reused for every read
//    av_new_packet(packet, y_size); // av_read_frame allocates the payload, so not needed

    av_dump_format(pFormatCtx, 0, m_filePath.toLocal8Bit().data(), 0); // print media info

    while (1)
    {
        if (is->quit) {
            ZW_LOG_WARNING(QString("TTTTTTTV")); // playback stopped
            break;
        }

        if (is->seek_req) {
            int stream_index = -1;
            int64_t seek_target = is->seek_pos;

            if (is->videoStream >= 0) stream_index = is->videoStream;
            else if (is->audioStream >= 0) stream_index = is->audioStream;

            AVRational aVRational = {1, AV_TIME_BASE};
            if (stream_index >= 0) {
                // convert from AV_TIME_BASE units to the stream's time base
                seek_target = av_rescale_q(seek_target, aVRational,
                        pFormatCtx->streams[stream_index]->time_base);
            }

            if (av_seek_frame(is->ic, stream_index, seek_target, AVSEEK_FLAG_BACKWARD) < 0) {
                fprintf(stderr, "%s: error while seeking\n",is->ic->filename);
            } else {
                if (is->audioStream >= 0) {
                    AVPacket *packet = (AVPacket *) malloc(sizeof(AVPacket)); // flush marker
                    av_new_packet(packet, 10);
                    strcpy((char*)packet->data,FLUSH_DATA);
                    packet_queue_flush(&is->audioq); // drain the queue
                    packet_queue_put(&is->audioq, packet); // enqueue the flush marker
                }
                if (is->videoStream >= 0) {
                    AVPacket *packet = (AVPacket *) malloc(sizeof(AVPacket)); // flush marker
                    av_new_packet(packet, 10);
                    strcpy((char*)packet->data,FLUSH_DATA);
                    packet_queue_flush(&is->videoq); // drain the queue
                    packet_queue_put(&is->videoq, packet); // enqueue the flush marker
                    is->video_clock = 0;
                }
            }

            is->seek_req = 0;
            is->seek_time = is->seek_pos / 1000000.0;
            is->seek_flag_audio = 1;
            is->seek_flag_video = 1;
        }

        // Throttle reading when the queues hold too much data, so the whole
        // file is not slurped into memory at once.
        /* audioq.size / videoq.size are the total bytes of payload in the
         * queue, not the packet count */
        // this threshold can be fairly large
        if (is->audioq.size > MAX_AUDIO_SIZE || is->videoq.size > MAX_VIDEO_SIZE) {
            SDL_Delay(10);
            continue;
        }

        if (is->isPause == true) {
            SDL_Delay(10);
            continue;
        }

        if (av_read_frame(pFormatCtx, packet) < 0) {
            is->readFinished = true;
            if (is->quit) {
                break; // decode side is done too; safe to exit
            }
            SDL_Delay(10);
            continue;
        }

        if (packet->stream_index == videoStream) {
            packet_queue_put(&is->videoq, packet); // queue owns it; no av_free_packet here
        } else if( packet->stream_index == audioStream ) {
            packet_queue_put(&is->audioq, packet); // queue owns it; no av_free_packet here
        } else {
            // Free the packet that was allocated by av_read_frame
            av_free_packet(packet);
        }
    }

    /// file fully read — wait for playback to finish
    while (!is->quit) {
        SDL_Delay(100);
    }

    stop();

    avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx);

    ZW_LOG_WARNING(QString("TTTTTTTV"));
    is->readThreadFinished = true;
}
/*
 * decode_thread - demux thread (SDL thread signature).
 * Opens is->filename with an interrupt callback wired to
 * decode_interrupt_cb, opens the first audio and video streams via
 * stream_component_open(), then loops reading packets into the audio and
 * video queues until quit or a real I/O error.  Pushes FF_QUIT_EVENT on
 * the codec-open failure path.  Returns 0, or -1 on open/probe failure.
 */
int decode_thread(void *arg) {

  VideoState *is = (VideoState *)arg;
  AVFormatContext *pFormatCtx = NULL;
  AVPacket pkt1, *packet = &pkt1;

  AVDictionary *io_dict = NULL;
  AVIOInterruptCB callback;  // lets us abort blocking I/O on quit

  int video_index = -1;
  int audio_index = -1;
  int i;

  is->videoStream=-1;
  is->audioStream=-1;

  global_video_state = is;
  // will interrupt blocking functions if we quit!
  callback.callback = decode_interrupt_cb;
  callback.opaque = is;
  if (avio_open2(&is->io_context, is->filename, 0, &callback, &io_dict))
  {
    fprintf(stderr, "Unable to open I/O for %s\n", is->filename);
    return -1;
  }

  // Open video file
  if(avformat_open_input(&pFormatCtx, is->filename, NULL, NULL)!=0)
    return -1; // Couldn't open file

  is->pFormatCtx = pFormatCtx;

  // Retrieve stream information
  if(avformat_find_stream_info(pFormatCtx, NULL)<0)
    return -1; // Couldn't find stream information

  // Dump information about file onto standard error
  av_dump_format(pFormatCtx, 0, is->filename, 0);

  // Find the first video stream
  for(i=0; i<pFormatCtx->nb_streams; i++) {
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO &&
       video_index < 0) {
      video_index=i;
    }
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO &&
       audio_index < 0) {
      audio_index=i;
    }
  }
  // open codecs and start the audio/video decode machinery
  if(audio_index >= 0) {
    stream_component_open(is, audio_index);
  }
  if(video_index >= 0) {
    stream_component_open(is, video_index);
  }

  if(is->videoStream < 0 || is->audioStream < 0) {
    fprintf(stderr, "%s: could not open codecs\n", is->filename);
    goto fail;
  }

  // main decode loop
  for(;;) {
    if(is->quit) {
      break;
    }
    // seek stuff goes here
    // throttle while the queues already hold enough data
    if(is->audioq.size > MAX_AUDIOQ_SIZE ||
       is->videoq.size > MAX_VIDEOQ_SIZE) {
      SDL_Delay(10);
      continue;
    }
    if(av_read_frame(is->pFormatCtx, packet) < 0) {
      if(is->pFormatCtx->pb->error == 0) {
        SDL_Delay(100); /* no error; wait for user input */
        continue;
      } else {
        break;
      }
    }
    // Is this a packet from the video stream?
    if(packet->stream_index == is->videoStream) {
      packet_queue_put(&is->videoq, packet);
    } else if(packet->stream_index == is->audioStream) {
      packet_queue_put(&is->audioq, packet);
    } else {
      av_free_packet(packet);
    }
  }
  /* all done - wait for it */
  while(!is->quit) {
    SDL_Delay(100);
  }

 fail:
  {
    SDL_Event event;
    event.type = FF_QUIT_EVENT;
    event.user.data1 = is;
    SDL_PushEvent(&event);
  }
  return 0;
}
int decode_thread(void *arg) { VideoState *is = (VideoState *)arg; AVFormatContext *pFormatCtx; AVPacket pkt1, *packet = &pkt1; int video_index = -1; int audio_index = -1; int i; is->videoStream=-1; is->audioStream=-1; global_video_state = is; // will interrupt blocking functions if we quit! url_set_interrupt_cb(decode_interrupt_cb); // Open video file if(av_open_input_file(&pFormatCtx, is->filename, NULL, 0, NULL)!=0) return -1; // Couldn't open file is->pFormatCtx = pFormatCtx; // Retrieve stream information if(av_find_stream_info(pFormatCtx)<0) return -1; // Couldn't find stream information // Dump information about file onto standard error dump_format(pFormatCtx, 0, is->filename, 0); // Find the first video stream for(i=0; i<pFormatCtx->nb_streams; i++) { if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO && video_index < 0) { video_index=i; } if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_AUDIO && audio_index < 0) { audio_index=i; } } if(audio_index >= 0) { stream_component_open(is, audio_index); } if(video_index >= 0) { stream_component_open(is, video_index); } if(is->videoStream < 0 || is->audioStream < 0) { fprintf(stderr, "%s: could not open codecs\n", is->filename); goto fail; } // main decode loop for(;;) { if(is->quit) { break; } // seek stuff goes here if(is->seek_req) { int stream_index= -1; int64_t seek_target = is->seek_pos; if (is->videoStream >= 0) stream_index = is->videoStream; else if(is->audioStream >= 0) stream_index = is->audioStream; if(stream_index>=0){ seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, pFormatCtx->streams[stream_index]->time_base); } if(!av_seek_frame(is->pFormatCtx, stream_index, seek_target, is->seek_flags)) { fprintf(stderr, "%s: error while seeking\n", is->pFormatCtx->filename); } else { if(is->audioStream >= 0) { packet_queue_flush(&is->audioq); packet_queue_put(&is->audioq, &flush_pkt); } if(is->videoStream >= 0) { packet_queue_flush(&is->videoq); packet_queue_put(&is->videoq, &flush_pkt); 
} } is->seek_req = 0; } if(is->audioq.size > MAX_AUDIOQ_SIZE || is->videoq.size > MAX_VIDEOQ_SIZE) { SDL_Delay(10); continue; } if(av_read_frame(is->pFormatCtx, packet) < 0) { if(url_ferror(&pFormatCtx->pb) == 0) { SDL_Delay(100); /* no error; wait for user input */ continue; } else { break; } } // Is this a packet from the video stream? if(packet->stream_index == is->videoStream) { packet_queue_put(&is->videoq, packet); } else if(packet->stream_index == is->audioStream) { packet_queue_put(&is->audioq, packet); } else { av_free_packet(packet); } } /* all done - wait for it */ while(!is->quit) { SDL_Delay(100); } fail: { SDL_Event event; event.type = FF_QUIT_EVENT; event.user.data1 = is; SDL_PushEvent(&event); } return 0; }
/*
 * decode_thread - Allegro-based demux thread.
 * Stream indexes were discovered by the caller (is->audio_index /
 * is->video_index); this thread opens the components, services seek
 * requests (flushing queues and injecting flush_pkt), and routes packets
 * into is->audioq / is->videoq until quit.  Returns the thread handle.
 */
static void *decode_thread(ALLEGRO_THREAD *t, void *arg)
{
   VideoState *is = (VideoState *) arg;
   AVFormatContext *format_context = is->format_context;
   AVPacket pkt1, *packet = &pkt1;

   is->videoStream = -1;
   is->audioStream = -1;

   if (is->audio_index >= 0) {
      stream_component_open(is, is->audio_index);
   }
   if (is->video_index >= 0) {
      stream_component_open(is, is->video_index);
   }

   // unlike the SDL tutorials, only one of the two streams is required
   if (is->videoStream < 0 && is->audioStream < 0) {
      ALLEGRO_ERROR("%s: could not open codecs\n", is->filename);
      goto fail;
   }

   for (;;) {
      if (is->quit) {
         break;
      }

      if (is->seek_req) {
         int stream_index = -1;
         int64_t seek_target = is->seek_pos;

         if (is->videoStream >= 0)
            stream_index = is->videoStream;
         else if (is->audioStream >= 0)
            stream_index = is->audioStream;

         if (stream_index >= 0) {
            // convert from AV_TIME_BASE units into the stream's time base
            seek_target = av_rescale_q(seek_target, AV_TIME_BASE_Q,
               format_context->streams[stream_index]->time_base);
         }

         if (av_seek_frame(is->format_context, stream_index, seek_target,
            is->seek_flags) < 0) {
            ALLEGRO_WARN("%s: error while seeking (%d, %lu)\n",
               is->format_context->filename, stream_index, seek_target);
         }
         else {
            // flush stale packets and tell the decoders to reset
            if (is->audioStream >= 0) {
               packet_queue_flush(&is->audioq);
               packet_queue_put(&is->audioq, &flush_pkt);
            }
            if (is->videoStream >= 0) {
               packet_queue_flush(&is->videoq);
               packet_queue_put(&is->videoq, &flush_pkt);
            }
         }
         is->seek_req = 0;
         is->after_seek_sync = true;
      }
      // throttle while the queues already hold enough data
      if (is->audioq.size > MAX_AUDIOQ_SIZE ||
         is->videoq.size > MAX_VIDEOQ_SIZE) {
         al_rest(0.01);
         continue;
      }
      if (av_read_frame(is->format_context, packet) < 0) {
#ifdef FFMPEG_0_8
         if (!format_context->pb->eof_reached && !format_context->pb->error) {
#else
         if (url_ferror((void *)&format_context->pb) == 0) {
#endif
            al_rest(0.01);
            continue;
         }
         else {
            break;
         }
      }
      // Is this a packet from the video stream?
      if (packet->stream_index == is->videoStream) {
         packet_queue_put(&is->videoq, packet);
      }
      else if (packet->stream_index == is->audioStream) {
         packet_queue_put(&is->audioq, packet);
      }
      else {
         av_free_packet(packet);
      }
   }
   /* all done - wait for it */
   while (!is->quit) {
      al_rest(0.1);
   }

fail:
   return t;
}

/* We want to be able to send an event to the user exactly at the time
 * a new video frame should be displayed.
 */
/*
 * timer_thread - frame-presentation scheduler.
 * Sleeps on timer_cond until a frame is scheduled (is->show_next), waits
 * for the master clock to reach that time, then emits
 * ALLEGRO_EVENT_VIDEO_FRAME_SHOW to the user.  Returns the thread handle.
 */
static void *timer_thread(ALLEGRO_THREAD *t, void *arg)
{
   VideoState *is = (VideoState *) arg;
   double ot = 0, nt = 0;  // previous/current emit times (debug only)
   while (!is->quit) {
      ALLEGRO_EVENT event;
      double d;

      /* Wait here until someone signals to us when a new frame was
       * scheduled at is->show_next.
       */
      al_lock_mutex(is->timer_mutex);
      al_wait_cond(is->timer_cond, is->timer_mutex);
      al_unlock_mutex(is->timer_mutex);

      if (is->quit) break;

      /* Wait until that time. This wait is why we have our own thread
       * here so the user doesn't need to do it.
       */
      while (1) {
         d = is->show_next - get_master_clock(is);
         if (d <= 0) break;
         //printf("waiting %4.1f ms\n", d * 1000);
         al_rest(d);
      }

      nt = get_master_clock(is);
      //printf("event after %4.1f ms\n", (nt - ot) * 1000);
      ot = nt;
      /* Now is the time. */
      event.type = ALLEGRO_EVENT_VIDEO_FRAME_SHOW;
      event.user.data1 = (intptr_t)is->video;
      al_emit_user_event(&is->video->es, &event, NULL);
   }
   return t;
}
int main(int argc ,char **argv) { av_register_all(); AVFormatContext *pFormatCtx = NULL; AVInputFormat *file_iformat = NULL; //avio_set_interrupt_cb(decode_interrupt_cb); //Open video file printf("open video file:%s\n", argv[1]); if(avformat_open_input(&pFormatCtx, argv[1], file_iformat, NULL) < 0) { printf("canot open input file: %s\n", argv[1]); return -1; //Cannot open file } printf("open input file: %s OK\n", argv[1]); //Retrieve stream information if(av_find_stream_info(pFormatCtx) < 0) return -1;//cannot find stream infomation //Dump information about file no to standard error av_dump_format(pFormatCtx, 0, argv[1], 0); int i; int videoStream; int audioStream; videoStream = -1; audioStream = -1; AVCodecContext *vCodecCtx; AVCodecContext *aCodecCtx; //Find the first video stream for(i = 0; i < pFormatCtx->nb_streams; i++) { if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && videoStream < 0) { videoStream = i; } if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audioStream < 0) { audioStream = i; } } if(videoStream == -1) { printf("no video stream\n"); return -1;//Did not find a video stream } if(audioStream == -1) { printf("no audio stream\n"); return -1;//Did not find a audio stream } printf("find video strean: %d\n", videoStream); printf("find audio strean: %d\n", audioStream); //Get a pointer to the codec context for the video stream vCodecCtx = pFormatCtx->streams[videoStream]->codec; AVCodec *vCodec; vCodec = avcodec_find_decoder(vCodecCtx->codec_id); if(vCodec == NULL) { fprintf(stderr, "Unsupported video codec\n"); return -1;//codec not find } //Open video codec if(avcodec_open(vCodecCtx, vCodec) < 0) { fprintf(stderr, "open video codec error\n"); return -1;//Could not open codec } //Get a pointer to the codec context for the audio stream aCodecCtx = pFormatCtx->streams[audioStream]->codec; static SDL_AudioSpec wanted_spec, spec; wanted_spec.freq = aCodecCtx->sample_rate; wanted_spec.format = AUDIO_S16SYS; 
wanted_spec.channels = aCodecCtx->channels; wanted_spec.silence = 0; wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE; wanted_spec.callback = audio_callback; wanted_spec.userdata = aCodecCtx; if(SDL_OpenAudio(&wanted_spec, &spec) < 0) { fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError()); return -1; } AVCodec *aCodec; aCodec = avcodec_find_decoder(aCodecCtx->codec_id); if(aCodec == NULL) { fprintf(stderr, "Unsupport audio codec\n"); return -1;//codec not found } if(avcodec_open(aCodecCtx, aCodec) < 0) { fprintf(stderr, "open avcodec error\n"); return -1; } packet_queue_init(&audioq); SDL_PauseAudio(0); AVFrame *pFrame; //Allocate video frame pFrame = avcodec_alloc_frame(); AVFrame *pFrameRGB; //Allocate an AVFrame structure pFrameRGB = avcodec_alloc_frame(); if(pFrameRGB == NULL) return -1; uint8_t *buffer; int numBytes; //Detemine required buffer size and allocate buffer numBytes = avpicture_get_size(PIX_FMT_RGB24, vCodecCtx->width, vCodecCtx->height); buffer = (uint8_t*)av_malloc(numBytes * sizeof(uint8_t)); //Assign appropriate parts of buffer to image planes in pFrameRGB //Note that pFrameRGB is an AVFrame, but AVFrame is a superset //of AVPicture avpicture_fill((AVPicture*)pFrameRGB, buffer, PIX_FMT_RGB24, vCodecCtx->width, vCodecCtx->height); if((SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER))) { fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError()); exit(1); } SDL_Surface *screen; screen = SDL_SetVideoMode(vCodecCtx->width, vCodecCtx->height, 0, 0); if(!screen) { fprintf(stderr, "SDL: could not set video mode\n"); exit(1); } SDL_Overlay *bmp; bmp = SDL_CreateYUVOverlay(vCodecCtx->width, vCodecCtx->height, SDL_YV12_OVERLAY, screen); int frameFinished; AVPacket packet; SDL_Rect rect; i = 0; while(av_read_frame(pFormatCtx, &packet) >=0) { //is this a packet from video stream? if(packet.stream_index == videoStream) { //Decoder video frame avcodec_decode_video2(vCodecCtx, pFrame, &frameFinished, &packet); //Did we got a video frame? 
if(frameFinished) { usleep(40 * 1000); SDL_LockYUVOverlay(bmp); AVPicture pict; pict.data[0] = bmp->pixels[0]; pict.data[1] = bmp->pixels[2]; pict.data[2] = bmp->pixels[1]; pict.linesize[0] = bmp->pitches[0]; pict.linesize[1] = bmp->pitches[2]; pict.linesize[2] = bmp->pitches[1]; //Convert the image into YUV format that SDL uses static struct SwsContext *img_convert_ctx; img_convert_ctx = sws_getCachedContext(img_convert_ctx, vCodecCtx->width, vCodecCtx->height, vCodecCtx->pix_fmt, vCodecCtx->width, vCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, pFrame->height, pict.data, pict.linesize); SDL_UnlockYUVOverlay(bmp); rect.x = 0; rect.y = 0; rect.w = vCodecCtx->width; rect.h = vCodecCtx->height; SDL_DisplayYUVOverlay(bmp, &rect); } //Free the packet that was allocated by av_read_frame av_free_packet(&packet); SDL_Event event; SDL_PollEvent(&event); switch(event.type) { case SDL_QUIT: quit = 1; SDL_Quit(); exit(0); break; defalut: break; } } else if(packet.stream_index == audioStream) { packet_queue_put(&audioq, &packet); } else { av_free_packet(&packet); } } //Free the RGB image av_free(buffer); av_free(pFrameRGB); //Free the YUV freame av_free(pFrame); //Close the codec avcodec_close(vCodecCtx); //Close the video file avformat_close_input(&pFormatCtx); }
int main(int argc, char *argv[]) { AVFormatContext *pFormatCtx = NULL; int i, videoStream, audioStream; AVCodecContext *pCodecCtx; AVCodec *pCodec; AVFrame *pFrame; AVPacket packet; int frameFinished; float aspect_ratio; SwsContext *pConvertCtx = NULL; AVCodecContext *aCodecCtx; AVCodec *aCodec = NULL; SDL_Overlay *bmp; SDL_Surface *screen; SDL_Rect rect; SDL_Event event; SDL_AudioSpec wanted_spec, spec; if(argc < 2) { fprintf(stderr, "Usage: test <file>\n"); exit(1); } // Register all formats and codecs av_register_all(); printf("\r\n This is so cool!!\r\n"); if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) { fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError()); exit(1); } if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0) { printf("\r\n Open fail!!\r\n"); return -1; // Couldn't open file } else { printf("\r\n Open successful!!\r\n"); } // Retrieve stream information if(avformat_find_stream_info(pFormatCtx, NULL)<0) { printf("\r\n Find stream fail!!\r\n"); return -1; // Couldn't find stream information } else { printf("\r\n Find stream successful!!\r\n"); } av_dump_format(pFormatCtx, 0, argv[1], 0); // Find the first video stream videoStream = -1; audioStream = -1; for(i=0; i<pFormatCtx->nb_streams; i++) { if((pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) && videoStream < 0) { videoStream=i; } else if((pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) && audioStream < 0) { audioStream = i; } } if(videoStream == -1) { printf("\r\n Didn't find a video stream!!\r\n"); return -1; // Didn't find a video stream } if(audioStream == -1) { printf("\r\n Didn't find a audio stream!!\r\n"); return -1; // Didn't find a audio stream } aCodecCtx=pFormatCtx->streams[audioStream]->codec; // Set audio settings from codec info wanted_spec.freq = aCodecCtx->sample_rate; wanted_spec.format = AUDIO_S16SYS; wanted_spec.channels = aCodecCtx->channels; wanted_spec.silence = 0; wanted_spec.samples = 
SDL_AUDIO_BUFFER_SIZE; wanted_spec.callback = audio_callback; wanted_spec.userdata = aCodecCtx; if(SDL_OpenAudio(&wanted_spec, &spec) < 0) { fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError()); return -1; } aCodec = avcodec_find_decoder(aCodecCtx->codec_id); if(!aCodec) { fprintf(stderr, "Unsupported codec!\n"); return -1; } // Open codec if(avcodec_open2(aCodecCtx, aCodec, NULL)<0) { fprintf(stderr, "codec can't open!\n"); return -1; // Could not open codec } packet_queue_init(&audioq); SDL_PauseAudio(0); // Get a pointer to the codec context for the video stream pCodecCtx = pFormatCtx->streams[videoStream]->codec; // Find the decoder for the video stream pCodec = avcodec_find_decoder(pCodecCtx->codec_id); if(pCodec == NULL) { fprintf(stderr, "Unsupported codec!\n"); return -1; // Codec not found } // Open codec if(avcodec_open2(pCodecCtx, pCodec, NULL)<0) { fprintf(stderr, "codec can't open!\n"); return -1; // Could not open codec } // Allocate video frame pFrame = av_frame_alloc(); screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0); if(!screen) { fprintf(stderr, "SDL: could not set video mode - exiting\n"); exit(1); } // Allocate a place to put our YUV image on that screen bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height, SDL_YV12_OVERLAY, screen); i = 0; while(av_read_frame(pFormatCtx, &packet)>=0) { // Is this a packet from the video stream? if(packet.stream_index == videoStream) { // Decode video frame avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet); // Did we get a video frame? 
if(frameFinished) { SDL_LockYUVOverlay(bmp); AVPicture pict; pict.data[0] = bmp->pixels[0]; pict.data[1] = bmp->pixels[2]; pict.data[2] = bmp->pixels[1]; pict.linesize[0] = bmp->pitches[0]; pict.linesize[1] = bmp->pitches[2]; pict.linesize[2] = bmp->pitches[1]; //Convert the image into YUV format that SDL uses pConvertCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL); if (pConvertCtx == NULL) { fprintf(stderr, "Cannot initialize the conversion context/n"); return -1; } sws_scale(pConvertCtx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pict.data, pict.linesize); SDL_UnlockYUVOverlay(bmp); rect.x = 0; rect.y = 0; rect.w = pCodecCtx->width; rect.h = pCodecCtx->height; SDL_DisplayYUVOverlay(bmp, &rect); sws_freeContext(pConvertCtx); } } else if(packet.stream_index==audioStream) { // printf("\r\n audioStream!\r\n"); packet_queue_put(&audioq, &packet); } else { av_free_packet(&packet); } //Free the packet that was allocated by av_read_frame SDL_PollEvent(&event); switch(event.type) { case SDL_QUIT: printf("\r\n SDL_Quit!\r\n"); SDL_Quit(); exit(0); break; default: break; } } printf("\r\n Read stream End!\r\n"); // Free the YUV frame printf("\r\n av_frame_free!\r\n"); av_frame_free(&pFrame); // Close the codec printf("\r\n avcodec_close!\r\n"); avcodec_close(pCodecCtx); printf("\r\n avformat_close_input!\r\n"); avformat_close_input(&pFormatCtx); printf("\r\n end!\r\n"); return 0; }
static int read_thread(void *arg) { VideoState *is = (VideoState*)arg; AVFormatContext *ic = NULL; int i; int st_index[AVMEDIA_TYPE_NB]; // 讲解一下 AVPacket pkt1, *pkt = &pkt1; memset(st_index, -1, sizeof(st_index)); ic = avformat_alloc_context(); avformat_open_input(&ic, is->filename, NULL, NULL); is->ic = ic; avformat_find_stream_info(ic, NULL); for (i = 0; i<ic->nb_streams; i++) { if (ic->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) st_index[AVMEDIA_TYPE_VIDEO] = i; if (ic->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) st_index[AVMEDIA_TYPE_AUDIO] = i; } /* open the streams,并开启相应的解码线程 */ if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) { stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]); } if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) { stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]); } if (is->video_stream < 0 && is->audio_stream < 0) { av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n", is->filename); goto fail; } for (;;) { /* if the queue are full, no need to read more */ // 暂时把以下这个判断注掉,不限制packet queue的大小 // 不然的话,可能因为视频消耗packet比较慢(比如我们强制41ms刷新一次),导致视频queue超过max // 此时如果不再读包,会导致音频没包可解,听起来就是杂音 /* if (is->audioq.size > MAX_AUDIOQ_SIZE || is->videoq.size > MAX_VIDEOQ_SIZE) { SDL_Delay(10); continue; } */ if (av_read_frame(ic, pkt) < 0) { //break; } if (pkt->stream_index == is->audio_stream) { packet_queue_put(&is->audioq, pkt); } else if (pkt->stream_index == is->video_stream) { packet_queue_put(&is->videoq, pkt); } else { av_free_packet(pkt); } } fail: /* close each stream */ // TODO close streams if (ic) { avformat_close_input(&ic); is->ic = NULL; } SDL_Event event; event.type = FF_QUIT_EVENT; event.user.data1 = is; SDL_PushEvent(&event); return 0; }
int packet_queue_put_flush_packet(struct ff_packet_queue *q) { return packet_queue_put(q, &q->flush_packet); }
/*
 * Entry point: plays argv[1].  Packets from the first audio stream are
 * queued on the global `audioq` for the SDL audio callback to decode and
 * (via swr) resample; the first video stream is decoded here and shown on
 * an SDL YUV overlay.  Returns 0 on success, -1 on setup failure.
 */
int main(int argc, char *argv[]) {
    AVFormatContext *pFormatCtx = NULL;
    int i, videoStream, audioStream;
    AVCodecContext *pCodecCtxOrig = NULL;   /* demuxer-owned video codec ctx */
    AVCodecContext *pCodecCtx = NULL;       /* our private copy used for decoding */
    AVCodec *pCodec = NULL;
    AVFrame *pFrame = NULL;
    AVPacket packet;
    int frameFinished;
    struct SwsContext *sws_ctx = NULL;
    AVCodecContext *aCodecCtxOrig = NULL;   /* demuxer-owned audio codec ctx */
    AVCodecContext *aCodecCtx = NULL;       /* private copy used for decoding */
    AVCodec *aCodec = NULL;
    SDL_Overlay *bmp;
    SDL_Surface *screen;
    SDL_Rect rect;
    SDL_Event event;
    SDL_AudioSpec wanted_spec, spec;

    if (argc < 2) {
        fprintf(stderr, "Usage: test <file>\n");
        exit(1);
    }
    // Register all formats and codecs
    av_register_all();

    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    // Open video file
    if (avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0)
        return -1; // Couldn't open file

    // Retrieve stream information
    if (avformat_find_stream_info(pFormatCtx, NULL)<0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, argv[1], 0);

    // Find the first video and the first audio stream
    videoStream = -1;
    audioStream = -1;
    for (i = 0; i<pFormatCtx->nb_streams; i++) {
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO
                && videoStream < 0) {
            videoStream = i;
        }
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO
                && audioStream < 0) {
            audioStream = i;
        }
    }
    if (videoStream == -1)
        return -1; // Didn't find a video stream
    if (audioStream == -1)
        return -1; // Didn't find an audio stream

    aCodecCtxOrig = pFormatCtx->streams[audioStream]->codec;
    aCodec = avcodec_find_decoder(aCodecCtxOrig->codec_id);
    if (!aCodec) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1;
    }

    // Copy context: decode through a private copy, not the demuxer's context
    aCodecCtx = avcodec_alloc_context3(aCodec);
    if (avcodec_copy_context(aCodecCtx, aCodecCtxOrig) != 0) {
        fprintf(stderr, "Couldn't copy codec context");
        return -1; // Error copying codec context
    }

    // Set audio settings from codec info
    wanted_spec.freq = aCodecCtx->sample_rate;
    wanted_spec.format = AUDIO_S16SYS;
    wanted_spec.channels = aCodecCtx->channels;
    wanted_spec.silence = 0;
    wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
    wanted_spec.callback = audio_callback;
    wanted_spec.userdata = aCodecCtx;

    // The decoded raw audio must first be converted into the format SDL
    // expects; configure swr_ctx from the raw-audio format (input side) and
    // the SDL output format (stereo S16 at the stream's sample rate).
    // NOTE(review): swr_ctx is not declared in this function — presumably a
    // file-scope SwrContext *; confirm.
    swr_ctx = swr_alloc_set_opts(NULL,
                                 AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16,
                                 aCodecCtx->sample_rate,
                                 av_get_default_channel_layout(aCodecCtx->channels),
                                 aCodecCtx->sample_fmt,
                                 aCodecCtx->sample_rate,
                                 0, NULL);
    // Initialize swr_ctx
    swr_init(swr_ctx);

    if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
        fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
        return -1;
    }
    avcodec_open2(aCodecCtx, aCodec, NULL);

    // audio_st = pFormatCtx->streams[index]
    packet_queue_init(&audioq);
    SDL_PauseAudio(0);   /* start the SDL audio callback pulling from audioq */

    // Get a pointer to the codec context for the video stream
    pCodecCtxOrig = pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pCodecCtxOrig->codec_id);
    if (pCodec == NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }

    // Copy context (same pattern as the audio stream above)
    pCodecCtx = avcodec_alloc_context3(pCodec);
    if (avcodec_copy_context(pCodecCtx, pCodecCtxOrig) != 0) {
        fprintf(stderr, "Couldn't copy codec context");
        return -1; // Error copying codec context
    }

    // Open codec
    if (avcodec_open2(pCodecCtx, pCodec, NULL)<0)
        return -1; // Could not open codec

    // Allocate video frame
    pFrame = av_frame_alloc();

    // Make a screen to put our video (Darwin needs an explicit 24-bit mode)
    #ifndef __DARWIN__
    screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
    #else
    screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0);
    #endif

    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        exit(1);
    }

    // Allocate a place to put our YUV image on that screen
    bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height,
                               SDL_YV12_OVERLAY, screen);

    // initialize SWS context for software scaling (decoder fmt -> YUV420P)
    sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
                             pCodecCtx->pix_fmt,
                             pCodecCtx->width, pCodecCtx->height,
                             PIX_FMT_YUV420P, SWS_BILINEAR,
                             NULL, NULL, NULL);

    // Main demux loop: decode+display video packets, queue audio packets
    i = 0;
    while (av_read_frame(pFormatCtx, &packet) >= 0) {
        // Is this a packet from the video stream?
        if (packet.stream_index == videoStream) {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

            // Did we get a complete video frame?
            if (frameFinished) {
                SDL_LockYUVOverlay(bmp);

                AVPicture pict;
                /* YV12 overlays store planes as Y,V,U — swap U and V. */
                pict.data[0] = bmp->pixels[0];
                pict.data[1] = bmp->pixels[2];
                pict.data[2] = bmp->pixels[1];

                pict.linesize[0] = bmp->pitches[0];
                pict.linesize[1] = bmp->pitches[2];
                pict.linesize[2] = bmp->pitches[1];

                // Convert the image into YUV format that SDL uses
                sws_scale(sws_ctx,
                          (uint8_t const * const *)pFrame->data,
                          pFrame->linesize, 0, pCodecCtx->height,
                          pict.data, pict.linesize);

                SDL_UnlockYUVOverlay(bmp);

                rect.x = 0;
                rect.y = 0;
                rect.w = pCodecCtx->width;
                rect.h = pCodecCtx->height;
                SDL_DisplayYUVOverlay(bmp, &rect);
                av_free_packet(&packet);
                Sleep(40);   /* crude ~25 fps pacing (Windows Sleep, ms) */
            } else {
                av_free_packet(&packet);
            }
        } else if (packet.stream_index == audioStream) {
            /* Queue takes ownership of the packet data; no free here. */
            packet_queue_put(&audioq, &packet);
        } else {
            // Free the packet that was allocated by av_read_frame
            av_free_packet(&packet);
        }

        // Poll (non-blocking) for a quit event each iteration
        SDL_PollEvent(&event);
        switch (event.type) {
        case SDL_QUIT:
            quit = 1;
            SDL_Quit();
            exit(0);
            break;
        default:
            break;
        }
    }

    // Free the YUV frame
    av_frame_free(&pFrame);

    // Close the codecs (both the private copies and the demuxer-owned ones)
    avcodec_close(pCodecCtxOrig);
    avcodec_close(pCodecCtx);
    avcodec_close(aCodecCtxOrig);
    avcodec_close(aCodecCtx);

    // Close the video file
    avformat_close_input(&pFormatCtx);

    // free swr context
    swr_free(&swr_ctx);
    return 0;
}
/*
 * Demuxer thread: opens is->filename (with an interrupt callback so blocking
 * I/O can be aborted on quit), starts the audio/video stream components,
 * optionally configures a resampler (compile-time selectable backend), then
 * loops handling seek requests and dispatching packets to the queues.
 * Posts FF_QUIT_EVENT on exit.  Returns -1 on setup failure, else 0.
 */
int decode_thread(void *arg) {
    VideoState *is = (VideoState *)arg;
    AVFormatContext *pFormatCtx = NULL;
    AVPacket pkt1, *packet = &pkt1;
    AVDictionary *io_dict = NULL;        /* options for avio_open2 (unused) */
    AVIOInterruptCB callback;            /* abort hook for blocking I/O */
    int video_index = -1;
    int audio_index = -1;
    int i;
    is->videoStream = -1;
    is->audioStream = -1;
    is->audio_need_resample = 0;
    global_video_state = is;
    // will interrupt blocking functions if we quit!
    callback.callback = decode_interrupt_cb;
    callback.opaque = is;

    /* Initialize is->io_context from is->filename for file I/O. */
    if(avio_open2(&is->io_context, is->filename, 0, &callback, &io_dict)) {
        fprintf(stderr, "Unable to open I/O for %s\n", is->filename);
        return -1;
    }

    // Open video file
    if(avformat_open_input(&pFormatCtx, is->filename, NULL, NULL) != 0) {
        return -1; // Couldn't open file
    }
    is->pFormatCtx = pFormatCtx;

    // Retrieve stream information
    if(avformat_find_stream_info(pFormatCtx, NULL) < 0) {
        return -1; // Couldn't find stream information
    }

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, is->filename, 0);

    // Find the first video and first audio stream
    for(i = 0; i < pFormatCtx->nb_streams; i++) {
        if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO
                && video_index < 0) {
            video_index = i;
        }
        if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO
                && audio_index < 0) {
            audio_index = i;
        }
    }

    /* stream_component_open() finds the decoder, saves state into the big
     * struct, and starts the audio/video threads. */
    if(audio_index >= 0) {
        stream_component_open(is, audio_index);
    }
    if(video_index >= 0) {
        stream_component_open(is, video_index);
    }
    if(is->videoStream < 0 && is->audioStream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        goto fail;
    }

#ifdef __RESAMPLER__
    /* If the decoded audio is not already S16, set up a resampler to
     * convert to stereo S16 @ 44100 Hz.  Backend chosen at compile time. */
    if( audio_index >= 0
            && pFormatCtx->streams[audio_index]->codec->sample_fmt != AV_SAMPLE_FMT_S16) {
        is->audio_need_resample = 1;
        is->pResampledOut = NULL;
        is->pSwrCtx = NULL;
        printf("Configure resampler: ");
#ifdef __LIBAVRESAMPLE__
        printf("libAvResample\n");
        is->pSwrCtx = avresample_alloc_context();
#endif
#ifdef __LIBSWRESAMPLE__
        printf("libSwResample\n");
        is->pSwrCtx = swr_alloc();
#endif

        // Some MP3/WAV files don't report a channel layout, so assume
        // they are stereo, not 5.1
        if (pFormatCtx->streams[audio_index]->codec->channel_layout == 0
                && pFormatCtx->streams[audio_index]->codec->channels == 2) {
            pFormatCtx->streams[audio_index]->codec->channel_layout = AV_CH_LAYOUT_STEREO;
        } else if (pFormatCtx->streams[audio_index]->codec->channel_layout == 0
                && pFormatCtx->streams[audio_index]->codec->channels == 1) {
            pFormatCtx->streams[audio_index]->codec->channel_layout = AV_CH_LAYOUT_MONO;
        } else if (pFormatCtx->streams[audio_index]->codec->channel_layout == 0
                && pFormatCtx->streams[audio_index]->codec->channels == 0) {
            pFormatCtx->streams[audio_index]->codec->channel_layout = AV_CH_LAYOUT_STEREO;
            pFormatCtx->streams[audio_index]->codec->channels = 2;
        }

        /* Input side mirrors the stream; output is fixed stereo S16 44100. */
        av_opt_set_int(is->pSwrCtx, "in_channel_layout",
                       pFormatCtx->streams[audio_index]->codec->channel_layout, 0);
        av_opt_set_int(is->pSwrCtx, "in_sample_fmt",
                       pFormatCtx->streams[audio_index]->codec->sample_fmt, 0);
        av_opt_set_int(is->pSwrCtx, "in_sample_rate",
                       pFormatCtx->streams[audio_index]->codec->sample_rate, 0);
        av_opt_set_int(is->pSwrCtx, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
        av_opt_set_int(is->pSwrCtx, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
        av_opt_set_int(is->pSwrCtx, "out_sample_rate", 44100, 0);

        /* NOTE: the `if (` below is opened inside one preprocessor arm and
         * closed after #endif — only one arm is ever compiled. */
#ifdef __LIBAVRESAMPLE__
        if (avresample_open(is->pSwrCtx) < 0) {
#else
        if (swr_init(is->pSwrCtx) < 0) {
#endif
            fprintf(stderr, " ERROR!! From Samplert: %d Hz Sample format: %s\n",
                    pFormatCtx->streams[audio_index]->codec->sample_rate,
                    av_get_sample_fmt_name(pFormatCtx->streams[audio_index]->codec->sample_fmt));
            fprintf(stderr, " To 44100 Sample format: s16\n");
            /* Resampler failed: fall back to playing un-resampled audio. */
            is->audio_need_resample = 0;
            is->pSwrCtx = NULL;;
        }
    }
#endif

    // main decode loop
    for(;;) {
        if(is->quit) {
            break;
        }

        // seek stuff goes here: translate seek_pos (AV_TIME_BASE units)
        // into the chosen stream's time base, seek, then flush both queues
        // and inject the flush marker so the decoders reset.
        if(is->seek_req) {
            int stream_index = -1;
            int64_t seek_target = is->seek_pos;

            if(is->videoStream >= 0) {
                stream_index = is->videoStream;
            } else if(is->audioStream >= 0) {
                stream_index = is->audioStream;
            }

            if(stream_index >= 0) {
                seek_target = av_rescale_q(seek_target, AV_TIME_BASE_Q,
                                           pFormatCtx->streams[stream_index]->time_base);
            }

            if(av_seek_frame(is->pFormatCtx, stream_index, seek_target,
                             is->seek_flags) < 0) {
                fprintf(stderr, "%s: error while seeking\n",
                        is->pFormatCtx->filename);
            } else {
                if(is->audioStream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if(is->videoStream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
        }

        /* Throttle reading while either queue is full. */
        if(is->audioq.size > MAX_AUDIOQ_SIZE || is->videoq.size > MAX_VIDEOQ_SIZE) {
            SDL_Delay(10);
            continue;
        }

        if(av_read_frame(is->pFormatCtx, packet) < 0) {
            if(is->pFormatCtx->pb->error == 0) {
                SDL_Delay(100); /* no error; wait for user input */
                continue;
            } else {
                break;
            }
        }

        // Is this a packet from the video stream?
        if(packet->stream_index == is->videoStream) {
            packet_queue_put(&is->videoq, packet);
        } else if(packet->stream_index == is->audioStream) {
            packet_queue_put(&is->audioq, packet);
        } else {
            av_free_packet(packet);
        }
    }

    /* all done - wait for it */
    while(!is->quit) {
        SDL_Delay(100);
    }

fail:
    {
        SDL_Event event;
        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}

/*
 * Record a pending seek request (consumed by decode_thread).  `pos` is in
 * AV_TIME_BASE units; a negative `rel` (seeking backwards) selects
 * AVSEEK_FLAG_BACKWARD so the demuxer lands on/before the target.
 * A new request is ignored while one is still pending.
 */
void stream_seek(VideoState *is, int64_t pos, int rel) {
    if(!is->seek_req) {
        is->seek_pos = pos;
        is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
        is->seek_req = 1;
    }
}

/*
 * Entry point: sets up SDL, spawns decode_thread for argv[1], then runs the
 * SDL event loop handling arrow-key seeks, refresh/alloc events posted by
 * the video machinery, and quit.
 */
int main(int argc, char *argv[]) {
    SDL_Event event;
    //double pts;
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));

    if(argc < 2) {
        fprintf(stderr, "Usage: test <file>\n");
        exit(1);
    }

    // Register all formats and codecs
    av_register_all();

    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    // Make a screen to put our video
    // NOTE(review): `screen` is not declared here — presumably a file-scope
    // SDL_Surface *; confirm.
#ifndef __DARWIN__
    screen = SDL_SetVideoMode(640, 480, 0, 0);
#else
    screen = SDL_SetVideoMode(640, 480, 24, 0);
#endif

    if(!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        exit(1);
    }

    av_strlcpy(is->filename, argv[1], 1024);

    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    /* First refresh in 40 ms; subsequent refreshes re-arm themselves. */
    schedule_refresh(is, 40);

    is->av_sync_type = DEFAULT_AV_SYNC_TYPE;
    is->parse_tid = SDL_CreateThread(decode_thread, is);

    if(!is->parse_tid) {
        av_free(is);
        return -1;
    }

    /* Sentinel packet injected into the queues after a seek. */
    av_init_packet(&flush_pkt);
    flush_pkt.data = (unsigned char *)"FLUSH";

    for(;;) {
        double incr, pos;
        SDL_WaitEvent(&event);

        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;

            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;

            case SDLK_UP:
                incr = 60.0;
                goto do_seek;

            case SDLK_DOWN:
                incr = -60.0;
                goto do_seek;

            do_seek:
                /* Seek relative to the current master clock position. */
                if(global_video_state) {
                    pos = get_master_clock(global_video_state);
                    pos += incr;
                    stream_seek(global_video_state,
                                (int64_t)(pos * AV_TIME_BASE), incr);
                }
                break;

            default:
                break;
            }
            break;

        case FF_QUIT_EVENT:
        case SDL_QUIT:
            is->quit = 1;
            /*
             * If the video has finished playing, then both the picture and
             * audio queues are waiting for more data.  Make them stop
             * waiting and terminate normally.
             */
            SDL_CondSignal(is->audioq.cond);
            SDL_CondSignal(is->videoq.cond);
            SDL_Quit();
            exit(0);
            break;

        case FF_ALLOC_EVENT:
            alloc_picture(event.user.data1);
            break;

        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;

        default:
            break;
        }
    }

    return 0;
}
int decode_thread(void* arg) { // CoInitialize(NULL); CoInitializeEx(NULL, COINIT_MULTITHREADED); printf("decode_thread\n"); VideoState* is = (VideoState*) arg; AVFormatContext* pFormatCtx; AVPacket pkt1, *packet = &pkt1; int video_index = -1; int audio_index = -1; int i; global_video_state = is; if (avformat_open_input(&pFormatCtx, is->filename, NULL, NULL) != 0) { return -1; } is->pFormatCtx = pFormatCtx; if (avformat_find_stream_info(pFormatCtx, NULL) < 0) { return -1; } av_dump_format(pFormatCtx, 0, is->filename, 0); for (i = 0; i < pFormatCtx->nb_streams; i++) { if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0) video_index = i; if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0) audio_index = i; } if (audio_index >= 0) stream_component_open(is, audio_index); if (video_index >= 0) stream_component_open(is, video_index); if (is->videoStream < 0 || is->audioStream < 0) { fprintf(stderr, "%s: could not open codecs\n", is->filename); goto fail; } // main decode loop for (;;) { if (is->quit) { SDL_LockMutex(is->audioq.mutex); SDL_CondSignal(is->audioq.cond); SDL_UnlockMutex(is->audioq.mutex); break; } //seek stuff goes here if (is->seek_req) { int stream_index = -1; int64_t seek_target = is->seek_pos; if (is->videoStream >= 0) stream_index = is->videoStream; else if (is->audioStream >= 0) stream_index = is->audioStream; if (stream_index >= 0) { seek_target = av_rescale_q(seek_target, AV_TIME_BASE_Q, pFormatCtx->streams[stream_index]->time_base); } if (av_seek_frame(is->pFormatCtx, stream_index, seek_target, is->seek_flags) < 0) { fprintf(stderr, "%s: error while seeking\n", is->pFormatCtx->filename); } else { if (is->audioStream >= 0) { packet_queue_flush(&is->audioq); packet_queue_put(&is->audioq, &flush_pkt, 0); } } is->seek_req = 0; } if (is->audioq.size > MAX_AUDIOQ_SIZE || is->videoq.size > MAX_AUDIOQ_SIZE) { SDL_Delay(10); continue; } if (av_read_frame(is->pFormatCtx, packet) < 0) { 
printf("av_read_frame\n"); fflush(stdout); if (is->pFormatCtx->pb->error == 0) { SDL_Delay(100); // no error, wait for user input continue; } else { break; } } if (packet->stream_index == is->videoStream) { packet_queue_put(&is->videoq, packet, 1); } else if (packet->stream_index == is->audioStream) { packet_queue_put(&is->audioq, packet, 0); } else { av_free_packet(packet); } } // all done - wait for it while (!is->quit) { SDL_Delay(100); } fail: if (1) { SDL_Event event; event.type = FF_QUIT_EVENT; event.user.data1 = is; SDL_PushEvent(&event); } return 0; }
/*==============================================================================
 * - _T_play_thread()
 *
 * - a task that reads packets from media file and dispatch them to queue
 *
 * Notes: audio packets are read and immediately discarded — on this target
 * only the video queue is fed (presumably audio is unsupported here; the
 * empty audio branch in the seek handler matches this — confirm intent).
 * The task exits on a demuxer I/O error, after ~1 s of consecutive failed
 * reads (EOF grace of 10 retries), or when the quit flag is raised.
 */
static void _T_play_thread ()
{
    AVPacket packet;
    int wait_times = 0;   /* consecutive failed reads (EOF grace counter) */

    //serial_printf("\033[H\033[0J"); /* clear screen */

    /* Sentinel packet injected into the video queue after a seek. */
    av_init_packet(&_G_flush_pkt);
    _G_flush_pkt.data = (uint8_t *)"FLUSH";

    // Main loop
    while (atomic_get(&_G_vs.quit) == 0) {
        /* hold: playback paused — idle without reading */
        if (_G_vs.hold) {
            delayQ_delay (10);
            continue;
        }

        // seek stuff goes here
        if (_G_vs.seek_req) {
            int stream_index= -1;
            int64_t seek_target = _G_vs.seek_pos;   /* AV_TIME_BASE units */

            if (_G_vs.videoStream >= 0) {
                stream_index = _G_vs.videoStream;
            } else if(_G_vs.audioStream >= 0) {
                stream_index = _G_vs.audioStream;
            }

            /* Convert the target into the chosen stream's time base. */
            if(stream_index>=0){
                seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q,
                                          _G_vs.pFormatCtx->streams[stream_index]->time_base);
            }

            if(av_seek_frame(_G_vs.pFormatCtx, stream_index, seek_target,
                             _G_vs.seek_flags) < 0) {
                serial_printf("%s: error while seeking\n",
                              _G_vs.pFormatCtx->filename);
            } else {
                /* Audio branch intentionally empty — no audio queue here. */
                if(_G_vs.audioStream >= 0) {
                }
                if(_G_vs.videoStream >= 0) {
                    packet_queue_flush(&_G_vs.videoq);
                    packet_queue_put(&_G_vs.videoq, &_G_flush_pkt);
                }
            }
            _G_vs.seek_req = 0;
        }

        if (av_read_frame(_G_vs.pFormatCtx, &packet) >= 0) {
            // Is this a packet from the video stream?
            if(packet.stream_index == _G_vs.videoStream) {
                packet_queue_put(&_G_vs.videoq, &packet);
            } else if (packet.stream_index == _G_vs.audioStream) {
                av_free_packet(&packet);   /* audio is dropped on this target */
            } else {
                av_free_packet(&packet);
            }
        } else {
            if(_G_vs.pFormatCtx->pb && _G_vs.pFormatCtx->pb->error) {
                goto _play_over;   /* real I/O error: stop playback */
            } else {
                if (wait_times >= 10) {
                    goto _play_over;   /* ~1 s with no data: treat as EOF */
                } else {
                    wait_times++;
                    delayQ_delay(SYS_CLK_RATE / 10); /* no error; wait for user input */
                    continue;
                }
            }
        }
    } // (atomic_get(&_G_vs.quit) == 0)

_play_over:
    /* Mark this task is over */
    atomic_dec (&_G_vs.alive_thread_num);
    serial_printf ("tPlay is over.\n");
}
int main(int argc, char *argv[]) { AVFormatContext *pFormatCtx = NULL; int i, videoStream, audioStream; AVCodecContext *pCodecCtx = NULL; AVCodec *pCodec = NULL; AVFrame *pFrame = NULL; AVPacket packet; int frameFinished; //float aspect_ratio; AVCodecContext *aCodecCtx = NULL; AVCodec *aCodec = NULL; //SDL_Overlay *bmp = NULL; //SDL_Surface *screen = NULL; SDL_Window *m_pWindow = NULL; SDL_Renderer *m_pRenderer = NULL; SDL_Rect rect; SDL_Event event; SDL_AudioSpec wanted_spec, spec; //struct SwsContext *sws_ctx = NULL; AVDictionary *videoOptionsDict = NULL; AVDictionary *audioOptionsDict = NULL; if(argc < 2) { fprintf(stderr, "Usage: test <file>\n"); exit(1); } // Register all formats and codecs av_register_all(); if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) { fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError()); exit(1); } // Open video file if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0) return -1; // Couldn't open file // Retrieve stream information if(avformat_find_stream_info(pFormatCtx, NULL)<0) return -1; // Couldn't find stream information // Dump information about file onto standard error av_dump_format(pFormatCtx, 0, argv[1], 0); // Find the first video stream videoStream=-1; audioStream=-1; for(i=0; i<pFormatCtx->nb_streams; i++) { if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO && videoStream < 0) { videoStream=i; // printf("video stream:%d",i); } if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO && audioStream < 0) { audioStream=i; // printf("audio stream:%d",i); } } // for(i=0; i<pFormatCtx->nb_streams; i++) { // if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) { // printf("video stream:%d\n",i); // } // if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO ) { // printf("audio stream:%d\n",i); // } // } if(videoStream==-1) return -1; // Didn't find a video stream if(audioStream==-1) return -1; aCodecCtx=pFormatCtx->streams[audioStream]->codec; int 
count = SDL_GetNumAudioDevices(0); for (int i = 0; i < count; ++i) { SDL_Log("Audio device %d: %s", i, SDL_GetAudioDeviceName(i, 0)); } // Set audio settings from codec info wanted_spec.freq = aCodecCtx->sample_rate; wanted_spec.format = AUDIO_S16SYS; wanted_spec.channels = aCodecCtx->channels; wanted_spec.silence = 0; wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE; wanted_spec.callback = audio_callback; wanted_spec.userdata = aCodecCtx; // if(SDL_OpenAudio(&wanted_spec, &spec) < 0) // { // fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError()); // return -1; // } SDL_AudioDeviceID dev; dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FORMAT_CHANGE); if(dev == 0) { fprintf(stderr, "Failed to open audio: %s\n", SDL_GetError()); } else { if(wanted_spec.format != spec.format){ fprintf(stderr, "We didn't get AUDIO_S16SYS audio format.\n"); return -1; } } aCodec = avcodec_find_decoder(aCodecCtx->codec_id); if(!aCodec) { fprintf(stderr, "Unsupported codec!\n"); return -1; } avcodec_open2(aCodecCtx, aCodec, &audioOptionsDict); // audio_st = pFormatCtx->streams[index] packet_queue_init(&audioq); //SDL_PauseAudio(0); SDL_PauseAudioDevice(dev,0); // Get a pointer to the codec context for the video stream pCodecCtx=pFormatCtx->streams[videoStream]->codec; // Find the decoder for the video stream pCodec=avcodec_find_decoder(pCodecCtx->codec_id); if(pCodec==NULL) { fprintf(stderr, "Unsupported codec!\n"); return -1; // Codec not found } // Open codec if(avcodec_open2(pCodecCtx, pCodec, &videoOptionsDict)<0) return -1; // Could not open codec // Allocate video frame pFrame=av_frame_alloc(); AVFrame* m_pFrameYUV = av_frame_alloc(); //int t_alloc_ret = av_image_alloc(m_pFrameYUV->data,m_pFrameYUV->linesize,pCodecCtx->width,pCodecCtx->height,AV_PIX_FMT_YUV420P,1); // int t_size0 = avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->coded_width, pCodecCtx->coded_height); // int t_size1 = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->coded_width, 
pCodecCtx->coded_height,1); //uint8_t * out_buffer = (uint8_t *)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->coded_width, pCodecCtx->coded_height)); uint8_t * out_buffer = (uint8_t *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height)); //avpicture_fill((AVPicture *)m_pFrameYUV , out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->coded_width, pCodecCtx->coded_height); av_image_fill_arrays(m_pFrameYUV->data , m_pFrameYUV->linesize, out_buffer,AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height,1); struct SwsContext *img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,pCodecCtx->sw_pix_fmt, pCodecCtx->width, pCodecCtx->height,AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); // Make a screen to put our video //#ifndef __DARWIN__ // screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0); //#else // screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0); //#endif // // // if(!screen) { // fprintf(stderr, "SDL: could not set video mode - exiting\n"); // exit(1); // } // Allocate a place to put our YUV image on that screen // bmp = SDL_CreateYUVOverlay(pCodecCtx->width, // pCodecCtx->height, // SDL_YV12_OVERLAY, // screen); // Make a screen to put our video m_pWindow = SDL_CreateWindow("test windows", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,pCodecCtx->width, pCodecCtx->height,SDL_WINDOW_SHOWN); if(!m_pWindow) { printf("SDL: could not create window - exiting:%s\n",SDL_GetError()); return -1; } m_pRenderer = SDL_CreateRenderer(m_pWindow, -1, 0); SDL_RenderClear(m_pRenderer); SDL_Texture *m_pSdlTexture = SDL_CreateTexture(m_pRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING, pCodecCtx->width, pCodecCtx->height); rect.x = 0; rect.y = 0; rect.w = pCodecCtx->width; rect.h = pCodecCtx->height; // Read frames and save first five frames to disk i=0; while(av_read_frame(pFormatCtx, &packet)>=0) { // Is this a packet from the video stream? 
if(packet.stream_index==videoStream) { // Decode video frame avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet); // Did we get a video frame? if(frameFinished) { //SDL_LockTexture(m_pSdlTexture, &rect, m_pFrameYUV->data, m_pFrameYUV->linesize); //SDL_LockYUVOverlay(bmp); // AVPicture pict; // pict.data[0] = bmp->pixels[0]; // pict.data[1] = bmp->pixels[2]; // pict.data[2] = bmp->pixels[1]; // // pict.linesize[0] = bmp->pitches[0]; // pict.linesize[1] = bmp->pitches[2]; // pict.linesize[2] = bmp->pitches[1]; // Convert the image into YUV format that SDL uses // sws_scale // ( // sws_ctx, // (uint8_t const * const *)pFrame->data, // pFrame->linesize, // 0, // pCodecCtx->height, // pict.data, // pict.linesize // ); sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, m_pFrameYUV->data, m_pFrameYUV->linesize); //SDL_UnlockYUVOverlay(bmp); // SDL_UnlockTexture(m_pSdlTexture); SDL_UpdateYUVTexture(m_pSdlTexture, &rect, m_pFrameYUV->data[0], m_pFrameYUV->linesize[0], m_pFrameYUV->data[1], m_pFrameYUV->linesize[1], m_pFrameYUV->data[2], m_pFrameYUV->linesize[2]); // rect.x = 0; // rect.y = 0; // rect.w = pCodecCtx->width; // rect.h = pCodecCtx->height; //SDL_DisplayYUVOverlay(bmp, &rect); SDL_RenderClear( m_pRenderer );//this line seems nothing to do SDL_RenderCopy( m_pRenderer, m_pSdlTexture, NULL, &rect); SDL_RenderPresent(m_pRenderer); SDL_Delay(38); // av_free_packet(&packet); av_packet_unref(&packet); } } else if(packet.stream_index==audioStream) { packet_queue_put(&audioq, &packet); } else { // av_free_packet(&packet); av_packet_unref(&packet); } // Free the packet that was allocated by av_read_frame SDL_PollEvent(&event); switch(event.type) { case SDL_QUIT: quit = 1; SDL_Quit(); exit(0); break; default: break; } } // Free the YUV frame av_free(pFrame); // Close the codec avcodec_close(pCodecCtx); // Close the video file avformat_close_input(&pFormatCtx); return 0; }
/*
 * decode_thread - demuxer thread (video-only, pthread-style variant).
 *
 * Opens the input named by is->filename through a custom AVIO context,
 * locates the first video stream, opens its decoder with
 * stream_component_open(), then loops reading packets and pushing video
 * packets onto is->videoq until EOF, a read error, or is->quit is set.
 *
 * arg: a VideoState* passed through the void* thread parameter.
 * Returns NULL in all cases; failures are only logged to stdout.
 */
void *decode_thread(void *arg) {
    //fprintf(stdout, "[FFmpeg-main thread] decode_thread created\n");
    VideoState *is = (VideoState *)arg;
    AVFormatContext *pFormatCtx = NULL;
    AVPacket pkt1, *packet = &pkt1;   /* stack packet reused for every av_read_frame() */
    AVIOInterruptCB callback;
    int video_index = -1;
    int i;

    is->videoStream=-1;

    /* Will interrupt blocking functions if we quit! */
    callback.callback = decode_interrupt_cb;
    callback.opaque = is;

    fprintf(stdout, "[FFmpeg-decode thread] Try to open I/O\n");
    if(avio_open2(&is->io_context, is->filename, 0, &callback, NULL) < 0) {
        fprintf(stdout, "Unable to open I/O for file\n");
        return NULL;
    }
    //fprintf(stdout, "avio_open2 done\n");

    /* Open the container; the demuxer is probed automatically. */
    fprintf(stdout, "[FFmpeg-decode thread] Try to open format context\n");
    if(avformat_open_input(&pFormatCtx, is->filename, NULL, NULL)!=0) {
        fprintf(stdout, "Couldn't Open file\n");
        return NULL; // Couldn't open file
    }
    //fprintf(stdout, "open_input done\n");
    is->pFormatCtx = pFormatCtx;   /* publish the context for the other threads */

    /* Retrieve stream information (codec parameters, durations, ...). */
    fprintf(stdout, "[FFmpeg-decode thread] Try to Retrieve stream info\n");
    if(avformat_find_stream_info(pFormatCtx, NULL)<0) {
        fprintf(stdout, "Couldn't Retrieve stream information\n");
        return NULL; // Couldn't find stream information
    }

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, is->filename, 0);

    /* Find the first video stream; audio is ignored in this variant. */
    for(i=0; i<pFormatCtx->nb_streams; i++) {
        if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO &&
           video_index < 0) {
            video_index=i;
            break;
        }
    }

    if(video_index == -1) {
        fprintf(stdout, "Couldn't find video stream\n");
        return NULL;
    }

    //fprintf(stdout, "stream component open\n");
    /* Opens the decoder and spawns the video thread for this stream. */
    stream_component_open(is, video_index);

    // main decode loop
    //fprintf(stdout, "[FFmpeg-decode thread] read frame\n");
    for(;;) {
        if(is->quit) {
            break;
        }
        if(av_read_frame(is->pFormatCtx, packet) < 0) {
            /* NOTE(review): when the read fails but pb->error == 0 this
             * retries immediately with no sleep, busy-spinning a full core
             * (sibling variants delay ~100 ms here) — consider a short wait. */
            if(is->pFormatCtx->pb->error == 0)
                continue;
            else
                break;
        }
        if(packet->stream_index == is->videoStream) {
            /* Queue takes ownership of the packet's payload. */
            packet_queue_put(&is->videoq, packet);
            //fprintf(stdout, "packs ready to load: %d\n",is->videoq.nb_packets);
        } else
            av_free_packet(packet);   /* drop non-video packets */
    }

    fprintf(stdout, "[FFmpeg-decode thread] thread terminated\n");
    return NULL;
}
int decode_thread(void *arg) { VideoState *is = (VideoState *)arg; AVPacket pkt1, *packet = &pkt1; AVDictionary *io_dict = NULL; AVIOInterruptCB callback; int video_index = -1; int audio_index = -1; int i; int ret; int eof = 0; is->videoStream=-1; is->audioStream=-1; AVDictionary *options = NULL; av_dict_set(&options, "icy", "1", 0); av_dict_set(&options, "user-agent", "FFmpegMediaPlayer", 0); if (is->headers) { av_dict_set(&options, "headers", is->headers, 0); } if (is->offset > 0) { is->pFormatCtx = avformat_alloc_context(); is->pFormatCtx->skip_initial_bytes = is->offset; //is->pFormatCtx->iformat = av_find_input_format("mp3"); } // will interrupt blocking functions if we quit! callback.callback = decode_interrupt_cb; callback.opaque = is; if (avio_open2(&is->io_context, is->filename, 0, &callback, &io_dict)) { fprintf(stderr, "Unable to open I/O for %s\n", is->filename); return -1; } // Open video file if(avformat_open_input(&is->pFormatCtx, is->filename, NULL, &options)!=0) return -1; // Couldn't open file // Retrieve stream information if(avformat_find_stream_info(is->pFormatCtx, NULL)<0) return -1; // Couldn't find stream information // Dump information about file onto standard error av_dump_format(is->pFormatCtx, 0, is->filename, 0); // Find the first video stream for(i=0; i<is->pFormatCtx->nb_streams; i++) { if(is->pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO && video_index < 0) { video_index=i; } if(is->pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO && audio_index < 0) { audio_index=i; } set_codec(is->pFormatCtx, i); } if(audio_index >= 0) { stream_component_open(is, audio_index); } if(video_index >= 0) { stream_component_open(is, video_index); } if(is->videoStream < 0 && is->audioStream < 0) { //if(is->videoStream < 0 || is->audioStream < 0) { fprintf(stderr, "%s: could not open codecs\n", is->filename); notify(is, MEDIA_ERROR, 0, 0); return 0; } set_rotation(is->pFormatCtx, is->audio_st, is->video_st); 
set_framerate(is->pFormatCtx, is->audio_st, is->video_st); set_filesize(is->pFormatCtx); set_chapter_count(is->pFormatCtx); // main decode loop for(;;) { if(is->quit) { break; } /*if (is->paused != is->last_paused) { is->last_paused = is->paused; if (is->paused) is->read_pause_return = av_read_pause(is->pFormatCtx); else av_read_play(is->pFormatCtx); }*/ // seek stuff goes here if(is->seek_req) { int64_t seek_target = is->seek_pos; int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN; int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX; int ret = avformat_seek_file(is->pFormatCtx, -1, seek_min, seek_target, seek_max, is->seek_flags); if(ret < 0) { fprintf(stderr, "%s: error while seeking\n", is->pFormatCtx->filename); } else { if(is->audioStream >= 0) { packet_queue_flush(&is->audioq); packet_queue_put(is, &is->audioq, &is->flush_pkt); } if(is->videoStream >= 0) { packet_queue_flush(&is->videoq); packet_queue_put(is, &is->videoq, &is->flush_pkt); } notify(is, MEDIA_SEEK_COMPLETE, 0, 0); } is->seek_req = 0; eof = 0; } if (is->audioq.size >= MAX_AUDIOQ_SIZE && !is->prepared) { queueAudioSamples(&is->audio_player, is); notify(is, MEDIA_PREPARED, 0, 0); is->prepared = 1; } if(is->audioq.size > MAX_AUDIOQ_SIZE || is->videoq.size > MAX_VIDEOQ_SIZE) { SDL_Delay(10); continue; } if((ret = av_read_frame(is->pFormatCtx, packet)) < 0) { if (ret == AVERROR_EOF || !is->pFormatCtx->pb->eof_reached) { eof = 1; break; } if(is->pFormatCtx->pb->error == 0) { SDL_Delay(100); /* no error; wait for user input */ continue; } else { break; } } // Is this a packet from the video stream? if(packet->stream_index == is->videoStream) { packet_queue_put(is, &is->videoq, packet); } else if(packet->stream_index == is->audioStream) { packet_queue_put(is, &is->audioq, packet); } else { av_free_packet(packet); } if (eof) { break; } } if (eof) { notify(is, MEDIA_PLAYBACK_COMPLETE, 0, 0); } return 0; }
/*
 * main - program entry point: initialize SDL, spawn the parser thread,
 * open the input file, set up audio output and an SDL1 YUV overlay, then
 * read packets in a loop, rendering video frames inline and queueing audio
 * packets for the SDL audio callback.
 *
 * NOTE(review): this main spawns decode_thread(is) on the same file AND
 * opens/reads the file again itself below — two independent demux pipelines
 * on one input; looks like merged tutorial stages, confirm intent.
 */
int main(int argc, char* argv[]) {
    int i, videoStream, audioStream;
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));   /* zeroed player state */

    if(argc < 2) {
        fprintf(stderr, "Usage: test <file>\n");
        exit(1);
    }

    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    av_register_all();   /* register all formats and codecs */

    AVFormatContext *pFormatCtx = NULL;

    av_strlcpy(is->filename, argv[1], sizeof(is->filename));
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    /* Kick off the refresh timer and the demux/parse thread. */
    schedule_refresh(is, 40);
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if(!is->parse_tid) {
        av_free(is);
        return -1;
    }

    // Open video file
    if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0) {
        return -1; // Couldn't open file
    }

    // Retrieve stream information
    if(avformat_find_stream_info(pFormatCtx, NULL) < 0) {
        return -1; //Couldn't find stream information
    }

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, argv[1], 0);

    AVCodecContext *pCodecCtxOrig = NULL;   /* demuxer-owned video codec ctx */
    AVCodecContext *pCodecCtx = NULL;       /* our private copy */
    AVCodecContext *aCodecCtxOrig = NULL;   /* demuxer-owned audio codec ctx */
    AVCodecContext *aCodecCtx = NULL;       /* our private copy */

    /* Find the first video and first audio stream. */
    videoStream = -1;
    audioStream = -1;
    for(i=0; i < pFormatCtx->nb_streams; i++) {
        if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
           videoStream < 0) {
            videoStream = i;
        }
    }
    for(i=0; i < pFormatCtx->nb_streams; i++) {
        if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
           audioStream < 0) {
            audioStream = i;
        }
    }
    if(videoStream == -1) {
        return -1; // Didn't find a video stream
    }
    if(audioStream == -1) {
        return -1;
    }

    // Get a pointer to the codec context for the video stream
    pCodecCtxOrig = pFormatCtx->streams[videoStream]->codec;
    aCodecCtxOrig = pFormatCtx->streams[audioStream]->codec;

    AVCodec *pCodec = NULL;
    AVCodec *aCodec = NULL;

    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pCodecCtxOrig->codec_id);
    if(pCodec == NULL) {
        return -1;
    }
    aCodec = avcodec_find_decoder(aCodecCtxOrig->codec_id);
    if(aCodec == NULL) {
        return -1;
    }

    /* Copy contexts so we never decode on the demuxer-owned ones. */
    pCodecCtx = avcodec_alloc_context3(pCodec);
    if(avcodec_copy_context(pCodecCtx, pCodecCtxOrig) != 0) {
        return -1;
    }
    aCodecCtx = avcodec_alloc_context3(aCodec);
    if(avcodec_copy_context(aCodecCtx, aCodecCtxOrig) != 0) {
        return -1;
    }

    /* Audio output: the SDL callback pulls decoded samples, with the audio
     * codec context as its userdata. */
    SDL_AudioSpec wanted_spec, spec;
    wanted_spec.freq = aCodecCtx->sample_rate;
    wanted_spec.format = AUDIO_S16SYS;
    wanted_spec.channels = aCodecCtx->channels;
    wanted_spec.silence = 0;
    wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
    wanted_spec.callback = audio_callback;
    wanted_spec.userdata = aCodecCtx;

    if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
        fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
        return -1;
    }

    // Open codecs
    AVDictionary *optionDict = NULL;
    if(avcodec_open2(pCodecCtx, pCodec, &optionDict) < 0) {
        return -1;
    }
    if(avcodec_open2(aCodecCtx, aCodec, NULL) < 0) {
        return -1;
    }

    packet_queue_init(&audioq);
    SDL_PauseAudio(0);   /* start the audio callback */

    // Allocate video frame
    AVFrame *pFrame = NULL;
    pFrame = av_frame_alloc();

    /* SDL1 video surface + YUV overlay sized to the source video. */
    SDL_Surface *screen;
    screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
    if(!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        exit(1);
    }
    SDL_Overlay *bmp = NULL;
    bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height,
                               SDL_YV12_OVERLAY, screen);

    printf("[loop]==========================\n");

    struct SwsContext *sws_ctx = NULL;
    int frameFinished;
    AVPacket packet;

    //initialize SWS context for software scaling
    sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
                             pCodecCtx->pix_fmt,
                             pCodecCtx->width, pCodecCtx->height,
                             AV_PIX_FMT_YUV420P, SWS_BILINEAR,
                             NULL, NULL, NULL);

    // Read frames and display
    i = 0;
    while(av_read_frame(pFormatCtx, &packet) >= 0) {
        if(packet.stream_index == videoStream) {
            //Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
            // Did we get a video frame?
            if(frameFinished) {
                SDL_LockYUVOverlay(bmp);

                /* YV12 overlay stores planes as Y,V,U — swap U/V pointers. */
                AVPicture pict;
                pict.data[0] = bmp->pixels[0];
                pict.data[1] = bmp->pixels[2];
                pict.data[2] = bmp->pixels[1];
                pict.linesize[0] = bmp->pitches[0];
                pict.linesize[1] = bmp->pitches[2];
                pict.linesize[2] = bmp->pitches[1];

                // Convert the image into YUV format that SDL uses
                sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data,
                          pFrame->linesize, 0, pCodecCtx->height,
                          pict.data, pict.linesize);

                SDL_UnlockYUVOverlay(bmp);

                SDL_Rect rect;
                rect.x = 0;
                rect.y = 0;
                rect.w = pCodecCtx->width;
                rect.h = pCodecCtx->height;
                SDL_DisplayYUVOverlay(bmp, &rect);

                /* NOTE(review): freed only when frameFinished — video packets
                 * that do not complete a frame are leaked; the free belongs
                 * outside this if. */
                av_free_packet(&packet);
            }
        } else if (packet.stream_index == audioStream) {
            /* Queue takes ownership; the audio callback frees it later. */
            packet_queue_put(&audioq, &packet);
        } else {
            // Free the packet that was allocated by av_read_frame
            av_free_packet(&packet);
        }

        /* Minimal event pump so the window stays responsive. */
        SDL_Event event;
        SDL_PollEvent(&event);
        switch(event.type) {
        case SDL_QUIT:
            quit = 1;
            SDL_Quit();
            exit(0);
            break;
        default:
            break;
        }
    }

    // Free the YUV frame
    /* NOTE(review): av_frame_free(&pFrame) is the matching release for
     * av_frame_alloc(); av_free leaks frame-internal buffers. */
    av_free(pFrame);

    // Close the codec
    /* NOTE(review): aCodecCtx/aCodecCtxOrig are never closed here. */
    avcodec_close(pCodecCtx);
    avcodec_close(pCodecCtxOrig);

    // Close the video file
    avformat_close_input(&pFormatCtx);

    return 0;
}
/*
 * decode_thread - demuxer thread reading a video from an Ogre resource.
 *
 * Wraps an Ogre::DataStream in a custom AVIOContext (OgreResource_* I/O
 * callbacks), opens the container, opens the first audio and video streams,
 * then reads packets into the audio/video queues until EOF or is->quit.
 * On exit it waits for the queues to drain, joins the video thread and
 * releases codecs, scaler and I/O context.
 *
 * arg: VideoState* for this playback.  Throws std::runtime_error on any
 * setup failure.  Returns 0.
 */
int decode_thread(void *arg) {
    VideoState *is = (VideoState *)arg;
    AVFormatContext *pFormatCtx = avformat_alloc_context ();
    AVPacket pkt1, *packet = &pkt1;   // stack packet reused for every read
    int video_index = -1;
    int audio_index = -1;
    int i;

    is->videoStream=-1;
    is->audioStream=-1;
    is->quit = 0;

    // Resolve the named resource through Ogre's resource system.
    Ogre::DataStreamPtr stream =
        Ogre::ResourceGroupManager::getSingleton ().openResource (is->resourceName);
    if(stream.isNull ())
        throw std::runtime_error("Failed to open video resource");
    is->stream = stream;

    // Custom I/O: FFmpeg reads/seeks through the Ogre stream callbacks.
    // NOTE(review): buffer is NULL with size 0 here — verify the FFmpeg
    // version in use accepts an unbuffered AVIOContext.
    AVIOContext *ioContext = 0;
    ioContext = avio_alloc_context(NULL, 0, 0, is, OgreResource_Read,
                                   OgreResource_Write, OgreResource_Seek);
    if (!ioContext)
        throw std::runtime_error("Failed to allocate ioContext ");
    pFormatCtx->pb = ioContext;

    global_video_state = is;
    // will interrupt blocking functions if we quit!
    //url_set_interrupt_cb(decode_interrupt_cb);

    // Open video file
    /// \todo leak here, ffmpeg or valgrind bug ?
    if (avformat_open_input(&pFormatCtx, is->resourceName.c_str(), NULL, NULL))
        throw std::runtime_error("Failed to open video input");

    // Retrieve stream information
    if(avformat_find_stream_info(pFormatCtx, NULL)<0)
        throw std::runtime_error("Failed to retrieve stream information");

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, is->resourceName.c_str(), 0);

    // Pick the first audio and first video stream.
    for(i=0; i < (int)pFormatCtx->nb_streams; i++) {
        if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO &&
           video_index < 0) {
            video_index=i;
        }
        if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO &&
           audio_index < 0) {
            audio_index=i;
        }
    }

    if(audio_index >= 0) {
        stream_component_open(is, audio_index, pFormatCtx);
    }
    if(video_index >= 0) {
        stream_component_open(is, video_index, pFormatCtx);
    }

    // Only run the read loop when a video stream actually opened.
    if(is->videoStream >= 0 /*|| is->audioStream < 0*/) {
        // main decode loop
        for(;;) {
            if(is->quit) {
                break;
            }
            // Queues full — back off and let the decoders catch up.
            if( (is->audioStream >= 0 && is->audioq.size > MAX_AUDIOQ_SIZE) ||
                is->videoq.size > MAX_VIDEOQ_SIZE) {
                boost::this_thread::sleep(boost::posix_time::milliseconds(10));
                continue;
            }
            if(av_read_frame(pFormatCtx, packet) < 0) {
                break;   // EOF or read error ends the demux loop
            }
            // Is this a packet from the video stream?
            if(packet->stream_index == is->videoStream) {
                packet_queue_put(&is->videoq, packet);
            } else if(packet->stream_index == is->audioStream) {
                packet_queue_put(&is->audioq, packet);
            } else {
                av_free_packet(packet);   // drop packets from other streams
            }
        }

        /* all done - wait for it */
        while(!is->quit) {
            // EOF reached, all packets processed, we can exit now
            if (is->audioq.nb_packets == 0 && is->videoq.nb_packets == 0)
                break;
            boost::this_thread::sleep(boost::posix_time::milliseconds(100));
        }
    }

    // Shutdown: wake any queue waiters, join the video thread, release
    // codecs, the scaler and the custom I/O context.
    is->quit = 1;
    is->audioq.cond.notify_one();
    is->videoq.cond.notify_one();

    if (is->video_thread.joinable())
        is->video_thread.join();

    if (is->audioStream >= 0)
        avcodec_close(is->audio_st->codec);
    if (is->videoStream >= 0)
        avcodec_close(is->video_st->codec);

    sws_freeContext (is->sws_context);

    avformat_close_input(&pFormatCtx);
    pFormatCtx = NULL;
    av_free(ioContext);

    return 0;
}
int main(int argc, char *argv[]) { AVFormatContext *pFormatCtx; int i, videoStream, audioStream; AVCodecContext *pCodecCtx; AVCodec *pCodec; AVFrame *pFrame; AVPacket packet; int frameFinished; float aspect_ratio; struct SwsContext *img_convert_ctx; AVCodecContext *aCodecCtx; AVCodec *aCodec; SDL_Overlay *bmp; SDL_Surface *screen; SDL_Rect rect; SDL_Event event; SDL_AudioSpec wanted_spec, spec; if(argc < 2) { fprintf(stderr, "Usage: test <file>\n"); exit(1); } // Register all formats and codecs av_register_all(); if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) { fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError()); exit(1); } // Open video file if(av_open_input_file(&pFormatCtx, argv[1], NULL, 0, NULL)!=0) return -1; // Couldn't open file // Retrieve stream information if(av_find_stream_info(pFormatCtx)<0) return -1; // Couldn't find stream information // Dump information about file onto standard error dump_format(pFormatCtx, 0, argv[1], 0); // Find the first video stream videoStream=-1; audioStream=-1; for(i=0; i<pFormatCtx->nb_streams; i++) { if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO && videoStream < 0) { videoStream=i; } if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_AUDIO && audioStream < 0) { audioStream=i; } } if(videoStream==-1) return -1; // Didn't find a video stream if(audioStream==-1) return -1; aCodecCtx=pFormatCtx->streams[audioStream]->codec; // Set audio settings from codec info wanted_spec.freq = aCodecCtx->sample_rate; wanted_spec.format = AUDIO_S16SYS; wanted_spec.channels = aCodecCtx->channels; wanted_spec.silence = 0; wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE; wanted_spec.callback = audio_callback; wanted_spec.userdata = aCodecCtx; if(SDL_OpenAudio(&wanted_spec, &spec) < 0) { fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError()); return -1; } aCodec = avcodec_find_decoder(aCodecCtx->codec_id); if(!aCodec) { fprintf(stderr, "Unsupported codec!\n"); return -1; } if 
(avcodec_open(aCodecCtx, aCodec) < 0) { fprintf(stderr, "Cannot open audio codec!\n"); return -1; } // audio_st = pFormatCtx->streams[index] packet_queue_init(&audioq); SDL_PauseAudio(0); // Get a pointer to the codec context for the video stream pCodecCtx=pFormatCtx->streams[videoStream]->codec; // Find the decoder for the video stream pCodec=avcodec_find_decoder(pCodecCtx->codec_id); if(pCodec==NULL) { fprintf(stderr, "Unsupported codec!\n"); return -1; // Codec not found } // Open codec if(avcodec_open(pCodecCtx, pCodec)<0) { fprintf(stderr, "Cannot open video codec!\n"); return -1; // Could not open codec } // construct the scale context, conversing to PIX_FMT_YUV420P img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);// other codes if (img_convert_ctx == NULL) { fprintf(stderr, "Cannot initialize the conversion context!\n"); return -1; } // Allocate video frame pFrame=avcodec_alloc_frame(); // Make a screen to put our video #ifndef __DARWIN__ screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0); #else screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0); #endif if(!screen) { fprintf(stderr, "SDL: could not set video mode - exiting\n"); exit(1); } // Allocate a place to put our YUV image on that screen bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height, SDL_YV12_OVERLAY, screen); // Read frames and save first five frames to disk i=0; while(av_read_frame(pFormatCtx, &packet)>=0) { // Is this a packet from the video stream? if(packet.stream_index==videoStream) { // Decode video frame avcodec_decode_video(pCodecCtx, pFrame, &frameFinished, packet.data, packet.size); // Did we get a video frame? 
if(frameFinished) { SDL_LockYUVOverlay(bmp); AVPicture pict; pict.data[0] = bmp->pixels[0]; pict.data[1] = bmp->pixels[2]; pict.data[2] = bmp->pixels[1]; pict.linesize[0] = bmp->pitches[0]; pict.linesize[1] = bmp->pitches[2]; pict.linesize[2] = bmp->pitches[1]; // Convert the image into YUV format that SDL uses /* img_convert(&pict, PIX_FMT_YUV420P, (AVPicture *)pFrame, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height); */ sws_scale(img_convert_ctx, (const uint8_t * const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pict.data, pict.linesize); SDL_UnlockYUVOverlay(bmp); rect.x = 0; rect.y = 0; rect.w = pCodecCtx->width; rect.h = pCodecCtx->height; SDL_DisplayYUVOverlay(bmp, &rect); av_free_packet(&packet); } } else if(packet.stream_index==audioStream) { packet_queue_put(&audioq, &packet); } else { av_free_packet(&packet); } // Free the packet that was allocated by av_read_frame SDL_PollEvent(&event); switch(event.type) { case SDL_QUIT: quit = 1; SDL_Quit(); exit(0); break; default: break; } } sws_freeContext(img_convert_ctx); // Free the YUV frame av_free(pFrame); // Close the codec avcodec_close(pCodecCtx); // Close the video file av_close_input_file(pFormatCtx); return 0; }
/*
 * decode_thread - demuxer thread (resampler-aware variant).
 *
 * Opens is->filename, opens the first audio and video streams, optionally
 * configures an audio resampler (libavresample or libswresample, selected
 * at compile time) when the audio stream is not already S16, then loops
 * reading packets into the audio/video queues until EOF, error or quit.
 * On exit (or setup failure) it pushes FF_QUIT_EVENT to the SDL event loop.
 *
 * arg: VideoState* for this playback.  Returns 0 normally, -1 on I/O or
 * open failure.
 */
int decode_thread(void *arg) {
    VideoState *is = (VideoState *)arg;
    AVFormatContext *pFormatCtx = NULL;
    AVPacket pkt1, *packet = &pkt1;   /* stack packet reused for every read */
    AVDictionary *io_dict = NULL;
    AVIOInterruptCB callback;
    int video_index = -1;
    int audio_index = -1;
    int i;

    is->videoStream = -1;
    is->audioStream = -1;
    is->audio_need_resample = 0;

    global_video_state = is;
    // will interrupt blocking functions if we quit!
    callback.callback = decode_interrupt_cb;
    callback.opaque = is;

    if(avio_open2(&is->io_context, is->filename, 0, &callback, &io_dict)) {
        fprintf(stderr, "Unable to open I/O for %s\n", is->filename);
        return -1;
    }

    // Open video file
    if(avformat_open_input(&pFormatCtx, is->filename, NULL, NULL) != 0) {
        return -1; // Couldn't open file
    }

    is->pFormatCtx = pFormatCtx;   /* publish for the other threads */

    // Retrieve stream information
    if(avformat_find_stream_info(pFormatCtx, NULL) < 0) {
        return -1; // Couldn't find stream information
    }

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, is->filename, 0);

    /* Pick the first audio and first video stream. */
    for(i = 0; i < pFormatCtx->nb_streams; i++) {
        if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
           video_index < 0) {
            video_index = i;
        }
        if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
           audio_index < 0) {
            audio_index = i;
        }
    }

    if(audio_index >= 0) {
        stream_component_open(is, audio_index);
    }
    if(video_index >= 0) {
        stream_component_open(is, video_index);
    }

    /* Audio-only or video-only files are fine; bail only if neither opened. */
    if(is->videoStream < 0 && is->audioStream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        goto fail;
    }

#ifdef __RESAMPLER__
    /* If the audio is not already interleaved S16, set up a resampler that
     * converts to stereo S16 @ 44100 Hz for SDL output. */
    if( audio_index >= 0 &&
        pFormatCtx->streams[audio_index]->codec->sample_fmt != AV_SAMPLE_FMT_S16) {
        is->audio_need_resample = 1;
        is->pResampledOut = NULL;
        is->pSwrCtx = NULL;

        printf("Configure resampler: ");
#ifdef __LIBAVRESAMPLE__
        printf("libAvResample\n");
        is->pSwrCtx = avresample_alloc_context();
#endif
#ifdef __LIBSWRESAMPLE__
        printf("libSwResample\n");
        is->pSwrCtx = swr_alloc();
#endif

        // Some MP3/WAV don't tell this so make assumtion that
        // They are stereo not 5.1
        if (pFormatCtx->streams[audio_index]->codec->channel_layout == 0 &&
            pFormatCtx->streams[audio_index]->codec->channels == 2) {
            pFormatCtx->streams[audio_index]->codec->channel_layout = AV_CH_LAYOUT_STEREO;
        } else if (pFormatCtx->streams[audio_index]->codec->channel_layout == 0 &&
                   pFormatCtx->streams[audio_index]->codec->channels == 1) {
            pFormatCtx->streams[audio_index]->codec->channel_layout = AV_CH_LAYOUT_MONO;
        } else if (pFormatCtx->streams[audio_index]->codec->channel_layout == 0 &&
                   pFormatCtx->streams[audio_index]->codec->channels == 0) {
            /* No layout and no channel count reported — assume stereo. */
            pFormatCtx->streams[audio_index]->codec->channel_layout = AV_CH_LAYOUT_STEREO;
            pFormatCtx->streams[audio_index]->codec->channels = 2;
        }

        /* Source side mirrors the stream; destination is fixed S16 stereo. */
        av_opt_set_int(is->pSwrCtx, "in_channel_layout",
                       pFormatCtx->streams[audio_index]->codec->channel_layout, 0);
        av_opt_set_int(is->pSwrCtx, "in_sample_fmt",
                       pFormatCtx->streams[audio_index]->codec->sample_fmt, 0);
        av_opt_set_int(is->pSwrCtx, "in_sample_rate",
                       pFormatCtx->streams[audio_index]->codec->sample_rate, 0);
        av_opt_set_int(is->pSwrCtx, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
        av_opt_set_int(is->pSwrCtx, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
        av_opt_set_int(is->pSwrCtx, "out_sample_rate", 44100, 0);

#ifdef __LIBAVRESAMPLE__
        if (avresample_open(is->pSwrCtx) < 0) {
#else
        if (swr_init(is->pSwrCtx) < 0) {
#endif
            /* Resampler failed to open: fall back to un-resampled audio.
             * NOTE(review): the failed context is leaked here (not freed
             * before NULLing) — confirm whether that is acceptable. */
            fprintf(stderr,
                    " ERROR!! From Samplert: %d Hz Sample format: %s\n",
                    pFormatCtx->streams[audio_index]->codec->sample_rate,
                    av_get_sample_fmt_name(pFormatCtx->streams[audio_index]->codec->sample_fmt));
            fprintf(stderr, " To 44100 Sample format: s16\n");
            is->audio_need_resample = 0;
            is->pSwrCtx = NULL;;
        }
    }
#endif

    // main decode loop
    for(;;) {
        if(is->quit) {
            break;
        }

        // seek stuff goes here
        /* Queues full — back off and let the decoders drain them. */
        if(is->audioq.size > MAX_AUDIOQ_SIZE || is->videoq.size > MAX_VIDEOQ_SIZE) {
            SDL_Delay(10);
            continue;
        }
        if(av_read_frame(is->pFormatCtx, packet) < 0) {
            if(is->pFormatCtx->pb->error == 0) {
                SDL_Delay(100); /* no error; wait for user input */
                continue;
            } else {
                break;
            }
        }

        // Is this a packet from the video stream?
        if(packet->stream_index == is->videoStream) {
            packet_queue_put(&is->videoq, packet);
        } else if(packet->stream_index == is->audioStream) {
            packet_queue_put(&is->audioq, packet);
        } else {
            av_free_packet(packet);   /* drop packets from other streams */
        }
    }

    /* all done - wait for it */
    while(!is->quit) {
        SDL_Delay(100);
    }

fail:
    /* Tell the main loop to shut down; it owns the final cleanup. */
    {
        SDL_Event event;
        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}

/* Window class name shared by WndProc registration and CreateWindowEx. */
const char g_szClassName[] = "myWindowClass";

// Step 4: the Window Procedure
/* Minimal Win32 window procedure: close destroys the window, destroy posts
 * the quit message, everything else goes to the default handler. */
LRESULT CALLBACK WndProc(HWND hwnd, UINT msg, WPARAM wParam, LPARAM lParam) {
    switch(msg) {
    case WM_CLOSE:
        DestroyWindow(hwnd);
        break;
    case WM_DESTROY:
        PostQuitMessage(0);
        break;
    default:
        return DefWindowProc(hwnd, msg, wParam, lParam);
    }
    return 0;
}

/* Signaled by window_manager once the HWND is available (even on failure). */
sem_t mutex;
HWND hwnd;

/*
 * window_manager - thread entry that registers the window class, creates
 * the player window, signals `mutex` so the waiting thread can proceed,
 * then runs the Win32 message loop until WM_QUIT.
 *
 * NOTE(review): wc.hInstance is commented out and never set, so
 * RegisterClassEx reads an uninitialized field from the stack — confirm.
 * NOTE(review): printf("%d", hwnd) formats a handle with %d, and the final
 * `return Msg.wParam;` returns an integer from a void*-returning function —
 * both rely on implicit conversions worth cleaning up.
 */
static void * window_manager(void * data) {
    printf("Window manager has been started!\n");
    WNDCLASSEX wc;
    MSG Msg;

    //Step 1: Registering the Window Class
    wc.cbSize = sizeof(WNDCLASSEX);
    wc.style = 0;
    wc.lpfnWndProc = WndProc;
    wc.cbClsExtra = 0;
    wc.cbWndExtra = 0;
    //wc.hInstance = hInstance;
    wc.hIcon = LoadIcon(NULL, IDI_APPLICATION);
    wc.hCursor = LoadCursor(NULL, IDC_ARROW);
    wc.hbrBackground = (HBRUSH)(COLOR_WINDOW+1);
    wc.lpszMenuName = NULL;
    wc.lpszClassName = g_szClassName;
    wc.hIconSm = LoadIcon(NULL, IDI_APPLICATION);

    if(!RegisterClassEx(&wc)) {
        MessageBox(NULL, "Window Registration Failed!", "Error!",
                   MB_ICONEXCLAMATION | MB_OK);
        return 0;
    }

    // Step 2: Creating the Window
    hwnd = CreateWindowEx(
        WS_EX_CLIENTEDGE,
        g_szClassName,
        "The title of my window",
        WS_OVERLAPPEDWINDOW,
        CW_USEDEFAULT, CW_USEDEFAULT, 240, 120,
        NULL, NULL, NULL, NULL);

    /* Unblock the waiter now that hwnd is set (possibly to NULL). */
    sem_post(&mutex);

    if(hwnd == NULL) {
        MessageBox(NULL, "Window Creation Failed!", "Error!",
                   MB_ICONEXCLAMATION | MB_OK);
        return 0;
    } else {
        printf("window hwnd = %d\n", hwnd);
    }

    ShowWindow(hwnd, TRUE);
    UpdateWindow(hwnd);

    // Step 3: The Message Loop
    while(GetMessage(&Msg, NULL, 0, 0) > 0) {
        TranslateMessage(&Msg);
        DispatchMessage(&Msg);
    }
    return Msg.wParam;
}