int main (int argc, char *argv[]) { av_register_all(); // registed all file format AVFormatContext *pFormatCtx; // to define the structure *point of which being used // Open media(video/audio whatever) file if (avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0){//changed av_open_input_file into the new avformat_open_input, thus no use for the past argument int buf_size inside av_open_input_file. return -1; // couldn't open file avformat_input_file(&pFormatCtx);//av_close_input_file has be deprecated } // Retrieve stream information if (avformat_find_stream_info(pFormatCtx,NULL)<0)//also use the new one instead of int av_find_stream_info and set NULL for the AVDictionary **options. But notice that, it could be nb_streams. return -1; // couldn't find stream information // Dump information about file onto standard error av_dump_format (pFormatCtx, 0, argv[1], 0);//use the new av_dump_format instead of dump_format // Find the first sound stream int i; AVCodecContext *aCodecCtx; int audioStream=-1; for (i=0;i<pFormatCtx->nb_streams;i++) if (pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO && audioStream < 0) //it's new one but not CODEC_TYPE_AUDIO { audioStream=i; break; } if(audioStream==-1) return -1; //didn't find a audio stream // Get a pointer to the codec context for the video stream aCodecCtx=pFormatCtx->streams[audioStream]->codec; // printf ("%d\n",audioStream); //for test SDL_AudioSpec *wanted_spec; SDL_AudioSpec *spec; if(SDL_OpenAudio(&wanted_spec, &spec)<0){ fprintf(stderr,"SDL_OpenAudio:%s\n",SDL_GetError()); return -1; } AVCodec *aCodec; aCodec=avcodec_find_decoder(aCodecCtx->codec_id); if(!aCodec){ fprintf(stderr,"Unsupported codec!\n"); return -1; } avcodec_open2(aCodecCtx,aCodec,NULL);//use the new 2 version instead of the original version packet_queue_init(&audioq); SDL_PauseAudio(0); AVPacket packet; while (av_read_frame(pFormatCtx, &packet)>=0){ // if(packet.stream_index==videoStream){ //for there's not concern with the 
viedoStream so no use for the determination /* } else*/ if (packet.stream_index==audioStream){ packet_queue_put(&audioq, &packet); } else { av_free_packet(&packet); } } /* SDL_Event event; static VideoState *global_video_state; static int decode_interrupt_cb(void *ctx){ return global_video_state && global_video_state->abort_request; } // url_set_interrupt_cb(decode_interrupt_cb);//undefined in the current libavformat/avio.h version. Use above instead. SDL_PollEvent(&event); switch(event.type){ case SDL_QUIT: quit = 1; }*/ }
/*
 * Open the decoder for stream `stream_index` of is->pFormatCtx and wire it
 * into the player state.  Audio streams get an SDL audio device plus a
 * libswresample context; video streams get a decode thread plus a
 * libswscale context.
 *
 * Returns 0 on success, -1 on error (the original fell off the end of this
 * non-void function on every success path, which is UB when the caller
 * inspects the result).
 */
int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *pFormatCtx = is->pFormatCtx;
    AVCodecContext *codecCtx = NULL;
    AVCodec *codec = NULL;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= pFormatCtx->nb_streams) {
        return -1;
    }

    codec = avcodec_find_decoder(pFormatCtx->streams[stream_index]->codec->codec_id);
    if (!codec) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1;
    }

    /* Work on a private copy of the codec context so the demuxer's copy
     * stays untouched. */
    codecCtx = avcodec_alloc_context3(codec);
    if (!codecCtx) {
        fprintf(stderr, "Couldn't allocate codec context");
        return -1;
    }
    if (avcodec_copy_context(codecCtx, pFormatCtx->streams[stream_index]->codec) != 0) {
        fprintf(stderr, "Couldn't copy codec context");
        avcodec_free_context(&codecCtx); /* don't leak the copy */
        return -1; /* Error copying codec context */
    }

    if (codecCtx->codec_type == AVMEDIA_TYPE_AUDIO) {
        /* Set audio settings from codec info. */
        wanted_spec.freq = codecCtx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = codecCtx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            avcodec_free_context(&codecCtx);
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
    }

    if (avcodec_open2(codecCtx, codec, NULL) < 0) {
        fprintf(stderr, "Unsupported codec!\n");
        avcodec_free_context(&codecCtx);
        return -1;
    }

    switch (codecCtx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audioStream = stream_index;
        is->audio_st = pFormatCtx->streams[stream_index];
        is->audio_ctx = codecCtx;
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;
        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);

        /* Decoded raw audio must be resampled into the packed S16 layout
         * SDL expects; configure swr_ctx from the decoder's format.
         * NOTE(review): the output layout is hard-wired to stereo while the
         * SDL device was opened with codecCtx->channels — confirm these
         * agree for non-stereo sources. */
        is->swr_ctx = swr_alloc_set_opts(NULL,
                                         AV_CH_LAYOUT_STEREO,
                                         AV_SAMPLE_FMT_S16,
                                         is->audio_ctx->sample_rate,
                                         av_get_default_channel_layout(is->audio_ctx->channels),
                                         is->audio_ctx->sample_fmt,
                                         is->audio_ctx->sample_rate,
                                         0, NULL);
        /* Originally neither allocation nor init was checked. */
        if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
            fprintf(stderr, "Couldn't initialize resampler\n");
            return -1;
        }
        SDL_PauseAudio(0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->videoStream = stream_index;
        is->video_st = pFormatCtx->streams[stream_index];
        is->video_ctx = codecCtx;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->frame_last_delay = 40e-3; /* 40 ms ≈ 25 fps initial guess */
        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        is->sws_ctx = sws_getContext(is->video_ctx->width, is->video_ctx->height,
                                     is->video_ctx->pix_fmt,
                                     is->video_ctx->width, is->video_ctx->height,
                                     PIX_FMT_YUV420P, SWS_BILINEAR,
                                     NULL, NULL, NULL);
        break;
    default:
        break;
    }
    return 0;
}
/*
 * Open the decoder for stream `stream_index` and install it in the player
 * state (audio: SDL device + sync-filter state; video: decode thread +
 * swscale context + custom buffer callbacks).
 *
 * Returns 0 on success, -1 on error.
 */
int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *pFormatCtx = is->pFormatCtx;
    AVCodecContext *codecCtx = NULL;
    AVCodec *codec = NULL;
    AVDictionary *optionsDict = NULL;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= pFormatCtx->nb_streams) {
        return -1;
    }

    /* Get a pointer to the codec context for the stream (shared with the
     * demuxer — no private copy is made in this variant). */
    codecCtx = pFormatCtx->streams[stream_index]->codec;

    if (codecCtx->codec_type == AVMEDIA_TYPE_AUDIO) {
        /* Set audio settings from codec info. */
        wanted_spec.freq = codecCtx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = codecCtx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
    }

    codec = avcodec_find_decoder(codecCtx->codec_id);
    if (!codec || (avcodec_open2(codecCtx, codec, &optionsDict) < 0)) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1;
    }

    switch (codecCtx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audioStream = stream_index;
        is->audio_st = pFormatCtx->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;
        /* Averaging filter for audio sync. */
        is->audio_diff_avg_coef = exp(log(0.01 / AUDIO_DIFF_AVG_NB));
        is->audio_diff_avg_count = 0;
        /* Correct audio only if the error is larger than this. */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / codecCtx->sample_rate;
        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->videoStream = stream_index;
        is->video_st = pFormatCtx->streams[stream_index];
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->frame_last_delay = 40e-3; /* 40 ms ≈ 25 fps initial guess */
        is->video_current_pts_time = av_gettime();
        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        is->sws_ctx = sws_getContext(
            is->video_st->codec->width, is->video_st->codec->height,
            is->video_st->codec->pix_fmt,
            is->video_st->codec->width, is->video_st->codec->height,
            PIX_FMT_YUV420P, SWS_BILINEAR,
            NULL, NULL, NULL);
        /* FIX: the original assigned get_buffer2 (refcounted-frame API)
         * together with the legacy release_buffer callback — the two APIs
         * must not be mixed; release_buffer is only invoked for the old
         * get_buffer path.  The sibling variants in this file install the
         * legacy pair, so do the same here.
         * NOTE(review): confirm our_get_buffer has the legacy get_buffer
         * signature, not the get_buffer2 one. */
        codecCtx->get_buffer = our_get_buffer;
        codecCtx->release_buffer = our_release_buffer;
        break;
    default:
        break;
    }
    return 0;
}
/*
 * Bind stream `stream_index` of is->pFormatCtx to the player: open its
 * decoder, and depending on its type either open the SDL audio device and
 * start playback, or spawn the video decode thread and build a swscale
 * context.
 *
 * Returns 0 on success, -1 on error.
 */
int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *fmt = is->pFormatCtx;
    AVDictionary *opts = NULL;
    SDL_AudioSpec wanted_spec, spec;

    /* Reject out-of-range stream indices up front. */
    if (stream_index < 0 || stream_index >= fmt->nb_streams)
        return -1;

    /* Codec context shared with the demuxer for this stream. */
    AVCodecContext *ctx = fmt->streams[stream_index]->codec;

    /* For audio, the SDL device is opened before the decoder. */
    if (ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        wanted_spec.freq = ctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = ctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
    }

    /* Locate and open the decoder. */
    AVCodec *dec = avcodec_find_decoder(ctx->codec_id);
    if (dec == NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1;
    }
    if (avcodec_open2(ctx, dec, &opts) < 0) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1;
    }

    switch (ctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audioStream = stream_index;
        is->audio_st = fmt->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;
        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0); /* unpause: start pulling through the callback */
        break;

    case AVMEDIA_TYPE_VIDEO:
        is->videoStream = stream_index;
        is->video_st = fmt->streams[stream_index];
        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        /* Same-size conversion from the decoder's pixel format to YUV420P. */
        is->sws_ctx = sws_getContext(
            is->video_st->codec->width, is->video_st->codec->height,
            is->video_st->codec->pix_fmt,
            is->video_st->codec->width, is->video_st->codec->height,
            PIX_FMT_YUV420P, SWS_BILINEAR,
            NULL, NULL, NULL);
        break;

    default:
        break;
    }
    return 0;
}
int main(int argc, char *argv[]) { AVFormatContext *pFormatCtx = NULL; int i, videoStream, audioStream; AVCodecContext *pCodecCtx = NULL; AVCodec *pCodec = NULL; AVFrame *pFrame = NULL; AVPacket packet; int frameFinished; //float aspect_ratio; AVCodecContext *aCodecCtx = NULL; AVCodec *aCodec = NULL; SDL_Overlay *bmp = NULL; SDL_Surface *screen = NULL; SDL_Rect rect; SDL_Event event; SDL_AudioSpec wanted_spec, spec; struct SwsContext *sws_ctx = NULL; AVDictionary *videoOptionsDict = NULL; AVDictionary *audioOptionsDict = NULL; if(argc < 2) { fprintf(stderr, "Usage: test <file>\n"); exit(1); } // Register all formats and codecs av_register_all(); if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) { fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError()); exit(1); } // Open video file if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0) return -1; // Couldn't open file // Retrieve stream information if(avformat_find_stream_info(pFormatCtx, NULL)<0) return -1; // Couldn't find stream information // Dump information about file onto standard error av_dump_format(pFormatCtx, 0, argv[1], 0); // Find the first video stream videoStream=-1; audioStream=-1; for(i=0; i<pFormatCtx->nb_streams; i++) { if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO && videoStream < 0) { videoStream=i; } if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO && audioStream < 0) { audioStream=i; } } if(videoStream==-1) return -1; // Didn't find a video stream if(audioStream==-1) return -1; aCodecCtx=pFormatCtx->streams[audioStream]->codec; // Set audio settings from codec info wanted_spec.freq = aCodecCtx->sample_rate; wanted_spec.format = AUDIO_S16SYS; wanted_spec.channels = aCodecCtx->channels; wanted_spec.silence = 0; wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE; wanted_spec.callback = audio_callback; wanted_spec.userdata = aCodecCtx; if(SDL_OpenAudio(&wanted_spec, &spec) < 0) { fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError()); 
return -1; } aCodec = avcodec_find_decoder(aCodecCtx->codec_id); if(!aCodec) { fprintf(stderr, "Unsupported codec!\n"); return -1; } avcodec_open2(aCodecCtx, aCodec, &audioOptionsDict); // audio_st = pFormatCtx->streams[index] packet_queue_init(&audioq); SDL_PauseAudio(0); // Get a pointer to the codec context for the video stream pCodecCtx=pFormatCtx->streams[videoStream]->codec; // Find the decoder for the video stream pCodec=avcodec_find_decoder(pCodecCtx->codec_id); if(pCodec==NULL) { fprintf(stderr, "Unsupported codec!\n"); return -1; // Codec not found } // Open codec if(avcodec_open2(pCodecCtx, pCodec, &videoOptionsDict)<0) return -1; // Could not open codec // Allocate video frame pFrame=av_frame_alloc(); // Make a screen to put our video #ifndef __DARWIN__ screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0); #else screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0); #endif if(!screen) { fprintf(stderr, "SDL: could not set video mode - exiting\n"); exit(1); } // Allocate a place to put our YUV image on that screen bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height, SDL_YV12_OVERLAY, screen); sws_ctx = sws_getContext ( pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL ); // Read frames and save first five frames to disk i=0; while(av_read_frame(pFormatCtx, &packet)>=0) { // Is this a packet from the video stream? if(packet.stream_index==videoStream) { // Decode video frame avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet); // Did we get a video frame? 
if(frameFinished) { SDL_LockYUVOverlay(bmp); AVFrame pict; pict.data[0] = bmp->pixels[0]; pict.data[1] = bmp->pixels[2]; pict.data[2] = bmp->pixels[1]; pict.linesize[0] = bmp->pitches[0]; pict.linesize[1] = bmp->pitches[2]; pict.linesize[2] = bmp->pitches[1]; // Convert the image into YUV format that SDL uses sws_scale ( sws_ctx, (uint8_t const * const *)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pict.data, pict.linesize ); SDL_UnlockYUVOverlay(bmp); rect.x = 0; rect.y = 0; rect.w = pCodecCtx->width; rect.h = pCodecCtx->height; SDL_DisplayYUVOverlay(bmp, &rect); av_packet_unref(&packet); } } else if(packet.stream_index==audioStream) { packet_queue_put(&audioq, &packet); } else { av_packet_unref(&packet); } // Free the packet that was allocated by av_read_frame SDL_PollEvent(&event); switch(event.type) { case SDL_QUIT: quit = 1; SDL_Quit(); exit(0); break; default: break; } } // Free the YUV frame av_free(pFrame); // Close the codec avcodec_close(pCodecCtx); // Close the video file avformat_close_input(&pFormatCtx); return 0; }
/*
 * Open the decoder for stream `stream_index` and register it with the
 * player state.  This variant targets the pre-libav-split FFmpeg API
 * (CODEC_TYPE_*, avcodec_open, get_buffer/release_buffer) and is kept
 * consistent with that era throughout.
 *
 * Returns 0 on success, -1 on error.  (FIX: the original fell off the end
 * of this non-void function on every success path — UB when the caller
 * uses the return value.)
 */
int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *pFormatCtx = is->pFormatCtx;
    AVCodecContext *codecCtx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= pFormatCtx->nb_streams) {
        return -1;
    }

    /* Get a pointer to the codec context for the stream. */
    codecCtx = pFormatCtx->streams[stream_index]->codec;

    if (codecCtx->codec_type == CODEC_TYPE_AUDIO) {
        /* Set audio settings from codec info. */
        wanted_spec.freq = codecCtx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = codecCtx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
    }

    codec = avcodec_find_decoder(codecCtx->codec_id);
    if (!codec || (avcodec_open(codecCtx, codec) < 0)) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1;
    }

    switch (codecCtx->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audioStream = stream_index;
        is->audio_st = pFormatCtx->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;
        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->videoStream = stream_index;
        is->video_st = pFormatCtx->streams[stream_index];
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->frame_last_delay = 40e-3; /* 40 ms ≈ 25 fps initial guess */
        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        /* Legacy buffer-management hooks (pre-get_buffer2 API). */
        codecCtx->get_buffer = our_get_buffer;
        codecCtx->release_buffer = our_release_buffer;
        break;
    default:
        break;
    }
    return 0;
}
int main(int argc ,char **argv) { av_register_all(); AVFormatContext *pFormatCtx = NULL; AVInputFormat *file_iformat = NULL; //avio_set_interrupt_cb(decode_interrupt_cb); //Open video file printf("open video file:%s\n", argv[1]); if(avformat_open_input(&pFormatCtx, argv[1], file_iformat, NULL) < 0) { printf("canot open input file: %s\n", argv[1]); return -1; //Cannot open file } printf("open input file: %s OK\n", argv[1]); //Retrieve stream information if(av_find_stream_info(pFormatCtx) < 0) return -1;//cannot find stream infomation //Dump information about file no to standard error av_dump_format(pFormatCtx, 0, argv[1], 0); int i; int videoStream; int audioStream; videoStream = -1; audioStream = -1; AVCodecContext *vCodecCtx; AVCodecContext *aCodecCtx; //Find the first video stream for(i = 0; i < pFormatCtx->nb_streams; i++) { if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && videoStream < 0) { videoStream = i; } if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audioStream < 0) { audioStream = i; } } if(videoStream == -1) { printf("no video stream\n"); return -1;//Did not find a video stream } if(audioStream == -1) { printf("no audio stream\n"); return -1;//Did not find a audio stream } printf("find video strean: %d\n", videoStream); printf("find audio strean: %d\n", audioStream); //Get a pointer to the codec context for the video stream vCodecCtx = pFormatCtx->streams[videoStream]->codec; //set vCodecCtx for vdpau vCodecCtx->get_format = decoder_get_format; vCodecCtx->get_buffer = decoder_get_buffer; vCodecCtx->release_buffer = decoder_release_buffer; vCodecCtx->draw_horiz_band = decoder_draw_horiz_band; vCodecCtx->reget_buffer = decoder_get_buffer; vCodecCtx->slice_flags = SLICE_FLAG_CODEC_ORDER | SLICE_FLAG_ALLOW_FIELD; AVCodec *vCodec; vCodec = avcodec_find_decoder(vCodecCtx->codec_id); if(vCodec == NULL) { fprintf(stderr, "Unsupported video codec\n"); return -1;//codec not find } //Open video codec 
if(avcodec_open(vCodecCtx, vCodec) < 0) { fprintf(stderr, "open video codec error\n"); return -1;//Could not open codec } //Get a pointer to the codec context for the audio stream aCodecCtx = pFormatCtx->streams[audioStream]->codec; static SDL_AudioSpec wanted_spec, spec; wanted_spec.freq = aCodecCtx->sample_rate; wanted_spec.format = AUDIO_S16SYS; wanted_spec.channels = aCodecCtx->channels; wanted_spec.silence = 0; wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE; wanted_spec.callback = audio_callback; wanted_spec.userdata = aCodecCtx; if(SDL_OpenAudio(&wanted_spec, &spec) < 0) { fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError()); return -1; } AVCodec *aCodec; aCodec = avcodec_find_decoder(aCodecCtx->codec_id); if(aCodec == NULL) { fprintf(stderr, "Unsupport audio codec\n"); return -1;//codec not found } if(avcodec_open(aCodecCtx, aCodec) < 0) { fprintf(stderr, "open avcodec error\n"); return -1; } packet_queue_init(&audioq); SDL_PauseAudio(0); AVFrame *pFrame; //Allocate video frame pFrame = avcodec_alloc_frame(); AVFrame *pFrameRGB; //Allocate an AVFrame structure pFrameRGB = avcodec_alloc_frame(); if(pFrameRGB == NULL) return -1; uint8_t *buffer; int numBytes; //Detemine required buffer size and allocate buffer numBytes = avpicture_get_size(PIX_FMT_RGB24, vCodecCtx->width, vCodecCtx->height); buffer = (uint8_t*)av_malloc(numBytes * sizeof(uint8_t)); //Assign appropriate parts of buffer to image planes in pFrameRGB //Note that pFrameRGB is an AVFrame, but AVFrame is a superset //of AVPicture avpicture_fill((AVPicture*)pFrameRGB, buffer, PIX_FMT_RGB24, vCodecCtx->width, vCodecCtx->height); if((SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER))) { fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError()); exit(1); } SDL_Surface *screen; screen = SDL_SetVideoMode(vCodecCtx->width, vCodecCtx->height, 0, 0); if(!screen) { fprintf(stderr, "SDL: could not set video mode\n"); exit(1); } SDL_Overlay *bmp; bmp = SDL_CreateYUVOverlay(vCodecCtx->width, 
vCodecCtx->height, SDL_YV12_OVERLAY, screen); int frameFinished; AVPacket packet; SDL_Rect rect; i = 0; while(av_read_frame(pFormatCtx, &packet) >=0) { //is this a packet from video stream? if(packet.stream_index == videoStream) { //Decoder video frame avcodec_decode_video2(vCodecCtx, pFrame, &frameFinished, &packet); //Did we got a video frame? if(frameFinished) { //Convert the image into OPENGL AVPicture pict; static struct SwsContext *img_convert_ctx; img_convert_ctx = sws_getCachedContext(img_convert_ctx, vCodecCtx->width, vCodecCtx->height, vCodecCtx->pix_fmt, vCodecCtx->width, vCodecCtx->height, PIX_FMT_RGB32, SWS_BICUBIC, NULL, NULL, NULL); sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, pFrame->height, pict.data, pict.linesize); /* usleep(40 * 1000); SDL_LockYUVOverlay(bmp); AVPicture pict; pict.data[0] = bmp->pixels[0]; pict.data[1] = bmp->pixels[2]; pict.data[2] = bmp->pixels[1]; pict.linesize[0] = bmp->pitches[0]; pict.linesize[1] = bmp->pitches[2]; pict.linesize[2] = bmp->pitches[1]; //Convert the image into YUV format that SDL uses static struct SwsContext *img_convert_ctx; img_convert_ctx = sws_getCachedContext(img_convert_ctx, vCodecCtx->width, vCodecCtx->height, vCodecCtx->pix_fmt, vCodecCtx->width, vCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, pFrame->height, pict.data, pict.linesize); SDL_UnlockYUVOverlay(bmp); rect.x = 0; rect.y = 0; rect.w = vCodecCtx->width; rect.h = vCodecCtx->height; SDL_DisplayYUVOverlay(bmp, &rect); */ } //Free the packet that was allocated by av_read_frame av_free_packet(&packet); /* SDL_Event event; SDL_PollEvent(&event); switch(event.type) { case SDL_QUIT: quit = 1; SDL_Quit(); exit(0); break; defalut: break; } */ } else if(packet.stream_index == audioStream) { packet_queue_put(&audioq, &packet); } else { av_free_packet(&packet); } } //Free the RGB image av_free(buffer); av_free(pFrameRGB); //Free the YUV freame 
av_free(pFrame); //Close the codec avcodec_close(vCodecCtx); //Close the video file avformat_close_input(&pFormatCtx); }
/*
 * Open stream `stream_index` of is->pFormatCtx and register it with the
 * player state.  This variant is heavily instrumented and deliberately
 * hacky: the decoder is found but NEVER opened (the avcodec_open call is
 * commented out), and the codec_type is forcibly overwritten from magic
 * codec-id values before dispatching.
 *
 * Returns 0 on success, -1 on error.
 */
int stream_component_open(VideoState *is, int stream_index) {
//int stream_component_open(VideoState *is, AVCodecContext* codecCtx) {
  AVFormatContext *pFormatCtx = is->pFormatCtx;
  AVCodecContext *codecCtx;
  AVCodec *codec;
  SDL_AudioSpec wanted_spec, spec;

  if(stream_index < 0 || stream_index >= pFormatCtx->nb_streams) {
    return -1;
  }

  // Get a pointer to the codec context for the video stream
  codecCtx = pFormatCtx->streams[stream_index]->codec;

  if(codecCtx->codec_type == AVMEDIA_TYPE_AUDIO) {
    // Debug output of the source audio parameters.
    fprintf(stderr, "sample rate: %d\n", codecCtx->sample_rate);
    fprintf(stderr, "channels: %d\n", codecCtx->channels);
    // Set audio settings from codec info
    wanted_spec.freq = codecCtx->sample_rate;
    wanted_spec.format = AUDIO_S16SYS;
    wanted_spec.channels = codecCtx->channels;
    wanted_spec.silence = 0;
    wanted_spec.samples = 1024; //SDL audio buffer size
    wanted_spec.callback = audio_callback;
    wanted_spec.userdata = is;
    if(SDL_OpenAudio(&wanted_spec, &spec) < 0) {
      fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
      return -1;
    }
  }

  fprintf(stderr, "codec id = %d\n", codecCtx->codec_id);
  codec = avcodec_find_decoder(codecCtx->codec_id);
  if(!codec ) {
    fprintf(stderr, "Codec is false!\n");
    return -1;
  }
  // NOTE(review): the decoder is intentionally left unopened here — the
  // avcodec_open call below is commented out, so decoding must be set up
  // elsewhere (or fail).  Confirm this is deliberate.
  /* if( avcodec_open(codecCtx, codec) < 0) { fprintf(stderr, "Unsupported codec!\n"); // return -1; } */

  printf("codec type: %d\n", codecCtx->codec_type);
  // Force the codec_type from hard-coded codec ids.  In the old CodecID
  // enum, 13 appears to be CODEC_ID_MPEG4 and 86016 (0x15000) appears to
  // be CODEC_ID_MP2 (audio ids start at 0x15000) — TODO confirm against
  // the avcodec.h version in use.
  if( codecCtx->codec_id == 13)
    codecCtx->codec_type = CODEC_TYPE_VIDEO;
  else if( codecCtx->codec_id == 86016)
    codecCtx->codec_type = AVMEDIA_TYPE_AUDIO;

  switch(codecCtx->codec_type) {
  case AVMEDIA_TYPE_AUDIO:
    is->audioStream = stream_index;
    // NOTE(review): this stores the AVStream*, not the AVCodecContext* the
    // field name suggests (the codecCtx alternative is commented out) —
    // verify the declared type of is->audio_ctx.
    is->audio_ctx = pFormatCtx->streams[stream_index];
    //is->audio_ctx = codecCtx;
    is->audio_buf_size = 0;
    is->audio_buf_index = 0;
    memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
    packet_queue_init(&is->audioq);
    SDL_PauseAudio(0);
    break;
  case CODEC_TYPE_VIDEO:
    is->videoStream = stream_index;
    // NOTE(review): same AVStream*-vs-context concern as audio_ctx above.
    is->video_ctx = pFormatCtx->streams[stream_index];
    //is->video_ctx = codecCtx;
    // Initialize timer stuff
    is->frame_timer = (double)av_gettime() / 1000000.0;
    is->frame_last_delay = 40e-3; // 40 ms initial frame delay (~25 fps)
    packet_queue_init(&is->videoq);
    is->video_tid = SDL_CreateThread(video_thread, is);
    // printf("[PLAYER] launched video thread %d\n", is->video_tid);
    // Custom buffer allocation functions (legacy get_buffer API)
    codecCtx->get_buffer = our_get_buffer;
    codecCtx->release_buffer = our_release_buffer;
    break;
  default:
    break;
  }
  return 0;
}
/*
 * Firmware entry point: bring up GPIO clocks, board peripherals and the
 * nRF radio, register the receive callback, then park the main thread.
 * Never returns in practice (the final loop only sleeps).
 */
int main(void)
{
    // Enable the AHB clocks for GPIO ports A and B before touching pins.
    (RCC->AHBENR |= RCC_AHBENR_GPIOAEN);
    (RCC->AHBENR |= RCC_AHBENR_GPIOBEN);

    // initialize ringbuffer for received packets
    packet_queue_init();

    switch_init();
    led_init();
    board_uart0_init(); // uart_init is already called earlier by syscalls — hopefully, at least

    // <just for debug>
    // GPIO_9  SW1
    // GPIO_10 SW2
    // GPIO_11 SW4
    // GPIO_12 nRF-IRQ
    // Debug: trigger `test` on a falling edge of GPIO_9 (pulled high externally).
    gpio_init_int(GPIO_9, GPIO_PULLDOWN, GPIO_FALLING, (void *)test, 0);
    gpio_irq_enable(GPIO_9);
    led1_on();
    // </just for debug>

    // initialize radio driver
    radio_nrf_init();
    radio_nrf_register_rx_callback(packet_received);

    //eeprom_init();

    // TODO get node address from eeprom
    //      if not available, go through algorithm to determine new address:
    //      1. generate randomly address of length 11 bit
    //      2. send PING with generated address
    //      3. wait X seconds for answer (ping_answer)
    //      4. if no answer received, accept address and save it
    //      5. if an answer is received, start over with 1.
    //         (or determine another address on another way)

    while (1) {
        // we can go to sleep here
        // THIS DOES NOT WAKE UP!
        // (if needed, thread has to be woken up externally)
        thread_sleep();
    }
    return 0;
}
/*
 * Open the decoder for stream `stream_index` and register it with the
 * player state.  This variant targets Android-style output: audio goes
 * through an OpenSL-style AudioPlayer engine, video through a VideoPlayer
 * engine with a pthread-based decode thread.
 *
 * Returns 0 on success, -1 on error.
 */
int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *pFormatCtx = is->pFormatCtx;
    AVCodecContext *codecCtx = NULL;
    AVCodec *codec = NULL;
    AVDictionary *optionsDict = NULL;

    if (stream_index < 0 || stream_index >= pFormatCtx->nb_streams) {
        return -1;
    }

    /* Get a pointer to the codec context for the stream. */
    codecCtx = pFormatCtx->streams[stream_index]->codec;

    if (codecCtx->codec_type == AVMEDIA_TYPE_AUDIO) {
        is->audio_callback = audio_callback;
        /* Set up the platform audio engine from codec info.
         * FIX: malloc result was previously used unchecked. */
        AudioPlayer *player = malloc(sizeof(AudioPlayer));
        if (!player) {
            fprintf(stderr, "Out of memory\n");
            return -1;
        }
        is->audio_player = player;
        createEngine(&is->audio_player);
        createBufferQueueAudioPlayer(&is->audio_player, is,
                                     codecCtx->channels, codecCtx->sample_rate);
    } else if (codecCtx->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* Set up the platform video engine (FIX: checked allocation). */
        VideoPlayer *player = malloc(sizeof(VideoPlayer));
        if (!player) {
            fprintf(stderr, "Out of memory\n");
            return -1;
        }
        is->video_player = player;
        createVideoEngine(&is->video_player);
    }

    codec = avcodec_find_decoder(codecCtx->codec_id);
    if (!codec || (avcodec_open2(codecCtx, codec, &optionsDict) < 0)) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1;
    }

    switch (codecCtx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audioStream = stream_index;
        is->audio_st = pFormatCtx->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;
        /* Averaging filter for audio sync. */
        is->audio_diff_avg_coef = exp(log(0.01 / AUDIO_DIFF_AVG_NB));
        is->audio_diff_avg_count = 0;
        /* Correct audio only if the error is larger than this. */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / codecCtx->sample_rate;
        /* Despite the "sws" name, this field holds a SwrContext (resampler). */
        is->sws_ctx_audio = swr_alloc();
        if (!is->sws_ctx_audio) {
            fprintf(stderr, "Could not allocate resampler context\n");
            return -1;
        }
        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->videoStream = stream_index;
        is->video_st = pFormatCtx->streams[stream_index];
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->frame_last_delay = 40e-3; /* 40 ms ≈ 25 fps initial guess */
        is->video_current_pts_time = av_gettime();
        packet_queue_init(&is->videoq);
        /* FIX: malloc and pthread_create results were previously ignored. */
        is->video_tid = malloc(sizeof(*(is->video_tid)));
        if (!is->video_tid) {
            fprintf(stderr, "Out of memory\n");
            return -1;
        }
        if (pthread_create(is->video_tid, NULL, (void *)&video_thread, is) != 0) {
            fprintf(stderr, "Couldn't start video thread\n");
            return -1;
        }
        is->sws_ctx = sws_getContext(
            is->video_st->codec->width, is->video_st->codec->height,
            is->video_st->codec->pix_fmt,
            is->video_st->codec->width, is->video_st->codec->height,
            AV_PIX_FMT_YUV420P, SWS_BILINEAR,
            NULL, NULL, NULL);
        /* NOTE(review): get_buffer2 is assigned a callback named after the
         * legacy get_buffer API — confirm our_get_buffer actually has the
         * get_buffer2 signature (AVCodecContext*, AVFrame*, int flags). */
        codecCtx->get_buffer2 = our_get_buffer;
        break;
    default:
        break;
    }
    return 0;
}