/*----------------------------------------------------------------------
|    SdlVideoOutput_PutPacket
|
|    Consume one media packet: accept only raw YV12 video, (re)create the
|    SDL screen and YUV overlay when the frame size changes, copy the
|    three planes into the overlay, display it, and pace presentation to
|    one frame every ~41.708 ms (~23.976 fps).
+---------------------------------------------------------------------*/
BLT_METHOD
SdlVideoOutput_PutPacket(BLT_PacketConsumer* _self, BLT_MediaPacket* packet)
{
    SdlVideoOutput* self = ATX_SELF(SdlVideoOutput, BLT_PacketConsumer);
    unsigned char* pixel_data = (unsigned char*)BLT_MediaPacket_GetPayloadBuffer(packet);
    const BLT_RawVideoMediaType* media_type;
    unsigned int plane;
    SDL_Rect rect;

    /* check the media type: only raw YV12 video is accepted */
    BLT_MediaPacket_GetMediaType(packet, (const BLT_MediaType**)&media_type);
    if (media_type->base.id != BLT_MEDIA_TYPE_ID_VIDEO_RAW) {
        ATX_LOG_FINE_1("rejecting media type id %d", media_type->base.id);
        return BLT_ERROR_INVALID_MEDIA_TYPE;
    }
    if (media_type->format != BLT_PIXEL_FORMAT_YV12) {
        ATX_LOG_FINE_1("rejecting pixel format %d", media_type->format);
        return BLT_ERROR_INVALID_MEDIA_TYPE;
    }

    /* resize/create the window and overlay if needed */
    if (self->yuv_overlay == NULL ||
        self->yuv_overlay->w != media_type->width ||
        self->yuv_overlay->h != media_type->height) {
        self->screen = SDL_SetVideoMode(media_type->width,
                                        media_type->height,
                                        24,
                                        SDL_HWSURFACE | SDL_RESIZABLE);
        if (self->screen == NULL) {
            ATX_LOG_WARNING("SDL_SetVideoMode() failed");
            return BLT_FAILURE;
        }
        self->yuv_overlay = SDL_CreateYUVOverlay(media_type->width,
                                                 media_type->height,
                                                 SDL_YV12_OVERLAY,
                                                 self->screen);
        if (self->yuv_overlay == NULL) {
            ATX_LOG_WARNING("SDL_CreateYUVOverlay() failed");
            return BLT_FAILURE;
        }
    }

    /* transfer the pixels, one plane at a time */
    SDL_LockYUVOverlay(self->yuv_overlay);
    for (plane=0; plane<3; plane++) {
        /* chroma planes are subsampled 2x in both directions */
        unsigned int plane_width  = (plane==0?media_type->width:(media_type->width/2));
        unsigned int plane_height = (plane==0?media_type->height:(media_type->height/2));
        unsigned char* src       = pixel_data+media_type->planes[plane].offset;
        unsigned int   src_pitch = media_type->planes[plane].bytes_per_line;
        /* source planes arrive in Y,U,V order while the SDL_YV12 overlay
           stores Y,V,U: the 3-plane index swaps the two chroma planes */
        unsigned char* dst       = self->yuv_overlay->pixels[plane==0?0:3-plane];
        unsigned int   dst_pitch = self->yuv_overlay->pitches[plane==0?0:3-plane];
        /* copy row by row, since source and destination pitches may differ */
        while (plane_height--) {
            ATX_CopyMemory(dst, src, plane_width);
            src += src_pitch;
            dst += dst_pitch;
        }
    }
    SDL_UnlockYUVOverlay(self->yuv_overlay);

    /* display the overlay scaled to the whole window */
    rect.x = 0;
    rect.y = 0;
    rect.w = self->screen->w;
    rect.h = self->screen->h;
    SDL_DisplayYUVOverlay(self->yuv_overlay, &rect);

    /* pace the output: wait until the scheduled display time of this frame */
    {
        ATX_TimeStamp now;
        ATX_System_GetCurrentTimeStamp(&now);
        if (ATX_TimeStamp_IsLaterOrEqual(self->next_display_time, now)) {
            ATX_TimeInterval delta;
            ATX_TimeStamp_Sub(delta, self->next_display_time, now);
            /* sanity check: only sleep for waits under 1 s and over 1 ms */
            if (delta.seconds == 0 && delta.nanoseconds > 1000000) {
                ATX_System_Sleep(&delta);
            }
        } else {
            /* we are running late: resynchronize on the current time */
            self->next_display_time = now;
        }
        /* schedule the next frame 41.708 ms out (~23.976 fps) */
        {
            ATX_TimeStamp frame_duration = {0, 41708000};
            ATX_TimeStamp_Add(self->next_display_time, self->next_display_time, frame_duration);
        }
    }

    return BLT_SUCCESS;
}
int main(int argc, char* argv[]) { AVFormatContext *pFormatCtx; int i, videoindex; AVCodecContext *pCodecCtx; AVCodec *pCodec; char filepath[]="cuc60anniversary_start.mkv"; av_register_all(); avformat_network_init(); pFormatCtx = avformat_alloc_context(); fp_open=fopen(filepath,"rb+"); //Init AVIOContext unsigned char *aviobuffer=(unsigned char *)av_malloc(32768); AVIOContext *avio =avio_alloc_context(aviobuffer, 32768,0,NULL,read_buffer,NULL,NULL); pFormatCtx->pb=avio; if(avformat_open_input(&pFormatCtx,NULL,NULL,NULL)!=0){ printf("Couldn't open input stream.\n"); return -1; } if(avformat_find_stream_info(pFormatCtx,NULL)<0){ printf("Couldn't find stream information.\n"); return -1; } videoindex=-1; for(i=0; i<pFormatCtx->nb_streams; i++) if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){ videoindex=i; break; } if(videoindex==-1){ printf("Didn't find a video stream.\n"); return -1; } pCodecCtx=pFormatCtx->streams[videoindex]->codec; pCodec=avcodec_find_decoder(pCodecCtx->codec_id); if(pCodec==NULL){ printf("Codec not found.\n"); return -1; } if(avcodec_open2(pCodecCtx, pCodec,NULL)<0){ printf("Could not open codec.\n"); return -1; } AVFrame *pFrame,*pFrameYUV; pFrame=av_frame_alloc(); pFrameYUV=av_frame_alloc(); //uint8_t *out_buffer=(uint8_t *)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height)); //avpicture_fill((AVPicture *)pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height); //SDL---------------------------- if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) { printf( "Could not initialize SDL - %s\n", SDL_GetError()); return -1; } int screen_w=0,screen_h=0; SDL_Surface *screen; screen_w = pCodecCtx->width; screen_h = pCodecCtx->height; screen = SDL_SetVideoMode(screen_w, screen_h, 0,0); if(!screen) { printf("SDL: could not set video mode - exiting:%s\n",SDL_GetError()); return -1; } SDL_Overlay *bmp; bmp = SDL_CreateYUVOverlay(pCodecCtx->width, 
pCodecCtx->height,SDL_YV12_OVERLAY, screen); SDL_Rect rect; rect.x = 0; rect.y = 0; rect.w = screen_w; rect.h = screen_h; //SDL End------------------------ int ret, got_picture; AVPacket *packet=(AVPacket *)av_malloc(sizeof(AVPacket)); #if OUTPUT_YUV420P FILE *fp_yuv=fopen("output.yuv","wb+"); #endif SDL_WM_SetCaption("Simplest FFmpeg Mem Player",NULL); struct SwsContext *img_convert_ctx; img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); //------------------------------ while(av_read_frame(pFormatCtx, packet)>=0){ if(packet->stream_index==videoindex){ ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet); if(ret < 0){ printf("Decode Error.\n"); return -1; } if(got_picture){ SDL_LockYUVOverlay(bmp); pFrameYUV->data[0]=bmp->pixels[0]; pFrameYUV->data[1]=bmp->pixels[2]; pFrameYUV->data[2]=bmp->pixels[1]; pFrameYUV->linesize[0]=bmp->pitches[0]; pFrameYUV->linesize[1]=bmp->pitches[2]; pFrameYUV->linesize[2]=bmp->pitches[1]; sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize); #if OUTPUT_YUV420P int y_size=pCodecCtx->width*pCodecCtx->height; fwrite(pFrameYUV->data[0],1,y_size,fp_yuv); //Y fwrite(pFrameYUV->data[1],1,y_size/4,fp_yuv); //U fwrite(pFrameYUV->data[2],1,y_size/4,fp_yuv); //V #endif SDL_UnlockYUVOverlay(bmp); SDL_DisplayYUVOverlay(bmp, &rect); //Delay 40ms SDL_Delay(40); } } av_free_packet(packet); } sws_freeContext(img_convert_ctx); #if OUTPUT_YUV420P fclose(fp_yuv); #endif fclose(fp_open); SDL_Quit(); //av_free(out_buffer); av_free(pFrameYUV); avcodec_close(pCodecCtx); avformat_close_input(&pFormatCtx); return 0; }
/* Release an SDL YUV overlay: unlock it first, then free it.
 * Safe to call with a NULL pointer (no-op). */
static void del(SDL_Overlay *o)
{
    if (o == NULL)
        return;
    SDL_UnlockYUVOverlay(o);
    SDL_FreeYUVOverlay(o);
}
/* Push one decoded video frame onto the shared picture queue.
 *
 * Blocks until a queue slot is free, asks the main thread (via
 * FF_ALLOC_EVENT) to (re)allocate the SDL overlay when the frame size
 * changed, applies the user's color filter on the intermediate RGB frame,
 * then converts into the overlay planes and advances the write index.
 *
 * pFrame    decoded source frame
 * pFrameRGB scratch RGB frame; assumes data[0] is packed 3-bytes-per-pixel
 *           RGB24 — TODO confirm against sws_ctxRGB's destination format
 * pts       presentation timestamp stored with the queued picture
 * Returns 0 on success, -1 if is->quit was raised while waiting.
 *
 * FIX: the black&white branch used hard-coded 480*240 for the chroma
 * plane size; it now uses the overlay's real pitches and the frame's
 * chroma height (height/2 for YV12), so it is correct for any video size.
 */
int queue_picture(VideoState *is, AVFrame *pFrame, AVFrame *pFrameRGB, double pts)
{
    VideoPicture *vp;
    AVPicture pict;
    int k;

    /* wait until we have space for a new pic */
    SDL_LockMutex(is->pictq_mutex);
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->quit) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->quit) {
        return -1;
    }

    // windex is set to 0 initially
    vp = &is->pictq[is->pictq_windex];

    /* allocate or resize the buffer! */
    if (!vp->bmp ||
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
        SDL_Event event;

        vp->allocated = 0;
        /* we have to do it in the main thread */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until we have a picture allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->quit) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);
        if (is->quit) {
            return -1;
        }
    }

    /* We have a place to put our picture on the queue */
    if (vp->bmp) {
        /* first pass: decode into the RGB scratch frame so the per-channel
           color filters below can operate on packed RGB bytes */
        sws_scale(is->sws_ctxRGB, (const uint8_t * const *)pFrame->data, pFrame->linesize,
                  0, is->video_st->codec->height, pFrameRGB->data, pFrameRGB->linesize);

        int videoSize = (is->video_st->codec->height) * (is->video_st->codec->width);
        if (is->color_req == 'b') {
            /* keep only the third channel: zero the first two of each pixel */
            for (k = 0; k < videoSize; ++k) {
                pFrameRGB->data[0][3*k]   = '\0';
                pFrameRGB->data[0][3*k+1] = '\0';
            }
        } else if (is->color_req == 'r') {
            /* keep only the first channel */
            for (k = 0; k < videoSize; ++k) {
                pFrameRGB->data[0][3*k+1] = '\0';
                pFrameRGB->data[0][3*k+2] = '\0';
            }
        } else if (is->color_req == 'g') {
            /* keep only the middle channel */
            for (k = 0; k < videoSize; ++k) {
                pFrameRGB->data[0][3*k]   = '\0';
                pFrameRGB->data[0][3*k+2] = '\0';
            }
        }

        SDL_LockYUVOverlay(vp->bmp);

        /* point pict at the queue; SDL_YV12 stores planes Y,V,U so the
           two chroma planes are swapped relative to pict's U,V order */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

        /* second pass: convert the (filtered) RGB frame into the overlay */
        sws_scale(is->sws_ctx, (const uint8_t * const *)pFrameRGB->data, pFrameRGB->linesize,
                  0, is->video_st->codec->height, pict.data, pict.linesize);

        if (is->color_req == 'w') {
            /* Change color to black & white: neutral chroma (128) kills all
               color information, leaving only luma.
               FIX: plane size was hard-coded as 480*240; use the overlay's
               actual pitch and the frame's chroma height instead. */
            int chroma_h = is->video_st->codec->height / 2;
            memset(pict.data[1], 128, pict.linesize[1] * chroma_h);
            memset(pict.data[2], 128, pict.linesize[2] * chroma_h);
        }

        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;

        /* now we inform our display thread that we have a pic ready */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE) {
            is->pictq_windex = 0;
        }
        SDL_LockMutex(is->pictq_mutex);
        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
int main(int argc, char* argv[]) { int i, videoStream, audioStream; VideoState *is; is = av_mallocz(sizeof(VideoState)); if(argc < 2) { fprintf(stderr, "Usage: test <file>\n"); exit(1); } if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) { fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError()); exit(1); } av_register_all(); AVFormatContext *pFormatCtx = NULL; av_strlcpy(is->filename, argv[1], sizeof(is->filename)); is->pictq_mutex = SDL_CreateMutex(); is->pictq_cond = SDL_CreateCond(); schedule_refresh(is, 40); is->parse_tid = SDL_CreateThread(decode_thread, is); if(!is->parse_tid) { av_free(is); return -1; } // Open video file if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0) { return -1; // Couldn't open file } // Retrive stream information if(avformat_find_stream_info(pFormatCtx, NULL) < 0) { return -1; //Couldn't find stream information } // Dump information about file onto standard error av_dump_format(pFormatCtx, 0, argv[1], 0); AVCodecContext *pCodecCtxOrig = NULL; AVCodecContext *pCodecCtx = NULL; AVCodecContext *aCodecCtxOrig = NULL; AVCodecContext *aCodecCtx = NULL; // Find the first video stream videoStream = -1; audioStream = -1; for(i=0; i < pFormatCtx->nb_streams; i++) { if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && videoStream < 0) { videoStream = i; } } for(i=0; i < pFormatCtx->nb_streams; i++) { if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audioStream < 0) { audioStream = i; } } if(videoStream == -1) { return -1; // Didn't find a video stream } if(audioStream == -1) { return -1; } // Get a pointer to the codec context for the video stream pCodecCtxOrig = pFormatCtx->streams[videoStream]->codec; aCodecCtxOrig = pFormatCtx->streams[audioStream]->codec; AVCodec *pCodec = NULL; AVCodec *aCodec = NULL; //Find the decoder for the video stream pCodec = avcodec_find_decoder(pCodecCtxOrig->codec_id); if(pCodec == NULL) { return -1; } aCodec = 
avcodec_find_decoder(aCodecCtxOrig->codec_id); if(aCodec == NULL) { return -1; } // Copy context pCodecCtx = avcodec_alloc_context3(pCodec); if(avcodec_copy_context(pCodecCtx, pCodecCtxOrig) != 0) { return -1; } aCodecCtx = avcodec_alloc_context3(aCodec); if(avcodec_copy_context(aCodecCtx, aCodecCtxOrig) != 0) { return -1; } SDL_AudioSpec wanted_spec, spec; wanted_spec.freq = aCodecCtx->sample_rate; wanted_spec.format = AUDIO_S16SYS; wanted_spec.channels = aCodecCtx->channels; wanted_spec.silence = 0; wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE; wanted_spec.callback = audio_callback; wanted_spec.userdata = aCodecCtx; if (SDL_OpenAudio(&wanted_spec, &spec) < 0) { fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError()); return -1; } // Open codec AVDictionary *optionDict = NULL; if(avcodec_open2(pCodecCtx, pCodec, &optionDict) < 0) { return -1; } if(avcodec_open2(aCodecCtx, aCodec, NULL) < 0) { return -1; } packet_queue_init(&audioq); SDL_PauseAudio(0); // Allocate video frame AVFrame *pFrame = NULL; pFrame = av_frame_alloc(); SDL_Surface *screen; screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0); if(!screen) { fprintf(stderr, "SDL: could not set video mode - exiting\n"); exit(1); } SDL_Overlay *bmp = NULL; bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height, SDL_YV12_OVERLAY, screen); printf("[loop]==========================\n"); struct SwsContext *sws_ctx = NULL; int frameFinished; AVPacket packet; //initialize SWS context for software scaling sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL); // Read frame and display i = 0; while(av_read_frame(pFormatCtx, &packet) >= 0) { if(packet.stream_index == videoStream) { //Decode video frame avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet); // Did we get a video frame? 
if(frameFinished) { SDL_LockYUVOverlay(bmp); AVPicture pict; pict.data[0] = bmp->pixels[0]; pict.data[1] = bmp->pixels[2]; pict.data[2] = bmp->pixels[1]; pict.linesize[0] = bmp->pitches[0]; pict.linesize[1] = bmp->pitches[2]; pict.linesize[2] = bmp->pitches[1]; // Convert the image into YUV format that SDL uses sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pict.data, pict.linesize); SDL_UnlockYUVOverlay(bmp); SDL_Rect rect; rect.x = 0; rect.y = 0; rect.w = pCodecCtx->width; rect.h = pCodecCtx->height; SDL_DisplayYUVOverlay(bmp, &rect); av_free_packet(&packet); } } else if (packet.stream_index == audioStream) { packet_queue_put(&audioq, &packet); } else { // Free the packet that was allocated by av_read_frame av_free_packet(&packet); } SDL_Event event; SDL_PollEvent(&event); switch(event.type) { case SDL_QUIT: quit = 1; SDL_Quit(); exit(0); break; default: break; } } // Free the YUV frame av_free(pFrame); // Close the codec avcodec_close(pCodecCtx); avcodec_close(pCodecCtxOrig); // Close the video file avformat_close_input(&pFormatCtx); return 0; }
/* Called from the main */ int main(int argc, char **argv) { AVFormatContext *pFormatCtx = NULL; int err; int i; int videoStream; AVCodecContext *pCodecCtx; AVCodec *pCodec; AVFrame *pFrame; AVPacket packet; int frameFinished; float aspect_ratio; SDL_Overlay *bmp; SDL_Surface *screen; SDL_Rect rect; SDL_Event event; if(argc < 2) { printf("Please provide a movie file\n"); return -1; } // Register all formats and codecs av_register_all(); if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) { fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError()); exit(1); } pFormatCtx = avformat_alloc_context(); // Open video file err = avformat_open_input(&pFormatCtx, argv[1],NULL,NULL); printf("nb_streams = %d\n",pFormatCtx->nb_streams); if(err<0) { printf("error ret = %d\n",err); } // Retrieve stream information err = avformat_find_stream_info(pFormatCtx, NULL); if(err<0) { printf("error ret = %d\n",err); } // Dump information about file onto standard error av_dump_format(pFormatCtx, 0, argv[1], 0); // Find the first video stream videoStream=AVMEDIA_TYPE_UNKNOWN; for(i=0; i<pFormatCtx->nb_streams; i++) { if(AVMEDIA_TYPE_VIDEO==pFormatCtx->streams[i]->codec->codec_type) { videoStream=i; break; } } if(videoStream==AVMEDIA_TYPE_UNKNOWN) { return -1; // Didn't find a video stream } printf("videoStream = %d\n",videoStream); // Get a pointer to the codec context for the video stream pCodecCtx=pFormatCtx->streams[videoStream]->codec; // Find the decoder for the video stream pCodec=avcodec_find_decoder(pCodecCtx->codec_id); if(pCodec==NULL) { fprintf(stderr, "Unsupported codec!\n"); return -1; // Codec not found } // Open codec if(avcodec_open2(pCodecCtx, pCodec,NULL)<0) return -1; // Could not open codec // Allocate video frame pFrame=avcodec_alloc_frame(); // Make a screen to put our video #ifndef __DARWIN__ screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0); #else screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0); #endif 
if(!screen) { fprintf(stderr, "SDL: could not set video mode - exiting\n"); exit(1); } // Allocate a place to put our YUV image on that screen bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height, SDL_YV12_OVERLAY, screen); // Read frames and save first five frames to disk i=0; while(av_read_frame(pFormatCtx, &packet)>=0) { // Is this a packet from the video stream? if(packet.stream_index==videoStream) { // Decode video frame avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet); // Did we get a video frame? if(frameFinished) { SDL_LockYUVOverlay(bmp); AVPicture pict; pict.data[0] = bmp->pixels[0]; pict.data[1] = bmp->pixels[2]; pict.data[2] = bmp->pixels[1]; pict.linesize[0] = bmp->pitches[0]; pict.linesize[1] = bmp->pitches[2]; pict.linesize[2] = bmp->pitches[1]; // Convert the image into YUV format that SDL uses #include <libswscale/swscale.h> // other codes static struct SwsContext *img_convert_ctx; // other codes img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); // other codes // Convert the image from its native format to RGB sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pict.data, pict.linesize); SDL_UnlockYUVOverlay(bmp); rect.x = 0; rect.y = 0; rect.w = pCodecCtx->width; rect.h = pCodecCtx->height; SDL_DisplayYUVOverlay(bmp, &rect); } } // Free the packet that was allocated by av_read_frame av_free_packet(&packet); SDL_PollEvent(&event); switch(event.type) { case SDL_QUIT: SDL_Quit(); exit(0); break; default: break; } } // Free the YUV frame av_free(pFrame); // Close the codec avcodec_close(pCodecCtx); // Close the video file avformat_close_input(&pFormatCtx); return 0; }
int main(int argc, char *argv[]) { AVFormatContext *pFormatCtx = NULL; int i, videoStream, audioStream; AVCodecContext *pCodecCtx = NULL; AVCodec *pCodec = NULL; AVFrame *pFrame = NULL; AVPacket packet; int frameFinished; //float aspect_ratio; AVCodecContext *aCodecCtx = NULL; AVCodec *aCodec = NULL; SDL_Overlay *bmp = NULL; SDL_Surface *screen = NULL; SDL_Rect rect; SDL_Event event; SDL_AudioSpec wanted_spec, spec; struct SwsContext *sws_ctx = NULL; AVDictionary *videoOptionsDict = NULL; AVDictionary *audioOptionsDict = NULL; if(argc < 2) { fprintf(stderr, "Usage: test <file>\n"); exit(1); } // Register all formats and codecs av_register_all(); if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) { fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError()); exit(1); } // Open video file if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0) return -1; // Couldn't open file // Retrieve stream information if(avformat_find_stream_info(pFormatCtx, NULL)<0) return -1; // Couldn't find stream information // Dump information about file onto standard error av_dump_format(pFormatCtx, 0, argv[1], 0); // Find the first video stream videoStream=-1; audioStream=-1; for(i=0; i<pFormatCtx->nb_streams; i++) { if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO && videoStream < 0) { videoStream=i; } if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO && audioStream < 0) { audioStream=i; } } if(videoStream==-1) return -1; // Didn't find a video stream if(audioStream==-1) return -1; aCodecCtx=pFormatCtx->streams[audioStream]->codec; // Set audio settings from codec info wanted_spec.freq = aCodecCtx->sample_rate; wanted_spec.format = AUDIO_S16SYS; wanted_spec.channels = aCodecCtx->channels; wanted_spec.silence = 0; wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE; wanted_spec.callback = audio_callback; wanted_spec.userdata = aCodecCtx; if(SDL_OpenAudio(&wanted_spec, &spec) < 0) { fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError()); 
return -1; } aCodec = avcodec_find_decoder(aCodecCtx->codec_id); if(!aCodec) { fprintf(stderr, "Unsupported codec!\n"); return -1; } avcodec_open2(aCodecCtx, aCodec, &audioOptionsDict); // audio_st = pFormatCtx->streams[index] packet_queue_init(&audioq); SDL_PauseAudio(0); // Get a pointer to the codec context for the video stream pCodecCtx=pFormatCtx->streams[videoStream]->codec; // Find the decoder for the video stream pCodec=avcodec_find_decoder(pCodecCtx->codec_id); if(pCodec==NULL) { fprintf(stderr, "Unsupported codec!\n"); return -1; // Codec not found } // Open codec if(avcodec_open2(pCodecCtx, pCodec, &videoOptionsDict)<0) return -1; // Could not open codec // Allocate video frame pFrame=av_frame_alloc(); // Make a screen to put our video #ifndef __DARWIN__ screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0); #else screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0); #endif if(!screen) { fprintf(stderr, "SDL: could not set video mode - exiting\n"); exit(1); } // Allocate a place to put our YUV image on that screen bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height, SDL_YV12_OVERLAY, screen); sws_ctx = sws_getContext ( pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL ); // Read frames and save first five frames to disk i=0; while(av_read_frame(pFormatCtx, &packet)>=0) { // Is this a packet from the video stream? if(packet.stream_index==videoStream) { // Decode video frame avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet); // Did we get a video frame? 
if(frameFinished) { SDL_LockYUVOverlay(bmp); AVPicture pict; pict.data[0] = bmp->pixels[0]; pict.data[1] = bmp->pixels[2]; pict.data[2] = bmp->pixels[1]; pict.linesize[0] = bmp->pitches[0]; pict.linesize[1] = bmp->pitches[2]; pict.linesize[2] = bmp->pitches[1]; // Convert the image into YUV format that SDL uses sws_scale ( sws_ctx, (uint8_t const * const *)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pict.data, pict.linesize ); SDL_UnlockYUVOverlay(bmp); rect.x = 0; rect.y = 0; rect.w = pCodecCtx->width; rect.h = pCodecCtx->height; SDL_DisplayYUVOverlay(bmp, &rect); av_free_packet(&packet); } } else if(packet.stream_index==audioStream) { packet_queue_put(&audioq, &packet); } else { av_free_packet(&packet); } // Free the packet that was allocated by av_read_frame SDL_PollEvent(&event); switch(event.type) { case SDL_QUIT: quit = 1; SDL_Quit(); exit(0); break; default: break; } } // Free the YUV frame av_free(pFrame); // Close the codec avcodec_close(pCodecCtx); // Close the video file avformat_close_input(&pFormatCtx); return 0; }
/* vo_instance_t "discard" hook: the frame id passed by the caller is the
 * SDL overlay that was locked for writing; discarding the frame simply
 * unlocks it. The instance and buffer pointers are unused. */
static void
sdl_discard (vo_instance_t * _instance, uint8_t * const * buf, void * id)
{
    SDL_Overlay * overlay = (SDL_Overlay *) id;

    (void) _instance;
    (void) buf;
    SDL_UnlockYUVOverlay (overlay);
}
int luvcview() { const SDL_VideoInfo *info; char driver[128]; SDL_Surface *pscreen; SDL_Surface *pparent; SDL_Overlay *overlay; SDL_Rect drect; int status; Uint32 currtime; Uint32 lasttime; unsigned char *p = NULL; int hwaccel = 0; const char *videodevice = NULL; const char *mode = NULL; int format = V4L2_PIX_FMT_MJPEG; int i; int grabmethod = 1; int width = 320; int height = 240; int fps = 15; unsigned char frmrate = 0; char *avifilename = NULL; int queryformats = 0; int querycontrols = 0; int readconfigfile = 0; char *separateur; char *sizestring = NULL; char *fpsstring = NULL; int enableRawStreamCapture = 0; int enableRawFrameCapture = 0; format = V4L2_PIX_FMT_YUYV; videodevice = "/dev/video0"; printf("luvcview version %s \n", version); if (SDL_VideoDriverName(driver, sizeof(driver))) { printf("Video driver: %s\n", driver); } info = SDL_GetVideoInfo(); if (info->wm_available) { printf("A window manager is available\n"); } if (info->hw_available) { printf("Hardware surfaces are available (%dK video memory)\n", info->video_mem); SDL_VIDEO_Flags |= SDL_HWSURFACE; } if (info->blit_hw) { printf("Copy blits between hardware surfaces are accelerated\n"); SDL_VIDEO_Flags |= SDL_ASYNCBLIT; } if (info->blit_hw_CC) { printf ("Colorkey blits between hardware surfaces are accelerated\n"); } if (info->blit_hw_A) { printf("Alpha blits between hardware surfaces are accelerated\n"); } if (info->blit_sw) { printf ("Copy blits from software surfaces to hardware surfaces are accelerated\n"); } if (info->blit_sw_CC) { printf ("Colorkey blits from software surfaces to hardware surfaces are accelerated\n"); } if (info->blit_sw_A) { printf ("Alpha blits from software surfaces to hardware surfaces are accelerated\n"); } if (info->blit_fill) { printf("Color fills on hardware surfaces are accelerated\n"); } if (!(SDL_VIDEO_Flags & SDL_HWSURFACE)) SDL_VIDEO_Flags |= SDL_SWSURFACE; if (avifilename == NULL || *avifilename == 0) { avifilename = "video.avi"; } videoIn = (struct vdIn *) 
calloc(1, sizeof(struct vdIn)); if (init_videoIn (videoIn, (char *) videodevice, width, height, fps, format, grabmethod, avifilename) < 0) exit(1); pscreen = SDL_GetVideoSurface(); overlay = SDL_CreateYUVOverlay(videoIn->width, videoIn->height , SDL_YUY2_OVERLAY, pscreen); p = (unsigned char *) overlay->pixels[0]; drect.x = 0; drect.y = 0; drect.w = pscreen->w; drect.h = pscreen->h; initLut(); lasttime = SDL_GetTicks(); int loop = 0; /* main big loop */ while (videoIn->signalquit) { currtime = SDL_GetTicks(); if (currtime - lasttime > 0) { frmrate = 1000/(currtime - lasttime); } lasttime = currtime; if (uvcGrab(videoIn) < 0) { printf("Error grabbing \n"); break; } SDL_LockYUVOverlay(overlay); memcpy(p, videoIn->framebuffer, videoIn->width * (videoIn->height) * 2); SDL_UnlockYUVOverlay(overlay); SDL_DisplayYUVOverlay(overlay, &drect); if (loop > 35) { printf( "loop: %d\n", loop); break; } ++loop; SDL_Delay(10); } close_v4l2(videoIn); free(videoIn); freeLut(); printf(" Clean Up done Quit \n"); }
/* Place one decoded frame onto the shared picture queue.
 *
 * Waits (on pictq_cond) for a free slot, asks the main thread via
 * FF_ALLOC_EVENT to (re)create the overlay when the frame size changed,
 * converts the frame into the overlay with img_convert(), then bumps the
 * write index and queue size. Returns 0 on success, -1 if is->quit was
 * raised while waiting. */
int queue_picture(VideoState *is, AVFrame *pFrame) {

  VideoPicture *slot;
  AVPicture target;
  int out_fmt;

  /* block until the display thread has consumed a slot (or we are quitting) */
  SDL_LockMutex(is->pictq_mutex);
  while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->quit)
    SDL_CondWait(is->pictq_cond, is->pictq_mutex);
  SDL_UnlockMutex(is->pictq_mutex);

  if (is->quit)
    return -1;

  /* write cursor starts at 0 */
  slot = &is->pictq[is->pictq_windex];

  /* (re)allocate the overlay when missing or when the frame size changed;
     allocation must happen on the main thread, so signal it and wait */
  if (!slot->bmp ||
      slot->width  != is->video_st->codec->width ||
      slot->height != is->video_st->codec->height) {
    SDL_Event event;

    slot->allocated = 0;
    event.type = FF_ALLOC_EVENT;
    event.user.data1 = is;
    SDL_PushEvent(&event);

    SDL_LockMutex(is->pictq_mutex);
    while (!slot->allocated && !is->quit)
      SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->quit)
      return -1;
  }

  if (slot->bmp) {
    SDL_LockYUVOverlay(slot->bmp);

    out_fmt = PIX_FMT_YUV420P;

    /* aim the conversion target at the overlay planes; SDL's YV12 layout
       is Y,V,U so the two chroma planes are swapped */
    target.data[0] = slot->bmp->pixels[0];
    target.data[1] = slot->bmp->pixels[2];
    target.data[2] = slot->bmp->pixels[1];
    target.linesize[0] = slot->bmp->pitches[0];
    target.linesize[1] = slot->bmp->pitches[2];
    target.linesize[2] = slot->bmp->pitches[1];

    /* convert the decoded frame into the YUV layout SDL expects */
    img_convert(&target, out_fmt,
                (AVPicture *)pFrame, is->video_st->codec->pix_fmt,
                is->video_st->codec->width, is->video_st->codec->height);

    SDL_UnlockYUVOverlay(slot->bmp);

    /* publish: advance the write cursor and tell the display thread */
    if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
      is->pictq_windex = 0;

    SDL_LockMutex(is->pictq_mutex);
    is->pictq_size++;
    SDL_UnlockMutex(is->pictq_mutex);
  }
  return 0;
}
/* mruby binding: unlock a previously locked SDL YUV overlay.
 * Takes one optional Ruby object (defaults to nil), converts it to an
 * SDL_Overlay* and calls SDL_UnlockYUVOverlay on it. Returns nil. */
static mrb_value
mrb_sdl_video_unlock_yuv_overlay(mrb_state *mrb, mrb_value self)
{
  mrb_value arg = mrb_nil_value();

  mrb_get_args(mrb, "|o", &arg);
  SDL_UnlockYUVOverlay(mrb_value_to_sdl_overlay(mrb, arg));
  return mrb_nil_value();
}
/* Blit a video surface to the SDL output.
 *
 * Two paths:
 *  - overlay_type != 0: YUV path. A NULL video_src releases the current
 *    overlay; otherwise the overlay is (re)created to match the source
 *    window and the pixels are copied via copy_yuv(), then displayed.
 *  - overlay_type == 0: RGB path. Copies into a pooled RGB(A) surface and
 *    blits it to the back buffer. No stretching is supported here.
 *
 * Returns GF_OK on success, GF_NOT_SUPPORTED for unsupported formats or
 * stretched RGB blits, GF_IO_ERR if the pool surface cannot be created.
 */
static GF_Err SDL_Blit(GF_VideoOutput *dr, GF_VideoSurface *video_src, GF_Window *src_wnd, GF_Window *dst_wnd, u32 overlay_type)
{
	SDLVID();
	u32 amask = 0;
	u32 bpp, i;
	u8 *dst, *src;
	SDL_Rect srcrc, dstrc;
	SDL_Surface **pool;

	if (overlay_type) {
		/* a NULL source means "tear down the overlay" */
		if (!video_src) {
			if (ctx->yuv_overlay) {
				SDL_FreeYUVOverlay(ctx->yuv_overlay);
				ctx->yuv_overlay=NULL;
			}
			return GF_OK;
		}
		/* (re)create the overlay when missing or when its size no longer
		   matches the source window */
		if (!ctx->yuv_overlay || (ctx->yuv_overlay->w != src_wnd->w) || (ctx->yuv_overlay->h != src_wnd->h) ) {
			if (ctx->yuv_overlay) SDL_FreeYUVOverlay(ctx->yuv_overlay);

			ctx->yuv_overlay = SDL_CreateYUVOverlay(src_wnd->w, src_wnd->h, SDL_YV12_OVERLAY, ctx->screen);
			if (!ctx->yuv_overlay) return GF_NOT_SUPPORTED;
		}
		/*copy pixels*/
		SDL_LockYUVOverlay(ctx->yuv_overlay);

		/* NOTE(review): only pitches[0] is passed; copy_yuv presumably
		   derives the chroma pitches from it — confirm against copy_yuv */
		copy_yuv(ctx->yuv_overlay->pixels[0], ctx->yuv_overlay->pixels[1], ctx->yuv_overlay->pixels[2], GF_PIXEL_YV12, ctx->yuv_overlay->pitches[0],
		         video_src->video_buffer, video_src->pitch_y, video_src->pixel_format,
		         video_src->width, video_src->height, src_wnd);

		SDL_UnlockYUVOverlay(ctx->yuv_overlay);

		dstrc.w = dst_wnd->w;
		dstrc.h = dst_wnd->h;
		dstrc.x = dst_wnd->x;
		dstrc.y = dst_wnd->y;
		SDL_DisplayYUVOverlay(ctx->yuv_overlay, &dstrc);
		return GF_OK;
	}

	/*SDL doesn't support stretching ...*/
	if ((src_wnd->w != dst_wnd->w) || (src_wnd->h!=dst_wnd->h))
		return GF_NOT_SUPPORTED;

	/* pick the pooled surface matching the source pixel format */
	switch (video_src->pixel_format) {
	case GF_PIXEL_RGB_24:
		pool = &ctx->pool_rgb;
		bpp = 3;
		break;
	case GF_PIXEL_RGBA:
		pool = &ctx->pool_rgba;
		amask = 0xFF000000;
		bpp = 4;
		break;
	default:
		return GF_NOT_SUPPORTED;
	}

	/* grow the pooled surface if it is missing or too small */
	if (! *pool || ((*pool)->w < (int) src_wnd->w) || ((*pool)->h < (int) src_wnd->h) ) {
		if ((*pool)) SDL_FreeSurface((*pool));
		(*pool) = SDL_CreateRGBSurface(ctx->use_systems_memory ? SDL_SWSURFACE : SDL_HWSURFACE,
		                               src_wnd->w, src_wnd->h, 8*bpp,
		                               0x000000FF, 0x0000FF00, 0x00FF0000, amask);
		if (! (*pool) ) return GF_IO_ERR;
	}

	/* row-by-row copy of the source window into the pool surface,
	   honoring the differing pitches on each side */
	SDL_LockSurface(*pool);
	dst = (u8 *) ( (*pool)->pixels);
	src = video_src->video_buffer + video_src->pitch_y*src_wnd->y + src_wnd->x*bpp;
	for (i=0; i<src_wnd->h; i++) {
		memcpy(dst, src, bpp * src_wnd->w);
		src += video_src->pitch_y;
		dst += (*pool)->pitch;
	}
	SDL_UnlockSurface(*pool);

	srcrc.w = src_wnd->w;
	srcrc.h = src_wnd->h;
	srcrc.x = 0;
	srcrc.y = 0;
	dstrc.w = dst_wnd->w;
	dstrc.h = dst_wnd->h;
	dstrc.x = dst_wnd->x;
	dstrc.y = dst_wnd->y;
	SDL_BlitSurface(*pool, &srcrc, ctx->back_buffer, &dstrc);
	return GF_OK;
}
/* Push one decoded frame (with its pts) onto the shared picture queue.
 *
 * Blocks until a queue slot is free, asks the main thread (via
 * FF_ALLOC_EVENT) to (re)allocate the overlay on size changes, converts
 * the frame into the overlay planes, then advances the write index.
 * Returns 0 on success, -1 if is->quit was raised while waiting.
 *
 * Fixes over the original:
 *  - the sws context was re-created on EVERY call and never freed
 *    (unbounded leak); it is now created once and reused
 *  - the *source* pixel format was hard-coded as PIX_FMT_YUV420P; it now
 *    uses the stream's actual format (is->video_ctx->pix_fmt), so
 *    non-YUV420P sources are converted correctly
 */
int queue_picture(VideoState *is, AVFrame *pFrame, double pts) {

  VideoPicture *vp;
  int dst_pix_fmt;
  AVPicture pict;

  /* wait until we have space for a new pic */
  SDL_LockMutex(is->pictq_mutex);
  while(is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->quit) {
    SDL_CondWait(is->pictq_cond, is->pictq_mutex);
  }
  SDL_UnlockMutex(is->pictq_mutex);

  if(is->quit)
    return -1;

  // windex is set to 0 initially
  vp = &is->pictq[is->pictq_windex];

  // allocate or resize the buffer!
  if(!vp->bmp ||
     vp->width != is->video_ctx->width ||
     vp->height != is->video_ctx->height) {
    SDL_Event event;

    vp->allocated = 0;
    // Send event to the main thread so we can allocate the picture
    event.type = FF_ALLOC_EVENT;
    event.user.data1 = is;
    SDL_PushEvent(&event);

    // wait until we have a picture allocated
    SDL_LockMutex(is->pictq_mutex);
    while(!vp->allocated && !is->quit) {
      SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);
    if(is->quit) {
      return -1;
    }
  }

  if(vp->bmp) {

    SDL_LockYUVOverlay(vp->bmp);

    dst_pix_fmt = PIX_FMT_YUV420P;

    // point pict at the queue; SDL's YV12 layout is Y,V,U so the two
    // chroma planes are swapped
    pict.data[0] = vp->bmp->pixels[0];
    pict.data[1] = vp->bmp->pixels[2];
    pict.data[2] = vp->bmp->pixels[1];

    pict.linesize[0] = vp->bmp->pitches[0];
    pict.linesize[1] = vp->bmp->pitches[2];
    pict.linesize[2] = vp->bmp->pitches[1];

    // Convert the image into YUV format that SDL uses.
    // FIX: create the scaler once (it used to be rebuilt per frame and
    // leaked) and use the stream's real source pixel format.
    static struct SwsContext *img_convert_ctx;
    if (img_convert_ctx == NULL) {
      img_convert_ctx = sws_getContext(is->video_ctx->width, is->video_ctx->height,
                                       is->video_ctx->pix_fmt,
                                       is->video_ctx->width, is->video_ctx->height,
                                       dst_pix_fmt, SWS_BICUBIC, NULL, NULL, NULL);
    }
    sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize,
              0, is->video_ctx->height, pict.data, pict.linesize);

    SDL_UnlockYUVOverlay(vp->bmp);
    vp->pts = pts;

    /* now we inform our display thread that we have a pic ready */
    if(++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE) {
      is->pictq_windex = 0;
    }
    SDL_LockMutex(is->pictq_mutex);
    is->pictq_size++;
    SDL_UnlockMutex(is->pictq_mutex);
  }
  return 0;
}
/*
 * Hand one decoded frame over to the display side.
 *
 * Blocks while the ring of VideoFrame slots is full, (re)allocates the
 * SDL overlay on the main thread (via FF_ALLOC_EVENT) when the codec
 * dimensions change, then converts the frame into the overlay's YV12
 * planes and bumps the queue write index / size under the queue mutex.
 *
 * Always returns 0; the caller keeps ownership of pFrame.
 */
int video_frame_queue(VideoState *video, AVFrame *pFrame)
{
    AVPicture target;
    VideoFrame *slot;
    int out_fmt;
    static struct SwsContext *scaler;   /* created once, reused forever */

    /* Block until the display thread has consumed at least one slot. */
    SDL_LockMutex(video->vFrameqMutex);
    while (video->vFrameqSize >= VIDEO_PICTURE_QUEUE_SIZE)
        SDL_CondWait(video->vFrameqCond, video->vFrameqMutex);
    SDL_UnlockMutex(video->vFrameqMutex);

    /* vFrameqWindex starts at 0 */
    slot = &video->vFrameq[video->vFrameqWindex];

    /* Overlay missing or sized for a different stream: ask the main
     * thread to (re)allocate it, then wait for the signal. */
    if (!slot->bmp ||
        slot->width != video->videoStream->codec->width ||
        slot->height != video->videoStream->codec->height) {
        SDL_Event ev;

        slot->allocated = 0;
        ev.type = FF_ALLOC_EVENT;
        ev.user.data1 = video;
        SDL_PushEvent(&ev);

        SDL_LockMutex(video->vFrameqMutex);
        while (!slot->allocated)
            SDL_CondWait(video->vFrameqCond, video->vFrameqMutex);
        SDL_UnlockMutex(video->vFrameqMutex);
    }

    if (slot->bmp) {
        SDL_LockYUVOverlay(slot->bmp);

        out_fmt = PIX_FMT_YUV420P;
        /* SDL's YV12 overlay keeps V in plane 1 and U in plane 2, so the
         * chroma planes are cross-wired here. */
        target.data[0] = slot->bmp->pixels[0];
        target.data[1] = slot->bmp->pixels[2];
        target.data[2] = slot->bmp->pixels[1];
        target.linesize[0] = slot->bmp->pitches[0];
        target.linesize[1] = slot->bmp->pitches[2];
        target.linesize[2] = slot->bmp->pitches[1];

        /* Lazily build the conversion context the first time through. */
        if (scaler == NULL) {
            int w = video->videoStream->codec->width;
            int h = video->videoStream->codec->height;
            scaler = sws_getContext(w, h,
                                    video->videoStream->codec->pix_fmt,
                                    w, h, out_fmt, SWS_BICUBIC,
                                    NULL, NULL, NULL);
            if (scaler == NULL) {
                fprintf(stderr, "Cannot init the conversion context!\n");
                exit(-1);
            }
        }

        sws_scale(scaler, pFrame->data, pFrame->linesize, 0,
                  video->videoStream->codec->height,
                  target.data, target.linesize);

        SDL_UnlockYUVOverlay(slot->bmp);

        /* advance the ring's write index, wrapping around */
        if (++video->vFrameqWindex == VIDEO_PICTURE_QUEUE_SIZE)
            video->vFrameqWindex = 0;

        SDL_LockMutex(video->vFrameqMutex);
        video->vFrameqSize++;
        SDL_UnlockMutex(video->vFrameqMutex);
    }
    return 0;
}
int main( int argc, char **argv ) { unicap_handle_t handle; unicap_device_t device; unicap_format_t format_spec; unicap_format_t format; unicap_data_buffer_t buffer; unicap_data_buffer_t *returned_buffer; int width, height; int i; SDL_Surface *screen; SDL_Overlay *overlay; int quit=0; int imgcnt = 0; printf( "select video device\n" ); for( i = 0; SUCCESS( unicap_enumerate_devices( NULL, &device, i ) ); i++ ) { printf( "%i: %s\n", i, device.identifier ); } if( --i > 0 ) { printf( "Select video capture device: " ); scanf( "%d", &i ); } if( !SUCCESS( unicap_enumerate_devices( NULL, &device, i ) ) ) { fprintf( stderr, "Failed to get info for device '%s'\n", device.identifier ); exit( 1 ); } /* Acquire a handle to this device */ if( !SUCCESS( unicap_open( &handle, &device ) ) ) { fprintf( stderr, "Failed to open device: %s\n", device.identifier ); exit( 1 ); } printf( "Opened video capture device: %s\n", device.identifier ); /* Create a format specification to limit the list of formats returned by unicap_enumerate_formats to the ones with the color format 'UYVY' */ unicap_void_format( &format_spec ); format_spec.fourcc = FOURCC('U','Y','V','Y'); /* Get the list of video formats of the colorformat UYVY */ for( i = 0; SUCCESS( unicap_enumerate_formats( handle, &format_spec, &format, i ) ); i++ ) { printf( "%d: %s [%dx%d]\n", i, format.identifier, format.size.width, format.size.height ); } if( --i > 0 ) { printf( "Select video format: " ); scanf( "%d", &i ); } if( !SUCCESS( unicap_enumerate_formats( handle, &format_spec, &format, i ) ) ) { fprintf( stderr, "Failed to get video format\n" ); exit( 1 ); } /* If a video format has more than one size, ask for which size to use */ if( format.size_count ) { for( i = 0; i < format.size_count; i++ ) { printf( "%d: %dx%d\n", i, format.sizes[i].width, format.sizes[i].height ); } do { printf( "Select video format size: " ); scanf( "%d", &i ); }while( ( i < 0 ) && ( i > format.size_count ) ); format.size.width = format.sizes[i].width; 
format.size.height = format.sizes[i].height; } /* Set this video format */ if( !SUCCESS( unicap_set_format( handle, &format ) ) ) { fprintf( stderr, "Failed to set video format\n" ); exit( 1 ); } /* Initialize the image buffer */ memset( &buffer, 0x0, sizeof( unicap_data_buffer_t ) ); /** Init SDL & SDL_Overlay **/ if ( SDL_Init(SDL_INIT_VIDEO) < 0 ) { fprintf(stderr, "Failed to initialize SDL: %s\n", SDL_GetError()); exit(1); } atexit(SDL_Quit); /* Make sure the video window does not get too big. */ width = MIN( format.size.width, 800 ); height = MIN( format.size.height, 600 ); screen = SDL_SetVideoMode( width, height, 32, SDL_HWSURFACE); if ( screen == NULL ) { fprintf(stderr, "Unable to set video mode: %s\n", SDL_GetError()); exit(1); } overlay = SDL_CreateYUVOverlay( format.size.width, format.size.height, SDL_UYVY_OVERLAY, screen ); if( overlay == NULL ) { fprintf( stderr, "Unable to create overlay: %s\n", SDL_GetError() ); exit( 1 ); } /* Pass the pointer to the overlay to the unicap data buffer. 
*/ buffer.data = overlay->pixels[0]; buffer.buffer_size = format.size.width * format.size.height * format.bpp / 8; /* Start the capture process on the device */ if( !SUCCESS( unicap_start_capture( handle ) ) ) { fprintf( stderr, "Failed to start capture on device: %s\n", device.identifier ); exit( 1 ); } while( !quit ) { SDL_Rect rect; SDL_Event event; rect.x = 0; rect.y = 0; rect.w = width; rect.h = height; /* Queue the buffer The buffer now gets filled with image data by the capture device */ if( !SUCCESS( unicap_queue_buffer( handle, &buffer ) ) ) { fprintf( stderr, "Failed to queue a buffer on device: %s\n", device.identifier ); exit( 1 ); } /* Wait until the image buffer is ready */ if( !SUCCESS( unicap_wait_buffer( handle, &returned_buffer ) ) ) { fprintf( stderr, "Failed to wait for buffer on device: %s\n", device.identifier ); } /* Display the video data */ SDL_UnlockYUVOverlay( overlay ); SDL_DisplayYUVOverlay( overlay, &rect ); while( SDL_PollEvent( &event ) ) { switch( event.type ) { case SDL_QUIT: quit = 1; break; case SDL_MOUSEBUTTONDOWN: { unsigned char *pixels; struct jpeg_compress_struct cinfo; struct jpeg_error_mgr jerr; FILE *outfile; JSAMPROW row_pointer[1]; int row_stride; char filename[128]; struct timeval t1, t2; unsigned long long usecs; sprintf( filename, "%04d.jpg", imgcnt++ ); cinfo.err = jpeg_std_error(&jerr); /* Now we can initialize the JPEG compression object. 
*/ jpeg_create_compress(&cinfo); if ((outfile = fopen( filename, "wb" ) ) == NULL ) { fprintf(stderr, "can't open %s\n", "file"); exit(1); } jpeg_stdio_dest(&cinfo, outfile); cinfo.image_width = format.size.width; /* image width and height, in pixels */ cinfo.image_height = format.size.height; cinfo.input_components = 3; /* # of color components per pixel */ cinfo.in_color_space = JCS_RGB; /* colorspace of input image */ jpeg_set_defaults(&cinfo); pixels = malloc( format.size.width * format.size.height * 3 ); uyvy2rgb24( pixels, returned_buffer->data, format.size.width * format.size.height * 3, format.size.width * format.size.height * 2 ); gettimeofday( &t1, NULL ); jpeg_start_compress(&cinfo, TRUE); while( cinfo.next_scanline < cinfo.image_height ) { row_pointer[0] = &pixels[cinfo.next_scanline * format.size.width * 3 ]; (void) jpeg_write_scanlines(&cinfo, row_pointer, 1); } jpeg_finish_compress(&cinfo); gettimeofday( &t2, NULL ); usecs = t2.tv_sec * 1000000LL + t2.tv_usec; usecs -= ( t1.tv_sec * 1000000LL + t1.tv_usec ); printf( "Compression took: %lld usec\n", usecs ); /* After finish_compress, we can close the output file. */ fclose(outfile); jpeg_destroy_compress(&cinfo); free( pixels ); } break; default: break; } } SDL_LockYUVOverlay(overlay); } /* Stop the device */ if( !SUCCESS( unicap_stop_capture( handle ) ) ) { fprintf( stderr, "Failed to stop capture on device: %s\n", device.identifier ); } /* Close the device This invalidates the handle */ if( !SUCCESS( unicap_close( handle ) ) ) { fprintf( stderr, "Failed to close the device: %s\n", device.identifier ); } SDL_Quit(); return 0; }
/*
 * Hand a decoded frame (with its presentation timestamp) to the display
 * thread via the picture queue.  Blocks while the queue is full, lets
 * alloc_picture() size the SDL overlay when needed, then converts the
 * frame into the overlay's YV12 planes and publishes it.
 *
 * Returns 0 normally, -1 when the player is quitting.
 */
int queue_picture(VideoState *is, AVFrame *pFrame, double pts)
{
    VideoPicture *slot;
    int target_fmt;
    AVPicture target;

    /* Block until there is room for one more picture, or we are told
     * to quit. */
    SDL_LockMutex(is->pictq_mutex);
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->quit)
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->quit)
        return -1;

    /* pictq_windex starts at 0 */
    slot = &is->pictq[is->pictq_windex];

    /* (Re)allocate the overlay when it is missing or the codec size
     * changed. */
    if (!slot->bmp ||
        slot->width != is->video_ctx->width ||
        slot->height != is->video_ctx->height) {
        slot->allocated = 0;
        alloc_picture(is);
        if (is->quit)
            return -1;
    }

    /* We have a place to put our picture on the queue. */
    if (slot->bmp) {
        SDL_LockYUVOverlay(slot->bmp);
        slot->pts = pts;

        target_fmt = PIX_FMT_YUV420P;
        /* SDL's YV12 layout stores V before U, so planes 1 and 2 swap. */
        target.data[0] = slot->bmp->pixels[0];
        target.data[1] = slot->bmp->pixels[2];
        target.data[2] = slot->bmp->pixels[1];
        target.linesize[0] = slot->bmp->pitches[0];
        target.linesize[1] = slot->bmp->pitches[2];
        target.linesize[2] = slot->bmp->pitches[1];

        /* Convert into the overlay using the shared scaler context. */
        sws_scale(is->sws_ctx, (uint8_t const * const *)pFrame->data,
                  pFrame->linesize, 0, is->video_ctx->height,
                  target.data, target.linesize);

        SDL_UnlockYUVOverlay(slot->bmp);

        /* Publish: advance the ring index and bump the queue size. */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
int main(int argc, char *argv[]) { AVFormatContext *pFormatCtx; int i, videoStream; AVCodecContext *pCodecCtx; AVCodec *pCodec; AVFrame *pFrame; AVPacket packet; int frameFinished; float aspect_ratio; SDL_Overlay *bmp; SDL_Surface *screen; SDL_Rect rect; SDL_Event event; if(argc < 2) { fprintf(stderr, "Usage: test <file>\n"); exit(1); } // Register all formats and codecs av_register_all(); if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) { fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError()); exit(1); } // Open video file if(av_open_input_file(&pFormatCtx, argv[1], NULL, 0, NULL)!=0) return -1; // Couldn't open file // Retrieve stream information if(av_find_stream_info(pFormatCtx)<0) return -1; // Couldn't find stream information // Dump information about file onto standard error dump_format(pFormatCtx, 0, argv[1], 0); // Find the first video stream videoStream=-1; for(i=0; i<pFormatCtx->nb_streams; i++) if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO) { videoStream=i; break; } if(videoStream==-1) return -1; // Didn't find a video stream // Get a pointer to the codec context for the video stream pCodecCtx=pFormatCtx->streams[videoStream]->codec; // Find the decoder for the video stream pCodec=avcodec_find_decoder(pCodecCtx->codec_id); if(pCodec==NULL) { fprintf(stderr, "Unsupported codec!\n"); return -1; // Codec not found } // Open codec if(avcodec_open(pCodecCtx, pCodec)<0) return -1; // Could not open codec // Allocate video frame pFrame=avcodec_alloc_frame(); // Make a screen to put our video #ifndef __DARWIN__ screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0); #else screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0); #endif if(!screen) { fprintf(stderr, "SDL: could not set video mode - exiting\n"); exit(1); } // Allocate a place to put our YUV image on that screen bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height, SDL_YV12_OVERLAY, screen); // Read frames and save 
first five frames to disk i=0; while(av_read_frame(pFormatCtx, &packet)>=0) { // Is this a packet from the video stream? if(packet.stream_index==videoStream) { // Decode video frame avcodec_decode_video(pCodecCtx, pFrame, &frameFinished, packet.data, packet.size); // Did we get a video frame? if(frameFinished) { SDL_LockYUVOverlay(bmp); AVPicture pict; pict.data[0] = bmp->pixels[0]; pict.data[1] = bmp->pixels[2]; pict.data[2] = bmp->pixels[1]; pict.linesize[0] = bmp->pitches[0]; pict.linesize[1] = bmp->pitches[2]; pict.linesize[2] = bmp->pitches[1]; // Convert the image into YUV format that SDL uses img_convert(&pict, PIX_FMT_YUV420P, (AVPicture *)pFrame, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height); SDL_UnlockYUVOverlay(bmp); rect.x = 0; rect.y = 0; rect.w = pCodecCtx->width; rect.h = pCodecCtx->height; SDL_DisplayYUVOverlay(bmp, &rect); } } // Free the packet that was allocated by av_read_frame av_free_packet(&packet); SDL_PollEvent(&event); switch(event.type) { case SDL_QUIT: SDL_Quit(); exit(0); break; default: break; } } // Free the YUV frame av_free(pFrame); // Close the codec avcodec_close(pCodecCtx); // Close the video file av_close_input_file(pFormatCtx); return 0; }
/** * Display a video frame * * @param st Video display state * @param title Window title * @param frame Video frame * * @return 0 if success, otherwise errorcode * * @note: On Darwin, this must be called from the main thread */ static int display(struct vidisp_st *st, const char *title, const struct vidframe *frame) { SDL_Rect rect; if (!st || !sdl.open) return EINVAL; if (!vidsz_cmp(&sdl.size, &frame->size)) { if (sdl.size.w && sdl.size.h) { info("sdl: reset size %u x %u ---> %u x %u\n", sdl.size.w, sdl.size.h, frame->size.w, frame->size.h); } sdl_reset(); } if (!sdl.screen) { int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL; char capt[256]; if (sdl.fullscreen) flags |= SDL_FULLSCREEN; else if (sdl.resizeh) flags |= SDL_RESIZABLE; if (title) { re_snprintf(capt, sizeof(capt), "%s - %u x %u", title, frame->size.w, frame->size.h); } else { re_snprintf(capt, sizeof(capt), "%u x %u", frame->size.w, frame->size.h); } SDL_WM_SetCaption(capt, capt); sdl.screen = SDL_SetVideoMode(frame->size.w, frame->size.h, 0, flags); if (!sdl.screen) { warning("sdl: unable to get video screen: %s\n", SDL_GetError()); return ENODEV; } sdl.size = frame->size; } if (!sdl.bmp) { sdl.bmp = SDL_CreateYUVOverlay(frame->size.w, frame->size.h, SDL_YV12_OVERLAY, sdl.screen); if (!sdl.bmp) { warning("sdl: unable to create overlay: %s\n", SDL_GetError()); return ENODEV; } } SDL_LockYUVOverlay(sdl.bmp); picture_copy(sdl.bmp->pixels, sdl.bmp->pitches, frame); SDL_UnlockYUVOverlay(sdl.bmp); rect.x = 0; rect.y = 0; rect.w = sdl.size.w; rect.h = sdl.size.h; SDL_DisplayYUVOverlay(sdl.bmp, &rect); return 0; }
/*
 * Pull one buffer from the component's input queue and process it.
 *
 * Returns 1 when a buffer was consumed (work was done), 0 when the queue
 * was empty.  Event buffers (format changes) are handled here too; payload
 * buffers are blitted plane-by-plane into the SDL overlay and displayed
 * letterboxed inside the configured display region.
 */
static MMAL_BOOL_T sdl_do_processing(MMAL_COMPONENT_T *component)
{
   MMAL_PORT_T *port = component->input[0];
   MMAL_COMPONENT_MODULE_T *module = component->priv->module;
   unsigned int width = port->format->es->video.width;
   unsigned int height = port->format->es->video.height;
   MMAL_BUFFER_HEADER_T *buffer;
   uint8_t *src_plane[3];
   uint32_t *src_pitch;
   unsigned int i, line;
   MMAL_BOOL_T eos;
   SDL_Rect rect;

   buffer = mmal_queue_get(module->queue);
   if (!buffer)
      return 0; /* nothing queued: report "no work done" */

   /* remember EOS now; the buffer is handed back before we act on it */
   eos = buffer->flags & MMAL_BUFFER_HEADER_FLAG_EOS;

   /* Handle event buffers */
   if (buffer->cmd)
   {
      MMAL_EVENT_FORMAT_CHANGED_T *event = mmal_event_format_changed_get(buffer);
      if (event)
      {
         /* adopt the new format and re-run the port's format setup */
         mmal_format_copy(port->format, event->format);
         module->status = port->priv->pf_set_format(port);
         if (module->status != MMAL_SUCCESS)
         {
            LOG_ERROR("format not set on port %p", port);
            if (mmal_event_error_send(port->component, module->status) != MMAL_SUCCESS)
               LOG_ERROR("unable to send an error event buffer");
         }
      }
      else
      {
         LOG_ERROR("discarding event %i on port %p", (int)buffer->cmd, port);
      }
      /* return the event buffer to the client untouched */
      buffer->length = 0;
      mmal_port_buffer_header_callback(port, buffer);
      return 1;
   }

   /* an earlier format failure leaves the component unusable */
   if (module->status != MMAL_SUCCESS)
      return 1;

   /* Ignore empty buffers */
   if (!buffer->length)
      goto end;

   // FIXME: sanity check the size of the buffer

   /* Blit the buffer onto the overlay.  Source planes 1 and 2 are
      swapped because the overlay is YV12 (V plane before U plane). */
   src_pitch = buffer->type->video.pitch;
   src_plane[0] = buffer->data + buffer->type->video.offset[0];
   src_plane[1] = buffer->data + buffer->type->video.offset[2];
   src_plane[2] = buffer->data + buffer->type->video.offset[1];

   SDL_LockYUVOverlay(module->sdl_overlay);
   for (i=0; i<3; i++)
   {
      uint8_t *src = src_plane[i];
      uint8_t *dst = module->sdl_overlay->pixels[i];
      /* chroma planes are subsampled 2x2; width/height are restored
         after the loop */
      if(i == 1) {width /= 2; height /= 2;}
      for(line = 0; line < height; line++)
      {
         memcpy(dst, src, width);
         src += src_pitch[i];
         dst += module->sdl_overlay->pitches[i];
      }
   }
   SDL_UnlockYUVOverlay(module->sdl_overlay);

   /* restore the full-resolution dimensions clobbered above */
   width = port->format->es->video.width;
   height = port->format->es->video.height;

   /* scale to the display region's width, keep aspect, centre
      vertically (letterbox) */
   rect.x = module->display_region.x;
   rect.w = module->display_region.width;
   height = rect.w * height / width;
   rect.y = module->display_region.y + (module->display_region.height - height) / 2;
   rect.h = height;
   SDL_DisplayYUVOverlay(module->sdl_overlay, &rect);

 end:
   /* hand the (now consumed) buffer back to the client */
   buffer->offset = buffer->length = 0;
   mmal_port_buffer_header_callback(port, buffer);

   /* Generate EOS events */
   if (eos)
      mmal_event_eos_send(port);

   return 1;
}
/*
 * Render one OMX buffer onto the SDL YUV overlay and mark it consumed.
 *
 * The payload is treated as planar YUV420 (see the "hard-coded" comment
 * below) with the luma pitch rounded up to a 16-pixel boundary.  When the
 * overlay's pitches match that layout each plane is copied wholesale;
 * otherwise every plane is copied line by line at the overlay's own pitch.
 * The buffer is always marked consumed (nFilledLen = 0), even when no
 * overlay exists yet.
 *
 * NOTE(review): in the mismatched-pitch path each row copy uses the
 * overlay's pitch as the copy length; if the overlay pitch exceeds the
 * source pitch this reads past the end of the source row — confirm
 * against the buffer allocator's padding guarantees.
 */
static OMX_ERRORTYPE
sdlivr_proc_render_buffer (const sdlivr_prc_t *ap_prc, OMX_BUFFERHEADERTYPE * p_hdr)
{
  assert (ap_prc);

  if (ap_prc->p_overlay)
    {
      /* AVPicture pict; */
      SDL_Rect rect;
      uint8_t *y;
      uint8_t *u;
      uint8_t *v;
      unsigned int bytes;
      int pitch0, pitch1;

      /* align pitch on 16-pixel boundary. */
      pitch0 = (ap_prc->vportdef_.nFrameWidth + 15) & ~15;
      pitch1 = pitch0 / 2;

      /* hard-coded to be YUV420 plannar: Y plane, then U, then V */
      y = p_hdr->pBuffer;
      u = y + pitch0 * ap_prc->vportdef_.nFrameHeight;
      v = u + pitch1 * ap_prc->vportdef_.nFrameHeight / 2;

      SDL_LockYUVOverlay (ap_prc->p_overlay);

      /* Overlay pitches differ from the source layout: copy each plane
         line by line.  U goes into overlay plane 2 and V into plane 1
         because the overlay is YV12 (V stored before U). */
      if (ap_prc->p_overlay->pitches[0] != pitch0
          || ap_prc->p_overlay->pitches[1] != pitch1
          || ap_prc->p_overlay->pitches[2] != pitch1)
        {
          int hh;
          uint8_t *y2;
          uint8_t *u2;
          uint8_t *v2;
          y2 = ap_prc->p_overlay->pixels[0];
          u2 = ap_prc->p_overlay->pixels[2];
          v2 = ap_prc->p_overlay->pixels[1];
          for (hh = 0; hh < ap_prc->vportdef_.nFrameHeight; hh++)
            {
              memcpy (y2, y, ap_prc->p_overlay->pitches[0]);
              y2 += ap_prc->p_overlay->pitches[0];
              y += pitch0;
            }
          for (hh = 0; hh < ap_prc->vportdef_.nFrameHeight / 2; hh++)
            {
              memcpy (u2, u, ap_prc->p_overlay->pitches[2]);
              u2 += ap_prc->p_overlay->pitches[2];
              u += pitch1;
            }
          for (hh = 0; hh < ap_prc->vportdef_.nFrameHeight / 2; hh++)
            {
              memcpy (v2, v, ap_prc->p_overlay->pitches[1]);
              v2 += ap_prc->p_overlay->pitches[1];
              v += pitch1;
            }
        }
      else
        {
          /* Pitches match exactly: copy each plane in one shot. */
          bytes = pitch0 * ap_prc->vportdef_.nFrameHeight;
          memcpy (ap_prc->p_overlay->pixels[0], y, bytes);
          bytes = pitch1 * ap_prc->vportdef_.nFrameHeight / 2;
          memcpy (ap_prc->p_overlay->pixels[2], u, bytes);
          bytes = pitch1 * ap_prc->vportdef_.nFrameHeight / 2;
          memcpy (ap_prc->p_overlay->pixels[1], v, bytes);
        }
      SDL_UnlockYUVOverlay (ap_prc->p_overlay);

      /* display the whole frame at the port-defined size */
      rect.x = 0;
      rect.y = 0;
      rect.w = ap_prc->vportdef_.nFrameWidth;
      rect.h = ap_prc->vportdef_.nFrameHeight;
      SDL_DisplayYUVOverlay (ap_prc->p_overlay, &rect);
    }

  /* the buffer is consumed regardless of whether it was displayed */
  p_hdr->nFilledLen = 0;
  return OMX_ErrorNone;
}
/*
 * Push one decoded frame (plus its pts) onto the shared picture queue
 * for the display thread.  Blocks while the queue is full, posts
 * FF_ALLOC_EVENT to the main thread when the SDL overlay must be
 * (re)allocated, then converts the frame into the overlay's YV12 planes
 * and publishes the slot.
 *
 * Returns 0 normally, -1 when the player is quitting.
 */
int queue_picture(VideoState *is, AVFrame *pFrame, double pts)
{
    VideoPicture *slot;
    AVPicture target;

    /* Wait for a free slot, bailing out if the player is quitting. */
    SDL_LockMutex(is->pictq_mutex);
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->quit)
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->quit)
        return -1;

    /* write index starts at 0 */
    slot = &is->pictq[is->pictq_windex];

    /* Overlay missing or sized for a different stream: the allocation
     * must happen on the main thread, so post an event and wait. */
    if (!slot->bmp ||
        slot->width != is->video_st->codec->width ||
        slot->height != is->video_st->codec->height) {
        SDL_Event ev;

        slot->allocated = 0;
        ev.type = FF_ALLOC_EVENT;
        ev.user.data1 = is;
        SDL_PushEvent(&ev);

        SDL_LockMutex(is->pictq_mutex);
        while (!slot->allocated && !is->quit)
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->quit)
            return -1;
    }

    /* The overlay can still be NULL when a frame is being skipped;
     * in that case there is nothing to convert. */
    if (slot->bmp) {
        SDL_LockYUVOverlay(slot->bmp);

        /* YV12: V precedes U in SDL, hence the swapped plane indices. */
        target.data[0] = slot->bmp->pixels[0];
        target.data[1] = slot->bmp->pixels[2];
        target.data[2] = slot->bmp->pixels[1];
        target.linesize[0] = slot->bmp->pitches[0];
        target.linesize[1] = slot->bmp->pitches[2];
        target.linesize[2] = slot->bmp->pitches[1];

        /* Convert into the overlay with the shared scaler context. */
        sws_scale(is->sws_ctx, (uint8_t const * const *)pFrame->data,
                  pFrame->linesize, 0, is->video_st->codec->height,
                  target.data, target.linesize);

        SDL_UnlockYUVOverlay(slot->bmp);
        slot->pts = pts;

        /* tell the display thread a picture is ready */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
int main(int argc, char *argv[]) { int ret; AVPacket packet; AVFrame frame; int got_frame; avcodec_register_all(); av_register_all(); avfilter_register_all(); if ((ret = open_input_file("cuc_ieschool.flv")) < 0) goto end; if ((ret = init_filters(filter_descr)) < 0) goto end; #if ENABLE_YUVFILE FILE *fp_yuv = fopen("test.yuv", "wb+"); #endif #if ENABLE_SDL SDL_Surface *screen; SDL_Overlay *bmp; SDL_Rect rect; if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) { printf( "Could not initialize SDL - %s\n", SDL_GetError()); return -1; } screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0); if (!screen) { printf("SDL: could not set video mode - exiting\n"); return -1; } bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height, SDL_YV12_OVERLAY, screen); SDL_WM_SetCaption("Simplest FFmpeg Video Filter", NULL); #endif /* read all packets */ while (1) { AVFilterBufferRef *picref; if ((ret = av_read_frame(pFormatCtx, &packet)) < 0) break; if (packet.stream_index == video_stream_index) { avcodec_get_frame_defaults(&frame); got_frame = 0; ret = avcodec_decode_video2(pCodecCtx, &frame, &got_frame, &packet); if (ret < 0) { printf( "Error decoding video\n"); break; } if (got_frame) { frame.pts = av_frame_get_best_effort_timestamp(&frame); /* push the decoded frame into the filtergraph */ if (av_buffersrc_add_frame(buffersrc_ctx, &frame) < 0) { printf( "Error while feeding the filtergraph\n"); break; } /* pull filtered pictures from the filtergraph */ while (1) { ret = av_buffersink_get_buffer_ref(buffersink_ctx, &picref, 0); if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) break; if (ret < 0) goto end; if (picref) { #if ENABLE_YUVFILE int y_size = picref->video->w * picref->video->h; fwrite(picref->data[0], 1, y_size, fp_yuv); //Y fwrite(picref->data[1], 1, y_size / 4, fp_yuv); //U fwrite(picref->data[2], 1, y_size / 4, fp_yuv); //V #endif #if ENABLE_SDL SDL_LockYUVOverlay(bmp); int y_size = picref->video->w * picref->video->h; 
memcpy(bmp->pixels[0], picref->data[0], y_size); //Y memcpy(bmp->pixels[2], picref->data[1], y_size / 4); //U memcpy(bmp->pixels[1], picref->data[2], y_size / 4); //V bmp->pitches[0] = picref->linesize[0]; bmp->pitches[2] = picref->linesize[1]; bmp->pitches[1] = picref->linesize[2]; SDL_UnlockYUVOverlay(bmp); rect.x = 0; rect.y = 0; rect.w = picref->video->w; rect.h = picref->video->h; SDL_DisplayYUVOverlay(bmp, &rect); //Delay 40ms SDL_Delay(40); #endif avfilter_unref_bufferp(&picref); } } } } av_free_packet(&packet); } #if ENABLE_YUVFILE fclose(fp_yuv); #endif end: avfilter_graph_free(&filter_graph); if (pCodecCtx) avcodec_close(pCodecCtx); avformat_close_input(&pFormatCtx); if (ret < 0 && ret != AVERROR_EOF) { char buf[1024]; av_strerror(ret, buf, sizeof(buf)); printf("Error occurred: %s\n", buf); return -1; } return 0; }