Example #1
 void run() {
     std::cout << "[DIS] Initializing Video Display System.\n";
     pCodecCtx = media->decode->getpCodecCtx();
     pFrame = media->decode->getpFrame();
     pFrame_BGR24 = media->opencv->getpFrame_BGR24();
     // 2.1.1. Prepare format conversion for displaying with SDL
     // Allocate an AVFrame structure
     pFrame_YUV420P = avcodec_alloc_frame();
     if (pFrame_YUV420P == NULL) {
         std::cout << "[DIS] Could not allocate pFrame_YUV420P\n";
         exit(1);
     }
     // Determine required buffer size and allocate buffer
     buffer_YUV420P = (uint8_t *) malloc(avpicture_get_size(PIX_FMT_YUV420P,
             pCodecCtx->width, pCodecCtx->height));
     // Assign buffer to image planes
     avpicture_fill((AVPicture *) pFrame_YUV420P, buffer_YUV420P,
             PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
     // format conversion context
     pConvertCtx_YUV420P = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
             pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P,
             SWS_SPLINE, NULL, NULL, NULL);
     // 3.1.1 prepare SDL for YUV
     // allocate window, renderer, texture
     pWindow1 = SDL_CreateWindow("YUV", 0, 0, pCodecCtx->width, pCodecCtx->height, SDL_WINDOW_SHOWN);
     pRenderer1 = SDL_CreateRenderer(pWindow1, -1, SDL_RENDERER_ACCELERATED);
     bmpTex1 = SDL_CreateTexture(pRenderer1, SDL_PIXELFORMAT_YV12,
             SDL_TEXTUREACCESS_STREAMING, pCodecCtx->width, pCodecCtx->height);
     size1 = pCodecCtx->width * pCodecCtx->height;
     if (pWindow1 == NULL || pRenderer1 == NULL || bmpTex1 == NULL) {
         std::cout << "[DIS] Could not open window1: " << SDL_GetError() << std::endl;
         exit(1);
     }
     // 3.2.1 prepare SDL for BGR
     // allocate window, renderer, texture
     pWindow2 = SDL_CreateWindow("BGR", pCodecCtx->width + 5, 0, pCodecCtx->width, pCodecCtx->height, SDL_WINDOW_SHOWN);
     pRenderer2 = SDL_CreateRenderer(pWindow2, -1, SDL_RENDERER_ACCELERATED);
     bmpTex2 = SDL_CreateTexture(pRenderer2, SDL_PIXELFORMAT_BGR24,
             SDL_TEXTUREACCESS_STREAMING, pCodecCtx->width, pCodecCtx->height);
     size2 = pCodecCtx->width * pCodecCtx->height * 3;
     if (pWindow2 == NULL || pRenderer2 == NULL || bmpTex2 == NULL) {
         std::cout << "[DIS] Could not open window2: " << SDL_GetError() << std::endl;
         exit(1);
     }
     // 1.6. get video frames
     while (!terminated) {
         if (media->decode->frameDecoded) {
             // 2.1.2. convert frame to YUV for Displaying
             sws_scale(pConvertCtx_YUV420P, (const uint8_t * const*) pFrame->data, pFrame->linesize, 0,
                     pCodecCtx->height, pFrame_YUV420P->data, pFrame_YUV420P->linesize);
             // 3.1.2. copy converted YUV to SDL 2.0 texture
             SDL_LockTexture(bmpTex1, NULL, (void **) &pixels1, &pitch1);
             memcpy(pixels1, pFrame_YUV420P->data[0], size1);
             memcpy(pixels1 + size1, pFrame_YUV420P->data[2], size1 / 4);
             memcpy(pixels1 + size1 * 5 / 4, pFrame_YUV420P->data[1], size1 / 4);
             SDL_UnlockTexture(bmpTex1);
             // refresh screen
             SDL_RenderClear(pRenderer1);
             SDL_RenderCopy(pRenderer1, bmpTex1, NULL, NULL);
             SDL_RenderPresent(pRenderer1);
             // 3.2.2. copy converted BGR to SDL 2.0 texture
             SDL_LockTexture(bmpTex2, NULL, (void **) &pixels2, &pitch2);
             memcpy(pixels2, pFrame_BGR24->data[0], size2);
             SDL_UnlockTexture(bmpTex2);
             // refresh screen
             SDL_RenderClear(pRenderer2);
             SDL_RenderCopy(pRenderer2, bmpTex2, NULL, NULL);
             SDL_RenderPresent(pRenderer2);
         }
         // process user input
         SDL_PollEvent(&event);
         switch (event.type) {
             case SDL_KEYDOWN:
                 // check for the keyboard state
                 switch (event.key.keysym.sym) {
                         // quit
                     case SDLK_ESCAPE:
                         media->display->terminated = true;
                         break;
                         // reset emergency
                     case SDLK_r:
                         media->command->flag_reset = true;
                         break;
                         // decode qr code from image
                     case SDLK_g:
                         media->opencv->save_image = true;
                         break;
                     case SDLK_h:
                         media->opencv->send_data = true;
                         break;
                         // trim sensors
                     case SDLK_BACKSPACE:
                         media->command->flag_trim = true;
                         break;
                         // land
                     case SDLK_SPACE:
                         media->command->flag_land = true;
                         break;
                         // toggle absolute control mode
                     case SDLK_t:
                         media->command->flag_absl = !(media->command->flag_absl);
                         break;
                         // toggle combined yaw mode
                     case SDLK_y:
                         media->command->flag_cYaw = !(media->command->flag_cYaw);
                         break;
                         // calibrate magnetometer
                     case SDLK_c:
                         media->command->flag_mgnt = true;
                         break;
                         // take off
                     case SDLK_RETURN:
                         media->command->flag_takeoff = true;
                         break;
                     default:
                         break;
                 }
                 break;
         }
         // check for the keyboard state for moving keys
        const Uint8 *state = SDL_GetKeyboardState(NULL);
         if (state[SDL_SCANCODE_I] || state[SDL_SCANCODE_J] ||
                 state[SDL_SCANCODE_K] || state[SDL_SCANCODE_L] ||
                 state[SDL_SCANCODE_W] || state[SDL_SCANCODE_S] ||
                 state[SDL_SCANCODE_A] || state[SDL_SCANCODE_D] ||
                 state[SDL_SCANCODE_LSHIFT] || state[SDL_SCANCODE_RSHIFT]
                 ) {
             float mult;
             float base = 0.015;
             if (state[SDL_SCANCODE_LSHIFT] || state[SDL_SCANCODE_RSHIFT]) {
                 mult = 6;
             } else {
                 mult = 1;
             }
             // W for ascend
             if (state[SDL_SCANCODE_W]) {
                 if (media->command->arg3 < 0)
                     media->command->arg3 = 0;
                 media->command->arg3 += 2 * mult*base;
                 if (media->command->arg3 > 1)
                     media->command->arg3 = 1;
             }
             // S for descend
             if (state[SDL_SCANCODE_S]) {
                 if (media->command->arg3 > 0)
                     media->command->arg3 = 0;
                 media->command->arg3 -= 2 * mult*base;
                 if (media->command->arg3 < -1)
                     media->command->arg3 = -1;
             }
             // D for move right
             if (state[SDL_SCANCODE_D]) {
                 if (media->command->arg1 < 0)
                     media->command->arg1 = 0;
                 media->command->arg1 += mult*base;
                 if (media->command->arg1 > 1)
                     media->command->arg1 = 1;
             }
             // A for move left
             if (state[SDL_SCANCODE_A]) {
                 if (media->command->arg1 > 0)
                     media->command->arg1 = 0;
                 media->command->arg1 -= mult*base;
                 if (media->command->arg1 < -1)
                     media->command->arg1 = -1;
             }
            // K for move backward
             if (state[SDL_SCANCODE_K]) {
                 if (media->command->arg2 < 0)
                     media->command->arg2 = 0;
                 media->command->arg2 += mult*base;
                 if (media->command->arg2 > 1)
                     media->command->arg2 = 1;
             }
             // I for move forward
             if (state[SDL_SCANCODE_I]) {
                 if (media->command->arg2 > 0)
                     media->command->arg2 = 0;
                 media->command->arg2 -= mult*base;
                 if (media->command->arg2 < -1)
                     media->command->arg2 = -1;
             }
             // L for turn right
             if (state[SDL_SCANCODE_L]) {
                 if (media->command->arg4 < 0)
                     media->command->arg4 = 0;
                 media->command->arg4 += mult*base;
                 if (media->command->arg4 > 1)
                     media->command->arg4 = 1;
             }
             // J for turn left
             if (state[SDL_SCANCODE_J]) {
                 if (media->command->arg4 > 0)
                     media->command->arg4 = 0;
                 media->command->arg4 -= mult*base;
                 if (media->command->arg4 < -1)
                     media->command->arg4 = -1;
             }
             media->command->flag_PCMD = true;
         }
         boost::this_thread::sleep(boost::posix_time::microseconds(300));
     }
     // release
     // *note SDL objects have to be freed before closing avcodec.
     // otherwise it causes segmentation fault for some reason.
     SDL_DestroyTexture(bmpTex1);
     SDL_DestroyTexture(bmpTex2);
     SDL_DestroyRenderer(pRenderer1);
     SDL_DestroyRenderer(pRenderer2);
     SDL_DestroyWindow(pWindow1);
     SDL_DestroyWindow(pWindow2);
     av_free(pFrame_YUV420P);
     free(buffer_YUV420P);
     sws_freeContext(pConvertCtx_YUV420P);
 }
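Note: the three memcpy calls above write the planes in Y, V, U order because SDL_PIXELFORMAT_YV12 stores the V plane before the U plane, and they assume the texture pitch equals the frame width. On SDL 2.0.1 and later the manual packing can be replaced with SDL_UpdateYUVTexture; a minimal sketch against the same objects (pFrame_YUV420P and bmpTex1 as allocated above):

// Sketch (SDL >= 2.0.1): upload the three planes directly and let SDL
// handle the YV12 packing and per-plane pitches.
SDL_UpdateYUVTexture(bmpTex1, NULL,
        pFrame_YUV420P->data[0], pFrame_YUV420P->linesize[0],  // Y
        pFrame_YUV420P->data[1], pFrame_YUV420P->linesize[1],  // U
        pFrame_YUV420P->data[2], pFrame_YUV420P->linesize[2]); // V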
Example #2
// Uses ffmpeg sws_scale; common path for Cygwin, Win32 and Android.
// Not for iOS.
status_t FFRender::render_sws(AVFrame* frame)
{
#ifdef OS_IOS
    return ERROR;
#else
    if (mConvertCtx == NULL || mSurfaceFrame == NULL) {
        if (mConvertCtx != NULL) {
            sws_freeContext(mConvertCtx);
            mConvertCtx = NULL;
        }
        if (mSurfaceFrame != NULL) {
            av_frame_free(&mSurfaceFrame);
        }
        //just do color format conversion
        //avoid doing scaling as it cost lots of cpu
        AVPixelFormat out_fmt;
#if defined(__CYGWIN__) || defined(_MSC_VER)
        out_fmt = AV_PIX_FMT_RGB32;
#else
#ifdef RENDER_RGB565
        out_fmt = AV_PIX_FMT_RGB565;
#else
        out_fmt = AV_PIX_FMT_RGB0;
#endif
#endif
        mConvertCtx = sws_getContext(
                          frame->width, frame->height,
                          (AVPixelFormat)frame->format,
                          mFrameWidth, mFrameHeight,
                          out_fmt,
                          s_swsFlag, NULL, NULL, NULL);
        if (mConvertCtx == NULL) {
            LOGE("create convert ctx failed, width:%d, height:%d, pix:%d",
                 mFrameWidth,
                 mFrameHeight,
                 mFrameFormat);
            return ERROR;
        }
        LOGI("sws context created %dx%d %d->%d", mFrameWidth, mFrameHeight, mFrameFormat, AV_PIX_FMT_BGR24);

        mSurfaceFrame = av_frame_alloc();
        if (mSurfaceFrame == NULL) {
            LOGE("alloc frame failed");
            return ERROR;
        }

    }

    void* surfacePixels = NULL;
#ifdef __ANDROID__
    if (Surface_getPixels(mNativeWindow, &mSurfaceWidth, &mSurfaceHeight, &mSurfaceStride, &surfacePixels) != OK)
        return ERROR;
#else
    if (Surface_getPixels(&mSurfaceWidth, &mSurfaceHeight, &mSurfaceStride, &surfacePixels) != OK)
        return ERROR;
#endif

    // Convert the image
    int64_t begin_scale = getNowMs();
    if (mSurfaceStride >= mFrameWidth) {
        mSurfaceFrame->data[0] = (uint8_t*)surfacePixels;
#ifdef RENDER_RGB565
        mSurfaceFrame->linesize[0] = mSurfaceStride * 2;
#else
        mSurfaceFrame->linesize[0] = mSurfaceStride * 4;
#endif
        sws_scale(mConvertCtx,
                  frame->data,
                  frame->linesize,
                  0,
                  frame->height,
                  mSurfaceFrame->data,
                  mSurfaceFrame->linesize);
        LOGD("sws_scale frame width:%d", mFrameWidth);
        LOGD("sws_scale frame height:%d", mFrameHeight);
        LOGD("sws_scale surface width:%d", mSurfaceWidth);
        LOGD("sws_scale surface height:%d", mSurfaceHeight);
        LOGD("sws_scale surface stride:%d", mSurfaceStride);
    }
    else {
        LOGE("Surface memory is too small");
    }

    LOGD("before rendering frame");
#ifdef __ANDROID__
    if(Surface_updateSurface(mNativeWindow) != OK) {
#else
    if(Surface_updateSurface() != OK) {
#endif
        LOGE("Failed to render picture");
        return ERROR;
    }
    LOGD("after rendering frame");

    int64_t end_scale = getNowMs();
    int64_t costTime = end_scale-begin_scale;
    if(mAveScaleTimeMs == 0)
        mAveScaleTimeMs = costTime;
    else
        mAveScaleTimeMs = (mAveScaleTimeMs*4+costTime)/5;
    LOGD("sws scale picture cost %lld[ms]", costTime);
    LOGV("mAveScaleTimeMs %lld[ms]", mAveScaleTimeMs);

    //For debug
    /*
    char path[1024] = {0};
    static int num=0;
    num++;
    sprintf(path, "/mnt/sdcard/frame_rgb_%d", num);
    LOGD("mSurfaceFrame->linesize[0]:%d, mOptiSurfaceHeight:%d", mSurfaceFrame->linesize[0], mOptiSurfaceHeight);
    saveFrameRGB(mSurfaceFrame->data[0], mSurfaceFrame->linesize[0], mOptiSurfaceHeight, path);
    */
    return OK;
#endif
}

FFRender::~FFRender()
{
    if (mConvertCtx != NULL) {
        sws_freeContext(mConvertCtx);
        mConvertCtx = NULL;
    }
    if (mSurfaceFrame != NULL) {
        av_frame_free(&mSurfaceFrame);
    }
    if (mScalePixels != NULL) {
        free(mScalePixels);
        mScalePixels = NULL;
    }
    if (mScaleFrame != NULL) {
        av_frame_free(&mScaleFrame);
    }
#if defined(__CYGWIN__)
    //todo
#else
#ifdef __ANDROID__
    if (mNativeWindow)
        Surface_close(mNativeWindow);
#else
    Surface_close();
#endif
    mSurface = NULL;
#endif
    LOGD("FFRender destructor");
}

//For debug
void FFRender::saveFrameRGB(void* data, int stride, int height, char* path)
{
    if(path==NULL) return;

    FILE *pFile = NULL;
    LOGD("Start open file %s", path);
    pFile = fopen(path, "wb");
    if(pFile == NULL) {
        LOGE("open file %s failed", path);
        return;
    }
    LOGD("open file %s success", path);

    fwrite(data, 1, stride*height, pFile);
    fclose(pFile);
}

#if defined(__ANDROID__) && defined(__aarch64__)
// Convert I420 to ABGR.
static int I420ToABGR(const uint8_t* src_y, int src_stride_y,
                      const uint8_t* src_u, int src_stride_u,
                      const uint8_t* src_v, int src_stride_v,
                      uint8_t* dst_argb, int dst_stride_argb,
                      int width, int height)
{
    int y;
    for (y = 0; y < height; ++y) {
        libyuv::I422ToABGRRow_NEON(src_y, src_u, src_v, dst_argb, width);
        dst_argb += dst_stride_argb;
        src_y += src_stride_y;
        if (y & 1) {
            src_u += src_stride_u;
            src_v += src_stride_v;
        }
    }
    return 0;
}
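The row loop above drives libyuv's NEON row kernel by hand, advancing the chroma pointers every second row so the I422 row converter effectively performs an I420 conversion. Where the hand-rolled loop is not required, libyuv's high-level converter does the same job and selects a SIMD path internally; a sketch, assuming libyuv/convert_argb.h is available in the build:

#include "libyuv/convert_argb.h"

// Sketch: one call replaces the manual row loop.
libyuv::I420ToABGR(src_y, src_stride_y,
                   src_u, src_stride_u,
                   src_v, src_stride_v,
                   dst_argb, dst_stride_argb,
                   width, height);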
Example #3
static int config(struct vf_instance *vf,
        int width, int height, int d_width, int d_height,
	unsigned int flags, unsigned int outfmt){
    unsigned int best=find_best_out(vf, outfmt);
    int vo_flags;
    int int_sws_flags=0;
    int round_w=0, round_h=0;
    int i;
    SwsFilter *srcFilter, *dstFilter;
    enum PixelFormat dfmt, sfmt;

    if(!best){
	mp_msg(MSGT_VFILTER,MSGL_WARN,"SwScale: no supported outfmt found :(\n");
	return 0;
    }
    sfmt = imgfmt2pixfmt(outfmt);
    if (outfmt == IMGFMT_RGB8 || outfmt == IMGFMT_BGR8) sfmt = PIX_FMT_PAL8;
    dfmt = imgfmt2pixfmt(best);

    vo_flags=vf->next->query_format(vf->next,best);

    // scale to d_width x d_height, if all of these are true:
    // - option -zoom
    // - no other sw/hw up/down scaling available
    // - we're after postproc
    // - user didn't set w:h
    if(!(vo_flags&VFCAP_POSTPROC) && (flags&4) &&
	    vf->priv->w<0 && vf->priv->h<0){	// -zoom
	int x=(vo_flags&VFCAP_SWSCALE) ? 0 : 1;
	if(d_width<width || d_height<height){
	    // downscale!
	    if(vo_flags&VFCAP_HWSCALE_DOWN) x=0;
	} else {
	    // upscale:
	    if(vo_flags&VFCAP_HWSCALE_UP) x=0;
	}
	if(x){
	    // user wants sw scaling! (-zoom)
	    vf->priv->w=d_width;
	    vf->priv->h=d_height;
	}
    }

    if(vf->priv->noup){
        if((vf->priv->w > width) + (vf->priv->h > height) >= vf->priv->noup){
            vf->priv->w= width;
            vf->priv->h= height;
        }
    }

    if (vf->priv->w <= -8) {
      vf->priv->w += 8;
      round_w = 1;
    }
    if (vf->priv->h <= -8) {
      vf->priv->h += 8;
      round_h = 1;
    }

    if (vf->priv->w < -3 || vf->priv->h < -3 ||
         (vf->priv->w < -1 && vf->priv->h < -1)) {
      // TODO: establish a direct connection to the user's brain
      // and find out what the heck he thinks MPlayer should do
      // with this nonsense.
      mp_msg(MSGT_VFILTER, MSGL_ERR, "SwScale: EUSERBROKEN Check your parameters, they make no sense!\n");
      return 0;
    }

    if (vf->priv->w == -1)
      vf->priv->w = width;
    if (vf->priv->w == 0)
      vf->priv->w = d_width;

    if (vf->priv->h == -1)
      vf->priv->h = height;
    if (vf->priv->h == 0)
      vf->priv->h = d_height;

    if (vf->priv->w == -3)
      vf->priv->w = vf->priv->h * width / height;
    if (vf->priv->w == -2)
      vf->priv->w = vf->priv->h * d_width / d_height;

    if (vf->priv->h == -3)
      vf->priv->h = vf->priv->w * height / width;
    if (vf->priv->h == -2)
      vf->priv->h = vf->priv->w * d_height / d_width;

    if (round_w)
      vf->priv->w = ((vf->priv->w + 8) / 16) * 16;
    if (round_h)
      vf->priv->h = ((vf->priv->h + 8) / 16) * 16;

    // calculate the missing parameters:
    switch(best) {
    case IMGFMT_YV12:		/* YV12 needs w & h rounded to 2 */
    case IMGFMT_I420:
    case IMGFMT_IYUV:
    case IMGFMT_NV12:
    case IMGFMT_NV21:
      vf->priv->h = (vf->priv->h + 1) & ~1;
      /* fall through: width must be rounded too */
    case IMGFMT_YUY2:		/* YUY2 needs w rounded to 2 */
    case IMGFMT_UYVY:
      vf->priv->w = (vf->priv->w + 1) & ~1;
    }

    mp_msg(MSGT_VFILTER,MSGL_DBG2,"SwScale: scaling %dx%d %s to %dx%d %s  \n",
	width,height,vo_format_name(outfmt),
	vf->priv->w,vf->priv->h,vo_format_name(best));

    // free old ctx:
    if(vf->priv->ctx) sws_freeContext(vf->priv->ctx);
    if(vf->priv->ctx2)sws_freeContext(vf->priv->ctx2);

    // new swscaler:
    sws_getFlagsAndFilterFromCmdLine(&int_sws_flags, &srcFilter, &dstFilter);
    int_sws_flags|= vf->priv->v_chr_drop << SWS_SRC_V_CHR_DROP_SHIFT;
    int_sws_flags|= vf->priv->accurate_rnd * SWS_ACCURATE_RND;
    vf->priv->ctx=sws_getContext(width, height >> vf->priv->interlaced,
	    sfmt,
		  vf->priv->w, vf->priv->h >> vf->priv->interlaced,
	    dfmt,
	    int_sws_flags | get_sws_cpuflags(), srcFilter, dstFilter, vf->priv->param);
    if(vf->priv->interlaced){
        vf->priv->ctx2=sws_getContext(width, height >> 1,
	    sfmt,
		  vf->priv->w, vf->priv->h >> 1,
	    dfmt,
	    int_sws_flags | get_sws_cpuflags(), srcFilter, dstFilter, vf->priv->param);
    }
Example #4
int main(int argc, char* argv[])
{
	AVCodec *pCodec;
    AVCodecContext *pCodecCtx= NULL;
	AVCodecParserContext *pCodecParserCtx=NULL;

    int frame_count;
    FILE *fp_in;
	FILE *fp_out;
    AVFrame	*pFrame,*pFrameYUV;
	uint8_t *out_buffer;
	const int in_buffer_size=4096;
	uint8_t in_buffer[in_buffer_size + FF_INPUT_BUFFER_PADDING_SIZE]={0};
	uint8_t *cur_ptr;
	int cur_size;

    AVPacket packet;
	int ret, got_picture;
	
	int y_size;

#if TEST_HEVC
	enum AVCodecID codec_id=AV_CODEC_ID_HEVC;
	char filepath_in[]="bigbuckbunny_480x272.hevc";
#elif TEST_H264
	enum AVCodecID codec_id=AV_CODEC_ID_H264;
	char filepath_in[]="bigbuckbunny_480x272.h264";
#else
	enum AVCodecID codec_id=AV_CODEC_ID_MPEG2VIDEO;
	char filepath_in[]="bigbuckbunny_480x272.m2v";
#endif

	char filepath_out[]="bigbuckbunny_480x272.yuv";
	int first_time=1;

	struct SwsContext *img_convert_ctx;

	//av_log_set_level(AV_LOG_DEBUG);
	
	avcodec_register_all();

    pCodec = avcodec_find_decoder(codec_id);
    if (!pCodec) {
        printf("Codec not found\n");
        return -1;
    }
    pCodecCtx = avcodec_alloc_context3(pCodec);
    if (!pCodecCtx){
        printf("Could not allocate video codec context\n");
        return -1;
    }

	pCodecParserCtx=av_parser_init(codec_id);
	if (!pCodecParserCtx){
		printf("Could not allocate video parser context\n");
		return -1;
	}

    //if(pCodec->capabilities&CODEC_CAP_TRUNCATED)
    //    pCodecCtx->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */
    
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        printf("Could not open codec\n");
        return -1;
    }
	//Input File
    fp_in = fopen(filepath_in, "rb");
    if (!fp_in) {
        printf("Could not open input stream\n");
        return -1;
    }
	//Output File
	fp_out = fopen(filepath_out, "wb");
	if (!fp_out) {
		printf("Could not open output YUV file\n");
		return -1;
	}

    pFrame = av_frame_alloc();
	av_init_packet(&packet);


	while (1) {
        cur_size = fread(in_buffer, 1, in_buffer_size, fp_in);
        if (cur_size == 0)
            break;
        cur_ptr=in_buffer;

        while (cur_size>0){

			int len = av_parser_parse2(
				pCodecParserCtx, pCodecCtx,
				&packet.data, &packet.size,
				cur_ptr , cur_size ,
				AV_NOPTS_VALUE, AV_NOPTS_VALUE, AV_NOPTS_VALUE);

			cur_ptr += len;
			cur_size -= len;

			if(packet.size==0)
				continue;

			//Some Info from AVCodecParserContext
			printf("Packet Size:%6d\t",packet.size);
			switch(pCodecParserCtx->pict_type){
				case AV_PICTURE_TYPE_I: printf("Type: I\t");break;
				case AV_PICTURE_TYPE_P: printf("Type: P\t");break;
				case AV_PICTURE_TYPE_B: printf("Type: B\t");break;
				default: printf("Type: Other\t");break;
			}
			printf("Output Number:%4d\t",pCodecParserCtx->output_picture_number);
			printf("Offset:%lld\n",pCodecParserCtx->cur_offset);

			ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, &packet);
			if (ret < 0) {
				printf("Decode Error.\n");
				return ret;
			}
			if (got_picture) {
				if(first_time){
					printf("\nCodec Full Name:%s\n",pCodecCtx->codec->long_name);
					printf("width:%d\nheight:%d\n\n",pCodecCtx->width,pCodecCtx->height);
					//SwsContext
					img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, 
						pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); 
					
					pFrameYUV=av_frame_alloc();
					out_buffer=(uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
					avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
					
					y_size=pCodecCtx->width*pCodecCtx->height;

					first_time=0;
				}

				printf("Succeed to decode 1 frame!\n");
				sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, 
					pFrameYUV->data, pFrameYUV->linesize);

				fwrite(pFrameYUV->data[0],1,y_size,fp_out);     //Y 
				fwrite(pFrameYUV->data[1],1,y_size/4,fp_out);   //U
				fwrite(pFrameYUV->data[2],1,y_size/4,fp_out);   //V
			}
		}

    }

	//Flush Decoder
    packet.data = NULL;
    packet.size = 0;
	while(1){
		ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, &packet);
		if (ret < 0) {
			printf("Decode Error.\n");
			return ret;
		}
		if (!got_picture)
			break;
		if (got_picture) {
			printf("Flush Decoder: Succeed to decode 1 frame!\n");
			sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, 
				pFrameYUV->data, pFrameYUV->linesize);

			fwrite(pFrameYUV->data[0],1,y_size,fp_out);     //Y
			fwrite(pFrameYUV->data[1],1,y_size/4,fp_out);   //U
			fwrite(pFrameYUV->data[2],1,y_size/4,fp_out);   //V
		}
	}

    fclose(fp_in);
	fclose(fp_out);
    
	sws_freeContext(img_convert_ctx);
	av_parser_close(pCodecParserCtx);

	av_frame_free(&pFrameYUV);
	av_frame_free(&pFrame);
	avcodec_close(pCodecCtx);
	av_free(pCodecCtx);

	return 0;
}
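avcodec_decode_video2 and the avpicture_* helpers used above are deprecated in current FFmpeg. A minimal sketch of the same decode step on the send/receive API introduced in FFmpeg 3.1 (an assumption; variable names reuse those above):

// Sketch (FFmpeg >= 3.1): one packet in, zero or more frames out.
ret = avcodec_send_packet(pCodecCtx, &packet);
while (ret >= 0) {
    ret = avcodec_receive_frame(pCodecCtx, pFrame);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        break;
    if (ret < 0)
        return ret;
    sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data,
              pFrame->linesize, 0, pCodecCtx->height,
              pFrameYUV->data, pFrameYUV->linesize);
}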
Example #5
bool VideoPlayer::playback()
{
    clock.restart();

    while (!stopPlayback && pFormatCtx && av_read_frame(pFormatCtx, &(packet))>=0)
    {
        if (!stopPlayback && (packet.stream_index == streamIndex))
        {
            AVPacket avpkt;
            av_init_packet(&avpkt);
            avpkt.data = packet.data;
            avpkt.size = packet.size;
            avcodec_decode_video2(pCodecCtx, pFrame, &(frameFinished), &avpkt);

            double pts = 0;

            if (packet.dts == AV_NOPTS_VALUE && pFrame->opaque && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE)
            {
                pts = *(uint64_t *)pFrame->opaque;
            } else 
            if (packet.dts != AV_NOPTS_VALUE) 
            {
                pts = packet.dts;
            } else 
            {
                pts = 0;
            }

            pts *= av_q2d(video_st->time_base);

            if (frameFinished)
            {
                dd->boundingWidth = dd->boundingRect().width();
                dd->boundingHeight = dd->boundingRect().height();

                if (dd->boundingWidth > screenWidth)
                {
                    dd->boundingWidth = screenWidth;
                }

                if (dd->boundingHeight > screenHeight)
                {
                    dd->boundingHeight = screenHeight;
                }

                int useFilter = SWS_FAST_BILINEAR;

                switch (dd->m_swsFilter)
                {
                    case DD_F_FAST_BILINEAR: useFilter = SWS_FAST_BILINEAR; break;
                    case DD_F_BILINEAR: useFilter = SWS_BILINEAR; break;
                    case DD_F_BICUBIC: useFilter = SWS_BICUBIC; break;
                    case DD_F_X: useFilter = SWS_X; break;
                    case DD_F_POINT: useFilter = SWS_POINT; break;
                    case DD_F_AREA: useFilter = SWS_AREA; break;
                    case DD_F_BICUBLIN: useFilter = SWS_BICUBLIN; break;
                    case DD_F_GAUSS: useFilter = SWS_GAUSS; break;
                    case DD_F_SINC: useFilter = SWS_SINC; break;
                    case DD_F_LANCZOS: useFilter = SWS_LANCZOS; break;
                    case DD_F_SPLINE: useFilter = SWS_SPLINE; break;
                }

                SwsContext *img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, dd->boundingWidth, dd->boundingHeight, PIX_FMT_RGB32, useFilter, NULL, NULL, NULL);

                dd->mutex->lock();

                sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, dd->pFrameRGB->data, dd->pFrameRGB->linesize);
                sws_freeContext(img_convert_ctx);

                dd->mutex->unlock();

                pts = synchronize_video(pts);
                double delay = 0;

                switch (dd->m_fpsRate)
                {
                    case DD_FPS_AUTO: delay = (pts - last_pts); break;
                    case DD_FPS_LIMIT_30: delay = 0.0333; break;
                    case DD_FPS_LIMIT_25: delay = 0.04; break;
                    case DD_FPS_LIMIT_20: delay = 0.05; break;
                    case DD_FPS_LIMIT_15: delay = 0.0666; break;
                    case DD_FPS_LIMIT_10: delay = 0.1; break;
                }

                if (delay <= 0 || delay >= 1.0)
                {
                    delay = last_delay;
                }

                last_pts = pts;
                last_delay = delay;

                int elapsed = clock.restart();

                int wait = (delay*1000)-elapsed;

                dd->updateFrame();

                if (wait > 0)
                {
                    QThread::msleep(wait);
                }

                clock.restart();

            }
        }

        av_free_packet(&(packet));
    }

    if (pFormatCtx)
    { 
        av_seek_frame(pFormatCtx, streamIndex, 0,  AVSEEK_FLAG_FRAME);
        return true;
    } else
    {
        return false;
    }
}
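playback() creates and frees a fresh SwsContext for every decoded frame, which is wasteful. sws_getCachedContext keeps the previous context and only reallocates when a parameter changes; a sketch, assuming a member SwsContext *m_swsCtx initialized to NULL and freed once with sws_freeContext on shutdown:

// Sketch: reuse the scaler; reallocation happens only on size/format/filter change.
m_swsCtx = sws_getCachedContext(m_swsCtx,
        pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
        dd->boundingWidth, dd->boundingHeight, PIX_FMT_RGB32,
        useFilter, NULL, NULL, NULL);
sws_scale(m_swsCtx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
          dd->pFrameRGB->data, dd->pFrameRGB->linesize);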
Example #6
File: vo_x11.c Project: kax4/mpv
static int config(struct vo *vo, uint32_t width, uint32_t height,
                  uint32_t d_width, uint32_t d_height, uint32_t flags,
                  uint32_t format)
{
    struct priv *p = vo->priv;

    Colormap theCmap;
    const struct fmt2Xfmtentry_s *fmte = fmt2Xfmt;

#ifdef CONFIG_XF86VM
    int vm = flags & VOFLAG_MODESWITCHING;
#endif
    p->Flip_Flag = flags & VOFLAG_FLIPPING;
    p->zoomFlag = 1;

    p->old_vo_dwidth = -1;
    p->old_vo_dheight = -1;

    p->in_format = format;
    p->srcW = width;
    p->srcH = height;

    XGetWindowAttributes(vo->x11->display, vo->x11->rootwin, &p->attribs);
    p->depth = p->attribs.depth;

    if (p->depth != 15 && p->depth != 16 && p->depth != 24 && p->depth != 32) {
        Visual *visual;

        p->depth = vo_find_depth_from_visuals(vo->x11->display, vo->x11->screen,
                                              &visual);
    }
    if (!XMatchVisualInfo(vo->x11->display, vo->x11->screen, p->depth,
                          DirectColor, &p->vinfo)
         || (WinID > 0
             && p->vinfo.visualid != XVisualIDFromVisual(p->attribs.visual)))
    {
        XMatchVisualInfo(vo->x11->display, vo->x11->screen, p->depth, TrueColor,
                         &p->vinfo);
    }

    /* set image size (which is indeed neither the input nor output size),
       if zoom is on it will be changed during draw_slice anyway so we don't
       duplicate the aspect code here
     */
    p->image_width = (width + 7) & (~7);
    p->image_height = height;

    {
#ifdef CONFIG_XF86VM
        if (vm)
            vo_vm_switch(vo);

#endif
        theCmap = vo_x11_create_colormap(vo, &p->vinfo);

        vo_x11_create_vo_window(vo, &p->vinfo, vo->dx, vo->dy, vo->dwidth,
                                vo->dheight, flags, theCmap, "x11");
        if (WinID > 0)
            p->depth = vo_x11_update_geometry(vo, true);

#ifdef CONFIG_XF86VM
        if (vm) {
            /* Grab the mouse pointer in our window */
            if (vo_grabpointer)
                XGrabPointer(vo->x11->display, vo->x11->window, True, 0,
                             GrabModeAsync, GrabModeAsync,
                             vo->x11->window, None, CurrentTime);
            XSetInputFocus(vo->x11->display, vo->x11->window, RevertToNone,
                           CurrentTime);
        }
#endif
    }

    if (p->myximage) {
        freeMyXImage(p);
        sws_freeContext(p->swsContext);
    }
    getMyXImage(p);

    while (fmte->mpfmt) {
        int depth = IMGFMT_RGB_DEPTH(fmte->mpfmt);
        /* bits_per_pixel in X seems to be set to 16 for 15 bit formats
           => force depth to 16 so that only the color masks are used for the format check */
        if (depth == 15)
            depth = 16;

        if (depth == p->myximage->bits_per_pixel &&
            fmte->byte_order == p->myximage->byte_order &&
            fmte->red_mask == p->myximage->red_mask &&
            fmte->green_mask == p->myximage->green_mask &&
            fmte->blue_mask == p->myximage->blue_mask)
            break;
        fmte++;
    }
    if (!fmte->mpfmt) {
        mp_msg(
            MSGT_VO, MSGL_ERR,
            "X server image format not supported, please contact the developers\n");
        return -1;
    }
    p->out_format = fmte->mpfmt;
    p->bpp = p->myximage->bits_per_pixel;
    p->out_offset = 0;
    // We can easily "emulate" non-native RGB32 and BGR32
    if (p->out_format == (IMGFMT_BGR32 | 128)
        || p->out_format == (IMGFMT_RGB32 | 128))
    {
        p->out_format &= ~128;
#if BYTE_ORDER == BIG_ENDIAN
        p->out_offset = 1;
#else
        p->out_offset = -1;
#endif
    }

    /* always allocate swsContext as size could change between frames */
    p->swsContext = sws_getContextFromCmdLine(width, height, p->in_format,
                                              width, height, p->out_format);
    if (!p->swsContext)
        return -1;

    p->dst_width = width;

    return 0;
}
Example #7
static int config(uint32_t width, uint32_t height, uint32_t d_width,
                       uint32_t d_height, uint32_t flags, char *title,
                       uint32_t format)
{
// int screen;

// int interval, prefer_blank, allow_exp, nothing;
    Colormap theCmap;
    const struct fmt2Xfmtentry_s *fmte = fmt2Xfmt;

#ifdef CONFIG_XF86VM
    int vm = flags & VOFLAG_MODESWITCHING;
#endif
    Flip_Flag = flags & VOFLAG_FLIPPING;
    zoomFlag = flags & VOFLAG_SWSCALE;

    old_vo_dwidth = -1;
    old_vo_dheight = -1;

    int_pause = 0;
    if (!title)
        title = "MPlayer X11 (XImage/Shm) render";

    in_format = format;
    srcW = width;
    srcH = height;

    XGetWindowAttributes(mDisplay, mRootWin, &attribs);
    depth = attribs.depth;

    if (depth != 15 && depth != 16 && depth != 24 && depth != 32)
    {
        Visual *visual;

        depth = vo_find_depth_from_visuals(mDisplay, mScreen, &visual);
    }
    if (!XMatchVisualInfo(mDisplay, mScreen, depth, DirectColor, &vinfo) ||
        (WinID > 0
         && vinfo.visualid != XVisualIDFromVisual(attribs.visual)))
        XMatchVisualInfo(mDisplay, mScreen, depth, TrueColor, &vinfo);

    /* set image size (which is indeed neither the input nor output size),
       if zoom is on it will be changed during draw_slice anyway so we don't duplicate the aspect code here
     */
    image_width = (width + 7) & (~7);
    image_height = height;

    {
#ifdef CONFIG_XF86VM
        if (vm)
        {
            vo_vm_switch();
        }
#endif

        theCmap = vo_x11_create_colormap(&vinfo);

        vo_x11_create_vo_window(&vinfo, vo_dx, vo_dy, vo_dwidth, vo_dheight,
                                flags, theCmap, "x11", title);
        if (WinID > 0)
            depth = vo_x11_update_geometry();

#ifdef CONFIG_XF86VM
        if (vm)
        {
            /* Grab the mouse pointer in our window */
            if (vo_grabpointer)
                XGrabPointer(mDisplay, vo_window, True, 0,
                             GrabModeAsync, GrabModeAsync,
                             vo_window, None, CurrentTime);
            XSetInputFocus(mDisplay, vo_window, RevertToNone, CurrentTime);
        }
#endif
    }

    if (myximage)
    {
        freeMyXImage();
        sws_freeContext(swsContext);
    }
    getMyXImage();

    while (fmte->mpfmt) {
      int depth = IMGFMT_RGB_DEPTH(fmte->mpfmt);
      /* bits_per_pixel in X seems to be set to 16 for 15 bit formats
         => force depth to 16 so that only the color masks are used for the format check */
      if (depth == 15)
          depth = 16;

      if (depth            == myximage->bits_per_pixel &&
          fmte->byte_order == myximage->byte_order &&
          fmte->red_mask   == myximage->red_mask   &&
          fmte->green_mask == myximage->green_mask &&
          fmte->blue_mask  == myximage->blue_mask)
        break;
      fmte++;
    }
    if (!fmte->mpfmt) {
      mp_msg(MSGT_VO, MSGL_ERR,
             "X server image format not supported, please contact the developers\n");
      return -1;
    }
    out_format = fmte->mpfmt;
    switch ((bpp = myximage->bits_per_pixel))
    {
        case 24:
            draw_alpha_fnc = draw_alpha_24;
            break;
        case 32:
            draw_alpha_fnc = draw_alpha_32;
            break;
        case 15:
        case 16:
            if (depth == 15)
                draw_alpha_fnc = draw_alpha_15;
            else
                draw_alpha_fnc = draw_alpha_16;
            break;
        default:
            draw_alpha_fnc = draw_alpha_null;
    }
    out_offset = 0;
    // for these formats conversion is currently not supported and
    // we can easily "emulate" them.
    if (out_format & 64 && (IMGFMT_IS_RGB(out_format) || IMGFMT_IS_BGR(out_format))) {
      out_format &= ~64;
#if HAVE_BIGENDIAN
      out_offset = 1;
#else
      out_offset = -1;
#endif
    }

    /* always allocate swsContext as size could change between frames */
    swsContext =
        sws_getContextFromCmdLine(width, height, in_format, width, height,
                                  out_format);
    if (!swsContext)
        return -1;

    dst_width = width;
    //printf( "X11 bpp: %d  color mask:  R:%lX  G:%lX  B:%lX\n",bpp,myximage->red_mask,myximage->green_mask,myximage->blue_mask );

    return 0;
}
Example #8
bool FFmpegVideoDecoder::nextFrame( CBaseTexture * texture )
{
  // Just in case
  if ( !m_pCodecCtx )
	return false;

  // If we did not preallocate the picture or the texture size changed, (re)allocate it
  if ( !m_pFrameRGB || texture->GetWidth() != m_frameRGBwidth || texture->GetHeight() != m_frameRGBheight )
  {
    if ( m_pFrameRGB )
    {
      avpicture_free( m_pFrameRGB );
      av_free( m_pFrameRGB );
    }

    m_frameRGBwidth = texture->GetWidth();
    m_frameRGBheight = texture->GetHeight();

    // Allocate the conversion frame and relevant picture
    m_pFrameRGB = (AVPicture*)av_mallocz(sizeof(AVPicture));

    if ( !m_pFrameRGB )
      return false;

    // Due to a bug in swscale we need to allocate one extra line of data
    if ( avpicture_alloc( m_pFrameRGB, PIX_FMT_RGB32, m_frameRGBwidth, m_frameRGBheight + 1 ) < 0 )
      return false;
  }

  AVPacket packet;
  int frameFinished;

  while ( true )
  {
    // Read a frame
    if ( av_read_frame( m_pFormatCtx, &packet ) < 0 )
      return false;  // Frame read failed (e.g. end of stream)

    if ( packet.stream_index == m_videoStream )
    {
      // Is this a packet from the video stream -> decode video frame
      avcodec_decode_video2( m_pCodecCtx, m_pFrame, &frameFinished, &packet );

      // Did we get a video frame?
      if ( frameFinished )
      {
        if ( packet.dts != (int64_t)AV_NOPTS_VALUE )
	  m_lastFrameTime = packet.dts * av_q2d( m_pFormatCtx->streams[ m_videoStream ]->time_base );
        else
	   m_lastFrameTime = 0.0;

	break;
      }
    }

    av_free_packet( &packet );
  }

  // We got the video frame, render it into the picture buffer
  struct SwsContext * context = sws_getContext( m_pCodecCtx->width, m_pCodecCtx->height, m_pCodecCtx->pix_fmt,
                           m_frameRGBwidth, m_frameRGBheight, PIX_FMT_RGB32, SWS_FAST_BILINEAR, NULL, NULL, NULL );

  sws_scale( context, m_pFrame->data, m_pFrame->linesize, 0, m_pCodecCtx->height,
                                                                     m_pFrameRGB->data, m_pFrameRGB->linesize );
  sws_freeContext( context );
  av_free_packet( &packet );

  // And into the texture
  texture->Update( m_frameRGBwidth, m_frameRGBheight, m_frameRGBwidth * 4, XB_FMT_A8R8G8B8, m_pFrameRGB->data[0], false );

  return true;
}
Example #9
bool CFFmpegImage::Decode(unsigned char * const pixels, unsigned int width, unsigned int height,
                          unsigned int pitch, unsigned int format)
{
  if (m_width == 0 || m_height == 0 || format != XB_FMT_A8R8G8B8)
    return false;

  if (!m_pFrame || !m_pFrame->data[0])
  {
    CLog::LogFunction(LOGERROR, __FUNCTION__, "AVFrame member not allocated");
    return false;
  }

  AVPicture* pictureRGB = static_cast<AVPicture*>(av_mallocz(sizeof(AVPicture)));
  if (!pictureRGB)
  {
    CLog::LogFunction(LOGERROR, __FUNCTION__, "AVPicture could not be allocated");
    return false;
  }

  int size = avpicture_fill(pictureRGB, NULL, AV_PIX_FMT_RGB32, width, height);
  if (size < 0)
  {
    CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate AVPicture member with %i x %i pixes", width, height);
    av_free(pictureRGB);
    return false;
  }

  bool needsCopy = false;
  int pixelsSize = pitch * height;
  if (size == pixelsSize && (int) pitch == pictureRGB->linesize[0])
  {
    // We can use the pixels buffer directly
    pictureRGB->data[0] = pixels;
  }
  else
  {
    // We need an extra buffer and copy it manually afterwards
    if (avpicture_alloc(pictureRGB, AV_PIX_FMT_RGB32, width, height) < 0)
    {
      CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate temp buffer of size %i bytes", size);
      av_free(pictureRGB);
      return false;
    }
    needsCopy = true;
  }

  // JPEG formats in particular are full range, so we need to take care of that here
  // Input formats like RGBA are handled correctly automatically
  AVColorRange range = av_frame_get_color_range(m_pFrame);
  AVPixelFormat pixFormat = ConvertFormats(m_pFrame);

  // assumes square maximums, e.g. 2048x2048
  float ratio = m_width / (float) m_height;
  unsigned int nHeight = m_originalHeight;
  unsigned int nWidth = m_originalWidth;
  if (nHeight > height)
  {
    nHeight = height;
    nWidth = (unsigned int) (nHeight * ratio + 0.5f);
  }
  if (nWidth > width)
  {
    nWidth = width;
    nHeight = (unsigned int) (nWidth / ratio + 0.5f);
  }

  struct SwsContext* context = sws_getContext(m_originalWidth, m_originalHeight, pixFormat,
    nWidth, nHeight, AV_PIX_FMT_RGB32, SWS_BICUBIC, NULL, NULL, NULL);

  if (range == AVCOL_RANGE_JPEG)
  {
    int* inv_table = nullptr;
    int* table = nullptr;
    int srcRange, dstRange, brightness, contrast, saturation;
    sws_getColorspaceDetails(context, &inv_table, &srcRange, &table, &dstRange, &brightness, &contrast, &saturation);
    srcRange = 1;
    sws_setColorspaceDetails(context, inv_table, srcRange, table, dstRange, brightness, contrast, saturation);
  }

  sws_scale(context, m_pFrame->data, m_pFrame->linesize, 0, m_originalHeight,
    pictureRGB->data, pictureRGB->linesize);
  sws_freeContext(context);

  if (needsCopy)
  {
    int minPitch = std::min((int)pitch, pictureRGB->linesize[0]);
    if (minPitch < 0)
    {
      CLog::LogFunction(LOGERROR, __FUNCTION__, "negative pitch or height");
      av_free(pictureRGB);
      return false;
    }
    const unsigned char *src = pictureRGB->data[0];
    unsigned char* dst = pixels;

    for (unsigned int y = 0; y < nHeight; y++)
    {
      memcpy(dst, src, minPitch);
      src += pictureRGB->linesize[0];
      dst += pitch;
    }

    avpicture_free(pictureRGB);
  }
  pictureRGB->data[0] = nullptr;
  avpicture_free(pictureRGB);
  av_free(pictureRGB);

  // update width and height original dimensions are kept
  m_height = nHeight;
  m_width = nWidth;

  return true;
}
Example #10
void FreeEncoder(x264_t* encoder) {
	x264_encoder_close(encoder);
	sws_freeContext(convertCtx);
}
Example #11
void FreeFfmpeg(AVCodecContext* ctx) {
	avcodec_close(ctx);
	av_frame_free(&av_frame_rgba);
	av_frame_free(&av_frame);
	sws_freeContext(convert_context);
}
Example #12
 ~ImageConverterFFPrivate() {
     if (sws_ctx) {
         sws_freeContext(sws_ctx);
         sws_ctx = 0;
     }
 }
Example #13
RasterRenderPrivate::~RasterRenderPrivate()
{
	sws_freeContext(swsctx);
	delete srcFrame;
	delete dstFrame;
}
Example #14
static int output_single_frame(AVFilterContext *ctx, AVFrame *in, double *var_values, int i,
                               double *zoom, double *dx, double *dy)
{
    ZPContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int64_t pts = s->frame_count;
    int k, x, y, w, h, ret = 0;
    uint8_t *input[4];
    int px[4], py[4];
    AVFrame *out;

    var_values[VAR_PX]    = s->x;
    var_values[VAR_PY]    = s->y;
    var_values[VAR_PZOOM] = s->prev_zoom;
    var_values[VAR_PDURATION] = s->prev_nb_frames;
    var_values[VAR_TIME] = pts * av_q2d(outlink->time_base);
    var_values[VAR_FRAME] = i;
    var_values[VAR_ON] = outlink->frame_count + 1;
    if ((ret = av_expr_parse_and_eval(zoom, s->zoom_expr_str,
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        return ret;

    *zoom = av_clipd(*zoom, 1, 10);
    var_values[VAR_ZOOM] = *zoom;
    w = in->width * (1.0 / *zoom);
    h = in->height * (1.0 / *zoom);

    if ((ret = av_expr_parse_and_eval(dx, s->x_expr_str,
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        return ret;
    x = *dx = av_clipd(*dx, 0, FFMAX(in->width - w, 0));
    var_values[VAR_X] = *dx;
    x &= ~((1 << s->desc->log2_chroma_w) - 1);

    if ((ret = av_expr_parse_and_eval(dy, s->y_expr_str,
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        return ret;
    y = *dy = av_clipd(*dy, 0, FFMAX(in->height - h, 0));
    var_values[VAR_Y] = *dy;
    y &= ~((1 << s->desc->log2_chroma_h) - 1);

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        ret = AVERROR(ENOMEM);
        return ret;
    }

    px[1] = px[2] = AV_CEIL_RSHIFT(x, s->desc->log2_chroma_w);
    px[0] = px[3] = x;

    py[1] = py[2] = AV_CEIL_RSHIFT(y, s->desc->log2_chroma_h);
    py[0] = py[3] = y;

    s->sws = sws_alloc_context();
    if (!s->sws) {
        ret = AVERROR(ENOMEM);
        return ret;
    }

    for (k = 0; in->data[k]; k++)
        input[k] = in->data[k] + py[k] * in->linesize[k] + px[k];

    av_opt_set_int(s->sws, "srcw", w, 0);
    av_opt_set_int(s->sws, "srch", h, 0);
    av_opt_set_int(s->sws, "src_format", in->format, 0);
    av_opt_set_int(s->sws, "dstw", outlink->w, 0);
    av_opt_set_int(s->sws, "dsth", outlink->h, 0);
    av_opt_set_int(s->sws, "dst_format", outlink->format, 0);
    av_opt_set_int(s->sws, "sws_flags", SWS_BICUBIC, 0);

    if ((ret = sws_init_context(s->sws, NULL, NULL)) < 0)
        return ret;

    sws_scale(s->sws, (const uint8_t *const *)&input, in->linesize, 0, h, out->data, out->linesize);

    out->pts = pts;
    s->frame_count++;

    ret = ff_filter_frame(outlink, out);
    sws_freeContext(s->sws);
    s->sws = NULL;
    s->current_frame++;
    return ret;
}
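The sws_alloc_context / av_opt_set_int / sws_init_context sequence above is equivalent to a single sws_getContext call with the same parameters; a sketch:

// Sketch: same scaler built in one call (alloc + options + init).
s->sws = sws_getContext(w, h, (enum AVPixelFormat)in->format,
                        outlink->w, outlink->h,
                        (enum AVPixelFormat)outlink->format,
                        SWS_BICUBIC, NULL, NULL, NULL);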
Example #15
void *encode_video_thread(void *arg)
{
    INFO("Started encode video thread!");

    av_session_t *_phone = arg;

    _phone->running_encvid = 1;
    //CodecState *cs = get_cs_temp(_phone->av);
    AVPacket pkt1, *packet = &pkt1;
    //int p = 0;
    //int got_packet;
    int video_frame_finished;
    AVFrame *s_video_frame;
    AVFrame *webcam_frame;
    s_video_frame = avcodec_alloc_frame();
    webcam_frame = avcodec_alloc_frame();
    //AVPacket enc_video_packet;

    uint8_t *buffer;
    int numBytes;
    /* Determine required buffer size and allocate buffer */
    numBytes = avpicture_get_size(PIX_FMT_YUV420P, _phone->webcam_decoder_ctx->width, _phone->webcam_decoder_ctx->height);
    buffer = (uint8_t *)av_calloc(numBytes * sizeof(uint8_t), 1);
    avpicture_fill((AVPicture *)s_video_frame, buffer, PIX_FMT_YUV420P, _phone->webcam_decoder_ctx->width,
                   _phone->webcam_decoder_ctx->height);
    _phone->sws_ctx = sws_getContext(_phone->webcam_decoder_ctx->width, _phone->webcam_decoder_ctx->height,
                                     _phone->webcam_decoder_ctx->pix_fmt, _phone->webcam_decoder_ctx->width, _phone->webcam_decoder_ctx->height,
                                     PIX_FMT_YUV420P,
                                     SWS_BILINEAR, NULL, NULL, NULL);


    vpx_image_t *image =
        vpx_img_alloc(NULL, VPX_IMG_FMT_I420, _phone->webcam_decoder_ctx->width, _phone->webcam_decoder_ctx->height, 1);

    //uint32_t frame_counter = 0;
    while (_phone->running_encvid) {

        if (av_read_frame(_phone->video_format_ctx, packet) < 0) {
            printf("error reading frame\n");

            if (_phone->video_format_ctx->pb->error != 0)
                break;

            continue;
        }

        if (packet->stream_index == _phone->video_stream) {
            if (avcodec_decode_video2(_phone->webcam_decoder_ctx, webcam_frame, &video_frame_finished, packet) < 0) {
                printf("couldn't decode\n");
                continue;
            }

            av_free_packet(packet);
            sws_scale(_phone->sws_ctx, (uint8_t const * const *)webcam_frame->data, webcam_frame->linesize, 0,
                      _phone->webcam_decoder_ctx->height, s_video_frame->data, s_video_frame->linesize);
            /* create a new I-frame every 60 frames */
            //++p;
            /*
            if (p == 60) {

                s_video_frame->pict_type = AV_PICTURE_TYPE_BI ;
            } else if (p == 61) {
                s_video_frame->pict_type = AV_PICTURE_TYPE_I ;
                p = 0;
            } else {
                s_video_frame->pict_type = AV_PICTURE_TYPE_P ;
            }*/

            if (video_frame_finished) {
                memcpy(image->planes[VPX_PLANE_Y], s_video_frame->data[0],
                       s_video_frame->linesize[0] * _phone->webcam_decoder_ctx->height);
                memcpy(image->planes[VPX_PLANE_U], s_video_frame->data[1],
                       s_video_frame->linesize[1] * _phone->webcam_decoder_ctx->height / 2);
                memcpy(image->planes[VPX_PLANE_V], s_video_frame->data[2],
                       s_video_frame->linesize[2] * _phone->webcam_decoder_ctx->height / 2);
                toxav_send_video (_phone->av, image);
                //if (avcodec_encode_video2(cs->video_encoder_ctx, &enc_video_packet, s_video_frame, &got_packet) < 0) {
                /*if (vpx_codec_encode(&cs->v_encoder, image, frame_counter, 1, 0, 0) != VPX_CODEC_OK) {
                    printf("could not encode video frame\n");
                    continue;
                }
                ++frame_counter;

                vpx_codec_iter_t iter = NULL;
                vpx_codec_cx_pkt_t *pkt;
                while( (pkt = vpx_codec_get_cx_data(&cs->v_encoder, &iter)) ) {
                    if (pkt->kind == VPX_CODEC_CX_FRAME_PKT)
                        toxav_send_rtp_payload(_phone->av, TypeVideo, pkt->data.frame.buf, pkt->data.frame.sz);
                }*/
                //if (!got_packet) {
                //    continue;
                //}

                //if (!enc_video_packet.data) fprintf(stderr, "video packet data is NULL\n");

                //toxav_send_rtp_payload(_phone->av, TypeVideo, enc_video_packet.data, enc_video_packet.size);

                //av_free_packet(&enc_video_packet);
            }
        } else {
            av_free_packet(packet);
        }
    }

    vpx_img_free(image);

    /* clean up codecs */
    //pthread_mutex_lock(&cs->ctrl_mutex);
    av_free(buffer);
    av_free(webcam_frame);
    av_free(s_video_frame);
    sws_freeContext(_phone->sws_ctx);
    //avcodec_close(webcam_decoder_ctx);
    //avcodec_close(cs->video_encoder_ctx);
    //pthread_mutex_unlock(&cs->ctrl_mutex);

    _phone->running_encvid = -1;

    pthread_exit ( NULL );
}
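The three memcpy calls in the encode loop assume each vpx_image plane is packed with a stride equal to the corresponding AVFrame linesize. A stride-safe sketch of the same copy (assuming I420 layout on both sides):

// Sketch: copy row by row, honoring both the AVFrame linesize and the
// vpx_image stride; planes 1 and 2 are half width/height in I420.
int plane, row;
for (plane = 0; plane < 3; plane++) {
    int ph = _phone->webcam_decoder_ctx->height >> (plane ? 1 : 0);
    int pw = _phone->webcam_decoder_ctx->width  >> (plane ? 1 : 0);
    for (row = 0; row < ph; row++)
        memcpy(image->planes[plane] + row * image->stride[plane],
               s_video_frame->data[plane] + row * s_video_frame->linesize[plane],
               pw);
}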
Example #16
void *recorder_thread(void *ptr) {
  struct camera *cam = (struct camera *)ptr;
  struct motion_detection md;
  AVPacket packet;
  AVFrame *frame;
  int got_frame, ret;
  unsigned int cnt = 0;
  time_t first_activity = 0;
  time_t last_activity = 0;

  if(open_camera(cam) < 0)
    return NULL;
  if(open_output(cam) < 0)
    return NULL;

  av_dump_format(cam->context, 0, cam->context->filename, 0);
  av_dump_format(cam->output_context, 0, cam->output_context->filename, 1);

  md.cam = cam;

  md.prev = cvCreateImage(cvSize(cam->codec->width, cam->codec->height), IPL_DEPTH_8U, 1);
  md.cur  = cvCreateImage(cvSize(cam->codec->width, cam->codec->height), IPL_DEPTH_8U, 1);
  md.silh = cvCreateImage(cvSize(cam->codec->width, cam->codec->height), IPL_DEPTH_8U, 1);
  cvZero(md.prev);
  cvZero(md.cur);
  cvZero(md.silh);

  md.img_convert_ctx = sws_getContext(
    cam->codec->width, cam->codec->height, cam->codec->pix_fmt,
    cam->codec->width, cam->codec->height, PIX_FMT_GRAY8,
    SWS_BICUBIC, NULL, NULL, NULL);
  md.buffer = (uint8_t*)av_malloc(3 * cam->codec->width * cam->codec->height);

  int got_key_frame = 0, first_detection = 1;
  frame = avcodec_alloc_frame();
  if(!frame) {
    av_err_msg("avcodec_alloc_frame", 0);
    return NULL;
  }

  while(1) {
    cam->last_io = time(NULL);

    if((ret = av_read_frame(cam->context, &packet)) < 0) {
      if(ret == AVERROR_EOF) break;
      else av_err_msg("av_read_frame", ret);
    }

    if(packet.stream_index == cam->video_stream_index) {
      // start on keyframe
      if(!got_key_frame && !(packet.flags & AV_PKT_FLAG_KEY)) {
        continue;
      }
      got_key_frame = 1;

      avcodec_get_frame_defaults(frame);
      got_frame = 0;

      cnt = (cnt + 1) % cam->analize_frames;
      if(cnt == 0) {
        if((ret = avcodec_decode_video2(cam->codec, frame, &got_frame, &packet)) < 0)
          av_err_msg("avcodec_decode_video2", ret);

        if(got_frame) {
          if(detect_motion(&md, frame)) {
            if(first_activity == 0) first_activity = time(NULL);
            last_activity = time(NULL);
          } else {
            if(first_activity > 0 && time(NULL) - last_activity > cam->motion_delay) {
              if(!first_detection)
                db_create_event(cam->id, first_activity, last_activity);
              else
                first_detection = 0;
              first_activity = 0;
            }
          }
        }

        if(time(NULL) - cam->last_screenshot > 60 && (packet.flags & AV_PKT_FLAG_KEY)) {
          char fname[128];
          snprintf(fname, sizeof(fname), "%s/%s/screenshot.png", store_dir, cam->name);
          cvSaveImage(fname, md.cur, 0);
          cam->last_screenshot = time(NULL);
        }
      }

      packet.stream_index = cam->output_stream->id;
      if((ret = av_write_frame(cam->output_context, &packet)) < 0)
        av_err_msg("av_write_frame", ret);

      pthread_mutex_lock(&cam->consumers_lock);
      for(l1 *p = cam->cam_consumers_list; p != NULL; p = p->next) {
        struct cam_consumer *consumer = (struct cam_consumer *)p->value;
        if(!consumer->screen->active)
          continue;

        if(consumer->screen->tmpl_size == 1) {
          packet.stream_index = 0;
          if((ret = av_write_frame(consumer->screen->rtp_context, &packet)) < 0)
            av_err_msg("av_write_frame", ret);
        } else {
          if(!got_frame) {
            if((ret = avcodec_decode_video2(cam->codec, frame, &got_frame, &packet)) < 0) {
              av_err_msg("avcodec_decode_video2", ret);
              break;
            }
          }
          if(got_frame)
            copy_frame_to_consumer(frame, cam->codec->height, consumer);
        }
      }
      pthread_mutex_unlock(&cam->consumers_lock);
    }

    av_free_packet(&packet);
    
    if(!cam->active) {
      break;
    }

    if(time(NULL) - cam->file_started_at > 60 * 60) {
      db_update_videofile(cam);
      close_output(cam);
      open_output(cam);
      got_key_frame = 0;
    }
  }

  db_update_videofile(cam);
  close_output(cam);

  if((ret = avcodec_close(cam->codec)) < 0)
    av_err_msg("avcodec_close", ret);
  avformat_close_input(&cam->context);
  av_free(frame);

  cvReleaseImage(&md.prev);
  cvReleaseImage(&md.cur);
  cvReleaseImage(&md.silh);
  av_free(md.buffer);
  sws_freeContext(md.img_convert_ctx);

  return NULL;
}
Example #17
bool CRetroPlayerVideo::CheckConfiguration(const DVDVideoPicture &picture)
{
  const double framerate = 1 / picture.iDuration;

  if (g_renderManager.IsConfigured() &&
      m_outputWidth == picture.iWidth &&
      m_outputHeight == picture.iHeight &&
      m_outputFramerate == framerate)
  {
    // Already configured properly
    return true;
  }

  // Determine RenderManager flags
  unsigned int flags = 0;
  if (picture.color_range == 1)
    flags |= CONF_FLAGS_YUV_FULLRANGE;
  flags |= CONF_FLAGS_YUVCOEF_BT601; // picture.color_matrix = 4
  if (m_bAllowFullscreen)
  {
    flags |= CONF_FLAGS_FULLSCREEN;
    m_bAllowFullscreen = false; // only allow on first configure
  }

  CLog::Log(LOGDEBUG, "RetroPlayerVideo: Change configuration: %dx%d, %4.2f fps", picture.iWidth, picture.iHeight, framerate);

  int orientation = 0; // (90 = 5, 180 = 2, 270 = 7), if we ever want to use RETRO_ENVIRONMENT_SET_ROTATION

  if (!g_renderManager.Configure(picture.iWidth,
                                 picture.iHeight,
                                 picture.iDisplayWidth,
                                 picture.iDisplayHeight,
                                 (float)framerate,
                                 flags,
                                 picture.format,
                                 picture.extended_format,
                                 orientation))
  {
    CLog::Log(LOGERROR, "RetroPlayerVideo: Failed to configure renderer");
    return false;
  }

  m_outputWidth = picture.iWidth;
  m_outputHeight = picture.iHeight;
  m_outputFramerate = framerate;

  PixelFormat format;
  switch (m_pixelFormat)
  {
  case GAME_PIXEL_FORMAT_XRGB8888:
    CLog::Log(LOGINFO, "RetroPlayerVideo: Pixel Format: XRGB8888, using PIX_FMT_0RGB32");
    format = PIX_FMT_0RGB32;
    break;
  case GAME_PIXEL_FORMAT_RGB565:
  	CLog::Log(LOGINFO, "RetroPlayerVideo: Pixel Format: RGB565, using PIX_FMT_RGB565");
    format = PIX_FMT_RGB565;
    break;
  case GAME_PIXEL_FORMAT_0RGB1555:
  default:
    CLog::Log(LOGINFO, "RetroPlayerVideo: Pixel Format: 0RGB1555, using PIX_FMT_RGB555");
    format = PIX_FMT_RGB555;
    break;
  }

  if (m_swsContext)
    sws_freeContext(m_swsContext);

  m_swsContext = sws_getContext(
    picture.iWidth, picture.iHeight, format,
    picture.iWidth, picture.iHeight, PIX_FMT_YUV420P,
    SWS_FAST_BILINEAR | SwScaleCPUFlags(), NULL, NULL, NULL
  );

  return true;
}
Exemplo n.º 18
0
int		process_instance( livido_port_t *my_instance, double timecode )
{
	uint8_t *A[4] = {NULL,NULL,NULL,NULL};
	uint8_t *O[4]= {NULL,NULL,NULL,NULL};

	int palette;
	int w;
	int h;
	
	lvd_crop_t *crop = NULL;
	livido_property_get( my_instance, "PLUGIN_private", 0, &crop );
	
	if( crop == NULL )
		return LIVIDO_ERROR_INTERNAL;

	int error  = lvd_extract_channel_values( my_instance, "out_channels", 0, &w,&h, O,&palette );
	if( error != LIVIDO_NO_ERROR )
		return LIVIDO_ERROR_NO_OUTPUT_CHANNELS;

	error = lvd_extract_channel_values( my_instance, "in_channels", 0, &w, &h, A, &palette );
	if( error != LIVIDO_NO_ERROR )
		return LIVIDO_ERROR_NO_INPUT_CHANNELS;

	int	left = lvd_extract_param_index( my_instance,"in_parameters", 0 );
	int	right = lvd_extract_param_index( my_instance,"in_parameters", 1 );
	int	top = lvd_extract_param_index( my_instance, "in_parameters", 2 );
	int	bottom = lvd_extract_param_index( my_instance, "in_parameters", 3);
	int scale = lvd_extract_param_index( my_instance, "in_parameters", 4);

	int tmp_w = ( w - left - right);
	int tmp_h = h - top - bottom;

	if( tmp_w < 0 )
		tmp_w = 0;
	if( tmp_h < 0 )
		tmp_h = 0;

	if( tmp_w != crop->w || tmp_h != crop->h ) {
		if( crop->sws ) {
			sws_freeContext( crop->sws );
			crop->sws = NULL;
		}
		crop->w = tmp_w;
		crop->h = tmp_h;
	}

	int crop_strides[4] = { crop->w, crop->w, crop->w, 0 };
	int dst_strides[4]  = { w, w, w, 0 }; 

	if( !lvd_crop_plane( crop->buf[0], A[0], left, right, top, bottom, w, h ) )
		return LIVIDO_NO_ERROR;

	if( !lvd_crop_plane( crop->buf[1], A[1], left, right, top, bottom, w, h ) )
		return LIVIDO_NO_ERROR;

	if( !lvd_crop_plane( crop->buf[2], A[2], left, right, top, bottom, w, h ) )
		return LIVIDO_NO_ERROR;

	/* (re)create the scaler lazily: it stretches the cropped crop->w x crop->h
	   planes back to the full w x h frame in planar 4:4:4 YUV */
	if( crop->sws == NULL ) {
		crop->sws = sws_getContext(crop->w,crop->h,PIX_FMT_YUV444P,w,h,PIX_FMT_YUV444P,crop->flags,NULL,NULL,NULL);
		if( crop->sws == NULL )
			return LIVIDO_ERROR_INTERNAL;
	}

	sws_scale(crop->sws,(const uint8_t * const *)crop->buf,crop_strides,0,crop->h,(uint8_t * const *) O,dst_strides);

	return LIVIDO_NO_ERROR;
}
Exemplo n.º 19
0
Arquivo: vo_x11.c Projeto: kax4/mpv
static void draw_osd(struct vo *vo, struct osd_state *osd)
{
    struct priv *p = vo->priv;

    struct mp_image img = get_x_buffer(p);

    struct mp_osd_res res = {
        .w = img.w,
        .h = img.h,
        .display_par = vo->monitor_par,
        .video_par = vo->aspdat.par,
    };

    osd_draw_on_image_bk(osd, res, osd->vo_pts, 0, p->osd_backup, &img);
}

static mp_image_t *get_screenshot(struct vo *vo)
{
    struct priv *p = vo->priv;

    struct mp_image img = get_x_buffer(p);
    struct mp_image *res = alloc_mpi(img.w, img.h, img.imgfmt);
    copy_mpi(res, &img);
    mp_draw_sub_backup_restore(p->osd_backup, res);

    return res;
}

static int redraw_frame(struct vo *vo)
{
    struct priv *p = vo->priv;

    struct mp_image img = get_x_buffer(p);
    mp_draw_sub_backup_restore(p->osd_backup, &img);

    return true;
}

static void flip_page(struct vo *vo)
{
    struct priv *p = vo->priv;
    Display_Image(p, p->myximage, p->ImageData);
    XSync(vo->x11->display, False);
}

static int draw_slice(struct vo *vo, uint8_t *src[], int stride[], int w, int h,
                      int x, int y)
{
    struct priv *p = vo->priv;
    uint8_t *dst[MP_MAX_PLANES] = {NULL};
    int dstStride[MP_MAX_PLANES] = {0};

    if ((p->old_vo_dwidth != vo->dwidth || p->old_vo_dheight != vo->dheight)
        /*&& y==0 */ && p->zoomFlag)
    {
        int newW = vo->dwidth;
        int newH = vo->dheight;
        struct SwsContext *oldContext = p->swsContext;

        p->old_vo_dwidth = vo->dwidth;
        p->old_vo_dheight = vo->dheight;

        if (vo_fs)
            aspect(vo, &newW, &newH, A_ZOOM);
        if (sws_flags == 0)
            newW &= (~31);      // not strictly needed, but required if the user wants the FAST_BILINEAR scaler

        p->swsContext
            = sws_getContextFromCmdLine(p->srcW, p->srcH, p->in_format, newW,
                                        newH, p->out_format);
        if (p->swsContext) {
            p->image_width = (newW + 7) & (~7);
            p->image_height = newH;

            freeMyXImage(p);
            getMyXImage(p);
            sws_freeContext(oldContext);
        } else
            p->swsContext = oldContext;
        p->dst_width = newW;
    }

    dstStride[0] = p->image_width * ((p->bpp + 7) / 8);
    dst[0] = p->ImageData;
    if (p->Flip_Flag) {
        dst[0] += dstStride[0] * (p->image_height - 1);
        dstStride[0] = -dstStride[0];
    }
    sws_scale(p->swsContext, (const uint8_t **)src, stride, y, h, dst,
              dstStride);
    mp_draw_sub_backup_reset(p->osd_backup);
    return 0;
}

static int query_format(struct vo *vo, uint32_t format)
{
    mp_msg(MSGT_VO, MSGL_DBG2,
           "vo_x11: query_format was called: %x (%s)\n", format,
           vo_format_name(format));
    if (IMGFMT_IS_BGR(format)) {
        if (IMGFMT_BGR_DEPTH(format) <= 8)
            return 0;           // TODO 8bpp not yet fully implemented
        if (IMGFMT_BGR_DEPTH(format) == vo->x11->depthonscreen)
            return VFCAP_CSP_SUPPORTED | VFCAP_CSP_SUPPORTED_BY_HW |
                   VFCAP_OSD | VFCAP_FLIP |
                   VFCAP_ACCEPT_STRIDE;
        else
            return VFCAP_CSP_SUPPORTED | VFCAP_OSD |
                   VFCAP_FLIP |
                   VFCAP_ACCEPT_STRIDE;
    }

    switch (format) {
    case IMGFMT_I420:
    case IMGFMT_IYUV:
    case IMGFMT_YV12:
        return VFCAP_CSP_SUPPORTED | VFCAP_OSD |
               VFCAP_ACCEPT_STRIDE;
    }
    return 0;
}
Exemplo n.º 20
0
VideoDecoder::~VideoDecoder()
{
    if (m_conversionContext)
        sws_freeContext(m_conversionContext);
}
Exemplo n.º 21
0
int FFMPEG::convert_cmodel(AVPicture *picture_in, PixelFormat pix_fmt_in,
                           int width_in, int height_in, VFrame *frame_out) {

    // set up a temporary picture_out from frame_out
    AVPicture picture_out;
    init_picture_from_frame(&picture_out, frame_out);
    int cmodel_out = frame_out->get_color_model();
    PixelFormat pix_fmt_out = color_model_to_pix_fmt(cmodel_out);

#ifdef HAVE_SWSCALER
    // We need a context for swscale
    struct SwsContext *convert_ctx;
#endif
    int result;
#ifndef HAVE_SWSCALER
    // do conversion within libavcodec if possible
    if (pix_fmt_out != PIX_FMT_NB) {
        result = img_convert(&picture_out,
                             pix_fmt_out,
                             picture_in,
                             pix_fmt_in,
                             width_in,
                             height_in);
        if (result) {
            printf("FFMPEG::convert_cmodel img_convert() failed\n");
        }
        return result;
    }
#else
    convert_ctx = sws_getContext(width_in, height_in,pix_fmt_in,
                                 frame_out->get_w(),frame_out->get_h(),pix_fmt_out,
                                 SWS_BICUBIC, NULL, NULL, NULL);

    if(convert_ctx == NULL) {
        printf("FFMPEG::convert_cmodel : swscale context initialization failed\n");
        return 1;
    }

    result = sws_scale(convert_ctx,
                       picture_in->data, picture_in->linesize,
                       0, height_in,
                       picture_out.data, picture_out.linesize);

    sws_freeContext(convert_ctx);

    // NOTE: sws_scale() returns the height of the output slice,
    // not 0 on success, so only a non-positive value is an error
    if(result <= 0) {
        printf("FFMPEG::convert_cmodel sws_scale() failed\n");
    }
#endif

    // make an intermediate temp frame only if necessary
    int cmodel_in = pix_fmt_to_color_model(pix_fmt_in);
    if (cmodel_in == BC_TRANSPARENCY) {
        if (pix_fmt_in == PIX_FMT_RGB32) {
            // avoid infinite recursion if things are broken
            printf("FFMPEG::convert_cmodel pix_fmt_in broken!\n");
            return 1;
        }

        // NOTE: choose RGBA8888 as a hopefully non-lossy colormodel
        VFrame *temp_frame = new VFrame(0, width_in, height_in,
                                        BC_RGBA8888);
        if (convert_cmodel(picture_in, pix_fmt_in,
                           width_in, height_in, temp_frame)) {
            delete temp_frame;
            return 1;  // recursed call will print error message
        }

        int result = convert_cmodel(temp_frame, frame_out);
        delete temp_frame;
        return result;
    }


    // NOTE: no scaling possible in img_convert() so none possible here
    if (frame_out->get_w() != width_in ||
            frame_out->get_h() != height_in) {
        printf("scaling from %dx%d to %dx%d not allowed\n",
               width_in, height_in,
               frame_out->get_w(), frame_out->get_h());
        return 1;
    }


    // if we reach here we know that cmodel_transfer() will work
    uint8_t *yuv_in[3] = {0,0,0};
    uint8_t *row_pointers_in[height_in];
    if (cmodel_is_planar(cmodel_in)) {
        yuv_in[0] = picture_in->data[0];
        yuv_in[1] = picture_in->data[1];
        yuv_in[2] = picture_in->data[2];
    }
    else {
        // set row pointers for picture_in
        uint8_t *data = picture_in->data[0];
        int bytes_per_line =
            cmodel_calculate_pixelsize(cmodel_in) * width_in;
        for (int i = 0; i < height_in; i++) {
            row_pointers_in[i] = data + i * bytes_per_line;
        }
    }

    cmodel_transfer
    (// Packed data out
        frame_out->get_rows(),
        // Packed data in
        row_pointers_in,

        // Planar data out
        frame_out->get_y(), frame_out->get_u(), frame_out->get_v(),
        // Planar data in
        yuv_in[0], yuv_in[1], yuv_in[2],

        // Dimensions in
        0, 0, width_in, height_in,  // NOTE: dimensions are same
        // Dimensions out
        0, 0, width_in, height_in,

        // Color model in, color model out
        cmodel_in, cmodel_out,

        // Background color
        0,

        // Rowspans in, out (of luma for YUV)
        width_in, width_in

    );

    return 0;
}
Exemplo n.º 22
0
int main(int argc, char* argv[])
{

	AVFormatContext	*pFormatCtx;
	int				i, videoindex;
	AVCodecContext	*pCodecCtx;
	AVCodec			*pCodec;
	AVFrame	*pFrame,*pFrameYUV;
	unsigned char *out_buffer;
	AVPacket *packet;
	int ret, got_picture;

	//------------SDL----------------
	int screen_w,screen_h;
	SDL_Window *screen; 
	SDL_Renderer* sdlRenderer;
	SDL_Texture* sdlTexture;
	SDL_Rect sdlRect;
	SDL_Thread *video_tid;
	SDL_Event event;

	struct SwsContext *img_convert_ctx;

	//char filepath[]="bigbuckbunny_480x272.h265";
	char filepath[]="Titanic.ts";

	av_register_all();
	avformat_network_init();
	pFormatCtx = avformat_alloc_context();

	if(avformat_open_input(&pFormatCtx,filepath,NULL,NULL)!=0){
		printf("Couldn't open input stream.\n");
		return -1;
	}
	if(avformat_find_stream_info(pFormatCtx,NULL)<0){
		printf("Couldn't find stream information.\n");
		return -1;
	}
	videoindex=-1;
	for(i=0; i<pFormatCtx->nb_streams; i++) 
		if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
			videoindex=i;
			break;
		}
	if(videoindex==-1){
		printf("Didn't find a video stream.\n");
		return -1;
	}
	pCodecCtx=pFormatCtx->streams[videoindex]->codec;
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec==NULL){
		printf("Codec not found.\n");
		return -1;
	}
	if(avcodec_open2(pCodecCtx, pCodec,NULL)<0){
		printf("Could not open codec.\n");
		return -1;
	}
	pFrame=av_frame_alloc();
	pFrameYUV=av_frame_alloc();

	out_buffer=(unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P,  pCodecCtx->width, pCodecCtx->height,1));
	av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize,out_buffer,
		AV_PIX_FMT_YUV420P,pCodecCtx->width, pCodecCtx->height,1);

	//Output Info-----------------------------
	printf("---------------- File Information ---------------\n");
	av_dump_format(pFormatCtx,0,filepath,0);
	printf("-------------------------------------------------\n");
	
	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, 
		pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); 
	

	if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {  
		printf( "Could not initialize SDL - %s\n", SDL_GetError()); 
		return -1;
	} 
	//SDL 2.0 Support for multiple windows
	screen_w = pCodecCtx->width;
	screen_h = pCodecCtx->height;
	screen = SDL_CreateWindow("Simplest ffmpeg player's Window", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
		screen_w, screen_h,SDL_WINDOW_OPENGL);

	if(!screen) {  
		printf("SDL: could not create window - exiting:%s\n",SDL_GetError());  
		return -1;
	}
	sdlRenderer = SDL_CreateRenderer(screen, -1, 0);  
	//IYUV: Y + U + V  (3 planes)
	//YV12: Y + V + U  (3 planes)
	sdlTexture = SDL_CreateTexture(sdlRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,pCodecCtx->width,pCodecCtx->height);  

	sdlRect.x=0;
	sdlRect.y=0;
	sdlRect.w=screen_w;
	sdlRect.h=screen_h;

	packet=(AVPacket *)av_malloc(sizeof(AVPacket));

	video_tid = SDL_CreateThread(sfp_refresh_thread,NULL,NULL);
	//------------SDL End------------
	//Event Loop
	
	for (;;) {
		//Wait
		SDL_WaitEvent(&event);
		if(event.type==SFM_REFRESH_EVENT){
			while(1){
				if(av_read_frame(pFormatCtx, packet)<0){
					//end of stream: stop instead of spinning on a stale packet
					thread_exit=1;
					break;
				}
				if(packet->stream_index==videoindex)
					break;
				//free packets from other streams so they don't leak
				av_free_packet(packet);
			}
			if(thread_exit)
				continue;
			ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
			if(ret < 0){
				printf("Decode Error.\n");
				return -1;
			}
			if(got_picture){
				sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
				//SDL---------------------------
				SDL_UpdateTexture( sdlTexture, NULL, pFrameYUV->data[0], pFrameYUV->linesize[0] );  
				SDL_RenderClear( sdlRenderer );  
				//SDL_RenderCopy( sdlRenderer, sdlTexture, &sdlRect, &sdlRect );  
				SDL_RenderCopy( sdlRenderer, sdlTexture, NULL, NULL);  
				SDL_RenderPresent( sdlRenderer );  
				//SDL End-----------------------
			}
			av_free_packet(packet);
		}else if(event.type==SDL_KEYDOWN){
			//Pause
			if(event.key.keysym.sym==SDLK_SPACE)
				thread_pause=!thread_pause;
		}else if(event.type==SDL_QUIT){
			thread_exit=1;
		}else if(event.type==SFM_BREAK_EVENT){
			break;
		}

	}

	sws_freeContext(img_convert_ctx);

	SDL_Quit();
	//--------------
	av_frame_free(&pFrameYUV);
	av_frame_free(&pFrame);
	avcodec_close(pCodecCtx);
	avformat_close_input(&pFormatCtx);

	return 0;
}
Exemplo n.º 23
0
bool CDVDFileInfo::ExtractThumb(const std::string &strPath,
                                CTextureDetails &details,
                                CStreamDetails *pStreamDetails, int pos)
{
  std::string redactPath = CURL::GetRedacted(strPath);
  unsigned int nTime = XbmcThreads::SystemClockMillis();
  CDVDInputStream *pInputStream = CDVDFactoryInputStream::CreateInputStream(NULL, strPath, "");
  if (!pInputStream)
  {
    CLog::Log(LOGERROR, "InputStream: Error creating stream for %s", redactPath.c_str());
    return false;
  }

  if (pInputStream->IsStreamType(DVDSTREAM_TYPE_DVD)
   || pInputStream->IsStreamType(DVDSTREAM_TYPE_BLURAY))
  {
    CLog::Log(LOGDEBUG, "%s: disc streams not supported for thumb extraction, file: %s", __FUNCTION__, redactPath.c_str());
    delete pInputStream;
    return false;
  }

  if (pInputStream->IsStreamType(DVDSTREAM_TYPE_PVRMANAGER))
  {
    SAFE_DELETE(pInputStream);
    return false;
  }

  if (!pInputStream->Open(strPath.c_str(), "", true))
  {
    CLog::Log(LOGERROR, "InputStream: Error opening, %s", redactPath.c_str());
    if (pInputStream)
      SAFE_DELETE(pInputStream);
    return false;
  }

  CDVDDemux *pDemuxer = NULL;

  try
  {
    pDemuxer = CDVDFactoryDemuxer::CreateDemuxer(pInputStream, true);
    if(!pDemuxer)
    {
      SAFE_DELETE(pInputStream);
      CLog::Log(LOGERROR, "%s - Error creating demuxer", __FUNCTION__);
      return false;
    }
  }
  catch(...)
  {
    CLog::Log(LOGERROR, "%s - Exception thrown when opening demuxer", __FUNCTION__);
    if (pDemuxer)
      SAFE_DELETE(pDemuxer);
    SAFE_DELETE(pInputStream);
    return false;
  }

  if (pStreamDetails)
  {
    DemuxerToStreamDetails(pInputStream, pDemuxer, *pStreamDetails, strPath);

    // external subtitles
    std::vector<std::string> filenames;
    std::string video_path;
    if (strPath.empty())
      video_path = pInputStream->GetFileName();
    else
      video_path = strPath;

    CUtil::ScanForExternalSubtitles(video_path, filenames);

    for(unsigned int i=0;i<filenames.size();i++)
    {
      // if vobsub subtitle:
      if (URIUtils::GetExtension(filenames[i]) == ".idx")
      {
        std::string strSubFile;
        if ( CUtil::FindVobSubPair(filenames, filenames[i], strSubFile) )
          AddExternalSubtitleToDetails(video_path, *pStreamDetails, filenames[i], strSubFile);
      }
      else
      {
        if ( !CUtil::IsVobSub(filenames, filenames[i]) )
        {
          AddExternalSubtitleToDetails(video_path, *pStreamDetails, filenames[i]);
        }
      }
    }
  }

  int nVideoStream = -1;
  for (int i = 0; i < pDemuxer->GetNrOfStreams(); i++)
  {
    CDemuxStream* pStream = pDemuxer->GetStream(i);
    if (pStream)
    {
      // ignore if it's a picture attachment (e.g. jpeg artwork)
      if(pStream->type == STREAM_VIDEO && !(pStream->flags & AV_DISPOSITION_ATTACHED_PIC))
        nVideoStream = i;
      else
        pStream->SetDiscard(AVDISCARD_ALL);
    }
  }

  bool bOk = false;
  int packetsTried = 0;

  if (nVideoStream != -1)
  {
    CDVDVideoCodec *pVideoCodec;

    CDVDStreamInfo hint(*pDemuxer->GetStream(nVideoStream), true);
    hint.software = true;

    if (hint.codec == AV_CODEC_ID_MPEG2VIDEO || hint.codec == AV_CODEC_ID_MPEG1VIDEO)
    {
      // libmpeg2 is not thread safe so use ffmpeg for mpeg2/mpeg1 thumb extraction
      CDVDCodecOptions dvdOptions;
      pVideoCodec = CDVDFactoryCodec::OpenCodec(new CDVDVideoCodecFFmpeg(), hint, dvdOptions);
    }
    else
    {
      pVideoCodec = CDVDFactoryCodec::CreateVideoCodec( hint );
    }

    if (pVideoCodec)
    {
      int nTotalLen = pDemuxer->GetStreamLength();
      int nSeekTo = (pos==-1?nTotalLen / 3:pos);

      CLog::Log(LOGDEBUG,"%s - seeking to pos %dms (total: %dms) in %s", __FUNCTION__, nSeekTo, nTotalLen, redactPath.c_str());
      if (pDemuxer->SeekTime(nSeekTo, true))
      {
        int iDecoderState = VC_ERROR;
        DVDVideoPicture picture;

        memset(&picture, 0, sizeof(picture));

        // num streams * 160 frames, should get a valid frame, if not abort.
        int abort_index = pDemuxer->GetNrOfStreams() * 160;
        do
        {
          DemuxPacket* pPacket = pDemuxer->Read();
          packetsTried++;

          if (!pPacket)
            break;

          if (pPacket->iStreamId != nVideoStream)
          {
            CDVDDemuxUtils::FreeDemuxPacket(pPacket);
            continue;
          }

          iDecoderState = pVideoCodec->Decode(pPacket->pData, pPacket->iSize, pPacket->dts, pPacket->pts);
          CDVDDemuxUtils::FreeDemuxPacket(pPacket);

          if (iDecoderState & VC_ERROR)
            break;

          if (iDecoderState & VC_PICTURE)
          {
            memset(&picture, 0, sizeof(DVDVideoPicture));
            if (pVideoCodec->GetPicture(&picture))
            {
              if(!(picture.iFlags & DVP_FLAG_DROPPED))
                break;
            }
          }

        } while (abort_index--);

        if (iDecoderState & VC_PICTURE && !(picture.iFlags & DVP_FLAG_DROPPED))
        {
          {
            unsigned int nWidth = g_advancedSettings.GetThumbSize();
            double aspect = (double)picture.iDisplayWidth / (double)picture.iDisplayHeight;
            if(hint.forced_aspect && hint.aspect != 0)
              aspect = hint.aspect;
            unsigned int nHeight = (unsigned int)((double)g_advancedSettings.GetThumbSize() / aspect);

            uint8_t *pOutBuf = new uint8_t[nWidth * nHeight * 4];
            struct SwsContext *context = sws_getContext(picture.iWidth, picture.iHeight,
                  PIX_FMT_YUV420P, nWidth, nHeight, PIX_FMT_BGRA, SWS_FAST_BILINEAR | SwScaleCPUFlags(), NULL, NULL, NULL);

            if (context)
            {
              uint8_t *src[] = { picture.data[0], picture.data[1], picture.data[2], 0 };
              int     srcStride[] = { picture.iLineSize[0], picture.iLineSize[1], picture.iLineSize[2], 0 };
              uint8_t *dst[] = { pOutBuf, 0, 0, 0 };
              int     dstStride[] = { (int)nWidth*4, 0, 0, 0 };
              int orientation = DegreeToOrientation(hint.orientation);
              sws_scale(context, src, srcStride, 0, picture.iHeight, dst, dstStride);
              sws_freeContext(context);

              details.width = nWidth;
              details.height = nHeight;
              CPicture::CacheTexture(pOutBuf, nWidth, nHeight, nWidth * 4, orientation, nWidth, nHeight, CTextureCache::GetCachedPath(details.file));
              bOk = true;
            }
            SAFE_DELETE_ARRAY(pOutBuf);
          }
        }
        else
        {
          CLog::Log(LOGDEBUG,"%s - decode failed in %s after %d packets.", __FUNCTION__, redactPath.c_str(), packetsTried);
        }
      }
      SAFE_DELETE(pVideoCodec);
    }
  }

  if (pDemuxer)
    SAFE_DELETE(pDemuxer);

  SAFE_DELETE(pInputStream);

  if(!bOk)
  {
    XFILE::CFile file;
    if(file.OpenForWrite(CTextureCache::GetCachedPath(details.file)))
      file.Close();
  }

  unsigned int nTotalTime = XbmcThreads::SystemClockMillis() - nTime;
  CLog::Log(LOGDEBUG,"%s - measured %u ms to extract thumb from file <%s> in %d packets. ", __FUNCTION__, nTotalTime, redactPath.c_str(), packetsTried);
  return bOk;
}
Exemplo n.º 24
0
status_t
AVCodecDecoder::_NegotiateVideoOutputFormat(media_format* inOutFormat)
{
	TRACE("AVCodecDecoder::_NegotiateVideoOutputFormat()\n");

	TRACE("  requested video format 0x%x\n",
		inOutFormat->u.raw_video.display.format);

	// Make MediaPlayer happy (if not in rgb32 screen depth and no overlay,
	// it will only ask for YCbCr, which DrawBitmap doesn't handle, so the
	// default colordepth is RGB32).
	if (inOutFormat->u.raw_video.display.format == B_YCbCr422)
		fOutputColorSpace = B_YCbCr422;
	else
		fOutputColorSpace = B_RGB32;

#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext != NULL)
		sws_freeContext(fSwsContext);
	fSwsContext = NULL;
#else
	fFormatConversionFunc = 0;
#endif

	fContext->extradata = (uint8_t*)fExtraData;
	fContext->extradata_size = fExtraDataSize;

	bool codecCanHandleIncompleteFrames
		= (fCodec->capabilities & CODEC_CAP_TRUNCATED) != 0;
	if (codecCanHandleIncompleteFrames) {
		// Expect and handle video frames to be splitted across consecutive
		// data chunks.
		fContext->flags |= CODEC_FLAG_TRUNCATED;
	}

	// close any previous instance
	if (fCodecInitDone) {
		fCodecInitDone = false;
		avcodec_close(fContext);
	}

	if (avcodec_open2(fContext, fCodec, NULL) >= 0)
		fCodecInitDone = true;
	else {
		TRACE("avcodec_open() failed to init codec!\n");
		return B_ERROR;
	}

	_ResetTempPacket();

	status_t statusOfDecodingFirstFrame = _DecodeNextVideoFrame();
	if (statusOfDecodingFirstFrame != B_OK) {
		TRACE("[v] decoding first video frame failed\n");
		return B_ERROR;
	}

	// Note: fSwsContext / fFormatConversionFunc should have been initialized
	// by first call to _DecodeNextVideoFrame() above.
#if USE_SWS_FOR_COLOR_SPACE_CONVERSION
	if (fSwsContext == NULL) {
		TRACE("No SWS Scale context or decoder has not set the pixel format "
			"yet!\n");
	}
#else
	if (fFormatConversionFunc == NULL) {
		TRACE("no pixel format conversion function found or decoder has "
			"not set the pixel format yet!\n");
	}
#endif

	inOutFormat->type = B_MEDIA_RAW_VIDEO;
	inOutFormat->require_flags = 0;
	inOutFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;

	inOutFormat->u.raw_video = fInputFormat.u.encoded_video.output;

	inOutFormat->u.raw_video.interlace = 1;
		// Progressive (non-interlaced) video frames are delivered
	inOutFormat->u.raw_video.first_active = fHeader.u.raw_video.first_active_line;
	inOutFormat->u.raw_video.last_active = fHeader.u.raw_video.line_count;
	inOutFormat->u.raw_video.pixel_width_aspect = fHeader.u.raw_video.pixel_width_aspect;
	inOutFormat->u.raw_video.pixel_height_aspect = fHeader.u.raw_video.pixel_height_aspect;
	inOutFormat->u.raw_video.field_rate = fOutputFrameRate;
		// Was calculated by first call to _DecodeNextVideoFrame()

	inOutFormat->u.raw_video.display.format = fOutputColorSpace;
	inOutFormat->u.raw_video.display.line_width = fHeader.u.raw_video.display_line_width;
	inOutFormat->u.raw_video.display.line_count = fHeader.u.raw_video.display_line_count;
	inOutFormat->u.raw_video.display.bytes_per_row = fHeader.u.raw_video.bytes_per_row;

#ifdef TRACE_AV_CODEC
	char buffer[1024];
	string_for_format(*inOutFormat, buffer, sizeof(buffer));
	TRACE("[v]  outFormat = %s\n", buffer);
	TRACE("  returned  video format 0x%x\n",
		inOutFormat->u.raw_video.display.format);
#endif

	return B_OK;
}
Exemplo n.º 25
0
void convert_image(AVCodecContext *pCodecCtx, AVFrame *pFrame, AVPacket *avpkt, int *got_packet_ptr, int width, int height) {
    
    AVCodecContext *codecCtx = NULL;
    AVCodec *codec = NULL;
    AVFrame *frame = NULL;
    struct SwsContext *scalerCtx = NULL;
    int ret = 0;
    
    *got_packet_ptr = 0;
    
    if (width == -1) {
        width = pCodecCtx->width;
    }
    
    if (height == -1) {
        height = pCodecCtx->height;
    }
    
    codec = avcodec_find_encoder(AV_CODEC_ID_MJPEG);
    if (!codec) {
        printf("avcodec_find_decoder() failed to find decoder\n");
        goto fail;
    }
    
    codecCtx = avcodec_alloc_context3(codec);
    if (!codecCtx) {
        printf("avcodec_alloc_context3 failed\n");
        goto fail;
    }
    
    codecCtx->bit_rate = pCodecCtx->bit_rate;
    //codecCtx->width = pCodecCtx->width;
    //codecCtx->height = pCodecCtx->height;
    codecCtx->width = width;
    codecCtx->height = height;
    codecCtx->pix_fmt = TARGET_IMAGE_FORMAT;
    codecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
    codecCtx->time_base.num = pCodecCtx->time_base.num;
    codecCtx->time_base.den = pCodecCtx->time_base.den;
    codecCtx->mb_lmin        = pCodecCtx->lmin = pCodecCtx->qmin * FF_QP2LAMBDA;
    codecCtx->mb_lmax        = pCodecCtx->lmax = pCodecCtx->qmax * FF_QP2LAMBDA;
    codecCtx->flags          = CODEC_FLAG_QSCALE;
    codecCtx->global_quality = pCodecCtx->qmin * FF_QP2LAMBDA;

    // codec was validated above, so only the open itself can fail here
    if (avcodec_open2(codecCtx, codec, NULL) < 0) {
        printf("avcodec_open2() failed\n");
        goto fail;
    }
    
    frame = av_frame_alloc();
    
    if (!frame) {
        goto fail;
    }
    
    uint8_t *dst_buffer = av_malloc (avpicture_get_size(TARGET_IMAGE_FORMAT,width, height));
    
    avpicture_fill ((AVPicture *) frame, dst_buffer,TARGET_IMAGE_FORMAT, width, height);
    
    frame->pts     = 1;
    frame->quality = pCodecCtx->global_quality;
    
    scalerCtx = sws_getContext(pCodecCtx->width,
                                                  pCodecCtx->height,
                                                  pCodecCtx->pix_fmt,
                                                  //pCodecCtx->width,
                                                  //pCodecCtx->height,
                                                  width,
                                                  height,
                                                  TARGET_IMAGE_FORMAT,
                                                  SWS_BICUBIC , 0, 0, 0);
    
    if (!scalerCtx) {
        printf("sws_getContext() failed\n");
        goto fail;
    }
    
    sws_scale(scalerCtx,
              (const uint8_t * const *) pFrame->data,
              pFrame->linesize,
              0,
              pFrame->height,
              frame->data,
              frame->linesize);
    
    ret = avcodec_encode_video2(codecCtx, avpkt, frame, got_packet_ptr);
    __android_log_print(ANDROID_LOG_INFO, LOG_TAG, "Encoded Size: %d", ret);
    if (ret < 0) {
        *got_packet_ptr = 0;
    }
    
fail:
    if (frame) {
        // avpicture_fill() pointed frame->data[0] at dst_buffer, so this
        // releases the pixel buffer along with the frame
        av_freep(&frame->data[0]);
        av_free(frame);
    }
    
    if (codecCtx) {
        avcodec_close(codecCtx);
        av_free(codecCtx);
    }
    
    if (scalerCtx) {
        sws_freeContext(scalerCtx);
    }
    
    if (ret < 0 || !*got_packet_ptr) {
        av_free_packet(avpkt);
    }
    
}
Exemplo n.º 26
0
static void ff_sws_free(MSScalerContext *ctx){
	MSFFScalerContext *fctx=(MSFFScalerContext*)ctx;
	if (fctx->ctx) sws_freeContext(fctx->ctx);
	ms_free(ctx);
}
Exemplo n.º 27
0
FFMpegEncoder::~FFMpegEncoder()
{
	sws_freeContext(swsCtx);
	close();
}
Exemplo n.º 28
0
QByteArray AVDecoder::WriteJPEG(AVCodecContext *pCodecCtx, AVFrame *pFrame, int width, int height)
{
    AVCodecContext *pOCodecCtx;
    AVCodec        *pOCodec;

    QByteArray data;

    pOCodec = avcodec_find_encoder(AV_CODEC_ID_MJPEG);

    if (!pOCodec) {
        return data;
    }

    SwsContext *sws_ctx = sws_getContext(
                pCodecCtx->width, pCodecCtx->height,
                pCodecCtx->pix_fmt,
                width, height,
                PIX_FMT_YUVJ420P, SWS_BICUBIC,
                NULL, NULL, NULL);

    if(!sws_ctx) {
        return data;
    }

#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)
    AVFrame *pFrameRGB = av_frame_alloc();
#else
    AVFrame *pFrameRGB = avcodec_alloc_frame();
#endif

    if(pFrameRGB == NULL) {
        sws_freeContext(sws_ctx);
        return data;
    }

    int numBytes = avpicture_get_size(PIX_FMT_YUVJ420P, width, height);

    uint8_t *buffer = (uint8_t *)av_malloc(numBytes);

    if(!buffer) {
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)
        av_frame_free(&pFrameRGB);
#else
        av_free(pFrameRGB);
#endif
        sws_freeContext(sws_ctx);
        return data;
    }

    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_YUVJ420P, width, height);

    sws_scale(
        sws_ctx,
        pFrame->data,
        pFrame->linesize,
        0,
        pCodecCtx->height,
        pFrameRGB->data,
        pFrameRGB->linesize
    );

    pOCodecCtx = avcodec_alloc_context3(pOCodec);

    if(pOCodecCtx == NULL) {
        av_free(buffer);
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)
        av_frame_free(&pFrameRGB);
#else
        av_free(pFrameRGB);
#endif
        sws_freeContext(sws_ctx);
        return data;
    }

    pOCodecCtx->bit_rate      = pCodecCtx->bit_rate;
    pOCodecCtx->width         = width;
    pOCodecCtx->height        = height;
    pOCodecCtx->pix_fmt       = AV_PIX_FMT_YUVJ420P;
    pOCodecCtx->codec_id      = AV_CODEC_ID_MJPEG;
    pOCodecCtx->codec_type    = AVMEDIA_TYPE_VIDEO;
    pOCodecCtx->time_base.num = pCodecCtx->time_base.num;
    pOCodecCtx->time_base.den = pCodecCtx->time_base.den;

    AVDictionary *opts = NULL;
    if(avcodec_open2(pOCodecCtx, pOCodec, &opts) < 0) {
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,52,0)
        avcodec_free_context(&pOCodecCtx);
#else
        avcodec_close(pOCodecCtx);
        av_free(pOCodecCtx);
#endif
        av_free(buffer);
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)
        av_frame_free(&pFrameRGB);
#else
        av_free(pFrameRGB);
#endif
        sws_freeContext(sws_ctx);
        return data;
    }

    pOCodecCtx->mb_lmin        = pOCodecCtx->lmin = pOCodecCtx->qmin * FF_QP2LAMBDA;
    pOCodecCtx->mb_lmax        = pOCodecCtx->lmax = pOCodecCtx->qmax * FF_QP2LAMBDA;
    pOCodecCtx->flags          = CODEC_FLAG_QSCALE;
    pOCodecCtx->global_quality = pOCodecCtx->qmin * FF_QP2LAMBDA;

    // the quality settings must go on the frame actually passed to the
    // encoder, which is pFrameRGB, not the source frame
    pFrameRGB->pts     = 1;
    pFrameRGB->quality = pOCodecCtx->global_quality;

    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    int gotPacket;

    avcodec_encode_video2(pOCodecCtx, &pkt, pFrameRGB, &gotPacket);

    QByteArray buffer2(reinterpret_cast<char *>(pkt.data), pkt.size);
    av_free_packet(&pkt);

#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,52,0)
    avcodec_free_context(&pOCodecCtx);
#else
    avcodec_close(pOCodecCtx);
    av_free(pOCodecCtx);
#endif
    av_free(buffer);
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)
    av_frame_free(&pFrameRGB);
#else
    av_free(pFrameRGB);
#endif
    sws_freeContext(sws_ctx);

    return buffer2;
}
Exemplo n.º 29
0
static pixmap_t *
fa_image_from_video2(const char *url, const image_meta_t *im, 
		     const char *cacheid, char *errbuf, size_t errlen,
		     int sec, time_t mtime, cancellable_t *c)
{
  pixmap_t *pm = NULL;

  if(ifv_url == NULL || strcmp(url, ifv_url)) {
    // Need to open
    int i;
    AVFormatContext *fctx;
    fa_handle_t *fh = fa_open_ex(url, errbuf, errlen, FA_BUFFERED_BIG, NULL);

    if(fh == NULL)
      return NULL;

    AVIOContext *avio = fa_libav_reopen(fh, 0);

    if((fctx = fa_libav_open_format(avio, url, NULL, 0, NULL, 0, 0,
				    0)) == NULL) {
      fa_libav_close(avio);
      snprintf(errbuf, errlen, "Unable to open format");
      return NULL;
    }

    if(!strcmp(fctx->iformat->name, "avi"))
      fctx->flags |= AVFMT_FLAG_GENPTS;

    AVCodecContext *ctx = NULL;
    for(i = 0; i < fctx->nb_streams; i++) {
      if(fctx->streams[i]->codec != NULL && 
	 fctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
	ctx = fctx->streams[i]->codec;
	break;
      }
    }
    if(ctx == NULL) {
      fa_libav_close_format(fctx);
      return NULL;
    }

    AVCodec *codec = avcodec_find_decoder(ctx->codec_id);
    if(codec == NULL) {
      fa_libav_close_format(fctx);
      snprintf(errbuf, errlen, "Unable to find codec");
      return NULL;
    }

    if(avcodec_open2(ctx, codec, NULL) < 0) {
      fa_libav_close_format(fctx);
      snprintf(errbuf, errlen, "Unable to open codec");
      return NULL;
    }

    ifv_close();

    ifv_stream = i;
    ifv_url = strdup(url);
    ifv_fctx = fctx;
    ifv_ctx = ctx;
  }

  AVPacket pkt;
  AVFrame *frame = av_frame_alloc();
  int got_pic;


  AVStream *st = ifv_fctx->streams[ifv_stream];
  int64_t ts = av_rescale(sec, st->time_base.den, st->time_base.num);

  if(av_seek_frame(ifv_fctx, ifv_stream, ts, AVSEEK_FLAG_BACKWARD) < 0) {
    ifv_close();
    snprintf(errbuf, errlen, "Unable to seek to %"PRId64, ts);
    return NULL;
  }
  
  /* after the seek, drop buffered decoder state so decoding restarts
     cleanly at the keyframe we landed on */
  avcodec_flush_buffers(ifv_ctx);

#define MAX_FRAME_SCAN 500
  
  int cnt = MAX_FRAME_SCAN;
  while(1) {
    int r;

    r = av_read_frame(ifv_fctx, &pkt);

    if(r == AVERROR(EAGAIN))
      continue;

    if(r == AVERROR_EOF)
      break;

    if(cancellable_is_cancelled(c)) {
      snprintf(errbuf, errlen, "Cancelled");
      av_free_packet(&pkt);
      break;
    }

    if(r != 0) {
      ifv_close();
      break;
    }

    if(pkt.stream_index != ifv_stream) {
      av_free_packet(&pkt);
      continue;
    }
    cnt--;
    int want_pic = pkt.pts >= ts || cnt <= 0;

    ifv_ctx->skip_frame = want_pic ? AVDISCARD_DEFAULT : AVDISCARD_NONREF;
    
    avcodec_decode_video2(ifv_ctx, frame, &got_pic, &pkt);
    av_free_packet(&pkt);
    if(got_pic == 0 || !want_pic) {
      continue;
    }
    int w,h;

    if(im->im_req_width != -1 && im->im_req_height != -1) {
      w = im->im_req_width;
      h = im->im_req_height;
    } else if(im->im_req_width != -1) {
      w = im->im_req_width;
      h = im->im_req_width * ifv_ctx->height / ifv_ctx->width;

    } else if(im->im_req_height != -1) {
      w = im->im_req_height * ifv_ctx->width / ifv_ctx->height;
      h = im->im_req_height;
    } else {
      // no size requested: fall back to the native video dimensions
      w = ifv_ctx->width;
      h = ifv_ctx->height;
    }

    pm = pixmap_create(w, h, PIXMAP_BGR32, 0);

    if(pm == NULL) {
      ifv_close();
      snprintf(errbuf, errlen, "Out of memory");
      av_free(frame);
      return NULL;
    }

    struct SwsContext *sws;
    sws = sws_getContext(ifv_ctx->width, ifv_ctx->height, ifv_ctx->pix_fmt,
			 w, h, AV_PIX_FMT_BGR32, SWS_BILINEAR,
                         NULL, NULL, NULL);
    if(sws == NULL) {
      ifv_close();
      snprintf(errbuf, errlen, "Scaling failed");
      pixmap_release(pm);
      av_free(frame);
      return NULL;
    }
    
    uint8_t *ptr[4] = {0,0,0,0};
    int strides[4] = {0,0,0,0};

    ptr[0] = pm->pm_pixels;
    strides[0] = pm->pm_linesize;

    sws_scale(sws, (const uint8_t **)frame->data, frame->linesize,
	      0, ifv_ctx->height, ptr, strides);

    sws_freeContext(sws);

    write_thumb(ifv_ctx, frame, w, h, cacheid, mtime);

    break;
  }

  av_frame_free(&frame);
  if(pm == NULL)
    snprintf(errbuf, errlen, "Frame not found (scanned %d)", 
	     MAX_FRAME_SCAN - cnt);

  avcodec_flush_buffers(ifv_ctx);
  callout_arm(&thumb_flush_callout, ifv_autoclose, NULL, 5);
  return pm;
}
Exemplo n.º 30
0
void guacenc_video_prepare_frame(guacenc_video* video, guacenc_buffer* buffer) {

    int lsize;
    int psize;

    /* Ignore NULL buffers */
    if (buffer == NULL || buffer->surface == NULL)
        return;

    /* Obtain destination frame */
    AVFrame* dst = video->next_frame;

    /* Determine width of image if height is scaled to match destination */
    int scaled_width = buffer->width * dst->height / buffer->height;

    /* Determine height of image if width is scaled to match destination */
    int scaled_height = buffer->height * dst->width / buffer->width;

    /* If height-based scaling results in a fit width, add pillarboxes */
    if (scaled_width <= dst->width) {
        lsize = 0;
        psize = (dst->width - scaled_width)
               * buffer->height / dst->height / 2;
    }

    /* If width-based scaling results in a fit height, add letterboxes */
    else {
        assert(scaled_height <= dst->height);
        psize = 0;
        lsize = (dst->height - scaled_height)
               * buffer->width / dst->width / 2;
    }

    /* Prepare source frame for buffer */
    AVFrame* src = guacenc_video_frame_convert(buffer, lsize, psize);
    if (src == NULL) {
        guacenc_log(GUAC_LOG_WARNING, "Failed to allocate source frame. "
                "Frame dropped.");
        return;
    }

    /* Prepare scaling context */
    struct SwsContext* sws = sws_getContext(src->width, src->height,
            AV_PIX_FMT_RGB32, dst->width, dst->height, AV_PIX_FMT_YUV420P,
            SWS_BICUBIC, NULL, NULL, NULL);

    /* Abort if scaling context could not be created */
    if (sws == NULL) {
        guacenc_log(GUAC_LOG_WARNING, "Failed to allocate software scaling "
                "context. Frame dropped.");
        av_freep(&src->data[0]);
        av_frame_free(&src);
        return;
    }

    /* Apply scaling, copying the source frame to the destination */
    sws_scale(sws, (const uint8_t* const*) src->data, src->linesize,
            0, src->height, dst->data, dst->linesize);

    /* Free scaling context */
    sws_freeContext(sws);

    /* Free source frame */
    av_freep(&src->data[0]);
    av_frame_free(&src);

}