Example #1
// decode a frame
static mp_image_t* decode(sh_video_t *sh,void* data,int len,int flags){
    long result = 1;
    int i;
    mp_image_t* mpi;
    ComponentResult cres;

    if(len<=0) return NULL; // skipped frame

    mpi=mpcodecs_get_image(sh, MP_IMGTYPE_STATIC, MP_IMGFLAG_PRESERVE,
	sh->disp_w, sh->disp_h);
    if(!mpi) return NULL;

    decpar.data = (char*)data;
    decpar.bufferSize = len;
    (**framedescHandle).dataSize=len;

if(!codec_initialized){
    result = QTNewGWorldFromPtr(
        &OutBufferGWorld,
//        kYUVSPixelFormat, //pixel format of new GWorld == YUY2
	(OSType)sh->context,
        &OutBufferRect,   //we should benchmark if yvu9 is faster for svq3, too
        0,
        0,
        0,
        mpi->planes[0],
        mpi->stride[0]);
    mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"NewGWorldFromPtr returned:%ld\n",65536-(result&0xffff));
//    if (65536-(result&0xFFFF) != 10000)
//	return NULL;

//    printf("IDesc=%d\n",sizeof(ImageDescription));

    decpar.imageDescription = framedescHandle;
    decpar.startLine=0;
    decpar.stopLine=(**framedescHandle).height;
    decpar.frameNumber = 1; //1
//    decpar.conditionFlags=0xFFD; // first
//    decpar.callerFlags=0x2001; // first
    decpar.matrixFlags = 0;
    decpar.matrixType = 0;
    decpar.matrix = 0;
    decpar.capabilities=&codeccap;
//    decpar.accuracy = 0x1680000; //codecNormalQuality;
    decpar.accuracy = codecNormalQuality;
//    decpar.port = OutBufferGWorld;
//    decpar.preferredOffscreenPixelSize=17207;

//    decpar.sequenceID=malloc(1000);
//    memset(decpar.sequenceID,0,1000);

//    SrcRect.top=17207;
//    SrcRect.left=0;
//    SrcRect.right=0;//image_width;
//    SrcRect.bottom=0;//image_height;

//    decpar.srcRect = SrcRect;
    decpar.srcRect = OutBufferRect;

    decpar.transferMode = srcCopy;
    decpar.dstPixMap = **GetGWorldPixMap( OutBufferGWorld);//destPixmap;

    cres=ImageCodecPreDecompress(ci,&decpar);
    mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"ImageCodecPreDecompress cres=0x%X\n",cres);

    if(decpar.wantedDestinationPixelTypes)
    { OSType *p=*(decpar.wantedDestinationPixelTypes);
      if(p) while(*p){
          mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"supported csp: 0x%08X %.4s\n",*p,(char *)p);
	  ++p;
      }
    }


//    decpar.conditionFlags=0x10FFF; // first
//    decpar.preferredOffscreenPixelSize=17207;

//    decpar.conditionFlags=0x10FFD; // first

//	cres=ImageCodecPreDecompress(ci,&decpar);
//    printf("ImageCodecPreDecompress cres=0x%X\n",cres);


    codec_initialized=1;
}

#if 0
    if(decpar.frameNumber==124){
	decpar.frameNumber=1;
	cres=ImageCodecPreDecompress(ci,&decpar);
	mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"ImageCodecPreDecompress cres=0x%lX\n",cres);
    }
#endif

    cres=ImageCodecBandDecompress(ci,&decpar);

    ++decpar.frameNumber;

    if(cres&0xFFFF){
	mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"ImageCodecBandDecompress cres=0x%X (-0x%X) %d\n",cres,-cres,cres);
	return NULL;
    }

//    for(i=0;i<8;i++)
//	printf("img_base[%d]=%p\n",i,((int*)decpar.dstPixMap.baseAddr)[i]);

if((int)sh->context==0x73797639){	// Sorenson 16-bit YUV -> std YVU9

    short *src0=(short *)((char*)decpar.dstPixMap.baseAddr+0x20);

    for(i=0;i<mpi->h;i++){
	int x;
	unsigned char* dst=mpi->planes[0]+i*mpi->stride[0];
	unsigned short* src=src0+i*((mpi->w+15)&(~15));
	for(x=0;x<mpi->w;x++) dst[x]=src[x];
    }
    src0+=((mpi->w+15)&(~15))*((mpi->h+15)&(~15));
    for(i=0;i<mpi->h/4;i++){
	int x;
	unsigned char* dst=mpi->planes[1]+i*mpi->stride[1];
	unsigned short* src=src0+i*(((mpi->w+63)&(~63))/4);
	for(x=0;x<mpi->w/4;x++) dst[x]=src[x];
	src+=((mpi->w+63)&(~63))/4;
    }
    src0+=(((mpi->w+63)&(~63))/4)*(((mpi->h+63)&(~63))/4);
    for(i=0;i<mpi->h/4;i++){
	int x;
	unsigned char* dst=mpi->planes[2]+i*mpi->stride[2];
	unsigned short* src=src0+i*(((mpi->w+63)&(~63))/4);
	for(x=0;x<mpi->w/4;x++) dst[x]=src[x];
	src+=((mpi->w+63)&(~63))/4;
    }

}


    return mpi;
}
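
The Sorenson branch above narrows the decoder's 16-bit samples into 8-bit YVU9 planes, stepping through source rows padded to 16-sample (luma) and 64-sample/4 (chroma) boundaries. A minimal sketch of that rounding arithmetic, using a hypothetical align_up() helper that is not part of the original file:

#include <stdio.h>

/* round n up to a multiple of a power-of-two alignment: the same
 * (n+15)&~15 / (n+63)&~63 idiom used by the repacking loops above */
static int align_up(int n, int align) {
    return (n + align - 1) & ~(align - 1);
}

int main(void) {
    int w = 320, h = 240;
    printf("luma stride:   %d samples\n", align_up(w, 16));      /* 320 */
    printf("chroma stride: %d samples\n", align_up(w, 64) / 4);  /* 80  */
    /* offset of the first chroma plane inside the decoder buffer: */
    printf("U plane offset: %d samples\n", align_up(w, 16) * align_up(h, 16));
    return 0;
}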
Example #2
static mp_image_t* decode(sh_video_t *sh, void* data, int len, int flags)
{
	xvid_dec_frame_t dec;
	xvid_dec_stats_t stats;
	mp_image_t* mpi = NULL;

	priv_t* p = sh->context;


	if(!data || len <= 0)
		return NULL;

	memset(&dec,0,sizeof(xvid_dec_frame_t));
	memset(&stats, 0, sizeof(xvid_dec_stats_t));
	dec.version = XVID_VERSION;
	stats.version = XVID_VERSION;

	dec.bitstream = data;
	dec.length = len;

	dec.general |= XVID_LOWDELAY
	/* XXX: if lowdelay is unset, and xvidcore internal buffers are
	 *      used => crash. MUST FIX */
	        | (filmeffect ? XVID_FILMEFFECT : 0 )
	        | (lumadeblock ? XVID_DEBLOCKY : 0 )
	        | (chromadeblock ? XVID_DEBLOCKUV : 0 );
#if XVID_API >= XVID_MAKE_API(4,1)
	dec.general |= (lumadering ? XVID_DEBLOCKY|XVID_DERINGY : 0 );
	dec.general |= (chromadering ? XVID_DEBLOCKUV|XVID_DERINGUV : 0 );
#endif
	dec.output.csp = p->cs;

	/* Decoding loop, because xvidcore may return VOL information for
	 * on-the-fly buffer resizing. In that case we must decode the VOL,
	 * init the VO, then decode the frame. */
	do {
		int consumed;

		/* If we don't know the frame size yet, don't even try to
		 * request a buffer; we must loop until we find a VOL so the
		 * VO plugin gets initialized and we can actually output
		 * something */
		if (p->vo_initialized) {
			mpi = mpcodecs_get_image(sh, p->img_type,
					MP_IMGFLAG_ACCEPT_STRIDE,
					sh->disp_w, sh->disp_h);
			if (!mpi) return NULL;

			if(p->cs != XVID_CSP_INTERNAL) {
				dec.output.plane[0] = mpi->planes[0];
				dec.output.plane[1] = mpi->planes[1];
				dec.output.plane[2] = mpi->planes[2];

				dec.output.stride[0] = mpi->stride[0];
				dec.output.stride[1] = mpi->stride[1];
				dec.output.stride[2] = mpi->stride[2];
			}
		}

		/* Decode data */
		consumed = xvid_decore(p->hdl, XVID_DEC_DECODE, &dec, &stats);
		if (consumed < 0) {
			mp_msg(MSGT_DECVIDEO, MSGL_ERR, "Decoding error\n");
			return NULL;
		}

		/* Found VOL information in the stats; if the VO plugin is
		 * not initialized yet, do it now */
		if (stats.type == XVID_TYPE_VOL && !p->vo_initialized) {
			sh->original_aspect = stats2aspect(&stats);
			if(!mpcodecs_config_vo(sh, stats.data.vol.width, stats.data.vol.height, IMGFMT_YV12))
				return NULL;

			/* Don't take this path twice */
			p->vo_initialized = !p->vo_initialized;
		}

		/* Don't forget to update buffer position and buffer length */
		dec.bitstream += consumed;
		dec.length -= consumed;
	} while ((stats.type == XVID_TYPE_VOL || stats.type == XVID_TYPE_NOTHING) && dec.length > 0);

	/* There are two ways to get out of the decoding loop:
	 *  - a frame has been returned
	 *  - no more data in buffer and no frames returned */

	/* If mpi is NULL, the decoder has returned nothing,
	 * so don't try to display internal buffers. */
	if (mpi != NULL && p->cs == XVID_CSP_INTERNAL) {
		mpi->planes[0] = dec.output.plane[0];
		mpi->planes[1] = dec.output.plane[1];
		mpi->planes[2] = dec.output.plane[2];

		mpi->stride[0] = dec.output.stride[0];
		mpi->stride[1] = dec.output.stride[1];
		mpi->stride[2] = dec.output.stride[2];
	}

	/* If we got out of the decoding loop because the buffer was empty
	 * and there was nothing to output yet, just return NULL */
	return (stats.type == XVID_TYPE_NOTHING) ? NULL : mpi;
}
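
stats2aspect() is defined elsewhere in vd_xvid4.c and is not shown in this excerpt. A hedged sketch of what such a helper could compute from the VOL's pixel-aspect fields (the constants follow the MPEG-4 PAR table; names and mapping are illustrative, not the actual function):

/* derive a display aspect ratio from VOL pixel-aspect information */
static float vol_to_aspect(int par_code, int par_w, int par_h,
                           int width, int height)
{
    float par;
    switch (par_code) {
    case 1:  par = 1.0f;          break; /* square pixels (VGA) */
    case 2:  par = 12.0f / 11.0f; break; /* 4:3 PAL             */
    case 3:  par = 10.0f / 11.0f; break; /* 4:3 NTSC            */
    case 4:  par = 16.0f / 11.0f; break; /* 16:9 PAL            */
    case 5:  par = 40.0f / 33.0f; break; /* 16:9 NTSC           */
    default: par = (par_h > 0) ? (float)par_w / par_h : 1.0f; /* extended */
    }
    return par * width / height; /* DAR = PAR * storage aspect */
}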
Example #3
// decode a frame
static mp_image_t* decode(sh_video_t *sh,void* data,int len,int flags){
    mp_image_t* mpi;
    int frame_size;
    int format = sh->bih ? sh->bih->biCompression : sh->format;
    
    if(len<=0) return NULL; // skipped frame

    mpi=mpcodecs_get_image(sh, MP_IMGTYPE_EXPORT, 0, 
	sh->disp_w, sh->disp_h);
    if(!mpi) return NULL;

    if(mpi->flags&MP_IMGFLAG_PLANAR){
	// TODO !!!
	mpi->planes[0]=data;
	mpi->stride[0]=mpi->width;
	frame_size=mpi->stride[0]*mpi->h;
	if((mpi->imgfmt == IMGFMT_NV12) || (mpi->imgfmt == IMGFMT_NV21))
	{
	    mpi->planes[1]=mpi->planes[0]+mpi->width*mpi->height;
	    mpi->stride[1]=mpi->chroma_width;
	    frame_size+=mpi->chroma_width*mpi->chroma_height;
	} else if(mpi->flags&MP_IMGFLAG_YUV) {
    	    int cb=2, cr=1;
    	    if(mpi->flags&MP_IMGFLAG_SWAPPED) {
        	cb=1; cr=2;
    	    }
            // Support for some common Planar YUV formats
	    /* YV12,I420,IYUV */
            mpi->planes[cb]=mpi->planes[0]+mpi->width*mpi->height;
            mpi->stride[cb]=mpi->chroma_width;
            mpi->planes[cr]=mpi->planes[cb]+mpi->chroma_width*mpi->chroma_height;
            mpi->stride[cr]=mpi->chroma_width;
	    frame_size+=2*mpi->chroma_width*mpi->chroma_height;
       	}
    } else {
	mpi->planes[0]=data;
	mpi->stride[0]=mpi->width*(mpi->bpp/8);
	// .AVI files have uncompressed lines 4-byte aligned:
	if(sh->format==0 || sh->format==3) mpi->stride[0]=(mpi->stride[0]+3)&(~3);
	if(mpi->imgfmt==IMGFMT_RGB8 || mpi->imgfmt==IMGFMT_BGR8){
	    // export palette:
	    mpi->planes[1]=sh->bih ? (unsigned char*)(sh->bih+1) : NULL;
#if 0
	    printf("Exporting palette: %p !!\n",mpi->planes[1]);
	    {	unsigned char* p=mpi->planes[1];
		int i;
		for(i=0;i<64;i++) printf("%3d: %02X %02X %02X (%02X)\n",i,p[4*i],p[4*i+1],p[4*i+2],p[4*i+3]);
	    }
#endif
	}
	frame_size=mpi->stride[0]*mpi->h;
	if (format == MKTAG('y', 'u', 'v', '2')) {
	  int i;
	  for (i = 1; i < frame_size; i += 2)
	    mpi->planes[0][i] ^= 128;
	}
	if(mpi->bpp<8) frame_size=frame_size*mpi->bpp/8;
    }

    if(len<frame_size){
        mp_msg(MSGT_DECVIDEO,MSGL_WARN,"Frame too small! (%d<%d) Wrong format?\n",
	    len,frame_size);
	return NULL;
    }
    
    return mpi;
}
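
The frame_size check at the bottom guards against truncated input. For 4:2:0 planar formats the arithmetic is fixed: each chroma plane is a quarter of the luma plane, so a raw frame must carry w*h*3/2 bytes (NV12 interleaves U and V into one plane of the same total size). A quick sanity-check sketch:

#include <stdio.h>

int main(void) {
    int w = 640, h = 480;
    int luma   = w * h;             /* Y plane                */
    int chroma = (w / 2) * (h / 2); /* one 4:2:0 chroma plane */
    printf("YV12 frame size: %d bytes\n", luma + 2 * chroma); /* 460800 */
    return 0;
}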
Example #4
/* decode a frame */
static
mp_image_t *decode(sh_video_t *sh, void *raw, int len, int flags)
{
  SGIInfo *info = sh->context;
  unsigned char *data = raw;
  mp_image_t *mpi;

  if (len <= 0) {
    return NULL; /* skip frame */
  }

  read_sgi_header(data, info);

  /* make sure this is an SGI image file */
  if (info->magic != SGI_MAGIC) {
    mp_msg(MSGT_DECVIDEO, MSGL_INFO, "Bad magic number in image.\n");
    return NULL;
  }

  /* check image depth */
  if (info->bytes_per_channel != 1) {
    mp_msg(MSGT_DECVIDEO, MSGL_INFO,
        "Unsupported bytes per channel value %i.\n", info->bytes_per_channel);
    return NULL;
  }

  /* check image dimension */
  if (info->dimension != 2 && info->dimension != 3) {
    mp_msg(MSGT_DECVIDEO, MSGL_INFO, "Unsupported image dimension %i.\n",
        info->dimension);
    return NULL;
  }

  /* change rgba images to rgb so alpha channel will be ignored */
  if (info->zsize == SGI_RGBA_IMAGE) {
    info->zsize = SGI_RGB_IMAGE;
  }

  /* check image depth */
  if (info->zsize != SGI_RGB_IMAGE && info->zsize != SGI_GRAYSCALE_IMAGE) {
    mp_msg(MSGT_DECVIDEO, MSGL_INFO, "Unsupported image depth.\n");
    return NULL;
  }

  /* (re)init libvo if image size is changed */
  if (last_x != info->xsize || last_y != info->ysize)
  {
    last_x = info->xsize;
    last_y = info->ysize;

    if (!mpcodecs_config_vo(sh, info->xsize, info->ysize, outfmt)) {
      mp_msg(MSGT_DECVIDEO, MSGL_INFO, "Config vo failed:\n");
      return NULL;
    }
  }

  if (!(mpi = mpcodecs_get_image(sh, MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE,
          info->xsize, info->ysize))) {
    return NULL;
  }

  if (info->rle) {
    decode_rle_sgi(info, data, mpi);
  } else {
    decode_uncompressed_sgi(info, data, mpi);
  }

  return mpi;
}
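
read_sgi_header() is defined elsewhere in the file; the probe it enables is simple because SGI image headers are big-endian, with the 16-bit magic 474 (0x01DA) first. A hedged sketch of an equivalent standalone check, assuming only that layout:

#include <stdint.h>

#define SGI_MAGIC 474 /* 0x01DA */

static uint16_t be16(const unsigned char *p) {
    return (uint16_t)((p[0] << 8) | p[1]);
}

static int looks_like_sgi(const unsigned char *data, int len) {
    return len >= 2 && be16(data) == SGI_MAGIC;
}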
Example #5
// decode a frame
static mp_image_t* decode(sh_video_t *sh,void* data,int len,int flags)
{
    int r;
    mp_image_t* mpi;
    lzo_context_t *priv = sh->context;
    int w = priv->bufsz;

    if (len <= 0) {
	    return NULL; // skipped frame
    }

    r = av_lzo1x_decode(priv->buffer, &w, data, &len);
    if (r) {
	/* this should NEVER happen */
	mp_msg (MSGT_DECVIDEO, MSGL_ERR,
		"[%s] internal error - decompression failed: %d\n", MOD_NAME, r);
      return NULL;
    }

    if (priv->codec == -1) {
	// detect RGB24 vs. YV12 via decoded size
	mp_msg (MSGT_DECVIDEO, MSGL_V, "[%s] 2 depth %d, format %d data %p len (%d) (%d)\n",
	    MOD_NAME, sh->bih->biBitCount, sh->format, data, len, sh->bih->biSizeImage
	    );

	if (w == 0) {
	    priv->codec = IMGFMT_BGR24;
	    mp_msg (MSGT_DECVIDEO, MSGL_V, "[%s] codec choosen is BGR24\n", MOD_NAME);
	} else if (w == (sh->bih->biSizeImage)/2) {
	    priv->codec = IMGFMT_YV12;
	    mp_msg (MSGT_DECVIDEO, MSGL_V, "[%s] codec choosen is YV12\n", MOD_NAME);
	} else {
	    priv->codec = -1;
	    mp_msg(MSGT_DECVIDEO,MSGL_ERR,"[%s] Unsupported out_fmt\n", MOD_NAME);
	    return NULL;
	}

	if(!mpcodecs_config_vo(sh,sh->disp_w,sh->disp_h,priv->codec)) {
	    priv->codec = -1;
	    return NULL;
	}
    }

    mpi = mpcodecs_get_image(sh, MP_IMGTYPE_EXPORT, 0,
	sh->disp_w, sh->disp_h);


    if (!mpi) {
	    mp_msg (MSGT_DECVIDEO, MSGL_ERR, "[%s] mpcodecs_get_image failed\n", MOD_NAME);
	    return NULL;
    }

    mpi->planes[0] = priv->buffer;
    if (priv->codec == IMGFMT_BGR24)
        mpi->stride[0] = 3 * sh->disp_w;
    else {
        mpi->stride[0] = sh->disp_w;
        mpi->planes[2] = priv->buffer + sh->disp_w*sh->disp_h;
        mpi->stride[2] = sh->disp_w / 2;
        mpi->planes[1] = priv->buffer + sh->disp_w*sh->disp_h*5/4;
        mpi->stride[1] = sh->disp_w / 2;
    }

    mp_msg (MSGT_DECVIDEO, MSGL_DBG2,
		"[%s] decompressed %lu bytes into %lu bytes\n", MOD_NAME,
		(unsigned long) len, (unsigned long) w);

    return mpi;
}
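
The format heuristic works because av_lzo1x_decode() takes the output length by pointer and, on return, leaves the number of unused output bytes in it ('len' likewise shrinks to the unconsumed input). Assuming priv->bufsz was sized for an RGB24 frame (w*h*3), a YV12 frame (w*h*3/2) fills exactly half of it. A sketch of that branch in isolation:

/* bytes_left: *outlen after av_lzo1x_decode(); bufsz: RGB24 frame size */
static int classify_bpp(int bytes_left, int bufsz) {
    if (bytes_left == 0)         return 24; /* buffer filled: RGB24 */
    if (bytes_left == bufsz / 2) return 12; /* half used: YV12      */
    return -1;                              /* unknown layout       */
}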
Example #6
File: vd_qtvideo.c Project: azuwis/mplayer
// init driver
static int init(sh_video_t *sh){
    OSErr result = 1;
    int extradata_size = sh->bih ? sh->bih->biSize - sizeof(*sh->bih) : 0;
    void *extradata = sh->bih + 1;

    if (!sh->ImageDesc)
        mp_msg(MSGT_DECVIDEO,MSGL_ERR,"sh->ImageDesc not set, try -demuxer mov if this fails.\n");

#ifndef CONFIG_QUICKTIME
#ifdef WIN32_LOADER
    Setup_LDT_Keeper();
#endif

    //preload quicktime.qts to avoid the problems caused by the hardcoded path inside the dll
    qtime_qts = LoadLibraryA("QuickTime.qts");
    if(!qtime_qts){
        mp_msg(MSGT_DECVIDEO,MSGL_ERR,"unable to load QuickTime.qts\n" );
        return 0;
    }

    handler = LoadLibraryA("qtmlClient.dll");
    if(!handler){
        mp_msg(MSGT_DECVIDEO,MSGL_ERR,"unable to load qtmlClient.dll\n");
        return 0;
    }

    InitializeQTML = (OSErr (*)(long))GetProcAddress(handler, "InitializeQTML");
    EnterMovies = (OSErr (*)(void))GetProcAddress(handler, "EnterMovies");
    ExitMovies = (void (*)(void))GetProcAddress(handler, "ExitMovies");
    DecompressSequenceBegin = (OSErr (*)(ImageSequence*,ImageDescriptionHandle,CGrafPtr,void *,const Rect *,MatrixRecordPtr,short,RgnHandle,CodecFlags,CodecQ,DecompressorComponent))GetProcAddress(handler, "DecompressSequenceBegin");
    DecompressSequenceFrameS = (OSErr (*)(ImageSequence,Ptr,long,CodecFlags,CodecFlags*,ICMCompletionProcRecordPtr))GetProcAddress(handler, "DecompressSequenceFrameS");
    GetGWorldPixMap = (PixMapHandle (*)(GWorldPtr))GetProcAddress(handler, "GetGWorldPixMap");
    QTNewGWorldFromPtr = (OSErr(*)(GWorldPtr *,OSType,const Rect *,CTabHandle,void*,GWorldFlags,void *,long))GetProcAddress(handler, "QTNewGWorldFromPtr");
    NewHandleClear = (OSErr(*)(Size))GetProcAddress(handler, "NewHandleClear");
    DisposeHandle = (void (*)(Handle))GetProcAddress(handler, "DisposeHandle");
    DisposeGWorld = (void (*)(GWorldPtr))GetProcAddress(handler, "DisposeGWorld");
    CDSequenceEnd = (OSErr (*)(ImageSequence))GetProcAddress(handler, "CDSequenceEnd");

    if(!InitializeQTML || !EnterMovies || !DecompressSequenceBegin || !DecompressSequenceFrameS){
	mp_msg(MSGT_DECVIDEO,MSGL_ERR,"invalid qtmlClient.dll!\n");
	return 0;
    }

    result=InitializeQTML(kInitializeQTMLDisableDirectSound |
                          kInitializeQTMLUseGDIFlag |
                          kInitializeQTMLDisableDDClippers);
    mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"InitializeQTML returned %d\n",result);
#endif /* CONFIG_QUICKTIME */

    result=EnterMovies();
    mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"EnterMovies returned %d\n",result);

    //make a yuy2 gworld
    OutBufferRect.top=0;
    OutBufferRect.left=0;
    OutBufferRect.right=sh->disp_w;
    OutBufferRect.bottom=sh->disp_h;

    //Fill the imagedescription for our SVQ3 frame
    //we can probably get this from Demuxer
    if (!sh->ImageDesc && extradata_size >= sizeof(ImageDescription) &&
        ((ImageDescription *)extradata)->idSize <= extradata_size)
        sh->ImageDesc = extradata;
    if (sh->ImageDesc) {
        mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"ImageDescription size: %d\n",((ImageDescription*)(sh->ImageDesc))->idSize);
        framedescHandle=(ImageDescriptionHandle)NewHandleClear(((ImageDescription*)(sh->ImageDesc))->idSize);
        memcpy(*framedescHandle,sh->ImageDesc,((ImageDescription*)(sh->ImageDesc))->idSize);
    } else {
        // assume extradata consists only of the atoms, build the other parts
        ImageDescription *idesc;
        int size = sizeof(*idesc) + extradata_size;
        mp_msg(MSGT_DECVIDEO, MSGL_V, "Generating a ImageDescription\n");
        framedescHandle=(ImageDescriptionHandle)NewHandleClear(size);
        idesc = *framedescHandle;
        memcpy(idesc + 1, extradata, extradata_size);
        idesc->idSize = size;
        idesc->width  = sh->disp_w;
        idesc->height = sh->disp_h;
    }
    dump_ImageDescription(*framedescHandle);

    (**framedescHandle).cType = bswap_32(sh->format);
    sh->context = (void *)kYUVSPixelFormat;
    {
	int imgfmt = sh->codec->outfmt[sh->outfmtidx];
	int qt_imgfmt;
    switch(imgfmt)
    {
	case IMGFMT_YUY2:
	    qt_imgfmt = kYUVSPixelFormat;
	    break;
	case IMGFMT_YVU9:
	    qt_imgfmt = 0x73797639; //kYVU9PixelFormat;
	    break;
	case IMGFMT_YV12:
	    qt_imgfmt = 0x79343230;
	    break;
	case IMGFMT_UYVY:
	    qt_imgfmt = k2vuyPixelFormat;
	    break;
	case IMGFMT_YVYU:
	    qt_imgfmt = kYVYU422PixelFormat;
	    imgfmt = IMGFMT_YUY2;
	    break;
	case IMGFMT_RGB16:
	    qt_imgfmt = k16LE555PixelFormat;
	    break;
	case IMGFMT_BGR24:
	    qt_imgfmt = k24BGRPixelFormat;
	    break;
	case IMGFMT_BGR32:
	    qt_imgfmt = k32BGRAPixelFormat;
	    break;
	case IMGFMT_RGB32:
	    qt_imgfmt = k32RGBAPixelFormat;
	    break;
	default:
	    mp_msg(MSGT_DECVIDEO,MSGL_ERR,"Unknown requested csp\n");
	    return 0;
    }
    mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"imgfmt: %s qt_imgfmt: %.4s\n", vo_format_name(imgfmt), (char *)&qt_imgfmt);
    sh->context = (void *)qt_imgfmt;
    if(!mpcodecs_config_vo(sh,sh->disp_w,sh->disp_h,imgfmt)) return 0;
    }

    mpi=mpcodecs_get_image(sh, MP_IMGTYPE_STATIC, MP_IMGFLAG_PRESERVE,
	sh->disp_w, sh->disp_h);
    if(!mpi) return 0;

    result = QTNewGWorldFromPtr(
        &OutBufferGWorld,
	(OSType)sh->context,
        &OutBufferRect,   //we should benchmark if yvu9 is faster for svq3, too
        0,
        0,
        0,
        mpi->planes[0],
        mpi->stride[0]);
    if (result) {
        mp_msg(MSGT_DECVIDEO,MSGL_ERR,"QTNewGWorldFromPtr result=%d\n",result);
        return 0;
    }

    result = DecompressSequenceBegin(&imageSeq, framedescHandle, (CGrafPtr)OutBufferGWorld,
                                     NULL, NULL, NULL, srcCopy,  NULL, 0,
                                     codecNormalQuality, 0);
    if(result) {
        mp_msg(MSGT_DECVIDEO,MSGL_ERR,"DecompressSequenceBegin result=%d\n",result);
        return 0;
    }

    return 1;
}
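
The pixel format is smuggled through sh->context as a QuickTime OSType, i.e. a four-character code packed big-end-first into 32 bits. A small sketch showing how the magic numbers above decode:

#include <stdio.h>

static void print_fourcc(unsigned int code) {
    printf("0x%08X = '%c%c%c%c'\n", code,
           (code >> 24) & 0xFF, (code >> 16) & 0xFF,
           (code >>  8) & 0xFF,  code        & 0xFF);
}

int main(void) {
    print_fourcc(0x73797639); /* 'syv9' -- kYVU9PixelFormat  */
    print_fourcc(0x79343230); /* 'y420' -- planar 4:2:0 YV12 */
    return 0;
}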
Example #7
File: vd_xanim.c Project: HermiG/mplayer2
// decode a frame
static mp_image_t* decode(sh_video_t *sh, void *data, int len, int flags)
{
    vd_xanim_ctx *priv = sh->context;
    unsigned int ret;

    if (len <= 0)
	return NULL; // skipped frame

    priv->decinfo->skip_flag = (flags&3)?1:0;

    if(sh->codec->outflags[sh->outfmtidx] & CODECS_FLAG_STATIC){
	// allocate static buffer for cvid-like codecs:
	priv->mpi = mpcodecs_get_image(sh, MP_IMGTYPE_STATIC,
	    MP_IMGFLAG_ACCEPT_STRIDE|MP_IMGFLAG_PREFER_ALIGNED_STRIDE,
	    (sh->disp_w+3)&(~3), (sh->disp_h+3)&(~3));
	if (!priv->mpi) return NULL;
	ret = priv->dec_func((uint8_t*)priv->mpi, data, len, priv->decinfo);
    } else {
	// leave the buffer allocation to the codec; pass sh_video and priv
	priv->mpi=NULL;
	ret = priv->dec_func((uint8_t*)sh, data, len, priv->decinfo);
    }

    if (ret == ACT_DLTA_NORM)
	return priv->mpi;

    if (ret & ACT_DLTA_MAPD)
	mp_msg(MSGT_DECVIDEO, MSGL_DBG2, "mapd\n");
/*
    if (!(ret & ACT_DLT_MAPD))
	xacodec_driver->decinfo->map_flag = 0;
    else
    {
	xacodec_driver->decinfo->map_flag = 1;
	xacodec_driver->decinfo->map = ...
    }
*/

    if (ret & ACT_DLTA_XOR)
    {
	mp_msg(MSGT_DECVIDEO, MSGL_DBG2, "xor\n");
	return priv->mpi;
    }

    /* nothing changed */
    if (ret & ACT_DLTA_NOP)
    {
	mp_msg(MSGT_DECVIDEO, MSGL_DBG2, "nop\n");
	return NULL;
    }

    /* frame dropped (also display latest frame) */
    if (ret & ACT_DLTA_DROP)
    {
	mp_msg(MSGT_DECVIDEO, MSGL_DBG2, "drop\n");
	return NULL;
    }

    if (ret & ACT_DLTA_BAD)
    {
	mp_msg(MSGT_DECVIDEO, MSGL_DBG2, "bad\n");
	return NULL;
    }

    /* used for double buffer */
    if (ret & ACT_DLTA_BODY)
    {
	mp_msg(MSGT_DECVIDEO, MSGL_DBG2, "body\n");
	return NULL;
    }

    return priv->mpi;
}
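
The xanim return value is a bit mask rather than an enum: several ACT_DLTA_* bits can be set at once, which is why everything after the ACT_DLTA_NORM fast path tests with '&'. A compact sketch of the same dispatch style (flag values invented for illustration; the real ones live in xanim's headers):

#include <stdio.h>

#define DLTA_XOR  (1u << 0) /* hypothetical values */
#define DLTA_NOP  (1u << 1)
#define DLTA_DROP (1u << 2)

static const char *describe(unsigned ret) {
    if (ret & DLTA_XOR)  return "xor: display the updated buffer";
    if (ret & DLTA_NOP)  return "nop: nothing changed";
    if (ret & DLTA_DROP) return "drop: frame dropped";
    return "normal frame";
}

int main(void) {
    printf("%s\n", describe(DLTA_NOP | DLTA_DROP)); /* first match wins */
    return 0;
}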
Example #8
File: vd_xanim.c Project: HermiG/mplayer2
static void XA_YUV1611_Convert(unsigned char *image_p, unsigned int imagex, unsigned int imagey,
    unsigned int i_x, unsigned int i_y, YUVBufs *yuv, YUVTabs *yuv_tabs,
    unsigned int map_flag, unsigned int *map, XA_CHDR *chdr)
{
    sh_video_t *sh = (sh_video_t*)image_p;
    vd_xanim_ctx *priv = sh->context;
    mp_image_t *mpi;
    int y;
    int ystride=(yuv->y_w)?yuv->y_w:imagex;
    int uvstride=(yuv->uv_w)?yuv->uv_w:(imagex/4);

    mp_dbg(MSGT_DECVIDEO,MSGL_DBG3, "YUVTabs:  %ld %p %p %p %p %p\n",yuv_tabs->Uskip_mask,
	yuv_tabs->YUV_Y_tab,
	yuv_tabs->YUV_UB_tab,
	yuv_tabs->YUV_VR_tab,
	yuv_tabs->YUV_UG_tab,
	yuv_tabs->YUV_VG_tab );

    mp_dbg(MSGT_DECVIDEO,MSGL_DBG3, "XA_YUV1611_Convert('image: %p', 'imagex: %d', 'imagey: %d', 'i_x: %d', 'i_y: %d', 'yuv_bufs: %p', 'yuv_tabs: %p', 'map_flag: %d', 'map: %p', 'chdr: %p')",
	image_p, imagex, imagey, i_x, i_y, yuv, yuv_tabs, map_flag, map, chdr);

    mp_dbg(MSGT_DECVIDEO,MSGL_DBG3, "YUV: %p %p %p %p (%d) %dx%d %dx%d\n",
	yuv->Ybuf,yuv->Ubuf,yuv->Vbuf,yuv->the_buf,yuv->the_buf_size,
	yuv->y_w,yuv->y_h,yuv->uv_w,yuv->uv_h);

    if(!yuv_tabs->YUV_Y_tab){
	// standard YVU9 - simply export it!
	mpi = mpcodecs_get_image(sh, MP_IMGTYPE_EXPORT, 0,
	    sh->disp_w, sh->disp_h);
	priv->mpi=mpi; if(!mpi) return; // ERROR!
	mpi->planes[0]=yuv->Ybuf;
	mpi->planes[1]=yuv->Ubuf;
	mpi->planes[2]=yuv->Vbuf;
	mpi->width=imagex;
	mpi->stride[0]=ystride; //i_x; // yuv->y_w
	mpi->stride[1]=mpi->stride[2]=uvstride; //i_x/4; // yuv->uv_w
	return;
    }

    // allocate TEMP buffer and convert the image:
    mpi = mpcodecs_get_image(sh, MP_IMGTYPE_TEMP, MP_IMGFLAG_ACCEPT_STRIDE,
	sh->disp_w, sh->disp_h);
    priv->mpi=mpi; if(!mpi) return; // ERROR!

    // convert the Y plane:
    for(y=0;y<(int)imagey;y++){
	unsigned int x;
	unsigned char* s=yuv->Ybuf+ystride*y;
	unsigned char* d=mpi->planes[0]+mpi->stride[0]*y;
	for(x=0;x<imagex;x++) d[x]=s[x]<<1;
    }

    imagex>>=2;
    imagey>>=2;

    // convert the U plane:
    for(y=0;y<(int)imagey;y++){
	unsigned int x;
	unsigned char* s=yuv->Ubuf+uvstride*y;
	unsigned char* d=mpi->planes[1]+mpi->stride[1]*y;
	for(x=0;x<imagex;x++) d[x]=s[x]<<1;
    }

    // convert the V plane:
    for(y=0;y<(int)imagey;y++){
	unsigned int x;
	unsigned char* s=yuv->Vbuf+uvstride*y;
	unsigned char* d=mpi->planes[2]+mpi->stride[2]*y;
	for(x=0;x<imagex;x++) d[x]=s[x]<<1;
    }
}
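
All three conversion loops do d[x] = s[x] << 1; a plausible reading is that this codec keeps 7-bit samples, so the shift rescales them to the usual 8-bit range. The chroma passes run at a quarter of the luma dimensions because YVU9 subsamples chroma 4x4 (hence imagex>>=2 and imagey>>=2). The shared pattern, factored out as a sketch:

/* copy a plane while widening 7-bit samples to 8 bits,
 * honoring separate source and destination strides */
static void copy_plane_7to8(unsigned char *dst, int dst_stride,
                            const unsigned char *src, int src_stride,
                            int w, int h)
{
    int x, y;
    for (y = 0; y < h; y++)
        for (x = 0; x < w; x++)
            dst[y*dst_stride + x] = (unsigned char)(src[y*src_stride + x] << 1);
}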
Example #9
File: vd_ijpg.c Project: BOTCrusher/sagetv
// decode a frame
static mp_image_t* decode(sh_video_t *sh,void* data,int len,int flags){
 mp_image_t * mpi = NULL;
 int	      width,height,depth,i;

 if ( len <= 0 ) return NULL; // skipped frame

 cinfo.err=jpeg_std_error( &jerr.pub );
 jerr.pub.error_exit=my_error_exit;
 if( setjmp( jerr.setjmp_buffer ) )
  {
   mp_msg( MSGT_DECVIDEO,MSGL_ERR,"[ijpg] setjmp error ...\n" );
   return NULL;
  }
  
 jpeg_create_decompress( &cinfo );
 jpeg_buf_src( &cinfo,data,len );
 jpeg_read_header( &cinfo,TRUE );
 sh->disp_w=width=cinfo.image_width;
 sh->disp_h=height=cinfo.image_height;
 jpeg_start_decompress( &cinfo );
 depth=cinfo.output_components * 8;

 switch( depth ) {
   case 8:
   case 24: break;
   default: mp_msg( MSGT_DECVIDEO,MSGL_ERR,"Sorry, unsupported JPEG colorspace: %d.\n",depth ); return NULL;
 }

 if ( last_w!=width || last_h!=height )
  {
   if(!mpcodecs_config_vo( sh,width,height, IMGFMT_RGB24 )) return NULL;
   if(temp_row) free(temp_row);
   temp_row=malloc(3*width+16);
   last_w=width; last_h=height;
  }

 mpi=mpcodecs_get_image( sh,MP_IMGTYPE_TEMP,MP_IMGFLAG_ACCEPT_STRIDE,width,height );
 if ( !mpi ) return NULL;

 row_stride=cinfo.output_width * cinfo.output_components;

 for ( i=0;i < height;i++ )
  {
   unsigned char * drow = mpi->planes[0] + mpi->stride[0] * i;
   unsigned char * row = (mpi->imgfmt==IMGFMT_RGB24 && depth==24) ? drow : temp_row;
   jpeg_read_scanlines( &cinfo,(JSAMPLE**)&row,1 );
   if(depth==8){
       // grayscale -> rgb/bgr 24/32
       int x;
       if(mpi->bpp==32)
         for(x=0;x<width;x++) drow[4*x]=0x010101*row[x];
       else
         for(x=0;x<width;x++) drow[3*x+0]=drow[3*x+1]=drow[3*x+2]=row[x];
   } else {
       int x;
       switch(mpi->imgfmt){
       // rgb24 -> bgr24
       case IMGFMT_BGR24:
           for(x=0;x<3*width;x+=3){
	       drow[x+0]=row[x+2];
	       drow[x+1]=row[x+1];
	       drow[x+2]=row[x+0];
	   }
	   break;
       // rgb24 -> bgr32
       case IMGFMT_BGR32:
           for(x=0;x<width;x++){
#ifdef WORDS_BIGENDIAN
	       drow[4*x+1]=row[3*x+0];
	       drow[4*x+2]=row[3*x+1];
	       drow[4*x+3]=row[3*x+2];
#else
	       drow[4*x+0]=row[3*x+2];
	       drow[4*x+1]=row[3*x+1];
	       drow[4*x+2]=row[3*x+0];
#endif
	   }
	   break;
       }
   }
  }
  
 jpeg_finish_decompress(&cinfo);                                                                   
 jpeg_destroy_decompress(&cinfo);                                                                  
	    
 return mpi;
}
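
The per-row RGB24 -> BGR24 shuffle in the loop above, factored out for clarity (a sketch, not part of the original file):

static void rgb24_to_bgr24_row(unsigned char *dst,
                               const unsigned char *src, int width)
{
    int x;
    for (x = 0; x < 3 * width; x += 3) {
        dst[x + 0] = src[x + 2]; /* B from source byte 2 (B in RGB) */
        dst[x + 1] = src[x + 1]; /* G stays put                     */
        dst[x + 2] = src[x + 0]; /* R from source byte 0 (R in RGB) */
    }
}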