Code example #1
File: transform8x8.c  Project: Cheng-Hsu/JM
/*!
 ***********************************************************************
 * \brief
 *    Inverse 8x8 transformation
 ***********************************************************************
 */ 
void itrans8x8(Macroblock *currMB,   //!< current macroblock
               ColorPlane pl,        //!< used color plane       
               int ioff,             //!< horizontal sample offset of the 8x8 block
               int joff)             //!< vertical sample offset of the 8x8 block
{
  ImageParameters *p_Img = currMB->p_Img;
  Slice *currSlice = currMB->p_Slice;
  int i,j;

  imgpel **mpr    = currSlice->mb_pred[pl];
  imgpel **mb_rec = currSlice->mb_rec[pl];
  int    **m7     = currSlice->mb_rres[pl];
  int     max_imgpel_value = p_Img->max_imgpel_value_comp[pl];

  if (currMB->is_lossless == TRUE)
  {
    for( j = joff; j < joff + 8; j++)
    {
      for( i = ioff; i < ioff + 8; i++)
        mb_rec[j][i] = (imgpel) iClip1(max_imgpel_value, (m7[j][i] + (long)mpr[j][i])); 
    }
  }
  else
  {
    inverse8x8(m7, m7, joff, ioff);
    for( j = joff; j < joff + 8; j++)
    {
      for( i = ioff; i < ioff + 8; i++)
        //mb_rec[j][i] = (imgpel) iClip1(max_imgpel_value, rshift_rnd_sf((m7[j][i] + ((long)mpr[j][i] << DQ_BITS_8)), DQ_BITS_8)); 
        mb_rec[j][i] = (imgpel) iClip1(max_imgpel_value, mpr[j][i] + rshift_rnd_sf(m7[j][i], DQ_BITS_8)); 
    }
  }
}
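
Every example on this page clips the reconstructed or filtered sample with iClip1() and rounds with one of the rshift/shift helpers from the JM reference software. A minimal sketch of those helpers, assuming the usual macros.h definitions (DQ_BITS_8 is the inverse-transform scaling constant, 6 in JM; check the project's macros.h and defines.h for the authoritative versions):

static inline int iClip1(int high, int x)            /* clip x to the range [0, high] */
{
  return (x < 0) ? 0 : ((x > high) ? high : x);
}

static inline int rshift_rnd_sf(int x, int a)        /* right shift by a with rounding, a > 0 */
{
  return (x + (1 << (a - 1))) >> a;
}

static inline int rshift_rnd(int x, int a)           /* rounding shift; a <= 0 becomes a left shift */
{
  return (a > 0) ? ((x + (1 << (a - 1))) >> a) : (x << (-a));
}

static inline int shift_off_sf(int x, int o, int a)  /* add offset o, then shift right by a */
{
  return (x + o) >> a;
}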
Code example #2
File: cconv_yuv2rgb.c  Project: Cheng-Hsu/JM
/*! 
*************************************************************************************
* \brief
*    YUV to RGB conversion
*    ITU 601 with and without offset consideration
*    Upsampling by repetition of chroma samples in case of 4:2:0 and 4:2:2
 *    Method not supported for 4:0:0 content
*************************************************************************************
*/
void YUVtoRGB(ImageParameters *p_Img, ImageStructure *YUV, ImageStructure *RGB)
{
  int i, j, j_cr, i_cr;
  int sy, su, sv;
  int wbuv, wguv, wruv;
  imgpel *Y, *U, *V, *R, *G, *B;
  FrameFormat format = YUV->format;
  int width = format.width;
  int height = format.height;
  int max_value = format.max_value[0];

  // Color conversion
  for (j = 0; j < height; j++)
  {
    j_cr = j >> p_Img->shift_cr_y;
    Y = YUV->data[0][j];
    U = YUV->data[1][j_cr];
    V = YUV->data[2][j_cr];
    R = RGB->data[0][j];
    G = RGB->data[1][j];
    B = RGB->data[2][j];

    for (i = 0; i < width; i++)
    {
      i_cr = i >> p_Img->shift_cr_x;

      su = U[i_cr] - p_Img->offset_cr;
      sv = V[i_cr] - p_Img->offset_cr;

      wruv = p_Img->wka1 * sv;
      wguv = p_Img->wka2 * su + p_Img->wka3 * sv;
      wbuv = p_Img->wka4 * su;

#ifdef YUV2RGB_YOFFSET // Y offset value of 16 is considered
      sy = p_Img->wka0 * (Y[i] - p_Img->offset_y);
#else
      sy = p_Img->wka0 * Y[i];
#endif

      R[i] = (imgpel) iClip1( max_value, rshift_rnd(sy + wruv, 16));
      G[i] = (imgpel) iClip1( max_value, rshift_rnd(sy + wguv, 16));
      B[i] = (imgpel) iClip1( max_value, rshift_rnd(sy + wbuv, 16));
    }
  }
  // Setting RGB FrameFormat
  RGB->format = format;  // copy format information from YUV to RGB
  RGB->format.yuv_format  = YUV444;
  RGB->format.color_model = CM_RGB;
  RGB->format.height_cr   = format.height;
  RGB->format.width_cr    = format.width;
  for (i = 1; i < 3; i++)
  {
    RGB->format.size_cmp[i]     = format.size_cmp[0];
    RGB->format.bit_depth[i]    = format.bit_depth[0];
    RGB->format.max_value[i]    = max_value;
    RGB->format.max_value_sq[i] = format.max_value_sq[0];
  }
}
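
The conversion above runs in 16.16 fixed point: the wka0..wka4 members are assumed to hold the inverse color-matrix coefficients scaled by 2^16, which the final rshift_rnd(..., 16) undoes. A hypothetical initialization sketch for the limited-range ITU-R BT.601 case with the Y offset of 16 (the real JM code sets these members elsewhere; the helper name and exact values are illustrative only):

static int to_fix16(double coef)   /* scale a floating-point coefficient to 16.16 fixed point */
{
  return (int)(coef * 65536.0 + (coef >= 0.0 ? 0.5 : -0.5));
}

static void init_bt601_coeffs_example(ImageParameters *p_Img)
{
  p_Img->wka0 = to_fix16(255.0 / 219.0);               /* luma scale (with Y offset of 16) */
  p_Img->wka1 = to_fix16(255.0 / 224.0 * 1.402);       /* Cr contribution to R             */
  p_Img->wka2 = to_fix16(-255.0 / 224.0 * 0.344136);   /* Cb contribution to G             */
  p_Img->wka3 = to_fix16(-255.0 / 224.0 * 0.714136);   /* Cr contribution to G             */
  p_Img->wka4 = to_fix16(255.0 / 224.0 * 1.772);       /* Cb contribution to B             */
}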
Code example #3
File: transform8x8.c  Project: c444b774/jm-decision
/*!
 * \brief  Lossless 8x8 reconstruction: rec = clip(residual + prediction); row pointers advance per line
 */
static void recon8x8_lossless(int **m7, imgpel **mb_rec, imgpel **mpr, int max_imgpel_value, int ioff)
{
  int i, j;
  for( j = 0; j < 8; j++)
  {
    for( i = ioff; i < ioff + 8; i++)
      (*mb_rec)[i] = (imgpel) iClip1(max_imgpel_value, ((*m7)[i] + (long)(*mpr)[i])); 
    mb_rec++;
    m7++;
    mpr++;
  }
}
Code example #4
/*!
 * \brief  1-D filtering at a "safe" position, i.e. all filter taps lie inside the picture
 */
static inline imgpel filter_safe_positions( const ImgProcessFilter1D *flt, const imgpel *p_in )
{
  const short *c_coef = flt->coef1;
  int htap = flt->c_tap;
  const imgpel *p1 = p_in - flt->c_taps_div2;// + 1; //bug;

  int val = 0, k;

  for (k = 0; k < htap ; k++)
  { 
    val += *(p1++) * *(c_coef++);
  }
  return (imgpel)iClip1( flt->max_pel_value, shift_off_sf( val, flt->c_offset, flt->c_normal1 ) );
}
Code example #5
/*!
 * \brief  Block reconstruction: rec = clip(prediction + rounded, down-shifted residual)
 */
void sample_reconstruct (imgpel **curImg, imgpel **mpr, int **mb_rres, int mb_x, int opix_x, int width, int height, int max_imgpel_value, int dq_bits)
{
  imgpel *imgOrg, *imgPred;
  int    *m7;
  int i, j;

  for (j = 0; j < height; j++)
  {
    imgOrg = &curImg[j][opix_x];
    imgPred = &mpr[j][mb_x];
    m7 = &mb_rres[j][mb_x]; 
    for (i=0;i<width;i++)
      *imgOrg++ = (imgpel) iClip1( max_imgpel_value, rshift_rnd_sf(*m7++, dq_bits) + *imgPred++);
  }
}
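
A hypothetical call for a single 4x4 luma block at offset (joff, ioff) inside the macroblock, reusing the variable names from example #1 (the DQ_BITS constant and the exact call site are assumptions, not shown on this page):

  sample_reconstruct(&currSlice->mb_rec[pl][joff], &currSlice->mb_pred[pl][joff],
                     &currSlice->mb_rres[pl][joff], ioff, ioff, 4, 4,
                     p_Img->max_imgpel_value_comp[pl], DQ_BITS);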
Code example #6
File: blk_prediction.c  Project: petrkalos/JM
/*!
 * \brief  Block reconstruction with an extra residual quantization step (quantize_mb) before adding the prediction
 */
void sample_reconstruct2 (imgpel **curImg, imgpel **mpr, int **mb_rres, int mb_y, int mb_x, int opix_x, int width, int height, int max_imgpel_value, int dq_bits, int pl, Macroblock *currMB)
{
  imgpel *imgOrg, *imgPred;
  int    *m7;
  int i, j;

  quantize_mb(mb_rres, width, height, mb_y, mb_x, pl, currMB);

  for (j = 0; j < height; j++)
  {
    imgOrg = &curImg[j][opix_x];
    imgPred = &mpr[j][mb_x];
    m7 = &mb_rres[j][mb_x];
    for (i = 0; i < width; i++)
    {
      *imgOrg++ = (imgpel) iClip1( max_imgpel_value, rshift_rnd_sf(*m7++, dq_bits) + *imgPred++);
    }
  }
}
Code example #7
/*!
 * \brief  1-D filtering near the right picture border: taps past the last available
 *         sample reuse that sample (edge replication), since p1 stops advancing
 */
static inline imgpel filter_even_unsafe_right_positions( const ImgProcessFilter1D *flt, const imgpel *p_in, int right_samples  )
{
  const short *c_coef = flt->coef1;
  int hmin = -flt->c_taps_div2;
  int hmax = flt->c_tap + hmin;
  const imgpel *p1 = p_in - flt->c_taps_div2 -1;

  int val = 0, k;

  for (k = hmin; k < right_samples ; k++)
  { 
    val +=  *(++p1) * *(c_coef++);
  }
  for (k = right_samples; k < hmax ; k++)
  { 
    val += *p1 * *(c_coef++);
  }
  return (imgpel)iClip1( flt->max_pel_value, shift_off_sf( val, flt->c_offset, flt->c_normal1 ) );
}
Code example #8
File: img_process_mux_tab.cpp  Project: hwyhit/JSVM
/*!
 * \brief  Vertical 1-D filtering near the bottom picture border (stride-based access);
 *         taps below the last available row reuse that row (edge replication)
 */
static inline imgpel filter_unsafe_down_positions( const ImgProcessFilter1D *flt, const imgpel *p_in, int down_samples, int stride_in_imgpel  )
{
  const short *c_coef = flt->coef1;
  int hmin = -flt->c_taps_div2;// + 1;
  int hmax = flt->c_tap + hmin;
  const imgpel *p1 = p_in - (flt->c_taps_div2 +1)*stride_in_imgpel;

  int val = 0, k;

  for (k = hmin; k <= down_samples ; k++)
  { 
    p1 += stride_in_imgpel;
    val +=  *p1 * *(c_coef++);
  }
  for (k = down_samples + 1; k < hmax ; k++)
  { 
    val += *p1 * *(c_coef++);
  }
  return (imgpel)iClip1( flt->max_pel_value, shift_off_sf( val, flt->c_offset, flt->c_normal1 ) );
}
Code example #9
File: transform8x8.c  Project: c444b774/jm-decision
/*!
 * \brief  8x8 reconstruction: rec = clip(prediction + rshift_rnd_sf(residual, DQ_BITS_8)),
 *         with the inner 8-sample loop unrolled
 */
static void recon8x8(int **m7, imgpel **mb_rec, imgpel **mpr, int max_imgpel_value, int ioff)
{
  int j;
  int    *m_tr  = NULL;
  imgpel *m_rec = NULL;
  imgpel *m_prd = NULL;

  for( j = 0; j < 8; j++)
  {
    m_tr = (*m7++) + ioff;
    m_rec = (*mb_rec++) + ioff;
    m_prd = (*mpr++) + ioff;

    *m_rec++ = (imgpel) iClip1(max_imgpel_value, (*m_prd++) + rshift_rnd_sf(*m_tr++, DQ_BITS_8)); 
    *m_rec++ = (imgpel) iClip1(max_imgpel_value, (*m_prd++) + rshift_rnd_sf(*m_tr++, DQ_BITS_8)); 
    *m_rec++ = (imgpel) iClip1(max_imgpel_value, (*m_prd++) + rshift_rnd_sf(*m_tr++, DQ_BITS_8)); 
    *m_rec++ = (imgpel) iClip1(max_imgpel_value, (*m_prd++) + rshift_rnd_sf(*m_tr++, DQ_BITS_8)); 
    *m_rec++ = (imgpel) iClip1(max_imgpel_value, (*m_prd++) + rshift_rnd_sf(*m_tr++, DQ_BITS_8)); 
    *m_rec++ = (imgpel) iClip1(max_imgpel_value, (*m_prd++) + rshift_rnd_sf(*m_tr++, DQ_BITS_8)); 
    *m_rec++ = (imgpel) iClip1(max_imgpel_value, (*m_prd++) + rshift_rnd_sf(*m_tr++, DQ_BITS_8)); 
    *m_rec   = (imgpel) iClip1(max_imgpel_value, (*m_prd  ) + rshift_rnd_sf(*m_tr  , DQ_BITS_8)); 
  }
}
Code example #10
File: mc_prediction_otf.c  Project: xxthink/jm
/*!
 ************************************************************************
 * \brief
 *    Weighted Prediction
 ************************************************************************
 */
static void weighted_mc_prediction(imgpel** mb_pred, 
                                   imgpel* lpred, 
                                   int block_size_y, 
                                   int block_size_x,
                                   int ioff, 
                                   int max_imgpel_value,
                                   short wp_scale, 
                                   short wp_offset,
                                   short wp_round, 
                                   short weight_denom)
{
  int i, j;
  int result;
  int block_x4 = ioff + block_size_x;

  for   (j = 0; j < block_size_y; j++)
  {
    for (i = ioff; i < block_x4; i++)
    {
      result = rshift_rnd((wp_scale * *lpred++), weight_denom) + wp_offset;      
      mb_pred[j][i] = (imgpel) iClip1( max_imgpel_value, result);
     }
  }
}
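
Per sample, the loop above computes clip(rshift_rnd(wp_scale * pred, weight_denom) + wp_offset, 0, max). A minimal scalar restatement under that reading (the helper name is hypothetical; JM applies the weighting block-wise as shown):

static inline imgpel weight_one_sample(imgpel p, short wp_scale, short wp_offset,
                                       short weight_denom, int max_imgpel_value)
{
  int result = rshift_rnd(wp_scale * p, weight_denom) + wp_offset;  /* weight, round, offset */
  return (imgpel) iClip1(max_imgpel_value, result);                 /* clip to [0, max]      */
}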
Code example #11
File: mc_prediction_otf.c  Project: xxthink/jm
/*!
 ************************************************************************
 * \brief
 *    block weighted biprediction
 ************************************************************************
 */
static void weighted_bi_prediction(imgpel** mb_pred, 
                                   imgpel *block_l0, 
                                   imgpel *block_l1, 
                                   int block_size_y, 
                                   int block_x, 
                                   int block_size_x,
                                   int max_imgpel_value,
                                   int wp_scale_l0, 
                                   int wp_scale_l1, 
                                   int wp_offset, 
                                   int weight_denom)
{
  int i, j, result;
  int block_x4 = block_x + block_size_x;

  for   (j = 0; j< block_size_y; j++)
  {
    for (i=block_x; i<block_x4; i++)
    {
      result = rshift_rnd_sf((wp_scale_l0 * *(block_l0++) + wp_scale_l1 * *(block_l1++)),  weight_denom);
      mb_pred[j][i] = (imgpel) iClip1( max_imgpel_value, result + wp_offset); 
    }
  }
}
Code example #12
File: img_luma.c  Project: Cheng-Hsu/JM
/*!
 ************************************************************************
 * \brief
 *    Does _vertical_ interpolation using the SIX TAP filters
 *
 * \param p_Img
 *    pointer to ImageParameters structure
 * \param s
 *    pointer to StorablePicture structure
 * \param dstImg
 *    pointer to destination image
 ************************************************************************
 */
void getVerSubImageSixTapTmp( ImageParameters *p_Img, StorablePicture *s, imgpel **dstImg)
{
  int is, jpad, ipad;
  int ypadded_size = s->size_y_padded;
  int xpadded_size = s->size_x_padded;
  int maxy = ypadded_size - 1;

  imgpel *wxLineDst;
  int *srcImgA, *srcImgB, *srcImgC, *srcImgD, *srcImgE, *srcImgF;
  const int tap0 = ONE_FOURTH_TAP[0][0];
  const int tap1 = ONE_FOURTH_TAP[0][1];
  const int tap2 = ONE_FOURTH_TAP[0][2];

  // top
  for (jpad = 0; jpad < 2; jpad++)
  {
    wxLineDst = dstImg[jpad];
    srcImgA = p_Img->imgY_sub_tmp[jpad ];
    srcImgB = p_Img->imgY_sub_tmp[0];      
    srcImgC = p_Img->imgY_sub_tmp[0];
    srcImgD = p_Img->imgY_sub_tmp[jpad + 1];
    srcImgE = p_Img->imgY_sub_tmp[jpad + 2];
    srcImgF = p_Img->imgY_sub_tmp[jpad + 3];

    for (ipad = 0; ipad < xpadded_size; ipad++)
    {
      is =
        (tap0 * (*srcImgA++ + *srcImgD++) +
        tap1 *  (*srcImgB++ + *srcImgE++) +
        tap2 *  (*srcImgC++ + *srcImgF++));

      wxLineDst[ipad] = (imgpel) iClip1 ( p_Img->max_imgpel_value, rshift_rnd_sf( is, 10 ) );
    }
  }

  // center
  for (jpad = 2; jpad < ypadded_size - 3; jpad++)
  {
    wxLineDst = dstImg[jpad];
    srcImgA = p_Img->imgY_sub_tmp[jpad ];
    srcImgB = p_Img->imgY_sub_tmp[jpad - 1];      
    srcImgC = p_Img->imgY_sub_tmp[jpad - 2];
    srcImgD = p_Img->imgY_sub_tmp[jpad + 1];
    srcImgE = p_Img->imgY_sub_tmp[jpad + 2];
    srcImgF = p_Img->imgY_sub_tmp[jpad + 3];
    for (ipad = 0; ipad < xpadded_size; ipad++)
    {
      is =
        (tap0 * (*srcImgA++ + *srcImgD++) +
        tap1 *  (*srcImgB++ + *srcImgE++) +
        tap2 *  (*srcImgC++ + *srcImgF++));

      wxLineDst[ipad] = (imgpel) iClip1 ( p_Img->max_imgpel_value, rshift_rnd_sf( is, 10 ) );
    }
  }

  // bottom
  for (jpad = ypadded_size - 3; jpad < ypadded_size; jpad++)
  {
    wxLineDst = dstImg[jpad];
    srcImgA = p_Img->imgY_sub_tmp[jpad ];
    srcImgB = p_Img->imgY_sub_tmp[jpad - 1];      
    srcImgC = p_Img->imgY_sub_tmp[jpad - 2];
    srcImgD = p_Img->imgY_sub_tmp[imin (maxy, jpad + 1)];
    srcImgE = p_Img->imgY_sub_tmp[maxy];
    srcImgF = p_Img->imgY_sub_tmp[maxy];
    for (ipad = 0; ipad < xpadded_size; ipad++)
    {
      is =
        (tap0 * (*srcImgA++ + *srcImgD++) +
        tap1 *  (*srcImgB++ + *srcImgE++) +
        tap2 *  (*srcImgC++ + *srcImgF++));

      wxLineDst[ipad] = (imgpel) iClip1 ( p_Img->max_imgpel_value, rshift_rnd_sf( is, 10 ) );
    }
  }
}
Code example #13
File: img_luma.c  Project: Cheng-Hsu/JM
/*!
 ************************************************************************
 * \brief
 *    Does _horizontal_ interpolation using the SIX TAP filters
 *
 * \param p_Img
 *    pointer to ImageParameters structure
 * \param s
 *    pointer to StorablePicture structure
 * \param dstImg
 *    destination image
 * \param srcImg
 *    source image
 ************************************************************************
 */
void getHorSubImageSixTap( ImageParameters *p_Img, StorablePicture *s, imgpel **dstImg, imgpel **srcImg)
{
  int is, jpad, ipad;
  int ypadded_size = s->size_y_padded;
  int xpadded_size = s->size_x_padded;

  imgpel *wBufSrc, *wBufDst;
  imgpel *srcImgA, *srcImgB, *srcImgC, *srcImgD, *srcImgE, *srcImgF;
  int *iBufDst;
  const int tap0 = ONE_FOURTH_TAP[0][0];
  const int tap1 = ONE_FOURTH_TAP[0][1];
  const int tap2 = ONE_FOURTH_TAP[0][2];

  for (jpad = 0; jpad < ypadded_size; jpad++)
  {
    wBufSrc = srcImg[jpad];     // 4:4:4 independent mode
    wBufDst = dstImg[jpad];     // 4:4:4 independent mode
    iBufDst = p_Img->imgY_sub_tmp[jpad];

    srcImgA = &wBufSrc[0];
    srcImgB = &wBufSrc[0];      
    srcImgC = &wBufSrc[0];
    srcImgD = &wBufSrc[1];
    srcImgE = &wBufSrc[2];
    srcImgF = &wBufSrc[3];

    // left padded area
    is =
      (tap0 * (*srcImgA++ + *srcImgD++) +
      tap1 *  (*srcImgB   + *srcImgE++) +
      tap2 *  (*srcImgC   + *srcImgF++));

    *iBufDst++ =  is;
    *wBufDst++ = (imgpel) iClip1 ( p_Img->max_imgpel_value, rshift_rnd_sf( is, 5 ) );      

    is =
      (tap0 * (*srcImgA++ + *srcImgD++) +
      tap1 *  (*srcImgB++ + *srcImgE++) +
      tap2 *  (*srcImgC   + *srcImgF++));

    *iBufDst++ =  is;
    *wBufDst++ = (imgpel) iClip1 ( p_Img->max_imgpel_value, rshift_rnd_sf( is, 5 ) );      

    // center
    for (ipad = 2; ipad < xpadded_size - 4; ipad++)
    {
      is =
        (tap0 * (*srcImgA++ + *srcImgD++) +
        tap1 *  (*srcImgB++ + *srcImgE++) +
        tap2 *  (*srcImgC++ + *srcImgF++));

      *iBufDst++ =  is;
      *wBufDst++ = (imgpel) iClip1 ( p_Img->max_imgpel_value, rshift_rnd_sf( is, 5 ) );      
    }

    is = (
      tap0 * (*srcImgA++ + *srcImgD++) +
      tap1 * (*srcImgB++ + *srcImgE++) +
      tap2 * (*srcImgC++ + *srcImgF  ));

    *iBufDst++ =  is;
    *wBufDst++ = (imgpel) iClip1 ( p_Img->max_imgpel_value, rshift_rnd_sf( is, 5 ) );      

    // right padded area
    is = (
      tap0 * (*srcImgA++ + *srcImgD++) +
      tap1 * (*srcImgB++ + *srcImgE) +
      tap2 * (*srcImgC++ + *srcImgF));

    *iBufDst++ =  is;
    *wBufDst++ = (imgpel) iClip1 ( p_Img->max_imgpel_value, rshift_rnd_sf( is, 5 ) );      

    is = (
      tap0 * (*srcImgA++ + *srcImgD) +
      tap1 * (*srcImgB++ + *srcImgE) +
      tap2 * (*srcImgC++ + *srcImgF));

    *iBufDst++ =  is;
    *wBufDst++ = (imgpel) iClip1 ( p_Img->max_imgpel_value, rshift_rnd_sf( is, 5 ) );      

    is = (
      tap0 * (*srcImgA + *srcImgD) +
      tap1 * (*srcImgB + *srcImgE) +
      tap2 * (*srcImgC + *srcImgF));

    *iBufDst =  is;
    *wBufDst = (imgpel) iClip1 ( p_Img->max_imgpel_value, rshift_rnd_sf( is, 5 ) );      

  }
}
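
Code examples #12 and #13 form the two passes of the H.264 luma half-pel interpolation: getHorSubImageSixTap stores both the clipped half-pel samples and the unshifted filter sums (p_Img->imgY_sub_tmp), and getVerSubImageSixTapTmp then filters those intermediate sums vertically. Assuming the standard six-tap weights (1, -5, 20, 20, -5, 1), i.e. ONE_FOURTH_TAP[0] = {20, -5, 1}, a single pass scales the result by 32, which is why the horizontal pass uses rshift_rnd_sf(is, 5) while the vertical pass over the unshifted intermediates uses rshift_rnd_sf(is, 10) (32 * 32 = 1024).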