Example #1
    (int)((-BIASF+1) * LOG2),                 /* FLT_MIN_10_EXP */
    (int)(BIASF+1),                           /* FLT_MAX_EXP */
    (int)((BIASF+1) * LOG2),                  /* FLT_MAX_10_EXP */
    { INIT4(MAXEF-1,        0xFF, 0xFF) },    /* FLT_MAX */
    { INIT4(BIASF-(SIGF-1), 0x00, 0x00) },    /* FLT_EPSILON */
    { INIT4(0x01,           0x00, 0x00) },    /* FLT_MIN */
};

_Fparam _Dbl = {
    (int)SIGD,                                /* DBL_MANT_DIG */
    (int)((SIGD-1) * LOG2),                   /* DBL_DIG */
    (int)(1-BIASD+1),                         /* DBL_MIN_EXP */
    (int)((-BIASD+1) * LOG2),                 /* DBL_MIN_10_EXP */
    (int)(BIASD+1),                           /* DBL_MAX_EXP */
    (int)((BIASD+1) * LOG2),                  /* DBL_MAX_10_EXP */
    { INIT8(MAXED-1,        0xFF, 0xFF) },    /* DBL_MAX */
    { INIT8(BIASD-(SIGD-1), 0x00, 0x00) },    /* DBL_EPSILON */
    { INIT8(0x001,          0x00, 0x00) },    /* DBL_MIN */
};

_Fparam _Ldbl = {
    (int)SIGL,                                 /* LDBL_MANT_DIG */
    (int)((SIGL-1) * LOG2),                    /* LDBL_DIG */
    (int)(1-BIASL+1),                          /* LDBL_MIN_EXP */
    (int)((-BIASL+1) * LOG2),                  /* LDBL_MIN_10_EXP */
    (int)(BIASL+1),                            /* LDBL_MAX_EXP */
    (int)((BIASL+1) * LOG2),                   /* LDBL_MAX_10_EXP */
    { INIT12(MAXEL-1,        0xFF, 0xFF) },    /* LDBL_MAX */
    { INIT12(BIASL-(SIGL-1), 0x00, 0x00) },    /* LDBL_EPSILON */
    { INIT12(0x0001,         0x00, 0x00) },    /* LDBL_MIN */
};
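
These tables typically back a <float.h>-style header whose macros simply read the fields. Below is a minimal sketch of that idea, assuming a hypothetical field layout that mirrors the initializer order above; the real _Fparam definition (and the encoding produced by INIT4/INIT8/INIT12) may differ.

/* Hypothetical layout, mirroring the initializer order above. */
typedef struct {
    int mant_dig;      /* *_MANT_DIG   */
    int dig;           /* *_DIG        */
    int min_exp;       /* *_MIN_EXP    */
    int min_10_exp;    /* *_MIN_10_EXP */
    int max_exp;       /* *_MAX_EXP    */
    int max_10_exp;    /* *_MAX_10_EXP */
    /* encoded *_MAX, *_EPSILON, *_MIN values follow (built by INIT4/8/12) */
} Fparam_sketch;

/* A <float.h> built on such tables would just forward to the fields, e.g.
 *   #define DBL_MANT_DIG   (_Dbl.mant_dig)
 *   #define DBL_MAX_10_EXP (_Dbl.max_10_exp)
 */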
void Setup_Assembly_Primitives(EncoderPrimitives &p, int cpuMask)
{
#if HIGH_BIT_DEPTH
    // No-op self-assignment: keeps cpuMask referenced while no HIGH_BIT_DEPTH assembly is wired up yet.
    if (cpuMask & X265_CPU_SSE2) p.sa8d[0] = p.sa8d[0];
#else
    if (cpuMask & X265_CPU_SSE2)
    {
        INIT8_NAME(sse_pp, ssd, _mmx);
        INIT8(sad, _mmx2);
        INIT8(sad_x3, _mmx2);
        INIT8(sad_x4, _mmx2);
        INIT8(satd, _mmx2);
        p.satd[LUMA_8x32] = x265_pixel_satd_8x32_sse2;
        p.satd[LUMA_12x16] = x265_pixel_satd_12x16_sse2;
        p.satd[LUMA_16x4] = x265_pixel_satd_16x4_sse2;
        p.satd[LUMA_16x12] = x265_pixel_satd_16x12_sse2;
        p.satd[LUMA_16x32] = x265_pixel_satd_16x32_sse2;
        p.satd[LUMA_16x64] = x265_pixel_satd_16x64_sse2;
        p.satd[LUMA_32x8]  = x265_pixel_satd_32x8_sse2;
        p.satd[LUMA_32x16] = x265_pixel_satd_32x16_sse2;
        p.satd[LUMA_32x24] = x265_pixel_satd_32x24_sse2;
        p.sa8d[BLOCK_4x4]  = x265_pixel_satd_4x4_mmx2;
        p.frame_init_lowres_core = x265_frame_init_lowres_core_mmx2;

        PIXEL_AVG(sse2);
        PIXEL_AVG_W4(mmx2);

        p.sad[LUMA_8x32]  = x265_pixel_sad_8x32_sse2;
        p.sad[LUMA_16x4]  = x265_pixel_sad_16x4_sse2;
        p.sad[LUMA_16x12] = x265_pixel_sad_16x12_sse2;
        p.sad[LUMA_16x32] = x265_pixel_sad_16x32_sse2;
        p.sad[LUMA_16x64] = x265_pixel_sad_16x64_sse2;

        p.sad[LUMA_32x8]  = x265_pixel_sad_32x8_sse2;
        p.sad[LUMA_32x16] = x265_pixel_sad_32x16_sse2;
        p.sad[LUMA_32x24] = x265_pixel_sad_32x24_sse2;
        p.sad[LUMA_32x32] = x265_pixel_sad_32x32_sse2;
        p.sad[LUMA_32x64] = x265_pixel_sad_32x64_sse2;

        p.sad[LUMA_64x16] = x265_pixel_sad_64x16_sse2;
        p.sad[LUMA_64x32] = x265_pixel_sad_64x32_sse2;
        p.sad[LUMA_64x48] = x265_pixel_sad_64x48_sse2;
        p.sad[LUMA_64x64] = x265_pixel_sad_64x64_sse2;

        p.sad[LUMA_48x64] = x265_pixel_sad_48x64_sse2;
        p.sad[LUMA_24x32] = x265_pixel_sad_24x32_sse2;
        p.sad[LUMA_12x16] = x265_pixel_sad_12x16_sse2;

        ASSGN_SSE(sse2);
        INIT2(sad, _sse2);
        INIT2(sad_x3, _sse2);
        INIT2(sad_x4, _sse2);
        INIT6(satd, _sse2);
        HEVC_SATD(sse2);

        CHROMA_BLOCKCOPY(_sse2);
        LUMA_BLOCKCOPY(_sse2);

        CHROMA_SS_FILTERS(_sse2);
        LUMA_SS_FILTERS(_sse2);

        // These function pointer initializations are temporary and will be
        // replaced by macro definitions later.  For now they avoid linker
        // errors until all partitions are coded, and they keep the patches
        // small and easy to review.
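        // As a rough illustration of where this is headed, the eventual macro
        // could collapse the per-partition assignments below into something
        // like the following (hypothetical name and partition list, shown only
        // to sketch the token-pasting shape):
        //
        //   #define CHROMA_BLOCKCOPY_SP(cpu) \
        //       p.chroma_copy_sp[CHROMA_4x2] = x265_blockcopy_sp_4x2_ ## cpu; \
        //       p.chroma_copy_sp[CHROMA_4x4] = x265_blockcopy_sp_4x4_ ## cpu; \
        //       /* ... one assignment per partition ... */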

        p.chroma_copy_sp[CHROMA_4x2] = x265_blockcopy_sp_4x2_sse2;
        p.chroma_copy_sp[CHROMA_4x4] = x265_blockcopy_sp_4x4_sse2;
        p.chroma_copy_sp[CHROMA_4x8] = x265_blockcopy_sp_4x8_sse2;
        p.chroma_copy_sp[CHROMA_4x16] = x265_blockcopy_sp_4x16_sse2;
        p.chroma_copy_sp[CHROMA_8x2] = x265_blockcopy_sp_8x2_sse2;
        p.chroma_copy_sp[CHROMA_8x4] = x265_blockcopy_sp_8x4_sse2;
        p.chroma_copy_sp[CHROMA_8x6] = x265_blockcopy_sp_8x6_sse2;
        p.chroma_copy_sp[CHROMA_8x8] = x265_blockcopy_sp_8x8_sse2;
        p.chroma_copy_sp[CHROMA_8x16] = x265_blockcopy_sp_8x16_sse2;
        p.chroma_copy_sp[CHROMA_12x16] = x265_blockcopy_sp_12x16_sse2;
        p.chroma_copy_sp[CHROMA_16x4] = x265_blockcopy_sp_16x4_sse2;
        p.chroma_copy_sp[CHROMA_16x8] = x265_blockcopy_sp_16x8_sse2;
        p.chroma_copy_sp[CHROMA_16x12] = x265_blockcopy_sp_16x12_sse2;
        p.chroma_copy_sp[CHROMA_16x16] = x265_blockcopy_sp_16x16_sse2;
        p.chroma_copy_sp[CHROMA_16x32] = x265_blockcopy_sp_16x32_sse2;
        p.luma_copy_sp[LUMA_16x64] = x265_blockcopy_sp_16x64_sse2;
        p.chroma_copy_sp[CHROMA_24x32] = x265_blockcopy_sp_24x32_sse2;
        p.chroma_copy_sp[CHROMA_32x8] = x265_blockcopy_sp_32x8_sse2;
        p.chroma_copy_sp[CHROMA_32x16] = x265_blockcopy_sp_32x16_sse2;
        p.chroma_copy_sp[CHROMA_32x24] = x265_blockcopy_sp_32x24_sse2;
        p.chroma_copy_sp[CHROMA_32x32] = x265_blockcopy_sp_32x32_sse2;
        p.luma_copy_sp[LUMA_32x64] = x265_blockcopy_sp_32x64_sse2;
        p.luma_copy_sp[LUMA_48x64] = x265_blockcopy_sp_48x64_sse2;
        p.luma_copy_sp[LUMA_64x16] = x265_blockcopy_sp_64x16_sse2;
        p.luma_copy_sp[LUMA_64x32] = x265_blockcopy_sp_64x32_sse2;
        p.luma_copy_sp[LUMA_64x48] = x265_blockcopy_sp_64x48_sse2;
        p.luma_copy_sp[LUMA_64x64] = x265_blockcopy_sp_64x64_sse2;

        p.blockfill_s[BLOCK_4x4] = x265_blockfill_s_4x4_sse2;
        p.blockfill_s[BLOCK_8x8] = x265_blockfill_s_8x8_sse2;
        p.blockfill_s[BLOCK_16x16] = x265_blockfill_s_16x16_sse2;
        p.blockfill_s[BLOCK_32x32] = x265_blockfill_s_32x32_sse2;

        p.frame_init_lowres_core = x265_frame_init_lowres_core_sse2;
        p.sa8d[BLOCK_8x8]   = x265_pixel_sa8d_8x8_sse2;
        p.sa8d[BLOCK_16x16] = x265_pixel_sa8d_16x16_sse2;
        SA8D_INTER_FROM_BLOCK(sse2);

        p.cvt32to16_shr = x265_cvt32to16_shr_sse2;
        p.ipfilter_ss[FILTER_V_S_S_8] = x265_interp_8tap_v_ss_sse2;
        p.calcrecon[BLOCK_4x4] = x265_calcRecons4_sse2;
        p.calcrecon[BLOCK_8x8] = x265_calcRecons8_sse2;
        p.calcresidual[BLOCK_4x4] = x265_getResidual4_sse2;
        p.calcresidual[BLOCK_8x8] = x265_getResidual8_sse2;
    }
    if (cpuMask & X265_CPU_SSSE3)
    {
        p.frame_init_lowres_core = x265_frame_init_lowres_core_ssse3;
        p.sa8d[BLOCK_8x8]   = x265_pixel_sa8d_8x8_ssse3;
        p.sa8d[BLOCK_16x16] = x265_pixel_sa8d_16x16_ssse3;
        SA8D_INTER_FROM_BLOCK(ssse3);
        p.sse_pp[LUMA_4x4] = x265_pixel_ssd_4x4_ssse3;
        ASSGN_SSE(ssse3);
        PIXEL_AVG(ssse3);
        PIXEL_AVG_W4(ssse3);

        p.scale1D_128to64 = x265_scale1D_128to64_ssse3;

        p.sad_x4[LUMA_8x4] = x265_pixel_sad_x4_8x4_ssse3;
        p.sad_x4[LUMA_8x8] = x265_pixel_sad_x4_8x8_ssse3;
        p.sad_x3[LUMA_8x16] = x265_pixel_sad_x3_8x16_ssse3;
        p.sad_x4[LUMA_8x16] = x265_pixel_sad_x4_8x16_ssse3;
        p.sad_x3[LUMA_8x32]  = x265_pixel_sad_x3_8x32_ssse3;
        p.sad_x4[LUMA_8x32]  = x265_pixel_sad_x4_8x32_ssse3;

        p.sad_x3[LUMA_12x16] = x265_pixel_sad_x3_12x16_ssse3;
        p.sad_x4[LUMA_12x16] = x265_pixel_sad_x4_12x16_ssse3;
        p.sad_x3[LUMA_16x12] = x265_pixel_sad_x3_16x12_ssse3;
        p.sad_x4[LUMA_16x12] = x265_pixel_sad_x4_16x12_ssse3;
        p.sad_x3[LUMA_16x32] = x265_pixel_sad_x3_16x32_ssse3;
        p.sad_x4[LUMA_16x32] = x265_pixel_sad_x4_16x32_ssse3;
        p.sad_x3[LUMA_16x64] = x265_pixel_sad_x3_16x64_ssse3;
        p.sad_x4[LUMA_16x64] = x265_pixel_sad_x4_16x64_ssse3;
        p.sad_x3[LUMA_24x32] = x265_pixel_sad_x3_24x32_ssse3;
        p.sad_x4[LUMA_24x32] = x265_pixel_sad_x4_24x32_ssse3;
        p.sad_x3[LUMA_32x8] = x265_pixel_sad_x3_32x8_ssse3;
        p.sad_x3[LUMA_32x16] = x265_pixel_sad_x3_32x16_ssse3;
        p.sad_x3[LUMA_32x24] = x265_pixel_sad_x3_32x24_ssse3;
        p.sad_x3[LUMA_32x32] = x265_pixel_sad_x3_32x32_ssse3;
        p.sad_x3[LUMA_32x64] = x265_pixel_sad_x3_32x64_ssse3;
        p.sad_x4[LUMA_32x8] = x265_pixel_sad_x4_32x8_ssse3;
        p.sad_x4[LUMA_32x16] = x265_pixel_sad_x4_32x16_ssse3;
        p.sad_x4[LUMA_32x24] = x265_pixel_sad_x4_32x24_ssse3;
        p.sad_x4[LUMA_32x32] = x265_pixel_sad_x4_32x32_ssse3;
        p.sad_x4[LUMA_32x64] = x265_pixel_sad_x4_32x64_ssse3;
        p.sad_x3[LUMA_48x64] = x265_pixel_sad_x3_48x64_ssse3;
        p.sad_x4[LUMA_48x64] = x265_pixel_sad_x4_48x64_ssse3;
        p.sad_x3[LUMA_64x16] = x265_pixel_sad_x3_64x16_ssse3;
        p.sad_x3[LUMA_64x32] = x265_pixel_sad_x3_64x32_ssse3;
        p.sad_x3[LUMA_64x48] = x265_pixel_sad_x3_64x48_ssse3;
        p.sad_x3[LUMA_64x64] = x265_pixel_sad_x3_64x64_ssse3;
        p.sad_x4[LUMA_64x16] = x265_pixel_sad_x4_64x16_ssse3;
        p.sad_x4[LUMA_64x32] = x265_pixel_sad_x4_64x32_ssse3;
        p.sad_x4[LUMA_64x48] = x265_pixel_sad_x4_64x48_ssse3;
        p.sad_x4[LUMA_64x64] = x265_pixel_sad_x4_64x64_ssse3;

        p.luma_hvpp[LUMA_8x8] = x265_interp_8tap_hv_pp_8x8_ssse3;
        p.luma_p2s = x265_luma_p2s_ssse3;
        p.chroma_p2s = x265_chroma_p2s_ssse3;
        
        CHROMA_SP_FILTERS(_ssse3);
        LUMA_SP_FILTERS(_ssse3);

    }
    if (cpuMask & X265_CPU_SSE4)
    {
        p.satd[LUMA_4x16]   = x265_pixel_satd_4x16_sse4;
        p.satd[LUMA_12x16]  = x265_pixel_satd_12x16_sse4;
        p.satd[LUMA_32x8] = x265_pixel_satd_32x8_sse4;
        p.satd[LUMA_32x16] = x265_pixel_satd_32x16_sse4;
        p.satd[LUMA_32x24] = x265_pixel_satd_32x24_sse4;
        p.sa8d[BLOCK_8x8]   = x265_pixel_sa8d_8x8_sse4;
        p.sa8d[BLOCK_16x16] = x265_pixel_sa8d_16x16_sse4;
        SA8D_INTER_FROM_BLOCK(sse4);

        CHROMA_FILTERS(_sse4);
        LUMA_FILTERS(_sse4);
        HEVC_SATD(sse4);
        p.chroma_copy_sp[CHROMA_2x4] = x265_blockcopy_sp_2x4_sse4;
        p.chroma_copy_sp[CHROMA_2x8] = x265_blockcopy_sp_2x8_sse4;
        p.chroma_copy_sp[CHROMA_6x8] = x265_blockcopy_sp_6x8_sse4;

        p.chroma_vsp[CHROMA_2x4] = x265_interp_4tap_vert_sp_2x4_sse4;
        p.chroma_vsp[CHROMA_2x8] = x265_interp_4tap_vert_sp_2x8_sse4;
        p.chroma_vsp[CHROMA_6x8] = x265_interp_4tap_vert_sp_6x8_sse4;

        p.calcrecon[BLOCK_16x16] = x265_calcRecons16_sse4;
        p.calcrecon[BLOCK_32x32] = x265_calcRecons32_sse4;
        p.calcresidual[BLOCK_16x16] = x265_getResidual16_sse4;
        p.calcresidual[BLOCK_32x32] = x265_getResidual32_sse4;
    }
    if (cpuMask & X265_CPU_AVX)
    {
        p.frame_init_lowres_core = x265_frame_init_lowres_core_avx;
        p.satd[LUMA_4x16]   = x265_pixel_satd_4x16_avx;
        p.satd[LUMA_12x16]  = x265_pixel_satd_12x16_avx;
        p.satd[LUMA_32x8] = x265_pixel_satd_32x8_avx;
        p.satd[LUMA_32x16] = x265_pixel_satd_32x16_avx;
        p.satd[LUMA_32x24] = x265_pixel_satd_32x24_avx;
        p.sa8d[BLOCK_8x8]   = x265_pixel_sa8d_8x8_avx;
        p.sa8d[BLOCK_16x16] = x265_pixel_sa8d_16x16_avx;
        SA8D_INTER_FROM_BLOCK(avx);
        ASSGN_SSE(avx);
        HEVC_SATD(avx);

        p.sad_x3[LUMA_12x16] = x265_pixel_sad_x3_12x16_avx;
        p.sad_x4[LUMA_12x16] = x265_pixel_sad_x4_12x16_avx;
        p.sad_x3[LUMA_16x4]  = x265_pixel_sad_x3_16x4_avx;
        p.sad_x4[LUMA_16x4]  = x265_pixel_sad_x4_16x4_avx;
        p.sad_x3[LUMA_16x12] = x265_pixel_sad_x3_16x12_avx;
        p.sad_x4[LUMA_16x12] = x265_pixel_sad_x4_16x12_avx;
        p.sad_x3[LUMA_16x32] = x265_pixel_sad_x3_16x32_avx;
        p.sad_x4[LUMA_16x32] = x265_pixel_sad_x4_16x32_avx;
        p.sad_x3[LUMA_16x64] = x265_pixel_sad_x3_16x64_avx;
        p.sad_x4[LUMA_16x64] = x265_pixel_sad_x4_16x64_avx;
        p.sad_x3[LUMA_24x32] = x265_pixel_sad_x3_24x32_avx;
        p.sad_x4[LUMA_24x32] = x265_pixel_sad_x4_24x32_avx;

        p.sad_x3[LUMA_32x8]  = x265_pixel_sad_x3_32x8_avx;
        p.sad_x3[LUMA_32x16] = x265_pixel_sad_x3_32x16_avx;
        p.sad_x3[LUMA_32x24] = x265_pixel_sad_x3_32x24_avx;
        p.sad_x3[LUMA_32x32] = x265_pixel_sad_x3_32x32_avx;
        p.sad_x3[LUMA_32x64] = x265_pixel_sad_x3_32x64_avx;
        p.sad_x4[LUMA_32x8]  = x265_pixel_sad_x4_32x8_avx;
        p.sad_x4[LUMA_32x16] = x265_pixel_sad_x4_32x16_avx;
        p.sad_x4[LUMA_32x24] = x265_pixel_sad_x4_32x24_avx;
        p.sad_x4[LUMA_32x32] = x265_pixel_sad_x4_32x32_avx;
        p.sad_x4[LUMA_32x64] = x265_pixel_sad_x4_32x64_avx;
        p.sad_x3[LUMA_48x64] = x265_pixel_sad_x3_48x64_avx;
        p.sad_x4[LUMA_48x64] = x265_pixel_sad_x4_48x64_avx;
        p.sad_x3[LUMA_64x16] = x265_pixel_sad_x3_64x16_avx;
        p.sad_x3[LUMA_64x32] = x265_pixel_sad_x3_64x32_avx;
        p.sad_x3[LUMA_64x48] = x265_pixel_sad_x3_64x48_avx;
        p.sad_x3[LUMA_64x64] = x265_pixel_sad_x3_64x64_avx;
        p.sad_x4[LUMA_64x16] = x265_pixel_sad_x4_64x16_avx;
        p.sad_x4[LUMA_64x32] = x265_pixel_sad_x4_64x32_avx;
        p.sad_x4[LUMA_64x48] = x265_pixel_sad_x4_64x48_avx;
        p.sad_x4[LUMA_64x64] = x265_pixel_sad_x4_64x64_avx;
    }
    if (cpuMask & X265_CPU_XOP)
    {
        p.frame_init_lowres_core = x265_frame_init_lowres_core_xop;
        p.sa8d[BLOCK_8x8]   = x265_pixel_sa8d_8x8_xop;
        p.sa8d[BLOCK_16x16] = x265_pixel_sa8d_16x16_xop;
        SA8D_INTER_FROM_BLOCK(xop);
        INIT7(satd, _xop);
        INIT5_NAME(sse_pp, ssd, _xop);
        HEVC_SATD(xop);
    }
    if (cpuMask & X265_CPU_AVX2)
    {
        INIT2(sad_x4, _avx2);
        INIT4(satd, _avx2);
        INIT2_NAME(sse_pp, ssd, _avx2);
        p.sa8d[BLOCK_8x8] = x265_pixel_sa8d_8x8_avx2;
        SA8D_INTER_FROM_BLOCK8(avx2);
        p.satd[LUMA_32x32] = cmp<32, 32, 16, 16, x265_pixel_satd_16x16_avx2>;
        p.satd[LUMA_24x32] = cmp<24, 32, 8, 16, x265_pixel_satd_8x16_avx2>;
        p.satd[LUMA_64x64] = cmp<64, 64, 16, 16, x265_pixel_satd_16x16_avx2>;
        p.satd[LUMA_64x32] = cmp<64, 32, 16, 16, x265_pixel_satd_16x16_avx2>;
        p.satd[LUMA_32x64] = cmp<32, 64, 16, 16, x265_pixel_satd_16x16_avx2>;
        p.satd[LUMA_64x48] = cmp<64, 48, 16, 16, x265_pixel_satd_16x16_avx2>;
        p.satd[LUMA_48x64] = cmp<48, 64, 16, 16, x265_pixel_satd_16x16_avx2>;
        p.satd[LUMA_64x16] = cmp<64, 16, 16, 16, x265_pixel_satd_16x16_avx2>;

        p.sad_x4[LUMA_16x12] = x265_pixel_sad_x4_16x12_avx2;
        p.sad_x4[LUMA_16x32] = x265_pixel_sad_x4_16x32_avx2;
    }
#endif // if HIGH_BIT_DEPTH
}
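
For context, a dispatcher like this is typically driven by first installing portable C primitives and then letting the assembly table override them for the detected CPU. The sketch below illustrates that flow; detectCpuFlags() and Setup_C_Primitives() are placeholder names for whatever CPU detection and C-fallback setup the project actually provides.

// Hedged usage sketch: only Setup_Assembly_Primitives, EncoderPrimitives and
// the X265_CPU_* flags come from the code above; the other names are placeholders.
void initPrimitives(EncoderPrimitives &p)
{
    int cpuMask = detectCpuFlags();          // placeholder CPU-feature detection
    Setup_C_Primitives(p);                   // placeholder: portable C fallbacks first
    Setup_Assembly_Primitives(p, cpuMask);   // then override with assembly versions
}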