Example #1
LLVMValueRef
lp_build_round(struct lp_build_context *bld,
               LLVMValueRef a)
{
   const struct lp_type type = bld->type;

   assert(type.floating);
   assert(lp_check_value(type, a));

   if(util_cpu_caps.has_sse4_1)
      return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_NEAREST);
   else {
      LLVMTypeRef vec_type = lp_build_vec_type(type);
      LLVMValueRef res;
      res = lp_build_iround(bld, a);
      res = LLVMBuildSIToFP(bld->builder, res, vec_type, "");
      return res;
   }
}
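
For orientation, a minimal scalar sketch of what the non-SSE4.1 fallback path
computes: round to the nearest integer (as lp_build_iround does), then convert
the result back to float (as LLVMBuildSIToFP does). The helper name is
hypothetical, not part of gallivm.

#include <math.h>

/* Hypothetical scalar model of the fallback path above: round to nearest
 * integer, then convert back to float. lrintf uses the current rounding
 * mode (round-to-nearest-even by default on x86), which is the behavior
 * the vector rounding relies on. */
static float
round_fallback_scalar(float a)
{
   return (float)lrintf(a);
}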
Example #2
/**
 * Convert linear float SoA values to packed sRGB AoS values.
 * This only handles packed formats which are 4x8bit in size
 * (rgba and rgbx plus swizzles), and 16bit 565-style formats
 * with no alpha. (In the latter case the return values won't be
 * fully packed; they will look like r5g6b5x16r5g6b5x16...)
 *
 * @param src   float SoA (vector) values to convert.
 */
LLVMValueRef
lp_build_float_to_srgb_packed(struct gallivm_state *gallivm,
                              const struct util_format_description *dst_fmt,
                              struct lp_type src_type,
                              LLVMValueRef *src)
{
   LLVMBuilderRef builder = gallivm->builder;
   unsigned chan;
   struct lp_build_context f32_bld;
   struct lp_type int32_type = lp_int_type(src_type);
   LLVMValueRef tmpsrgb[4], alpha, dst;

   lp_build_context_init(&f32_bld, gallivm, src_type);

   /* rgb is subject to linear->srgb conversion, alpha is not */
   for (chan = 0; chan < 3; chan++) {
      unsigned chan_bits = dst_fmt->channel[dst_fmt->swizzle[chan]].size;
      tmpsrgb[chan] = lp_build_linear_to_srgb(gallivm, src_type, chan_bits, src[chan]);
   }
   /*
    * Can't use lp_build_conv since we want to keep the values as 32-bit
    * here, so we can interleave them with rgb to go from SoA->AoS.
    */
   alpha = lp_build_clamp_zero_one_nanzero(&f32_bld, src[3]);
   alpha = lp_build_mul(&f32_bld, alpha,
                        lp_build_const_vec(gallivm, src_type, 255.0f));
   tmpsrgb[3] = lp_build_iround(&f32_bld, alpha);

   dst = lp_build_zero(gallivm, int32_type);
   for (chan = 0; chan < dst_fmt->nr_channels; chan++) {
      if (dst_fmt->swizzle[chan] <= PIPE_SWIZZLE_W) {
         unsigned ls;
         LLVMValueRef shifted, shift_val;
         ls = dst_fmt->channel[dst_fmt->swizzle[chan]].shift;
         shift_val = lp_build_const_int_vec(gallivm, int32_type, ls);
         shifted = LLVMBuildShl(builder, tmpsrgb[chan], shift_val, "");
         dst = LLVMBuildOr(builder, dst, shifted, "");
      }
   }
   return dst;
}
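
For reference, a minimal scalar sketch of the final pack loop, assuming a plain
8:8:8:8 rgba layout: each already-converted channel is shifted to its bit
position (taken from dst_fmt->channel[...].shift in the real code) and OR'd
into the destination word. The helper name and the fixed shifts are
illustrative assumptions.

#include <stdint.h>

/* Hypothetical scalar model of the Shl/Or loop above for an rgba8
 * layout: shift each channel to its position and merge with OR. */
static uint32_t
pack_rgba8(uint32_t r, uint32_t g, uint32_t b, uint32_t a)
{
   return (r << 0) | (g << 8) | (b << 16) | (a << 24);
}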
Example #3
/**
 * Generic type conversion.
 *
 * TODO: Take a precision argument, or even better, add a new precision member
 * to the lp_type union.
 */
void
lp_build_conv(struct gallivm_state *gallivm,
              struct lp_type src_type,
              struct lp_type dst_type,
              const LLVMValueRef *src, unsigned num_srcs,
              LLVMValueRef *dst, unsigned num_dsts)
{
    LLVMBuilderRef builder = gallivm->builder;
    struct lp_type tmp_type;
    LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
    unsigned num_tmps;
    unsigned i;

    /* We must not lose or gain channels; only precision may change. */
    assert(src_type.length * num_srcs == dst_type.length * num_dsts);

    assert(src_type.length <= LP_MAX_VECTOR_LENGTH);
    assert(dst_type.length <= LP_MAX_VECTOR_LENGTH);
    assert(num_srcs <= LP_MAX_VECTOR_LENGTH);
    assert(num_dsts <= LP_MAX_VECTOR_LENGTH);

    tmp_type = src_type;
    for(i = 0; i < num_srcs; ++i) {
        assert(lp_check_value(src_type, src[i]));
        tmp[i] = src[i];
    }
    num_tmps = num_srcs;


    /* Special case 4x4f --> 1x16ub
     */
    if (src_type.floating == 1 &&
            src_type.fixed    == 0 &&
            src_type.sign     == 1 &&
            src_type.norm     == 0 &&
            src_type.width    == 32 &&
            src_type.length   == 4 &&

            dst_type.floating == 0 &&
            dst_type.fixed    == 0 &&
            dst_type.sign     == 0 &&
            dst_type.norm     == 1 &&
            dst_type.width    == 8 &&
            dst_type.length   == 16 &&

            4 * num_dsts      == num_srcs &&

            util_cpu_caps.has_sse2)
    {
        struct lp_build_context bld;
        struct lp_type int16_type = dst_type;
        struct lp_type int32_type = dst_type;
        LLVMValueRef const_255f;
        unsigned i, j;

        lp_build_context_init(&bld, gallivm, src_type);

        int16_type.width *= 2;
        int16_type.length /= 2;
        int16_type.sign = 1;

        int32_type.width *= 4;
        int32_type.length /= 4;
        int32_type.sign = 1;

        const_255f = lp_build_const_vec(gallivm, src_type, 255.0f);

        for (i = 0; i < num_dsts; ++i, src += 4) {
            LLVMValueRef lo, hi;

            for (j = 0; j < 4; ++j) {
                tmp[j] = LLVMBuildFMul(builder, src[j], const_255f, "");
                tmp[j] = lp_build_iround(&bld, tmp[j]);
            }

            /* relying on clamping behavior of sse2 intrinsics here */
            lo = lp_build_pack2(gallivm, int32_type, int16_type, tmp[0], tmp[1]);
            hi = lp_build_pack2(gallivm, int32_type, int16_type, tmp[2], tmp[3]);
            dst[i] = lp_build_pack2(gallivm, int16_type, dst_type, lo, hi);
        }

        return;
    }

    /* Special case 2x8f --> 1x16ub
     */
    else if (src_type.floating == 1 &&
             src_type.fixed    == 0 &&
             src_type.sign     == 1 &&
             src_type.norm     == 0 &&
             src_type.width    == 32 &&
             src_type.length   == 8 &&

             dst_type.floating == 0 &&
             dst_type.fixed    == 0 &&
             dst_type.sign     == 0 &&
             dst_type.norm     == 1 &&
             dst_type.width    == 8 &&
             dst_type.length   == 16 &&

             2 * num_dsts      == num_srcs &&

             util_cpu_caps.has_avx) {

        struct lp_build_context bld;
        struct lp_type int16_type = dst_type;
        struct lp_type int32_type = dst_type;
        LLVMValueRef const_255f;
        unsigned i;

        lp_build_context_init(&bld, gallivm, src_type);

        int16_type.width *= 2;
        int16_type.length /= 2;
        int16_type.sign = 1;

        int32_type.width *= 4;
        int32_type.length /= 4;
        int32_type.sign = 1;

        const_255f = lp_build_const_vec(gallivm, src_type, 255.0f);

        for (i = 0; i < num_dsts; ++i, src += 2) {
            LLVMValueRef lo, hi, a, b;

            a = LLVMBuildFMul(builder, src[0], const_255f, "");
            b = LLVMBuildFMul(builder, src[1], const_255f, "");

            a = lp_build_iround(&bld, a);
            b = lp_build_iround(&bld, b);

            tmp[0] = lp_build_extract_range(gallivm, a, 0, 4);
            tmp[1] = lp_build_extract_range(gallivm, a, 4, 4);
            tmp[2] = lp_build_extract_range(gallivm, b, 0, 4);
            tmp[3] = lp_build_extract_range(gallivm, b, 4, 4);

            /* relying on clamping behavior of sse2 intrinsics here */
            lo = lp_build_pack2(gallivm, int32_type, int16_type, tmp[0], tmp[1]);
            hi = lp_build_pack2(gallivm, int32_type, int16_type, tmp[2], tmp[3]);
            dst[i] = lp_build_pack2(gallivm, int16_type, dst_type, lo, hi);
        }
        return;
    }

    /* Pre-convert half-floats to floats
     */
    else if (src_type.floating && src_type.width == 16)
    {
        for(i = 0; i < num_tmps; ++i)
            tmp[i] = lp_build_half_to_float(gallivm, src_type, tmp[i]);

        tmp_type.width = 32;
    }

    /*
     * Clamp if necessary
     */

    if(memcmp(&src_type, &dst_type, sizeof src_type) != 0) {
        struct lp_build_context bld;
        double src_min = lp_const_min(src_type);
        double dst_min = lp_const_min(dst_type);
        double src_max = lp_const_max(src_type);
        double dst_max = lp_const_max(dst_type);
        LLVMValueRef thres;

        lp_build_context_init(&bld, gallivm, tmp_type);

        if(src_min < dst_min) {
            if(dst_min == 0.0)
                thres = bld.zero;
            else
                thres = lp_build_const_vec(gallivm, src_type, dst_min);
            for(i = 0; i < num_tmps; ++i)
                tmp[i] = lp_build_max(&bld, tmp[i], thres);
        }

        if(src_max > dst_max) {
            if(dst_max == 1.0)
                thres = bld.one;
            else
                thres = lp_build_const_vec(gallivm, src_type, dst_max);
            for(i = 0; i < num_tmps; ++i)
                tmp[i] = lp_build_min(&bld, tmp[i], thres);
        }
    }

    /*
     * Scale to the narrowest range
     */

    if(dst_type.floating) {
        /* Nothing to do */
    }
    else if(tmp_type.floating) {
        if(!dst_type.fixed && !dst_type.sign && dst_type.norm) {
            for(i = 0; i < num_tmps; ++i) {
                tmp[i] = lp_build_clamped_float_to_unsigned_norm(gallivm,
                         tmp_type,
                         dst_type.width,
                         tmp[i]);
            }
            tmp_type.floating = FALSE;
        }
        else {
            double dst_scale = lp_const_scale(dst_type);
            LLVMTypeRef tmp_vec_type;

            if (dst_scale != 1.0) {
                LLVMValueRef scale = lp_build_const_vec(gallivm, tmp_type, dst_scale);
                for(i = 0; i < num_tmps; ++i)
                    tmp[i] = LLVMBuildFMul(builder, tmp[i], scale, "");
            }

            /* Use an equally sized integer for intermediate computations */
            tmp_type.floating = FALSE;
            tmp_vec_type = lp_build_vec_type(gallivm, tmp_type);
            for(i = 0; i < num_tmps; ++i) {
#if 0
                if(dst_type.sign)
                    tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
                else
                    tmp[i] = LLVMBuildFPToUI(builder, tmp[i], tmp_vec_type, "");
#else
                /* FIXME: there is no SSE counterpart for LLVMBuildFPToUI */
                tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
#endif
            }
        }
    }
    else {
        unsigned src_shift = lp_const_shift(src_type);
        unsigned dst_shift = lp_const_shift(dst_type);
        unsigned src_offset = lp_const_offset(src_type);
        unsigned dst_offset = lp_const_offset(dst_type);

        /* Compensate for different offsets */
        if (dst_offset > src_offset && src_type.width > dst_type.width) {
            for (i = 0; i < num_tmps; ++i) {
                LLVMValueRef shifted;
                LLVMValueRef shift = lp_build_const_int_vec(gallivm, tmp_type, src_shift - 1);
                if(src_type.sign)
                    shifted = LLVMBuildAShr(builder, tmp[i], shift, "");
                else
                    shifted = LLVMBuildLShr(builder, tmp[i], shift, "");

                tmp[i] = LLVMBuildSub(builder, tmp[i], shifted, "");
            }
        }

        if(src_shift > dst_shift) {
            LLVMValueRef shift = lp_build_const_int_vec(gallivm, tmp_type,
                                 src_shift - dst_shift);
            for(i = 0; i < num_tmps; ++i)
                if(src_type.sign)
                    tmp[i] = LLVMBuildAShr(builder, tmp[i], shift, "");
                else
                    tmp[i] = LLVMBuildLShr(builder, tmp[i], shift, "");
        }
    }

    /*
     * Truncate or expand bit width
     *
     * No data conversion should happen here, although the sign bits are
     * crucial to avoid bad clamping.
     */

    {
        struct lp_type new_type;

        new_type = tmp_type;
        new_type.sign   = dst_type.sign;
        new_type.width  = dst_type.width;
        new_type.length = dst_type.length;

        lp_build_resize(gallivm, tmp_type, new_type, tmp, num_srcs, tmp, num_dsts);

        tmp_type = new_type;
        num_tmps = num_dsts;
    }

    /*
     * Scale to the widest range
     */

    if(src_type.floating) {
        /* Nothing to do */
    }
    else if(!src_type.floating && dst_type.floating) {
        if(!src_type.fixed && !src_type.sign && src_type.norm) {
            for(i = 0; i < num_tmps; ++i) {
                tmp[i] = lp_build_unsigned_norm_to_float(gallivm,
                         src_type.width,
                         dst_type,
                         tmp[i]);
            }
            tmp_type.floating = TRUE;
        }
        else {
            double src_scale = lp_const_scale(src_type);
            LLVMTypeRef tmp_vec_type;

            /* Use an equally sized integer for intermediate computations */
            tmp_type.floating = TRUE;
            tmp_type.sign = TRUE;
            tmp_vec_type = lp_build_vec_type(gallivm, tmp_type);
            for(i = 0; i < num_tmps; ++i) {
#if 0
                if(dst_type.sign)
                    tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
                else
                    tmp[i] = LLVMBuildUIToFP(builder, tmp[i], tmp_vec_type, "");
#else
                /* FIXME: there is no SSE counterpart for LLVMBuildUIToFP */
                tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
#endif
            }

            if (src_scale != 1.0) {
                LLVMValueRef scale = lp_build_const_vec(gallivm, tmp_type, 1.0/src_scale);
                for(i = 0; i < num_tmps; ++i)
                    tmp[i] = LLVMBuildFMul(builder, tmp[i], scale, "");
            }
        }
    }
    else {
        unsigned src_shift = lp_const_shift(src_type);
        unsigned dst_shift = lp_const_shift(dst_type);
        unsigned src_offset = lp_const_offset(src_type);
        unsigned dst_offset = lp_const_offset(dst_type);

        if (src_shift < dst_shift) {
            LLVMValueRef pre_shift[LP_MAX_VECTOR_LENGTH];
            LLVMValueRef shift = lp_build_const_int_vec(gallivm, tmp_type, dst_shift - src_shift);

            for (i = 0; i < num_tmps; ++i) {
                pre_shift[i] = tmp[i];
                tmp[i] = LLVMBuildShl(builder, tmp[i], shift, "");
            }

            /* Compensate for different offsets */
            if (dst_offset > src_offset) {
                for (i = 0; i < num_tmps; ++i) {
                    tmp[i] = LLVMBuildSub(builder, tmp[i], pre_shift[i], "");
                }
            }
        }
    }

    for(i = 0; i < num_dsts; ++i) {
        dst[i] = tmp[i];
        assert(lp_check_value(dst_type, dst[i]));
    }
}
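
The generic path is easier to follow with a scalar model of one common
conversion, 32-bit float to 8-bit unsigned normalized: clamp to the
destination range, scale by the destination maximum, then round. This sketches
the per-element effect only; the helper name is hypothetical, and the SSE
special cases and the vector resizing above are omitted.

#include <math.h>
#include <stdint.h>

/* Hypothetical scalar model of lp_build_conv for float -> unorm8:
 * clamp to [dst_min, dst_max] = [0, 1], scale by the destination's
 * lp_const_scale (255 for unorm8), and round to integer. */
static uint8_t
conv_float_to_unorm8(float x)
{
   if (x < 0.0f) x = 0.0f;   /* clamp to dst_min */
   if (x > 1.0f) x = 1.0f;   /* clamp to dst_max */
   return (uint8_t)lrintf(x * 255.0f);
}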
Example #4
/**
 * Generate code to compute texture level of detail (lambda).
 * \param ddx  partial derivatives of (s, t, r, q) with respect to X
 * \param ddy  partial derivatives of (s, t, r, q) with respect to Y
 * \param lod_bias  optional float vector with the shader lod bias
 * \param explicit_lod  optional float vector with the explicit lod
 * \param width  scalar int texture width
 * \param height  scalar int texture height
 * \param depth  scalar int texture depth
 *
 * XXX: The resulting lod is scalar, so ignore all but the first element of
 * the derivatives, lod_bias, etc. that are passed by the shader.
 */
void
lp_build_lod_selector(struct lp_build_sample_context *bld,
                      unsigned unit,
                      const LLVMValueRef ddx[4],
                      const LLVMValueRef ddy[4],
                      LLVMValueRef lod_bias, /* optional */
                      LLVMValueRef explicit_lod, /* optional */
                      unsigned mip_filter,
                      LLVMValueRef *out_lod_ipart,
                      LLVMValueRef *out_lod_fpart)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   struct lp_build_context *float_bld = &bld->float_bld;
   LLVMValueRef lod;

   *out_lod_ipart = bld->int_bld.zero;
   *out_lod_fpart = bld->float_bld.zero;

   if (bld->static_state->min_max_lod_equal) {
      /* User is forcing sampling from a particular mipmap level.
       * This is hit during mipmap generation.
       */
      LLVMValueRef min_lod =
         bld->dynamic_state->min_lod(bld->dynamic_state, bld->gallivm, unit);

      lod = min_lod;
   }
   else {
      LLVMValueRef sampler_lod_bias =
         bld->dynamic_state->lod_bias(bld->dynamic_state, bld->gallivm, unit);
      LLVMValueRef index0 = lp_build_const_int32(bld->gallivm, 0);

      if (explicit_lod) {
         lod = LLVMBuildExtractElement(builder, explicit_lod,
                                       index0, "");
      }
      else {
         LLVMValueRef rho;

         rho = lp_build_rho(bld, unit, ddx, ddy);

         /*
          * Compute lod = log2(rho)
          */

         if (!lod_bias &&
             !bld->static_state->lod_bias_non_zero &&
             !bld->static_state->apply_max_lod &&
             !bld->static_state->apply_min_lod) {
            /*
             * Special case when there are no post-log2 adjustments, which
             * saves instructions by keeping the integer and fractional lod
             * computations separate from the start.
             */

            if (mip_filter == PIPE_TEX_MIPFILTER_NONE ||
                mip_filter == PIPE_TEX_MIPFILTER_NEAREST) {
               *out_lod_ipart = lp_build_ilog2(float_bld, rho);
               *out_lod_fpart = bld->float_bld.zero;
               return;
            }
            if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR &&
                !(gallivm_debug & GALLIVM_DEBUG_NO_BRILINEAR)) {
               lp_build_brilinear_rho(float_bld, rho, BRILINEAR_FACTOR,
                                      out_lod_ipart, out_lod_fpart);
               return;
            }
         }

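         /* The disabled branch below is the more precise log2; the fast
          * approximation is what actually runs. */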
         if (0) {
            lod = lp_build_log2(float_bld, rho);
         }
         else {
            lod = lp_build_fast_log2(float_bld, rho);
         }

         /* add shader lod bias */
         if (lod_bias) {
            lod_bias = LLVMBuildExtractElement(builder, lod_bias,
                                               index0, "");
            lod = LLVMBuildFAdd(builder, lod, lod_bias, "shader_lod_bias");
         }
      }

      /* add sampler lod bias */
      if (bld->static_state->lod_bias_non_zero)
         lod = LLVMBuildFAdd(builder, lod, sampler_lod_bias, "sampler_lod_bias");
      /* clamp lod */
      if (bld->static_state->apply_max_lod) {
         LLVMValueRef max_lod =
            bld->dynamic_state->max_lod(bld->dynamic_state, bld->gallivm, unit);

         lod = lp_build_min(float_bld, lod, max_lod);
      }
      if (bld->static_state->apply_min_lod) {
         LLVMValueRef min_lod =
            bld->dynamic_state->min_lod(bld->dynamic_state, bld->gallivm, unit);

         lod = lp_build_max(float_bld, lod, min_lod);
      }
   }

   if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) {
      if (!(gallivm_debug & GALLIVM_DEBUG_NO_BRILINEAR)) {
         lp_build_brilinear_lod(float_bld, lod, BRILINEAR_FACTOR,
                                out_lod_ipart, out_lod_fpart);
      }
      else {
         lp_build_ifloor_fract(float_bld, lod, out_lod_ipart, out_lod_fpart);
      }

      lp_build_name(*out_lod_fpart, "lod_fpart");
   }
   else {
      *out_lod_ipart = lp_build_iround(float_bld, lod);
   }

   lp_build_name(*out_lod_ipart, "lod_ipart");

   return;
}
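
As a rough scalar model of the generic path (no biases, no clamping, linear
mip filter): rho is taken as the maximum scaled derivative magnitude, lod is
log2(rho), and the result is split into integer and fractional parts. The
function below is a sketch; the max-abs form of rho is a simplifying
assumption, since lp_build_rho's actual formula depends on the texture target.

#include <math.h>

/* Hypothetical scalar model: lod = log2(rho), with rho approximated as
 * the maximum absolute (s, t) derivative scaled by the texture size.
 * Assumes rho > 0. The split mirrors lp_build_ifloor_fract. */
static void
lod_selector_scalar(float dsdx, float dtdx, float dsdy, float dtdy,
                    int width, int height,
                    int *lod_ipart, float *lod_fpart)
{
   float rho_x = fmaxf(fabsf(dsdx) * width, fabsf(dtdx) * height);
   float rho_y = fmaxf(fabsf(dsdy) * width, fabsf(dtdy) * height);
   float rho   = fmaxf(rho_x, rho_y);
   float lod   = log2f(rho);

   *lod_ipart = (int)floorf(lod);
   *lod_fpart = lod - floorf(lod);
}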
Example #5
/**
 * Convert linear float values to sRGB int values.
 * There are several possibilities for how to do this, e.g.
 * - use a table (based on exponent/highest order mantissa bits) and do
 *   linear interpolation (https://gist.github.com/rygorous/2203834)
 * - Chebyshev polynomial
 * - approximation using reciprocals
 * - using int-to-float and float-to-int tricks for pow()
 *   (http://stackoverflow.com/questions/6475373/optimizations-for-pow-with-const-non-integer-exponent)
 *
 * @param src   float (vector) value(s) to convert.
 */
static LLVMValueRef
lp_build_linear_to_srgb(struct gallivm_state *gallivm,
                        struct lp_type src_type,
                        LLVMValueRef src)
{
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_build_context f32_bld;
   LLVMValueRef lin_thresh, lin, lin_const, is_linear, tmp, pow_final;

   lp_build_context_init(&f32_bld, gallivm, src_type);

   src = lp_build_clamp(&f32_bld, src, f32_bld.zero, f32_bld.one);

   if (0) {
      /*
       * using the int-to-float and float-to-int trick for pow().
       * This is much more accurate than necessary thanks to the correction,
       * but it most certainly makes no sense without rsqrt available.
       * Bonus points if you understand how this works...
       * All in all (including min/max clamp, conversion) 19 instructions.
       */

      float exp_f = 2.0f / 3.0f;
      /* some compilers can't do exp2f, so this is exp2f(127.0f/exp_f - 127.0f) */
      float exp2f_c = 1.30438178253e+19f;
      float coeff_f = 0.62996f;
      LLVMValueRef pow_approx, coeff, x2, exponent, pow_1, pow_2;
      struct lp_type int_type = lp_int_type(src_type);

      /*
       * First calculate approx x^8/12
       */
      exponent = lp_build_const_vec(gallivm, src_type, exp_f);
      coeff = lp_build_const_vec(gallivm, src_type,
                                 exp2f_c * powf(coeff_f, 1.0f / exp_f));

      /* premultiply src */
      tmp = lp_build_mul(&f32_bld, coeff, src);
      /* "log2" */
      tmp = LLVMBuildBitCast(builder, tmp, lp_build_vec_type(gallivm, int_type), "");
      tmp = lp_build_int_to_float(&f32_bld, tmp);
      /* multiply for pow */
      tmp = lp_build_mul(&f32_bld, tmp, exponent);
      /* "exp2" */
      pow_approx = lp_build_itrunc(&f32_bld, tmp);
      pow_approx = LLVMBuildBitCast(builder, pow_approx,
                                    lp_build_vec_type(gallivm, src_type), "");

      /*
       * Since that pow was inaccurate (like 3 bits, though each sqrt step would
       * give another bit), compensate the error (which is why we chose another
       * exponent in the first place).
       */
      /* x * x^(8/12) = x^(20/12) */
      pow_1 = lp_build_mul(&f32_bld, pow_approx, src);

      /* x * x * x^(-4/12) = x^(20/12) */
      /* Should avoid using rsqrt if it's not available, but
       * using x * x^(4/12) * x^(4/12) instead will change error weight */
      tmp = lp_build_fast_rsqrt(&f32_bld, pow_approx);
      x2 = lp_build_mul(&f32_bld, src, src);
      pow_2 = lp_build_mul(&f32_bld, x2, tmp);

      /* Average the values so the errors cancel out and compensate the bias;
       * we also squeeze the 1.055 mul of the srgb conversion plus the 255.0
       * mul for conversion to int in here. */
      tmp = lp_build_add(&f32_bld, pow_1, pow_2);
      coeff = lp_build_const_vec(gallivm, src_type,
                                 1.0f / (3.0f * coeff_f) * 0.999852f *
                                 powf(1.055f * 255.0f, 4.0f));
      pow_final = lp_build_mul(&f32_bld, tmp, coeff);

      /* x^(5/12) = rsqrt(rsqrt(x^20/12)) */
      if (lp_build_fast_rsqrt_available(src_type)) {
         pow_final = lp_build_fast_rsqrt(&f32_bld,
                        lp_build_fast_rsqrt(&f32_bld, pow_final));
      }
      else {
         pow_final = lp_build_sqrt(&f32_bld, lp_build_sqrt(&f32_bld, pow_final));
      }
      pow_final = lp_build_add(&f32_bld, pow_final,
                               lp_build_const_vec(gallivm, src_type, -0.055f * 255.0f));
   }

   else {
      /*
       * using "rational polynomial" approximation here.
       * Essentially y = a*x^0.375 + b*x^0.5 + c, also factoring
       * in the 255.0 mul and the scaling mul.
       * (a is closer to actual value so has higher weight than b.)
       * Note: the constants are magic values. They were found empirically
       * and could possibly be improved, but they are good enough (be VERY
       * careful with the error metric if you want to tweak them; they also
       * MUST fit with the crappy polynomial above for srgb->linear since it
       * is required that each srgb value maps back to the same value).
       * This function has a max error of +-0.17 (and we'd only require +-0.6);
       * for the approximated srgb->linear values the error is naturally larger
       * (+-0.42) but still accurate enough (required +-0.5 essentially).
       * All in all (including min/max clamp, conversion) 15 instructions.
       * FMA would help (minus 2 instructions).
       */

      LLVMValueRef x05, x0375, a_const, b_const, c_const, tmp2;

      if (lp_build_fast_rsqrt_available(src_type)) {
         tmp = lp_build_fast_rsqrt(&f32_bld, src);
         x05 = lp_build_mul(&f32_bld, src, tmp);
      }
      else {
         /*
          * I don't really expect this to be practical without rsqrt
          * but there's no reason for triple punishment so at least
          * save the otherwise resulting division and unnecessary mul...
          */
         x05 = lp_build_sqrt(&f32_bld, src);
      }

      tmp = lp_build_mul(&f32_bld, x05, src);
      if (lp_build_fast_rsqrt_available(src_type)) {
         x0375 = lp_build_fast_rsqrt(&f32_bld, lp_build_fast_rsqrt(&f32_bld, tmp));
      }
      else {
         x0375 = lp_build_sqrt(&f32_bld, lp_build_sqrt(&f32_bld, tmp));
      }

      a_const = lp_build_const_vec(gallivm, src_type, 0.675f * 1.0622 * 255.0f);
      b_const = lp_build_const_vec(gallivm, src_type, 0.325f * 1.0622 * 255.0f);
      c_const = lp_build_const_vec(gallivm, src_type, -0.0620f * 255.0f);

      tmp = lp_build_mul(&f32_bld, a_const, x0375);
      tmp2 = lp_build_mul(&f32_bld, b_const, x05);
      tmp2 = lp_build_add(&f32_bld, tmp2, c_const);
      pow_final = lp_build_add(&f32_bld, tmp, tmp2);
   }

   /* linear part is easy */
   lin_const = lp_build_const_vec(gallivm, src_type, 12.92f * 255.0f);
   lin = lp_build_mul(&f32_bld, src, lin_const);

   lin_thresh = lp_build_const_vec(gallivm, src_type, 0.0031308f);
   is_linear = lp_build_compare(gallivm, src_type, PIPE_FUNC_LEQUAL, src, lin_thresh);
   tmp = lp_build_select(&f32_bld, is_linear, lin, pow_final);

   f32_bld.type.sign = 0;
   return lp_build_iround(&f32_bld, tmp);
}
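
Both branches approximate the standard sRGB encoding. For comparison, this is
the exact transfer function the code targets, scaled by 255 for the integer
conversion; this reference version is not part of the file, and it is of
course far slower than the vectorized approximations above.

#include <math.h>

/* Exact linear-to-sRGB reference, scaled to [0, 255]. The vector code
 * above approximates this to within the stated +-0.17 error. */
static float
linear_to_srgb_ref(float x)
{
   if (x <= 0.0031308f)
      return 12.92f * x * 255.0f;
   return (1.055f * powf(x, 1.0f / 2.4f) - 0.055f) * 255.0f;
}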