static void emit_ishr(const struct lp_build_tgsi_action *action,
		      struct lp_build_tgsi_context *bld_base,
		      struct lp_build_emit_data *emit_data)
{
	LLVMBuilderRef builder = bld_base->base.gallivm->builder;
	emit_data->output[emit_data->chan] = LLVMBuildAShr(builder,
			emit_data->args[0], emit_data->args[1], "");
}
Example #2
/**
 * Convert float[] to int[] with floor().
 */
LLVMValueRef
lp_build_ifloor(struct lp_build_context *bld,
                LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
   LLVMValueRef res;

   assert(type.floating);
   assert(lp_check_value(type, a));

   if(util_cpu_caps.has_sse4_1) {
      res = lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_FLOOR);
   }
   else {
      /* Use the sign bit to mask in an offset of almost -1 for negative values */
      LLVMTypeRef vec_type = lp_build_vec_type(type);
      unsigned mantissa = lp_mantissa(type);
      LLVMValueRef mask = lp_build_int_const_scalar(type, (unsigned long long)1 << (type.width - 1));
      LLVMValueRef sign;
      LLVMValueRef offset;

      /* sign = a < 0 ? ~0 : 0 */
      sign = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
      sign = LLVMBuildAnd(bld->builder, sign, mask, "");
      sign = LLVMBuildAShr(bld->builder, sign, lp_build_int_const_scalar(type, type.width - 1), "");
      lp_build_name(sign, "floor.sign");

      /* offset = -0.99999(9)f */
      offset = lp_build_const_scalar(type, -(double)(((unsigned long long)1 << mantissa) - 1)/((unsigned long long)1 << mantissa));
      offset = LLVMConstBitCast(offset, int_vec_type);

      /* offset = a < 0 ? -0.99999(9)f : 0.0f */
      offset = LLVMBuildAnd(bld->builder, offset, sign, "");
      offset = LLVMBuildBitCast(bld->builder, offset, vec_type, "");
      lp_build_name(offset, "floor.offset");

      res = LLVMBuildAdd(bld->builder, a, offset, "");
      lp_build_name(res, "floor.res");
   }

   res = LLVMBuildFPToSI(bld->builder, res, int_vec_type, "");
   lp_build_name(res, "floor");

   return res;
}
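
A minimal scalar sketch of the non-SSE4.1 path above, assuming IEEE-754 single precision (the name ifloor_scalar and the scalar framing are illustrative, not part of gallivm): the sign bit masks in an offset of almost -1 for negative inputs, so that plain truncation then behaves like floor().

#include <stdint.h>
#include <string.h>

/* Illustrative scalar model of the offset trick; assumes a 23-bit mantissa. */
static int32_t ifloor_scalar(float a)
{
   uint32_t bits;
   memcpy(&bits, &a, sizeof bits);

   /* sign = a < 0 ? ~0 : 0 (arithmetic shift replicates the sign bit) */
   int32_t sign = (int32_t)bits >> 31;

   /* offset = -0.99999(9)f = -(2^23 - 1) / 2^23 */
   float offset = -(float)((1u << 23) - 1) / (float)(1u << 23);
   uint32_t offset_bits;
   memcpy(&offset_bits, &offset, sizeof offset_bits);

   /* offset = a < 0 ? -0.99999(9)f : 0.0f */
   offset_bits &= (uint32_t)sign;
   memcpy(&offset, &offset_bits, sizeof offset);

   /* truncating (a + offset) now rounds toward -infinity */
   return (int32_t)(a + offset);
}
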
Example #3
/**
 * Shift right.
 */
LLVMValueRef
lp_build_shr(struct lp_build_context *bld, LLVMValueRef a, LLVMValueRef b)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   const struct lp_type type = bld->type;
   LLVMValueRef res;

   assert(!type.floating);

   assert(lp_check_value(type, a));
   assert(lp_check_value(type, b));

   if (type.sign) {
      res = LLVMBuildAShr(builder, a, b, "");
   } else {
      res = LLVMBuildLShr(builder, a, b, "");
   }

   return res;
}
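
In scalar C the same choice looks like this (an illustrative sketch, assuming the common arithmetic-shift behaviour for signed right shifts): signed operands use an arithmetic shift that replicates the sign bit, unsigned operands a logical shift that fills with zeros.

#include <stdint.h>

/* Illustrative scalar counterparts of LLVMBuildAShr / LLVMBuildLShr. */
static int32_t  shr_signed(int32_t a, unsigned b)    { return a >> b; }  /* arithmetic */
static uint32_t shr_unsigned(uint32_t a, unsigned b) { return a >> b; }  /* logical */

/* e.g. shr_signed(-16, 2) == -4, shr_unsigned(0xFFFFFFF0u, 2) == 0x3FFFFFFCu */
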
Example #4
/**
 * Double the bit width.
 *
 * This only changes the number of bits with which the values are represented,
 * not the values themselves.
 */
void
lp_build_unpack2(struct gallivm_state *gallivm,
                 struct lp_type src_type,
                 struct lp_type dst_type,
                 LLVMValueRef src,
                 LLVMValueRef *dst_lo,
                 LLVMValueRef *dst_hi)
{
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef msb;
   LLVMTypeRef dst_vec_type;

   assert(!src_type.floating);
   assert(!dst_type.floating);
   assert(dst_type.width == src_type.width * 2);
   assert(dst_type.length * 2 == src_type.length);

   if(dst_type.sign && src_type.sign) {
      /* Replicate the sign bit in the most significant bits */
      msb = LLVMBuildAShr(builder, src, lp_build_const_int_vec(gallivm, src_type, src_type.width - 1), "");
   }
   else
      /* Most significant bits always zero */
      msb = lp_build_zero(gallivm, src_type);

   /* Interleave bits */
#ifdef PIPE_ARCH_LITTLE_ENDIAN
   *dst_lo = lp_build_interleave2(gallivm, src_type, src, msb, 0);
   *dst_hi = lp_build_interleave2(gallivm, src_type, src, msb, 1);
#else
   *dst_lo = lp_build_interleave2(gallivm, src_type, msb, src, 0);
   *dst_hi = lp_build_interleave2(gallivm, src_type, msb, src, 1);
#endif

   /* Cast the result into the new type (twice as wide) */

   dst_vec_type = lp_build_vec_type(gallivm, dst_type);

   *dst_lo = LLVMBuildBitCast(builder, *dst_lo, dst_vec_type, "");
   *dst_hi = LLVMBuildBitCast(builder, *dst_hi, dst_vec_type, "");
}
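
A scalar sketch of what the widening amounts to per element, for the 16-bit to 32-bit case (helper names are illustrative; assumes arithmetic shifts for signed values): the msb half is either a sign replica or zero, and the interleave places it in the high half of the wider value.

#include <stdint.h>

/* Illustrative scalar equivalent of doubling the bit width of one element. */
static int32_t widen16to32_signed(int16_t src)
{
   /* msb = replicated sign bit: ~0 for negative values, 0 otherwise */
   uint16_t msb = (uint16_t)((int32_t)src >> 15);
   return (int32_t)(((uint32_t)msb << 16) | (uint16_t)src);
}

static uint32_t widen16to32_unsigned(uint16_t src)
{
   /* most significant bits always zero */
   return (uint32_t)src;
}
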
Example #5
LLVMValueRef gen_shr(compile_t* c, ast_t* left, ast_t* right)
{
  ast_t* type = ast_type(left);
  bool sign = is_signed(c->opt, type);

  LLVMValueRef l_value = gen_expr(c, left);
  LLVMValueRef r_value = gen_expr(c, right);

  if((l_value == NULL) || (r_value == NULL))
    return NULL;

  if(LLVMIsConstant(l_value) && LLVMIsConstant(r_value))
  {
    if(sign)
      return LLVMConstAShr(l_value, r_value);

    return LLVMConstLShr(l_value, r_value);
  }

  if(sign)
    return LLVMBuildAShr(c->builder, l_value, r_value, "");

  return LLVMBuildLShr(c->builder, l_value, r_value, "");
}
Example #6
static INLINE void
yuv_to_rgb_soa(struct gallivm_state *gallivm,
               unsigned n,
               LLVMValueRef y, LLVMValueRef u, LLVMValueRef v,
               LLVMValueRef *r, LLVMValueRef *g, LLVMValueRef *b)
{
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_type type;
   struct lp_build_context bld;

   LLVMValueRef c0;
   LLVMValueRef c8;
   LLVMValueRef c16;
   LLVMValueRef c128;
   LLVMValueRef c255;

   LLVMValueRef cy;
   LLVMValueRef cug;
   LLVMValueRef cub;
   LLVMValueRef cvr;
   LLVMValueRef cvg;

   memset(&type, 0, sizeof type);
   type.sign = TRUE;
   type.width = 32;
   type.length = n;

   lp_build_context_init(&bld, gallivm, type);

   assert(lp_check_value(type, y));
   assert(lp_check_value(type, u));
   assert(lp_check_value(type, v));

   /*
    * Constants
    */

   c0   = lp_build_const_int_vec(gallivm, type,   0);
   c8   = lp_build_const_int_vec(gallivm, type,   8);
   c16  = lp_build_const_int_vec(gallivm, type,  16);
   c128 = lp_build_const_int_vec(gallivm, type, 128);
   c255 = lp_build_const_int_vec(gallivm, type, 255);

   cy  = lp_build_const_int_vec(gallivm, type,  298);
   cug = lp_build_const_int_vec(gallivm, type, -100);
   cub = lp_build_const_int_vec(gallivm, type,  516);
   cvr = lp_build_const_int_vec(gallivm, type,  409);
   cvg = lp_build_const_int_vec(gallivm, type, -208);

   /*
    *  y -= 16;
    *  u -= 128;
    *  v -= 128;
    */

   y = LLVMBuildSub(builder, y, c16, "");
   u = LLVMBuildSub(builder, u, c128, "");
   v = LLVMBuildSub(builder, v, c128, "");

   /*
    * r = 298 * _y            + 409 * _v + 128;
    * g = 298 * _y - 100 * _u - 208 * _v + 128;
    * b = 298 * _y + 516 * _u            + 128;
    */

   y = LLVMBuildMul(builder, y, cy, "");
   y = LLVMBuildAdd(builder, y, c128, "");

   *r = LLVMBuildMul(builder, v, cvr, "");
   *g = LLVMBuildAdd(builder,
                     LLVMBuildMul(builder, u, cug, ""),
                     LLVMBuildMul(builder, v, cvg, ""),
                     "");
   *b = LLVMBuildMul(builder, u, cub, "");

   *r = LLVMBuildAdd(builder, *r, y, "");
   *g = LLVMBuildAdd(builder, *g, y, "");
   *b = LLVMBuildAdd(builder, *b, y, "");

   /*
    * r >>= 8;
    * g >>= 8;
    * b >>= 8;
    */

   *r = LLVMBuildAShr(builder, *r, c8, "r");
   *g = LLVMBuildAShr(builder, *g, c8, "g");
   *b = LLVMBuildAShr(builder, *b, c8, "b");

   /*
    * Clamp
    */

   *r = lp_build_clamp(&bld, *r, c0, c255);
   *g = lp_build_clamp(&bld, *g, c0, c255);
   *b = lp_build_clamp(&bld, *b, c0, c255);
}
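
The same arithmetic in scalar form, as a reference sketch (assuming 8-bit Y/U/V inputs already widened to int; the helper is illustrative, not part of gallivm):

#include <stdint.h>

/* Illustrative scalar reference for the fixed-point conversion above. */
static void yuv_to_rgb_scalar(int y, int u, int v, int *r, int *g, int *b)
{
   int _y = y - 16;
   int _u = u - 128;
   int _v = v - 128;

   int ty = 298 * _y + 128;              /* +128 rounds before the >> 8 */

   *r = (ty            + 409 * _v) >> 8;
   *g = (ty - 100 * _u - 208 * _v) >> 8;
   *b = (ty + 516 * _u           ) >> 8;

   /* clamp to [0, 255] */
   if (*r < 0) *r = 0; else if (*r > 255) *r = 255;
   if (*g < 0) *g = 0; else if (*g > 255) *g = 255;
   if (*b < 0) *b = 0; else if (*b > 255) *b = 255;
}
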
Example #7
/**
 * Unpack several pixels in SoA.
 *
 * It takes a vector of packed pixels:
 *
 *   packed = {P0, P1, P2, P3, ..., Pn}
 *
 * And will produce four vectors:
 *
 *   red    = {R0, R1, R2, R3, ..., Rn}
 *   green  = {G0, G1, G2, G3, ..., Gn}
 *   blue   = {B0, B1, B2, B3, ..., Bn}
 *   alpha  = {A0, A1, A2, A3, ..., An}
 *
 * It requires that a packed pixel fits into an element of the output
 * channels. The common case is converting pixels with a depth of 32 bits or
 * less into floats.
 *
 * \param format_desc  the format of the 'packed' incoming pixel vector
 * \param type  the desired type for rgba_out (type.length = n, above)
 * \param packed  the incoming vector of packed pixels
 * \param rgba_out  returns the SoA R,G,B,A vectors
 */
void
lp_build_unpack_rgba_soa(struct gallivm_state *gallivm,
                         const struct util_format_description *format_desc,
                         struct lp_type type,
                         LLVMValueRef packed,
                         LLVMValueRef rgba_out[4])
{
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_build_context bld;
   LLVMValueRef inputs[4];
   unsigned chan;

   assert(format_desc->layout == UTIL_FORMAT_LAYOUT_PLAIN);
   assert(format_desc->block.width == 1);
   assert(format_desc->block.height == 1);
   assert(format_desc->block.bits <= type.width);
   /* FIXME: Support more output types */
   assert(type.width == 32);

   lp_build_context_init(&bld, gallivm, type);

   /* Decode the input vector components */
   for (chan = 0; chan < format_desc->nr_channels; ++chan) {
      const unsigned width = format_desc->channel[chan].size;
      const unsigned start = format_desc->channel[chan].shift;
      const unsigned stop = start + width;
      LLVMValueRef input;

      input = packed;

      switch(format_desc->channel[chan].type) {
      case UTIL_FORMAT_TYPE_VOID:
         input = lp_build_undef(gallivm, type);
         break;

      case UTIL_FORMAT_TYPE_UNSIGNED:
         /*
          * Align the LSB
          */

         if (start) {
            input = LLVMBuildLShr(builder, input, lp_build_const_int_vec(gallivm, type, start), "");
         }

         /*
          * Zero the MSBs
          */

         if (stop < format_desc->block.bits) {
            unsigned mask = ((unsigned long long)1 << width) - 1;
            input = LLVMBuildAnd(builder, input, lp_build_const_int_vec(gallivm, type, mask), "");
         }

         /*
          * Type conversion
          */

         if (type.floating) {
            if (format_desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB) {
               assert(width == 8);
               if (format_desc->swizzle[3] == chan) {
                  input = lp_build_unsigned_norm_to_float(gallivm, width, type, input);
               }
               else {
                  struct lp_type conv_type = lp_uint_type(type);
                  input = lp_build_srgb_to_linear(gallivm, conv_type, input);
               }
            }
            else {
               if(format_desc->channel[chan].normalized)
                  input = lp_build_unsigned_norm_to_float(gallivm, width, type, input);
               else
                  input = LLVMBuildSIToFP(builder, input,
                                          lp_build_vec_type(gallivm, type), "");
            }
         }
         else if (format_desc->channel[chan].pure_integer) {
            /* Nothing to do */
         } else {
             /* FIXME */
             assert(0);
         }

         break;

      case UTIL_FORMAT_TYPE_SIGNED:
         /*
          * Align the sign bit first.
          */

         if (stop < type.width) {
            unsigned bits = type.width - stop;
            LLVMValueRef bits_val = lp_build_const_int_vec(gallivm, type, bits);
            input = LLVMBuildShl(builder, input, bits_val, "");
         }

         /*
          * Align the LSB (with an arithmetic shift to preserve the sign)
          */

         if (format_desc->channel[chan].size < type.width) {
            unsigned bits = type.width - format_desc->channel[chan].size;
            LLVMValueRef bits_val = lp_build_const_int_vec(gallivm, type, bits);
            input = LLVMBuildAShr(builder, input, bits_val, "");
         }

         /*
          * Type conversion
          */

         if (type.floating) {
            input = LLVMBuildSIToFP(builder, input, lp_build_vec_type(gallivm, type), "");
            if (format_desc->channel[chan].normalized) {
               double scale = 1.0 / ((1 << (format_desc->channel[chan].size - 1)) - 1);
               LLVMValueRef scale_val = lp_build_const_vec(gallivm, type, scale);
               input = LLVMBuildFMul(builder, input, scale_val, "");
               /* The formula above produces a value below -1.0 for the most
                * negative input, but everything seems happy with that, hence
                * disabled for now. */
               if (0)
                  input = lp_build_max(&bld, input,
                                       lp_build_const_vec(gallivm, type, -1.0f));
            }
         }
         else if (format_desc->channel[chan].pure_integer) {
            /* Nothing to do */
         } else {
             /* FIXME */
             assert(0);
         }

         break;

      case UTIL_FORMAT_TYPE_FLOAT:
         if (type.floating) {
            assert(start == 0);
            assert(stop == 32);
            assert(type.width == 32);
            input = LLVMBuildBitCast(builder, input, lp_build_vec_type(gallivm, type), "");
         }
         else {
            /* FIXME */
            assert(0);
            input = lp_build_undef(gallivm, type);
         }
         break;

      case UTIL_FORMAT_TYPE_FIXED:
         if (type.floating) {
            double scale = 1.0 / ((1 << (format_desc->channel[chan].size/2)) - 1);
            LLVMValueRef scale_val = lp_build_const_vec(gallivm, type, scale);
            input = LLVMBuildSIToFP(builder, input, lp_build_vec_type(gallivm, type), "");
            input = LLVMBuildFMul(builder, input, scale_val, "");
         }
         else {
            /* FIXME */
            assert(0);
            input = lp_build_undef(gallivm, type);
         }
         break;

      default:
         assert(0);
         input = lp_build_undef(gallivm, type);
         break;
      }

      inputs[chan] = input;
   }

   lp_build_format_swizzle_soa(format_desc, &bld, inputs, rgba_out);
}
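
For a single unsigned-normalized channel, the decode loop above boils down to a shift, a mask and a scale; a scalar sketch (the helper name and the B5G6R5 example are illustrative):

#include <stdint.h>

/* Illustrative scalar decode of one unsigned-normalized channel:
 * align the LSB, zero the MSBs, then scale to [0, 1]. */
static float unpack_unorm_chan(uint32_t packed, unsigned start, unsigned width)
{
   uint32_t mask = (1u << width) - 1;
   uint32_t bits = (packed >> start) & mask;   /* align LSB, zero MSBs */
   return (float)bits / (float)mask;           /* unsigned norm -> float */
}

/* e.g. the red channel of a B5G6R5 texel: unpack_unorm_chan(texel, 11, 5) */
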
Example #8
/**
 * Generic type conversion.
 *
 * TODO: Take a precision argument, or even better, add a new precision member
 * to the lp_type union.
 */
void
lp_build_conv(struct gallivm_state *gallivm,
              struct lp_type src_type,
              struct lp_type dst_type,
              const LLVMValueRef *src, unsigned num_srcs,
              LLVMValueRef *dst, unsigned num_dsts)
{
    LLVMBuilderRef builder = gallivm->builder;
    struct lp_type tmp_type;
    LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
    unsigned num_tmps;
    unsigned i;

    /* We must not lose or gain channels, only precision. */
    assert(src_type.length * num_srcs == dst_type.length * num_dsts);

    assert(src_type.length <= LP_MAX_VECTOR_LENGTH);
    assert(dst_type.length <= LP_MAX_VECTOR_LENGTH);
    assert(num_srcs <= LP_MAX_VECTOR_LENGTH);
    assert(num_dsts <= LP_MAX_VECTOR_LENGTH);

    tmp_type = src_type;
    for(i = 0; i < num_srcs; ++i) {
        assert(lp_check_value(src_type, src[i]));
        tmp[i] = src[i];
    }
    num_tmps = num_srcs;


    /* Special case 4x4f --> 1x16ub
     */
    if (src_type.floating == 1 &&
            src_type.fixed    == 0 &&
            src_type.sign     == 1 &&
            src_type.norm     == 0 &&
            src_type.width    == 32 &&
            src_type.length   == 4 &&

            dst_type.floating == 0 &&
            dst_type.fixed    == 0 &&
            dst_type.sign     == 0 &&
            dst_type.norm     == 1 &&
            dst_type.width    == 8 &&
            dst_type.length   == 16 &&

            4 * num_dsts      == num_srcs &&

            util_cpu_caps.has_sse2)
    {
        struct lp_build_context bld;
        struct lp_type int16_type = dst_type;
        struct lp_type int32_type = dst_type;
        LLVMValueRef const_255f;
        unsigned i, j;

        lp_build_context_init(&bld, gallivm, src_type);

        int16_type.width *= 2;
        int16_type.length /= 2;
        int16_type.sign = 1;

        int32_type.width *= 4;
        int32_type.length /= 4;
        int32_type.sign = 1;

        const_255f = lp_build_const_vec(gallivm, src_type, 255.0f);

        for (i = 0; i < num_dsts; ++i, src += 4) {
            LLVMValueRef lo, hi;

            for (j = 0; j < 4; ++j) {
                tmp[j] = LLVMBuildFMul(builder, src[j], const_255f, "");
                tmp[j] = lp_build_iround(&bld, tmp[j]);
            }

            /* relying on clamping behavior of sse2 intrinsics here */
            lo = lp_build_pack2(gallivm, int32_type, int16_type, tmp[0], tmp[1]);
            hi = lp_build_pack2(gallivm, int32_type, int16_type, tmp[2], tmp[3]);
            dst[i] = lp_build_pack2(gallivm, int16_type, dst_type, lo, hi);
        }

        return;
    }

    /* Special case 2x8f --> 1x16ub
     */
    else if (src_type.floating == 1 &&
             src_type.fixed    == 0 &&
             src_type.sign     == 1 &&
             src_type.norm     == 0 &&
             src_type.width    == 32 &&
             src_type.length   == 8 &&

             dst_type.floating == 0 &&
             dst_type.fixed    == 0 &&
             dst_type.sign     == 0 &&
             dst_type.norm     == 1 &&
             dst_type.width    == 8 &&
             dst_type.length   == 16 &&

             2 * num_dsts      == num_srcs &&

             util_cpu_caps.has_avx) {

        struct lp_build_context bld;
        struct lp_type int16_type = dst_type;
        struct lp_type int32_type = dst_type;
        LLVMValueRef const_255f;
        unsigned i;

        lp_build_context_init(&bld, gallivm, src_type);

        int16_type.width *= 2;
        int16_type.length /= 2;
        int16_type.sign = 1;

        int32_type.width *= 4;
        int32_type.length /= 4;
        int32_type.sign = 1;

        const_255f = lp_build_const_vec(gallivm, src_type, 255.0f);

        for (i = 0; i < num_dsts; ++i, src += 2) {
            LLVMValueRef lo, hi, a, b;

            a = LLVMBuildFMul(builder, src[0], const_255f, "");
            b = LLVMBuildFMul(builder, src[1], const_255f, "");

            a = lp_build_iround(&bld, a);
            b = lp_build_iround(&bld, b);

            tmp[0] = lp_build_extract_range(gallivm, a, 0, 4);
            tmp[1] = lp_build_extract_range(gallivm, a, 4, 4);
            tmp[2] = lp_build_extract_range(gallivm, b, 0, 4);
            tmp[3] = lp_build_extract_range(gallivm, b, 4, 4);

            /* relying on clamping behavior of sse2 intrinsics here */
            lo = lp_build_pack2(gallivm, int32_type, int16_type, tmp[0], tmp[1]);
            hi = lp_build_pack2(gallivm, int32_type, int16_type, tmp[2], tmp[3]);
            dst[i] = lp_build_pack2(gallivm, int16_type, dst_type, lo, hi);
        }
        return;
    }

    /* Pre convert half-floats to floats
     */
    else if (src_type.floating && src_type.width == 16)
    {
        for(i = 0; i < num_tmps; ++i)
            tmp[i] = lp_build_half_to_float(gallivm, src_type, tmp[i]);

        tmp_type.width = 32;
    }

    /*
     * Clamp if necessary
     */

    if(memcmp(&src_type, &dst_type, sizeof src_type) != 0) {
        struct lp_build_context bld;
        double src_min = lp_const_min(src_type);
        double dst_min = lp_const_min(dst_type);
        double src_max = lp_const_max(src_type);
        double dst_max = lp_const_max(dst_type);
        LLVMValueRef thres;

        lp_build_context_init(&bld, gallivm, tmp_type);

        if(src_min < dst_min) {
            if(dst_min == 0.0)
                thres = bld.zero;
            else
                thres = lp_build_const_vec(gallivm, src_type, dst_min);
            for(i = 0; i < num_tmps; ++i)
                tmp[i] = lp_build_max(&bld, tmp[i], thres);
        }

        if(src_max > dst_max) {
            if(dst_max == 1.0)
                thres = bld.one;
            else
                thres = lp_build_const_vec(gallivm, src_type, dst_max);
            for(i = 0; i < num_tmps; ++i)
                tmp[i] = lp_build_min(&bld, tmp[i], thres);
        }
    }

    /*
     * Scale to the narrowest range
     */

    if(dst_type.floating) {
        /* Nothing to do */
    }
    else if(tmp_type.floating) {
        if(!dst_type.fixed && !dst_type.sign && dst_type.norm) {
            for(i = 0; i < num_tmps; ++i) {
                tmp[i] = lp_build_clamped_float_to_unsigned_norm(gallivm,
                         tmp_type,
                         dst_type.width,
                         tmp[i]);
            }
            tmp_type.floating = FALSE;
        }
        else {
            double dst_scale = lp_const_scale(dst_type);
            LLVMTypeRef tmp_vec_type;

            if (dst_scale != 1.0) {
                LLVMValueRef scale = lp_build_const_vec(gallivm, tmp_type, dst_scale);
                for(i = 0; i < num_tmps; ++i)
                    tmp[i] = LLVMBuildFMul(builder, tmp[i], scale, "");
            }

            /* Use an equally sized integer for intermediate computations */
            tmp_type.floating = FALSE;
            tmp_vec_type = lp_build_vec_type(gallivm, tmp_type);
            for(i = 0; i < num_tmps; ++i) {
#if 0
                if(dst_type.sign)
                    tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
                else
                    tmp[i] = LLVMBuildFPToUI(builder, tmp[i], tmp_vec_type, "");
#else
                /* FIXME: there is no SSE counterpart for LLVMBuildFPToUI */
                tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
#endif
            }
        }
    }
    else {
        unsigned src_shift = lp_const_shift(src_type);
        unsigned dst_shift = lp_const_shift(dst_type);
        unsigned src_offset = lp_const_offset(src_type);
        unsigned dst_offset = lp_const_offset(dst_type);

        /* Compensate for different offsets */
        if (dst_offset > src_offset && src_type.width > dst_type.width) {
            for (i = 0; i < num_tmps; ++i) {
                LLVMValueRef shifted;
                LLVMValueRef shift = lp_build_const_int_vec(gallivm, tmp_type, src_shift - 1);
                if(src_type.sign)
                    shifted = LLVMBuildAShr(builder, tmp[i], shift, "");
                else
                    shifted = LLVMBuildLShr(builder, tmp[i], shift, "");

                tmp[i] = LLVMBuildSub(builder, tmp[i], shifted, "");
            }
        }

        if(src_shift > dst_shift) {
            LLVMValueRef shift = lp_build_const_int_vec(gallivm, tmp_type,
                                 src_shift - dst_shift);
            for(i = 0; i < num_tmps; ++i)
                if(src_type.sign)
                    tmp[i] = LLVMBuildAShr(builder, tmp[i], shift, "");
                else
                    tmp[i] = LLVMBuildLShr(builder, tmp[i], shift, "");
        }
    }

    /*
     * Truncate or expand bit width
     *
     * No data conversion should happen here, although the sign bits are
     * crucial to avoid bad clamping.
     */

    {
        struct lp_type new_type;

        new_type = tmp_type;
        new_type.sign   = dst_type.sign;
        new_type.width  = dst_type.width;
        new_type.length = dst_type.length;

        lp_build_resize(gallivm, tmp_type, new_type, tmp, num_srcs, tmp, num_dsts);

        tmp_type = new_type;
        num_tmps = num_dsts;
    }

    /*
     * Scale to the widest range
     */

    if(src_type.floating) {
        /* Nothing to do */
    }
    else if(!src_type.floating && dst_type.floating) {
        if(!src_type.fixed && !src_type.sign && src_type.norm) {
            for(i = 0; i < num_tmps; ++i) {
                tmp[i] = lp_build_unsigned_norm_to_float(gallivm,
                         src_type.width,
                         dst_type,
                         tmp[i]);
            }
            tmp_type.floating = TRUE;
        }
        else {
            double src_scale = lp_const_scale(src_type);
            LLVMTypeRef tmp_vec_type;

            /* Use an equally sized integer for intermediate computations */
            tmp_type.floating = TRUE;
            tmp_type.sign = TRUE;
            tmp_vec_type = lp_build_vec_type(gallivm, tmp_type);
            for(i = 0; i < num_tmps; ++i) {
#if 0
                if(dst_type.sign)
                    tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
                else
                    tmp[i] = LLVMBuildUIToFP(builder, tmp[i], tmp_vec_type, "");
#else
                /* FIXME: there is no SSE counterpart for LLVMBuildUIToFP */
                tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
#endif
            }

            if (src_scale != 1.0) {
                LLVMValueRef scale = lp_build_const_vec(gallivm, tmp_type, 1.0/src_scale);
                for(i = 0; i < num_tmps; ++i)
                    tmp[i] = LLVMBuildFMul(builder, tmp[i], scale, "");
            }
        }
    }
    else {
        unsigned src_shift = lp_const_shift(src_type);
        unsigned dst_shift = lp_const_shift(dst_type);
        unsigned src_offset = lp_const_offset(src_type);
        unsigned dst_offset = lp_const_offset(dst_type);

        if (src_shift < dst_shift) {
            LLVMValueRef pre_shift[LP_MAX_VECTOR_LENGTH];
            LLVMValueRef shift = lp_build_const_int_vec(gallivm, tmp_type, dst_shift - src_shift);

            for (i = 0; i < num_tmps; ++i) {
                pre_shift[i] = tmp[i];
                tmp[i] = LLVMBuildShl(builder, tmp[i], shift, "");
            }

            /* Compensate for different offsets */
            if (dst_offset > src_offset) {
                for (i = 0; i < num_tmps; ++i) {
                    tmp[i] = LLVMBuildSub(builder, tmp[i], pre_shift[i], "");
                }
            }
        }
    }

    for(i = 0; i < num_dsts; ++i) {
        dst[i] = tmp[i];
        assert(lp_check_value(dst_type, dst[i]));
    }
}
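
Per channel, the special-case fast paths above amount to scaling by 255, rounding and saturating; a scalar sketch of that float-to-unorm8 step (illustrative; in the real code the saturation comes from the SSE2 pack intrinsics used by lp_build_pack2):

#include <stdint.h>
#include <math.h>

/* Illustrative scalar model of the 4x4f -> 1x16ub path. */
static uint8_t float_to_unorm8(float f)
{
   long i = lrintf(f * 255.0f);   /* round to nearest (default rounding mode) */
   if (i < 0)   i = 0;            /* explicit here; the vector code relies on */
   if (i > 255) i = 255;          /* the saturating SSE2 pack instructions    */
   return (uint8_t)i;
}
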
Example #9
/*
 * gen_fetch
 *
 * Generates a load operation for a fetch expression.
 */
static LLVMValueRef
gen_fetch (gencodectx_t gctx, expr_node_t *rhs, LLVMTypeRef neededtype)
{
    LLVMBuilderRef builder = gctx->curfn->builder;
    llvm_accinfo_t accinfo;
    LLVMValueRef addr, val;
    LLVMTypeRef type;
    int shifts_required = 0;
    int signext;

    // For field references with non-zero bit position, or with
    // non-CTCE size, we'll have to do bit shifting to extract
    // the field.
    addr = llvmgen_addr_expression(gctx, rhs, &accinfo);
    if (accinfo.posval != 0 || accinfo.sizeval != 0) {
        type = gctx->fullwordtype;
        if ((accinfo.flags & LLVMGEN_M_ACC_CONSTSIZ)) {
            accinfo.sizeval = LLVMConstInt(gctx->fullwordtype, accinfo.size, 0);
        }
        shifts_required = 1;
    } else if ((accinfo.flags & LLVMGEN_M_ACC_CONSTSIZ)) {
        if (accinfo.size == 0) {
            // XXX signal invalid size
            type = gctx->int1type;
        } else {
            type = LLVMIntTypeInContext(gctx->llvmctx, accinfo.size);
        }
    } else {
        type = gctx->fullwordtype;
    }
    signext = ((accinfo.flags & LLVMGEN_M_SEG_SIGNEXT) != 0);

    // If we're fetching from a register, there's no load instruction
    // required - EXCEPT if this was a scalar BIND, where the BIND

    if ((accinfo.segclass == LLVM_REG &&
        (accinfo.flags & LLVMGEN_M_SEG_DEREFED) == 0) &&
        (accinfo.flags & LLVMGEN_M_SEG_BINDPTR) == 0) {
        val = llvmgen_adjustval(gctx, addr, type, signext);
    } else {
        addr = llvmgen_adjustval(gctx, addr, LLVMPointerType(type, 0), 0);
        val = LLVMBuildLoad(builder, addr, llvmgen_temp(gctx));
        if ((accinfo.flags & LLVMGEN_M_SEG_VOLATILE) != 0) LLVMSetVolatile(val, 1);
    }
    if (shifts_required) {
        val = llvmgen_adjustval(gctx, val, gctx->fullwordtype, signext);
        if (signext) {
            val = LLVMBuildAShr(builder, val, accinfo.posval, llvmgen_temp(gctx));
        } else {
            val = LLVMBuildLShr(builder, val, accinfo.posval, llvmgen_temp(gctx));
        }

        if ((accinfo.flags & LLVMGEN_M_ACC_CONSTSIZ) != 0) {
            LLVMTypeRef trunctype = LLVMIntTypeInContext(gctx->llvmctx, accinfo.size);
            val = llvmgen_adjustval(gctx, val, trunctype, signext);
        } else {
            LLVMValueRef neg1 = LLVMConstAllOnes(gctx->fullwordtype);
            LLVMValueRef mask;

            mask = LLVMBuildShl(builder, neg1, accinfo.sizeval, llvmgen_temp(gctx));
            mask = LLVMBuildNot(builder, mask, llvmgen_temp(gctx));   /* mask = (1 << size) - 1 */
            val = LLVMBuildAnd(builder, val, mask, llvmgen_temp(gctx));
            if (signext) {
                val = LLVMBuildSExt(builder, val, gctx->fullwordtype, llvmgen_temp(gctx));
            }
        }
    }

    return llvmgen_adjustval(gctx, val, neededtype, signext);

} /* gen_fetch */
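
For a constant position and size, the shift-and-mask sequence gen_fetch emits is an ordinary bit-field extract; a scalar sketch assuming a 32-bit fullword (helper names are illustrative):

#include <stdint.h>

/* Illustrative scalar bit-field extract; requires 0 < size and pos + size <= 32. */
static uint32_t extract_unsigned(uint32_t word, unsigned pos, unsigned size)
{
   uint32_t mask = (size == 32) ? ~0u : ((1u << size) - 1u);
   return (word >> pos) & mask;               /* logical shift, then mask */
}

static int32_t extract_signed(uint32_t word, unsigned pos, unsigned size)
{
   /* move the field's sign bit up to bit 31, then arithmetic-shift back down */
   uint32_t hi = word << (32u - pos - size);
   return (int32_t)hi >> (32u - size);
}
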
/**
 * Generic type conversion.
 *
 * TODO: Take a precision argument, or even better, add a new precision member
 * to the lp_type union.
 */
void
lp_build_conv(LLVMBuilderRef builder,
              struct lp_type src_type,
              struct lp_type dst_type,
              const LLVMValueRef *src, unsigned num_srcs,
              LLVMValueRef *dst, unsigned num_dsts)
{
   struct lp_type tmp_type;
   LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
   unsigned num_tmps;
   unsigned i;

   /* We must not lose or gain channels, only precision. */
   assert(src_type.length * num_srcs == dst_type.length * num_dsts);

   assert(src_type.length <= LP_MAX_VECTOR_LENGTH);
   assert(dst_type.length <= LP_MAX_VECTOR_LENGTH);
   assert(num_srcs <= LP_MAX_VECTOR_LENGTH);
   assert(num_dsts <= LP_MAX_VECTOR_LENGTH);

   tmp_type = src_type;
   for(i = 0; i < num_srcs; ++i) {
      assert(lp_check_value(src_type, src[i]));
      tmp[i] = src[i];
   }
   num_tmps = num_srcs;

   /*
    * Clamp if necessary
    */

   if(memcmp(&src_type, &dst_type, sizeof src_type) != 0) {
      struct lp_build_context bld;
      double src_min = lp_const_min(src_type);
      double dst_min = lp_const_min(dst_type);
      double src_max = lp_const_max(src_type);
      double dst_max = lp_const_max(dst_type);
      LLVMValueRef thres;

      lp_build_context_init(&bld, builder, tmp_type);

      if(src_min < dst_min) {
         if(dst_min == 0.0)
            thres = bld.zero;
         else
            thres = lp_build_const_vec(src_type, dst_min);
         for(i = 0; i < num_tmps; ++i)
            tmp[i] = lp_build_max(&bld, tmp[i], thres);
      }

      if(src_max > dst_max) {
         if(dst_max == 1.0)
            thres = bld.one;
         else
            thres = lp_build_const_vec(src_type, dst_max);
         for(i = 0; i < num_tmps; ++i)
            tmp[i] = lp_build_min(&bld, tmp[i], thres);
      }
   }

   /*
    * Scale to the narrowest range
    */

   if(dst_type.floating) {
      /* Nothing to do */
   }
   else if(tmp_type.floating) {
      if(!dst_type.fixed && !dst_type.sign && dst_type.norm) {
         for(i = 0; i < num_tmps; ++i) {
            tmp[i] = lp_build_clamped_float_to_unsigned_norm(builder,
                                                             tmp_type,
                                                             dst_type.width,
                                                             tmp[i]);
         }
         tmp_type.floating = FALSE;
      }
      else {
         double dst_scale = lp_const_scale(dst_type);
         LLVMTypeRef tmp_vec_type;

         if (dst_scale != 1.0) {
            LLVMValueRef scale = lp_build_const_vec(tmp_type, dst_scale);
            for(i = 0; i < num_tmps; ++i)
               tmp[i] = LLVMBuildFMul(builder, tmp[i], scale, "");
         }

         /* Use an equally sized integer for intermediate computations */
         tmp_type.floating = FALSE;
         tmp_vec_type = lp_build_vec_type(tmp_type);
         for(i = 0; i < num_tmps; ++i) {
#if 0
            if(dst_type.sign)
               tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
            else
               tmp[i] = LLVMBuildFPToUI(builder, tmp[i], tmp_vec_type, "");
#else
           /* FIXME: there is no SSE counterpart for LLVMBuildFPToUI */
            tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
#endif
         }
      }
   }
   else {
      unsigned src_shift = lp_const_shift(src_type);
      unsigned dst_shift = lp_const_shift(dst_type);

      /* FIXME: compensate different offsets too */
      if(src_shift > dst_shift) {
         LLVMValueRef shift = lp_build_const_int_vec(tmp_type, src_shift - dst_shift);
         for(i = 0; i < num_tmps; ++i)
            if(src_type.sign)
               tmp[i] = LLVMBuildAShr(builder, tmp[i], shift, "");
            else
               tmp[i] = LLVMBuildLShr(builder, tmp[i], shift, "");
      }
   }

   /*
    * Truncate or expand bit width
    *
    * No data conversion should happen here, although the sign bits are
    * crucial to avoid bad clamping.
    */

   {
      struct lp_type new_type;

      new_type = tmp_type;
      new_type.sign   = dst_type.sign;
      new_type.width  = dst_type.width;
      new_type.length = dst_type.length;

      lp_build_resize(builder, tmp_type, new_type, tmp, num_srcs, tmp, num_dsts);

      tmp_type = new_type;
      num_tmps = num_dsts;
   }

   /*
    * Scale to the widest range
    */

   if(src_type.floating) {
      /* Nothing to do */
   }
   else if(!src_type.floating && dst_type.floating) {
      if(!src_type.fixed && !src_type.sign && src_type.norm) {
         for(i = 0; i < num_tmps; ++i) {
            tmp[i] = lp_build_unsigned_norm_to_float(builder,
                                                     src_type.width,
                                                     dst_type,
                                                     tmp[i]);
         }
         tmp_type.floating = TRUE;
      }
      else {
         double src_scale = lp_const_scale(src_type);
         LLVMTypeRef tmp_vec_type;

         /* Use an equally sized integer for intermediate computations */
         tmp_type.floating = TRUE;
         tmp_type.sign = TRUE;
         tmp_vec_type = lp_build_vec_type(tmp_type);
         for(i = 0; i < num_tmps; ++i) {
#if 0
            if(dst_type.sign)
               tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
            else
               tmp[i] = LLVMBuildUIToFP(builder, tmp[i], tmp_vec_type, "");
#else
            /* FIXME: there is no SSE counterpart for LLVMBuildUIToFP */
            tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
#endif
         }

         if (src_scale != 1.0) {
            LLVMValueRef scale = lp_build_const_vec(tmp_type, 1.0/src_scale);
            for(i = 0; i < num_tmps; ++i)
               tmp[i] = LLVMBuildFMul(builder, tmp[i], scale, "");
         }
      }
   }
   else {
      unsigned src_shift = lp_const_shift(src_type);
      unsigned dst_shift = lp_const_shift(dst_type);

      /* FIXME: compensate different offsets too */
      if(src_shift < dst_shift) {
         LLVMValueRef shift = lp_build_const_int_vec(tmp_type, dst_shift - src_shift);
         for(i = 0; i < num_tmps; ++i)
            tmp[i] = LLVMBuildShl(builder, tmp[i], shift, "");
      }
   }

   for(i = 0; i < num_dsts; ++i) {
      dst[i] = tmp[i];
      assert(lp_check_value(dst_type, dst[i]));
   }
}
Example #11
/**
 * Sample a single texture image with (bi-)(tri-)linear sampling.
 * Return filtered color as two vectors of 16-bit fixed point values.
 */
static void
lp_build_sample_image_linear(struct lp_build_sample_context *bld,
                             LLVMValueRef int_size,
                             LLVMValueRef row_stride_vec,
                             LLVMValueRef img_stride_vec,
                             LLVMValueRef data_ptr,
                             LLVMValueRef s,
                             LLVMValueRef t,
                             LLVMValueRef r,
                             LLVMValueRef *colors_lo,
                             LLVMValueRef *colors_hi)
{
   const unsigned dims = bld->dims;
   LLVMBuilderRef builder = bld->gallivm->builder;
   struct lp_build_context i32, h16, u8n;
   LLVMTypeRef i32_vec_type, h16_vec_type, u8n_vec_type;
   LLVMValueRef i32_c8, i32_c128, i32_c255;
   LLVMValueRef width_vec, height_vec, depth_vec;
   LLVMValueRef s_ipart, s_fpart, s_fpart_lo, s_fpart_hi;
   LLVMValueRef t_ipart = NULL, t_fpart = NULL, t_fpart_lo = NULL, t_fpart_hi = NULL;
   LLVMValueRef r_ipart = NULL, r_fpart = NULL, r_fpart_lo = NULL, r_fpart_hi = NULL;
   LLVMValueRef x_stride, y_stride, z_stride;
   LLVMValueRef x_offset0, x_offset1;
   LLVMValueRef y_offset0, y_offset1;
   LLVMValueRef z_offset0, z_offset1;
   LLVMValueRef offset[2][2][2]; /* [z][y][x] */
   LLVMValueRef x_subcoord[2], y_subcoord[2], z_subcoord[2];
   LLVMValueRef neighbors_lo[2][2][2]; /* [z][y][x] */
   LLVMValueRef neighbors_hi[2][2][2]; /* [z][y][x] */
   LLVMValueRef packed_lo, packed_hi;
   unsigned x, y, z;
   unsigned i, j, k;
   unsigned numj, numk;

   lp_build_context_init(&i32, bld->gallivm, lp_type_int_vec(32));
   lp_build_context_init(&h16, bld->gallivm, lp_type_ufixed(16));
   lp_build_context_init(&u8n, bld->gallivm, lp_type_unorm(8));

   i32_vec_type = lp_build_vec_type(bld->gallivm, i32.type);
   h16_vec_type = lp_build_vec_type(bld->gallivm, h16.type);
   u8n_vec_type = lp_build_vec_type(bld->gallivm, u8n.type);

   lp_build_extract_image_sizes(bld,
                                bld->int_size_type,
                                bld->int_coord_type,
                                int_size,
                                &width_vec,
                                &height_vec,
                                &depth_vec);

   if (bld->static_state->normalized_coords) {
      LLVMValueRef scaled_size;
      LLVMValueRef flt_size;

      /* scale size by 256 (8 fractional bits) */
      scaled_size = lp_build_shl_imm(&bld->int_size_bld, int_size, 8);

      flt_size = lp_build_int_to_float(&bld->float_size_bld, scaled_size);

      lp_build_unnormalized_coords(bld, flt_size, &s, &t, &r);
   }
   else {
      /* scale coords by 256 (8 fractional bits) */
      s = lp_build_mul_imm(&bld->coord_bld, s, 256);
      if (dims >= 2)
         t = lp_build_mul_imm(&bld->coord_bld, t, 256);
      if (dims >= 3)
         r = lp_build_mul_imm(&bld->coord_bld, r, 256);
   }

   /* convert float to int */
   s = LLVMBuildFPToSI(builder, s, i32_vec_type, "");
   if (dims >= 2)
      t = LLVMBuildFPToSI(builder, t, i32_vec_type, "");
   if (dims >= 3)
      r = LLVMBuildFPToSI(builder, r, i32_vec_type, "");

   /* subtract 0.5 (add -128) */
   i32_c128 = lp_build_const_int_vec(bld->gallivm, i32.type, -128);
   s = LLVMBuildAdd(builder, s, i32_c128, "");
   if (dims >= 2) {
      t = LLVMBuildAdd(builder, t, i32_c128, "");
   }
   if (dims >= 3) {
      r = LLVMBuildAdd(builder, r, i32_c128, "");
   }

   /* compute floor (shift right 8) */
   i32_c8 = lp_build_const_int_vec(bld->gallivm, i32.type, 8);
   s_ipart = LLVMBuildAShr(builder, s, i32_c8, "");
   if (dims >= 2)
      t_ipart = LLVMBuildAShr(builder, t, i32_c8, "");
   if (dims >= 3)
      r_ipart = LLVMBuildAShr(builder, r, i32_c8, "");

   /* compute fractional part (AND with 0xff) */
   i32_c255 = lp_build_const_int_vec(bld->gallivm, i32.type, 255);
   s_fpart = LLVMBuildAnd(builder, s, i32_c255, "");
   if (dims >= 2)
      t_fpart = LLVMBuildAnd(builder, t, i32_c255, "");
   if (dims >= 3)
      r_fpart = LLVMBuildAnd(builder, r, i32_c255, "");

   /* get pixel, row and image strides */
   x_stride = lp_build_const_vec(bld->gallivm, bld->int_coord_bld.type,
                                 bld->format_desc->block.bits/8);
   y_stride = row_stride_vec;
   z_stride = img_stride_vec;

   /* do texcoord wrapping and compute texel offsets */
   lp_build_sample_wrap_linear_int(bld,
                                   bld->format_desc->block.width,
                                   s_ipart, width_vec, x_stride,
                                   bld->static_state->pot_width,
                                   bld->static_state->wrap_s,
                                   &x_offset0, &x_offset1,
                                   &x_subcoord[0], &x_subcoord[1]);
   for (z = 0; z < 2; z++) {
      for (y = 0; y < 2; y++) {
         offset[z][y][0] = x_offset0;
         offset[z][y][1] = x_offset1;
      }
   }

   if (dims >= 2) {
      lp_build_sample_wrap_linear_int(bld,
                                      bld->format_desc->block.height,
                                      t_ipart, height_vec, y_stride,
                                      bld->static_state->pot_height,
                                      bld->static_state->wrap_t,
                                      &y_offset0, &y_offset1,
                                      &y_subcoord[0], &y_subcoord[1]);

      for (z = 0; z < 2; z++) {
         for (x = 0; x < 2; x++) {
            offset[z][0][x] = lp_build_add(&bld->int_coord_bld,
                                           offset[z][0][x], y_offset0);
            offset[z][1][x] = lp_build_add(&bld->int_coord_bld,
                                           offset[z][1][x], y_offset1);
         }
      }
   }

   if (dims >= 3) {
      lp_build_sample_wrap_linear_int(bld,
                                      bld->format_desc->block.height,
                                      r_ipart, depth_vec, z_stride,
                                      bld->static_state->pot_depth,
                                      bld->static_state->wrap_r,
                                      &z_offset0, &z_offset1,
                                      &z_subcoord[0], &z_subcoord[1]);
      for (y = 0; y < 2; y++) {
         for (x = 0; x < 2; x++) {
            offset[0][y][x] = lp_build_add(&bld->int_coord_bld,
                                           offset[0][y][x], z_offset0);
            offset[1][y][x] = lp_build_add(&bld->int_coord_bld,
                                           offset[1][y][x], z_offset1);
         }
      }
   }
   else if (bld->static_state->target == PIPE_TEXTURE_CUBE) {
      LLVMValueRef z_offset;
      z_offset = lp_build_mul(&bld->int_coord_bld, r, img_stride_vec);
      for (y = 0; y < 2; y++) {
         for (x = 0; x < 2; x++) {
            /* The r coord is the cube face in [0,5] */
            offset[0][y][x] = lp_build_add(&bld->int_coord_bld,
                                           offset[0][y][x], z_offset);
         }
      }
   }

   /*
    * Transform 4 x i32 in
    *
    *   s_fpart = {s0, s1, s2, s3}
    *
    * into 8 x i16
    *
    *   s_fpart = {00, s0, 00, s1, 00, s2, 00, s3}
    *
    * into two 8 x i16
    *
    *   s_fpart_lo = {s0, s0, s0, s0, s1, s1, s1, s1}
    *   s_fpart_hi = {s2, s2, s2, s2, s3, s3, s3, s3}
    *
    * and likewise for t_fpart. There is no risk of losing precision here
    * since the fractional parts only use the lower 8 bits.
    */
   s_fpart = LLVMBuildBitCast(builder, s_fpart, h16_vec_type, "");
   if (dims >= 2)
      t_fpart = LLVMBuildBitCast(builder, t_fpart, h16_vec_type, "");
   if (dims >= 3)
      r_fpart = LLVMBuildBitCast(builder, r_fpart, h16_vec_type, "");

   {
      LLVMTypeRef elem_type = LLVMInt32TypeInContext(bld->gallivm->context);
      LLVMValueRef shuffles_lo[LP_MAX_VECTOR_LENGTH];
      LLVMValueRef shuffles_hi[LP_MAX_VECTOR_LENGTH];
      LLVMValueRef shuffle_lo;
      LLVMValueRef shuffle_hi;

      for (j = 0; j < h16.type.length; j += 4) {
#ifdef PIPE_ARCH_LITTLE_ENDIAN
         unsigned subindex = 0;
#else
         unsigned subindex = 1;
#endif
         LLVMValueRef index;

         index = LLVMConstInt(elem_type, j/2 + subindex, 0);
         for (i = 0; i < 4; ++i)
            shuffles_lo[j + i] = index;

         index = LLVMConstInt(elem_type, h16.type.length/2 + j/2 + subindex, 0);
         for (i = 0; i < 4; ++i)
            shuffles_hi[j + i] = index;
      }

      shuffle_lo = LLVMConstVector(shuffles_lo, h16.type.length);
      shuffle_hi = LLVMConstVector(shuffles_hi, h16.type.length);

      s_fpart_lo = LLVMBuildShuffleVector(builder, s_fpart, h16.undef,
                                          shuffle_lo, "");
      s_fpart_hi = LLVMBuildShuffleVector(builder, s_fpart, h16.undef,
                                          shuffle_hi, "");
      if (dims >= 2) {
         t_fpart_lo = LLVMBuildShuffleVector(builder, t_fpart, h16.undef,
                                             shuffle_lo, "");
         t_fpart_hi = LLVMBuildShuffleVector(builder, t_fpart, h16.undef,
                                             shuffle_hi, "");
      }
      if (dims >= 3) {
         r_fpart_lo = LLVMBuildShuffleVector(builder, r_fpart, h16.undef,
                                             shuffle_lo, "");
         r_fpart_hi = LLVMBuildShuffleVector(builder, r_fpart, h16.undef,
                                             shuffle_hi, "");
      }
   }

   /*
    * Fetch the pixels as 4 x 32bit (rgba order might differ):
    *
    *   rgba0 rgba1 rgba2 rgba3
    *
    * bit cast them into 16 x u8
    *
    *   r0 g0 b0 a0 r1 g1 b1 a1 r2 g2 b2 a2 r3 g3 b3 a3
    *
    * unpack them into two 8 x i16:
    *
    *   r0 g0 b0 a0 r1 g1 b1 a1
    *   r2 g2 b2 a2 r3 g3 b3 a3
    *
    * The higher 8 bits of the resulting elements will be zero.
    */
   numj = 1 + (dims >= 2);
   numk = 1 + (dims >= 3);

   for (k = 0; k < numk; k++) {
      for (j = 0; j < numj; j++) {
         for (i = 0; i < 2; i++) {
            LLVMValueRef rgba8;

            if (util_format_is_rgba8_variant(bld->format_desc)) {
               /*
                * Given the format is a rgba8, just read the pixels as is,
                * without any swizzling. Swizzling will be done later.
                */
               rgba8 = lp_build_gather(bld->gallivm,
                                       bld->texel_type.length,
                                       bld->format_desc->block.bits,
                                       bld->texel_type.width,
                                       data_ptr, offset[k][j][i]);

               rgba8 = LLVMBuildBitCast(builder, rgba8, u8n_vec_type, "");
            }
            else {
               rgba8 = lp_build_fetch_rgba_aos(bld->gallivm,
                                               bld->format_desc,
                                               u8n.type,
                                               data_ptr, offset[k][j][i],
                                               x_subcoord[i],
                                               y_subcoord[j]);
            }

            /* Expand one 4*rgba8 to two 2*rgba16 */
            lp_build_unpack2(bld->gallivm, u8n.type, h16.type,
                             rgba8,
                             &neighbors_lo[k][j][i], &neighbors_hi[k][j][i]);
         }
      }
   }

   /*
    * Linear interpolation with 8.8 fixed point.
    */
   if (dims == 1) {
      /* 1-D lerp */
      packed_lo = lp_build_lerp(&h16,
				s_fpart_lo,
				neighbors_lo[0][0][0],
				neighbors_lo[0][0][1]);

      packed_hi = lp_build_lerp(&h16,
				s_fpart_hi,
				neighbors_hi[0][0][0],
				neighbors_hi[0][0][1]);
   }
   else {
      /* 2-D lerp */
      packed_lo = lp_build_lerp_2d(&h16,
				   s_fpart_lo, t_fpart_lo,
				   neighbors_lo[0][0][0],
				   neighbors_lo[0][0][1],
				   neighbors_lo[0][1][0],
				   neighbors_lo[0][1][1]);

      packed_hi = lp_build_lerp_2d(&h16,
				   s_fpart_hi, t_fpart_hi,
				   neighbors_hi[0][0][0],
				   neighbors_hi[0][0][1],
				   neighbors_hi[0][1][0],
				   neighbors_hi[0][1][1]);

      if (dims >= 3) {
	 LLVMValueRef packed_lo2, packed_hi2;

	 /* lerp in the second z slice */
	 packed_lo2 = lp_build_lerp_2d(&h16,
				       s_fpart_lo, t_fpart_lo,
				       neighbors_lo[1][0][0],
				       neighbors_lo[1][0][1],
				       neighbors_lo[1][1][0],
				       neighbors_lo[1][1][1]);

	 packed_hi2 = lp_build_lerp_2d(&h16,
				       s_fpart_hi, t_fpart_hi,
				       neighbors_hi[1][0][0],
				       neighbors_hi[1][0][1],
				       neighbors_hi[1][1][0],
				       neighbors_hi[1][1][1]);
	 /* interp between two z slices */
	 packed_lo = lp_build_lerp(&h16, r_fpart_lo,
				   packed_lo, packed_lo2);
	 packed_hi = lp_build_lerp(&h16, r_fpart_hi,
				   packed_hi, packed_hi2);
      }
   }

   *colors_lo = packed_lo;
   *colors_hi = packed_hi;
}
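
Per channel, the 8.8 fixed-point interpolation above reduces to a little integer arithmetic; a scalar sketch (illustrative; the real code keeps the 8.8 values in packed 16-bit lanes via lp_build_lerp / lp_build_lerp_2d, and the shift assumes arithmetic behaviour for negative intermediates):

#include <stdint.h>

/* Illustrative scalar 8.8 fixed-point lerp: weight is the 8-bit fractional
 * part in [0, 255], v0/v1 are 8-bit texel channel values. */
static uint8_t lerp_8_8(uint8_t v0, uint8_t v1, int32_t weight)
{
   int32_t delta = (int32_t)v1 - (int32_t)v0;
   return (uint8_t)(v0 + ((weight * delta) >> 8));
}

/* Bilinear filtering is three such lerps: along s on both rows, then along t. */
static uint8_t bilerp_8_8(uint8_t t00, uint8_t t01, uint8_t t10, uint8_t t11,
                          int32_t s_frac, int32_t t_frac)
{
   uint8_t row0 = lerp_8_8(t00, t01, s_frac);
   uint8_t row1 = lerp_8_8(t10, t11, s_frac);
   return lerp_8_8(row0, row1, t_frac);
}
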
Example #12
/**
 * Sample a single texture image with nearest sampling.
 * If sampling a cube texture, r = cube face in [0,5].
 * Return filtered color as two vectors of 16-bit fixed point values.
 */
static void
lp_build_sample_image_nearest(struct lp_build_sample_context *bld,
                              LLVMValueRef int_size,
                              LLVMValueRef row_stride_vec,
                              LLVMValueRef img_stride_vec,
                              LLVMValueRef data_ptr,
                              LLVMValueRef s,
                              LLVMValueRef t,
                              LLVMValueRef r,
                              LLVMValueRef *colors_lo,
                              LLVMValueRef *colors_hi)
{
   const unsigned dims = bld->dims;
   LLVMBuilderRef builder = bld->gallivm->builder;
   struct lp_build_context i32, h16, u8n;
   LLVMTypeRef i32_vec_type, h16_vec_type, u8n_vec_type;
   LLVMValueRef i32_c8;
   LLVMValueRef width_vec, height_vec, depth_vec;
   LLVMValueRef s_ipart, t_ipart = NULL, r_ipart = NULL;
   LLVMValueRef x_stride;
   LLVMValueRef x_offset, offset;
   LLVMValueRef x_subcoord, y_subcoord, z_subcoord;

   lp_build_context_init(&i32, bld->gallivm, lp_type_int_vec(32));
   lp_build_context_init(&h16, bld->gallivm, lp_type_ufixed(16));
   lp_build_context_init(&u8n, bld->gallivm, lp_type_unorm(8));

   i32_vec_type = lp_build_vec_type(bld->gallivm, i32.type);
   h16_vec_type = lp_build_vec_type(bld->gallivm, h16.type);
   u8n_vec_type = lp_build_vec_type(bld->gallivm, u8n.type);

   lp_build_extract_image_sizes(bld,
                                bld->int_size_type,
                                bld->int_coord_type,
                                int_size,
                                &width_vec,
                                &height_vec,
                                &depth_vec);

   if (bld->static_state->normalized_coords) {
      LLVMValueRef scaled_size;
      LLVMValueRef flt_size;

      /* scale size by 256 (8 fractional bits) */
      scaled_size = lp_build_shl_imm(&bld->int_size_bld, int_size, 8);

      flt_size = lp_build_int_to_float(&bld->float_size_bld, scaled_size);

      lp_build_unnormalized_coords(bld, flt_size, &s, &t, &r);
   }
   else {
      /* scale coords by 256 (8 fractional bits) */
      s = lp_build_mul_imm(&bld->coord_bld, s, 256);
      if (dims >= 2)
         t = lp_build_mul_imm(&bld->coord_bld, t, 256);
      if (dims >= 3)
         r = lp_build_mul_imm(&bld->coord_bld, r, 256);
   }

   /* convert float to int */
   s = LLVMBuildFPToSI(builder, s, i32_vec_type, "");
   if (dims >= 2)
      t = LLVMBuildFPToSI(builder, t, i32_vec_type, "");
   if (dims >= 3)
      r = LLVMBuildFPToSI(builder, r, i32_vec_type, "");

   /* compute floor (shift right 8) */
   i32_c8 = lp_build_const_int_vec(bld->gallivm, i32.type, 8);
   s_ipart = LLVMBuildAShr(builder, s, i32_c8, "");
   if (dims >= 2)
      t_ipart = LLVMBuildAShr(builder, t, i32_c8, "");
   if (dims >= 3)
      r_ipart = LLVMBuildAShr(builder, r, i32_c8, "");

   /* get pixel, row, image strides */
   x_stride = lp_build_const_vec(bld->gallivm,
                                 bld->int_coord_bld.type,
                                 bld->format_desc->block.bits/8);

   /* Do texcoord wrapping, compute texel offset */
   lp_build_sample_wrap_nearest_int(bld,
                                    bld->format_desc->block.width,
                                    s_ipart, width_vec, x_stride,
                                    bld->static_state->pot_width,
                                    bld->static_state->wrap_s,
                                    &x_offset, &x_subcoord);
   offset = x_offset;
   if (dims >= 2) {
      LLVMValueRef y_offset;
      lp_build_sample_wrap_nearest_int(bld,
                                       bld->format_desc->block.height,
                                       t_ipart, height_vec, row_stride_vec,
                                       bld->static_state->pot_height,
                                       bld->static_state->wrap_t,
                                       &y_offset, &y_subcoord);
      offset = lp_build_add(&bld->int_coord_bld, offset, y_offset);
      if (dims >= 3) {
         LLVMValueRef z_offset;
         lp_build_sample_wrap_nearest_int(bld,
                                          1, /* block length (depth) */
                                          r_ipart, depth_vec, img_stride_vec,
                                          bld->static_state->pot_depth,
                                          bld->static_state->wrap_r,
                                          &z_offset, &z_subcoord);
         offset = lp_build_add(&bld->int_coord_bld, offset, z_offset);
      }
      else if (bld->static_state->target == PIPE_TEXTURE_CUBE) {
         LLVMValueRef z_offset;
         /* The r coord is the cube face in [0,5] */
         z_offset = lp_build_mul(&bld->int_coord_bld, r, img_stride_vec);
         offset = lp_build_add(&bld->int_coord_bld, offset, z_offset);
      }
   }

   /*
    * Fetch the pixels as 4 x 32bit (rgba order might differ):
    *
    *   rgba0 rgba1 rgba2 rgba3
    *
    * bit cast them into 16 x u8
    *
    *   r0 g0 b0 a0 r1 g1 b1 a1 r2 g2 b2 a2 r3 g3 b3 a3
    *
    * unpack them into two 8 x i16:
    *
    *   r0 g0 b0 a0 r1 g1 b1 a1
    *   r2 g2 b2 a2 r3 g3 b3 a3
    *
    * The higher 8 bits of the resulting elements will be zero.
    */
   {
      LLVMValueRef rgba8;

      if (util_format_is_rgba8_variant(bld->format_desc)) {
         /*
          * Given the format is a rgba8, just read the pixels as is,
          * without any swizzling. Swizzling will be done later.
          */
         rgba8 = lp_build_gather(bld->gallivm,
                                 bld->texel_type.length,
                                 bld->format_desc->block.bits,
                                 bld->texel_type.width,
                                 data_ptr, offset);

         rgba8 = LLVMBuildBitCast(builder, rgba8, u8n_vec_type, "");
      }
      else {
         rgba8 = lp_build_fetch_rgba_aos(bld->gallivm,
                                         bld->format_desc,
                                         u8n.type,
                                         data_ptr, offset,
                                         x_subcoord,
                                         y_subcoord);
      }

      /* Expand one 4*rgba8 to two 2*rgba16 */
      lp_build_unpack2(bld->gallivm, u8n.type, h16.type,
                       rgba8,
                       colors_lo, colors_hi);
   }
}
static LLVMValueRef
lp_build_extract_soa_chan(struct lp_build_context *bld,
                          unsigned blockbits,
                          boolean srgb_chan,
                          struct util_format_channel_description chan_desc,
                          LLVMValueRef packed)
{
   struct gallivm_state *gallivm = bld->gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_type type = bld->type;
   LLVMValueRef input = packed;
   const unsigned width = chan_desc.size;
   const unsigned start = chan_desc.shift;
   const unsigned stop = start + width;

   /* Decode the input vector component */

   switch(chan_desc.type) {
   case UTIL_FORMAT_TYPE_VOID:
      input = bld->undef;
      break;

   case UTIL_FORMAT_TYPE_UNSIGNED:
      /*
       * Align the LSB
       */
      if (start) {
         input = LLVMBuildLShr(builder, input,
                               lp_build_const_int_vec(gallivm, type, start), "");
      }

      /*
       * Zero the MSBs
       */
      if (stop < blockbits) {
         unsigned mask = ((unsigned long long)1 << width) - 1;
         input = LLVMBuildAnd(builder, input,
                              lp_build_const_int_vec(gallivm, type, mask), "");
      }

      /*
       * Type conversion
       */
      if (type.floating) {
         if (srgb_chan) {
            struct lp_type conv_type = lp_uint_type(type);
            input = lp_build_srgb_to_linear(gallivm, conv_type, width, input);
         }
         else {
            if(chan_desc.normalized)
               input = lp_build_unsigned_norm_to_float(gallivm, width, type, input);
            else
               input = LLVMBuildSIToFP(builder, input, bld->vec_type, "");
         }
      }
      else if (chan_desc.pure_integer) {
         /* Nothing to do */
      } else {
          /* FIXME */
          assert(0);
      }
      break;

   case UTIL_FORMAT_TYPE_SIGNED:
      /*
       * Align the sign bit first.
       */
      if (stop < type.width) {
         unsigned bits = type.width - stop;
         LLVMValueRef bits_val = lp_build_const_int_vec(gallivm, type, bits);
         input = LLVMBuildShl(builder, input, bits_val, "");
      }

      /*
       * Align the LSB (with an arithmetic shift to preserve the sign)
       */
      if (chan_desc.size < type.width) {
         unsigned bits = type.width - chan_desc.size;
         LLVMValueRef bits_val = lp_build_const_int_vec(gallivm, type, bits);
         input = LLVMBuildAShr(builder, input, bits_val, "");
      }

      /*
       * Type conversion
       */
      if (type.floating) {
         input = LLVMBuildSIToFP(builder, input, bld->vec_type, "");
         if (chan_desc.normalized) {
            double scale = 1.0 / ((1 << (chan_desc.size - 1)) - 1);
            LLVMValueRef scale_val = lp_build_const_vec(gallivm, type, scale);
            input = LLVMBuildFMul(builder, input, scale_val, "");
            /*
             * The formula above produces a value below -1.0 for the most
             * negative input, but everything seems happy with that, hence
             * disabled for now.
             */
            if (0)
               input = lp_build_max(bld, input,
                                    lp_build_const_vec(gallivm, type, -1.0f));
         }
      }
      else if (chan_desc.pure_integer) {
         /* Nothing to do */
      } else {
          /* FIXME */
          assert(0);
      }
      break;

   case UTIL_FORMAT_TYPE_FLOAT:
      if (type.floating) {
         if (chan_desc.size == 16) {
            struct lp_type f16i_type = type;
            f16i_type.width /= 2;
            f16i_type.floating = 0;
            if (start) {
               input = LLVMBuildLShr(builder, input,
                                     lp_build_const_int_vec(gallivm, type, start), "");
            }
            input = LLVMBuildTrunc(builder, input,
                                   lp_build_vec_type(gallivm, f16i_type), "");
            input = lp_build_half_to_float(gallivm, input);
         } else {
            assert(start == 0);
            assert(stop == 32);
            assert(type.width == 32);
         }
         input = LLVMBuildBitCast(builder, input, bld->vec_type, "");
      }
      else {
         /* FIXME */
         assert(0);
         input = bld->undef;
      }
      break;

   case UTIL_FORMAT_TYPE_FIXED:
      if (type.floating) {
         double scale = 1.0 / ((1 << (chan_desc.size/2)) - 1);
         LLVMValueRef scale_val = lp_build_const_vec(gallivm, type, scale);
         input = LLVMBuildSIToFP(builder, input, bld->vec_type, "");
         input = LLVMBuildFMul(builder, input, scale_val, "");
      }
      else {
         /* FIXME */
         assert(0);
         input = bld->undef;
      }
      break;

   default:
      assert(0);
      input = bld->undef;
      break;
   }

   return input;
}
Example #14
/**
 * Generate a * b
 */
LLVMValueRef
lp_build_mul(struct lp_build_context *bld,
             LLVMValueRef a,
             LLVMValueRef b)
{
   const struct lp_type type = bld->type;
   LLVMValueRef shift;
   LLVMValueRef res;

   if(a == bld->zero)
      return bld->zero;
   if(a == bld->one)
      return b;
   if(b == bld->zero)
      return bld->zero;
   if(b == bld->one)
      return a;
   if(a == bld->undef || b == bld->undef)
      return bld->undef;

   if(!type.floating && !type.fixed && type.norm) {
      if(type.width == 8) {
         struct lp_type i16_type = lp_wider_type(type);
         LLVMValueRef al, ah, bl, bh, abl, abh, ab;

         lp_build_unpack2(bld->builder, type, i16_type, a, &al, &ah);
         lp_build_unpack2(bld->builder, type, i16_type, b, &bl, &bh);

         /* PMULLW, PSRLW, PADDW */
         abl = lp_build_mul_u8n(bld->builder, i16_type, al, bl);
         abh = lp_build_mul_u8n(bld->builder, i16_type, ah, bh);

         ab = lp_build_pack2(bld->builder, i16_type, type, abl, abh);
         
         return ab;
      }

      /* FIXME */
      assert(0);
   }

   if(type.fixed)
      shift = lp_build_int_const_scalar(type, type.width/2);
   else
      shift = NULL;

   if(LLVMIsConstant(a) && LLVMIsConstant(b)) {
      res =  LLVMConstMul(a, b);
      if(shift) {
         if(type.sign)
            res = LLVMConstAShr(res, shift);
         else
            res = LLVMConstLShr(res, shift);
      }
   }
   else {
      res = LLVMBuildMul(bld->builder, a, b, "");
      if(shift) {
         if(type.sign)
            res = LLVMBuildAShr(bld->builder, res, shift, "");
         else
            res = LLVMBuildLShr(bld->builder, res, shift, "");
      }
   }

   return res;
}
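
For the fixed-point branch above, the shift by width/2 after the multiply is what restores the original Q-format; a scalar sketch for a 16.16 layout, assuming a 64-bit intermediate (the real code shifts the same-width vector product directly):

#include <stdint.h>

/* Illustrative scalar fixed-point multiply: two Q16.16 operands give a
 * Q32.32 product, so shifting right by 16 restores Q16.16. */
static int32_t mul_q16_16(int32_t a, int32_t b)
{
   int64_t wide = (int64_t)a * (int64_t)b;   /* full-width product */
   return (int32_t)(wide >> 16);             /* arithmetic shift for signed */
}

/* e.g. mul_q16_16(3 << 16, 5 << 16) == 15 << 16 */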