static void
vmx_combine_atop_ca (pixman_implementation_t *imp,
                     pixman_op_t              op,
                     uint32_t *               dest,
                     const uint32_t *         src,
                     const uint32_t *         mask,
                     int                      width)
{
    int i;
    vector unsigned int vdest, vsrc, vmask, vsrca;
    vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
        dest_mask, mask_mask, src_mask, store_mask;

    COMPUTE_SHIFT_MASKC (dest, src, mask);

    /* printf ("%s\n", __PRETTY_FUNCTION__); */

    /* Component-alpha ATOP: dest = src * mask * dest.a + dest * ~(mask * src.a),
     * four pixels per iteration. */
    for (i = width / 4; i > 0; i--)
    {
        LOAD_VECTORSC (dest, src, mask);

        vsrca = splat_alpha (vsrc);

        vsrc = pix_multiply (vsrc, vmask);
        vmask = pix_multiply (vmask, vsrca);

        vdest = pix_add_mul (vsrc,
                             splat_alpha (vdest),
                             negate (vmask),
                             vdest);

        STORE_VECTOR (dest);

        src += 4;
        dest += 4;
        mask += 4;
    }

    /* Scalar tail for the remaining width % 4 pixels. */
    for (i = width % 4; --i >= 0;)
    {
        uint32_t a = mask[i];
        uint32_t s = src[i];
        uint32_t d = dest[i];
        uint32_t sa = ALPHA_8 (s);
        uint32_t da = ALPHA_8 (d);

        UN8x4_MUL_UN8x4 (s, a);
        UN8x4_MUL_UN8 (a, sa);
        UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (d, ~a, s, da);

        dest[i] = d;
    }
}
static void
vmx_combine_out_reverse_ca (pixman_implementation_t *imp,
                            pixman_op_t              op,
                            uint32_t *               dest,
                            const uint32_t *         src,
                            const uint32_t *         mask,
                            int                      width)
{
    int i;
    vector unsigned int vdest, vsrc, vmask;
    vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
        dest_mask, mask_mask, src_mask, store_mask;

    COMPUTE_SHIFT_MASKC (dest, src, mask);

    /* printf ("%s\n", __PRETTY_FUNCTION__); */

    /* Component-alpha OUT_REVERSE: dest = dest * ~(mask * src.a),
     * four pixels per iteration. */
    for (i = width / 4; i > 0; i--)
    {
        LOAD_VECTORSC (dest, src, mask);

        vdest = pix_multiply (
            vdest, negate (pix_multiply (vmask, splat_alpha (vsrc))));

        STORE_VECTOR (dest);

        src += 4;
        dest += 4;
        mask += 4;
    }

    /* Scalar tail for the remaining width % 4 pixels. */
    for (i = width % 4; --i >= 0;)
    {
        uint32_t a = mask[i];
        uint32_t s = src[i];
        uint32_t d = dest[i];
        uint32_t sa = ALPHA_8 (s);

        UN8x4_MUL_UN8 (a, sa);
        UN8x4_MUL_UN8x4 (d, ~a);

        dest[i] = d;
    }
}
static void
vmx_combine_over_reverse_ca (pixman_implementation_t *imp,
                             pixman_op_t              op,
                             uint32_t *               dest,
                             const uint32_t *         src,
                             const uint32_t *         mask,
                             int                      width)
{
    int i;
    vector unsigned int vdest, vsrc, vmask;
    vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
        dest_mask, mask_mask, src_mask, store_mask;

    COMPUTE_SHIFT_MASKC (dest, src, mask);

    /* printf ("%s\n", __PRETTY_FUNCTION__); */

    /* Component-alpha OVER_REVERSE: dest = dest + src * mask * ~dest.a,
     * four pixels per iteration. */
    for (i = width / 4; i > 0; i--)
    {
        LOAD_VECTORSC (dest, src, mask);

        vdest = over (vdest,
                      splat_alpha (vdest),
                      pix_multiply (vsrc, vmask));

        STORE_VECTOR (dest);

        mask += 4;
        src += 4;
        dest += 4;
    }

    /* Scalar tail for the remaining width % 4 pixels. */
    for (i = width % 4; --i >= 0;)
    {
        uint32_t a = mask[i];
        uint32_t s = src[i];
        uint32_t d = dest[i];
        uint32_t ida = ALPHA_8 (~d);

        UN8x4_MUL_UN8x4 (s, a);
        UN8x4_MUL_UN8_ADD_UN8x4 (s, ida, d);

        dest[i] = s;
    }
}
static void
vmx_combine_add_ca (pixman_implementation_t *imp,
                    pixman_op_t              op,
                    uint32_t *               dest,
                    const uint32_t *         src,
                    const uint32_t *         mask,
                    int                      width)
{
    int i;
    vector unsigned int vdest, vsrc, vmask;
    vector unsigned char tmp1, tmp2, tmp3, tmp4, edges,
        dest_mask, mask_mask, src_mask, store_mask;

    COMPUTE_SHIFT_MASKC (dest, src, mask);

    /* printf ("%s\n", __PRETTY_FUNCTION__); */

    /* Component-alpha ADD: dest = saturate (src * mask + dest),
     * four pixels per iteration. */
    for (i = width / 4; i > 0; i--)
    {
        LOAD_VECTORSC (dest, src, mask);

        vdest = pix_add (pix_multiply (vsrc, vmask), vdest);

        STORE_VECTOR (dest);

        src += 4;
        dest += 4;
        mask += 4;
    }

    /* Scalar tail for the remaining width % 4 pixels. */
    for (i = width % 4; --i >= 0;)
    {
        uint32_t a = mask[i];
        uint32_t s = src[i];
        uint32_t d = dest[i];

        UN8x4_MUL_UN8x4 (s, a);
        UN8x4_ADD_UN8x4 (s, d);

        dest[i] = s;
    }
}