void SkRGB16BlitterBlitV_neon(uint16_t* device, int height, size_t deviceRB,
                              unsigned scale, uint32_t src32) {
    // Blend a vertical run of RGB565 pixels with a pre-expanded, pre-scaled
    // source color: *device = compact((src32 + expand(*device) * scale) >> 5),
    // stepping deviceRB bytes per pixel.
    if (height >= 8) {
        uint16_t* dst = device;

        // prepare constants
        uint16x8_t vdev = vdupq_n_u16(0);
        uint16x8_t vmaskq_g16 = vdupq_n_u16(SK_G16_MASK_IN_PLACE);
        uint16x8_t vmaskq_ng16 = vdupq_n_u16(~SK_G16_MASK_IN_PLACE);
        uint32x4_t vsrc32 = vdupq_n_u32(src32);
        uint32x4_t vscale5 = vdupq_n_u32((uint32_t)scale);

        while (height >= 8) {
            // gather eight strided pixels into one q register
            LOAD_LANE_16(vdev, 0)
            LOAD_LANE_16(vdev, 1)
            LOAD_LANE_16(vdev, 2)
            LOAD_LANE_16(vdev, 3)
            LOAD_LANE_16(vdev, 4)
            LOAD_LANE_16(vdev, 5)
            LOAD_LANE_16(vdev, 6)
            LOAD_LANE_16(vdev, 7)

            // Expand_rgb_16: split G apart from R|B so each pixel fills 32 bits
            // and the multiply by the 5-bit scale cannot carry between channels
            uint16x8x2_t vdst = vzipq_u16((vdev & vmaskq_ng16), (vdev & vmaskq_g16));
            uint32x4_t vdst32_lo = vmulq_u32(vreinterpretq_u32_u16(vdst.val[0]), vscale5);
            uint32x4_t vdst32_hi = vmulq_u32(vreinterpretq_u32_u16(vdst.val[1]), vscale5);

            // Compact_rgb_16: add the source, renormalize, fold G back in
            vdst32_lo = vaddq_u32(vdst32_lo, vsrc32);
            vdst32_hi = vaddq_u32(vdst32_hi, vsrc32);
            vdst32_lo = vshrq_n_u32(vdst32_lo, 5);
            vdst32_hi = vshrq_n_u32(vdst32_hi, 5);
            uint16x4_t vtmp_lo = vmovn_u32(vdst32_lo) & vget_low_u16(vmaskq_ng16);
            uint16x4_t vtmp_hi = vshrn_n_u32(vdst32_lo, 16) & vget_low_u16(vmaskq_g16);
            uint16x4_t vdst16_lo = vorr_u16(vtmp_lo, vtmp_hi);
            vtmp_lo = vmovn_u32(vdst32_hi) & vget_low_u16(vmaskq_ng16);
            vtmp_hi = vshrn_n_u32(vdst32_hi, 16) & vget_low_u16(vmaskq_g16);
            uint16x4_t vdst16_hi = vorr_u16(vtmp_lo, vtmp_hi);

            // scatter the eight blended pixels back out
            STORE_LANE_16(vdst16_lo, 0)
            STORE_LANE_16(vdst16_lo, 1)
            STORE_LANE_16(vdst16_lo, 2)
            STORE_LANE_16(vdst16_lo, 3)
            STORE_LANE_16(vdst16_hi, 0)
            STORE_LANE_16(vdst16_hi, 1)
            STORE_LANE_16(vdst16_hi, 2)
            STORE_LANE_16(vdst16_hi, 3)

            height -= 8;
        }
    }

    // scalar tail for the remaining (height % 8) pixels; the LOAD_LANE_16
    // macros have already advanced device past the vectorized rows
    while (height != 0) {
        uint32_t dst32 = SkExpand_rgb_16(*device) * scale;
        *device = SkCompact_rgb_16((src32 + dst32) >> 5);
        device = (uint16_t*)((char*)device + deviceRB);
        height--;
    }
}
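/*
 * For reference, sketches of the helpers the blitter above depends on,
 * following their definitions in Skia (the lane macros live next to the
 * function in SkBlitRow_opts_arm_neon.cpp, the expand/compact pair in
 * SkColorPriv.h); treat the exact forms here as illustrative rather than
 * authoritative, and note that in the real file the macros are defined
 * before the function. SkExpand_rgb_16 lifts the green channel of an
 * RGB565 pixel into the upper half of a 32-bit word, leaving enough
 * headroom that all three channels survive a multiply by a 5-bit scale
 * (0..32); SkCompact_rgb_16 undoes the expansion. The lane macros also
 * advance device/dst by deviceRB per lane, which is why the scalar tail
 * can pick up where the vector loop left off.
 */
#define LOAD_LANE_16(reg, n) \
    reg = vld1q_lane_u16(device, reg, n); \
    device = (uint16_t*)((char*)device + deviceRB);

#define STORE_LANE_16(reg, n) \
    vst1_lane_u16(dst, reg, n); \
    dst = (uint16_t*)((char*)dst + deviceRB);

static inline uint32_t SkExpand_rgb_16(U16CPU c) {
    return ((c & SK_G16_MASK_IN_PLACE) << 16) | (c & ~SK_G16_MASK_IN_PLACE);
}

static inline U16CPU SkCompact_rgb_16(uint32_t c) {
    return ((c >> 16) & SK_G16_MASK_IN_PLACE) | (c & ~SK_G16_MASK_IN_PLACE);
}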
#ifndef ARCH_AARCH64
/* ARMv7 NEON has no vqtbl1q_u8; emulate the 16-byte table lookup with two
   vtbl2_u8 lookups over the d-register halves. */
#define vqtbl1q_u8(tbl, v) vcombine_u8(vtbl2_u8(tbl, vget_low_u8(v)), \
                                       vtbl2_u8(tbl, vget_high_u8(v)))
#endif

static
void
neon_w32_split_4_32_multiply_region(gf_t *gf, uint32_t *src, uint32_t *dst,
                                    uint32_t *d_end, uint8_t btable[8][4][16],
                                    uint32_t val, int xor, int altmap)
{
  int i, j;
#ifdef ARCH_AARCH64
  uint8x16_t tables[8][4];
#else
  uint8x8x2_t tables[8][4];
#endif
  uint32x4_t v0, v1, v2, v3, s0, s1, s2, s3;
  uint8x16_t p0, p1, p2, p3, si, mask1;
  uint16x8x2_t r0, r1;
  uint8x16x2_t q0, q1;

  /* load the 8 (nibbles) x 4 (product bytes) lookup tables, prebuilt from
     val by the caller, into registers */
  for (i = 0; i < 8; i++) {
    for (j = 0; j < 4; j++) {
#ifdef ARCH_AARCH64
      tables[i][j] = vld1q_u8(btable[i][j]);
#else
      tables[i][j].val[0] = vld1_u8(btable[i][j]);
      tables[i][j].val[1] = vld1_u8(btable[i][j] + 8);
#endif
    }
  }

  mask1 = vdupq_n_u8(0xf);

  while (dst < d_end) {
    /* 16 words (64 bytes) per iteration */
    v0 = vld1q_u32(src); src += 4;
    v1 = vld1q_u32(src); src += 4;
    v2 = vld1q_u32(src); src += 4;
    v3 = vld1q_u32(src); src += 4;

    if (altmap) {
      /* input is already stored byte-sliced */
      q0.val[0] = vreinterpretq_u8_u32(v0);
      q0.val[1] = vreinterpretq_u8_u32(v1);
      q1.val[0] = vreinterpretq_u8_u32(v2);
      q1.val[1] = vreinterpretq_u8_u32(v3);
    } else {
      /* transpose so each register holds one byte plane of the 16 words */
      r0 = vtrnq_u16(vreinterpretq_u16_u32(v0), vreinterpretq_u16_u32(v2));
      r1 = vtrnq_u16(vreinterpretq_u16_u32(v1), vreinterpretq_u16_u32(v3));
      q0 = vtrnq_u8(vreinterpretq_u8_u16(r0.val[0]),
                    vreinterpretq_u8_u16(r1.val[0]));
      q1 = vtrnq_u8(vreinterpretq_u8_u16(r0.val[1]),
                    vreinterpretq_u8_u16(r1.val[1]));
    }

    /* for each of the 8 source nibbles, look up the 4 bytes of its partial
       product and XOR them into the accumulators p0..p3 */
    si = vandq_u8(q0.val[0], mask1);
    p0 = vqtbl1q_u8(tables[0][0], si);
    p1 = vqtbl1q_u8(tables[0][1], si);
    p2 = vqtbl1q_u8(tables[0][2], si);
    p3 = vqtbl1q_u8(tables[0][3], si);

    si = vshrq_n_u8(q0.val[0], 4);
    p0 = veorq_u8(p0, vqtbl1q_u8(tables[1][0], si));
    p1 = veorq_u8(p1, vqtbl1q_u8(tables[1][1], si));
    p2 = veorq_u8(p2, vqtbl1q_u8(tables[1][2], si));
    p3 = veorq_u8(p3, vqtbl1q_u8(tables[1][3], si));

    si = vandq_u8(q0.val[1], mask1);
    p0 = veorq_u8(p0, vqtbl1q_u8(tables[2][0], si));
    p1 = veorq_u8(p1, vqtbl1q_u8(tables[2][1], si));
    p2 = veorq_u8(p2, vqtbl1q_u8(tables[2][2], si));
    p3 = veorq_u8(p3, vqtbl1q_u8(tables[2][3], si));

    si = vshrq_n_u8(q0.val[1], 4);
    p0 = veorq_u8(p0, vqtbl1q_u8(tables[3][0], si));
    p1 = veorq_u8(p1, vqtbl1q_u8(tables[3][1], si));
    p2 = veorq_u8(p2, vqtbl1q_u8(tables[3][2], si));
    p3 = veorq_u8(p3, vqtbl1q_u8(tables[3][3], si));

    si = vandq_u8(q1.val[0], mask1);
    p0 = veorq_u8(p0, vqtbl1q_u8(tables[4][0], si));
    p1 = veorq_u8(p1, vqtbl1q_u8(tables[4][1], si));
    p2 = veorq_u8(p2, vqtbl1q_u8(tables[4][2], si));
    p3 = veorq_u8(p3, vqtbl1q_u8(tables[4][3], si));

    si = vshrq_n_u8(q1.val[0], 4);
    p0 = veorq_u8(p0, vqtbl1q_u8(tables[5][0], si));
    p1 = veorq_u8(p1, vqtbl1q_u8(tables[5][1], si));
    p2 = veorq_u8(p2, vqtbl1q_u8(tables[5][2], si));
    p3 = veorq_u8(p3, vqtbl1q_u8(tables[5][3], si));

    si = vandq_u8(q1.val[1], mask1);
    p0 = veorq_u8(p0, vqtbl1q_u8(tables[6][0], si));
    p1 = veorq_u8(p1, vqtbl1q_u8(tables[6][1], si));
    p2 = veorq_u8(p2, vqtbl1q_u8(tables[6][2], si));
    p3 = veorq_u8(p3, vqtbl1q_u8(tables[6][3], si));

    si = vshrq_n_u8(q1.val[1], 4);
    p0 = veorq_u8(p0, vqtbl1q_u8(tables[7][0], si));
    p1 = veorq_u8(p1, vqtbl1q_u8(tables[7][1], si));
    p2 = veorq_u8(p2, vqtbl1q_u8(tables[7][2], si));
    p3 = veorq_u8(p3, vqtbl1q_u8(tables[7][3], si));

    if (altmap) {
      s0 = vreinterpretq_u32_u8(p0);
      s1 = vreinterpretq_u32_u8(p1);
      s2 = vreinterpretq_u32_u8(p2);
      s3 = vreinterpretq_u32_u8(p3);
    } else {
      /* transpose the byte planes back into word order */
      q0 = vtrnq_u8(p0, p1);
      q1 = vtrnq_u8(p2, p3);
      r0 = vtrnq_u16(vreinterpretq_u16_u8(q0.val[0]),
                     vreinterpretq_u16_u8(q1.val[0]));
      r1 = vtrnq_u16(vreinterpretq_u16_u8(q0.val[1]),
                     vreinterpretq_u16_u8(q1.val[1]));
      s0 = vreinterpretq_u32_u16(r0.val[0]);
      s1 = vreinterpretq_u32_u16(r1.val[0]);
      s2 = vreinterpretq_u32_u16(r0.val[1]);
      s3 = vreinterpretq_u32_u16(r1.val[1]);
    }

    if (xor) {
      /* accumulate into the destination region */
      v0 = vld1q_u32(dst);
      v1 = vld1q_u32(dst + 4);
      v2 = vld1q_u32(dst + 8);
      v3 = vld1q_u32(dst + 12);
      s0 = veorq_u32(s0, v0);
      s1 = veorq_u32(s1, v1);
      s2 = veorq_u32(s2, v2);
      s3 = veorq_u32(s3, v3);
    }

    vst1q_u32(dst, s0);
    vst1q_u32(dst + 4, s1);
    vst1q_u32(dst + 8, s2);
    vst1q_u32(dst + 12, s3);
    dst += 16;
  }
}
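/*
 * A minimal scalar sketch of the split-4 scheme the routine above
 * vectorizes, to make the table layout explicit. It assumes, as in
 * gf-complete's lazy table setup, that btable[i][j][n] holds byte j of
 * the GF(2^32) product of the region multiplier val with nibble n placed
 * at bit offset 4*i. Each 32-bit output word is then the XOR of eight
 * such partial products; the NEON code computes 16 words at once because,
 * after the byte-plane transpose, one vqtbl1q_u8 performs 16 of these
 * lookups per instruction. The helper name is hypothetical, not part of
 * the library's API.
 */
static uint32_t
split_4_32_multiply_one(uint8_t btable[8][4][16], uint32_t w)
{
  uint32_t p = 0;
  int i, j;

  for (i = 0; i < 8; i++) {              /* one 4-bit slice of the input... */
    unsigned nib = (w >> (4 * i)) & 0xf;
    for (j = 0; j < 4; j++)              /* ...contributes 4 product bytes */
      p ^= (uint32_t) btable[i][j][nib] << (8 * j);
  }
  return p;
}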