void test_vaddQu8 (void)
{
  uint8x16_t out_uint8x16_t;
  uint8x16_t arg0_uint8x16_t;
  uint8x16_t arg1_uint8x16_t;

  out_uint8x16_t = vaddq_u8 (arg0_uint8x16_t, arg1_uint8x16_t);
}
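For reference, a minimal self-contained sketch (ours, not from the original test suite) that exercises vaddq_u8 on concrete data; note that the addition wraps modulo 256 rather than saturating:

#include <arm_neon.h>
#include <stdio.h>

int main(void)
{
    uint8_t a[16], b[16], r[16];
    for (int i = 0; i < 16; ++i) {
        a[i] = (uint8_t)i;   /* 0, 1, 2, ..., 15 */
        b[i] = 250;          /* large enough to make some lanes wrap */
    }

    /* Lane-wise 8-bit addition; results wrap modulo 256 (no saturation). */
    vst1q_u8(r, vaddq_u8(vld1q_u8(a), vld1q_u8(b)));

    for (int i = 0; i < 16; ++i)
        printf("%u ", (unsigned)r[i]);
    printf("\n");
    return 0;
}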
void add3 (uint8x16_t *data)
{
    /* Set each of the sixteen lanes of the vector to 3.
     *
     * Remark: a 'q' suffix on an intrinsic indicates that
     * the instruction operates on 128-bit registers. */
    uint8x16_t three = vmovq_n_u8 (3);

    /* Add 3 to the value given in argument. */
    *data = vaddq_u8 (*data, three);
}
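A minimal caller for add3 (illustrative only, buffer contents arbitrary), assuming <arm_neon.h> is included:

void demo_add3(uint8_t buf[16])
{
    uint8x16_t v = vld1q_u8(buf); /* load 16 bytes into a q register */
    add3(&v);                     /* every lane gets +3 (wrapping) */
    vst1q_u8(buf, v);             /* store the result back */
}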
static void AddGreenToBlueAndRed(uint32_t* argb_data, int num_pixels) {
  const uint32_t* const end = argb_data + (num_pixels & ~3);
  const uint8x8_t shuffle = vld1_u8(kGreenShuffle);
  for (; argb_data < end; argb_data += 4) {
    const uint8x16_t argb = vld1q_u8((uint8_t*)argb_data);
    const uint8x16_t greens =
        vcombine_u8(vtbl1_u8(vget_low_u8(argb), shuffle),
                    vtbl1_u8(vget_high_u8(argb), shuffle));
    vst1q_u8((uint8_t*)argb_data, vaddq_u8(argb, greens));
  }
  // fallthrough and finish off with plain-C
  VP8LAddGreenToBlueAndRed_C(argb_data, num_pixels & 3);
}
void ar_vadd_u8_neon(uint8_t* res, const uint8_t* a, const uint8_t* b, uint32_t n)
{
    uint8x16_t a_loaded;
    uint8x16_t b_loaded;
    uint8x16_t res_loaded;

    for (uint32_t i = 0; i < n; i += 16) {
        a_loaded = vld1q_u8(&(a[i]));
        b_loaded = vld1q_u8(&(b[i]));
        res_loaded = vaddq_u8(a_loaded, b_loaded);
        vst1q_u8(&(res[i]), res_loaded);
    }
}
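The loop above processes whole 16-byte chunks only, so it implicitly assumes n is a multiple of 16. A hedged sketch of a variant with a scalar tail (the function name is ours):

#include <arm_neon.h>
#include <stdint.h>

void ar_vadd_u8_neon_tail(uint8_t* res, const uint8_t* a,
                          const uint8_t* b, uint32_t n)
{
    uint32_t i = 0;

    /* Full 16-byte chunks with NEON. */
    for (; i + 16 <= n; i += 16) {
        vst1q_u8(&res[i], vaddq_u8(vld1q_u8(&a[i]), vld1q_u8(&b[i])));
    }

    /* Remaining 0..15 bytes with scalar code. */
    for (; i < n; ++i) {
        res[i] = (uint8_t)(a[i] + b[i]);
    }
}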
void png_read_filter_row_up_neon(png_row_infop row_info, png_bytep row,
                                 png_const_bytep prev_row)
{
   png_bytep rp = row;
   png_bytep rp_stop = row + row_info->rowbytes;
   png_const_bytep pp = prev_row;

   for (; rp < rp_stop; rp += 16, pp += 16)
   {
      uint8x16_t qrp, qpp;

      qrp = vld1q_u8(rp);
      qpp = vld1q_u8(pp);
      qrp = vaddq_u8(qrp, qpp);
      vst1q_u8(rp, qrp);
   }
}
uint64_t popcnt_neon_vcnt(const uint8_t* data, const size_t size)
{
    const size_t chunk_size = 16 * 4 * 2;

    uint8_t* ptr = const_cast<uint8_t*>(data);

    const size_t n = size / chunk_size;
    const size_t k = size % chunk_size;

    uint32x4_t sum = vcombine_u32(vcreate_u32(0), vcreate_u32(0));

    for (size_t i = 0; i < n; i++, ptr += chunk_size) {
        uint8x16x4_t input0 = vld4q_u8(ptr + 0 * 16 * 4);
        uint8x16x4_t input1 = vld4q_u8(ptr + 1 * 16 * 4);

        uint8x16_t t0 = vcntq_u8(input0.val[0]);
        t0 = vaddq_u8(t0, vcntq_u8(input0.val[1]));
        t0 = vaddq_u8(t0, vcntq_u8(input0.val[2]));
        t0 = vaddq_u8(t0, vcntq_u8(input0.val[3]));
        t0 = vaddq_u8(t0, vcntq_u8(input1.val[0]));
        t0 = vaddq_u8(t0, vcntq_u8(input1.val[1]));
        t0 = vaddq_u8(t0, vcntq_u8(input1.val[2]));
        t0 = vaddq_u8(t0, vcntq_u8(input1.val[3]));

        const uint16x8_t t1 = vpaddlq_u8(t0);
        sum = vpadalq_u16(sum, t1);
    }

    uint32_t scalar = 0;
    uint32_t tmp[4];
    vst1q_u32(tmp, sum);
    for (int i = 0; i < 4; i++) {
        scalar += tmp[i];
    }

    for (size_t j = 0; j < k; j++) {
        scalar += lookup8bit[ptr[j]];
    }

    return scalar;
}
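On AArch64, the vst1q_u32/tmp[] store and scalar loop used for the final reduction can be replaced by a single across-vector add; a small sketch (our addition, guarded so it only applies where vaddvq_u32 exists):

#if defined(__aarch64__)
#include <arm_neon.h>

/* Alternative to the tmp[] reduction above: sum all four 32-bit
 * lanes of the accumulator in one step. */
static inline uint32_t reduce_sum_u32x4(uint32x4_t sum)
{
    return vaddvq_u32(sum);
}
#endif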
/* u8x16 add */
void mw_neon_mm_add_u8x16(unsigned char * A, int Row, int Col, unsigned char * B, unsigned char * C)
{
    uint8x16_t neon_a, neon_b, neon_c;

    int size = Row * Col;
    int i = 0;
    int k = 0;

    for (i = 16; i <= size; i += 16)
    {
        k = i - 16;
        neon_a = vld1q_u8(A + k);
        neon_b = vld1q_u8(B + k);
        neon_c = vaddq_u8(neon_a, neon_b);
        vst1q_u8(C + k, neon_c);
    }

    k = i - 16;
    for (i = 0; i < size % 16; i++)
    {
        C[k + i] = A[k + i] + B[k + i];
    }
}
int vp8_denoiser_filter_neon(YV12_BUFFER_CONFIG *mc_running_avg,
                             YV12_BUFFER_CONFIG *running_avg,
                             MACROBLOCK *signal, unsigned int motion_magnitude,
                             int y_offset, int uv_offset) {
  /* If motion_magnitude is small, make the denoiser more aggressive by
   * increasing the adjustment for each level: the level1 adjustment is
   * increased, the deltas stay the same.
   */
  const uint8x16_t v_level1_adjustment = vdupq_n_u8(
      (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 4 : 3);
  const uint8x16_t v_delta_level_1_and_2 = vdupq_n_u8(1);
  const uint8x16_t v_delta_level_2_and_3 = vdupq_n_u8(2);
  const uint8x16_t v_level1_threshold = vdupq_n_u8(4);
  const uint8x16_t v_level2_threshold = vdupq_n_u8(8);
  const uint8x16_t v_level3_threshold = vdupq_n_u8(16);

  /* Local variables for array pointers and strides. */
  unsigned char *sig = signal->thismb;
  int sig_stride = 16;
  unsigned char *mc_running_avg_y = mc_running_avg->y_buffer + y_offset;
  int mc_running_avg_y_stride = mc_running_avg->y_stride;
  unsigned char *running_avg_y = running_avg->y_buffer + y_offset;
  int running_avg_y_stride = running_avg->y_stride;

  /* Go over lines. */
  int i;
  int sum_diff = 0;
  for (i = 0; i < 16; ++i) {
    int8x16_t v_sum_diff = vdupq_n_s8(0);
    uint8x16_t v_running_avg_y;

    /* Load inputs. */
    const uint8x16_t v_sig = vld1q_u8(sig);
    const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y);

    /* Calculate absolute difference and sign masks. */
    const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg_y);
    const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, v_mc_running_avg_y);
    const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, v_mc_running_avg_y);

    /* Figure out which level we are in. */
    const uint8x16_t v_level1_mask = vcleq_u8(v_level1_threshold, v_abs_diff);
    const uint8x16_t v_level2_mask = vcleq_u8(v_level2_threshold, v_abs_diff);
    const uint8x16_t v_level3_mask = vcleq_u8(v_level3_threshold, v_abs_diff);

    /* Calculate absolute adjustments for level 1, 2 and 3. */
    const uint8x16_t v_level2_adjustment =
        vandq_u8(v_level2_mask, v_delta_level_1_and_2);
    const uint8x16_t v_level3_adjustment =
        vandq_u8(v_level3_mask, v_delta_level_2_and_3);
    const uint8x16_t v_level1and2_adjustment =
        vaddq_u8(v_level1_adjustment, v_level2_adjustment);
    const uint8x16_t v_level1and2and3_adjustment =
        vaddq_u8(v_level1and2_adjustment, v_level3_adjustment);

    /* Figure out the adjustment's absolute value by selecting between the
     * absolute difference if in level 0 or the value for levels 1, 2 and 3.
     */
    const uint8x16_t v_abs_adjustment =
        vbslq_u8(v_level1_mask, v_level1and2and3_adjustment, v_abs_diff);

    /* Calculate positive and negative adjustments. Apply them to the signal
     * and accumulate them. Adjustments are less than eight and the maximum
     * sum of them (7 * 16) can fit in a signed char.
     */
    const uint8x16_t v_pos_adjustment =
        vandq_u8(v_diff_pos_mask, v_abs_adjustment);
    const uint8x16_t v_neg_adjustment =
        vandq_u8(v_diff_neg_mask, v_abs_adjustment);
    v_running_avg_y = vqaddq_u8(v_sig, v_pos_adjustment);
    v_running_avg_y = vqsubq_u8(v_running_avg_y, v_neg_adjustment);
    v_sum_diff = vqaddq_s8(v_sum_diff, vreinterpretq_s8_u8(v_pos_adjustment));
    v_sum_diff = vqsubq_s8(v_sum_diff, vreinterpretq_s8_u8(v_neg_adjustment));

    /* Store results. */
    vst1q_u8(running_avg_y, v_running_avg_y);

    /* Sum all the accumulators to have the sum of all pixel differences
     * for this macroblock.
     */
    {
      int s0 = vgetq_lane_s8(v_sum_diff, 0) + vgetq_lane_s8(v_sum_diff, 1) +
               vgetq_lane_s8(v_sum_diff, 2) + vgetq_lane_s8(v_sum_diff, 3);
      int s1 = vgetq_lane_s8(v_sum_diff, 4) + vgetq_lane_s8(v_sum_diff, 5) +
               vgetq_lane_s8(v_sum_diff, 6) + vgetq_lane_s8(v_sum_diff, 7);
      int s2 = vgetq_lane_s8(v_sum_diff, 8) + vgetq_lane_s8(v_sum_diff, 9) +
               vgetq_lane_s8(v_sum_diff, 10) + vgetq_lane_s8(v_sum_diff, 11);
      int s3 = vgetq_lane_s8(v_sum_diff, 12) + vgetq_lane_s8(v_sum_diff, 13) +
               vgetq_lane_s8(v_sum_diff, 14) + vgetq_lane_s8(v_sum_diff, 15);
      sum_diff += s0 + s1 + s2 + s3;
    }

    /* Update pointers for next iteration. */
    sig += sig_stride;
    mc_running_avg_y += mc_running_avg_y_stride;
    running_avg_y += running_avg_y_stride;
  }

  /* Too many adjustments => copy the block. */
  if (abs(sum_diff) > SUM_DIFF_THRESHOLD) return COPY_BLOCK;

  /* Tell the above level that the block was filtered. */
  vp8_copy_mem16x16(running_avg->y_buffer + y_offset, running_avg_y_stride,
                    signal->thismb, sig_stride);
  return FILTER_BLOCK;
}
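The sixteen vgetq_lane_s8 extractions above can also be collapsed with pairwise widening adds, which is what the later revision of this filter (next example) does; a standalone hedged sketch of that reduction (the helper name is ours):

#include <arm_neon.h>

/* Horizontally sum the 16 signed byte lanes of v using the same
 * vpaddlq_s8 / vpaddlq_s16 / vpaddlq_s32 chain as the newer filter. */
static inline int sum_s8_lanes(int8x16_t v)
{
    const int16x8_t s16 = vpaddlq_s8(v);    /* 16 x s8  -> 8 x s16 */
    const int32x4_t s32 = vpaddlq_s16(s16); /*  8 x s16 -> 4 x s32 */
    const int64x2_t s64 = vpaddlq_s32(s32); /*  4 x s32 -> 2 x s64 */
    return (int)(vgetq_lane_s64(s64, 0) + vgetq_lane_s64(s64, 1));
}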
int vp8_denoiser_filter_neon(unsigned char *mc_running_avg_y,
                             int mc_running_avg_y_stride,
                             unsigned char *running_avg_y,
                             int running_avg_y_stride, unsigned char *sig,
                             int sig_stride, unsigned int motion_magnitude,
                             int increase_denoising) {
  /* If motion_magnitude is small, make the denoiser more aggressive by
   * increasing the adjustment for each level: the level1 adjustment is
   * increased, the deltas stay the same.
   */
  int shift_inc =
      (increase_denoising && motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD)
          ? 1
          : 0;
  const uint8x16_t v_level1_adjustment = vmovq_n_u8(
      (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 4 + shift_inc : 3);
  const uint8x16_t v_delta_level_1_and_2 = vdupq_n_u8(1);
  const uint8x16_t v_delta_level_2_and_3 = vdupq_n_u8(2);
  const uint8x16_t v_level1_threshold = vmovq_n_u8(4 + shift_inc);
  const uint8x16_t v_level2_threshold = vdupq_n_u8(8);
  const uint8x16_t v_level3_threshold = vdupq_n_u8(16);
  int64x2_t v_sum_diff_total = vdupq_n_s64(0);

  /* Go over lines. */
  int r;
  for (r = 0; r < 16; ++r) {
    /* Load inputs. */
    const uint8x16_t v_sig = vld1q_u8(sig);
    const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y);

    /* Calculate absolute difference and sign masks. */
    const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg_y);
    const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, v_mc_running_avg_y);
    const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, v_mc_running_avg_y);

    /* Figure out which level we are in. */
    const uint8x16_t v_level1_mask = vcleq_u8(v_level1_threshold, v_abs_diff);
    const uint8x16_t v_level2_mask = vcleq_u8(v_level2_threshold, v_abs_diff);
    const uint8x16_t v_level3_mask = vcleq_u8(v_level3_threshold, v_abs_diff);

    /* Calculate absolute adjustments for level 1, 2 and 3. */
    const uint8x16_t v_level2_adjustment =
        vandq_u8(v_level2_mask, v_delta_level_1_and_2);
    const uint8x16_t v_level3_adjustment =
        vandq_u8(v_level3_mask, v_delta_level_2_and_3);
    const uint8x16_t v_level1and2_adjustment =
        vaddq_u8(v_level1_adjustment, v_level2_adjustment);
    const uint8x16_t v_level1and2and3_adjustment =
        vaddq_u8(v_level1and2_adjustment, v_level3_adjustment);

    /* Figure out the adjustment's absolute value by selecting between the
     * absolute difference if in level 0 or the value for levels 1, 2 and 3.
     */
    const uint8x16_t v_abs_adjustment =
        vbslq_u8(v_level1_mask, v_level1and2and3_adjustment, v_abs_diff);

    /* Calculate positive and negative adjustments. Apply them to the signal
     * and accumulate them. Adjustments are less than eight and the maximum
     * sum of them (7 * 16) can fit in a signed char.
     */
    const uint8x16_t v_pos_adjustment =
        vandq_u8(v_diff_pos_mask, v_abs_adjustment);
    const uint8x16_t v_neg_adjustment =
        vandq_u8(v_diff_neg_mask, v_abs_adjustment);

    uint8x16_t v_running_avg_y = vqaddq_u8(v_sig, v_pos_adjustment);
    v_running_avg_y = vqsubq_u8(v_running_avg_y, v_neg_adjustment);

    /* Store results. */
    vst1q_u8(running_avg_y, v_running_avg_y);

    /* Sum all the accumulators to have the sum of all pixel differences
     * for this macroblock.
     */
    {
      const int8x16_t v_sum_diff =
          vqsubq_s8(vreinterpretq_s8_u8(v_pos_adjustment),
                    vreinterpretq_s8_u8(v_neg_adjustment));
      const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff);
      const int32x4_t fedc_ba98_7654_3210 =
          vpaddlq_s16(fe_dc_ba_98_76_54_32_10);
      const int64x2_t fedcba98_76543210 = vpaddlq_s32(fedc_ba98_7654_3210);

      v_sum_diff_total = vqaddq_s64(v_sum_diff_total, fedcba98_76543210);
    }

    /* Update pointers for next iteration. */
    sig += sig_stride;
    mc_running_avg_y += mc_running_avg_y_stride;
    running_avg_y += running_avg_y_stride;
  }

  /* Too many adjustments => copy the block. */
  {
    int64x1_t x = vqadd_s64(vget_high_s64(v_sum_diff_total),
                            vget_low_s64(v_sum_diff_total));
    int sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0);
    int sum_diff_thresh = SUM_DIFF_THRESHOLD;

    if (increase_denoising) sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH;
    if (sum_diff > sum_diff_thresh) {
      // Before returning to copy the block (i.e., apply no denoising),
      // check if we can still apply some (weaker) temporal filtering to
      // this block, that would otherwise not be denoised at all. Simplest
      // is to apply an additional adjustment to running_avg_y to bring it
      // closer to sig. The adjustment is capped by a maximum delta, and
      // chosen such that in most cases the resulting sum_diff will be
      // within the acceptable range given by sum_diff_thresh.

      // The delta is set by the excess of absolute pixel diff over the
      // threshold.
      int delta = ((sum_diff - sum_diff_thresh) >> 8) + 1;
      // Only apply the adjustment for max delta up to 3.
      if (delta < 4) {
        const uint8x16_t k_delta = vmovq_n_u8(delta);
        sig -= sig_stride * 16;
        mc_running_avg_y -= mc_running_avg_y_stride * 16;
        running_avg_y -= running_avg_y_stride * 16;
        for (r = 0; r < 16; ++r) {
          uint8x16_t v_running_avg_y = vld1q_u8(running_avg_y);
          const uint8x16_t v_sig = vld1q_u8(sig);
          const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y);

          /* Calculate absolute difference and sign masks. */
          const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg_y);
          const uint8x16_t v_diff_pos_mask =
              vcltq_u8(v_sig, v_mc_running_avg_y);
          const uint8x16_t v_diff_neg_mask =
              vcgtq_u8(v_sig, v_mc_running_avg_y);
          // Clamp absolute difference to delta to get the adjustment.
          const uint8x16_t v_abs_adjustment = vminq_u8(v_abs_diff, (k_delta));

          const uint8x16_t v_pos_adjustment =
              vandq_u8(v_diff_pos_mask, v_abs_adjustment);
          const uint8x16_t v_neg_adjustment =
              vandq_u8(v_diff_neg_mask, v_abs_adjustment);

          v_running_avg_y = vqsubq_u8(v_running_avg_y, v_pos_adjustment);
          v_running_avg_y = vqaddq_u8(v_running_avg_y, v_neg_adjustment);

          /* Store results. */
          vst1q_u8(running_avg_y, v_running_avg_y);

          {
            const int8x16_t v_sum_diff =
                vqsubq_s8(vreinterpretq_s8_u8(v_neg_adjustment),
                          vreinterpretq_s8_u8(v_pos_adjustment));
            const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff);
            const int32x4_t fedc_ba98_7654_3210 =
                vpaddlq_s16(fe_dc_ba_98_76_54_32_10);
            const int64x2_t fedcba98_76543210 =
                vpaddlq_s32(fedc_ba98_7654_3210);

            v_sum_diff_total = vqaddq_s64(v_sum_diff_total, fedcba98_76543210);
          }

          /* Update pointers for next iteration. */
          sig += sig_stride;
          mc_running_avg_y += mc_running_avg_y_stride;
          running_avg_y += running_avg_y_stride;
        }
        {
          // Update the sum of all pixel differences of this MB.
          x = vqadd_s64(vget_high_s64(v_sum_diff_total),
                        vget_low_s64(v_sum_diff_total));
          sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0);

          if (sum_diff > sum_diff_thresh) {
            return COPY_BLOCK;
          }
        }
      } else {
        return COPY_BLOCK;
      }
    }
  }

  /* Tell the above level that the block was filtered. */
  return FILTER_BLOCK;
}
/* u8x16 mv mul */
void mw_neon_mv_mul_u8x16(unsigned char * A, int Row, int T, unsigned char * B, unsigned char * C)
{
    int i = 0;
    int k = 0;

    uint8x16_t neon_b, neon_c;
    uint8x16_t neon_a0, neon_a1, neon_a2, neon_a3, neon_a4, neon_a5, neon_a6, neon_a7;
    uint8x16_t neon_a8, neon_a9, neon_a10, neon_a11, neon_a12, neon_a13, neon_a14, neon_a15;
    uint8x16_t neon_b0, neon_b1, neon_b2, neon_b3, neon_b4, neon_b5, neon_b6, neon_b7;
    uint8x16_t neon_b8, neon_b9, neon_b10, neon_b11, neon_b12, neon_b13, neon_b14, neon_b15;

    for (i = 0; i < Row; i += 16)
    {
        neon_c = vmovq_n_u8(0);

        for (k = 0; k < T; k += 16)
        {
            int j = k * T + i;

            neon_a0 = vld1q_u8(A + j);  j += Row;
            neon_a1 = vld1q_u8(A + j);  j += Row;
            neon_a2 = vld1q_u8(A + j);  j += Row;
            neon_a3 = vld1q_u8(A + j);  j += Row;
            neon_a4 = vld1q_u8(A + j);  j += Row;
            neon_a5 = vld1q_u8(A + j);  j += Row;
            neon_a6 = vld1q_u8(A + j);  j += Row;
            neon_a7 = vld1q_u8(A + j);  j += Row;
            neon_a8 = vld1q_u8(A + j);  j += Row;
            neon_a9 = vld1q_u8(A + j);  j += Row;
            neon_a10 = vld1q_u8(A + j); j += Row;
            neon_a11 = vld1q_u8(A + j); j += Row;
            neon_a12 = vld1q_u8(A + j); j += Row;
            neon_a13 = vld1q_u8(A + j); j += Row;
            neon_a14 = vld1q_u8(A + j); j += Row;
            neon_a15 = vld1q_u8(A + j);

            neon_b = vld1q_u8(B + k);
            neon_b0 = vdupq_n_u8(vgetq_lane_u8(neon_b, 0));
            neon_b1 = vdupq_n_u8(vgetq_lane_u8(neon_b, 1));
            neon_b2 = vdupq_n_u8(vgetq_lane_u8(neon_b, 2));
            neon_b3 = vdupq_n_u8(vgetq_lane_u8(neon_b, 3));
            neon_b4 = vdupq_n_u8(vgetq_lane_u8(neon_b, 4));
            neon_b5 = vdupq_n_u8(vgetq_lane_u8(neon_b, 5));
            neon_b6 = vdupq_n_u8(vgetq_lane_u8(neon_b, 6));
            neon_b7 = vdupq_n_u8(vgetq_lane_u8(neon_b, 7));
            neon_b8 = vdupq_n_u8(vgetq_lane_u8(neon_b, 8));
            neon_b9 = vdupq_n_u8(vgetq_lane_u8(neon_b, 9));
            neon_b10 = vdupq_n_u8(vgetq_lane_u8(neon_b, 10));
            neon_b11 = vdupq_n_u8(vgetq_lane_u8(neon_b, 11));
            neon_b12 = vdupq_n_u8(vgetq_lane_u8(neon_b, 12));
            neon_b13 = vdupq_n_u8(vgetq_lane_u8(neon_b, 13));
            neon_b14 = vdupq_n_u8(vgetq_lane_u8(neon_b, 14));
            neon_b15 = vdupq_n_u8(vgetq_lane_u8(neon_b, 15));

            neon_c = vaddq_u8(vmulq_u8(neon_a0, neon_b0), neon_c);
            neon_c = vaddq_u8(vmulq_u8(neon_a1, neon_b1), neon_c);
            neon_c = vaddq_u8(vmulq_u8(neon_a2, neon_b2), neon_c);
            neon_c = vaddq_u8(vmulq_u8(neon_a3, neon_b3), neon_c);
            neon_c = vaddq_u8(vmulq_u8(neon_a4, neon_b4), neon_c);
            neon_c = vaddq_u8(vmulq_u8(neon_a5, neon_b5), neon_c);
            neon_c = vaddq_u8(vmulq_u8(neon_a6, neon_b6), neon_c);
            neon_c = vaddq_u8(vmulq_u8(neon_a7, neon_b7), neon_c);
            neon_c = vaddq_u8(vmulq_u8(neon_a8, neon_b8), neon_c);
            neon_c = vaddq_u8(vmulq_u8(neon_a9, neon_b9), neon_c);
            neon_c = vaddq_u8(vmulq_u8(neon_a10, neon_b10), neon_c);
            neon_c = vaddq_u8(vmulq_u8(neon_a11, neon_b11), neon_c);
            neon_c = vaddq_u8(vmulq_u8(neon_a12, neon_b12), neon_c);
            neon_c = vaddq_u8(vmulq_u8(neon_a13, neon_b13), neon_c);
            neon_c = vaddq_u8(vmulq_u8(neon_a14, neon_b14), neon_c);
            neon_c = vaddq_u8(vmulq_u8(neon_a15, neon_b15), neon_c);
        }

        vst1q_u8(C + i, neon_c);
    }
}
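The vaddq_u8(vmulq_u8(...)) pairs in the inner loop can be fused with the multiply-accumulate intrinsic; an illustrative one-step sketch (same wrap-around arithmetic, the helper name is ours):

#include <arm_neon.h>

/* One accumulation step of the kernel above, with vmulq_u8 + vaddq_u8
 * fused into vmlaq_u8: returns acc + a * b per lane, modulo 256. */
static inline uint8x16_t mac_u8x16(uint8x16_t acc, uint8x16_t a, uint8x16_t b)
{
    return vmlaq_u8(acc, a, b);
}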
inline uint8x16_t vaddq(const uint8x16_t & v0, const uint8x16_t & v1)
{
    return vaddq_u8(v0, v1);
}
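Overloads like this let generic code call one name regardless of lane type; a small hedged sketch of how such a wrapper might be used (the matching vaddq overloads for other vector types are assumed to exist alongside it):

#include <arm_neon.h>

// Works for any vector type V that has a matching vaddq overload.
template <typename V>
inline V sum_of_three(const V& a, const V& b, const V& c)
{
    return vaddq(vaddq(a, b), c);
}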
size_t mempopcnt(const void *s, size_t len)
{
	uint8x16_t v_0;
	uint8x16_t c;
	uint32x4_t v_sum;
	uint32x2_t v_tsum;
	unsigned char *p;
	size_t r;
	unsigned shift;

	prefetch(s);

	// TODO: do this in 64 bit? the mem model seems more that way...
	v_0   = (uint8x16_t){0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
	v_sum = (uint32x4_t)v_0;
	p     = (unsigned char *)ALIGN_DOWN(s, SOVUCQ);
	shift = ALIGN_DOWN_DIFF(s, SOVUCQ);
	c     = *(const uint8x16_t *)p;
	if(HOST_IS_BIGENDIAN)
		c = neon_simple_alignq(v_0, c, SOVUCQ - shift);
	else
		c = neon_simple_alignq(c, v_0, shift);

	if(len >= SOVUCQ || len + shift >= SOVUCQ)
	{
		p   += SOVUCQ;
		len -= SOVUCQ - shift;
		v_sum = vpadalq_u16(v_sum, vpaddlq_u8(vcntq_u8(c)));

		while(len >= SOVUCQ * 2)
		{
			uint8x16_t v_sumb = v_0;

			r    = len / (SOVUCQ * 2);
			r    = r > 15 ? 15 : r;
			len -= r * SOVUCQ * 2;
			/*
			 * NEON has a vector popcnt instruction, so no compression.
			 * We trust the speed given in the handbook (adding more
			 * instructions would not make it faster), 1-2 cycles.
			 */
			for(; r; r--, p += SOVUCQ * 2) {
				c      = *(const uint8x16_t *)p;
				v_sumb = vaddq_u8(v_sumb, vcntq_u8(c));
				c      = *((const uint8x16_t *)(p + SOVUCQ));
				v_sumb = vaddq_u8(v_sumb, vcntq_u8(c));
			}
			v_sum = vpadalq_u16(v_sum, vpaddlq_u8(v_sumb));
		}
		if(len >= SOVUCQ) {
			c      = *(const uint8x16_t *)p;
			p     += SOVUCQ;
			v_sum  = vpadalq_u16(v_sum, vpaddlq_u8(vcntq_u8(c)));
			len   -= SOVUCQ;
		}

		if(len)
			c = *(const uint8x16_t *)p;
	}
	if(len) {
		if(HOST_IS_BIGENDIAN)
			c = neon_simple_alignq(c, v_0, SOVUCQ - len);
		else
			c = neon_simple_alignq(v_0, c, len);
		v_sum = vpadalq_u16(v_sum, vpaddlq_u8(vcntq_u8(c)));
	}

	v_tsum = vpadd_u32(vget_high_u32(v_sum), vget_low_u32(v_sum));
	v_tsum = vpadd_u32(v_tsum, v_tsum);
	return vget_lane_u32(v_tsum, 0);
}
static inline uint8x16x4_t enc_translate (uint8x16x4_t in)
{
	uint8x16x4_t mask1, mask2, mask3, mask4, out;

	// Translate values 0..63 to the Base64 alphabet. There are five sets:
	// #  From      To         Abs    Delta  Characters
	// 0  [0..25]   [65..90]   +65    +65    ABCDEFGHIJKLMNOPQRSTUVWXYZ
	// 1  [26..51]  [97..122]  +71    +6     abcdefghijklmnopqrstuvwxyz
	// 2  [52..61]  [48..57]   -4     -75    0123456789
	// 3  [62]      [43]       -19    -15    +
	// 4  [63]      [47]       -16    +3     /

	// Create cumulative masks for characters in sets [1,2,3,4], [2,3,4],
	// [3,4], and [4]:
	mask1.val[0] = CMPGT(in.val[0], 25);
	mask1.val[1] = CMPGT(in.val[1], 25);
	mask1.val[2] = CMPGT(in.val[2], 25);
	mask1.val[3] = CMPGT(in.val[3], 25);

	mask2.val[0] = CMPGT(in.val[0], 51);
	mask2.val[1] = CMPGT(in.val[1], 51);
	mask2.val[2] = CMPGT(in.val[2], 51);
	mask2.val[3] = CMPGT(in.val[3], 51);

	mask3.val[0] = CMPGT(in.val[0], 61);
	mask3.val[1] = CMPGT(in.val[1], 61);
	mask3.val[2] = CMPGT(in.val[2], 61);
	mask3.val[3] = CMPGT(in.val[3], 61);

	mask4.val[0] = CMPEQ(in.val[0], 63);
	mask4.val[1] = CMPEQ(in.val[1], 63);
	mask4.val[2] = CMPEQ(in.val[2], 63);
	mask4.val[3] = CMPEQ(in.val[3], 63);

	// All characters are at least in cumulative set 0, so add 'A':
	out.val[0] = vaddq_u8(in.val[0], vdupq_n_u8(65));
	out.val[1] = vaddq_u8(in.val[1], vdupq_n_u8(65));
	out.val[2] = vaddq_u8(in.val[2], vdupq_n_u8(65));
	out.val[3] = vaddq_u8(in.val[3], vdupq_n_u8(65));

	// For inputs which are also in any of the other cumulative sets,
	// add delta values against the previous set(s) to correct the shift:
	out.val[0] = vaddq_u8(out.val[0], REPLACE(mask1.val[0], 6));
	out.val[1] = vaddq_u8(out.val[1], REPLACE(mask1.val[1], 6));
	out.val[2] = vaddq_u8(out.val[2], REPLACE(mask1.val[2], 6));
	out.val[3] = vaddq_u8(out.val[3], REPLACE(mask1.val[3], 6));

	out.val[0] = vsubq_u8(out.val[0], REPLACE(mask2.val[0], 75));
	out.val[1] = vsubq_u8(out.val[1], REPLACE(mask2.val[1], 75));
	out.val[2] = vsubq_u8(out.val[2], REPLACE(mask2.val[2], 75));
	out.val[3] = vsubq_u8(out.val[3], REPLACE(mask2.val[3], 75));

	out.val[0] = vsubq_u8(out.val[0], REPLACE(mask3.val[0], 15));
	out.val[1] = vsubq_u8(out.val[1], REPLACE(mask3.val[1], 15));
	out.val[2] = vsubq_u8(out.val[2], REPLACE(mask3.val[2], 15));
	out.val[3] = vsubq_u8(out.val[3], REPLACE(mask3.val[3], 15));

	out.val[0] = vaddq_u8(out.val[0], REPLACE(mask4.val[0], 3));
	out.val[1] = vaddq_u8(out.val[1], REPLACE(mask4.val[1], 3));
	out.val[2] = vaddq_u8(out.val[2], REPLACE(mask4.val[2], 3));
	out.val[3] = vaddq_u8(out.val[3], REPLACE(mask4.val[3], 3));

	return out;
}
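CMPGT, CMPEQ and REPLACE are macros defined elsewhere in that codec; plausible NEON definitions (our reconstruction, not verbatim from the library) compare against, or mask with, a broadcast constant:

/* Assumed definitions, shown only to make the example self-contained. */
#define CMPGT(s, n)   vcgtq_u8((s), vdupq_n_u8(n))
#define CMPEQ(s, n)   vceqq_u8((s), vdupq_n_u8(n))
#define REPLACE(s, n) vandq_u8((s), vdupq_n_u8(n))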