inline int v_signmask(const v_uint32x4& a)
{
    /* per-lane shift counts {0, 1} in each 64-bit half */
    int32x2_t m0 = vcreate_s32(CV_BIG_UINT(0x0000000100000000));
    /* move each lane's sign bit to bit position 0 or 1 */
    uint32x4_t v0 = vshlq_u32(vshrq_n_u32(a.val, 31), vcombine_s32(m0, m0));
    /* pairwise add: lanes 0+1 and lanes 2+3 each form a 2-bit group */
    uint64x2_t v1 = vpaddlq_u32(v0);
    /* assemble the 4-bit sign mask */
    return (int)vgetq_lane_u64(v1, 0) + ((int)vgetq_lane_u64(v1, 1) << 2);
}
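Here vshlq_u32 applies a different shift count to each lane: vcombine_s32(m0, m0) yields the per-lane counts {0, 1, 0, 1}, so after the pairwise add each 64-bit half holds a 2-bit group of sign bits. Below is a minimal standalone sketch of the same trick on a plain uint32x4_t; the helper name and the brace-initialized shift vector are illustrative, not part of OpenCV.

#include <arm_neon.h>

/* Hypothetical standalone variant of the idea above (not OpenCV API):
 * gather the sign bits of four 32-bit lanes into a 4-bit mask using a
 * per-lane shift vector with vshlq_u32. */
static inline int sign_mask_u32x4(uint32x4_t a)
{
    const int32x4_t shifts = {0, 1, 0, 1};                    /* per-lane shift counts */
    uint32x4_t bits = vshlq_u32(vshrq_n_u32(a, 31), shifts);  /* sign bit -> bit 0 or bit 1 */
    uint64x2_t sums = vpaddlq_u32(bits);                      /* pair lanes 0+1 and 2+3 */
    return (int)vgetq_lane_u64(sums, 0) |
           ((int)vgetq_lane_u64(sums, 1) << 2);               /* assemble the 4-bit mask */
}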
void test_vshlQu32 (void)
{
  uint32x4_t out_uint32x4_t;
  uint32x4_t arg0_uint32x4_t;
  int32x4_t arg1_int32x4_t;

  out_uint32x4_t = vshlq_u32 (arg0_uint32x4_t, arg1_int32x4_t);
}
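This generated test only checks that the intrinsic compiles with the expected argument types. As a reminder of the semantics (purely an illustrative sketch, not part of the test suite): the second operand of vshlq_u32 is a signed per-lane shift count, and a negative count shifts right instead of left.

#include <arm_neon.h>
#include <stdio.h>

int main(void)
{
    uint32x4_t v = {1u, 1u, 0x80000000u, 0x80000000u};
    int32x4_t  s = {0, 4, -4, -31};          /* negative counts shift right */
    uint32x4_t r = vshlq_u32(v, s);

    printf("%u %u %u %u\n",
           vgetq_lane_u32(r, 0), vgetq_lane_u32(r, 1),
           vgetq_lane_u32(r, 2), vgetq_lane_u32(r, 3));
    /* expected output: 1 16 134217728 1 */
    return 0;
}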
/*
 * Notice:
 * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
 * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
 *   numbers of DD bits
 */
static inline uint16_t
_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
                   uint16_t nb_pkts, uint8_t *split_packet)
{
    volatile union i40e_rx_desc *rxdp;
    struct i40e_rx_entry *sw_ring;
    uint16_t nb_pkts_recd;
    int pos;
    uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;

    /* mask to shuffle from desc. to mbuf */
    uint8x16_t shuf_msk = {
        0xFF, 0xFF,   /* pkt_type set as unknown */
        0xFF, 0xFF,   /* pkt_type set as unknown */
        14, 15,       /* octet 15~14, low 16 bits pkt_len */
        0xFF, 0xFF,   /* skip high 16 bits pkt_len, zero out */
        14, 15,       /* octet 15~14, 16 bits data_len */
        2, 3,         /* octet 2~3, low 16 bits vlan_macip */
        4, 5, 6, 7    /* octet 4~7, 32bits rss */
        };

    uint8x16_t eop_check = {
        0x02, 0x00, 0x02, 0x00,
        0x02, 0x00, 0x02, 0x00,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00
        };

    uint16x8_t crc_adjust = {
        0, 0,         /* ignore pkt_type field */
        rxq->crc_len, /* sub crc on pkt_len */
        0,            /* ignore high-16bits of pkt_len */
        rxq->crc_len, /* sub crc on data_len */
        0, 0, 0       /* ignore non-length fields */
        };

    /* nb_pkts shall be less than or equal to RTE_I40E_MAX_RX_BURST */
    nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);

    /* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
    nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);

    /* Just the act of getting into the function from the application is
     * going to cost about 7 cycles
     */
    rxdp = rxq->rx_ring + rxq->rx_tail;

    rte_prefetch_non_temporal(rxdp);

    /* See if we need to rearm the RX queue - gives the prefetch a bit
     * of time to act
     */
    if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
        i40e_rxq_rearm(rxq);

    /* Before we start moving massive data around, check to see if
     * there is actually a packet available
     */
    if (!(rxdp->wb.qword1.status_error_len &
            rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
        return 0;

    /* Cache is empty -> need to scan the buffer rings, but first move
     * the next 'n' mbufs into the cache
     */
    sw_ring = &rxq->sw_ring[rxq->rx_tail];

    /* A. load 4 packet in one loop
     * [A*. mask out 4 unused dirty field in desc]
     * B. copy 4 mbuf point from swring to rx_pkts
     * C. calc the number of DD bits among the 4 packets
     * [C*. extract the end-of-packet bit, if requested]
     * D. fill info. from desc to mbuf
     */
    for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
            pos += RTE_I40E_DESCS_PER_LOOP,
            rxdp += RTE_I40E_DESCS_PER_LOOP) {
        uint64x2_t descs[RTE_I40E_DESCS_PER_LOOP];
        uint8x16_t pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
        uint16x8x2_t sterr_tmp1, sterr_tmp2;
        uint64x2_t mbp1, mbp2;
        uint16x8_t staterr;
        uint16x8_t tmp;
        uint64_t stat;

        int32x4_t len_shl = {0, 0, 0, PKTLEN_SHIFT};

        /* B.1 load 1 mbuf point */
        mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]);
        /* Read desc statuses backwards to avoid race condition */
        /* A.1 load 4 pkts desc */
        descs[3] = vld1q_u64((uint64_t *)(rxdp + 3));
        rte_rmb();

        /* B.2 copy 2 mbuf point into rx_pkts */
        vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1);

        /* B.1 load 1 mbuf point */
        mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]);

        descs[2] = vld1q_u64((uint64_t *)(rxdp + 2));
        /* B.1 load 2 mbuf point */
        descs[1] = vld1q_u64((uint64_t *)(rxdp + 1));
        descs[0] = vld1q_u64((uint64_t *)(rxdp));

        /* B.2 copy 2 mbuf point into rx_pkts */
        vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2);

        if (split_packet) {
            rte_mbuf_prefetch_part2(rx_pkts[pos]);
            rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
            rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
            rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
        }

        /* avoid compiler reorder optimization */
        rte_compiler_barrier();

        /* pkt 3,4 shift the pktlen field to be 16-bit aligned */
        uint32x4_t len3 = vshlq_u32(vreinterpretq_u32_u64(descs[3]),
                                    len_shl);
        descs[3] = vreinterpretq_u64_u32(len3);
        uint32x4_t len2 = vshlq_u32(vreinterpretq_u32_u64(descs[2]),
                                    len_shl);
        descs[2] = vreinterpretq_u64_u32(len2);

        /* D.1 pkt 3,4 convert format from desc to pktmbuf */
        pkt_mb4 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[3]), shuf_msk);
        pkt_mb3 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[2]), shuf_msk);

        /* C.1 4=>2 filter staterr info only */
        sterr_tmp2 = vzipq_u16(vreinterpretq_u16_u64(descs[1]),
                               vreinterpretq_u16_u64(descs[3]));
        /* C.1 4=>2 filter staterr info only */
        sterr_tmp1 = vzipq_u16(vreinterpretq_u16_u64(descs[0]),
                               vreinterpretq_u16_u64(descs[2]));

        /* C.2 get 4 pkts staterr value */
        staterr = vzipq_u16(sterr_tmp1.val[1],
                            sterr_tmp2.val[1]).val[0];

        desc_to_olflags_v(rxq, descs, &rx_pkts[pos]);

        /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
        tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust);
        pkt_mb4 = vreinterpretq_u8_u16(tmp);
        tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb3), crc_adjust);
        pkt_mb3 = vreinterpretq_u8_u16(tmp);

        /* pkt 1,2 shift the pktlen field to be 16-bit aligned */
        uint32x4_t len1 = vshlq_u32(vreinterpretq_u32_u64(descs[1]),
                                    len_shl);
        descs[1] = vreinterpretq_u64_u32(len1);
        uint32x4_t len0 = vshlq_u32(vreinterpretq_u32_u64(descs[0]),
                                    len_shl);
        descs[0] = vreinterpretq_u64_u32(len0);

        /* D.1 pkt 1,2 convert format from desc to pktmbuf */
        pkt_mb2 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[1]), shuf_msk);
        pkt_mb1 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[0]), shuf_msk);

        /* D.3 copy final 3,4 data to rx_pkts */
        vst1q_u8((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
                 pkt_mb4);
        vst1q_u8((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
                 pkt_mb3);

        /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
        tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb2), crc_adjust);
        pkt_mb2 = vreinterpretq_u8_u16(tmp);
        tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb1), crc_adjust);
        pkt_mb1 = vreinterpretq_u8_u16(tmp);

        /* C* extract and record EOP bit */
        if (split_packet) {
            uint8x16_t eop_shuf_mask = {
                    0x00, 0x02, 0x04, 0x06,
                    0xFF, 0xFF, 0xFF, 0xFF,
                    0xFF, 0xFF, 0xFF, 0xFF,
                    0xFF, 0xFF, 0xFF, 0xFF};
            uint8x16_t eop_bits;

            /* and with mask to extract bits, flipping 1-0 */
            eop_bits = vmvnq_u8(vreinterpretq_u8_u16(staterr));
            eop_bits = vandq_u8(eop_bits, eop_check);
            /* the staterr values are not in order, as the count
             * of dd bits doesn't care. However, for end of
             * packet tracking, we do care, so shuffle. This also
             * compresses the 32-bit values to 8-bit
             */
            eop_bits = vqtbl1q_u8(eop_bits, eop_shuf_mask);

            /* store the resulting 32-bit value */
            vst1q_lane_u32((uint32_t *)split_packet,
                           vreinterpretq_u32_u8(eop_bits), 0);
            split_packet += RTE_I40E_DESCS_PER_LOOP;

            /* zero-out next pointers */
            rx_pkts[pos]->next = NULL;
            rx_pkts[pos + 1]->next = NULL;
            rx_pkts[pos + 2]->next = NULL;
            rx_pkts[pos + 3]->next = NULL;
        }

        staterr = vshlq_n_u16(staterr, I40E_UINT16_BIT - 1);
        staterr = vreinterpretq_u16_s16(
                vshrq_n_s16(vreinterpretq_s16_u16(staterr),
                            I40E_UINT16_BIT - 1));
        stat = ~vgetq_lane_u64(vreinterpretq_u64_u16(staterr), 0);

        rte_prefetch_non_temporal(rxdp + RTE_I40E_DESCS_PER_LOOP);

        /* D.3 copy final 1,2 data to rx_pkts */
        vst1q_u8((void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
                 pkt_mb2);
        vst1q_u8((void *)&rx_pkts[pos]->rx_descriptor_fields1,
                 pkt_mb1);
        desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
        /* C.4 calc available number of desc */
        if (unlikely(stat == 0)) {
            nb_pkts_recd += RTE_I40E_DESCS_PER_LOOP;
        } else {
            nb_pkts_recd += __builtin_ctzl(stat) / I40E_UINT16_BIT;
            break;
        }
    }

    /* Update our internal tail pointer */
    rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
    rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
    rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);

    return nb_pkts_recd;
}
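The only vshlq_u32 use in this receive path is the len_shl step: the descriptor's packet length field is not 16-bit aligned, so only the fourth 32-bit word of each descriptor is shifted left by PKTLEN_SHIFT while the other three words keep a shift count of zero. A minimal standalone sketch of that selective lane shift follows; the helper name is illustrative, and the PKTLEN_SHIFT value of 10 is assumed here rather than taken from the driver headers.

#include <arm_neon.h>

#define PKTLEN_SHIFT 10   /* assumed value for this sketch */

/* Shift only the high 32-bit word of a 128-bit descriptor so that the
 * packet length lands on a 16-bit boundary; the other three words are
 * left untouched because their per-lane shift count is zero. */
static inline uint64x2_t align_pktlen(uint64x2_t desc)
{
    const int32x4_t len_shl = {0, 0, 0, PKTLEN_SHIFT};
    uint32x4_t words = vshlq_u32(vreinterpretq_u32_u64(desc), len_shl);
    return vreinterpretq_u64_u32(words);
}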
//
// box blur a square array of pixels (power of 2, actually)
// if we insist on powers of 2, we don't need to special case some end-of-row/col conditions
// to a specific blur width
//
// also, we're using NEON to vectorize our arithmetic.
// we need to do a division along the way, but NEON doesn't support integer division.
// so rather than divide by, say "w", we multiply by magic(w).
// magic(w) is chosen so that the result of multiplying by it will be the same as
// dividing by w, except that the result will be in the high half of the result.
// yes, dorothy... this is what compilers do, too...

void NEONboxBlur(pixel *src, pixel *dest, unsigned int size, unsigned int blurRad)
{
    unsigned int wid = 2 * blurRad + 1;

    // because NEON doesn't have integer division, we use "magic constants" that will give
    // us the result of division by multiplication -- the upper half of the result will be
    // (more or less) the result of the division.
    // for this, we need to compute the magic numbers corresponding to a given divisor
    struct magicu_info minfo = compute_unsigned_magic_info(wid, 16);

    int16x8_t preshift = vdupq_n_s16(-minfo.pre_shift);            // negative means shift right
    int32x4_t postshift = vdupq_n_s32(-(minfo.post_shift + 16));   // negative means shift right
    uint16x4_t magic = vdup_n_u16(minfo.multiplier);

    // fprintf(stderr,"width %5d, preshift %d, postshift %d + 16, increment %d, magic %d\n", wid,
    //         minfo.pre_shift, minfo.post_shift, minfo.increment, minfo.multiplier);
    // if (minfo.pre_shift > 0) fprintf(stderr,"hey, not an odd number!\n");

    int i, j, k, ch;
    for (i = 0 ; i < size ; i += 8) {
        // first, initialize the sum so that we can loop from 0 to size-1
        // we'll initialize boxsum for index -1, so that we can move into 0 as part of our loop
        uint16x8x4_t boxsum;
        uint8x8x4_t firstpixel = vld4_u8((uint8_t *)(src + 0 * size + i));
        for (ch = 0 ; ch < 4 ; ch++) {
            // boxsum[ch] = (blurRad + 2) * firstpixel[ch]
            // (row 0 stands in for every row clamped above the top edge, plus itself)
            boxsum.val[ch] = vmulq_n_u16(vmovl_u8(firstpixel.val[ch]), (blurRad + 1) + 1);
        }
        for (k = 1 ; k < blurRad ; k++) {
            uint8x8x4_t srcpixel = vld4_u8((uint8_t *)(src + k * size + i));
            for (ch = 0 ; ch < 4 ; ch++) {
                boxsum.val[ch] = vaddw_u8(boxsum.val[ch], srcpixel.val[ch]);
            }
        }

        int right = blurRad - 1;
        int left = -blurRad - 1;

        if (minfo.increment) {
            for (k = 0 ; k < size ; k++) {
                // move to next pixel
                unsigned int l = (left < 0) ? 0 : left;              // take off the old left
                left++;
                right++;
                unsigned int r = (right < size) ? right : (size - 1); // but add the new right
                uint8x8x4_t addpixel = vld4_u8((uint8_t *)(src + r * size + i));
                uint8x8x4_t subpixel = vld4_u8((uint8_t *)(src + l * size + i));
                for (ch = 0 ; ch < 4 ; ch++) {
                    // boxsum[ch] += addpixel[ch] - subpixel[ch];
                    boxsum.val[ch] = vsubw_u8(vaddw_u8(boxsum.val[ch], addpixel.val[ch]),
                                              subpixel.val[ch]);
                }

                uint8x8x4_t destpixel;
                for (ch = 0 ; ch < 4 ; ch++) {
                    // compute: destpixel = boxsum / wid
                    // since 16bit multiplication leads to 32bit results, we need to
                    // split our task into two chunks, for the hi and low half of our vector
                    // (because otherwise, it won't all fit into 128 bits)
                    // this is the meat of the magic division algorithm (see the include file...)
                    uint16x8_t bsum_preshifted = vshlq_u16(boxsum.val[ch], preshift);

                    // multiply by the magic number
                    uint32x4_t res_hi = vmull_u16(vget_high_u16(bsum_preshifted), magic);
                    res_hi = vaddw_u16(res_hi, magic);
                    // take the high half and post-shift
                    uint16x4_t q_hi = vmovn_u32(vshlq_u32(res_hi, postshift));

                    // pre-shift and multiply by the magic number
                    uint32x4_t res_lo = vmull_u16(vget_low_u16(bsum_preshifted), magic);
                    res_lo = vaddw_u16(res_lo, magic);
                    // take the high half and post-shift
                    uint16x4_t q_lo = vmovn_u32(vshlq_u32(res_lo, postshift));

                    destpixel.val[ch] = vqmovn_u16(vcombine_u16(q_lo, q_hi));
                }
                pixel block[8];
                vst4_u8((uint8_t *)&block, destpixel);
                for (j = 0 ; j < 8 ; j++) {
                    dest[(i + j) * size + k] = block[j];
                }
                // vst4_u8((uint8_t *)(dest + k * size + i), destpixel);
            }
        } else {
            for (k = 0 ; k < size ; k++) {
                // move to next pixel
                unsigned int l = (left < 0) ? 0 : left;              // take off the old left
                left++;
                right++;
                unsigned int r = (right < size) ? right : (size - 1); // but add the new right
                uint8x8x4_t addpixel = vld4_u8((uint8_t *)(src + r * size + i));
                uint8x8x4_t subpixel = vld4_u8((uint8_t *)(src + l * size + i));
                for (ch = 0 ; ch < 4 ; ch++) {
                    // boxsum[ch] += addpixel[ch] - subpixel[ch];
                    boxsum.val[ch] = vsubw_u8(vaddw_u8(boxsum.val[ch], addpixel.val[ch]),
                                              subpixel.val[ch]);
                }

                uint8x8x4_t destpixel;
                for (ch = 0 ; ch < 4 ; ch++) {
                    // compute: destpixel = boxsum / wid
                    // since 16bit multiplication leads to 32bit results, we need to
                    // split our task into two chunks, for the hi and low half of our vector
                    // (because otherwise, it won't all fit into 128 bits)
                    // this is the meat of the magic division algorithm (see the include file...)
                    uint16x8_t bsum_preshifted = vshlq_u16(boxsum.val[ch], preshift);

                    // multiply by the magic number
                    // take the high half and post-shift
                    uint32x4_t res_hi = vmull_u16(vget_high_u16(bsum_preshifted), magic);
                    uint16x4_t q_hi = vmovn_u32(vshlq_u32(res_hi, postshift));

                    // multiply by the magic number
                    // take the high half and post-shift
                    uint32x4_t res_lo = vmull_u16(vget_low_u16(bsum_preshifted), magic);
                    uint16x4_t q_lo = vmovn_u32(vshlq_u32(res_lo, postshift));

                    destpixel.val[ch] = vqmovn_u16(vcombine_u16(q_lo, q_hi));
                }
                pixel block[8];
                vst4_u8((uint8_t *)&block, destpixel);
                for (j = 0 ; j < 8 ; j++) {
                    dest[(i + j) * size + k] = block[j];
                }
                // vst4_u8((uint8_t *)(dest + k * size + i), destpixel);
            }
        }
    }
}
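The comments above describe the divide-by-multiply trick: since NEON has no integer divide, boxsum / wid is computed as a pre-shift, a widening multiply by minfo.multiplier, an optional add of the multiplier (the "increment" variant in the first branch), and a right shift by minfo.post_shift + 16 that keeps the high half of the 32-bit product. Below is a scalar model of that sequence, offered only as a sketch under the assumption that compute_unsigned_magic_info(wid, 16) fills its fields the way the vector code expects; the helper name is hypothetical.

#include <stdint.h>

/* Scalar sketch of the vector math above: divide a 16-bit value by the
 * blur width using the precomputed magic constants. */
static inline uint16_t magic_div_u16(uint16_t x, uint16_t multiplier,
                                     unsigned pre_shift, unsigned post_shift,
                                     int increment)
{
    uint32_t t = (uint32_t)(x >> pre_shift) * multiplier;  /* 16x16 -> 32-bit product */
    if (increment)
        t += multiplier;          /* "increment" variant: ((x >> pre_shift) + 1) * multiplier */
    return (uint16_t)(t >> (post_shift + 16));             /* keep the high half, then post-shift */
}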