In the Rx vector path packet-processing loop, some code comments are
wrong: the descriptor load is annotated as loading all 4 descriptors at
once and the mbuf pointer load as loading 1 pointer, while the code
actually loads desc[3] separately from desc[2-0] and loads 2 mbuf
pointers at a time. Fix these comments (an illustrative sketch of the
corrected load sequence follows the tags below).
Fixes: 7092be8437bd ("fm10k: add vector Rx")
Fixes: c3def6a8724c ("net/i40e: implement vector PMD for altivec")
Fixes: ae0eb310f253 ("net/i40e: implement vector PMD for ARM")
Fixes: 9ed94e5bb04e ("i40e: add vector Rx")
Fixes: 319c421f3890 ("net/avf: enable SSE Rx Tx")
Fixes: 1162f5a0ef31 ("net/iavf: support flexible Rx descriptor in SSE path")
Fixes: c68a52b8b38c ("net/ice: support vector SSE in Rx")
Fixes: cf4b4708a88a ("ixgbe: improve slow-path perf with vector scattered Rx")
Cc: stable@dpdk.org
Suggested-by: Ruifeng Wang <ruifeng.wang@arm.com>
Signed-off-by: Feifei Wang <feifei.wang2@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
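
For reference, here is a minimal, self-contained sketch of the load
sequence with the corrected comments. It is not code from any of the
drivers touched here: the descriptor type, barrier macro and function
name are stand-ins, and it assumes 64-bit mbuf pointers so that two of
them fit in one XMM register.

#include <emmintrin.h>
#include <stdint.h>

/* stand-in for rte_compiler_barrier() */
#define compiler_barrier() __asm__ __volatile__ ("" : : : "memory")

/* stand-in for the 16-byte HW Rx descriptor */
struct rx_desc { uint64_t qw[2]; };

static void
load_descs_and_mbufs(const struct rx_desc *rxdp, void * const *sw_ring,
		     int pos, __m128i descs[4], __m128i *mbp1, __m128i *mbp2)
{
	/* B.1 load 2 mbuf points */
	*mbp1 = _mm_loadu_si128((const __m128i *)&sw_ring[pos]);

	/* Read desc statuses backwards to avoid race condition */
	/* A.1 load desc[3] */
	descs[3] = _mm_loadu_si128((const __m128i *)(rxdp + 3));
	compiler_barrier();

	/* B.1 load 2 mbuf points */
	*mbp2 = _mm_loadu_si128((const __m128i *)&sw_ring[pos + 2]);

	/* A.1 load desc[2-0] */
	descs[2] = _mm_loadu_si128((const __m128i *)(rxdp + 2));
	compiler_barrier();
	descs[1] = _mm_loadu_si128((const __m128i *)(rxdp + 1));
	compiler_barrier();
	descs[0] = _mm_loadu_si128((const __m128i *)(rxdp));
}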
mbp1 = _mm_loadu_si128((__m128i *)&mbufp[pos]);
/* Read desc statuses backwards to avoid race condition */
- /* A.1 load 4 pkts desc */
+ /* A.1 load desc[3] */
descs0[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
rte_compiler_barrier();
mbp2 = _mm_loadu_si128((__m128i *)&mbufp[pos+2]);
#endif
+ /* A.1 load desc[2-0] */
descs0[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
rte_compiler_barrier();
- /* B.1 load 2 mbuf point */
descs0[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
rte_compiler_barrier();
descs0[0] = _mm_loadu_si128((__m128i *)(rxdp));
* in one XMM reg.
*/
- /* B.1 load 1 mbuf point */
+ /* B.1 load 2 mbuf points */
mbp1 = *(vector unsigned long *)&sw_ring[pos];
/* Read desc statuses backwards to avoid race condition */
- /* A.1 load 4 pkts desc */
+ /* A.1 load desc[3] */
descs[3] = *(vector unsigned long *)(rxdp + 3);
rte_compiler_barrier();
/* B.2 copy 2 mbuf point into rx_pkts */
*(vector unsigned long *)&rx_pkts[pos] = mbp1;
- /* B.1 load 1 mbuf point */
+ /* B.1 load 2 mbuf points */
mbp2 = *(vector unsigned long *)&sw_ring[pos + 2];
+ /* A.1 load desc[2-0] */
descs[2] = *(vector unsigned long *)(rxdp + 2);
rte_compiler_barrier();
- /* B.1 load 2 mbuf point */
descs[1] = *(vector unsigned long *)(rxdp + 1);
rte_compiler_barrier();
descs[0] = *(vector unsigned long *)(rxdp);
int32x4_t len_shl = {0, 0, 0, PKTLEN_SHIFT};
- /* B.1 load 1 mbuf point */
+ /* B.1 load 2 mbuf points */
mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]);
/* Read desc statuses backwards to avoid race condition */
- /* A.1 load 4 pkts desc */
+ /* A.1 load desc[3] */
descs[3] = vld1q_u64((uint64_t *)(rxdp + 3));
/* B.2 copy 2 mbuf point into rx_pkts */
vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1);
- /* B.1 load 1 mbuf point */
+ /* B.1 load 2 mbuf points */
mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]);
+ /* A.1 load desc[2-0] */
descs[2] = vld1q_u64((uint64_t *)(rxdp + 2));
- /* B.1 load 2 mbuf point */
descs[1] = vld1q_u64((uint64_t *)(rxdp + 1));
descs[0] = vld1q_u64((uint64_t *)(rxdp));
/* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */
mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
/* Read desc statuses backwards to avoid race condition */
- /* A.1 load 4 pkts desc */
+ /* A.1 load desc[3] */
descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
rte_compiler_barrier();
mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
#endif
+ /* A.1 load desc[2-0] */
descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
rte_compiler_barrier();
- /* B.1 load 2 mbuf point */
descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
rte_compiler_barrier();
descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
/* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */
mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
/* Read desc statuses backwards to avoid race condition */
- /* A.1 load 4 pkts desc */
+ /* A.1 load desc[3] */
descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
rte_compiler_barrier();
mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos + 2]);
#endif
+ /* A.1 load desc[2-0] */
descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
rte_compiler_barrier();
- /* B.1 load 2 mbuf point */
descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
rte_compiler_barrier();
descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
/* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */
mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
/* Read desc statuses backwards to avoid race condition */
- /* A.1 load 4 pkts desc */
+ /* A.1 load desc[3] */
descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
rte_compiler_barrier();
mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos + 2]);
#endif
+ /* A.1 load desc[2-0] */
descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
rte_compiler_barrier();
- /* B.1 load 2 mbuf point */
descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
rte_compiler_barrier();
descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
/* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */
mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
/* Read desc statuses backwards to avoid race condition */
- /* A.1 load 4 pkts desc */
+ /* A.1 load desc[3] */
descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
rte_compiler_barrier();
mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos + 2]);
#endif
+ /* A.1 load desc[2-0] */
descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
rte_compiler_barrier();
- /* B.1 load 2 mbuf point */
descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
rte_compiler_barrier();
descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
/* Read desc statuses backwards to avoid race condition */
- /* A.1 load 4 pkts desc */
+ /* A.1 load desc[3] */
descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
rte_compiler_barrier();
mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
#endif
+ /* A.1 load desc[2-0] */
descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
rte_compiler_barrier();
- /* B.1 load 2 mbuf point */
descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
rte_compiler_barrier();
descs[0] = _mm_loadu_si128((__m128i *)(rxdp));