net/bnxt: fix reordering in NEON Rx
author    Ruifeng Wang <ruifeng.wang@arm.com>
          Wed, 13 Apr 2022 10:31:56 +0000 (18:31 +0800)
committer Ajit Khaparde <ajit.khaparde@broadcom.com>
          Tue, 10 May 2022 05:00:14 +0000 (07:00 +0200)
The Rx descriptor contains a valid bit which indicates readiness of the
rest of the descriptor words. Hence, the word that contains the valid
bit must be read prior to the other words.

In the NEON vector path, two contiguous 8B descriptor words are loaded
into a single NEON register. Since the vector load provides no 16B
atomicity, the read of the word that includes the valid bit could be
reordered after the reads of the other words. In that case, the fetched
data could be invalid.
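
For illustration, a minimal sketch of the unprotected load. The
descriptor pointer and the valid-bit placement in the upper 64b are
assumptions of this example; in the driver the valid bit lives in the
info3_v word.

    #include <arm_neon.h>
    #include <stdint.h>

    static inline uint32x4_t
    load_cmpl_unordered(const uint32_t *desc)
    {
            /*
             * One 16B vector load covering two contiguous 8B descriptor
             * words. The two halves are not single-copy atomic, so the
             * data half may be fetched before the half holding the valid
             * bit, and can be stale even when the valid bit reads as set.
             */
            return vld1q_u32(desc);
    }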

Reload the lower 64b after the read barrier. This ensures the data
fetched is correct.
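
A minimal sketch of the corresponding fix, under the same assumed
layout (valid bit in the upper 64b, data words in the lower 64b):

    #include <arm_neon.h>
    #include <rte_atomic.h>

    static inline uint32x4_t
    load_cmpl_ordered(const uint64_t *desc)
    {
            /* 16B load; the two 8B halves may be read in either order. */
            uint32x4_t cmp = vld1q_u32((const uint32_t *)desc);

            /* Order the reload below after the valid-bit read above. */
            rte_io_rmb();

            /*
             * Refetch the lower 64b (the data words) into lane 0 so they
             * are guaranteed to be read after the valid bit was observed.
             */
            return vreinterpretq_u32_u64(vld1q_lane_u64(
                            desc, vreinterpretq_u64_u32(cmp), 0));
    }

This is the same load/barrier/lane-reload sequence the hunk below
applies to each of the four rxcmp1 vectors.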

Also fix comments that do not pertain to the Arm architecture.

Fixes: deae85145c64 ("net/bnxt: handle multiple packets per loop in vector Rx")
Cc: stable@dpdk.org
Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
drivers/net/bnxt/bnxt_rxtx_vec_neon.c

index 779e23ac4f7655f7105e8a8553c77f111274e5bc..32f8e59b3a5cc9c68bd629a4558a624edeac6908 100644 (file)
@@ -231,25 +231,38 @@ recv_burst_vec_neon(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                }
 
                /*
-                * Load the four current descriptors into SSE registers in
-                * reverse order to ensure consistent state.
+                * Load the four current descriptors into NEON registers.
+                * IO barriers are used to ensure consistent state.
                 */
                rxcmp1[3] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 7]);
                rte_io_rmb();
+               /* Reload lower 64b of descriptors to make it ordered after info3_v. */
+               rxcmp1[3] = vreinterpretq_u32_u64(vld1q_lane_u64
+                               ((void *)&cpr->cp_desc_ring[cons + 7],
+                               vreinterpretq_u64_u32(rxcmp1[3]), 0));
                rxcmp[3] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 6]);
 
                rxcmp1[2] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 5]);
                rte_io_rmb();
+               rxcmp1[2] = vreinterpretq_u32_u64(vld1q_lane_u64
+                               ((void *)&cpr->cp_desc_ring[cons + 5],
+                               vreinterpretq_u64_u32(rxcmp1[2]), 0));
                rxcmp[2] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 4]);
 
                t1 = vreinterpretq_u64_u32(vzip2q_u32(rxcmp1[2], rxcmp1[3]));
 
                rxcmp1[1] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 3]);
                rte_io_rmb();
+               rxcmp1[1] = vreinterpretq_u32_u64(vld1q_lane_u64
+                               ((void *)&cpr->cp_desc_ring[cons + 3],
+                               vreinterpretq_u64_u32(rxcmp1[1]), 0));
                rxcmp[1] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 2]);
 
                rxcmp1[0] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 1]);
                rte_io_rmb();
+               rxcmp1[0] = vreinterpretq_u32_u64(vld1q_lane_u64
+                               ((void *)&cpr->cp_desc_ring[cons + 1],
+                               vreinterpretq_u64_u32(rxcmp1[0]), 0));
                rxcmp[0] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 0]);
 
                t0 = vreinterpretq_u64_u32(vzip2q_u32(rxcmp1[0], rxcmp1[1]));
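
For reference, vzip2q_u32(a, b) interleaves the upper halves of its two
operands, yielding {a[2], b[2], a[3], b[3]}; here it gathers the upper
descriptor words (which include the valid bits) of two adjacent
completions into one vector. A small standalone illustration with
made-up lane values:

    #include <arm_neon.h>
    #include <stdint.h>

    static inline uint32x4_t
    zip_upper_halves(void)
    {
            const uint32_t av[4] = {0, 1, 2, 3};
            const uint32_t bv[4] = {4, 5, 6, 7};

            /* Result lanes: {2, 6, 3, 7}. */
            return vzip2q_u32(vld1q_u32(av), vld1q_u32(bv));
    }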