const vector unsigned long shmax = {64, 64};
#endif
-		if (!(pos & 0x7) && pos + 8 < mcqe_n)
-			rte_prefetch0((void *)(cq + pos + 8));
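+		/* Prefetch up to MLX5_VPMD_DESCS_PER_LOOP CQEs within mcqe_n. */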
+		for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
+			if (likely(pos + i < mcqe_n))
+				rte_prefetch0((void *)(cq + pos + i));
		/* A.1 load mCQEs into a 128bit register. */
		mcqe1 = (vector unsigned char)vec_vsx_ld(0,
-1UL << ((mcqe_n - pos) *
sizeof(uint16_t) * 8) : 0);
#endif
-
-		if (!(pos & 0x7) && pos + 8 < mcqe_n)
-			rte_prefetch0((void *)(cq + pos + 8));
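+		/* Prefetch up to MLX5_VPMD_DESCS_PER_LOOP CQEs within mcqe_n. */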
+		for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
+			if (likely(pos + i < mcqe_n))
+				rte_prefetch0((void *)(cq + pos + i));
		__asm__ volatile (
		/* A.1 load mCQEs into a 128bit register. */
		"ld1 {v16.16b - v17.16b}, [%[mcq]] \n\t"
__m128i byte_cnt, invalid_mask;
#endif
-		if (!(pos & 0x7) && pos + 8 < mcqe_n)
-			rte_prefetch0((void *)(cq + pos + 8));
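+		/* Prefetch up to MLX5_VPMD_DESCS_PER_LOOP CQEs within mcqe_n. */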
+		for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
+			if (likely(pos + i < mcqe_n))
+				rte_prefetch0((void *)(cq + pos + i));
+
		/* A.1 load mCQEs into a 128bit register. */
		mcqe1 = _mm_loadu_si128((__m128i *)&mcq[pos % 8]);
		mcqe2 = _mm_loadu_si128((__m128i *)&mcq[pos % 8 + 2]);