}
/**
+ * vPMD raw receive routine; only accepts nb_pkts >= ICE_DESCS_PER_LOOP
+ *
* Notice:
* - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > ICE_VPMD_RX_BURST, only scan ICE_VPMD_RX_BURST
- * numbers of DD bits
+ * - nb_pkts is floor-aligned to a multiple of ICE_DESCS_PER_LOOP
*/
static inline uint16_t
_ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
const __m128i eop_check = _mm_set_epi64x(0x0000000200000002LL,
0x0000000200000002LL);
- /* nb_pkts shall be less equal than ICE_MAX_RX_BURST */
- nb_pkts = RTE_MIN(nb_pkts, ICE_MAX_RX_BURST);
-
/* nb_pkts has to be floor-aligned to ICE_DESCS_PER_LOOP */
nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, ICE_DESCS_PER_LOOP);
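+ /* packets beyond the floor-aligned count remain in the Rx ring for the next call */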
return _ice_recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}
-/* vPMD receive routine that reassembles scattered packets
+/**
+ * vPMD receive routine that reassembles a single burst of up to ICE_VPMD_RX_BURST (32) scattered packets
+ *
* Notice:
* - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > ICE_VPMD_RX_BURST, only scan ICE_VPMD_RX_BURST
- * numbers of DD bits
*/
-uint16_t
-ice_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+static uint16_t
+ice_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
{
struct ice_rx_queue *rxq = rx_queue;
uint8_t split_flags[ICE_VPMD_RX_BURST] = {0};
&split_flags[i]);
}
+/**
+ * vPMD receive routine that reassembles scattered packets.
+ */
+uint16_t
+ice_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t retval = 0;
+
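+ /* split requests larger than the vector burst size into ICE_VPMD_RX_BURST-sized chunks */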
+ while (nb_pkts > ICE_VPMD_RX_BURST) {
+ uint16_t burst;
+
+ burst = ice_recv_scattered_burst_vec(rx_queue,
+ rx_pkts + retval,
+ ICE_VPMD_RX_BURST);
+ retval += burst;
+ nb_pkts -= burst;
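+ /* fewer packets than requested means the Rx ring is drained; stop early */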
+ if (burst < ICE_VPMD_RX_BURST)
+ return retval;
+ }
+
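+ /* receive the remaining tail of at most ICE_VPMD_RX_BURST packets */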
+ return retval + ice_recv_scattered_burst_vec(rx_queue,
+ rx_pkts + retval,
+ nb_pkts);
+}
+
static inline void
ice_vtx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf *pkt,
uint64_t flags)