+ rxq->rx_tail = rx_id;
+
+ iavf_update_rx_tail(rxq, nb_hold, rx_id);
+
+ return nb_rx;
+}
+
+/* implement recv_scattered_pkts for flexible Rx descriptor */
+uint16_t
+iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct iavf_rx_queue *rxq = rx_queue;
+ union iavf_rx_flex_desc rxd;
+ struct rte_mbuf *rxe;
+ struct rte_mbuf *first_seg = rxq->pkt_first_seg;
+ struct rte_mbuf *last_seg = rxq->pkt_last_seg;
+ struct rte_mbuf *nmb, *rxm;
+ uint16_t rx_id = rxq->rx_tail;
+ uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
+ struct rte_eth_dev *dev;
+ uint16_t rx_stat_err0;
+ uint64_t dma_addr;
+ uint64_t pkt_flags;
+
+ volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
+ volatile union iavf_rx_flex_desc *rxdp;
+ const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+
+ while (nb_rx < nb_pkts) {
+ rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
+ rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
+
+ /* Check the DD bit */
+ if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
+ break;
+ IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
+
+ nmb = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!nmb)) {
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u", rxq->port_id, rxq->queue_id);
+ dev = &rte_eth_devices[rxq->port_id];
+ dev->data->rx_mbuf_alloc_failed++;
+ break;
+ }
+
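+		/* Copy the write-back descriptor to the stack before the
+		 * ring entry is rewritten below to refill with the newly
+		 * allocated mbuf, and swap the consumed mbuf out of the
+		 * SW ring.
+		 */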
+ rxd = *rxdp;
+ nb_hold++;
+		rxe = rxq->sw_ring[rx_id];
+		rxq->sw_ring[rx_id] = nmb;
+		rx_id++;
+ if (rx_id == rxq->nb_rx_desc)
+ rx_id = 0;
+
+ /* Prefetch next mbuf */
+ rte_prefetch0(rxq->sw_ring[rx_id]);
+
+ /* When next RX descriptor is on a cache line boundary,
+ * prefetch the next 4 RX descriptors and next 8 pointers
+ * to mbufs.
+ */
+ if ((rx_id & 0x3) == 0) {
+ rte_prefetch0(&rx_ring[rx_id]);
+ rte_prefetch0(rxq->sw_ring[rx_id]);
+ }
+
+ rxm = rxe;
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+
+ /* Set data buffer address and data length of the mbuf */
+ rxdp->read.hdr_addr = 0;
+ rxdp->read.pkt_addr = dma_addr;
+ rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
+ IAVF_RX_FLX_DESC_PKT_LEN_M;
+ rxm->data_len = rx_packet_len;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+
+ /* If this is the first buffer of the received packet, set the
+ * pointer to the first mbuf of the packet and initialize its
+ * context. Otherwise, update the total length and the number
+ * of segments of the current scattered packet, and update the
+ * pointer to the last mbuf of the current packet.
+ */
+ if (!first_seg) {
+ first_seg = rxm;
+ first_seg->nb_segs = 1;
+ first_seg->pkt_len = rx_packet_len;
+ } else {
+ first_seg->pkt_len =
+ (uint16_t)(first_seg->pkt_len +
+ rx_packet_len);
+ first_seg->nb_segs++;
+ last_seg->next = rxm;
+ }
+
+ /* If this is not the last buffer of the received packet,
+ * update the pointer to the last mbuf of the current scattered
+ * packet and continue to parse the RX ring.
+ */
+ if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_EOF_S))) {
+ last_seg = rxm;
+ continue;
+ }
+
+ /* This is the last buffer of the received packet. If the CRC
+ * is not stripped by the hardware:
+ * - Subtract the CRC length from the total packet length.
+ * - If the last buffer only contains the whole CRC or a part
+ * of it, free the mbuf associated to the last buffer. If part
+ * of the CRC is also contained in the previous mbuf, subtract
+ * the length of that CRC part from the data length of the
+ * previous mbuf.
+ */
+ rxm->next = NULL;
+ if (unlikely(rxq->crc_len > 0)) {
+ first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
+ if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
+ rte_pktmbuf_free_seg(rxm);
+ first_seg->nb_segs--;
+ last_seg->data_len =
+ (uint16_t)(last_seg->data_len -
+ (RTE_ETHER_CRC_LEN - rx_packet_len));
+ last_seg->next = NULL;
+ } else {
+ rxm->data_len = (uint16_t)(rx_packet_len -
+ RTE_ETHER_CRC_LEN);
+ }
+ }
+
+ first_seg->port = rxq->port_id;
+ first_seg->ol_flags = 0;
+ first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
+ rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
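+		/* Fill in the VLAN tag, flexible-descriptor metadata (e.g.
+		 * the RSS hash) and error-derived offload flags for the
+		 * reassembled packet.
+		 */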
+ iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+ iavf_rxd_to_pkt_fields(first_seg, &rxd);
+#endif
+ pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
+
+ first_seg->ol_flags |= pkt_flags;
+
+		/* Prefetch data of the first segment. */
+ rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
+ first_seg->data_off));
+ rx_pkts[nb_rx++] = first_seg;
+ first_seg = NULL;
+ }
+
+ /* Record index of the next RX descriptor to probe. */
+ rxq->rx_tail = rx_id;
+ rxq->pkt_first_seg = first_seg;
+ rxq->pkt_last_seg = last_seg;
+
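+	/* Return freed descriptors to hardware; iavf_update_rx_tail()
+	 * defers the tail register write until more than rx_free_thresh
+	 * descriptors have accumulated, batching the MMIO cost.
+	 */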
+ iavf_update_rx_tail(rxq, nb_hold, rx_id);