IAVF_RX_DESC_FLTSTAT_RSS_HASH) ==
IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
+ /* Check for an FDIR match */
+ flags |= (qword & (1 << IAVF_RX_DESC_STATUS_FLM_SHIFT) ?
+ PKT_RX_FDIR : 0);
+
if (likely((error_bits & IAVF_RX_ERR_BITS) == 0)) {
flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
return flags;
}
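+/* Fill mbuf FDIR fields from a legacy Rx descriptor and return FDIR flags */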
+static inline uint64_t
+iavf_rxd_build_fdir(volatile union iavf_rx_desc *rxdp, struct rte_mbuf *mb)
+{
+ uint64_t flags = 0;
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+ uint16_t flexbh;
+
+ flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
+ IAVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
+ IAVF_RX_DESC_EXT_STATUS_FLEXBH_MASK;
+
+ if (flexbh == IAVF_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
+ mb->hash.fdir.hi =
+ rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
+ flags |= PKT_RX_FDIR_ID;
+ }
+#else
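+ /* 16B descriptor: fd_id overlays the RSS hash in qword0 */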
+ mb->hash.fdir.hi =
+ rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
+ flags |= PKT_RX_FDIR_ID;
+#endif
+ return flags;
+}
+
/* Translate the rx flex descriptor status to pkt flags */
static inline void
iavf_rxd_to_pkt_fields(struct rte_mbuf *mb,
volatile union iavf_rx_flex_desc *rxdp)
{
volatile struct iavf_32b_rx_flex_desc_comms_ovs *desc =
(volatile struct iavf_32b_rx_flex_desc_comms_ovs *)rxdp;
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
uint16_t stat_err;

stat_err = rte_le_to_cpu_16(desc->status_error0);
if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
mb->ol_flags |= PKT_RX_RSS_HASH;
mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
}
-}
#endif
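+
+ /* An all-ones flow_id marks an invalid/absent FDIR filter ID */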
+ if (desc->flow_id != 0xFFFFFFFF) {
+ mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
+ }
+}
+
#define IAVF_RX_FLEX_ERR0_BITS \
((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) | \
(1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
rte_prefetch0(rxq->sw_ring[rx_id]);
}
rxm = rxe;
- rxe = nmb;
dma_addr =
rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
rxdp->read.hdr_addr = 0;
rxm->hash.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
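+ /* PKT_RX_FDIR was set from the FLM bit; fetch the FDIR filter ID */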
+ if (pkt_flags & PKT_RX_FDIR)
+ pkt_flags |= iavf_rxd_build_fdir(&rxd, rxm);
+
rxm->ol_flags |= pkt_flags;
rx_pkts[nb_rx++] = rxm;
rte_prefetch0(rxq->sw_ring[rx_id]);
}
rxm = rxe;
- rxe = nmb;
dma_addr =
rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
rxdp->read.hdr_addr = 0;
rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
-#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
iavf_rxd_to_pkt_fields(rxm, &rxd);
-#endif
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
rxm->ol_flags |= pkt_flags;
}
rxm = rxe;
- rxe = nmb;
dma_addr =
rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
-#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
iavf_rxd_to_pkt_fields(first_seg, &rxd);
-#endif
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
first_seg->ol_flags |= pkt_flags;
}
rxm = rxe;
- rxe = nmb;
dma_addr =
rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
first_seg->hash.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
+ if (pkt_flags & PKT_RX_FDIR)
+ pkt_flags |= iavf_rxd_build_fdir(&rxd, first_seg);
+
first_seg->ol_flags |= pkt_flags;
/* Prefetch data of first segment, if configured to do so. */
mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
-#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
iavf_rxd_to_pkt_fields(mb, &rxdp[j]);
-#endif
stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
mb->hash.rss = rte_le_to_cpu_32(
rxdp[j].wb.qword0.hi_dword.rss);
+ if (pkt_flags & PKT_RX_FDIR)
+ pkt_flags |= iavf_rxd_build_fdir(&rxdp[j], mb);
+
mb->ol_flags |= pkt_flags;
}
tx_offload.l3_len = tx_pkt->l3_len;
tx_offload.l4_len = tx_pkt->l4_len;
tx_offload.tso_segsz = tx_pkt->tso_segsz;
-
/* Calculate the number of context descriptors needed. */
nb_ctx = iavf_calc_context_desc(ol_flags);
dev->tx_pkt_prepare = iavf_prep_pkts;
}
+static int
+iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq,
+ uint32_t free_cnt)
+{
+ struct iavf_tx_entry *swr_ring = txq->sw_ring;
+ uint16_t i, tx_last, tx_id;
+ uint16_t nb_tx_free_last;
+ uint16_t nb_tx_to_clean;
+ uint32_t pkt_cnt;
+
+ /* Start freeing mbufs from the entry after tx_tail */
+ tx_last = txq->tx_tail;
+ tx_id = swr_ring[tx_last].next_id;
+
+ if (txq->nb_free == 0 && iavf_xmit_cleanup(txq))
+ return 0;
+
+ nb_tx_to_clean = txq->nb_free;
+ nb_tx_free_last = txq->nb_free;
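+ /* A free_cnt of 0 asks to free all possible mbufs, per the
+ * rte_eth_tx_done_cleanup() API.
+ */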
+ if (!free_cnt)
+ free_cnt = txq->nb_tx_desc;
+
+ /* Loop through swr_ring to count the number of
+ * freeable mbufs and packets.
+ */
+ for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
+ for (i = 0; i < nb_tx_to_clean &&
+ pkt_cnt < free_cnt &&
+ tx_id != tx_last; i++) {
+ if (swr_ring[tx_id].mbuf != NULL) {
+ rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
+ swr_ring[tx_id].mbuf = NULL;
+
+ /*
+ * last segment in the packet,
+ * increment packet count
+ */
+ pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
+ }
+
+ tx_id = swr_ring[tx_id].next_id;
+ }
+
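+ /* Stop if fewer than rs_thresh descriptors are still pending
+ * (iavf_xmit_cleanup() cleans in rs_thresh batches) or the scan
+ * wrapped around to tx_last.
+ */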
+ if (txq->rs_thresh > txq->nb_tx_desc -
+ txq->nb_free || tx_id == tx_last)
+ break;
+
+ if (pkt_cnt < free_cnt) {
+ if (iavf_xmit_cleanup(txq))
+ break;
+
+ nb_tx_to_clean = txq->nb_free - nb_tx_free_last;
+ nb_tx_free_last = txq->nb_free;
+ }
+ }
+
+ return (int)pkt_cnt;
+}
+
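+/* Callback for the rte_eth_tx_done_cleanup() ethdev op */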
+int
+iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt)
+{
+ struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
+
+ return iavf_tx_done_cleanup_full(q, free_cnt);
+}
+
void
iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_rxq_info *qinfo)