net/ice: cleanup Tx buffers
author		Chenxu Di <chenxux.di@intel.com>
		Mon, 13 Jan 2020 09:57:06 +0000 (09:57 +0000)
committer	Ferruh Yigit <ferruh.yigit@intel.com>
		Fri, 17 Jan 2020 18:46:26 +0000 (19:46 +0100)
Add support in the ice driver for the rte_eth_tx_done_cleanup API,
which forces the freeing of consumed buffers on the Tx ring.
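
A minimal usage sketch (the port/queue IDs and burst size below are
illustrative assumptions, not part of this patch):

	/* Free up to 32 completed Tx mbufs on port 0, queue 0;
	 * free_cnt == 0 would request freeing all completed buffers.
	 * Returns the number of packets freed, or a negative errno
	 * (e.g. -ENOTSUP when the active Tx path cannot clean up).
	 */
	int nb = rte_eth_tx_done_cleanup(0 /* port */, 0 /* queue */, 32);

	if (nb < 0)
		printf("Tx cleanup failed: %d\n", nb);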

Signed-off-by: Chenxu Di <chenxux.di@intel.com>
Acked-by: Qiming Yang <qiming.yang@intel.com>
drivers/net/ice/ice_ethdev.c
drivers/net/ice/ice_rxtx.c
drivers/net/ice/ice_rxtx.h

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 6b344cb..f99eb4e 100644
@@ -225,6 +225,7 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
        .filter_ctrl                  = ice_dev_filter_ctrl,
        .udp_tunnel_port_add          = ice_dev_udp_tunnel_port_add,
        .udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
+       .tx_done_cleanup              = ice_tx_done_cleanup,
 };
 
 /* store statistics names and its offset in stats structure */
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index b4f5367..5f4fc42 100644
@@ -2643,6 +2643,116 @@ ice_tx_free_bufs(struct ice_tx_queue *txq)
        return txq->tx_rs_thresh;
 }
 
+static int
+ice_tx_done_cleanup_full(struct ice_tx_queue *txq,
+                       uint32_t free_cnt)
+{
+       struct ice_tx_entry *swr_ring = txq->sw_ring;
+       uint16_t i, tx_last, tx_id;
+       uint16_t nb_tx_free_last;
+       uint16_t nb_tx_to_clean;
+       uint32_t pkt_cnt;
+
+       /* Start freeing mbufs from the entry after tx_tail */
+       tx_last = txq->tx_tail;
+       tx_id  = swr_ring[tx_last].next_id;
+
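+       /* The ring is full; try a descriptor write-back cleanup first
+        * and bail out if the hardware has completed nothing yet.
+        */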
+       if (txq->nb_tx_free == 0 && ice_xmit_cleanup(txq))
+               return 0;
+
+       nb_tx_to_clean = txq->nb_tx_free;
+       nb_tx_free_last = txq->nb_tx_free;
+       if (!free_cnt)
+               free_cnt = txq->nb_tx_desc;
+
+       /* Loop through swr_ring, freeing mbuf segments and counting
+        * the completed packets.
+        */
+       for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
+               for (i = 0; i < nb_tx_to_clean &&
+                       pkt_cnt < free_cnt &&
+                       tx_id != tx_last; i++) {
+                       if (swr_ring[tx_id].mbuf != NULL) {
+                               rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
+                               swr_ring[tx_id].mbuf = NULL;
+
+                               /*
+                                * last segment in the packet,
+                                * increment packet count
+                                */
+                               pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
+                       }
+
+                       tx_id = swr_ring[tx_id].next_id;
+               }
+
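+               /* Stop if fewer than tx_rs_thresh descriptors remain in
+                * use (ice_xmit_cleanup() cannot reclaim more) or the
+                * whole ring has been walked.
+                */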
+               if (txq->tx_rs_thresh > txq->nb_tx_desc -
+                       txq->nb_tx_free || tx_id == tx_last)
+                       break;
+
+               if (pkt_cnt < free_cnt) {
+                       if (ice_xmit_cleanup(txq))
+                               break;
+
+                       nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
+                       nb_tx_free_last = txq->nb_tx_free;
+               }
+       }
+
+       return (int)pkt_cnt;
+}
+
+#ifdef RTE_ARCH_X86
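+/* Forced buffer cleanup is not implemented for the vector Tx path. */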
+static int
+ice_tx_done_cleanup_vec(struct ice_tx_queue *txq __rte_unused,
+                       uint32_t free_cnt __rte_unused)
+{
+       return -ENOTSUP;
+}
+#endif
+
+static int
+ice_tx_done_cleanup_simple(struct ice_tx_queue *txq,
+                       uint32_t free_cnt)
+{
+       int i, n, cnt;
+
+       if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
+               free_cnt = txq->nb_tx_desc;
+
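+       /* ice_tx_free_bufs() releases mbufs in bursts of tx_rs_thresh,
+        * so round the requested count down to a whole number of bursts.
+        */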
+       cnt = free_cnt - free_cnt % txq->tx_rs_thresh;
+
+       for (i = 0; i < cnt; i += n) {
+               if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
+                       break;
+
+               n = ice_tx_free_bufs(txq);
+
+               if (n == 0)
+                       break;
+       }
+
+       return i;
+}
+
+int
+ice_tx_done_cleanup(void *txq, uint32_t free_cnt)
+{
+       struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
+       struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
+       struct ice_adapter *ad =
+               ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
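+       /* Dispatch to the cleanup routine matching the Tx path chosen
+        * for this queue: vector, simple, or full-featured.
+        */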
+#ifdef RTE_ARCH_X86
+       if (ad->tx_vec_allowed)
+               return ice_tx_done_cleanup_vec(q, free_cnt);
+#endif
+       if (ad->tx_simple_allowed)
+               return ice_tx_done_cleanup_simple(q, free_cnt);
+
+       return ice_tx_done_cleanup_full(q, free_cnt);
+}
+
 /* Populate 4 descriptors with data from 4 mbufs */
 static inline void
 tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index 9e3d2cd..0946ee6 100644
@@ -202,4 +202,6 @@ uint16_t ice_recv_scattered_pkts_vec_avx2(void *rx_queue,
 uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
                                uint16_t nb_pkts);
 int ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc);
+int ice_tx_done_cleanup(void *txq, uint32_t free_cnt);
+
 #endif /* _ICE_RXTX_H_ */