net/enic: revert mbuf fast free offload
author    Hyong Youb Kim <hyonkim@cisco.com>
          Thu, 26 Jul 2018 02:30:28 +0000 (19:30 -0700)
committer Thomas Monjalon <thomas@monjalon.net>
          Thu, 2 Aug 2018 08:26:02 +0000 (10:26 +0200)
This reverts the patch that enabled mbuf fast free.

There are two main reasons.

First, enic_fast_free_wq_bufs is broken. When
DEV_TX_OFFLOAD_MBUF_FAST_FREE is enabled, the driver calls this
function to free transmitted mbufs. The function currently does not
reset next and nb_segs, which is simply wrong: the fast-free flag
implies nothing about the state of next and nb_segs.

We could fix enic_fast_free_wq_bufs by making it call
rte_pktmbuf_prefree_seg to reset the required fields, but doing so
negates most of the cycle savings.
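
For illustration only, a minimal sketch of such a per-mbuf reset
(hypothetical helper name fast_free_bulk_fixed; not the committed
code):

    #include <rte_mbuf.h>
    #include <rte_mempool.h>

    /*
     * Hypothetical repaired bulk free (sketch, not the committed
     * code). rte_pktmbuf_prefree_seg() clears next and nb_segs when
     * the refcnt is 1, which the fast-free contract guarantees, so
     * it always returns the mbuf here. The extra per-mbuf pass is
     * what costs back the saving.
     */
    static void
    fast_free_bulk_fixed(struct rte_mbuf **m, unsigned int n)
    {
            unsigned int i;

            for (i = 0; i < n; i++)
                    m[i] = rte_pktmbuf_prefree_seg(m[i]);
            /* One mempool per queue under the fast-free contract. */
            rte_mempool_put_bulk(m[0]->pool, (void **)m, n);
    }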

Second, there are customer applications that blindly enable all Tx
offloads supported by the device. Some of these applications do not
satisfy the requirements of mbuf fast free (i.e., a single mempool per
queue and refcnt = 1) and end up crashing or misbehaving.
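
For contrast, a defensive application would gate the flag on both the
device capability and its own mbuf usage; a minimal sketch
(hypothetical helper, 18.08-era API names):

    #include <stdbool.h>
    #include <rte_ethdev.h>

    /*
     * Hypothetical helper (not from this commit): enable
     * DEV_TX_OFFLOAD_MBUF_FAST_FREE only when the device reports it
     * and the application really uses a single mempool per Tx queue
     * with refcnt == 1 mbufs.
     */
    static uint64_t
    select_tx_offloads(uint16_t port_id, bool single_pool_refcnt1)
    {
            struct rte_eth_dev_info dev_info;
            uint64_t offloads = 0;

            rte_eth_dev_info_get(port_id, &dev_info);
            if (single_pool_refcnt1 &&
                (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
                    offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
            return offloads;
    }

The returned mask would feed dev_conf.txmode.offloads (or the
per-queue tx_conf.offloads) instead of copying tx_offload_capa
wholesale.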

Fixes: bcaa54c1a148 ("net/enic: support mbuf fast free offload")
Signed-off-by: Hyong Youb Kim <hyonkim@cisco.com>
Reviewed-by: John Daley <johndale@cisco.com>
doc/guides/nics/features/enic.ini
doc/guides/rel_notes/release_18_08.rst
drivers/net/enic/enic_main.c
drivers/net/enic/enic_res.c
drivers/net/enic/enic_rxtx.c

diff --git a/doc/guides/nics/features/enic.ini b/doc/guides/nics/features/enic.ini
index d2b866b6646e0b3ee9e7c6d4a6b6a33ec87ff89d..8a4bad29fa25623189adfb17c61d43ed8516ef62 100644
@@ -6,7 +6,6 @@
 [Features]
 Link status          = Y
 Link status event    = Y
-Fast mbuf free       = Y
 Rx interrupt         = Y
 Queue start/stop     = Y
 MTU update           = Y
diff --git a/doc/guides/rel_notes/release_18_08.rst b/doc/guides/rel_notes/release_18_08.rst
index 5f240140103fac98a21d0492fdb378e9a06242ba..607845f1722e69be17c9c13a8c9464dd48678a68 100644
@@ -70,8 +70,7 @@ New Features
 
 * **Updated the enic driver.**
 
-  * Add support for mbuf fast free offload.
-  * Add low cycle count Tx handler for no-offload Tx (except mbuf fast free).
+  * Add low cycle count Tx handler for no-offload Tx.
   * Add low cycle count Rx handler for non-scattered Rx.
   * Minor performance improvements to scattered Rx handler.
   * Add handlers to add/delete VxLAN port number.
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index f04dc087822345dc640988822672b3d1ece6deb3..d9ce602ba1e659406967dceb8b6c56acdc895601 100644
@@ -572,11 +572,10 @@ int enic_enable(struct enic *enic)
        }
 
        /*
-        * Use the simple TX handler if possible. All offloads must be disabled
-        * except mbuf fast free.
+        * Use the simple TX handler if possible. All offloads must be
+        * disabled.
         */
-       if ((eth_dev->data->dev_conf.txmode.offloads &
-            ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) == 0) {
+       if (eth_dev->data->dev_conf.txmode.offloads == 0) {
                PMD_INIT_LOG(DEBUG, " use the simple tx handler");
                eth_dev->tx_pkt_burst = &enic_simple_xmit_pkts;
                for (index = 0; index < enic->wq_count; index++)
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 11d66a626081685d208b27937083d905c2fe1ccf..8d493ffed1f0278353e91885dccd878975e7eb74 100644
@@ -183,7 +183,7 @@ int enic_get_vnic_config(struct enic *enic)
         * Default hardware capabilities. enic_dev_init() may add additional
         * flags if it enables overlay offloads.
         */
-       enic->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+       enic->tx_queue_offload_capa = 0;
        enic->tx_offload_capa =
                enic->tx_queue_offload_capa |
                DEV_TX_OFFLOAD_MULTI_SEGS |
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 7ae03f31be5dbdc6514c8c5517e9f273aaaacb97..7129e1217cf117956e94d8a5adbf909f48964f03 100644
@@ -588,31 +588,6 @@ enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        return rx - rx_pkts;
 }
 
-static void enic_fast_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
-{
-       unsigned int desc_count, n, nb_to_free, tail_idx;
-       struct rte_mempool *pool;
-       struct rte_mbuf **m;
-
-       desc_count = wq->ring.desc_count;
-       nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
-                                  + 1;
-       tail_idx = wq->tail_idx;
-       wq->tail_idx += nb_to_free;
-       wq->ring.desc_avail += nb_to_free;
-       if (wq->tail_idx >= desc_count)
-               wq->tail_idx -= desc_count;
-       /* First, free at most until the end of ring */
-       m = &wq->bufs[tail_idx];
-       pool = (*m)->pool;
-       n = RTE_MIN(nb_to_free, desc_count - tail_idx);
-       rte_mempool_put_bulk(pool, (void **)m, n);
-       n = nb_to_free - n;
-       /* Then wrap and free the rest */
-       if (unlikely(n))
-               rte_mempool_put_bulk(pool, (void **)wq->bufs, n);
-}
-
 static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
 {
        struct rte_mbuf *buf;
@@ -660,10 +635,7 @@ unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
        completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;
 
        if (wq->last_completed_index != completed_index) {
-               if (wq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
-                       enic_fast_free_wq_bufs(wq, completed_index);
-               else
-                       enic_free_wq_bufs(wq, completed_index);
+               enic_free_wq_bufs(wq, completed_index);
                wq->last_completed_index = completed_index;
        }
        return 0;