From: Hyong Youb Kim Date: Thu, 26 Jul 2018 02:30:28 +0000 (-0700) Subject: net/enic: revert mbuf fast free offload X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=d16623dd393de3788abd619b80c20d2bbfffb4d4;p=dpdk.git net/enic: revert mbuf fast free offload This reverts the patch that enabled mbuf fast free. There are two main reasons. First, enic_fast_free_wq_bufs is broken. When DEV_TX_OFFLOAD_MBUF_FAST_FREE is enabled, the driver calls this function to free transmitted mbufs. This function currently does not reset next and nb_segs. This is simply wrong as the fast-free flag does not imply anything about next and nb_segs. We could fix enic_fast_free_wq_bufs by making it call rte_pktmbuf_prefree_seg to reset the required fields. But, it negates most of the cycle savings. Second, there are customer applications that blindly enable all Tx offloads supported by the device. Some of these applications do not satisfy the requirements of mbuf fast free (i.e. a single pool per queue and refcnt = 1), and end up crashing or behaving badly. Fixes: bcaa54c1a148 ("net/enic: support mbuf fast free offload") Signed-off-by: Hyong Youb Kim Reviewed-by: John Daley --- diff --git a/doc/guides/nics/features/enic.ini b/doc/guides/nics/features/enic.ini index d2b866b664..8a4bad29fa 100644 --- a/doc/guides/nics/features/enic.ini +++ b/doc/guides/nics/features/enic.ini @@ -6,7 +6,6 @@ [Features] Link status = Y Link status event = Y -Fast mbuf free = Y Rx interrupt = Y Queue start/stop = Y MTU update = Y diff --git a/doc/guides/rel_notes/release_18_08.rst b/doc/guides/rel_notes/release_18_08.rst index 5f24014010..607845f172 100644 --- a/doc/guides/rel_notes/release_18_08.rst +++ b/doc/guides/rel_notes/release_18_08.rst @@ -70,8 +70,7 @@ New Features * **Updated the enic driver.** - * Add support for mbuf fast free offload. - * Add low cycle count Tx handler for no-offload Tx (except mbuf fast free). + * Add low cycle count Tx handler for no-offload Tx. 
* Add low cycle count Rx handler for non-scattered Rx. * Minor performance improvements to scattered Rx handler. * Add handlers to add/delete VxLAN port number. diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c index f04dc08782..d9ce602ba1 100644 --- a/drivers/net/enic/enic_main.c +++ b/drivers/net/enic/enic_main.c @@ -572,11 +572,10 @@ int enic_enable(struct enic *enic) } /* - * Use the simple TX handler if possible. All offloads must be disabled - * except mbuf fast free. + * Use the simple TX handler if possible. All offloads must be + * disabled. */ - if ((eth_dev->data->dev_conf.txmode.offloads & - ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) == 0) { + if (eth_dev->data->dev_conf.txmode.offloads == 0) { PMD_INIT_LOG(DEBUG, " use the simple tx handler"); eth_dev->tx_pkt_burst = &enic_simple_xmit_pkts; for (index = 0; index < enic->wq_count; index++) diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c index 11d66a6260..8d493ffed1 100644 --- a/drivers/net/enic/enic_res.c +++ b/drivers/net/enic/enic_res.c @@ -183,7 +183,7 @@ int enic_get_vnic_config(struct enic *enic) * Default hardware capabilities. enic_dev_init() may add additional * flags if it enables overlay offloads. 
*/ - enic->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE; + enic->tx_queue_offload_capa = 0; enic->tx_offload_capa = enic->tx_queue_offload_capa | DEV_TX_OFFLOAD_MULTI_SEGS | diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c index 7ae03f31be..7129e1217c 100644 --- a/drivers/net/enic/enic_rxtx.c +++ b/drivers/net/enic/enic_rxtx.c @@ -588,31 +588,6 @@ enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, return rx - rx_pkts; } -static void enic_fast_free_wq_bufs(struct vnic_wq *wq, u16 completed_index) -{ - unsigned int desc_count, n, nb_to_free, tail_idx; - struct rte_mempool *pool; - struct rte_mbuf **m; - - desc_count = wq->ring.desc_count; - nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index) - + 1; - tail_idx = wq->tail_idx; - wq->tail_idx += nb_to_free; - wq->ring.desc_avail += nb_to_free; - if (wq->tail_idx >= desc_count) - wq->tail_idx -= desc_count; - /* First, free at most until the end of ring */ - m = &wq->bufs[tail_idx]; - pool = (*m)->pool; - n = RTE_MIN(nb_to_free, desc_count - tail_idx); - rte_mempool_put_bulk(pool, (void **)m, n); - n = nb_to_free - n; - /* Then wrap and free the rest */ - if (unlikely(n)) - rte_mempool_put_bulk(pool, (void **)wq->bufs, n); -} - static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index) { struct rte_mbuf *buf; @@ -660,10 +635,7 @@ unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq) completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff; if (wq->last_completed_index != completed_index) { - if (wq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) - enic_fast_free_wq_bufs(wq, completed_index); - else - enic_free_wq_bufs(wq, completed_index); + enic_free_wq_bufs(wq, completed_index); wq->last_completed_index = completed_index; } return 0;