net/bnxt: fix Rx performance by removing spinlock
author    Rahul Gupta <rahul.gupta@broadcom.com>
          Mon, 26 Oct 2020 03:56:16 +0000 (20:56 -0700)
committer Ferruh Yigit <ferruh.yigit@intel.com>
          Tue, 3 Nov 2020 22:35:03 +0000 (23:35 +0100)
The spinlock protected scenarios where Rx queue stop/start could be
initiated dynamically, but taking it in the per-burst fast path hurt
Rx performance. Assigning bnxt_dummy_recv_pkts and
bnxt_dummy_xmit_pkts immediately instead prevents concurrent mbuf
access between the Rx and cleanup paths and achieves the same result
without the locking overhead.
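
For reference, a minimal sketch of what the dummy burst handlers
amount to (their definitions are not shown in this diff, which only
moves their declarations into bnxt.h; the assumption is that they
simply report zero packets processed, so the datapath becomes a no-op
while the queues are quiesced):

    /* Sketch only: the real definitions live elsewhere in the
     * driver sources and are not part of this patch.
     */
    uint16_t bnxt_dummy_recv_pkts(void *rx_queue __rte_unused,
                                  struct rte_mbuf **rx_pkts __rte_unused,
                                  uint16_t nb_pkts __rte_unused)
    {
            return 0;       /* receive nothing */
    }

    uint16_t bnxt_dummy_xmit_pkts(void *tx_queue __rte_unused,
                                  struct rte_mbuf **tx_pkts __rte_unused,
                                  uint16_t nb_pkts __rte_unused)
    {
            return 0;       /* transmit nothing */
    }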

Fixes: 14255b351537 ("net/bnxt: fix queue start/stop operations")

Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
Signed-off-by: Rahul Gupta <rahul.gupta@broadcom.com>
drivers/net/bnxt/bnxt.h
drivers/net/bnxt/bnxt_cpr.c
drivers/net/bnxt/bnxt_cpr.h
drivers/net/bnxt/bnxt_rxq.c
drivers/net/bnxt/bnxt_rxq.h
drivers/net/bnxt/bnxt_rxr.c
drivers/net/bnxt/bnxt_rxr.h
drivers/net/bnxt/bnxt_txr.h

diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 5717819..90ced97 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -890,6 +890,10 @@ void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
 uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp);
 int bnxt_link_update_op(struct rte_eth_dev *eth_dev,
                        int wait_to_complete);
+uint16_t bnxt_dummy_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+                             uint16_t nb_pkts);
+uint16_t bnxt_dummy_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+                             uint16_t nb_pkts);
 
 extern const struct rte_flow_ops bnxt_flow_ops;
 
diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index 91d1ffe..ee96ae8 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -121,6 +121,12 @@ void bnxt_handle_async_event(struct bnxt *bp,
                PMD_DRV_LOG(INFO, "Port conn async event\n");
                break;
        case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY:
+               /*
+                * Avoid any rx/tx packet processing during firmware reset
+                * operation.
+                */
+               bnxt_stop_rxtx(bp);
+
                /* Ignore reset notify async events when stopping the port */
                if (!bp->eth_dev->data->dev_started) {
                        bp->flags |= BNXT_FLAG_FATAL_ERROR;
@@ -337,3 +343,9 @@ bool bnxt_is_recovery_enabled(struct bnxt *bp)
 
        return false;
 }
+
+void bnxt_stop_rxtx(struct bnxt *bp)
+{
+       bp->eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
+       bp->eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;
+}
diff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h
index cccd6cd..ff9697f 100644
--- a/drivers/net/bnxt/bnxt_cpr.h
+++ b/drivers/net/bnxt/bnxt_cpr.h
@@ -126,4 +126,5 @@ void bnxt_wait_for_device_shutdown(struct bnxt *bp);
 bool bnxt_is_recovery_enabled(struct bnxt *bp);
 bool bnxt_is_master_func(struct bnxt *bp);
 
+void bnxt_stop_rxtx(struct bnxt *bp);
 #endif
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index 7851414..e0ec342 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -210,8 +210,6 @@ void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
        if (!rxq || !rxq->rx_ring)
                return;
 
-       rte_spinlock_lock(&rxq->lock);
-
        sw_ring = rxq->rx_ring->rx_buf_ring;
        if (sw_ring) {
                for (i = 0;
@@ -248,7 +246,6 @@ void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
                }
        }
 
-       rte_spinlock_unlock(&rxq->lock);
 }
 
 void bnxt_free_rx_mbufs(struct bnxt *bp)
@@ -389,7 +386,6 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
                rxq->rx_started = true;
        }
        eth_dev->data->rx_queue_state[queue_idx] = queue_state;
-       rte_spinlock_init(&rxq->lock);
 
        /* Configure mtu if it is different from what was configured before */
        if (!queue_idx)
diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h
index 201bda2..c72105c 100644
--- a/drivers/net/bnxt/bnxt_rxq.h
+++ b/drivers/net/bnxt/bnxt_rxq.h
@@ -16,9 +16,6 @@ struct bnxt;
 struct bnxt_rx_ring_info;
 struct bnxt_cp_ring_info;
 struct bnxt_rx_queue {
-       rte_spinlock_t          lock;   /* Synchronize between rx_queue_stop
-                                        * and fast path
-                                        */
        struct rte_mempool      *mb_pool; /* mbuf pool for RX ring */
        uint64_t                mbuf_initializer; /* val to init mbuf */
        uint16_t                nb_rx_desc; /* num of RX desc */
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 4d0ab60..e375b31 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -843,8 +843,7 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                return 0;
 
        /* If Rx Q was stopped return */
-       if (unlikely(!rxq->rx_started ||
-                    !rte_spinlock_trylock(&rxq->lock)))
+       if (unlikely(!rxq->rx_started))
                return 0;
 
 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
@@ -946,8 +945,6 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        }
 
 done:
-       rte_spinlock_unlock(&rxq->lock);
-
        return nb_rx_pkts;
 }
 
diff --git a/drivers/net/bnxt/bnxt_rxr.h b/drivers/net/bnxt/bnxt_rxr.h
index f9e60d0..3fc901f 100644
--- a/drivers/net/bnxt/bnxt_rxr.h
+++ b/drivers/net/bnxt/bnxt_rxr.h
@@ -77,8 +77,6 @@ struct bnxt_rx_ring_info {
 
 uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                               uint16_t nb_pkts);
-uint16_t bnxt_dummy_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-                             uint16_t nb_pkts);
 void bnxt_free_rx_rings(struct bnxt *bp);
 int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id);
 int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq);
diff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h
index d241227..3dfc8ef 100644
--- a/drivers/net/bnxt/bnxt_txr.h
+++ b/drivers/net/bnxt/bnxt_txr.h
@@ -49,8 +49,6 @@ int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq);
 int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id);
 uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                               uint16_t nb_pkts);
-uint16_t bnxt_dummy_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
-                             uint16_t nb_pkts);
 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
 uint16_t bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                            uint16_t nb_pkts);