The spinlock was protecting scenarios where an Rx queue stop/start
could be initiated dynamically. Assigning bnxt_dummy_recv_pkts and
bnxt_dummy_xmit_pkts immediately instead, so that the Rx burst and
cleanup paths cannot access mbufs concurrently, should achieve the
same result (the dummy handlers are sketched after the tags below).
Fixes: 14255b351537 ("net/bnxt: fix queue start/stop operations")
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
Signed-off-by: Rahul Gupta <rahul.gupta@broadcom.com>
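For reference, the dummy burst handlers can be as simple as no-ops that
report zero packets, so a polling core keeps spinning harmlessly while
firmware resets. A minimal sketch, assuming the standard DPDK burst
signature (illustrative, not a verbatim excerpt of the bnxt driver):

#include <rte_common.h>
#include <rte_mbuf.h>

uint16_t
bnxt_dummy_recv_pkts(void *rx_queue __rte_unused,
                     struct rte_mbuf **rx_pkts __rte_unused,
                     uint16_t nb_pkts __rte_unused)
{
        /* Datapath quiesced (e.g. firmware reset): receive nothing. */
        return 0;
}

uint16_t
bnxt_dummy_xmit_pkts(void *tx_queue __rte_unused,
                     struct rte_mbuf **tx_pkts __rte_unused,
                     uint16_t nb_pkts __rte_unused)
{
        /* Datapath quiesced (e.g. firmware reset): accept no packets. */
        return 0;
}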
uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp);
int bnxt_link_update_op(struct rte_eth_dev *eth_dev,
                        int wait_to_complete);
+uint16_t bnxt_dummy_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+                              uint16_t nb_pkts);
+uint16_t bnxt_dummy_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+                              uint16_t nb_pkts);
extern const struct rte_flow_ops bnxt_flow_ops;
                PMD_DRV_LOG(INFO, "Port conn async event\n");
                break;
        case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY:
+               /*
+                * Avoid any rx/tx packet processing during firmware reset
+                * operation.
+                */
+               bnxt_stop_rxtx(bp);
+
                /* Ignore reset notify async events when stopping the port */
                if (!bp->eth_dev->data->dev_started) {
                        bp->flags |= BNXT_FLAG_FATAL_ERROR;
                        return false;
                }
+
+void bnxt_stop_rxtx(struct bnxt *bp)
+{
+        bp->eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
+        bp->eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;
+}
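The swap takes effect because the ethdev fast path reads the burst
function pointer on every poll. A simplified sketch of that dispatch
(rx_burst_sketch is a hypothetical stand-in for the inline
rte_eth_rx_burst of this era, which additionally handles queue-state
checks and Rx callbacks):

/* Each poll dereferences dev->rx_pkt_burst, so once bnxt_stop_rxtx()
 * installs the dummy handlers, subsequent polls return 0 packets
 * instead of touching the Rx ring.
 */
static inline uint16_t
rx_burst_sketch(struct rte_eth_dev *dev, uint16_t queue_id,
                struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
        return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
                                    rx_pkts, nb_pkts);
}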
bool bnxt_is_recovery_enabled(struct bnxt *bp);
bool bnxt_is_master_func(struct bnxt *bp);
+void bnxt_stop_rxtx(struct bnxt *bp);
#endif
        if (!rxq || !rxq->rx_ring)
                return;
-        rte_spinlock_lock(&rxq->lock);
-
        sw_ring = rxq->rx_ring->rx_buf_ring;
        if (sw_ring) {
                for (i = 0;
                }
        }
-        rte_spinlock_unlock(&rxq->lock);
}
void bnxt_free_rx_mbufs(struct bnxt *bp)
                rxq->rx_started = true;
        }
        eth_dev->data->rx_queue_state[queue_idx] = queue_state;
-        rte_spinlock_init(&rxq->lock);
        /* Configure mtu if it is different from what was configured before */
        if (!queue_idx)
struct bnxt_rx_ring_info;
struct bnxt_cp_ring_info;
struct bnxt_rx_queue {
-        rte_spinlock_t      lock; /* Synchronize between rx_queue_stop
-                                   * and fast path
-                                   */
        struct rte_mempool  *mb_pool; /* mbuf pool for RX ring */
        uint64_t            mbuf_initializer; /* val to init mbuf */
        uint16_t            nb_rx_desc; /* num of RX desc */
                return 0;
        /* If Rx Q was stopped return */
-        if (unlikely(!rxq->rx_started ||
-                     !rte_spinlock_trylock(&rxq->lock)))
+        if (unlikely(!rxq->rx_started))
                return 0;
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
        }
done:
-        rte_spinlock_unlock(&rxq->lock);
-
        return nb_rx_pkts;
}
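Taken together, the firmware-reset sequencing looks roughly as follows;
fw_reset_datapath_quiesce is a hypothetical illustration of the
ordering, not a driver function, and it assumes any in-flight burst
calls drain before cleanup starts:

/* Swapping the burst handlers first means no new datapath call can
 * enter bnxt_recv_pkts() while the cleanup path frees mbufs, which
 * is what makes the per-queue spinlock removable.
 */
static void fw_reset_datapath_quiesce(struct bnxt *bp)
{
        bnxt_stop_rxtx(bp);     /* install dummy Rx/Tx burst handlers */
        /* ... assume in-flight bursts have completed ... */
        bnxt_free_rx_mbufs(bp); /* cleanup now runs lock-free */
}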
uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                        uint16_t nb_pkts);
-uint16_t bnxt_dummy_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-                              uint16_t nb_pkts);
void bnxt_free_rx_rings(struct bnxt *bp);
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id);
int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq);
int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id);
uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        uint16_t nb_pkts);
-uint16_t bnxt_dummy_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
-                              uint16_t nb_pkts);
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
uint16_t bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                            uint16_t nb_pkts);