net/bnxt: check flush status during ring free
author Ajit Khaparde <ajit.khaparde@broadcom.com>
Thu, 4 Mar 2021 09:07:26 +0000 (14:37 +0530)
committer Ajit Khaparde <ajit.khaparde@broadcom.com>
Fri, 12 Mar 2021 06:13:27 +0000 (07:13 +0100)
When the host SW issues a HWRM_RING_FREE for a Tx/Rx/AGG ring, the FW
flushes the BDs associated with the ring and performs other cleanup in
the HW. The host software should ideally wait for an indication from
the FW that this step has completed successfully, to avoid unexpected
errors during cleanup.

The FW posts a HWRM_DONE response to the RING_FREE request on the
corresponding CQ ring. Poll the CQ during cleanup and consider the
HWRM_RING_FREE command complete based not just on the value of the
valid bit but also on the HWRM_DONE response for the ring.
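
In effect, the completion poll now gates on two conditions. A condensed
sketch of the polling loop, taken from the bnxt_hwrm_send_message()
change below (cpr, is_tx/is_rx and bnxt_check_cq_hwrm_done() are the
driver internals introduced by this patch; the surrounding setup is
omitted):

    /* Poll until the HWRM response is valid and, for a Tx/Rx/AGG ring
     * free, the HWRM_DONE completion has been swept from the CQ.
     */
    for (i = 0; i < timeout; i++) {
            /* Force "done" on the last pass so cleanup can still proceed. */
            int done = bnxt_check_cq_hwrm_done(cpr, is_tx, is_rx,
                                               i == timeout - 1);

            rte_io_rmb();
            if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
                    /* Last byte of resp contains the valid key. */
                    valid = (uint8_t *)resp + resp->resp_len - 1;
                    if (*valid == HWRM_RESP_VALID_KEY && done)
                            break;
            }
            rte_delay_us(1);
    }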

If the HWRM_DONE response is not seen before the command times out,
force the cleanup to complete based on the valid bit alone.
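
A condensed view of that fallback, from the bnxt_check_cq_hwrm_done()
helper added below (tx/rx select which flush helper sweeps the CQ;
timeout is true only on the final polling pass):

    int done = 0;

    if (cpr != NULL) {
            /* Sweep the CQ looking for the HWRM_DONE completion. */
            if (tx)
                    done = bnxt_flush_tx_cmp(cpr);
            if (rx)
                    done = bnxt_flush_rx_cmp(cpr);

            /* Last pass and still no HWRM_DONE: force completion so the
             * valid-bit based cleanup can finish.
             */
            if (!done && timeout)
                    done = 1;
    } else {
            /* Not a Tx/Rx/AGG ring free; nothing extra to wait for. */
            done = 1;
    }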

Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
drivers/net/bnxt/bnxt_hwrm.c
drivers/net/bnxt/bnxt_hwrm.h
drivers/net/bnxt/bnxt_rxr.c
drivers/net/bnxt/bnxt_rxr.h
drivers/net/bnxt/bnxt_txr.c
drivers/net/bnxt/bnxt_txr.h

index 0b5318e..02b0a21 100644 (file)
@@ -75,6 +75,82 @@ static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
        }
 }
 
+static struct bnxt_cp_ring_info*
+bnxt_get_ring_info_by_id(struct bnxt *bp, uint16_t rid, uint16_t type)
+{
+       struct bnxt_cp_ring_info *cp_ring = NULL;
+       uint16_t i;
+
+       switch (type) {
+       case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
+               /* FALLTHROUGH */
+       case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
+               for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+                       struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+
+                       if (rxq->cp_ring->cp_ring_struct->fw_ring_id ==
+                           rte_cpu_to_le_16(rid)) {
+                               return rxq->cp_ring;
+                       }
+               }
+               break;
+       case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
+               for (i = 0; i < bp->tx_cp_nr_rings; i++) {
+                       struct bnxt_tx_queue *txq = bp->tx_queues[i];
+
+                       if (txq->cp_ring->cp_ring_struct->fw_ring_id ==
+                           rte_cpu_to_le_16(rid)) {
+                               return txq->cp_ring;
+                       }
+               }
+               break;
+       default:
+               return cp_ring;
+       }
+       return cp_ring;
+}
+
+/* Complete a sweep of the CQ ring for the corresponding Tx/Rx/AGG ring.
+ * If CMPL_BASE_TYPE_HWRM_DONE is not encountered by the last pass before
+ * the timeout, force the done bit so that the cleanup can proceed.
+ * If cpr is NULL, do nothing; the HWRM command is not for a Tx/Rx/AGG
+ * ring cleanup.
+ */
+static int
+bnxt_check_cq_hwrm_done(struct bnxt_cp_ring_info *cpr,
+                       bool tx, bool rx, bool timeout)
+{
+       int done = 0;
+
+       if (cpr != NULL) {
+               if (tx)
+                       done = bnxt_flush_tx_cmp(cpr);
+
+               if (rx)
+                       done = bnxt_flush_rx_cmp(cpr);
+
+               if (done)
+                       PMD_DRV_LOG(DEBUG, "HWRM DONE for %s ring\n",
+                                   rx ? "Rx" : "Tx");
+
+               /* We are about to time out and still haven't seen the
+                * HWRM_DONE for the ring free. Force the cleanup.
+                */
+               if (!done && timeout) {
+                       done = 1;
+                       PMD_DRV_LOG(DEBUG, "Timing out for %s ring\n",
+                                   rx ? "Rx" : "Tx");
+               }
+       } else {
+               /* This HWRM command is not for a Tx/Rx/AGG ring cleanup.
+                * Otherwise the cpr would have been valid. So do nothing.
+                */
+               done = 1;
+       }
+
+       return done;
+}
+
 /*
  * HWRM Functions (sent to HWRM)
  * These are named bnxt_hwrm_*() and return 0 on success or -110 if the
@@ -97,6 +173,9 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
        uint16_t mb_trigger_offset = use_kong_mb ?
                GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
+       struct bnxt_cp_ring_info *cpr = NULL;
+       bool is_rx = false;
+       bool is_tx = false;
        uint32_t timeout;
 
        /* Do not send HWRM commands to firmware in error state */
@@ -153,14 +232,42 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
         */
        rte_io_mb();
 
+       /* Check ring flush is done.
+        * This is valid only for Tx and Rx rings (including AGG rings).
+        * The Tx and Rx rings should be freed once the HW confirms all
+        * the internal buffers and BDs associated with the rings are
+        * consumed and the corresponding DMA is handled.
+        */
+       if (rte_cpu_to_le_16(req->cmpl_ring) != INVALID_HW_RING_ID) {
+               /* Check if the TxCQ matches. If that fails, check if the RxCQ
+                * matches. If neither matches, is_rx and is_tx stay false.
+                */
+               cpr = bnxt_get_ring_info_by_id(bp, req->cmpl_ring,
+                                              HWRM_RING_FREE_INPUT_RING_TYPE_TX);
+               if (cpr == NULL) {
+                       /* Not a TxCQ. Check if the RxCQ matches. */
+                       cpr =
+                       bnxt_get_ring_info_by_id(bp, req->cmpl_ring,
+                                                HWRM_RING_FREE_INPUT_RING_TYPE_RX);
+                       if (cpr != NULL)
+                               is_rx = true;
+               } else {
+                       is_tx = true;
+               }
+       }
+
        /* Poll for the valid bit */
        for (i = 0; i < timeout; i++) {
+               int done;
+
+               done = bnxt_check_cq_hwrm_done(cpr, is_tx, is_rx,
+                                              i == timeout - 1);
                /* Sanity check on the resp->resp_len */
                rte_io_rmb();
                if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
-                       if (*valid == HWRM_RESP_VALID_KEY)
+                       if (*valid == HWRM_RESP_VALID_KEY && done)
                                break;
                }
                rte_delay_us(1);
@@ -1672,7 +1779,8 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
 }
 
 int bnxt_hwrm_ring_free(struct bnxt *bp,
-                       struct bnxt_ring *ring, uint32_t ring_type)
+                       struct bnxt_ring *ring, uint32_t ring_type,
+                       uint16_t cp_ring_id)
 {
        int rc;
        struct hwrm_ring_free_input req = {.req_type = 0 };
@@ -1682,6 +1790,7 @@ int bnxt_hwrm_ring_free(struct bnxt *bp,
 
        req.ring_type = ring_type;
        req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
+       req.cmpl_ring = rte_cpu_to_le_16(cp_ring_id);
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
@@ -2541,7 +2650,8 @@ void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
 
        bnxt_hwrm_ring_free(bp, cp_ring,
-                           HWRM_RING_FREE_INPUT_RING_TYPE_NQ);
+                           HWRM_RING_FREE_INPUT_RING_TYPE_NQ,
+                           INVALID_HW_RING_ID);
        cp_ring->fw_ring_id = INVALID_HW_RING_ID;
        memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
                                     sizeof(*cpr->cp_desc_ring));
@@ -2554,7 +2664,8 @@ void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
 
        bnxt_hwrm_ring_free(bp, cp_ring,
-                       HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
+                       HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL,
+                       INVALID_HW_RING_ID);
        cp_ring->fw_ring_id = INVALID_HW_RING_ID;
        memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
                        sizeof(*cpr->cp_desc_ring));
@@ -2571,7 +2682,8 @@ void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
 
        if (ring->fw_ring_id != INVALID_HW_RING_ID) {
                bnxt_hwrm_ring_free(bp, ring,
-                                   HWRM_RING_FREE_INPUT_RING_TYPE_RX);
+                                   HWRM_RING_FREE_INPUT_RING_TYPE_RX,
+                                   cpr->cp_ring_struct->fw_ring_id);
                ring->fw_ring_id = INVALID_HW_RING_ID;
                if (BNXT_HAS_RING_GRPS(bp))
                        bp->grp_info[queue_index].rx_fw_ring_id =
@@ -2582,7 +2694,8 @@ void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
                bnxt_hwrm_ring_free(bp, ring,
                                    BNXT_CHIP_P5(bp) ?
                                    HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
-                                   HWRM_RING_FREE_INPUT_RING_TYPE_RX);
+                                   HWRM_RING_FREE_INPUT_RING_TYPE_RX,
+                                   cpr->cp_ring_struct->fw_ring_id);
                if (BNXT_HAS_RING_GRPS(bp))
                        bp->grp_info[queue_index].ag_fw_ring_id =
                                                        INVALID_HW_RING_ID;
@@ -2607,7 +2720,8 @@ bnxt_free_all_hwrm_rings(struct bnxt *bp)
 
                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_hwrm_ring_free(bp, ring,
-                                       HWRM_RING_FREE_INPUT_RING_TYPE_TX);
+                                       HWRM_RING_FREE_INPUT_RING_TYPE_TX,
+                                       cpr->cp_ring_struct->fw_ring_id);
                        ring->fw_ring_id = INVALID_HW_RING_ID;
                        memset(txr->tx_desc_ring, 0,
                                        txr->tx_ring_struct->ring_size *
index 785e321..3d05acf 100644 (file)
@@ -162,7 +162,8 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         uint32_t stats_ctx_id, uint32_t cmpl_ring_id,
                         uint16_t tx_cosq_id);
 int bnxt_hwrm_ring_free(struct bnxt *bp,
-                       struct bnxt_ring *ring, uint32_t ring_type);
+                       struct bnxt_ring *ring, uint32_t ring_type,
+                       uint16_t cp_ring_id);
 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx);
 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx);
 
index 30d22b7..c72545a 100644 (file)
@@ -991,7 +991,9 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                                        cpr->cp_ring_struct->ring_mask,
                                        cpr->valid);
 
-               if ((CMP_TYPE(rxcmp) >= CMPL_BASE_TYPE_RX_TPA_START_V2) &&
+               if (CMP_TYPE(rxcmp) == CMPL_BASE_TYPE_HWRM_DONE) {
+                       PMD_DRV_LOG(ERR, "Rx flush done\n");
+               } else if ((CMP_TYPE(rxcmp) >= CMPL_BASE_TYPE_RX_TPA_START_V2) &&
                     (CMP_TYPE(rxcmp) <= RX_TPA_V2_ABUF_CMPL_TYPE_RX_TPA_AGG)) {
                        rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons);
                        if (!rc)
@@ -1291,3 +1293,35 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
 
        return 0;
 }
+
+/* Sweep the Rx completion queue till HWRM_DONE for ring flush is received.
+ * The mbufs will not be freed in this call.
+ * They will be freed during ring free as a part of mem cleanup.
+ */
+int bnxt_flush_rx_cmp(struct bnxt_cp_ring_info *cpr)
+{
+       struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
+       uint32_t ring_mask = cp_ring_struct->ring_mask;
+       uint32_t raw_cons = cpr->cp_raw_cons;
+       struct rx_pkt_cmpl *rxcmp;
+       uint32_t nb_rx = 0;
+       uint32_t cons;
+
+       do {
+               cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
+               rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+
+               if (CMP_TYPE(rxcmp) == CMPL_BASE_TYPE_HWRM_DONE)
+                       return 1;
+
+               raw_cons = NEXT_RAW_CMP(raw_cons);
+               nb_rx++;
+       } while (nb_rx < ring_mask);
+
+       cpr->cp_raw_cons = raw_cons;
+
+       /* Ring the completion queue doorbell. */
+       bnxt_db_cq(cpr);
+
+       return 0;
+}
index 06d1084..a6fdd77 100644 (file)
@@ -100,6 +100,7 @@ int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id);
 int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq);
 int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int bnxt_flush_rx_cmp(struct bnxt_cp_ring_info *cpr);
 
 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
 uint16_t bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
index 2810906..01db0cc 100644 (file)
@@ -569,3 +569,39 @@ int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 
        return 0;
 }
+
+/* Sweep the Tx completion queue till HWRM_DONE for ring flush is received.
+ * The mbufs will not be freed in this call.
+ * They will be freed during ring free as a part of mem cleanup.
+ */
+int bnxt_flush_tx_cmp(struct bnxt_cp_ring_info *cpr)
+{
+       uint32_t raw_cons = cpr->cp_raw_cons;
+       uint32_t cons;
+       uint32_t nb_tx_pkts = 0;
+       struct tx_cmpl *txcmp;
+       struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
+       struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
+       uint32_t ring_mask = cp_ring_struct->ring_mask;
+       uint32_t opaque = 0;
+
+       do {
+               cons = RING_CMPL(ring_mask, raw_cons);
+               txcmp = (struct tx_cmpl *)&cp_desc_ring[cons];
+
+               opaque = rte_cpu_to_le_32(txcmp->opaque);
+               raw_cons = NEXT_RAW_CMP(raw_cons);
+
+               if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2)
+                       nb_tx_pkts += opaque;
+               else if (CMP_TYPE(txcmp) == HWRM_CMPL_TYPE_HWRM_DONE)
+                       return 1;
+       } while (nb_tx_pkts < ring_mask);
+
+       if (nb_tx_pkts) {
+               cpr->cp_raw_cons = raw_cons;
+               bnxt_db_cq(cpr);
+       }
+
+       return 0;
+}
index 281a3e2..91e10db 100644 (file)
@@ -56,6 +56,7 @@ uint16_t bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int bnxt_flush_tx_cmp(struct bnxt_cp_ring_info *cpr);
 
 #define PKT_TX_OIP_IIP_TCP_UDP_CKSUM   (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
                                        PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)