git.droids-corp.org - dpdk.git/commitdiff
net/bnxt: maintain mbuf alloc failure counter per queue
author: Ajit Khaparde <ajit.khaparde@broadcom.com>
Tue, 17 Apr 2018 01:11:21 +0000 (18:11 -0700)
committer: Ferruh Yigit <ferruh.yigit@intel.com>
Fri, 27 Apr 2018 14:54:56 +0000 (15:54 +0100)
Currently we have a single counter for mbuf alloc failure.
Make it per RxQ instead.

Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
drivers/net/bnxt/bnxt.h
drivers/net/bnxt/bnxt_ethdev.c
drivers/net/bnxt/bnxt_rxq.c
drivers/net/bnxt/bnxt_rxq.h
drivers/net/bnxt/bnxt_rxr.c
drivers/net/bnxt/bnxt_stats.c

index d3eab8d364d284e99e7327787150037838f9b534..bdca2622fc05afa0c3b95daf752103da0d721462 100644 (file)
@@ -295,7 +295,6 @@ struct bnxt {
        uint16_t                geneve_fw_dst_port_id;
        uint32_t                fw_ver;
        uint32_t                hwrm_spec_code;
-       rte_atomic64_t          rx_mbuf_alloc_fail;
 
        struct bnxt_led_info    leds[BNXT_MAX_LED];
        uint8_t                 num_leds;
index b7aab65ababbfb6bb56ef0237881bfa4b40c9392..3cf845089c8c5463e4cde3e99fcb7b1dfe043f42 100644 (file)
@@ -3129,7 +3129,6 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
 
        bp = eth_dev->data->dev_private;
 
-       rte_atomic64_init(&bp->rx_mbuf_alloc_fail);
        bp->dev_stopped = 1;
 
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
index ce3f0a1d9108125d501417b57977207eb699bfd5..d797a47e99b9b101913d42ded463b228d11b3765 100644 (file)
@@ -336,6 +336,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
                rc = -ENOMEM;
                goto out;
        }
+       rte_atomic64_init(&rxq->rx_mbuf_alloc_fail);
 
 out:
        return rc;
index 616163e634c4dab65d46df256fdbc22d71cc7760..3350d77199e4f3bd4b8058d238bccfc3ed475972 100644 (file)
@@ -32,6 +32,7 @@ struct bnxt_rx_queue {
        uint32_t                        rx_buf_use_size;  /* useable size */
        struct bnxt_rx_ring_info        *rx_ring;
        struct bnxt_cp_ring_info        *cp_ring;
+       rte_atomic64_t          rx_mbuf_alloc_fail;
 };
 
 void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq);
index d9b4d768d695f7b8c791d789c6c24a974f05ef0f..4bc3204305179dd99792b86eded72c1f8a898361 100644 (file)
@@ -41,7 +41,7 @@ static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
 
        mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
        if (!mbuf) {
-               rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
+               rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
                return -ENOMEM;
        }
 
@@ -62,7 +62,7 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
 
        mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
        if (!mbuf) {
-               rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
+               rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
                return -ENOMEM;
        }
 
@@ -299,7 +299,7 @@ static inline struct rte_mbuf *bnxt_tpa_end(
        struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
        RTE_ASSERT(new_data != NULL);
        if (!new_data) {
-               rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
+               rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
                return NULL;
        }
        tpa_info->mbuf = new_data;
@@ -767,7 +767,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
                        rxr->tpa_info[i].mbuf =
                                __bnxt_alloc_rx_data(rxq->mb_pool);
                        if (!rxr->tpa_info[i].mbuf) {
-                               rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
+                               rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
                                return -ENOMEM;
                        }
                }
index 5a1c073886f80605b5db4f9c1f3e12426d1cb327..1b586f3336c6abec46d75b2cd66fc6e9aed8e6ca 100644 (file)
@@ -221,6 +221,8 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
                                     bnxt_stats, 1);
                if (unlikely(rc))
                        return rc;
+               bnxt_stats->rx_nombuf +=
+                               rte_atomic64_read(&rxq->rx_mbuf_alloc_fail);
        }
 
        for (i = 0; i < bp->tx_cp_nr_rings; i++) {
@@ -235,13 +237,13 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
        rc = bnxt_hwrm_func_qstats(bp, 0xffff, bnxt_stats);
        if (unlikely(rc))
                return rc;
-       bnxt_stats->rx_nombuf = rte_atomic64_read(&bp->rx_mbuf_alloc_fail);
        return rc;
 }
 
 void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
 {
        struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+       unsigned int i;
 
        if (!(bp->flags & BNXT_FLAG_INIT_DONE)) {
                PMD_DRV_LOG(ERR, "Device Initialization not complete!\n");
@@ -249,7 +251,11 @@ void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
        }
 
        bnxt_clear_all_hwrm_stat_ctxs(bp);
-       rte_atomic64_clear(&bp->rx_mbuf_alloc_fail);
+       for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+               struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+
+               rte_atomic64_clear(&rxq->rx_mbuf_alloc_fail);
+       }
 }
 
 int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,