net/qede: fix memory alloc for multiple port reconfig
author     Rasesh Mody <rasesh.mody@cavium.com>
           Thu, 7 Jun 2018 16:30:20 +0000 (09:30 -0700)
committer  Ferruh Yigit <ferruh.yigit@intel.com>
           Thu, 14 Jun 2018 17:27:50 +0000 (19:27 +0200)
Multiple port reconfigurations can lead to memory allocation failures
due to hitting the RTE memzone limit or running out of room in the
memzone config while reserving a memzone.

When freeing memzones, update the memzone mapping and the memzone count.
Release Rx and Tx queue rings allocated during queue setup.

Fixes: a39001d90dbd ("net/qede: fix DMA memory leak")
Cc: stable@dpdk.org
Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
Reviewed-by: Kevin Traynor <ktraynor@redhat.com>
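
For context on the first hunk below: the qede base driver keeps every memzone
it reserves in a global tracking array (ecore_mz_mapping) with a counter
(ecore_mz_count), so that osal_dma_free_mem() can map a DMA address back to
the memzone that has to be freed. A minimal sketch of the allocation-side
bookkeeping follows; apart from the array, the counter and the standard
rte_memzone calls, the details (name generation, error paths) are illustrative
and not the exact bcm_osal.c code:

	/* Illustrative sketch, not the exact bcm_osal.c implementation. */
	static const struct rte_memzone *ecore_mz_mapping[RTE_MAX_MEMZONE];
	static uint16_t ecore_mz_count;

	void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,
				      dma_addr_t *phys, size_t size)
	{
		char mz_name[RTE_MEMZONE_NAMESIZE];
		const struct rte_memzone *mz;

		if (ecore_mz_count >= RTE_MAX_MEMZONE) {
			DP_ERR(p_dev, "Memzone allocation count exceeded\n");
			return OSAL_NULL;
		}

		/* Hypothetical unique name for the reservation. */
		snprintf(mz_name, sizeof(mz_name), "qede_mz_%u", ecore_mz_count);
		mz = rte_memzone_reserve_aligned(mz_name, size, rte_socket_id(),
						 0, RTE_CACHE_LINE_SIZE);
		if (!mz) {
			DP_ERR(p_dev, "Failed to reserve memzone of size %zu\n",
			       size);
			return OSAL_NULL;
		}

		*phys = mz->iova;
		/* Each reservation occupies one tracking slot; the fix below
		 * makes osal_dma_free_mem() return the slot, so repeated port
		 * reconfiguration can no longer exhaust the array.
		 */
		ecore_mz_mapping[ecore_mz_count++] = mz;
		return mz->addr;
	}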
drivers/net/qede/base/bcm_osal.c
drivers/net/qede/qede_rxtx.c

diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c
index ca1c2b1..72627df 100644
@@ -201,6 +201,11 @@ void osal_dma_free_mem(struct ecore_dev *p_dev, dma_addr_t phys)
                        DP_VERBOSE(p_dev, ECORE_MSG_SP,
                                "Free memzone %s\n", ecore_mz_mapping[j]->name);
                        rte_memzone_free(ecore_mz_mapping[j]);
+                       while (j < ecore_mz_count - 1) {
+                               ecore_mz_mapping[j] = ecore_mz_mapping[j + 1];
+                               j++;
+                       }
+                       ecore_mz_count--;
                        return;
                }
        }
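
The hunk only shows the body of the match case; the loop that walks the
tracking array sits outside the diff context. Read as a whole, the post-fix
function looks roughly like the sketch below, where the match test against the
memzone's IOVA and the final error message are assumptions based on the
surrounding code rather than part of this diff:

	void osal_dma_free_mem(struct ecore_dev *p_dev, dma_addr_t phys)
	{
		uint16_t j;

		for (j = 0; j < ecore_mz_count; j++) {
			if (phys == ecore_mz_mapping[j]->iova) {
				DP_VERBOSE(p_dev, ECORE_MSG_SP,
					   "Free memzone %s\n",
					   ecore_mz_mapping[j]->name);
				rte_memzone_free(ecore_mz_mapping[j]);
				/* New with this fix: close the gap in the
				 * tracking array and decrement the count so
				 * the slot can be reused later.
				 */
				while (j < ecore_mz_count - 1) {
					ecore_mz_mapping[j] =
						ecore_mz_mapping[j + 1];
					j++;
				}
				ecore_mz_count--;
				return;
			}
		}

		DP_NOTICE(p_dev, false, "Unexpected memory free request\n");
	}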
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index bdb5d6f..4fa1c61 100644
@@ -192,9 +192,15 @@ static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
 void qede_rx_queue_release(void *rx_queue)
 {
        struct qede_rx_queue *rxq = rx_queue;
+       struct qede_dev *qdev = rxq->qdev;
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+       PMD_INIT_FUNC_TRACE(edev);
 
        if (rxq) {
                qede_rx_queue_release_mbufs(rxq);
+               qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
+               qdev->ops->common->chain_free(edev, &rxq->rx_comp_ring);
                rte_free(rxq->sw_rx_ring);
                rte_free(rxq);
        }
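
The two chain_free() calls added here release the descriptor rings that
qede_rx_queue_setup() allocates through the same common ops, keeping
allocation and release symmetric at the queue level. Roughly, the setup side
looks like the sketch below; the chain use/mode enum values and the trailing
ext_pbl argument are recalled from the setup path and may differ slightly in
the tree:

	/* Sketch of the matching allocations in qede_rx_queue_setup(). */
	rc = qdev->ops->common->chain_alloc(edev,
					    ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
					    ECORE_CHAIN_MODE_NEXT_PTR,
					    ECORE_CHAIN_CNT_TYPE_U16,
					    rxq->nb_rx_desc,
					    sizeof(struct eth_rx_bd),
					    &rxq->rx_bd_ring,
					    NULL);
	/* ...followed by the completion ring... */
	rc = qdev->ops->common->chain_alloc(edev,
					    ECORE_CHAIN_USE_TO_CONSUME,
					    ECORE_CHAIN_MODE_PBL,
					    ECORE_CHAIN_CNT_TYPE_U16,
					    rxq->nb_rx_desc,
					    sizeof(union eth_rx_cqe),
					    &rxq->rx_comp_ring,
					    NULL);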
@@ -350,9 +356,14 @@ static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
 void qede_tx_queue_release(void *tx_queue)
 {
        struct qede_tx_queue *txq = tx_queue;
+       struct qede_dev *qdev = txq->qdev;
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+       PMD_INIT_FUNC_TRACE(edev);
 
        if (txq) {
                qede_tx_queue_release_mbufs(txq);
+               qdev->ops->common->chain_free(edev, &txq->tx_pbl);
                rte_free(txq->sw_tx_ring);
                rte_free(txq);
        }
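
Likewise for Tx: the PBL chain freed here is allocated in
qede_tx_queue_setup(), roughly as below (same caveat about the recalled
argument list). With the chains freed at queue-release time, the explicit
frees in qede_dealloc_fp_resc() become redundant, which is what the remaining
hunks remove.

	/* Sketch of the matching allocation in qede_tx_queue_setup(). */
	rc = qdev->ops->common->chain_alloc(edev,
					    ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
					    ECORE_CHAIN_MODE_PBL,
					    ECORE_CHAIN_CNT_TYPE_U16,
					    txq->nb_tx_desc,
					    sizeof(union eth_tx_bd_types),
					    &txq->tx_pbl,
					    NULL);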
@@ -441,8 +452,6 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qede_fastpath *fp;
-       struct qede_rx_queue *rxq;
-       struct qede_tx_queue *txq;
        uint16_t sb_idx;
        uint8_t i;
 
@@ -467,21 +476,13 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                if (eth_dev->data->rx_queues[i]) {
                        qede_rx_queue_release(eth_dev->data->rx_queues[i]);
-                       rxq = eth_dev->data->rx_queues[i];
-                       qdev->ops->common->chain_free(edev,
-                                                     &rxq->rx_bd_ring);
-                       qdev->ops->common->chain_free(edev,
-                                                     &rxq->rx_comp_ring);
                        eth_dev->data->rx_queues[i] = NULL;
                }
        }
 
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                if (eth_dev->data->tx_queues[i]) {
-                       txq = eth_dev->data->tx_queues[i];
                        qede_tx_queue_release(eth_dev->data->tx_queues[i]);
-                       qdev->ops->common->chain_free(edev,
-                                                     &txq->tx_pbl);
                        eth_dev->data->tx_queues[i] = NULL;
                }
        }