X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fbnxt%2Fbnxt_txq.c;h=72a55ea643281846f6cf4d3543445f417d87f55c;hb=9fda31c3229ca6e036cae80392578ed6e5a51119;hp=8679ac91e7a3a7c6652dd69c684b78cb3c203320;hpb=2addc463eed9e2d7cbda25b7bf9d2ed1412b7236;p=dpdk.git

diff --git a/drivers/net/bnxt/bnxt_txq.c b/drivers/net/bnxt/bnxt_txq.c
index 8679ac91e7..72a55ea643 100644
--- a/drivers/net/bnxt/bnxt_txq.c
+++ b/drivers/net/bnxt/bnxt_txq.c
@@ -8,6 +8,7 @@
 #include <rte_malloc.h>
 
 #include "bnxt.h"
+#include "bnxt_hwrm.h"
 #include "bnxt_ring.h"
 #include "bnxt_txq.h"
 #include "bnxt_txr.h"
@@ -24,7 +25,7 @@ void bnxt_free_txq_stats(struct bnxt_tx_queue *txq)
 
 static void bnxt_tx_queue_release_mbufs(struct bnxt_tx_queue *txq)
 {
-	struct bnxt_sw_tx_bd *sw_ring;
+	struct rte_mbuf **sw_ring;
 	uint16_t i;
 
 	if (!txq || !txq->tx_ring)
@@ -33,9 +34,9 @@ static void bnxt_tx_queue_release_mbufs(struct bnxt_tx_queue *txq)
 	sw_ring = txq->tx_ring->tx_buf_ring;
 	if (sw_ring) {
 		for (i = 0; i < txq->tx_ring->tx_ring_struct->ring_size; i++) {
-			if (sw_ring[i].mbuf) {
-				rte_pktmbuf_free_seg(sw_ring[i].mbuf);
-				sw_ring[i].mbuf = NULL;
+			if (sw_ring[i]) {
+				rte_pktmbuf_free_seg(sw_ring[i]);
+				sw_ring[i] = NULL;
 			}
 		}
 	}
@@ -52,15 +53,16 @@ void bnxt_free_tx_mbufs(struct bnxt *bp)
 	}
 }
 
-void bnxt_tx_queue_release_op(void *tx_queue)
+void bnxt_tx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-	struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
+	struct bnxt_tx_queue *txq = dev->data->tx_queues[queue_idx];
 
 	if (txq) {
 		if (is_bnxt_in_error(txq->bp))
 			return;
 
 		/* Free TX ring hardware descriptors */
+		bnxt_free_hwrm_tx_ring(txq->bp, txq->queue_id);
 		bnxt_tx_queue_release_mbufs(txq);
 		if (txq->tx_ring) {
 			bnxt_free_ring(txq->tx_ring->tx_ring_struct);
@@ -81,6 +83,7 @@ void bnxt_tx_queue_release_op(void *tx_queue)
 
 		rte_free(txq->free);
 		rte_free(txq);
+		dev->data->tx_queues[queue_idx] = NULL;
 	}
 }
 
@@ -112,10 +115,8 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
 
 	if (eth_dev->data->tx_queues) {
 		txq = eth_dev->data->tx_queues[queue_idx];
-		if (txq) {
-			bnxt_tx_queue_release_op(txq);
-			txq = NULL;
-		}
+		if (txq)
+			bnxt_tx_queue_release_op(eth_dev, queue_idx);
 	}
 	txq = rte_zmalloc_socket("bnxt_tx_queue", sizeof(struct bnxt_tx_queue),
 				 RTE_CACHE_LINE_SIZE, socket_id);
@@ -124,6 +125,9 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
 		return -ENOMEM;
 	}
 
+	txq->bp = bp;
+	eth_dev->data->tx_queues[queue_idx] = txq;
+
 	txq->free = rte_zmalloc_socket(NULL,
 				       sizeof(struct rte_mbuf *) * nb_desc,
 				       RTE_CACHE_LINE_SIZE, socket_id);
@@ -132,7 +136,6 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
 		rc = -ENOMEM;
 		goto err;
 	}
-	txq->bp = bp;
 	txq->nb_tx_desc = nb_desc;
 	txq->tx_free_thresh =
 		RTE_MIN(rte_align32pow2(nb_desc) / 4, RTE_BNXT_MAX_TX_BURST);
@@ -149,8 +152,8 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
 	txq->port_id = eth_dev->data->port_id;
 
 	/* Allocate TX ring hardware descriptors */
-	if (bnxt_alloc_rings(bp, queue_idx, txq, NULL, txq->cp_ring, NULL,
-			     "txr")) {
+	if (bnxt_alloc_rings(bp, socket_id, queue_idx, txq, NULL, txq->cp_ring,
+			     NULL, "txr")) {
 		PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for tx_ring failed!");
 		rc = -ENOMEM;
 		goto err;
@@ -162,15 +165,8 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
 		goto err;
 	}
 
-	eth_dev->data->tx_queues[queue_idx] = txq;
-
-	if (txq->tx_deferred_start)
-		txq->tx_started = false;
-	else
-		txq->tx_started = true;
-
 	return 0;
 err:
-	bnxt_tx_queue_release_op(txq);
+	bnxt_tx_queue_release_op(eth_dev, queue_idx);
 	return rc;
 }
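
Note (not part of the patch): because bnxt_tx_queue_release_op() now takes the device and a queue index, looks the queue up in dev->data->tx_queues[], tolerates a NULL slot, and clears the slot itself, a teardown path could release all TX queues by index rather than by pointer. A minimal sketch under those assumptions follows; the helper name release_all_tx_queues() is hypothetical and only the fields shown in the diff are relied on.

#include <stdint.h>

#include <rte_ethdev.h>

#include "bnxt_txq.h"

/* Hypothetical helper: release every configured TX queue of a port.
 * bnxt_tx_queue_release_op() returns early for NULL slots and, with this
 * patch, sets dev->data->tx_queues[queue_idx] = NULL after freeing. */
static void release_all_tx_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		bnxt_tx_queue_release_op(dev, i);
}

Clearing the slot inside the release op is also what allows bnxt_tx_queue_setup_op() to call it when replacing an already-configured queue without leaving a stale pointer behind.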