memset((char *)*ring->vmem, 0, ring->vmem_size);
*ring->vmem = NULL;
}
- rte_memzone_free((const struct rte_memzone *)ring->mem_zone);
+ ring->mem_zone = NULL;
}
/*
* rx bd ring - Only non-zero length if rx_ring_info is not NULL
*/
int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
- struct bnxt_tx_ring_info *tx_ring_info,
- struct bnxt_rx_ring_info *rx_ring_info,
+ struct bnxt_tx_queue *txq,
+ struct bnxt_rx_queue *rxq,
struct bnxt_cp_ring_info *cp_ring_info,
const char *suffix)
{
struct bnxt_ring *cp_ring = cp_ring_info->cp_ring_struct;
+ struct bnxt_rx_ring_info *rx_ring_info = rxq ? rxq->rx_ring : NULL;
+ struct bnxt_tx_ring_info *tx_ring_info = txq ? txq->tx_ring : NULL;
struct bnxt_ring *tx_ring;
struct bnxt_ring *rx_ring;
struct rte_pci_device *pdev = bp->pdev;
}
if (tx_ring_info) {
+ txq->mz = mz;
tx_ring = tx_ring_info->tx_ring_struct;
tx_ring->bd = ((char *)mz->addr + tx_ring_start);
}
if (rx_ring_info) {
+ rxq->mz = mz;
rx_ring = rx_ring_info->rx_ring_struct;
rx_ring->bd = ((char *)mz->addr + rx_ring_start);
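The hunks above shift ownership of the rings' backing memory: bnxt_alloc_rings still carves per-ring regions out of a single memzone (tx_ring_start and rx_ring_start are offsets into mz->addr), but the memzone handle is now recorded in txq->mz / rxq->mz rather than each bnxt_ring freeing it later. A minimal sketch of the carve-out idea, assuming offsets are accumulated over optional segments; take_segment, next_off and the power-of-two align parameter are illustrative names, not the driver's code:

/* Illustrative only: one memzone, consecutive per-ring segments.  A segment
 * with zero length (e.g. the rx bd ring of a Tx-only queue, per the comment
 * above) consumes no space, so later offsets stay valid. */
#include <stddef.h>

static size_t take_segment(size_t *next_off, size_t len, size_t align)
{
	size_t start = (*next_off + align - 1) & ~(align - 1);

	*next_off = start + len;	/* reserve [start, start + len) */
	return start;			/* offset of this ring within mz->addr */
}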
void bnxt_free_ring(struct bnxt_ring *ring);
int bnxt_init_ring_grps(struct bnxt *bp);
int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
- struct bnxt_tx_ring_info *tx_ring_info,
- struct bnxt_rx_ring_info *rx_ring_info,
+ struct bnxt_tx_queue *txq,
+ struct bnxt_rx_queue *rxq,
struct bnxt_cp_ring_info *cp_ring_info,
const char *suffix);
int bnxt_alloc_hwrm_rings(struct bnxt *bp);
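One knock-on effect of the prototype change in the header hunk above: the declaration now names struct bnxt_tx_queue and struct bnxt_rx_queue. Because only pointers are passed, incomplete types suffice; whether the driver includes the queue headers or forward-declares the structs is not visible in this excerpt, so the snippet below is just one way it could look:

/* Sketch: forward declarations are enough for pointer parameters. */
struct bnxt_tx_queue;
struct bnxt_rx_queue;

int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
		     struct bnxt_tx_queue *txq,
		     struct bnxt_rx_queue *rxq,
		     struct bnxt_cp_ring_info *cp_ring_info,
		     const char *suffix);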
bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
bnxt_free_rxq_stats(rxq);
+ rte_memzone_free(rxq->mz);
+ rxq->mz = NULL;
rte_free(rxq);
}
eth_dev->data->rx_queues[queue_idx] = rxq;
/* Allocate RX ring hardware descriptors */
- if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq->rx_ring, rxq->cp_ring,
+ if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring,
"rxr")) {
PMD_DRV_LOG(ERR,
"ring_dma_zone_reserve for rx_ring failed!\n");
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_cp_ring_info *cp_ring;
rte_atomic64_t rx_mbuf_alloc_fail;
+ const struct rte_memzone *mz;
};
void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq);
bnxt_free_ring(txq->cp_ring->cp_ring_struct);
bnxt_free_txq_stats(txq);
+ rte_memzone_free(txq->mz);
+ txq->mz = NULL;
rte_free(txq);
}
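Both release paths above now free the queue-owned memzone exactly once and clear the pointer, so the rings themselves (torn down via bnxt_free_ring) no longer touch it. A hypothetical helper capturing that rule; queue_mz_release and the pointer-to-pointer style are a sketch, not part of the patch:

#include <rte_memzone.h>

/* Sketch only: single place that releases the memzone shared by a queue's
 * rings.  mz_ref would be &rxq->mz or &txq->mz; clearing the pointer after
 * the free keeps a repeated release call from freeing the zone twice. */
static void queue_mz_release(const struct rte_memzone **mz_ref)
{
	if (*mz_ref != NULL) {
		rte_memzone_free(*mz_ref);
		*mz_ref = NULL;
	}
}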
txq->port_id = eth_dev->data->port_id;
/* Allocate TX ring hardware descriptors */
- if (bnxt_alloc_rings(bp, queue_idx, txq->tx_ring, NULL, txq->cp_ring,
+ if (bnxt_alloc_rings(bp, queue_idx, txq, NULL, txq->cp_ring,
"txr")) {
PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for tx_ring failed!");
bnxt_tx_queue_release_op(txq);
unsigned int cp_nr_rings;
struct bnxt_cp_ring_info *cp_ring;
+ const struct rte_memzone *mz;
};
void bnxt_free_txq_stats(struct bnxt_tx_queue *txq);