* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <rte_malloc.h>
+
#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_hwrm.h"
void bnxt_free_def_cp_ring(struct bnxt *bp)
{
struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
- struct bnxt_ring *ring = cpr->cp_ring_struct;
- bnxt_free_ring(ring);
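+ /* Release the ring itself first, then the ring struct and the CP ring
+  * info allocated in bnxt_init_def_ring_struct().
+  */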
+ bnxt_free_ring(cpr->cp_ring_struct);
+ rte_free(cpr->cp_ring_struct);
+ rte_free(cpr);
}
/* For the default completion ring only */
-void bnxt_init_def_ring_struct(struct bnxt *bp)
+int bnxt_init_def_ring_struct(struct bnxt *bp, unsigned int socket_id)
{
- struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
- struct bnxt_ring *ring = cpr->cp_ring_struct;
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_ring *ring;
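+
+ /* Allocate the default CP ring info and its ring struct on the
+  * requested NUMA socket; both are freed in bnxt_free_def_cp_ring().
+  */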
+ cpr = rte_zmalloc_socket("cpr",
+ sizeof(struct bnxt_cp_ring_info),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (cpr == NULL)
+ return -ENOMEM;
+ bp->def_cp_ring = cpr;
+
+ ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
+ sizeof(struct bnxt_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (ring == NULL)
+ return -ENOMEM;
+ cpr->cp_ring_struct = ring;
ring->bd = (void *)cpr->cp_desc_ring;
ring->bd_dma = cpr->cp_desc_mapping;
ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE);
ring->ring_mask = ring->ring_size - 1;
ring->vmem_size = 0;
ring->vmem = NULL;
+
+ return 0;
}
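A rough usage sketch (not part of this patch; the helper names and the use of rte_socket_id() are assumptions for illustration) of how the new int return value and socket_id parameter are meant to be consumed, paired with bnxt_free_def_cp_ring() at teardown:

	/* Illustrative only: set up and tear down the default CP ring
	 * bookkeeping. Assumes "bnxt.h", "bnxt_cpr.h" and <rte_lcore.h>
	 * are included.
	 */
	static int bnxt_def_cp_ring_setup_sketch(struct bnxt *bp)
	{
		int rc;

		rc = bnxt_init_def_ring_struct(bp, rte_socket_id());
		if (rc)
			return rc;

		/* ... reserve DMA memory for the ring, register it with
		 * the firmware via HWRM, enable the interrupt, ...
		 */
		return 0;
	}

	static void bnxt_def_cp_ring_teardown_sketch(struct bnxt *bp)
	{
		bnxt_free_def_cp_ring(bp);
		bp->def_cp_ring = NULL;
	}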
struct bnxt;
void bnxt_free_def_cp_ring(struct bnxt *bp);
-void bnxt_init_def_ring_struct(struct bnxt *bp);
+int bnxt_init_def_ring_struct(struct bnxt *bp, unsigned int socket_id);
void bnxt_handle_async_event(struct bnxt *bp, struct cmpl_base *cmp);
void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmp);
memset((char *)*ring->vmem, 0, ring->vmem_size);
*ring->vmem = NULL;
}
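+ /* Return the descriptor memzone recorded in ring->mem_zone to the
+  * memzone allocator.
+  */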
+ rte_memzone_free((const struct rte_memzone *)ring->mem_zone);
}
/*
tx_ring_info->tx_desc_ring = (struct tx_bd_long *)tx_ring->bd;
tx_ring->bd_dma = mz->phys_addr + tx_ring_start;
tx_ring_info->tx_desc_mapping = tx_ring->bd_dma;
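+ /* Record the memzone so bnxt_free_ring() can release this ring's memory. */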
+ tx_ring->mem_zone = (const void *)mz;
if (!tx_ring->bd)
return -ENOMEM;
(struct rx_prod_pkt_bd *)rx_ring->bd;
rx_ring->bd_dma = mz->phys_addr + rx_ring_start;
rx_ring_info->rx_desc_mapping = rx_ring->bd_dma;
+ rx_ring->mem_zone = (const void *)mz;
if (!rx_ring->bd)
return -ENOMEM;
cp_ring->bd_dma = mz->phys_addr + cp_ring_start;
cp_ring_info->cp_desc_ring = cp_ring->bd;
cp_ring_info->cp_desc_mapping = cp_ring->bd_dma;
+ cp_ring->mem_zone = (const void *)mz;
if (!cp_ring->bd)
return -ENOMEM;
void **vmem;
uint16_t fw_ring_id; /* Ring id filled by Chimp FW */
+ const void *mem_zone; /* Memzone backing bd/bd_dma; released in bnxt_free_ring() */
};
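For context, a minimal sketch (the helper name, the 16-byte entry size and the 4K alignment are assumptions, not taken from this patch) of how the new mem_zone field pairs the reserve side with the rte_memzone_free() call added to bnxt_free_ring():

	static int bnxt_ring_dma_reserve_sketch(struct bnxt_ring *ring,
						const char *name, int socket_id)
	{
		const struct rte_memzone *mz;

		/* Reserve physically contiguous descriptor memory. */
		mz = rte_memzone_reserve_aligned(name, ring->ring_size * 16,
						 socket_id, 0, 4096);
		if (mz == NULL)
			return -ENOMEM;

		ring->bd = mz->addr;
		ring->bd_dma = mz->phys_addr;
		/* Recorded so bnxt_free_ring() can hand the zone back. */
		ring->mem_zone = (const void *)mz;

		return 0;
	}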
struct bnxt_ring_grp_info {
{
struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
- /* 'Unreserve' rte_memzone */
-
if (cpr->hw_stats)
cpr->hw_stats = NULL;
}
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt_rx_queue *rxq;
+ int rc = 0;
if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
RTE_LOG(ERR, PMD, "nb_desc %d is invalid", nb_desc);
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
if (eth_dev->data->rx_queues) {
RTE_CACHE_LINE_SIZE, socket_id);
if (!rxq) {
RTE_LOG(ERR, PMD, "bnxt_rx_queue allocation failed!");
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out;
}
rxq->bp = bp;
rxq->mb_pool = mp;
rxq->nb_rx_desc = nb_desc;
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
- bnxt_init_rx_ring_struct(rxq);
+ rc = bnxt_init_rx_ring_struct(rxq, socket_id);
+ if (rc)
+ goto out;
rxq->queue_id = queue_idx;
rxq->port_id = eth_dev->data->port_id;
"rxr")) {
RTE_LOG(ERR, PMD, "ring_dma_zone_reserve for rx_ring failed!");
bnxt_rx_queue_release_op(rxq);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out;
}
- return 0;
+out:
+ return rc;
}
if (!rxq)
continue;
- /* TODO: free() rxq->rx_ring and rxq->rx_ring->rx_ring_struct */
bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
- /* TODO: free() rxq->cp_ring and rxq->cp_ring->cp_ring_struct */
+ rte_free(rxq->rx_ring->rx_ring_struct);
+ rte_free(rxq->rx_ring);
+
bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
+ rte_free(rxq->cp_ring->cp_ring_struct);
+ rte_free(rxq->cp_ring);
rte_free(rxq);
bp->rx_queues[i] = NULL;
}
}
-void bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq)
+int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
{
struct bnxt *bp = rxq->bp;
struct bnxt_cp_ring_info *cpr;
(2 * VLAN_TAG_SIZE);
rxq->rx_buf_size = rxq->rx_buf_use_size + sizeof(struct rte_mbuf);
- rxr = rxq->rx_ring;
- ring = rxr->rx_ring_struct;
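+ /* Allocate the RX ring info and its ring struct on the queue's socket. */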
+ rxr = rte_zmalloc_socket("bnxt_rx_ring",
+ sizeof(struct bnxt_rx_ring_info),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxr == NULL)
+ return -ENOMEM;
+ rxq->rx_ring = rxr;
+
+ ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
+ sizeof(struct bnxt_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (ring == NULL)
+ return -ENOMEM;
+ rxr->rx_ring_struct = ring;
ring->ring_size = rte_align32pow2(rxq->nb_rx_desc);
ring->ring_mask = ring->ring_size - 1;
ring->bd = (void *)rxr->rx_desc_ring;
ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd);
ring->vmem = (void **)&rxr->rx_buf_ring;
- cpr = rxq->cp_ring;
- ring = cpr->cp_ring_struct;
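+ /* Allocate the completion ring paired with this RX ring; it holds
+  * twice as many entries as the RX ring.
+  */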
+ cpr = rte_zmalloc_socket("bnxt_rx_ring",
+ sizeof(struct bnxt_cp_ring_info),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (cpr == NULL)
+ return -ENOMEM;
+ rxq->cp_ring = cpr;
+
+ ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
+ sizeof(struct bnxt_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (ring == NULL)
+ return -ENOMEM;
+ cpr->cp_ring_struct = ring;
ring->ring_size = rxr->rx_ring_struct->ring_size * 2;
ring->ring_mask = ring->ring_size - 1;
ring->bd = (void *)cpr->cp_desc_ring;
ring->bd_dma = cpr->cp_desc_mapping;
ring->vmem_size = 0;
ring->vmem = NULL;
+
+ return 0;
}
static void bnxt_init_rxbds(struct bnxt_ring *ring, uint32_t type,
type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT | RX_PROD_PKT_BD_FLAGS_EOP_PAD;
- /* TODO: These need to be allocated */
rxr = rxq->rx_ring;
ring = rxr->rx_ring_struct;
bnxt_init_rxbds(ring, type, rxq->rx_buf_use_size);
uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
void bnxt_free_rx_rings(struct bnxt *bp);
-void bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq);
+int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id);
int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq);
#endif
{
struct bnxt_cp_ring_info *cpr = txq->cp_ring;
- /* 'Unreserve' rte_memzone */
-
if (cpr->hw_stats)
cpr->hw_stats = NULL;
}
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt_tx_queue *txq;
+ int rc = 0;
if (!nb_desc || nb_desc > MAX_TX_DESC_CNT) {
RTE_LOG(ERR, PMD, "nb_desc %d is invalid", nb_desc);
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
if (eth_dev->data->tx_queues) {
}
txq = rte_zmalloc_socket("bnxt_tx_queue", sizeof(struct bnxt_tx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
- if (txq == NULL) {
+ if (!txq) {
RTE_LOG(ERR, PMD, "bnxt_tx_queue allocation failed!");
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out;
}
txq->bp = bp;
txq->nb_tx_desc = nb_desc;
txq->tx_free_thresh = tx_conf->tx_free_thresh;
- bnxt_init_tx_ring_struct(txq);
+ rc = bnxt_init_tx_ring_struct(txq, socket_id);
+ if (rc)
+ goto out;
txq->queue_id = queue_idx;
txq->port_id = eth_dev->data->port_id;
"txr")) {
RTE_LOG(ERR, PMD, "ring_dma_zone_reserve for tx_ring failed!");
bnxt_tx_queue_release_op(txq);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out;
}
if (bnxt_init_one_tx_ring(txq)) {
RTE_LOG(ERR, PMD, "bnxt_init_one_tx_ring failed!");
bnxt_tx_queue_release_op(txq);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out;
}
eth_dev->data->tx_queues[queue_idx] = txq;
- return 0;
+
+out:
+ return rc;
}
continue;
bnxt_free_ring(txq->tx_ring->tx_ring_struct);
- /* TODO: free() txq->tx_ring and txq->tx_ring->tx_ring_struct */
+ rte_free(txq->tx_ring->tx_ring_struct);
+ rte_free(txq->tx_ring);
+
bnxt_free_ring(txq->cp_ring->cp_ring_struct);
- /* TODO: free() txq->cp_ring and txq->cp_ring->cp_ring_struct */
+ rte_free(txq->cp_ring->cp_ring_struct);
+ rte_free(txq->cp_ring);
rte_free(txq);
bp->tx_queues[i] = NULL;
return 0;
}
-void bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq)
+int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)
{
struct bnxt_cp_ring_info *cpr;
struct bnxt_tx_ring_info *txr;
struct bnxt_ring *ring;
- /* TODO: These need to be allocated */
- txr = txq->tx_ring;
- ring = txr->tx_ring_struct;
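+ /* Allocate the TX ring info and its ring struct on the queue's socket. */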
+ txr = rte_zmalloc_socket("bnxt_tx_ring",
+ sizeof(struct bnxt_tx_ring_info),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txr == NULL)
+ return -ENOMEM;
+ txq->tx_ring = txr;
+
+ ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
+ sizeof(struct bnxt_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (ring == NULL)
+ return -ENOMEM;
+ txr->tx_ring_struct = ring;
ring->ring_size = rte_align32pow2(txq->nb_tx_desc + 1);
ring->ring_mask = ring->ring_size - 1;
ring->bd = (void *)txr->tx_desc_ring;
ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_tx_bd);
ring->vmem = (void **)&txr->tx_buf_ring;
- /* TODO: These need to be allocated */
- cpr = txq->cp_ring;
- ring = cpr->cp_ring_struct;
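+ /* Allocate the completion ring paired with this TX ring; it is sized
+  * to match the TX ring.
+  */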
+ cpr = rte_zmalloc_socket("bnxt_tx_ring",
+ sizeof(struct bnxt_cp_ring_info),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (cpr == NULL)
+ return -ENOMEM;
+ txq->cp_ring = cpr;
+
+ ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
+ sizeof(struct bnxt_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (ring == NULL)
+ return -ENOMEM;
+ cpr->cp_ring_struct = ring;
ring->ring_size = txr->tx_ring_struct->ring_size;
ring->ring_mask = ring->ring_size - 1;
ring->bd = (void *)cpr->cp_desc_ring;
ring->bd_dma = cpr->cp_desc_mapping;
ring->vmem_size = 0;
ring->vmem = NULL;
+
+ return 0;
}
static inline uint32_t bnxt_tx_avail(struct bnxt_tx_ring_info *txr)
void bnxt_free_tx_rings(struct bnxt *bp);
int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq);
-void bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq);
+int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id);
uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);