mbuf: add raw allocation function
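
This change drops the driver-local bnx2x_rxmbuf_alloc() wrapper and calls
the public rte_mbuf_raw_alloc() helper from librte_mbuf instead, so the
PMD no longer reaches into the internal __rte_mbuf_raw_alloc() /
__rte_mbuf_sanity_check() pair directly. While here, the redundant
parentheses around the -ENOMEM return values are removed and explicit
(unsigned long) casts are added so the %lu conversions in the debug logs
match their arguments on all targets.

A minimal usage sketch (not part of this patch; refill_rx_slot() and its
parameters are illustrative only) of how a receive-ring slot is filled
with the new helper, mirroring the loop in bnx2x_dev_rx_queue_setup():

    #include <errno.h>
    #include <stdint.h>

    #include <rte_mbuf.h>
    #include <rte_mempool.h>

    /* Illustrative helper, not part of the driver: allocate an mbuf
     * straight from the pool without initializing its packet fields and
     * wire its bus address into the RX descriptor ring. */
    static int
    refill_rx_slot(struct rte_mempool *mp, struct rte_mbuf **sw_ring,
                   uint64_t *rx_ring, uint16_t idx)
    {
            struct rte_mbuf *m = rte_mbuf_raw_alloc(mp);

            if (m == NULL)
                    return -ENOMEM;         /* pool exhausted */

            sw_ring[idx] = m;               /* software bookkeeping copy */
            rx_ring[idx] = m->buf_physaddr; /* DMA address for the HW BD */
            return 0;
    }
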
diff --git a/drivers/net/bnx2x/bnx2x_rxtx.c b/drivers/net/bnx2x/bnx2x_rxtx.c
index 2d2b0db..55d2bd7 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.c
+++ b/drivers/net/bnx2x/bnx2x_rxtx.c
 #include "bnx2x.h"
 #include "bnx2x_rxtx.h"
 
-static inline struct rte_mbuf *
-bnx2x_rxmbuf_alloc(struct rte_mempool *mp)
-{
-       struct rte_mbuf *m;
-
-       m = __rte_mbuf_raw_alloc(mp);
-       __rte_mbuf_sanity_check(m, 0);
-
-       return m;
-}
-
 static const struct rte_memzone *
 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
                      uint16_t queue_id, uint32_t ring_size, int socket_id)
@@ -89,7 +78,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (NULL == rxq) {
                PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
-               return (-ENOMEM);
+               return -ENOMEM;
        }
        rxq->sc = sc;
        rxq->mb_pool = mp;
@@ -110,8 +99,10 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
        PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
                       "total_bd=%lu, rx_pages=%u, cq_pages=%u",
-                      queue_idx, nb_desc, rxq->rx_free_thresh, USABLE_RX_BD(rxq),
-                      TOTAL_RX_BD(rxq), rxq->nb_rx_pages, rxq->nb_cq_pages);
+                      queue_idx, nb_desc, rxq->rx_free_thresh,
+                      (unsigned long)USABLE_RX_BD(rxq),
+                      (unsigned long)TOTAL_RX_BD(rxq), rxq->nb_rx_pages,
+                      rxq->nb_cq_pages);
 
        /* Allocate RX ring hardware descriptors */
        dma_size = rxq->nb_rx_desc * sizeof(struct eth_rx_bd);
@@ -119,7 +110,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
        if (NULL == dma) {
                PMD_RX_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed!");
                bnx2x_rx_queue_release(rxq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
        fp->rx_desc_mapping = rxq->rx_ring_phys_addr = (uint64_t)dma->phys_addr;
        rxq->rx_ring = (uint64_t*)dma->addr;
@@ -140,18 +131,18 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
        if (NULL == rxq->sw_ring) {
                PMD_RX_LOG(ERR, "rte_zmalloc for sw_ring failed!");
                bnx2x_rx_queue_release(rxq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        /* Initialize software ring entries */
        rxq->rx_mbuf_alloc = 0;
        for (idx = 0; idx < rxq->nb_rx_desc; idx = NEXT_RX_BD(idx)) {
-               mbuf = bnx2x_rxmbuf_alloc(mp);
+               mbuf = rte_mbuf_raw_alloc(mp);
                if (NULL == mbuf) {
                        PMD_RX_LOG(ERR, "RX mbuf alloc failed queue_id=%u, idx=%d",
                                   (unsigned)rxq->queue_id, idx);
                        bnx2x_rx_queue_release(rxq);
-                       return (-ENOMEM);
+                       return -ENOMEM;
                }
                rxq->sw_ring[idx] = mbuf;
                rxq->rx_ring[idx] = mbuf->buf_physaddr;
@@ -167,7 +158,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
        dma = ring_dma_zone_reserve(dev, "bnx2x_rcq", queue_idx, dma_size, socket_id);
        if (NULL == dma) {
                PMD_RX_LOG(ERR, "RCQ  alloc failed");
-               return (-ENOMEM);
+               return -ENOMEM;
        }
        fp->rx_comp_mapping = rxq->cq_ring_phys_addr = (uint64_t)dma->phys_addr;
        rxq->cq_ring = (union eth_rx_cqe*)dma->addr;
@@ -276,7 +267,7 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
        txq = rte_zmalloc("ethdev TX queue", sizeof(struct bnx2x_tx_queue),
                          RTE_CACHE_LINE_SIZE);
        if (txq == NULL)
-               return (-ENOMEM);
+               return -ENOMEM;
        txq->sc = sc;
 
        txq->nb_tx_pages = 1;
@@ -291,15 +282,16 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
        PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
                     "total_bd=%lu, tx_pages=%u",
-                    queue_idx, nb_desc, txq->tx_free_thresh, USABLE_TX_BD(txq),
-                    TOTAL_TX_BD(txq), txq->nb_tx_pages);
+                    queue_idx, nb_desc, txq->tx_free_thresh,
+                    (unsigned long)USABLE_TX_BD(txq),
+                    (unsigned long)TOTAL_TX_BD(txq), txq->nb_tx_pages);
 
        /* Allocate TX ring hardware descriptors */
        tsize = txq->nb_tx_desc * sizeof(union eth_tx_bd_types);
        tz = ring_dma_zone_reserve(dev, "tx_hw_ring", queue_idx, tsize, socket_id);
        if (tz == NULL) {
                bnx2x_tx_queue_release(txq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
        fp->tx_desc_mapping = txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
        txq->tx_ring = (union eth_tx_bd_types *) tz->addr;
@@ -311,7 +303,7 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
                                   RTE_CACHE_LINE_SIZE);
        if (txq->sw_ring == NULL) {
                bnx2x_tx_queue_release(txq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        /* PMD_DRV_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
@@ -402,7 +394,7 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                len = cqe_fp->pkt_len_or_gro_seg_len;
                pad = cqe_fp->placement_offset;
 
-               new_mb = bnx2x_rxmbuf_alloc(rxq->mb_pool);
+               new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
                if (unlikely(!new_mb)) {
                        PMD_RX_LOG(ERR, "mbuf alloc fail fp[%02d]", fp->index);
                        goto next_rx;