diff --git a/drivers/net/bnx2x/bnx2x_rxtx.c b/drivers/net/bnx2x/bnx2x_rxtx.c
index 2d2b0db2e9..adf0309255 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.c
+++ b/drivers/net/bnx2x/bnx2x_rxtx.c
@@ -11,17 +11,6 @@
 #include "bnx2x.h"
 #include "bnx2x_rxtx.h"
 
-static inline struct rte_mbuf *
-bnx2x_rxmbuf_alloc(struct rte_mempool *mp)
-{
-	struct rte_mbuf *m;
-
-	m = __rte_mbuf_raw_alloc(mp);
-	__rte_mbuf_sanity_check(m, 0);
-
-	return m;
-}
-
 static const struct rte_memzone *
 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
 		      uint16_t queue_id, uint32_t ring_size, int socket_id)
@@ -30,7 +19,8 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
 	const struct rte_memzone *mz;
 
 	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-		dev->driver->pci_drv.name, ring_name, dev->data->port_id, queue_id);
+		 dev->driver->pci_drv.driver.name, ring_name,
+		 dev->data->port_id, queue_id);
 
 	mz = rte_memzone_lookup(z_name);
 	if (mz)
@@ -70,7 +60,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
 			 uint16_t nb_desc,
 			 unsigned int socket_id,
-			 const struct rte_eth_rxconf *rx_conf,
+			 __rte_unused const struct rte_eth_rxconf *rx_conf,
 			 struct rte_mempool *mp)
 {
 	uint16_t j, idx;
@@ -89,13 +79,12 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			RTE_CACHE_LINE_SIZE, socket_id);
 	if (NULL == rxq) {
 		PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
-		return (-ENOMEM);
+		return -ENOMEM;
 	}
 	rxq->sc = sc;
 	rxq->mb_pool = mp;
 	rxq->queue_id = queue_idx;
 	rxq->port_id = dev->data->port_id;
-	rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 : ETHER_CRC_LEN);
 
 	rxq->nb_rx_pages = 1;
 	while (USABLE_RX_BD(rxq) < nb_desc)
@@ -105,13 +94,11 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	sc->rx_ring_size = USABLE_RX_BD(rxq);
 	rxq->nb_cq_pages = RCQ_BD_PAGES(rxq);
 
-	rxq->rx_free_thresh = rx_conf->rx_free_thresh ? 
- rx_conf->rx_free_thresh : DEFAULT_RX_FREE_THRESH; - - PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, " + PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, usable_bd=%lu, " "total_bd=%lu, rx_pages=%u, cq_pages=%u", - queue_idx, nb_desc, rxq->rx_free_thresh, USABLE_RX_BD(rxq), - TOTAL_RX_BD(rxq), rxq->nb_rx_pages, rxq->nb_cq_pages); + queue_idx, nb_desc, (unsigned long)USABLE_RX_BD(rxq), + (unsigned long)TOTAL_RX_BD(rxq), rxq->nb_rx_pages, + rxq->nb_cq_pages); /* Allocate RX ring hardware descriptors */ dma_size = rxq->nb_rx_desc * sizeof(struct eth_rx_bd); @@ -119,7 +106,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev, if (NULL == dma) { PMD_RX_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed!"); bnx2x_rx_queue_release(rxq); - return (-ENOMEM); + return -ENOMEM; } fp->rx_desc_mapping = rxq->rx_ring_phys_addr = (uint64_t)dma->phys_addr; rxq->rx_ring = (uint64_t*)dma->addr; @@ -140,22 +127,20 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev, if (NULL == rxq->sw_ring) { PMD_RX_LOG(ERR, "rte_zmalloc for sw_ring failed!"); bnx2x_rx_queue_release(rxq); - return (-ENOMEM); + return -ENOMEM; } /* Initialize software ring entries */ - rxq->rx_mbuf_alloc = 0; for (idx = 0; idx < rxq->nb_rx_desc; idx = NEXT_RX_BD(idx)) { - mbuf = bnx2x_rxmbuf_alloc(mp); + mbuf = rte_mbuf_raw_alloc(mp); if (NULL == mbuf) { PMD_RX_LOG(ERR, "RX mbuf alloc failed queue_id=%u, idx=%d", (unsigned)rxq->queue_id, idx); bnx2x_rx_queue_release(rxq); - return (-ENOMEM); + return -ENOMEM; } rxq->sw_ring[idx] = mbuf; rxq->rx_ring[idx] = mbuf->buf_physaddr; - rxq->rx_mbuf_alloc++; } rxq->pkt_first_seg = NULL; rxq->pkt_last_seg = NULL; @@ -167,7 +152,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev, dma = ring_dma_zone_reserve(dev, "bnx2x_rcq", queue_idx, dma_size, socket_id); if (NULL == dma) { PMD_RX_LOG(ERR, "RCQ alloc failed"); - return (-ENOMEM); + return -ENOMEM; } fp->rx_comp_mapping = rxq->cq_ring_phys_addr = (uint64_t)dma->phys_addr; rxq->cq_ring = (union eth_rx_cqe*)dma->addr; @@ -220,40 +205,40 @@ bnx2x_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) struct bnx2x_tx_queue *txq; struct bnx2x_softc *sc; struct bnx2x_fastpath *fp; - uint32_t burst, nb_tx; - struct rte_mbuf **m = tx_pkts; - int ret; + uint16_t nb_tx_pkts; + uint16_t nb_pkt_sent = 0; + uint32_t ret; txq = p_txq; sc = txq->sc; fp = &sc->fp[txq->queue_id]; - nb_tx = nb_pkts; - - do { - burst = RTE_MIN(nb_pkts, RTE_PMD_BNX2X_TX_MAX_BURST); + if ((unlikely((txq->nb_tx_desc - txq->nb_tx_avail) > + txq->tx_free_thresh))) + bnx2x_txeof(sc, fp); - ret = bnx2x_tx_encap(txq, m, burst); - if (unlikely(ret)) { - PMD_TX_LOG(ERR, "tx_encap failed!"); - } - - bnx2x_update_fp_sb_idx(fp); - - if ((txq->nb_tx_desc - txq->nb_tx_avail) > txq->tx_free_thresh) { - bnx2x_txeof(sc, fp); - } + nb_tx_pkts = RTE_MIN(nb_pkts, txq->nb_tx_avail / BDS_PER_TX_PKT); + if (unlikely(nb_tx_pkts == 0)) + return 0; - if (unlikely(ret == -ENOMEM)) { - break; - } + while (nb_tx_pkts--) { + struct rte_mbuf *m = *tx_pkts++; + assert(m != NULL); + ret = bnx2x_tx_encap(txq, m); + fp->tx_db.data.prod += ret; + nb_pkt_sent++; + } - m += burst; - nb_pkts -= burst; + bnx2x_update_fp_sb_idx(fp); + mb(); + DOORBELL(sc, txq->queue_id, fp->tx_db.raw); + mb(); - } while (nb_pkts); + if ((txq->nb_tx_desc - txq->nb_tx_avail) > + txq->tx_free_thresh) + bnx2x_txeof(sc, fp); - return nb_tx - nb_pkts; + return nb_pkt_sent; } int @@ -276,7 +261,7 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev, txq = rte_zmalloc("ethdev TX queue", sizeof(struct bnx2x_tx_queue), 
RTE_CACHE_LINE_SIZE); if (txq == NULL) - return (-ENOMEM); + return -ENOMEM; txq->sc = sc; txq->nb_tx_pages = 1; @@ -288,18 +273,21 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev, txq->tx_free_thresh = tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH; + txq->tx_free_thresh = min(txq->tx_free_thresh, + txq->nb_tx_desc - BDS_PER_TX_PKT); PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, " "total_bd=%lu, tx_pages=%u", - queue_idx, nb_desc, txq->tx_free_thresh, USABLE_TX_BD(txq), - TOTAL_TX_BD(txq), txq->nb_tx_pages); + queue_idx, nb_desc, txq->tx_free_thresh, + (unsigned long)USABLE_TX_BD(txq), + (unsigned long)TOTAL_TX_BD(txq), txq->nb_tx_pages); /* Allocate TX ring hardware descriptors */ tsize = txq->nb_tx_desc * sizeof(union eth_tx_bd_types); tz = ring_dma_zone_reserve(dev, "tx_hw_ring", queue_idx, tsize, socket_id); if (tz == NULL) { bnx2x_tx_queue_release(txq); - return (-ENOMEM); + return -ENOMEM; } fp->tx_desc_mapping = txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr; txq->tx_ring = (union eth_tx_bd_types *) tz->addr; @@ -311,7 +299,7 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev, RTE_CACHE_LINE_SIZE); if (txq->sw_ring == NULL) { bnx2x_tx_queue_release(txq); - return (-ENOMEM); + return -ENOMEM; } /* PMD_DRV_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64, @@ -402,9 +390,11 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) len = cqe_fp->pkt_len_or_gro_seg_len; pad = cqe_fp->placement_offset; - new_mb = bnx2x_rxmbuf_alloc(rxq->mb_pool); + new_mb = rte_mbuf_raw_alloc(rxq->mb_pool); if (unlikely(!new_mb)) { PMD_RX_LOG(ERR, "mbuf alloc fail fp[%02d]", fp->index); + rte_eth_devices[rxq->port_id].data-> + rx_mbuf_alloc_failed++; goto next_rx; } @@ -424,7 +414,6 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) rx_mb->next = NULL; rx_mb->pkt_len = rx_mb->data_len = len; rx_mb->port = rxq->port_id; - rx_mb->buf_len = len + pad; rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *)); /*
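
Three details of this change are worth illustrating outside the driver source.

First, ring_dma_zone_reserve() keeps its lookup-then-reserve pattern while moving to the new rte_driver name field (pci_drv.driver.name). A minimal sketch of that pattern, assuming DPDK headers are available; dma_zone_reserve() and RING_ALIGN are illustrative names only, and the real driver derives the name parts from the ethdev and aligns rings to BNX2X_PAGE_SIZE:

/*
 * Lookup-then-reserve: memzone names must be unique system-wide and a
 * zone can outlive an application restart, so probe by name before
 * reserving a new one.
 */
#include <stdio.h>
#include <stdint.h>
#include <rte_memzone.h>

#define RING_ALIGN 4096	/* assumed page-sized ring alignment */

static const struct rte_memzone *
dma_zone_reserve(const char *drv_name, const char *ring_name,
		 uint16_t port_id, uint16_t queue_id,
		 uint32_t ring_size, int socket_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
		 drv_name, ring_name, port_id, queue_id);

	/* Reuse an already-reserved zone, e.g. after a restart. */
	mz = rte_memzone_lookup(z_name);
	if (mz)
		return mz;

	return rte_memzone_reserve_aligned(z_name, ring_size, socket_id,
					   0, RING_ALIGN);
}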
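
Second, bnx2x_xmit_pkts() is rewritten from a retry loop around multi-packet bnx2x_tx_encap() calls into a single pass: reclaim completed descriptors once the used-BD count crosses tx_free_thresh, clamp the burst to nb_tx_avail / BDS_PER_TX_PKT so encap cannot run out of BDs mid-burst, encap each mbuf while accumulating the doorbell producer, and ring the doorbell once per burst between memory barriers. A self-contained model of that accounting, with reclaim() and encap() as stand-ins for bnx2x_txeof() and bnx2x_tx_encap(), and an assumed BD-per-packet count:

#include <stdint.h>
#include <stdio.h>

#define BDS_PER_PKT 3	/* stand-in for BDS_PER_TX_PKT */

struct txq_model {
	uint16_t nb_tx_desc;
	uint16_t nb_tx_avail;
	uint16_t tx_free_thresh;
	uint32_t doorbell_prod;
};

/* stand-in for bnx2x_txeof(): pretend hardware completed everything */
static void reclaim(struct txq_model *q)
{
	q->nb_tx_avail = q->nb_tx_desc;
}

/* stand-in for bnx2x_tx_encap(): consume BDs, return producer delta */
static uint32_t encap(struct txq_model *q)
{
	q->nb_tx_avail -= BDS_PER_PKT;
	return BDS_PER_PKT;
}

static uint16_t xmit_burst(struct txq_model *q, uint16_t nb_pkts)
{
	uint16_t nb_tx, sent = 0;

	/* Reclaim first when the used-BD count crossed the threshold. */
	if (q->nb_tx_desc - q->nb_tx_avail > q->tx_free_thresh)
		reclaim(q);

	/* Clamp the burst: a packet may need several BDs, hence the
	 * division by BDS_PER_PKT (RTE_MIN in the real code). */
	nb_tx = nb_pkts;
	if (nb_tx > q->nb_tx_avail / BDS_PER_PKT)
		nb_tx = q->nb_tx_avail / BDS_PER_PKT;

	while (nb_tx--) {
		q->doorbell_prod += encap(q);
		sent++;
	}
	/* One doorbell write per burst goes here; the real code fences
	 * it with mb() and writes fp->tx_db.raw. */
	return sent;
}

int main(void)
{
	struct txq_model q = { 512, 512, 128, 0 };
	printf("sent %u\n", xmit_burst(&q, 200));	/* clamped to 170 */
	return 0;
}

Clamping up front is also why bnx2x_dev_tx_queue_setup() now caps tx_free_thresh at nb_tx_desc - BDS_PER_TX_PKT: a threshold the ring could never reach would keep bnx2x_txeof() from ever running and wedge the queue.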
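
Third, on the receive side the private bnx2x_rxmbuf_alloc() wrapper is dropped in favor of the public rte_mbuf_raw_alloc(), and an allocation failure now bumps the port's rx_mbuf_alloc_failed counter before the completion is skipped, leaving the old mbuf armed in the ring rather than leaking the descriptor. A sketch of that replenish step, assuming DPDK headers; replenish_or_recycle() is an illustrative helper, not a function from the driver:

/*
 * RX replenish: try to allocate a fresh mbuf for the descriptor; on
 * failure, account the drop in ethdev stats and keep the old mbuf in
 * the ring (the driver then skips the completion via "goto next_rx").
 */
#include <rte_branch_prediction.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static struct rte_mbuf *
replenish_or_recycle(struct rte_mempool *mp, uint16_t port_id,
		     struct rte_mbuf **ring_slot)
{
	struct rte_mbuf *new_mb = rte_mbuf_raw_alloc(mp);

	if (unlikely(new_mb == NULL)) {
		/* Surface the failure where rte_eth_stats_get() reports
		 * it, instead of losing it inside the PMD. */
		rte_eth_devices[port_id].data->rx_mbuf_alloc_failed++;
		return NULL;	/* caller reuses *ring_slot for this BD */
	}

	struct rte_mbuf *old_mb = *ring_slot;	/* handed up the stack */
	*ring_slot = new_mb;
	return old_mb;
}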