diff --git a/drivers/net/bnx2x/bnx2x_rxtx.c b/drivers/net/bnx2x/bnx2x_rxtx.c
index 93791276de..170e48fbd1 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.c
+++ b/drivers/net/bnx2x/bnx2x_rxtx.c
@@ -1,23 +1,16 @@
 /*
  * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
  *
+ * Copyright (c) 2015 QLogic Corporation.
  * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.bnx2x_pmd for copyright and licensing details.
  */
 
 #include "bnx2x.h"
 #include "bnx2x_rxtx.h"
 
-static inline struct rte_mbuf *
-bnx2x_rxmbuf_alloc(struct rte_mempool *mp)
-{
-	struct rte_mbuf *m;
-
-	m = __rte_mbuf_raw_alloc(mp);
-	__rte_mbuf_sanity_check(m, 0);
-
-	return m;
-}
-
 static const struct rte_memzone *
 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
 		      uint16_t queue_id, uint32_t ring_size, int socket_id)
@@ -26,7 +19,8 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
 	const struct rte_memzone *mz;
 
 	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-		dev->driver->pci_drv.name, ring_name, dev->data->port_id, queue_id);
+		dev->driver->pci_drv.driver.name, ring_name,
+		dev->data->port_id, queue_id);
 
 	mz = rte_memzone_lookup(z_name);
 	if (mz)
@@ -66,7 +60,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
 		       uint16_t queue_idx,
 		       uint16_t nb_desc,
 		       unsigned int socket_id,
-		       const struct rte_eth_rxconf *rx_conf,
+		       __rte_unused const struct rte_eth_rxconf *rx_conf,
 		       struct rte_mempool *mp)
 {
 	uint16_t j, idx;
@@ -85,13 +79,12 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			  RTE_CACHE_LINE_SIZE, socket_id);
 	if (NULL == rxq) {
 		PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
-		return (-ENOMEM);
+		return -ENOMEM;
 	}
 	rxq->sc = sc;
 	rxq->mb_pool = mp;
 	rxq->queue_id = queue_idx;
 	rxq->port_id = dev->data->port_id;
-	rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 : ETHER_CRC_LEN);
 
 	rxq->nb_rx_pages = 1;
 	while (USABLE_RX_BD(rxq) < nb_desc)
@@ -101,13 +94,11 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	sc->rx_ring_size = USABLE_RX_BD(rxq);
 	rxq->nb_cq_pages = RCQ_BD_PAGES(rxq);
 
-	rxq->rx_free_thresh = rx_conf->rx_free_thresh ?
-		rx_conf->rx_free_thresh : DEFAULT_RX_FREE_THRESH;
-
-	PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
+	PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, usable_bd=%lu, "
 		       "total_bd=%lu, rx_pages=%u, cq_pages=%u",
-		       queue_idx, nb_desc, rxq->rx_free_thresh, USABLE_RX_BD(rxq),
-		       TOTAL_RX_BD(rxq), rxq->nb_rx_pages, rxq->nb_cq_pages);
+		       queue_idx, nb_desc, (unsigned long)USABLE_RX_BD(rxq),
+		       (unsigned long)TOTAL_RX_BD(rxq), rxq->nb_rx_pages,
+		       rxq->nb_cq_pages);
 
 	/* Allocate RX ring hardware descriptors */
 	dma_size = rxq->nb_rx_desc * sizeof(struct eth_rx_bd);
@@ -115,7 +106,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	if (NULL == dma) {
 		PMD_RX_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed!");
 		bnx2x_rx_queue_release(rxq);
-		return (-ENOMEM);
+		return -ENOMEM;
 	}
 	fp->rx_desc_mapping = rxq->rx_ring_phys_addr = (uint64_t)dma->phys_addr;
 	rxq->rx_ring = (uint64_t*)dma->addr;
@@ -136,34 +127,32 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	if (NULL == rxq->sw_ring) {
 		PMD_RX_LOG(ERR, "rte_zmalloc for sw_ring failed!");
 		bnx2x_rx_queue_release(rxq);
-		return (-ENOMEM);
+		return -ENOMEM;
 	}
 
 	/* Initialize software ring entries */
-	rxq->rx_mbuf_alloc = 0;
 	for (idx = 0; idx < rxq->nb_rx_desc; idx = NEXT_RX_BD(idx)) {
-		mbuf = bnx2x_rxmbuf_alloc(mp);
+		mbuf = rte_mbuf_raw_alloc(mp);
 		if (NULL == mbuf) {
 			PMD_RX_LOG(ERR, "RX mbuf alloc failed queue_id=%u, idx=%d",
 				   (unsigned)rxq->queue_id, idx);
 			bnx2x_rx_queue_release(rxq);
-			return (-ENOMEM);
+			return -ENOMEM;
 		}
 		rxq->sw_ring[idx] = mbuf;
 		rxq->rx_ring[idx] = mbuf->buf_physaddr;
-		rxq->rx_mbuf_alloc++;
 	}
 	rxq->pkt_first_seg = NULL;
 	rxq->pkt_last_seg = NULL;
 	rxq->rx_bd_head = 0;
-	rxq->rx_bd_tail = idx;
+	rxq->rx_bd_tail = rxq->nb_rx_desc;
 
 	/* Allocate CQ chain. */
 	dma_size = BNX2X_RX_CHAIN_PAGE_SZ * rxq->nb_cq_pages;
 	dma = ring_dma_zone_reserve(dev, "bnx2x_rcq", queue_idx, dma_size, socket_id);
 	if (NULL == dma) {
 		PMD_RX_LOG(ERR, "RCQ alloc failed");
-		return (-ENOMEM);
+		return -ENOMEM;
 	}
 	fp->rx_comp_mapping = rxq->cq_ring_phys_addr = (uint64_t)dma->phys_addr;
 	rxq->cq_ring = (union eth_rx_cqe*)dma->addr;
@@ -216,40 +205,40 @@ bnx2x_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	struct bnx2x_tx_queue *txq;
 	struct bnx2x_softc *sc;
 	struct bnx2x_fastpath *fp;
-	uint32_t burst, nb_tx;
-	struct rte_mbuf **m = tx_pkts;
-	int ret;
+	uint16_t nb_tx_pkts;
+	uint16_t nb_pkt_sent = 0;
+	uint32_t ret;
 
 	txq = p_txq;
 	sc = txq->sc;
 	fp = &sc->fp[txq->queue_id];
 
-	nb_tx = nb_pkts;
+	if ((unlikely((txq->nb_tx_desc - txq->nb_tx_avail) >
+				txq->tx_free_thresh)))
+		bnx2x_txeof(sc, fp);
 
-	do {
-		burst = RTE_MIN(nb_pkts, RTE_PMD_BNX2X_TX_MAX_BURST);
+	nb_tx_pkts = RTE_MIN(nb_pkts, txq->nb_tx_avail / BDS_PER_TX_PKT);
+	if (unlikely(nb_tx_pkts == 0))
+		return 0;
 
-		ret = bnx2x_tx_encap(txq, m, burst);
-		if (unlikely(ret)) {
-			PMD_TX_LOG(ERR, "tx_encap failed!");
-		}
-
-		bnx2x_update_fp_sb_idx(fp);
-
-		if ((txq->nb_tx_desc - txq->nb_tx_avail) > txq->tx_free_thresh) {
-			bnx2x_txeof(sc, fp);
-		}
-
-		if (unlikely(ret == ENOMEM)) {
-			break;
-		}
+	while (nb_tx_pkts--) {
+		struct rte_mbuf *m = *tx_pkts++;
+		assert(m != NULL);
+		ret = bnx2x_tx_encap(txq, m);
+		fp->tx_db.data.prod += ret;
+		nb_pkt_sent++;
+	}
 
-		m += burst;
-		nb_pkts -= burst;
+	bnx2x_update_fp_sb_idx(fp);
+	mb();
+	DOORBELL(sc, txq->queue_id, fp->tx_db.raw);
+	mb();
 
-	} while (nb_pkts);
+	if ((txq->nb_tx_desc - txq->nb_tx_avail) >
+			txq->tx_free_thresh)
+		bnx2x_txeof(sc, fp);
 
-	return nb_tx - nb_pkts;
+	return nb_pkt_sent;
 }
 
 int
@@ -272,7 +261,7 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq = rte_zmalloc("ethdev TX queue", sizeof(struct bnx2x_tx_queue),
 			  RTE_CACHE_LINE_SIZE);
 	if (txq == NULL)
-		return (-ENOMEM);
+		return -ENOMEM;
 	txq->sc = sc;
 
 	txq->nb_tx_pages = 1;
@@ -287,15 +276,16 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
 	PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
 		     "total_bd=%lu, tx_pages=%u",
-		     queue_idx, nb_desc, txq->tx_free_thresh, USABLE_TX_BD(txq),
-		     TOTAL_TX_BD(txq), txq->nb_tx_pages);
+		     queue_idx, nb_desc, txq->tx_free_thresh,
+		     (unsigned long)USABLE_TX_BD(txq),
+		     (unsigned long)TOTAL_TX_BD(txq), txq->nb_tx_pages);
 
 	/* Allocate TX ring hardware descriptors */
 	tsize = txq->nb_tx_desc * sizeof(union eth_tx_bd_types);
 	tz = ring_dma_zone_reserve(dev, "tx_hw_ring", queue_idx, tsize, socket_id);
 	if (tz == NULL) {
 		bnx2x_tx_queue_release(txq);
-		return (-ENOMEM);
+		return -ENOMEM;
 	}
 	fp->tx_desc_mapping = txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
 	txq->tx_ring = (union eth_tx_bd_types *) tz->addr;
@@ -307,7 +297,7 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			    RTE_CACHE_LINE_SIZE);
 	if (txq->sw_ring == NULL) {
 		bnx2x_tx_queue_release(txq);
-		return (-ENOMEM);
+		return -ENOMEM;
 	}
 
 	/* PMD_DRV_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
@@ -374,6 +364,9 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	sw_cq_cons = rxq->rx_cq_head;
 	sw_cq_prod = rxq->rx_cq_tail;
 
+	if (sw_cq_cons == hw_cq_cons)
+		return 0;
+
 	while (nb_rx < nb_pkts && sw_cq_cons != hw_cq_cons) {
 
 		bd_prod &= MAX_RX_BD(rxq);
@@ -395,9 +388,11 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		len = cqe_fp->pkt_len_or_gro_seg_len;
 		pad = cqe_fp->placement_offset;
 
-		new_mb = bnx2x_rxmbuf_alloc(rxq->mb_pool);
+		new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
 		if (unlikely(!new_mb)) {
 			PMD_RX_LOG(ERR, "mbuf alloc fail fp[%02d]", fp->index);
+			rte_eth_devices[rxq->port_id].data->
+					rx_mbuf_alloc_failed++;
 			goto next_rx;
 		}
 
@@ -417,7 +412,6 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		rx_mb->next = NULL;
 		rx_mb->pkt_len = rx_mb->data_len = len;
 		rx_mb->port = rxq->port_id;
-		rx_mb->buf_len = len + pad;
 		rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
 
 		/*
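
Note on the mbuf allocation change above: the driver-local bnx2x_rxmbuf_alloc() wrapper (raw alloc plus sanity check) is replaced by rte_mbuf_raw_alloc(), the public API that performs the same raw allocation without the header reset done by rte_pktmbuf_alloc(). A minimal, self-contained sketch of that call from application code follows; it is illustrative only, and the pool name and sizes are not taken from the patch:

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

int
main(int argc, char **argv)
{
	struct rte_mempool *mp;
	struct rte_mbuf *m;

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* illustrative pool: 1024 mbufs, 32-entry per-core cache */
	mp = rte_pktmbuf_pool_create("refill_pool", 1024, 32, 0,
				     RTE_MBUF_DEFAULT_BUF_SIZE,
				     rte_socket_id());
	if (mp == NULL)
		return -1;

	/* raw alloc: the mbuf comes back with refcnt 1, but data offset
	 * and lengths are not reset, which is all a ring-refill path needs */
	m = rte_mbuf_raw_alloc(mp);
	if (m != NULL)
		rte_pktmbuf_free(m);

	return 0;
}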
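The reworked bnx2x_xmit_pkts() also changes the doorbell discipline: descriptors for the whole burst are written first, and the device is notified once per burst (DOORBELL between two mb() barriers) rather than once per packet. A generic, self-contained sketch of that pattern is below; every name in it (tx_desc, txq, encode_desc, xmit_burst, doorbell_reg) is a hypothetical stand-in, not a bnx2x symbol:

#include <stdint.h>

#define RING_SIZE 256	/* illustrative; must be a power of two */

struct tx_desc {
	uint64_t addr;
	uint32_t len;
	uint32_t flags;
};

struct txq {
	struct tx_desc ring[RING_SIZE];
	uint16_t prod;				/* next descriptor to fill */
	volatile uint32_t *doorbell_reg;	/* device doorbell (MMIO) */
};

static void
encode_desc(struct tx_desc *d, uint64_t dma, uint32_t len)
{
	d->addr = dma;
	d->len = len;
	d->flags = 1u;	/* e.g. a start/end-of-packet bit, illustrative */
}

static uint16_t
xmit_burst(struct txq *q, const uint64_t *dma, const uint32_t *len, uint16_t n)
{
	uint16_t i;

	/* 1) fill every descriptor in the burst */
	for (i = 0; i < n; i++)
		encode_desc(&q->ring[(q->prod + i) & (RING_SIZE - 1)],
			    dma[i], len[i]);
	q->prod = (uint16_t)(q->prod + n);

	/* 2) order the descriptor writes before the doorbell write,
	 * then ring the doorbell once for the whole burst */
	__atomic_thread_fence(__ATOMIC_RELEASE);
	*q->doorbell_reg = q->prod;

	return n;
}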