X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fbnx2x%2Fbnx2x_rxtx.c;h=57e2ce50458756deb1c8a586337f33cf4eb4056d;hb=63d1db710fbc0a05d187fef37fbc4417cf7388b1;hp=2d2b0db2e92073dd457ef2c5a16f896ea2d3f7e1;hpb=059113cced1c6a1ee98eb4e0446fe541f286691c;p=dpdk.git

diff --git a/drivers/net/bnx2x/bnx2x_rxtx.c b/drivers/net/bnx2x/bnx2x_rxtx.c
index 2d2b0db2e9..57e2ce5045 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.c
+++ b/drivers/net/bnx2x/bnx2x_rxtx.c
@@ -1,42 +1,19 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- *
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
  * All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ * www.cavium.com
  */
 
 #include "bnx2x.h"
 #include "bnx2x_rxtx.h"
 
-static inline struct rte_mbuf *
-bnx2x_rxmbuf_alloc(struct rte_mempool *mp)
-{
-	struct rte_mbuf *m;
-
-	m = __rte_mbuf_raw_alloc(mp);
-	__rte_mbuf_sanity_check(m, 0);
-
-	return m;
-}
-
 static const struct rte_memzone *
 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
 		      uint16_t queue_id, uint32_t ring_size, int socket_id)
 {
-	char z_name[RTE_MEMZONE_NAMESIZE];
-	const struct rte_memzone *mz;
-
-	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-		dev->driver->pci_drv.name, ring_name, dev->data->port_id, queue_id);
-
-	mz = rte_memzone_lookup(z_name);
-	if (mz)
-		return mz;
-
-	return rte_memzone_reserve_aligned(z_name, ring_size, socket_id, 0, BNX2X_PAGE_SIZE);
+	return rte_eth_dma_zone_reserve(dev, ring_name, queue_id,
+			ring_size, BNX2X_PAGE_SIZE, socket_id);
 }
 
 static void
@@ -70,7 +47,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
 		       uint16_t queue_idx,
 		       uint16_t nb_desc,
 		       unsigned int socket_id,
-		       const struct rte_eth_rxconf *rx_conf,
+		       __rte_unused const struct rte_eth_rxconf *rx_conf,
 		       struct rte_mempool *mp)
 {
 	uint16_t j, idx;
@@ -81,21 +58,20 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	struct bnx2x_softc *sc = dev->data->dev_private;
 	struct bnx2x_fastpath *fp = &sc->fp[queue_idx];
 	struct eth_rx_cqe_next_page *nextpg;
-	phys_addr_t *rx_bd;
-	phys_addr_t busaddr;
+	rte_iova_t *rx_bd;
+	rte_iova_t busaddr;
 
 	/* First allocate the rx queue data structure */
 	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct bnx2x_rx_queue),
 				 RTE_CACHE_LINE_SIZE, socket_id);
 	if (NULL == rxq) {
-		PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
-		return (-ENOMEM);
+		PMD_DRV_LOG(ERR, sc, "rte_zmalloc for rxq failed!");
+		return -ENOMEM;
 	}
 	rxq->sc = sc;
 	rxq->mb_pool = mp;
 	rxq->queue_id = queue_idx;
 	rxq->port_id = dev->data->port_id;
-	rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 : ETHER_CRC_LEN);
 
 	rxq->nb_rx_pages = 1;
 	while (USABLE_RX_BD(rxq) < nb_desc)
@@ -105,13 +81,11 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	sc->rx_ring_size = USABLE_RX_BD(rxq);
 	rxq->nb_cq_pages = RCQ_BD_PAGES(rxq);
 
-	rxq->rx_free_thresh = rx_conf->rx_free_thresh ?
-		rx_conf->rx_free_thresh : DEFAULT_RX_FREE_THRESH;
-
-	PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
+	PMD_DRV_LOG(DEBUG, sc, "fp[%02d] req_bd=%u, usable_bd=%lu, "
 		       "total_bd=%lu, rx_pages=%u, cq_pages=%u",
-		       queue_idx, nb_desc, rxq->rx_free_thresh, USABLE_RX_BD(rxq),
-		       TOTAL_RX_BD(rxq), rxq->nb_rx_pages, rxq->nb_cq_pages);
+		       queue_idx, nb_desc, (unsigned long)USABLE_RX_BD(rxq),
+		       (unsigned long)TOTAL_RX_BD(rxq), rxq->nb_rx_pages,
+		       rxq->nb_cq_pages);
 
 	/* Allocate RX ring hardware descriptors */
 	dma_size = rxq->nb_rx_desc * sizeof(struct eth_rx_bd);
@@ -119,9 +93,9 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	if (NULL == dma) {
 		PMD_RX_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed!");
 		bnx2x_rx_queue_release(rxq);
-		return (-ENOMEM);
+		return -ENOMEM;
 	}
-	fp->rx_desc_mapping = rxq->rx_ring_phys_addr = (uint64_t)dma->phys_addr;
+	fp->rx_desc_mapping = rxq->rx_ring_phys_addr = (uint64_t)dma->iova;
 	rxq->rx_ring = (uint64_t*)dma->addr;
 	memset((void *)rxq->rx_ring, 0, dma_size);
 
@@ -140,22 +114,21 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	if (NULL == rxq->sw_ring) {
 		PMD_RX_LOG(ERR, "rte_zmalloc for sw_ring failed!");
 		bnx2x_rx_queue_release(rxq);
-		return (-ENOMEM);
+		return -ENOMEM;
 	}
 
 	/* Initialize software ring entries */
-	rxq->rx_mbuf_alloc = 0;
 	for (idx = 0; idx < rxq->nb_rx_desc; idx = NEXT_RX_BD(idx)) {
-		mbuf = bnx2x_rxmbuf_alloc(mp);
+		mbuf = rte_mbuf_raw_alloc(mp);
 		if (NULL == mbuf) {
 			PMD_RX_LOG(ERR, "RX mbuf alloc failed queue_id=%u, idx=%d",
 				   (unsigned)rxq->queue_id, idx);
 			bnx2x_rx_queue_release(rxq);
-			return (-ENOMEM);
+			return -ENOMEM;
 		}
 		rxq->sw_ring[idx] = mbuf;
-		rxq->rx_ring[idx] = mbuf->buf_physaddr;
-		rxq->rx_mbuf_alloc++;
+		rxq->rx_ring[idx] =
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
 	}
 	rxq->pkt_first_seg = NULL;
 	rxq->pkt_last_seg = NULL;
@@ -167,9 +140,9 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	dma = ring_dma_zone_reserve(dev, "bnx2x_rcq", queue_idx, dma_size, socket_id);
 	if (NULL == dma) {
 		PMD_RX_LOG(ERR, "RCQ alloc failed");
-		return (-ENOMEM);
+		return -ENOMEM;
 	}
-	fp->rx_comp_mapping = rxq->cq_ring_phys_addr = (uint64_t)dma->phys_addr;
+	fp->rx_comp_mapping = rxq->cq_ring_phys_addr = (uint64_t)dma->iova;
 	rxq->cq_ring = (union eth_rx_cqe*)dma->addr;
 
 	/* Link the CQ chain pages. */
@@ -220,40 +193,40 @@ bnx2x_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	struct bnx2x_tx_queue *txq;
 	struct bnx2x_softc *sc;
 	struct bnx2x_fastpath *fp;
-	uint32_t burst, nb_tx;
-	struct rte_mbuf **m = tx_pkts;
-	int ret;
+	uint16_t nb_tx_pkts;
+	uint16_t nb_pkt_sent = 0;
+	uint32_t ret;
 
 	txq = p_txq;
 	sc = txq->sc;
 	fp = &sc->fp[txq->queue_id];
 
-	nb_tx = nb_pkts;
-
-	do {
-		burst = RTE_MIN(nb_pkts, RTE_PMD_BNX2X_TX_MAX_BURST);
+	if ((unlikely((txq->nb_tx_desc - txq->nb_tx_avail) >
+				txq->tx_free_thresh)))
+		bnx2x_txeof(sc, fp);
 
-		ret = bnx2x_tx_encap(txq, m, burst);
-		if (unlikely(ret)) {
-			PMD_TX_LOG(ERR, "tx_encap failed!");
-		}
-
-		bnx2x_update_fp_sb_idx(fp);
-
-		if ((txq->nb_tx_desc - txq->nb_tx_avail) > txq->tx_free_thresh) {
-			bnx2x_txeof(sc, fp);
-		}
+	nb_tx_pkts = RTE_MIN(nb_pkts, txq->nb_tx_avail / BDS_PER_TX_PKT);
+	if (unlikely(nb_tx_pkts == 0))
+		return 0;
 
-		if (unlikely(ret == -ENOMEM)) {
-			break;
-		}
+	while (nb_tx_pkts--) {
+		struct rte_mbuf *m = *tx_pkts++;
+		assert(m != NULL);
+		ret = bnx2x_tx_encap(txq, m);
+		fp->tx_db.data.prod += ret;
+		nb_pkt_sent++;
+	}
 
-		m += burst;
-		nb_pkts -= burst;
+	bnx2x_update_fp_sb_idx(fp);
+	mb();
+	DOORBELL(sc, txq->queue_id, fp->tx_db.raw);
+	mb();
 
-	} while (nb_pkts);
+	if ((txq->nb_tx_desc - txq->nb_tx_avail) >
+	    txq->tx_free_thresh)
+		bnx2x_txeof(sc, fp);
 
-	return nb_tx - nb_pkts;
+	return nb_pkt_sent;
 }
 
 int
@@ -276,7 +249,7 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq = rte_zmalloc("ethdev TX queue", sizeof(struct bnx2x_tx_queue),
 			  RTE_CACHE_LINE_SIZE);
 	if (txq == NULL)
-		return (-ENOMEM);
+		return -ENOMEM;
 	txq->sc = sc;
 
 	txq->nb_tx_pages = 1;
@@ -288,20 +261,23 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
 	txq->tx_free_thresh = tx_conf->tx_free_thresh ?
 		tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH;
+	txq->tx_free_thresh = min(txq->tx_free_thresh,
+				  txq->nb_tx_desc - BDS_PER_TX_PKT);
 
-	PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
+	PMD_DRV_LOG(DEBUG, sc, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
 		     "total_bd=%lu, tx_pages=%u",
-		     queue_idx, nb_desc, txq->tx_free_thresh, USABLE_TX_BD(txq),
-		     TOTAL_TX_BD(txq), txq->nb_tx_pages);
+		     queue_idx, nb_desc, txq->tx_free_thresh,
+		     (unsigned long)USABLE_TX_BD(txq),
+		     (unsigned long)TOTAL_TX_BD(txq), txq->nb_tx_pages);
 
 	/* Allocate TX ring hardware descriptors */
 	tsize = txq->nb_tx_desc * sizeof(union eth_tx_bd_types);
 	tz = ring_dma_zone_reserve(dev, "tx_hw_ring", queue_idx, tsize, socket_id);
 	if (tz == NULL) {
 		bnx2x_tx_queue_release(txq);
-		return (-ENOMEM);
+		return -ENOMEM;
 	}
-	fp->tx_desc_mapping = txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
+	fp->tx_desc_mapping = txq->tx_ring_phys_addr = (uint64_t)tz->iova;
 	txq->tx_ring = (union eth_tx_bd_types *) tz->addr;
 	memset(txq->tx_ring, 0, tsize);
 
@@ -311,10 +287,10 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			   RTE_CACHE_LINE_SIZE);
 	if (txq->sw_ring == NULL) {
 		bnx2x_tx_queue_release(txq);
-		return (-ENOMEM);
+		return -ENOMEM;
 	}
 
-	/* PMD_DRV_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
+	/* PMD_DRV_LOG(DEBUG, sc, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
 	   txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); */
 
 	/* Link TX pages */
@@ -323,7 +299,9 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		busaddr = txq->tx_ring_phys_addr + BNX2X_PAGE_SIZE * (i % txq->nb_tx_pages);
 		tx_n_bd->addr_hi = rte_cpu_to_le_32(U64_HI(busaddr));
 		tx_n_bd->addr_lo = rte_cpu_to_le_32(U64_LO(busaddr));
-		/* PMD_DRV_LOG(DEBUG, "link tx page %lu", (TOTAL_TX_BD_PER_PAGE * i - 1)); */
+		/* PMD_DRV_LOG(DEBUG, sc, "link tx page %lu",
+		 * (TOTAL_TX_BD_PER_PAGE * i - 1));
+		 */
 	}
 
 	txq->queue_id = queue_idx;
@@ -333,7 +311,6 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->tx_bd_tail = 0;
 	txq->tx_bd_head = 0;
 	txq->nb_tx_avail = txq->nb_tx_desc;
-	dev->tx_pkt_burst = bnx2x_xmit_pkts;
 	dev->data->tx_queues[queue_idx] = txq;
 	if (!sc->tx_queues) sc->tx_queues = dev->data->tx_queues;
 
@@ -344,12 +321,14 @@ static inline void
 bnx2x_upd_rx_prod_fast(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
 		uint16_t rx_bd_prod, uint16_t rx_cq_prod)
 {
-	union ustorm_eth_rx_producers rx_prods;
+	struct ustorm_eth_rx_producers rx_prods = { 0 };
+	uint32_t *val = NULL;
 
-	rx_prods.prod.bd_prod = rx_bd_prod;
-	rx_prods.prod.cqe_prod = rx_cq_prod;
+	rx_prods.bd_prod = rx_bd_prod;
+	rx_prods.cqe_prod = rx_cq_prod;
 
-	REG_WR(sc, fp->ustorm_rx_prods_offset, rx_prods.raw_data[0]);
+	val = (uint32_t *)&rx_prods;
+	REG_WR(sc, fp->ustorm_rx_prods_offset, val[0]);
 }
 
 static uint16_t
@@ -364,8 +343,11 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	struct rte_mbuf *new_mb;
 	uint16_t rx_pref;
 	struct eth_fast_path_rx_cqe *cqe_fp;
-	uint16_t len, pad;
+	uint16_t len, pad, bd_len, buf_len;
 	struct rte_mbuf *rx_mb = NULL;
+	static bool log_once = true;
+
+	rte_spinlock_lock(&(fp)->rx_mtx);
 
 	hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
 	if ((hw_cq_cons & USABLE_RCQ_ENTRIES_PER_PAGE) ==
@@ -378,8 +360,10 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	sw_cq_cons = rxq->rx_cq_head;
 	sw_cq_prod = rxq->rx_cq_tail;
 
-	if (sw_cq_cons == hw_cq_cons)
+	if (sw_cq_cons == hw_cq_cons) {
+		rte_spinlock_unlock(&(fp)->rx_mtx);
 		return 0;
+	}
 
 	while (nb_rx < nb_pkts && sw_cq_cons != hw_cq_cons) {
 
@@ -401,16 +385,33 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
 		len = cqe_fp->pkt_len_or_gro_seg_len;
 		pad = cqe_fp->placement_offset;
+		bd_len = cqe_fp->len_on_bd;
+		buf_len = rxq->sw_ring[bd_cons]->buf_len;
+
+		/* Check for sufficient buffer length */
+		if (unlikely(buf_len < len + (pad + RTE_PKTMBUF_HEADROOM))) {
+			if (unlikely(log_once)) {
+				PMD_DRV_LOG(ERR, sc, "mbuf size %d is not enough to hold Rx packet length more than %d",
+					    buf_len - RTE_PKTMBUF_HEADROOM,
+					    buf_len -
+					    (pad + RTE_PKTMBUF_HEADROOM));
+				log_once = false;
+			}
+			goto next_rx;
+		}
 
-		new_mb = bnx2x_rxmbuf_alloc(rxq->mb_pool);
+		new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
 		if (unlikely(!new_mb)) {
 			PMD_RX_LOG(ERR,
 				   "mbuf alloc fail fp[%02d]", fp->index);
+			rte_eth_devices[rxq->port_id].data->
+					rx_mbuf_alloc_failed++;
 			goto next_rx;
 		}
 
 		rx_mb = rxq->sw_ring[bd_cons];
 		rxq->sw_ring[bd_cons] = new_mb;
-		rxq->rx_ring[bd_prod] = new_mb->buf_physaddr;
+		rxq->rx_ring[bd_prod] =
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(new_mb));
 		rx_pref = NEXT_RX_BD(bd_cons) & MAX_RX_BD(rxq);
 		rte_prefetch0(rxq->sw_ring[rx_pref]);
@@ -419,12 +420,12 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 			rte_prefetch0(&rxq->sw_ring[rx_pref]);
 		}
 
-		rx_mb->data_off = pad;
+		rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM;
 		rx_mb->nb_segs = 1;
 		rx_mb->next = NULL;
-		rx_mb->pkt_len = rx_mb->data_len = len;
+		rx_mb->pkt_len = len;
+		rx_mb->data_len = bd_len;
 		rx_mb->port = rxq->port_id;
-		rx_mb->buf_len = len + pad;
 		rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
 
 		/*
@@ -433,7 +434,7 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		 */
 		if (cqe_fp->pars_flags.flags & PARSING_FLAGS_VLAN) {
 			rx_mb->vlan_tci = cqe_fp->vlan_tag;
-			rx_mb->ol_flags |= PKT_RX_VLAN_PKT;
+			rx_mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
 		}
 
 		rx_pkts[nb_rx] = rx_mb;
@@ -458,23 +459,38 @@ next_rx:
 
 	bnx2x_upd_rx_prod_fast(sc, fp, bd_prod, sw_cq_prod);
 
+	rte_spinlock_unlock(&(fp)->rx_mtx);
+
 	return nb_rx;
 }
 
-int
-bnx2x_dev_rx_init(struct rte_eth_dev *dev)
+static uint16_t
+bnx2x_rxtx_pkts_dummy(__rte_unused void *p_rxq,
+		      __rte_unused struct rte_mbuf **rx_pkts,
+		      __rte_unused uint16_t nb_pkts)
 {
-	dev->rx_pkt_burst = bnx2x_recv_pkts;
-	return 0;
 }
 
+void bnx2x_dev_rxtx_init_dummy(struct rte_eth_dev *dev)
+{
+	dev->rx_pkt_burst = bnx2x_rxtx_pkts_dummy;
+	dev->tx_pkt_burst = bnx2x_rxtx_pkts_dummy;
+}
+
+void bnx2x_dev_rxtx_init(struct rte_eth_dev *dev)
+{
+	dev->rx_pkt_burst = bnx2x_recv_pkts;
+	dev->tx_pkt_burst = bnx2x_xmit_pkts;
+}
+
 void
 bnx2x_dev_clear_queues(struct rte_eth_dev *dev)
 {
+	struct bnx2x_softc *sc = dev->data->dev_private;
 	uint8_t i;
 
-	PMD_INIT_FUNC_TRACE();
+	PMD_INIT_FUNC_TRACE(sc);
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		struct bnx2x_tx_queue *txq = dev->data->tx_queues[i];