diff --git a/drivers/net/bnx2x/bnx2x_rxtx.c b/drivers/net/bnx2x/bnx2x_rxtx.c
index 09e3ebff79..e5a2b25b56 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.c
+++ b/drivers/net/bnx2x/bnx2x_rxtx.c
@@ -1,38 +1,19 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- *
+ * Copyright (c) 2015-2018 Cavium Inc.
  * All rights reserved.
+ * www.cavium.com
  */
 
 #include "bnx2x.h"
 #include "bnx2x_rxtx.h"
 
-static inline struct rte_mbuf *
-bnx2x_rxmbuf_alloc(struct rte_mempool *mp)
-{
-	struct rte_mbuf *m;
-
-	m = __rte_mbuf_raw_alloc(mp);
-	__rte_mbuf_sanity_check(m, 0);
-
-	return m;
-}
-
 static const struct rte_memzone *
 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
 		      uint16_t queue_id, uint32_t ring_size, int socket_id)
 {
-	char z_name[RTE_MEMZONE_NAMESIZE];
-	const struct rte_memzone *mz;
-
-	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-		dev->driver->pci_drv.name, ring_name, dev->data->port_id, queue_id);
-
-	mz = rte_memzone_lookup(z_name);
-	if (mz)
-		return mz;
-
-	return rte_memzone_reserve_aligned(z_name, ring_size, socket_id, 0, BNX2X_PAGE_SIZE);
+	return rte_eth_dma_zone_reserve(dev, ring_name, queue_id,
+			ring_size, BNX2X_PAGE_SIZE, socket_id);
 }
 
 static void
@@ -66,7 +47,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
 		       uint16_t queue_idx,
 		       uint16_t nb_desc,
 		       unsigned int socket_id,
-		       const struct rte_eth_rxconf *rx_conf,
+		       __rte_unused const struct rte_eth_rxconf *rx_conf,
 		       struct rte_mempool *mp)
 {
 	uint16_t j, idx;
@@ -77,21 +58,20 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	struct bnx2x_softc *sc = dev->data->dev_private;
 	struct bnx2x_fastpath *fp = &sc->fp[queue_idx];
 	struct eth_rx_cqe_next_page *nextpg;
-	phys_addr_t *rx_bd;
-	phys_addr_t busaddr;
+	rte_iova_t *rx_bd;
+	rte_iova_t busaddr;
 
 	/* First allocate the rx queue data structure */
 	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct bnx2x_rx_queue),
 				 RTE_CACHE_LINE_SIZE, socket_id);
 	if (NULL == rxq) {
-		PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
-		return (-ENOMEM);
+		PMD_DRV_LOG(ERR, sc, "rte_zmalloc for rxq failed!");
+		return -ENOMEM;
 	}
 	rxq->sc = sc;
 	rxq->mb_pool = mp;
 	rxq->queue_id = queue_idx;
 	rxq->port_id = dev->data->port_id;
-	rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 : ETHER_CRC_LEN);
 
 	rxq->nb_rx_pages = 1;
 	while (USABLE_RX_BD(rxq) < nb_desc)
@@ -101,13 +81,11 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	sc->rx_ring_size = USABLE_RX_BD(rxq);
 	rxq->nb_cq_pages = RCQ_BD_PAGES(rxq);
 
-	rxq->rx_free_thresh = rx_conf->rx_free_thresh ?
- rx_conf->rx_free_thresh : DEFAULT_RX_FREE_THRESH; - - PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, " + PMD_DRV_LOG(DEBUG, sc, "fp[%02d] req_bd=%u, usable_bd=%lu, " "total_bd=%lu, rx_pages=%u, cq_pages=%u", - queue_idx, nb_desc, rxq->rx_free_thresh, USABLE_RX_BD(rxq), - TOTAL_RX_BD(rxq), rxq->nb_rx_pages, rxq->nb_cq_pages); + queue_idx, nb_desc, (unsigned long)USABLE_RX_BD(rxq), + (unsigned long)TOTAL_RX_BD(rxq), rxq->nb_rx_pages, + rxq->nb_cq_pages); /* Allocate RX ring hardware descriptors */ dma_size = rxq->nb_rx_desc * sizeof(struct eth_rx_bd); @@ -115,9 +93,9 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev, if (NULL == dma) { PMD_RX_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed!"); bnx2x_rx_queue_release(rxq); - return (-ENOMEM); + return -ENOMEM; } - fp->rx_desc_mapping = rxq->rx_ring_phys_addr = (uint64_t)dma->phys_addr; + fp->rx_desc_mapping = rxq->rx_ring_phys_addr = (uint64_t)dma->iova; rxq->rx_ring = (uint64_t*)dma->addr; memset((void *)rxq->rx_ring, 0, dma_size); @@ -136,22 +114,21 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev, if (NULL == rxq->sw_ring) { PMD_RX_LOG(ERR, "rte_zmalloc for sw_ring failed!"); bnx2x_rx_queue_release(rxq); - return (-ENOMEM); + return -ENOMEM; } /* Initialize software ring entries */ - rxq->rx_mbuf_alloc = 0; for (idx = 0; idx < rxq->nb_rx_desc; idx = NEXT_RX_BD(idx)) { - mbuf = bnx2x_rxmbuf_alloc(mp); + mbuf = rte_mbuf_raw_alloc(mp); if (NULL == mbuf) { PMD_RX_LOG(ERR, "RX mbuf alloc failed queue_id=%u, idx=%d", (unsigned)rxq->queue_id, idx); bnx2x_rx_queue_release(rxq); - return (-ENOMEM); + return -ENOMEM; } rxq->sw_ring[idx] = mbuf; - rxq->rx_ring[idx] = mbuf->buf_physaddr; - rxq->rx_mbuf_alloc++; + rxq->rx_ring[idx] = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); } rxq->pkt_first_seg = NULL; rxq->pkt_last_seg = NULL; @@ -163,9 +140,9 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev, dma = ring_dma_zone_reserve(dev, "bnx2x_rcq", queue_idx, dma_size, socket_id); if (NULL == dma) { PMD_RX_LOG(ERR, "RCQ alloc failed"); - return (-ENOMEM); + return -ENOMEM; } - fp->rx_comp_mapping = rxq->cq_ring_phys_addr = (uint64_t)dma->phys_addr; + fp->rx_comp_mapping = rxq->cq_ring_phys_addr = (uint64_t)dma->iova; rxq->cq_ring = (union eth_rx_cqe*)dma->addr; /* Link the CQ chain pages. 
*/ @@ -216,40 +193,40 @@ bnx2x_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) struct bnx2x_tx_queue *txq; struct bnx2x_softc *sc; struct bnx2x_fastpath *fp; - uint32_t burst, nb_tx; - struct rte_mbuf **m = tx_pkts; - int ret; + uint16_t nb_tx_pkts; + uint16_t nb_pkt_sent = 0; + uint32_t ret; txq = p_txq; sc = txq->sc; fp = &sc->fp[txq->queue_id]; - nb_tx = nb_pkts; + if ((unlikely((txq->nb_tx_desc - txq->nb_tx_avail) > + txq->tx_free_thresh))) + bnx2x_txeof(sc, fp); - do { - burst = RTE_MIN(nb_pkts, RTE_PMD_BNX2X_TX_MAX_BURST); + nb_tx_pkts = RTE_MIN(nb_pkts, txq->nb_tx_avail / BDS_PER_TX_PKT); + if (unlikely(nb_tx_pkts == 0)) + return 0; - ret = bnx2x_tx_encap(txq, m, burst); - if (unlikely(ret)) { - PMD_TX_LOG(ERR, "tx_encap failed!"); - } - - bnx2x_update_fp_sb_idx(fp); - - if ((txq->nb_tx_desc - txq->nb_tx_avail) > txq->tx_free_thresh) { - bnx2x_txeof(sc, fp); - } - - if (unlikely(ret == -ENOMEM)) { - break; - } + while (nb_tx_pkts--) { + struct rte_mbuf *m = *tx_pkts++; + assert(m != NULL); + ret = bnx2x_tx_encap(txq, m); + fp->tx_db.data.prod += ret; + nb_pkt_sent++; + } - m += burst; - nb_pkts -= burst; + bnx2x_update_fp_sb_idx(fp); + mb(); + DOORBELL(sc, txq->queue_id, fp->tx_db.raw); + mb(); - } while (nb_pkts); + if ((txq->nb_tx_desc - txq->nb_tx_avail) > + txq->tx_free_thresh) + bnx2x_txeof(sc, fp); - return nb_tx - nb_pkts; + return nb_pkt_sent; } int @@ -272,7 +249,7 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev, txq = rte_zmalloc("ethdev TX queue", sizeof(struct bnx2x_tx_queue), RTE_CACHE_LINE_SIZE); if (txq == NULL) - return (-ENOMEM); + return -ENOMEM; txq->sc = sc; txq->nb_tx_pages = 1; @@ -284,20 +261,23 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev, txq->tx_free_thresh = tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH; + txq->tx_free_thresh = min(txq->tx_free_thresh, + txq->nb_tx_desc - BDS_PER_TX_PKT); - PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, " + PMD_DRV_LOG(DEBUG, sc, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, " "total_bd=%lu, tx_pages=%u", - queue_idx, nb_desc, txq->tx_free_thresh, USABLE_TX_BD(txq), - TOTAL_TX_BD(txq), txq->nb_tx_pages); + queue_idx, nb_desc, txq->tx_free_thresh, + (unsigned long)USABLE_TX_BD(txq), + (unsigned long)TOTAL_TX_BD(txq), txq->nb_tx_pages); /* Allocate TX ring hardware descriptors */ tsize = txq->nb_tx_desc * sizeof(union eth_tx_bd_types); tz = ring_dma_zone_reserve(dev, "tx_hw_ring", queue_idx, tsize, socket_id); if (tz == NULL) { bnx2x_tx_queue_release(txq); - return (-ENOMEM); + return -ENOMEM; } - fp->tx_desc_mapping = txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr; + fp->tx_desc_mapping = txq->tx_ring_phys_addr = (uint64_t)tz->iova; txq->tx_ring = (union eth_tx_bd_types *) tz->addr; memset(txq->tx_ring, 0, tsize); @@ -307,10 +287,10 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev, RTE_CACHE_LINE_SIZE); if (txq->sw_ring == NULL) { bnx2x_tx_queue_release(txq); - return (-ENOMEM); + return -ENOMEM; } - /* PMD_DRV_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64, + /* PMD_DRV_LOG(DEBUG, sc, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64, txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); */ /* Link TX pages */ @@ -319,7 +299,9 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev, busaddr = txq->tx_ring_phys_addr + BNX2X_PAGE_SIZE * (i % txq->nb_tx_pages); tx_n_bd->addr_hi = rte_cpu_to_le_32(U64_HI(busaddr)); tx_n_bd->addr_lo = rte_cpu_to_le_32(U64_LO(busaddr)); - /* PMD_DRV_LOG(DEBUG, "link tx page %lu", (TOTAL_TX_BD_PER_PAGE * i - 1)); */ 
+ /* PMD_DRV_LOG(DEBUG, sc, "link tx page %lu", + * (TOTAL_TX_BD_PER_PAGE * i - 1)); + */ } txq->queue_id = queue_idx; @@ -329,7 +311,6 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev, txq->tx_bd_tail = 0; txq->tx_bd_head = 0; txq->nb_tx_avail = txq->nb_tx_desc; - dev->tx_pkt_burst = bnx2x_xmit_pkts; dev->data->tx_queues[queue_idx] = txq; if (!sc->tx_queues) sc->tx_queues = dev->data->tx_queues; @@ -374,6 +355,9 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) sw_cq_cons = rxq->rx_cq_head; sw_cq_prod = rxq->rx_cq_tail; + if (sw_cq_cons == hw_cq_cons) + return 0; + while (nb_rx < nb_pkts && sw_cq_cons != hw_cq_cons) { bd_prod &= MAX_RX_BD(rxq); @@ -395,15 +379,18 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) len = cqe_fp->pkt_len_or_gro_seg_len; pad = cqe_fp->placement_offset; - new_mb = bnx2x_rxmbuf_alloc(rxq->mb_pool); + new_mb = rte_mbuf_raw_alloc(rxq->mb_pool); if (unlikely(!new_mb)) { PMD_RX_LOG(ERR, "mbuf alloc fail fp[%02d]", fp->index); + rte_eth_devices[rxq->port_id].data-> + rx_mbuf_alloc_failed++; goto next_rx; } rx_mb = rxq->sw_ring[bd_cons]; rxq->sw_ring[bd_cons] = new_mb; - rxq->rx_ring[bd_prod] = new_mb->buf_physaddr; + rxq->rx_ring[bd_prod] = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(new_mb)); rx_pref = NEXT_RX_BD(bd_cons) & MAX_RX_BD(rxq); rte_prefetch0(rxq->sw_ring[rx_pref]); @@ -412,12 +399,11 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) rte_prefetch0(&rxq->sw_ring[rx_pref]); } - rx_mb->data_off = pad; + rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM; rx_mb->nb_segs = 1; rx_mb->next = NULL; rx_mb->pkt_len = rx_mb->data_len = len; rx_mb->port = rxq->port_id; - rx_mb->buf_len = len + pad; rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *)); /* @@ -426,7 +412,7 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) */ if (cqe_fp->pars_flags.flags & PARSING_FLAGS_VLAN) { rx_mb->vlan_tci = cqe_fp->vlan_tag; - rx_mb->ol_flags |= PKT_RX_VLAN_PKT; + rx_mb->ol_flags |= PKT_RX_VLAN; } rx_pkts[nb_rx] = rx_mb; @@ -454,20 +440,33 @@ next_rx: return nb_rx; } -int -bnx2x_dev_rx_init(struct rte_eth_dev *dev) +static uint16_t +bnx2x_rxtx_pkts_dummy(__rte_unused void *p_rxq, + __rte_unused struct rte_mbuf **rx_pkts, + __rte_unused uint16_t nb_pkts) { - dev->rx_pkt_burst = bnx2x_recv_pkts; - return 0; } +void bnx2x_dev_rxtx_init_dummy(struct rte_eth_dev *dev) +{ + dev->rx_pkt_burst = bnx2x_rxtx_pkts_dummy; + dev->tx_pkt_burst = bnx2x_rxtx_pkts_dummy; +} + +void bnx2x_dev_rxtx_init(struct rte_eth_dev *dev) +{ + dev->rx_pkt_burst = bnx2x_recv_pkts; + dev->tx_pkt_burst = bnx2x_xmit_pkts; +} + void bnx2x_dev_clear_queues(struct rte_eth_dev *dev) { + struct bnx2x_softc *sc = dev->data->dev_private; uint8_t i; - PMD_INIT_FUNC_TRACE(); + PMD_INIT_FUNC_TRACE(sc); for (i = 0; i < dev->data->nb_tx_queues; i++) { struct bnx2x_tx_queue *txq = dev->data->tx_queues[i];