X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fbnx2x%2Fbnx2x_rxtx.c;h=ca28aaccfa8b0e0a97c39b80ea648ae49f31c1d9;hb=5e046832f13a4bac8a850c7d5816bf56d0aaf949;hp=99d7d285c3476fb30400bd9d85a7730e36cf6db0;hpb=f17ca7870ff29fab42f6743cd2795fd60346f147;p=dpdk.git

diff --git a/drivers/net/bnx2x/bnx2x_rxtx.c b/drivers/net/bnx2x/bnx2x_rxtx.c
index 99d7d285c3..ca28aaccfa 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.c
+++ b/drivers/net/bnx2x/bnx2x_rxtx.c
@@ -1,11 +1,8 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- *
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
  * All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ * www.cavium.com
  */
 
 #include "bnx2x.h"
@@ -15,18 +12,8 @@ static const struct rte_memzone *
 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
 		      uint16_t queue_id, uint32_t ring_size, int socket_id)
 {
-	char z_name[RTE_MEMZONE_NAMESIZE];
-	const struct rte_memzone *mz;
-
-	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-		dev->device->driver->name, ring_name,
-		dev->data->port_id, queue_id);
-
-	mz = rte_memzone_lookup(z_name);
-	if (mz)
-		return mz;
-
-	return rte_memzone_reserve_aligned(z_name, ring_size, socket_id, 0, BNX2X_PAGE_SIZE);
+	return rte_eth_dma_zone_reserve(dev, ring_name, queue_id,
+			ring_size, BNX2X_PAGE_SIZE, socket_id);
 }
 
 static void
@@ -71,14 +58,14 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	struct bnx2x_softc *sc = dev->data->dev_private;
 	struct bnx2x_fastpath *fp = &sc->fp[queue_idx];
 	struct eth_rx_cqe_next_page *nextpg;
-	phys_addr_t *rx_bd;
-	phys_addr_t busaddr;
+	rte_iova_t *rx_bd;
+	rte_iova_t busaddr;
 
 	/* First allocate the rx queue data structure */
 	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct bnx2x_rx_queue),
 				 RTE_CACHE_LINE_SIZE, socket_id);
 	if (NULL == rxq) {
-		PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
+		PMD_DRV_LOG(ERR, sc, "rte_zmalloc for rxq failed!");
 		return -ENOMEM;
 	}
 	rxq->sc = sc;
@@ -94,7 +81,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	sc->rx_ring_size = USABLE_RX_BD(rxq);
 	rxq->nb_cq_pages = RCQ_BD_PAGES(rxq);
 
-	PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, usable_bd=%lu, "
+	PMD_DRV_LOG(DEBUG, sc, "fp[%02d] req_bd=%u, usable_bd=%lu, "
 		       "total_bd=%lu, rx_pages=%u, cq_pages=%u",
 		       queue_idx, nb_desc, (unsigned long)USABLE_RX_BD(rxq),
 		       (unsigned long)TOTAL_RX_BD(rxq), rxq->nb_rx_pages,
@@ -140,7 +127,8 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			return -ENOMEM;
 		}
 		rxq->sw_ring[idx] = mbuf;
-		rxq->rx_ring[idx] = mbuf->buf_physaddr;
+		rxq->rx_ring[idx] =
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
 	}
 	rxq->pkt_first_seg = NULL;
 	rxq->pkt_last_seg = NULL;
@@ -276,7 +264,7 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->tx_free_thresh = min(txq->tx_free_thresh,
 				  txq->nb_tx_desc - BDS_PER_TX_PKT);
 
-	PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
+	PMD_DRV_LOG(DEBUG, sc, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
 		     "total_bd=%lu, tx_pages=%u",
 		     queue_idx, nb_desc, txq->tx_free_thresh,
 		     (unsigned long)USABLE_TX_BD(txq),
@@ -302,7 +290,7 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
-	/* PMD_DRV_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
+	/* PMD_DRV_LOG(DEBUG, sc, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
 	   txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); */
 
 	/* Link TX pages */
@@ -311,7 +299,9 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		busaddr = txq->tx_ring_phys_addr + BNX2X_PAGE_SIZE * (i % txq->nb_tx_pages);
 		tx_n_bd->addr_hi = rte_cpu_to_le_32(U64_HI(busaddr));
 		tx_n_bd->addr_lo = rte_cpu_to_le_32(U64_LO(busaddr));
-		/* PMD_DRV_LOG(DEBUG, "link tx page %lu", (TOTAL_TX_BD_PER_PAGE * i - 1)); */
+		/* PMD_DRV_LOG(DEBUG, sc, "link tx page %lu",
+		 * (TOTAL_TX_BD_PER_PAGE * i - 1));
+		 */
 	}
 
 	txq->queue_id = queue_idx;
@@ -400,7 +390,8 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
 		rx_mb = rxq->sw_ring[bd_cons];
 		rxq->sw_ring[bd_cons] = new_mb;
-		rxq->rx_ring[bd_prod] = new_mb->buf_physaddr;
+		rxq->rx_ring[bd_prod] =
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(new_mb));
 
 		rx_pref = NEXT_RX_BD(bd_cons) & MAX_RX_BD(rxq);
 		rte_prefetch0(rxq->sw_ring[rx_pref]);
@@ -409,7 +400,7 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 			rte_prefetch0(&rxq->sw_ring[rx_pref]);
 		}
 
-		rx_mb->data_off = pad;
+		rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM;
 		rx_mb->nb_segs = 1;
 		rx_mb->next = NULL;
 		rx_mb->pkt_len = rx_mb->data_len = len;
@@ -461,9 +452,10 @@ bnx2x_dev_rx_init(struct rte_eth_dev *dev)
 void
 bnx2x_dev_clear_queues(struct rte_eth_dev *dev)
 {
+	struct bnx2x_softc *sc = dev->data->dev_private;
 	uint8_t i;
 
-	PMD_INIT_FUNC_TRACE();
+	PMD_INIT_FUNC_TRACE(sc);
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		struct bnx2x_tx_queue *txq = dev->data->tx_queues[i];