X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fbnx2x%2Fbnx2x_rxtx.c;h=e5a2b25b56ce65e9d0455b33bc329ebdd7e7325c;hb=eaa45270aa6646aefc223d0734e14ccb8838c2ef;hp=6be7277aa40867d84230a65593171b10d81ad8c6;hpb=66291ed828fea66b635bd0791f580e2053e0c013;p=dpdk.git

diff --git a/drivers/net/bnx2x/bnx2x_rxtx.c b/drivers/net/bnx2x/bnx2x_rxtx.c
index 6be7277aa4..e5a2b25b56 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.c
+++ b/drivers/net/bnx2x/bnx2x_rxtx.c
@@ -1,11 +1,8 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- *
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
  * All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ * www.cavium.com
  */
 
 #include "bnx2x.h"
@@ -15,19 +12,8 @@ static const struct rte_memzone *
 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
                       uint16_t queue_id, uint32_t ring_size, int socket_id)
 {
-        char z_name[RTE_MEMZONE_NAMESIZE];
-        const struct rte_memzone *mz;
-
-        snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-                        dev->device->driver->name, ring_name,
-                        dev->data->port_id, queue_id);
-
-        mz = rte_memzone_lookup(z_name);
-        if (mz)
-                return mz;
-
-        return rte_memzone_reserve_aligned(z_name, ring_size, socket_id,
-                        RTE_MEMZONE_IOVA_CONTIG, BNX2X_PAGE_SIZE);
+        return rte_eth_dma_zone_reserve(dev, ring_name, queue_id,
+                        ring_size, BNX2X_PAGE_SIZE, socket_id);
 }
 
 static void
@@ -79,7 +65,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
         rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct bnx2x_rx_queue),
                                  RTE_CACHE_LINE_SIZE, socket_id);
         if (NULL == rxq) {
-                PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
+                PMD_DRV_LOG(ERR, sc, "rte_zmalloc for rxq failed!");
                 return -ENOMEM;
         }
         rxq->sc = sc;
@@ -95,7 +81,7 @@
         sc->rx_ring_size = USABLE_RX_BD(rxq);
         rxq->nb_cq_pages = RCQ_BD_PAGES(rxq);
 
-        PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, usable_bd=%lu, "
+        PMD_DRV_LOG(DEBUG, sc, "fp[%02d] req_bd=%u, usable_bd=%lu, "
                        "total_bd=%lu, rx_pages=%u, cq_pages=%u",
                        queue_idx, nb_desc, (unsigned long)USABLE_RX_BD(rxq),
                        (unsigned long)TOTAL_RX_BD(rxq), rxq->nb_rx_pages,
@@ -141,7 +127,8 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
                         return -ENOMEM;
                 }
                 rxq->sw_ring[idx] = mbuf;
-                rxq->rx_ring[idx] = mbuf->buf_iova;
+                rxq->rx_ring[idx] =
+                        rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
         }
         rxq->pkt_first_seg = NULL;
         rxq->pkt_last_seg = NULL;
@@ -277,7 +264,7 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
         txq->tx_free_thresh = min(txq->tx_free_thresh,
                                   txq->nb_tx_desc - BDS_PER_TX_PKT);
 
-        PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
+        PMD_DRV_LOG(DEBUG, sc, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
                      "total_bd=%lu, tx_pages=%u",
                      queue_idx, nb_desc, txq->tx_free_thresh,
                      (unsigned long)USABLE_TX_BD(txq),
@@ -303,7 +290,7 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
                 return -ENOMEM;
         }
 
-        /* PMD_DRV_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
+        /* PMD_DRV_LOG(DEBUG, sc, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
            txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); */
 
         /* Link TX pages */
@@ -312,7 +299,9 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
                 busaddr = txq->tx_ring_phys_addr + BNX2X_PAGE_SIZE * (i % txq->nb_tx_pages);
                 tx_n_bd->addr_hi = rte_cpu_to_le_32(U64_HI(busaddr));
                 tx_n_bd->addr_lo = rte_cpu_to_le_32(U64_LO(busaddr));
-                /* PMD_DRV_LOG(DEBUG, "link tx page %lu", (TOTAL_TX_BD_PER_PAGE * i - 1)); */
+                /* PMD_DRV_LOG(DEBUG, sc, "link tx page %lu",
+                 * (TOTAL_TX_BD_PER_PAGE * i - 1));
+                 */
         }
 
         txq->queue_id = queue_idx;
@@ -322,7 +311,6 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
         txq->tx_bd_tail = 0;
         txq->tx_bd_head = 0;
         txq->nb_tx_avail = txq->nb_tx_desc;
-        dev->tx_pkt_burst = bnx2x_xmit_pkts;
         dev->data->tx_queues[queue_idx] = txq;
         if (!sc->tx_queues)
                 sc->tx_queues = dev->data->tx_queues;
@@ -401,7 +389,8 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
                 rx_mb = rxq->sw_ring[bd_cons];
                 rxq->sw_ring[bd_cons] = new_mb;
-                rxq->rx_ring[bd_prod] = new_mb->buf_iova;
+                rxq->rx_ring[bd_prod] =
+                        rte_cpu_to_le_64(rte_mbuf_data_iova_default(new_mb));
 
                 rx_pref = NEXT_RX_BD(bd_cons) & MAX_RX_BD(rxq);
                 rte_prefetch0(rxq->sw_ring[rx_pref]);
@@ -410,7 +399,7 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                         rte_prefetch0(&rxq->sw_ring[rx_pref]);
                 }
 
-                rx_mb->data_off = pad;
+                rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM;
                 rx_mb->nb_segs = 1;
                 rx_mb->next = NULL;
                 rx_mb->pkt_len = rx_mb->data_len = len;
@@ -451,20 +440,33 @@ next_rx:
         return nb_rx;
 }
 
-int
-bnx2x_dev_rx_init(struct rte_eth_dev *dev)
+static uint16_t
+bnx2x_rxtx_pkts_dummy(__rte_unused void *p_rxq,
+                      __rte_unused struct rte_mbuf **rx_pkts,
+                      __rte_unused uint16_t nb_pkts)
 {
-        dev->rx_pkt_burst = bnx2x_recv_pkts;
-
         return 0;
 }
 
+void bnx2x_dev_rxtx_init_dummy(struct rte_eth_dev *dev)
+{
+        dev->rx_pkt_burst = bnx2x_rxtx_pkts_dummy;
+        dev->tx_pkt_burst = bnx2x_rxtx_pkts_dummy;
+}
+
+void bnx2x_dev_rxtx_init(struct rte_eth_dev *dev)
+{
+        dev->rx_pkt_burst = bnx2x_recv_pkts;
+        dev->tx_pkt_burst = bnx2x_xmit_pkts;
+}
+
 void
 bnx2x_dev_clear_queues(struct rte_eth_dev *dev)
 {
+        struct bnx2x_softc *sc = dev->data->dev_private;
         uint8_t i;
 
-        PMD_INIT_FUNC_TRACE();
+        PMD_INIT_FUNC_TRACE(sc);
 
         for (i = 0; i < dev->data->nb_tx_queues; i++) {
                 struct bnx2x_tx_queue *txq = dev->data->tx_queues[i];
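
Note on the hunks above: the first hunk collapses ring_dma_zone_reserve() into a single call to the generic ethdev helper rte_eth_dma_zone_reserve(), which performs the same name-based memzone lookup-or-reserve internally. The last hunk replaces bnx2x_dev_rx_init() with a pair of installers: while the port is stopped, both burst pointers are parked on a no-op handler so a stray rte_eth_rx_burst()/rte_eth_tx_burst() call returns 0 packets instead of touching torn-down rings. Below is a minimal standalone sketch of that pattern, assuming a DPDK release of the same vintage (one where struct rte_eth_dev still exposes rx_pkt_burst/tx_pkt_burst directly); the names pkts_dummy and park_burst_handlers are illustrative, not part of this diff.

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* No-op burst handler usable for both RX and TX: it reports
 * "no packets received / none transmitted" without touching any ring. */
static uint16_t
pkts_dummy(__rte_unused void *queue,
           __rte_unused struct rte_mbuf **pkts,
           __rte_unused uint16_t nb_pkts)
{
        return 0;
}

/* Stop path: park both handlers on the no-op function
 * (mirrors what bnx2x_dev_rxtx_init_dummy() does in the diff). */
static void
park_burst_handlers(struct rte_eth_dev *dev)
{
        dev->rx_pkt_burst = pkts_dummy;
        dev->tx_pkt_burst = pkts_dummy;
}

Centralizing handler installation in bnx2x_dev_rxtx_init()/bnx2x_dev_rxtx_init_dummy() also appears to explain the one-line deletion in the @@ -322,7 +311,6 @@ hunk: setting up a TX queue no longer installs bnx2x_xmit_pkts itself, so queue setup cannot re-arm the real transmit path on a port that is not started.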