X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=inline;f=drivers%2Fnet%2Fbnx2x%2Fbnx2x_rxtx.c;h=331884cf56cc54dbfe1679f419c2af7eaaa52565;hb=a4a5cd21d20a38fc317d938b156324a6ad78d119;hp=170e48fbd1aa8921d666f10c3a2ff58f3ba222ba;hpb=cf3d07b1e6723343ecf63e51fff47aa2ff92af70;p=dpdk.git

diff --git a/drivers/net/bnx2x/bnx2x_rxtx.c b/drivers/net/bnx2x/bnx2x_rxtx.c
index 170e48fbd1..331884cf56 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.c
+++ b/drivers/net/bnx2x/bnx2x_rxtx.c
@@ -1,9 +1,9 @@
 /*
  * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
  *
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
  * All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
  *
  * See LICENSE.bnx2x_pmd for copyright and licensing details.
  */
@@ -19,14 +19,15 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
 	const struct rte_memzone *mz;
 
 	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-		dev->driver->pci_drv.driver.name, ring_name,
+		dev->device->driver->name, ring_name,
 		dev->data->port_id, queue_id);
 
 	mz = rte_memzone_lookup(z_name);
 	if (mz)
 		return mz;
 
-	return rte_memzone_reserve_aligned(z_name, ring_size, socket_id, 0, BNX2X_PAGE_SIZE);
+	return rte_memzone_reserve_aligned(z_name, ring_size, socket_id,
+			RTE_MEMZONE_IOVA_CONTIG, BNX2X_PAGE_SIZE);
 }
 
 static void
@@ -71,8 +72,8 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	struct bnx2x_softc *sc = dev->data->dev_private;
 	struct bnx2x_fastpath *fp = &sc->fp[queue_idx];
 	struct eth_rx_cqe_next_page *nextpg;
-	phys_addr_t *rx_bd;
-	phys_addr_t busaddr;
+	rte_iova_t *rx_bd;
+	rte_iova_t busaddr;
 
 	/* First allocate the rx queue data structure */
 	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct bnx2x_rx_queue),
@@ -108,7 +109,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
 		bnx2x_rx_queue_release(rxq);
 		return -ENOMEM;
 	}
-	fp->rx_desc_mapping = rxq->rx_ring_phys_addr = (uint64_t)dma->phys_addr;
+	fp->rx_desc_mapping = rxq->rx_ring_phys_addr = (uint64_t)dma->iova;
 	rxq->rx_ring = (uint64_t*)dma->addr;
 	memset((void *)rxq->rx_ring, 0, dma_size);
 
@@ -140,7 +141,8 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			return -ENOMEM;
 		}
 		rxq->sw_ring[idx] = mbuf;
-		rxq->rx_ring[idx] = mbuf->buf_physaddr;
+		rxq->rx_ring[idx] =
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
 	}
 	rxq->pkt_first_seg = NULL;
 	rxq->pkt_last_seg = NULL;
@@ -154,7 +156,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
 		PMD_RX_LOG(ERR, "RCQ alloc failed");
 		return -ENOMEM;
 	}
-	fp->rx_comp_mapping = rxq->cq_ring_phys_addr = (uint64_t)dma->phys_addr;
+	fp->rx_comp_mapping = rxq->cq_ring_phys_addr = (uint64_t)dma->iova;
 	rxq->cq_ring = (union eth_rx_cqe*)dma->addr;
 
 	/* Link the CQ chain pages. */
@@ -273,6 +275,8 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
 	txq->tx_free_thresh = tx_conf->tx_free_thresh ?
 		tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH;
+	txq->tx_free_thresh = min(txq->tx_free_thresh,
+				  txq->nb_tx_desc - BDS_PER_TX_PKT);
 
 	PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
 		     "total_bd=%lu, tx_pages=%u",
@@ -287,7 +291,7 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		bnx2x_tx_queue_release(txq);
 		return -ENOMEM;
 	}
-	fp->tx_desc_mapping = txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
+	fp->tx_desc_mapping = txq->tx_ring_phys_addr = (uint64_t)tz->iova;
 	txq->tx_ring = (union eth_tx_bd_types *) tz->addr;
 	memset(txq->tx_ring, 0, tsize);
 
@@ -398,7 +402,8 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
 		rx_mb = rxq->sw_ring[bd_cons];
 		rxq->sw_ring[bd_cons] = new_mb;
-		rxq->rx_ring[bd_prod] = new_mb->buf_physaddr;
+		rxq->rx_ring[bd_prod] =
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(new_mb));
 
 		rx_pref = NEXT_RX_BD(bd_cons) & MAX_RX_BD(rxq);
 		rte_prefetch0(rxq->sw_ring[rx_pref]);
@@ -407,7 +412,7 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 			rte_prefetch0(&rxq->sw_ring[rx_pref]);
 		}
 
-		rx_mb->data_off = pad;
+		rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM;
 		rx_mb->nb_segs = 1;
 		rx_mb->next = NULL;
 		rx_mb->pkt_len = rx_mb->data_len = len;
@@ -420,7 +425,7 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		 */
 		if (cqe_fp->pars_flags.flags & PARSING_FLAGS_VLAN) {
 			rx_mb->vlan_tci = cqe_fp->vlan_tag;
-			rx_mb->ol_flags |= PKT_RX_VLAN_PKT;
+			rx_mb->ol_flags |= PKT_RX_VLAN;
 		}
 
 		rx_pkts[nb_rx] = rx_mb;