-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- *
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#include "bnx2x.h"
ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
uint16_t queue_id, uint32_t ring_size, int socket_id)
{
- char z_name[RTE_MEMZONE_NAMESIZE];
- const struct rte_memzone *mz;
-
- snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.driver.name, ring_name,
- dev->data->port_id, queue_id);
-
- mz = rte_memzone_lookup(z_name);
- if (mz)
- return mz;
-
- return rte_memzone_reserve_aligned(z_name, ring_size, socket_id, 0, BNX2X_PAGE_SIZE);
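+ /* rte_eth_dma_zone_reserve() itself looks up an existing memzone by
+  * name and reuses it, so the open-coded lookup above is redundant.
+  */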
+ return rte_eth_dma_zone_reserve(dev, ring_name, queue_id,
+ ring_size, BNX2X_PAGE_SIZE, socket_id);
}
static void
sw_ring = rx_queue->sw_ring;
if (NULL != sw_ring) {
for (i = 0; i < rx_queue->nb_rx_desc; i++) {
- if (NULL != sw_ring[i])
- rte_pktmbuf_free(sw_ring[i]);
+ rte_pktmbuf_free(sw_ring[i]);
}
rte_free(sw_ring);
}
}
void
-bnx2x_dev_rx_queue_release(void *rxq)
+bnx2x_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
{
- bnx2x_rx_queue_release(rxq);
+ bnx2x_rx_queue_release(dev->data->rx_queues[queue_idx]);
}
int
struct bnx2x_softc *sc = dev->data->dev_private;
struct bnx2x_fastpath *fp = &sc->fp[queue_idx];
struct eth_rx_cqe_next_page *nextpg;
- phys_addr_t *rx_bd;
- phys_addr_t busaddr;
+ rte_iova_t *rx_bd;
+ rte_iova_t busaddr;
/* First allocate the rx queue data structure */
rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct bnx2x_rx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (NULL == rxq) {
- PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
+ PMD_DRV_LOG(ERR, sc, "rte_zmalloc for rxq failed!");
return -ENOMEM;
}
rxq->sc = sc;
sc->rx_ring_size = USABLE_RX_BD(rxq);
rxq->nb_cq_pages = RCQ_BD_PAGES(rxq);
- PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, usable_bd=%lu, "
+ PMD_DRV_LOG(DEBUG, sc, "fp[%02d] req_bd=%u, usable_bd=%lu, "
"total_bd=%lu, rx_pages=%u, cq_pages=%u",
queue_idx, nb_desc, (unsigned long)USABLE_RX_BD(rxq),
(unsigned long)TOTAL_RX_BD(rxq), rxq->nb_rx_pages,
bnx2x_rx_queue_release(rxq);
return -ENOMEM;
}
- fp->rx_desc_mapping = rxq->rx_ring_phys_addr = (uint64_t)dma->phys_addr;
+ fp->rx_desc_mapping = rxq->rx_ring_phys_addr = (uint64_t)dma->iova;
rxq->rx_ring = (uint64_t*)dma->addr;
memset((void *)rxq->rx_ring, 0, dma_size);
return -ENOMEM;
}
rxq->sw_ring[idx] = mbuf;
- rxq->rx_ring[idx] = mbuf->buf_physaddr;
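+ /* Post the data IOVA (buffer start plus RTE_PKTMBUF_HEADROOM) to the
+  * BD, in the little-endian layout the hardware expects.
+  */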
+ rxq->rx_ring[idx] =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
}
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
PMD_RX_LOG(ERR, "RCQ alloc failed");
return -ENOMEM;
}
- fp->rx_comp_mapping = rxq->cq_ring_phys_addr = (uint64_t)dma->phys_addr;
+ fp->rx_comp_mapping = rxq->cq_ring_phys_addr = (uint64_t)dma->iova;
rxq->cq_ring = (union eth_rx_cqe*)dma->addr;
/* Link the CQ chain pages. */
sw_ring = tx_queue->sw_ring;
if (NULL != sw_ring) {
for (i = 0; i < tx_queue->nb_tx_desc; i++) {
- if (NULL != sw_ring[i])
- rte_pktmbuf_free(sw_ring[i]);
+ rte_pktmbuf_free(sw_ring[i]);
}
rte_free(sw_ring);
}
}
void
-bnx2x_dev_tx_queue_release(void *txq)
+bnx2x_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
{
- bnx2x_tx_queue_release(txq);
+ bnx2x_tx_queue_release(dev->data->tx_queues[queue_idx]);
}
static uint16_t
txq->tx_free_thresh = min(txq->tx_free_thresh,
txq->nb_tx_desc - BDS_PER_TX_PKT);
- PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
+ PMD_DRV_LOG(DEBUG, sc, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
"total_bd=%lu, tx_pages=%u",
queue_idx, nb_desc, txq->tx_free_thresh,
(unsigned long)USABLE_TX_BD(txq),
bnx2x_tx_queue_release(txq);
return -ENOMEM;
}
- fp->tx_desc_mapping = txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
+ fp->tx_desc_mapping = txq->tx_ring_phys_addr = (uint64_t)tz->iova;
txq->tx_ring = (union eth_tx_bd_types *) tz->addr;
memset(txq->tx_ring, 0, tsize);
return -ENOMEM;
}
- /* PMD_DRV_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
+ /* PMD_DRV_LOG(DEBUG, sc, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); */
/* Link TX pages */
busaddr = txq->tx_ring_phys_addr + BNX2X_PAGE_SIZE * (i % txq->nb_tx_pages);
tx_n_bd->addr_hi = rte_cpu_to_le_32(U64_HI(busaddr));
tx_n_bd->addr_lo = rte_cpu_to_le_32(U64_LO(busaddr));
- /* PMD_DRV_LOG(DEBUG, "link tx page %lu", (TOTAL_TX_BD_PER_PAGE * i - 1)); */
+ /* PMD_DRV_LOG(DEBUG, sc, "link tx page %lu",
+ * (TOTAL_TX_BD_PER_PAGE * i - 1));
+ */
}
txq->queue_id = queue_idx;
txq->tx_bd_tail = 0;
txq->tx_bd_head = 0;
txq->nb_tx_avail = txq->nb_tx_desc;
- dev->tx_pkt_burst = bnx2x_xmit_pkts;
dev->data->tx_queues[queue_idx] = txq;
if (!sc->tx_queues) sc->tx_queues = dev->data->tx_queues;
bnx2x_upd_rx_prod_fast(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
uint16_t rx_bd_prod, uint16_t rx_cq_prod)
{
- union ustorm_eth_rx_producers rx_prods;
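+ /* Overlay both 16-bit producer indices on one 32-bit value so they
+  * can be published to ustorm with a single register write below.
+  */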
+ union {
+ struct ustorm_eth_rx_producers rx_prods;
+ uint32_t val;
+ } val = { {0} };
- rx_prods.prod.bd_prod = rx_bd_prod;
- rx_prods.prod.cqe_prod = rx_cq_prod;
+ val.rx_prods.bd_prod = rx_bd_prod;
+ val.rx_prods.cqe_prod = rx_cq_prod;
- REG_WR(sc, fp->ustorm_rx_prods_offset, rx_prods.raw_data[0]);
+ REG_WR(sc, fp->ustorm_rx_prods_offset, val.val);
}
static uint16_t
struct rte_mbuf *new_mb;
uint16_t rx_pref;
struct eth_fast_path_rx_cqe *cqe_fp;
- uint16_t len, pad;
+ uint16_t len, pad, bd_len, buf_len;
struct rte_mbuf *rx_mb = NULL;
+ static bool log_once = true;
+
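+ /* Take the Rx mutex; assumption: rx_mtx serializes this fast path
+  * against control-path queue updates (e.g. during stop/restart).
+  */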
+ rte_spinlock_lock(&(fp)->rx_mtx);
hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
if ((hw_cq_cons & USABLE_RCQ_ENTRIES_PER_PAGE) ==
sw_cq_cons = rxq->rx_cq_head;
sw_cq_prod = rxq->rx_cq_tail;
- if (sw_cq_cons == hw_cq_cons)
+ if (sw_cq_cons == hw_cq_cons) {
+ rte_spinlock_unlock(&(fp)->rx_mtx);
return 0;
+ }
while (nb_rx < nb_pkts && sw_cq_cons != hw_cq_cons) {
len = cqe_fp->pkt_len_or_gro_seg_len;
pad = cqe_fp->placement_offset;
+ bd_len = cqe_fp->len_on_bd;
+ buf_len = rxq->sw_ring[bd_cons]->buf_len;
+
+ /* Check for sufficient buffer length */
+ if (unlikely(buf_len < len + (pad + RTE_PKTMBUF_HEADROOM))) {
+ if (unlikely(log_once)) {
+ PMD_DRV_LOG(ERR, sc,
+ "mbuf data room %d cannot hold an Rx packet longer than %d",
+ buf_len - RTE_PKTMBUF_HEADROOM,
+ buf_len - (pad + RTE_PKTMBUF_HEADROOM));
+ log_once = false;
+ }
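+ /* Drop the oversized packet and move on to the next CQE. */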
+ goto next_rx;
+ }
new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
if (unlikely(!new_mb)) {
rx_mb = rxq->sw_ring[bd_cons];
rxq->sw_ring[bd_cons] = new_mb;
- rxq->rx_ring[bd_prod] = new_mb->buf_physaddr;
+ rxq->rx_ring[bd_prod] =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(new_mb));
rx_pref = NEXT_RX_BD(bd_cons) & MAX_RX_BD(rxq);
rte_prefetch0(rxq->sw_ring[rx_pref]);
rte_prefetch0(&rxq->sw_ring[rx_pref]);
}
- rx_mb->data_off = pad;
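+ /* The BD now carries the data IOVA, which already skips the headroom,
+  * so the HW placement offset applies on top of RTE_PKTMBUF_HEADROOM.
+  */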
+ rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM;
rx_mb->nb_segs = 1;
rx_mb->next = NULL;
- rx_mb->pkt_len = rx_mb->data_len = len;
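+ /* pkt_len is the total packet (or GRO segment) length from the CQE;
+  * data_len covers only the bytes placed in this first BD.
+  */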
+ rx_mb->pkt_len = len;
+ rx_mb->data_len = bd_len;
rx_mb->port = rxq->port_id;
rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
*/
if (cqe_fp->pars_flags.flags & PARSING_FLAGS_VLAN) {
rx_mb->vlan_tci = cqe_fp->vlan_tag;
- rx_mb->ol_flags |= PKT_RX_VLAN_PKT;
+ rx_mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
}
rx_pkts[nb_rx] = rx_mb;
bnx2x_upd_rx_prod_fast(sc, fp, bd_prod, sw_cq_prod);
+ rte_spinlock_unlock(&(fp)->rx_mtx);
+
return nb_rx;
}
-int
-bnx2x_dev_rx_init(struct rte_eth_dev *dev)
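+/* Dummy handlers keep Rx/Tx burst calls on a stopped port as safe
+ * no-ops (rte_eth_pkt_burst_dummy returns 0 packets).
+ */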
+void bnx2x_dev_rxtx_init_dummy(struct rte_eth_dev *dev)
{
- dev->rx_pkt_burst = bnx2x_recv_pkts;
+ dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
+ dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
+}
+
- return 0;
+void bnx2x_dev_rxtx_init(struct rte_eth_dev *dev)
+{
+ dev->rx_pkt_burst = bnx2x_recv_pkts;
+ dev->tx_pkt_burst = bnx2x_xmit_pkts;
}
void
bnx2x_dev_clear_queues(struct rte_eth_dev *dev)
{
+ struct bnx2x_softc *sc = dev->data->dev_private;
uint8_t i;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
for (i = 0; i < dev->data->nb_tx_queues; i++) {
struct bnx2x_tx_queue *txq = dev->data->tx_queues[i];