net/bnx2x: add Rx descriptor MTU segment limitation
authorRasesh Mody <rmody@marvell.com>
Tue, 5 May 2020 03:08:12 +0000 (20:08 -0700)
committerFerruh Yigit <ferruh.yigit@intel.com>
Mon, 11 May 2020 20:27:39 +0000 (22:27 +0200)
Add Rx descriptor limit for number of segments per MTU.
The PMD doesn't support scatter-gather Rx for jumbo frames, hence the
limit is set to 1 segment per MTU. Some applications can adjust
mbuf_size based on this value. For others, the PMD detects the
condition where the Rx packet length cannot be held by the configured
mbuf size and logs a message.

Signed-off-by: Rasesh Mody <rmody@marvell.com>
Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
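
For reference, a minimal sketch of how an application might consume this
limit; the pool name, counts and the max_frame_size parameter are
illustrative assumptions, not part of this patch:

    /*
     * Sketch: size the Rx mbuf data room when the PMD reports
     * rx_desc_lim.nb_mtu_seg_max == 1 (no Rx scatter-gather), so a
     * single mbuf can hold the largest expected frame plus headroom.
     */
    #include <rte_ethdev.h>
    #include <rte_lcore.h>
    #include <rte_mbuf.h>

    static struct rte_mempool *
    setup_rx_pool(uint16_t port_id, uint16_t max_frame_size)
    {
            struct rte_eth_dev_info dev_info;
            uint16_t data_room = RTE_MBUF_DEFAULT_BUF_SIZE;

            if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
                    return NULL;

            /* No Rx scatter: the whole frame must fit in one mbuf. */
            if (dev_info.rx_desc_lim.nb_mtu_seg_max == 1 &&
                data_room < max_frame_size + RTE_PKTMBUF_HEADROOM)
                    data_room = max_frame_size + RTE_PKTMBUF_HEADROOM;

            return rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
                                           data_room, rte_socket_id());
    }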
doc/guides/nics/bnx2x.rst
drivers/net/bnx2x/bnx2x_ethdev.c
drivers/net/bnx2x/bnx2x_rxtx.c

index 67d765a..ab90d8a 100644 (file)
@@ -108,6 +108,23 @@ Driver compilation and testing
 Refer to the document :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
 for details.
 
+Jumbo: Limitation
+-----------------
+
+The Rx descriptor limit for the number of segments per MTU is set to 1,
+as the PMD doesn't support scatter-gather Rx for jumbo frames. Some
+applications can adjust mbuf_size based on this parameter and max_pkt_len.
+
+For others, the PMD detects the condition where the Rx packet length
+cannot be held by the configured mbuf size and logs a message.
+
+Example output:
+
+   .. code-block:: console
+
+      [...]
+      [bnx2x_recv_pkts:397(04:00.0:dpdk-port-0)] mbuf size 2048 is not enough to hold Rx packet length more than 2046
+
 SR-IOV: Prerequisites and sample Application Notes
 --------------------------------------------------
 
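The check below is a hypothetical application-side counterpart of the
condition the PMD logs at Rx time, assuming the application knows its
maximum expected frame length; the helper name and arguments are
illustrative only:

    /*
     * Sketch: pre-flight check mirroring the logged condition; 'mp' is
     * the Rx mempool and 'max_frame_size' the largest expected frame.
     */
    #include <rte_mbuf.h>

    static int
    check_rx_mbuf_size(struct rte_mempool *mp, uint16_t max_frame_size)
    {
            uint16_t data_room = rte_pktmbuf_data_room_size(mp);

            /* With nb_mtu_seg_max == 1, the whole frame plus headroom
             * must fit in a single mbuf data buffer. */
            if (data_room < max_frame_size + RTE_PKTMBUF_HEADROOM)
                    return -1;

            return 0;
    }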
index 30588b1..adc3690 100644 (file)
@@ -533,6 +533,7 @@ bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
        dev_info->rx_desc_lim.nb_max = MAX_RX_AVAIL;
        dev_info->rx_desc_lim.nb_min = MIN_RX_SIZE_NONTPA;
+       dev_info->rx_desc_lim.nb_mtu_seg_max = 1;
        dev_info->tx_desc_lim.nb_max = MAX_TX_AVAIL;
 
        return 0;
index e201b68..57e2ce5 100644 (file)
@@ -343,8 +343,9 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
        struct rte_mbuf *new_mb;
        uint16_t rx_pref;
        struct eth_fast_path_rx_cqe *cqe_fp;
-       uint16_t len, pad;
+       uint16_t len, pad, bd_len, buf_len;
        struct rte_mbuf *rx_mb = NULL;
+       static bool log_once = true;
 
        rte_spinlock_lock(&(fp)->rx_mtx);
 
@@ -384,6 +385,20 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
                len = cqe_fp->pkt_len_or_gro_seg_len;
                pad = cqe_fp->placement_offset;
+               bd_len = cqe_fp->len_on_bd;
+               buf_len = rxq->sw_ring[bd_cons]->buf_len;
+
+               /* Check for sufficient buffer length */
+               if (unlikely(buf_len < len + (pad + RTE_PKTMBUF_HEADROOM))) {
+                       if (unlikely(log_once)) {
+                               PMD_DRV_LOG(ERR, sc, "mbuf size %d is not enough to hold Rx packet length more than %d",
+                                           buf_len - RTE_PKTMBUF_HEADROOM,
+                                           buf_len -
+                                           (pad + RTE_PKTMBUF_HEADROOM));
+                               log_once = false;
+                       }
+                       goto next_rx;
+               }
 
                new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
                if (unlikely(!new_mb)) {
@@ -408,7 +423,8 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM;
                rx_mb->nb_segs = 1;
                rx_mb->next = NULL;
-               rx_mb->pkt_len = rx_mb->data_len = len;
+               rx_mb->pkt_len = len;
+               rx_mb->data_len = bd_len;
                rx_mb->port = rxq->port_id;
                rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));