net/mlx5: handle LRO packets in regular Rx queue
author    Matan Azrad <matan@mellanox.com>
          Mon, 29 Jul 2019 11:53:27 +0000 (11:53 +0000)
committer Ferruh Yigit <ferruh.yigit@intel.com>
          Mon, 29 Jul 2019 14:54:27 +0000 (16:54 +0200)
When LRO offload is configured in an Rx queue, the HW may coalesce TCP
packets from the same TCP connection into a single packet.

In this case the SW should fix the relevant packet headers because
the HW doesn't update them according to the newly created packet
characteristics but provides the updated values in the CQE.

Add header update code to the regular Rx burst function to support the
LRO feature.

Make sure the first mbuf has enough space to include the whole TCP
header, otherwise the header update may cross mbufs, which complicates
the operation too much.

Signed-off-by: Matan Azrad <matan@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
doc/guides/nics/mlx5.rst
drivers/net/mlx5/mlx5_rxq.c
drivers/net/mlx5/mlx5_rxtx.c

index cd550f46ed01b8c1e09a34991383d2d95326aa19..6f0c382ea038d86b7f989ccbf0fbfb0d9b98bb72 100644 (file)
@@ -165,7 +165,9 @@ Limitations
 
 - LRO:
 
-  - scatter_fcs is disabled when LRO is configured.
+  - KEEP_CRC offload cannot be supported with LRO.
+  - The first mbuf length, without head-room, must be big enough to include the
+    TCP header (122B).
 
 Statistics
 ----------
index e96bb1e3789419b95037a0e123ab4b261c8dbddb..3705d07a6863e133c983f25a53ac9a258b198d7c 100644 (file)
@@ -1541,6 +1541,11 @@ exit:
 #define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
                                        sizeof(struct rte_vlan_hdr) * 2 + \
                                        sizeof(struct rte_ipv6_hdr)))
+#define MAX_TCP_OPTION_SIZE 40u
+#define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
+                                sizeof(struct rte_tcp_hdr) + \
+                                MAX_TCP_OPTION_SIZE))
+
 /**
  * Adjust the maximum LRO message size.
  *
@@ -1607,6 +1612,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
                                                        RTE_PKTMBUF_HEADROOM;
        unsigned int max_lro_size = 0;
+       unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
 
        if (non_scatter_min_mbuf_size > mb_len && !(offloads &
                                                    DEV_RX_OFFLOAD_SCATTER)) {
@@ -1670,8 +1676,8 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                                              config->mprq.min_stride_size_n);
                tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
                tmpl->rxq.strd_headroom_en = strd_headroom_en;
-               tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(mb_len -
-                           RTE_PKTMBUF_HEADROOM, config->mprq.max_memcpy_len);
+               tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
+                               config->mprq.max_memcpy_len);
                max_lro_size = RTE_MIN(max_rx_pkt_len,
                                       (1u << tmpl->rxq.strd_num_n) *
                                       (1u << tmpl->rxq.strd_sz_n));
@@ -1680,13 +1686,21 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                        " strd_num_n = %u, strd_sz_n = %u",
                        dev->data->port_id, idx,
                        tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
-       } else if (max_rx_pkt_len <= (mb_len - RTE_PKTMBUF_HEADROOM)) {
+       } else if (max_rx_pkt_len <= first_mb_free_size) {
                tmpl->rxq.sges_n = 0;
                max_lro_size = max_rx_pkt_len;
        } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
                unsigned int size = non_scatter_min_mbuf_size;
                unsigned int sges_n;
 
+               if (mlx5_lro_on(dev) && first_mb_free_size <
+                   MLX5_MAX_LRO_HEADER_FIX) {
+                       DRV_LOG(ERR, "Not enough space in the first segment(%u)"
+                               " to include the max header size(%u) for LRO",
+                               first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
+                       rte_errno = ENOTSUP;
+                       goto error;
+               }
                /*
                 * Determine the number of SGEs needed for a full packet
                 * and round it to the next power of two.
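In application terms, the check added above means that when LRO and
scattered Rx are enabled together, the Rx mempool's data room must leave
at least MLX5_MAX_LRO_HEADER_FIX (122B) of free space after the headroom;
otherwise queue setup fails with ENOTSUP. A hypothetical pool configuration
that satisfies the constraint (the pool name, element count, and cache size
are illustrative, not from the patch):

    /* Illustrative mempool sizing: data room = headroom + worst-case LRO
     * header. Real applications would also size it for the expected MTU. */
    #include <rte_lcore.h>
    #include <rte_mbuf.h>

    #define APP_LRO_HEADER_FIX 122 /* MLX5_MAX_LRO_HEADER_FIX, see above */

    static struct rte_mempool *
    app_create_lro_rx_pool(void)
    {
            /* Any data room >= RTE_PKTMBUF_HEADROOM + 122 passes the check. */
            return rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
                                           RTE_PKTMBUF_HEADROOM +
                                           APP_LRO_HEADER_FIX,
                                           rte_socket_id());
    }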
index 003eefdd40be0f2d482d21e78275cf8150e99e79..6627b54c9b2603f9e548785ee0fcf81ea0390226 100644 (file)
@@ -107,6 +107,16 @@ static int
 mlx5_queue_state_modify(struct rte_eth_dev *dev,
                        struct mlx5_mp_arg_queue_state_modify *sm);
 
+static inline void
+mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp,
+                       volatile struct mlx5_cqe *restrict cqe,
+                       uint32_t phcsum);
+
+static inline void
+mlx5_lro_update_hdr(uint8_t *restrict padd,
+                   volatile struct mlx5_cqe *restrict cqe,
+                   uint32_t len);
+
 uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
        [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
 };
@@ -1323,6 +1333,13 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        if (rxq->crc_present)
                                len -= RTE_ETHER_CRC_LEN;
                        PKT_LEN(pkt) = len;
+                       if (cqe->lro_num_seg > 1) {
+                               mlx5_lro_update_hdr
+                                       (rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
+                                        len);
+                               pkt->ol_flags |= PKT_RX_LRO;
+                               pkt->tso_segsz = len / cqe->lro_num_seg;
+                       }
                }
                DATA_LEN(rep) = DATA_LEN(seg);
                PKT_LEN(rep) = PKT_LEN(seg);
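The bodies of mlx5_lro_update_tcp_hdr() and mlx5_lro_update_hdr() fall
outside the hunks shown above. Conceptually, they restamp the coalesced
packet's TCP and IP length fields with the values the HW accumulated in
the CQE while merging the segments. A rough sketch of the TCP part, where
the cqe_ack/cqe_win parameters stand in for the real mlx5_cqe fields (an
assumption, not the actual CQE layout):

    #include <stdint.h>
    #include <rte_tcp.h>

    /* Sketch only: rewrite the merged packet's TCP header using values the
     * HW reports for the coalesced session. Byte-order handling is elided. */
    static inline void
    app_lro_update_tcp_hdr(struct rte_tcp_hdr *tcp,
                           uint32_t cqe_ack, uint16_t cqe_win)
    {
            tcp->recv_ack = cqe_ack; /* last ACK number seen on the flow */
            tcp->rx_win = cqe_win;   /* last advertised receive window */
            /* The old checksum no longer covers the rewritten header and
             * the enlarged payload, so it must be recomputed by SW. */
    }

On the application side, coalesced packets are recognizable by the
PKT_RX_LRO flag set in the loop above, with mbuf->tso_segsz carrying the
average size of the original segments (len / cqe->lro_num_seg).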