net/mlx5: allow implicit LRO flow
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index ad5b0a9..f7e861c 100644
@@ -1126,7 +1126,7 @@ mlx5_devx_rq_new(struct rte_eth_dev *dev, uint16_t idx, uint32_t cqn)
        /* Calculate and allocate WQ memory space. */
        wqe_size = 1 << log_wqe_size; /* round up to power of two. */
        wq_size = wqe_n * wqe_size;
-       buf = rte_calloc_socket(__func__, 1, wq_size, RTE_CACHE_LINE_SIZE,
+       buf = rte_calloc_socket(__func__, 1, wq_size, MLX5_WQE_BUF_ALIGNMENT,
                                rxq_ctrl->socket);
        if (!buf)
                return NULL;
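
The hunk above widens the allocation alignment of the DevX RQ work-queue buffer from a CPU cache line to MLX5_WQE_BUF_ALIGNMENT, a device-side requirement defined in mlx5_defs.h. A minimal sketch of the same allocation pattern in plain C, with aligned_alloc() standing in for rte_calloc_socket() and a hypothetical 512-byte alignment value:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for MLX5_WQE_BUF_ALIGNMENT (see mlx5_defs.h). */
#define WQE_BUF_ALIGNMENT 512u

/*
 * Allocate a zeroed work-queue buffer following the pattern in
 * mlx5_devx_rq_new(): the WQE size arrives as a log2 value, and the
 * whole buffer must sit on the device-required boundary.
 */
static void *
wq_buf_alloc(unsigned int log_wqe_size, unsigned int wqe_n)
{
	size_t wqe_size = (size_t)1 << log_wqe_size; /* power of two */
	size_t wq_size = wqe_n * wqe_size;
	void *buf;

	/* aligned_alloc() wants the size to be a multiple of the alignment. */
	wq_size = (wq_size + WQE_BUF_ALIGNMENT - 1) &
		  ~(size_t)(WQE_BUF_ALIGNMENT - 1);
	buf = aligned_alloc(WQE_BUF_ALIGNMENT, wq_size);
	if (buf != NULL)
		memset(buf, 0, wq_size); /* rte_calloc_socket() zeroes too */
	return buf;
}

int
main(void)
{
	void *buf = wq_buf_alloc(6, 256); /* 256 WQEs of 64 B each */

	printf("aligned to %u: %s\n", WQE_BUF_ALIGNMENT,
	       ((uintptr_t)buf % WQE_BUF_ALIGNMENT) == 0 ? "yes" : "no");
	free(buf);
	return 0;
}

The real call additionally pins the memory to rxq_ctrl->socket, the queue's NUMA node, which the portable stand-in cannot express.
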
@@ -1541,6 +1541,11 @@ exit:
 #define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
                                        sizeof(struct rte_vlan_hdr) * 2 + \
                                        sizeof(struct rte_ipv6_hdr)))
+#define MAX_TCP_OPTION_SIZE 40u
+#define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
+                                sizeof(struct rte_tcp_hdr) + \
+                                MAX_TCP_OPTION_SIZE))
+
 /**
  * Adjust the maximum LRO message size.
  *
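
Plugging in the DPDK header struct sizes (14 B Ethernet, 4 B per VLAN tag, 40 B IPv6, 20 B TCP without options), the new bound evaluates to 14 + 2*4 + 40 + 20 + 40 = 122 bytes; MAX_TCP_OPTION_SIZE is 40 because the TCP data-offset field caps the header at 60 bytes. A self-contained check of that arithmetic, assuming those sizes rather than including the rte headers:

#include <assert.h>
#include <stdio.h>

/* Header sizes as laid out by the corresponding rte_*_hdr structs. */
#define ETHER_HDR_SIZE       14u /* sizeof(struct rte_ether_hdr) */
#define VLAN_HDR_SIZE         4u /* sizeof(struct rte_vlan_hdr) */
#define IPV6_HDR_SIZE        40u /* sizeof(struct rte_ipv6_hdr) */
#define TCP_HDR_SIZE         20u /* sizeof(struct rte_tcp_hdr), no options */
#define MAX_TCP_OPTION_SIZE  40u /* data offset caps the header at 60 B */

int
main(void)
{
	/* Mirrors MLX5_MAX_TCP_HDR_OFFSET: L2 + two VLAN tags + IPv6. */
	unsigned int max_tcp_hdr_offset =
		ETHER_HDR_SIZE + 2 * VLAN_HDR_SIZE + IPV6_HDR_SIZE;
	/* Mirrors MLX5_MAX_LRO_HEADER_FIX: add TCP and worst-case options. */
	unsigned int max_lro_header_fix =
		max_tcp_hdr_offset + TCP_HDR_SIZE + MAX_TCP_OPTION_SIZE;

	assert(max_tcp_hdr_offset == 62);
	assert(max_lro_header_fix == 122);
	printf("worst-case LRO header: %u bytes\n", max_lro_header_fix);
	return 0;
}
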
@@ -1607,6 +1612,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
                                                        RTE_PKTMBUF_HEADROOM;
        unsigned int max_lro_size = 0;
+       unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
 
        if (non_scatter_min_mbuf_size > mb_len && !(offloads &
                                                    DEV_RX_OFFLOAD_SCATTER)) {
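
first_mb_free_size, factored out here, is the data room left in the first mbuf segment once the headroom is reserved. A sketch of the arithmetic with DPDK's default mempool values (2048 B data room plus 128 B RTE_PKTMBUF_HEADROOM), both assumptions of the example:

#include <stdio.h>

/* DPDK defaults, assumed here; both are tunable at mempool creation. */
#define PKTMBUF_HEADROOM        128u /* RTE_PKTMBUF_HEADROOM */
#define MBUF_DEFAULT_DATAROOM  2048u /* RTE_MBUF_DEFAULT_DATAROOM */

int
main(void)
{
	/* rte_pktmbuf_data_room_size() with the default buffer size. */
	unsigned int mb_len = MBUF_DEFAULT_DATAROOM + PKTMBUF_HEADROOM;
	unsigned int first_mb_free_size = mb_len - PKTMBUF_HEADROOM;
	unsigned int max_rx_pkt_len = 1518; /* example: full Ethernet frame */
	unsigned int non_scatter_min_mbuf_size =
		max_rx_pkt_len + PKTMBUF_HEADROOM;

	printf("first segment free space: %u B\n", first_mb_free_size);
	/* Scatter is only required when one mbuf cannot hold the frame. */
	printf("scatter needed: %s\n",
	       non_scatter_min_mbuf_size > mb_len ? "yes" : "no");
	return 0;
}
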
@@ -1670,8 +1676,8 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                                              config->mprq.min_stride_size_n);
                tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
                tmpl->rxq.strd_headroom_en = strd_headroom_en;
-               tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(mb_len -
-                           RTE_PKTMBUF_HEADROOM, config->mprq.max_memcpy_len);
+               tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
+                               config->mprq.max_memcpy_len);
                max_lro_size = RTE_MIN(max_rx_pkt_len,
                                       (1u << tmpl->rxq.strd_num_n) *
                                       (1u << tmpl->rxq.strd_sz_n));
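
In MPRQ mode the LRO message can never exceed the buffer one WQE spans: (1 << strd_num_n) strides of (1 << strd_sz_n) bytes each. A worked instance of that clamp, with illustrative (not driver-default) stride parameters:

#include <stdio.h>

static unsigned int
min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int
main(void)
{
	/* Illustrative log2 stride parameters, not the driver defaults. */
	unsigned int strd_num_n = 9;  /* 512 strides per WQE */
	unsigned int strd_sz_n = 11;  /* 2048 B per stride */
	unsigned int max_rx_pkt_len = 65535;

	/* RTE_MIN(max_rx_pkt_len, (1u << strd_num_n) * (1u << strd_sz_n)) */
	unsigned int wqe_buf_size = (1u << strd_num_n) * (1u << strd_sz_n);
	unsigned int max_lro_size = min_u(max_rx_pkt_len, wqe_buf_size);

	printf("WQE buffer: %u B, max LRO size: %u B\n",
	       wqe_buf_size, max_lro_size);
	return 0;
}
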
@@ -1680,13 +1686,21 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                        " strd_num_n = %u, strd_sz_n = %u",
                        dev->data->port_id, idx,
                        tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
-       } else if (max_rx_pkt_len <= (mb_len - RTE_PKTMBUF_HEADROOM)) {
+       } else if (max_rx_pkt_len <= first_mb_free_size) {
                tmpl->rxq.sges_n = 0;
                max_lro_size = max_rx_pkt_len;
        } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
                unsigned int size = non_scatter_min_mbuf_size;
                unsigned int sges_n;
 
+               if (mlx5_lro_on(dev) && first_mb_free_size <
+                   MLX5_MAX_LRO_HEADER_FIX) {
+                       DRV_LOG(ERR, "Not enough space in the first segment (%u)"
+                               " to include the max header size (%u) for LRO",
+                               first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
+                       rte_errno = ENOTSUP;
+                       goto error;
+               }
                /*
                 * Determine the number of SGEs needed for a full packet
                 * and round it to the next power of two.
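
The truncated comment above describes the scatter path: divide the worst-case frame size by the per-mbuf capacity and round the segment count up to the next power of two, since sges_n is stored as a log2 value. A sketch of that computation, using a helper modeled on DPDK's log2above():

#include <stdio.h>

/* Smallest n such that (1u << n) >= v; modeled on DPDK's log2above(). */
static unsigned int
log2above(unsigned int v)
{
	unsigned int l;
	unsigned int r;

	for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
		r |= (v & 1);
	return l + r;
}

int
main(void)
{
	unsigned int mb_len = 2176;         /* data room of one mbuf */
	unsigned int headroom = 128;        /* RTE_PKTMBUF_HEADROOM */
	unsigned int max_rx_pkt_len = 9000; /* example jumbo frame */
	unsigned int size = max_rx_pkt_len + headroom;
	/* Segments needed for a full packet, rounded up to a power of two. */
	unsigned int sges_n = log2above((size + mb_len - 1) / mb_len);

	printf("%u segments (sges_n = %u)\n", 1u << sges_n, sges_n);
	return 0;
}
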
@@ -2086,8 +2100,6 @@ mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
  *   Number of queues.
  * @param tunnel
  *   Tunnel type.
- * @param lro
- *   Flow rule is relevant for LRO, i.e. contains IPv4/IPv6 and TCP.
  *
  * @return
  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
@@ -2097,7 +2109,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
              const uint8_t *rss_key, uint32_t rss_key_len,
              uint64_t hash_fields,
              const uint16_t *queues, uint32_t queues_n,
-             int tunnel __rte_unused, int lro)
+             int tunnel __rte_unused)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_hrxq *hrxq;
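
Dropping the lro parameter means callers of mlx5_hrxq_new() no longer decide LRO per flow rule; the queue object asks the device itself via mlx5_lro_on(), so implicitly created flows pick up LRO as well. A toy model of that design shift, with stand-in types that are not the driver's real structures:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the driver's structures; illustration only. */
struct dev_config { unsigned int lro_enabled; };
struct eth_dev { struct dev_config config; };

/* Sketch of the idea behind mlx5_lro_on(): read the device state. */
static bool
lro_on(struct eth_dev *dev)
{
	return dev->config.lro_enabled != 0;
}

/*
 * Before the patch, every caller threaded an "lro" flag down to hrxq
 * creation; after it, creation queries the device directly, so queue
 * objects built for implicit flows agree with the port configuration.
 */
static void
hrxq_new(struct eth_dev *dev)
{
	if (lro_on(dev))
		printf("TIR configured with LRO timeout, max size, mask\n");
	else
		printf("plain TIR\n");
}

int
main(void)
{
	struct eth_dev dev = { .config = { .lro_enabled = 1 } };

	hrxq_new(&dev);
	return 0;
}
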
@@ -2204,11 +2216,13 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
                if (dev->data->dev_conf.lpbk_mode)
                        tir_attr.self_lb_block =
                                        MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
-               if (lro) {
+               if (mlx5_lro_on(dev)) {
                        tir_attr.lro_timeout_period_usecs =
                                        priv->config.lro.timeout;
                        tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
-                       tir_attr.lro_enable_mask = lro;
+                       tir_attr.lro_enable_mask =
+                                       MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+                                       MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
                }
                tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
                if (!tir) {
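
Since the caller-supplied mask is gone, the TIR now enables LRO for both address families unconditionally whenever the port has LRO on. A minimal sketch of the mask construction, with the single-bit flag values assumed for illustration (the authoritative definitions are in mlx5_prm.h):

#include <stdint.h>
#include <stdio.h>

/* Assumed single-bit values; the real flags live in mlx5_prm.h. */
#define TIRC_LRO_ENABLE_MASK_IPV4_LRO (1u << 0)
#define TIRC_LRO_ENABLE_MASK_IPV6_LRO (1u << 1)

struct tir_attr {
	uint32_t lro_timeout_period_usecs;
	uint32_t lro_max_msg_sz;
	uint32_t lro_enable_mask;
};

int
main(void)
{
	struct tir_attr tir_attr = { 0 };

	/* Mirrors the patched branch: enable LRO for both families. */
	tir_attr.lro_timeout_period_usecs = 32; /* example timeout */
	tir_attr.lro_max_msg_sz = 65280;        /* example message cap */
	tir_attr.lro_enable_mask = TIRC_LRO_ENABLE_MASK_IPV4_LRO |
				   TIRC_LRO_ENABLE_MASK_IPV6_LRO;
	printf("lro_enable_mask = 0x%x\n", tir_attr.lro_enable_mask);
	return 0;
}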