net/mlx5: support LRO with single RxQ object
author      Dekel Peled <dekelp@mellanox.com>
            Mon, 22 Jul 2019 14:52:19 +0000 (14:52 +0000)
committer   Ferruh Yigit <ferruh.yigit@intel.com>
            Tue, 23 Jul 2019 12:31:36 +0000 (14:31 +0200)
Implement LRO support using a single RQ object per DPDK RxQ.
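For reference, an application requests LRO through the generic Rx
offload flag; a minimal sketch (enable_lro() is a hypothetical helper,
not part of this patch, and queue setup is omitted):

    #include <rte_ethdev.h>

    /* Configure a port with TCP LRO requested on its Rx queues. */
    static int
    enable_lro(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
    {
            struct rte_eth_conf conf = { 0 };

            /* mlx5_lro_on(), used in this patch, is expected to
             * reflect this flag. */
            conf.rxmode.offloads |= DEV_RX_OFFLOAD_TCP_LRO;
            return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
    }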

Signed-off-by: Dekel Peled <dekelp@mellanox.com>
Acked-by: Matan Azrad <matan@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
drivers/net/mlx5/mlx5_flow.h
drivers/net/mlx5/mlx5_flow_dv.c
drivers/net/mlx5/mlx5_flow_verbs.c
drivers/net/mlx5/mlx5_rxq.c
drivers/net/mlx5/mlx5_rxtx.h
drivers/net/mlx5/mlx5_trigger.c

diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index f3c563e..3f96bec 100644
        (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \
         MLX5_FLOW_LAYER_OUTER_L4)
 
+/* LRO support mask, i.e. flow contains IPv4/IPv6 and TCP. */
+#define MLX5_FLOW_LAYER_IPV4_LRO \
+       (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L4_TCP)
+#define MLX5_FLOW_LAYER_IPV6_LRO \
+       (MLX5_FLOW_LAYER_OUTER_L3_IPV6 | MLX5_FLOW_LAYER_OUTER_L4_TCP)
+
 /* Tunnel Masks. */
 #define MLX5_FLOW_LAYER_TUNNEL \
        (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
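Each LRO mask pairs an outer L3 bit with the outer TCP bit, so
eligibility is tested with the all-bits-set idiom rather than a plain
non-zero check; a minimal sketch (layers stands in for
dev_flow->layers):

    /* True only when the flow matched both IPv4 and TCP; a plain
     * (layers & mask) != 0 test would also fire on IPv4-only flows. */
    if ((layers & MLX5_FLOW_LAYER_IPV4_LRO) == MLX5_FLOW_LAYER_IPV4_LRO)
            lro = MLX5_FLOW_IPV4_LRO;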
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 36696c8..7240d3b 100644
@@ -62,6 +62,9 @@ union flow_dv_attr {
        uint32_t attr;
 };
 
+#define MLX5_FLOW_IPV4_LRO (1 << 0)
+#define MLX5_FLOW_IPV6_LRO (1 << 1)
+
 /**
  * Initialize flow attributes structure according to flow items' types.
  *
@@ -5161,13 +5164,27 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                                             dv->hash_fields,
                                             (*flow->queue),
                                             flow->rss.queue_num);
-                       if (!hrxq)
+                       if (!hrxq) {
+                               int lro = 0;
+
+                               if (mlx5_lro_on(dev)) {
+                                       if ((dev_flow->layers &
+                                            MLX5_FLOW_LAYER_IPV4_LRO)
+                                           == MLX5_FLOW_LAYER_IPV4_LRO)
+                                               lro = MLX5_FLOW_IPV4_LRO;
+                                       else if ((dev_flow->layers &
+                                                 MLX5_FLOW_LAYER_IPV6_LRO)
+                                                == MLX5_FLOW_LAYER_IPV6_LRO)
+                                               lro = MLX5_FLOW_IPV6_LRO;
+                               }
                                hrxq = mlx5_hrxq_new
                                        (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
                                         dv->hash_fields, (*flow->queue),
                                         flow->rss.queue_num,
                                         !!(dev_flow->layers &
-                                           MLX5_FLOW_LAYER_TUNNEL));
+                                           MLX5_FLOW_LAYER_TUNNEL), lro);
+                       }
+
                        if (!hrxq) {
                                rte_flow_error_set
                                        (error, rte_errno,
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index b3395b8..bcec3b4 100644
@@ -1669,7 +1669,8 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                                                     (*flow->queue),
                                                     flow->rss.queue_num,
                                                     !!(dev_flow->layers &
-                                                     MLX5_FLOW_LAYER_TUNNEL));
+                                                       MLX5_FLOW_LAYER_TUNNEL),
+                                                    0);
                        if (!hrxq) {
                                rte_flow_error_set
                                        (error, rte_errno,
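The Verbs path always passes 0 for the new lro argument: when LRO is
enabled the Rx queue objects are created as DevX RQs (see the
mlx5_rxq_start() change below), so only the DV/DevX path ever programs
LRO on a TIR.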
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 1e09078..b87eecc 100644
@@ -2030,6 +2030,8 @@ mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
  *   Number of queues.
  * @param tunnel
  *   Tunnel type.
+ * @param lro
+ *   Flow rule is relevant for LRO, i.e. contains IPv4/IPv6 and TCP.
  *
  * @return
  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
@@ -2039,7 +2041,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
              const uint8_t *rss_key, uint32_t rss_key_len,
              uint64_t hash_fields,
              const uint16_t *queues, uint32_t queues_n,
-             int tunnel __rte_unused)
+             int tunnel __rte_unused, int lro)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_hrxq *hrxq;
@@ -2146,6 +2148,12 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
                if (dev->data->dev_conf.lpbk_mode)
                        tir_attr.self_lb_block =
                                        MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+               if (lro) {
+                       tir_attr.lro_timeout_period_usecs =
+                                       priv->config.lro.timeout;
+                       tir_attr.lro_max_msg_sz = 0xff;
+                       tir_attr.lro_enable_mask = lro;
+               }
                tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
                if (!tir) {
                        DRV_LOG(ERR, "port %u cannot create DevX TIR",
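A hedged reading of the three TIR attributes (based on the mlx5 PRM;
the size granularity is an assumption, not stated in this patch):

    /* Cap on how long the device may keep an LRO session open. */
    tir_attr.lro_timeout_period_usecs = priv->config.lro.timeout;
    /* Cap on the aggregated payload size; the PRM defines it in
     * 256-byte units (assumption), so 0xff allows roughly 64KB. */
    tir_attr.lro_max_msg_sz = 0xff;
    /* Bit 0 enables IPv4 LRO, bit 1 IPv6 LRO, matching the
     * MLX5_FLOW_IPV4_LRO/MLX5_FLOW_IPV6_LRO values above. */
    tir_attr.lro_enable_mask = lro;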
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index bd4ae80..ed5f637 100644
@@ -346,7 +346,7 @@ struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,
                                const uint8_t *rss_key, uint32_t rss_key_len,
                                uint64_t hash_fields,
                                const uint16_t *queues, uint32_t queues_n,
-                               int tunnel __rte_unused);
+                               int tunnel __rte_unused, int lro);
 struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
                                const uint8_t *rss_key, uint32_t rss_key_len,
                                uint64_t hash_fields,
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index acd2902..8bc2174 100644
@@ -99,6 +99,9 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
        struct mlx5_priv *priv = dev->data->dev_private;
        unsigned int i;
        int ret = 0;
+       unsigned int lro_on = mlx5_lro_on(dev);
+       enum mlx5_rxq_obj_type obj_type = lro_on ? MLX5_RXQ_OBJ_TYPE_DEVX_RQ :
+                                                  MLX5_RXQ_OBJ_TYPE_IBV;
 
        /* Allocate/reuse/resize mempool for Multi-Packet RQ. */
        if (mlx5_mprq_alloc_mp(dev)) {
@@ -123,11 +126,13 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
                ret = rxq_alloc_elts(rxq_ctrl);
                if (ret)
                        goto error;
-               rxq_ctrl->obj = mlx5_rxq_obj_new(dev, i,
-                                                MLX5_RXQ_OBJ_TYPE_DEVX_RQ);
+               rxq_ctrl->obj = mlx5_rxq_obj_new(dev, i, obj_type);
                if (!rxq_ctrl->obj)
                        goto error;
-               rxq_ctrl->wqn = rxq_ctrl->obj->wq->wq_num;
+               if (obj_type == MLX5_RXQ_OBJ_TYPE_IBV)
+                       rxq_ctrl->wqn = rxq_ctrl->obj->wq->wq_num;
+               else if (obj_type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
+                       rxq_ctrl->wqn = rxq_ctrl->obj->rq->id;
        }
        return 0;
 error:
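Note that both branches record the hardware queue number in the same
rxq_ctrl->wqn field: a Verbs object exposes it as wq->wq_num, a DevX
object as rq->id, so downstream users of wqn need not distinguish the
two object types.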