]> git.droids-corp.org - dpdk.git/commitdiff
net/mlx5: fix packet type offload for tunnels
authorMatan Azrad <matan@mellanox.com>
Wed, 1 Aug 2018 08:37:38 +0000 (08:37 +0000)
committerShahaf Shuler <shahafs@mellanox.com>
Thu, 2 Aug 2018 10:34:18 +0000 (12:34 +0200)
There are dedicated QP attributes, tunnel offload flag and mask, which
must be configured in order to allow part of the HW tunnel offloads.

So, if a QP is pointed by a tunnel flow, the above QP attributes
should be configured.

The mask configuration is wrongly performed only if an internal RSS was
configured by the user, although there is no reason to make the tunnel
offloads conditional on the RSS configuration.

Consequently, some of the tunnel offloads were not performed by the HW
when a tunnel flow was configured; for example, the packet tunnel
types were not reported to the user.

Replace the internal RSS condition with the tunnel flow condition.

Fixes: df6afd377ace ("net/mlx5: remove useless arguments in hrxq API")
Signed-off-by: Matan Azrad <matan@mellanox.com>
drivers/net/mlx5/mlx5_flow.c
drivers/net/mlx5/mlx5_rxq.c
drivers/net/mlx5/mlx5_rxtx.h

index adda4df6565d7f4eae9f0acb2cb6a21eb1e2dda4..923fc28589441cb465e1679157fbafcf266cfbbd 100644 (file)
@@ -2879,7 +2879,9 @@ mlx5_flow_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                                                     MLX5_RSS_HASH_KEY_LEN,
                                                     verbs->hash_fields,
                                                     (*flow->queue),
-                                                    flow->rss.queue_num);
+                                                    flow->rss.queue_num,
+                                                    !!(flow->layers &
+                                                     MLX5_FLOW_LAYER_TUNNEL));
                        if (!hrxq) {
                                rte_flow_error_set
                                        (error, rte_errno,
index 16e1641d00bc0f539b6e54966f471258772ecb12..97b3e8ef0c674e7b7ef93e3e7755f4eb32e6e809 100644 (file)
@@ -1752,7 +1752,8 @@ struct mlx5_hrxq *
 mlx5_hrxq_new(struct rte_eth_dev *dev,
              const uint8_t *rss_key, uint32_t rss_key_len,
              uint64_t hash_fields,
-             const uint16_t *queues, uint32_t queues_n)
+             const uint16_t *queues, uint32_t queues_n,
+             int tunnel __rte_unused)
 {
        struct priv *priv = dev->data->dev_private;
        struct mlx5_hrxq *hrxq;
@@ -1794,9 +1795,8 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
                        .pd = priv->pd,
                 },
                 &(struct mlx5dv_qp_init_attr){
-                       .comp_mask = (hash_fields & IBV_RX_HASH_INNER) ?
-                                MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS :
-                                0,
+                       .comp_mask = tunnel ?
+                               MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS : 0,
                        .create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS,
                 });
 #else
index 992e9770039def9db69e1d1398ac2be42f0e5660..48ed2b209ab0cc8363d1f8eb5fca53080011d6c9 100644 (file)
@@ -283,7 +283,8 @@ void mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev);
 struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,
                                const uint8_t *rss_key, uint32_t rss_key_len,
                                uint64_t hash_fields,
-                               const uint16_t *queues, uint32_t queues_n);
+                               const uint16_t *queues, uint32_t queues_n,
+                               int tunnel __rte_unused);
 struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
                                const uint8_t *rss_key, uint32_t rss_key_len,
                                uint64_t hash_fields,