net/mlx5: support NVGRE encap action in sampling
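
This allows the sample action's sub-action list to carry an L2 tunnel
encapsulation: RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP and
RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP are validated through
flow_dv_validate_action_l2_encap() and translated through the same
encap resource path already used for RAW_ENCAP.

A minimal, illustrative sketch (not part of the patch) of how an
application could combine sampling with NVGRE encapsulation through the
rte_flow API; the helper name, header values, peer port id and group
numbers are assumptions made for the example:

    #include <rte_flow.h>

    /*
     * Mirror every packet of group 0, NVGRE-encapsulate the sampled copy
     * and send it to a peer port, while the original packet jumps to
     * group 1. All values below are placeholders.
     */
    static struct rte_flow *
    sample_with_nvgre_encap(uint16_t port, struct rte_flow_error *error)
    {
            static struct rte_flow_item_eth enc_eth; /* outer MACs left zeroed */
            static struct rte_flow_item_ipv4 enc_ipv4 = {
                    .hdr.src_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 1)),
                    .hdr.dst_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 2)),
            };
            static struct rte_flow_item_nvgre enc_nvgre = {
                    .tni = { 0x00, 0x00, 0x64 }, /* TNI 100 */
            };
            /* Header stack the PMD prepends to the sampled copy. */
            static struct rte_flow_item enc_items[] = {
                    { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &enc_eth },
                    { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &enc_ipv4 },
                    { .type = RTE_FLOW_ITEM_TYPE_NVGRE, .spec = &enc_nvgre },
                    { .type = RTE_FLOW_ITEM_TYPE_END },
            };
            static struct rte_flow_action_nvgre_encap encap = {
                    .definition = enc_items,
            };
            static struct rte_flow_action_port_id peer = { .id = 1 };
            /* Sub-actions applied only to the sampled (mirrored) packets. */
            static struct rte_flow_action sample_acts[] = {
                    { .type = RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP, .conf = &encap },
                    { .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &peer },
                    { .type = RTE_FLOW_ACTION_TYPE_END },
            };
            static struct rte_flow_action_sample sample = {
                    .ratio = 1, /* sample (mirror) all packets */
                    .actions = sample_acts,
            };
            static struct rte_flow_action_jump jump = { .group = 1 };
            struct rte_flow_attr attr = { .group = 0, .transfer = 1 };
            struct rte_flow_item pattern[] = {
                    { .type = RTE_FLOW_ITEM_TYPE_ETH },
                    { .type = RTE_FLOW_ITEM_TYPE_END },
            };
            struct rte_flow_action actions[] = {
                    { .type = RTE_FLOW_ACTION_TYPE_SAMPLE, .conf = &sample },
                    { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
                    { .type = RTE_FLOW_ACTION_TYPE_END },
            };

            return rte_flow_create(port, &attr, pattern, actions, error);
    }

Whether the transfer attribute and the port_id fate are appropriate
depends on the deployment; the sketch only shows where the encap
sub-action sits inside the sample action.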
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index ecabf63..51c95f1 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -4650,6 +4650,27 @@ flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
                                NULL,
                                "modifications of an arbitrary"
                                " place in a packet is not supported");
+       if (action_modify_field->dst.field == RTE_FLOW_FIELD_VLAN_TYPE ||
+           action_modify_field->src.field == RTE_FLOW_FIELD_VLAN_TYPE)
+               return rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ACTION,
+                               NULL,
+                               "modifications of the 802.1Q Tag"
+                               " Identifier is not supported");
+       if (action_modify_field->dst.field == RTE_FLOW_FIELD_VXLAN_VNI ||
+           action_modify_field->src.field == RTE_FLOW_FIELD_VXLAN_VNI)
+               return rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ACTION,
+                               NULL,
+                               "modifications of the VXLAN Network"
+                               " Identifier is not supported");
+       if (action_modify_field->dst.field == RTE_FLOW_FIELD_GENEVE_VNI ||
+           action_modify_field->src.field == RTE_FLOW_FIELD_GENEVE_VNI)
+               return rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ACTION,
+                               NULL,
+                               "modifications of the GENEVE Network"
+                               " Identifier is not supported");
        if (action_modify_field->dst.field == RTE_FLOW_FIELD_MARK ||
            action_modify_field->src.field == RTE_FLOW_FIELD_MARK) {
                if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
@@ -5237,6 +5258,17 @@ flow_dv_validate_action_sample(uint64_t *action_flags,
                                return ret;
                        ++actions_n;
                        break;
+               case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+               case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+                       ret = flow_dv_validate_action_l2_encap(dev,
+                                                              sub_action_flags,
+                                                              act, attr,
+                                                              error);
+                       if (ret < 0)
+                               return ret;
+                       sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
+                       ++actions_n;
+                       break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
@@ -10399,6 +10431,8 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev,
                        action_flags |= MLX5_FLOW_ACTION_PORT_ID;
                        break;
                }
+               case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+               case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
                case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
                        /* Save the encap resource before sample */
                        pre_rix = dev_flow->handle->dvh.rix_encap_decap;
@@ -11874,28 +11908,51 @@ flow_dv_translate(struct rte_eth_dev *dev,
 static int
 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
                              const uint64_t hash_fields,
-                             const int tunnel,
                              uint32_t hrxq_idx)
 {
-       uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
+       uint32_t *hrxqs = action->hrxq;
 
        switch (hash_fields & ~IBV_RX_HASH_INNER) {
        case MLX5_RSS_HASH_IPV4:
+               /* fall-through. */
+       case MLX5_RSS_HASH_IPV4_DST_ONLY:
+               /* fall-through. */
+       case MLX5_RSS_HASH_IPV4_SRC_ONLY:
                hrxqs[0] = hrxq_idx;
                return 0;
        case MLX5_RSS_HASH_IPV4_TCP:
+               /* fall-through. */
+       case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
+               /* fall-through. */
+       case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
                hrxqs[1] = hrxq_idx;
                return 0;
        case MLX5_RSS_HASH_IPV4_UDP:
+               /* fall-through. */
+       case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
+               /* fall-through. */
+       case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
                hrxqs[2] = hrxq_idx;
                return 0;
        case MLX5_RSS_HASH_IPV6:
+               /* fall-through. */
+       case MLX5_RSS_HASH_IPV6_DST_ONLY:
+               /* fall-through. */
+       case MLX5_RSS_HASH_IPV6_SRC_ONLY:
                hrxqs[3] = hrxq_idx;
                return 0;
        case MLX5_RSS_HASH_IPV6_TCP:
+               /* fall-through. */
+       case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
+               /* fall-through. */
+       case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
                hrxqs[4] = hrxq_idx;
                return 0;
        case MLX5_RSS_HASH_IPV6_UDP:
+               /* fall-through. */
+       case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
+               /* fall-through. */
+       case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
                hrxqs[5] = hrxq_idx;
                return 0;
        case MLX5_RSS_HASH_NONE:
@@ -11924,33 +11981,56 @@ __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
  */
 static uint32_t
 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
-                                const uint64_t hash_fields,
-                                const int tunnel)
+                                const uint64_t hash_fields)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_shared_action_rss *shared_rss =
            mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
-       const uint32_t *hrxqs = tunnel ? shared_rss->hrxq :
-                                                       shared_rss->hrxq_tunnel;
+       const uint32_t *hrxqs = shared_rss->hrxq;
 
        switch (hash_fields & ~IBV_RX_HASH_INNER) {
        case MLX5_RSS_HASH_IPV4:
+               /* fall-through. */
+       case MLX5_RSS_HASH_IPV4_DST_ONLY:
+               /* fall-through. */
+       case MLX5_RSS_HASH_IPV4_SRC_ONLY:
                return hrxqs[0];
        case MLX5_RSS_HASH_IPV4_TCP:
+               /* fall-through. */
+       case MLX5_RSS_HASH_IPV4_TCP_DST_ONLY:
+               /* fall-through. */
+       case MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY:
                return hrxqs[1];
        case MLX5_RSS_HASH_IPV4_UDP:
+               /* fall-through. */
+       case MLX5_RSS_HASH_IPV4_UDP_DST_ONLY:
+               /* fall-through. */
+       case MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY:
                return hrxqs[2];
        case MLX5_RSS_HASH_IPV6:
+               /* fall-through. */
+       case MLX5_RSS_HASH_IPV6_DST_ONLY:
+               /* fall-through. */
+       case MLX5_RSS_HASH_IPV6_SRC_ONLY:
                return hrxqs[3];
        case MLX5_RSS_HASH_IPV6_TCP:
+               /* fall-through. */
+       case MLX5_RSS_HASH_IPV6_TCP_DST_ONLY:
+               /* fall-through. */
+       case MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY:
                return hrxqs[4];
        case MLX5_RSS_HASH_IPV6_UDP:
+               /* fall-through. */
+       case MLX5_RSS_HASH_IPV6_UDP_DST_ONLY:
+               /* fall-through. */
+       case MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY:
                return hrxqs[5];
        case MLX5_RSS_HASH_NONE:
                return hrxqs[6];
        default:
                return 0;
        }
 }
 
 /**
@@ -11992,11 +12072,19 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                n = dv->actions_n;
                if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
                        if (dv->transfer) {
-                               dv->actions[n++] = priv->sh->esw_drop_action;
+                               MLX5_ASSERT(priv->sh->dr_drop_action);
+                               dv->actions[n++] = priv->sh->dr_drop_action;
                        } else {
+#ifdef HAVE_MLX5DV_DR
+                               /* DR supports drop action placeholder. */
+                               MLX5_ASSERT(priv->sh->dr_drop_action);
+                               dv->actions[n++] = priv->sh->dr_drop_action;
+#else
+                               /* For DV we use the explicit drop queue. */
                                MLX5_ASSERT(priv->drop_queue.hrxq);
                                dv->actions[n++] =
                                                priv->drop_queue.hrxq->action;
+#endif
                        }
                } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
                           !dv_h->rix_sample && !dv_h->rix_dest_array)) {
@@ -12020,9 +12108,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 
                        hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
                                                rss_desc->shared_rss,
-                                               dev_flow->hash_fields,
-                                               !!(dh->layers &
-                                               MLX5_FLOW_LAYER_TUNNEL));
+                                               dev_flow->hash_fields);
                        if (hrxq_idx)
                                hrxq = mlx5_ipool_get
                                        (priv->sh->ipool[MLX5_IPOOL_HRXQ],
@@ -12633,8 +12719,85 @@ static int
 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
                                 struct mlx5_shared_action_rss *shared_rss)
 {
-       return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq) +
-               __flow_dv_hrxqs_release(dev, &shared_rss->hrxq_tunnel);
+       return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
+}
+
+/**
+ * Adjust L3/L4 hash value of pre-created shared RSS hrxq according to
+ * user input.
+ *
+ * Only one hash value is available for each L3+L4 combination. For
+ * example, MLX5_RSS_HASH_IPV4, MLX5_RSS_HASH_IPV4_SRC_ONLY and
+ * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive, so they can share
+ * the same slot in mlx5_rss_hash_fields.
+ *
+ * @param[in] rss
+ *   Pointer to the shared action RSS conf.
+ * @param[in, out] hash_field
+ *   hash_field variable to be adjusted.
+ *
+ * @return
+ *   void
+ */
+static void
+__flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
+                                    uint64_t *hash_field)
+{
+       uint64_t rss_types = rss->origin.types;
+
+       switch (*hash_field & ~IBV_RX_HASH_INNER) {
+       case MLX5_RSS_HASH_IPV4:
+               if (rss_types & MLX5_IPV4_LAYER_TYPES) {
+                       *hash_field &= ~MLX5_RSS_HASH_IPV4;
+                       if (rss_types & ETH_RSS_L3_DST_ONLY)
+                               *hash_field |= IBV_RX_HASH_DST_IPV4;
+                       else if (rss_types & ETH_RSS_L3_SRC_ONLY)
+                               *hash_field |= IBV_RX_HASH_SRC_IPV4;
+                       else
+                               *hash_field |= MLX5_RSS_HASH_IPV4;
+               }
+               return;
+       case MLX5_RSS_HASH_IPV6:
+               if (rss_types & MLX5_IPV6_LAYER_TYPES) {
+                       *hash_field &= ~MLX5_RSS_HASH_IPV6;
+                       if (rss_types & ETH_RSS_L3_DST_ONLY)
+                               *hash_field |= IBV_RX_HASH_DST_IPV6;
+                       else if (rss_types & ETH_RSS_L3_SRC_ONLY)
+                               *hash_field |= IBV_RX_HASH_SRC_IPV6;
+                       else
+                               *hash_field |= MLX5_RSS_HASH_IPV6;
+               }
+               return;
+       case MLX5_RSS_HASH_IPV4_UDP:
+               /* fall-through. */
+       case MLX5_RSS_HASH_IPV6_UDP:
+               if (rss_types & ETH_RSS_UDP) {
+                       *hash_field &= ~MLX5_UDP_IBV_RX_HASH;
+                       if (rss_types & ETH_RSS_L4_DST_ONLY)
+                               *hash_field |= IBV_RX_HASH_DST_PORT_UDP;
+                       else if (rss_types & ETH_RSS_L4_SRC_ONLY)
+                               *hash_field |= IBV_RX_HASH_SRC_PORT_UDP;
+                       else
+                               *hash_field |= MLX5_UDP_IBV_RX_HASH;
+               }
+               return;
+       case MLX5_RSS_HASH_IPV4_TCP:
+               /* fall-through. */
+       case MLX5_RSS_HASH_IPV6_TCP:
+               if (rss_types & ETH_RSS_TCP) {
+                       *hash_field &= ~MLX5_TCP_IBV_RX_HASH;
+                       if (rss_types & ETH_RSS_L4_DST_ONLY)
+                               *hash_field |= IBV_RX_HASH_DST_PORT_TCP;
+                       else if (rss_types & ETH_RSS_L4_SRC_ONLY)
+                               *hash_field |= IBV_RX_HASH_SRC_PORT_TCP;
+                       else
+                               *hash_field |= MLX5_TCP_IBV_RX_HASH;
+               }
+               return;
+       default:
+               return;
+       }
 }
 
 /**
@@ -12680,23 +12843,26 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
        for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
                uint32_t hrxq_idx;
                uint64_t hash_fields = mlx5_rss_hash_fields[i];
-               int tunnel;
+               int tunnel = 0;
 
-               for (tunnel = 0; tunnel < 2; tunnel++) {
-                       rss_desc.tunnel = tunnel;
-                       rss_desc.hash_fields = hash_fields;
-                       hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
-                       if (!hrxq_idx) {
-                               rte_flow_error_set
-                                       (error, rte_errno,
-                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-                                        "cannot get hash queue");
-                               goto error_hrxq_new;
-                       }
-                       err = __flow_dv_action_rss_hrxq_set
-                               (shared_rss, hash_fields, tunnel, hrxq_idx);
-                       MLX5_ASSERT(!err);
+               __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
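+               /* Level above 1 means hashing on the inner headers. */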
+               if (shared_rss->origin.level > 1) {
+                       hash_fields |= IBV_RX_HASH_INNER;
+                       tunnel = 1;
+               }
+               rss_desc.tunnel = tunnel;
+               rss_desc.hash_fields = hash_fields;
+               hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
+               if (!hrxq_idx) {
+                       rte_flow_error_set
+                               (error, rte_errno,
+                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                "cannot get hash queue");
+                       goto error_hrxq_new;
                }
+               err = __flow_dv_action_rss_hrxq_set
+                       (shared_rss, hash_fields, hrxq_idx);
+               MLX5_ASSERT(!err);
        }
        return 0;
 error_hrxq_new:
@@ -13704,7 +13870,8 @@ mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
                                                    &actions[0]);
        if (ret)
                goto err;
-       actions[1] = priv->drop_queue.hrxq->action;
+       actions[1] = sh->dr_drop_action ? sh->dr_drop_action :
+                                         priv->drop_queue.hrxq->action;
        dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
        ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
                                               &matcher);