diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index bf9ca54..39ad712 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -800,6 +800,8 @@ mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
  *   Bit-masks covering supported fields by the NIC to compare with user mask.
  * @param[in] size
  *   Bit-masks size in bytes.
+ * @param[in] range_accepted
+ *   True if range of values is accepted for specific fields, false otherwise.
  * @param[out] error
  *   Pointer to error structure.
  *
@@ -811,6 +813,7 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item,
                          const uint8_t *mask,
                          const uint8_t *nic_mask,
                          unsigned int size,
+                         bool range_accepted,
                          struct rte_flow_error *error)
 {
        unsigned int i;
@@ -828,7 +831,7 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "mask/last without a spec is not"
                                          " supported");
-       if (item->spec && item->last) {
+       if (item->spec && item->last && !range_accepted) {
                uint8_t spec[size];
                uint8_t last[size];
                unsigned int i;
@@ -1603,7 +1606,8 @@ mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
        ret = mlx5_flow_item_acceptable
                (item, (const uint8_t *)mask,
                 (const uint8_t *)&rte_flow_item_icmp6_mask,
-                sizeof(struct rte_flow_item_icmp6), error);
+                sizeof(struct rte_flow_item_icmp6),
+                MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        if (ret < 0)
                return ret;
        return 0;
@@ -1661,7 +1665,8 @@ mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
        ret = mlx5_flow_item_acceptable
                (item, (const uint8_t *)mask,
                 (const uint8_t *)&nic_mask,
-                sizeof(struct rte_flow_item_icmp), error);
+                sizeof(struct rte_flow_item_icmp),
+                MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        if (ret < 0)
                return ret;
        return 0;
@@ -1716,7 +1721,7 @@ mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_eth),
-                                       error);
+                                       MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        return ret;
 }
 
@@ -1770,7 +1775,7 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_vlan),
-                                       error);
+                                       MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        if (ret)
                return ret;
        if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
@@ -1822,6 +1827,8 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
  * @param[in] acc_mask
  *   Acceptable mask, if NULL default internal default mask
  *   will be used to check whether item fields are supported.
+ * @param[in] range_accepted
+ *   True if range of values is accepted for specific fields, false otherwise.
  * @param[out] error
  *   Pointer to error structure.
  *
@@ -1834,6 +1841,7 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
                             uint64_t last_item,
                             uint16_t ether_type,
                             const struct rte_flow_item_ipv4 *acc_mask,
+                            bool range_accepted,
                             struct rte_flow_error *error)
 {
        const struct rte_flow_item_ipv4 *mask = item->mask;
@@ -1904,7 +1912,7 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
                                        acc_mask ? (const uint8_t *)acc_mask
                                                 : (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_ipv4),
-                                       error);
+                                       range_accepted, error);
        if (ret < 0)
                return ret;
        return 0;
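
The new argument is threaded through mlx5_flow_validate_item_ipv4() from
its call sites. A hedged call-site sketch: MLX5_ITEM_RANGE_ACCEPTED is
assumed to be the true-valued counterpart of MLX5_ITEM_RANGE_NOT_ACCEPTED,
and `nic_ipv4_mask` a caller-provided acceptable mask:

	/* Hypothetical caller that opts in to spec/last ranges; the
	 * strict callers in this file pass MLX5_ITEM_RANGE_NOT_ACCEPTED
	 * instead.
	 */
	ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
					   ether_type, &nic_ipv4_mask,
					   MLX5_ITEM_RANGE_ACCEPTED, error);
	if (ret < 0)
		return ret;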
@@ -1969,9 +1977,9 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "IPv6 cannot follow L2/VLAN layer "
                                          "which ether type is not IPv6");
+       if (mask && mask->hdr.proto == UINT8_MAX && spec)
+               next_proto = spec->hdr.proto;
        if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) {
-               if (mask && spec)
-                       next_proto = mask->hdr.proto & spec->hdr.proto;
                if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1979,6 +1987,16 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
                                                  "multiple tunnel "
                                                  "not supported");
        }
+       if (next_proto == IPPROTO_HOPOPTS  ||
+           next_proto == IPPROTO_ROUTING  ||
+           next_proto == IPPROTO_FRAGMENT ||
+           next_proto == IPPROTO_ESP      ||
+           next_proto == IPPROTO_AH       ||
+           next_proto == IPPROTO_DSTOPTS)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "IPv6 proto (next header) should "
+                                         "not be set as extension header");
        if (item_flags & MLX5_FLOW_LAYER_IPIP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
@@ -2003,7 +2021,7 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
                                        acc_mask ? (const uint8_t *)acc_mask
                                                 : (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_ipv6),
-                                       error);
+                                       MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        if (ret < 0)
                return ret;
        return 0;
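
To make the new rejection concrete: `next_proto` is taken from `spec` only
when the proto mask is exact, so a fully-masked IPv6 item naming an
extension header as the next protocol now fails validation. A sketch with
hypothetical values:

	/* Hypothetical item: IPv6 whose next header is Fragment (44).
	 * Since mask->hdr.proto == UINT8_MAX, next_proto becomes
	 * IPPROTO_FRAGMENT and validation returns an error; a partial
	 * proto mask leaves next_proto at its initial value and the
	 * extension-header check is skipped.
	 */
	struct rte_flow_item_ipv6 spec = { .hdr.proto = IPPROTO_FRAGMENT };
	struct rte_flow_item_ipv6 mask = { .hdr.proto = UINT8_MAX };
	struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.spec = &spec,
		.mask = &mask,
	};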
@@ -2058,7 +2076,8 @@ mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
        ret = mlx5_flow_item_acceptable
                (item, (const uint8_t *)mask,
                 (const uint8_t *)&rte_flow_item_udp_mask,
-                sizeof(struct rte_flow_item_udp), error);
+                sizeof(struct rte_flow_item_udp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
+                error);
        if (ret < 0)
                return ret;
        return 0;
@@ -2113,7 +2132,8 @@ mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
        ret = mlx5_flow_item_acceptable
                (item, (const uint8_t *)mask,
                 (const uint8_t *)flow_mask,
-                sizeof(struct rte_flow_item_tcp), error);
+                sizeof(struct rte_flow_item_tcp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
+                error);
        if (ret < 0)
                return ret;
        return 0;
@@ -2167,7 +2187,7 @@ mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
                (item, (const uint8_t *)mask,
                 (const uint8_t *)&rte_flow_item_vxlan_mask,
                 sizeof(struct rte_flow_item_vxlan),
-                error);
+                MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        if (ret < 0)
                return ret;
        if (spec) {
@@ -2238,7 +2258,7 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
                (item, (const uint8_t *)mask,
                 (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
                 sizeof(struct rte_flow_item_vxlan_gpe),
-                error);
+                MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        if (ret < 0)
                return ret;
        if (spec) {
@@ -2312,7 +2332,7 @@ mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
        ret = mlx5_flow_item_acceptable
                (item, (const uint8_t *)mask,
                 (const uint8_t *)&gre_key_default_mask,
-                sizeof(rte_be32_t), error);
+                sizeof(rte_be32_t), MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        return ret;
 }
 
@@ -2364,7 +2384,8 @@ mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
        ret = mlx5_flow_item_acceptable
                (item, (const uint8_t *)mask,
                 (const uint8_t *)&nic_mask,
-                sizeof(struct rte_flow_item_gre), error);
+                sizeof(struct rte_flow_item_gre), MLX5_ITEM_RANGE_NOT_ACCEPTED,
+                error);
        if (ret < 0)
                return ret;
 #ifndef HAVE_MLX5DV_DR
@@ -2439,7 +2460,8 @@ mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
        ret = mlx5_flow_item_acceptable
                                  (item, (const uint8_t *)mask,
                                   (const uint8_t *)&nic_mask,
-                                  sizeof(struct rte_flow_item_geneve), error);
+                                  sizeof(struct rte_flow_item_geneve),
+                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        if (ret)
                return ret;
        if (spec) {
@@ -2522,7 +2544,8 @@ mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
        ret = mlx5_flow_item_acceptable
                (item, (const uint8_t *)mask,
                 (const uint8_t *)&rte_flow_item_mpls_mask,
-                sizeof(struct rte_flow_item_mpls), error);
+                sizeof(struct rte_flow_item_mpls),
+                MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        if (ret < 0)
                return ret;
        return 0;
@@ -2577,7 +2600,8 @@ mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
        ret = mlx5_flow_item_acceptable
                (item, (const uint8_t *)mask,
                 (const uint8_t *)&rte_flow_item_nvgre_mask,
-                sizeof(struct rte_flow_item_nvgre), error);
+                sizeof(struct rte_flow_item_nvgre),
+                MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        if (ret < 0)
                return ret;
        return 0;
@@ -2671,7 +2695,7 @@ mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,
                                         acc_mask ? (const uint8_t *)acc_mask
                                                  : (const uint8_t *)&nic_mask,
                                         sizeof(struct rte_flow_item_ecpri),
-                                        error);
+                                        MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
 }
 
 /* Allocate unique ID for the split Q/RSS subflows. */
@@ -4251,6 +4275,8 @@ flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
  *
  * @param[in] actions
  *   Pointer to the list of actions.
+ * @param[in] attr
+ *   Flow rule attributes.
  * @param[in] action
  *   The action to be check if exist.
  * @param[out] match_action_pos
@@ -4264,10 +4290,15 @@ flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
  */
 static int
 flow_check_match_action(const struct rte_flow_action actions[],
+                       const struct rte_flow_attr *attr,
                        enum rte_flow_action_type action,
                        int *match_action_pos, int *qrss_action_pos)
 {
+       const struct rte_flow_action_sample *sample;
        int actions_n = 0;
+       int jump_flag = 0;
+       uint32_t ratio = 0;
+       int sub_type = 0;
        int flag = 0;
 
        *match_action_pos = -1;
@@ -4280,8 +4311,25 @@ flow_check_match_action(const struct rte_flow_action actions[],
                if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE ||
                    actions->type == RTE_FLOW_ACTION_TYPE_RSS)
                        *qrss_action_pos = actions_n;
+               if (actions->type == RTE_FLOW_ACTION_TYPE_JUMP)
+                       jump_flag = 1;
+               if (actions->type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
+                       sample = actions->conf;
+                       ratio = sample->ratio;
+                       sub_type = ((const struct rte_flow_action *)
+                                       (sample->actions))->type;
+               }
                actions_n++;
        }
+       if (flag && action == RTE_FLOW_ACTION_TYPE_SAMPLE && attr->transfer) {
+               if (ratio == 1) {
+                       /* A JUMP action is not supported for mirroring;
+                        * mirroring supports multiple destinations.
+                        */
+                       if (!jump_flag && sub_type != RTE_FLOW_ACTION_TYPE_END)
+                               flag = 0;
+               }
+       }
        /* Count RTE_FLOW_ACTION_TYPE_END. */
        return flag ? actions_n + 1 : 0;
 }
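
For reference, the action layout this check walks: a SAMPLE action whose
`ratio` is 1 duplicates every packet, i.e. mirroring. A sketch under
assumed sub-action values:

	/* Hypothetical transfer rule with mirroring. Because ratio == 1,
	 * no JUMP action is present, and the sample sub-action list is
	 * non-empty (sub_type != RTE_FLOW_ACTION_TYPE_END), the function
	 * returns 0 and flow_create_split_sample() skips the split.
	 */
	struct rte_flow_action_port_id port_conf = { .id = 1 };
	struct rte_flow_action sub_actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &port_conf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_action_sample sample_conf = {
		.ratio = 1,	/* 1 == mirror all packets */
		.actions = sub_actions,
	};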
@@ -4833,7 +4881,7 @@ flow_create_split_sample(struct rte_eth_dev *dev,
        int ret = 0;
 
        if (priv->sampler_en)
-               actions_n = flow_check_match_action(actions,
+               actions_n = flow_check_match_action(actions, attr,
                                        RTE_FLOW_ACTION_TYPE_SAMPLE,
                                        &sample_action_pos, &qrss_action_pos);
        if (actions_n) {
@@ -6646,7 +6694,7 @@ next_container:
        offset = batch ? 0 : dcs->id % MLX5_COUNTERS_PER_POOL;
        /*
         * Identify the counters released between query trigger and query
-        * handle more effiecntly. The counter released in this gap period
+        * handle more efficiently. The counter released in this gap period
         * should wait for a new round of query as the new arrived packets
         * will not be taken into account.
         */
@@ -6700,19 +6748,26 @@ mlx5_flow_aging_check(struct mlx5_dev_ctx_shared *sh,
        struct mlx5_age_param *age_param;
        struct mlx5_counter_stats_raw *cur = pool->raw_hw;
        struct mlx5_counter_stats_raw *prev = pool->raw;
-       uint16_t curr = rte_rdtsc() / (rte_get_tsc_hz() / 10);
+       const uint64_t curr_time = MLX5_CURR_TIME_SEC;
+       const uint32_t time_delta = curr_time - pool->time_of_last_age_check;
+       uint16_t expected = AGE_CANDIDATE;
        uint32_t i;
 
+       pool->time_of_last_age_check = curr_time;
        for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
                cnt = MLX5_POOL_GET_CNT(pool, i);
                age_param = MLX5_CNT_TO_AGE(cnt);
-               if (rte_atomic16_read(&age_param->state) != AGE_CANDIDATE)
+               if (__atomic_load_n(&age_param->state,
+                                   __ATOMIC_RELAXED) != AGE_CANDIDATE)
                        continue;
                if (cur->data[i].hits != prev->data[i].hits) {
-                       age_param->expire = curr + age_param->timeout;
+                       __atomic_store_n(&age_param->sec_since_last_hit, 0,
+                                        __ATOMIC_RELAXED);
                        continue;
                }
-               if ((uint16_t)(curr - age_param->expire) >= (UINT16_MAX / 2))
+               if (__atomic_add_fetch(&age_param->sec_since_last_hit,
+                                      time_delta,
+                                      __ATOMIC_RELAXED) <= age_param->timeout)
                        continue;
                /**
                 * Hold the lock first, or if between the
@@ -6723,12 +6778,10 @@ mlx5_flow_aging_check(struct mlx5_dev_ctx_shared *sh,
                priv = rte_eth_devices[age_param->port_id].data->dev_private;
                age_info = GET_PORT_AGE_INFO(priv);
                rte_spinlock_lock(&age_info->aged_sl);
-               /* If the cpmset fails, release happens. */
-               if (rte_atomic16_cmpset((volatile uint16_t *)
-                                       &age_param->state,
-                                       AGE_CANDIDATE,
-                                       AGE_TMOUT) ==
-                                       AGE_CANDIDATE) {
+               if (__atomic_compare_exchange_n(&age_param->state, &expected,
+                                               AGE_TMOUT, false,
+                                               __ATOMIC_RELAXED,
+                                               __ATOMIC_RELAXED)) {
                        TAILQ_INSERT_TAIL(&age_info->aged_counters, cnt, next);
                        MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
                }
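
The move from rte_atomic16_cmpset() to the C11-style built-in also changes
the return convention: __atomic_compare_exchange_n() returns a boolean
success flag and, on failure, writes the value it observed back into
`expected`. A minimal standalone sketch of the pattern used above:

	/* Same CAS idiom on a local variable, for illustration only.
	 * Exactly one thread wins the exchange, so the counter is
	 * inserted into the aged list at most once per expiration.
	 */
	uint16_t state = AGE_CANDIDATE;
	uint16_t expected = AGE_CANDIDATE;

	if (__atomic_compare_exchange_n(&state, &expected, AGE_TMOUT,
					false, /* strong CAS */
					__ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
		/* Success: state is now AGE_TMOUT. */
	}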