diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 7197c82..9cc09e7 100644
 #endif
 
 #ifndef HAVE_MLX5DV_DR_ESWITCH
+#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
 #endif
+#endif
 
 #ifndef HAVE_MLX5DV_DR
 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
@@ -134,6 +136,8 @@ struct field_modify_info modify_udp[] = {
 struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
+       {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
+       {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
 };
 
@@ -558,6 +562,96 @@ flow_dv_convert_action_modify_dec_ttl
                                             MLX5_MODIFICATION_TYPE_ADD, error);
 }
 
+/**
+ * Convert modify-header increment/decrement TCP Sequence number
+ * to DV specification.
+ *
+ * @param[in,out] resource
+ *   Pointer to the modify-header resource.
+ * @param[in] action
+ *   Pointer to action specification.
+ * @param[out] error
+ *   Pointer to the error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_convert_action_modify_tcp_seq
+                       (struct mlx5_flow_dv_modify_hdr_resource *resource,
+                        const struct rte_flow_action *action,
+                        struct rte_flow_error *error)
+{
+       const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
+       uint64_t value = rte_be_to_cpu_32(*conf);
+       struct rte_flow_item item;
+       struct rte_flow_item_tcp tcp;
+       struct rte_flow_item_tcp tcp_mask;
+
+       memset(&tcp, 0, sizeof(tcp));
+       memset(&tcp_mask, 0, sizeof(tcp_mask));
+       if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
+               /*
+                * The HW has no decrement operation, only increment.
+                * To simulate decrementing Y by X using the increment
+                * operation, add UINT32_MAX to Y X times: each addition
+                * of UINT32_MAX wraps around and decrements Y by 1.
+                */
+               value *= UINT32_MAX;
+       tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
+       tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
+       item.type = RTE_FLOW_ITEM_TYPE_TCP;
+       item.spec = &tcp;
+       item.mask = &tcp_mask;
+       return flow_dv_convert_modify_action(&item, modify_tcp, resource,
+                                            MLX5_MODIFICATION_TYPE_ADD, error);
+}
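
The wrap-around trick above can be checked with a few lines of standalone C (illustrative only, not driver code): multiplying the decrement amount by UINT32_MAX and letting the 32-bit addition wrap is the same as subtracting it modulo 2^32, which is what the HW ADD modification ends up doing to the sequence number.

#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
	uint32_t seq = 1000;            /* current TCP sequence number */
	uint64_t dec = 3;               /* amount to decrement by */
	/* Same transformation as flow_dv_convert_action_modify_tcp_seq(). */
	uint64_t value = dec * UINT32_MAX;
	/* The HW addition wraps at 32 bits: seq + (-dec) mod 2^32. */
	uint32_t result = seq + (uint32_t)value;

	assert(result == seq - dec);
	printf("%" PRIu32 " - %" PRIu64 " = %" PRIu32 "\n", seq, dec, result);
	return 0;
}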
+
+/**
+ * Convert modify-header increment/decrement TCP Acknowledgment number
+ * to DV specification.
+ *
+ * @param[in,out] resource
+ *   Pointer to the modify-header resource.
+ * @param[in] action
+ *   Pointer to action specification.
+ * @param[out] error
+ *   Pointer to the error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_convert_action_modify_tcp_ack
+                       (struct mlx5_flow_dv_modify_hdr_resource *resource,
+                        const struct rte_flow_action *action,
+                        struct rte_flow_error *error)
+{
+       const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
+       uint64_t value = rte_be_to_cpu_32(*conf);
+       struct rte_flow_item item;
+       struct rte_flow_item_tcp tcp;
+       struct rte_flow_item_tcp tcp_mask;
+
+       memset(&tcp, 0, sizeof(tcp));
+       memset(&tcp_mask, 0, sizeof(tcp_mask));
+       if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
+               /*
+                * The HW has no decrement operation, only increment.
+                * To simulate decrementing Y by X using the increment
+                * operation, add UINT32_MAX to Y X times: each addition
+                * of UINT32_MAX wraps around and decrements Y by 1.
+                */
+               value *= UINT32_MAX;
+       tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
+       tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
+       item.type = RTE_FLOW_ITEM_TYPE_TCP;
+       item.spec = &tcp;
+       item.mask = &tcp_mask;
+       return flow_dv_convert_modify_action(&item, modify_tcp, resource,
+                                            MLX5_MODIFICATION_TYPE_ADD, error);
+}
+
 /**
  * Validate META item.
  *
@@ -943,15 +1037,15 @@ flow_dv_encap_decap_resource_register
        struct mlx5_ibv_shared *sh = priv->sh;
        struct mlx5_flow_dv_encap_decap_resource *cache_resource;
        struct rte_flow *flow = dev_flow->flow;
-       struct mlx5dv_dr_ns *ns;
+       struct mlx5dv_dr_domain *domain;
 
        resource->flags = flow->group ? 0 : 1;
        if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
-               ns = sh->fdb_ns;
+               domain = sh->fdb_domain;
        else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
-               ns = sh->rx_ns;
+               domain = sh->rx_domain;
        else
-               ns = sh->tx_ns;
+               domain = sh->tx_domain;
 
        /* Lookup a matching resource from cache. */
        LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
@@ -980,7 +1074,7 @@ flow_dv_encap_decap_resource_register
        cache_resource->verbs_action =
                mlx5_glue->dv_create_flow_action_packet_reformat
                        (sh->ctx, cache_resource->reformat_type,
-                        cache_resource->ft_type, ns, cache_resource->flags,
+                        cache_resource->ft_type, domain, cache_resource->flags,
                         cache_resource->size,
                         (cache_resource->size ? cache_resource->buf : NULL));
        if (!cache_resource->verbs_action) {
@@ -1108,8 +1202,8 @@ flow_dv_port_id_action_resource_register
                                          "cannot allocate resource memory");
        *cache_resource = *resource;
        cache_resource->action =
-               mlx5_glue->dr_create_flow_action_dest_vport(priv->sh->fdb_ns,
-                                                           resource->port_id);
+               mlx5_glue->dr_create_flow_action_dest_vport
+                       (priv->sh->fdb_domain, resource->port_id);
        if (!cache_resource->action) {
                rte_free(cache_resource);
                return rte_flow_error_set(error, ENOMEM,
@@ -1209,14 +1303,14 @@ static int
 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
                           size_t *size, struct rte_flow_error *error)
 {
-       struct ether_hdr *eth = NULL;
-       struct vlan_hdr *vlan = NULL;
-       struct ipv4_hdr *ipv4 = NULL;
-       struct ipv6_hdr *ipv6 = NULL;
-       struct udp_hdr *udp = NULL;
-       struct vxlan_hdr *vxlan = NULL;
-       struct vxlan_gpe_hdr *vxlan_gpe = NULL;
-       struct gre_hdr *gre = NULL;
+       struct rte_ether_hdr *eth = NULL;
+       struct rte_vlan_hdr *vlan = NULL;
+       struct rte_ipv4_hdr *ipv4 = NULL;
+       struct rte_ipv6_hdr *ipv6 = NULL;
+       struct rte_udp_hdr *udp = NULL;
+       struct rte_vxlan_hdr *vxlan = NULL;
+       struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
+       struct rte_gre_hdr *gre = NULL;
        size_t len;
        size_t temp_size = 0;
 
@@ -1235,20 +1329,20 @@ flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
                rte_memcpy((void *)&buf[temp_size], items->spec, len);
                switch (items->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
-                       eth = (struct ether_hdr *)&buf[temp_size];
+                       eth = (struct rte_ether_hdr *)&buf[temp_size];
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
-                       vlan = (struct vlan_hdr *)&buf[temp_size];
+                       vlan = (struct rte_vlan_hdr *)&buf[temp_size];
                        if (!eth)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "eth header not found");
                        if (!eth->ether_type)
-                               eth->ether_type = RTE_BE16(ETHER_TYPE_VLAN);
+                               eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
-                       ipv4 = (struct ipv4_hdr *)&buf[temp_size];
+                       ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
                        if (!vlan && !eth)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
@@ -1256,9 +1350,9 @@ flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
                                                "neither eth nor vlan"
                                                " header found");
                        if (vlan && !vlan->eth_proto)
-                               vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv4);
+                               vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
                        else if (eth && !eth->ether_type)
-                               eth->ether_type = RTE_BE16(ETHER_TYPE_IPv4);
+                               eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
                        if (!ipv4->version_ihl)
                                ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
                                                    MLX5_ENCAP_IPV4_IHL_MIN;
@@ -1266,7 +1360,7 @@ flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
                                ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
-                       ipv6 = (struct ipv6_hdr *)&buf[temp_size];
+                       ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
                        if (!vlan && !eth)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
@@ -1274,9 +1368,9 @@ flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
                                                "neither eth nor vlan"
                                                " header found");
                        if (vlan && !vlan->eth_proto)
-                               vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv6);
+                               vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
                        else if (eth && !eth->ether_type)
-                               eth->ether_type = RTE_BE16(ETHER_TYPE_IPv6);
+                               eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
                        if (!ipv6->vtc_flow)
                                ipv6->vtc_flow =
                                        RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
@@ -1284,7 +1378,7 @@ flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
                                ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
-                       udp = (struct udp_hdr *)&buf[temp_size];
+                       udp = (struct rte_udp_hdr *)&buf[temp_size];
                        if (!ipv4 && !ipv6)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
@@ -1296,7 +1390,7 @@ flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
                                ipv6->proto = IPPROTO_UDP;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
-                       vxlan = (struct vxlan_hdr *)&buf[temp_size];
+                       vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
                        if (!udp)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
@@ -1309,7 +1403,7 @@ flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
                                        RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
-                       vxlan_gpe = (struct vxlan_gpe_hdr *)&buf[temp_size];
+                       vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
                        if (!udp)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
@@ -1329,7 +1423,7 @@ flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
-                       gre = (struct gre_hdr *)&buf[temp_size];
+                       gre = (struct rte_gre_hdr *)&buf[temp_size];
                        if (!gre->proto)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
@@ -1360,6 +1454,50 @@ flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
        return 0;
 }
 
+static int
+flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
+{
+       struct rte_ether_hdr *eth = NULL;
+       struct rte_vlan_hdr *vlan = NULL;
+       struct rte_ipv6_hdr *ipv6 = NULL;
+       struct rte_udp_hdr *udp = NULL;
+       char *next_hdr;
+       uint16_t proto;
+
+       eth = (struct rte_ether_hdr *)data;
+       next_hdr = (char *)(eth + 1);
+       proto = RTE_BE16(eth->ether_type);
+
+       /* VLAN skipping */
+       while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
+               vlan = (struct rte_vlan_hdr *)next_hdr;
+               proto = RTE_BE16(vlan->eth_proto);
+               next_hdr += sizeof(struct rte_vlan_hdr);
+       }
+
+       /* HW calculates IPv4 csum. no need to proceed */
+       if (proto == RTE_ETHER_TYPE_IPV4)
+               return 0;
+
+       /* non IPv4/IPv6 header. not supported */
+       if (proto != RTE_ETHER_TYPE_IPV6) {
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION,
+                                         NULL, "Cannot offload non IPv4/IPv6");
+       }
+
+       ipv6 = (struct rte_ipv6_hdr *)next_hdr;
+
+       /* ignore non UDP */
+       if (ipv6->proto != IPPROTO_UDP)
+               return 0;
+
+       udp = (struct rte_udp_hdr *)(ipv6 + 1);
+       udp->dgram_cksum = 0;
+
+       return 0;
+}
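
For context, the buffer sanitized above is the raw encapsulation data an application hands to the PMD through RTE_FLOW_ACTION_TYPE_RAW_ENCAP. A minimal application-side sketch follows; the header template is a placeholder, and for an IPv6 outer header whatever UDP checksum the application writes is cleared by flow_dv_zero_encap_udp_csum() before the reformat action is created.

#include <stdint.h>
#include <rte_flow.h>

/* Placeholder outer-header template: Ethernet(14) + IPv6(40) + UDP(8) +
 * VXLAN(8) bytes. A real application fills in addresses, ports and VNI. */
static uint8_t encap_hdr[14 + 40 + 8 + 8];

static const struct rte_flow_action_raw_encap raw_encap = {
	.data = encap_hdr,
	.size = sizeof(encap_hdr),
};

static const struct rte_flow_action encap_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &raw_encap },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};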
+
 /**
  * Convert L2 encap action to DV specification.
  *
@@ -1398,6 +1536,8 @@ flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
                        (const struct rte_flow_action_raw_encap *)action->conf;
                res.size = raw_encap_data->size;
                memcpy(res.buf, raw_encap_data->data, res.size);
+               if (flow_dv_zero_encap_udp_csum(res.buf, error))
+                       return -rte_errno;
        } else {
                if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
                        encap_data =
@@ -1665,6 +1805,96 @@ flow_dv_validate_action_modify_tp(const uint64_t action_flags,
        return ret;
 }
 
+/**
+ * Validate the modify-header actions of increment/decrement
+ * TCP Sequence-number.
+ *
+ * @param[in] action_flags
+ *   Holds the actions detected until now.
+ * @param[in] action
+ *   Pointer to the modify action.
+ * @param[in] item_flags
+ *   Holds the items detected.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
+                                      const struct rte_flow_action *action,
+                                      const uint64_t item_flags,
+                                      struct rte_flow_error *error)
+{
+       int ret = 0;
+
+       ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
+       if (!ret) {
+               if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
+                       return rte_flow_error_set(error, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ACTION,
+                                                 NULL, "no TCP item in"
+                                                 " pattern");
+               if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
+                       (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
+                   (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
+                       (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
+                       return rte_flow_error_set(error, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ACTION,
+                                                 NULL,
+                                                 "cannot decrease and increase"
+                                                 " TCP sequence number"
+                                                 " at the same time");
+       }
+       return ret;
+}
+
+/**
+ * Validate the modify-header actions of increment/decrement
+ * TCP Acknowledgment number.
+ *
+ * @param[in] action_flags
+ *   Holds the actions detected until now.
+ * @param[in] action
+ *   Pointer to the modify action.
+ * @param[in] item_flags
+ *   Holds the items detected.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
+                                      const struct rte_flow_action *action,
+                                      const uint64_t item_flags,
+                                      struct rte_flow_error *error)
+{
+       int ret = 0;
+
+       ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
+       if (!ret) {
+               if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
+                       return rte_flow_error_set(error, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ACTION,
+                                                 NULL, "no TCP item in"
+                                                 " pattern");
+               if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
+                       (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
+                   (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
+                       (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
+                       return rte_flow_error_set(error, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ACTION,
+                                                 NULL,
+                                                 "cannot decrease and increase"
+                                                 " TCP acknowledgment number"
+                                                 " at the same time");
+       }
+       return ret;
+}
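
On the application side these validators correspond to rte_flow rules whose action configuration is simply the big-endian 32-bit amount that the conversion helpers above read from action->conf. A minimal sketch (queue index and amounts are arbitrary; the rule's pattern must also carry an outer TCP item, as enforced above):

#include <rte_byteorder.h>
#include <rte_flow.h>

/* Amounts are passed as network-order 32-bit integers. */
static const rte_be32_t dec_seq_by = RTE_BE32(1);
static const rte_be32_t inc_ack_by = RTE_BE32(1);
static const struct rte_flow_action_queue queue_conf = { .index = 0 };

static const struct rte_flow_action tcp_mangle_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ, .conf = &dec_seq_by },
	{ .type = RTE_FLOW_ACTION_TYPE_INC_TCP_ACK, .conf = &inc_ack_by },
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};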
+
 /**
  * Validate the modify-header TTL actions.
  *
@@ -1826,14 +2056,14 @@ flow_dv_modify_hdr_resource_register
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;
        struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
-       struct mlx5dv_dr_ns *ns;
+       struct mlx5dv_dr_domain *ns;
 
        if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
-               ns = sh->fdb_ns;
+               ns = sh->fdb_domain;
        else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
-               ns = sh->tx_ns;
+               ns = sh->tx_domain;
        else
-               ns = sh->rx_ns;
+               ns = sh->rx_domain;
        resource->flags =
                dev_flow->flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
        /* Lookup a matching resource from cache. */
@@ -2064,6 +2294,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
        uint64_t last_item = 0;
        uint8_t next_protocol = 0xff;
        int actions_n = 0;
+       struct rte_flow_item_tcp nic_tcp_mask = {
+               .hdr = {
+                       .tcp_flags = 0xFF,
+                       .src_port = RTE_BE16(UINT16_MAX),
+                       .dst_port = RTE_BE16(UINT16_MAX),
+               }
+       };
 
        if (items == NULL)
                return -1;
@@ -2144,7 +2381,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        ret = mlx5_flow_validate_item_tcp
                                                (items, item_flags,
                                                 next_protocol,
-                                                &rte_flow_item_tcp_mask,
+                                                &nic_tcp_mask,
                                                 error);
                        if (ret < 0)
                                return ret;
@@ -2198,6 +2435,22 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                return ret;
                        last_item = MLX5_FLOW_ITEM_METADATA;
                        break;
+               case RTE_FLOW_ITEM_TYPE_ICMP:
+                       ret = mlx5_flow_validate_item_icmp(items, item_flags,
+                                                          next_protocol,
+                                                          error);
+                       if (ret < 0)
+                               return ret;
+                       item_flags |= MLX5_FLOW_LAYER_ICMP;
+                       break;
+               case RTE_FLOW_ITEM_TYPE_ICMP6:
+                       ret = mlx5_flow_validate_item_icmp6(items, item_flags,
+                                                           next_protocol,
+                                                           error);
+                       if (ret < 0)
+                               return ret;
+                       item_flags |= MLX5_FLOW_LAYER_ICMP6;
+                       break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
@@ -2407,6 +2660,40 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        ++actions_n;
                        action_flags |= MLX5_FLOW_ACTION_JUMP;
                        break;
+               case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
+               case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
+                       ret = flow_dv_validate_action_modify_tcp_seq
+                                                               (action_flags,
+                                                                actions,
+                                                                item_flags,
+                                                                error);
+                       if (ret < 0)
+                               return ret;
+                       /* Count all modify-header actions as one action. */
+                       if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+                               ++actions_n;
+                       action_flags |= actions->type ==
+                                       RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
+                                               MLX5_FLOW_ACTION_INC_TCP_SEQ :
+                                               MLX5_FLOW_ACTION_DEC_TCP_SEQ;
+                       break;
+               case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
+               case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
+                       ret = flow_dv_validate_action_modify_tcp_ack
+                                                               (action_flags,
+                                                                actions,
+                                                                item_flags,
+                                                                error);
+                       if (ret < 0)
+                               return ret;
+                       /* Count all modify-header actions as one action. */
+                       if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+                               ++actions_n;
+                       action_flags |= actions->type ==
+                                       RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
+                                               MLX5_FLOW_ACTION_INC_TCP_ACK :
+                                               MLX5_FLOW_ACTION_DEC_TCP_ACK;
+                       break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
@@ -2484,7 +2771,7 @@ flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
                                   "not enough memory to create flow");
                return NULL;
        }
-       flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
+       flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
        return flow;
 }
 
@@ -2508,7 +2795,7 @@ flow_dv_check_valid_spec(void *match_mask, void *match_value)
        uint8_t *v = match_value;
        unsigned int i;
 
-       for (i = 0; i < MLX5_ST_SZ_DB(fte_match_param); ++i) {
+       for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
                if (v[i] & ~m[i]) {
                        DRV_LOG(ERR,
                                "match_value differs from match_criteria"
@@ -2860,6 +3147,10 @@ flow_dv_translate_item_tcp(void *matcher, void *key,
                 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
                 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
+       MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
+                tcp_m->hdr.tcp_flags);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
+                (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
 }
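
Together with the widened nic_tcp_mask in flow_dv_validate(), the tcp_flags lines added here let a rule match on TCP flags. A minimal application-side sketch of a SYN-only match; port fields are left wildcarded and the flag value 0x02 is written as a literal to stay independent of the RTE_TCP_*_FLAG macros.

#include <rte_flow.h>

/* Match TCP SYN packets; ports stay wildcarded. */
static const struct rte_flow_item_tcp tcp_syn_spec = {
	.hdr = { .tcp_flags = 0x02 },   /* SYN */
};
static const struct rte_flow_item_tcp tcp_syn_mask = {
	.hdr = { .tcp_flags = 0xff },
};

static const struct rte_flow_item tcp_syn_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_TCP,
	  .spec = &tcp_syn_spec, .mask = &tcp_syn_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};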
 
 /**
@@ -3092,7 +3383,7 @@ flow_dv_translate_item_mpls(void *matcher, void *key,
        case MLX5_FLOW_LAYER_GRE:
                MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
                MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
-                        ETHER_TYPE_MPLS);
+                        RTE_ETHER_TYPE_MPLS);
                break;
        default:
                MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
@@ -3232,6 +3523,102 @@ flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
        return 0;
 }
 
+/**
+ * Add ICMP6 item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ * @param[in] inner
+ *   Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_icmp6(void *matcher, void *key,
+                             const struct rte_flow_item *item,
+                             int inner)
+{
+       const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
+       const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
+       void *headers_m;
+       void *headers_v;
+       void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                    misc_parameters_3);
+       void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
+       if (inner) {
+               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                        inner_headers);
+               headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+       } else {
+               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                        outer_headers);
+               headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+       }
+       MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
+       if (!icmp6_v)
+               return;
+       if (!icmp6_m)
+               icmp6_m = &rte_flow_item_icmp6_mask;
+       MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
+       MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
+                icmp6_v->type & icmp6_m->type);
+       MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
+       MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
+                icmp6_v->code & icmp6_m->code);
+}
+
+/**
+ * Add ICMP item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ * @param[in] inner
+ *   Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_icmp(void *matcher, void *key,
+                           const struct rte_flow_item *item,
+                           int inner)
+{
+       const struct rte_flow_item_icmp *icmp_m = item->mask;
+       const struct rte_flow_item_icmp *icmp_v = item->spec;
+       void *headers_m;
+       void *headers_v;
+       void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                    misc_parameters_3);
+       void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
+       if (inner) {
+               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                        inner_headers);
+               headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+       } else {
+               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                        outer_headers);
+               headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+       }
+       MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
+       MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
+       if (!icmp_v)
+               return;
+       if (!icmp_m)
+               icmp_m = &rte_flow_item_icmp_mask;
+       MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
+                icmp_m->hdr.icmp_type);
+       MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
+                icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
+       MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
+                icmp_m->hdr.icmp_code);
+       MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
+                icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
+}
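
Both translators consume the standard rte_flow ICMP items and program the misc_parameters_3 section of the matcher. An application-side sketch of an ICMPv6 echo-request match (type 128; the code byte is left unmasked):

#include <rte_flow.h>

/* Match ICMPv6 echo requests (type 128, any code). */
static const struct rte_flow_item_icmp6 icmp6_echo_spec = {
	.type = 128,
};
static const struct rte_flow_item_icmp6 icmp6_echo_mask = {
	.type = 0xff,
};

static const struct rte_flow_item icmp6_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV6 },
	{ .type = RTE_FLOW_ITEM_TYPE_ICMP6,
	  .spec = &icmp6_echo_spec, .mask = &icmp6_echo_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};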
+
 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
 
 #define HEADER_IS_ZERO(match_criteria, headers)                                     \
@@ -3305,17 +3692,17 @@ flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
                tbl = &sh->fdb_tbl[table_id];
                if (!tbl->obj)
                        tbl->obj = mlx5_glue->dr_create_flow_tbl
-                               (sh->fdb_ns, table_id);
+                               (sh->fdb_domain, table_id);
        } else if (egress) {
                tbl = &sh->tx_tbl[table_id];
                if (!tbl->obj)
                        tbl->obj = mlx5_glue->dr_create_flow_tbl
-                               (sh->tx_ns, table_id);
+                               (sh->tx_domain, table_id);
        } else {
                tbl = &sh->rx_tbl[table_id];
                if (!tbl->obj)
                        tbl->obj = mlx5_glue->dr_create_flow_tbl
-                               (sh->rx_ns, table_id);
+                               (sh->rx_domain, table_id);
        }
        if (!tbl->obj) {
                rte_flow_error_set(error, ENOMEM,
@@ -3882,6 +4269,27 @@ cnt_err:
                                return -rte_errno;
                        action_flags |= MLX5_FLOW_ACTION_SET_TTL;
                        break;
+               case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
+               case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
+                       if (flow_dv_convert_action_modify_tcp_seq(&res, actions,
+                                                                 error))
+                               return -rte_errno;
+                       action_flags |= actions->type ==
+                                       RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
+                                       MLX5_FLOW_ACTION_INC_TCP_SEQ :
+                                       MLX5_FLOW_ACTION_DEC_TCP_SEQ;
+                       break;
+
+               case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
+               case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
+                       if (flow_dv_convert_action_modify_tcp_ack(&res, actions,
+                                                                 error))
+                               return -rte_errno;
+                       action_flags |= actions->type ==
+                                       RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
+                                       MLX5_FLOW_ACTION_INC_TCP_ACK :
+                                       MLX5_FLOW_ACTION_DEC_TCP_ACK;
+                       break;
                case RTE_FLOW_ACTION_TYPE_END:
                        actions_end = true;
                        if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
@@ -4007,6 +4415,16 @@ cnt_err:
                                                    items);
                        last_item = MLX5_FLOW_ITEM_METADATA;
                        break;
+               case RTE_FLOW_ITEM_TYPE_ICMP:
+                       flow_dv_translate_item_icmp(match_mask, match_value,
+                                                   items, tunnel);
+                       item_flags |= MLX5_FLOW_LAYER_ICMP;
+                       break;
+               case RTE_FLOW_ITEM_TYPE_ICMP6:
+                       flow_dv_translate_item_icmp6(match_mask, match_value,
+                                                     items, tunnel);
+                       item_flags |= MLX5_FLOW_LAYER_ICMP6;
+                       break;
                default:
                        break;
                }