net/sfc: rework flow engine (MAE transfer rules, flow RSS contexts, PPPoE items)
[dpdk.git] drivers/net/sfc/sfc_flow.c
index 9be1f9a..fb59abd 100644 (file)
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
- * Copyright (c) 2017-2018 Solarflare Communications Inc.
- * All rights reserved.
+ * Copyright(c) 2019-2021 Xilinx, Inc.
+ * Copyright(c) 2017-2019 Solarflare Communications Inc.
  *
  * This software was jointly developed between OKTET Labs (under contract
  * for Solarflare) and Solarflare Communications, Inc.
@@ -10,7 +10,7 @@
 #include <rte_byteorder.h>
 #include <rte_tailq.h>
 #include <rte_common.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 #include <rte_ether.h>
 #include <rte_flow.h>
 #include <rte_flow_driver.h>
 #include "efx.h"
 
 #include "sfc.h"
+#include "sfc_debug.h"
 #include "sfc_rx.h"
 #include "sfc_filter.h"
 #include "sfc_flow.h"
+#include "sfc_flow_rss.h"
+#include "sfc_flow_tunnel.h"
 #include "sfc_log.h"
 #include "sfc_dp_rx.h"
+#include "sfc_mae_counter.h"
+#include "sfc_switch.h"
+
+struct sfc_flow_ops_by_spec {
+       sfc_flow_parse_cb_t     *parse;
+       sfc_flow_verify_cb_t    *verify;
+       sfc_flow_cleanup_cb_t   *cleanup;
+       sfc_flow_insert_cb_t    *insert;
+       sfc_flow_remove_cb_t    *remove;
+       sfc_flow_query_cb_t     *query;
+};
+
+static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;
+static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_mae;
+static sfc_flow_insert_cb_t sfc_flow_filter_insert;
+static sfc_flow_remove_cb_t sfc_flow_filter_remove;
+static sfc_flow_cleanup_cb_t sfc_flow_cleanup;
+
+static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
+       .parse = sfc_flow_parse_rte_to_filter,
+       .verify = NULL,
+       .cleanup = sfc_flow_cleanup,
+       .insert = sfc_flow_filter_insert,
+       .remove = sfc_flow_filter_remove,
+       .query = NULL,
+};
+
+static const struct sfc_flow_ops_by_spec sfc_flow_ops_mae = {
+       .parse = sfc_flow_parse_rte_to_mae,
+       .verify = sfc_mae_flow_verify,
+       .cleanup = sfc_mae_flow_cleanup,
+       .insert = sfc_mae_flow_insert,
+       .remove = sfc_mae_flow_remove,
+       .query = sfc_mae_flow_query,
+};
+
+static const struct sfc_flow_ops_by_spec *
+sfc_flow_get_ops_by_spec(struct rte_flow *flow)
+{
+       struct sfc_flow_spec *spec = &flow->spec;
+       const struct sfc_flow_ops_by_spec *ops = NULL;
+
+       switch (spec->type) {
+       case SFC_FLOW_SPEC_FILTER:
+               ops = &sfc_flow_ops_filter;
+               break;
+       case SFC_FLOW_SPEC_MAE:
+               ops = &sfc_flow_ops_mae;
+               break;
+       default:
+               SFC_ASSERT(false);
+               break;
+       }
+
+       return ops;
+}
 
 /*
  * Currently, filter-based (VNIC) flow API is implemented in such a manner
  * of such a field.
  */
 
-enum sfc_flow_item_layers {
-       SFC_FLOW_ITEM_ANY_LAYER,
-       SFC_FLOW_ITEM_START_LAYER,
-       SFC_FLOW_ITEM_L2,
-       SFC_FLOW_ITEM_L3,
-       SFC_FLOW_ITEM_L4,
-};
-
-typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
-                                 efx_filter_spec_t *spec,
-                                 struct rte_flow_error *error);
-
-struct sfc_flow_item {
-       enum rte_flow_item_type type;           /* Type of item */
-       enum sfc_flow_item_layers layer;        /* Layer of item */
-       enum sfc_flow_item_layers prev_layer;   /* Previous layer of item */
-       sfc_flow_item_parse *parse;             /* Parsing function */
-};
-
 static sfc_flow_item_parse sfc_flow_parse_void;
 static sfc_flow_item_parse sfc_flow_parse_eth;
 static sfc_flow_item_parse sfc_flow_parse_vlan;
@@ -64,6 +104,7 @@ static sfc_flow_item_parse sfc_flow_parse_udp;
 static sfc_flow_item_parse sfc_flow_parse_vxlan;
 static sfc_flow_item_parse sfc_flow_parse_geneve;
 static sfc_flow_item_parse sfc_flow_parse_nvgre;
+static sfc_flow_item_parse sfc_flow_parse_pppoex;
 
 typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
                                     unsigned int filters_count_for_one_val,
@@ -110,7 +151,7 @@ sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
 /*
  * Validate item and prepare structures spec and mask for parsing
  */
-static int
+int
 sfc_flow_parse_init(const struct rte_flow_item *item,
                    const void **spec_ptr,
                    const void **mask_ptr,
@@ -209,7 +250,7 @@ exit:
 
 static int
 sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
-                   __rte_unused efx_filter_spec_t *efx_spec,
+                   __rte_unused struct sfc_flow_parse_ctx *parse_ctx,
                    __rte_unused struct rte_flow_error *error)
 {
        return 0;
@@ -231,10 +272,11 @@ sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
  */
 static int
 sfc_flow_parse_eth(const struct rte_flow_item *item,
-                  efx_filter_spec_t *efx_spec,
+                  struct sfc_flow_parse_ctx *parse_ctx,
                   struct rte_flow_error *error)
 {
        int rc;
+       efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_eth *spec = NULL;
        const struct rte_flow_item_eth *mask = NULL;
        const struct rte_flow_item_eth supp_mask = {
@@ -343,15 +385,16 @@ fail_bad_mask:
  */
 static int
 sfc_flow_parse_vlan(const struct rte_flow_item *item,
-                   efx_filter_spec_t *efx_spec,
+                   struct sfc_flow_parse_ctx *parse_ctx,
                    struct rte_flow_error *error)
 {
        int rc;
        uint16_t vid;
+       efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_vlan *spec = NULL;
        const struct rte_flow_item_vlan *mask = NULL;
        const struct rte_flow_item_vlan supp_mask = {
-               .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
+               .tci = rte_cpu_to_be_16(RTE_ETH_VLAN_ID_MAX),
                .inner_type = RTE_BE16(0xffff),
        };
 
@@ -429,10 +472,11 @@ sfc_flow_parse_vlan(const struct rte_flow_item *item,
  */
 static int
 sfc_flow_parse_ipv4(const struct rte_flow_item *item,
-                   efx_filter_spec_t *efx_spec,
+                   struct sfc_flow_parse_ctx *parse_ctx,
                    struct rte_flow_error *error)
 {
        int rc;
+       efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_ipv4 *spec = NULL;
        const struct rte_flow_item_ipv4 *mask = NULL;
        const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
@@ -519,10 +563,11 @@ fail_bad_mask:
  */
 static int
 sfc_flow_parse_ipv6(const struct rte_flow_item *item,
-                   efx_filter_spec_t *efx_spec,
+                   struct sfc_flow_parse_ctx *parse_ctx,
                    struct rte_flow_error *error)
 {
        int rc;
+       efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_ipv6 *spec = NULL;
        const struct rte_flow_item_ipv6 *mask = NULL;
        const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
@@ -627,10 +672,11 @@ fail_bad_mask:
  */
 static int
 sfc_flow_parse_tcp(const struct rte_flow_item *item,
-                  efx_filter_spec_t *efx_spec,
+                  struct sfc_flow_parse_ctx *parse_ctx,
                   struct rte_flow_error *error)
 {
        int rc;
+       efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_tcp *spec = NULL;
        const struct rte_flow_item_tcp *mask = NULL;
        const struct rte_flow_item_tcp supp_mask = {
@@ -708,10 +754,11 @@ fail_bad_mask:
  */
 static int
 sfc_flow_parse_udp(const struct rte_flow_item *item,
-                  efx_filter_spec_t *efx_spec,
+                  struct sfc_flow_parse_ctx *parse_ctx,
                   struct rte_flow_error *error)
 {
        int rc;
+       efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_udp *spec = NULL;
        const struct rte_flow_item_udp *mask = NULL;
        const struct rte_flow_item_udp supp_mask = {
@@ -866,10 +913,11 @@ sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
  */
 static int
 sfc_flow_parse_vxlan(const struct rte_flow_item *item,
-                    efx_filter_spec_t *efx_spec,
+                    struct sfc_flow_parse_ctx *parse_ctx,
                     struct rte_flow_error *error)
 {
        int rc;
+       efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_vxlan *spec = NULL;
        const struct rte_flow_item_vxlan *mask = NULL;
        const struct rte_flow_item_vxlan supp_mask = {
@@ -918,10 +966,11 @@ sfc_flow_parse_vxlan(const struct rte_flow_item *item,
  */
 static int
 sfc_flow_parse_geneve(const struct rte_flow_item *item,
-                     efx_filter_spec_t *efx_spec,
+                     struct sfc_flow_parse_ctx *parse_ctx,
                      struct rte_flow_error *error)
 {
        int rc;
+       efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_geneve *spec = NULL;
        const struct rte_flow_item_geneve *mask = NULL;
        const struct rte_flow_item_geneve supp_mask = {
@@ -985,10 +1034,11 @@ sfc_flow_parse_geneve(const struct rte_flow_item *item,
  */
 static int
 sfc_flow_parse_nvgre(const struct rte_flow_item *item,
-                    efx_filter_spec_t *efx_spec,
+                    struct sfc_flow_parse_ctx *parse_ctx,
                     struct rte_flow_error *error)
 {
        int rc;
+       efx_filter_spec_t *efx_spec = parse_ctx->filter;
        const struct rte_flow_item_nvgre *spec = NULL;
        const struct rte_flow_item_nvgre *mask = NULL;
        const struct rte_flow_item_nvgre supp_mask = {
@@ -1022,65 +1072,158 @@ sfc_flow_parse_nvgre(const struct rte_flow_item *item,
        return rc;
 }
 
+/**
+ * Convert PPPoEx item to EFX filter specification.
+ *
+ * @param[in] item
+ *   Item specification.
+ *   Matching on PPPoEx fields is not supported.
+ *   This item can only be used to set or validate the EtherType filter.
+ *   Only zero masks are allowed.
+ *   Ranging is not supported.
+ * @param[in, out] parse_ctx
+ *   Parse context carrying the EFX filter specification to update.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_pppoex(const struct rte_flow_item *item,
+                     struct sfc_flow_parse_ctx *parse_ctx,
+                     struct rte_flow_error *error)
+{
+       efx_filter_spec_t *efx_spec = parse_ctx->filter;
+       const struct rte_flow_item_pppoe *spec = NULL;
+       const struct rte_flow_item_pppoe *mask = NULL;
+       const struct rte_flow_item_pppoe supp_mask = {};
+       const struct rte_flow_item_pppoe def_mask = {};
+       uint16_t ether_type;
+       int rc;
+
+       rc = sfc_flow_parse_init(item,
+                                (const void **)&spec,
+                                (const void **)&mask,
+                                &supp_mask,
+                                &def_mask,
+                                sizeof(struct rte_flow_item_pppoe),
+                                error);
+       if (rc != 0)
+               return rc;
+
+       if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED)
+               ether_type = RTE_ETHER_TYPE_PPPOE_DISCOVERY;
+       else
+               ether_type = RTE_ETHER_TYPE_PPPOE_SESSION;
+
+       if ((efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) != 0) {
+               if (efx_spec->efs_ether_type != ether_type) {
+                       rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                          "Invalid EtherType for a PPPoE flow item");
+                       return -rte_errno;
+               }
+       } else {
+               efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+               efx_spec->efs_ether_type = ether_type;
+       }
+
+       return 0;
+}
+
 static const struct sfc_flow_item sfc_flow_items[] = {
        {
                .type = RTE_FLOW_ITEM_TYPE_VOID,
+               .name = "VOID",
                .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
                .layer = SFC_FLOW_ITEM_ANY_LAYER,
+               .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_void,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
+               .name = "ETH",
                .prev_layer = SFC_FLOW_ITEM_START_LAYER,
                .layer = SFC_FLOW_ITEM_L2,
+               .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_eth,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
+               .name = "VLAN",
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L2,
+               .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_vlan,
        },
+       {
+               .type = RTE_FLOW_ITEM_TYPE_PPPOED,
+               .name = "PPPOED",
+               .prev_layer = SFC_FLOW_ITEM_L2,
+               .layer = SFC_FLOW_ITEM_L2,
+               .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
+               .parse = sfc_flow_parse_pppoex,
+       },
+       {
+               .type = RTE_FLOW_ITEM_TYPE_PPPOES,
+               .name = "PPPOES",
+               .prev_layer = SFC_FLOW_ITEM_L2,
+               .layer = SFC_FLOW_ITEM_L2,
+               .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
+               .parse = sfc_flow_parse_pppoex,
+       },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
+               .name = "IPV4",
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
+               .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_ipv4,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
+               .name = "IPV6",
                .prev_layer = SFC_FLOW_ITEM_L2,
                .layer = SFC_FLOW_ITEM_L3,
+               .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_ipv6,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
+               .name = "TCP",
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
+               .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_tcp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
+               .name = "UDP",
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_L4,
+               .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_udp,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
+               .name = "VXLAN",
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
+               .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_vxlan,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_GENEVE,
+               .name = "GENEVE",
                .prev_layer = SFC_FLOW_ITEM_L4,
                .layer = SFC_FLOW_ITEM_START_LAYER,
+               .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_geneve,
        },
        {
                .type = RTE_FLOW_ITEM_TYPE_NVGRE,
+               .name = "NVGRE",
                .prev_layer = SFC_FLOW_ITEM_L3,
                .layer = SFC_FLOW_ITEM_START_LAYER,
+               .ctx_type = SFC_FLOW_PARSE_CTX_FILTER,
                .parse = sfc_flow_parse_nvgre,
        },
 };
@@ -1089,12 +1232,15 @@ static const struct sfc_flow_item sfc_flow_items[] = {
  * Protocol-independent flow API support
  */
 static int
-sfc_flow_parse_attr(const struct rte_flow_attr *attr,
+sfc_flow_parse_attr(struct sfc_adapter *sa,
+                   const struct rte_flow_attr *attr,
                    struct rte_flow *flow,
                    struct rte_flow_error *error)
 {
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
+       struct sfc_flow_spec_mae *spec_mae = &spec->mae;
+       struct sfc_mae *mae = &sa->mae;
 
        if (attr == NULL) {
                rte_flow_error_set(error, EINVAL,
@@ -1108,62 +1254,79 @@ sfc_flow_parse_attr(const struct rte_flow_attr *attr,
                                   "Groups are not supported");
                return -rte_errno;
        }
-       if (attr->priority != 0) {
-               rte_flow_error_set(error, ENOTSUP,
-                                  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
-                                  "Priorities are not supported");
-               return -rte_errno;
-       }
-       if (attr->egress != 0) {
+       if (attr->egress != 0 && attr->transfer == 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
                                   "Egress is not supported");
                return -rte_errno;
        }
-       if (attr->transfer != 0) {
-               rte_flow_error_set(error, ENOTSUP,
-                                  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
-                                  "Transfer is not supported");
-               return -rte_errno;
-       }
-       if (attr->ingress == 0) {
+       if (attr->ingress == 0 && attr->transfer == 0) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
-                                  "Only ingress is supported");
+                                  "Ingress is compulsory");
                return -rte_errno;
        }
-
-       spec->type = SFC_FLOW_SPEC_FILTER;
-       spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX;
-       spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
+       if (attr->transfer == 0) {
+               if (attr->priority != 0) {
+                       rte_flow_error_set(error, ENOTSUP,
+                                          RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+                                          attr, "Priorities are unsupported");
+                       return -rte_errno;
+               }
+               spec->type = SFC_FLOW_SPEC_FILTER;
+               spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX;
+               spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
+               spec_filter->template.efs_priority = EFX_FILTER_PRI_MANUAL;
+       } else {
+               if (mae->status != SFC_MAE_STATUS_ADMIN) {
+                       rte_flow_error_set(error, ENOTSUP,
+                                          RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+                                          attr, "Transfer is not supported");
+                       return -rte_errno;
+               }
+               if (attr->priority > mae->nb_action_rule_prios_max) {
+                       rte_flow_error_set(error, ENOTSUP,
+                                          RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+                                          attr, "Unsupported priority level");
+                       return -rte_errno;
+               }
+               spec->type = SFC_FLOW_SPEC_MAE;
+               spec_mae->priority = attr->priority;
+               spec_mae->match_spec = NULL;
+               spec_mae->action_set = NULL;
+               spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
+       }
 
        return 0;
 }
 
 /* Get item from array sfc_flow_items */
 static const struct sfc_flow_item *
-sfc_flow_get_item(enum rte_flow_item_type type)
+sfc_flow_get_item(const struct sfc_flow_item *items,
+                 unsigned int nb_items,
+                 enum rte_flow_item_type type)
 {
        unsigned int i;
 
-       for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
-               if (sfc_flow_items[i].type == type)
-                       return &sfc_flow_items[i];
+       for (i = 0; i < nb_items; i++)
+               if (items[i].type == type)
+                       return &items[i];
 
        return NULL;
 }
 
-static int
-sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
-                      struct rte_flow *flow,
+int
+sfc_flow_parse_pattern(struct sfc_adapter *sa,
+                      const struct sfc_flow_item *flow_items,
+                      unsigned int nb_flow_items,
+                      const struct rte_flow_item pattern[],
+                      struct sfc_flow_parse_ctx *parse_ctx,
                       struct rte_flow_error *error)
 {
        int rc;
        unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
        boolean_t is_ifrm = B_FALSE;
        const struct sfc_flow_item *item;
-       struct sfc_flow_spec *spec = &flow->spec;
-       struct sfc_flow_spec_filter *spec_filter = &spec->filter;
 
        if (pattern == NULL) {
                rte_flow_error_set(error, EINVAL,
@@ -1173,7 +1336,8 @@ sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
        }
 
        for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
-               item = sfc_flow_get_item(pattern->type);
+               item = sfc_flow_get_item(flow_items, nb_flow_items,
+                                        pattern->type);
                if (item == NULL) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM, pattern,
@@ -1217,7 +1381,8 @@ sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
                        break;
 
                default:
-                       if (is_ifrm) {
+                       if (parse_ctx->type == SFC_FLOW_PARSE_CTX_FILTER &&
+                           is_ifrm) {
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        pattern,
@@ -1228,9 +1393,19 @@ sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
                        break;
                }
 
-               rc = item->parse(pattern, &spec_filter->template, error);
-               if (rc != 0)
+               if (parse_ctx->type != item->ctx_type) {
+                       rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ITEM, pattern,
+                                       "Parse context type mismatch");
+                       return -rte_errno;
+               }
+
+               rc = item->parse(pattern, parse_ctx, error);
+               if (rc != 0) {
+                       sfc_err(sa, "failed to parse item %s: %s",
+                               item->name, strerror(-rc));
                        return rc;
+               }
 
                if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
                        prev_layer = item->layer;
@@ -1247,13 +1422,24 @@ sfc_flow_parse_queue(struct sfc_adapter *sa,
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        struct sfc_rxq *rxq;
+       struct sfc_rxq_info *rxq_info;
 
-       if (queue->index >= sfc_sa2shared(sa)->rxq_count)
+       if (queue->index >= sfc_sa2shared(sa)->ethdev_rxq_count)
                return -EINVAL;
 
-       rxq = &sa->rxq_ctrl[queue->index];
+       rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, queue->index);
        spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;
 
+       rxq_info = &sfc_sa2shared(sa)->rxq_info[queue->index];
+
+       if ((rxq_info->rxq_flags & SFC_RXQ_FLAG_RSS_HASH) != 0) {
+               struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+               struct sfc_rss *ethdev_rss = &sas->rss;
+
+               spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX_RSS;
+               spec_filter->rss_ctx = &ethdev_rss->dummy_ctx;
+       }
+
        return 0;
 }
 
@@ -1262,102 +1448,30 @@ sfc_flow_parse_rss(struct sfc_adapter *sa,
                   const struct rte_flow_action_rss *action_rss,
                   struct rte_flow *flow)
 {
-       struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
-       struct sfc_rss *rss = &sas->rss;
-       unsigned int rxq_sw_index;
+       struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
+       struct sfc_flow_rss_conf conf;
+       uint16_t sw_qid_min;
        struct sfc_rxq *rxq;
-       unsigned int rxq_hw_index_min;
-       unsigned int rxq_hw_index_max;
-       efx_rx_hash_type_t efx_hash_types;
-       const uint8_t *rss_key;
-       struct sfc_flow_spec *spec = &flow->spec;
-       struct sfc_flow_spec_filter *spec_filter = &spec->filter;
-       struct sfc_flow_rss *sfc_rss_conf = &spec_filter->rss_conf;
-       unsigned int i;
-
-       if (action_rss->queue_num == 0)
-               return -EINVAL;
-
-       rxq_sw_index = sfc_sa2shared(sa)->rxq_count - 1;
-       rxq = &sa->rxq_ctrl[rxq_sw_index];
-       rxq_hw_index_min = rxq->hw_index;
-       rxq_hw_index_max = 0;
-
-       for (i = 0; i < action_rss->queue_num; ++i) {
-               rxq_sw_index = action_rss->queue[i];
-
-               if (rxq_sw_index >= sfc_sa2shared(sa)->rxq_count)
-                       return -EINVAL;
-
-               rxq = &sa->rxq_ctrl[rxq_sw_index];
-
-               if (rxq->hw_index < rxq_hw_index_min)
-                       rxq_hw_index_min = rxq->hw_index;
+       int rc;
 
-               if (rxq->hw_index > rxq_hw_index_max)
-                       rxq_hw_index_max = rxq->hw_index;
-       }
+       spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX_RSS;
 
-       switch (action_rss->func) {
-       case RTE_ETH_HASH_FUNCTION_DEFAULT:
-       case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
-               break;
-       default:
-               return -EINVAL;
-       }
+       rc = sfc_flow_rss_parse_conf(sa, action_rss, &conf, &sw_qid_min);
+       if (rc != 0)
+               return -rc;
 
-       if (action_rss->level)
-               return -EINVAL;
+       rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, sw_qid_min);
+       spec_filter->template.efs_dmaq_id = rxq->hw_index;
 
-       /*
-        * Dummy RSS action with only one queue and no specific settings
-        * for hash types and key does not require dedicated RSS context
-        * and may be simplified to single queue action.
-        */
-       if (action_rss->queue_num == 1 && action_rss->types == 0 &&
-           action_rss->key_len == 0) {
-               spec_filter->template.efs_dmaq_id = rxq_hw_index_min;
+       spec_filter->rss_ctx = sfc_flow_rss_ctx_reuse(sa, &conf, sw_qid_min,
+                                                     action_rss->queue);
+       if (spec_filter->rss_ctx != NULL)
                return 0;
-       }
 
-       if (action_rss->types) {
-               int rc;
-
-               rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
-                                         &efx_hash_types);
-               if (rc != 0)
-                       return -rc;
-       } else {
-               unsigned int i;
-
-               efx_hash_types = 0;
-               for (i = 0; i < rss->hf_map_nb_entries; ++i)
-                       efx_hash_types |= rss->hf_map[i].efx;
-       }
-
-       if (action_rss->key_len) {
-               if (action_rss->key_len != sizeof(rss->key))
-                       return -EINVAL;
-
-               rss_key = action_rss->key;
-       } else {
-               rss_key = rss->key;
-       }
-
-       spec_filter->rss = B_TRUE;
-
-       sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
-       sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
-       sfc_rss_conf->rss_hash_types = efx_hash_types;
-       rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));
-
-       for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
-               unsigned int nb_queues = action_rss->queue_num;
-               unsigned int rxq_sw_index = action_rss->queue[i % nb_queues];
-               struct sfc_rxq *rxq = &sa->rxq_ctrl[rxq_sw_index];
-
-               sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
-       }
+       rc = sfc_flow_rss_ctx_add(sa, &conf, sw_qid_min, action_rss->queue,
+                                 &spec_filter->rss_ctx);
+       if (rc != 0)
+               return -rc;
 
        return 0;
 }
@@ -1414,37 +1528,16 @@ static int
 sfc_flow_filter_insert(struct sfc_adapter *sa,
                       struct rte_flow *flow)
 {
-       struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
-       struct sfc_rss *rss = &sas->rss;
        struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
-       struct sfc_flow_rss *flow_rss = &spec_filter->rss_conf;
-       uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
-       unsigned int i;
+       struct sfc_flow_rss_ctx *rss_ctx = spec_filter->rss_ctx;
        int rc = 0;
 
-       if (spec_filter->rss) {
-               unsigned int rss_spread = MIN(flow_rss->rxq_hw_index_max -
-                                             flow_rss->rxq_hw_index_min + 1,
-                                             EFX_MAXRSS);
-
-               rc = efx_rx_scale_context_alloc(sa->nic,
-                                               EFX_RX_SCALE_EXCLUSIVE,
-                                               rss_spread,
-                                               &efs_rss_context);
-               if (rc != 0)
-                       goto fail_scale_context_alloc;
-
-               rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
-                                          rss->hash_alg,
-                                          flow_rss->rss_hash_types, B_TRUE);
-               if (rc != 0)
-                       goto fail_scale_mode_set;
+       rc = sfc_flow_rss_ctx_program(sa, rss_ctx);
+       if (rc != 0)
+               goto fail_rss_ctx_program;
 
-               rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
-                                         flow_rss->rss_key,
-                                         sizeof(rss->key));
-               if (rc != 0)
-                       goto fail_scale_key_set;
+       if (rss_ctx != NULL) {
+               unsigned int i;
 
                /*
                 * At this point, fully elaborated filter specifications
@@ -1455,9 +1548,7 @@ sfc_flow_filter_insert(struct sfc_adapter *sa,
                for (i = 0; i < spec_filter->count; i++) {
                        efx_filter_spec_t *spec = &spec_filter->filters[i];
 
-                       spec->efs_rss_context = efs_rss_context;
-                       spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
-                       spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
+                       spec->efs_rss_context = rss_ctx->nic_handle;
                }
        }
 
@@ -1465,34 +1556,12 @@ sfc_flow_filter_insert(struct sfc_adapter *sa,
        if (rc != 0)
                goto fail_filter_insert;
 
-       if (spec_filter->rss) {
-               /*
-                * Scale table is set after filter insertion because
-                * the table entries are relative to the base RxQ ID
-                * and the latter is submitted to the HW by means of
-                * inserting a filter, so by the time of the request
-                * the HW knows all the information needed to verify
-                * the table entries, and the operation will succeed
-                */
-               rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
-                                         flow_rss->rss_tbl,
-                                         RTE_DIM(flow_rss->rss_tbl));
-               if (rc != 0)
-                       goto fail_scale_tbl_set;
-       }
-
        return 0;
 
-fail_scale_tbl_set:
-       sfc_flow_spec_remove(sa, &flow->spec);
-
 fail_filter_insert:
-fail_scale_key_set:
-fail_scale_mode_set:
-       if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT)
-               efx_rx_scale_context_free(sa->nic, efs_rss_context);
+       sfc_flow_rss_ctx_terminate(sa, rss_ctx);
 
-fail_scale_context_alloc:
+fail_rss_ctx_program:
        return rc;
 }
 
@@ -1507,18 +1576,9 @@ sfc_flow_filter_remove(struct sfc_adapter *sa,
        if (rc != 0)
                return rc;
 
-       if (spec_filter->rss) {
-               /*
-                * All specifications for a given flow rule have the same RSS
-                * context, so that RSS context value is taken from the first
-                * filter specification
-                */
-               efx_filter_spec_t *spec = &spec_filter->filters[0];
-
-               rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
-       }
+       sfc_flow_rss_ctx_terminate(sa, spec_filter->rss_ctx);
 
-       return rc;
+       return 0;
 }
 
 static int
@@ -1529,8 +1589,13 @@ sfc_flow_parse_mark(struct sfc_adapter *sa,
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+       uint32_t mark_max;
+
+       mark_max = encp->enc_filter_action_mark_max;
+       if (sfc_ft_is_active(sa))
+               mark_max = RTE_MIN(mark_max, SFC_FT_USER_MARK_MASK);
 
-       if (mark == NULL || mark->id > encp->enc_filter_action_mark_max)
+       if (mark == NULL || mark->id > mark_max)
                return EINVAL;
 
        spec_filter->template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
@@ -1549,6 +1614,7 @@ sfc_flow_parse_actions(struct sfc_adapter *sa,
        struct sfc_flow_spec *spec = &flow->spec;
        struct sfc_flow_spec_filter *spec_filter = &spec->filter;
        const unsigned int dp_rx_features = sa->priv.dp_rx->features;
+       const uint64_t rx_metadata = sa->negotiated_rx_metadata;
        uint32_t actions_set = 0;
        const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
                                           (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
@@ -1563,9 +1629,6 @@ sfc_flow_parse_actions(struct sfc_adapter *sa,
                return -rte_errno;
        }
 
-#define SFC_BUILD_SET_OVERFLOW(_action, _set) \
-       RTE_BUILD_BUG_ON(_action >= sizeof(_set) * CHAR_BIT)
-
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
@@ -1624,6 +1687,12 @@ sfc_flow_parse_actions(struct sfc_adapter *sa,
                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                        "FLAG action is not supported on the current Rx datapath");
                                return -rte_errno;
+                       } else if ((rx_metadata &
+                                   RTE_ETH_RX_METADATA_USER_FLAG) == 0) {
+                               rte_flow_error_set(error, ENOTSUP,
+                                       RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                       "flag delivery has not been negotiated");
+                               return -rte_errno;
                        }
 
                        spec_filter->template.efs_flags |=
@@ -1641,6 +1710,12 @@ sfc_flow_parse_actions(struct sfc_adapter *sa,
                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                        "MARK action is not supported on the current Rx datapath");
                                return -rte_errno;
+                       } else if ((rx_metadata &
+                                   RTE_ETH_RX_METADATA_USER_MARK) == 0) {
+                               rte_flow_error_set(error, ENOTSUP,
+                                       RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                       "mark delivery has not been negotiated");
+                               return -rte_errno;
                        }
 
                        rc = sfc_flow_parse_mark(sa, actions->conf, flow);
@@ -1661,7 +1736,6 @@ sfc_flow_parse_actions(struct sfc_adapter *sa,
 
                actions_set |= (1UL << actions->type);
        }
-#undef SFC_BUILD_SET_OVERFLOW
 
        /* When fate is unknown, drop traffic. */
        if ((actions_set & fate_actions_mask) == 0) {
@@ -2277,21 +2351,23 @@ sfc_flow_validate_match_flags(struct sfc_adapter *sa,
 }
 
 static int
-sfc_flow_parse(struct rte_eth_dev *dev,
-              const struct rte_flow_attr *attr,
-              const struct rte_flow_item pattern[],
-              const struct rte_flow_action actions[],
-              struct rte_flow *flow,
-              struct rte_flow_error *error)
+sfc_flow_parse_rte_to_filter(struct rte_eth_dev *dev,
+                            const struct rte_flow_item pattern[],
+                            const struct rte_flow_action actions[],
+                            struct rte_flow *flow,
+                            struct rte_flow_error *error)
 {
        struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+       struct sfc_flow_spec *spec = &flow->spec;
+       struct sfc_flow_spec_filter *spec_filter = &spec->filter;
+       struct sfc_flow_parse_ctx ctx;
        int rc;
 
-       rc = sfc_flow_parse_attr(attr, flow, error);
-       if (rc != 0)
-               goto fail_bad_value;
+       ctx.type = SFC_FLOW_PARSE_CTX_FILTER;
+       ctx.filter = &spec_filter->template;
 
-       rc = sfc_flow_parse_pattern(pattern, flow, error);
+       rc = sfc_flow_parse_pattern(sa, sfc_flow_items, RTE_DIM(sfc_flow_items),
+                                   pattern, &ctx, error);
        if (rc != 0)
                goto fail_bad_value;
 
@@ -2309,6 +2385,195 @@ fail_bad_value:
        return rc;
 }
 
+static int
+sfc_flow_parse_rte_to_mae(struct rte_eth_dev *dev,
+                         const struct rte_flow_item pattern[],
+                         const struct rte_flow_action actions[],
+                         struct rte_flow *flow,
+                         struct rte_flow_error *error)
+{
+       struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+       struct sfc_flow_spec *spec = &flow->spec;
+       struct sfc_flow_spec_mae *spec_mae = &spec->mae;
+       int rc;
+
+       /*
+        * If the flow is meant to be a TUNNEL rule in a FT context,
+        * preparse its actions and save its properties in spec_mae.
+        */
+       rc = sfc_ft_tunnel_rule_detect(sa, actions, spec_mae, error);
+       if (rc != 0)
+               goto fail;
+
+       rc = sfc_mae_rule_parse_pattern(sa, pattern, spec_mae, error);
+       if (rc != 0)
+               goto fail;
+
+       if (spec_mae->ft_rule_type == SFC_FT_RULE_TUNNEL) {
+               /*
+                * By design, this flow should be represented solely by the
+                * outer rule. But the HW/FW hasn't got support for setting
+                * Rx mark from RECIRC_ID on outer rule lookup yet. Neither
+                * does it support outer rule counters. As a workaround, an
+                * action rule of lower priority is used to do the job.
+                *
+                * So don't skip sfc_mae_rule_parse_actions() below.
+                */
+       }
+
+       rc = sfc_mae_rule_parse_actions(sa, actions, spec_mae, error);
+       if (rc != 0)
+               goto fail;
+
+       if (spec_mae->ft_ctx != NULL) {
+               if (spec_mae->ft_rule_type == SFC_FT_RULE_TUNNEL)
+                       spec_mae->ft_ctx->tunnel_rule_is_set = B_TRUE;
+
+               ++(spec_mae->ft_ctx->refcnt);
+       }
+
+       return 0;
+
+fail:
+       /* Reset these values to avoid confusing sfc_mae_flow_cleanup(). */
+       spec_mae->ft_rule_type = SFC_FT_RULE_NONE;
+       spec_mae->ft_ctx = NULL;
+
+       return rc;
+}
+
+static int
+sfc_flow_parse(struct rte_eth_dev *dev,
+              const struct rte_flow_attr *attr,
+              const struct rte_flow_item pattern[],
+              const struct rte_flow_action actions[],
+              struct rte_flow *flow,
+              struct rte_flow_error *error)
+{
+       struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+       const struct sfc_flow_ops_by_spec *ops;
+       int rc;
+
+       rc = sfc_flow_parse_attr(sa, attr, flow, error);
+       if (rc != 0)
+               return rc;
+
+       ops = sfc_flow_get_ops_by_spec(flow);
+       if (ops == NULL || ops->parse == NULL) {
+               rte_flow_error_set(error, ENOTSUP,
+                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                  "No backend to handle this flow");
+               return -rte_errno;
+       }
+
+       return ops->parse(dev, pattern, actions, flow, error);
+}
+
+static struct rte_flow *
+sfc_flow_zmalloc(struct rte_flow_error *error)
+{
+       struct rte_flow *flow;
+
+       flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
+       if (flow == NULL) {
+               rte_flow_error_set(error, ENOMEM,
+                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                  "Failed to allocate memory");
+       }
+
+       return flow;
+}
+
+static void
+sfc_flow_free(struct sfc_adapter *sa, struct rte_flow *flow)
+{
+       const struct sfc_flow_ops_by_spec *ops;
+
+       ops = sfc_flow_get_ops_by_spec(flow);
+       if (ops != NULL && ops->cleanup != NULL)
+               ops->cleanup(sa, flow);
+
+       rte_free(flow);
+}
+
+static int
+sfc_flow_insert(struct sfc_adapter *sa, struct rte_flow *flow,
+               struct rte_flow_error *error)
+{
+       const struct sfc_flow_ops_by_spec *ops;
+       int rc;
+
+       ops = sfc_flow_get_ops_by_spec(flow);
+       if (ops == NULL || ops->insert == NULL) {
+               rte_flow_error_set(error, ENOTSUP,
+                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                  "No backend to handle this flow");
+               return rte_errno;
+       }
+
+       rc = ops->insert(sa, flow);
+       if (rc != 0) {
+               rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                  NULL, "Failed to insert the flow rule");
+       }
+
+       return rc;
+}
+
+static int
+sfc_flow_remove(struct sfc_adapter *sa, struct rte_flow *flow,
+               struct rte_flow_error *error)
+{
+       const struct sfc_flow_ops_by_spec *ops;
+       int rc;
+
+       ops = sfc_flow_get_ops_by_spec(flow);
+       if (ops == NULL || ops->remove == NULL) {
+               rte_flow_error_set(error, ENOTSUP,
+                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                  "No backend to handle this flow");
+               return rte_errno;
+       }
+
+       rc = ops->remove(sa, flow);
+       if (rc != 0) {
+               rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                  NULL, "Failed to remove the flow rule");
+       }
+
+       return rc;
+}
+
+static int
+sfc_flow_verify(struct sfc_adapter *sa, struct rte_flow *flow,
+               struct rte_flow_error *error)
+{
+       const struct sfc_flow_ops_by_spec *ops;
+       int rc = 0;
+
+       ops = sfc_flow_get_ops_by_spec(flow);
+       if (ops == NULL) {
+               rte_flow_error_set(error, ENOTSUP,
+                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                  "No backend to handle this flow");
+               return -rte_errno;
+       }
+
+       if (ops->verify != NULL) {
+               SFC_ASSERT(sfc_adapter_is_locked(sa));
+               rc = ops->verify(sa, flow);
+       }
+
+       if (rc != 0) {
+               rte_flow_error_set(error, rc,
+                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                       "Failed to verify flow validity with FW");
+               return -rte_errno;
+       }
+
+       return 0;
+}
+
 static int
 sfc_flow_validate(struct rte_eth_dev *dev,
                  const struct rte_flow_attr *attr,
@@ -2316,11 +2581,25 @@ sfc_flow_validate(struct rte_eth_dev *dev,
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
 {
-       struct rte_flow flow;
+       struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+       struct rte_flow *flow;
+       int rc;
+
+       flow = sfc_flow_zmalloc(error);
+       if (flow == NULL)
+               return -rte_errno;
 
-       memset(&flow, 0, sizeof(flow));
+       sfc_adapter_lock(sa);
+
+       rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
+       if (rc == 0)
+               rc = sfc_flow_verify(sa, flow, error);
+
+       sfc_flow_free(sa, flow);
+
+       sfc_adapter_unlock(sa);
 
-       return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
+       return rc;
 }
 
 static struct rte_flow *
@@ -2334,70 +2613,39 @@ sfc_flow_create(struct rte_eth_dev *dev,
        struct rte_flow *flow = NULL;
        int rc;
 
-       flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
-       if (flow == NULL) {
-               rte_flow_error_set(error, ENOMEM,
-                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-                                  "Failed to allocate memory");
+       flow = sfc_flow_zmalloc(error);
+       if (flow == NULL)
                goto fail_no_mem;
-       }
+
+       sfc_adapter_lock(sa);
 
        rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
        if (rc != 0)
                goto fail_bad_value;
 
-       sfc_adapter_lock(sa);
-
        TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries);
 
-       if (sa->state == SFC_ADAPTER_STARTED) {
-               rc = sfc_flow_filter_insert(sa, flow);
-               if (rc != 0) {
-                       rte_flow_error_set(error, rc,
-                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-                               "Failed to insert filter");
-                       goto fail_filter_insert;
-               }
+       if (sa->state == SFC_ETHDEV_STARTED) {
+               rc = sfc_flow_insert(sa, flow, error);
+               if (rc != 0)
+                       goto fail_flow_insert;
        }
 
        sfc_adapter_unlock(sa);
 
        return flow;
 
-fail_filter_insert:
+fail_flow_insert:
        TAILQ_REMOVE(&sa->flow_list, flow, entries);
 
 fail_bad_value:
-       rte_free(flow);
+       sfc_flow_free(sa, flow);
        sfc_adapter_unlock(sa);
 
 fail_no_mem:
        return NULL;
 }
 
-static int
-sfc_flow_remove(struct sfc_adapter *sa,
-               struct rte_flow *flow,
-               struct rte_flow_error *error)
-{
-       int rc = 0;
-
-       SFC_ASSERT(sfc_adapter_is_locked(sa));
-
-       if (sa->state == SFC_ADAPTER_STARTED) {
-               rc = sfc_flow_filter_remove(sa, flow);
-               if (rc != 0)
-                       rte_flow_error_set(error, rc,
-                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-                               "Failed to destroy flow rule");
-       }
-
-       TAILQ_REMOVE(&sa->flow_list, flow, entries);
-       rte_free(flow);
-
-       return rc;
-}
-
 static int
 sfc_flow_destroy(struct rte_eth_dev *dev,
                 struct rte_flow *flow,
@@ -2420,7 +2668,11 @@ sfc_flow_destroy(struct rte_eth_dev *dev,
                goto fail_bad_value;
        }
 
-       rc = sfc_flow_remove(sa, flow, error);
+       if (sa->state == SFC_ETHDEV_STARTED)
+               rc = sfc_flow_remove(sa, flow, error);
+
+       TAILQ_REMOVE(&sa->flow_list, flow, entries);
+       sfc_flow_free(sa, flow);
 
 fail_bad_value:
        sfc_adapter_unlock(sa);
@@ -2434,15 +2686,21 @@ sfc_flow_flush(struct rte_eth_dev *dev,
 {
        struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
        struct rte_flow *flow;
-       int rc = 0;
        int ret = 0;
 
        sfc_adapter_lock(sa);
 
        while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
-               rc = sfc_flow_remove(sa, flow, error);
-               if (rc != 0)
-                       ret = rc;
+               if (sa->state == SFC_ETHDEV_STARTED) {
+                       int rc;
+
+                       rc = sfc_flow_remove(sa, flow, error);
+                       if (rc != 0)
+                               ret = rc;
+               }
+
+               TAILQ_REMOVE(&sa->flow_list, flow, entries);
+               sfc_flow_free(sa, flow);
        }
 
        sfc_adapter_unlock(sa);
@@ -2450,6 +2708,49 @@ sfc_flow_flush(struct rte_eth_dev *dev,
        return -ret;
 }
 
+static int
+sfc_flow_query(struct rte_eth_dev *dev,
+              struct rte_flow *flow,
+              const struct rte_flow_action *action,
+              void *data,
+              struct rte_flow_error *error)
+{
+       struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+       const struct sfc_flow_ops_by_spec *ops;
+       int ret;
+
+       sfc_adapter_lock(sa);
+
+       ops = sfc_flow_get_ops_by_spec(flow);
+       if (ops == NULL || ops->query == NULL) {
+               ret = rte_flow_error_set(error, ENOTSUP,
+                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                       "No backend to handle this flow");
+               goto fail_no_backend;
+       }
+
+       if (sa->state != SFC_ETHDEV_STARTED) {
+               ret = rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                       "Can't query the flow: the adapter is not started");
+               goto fail_not_started;
+       }
+
+       ret = ops->query(dev, flow, action, data, error);
+       if (ret != 0)
+               goto fail_query;
+
+       sfc_adapter_unlock(sa);
+
+       return 0;
+
+fail_query:
+fail_not_started:
+fail_no_backend:
+       sfc_adapter_unlock(sa);
+       return ret;
+}
+
 static int
 sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
                 struct rte_flow_error *error)
@@ -2458,7 +2759,7 @@ sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
        int ret = 0;
 
        sfc_adapter_lock(sa);
-       if (sa->state != SFC_ADAPTER_INITIALIZED) {
+       if (sa->state != SFC_ETHDEV_INITIALIZED) {
                rte_flow_error_set(error, EBUSY,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL, "please close the port first");
@@ -2471,13 +2772,38 @@ sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
        return ret;
 }
 
+static int
+sfc_flow_pick_transfer_proxy(struct rte_eth_dev *dev,
+                            uint16_t *transfer_proxy_port,
+                            struct rte_flow_error *error)
+{
+       struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+       int ret;
+
+       ret = sfc_mae_get_switch_domain_admin(sa->mae.switch_domain_id,
+                                             transfer_proxy_port);
+       if (ret != 0) {
+               return rte_flow_error_set(error, ret,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         NULL, NULL);
+       }
+
+       return 0;
+}
+
 const struct rte_flow_ops sfc_flow_ops = {
        .validate = sfc_flow_validate,
        .create = sfc_flow_create,
        .destroy = sfc_flow_destroy,
        .flush = sfc_flow_flush,
-       .query = NULL,
+       .query = sfc_flow_query,
        .isolate = sfc_flow_isolate,
+       .tunnel_decap_set = sfc_ft_decap_set,
+       .tunnel_match = sfc_ft_match,
+       .tunnel_action_decap_release = sfc_ft_action_decap_release,
+       .tunnel_item_release = sfc_ft_item_release,
+       .get_restore_info = sfc_ft_get_restore_info,
+       .pick_transfer_proxy = sfc_flow_pick_transfer_proxy,
 };
 
 void
@@ -2497,7 +2823,7 @@ sfc_flow_fini(struct sfc_adapter *sa)
 
        while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
                TAILQ_REMOVE(&sa->flow_list, flow, entries);
-               rte_free(flow);
+               sfc_flow_free(sa, flow);
        }
 }
 
@@ -2509,7 +2835,13 @@ sfc_flow_stop(struct sfc_adapter *sa)
        SFC_ASSERT(sfc_adapter_is_locked(sa));
 
        TAILQ_FOREACH(flow, &sa->flow_list, entries)
-               sfc_flow_filter_remove(sa, flow);
+               sfc_flow_remove(sa, flow, NULL);
+
+       /*
+        * MAE counter service is not stopped on flow rule remove to avoid
+        * extra work. Make sure that it is stopped here.
+        */
+       sfc_mae_counter_stop(sa);
 }
 
 int
@@ -2522,8 +2854,10 @@ sfc_flow_start(struct sfc_adapter *sa)
 
        SFC_ASSERT(sfc_adapter_is_locked(sa));
 
+       sfc_ft_counters_reset(sa);
+
        TAILQ_FOREACH(flow, &sa->flow_list, entries) {
-               rc = sfc_flow_filter_insert(sa, flow);
+               rc = sfc_flow_insert(sa, flow, NULL);
                if (rc != 0)
                        goto fail_bad_flow;
        }
@@ -2533,3 +2867,12 @@ sfc_flow_start(struct sfc_adapter *sa)
 fail_bad_flow:
        return rc;
 }
+
+static void
+sfc_flow_cleanup(struct sfc_adapter *sa, struct rte_flow *flow)
+{
+       if (flow == NULL)
+               return;
+
+       sfc_flow_rss_ctx_del(sa, flow->spec.filter.rss_ctx);
+}