net/sfc: fix printout label for count action
[dpdk.git] / drivers / net / sfc / sfc_mae.c
index 2515b9a..d046cc2 100644
@@ -23,7 +23,7 @@
 #include "sfc_service.h"
 
 static int
-sfc_mae_assign_entity_mport(struct sfc_adapter *sa,
+sfc_mae_assign_ethdev_mport(struct sfc_adapter *sa,
                            efx_mport_sel_t *mportp)
 {
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
@@ -32,6 +32,35 @@ sfc_mae_assign_entity_mport(struct sfc_adapter *sa,
                                              mportp);
 }
 
+static int
+sfc_mae_assign_entity_mport(struct sfc_adapter *sa,
+                           efx_mport_sel_t *mportp)
+{
+       const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+       int rc = 0;
+
+       if (encp->enc_mae_admin) {
+               /*
+                * This ethdev sits on MAE admin PF. The represented
+                * entity is the network port assigned to that PF.
+                */
+               rc = efx_mae_mport_by_phy_port(encp->enc_assigned_port, mportp);
+       } else {
+               /*
+                * This ethdev sits on unprivileged PF / VF. The entity
+                * represented by the ethdev can change dynamically
+                * as MAE admin changes default traffic rules.
+                *
+                * For the sake of simplicity, do not fill in the m-port
+                * and assume that flow rules should not be allowed to
+                * reference the entity represented by this ethdev.
+                */
+               efx_mae_mport_invalid(mportp);
+       }
+
+       return rc;
+}
+
 static int
 sfc_mae_counter_registry_init(struct sfc_mae_counter_registry *registry,
                              uint32_t nb_counters_max)
@@ -184,6 +213,7 @@ sfc_mae_attach(struct sfc_adapter *sa)
        struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
        struct sfc_mae_switch_port_request switch_port_request = {0};
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+       efx_mport_sel_t ethdev_mport;
        efx_mport_sel_t entity_mport;
        struct sfc_mae *mae = &sa->mae;
        struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
@@ -197,25 +227,32 @@ sfc_mae_attach(struct sfc_adapter *sa)
                return 0;
        }
 
-       sfc_log_init(sa, "init MAE");
-       rc = efx_mae_init(sa->nic);
-       if (rc != 0)
-               goto fail_mae_init;
+       if (encp->enc_mae_admin) {
+               sfc_log_init(sa, "init MAE");
+               rc = efx_mae_init(sa->nic);
+               if (rc != 0)
+                       goto fail_mae_init;
 
-       sfc_log_init(sa, "get MAE limits");
-       rc = efx_mae_get_limits(sa->nic, &limits);
-       if (rc != 0)
-               goto fail_mae_get_limits;
+               sfc_log_init(sa, "get MAE limits");
+               rc = efx_mae_get_limits(sa->nic, &limits);
+               if (rc != 0)
+                       goto fail_mae_get_limits;
 
-       sfc_log_init(sa, "init MAE counter registry");
-       rc = sfc_mae_counter_registry_init(&mae->counter_registry,
-                                          limits.eml_max_n_counters);
-       if (rc != 0) {
-               sfc_err(sa, "failed to init MAE counters registry for %u entries: %s",
-                       limits.eml_max_n_counters, rte_strerror(rc));
-               goto fail_counter_registry_init;
+               sfc_log_init(sa, "init MAE counter registry");
+               rc = sfc_mae_counter_registry_init(&mae->counter_registry,
+                                                  limits.eml_max_n_counters);
+               if (rc != 0) {
+                       sfc_err(sa, "failed to init MAE counters registry for %u entries: %s",
+                               limits.eml_max_n_counters, rte_strerror(rc));
+                       goto fail_counter_registry_init;
+               }
        }
 
+       sfc_log_init(sa, "assign ethdev MPORT");
+       rc = sfc_mae_assign_ethdev_mport(sa, &ethdev_mport);
+       if (rc != 0)
+               goto fail_mae_assign_ethdev_mport;
+
        sfc_log_init(sa, "assign entity MPORT");
        rc = sfc_mae_assign_entity_mport(sa, &entity_mport);
        if (rc != 0)
@@ -228,31 +265,39 @@ sfc_mae_attach(struct sfc_adapter *sa)
 
        sfc_log_init(sa, "assign RTE switch port");
        switch_port_request.type = SFC_MAE_SWITCH_PORT_INDEPENDENT;
+       switch_port_request.ethdev_mportp = &ethdev_mport;
        switch_port_request.entity_mportp = &entity_mport;
-       /* RTE ethdev MPORT matches that of the entity for independent ports. */
-       switch_port_request.ethdev_mportp = &entity_mport;
        switch_port_request.ethdev_port_id = sas->port_id;
+       switch_port_request.port_data.indep.mae_admin =
+               encp->enc_mae_admin == B_TRUE;
        rc = sfc_mae_assign_switch_port(mae->switch_domain_id,
                                        &switch_port_request,
                                        &mae->switch_port_id);
        if (rc != 0)
                goto fail_mae_assign_switch_port;
 
-       sfc_log_init(sa, "allocate encap. header bounce buffer");
-       bounce_eh->buf_size = limits.eml_encap_header_size_limit;
-       bounce_eh->buf = rte_malloc("sfc_mae_bounce_eh",
-                                   bounce_eh->buf_size, 0);
-       if (bounce_eh->buf == NULL)
-               goto fail_mae_alloc_bounce_eh;
-
-       mae->status = SFC_MAE_STATUS_SUPPORTED;
-       mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios;
-       mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
-       mae->encap_types_supported = limits.eml_encap_types_supported;
+       if (encp->enc_mae_admin) {
+               sfc_log_init(sa, "allocate encap. header bounce buffer");
+               bounce_eh->buf_size = limits.eml_encap_header_size_limit;
+               bounce_eh->buf = rte_malloc("sfc_mae_bounce_eh",
+                                           bounce_eh->buf_size, 0);
+               if (bounce_eh->buf == NULL)
+                       goto fail_mae_alloc_bounce_eh;
+
+               mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios;
+               mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
+               mae->encap_types_supported = limits.eml_encap_types_supported;
+       }
+
        TAILQ_INIT(&mae->outer_rules);
        TAILQ_INIT(&mae->encap_headers);
        TAILQ_INIT(&mae->action_sets);
 
+       if (encp->enc_mae_admin)
+               mae->status = SFC_MAE_STATUS_ADMIN;
+       else
+               mae->status = SFC_MAE_STATUS_SUPPORTED;
+
        sfc_log_init(sa, "done");
 
        return 0;
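
For orientation, the tri-state MAE status used above, reconstructed only from the identifiers visible in this diff (the authoritative definition lives in sfc_mae.h and may contain further members):

/* Sketch only; names taken from the code in this patch. */
enum sfc_mae_status {
	SFC_MAE_STATUS_UNKNOWN = 0,	/* Not probed yet / detached */
	SFC_MAE_STATUS_SUPPORTED,	/* MAE present, no admin privilege */
	SFC_MAE_STATUS_ADMIN,		/* MAE present, administered by this PF */
};
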
@@ -261,11 +306,14 @@ fail_mae_alloc_bounce_eh:
 fail_mae_assign_switch_port:
 fail_mae_assign_switch_domain:
 fail_mae_assign_entity_mport:
-       sfc_mae_counter_registry_fini(&mae->counter_registry);
+fail_mae_assign_ethdev_mport:
+       if (encp->enc_mae_admin)
+               sfc_mae_counter_registry_fini(&mae->counter_registry);
 
 fail_counter_registry_init:
 fail_mae_get_limits:
-       efx_mae_fini(sa->nic);
+       if (encp->enc_mae_admin)
+               efx_mae_fini(sa->nic);
 
 fail_mae_init:
        sfc_log_init(sa, "failed %d", rc);
@@ -284,7 +332,7 @@ sfc_mae_detach(struct sfc_adapter *sa)
        mae->nb_action_rule_prios_max = 0;
        mae->status = SFC_MAE_STATUS_UNKNOWN;
 
-       if (status_prev != SFC_MAE_STATUS_SUPPORTED)
+       if (status_prev != SFC_MAE_STATUS_ADMIN)
                return;
 
        rte_free(mae->bounce_eh.buf);
@@ -737,6 +785,8 @@ sfc_mae_action_set_add(struct sfc_adapter *sa,
                       const struct rte_flow_action actions[],
                       efx_mae_actions_t *spec,
                       struct sfc_mae_encap_header *encap_header,
+                      uint64_t *ft_group_hit_counter,
+                      struct sfc_flow_tunnel *ft,
                       unsigned int n_counters,
                       struct sfc_mae_action_set **action_setp)
 {
@@ -763,6 +813,16 @@ sfc_mae_action_set_add(struct sfc_adapter *sa,
                        return ENOMEM;
                }
 
+               for (i = 0; i < n_counters; ++i) {
+                       action_set->counters[i].rte_id_valid = B_FALSE;
+                       action_set->counters[i].mae_id.id =
+                               EFX_MAE_RSRC_ID_INVALID;
+
+                       action_set->counters[i].ft_group_hit_counter =
+                               ft_group_hit_counter;
+                       action_set->counters[i].ft = ft;
+               }
+
                for (action = actions, i = 0;
                     action->type != RTE_FLOW_ACTION_TYPE_END && i < n_counters;
                     ++action) {
@@ -773,8 +833,7 @@ sfc_mae_action_set_add(struct sfc_adapter *sa,
 
                        conf = action->conf;
 
-                       action_set->counters[i].mae_id.id =
-                               EFX_MAE_RSRC_ID_INVALID;
+                       action_set->counters[i].rte_id_valid = B_TRUE;
                        action_set->counters[i].rte_id = conf->id;
                        i++;
                }
@@ -1252,12 +1311,90 @@ sfc_mae_rule_parse_item_port_id(const struct rte_flow_item *item,
                                          "The port ID is too large");
        }
 
-       rc = sfc_mae_switch_port_by_ethdev(ctx_mae->sa->mae.switch_domain_id,
-                                          spec->id, &mport_sel);
+       rc = sfc_mae_switch_get_ethdev_mport(ctx_mae->sa->mae.switch_domain_id,
+                                            spec->id, &mport_sel);
        if (rc != 0) {
                return rte_flow_error_set(error, rc,
                                RTE_FLOW_ERROR_TYPE_ITEM, item,
-                               "Can't find RTE ethdev by the port ID");
+                               "Can't get m-port for the given ethdev");
+       }
+
+       rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec,
+                                         &mport_sel, NULL);
+       if (rc != 0) {
+               return rte_flow_error_set(error, rc,
+                               RTE_FLOW_ERROR_TYPE_ITEM, item,
+                               "Failed to set MPORT for the port ID");
+       }
+
+       ctx_mae->match_mport_set = B_TRUE;
+
+       return 0;
+}
+
+static int
+sfc_mae_rule_parse_item_ethdev_based(const struct rte_flow_item *item,
+                                    struct sfc_flow_parse_ctx *ctx,
+                                    struct rte_flow_error *error)
+{
+       struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
+       const struct rte_flow_item_ethdev supp_mask = {
+               .port_id = 0xffff,
+       };
+       const void *def_mask = &rte_flow_item_ethdev_mask;
+       const struct rte_flow_item_ethdev *spec = NULL;
+       const struct rte_flow_item_ethdev *mask = NULL;
+       efx_mport_sel_t mport_sel;
+       int rc;
+
+       if (ctx_mae->match_mport_set) {
+               return rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_ITEM, item,
+                               "Can't handle multiple traffic source items");
+       }
+
+       rc = sfc_flow_parse_init(item,
+                                (const void **)&spec, (const void **)&mask,
+                                (const void *)&supp_mask, def_mask,
+                                sizeof(struct rte_flow_item_ethdev), error);
+       if (rc != 0)
+               return rc;
+
+       if (mask->port_id != supp_mask.port_id) {
+               return rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM, item,
+                               "Bad mask in the ethdev-based pattern item");
+       }
+
+       /* If "spec" is not set, could be any port ID */
+       if (spec == NULL)
+               return 0;
+
+       switch (item->type) {
+       case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR:
+               rc = sfc_mae_switch_get_ethdev_mport(
+                               ctx_mae->sa->mae.switch_domain_id,
+                               spec->port_id, &mport_sel);
+               if (rc != 0) {
+                       return rte_flow_error_set(error, rc,
+                                       RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                       "Can't get m-port for the given ethdev");
+               }
+               break;
+       case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
+               rc = sfc_mae_switch_get_entity_mport(
+                               ctx_mae->sa->mae.switch_domain_id,
+                               spec->port_id, &mport_sel);
+               if (rc != 0) {
+                       return rte_flow_error_set(error, rc,
+                                       RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                       "Can't get m-port for the given ethdev");
+               }
+               break;
+       default:
+               return rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM, item,
+                               "Unsupported ethdev-based flow item");
        }
 
        rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec,
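
For reference, an application-side sketch (not part of the patch) of how the new ethdev-based pattern items can be exercised. The port numbers are arbitrary examples, and the rule is assumed to be created through the MAE admin ethdev acting as the transfer proxy:

#include <rte_flow.h>

/*
 * Sketch: drop transfer traffic entering the switch from the entity
 * (e.g. a VF) behind ethdev port 1; "proxy_port_id" is assumed to be
 * the MAE admin ethdev.
 */
static struct rte_flow *
example_drop_from_entity(uint16_t proxy_port_id, struct rte_flow_error *error)
{
	const struct rte_flow_attr attr = { .transfer = 1 };
	const struct rte_flow_item_ethdev port_spec = { .port_id = 1 };
	const struct rte_flow_item pattern[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
			.spec = &port_spec,
			.mask = &rte_flow_item_ethdev_mask,
		},
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(proxy_port_id, &attr, pattern, actions, error);
}
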
@@ -1525,6 +1662,7 @@ sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
                            struct rte_flow_error *error)
 {
        struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
+       struct rte_flow_item_eth override_mask;
        struct rte_flow_item_eth supp_mask;
        const uint8_t *spec = NULL;
        const uint8_t *mask = NULL;
@@ -1542,6 +1680,22 @@ sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
        if (rc != 0)
                return rc;
 
+       if (ctx_mae->ft_rule_type == SFC_FT_RULE_JUMP && mask != NULL) {
+               /*
+                * The HW/FW hasn't got support for match on MAC addresses in
+                * outer rules yet (this will change). Match on VLAN presence
+                * isn't supported either. Ignore these match criteria.
+                */
+               memcpy(&override_mask, mask, sizeof(override_mask));
+               memset(&override_mask.hdr.dst_addr, 0,
+                      sizeof(override_mask.hdr.dst_addr));
+               memset(&override_mask.hdr.src_addr, 0,
+                      sizeof(override_mask.hdr.src_addr));
+               override_mask.has_vlan = 0;
+
+               mask = (const uint8_t *)&override_mask;
+       }
+
        if (spec != NULL) {
                struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
                struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
@@ -2093,14 +2247,21 @@ sfc_mae_rule_parse_item_tunnel(const struct rte_flow_item *item,
        const uint8_t *mask = NULL;
        int rc;
 
-       /*
-        * We're about to start processing inner frame items.
-        * Process pattern data that has been deferred so far
-        * and reset pattern data storage.
-        */
-       rc = sfc_mae_rule_process_pattern_data(ctx_mae, error);
-       if (rc != 0)
-               return rc;
+       if (ctx_mae->ft_rule_type == SFC_FT_RULE_GROUP) {
+               /*
+                * As a workaround, pattern processing has started from
+                * this (tunnel) item. No pattern data to process yet.
+                */
+       } else {
+               /*
+                * We're about to start processing inner frame items.
+                * Process pattern data that has been deferred so far
+                * and reset pattern data storage.
+                */
+               rc = sfc_mae_rule_process_pattern_data(ctx_mae, error);
+               if (rc != 0)
+                       return rc;
+       }
 
        memset(&ctx_mae->pattern_data, 0, sizeof(ctx_mae->pattern_data));
 
@@ -2176,6 +2337,30 @@ static const struct sfc_flow_item sfc_flow_items[] = {
                .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
                .parse = sfc_mae_rule_parse_item_port_id,
        },
+       {
+               .type = RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR,
+               .name = "PORT_REPRESENTOR",
+               /*
+                * In terms of RTE flow, this item is a META one,
+                * and its position in the pattern is don't care.
+                */
+               .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
+               .layer = SFC_FLOW_ITEM_ANY_LAYER,
+               .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
+               .parse = sfc_mae_rule_parse_item_ethdev_based,
+       },
+       {
+               .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
+               .name = "REPRESENTED_PORT",
+               /*
+                * In terms of RTE flow, this item is a META one,
+                * and its position in the pattern is don't care.
+                */
+               .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
+               .layer = SFC_FLOW_ITEM_ANY_LAYER,
+               .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
+               .parse = sfc_mae_rule_parse_item_ethdev_based,
+       },
        {
                .type = RTE_FLOW_ITEM_TYPE_PHY_PORT,
                .name = "PHY_PORT",
@@ -2419,10 +2604,10 @@ sfc_mae_rule_preparse_item_mark(const struct rte_flow_item_mark *spec,
 
 static int
 sfc_mae_rule_encap_parse_init(struct sfc_adapter *sa,
-                             const struct rte_flow_item pattern[],
                              struct sfc_mae_parse_ctx *ctx,
                              struct rte_flow_error *error)
 {
+       const struct rte_flow_item *pattern = ctx->pattern;
        struct sfc_mae *mae = &sa->mae;
        uint8_t recirc_id = 0;
        int rc;
@@ -2497,6 +2682,13 @@ sfc_mae_rule_encap_parse_init(struct sfc_adapter *sa,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  pattern, "tunnel offload: GROUP: tunnel type mismatch");
                }
+
+               /*
+                * The HW/FW hasn't got support for the use of "ENC" fields in
+                * action rules (except the VNET_ID one) yet. As a workaround,
+                * start parsing the pattern from the tunnel item.
+                */
+               ctx->pattern = pattern;
                break;
        default:
                SFC_ASSERT(B_FALSE);
@@ -2580,6 +2772,7 @@ sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
                           struct rte_flow_error *error)
 {
        struct sfc_mae_parse_ctx ctx_mae;
+       unsigned int priority_shift = 0;
        struct sfc_flow_parse_ctx ctx;
        int rc;
 
@@ -2591,13 +2784,32 @@ sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
 
        switch (ctx_mae.ft_rule_type) {
        case SFC_FT_RULE_JUMP:
-               /* No action rule */
-               break;
+               /*
+                * By design, this flow should be represented solely by the
+                * outer rule. But the HW/FW hasn't got support for setting
+                * Rx mark from RECIRC_ID on outer rule lookup yet. Neither
+                * does it support outer rule counters. As a workaround, an
+                * action rule of lower priority is used to do the job.
+                */
+               priority_shift = 1;
+
+               /* FALLTHROUGH */
        case SFC_FT_RULE_GROUP:
+               if (ctx_mae.priority != 0) {
+                       /*
+                        * Because of the above workaround, deny the
+                        * use of priorities to JUMP and GROUP rules.
+                        */
+                       rc = rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, NULL,
+                               "tunnel offload: priorities are not supported");
+                       goto fail_priority_check;
+               }
+
                /* FALLTHROUGH */
        case SFC_FT_RULE_NONE:
                rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
-                                            spec->priority,
+                                            spec->priority + priority_shift,
                                             &ctx_mae.match_spec_action);
                if (rc != 0) {
                        rc = rte_flow_error_set(error, rc,
@@ -2621,11 +2833,12 @@ sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
        ctx_mae.encap_type = EFX_TUNNEL_PROTOCOL_NONE;
        ctx_mae.match_spec = ctx_mae.match_spec_action;
        ctx_mae.field_ids_remap = field_ids_no_remap;
+       ctx_mae.pattern = pattern;
 
        ctx.type = SFC_FLOW_PARSE_CTX_MAE;
        ctx.mae = &ctx_mae;
 
-       rc = sfc_mae_rule_encap_parse_init(sa, pattern, &ctx_mae, error);
+       rc = sfc_mae_rule_encap_parse_init(sa, &ctx_mae, error);
        if (rc != 0)
                goto fail_encap_parse_init;
 
@@ -2637,7 +2850,7 @@ sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
        spec->ft = ctx_mae.ft;
 
        rc = sfc_flow_parse_pattern(sa, sfc_flow_items, RTE_DIM(sfc_flow_items),
-                                   pattern, &ctx, error);
+                                   ctx_mae.pattern, &ctx, error);
        if (rc != 0)
                goto fail_parse_pattern;
 
@@ -2672,6 +2885,7 @@ fail_encap_parse_init:
                efx_mae_match_spec_fini(sa->nic, ctx_mae.match_spec_action);
 
 fail_init_match_spec_action:
+fail_priority_check:
        return rc;
 }
 
@@ -3121,11 +3335,14 @@ sfc_mae_rule_parse_action_vxlan_encap(
 static int
 sfc_mae_rule_parse_action_mark(struct sfc_adapter *sa,
                               const struct rte_flow_action_mark *conf,
+                              const struct sfc_flow_spec_mae *spec_mae,
                               efx_mae_actions_t *spec)
 {
        int rc;
 
-       if (conf->id > SFC_FT_USER_MARK_MASK) {
+       if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
+               /* Workaround. See sfc_flow_parse_rte_to_mae() */
+       } else if (conf->id > SFC_FT_USER_MARK_MASK) {
                sfc_err(sa, "the mark value is too large");
                return EINVAL;
        }
@@ -3254,10 +3471,10 @@ sfc_mae_rule_parse_action_port_id(struct sfc_adapter *sa,
 
        port_id = (conf->original != 0) ? sas->port_id : conf->id;
 
-       rc = sfc_mae_switch_port_by_ethdev(mae->switch_domain_id,
-                                          port_id, &mport);
+       rc = sfc_mae_switch_get_ethdev_mport(mae->switch_domain_id,
+                                            port_id, &mport);
        if (rc != 0) {
-               sfc_err(sa, "failed to find MAE switch port SW entry for RTE ethdev port %u: %s",
+               sfc_err(sa, "failed to get m-port for the given ethdev (port_id=%u): %s",
                        port_id, strerror(rc));
                return rc;
        }
@@ -3271,6 +3488,58 @@ sfc_mae_rule_parse_action_port_id(struct sfc_adapter *sa,
        return rc;
 }
 
+static int
+sfc_mae_rule_parse_action_port_representor(struct sfc_adapter *sa,
+               const struct rte_flow_action_ethdev *conf,
+               efx_mae_actions_t *spec)
+{
+       struct sfc_mae *mae = &sa->mae;
+       efx_mport_sel_t mport;
+       int rc;
+
+       rc = sfc_mae_switch_get_ethdev_mport(mae->switch_domain_id,
+                                            conf->port_id, &mport);
+       if (rc != 0) {
+               sfc_err(sa, "failed to get m-port for the given ethdev (port_id=%u): %s",
+                       conf->port_id, strerror(rc));
+               return rc;
+       }
+
+       rc = efx_mae_action_set_populate_deliver(spec, &mport);
+       if (rc != 0) {
+               sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
+                       mport.sel, strerror(rc));
+       }
+
+       return rc;
+}
+
+static int
+sfc_mae_rule_parse_action_represented_port(struct sfc_adapter *sa,
+               const struct rte_flow_action_ethdev *conf,
+               efx_mae_actions_t *spec)
+{
+       struct sfc_mae *mae = &sa->mae;
+       efx_mport_sel_t mport;
+       int rc;
+
+       rc = sfc_mae_switch_get_entity_mport(mae->switch_domain_id,
+                                            conf->port_id, &mport);
+       if (rc != 0) {
+               sfc_err(sa, "failed to get m-port for the given ethdev (port_id=%u): %s",
+                       conf->port_id, strerror(rc));
+               return rc;
+       }
+
+       rc = efx_mae_action_set_populate_deliver(spec, &mport);
+       if (rc != 0) {
+               sfc_err(sa, "failed to request action DELIVER with m-port selector 0x%08x: %s",
+                       mport.sel, strerror(rc));
+       }
+
+       return rc;
+}
+
 static const char * const action_names[] = {
        [RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] = "VXLAN_DECAP",
        [RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] = "OF_POP_VLAN",
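
The action-side counterpart handled by the two helpers above, as a minimal fragment (not part of the patch; the port number is an arbitrary example):

/* Sketch: steer matched packets to the entity behind ethdev port 2. */
const struct rte_flow_action_ethdev fwd_conf = { .port_id = 2 };
const struct rte_flow_action fwd_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT, .conf = &fwd_conf },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
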
@@ -3278,23 +3547,28 @@ static const char * const action_names[] = {
        [RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] = "OF_SET_VLAN_VID",
        [RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] = "OF_SET_VLAN_PCP",
        [RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] = "VXLAN_ENCAP",
+       [RTE_FLOW_ACTION_TYPE_COUNT] = "COUNT",
        [RTE_FLOW_ACTION_TYPE_FLAG] = "FLAG",
        [RTE_FLOW_ACTION_TYPE_MARK] = "MARK",
        [RTE_FLOW_ACTION_TYPE_PHY_PORT] = "PHY_PORT",
        [RTE_FLOW_ACTION_TYPE_PF] = "PF",
        [RTE_FLOW_ACTION_TYPE_VF] = "VF",
        [RTE_FLOW_ACTION_TYPE_PORT_ID] = "PORT_ID",
+       [RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR] = "PORT_REPRESENTOR",
+       [RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT] = "REPRESENTED_PORT",
        [RTE_FLOW_ACTION_TYPE_DROP] = "DROP",
+       [RTE_FLOW_ACTION_TYPE_JUMP] = "JUMP",
 };
 
 static int
 sfc_mae_rule_parse_action(struct sfc_adapter *sa,
                          const struct rte_flow_action *action,
-                         const struct sfc_mae_outer_rule *outer_rule,
+                         const struct sfc_flow_spec_mae *spec_mae,
                          struct sfc_mae_actions_bundle *bundle,
                          efx_mae_actions_t *spec,
                          struct rte_flow_error *error)
 {
+       const struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
        const uint64_t rx_metadata = sa->negotiated_rx_metadata;
        bool custom_error = B_FALSE;
        int rc = 0;
@@ -3314,6 +3588,14 @@ sfc_mae_rule_parse_action(struct sfc_adapter *sa,
                                       bundle->actions_mask);
                rc = efx_mae_action_set_populate_vlan_pop(spec);
                break;
+       case RTE_FLOW_ACTION_TYPE_OF_DEC_NW_TTL:
+       case RTE_FLOW_ACTION_TYPE_DEC_TTL:
+               SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_DEC_NW_TTL,
+                                      bundle->actions_mask);
+               SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DEC_TTL,
+                                      bundle->actions_mask);
+               rc = efx_mae_action_set_populate_decr_ip_ttl(spec);
+               break;
        case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
                SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
                                       bundle->actions_mask);
@@ -3358,9 +3640,10 @@ sfc_mae_rule_parse_action(struct sfc_adapter *sa,
        case RTE_FLOW_ACTION_TYPE_MARK:
                SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
                                       bundle->actions_mask);
-               if ((rx_metadata & RTE_ETH_RX_METADATA_USER_MARK) != 0) {
+               if ((rx_metadata & RTE_ETH_RX_METADATA_USER_MARK) != 0 ||
+                   spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
                        rc = sfc_mae_rule_parse_action_mark(sa, action->conf,
-                                                           spec);
+                                                           spec_mae, spec);
                } else {
                        rc = rte_flow_error_set(error, ENOTSUP,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
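
Since the check above keys off negotiated Rx metadata, here is a hedged sketch of the application-side prerequisite for action MARK (not part of the patch; it has to run before rte_eth_dev_configure()):

#include <errno.h>
#include <rte_ethdev.h>

static int
example_enable_user_mark(uint16_t port_id)
{
	uint64_t metadata = RTE_ETH_RX_METADATA_USER_MARK;
	int ret;

	/* Negotiate delivery of the user mark before configuring the port. */
	ret = rte_eth_rx_metadata_negotiate(port_id, &metadata);
	if (ret != 0)
		return ret;

	return (metadata & RTE_ETH_RX_METADATA_USER_MARK) != 0 ? 0 : -ENOTSUP;
}
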
@@ -3389,11 +3672,29 @@ sfc_mae_rule_parse_action(struct sfc_adapter *sa,
                                       bundle->actions_mask);
                rc = sfc_mae_rule_parse_action_port_id(sa, action->conf, spec);
                break;
+       case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+               SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR,
+                                      bundle->actions_mask);
+               rc = sfc_mae_rule_parse_action_port_representor(sa,
+                               action->conf, spec);
+               break;
+       case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+               SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
+                                      bundle->actions_mask);
+               rc = sfc_mae_rule_parse_action_represented_port(sa,
+                               action->conf, spec);
+               break;
        case RTE_FLOW_ACTION_TYPE_DROP:
                SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
                                       bundle->actions_mask);
                rc = efx_mae_action_set_populate_drop(spec);
                break;
+       case RTE_FLOW_ACTION_TYPE_JUMP:
+               if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
+                       /* Workaround. See sfc_flow_parse_rte_to_mae() */
+                       break;
+               }
+               /* FALLTHROUGH */
        default:
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ACTION, NULL,
@@ -3449,10 +3750,12 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
 {
        struct sfc_mae_encap_header *encap_header = NULL;
        struct sfc_mae_actions_bundle bundle = {0};
+       struct sfc_flow_tunnel *counter_ft = NULL;
+       uint64_t *ft_group_hit_counter = NULL;
        const struct rte_flow_action *action;
        struct sfc_mae *mae = &sa->mae;
+       unsigned int n_count = 0;
        efx_mae_actions_t *spec;
-       unsigned int n_count;
        int rc;
 
        rte_errno = 0;
@@ -3467,11 +3770,31 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
        if (rc != 0)
                goto fail_action_set_spec_init;
 
+       for (action = actions;
+            action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
+               if (action->type == RTE_FLOW_ACTION_TYPE_COUNT)
+                       ++n_count;
+       }
+
        if (spec_mae->ft_rule_type == SFC_FT_RULE_GROUP) {
                /* JUMP rules don't decapsulate packets. GROUP rules do. */
                rc = efx_mae_action_set_populate_decap(spec);
                if (rc != 0)
                        goto fail_enforce_ft_decap;
+
+               if (n_count == 0 && sfc_mae_counter_stream_enabled(sa)) {
+                       /*
+                        * The user opted not to use action COUNT in this rule,
+                        * but the counter should be enabled implicitly because
+                        * packets hitting this rule contribute to the tunnel's
+                        * total number of hits. See sfc_mae_counter_get().
+                        */
+                       rc = efx_mae_action_set_populate_count(spec);
+                       if (rc != 0)
+                               goto fail_enforce_ft_count;
+
+                       n_count = 1;
+               }
        }
 
        /* Cleanup after previous encap. header bounce buffer usage. */
@@ -3483,7 +3806,7 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
                if (rc != 0)
                        goto fail_rule_parse_action;
 
-               rc = sfc_mae_rule_parse_action(sa, action, spec_mae->outer_rule,
+               rc = sfc_mae_rule_parse_action(sa, action, spec_mae,
                                               &bundle, spec, error);
                if (rc != 0)
                        goto fail_rule_parse_action;
@@ -3497,7 +3820,6 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
        if (rc != 0)
                goto fail_process_encap_header;
 
-       n_count = efx_mae_action_set_get_nb_count(spec);
        if (n_count > 1) {
                rc = ENOTSUP;
                sfc_err(sa, "too many count actions requested: %u", n_count);
@@ -3507,6 +3829,14 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
        switch (spec_mae->ft_rule_type) {
        case SFC_FT_RULE_NONE:
                break;
+       case SFC_FT_RULE_JUMP:
+               /* Workaround. See sfc_flow_parse_rte_to_mae() */
+               rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
+               if (rc != 0)
+                       goto fail_workaround_jump_delivery;
+
+               counter_ft = spec_mae->ft;
+               break;
        case SFC_FT_RULE_GROUP:
                /*
                 * Packets that go to the rule's AR have FT mark set (from the
@@ -3515,6 +3845,8 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
                 * MARK above, so don't check the return value here.
                 */
                (void)efx_mae_action_set_populate_mark(spec, 0);
+
+               ft_group_hit_counter = &spec_mae->ft->group_hit_counter;
                break;
        default:
                SFC_ASSERT(B_FALSE);
@@ -3528,7 +3860,8 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
                return 0;
        }
 
-       rc = sfc_mae_action_set_add(sa, actions, spec, encap_header, n_count,
+       rc = sfc_mae_action_set_add(sa, actions, spec, encap_header,
+                                   ft_group_hit_counter, counter_ft, n_count,
                                    &spec_mae->action_set);
        if (rc != 0)
                goto fail_action_set_add;
@@ -3536,6 +3869,7 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
        return 0;
 
 fail_action_set_add:
+fail_workaround_jump_delivery:
 fail_nb_count:
        sfc_mae_encap_header_del(sa, encap_header);
 
@@ -3543,6 +3877,7 @@ fail_process_encap_header:
 fail_rule_parse_action:
        efx_mae_action_set_spec_fini(sa->nic, spec);
 
+fail_enforce_ft_count:
 fail_enforce_ft_decap:
 fail_action_set_spec_init:
        if (rc > 0 && rte_errno == 0) {
@@ -3690,6 +4025,11 @@ sfc_mae_flow_insert(struct sfc_adapter *sa,
                        goto fail_outer_rule_enable;
        }
 
+       if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
+               spec_mae->ft->reset_jump_hit_counter =
+                       spec_mae->ft->group_hit_counter;
+       }
+
        if (action_set == NULL) {
                sfc_dbg(sa, "enabled flow=%p (no AR)", flow);
                return 0;
@@ -3789,7 +4129,8 @@ sfc_mae_query_counter(struct sfc_adapter *sa,
        for (i = 0; i < action_set->n_counters; i++) {
                /*
                 * Get the first available counter of the flow rule if
-                * counter ID is not specified.
+                * counter ID is not specified, provided that this
+                * counter is not an automatic (implicit) one.
                 */
                if (conf != NULL && action_set->counters[i].rte_id != conf->id)
                        continue;
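
A sketch (not part of the patch) of reading such a counter back via rte_flow_query(), leaving the counter ID unset so the PMD returns the rule's first counter as described in the comment above:

#include <rte_flow.h>

static int
example_read_flow_hits(uint16_t port_id, struct rte_flow *flow,
		       uint64_t *hits, struct rte_flow_error *error)
{
	const struct rte_flow_action count_action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
		.conf = NULL, /* no ID: pick the rule's first counter */
	};
	struct rte_flow_query_count query = { .reset = 0 };
	int ret;

	ret = rte_flow_query(port_id, flow, &count_action, &query, error);
	if (ret == 0 && query.hits_set)
		*hits = query.hits;

	return ret;
}
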
@@ -3807,7 +4148,7 @@ sfc_mae_query_counter(struct sfc_adapter *sa,
 
        return rte_flow_error_set(error, ENOENT,
                                  RTE_FLOW_ERROR_TYPE_ACTION, action,
-                                 "No such flow rule action count ID");
+                                 "no such flow rule action or such count ID");
 }
 
 int
@@ -3848,9 +4189,9 @@ sfc_mae_switchdev_init(struct sfc_adapter *sa)
                return 0;
        }
 
-       if (mae->status != SFC_MAE_STATUS_SUPPORTED) {
+       if (mae->status != SFC_MAE_STATUS_ADMIN) {
                rc = ENOTSUP;
-               sfc_err(sa, "failed to init switchdev - no MAE support");
+               sfc_err(sa, "failed to init switchdev - no admin MAE privilege");
                goto fail_no_mae;
        }