net/hns3: fix return value for unsupported tuple
[dpdk.git] / drivers / net / enic / enic_fm_flow.c
index ee671e1..c87d3af 100644 (file)
@@ -5,9 +5,12 @@
 #include <errno.h>
 #include <stdint.h>
 #include <rte_log.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 #include <rte_flow_driver.h>
 #include <rte_ether.h>
+#include <rte_geneve.h>
+#include <rte_hash.h>
+#include <rte_jhash.h>
 #include <rte_ip.h>
 #include <rte_udp.h>
 #include <rte_memzone.h>
@@ -20,9 +23,6 @@
 #define IP_DEFTTL  64   /* from RFC 1340. */
 #define IP6_VTC_FLOW 0x60000000
 
-/* Highest Item type supported by Flowman */
-#define FM_MAX_ITEM_TYPE RTE_FLOW_ITEM_TYPE_VXLAN
-
 /* Up to 1024 TCAM entries */
 #define FM_MAX_TCAM_TABLE_SIZE 1024
 
@@ -43,6 +43,9 @@
 /* Tag used for implicit VF <-> representor flows */
 #define FM_VF_REP_TAG 1
 
+/* Max number of actions supported by VIC is 2K. Make hash table double that. */
+#define FM_MAX_ACTION_TABLE_SIZE 4096
+
 /*
  * Flow exact match tables (FET) in the VIC and rte_flow groups.
  * Use a simple scheme to map groups to tables.
@@ -90,11 +93,17 @@ struct enic_fm_counter {
        uint32_t handle;
 };
 
+struct enic_fm_action {
+       int ref;
+       uint64_t handle;
+       struct fm_action key;
+};
+
 /* rte_flow.fm */
 struct enic_fm_flow {
        bool counter_valid;
        uint64_t entry_handle;
-       uint64_t action_handle;
+       struct enic_fm_action  *action;
        struct enic_fm_counter *counter;
        struct enic_fm_fet *fet;
        /* Auto-added steer action for hairpin flows (e.g. vnic->vnic) */
@@ -155,6 +164,8 @@ struct enic_flowman {
         */
        struct enic_fm_fet *default_eg_fet;
        struct enic_fm_fet *default_ig_fet;
+       /* hash table for Action reuse */
+       struct rte_hash *action_hash;
        /* Flows that jump to the default table above */
        TAILQ_HEAD(jump_flow_list, enic_fm_jump_flow) jump_list;
        /*
@@ -195,6 +206,7 @@ struct copy_item_args {
        const struct rte_flow_item *item;
        struct fm_tcam_match_entry *fm_tcam_entry;
        uint8_t header_level;
+       struct rte_flow_error *error;
 };
 
 /* functions for copying items into flowman match */
@@ -222,6 +234,10 @@ static enic_copy_item_fn enic_fm_copy_item_tcp;
 static enic_copy_item_fn enic_fm_copy_item_udp;
 static enic_copy_item_fn enic_fm_copy_item_vlan;
 static enic_copy_item_fn enic_fm_copy_item_vxlan;
+static enic_copy_item_fn enic_fm_copy_item_gtp;
+static enic_copy_item_fn enic_fm_copy_item_geneve;
+static enic_copy_item_fn enic_fm_copy_item_geneve_opt;
+static enic_copy_item_fn enic_fm_copy_item_ecpri;
 
 /* Ingress actions */
 static const enum rte_flow_action_type enic_fm_supported_ig_actions[] = {
@@ -333,6 +349,59 @@ static const struct enic_fm_items enic_fm_items[] = {
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
+       [RTE_FLOW_ITEM_TYPE_GTP] = {
+               .copy_item = enic_fm_copy_item_gtp,
+               .valid_start_item = 0,
+               .prev_items = (const enum rte_flow_item_type[]) {
+                              RTE_FLOW_ITEM_TYPE_UDP,
+                              RTE_FLOW_ITEM_TYPE_END,
+               },
+       },
+       [RTE_FLOW_ITEM_TYPE_GTPC] = {
+               .copy_item = enic_fm_copy_item_gtp,
+               .valid_start_item = 1,
+               .prev_items = (const enum rte_flow_item_type[]) {
+                              RTE_FLOW_ITEM_TYPE_UDP,
+                              RTE_FLOW_ITEM_TYPE_END,
+               },
+       },
+       [RTE_FLOW_ITEM_TYPE_GTPU] = {
+               .copy_item = enic_fm_copy_item_gtp,
+               .valid_start_item = 1,
+               .prev_items = (const enum rte_flow_item_type[]) {
+                              RTE_FLOW_ITEM_TYPE_UDP,
+                              RTE_FLOW_ITEM_TYPE_END,
+               },
+       },
+       [RTE_FLOW_ITEM_TYPE_GENEVE] = {
+               .copy_item = enic_fm_copy_item_geneve,
+               .valid_start_item = 1,
+               .prev_items = (const enum rte_flow_item_type[]) {
+                              RTE_FLOW_ITEM_TYPE_ETH,
+                              RTE_FLOW_ITEM_TYPE_IPV4,
+                              RTE_FLOW_ITEM_TYPE_IPV6,
+                              RTE_FLOW_ITEM_TYPE_UDP,
+                              RTE_FLOW_ITEM_TYPE_END,
+               },
+       },
+       [RTE_FLOW_ITEM_TYPE_GENEVE_OPT] = {
+               .copy_item = enic_fm_copy_item_geneve_opt,
+               .valid_start_item = 1,
+               /* Can match at most 1 option */
+               .prev_items = (const enum rte_flow_item_type[]) {
+                              RTE_FLOW_ITEM_TYPE_GENEVE,
+                              RTE_FLOW_ITEM_TYPE_END,
+               },
+       },
+       [RTE_FLOW_ITEM_TYPE_ECPRI] = {
+               .copy_item = enic_fm_copy_item_ecpri,
+               .valid_start_item = 1,
+               .prev_items = (const enum rte_flow_item_type[]) {
+                              RTE_FLOW_ITEM_TYPE_ETH,
+                              RTE_FLOW_ITEM_TYPE_UDP,
+                              RTE_FLOW_ITEM_TYPE_END,
+               },
+       },
 };
 
 static int
@@ -355,8 +424,8 @@ enic_fm_copy_item_eth(struct copy_item_args *arg)
        fm_mask = &entry->ftm_mask.fk_hdrset[lvl];
        fm_data->fk_header_select |= FKH_ETHER;
        fm_mask->fk_header_select |= FKH_ETHER;
-       memcpy(&fm_data->l2.eth, spec, sizeof(*spec));
-       memcpy(&fm_mask->l2.eth, mask, sizeof(*mask));
+       memcpy(&fm_data->l2.eth, spec, sizeof(struct rte_ether_hdr));
+       memcpy(&fm_mask->l2.eth, mask, sizeof(struct rte_ether_hdr));
        return 0;
 }
 
@@ -392,8 +461,11 @@ enic_fm_copy_item_vlan(struct copy_item_args *arg)
        eth_mask = (void *)&fm_mask->l2.eth;
        eth_val = (void *)&fm_data->l2.eth;
 
-       /* Outer TPID cannot be matched */
-       if (eth_mask->ether_type)
+       /*
+        * Outer TPID cannot be matched. If inner_type is 0, use what is
+        * in the eth header.
+        */
+       if (eth_mask->ether_type && mask->inner_type)
                return -ENOTSUP;
 
        /*
@@ -401,8 +473,10 @@ enic_fm_copy_item_vlan(struct copy_item_args *arg)
         * L2, regardless of vlan stripping settings. So, the inner type
         * from vlan becomes the ether type of the eth header.
         */
-       eth_mask->ether_type = mask->inner_type;
-       eth_val->ether_type = spec->inner_type;
+       if (mask->inner_type) {
+               eth_mask->ether_type = mask->inner_type;
+               eth_val->ether_type = spec->inner_type;
+       }
        fm_data->fk_header_select |= FKH_ETHER | FKH_QTAG;
        fm_mask->fk_header_select |= FKH_ETHER | FKH_QTAG;
        fm_data->fk_vlan = rte_be_to_cpu_16(spec->tci);
@@ -461,8 +535,8 @@ enic_fm_copy_item_ipv6(struct copy_item_args *arg)
 
        fm_data->fk_header_select |= FKH_IPV6;
        fm_mask->fk_header_select |= FKH_IPV6;
-       memcpy(&fm_data->l3.ip6, spec, sizeof(*spec));
-       memcpy(&fm_mask->l3.ip6, mask, sizeof(*mask));
+       memcpy(&fm_data->l3.ip6, spec, sizeof(struct rte_ipv6_hdr));
+       memcpy(&fm_mask->l3.ip6, mask, sizeof(struct rte_ipv6_hdr));
        return 0;
 }
 
@@ -611,6 +685,263 @@ enic_fm_copy_item_vxlan(struct copy_item_args *arg)
        return 0;
 }
 
+static int
+enic_fm_copy_item_gtp(struct copy_item_args *arg)
+{
+       const struct rte_flow_item *item = arg->item;
+       const struct rte_flow_item_gtp *spec = item->spec;
+       const struct rte_flow_item_gtp *mask = item->mask;
+       struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
+       struct fm_header_set *fm_data, *fm_mask;
+       int off;
+       uint16_t udp_gtp_uc_port_be = 0;
+
+       ENICPMD_FUNC_TRACE();
+       /* Only 2 header levels (outer and inner) allowed */
+       if (arg->header_level > 0)
+               return -EINVAL;
+
+       fm_data = &entry->ftm_data.fk_hdrset[0];
+       fm_mask = &entry->ftm_mask.fk_hdrset[0];
+
+       switch (item->type) {
+       case RTE_FLOW_ITEM_TYPE_GTP:
+       {
+               /* For vanilla GTP, the UDP destination port must be specified
+                * but value of the port is not enforced here.
+                */
+               if (!(fm_data->fk_metadata & FKM_UDP) ||
+                   !(fm_data->fk_header_select & FKH_UDP) ||
+                   fm_data->l4.udp.fk_dest == 0)
+                       return -EINVAL;
+               if (!(fm_mask->fk_metadata & FKM_UDP) ||
+                   !(fm_mask->fk_header_select & FKH_UDP) ||
+                   fm_mask->l4.udp.fk_dest != 0xFFFF)
+                       return -EINVAL;
+               break;
+       }
+       case RTE_FLOW_ITEM_TYPE_GTPC:
+       {
+               udp_gtp_uc_port_be = rte_cpu_to_be_16(RTE_GTPC_UDP_PORT);
+               break;
+       }
+       case RTE_FLOW_ITEM_TYPE_GTPU:
+       {
+               udp_gtp_uc_port_be = rte_cpu_to_be_16(RTE_GTPU_UDP_PORT);
+               break;
+       }
+       default:
+               RTE_ASSERT(0);
+       }
+
+       /* The GTP-C or GTP-U UDP destination port must be matched. */
+       if (udp_gtp_uc_port_be) {
+               if (fm_data->fk_metadata & FKM_UDP &&
+                   fm_data->fk_header_select & FKH_UDP &&
+                   fm_data->l4.udp.fk_dest != udp_gtp_uc_port_be)
+                       return -EINVAL;
+               if (fm_mask->fk_metadata & FKM_UDP &&
+                   fm_mask->fk_header_select & FKH_UDP &&
+                   fm_mask->l4.udp.fk_dest != 0xFFFF)
+                       return -EINVAL;
+
+               /* In any case, add match for GTP-C or GTP-U UDP dst port */
+               fm_data->fk_metadata |= FKM_UDP;
+               fm_data->fk_header_select |= FKH_UDP;
+               fm_data->l4.udp.fk_dest = udp_gtp_uc_port_be;
+               fm_mask->fk_metadata |= FKM_UDP;
+               fm_mask->fk_header_select |= FKH_UDP;
+               fm_mask->l4.udp.fk_dest = 0xFFFF;
+       }
+
+       /* NIC does not support GTP tunnels. No Items are allowed after this.
+        * This prevents the specification of further items.
+        */
+       arg->header_level = 0;
+
+       /* Match all if no spec */
+       if (!spec)
+               return 0;
+       if (!mask)
+               mask = &rte_flow_item_gtp_mask;
+
+       /*
+        * Use the raw L4 buffer to match GTP as fm_header_set does not have
+        * GTP header. UDP dst port must be specific. Using the raw buffer
+        * does not affect such UDP item, since we skip UDP in the raw buffer.
+        */
+       fm_data->fk_header_select |= FKH_L4RAW;
+       fm_mask->fk_header_select |= FKH_L4RAW;
+       off = sizeof(fm_data->l4.udp);
+       memcpy(&fm_data->l4.rawdata[off], spec, sizeof(*spec));
+       memcpy(&fm_mask->l4.rawdata[off], mask, sizeof(*mask));
+       return 0;
+}
+
+static int
+enic_fm_copy_item_geneve(struct copy_item_args *arg)
+{
+       const struct rte_flow_item *item = arg->item;
+       const struct rte_flow_item_geneve *spec = item->spec;
+       const struct rte_flow_item_geneve *mask = item->mask;
+       struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
+       struct fm_header_set *fm_data, *fm_mask;
+       int off;
+
+       ENICPMD_FUNC_TRACE();
+       /* Only 2 header levels (outer and inner) allowed */
+       if (arg->header_level > 0)
+               return -EINVAL;
+
+       fm_data = &entry->ftm_data.fk_hdrset[0];
+       fm_mask = &entry->ftm_mask.fk_hdrset[0];
+       fm_data->fk_metadata |= FKM_GENEVE;
+       fm_mask->fk_metadata |= FKM_GENEVE;
+       /* items from here on out are inner header items, except options */
+       arg->header_level = 1;
+
+       /* Match all if no spec */
+       if (!spec)
+               return 0;
+       if (!mask)
+               mask = &rte_flow_item_geneve_mask;
+
+       /*
+        * Use the raw L4 buffer to match geneve as fm_header_set does
+        * not have geneve header. A UDP item may precede the geneve
+        * item. Using the raw buffer does not affect such UDP item,
+        * since we skip UDP in the raw buffer.
+        */
+       fm_data->fk_header_select |= FKH_L4RAW;
+       fm_mask->fk_header_select |= FKH_L4RAW;
+       off = sizeof(fm_data->l4.udp);
+       memcpy(&fm_data->l4.rawdata[off], spec, sizeof(struct rte_geneve_hdr));
+       memcpy(&fm_mask->l4.rawdata[off], mask, sizeof(struct rte_geneve_hdr));
+       return 0;
+}
+
+static int
+enic_fm_copy_item_geneve_opt(struct copy_item_args *arg)
+{
+       const struct rte_flow_item *item = arg->item;
+       const struct rte_flow_item_geneve_opt *spec = item->spec;
+       const struct rte_flow_item_geneve_opt *mask = item->mask;
+       struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
+       struct fm_header_set *fm_data, *fm_mask;
+       struct rte_geneve_hdr *geneve;
+       int off, len;
+
+       ENICPMD_FUNC_TRACE();
+       fm_data = &entry->ftm_data.fk_hdrset[0];
+       fm_mask = &entry->ftm_mask.fk_hdrset[0];
+       /* Match all if no spec */
+       if (!spec)
+               return 0;
+       if (!mask)
+               mask = &rte_flow_item_geneve_opt_mask;
+
+       if (spec->option_len > 0 &&
+           (spec->data == NULL || mask->data == NULL)) {
+               return rte_flow_error_set(arg->error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM,
+                       NULL, "enic: geneve_opt unexpected null data");
+       }
+       /*
+        * Geneve item must already be in the raw buffer. Append the
+        * option pattern to it. There are two limitations.
+        * (1) Can match only the 1st option, the first one following Geneve
+        * (2) Geneve header must specify option length, as HW does not
+        *     have "has Geneve option" flag.
+        */
+       RTE_ASSERT((fm_data->fk_header_select & FKH_L4RAW) != 0);
+       RTE_ASSERT((fm_mask->fk_header_select & FKH_L4RAW) != 0);
+       off = sizeof(fm_data->l4.udp);
+       geneve = (struct rte_geneve_hdr *)&fm_data->l4.rawdata[off];
+       if (geneve->opt_len == 0) {
+               return rte_flow_error_set(arg->error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM,
+                       NULL, "enic: geneve_opt requires non-zero geneve option length");
+       }
+       geneve = (struct rte_geneve_hdr *)&fm_mask->l4.rawdata[off];
+       if (geneve->opt_len == 0) {
+               return rte_flow_error_set(arg->error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM,
+                       NULL, "enic: geneve_opt requires non-zero geneve option length mask");
+       }
+       off = sizeof(fm_data->l4.udp) + sizeof(struct rte_geneve_hdr);
+       if (off + (spec->option_len + 1) * 4 > FM_LAYER_SIZE) {
+               return rte_flow_error_set(arg->error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM,
+                       NULL, "enic: geneve_opt too large");
+       }
+       /* Copy option header */
+       memcpy(&fm_data->l4.rawdata[off], spec, 4);
+       memcpy(&fm_mask->l4.rawdata[off], mask, 4);
+       /* Copy option data */
+       if (spec->option_len > 0) {
+               off += 4;
+               len = spec->option_len * 4;
+               memcpy(&fm_data->l4.rawdata[off], spec->data, len);
+               memcpy(&fm_mask->l4.rawdata[off], mask->data, len);
+       }
+       return 0;
+}
+
+/* Match eCPRI combined message header */
+static int
+enic_fm_copy_item_ecpri(struct copy_item_args *arg)
+{
+       const struct rte_flow_item *item = arg->item;
+       const struct rte_flow_item_ecpri *spec = item->spec;
+       const struct rte_flow_item_ecpri *mask = item->mask;
+       struct fm_tcam_match_entry *entry = arg->fm_tcam_entry;
+       struct fm_header_set *fm_data, *fm_mask;
+       uint8_t *fm_data_to, *fm_mask_to;
+
+       ENICPMD_FUNC_TRACE();
+
+       /* Tunneling not supported- only matching on inner eCPRI fields. */
+       if (arg->header_level > 0)
+               return -EINVAL;
+
+       /* Need both spec and mask */
+       if (!spec || !mask)
+               return -EINVAL;
+
+       fm_data = &entry->ftm_data.fk_hdrset[0];
+       fm_mask = &entry->ftm_mask.fk_hdrset[0];
+
+       /* eCPRI can only follow L2/VLAN layer if ethernet type is 0xAEFE. */
+       if (!(fm_data->fk_metadata & FKM_UDP) &&
+           (fm_mask->l2.eth.fk_ethtype != UINT16_MAX ||
+           rte_cpu_to_be_16(fm_data->l2.eth.fk_ethtype) !=
+           RTE_ETHER_TYPE_ECPRI))
+               return -EINVAL;
+
+       if (fm_data->fk_metadata & FKM_UDP) {
+               /* eCPRI on UDP */
+               fm_data->fk_header_select |= FKH_L4RAW;
+               fm_mask->fk_header_select |= FKH_L4RAW;
+               fm_data_to = &fm_data->l4.rawdata[sizeof(fm_data->l4.udp)];
+               fm_mask_to = &fm_mask->l4.rawdata[sizeof(fm_data->l4.udp)];
+       } else {
+               /* eCPRI directly after Ethernet header */
+               fm_data->fk_header_select |= FKH_L3RAW;
+               fm_mask->fk_header_select |= FKH_L3RAW;
+               fm_data_to = &fm_data->l3.rawdata[0];
+               fm_mask_to = &fm_mask->l3.rawdata[0];
+       }
+
+       /*
+        * Use the raw L3 or L4 buffer to match eCPRI since fm_header_set does
+        * not have eCPRI header. Only 1st message header of PDU can be matched.
+        * The "C" bit is ignored.
+        */
+       memcpy(fm_data_to, spec, sizeof(*spec));
+       memcpy(fm_mask_to, mask, sizeof(*mask));
+       return 0;
+}
+
 /*
  * Currently, raw pattern match is very limited. It is intended for matching
  * UDP tunnel header (e.g. vxlan or geneve).
@@ -845,22 +1176,38 @@ enic_fm_copy_entry(struct enic_flowman *fm,
 
                item_info = &enic_fm_items[item->type];
 
-               if (item->type > FM_MAX_ITEM_TYPE ||
+               if (item->type >= RTE_DIM(enic_fm_items) ||
                    item_info->copy_item == NULL) {
                        return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "enic: unsupported item");
                }
-
+               /*
+                * Check vNIC feature dependencies. Geneve item needs
+                * Geneve offload feature
+                */
+               if (item->type == RTE_FLOW_ITEM_TYPE_GENEVE &&
+                   !fm->user_enic->geneve) {
+                       return rte_flow_error_set(error, ENOTSUP,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               NULL, "enic: geneve not supported");
+               }
                /* check to see if item stacking is valid */
                if (!fm_item_stacking_valid(prev_item, item_info,
                                            is_first_item))
                        goto stacking_error;
 
                args.item = item;
+               args.error = error;
+               if (error)
+                       error->type = RTE_FLOW_ERROR_TYPE_NONE;
                ret = item_info->copy_item(&args);
-               if (ret)
+               if (ret) {
+                       /* If copy_item set the error, return that */
+                       if (error && error->type != RTE_FLOW_ERROR_TYPE_NONE)
+                               return ret;
                        goto item_not_supported;
+               }
                /* Going from outer to inner? Treat it as a new packet start */
                if (prev_header_level != args.header_level) {
                        prev_item = RTE_FLOW_ITEM_TYPE_END;
@@ -918,6 +1265,20 @@ enic_fm_append_action_op(struct enic_flowman *fm,
        return 0;
 }
 
+static struct fm_action_op *
+find_prev_action_op(struct enic_flowman *fm, uint32_t opcode)
+{
+       struct fm_action_op *op;
+       int i;
+
+       for (i = 0; i < fm->action_op_count; i++) {
+               op = &fm->action.fma_action_ops[i];
+               if (op->fa_op == opcode)
+                       return op;
+       }
+       return NULL;
+}
+
 /* NIC requires that 1st steer appear before decap.
  * Correct example: steer, decap, steer, steer, ...
  */
@@ -933,7 +1294,8 @@ enic_fm_reorder_action_op(struct enic_flowman *fm)
        steer = NULL;
        decap = NULL;
        while (op->fa_op != FMOP_END) {
-               if (!decap && op->fa_op == FMOP_DECAP_NOSTRIP)
+               if (!decap && (op->fa_op == FMOP_DECAP_NOSTRIP ||
+                              op->fa_op == FMOP_DECAP_STRIP))
                        decap = op;
                else if (!steer && op->fa_op == FMOP_RQ_STEER)
                        steer = op;
@@ -973,6 +1335,17 @@ enic_fm_copy_vxlan_decap(struct enic_flowman *fm,
        return enic_fm_append_action_op(fm, &fm_op, error);
 }
 
+/* Generate a reasonable source port number */
+static uint16_t
+gen_src_port(void)
+{
+       /* Min/max below are the default values in OVS-DPDK and Linux */
+       uint16_t p = rte_rand();
+       p = RTE_MAX(p, 32768);
+       p = RTE_MIN(p, 61000);
+       return rte_cpu_to_be_16(p);
+}
+
 /* VXLAN encap is done via flowman compound action */
 static int
 enic_fm_copy_vxlan_encap(struct enic_flowman *fm,
@@ -981,6 +1354,7 @@ enic_fm_copy_vxlan_encap(struct enic_flowman *fm,
 {
        struct fm_action_op fm_op;
        struct rte_ether_hdr *eth;
+       struct rte_udp_hdr *udp;
        uint16_t *ethertype;
        void *template;
        uint8_t off;
@@ -1002,7 +1376,7 @@ enic_fm_copy_vxlan_encap(struct enic_flowman *fm,
        eth = (struct rte_ether_hdr *)template;
        ethertype = &eth->ether_type;
        append_template(&template, &off, item->spec,
-                       sizeof(struct rte_flow_item_eth));
+                       sizeof(struct rte_ether_hdr));
        item++;
        flow_item_skip_void(&item);
        /* Optional VLAN */
@@ -1079,8 +1453,17 @@ enic_fm_copy_vxlan_encap(struct enic_flowman *fm,
                off + offsetof(struct rte_udp_hdr, dgram_len);
        fm_op.encap.len2_delta =
                sizeof(struct rte_udp_hdr) + sizeof(struct rte_vxlan_hdr);
+       udp = (struct rte_udp_hdr *)template;
        append_template(&template, &off, item->spec,
                        sizeof(struct rte_udp_hdr));
+       /*
+        * Firmware does not hash/fill source port yet. Generate a
+        * random port, as there is *usually* one rte_flow for the
+        * given inner packet stream (i.e. a single stream has one
+        * random port).
+        */
+       if (udp->src_port == 0)
+               udp->src_port = gen_src_port();
        item++;
        flow_item_skip_void(&item);
 
@@ -1188,6 +1571,35 @@ vf_egress_port_id_action(struct enic_flowman *fm,
        return 0;
 }
 
+static int
+enic_fm_check_transfer_dst(struct enic *enic, uint16_t dst_port_id,
+                          struct rte_eth_dev **dst_dev,
+                          struct rte_flow_error *error)
+{
+       struct rte_eth_dev *dev;
+
+       ENICPMD_LOG(DEBUG, "port id %u", dst_port_id);
+       if (!rte_eth_dev_is_valid_port(dst_port_id)) {
+               return rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ACTION,
+                       NULL, "invalid port_id");
+       }
+       dev = &rte_eth_devices[dst_port_id];
+       if (!dev_is_enic(dev)) {
+               return rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ACTION,
+                       NULL, "port_id is not enic");
+       }
+       if (enic->switch_domain_id != pmd_priv(dev)->switch_domain_id) {
+               return rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ACTION,
+                       NULL, "destination and source ports are not in the same switch domain");
+       }
+
+       *dst_dev = dev;
+       return 0;
+}
+
 /* Translate flow actions to flowman TCAM entry actions */
 static int
 enic_fm_copy_action(struct enic_flowman *fm,
@@ -1260,6 +1672,8 @@ enic_fm_copy_action(struct enic_flowman *fm,
                        const struct rte_flow_action_mark *mark =
                                actions->conf;
 
+                       if (enic->use_noscatter_vec_rx_handler)
+                               goto unsupported;
                        if (mark->id >= ENIC_MAGIC_FILTER_ID - 1)
                                return rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
@@ -1273,6 +1687,8 @@ enic_fm_copy_action(struct enic_flowman *fm,
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_FLAG: {
+                       if (enic->use_noscatter_vec_rx_handler)
+                               goto unsupported;
                        /* ENIC_MAGIC_FILTER_ID is reserved for flagging */
                        memset(&fm_op, 0, sizeof(fm_op));
                        fm_op.fa_op = FMOP_MARK;
@@ -1377,7 +1793,7 @@ enic_fm_copy_action(struct enic_flowman *fm,
                }
                case RTE_FLOW_ACTION_TYPE_PORT_ID: {
                        const struct rte_flow_action_port_id *port;
-                       struct rte_eth_dev *dev;
+                       struct rte_eth_dev *dev = NULL;
 
                        if (!ingress && (overlap & PORT_ID)) {
                                ENICPMD_LOG(DEBUG, "cannot have multiple egress PORT_ID actions");
@@ -1388,24 +1804,10 @@ enic_fm_copy_action(struct enic_flowman *fm,
                                vnic_h = enic->fm_vnic_handle; /* This port */
                                break;
                        }
-                       ENICPMD_LOG(DEBUG, "port id %u", port->id);
-                       if (!rte_eth_dev_is_valid_port(port->id)) {
-                               return rte_flow_error_set(error, EINVAL,
-                                       RTE_FLOW_ERROR_TYPE_ACTION,
-                                       NULL, "invalid port_id");
-                       }
-                       dev = &rte_eth_devices[port->id];
-                       if (!dev_is_enic(dev)) {
-                               return rte_flow_error_set(error, EINVAL,
-                                       RTE_FLOW_ERROR_TYPE_ACTION,
-                                       NULL, "port_id is not enic");
-                       }
-                       if (enic->switch_domain_id !=
-                           pmd_priv(dev)->switch_domain_id) {
-                               return rte_flow_error_set(error, EINVAL,
-                                       RTE_FLOW_ERROR_TYPE_ACTION,
-                                       NULL, "destination and source ports are not in the same switch domain");
-                       }
+                       ret = enic_fm_check_transfer_dst(enic, port->id, &dev,
+                                                        error);
+                       if (ret)
+                               return ret;
                        vnic_h = pmd_priv(dev)->fm_vnic_handle;
                        overlap |= PORT_ID;
                        /*
@@ -1446,6 +1848,19 @@ enic_fm_copy_action(struct enic_flowman *fm,
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: {
+                       struct fm_action_op *decap;
+
+                       /*
+                        * If decap-nostrip appears before pop vlan, this pop
+                        * applies to the inner packet vlan. Turn it into
+                        * decap-strip.
+                        */
+                       decap = find_prev_action_op(fm, FMOP_DECAP_NOSTRIP);
+                       if (decap) {
+                               ENICPMD_LOG(DEBUG, "pop-vlan inner: decap-nostrip => decap-strip");
+                               decap->fa_op = FMOP_DECAP_STRIP;
+                               break;
+                       }
                        memset(&fm_op, 0, sizeof(fm_op));
                        fm_op.fa_op = FMOP_POP_VLAN;
                        ret = enic_fm_append_action_op(fm, &fm_op, error);
@@ -1489,6 +1904,48 @@ enic_fm_copy_action(struct enic_flowman *fm,
                        ovlan |= rte_be_to_cpu_16(vid->vlan_vid);
                        break;
                }
+               case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR: {
+                       const struct rte_flow_action_ethdev *ethdev;
+                       struct rte_eth_dev *dev = NULL;
+
+                       ethdev = actions->conf;
+                       ret = enic_fm_check_transfer_dst(enic, ethdev->port_id,
+                                                        &dev, error);
+                       if (ret)
+                               return ret;
+                       vnic_h = pmd_priv(dev)->fm_vnic_handle;
+                       overlap |= PORT_ID;
+                       /*
+                        * Action PORT_REPRESENTOR implies ingress destination.
+                        * Nothing to do. We add an implicit steer at the
+                        * end if needed.
+                        */
+                       ingress = 1;
+                       break;
+               }
+               case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
+                       const struct rte_flow_action_ethdev *ethdev;
+                       struct rte_eth_dev *dev = NULL;
+
+                       if (overlap & PORT_ID) {
+                               ENICPMD_LOG(DEBUG, "cannot have multiple egress PORT_ID actions");
+                               goto unsupported;
+                       }
+                       ethdev = actions->conf;
+                       ret = enic_fm_check_transfer_dst(enic, ethdev->port_id,
+                                                        &dev, error);
+                       if (ret)
+                               return ret;
+                       vnic_h = pmd_priv(dev)->fm_vnic_handle;
+                       overlap |= PORT_ID;
+                       /* Action REPRESENTED_PORT: always egress destination */
+                       ingress = 0;
+                       ret = vf_egress_port_id_action(fm, dev, vnic_h, &fm_op,
+                               error);
+                       if (ret)
+                               return ret;
+                       break;
+               }
                default:
                        goto unsupported;
                }
@@ -1603,7 +2060,7 @@ enic_fm_dump_tcam_actions(const struct fm_action *fm_action)
        /* Remove trailing comma */
        if (buf[0])
                *(bp - 1) = '\0';
-       ENICPMD_LOG(DEBUG, "       Acions: %s", buf);
+       ENICPMD_LOG(DEBUG, "       Actions: %s", buf);
 }
 
 static int
@@ -1899,19 +2356,26 @@ enic_fm_counter_alloc(struct enic_flowman *fm, struct rte_flow_error *error,
 }
 
 static int
-enic_fm_action_free(struct enic_flowman *fm, uint64_t handle)
+enic_fm_action_free(struct enic_flowman *fm, struct enic_fm_action *ah)
 {
        uint64_t args[2];
-       int rc;
+       int ret = 0;
 
        ENICPMD_FUNC_TRACE();
-       args[0] = FM_ACTION_FREE;
-       args[1] = handle;
-       rc = flowman_cmd(fm, args, 2);
-       if (rc)
-               ENICPMD_LOG(ERR, "cannot free action: rc=%d handle=0x%" PRIx64,
-                           rc, handle);
-       return rc;
+       RTE_ASSERT(ah->ref > 0);
+       ah->ref--;
+       if (ah->ref == 0) {
+               args[0] = FM_ACTION_FREE;
+               args[1] = ah->handle;
+               ret = flowman_cmd(fm, args, 2);
+               if (ret)
+                       /* This is a "should never happen" error. */
+                       ENICPMD_LOG(ERR, "freeing action rc=%d handle=0x%"
+                                   PRIx64, ret, ah->handle);
+               rte_hash_del_key(fm->action_hash, (const void *)&ah->key);
+               free(ah);
+       }
+       return ret;
 }
 
 static int
@@ -1987,9 +2451,9 @@ __enic_fm_flow_free(struct enic_flowman *fm, struct enic_fm_flow *fm_flow)
                enic_fm_entry_free(fm, fm_flow->entry_handle);
                fm_flow->entry_handle = FM_INVALID_HANDLE;
        }
-       if (fm_flow->action_handle != FM_INVALID_HANDLE) {
-               enic_fm_action_free(fm, fm_flow->action_handle);
-               fm_flow->action_handle = FM_INVALID_HANDLE;
+       if (fm_flow->action != NULL) {
+               enic_fm_action_free(fm, fm_flow->action);
+               fm_flow->action = NULL;
        }
        enic_fm_counter_free(fm, fm_flow);
        if (fm_flow->fet) {
@@ -2099,6 +2563,75 @@ enic_fm_add_exact_entry(struct enic_flowman *fm,
        return 0;
 }
 
+static int
+enic_action_handle_get(struct enic_flowman *fm, struct fm_action *action_in,
+                      struct rte_flow_error *error,
+                      struct enic_fm_action **ah_o)
+{
+       struct enic_fm_action *ah;
+       struct fm_action *fma;
+       uint64_t args[2];
+       int ret = 0;
+
+       ret = rte_hash_lookup_data(fm->action_hash, action_in,
+                                  (void **)&ah);
+       if (ret < 0 && ret != -ENOENT)
+               return rte_flow_error_set(error, -ret,
+                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                  NULL, "enic: rte_hash_lookup(action)");
+
+       if (ret == -ENOENT) {
+               /* Allocate a new action on the NIC. */
+               fma = &fm->cmd.va->fm_action;
+               memcpy(fma, action_in, sizeof(*fma));
+
+               ah = calloc(1, sizeof(*ah));
+               if (ah == NULL)
+                       return rte_flow_error_set(error, ENOMEM,
+                                          RTE_FLOW_ERROR_TYPE_HANDLE,
+                                          NULL, "enic: calloc(fm-action)");
+               memcpy(&ah->key, action_in, sizeof(struct fm_action));
+               args[0] = FM_ACTION_ALLOC;
+               args[1] = fm->cmd.pa;
+               ret = flowman_cmd(fm, args, 2);
+               if (ret != 0) {
+                       rte_flow_error_set(error, -ret,
+                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                          NULL, "enic: devcmd(action-alloc)");
+                       goto error_with_ah;
+               }
+               ah->handle = args[0];
+               ret = rte_hash_add_key_data(fm->action_hash,
+                                           (const void *)action_in,
+                                           (void *)ah);
+               if (ret != 0) {
+                       rte_flow_error_set(error, -ret,
+                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                          NULL,
+                                          "enic: rte_hash_add_key_data(actn)");
+                       goto error_with_action_handle;
+               }
+               ENICPMD_LOG(DEBUG, "action allocated: handle=0x%" PRIx64,
+                           ah->handle);
+       }
+
+       /* Action handle struct is valid, increment reference count. */
+       ah->ref++;
+       *ah_o = ah;
+       return 0;
+error_with_action_handle:
+       args[0] = FM_ACTION_FREE;
+       args[1] = ah->handle;
+       ret = flowman_cmd(fm, args, 2);
+       if (ret != 0)
+               rte_flow_error_set(error, -ret,
+                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                  NULL, "enic: devcmd(action-free)");
+error_with_ah:
+       free(ah);
+       return ret;
+}
+
 /* Push match-action to the NIC. */
 static int
 __enic_fm_flow_add_entry(struct enic_flowman *fm,
@@ -2110,29 +2643,18 @@ __enic_fm_flow_add_entry(struct enic_flowman *fm,
                         struct rte_flow_error *error)
 {
        struct enic_fm_counter *ctr;
-       struct fm_action *fma;
-       uint64_t action_h;
+       struct enic_fm_action *ah = NULL;
        uint64_t entry_h;
-       uint64_t args[3];
        int ret;
 
        ENICPMD_FUNC_TRACE();
-       /* Allocate action. */
-       fma = &fm->cmd.va->fm_action;
-       memcpy(fma, action_in, sizeof(*fma));
-       args[0] = FM_ACTION_ALLOC;
-       args[1] = fm->cmd.pa;
-       ret = flowman_cmd(fm, args, 2);
-       if (ret != 0) {
-               ENICPMD_LOG(ERR, "allocating TCAM table action rc=%d", ret);
-               rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-                       NULL, "enic: devcmd(action-alloc)");
+
+       /* Get or create an action handle. */
+       ret = enic_action_handle_get(fm, action_in, error, &ah);
+       if (ret)
                return ret;
-       }
-       action_h = args[0];
-       fm_flow->action_handle = action_h;
-       match_in->ftm_action = action_h;
-       ENICPMD_LOG(DEBUG, "action allocated: handle=0x%" PRIx64, action_h);
+       match_in->ftm_action = ah->handle;
+       fm_flow->action = ah;
 
        /* Allocate counter if requested. */
        if (match_in->ftm_flags & FMEF_COUNTER) {
@@ -2206,7 +2728,7 @@ enic_fm_flow_add_entry(struct enic_flowman *fm,
                return NULL;
        }
        flow->fm = fm_flow;
-       fm_flow->action_handle = FM_INVALID_HANDLE;
+       fm_flow->action = NULL;
        fm_flow->entry_handle = FM_INVALID_HANDLE;
        if (__enic_fm_flow_add_entry(fm, fm_flow, match_in, action_in,
                                     attrs->group, attrs->ingress, error)) {
@@ -2302,7 +2824,7 @@ add_hairpin_steer(struct enic_flowman *fm, struct rte_flow *flow,
        if (ret)
                goto error_with_flow;
        /* Add the ingress flow */
-       fm_flow->action_handle = FM_INVALID_HANDLE;
+       fm_flow->action = NULL;
        fm_flow->entry_handle = FM_INVALID_HANDLE;
        ret = __enic_fm_flow_add_entry(fm, fm_flow, fm_tcam_entry, fm_action,
                                       FM_TCAM_RTE_GROUP, 1 /* ingress */, error);
@@ -2556,7 +3078,7 @@ enic_fm_flow_flush(struct rte_eth_dev *dev,
                 */
                if (fm->ig_tcam_hndl == FM_INVALID_HANDLE) {
                        fm_flow->entry_handle = FM_INVALID_HANDLE;
-                       fm_flow->action_handle = FM_INVALID_HANDLE;
+                       fm_flow->action = NULL;
                        fm_flow->fet = NULL;
                }
                enic_fm_flow_free(fm, flow);
@@ -2612,6 +3134,31 @@ enic_fm_tcam_tbl_alloc(struct enic_flowman *fm, uint32_t direction,
        return 0;
 }
 
+static int
+enic_fm_init_actions(struct enic_flowman *fm)
+{
+       struct rte_hash *a_hash;
+       char name[RTE_HASH_NAMESIZE];
+       struct rte_hash_parameters params = {
+               .entries = FM_MAX_ACTION_TABLE_SIZE,
+               .key_len = sizeof(struct fm_action),
+               .hash_func = rte_jhash,
+               .hash_func_init_val = 0,
+               .socket_id = rte_socket_id(),
+       };
+
+       ENICPMD_FUNC_TRACE();
+       snprintf((char *)name, sizeof(name), "fm-ah-%s",
+                fm->owner_enic->bdf_name);
+       params.name = name;
+
+       a_hash = rte_hash_create(&params);
+       if (a_hash == NULL)
+               return -rte_errno;
+       fm->action_hash = a_hash;
+       return 0;
+}
+
 static int
 enic_fm_init_counters(struct enic_flowman *fm)
 {
@@ -2725,6 +3272,12 @@ enic_fm_init(struct enic *enic)
                ENICPMD_LOG(ERR, "cannot alloc counters");
                goto error_tables;
        }
+       /* set up action handle hash */
+       rc = enic_fm_init_actions(fm);
+       if (rc) {
+               ENICPMD_LOG(ERR, "cannot create action hash, error:%d", rc);
+               goto error_counters;
+       }
        /*
         * One default exact match table for each direction. We hold onto
         * it until close.
@@ -2732,7 +3285,7 @@ enic_fm_init(struct enic *enic)
        rc = enic_fet_alloc(fm, 1, NULL, 128, &fm->default_ig_fet);
        if (rc) {
                ENICPMD_LOG(ERR, "cannot alloc default IG exact match table");
-               goto error_counters;
+               goto error_actions;
        }
        fm->default_ig_fet->ref = 1;
        rc = enic_fet_alloc(fm, 0, NULL, 128, &fm->default_eg_fet);
@@ -2747,6 +3300,8 @@ enic_fm_init(struct enic *enic)
 
 error_ig_fet:
        enic_fet_free(fm, fm->default_ig_fet);
+error_actions:
+       rte_hash_free(fm->action_hash);
 error_counters:
        enic_fm_free_all_counters(fm);
 error_tables:
@@ -2773,6 +3328,7 @@ enic_fm_destroy(struct enic *enic)
        if (enic->fm == NULL)
                return;
        fm = enic->fm;
+       enic_fm_flow_flush(enic->rte_dev, NULL);
        enic_fet_free(fm, fm->default_eg_fet);
        enic_fet_free(fm, fm->default_ig_fet);
        /* Free all exact match tables still open */
@@ -2782,6 +3338,7 @@ enic_fm_destroy(struct enic *enic)
        }
        enic_fm_free_tcam_tables(fm);
        enic_fm_free_all_counters(fm);
+       rte_hash_free(fm->action_hash);
        enic_free_consistent(enic, sizeof(union enic_flowman_cmd_mem),
                fm->cmd.va, fm->cmd.pa);
        fm->cmd.va = NULL;