[dpdk.git] drivers/net/enic/enic_fm_flow.c
index d815f36..cd364ee 100644
@@ -5,9 +5,11 @@
 #include <errno.h>
 #include <stdint.h>
 #include <rte_log.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 #include <rte_flow_driver.h>
 #include <rte_ether.h>
+#include <rte_hash.h>
+#include <rte_jhash.h>
 #include <rte_ip.h>
 #include <rte_udp.h>
 #include <rte_memzone.h>
 
 #define FM_INVALID_HANDLE 0
 
+/* Low priority used for implicit VF -> representor flow */
+#define FM_LOWEST_PRIORITY 100000
+
+/* High priority used for implicit representor -> VF flow */
+#define FM_HIGHEST_PRIORITY 0
+
+/* Tag used for implicit VF <-> representor flows */
+#define FM_VF_REP_TAG 1
+
+/* Max number of actions supported by VIC is 2K. Make hash table double that. */
+#define FM_MAX_ACTION_TABLE_SIZE 4096
+
 /*
  * Flow exact match tables (FET) in the VIC and rte_flow groups.
  * Use a simple scheme to map groups to tables.
@@ -81,13 +95,21 @@ struct enic_fm_counter {
        uint32_t handle;
 };
 
+struct enic_fm_action {
+       int ref;
+       uint64_t handle;
+       struct fm_action key;
+};
+
 /* rte_flow.fm */
 struct enic_fm_flow {
        bool counter_valid;
        uint64_t entry_handle;
-       uint64_t action_handle;
+       struct enic_fm_action  *action;
        struct enic_fm_counter *counter;
        struct enic_fm_fet *fet;
+       /* Auto-added steer action for hairpin flows (e.g. vnic->vnic) */
+       struct enic_fm_flow *hairpin_steer_flow;
 };
 
 struct enic_fm_jump_flow {
@@ -110,8 +132,20 @@ union enic_flowman_cmd_mem {
        struct fm_action fm_action;
 };
 
+/*
+ * PF has a flowman instance, and VF representors share it with PF.
+ * PF allocates this structure and owns it. VF representors borrow
+ * the PF's structure during API calls (e.g. create, query).
+ */
 struct enic_flowman {
-       struct enic *enic;
+       struct enic *owner_enic; /* PF */
+       struct enic *user_enic;  /* API caller (PF or representor) */
+       /*
+        * Representors and PF share the same underlying flowman.
+        * Lock API calls to serialize accesses from them. Only used
+        * when VF representors are present.
+        */
+       rte_spinlock_t lock;
        /* Command buffer */
        struct {
                union enic_flowman_cmd_mem *va;
@@ -132,6 +166,8 @@ struct enic_flowman {
         */
        struct enic_fm_fet *default_eg_fet;
        struct enic_fm_fet *default_ig_fet;
+       /* hash table for Action reuse */
+       struct rte_hash *action_hash;
        /* Flows that jump to the default table above */
        TAILQ_HEAD(jump_flow_list, enic_fm_jump_flow) jump_list;
        /*
@@ -143,9 +179,23 @@ struct enic_flowman {
        struct fm_action action;
        struct fm_action action_tmp; /* enic_fm_reorder_action_op */
        int action_op_count;
+       /* Tags used for representor flows */
+       uint8_t vf_rep_tag;
+       /* For auto-added steer action for hairpin */
+       int need_hairpin_steer;
+       uint64_t hairpin_steer_vnic_h;
 };
 
 static int enic_fm_tbl_free(struct enic_flowman *fm, uint64_t handle);
+/*
+ * API functions (create, destroy, validate, flush) call begin_fm()
+ * upon entering to save the caller enic (PF or VF representor) and
+ * lock. Upon exit, they call end_fm() to unlock.
+ */
+static struct enic_flowman *begin_fm(struct enic *enic);
+static void end_fm(struct enic_flowman *fm);
+/* Delete internal flows created for representor paths */
+static void delete_rep_flows(struct enic *enic);
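Note: the bodies of begin_fm()/end_fm() are added later in the file and fall outside this excerpt. A minimal sketch of what the comments above describe, assuming the lock is only taken in switchdev (representor) mode; illustrative, not the committed code:

static struct enic_flowman *
begin_fm(struct enic *enic)
{
        struct enic_vf_representor *vf;
        struct enic_flowman *fm;

        /* Representors borrow the PF's flowman instance */
        if (enic_is_vf_rep(enic)) {
                vf = VF_ENIC_TO_VF_REP(enic);
                fm = vf->pf->fm;
        } else {
                fm = enic->fm;
        }
        /* fm is NULL when the adapter does not support flowman */
        if (fm == NULL)
                return NULL;
        /* Serialize PF and representor callers only when representors exist */
        if (fm->owner_enic->switchdev_mode)
                rte_spinlock_lock(&fm->lock);
        fm->user_enic = enic;
        return fm;
}

static void
end_fm(struct enic_flowman *fm)
{
        if (fm->owner_enic->switchdev_mode)
                rte_spinlock_unlock(&fm->lock);
}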
 
 /*
  * Common arguments passed to copy_item functions. Use this structure
@@ -193,6 +243,7 @@ static const enum rte_flow_action_type enic_fm_supported_ig_actions[] = {
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_JUMP,
        RTE_FLOW_ACTION_TYPE_MARK,
+       RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
        RTE_FLOW_ACTION_TYPE_PORT_ID,
        RTE_FLOW_ACTION_TYPE_PASSTHRU,
        RTE_FLOW_ACTION_TYPE_QUEUE,
@@ -208,6 +259,10 @@ static const enum rte_flow_action_type enic_fm_supported_eg_actions[] = {
        RTE_FLOW_ACTION_TYPE_COUNT,
        RTE_FLOW_ACTION_TYPE_DROP,
        RTE_FLOW_ACTION_TYPE_JUMP,
+       RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
+       RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
+       RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
+       RTE_FLOW_ACTION_TYPE_PORT_ID,
        RTE_FLOW_ACTION_TYPE_PASSTHRU,
        RTE_FLOW_ACTION_TYPE_VOID,
        RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
@@ -313,8 +368,8 @@ enic_fm_copy_item_eth(struct copy_item_args *arg)
        fm_mask = &entry->ftm_mask.fk_hdrset[lvl];
        fm_data->fk_header_select |= FKH_ETHER;
        fm_mask->fk_header_select |= FKH_ETHER;
-       memcpy(&fm_data->l2.eth, spec, sizeof(*spec));
-       memcpy(&fm_mask->l2.eth, mask, sizeof(*mask));
+       memcpy(&fm_data->l2.eth, spec, sizeof(struct rte_ether_hdr));
+       memcpy(&fm_mask->l2.eth, mask, sizeof(struct rte_ether_hdr));
        return 0;
 }
 
@@ -350,8 +405,11 @@ enic_fm_copy_item_vlan(struct copy_item_args *arg)
        eth_mask = (void *)&fm_mask->l2.eth;
        eth_val = (void *)&fm_data->l2.eth;
 
-       /* Outer TPID cannot be matched */
-       if (eth_mask->ether_type)
+       /*
+        * Outer TPID cannot be matched. If inner_type is 0, use what is
+        * in the eth header.
+        */
+       if (eth_mask->ether_type && mask->inner_type)
                return -ENOTSUP;
 
        /*
@@ -359,8 +417,10 @@ enic_fm_copy_item_vlan(struct copy_item_args *arg)
         * L2, regardless of vlan stripping settings. So, the inner type
         * from vlan becomes the ether type of the eth header.
         */
-       eth_mask->ether_type = mask->inner_type;
-       eth_val->ether_type = spec->inner_type;
+       if (mask->inner_type) {
+               eth_mask->ether_type = mask->inner_type;
+               eth_val->ether_type = spec->inner_type;
+       }
        fm_data->fk_header_select |= FKH_ETHER | FKH_QTAG;
        fm_mask->fk_header_select |= FKH_ETHER | FKH_QTAG;
        fm_data->fk_vlan = rte_be_to_cpu_16(spec->tci);
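For reference, a pattern that exercises the relaxed check above: when the VLAN item's inner_type mask is non-zero it overrides the ether type taken from the ETH item, otherwise the ETH item's type is used as-is. Illustrative values only:

/* Illustrative only: match VID 100 carrying IPv4 */
struct rte_flow_item_vlan vlan_spec = {
        .tci = RTE_BE16(100),
        .inner_type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
};
struct rte_flow_item_vlan vlan_mask = {
        .tci = RTE_BE16(0x0fff),
        .inner_type = RTE_BE16(0xffff),
};
struct rte_flow_item pattern[] = {
        /* No ETH spec/mask: outer MACs and TPID are not matched */
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_VLAN,
          .spec = &vlan_spec, .mask = &vlan_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};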
@@ -419,8 +479,8 @@ enic_fm_copy_item_ipv6(struct copy_item_args *arg)
 
        fm_data->fk_header_select |= FKH_IPV6;
        fm_mask->fk_header_select |= FKH_IPV6;
-       memcpy(&fm_data->l3.ip6, spec, sizeof(*spec));
-       memcpy(&fm_mask->l3.ip6, mask, sizeof(*mask));
+       memcpy(&fm_data->l3.ip6, spec, sizeof(struct rte_ipv6_hdr));
+       memcpy(&fm_mask->l3.ip6, mask, sizeof(struct rte_ipv6_hdr));
        return 0;
 }
 
@@ -623,6 +683,12 @@ enic_fm_copy_item_raw(struct copy_item_args *arg)
        return 0;
 }
 
+static int
+flowman_cmd(struct enic_flowman *fm, uint64_t *args, int nargs)
+{
+       return vnic_dev_flowman_cmd(fm->owner_enic->vdev, args, nargs);
+}
+
 static int
 enic_fet_alloc(struct enic_flowman *fm, uint8_t ingress,
               struct fm_key_template *key, int entries,
@@ -661,7 +727,7 @@ enic_fet_alloc(struct enic_flowman *fm, uint8_t ingress,
 
        args[0] = FM_EXACT_TABLE_ALLOC;
        args[1] = fm->cmd.pa;
-       ret = vnic_dev_flowman_cmd(fm->enic->vdev, args, 2);
+       ret = flowman_cmd(fm, args, 2);
        if (ret) {
                ENICPMD_LOG(ERR, "cannot alloc exact match table: rc=%d", ret);
                free(fet);
@@ -870,46 +936,51 @@ enic_fm_append_action_op(struct enic_flowman *fm,
        return 0;
 }
 
-/* Steer operations need to appear before other ops */
+static struct fm_action_op *
+find_prev_action_op(struct enic_flowman *fm, uint32_t opcode)
+{
+       struct fm_action_op *op;
+       int i;
+
+       for (i = 0; i < fm->action_op_count; i++) {
+               op = &fm->action.fma_action_ops[i];
+               if (op->fa_op == opcode)
+                       return op;
+       }
+       return NULL;
+}
+
+/* NIC requires that 1st steer appear before decap.
+ * Correct example: steer, decap, steer, steer, ...
+ */
 static void
 enic_fm_reorder_action_op(struct enic_flowman *fm)
 {
-       struct fm_action_op *dst, *dst_head, *src, *src_head;
+       struct fm_action_op *op, *steer, *decap;
+       struct fm_action_op tmp_op;
 
        ENICPMD_FUNC_TRACE();
-       /* Move steer ops to the front. */
-       src = fm->action.fma_action_ops;
-       src_head = src;
-       dst = fm->action_tmp.fma_action_ops;
-       dst_head = dst;
-       /* Copy steer ops to tmp */
-       while (src->fa_op != FMOP_END) {
-               if (src->fa_op == FMOP_RQ_STEER) {
-                       ENICPMD_LOG(DEBUG, "move op: %ld -> dst %ld",
-                                   (long)(src - src_head),
-                                   (long)(dst - dst_head));
-                       *dst = *src;
-                       dst++;
-               }
-               src++;
-       }
-       /* Then append non-steer ops */
-       src = src_head;
-       while (src->fa_op != FMOP_END) {
-               if (src->fa_op != FMOP_RQ_STEER) {
-                       ENICPMD_LOG(DEBUG, "move op: %ld -> dst %ld",
-                                   (long)(src - src_head),
-                                   (long)(dst - dst_head));
-                       *dst = *src;
-                       dst++;
-               }
-               src++;
+       /* Find 1st steer and decap */
+       op = fm->action.fma_action_ops;
+       steer = NULL;
+       decap = NULL;
+       while (op->fa_op != FMOP_END) {
+               if (!decap && (op->fa_op == FMOP_DECAP_NOSTRIP ||
+                              op->fa_op == FMOP_DECAP_STRIP))
+                       decap = op;
+               else if (!steer && op->fa_op == FMOP_RQ_STEER)
+                       steer = op;
+               op++;
+       }
+       /* If decap is before steer, swap */
+       if (steer && decap && decap < steer) {
+               op = fm->action.fma_action_ops;
+               ENICPMD_LOG(DEBUG, "swap decap %ld <-> steer %ld",
+                           (long)(decap - op), (long)(steer - op));
+               tmp_op = *decap;
+               *decap = *steer;
+               *steer = tmp_op;
        }
-       /* Copy END */
-       *dst = *src;
-       /* Finally replace the original action with the reordered one */
-       memcpy(fm->action.fma_action_ops, fm->action_tmp.fma_action_ops,
-              sizeof(fm->action.fma_action_ops));
 }
 
 /* VXLAN decap is done via flowman compound action */
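An illustration of the narrower reordering performed by the rewritten enic_fm_reorder_action_op() above (not from the source):

/*
 * Op list as emitted by enic_fm_copy_action vs. after
 * enic_fm_reorder_action_op:
 *
 *   before: DECAP_NOSTRIP, RQ_STEER, MARK, END
 *   after:  RQ_STEER, DECAP_NOSTRIP, MARK, END
 *
 * Only the first decap/steer pair is swapped; all other ops keep
 * their positions, unlike the old code, which moved every steer op
 * to the front.
 */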
@@ -935,6 +1006,17 @@ enic_fm_copy_vxlan_decap(struct enic_flowman *fm,
        return enic_fm_append_action_op(fm, &fm_op, error);
 }
 
+/* Generate a reasonable source port number */
+static uint16_t
+gen_src_port(void)
+{
+       /* Min/max below are the default values in OVS-DPDK and Linux */
+       uint16_t p = rte_rand();
+       p = RTE_MAX(p, 32768);
+       p = RTE_MIN(p, 61000);
+       return rte_cpu_to_be_16(p);
+}
+
 /* VXLAN encap is done via flowman compound action */
 static int
 enic_fm_copy_vxlan_encap(struct enic_flowman *fm,
@@ -943,6 +1025,7 @@ enic_fm_copy_vxlan_encap(struct enic_flowman *fm,
 {
        struct fm_action_op fm_op;
        struct rte_ether_hdr *eth;
+       struct rte_udp_hdr *udp;
        uint16_t *ethertype;
        void *template;
        uint8_t off;
@@ -964,7 +1047,7 @@ enic_fm_copy_vxlan_encap(struct enic_flowman *fm,
        eth = (struct rte_ether_hdr *)template;
        ethertype = &eth->ether_type;
        append_template(&template, &off, item->spec,
-                       sizeof(struct rte_flow_item_eth));
+                       sizeof(struct rte_ether_hdr));
        item++;
        flow_item_skip_void(&item);
        /* Optional VLAN */
@@ -1041,8 +1124,17 @@ enic_fm_copy_vxlan_encap(struct enic_flowman *fm,
                off + offsetof(struct rte_udp_hdr, dgram_len);
        fm_op.encap.len2_delta =
                sizeof(struct rte_udp_hdr) + sizeof(struct rte_vxlan_hdr);
+       udp = (struct rte_udp_hdr *)template;
        append_template(&template, &off, item->spec,
                        sizeof(struct rte_udp_hdr));
+       /*
+        * Firmware does not hash/fill source port yet. Generate a
+        * random port, as there is *usually* one rte_flow for the
+        * given inner packet stream (i.e. a single stream has one
+        * random port).
+        */
+       if (udp->src_port == 0)
+               udp->src_port = gen_src_port();
        item++;
        flow_item_skip_void(&item);
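An illustrative encap definition that exercises the new source-port logic: leaving the UDP source port at 0 lets the PMD fill in a random port via gen_src_port(). Addresses, MACs, and the VNI below are hypothetical:

struct rte_flow_item_eth enc_eth = {
        .dst.addr_bytes = { 0x02, 0, 0, 0, 0, 0x01 },
        .src.addr_bytes = { 0x02, 0, 0, 0, 0, 0x02 },
        .type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
};
struct rte_flow_item_ipv4 enc_ip = {
        .hdr = {
                .version_ihl = 0x45,
                .time_to_live = 64,
                .next_proto_id = 17,    /* UDP */
                .src_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 1)),
                .dst_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 2)),
        },
};
struct rte_flow_item_udp enc_udp = {
        .hdr = {
                .src_port = 0,                  /* 0: PMD picks a random port */
                .dst_port = RTE_BE16(4789),     /* VXLAN */
        },
};
struct rte_flow_item_vxlan enc_vxlan = { .vni = { 0, 0, 1 } };
struct rte_flow_item encap_def[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &enc_eth },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &enc_ip },
        { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &enc_udp },
        { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &enc_vxlan },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
struct rte_flow_action_vxlan_encap encap = { .definition = encap_def };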
 
@@ -1079,7 +1171,8 @@ enic_fm_find_vnic(struct enic *enic, const struct rte_pci_addr *addr,
        args[1] = bdf;
        rc = vnic_dev_flowman_cmd(enic->vdev, args, 2);
        if (rc != 0) {
-               ENICPMD_LOG(ERR, "allocating counters rc=%d", rc);
+               /* Expected to fail if BDF is not on the adapter */
+               ENICPMD_LOG(DEBUG, "cannot find vnic handle: rc=%d", rc);
                return rc;
        }
        *handle = args[0];
@@ -1087,6 +1180,68 @@ enic_fm_find_vnic(struct enic *enic, const struct rte_pci_addr *addr,
        return 0;
 }
 
+/*
+ * Egress: target port should be either PF uplink or VF.
+ * Supported cases
+ * 1. VF egress -> PF uplink
+ *   PF may be this VF's PF, or another PF, as long as they are on the same VIC.
+ * 2. VF egress -> VF
+ *
+ * Unsupported cases
+ * 1. PF egress -> VF
+ *   App should be using representor to pass packets to VF
+ */
+static int
+vf_egress_port_id_action(struct enic_flowman *fm,
+                        struct rte_eth_dev *dst_dev,
+                        uint64_t dst_vnic_h,
+                        struct fm_action_op *fm_op,
+                        struct rte_flow_error *error)
+{
+       struct enic *src_enic, *dst_enic;
+       struct enic_vf_representor *vf;
+       uint8_t uif;
+       int ret;
+
+       ENICPMD_FUNC_TRACE();
+       src_enic = fm->user_enic;
+       dst_enic = pmd_priv(dst_dev);
+       if (!(src_enic->rte_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)) {
+               return rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ACTION,
+                       NULL, "source port is not VF representor");
+       }
+
+       /* VF -> PF uplink. dst is not VF representor */
+       if (!(dst_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)) {
+               /* PF is the VF's PF? Then nothing to do */
+               vf = VF_ENIC_TO_VF_REP(src_enic);
+               if (vf->pf == dst_enic) {
+                       ENICPMD_LOG(DEBUG, "destination port is VF's PF");
+                       return 0;
+               }
+               /* If not, steer to the remote PF's uplink */
+               uif = dst_enic->fm_vnic_uif;
+               ENICPMD_LOG(DEBUG, "steer to uplink %u", uif);
+               memset(fm_op, 0, sizeof(*fm_op));
+               fm_op->fa_op = FMOP_SET_EGPORT;
+               fm_op->set_egport.egport = uif;
+               ret = enic_fm_append_action_op(fm, fm_op, error);
+               return ret;
+       }
+
+       /* VF -> VF loopback. Hairpin and steer to vnic */
+       memset(fm_op, 0, sizeof(*fm_op));
+       fm_op->fa_op = FMOP_EG_HAIRPIN;
+       ret = enic_fm_append_action_op(fm, fm_op, error);
+       if (ret)
+               return ret;
+       ENICPMD_LOG(DEBUG, "egress hairpin");
+       fm->hairpin_steer_vnic_h = dst_vnic_h;
+       fm->need_hairpin_steer = 1;
+       return 0;
+}
+
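A sketch of supported case 1 above, as an application might express it on a VF representor port in switchdev mode; vf_rep_port_id and pf_port_id are hypothetical, and this is not taken from the source:

/* On the VF representor: send the VF's egress traffic to the PF uplink */
struct rte_flow_attr attr = { .egress = 1, .transfer = 1 };
struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
struct rte_flow_action_port_id dst_port = { .id = pf_port_id };
struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &dst_port },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};
struct rte_flow_error err;
struct rte_flow *f;

f = rte_flow_create(vf_rep_port_id, &attr, pattern, actions, &err);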
 /* Translate flow actions to flowman TCAM entry actions */
 static int
 enic_fm_copy_action(struct enic_flowman *fm,
@@ -1096,25 +1251,34 @@ enic_fm_copy_action(struct enic_flowman *fm,
 {
        enum {
                FATE = 1 << 0,
-               MARK = 1 << 1,
+               DECAP = 1 << 1,
                PASSTHRU = 1 << 2,
                COUNT = 1 << 3,
                ENCAP = 1 << 4,
+               PUSH_VLAN = 1 << 5,
+               PORT_ID = 1 << 6,
        };
        struct fm_tcam_match_entry *fmt;
        struct fm_action_op fm_op;
+       bool need_ovlan_action;
        struct enic *enic;
        uint32_t overlap;
        uint64_t vnic_h;
+       uint16_t ovlan;
        bool first_rq;
+       bool steer;
        int ret;
 
        ENICPMD_FUNC_TRACE();
        fmt = &fm->tcam_entry;
+       need_ovlan_action = false;
+       ovlan = 0;
        first_rq = true;
-       enic = fm->enic;
+       steer = false;
+       enic = fm->user_enic;
        overlap = 0;
-       vnic_h = 0; /* 0 = current vNIC */
+       vnic_h = enic->fm_vnic_handle;
+
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
@@ -1150,9 +1314,8 @@ enic_fm_copy_action(struct enic_flowman *fm,
                        const struct rte_flow_action_mark *mark =
                                actions->conf;
 
-                       if (overlap & MARK)
+                       if (enic->use_noscatter_vec_rx_handler)
                                goto unsupported;
-                       overlap |= MARK;
                        if (mark->id >= ENIC_MAGIC_FILTER_ID - 1)
                                return rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
@@ -1166,9 +1329,8 @@ enic_fm_copy_action(struct enic_flowman *fm,
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_FLAG: {
-                       if (overlap & MARK)
+                       if (enic->use_noscatter_vec_rx_handler)
                                goto unsupported;
-                       overlap |= MARK;
                        /* ENIC_MAGIC_FILTER_ID is reserved for flagging */
                        memset(&fm_op, 0, sizeof(fm_op));
                        fm_op.fa_op = FMOP_MARK;
@@ -1183,8 +1345,8 @@ enic_fm_copy_action(struct enic_flowman *fm,
                                actions->conf;
 
                        /*
-                        * If other fate kind is set, fail.  Multiple
-                        * queue actions are ok.
+                        * If a fate action other than QUEUE or RSS is set,
+                        * fail. Multiple RSS and QUEUE actions are ok.
                         */
                        if ((overlap & FATE) && first_rq)
                                goto unsupported;
@@ -1194,12 +1356,14 @@ enic_fm_copy_action(struct enic_flowman *fm,
                        fm_op.fa_op = FMOP_RQ_STEER;
                        fm_op.rq_steer.rq_index =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
+                       fm_op.rq_steer.rq_count = 1;
                        fm_op.rq_steer.vnic_handle = vnic_h;
                        ret = enic_fm_append_action_op(fm, &fm_op, error);
                        if (ret)
                                return ret;
                        ENICPMD_LOG(DEBUG, "create QUEUE action rq: %u",
                                    fm_op.rq_steer.rq_index);
+                       steer = true;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_DROP: {
@@ -1228,37 +1392,58 @@ enic_fm_copy_action(struct enic_flowman *fm,
                        uint16_t i;
 
                        /*
-                        * Hardware does not support general RSS actions, but
-                        * we can still support the dummy one that is used to
-                        * "receive normally".
+                        * If a fate action other than QUEUE or RSS is set,
+                        * fail. Multiple RSS and QUEUE actions are ok.
+                        */
+                       if ((overlap & FATE) && first_rq)
+                               goto unsupported;
+                       first_rq = false;
+                       overlap |= FATE;
+
+                       /*
+                        * Hardware only supports RSS actions on outer level
+                        * with default type and function. Queues must be
+                        * sequential.
                         */
                        allow = rss->func == RTE_ETH_HASH_FUNCTION_DEFAULT &&
-                               rss->level == 0 &&
-                               (rss->types == 0 ||
-                                rss->types == enic->rss_hf) &&
-                               rss->queue_num == enic->rq_count &&
-                               rss->key_len == 0;
-                       /* Identity queue map is ok */
-                       for (i = 0; i < rss->queue_num; i++)
-                               allow = allow && (i == rss->queue[i]);
+                               rss->level == 0 && (rss->types == 0 ||
+                               rss->types == enic->rss_hf) &&
+                               rss->queue_num <= enic->rq_count &&
+                               rss->queue[rss->queue_num - 1] < enic->rq_count;
+
+                       /* Identity queue map needs to be sequential */
+                       for (i = 1; i < rss->queue_num; i++)
+                               allow = allow && (rss->queue[i] ==
+                                       rss->queue[i - 1] + 1);
                        if (!allow)
                                goto unsupported;
-                       if (overlap & FATE)
-                               goto unsupported;
-                       /* Need MARK or FLAG */
-                       if (!(overlap & MARK))
-                               goto unsupported;
-                       overlap |= FATE;
+
+                       memset(&fm_op, 0, sizeof(fm_op));
+                       fm_op.fa_op = FMOP_RQ_STEER;
+                       fm_op.rq_steer.rq_index =
+                               enic_rte_rq_idx_to_sop_idx(rss->queue[0]);
+                       fm_op.rq_steer.rq_count = rss->queue_num;
+                       fm_op.rq_steer.vnic_handle = vnic_h;
+                       ret = enic_fm_append_action_op(fm, &fm_op, error);
+                       if (ret)
+                               return ret;
+                       ENICPMD_LOG(DEBUG, "create QUEUE action rq: %u",
+                                   fm_op.rq_steer.rq_index);
+                       steer = true;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_PORT_ID: {
                        const struct rte_flow_action_port_id *port;
-                       struct rte_pci_device *pdev;
                        struct rte_eth_dev *dev;
 
+                       if (!ingress && (overlap & PORT_ID)) {
+                               ENICPMD_LOG(DEBUG, "cannot have multiple egress PORT_ID actions");
+                               goto unsupported;
+                       }
                        port = actions->conf;
                        if (port->original) {
-                               vnic_h = 0; /* This port */
+                               vnic_h = enic->fm_vnic_handle; /* This port */
                                break;
                        }
                        ENICPMD_LOG(DEBUG, "port id %u", port->id);
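The RSS handling above now programs a real steer op instead of requiring MARK/FLAG. An action list the new check accepts, shown for illustration and assuming the port is configured with at least four Rx queues:

/* Default hash function, outer level, device RSS types, queues 0..3 */
uint16_t rss_queues[] = { 0, 1, 2, 3 };         /* must be sequential */
struct rte_flow_action_rss rss = {
        .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
        .level = 0,
        .types = 0,             /* 0: use the configured rss_hf */
        .key_len = 0,
        .queue_num = RTE_DIM(rss_queues),
        .queue = rss_queues,
};
struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};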
@@ -1273,15 +1458,32 @@ enic_fm_copy_action(struct enic_flowman *fm,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
                                        NULL, "port_id is not enic");
                        }
-                       pdev = RTE_ETH_DEV_TO_PCI(dev);
-                       if (enic_fm_find_vnic(enic, &pdev->addr, &vnic_h)) {
+                       if (enic->switch_domain_id !=
+                           pmd_priv(dev)->switch_domain_id) {
                                return rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
-                                       NULL, "port_id is not vnic");
+                                       NULL, "destination and source ports are not in the same switch domain");
                        }
+                       vnic_h = pmd_priv(dev)->fm_vnic_handle;
+                       overlap |= PORT_ID;
+                       /*
+                        * Ingress. Nothing more to do. We add an implicit
+                        * steer at the end if needed.
+                        */
+                       if (ingress)
+                               break;
+                       /* Egress */
+                       ret = vf_egress_port_id_action(fm, dev, vnic_h, &fm_op,
+                               error);
+                       if (ret)
+                               return ret;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: {
+                       if (overlap & DECAP)
+                               goto unsupported;
+                       overlap |= DECAP;
+
                        ret = enic_fm_copy_vxlan_decap(fm, fmt, actions,
                                error);
                        if (ret != 0)
@@ -1301,13 +1503,99 @@ enic_fm_copy_action(struct enic_flowman *fm,
                                return ret;
                        break;
                }
+               case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: {
+                       struct fm_action_op *decap;
+
+                       /*
+                        * If decap-nostrip appears before pop vlan, this pop
+                        * applies to the inner packet vlan. Turn it into
+                        * decap-strip.
+                        */
+                       decap = find_prev_action_op(fm, FMOP_DECAP_NOSTRIP);
+                       if (decap) {
+                               ENICPMD_LOG(DEBUG, "pop-vlan inner: decap-nostrip => decap-strip");
+                               decap->fa_op = FMOP_DECAP_STRIP;
+                               break;
+                       }
+                       memset(&fm_op, 0, sizeof(fm_op));
+                       fm_op.fa_op = FMOP_POP_VLAN;
+                       ret = enic_fm_append_action_op(fm, &fm_op, error);
+                       if (ret)
+                               return ret;
+                       break;
+               }
+               case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: {
+                       const struct rte_flow_action_of_push_vlan *vlan;
+
+                       if (overlap & PASSTHRU)
+                               goto unsupported;
+                       vlan = actions->conf;
+                       if (vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN)) {
+                               return rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ACTION,
+                                       NULL, "unexpected push_vlan ethertype");
+                       }
+                       overlap |= PUSH_VLAN;
+                       need_ovlan_action = true;
+                       break;
+               }
+               case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: {
+                       const struct rte_flow_action_of_set_vlan_pcp *pcp;
+
+                       pcp = actions->conf;
+                       if (pcp->vlan_pcp > 7) {
+                               return rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ACTION,
+                                       NULL, "invalid vlan_pcp");
+                       }
+                       need_ovlan_action = true;
+                       ovlan |= ((uint16_t)pcp->vlan_pcp) << 13;
+                       break;
+               }
+               case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: {
+                       const struct rte_flow_action_of_set_vlan_vid *vid;
+
+                       vid = actions->conf;
+                       need_ovlan_action = true;
+                       ovlan |= rte_be_to_cpu_16(vid->vlan_vid);
+                       break;
+               }
                default:
                        goto unsupported;
                }
        }
 
-       if (!(overlap & (FATE | PASSTHRU | COUNT)))
+       if (!(overlap & (FATE | PASSTHRU | COUNT | PORT_ID)))
                goto unsupported;
+       /* Egress from VF: need implicit WQ match */
+       if (enic_is_vf_rep(enic) && !ingress) {
+               fmt->ftm_data.fk_wq_id = 0;
+               fmt->ftm_mask.fk_wq_id = 0xffff;
+               fmt->ftm_data.fk_wq_vnic = enic->fm_vnic_handle;
+               ENICPMD_LOG(DEBUG, "add implicit wq id match for vf %d",
+                           VF_ENIC_TO_VF_REP(enic)->vf_id);
+       }
+       if (need_ovlan_action) {
+               memset(&fm_op, 0, sizeof(fm_op));
+               fm_op.fa_op = FMOP_SET_OVLAN;
+               fm_op.ovlan.vlan = ovlan;
+               ret = enic_fm_append_action_op(fm, &fm_op, error);
+               if (ret)
+                       return ret;
+       }
+       /* Add steer op for PORT_ID without QUEUE */
+       if ((overlap & PORT_ID) && !steer && ingress) {
+               memset(&fm_op, 0, sizeof(fm_op));
+               /* Always to queue 0 for now as generic RSS is not available */
+               fm_op.fa_op = FMOP_RQ_STEER;
+               fm_op.rq_steer.rq_index = 0;
+               fm_op.rq_steer.vnic_handle = vnic_h;
+               ret = enic_fm_append_action_op(fm, &fm_op, error);
+               if (ret)
+                       return ret;
+               ENICPMD_LOG(DEBUG, "add implicit steer op");
+       }
+       /* Add required END */
        memset(&fm_op, 0, sizeof(fm_op));
        fm_op.fa_op = FMOP_END;
        ret = enic_fm_append_action_op(fm, &fm_op, error);
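The three OF_* VLAN actions handled above are folded into a single FMOP_SET_OVLAN op whose value is (pcp << 13) | vid. An illustrative egress action fragment; a fate action such as PORT_ID must also be present and is omitted here:

/* Push an outer 802.1Q tag with VID 100, PCP 3 => ovlan = (3 << 13) | 100 */
struct rte_flow_action_of_push_vlan push = {
        .ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
};
struct rte_flow_action_of_set_vlan_vid vid = { .vlan_vid = RTE_BE16(100) };
struct rte_flow_action_of_set_vlan_pcp pcp = { .vlan_pcp = 3 };
struct rte_flow_action vlan_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push },
        { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid },
        { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP, .conf = &pcp },
        /* ... a fate action (e.g. PORT_ID) and the END action follow ... */
};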
@@ -1354,6 +1642,13 @@ enic_fm_dump_tcam_actions(const struct fm_action *fm_action)
                [FMOP_ENCAP] = "encap",
                [FMOP_SET_OVLAN] = "set_ovlan",
                [FMOP_DECAP_NOSTRIP] = "decap_nostrip",
+               [FMOP_DECAP_STRIP] = "decap_strip",
+               [FMOP_POP_VLAN] = "pop_vlan",
+               [FMOP_SET_EGPORT] = "set_egport",
+               [FMOP_RQ_STEER_ONLY] = "rq_steer_only",
+               [FMOP_SET_ENCAP_VLAN] = "set_encap_vlan",
+               [FMOP_EMIT] = "emit",
+               [FMOP_MODIFY] = "modify",
        };
        const struct fm_action_op *op = &fm_action->fma_action_ops[0];
        char buf[128], *bp = buf;
@@ -1488,9 +1783,10 @@ enic_fm_dump_tcam_match(const struct fm_tcam_match_entry *match,
        memset(buf, 0, sizeof(buf));
        __enic_fm_dump_tcam_match(&match->ftm_mask.fk_hdrset[0],
                                  buf, sizeof(buf));
-       ENICPMD_LOG(DEBUG, " TCAM %s Outer: %s %scounter",
+       ENICPMD_LOG(DEBUG, " TCAM %s Outer: %s %scounter position %u",
                    (ingress) ? "IG" : "EG", buf,
-                   (match->ftm_flags & FMEF_COUNTER) ? "" : "no ");
+                   (match->ftm_flags & FMEF_COUNTER) ? "" : "no ",
+                   match->ftm_position);
        memset(buf, 0, sizeof(buf));
        __enic_fm_dump_tcam_match(&match->ftm_mask.fk_hdrset[1],
                                  buf, sizeof(buf));
@@ -1537,13 +1833,13 @@ enic_fm_flow_parse(struct enic_flowman *fm,
        }
 
        if (attrs) {
-               if (attrs->priority) {
+               if (attrs->group != FM_TCAM_RTE_GROUP && attrs->priority) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                           NULL,
-                                          "priorities are not supported");
+                                          "priorities are not supported for non-default (0) groups");
                        return -rte_errno;
-               } else if (attrs->transfer) {
+               } else if (!fm->owner_enic->switchdev_mode && attrs->transfer) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                                           NULL,
@@ -1600,12 +1896,10 @@ enic_fm_more_counters(struct enic_flowman *fm)
 {
        struct enic_fm_counter *new_stack;
        struct enic_fm_counter *ctrs;
-       struct enic *enic;
        int i, rc;
        uint64_t args[2];
 
        ENICPMD_FUNC_TRACE();
-       enic = fm->enic;
        new_stack = rte_realloc(fm->counter_stack, (fm->counters_alloced +
                                FM_COUNTERS_EXPAND) *
                                sizeof(struct enic_fm_counter), 0);
@@ -1617,7 +1911,7 @@ enic_fm_more_counters(struct enic_flowman *fm)
 
        args[0] = FM_COUNTER_BRK;
        args[1] = fm->counters_alloced + FM_COUNTERS_EXPAND;
-       rc = vnic_dev_flowman_cmd(enic->vdev, args, 2);
+       rc = flowman_cmd(fm, args, 2);
        if (rc != 0) {
                ENICPMD_LOG(ERR, "cannot alloc counters rc=%d", rc);
                return rc;
@@ -1637,16 +1931,14 @@ enic_fm_more_counters(struct enic_flowman *fm)
 static int
 enic_fm_counter_zero(struct enic_flowman *fm, struct enic_fm_counter *c)
 {
-       struct enic *enic;
        uint64_t args[3];
        int ret;
 
        ENICPMD_FUNC_TRACE();
-       enic = fm->enic;
        args[0] = FM_COUNTER_QUERY;
        args[1] = c->handle;
        args[2] = 1; /* clear */
-       ret = vnic_dev_flowman_cmd(enic->vdev, args, 3);
+       ret = flowman_cmd(fm, args, 3);
        if (ret) {
                ENICPMD_LOG(ERR, "counter init: rc=%d handle=0x%x",
                            ret, c->handle);
@@ -1678,19 +1970,26 @@ enic_fm_counter_alloc(struct enic_flowman *fm, struct rte_flow_error *error,
 }
 
 static int
-enic_fm_action_free(struct enic_flowman *fm, uint64_t handle)
+enic_fm_action_free(struct enic_flowman *fm, struct enic_fm_action *ah)
 {
        uint64_t args[2];
-       int rc;
+       int ret = 0;
 
        ENICPMD_FUNC_TRACE();
-       args[0] = FM_ACTION_FREE;
-       args[1] = handle;
-       rc = vnic_dev_flowman_cmd(fm->enic->vdev, args, 2);
-       if (rc)
-               ENICPMD_LOG(ERR, "cannot free action: rc=%d handle=0x%" PRIx64,
-                           rc, handle);
-       return rc;
+       RTE_ASSERT(ah->ref > 0);
+       ah->ref--;
+       if (ah->ref == 0) {
+               args[0] = FM_ACTION_FREE;
+               args[1] = ah->handle;
+               ret = flowman_cmd(fm, args, 2);
+               if (ret)
+                       /* This is a "should never happen" error. */
+                       ENICPMD_LOG(ERR, "freeing action rc=%d handle=0x%"
+                                   PRIx64, ret, ah->handle);
+               rte_hash_del_key(fm->action_hash, (const void *)&ah->key);
+               free(ah);
+       }
+       return ret;
 }
 
 static int
@@ -1702,7 +2001,7 @@ enic_fm_entry_free(struct enic_flowman *fm, uint64_t handle)
        ENICPMD_FUNC_TRACE();
        args[0] = FM_MATCH_ENTRY_REMOVE;
        args[1] = handle;
-       rc = vnic_dev_flowman_cmd(fm->enic->vdev, args, 2);
+       rc = flowman_cmd(fm, args, 2);
        if (rc)
                ENICPMD_LOG(ERR, "cannot free match entry: rc=%d"
                            " handle=0x%" PRIx64, rc, handle);
@@ -1766,9 +2065,9 @@ __enic_fm_flow_free(struct enic_flowman *fm, struct enic_fm_flow *fm_flow)
                enic_fm_entry_free(fm, fm_flow->entry_handle);
                fm_flow->entry_handle = FM_INVALID_HANDLE;
        }
-       if (fm_flow->action_handle != FM_INVALID_HANDLE) {
-               enic_fm_action_free(fm, fm_flow->action_handle);
-               fm_flow->action_handle = FM_INVALID_HANDLE;
+       if (fm_flow->action != NULL) {
+               enic_fm_action_free(fm, fm_flow->action);
+               fm_flow->action = NULL;
        }
        enic_fm_counter_free(fm, fm_flow);
        if (fm_flow->fet) {
@@ -1780,9 +2079,15 @@ __enic_fm_flow_free(struct enic_flowman *fm, struct enic_fm_flow *fm_flow)
 static void
 enic_fm_flow_free(struct enic_flowman *fm, struct rte_flow *flow)
 {
+       struct enic_fm_flow *steer = flow->fm->hairpin_steer_flow;
+
        if (flow->fm->fet && flow->fm->fet->default_key)
                remove_jump_flow(fm, flow);
        __enic_fm_flow_free(fm, flow->fm);
+       if (steer) {
+               __enic_fm_flow_free(fm, steer);
+               free(steer);
+       }
        free(flow->fm);
        free(flow);
 }
@@ -1806,7 +2111,7 @@ enic_fm_add_tcam_entry(struct enic_flowman *fm,
        args[0] = FM_TCAM_ENTRY_INSTALL;
        args[1] = ingress ? fm->ig_tcam_hndl : fm->eg_tcam_hndl;
        args[2] = fm->cmd.pa;
-       ret = vnic_dev_flowman_cmd(fm->enic->vdev, args, 3);
+       ret = flowman_cmd(fm, args, 3);
        if (ret != 0) {
                ENICPMD_LOG(ERR, "cannot add %s TCAM entry: rc=%d",
                            ingress ? "ingress" : "egress", ret);
@@ -1856,7 +2161,7 @@ enic_fm_add_exact_entry(struct enic_flowman *fm,
        args[0] = FM_EXACT_ENTRY_INSTALL;
        args[1] = fet->handle;
        args[2] = fm->cmd.pa;
-       ret = vnic_dev_flowman_cmd(fm->enic->vdev, args, 3);
+       ret = flowman_cmd(fm, args, 3);
        if (ret != 0) {
                ENICPMD_LOG(ERR, "cannot add %s exact entry: group=%u",
                            fet->ingress ? "ingress" : "egress", fet->group);
@@ -1872,6 +2177,75 @@ enic_fm_add_exact_entry(struct enic_flowman *fm,
        return 0;
 }
 
+static int
+enic_action_handle_get(struct enic_flowman *fm, struct fm_action *action_in,
+                      struct rte_flow_error *error,
+                      struct enic_fm_action **ah_o)
+{
+       struct enic_fm_action *ah;
+       struct fm_action *fma;
+       uint64_t args[2];
+       int ret = 0;
+
+       ret = rte_hash_lookup_data(fm->action_hash, action_in,
+                                  (void **)&ah);
+       if (ret < 0 && ret != -ENOENT)
+               return rte_flow_error_set(error, -ret,
+                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                  NULL, "enic: rte_hash_lookup(action)");
+
+       if (ret == -ENOENT) {
+               /* Allocate a new action on the NIC. */
+               fma = &fm->cmd.va->fm_action;
+               memcpy(fma, action_in, sizeof(*fma));
+
+               ah = calloc(1, sizeof(*ah));
+               if (ah == NULL)
+                       return rte_flow_error_set(error, ENOMEM,
+                                          RTE_FLOW_ERROR_TYPE_HANDLE,
+                                          NULL, "enic: calloc(fm-action)");
+               memcpy(&ah->key, action_in, sizeof(struct fm_action));
+               args[0] = FM_ACTION_ALLOC;
+               args[1] = fm->cmd.pa;
+               ret = flowman_cmd(fm, args, 2);
+               if (ret != 0) {
+                       rte_flow_error_set(error, -ret,
+                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                          NULL, "enic: devcmd(action-alloc)");
+                       goto error_with_ah;
+               }
+               ah->handle = args[0];
+               ret = rte_hash_add_key_data(fm->action_hash,
+                                           (const void *)action_in,
+                                           (void *)ah);
+               if (ret != 0) {
+                       rte_flow_error_set(error, -ret,
+                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                          NULL,
+                                          "enic: rte_hash_add_key_data(actn)");
+                       goto error_with_action_handle;
+               }
+               ENICPMD_LOG(DEBUG, "action allocated: handle=0x%" PRIx64,
+                           ah->handle);
+       }
+
+       /* Action handle struct is valid, increment reference count. */
+       ah->ref++;
+       *ah_o = ah;
+       return 0;
+error_with_action_handle:
+       args[0] = FM_ACTION_FREE;
+       args[1] = ah->handle;
+       ret = flowman_cmd(fm, args, 2);
+       if (ret != 0)
+               rte_flow_error_set(error, -ret,
+                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                  NULL, "enic: devcmd(action-free)");
+error_with_ah:
+       free(ah);
+       return ret;
+}
+
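To summarize the sharing scheme implemented by enic_action_handle_get() and enic_fm_action_free(); an illustration, not from the source:

/*
 * Flows whose struct fm_action contents are byte-identical share one
 * NIC action:
 *
 *   flow A: enic_action_handle_get() -> miss -> FM_ACTION_ALLOC, ref = 1
 *   flow B: enic_action_handle_get() -> hit  -> reuse handle,    ref = 2
 *   destroy A: enic_fm_action_free() -> ref = 1, handle kept
 *   destroy B: enic_fm_action_free() -> ref = 0, FM_ACTION_FREE,
 *              rte_hash_del_key(), free()
 *
 * The whole struct fm_action is the hash key; the scratch action is
 * zeroed in enic_fm_open_scratch(), so unused bytes compare equal
 * across otherwise-identical actions.
 */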
 /* Push match-action to the NIC. */
 static int
 __enic_fm_flow_add_entry(struct enic_flowman *fm,
@@ -1883,29 +2257,18 @@ __enic_fm_flow_add_entry(struct enic_flowman *fm,
                         struct rte_flow_error *error)
 {
        struct enic_fm_counter *ctr;
-       struct fm_action *fma;
-       uint64_t action_h;
+       struct enic_fm_action *ah = NULL;
        uint64_t entry_h;
-       uint64_t args[3];
        int ret;
 
        ENICPMD_FUNC_TRACE();
-       /* Allocate action. */
-       fma = &fm->cmd.va->fm_action;
-       memcpy(fma, action_in, sizeof(*fma));
-       args[0] = FM_ACTION_ALLOC;
-       args[1] = fm->cmd.pa;
-       ret = vnic_dev_flowman_cmd(fm->enic->vdev, args, 2);
-       if (ret != 0) {
-               ENICPMD_LOG(ERR, "allocating TCAM table action rc=%d", ret);
-               rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-                       NULL, "enic: devcmd(action-alloc)");
+
+       /* Get or create an action handle. */
+       ret = enic_action_handle_get(fm, action_in, error, &ah);
+       if (ret)
                return ret;
-       }
-       action_h = args[0];
-       fm_flow->action_handle = action_h;
-       match_in->ftm_action = action_h;
-       ENICPMD_LOG(DEBUG, "action allocated: handle=0x%" PRIx64, action_h);
+       match_in->ftm_action = ah->handle;
+       fm_flow->action = ah;
 
        /* Allocate counter if requested. */
        if (match_in->ftm_flags & FMEF_COUNTER) {
@@ -1967,6 +2330,7 @@ enic_fm_flow_add_entry(struct enic_flowman *fm,
        struct rte_flow *flow;
 
        ENICPMD_FUNC_TRACE();
+       match_in->ftm_position = attrs->priority;
        enic_fm_dump_tcam_entry(match_in, action_in, attrs->ingress);
        flow = calloc(1, sizeof(*flow));
        fm_flow = calloc(1, sizeof(*fm_flow));
@@ -1978,7 +2342,7 @@ enic_fm_flow_add_entry(struct enic_flowman *fm,
                return NULL;
        }
        flow->fm = fm_flow;
-       fm_flow->action_handle = FM_INVALID_HANDLE;
+       fm_flow->action = NULL;
        fm_flow->entry_handle = FM_INVALID_HANDLE;
        if (__enic_fm_flow_add_entry(fm, fm_flow, match_in, action_in,
                                     attrs->group, attrs->ingress, error)) {
@@ -2033,11 +2397,71 @@ convert_jump_flows(struct enic_flowman *fm, struct enic_fm_fet *fet,
        }
 }
 
+static int
+add_hairpin_steer(struct enic_flowman *fm, struct rte_flow *flow,
+                 struct rte_flow_error *error)
+{
+       struct fm_tcam_match_entry *fm_tcam_entry;
+       struct enic_fm_flow *fm_flow;
+       struct fm_action *fm_action;
+       struct fm_action_op fm_op;
+       int ret;
+
+       ENICPMD_FUNC_TRACE();
+       fm_flow = calloc(1, sizeof(*fm_flow));
+       if (fm_flow == NULL) {
+               rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+                       NULL, "enic: cannot allocate rte_flow");
+               return -ENOMEM;
+       }
+       /* Original egress hairpin flow */
+       fm_tcam_entry = &fm->tcam_entry;
+       fm_action = &fm->action;
+       /* Use the match pattern of the egress flow as is, without counters */
+       fm_tcam_entry->ftm_flags &= ~FMEF_COUNTER;
+       /* The only action is steer to vnic */
+       fm->action_op_count = 0;
+       memset(fm_action, 0, sizeof(*fm_action));
+       memset(&fm_op, 0, sizeof(fm_op));
+       /* Always to queue 0 for now */
+       fm_op.fa_op = FMOP_RQ_STEER;
+       fm_op.rq_steer.rq_index = 0;
+       fm_op.rq_steer.vnic_handle = fm->hairpin_steer_vnic_h;
+       ret = enic_fm_append_action_op(fm, &fm_op, error);
+       if (ret)
+               goto error_with_flow;
+       ENICPMD_LOG(DEBUG, "add steer op");
+       /* Add required END */
+       memset(&fm_op, 0, sizeof(fm_op));
+       fm_op.fa_op = FMOP_END;
+       ret = enic_fm_append_action_op(fm, &fm_op, error);
+       if (ret)
+               goto error_with_flow;
+       /* Add the ingress flow */
+       fm_flow->action = NULL;
+       fm_flow->entry_handle = FM_INVALID_HANDLE;
+       ret = __enic_fm_flow_add_entry(fm, fm_flow, fm_tcam_entry, fm_action,
+                                      FM_TCAM_RTE_GROUP, 1 /* ingress */, error);
+       if (ret) {
+               ENICPMD_LOG(ERR, "cannot add hairpin-steer flow");
+               goto error_with_flow;
+       }
+       /* The new flow is now the egress flow's paired flow */
+       flow->fm->hairpin_steer_flow = fm_flow;
+       return 0;
+
+error_with_flow:
+       free(fm_flow);
+       return ret;
+}
+
 static void
 enic_fm_open_scratch(struct enic_flowman *fm)
 {
        fm->action_op_count = 0;
        fm->fet = NULL;
+       fm->need_hairpin_steer = 0;
+       fm->hairpin_steer_vnic_h = 0;
        memset(&fm->tcam_entry, 0, sizeof(fm->tcam_entry));
        memset(&fm->action, 0, sizeof(fm->action));
 }
@@ -2065,7 +2489,7 @@ enic_fm_flow_validate(struct rte_eth_dev *dev,
        int ret;
 
        ENICPMD_FUNC_TRACE();
-       fm = pmd_priv(dev)->fm;
+       fm = begin_fm(pmd_priv(dev));
        if (fm == NULL)
                return -ENOTSUP;
        enic_fm_open_scratch(fm);
@@ -2077,6 +2501,7 @@ enic_fm_flow_validate(struct rte_eth_dev *dev,
                                        attrs->ingress);
        }
        enic_fm_close_scratch(fm);
+       end_fm(fm);
        return ret;
 }
 
@@ -2087,33 +2512,38 @@ enic_fm_flow_query_count(struct rte_eth_dev *dev,
 {
        struct rte_flow_query_count *query;
        struct enic_fm_flow *fm_flow;
-       struct enic *enic;
+       struct enic_flowman *fm;
        uint64_t args[3];
        int rc;
 
        ENICPMD_FUNC_TRACE();
-       enic = pmd_priv(dev);
+       fm = begin_fm(pmd_priv(dev));
        query = data;
        fm_flow = flow->fm;
-       if (!fm_flow->counter_valid)
-               return rte_flow_error_set(error, ENOTSUP,
+       if (!fm_flow->counter_valid) {
+               rc = rte_flow_error_set(error, ENOTSUP,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                        "enic: flow does not have counter");
+               goto exit;
+       }
 
        args[0] = FM_COUNTER_QUERY;
        args[1] = fm_flow->counter->handle;
        args[2] = query->reset;
-       rc = vnic_dev_flowman_cmd(enic->vdev, args, 3);
+       rc = flowman_cmd(fm, args, 3);
        if (rc) {
                ENICPMD_LOG(ERR, "cannot query counter: rc=%d handle=0x%x",
                            rc, fm_flow->counter->handle);
-               return rc;
+               goto exit;
        }
        query->hits_set = 1;
        query->hits = args[0];
        query->bytes_set = 1;
        query->bytes = args[1];
-       return 0;
+       rc = 0;
+exit:
+       end_fm(fm);
+       return rc;
 }
 
 static int
@@ -2162,7 +2592,7 @@ enic_fm_flow_create(struct rte_eth_dev *dev,
 
        ENICPMD_FUNC_TRACE();
        enic = pmd_priv(dev);
-       fm = enic->fm;
+       fm = begin_fm(enic);
        if (fm == NULL) {
                rte_flow_error_set(error, ENOTSUP,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -2179,6 +2609,15 @@ enic_fm_flow_create(struct rte_eth_dev *dev,
        flow = enic_fm_flow_add_entry(fm, fm_tcam_entry, fm_action,
                                      attrs, error);
        if (flow) {
+               /* Add ingress rule that pairs with hairpin rule */
+               if (fm->need_hairpin_steer) {
+                       ret = add_hairpin_steer(fm, flow, error);
+                       if (ret) {
+                               enic_fm_flow_free(fm, flow);
+                               flow = NULL;
+                               goto error_with_scratch;
+                       }
+               }
                LIST_INSERT_HEAD(&enic->flows, flow, next);
                fet = flow->fm->fet;
                if (fet && fet->default_key) {
@@ -2200,6 +2639,7 @@ enic_fm_flow_create(struct rte_eth_dev *dev,
 
 error_with_scratch:
        enic_fm_close_scratch(fm);
+       end_fm(fm);
        return flow;
 }
 
@@ -2208,12 +2648,15 @@ enic_fm_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
                     __rte_unused struct rte_flow_error *error)
 {
        struct enic *enic = pmd_priv(dev);
+       struct enic_flowman *fm;
 
        ENICPMD_FUNC_TRACE();
-       if (enic->fm == NULL)
+       fm = begin_fm(enic);
+       if (fm == NULL)
                return 0;
        LIST_REMOVE(flow, next);
-       enic_fm_flow_free(enic->fm, flow);
+       enic_fm_flow_free(fm, flow);
+       end_fm(fm);
        return 0;
 }
 
@@ -2221,19 +2664,27 @@ static int
 enic_fm_flow_flush(struct rte_eth_dev *dev,
                   __rte_unused struct rte_flow_error *error)
 {
+       LIST_HEAD(enic_flows, rte_flow) internal;
        struct enic_fm_flow *fm_flow;
        struct enic_flowman *fm;
        struct rte_flow *flow;
        struct enic *enic = pmd_priv(dev);
 
        ENICPMD_FUNC_TRACE();
-       if (enic->fm == NULL)
+
+       fm = begin_fm(enic);
+       if (fm == NULL)
                return 0;
-       fm = enic->fm;
+       /* Destroy all non-internal flows */
+       LIST_INIT(&internal);
        while (!LIST_EMPTY(&enic->flows)) {
                flow = LIST_FIRST(&enic->flows);
                fm_flow = flow->fm;
                LIST_REMOVE(flow, next);
+               if (flow->internal) {
+                       LIST_INSERT_HEAD(&internal, flow, next);
+                       continue;
+               }
                /*
                 * If tables are null, then vNIC is closing, and the firmware
                 * has already cleaned up flowman state. So do not try to free
@@ -2241,11 +2692,17 @@ enic_fm_flow_flush(struct rte_eth_dev *dev,
                 */
                if (fm->ig_tcam_hndl == FM_INVALID_HANDLE) {
                        fm_flow->entry_handle = FM_INVALID_HANDLE;
-                       fm_flow->action_handle = FM_INVALID_HANDLE;
+                       fm_flow->action = NULL;
                        fm_flow->fet = NULL;
                }
                enic_fm_flow_free(fm, flow);
        }
+       while (!LIST_EMPTY(&internal)) {
+               flow = LIST_FIRST(&internal);
+               LIST_REMOVE(flow, next);
+               LIST_INSERT_HEAD(&enic->flows, flow, next);
+       }
+       end_fm(fm);
        return 0;
 }
 
@@ -2257,7 +2714,7 @@ enic_fm_tbl_free(struct enic_flowman *fm, uint64_t handle)
 
        args[0] = FM_MATCH_TABLE_FREE;
        args[1] = handle;
-       rc = vnic_dev_flowman_cmd(fm->enic->vdev, args, 2);
+       rc = flowman_cmd(fm, args, 2);
        if (rc)
                ENICPMD_LOG(ERR, "cannot free table: rc=%d handle=0x%" PRIx64,
                            rc, handle);
@@ -2269,19 +2726,17 @@ enic_fm_tcam_tbl_alloc(struct enic_flowman *fm, uint32_t direction,
                        uint32_t max_entries, uint64_t *handle)
 {
        struct fm_tcam_match_table *tcam_tbl;
-       struct enic *enic;
        uint64_t args[2];
        int rc;
 
        ENICPMD_FUNC_TRACE();
-       enic = fm->enic;
        tcam_tbl = &fm->cmd.va->fm_tcam_match_table;
        tcam_tbl->ftt_direction = direction;
        tcam_tbl->ftt_stage = FM_STAGE_LAST;
        tcam_tbl->ftt_max_entries = max_entries;
        args[0] = FM_TCAM_TABLE_ALLOC;
        args[1] = fm->cmd.pa;
-       rc = vnic_dev_flowman_cmd(enic->vdev, args, 2);
+       rc = flowman_cmd(fm, args, 2);
        if (rc) {
                ENICPMD_LOG(ERR, "cannot alloc %s TCAM table: rc=%d",
                            (direction == FM_INGRESS) ? "IG" : "EG", rc);
@@ -2293,6 +2748,31 @@ enic_fm_tcam_tbl_alloc(struct enic_flowman *fm, uint32_t direction,
        return 0;
 }
 
+static int
+enic_fm_init_actions(struct enic_flowman *fm)
+{
+       struct rte_hash *a_hash;
+       char name[RTE_HASH_NAMESIZE];
+       struct rte_hash_parameters params = {
+               .entries = FM_MAX_ACTION_TABLE_SIZE,
+               .key_len = sizeof(struct fm_action),
+               .hash_func = rte_jhash,
+               .hash_func_init_val = 0,
+               .socket_id = rte_socket_id(),
+       };
+
+       ENICPMD_FUNC_TRACE();
+       snprintf((char *)name, sizeof(name), "fm-ah-%s",
+                fm->owner_enic->bdf_name);
+       params.name = name;
+
+       a_hash = rte_hash_create(&params);
+       if (a_hash == NULL)
+               return -rte_errno;
+       fm->action_hash = a_hash;
+       return 0;
+}
+
 static int
 enic_fm_init_counters(struct enic_flowman *fm)
 {
@@ -2304,14 +2784,12 @@ enic_fm_init_counters(struct enic_flowman *fm)
 static void
 enic_fm_free_all_counters(struct enic_flowman *fm)
 {
-       struct enic *enic;
        uint64_t args[2];
        int rc;
 
-       enic = fm->enic;
        args[0] = FM_COUNTER_BRK;
        args[1] = 0;
-       rc = vnic_dev_flowman_cmd(enic->vdev, args, 2);
+       rc = flowman_cmd(fm, args, 2);
        if (rc != 0)
                ENICPMD_LOG(ERR, "cannot free counters: rc=%d", rc);
        rte_free(fm->counter_stack);
@@ -2353,6 +2831,7 @@ enic_fm_free_tcam_tables(struct enic_flowman *fm)
 int
 enic_fm_init(struct enic *enic)
 {
+       const struct rte_pci_addr *addr;
        struct enic_flowman *fm;
        uint8_t name[RTE_MEMZONE_NAMESIZE];
        int rc;
@@ -2360,12 +2839,30 @@ enic_fm_init(struct enic *enic)
        if (enic->flow_filter_mode != FILTER_FLOWMAN)
                return 0;
        ENICPMD_FUNC_TRACE();
+       /* Get vnic handle and save for port-id action */
+       if (enic_is_vf_rep(enic))
+               addr = &VF_ENIC_TO_VF_REP(enic)->bdf;
+       else
+               addr = &RTE_ETH_DEV_TO_PCI(enic->rte_dev)->addr;
+       rc = enic_fm_find_vnic(enic, addr, &enic->fm_vnic_handle);
+       if (rc) {
+               ENICPMD_LOG(ERR, "cannot find vnic handle for %x:%x:%x",
+                           addr->bus, addr->devid, addr->function);
+               return rc;
+       }
+       /* Save UIF for egport action */
+       enic->fm_vnic_uif = vnic_dev_uif(enic->vdev);
+       ENICPMD_LOG(DEBUG, "uif %u", enic->fm_vnic_uif);
+       /* Nothing else to do for representor. It will share the PF flowman */
+       if (enic_is_vf_rep(enic))
+               return 0;
        fm = calloc(1, sizeof(*fm));
        if (fm == NULL) {
                ENICPMD_LOG(ERR, "cannot alloc flowman struct");
                return -ENOMEM;
        }
-       fm->enic = enic;
+       fm->owner_enic = enic;
+       rte_spinlock_init(&fm->lock);
        TAILQ_INIT(&fm->fet_list);
        TAILQ_INIT(&fm->jump_list);
        /* Allocate host memory for flowman commands */
@@ -2389,6 +2886,12 @@ enic_fm_init(struct enic *enic)
                ENICPMD_LOG(ERR, "cannot alloc counters");
                goto error_tables;
        }
+       /* set up action handle hash */
+       rc = enic_fm_init_actions(fm);
+       if (rc) {
+               ENICPMD_LOG(ERR, "cannot create action hash, error:%d", rc);
+               goto error_counters;
+       }
        /*
         * One default exact match table for each direction. We hold onto
         * it until close.
@@ -2396,7 +2899,7 @@ enic_fm_init(struct enic *enic)
        rc = enic_fet_alloc(fm, 1, NULL, 128, &fm->default_ig_fet);
        if (rc) {
                ENICPMD_LOG(ERR, "cannot alloc default IG exact match table");
-               goto error_counters;
+               goto error_actions;
        }
        fm->default_ig_fet->ref = 1;
        rc = enic_fet_alloc(fm, 0, NULL, 128, &fm->default_eg_fet);
@@ -2405,11 +2908,14 @@ enic_fm_init(struct enic *enic)
                goto error_ig_fet;
        }
        fm->default_eg_fet->ref = 1;
+       fm->vf_rep_tag = FM_VF_REP_TAG;
        enic->fm = fm;
        return 0;
 
 error_ig_fet:
        enic_fet_free(fm, fm->default_ig_fet);
+error_actions:
+       rte_hash_free(fm->action_hash);
 error_counters:
        enic_fm_free_all_counters(fm);
 error_tables:
@@ -2428,10 +2934,15 @@ enic_fm_destroy(struct enic *enic)
        struct enic_flowman *fm;
        struct enic_fm_fet *fet;
 
+       ENICPMD_FUNC_TRACE();
+       if (enic_is_vf_rep(enic)) {
+               delete_rep_flows(enic);
+               return;
+       }
        if (enic->fm == NULL)
                return;
-       ENICPMD_FUNC_TRACE();
        fm = enic->fm;
+       enic_fm_flow_flush(enic->rte_dev, NULL);
        enic_fet_free(fm, fm->default_eg_fet);
        enic_fet_free(fm, fm->default_ig_fet);
        /* Free all exact match tables still open */
@@ -2441,6 +2952,7 @@ enic_fm_destroy(struct enic *enic)
        }
        enic_fm_free_tcam_tables(fm);
        enic_fm_free_all_counters(fm);
+       rte_hash_free(fm->action_hash);
        enic_free_consistent(enic, sizeof(union enic_flowman_cmd_mem),
                fm->cmd.va, fm->cmd.pa);
        fm->cmd.va = NULL;
@@ -2448,6 +2960,58 @@ enic_fm_destroy(struct enic *enic)
        enic->fm = NULL;
 }
 
+int
+enic_fm_allocate_switch_domain(struct enic *pf)
+{
+       const struct rte_pci_addr *cur_a, *prev_a;
+       struct rte_eth_dev *dev;
+       struct enic *cur, *prev;
+       uint16_t domain_id;
+       uint64_t vnic_h;
+       uint16_t pid;
+       int ret;
+
+       ENICPMD_FUNC_TRACE();
+       if (enic_is_vf_rep(pf))
+               return -EINVAL;
+       cur = pf;
+       cur_a = &RTE_ETH_DEV_TO_PCI(cur->rte_dev)->addr;
+       /* Go through ports and find another PF that is on the same adapter */
+       RTE_ETH_FOREACH_DEV(pid) {
+               dev = &rte_eth_devices[pid];
+               if (!dev_is_enic(dev))
+                       continue;
+               if (dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
+                       continue;
+               if (dev == cur->rte_dev)
+                       continue;
+               /* dev is another PF. Is it on the same adapter? */
+               prev = pmd_priv(dev);
+               prev_a = &RTE_ETH_DEV_TO_PCI(dev)->addr;
+               if (!enic_fm_find_vnic(cur, prev_a, &vnic_h)) {
+                       ENICPMD_LOG(DEBUG, "Port %u (PF BDF %x:%x:%x) and port %u (PF BDF %x:%x:%x domain %u) are on the same VIC",
+                               cur->rte_dev->data->port_id,
+                               cur_a->bus, cur_a->devid, cur_a->function,
+                               dev->data->port_id,
+                               prev_a->bus, prev_a->devid, prev_a->function,
+                               prev->switch_domain_id);
+                       cur->switch_domain_id = prev->switch_domain_id;
+                       return 0;
+               }
+       }
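+       /* No other PF on this adapter found; allocate a new switch domain */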
+       ret = rte_eth_switch_domain_alloc(&domain_id);
+       if (ret) {
+               ENICPMD_LOG(WARNING, "failed to allocate switch domain, error %d",
+                           ret);
+               return ret;
+       }
+       cur->switch_domain_id = domain_id;
+       ENICPMD_LOG(DEBUG, "Port %u (PF BDF %x:%x:%x) is the 1st PF on the VIC. Allocated switch domain id %u",
+                   cur->rte_dev->data->port_id,
+                   cur_a->bus, cur_a->devid, cur_a->function,
+                   domain_id);
+       return ret;
+}
+
 const struct rte_flow_ops enic_fm_flow_ops = {
        .validate = enic_fm_flow_validate,
        .create = enic_fm_flow_create,
@@ -2455,3 +3019,237 @@ const struct rte_flow_ops enic_fm_flow_ops = {
        .flush = enic_fm_flow_flush,
        .query = enic_fm_flow_query,
 };
+
+/* Add a pair of high priority flows that loop representor packets to the VF */
+int
+enic_fm_add_rep2vf_flow(struct enic_vf_representor *vf)
+{
+       struct fm_tcam_match_entry *fm_tcam_entry;
+       struct rte_flow *flow0, *flow1;
+       struct fm_action *fm_action;
+       struct rte_flow_error error;
+       struct rte_flow_attr attrs;
+       struct fm_action_op fm_op;
+       struct enic_flowman *fm;
+       struct enic *pf;
+       uint8_t tag;
+
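+       /*
+        * Two flows implement the representor->VF path: an egress flow
+        * that matches WQ vf->pf_wq_idx and tags + hairpins the packets,
+        * and an ingress flow that matches tagged, hairpinned packets and
+        * steers them to RQ 0 on the VF vnic.
+        */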
+       pf = vf->pf;
+       fm = pf->fm;
+       tag = fm->vf_rep_tag;
+       enic_fm_open_scratch(fm);
+       fm_tcam_entry = &fm->tcam_entry;
+       fm_action = &fm->action;
+       /* Egress rule: match WQ ID and tag+hairpin */
+       fm_tcam_entry->ftm_data.fk_wq_id = vf->pf_wq_idx;
+       fm_tcam_entry->ftm_mask.fk_wq_id = 0xffff;
+       fm_tcam_entry->ftm_flags |= FMEF_COUNTER;
+       memset(&fm_op, 0, sizeof(fm_op));
+       fm_op.fa_op = FMOP_TAG;
+       fm_op.tag.tag = tag;
+       enic_fm_append_action_op(fm, &fm_op, &error);
+       memset(&fm_op, 0, sizeof(fm_op));
+       fm_op.fa_op = FMOP_EG_HAIRPIN;
+       enic_fm_append_action_op(fm, &fm_op, &error);
+       memset(&fm_op, 0, sizeof(fm_op));
+       fm_op.fa_op = FMOP_END;
+       enic_fm_append_action_op(fm, &fm_op, &error);
+       attrs.group = 0;
+       attrs.ingress = 0;
+       attrs.egress = 1;
+       attrs.priority = FM_HIGHEST_PRIORITY;
+       flow0 = enic_fm_flow_add_entry(fm, fm_tcam_entry, fm_action,
+                                      &attrs, &error);
+       enic_fm_close_scratch(fm);
+       if (flow0 == NULL) {
+               ENICPMD_LOG(ERR, "Cannot create flow 0 for representor->VF");
+               return -EINVAL;
+       }
+       LIST_INSERT_HEAD(&pf->flows, flow0, next);
+       /* Make this flow internal, so the user app cannot delete it */
+       flow0->internal = 1;
+       ENICPMD_LOG(DEBUG, "representor->VF %d flow created: wq %d -> tag %d hairpin",
+                   vf->vf_id, vf->pf_wq_idx, tag);
+
+       /* Ingress: steer hairpinned packets to VF RQ 0 */
+       enic_fm_open_scratch(fm);
+       fm_tcam_entry->ftm_flags |= FMEF_COUNTER;
+       fm_tcam_entry->ftm_data.fk_hdrset[0].fk_metadata |= FKM_EG_HAIRPINNED;
+       fm_tcam_entry->ftm_mask.fk_hdrset[0].fk_metadata |= FKM_EG_HAIRPINNED;
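+       /* Match only packets carrying the tag set by the egress rule above */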
+       fm_tcam_entry->ftm_data.fk_packet_tag = tag;
+       fm_tcam_entry->ftm_mask.fk_packet_tag = 0xff;
+       memset(&fm_op, 0, sizeof(fm_op));
+       fm_op.fa_op = FMOP_RQ_STEER;
+       fm_op.rq_steer.rq_index = 0;
+       fm_op.rq_steer.vnic_handle = vf->enic.fm_vnic_handle;
+       enic_fm_append_action_op(fm, &fm_op, &error);
+       memset(&fm_op, 0, sizeof(fm_op));
+       fm_op.fa_op = FMOP_END;
+       enic_fm_append_action_op(fm, &fm_op, &error);
+       attrs.group = 0;
+       attrs.ingress = 1;
+       attrs.egress = 0;
+       attrs.priority = FM_HIGHEST_PRIORITY;
+       flow1 = enic_fm_flow_add_entry(fm, fm_tcam_entry, fm_action,
+                                      &attrs, &error);
+       enic_fm_close_scratch(fm);
+       if (flow1 == NULL) {
+               ENICPMD_LOG(ERR, "Cannot create flow 1 for representor->VF");
+               enic_fm_flow_destroy(pf->rte_dev, flow0, &error);
+               return -EINVAL;
+       }
+       LIST_INSERT_HEAD(&pf->flows, flow1, next);
+       flow1->internal = 1;
+       ENICPMD_LOG(DEBUG, "representor->VF %d flow created: tag %d hairpinned -> VF RQ %d",
+                   vf->vf_id, tag, fm_op.rq_steer.rq_index);
+       vf->rep2vf_flow[0] = flow0;
+       vf->rep2vf_flow[1] = flow1;
+       /* Done with this tag, use a different one next time */
+       fm->vf_rep_tag++;
+       return 0;
+}
+
+/*
+ * Add a pair of flows that match all packets from the VF (at low priority)
+ * and loop them back to the representor.
+ */
+int
+enic_fm_add_vf2rep_flow(struct enic_vf_representor *vf)
+{
+       struct fm_tcam_match_entry *fm_tcam_entry;
+       struct rte_flow *flow0, *flow1;
+       struct fm_action *fm_action;
+       struct rte_flow_error error;
+       struct rte_flow_attr attrs;
+       struct fm_action_op fm_op;
+       struct enic_flowman *fm;
+       struct enic *pf;
+       uint8_t tag;
+
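+       /*
+        * Mirror image of the rep2vf flows above: an egress flow matches
+        * packets transmitted by the VF vnic and tags + hairpins them, and
+        * an ingress flow matches the tag and steers the packets to PF RQ
+        * vf->pf_rq_sop_idx.
+        */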
+       pf = vf->pf;
+       fm = pf->fm;
+       tag = fm->vf_rep_tag;
+       enic_fm_open_scratch(fm);
+       fm_tcam_entry = &fm->tcam_entry;
+       fm_action = &fm->action;
+       /* Egress rule: match-any and tag+hairpin */
+       fm_tcam_entry->ftm_data.fk_wq_id = 0;
+       fm_tcam_entry->ftm_mask.fk_wq_id = 0xffff;
+       fm_tcam_entry->ftm_data.fk_wq_vnic = vf->enic.fm_vnic_handle;
+       fm_tcam_entry->ftm_flags |= FMEF_COUNTER;
+       memset(&fm_op, 0, sizeof(fm_op));
+       fm_op.fa_op = FMOP_TAG;
+       fm_op.tag.tag = tag;
+       enic_fm_append_action_op(fm, &fm_op, &error);
+       memset(&fm_op, 0, sizeof(fm_op));
+       fm_op.fa_op = FMOP_EG_HAIRPIN;
+       enic_fm_append_action_op(fm, &fm_op, &error);
+       memset(&fm_op, 0, sizeof(fm_op));
+       fm_op.fa_op = FMOP_END;
+       enic_fm_append_action_op(fm, &fm_op, &error);
+       attrs.group = 0;
+       attrs.ingress = 0;
+       attrs.egress = 1;
+       attrs.priority = FM_LOWEST_PRIORITY;
+       flow0 = enic_fm_flow_add_entry(fm, fm_tcam_entry, fm_action,
+                                      &attrs, &error);
+       enic_fm_close_scratch(fm);
+       if (flow0 == NULL) {
+               ENICPMD_LOG(ERR, "Cannot create flow 0 for VF->representor");
+               return -EINVAL;
+       }
+       LIST_INSERT_HEAD(&pf->flows, flow0, next);
+       /* Make this flow internal, so the user app cannot delete it */
+       flow0->internal = 1;
+       ENICPMD_LOG(DEBUG, "VF %d->representor flow created: wq %d (low prio) -> tag %d hairpin",
+                   vf->vf_id, fm_tcam_entry->ftm_data.fk_wq_id, tag);
+
+       /* Ingress: steer hairpinned packets to the VF rep RQ */
+       enic_fm_open_scratch(fm);
+       fm_tcam_entry->ftm_flags |= FMEF_COUNTER;
+       fm_tcam_entry->ftm_data.fk_hdrset[0].fk_metadata |= FKM_EG_HAIRPINNED;
+       fm_tcam_entry->ftm_mask.fk_hdrset[0].fk_metadata |= FKM_EG_HAIRPINNED;
+       fm_tcam_entry->ftm_data.fk_packet_tag = tag;
+       fm_tcam_entry->ftm_mask.fk_packet_tag = 0xff;
+       memset(&fm_op, 0, sizeof(fm_op));
+       fm_op.fa_op = FMOP_RQ_STEER;
+       fm_op.rq_steer.rq_index = vf->pf_rq_sop_idx;
+       fm_op.rq_steer.vnic_handle = pf->fm_vnic_handle;
+       enic_fm_append_action_op(fm, &fm_op, &error);
+       memset(&fm_op, 0, sizeof(fm_op));
+       fm_op.fa_op = FMOP_END;
+       enic_fm_append_action_op(fm, &fm_op, &error);
+       attrs.group = 0;
+       attrs.ingress = 1;
+       attrs.egress = 0;
+       attrs.priority = FM_HIGHEST_PRIORITY;
+       flow1 = enic_fm_flow_add_entry(fm, fm_tcam_entry, fm_action,
+                                      &attrs, &error);
+       enic_fm_close_scratch(fm);
+       if (flow1 == NULL) {
+               ENICPMD_LOG(ERR, "Cannot create flow 1 for VF->representor");
+               enic_fm_flow_destroy(pf->rte_dev, flow0, &error);
+               return -EINVAL;
+       }
+       LIST_INSERT_HEAD(&pf->flows, flow1, next);
+       flow1->internal = 1;
+       ENICPMD_LOG(DEBUG, "VF %d->representor flow created: tag %d hairpinned -> PF RQ %d",
+                   vf->vf_id, tag, vf->pf_rq_sop_idx);
+       vf->vf2rep_flow[0] = flow0;
+       vf->vf2rep_flow[1] = flow1;
+       /* Done with this tag, use a different one next time */
+       fm->vf_rep_tag++;
+       return 0;
+}
+
+/* Destroy representor flows created by enic_fm_add_{rep2vf,vf2rep}_flow */
+static void
+delete_rep_flows(struct enic *enic)
+{
+       struct enic_vf_representor *vf;
+       struct rte_flow_error error;
+       struct rte_eth_dev *dev;
+       uint32_t i;
+
+       RTE_ASSERT(enic_is_vf_rep(enic));
+       vf = VF_ENIC_TO_VF_REP(enic);
+       dev = vf->pf->rte_dev;
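+       /* The flows sit on the PF's flow list, so destroy them via the PF ethdev */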
+       for (i = 0; i < ARRAY_SIZE(vf->vf2rep_flow); i++) {
+               if (vf->vf2rep_flow[i])
+                       enic_fm_flow_destroy(dev, vf->vf2rep_flow[i], &error);
+       }
+       for (i = 0; i < ARRAY_SIZE(vf->rep2vf_flow); i++) {
+               if (vf->rep2vf_flow[i])
+                       enic_fm_flow_destroy(dev, vf->rep2vf_flow[i], &error);
+       }
+}
+
+static struct enic_flowman *
+begin_fm(struct enic *enic)
+{
+       struct enic_vf_representor *vf;
+       struct enic_flowman *fm;
+
+       /* Representor uses PF flowman */
+       if (enic_is_vf_rep(enic)) {
+               vf = VF_ENIC_TO_VF_REP(enic);
+               fm = vf->pf->fm;
+       } else {
+               fm = enic->fm;
+       }
+       /* Save the API caller and lock if representors exist */
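+       /* fm may be NULL, e.g. when flowman is not initialized for this port */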
+       if (fm) {
+               if (fm->owner_enic->switchdev_mode)
+                       rte_spinlock_lock(&fm->lock);
+               fm->user_enic = enic;
+       }
+       return fm;
+}
+
+static void
+end_fm(struct enic_flowman *fm)
+{
+       fm->user_enic = NULL;
+       if (fm->owner_enic->switchdev_mode)
+               rte_spinlock_unlock(&fm->lock);
+}