net/enic: enable flow API for VF representor
diff --git a/drivers/net/enic/enic_fm_flow.c b/drivers/net/enic/enic_fm_flow.c
index c0ddfe9..e299b32 100644
--- a/drivers/net/enic/enic_fm_flow.c
+++ b/drivers/net/enic/enic_fm_flow.c
 
 #define FM_INVALID_HANDLE 0
 
+/* Low priority used for implicit VF -> representor flow */
+#define FM_LOWEST_PRIORITY 100000
+
+/* High priority used for implicit representor -> VF flow */
+#define FM_HIGHEST_PRIORITY 0
+
+/* Tag used for implicit VF <-> representor flows */
+#define FM_VF_REP_TAG 1
+
 /*
  * Flow exact match tables (FET) in the VIC and rte_flow groups.
  * Use a simple scheme to map groups to tables.
@@ -110,8 +119,20 @@ union enic_flowman_cmd_mem {
        struct fm_action fm_action;
 };
 
+/*
+ * PF has a flowman instance, and VF representors share it with PF.
+ * PF allocates this structure and owns it. VF representors borrow
+ * the PF's structure during API calls (e.g. create, query).
+ */
 struct enic_flowman {
-       struct enic *enic;
+       struct enic *owner_enic; /* PF */
+       struct enic *user_enic;  /* API caller (PF or representor) */
+       /*
+        * Representors and the PF share the same underlying flowman.
+        * API calls take this lock to serialize access to it. It is
+        * only taken when VF representors are present.
+        */
+       rte_spinlock_t lock;
        /* Command buffer */
        struct {
                union enic_flowman_cmd_mem *va;
@@ -143,9 +164,20 @@ struct enic_flowman {
        struct fm_action action;
        struct fm_action action_tmp; /* enic_fm_reorder_action_op */
        int action_op_count;
+       /* Next tag to assign to representor flows */
+       uint8_t vf_rep_tag;
 };
 
 static int enic_fm_tbl_free(struct enic_flowman *fm, uint64_t handle);
+/*
+ * API functions (create, destroy, validate, flush) call begin_fm()
+ * on entry to record the calling enic (PF or VF representor) and take
+ * the lock. On exit, they call end_fm() to release it.
+ */
+static struct enic_flowman *begin_fm(struct enic *enic);
+static void end_fm(struct enic_flowman *fm);
+/* Delete internal flows created for representor paths */
+static void delete_rep_flows(struct enic *enic);
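/*
 * A minimal usage sketch of the begin_fm()/end_fm() pattern described
 * above, mirroring the flow API entry points further down. Illustration
 * only; example_fm_api_call() is a hypothetical name, not driver code.
 */
static int example_fm_api_call(struct enic *enic)
{
	struct enic_flowman *fm;

	fm = begin_fm(enic);     /* save caller, lock if representors exist */
	if (fm == NULL)
		return -ENOTSUP; /* flowman is not available on this vNIC */
	/* ... operate on the shared PF flowman (scratch state, VIC cmds) ... */
	end_fm(fm);              /* clear caller and unlock */
	return 0;
}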
 
 /*
  * Common arguments passed to copy_item functions. Use this structure
@@ -193,6 +225,7 @@ static const enum rte_flow_action_type enic_fm_supported_ig_actions[] = {
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_JUMP,
        RTE_FLOW_ACTION_TYPE_MARK,
+       RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
        RTE_FLOW_ACTION_TYPE_PORT_ID,
        RTE_FLOW_ACTION_TYPE_PASSTHRU,
        RTE_FLOW_ACTION_TYPE_QUEUE,
@@ -208,6 +241,9 @@ static const enum rte_flow_action_type enic_fm_supported_eg_actions[] = {
        RTE_FLOW_ACTION_TYPE_COUNT,
        RTE_FLOW_ACTION_TYPE_DROP,
        RTE_FLOW_ACTION_TYPE_JUMP,
+       RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
+       RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
+       RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
        RTE_FLOW_ACTION_TYPE_PASSTHRU,
        RTE_FLOW_ACTION_TYPE_VOID,
        RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
@@ -623,6 +659,12 @@ enic_fm_copy_item_raw(struct copy_item_args *arg)
        return 0;
 }
 
+static int
+flowman_cmd(struct enic_flowman *fm, uint64_t *args, int nargs)
+{
+       return vnic_dev_flowman_cmd(fm->owner_enic->vdev, args, nargs);
+}
+
 static int
 enic_fet_alloc(struct enic_flowman *fm, uint8_t ingress,
               struct fm_key_template *key, int entries,
@@ -661,7 +703,7 @@ enic_fet_alloc(struct enic_flowman *fm, uint8_t ingress,
 
        args[0] = FM_EXACT_TABLE_ALLOC;
        args[1] = fm->cmd.pa;
-       ret = vnic_dev_flowman_cmd(fm->enic->vdev, args, 2);
+       ret = flowman_cmd(fm, args, 2);
        if (ret) {
                ENICPMD_LOG(ERR, "cannot alloc exact match table: rc=%d", ret);
                free(fet);
@@ -870,46 +912,36 @@ enic_fm_append_action_op(struct enic_flowman *fm,
        return 0;
 }
 
-/* Steer operations need to appear before other ops */
+/*
+ * The NIC requires the first steer op to appear before any decap op.
+ * A valid order: steer, decap, steer, steer, ...
+ */
 static void
 enic_fm_reorder_action_op(struct enic_flowman *fm)
 {
-       struct fm_action_op *dst, *dst_head, *src, *src_head;
+       struct fm_action_op *op, *steer, *decap;
+       struct fm_action_op tmp_op;
 
        ENICPMD_FUNC_TRACE();
-       /* Move steer ops to the front. */
-       src = fm->action.fma_action_ops;
-       src_head = src;
-       dst = fm->action_tmp.fma_action_ops;
-       dst_head = dst;
-       /* Copy steer ops to tmp */
-       while (src->fa_op != FMOP_END) {
-               if (src->fa_op == FMOP_RQ_STEER) {
-                       ENICPMD_LOG(DEBUG, "move op: %ld -> dst %ld",
-                                   (long)(src - src_head),
-                                   (long)(dst - dst_head));
-                       *dst = *src;
-                       dst++;
-               }
-               src++;
-       }
-       /* Then append non-steer ops */
-       src = src_head;
-       while (src->fa_op != FMOP_END) {
-               if (src->fa_op != FMOP_RQ_STEER) {
-                       ENICPMD_LOG(DEBUG, "move op: %ld -> dst %ld",
-                                   (long)(src - src_head),
-                                   (long)(dst - dst_head));
-                       *dst = *src;
-                       dst++;
-               }
-               src++;
+       /* Find 1st steer and decap */
+       op = fm->action.fma_action_ops;
+       steer = NULL;
+       decap = NULL;
+       while (op->fa_op != FMOP_END) {
+               if (!decap && op->fa_op == FMOP_DECAP_NOSTRIP)
+                       decap = op;
+               else if (!steer && op->fa_op == FMOP_RQ_STEER)
+                       steer = op;
+               op++;
+       }
+       /* If decap is before steer, swap */
+       if (steer && decap && decap < steer) {
+               op = fm->action.fma_action_ops;
+               ENICPMD_LOG(DEBUG, "swap decap %ld <-> steer %ld",
+                           (long)(decap - op), (long)(steer - op));
+               tmp_op = *decap;
+               *decap = *steer;
+               *steer = tmp_op;
        }
-       /* Copy END */
-       *dst = *src;
-       /* Finally replace the original action with the reordered one */
-       memcpy(fm->action.fma_action_ops, fm->action_tmp.fma_action_ops,
-              sizeof(fm->action.fma_action_ops));
 }
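/*
 * Worked illustration (hypothetical op lists, not driver code): an action
 * list built as { FMOP_DECAP_NOSTRIP, FMOP_RQ_STEER, FMOP_END } is
 * reordered by the swap above to { FMOP_RQ_STEER, FMOP_DECAP_NOSTRIP,
 * FMOP_END }, so the first steer precedes the decap as the NIC requires.
 * Any additional steer ops are left in place.
 */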
 
 /* VXLAN decap is done via flowman compound action */
@@ -1079,7 +1111,8 @@ enic_fm_find_vnic(struct enic *enic, const struct rte_pci_addr *addr,
        args[1] = bdf;
        rc = vnic_dev_flowman_cmd(enic->vdev, args, 2);
        if (rc != 0) {
-               ENICPMD_LOG(ERR, "allocating counters rc=%d", rc);
+               /* Expected to fail if BDF is not on the adapter */
+               ENICPMD_LOG(DEBUG, "cannot find vnic handle: rc=%d", rc);
                return rc;
        }
        *handle = args[0];
@@ -1096,25 +1129,34 @@ enic_fm_copy_action(struct enic_flowman *fm,
 {
        enum {
                FATE = 1 << 0,
-               MARK = 1 << 1,
+               DECAP = 1 << 1,
                PASSTHRU = 1 << 2,
                COUNT = 1 << 3,
                ENCAP = 1 << 4,
+               PUSH_VLAN = 1 << 5,
+               PORT_ID = 1 << 6,
        };
        struct fm_tcam_match_entry *fmt;
        struct fm_action_op fm_op;
+       bool need_ovlan_action;
        struct enic *enic;
        uint32_t overlap;
        uint64_t vnic_h;
+       uint16_t ovlan;
        bool first_rq;
+       bool steer;
        int ret;
 
        ENICPMD_FUNC_TRACE();
        fmt = &fm->tcam_entry;
+       need_ovlan_action = false;
+       ovlan = 0;
        first_rq = true;
-       enic = fm->enic;
+       steer = false;
+       enic = fm->user_enic;
        overlap = 0;
-       vnic_h = 0; /* 0 = current vNIC */
+       vnic_h = enic->fm_vnic_handle;
+
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
@@ -1150,9 +1192,6 @@ enic_fm_copy_action(struct enic_flowman *fm,
                        const struct rte_flow_action_mark *mark =
                                actions->conf;
 
-                       if (overlap & MARK)
-                               goto unsupported;
-                       overlap |= MARK;
                        if (mark->id >= ENIC_MAGIC_FILTER_ID - 1)
                                return rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
@@ -1166,9 +1205,6 @@ enic_fm_copy_action(struct enic_flowman *fm,
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_FLAG: {
-                       if (overlap & MARK)
-                               goto unsupported;
-                       overlap |= MARK;
                        /* ENIC_MAGIC_FILTER_ID is reserved for flagging */
                        memset(&fm_op, 0, sizeof(fm_op));
                        fm_op.fa_op = FMOP_MARK;
@@ -1183,8 +1219,8 @@ enic_fm_copy_action(struct enic_flowman *fm,
                                actions->conf;
 
                        /*
-                        * If other fate kind is set, fail.  Multiple
-                        * queue actions are ok.
+                        * If a fate other than QUEUE or RSS is set, fail.
+                        * Multiple RSS and queue actions are ok.
                         */
                        if ((overlap & FATE) && first_rq)
                                goto unsupported;
@@ -1194,12 +1230,14 @@ enic_fm_copy_action(struct enic_flowman *fm,
                        fm_op.fa_op = FMOP_RQ_STEER;
                        fm_op.rq_steer.rq_index =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
+                       fm_op.rq_steer.rq_count = 1;
                        fm_op.rq_steer.vnic_handle = vnic_h;
                        ret = enic_fm_append_action_op(fm, &fm_op, error);
                        if (ret)
                                return ret;
                        ENICPMD_LOG(DEBUG, "create QUEUE action rq: %u",
                                    fm_op.rq_steer.rq_index);
+                       steer = true;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_DROP: {
@@ -1228,37 +1266,54 @@ enic_fm_copy_action(struct enic_flowman *fm,
                        uint16_t i;
 
                        /*
-                        * Hardware does not support general RSS actions, but
-                        * we can still support the dummy one that is used to
-                        * "receive normally".
+                        * If a fate other than QUEUE or RSS is set, fail.
+                        * Multiple RSS and queue actions are ok.
+                        */
+                       if ((overlap & FATE) && first_rq)
+                               goto unsupported;
+                       first_rq = false;
+                       overlap |= FATE;
+
+                       /*
+                        * Hardware only supports RSS actions on outer level
+                        * with default type and function. Queues must be
+                        * sequential.
                         */
                        allow = rss->func == RTE_ETH_HASH_FUNCTION_DEFAULT &&
-                               rss->level == 0 &&
-                               (rss->types == 0 ||
-                                rss->types == enic->rss_hf) &&
-                               rss->queue_num == enic->rq_count &&
-                               rss->key_len == 0;
-                       /* Identity queue map is ok */
-                       for (i = 0; i < rss->queue_num; i++)
-                               allow = allow && (i == rss->queue[i]);
+                               rss->level == 0 && (rss->types == 0 ||
+                               rss->types == enic->rss_hf) &&
+                               rss->queue_num <= enic->rq_count &&
+                               rss->queue[rss->queue_num - 1] < enic->rq_count;
+
+                       /* The queue map must be sequential */
+                       for (i = 1; i < rss->queue_num; i++)
+                               allow = allow && (rss->queue[i] ==
+                                       rss->queue[i - 1] + 1);
                        if (!allow)
                                goto unsupported;
-                       if (overlap & FATE)
-                               goto unsupported;
-                       /* Need MARK or FLAG */
-                       if (!(overlap & MARK))
-                               goto unsupported;
-                       overlap |= FATE;
+
+                       memset(&fm_op, 0, sizeof(fm_op));
+                       fm_op.fa_op = FMOP_RQ_STEER;
+                       fm_op.rq_steer.rq_index =
+                               enic_rte_rq_idx_to_sop_idx(rss->queue[0]);
+                       fm_op.rq_steer.rq_count = rss->queue_num;
+                       fm_op.rq_steer.vnic_handle = vnic_h;
+                       ret = enic_fm_append_action_op(fm, &fm_op, error);
+                       if (ret)
+                               return ret;
+                       ENICPMD_LOG(DEBUG, "create RSS action rq: %u",
+                                   fm_op.rq_steer.rq_index);
+                       steer = true;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_PORT_ID: {
                        const struct rte_flow_action_port_id *port;
-                       struct rte_pci_device *pdev;
                        struct rte_eth_dev *dev;
 
                        port = actions->conf;
                        if (port->original) {
-                               vnic_h = 0; /* This port */
+                               vnic_h = enic->fm_vnic_handle; /* This port */
                                break;
                        }
                        ENICPMD_LOG(DEBUG, "port id %u", port->id);
@@ -1273,15 +1328,25 @@ enic_fm_copy_action(struct enic_flowman *fm,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
                                        NULL, "port_id is not enic");
                        }
-                       pdev = RTE_ETH_DEV_TO_PCI(dev);
-                       if (enic_fm_find_vnic(enic, &pdev->addr, &vnic_h)) {
+                       if (enic->switch_domain_id !=
+                           pmd_priv(dev)->switch_domain_id) {
                                return rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
-                                       NULL, "port_id is not vnic");
+                                       NULL, "destination and source ports are not in the same switch domain");
                        }
+                       vnic_h = pmd_priv(dev)->fm_vnic_handle;
+                       overlap |= PORT_ID;
+                       /*
+                        * Ingress. Nothing more to do. We add an implicit
+                        * steer at the end if needed.
+                        */
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: {
+                       if (overlap & DECAP)
+                               goto unsupported;
+                       overlap |= DECAP;
+
                        ret = enic_fm_copy_vxlan_decap(fm, fmt, actions,
                                error);
                        if (ret != 0)
@@ -1301,13 +1366,86 @@ enic_fm_copy_action(struct enic_flowman *fm,
                                return ret;
                        break;
                }
+               case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN: {
+                       memset(&fm_op, 0, sizeof(fm_op));
+                       fm_op.fa_op = FMOP_POP_VLAN;
+                       ret = enic_fm_append_action_op(fm, &fm_op, error);
+                       if (ret)
+                               return ret;
+                       break;
+               }
+               case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: {
+                       const struct rte_flow_action_of_push_vlan *vlan;
+
+                       if (overlap & PASSTHRU)
+                               goto unsupported;
+                       vlan = actions->conf;
+                       if (vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN)) {
+                               return rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ACTION,
+                                       NULL, "unexpected push_vlan ethertype");
+                       }
+                       overlap |= PUSH_VLAN;
+                       need_ovlan_action = true;
+                       break;
+               }
+               case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: {
+                       const struct rte_flow_action_of_set_vlan_pcp *pcp;
+
+                       pcp = actions->conf;
+                       if (pcp->vlan_pcp > 7) {
+                               return rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ACTION,
+                                       NULL, "invalid vlan_pcp");
+                       }
+                       need_ovlan_action = true;
+                       ovlan |= ((uint16_t)pcp->vlan_pcp) << 13;
+                       break;
+               }
+               case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: {
+                       const struct rte_flow_action_of_set_vlan_vid *vid;
+
+                       vid = actions->conf;
+                       need_ovlan_action = true;
+                       ovlan |= rte_be_to_cpu_16(vid->vlan_vid);
+                       break;
+               }
                default:
                        goto unsupported;
                }
        }
 
-       if (!(overlap & (FATE | PASSTHRU | COUNT)))
+       if (!(overlap & (FATE | PASSTHRU | COUNT | PORT_ID)))
                goto unsupported;
+       /* Egress from VF: need implicit WQ match */
+       if (enic_is_vf_rep(enic) && !ingress) {
+               fmt->ftm_data.fk_wq_id = 0;
+               fmt->ftm_mask.fk_wq_id = 0xffff;
+               fmt->ftm_data.fk_wq_vnic = enic->fm_vnic_handle;
+               ENICPMD_LOG(DEBUG, "add implicit wq id match for vf %d",
+                           VF_ENIC_TO_VF_REP(enic)->vf_id);
+       }
+       if (need_ovlan_action) {
+               memset(&fm_op, 0, sizeof(fm_op));
+               fm_op.fa_op = FMOP_SET_OVLAN;
+               fm_op.ovlan.vlan = ovlan;
+               ret = enic_fm_append_action_op(fm, &fm_op, error);
+               if (ret)
+                       return ret;
+       }
+       /* Add steer op for PORT_ID without QUEUE */
+       if ((overlap & PORT_ID) && !steer && ingress) {
+               memset(&fm_op, 0, sizeof(fm_op));
+               /* Always to queue 0 for now as generic RSS is not available */
+               fm_op.fa_op = FMOP_RQ_STEER;
+               fm_op.rq_steer.rq_index = 0;
+               fm_op.rq_steer.vnic_handle = vnic_h;
+               ret = enic_fm_append_action_op(fm, &fm_op, error);
+               if (ret)
+                       return ret;
+               ENICPMD_LOG(DEBUG, "add implicit steer op");
+       }
+       /* Add required END */
        memset(&fm_op, 0, sizeof(fm_op));
        fm_op.fa_op = FMOP_END;
        ret = enic_fm_append_action_op(fm, &fm_op, error);
@@ -1354,6 +1492,13 @@ enic_fm_dump_tcam_actions(const struct fm_action *fm_action)
                [FMOP_ENCAP] = "encap",
                [FMOP_SET_OVLAN] = "set_ovlan",
                [FMOP_DECAP_NOSTRIP] = "decap_nostrip",
+               [FMOP_DECAP_STRIP] = "decap_strip",
+               [FMOP_POP_VLAN] = "pop_vlan",
+               [FMOP_SET_EGPORT] = "set_egport",
+               [FMOP_RQ_STEER_ONLY] = "rq_steer_only",
+               [FMOP_SET_ENCAP_VLAN] = "set_encap_vlan",
+               [FMOP_EMIT] = "emit",
+               [FMOP_MODIFY] = "modify",
        };
        const struct fm_action_op *op = &fm_action->fma_action_ops[0];
        char buf[128], *bp = buf;
@@ -1504,7 +1649,7 @@ enic_fm_dump_tcam_entry(const struct fm_tcam_match_entry *fm_match,
                        const struct fm_action *fm_action,
                        uint8_t ingress)
 {
-       if (rte_log_get_level(enic_pmd_logtype) < (int)RTE_LOG_DEBUG)
+       if (!rte_log_can_log(enic_pmd_logtype, RTE_LOG_DEBUG))
                return;
        enic_fm_dump_tcam_match(fm_match, ingress);
        enic_fm_dump_tcam_actions(fm_action);
@@ -1543,7 +1688,7 @@ enic_fm_flow_parse(struct enic_flowman *fm,
                                           NULL,
                                           "priorities are not supported");
                        return -rte_errno;
-               } else if (attrs->transfer) {
+               } else if (!fm->owner_enic->switchdev_mode && attrs->transfer) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                                           NULL,
@@ -1600,12 +1745,10 @@ enic_fm_more_counters(struct enic_flowman *fm)
 {
        struct enic_fm_counter *new_stack;
        struct enic_fm_counter *ctrs;
-       struct enic *enic;
        int i, rc;
        uint64_t args[2];
 
        ENICPMD_FUNC_TRACE();
-       enic = fm->enic;
        new_stack = rte_realloc(fm->counter_stack, (fm->counters_alloced +
                                FM_COUNTERS_EXPAND) *
                                sizeof(struct enic_fm_counter), 0);
@@ -1617,7 +1760,7 @@ enic_fm_more_counters(struct enic_flowman *fm)
 
        args[0] = FM_COUNTER_BRK;
        args[1] = fm->counters_alloced + FM_COUNTERS_EXPAND;
-       rc = vnic_dev_flowman_cmd(enic->vdev, args, 2);
+       rc = flowman_cmd(fm, args, 2);
        if (rc != 0) {
                ENICPMD_LOG(ERR, "cannot alloc counters rc=%d", rc);
                return rc;
@@ -1637,16 +1780,14 @@ enic_fm_more_counters(struct enic_flowman *fm)
 static int
 enic_fm_counter_zero(struct enic_flowman *fm, struct enic_fm_counter *c)
 {
-       struct enic *enic;
        uint64_t args[3];
        int ret;
 
        ENICPMD_FUNC_TRACE();
-       enic = fm->enic;
        args[0] = FM_COUNTER_QUERY;
        args[1] = c->handle;
        args[2] = 1; /* clear */
-       ret = vnic_dev_flowman_cmd(enic->vdev, args, 3);
+       ret = flowman_cmd(fm, args, 3);
        if (ret) {
                ENICPMD_LOG(ERR, "counter init: rc=%d handle=0x%x",
                            ret, c->handle);
@@ -1686,7 +1827,7 @@ enic_fm_action_free(struct enic_flowman *fm, uint64_t handle)
        ENICPMD_FUNC_TRACE();
        args[0] = FM_ACTION_FREE;
        args[1] = handle;
-       rc = vnic_dev_flowman_cmd(fm->enic->vdev, args, 2);
+       rc = flowman_cmd(fm, args, 2);
        if (rc)
                ENICPMD_LOG(ERR, "cannot free action: rc=%d handle=0x%" PRIx64,
                            rc, handle);
@@ -1702,7 +1843,7 @@ enic_fm_entry_free(struct enic_flowman *fm, uint64_t handle)
        ENICPMD_FUNC_TRACE();
        args[0] = FM_MATCH_ENTRY_REMOVE;
        args[1] = handle;
-       rc = vnic_dev_flowman_cmd(fm->enic->vdev, args, 2);
+       rc = flowman_cmd(fm, args, 2);
        if (rc)
                ENICPMD_LOG(ERR, "cannot free match entry: rc=%d"
                            " handle=0x%" PRIx64, rc, handle);
@@ -1806,7 +1947,7 @@ enic_fm_add_tcam_entry(struct enic_flowman *fm,
        args[0] = FM_TCAM_ENTRY_INSTALL;
        args[1] = ingress ? fm->ig_tcam_hndl : fm->eg_tcam_hndl;
        args[2] = fm->cmd.pa;
-       ret = vnic_dev_flowman_cmd(fm->enic->vdev, args, 3);
+       ret = flowman_cmd(fm, args, 3);
        if (ret != 0) {
                ENICPMD_LOG(ERR, "cannot add %s TCAM entry: rc=%d",
                            ingress ? "ingress" : "egress", ret);
@@ -1856,7 +1997,7 @@ enic_fm_add_exact_entry(struct enic_flowman *fm,
        args[0] = FM_EXACT_ENTRY_INSTALL;
        args[1] = fet->handle;
        args[2] = fm->cmd.pa;
-       ret = vnic_dev_flowman_cmd(fm->enic->vdev, args, 3);
+       ret = flowman_cmd(fm, args, 3);
        if (ret != 0) {
                ENICPMD_LOG(ERR, "cannot add %s exact entry: group=%u",
                            fet->ingress ? "ingress" : "egress", fet->group);
@@ -1895,7 +2036,7 @@ __enic_fm_flow_add_entry(struct enic_flowman *fm,
        memcpy(fma, action_in, sizeof(*fma));
        args[0] = FM_ACTION_ALLOC;
        args[1] = fm->cmd.pa;
-       ret = vnic_dev_flowman_cmd(fm->enic->vdev, args, 2);
+       ret = flowman_cmd(fm, args, 2);
        if (ret != 0) {
                ENICPMD_LOG(ERR, "allocating TCAM table action rc=%d", ret);
                rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
@@ -2065,7 +2206,7 @@ enic_fm_flow_validate(struct rte_eth_dev *dev,
        int ret;
 
        ENICPMD_FUNC_TRACE();
-       fm = pmd_priv(dev)->fm;
+       fm = begin_fm(pmd_priv(dev));
        if (fm == NULL)
                return -ENOTSUP;
        enic_fm_open_scratch(fm);
@@ -2077,6 +2218,7 @@ enic_fm_flow_validate(struct rte_eth_dev *dev,
                                        attrs->ingress);
        }
        enic_fm_close_scratch(fm);
+       end_fm(fm);
        return ret;
 }
 
@@ -2087,33 +2229,38 @@ enic_fm_flow_query_count(struct rte_eth_dev *dev,
 {
        struct rte_flow_query_count *query;
        struct enic_fm_flow *fm_flow;
-       struct enic *enic;
+       struct enic_flowman *fm;
        uint64_t args[3];
        int rc;
 
        ENICPMD_FUNC_TRACE();
-       enic = pmd_priv(dev);
+       fm = begin_fm(pmd_priv(dev));
        query = data;
        fm_flow = flow->fm;
-       if (!fm_flow->counter_valid)
-               return rte_flow_error_set(error, ENOTSUP,
+       if (!fm_flow->counter_valid) {
+               rc = rte_flow_error_set(error, ENOTSUP,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                        "enic: flow does not have counter");
+               goto exit;
+       }
 
        args[0] = FM_COUNTER_QUERY;
        args[1] = fm_flow->counter->handle;
        args[2] = query->reset;
-       rc = vnic_dev_flowman_cmd(enic->vdev, args, 3);
+       rc = flowman_cmd(fm, args, 3);
        if (rc) {
                ENICPMD_LOG(ERR, "cannot query counter: rc=%d handle=0x%x",
                            rc, fm_flow->counter->handle);
-               return rc;
+               goto exit;
        }
        query->hits_set = 1;
        query->hits = args[0];
        query->bytes_set = 1;
        query->bytes = args[1];
-       return 0;
+       rc = 0;
+exit:
+       end_fm(fm);
+       return rc;
 }
 
 static int
@@ -2162,7 +2309,7 @@ enic_fm_flow_create(struct rte_eth_dev *dev,
 
        ENICPMD_FUNC_TRACE();
        enic = pmd_priv(dev);
-       fm = enic->fm;
+       fm = begin_fm(enic);
        if (fm == NULL) {
                rte_flow_error_set(error, ENOTSUP,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -2200,6 +2347,7 @@ enic_fm_flow_create(struct rte_eth_dev *dev,
 
 error_with_scratch:
        enic_fm_close_scratch(fm);
+       end_fm(fm);
        return flow;
 }
 
@@ -2208,12 +2356,15 @@ enic_fm_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
                     __rte_unused struct rte_flow_error *error)
 {
        struct enic *enic = pmd_priv(dev);
+       struct enic_flowman *fm;
 
        ENICPMD_FUNC_TRACE();
-       if (enic->fm == NULL)
+       fm = begin_fm(enic);
+       if (fm == NULL)
                return 0;
        LIST_REMOVE(flow, next);
-       enic_fm_flow_free(enic->fm, flow);
+       enic_fm_flow_free(fm, flow);
+       end_fm(fm);
        return 0;
 }
 
@@ -2221,19 +2372,27 @@ static int
 enic_fm_flow_flush(struct rte_eth_dev *dev,
                   __rte_unused struct rte_flow_error *error)
 {
+       LIST_HEAD(enic_flows, rte_flow) internal;
        struct enic_fm_flow *fm_flow;
        struct enic_flowman *fm;
        struct rte_flow *flow;
        struct enic *enic = pmd_priv(dev);
 
        ENICPMD_FUNC_TRACE();
-       if (enic->fm == NULL)
+
+       fm = begin_fm(enic);
+       if (fm == NULL)
                return 0;
-       fm = enic->fm;
+       /* Destroy all non-internal flows */
+       LIST_INIT(&internal);
        while (!LIST_EMPTY(&enic->flows)) {
                flow = LIST_FIRST(&enic->flows);
                fm_flow = flow->fm;
                LIST_REMOVE(flow, next);
+               if (flow->internal) {
+                       LIST_INSERT_HEAD(&internal, flow, next);
+                       continue;
+               }
                /*
                 * If tables are null, then vNIC is closing, and the firmware
                 * has already cleaned up flowman state. So do not try to free
@@ -2246,6 +2405,12 @@ enic_fm_flow_flush(struct rte_eth_dev *dev,
                }
                enic_fm_flow_free(fm, flow);
        }
+       while (!LIST_EMPTY(&internal)) {
+               flow = LIST_FIRST(&internal);
+               LIST_REMOVE(flow, next);
+               LIST_INSERT_HEAD(&enic->flows, flow, next);
+       }
+       end_fm(fm);
        return 0;
 }
 
@@ -2257,7 +2422,7 @@ enic_fm_tbl_free(struct enic_flowman *fm, uint64_t handle)
 
        args[0] = FM_MATCH_TABLE_FREE;
        args[1] = handle;
-       rc = vnic_dev_flowman_cmd(fm->enic->vdev, args, 2);
+       rc = flowman_cmd(fm, args, 2);
        if (rc)
                ENICPMD_LOG(ERR, "cannot free table: rc=%d handle=0x%" PRIx64,
                            rc, handle);
@@ -2269,19 +2434,17 @@ enic_fm_tcam_tbl_alloc(struct enic_flowman *fm, uint32_t direction,
                        uint32_t max_entries, uint64_t *handle)
 {
        struct fm_tcam_match_table *tcam_tbl;
-       struct enic *enic;
        uint64_t args[2];
        int rc;
 
        ENICPMD_FUNC_TRACE();
-       enic = fm->enic;
        tcam_tbl = &fm->cmd.va->fm_tcam_match_table;
        tcam_tbl->ftt_direction = direction;
        tcam_tbl->ftt_stage = FM_STAGE_LAST;
        tcam_tbl->ftt_max_entries = max_entries;
        args[0] = FM_TCAM_TABLE_ALLOC;
        args[1] = fm->cmd.pa;
-       rc = vnic_dev_flowman_cmd(enic->vdev, args, 2);
+       rc = flowman_cmd(fm, args, 2);
        if (rc) {
                ENICPMD_LOG(ERR, "cannot alloc %s TCAM table: rc=%d",
                            (direction == FM_INGRESS) ? "IG" : "EG", rc);
@@ -2304,14 +2467,12 @@ enic_fm_init_counters(struct enic_flowman *fm)
 static void
 enic_fm_free_all_counters(struct enic_flowman *fm)
 {
-       struct enic *enic;
        uint64_t args[2];
        int rc;
 
-       enic = fm->enic;
        args[0] = FM_COUNTER_BRK;
        args[1] = 0;
-       rc = vnic_dev_flowman_cmd(enic->vdev, args, 2);
+       rc = flowman_cmd(fm, args, 2);
        if (rc != 0)
                ENICPMD_LOG(ERR, "cannot free counters: rc=%d", rc);
        rte_free(fm->counter_stack);
@@ -2353,6 +2514,7 @@ enic_fm_free_tcam_tables(struct enic_flowman *fm)
 int
 enic_fm_init(struct enic *enic)
 {
+       const struct rte_pci_addr *addr;
        struct enic_flowman *fm;
        uint8_t name[RTE_MEMZONE_NAMESIZE];
        int rc;
@@ -2360,12 +2522,30 @@ enic_fm_init(struct enic *enic)
        if (enic->flow_filter_mode != FILTER_FLOWMAN)
                return 0;
        ENICPMD_FUNC_TRACE();
+       /* Get vnic handle and save for port-id action */
+       if (enic_is_vf_rep(enic))
+               addr = &VF_ENIC_TO_VF_REP(enic)->bdf;
+       else
+               addr = &RTE_ETH_DEV_TO_PCI(enic->rte_dev)->addr;
+       rc = enic_fm_find_vnic(enic, addr, &enic->fm_vnic_handle);
+       if (rc) {
+               ENICPMD_LOG(ERR, "cannot find vnic handle for %x:%x:%x",
+                           addr->bus, addr->devid, addr->function);
+               return rc;
+       }
+       /* Save UIF for egport action */
+       enic->fm_vnic_uif = vnic_dev_uif(enic->vdev);
+       ENICPMD_LOG(DEBUG, "uif %u", enic->fm_vnic_uif);
+       /* Nothing else to do for representor. It will share the PF flowman */
+       if (enic_is_vf_rep(enic))
+               return 0;
        fm = calloc(1, sizeof(*fm));
        if (fm == NULL) {
                ENICPMD_LOG(ERR, "cannot alloc flowman struct");
                return -ENOMEM;
        }
-       fm->enic = enic;
+       fm->owner_enic = enic;
+       rte_spinlock_init(&fm->lock);
        TAILQ_INIT(&fm->fet_list);
        TAILQ_INIT(&fm->jump_list);
        /* Allocate host memory for flowman commands */
@@ -2405,6 +2585,7 @@ enic_fm_init(struct enic *enic)
                goto error_ig_fet;
        }
        fm->default_eg_fet->ref = 1;
+       fm->vf_rep_tag = FM_VF_REP_TAG;
        enic->fm = fm;
        return 0;
 
@@ -2428,9 +2609,13 @@ enic_fm_destroy(struct enic *enic)
        struct enic_flowman *fm;
        struct enic_fm_fet *fet;
 
+       ENICPMD_FUNC_TRACE();
+       if (enic_is_vf_rep(enic)) {
+               delete_rep_flows(enic);
+               return;
+       }
        if (enic->fm == NULL)
                return;
-       ENICPMD_FUNC_TRACE();
        fm = enic->fm;
        enic_fet_free(fm, fm->default_eg_fet);
        enic_fet_free(fm, fm->default_ig_fet);
@@ -2448,6 +2633,58 @@ enic_fm_destroy(struct enic *enic)
        enic->fm = NULL;
 }
 
+int
+enic_fm_allocate_switch_domain(struct enic *pf)
+{
+       const struct rte_pci_addr *cur_a, *prev_a;
+       struct rte_eth_dev *dev;
+       struct enic *cur, *prev;
+       uint16_t domain_id;
+       uint64_t vnic_h;
+       uint16_t pid;
+       int ret;
+
+       ENICPMD_FUNC_TRACE();
+       if (enic_is_vf_rep(pf))
+               return -EINVAL;
+       cur = pf;
+       cur_a = &RTE_ETH_DEV_TO_PCI(cur->rte_dev)->addr;
+       /* Go through ports and find another PF that is on the same adapter */
+       RTE_ETH_FOREACH_DEV(pid) {
+               dev = &rte_eth_devices[pid];
+               if (!dev_is_enic(dev))
+                       continue;
+               if (dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
+                       continue;
+               if (dev == cur->rte_dev)
+                       continue;
+               /* dev is another PF. Is it on the same adapter? */
+               prev = pmd_priv(dev);
+               prev_a = &RTE_ETH_DEV_TO_PCI(dev)->addr;
+               if (!enic_fm_find_vnic(cur, prev_a, &vnic_h)) {
+                       ENICPMD_LOG(DEBUG, "Port %u (PF BDF %x:%x:%x) and port %u (PF BDF %x:%x:%x domain %u) are on the same VIC",
+                               cur->rte_dev->data->port_id,
+                               cur_a->bus, cur_a->devid, cur_a->function,
+                               dev->data->port_id,
+                               prev_a->bus, prev_a->devid, prev_a->function,
+                               prev->switch_domain_id);
+                       cur->switch_domain_id = prev->switch_domain_id;
+                       return 0;
+               }
+       }
+       ret = rte_eth_switch_domain_alloc(&domain_id);
+       if (ret) {
+               ENICPMD_LOG(WARNING, "failed to allocate switch domain: rc=%d",
+                           ret);
+       }
+       cur->switch_domain_id = domain_id;
+       ENICPMD_LOG(DEBUG, "Port %u (PF BDF %x:%x:%x) is the 1st PF on the VIC. Allocated switch domain id %u",
+                   cur->rte_dev->data->port_id,
+                   cur_a->bus, cur_a->devid, cur_a->function,
+                   domain_id);
+       return ret;
+}
+
 const struct rte_flow_ops enic_fm_flow_ops = {
        .validate = enic_fm_flow_validate,
        .create = enic_fm_flow_create,
@@ -2455,3 +2692,237 @@ const struct rte_flow_ops enic_fm_flow_ops = {
        .flush = enic_fm_flow_flush,
        .query = enic_fm_flow_query,
 };
+
+/* Add a high priority flow that loops representor packets to VF */
+int
+enic_fm_add_rep2vf_flow(struct enic_vf_representor *vf)
+{
+       struct fm_tcam_match_entry *fm_tcam_entry;
+       struct rte_flow *flow0, *flow1;
+       struct fm_action *fm_action;
+       struct rte_flow_error error;
+       struct rte_flow_attr attrs;
+       struct fm_action_op fm_op;
+       struct enic_flowman *fm;
+       struct enic *pf;
+       uint8_t tag;
+
+       pf = vf->pf;
+       fm = pf->fm;
+       tag = fm->vf_rep_tag;
+       enic_fm_open_scratch(fm);
+       fm_tcam_entry = &fm->tcam_entry;
+       fm_action = &fm->action;
+       /* Egress rule: match WQ ID and tag+hairpin */
+       fm_tcam_entry->ftm_data.fk_wq_id = vf->pf_wq_idx;
+       fm_tcam_entry->ftm_mask.fk_wq_id = 0xffff;
+       fm_tcam_entry->ftm_flags |= FMEF_COUNTER;
+       memset(&fm_op, 0, sizeof(fm_op));
+       fm_op.fa_op = FMOP_TAG;
+       fm_op.tag.tag = tag;
+       enic_fm_append_action_op(fm, &fm_op, &error);
+       memset(&fm_op, 0, sizeof(fm_op));
+       fm_op.fa_op = FMOP_EG_HAIRPIN;
+       enic_fm_append_action_op(fm, &fm_op, &error);
+       memset(&fm_op, 0, sizeof(fm_op));
+       fm_op.fa_op = FMOP_END;
+       enic_fm_append_action_op(fm, &fm_op, &error);
+       attrs.group = 0;
+       attrs.ingress = 0;
+       attrs.egress = 1;
+       attrs.priority = FM_HIGHEST_PRIORITY;
+       flow0 = enic_fm_flow_add_entry(fm, fm_tcam_entry, fm_action,
+                                      &attrs, &error);
+       enic_fm_close_scratch(fm);
+       if (flow0 == NULL) {
+               ENICPMD_LOG(ERR, "Cannot create flow 0 for representor->VF");
+               return -EINVAL;
+       }
+       LIST_INSERT_HEAD(&pf->flows, flow0, next);
+       /* Make this flow internal, so the user app cannot delete it */
+       flow0->internal = 1;
+       ENICPMD_LOG(DEBUG, "representor->VF %d flow created: wq %d -> tag %d hairpin",
+                   vf->vf_id, vf->pf_wq_idx, tag);
+
+       /* Ingress: steer hairpinned to VF RQ 0 */
+       enic_fm_open_scratch(fm);
+       fm_tcam_entry->ftm_flags |= FMEF_COUNTER;
+       fm_tcam_entry->ftm_data.fk_hdrset[0].fk_metadata |= FKM_EG_HAIRPINNED;
+       fm_tcam_entry->ftm_mask.fk_hdrset[0].fk_metadata |= FKM_EG_HAIRPINNED;
+       fm_tcam_entry->ftm_data.fk_packet_tag = tag;
+       fm_tcam_entry->ftm_mask.fk_packet_tag = 0xff;
+       memset(&fm_op, 0, sizeof(fm_op));
+       fm_op.fa_op = FMOP_RQ_STEER;
+       fm_op.rq_steer.rq_index = 0;
+       fm_op.rq_steer.vnic_handle = vf->enic.fm_vnic_handle;
+       enic_fm_append_action_op(fm, &fm_op, &error);
+       memset(&fm_op, 0, sizeof(fm_op));
+       fm_op.fa_op = FMOP_END;
+       enic_fm_append_action_op(fm, &fm_op, &error);
+       attrs.group = 0;
+       attrs.ingress = 1;
+       attrs.egress = 0;
+       attrs.priority = FM_HIGHEST_PRIORITY;
+       flow1 = enic_fm_flow_add_entry(fm, fm_tcam_entry, fm_action,
+                                      &attrs, &error);
+       enic_fm_close_scratch(fm);
+       if (flow1 == NULL) {
+               ENICPMD_LOG(ERR, "Cannot create flow 1 for representor->VF");
+               enic_fm_flow_destroy(pf->rte_dev, flow0, &error);
+               return -EINVAL;
+       }
+       LIST_INSERT_HEAD(&pf->flows, flow1, next);
+       flow1->internal = 1;
+       ENICPMD_LOG(DEBUG, "representor->VF %d flow created: tag %d hairpinned -> VF RQ %d",
+                   vf->vf_id, tag, fm_op.rq_steer.rq_index);
+       vf->rep2vf_flow[0] = flow0;
+       vf->rep2vf_flow[1] = flow1;
+       /* Done with this tag, use a different one next time */
+       fm->vf_rep_tag++;
+       return 0;
+}
+
+/*
+ * Add a low priority flow that matches all packets from VF and loops them
+ * back to the representor.
+ */
+int
+enic_fm_add_vf2rep_flow(struct enic_vf_representor *vf)
+{
+       struct fm_tcam_match_entry *fm_tcam_entry;
+       struct rte_flow *flow0, *flow1;
+       struct fm_action *fm_action;
+       struct rte_flow_error error;
+       struct rte_flow_attr attrs;
+       struct fm_action_op fm_op;
+       struct enic_flowman *fm;
+       struct enic *pf;
+       uint8_t tag;
+
+       pf = vf->pf;
+       fm = pf->fm;
+       tag = fm->vf_rep_tag;
+       enic_fm_open_scratch(fm);
+       fm_tcam_entry = &fm->tcam_entry;
+       fm_action = &fm->action;
+       /* Egress rule: match-any and tag+hairpin */
+       fm_tcam_entry->ftm_data.fk_wq_id = 0;
+       fm_tcam_entry->ftm_mask.fk_wq_id = 0xffff;
+       fm_tcam_entry->ftm_data.fk_wq_vnic = vf->enic.fm_vnic_handle;
+       fm_tcam_entry->ftm_flags |= FMEF_COUNTER;
+       memset(&fm_op, 0, sizeof(fm_op));
+       fm_op.fa_op = FMOP_TAG;
+       fm_op.tag.tag = tag;
+       enic_fm_append_action_op(fm, &fm_op, &error);
+       memset(&fm_op, 0, sizeof(fm_op));
+       fm_op.fa_op = FMOP_EG_HAIRPIN;
+       enic_fm_append_action_op(fm, &fm_op, &error);
+       memset(&fm_op, 0, sizeof(fm_op));
+       fm_op.fa_op = FMOP_END;
+       enic_fm_append_action_op(fm, &fm_op, &error);
+       attrs.group = 0;
+       attrs.ingress = 0;
+       attrs.egress = 1;
+       attrs.priority = FM_LOWEST_PRIORITY;
+       flow0 = enic_fm_flow_add_entry(fm, fm_tcam_entry, fm_action,
+                                      &attrs, &error);
+       enic_fm_close_scratch(fm);
+       if (flow0 == NULL) {
+               ENICPMD_LOG(ERR, "Cannot create flow 0 for VF->representor");
+               return -EINVAL;
+       }
+       LIST_INSERT_HEAD(&pf->flows, flow0, next);
+       /* Make this flow internal, so the user app cannot delete it */
+       flow0->internal = 1;
+       ENICPMD_LOG(DEBUG, "VF %d->representor flow created: wq %d (low prio) -> tag %d hairpin",
+                   vf->vf_id, fm_tcam_entry->ftm_data.fk_wq_id, tag);
+
+       /* Ingress: steer hairpinned to VF rep RQ */
+       enic_fm_open_scratch(fm);
+       fm_tcam_entry->ftm_flags |= FMEF_COUNTER;
+       fm_tcam_entry->ftm_data.fk_hdrset[0].fk_metadata |= FKM_EG_HAIRPINNED;
+       fm_tcam_entry->ftm_mask.fk_hdrset[0].fk_metadata |= FKM_EG_HAIRPINNED;
+       fm_tcam_entry->ftm_data.fk_packet_tag = tag;
+       fm_tcam_entry->ftm_mask.fk_packet_tag = 0xff;
+       memset(&fm_op, 0, sizeof(fm_op));
+       fm_op.fa_op = FMOP_RQ_STEER;
+       fm_op.rq_steer.rq_index = vf->pf_rq_sop_idx;
+       fm_op.rq_steer.vnic_handle = pf->fm_vnic_handle;
+       enic_fm_append_action_op(fm, &fm_op, &error);
+       memset(&fm_op, 0, sizeof(fm_op));
+       fm_op.fa_op = FMOP_END;
+       enic_fm_append_action_op(fm, &fm_op, &error);
+       attrs.group = 0;
+       attrs.ingress = 1;
+       attrs.egress = 0;
+       attrs.priority = FM_HIGHEST_PRIORITY;
+       flow1 = enic_fm_flow_add_entry(fm, fm_tcam_entry, fm_action,
+                                      &attrs, &error);
+       enic_fm_close_scratch(fm);
+       if (flow1 == NULL) {
+               ENICPMD_LOG(ERR, "Cannot create flow 1 for VF->representor");
+               enic_fm_flow_destroy(pf->rte_dev, flow0, &error);
+               return -EINVAL;
+       }
+       LIST_INSERT_HEAD(&pf->flows, flow1, next);
+       flow1->internal = 1;
+       ENICPMD_LOG(DEBUG, "VF %d->representor flow created: tag %d hairpinned -> PF RQ %d",
+                   vf->vf_id, tag, vf->pf_rq_sop_idx);
+       vf->vf2rep_flow[0] = flow0;
+       vf->vf2rep_flow[1] = flow1;
+       /* Done with this tag, use a different one next time */
+       fm->vf_rep_tag++;
+       return 0;
+}
+
+/* Destroy representor flows created by enic_fm_add_{rep2vf,vf2rep}_flow */
+static void
+delete_rep_flows(struct enic *enic)
+{
+       struct enic_vf_representor *vf;
+       struct rte_flow_error error;
+       struct rte_eth_dev *dev;
+       uint32_t i;
+
+       RTE_ASSERT(enic_is_vf_rep(enic));
+       vf = VF_ENIC_TO_VF_REP(enic);
+       dev = vf->pf->rte_dev;
+       for (i = 0; i < ARRAY_SIZE(vf->vf2rep_flow); i++) {
+               if (vf->vf2rep_flow[i])
+                       enic_fm_flow_destroy(dev, vf->vf2rep_flow[i], &error);
+       }
+       for (i = 0; i < ARRAY_SIZE(vf->rep2vf_flow); i++) {
+               if (vf->rep2vf_flow[i])
+                       enic_fm_flow_destroy(dev, vf->rep2vf_flow[i], &error);
+       }
+}
+
+static struct enic_flowman *
+begin_fm(struct enic *enic)
+{
+       struct enic_vf_representor *vf;
+       struct enic_flowman *fm;
+
+       /* Representor uses PF flowman */
+       if (enic_is_vf_rep(enic)) {
+               vf = VF_ENIC_TO_VF_REP(enic);
+               fm = vf->pf->fm;
+       } else {
+               fm = enic->fm;
+       }
+       /* Save the API caller and lock if representors exist */
+       if (fm) {
+               if (fm->owner_enic->switchdev_mode)
+                       rte_spinlock_lock(&fm->lock);
+               fm->user_enic = enic;
+       }
+       return fm;
+}
+
+static void
+end_fm(struct enic_flowman *fm)
+{
+       fm->user_enic = NULL;
+       if (fm->owner_enic->switchdev_mode)
+               rte_spinlock_unlock(&fm->lock);
+}
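/*
 * Application-level sketch (hypothetical port number and pattern, not
 * driver code) of what this patch enables: calling the rte_flow API on a
 * VF representor port. begin_fm() routes the call to the PF's shared
 * flowman instance, and the resulting steer targets the VF's queues.
 */
static int example_rep_flow(uint16_t rep_port_id)
{
	static const struct rte_flow_attr attr = { .ingress = 1 };
	static const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	return rte_flow_create(rep_port_id, &attr, pattern, actions,
			       &error) != NULL ? 0 : -rte_errno;
}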