net/mlx5: fix port action for LAG
[dpdk.git] / drivers / net / mlx5 / mlx5_flow_dv.c
index aa00b3e..4dcd640 100644
@@ -2209,7 +2209,7 @@ flow_dv_validate_action_raw_decap(uint64_t action_flags,
 /**
  * Find existing encap/decap resource or create and register a new one.
  *
- * @param dev[in, out]
+ * @param[in, out] dev
  *   Pointer to rte_eth_dev structure.
  * @param[in, out] resource
  *   Pointer to encap/decap resource.
@@ -2290,10 +2290,10 @@ flow_dv_encap_decap_resource_register
 /**
  * Find existing table jump resource or create and register a new one.
  *
- * @param dev[in, out]
+ * @param[in, out] dev
  *   Pointer to rte_eth_dev structure.
- * @param[in, out] resource
- *   Pointer to jump table resource.
+ * @param[in, out] tbl
+ *   Pointer to flow table resource.
  * @parm[in, out] dev_flow
  *   Pointer to the dev_flow.
  * @param[out] error
@@ -2304,56 +2304,41 @@ flow_dv_encap_decap_resource_register
  */
 static int
 flow_dv_jump_tbl_resource_register
-                       (struct rte_eth_dev *dev,
-                        struct mlx5_flow_dv_jump_tbl_resource *resource,
+                       (struct rte_eth_dev *dev __rte_unused,
+                        struct mlx5_flow_tbl_resource *tbl,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
 {
-       struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_ibv_shared *sh = priv->sh;
-       struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
+       struct mlx5_flow_tbl_data_entry *tbl_data =
+               container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
+       int cnt;
 
-       /* Lookup a matching resource from cache. */
-       LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
-               if (resource->tbl == cache_resource->tbl) {
-                       DRV_LOG(DEBUG, "jump table resource resource %p: refcnt %d++",
-                               (void *)cache_resource,
-                               rte_atomic32_read(&cache_resource->refcnt));
-                       rte_atomic32_inc(&cache_resource->refcnt);
-                       dev_flow->dv.jump = cache_resource;
-                       return 0;
-               }
-       }
-       /* Register new jump table resource. */
-       cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
-       if (!cache_resource)
-               return rte_flow_error_set(error, ENOMEM,
-                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-                                         "cannot allocate resource memory");
-       *cache_resource = *resource;
-       cache_resource->action =
-               mlx5_glue->dr_create_flow_action_dest_flow_tbl
-               (resource->tbl->obj);
-       if (!cache_resource->action) {
-               rte_free(cache_resource);
-               return rte_flow_error_set(error, ENOMEM,
-                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-                                         NULL, "cannot create action");
+       assert(tbl);
+       cnt = rte_atomic32_read(&tbl_data->jump.refcnt);
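+       /* Create the jump action only on the first reference. */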
+       if (!cnt) {
+               tbl_data->jump.action =
+                       mlx5_glue->dr_create_flow_action_dest_flow_tbl
+                       (tbl->obj);
+               if (!tbl_data->jump.action)
+                       return rte_flow_error_set(error, ENOMEM,
+                                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                       NULL, "cannot create jump action");
+               DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
+                       (void *)&tbl_data->jump, cnt);
+       } else {
+               assert(tbl_data->jump.action);
+               DRV_LOG(DEBUG, "existed jump table resource %p: refcnt %d++",
+                       (void *)&tbl_data->jump, cnt);
        }
-       rte_atomic32_init(&cache_resource->refcnt);
-       rte_atomic32_inc(&cache_resource->refcnt);
-       LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
-       dev_flow->dv.jump = cache_resource;
-       DRV_LOG(DEBUG, "new jump table  resource %p: refcnt %d++",
-               (void *)cache_resource,
-               rte_atomic32_read(&cache_resource->refcnt));
+       rte_atomic32_inc(&tbl_data->jump.refcnt);
+       dev_flow->dv.jump = &tbl_data->jump;
        return 0;
 }
 
 /**
  * Find existing table port ID resource or create and register a new one.
  *
- * @param dev[in, out]
+ * @param[in, out] dev
  *   Pointer to rte_eth_dev structure.
  * @param[in, out] resource
  *   Pointer to port ID action resource.
@@ -2395,8 +2380,13 @@ flow_dv_port_id_action_resource_register
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "cannot allocate resource memory");
        *cache_resource = *resource;
+       /*
+        * Depending on rdma_core version the glue routine calls
+        * either mlx5dv_dr_action_create_dest_ib_port(domain, ibv_port)
+        * or mlx5dv_dr_action_create_dest_vport(domain, vport_id).
+        */
        cache_resource->action =
-               mlx5_glue->dr_create_flow_action_dest_vport
+               mlx5_glue->dr_create_flow_action_dest_port
                        (priv->sh->fdb_domain, resource->port_id);
        if (!cache_resource->action) {
                rte_free(cache_resource);
@@ -2417,7 +2407,7 @@ flow_dv_port_id_action_resource_register
 /**
  * Find existing push vlan resource or create and register a new one.
  *
- * @param dev[in, out]
+ * @param[in, out] dev
  *   Pointer to rte_eth_dev structure.
  * @param[in, out] resource
  *   Pointer to port ID action resource.
@@ -3255,8 +3245,6 @@ flow_dv_validate_action_jump(const struct rte_flow_action *action,
                             const struct rte_flow_attr *attributes,
                             bool external, struct rte_flow_error *error)
 {
-       uint32_t max_group = attributes->transfer ? MLX5_MAX_TABLES_FDB :
-                                                   MLX5_MAX_TABLES;
        uint32_t target_group, table;
        int ret = 0;
 
@@ -3266,6 +3254,10 @@ flow_dv_validate_action_jump(const struct rte_flow_action *action,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have 2 fate actions in"
                                          " same flow");
+       if (action_flags & MLX5_FLOW_ACTION_METER)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                         "jump with meter not supported");
        if (!action->conf)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
@@ -3276,10 +3268,6 @@ flow_dv_validate_action_jump(const struct rte_flow_action *action,
                                       &table, error);
        if (ret)
                return ret;
-       if (table >= max_group)
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
-                                         "target group index out of range");
        if (attributes->group == target_group)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
@@ -3379,6 +3367,63 @@ flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev)
        return mlx5_flow_ext_mreg_supported(dev) ? MLX5_MODIFY_NUM :
                                                   MLX5_MODIFY_NUM_NO_MREG;
 }
+
+/**
+ * Validate the meter action.
+ *
+ * @param[in] dev
+ *   Pointer to rte_eth_dev structure.
+ * @param[in] action_flags
+ *   Bit-fields that hold the actions detected until now.
+ * @param[in] action
+ *   Pointer to the meter action.
+ * @param[in] attr
+ *   Attributes of flow that includes this action.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
+                               uint64_t action_flags,
+                               const struct rte_flow_action *action,
+                               const struct rte_flow_attr *attr,
+                               struct rte_flow_error *error)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       const struct rte_flow_action_meter *am = action->conf;
+       struct mlx5_flow_meter *fm = mlx5_flow_meter_find(priv, am->mtr_id);
+
+       if (action_flags & MLX5_FLOW_ACTION_METER)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                         "meter chaining not supported");
+       if (action_flags & MLX5_FLOW_ACTION_JUMP)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                         "meter with jump not supported");
+       if (!priv->mtr_en)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         NULL,
+                                         "meter action not supported");
+       if (!fm)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                         "Meter not found");
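+       /* A meter already in use must have compatible flow attributes. */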
+       if (fm->ref_cnt && (!(fm->attr.transfer == attr->transfer ||
+             (!fm->attr.ingress && !attr->ingress && attr->egress) ||
+             (!fm->attr.egress && !attr->egress && attr->ingress))))
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+                                         "Flow attributes are either invalid "
+                                         "or have a conflict with current "
+                                         "meter attributes");
+       return 0;
+}
+
 /**
  * Find existing modify-header resource or create and register a new one.
  *
@@ -4111,11 +4156,6 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev,
                                          NULL,
                                          "groups are not supported");
 #else
-       uint32_t max_group = attributes->transfer ?
-                            MLX5_MAX_TABLES_FDB :
-                               external ?
-                               MLX5_MAX_TABLES_EXTERNAL :
-                               MLX5_MAX_TABLES;
        uint32_t table;
        int ret;
 
@@ -4124,10 +4164,6 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev,
                                       &table, error);
        if (ret)
                return ret;
-       if (table >= max_group)
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
-                                         "group index out of range");
 #endif
        if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
            attributes->priority >= priority_max)
@@ -4752,6 +4788,16 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
                case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
                        break;
+               case RTE_FLOW_ACTION_TYPE_METER:
+                       ret = mlx5_flow_validate_action_meter(dev,
+                                                             action_flags,
+                                                             actions, attr,
+                                                             error);
+                       if (ret < 0)
+                               return ret;
+                       action_flags |= MLX5_FLOW_ACTION_METER;
+                       ++actions_n;
+                       break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
@@ -6101,7 +6147,7 @@ flow_dv_matcher_enable(uint32_t *match_criteria)
 /**
  * Get a flow table.
  *
- * @param dev[in, out]
+ * @param[in, out] dev
  *   Pointer to rte_eth_dev structure.
  * @param[in] table_id
  *   Table id to use.
@@ -6124,47 +6170,76 @@ flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;
        struct mlx5_flow_tbl_resource *tbl;
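+       /* Combine the table id, direction and domain into a 64-bit key. */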
+       union mlx5_flow_tbl_key table_key = {
+               {
+                       .table_id = table_id,
+                       .reserved = 0,
+                       .domain = !!transfer,
+                       .direction = !!egress,
+               }
+       };
+       struct mlx5_hlist_entry *pos = mlx5_hlist_lookup(sh->flow_tbls,
+                                                        table_key.v64);
+       struct mlx5_flow_tbl_data_entry *tbl_data;
+       int ret;
+       void *domain;
 
-#ifdef HAVE_MLX5DV_DR
-       if (transfer) {
-               tbl = &sh->fdb_tbl[table_id];
-               if (!tbl->obj)
-                       tbl->obj = mlx5_glue->dr_create_flow_tbl
-                               (sh->fdb_domain, table_id);
-       } else if (egress) {
-               tbl = &sh->tx_tbl[table_id];
-               if (!tbl->obj)
-                       tbl->obj = mlx5_glue->dr_create_flow_tbl
-                               (sh->tx_domain, table_id);
-       } else {
-               tbl = &sh->rx_tbl[table_id];
-               if (!tbl->obj)
-                       tbl->obj = mlx5_glue->dr_create_flow_tbl
-                               (sh->rx_domain, table_id);
+       if (pos) {
+               tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
+                                       entry);
+               tbl = &tbl_data->tbl;
+               rte_atomic32_inc(&tbl->refcnt);
+               return tbl;
        }
-       if (!tbl->obj) {
+       tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
+       if (!tbl_data) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-                                  NULL, "cannot create table");
+                                  NULL,
+                                  "cannot allocate flow table data entry");
                return NULL;
        }
-       rte_atomic32_inc(&tbl->refcnt);
-       return tbl;
-#else
-       (void)error;
-       (void)tbl;
+       tbl = &tbl_data->tbl;
+       pos = &tbl_data->entry;
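+       /* Select the steering domain matching the table attributes. */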
        if (transfer)
-               return &sh->fdb_tbl[table_id];
+               domain = sh->fdb_domain;
        else if (egress)
-               return &sh->tx_tbl[table_id];
+               domain = sh->tx_domain;
        else
-               return &sh->rx_tbl[table_id];
-#endif
+               domain = sh->rx_domain;
+       tbl->obj = mlx5_glue->dr_create_flow_tbl(domain, table_id);
+       if (!tbl->obj) {
+               rte_flow_error_set(error, ENOMEM,
+                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                  NULL, "cannot create flow table object");
+               rte_free(tbl_data);
+               return NULL;
+       }
+       /*
+        * No multi-thread access for now, but still better to initialize
+        * the reference count before inserting it into the hash list.
+        */
+       rte_atomic32_init(&tbl->refcnt);
+       /* Jump action reference count is initialized here. */
+       rte_atomic32_init(&tbl_data->jump.refcnt);
+       pos->key = table_key.v64;
+       ret = mlx5_hlist_insert(sh->flow_tbls, pos);
+       if (ret < 0) {
+               rte_flow_error_set(error, -ret,
+                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                  "cannot insert flow table data entry");
+               mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
+               rte_free(tbl_data);
+               return NULL;
+       }
+       rte_atomic32_inc(&tbl->refcnt);
+       return tbl;
 }
 
 /**
  * Release a flow table.
  *
+ * @param[in] dev
+ *   Pointer to rte_eth_dev structure.
  * @param[in] tbl
  *   Table resource to be released.
  *
@@ -6172,13 +6247,24 @@ flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
  *   Returns 0 if table was released, else return 1;
  */
 static int
-flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
+flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
+                            struct mlx5_flow_tbl_resource *tbl)
 {
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_ibv_shared *sh = priv->sh;
+       struct mlx5_flow_tbl_data_entry *tbl_data =
+               container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
+
        if (!tbl)
                return 0;
        if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
+               struct mlx5_hlist_entry *pos = &tbl_data->entry;
+
                mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
                tbl->obj = NULL;
+               /* Remove the entry from the hash list and free memory. */
+               mlx5_hlist_remove(sh->flow_tbls, pos);
+               rte_free(tbl_data);
                return 0;
        }
        return 1;
@@ -6187,10 +6273,12 @@ flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
 /**
  * Register the flow matcher.
  *
- * @param dev[in, out]
+ * @param[in, out] dev
  *   Pointer to rte_eth_dev structure.
  * @param[in, out] matcher
  *   Pointer to flow matcher.
+ * @param[in, out] key
+ *   Pointer to flow table key.
  * @parm[in, out] dev_flow
  *   Pointer to the dev_flow.
  * @param[out] error
@@ -6202,6 +6290,7 @@ flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
 static int
 flow_dv_matcher_register(struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_matcher *matcher,
+                        union mlx5_flow_tbl_key *key,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
 {
@@ -6212,69 +6301,73 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
                .type = IBV_FLOW_ATTR_NORMAL,
                .match_mask = (void *)&matcher->mask,
        };
-       struct mlx5_flow_tbl_resource *tbl = NULL;
+       struct mlx5_flow_tbl_resource *tbl;
+       struct mlx5_flow_tbl_data_entry *tbl_data;
 
+       tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
+                                      key->domain, error);
+       if (!tbl)
+               return -rte_errno;      /* No need to refill the error info */
+       tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
        /* Lookup from cache. */
-       LIST_FOREACH(cache_matcher, &sh->matchers, next) {
+       LIST_FOREACH(cache_matcher, &tbl_data->matchers, next) {
                if (matcher->crc == cache_matcher->crc &&
                    matcher->priority == cache_matcher->priority &&
-                   matcher->egress == cache_matcher->egress &&
-                   matcher->group == cache_matcher->group &&
-                   matcher->transfer == cache_matcher->transfer &&
                    !memcmp((const void *)matcher->mask.buf,
                            (const void *)cache_matcher->mask.buf,
                            cache_matcher->mask.size)) {
                        DRV_LOG(DEBUG,
-                               "priority %hd use %s matcher %p: refcnt %d++",
+                               "%s group %u priority %hd use %s "
+                               "matcher %p: refcnt %d++",
+                               key->domain ? "FDB" : "NIC", key->table_id,
                                cache_matcher->priority,
-                               cache_matcher->egress ? "tx" : "rx",
+                               key->direction ? "tx" : "rx",
                                (void *)cache_matcher,
                                rte_atomic32_read(&cache_matcher->refcnt));
                        rte_atomic32_inc(&cache_matcher->refcnt);
                        dev_flow->dv.matcher = cache_matcher;
+                       /* A reused matcher must not add a table reference. */
+                       flow_dv_tbl_resource_release(dev, tbl);
                        return 0;
                }
        }
        /* Register new matcher. */
        cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
-       if (!cache_matcher)
+       if (!cache_matcher) {
+               flow_dv_tbl_resource_release(dev, tbl);
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "cannot allocate matcher memory");
-       tbl = flow_dv_tbl_resource_get(dev, matcher->group,
-                                      matcher->egress, matcher->transfer,
-                                      error);
-       if (!tbl) {
-               rte_free(cache_matcher);
-               return rte_flow_error_set(error, ENOMEM,
-                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-                                         NULL, "cannot create table");
        }
        *cache_matcher = *matcher;
        dv_attr.match_criteria_enable =
                flow_dv_matcher_enable(cache_matcher->mask.buf);
        dv_attr.priority = matcher->priority;
-       if (matcher->egress)
+       if (key->direction)
                dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
        cache_matcher->matcher_object =
                mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
        if (!cache_matcher->matcher_object) {
                rte_free(cache_matcher);
 #ifdef HAVE_MLX5DV_DR
-               flow_dv_tbl_resource_release(tbl);
+               flow_dv_tbl_resource_release(dev, tbl);
 #endif
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, "cannot create matcher");
        }
+       /* Save the table information. */
+       cache_matcher->tbl = tbl;
+       rte_atomic32_init(&cache_matcher->refcnt);
+       /* only matcher ref++, table ref++ already done above in get API. */
        rte_atomic32_inc(&cache_matcher->refcnt);
-       LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
+       LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
        dev_flow->dv.matcher = cache_matcher;
-       DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
+       DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
+               key->domain ? "FDB" : "NIC", key->table_id,
                cache_matcher->priority,
-               cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
+               key->direction ? "tx" : "rx", (void *)cache_matcher,
                rte_atomic32_read(&cache_matcher->refcnt));
-       rte_atomic32_inc(&tbl->refcnt);
        return 0;
 }
 
@@ -6283,8 +6376,8 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
  *
  * @param dev[in, out]
  *   Pointer to rte_eth_dev structure.
- * @param[in, out] resource
- *   Pointer to tag resource.
+ * @param[in, out] tag_be24
+ *   Tag value converted to big-endian and right-shifted by 8 bits.
  * @parm[in, out] dev_flow
  *   Pointer to the dev_flow.
  * @param[out] error
@@ -6296,34 +6389,35 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
 static int
 flow_dv_tag_resource_register
                        (struct rte_eth_dev *dev,
-                        struct mlx5_flow_dv_tag_resource *resource,
+                        uint32_t tag_be24,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;
        struct mlx5_flow_dv_tag_resource *cache_resource;
+       struct mlx5_hlist_entry *entry;
 
        /* Lookup a matching resource from cache. */
-       LIST_FOREACH(cache_resource, &sh->tags, next) {
-               if (resource->tag == cache_resource->tag) {
-                       DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
-                               (void *)cache_resource,
-                               rte_atomic32_read(&cache_resource->refcnt));
-                       rte_atomic32_inc(&cache_resource->refcnt);
-                       dev_flow->dv.tag_resource = cache_resource;
-                       return 0;
-               }
+       entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24);
+       if (entry) {
+               cache_resource = container_of
+                       (entry, struct mlx5_flow_dv_tag_resource, entry);
+               rte_atomic32_inc(&cache_resource->refcnt);
+               dev_flow->dv.tag_resource = cache_resource;
+               DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
+                       (void *)cache_resource,
+                       rte_atomic32_read(&cache_resource->refcnt));
+               return 0;
        }
-       /* Register new  resource. */
+       /* Register new resource. */
        cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
        if (!cache_resource)
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "cannot allocate resource memory");
-       *cache_resource = *resource;
-       cache_resource->action = mlx5_glue->dv_create_flow_action_tag
-               (resource->tag);
+       cache_resource->entry.key = (uint64_t)tag_be24;
+       cache_resource->action = mlx5_glue->dv_create_flow_action_tag(tag_be24);
        if (!cache_resource->action) {
                rte_free(cache_resource);
                return rte_flow_error_set(error, ENOMEM,
@@ -6332,9 +6426,15 @@ flow_dv_tag_resource_register
        }
        rte_atomic32_init(&cache_resource->refcnt);
        rte_atomic32_inc(&cache_resource->refcnt);
-       LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
+       if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) {
+               mlx5_glue->destroy_flow_action(cache_resource->action);
+               rte_free(cache_resource);
+               return rte_flow_error_set(error, EEXIST,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         NULL, "cannot insert tag");
+       }
        dev_flow->dv.tag_resource = cache_resource;
-       DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
+       DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
                (void *)cache_resource,
                rte_atomic32_read(&cache_resource->refcnt));
        return 0;
@@ -6355,13 +6455,16 @@ static int
 flow_dv_tag_release(struct rte_eth_dev *dev,
                    struct mlx5_flow_dv_tag_resource *tag)
 {
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_ibv_shared *sh = priv->sh;
+
        assert(tag);
        DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
                dev->data->port_id, (void *)tag,
                rte_atomic32_read(&tag->refcnt));
        if (rte_atomic32_dec_and_test(&tag->refcnt)) {
                claim_zero(mlx5_glue->destroy_flow_action(tag->action));
-               LIST_REMOVE(tag, next);
+               mlx5_hlist_remove(sh->tag_table, &tag->entry);
                DRV_LOG(DEBUG, "port %u tag %p: removed",
                        dev->data->port_id, (void *)tag);
                rte_free(tag);
@@ -6403,10 +6506,20 @@ flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL,
                                          "No eswitch info was found for port");
-       if (priv->vport_meta_mask)
-               *dst_port_id = priv->vport_meta_tag;
-       else
-               *dst_port_id = priv->vport_id;
+#ifdef HAVE_MLX5DV_DR_DEVX_PORT
+       /*
+        * This parameter is transferred to
+        * mlx5dv_dr_action_create_dest_ib_port().
+        */
+       *dst_port_id = priv->ibv_port;
+#else
+       /*
+        * Legacy mode, no LAG configurations is supported.
+        * This parameter is transferred to
+        * mlx5dv_dr_action_create_dest_vport().
+        */
+       *dst_port_id = priv->vport_id;
+#endif
        return 0;
 }
 
@@ -6502,7 +6615,8 @@ __flow_dv_translate(struct rte_eth_dev *dev,
                                          MLX5DV_FLOW_TABLE_TYPE_NIC_RX
        };
        union flow_dv_attr flow_attr = { .attr = 0 };
-       struct mlx5_flow_dv_tag_resource tag_resource;
+       uint32_t tag_be;
+       union mlx5_flow_tbl_key tbl_key;
        uint32_t modify_action_position = UINT32_MAX;
        void *match_mask = matcher.mask.buf;
        void *match_value = dev_flow->dv.value.buf;
@@ -6527,7 +6641,7 @@ __flow_dv_translate(struct rte_eth_dev *dev,
                const struct rte_flow_action_count *count = action->conf;
                const uint8_t *rss_key;
                const struct rte_flow_action_jump *jump_data;
-               struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
+               const struct rte_flow_action_meter *mtr;
                struct mlx5_flow_tbl_resource *tbl;
                uint32_t port_id = 0;
                struct mlx5_flow_dv_port_id_action_resource port_id_resource;
@@ -6563,12 +6677,11 @@ __flow_dv_translate(struct rte_eth_dev *dev,
                                action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
                                break;
                        }
-                       tag_resource.tag =
-                               mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
+                       tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
                        if (!dev_flow->dv.tag_resource)
                                if (flow_dv_tag_resource_register
-                                   (dev, &tag_resource, dev_flow, error))
-                                       return errno;
+                                   (dev, tag_be, dev_flow, error))
+                                       return -rte_errno;
                        dev_flow->dv.actions[actions_n++] =
                                dev_flow->dv.tag_resource->action;
                        break;
@@ -6589,13 +6702,13 @@ __flow_dv_translate(struct rte_eth_dev *dev,
                        /* Fall-through */
                case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
                        /* Legacy (non-extensive) MARK action. */
-                       tag_resource.tag = mlx5_flow_mark_set
+                       tag_be = mlx5_flow_mark_set
                              (((const struct rte_flow_action_mark *)
                               (actions->conf))->id);
                        if (!dev_flow->dv.tag_resource)
                                if (flow_dv_tag_resource_register
-                                   (dev, &tag_resource, dev_flow, error))
-                                       return errno;
+                                   (dev, tag_be, dev_flow, error))
+                                       return -rte_errno;
                        dev_flow->dv.actions[actions_n++] =
                                dev_flow->dv.tag_resource->action;
                        break;
@@ -6792,10 +6905,9 @@ cnt_err:
                                                 RTE_FLOW_ERROR_TYPE_ACTION,
                                                 NULL,
                                                 "cannot create jump action.");
-                       jump_tbl_resource.tbl = tbl;
                        if (flow_dv_jump_tbl_resource_register
-                           (dev, &jump_tbl_resource, dev_flow, error)) {
-                               flow_dv_tbl_resource_release(tbl);
+                           (dev, tbl, dev_flow, error)) {
+                               flow_dv_tbl_resource_release(dev, tbl);
                                return rte_flow_error_set
                                                (error, errno,
                                                 RTE_FLOW_ERROR_TYPE_ACTION,
@@ -6893,6 +7005,25 @@ cnt_err:
                                return -rte_errno;
                        action_flags |= MLX5_FLOW_ACTION_SET_TAG;
                        break;
+               case RTE_FLOW_ACTION_TYPE_METER:
+                       mtr = actions->conf;
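+                       /* Attach the meter to the flow on first use. */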
+                       if (!flow->meter) {
+                               flow->meter = mlx5_flow_meter_attach(priv,
+                                                       mtr->mtr_id, attr,
+                                                       error);
+                               if (!flow->meter)
+                                       return rte_flow_error_set(error,
+                                               rte_errno,
+                                               RTE_FLOW_ERROR_TYPE_ACTION,
+                                               NULL,
+                                               "meter not found "
+                                               "or invalid parameters");
+                       }
+                       /* Set the meter action. */
+                       dev_flow->dv.actions[actions_n++] =
+                               flow->meter->mfts->meter_action;
+                       action_flags |= MLX5_FLOW_ACTION_METER;
+                       break;
                case RTE_FLOW_ACTION_TYPE_END:
                        actions_end = true;
                        if (mhdr_res.actions_num) {
@@ -7117,10 +7248,11 @@ cnt_err:
                                    matcher.mask.size);
        matcher.priority = mlx5_flow_adjust_priority(dev, priority,
                                                     matcher.priority);
-       matcher.egress = attr->egress;
-       matcher.group = dev_flow->group;
-       matcher.transfer = attr->transfer;
-       if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
+       /* The reserved field does not need to be set to 0 here. */
+       tbl_key.domain = attr->transfer;
+       tbl_key.direction = attr->egress;
+       tbl_key.table_id = dev_flow->group;
+       if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
                return -rte_errno;
        return 0;
 }
@@ -7256,9 +7388,6 @@ flow_dv_matcher_release(struct rte_eth_dev *dev,
                        struct mlx5_flow *flow)
 {
        struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
-       struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_ibv_shared *sh = priv->sh;
-       struct mlx5_flow_tbl_resource *tbl;
 
        assert(matcher->matcher_object);
        DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
@@ -7268,11 +7397,8 @@ flow_dv_matcher_release(struct rte_eth_dev *dev,
                claim_zero(mlx5_glue->dv_destroy_flow_matcher
                           (matcher->matcher_object));
                LIST_REMOVE(matcher, next);
-               if (matcher->egress)
-                       tbl = &sh->tx_tbl[matcher->group];
-               else
-                       tbl = &sh->rx_tbl[matcher->group];
-               flow_dv_tbl_resource_release(tbl);
+               /* table ref-- in release interface. */
+               flow_dv_tbl_resource_release(dev, matcher->tbl);
                rte_free(matcher);
                DRV_LOG(DEBUG, "port %u matcher %p: removed",
                        dev->data->port_id, (void *)matcher);
@@ -7315,6 +7441,8 @@ flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
 /**
  * Release an jump to table action resource.
  *
+ * @param dev
+ *   Pointer to Ethernet device.
  * @param flow
  *   Pointer to mlx5_flow.
  *
@@ -7322,10 +7450,13 @@ flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
-flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
+flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
+                                 struct mlx5_flow *flow)
 {
-       struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
-                                               flow->dv.jump;
+       struct mlx5_flow_dv_jump_tbl_resource *cache_resource = flow->dv.jump;
+       struct mlx5_flow_tbl_data_entry *tbl_data =
+                       container_of(cache_resource,
+                                    struct mlx5_flow_tbl_data_entry, jump);
 
        assert(cache_resource->action);
        DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
@@ -7334,9 +7465,8 @@ flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
        if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
                claim_zero(mlx5_glue->destroy_flow_action
                                (cache_resource->action));
-               LIST_REMOVE(cache_resource, next);
-               flow_dv_tbl_resource_release(cache_resource->tbl);
-               rte_free(cache_resource);
+               /* The jump action memory is freed inside the table release. */
+               flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
                DRV_LOG(DEBUG, "jump table resource %p: removed",
                        (void *)cache_resource);
                return 0;
@@ -7494,6 +7624,10 @@ __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
                flow_dv_counter_release(dev, flow->counter);
                flow->counter = NULL;
        }
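+       /* Detach the meter; it may still be referenced by other flows. */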
+       if (flow->meter) {
+               mlx5_flow_meter_detach(flow->meter);
+               flow->meter = NULL;
+       }
        while (!LIST_EMPTY(&flow->dev_flows)) {
                dev_flow = LIST_FIRST(&flow->dev_flows);
                LIST_REMOVE(dev_flow, next);
@@ -7504,7 +7638,7 @@ __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
                if (dev_flow->dv.modify_hdr)
                        flow_dv_modify_hdr_resource_release(dev_flow);
                if (dev_flow->dv.jump)
-                       flow_dv_jump_tbl_resource_release(dev_flow);
+                       flow_dv_jump_tbl_resource_release(dev, dev_flow);
                if (dev_flow->dv.port_id_action)
                        flow_dv_port_id_action_resource_release(dev_flow);
                if (dev_flow->dv.push_vlan_res)
@@ -7637,7 +7771,8 @@ flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
                claim_zero(mlx5_glue->dv_destroy_flow_matcher
                          (mtd->egress.any_matcher));
        if (mtd->egress.tbl)
-               claim_zero(flow_dv_tbl_resource_release(mtd->egress.tbl));
+               claim_zero(flow_dv_tbl_resource_release(dev,
+                                                       mtd->egress.tbl));
        if (mtd->ingress.color_matcher)
                claim_zero(mlx5_glue->dv_destroy_flow_matcher
                          (mtd->ingress.color_matcher));
@@ -7645,7 +7780,8 @@ flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
                claim_zero(mlx5_glue->dv_destroy_flow_matcher
                          (mtd->ingress.any_matcher));
        if (mtd->ingress.tbl)
-               claim_zero(flow_dv_tbl_resource_release(mtd->ingress.tbl));
+               claim_zero(flow_dv_tbl_resource_release(dev,
+                                                       mtd->ingress.tbl));
        if (mtd->transfer.color_matcher)
                claim_zero(mlx5_glue->dv_destroy_flow_matcher
                          (mtd->transfer.color_matcher));
@@ -7653,7 +7789,8 @@ flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
                claim_zero(mlx5_glue->dv_destroy_flow_matcher
                          (mtd->transfer.any_matcher));
        if (mtd->transfer.tbl)
-               claim_zero(flow_dv_tbl_resource_release(mtd->transfer.tbl));
+               claim_zero(flow_dv_tbl_resource_release(dev,
+                                                       mtd->transfer.tbl));
        if (mtd->drop_actn)
                claim_zero(mlx5_glue->destroy_flow_action(mtd->drop_actn));
        rte_free(mtd);
@@ -7756,6 +7893,8 @@ flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
                DRV_LOG(ERR, "Failed to create meter policer color matcher.");
                goto error_exit;
        }
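+       /* Count packets on the default drop rule if a counter exists. */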
+       if (mtb->count_actns[RTE_MTR_DROPPED])
+               actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
        actions[i++] = mtb->drop_actn;
        /* Default rule: lowest priority, match any, actions: drop. */
        dtb->policer_rules[RTE_MTR_DROPPED] =
@@ -7776,16 +7915,20 @@ error_exit:
  *
  * @param[in] dev
  *   Pointer to Ethernet device.
+ * @param[in] fm
+ *   Pointer to the flow meter.
  *
  * @return
  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
  */
 static struct mlx5_meter_domains_infos *
-flow_dv_create_mtr_tbl(struct rte_eth_dev *dev)
+flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
+                      const struct mlx5_flow_meter *fm)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_meter_domains_infos *mtb;
        int ret;
+       int i;
 
        if (!priv->mtr_en) {
                rte_errno = ENOTSUP;
@@ -7796,6 +7939,12 @@ flow_dv_create_mtr_tbl(struct rte_eth_dev *dev)
                DRV_LOG(ERR, "Failed to allocate memory for meter.");
                return NULL;
        }
+       /* Save the existing policer counter actions of the meter, if any. */
+       for (i = 0; i <= RTE_MTR_DROPPED; i++) {
+               if (!fm->policer_stats.cnt[i])
+                       continue;
+               mtb->count_actns[i] = fm->policer_stats.cnt[i]->action;
+       }
        /* Create drop action. */
        mtb->drop_actn = mlx5_glue->dr_create_flow_action_drop();
        if (!mtb->drop_actn) {
@@ -7931,6 +8080,8 @@ flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
 
                flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
                                       rte_col_2_mlx5_col(i), UINT32_MAX);
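+               /* Count this color before applying the policer action. */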
+               if (mtb->count_actns[i])
+                       actions[j++] = mtb->count_actns[i];
                if (fm->params.action[i] == MTR_POLICER_ACTION_DROP)
                        actions[j++] = mtb->drop_actn;
                else
@@ -8005,6 +8156,46 @@ error:
        return -1;
 }
 
+/**
+ * Query a devx counter.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] cnt
+ *   Pointer to the flow counter.
+ * @param[in] clear
+ *   Set to clear the counter statistics.
+ * @param[out] pkts
+ *   The statistics value of packets.
+ * @param[out] bytes
+ *   The statistics value of bytes.
+ *
+ * @return
+ *   0 on success, -1 otherwise.
+ */
+static int
+flow_dv_counter_query(struct rte_eth_dev *dev,
+                     struct mlx5_flow_counter *cnt, bool clear,
+                     uint64_t *pkts, uint64_t *bytes)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       uint64_t inn_pkts, inn_bytes;
+       int ret;
+
+       if (!priv->config.devx)
+               return -1;
+       ret = _flow_dv_query_count(dev, cnt, &inn_pkts, &inn_bytes);
+       if (ret)
+               return -1;
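+       /* Report the difference from the values at the last reset. */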
+       *pkts = inn_pkts - cnt->hits;
+       *bytes = inn_bytes - cnt->bytes;
+       if (clear) {
+               cnt->hits = inn_pkts;
+               cnt->bytes = inn_bytes;
+       }
+       return 0;
+}
+
 /*
  * Mutex-protected thunk to lock-free  __flow_dv_translate().
  */
@@ -8062,6 +8253,31 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
        flow_dv_shared_unlock(dev);
 }
 
+/*
+ * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
+ */
+static struct mlx5_flow_counter *
+flow_dv_counter_allocate(struct rte_eth_dev *dev)
+{
+       struct mlx5_flow_counter *cnt;
+
+       flow_dv_shared_lock(dev);
+       cnt = flow_dv_counter_alloc(dev, 0, 0, 1);
+       flow_dv_shared_unlock(dev);
+       return cnt;
+}
+
+/*
+ * Mutex-protected thunk to lock-free flow_dv_counter_release().
+ */
+static void
+flow_dv_counter_free(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt)
+{
+       flow_dv_shared_lock(dev);
+       flow_dv_counter_release(dev, cnt);
+       flow_dv_shared_unlock(dev);
+}
+
 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
        .validate = flow_dv_validate,
        .prepare = flow_dv_prepare,
@@ -8074,6 +8290,9 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
        .destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
        .create_policer_rules = flow_dv_create_policer_rules,
        .destroy_policer_rules = flow_dv_destroy_policer_rules,
+       .counter_alloc = flow_dv_counter_allocate,
+       .counter_free = flow_dv_counter_free,
+       .counter_query = flow_dv_counter_query,
 };
 
 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */