#include <rte_vxlan.h>
#include <rte_gtp.h>
-#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include "mlx5_defs.h"
#include "mlx5.h"
+#include "mlx5_common_os.h"
#include "mlx5_flow.h"
+#include "mlx5_flow_os.h"
#include "mlx5_rxtx.h"
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
struct mlx5_flow_tbl_resource *tbl);
+static int
+flow_dv_default_miss_resource_release(struct rte_eth_dev *dev);
+
/**
* Initialize flow attributes structure according to flow items' types.
*
flow_dv_shared_lock(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
if (sh->dv_refcnt > 1) {
int ret;
flow_dv_shared_unlock(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
if (sh->dv_refcnt > 1) {
int ret;
return 0;
}
-/*
- * GTP flags are contained in 1 byte of the format:
- * -------------------------------------------
- * | bit | 0 - 2 | 3 | 4 | 5 | 6 | 7 |
- * |-----------------------------------------|
- * | value | Version | PT | Res | E | S | PN |
- * -------------------------------------------
- *
- * Matching is supported only for GTP flags E, S, PN.
- */
-#define MLX5_GTP_FLAGS_MASK 0x07
-
/**
* Validate VLAN item.
*
return 0;
}
+/*
+ * GTP flags are contained in 1 byte of the format:
+ * -------------------------------------------
+ * | bit | 0 - 2 | 3 | 4 | 5 | 6 | 7 |
+ * |-----------------------------------------|
+ * | value | Version | PT | Res | E | S | PN |
+ * -------------------------------------------
+ *
+ * Matching is supported only for GTP flags E, S, PN.
+ */
+#define MLX5_GTP_FLAGS_MASK 0x07
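+/*
+ * Illustrative example: with bit 0 as the MSB of the flags byte, E/S/PN
+ * occupy the three least significant bits (E = 0x04, S = 0x02, PN = 0x01),
+ * so a GTP item matching only the S flag would use v_pt_rsv_flags 0x02
+ * with the mask MLX5_GTP_FLAGS_MASK.
+ */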
+
/**
* Validate GTP item.
*
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_encap_decap_resource *cache_resource;
struct mlx5dv_dr_domain *domain;
uint32_t idx = 0;
+ int ret;
resource->flags = dev_flow->dv.group ? 0 : 1;
if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate resource memory");
*cache_resource = *resource;
- cache_resource->verbs_action =
- mlx5_glue->dv_create_flow_action_packet_reformat
- (sh->ctx, cache_resource->reformat_type,
- cache_resource->ft_type, domain, cache_resource->flags,
- cache_resource->size,
- (cache_resource->size ? cache_resource->buf : NULL));
- if (!cache_resource->verbs_action) {
+ ret = mlx5_flow_os_create_flow_action_packet_reformat
+ (sh->ctx, domain, cache_resource,
+ &cache_resource->action);
+ if (ret) {
rte_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
{
struct mlx5_flow_tbl_data_entry *tbl_data =
container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
- int cnt;
+ int cnt, ret;
MLX5_ASSERT(tbl);
cnt = rte_atomic32_read(&tbl_data->jump.refcnt);
if (!cnt) {
- tbl_data->jump.action =
- mlx5_glue->dr_create_flow_action_dest_flow_tbl
- (tbl->obj);
- if (!tbl_data->jump.action)
+ ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
+ (tbl->obj, &tbl_data->jump.action);
+ if (ret)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create jump action");
return 0;
}
+/**
+ * Find existing default miss resource or create and register a new one.
+ *
+ * @param[in, out] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, otherwise -errno and errno is set.
+ */
+static int
+flow_dv_default_miss_resource_register(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_flow_default_miss_resource *cache_resource =
+ &sh->default_miss;
+ int cnt = rte_atomic32_read(&cache_resource->refcnt);
+
+ if (!cnt) {
+		MLX5_ASSERT(!cache_resource->action);
+ cache_resource->action =
+ mlx5_glue->dr_create_flow_action_default_miss();
+ if (!cache_resource->action)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot create default miss action");
+ DRV_LOG(DEBUG, "new default miss resource %p: refcnt %d++",
+ (void *)cache_resource->action, cnt);
+ }
+ rte_atomic32_inc(&cache_resource->refcnt);
+ return 0;
+}
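+
+/*
+ * Note: the default miss action is a singleton per shared device context;
+ * the reference counter above tracks its users so that
+ * flow_dv_default_miss_resource_release() destroys it only on the last
+ * release.
+ */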
+
/**
* Find existing table port ID resource or create and register a new one.
*
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_port_id_action_resource *cache_resource;
uint32_t idx = 0;
+ int ret;
/* Lookup a matching resource from cache. */
ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PORT_ID], sh->port_id_action_list,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate resource memory");
*cache_resource = *resource;
- /*
- * Depending on rdma_core version the glue routine calls
- * either mlx5dv_dr_action_create_dest_ib_port(domain, ibv_port)
- * or mlx5dv_dr_action_create_dest_vport(domain, vport_id).
- */
- cache_resource->action =
- mlx5_glue->dr_create_flow_action_dest_port
- (priv->sh->fdb_domain, resource->port_id);
- if (!cache_resource->action) {
+ ret = mlx5_flow_os_create_flow_action_dest_port
+ (priv->sh->fdb_domain, resource->port_id,
+ &cache_resource->action);
+ if (ret) {
rte_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
struct mlx5dv_dr_domain *domain;
uint32_t idx = 0;
+ int ret;
/* Lookup a matching resource from cache. */
ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
domain = sh->rx_domain;
else
domain = sh->tx_domain;
- cache_resource->action =
- mlx5_glue->dr_create_flow_action_push_vlan(domain,
- resource->vlan_tag);
- if (!cache_resource->action) {
+ ret = mlx5_flow_os_create_flow_action_push_vlan
+ (domain, resource->vlan_tag,
+ &cache_resource->action);
+ if (ret) {
rte_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
struct mlx5dv_dr_domain *ns;
uint32_t actions_len;
+ int ret;
resource->flags = dev_flow->dv.group ? 0 :
MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
"cannot allocate resource memory");
*cache_resource = *resource;
rte_memcpy(cache_resource->actions, resource->actions, actions_len);
- cache_resource->verbs_action =
- mlx5_glue->dv_create_flow_action_modify_header
- (sh->ctx, cache_resource->ft_type, ns,
- cache_resource->flags, actions_len,
- (uint64_t *)cache_resource->actions);
- if (!cache_resource->verbs_action) {
+ ret = mlx5_flow_os_create_flow_action_modify_header
+ (sh->ctx, ns, cache_resource,
+ actions_len, &cache_resource->action);
+ if (ret) {
rte_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
}
+/**
+ * Check whether the DevX counter belongs to the pool.
+ *
+ * @param[in] pool
+ * Pointer to the counter pool.
+ * @param[in] id
+ * The counter devx ID.
+ *
+ * @return
+ * True if counter belongs to the pool, false otherwise.
+ */
+static bool
+flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
+{
+ int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
+ MLX5_COUNTERS_PER_POOL;
+
+ if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
+ return true;
+ return false;
+}
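+
+/*
+ * Example (illustrative, assuming MLX5_COUNTERS_PER_POOL is 512): a pool
+ * whose min_dcs->id is 1000 has base 512 and thus covers counter IDs
+ * [512, 1023]; ID 1000 belongs to it while ID 1024 does not.
+ */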
+
/**
* Get a pool by devx counter ID.
*
flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
{
uint32_t i;
- uint32_t n_valid = rte_atomic16_read(&cont->n_valid);
- for (i = 0; i < n_valid; i++) {
+ /* Check last used pool. */
+ if (cont->last_pool_idx != POOL_IDX_INVALID &&
+ flow_dv_is_counter_in_pool(cont->pools[cont->last_pool_idx], id))
+ return cont->pools[cont->last_pool_idx];
+ /* ID out of range means no suitable pool in the container. */
+ if (id > cont->max_id || id < cont->min_id)
+ return NULL;
+ /*
+	 * Search the pools from the end of the container, since counter
+	 * IDs are mostly increasing and the last pool is most likely the
+	 * needed one.
+ */
+ i = rte_atomic16_read(&cont->n_valid);
+ while (i--) {
struct mlx5_flow_counter_pool *pool = cont->pools[i];
- int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
- MLX5_COUNTERS_PER_POOL;
- if (id >= base && id < base + MLX5_COUNTERS_PER_POOL) {
- /*
- * Move the pool to the head, as counter allocate
- * always gets the first pool in the container.
- */
- if (pool != TAILQ_FIRST(&cont->pool_list)) {
- TAILQ_REMOVE(&cont->pool_list, pool, next);
- TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
- }
+ if (flow_dv_is_counter_in_pool(pool, id))
return pool;
- }
}
return NULL;
}
flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_devx_mkey_attr mkey_attr;
struct mlx5_counter_stats_mem_mng *mem_mng;
volatile struct flow_counter_stats *raw_data;
}
mkey_attr.addr = (uintptr_t)mem;
mkey_attr.size = size;
- mkey_attr.umem_id = mem_mng->umem->umem_id;
+ mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
mkey_attr.pd = sh->pdn;
mkey_attr.log_entity_size = 0;
mkey_attr.pg_access = 0;
pool->type = 0;
pool->type |= (batch ? 0 : CNT_POOL_TYPE_EXT);
pool->type |= (!age ? 0 : CNT_POOL_TYPE_AGE);
+ pool->query_gen = 0;
rte_spinlock_init(&pool->sl);
- /*
- * The generation of the new allocated counters in this pool is 0, 2 in
- * the pool generation makes all the counters valid for allocation.
- * The start and end query generation protect the counters be released
- * between the query and update gap period will not be reallocated
- * without the last query finished and stats updated to the memory.
- */
- rte_atomic64_set(&pool->start_query_gen, 0x2);
- /*
- * There's no background query thread for fallback mode, set the
- * end_query_gen to the maximum value since no need to wait for
- * statistics update.
- */
- rte_atomic64_set(&pool->end_query_gen, priv->counter_fallback ?
- INT64_MAX : 0x2);
- TAILQ_INIT(&pool->counters);
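+	/*
+	 * Two free-counter lists are used alternately: counters released
+	 * while one generation is being queried are parked in the other
+	 * list and become allocatable only after that query completes
+	 * (see flow_dv_counter_release()).
+	 */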
+ TAILQ_INIT(&pool->counters[0]);
+ TAILQ_INIT(&pool->counters[1]);
TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
pool->index = n_valid;
cont->pools[n_valid] = pool;
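+	/*
+	 * For non-batch pools, keep the container counter ID range and the
+	 * last created pool index up to date; both are used by
+	 * flow_dv_find_pool_by_id() to short-cut the search.
+	 */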
+ if (!batch) {
+ int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
+
+ if (base < cont->min_id)
+ cont->min_id = base;
+ if (base > cont->max_id)
+ cont->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
+ cont->last_pool_idx = pool->index;
+ }
/* Pool initialization must be updated before host thread access. */
rte_cio_wmb();
rte_atomic16_add(&cont->n_valid, 1);
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_pools_container *cont;
struct mlx5_flow_counter_pool *pool;
+ struct mlx5_counters tmp_tq;
struct mlx5_devx_obj *dcs = NULL;
struct mlx5_flow_counter *cnt;
uint32_t i;
pool, batch, age);
i = dcs->id % MLX5_COUNTERS_PER_POOL;
cnt = MLX5_POOL_GET_CNT(pool, i);
- TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
+ cnt->pool = pool;
MLX5_GET_POOL_CNT_EXT(pool, i)->dcs = dcs;
*cnt_free = cnt;
return pool;
mlx5_devx_cmd_destroy(dcs);
return NULL;
}
- for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
+ TAILQ_INIT(&tmp_tq);
+ for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
cnt = MLX5_POOL_GET_CNT(pool, i);
- TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
+ cnt->pool = pool;
+ TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
}
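+	/*
+	 * Link the prepared counters into the container free list in one
+	 * concat operation, so the spinlock is held for a constant time
+	 * rather than once per counter.
+	 */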
+ rte_spinlock_lock(&cont->csl);
+ TAILQ_CONCAT(&cont->counters, &tmp_tq, next);
+ rte_spinlock_unlock(&cont->csl);
*cnt_free = MLX5_POOL_GET_CNT(pool, 0);
+ (*cnt_free)->pool = pool;
return pool;
}
/**
 * Search for an existing shared counter.
*
- * @param[in] cont
- * Pointer to the relevant counter pool container.
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
* @param[in] id
* The shared counter ID to search.
* @param[out] ppool
 * NULL if not found, otherwise pointer to the shared extended counter.
*/
static struct mlx5_flow_counter_ext *
-flow_dv_counter_shared_search(struct mlx5_pools_container *cont, uint32_t id,
+flow_dv_counter_shared_search(struct rte_eth_dev *dev, uint32_t id,
struct mlx5_flow_counter_pool **ppool)
{
- struct mlx5_flow_counter_ext *cnt;
- struct mlx5_flow_counter_pool *pool;
- uint32_t i, j;
- uint32_t n_valid = rte_atomic16_read(&cont->n_valid);
-
- for (i = 0; i < n_valid; i++) {
- pool = cont->pools[i];
- for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) {
- cnt = MLX5_GET_POOL_CNT_EXT(pool, j);
- if (cnt->ref_cnt && cnt->shared && cnt->id == id) {
- if (ppool)
- *ppool = cont->pools[i];
- return cnt;
- }
- }
- }
- return NULL;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ union mlx5_l3t_data data;
+ uint32_t cnt_idx;
+
+ if (mlx5_l3t_get_entry(priv->sh->cnt_id_tbl, id, &data) || !data.dword)
+ return NULL;
+ cnt_idx = data.dword;
+ /*
+	 * Shared counters don't have age info. The counter extend is placed
+	 * right after the counter data structure.
+ */
+ return (struct mlx5_flow_counter_ext *)
+ ((flow_dv_counter_get_by_idx(dev, cnt_idx, ppool)) + 1);
}
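+
+/*
+ * Note: the L3T (three-level table) lookup above replaces a linear scan
+ * over all pools and counters, so the shared counter search cost no
+ * longer grows with the number of allocated pools.
+ */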
/**
return 0;
}
if (shared) {
- cnt_ext = flow_dv_counter_shared_search(cont, id, &pool);
+ cnt_ext = flow_dv_counter_shared_search(dev, id, &pool);
if (cnt_ext) {
if (cnt_ext->ref_cnt + 1 == 0) {
rte_errno = E2BIG;
return cnt_idx;
}
}
- /* Pools which has a free counters are in the start. */
- TAILQ_FOREACH(pool, &cont->pool_list, next) {
- /*
- * The free counter reset values must be updated between the
- * counter release to the counter allocation, so, at least one
- * query must be done in this time. ensure it by saving the
- * query generation in the release time.
- * The free list is sorted according to the generation - so if
- * the first one is not updated, all the others are not
- * updated too.
- */
- cnt_free = TAILQ_FIRST(&pool->counters);
- if (cnt_free && cnt_free->query_gen <
- rte_atomic64_read(&pool->end_query_gen))
- break;
- cnt_free = NULL;
- }
- if (!cnt_free) {
- pool = flow_dv_counter_pool_prepare(dev, &cnt_free, batch, age);
- if (!pool)
- return 0;
- }
+	/* Get a free counter from the container. */
+ rte_spinlock_lock(&cont->csl);
+ cnt_free = TAILQ_FIRST(&cont->counters);
+ if (cnt_free)
+ TAILQ_REMOVE(&cont->counters, cnt_free, next);
+ rte_spinlock_unlock(&cont->csl);
+ if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free,
+ batch, age))
+ goto err;
+ pool = cnt_free->pool;
if (!batch)
cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt_free);
/* Create a DV counter action only in the first time usage. */
if (!cnt_free->action) {
uint16_t offset;
struct mlx5_devx_obj *dcs;
+ int ret;
if (batch) {
offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
offset = 0;
dcs = cnt_ext->dcs;
}
- cnt_free->action = mlx5_glue->dv_create_flow_action_counter
- (dcs->obj, offset);
- if (!cnt_free->action) {
+ ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
+ &cnt_free->action);
+ if (ret) {
rte_errno = errno;
- return 0;
+ goto err;
}
}
cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
/* Update the counter reset values. */
if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
&cnt_free->bytes))
- return 0;
+ goto err;
if (cnt_ext) {
cnt_ext->shared = shared;
cnt_ext->ref_cnt = 1;
cnt_ext->id = id;
+ if (shared) {
+ union mlx5_l3t_data data;
+
+ data.dword = cnt_idx;
+ if (mlx5_l3t_set_entry(priv->sh->cnt_id_tbl, id, &data))
+				goto err;
+ }
}
if (!priv->counter_fallback && !priv->sh->cmng.query_thread_on)
/* Start the asynchronous batch query by the host thread. */
mlx5_set_query_alarm(priv->sh);
- TAILQ_REMOVE(&pool->counters, cnt_free, next);
- if (TAILQ_EMPTY(&pool->counters)) {
- /* Move the pool to the end of the container pool list. */
- TAILQ_REMOVE(&cont->pool_list, pool, next);
- TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
- }
return cnt_idx;
+err:
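+	/* Return an unused prepared counter to the container free list. */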
+ if (cnt_free) {
+ cnt_free->pool = pool;
+ rte_spinlock_lock(&cont->csl);
+ TAILQ_INSERT_TAIL(&cont->counters, cnt_free, next);
+ rte_spinlock_unlock(&cont->csl);
+ }
+ return 0;
}
/**
static void
flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_counter_pool *pool = NULL;
struct mlx5_flow_counter *cnt;
struct mlx5_flow_counter_ext *cnt_ext = NULL;
MLX5_ASSERT(pool);
if (counter < MLX5_CNT_BATCH_OFFSET) {
cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
- if (cnt_ext && --cnt_ext->ref_cnt)
- return;
+ if (cnt_ext) {
+ if (--cnt_ext->ref_cnt)
+ return;
+ if (cnt_ext->shared)
+ mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
+ cnt_ext->id);
+ }
}
if (IS_AGE_POOL(pool))
flow_dv_counter_remove_from_age(dev, counter, cnt);
- /* Put the counter in the end - the last updated one. */
- TAILQ_INSERT_TAIL(&pool->counters, cnt, next);
+ cnt->pool = pool;
/*
- * Counters released between query trigger and handler need
- * to wait the next round of query. Since the packets arrive
- * in the gap period will not be taken into account to the
- * old counter.
+	 * Put the counter back to a list to be updated in non-fallback mode.
+	 * Two lists are used alternately: while one is being queried, freed
+	 * counters are added to the other one, selected by the pool query_gen
+	 * value. After the query finishes, that list is appended to the global
+	 * container counter list. The lists are switched when a query starts,
+	 * so no lock is needed: the query callback and the release function
+	 * always operate on different lists.
*/
- cnt->query_gen = rte_atomic64_read(&pool->start_query_gen);
+ if (!priv->counter_fallback)
+ TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
+ else
+ TAILQ_INSERT_TAIL(&((MLX5_CNT_CONTAINER
+ (priv->sh, 0, 0))->counters),
+ cnt, next);
}
/**
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
int type = items->type;
+ if (!mlx5_flow_os_item_supported(type))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "item not supported");
switch (type) {
case RTE_FLOW_ITEM_TYPE_VOID:
break;
}
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
int type = actions->type;
+
+ if (!mlx5_flow_os_action_supported(type))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
action_flags |= MLX5_FLOW_ACTION_RSS;
++actions_n;
break;
+ case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
+ ret =
+ mlx5_flow_validate_action_default_miss(action_flags,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
+ ++actions_n;
+ break;
case RTE_FLOW_ACTION_TYPE_COUNT:
ret = flow_dv_validate_action_count(dev, error);
if (ret < 0)
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_tbl_resource *tbl;
union mlx5_flow_tbl_key table_key = {
{
domain = sh->tx_domain;
else
domain = sh->rx_domain;
- tbl->obj = mlx5_glue->dr_create_flow_tbl(domain, table_id);
- if (!tbl->obj) {
+ ret = mlx5_flow_os_create_flow_tbl(domain, table_id, &tbl->obj);
+ if (ret) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create flow table object");
rte_flow_error_set(error, -ret,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot insert flow table data entry");
- mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
+ mlx5_flow_os_destroy_flow_tbl(tbl->obj);
mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
}
rte_atomic32_inc(&tbl->refcnt);
struct mlx5_flow_tbl_resource *tbl)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_tbl_data_entry *tbl_data =
container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
struct mlx5_hlist_entry *pos = &tbl_data->entry;
- mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
+ mlx5_flow_os_destroy_flow_tbl(tbl->obj);
tbl->obj = NULL;
/* remove the entry from the hash list and free memory. */
mlx5_hlist_remove(sh->flow_tbls, pos);
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_matcher *cache_matcher;
struct mlx5dv_flow_matcher_attr dv_attr = {
.type = IBV_FLOW_ATTR_NORMAL,
};
struct mlx5_flow_tbl_resource *tbl;
struct mlx5_flow_tbl_data_entry *tbl_data;
+ int ret;
tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
key->domain, error);
dv_attr.priority = matcher->priority;
if (key->direction)
dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
- cache_matcher->matcher_object =
- mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
- if (!cache_matcher->matcher_object) {
+ ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
+ &cache_matcher->matcher_object);
+ if (ret) {
rte_free(cache_matcher);
#ifdef HAVE_MLX5DV_DR
flow_dv_tbl_resource_release(dev, tbl);
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_tag_resource *cache_resource;
struct mlx5_hlist_entry *entry;
+ int ret;
/* Lookup a matching resource from cache. */
entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24);
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate resource memory");
cache_resource->entry.key = (uint64_t)tag_be24;
- cache_resource->action = mlx5_glue->dv_create_flow_action_tag(tag_be24);
- if (!cache_resource->action) {
+ ret = mlx5_flow_os_create_flow_action_tag(tag_be24,
+ &cache_resource->action);
+ if (ret) {
rte_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) {
- mlx5_glue->destroy_flow_action(cache_resource->action);
+ mlx5_flow_os_destroy_flow_action(cache_resource->action);
rte_free(cache_resource);
return rte_flow_error_set(error, EEXIST,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
uint32_t tag_idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_tag_resource *tag;
tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
dev->data->port_id, (void *)tag,
rte_atomic32_read(&tag->refcnt));
if (rte_atomic32_dec_and_test(&tag->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action(tag->action));
+ claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
mlx5_hlist_remove(sh->tag_table, &tag->entry);
DRV_LOG(DEBUG, "port %u tag %p: removed",
dev->data->port_id, (void *)tag);
* This parameter is transferred to
* mlx5dv_dr_action_create_dest_ib_port().
*/
- *dst_port_id = priv->ibv_port;
+ *dst_port_id = priv->dev_port;
#else
/*
* Legacy mode, no LAG configurations is supported.
const struct rte_flow_action *found_action = NULL;
struct mlx5_flow_meter *fm = NULL;
+ if (!mlx5_flow_os_action_supported(action_type))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
switch (action_type) {
case RTE_FLOW_ACTION_TYPE_VOID:
break;
if (flow_dv_translate_action_port_id(dev, action,
&port_id, error))
return -rte_errno;
- memset(&port_id_resource, 0, sizeof(port_id_resource));
port_id_resource.port_id = port_id;
MLX5_ASSERT(!handle->rix_port_id_action);
if (flow_dv_port_id_action_resource_register
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ dev_flow->dv.encap_decap->action;
action_flags |= MLX5_FLOW_ACTION_ENCAP;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ dev_flow->dv.encap_decap->action;
action_flags |= MLX5_FLOW_ACTION_DECAP;
break;
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
(dev, actions, dev_flow, attr, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ dev_flow->dv.encap_decap->action;
} else {
/* Handle encap without preceding decap. */
if (flow_dv_create_action_l2_encap
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ dev_flow->dv.encap_decap->action;
}
action_flags |= MLX5_FLOW_ACTION_ENCAP;
break;
(dev, dev_flow, attr->transfer, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ dev_flow->dv.encap_decap->action;
}
/* If decap is followed by encap, handle it at encap. */
action_flags |= MLX5_FLOW_ACTION_DECAP;
return -rte_errno;
action_flags |= MLX5_FLOW_ACTION_SET_TAG;
break;
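+		/*
+		 * Internal default miss action: the fate is recorded here
+		 * and the shared default miss action is registered and
+		 * attached later on the apply path.
+		 */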
+ case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
+ action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
+ dev_flow->handle->fate_action =
+ MLX5_FLOW_FATE_DEFAULT_MISS;
+ break;
case RTE_FLOW_ACTION_TYPE_METER:
mtr = actions->conf;
if (!flow->meter) {
(dev, mhdr_res, dev_flow, error))
return -rte_errno;
dev_flow->dv.actions[modify_action_position] =
- handle->dvh.modify_hdr->verbs_action;
+ handle->dvh.modify_hdr->action;
}
if (action_flags & MLX5_FLOW_ACTION_COUNT) {
flow->counter =
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
int item_type = items->type;
+ if (!mlx5_flow_os_item_supported(item_type))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "item not supported");
switch (item_type) {
case RTE_FLOW_ITEM_TYPE_PORT_ID:
flow_dv_translate_item_port_id(dev, match_mask,
flow_dv_translate_item_eth(match_mask, match_value,
items, tunnel,
dev_flow->dv.group);
- matcher.priority = MLX5_PRIORITY_MAP_L2;
+ matcher.priority = action_flags &
+ MLX5_FLOW_ACTION_DEFAULT_MISS &&
+ !dev_flow->external ?
+ MLX5_PRIORITY_MAP_L3 :
+ MLX5_PRIORITY_MAP_L2;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
MLX5_FLOW_LAYER_OUTER_L2;
break;
}
dh->rix_hrxq = hrxq_idx;
dv->actions[n++] = hrxq->action;
+ } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
+ if (flow_dv_default_miss_resource_register
+ (dev, error)) {
+ rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot create default miss resource");
+ goto error_default_miss;
+ }
+ dh->rix_default_fate = MLX5_FLOW_FATE_DEFAULT_MISS;
+ dv->actions[n++] = priv->sh->default_miss.action;
}
- dh->ib_flow =
- mlx5_glue->dv_create_flow(dv_h->matcher->matcher_object,
- (void *)&dv->value, n,
- dv->actions);
- if (!dh->ib_flow) {
+ err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
+ (void *)&dv->value, n,
+ dv->actions, &dh->drv_flow);
+ if (err) {
rte_flow_error_set(error, errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
}
return 0;
error:
+ if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS)
+ flow_dv_default_miss_resource_release(dev);
+error_default_miss:
err = rte_errno; /* Save rte_errno before cleanup. */
SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
handle_idx, dh, next) {
dev->data->port_id, (void *)matcher,
rte_atomic32_read(&matcher->refcnt));
if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
(matcher->matcher_object));
LIST_REMOVE(matcher, next);
/* table ref-- in release interface. */
idx);
if (!cache_resource)
return 0;
- MLX5_ASSERT(cache_resource->verbs_action);
+ MLX5_ASSERT(cache_resource->action);
DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action
- (cache_resource->verbs_action));
+ claim_zero(mlx5_flow_os_destroy_flow_action
+ (cache_resource->action));
ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
&priv->sh->encaps_decaps, idx,
cache_resource, next);
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action
- (cache_resource->action));
+ claim_zero(mlx5_flow_os_destroy_flow_action
+ (cache_resource->action));
/* jump action memory free is inside the table release. */
flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
DRV_LOG(DEBUG, "jump table resource %p: removed",
return 1;
}
+/**
+ * Release a default miss resource.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_default_miss_resource_release(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_flow_default_miss_resource *cache_resource =
+ &sh->default_miss;
+
+ MLX5_ASSERT(cache_resource->action);
+ DRV_LOG(DEBUG, "default miss resource %p: refcnt %d--",
+ (void *)cache_resource->action,
+ rte_atomic32_read(&cache_resource->refcnt));
+ if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
+ claim_zero(mlx5_glue->destroy_flow_action
+ (cache_resource->action));
+ DRV_LOG(DEBUG, "default miss resource %p: removed",
+ (void *)cache_resource->action);
+		cache_resource->action = NULL;
+		return 0;
+ }
+ return 1;
+}
+
/**
* Release a modify-header resource.
*
struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
handle->dvh.modify_hdr;
- MLX5_ASSERT(cache_resource->verbs_action);
+ MLX5_ASSERT(cache_resource->action);
DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action
- (cache_resource->verbs_action));
+ claim_zero(mlx5_flow_os_destroy_flow_action
+ (cache_resource->action));
LIST_REMOVE(cache_resource, next);
rte_free(cache_resource);
DRV_LOG(DEBUG, "modify-header resource %p: removed",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action
- (cache_resource->action));
+ claim_zero(mlx5_flow_os_destroy_flow_action
+ (cache_resource->action));
ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
&priv->sh->port_id_action_list, idx,
cache_resource, next);
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action
- (cache_resource->action));
+ claim_zero(mlx5_flow_os_destroy_flow_action
+ (cache_resource->action));
ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
&priv->sh->push_vlan_action_list, idx,
cache_resource, next);
{
if (!handle->rix_fate)
return;
- if (handle->fate_action == MLX5_FLOW_FATE_DROP)
+ switch (handle->fate_action) {
+ case MLX5_FLOW_FATE_DROP:
mlx5_hrxq_drop_release(dev);
- else if (handle->fate_action == MLX5_FLOW_FATE_QUEUE)
+ break;
+ case MLX5_FLOW_FATE_QUEUE:
mlx5_hrxq_release(dev, handle->rix_hrxq);
- else if (handle->fate_action == MLX5_FLOW_FATE_JUMP)
+ break;
+ case MLX5_FLOW_FATE_JUMP:
flow_dv_jump_tbl_resource_release(dev, handle);
- else if (handle->fate_action == MLX5_FLOW_FATE_PORT_ID)
+ break;
+ case MLX5_FLOW_FATE_PORT_ID:
flow_dv_port_id_action_resource_release(dev, handle);
- else
+ break;
+ case MLX5_FLOW_FATE_DEFAULT_MISS:
+ flow_dv_default_miss_resource_release(dev);
+ break;
+ default:
DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
+ break;
+ }
handle->rix_fate = 0;
}
handle_idx);
if (!dh)
return;
- if (dh->ib_flow) {
- claim_zero(mlx5_glue->dv_destroy_flow(dh->ib_flow));
- dh->ib_flow = NULL;
+ if (dh->drv_flow) {
+ claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
+ dh->drv_flow = NULL;
}
if (dh->fate_action == MLX5_FLOW_FATE_DROP ||
- dh->fate_action == MLX5_FLOW_FATE_QUEUE)
+ dh->fate_action == MLX5_FLOW_FATE_QUEUE ||
+ dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS)
flow_dv_fate_resource_release(dev, dh);
if (dh->vf_vlan.tag && dh->vf_vlan.created)
mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
if (!mtd || !priv->config.dv_flow_en)
return 0;
if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
- claim_zero(mlx5_glue->dv_destroy_flow
- (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
+ claim_zero(mlx5_flow_os_destroy_flow
+ (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
- claim_zero(mlx5_glue->dv_destroy_flow
- (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
+ claim_zero(mlx5_flow_os_destroy_flow
+ (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
- claim_zero(mlx5_glue->dv_destroy_flow
- (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
+ claim_zero(mlx5_flow_os_destroy_flow
+ (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
if (mtd->egress.color_matcher)
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (mtd->egress.color_matcher));
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
+ (mtd->egress.color_matcher));
if (mtd->egress.any_matcher)
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (mtd->egress.any_matcher));
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
+ (mtd->egress.any_matcher));
if (mtd->egress.tbl)
flow_dv_tbl_resource_release(dev, mtd->egress.tbl);
if (mtd->egress.sfx_tbl)
flow_dv_tbl_resource_release(dev, mtd->egress.sfx_tbl);
if (mtd->ingress.color_matcher)
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (mtd->ingress.color_matcher));
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
+ (mtd->ingress.color_matcher));
if (mtd->ingress.any_matcher)
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (mtd->ingress.any_matcher));
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
+ (mtd->ingress.any_matcher));
if (mtd->ingress.tbl)
flow_dv_tbl_resource_release(dev, mtd->ingress.tbl);
if (mtd->ingress.sfx_tbl)
flow_dv_tbl_resource_release(dev, mtd->ingress.sfx_tbl);
if (mtd->transfer.color_matcher)
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (mtd->transfer.color_matcher));
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
+ (mtd->transfer.color_matcher));
if (mtd->transfer.any_matcher)
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (mtd->transfer.any_matcher));
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
+ (mtd->transfer.any_matcher));
if (mtd->transfer.tbl)
flow_dv_tbl_resource_release(dev, mtd->transfer.tbl);
if (mtd->transfer.sfx_tbl)
flow_dv_tbl_resource_release(dev, mtd->transfer.sfx_tbl);
if (mtd->drop_actn)
- claim_zero(mlx5_glue->destroy_flow_action(mtd->drop_actn));
+ claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
rte_free(mtd);
return 0;
}
uint32_t color_reg_c_idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ibv_shared *sh = priv->sh;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_match_params mask = {
.size = sizeof(mask.buf),
};
struct mlx5_meter_domain_info *dtb;
struct rte_flow_error error;
int i = 0;
+ int ret;
if (transfer)
dtb = &mtb->transfer;
/* Create matchers, Any and Color. */
dv_attr.priority = 3;
dv_attr.match_criteria_enable = 0;
- dtb->any_matcher = mlx5_glue->dv_create_flow_matcher(sh->ctx,
- &dv_attr,
- dtb->tbl->obj);
- if (!dtb->any_matcher) {
+ ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
+ &dtb->any_matcher);
+ if (ret) {
DRV_LOG(ERR, "Failed to create meter"
" policer default matcher.");
goto error_exit;
1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
- dtb->color_matcher = mlx5_glue->dv_create_flow_matcher(sh->ctx,
- &dv_attr,
- dtb->tbl->obj);
- if (!dtb->color_matcher) {
+ ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
+ &dtb->color_matcher);
+ if (ret) {
DRV_LOG(ERR, "Failed to create meter policer color matcher.");
goto error_exit;
}
actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
actions[i++] = mtb->drop_actn;
/* Default rule: lowest priority, match any, actions: drop. */
- dtb->policer_rules[RTE_MTR_DROPPED] =
- mlx5_glue->dv_create_flow(dtb->any_matcher,
- (void *)&value, i, actions);
- if (!dtb->policer_rules[RTE_MTR_DROPPED]) {
+ ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i,
+ actions,
+ &dtb->policer_rules[RTE_MTR_DROPPED]);
+ if (ret) {
DRV_LOG(ERR, "Failed to create meter policer drop rule.");
goto error_exit;
}
mtb->count_actns[i] = cnt->action;
}
/* Create drop action. */
- mtb->drop_actn = mlx5_glue->dr_create_flow_action_drop();
- if (!mtb->drop_actn) {
+ ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn);
+ if (ret) {
DRV_LOG(ERR, "Failed to create drop action.");
goto error_exit;
}
for (i = 0; i < RTE_MTR_DROPPED; i++) {
if (dt->policer_rules[i]) {
- claim_zero(mlx5_glue->dv_destroy_flow
- (dt->policer_rules[i]));
+ claim_zero(mlx5_flow_os_destroy_flow
+ (dt->policer_rules[i]));
dt->policer_rules[i] = NULL;
}
}
if (dt->jump_actn) {
- claim_zero(mlx5_glue->destroy_flow_action(dt->jump_actn));
+ claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn));
dt->jump_actn = NULL;
}
}
struct mlx5_meter_domains_infos *mtb = fm->mfts;
void *actions[METER_ACTIONS];
int i;
+ int ret = 0;
/* Create jump action. */
if (!dtb->jump_actn)
- dtb->jump_actn =
- mlx5_glue->dr_create_flow_action_dest_flow_tbl
- (dtb->sfx_tbl->obj);
- if (!dtb->jump_actn) {
+ ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
+ (dtb->sfx_tbl->obj, &dtb->jump_actn);
+ if (ret) {
DRV_LOG(ERR, "Failed to create policer jump action.");
goto error;
}
actions[j++] = mtb->drop_actn;
else
actions[j++] = dtb->jump_actn;
- dtb->policer_rules[i] =
- mlx5_glue->dv_create_flow(dtb->color_matcher,
- (void *)&value,
- j, actions);
- if (!dtb->policer_rules[i]) {
+ ret = mlx5_flow_os_create_flow(dtb->color_matcher,
+ (void *)&value, j, actions,
+ &dtb->policer_rules[i]);
+ if (ret) {
DRV_LOG(ERR, "Failed to create policer rule.");
goto error;
}