#include <string.h>
#include <unistd.h>
-/* Verbs header. */
-/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
-#include <infiniband/verbs.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
-
#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
+#include <rte_eal_paging.h>
+#include <rte_mpls.h>
#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
+#include <mlx5_malloc.h>
#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
+#include "mlx5_flow_os.h"
#include "mlx5_rxtx.h"
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
.mask = &mask,
};
struct field_modify_info reg_c_x[] = {
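+	/* reg_c_x[0] is filled below once the metadata register is resolved. */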
- {4, 0, 0}, /* dynamic instead of MLX5_MODI_META_REG_C_1. */
- {0, 0, 0},
+ [1] = {0, 0, 0},
};
int reg;
mask = rte_cpu_to_be_32(mask) & msk_c0;
mask = rte_cpu_to_be_32(mask << shl_c0);
}
- reg_c_x[0].id = reg_to_field[reg];
+ reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
MLX5_MODIFICATION_TYPE_SET, error);
}
"isn't supported");
if (reg != REG_A)
nic_mask.data = priv->sh->dv_meta_mask;
+ } else if (attr->transfer) {
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "extended metadata feature "
+ "should be enabled when "
+ "meta item is requested "
+					  "with e-switch mode");
}
if (!mask)
mask = &rte_flow_item_meta_mask;
RTE_FLOW_ERROR_TYPE_ACTION, action,
"no support for multiple VLAN "
"actions");
- if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
+ /* Pop VLAN with preceding Decap requires inner header with VLAN. */
+ if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
+ !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot pop vlan after decap without "
+ "match on inner vlan in the flow");
+ /* Pop VLAN without preceding Decap requires outer header with VLAN. */
+ if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
+ !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
const struct mlx5_priv *priv = dev->data->dev_private;
- if (!attr->transfer && attr->ingress)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
- NULL,
- "push VLAN action not supported for "
- "ingress");
if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"invalid vlan ethertype");
- if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION, action,
- "no support for multiple VLAN "
- "actions");
if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
{
const struct mlx5_priv *priv = dev->data->dev_private;
+ if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
+ !priv->config.decap_en)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "decap is not enabled");
if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
struct mlx5_flow_dv_encap_decap_resource *cache_resource;
struct mlx5dv_dr_domain *domain;
uint32_t idx = 0;
+ int ret;
resource->flags = dev_flow->dv.group ? 0 : 1;
if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate resource memory");
*cache_resource = *resource;
- cache_resource->verbs_action =
- mlx5_glue->dv_create_flow_action_packet_reformat
- (sh->ctx, cache_resource->reformat_type,
- cache_resource->ft_type, domain, cache_resource->flags,
- cache_resource->size,
- (cache_resource->size ? cache_resource->buf : NULL));
- if (!cache_resource->verbs_action) {
- rte_free(cache_resource);
+ ret = mlx5_flow_os_create_flow_action_packet_reformat
+ (sh->ctx, domain, cache_resource,
+ &cache_resource->action);
+ if (ret) {
+ mlx5_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create action");
{
struct mlx5_flow_tbl_data_entry *tbl_data =
container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
- int cnt;
+ int cnt, ret;
MLX5_ASSERT(tbl);
cnt = rte_atomic32_read(&tbl_data->jump.refcnt);
if (!cnt) {
- tbl_data->jump.action =
- mlx5_glue->dr_create_flow_action_dest_flow_tbl
- (tbl->obj);
- if (!tbl_data->jump.action)
+ ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
+ (tbl->obj, &tbl_data->jump.action);
+ if (ret)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create jump action");
struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_port_id_action_resource *cache_resource;
uint32_t idx = 0;
+ int ret;
/* Lookup a matching resource from cache. */
ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PORT_ID], sh->port_id_action_list,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate resource memory");
*cache_resource = *resource;
- /*
- * Depending on rdma_core version the glue routine calls
- * either mlx5dv_dr_action_create_dest_ib_port(domain, dev_port)
- * or mlx5dv_dr_action_create_dest_vport(domain, vport_id).
- */
- cache_resource->action =
- mlx5_glue->dr_create_flow_action_dest_port
- (priv->sh->fdb_domain, resource->port_id);
- if (!cache_resource->action) {
- rte_free(cache_resource);
+ ret = mlx5_flow_os_create_flow_action_dest_port
+ (priv->sh->fdb_domain, resource->port_id,
+ &cache_resource->action);
+ if (ret) {
+ mlx5_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create action");
struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
struct mlx5dv_dr_domain *domain;
uint32_t idx = 0;
+ int ret;
/* Lookup a matching resource from cache. */
ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
domain = sh->rx_domain;
else
domain = sh->tx_domain;
- cache_resource->action =
- mlx5_glue->dr_create_flow_action_push_vlan(domain,
- resource->vlan_tag);
- if (!cache_resource->action) {
- rte_free(cache_resource);
+ ret = mlx5_flow_os_create_flow_action_push_vlan
+ (domain, resource->vlan_tag,
+ &cache_resource->action);
+ if (ret) {
+ mlx5_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create action");
return 0;
}
/**
- * Get the size of specific rte_flow_item_type
+ * Get the header size of a specific rte_flow_item_type.
*
* @param[in] item_type
* Tested rte_flow_item_type.
* sizeof struct item_type, 0 if void or irrelevant.
*/
static size_t
-flow_dv_get_item_len(const enum rte_flow_item_type item_type)
+flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
{
size_t retval;
switch (item_type) {
case RTE_FLOW_ITEM_TYPE_ETH:
- retval = sizeof(struct rte_flow_item_eth);
+ retval = sizeof(struct rte_ether_hdr);
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
- retval = sizeof(struct rte_flow_item_vlan);
+ retval = sizeof(struct rte_vlan_hdr);
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
- retval = sizeof(struct rte_flow_item_ipv4);
+ retval = sizeof(struct rte_ipv4_hdr);
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
- retval = sizeof(struct rte_flow_item_ipv6);
+ retval = sizeof(struct rte_ipv6_hdr);
break;
case RTE_FLOW_ITEM_TYPE_UDP:
- retval = sizeof(struct rte_flow_item_udp);
+ retval = sizeof(struct rte_udp_hdr);
break;
case RTE_FLOW_ITEM_TYPE_TCP:
- retval = sizeof(struct rte_flow_item_tcp);
+ retval = sizeof(struct rte_tcp_hdr);
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
- retval = sizeof(struct rte_flow_item_vxlan);
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ retval = sizeof(struct rte_vxlan_hdr);
break;
case RTE_FLOW_ITEM_TYPE_GRE:
- retval = sizeof(struct rte_flow_item_gre);
- break;
case RTE_FLOW_ITEM_TYPE_NVGRE:
- retval = sizeof(struct rte_flow_item_nvgre);
- break;
- case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
- retval = sizeof(struct rte_flow_item_vxlan_gpe);
+ retval = sizeof(struct rte_gre_hdr);
break;
case RTE_FLOW_ITEM_TYPE_MPLS:
- retval = sizeof(struct rte_flow_item_mpls);
+ retval = sizeof(struct rte_mpls_hdr);
break;
case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
default:
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "invalid empty data");
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
- len = flow_dv_get_item_len(items->type);
+ len = flow_dv_get_item_hdr_len(items->type);
if (len + temp_size > MLX5_ENCAP_MAX_LEN)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
struct mlx5dv_dr_domain *ns;
uint32_t actions_len;
+ int ret;
resource->flags = dev_flow->dv.group ? 0 :
MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
}
}
/* Register new modify-header resource. */
- cache_resource = rte_calloc(__func__, 1,
- sizeof(*cache_resource) + actions_len, 0);
+ cache_resource = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(*cache_resource) + actions_len, 0,
+ SOCKET_ID_ANY);
if (!cache_resource)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate resource memory");
*cache_resource = *resource;
rte_memcpy(cache_resource->actions, resource->actions, actions_len);
- cache_resource->verbs_action =
- mlx5_glue->dv_create_flow_action_modify_header
- (sh->ctx, cache_resource->ft_type, ns,
- cache_resource->flags, actions_len,
- (uint64_t *)cache_resource->actions);
- if (!cache_resource->verbs_action) {
- rte_free(cache_resource);
+ ret = mlx5_flow_os_create_flow_action_modify_header
+ (sh->ctx, ns, cache_resource,
+ actions_len, &cache_resource->action);
+ if (ret) {
+ mlx5_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create action");
MLX5_COUNTERS_PER_POOL +
sizeof(struct mlx5_counter_stats_raw)) * raws_n +
sizeof(struct mlx5_counter_stats_mem_mng);
- uint8_t *mem = rte_calloc(__func__, 1, size, sysconf(_SC_PAGESIZE));
+ size_t pgsize = rte_mem_page_size();
+ if (pgsize == (size_t)-1) {
+ DRV_LOG(ERR, "Failed to get mem page size");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
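+	/* The buffer is registered as DevX umem below, hence page alignment. */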
+ uint8_t *mem = mlx5_malloc(MLX5_MEM_ZERO, size, pgsize,
+ SOCKET_ID_ANY);
int i;
if (!mem) {
IBV_ACCESS_LOCAL_WRITE);
if (!mem_mng->umem) {
rte_errno = errno;
- rte_free(mem);
+ mlx5_free(mem);
return NULL;
}
mkey_attr.addr = (uintptr_t)mem;
if (!mem_mng->dm) {
mlx5_glue->devx_umem_dereg(mem_mng->umem);
rte_errno = errno;
- rte_free(mem);
+ mlx5_free(mem);
return NULL;
}
mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
void *old_pools = cont->pools;
uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
- void *pools = rte_calloc(__func__, 1, mem_size, 0);
+ void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
if (!pools) {
rte_errno = ENOMEM;
mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
if (!mem_mng) {
- rte_free(pools);
+ mlx5_free(pools);
return -ENOMEM;
}
for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
cont->pools = pools;
rte_spinlock_unlock(&cont->resize_sl);
if (old_pools)
- rte_free(old_pools);
+ mlx5_free(old_pools);
return 0;
}
size += MLX5_COUNTERS_PER_POOL * CNT_SIZE;
size += (batch ? 0 : MLX5_COUNTERS_PER_POOL * CNTEXT_SIZE);
size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * AGE_SIZE);
- pool = rte_calloc(__func__, 1, size, 0);
+ pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
if (!pool) {
rte_errno = ENOMEM;
return NULL;
}
/**
- * Update the minimum dcs-id for aged or no-aged counter pool.
+ * Restore skipped counters in the pool.
+ *
+ * As the counter pool query requires the first counter dcs
+ * ID to be 4-aligned, the pool counters are skipped when the
+ * pool min_dcs ID is not aligned to 4.
+ * Once another min_dcs with an ID smaller than those skipped
+ * counters' dcs IDs appears, the skipped counters become
+ * safe to use.
+ * Should be called when min_dcs is updated.
*
- * @param[in] dev
- * Pointer to the Ethernet device structure.
* @param[in] pool
* Current counter pool.
- * @param[in] batch
- * Whether the pool is for counter that was allocated by batch command.
- * @param[in] age
- * Whether the counter is for aging.
+ * @param[in] last_min_dcs
+ * Last min_dcs.
*/
static void
-flow_dv_counter_update_min_dcs(struct rte_eth_dev *dev,
- struct mlx5_flow_counter_pool *pool,
- uint32_t batch, uint32_t age)
+flow_dv_counter_restore(struct mlx5_flow_counter_pool *pool,
+ struct mlx5_devx_obj *last_min_dcs)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_flow_counter_pool *other;
- struct mlx5_pools_container *cont;
+ struct mlx5_flow_counter_ext *cnt_ext;
+ uint32_t offset, new_offset;
+ uint32_t skip_cnt = 0;
+ uint32_t i;
- cont = MLX5_CNT_CONTAINER(priv->sh, batch, (age ^ 0x1));
- other = flow_dv_find_pool_by_id(cont, pool->min_dcs->id);
- if (!other)
+ if (!pool->skip_cnt)
return;
- if (pool->min_dcs->id < other->min_dcs->id) {
- rte_atomic64_set(&other->a64_dcs,
- rte_atomic64_read(&pool->a64_dcs));
- } else {
- rte_atomic64_set(&pool->a64_dcs,
- rte_atomic64_read(&other->a64_dcs));
+ /*
+	 * If the last min_dcs is not valid, the skipped counters may even be
+	 * after it, so set the offset to cover the whole pool.
+ */
+ if (last_min_dcs->id & (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1))
+ offset = MLX5_COUNTERS_PER_POOL;
+ else
+ offset = last_min_dcs->id % MLX5_COUNTERS_PER_POOL;
+ new_offset = pool->min_dcs->id % MLX5_COUNTERS_PER_POOL;
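+	/*
+	 * Worked example with hypothetical IDs: if the 4-aligned
+	 * last_min_dcs ID maps to pool offset 4 and the new min_dcs
+	 * maps to offset 0, the loop below re-enables every skipped
+	 * counter in offsets [1, 4), since they all follow the new
+	 * min_dcs.
+	 */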
+ /*
+	 * Check the counters from 1 up to the last_min_dcs offset. Counters
+	 * skipped before the new min_dcs mean the pool still has skipped
+	 * counters; counters skipped after the new min_dcs are ready to use.
+	 * The offset 0 counter must be empty or the min_dcs, so start from 1.
+ */
+ for (i = 1; i < offset; i++) {
+ cnt_ext = MLX5_GET_POOL_CNT_EXT(pool, i);
+ if (cnt_ext->skipped) {
+ if (i > new_offset) {
+ cnt_ext->skipped = 0;
+ TAILQ_INSERT_TAIL
+ (&pool->counters[pool->query_gen],
+ MLX5_POOL_GET_CNT(pool, i), next);
+ } else {
+ skip_cnt++;
+ }
+ }
}
+ if (!skip_cnt)
+ pool->skip_cnt = 0;
}
+
/**
* Prepare a new counter and/or a new counter pool.
*
struct mlx5_pools_container *cont;
struct mlx5_flow_counter_pool *pool;
struct mlx5_counters tmp_tq;
+ struct mlx5_devx_obj *last_min_dcs;
struct mlx5_devx_obj *dcs = NULL;
struct mlx5_flow_counter *cnt;
+ uint32_t add2other;
uint32_t i;
cont = MLX5_CNT_CONTAINER(priv->sh, batch, age);
if (!batch) {
+retry:
+ add2other = 0;
/* bulk_bitmap must be 0 for single counter allocation. */
dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
if (!dcs)
return NULL;
pool = flow_dv_find_pool_by_id(cont, dcs->id);
+		/* Check if the counter belongs to an existing pool ID range. */
if (!pool) {
- pool = flow_dv_pool_create(dev, dcs, batch, age);
- if (!pool) {
- mlx5_devx_cmd_destroy(dcs);
- return NULL;
+ pool = flow_dv_find_pool_by_id
+ (MLX5_CNT_CONTAINER
+ (priv->sh, batch, (age ^ 0x1)), dcs->id);
+ /*
+			 * Pool exists, the counter will be added to the
+			 * other container and needs to be reallocated later.
+ */
+ if (pool) {
+ add2other = 1;
+ } else {
+ pool = flow_dv_pool_create(dev, dcs, batch,
+ age);
+ if (!pool) {
+ mlx5_devx_cmd_destroy(dcs);
+ return NULL;
+ }
}
- } else if (dcs->id < pool->min_dcs->id) {
+ }
+ if ((dcs->id < pool->min_dcs->id ||
+ pool->min_dcs->id &
+ (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1)) &&
+ !(dcs->id & (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1))) {
+ /*
+			 * Update the pool min_dcs only if the current dcs
+			 * is valid and the existing min_dcs is not valid
+			 * or is greater than the new dcs.
+ */
+ last_min_dcs = pool->min_dcs;
rte_atomic64_set(&pool->a64_dcs,
(int64_t)(uintptr_t)dcs);
+ /*
+			 * Restore any skipped counters if the new min_dcs ID
+			 * is smaller or the last min_dcs is not valid.
+ */
+ if (dcs->id < last_min_dcs->id ||
+ last_min_dcs->id &
+ (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1))
+ flow_dv_counter_restore(pool, last_min_dcs);
}
- flow_dv_counter_update_min_dcs(dev,
- pool, batch, age);
i = dcs->id % MLX5_COUNTERS_PER_POOL;
cnt = MLX5_POOL_GET_CNT(pool, i);
cnt->pool = pool;
MLX5_GET_POOL_CNT_EXT(pool, i)->dcs = dcs;
+ /*
+		 * If min_dcs is not valid, the newly allocated dcs also
+		 * failed to become a valid min_dcs, so just skip it.
+		 * Likewise, if min_dcs is valid but the new dcs ID is
+		 * smaller yet did not become the min_dcs (it is not
+		 * 4-aligned), skip it as well.
+ */
+ if (pool->min_dcs->id &
+ (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1) ||
+ dcs->id < pool->min_dcs->id) {
+ MLX5_GET_POOL_CNT_EXT(pool, i)->skipped = 1;
+ pool->skip_cnt = 1;
+ goto retry;
+ }
+ if (add2other) {
+ TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen],
+ cnt, next);
+ goto retry;
+ }
*cnt_free = cnt;
return pool;
}
if (!cnt_free->action) {
uint16_t offset;
struct mlx5_devx_obj *dcs;
+ int ret;
if (batch) {
offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
offset = 0;
dcs = cnt_ext->dcs;
}
- cnt_free->action = mlx5_glue->dv_create_flow_action_counter
- (dcs->obj, offset);
- if (!cnt_free->action) {
+ ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
+ &cnt_free->action);
+ if (ret) {
rte_errno = errno;
goto err;
}
.hop_limits = 0xff,
},
};
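+	/*
+	 * NIC-level eCPRI mask: the full one-byte message type in the
+	 * common header plus the first 4 bytes of the message body.
+	 */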
+ const struct rte_flow_item_ecpri nic_ecpri_mask = {
+ .hdr = {
+ .common = {
+ .u32 =
+ RTE_BE32(((const struct rte_ecpri_common_hdr) {
+ .type = 0xFF,
+ }).u32),
+ },
+ .dummy[0] = 0xffffffff,
+ },
+ };
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_config *dev_conf = &priv->config;
uint16_t queue_index = 0xFFFF;
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
int type = items->type;
+ if (!mlx5_flow_os_item_supported(type))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "item not supported");
switch (type) {
case RTE_FLOW_ITEM_TYPE_VOID:
break;
return ret;
last_item = MLX5_FLOW_LAYER_GTP;
break;
+ case RTE_FLOW_ITEM_TYPE_ECPRI:
+ /* Capacity will be checked in the translate stage. */
+ ret = mlx5_flow_validate_item_ecpri(items, item_flags,
+ last_item,
+ ether_type,
+ &nic_ecpri_mask,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_ECPRI;
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
}
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
int type = actions->type;
+
+ if (!mlx5_flow_os_action_supported(type))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
"no fate action is found");
}
- /* Continue validation for Xcap actions.*/
- if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) && (queue_index == 0xFFFF ||
- mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
+	/* Continue validation for Xcap and VLAN actions. */
+ if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
+ MLX5_FLOW_VLAN_ACTIONS)) &&
+ (queue_index == 0xFFFF ||
+ mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
MLX5_FLOW_XCAP_ACTIONS)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "encap and decap "
"combination aren't supported");
- if (!attr->transfer && attr->ingress && (action_flags &
- MLX5_FLOW_ACTION_ENCAP))
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL, "encap is not supported"
- " for ingress traffic");
+ if (!attr->transfer && attr->ingress) {
+ if (action_flags & MLX5_FLOW_ACTION_ENCAP)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "encap is not supported"
+ " for ingress traffic");
+ else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "push VLAN action not "
+ "supported for ingress");
+ else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
+ MLX5_FLOW_VLAN_ACTIONS)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "no support for "
+ "multiple VLAN actions");
+ }
}
/* Hairpin flow will add one more TAG action. */
if (hairpin > 0)
dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
dev_flow->handle = dev_handle;
dev_flow->handle_idx = handle_idx;
- dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
+ /*
+	 * In some old rdma-core releases, before continuing, a check of the
+	 * length of the matching parameter is done first, and it must use
+	 * the length without the misc4 param. If the flow has misc4 support,
+	 * the length is adjusted accordingly later. Each param member is
+	 * naturally aligned to a 64B boundary.
+ */
+ dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
+ MLX5_ST_SZ_BYTES(fte_match_set_misc4);
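+	/* Restored to the full fte_match_param size if eCPRI is matched. */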
/*
* The matching value needs to be cleared to 0 before using. In the
* past, it will be automatically cleared when using rte_*alloc
* Set match on ethertype only if ETH header is not followed by VLAN.
* HW is optimized for IPv4/IPv6. In such cases, avoid setting
* ethertype, and use ip_version field instead.
+ * eCPRI over Ether layer will use type value 0xAEFE.
*/
if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
eth_m->type == 0xFFFF) {
const struct rte_flow_item_nvgre *nvgre_v = item->spec;
void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
- const char *tni_flow_id_m = (const char *)nvgre_m->tni;
- const char *tni_flow_id_v = (const char *)nvgre_v->tni;
+ const char *tni_flow_id_m;
+ const char *tni_flow_id_v;
char *gre_key_m;
char *gre_key_v;
int size;
return;
if (!nvgre_m)
nvgre_m = &rte_flow_item_nvgre_mask;
+ tni_flow_id_m = (const char *)nvgre_m->tni;
+ tni_flow_id_v = (const char *)nvgre_v->tni;
size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
}
+/**
+ * Add eCPRI item to matcher and to the value.
+ *
+ * @param[in] dev
+ *   The device to configure through.
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ */
+static void
+flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
+ void *key, const struct rte_flow_item *item)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_item_ecpri *ecpri_m = item->mask;
+ const struct rte_flow_item_ecpri *ecpri_v = item->spec;
+ void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ misc_parameters_4);
+ void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
+ uint32_t *samples;
+ void *dw_m;
+ void *dw_v;
+
+ if (!ecpri_v)
+ return;
+ if (!ecpri_m)
+ ecpri_m = &rte_flow_item_ecpri_mask;
+ /*
+	 * At most four DW samples are supported in a single matching now.
+	 * Two are used now for eCPRI matching:
+ * 1. Type: one byte, mask should be 0x00ff0000 in network order
+ * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
+ * if any.
+ */
+ if (!ecpri_m->hdr.common.u32)
+ return;
+ samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
+ /* Need to take the whole DW as the mask to fill the entry. */
+ dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
+ prog_sample_field_value_0);
+ dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
+ prog_sample_field_value_0);
+ /* Already big endian (network order) in the header. */
+ *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
+ *(uint32_t *)dw_v = ecpri_v->hdr.common.u32;
+ /* Sample#0, used for matching type, offset 0. */
+ MLX5_SET(fte_match_set_misc4, misc4_m,
+ prog_sample_field_id_0, samples[0]);
+	/*
+	 * The sample ID is not a maskable field; program the same ID in
+	 * both the mask and value entries.
+	 */
+ MLX5_SET(fte_match_set_misc4, misc4_v,
+ prog_sample_field_id_0, samples[0]);
+ /*
+	 * Check if the message body part needs to be matched.
+	 * Wildcard rules matching only the type field should be supported.
+ */
+ if (ecpri_m->hdr.dummy[0]) {
+ switch (ecpri_v->hdr.common.type) {
+ case RTE_ECPRI_MSG_TYPE_IQ_DATA:
+ case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
+ case RTE_ECPRI_MSG_TYPE_DLY_MSR:
+ dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
+ prog_sample_field_value_1);
+ dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
+ prog_sample_field_value_1);
+ *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
+ *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0];
+ /* Sample#1, to match message body, offset 4. */
+ MLX5_SET(fte_match_set_misc4, misc4_m,
+ prog_sample_field_id_1, samples[1]);
+ MLX5_SET(fte_match_set_misc4, misc4_v,
+ prog_sample_field_id_1, samples[1]);
+ break;
+ default:
+ /* Others, do not match any sample ID. */
+ break;
+ }
+ }
+}
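+
+/*
+ * A minimal usage sketch (application side, hypothetical values) of the
+ * kind of pattern this translation serves: match eCPRI IQ data messages
+ * with PC_ID 0x1234 over Ethernet.
+ *
+ *   struct rte_flow_item_ecpri spec = {
+ *       .hdr.common.type = RTE_ECPRI_MSG_TYPE_IQ_DATA,
+ *       .hdr.type0.pc_id = RTE_BE16(0x1234),
+ *   };
+ *   struct rte_flow_item_ecpri mask = {
+ *       .hdr.common.type = 0xff,
+ *       .hdr.type0.pc_id = RTE_BE16(0xffff),
+ *   };
+ *   struct rte_flow_item pattern[] = {
+ *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
+ *       { .type = RTE_FLOW_ITEM_TYPE_ECPRI, .spec = &spec, .mask = &mask },
+ *       { .type = RTE_FLOW_ITEM_TYPE_END },
+ *   };
+ */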
+
static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
#define HEADER_IS_ZERO(match_criteria, headers) \
match_criteria_enable |=
(!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
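+	/* misc4 holds the programmable (flex parser) sample fields. */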
+ match_criteria_enable |=
+ (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
+ MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
return match_criteria_enable;
}
domain = sh->tx_domain;
else
domain = sh->rx_domain;
- tbl->obj = mlx5_glue->dr_create_flow_tbl(domain, table_id);
- if (!tbl->obj) {
+ ret = mlx5_flow_os_create_flow_tbl(domain, table_id, &tbl->obj);
+ if (ret) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create flow table object");
rte_flow_error_set(error, -ret,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot insert flow table data entry");
- mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
+ mlx5_flow_os_destroy_flow_tbl(tbl->obj);
mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
}
rte_atomic32_inc(&tbl->refcnt);
if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
struct mlx5_hlist_entry *pos = &tbl_data->entry;
- mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
+ mlx5_flow_os_destroy_flow_tbl(tbl->obj);
tbl->obj = NULL;
/* remove the entry from the hash list and free memory. */
mlx5_hlist_remove(sh->flow_tbls, pos);
};
struct mlx5_flow_tbl_resource *tbl;
struct mlx5_flow_tbl_data_entry *tbl_data;
+ int ret;
tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
key->domain, error);
}
}
/* Register new matcher. */
- cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
+ cache_matcher = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache_matcher), 0,
+ SOCKET_ID_ANY);
if (!cache_matcher) {
flow_dv_tbl_resource_release(dev, tbl);
return rte_flow_error_set(error, ENOMEM,
dv_attr.priority = matcher->priority;
if (key->direction)
dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
- cache_matcher->matcher_object =
- mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
- if (!cache_matcher->matcher_object) {
- rte_free(cache_matcher);
+ ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
+ &cache_matcher->matcher_object);
+ if (ret) {
+ mlx5_free(cache_matcher);
#ifdef HAVE_MLX5DV_DR
flow_dv_tbl_resource_release(dev, tbl);
#endif
struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_flow_dv_tag_resource *cache_resource;
struct mlx5_hlist_entry *entry;
+ int ret;
/* Lookup a matching resource from cache. */
entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24);
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate resource memory");
cache_resource->entry.key = (uint64_t)tag_be24;
- cache_resource->action = mlx5_glue->dv_create_flow_action_tag(tag_be24);
- if (!cache_resource->action) {
- rte_free(cache_resource);
+ ret = mlx5_flow_os_create_flow_action_tag(tag_be24,
+ &cache_resource->action);
+ if (ret) {
+ mlx5_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create action");
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) {
- mlx5_glue->destroy_flow_action(cache_resource->action);
- rte_free(cache_resource);
+ mlx5_flow_os_destroy_flow_action(cache_resource->action);
+ mlx5_free(cache_resource);
return rte_flow_error_set(error, EEXIST,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot insert tag");
dev->data->port_id, (void *)tag,
rte_atomic32_read(&tag->refcnt));
if (rte_atomic32_dec_and_test(&tag->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action(tag->action));
+ claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
mlx5_hlist_remove(sh->tag_table, &tag->entry);
DRV_LOG(DEBUG, "port %u tag %p: removed",
dev->data->port_id, (void *)tag);
uint64_t priority = attr->priority;
struct mlx5_flow_dv_matcher matcher = {
.mask = {
- .size = sizeof(matcher.mask.buf),
+ .size = sizeof(matcher.mask.buf) -
+ MLX5_ST_SZ_BYTES(fte_match_set_misc4),
},
};
int actions_n = 0;
const struct rte_flow_action *found_action = NULL;
struct mlx5_flow_meter *fm = NULL;
+ if (!mlx5_flow_os_action_supported(action_type))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
switch (action_type) {
case RTE_FLOW_ACTION_TYPE_VOID:
break;
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ dev_flow->dv.encap_decap->action;
action_flags |= MLX5_FLOW_ACTION_ENCAP;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ dev_flow->dv.encap_decap->action;
action_flags |= MLX5_FLOW_ACTION_DECAP;
break;
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
(dev, actions, dev_flow, attr, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ dev_flow->dv.encap_decap->action;
} else {
/* Handle encap without preceding decap. */
if (flow_dv_create_action_l2_encap
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ dev_flow->dv.encap_decap->action;
}
action_flags |= MLX5_FLOW_ACTION_ENCAP;
break;
(dev, dev_flow, attr->transfer, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ dev_flow->dv.encap_decap->action;
}
/* If decap is followed by encap, handle it at encap. */
action_flags |= MLX5_FLOW_ACTION_DECAP;
(dev, mhdr_res, dev_flow, error))
return -rte_errno;
dev_flow->dv.actions[modify_action_position] =
- handle->dvh.modify_hdr->verbs_action;
+ handle->dvh.modify_hdr->action;
}
if (action_flags & MLX5_FLOW_ACTION_COUNT) {
flow->counter =
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
int item_type = items->type;
+ if (!mlx5_flow_os_item_supported(item_type))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "item not supported");
switch (item_type) {
case RTE_FLOW_ITEM_TYPE_PORT_ID:
flow_dv_translate_item_port_id(dev, match_mask,
case RTE_FLOW_ITEM_TYPE_GRE:
flow_dv_translate_item_gre(match_mask, match_value,
items, tunnel);
- matcher.priority = rss_desc->level >= 2 ?
- MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
+ matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_GRE;
break;
case RTE_FLOW_ITEM_TYPE_GRE_KEY:
case RTE_FLOW_ITEM_TYPE_NVGRE:
flow_dv_translate_item_nvgre(match_mask, match_value,
items, tunnel);
- matcher.priority = rss_desc->level >= 2 ?
- MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
+ matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_GRE;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
flow_dv_translate_item_vxlan(match_mask, match_value,
items, tunnel);
- matcher.priority = rss_desc->level >= 2 ?
- MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
+ matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_VXLAN;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
flow_dv_translate_item_vxlan_gpe(match_mask,
match_value, items,
tunnel);
- matcher.priority = rss_desc->level >= 2 ?
- MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
+ matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
break;
case RTE_FLOW_ITEM_TYPE_GENEVE:
flow_dv_translate_item_geneve(match_mask, match_value,
items, tunnel);
- matcher.priority = rss_desc->level >= 2 ?
- MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
+ matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_GENEVE;
break;
case RTE_FLOW_ITEM_TYPE_MPLS:
flow_dv_translate_item_mpls(match_mask, match_value,
items, last_item, tunnel);
- matcher.priority = rss_desc->level >= 2 ?
- MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
+ matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_MPLS;
break;
case RTE_FLOW_ITEM_TYPE_MARK:
case RTE_FLOW_ITEM_TYPE_GTP:
flow_dv_translate_item_gtp(match_mask, match_value,
items, tunnel);
- matcher.priority = rss_desc->level >= 2 ?
- MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
+ matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_GTP;
break;
+ case RTE_FLOW_ITEM_TYPE_ECPRI:
+ if (!mlx5_flex_parser_ecpri_exist(dev)) {
+			/* Create it only the first time it is used. */
+ ret = mlx5_flex_parser_ecpri_alloc(dev);
+ if (ret)
+ return rte_flow_error_set
+ (error, -ret,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "cannot create eCPRI parser");
+ }
+			/* Adjust the matcher and device flow value sizes. */
+ matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
+ dev_flow->dv.value.size =
+ MLX5_ST_SZ_BYTES(fte_match_param);
+ flow_dv_translate_item_ecpri(dev, match_mask,
+ match_value, items);
+ /* No other protocol should follow eCPRI layer. */
+ last_item = MLX5_FLOW_LAYER_ECPRI;
+ break;
default:
break;
}
dh->rix_default_fate = MLX5_FLOW_FATE_DEFAULT_MISS;
dv->actions[n++] = priv->sh->default_miss.action;
}
- dh->drv_flow =
- mlx5_glue->dv_create_flow(dv_h->matcher->matcher_object,
- (void *)&dv->value, n,
- dv->actions);
- if (!dh->drv_flow) {
+ err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
+ (void *)&dv->value, n,
+ dv->actions, &dh->drv_flow);
+ if (err) {
rte_flow_error_set(error, errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
dev->data->port_id, (void *)matcher,
rte_atomic32_read(&matcher->refcnt));
if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
(matcher->matcher_object));
LIST_REMOVE(matcher, next);
/* table ref-- in release interface. */
flow_dv_tbl_resource_release(dev, matcher->tbl);
- rte_free(matcher);
+ mlx5_free(matcher);
DRV_LOG(DEBUG, "port %u matcher %p: removed",
dev->data->port_id, (void *)matcher);
return 0;
idx);
if (!cache_resource)
return 0;
- MLX5_ASSERT(cache_resource->verbs_action);
+ MLX5_ASSERT(cache_resource->action);
DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action
- (cache_resource->verbs_action));
+ claim_zero(mlx5_flow_os_destroy_flow_action
+ (cache_resource->action));
ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
&priv->sh->encaps_decaps, idx,
cache_resource, next);
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action
- (cache_resource->action));
+ claim_zero(mlx5_flow_os_destroy_flow_action
+ (cache_resource->action));
/* jump action memory free is inside the table release. */
flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
DRV_LOG(DEBUG, "jump table resource %p: removed",
struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
handle->dvh.modify_hdr;
- MLX5_ASSERT(cache_resource->verbs_action);
+ MLX5_ASSERT(cache_resource->action);
DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action
- (cache_resource->verbs_action));
+ claim_zero(mlx5_flow_os_destroy_flow_action
+ (cache_resource->action));
LIST_REMOVE(cache_resource, next);
- rte_free(cache_resource);
+ mlx5_free(cache_resource);
DRV_LOG(DEBUG, "modify-header resource %p: removed",
(void *)cache_resource);
return 0;
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action
- (cache_resource->action));
+ claim_zero(mlx5_flow_os_destroy_flow_action
+ (cache_resource->action));
ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
&priv->sh->port_id_action_list, idx,
cache_resource, next);
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action
- (cache_resource->action));
+ claim_zero(mlx5_flow_os_destroy_flow_action
+ (cache_resource->action));
ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
&priv->sh->push_vlan_action_list, idx,
cache_resource, next);
if (!dh)
return;
if (dh->drv_flow) {
- claim_zero(mlx5_glue->dv_destroy_flow(dh->drv_flow));
+ claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
dh->drv_flow = NULL;
}
if (dh->fate_action == MLX5_FLOW_FATE_DROP ||
if (!mtd || !priv->config.dv_flow_en)
return 0;
if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
- claim_zero(mlx5_glue->dv_destroy_flow
- (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
+ claim_zero(mlx5_flow_os_destroy_flow
+ (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
- claim_zero(mlx5_glue->dv_destroy_flow
- (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
+ claim_zero(mlx5_flow_os_destroy_flow
+ (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
- claim_zero(mlx5_glue->dv_destroy_flow
- (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
+ claim_zero(mlx5_flow_os_destroy_flow
+ (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
if (mtd->egress.color_matcher)
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (mtd->egress.color_matcher));
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
+ (mtd->egress.color_matcher));
if (mtd->egress.any_matcher)
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (mtd->egress.any_matcher));
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
+ (mtd->egress.any_matcher));
if (mtd->egress.tbl)
flow_dv_tbl_resource_release(dev, mtd->egress.tbl);
if (mtd->egress.sfx_tbl)
flow_dv_tbl_resource_release(dev, mtd->egress.sfx_tbl);
if (mtd->ingress.color_matcher)
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (mtd->ingress.color_matcher));
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
+ (mtd->ingress.color_matcher));
if (mtd->ingress.any_matcher)
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (mtd->ingress.any_matcher));
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
+ (mtd->ingress.any_matcher));
if (mtd->ingress.tbl)
flow_dv_tbl_resource_release(dev, mtd->ingress.tbl);
if (mtd->ingress.sfx_tbl)
flow_dv_tbl_resource_release(dev, mtd->ingress.sfx_tbl);
if (mtd->transfer.color_matcher)
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (mtd->transfer.color_matcher));
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
+ (mtd->transfer.color_matcher));
if (mtd->transfer.any_matcher)
- claim_zero(mlx5_glue->dv_destroy_flow_matcher
- (mtd->transfer.any_matcher));
+ claim_zero(mlx5_flow_os_destroy_flow_matcher
+ (mtd->transfer.any_matcher));
if (mtd->transfer.tbl)
flow_dv_tbl_resource_release(dev, mtd->transfer.tbl);
if (mtd->transfer.sfx_tbl)
flow_dv_tbl_resource_release(dev, mtd->transfer.sfx_tbl);
if (mtd->drop_actn)
- claim_zero(mlx5_glue->destroy_flow_action(mtd->drop_actn));
- rte_free(mtd);
+ claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
+ mlx5_free(mtd);
return 0;
}
struct mlx5_meter_domain_info *dtb;
struct rte_flow_error error;
int i = 0;
+ int ret;
if (transfer)
dtb = &mtb->transfer;
/* Create matchers, Any and Color. */
dv_attr.priority = 3;
dv_attr.match_criteria_enable = 0;
- dtb->any_matcher = mlx5_glue->dv_create_flow_matcher(sh->ctx,
- &dv_attr,
- dtb->tbl->obj);
- if (!dtb->any_matcher) {
+ ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
+ &dtb->any_matcher);
+ if (ret) {
DRV_LOG(ERR, "Failed to create meter"
" policer default matcher.");
goto error_exit;
1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
- dtb->color_matcher = mlx5_glue->dv_create_flow_matcher(sh->ctx,
- &dv_attr,
- dtb->tbl->obj);
- if (!dtb->color_matcher) {
+ ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
+ &dtb->color_matcher);
+ if (ret) {
DRV_LOG(ERR, "Failed to create meter policer color matcher.");
goto error_exit;
}
actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
actions[i++] = mtb->drop_actn;
/* Default rule: lowest priority, match any, actions: drop. */
- dtb->policer_rules[RTE_MTR_DROPPED] =
- mlx5_glue->dv_create_flow(dtb->any_matcher,
- (void *)&value, i, actions);
- if (!dtb->policer_rules[RTE_MTR_DROPPED]) {
+ ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i,
+ actions,
+ &dtb->policer_rules[RTE_MTR_DROPPED]);
+ if (ret) {
DRV_LOG(ERR, "Failed to create meter policer drop rule.");
goto error_exit;
}
rte_errno = ENOTSUP;
return NULL;
}
- mtb = rte_calloc(__func__, 1, sizeof(*mtb), 0);
+ mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY);
if (!mtb) {
DRV_LOG(ERR, "Failed to allocate memory for meter.");
return NULL;
mtb->count_actns[i] = cnt->action;
}
/* Create drop action. */
- mtb->drop_actn = mlx5_glue->dr_create_flow_action_drop();
- if (!mtb->drop_actn) {
+ ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn);
+ if (ret) {
DRV_LOG(ERR, "Failed to create drop action.");
goto error_exit;
}
for (i = 0; i < RTE_MTR_DROPPED; i++) {
if (dt->policer_rules[i]) {
- claim_zero(mlx5_glue->dv_destroy_flow
- (dt->policer_rules[i]));
+ claim_zero(mlx5_flow_os_destroy_flow
+ (dt->policer_rules[i]));
dt->policer_rules[i] = NULL;
}
}
if (dt->jump_actn) {
- claim_zero(mlx5_glue->destroy_flow_action(dt->jump_actn));
+ claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn));
dt->jump_actn = NULL;
}
}
struct mlx5_meter_domains_infos *mtb = fm->mfts;
void *actions[METER_ACTIONS];
int i;
+ int ret = 0;
/* Create jump action. */
if (!dtb->jump_actn)
- dtb->jump_actn =
- mlx5_glue->dr_create_flow_action_dest_flow_tbl
- (dtb->sfx_tbl->obj);
- if (!dtb->jump_actn) {
+ ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
+ (dtb->sfx_tbl->obj, &dtb->jump_actn);
+ if (ret) {
DRV_LOG(ERR, "Failed to create policer jump action.");
goto error;
}
actions[j++] = mtb->drop_actn;
else
actions[j++] = dtb->jump_actn;
- dtb->policer_rules[i] =
- mlx5_glue->dv_create_flow(dtb->color_matcher,
- (void *)&value,
- j, actions);
- if (!dtb->policer_rules[i]) {
+ ret = mlx5_flow_os_create_flow(dtb->color_matcher,
+ (void *)&value, j, actions,
+ &dtb->policer_rules[i]);
+ if (ret) {
DRV_LOG(ERR, "Failed to create policer rule.");
goto error;
}
};
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
+