X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_flow_dv.c;h=7c160551329a0e64113987f30adfe3082bc217f5;hb=fd143711a6eaf5821ca0458200ac550a2d90b883;hp=4dec57d6969475cf3e880c33b93f5d2e0749bf81;hpb=6e88bc42c7a8c1e0fe60fd0d36f686cc1ca08507;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 4dec57d696..7c16055132 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -8,16 +8,6 @@ #include #include -/* Verbs header. */ -/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ -#ifdef PEDANTIC -#pragma GCC diagnostic ignored "-Wpedantic" -#endif -#include -#ifdef PEDANTIC -#pragma GCC diagnostic error "-Wpedantic" -#endif - #include #include #include @@ -29,14 +19,19 @@ #include #include #include +#include +#include #include #include #include +#include #include "mlx5_defs.h" #include "mlx5.h" +#include "mlx5_common_os.h" #include "mlx5_flow.h" +#include "mlx5_flow_os.h" #include "mlx5_rxtx.h" #ifdef HAVE_IBV_FLOW_DV_SUPPORT @@ -78,6 +73,9 @@ static int flow_dv_tbl_resource_release(struct rte_eth_dev *dev, struct mlx5_flow_tbl_resource *tbl); +static int +flow_dv_default_miss_resource_release(struct rte_eth_dev *dev); + /** * Initialize flow attributes structure according to flow items' types. * @@ -1479,6 +1477,13 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused, "isn't supported"); if (reg != REG_A) nic_mask.data = priv->sh->dv_meta_mask; + } else if (attr->transfer) { + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "extended metadata feature " + "should be enabled when " + "meta item is requested " + "with e-switch mode "); } if (!mask) mask = &rte_flow_item_meta_mask; @@ -1639,18 +1644,6 @@ flow_dv_validate_item_port_id(struct rte_eth_dev *dev, return 0; } -/* - * GTP flags are contained in 1 byte of the format: - * ------------------------------------------- - * | bit | 0 - 2 | 3 | 4 | 5 | 6 | 7 | - * |-----------------------------------------| - * | value | Version | PT | Res | E | S | PN | - * ------------------------------------------- - * - * Matching is supported only for GTP flags E, S, PN. - */ -#define MLX5_GTP_FLAGS_MASK 0x07 - /** * Validate VLAN item. * @@ -1724,6 +1717,18 @@ flow_dv_validate_item_vlan(const struct rte_flow_item *item, return 0; } +/* + * GTP flags are contained in 1 byte of the format: + * ------------------------------------------- + * | bit | 0 - 2 | 3 | 4 | 5 | 6 | 7 | + * |-----------------------------------------| + * | value | Version | PT | Res | E | S | PN | + * ------------------------------------------- + * + * Matching is supported only for GTP flags E, S, PN. + */ +#define MLX5_GTP_FLAGS_MASK 0x07 + /** * Validate GTP item. * @@ -1828,7 +1833,17 @@ flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ACTION, action, "no support for multiple VLAN " "actions"); - if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) + /* Pop VLAN with preceding Decap requires inner header with VLAN. */ + if ((action_flags & MLX5_FLOW_ACTION_DECAP) && + !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "cannot pop vlan after decap without " + "match on inner vlan in the flow"); + /* Pop VLAN without preceding Decap requires outer header with VLAN. 
*/ + if (!(action_flags & MLX5_FLOW_ACTION_DECAP) && + !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, @@ -1937,22 +1952,11 @@ flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev, const struct rte_flow_action_of_push_vlan *push_vlan = action->conf; const struct mlx5_priv *priv = dev->data->dev_private; - if (!attr->transfer && attr->ingress) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, - NULL, - "push VLAN action not supported for " - "ingress"); if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) && push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, action, "invalid vlan ethertype"); - if (action_flags & MLX5_FLOW_VLAN_ACTIONS) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, action, - "no support for multiple VLAN " - "actions"); if (action_flags & MLX5_FLOW_ACTION_PORT_ID) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, action, @@ -2417,6 +2421,11 @@ flow_dv_validate_action_decap(struct rte_eth_dev *dev, { const struct mlx5_priv *priv = dev->data->dev_private; + if (priv->config.hca_attr.scatter_fcs_w_decap_disable && + !priv->config.decap_en) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "decap is not enabled"); if (action_flags & MLX5_FLOW_XCAP_ACTIONS) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, NULL, @@ -2564,6 +2573,7 @@ flow_dv_encap_decap_resource_register struct mlx5_flow_dv_encap_decap_resource *cache_resource; struct mlx5dv_dr_domain *domain; uint32_t idx = 0; + int ret; resource->flags = dev_flow->dv.group ? 0 : 1; if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) @@ -2599,14 +2609,11 @@ flow_dv_encap_decap_resource_register RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot allocate resource memory"); *cache_resource = *resource; - cache_resource->verbs_action = - mlx5_glue->dv_create_flow_action_packet_reformat - (sh->ctx, cache_resource->reformat_type, - cache_resource->ft_type, domain, cache_resource->flags, - cache_resource->size, - (cache_resource->size ? cache_resource->buf : NULL)); - if (!cache_resource->verbs_action) { - rte_free(cache_resource); + ret = mlx5_flow_os_create_flow_action_packet_reformat + (sh->ctx, domain, cache_resource, + &cache_resource->action); + if (ret) { + mlx5_free(cache_resource); return rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot create action"); @@ -2647,15 +2654,14 @@ flow_dv_jump_tbl_resource_register { struct mlx5_flow_tbl_data_entry *tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl); - int cnt; + int cnt, ret; MLX5_ASSERT(tbl); cnt = rte_atomic32_read(&tbl_data->jump.refcnt); if (!cnt) { - tbl_data->jump.action = - mlx5_glue->dr_create_flow_action_dest_flow_tbl - (tbl->obj); - if (!tbl_data->jump.action) + ret = mlx5_flow_os_create_flow_action_dest_flow_tbl + (tbl->obj, &tbl_data->jump.action); + if (ret) return rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot create jump action"); @@ -2674,6 +2680,42 @@ flow_dv_jump_tbl_resource_register return 0; } +/** + * Find existing default miss resource or create and register a new one. + * + * @param[in, out] dev + * Pointer to rte_eth_dev structure. + * @param[out] error + * pointer to error structure. + * + * @return + * 0 on success otherwise -errno and errno is set. 
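+ *
+ * Note: the action is created on the first (0 -> 1) refcnt transition and
+ * shared afterwards; a matching flow_dv_default_miss_resource_release()
+ * call drops the reference and destroys the action when it returns to zero.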
+ */ +static int +flow_dv_default_miss_resource_register(struct rte_eth_dev *dev, + struct rte_flow_error *error) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_ctx_shared *sh = priv->sh; + struct mlx5_flow_default_miss_resource *cache_resource = + &sh->default_miss; + int cnt = rte_atomic32_read(&cache_resource->refcnt); + + if (!cnt) { + MLX5_ASSERT(cache_resource->action); + cache_resource->action = + mlx5_glue->dr_create_flow_action_default_miss(); + if (!cache_resource->action) + return rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot create default miss action"); + DRV_LOG(DEBUG, "new default miss resource %p: refcnt %d++", + (void *)cache_resource->action, cnt); + } + rte_atomic32_inc(&cache_resource->refcnt); + return 0; +} + /** * Find existing table port ID resource or create and register a new one. * @@ -2700,6 +2742,7 @@ flow_dv_port_id_action_resource_register struct mlx5_dev_ctx_shared *sh = priv->sh; struct mlx5_flow_dv_port_id_action_resource *cache_resource; uint32_t idx = 0; + int ret; /* Lookup a matching resource from cache. */ ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PORT_ID], sh->port_id_action_list, @@ -2723,16 +2766,11 @@ flow_dv_port_id_action_resource_register RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot allocate resource memory"); *cache_resource = *resource; - /* - * Depending on rdma_core version the glue routine calls - * either mlx5dv_dr_action_create_dest_ib_port(domain, ibv_port) - * or mlx5dv_dr_action_create_dest_vport(domain, vport_id). - */ - cache_resource->action = - mlx5_glue->dr_create_flow_action_dest_port - (priv->sh->fdb_domain, resource->port_id); - if (!cache_resource->action) { - rte_free(cache_resource); + ret = mlx5_flow_os_create_flow_action_dest_port + (priv->sh->fdb_domain, resource->port_id, + &cache_resource->action); + if (ret) { + mlx5_free(cache_resource); return rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot create action"); @@ -2776,6 +2814,7 @@ flow_dv_push_vlan_action_resource_register struct mlx5_flow_dv_push_vlan_action_resource *cache_resource; struct mlx5dv_dr_domain *domain; uint32_t idx = 0; + int ret; /* Lookup a matching resource from cache. */ ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PUSH_VLAN], @@ -2806,11 +2845,11 @@ flow_dv_push_vlan_action_resource_register domain = sh->rx_domain; else domain = sh->tx_domain; - cache_resource->action = - mlx5_glue->dr_create_flow_action_push_vlan(domain, - resource->vlan_tag); - if (!cache_resource->action) { - rte_free(cache_resource); + ret = mlx5_flow_os_create_flow_action_push_vlan + (domain, resource->vlan_tag, + &cache_resource->action); + if (ret) { + mlx5_free(cache_resource); return rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot create action"); @@ -2828,7 +2867,7 @@ flow_dv_push_vlan_action_resource_register return 0; } /** - * Get the size of specific rte_flow_item_type + * Get the size of specific rte_flow_item_type hdr size * * @param[in] item_type * Tested rte_flow_item_type. @@ -2837,43 +2876,39 @@ flow_dv_push_vlan_action_resource_register * sizeof struct item_type, 0 if void or irrelevant. 
*/ static size_t -flow_dv_get_item_len(const enum rte_flow_item_type item_type) +flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type) { size_t retval; switch (item_type) { case RTE_FLOW_ITEM_TYPE_ETH: - retval = sizeof(struct rte_flow_item_eth); + retval = sizeof(struct rte_ether_hdr); break; case RTE_FLOW_ITEM_TYPE_VLAN: - retval = sizeof(struct rte_flow_item_vlan); + retval = sizeof(struct rte_vlan_hdr); break; case RTE_FLOW_ITEM_TYPE_IPV4: - retval = sizeof(struct rte_flow_item_ipv4); + retval = sizeof(struct rte_ipv4_hdr); break; case RTE_FLOW_ITEM_TYPE_IPV6: - retval = sizeof(struct rte_flow_item_ipv6); + retval = sizeof(struct rte_ipv6_hdr); break; case RTE_FLOW_ITEM_TYPE_UDP: - retval = sizeof(struct rte_flow_item_udp); + retval = sizeof(struct rte_udp_hdr); break; case RTE_FLOW_ITEM_TYPE_TCP: - retval = sizeof(struct rte_flow_item_tcp); + retval = sizeof(struct rte_tcp_hdr); break; case RTE_FLOW_ITEM_TYPE_VXLAN: - retval = sizeof(struct rte_flow_item_vxlan); + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + retval = sizeof(struct rte_vxlan_hdr); break; case RTE_FLOW_ITEM_TYPE_GRE: - retval = sizeof(struct rte_flow_item_gre); - break; case RTE_FLOW_ITEM_TYPE_NVGRE: - retval = sizeof(struct rte_flow_item_nvgre); - break; - case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: - retval = sizeof(struct rte_flow_item_vxlan_gpe); + retval = sizeof(struct rte_gre_hdr); break; case RTE_FLOW_ITEM_TYPE_MPLS: - retval = sizeof(struct rte_flow_item_mpls); + retval = sizeof(struct rte_mpls_hdr); break; case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */ default: @@ -2926,7 +2961,7 @@ flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "invalid empty data"); for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { - len = flow_dv_get_item_len(items->type); + len = flow_dv_get_item_hdr_len(items->type); if (len + temp_size > MLX5_ENCAP_MAX_LEN) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, @@ -3950,6 +3985,7 @@ flow_dv_modify_hdr_resource_register struct mlx5_flow_dv_modify_hdr_resource *cache_resource; struct mlx5dv_dr_domain *ns; uint32_t actions_len; + int ret; resource->flags = dev_flow->dv.group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL; @@ -3982,21 +4018,20 @@ flow_dv_modify_hdr_resource_register } } /* Register new modify-header resource. */ - cache_resource = rte_calloc(__func__, 1, - sizeof(*cache_resource) + actions_len, 0); + cache_resource = mlx5_malloc(MLX5_MEM_ZERO, + sizeof(*cache_resource) + actions_len, 0, + SOCKET_ID_ANY); if (!cache_resource) return rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot allocate resource memory"); *cache_resource = *resource; rte_memcpy(cache_resource->actions, resource->actions, actions_len); - cache_resource->verbs_action = - mlx5_glue->dv_create_flow_action_modify_header - (sh->ctx, cache_resource->ft_type, ns, - cache_resource->flags, actions_len, - (uint64_t *)cache_resource->actions); - if (!cache_resource->verbs_action) { - rte_free(cache_resource); + ret = mlx5_flow_os_create_flow_action_modify_header + (sh->ctx, ns, cache_resource, + actions_len, &cache_resource->action); + if (ret) { + mlx5_free(cache_resource); return rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot create action"); @@ -4050,6 +4085,28 @@ flow_dv_counter_get_by_idx(struct rte_eth_dev *dev, return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL); } +/** + * Check the devx counter belongs to the pool. 
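+ *
+ * Counter devx IDs are handed out in blocks of MLX5_COUNTERS_PER_POOL, so
+ * a pool covers the contiguous ID range [base, base +
+ * MLX5_COUNTERS_PER_POOL), with base derived from pool->min_dcs->id
+ * rounded down to a pool boundary.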
+ * + * @param[in] pool + * Pointer to the counter pool. + * @param[in] id + * The counter devx ID. + * + * @return + * True if counter belongs to the pool, false otherwise. + */ +static bool +flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id) +{ + int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) * + MLX5_COUNTERS_PER_POOL; + + if (id >= base && id < base + MLX5_COUNTERS_PER_POOL) + return true; + return false; +} + /** * Get a pool by devx counter ID. * @@ -4065,24 +4122,25 @@ static struct mlx5_flow_counter_pool * flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id) { uint32_t i; - uint32_t n_valid = rte_atomic16_read(&cont->n_valid); - for (i = 0; i < n_valid; i++) { + /* Check last used pool. */ + if (cont->last_pool_idx != POOL_IDX_INVALID && + flow_dv_is_counter_in_pool(cont->pools[cont->last_pool_idx], id)) + return cont->pools[cont->last_pool_idx]; + /* ID out of range means no suitable pool in the container. */ + if (id > cont->max_id || id < cont->min_id) + return NULL; + /* + * Find the pool from the end of the container, since mostly counter + * ID is sequence increasing, and the last pool should be the needed + * one. + */ + i = rte_atomic16_read(&cont->n_valid); + while (i--) { struct mlx5_flow_counter_pool *pool = cont->pools[i]; - int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) * - MLX5_COUNTERS_PER_POOL; - if (id >= base && id < base + MLX5_COUNTERS_PER_POOL) { - /* - * Move the pool to the head, as counter allocate - * always gets the first pool in the container. - */ - if (pool != TAILQ_FIRST(&cont->pool_list)) { - TAILQ_REMOVE(&cont->pool_list, pool, next); - TAILQ_INSERT_HEAD(&cont->pool_list, pool, next); - } + if (flow_dv_is_counter_in_pool(pool, id)) return pool; - } } return NULL; } @@ -4112,7 +4170,14 @@ flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n) MLX5_COUNTERS_PER_POOL + sizeof(struct mlx5_counter_stats_raw)) * raws_n + sizeof(struct mlx5_counter_stats_mem_mng); - uint8_t *mem = rte_calloc(__func__, 1, size, sysconf(_SC_PAGESIZE)); + size_t pgsize = rte_mem_page_size(); + if (pgsize == (size_t)-1) { + DRV_LOG(ERR, "Failed to get mem page size"); + rte_errno = ENOMEM; + return NULL; + } + uint8_t *mem = mlx5_malloc(MLX5_MEM_ZERO, size, pgsize, + SOCKET_ID_ANY); int i; if (!mem) { @@ -4125,12 +4190,12 @@ flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n) IBV_ACCESS_LOCAL_WRITE); if (!mem_mng->umem) { rte_errno = errno; - rte_free(mem); + mlx5_free(mem); return NULL; } mkey_attr.addr = (uintptr_t)mem; mkey_attr.size = size; - mkey_attr.umem_id = mem_mng->umem->umem_id; + mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem); mkey_attr.pd = sh->pdn; mkey_attr.log_entity_size = 0; mkey_attr.pg_access = 0; @@ -4144,7 +4209,7 @@ flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n) if (!mem_mng->dm) { mlx5_glue->devx_umem_dereg(mem_mng->umem); rte_errno = errno; - rte_free(mem); + mlx5_free(mem); return NULL; } mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size); @@ -4181,7 +4246,7 @@ flow_dv_container_resize(struct rte_eth_dev *dev, void *old_pools = cont->pools; uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE; uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize; - void *pools = rte_calloc(__func__, 1, mem_size, 0); + void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY); if (!pools) { rte_errno = ENOMEM; @@ -4200,7 +4265,7 @@ flow_dv_container_resize(struct rte_eth_dev *dev, mem_mng = 
flow_dv_create_counter_stat_mem_mng(dev, MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES); if (!mem_mng) { - rte_free(pools); + mlx5_free(pools); return -ENOMEM; } for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i) @@ -4215,7 +4280,7 @@ flow_dv_container_resize(struct rte_eth_dev *dev, cont->pools = pools; rte_spinlock_unlock(&cont->resize_sl); if (old_pools) - rte_free(old_pools); + mlx5_free(old_pools); return 0; } @@ -4304,7 +4369,7 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs, size += MLX5_COUNTERS_PER_POOL * CNT_SIZE; size += (batch ? 0 : MLX5_COUNTERS_PER_POOL * CNTEXT_SIZE); size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * AGE_SIZE); - pool = rte_calloc(__func__, 1, size, 0); + pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY); if (!pool) { rte_errno = ENOMEM; return NULL; @@ -4317,65 +4382,28 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs, pool->type = 0; pool->type |= (batch ? 0 : CNT_POOL_TYPE_EXT); pool->type |= (!age ? 0 : CNT_POOL_TYPE_AGE); + pool->query_gen = 0; rte_spinlock_init(&pool->sl); - /* - * The generation of the new allocated counters in this pool is 0, 2 in - * the pool generation makes all the counters valid for allocation. - * The start and end query generation protect the counters be released - * between the query and update gap period will not be reallocated - * without the last query finished and stats updated to the memory. - */ - rte_atomic64_set(&pool->start_query_gen, 0x2); - /* - * There's no background query thread for fallback mode, set the - * end_query_gen to the maximum value since no need to wait for - * statistics update. - */ - rte_atomic64_set(&pool->end_query_gen, priv->counter_fallback ? - INT64_MAX : 0x2); - TAILQ_INIT(&pool->counters); + TAILQ_INIT(&pool->counters[0]); + TAILQ_INIT(&pool->counters[1]); TAILQ_INSERT_HEAD(&cont->pool_list, pool, next); pool->index = n_valid; cont->pools[n_valid] = pool; + if (!batch) { + int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL); + + if (base < cont->min_id) + cont->min_id = base; + if (base > cont->max_id) + cont->max_id = base + MLX5_COUNTERS_PER_POOL - 1; + cont->last_pool_idx = pool->index; + } /* Pool initialization must be updated before host thread access. */ rte_cio_wmb(); rte_atomic16_add(&cont->n_valid, 1); return pool; } -/** - * Update the minimum dcs-id for aged or no-aged counter pool. - * - * @param[in] dev - * Pointer to the Ethernet device structure. - * @param[in] pool - * Current counter pool. - * @param[in] batch - * Whether the pool is for counter that was allocated by batch command. - * @param[in] age - * Whether the counter is for aging. - */ -static void -flow_dv_counter_update_min_dcs(struct rte_eth_dev *dev, - struct mlx5_flow_counter_pool *pool, - uint32_t batch, uint32_t age) -{ - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_flow_counter_pool *other; - struct mlx5_pools_container *cont; - - cont = MLX5_CNT_CONTAINER(priv->sh, batch, (age ^ 0x1)); - other = flow_dv_find_pool_by_id(cont, pool->min_dcs->id); - if (!other) - return; - if (pool->min_dcs->id < other->min_dcs->id) { - rte_atomic64_set(&other->a64_dcs, - rte_atomic64_read(&pool->a64_dcs)); - } else { - rte_atomic64_set(&pool->a64_dcs, - rte_atomic64_read(&other->a64_dcs)); - } -} /** * Prepare a new counter and/or a new counter pool. 
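 *
 * Roughly, for a single (non-batch) counter the devx ID returned by the
 * firmware may fall into a pool owned by the opposite aging container; in
 * that case the counter is parked on that pool's free list and the
 * allocation retries until an ID belonging to this container is obtained
 * (see the retry label below).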
*
@@ -4400,33 +4428,53 @@ flow_dv_counter_pool_prepare,
 struct mlx5_priv *priv = dev->data->dev_private;
 struct mlx5_pools_container *cont;
 struct mlx5_flow_counter_pool *pool;
+ struct mlx5_counters tmp_tq;
 struct mlx5_devx_obj *dcs = NULL;
 struct mlx5_flow_counter *cnt;
+ uint32_t add2other;
 uint32_t i;
 
 cont = MLX5_CNT_CONTAINER(priv->sh, batch, age);
 if (!batch) {
+retry:
+ add2other = 0;
 /* bulk_bitmap must be 0 for single counter allocation. */
 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
 if (!dcs)
 return NULL;
 pool = flow_dv_find_pool_by_id(cont, dcs->id);
+ /* Check if the counter belongs to an existing pool ID range. */
 if (!pool) {
+ pool = flow_dv_find_pool_by_id
+ (MLX5_CNT_CONTAINER
+ (priv->sh, batch, (age ^ 0x1)), dcs->id);
+ /*
+ * The pool exists in the other container; the counter
+ * will be added there and needs to be reallocated
+ * later.
+ */
+ if (pool) {
+ add2other = 1;
+ } else {
+ pool = flow_dv_pool_create(dev, dcs, batch,
+ age);
+ if (!pool) {
+ mlx5_devx_cmd_destroy(dcs);
+ return NULL;
+ }
 }
- } else if (dcs->id < pool->min_dcs->id) {
+ }
+ if (dcs->id < pool->min_dcs->id)
 rte_atomic64_set(&pool->a64_dcs,
 (int64_t)(uintptr_t)dcs);
- }
- flow_dv_counter_update_min_dcs(dev,
- pool, batch, age);
 i = dcs->id % MLX5_COUNTERS_PER_POOL;
 cnt = MLX5_POOL_GET_CNT(pool, i);
- TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
+ cnt->pool = pool;
 MLX5_GET_POOL_CNT_EXT(pool, i)->dcs = dcs;
+ if (add2other) {
+ TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen],
+ cnt, next);
+ goto retry;
+ }
 *cnt_free = cnt;
 return pool;
 }
@@ -4442,19 +4490,25 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
 mlx5_devx_cmd_destroy(dcs);
 return NULL;
 }
- for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
+ TAILQ_INIT(&tmp_tq);
+ for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
 cnt = MLX5_POOL_GET_CNT(pool, i);
- TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
+ cnt->pool = pool;
+ TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
 }
+ rte_spinlock_lock(&cont->csl);
+ TAILQ_CONCAT(&cont->counters, &tmp_tq, next);
+ rte_spinlock_unlock(&cont->csl);
 *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
+ (*cnt_free)->pool = pool;
 return pool;
 }
 
 /**
 * Search for an existing shared counter.
 *
- * @param[in] cont
- *   Pointer to the relevant counter pool container.
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
 * @param[in] id
 * The shared counter ID to search.
 * @param[out] ppool
 * mlx5 flow counter pool in the container.
 *
 * @return
 * NULL if it does not exist, otherwise pointer to the shared extended counter. 
*/
 static struct mlx5_flow_counter_ext *
-flow_dv_counter_shared_search(struct mlx5_pools_container *cont, uint32_t id,
+flow_dv_counter_shared_search(struct rte_eth_dev *dev, uint32_t id,
 struct mlx5_flow_counter_pool **ppool)
 {
- struct mlx5_flow_counter_ext *cnt;
- struct mlx5_flow_counter_pool *pool;
- uint32_t i, j;
- uint32_t n_valid = rte_atomic16_read(&cont->n_valid);
-
- for (i = 0; i < n_valid; i++) {
- pool = cont->pools[i];
- for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) {
- cnt = MLX5_GET_POOL_CNT_EXT(pool, j);
- if (cnt->ref_cnt && cnt->shared && cnt->id == id) {
- if (ppool)
- *ppool = cont->pools[i];
- return cnt;
- }
- }
- }
- return NULL;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ union mlx5_l3t_data data;
+ uint32_t cnt_idx;
+
+ if (mlx5_l3t_get_entry(priv->sh->cnt_id_tbl, id, &data) || !data.dword)
+ return NULL;
+ cnt_idx = data.dword;
+ /*
+ * Shared counters don't have age info. The counter extension is
+ * right after the counter data structure.
+ */
+ return (struct mlx5_flow_counter_ext *)
+ ((flow_dv_counter_get_by_idx(dev, cnt_idx, ppool)) + 1);
 }
 
 /**
@@ -4529,7 +4579,7 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
 return 0;
 }
 if (shared) {
- cnt_ext = flow_dv_counter_shared_search(cont, id, &pool);
+ cnt_ext = flow_dv_counter_shared_search(dev, id, &pool);
 if (cnt_ext) {
 if (cnt_ext->ref_cnt + 1 == 0) {
 rte_errno = E2BIG;
@@ -4542,34 +4592,23 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
 return cnt_idx;
 }
 }
- /* Pools which has a free counters are in the start. */
- TAILQ_FOREACH(pool, &cont->pool_list, next) {
- /*
- * The free counter reset values must be updated between the
- * counter release to the counter allocation, so, at least one
- * query must be done in this time. ensure it by saving the
- * query generation in the release time.
- * The free list is sorted according to the generation - so if
- * the first one is not updated, all the others are not
- * updated too.
- */
- cnt_free = TAILQ_FIRST(&pool->counters);
- if (cnt_free && cnt_free->query_gen <
- rte_atomic64_read(&pool->end_query_gen))
- break;
- cnt_free = NULL;
- }
- if (!cnt_free) {
- pool = flow_dv_counter_pool_prepare(dev, &cnt_free, batch, age);
- if (!pool)
- return 0;
- }
+ /* Get free counters from container. */
+ rte_spinlock_lock(&cont->csl);
+ cnt_free = TAILQ_FIRST(&cont->counters);
+ if (cnt_free)
+ TAILQ_REMOVE(&cont->counters, cnt_free, next);
+ rte_spinlock_unlock(&cont->csl);
+ if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free,
+ batch, age))
+ goto err;
+ pool = cnt_free->pool;
 if (!batch)
 cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt_free);
 /* Create a DV counter action only on first-time usage. */
 if (!cnt_free->action) {
 uint16_t offset;
 struct mlx5_devx_obj *dcs;
+ int ret;
 
 if (batch) {
 offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
@@ -4578,11 +4617,11 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
 offset = 0;
 dcs = cnt_ext->dcs;
 }
- cnt_free->action = mlx5_glue->dv_create_flow_action_counter
- (dcs->obj, offset);
- if (!cnt_free->action) {
+ ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
+ &cnt_free->action);
+ if (ret) {
 rte_errno = errno;
- return 0;
+ goto err;
 }
 }
 cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
@@ -4592,22 +4631,31 @@ 
 /* Update the counter reset values. 
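 * The hits/bytes read here become the counter's baseline, so that later
 * queries can report statistics relative to the allocation time.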
*/
 if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
 &cnt_free->bytes))
- return 0;
+ goto err;
 if (cnt_ext) {
 cnt_ext->shared = shared;
 cnt_ext->ref_cnt = 1;
 cnt_ext->id = id;
+ if (shared) {
+ union mlx5_l3t_data data;
+
+ data.dword = cnt_idx;
+ if (mlx5_l3t_set_entry(priv->sh->cnt_id_tbl, id, &data))
+ return 0;
+ }
 }
 if (!priv->counter_fallback && !priv->sh->cmng.query_thread_on)
 /* Start the asynchronous batch query by the host thread. */
 mlx5_set_query_alarm(priv->sh);
- TAILQ_REMOVE(&pool->counters, cnt_free, next);
- if (TAILQ_EMPTY(&pool->counters)) {
- /* Move the pool to the end of the container pool list. */
- TAILQ_REMOVE(&cont->pool_list, pool, next);
- TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
- }
 return cnt_idx;
+err:
+ if (cnt_free) {
+ cnt_free->pool = pool;
+ rte_spinlock_lock(&cont->csl);
+ TAILQ_INSERT_TAIL(&cont->counters, cnt_free, next);
+ rte_spinlock_unlock(&cont->csl);
+ }
+ return 0;
 }
 
 /**
@@ -4679,6 +4727,7 @@ flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
 static void
 flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
 {
+ struct mlx5_priv *priv = dev->data->dev_private;
 struct mlx5_flow_counter_pool *pool = NULL;
 struct mlx5_flow_counter *cnt;
 struct mlx5_flow_counter_ext *cnt_ext = NULL;
@@ -4689,20 +4738,33 @@ flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
 MLX5_ASSERT(pool);
 if (counter < MLX5_CNT_BATCH_OFFSET) {
 cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
- if (cnt_ext && --cnt_ext->ref_cnt)
- return;
+ if (cnt_ext) {
+ if (--cnt_ext->ref_cnt)
+ return;
+ if (cnt_ext->shared)
+ mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
+ cnt_ext->id);
+ }
 }
 if (IS_AGE_POOL(pool))
 flow_dv_counter_remove_from_age(dev, counter, cnt);
- /* Put the counter in the end - the last updated one. */
- TAILQ_INSERT_TAIL(&pool->counters, cnt, next);
+ cnt->pool = pool;
 /*
- * Counters released between query trigger and handler need
- * to wait the next round of query. Since the packets arrive
- * in the gap period will not be taken into account to the
- * old counter.
+ * Put the counter back to the list to be updated in non-fallback
+ * mode. Currently, two lists are used alternately: while one is in
+ * query, freed counters are added to the other list, selected by
+ * the pool query_gen value. After the query finishes, that list is
+ * appended to the global container counter list; the lists switch
+ * when the next query starts, so no lock is needed here, as the
+ * query callback and the release function operate on different lists. 
+ * */ - cnt->query_gen = rte_atomic64_read(&pool->start_query_gen); + if (!priv->counter_fallback) + TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next); + else + TAILQ_INSERT_TAIL(&((MLX5_CNT_CONTAINER + (priv->sh, 0, 0))->counters), + cnt, next); } /** @@ -4849,6 +4911,17 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, .hop_limits = 0xff, }, }; + const struct rte_flow_item_ecpri nic_ecpri_mask = { + .hdr = { + .common = { + .u32 = + RTE_BE32(((const struct rte_ecpri_common_hdr) { + .type = 0xFF, + }).u32), + }, + .dummy[0] = 0xffffffff, + }, + }; struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_config *dev_conf = &priv->config; uint16_t queue_index = 0xFFFF; @@ -4866,6 +4939,10 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); int type = items->type; + if (!mlx5_flow_os_item_supported(type)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "item not supported"); switch (type) { case RTE_FLOW_ITEM_TYPE_VOID: break; @@ -5095,6 +5172,17 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, return ret; last_item = MLX5_FLOW_LAYER_GTP; break; + case RTE_FLOW_ITEM_TYPE_ECPRI: + /* Capacity will be checked in the translate stage. */ + ret = mlx5_flow_validate_item_ecpri(items, item_flags, + last_item, + ether_type, + &nic_ecpri_mask, + error); + if (ret < 0) + return ret; + last_item = MLX5_FLOW_LAYER_ECPRI; + break; default: return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, @@ -5104,6 +5192,12 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, } for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { int type = actions->type; + + if (!mlx5_flow_os_action_supported(type)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "action not supported"); if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, @@ -5215,6 +5309,15 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, action_flags |= MLX5_FLOW_ACTION_RSS; ++actions_n; break; + case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS: + ret = + mlx5_flow_validate_action_default_miss(action_flags, + attr, error); + if (ret < 0) + return ret; + action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS; + ++actions_n; + break; case RTE_FLOW_ACTION_TYPE_COUNT: ret = flow_dv_validate_action_count(dev, error); if (ret < 0) @@ -5560,21 +5663,38 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, actions, "no fate action is found"); } - /* Continue validation for Xcap actions.*/ - if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) && (queue_index == 0xFFFF || - mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) { + /* Continue validation for Xcap and VLAN actions.*/ + if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS | + MLX5_FLOW_VLAN_ACTIONS)) && + (queue_index == 0xFFFF || + mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) { if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) == MLX5_FLOW_XCAP_ACTIONS) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "encap and decap " "combination aren't supported"); - if (!attr->transfer && attr->ingress && (action_flags & - MLX5_FLOW_ACTION_ENCAP)) - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, - NULL, "encap is not supported" - " for ingress traffic"); + if (!attr->transfer && 
attr->ingress) {
+ if (action_flags & MLX5_FLOW_ACTION_ENCAP)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "encap is not supported"
+ " for ingress traffic");
+ else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "push VLAN action not "
+ "supported for ingress");
+ else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
+ MLX5_FLOW_VLAN_ACTIONS)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "no support for "
+ "multiple VLAN actions");
+ }
 }
 /* Hairpin flow will add one more TAG action. */
 if (hairpin > 0)
@@ -5644,7 +5764,15 @@ flow_dv_prepare(struct rte_eth_dev *dev,
 dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
 dev_flow->handle = dev_handle;
 dev_flow->handle_idx = handle_idx;
- dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
+ /*
+ * Some old rdma-core releases check the length of the matching
+ * parameter before continuing, and that check must be done with the
+ * length excluding the misc4 param. If the flow needs misc4
+ * matching, the length is adjusted accordingly later. Each param
+ * member is naturally aligned on a 64B boundary.
+ */
+ dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
+ MLX5_ST_SZ_BYTES(fte_match_set_misc4);
 /*
 * The matching value needs to be cleared to 0 before using. In the
 * past, it will be automatically cleared when using rte_*alloc
@@ -5789,6 +5917,7 @@ flow_dv_translate_item_eth(void *matcher, void *key,
 * Set match on ethertype only if ETH header is not followed by VLAN.
 * HW is optimized for IPv4/IPv6. In such cases, avoid setting
 * ethertype, and use ip_version field instead.
+ * eCPRI over the Ether layer uses ethertype 0xAEFE.
 */
 if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
 eth_m->type == 0xFFFF) {
@@ -7143,6 +7272,90 @@ flow_dv_translate_item_gtp(void *matcher, void *key,
 rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
 }
 
+/**
+ * Add eCPRI item to matcher and to the value.
+ *
+ * @param[in] dev
+ *   The device to configure through.
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ */
+static void
+flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
+			     void *key, const struct rte_flow_item *item)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	const struct rte_flow_item_ecpri *ecpri_m = item->mask;
+	const struct rte_flow_item_ecpri *ecpri_v = item->spec;
+	void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
+				     misc_parameters_4);
+	void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
+	uint32_t *samples;
+	void *dw_m;
+	void *dw_v;
+
+	if (!ecpri_v)
+		return;
+	if (!ecpri_m)
+		ecpri_m = &rte_flow_item_ecpri_mask;
+	/*
+	 * At most four DW samples are supported in a single matching now.
+	 * Two are used for eCPRI matching:
+	 * 1. Type: one byte, mask should be 0x00ff0000 in network order.
+	 * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
+	 *    if any.
+	 */
+	if (!ecpri_m->hdr.common.u32)
+		return;
+	samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
+	/* Need to take the whole DW as the mask to fill the entry. 
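+	 * E.g. a rule matching only the eCPRI message type would use the
+	 * mask RTE_BE32(0x00ff0000): the revision/C bits and the payload
+	 * size stay wildcarded while the type byte is matched exactly.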
*/ + dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m, + prog_sample_field_value_0); + dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v, + prog_sample_field_value_0); + /* Already big endian (network order) in the header. */ + *(uint32_t *)dw_m = ecpri_m->hdr.common.u32; + *(uint32_t *)dw_v = ecpri_v->hdr.common.u32; + /* Sample#0, used for matching type, offset 0. */ + MLX5_SET(fte_match_set_misc4, misc4_m, + prog_sample_field_id_0, samples[0]); + /* It makes no sense to set the sample ID in the mask field. */ + MLX5_SET(fte_match_set_misc4, misc4_v, + prog_sample_field_id_0, samples[0]); + /* + * Checking if message body part needs to be matched. + * Some wildcard rules only matching type field should be supported. + */ + if (ecpri_m->hdr.dummy[0]) { + switch (ecpri_v->hdr.common.type) { + case RTE_ECPRI_MSG_TYPE_IQ_DATA: + case RTE_ECPRI_MSG_TYPE_RTC_CTRL: + case RTE_ECPRI_MSG_TYPE_DLY_MSR: + dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m, + prog_sample_field_value_1); + dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v, + prog_sample_field_value_1); + *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0]; + *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0]; + /* Sample#1, to match message body, offset 4. */ + MLX5_SET(fte_match_set_misc4, misc4_m, + prog_sample_field_id_1, samples[1]); + MLX5_SET(fte_match_set_misc4, misc4_v, + prog_sample_field_id_1, samples[1]); + break; + default: + /* Others, do not match any sample ID. */ + break; + } + } +} + static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 }; #define HEADER_IS_ZERO(match_criteria, headers) \ @@ -7178,6 +7391,9 @@ flow_dv_matcher_enable(uint32_t *match_criteria) match_criteria_enable |= (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) << MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT; + match_criteria_enable |= + (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) << + MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT; return match_criteria_enable; } @@ -7247,8 +7463,8 @@ flow_dv_tbl_resource_get(struct rte_eth_dev *dev, domain = sh->tx_domain; else domain = sh->rx_domain; - tbl->obj = mlx5_glue->dr_create_flow_tbl(domain, table_id); - if (!tbl->obj) { + ret = mlx5_flow_os_create_flow_tbl(domain, table_id, &tbl->obj); + if (ret) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot create flow table object"); @@ -7268,7 +7484,7 @@ flow_dv_tbl_resource_get(struct rte_eth_dev *dev, rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot insert flow table data entry"); - mlx5_glue->dr_destroy_flow_tbl(tbl->obj); + mlx5_flow_os_destroy_flow_tbl(tbl->obj); mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx); } rte_atomic32_inc(&tbl->refcnt); @@ -7300,7 +7516,7 @@ flow_dv_tbl_resource_release(struct rte_eth_dev *dev, if (rte_atomic32_dec_and_test(&tbl->refcnt)) { struct mlx5_hlist_entry *pos = &tbl_data->entry; - mlx5_glue->dr_destroy_flow_tbl(tbl->obj); + mlx5_flow_os_destroy_flow_tbl(tbl->obj); tbl->obj = NULL; /* remove the entry from the hash list and free memory. */ mlx5_hlist_remove(sh->flow_tbls, pos); @@ -7344,6 +7560,7 @@ flow_dv_matcher_register(struct rte_eth_dev *dev, }; struct mlx5_flow_tbl_resource *tbl; struct mlx5_flow_tbl_data_entry *tbl_data; + int ret; tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction, key->domain, error); @@ -7373,7 +7590,8 @@ flow_dv_matcher_register(struct rte_eth_dev *dev, } } /* Register new matcher. 
*/
- cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
+ cache_matcher = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache_matcher), 0,
+ SOCKET_ID_ANY);
 if (!cache_matcher) {
 flow_dv_tbl_resource_release(dev, tbl);
 return rte_flow_error_set(error, ENOMEM,
@@ -7386,10 +7604,10 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
 dv_attr.priority = matcher->priority;
 if (key->direction)
 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
- cache_matcher->matcher_object =
- mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
- if (!cache_matcher->matcher_object) {
- rte_free(cache_matcher);
+ ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
+ &cache_matcher->matcher_object);
+ if (ret) {
+ mlx5_free(cache_matcher);
 #ifdef HAVE_MLX5DV_DR
 flow_dv_tbl_resource_release(dev, tbl);
 #endif
@@ -7438,6 +7656,7 @@ flow_dv_tag_resource_register
 struct mlx5_dev_ctx_shared *sh = priv->sh;
 struct mlx5_flow_dv_tag_resource *cache_resource;
 struct mlx5_hlist_entry *entry;
+ int ret;
 
 /* Lookup a matching resource from cache. */
 entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24);
@@ -7460,9 +7679,10 @@ flow_dv_tag_resource_register
 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
 "cannot allocate resource memory");
 cache_resource->entry.key = (uint64_t)tag_be24;
- cache_resource->action = mlx5_glue->dv_create_flow_action_tag(tag_be24);
- if (!cache_resource->action) {
- rte_free(cache_resource);
+ ret = mlx5_flow_os_create_flow_action_tag(tag_be24,
+ &cache_resource->action);
+ if (ret) {
+ mlx5_free(cache_resource);
 return rte_flow_error_set(error, ENOMEM,
 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 NULL, "cannot create action");
@@ -7470,8 +7690,8 @@ flow_dv_tag_resource_register
 rte_atomic32_init(&cache_resource->refcnt);
 rte_atomic32_inc(&cache_resource->refcnt);
 if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) {
- mlx5_glue->destroy_flow_action(cache_resource->action);
- rte_free(cache_resource);
+ mlx5_flow_os_destroy_flow_action(cache_resource->action);
+ mlx5_free(cache_resource);
 return rte_flow_error_set(error, EEXIST,
 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 NULL, "cannot insert tag");
@@ -7509,7 +7729,7 @@ flow_dv_tag_release(struct rte_eth_dev *dev,
 dev->data->port_id, (void *)tag,
 rte_atomic32_read(&tag->refcnt));
 if (rte_atomic32_dec_and_test(&tag->refcnt)) {
- claim_zero(mlx5_glue->destroy_flow_action(tag->action));
+ claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
 mlx5_hlist_remove(sh->tag_table, &tag->entry);
 DRV_LOG(DEBUG, "port %u tag %p: removed", dev->data->port_id,
 (void *)tag);
@@ -7557,7 +7777,7 @@ flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
 * This parameter is transferred to
 * mlx5dv_dr_action_create_dest_ib_port().
 */
- *dst_port_id = priv->ibv_port;
+ *dst_port_id = priv->dev_port;
 #else
 /*
 * Legacy mode, no LAG configuration is supported. 
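 * (Depending on the rdma-core version, the underlying glue routine
 * maps to either mlx5dv_dr_action_create_dest_ib_port() or
 * mlx5dv_dr_action_create_dest_vport().)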
@@ -7773,7 +7993,8 @@ __flow_dv_translate(struct rte_eth_dev *dev, uint64_t priority = attr->priority; struct mlx5_flow_dv_matcher matcher = { .mask = { - .size = sizeof(matcher.mask.buf), + .size = sizeof(matcher.mask.buf) - + MLX5_ST_SZ_BYTES(fte_match_set_misc4), }, }; int actions_n = 0; @@ -7825,6 +8046,11 @@ __flow_dv_translate(struct rte_eth_dev *dev, const struct rte_flow_action *found_action = NULL; struct mlx5_flow_meter *fm = NULL; + if (!mlx5_flow_os_action_supported(action_type)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "action not supported"); switch (action_type) { case RTE_FLOW_ACTION_TYPE_VOID: break; @@ -8011,7 +8237,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, error)) return -rte_errno; dev_flow->dv.actions[actions_n++] = - dev_flow->dv.encap_decap->verbs_action; + dev_flow->dv.encap_decap->action; action_flags |= MLX5_FLOW_ACTION_ENCAP; break; case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: @@ -8021,7 +8247,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, error)) return -rte_errno; dev_flow->dv.actions[actions_n++] = - dev_flow->dv.encap_decap->verbs_action; + dev_flow->dv.encap_decap->action; action_flags |= MLX5_FLOW_ACTION_DECAP; break; case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: @@ -8031,7 +8257,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, (dev, actions, dev_flow, attr, error)) return -rte_errno; dev_flow->dv.actions[actions_n++] = - dev_flow->dv.encap_decap->verbs_action; + dev_flow->dv.encap_decap->action; } else { /* Handle encap without preceding decap. */ if (flow_dv_create_action_l2_encap @@ -8039,7 +8265,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, error)) return -rte_errno; dev_flow->dv.actions[actions_n++] = - dev_flow->dv.encap_decap->verbs_action; + dev_flow->dv.encap_decap->action; } action_flags |= MLX5_FLOW_ACTION_ENCAP; break; @@ -8051,7 +8277,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, (dev, dev_flow, attr->transfer, error)) return -rte_errno; dev_flow->dv.actions[actions_n++] = - dev_flow->dv.encap_decap->verbs_action; + dev_flow->dv.encap_decap->action; } /* If decap is followed by encap, handle it at encap. 
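 * Only a standalone decap creates an action here; a decap followed
 * by an encap was already fused into a single packet reformat
 * action by the RAW_ENCAP branch above.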
*/ action_flags |= MLX5_FLOW_ACTION_DECAP; @@ -8178,6 +8404,11 @@ __flow_dv_translate(struct rte_eth_dev *dev, return -rte_errno; action_flags |= MLX5_FLOW_ACTION_SET_TAG; break; + case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS: + action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS; + dev_flow->handle->fate_action = + MLX5_FLOW_FATE_DEFAULT_MISS; + break; case RTE_FLOW_ACTION_TYPE_METER: mtr = actions->conf; if (!flow->meter) { @@ -8228,7 +8459,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, (dev, mhdr_res, dev_flow, error)) return -rte_errno; dev_flow->dv.actions[modify_action_position] = - handle->dvh.modify_hdr->verbs_action; + handle->dvh.modify_hdr->action; } if (action_flags & MLX5_FLOW_ACTION_COUNT) { flow->counter = @@ -8260,6 +8491,10 @@ __flow_dv_translate(struct rte_eth_dev *dev, int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); int item_type = items->type; + if (!mlx5_flow_os_item_supported(item_type)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "item not supported"); switch (item_type) { case RTE_FLOW_ITEM_TYPE_PORT_ID: flow_dv_translate_item_port_id(dev, match_mask, @@ -8270,7 +8505,11 @@ __flow_dv_translate(struct rte_eth_dev *dev, flow_dv_translate_item_eth(match_mask, match_value, items, tunnel, dev_flow->dv.group); - matcher.priority = MLX5_PRIORITY_MAP_L2; + matcher.priority = action_flags & + MLX5_FLOW_ACTION_DEFAULT_MISS && + !dev_flow->external ? + MLX5_PRIORITY_MAP_L3 : + MLX5_PRIORITY_MAP_L2; last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : MLX5_FLOW_LAYER_OUTER_L2; break; @@ -8348,8 +8587,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, case RTE_FLOW_ITEM_TYPE_GRE: flow_dv_translate_item_gre(match_mask, match_value, items, tunnel); - matcher.priority = rss_desc->level >= 2 ? - MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4; + matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); last_item = MLX5_FLOW_LAYER_GRE; break; case RTE_FLOW_ITEM_TYPE_GRE_KEY: @@ -8360,37 +8598,32 @@ __flow_dv_translate(struct rte_eth_dev *dev, case RTE_FLOW_ITEM_TYPE_NVGRE: flow_dv_translate_item_nvgre(match_mask, match_value, items, tunnel); - matcher.priority = rss_desc->level >= 2 ? - MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4; + matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); last_item = MLX5_FLOW_LAYER_GRE; break; case RTE_FLOW_ITEM_TYPE_VXLAN: flow_dv_translate_item_vxlan(match_mask, match_value, items, tunnel); - matcher.priority = rss_desc->level >= 2 ? - MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4; + matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); last_item = MLX5_FLOW_LAYER_VXLAN; break; case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: flow_dv_translate_item_vxlan_gpe(match_mask, match_value, items, tunnel); - matcher.priority = rss_desc->level >= 2 ? - MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4; + matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); last_item = MLX5_FLOW_LAYER_VXLAN_GPE; break; case RTE_FLOW_ITEM_TYPE_GENEVE: flow_dv_translate_item_geneve(match_mask, match_value, items, tunnel); - matcher.priority = rss_desc->level >= 2 ? - MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4; + matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); last_item = MLX5_FLOW_LAYER_GENEVE; break; case RTE_FLOW_ITEM_TYPE_MPLS: flow_dv_translate_item_mpls(match_mask, match_value, items, last_item, tunnel); - matcher.priority = rss_desc->level >= 2 ? 
- MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4; + matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); last_item = MLX5_FLOW_LAYER_MPLS; break; case RTE_FLOW_ITEM_TYPE_MARK: @@ -8432,10 +8665,29 @@ __flow_dv_translate(struct rte_eth_dev *dev, case RTE_FLOW_ITEM_TYPE_GTP: flow_dv_translate_item_gtp(match_mask, match_value, items, tunnel); - matcher.priority = rss_desc->level >= 2 ? - MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4; + matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc); last_item = MLX5_FLOW_LAYER_GTP; break; + case RTE_FLOW_ITEM_TYPE_ECPRI: + if (!mlx5_flex_parser_ecpri_exist(dev)) { + /* Create it only the first time to be used. */ + ret = mlx5_flex_parser_ecpri_alloc(dev); + if (ret) + return rte_flow_error_set + (error, -ret, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, + "cannot create eCPRI parser"); + } + /* Adjust the length matcher and device flow value. */ + matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param); + dev_flow->dv.value.size = + MLX5_ST_SZ_BYTES(fte_match_param); + flow_dv_translate_item_ecpri(dev, match_mask, + match_value, items); + /* No other protocol should follow eCPRI layer. */ + last_item = MLX5_FLOW_LAYER_ECPRI; + break; default: break; } @@ -8570,12 +8822,22 @@ __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, } dh->rix_hrxq = hrxq_idx; dv->actions[n++] = hrxq->action; + } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) { + if (flow_dv_default_miss_resource_register + (dev, error)) { + rte_flow_error_set + (error, rte_errno, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "cannot create default miss resource"); + goto error_default_miss; + } + dh->rix_default_fate = MLX5_FLOW_FATE_DEFAULT_MISS; + dv->actions[n++] = priv->sh->default_miss.action; } - dh->ib_flow = - mlx5_glue->dv_create_flow(dv_h->matcher->matcher_object, - (void *)&dv->value, n, - dv->actions); - if (!dh->ib_flow) { + err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object, + (void *)&dv->value, n, + dv->actions, &dh->drv_flow); + if (err) { rte_flow_error_set(error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, @@ -8595,6 +8857,9 @@ __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, } return 0; error: + if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) + flow_dv_default_miss_resource_release(dev); +error_default_miss: err = rte_errno; /* Save rte_errno before cleanup. */ SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, handle_idx, dh, next) { @@ -8637,12 +8902,12 @@ flow_dv_matcher_release(struct rte_eth_dev *dev, dev->data->port_id, (void *)matcher, rte_atomic32_read(&matcher->refcnt)); if (rte_atomic32_dec_and_test(&matcher->refcnt)) { - claim_zero(mlx5_glue->dv_destroy_flow_matcher + claim_zero(mlx5_flow_os_destroy_flow_matcher (matcher->matcher_object)); LIST_REMOVE(matcher, next); /* table ref-- in release interface. 
*/ flow_dv_tbl_resource_release(dev, matcher->tbl); - rte_free(matcher); + mlx5_free(matcher); DRV_LOG(DEBUG, "port %u matcher %p: removed", dev->data->port_id, (void *)matcher); return 0; @@ -8673,13 +8938,13 @@ flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev, idx); if (!cache_resource) return 0; - MLX5_ASSERT(cache_resource->verbs_action); + MLX5_ASSERT(cache_resource->action); DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--", (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { - claim_zero(mlx5_glue->destroy_flow_action - (cache_resource->verbs_action)); + claim_zero(mlx5_flow_os_destroy_flow_action + (cache_resource->action)); ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &priv->sh->encaps_decaps, idx, cache_resource, next); @@ -8720,8 +8985,8 @@ flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev, (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { - claim_zero(mlx5_glue->destroy_flow_action - (cache_resource->action)); + claim_zero(mlx5_flow_os_destroy_flow_action + (cache_resource->action)); /* jump action memory free is inside the table release. */ flow_dv_tbl_resource_release(dev, &tbl_data->tbl); DRV_LOG(DEBUG, "jump table resource %p: removed", @@ -8731,6 +8996,36 @@ flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev, return 1; } +/** + * Release a default miss resource. + * + * @param dev + * Pointer to Ethernet device. + * @return + * 1 while a reference on it exists, 0 when freed. + */ +static int +flow_dv_default_miss_resource_release(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_ctx_shared *sh = priv->sh; + struct mlx5_flow_default_miss_resource *cache_resource = + &sh->default_miss; + + MLX5_ASSERT(cache_resource->action); + DRV_LOG(DEBUG, "default miss resource %p: refcnt %d--", + (void *)cache_resource->action, + rte_atomic32_read(&cache_resource->refcnt)); + if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { + claim_zero(mlx5_glue->destroy_flow_action + (cache_resource->action)); + DRV_LOG(DEBUG, "default miss resource %p: removed", + (void *)cache_resource->action); + return 0; + } + return 1; +} + /** * Release a modify-header resource. 
* @@ -8746,15 +9041,15 @@ flow_dv_modify_hdr_resource_release(struct mlx5_flow_handle *handle) struct mlx5_flow_dv_modify_hdr_resource *cache_resource = handle->dvh.modify_hdr; - MLX5_ASSERT(cache_resource->verbs_action); + MLX5_ASSERT(cache_resource->action); DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--", (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { - claim_zero(mlx5_glue->destroy_flow_action - (cache_resource->verbs_action)); + claim_zero(mlx5_flow_os_destroy_flow_action + (cache_resource->action)); LIST_REMOVE(cache_resource, next); - rte_free(cache_resource); + mlx5_free(cache_resource); DRV_LOG(DEBUG, "modify-header resource %p: removed", (void *)cache_resource); return 0; @@ -8790,8 +9085,8 @@ flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev, (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { - claim_zero(mlx5_glue->destroy_flow_action - (cache_resource->action)); + claim_zero(mlx5_flow_os_destroy_flow_action + (cache_resource->action)); ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PORT_ID], &priv->sh->port_id_action_list, idx, cache_resource, next); @@ -8831,8 +9126,8 @@ flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev, (void *)cache_resource, rte_atomic32_read(&cache_resource->refcnt)); if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) { - claim_zero(mlx5_glue->destroy_flow_action - (cache_resource->action)); + claim_zero(mlx5_flow_os_destroy_flow_action + (cache_resource->action)); ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], &priv->sh->push_vlan_action_list, idx, cache_resource, next); @@ -8858,16 +9153,26 @@ flow_dv_fate_resource_release(struct rte_eth_dev *dev, { if (!handle->rix_fate) return; - if (handle->fate_action == MLX5_FLOW_FATE_DROP) + switch (handle->fate_action) { + case MLX5_FLOW_FATE_DROP: mlx5_hrxq_drop_release(dev); - else if (handle->fate_action == MLX5_FLOW_FATE_QUEUE) + break; + case MLX5_FLOW_FATE_QUEUE: mlx5_hrxq_release(dev, handle->rix_hrxq); - else if (handle->fate_action == MLX5_FLOW_FATE_JUMP) + break; + case MLX5_FLOW_FATE_JUMP: flow_dv_jump_tbl_resource_release(dev, handle); - else if (handle->fate_action == MLX5_FLOW_FATE_PORT_ID) + break; + case MLX5_FLOW_FATE_PORT_ID: flow_dv_port_id_action_resource_release(dev, handle); - else + break; + case MLX5_FLOW_FATE_DEFAULT_MISS: + flow_dv_default_miss_resource_release(dev); + break; + default: DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action); + break; + } handle->rix_fate = 0; } @@ -8895,12 +9200,13 @@ __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow) handle_idx); if (!dh) return; - if (dh->ib_flow) { - claim_zero(mlx5_glue->dv_destroy_flow(dh->ib_flow)); - dh->ib_flow = NULL; + if (dh->drv_flow) { + claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow)); + dh->drv_flow = NULL; } if (dh->fate_action == MLX5_FLOW_FATE_DROP || - dh->fate_action == MLX5_FLOW_FATE_QUEUE) + dh->fate_action == MLX5_FLOW_FATE_QUEUE || + dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) flow_dv_fate_resource_release(dev, dh); if (dh->vf_vlan.tag && dh->vf_vlan.created) mlx5_vlan_vmwa_release(dev, &dh->vf_vlan); @@ -9076,47 +9382,47 @@ flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev, if (!mtd || !priv->config.dv_flow_en) return 0; if (mtd->ingress.policer_rules[RTE_MTR_DROPPED]) - claim_zero(mlx5_glue->dv_destroy_flow - (mtd->ingress.policer_rules[RTE_MTR_DROPPED])); + 
claim_zero(mlx5_flow_os_destroy_flow + (mtd->ingress.policer_rules[RTE_MTR_DROPPED])); if (mtd->egress.policer_rules[RTE_MTR_DROPPED]) - claim_zero(mlx5_glue->dv_destroy_flow - (mtd->egress.policer_rules[RTE_MTR_DROPPED])); + claim_zero(mlx5_flow_os_destroy_flow + (mtd->egress.policer_rules[RTE_MTR_DROPPED])); if (mtd->transfer.policer_rules[RTE_MTR_DROPPED]) - claim_zero(mlx5_glue->dv_destroy_flow - (mtd->transfer.policer_rules[RTE_MTR_DROPPED])); + claim_zero(mlx5_flow_os_destroy_flow + (mtd->transfer.policer_rules[RTE_MTR_DROPPED])); if (mtd->egress.color_matcher) - claim_zero(mlx5_glue->dv_destroy_flow_matcher - (mtd->egress.color_matcher)); + claim_zero(mlx5_flow_os_destroy_flow_matcher + (mtd->egress.color_matcher)); if (mtd->egress.any_matcher) - claim_zero(mlx5_glue->dv_destroy_flow_matcher - (mtd->egress.any_matcher)); + claim_zero(mlx5_flow_os_destroy_flow_matcher + (mtd->egress.any_matcher)); if (mtd->egress.tbl) flow_dv_tbl_resource_release(dev, mtd->egress.tbl); if (mtd->egress.sfx_tbl) flow_dv_tbl_resource_release(dev, mtd->egress.sfx_tbl); if (mtd->ingress.color_matcher) - claim_zero(mlx5_glue->dv_destroy_flow_matcher - (mtd->ingress.color_matcher)); + claim_zero(mlx5_flow_os_destroy_flow_matcher + (mtd->ingress.color_matcher)); if (mtd->ingress.any_matcher) - claim_zero(mlx5_glue->dv_destroy_flow_matcher - (mtd->ingress.any_matcher)); + claim_zero(mlx5_flow_os_destroy_flow_matcher + (mtd->ingress.any_matcher)); if (mtd->ingress.tbl) flow_dv_tbl_resource_release(dev, mtd->ingress.tbl); if (mtd->ingress.sfx_tbl) flow_dv_tbl_resource_release(dev, mtd->ingress.sfx_tbl); if (mtd->transfer.color_matcher) - claim_zero(mlx5_glue->dv_destroy_flow_matcher - (mtd->transfer.color_matcher)); + claim_zero(mlx5_flow_os_destroy_flow_matcher + (mtd->transfer.color_matcher)); if (mtd->transfer.any_matcher) - claim_zero(mlx5_glue->dv_destroy_flow_matcher - (mtd->transfer.any_matcher)); + claim_zero(mlx5_flow_os_destroy_flow_matcher + (mtd->transfer.any_matcher)); if (mtd->transfer.tbl) flow_dv_tbl_resource_release(dev, mtd->transfer.tbl); if (mtd->transfer.sfx_tbl) flow_dv_tbl_resource_release(dev, mtd->transfer.sfx_tbl); if (mtd->drop_actn) - claim_zero(mlx5_glue->destroy_flow_action(mtd->drop_actn)); - rte_free(mtd); + claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn)); + mlx5_free(mtd); return 0; } @@ -9164,6 +9470,7 @@ flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev, struct mlx5_meter_domain_info *dtb; struct rte_flow_error error; int i = 0; + int ret; if (transfer) dtb = &mtb->transfer; @@ -9189,10 +9496,9 @@ flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev, /* Create matchers, Any and Color. 
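 * The Any matcher uses empty match criteria and backs the
 * lowest-priority default drop rule, while the Color matcher keys
 * on the color bits carried in the metadata register.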
*/ dv_attr.priority = 3; dv_attr.match_criteria_enable = 0; - dtb->any_matcher = mlx5_glue->dv_create_flow_matcher(sh->ctx, - &dv_attr, - dtb->tbl->obj); - if (!dtb->any_matcher) { + ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj, + &dtb->any_matcher); + if (ret) { DRV_LOG(ERR, "Failed to create meter" " policer default matcher."); goto error_exit; @@ -9202,10 +9508,9 @@ flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev, 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT; flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx, rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX); - dtb->color_matcher = mlx5_glue->dv_create_flow_matcher(sh->ctx, - &dv_attr, - dtb->tbl->obj); - if (!dtb->color_matcher) { + ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj, + &dtb->color_matcher); + if (ret) { DRV_LOG(ERR, "Failed to create meter policer color matcher."); goto error_exit; } @@ -9213,10 +9518,10 @@ flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev, actions[i++] = mtb->count_actns[RTE_MTR_DROPPED]; actions[i++] = mtb->drop_actn; /* Default rule: lowest priority, match any, actions: drop. */ - dtb->policer_rules[RTE_MTR_DROPPED] = - mlx5_glue->dv_create_flow(dtb->any_matcher, - (void *)&value, i, actions); - if (!dtb->policer_rules[RTE_MTR_DROPPED]) { + ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i, + actions, + &dtb->policer_rules[RTE_MTR_DROPPED]); + if (ret) { DRV_LOG(ERR, "Failed to create meter policer drop rule."); goto error_exit; } @@ -9250,7 +9555,7 @@ flow_dv_create_mtr_tbl(struct rte_eth_dev *dev, rte_errno = ENOTSUP; return NULL; } - mtb = rte_calloc(__func__, 1, sizeof(*mtb), 0); + mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY); if (!mtb) { DRV_LOG(ERR, "Failed to allocate memory for meter."); return NULL; @@ -9265,8 +9570,8 @@ flow_dv_create_mtr_tbl(struct rte_eth_dev *dev, mtb->count_actns[i] = cnt->action; } /* Create drop action. */ - mtb->drop_actn = mlx5_glue->dr_create_flow_action_drop(); - if (!mtb->drop_actn) { + ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn); + if (ret) { DRV_LOG(ERR, "Failed to create drop action."); goto error_exit; } @@ -9310,13 +9615,13 @@ flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt) for (i = 0; i < RTE_MTR_DROPPED; i++) { if (dt->policer_rules[i]) { - claim_zero(mlx5_glue->dv_destroy_flow - (dt->policer_rules[i])); + claim_zero(mlx5_flow_os_destroy_flow + (dt->policer_rules[i])); dt->policer_rules[i] = NULL; } } if (dt->jump_actn) { - claim_zero(mlx5_glue->destroy_flow_action(dt->jump_actn)); + claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn)); dt->jump_actn = NULL; } } @@ -9379,13 +9684,13 @@ flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm, struct mlx5_meter_domains_infos *mtb = fm->mfts; void *actions[METER_ACTIONS]; int i; + int ret = 0; /* Create jump action. 
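 * The jump target is the per-domain meter suffix table; colors that
 * are not configured to drop are forwarded there.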
*/ if (!dtb->jump_actn) - dtb->jump_actn = - mlx5_glue->dr_create_flow_action_dest_flow_tbl - (dtb->sfx_tbl->obj); - if (!dtb->jump_actn) { + ret = mlx5_flow_os_create_flow_action_dest_flow_tbl + (dtb->sfx_tbl->obj, &dtb->jump_actn); + if (ret) { DRV_LOG(ERR, "Failed to create policer jump action."); goto error; } @@ -9400,11 +9705,10 @@ flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm, actions[j++] = mtb->drop_actn; else actions[j++] = dtb->jump_actn; - dtb->policer_rules[i] = - mlx5_glue->dv_create_flow(dtb->color_matcher, - (void *)&value, - j, actions); - if (!dtb->policer_rules[i]) { + ret = mlx5_flow_os_create_flow(dtb->color_matcher, + (void *)&value, j, actions, + &dtb->policer_rules[i]); + if (ret) { DRV_LOG(ERR, "Failed to create policer rule."); goto error; } @@ -9664,3 +9968,4 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = { }; #endif /* HAVE_IBV_FLOW_DV_SUPPORT */ +