diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 0693c924d0..abd8ce7989 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -12,7 +12,7 @@
 
 #include <rte_mbuf.h>
 #include <rte_malloc.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 #include <rte_common.h>
 #include <rte_interrupts.h>
 #include <rte_debug.h>
@@ -24,7 +24,8 @@
 
 #include "mlx5_defs.h"
 #include "mlx5.h"
-#include "mlx5_rxtx.h"
+#include "mlx5_tx.h"
+#include "mlx5_rx.h"
 #include "mlx5_utils.h"
 #include "mlx5_autoconf.h"
 
@@ -48,77 +49,6 @@ static_assert(MLX5_RSS_HASH_KEY_LEN ==
 	      (unsigned int)sizeof(rss_hash_default_key),
 	      "wrong RSS default key size.");
 
-/**
- * Check whether Multi-Packet RQ can be enabled for the device.
- *
- * @param dev
- *   Pointer to Ethernet device.
- *
- * @return
- *   1 if supported, negative errno value if not.
- */
-inline int
-mlx5_check_mprq_support(struct rte_eth_dev *dev)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-
-	if (priv->config.mprq.enabled &&
-	    priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
-		return 1;
-	return -ENOTSUP;
-}
-
-/**
- * Check whether Multi-Packet RQ is enabled for the Rx queue.
- *
- * @param rxq
- *   Pointer to receive queue structure.
- *
- * @return
- *   0 if disabled, otherwise enabled.
- */
-inline int
-mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
-{
-	return rxq->strd_num_n > 0;
-}
-
-/**
- * Check whether Multi-Packet RQ is enabled for the device.
- *
- * @param dev
- *   Pointer to Ethernet device.
- *
- * @return
- *   0 if disabled, otherwise enabled.
- */
-inline int
-mlx5_mprq_enabled(struct rte_eth_dev *dev)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-	uint32_t i;
-	uint16_t n = 0;
-	uint16_t n_ibv = 0;
-
-	if (mlx5_check_mprq_support(dev) < 0)
-		return 0;
-	/* All the configured queues should be enabled. */
-	for (i = 0; i < priv->rxqs_n; ++i) {
-		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
-		struct mlx5_rxq_ctrl *rxq_ctrl = container_of
-			(rxq, struct mlx5_rxq_ctrl, rxq);
-
-		if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
-			continue;
-		n_ibv++;
-		if (mlx5_rxq_mprq_enabled(rxq))
-			++n;
-	}
-	/* Multi-Packet RQ can't be partially configured. */
-	MLX5_ASSERT(n == 0 || n == n_ibv);
-	return n == n_ibv;
-}
-
 /**
  * Calculate the number of CQEs in CQ for the Rx queue.
  *
@@ -346,7 +276,9 @@ rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 		  (1 << rxq->elts_n) * (1 << rxq->strd_num_n) :
 		  (1 << rxq->elts_n);
 	const uint16_t q_mask = q_n - 1;
-	uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
+	uint16_t elts_ci = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
+		rxq->elts_ci : rxq->rq_ci;
+	uint16_t used = q_n - (elts_ci - rxq->rq_pi);
 	uint16_t i;
 
 	DRV_LOG(DEBUG, "port %u Rx queue %u freeing %d WRs",
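
The `used = q_n - (elts_ci - rxq->rq_pi)` computation in the hunk above relies on
free-running 16-bit ring indices: the subtraction is performed modulo 2^16, so the
distance between the two counters stays correct even after either of them wraps,
as long as the ring size is a power of two. A standalone sketch of the idiom
(illustrative names, not driver code):

    #include <assert.h>
    #include <stdint.h>

    /* Distance between two free-running 16-bit counters; correct
     * across wraparound because the subtraction is modulo 2^16. */
    static uint16_t ring_distance(uint16_t ci, uint16_t pi)
    {
        return (uint16_t)(ci - pi);
    }

    int main(void)
    {
        /* 'ci' has wrapped past 65535 while 'pi' has not. */
        uint16_t ci = 10, pi = 65530;

        assert(ring_distance(ci, pi) == 16);
        /* For a 256-entry ring, 256 - 16 = 240 entries remain,
         * mirroring 'used = q_n - (elts_ci - rq_pi)' above. */
        assert(256 - ring_distance(ci, pi) == 240);
        return 0;
    }
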
@@ -359,8 +291,8 @@ rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 	 */
 	if (mlx5_rxq_check_vec_support(rxq) > 0) {
 		for (i = 0; i < used; ++i)
-			(*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
-		rxq->rq_pi = rxq->rq_ci;
+			(*rxq->elts)[(elts_ci + i) & q_mask] = NULL;
+		rxq->rq_pi = elts_ci;
 	}
 	for (i = 0; i != q_n; ++i) {
 		if ((*rxq->elts)[i] != NULL)
@@ -402,14 +334,14 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_dev_config *config = &priv->config;
 	uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
-			     RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT |
 			     DEV_RX_OFFLOAD_TIMESTAMP |
 			     DEV_RX_OFFLOAD_JUMBO_FRAME |
 			     DEV_RX_OFFLOAD_RSS_HASH);
 
+	if (!config->mprq.enabled)
+		offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 	if (config->hw_fcs_strip)
 		offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
-
 	if (config->hw_csum)
 		offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
 			     DEV_RX_OFFLOAD_UDP_CKSUM |
@@ -1302,7 +1234,7 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
 	snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
 	mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
 				0, NULL, NULL, mlx5_mprq_buf_init,
-				(void *)(uintptr_t)(1 << strd_num_n),
+				(void *)((uintptr_t)1 << strd_num_n),
 				dev->device->numa_node, 0);
 	if (mp == NULL) {
 		DRV_LOG(ERR,
@@ -1683,6 +1615,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
 	return tmpl;
 error:
+	mlx5_mr_btree_free(&tmpl->rxq.mr_ctrl.cache_bh);
 	mlx5_free(tmpl);
 	return NULL;
 }
@@ -1773,7 +1706,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_rxq_ctrl *rxq_ctrl;
 
-	if (!(*priv->rxqs)[idx])
+	if (priv->rxqs == NULL || (*priv->rxqs)[idx] == NULL)
 		return 0;
 	rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
 	if (__atomic_sub_fetch(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
@@ -1918,20 +1851,18 @@ mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_ind_table_obj *ind_tbl;
 
+	rte_rwlock_read_lock(&priv->ind_tbls_lock);
 	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
 		if ((ind_tbl->queues_n == queues_n) &&
 		    (memcmp(ind_tbl->queues, queues,
 			    ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
-		     == 0))
+		     == 0)) {
+			__atomic_fetch_add(&ind_tbl->refcnt, 1,
+					   __ATOMIC_RELAXED);
 			break;
+		}
 	}
-	if (ind_tbl) {
-		unsigned int i;
-
-		__atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
-		for (i = 0; i != ind_tbl->queues_n; ++i)
-			mlx5_rxq_get(dev, ind_tbl->queues[i]);
-	}
+	rte_rwlock_read_unlock(&priv->ind_tbls_lock);
 	return ind_tbl;
 }
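
The read lock taken in mlx5_ind_table_obj_get() above, together with the
decrement-and-unlink performed under the write lock in mlx5_ind_table_obj_release()
below, closes a lookup/teardown race: without the lock, a concurrent lookup could
find a table on the list after its reference count had already dropped to zero.
The pattern, reduced to a generic sketch (pthread-based, hypothetical names, not
the driver code):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct table {
        atomic_uint refcnt;
        bool linked;                /* stands in for list membership */
    };

    static pthread_rwlock_t tbls_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Lookup: the reference is taken before the read lock is dropped,
     * so a table found on the list cannot be freed underneath us. */
    static struct table *table_get(struct table *t)
    {
        pthread_rwlock_rdlock(&tbls_lock);
        if (t->linked)
            atomic_fetch_add(&t->refcnt, 1);
        else
            t = NULL;
        pthread_rwlock_unlock(&tbls_lock);
        return t;
    }

    /* Release: decrement and unlink under the same write lock, so no
     * lookup can take a reference on a dying table; the destruction
     * itself can stay outside the lock. */
    static bool table_release(struct table *t)
    {
        unsigned int remaining;

        pthread_rwlock_wrlock(&tbls_lock);
        remaining = atomic_fetch_sub(&t->refcnt, 1) - 1;
        if (remaining == 0)
            t->linked = false;      /* LIST_REMOVE() in the driver */
        pthread_rwlock_unlock(&tbls_lock);
        if (remaining != 0)
            return false;
        free(t);                    /* destroy HW object, then free */
        return true;
    }

Decrementing outside the lock and taking it only for the unlink would reopen the
window: a reader could still find and reference the object between the decrement
and the removal.
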
@@ -1954,19 +1885,20 @@ mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
 			   bool standalone)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	unsigned int i;
-
-	if (__atomic_sub_fetch(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED) == 0)
-		priv->obj_ops.ind_table_destroy(ind_tbl);
+	unsigned int i, ret;
+
+	rte_rwlock_write_lock(&priv->ind_tbls_lock);
+	ret = __atomic_sub_fetch(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
+	if (!ret && !standalone)
+		LIST_REMOVE(ind_tbl, next);
+	rte_rwlock_write_unlock(&priv->ind_tbls_lock);
+	if (ret)
+		return 1;
+	priv->obj_ops.ind_table_destroy(ind_tbl);
 	for (i = 0; i != ind_tbl->queues_n; ++i)
 		claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
-	if (__atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED) == 0) {
-		if (!standalone)
-			LIST_REMOVE(ind_tbl, next);
-		mlx5_free(ind_tbl);
-		return 0;
-	}
-	return 1;
+	mlx5_free(ind_tbl);
+	return 0;
 }
 
 /**
@@ -1985,12 +1917,59 @@ mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
 	struct mlx5_ind_table_obj *ind_tbl;
 	int ret = 0;
 
+	rte_rwlock_read_lock(&priv->ind_tbls_lock);
 	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
 		DRV_LOG(DEBUG,
 			"port %u indirection table obj %p still referenced",
 			dev->data->port_id, (void *)ind_tbl);
 		++ret;
 	}
+	rte_rwlock_read_unlock(&priv->ind_tbls_lock);
+	return ret;
+}
+
+/**
+ * Set up the fields of an indirection table structure.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param ind_tbl
+ *   Indirection table to set up.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
+			 struct mlx5_ind_table_obj *ind_tbl)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	uint32_t queues_n = ind_tbl->queues_n;
+	uint16_t *queues = ind_tbl->queues;
+	unsigned int i, j;
+	int ret = 0, err;
+	const unsigned int n = rte_is_power_of_2(queues_n) ?
+			       log2above(queues_n) :
+			       log2above(priv->config.ind_table_max_size);
+
+	for (i = 0; i != queues_n; ++i) {
+		if (!mlx5_rxq_get(dev, queues[i])) {
+			ret = -rte_errno;
+			goto error;
+		}
+	}
+	ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
+	if (ret)
+		goto error;
+	__atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
+	return 0;
+error:
+	err = rte_errno;
+	for (j = 0; j < i; j++)
+		mlx5_rxq_release(dev, ind_tbl->queues[j]);
+	rte_errno = err;
+	DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
+		dev->data->port_id);
 	return ret;
 }
 
@@ -2015,10 +1994,6 @@ mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_ind_table_obj *ind_tbl;
-	const unsigned int n = rte_is_power_of_2(queues_n) ?
-			       log2above(queues_n) :
-			       log2above(priv->config.ind_table_max_size);
-	unsigned int i, j;
 	int ret;
 
 	ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
@@ -2028,62 +2003,104 @@ mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
 		return NULL;
 	}
 	ind_tbl->queues_n = queues_n;
-	for (i = 0; i != queues_n; ++i) {
-		struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
-		if (!rxq)
-			goto error;
-		ind_tbl->queues[i] = queues[i];
+	ind_tbl->queues = (uint16_t *)(ind_tbl + 1);
+	memcpy(ind_tbl->queues, queues, queues_n * sizeof(*queues));
+	ret = mlx5_ind_table_obj_setup(dev, ind_tbl);
+	if (ret < 0) {
+		mlx5_free(ind_tbl);
+		return NULL;
 	}
-	ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
-	if (ret < 0)
-		goto error;
-	__atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
-	if (!standalone)
+	if (!standalone) {
+		rte_rwlock_write_lock(&priv->ind_tbls_lock);
 		LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
+		rte_rwlock_write_unlock(&priv->ind_tbls_lock);
+	}
 	return ind_tbl;
-error:
-	ret = rte_errno;
-	for (j = 0; j < i; j++)
-		mlx5_rxq_release(dev, ind_tbl->queues[j]);
-	rte_errno = ret;
-	mlx5_free(ind_tbl);
-	DEBUG("Port %u cannot create indirection table.", dev->data->port_id);
-	return NULL;
 }
 
 /**
- * Match an Rx Hash queue.
+ * Modify an indirection table.
  *
- * @param list
- *   Cache list pointer.
- * @param entry
- *   Hash queue entry pointer.
- * @param cb_ctx
- *   Context of the callback function.
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param ind_tbl
+ *   Indirection table to modify.
+ * @param queues
+ *   Queues replacement for the indirection table.
+ * @param queues_n
+ *   Number of queues in the array.
+ * @param standalone
+ *   Indirection table for standalone queue.
  *
  * @return
- *   0 if match, none zero if not match.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_hrxq_match_cb(struct mlx5_cache_list *list,
-		   struct mlx5_cache_entry *entry,
+mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
+			  struct mlx5_ind_table_obj *ind_tbl,
+			  uint16_t *queues, const uint32_t queues_n,
+			  bool standalone)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	unsigned int i, j;
+	int ret = 0, err;
+	const unsigned int n = rte_is_power_of_2(queues_n) ?
+			       log2above(queues_n) :
+			       log2above(priv->config.ind_table_max_size);
+
+	MLX5_ASSERT(standalone);
+	RTE_SET_USED(standalone);
+	if (__atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED) > 1) {
+		/*
+		 * Modification of indirection tables having more than one
+		 * reference is unsupported. Intended for standalone
+		 * indirection tables only.
+		 */
+		DRV_LOG(DEBUG,
+			"Port %u cannot modify indirection table (refcnt > 1).",
+			dev->data->port_id);
+		rte_errno = EINVAL;
+		return -rte_errno;
+	}
+	for (i = 0; i != queues_n; ++i) {
+		if (!mlx5_rxq_get(dev, queues[i])) {
+			ret = -rte_errno;
+			goto error;
+		}
+	}
+	MLX5_ASSERT(priv->obj_ops.ind_table_modify);
+	ret = priv->obj_ops.ind_table_modify(dev, n, queues, queues_n, ind_tbl);
+	if (ret)
+		goto error;
+	for (j = 0; j < ind_tbl->queues_n; j++)
+		mlx5_rxq_release(dev, ind_tbl->queues[j]);
+	ind_tbl->queues_n = queues_n;
+	ind_tbl->queues = queues;
+	return 0;
+error:
+	err = rte_errno;
+	for (j = 0; j < i; j++)
+		mlx5_rxq_release(dev, queues[j]);
+	rte_errno = err;
+	DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
+		dev->data->port_id);
+	return ret;
+}
+
+int
+mlx5_hrxq_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
 		   void *cb_ctx)
 {
-	struct rte_eth_dev *dev = list->ctx;
 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
 	struct mlx5_flow_rss_desc *rss_desc = ctx->data;
 	struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
-	struct mlx5_ind_table_obj *ind_tbl;
 
-	if (hrxq->rss_key_len != rss_desc->key_len ||
+	return (hrxq->rss_key_len != rss_desc->key_len ||
 	    memcmp(hrxq->rss_key, rss_desc->key, rss_desc->key_len) ||
-	    hrxq->hash_fields != rss_desc->hash_fields)
-		return 1;
-	ind_tbl = mlx5_ind_table_obj_get(dev, rss_desc->queue,
-					 rss_desc->queue_num);
-	if (ind_tbl)
-		mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone);
-	return ind_tbl != hrxq->ind_table;
+	    hrxq->hash_fields != rss_desc->hash_fields ||
+	    hrxq->ind_table->queues_n != rss_desc->queue_num ||
+	    memcmp(hrxq->ind_table->queues, rss_desc->queue,
+		   rss_desc->queue_num * sizeof(rss_desc->queue[0])));
 }
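
The rewritten mlx5_hrxq_match_cb() above compares the candidate's queue list
directly instead of materializing a temporary indirection table just to compare
pointers, which also drops the spurious get/release pair. The core of that
comparison, as a standalone helper (illustrative only, not part of the patch):

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    /* Two queue lists are equal iff they have the same length and the
     * same queue indices in the same order; the length check must come
     * first so memcmp() never reads past the shorter array. */
    static bool queues_equal(const uint16_t *a, uint32_t a_n,
                             const uint16_t *b, uint32_t b_n)
    {
        return a_n == b_n && memcmp(a, b, a_n * sizeof(*a)) == 0;
    }
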
@@ -2136,6 +2153,14 @@ mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
 			  queues, queues_n)) {
 		ind_tbl = hrxq->ind_table;
 	} else {
+		if (hrxq->standalone) {
+			/*
+			 * Replacement of indirection table unsupported for
+			 * standalone hrxq objects (used by shared RSS).
+			 */
+			rte_errno = ENOTSUP;
+			return -rte_errno;
+		}
 		ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
 		if (!ind_tbl)
 			ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
@@ -2153,6 +2178,7 @@ mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
 		goto error;
 	}
 	if (ind_tbl != hrxq->ind_table) {
+		MLX5_ASSERT(!hrxq->standalone);
 		mlx5_ind_table_obj_release(dev, hrxq->ind_table,
 					   hrxq->standalone);
 		hrxq->ind_table = ind_tbl;
@@ -2162,8 +2188,10 @@ mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
 	return 0;
 error:
 	err = rte_errno;
-	if (ind_tbl != hrxq->ind_table)
+	if (ind_tbl != hrxq->ind_table) {
+		MLX5_ASSERT(!hrxq->standalone);
 		mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone);
+	}
 	rte_errno = err;
 	return -rte_errno;
 }
@@ -2177,7 +2205,10 @@ __mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
 	mlx5_glue->destroy_flow_action(hrxq->action);
 #endif
 	priv->obj_ops.hrxq_destroy(hrxq);
-	mlx5_ind_table_obj_release(dev, hrxq->ind_table, hrxq->standalone);
+	if (!hrxq->standalone) {
+		mlx5_ind_table_obj_release(dev, hrxq->ind_table,
+					   hrxq->standalone);
+	}
 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
 }
 
@@ -2190,15 +2221,14 @@ __mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
  *   Index to Hash Rx queue to release.
  *
  * @param list
- *   Cache list pointer.
+ *   mlx5 list pointer.
  * @param entry
  *   Hash queue entry pointer.
  */
 void
-mlx5_hrxq_remove_cb(struct mlx5_cache_list *list,
-		    struct mlx5_cache_entry *entry)
+mlx5_hrxq_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
 {
-	struct rte_eth_dev *dev = list->ctx;
+	struct rte_eth_dev *dev = tool_ctx;
 	struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
 
 	__mlx5_hrxq_remove(dev, hrxq);
@@ -2217,12 +2247,12 @@ __mlx5_hrxq_create(struct rte_eth_dev *dev,
 	uint32_t queues_n = rss_desc->queue_num;
 	struct mlx5_hrxq *hrxq = NULL;
 	uint32_t hrxq_idx = 0;
-	struct mlx5_ind_table_obj *ind_tbl;
+	struct mlx5_ind_table_obj *ind_tbl = rss_desc->ind_tbl;
 	int ret;
 
 	queues_n = rss_desc->hash_fields ? queues_n : 1;
-	ind_tbl = standalone ? NULL :
-		  mlx5_ind_table_obj_get(dev, queues, queues_n);
+	if (!ind_tbl)
+		ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
 	if (!ind_tbl)
 		ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
 						 standalone);
@@ -2242,31 +2272,17 @@ __mlx5_hrxq_create(struct rte_eth_dev *dev,
 		goto error;
 	return hrxq;
 error:
-	mlx5_ind_table_obj_release(dev, ind_tbl, standalone);
+	if (!rss_desc->ind_tbl)
+		mlx5_ind_table_obj_release(dev, ind_tbl, standalone);
 	if (hrxq)
 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
 	return NULL;
 }
 
-/**
- * Create an Rx Hash queue.
- *
- * @param list
- *   Cache list pointer.
- * @param entry
- *   Hash queue entry pointer.
- * @param cb_ctx
- *   Context of the callback function.
- *
- * @return
- *   queue entry on success, NULL otherwise.
- */
-struct mlx5_cache_entry *
-mlx5_hrxq_create_cb(struct mlx5_cache_list *list,
-		    struct mlx5_cache_entry *entry __rte_unused,
-		    void *cb_ctx)
+struct mlx5_list_entry *
+mlx5_hrxq_create_cb(void *tool_ctx, void *cb_ctx)
 {
-	struct rte_eth_dev *dev = list->ctx;
+	struct rte_eth_dev *dev = tool_ctx;
 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
 	struct mlx5_flow_rss_desc *rss_desc = ctx->data;
 	struct mlx5_hrxq *hrxq;
@@ -2275,6 +2291,33 @@ mlx5_hrxq_create_cb(struct mlx5_cache_list *list,
 	return hrxq ? &hrxq->entry : NULL;
 }
 
+struct mlx5_list_entry *
+mlx5_hrxq_clone_cb(void *tool_ctx, struct mlx5_list_entry *entry,
+		   void *cb_ctx __rte_unused)
+{
+	struct rte_eth_dev *dev = tool_ctx;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_hrxq *hrxq;
+	uint32_t hrxq_idx = 0;
+
+	hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
+	if (!hrxq)
+		return NULL;
+	memcpy(hrxq, entry, sizeof(*hrxq) + MLX5_RSS_HASH_KEY_LEN);
+	hrxq->idx = hrxq_idx;
+	return &hrxq->entry;
+}
+
+void
+mlx5_hrxq_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
+{
+	struct rte_eth_dev *dev = tool_ctx;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
+
+	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
+}
+
 /**
  * Get an Rx Hash queue.
  *
@@ -2291,7 +2334,7 @@ uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_hrxq *hrxq;
-	struct mlx5_cache_entry *entry;
+	struct mlx5_list_entry *entry;
 	struct mlx5_flow_cb_ctx ctx = {
 		.data = rss_desc,
 	};
@@ -2299,12 +2342,14 @@ uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
 	if (rss_desc->shared_rss) {
 		hrxq = __mlx5_hrxq_create(dev, rss_desc);
 	} else {
-		entry = mlx5_cache_register(&priv->hrxqs, &ctx);
+		entry = mlx5_list_register(priv->hrxqs, &ctx);
 		if (!entry)
 			return 0;
 		hrxq = container_of(entry, typeof(*hrxq), entry);
 	}
-	return hrxq->idx;
+	if (hrxq)
+		return hrxq->idx;
+	return 0;
 }
 
 /**
@@ -2327,7 +2372,7 @@ int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
 	if (!hrxq)
 		return 0;
 	if (!hrxq->standalone)
-		return mlx5_cache_unregister(&priv->hrxqs, &hrxq->entry);
+		return mlx5_list_unregister(priv->hrxqs, &hrxq->entry);
 	__mlx5_hrxq_remove(dev, hrxq);
 	return 0;
 }
@@ -2415,7 +2460,7 @@ mlx5_hrxq_verify(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	return mlx5_cache_list_get_entry_num(&priv->hrxqs);
+	return mlx5_list_get_entry_num(priv->hrxqs);
 }
 
 /**
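
One change in the mlx5_mprq_alloc_mp() hunk deserves a closing remark:
`(void *)(uintptr_t)(1 << strd_num_n)` performs the shift in `int` and only then
widens, so for a shift count of 31 or more the intermediate overflows (undefined
behavior, typically sign-extending on 64-bit targets); the patched
`(void *)((uintptr_t)1 << strd_num_n)` widens the operand first and is well
defined up to the pointer width. Whether the stride count can reach that range in
practice is beside the point; the widened form is the robust idiom. A minimal
standalone demonstration (not driver code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int n = 31;

        /* Shift in 'int', widen afterwards: overflows for n >= 31
         * (undefined behavior; commonly yields 0xffffffff80000000
         * after sign extension on LP64 targets). */
        uintptr_t narrow = (uintptr_t)(1 << n);

        /* Widen first, then shift, as in the patched line: well
         * defined for any n below the width of uintptr_t. */
        uintptr_t wide = (uintptr_t)1 << n;

        printf("narrow=%#lx wide=%#lx\n",
               (unsigned long)narrow, (unsigned long)wide);
        return 0;
    }
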