X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_rxq.c;h=f2247de488755da8513b879666c254f69f79434f;hb=0947ed380febad9d6f794b6f4e9aa9137860a06e;hp=a14724b6c3c188151628f3fb1ce7c17af7c9d717;hpb=204891763c91185e1fe7ba9bfd2c225973c939b6;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index a14724b6c3..f2247de488 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -21,11 +21,11 @@
 
 #include <mlx5_glue.h>
 #include <mlx5_malloc.h>
+#include <mlx5_common.h>
 #include <mlx5_common_mr.h>
 
 #include "mlx5_defs.h"
 #include "mlx5.h"
-#include "mlx5_tx.h"
 #include "mlx5_rx.h"
 #include "mlx5_utils.h"
 #include "mlx5_autoconf.h"
@@ -67,7 +67,7 @@ mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data)
 	unsigned int wqe_n = 1 << rxq_data->elts_n;
 
 	if (mlx5_rxq_mprq_enabled(rxq_data))
-		cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
+		cqe_n = wqe_n * RTE_BIT32(rxq_data->log_strd_num) - 1;
 	else
 		cqe_n = wqe_n - 1;
 	return cqe_n;
@@ -137,8 +137,9 @@ rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 {
 	const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
 	unsigned int elts_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
-		(1 << rxq_ctrl->rxq.elts_n) * (1 << rxq_ctrl->rxq.strd_num_n) :
-		(1 << rxq_ctrl->rxq.elts_n);
+		RTE_BIT32(rxq_ctrl->rxq.elts_n) *
+		RTE_BIT32(rxq_ctrl->rxq.log_strd_num) :
+		RTE_BIT32(rxq_ctrl->rxq.elts_n);
 	unsigned int i;
 	int err;
 
@@ -293,8 +294,8 @@ rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 {
 	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
 	const uint16_t q_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
-		(1 << rxq->elts_n) * (1 << rxq->strd_num_n) :
-		(1 << rxq->elts_n);
+		RTE_BIT32(rxq->elts_n) * RTE_BIT32(rxq->log_strd_num) :
+		RTE_BIT32(rxq->elts_n);
 	const uint16_t q_mask = q_n - 1;
 	uint16_t elts_ci = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
 		rxq->elts_ci : rxq->rq_ci;
@@ -775,11 +776,14 @@ mlx5_shared_rxq_match(struct mlx5_rxq_ctrl *rxq_ctrl, struct rte_eth_dev *dev,
 			dev->data->port_id, idx);
 		return false;
 	} else if (mp == NULL) {
+		if (conf->rx_nseg != rxq_ctrl->rxseg_n) {
+			DRV_LOG(ERR, "port %u queue index %u failed to join shared group: segment number mismatch",
+				dev->data->port_id, idx);
+			return false;
+		}
 		for (i = 0; i < conf->rx_nseg; i++) {
-			if (conf->rx_seg[i].split.mp !=
-			    rxq_ctrl->rxq.rxseg[i].mp ||
-			    conf->rx_seg[i].split.length !=
-			    rxq_ctrl->rxq.rxseg[i].length) {
+			if (memcmp(&conf->rx_seg[i].split, &rxq_ctrl->rxseg[i],
+				   sizeof(struct rte_eth_rxseg_split))) {
 				DRV_LOG(ERR, "port %u queue index %u failed to join shared group: segment %u configuration mismatch",
 					dev->data->port_id, idx, i);
 				return false;
@@ -1171,15 +1175,13 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
 	int sq_n = 0;
 	uint32_t doorbell_hi;
 	uint64_t doorbell;
-	void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
 
 	sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
 	doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
 	doorbell = (uint64_t)doorbell_hi << 32;
 	doorbell |= rxq->cqn;
-	rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
-	mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
-			 cq_db_reg, rxq->uar_lock_cq);
+	mlx5_doorbell_ring(&rxq->uar_data, rte_cpu_to_be_64(doorbell),
+			   doorbell_hi, &rxq->cq_db[MLX5_CQ_ARM_DB], 0);
 }
 
 /**
@@ -1273,6 +1275,8 @@ mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
 	struct mlx5_rxq_obj *rxq_obj;
 
 	LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
+		if (rxq_obj->rxq_ctrl == NULL)
+			continue;
 		if (rxq_obj->rxq_ctrl->rxq.shared &&
 		    !LIST_EMPTY(&rxq_obj->rxq_ctrl->owners))
 			continue;
@@ -1375,8 +1379,8 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
 	unsigned int buf_len;
 	unsigned int obj_num;
 	unsigned int obj_size;
-	unsigned int strd_num_n = 0;
-	unsigned int strd_sz_n = 0;
+	unsigned int log_strd_num = 0;
+	unsigned int log_strd_sz = 0;
 	unsigned int i;
 	unsigned int n_ibv = 0;
 	int ret;
@@ -1395,16 +1399,18 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
 		n_ibv++;
 		desc += 1 << rxq->elts_n;
 		/* Get the max number of strides. */
-		if (strd_num_n < rxq->strd_num_n)
-			strd_num_n = rxq->strd_num_n;
+		if (log_strd_num < rxq->log_strd_num)
+			log_strd_num = rxq->log_strd_num;
 		/* Get the max size of a stride. */
-		if (strd_sz_n < rxq->strd_sz_n)
-			strd_sz_n = rxq->strd_sz_n;
-	}
-	MLX5_ASSERT(strd_num_n && strd_sz_n);
-	buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
-	obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
-		sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
+		if (log_strd_sz < rxq->log_strd_sz)
+			log_strd_sz = rxq->log_strd_sz;
+	}
+	MLX5_ASSERT(log_strd_num && log_strd_sz);
+	buf_len = RTE_BIT32(log_strd_num) * RTE_BIT32(log_strd_sz);
+	obj_size = sizeof(struct mlx5_mprq_buf) + buf_len +
+		   RTE_BIT32(log_strd_num) *
+		   sizeof(struct rte_mbuf_ext_shared_info) +
+		   RTE_PKTMBUF_HEADROOM;
 	/*
 	 * Received packets can be either memcpy'd or externally referenced. In
 	 * case that the packet is attached to an mbuf as an external buffer, as
@@ -1450,7 +1456,7 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
 	snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
 	mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
 				0, NULL, NULL, mlx5_mprq_buf_init,
-				(void *)((uintptr_t)1 << strd_num_n),
+				(void *)((uintptr_t)1 << log_strd_num),
 				dev->device->numa_node, 0);
 	if (mp == NULL) {
 		DRV_LOG(ERR,
@@ -1460,7 +1466,7 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
 		rte_errno = ENOMEM;
 		return -rte_errno;
 	}
-	ret = mlx5_mr_mempool_register(priv->sh->cdev, mp);
+	ret = mlx5_mr_mempool_register(priv->sh->cdev, mp, false);
 	if (ret < 0 && rte_errno != EEXIST) {
 		ret = rte_errno;
 		DRV_LOG(ERR, "port %u failed to register a mempool for Multi-Packet RQ",
@@ -1566,15 +1572,18 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
 	unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
 	const int mprq_en = mlx5_check_mprq_support(dev) > 0 && n_seg == 1 &&
 		!rx_seg[0].offset && !rx_seg[0].length;
-	unsigned int mprq_stride_nums = config->mprq.stride_num_n ?
-		config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
-	unsigned int mprq_stride_size = non_scatter_min_mbuf_size <=
-		(1U << config->mprq.max_stride_size_n) ?
-		log2above(non_scatter_min_mbuf_size) : MLX5_MPRQ_STRIDE_SIZE_N;
-	unsigned int mprq_stride_cap = (config->mprq.stride_num_n ?
-		(1U << config->mprq.stride_num_n) : (1U << mprq_stride_nums)) *
-		(config->mprq.stride_size_n ?
-		(1U << config->mprq.stride_size_n) : (1U << mprq_stride_size));
+	unsigned int log_mprq_stride_nums = config->mprq.log_stride_num ?
+		config->mprq.log_stride_num : MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM;
+	unsigned int log_mprq_stride_size = non_scatter_min_mbuf_size <=
+		RTE_BIT32(config->mprq.log_max_stride_size) ?
+		log2above(non_scatter_min_mbuf_size) :
+		MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE;
+	unsigned int mprq_stride_cap = (config->mprq.log_stride_num ?
+		RTE_BIT32(config->mprq.log_stride_num) :
+		RTE_BIT32(log_mprq_stride_nums)) *
+		(config->mprq.log_stride_size ?
+		RTE_BIT32(config->mprq.log_stride_size) :
+		RTE_BIT32(log_mprq_stride_size));
 	/*
 	 * Always allocate extra slots, even if eventually
 	 * the vector Rx will not be used.
@@ -1586,7 +1595,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
 
 	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
 			   desc_n * sizeof(struct rte_mbuf *) + (!!mprq_en) *
-			   (desc >> mprq_stride_nums) * sizeof(struct mlx5_mprq_buf *),
+			   (desc >> log_mprq_stride_nums) * sizeof(struct mlx5_mprq_buf *),
 			   0, socket);
 	if (!tmpl) {
 		rte_errno = ENOMEM;
@@ -1602,6 +1611,13 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
 	rxq->ctrl = tmpl;
 	LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);
 	MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
+	/*
+	 * Save the original segment configuration in the shared queue
+	 * descriptor for the later check on the sibling queue creation.
+	 */
+	tmpl->rxseg_n = n_seg;
+	rte_memcpy(tmpl->rxseg, qs_seg,
+		   sizeof(struct rte_eth_rxseg_split) * n_seg);
 	/*
 	 * Build the array of actual buffer offsets and lengths.
 	 * Pad with the buffers from the last memory pool if
@@ -1677,7 +1693,8 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
 		goto error;
 	}
 	tmpl->type = MLX5_RXQ_TYPE_STANDARD;
-	if (mlx5_mr_ctrl_init(&tmpl->rxq.mr_ctrl, priv->sh->cdev, socket)) {
+	if (mlx5_mr_ctrl_init(&tmpl->rxq.mr_ctrl,
+			      &priv->sh->cdev->mr_scache.dev_gen, socket)) {
 		/* rte_errno is already set. */
 		goto error;
 	}
@@ -1690,37 +1707,37 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
 	 *  - MPRQ is enabled.
 	 *  - The number of descs is more than the number of strides.
 	 *  - max_rx_pktlen plus overhead is less than the max size
-	 *    of a stride or mprq_stride_size is specified by a user.
+	 *    of a stride or log_mprq_stride_size is specified by a user.
 	 *    Need to make sure that there are enough strides to encap
-	 *    the maximum packet size in case mprq_stride_size is set.
+	 *    the maximum packet size in case log_mprq_stride_size is set.
 	 *  Otherwise, enable Rx scatter if necessary.
 	 */
-	if (mprq_en && desc > (1U << mprq_stride_nums) &&
+	if (mprq_en && desc > RTE_BIT32(log_mprq_stride_nums) &&
 	    (non_scatter_min_mbuf_size <=
-	     (1U << config->mprq.max_stride_size_n) ||
-	     (config->mprq.stride_size_n &&
+	     RTE_BIT32(config->mprq.log_max_stride_size) ||
+	     (config->mprq.log_stride_size &&
 	      non_scatter_min_mbuf_size <= mprq_stride_cap))) {
 		/* TODO: Rx scatter isn't supported yet. */
 		tmpl->rxq.sges_n = 0;
 		/* Trim the number of descs needed. */
-		desc >>= mprq_stride_nums;
-		tmpl->rxq.strd_num_n = config->mprq.stride_num_n ?
-			config->mprq.stride_num_n : mprq_stride_nums;
-		tmpl->rxq.strd_sz_n = config->mprq.stride_size_n ?
-			config->mprq.stride_size_n : mprq_stride_size;
+		desc >>= log_mprq_stride_nums;
+		tmpl->rxq.log_strd_num = config->mprq.log_stride_num ?
+			config->mprq.log_stride_num : log_mprq_stride_nums;
+		tmpl->rxq.log_strd_sz = config->mprq.log_stride_size ?
+			config->mprq.log_stride_size : log_mprq_stride_size;
 		tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
 		tmpl->rxq.strd_scatter_en =
 				!!(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
 		tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
 				config->mprq.max_memcpy_len);
 		max_lro_size = RTE_MIN(max_rx_pktlen,
-				       (1u << tmpl->rxq.strd_num_n) *
-				       (1u << tmpl->rxq.strd_sz_n));
+				       RTE_BIT32(tmpl->rxq.log_strd_num) *
+				       RTE_BIT32(tmpl->rxq.log_strd_sz));
 		DRV_LOG(DEBUG,
 			"port %u Rx queue %u: Multi-Packet RQ is enabled"
 			" strd_num_n = %u, strd_sz_n = %u",
 			dev->data->port_id, idx,
-			tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
+			tmpl->rxq.log_strd_num, tmpl->rxq.log_strd_sz);
 	} else if (tmpl->rxq.rxseg_n == 1) {
 		MLX5_ASSERT(max_rx_pktlen <= first_mb_free_size);
 		tmpl->rxq.sges_n = 0;
@@ -1763,15 +1780,15 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
 			" min_stride_sz = %u, max_stride_sz = %u).",
 			dev->data->port_id, non_scatter_min_mbuf_size,
 			desc, priv->rxqs_n,
-			config->mprq.stride_size_n ?
-			(1U << config->mprq.stride_size_n) :
-			(1U << mprq_stride_size),
-			config->mprq.stride_num_n ?
-			(1U << config->mprq.stride_num_n) :
-			(1U << mprq_stride_nums),
+			config->mprq.log_stride_size ?
+			RTE_BIT32(config->mprq.log_stride_size) :
+			RTE_BIT32(log_mprq_stride_size),
+			config->mprq.log_stride_num ?
+			RTE_BIT32(config->mprq.log_stride_num) :
+			RTE_BIT32(log_mprq_stride_nums),
 			config->mprq.min_rxqs_num,
-			(1U << config->mprq.min_stride_size_n),
-			(1U << config->mprq.max_stride_size_n));
+			RTE_BIT32(config->mprq.log_min_stride_size),
+			RTE_BIT32(config->mprq.log_max_stride_size));
 		DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
 			dev->data->port_id, 1 << tmpl->rxq.sges_n);
 		if (desc % (1 << tmpl->rxq.sges_n)) {
@@ -1842,9 +1859,6 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
 		(struct rte_mbuf *(*)[desc_n])(tmpl + 1);
 	tmpl->rxq.mprq_bufs =
 		(struct mlx5_mprq_buf *(*)[desc])(*tmpl->rxq.elts + desc_n);
-#ifndef RTE_ARCH_64
-	tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;
-#endif
 	tmpl->rxq.idx = idx;
 	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
 	return tmpl;
@@ -2144,7 +2158,7 @@ mlx5_rxq_get_hairpin_conf(struct rte_eth_dev *dev, uint16_t idx)
  *   Number of queues in the array.
  *
  * @return
- *   1 if all queues in indirection table match 0 othrwise.
+ *   1 if all queues in indirection table match 0 otherwise.
  */
 static int
 mlx5_ind_table_obj_match_queues(const struct mlx5_ind_table_obj *ind_tbl,
@@ -2199,6 +2213,9 @@ mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
  *   Indirection table to release.
  * @param standalone
  *   Indirection table for Standalone queue.
+ * @param deref_rxqs
+ *   If true, then dereference RX queues related to indirection table.
+ *   Otherwise, no additional action will be taken.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
@@ -2206,7 +2223,8 @@ mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
 int
 mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
 			   struct mlx5_ind_table_obj *ind_tbl,
-			   bool standalone)
+			   bool standalone,
+			   bool deref_rxqs)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	unsigned int i, ret;
@@ -2219,8 +2237,10 @@ mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
 	if (ret)
 		return 1;
 	priv->obj_ops.ind_table_destroy(ind_tbl);
-	for (i = 0; i != ind_tbl->queues_n; ++i)
-		claim_nonzero(mlx5_rxq_deref(dev, ind_tbl->queues[i]));
+	if (deref_rxqs) {
+		for (i = 0; i != ind_tbl->queues_n; ++i)
+			claim_nonzero(mlx5_rxq_deref(dev, ind_tbl->queues[i]));
+	}
 	mlx5_free(ind_tbl);
 	return 0;
 }
@@ -2259,39 +2279,45 @@ mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
 *   Pointer to Ethernet device.
 * @param ind_table
 *   Indirection table to modify.
+ * @param ref_qs
+ *   Whether to increment RxQ reference counters.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
 int
 mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
-			 struct mlx5_ind_table_obj *ind_tbl)
+			 struct mlx5_ind_table_obj *ind_tbl,
+			 bool ref_qs)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	uint32_t queues_n = ind_tbl->queues_n;
 	uint16_t *queues = ind_tbl->queues;
-	unsigned int i, j;
+	unsigned int i = 0, j;
 	int ret = 0, err;
 	const unsigned int n = rte_is_power_of_2(queues_n) ?
 			       log2above(queues_n) :
 			       log2above(priv->config.ind_table_max_size);
 
-	for (i = 0; i != queues_n; ++i) {
-		if (mlx5_rxq_ref(dev, queues[i]) == NULL) {
-			ret = -rte_errno;
-			goto error;
+	if (ref_qs)
+		for (i = 0; i != queues_n; ++i) {
+			if (mlx5_rxq_ref(dev, queues[i]) == NULL) {
+				ret = -rte_errno;
+				goto error;
+			}
 		}
-	}
 	ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
 	if (ret)
 		goto error;
 	__atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
 	return 0;
error:
-	err = rte_errno;
-	for (j = 0; j < i; j++)
-		mlx5_rxq_deref(dev, ind_tbl->queues[j]);
-	rte_errno = err;
+	if (ref_qs) {
+		err = rte_errno;
+		for (j = 0; j < i; j++)
+			mlx5_rxq_deref(dev, queues[j]);
+		rte_errno = err;
+	}
 	DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
 		dev->data->port_id);
 	return ret;
@@ -2308,13 +2334,15 @@ error:
 *   Number of queues in the array.
 * @param standalone
 *   Indirection table for Standalone queue.
+ * @param ref_qs
+ *   Whether to increment RxQ reference counters.
 *
 * @return
 *   The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
 */
 static struct mlx5_ind_table_obj *
 mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
-		       uint32_t queues_n, bool standalone)
+		       uint32_t queues_n, bool standalone, bool ref_qs)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_ind_table_obj *ind_tbl;
@@ -2329,7 +2357,7 @@ mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
 	ind_tbl->queues_n = queues_n;
 	ind_tbl->queues = (uint16_t *)(ind_tbl + 1);
 	memcpy(ind_tbl->queues, queues, queues_n * sizeof(*queues));
-	ret = mlx5_ind_table_obj_setup(dev, ind_tbl);
+	ret = mlx5_ind_table_obj_setup(dev, ind_tbl, ref_qs);
 	if (ret < 0) {
 		mlx5_free(ind_tbl);
 		return NULL;
@@ -2375,6 +2403,10 @@ mlx5_ind_table_obj_check_standalone(struct rte_eth_dev *dev __rte_unused,
 *   Number of queues in the array.
 * @param standalone
 *   Indirection table for Standalone queue.
+ * @param ref_new_qs
+ *   Whether to increment new RxQ set reference counters.
+ * @param deref_old_qs
+ *   Whether to decrement old RxQ set reference counters.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
@@ -2383,10 +2415,10 @@ int
 mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
 			  struct mlx5_ind_table_obj *ind_tbl,
 			  uint16_t *queues, const uint32_t queues_n,
-			  bool standalone)
+			  bool standalone, bool ref_new_qs, bool deref_old_qs)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	unsigned int i;
+	unsigned int i = 0, j;
 	int ret = 0, err;
 	const unsigned int n = rte_is_power_of_2(queues_n) ?
 			       log2above(queues_n) :
@@ -2396,22 +2428,30 @@ mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
 	RTE_SET_USED(standalone);
 	if (mlx5_ind_table_obj_check_standalone(dev, ind_tbl) < 0)
 		return -rte_errno;
-	for (i = 0; i != queues_n; ++i) {
-		if (!mlx5_rxq_get(dev, queues[i])) {
-			ret = -rte_errno;
-			goto error;
+	if (ref_new_qs)
+		for (i = 0; i != queues_n; ++i) {
+			if (!mlx5_rxq_ref(dev, queues[i])) {
+				ret = -rte_errno;
+				goto error;
+			}
 		}
-	}
 	MLX5_ASSERT(priv->obj_ops.ind_table_modify);
 	ret = priv->obj_ops.ind_table_modify(dev, n, queues, queues_n, ind_tbl);
 	if (ret)
 		goto error;
+	if (deref_old_qs)
+		for (i = 0; i < ind_tbl->queues_n; i++)
+			claim_nonzero(mlx5_rxq_deref(dev, ind_tbl->queues[i]));
 	ind_tbl->queues_n = queues_n;
 	ind_tbl->queues = queues;
 	return 0;
error:
-	err = rte_errno;
-	rte_errno = err;
+	if (ref_new_qs) {
+		err = rte_errno;
+		for (j = 0; j < i; j++)
+			mlx5_rxq_deref(dev, queues[j]);
+		rte_errno = err;
+	}
 	DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
 		dev->data->port_id);
 	return ret;
@@ -2432,19 +2472,17 @@ int
 mlx5_ind_table_obj_attach(struct rte_eth_dev *dev,
 			  struct mlx5_ind_table_obj *ind_tbl)
 {
-	unsigned int i;
 	int ret;
 
 	ret = mlx5_ind_table_obj_modify(dev, ind_tbl, ind_tbl->queues,
-					ind_tbl->queues_n, true);
-	if (ret != 0) {
+					ind_tbl->queues_n,
+					true /* standalone */,
+					true /* ref_new_qs */,
+					false /* deref_old_qs */);
+	if (ret != 0)
 		DRV_LOG(ERR, "Port %u could not modify indirect table obj %p",
 			dev->data->port_id, (void *)ind_tbl);
-		return ret;
-	}
-	for (i = 0; i < ind_tbl->queues_n; i++)
-		mlx5_rxq_get(dev, ind_tbl->queues[i]);
-	return 0;
+	return ret;
 }
 
 /**
@@ -2533,6 +2571,7 @@ mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_hrxq *hrxq =
 		mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
+	bool dev_started = !!dev->data->dev_started;
 	int ret;
 
 	if (!hrxq) {
@@ -2553,7 +2592,7 @@ mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
 	if (hrxq->standalone) {
 		/*
 		 * Replacement of indirection table unsupported for
-		 * stanalone hrxq objects (used by shared RSS).
+		 * standalone hrxq objects (used by shared RSS).
 		 */
 		rte_errno = ENOTSUP;
 		return -rte_errno;
@@ -2561,7 +2600,8 @@ mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
 		ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
 		if (!ind_tbl)
 			ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
-							 hrxq->standalone);
+							 hrxq->standalone,
+							 dev_started);
 	}
 	if (!ind_tbl) {
 		rte_errno = ENOMEM;
@@ -2577,7 +2617,7 @@ mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
 	if (ind_tbl != hrxq->ind_table) {
 		MLX5_ASSERT(!hrxq->standalone);
 		mlx5_ind_table_obj_release(dev, hrxq->ind_table,
-					   hrxq->standalone);
+					   hrxq->standalone, true);
 		hrxq->ind_table = ind_tbl;
 	}
 	hrxq->hash_fields = hash_fields;
@@ -2587,7 +2627,8 @@ error:
 	err = rte_errno;
 	if (ind_tbl != hrxq->ind_table) {
 		MLX5_ASSERT(!hrxq->standalone);
-		mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone);
+		mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone,
+					   true);
 	}
 	rte_errno = err;
 	return -rte_errno;
@@ -2604,7 +2645,7 @@ __mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
 	priv->obj_ops.hrxq_destroy(hrxq);
 	if (!hrxq->standalone) {
 		mlx5_ind_table_obj_release(dev, hrxq->ind_table,
-					   hrxq->standalone);
+					   hrxq->standalone, true);
 	}
 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
 }
@@ -2652,7 +2693,8 @@ __mlx5_hrxq_create(struct rte_eth_dev *dev,
 	ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
 	if (!ind_tbl)
 		ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
-						 standalone);
+						 standalone,
+						 !!dev->data->dev_started);
 	if (!ind_tbl)
 		return NULL;
 	hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
@@ -2670,7 +2712,7 @@ __mlx5_hrxq_create(struct rte_eth_dev *dev,
 	return hrxq;
 error:
 	if (!rss_desc->ind_tbl)
-		mlx5_ind_table_obj_release(dev, ind_tbl, standalone);
+		mlx5_ind_table_obj_release(dev, ind_tbl, standalone, true);
 	if (hrxq)
 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
 	return NULL;
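
The bulk of this diff is mechanical: the log2-encoded MPRQ fields gain a log_ prefix (strd_num_n -> log_strd_num, strd_sz_n -> log_strd_sz, and the config->mprq fields likewise), and open-coded 1 << n shifts are replaced with RTE_BIT32(). RTE_BIT32(n) is defined in DPDK's rte_bitops.h as (UINT32_C(1) << (n)), so the computed values are unchanged; the shift just becomes explicitly unsigned and 32-bit wide. A minimal standalone sketch of the equivalence follows; the stride values in it are hypothetical examples, not driver defaults:

/*
 * Standalone sketch of the RTE_BIT32() conversion applied in this diff.
 * The stride parameters below are hypothetical, chosen only to show the
 * arithmetic on the log2-encoded fields.
 */
#include <stdint.h>
#include <stdio.h>

/* Same definition as DPDK's rte_bitops.h. */
#define RTE_BIT32(nr) (UINT32_C(1) << (nr))

int main(void)
{
	unsigned int log_strd_num = 6;	/* 2^6 = 64 strides per MPRQ WQE */
	unsigned int log_strd_sz = 11;	/* 2^11 = 2048-byte strides */
	/* Old spelling: (1 << log_strd_num) * (1 << log_strd_sz). */
	uint32_t buf_len = RTE_BIT32(log_strd_num) * RTE_BIT32(log_strd_sz);

	printf("strides=%u, stride size=%u, buffer length=%u\n",
	       RTE_BIT32(log_strd_num), RTE_BIT32(log_strd_sz), buf_len);
	return 0;
}

The remaining hunks change behavior: mlx5_arm_cq() now arms the CQ through the common mlx5_doorbell_ring() helper instead of mlx5_uar_write64() with a per-queue 32-bit UAR lock; shared Rx queues save the full rte_eth_rxseg_split array at creation and compare segment count plus memcmp() of each segment when a sibling tries to join; and the indirection-table helpers take explicit ref_qs/deref_rxqs/ref_new_qs/deref_old_qs flags so that Rx queue reference counting can be tied to whether the port is started (note dev_started being passed to mlx5_ind_table_obj_new()).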