X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_rxq.c;h=f2247de488755da8513b879666c254f69f79434f;hb=0947ed380febad9d6f794b6f4e9aa9137860a06e;hp=2dd9490c365b5060c6d378a1ff3eb0d1f9926006;hpb=c65d6844970a762c907f2de1763e0c71a3c65c66;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index 2dd9490c36..f2247de488 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -67,7 +67,7 @@ mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data) unsigned int wqe_n = 1 << rxq_data->elts_n; if (mlx5_rxq_mprq_enabled(rxq_data)) - cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1; + cqe_n = wqe_n * RTE_BIT32(rxq_data->log_strd_num) - 1; else cqe_n = wqe_n - 1; return cqe_n; @@ -137,8 +137,9 @@ rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl) { const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n; unsigned int elts_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ? - (1 << rxq_ctrl->rxq.elts_n) * (1 << rxq_ctrl->rxq.strd_num_n) : - (1 << rxq_ctrl->rxq.elts_n); + RTE_BIT32(rxq_ctrl->rxq.elts_n) * + RTE_BIT32(rxq_ctrl->rxq.log_strd_num) : + RTE_BIT32(rxq_ctrl->rxq.elts_n); unsigned int i; int err; @@ -293,8 +294,8 @@ rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl) { struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq; const uint16_t q_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ? - (1 << rxq->elts_n) * (1 << rxq->strd_num_n) : - (1 << rxq->elts_n); + RTE_BIT32(rxq->elts_n) * RTE_BIT32(rxq->log_strd_num) : + RTE_BIT32(rxq->elts_n); const uint16_t q_mask = q_n - 1; uint16_t elts_ci = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ? rxq->elts_ci : rxq->rq_ci; @@ -775,11 +776,14 @@ mlx5_shared_rxq_match(struct mlx5_rxq_ctrl *rxq_ctrl, struct rte_eth_dev *dev, dev->data->port_id, idx); return false; } else if (mp == NULL) { + if (conf->rx_nseg != rxq_ctrl->rxseg_n) { + DRV_LOG(ERR, "port %u queue index %u failed to join shared group: segment number mismatch", + dev->data->port_id, idx); + return false; + } for (i = 0; i < conf->rx_nseg; i++) { - if (conf->rx_seg[i].split.mp != - rxq_ctrl->rxq.rxseg[i].mp || - conf->rx_seg[i].split.length != - rxq_ctrl->rxq.rxseg[i].length) { + if (memcmp(&conf->rx_seg[i].split, &rxq_ctrl->rxseg[i], + sizeof(struct rte_eth_rxseg_split))) { DRV_LOG(ERR, "port %u queue index %u failed to join shared group: segment %u configuration mismatch", dev->data->port_id, idx, i); return false; @@ -1375,8 +1379,8 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev) unsigned int buf_len; unsigned int obj_num; unsigned int obj_size; - unsigned int strd_num_n = 0; - unsigned int strd_sz_n = 0; + unsigned int log_strd_num = 0; + unsigned int log_strd_sz = 0; unsigned int i; unsigned int n_ibv = 0; int ret; @@ -1395,16 +1399,18 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev) n_ibv++; desc += 1 << rxq->elts_n; /* Get the max number of strides. */ - if (strd_num_n < rxq->strd_num_n) - strd_num_n = rxq->strd_num_n; + if (log_strd_num < rxq->log_strd_num) + log_strd_num = rxq->log_strd_num; /* Get the max size of a stride. 
*/ - if (strd_sz_n < rxq->strd_sz_n) - strd_sz_n = rxq->strd_sz_n; - } - MLX5_ASSERT(strd_num_n && strd_sz_n); - buf_len = (1 << strd_num_n) * (1 << strd_sz_n); - obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) * - sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM; + if (log_strd_sz < rxq->log_strd_sz) + log_strd_sz = rxq->log_strd_sz; + } + MLX5_ASSERT(log_strd_num && log_strd_sz); + buf_len = RTE_BIT32(log_strd_num) * RTE_BIT32(log_strd_sz); + obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + + RTE_BIT32(log_strd_num) * + sizeof(struct rte_mbuf_ext_shared_info) + + RTE_PKTMBUF_HEADROOM; /* * Received packets can be either memcpy'd or externally referenced. In * case that the packet is attached to an mbuf as an external buffer, as @@ -1450,7 +1456,7 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev) snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id); mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ, 0, NULL, NULL, mlx5_mprq_buf_init, - (void *)((uintptr_t)1 << strd_num_n), + (void *)((uintptr_t)1 << log_strd_num), dev->device->numa_node, 0); if (mp == NULL) { DRV_LOG(ERR, @@ -1566,15 +1572,18 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM; const int mprq_en = mlx5_check_mprq_support(dev) > 0 && n_seg == 1 && !rx_seg[0].offset && !rx_seg[0].length; - unsigned int mprq_stride_nums = config->mprq.stride_num_n ? - config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N; - unsigned int mprq_stride_size = non_scatter_min_mbuf_size <= - (1U << config->mprq.max_stride_size_n) ? - log2above(non_scatter_min_mbuf_size) : MLX5_MPRQ_STRIDE_SIZE_N; - unsigned int mprq_stride_cap = (config->mprq.stride_num_n ? - (1U << config->mprq.stride_num_n) : (1U << mprq_stride_nums)) * - (config->mprq.stride_size_n ? - (1U << config->mprq.stride_size_n) : (1U << mprq_stride_size)); + unsigned int log_mprq_stride_nums = config->mprq.log_stride_num ? + config->mprq.log_stride_num : MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM; + unsigned int log_mprq_stride_size = non_scatter_min_mbuf_size <= + RTE_BIT32(config->mprq.log_max_stride_size) ? + log2above(non_scatter_min_mbuf_size) : + MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE; + unsigned int mprq_stride_cap = (config->mprq.log_stride_num ? + RTE_BIT32(config->mprq.log_stride_num) : + RTE_BIT32(log_mprq_stride_nums)) * + (config->mprq.log_stride_size ? + RTE_BIT32(config->mprq.log_stride_size) : + RTE_BIT32(log_mprq_stride_size)); /* * Always allocate extra slots, even if eventually * the vector Rx will not be used. @@ -1586,7 +1595,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) + desc_n * sizeof(struct rte_mbuf *) + (!!mprq_en) * - (desc >> mprq_stride_nums) * sizeof(struct mlx5_mprq_buf *), + (desc >> log_mprq_stride_nums) * sizeof(struct mlx5_mprq_buf *), 0, socket); if (!tmpl) { rte_errno = ENOMEM; @@ -1602,6 +1611,13 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, rxq->ctrl = tmpl; LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry); MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG); + /* + * Save the original segment configuration in the shared queue + * descriptor for the later check on the sibling queue creation. + */ + tmpl->rxseg_n = n_seg; + rte_memcpy(tmpl->rxseg, qs_seg, + sizeof(struct rte_eth_rxseg_split) * n_seg); /* * Build the array of actual buffer offsets and lengths. 
* Pad with the buffers from the last memory pool if @@ -1691,37 +1707,37 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, * - MPRQ is enabled. * - The number of descs is more than the number of strides. * - max_rx_pktlen plus overhead is less than the max size - * of a stride or mprq_stride_size is specified by a user. + * of a stride or log_mprq_stride_size is specified by a user. * Need to make sure that there are enough strides to encap - * the maximum packet size in case mprq_stride_size is set. + * the maximum packet size in case log_mprq_stride_size is set. * Otherwise, enable Rx scatter if necessary. */ - if (mprq_en && desc > (1U << mprq_stride_nums) && + if (mprq_en && desc > RTE_BIT32(log_mprq_stride_nums) && (non_scatter_min_mbuf_size <= - (1U << config->mprq.max_stride_size_n) || - (config->mprq.stride_size_n && + RTE_BIT32(config->mprq.log_max_stride_size) || + (config->mprq.log_stride_size && non_scatter_min_mbuf_size <= mprq_stride_cap))) { /* TODO: Rx scatter isn't supported yet. */ tmpl->rxq.sges_n = 0; /* Trim the number of descs needed. */ - desc >>= mprq_stride_nums; - tmpl->rxq.strd_num_n = config->mprq.stride_num_n ? - config->mprq.stride_num_n : mprq_stride_nums; - tmpl->rxq.strd_sz_n = config->mprq.stride_size_n ? - config->mprq.stride_size_n : mprq_stride_size; + desc >>= log_mprq_stride_nums; + tmpl->rxq.log_strd_num = config->mprq.log_stride_num ? + config->mprq.log_stride_num : log_mprq_stride_nums; + tmpl->rxq.log_strd_sz = config->mprq.log_stride_size ? + config->mprq.log_stride_size : log_mprq_stride_size; tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT; tmpl->rxq.strd_scatter_en = !!(offloads & RTE_ETH_RX_OFFLOAD_SCATTER); tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size, config->mprq.max_memcpy_len); max_lro_size = RTE_MIN(max_rx_pktlen, - (1u << tmpl->rxq.strd_num_n) * - (1u << tmpl->rxq.strd_sz_n)); + RTE_BIT32(tmpl->rxq.log_strd_num) * + RTE_BIT32(tmpl->rxq.log_strd_sz)); DRV_LOG(DEBUG, "port %u Rx queue %u: Multi-Packet RQ is enabled" " strd_num_n = %u, strd_sz_n = %u", dev->data->port_id, idx, - tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n); + tmpl->rxq.log_strd_num, tmpl->rxq.log_strd_sz); } else if (tmpl->rxq.rxseg_n == 1) { MLX5_ASSERT(max_rx_pktlen <= first_mb_free_size); tmpl->rxq.sges_n = 0; @@ -1764,15 +1780,15 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, " min_stride_sz = %u, max_stride_sz = %u).", dev->data->port_id, non_scatter_min_mbuf_size, desc, priv->rxqs_n, - config->mprq.stride_size_n ? - (1U << config->mprq.stride_size_n) : - (1U << mprq_stride_size), - config->mprq.stride_num_n ? - (1U << config->mprq.stride_num_n) : - (1U << mprq_stride_nums), + config->mprq.log_stride_size ? + RTE_BIT32(config->mprq.log_stride_size) : + RTE_BIT32(log_mprq_stride_size), + config->mprq.log_stride_num ? + RTE_BIT32(config->mprq.log_stride_num) : + RTE_BIT32(log_mprq_stride_nums), config->mprq.min_rxqs_num, - (1U << config->mprq.min_stride_size_n), - (1U << config->mprq.max_stride_size_n)); + RTE_BIT32(config->mprq.log_min_stride_size), + RTE_BIT32(config->mprq.log_max_stride_size)); DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u", dev->data->port_id, 1 << tmpl->rxq.sges_n); if (desc % (1 << tmpl->rxq.sges_n)) { @@ -2142,7 +2158,7 @@ mlx5_rxq_get_hairpin_conf(struct rte_eth_dev *dev, uint16_t idx) * Number of queues in the array. * * @return - * 1 if all queues in indirection table match 0 othrwise. + * 1 if all queues in indirection table match 0 otherwise. 
*/ static int mlx5_ind_table_obj_match_queues(const struct mlx5_ind_table_obj *ind_tbl, @@ -2387,6 +2403,10 @@ mlx5_ind_table_obj_check_standalone(struct rte_eth_dev *dev __rte_unused, * Number of queues in the array. * @param standalone * Indirection table for Standalone queue. + * @param ref_new_qs + * Whether to increment new RxQ set reference counters. + * @param deref_old_qs + * Whether to decrement old RxQ set reference counters. * * @return * 0 on success, a negative errno value otherwise and rte_errno is set. @@ -2395,10 +2415,10 @@ int mlx5_ind_table_obj_modify(struct rte_eth_dev *dev, struct mlx5_ind_table_obj *ind_tbl, uint16_t *queues, const uint32_t queues_n, - bool standalone) + bool standalone, bool ref_new_qs, bool deref_old_qs) { struct mlx5_priv *priv = dev->data->dev_private; - unsigned int i; + unsigned int i = 0, j; int ret = 0, err; const unsigned int n = rte_is_power_of_2(queues_n) ? log2above(queues_n) : @@ -2408,22 +2428,30 @@ mlx5_ind_table_obj_modify(struct rte_eth_dev *dev, RTE_SET_USED(standalone); if (mlx5_ind_table_obj_check_standalone(dev, ind_tbl) < 0) return -rte_errno; - for (i = 0; i != queues_n; ++i) { - if (!mlx5_rxq_get(dev, queues[i])) { - ret = -rte_errno; - goto error; + if (ref_new_qs) + for (i = 0; i != queues_n; ++i) { + if (!mlx5_rxq_ref(dev, queues[i])) { + ret = -rte_errno; + goto error; + } } - } MLX5_ASSERT(priv->obj_ops.ind_table_modify); ret = priv->obj_ops.ind_table_modify(dev, n, queues, queues_n, ind_tbl); if (ret) goto error; + if (deref_old_qs) + for (i = 0; i < ind_tbl->queues_n; i++) + claim_nonzero(mlx5_rxq_deref(dev, ind_tbl->queues[i])); ind_tbl->queues_n = queues_n; ind_tbl->queues = queues; return 0; error: - err = rte_errno; - rte_errno = err; + if (ref_new_qs) { + err = rte_errno; + for (j = 0; j < i; j++) + mlx5_rxq_deref(dev, queues[j]); + rte_errno = err; + } DRV_LOG(DEBUG, "Port %u cannot setup indirection table.", dev->data->port_id); return ret; @@ -2444,19 +2472,17 @@ int mlx5_ind_table_obj_attach(struct rte_eth_dev *dev, struct mlx5_ind_table_obj *ind_tbl) { - unsigned int i; int ret; ret = mlx5_ind_table_obj_modify(dev, ind_tbl, ind_tbl->queues, - ind_tbl->queues_n, true); - if (ret != 0) { + ind_tbl->queues_n, + true /* standalone */, + true /* ref_new_qs */, + false /* deref_old_qs */); + if (ret != 0) DRV_LOG(ERR, "Port %u could not modify indirect table obj %p", dev->data->port_id, (void *)ind_tbl); - return ret; - } - for (i = 0; i < ind_tbl->queues_n; i++) - mlx5_rxq_ref(dev, ind_tbl->queues[i]); - return 0; + return ret; } /** @@ -2566,7 +2592,7 @@ mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx, if (hrxq->standalone) { /* * Replacement of indirection table unsupported for - * stanalone hrxq objects (used by shared RSS). + * standalone hrxq objects (used by shared RSS). */ rte_errno = ENOTSUP; return -rte_errno;
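
Most of this diff renames the MPRQ stride fields (strd_num_n, strd_sz_n, stride_num_n, stride_size_n and the MLX5_MPRQ_STRIDE_*_N defaults) to log_-prefixed names and replaces the open-coded "1 << n" shifts with RTE_BIT32(). The fields store log2 values, so RTE_BIT32() converts them back into stride counts and byte sizes. Below is a minimal standalone sketch of the buffer sizing performed in mlx5_mprq_alloc_mp(); the log values are example numbers standing in for the driver's negotiated ones:

	#include <stdint.h>
	#include <stdio.h>

	/* As defined in DPDK's rte_bitops.h. */
	#define RTE_BIT32(nr) (UINT32_C(1) << (nr))

	int main(void)
	{
		unsigned int log_strd_num = 6;  /* 64 strides per WQE (example value) */
		unsigned int log_strd_sz = 11;  /* 2048-byte strides (example value) */
		unsigned int buf_len = RTE_BIT32(log_strd_num) * RTE_BIT32(log_strd_sz);

		printf("buf_len = %u bytes\n", buf_len); /* 64 * 2048 = 131072 */
		return 0;
	}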
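
The mlx5_shared_rxq_match() change makes the segment comparison stricter: it first rejects a mismatched segment count, then memcmp()s each rte_eth_rxseg_split descriptor against the copy that mlx5_rxq_new() now saves in tmpl->rxseg, which also covers fields (such as the split offset) that the old mp/length-only check ignored. A sketch of the same pattern follows; the segment struct layout and helper name are illustrative stand-ins, not the driver's definitions:

	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>

	/* Stand-in for rte_eth_rxseg_split. */
	struct rxseg { void *mp; uint16_t length; uint16_t offset; };

	static bool
	segs_match(const struct rxseg *a, unsigned int a_n,
		   const struct rxseg *b, unsigned int b_n)
	{
		if (a_n != b_n)
			return false;  /* compare counts before contents */
		/*
		 * memcmp() checks every field at once, but it also compares
		 * padding bytes; this is only safe when both arrays come from
		 * zero-initialized configuration structures, as here.
		 */
		return memcmp(a, b, a_n * sizeof(*a)) == 0;
	}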
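
mlx5_ind_table_obj_modify() now takes ref_new_qs and deref_old_qs flags so that callers such as mlx5_ind_table_obj_attach() can fold queue reference counting into the modify step itself: the new queues are pinned before the hardware table is rewritten, the old references are dropped only after the rewrite succeeds, and the new references are rolled back on failure. A sketch of that ordering under stand-in types; rxq_ref(), rxq_deref() and modify_hw_table() are stubs for mlx5_rxq_ref(), mlx5_rxq_deref() and priv->obj_ops.ind_table_modify():

	#include <stdint.h>

	struct ind_tbl { uint16_t *queues; uint32_t queues_n; };

	/* Stubs standing in for the driver's helpers; always succeed here. */
	static int  rxq_ref(uint16_t q)   { (void)q; return 1; }
	static void rxq_deref(uint16_t q) { (void)q; }
	static int  modify_hw_table(struct ind_tbl *t, uint16_t *q, uint32_t n)
	{ (void)t; (void)q; (void)n; return 0; }

	static int
	swap_queues(struct ind_tbl *tbl, uint16_t *queues, uint32_t queues_n)
	{
		uint32_t i = 0, j;

		/* ref_new_qs: pin every new queue before touching the table. */
		for (; i < queues_n; i++)
			if (!rxq_ref(queues[i]))
				goto rollback;
		if (modify_hw_table(tbl, queues, queues_n))
			goto rollback;
		/* deref_old_qs: release the old set only after the switch. */
		for (j = 0; j < tbl->queues_n; j++)
			rxq_deref(tbl->queues[j]);
		tbl->queues = queues;
		tbl->queues_n = queues_n;
		return 0;
	rollback:
		/* Undo the i references taken so far. */
		for (j = 0; j < i; j++)
			rxq_deref(queues[j]);
		return -1;
	}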