unsigned int wqe_n = 1 << rxq_data->elts_n;
if (mlx5_rxq_mprq_enabled(rxq_data))
- cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
+ cqe_n = wqe_n * RTE_BIT32(rxq_data->log_strd_num) - 1;
else
cqe_n = wqe_n - 1;
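+ /*
+ * Illustration (hypothetical numbers): with elts_n = 8 the RQ has
+ * wqe_n = 256 WQEs; in MPRQ mode with log_strd_num = 6 each WQE
+ * carries 64 strides, so up to 256 * 64 = 16384 completions may be
+ * posted and cqe_n = 16383; without MPRQ, cqe_n = 255.
+ */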
return cqe_n;
{
const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
unsigned int elts_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
- (1 << rxq_ctrl->rxq.elts_n) * (1 << rxq_ctrl->rxq.strd_num_n) :
- (1 << rxq_ctrl->rxq.elts_n);
+ RTE_BIT32(rxq_ctrl->rxq.elts_n) *
+ RTE_BIT32(rxq_ctrl->rxq.log_strd_num) :
+ RTE_BIT32(rxq_ctrl->rxq.elts_n);
unsigned int i;
int err;
{
struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
const uint16_t q_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
- (1 << rxq->elts_n) * (1 << rxq->strd_num_n) :
- (1 << rxq->elts_n);
+ RTE_BIT32(rxq->elts_n) * RTE_BIT32(rxq->log_strd_num) :
+ RTE_BIT32(rxq->elts_n);
const uint16_t q_mask = q_n - 1;
uint16_t elts_ci = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
rxq->elts_ci : rxq->rq_ci;
dev->data->port_id, idx);
return false;
} else if (mp == NULL) {
+ if (conf->rx_nseg != rxq_ctrl->rxseg_n) {
+ DRV_LOG(ERR, "port %u queue index %u failed to join shared group: segment number mismatch",
+ dev->data->port_id, idx);
+ return false;
+ }
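+ /*
+ * Compare the requested layout against the segment array that was
+ * saved in the shared queue descriptor at creation time.
+ */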
for (i = 0; i < conf->rx_nseg; i++) {
- if (conf->rx_seg[i].split.mp !=
- rxq_ctrl->rxq.rxseg[i].mp ||
- conf->rx_seg[i].split.length !=
- rxq_ctrl->rxq.rxseg[i].length) {
+ if (memcmp(&conf->rx_seg[i].split, &rxq_ctrl->rxseg[i],
+ sizeof(struct rte_eth_rxseg_split))) {
DRV_LOG(ERR, "port %u queue index %u failed to join shared group: segment %u configuration mismatch",
dev->data->port_id, idx, i);
return false;
struct mlx5_rxq_obj *rxq_obj;
LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
+ if (rxq_obj->rxq_ctrl == NULL)
+ continue;
if (rxq_obj->rxq_ctrl->rxq.shared &&
!LIST_EMPTY(&rxq_obj->rxq_ctrl->owners))
continue;
unsigned int buf_len;
unsigned int obj_num;
unsigned int obj_size;
- unsigned int strd_num_n = 0;
- unsigned int strd_sz_n = 0;
+ unsigned int log_strd_num = 0;
+ unsigned int log_strd_sz = 0;
unsigned int i;
unsigned int n_ibv = 0;
int ret;
n_ibv++;
desc += 1 << rxq->elts_n;
/* Get the max number of strides. */
- if (strd_num_n < rxq->strd_num_n)
- strd_num_n = rxq->strd_num_n;
+ if (log_strd_num < rxq->log_strd_num)
+ log_strd_num = rxq->log_strd_num;
/* Get the max size of a stride. */
- if (strd_sz_n < rxq->strd_sz_n)
- strd_sz_n = rxq->strd_sz_n;
- }
- MLX5_ASSERT(strd_num_n && strd_sz_n);
- buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
- obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
- sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
+ if (log_strd_sz < rxq->log_strd_sz)
+ log_strd_sz = rxq->log_strd_sz;
+ }
+ MLX5_ASSERT(log_strd_num && log_strd_sz);
+ buf_len = RTE_BIT32(log_strd_num) * RTE_BIT32(log_strd_sz);
+ obj_size = sizeof(struct mlx5_mprq_buf) + buf_len +
+ RTE_BIT32(log_strd_num) *
+ sizeof(struct rte_mbuf_ext_shared_info) +
+ RTE_PKTMBUF_HEADROOM;
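+ /*
+ * Sizing sketch (hypothetical defaults log_strd_num = 6 and
+ * log_strd_sz = 11): buf_len = 64 * 2048 = 128 KiB, and each mempool
+ * object also carries 64 shared-info records for the externally
+ * attached mbufs plus headroom.
+ */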
/*
* Received packets can be either memcpy'd or externally referenced. In
* case that the packet is attached to an mbuf as an external buffer, as
snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
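+ /*
+ * The per-buffer stride count (1 << log_strd_num) is forwarded to
+ * mlx5_mprq_buf_init() as the opaque init argument.
+ */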
mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
0, NULL, NULL, mlx5_mprq_buf_init,
- (void *)((uintptr_t)1 << strd_num_n),
+ (void *)((uintptr_t)1 << log_strd_num),
dev->device->numa_node, 0);
if (mp == NULL) {
DRV_LOG(ERR,
unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
const int mprq_en = mlx5_check_mprq_support(dev) > 0 && n_seg == 1 &&
!rx_seg[0].offset && !rx_seg[0].length;
- unsigned int mprq_stride_nums = config->mprq.stride_num_n ?
- config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
- unsigned int mprq_stride_size = non_scatter_min_mbuf_size <=
- (1U << config->mprq.max_stride_size_n) ?
- log2above(non_scatter_min_mbuf_size) : MLX5_MPRQ_STRIDE_SIZE_N;
- unsigned int mprq_stride_cap = (config->mprq.stride_num_n ?
- (1U << config->mprq.stride_num_n) : (1U << mprq_stride_nums)) *
- (config->mprq.stride_size_n ?
- (1U << config->mprq.stride_size_n) : (1U << mprq_stride_size));
+ unsigned int log_mprq_stride_nums = config->mprq.log_stride_num ?
+ config->mprq.log_stride_num : MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM;
+ unsigned int log_mprq_stride_size = non_scatter_min_mbuf_size <=
+ RTE_BIT32(config->mprq.log_max_stride_size) ?
+ log2above(non_scatter_min_mbuf_size) :
+ MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE;
+ unsigned int mprq_stride_cap = (config->mprq.log_stride_num ?
+ RTE_BIT32(config->mprq.log_stride_num) :
+ RTE_BIT32(log_mprq_stride_nums)) *
+ (config->mprq.log_stride_size ?
+ RTE_BIT32(config->mprq.log_stride_size) :
+ RTE_BIT32(log_mprq_stride_size));
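+ /*
+ * Example (assuming the defaults resolve to 64 strides of 2048 bytes
+ * each): mprq_stride_cap = 64 * 2048 = 128 KiB, the largest frame a
+ * single MPRQ WQE can take without Rx scatter.
+ */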
/*
* Always allocate extra slots, even if eventually
* the vector Rx will not be used.
tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
sizeof(*tmpl) + desc_n * sizeof(struct rte_mbuf *) +
(!!mprq_en) *
- (desc >> mprq_stride_nums) * sizeof(struct mlx5_mprq_buf *),
+ (desc >> log_mprq_stride_nums) * sizeof(struct mlx5_mprq_buf *),
0, socket);
if (!tmpl) {
rte_errno = ENOMEM;
rxq->ctrl = tmpl;
LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);
MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
+ /*
+ * Save the original segment configuration in the shared queue
+ * descriptor so it can be checked when sibling queues are created.
+ */
+ tmpl->rxseg_n = n_seg;
+ rte_memcpy(tmpl->rxseg, qs_seg,
+ sizeof(struct rte_eth_rxseg_split) * n_seg);
/*
* Build the array of actual buffer offsets and lengths.
* Pad with the buffers from the last memory pool if
* - MPRQ is enabled.
* - The number of descs is more than the number of strides.
* - max_rx_pktlen plus overhead is less than the max size
- * of a stride or mprq_stride_size is specified by a user.
+ * of a stride or config->mprq.log_stride_size is set by a user.
* Need to make sure that there are enough strides to encap
- * the maximum packet size in case mprq_stride_size is set.
+ * the maximum packet size in case config->mprq.log_stride_size is set.
* Otherwise, enable Rx scatter if necessary.
*/
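+ /*
+ * Illustration (hypothetical numbers): desc = 512 with
+ * log_mprq_stride_nums = 6 passes the first check (512 > 64), and a
+ * 1500-byte frame fits a 2048-byte stride, so MPRQ is enabled and
+ * desc is trimmed to 512 >> 6 = 8 MPRQ WQEs below.
+ */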
- if (mprq_en && desc > (1U << mprq_stride_nums) &&
+ if (mprq_en && desc > RTE_BIT32(log_mprq_stride_nums) &&
(non_scatter_min_mbuf_size <=
- (1U << config->mprq.max_stride_size_n) ||
- (config->mprq.stride_size_n &&
+ RTE_BIT32(config->mprq.log_max_stride_size) ||
+ (config->mprq.log_stride_size &&
non_scatter_min_mbuf_size <= mprq_stride_cap))) {
/* TODO: Rx scatter isn't supported yet. */
tmpl->rxq.sges_n = 0;
/* Trim the number of descs needed. */
- desc >>= mprq_stride_nums;
- tmpl->rxq.strd_num_n = config->mprq.stride_num_n ?
- config->mprq.stride_num_n : mprq_stride_nums;
- tmpl->rxq.strd_sz_n = config->mprq.stride_size_n ?
- config->mprq.stride_size_n : mprq_stride_size;
+ desc >>= log_mprq_stride_nums;
+ tmpl->rxq.log_strd_num = config->mprq.log_stride_num ?
+ config->mprq.log_stride_num : log_mprq_stride_nums;
+ tmpl->rxq.log_strd_sz = config->mprq.log_stride_size ?
+ config->mprq.log_stride_size : log_mprq_stride_size;
tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
tmpl->rxq.strd_scatter_en =
!!(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
config->mprq.max_memcpy_len);
max_lro_size = RTE_MIN(max_rx_pktlen,
- (1u << tmpl->rxq.strd_num_n) *
- (1u << tmpl->rxq.strd_sz_n));
+ RTE_BIT32(tmpl->rxq.log_strd_num) *
+ RTE_BIT32(tmpl->rxq.log_strd_sz));
DRV_LOG(DEBUG,
"port %u Rx queue %u: Multi-Packet RQ is enabled"
" strd_num_n = %u, strd_sz_n = %u",
dev->data->port_id, idx,
- tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
+ tmpl->rxq.log_strd_num, tmpl->rxq.log_strd_sz);
} else if (tmpl->rxq.rxseg_n == 1) {
MLX5_ASSERT(max_rx_pktlen <= first_mb_free_size);
tmpl->rxq.sges_n = 0;
" min_stride_sz = %u, max_stride_sz = %u).",
dev->data->port_id, non_scatter_min_mbuf_size,
desc, priv->rxqs_n,
- config->mprq.stride_size_n ?
- (1U << config->mprq.stride_size_n) :
- (1U << mprq_stride_size),
- config->mprq.stride_num_n ?
- (1U << config->mprq.stride_num_n) :
- (1U << mprq_stride_nums),
+ config->mprq.log_stride_size ?
+ RTE_BIT32(config->mprq.log_stride_size) :
+ RTE_BIT32(log_mprq_stride_size),
+ config->mprq.log_stride_num ?
+ RTE_BIT32(config->mprq.log_stride_num) :
+ RTE_BIT32(log_mprq_stride_nums),
config->mprq.min_rxqs_num,
- (1U << config->mprq.min_stride_size_n),
- (1U << config->mprq.max_stride_size_n));
+ RTE_BIT32(config->mprq.log_min_stride_size),
+ RTE_BIT32(config->mprq.log_max_stride_size));
DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
dev->data->port_id, 1 << tmpl->rxq.sges_n);
if (desc % (1 << tmpl->rxq.sges_n)) {
* Number of queues in the array.
*
* @return
- * 1 if all queues in indirection table match 0 othrwise.
+ * 1 if all queues in indirection table match 0 otherwise.
*/
static int
mlx5_ind_table_obj_match_queues(const struct mlx5_ind_table_obj *ind_tbl,
* Indirection table to release.
* @param standalone
* Indirection table for Standalone queue.
+ * @param deref_rxqs
+ * If true, dereference the Rx queues used by the indirection table;
+ * otherwise, leave their reference counters untouched.
*
* @return
* 1 while a reference on it exists, 0 when freed.
int
mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
struct mlx5_ind_table_obj *ind_tbl,
- bool standalone)
+ bool standalone,
+ bool deref_rxqs)
{
struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i, ret;
if (ret)
return 1;
priv->obj_ops.ind_table_destroy(ind_tbl);
- for (i = 0; i != ind_tbl->queues_n; ++i)
- claim_nonzero(mlx5_rxq_deref(dev, ind_tbl->queues[i]));
+ if (deref_rxqs) {
+ for (i = 0; i != ind_tbl->queues_n; ++i)
+ claim_nonzero(mlx5_rxq_deref(dev, ind_tbl->queues[i]));
+ }
mlx5_free(ind_tbl);
return 0;
}
* Pointer to Ethernet device.
* @param ind_table
* Indirection table to modify.
+ * @param ref_qs
+ * Whether to increment RxQ reference counters.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
- struct mlx5_ind_table_obj *ind_tbl)
+ struct mlx5_ind_table_obj *ind_tbl,
+ bool ref_qs)
{
struct mlx5_priv *priv = dev->data->dev_private;
uint32_t queues_n = ind_tbl->queues_n;
uint16_t *queues = ind_tbl->queues;
- unsigned int i, j;
+ unsigned int i = 0, j;
int ret = 0, err;
const unsigned int n = rte_is_power_of_2(queues_n) ?
log2above(queues_n) :
log2above(priv->config.ind_table_max_size);
- for (i = 0; i != queues_n; ++i) {
- if (mlx5_rxq_ref(dev, queues[i]) == NULL) {
- ret = -rte_errno;
- goto error;
+ if (ref_qs)
+ for (i = 0; i != queues_n; ++i) {
+ if (mlx5_rxq_ref(dev, queues[i]) == NULL) {
+ ret = -rte_errno;
+ goto error;
+ }
}
- }
ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
if (ret)
goto error;
__atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
return 0;
error:
- err = rte_errno;
- for (j = 0; j < i; j++)
- mlx5_rxq_deref(dev, ind_tbl->queues[j]);
- rte_errno = err;
+ if (ref_qs) {
+ err = rte_errno;
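+ /* Roll back only the references taken so far (j < i). */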
+ for (j = 0; j < i; j++)
+ mlx5_rxq_deref(dev, queues[j]);
+ rte_errno = err;
+ }
DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
dev->data->port_id);
return ret;
* Number of queues in the array.
* @param standalone
* Indirection table for Standalone queue.
+ * @param ref_qs
+ * Whether to increment RxQ reference counters.
*
* @return
* The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
*/
static struct mlx5_ind_table_obj *
mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
- uint32_t queues_n, bool standalone)
+ uint32_t queues_n, bool standalone, bool ref_qs)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ind_table_obj *ind_tbl;
ind_tbl->queues_n = queues_n;
ind_tbl->queues = (uint16_t *)(ind_tbl + 1);
memcpy(ind_tbl->queues, queues, queues_n * sizeof(*queues));
- ret = mlx5_ind_table_obj_setup(dev, ind_tbl);
+ ret = mlx5_ind_table_obj_setup(dev, ind_tbl, ref_qs);
if (ret < 0) {
mlx5_free(ind_tbl);
return NULL;
* Number of queues in the array.
* @param standalone
* Indirection table for Standalone queue.
+ * @param ref_new_qs
+ * Whether to increment new RxQ set reference counters.
+ * @param deref_old_qs
+ * Whether to decrement old RxQ set reference counters.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
struct mlx5_ind_table_obj *ind_tbl,
uint16_t *queues, const uint32_t queues_n,
- bool standalone)
+ bool standalone, bool ref_new_qs, bool deref_old_qs)
{
struct mlx5_priv *priv = dev->data->dev_private;
- unsigned int i;
+ unsigned int i = 0, j;
int ret = 0, err;
const unsigned int n = rte_is_power_of_2(queues_n) ?
log2above(queues_n) :
RTE_SET_USED(standalone);
if (mlx5_ind_table_obj_check_standalone(dev, ind_tbl) < 0)
return -rte_errno;
- for (i = 0; i != queues_n; ++i) {
- if (!mlx5_rxq_get(dev, queues[i])) {
- ret = -rte_errno;
- goto error;
+ if (ref_new_qs)
+ for (i = 0; i != queues_n; ++i) {
+ if (!mlx5_rxq_ref(dev, queues[i])) {
+ ret = -rte_errno;
+ goto error;
+ }
}
- }
MLX5_ASSERT(priv->obj_ops.ind_table_modify);
ret = priv->obj_ops.ind_table_modify(dev, n, queues, queues_n, ind_tbl);
if (ret)
goto error;
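+ /* The HW table now points at the new queues; release the old set. */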
+ if (deref_old_qs)
+ for (i = 0; i < ind_tbl->queues_n; i++)
+ claim_nonzero(mlx5_rxq_deref(dev, ind_tbl->queues[i]));
ind_tbl->queues_n = queues_n;
ind_tbl->queues = queues;
return 0;
error:
- err = rte_errno;
- rte_errno = err;
+ if (ref_new_qs) {
+ err = rte_errno;
+ for (j = 0; j < i; j++)
+ mlx5_rxq_deref(dev, queues[j]);
+ rte_errno = err;
+ }
DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
dev->data->port_id);
return ret;
mlx5_ind_table_obj_attach(struct rte_eth_dev *dev,
struct mlx5_ind_table_obj *ind_tbl)
{
- unsigned int i;
int ret;
ret = mlx5_ind_table_obj_modify(dev, ind_tbl, ind_tbl->queues,
- ind_tbl->queues_n, true);
- if (ret != 0) {
+ ind_tbl->queues_n,
+ true /* standalone */,
+ true /* ref_new_qs */,
+ false /* deref_old_qs */);
+ if (ret != 0)
DRV_LOG(ERR, "Port %u could not modify indirect table obj %p",
dev->data->port_id, (void *)ind_tbl);
- return ret;
- }
- for (i = 0; i < ind_tbl->queues_n; i++)
- mlx5_rxq_ref(dev, ind_tbl->queues[i]);
- return 0;
+ return ret;
}
/**
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq =
mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
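+ /*
+ * Reference Rx queues only when the port is started; otherwise the
+ * references are taken later, when the indirection table is attached
+ * (mlx5_ind_table_obj_attach() passes ref_new_qs = true).
+ */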
+ bool dev_started = !!dev->data->dev_started;
int ret;
if (!hrxq) {
if (hrxq->standalone) {
/*
* Replacement of indirection table unsupported for
- * stanalone hrxq objects (used by shared RSS).
+ * standalone hrxq objects (used by shared RSS).
*/
rte_errno = ENOTSUP;
return -rte_errno;
ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
if (!ind_tbl)
ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
- hrxq->standalone);
+ hrxq->standalone,
+ dev_started);
}
if (!ind_tbl) {
rte_errno = ENOMEM;
if (ind_tbl != hrxq->ind_table) {
MLX5_ASSERT(!hrxq->standalone);
mlx5_ind_table_obj_release(dev, hrxq->ind_table,
- hrxq->standalone);
+ hrxq->standalone, true);
hrxq->ind_table = ind_tbl;
}
hrxq->hash_fields = hash_fields;
err = rte_errno;
if (ind_tbl != hrxq->ind_table) {
MLX5_ASSERT(!hrxq->standalone);
- mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone);
+ mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone,
+ true);
}
rte_errno = err;
return -rte_errno;
priv->obj_ops.hrxq_destroy(hrxq);
if (!hrxq->standalone) {
mlx5_ind_table_obj_release(dev, hrxq->ind_table,
- hrxq->standalone);
+ hrxq->standalone, true);
}
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
}
ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
if (!ind_tbl)
ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
- standalone);
+ standalone,
+ !!dev->data->dev_started);
if (!ind_tbl)
return NULL;
hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
return hrxq;
error:
if (!rss_desc->ind_tbl)
- mlx5_ind_table_obj_release(dev, ind_tbl, standalone);
+ mlx5_ind_table_obj_release(dev, ind_tbl, standalone, true);
if (hrxq)
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
return NULL;