#include <mlx5_glue.h>
#include <mlx5_malloc.h>
+#include <mlx5_common.h>
#include <mlx5_common_mr.h>
#include "mlx5_defs.h"
#include "mlx5.h"
-#include "mlx5_tx.h"
#include "mlx5_rx.h"
#include "mlx5_utils.h"
#include "mlx5_autoconf.h"
dev->data->port_id, idx);
return false;
} else if (mp == NULL) {
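+ /*
+  * Buffer split is in use (no single mempool): every queue joining the
+  * shared group must describe the same number of segments with an
+  * identical per-segment layout.
+  */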
+ if (conf->rx_nseg != rxq_ctrl->rxseg_n) {
+ DRV_LOG(ERR, "port %u queue index %u failed to join shared group: segment number mismatch",
+ dev->data->port_id, idx);
+ return false;
+ }
for (i = 0; i < conf->rx_nseg; i++) {
- if (conf->rx_seg[i].split.mp !=
- rxq_ctrl->rxq.rxseg[i].mp ||
- conf->rx_seg[i].split.length !=
- rxq_ctrl->rxq.rxseg[i].length) {
+ if (memcmp(&conf->rx_seg[i].split, &rxq_ctrl->rxseg[i],
+ sizeof(struct rte_eth_rxseg_split))) {
DRV_LOG(ERR, "port %u queue index %u failed to join shared group: segment %u configuration mismatch",
dev->data->port_id, idx, i);
return false;
int sq_n = 0;
uint32_t doorbell_hi;
uint64_t doorbell;
- void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
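+ /*
+  * Compose the 64-bit arm doorbell value: the high 32 bits carry the
+  * arm sequence number and the current CQ consumer index, the low
+  * 32 bits carry the CQ number.
+  */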
sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
doorbell = (uint64_t)doorbell_hi << 32;
doorbell |= rxq->cqn;
- rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
- mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
- cq_db_reg, rxq->uar_lock_cq);
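+ /*
+  * mlx5_doorbell_ring() updates the doorbell record and writes the UAR
+  * register in one helper, taking care of the split 64-bit write (and
+  * the locking) that the removed mlx5_uar_write64()/uar_lock_cq pair
+  * handled on 32-bit architectures.
+  */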
+ mlx5_doorbell_ring(&rxq->uar_data, rte_cpu_to_be_64(doorbell),
+ doorbell_hi, &rxq->cq_db[MLX5_CQ_ARM_DB], 0);
}
/**
struct mlx5_rxq_obj *rxq_obj;
LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
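+ /* Skip objects whose queue control structure was already released. */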
+ if (rxq_obj->rxq_ctrl == NULL)
+ continue;
if (rxq_obj->rxq_ctrl->rxq.shared &&
!LIST_EMPTY(&rxq_obj->rxq_ctrl->owners))
continue;
rte_errno = ENOMEM;
return -rte_errno;
}
- ret = mlx5_mr_mempool_register(priv->sh->cdev, mp);
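+ /*
+  * The new third argument is presumably the external-memory flag;
+  * MPRQ mempools are allocated by the driver itself, hence false.
+  */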
+ ret = mlx5_mr_mempool_register(priv->sh->cdev, mp, false);
if (ret < 0 && rte_errno != EEXIST) {
ret = rte_errno;
DRV_LOG(ERR, "port %u failed to register a mempool for Multi-Packet RQ",
rxq->ctrl = tmpl;
LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);
MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
+ /*
+ * Save the original segment configuration in the shared queue
+ * descriptor so it can be validated when sibling queues are created.
+ */
+ tmpl->rxseg_n = n_seg;
+ rte_memcpy(tmpl->rxseg, qs_seg,
+ sizeof(struct rte_eth_rxseg_split) * n_seg);
/*
* Build the array of actual buffer offsets and lengths.
* Pad with the buffers from the last memory pool if
goto error;
}
tmpl->type = MLX5_RXQ_TYPE_STANDARD;
- if (mlx5_mr_ctrl_init(&tmpl->rxq.mr_ctrl, priv->sh->cdev, socket)) {
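+ /*
+  * The MR control is now initialized against the shared MR cache
+  * generation counter of the common device instead of the device
+  * handle itself.
+  */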
+ if (mlx5_mr_ctrl_init(&tmpl->rxq.mr_ctrl,
+ &priv->sh->cdev->mr_scache.dev_gen, socket)) {
/* rte_errno is already set. */
goto error;
}
(struct rte_mbuf *(*)[desc_n])(tmpl + 1);
tmpl->rxq.mprq_bufs =
(struct mlx5_mprq_buf *(*)[desc])(*tmpl->rxq.elts + desc_n);
-#ifndef RTE_ARCH_64
- tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;
-#endif
tmpl->rxq.idx = idx;
LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
return tmpl;
* Indirection table to release.
* @param standalone
* Indirection table for Standalone queue.
+ * @param deref_rxqs
+ * If true, then dereference RX queues related to the indirection table.
+ * Otherwise, no additional action will be taken.
*
* @return
* 1 while a reference on it exists, 0 when freed.
int
mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
struct mlx5_ind_table_obj *ind_tbl,
- bool standalone)
+ bool standalone,
+ bool deref_rxqs)
{
struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i, ret;
if (ret)
return 1;
priv->obj_ops.ind_table_destroy(ind_tbl);
- for (i = 0; i != ind_tbl->queues_n; ++i)
- claim_nonzero(mlx5_rxq_deref(dev, ind_tbl->queues[i]));
+ if (deref_rxqs) {
+ for (i = 0; i != ind_tbl->queues_n; ++i)
+ claim_nonzero(mlx5_rxq_deref(dev, ind_tbl->queues[i]));
+ }
mlx5_free(ind_tbl);
return 0;
}
* Pointer to Ethernet device.
* @param ind_table
* Indirection table to modify.
+ * @param ref_qs
+ * Whether to increment RxQ reference counters.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
- struct mlx5_ind_table_obj *ind_tbl)
+ struct mlx5_ind_table_obj *ind_tbl,
+ bool ref_qs)
{
struct mlx5_priv *priv = dev->data->dev_private;
uint32_t queues_n = ind_tbl->queues_n;
uint16_t *queues = ind_tbl->queues;
- unsigned int i, j;
+ unsigned int i = 0, j;
int ret = 0, err;
const unsigned int n = rte_is_power_of_2(queues_n) ?
log2above(queues_n) :
log2above(priv->config.ind_table_max_size);
- for (i = 0; i != queues_n; ++i) {
- if (mlx5_rxq_ref(dev, queues[i]) == NULL) {
- ret = -rte_errno;
- goto error;
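+ /*
+  * When requested, take a reference on every RX queue so none of them
+  * can be released while the indirection table points to it.
+  */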
+ if (ref_qs)
+ for (i = 0; i != queues_n; ++i) {
+ if (mlx5_rxq_ref(dev, queues[i]) == NULL) {
+ ret = -rte_errno;
+ goto error;
+ }
}
- }
ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
if (ret)
goto error;
__atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
return 0;
error:
- err = rte_errno;
- for (j = 0; j < i; j++)
- mlx5_rxq_deref(dev, ind_tbl->queues[j]);
- rte_errno = err;
+ if (ref_qs) {
+ err = rte_errno;
+ for (j = 0; j < i; j++)
+ mlx5_rxq_deref(dev, queues[j]);
+ rte_errno = err;
+ }
DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
dev->data->port_id);
return ret;
* Number of queues in the array.
* @param standalone
* Indirection table for Standalone queue.
+ * @param ref_qs
+ * Whether to increment RxQ reference counters.
*
* @return
* The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
*/
static struct mlx5_ind_table_obj *
mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
- uint32_t queues_n, bool standalone)
+ uint32_t queues_n, bool standalone, bool ref_qs)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ind_table_obj *ind_tbl;
ind_tbl->queues_n = queues_n;
ind_tbl->queues = (uint16_t *)(ind_tbl + 1);
memcpy(ind_tbl->queues, queues, queues_n * sizeof(*queues));
- ret = mlx5_ind_table_obj_setup(dev, ind_tbl);
+ ret = mlx5_ind_table_obj_setup(dev, ind_tbl, ref_qs);
if (ret < 0) {
mlx5_free(ind_tbl);
return NULL;
* Number of queues in the array.
* @param standalone
* Indirection table for Standalone queue.
+ * @param ref_new_qs
+ * Whether to increment new RxQ set reference counters.
+ * @param deref_old_qs
+ * Whether to decrement old RxQ set reference counters.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
struct mlx5_ind_table_obj *ind_tbl,
uint16_t *queues, const uint32_t queues_n,
- bool standalone)
+ bool standalone, bool ref_new_qs, bool deref_old_qs)
{
struct mlx5_priv *priv = dev->data->dev_private;
- unsigned int i;
+ unsigned int i = 0, j;
int ret = 0, err;
const unsigned int n = rte_is_power_of_2(queues_n) ?
log2above(queues_n) :
RTE_SET_USED(standalone);
if (mlx5_ind_table_obj_check_standalone(dev, ind_tbl) < 0)
return -rte_errno;
- for (i = 0; i != queues_n; ++i) {
- if (!mlx5_rxq_get(dev, queues[i])) {
- ret = -rte_errno;
- goto error;
+ if (ref_new_qs)
+ for (i = 0; i != queues_n; ++i) {
+ if (!mlx5_rxq_ref(dev, queues[i])) {
+ ret = -rte_errno;
+ goto error;
+ }
}
- }
MLX5_ASSERT(priv->obj_ops.ind_table_modify);
ret = priv->obj_ops.ind_table_modify(dev, n, queues, queues_n, ind_tbl);
if (ret)
goto error;
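+ /* On success, optionally drop the references held on the old queue set. */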
+ if (deref_old_qs)
+ for (i = 0; i < ind_tbl->queues_n; i++)
+ claim_nonzero(mlx5_rxq_deref(dev, ind_tbl->queues[i]));
ind_tbl->queues_n = queues_n;
ind_tbl->queues = queues;
return 0;
error:
- err = rte_errno;
- rte_errno = err;
+ if (ref_new_qs) {
+ err = rte_errno;
+ for (j = 0; j < i; j++)
+ mlx5_rxq_deref(dev, queues[j]);
+ rte_errno = err;
+ }
DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
dev->data->port_id);
return ret;
mlx5_ind_table_obj_attach(struct rte_eth_dev *dev,
struct mlx5_ind_table_obj *ind_tbl)
{
- unsigned int i;
int ret;
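+ /*
+  * Queue referencing is delegated to the modify call (ref_new_qs),
+  * which replaces the manual mlx5_rxq_get() loop removed below.
+  */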
ret = mlx5_ind_table_obj_modify(dev, ind_tbl, ind_tbl->queues,
- ind_tbl->queues_n, true);
- if (ret != 0) {
+ ind_tbl->queues_n,
+ true /* standalone */,
+ true /* ref_new_qs */,
+ false /* deref_old_qs */);
+ if (ret != 0)
DRV_LOG(ERR, "Port %u could not modify indirect table obj %p",
dev->data->port_id, (void *)ind_tbl);
- return ret;
- }
- for (i = 0; i < ind_tbl->queues_n; i++)
- mlx5_rxq_get(dev, ind_tbl->queues[i]);
- return 0;
+ return ret;
}
/**
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq =
mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
+ bool dev_started = !!dev->data->dev_started;
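+ /*
+  * Take queue references only on a started device; on a stopped device
+  * the references are taken later, when the indirection table is
+  * attached at start.
+  */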
int ret;
if (!hrxq) {
ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
if (!ind_tbl)
ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
- hrxq->standalone);
+ hrxq->standalone,
+ dev_started);
}
if (!ind_tbl) {
rte_errno = ENOMEM;
if (ind_tbl != hrxq->ind_table) {
MLX5_ASSERT(!hrxq->standalone);
mlx5_ind_table_obj_release(dev, hrxq->ind_table,
- hrxq->standalone);
+ hrxq->standalone, true);
hrxq->ind_table = ind_tbl;
}
hrxq->hash_fields = hash_fields;
err = rte_errno;
if (ind_tbl != hrxq->ind_table) {
MLX5_ASSERT(!hrxq->standalone);
- mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone);
+ mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone,
+ true);
}
rte_errno = err;
return -rte_errno;
priv->obj_ops.hrxq_destroy(hrxq);
if (!hrxq->standalone) {
mlx5_ind_table_obj_release(dev, hrxq->ind_table,
- hrxq->standalone);
+ hrxq->standalone, true);
}
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
}
ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
if (!ind_tbl)
ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
- standalone);
+ standalone,
+ !!dev->data->dev_started);
if (!ind_tbl)
return NULL;
hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
return hrxq;
error:
if (!rss_desc->ind_tbl)
- mlx5_ind_table_obj_release(dev, ind_tbl, standalone);
+ mlx5_ind_table_obj_release(dev, ind_tbl, standalone, true);
if (hrxq)
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
return NULL;