#include <mlx5_glue.h>
#include <mlx5_malloc.h>
+#include <mlx5_common.h>
#include <mlx5_common_mr.h>
#include "mlx5_defs.h"
#include "mlx5.h"
-#include "mlx5_tx.h"
#include "mlx5_rx.h"
#include "mlx5_utils.h"
#include "mlx5_autoconf.h"
+#include "mlx5_devx.h"
/* Default RSS hash key also used for ConnectX-3. */
buf = rte_pktmbuf_alloc(seg->mp);
if (buf == NULL) {
- DRV_LOG(ERR, "port %u empty mbuf pool",
- PORT_ID(rxq_ctrl->priv));
+ if (rxq_ctrl->share_group == 0)
+ DRV_LOG(ERR, "port %u queue %u empty mbuf pool",
+ RXQ_PORT_ID(rxq_ctrl),
+ rxq_ctrl->rxq.idx);
+ else
+ DRV_LOG(ERR, "share group %u queue %u empty mbuf pool",
+ rxq_ctrl->share_group,
+ rxq_ctrl->share_qid);
rte_errno = ENOMEM;
goto error;
}
mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
rte_mbuf_refcnt_set(mbuf_init, 1);
mbuf_init->nb_segs = 1;
- mbuf_init->port = rxq->port_id;
+ /* For shared queues, the port is provided in the CQE. */
+ mbuf_init->port = rxq->shared ? 0 : rxq->port_id;
if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
mbuf_init->ol_flags = RTE_MBUF_F_EXTERNAL;
/*
for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
(*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
}
- DRV_LOG(DEBUG,
- "port %u SPRQ queue %u allocated and configured %u segments"
- " (max %u packets)",
- PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
- elts_n / (1 << rxq_ctrl->rxq.sges_n));
+ if (rxq_ctrl->share_group == 0)
+ DRV_LOG(DEBUG,
+ "port %u SPRQ queue %u allocated and configured %u segments (max %u packets)",
+ RXQ_PORT_ID(rxq_ctrl), rxq_ctrl->rxq.idx, elts_n,
+ elts_n / (1 << rxq_ctrl->rxq.sges_n));
+ else
+ DRV_LOG(DEBUG,
+ "share group %u SPRQ queue %u allocated and configured %u segments (max %u packets)",
+ rxq_ctrl->share_group, rxq_ctrl->share_qid, elts_n,
+ elts_n / (1 << rxq_ctrl->rxq.sges_n));
return 0;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
(*rxq_ctrl->rxq.elts)[i] = NULL;
}
- DRV_LOG(DEBUG, "port %u SPRQ queue %u failed, freed everything",
- PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
+ if (rxq_ctrl->share_group == 0)
+ DRV_LOG(DEBUG, "port %u SPRQ queue %u failed, freed everything",
+ RXQ_PORT_ID(rxq_ctrl), rxq_ctrl->rxq.idx);
+ else
+ DRV_LOG(DEBUG, "share group %u SPRQ queue %u failed, freed everything",
+ rxq_ctrl->share_group, rxq_ctrl->share_qid);
rte_errno = err; /* Restore rte_errno. */
return -rte_errno;
}
uint16_t used = q_n - (elts_ci - rxq->rq_pi);
uint16_t i;
- DRV_LOG(DEBUG, "port %u Rx queue %u freeing %d WRs",
- PORT_ID(rxq_ctrl->priv), rxq->idx, q_n);
+ if (rxq_ctrl->share_group == 0)
+ DRV_LOG(DEBUG, "port %u Rx queue %u freeing %d WRs",
+ RXQ_PORT_ID(rxq_ctrl), rxq->idx, q_n);
+ else
+ DRV_LOG(DEBUG, "share group %u Rx queue %u freeing %d WRs",
+ rxq_ctrl->share_group, rxq_ctrl->share_qid, q_n);
if (rxq->elts == NULL)
return;
/**
static int
mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_ctrl *rxq_ctrl;
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
- if (!(*priv->rxqs)[idx]) {
+ if (rxq == NULL) {
rte_errno = EINVAL;
return -rte_errno;
}
- rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
- return (__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED) == 1);
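+ /* Releasable when only the initial setup reference remains. */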
+ return (__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED) == 1);
}
/* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+ struct mlx5_rxq_ctrl *rxq_ctrl;
int ret;
+ MLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL);
MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ rxq_ctrl = rxq->ctrl;
- ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RDY2RST);
+ ret = priv->obj_ops.rxq_obj_modify(rxq, MLX5_RXQ_MOD_RDY2RST);
if (ret) {
DRV_LOG(ERR, "Cannot change Rx WQ state to RESET: %s",
strerror(errno));
return ret;
}
/* Remove all processed CQEs. */
- rxq_sync_cq(rxq);
+ rxq_sync_cq(&rxq_ctrl->rxq);
/* Free all involved mbufs. */
rxq_free_elts(rxq_ctrl);
/* Set the actual queue state. */
mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+ struct mlx5_rxq_data *rxq_data;
int ret;
- MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ MLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL);
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ rxq_data = &rxq->ctrl->rxq;
/* Allocate needed buffers. */
- ret = rxq_alloc_elts(rxq_ctrl);
+ ret = rxq_alloc_elts(rxq->ctrl);
if (ret) {
DRV_LOG(ERR, "Cannot reallocate buffers for Rx WQ");
rte_errno = errno;
return ret;
}
rte_io_wmb();
- *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
+ *rxq_data->cq_db = rte_cpu_to_be_32(rxq_data->cq_ci);
rte_io_wmb();
/* Reset RQ consumer before moving queue to READY state. */
- *rxq->rq_db = rte_cpu_to_be_32(0);
+ *rxq_data->rq_db = rte_cpu_to_be_32(0);
rte_io_wmb();
- ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RST2RDY);
+ ret = priv->obj_ops.rxq_obj_modify(rxq, MLX5_RXQ_MOD_RST2RDY);
if (ret) {
DRV_LOG(ERR, "Cannot change Rx WQ state to READY: %s",
strerror(errno));
return ret;
}
/* Reinitialize RQ - set WQEs. */
- mlx5_rxq_initialize(rxq);
- rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
+ mlx5_rxq_initialize(rxq_data);
+ rxq_data->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
/* Set actual queue state. */
dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
return 0;
* RX queue index.
* @param desc
* Number of descriptors to configure in queue.
+ * @param[in,out] rxq_ctrl
+ * Address of pointer to shared Rx queue control.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
+mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc,
+ struct mlx5_rxq_ctrl **rxq_ctrl)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_priv *rxq;
+ bool empty;
if (!rte_is_power_of_2(*desc)) {
*desc = 1 << log2above(*desc);
rte_errno = EOVERFLOW;
return -rte_errno;
}
- if (!mlx5_rxq_releasable(dev, idx)) {
- DRV_LOG(ERR, "port %u unable to release queue index %u",
- dev->data->port_id, idx);
- rte_errno = EBUSY;
- return -rte_errno;
+ if (rxq_ctrl == NULL || *rxq_ctrl == NULL)
+ return 0;
+ if (!(*rxq_ctrl)->rxq.shared) {
+ if (!mlx5_rxq_releasable(dev, idx)) {
+ DRV_LOG(ERR, "port %u unable to release queue index %u",
+ dev->data->port_id, idx);
+ rte_errno = EBUSY;
+ return -rte_errno;
+ }
+ mlx5_rxq_release(dev, idx);
+ } else {
+ if ((*rxq_ctrl)->obj != NULL)
+ /* Some port using this shared Rx queue has been started. */
+ return 0;
+ /* Release all owner RxQs to reconfigure the shared RxQ. */
+ do {
+ rxq = LIST_FIRST(&(*rxq_ctrl)->owners);
+ LIST_REMOVE(rxq, owner_entry);
+ empty = LIST_EMPTY(&(*rxq_ctrl)->owners);
+ mlx5_rxq_release(ETH_DEV(rxq->priv), rxq->idx);
+ } while (!empty);
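+ /*
+ * All owners are released; clear the caller's pointer so a fresh
+ * control block is allocated with the new configuration.
+ */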
+ *rxq_ctrl = NULL;
}
- mlx5_rxq_release(dev, idx);
return 0;
}
+/**
+ * Get the shared Rx queue object that matches group and queue index.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param group
+ * Shared RXQ group.
+ * @param share_qid
+ * Shared RX queue index.
+ *
+ * @return
+ * Matching shared RXQ object, or NULL if not found.
+ */
+static struct mlx5_rxq_ctrl *
+mlx5_shared_rxq_get(struct rte_eth_dev *dev, uint32_t group, uint16_t share_qid)
+{
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ LIST_FOREACH(rxq_ctrl, &priv->sh->shared_rxqs, share_entry) {
+ if (rxq_ctrl->share_group == group &&
+ rxq_ctrl->share_qid == share_qid)
+ return rxq_ctrl;
+ }
+ return NULL;
+}
+
+/**
+ * Check whether requested Rx queue configuration matches shared RXQ.
+ *
+ * @param rxq_ctrl
+ * Pointer to shared RXQ.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * Queue index.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ * @param[in] conf
+ * Thresholds parameters.
+ * @param mp
+ * Memory pool for buffer allocations.
+ *
+ * @return
+ * true if the requested configuration matches the shared RXQ, false otherwise.
+ */
+static bool
+mlx5_shared_rxq_match(struct mlx5_rxq_ctrl *rxq_ctrl, struct rte_eth_dev *dev,
+ uint16_t idx, uint16_t desc, unsigned int socket,
+ const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp)
+{
+ struct mlx5_priv *spriv = LIST_FIRST(&rxq_ctrl->owners)->priv;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ unsigned int i;
+
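+ /* All owner ports share one configuration; compare with the first. */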
+ if (rxq_ctrl->socket != socket) {
+ DRV_LOG(ERR, "port %u queue index %u failed to join shared group: socket mismatch",
+ dev->data->port_id, idx);
+ return false;
+ }
+ if (rxq_ctrl->rxq.elts_n != log2above(desc)) {
+ DRV_LOG(ERR, "port %u queue index %u failed to join shared group: descriptor number mismatch",
+ dev->data->port_id, idx);
+ return false;
+ }
+ if (priv->mtu != spriv->mtu) {
+ DRV_LOG(ERR, "port %u queue index %u failed to join shared group: mtu mismatch",
+ dev->data->port_id, idx);
+ return false;
+ }
+ if (priv->dev_data->dev_conf.intr_conf.rxq !=
+ spriv->dev_data->dev_conf.intr_conf.rxq) {
+ DRV_LOG(ERR, "port %u queue index %u failed to join shared group: interrupt mismatch",
+ dev->data->port_id, idx);
+ return false;
+ }
+ if (mp != NULL && rxq_ctrl->rxq.mp != mp) {
+ DRV_LOG(ERR, "port %u queue index %u failed to join shared group: mempool mismatch",
+ dev->data->port_id, idx);
+ return false;
+ } else if (mp == NULL) {
+ for (i = 0; i < conf->rx_nseg; i++) {
+ if (conf->rx_seg[i].split.mp !=
+ rxq_ctrl->rxq.rxseg[i].mp ||
+ conf->rx_seg[i].split.length !=
+ rxq_ctrl->rxq.rxseg[i].length) {
+ DRV_LOG(ERR, "port %u queue index %u failed to join shared group: segment %u configuration mismatch",
+ dev->data->port_id, idx, i);
+ return false;
+ }
+ }
+ }
+ if (priv->config.hw_padding != spriv->config.hw_padding) {
+ DRV_LOG(ERR, "port %u queue index %u failed to join shared group: padding mismatch",
+ dev->data->port_id, idx);
+ return false;
+ }
+ if (priv->config.cqe_comp != spriv->config.cqe_comp ||
+ (priv->config.cqe_comp &&
+ priv->config.cqe_comp_fmt != spriv->config.cqe_comp_fmt)) {
+ DRV_LOG(ERR, "port %u queue index %u failed to join shared group: CQE compression mismatch",
+ dev->data->port_id, idx);
+ return false;
+ }
+ return true;
+}
+
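+/*
+ * Usage sketch (illustrative only, not part of this patch): two ports join
+ * the same shared RxQ by passing identical share_group and share_qid in
+ * struct rte_eth_rxconf; port0/port1/desc/socket/mp are placeholders.
+ *
+ *	struct rte_eth_rxconf conf = {
+ *		.share_group = 1,
+ *		.share_qid = 0,
+ *	};
+ *	rte_eth_rx_queue_setup(port0, 0, desc, socket, &conf, mp);
+ *	rte_eth_rx_queue_setup(port1, 0, desc, socket, &conf, mp);
+ *
+ * mlx5_shared_rxq_get() finds the control block created by the first call;
+ * mlx5_shared_rxq_match() validates the second call against it.
+ */
+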
/**
*
* @param dev
struct rte_mempool *mp)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_rxq_priv *rxq;
+ struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
struct rte_eth_rxseg_split *rx_seg =
(struct rte_eth_rxseg_split *)conf->rx_seg;
struct rte_eth_rxseg_split rx_single = {.mp = mp};
uint16_t n_seg = conf->rx_nseg;
int res;
+ uint64_t offloads = conf->offloads |
+ dev->data->dev_conf.rxmode.offloads;
if (mp) {
/*
n_seg = 1;
}
if (n_seg > 1) {
- uint64_t offloads = conf->offloads |
- dev->data->dev_conf.rxmode.offloads;
-
/* The offloads should be checked on rte_eth_dev layer. */
MLX5_ASSERT(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
}
MLX5_ASSERT(n_seg < MLX5_MAX_RXQ_NSEG);
}
- res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
+ if (conf->share_group > 0) {
+ if (!priv->config.hca_attr.mem_rq_rmp) {
+ DRV_LOG(ERR, "port %u queue index %u shared Rx queue not supported by fw",
+ dev->data->port_id, idx);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ if (priv->obj_ops.rxq_obj_new != devx_obj_ops.rxq_obj_new) {
+ DRV_LOG(ERR, "port %u queue index %u shared Rx queue needs DevX api",
+ dev->data->port_id, idx);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ if (conf->share_qid >= priv->rxqs_n) {
+ DRV_LOG(ERR, "port %u shared Rx queue index %u > number of Rx queues %u",
+ dev->data->port_id, conf->share_qid,
+ priv->rxqs_n);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ if (priv->config.mprq.enabled) {
+ DRV_LOG(ERR, "port %u shared Rx queue index %u: not supported when MPRQ enabled",
+ dev->data->port_id, conf->share_qid);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ /* Try to reuse shared RXQ. */
+ rxq_ctrl = mlx5_shared_rxq_get(dev, conf->share_group,
+ conf->share_qid);
+ if (rxq_ctrl != NULL &&
+ !mlx5_shared_rxq_match(rxq_ctrl, dev, idx, desc, socket,
+ conf, mp)) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ }
+ res = mlx5_rx_queue_pre_setup(dev, idx, &desc, &rxq_ctrl);
if (res)
return res;
- rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg, n_seg);
- if (!rxq_ctrl) {
- DRV_LOG(ERR, "port %u unable to allocate queue index %u",
+ /* Allocate RXQ. */
+ rxq = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*rxq), 0,
+ SOCKET_ID_ANY);
+ if (!rxq) {
+ DRV_LOG(ERR, "port %u unable to allocate rx queue index %u private data",
dev->data->port_id, idx);
rte_errno = ENOMEM;
return -rte_errno;
}
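+ /* Each port keeps its own queue private data, even for a shared RxQ. */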
+ rxq->priv = priv;
+ rxq->idx = idx;
+ (*priv->rxq_privs)[idx] = rxq;
+ if (rxq_ctrl != NULL) {
+ /* Join owner list. */
+ LIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry);
+ rxq->ctrl = rxq_ctrl;
+ } else {
+ rxq_ctrl = mlx5_rxq_new(dev, rxq, desc, socket, conf, rx_seg,
+ n_seg);
+ if (rxq_ctrl == NULL) {
+ DRV_LOG(ERR, "port %u unable to allocate rx queue index %u",
+ dev->data->port_id, idx);
+ mlx5_free(rxq);
+ (*priv->rxq_privs)[idx] = NULL;
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ }
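+ /* First reference on behalf of the application, dropped on release. */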
+ mlx5_rxq_ref(dev, idx);
DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
dev->data->port_id, idx);
- (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
+ dev->data->rx_queues[idx] = &rxq_ctrl->rxq;
return 0;
}
const struct rte_eth_hairpin_conf *hairpin_conf)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_rxq_priv *rxq;
+ struct mlx5_rxq_ctrl *rxq_ctrl;
int res;
- res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
+ res = mlx5_rx_queue_pre_setup(dev, idx, &desc, NULL);
if (res)
return res;
if (hairpin_conf->peer_count != 1) {
return -rte_errno;
}
}
- rxq_ctrl = mlx5_rxq_hairpin_new(dev, idx, desc, hairpin_conf);
+ rxq = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*rxq), 0,
+ SOCKET_ID_ANY);
+ if (!rxq) {
+ DRV_LOG(ERR, "port %u unable to allocate hairpin rx queue index %u private data",
+ dev->data->port_id, idx);
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ rxq->priv = priv;
+ rxq->idx = idx;
+ (*priv->rxq_privs)[idx] = rxq;
+ rxq_ctrl = mlx5_rxq_hairpin_new(dev, rxq, desc, hairpin_conf);
if (!rxq_ctrl) {
- DRV_LOG(ERR, "port %u unable to allocate queue index %u",
+ DRV_LOG(ERR, "port %u unable to allocate hairpin queue index %u",
dev->data->port_id, idx);
+ mlx5_free(rxq);
+ (*priv->rxq_privs)[idx] = NULL;
rte_errno = ENOMEM;
return -rte_errno;
}
- DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
+ DRV_LOG(DEBUG, "port %u adding hairpin Rx queue %u to list",
dev->data->port_id, idx);
- (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
+ dev->data->rx_queues[idx] = &rxq_ctrl->rxq;
return 0;
}
for (i = 0; i != n; ++i) {
/* This rxq obj must not be released in this function. */
- struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
- struct mlx5_rxq_obj *rxq_obj = rxq_ctrl ? rxq_ctrl->obj : NULL;
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
+ struct mlx5_rxq_obj *rxq_obj = rxq ? rxq->ctrl->obj : NULL;
int rc;
/* Skip queues that cannot request interrupts. */
if (rte_intr_vec_list_index_set(intr_handle, i,
RTE_INTR_VEC_RXTX_OFFSET + RTE_MAX_RXTX_INTR_VEC_ID))
return -rte_errno;
- /* Decrease the rxq_ctrl's refcnt */
- if (rxq_ctrl)
- mlx5_rxq_release(dev, i);
continue;
}
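+ /* Hold a queue reference while its interrupt vector entry is in use. */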
+ mlx5_rxq_ref(dev, i);
if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
DRV_LOG(ERR,
"port %u too many Rx queues for interrupt"
* Need to access directly the queue to release the reference
* kept in mlx5_rx_intr_vec_enable().
*/
- mlx5_rxq_release(dev, i);
+ mlx5_rxq_deref(dev, i);
}
free:
rte_intr_free_epoll_fd(intr_handle);
int sq_n = 0;
uint32_t doorbell_hi;
uint64_t doorbell;
- void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
doorbell = (uint64_t)doorbell_hi << 32;
doorbell |= rxq->cqn;
- rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
- mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
- cq_db_reg, rxq->uar_lock_cq);
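+ /*
+ * mlx5_doorbell_ring() replaces the open-coded doorbell record update
+ * and mlx5_uar_write64(), handling write ordering internally.
+ */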
+ mlx5_doorbell_ring(&rxq->uar_data, rte_cpu_to_be_64(doorbell),
+ doorbell_hi, &rxq->cq_db[MLX5_CQ_ARM_DB], 0);
}
/**
int
mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct mlx5_rxq_ctrl *rxq_ctrl;
-
- rxq_ctrl = mlx5_rxq_get(dev, rx_queue_id);
- if (!rxq_ctrl)
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);
+ if (!rxq)
goto error;
- if (rxq_ctrl->irq) {
- if (!rxq_ctrl->obj) {
- mlx5_rxq_release(dev, rx_queue_id);
+ if (rxq->ctrl->irq) {
+ if (!rxq->ctrl->obj)
goto error;
- }
- mlx5_arm_cq(&rxq_ctrl->rxq, rxq_ctrl->rxq.cq_arm_sn);
+ mlx5_arm_cq(&rxq->ctrl->rxq, rxq->ctrl->rxq.cq_arm_sn);
}
- mlx5_rxq_release(dev, rx_queue_id);
return 0;
error:
rte_errno = EINVAL;
mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_ctrl *rxq_ctrl;
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);
int ret = 0;
- rxq_ctrl = mlx5_rxq_get(dev, rx_queue_id);
- if (!rxq_ctrl) {
+ if (!rxq) {
rte_errno = EINVAL;
return -rte_errno;
}
- if (!rxq_ctrl->obj)
+ if (!rxq->ctrl->obj)
goto error;
- if (rxq_ctrl->irq) {
- ret = priv->obj_ops.rxq_event_get(rxq_ctrl->obj);
+ if (rxq->ctrl->irq) {
+ ret = priv->obj_ops.rxq_event_get(rxq->ctrl->obj);
if (ret < 0)
goto error;
- rxq_ctrl->rxq.cq_arm_sn++;
+ rxq->ctrl->rxq.cq_arm_sn++;
}
- mlx5_rxq_release(dev, rx_queue_id);
return 0;
error:
/**
rte_errno = errno;
else
rte_errno = EINVAL;
- ret = rte_errno; /* Save rte_errno before cleanup. */
- mlx5_rxq_release(dev, rx_queue_id);
- if (ret != EAGAIN)
+ if (rte_errno != EAGAIN)
DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
dev->data->port_id, rx_queue_id);
- rte_errno = ret; /* Restore rte_errno. */
return -rte_errno;
}
struct mlx5_rxq_obj *rxq_obj;
LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
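+ /* A shared RxQ object stays alive until its last owner releases it. */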
+ if (rxq_obj->rxq_ctrl->rxq.shared &&
+ !LIST_EMPTY(&rxq_obj->rxq_ctrl->owners))
+ continue;
DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
++ret;
rte_mempool_free(mp);
/* Unset mempool for each Rx queue. */
for (i = 0; i != priv->rxqs_n; ++i) {
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+ struct mlx5_rxq_data *rxq = mlx5_rxq_data_get(dev, i);
if (rxq == NULL)
continue;
return 0;
/* Count the total number of descriptors configured. */
for (i = 0; i != priv->rxqs_n; ++i) {
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
- struct mlx5_rxq_ctrl *rxq_ctrl = container_of
- (rxq, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
+ struct mlx5_rxq_data *rxq;
- if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
+ if (rxq_ctrl == NULL ||
+ rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
continue;
+ rxq = &rxq_ctrl->rxq;
n_ibv++;
desc += 1 << rxq->elts_n;
/* Get the max number of strides. */
rte_errno = ENOMEM;
return -rte_errno;
}
- ret = mlx5_mr_mempool_register(&priv->sh->cdev->mr_scache,
- priv->sh->cdev->pd, mp, &priv->mp_id);
+ ret = mlx5_mr_mempool_register(priv->sh->cdev, mp, false);
if (ret < 0 && rte_errno != EEXIST) {
ret = rte_errno;
DRV_LOG(ERR, "port %u failed to register a mempool for Multi-Packet RQ",
exit:
/* Set mempool for each Rx queue. */
for (i = 0; i != priv->rxqs_n; ++i) {
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
- struct mlx5_rxq_ctrl *rxq_ctrl = container_of
- (rxq, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
- if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
+ if (rxq_ctrl == NULL ||
+ rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
continue;
- rxq->mprq_mp = mp;
+ rxq_ctrl->rxq.mprq_mp = mp;
}
DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
dev->data->port_id);
*
* @param dev
* Pointer to Ethernet device.
- * @param idx
- * RX queue index.
+ * @param rxq
+ * RX queue private data.
* @param desc
* Number of descriptors to configure in queue.
* @param socket
* A DPDK queue object on success, NULL otherwise and rte_errno is set.
*/
struct mlx5_rxq_ctrl *
-mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
+ uint16_t desc,
unsigned int socket, const struct rte_eth_rxconf *conf,
const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
{
+ uint16_t idx = rxq->idx;
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *tmpl;
unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp);
rte_errno = ENOMEM;
return NULL;
}
+ LIST_INIT(&tmpl->owners);
+ if (conf->share_group > 0) {
+ tmpl->rxq.shared = 1;
+ tmpl->share_group = conf->share_group;
+ tmpl->share_qid = conf->share_qid;
+ LIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry);
+ }
+ rxq->ctrl = tmpl;
+ LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);
MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
/*
* Build the array of actual buffer offsets and lengths.
tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
(!!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS));
tmpl->rxq.port_id = dev->data->port_id;
- tmpl->priv = priv;
+ tmpl->sh = priv->sh;
tmpl->rxq.mp = rx_seg[0].mp;
tmpl->rxq.elts_n = log2above(desc);
tmpl->rxq.rq_repl_thresh =
(struct rte_mbuf *(*)[desc_n])(tmpl + 1);
tmpl->rxq.mprq_bufs =
(struct mlx5_mprq_buf *(*)[desc])(*tmpl->rxq.elts + desc_n);
-#ifndef RTE_ARCH_64
- tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;
-#endif
tmpl->rxq.idx = idx;
- __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
return tmpl;
error:
*
* @param dev
* Pointer to Ethernet device.
- * @param idx
- * RX queue index.
+ * @param rxq
+ * RX queue private data.
* @param desc
* Number of descriptors to configure in queue.
* @param hairpin_conf
* A DPDK queue object on success, NULL otherwise and rte_errno is set.
*/
struct mlx5_rxq_ctrl *
-mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
+ uint16_t desc,
const struct rte_eth_hairpin_conf *hairpin_conf)
{
+ uint16_t idx = rxq->idx;
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *tmpl;
rte_errno = ENOMEM;
return NULL;
}
+ LIST_INIT(&tmpl->owners);
+ rxq->ctrl = tmpl;
+ LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);
tmpl->type = MLX5_RXQ_TYPE_HAIRPIN;
tmpl->socket = SOCKET_ID_ANY;
tmpl->rxq.rss_hash = 0;
tmpl->rxq.port_id = dev->data->port_id;
- tmpl->priv = priv;
+ tmpl->sh = priv->sh;
tmpl->rxq.mp = NULL;
tmpl->rxq.elts_n = log2above(desc);
tmpl->rxq.elts = NULL;
tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
- tmpl->hairpin_conf = *hairpin_conf;
tmpl->rxq.idx = idx;
- __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+ rxq->hairpin_conf = *hairpin_conf;
+ mlx5_rxq_ref(dev, idx);
LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
return tmpl;
}
+/**
+ * Increase Rx queue reference count.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * RX queue index.
+ *
+ * @return
+ * A pointer to the queue if it exists, NULL otherwise.
+ */
+struct mlx5_rxq_priv *
+mlx5_rxq_ref(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+
+ if (rxq != NULL)
+ __atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+ return rxq;
+}
+
+/**
+ * Dereference a Rx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * RX queue index.
+ *
+ * @return
+ * Updated reference count.
+ */
+uint32_t
+mlx5_rxq_deref(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+
+ if (rxq == NULL)
+ return 0;
+ return __atomic_sub_fetch(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+}
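+
+/*
+ * Queue users (indirection tables, the Rx interrupt vector) take references
+ * with mlx5_rxq_ref() and drop them with mlx5_rxq_deref(); mlx5_rxq_release()
+ * stops the queue at the last reference and frees everything at zero.
+ */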
+
/**
* Get a Rx queue.
*
* @return
* A pointer to the queue if it exists, NULL otherwise.
*/
-struct mlx5_rxq_ctrl *
+struct mlx5_rxq_priv *
mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
- struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
- if (rxq_data) {
- rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
- __atomic_fetch_add(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED);
- }
- return rxq_ctrl;
+ MLX5_ASSERT(priv->rxq_privs != NULL);
+ return (*priv->rxq_privs)[idx];
+}
+
+/**
+ * Get Rx queue shareable control.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * RX queue index.
+ *
+ * @return
+ * A pointer to the queue control if it exists, NULL otherwise.
+ */
+struct mlx5_rxq_ctrl *
+mlx5_rxq_ctrl_get(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+
+ return rxq == NULL ? NULL : rxq->ctrl;
+}
+
+/**
+ * Get Rx queue shareable data.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * RX queue index.
+ *
+ * @return
+ * A pointer to the queue data if it exists, NULL otherwise.
+ */
+struct mlx5_rxq_data *
+mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+
+ return rxq == NULL ? NULL : &rxq->ctrl->rxq;
}
/**
mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_priv *rxq;
struct mlx5_rxq_ctrl *rxq_ctrl;
+ uint32_t refcnt;
- if (priv->rxqs == NULL || (*priv->rxqs)[idx] == NULL)
+ if (priv->rxq_privs == NULL)
+ return 0;
+ rxq = mlx5_rxq_get(dev, idx);
+ if (rxq == NULL || rxq->refcnt == 0)
return 0;
- rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
- if (__atomic_sub_fetch(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
+ rxq_ctrl = rxq->ctrl;
+ refcnt = mlx5_rxq_deref(dev, idx);
+ if (refcnt > 1) {
return 1;
- if (rxq_ctrl->obj) {
- priv->obj_ops.rxq_obj_release(rxq_ctrl->obj);
- LIST_REMOVE(rxq_ctrl->obj, next);
- mlx5_free(rxq_ctrl->obj);
- rxq_ctrl->obj = NULL;
- }
- if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
- rxq_free_elts(rxq_ctrl);
- dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
- }
- if (!__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED)) {
- if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
- mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
- LIST_REMOVE(rxq_ctrl, next);
- mlx5_free(rxq_ctrl);
- (*priv->rxqs)[idx] = NULL;
+ } else if (refcnt == 1) { /* RxQ stopped. */
+ priv->obj_ops.rxq_obj_release(rxq);
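+ /* Keep the HW object container while the queue is started. */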
+ if (!rxq_ctrl->started && rxq_ctrl->obj != NULL) {
+ LIST_REMOVE(rxq_ctrl->obj, next);
+ mlx5_free(rxq_ctrl->obj);
+ rxq_ctrl->obj = NULL;
+ }
+ if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
+ if (!rxq_ctrl->started)
+ rxq_free_elts(rxq_ctrl);
+ dev->data->rx_queue_state[idx] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+ } else { /* Refcnt zero, closing device. */
+ LIST_REMOVE(rxq, owner_entry);
+ if (LIST_EMPTY(&rxq_ctrl->owners)) {
+ if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
+ mlx5_mr_btree_free
+ (&rxq_ctrl->rxq.mr_ctrl.cache_bh);
+ if (rxq_ctrl->rxq.shared)
+ LIST_REMOVE(rxq_ctrl, share_entry);
+ LIST_REMOVE(rxq_ctrl, next);
+ mlx5_free(rxq_ctrl);
+ }
+ dev->data->rx_queues[idx] = NULL;
+ mlx5_free(rxq);
+ (*priv->rxq_privs)[idx] = NULL;
}
return 0;
}
mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
+ struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
- if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) {
- rxq_ctrl = container_of((*priv->rxqs)[idx],
- struct mlx5_rxq_ctrl,
- rxq);
+ if (idx < priv->rxqs_n && rxq_ctrl != NULL)
return rxq_ctrl->type;
- }
return MLX5_RXQ_TYPE_UNDEFINED;
}
mlx5_rxq_get_hairpin_conf(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
- if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) {
- rxq_ctrl = container_of((*priv->rxqs)[idx],
- struct mlx5_rxq_ctrl,
- rxq);
- if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
- return &rxq_ctrl->hairpin_conf;
+ if (idx < priv->rxqs_n && rxq != NULL) {
+ if (rxq->ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
+ return &rxq->hairpin_conf;
}
return NULL;
}
* Indirection table to release.
* @param standalone
* Indirection table for Standalone queue.
+ * @param deref_rxqs
+ * If true, then dereference the RX queues referenced by the indirection table.
+ * Otherwise, no additional action will be taken.
*
* @return
* 1 while a reference on it exists, 0 when freed.
int
mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
struct mlx5_ind_table_obj *ind_tbl,
- bool standalone)
+ bool standalone,
+ bool deref_rxqs)
{
struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i, ret;
if (ret)
return 1;
priv->obj_ops.ind_table_destroy(ind_tbl);
- for (i = 0; i != ind_tbl->queues_n; ++i)
- claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
+ if (deref_rxqs) {
+ for (i = 0; i != ind_tbl->queues_n; ++i)
+ claim_nonzero(mlx5_rxq_deref(dev, ind_tbl->queues[i]));
+ }
mlx5_free(ind_tbl);
return 0;
}
log2above(priv->config.ind_table_max_size);
for (i = 0; i != queues_n; ++i) {
- if (!mlx5_rxq_get(dev, queues[i])) {
+ if (mlx5_rxq_ref(dev, queues[i]) == NULL) {
ret = -rte_errno;
goto error;
}
error:
err = rte_errno;
for (j = 0; j < i; j++)
- mlx5_rxq_release(dev, ind_tbl->queues[j]);
+ mlx5_rxq_deref(dev, ind_tbl->queues[j]);
rte_errno = err;
DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
dev->data->port_id);
bool standalone)
{
struct mlx5_priv *priv = dev->data->dev_private;
- unsigned int i, j;
+ unsigned int i;
- int ret = 0, err;
+ int ret = 0;
const unsigned int n = rte_is_power_of_2(queues_n) ?
log2above(queues_n) :
ret = priv->obj_ops.ind_table_modify(dev, n, queues, queues_n, ind_tbl);
if (ret)
goto error;
- for (j = 0; j < ind_tbl->queues_n; j++)
- mlx5_rxq_release(dev, ind_tbl->queues[j]);
ind_tbl->queues_n = queues_n;
ind_tbl->queues = queues;
return 0;
error:
- err = rte_errno;
- for (j = 0; j < i; j++)
- mlx5_rxq_release(dev, queues[j]);
- rte_errno = err;
DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
dev->data->port_id);
return ret;
}
for (i = 0; i < ind_tbl->queues_n; i++)
- mlx5_rxq_get(dev, ind_tbl->queues[i]);
+ mlx5_rxq_ref(dev, ind_tbl->queues[i]);
return 0;
}
if (ind_tbl != hrxq->ind_table) {
MLX5_ASSERT(!hrxq->standalone);
mlx5_ind_table_obj_release(dev, hrxq->ind_table,
- hrxq->standalone);
+ hrxq->standalone, true);
hrxq->ind_table = ind_tbl;
}
hrxq->hash_fields = hash_fields;
err = rte_errno;
if (ind_tbl != hrxq->ind_table) {
MLX5_ASSERT(!hrxq->standalone);
- mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone);
+ mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone,
+ true);
}
rte_errno = err;
return -rte_errno;
priv->obj_ops.hrxq_destroy(hrxq);
if (!hrxq->standalone) {
mlx5_ind_table_obj_release(dev, hrxq->ind_table,
- hrxq->standalone);
+ hrxq->standalone, true);
}
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
}
return hrxq;
error:
if (!rss_desc->ind_tbl)
- mlx5_ind_table_obj_release(dev, ind_tbl, standalone);
+ mlx5_ind_table_obj_release(dev, ind_tbl, standalone, true);
if (hrxq)
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
return NULL;
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
- struct mlx5_rxq_data *data;
unsigned int i;
for (i = 0; i != priv->rxqs_n; ++i) {
- if (!(*priv->rxqs)[i])
+ struct mlx5_rxq_data *data = mlx5_rxq_data_get(dev, i);
+
+ if (data == NULL)
continue;
- data = (*priv->rxqs)[i];
data->sh = sh;
data->rt_timestamp = priv->config.rt_timestamp;
}