#include <rte_mbuf.h>
#include <rte_malloc.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <mlx5_glue.h>
#include <mlx5_malloc.h>
+#include <mlx5_common.h>
+#include <mlx5_common_mr.h>
#include "mlx5_defs.h"
#include "mlx5.h"
-#include "mlx5_rxtx.h"
+#include "mlx5_rx.h"
#include "mlx5_utils.h"
#include "mlx5_autoconf.h"
+#include "mlx5_devx.h"
/* Default RSS hash key also used for ConnectX-3. */
(unsigned int)sizeof(rss_hash_default_key),
"wrong RSS default key size.");
-/**
- * Check whether Multi-Packet RQ can be enabled for the device.
- *
- * @param dev
- * Pointer to Ethernet device.
- *
- * @return
- * 1 if supported, negative errno value if not.
- */
-inline int
-mlx5_check_mprq_support(struct rte_eth_dev *dev)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
-
- if (priv->config.mprq.enabled &&
- priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
- return 1;
- return -ENOTSUP;
-}
-
-/**
- * Check whether Multi-Packet RQ is enabled for the Rx queue.
- *
- * @param rxq
- * Pointer to receive queue structure.
- *
- * @return
- * 0 if disabled, otherwise enabled.
- */
-inline int
-mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
-{
- return rxq->strd_num_n > 0;
-}
-
-/**
- * Check whether Multi-Packet RQ is enabled for the device.
- *
- * @param dev
- * Pointer to Ethernet device.
- *
- * @return
- * 0 if disabled, otherwise enabled.
- */
-inline int
-mlx5_mprq_enabled(struct rte_eth_dev *dev)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- uint32_t i;
- uint16_t n = 0;
- uint16_t n_ibv = 0;
-
- if (mlx5_check_mprq_support(dev) < 0)
- return 0;
- /* All the configured queues should be enabled. */
- for (i = 0; i < priv->rxqs_n; ++i) {
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
- struct mlx5_rxq_ctrl *rxq_ctrl = container_of
- (rxq, struct mlx5_rxq_ctrl, rxq);
-
- if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
- continue;
- n_ibv++;
- if (mlx5_rxq_mprq_enabled(rxq))
- ++n;
- }
- /* Multi-Packet RQ can't be partially configured. */
- MLX5_ASSERT(n == 0 || n == n_ibv);
- return n == n_ibv;
-}
-
/**
* Calculate the number of CQEs in CQ for the Rx queue.
*
unsigned int wqe_n = 1 << rxq_data->elts_n;
if (mlx5_rxq_mprq_enabled(rxq_data))
- cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
+ cqe_n = wqe_n * RTE_BIT32(rxq_data->log_strd_num) - 1;
else
cqe_n = wqe_n - 1;
return cqe_n;
rxq->mprq_repl = buf;
}
DRV_LOG(DEBUG,
- "port %u Rx queue %u allocated and configured %u segments",
+ "port %u MPRQ queue %u allocated and configured %u segments",
rxq->port_id, rxq->idx, wqe_n);
return 0;
error:
(*rxq->mprq_bufs)[i]);
(*rxq->mprq_bufs)[i] = NULL;
}
- DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
+ DRV_LOG(DEBUG, "port %u MPRQ queue %u failed, freed everything",
rxq->port_id, rxq->idx);
rte_errno = err; /* Restore rte_errno. */
return -rte_errno;
* Pointer to RX queue structure.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, negative errno value on failure.
*/
static int
rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
{
const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
- unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
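+	/* With MPRQ a simple mbuf is kept per stride for the memcpy/vector path, so scale the element count by the stride number. */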
+ unsigned int elts_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
+ RTE_BIT32(rxq_ctrl->rxq.elts_n) *
+ RTE_BIT32(rxq_ctrl->rxq.log_strd_num) :
+ RTE_BIT32(rxq_ctrl->rxq.elts_n);
+ bool has_vec_support = mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0;
unsigned int i;
int err;
/* Iterate on segments. */
for (i = 0; (i != elts_n); ++i) {
+ struct mlx5_eth_rxseg *seg = &rxq_ctrl->rxq.rxseg[i % sges_n];
struct rte_mbuf *buf;
- buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
+ buf = rte_pktmbuf_alloc(seg->mp);
if (buf == NULL) {
- DRV_LOG(ERR, "port %u empty mbuf pool",
- PORT_ID(rxq_ctrl->priv));
+ if (rxq_ctrl->share_group == 0)
+ DRV_LOG(ERR, "port %u queue %u empty mbuf pool",
+ RXQ_PORT_ID(rxq_ctrl),
+ rxq_ctrl->rxq.idx);
+ else
+ DRV_LOG(ERR, "share group %u queue %u empty mbuf pool",
+ rxq_ctrl->share_group,
+ rxq_ctrl->share_qid);
rte_errno = ENOMEM;
goto error;
}
- /* Headroom is reserved by rte_pktmbuf_alloc(). */
- MLX5_ASSERT(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
+ /* Only vectored Rx routines rely on headroom size. */
+ MLX5_ASSERT(!has_vec_support ||
+ DATA_OFF(buf) >= RTE_PKTMBUF_HEADROOM);
/* Buffer is supposed to be empty. */
MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
MLX5_ASSERT(!buf->next);
- /* Only the first segment keeps headroom. */
- if (i % sges_n)
- SET_DATA_OFF(buf, 0);
+ SET_DATA_OFF(buf, seg->offset);
PORT(buf) = rxq_ctrl->rxq.port_id;
- DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
- PKT_LEN(buf) = DATA_LEN(buf);
+ DATA_LEN(buf) = seg->length;
+ PKT_LEN(buf) = seg->length;
NB_SEGS(buf) = 1;
(*rxq_ctrl->rxq.elts)[i] = buf;
}
/* If Rx vector is activated. */
- if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
+ if (has_vec_support) {
struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
struct rte_pktmbuf_pool_private *priv =
mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
rte_mbuf_refcnt_set(mbuf_init, 1);
mbuf_init->nb_segs = 1;
- mbuf_init->port = rxq->port_id;
+	/* For shared queues the port is provided in the CQE. */
+ mbuf_init->port = rxq->shared ? 0 : rxq->port_id;
if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
- mbuf_init->ol_flags = EXT_ATTACHED_MBUF;
+ mbuf_init->ol_flags = RTE_MBUF_F_EXTERNAL;
/*
* prevent compiler reordering:
* rearm_data covers previous fields.
for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
(*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
}
- DRV_LOG(DEBUG,
- "port %u Rx queue %u allocated and configured %u segments"
- " (max %u packets)",
- PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
- elts_n / (1 << rxq_ctrl->rxq.sges_n));
+ if (rxq_ctrl->share_group == 0)
+ DRV_LOG(DEBUG,
+ "port %u SPRQ queue %u allocated and configured %u segments (max %u packets)",
+ RXQ_PORT_ID(rxq_ctrl), rxq_ctrl->rxq.idx, elts_n,
+ elts_n / (1 << rxq_ctrl->rxq.sges_n));
+ else
+ DRV_LOG(DEBUG,
+ "share group %u SPRQ queue %u allocated and configured %u segments (max %u packets)",
+ rxq_ctrl->share_group, rxq_ctrl->share_qid, elts_n,
+ elts_n / (1 << rxq_ctrl->rxq.sges_n));
return 0;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
(*rxq_ctrl->rxq.elts)[i] = NULL;
}
- DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
- PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
+ if (rxq_ctrl->share_group == 0)
+ DRV_LOG(DEBUG, "port %u SPRQ queue %u failed, freed everything",
+ RXQ_PORT_ID(rxq_ctrl), rxq_ctrl->rxq.idx);
+ else
+ DRV_LOG(DEBUG, "share group %u SPRQ queue %u failed, freed everything",
+ rxq_ctrl->share_group, rxq_ctrl->share_qid);
rte_errno = err; /* Restore rte_errno. */
return -rte_errno;
}
* Pointer to RX queue structure.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, negative errno value on failure.
*/
int
rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
{
- return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
- rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl);
+ int ret = 0;
+
+ /**
+ * For MPRQ we need to allocate both MPRQ buffers
+ * for WQEs and simple mbufs for vector processing.
+ */
+ if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
+ ret = rxq_alloc_elts_mprq(rxq_ctrl);
+ if (ret == 0)
+ ret = rxq_alloc_elts_sprq(rxq_ctrl);
+ return ret;
}
/**
struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
uint16_t i;
- DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs",
- rxq->port_id, rxq->idx);
+ DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing %d WRs",
+ rxq->port_id, rxq->idx, (1u << rxq->elts_n));
if (rxq->mprq_bufs == NULL)
return;
- MLX5_ASSERT(mlx5_rxq_check_vec_support(rxq) < 0);
for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
if ((*rxq->mprq_bufs)[i] != NULL)
mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
{
struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
- const uint16_t q_n = (1 << rxq->elts_n);
+ const uint16_t q_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
+ RTE_BIT32(rxq->elts_n) * RTE_BIT32(rxq->log_strd_num) :
+ RTE_BIT32(rxq->elts_n);
const uint16_t q_mask = q_n - 1;
- uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
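+	/* MPRQ advances the element ring with elts_ci, while SPRQ tracks it with the WQE index rq_ci. */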
+ uint16_t elts_ci = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
+ rxq->elts_ci : rxq->rq_ci;
+ uint16_t used = q_n - (elts_ci - rxq->rq_pi);
uint16_t i;
- DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
- PORT_ID(rxq_ctrl->priv), rxq->idx);
+ if (rxq_ctrl->share_group == 0)
+ DRV_LOG(DEBUG, "port %u Rx queue %u freeing %d WRs",
+ RXQ_PORT_ID(rxq_ctrl), rxq->idx, q_n);
+ else
+ DRV_LOG(DEBUG, "share group %u Rx queue %u freeing %d WRs",
+ rxq_ctrl->share_group, rxq_ctrl->share_qid, q_n);
if (rxq->elts == NULL)
return;
/**
- * Some mbuf in the Ring belongs to the application. They cannot be
- * freed.
+	 * Some mbufs in the ring belong to the application.
+ * They cannot be freed.
*/
if (mlx5_rxq_check_vec_support(rxq) > 0) {
for (i = 0; i < used; ++i)
- (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
- rxq->rq_pi = rxq->rq_ci;
+ (*rxq->elts)[(elts_ci + i) & q_mask] = NULL;
+ rxq->rq_pi = elts_ci;
}
- for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
+ for (i = 0; i != q_n; ++i) {
if ((*rxq->elts)[i] != NULL)
rte_pktmbuf_free_seg((*rxq->elts)[i]);
(*rxq->elts)[i] = NULL;
static void
rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
{
+ /*
+	 * For MPRQ we need to free both the MPRQ buffers
+	 * used for WQEs and the simple mbufs used for vector processing.
+ */
if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
rxq_free_elts_mprq(rxq_ctrl);
- else
- rxq_free_elts_sprq(rxq_ctrl);
+ rxq_free_elts_sprq(rxq_ctrl);
}
/**
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_config *config = &priv->config;
- uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_TIMESTAMP |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_RSS_HASH);
+ uint64_t offloads = (RTE_ETH_RX_OFFLOAD_SCATTER |
+ RTE_ETH_RX_OFFLOAD_TIMESTAMP |
+ RTE_ETH_RX_OFFLOAD_RSS_HASH);
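+	/* Buffer split cannot be combined with MPRQ, so advertise it only when MPRQ is disabled. */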
+ if (!config->mprq.enabled)
+ offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
if (config->hw_fcs_strip)
- offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
-
+ offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
if (config->hw_csum)
- offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM);
+ offloads |= (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
if (config->hw_vlan_strip)
- offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
if (MLX5_LRO_SUPPORTED(dev))
- offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+ offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
return offloads;
}
uint64_t
mlx5_get_rx_port_offloads(void)
{
- uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+ uint64_t offloads = RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
return offloads;
}
static int
mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_ctrl *rxq_ctrl;
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
- if (!(*priv->rxqs)[idx]) {
+ if (rxq == NULL) {
rte_errno = EINVAL;
return -rte_errno;
}
- rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
- return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
+ return (__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED) == 1);
}
/* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+ struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
int ret;
+ MLX5_ASSERT(rxq != NULL && rxq_ctrl != NULL);
MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
- ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, false);
+ ret = priv->obj_ops.rxq_obj_modify(rxq, MLX5_RXQ_MOD_RDY2RST);
if (ret) {
DRV_LOG(ERR, "Cannot change Rx WQ state to RESET: %s",
strerror(errno));
return ret;
}
/* Remove all processes CQEs. */
- rxq_sync_cq(rxq);
+ rxq_sync_cq(&rxq_ctrl->rxq);
/* Free all involved mbufs. */
rxq_free_elts(rxq_ctrl);
/* Set the actual queue state. */
mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+ struct mlx5_rxq_data *rxq_data = &rxq->ctrl->rxq;
int ret;
- MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ MLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL);
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
/* Allocate needed buffers. */
- ret = rxq_alloc_elts(rxq_ctrl);
+ ret = rxq_alloc_elts(rxq->ctrl);
if (ret) {
DRV_LOG(ERR, "Cannot reallocate buffers for Rx WQ");
rte_errno = errno;
return ret;
}
rte_io_wmb();
- *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
+ *rxq_data->cq_db = rte_cpu_to_be_32(rxq_data->cq_ci);
rte_io_wmb();
- /* Reset RQ consumer before moving queue ro READY state. */
- *rxq->rq_db = rte_cpu_to_be_32(0);
+ /* Reset RQ consumer before moving queue to READY state. */
+ *rxq_data->rq_db = rte_cpu_to_be_32(0);
rte_io_wmb();
- ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, true);
+ ret = priv->obj_ops.rxq_obj_modify(rxq, MLX5_RXQ_MOD_RST2RDY);
if (ret) {
DRV_LOG(ERR, "Cannot change Rx WQ state to READY: %s",
strerror(errno));
return ret;
}
/* Reinitialize RQ - set WQEs. */
- mlx5_rxq_initialize(rxq);
- rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
+ mlx5_rxq_initialize(rxq_data);
+ rxq_data->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
/* Set actual queue state. */
dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
return 0;
* RX queue index.
* @param desc
* Number of descriptors to configure in queue.
+ * @param[out] rxq_ctrl
+ * Address of pointer to shared Rx queue control.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
+mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc,
+ struct mlx5_rxq_ctrl **rxq_ctrl)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_priv *rxq;
+ bool empty;
if (!rte_is_power_of_2(*desc)) {
*desc = 1 << log2above(*desc);
rte_errno = EOVERFLOW;
return -rte_errno;
}
- if (!mlx5_rxq_releasable(dev, idx)) {
- DRV_LOG(ERR, "port %u unable to release queue index %u",
- dev->data->port_id, idx);
- rte_errno = EBUSY;
- return -rte_errno;
+ if (rxq_ctrl == NULL || *rxq_ctrl == NULL)
+ return 0;
+ if (!(*rxq_ctrl)->rxq.shared) {
+ if (!mlx5_rxq_releasable(dev, idx)) {
+ DRV_LOG(ERR, "port %u unable to release queue index %u",
+ dev->data->port_id, idx);
+ rte_errno = EBUSY;
+ return -rte_errno;
+ }
+ mlx5_rxq_release(dev, idx);
+ } else {
+ if ((*rxq_ctrl)->obj != NULL)
+ /* Some port using shared Rx queue has been started. */
+ return 0;
+		/* Release all owner RxQs to reconfigure the shared RxQ. */
+ do {
+ rxq = LIST_FIRST(&(*rxq_ctrl)->owners);
+ LIST_REMOVE(rxq, owner_entry);
+ empty = LIST_EMPTY(&(*rxq_ctrl)->owners);
+ mlx5_rxq_release(ETH_DEV(rxq->priv), rxq->idx);
+ } while (!empty);
+ *rxq_ctrl = NULL;
}
- mlx5_rxq_release(dev, idx);
return 0;
}
+/**
+ * Get the shared Rx queue object that matches group and queue index.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param group
+ * Shared RXQ group.
+ * @param share_qid
+ * Shared RX queue index.
+ *
+ * @return
+ *   Matching shared RXQ object, or NULL if not found.
+ */
+static struct mlx5_rxq_ctrl *
+mlx5_shared_rxq_get(struct rte_eth_dev *dev, uint32_t group, uint16_t share_qid)
+{
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ LIST_FOREACH(rxq_ctrl, &priv->sh->shared_rxqs, share_entry) {
+ if (rxq_ctrl->share_group == group &&
+ rxq_ctrl->share_qid == share_qid)
+ return rxq_ctrl;
+ }
+ return NULL;
+}
+
+/**
+ * Check whether requested Rx queue configuration matches shared RXQ.
+ *
+ * @param rxq_ctrl
+ * Pointer to shared RXQ.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * Queue index.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ * @param[in] conf
+ * Thresholds parameters.
+ * @param mp
+ * Memory pool for buffer allocations.
+ *
+ * @return
+ *   true if the requested configuration matches the shared RXQ, false otherwise.
+ */
+static bool
+mlx5_shared_rxq_match(struct mlx5_rxq_ctrl *rxq_ctrl, struct rte_eth_dev *dev,
+ uint16_t idx, uint16_t desc, unsigned int socket,
+ const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp)
+{
+ struct mlx5_priv *spriv = LIST_FIRST(&rxq_ctrl->owners)->priv;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ unsigned int i;
+
+ RTE_SET_USED(conf);
+ if (rxq_ctrl->socket != socket) {
+ DRV_LOG(ERR, "port %u queue index %u failed to join shared group: socket mismatch",
+ dev->data->port_id, idx);
+ return false;
+ }
+ if (rxq_ctrl->rxq.elts_n != log2above(desc)) {
+ DRV_LOG(ERR, "port %u queue index %u failed to join shared group: descriptor number mismatch",
+ dev->data->port_id, idx);
+ return false;
+ }
+ if (priv->mtu != spriv->mtu) {
+ DRV_LOG(ERR, "port %u queue index %u failed to join shared group: mtu mismatch",
+ dev->data->port_id, idx);
+ return false;
+ }
+ if (priv->dev_data->dev_conf.intr_conf.rxq !=
+ spriv->dev_data->dev_conf.intr_conf.rxq) {
+ DRV_LOG(ERR, "port %u queue index %u failed to join shared group: interrupt mismatch",
+ dev->data->port_id, idx);
+ return false;
+ }
+ if (mp != NULL && rxq_ctrl->rxq.mp != mp) {
+ DRV_LOG(ERR, "port %u queue index %u failed to join shared group: mempool mismatch",
+ dev->data->port_id, idx);
+ return false;
+ } else if (mp == NULL) {
+ if (conf->rx_nseg != rxq_ctrl->rxseg_n) {
+ DRV_LOG(ERR, "port %u queue index %u failed to join shared group: segment number mismatch",
+ dev->data->port_id, idx);
+ return false;
+ }
+ for (i = 0; i < conf->rx_nseg; i++) {
+ if (memcmp(&conf->rx_seg[i].split, &rxq_ctrl->rxseg[i],
+ sizeof(struct rte_eth_rxseg_split))) {
+ DRV_LOG(ERR, "port %u queue index %u failed to join shared group: segment %u configuration mismatch",
+ dev->data->port_id, idx, i);
+ return false;
+ }
+ }
+ }
+ if (priv->config.hw_padding != spriv->config.hw_padding) {
+ DRV_LOG(ERR, "port %u queue index %u failed to join shared group: padding mismatch",
+ dev->data->port_id, idx);
+ return false;
+ }
+ if (priv->config.cqe_comp != spriv->config.cqe_comp ||
+ (priv->config.cqe_comp &&
+ priv->config.cqe_comp_fmt != spriv->config.cqe_comp_fmt)) {
+ DRV_LOG(ERR, "port %u queue index %u failed to join shared group: CQE compression mismatch",
+ dev->data->port_id, idx);
+ return false;
+ }
+ return true;
+}
+
/**
*
* @param dev
struct rte_mempool *mp)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_rxq_priv *rxq;
+ struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
+ struct rte_eth_rxseg_split *rx_seg =
+ (struct rte_eth_rxseg_split *)conf->rx_seg;
+ struct rte_eth_rxseg_split rx_single = {.mp = mp};
+ uint16_t n_seg = conf->rx_nseg;
int res;
+ uint64_t offloads = conf->offloads |
+ dev->data->dev_conf.rxmode.offloads;
- res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
+ if (mp) {
+ /*
+		 * The parameters should be checked at the rte_eth_dev layer.
+ * If mp is specified it means the compatible configuration
+ * without buffer split feature tuning.
+ */
+ rx_seg = &rx_single;
+ n_seg = 1;
+ }
+ if (n_seg > 1) {
+ /* The offloads should be checked on rte_eth_dev layer. */
+ MLX5_ASSERT(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
+ if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
+ DRV_LOG(ERR, "port %u queue index %u split "
+ "offload not configured",
+ dev->data->port_id, idx);
+ rte_errno = ENOSPC;
+ return -rte_errno;
+ }
+ MLX5_ASSERT(n_seg < MLX5_MAX_RXQ_NSEG);
+ }
+ if (conf->share_group > 0) {
+ if (!priv->config.hca_attr.mem_rq_rmp) {
+ DRV_LOG(ERR, "port %u queue index %u shared Rx queue not supported by fw",
+ dev->data->port_id, idx);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ if (priv->obj_ops.rxq_obj_new != devx_obj_ops.rxq_obj_new) {
+ DRV_LOG(ERR, "port %u queue index %u shared Rx queue needs DevX api",
+ dev->data->port_id, idx);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ if (conf->share_qid >= priv->rxqs_n) {
+ DRV_LOG(ERR, "port %u shared Rx queue index %u > number of Rx queues %u",
+ dev->data->port_id, conf->share_qid,
+ priv->rxqs_n);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ if (priv->config.mprq.enabled) {
+ DRV_LOG(ERR, "port %u shared Rx queue index %u: not supported when MPRQ enabled",
+ dev->data->port_id, conf->share_qid);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ /* Try to reuse shared RXQ. */
+ rxq_ctrl = mlx5_shared_rxq_get(dev, conf->share_group,
+ conf->share_qid);
+ if (rxq_ctrl != NULL &&
+ !mlx5_shared_rxq_match(rxq_ctrl, dev, idx, desc, socket,
+ conf, mp)) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ }
+ res = mlx5_rx_queue_pre_setup(dev, idx, &desc, &rxq_ctrl);
if (res)
return res;
- rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
- if (!rxq_ctrl) {
- DRV_LOG(ERR, "port %u unable to allocate queue index %u",
+ /* Allocate RXQ. */
+ rxq = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*rxq), 0,
+ SOCKET_ID_ANY);
+ if (!rxq) {
+ DRV_LOG(ERR, "port %u unable to allocate rx queue index %u private data",
dev->data->port_id, idx);
rte_errno = ENOMEM;
return -rte_errno;
}
+ rxq->priv = priv;
+ rxq->idx = idx;
+ (*priv->rxq_privs)[idx] = rxq;
+ if (rxq_ctrl != NULL) {
+ /* Join owner list. */
+ LIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry);
+ rxq->ctrl = rxq_ctrl;
+ } else {
+ rxq_ctrl = mlx5_rxq_new(dev, rxq, desc, socket, conf, rx_seg,
+ n_seg);
+ if (rxq_ctrl == NULL) {
+ DRV_LOG(ERR, "port %u unable to allocate rx queue index %u",
+ dev->data->port_id, idx);
+ mlx5_free(rxq);
+ (*priv->rxq_privs)[idx] = NULL;
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ }
+ mlx5_rxq_ref(dev, idx);
DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
dev->data->port_id, idx);
- (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
+ dev->data->rx_queues[idx] = &rxq_ctrl->rxq;
return 0;
}
const struct rte_eth_hairpin_conf *hairpin_conf)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_rxq_priv *rxq;
+ struct mlx5_rxq_ctrl *rxq_ctrl;
int res;
- res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
+ res = mlx5_rx_queue_pre_setup(dev, idx, &desc, NULL);
if (res)
return res;
- if (hairpin_conf->peer_count != 1 ||
- hairpin_conf->peers[0].port != dev->data->port_id ||
- hairpin_conf->peers[0].queue >= priv->txqs_n) {
- DRV_LOG(ERR, "port %u unable to setup hairpin queue index %u "
- " invalid hairpind configuration", dev->data->port_id,
- idx);
+ if (hairpin_conf->peer_count != 1) {
rte_errno = EINVAL;
+ DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue index %u"
+ " peer count is %u", dev->data->port_id,
+ idx, hairpin_conf->peer_count);
+ return -rte_errno;
+ }
+ if (hairpin_conf->peers[0].port == dev->data->port_id) {
+ if (hairpin_conf->peers[0].queue >= priv->txqs_n) {
+ rte_errno = EINVAL;
+ DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue"
+ " index %u, Tx %u is larger than %u",
+ dev->data->port_id, idx,
+ hairpin_conf->peers[0].queue, priv->txqs_n);
+ return -rte_errno;
+ }
+ } else {
+ if (hairpin_conf->manual_bind == 0 ||
+ hairpin_conf->tx_explicit == 0) {
+ rte_errno = EINVAL;
+ DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue"
+ " index %u peer port %u with attributes %u %u",
+ dev->data->port_id, idx,
+ hairpin_conf->peers[0].port,
+ hairpin_conf->manual_bind,
+ hairpin_conf->tx_explicit);
+ return -rte_errno;
+ }
+ }
+ rxq = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*rxq), 0,
+ SOCKET_ID_ANY);
+ if (!rxq) {
+ DRV_LOG(ERR, "port %u unable to allocate hairpin rx queue index %u private data",
+ dev->data->port_id, idx);
+ rte_errno = ENOMEM;
return -rte_errno;
}
- rxq_ctrl = mlx5_rxq_hairpin_new(dev, idx, desc, hairpin_conf);
+ rxq->priv = priv;
+ rxq->idx = idx;
+ (*priv->rxq_privs)[idx] = rxq;
+ rxq_ctrl = mlx5_rxq_hairpin_new(dev, rxq, desc, hairpin_conf);
if (!rxq_ctrl) {
- DRV_LOG(ERR, "port %u unable to allocate queue index %u",
+ DRV_LOG(ERR, "port %u unable to allocate hairpin queue index %u",
dev->data->port_id, idx);
+ mlx5_free(rxq);
+ (*priv->rxq_privs)[idx] = NULL;
rte_errno = ENOMEM;
return -rte_errno;
}
- DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
+ DRV_LOG(DEBUG, "port %u adding hairpin Rx queue %u to list",
dev->data->port_id, idx);
- (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
+ dev->data->rx_queues[idx] = &rxq_ctrl->rxq;
return 0;
}
/**
* DPDK callback to release a RX queue.
*
- * @param dpdk_rxq
- * Generic RX queue pointer.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param qid
+ * Receive queue index.
*/
void
-mlx5_rx_queue_release(void *dpdk_rxq)
+mlx5_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
- struct mlx5_rxq_ctrl *rxq_ctrl;
- struct mlx5_priv *priv;
+ struct mlx5_rxq_data *rxq = dev->data->rx_queues[qid];
if (rxq == NULL)
return;
- rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
- priv = rxq_ctrl->priv;
- if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
+ if (!mlx5_rxq_releasable(dev, qid))
rte_panic("port %u Rx queue %u is still used by a flow and"
- " cannot be removed\n",
- PORT_ID(priv), rxq->idx);
- mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
+ " cannot be removed\n", dev->data->port_id, qid);
+ mlx5_rxq_release(dev, qid);
}
/**
if (!dev->data->dev_conf.intr_conf.rxq)
return 0;
mlx5_rx_intr_vec_disable(dev);
- intr_handle->intr_vec = mlx5_malloc(0,
- n * sizeof(intr_handle->intr_vec[0]),
- 0, SOCKET_ID_ANY);
- if (intr_handle->intr_vec == NULL) {
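+	/* The interrupt handle is opaque now, allocate the vector list through the accessor API. */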
+ if (rte_intr_vec_list_alloc(intr_handle, NULL, n)) {
DRV_LOG(ERR,
"port %u failed to allocate memory for interrupt"
" vector, Rx interrupts will not be supported",
rte_errno = ENOMEM;
return -rte_errno;
}
- intr_handle->type = RTE_INTR_HANDLE_EXT;
+
+ if (rte_intr_type_set(intr_handle, RTE_INTR_HANDLE_EXT))
+ return -rte_errno;
+
for (i = 0; i != n; ++i) {
/* This rxq obj must not be released in this function. */
- struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
- struct mlx5_rxq_obj *rxq_obj = rxq_ctrl ? rxq_ctrl->obj : NULL;
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
+ struct mlx5_rxq_obj *rxq_obj = rxq ? rxq->ctrl->obj : NULL;
int rc;
/* Skip queues that cannot request interrupts. */
if (!rxq_obj || (!rxq_obj->ibv_channel &&
!rxq_obj->devx_channel)) {
/* Use invalid intr_vec[] index to disable entry. */
- intr_handle->intr_vec[i] =
- RTE_INTR_VEC_RXTX_OFFSET +
- RTE_MAX_RXTX_INTR_VEC_ID;
- /* Decrease the rxq_ctrl's refcnt */
- if (rxq_ctrl)
- mlx5_rxq_release(dev, i);
+ if (rte_intr_vec_list_index_set(intr_handle, i,
+ RTE_INTR_VEC_RXTX_OFFSET + RTE_MAX_RXTX_INTR_VEC_ID))
+ return -rte_errno;
continue;
}
+ mlx5_rxq_ref(dev, i);
if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
DRV_LOG(ERR,
"port %u too many Rx queues for interrupt"
mlx5_rx_intr_vec_disable(dev);
return -rte_errno;
}
- intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
- intr_handle->efds[count] = rxq_obj->fd;
+
+ if (rte_intr_vec_list_index_set(intr_handle, i,
+ RTE_INTR_VEC_RXTX_OFFSET + count))
+ return -rte_errno;
+ if (rte_intr_efds_index_set(intr_handle, count,
+ rxq_obj->fd))
+ return -rte_errno;
count++;
}
if (!count)
mlx5_rx_intr_vec_disable(dev);
- else
- intr_handle->nb_efd = count;
+ else if (rte_intr_nb_efd_set(intr_handle, count))
+ return -rte_errno;
return 0;
}
if (!dev->data->dev_conf.intr_conf.rxq)
return;
- if (!intr_handle->intr_vec)
+ if (rte_intr_vec_list_index_get(intr_handle, 0) < 0)
goto free;
for (i = 0; i != n; ++i) {
- if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
- RTE_MAX_RXTX_INTR_VEC_ID)
+ if (rte_intr_vec_list_index_get(intr_handle, i) ==
+ RTE_INTR_VEC_RXTX_OFFSET + RTE_MAX_RXTX_INTR_VEC_ID)
continue;
/**
* Need to access directly the queue to release the reference
* kept in mlx5_rx_intr_vec_enable().
*/
- mlx5_rxq_release(dev, i);
+ mlx5_rxq_deref(dev, i);
}
free:
rte_intr_free_epoll_fd(intr_handle);
- if (intr_handle->intr_vec)
- mlx5_free(intr_handle->intr_vec);
- intr_handle->nb_efd = 0;
- intr_handle->intr_vec = NULL;
+
+ rte_intr_vec_list_free(intr_handle);
+
+ rte_intr_nb_efd_set(intr_handle, 0);
}
/**
int sq_n = 0;
uint32_t doorbell_hi;
uint64_t doorbell;
- void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
doorbell = (uint64_t)doorbell_hi << 32;
doorbell |= rxq->cqn;
- rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
- mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
- cq_db_reg, rxq->uar_lock_cq);
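+	/* mlx5_doorbell_ring() updates the CQ arm doorbell record and writes the 64-bit doorbell value to the UAR. */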
+ mlx5_doorbell_ring(&rxq->uar_data, rte_cpu_to_be_64(doorbell),
+ doorbell_hi, &rxq->cq_db[MLX5_CQ_ARM_DB], 0);
}
/**
int
mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct mlx5_rxq_ctrl *rxq_ctrl;
-
- rxq_ctrl = mlx5_rxq_get(dev, rx_queue_id);
- if (!rxq_ctrl)
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);
+ if (!rxq)
goto error;
- if (rxq_ctrl->irq) {
- if (!rxq_ctrl->obj) {
- mlx5_rxq_release(dev, rx_queue_id);
+ if (rxq->ctrl->irq) {
+ if (!rxq->ctrl->obj)
goto error;
- }
- mlx5_arm_cq(&rxq_ctrl->rxq, rxq_ctrl->rxq.cq_arm_sn);
+ mlx5_arm_cq(&rxq->ctrl->rxq, rxq->ctrl->rxq.cq_arm_sn);
}
- mlx5_rxq_release(dev, rx_queue_id);
return 0;
error:
rte_errno = EINVAL;
mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_ctrl *rxq_ctrl;
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);
int ret = 0;
- rxq_ctrl = mlx5_rxq_get(dev, rx_queue_id);
- if (!rxq_ctrl) {
+ if (!rxq) {
rte_errno = EINVAL;
return -rte_errno;
}
- if (!rxq_ctrl->obj)
+ if (!rxq->ctrl->obj)
goto error;
- if (rxq_ctrl->irq) {
- ret = priv->obj_ops.rxq_event_get(rxq_ctrl->obj);
+ if (rxq->ctrl->irq) {
+ ret = priv->obj_ops.rxq_event_get(rxq->ctrl->obj);
if (ret < 0)
goto error;
- rxq_ctrl->rxq.cq_arm_sn++;
+ rxq->ctrl->rxq.cq_arm_sn++;
}
- mlx5_rxq_release(dev, rx_queue_id);
return 0;
error:
/**
rte_errno = errno;
else
rte_errno = EINVAL;
- ret = rte_errno; /* Save rte_errno before cleanup. */
- mlx5_rxq_release(dev, rx_queue_id);
- if (ret != EAGAIN)
+ if (rte_errno != EAGAIN)
DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
dev->data->port_id, rx_queue_id);
- rte_errno = ret; /* Restore rte_errno. */
return -rte_errno;
}
struct mlx5_rxq_obj *rxq_obj;
LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
+ if (rxq_obj->rxq_ctrl == NULL)
+ continue;
+ if (rxq_obj->rxq_ctrl->rxq.shared &&
+ !LIST_EMPTY(&rxq_obj->rxq_ctrl->owners))
+ continue;
DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
++ret;
rte_mempool_free(mp);
/* Unset mempool for each Rx queue. */
for (i = 0; i != priv->rxqs_n; ++i) {
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+ struct mlx5_rxq_data *rxq = mlx5_rxq_data_get(dev, i);
if (rxq == NULL)
continue;
unsigned int buf_len;
unsigned int obj_num;
unsigned int obj_size;
- unsigned int strd_num_n = 0;
- unsigned int strd_sz_n = 0;
+ unsigned int log_strd_num = 0;
+ unsigned int log_strd_sz = 0;
unsigned int i;
unsigned int n_ibv = 0;
+ int ret;
if (!mlx5_mprq_enabled(dev))
return 0;
/* Count the total number of descriptors configured. */
for (i = 0; i != priv->rxqs_n; ++i) {
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
- struct mlx5_rxq_ctrl *rxq_ctrl = container_of
- (rxq, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
+ struct mlx5_rxq_data *rxq;
- if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
+ if (rxq_ctrl == NULL ||
+ rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
continue;
+ rxq = &rxq_ctrl->rxq;
n_ibv++;
desc += 1 << rxq->elts_n;
/* Get the max number of strides. */
- if (strd_num_n < rxq->strd_num_n)
- strd_num_n = rxq->strd_num_n;
+ if (log_strd_num < rxq->log_strd_num)
+ log_strd_num = rxq->log_strd_num;
/* Get the max size of a stride. */
- if (strd_sz_n < rxq->strd_sz_n)
- strd_sz_n = rxq->strd_sz_n;
- }
- MLX5_ASSERT(strd_num_n && strd_sz_n);
- buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
- obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
- sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
+ if (log_strd_sz < rxq->log_strd_sz)
+ log_strd_sz = rxq->log_strd_sz;
+ }
+ MLX5_ASSERT(log_strd_num && log_strd_sz);
+ buf_len = RTE_BIT32(log_strd_num) * RTE_BIT32(log_strd_sz);
+ obj_size = sizeof(struct mlx5_mprq_buf) + buf_len +
+ RTE_BIT32(log_strd_num) *
+ sizeof(struct rte_mbuf_ext_shared_info) +
+ RTE_PKTMBUF_HEADROOM;
/*
* Received packets can be either memcpy'd or externally referenced. In
* case that the packet is attached to an mbuf as an external buffer, as
snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
0, NULL, NULL, mlx5_mprq_buf_init,
- (void *)(uintptr_t)(1 << strd_num_n),
+ (void *)((uintptr_t)1 << log_strd_num),
dev->device->numa_node, 0);
if (mp == NULL) {
DRV_LOG(ERR,
rte_errno = ENOMEM;
return -rte_errno;
}
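+	/* Register the new mempool with the MR cache so its buffers can be used for DMA. */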
+ ret = mlx5_mr_mempool_register(priv->sh->cdev, mp, false);
+ if (ret < 0 && rte_errno != EEXIST) {
+ ret = rte_errno;
+ DRV_LOG(ERR, "port %u failed to register a mempool for Multi-Packet RQ",
+ dev->data->port_id);
+ rte_mempool_free(mp);
+ rte_errno = ret;
+ return -rte_errno;
+ }
priv->mprq_mp = mp;
exit:
/* Set mempool for each Rx queue. */
for (i = 0; i != priv->rxqs_n; ++i) {
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
- struct mlx5_rxq_ctrl *rxq_ctrl = container_of
- (rxq, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
- if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
+ if (rxq_ctrl == NULL ||
+ rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
continue;
- rxq->mprq_mp = mp;
+ rxq_ctrl->rxq.mprq_mp = mp;
}
DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
dev->data->port_id);
}
/**
- * Create a DPDK Rx queue.
+ * Prepare both size and number of stride for Multi-Packet RQ.
*
* @param dev
* Pointer to Ethernet device.
* RX queue index.
* @param desc
* Number of descriptors to configure in queue.
+ * @param rx_seg_en
+ *   Indicator whether Rx segments are enabled; if so, Multi-Packet RQ is not enabled.
+ * @param min_mbuf_size
+ *   Non-scatter minimum mbuf size, max_rx_pktlen plus overhead.
+ * @param actual_log_stride_num
+ * Log number of strides to configure for this queue.
+ * @param actual_log_stride_size
+ * Log stride size to configure for this queue.
+ *
+ * @return
+ * 0 if Multi-Packet RQ is supported, otherwise -1.
+ */
+static int
+mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ bool rx_seg_en, uint32_t min_mbuf_size,
+ uint32_t *actual_log_stride_num,
+ uint32_t *actual_log_stride_size)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *config = &priv->config;
+ uint32_t log_min_stride_num = config->mprq.log_min_stride_num;
+ uint32_t log_max_stride_num = config->mprq.log_max_stride_num;
+ uint32_t log_def_stride_num =
+ RTE_MIN(RTE_MAX(MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM,
+ log_min_stride_num),
+ log_max_stride_num);
+ uint32_t log_min_stride_size = config->mprq.log_min_stride_size;
+ uint32_t log_max_stride_size = config->mprq.log_max_stride_size;
+ uint32_t log_def_stride_size =
+ RTE_MIN(RTE_MAX(MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE,
+ log_min_stride_size),
+ log_max_stride_size);
+ uint32_t log_stride_wqe_size;
+
+ if (mlx5_check_mprq_support(dev) != 1 || rx_seg_en)
+ goto unsupport;
+ /* Checks if chosen number of strides is in supported range. */
+ if (config->mprq.log_stride_num > log_max_stride_num ||
+ config->mprq.log_stride_num < log_min_stride_num) {
+ *actual_log_stride_num = log_def_stride_num;
+ DRV_LOG(WARNING,
+ "Port %u Rx queue %u number of strides for Multi-Packet RQ is out of range, setting default value (%u)",
+ dev->data->port_id, idx, RTE_BIT32(log_def_stride_num));
+ } else {
+ *actual_log_stride_num = config->mprq.log_stride_num;
+ }
+ if (config->mprq.log_stride_size) {
+ /* Checks if chosen size of stride is in supported range. */
+ if (config->mprq.log_stride_size > log_max_stride_size ||
+ config->mprq.log_stride_size < log_min_stride_size) {
+ *actual_log_stride_size = log_def_stride_size;
+ DRV_LOG(WARNING,
+ "Port %u Rx queue %u size of a stride for Multi-Packet RQ is out of range, setting default value (%u)",
+ dev->data->port_id, idx,
+ RTE_BIT32(log_def_stride_size));
+ } else {
+ *actual_log_stride_size = config->mprq.log_stride_size;
+ }
+ } else {
+ if (min_mbuf_size <= RTE_BIT32(log_max_stride_size))
+ *actual_log_stride_size = log2above(min_mbuf_size);
+ else
+ goto unsupport;
+ }
+ log_stride_wqe_size = *actual_log_stride_num + *actual_log_stride_size;
+ /* Check if WQE buffer size is supported by hardware. */
+ if (log_stride_wqe_size < config->mprq.log_min_stride_wqe_size) {
+ *actual_log_stride_num = log_def_stride_num;
+ *actual_log_stride_size = log_def_stride_size;
+ DRV_LOG(WARNING,
+ "Port %u Rx queue %u size of WQE buffer for Multi-Packet RQ is too small, setting default values (stride_num_n=%u, stride_size_n=%u)",
+ dev->data->port_id, idx, RTE_BIT32(log_def_stride_num),
+ RTE_BIT32(log_def_stride_size));
+ log_stride_wqe_size = log_def_stride_num + log_def_stride_size;
+ }
+ MLX5_ASSERT(log_stride_wqe_size >= config->mprq.log_min_stride_wqe_size);
+ if (desc <= RTE_BIT32(*actual_log_stride_num))
+ goto unsupport;
+ if (min_mbuf_size > RTE_BIT32(log_stride_wqe_size)) {
+ DRV_LOG(WARNING, "Port %u Rx queue %u "
+ "Multi-Packet RQ is unsupported, WQE buffer size (%u) "
+ "is smaller than min mbuf size (%u)",
+ dev->data->port_id, idx, RTE_BIT32(log_stride_wqe_size),
+ min_mbuf_size);
+ goto unsupport;
+ }
+ DRV_LOG(DEBUG, "Port %u Rx queue %u "
+ "Multi-Packet RQ is enabled strd_num_n = %u, strd_sz_n = %u",
+ dev->data->port_id, idx, RTE_BIT32(*actual_log_stride_num),
+ RTE_BIT32(*actual_log_stride_size));
+ return 0;
+unsupport:
+ if (config->mprq.enabled)
+ DRV_LOG(WARNING,
+ "Port %u MPRQ is requested but cannot be enabled\n"
+ " (requested: pkt_sz = %u, desc_num = %u,"
+ " rxq_num = %u, stride_sz = %u, stride_num = %u\n"
+ " supported: min_rxqs_num = %u, min_buf_wqe_sz = %u"
+ " min_stride_sz = %u, max_stride_sz = %u).\n"
+ "Rx segment is %senable.",
+ dev->data->port_id, min_mbuf_size, desc, priv->rxqs_n,
+ RTE_BIT32(config->mprq.log_stride_size),
+ RTE_BIT32(config->mprq.log_stride_num),
+ config->mprq.min_rxqs_num,
+ RTE_BIT32(config->mprq.log_min_stride_wqe_size),
+ RTE_BIT32(config->mprq.log_min_stride_size),
+ RTE_BIT32(config->mprq.log_max_stride_size),
+ rx_seg_en ? "" : "not ");
+ return -1;
+}
+
+/**
+ * Create a DPDK Rx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param rxq
+ * RX queue private data.
+ * @param desc
+ * Number of descriptors to configure in queue.
* @param socket
* NUMA socket on which memory must be allocated.
*
* A DPDK queue object on success, NULL otherwise and rte_errno is set.
*/
struct mlx5_rxq_ctrl *
-mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
+ uint16_t desc,
unsigned int socket, const struct rte_eth_rxconf *conf,
- struct rte_mempool *mp)
+ const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
{
+ uint16_t idx = rxq->idx;
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *tmpl;
- unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
- unsigned int mprq_stride_nums;
- unsigned int mprq_stride_size;
- unsigned int mprq_stride_cap;
+ unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp);
struct mlx5_dev_config *config = &priv->config;
- /*
- * Always allocate extra slots, even if eventually
- * the vector Rx will not be used.
- */
- uint16_t desc_n =
- desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
uint64_t offloads = conf->offloads |
dev->data->dev_conf.rxmode.offloads;
- unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
- const int mprq_en = mlx5_check_mprq_support(dev) > 0;
- unsigned int max_rx_pkt_len = lro_on_queue ?
+ unsigned int lro_on_queue = !!(offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO);
+ unsigned int max_rx_pktlen = lro_on_queue ?
dev->data->dev_conf.rxmode.max_lro_pkt_size :
- dev->data->dev_conf.rxmode.max_rx_pkt_len;
- unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
+ dev->data->mtu + (unsigned int)RTE_ETHER_HDR_LEN +
+ RTE_ETHER_CRC_LEN;
+ unsigned int non_scatter_min_mbuf_size = max_rx_pktlen +
RTE_PKTMBUF_HEADROOM;
unsigned int max_lro_size = 0;
unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
+ uint32_t mprq_log_actual_stride_num = 0;
+ uint32_t mprq_log_actual_stride_size = 0;
+ bool rx_seg_en = n_seg != 1 || rx_seg[0].offset || rx_seg[0].length;
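+	/* MPRQ can be used only when buffer split is not requested and the stride layout fits the device limits, see mlx5_mprq_prepare(). */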
+ const int mprq_en = !mlx5_mprq_prepare(dev, idx, desc, rx_seg_en,
+ non_scatter_min_mbuf_size,
+ &mprq_log_actual_stride_num,
+ &mprq_log_actual_stride_size);
+ /*
+ * Always allocate extra slots, even if eventually
+ * the vector Rx will not be used.
+ */
+ uint16_t desc_n = desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
+ size_t alloc_size = sizeof(*tmpl) + desc_n * sizeof(struct rte_mbuf *);
+ const struct rte_eth_rxseg_split *qs_seg = rx_seg;
+ unsigned int tail_len;
- if (non_scatter_min_mbuf_size > mb_len && !(offloads &
- DEV_RX_OFFLOAD_SCATTER)) {
- DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
- " configured and no enough mbuf space(%u) to contain "
- "the maximum RX packet length(%u) with head-room(%u)",
- dev->data->port_id, idx, mb_len, max_rx_pkt_len,
- RTE_PKTMBUF_HEADROOM);
- rte_errno = ENOSPC;
- return NULL;
+ if (mprq_en) {
+ /* Trim the number of descs needed. */
+ desc >>= mprq_log_actual_stride_num;
+ alloc_size += desc * sizeof(struct mlx5_mprq_buf *);
}
- tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
- desc_n * sizeof(struct rte_mbuf *), 0, socket);
+ tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, alloc_size, 0, socket);
if (!tmpl) {
rte_errno = ENOMEM;
return NULL;
}
- tmpl->type = MLX5_RXQ_TYPE_STANDARD;
- if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
- MLX5_MR_BTREE_CACHE_N, socket)) {
- /* rte_errno is already set. */
- goto error;
+ LIST_INIT(&tmpl->owners);
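+	/* A shared queue is linked into the per-device shared list so sibling ports can look it up by (group, qid). */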
+ if (conf->share_group > 0) {
+ tmpl->rxq.shared = 1;
+ tmpl->share_group = conf->share_group;
+ tmpl->share_qid = conf->share_qid;
+ LIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry);
}
- tmpl->socket = socket;
- if (dev->data->dev_conf.intr_conf.rxq)
- tmpl->irq = 1;
- mprq_stride_nums = config->mprq.stride_num_n ?
- config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
- mprq_stride_size = non_scatter_min_mbuf_size <=
- (1U << config->mprq.max_stride_size_n) ?
- log2above(non_scatter_min_mbuf_size) : MLX5_MPRQ_STRIDE_SIZE_N;
- mprq_stride_cap = (config->mprq.stride_num_n ?
- (1U << config->mprq.stride_num_n) : (1U << mprq_stride_nums)) *
- (config->mprq.stride_size_n ?
- (1U << config->mprq.stride_size_n) : (1U << mprq_stride_size));
+ rxq->ctrl = tmpl;
+ LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);
+ MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
/*
- * This Rx queue can be configured as a Multi-Packet RQ if all of the
- * following conditions are met:
- * - MPRQ is enabled.
- * - The number of descs is more than the number of strides.
- * - max_rx_pkt_len plus overhead is less than the max size
- * of a stride or mprq_stride_size is specified by a user.
- * Need to nake sure that there are enough stides to encap
- * the maximum packet size in case mprq_stride_size is set.
- * Otherwise, enable Rx scatter if necessary.
+ * Save the original segment configuration in the shared queue
+ * descriptor for the later check on the sibling queue creation.
*/
- if (mprq_en && desc > (1U << mprq_stride_nums) &&
- (non_scatter_min_mbuf_size <=
- (1U << config->mprq.max_stride_size_n) ||
- (config->mprq.stride_size_n &&
- non_scatter_min_mbuf_size <= mprq_stride_cap))) {
- /* TODO: Rx scatter isn't supported yet. */
- tmpl->rxq.sges_n = 0;
- /* Trim the number of descs needed. */
- desc >>= mprq_stride_nums;
- tmpl->rxq.strd_num_n = config->mprq.stride_num_n ?
- config->mprq.stride_num_n : mprq_stride_nums;
- tmpl->rxq.strd_sz_n = config->mprq.stride_size_n ?
- config->mprq.stride_size_n : mprq_stride_size;
- tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
- tmpl->rxq.strd_scatter_en =
- !!(offloads & DEV_RX_OFFLOAD_SCATTER);
- tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
- config->mprq.max_memcpy_len);
- max_lro_size = RTE_MIN(max_rx_pkt_len,
- (1u << tmpl->rxq.strd_num_n) *
- (1u << tmpl->rxq.strd_sz_n));
- DRV_LOG(DEBUG,
- "port %u Rx queue %u: Multi-Packet RQ is enabled"
- " strd_num_n = %u, strd_sz_n = %u",
- dev->data->port_id, idx,
- tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
- } else if (max_rx_pkt_len <= first_mb_free_size) {
- tmpl->rxq.sges_n = 0;
- max_lro_size = max_rx_pkt_len;
- } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
- unsigned int size = non_scatter_min_mbuf_size;
- unsigned int sges_n;
+ tmpl->rxseg_n = n_seg;
+ rte_memcpy(tmpl->rxseg, qs_seg,
+ sizeof(struct rte_eth_rxseg_split) * n_seg);
+ /*
+ * Build the array of actual buffer offsets and lengths.
+ * Pad with the buffers from the last memory pool if
+ * needed to handle max size packets, replace zero length
+ * with the buffer length from the pool.
+ */
+ tail_len = max_rx_pktlen;
+ do {
+ struct mlx5_eth_rxseg *hw_seg =
+ &tmpl->rxq.rxseg[tmpl->rxq.rxseg_n];
+ uint32_t buf_len, offset, seg_len;
+
+ /*
+		 * For the buffers beyond the described segments the offset is zero,
+		 * only the first buffer keeps the headroom.
+ */
+ buf_len = rte_pktmbuf_data_room_size(qs_seg->mp);
+ offset = (tmpl->rxq.rxseg_n >= n_seg ? 0 : qs_seg->offset) +
+ (tmpl->rxq.rxseg_n ? 0 : RTE_PKTMBUF_HEADROOM);
+ /*
+		 * For the buffers beyond the described segments the length is
+		 * the pool buffer length; a zero length is likewise replaced
+		 * with the available pool buffer length.
+ */
+ seg_len = tmpl->rxq.rxseg_n >= n_seg ? buf_len :
+ qs_seg->length ?
+ qs_seg->length :
+ (buf_len - offset);
+		/* Check is done in long int, no overflows. */
+ if (buf_len < seg_len + offset) {
+ DRV_LOG(ERR, "port %u Rx queue %u: Split offset/length "
+ "%u/%u can't be satisfied",
+ dev->data->port_id, idx,
+ qs_seg->length, qs_seg->offset);
+ rte_errno = EINVAL;
+ goto error;
+ }
+ if (seg_len > tail_len)
+ seg_len = buf_len - offset;
+ if (++tmpl->rxq.rxseg_n > MLX5_MAX_RXQ_NSEG) {
+ DRV_LOG(ERR,
+ "port %u too many SGEs (%u) needed to handle"
+ " requested maximum packet size %u, the maximum"
+ " supported are %u", dev->data->port_id,
+ tmpl->rxq.rxseg_n, max_rx_pktlen,
+ MLX5_MAX_RXQ_NSEG);
+ rte_errno = ENOTSUP;
+ goto error;
+ }
+ /* Build the actual scattering element in the queue object. */
+ hw_seg->mp = qs_seg->mp;
+ MLX5_ASSERT(offset <= UINT16_MAX);
+ MLX5_ASSERT(seg_len <= UINT16_MAX);
+ hw_seg->offset = (uint16_t)offset;
+ hw_seg->length = (uint16_t)seg_len;
+ /*
+		 * Advance the segment descriptor; the padding is based
+		 * on the attributes of the last descriptor.
+ */
+ if (tmpl->rxq.rxseg_n < n_seg)
+ qs_seg++;
+ tail_len -= RTE_MIN(tail_len, seg_len);
+ } while (tail_len || !rte_is_power_of_2(tmpl->rxq.rxseg_n));
+ MLX5_ASSERT(tmpl->rxq.rxseg_n &&
+ tmpl->rxq.rxseg_n <= MLX5_MAX_RXQ_NSEG);
+ if (tmpl->rxq.rxseg_n > 1 && !(offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
+ DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
+ " configured and no enough mbuf space(%u) to contain "
+ "the maximum RX packet length(%u) with head-room(%u)",
+ dev->data->port_id, idx, mb_len, max_rx_pktlen,
+ RTE_PKTMBUF_HEADROOM);
+ rte_errno = ENOSPC;
+ goto error;
+ }
+ tmpl->type = MLX5_RXQ_TYPE_STANDARD;
+ if (mlx5_mr_ctrl_init(&tmpl->rxq.mr_ctrl,
+ &priv->sh->cdev->mr_scache.dev_gen, socket)) {
+ /* rte_errno is already set. */
+ goto error;
+ }
+ tmpl->socket = socket;
+ if (dev->data->dev_conf.intr_conf.rxq)
+ tmpl->irq = 1;
+ if (mprq_en) {
+ /* TODO: Rx scatter isn't supported yet. */
+ tmpl->rxq.sges_n = 0;
+ tmpl->rxq.log_strd_num = mprq_log_actual_stride_num;
+ tmpl->rxq.log_strd_sz = mprq_log_actual_stride_size;
+ tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
+ tmpl->rxq.strd_scatter_en =
+ !!(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
+ tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
+ config->mprq.max_memcpy_len);
+ max_lro_size = RTE_MIN(max_rx_pktlen,
+ RTE_BIT32(tmpl->rxq.log_strd_num) *
+ RTE_BIT32(tmpl->rxq.log_strd_sz));
+ } else if (tmpl->rxq.rxseg_n == 1) {
+ MLX5_ASSERT(max_rx_pktlen <= first_mb_free_size);
+ tmpl->rxq.sges_n = 0;
+ max_lro_size = max_rx_pktlen;
+ } else if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
+ unsigned int sges_n;
if (lro_on_queue && first_mb_free_size <
MLX5_MAX_LRO_HEADER_FIX) {
* Determine the number of SGEs needed for a full packet
* and round it to the next power of two.
*/
- sges_n = log2above((size / mb_len) + !!(size % mb_len));
+ sges_n = log2above(tmpl->rxq.rxseg_n);
if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
DRV_LOG(ERR,
"port %u too many SGEs (%u) needed to handle"
" requested maximum packet size %u, the maximum"
" supported are %u", dev->data->port_id,
- 1 << sges_n, max_rx_pkt_len,
+ 1 << sges_n, max_rx_pktlen,
1u << MLX5_MAX_LOG_RQ_SEGS);
rte_errno = ENOTSUP;
goto error;
}
tmpl->rxq.sges_n = sges_n;
- max_lro_size = max_rx_pkt_len;
+ max_lro_size = max_rx_pktlen;
}
- if (config->mprq.enabled && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
- DRV_LOG(WARNING,
- "port %u MPRQ is requested but cannot be enabled\n"
- " (requested: pkt_sz = %u, desc_num = %u,"
- " rxq_num = %u, stride_sz = %u, stride_num = %u\n"
- " supported: min_rxqs_num = %u,"
- " min_stride_sz = %u, max_stride_sz = %u).",
- dev->data->port_id, non_scatter_min_mbuf_size,
- desc, priv->rxqs_n,
- config->mprq.stride_size_n ?
- (1U << config->mprq.stride_size_n) :
- (1U << mprq_stride_size),
- config->mprq.stride_num_n ?
- (1U << config->mprq.stride_num_n) :
- (1U << mprq_stride_nums),
- config->mprq.min_rxqs_num,
- (1U << config->mprq.min_stride_size_n),
- (1U << config->mprq.max_stride_size_n));
DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
dev->data->port_id, 1 << tmpl->rxq.sges_n);
if (desc % (1 << tmpl->rxq.sges_n)) {
}
mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
/* Toggle RX checksum offload if hardware supports it. */
- tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
- tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
+ tmpl->rxq.csum = !!(offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM);
+ /* Configure Rx timestamp. */
+ tmpl->rxq.hw_timestamp = !!(offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP);
+ tmpl->rxq.timestamp_rx_flag = 0;
+ if (tmpl->rxq.hw_timestamp && rte_mbuf_dyn_rx_timestamp_register(
+ &tmpl->rxq.timestamp_offset,
+ &tmpl->rxq.timestamp_rx_flag) != 0) {
+ DRV_LOG(ERR, "Cannot register Rx timestamp field/flag");
+ goto error;
+ }
/* Configure VLAN stripping. */
- tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+ tmpl->rxq.vlan_strip = !!(offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
/* By default, FCS (CRC) is stripped by hardware. */
tmpl->rxq.crc_present = 0;
tmpl->rxq.lro = lro_on_queue;
- if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+ if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
if (config->hw_fcs_strip) {
/*
* RQs used for LRO-enabled TIRs should not be
dev->data->port_id,
tmpl->rxq.crc_present ? "disabled" : "enabled",
tmpl->rxq.crc_present << 2);
- /* Save port ID. */
tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
- (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
+ (!!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS));
+ /* Save port ID. */
tmpl->rxq.port_id = dev->data->port_id;
- tmpl->priv = priv;
- tmpl->rxq.mp = mp;
+ tmpl->sh = priv->sh;
+ tmpl->rxq.mp = rx_seg[0].mp;
tmpl->rxq.elts_n = log2above(desc);
- tmpl->rxq.rq_repl_thresh =
- MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
- tmpl->rxq.elts =
- (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
-#ifndef RTE_ARCH_64
- tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;
-#endif
+ tmpl->rxq.rq_repl_thresh = MLX5_VPMD_RXQ_RPLNSH_THRESH(desc_n);
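+	/* elts and mprq_bufs live in the same allocation made above, right after the control structure. */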
+ tmpl->rxq.elts = (struct rte_mbuf *(*)[desc_n])(tmpl + 1);
+ tmpl->rxq.mprq_bufs =
+ (struct mlx5_mprq_buf *(*)[desc])(*tmpl->rxq.elts + desc_n);
tmpl->rxq.idx = idx;
- rte_atomic32_inc(&tmpl->refcnt);
LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
return tmpl;
error:
+ mlx5_mr_btree_free(&tmpl->rxq.mr_ctrl.cache_bh);
mlx5_free(tmpl);
return NULL;
}
*
* @param dev
* Pointer to Ethernet device.
- * @param idx
- * RX queue index.
+ * @param rxq
+ * RX queue.
* @param desc
* Number of descriptors to configure in queue.
* @param hairpin_conf
* A DPDK queue object on success, NULL otherwise and rte_errno is set.
*/
struct mlx5_rxq_ctrl *
-mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
+ uint16_t desc,
const struct rte_eth_hairpin_conf *hairpin_conf)
{
+ uint16_t idx = rxq->idx;
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *tmpl;
rte_errno = ENOMEM;
return NULL;
}
+ LIST_INIT(&tmpl->owners);
+ rxq->ctrl = tmpl;
+ LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);
tmpl->type = MLX5_RXQ_TYPE_HAIRPIN;
tmpl->socket = SOCKET_ID_ANY;
tmpl->rxq.rss_hash = 0;
tmpl->rxq.port_id = dev->data->port_id;
- tmpl->priv = priv;
+ tmpl->sh = priv->sh;
tmpl->rxq.mp = NULL;
tmpl->rxq.elts_n = log2above(desc);
tmpl->rxq.elts = NULL;
tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
- tmpl->hairpin_conf = *hairpin_conf;
tmpl->rxq.idx = idx;
- rte_atomic32_inc(&tmpl->refcnt);
+ rxq->hairpin_conf = *hairpin_conf;
+ mlx5_rxq_ref(dev, idx);
LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
return tmpl;
}
+/**
+ * Increase Rx queue reference count.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * RX queue index.
+ *
+ * @return
+ * A pointer to the queue if it exists, NULL otherwise.
+ */
+struct mlx5_rxq_priv *
+mlx5_rxq_ref(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+
+ if (rxq != NULL)
+ __atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+ return rxq;
+}
+
+/**
+ * Dereference a Rx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * RX queue index.
+ *
+ * @return
+ * Updated reference count.
+ */
+uint32_t
+mlx5_rxq_deref(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+
+ if (rxq == NULL)
+ return 0;
+ return __atomic_sub_fetch(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+}
+
/**
* Get a Rx queue.
*
* @return
* A pointer to the queue if it exists, NULL otherwise.
*/
-struct mlx5_rxq_ctrl *
+struct mlx5_rxq_priv *
mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
- struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
- if (rxq_data) {
- rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
- rte_atomic32_inc(&rxq_ctrl->refcnt);
- }
- return rxq_ctrl;
+ MLX5_ASSERT(priv->rxq_privs != NULL);
+ return (*priv->rxq_privs)[idx];
+}
+
+/**
+ * Get Rx queue shareable control.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * RX queue index.
+ *
+ * @return
+ * A pointer to the queue control if it exists, NULL otherwise.
+ */
+struct mlx5_rxq_ctrl *
+mlx5_rxq_ctrl_get(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+
+ return rxq == NULL ? NULL : rxq->ctrl;
+}
+
+/**
+ * Get Rx queue shareable data.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * RX queue index.
+ *
+ * @return
+ * A pointer to the queue data if it exists, NULL otherwise.
+ */
+struct mlx5_rxq_data *
+mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+
+ return rxq == NULL ? NULL : &rxq->ctrl->rxq;
}
/**
mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_priv *rxq;
struct mlx5_rxq_ctrl *rxq_ctrl;
+ uint32_t refcnt;
- if (!(*priv->rxqs)[idx])
+ if (priv->rxq_privs == NULL)
return 0;
- rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
- if (!rte_atomic32_dec_and_test(&rxq_ctrl->refcnt))
+ rxq = mlx5_rxq_get(dev, idx);
+ if (rxq == NULL || rxq->refcnt == 0)
+ return 0;
+ rxq_ctrl = rxq->ctrl;
+ refcnt = mlx5_rxq_deref(dev, idx);
+ if (refcnt > 1) {
return 1;
- if (rxq_ctrl->obj) {
- priv->obj_ops.rxq_obj_release(rxq_ctrl->obj);
- LIST_REMOVE(rxq_ctrl->obj, next);
- mlx5_free(rxq_ctrl->obj);
- rxq_ctrl->obj = NULL;
- }
- if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
- mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
- rxq_free_elts(rxq_ctrl);
- }
- LIST_REMOVE(rxq_ctrl, next);
- mlx5_free(rxq_ctrl);
- (*priv->rxqs)[idx] = NULL;
+ } else if (refcnt == 1) { /* RxQ stopped. */
+ priv->obj_ops.rxq_obj_release(rxq);
+ if (!rxq_ctrl->started && rxq_ctrl->obj != NULL) {
+ LIST_REMOVE(rxq_ctrl->obj, next);
+ mlx5_free(rxq_ctrl->obj);
+ rxq_ctrl->obj = NULL;
+ }
+ if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
+ if (!rxq_ctrl->started)
+ rxq_free_elts(rxq_ctrl);
+ dev->data->rx_queue_state[idx] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+ } else { /* Refcnt zero, closing device. */
+ LIST_REMOVE(rxq, owner_entry);
+ if (LIST_EMPTY(&rxq_ctrl->owners)) {
+ if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
+ mlx5_mr_btree_free
+ (&rxq_ctrl->rxq.mr_ctrl.cache_bh);
+ if (rxq_ctrl->rxq.shared)
+ LIST_REMOVE(rxq_ctrl, share_entry);
+ LIST_REMOVE(rxq_ctrl, next);
+ mlx5_free(rxq_ctrl);
+ }
+ dev->data->rx_queues[idx] = NULL;
+ mlx5_free(rxq);
+ (*priv->rxq_privs)[idx] = NULL;
+ }
return 0;
}
mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
+ struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
- if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) {
- rxq_ctrl = container_of((*priv->rxqs)[idx],
- struct mlx5_rxq_ctrl,
- rxq);
+ if (idx < priv->rxqs_n && rxq_ctrl != NULL)
return rxq_ctrl->type;
- }
return MLX5_RXQ_TYPE_UNDEFINED;
}
+/**
+ * Get a Rx hairpin queue configuration.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * RX queue index.
+ *
+ * @return
+ * Pointer to the hairpin configuration if the queue is a hairpin queue,
+ * otherwise NULL.
+ */
+const struct rte_eth_hairpin_conf *
+mlx5_rxq_get_hairpin_conf(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+
+ if (idx < priv->rxqs_n && rxq != NULL) {
+ if (rxq->ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
+ return &rxq->hairpin_conf;
+ }
+ return NULL;
+}
+
+/**
+ * Match the queues listed in the arguments against the queues contained in
+ * an indirection table object.
+ *
+ * @param ind_tbl
+ * Pointer to the indirection table to match.
+ * @param queues
+ * Queues to match against the queues in the indirection table.
+ * @param queues_n
+ * Number of queues in the array.
+ *
+ * @return
+ * 1 if all queues in the indirection table match, 0 otherwise.
+ */
+static int
+mlx5_ind_table_obj_match_queues(const struct mlx5_ind_table_obj *ind_tbl,
+ const uint16_t *queues, uint32_t queues_n)
+{
+ return (ind_tbl->queues_n == queues_n) &&
+ (!memcmp(ind_tbl->queues, queues,
+ ind_tbl->queues_n * sizeof(ind_tbl->queues[0])));
+}
+
/**
* Get an indirection table.
*
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ind_table_obj *ind_tbl;
+ rte_rwlock_read_lock(&priv->ind_tbls_lock);
LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
if ((ind_tbl->queues_n == queues_n) &&
(memcmp(ind_tbl->queues, queues,
ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
- == 0))
+ == 0)) {
+ __atomic_fetch_add(&ind_tbl->refcnt, 1,
+ __ATOMIC_RELAXED);
break;
+ }
}
- if (ind_tbl) {
- unsigned int i;
-
- rte_atomic32_inc(&ind_tbl->refcnt);
- for (i = 0; i != ind_tbl->queues_n; ++i)
- mlx5_rxq_get(dev, ind_tbl->queues[i]);
- }
+ rte_rwlock_read_unlock(&priv->ind_tbls_lock);
return ind_tbl;
}
* Pointer to Ethernet device.
* @param ind_table
* Indirection table to release.
+ * @param standalone
+ * Indirection table for a standalone queue.
+ * @param deref_rxqs
+ * If true, dereference the Rx queues related to the indirection table;
+ * otherwise, leave their reference counts untouched.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
int
mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
- struct mlx5_ind_table_obj *ind_tbl)
+ struct mlx5_ind_table_obj *ind_tbl,
+ bool standalone,
+ bool deref_rxqs)
{
struct mlx5_priv *priv = dev->data->dev_private;
- unsigned int i;
+ unsigned int i, ret;
- if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
- priv->obj_ops.ind_table_destroy(ind_tbl);
- for (i = 0; i != ind_tbl->queues_n; ++i)
- claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
- if (!rte_atomic32_read(&ind_tbl->refcnt)) {
+ rte_rwlock_write_lock(&priv->ind_tbls_lock);
+ ret = __atomic_sub_fetch(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
+ if (!ret && !standalone)
LIST_REMOVE(ind_tbl, next);
- mlx5_free(ind_tbl);
- return 0;
+ rte_rwlock_write_unlock(&priv->ind_tbls_lock);
+ if (ret)
+ return 1;
+ priv->obj_ops.ind_table_destroy(ind_tbl);
+ if (deref_rxqs) {
+ for (i = 0; i != ind_tbl->queues_n; ++i)
+ claim_nonzero(mlx5_rxq_deref(dev, ind_tbl->queues[i]));
}
- return 1;
+ mlx5_free(ind_tbl);
+ return 0;
}
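+
+/*
+ * Lifecycle sketch (illustrative only): within this file an indirection
+ * table is looked up first and created on a miss, then released with
+ * deref_rxqs set so that the Rx queue references taken at setup time are
+ * dropped:
+ *
+ *     ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
+ *     if (ind_tbl == NULL)
+ *         ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
+ *                                          standalone, true);
+ *     (... use ind_tbl ...)
+ *     mlx5_ind_table_obj_release(dev, ind_tbl, standalone, true);
+ */
+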
/**
struct mlx5_ind_table_obj *ind_tbl;
int ret = 0;
+ rte_rwlock_read_lock(&priv->ind_tbls_lock);
LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
DRV_LOG(DEBUG,
"port %u indirection table obj %p still referenced",
dev->data->port_id, (void *)ind_tbl);
++ret;
}
+ rte_rwlock_read_unlock(&priv->ind_tbls_lock);
+ return ret;
+}
+
+/**
+ * Set up the fields of an indirection table structure.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param ind_tbl
+ * Indirection table to set up.
+ * @param ref_qs
+ * Whether to increment RxQ reference counters.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_obj *ind_tbl,
+ bool ref_qs)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t queues_n = ind_tbl->queues_n;
+ uint16_t *queues = ind_tbl->queues;
+ unsigned int i = 0, j;
+ int ret = 0, err;
+ const unsigned int n = rte_is_power_of_2(queues_n) ?
+ log2above(queues_n) :
+ log2above(priv->config.ind_table_max_size);
+
+ if (ref_qs)
+ for (i = 0; i != queues_n; ++i) {
+ if (mlx5_rxq_ref(dev, queues[i]) == NULL) {
+ ret = -rte_errno;
+ goto error;
+ }
+ }
+ ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
+ if (ret)
+ goto error;
+ __atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
+ return 0;
+error:
+ if (ref_qs) {
+ err = rte_errno;
+ for (j = 0; j < i; j++)
+ mlx5_rxq_deref(dev, queues[j]);
+ rte_errno = err;
+ }
+ DRV_LOG(DEBUG, "Port %u cannot set up indirection table.",
+ dev->data->port_id);
return ret;
}
* Queues entering in the indirection table.
* @param queues_n
* Number of queues in the array.
+ * @param standalone
+ * Indirection table for a standalone queue.
+ * @param ref_qs
+ * Whether to increment RxQ reference counters.
*
* @return
* The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
*/
static struct mlx5_ind_table_obj *
mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
- uint32_t queues_n)
+ uint32_t queues_n, bool standalone, bool ref_qs)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ind_table_obj *ind_tbl;
- const unsigned int n = rte_is_power_of_2(queues_n) ?
- log2above(queues_n) :
- log2above(priv->config.ind_table_max_size);
- unsigned int i, j;
int ret;
ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
return NULL;
}
ind_tbl->queues_n = queues_n;
- for (i = 0; i != queues_n; ++i) {
- struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
- if (!rxq)
- goto error;
- ind_tbl->queues[i] = queues[i];
+ ind_tbl->queues = (uint16_t *)(ind_tbl + 1);
+ memcpy(ind_tbl->queues, queues, queues_n * sizeof(*queues));
+ ret = mlx5_ind_table_obj_setup(dev, ind_tbl, ref_qs);
+ if (ret < 0) {
+ mlx5_free(ind_tbl);
+ return NULL;
+ }
+ if (!standalone) {
+ rte_rwlock_write_lock(&priv->ind_tbls_lock);
+ LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
+ rte_rwlock_write_unlock(&priv->ind_tbls_lock);
}
- ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
- if (ret < 0)
- goto error;
- rte_atomic32_inc(&ind_tbl->refcnt);
- LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
return ind_tbl;
-error:
- ret = rte_errno;
- for (j = 0; j < i; j++)
- mlx5_rxq_release(dev, ind_tbl->queues[j]);
- rte_errno = ret;
- mlx5_free(ind_tbl);
- DEBUG("Port %u cannot create indirection table.", dev->data->port_id);
- return NULL;
+}
+
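+/**
+ * Check whether an indirection table can be modified, i.e. it has no more
+ * than one reference.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param ind_tbl
+ * Indirection table to check.
+ *
+ * @return
+ * 0 if the table can be modified, a negative errno value otherwise and
+ * rte_errno is set.
+ */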
+static int
+mlx5_ind_table_obj_check_standalone(struct rte_eth_dev *dev __rte_unused,
+ struct mlx5_ind_table_obj *ind_tbl)
+{
+ uint32_t refcnt;
+
+ refcnt = __atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED);
+ if (refcnt <= 1)
+ return 0;
+ /*
+ * Modification of indirection tables having more than 1
+ * reference is unsupported.
+ */
+ DRV_LOG(DEBUG,
+ "Port %u cannot modify indirection table %p (refcnt %u > 1).",
+ dev->data->port_id, (void *)ind_tbl, refcnt);
+ rte_errno = EINVAL;
+ return -rte_errno;
}
/**
- * Get an Rx Hash queue.
+ * Modify an indirection table.
*
* @param dev
* Pointer to Ethernet device.
- * @param rss_conf
- * RSS configuration for the Rx hash queue.
+ * @param ind_tbl
+ * Indirection table to modify.
* @param queues
- * Queues entering in hash queue. In case of empty hash_fields only the
- * first queue index will be taken for the indirection table.
+ * Queues replacement for the indirection table.
* @param queues_n
- * Number of queues.
+ * Number of queues in the array.
+ * @param standalone
+ * Indirection table for a standalone queue.
+ * @param ref_new_qs
+ * Whether to increment new RxQ set reference counters.
+ * @param deref_old_qs
+ * Whether to decrement old RxQ set reference counters.
*
* @return
- * An hash Rx queue index on success.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-uint32_t
-mlx5_hrxq_get(struct rte_eth_dev *dev,
- const uint8_t *rss_key, uint32_t rss_key_len,
- uint64_t hash_fields,
- const uint16_t *queues, uint32_t queues_n)
+int
+mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_obj *ind_tbl,
+ uint16_t *queues, const uint32_t queues_n,
+ bool standalone, bool ref_new_qs, bool deref_old_qs)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hrxq *hrxq;
- uint32_t idx;
-
- queues_n = hash_fields ? queues_n : 1;
- ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
- hrxq, next) {
- struct mlx5_ind_table_obj *ind_tbl;
+ unsigned int i = 0, j;
+ int ret = 0, err;
+ const unsigned int n = rte_is_power_of_2(queues_n) ?
+ log2above(queues_n) :
+ log2above(priv->config.ind_table_max_size);
- if (hrxq->rss_key_len != rss_key_len)
- continue;
- if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
- continue;
- if (hrxq->hash_fields != hash_fields)
- continue;
- ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
- if (!ind_tbl)
- continue;
- if (ind_tbl != hrxq->ind_table) {
- mlx5_ind_table_obj_release(dev, ind_tbl);
- continue;
+ MLX5_ASSERT(standalone);
+ RTE_SET_USED(standalone);
+ if (mlx5_ind_table_obj_check_standalone(dev, ind_tbl) < 0)
+ return -rte_errno;
+ if (ref_new_qs)
+ for (i = 0; i != queues_n; ++i) {
+ if (!mlx5_rxq_ref(dev, queues[i])) {
+ ret = -rte_errno;
+ goto error;
+ }
}
- rte_atomic32_inc(&hrxq->refcnt);
- return idx;
- }
+ MLX5_ASSERT(priv->obj_ops.ind_table_modify);
+ ret = priv->obj_ops.ind_table_modify(dev, n, queues, queues_n, ind_tbl);
+ if (ret)
+ goto error;
+ if (deref_old_qs)
+ for (i = 0; i < ind_tbl->queues_n; i++)
+ claim_nonzero(mlx5_rxq_deref(dev, ind_tbl->queues[i]));
+ ind_tbl->queues_n = queues_n;
+ ind_tbl->queues = queues;
return 0;
+error:
+ if (ref_new_qs) {
+ err = rte_errno;
+ for (j = 0; j < i; j++)
+ mlx5_rxq_deref(dev, queues[j]);
+ rte_errno = err;
+ }
+ DRV_LOG(DEBUG, "Port %u cannot modify indirection table.",
+ dev->data->port_id);
+ return ret;
}
/**
- * Release the hash Rx queue.
+ * Attach an indirection table to its queues.
*
* @param dev
* Pointer to Ethernet device.
- * @param hrxq
- * Index to Hash Rx queue to release.
+ * @param ind_tbl
+ * Indirection table to attach.
*
* @return
- * 1 while a reference on it exists, 0 when freed.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
+mlx5_ind_table_obj_attach(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_obj *ind_tbl)
+{
+ int ret;
+
+ ret = mlx5_ind_table_obj_modify(dev, ind_tbl, ind_tbl->queues,
+ ind_tbl->queues_n,
+ true /* standalone */,
+ true /* ref_new_qs */,
+ false /* deref_old_qs */);
+ if (ret != 0)
+ DRV_LOG(ERR, "Port %u could not modify indirect table obj %p",
+ dev->data->port_id, (void *)ind_tbl);
+ return ret;
+}
+
+/**
+ * Detach an indirection table from its queues.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param ind_tbl
+ * Indirection table to detach.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_ind_table_obj_detach(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_obj *ind_tbl)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hrxq *hrxq;
+ const unsigned int n = rte_is_power_of_2(ind_tbl->queues_n) ?
+ log2above(ind_tbl->queues_n) :
+ log2above(priv->config.ind_table_max_size);
+ unsigned int i;
+ int ret;
- hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
- if (!hrxq)
- return 0;
- if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- mlx5_glue->destroy_flow_action(hrxq->action);
-#endif
- priv->obj_ops.hrxq_destroy(hrxq);
- mlx5_ind_table_obj_release(dev, hrxq->ind_table);
- ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs,
- hrxq_idx, hrxq, next);
- mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
- return 0;
+ ret = mlx5_ind_table_obj_check_standalone(dev, ind_tbl);
+ if (ret != 0)
+ return ret;
+ MLX5_ASSERT(priv->obj_ops.ind_table_modify);
+ ret = priv->obj_ops.ind_table_modify(dev, n, NULL, 0, ind_tbl);
+ if (ret != 0) {
+ DRV_LOG(ERR, "Port %u could not modify indirect table obj %p",
+ dev->data->port_id, (void *)ind_tbl);
+ return ret;
}
- claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
- return 1;
+ for (i = 0; i < ind_tbl->queues_n; i++)
+ mlx5_rxq_release(dev, ind_tbl->queues[i]);
+ return ret;
+}
+
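+/**
+ * Match an existing hash Rx queue against a requested RSS configuration
+ * (mlx5 list match callback).
+ *
+ * @param tool_ctx
+ * Tool context, unused here.
+ * @param entry
+ * Hash Rx queue list entry to compare.
+ * @param cb_ctx
+ * Callback context holding the mlx5_flow_rss_desc to match against.
+ *
+ * @return
+ * 0 if the entry matches the requested configuration, non-zero otherwise.
+ */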
+int
+mlx5_hrxq_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
+ void *cb_ctx)
+{
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_rss_desc *rss_desc = ctx->data;
+ struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
+
+ return (hrxq->rss_key_len != rss_desc->key_len ||
+ memcmp(hrxq->rss_key, rss_desc->key, rss_desc->key_len) ||
+ hrxq->hash_fields != rss_desc->hash_fields ||
+ hrxq->ind_table->queues_n != rss_desc->queue_num ||
+ memcmp(hrxq->ind_table->queues, rss_desc->queue,
+ rss_desc->queue_num * sizeof(rss_desc->queue[0])));
}
/**
- * Create an Rx Hash queue.
+ * Modify an Rx Hash queue configuration.
*
* @param dev
* Pointer to Ethernet device.
+ * @param hrxq_idx
+ * Index to Hash Rx queue to modify.
* @param rss_key
* RSS key for the Rx hash queue.
* @param rss_key_len
* first queue index will be taken for the indirection table.
* @param queues_n
* Number of queues.
- * @param tunnel
- * Tunnel type.
*
* @return
- * The DevX object initialized index, 0 otherwise and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-uint32_t
-mlx5_hrxq_new(struct rte_eth_dev *dev,
- const uint8_t *rss_key, uint32_t rss_key_len,
- uint64_t hash_fields,
- const uint16_t *queues, uint32_t queues_n,
- int tunnel __rte_unused)
+int
+mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
+ const uint8_t *rss_key, uint32_t rss_key_len,
+ uint64_t hash_fields,
+ const uint16_t *queues, uint32_t queues_n)
{
+ int err;
+ struct mlx5_ind_table_obj *ind_tbl = NULL;
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hrxq *hrxq = NULL;
- uint32_t hrxq_idx = 0;
- struct mlx5_ind_table_obj *ind_tbl;
+ struct mlx5_hrxq *hrxq =
+ mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
+ bool dev_started = !!dev->data->dev_started;
int ret;
+ if (!hrxq) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ /* Validations. */
+ if (hrxq->rss_key_len != rss_key_len) {
+ /* rss_key_len is fixed at 40 bytes and is not supposed to change. */
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
queues_n = hash_fields ? queues_n : 1;
- ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
- if (!ind_tbl)
- ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
+ if (mlx5_ind_table_obj_match_queues(hrxq->ind_table,
+ queues, queues_n)) {
+ ind_tbl = hrxq->ind_table;
+ } else {
+ if (hrxq->standalone) {
+ /*
+ * Replacement of indirection table unsupported for
+ * standalone hrxq objects (used by shared RSS).
+ */
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+ }
+ ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
+ if (!ind_tbl)
+ ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
+ hrxq->standalone,
+ dev_started);
+ }
if (!ind_tbl) {
rte_errno = ENOMEM;
- return 0;
+ return -rte_errno;
+ }
+ MLX5_ASSERT(priv->obj_ops.hrxq_modify);
+ ret = priv->obj_ops.hrxq_modify(dev, hrxq, rss_key,
+ hash_fields, ind_tbl);
+ if (ret) {
+ rte_errno = errno;
+ goto error;
+ }
+ if (ind_tbl != hrxq->ind_table) {
+ MLX5_ASSERT(!hrxq->standalone);
+ mlx5_ind_table_obj_release(dev, hrxq->ind_table,
+ hrxq->standalone, true);
+ hrxq->ind_table = ind_tbl;
+ }
+ hrxq->hash_fields = hash_fields;
+ memcpy(hrxq->rss_key, rss_key, rss_key_len);
+ return 0;
+error:
+ err = rte_errno;
+ if (ind_tbl != hrxq->ind_table) {
+ MLX5_ASSERT(!hrxq->standalone);
+ mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone,
+ true);
+ }
+ rte_errno = err;
+ return -rte_errno;
+}
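+
+/*
+ * Usage sketch (illustrative only, names are placeholders): updating an
+ * existing hash Rx queue, e.g. when the queue set behind a shared RSS
+ * action changes, is a single call; on failure rte_errno is set:
+ *
+ *     ret = mlx5_hrxq_modify(dev, hrxq_idx, rss_key,
+ *                            MLX5_RSS_HASH_KEY_LEN, hash_fields,
+ *                            queues, queues_n);
+ *     if (ret != 0)
+ *         (... handle the error ...)
+ */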
+
+static void
+__mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ mlx5_glue->destroy_flow_action(hrxq->action);
+#endif
+ priv->obj_ops.hrxq_destroy(hrxq);
+ if (!hrxq->standalone) {
+ mlx5_ind_table_obj_release(dev, hrxq->ind_table,
+ hrxq->standalone, true);
}
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
+}
+
+/**
+ * Release the hash Rx queue (mlx5 list remove callback).
+ *
+ * @param tool_ctx
+ * Pointer to the Ethernet device (tool context).
+ * @param entry
+ * Hash Rx queue list entry to release.
+ */
+void
+mlx5_hrxq_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
+{
+ struct rte_eth_dev *dev = tool_ctx;
+ struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
+
+ __mlx5_hrxq_remove(dev, hrxq);
+}
+
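+/**
+ * Create a hash Rx queue from an RSS descriptor, reusing or creating the
+ * underlying indirection table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param rss_desc
+ * RSS configuration for the Rx hash queue.
+ *
+ * @return
+ * A pointer to the new hash Rx queue on success, NULL otherwise.
+ */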
+static struct mlx5_hrxq *
+__mlx5_hrxq_create(struct rte_eth_dev *dev,
+ struct mlx5_flow_rss_desc *rss_desc)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const uint8_t *rss_key = rss_desc->key;
+ uint32_t rss_key_len = rss_desc->key_len;
+ bool standalone = !!rss_desc->shared_rss;
+ const uint16_t *queues =
+ standalone ? rss_desc->const_q : rss_desc->queue;
+ uint32_t queues_n = rss_desc->queue_num;
+ struct mlx5_hrxq *hrxq = NULL;
+ uint32_t hrxq_idx = 0;
+ struct mlx5_ind_table_obj *ind_tbl = rss_desc->ind_tbl;
+ int ret;
+
+ queues_n = rss_desc->hash_fields ? queues_n : 1;
+ if (!ind_tbl)
+ ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
+ if (!ind_tbl)
+ ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
+ standalone,
+ !!dev->data->dev_started);
+ if (!ind_tbl)
+ return NULL;
hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
if (!hrxq)
goto error;
+ hrxq->standalone = standalone;
+ hrxq->idx = hrxq_idx;
hrxq->ind_table = ind_tbl;
hrxq->rss_key_len = rss_key_len;
- hrxq->hash_fields = hash_fields;
+ hrxq->hash_fields = rss_desc->hash_fields;
memcpy(hrxq->rss_key, rss_key, rss_key_len);
- ret = priv->obj_ops.hrxq_new(dev, hrxq, tunnel);
- if (ret < 0) {
- rte_errno = errno;
+ ret = priv->obj_ops.hrxq_new(dev, hrxq, rss_desc->tunnel);
+ if (ret < 0)
goto error;
- }
- rte_atomic32_inc(&hrxq->refcnt);
- ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
- hrxq, next);
- return hrxq_idx;
+ return hrxq;
error:
- ret = rte_errno; /* Save rte_errno before cleanup. */
- mlx5_ind_table_obj_release(dev, ind_tbl);
+ if (!rss_desc->ind_tbl)
+ mlx5_ind_table_obj_release(dev, ind_tbl, standalone, true);
if (hrxq)
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
- rte_errno = ret; /* Restore rte_errno. */
+ return NULL;
+}
+
+struct mlx5_list_entry *
+mlx5_hrxq_create_cb(void *tool_ctx, void *cb_ctx)
+{
+ struct rte_eth_dev *dev = tool_ctx;
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_rss_desc *rss_desc = ctx->data;
+ struct mlx5_hrxq *hrxq;
+
+ hrxq = __mlx5_hrxq_create(dev, rss_desc);
+ return hrxq ? &hrxq->entry : NULL;
+}
+
+struct mlx5_list_entry *
+mlx5_hrxq_clone_cb(void *tool_ctx, struct mlx5_list_entry *entry,
+ void *cb_ctx __rte_unused)
+{
+ struct rte_eth_dev *dev = tool_ctx;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hrxq *hrxq;
+ uint32_t hrxq_idx = 0;
+
+ hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
+ if (!hrxq)
+ return NULL;
+ memcpy(hrxq, entry, sizeof(*hrxq) + MLX5_RSS_HASH_KEY_LEN);
+ hrxq->idx = hrxq_idx;
+ return &hrxq->entry;
+}
+
+void
+mlx5_hrxq_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
+{
+ struct rte_eth_dev *dev = tool_ctx;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
+
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
+}
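+
+/*
+ * Wiring sketch (illustrative only; the exact registration call lives
+ * elsewhere in the PMD and is quoted here as an assumption): the
+ * create/match/remove/clone/clone-free callbacks above back the per-port
+ * hrxq cache, roughly:
+ *
+ *     priv->hrxqs = mlx5_list_create("hrxq", eth_dev, true,
+ *                                    mlx5_hrxq_create_cb,
+ *                                    mlx5_hrxq_match_cb,
+ *                                    mlx5_hrxq_remove_cb,
+ *                                    mlx5_hrxq_clone_cb,
+ *                                    mlx5_hrxq_clone_free_cb);
+ */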
+
+/**
+ * Get an Rx Hash queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param rss_desc
+ * RSS configuration for the Rx hash queue.
+ *
+ * @return
+ * A hash Rx queue index on success, 0 otherwise.
+ */
+uint32_t
+mlx5_hrxq_get(struct rte_eth_dev *dev,
+ struct mlx5_flow_rss_desc *rss_desc)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hrxq *hrxq;
+ struct mlx5_list_entry *entry;
+ struct mlx5_flow_cb_ctx ctx = {
+ .data = rss_desc,
+ };
+
+ if (rss_desc->shared_rss) {
+ hrxq = __mlx5_hrxq_create(dev, rss_desc);
+ } else {
+ entry = mlx5_list_register(priv->hrxqs, &ctx);
+ if (!entry)
+ return 0;
+ hrxq = container_of(entry, typeof(*hrxq), entry);
+ }
+ if (hrxq)
+ return hrxq->idx;
+ return 0;
+}
+
+/**
+ * Release the hash Rx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param hrxq_idx
+ * Index to Hash Rx queue to release.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+int
+mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hrxq *hrxq;
+
+ hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
+ if (!hrxq)
+ return 0;
+ if (!hrxq->standalone)
+ return mlx5_list_unregister(priv->hrxqs, &hrxq->entry);
+ __mlx5_hrxq_remove(dev, hrxq);
return 0;
}
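+
+/*
+ * Usage sketch (illustrative only): hash Rx queues are obtained and dropped
+ * by index, with the shared-RSS (standalone) case bypassing the cache:
+ *
+ *     uint32_t hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
+ *
+ *     if (hrxq_idx == 0)
+ *         (... creation or lookup failed ...)
+ *     (... use the hrxq ...)
+ *     mlx5_hrxq_release(dev, hrxq_idx);
+ */
+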
struct mlx5_hrxq *hrxq = NULL;
int ret;
- if (priv->drop_queue.hrxq) {
- rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
+ if (priv->drop_queue.hrxq)
return priv->drop_queue.hrxq;
- }
hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
if (!hrxq) {
DRV_LOG(WARNING,
ret = priv->obj_ops.drop_action_create(dev);
if (ret < 0)
goto error;
- rte_atomic32_set(&hrxq->refcnt, 1);
return hrxq;
error:
if (hrxq) {
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
- if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
- priv->obj_ops.drop_action_destroy(dev);
- mlx5_free(priv->drop_queue.rxq);
- mlx5_free(hrxq->ind_table);
- mlx5_free(hrxq);
- priv->drop_queue.rxq = NULL;
- priv->drop_queue.hrxq = NULL;
- }
+ if (!priv->drop_queue.hrxq)
+ return;
+ priv->obj_ops.drop_action_destroy(dev);
+ mlx5_free(priv->drop_queue.rxq);
+ mlx5_free(hrxq->ind_table);
+ mlx5_free(hrxq);
+ priv->drop_queue.rxq = NULL;
+ priv->drop_queue.hrxq = NULL;
}
/**
* @return
 * The number of objects not released.
*/
-int
+uint32_t
mlx5_hrxq_verify(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hrxq *hrxq;
- uint32_t idx;
- int ret = 0;
- ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
- hrxq, next) {
- DRV_LOG(DEBUG,
- "port %u hash Rx queue %p still referenced",
- dev->data->port_id, (void *)hrxq);
- ++ret;
- }
- return ret;
+ return mlx5_list_get_entry_num(priv->hrxqs);
}
/**
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
- struct mlx5_rxq_data *data;
unsigned int i;
for (i = 0; i != priv->rxqs_n; ++i) {
- if (!(*priv->rxqs)[i])
+ struct mlx5_rxq_data *data = mlx5_rxq_data_get(dev, i);
+
+ if (data == NULL)
continue;
- data = (*priv->rxqs)[i];
data->sh = sh;
data->rt_timestamp = priv->config.rt_timestamp;
}