#include <string.h>
#include <stdint.h>
#include <unistd.h>
-#include <sys/mman.h>
#include <inttypes.h>
-/* Verbs header. */
-/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
-#include <infiniband/verbs.h>
-#include <infiniband/mlx5dv.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
-
#include <rte_mbuf.h>
#include <rte_malloc.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
+#include <rte_bus_pci.h>
#include <rte_common.h>
+#include <rte_eal_paging.h>
-#include <mlx5_glue.h>
-#include <mlx5_devx_cmds.h>
#include <mlx5_common.h>
#include <mlx5_common_mr.h>
+#include <mlx5_malloc.h>
#include "mlx5_defs.h"
#include "mlx5_utils.h"
#include "mlx5.h"
+#include "mlx5_tx.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
DEV_TX_OFFLOAD_TCP_CKSUM);
if (config->tso)
offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+ if (config->tx_pp)
+ offloads |= DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP;
if (config->swp) {
if (config->hw_csum)
offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
DEV_TX_OFFLOAD_GRE_TNL_TSO |
DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
}
+ if (!config->mprq.enabled)
+ offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
return offloads;
}
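
/*
 * Editor's note: a usage sketch, illustration only (not part of this patch).
 * An application discovers the capabilities reported above through the
 * generic ethdev API before enabling them per queue; port_id, queue_id and
 * nb_desc are assumed to be defined by the caller:
 *
 *	struct rte_eth_dev_info dev_info;
 *	struct rte_eth_txconf txconf;
 *
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	txconf = dev_info.default_txconf;
 *	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP)
 *		txconf.offloads |= DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP;
 *	rte_eth_tx_queue_setup(port_id, queue_id, nb_desc,
 *			rte_eth_dev_socket_id(port_id), &txconf);
 */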
+/* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
+static void
+txq_sync_cq(struct mlx5_txq_data *txq)
+{
+ volatile struct mlx5_cqe *cqe;
+ int ret, i;
+
+ i = txq->cqe_s;
+ do {
+ cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
+ ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
+ if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
+ if (likely(ret != MLX5_CQE_STATUS_ERR)) {
+ /* No new CQEs in completion queue. */
+ MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
+ break;
+ }
+ }
+ ++txq->cq_ci;
+ } while (--i);
+ /* Move all CQEs to HW ownership. */
+ for (i = 0; i < txq->cqe_s; i++) {
+ cqe = &txq->cqes[i];
+ cqe->op_own = MLX5_CQE_INVALIDATE;
+ }
+ /* Resync CQE and WQE (WQ in reset state). */
+ rte_io_wmb();
+ *txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
+ txq->cq_pi = txq->cq_ci;
+ rte_io_wmb();
+}
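
/*
 * Editor's note (not part of this patch): the synchronization above relies
 * on the mlx5 CQE layout, where the opcode lives in the high nibble of
 * cqe->op_own and the ownership toggle in its lowest bit. Storing
 * MLX5_CQE_INVALIDATE into every entry makes check_cqe() report the whole
 * ring as HW-owned, and the rte_io_wmb() calls order the op_own stores
 * against the CQ doorbell record update so that no stale completion can be
 * observed once the WQ is reset.
 */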
+
+/**
+ * Tx queue stop. Device queue goes to the idle state,
+ * all involved mbufs are freed from elts/WQ.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * Tx queue index.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_tx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_txq_data *txq = (*priv->txqs)[idx];
+ struct mlx5_txq_ctrl *txq_ctrl =
+ container_of(txq, struct mlx5_txq_ctrl, txq);
+ int ret;
+
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ /* Move QP to RESET state. */
+ ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj, MLX5_TXQ_MOD_RDY2RST,
+ (uint8_t)priv->dev_port);
+ if (ret)
+ return ret;
+ /* Handle all send completions. */
+ txq_sync_cq(txq);
+ /* Free elts stored in the SQ. */
+ txq_free_elts(txq_ctrl);
+ /* Prevent writing new packets to SQ: mark it full (no free WQE). */
+ txq->wqe_ci = txq->wqe_s;
+ txq->wqe_pi = 0;
+ txq->elts_comp = 0;
+ /* Set the actual queue state. */
+ dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ return 0;
+}
+
+/**
+ * Tx queue stop. Device queue goes to the idle state,
+ * all involved mbufs are freed from elts/WQ.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * Tx queue index.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_tx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
+{
+ int ret;
+
+ if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
+ DRV_LOG(ERR, "Hairpin queue can't be stopped");
+ rte_errno = EINVAL;
+ return -EINVAL;
+ }
+ if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
+ return 0;
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ ret = mlx5_mp_os_req_queue_control(dev, idx,
+ MLX5_MP_REQ_QUEUE_TX_STOP);
+ } else {
+ ret = mlx5_tx_queue_stop_primary(dev, idx);
+ }
+ return ret;
+}
+
+/**
+ * Tx queue start. Device queue goes to the ready state,
+ * the queue counters are reset and transmission can resume.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ *   Tx queue index.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_tx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_txq_data *txq = (*priv->txqs)[idx];
+ struct mlx5_txq_ctrl *txq_ctrl =
+ container_of(txq, struct mlx5_txq_ctrl, txq);
+ int ret;
+
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj,
+ MLX5_TXQ_MOD_RST2RDY,
+ (uint8_t)priv->dev_port);
+ if (ret)
+ return ret;
+ txq_ctrl->txq.wqe_ci = 0;
+ txq_ctrl->txq.wqe_pi = 0;
+ txq_ctrl->txq.elts_comp = 0;
+ /* Set the actual queue state. */
+ dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
+ return 0;
+}
+
+/**
+ * Tx queue start. Device queue goes to the ready state,
+ * the queue counters are reset and transmission can resume.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ *   Tx queue index.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_tx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
+{
+ int ret;
+
+ if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
+ DRV_LOG(ERR, "Hairpin queue can't be started");
+ rte_errno = EINVAL;
+ return -EINVAL;
+ }
+ if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
+ return 0;
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ ret = mlx5_mp_os_req_queue_control(dev, idx,
+ MLX5_MP_REQ_QUEUE_TX_START);
+ } else {
+ ret = mlx5_tx_queue_start_primary(dev, idx);
+ }
+ return ret;
+}
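
/*
 * Editor's note: a usage sketch, illustration only (not part of this patch).
 * The two callbacks above back the generic ethdev queue-state API; assuming
 * port_id and queue_id name a started, non-hairpin Tx queue:
 *
 *	if (rte_eth_dev_tx_queue_stop(port_id, queue_id) == 0) {
 *		... the queue is drained and its mbufs are freed here ...
 *		(void)rte_eth_dev_tx_queue_start(port_id, queue_id);
 *	}
 */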
+
/**
* Tx queue presetup checks.
*
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc)
+mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
{
struct mlx5_priv *priv = dev->data->dev_private;
- if (desc <= MLX5_TX_COMP_THRESH) {
+ if (*desc <= MLX5_TX_COMP_THRESH) {
DRV_LOG(WARNING,
"port %u number of descriptors requested for Tx queue"
" %u must be higher than MLX5_TX_COMP_THRESH, using %u"
- " instead of %u",
- dev->data->port_id, idx, MLX5_TX_COMP_THRESH + 1, desc);
- desc = MLX5_TX_COMP_THRESH + 1;
+ " instead of %u", dev->data->port_id, idx,
+ MLX5_TX_COMP_THRESH + 1, *desc);
+ *desc = MLX5_TX_COMP_THRESH + 1;
}
- if (!rte_is_power_of_2(desc)) {
- desc = 1 << log2above(desc);
+ if (!rte_is_power_of_2(*desc)) {
+ *desc = 1 << log2above(*desc);
DRV_LOG(WARNING,
"port %u increased number of descriptors in Tx queue"
" %u to the next power of two (%d)",
- dev->data->port_id, idx, desc);
+ dev->data->port_id, idx, *desc);
}
DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
- dev->data->port_id, idx, desc);
+ dev->data->port_id, idx, *desc);
if (idx >= priv->txqs_n) {
DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
dev->data->port_id, idx, priv->txqs_n);
mlx5_txq_release(dev, idx);
return 0;
}
+
/**
* DPDK callback to configure a TX queue.
*
container_of(txq, struct mlx5_txq_ctrl, txq);
int res;
- res = mlx5_tx_queue_pre_setup(dev, idx, desc);
+ res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
if (res)
return res;
txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
container_of(txq, struct mlx5_txq_ctrl, txq);
int res;
- res = mlx5_tx_queue_pre_setup(dev, idx, desc);
+ res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
if (res)
return res;
- if (hairpin_conf->peer_count != 1 ||
- hairpin_conf->peers[0].port != dev->data->port_id ||
- hairpin_conf->peers[0].queue >= priv->rxqs_n) {
- DRV_LOG(ERR, "port %u unable to setup hairpin queue index %u "
- " invalid hairpind configuration", dev->data->port_id,
- idx);
+ if (hairpin_conf->peer_count != 1) {
rte_errno = EINVAL;
+ DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue"
+ " index %u: peer count %u is invalid, must be 1",
+ dev->data->port_id, idx, hairpin_conf->peer_count);
return -rte_errno;
}
+ if (hairpin_conf->peers[0].port == dev->data->port_id) {
+ if (hairpin_conf->peers[0].queue >= priv->rxqs_n) {
+ rte_errno = EINVAL;
+ DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue"
+ " index %u: peer Rx queue %u is out of range"
+ " (%u Rx queues configured)",
+ dev->data->port_id, idx,
+ hairpin_conf->peers[0].queue, priv->rxqs_n);
+ return -rte_errno;
+ }
+ } else {
+ if (hairpin_conf->manual_bind == 0 ||
+ hairpin_conf->tx_explicit == 0) {
+ rte_errno = EINVAL;
+ DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue"
+ " index %u to peer port %u: manual_bind (%u)"
+ " and tx_explicit (%u) must both be set for"
+ " cross-port hairpin",
+ dev->data->port_id, idx,
+ hairpin_conf->peers[0].port,
+ hairpin_conf->manual_bind,
+ hairpin_conf->tx_explicit);
+ return -rte_errno;
+ }
+ }
txq_ctrl = mlx5_txq_hairpin_new(dev, idx, desc, hairpin_conf);
if (!txq_ctrl) {
DRV_LOG(ERR, "port %u unable to allocate queue index %u",
DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
dev->data->port_id, idx);
(*priv->txqs)[idx] = &txq_ctrl->txq;
+ dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
return 0;
}
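
/*
 * Editor's note: a configuration sketch, illustration only (not part of this
 * patch), for the single-peer, same-port case validated above; binding a Tx
 * queue back to a local Rx queue:
 *
 *	struct rte_eth_hairpin_conf conf = {
 *		.peer_count = 1,
 *	};
 *
 *	conf.peers[0].port = port_id;
 *	conf.peers[0].queue = rx_queue_id;
 *	rte_eth_tx_hairpin_queue_setup(port_id, tx_queue_id, nb_desc, &conf);
 *
 * Cross-port peers additionally require conf.manual_bind = 1 and
 * conf.tx_explicit = 1, as enforced by the checks above.
 */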
priv = txq_ctrl->priv;
for (i = 0; (i != priv->txqs_n); ++i)
if ((*priv->txqs)[i] == txq) {
- mlx5_txq_release(ETH_DEV(priv), i);
DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
PORT_ID(priv), txq->idx);
+ mlx5_txq_release(ETH_DEV(priv), i);
break;
}
}
* @param txq_ctrl
* Pointer to Tx queue control structure.
*/
-static void
+void
txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl)
{
struct mlx5_priv *priv = txq_ctrl->priv;
struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
- const size_t page_size = sysconf(_SC_PAGESIZE);
#ifndef RTE_ARCH_64
unsigned int lock_idx;
#endif
+ const size_t page_size = rte_mem_page_size();
+ if (page_size == (size_t)-1) {
+ DRV_LOG(ERR, "Failed to get mem page size");
+ rte_errno = ENOMEM;
+ return;
+ }
if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
return;
/* Assign an UAR lock according to UAR page number */
lock_idx = (txq_ctrl->uar_mmap_offset / page_size) &
MLX5_UAR_PAGE_NUM_MASK;
- txq_ctrl->txq.uar_lock = &priv->uar_lock[lock_idx];
+ txq_ctrl->txq.uar_lock = &priv->sh->uar_lock[lock_idx];
#endif
}
void *addr;
uintptr_t uar_va;
uintptr_t offset;
- const size_t page_size = sysconf(_SC_PAGESIZE);
+ const size_t page_size = rte_mem_page_size();
+ if (page_size == (size_t)-1) {
+ DRV_LOG(ERR, "Failed to get mem page size");
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
return 0;
*/
uar_va = (uintptr_t)txq_ctrl->bf_reg;
offset = uar_va & (page_size - 1); /* Offset in page. */
- addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
- txq_ctrl->uar_mmap_offset);
- if (addr == MAP_FAILED) {
+ addr = rte_mem_map(NULL, page_size, RTE_PROT_WRITE, RTE_MAP_SHARED,
+ fd, txq_ctrl->uar_mmap_offset);
+ if (!addr) {
DRV_LOG(ERR,
"port %u mmap failed for BF reg of txq %u",
txq->port_id, txq->idx);
txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl)
{
struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(txq_ctrl->priv));
- const size_t page_size = sysconf(_SC_PAGESIZE);
void *addr;
+ const size_t page_size = rte_mem_page_size();
+ if (page_size == (size_t)-1) {
+ DRV_LOG(ERR, "Failed to get mem page size");
+ rte_errno = ENOMEM;
+ return;
+ }
if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
return;
addr = ppriv->uar_table[txq_ctrl->txq.idx];
- munmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
+ rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
+}
+
+/**
+ * Deinitialize Tx UAR registers for secondary process.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev)
+{
+ struct mlx5_proc_priv *ppriv = (struct mlx5_proc_priv *)
+ dev->process_private;
+ const size_t page_size = rte_mem_page_size();
+ void *addr;
+ unsigned int i;
+
+ if (page_size == (size_t)-1) {
+ DRV_LOG(ERR, "Failed to get mem page size");
+ return;
+ }
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
+ for (i = 0; i != ppriv->uar_table_sz; ++i) {
+ if (!ppriv->uar_table[i])
+ continue;
+ addr = ppriv->uar_table[i];
+ rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
+ }
}
/**
return -rte_errno;
}
-/**
- * Create the Tx hairpin queue object.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param idx
- * Queue index in DPDK Tx queue array
- *
- * @return
- * The hairpin DevX object initialised, NULL otherwise and rte_errno is set.
- */
-static struct mlx5_txq_obj *
-mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
- struct mlx5_txq_ctrl *txq_ctrl =
- container_of(txq_data, struct mlx5_txq_ctrl, txq);
- struct mlx5_devx_create_sq_attr attr = { 0 };
- struct mlx5_txq_obj *tmpl = NULL;
- int ret = 0;
- uint32_t max_wq_data;
-
- MLX5_ASSERT(txq_data);
- MLX5_ASSERT(!txq_ctrl->obj);
- tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
- txq_ctrl->socket);
- if (!tmpl) {
- DRV_LOG(ERR,
- "port %u Tx queue %u cannot allocate memory resources",
- dev->data->port_id, txq_data->idx);
- rte_errno = ENOMEM;
- goto error;
- }
- tmpl->type = MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN;
- tmpl->txq_ctrl = txq_ctrl;
- attr.hairpin = 1;
- attr.tis_lst_sz = 1;
- max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
- /* Jumbo frames > 9KB should be supported, and more packets. */
- if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
- if (priv->config.log_hp_size > max_wq_data) {
- DRV_LOG(ERR, "total data size %u power of 2 is "
- "too large for hairpin",
- priv->config.log_hp_size);
- rte_errno = ERANGE;
- return NULL;
- }
- attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
- } else {
- attr.wq_attr.log_hairpin_data_sz =
- (max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
- max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
- }
- /* Set the packets number to the maximum value for performance. */
- attr.wq_attr.log_hairpin_num_packets =
- attr.wq_attr.log_hairpin_data_sz -
- MLX5_HAIRPIN_QUEUE_STRIDE;
- attr.tis_num = priv->sh->tis->id;
- tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->ctx, &attr);
- if (!tmpl->sq) {
- DRV_LOG(ERR,
- "port %u tx hairpin queue %u can't create sq object",
- dev->data->port_id, idx);
- rte_errno = errno;
- goto error;
- }
- DRV_LOG(DEBUG, "port %u sxq %u updated with %p", dev->data->port_id,
- idx, (void *)&tmpl);
- rte_atomic32_inc(&tmpl->refcnt);
- LIST_INSERT_HEAD(&priv->txqsobj, tmpl, next);
- return tmpl;
-error:
- ret = rte_errno; /* Save rte_errno before cleanup. */
- if (tmpl->tis)
- mlx5_devx_cmd_destroy(tmpl->tis);
- if (tmpl->sq)
- mlx5_devx_cmd_destroy(tmpl->sq);
- rte_errno = ret; /* Restore rte_errno. */
- return NULL;
-}
-
-/**
- * Create the Tx queue Verbs object.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param idx
- * Queue index in DPDK Tx queue array.
- * @param type
- * Type of the Tx queue object to create.
- *
- * @return
- * The Verbs object initialised, NULL otherwise and rte_errno is set.
- */
-struct mlx5_txq_obj *
-mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
- enum mlx5_txq_obj_type type)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
- struct mlx5_txq_ctrl *txq_ctrl =
- container_of(txq_data, struct mlx5_txq_ctrl, txq);
- struct mlx5_txq_obj tmpl;
- struct mlx5_txq_obj *txq_obj = NULL;
- union {
- struct ibv_qp_init_attr_ex init;
- struct ibv_cq_init_attr_ex cq;
- struct ibv_qp_attr mod;
- } attr;
- unsigned int cqe_n;
- struct mlx5dv_qp qp = { .comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET };
- struct mlx5dv_cq cq_info;
- struct mlx5dv_obj obj;
- const int desc = 1 << txq_data->elts_n;
- int ret = 0;
-
- if (type == MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN)
- return mlx5_txq_obj_hairpin_new(dev, idx);
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- /* If using DevX, need additional mask to read tisn value. */
- if (priv->config.devx && !priv->sh->tdn)
- qp.comp_mask |= MLX5DV_QP_MASK_RAW_QP_HANDLES;
-#endif
- MLX5_ASSERT(txq_data);
- priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
- priv->verbs_alloc_ctx.obj = txq_ctrl;
- if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
- DRV_LOG(ERR,
- "port %u MLX5_ENABLE_CQE_COMPRESSION must never be set",
- dev->data->port_id);
- rte_errno = EINVAL;
- return NULL;
- }
- memset(&tmpl, 0, sizeof(struct mlx5_txq_obj));
- attr.cq = (struct ibv_cq_init_attr_ex){
- .comp_mask = 0,
- };
- cqe_n = desc / MLX5_TX_COMP_THRESH +
- 1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
- tmpl.cq = mlx5_glue->create_cq(priv->sh->ctx, cqe_n, NULL, NULL, 0);
- if (tmpl.cq == NULL) {
- DRV_LOG(ERR, "port %u Tx queue %u CQ creation failure",
- dev->data->port_id, idx);
- rte_errno = errno;
- goto error;
- }
- attr.init = (struct ibv_qp_init_attr_ex){
- /* CQ to be associated with the send queue. */
- .send_cq = tmpl.cq,
- /* CQ to be associated with the receive queue. */
- .recv_cq = tmpl.cq,
- .cap = {
- /* Max number of outstanding WRs. */
- .max_send_wr =
- ((priv->sh->device_attr.orig_attr.max_qp_wr <
- desc) ?
- priv->sh->device_attr.orig_attr.max_qp_wr :
- desc),
- /*
- * Max number of scatter/gather elements in a WR,
- * must be 1 to prevent libmlx5 from trying to affect
- * too much memory. TX gather is not impacted by the
- * device_attr.max_sge limit and will still work
- * properly.
- */
- .max_send_sge = 1,
- },
- .qp_type = IBV_QPT_RAW_PACKET,
- /*
- * Do *NOT* enable this, completions events are managed per
- * Tx burst.
- */
- .sq_sig_all = 0,
- .pd = priv->sh->pd,
- .comp_mask = IBV_QP_INIT_ATTR_PD,
- };
- if (txq_data->inlen_send)
- attr.init.cap.max_inline_data = txq_ctrl->max_inline_data;
- if (txq_data->tso_en) {
- attr.init.max_tso_header = txq_ctrl->max_tso_header;
- attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
- }
- tmpl.qp = mlx5_glue->create_qp_ex(priv->sh->ctx, &attr.init);
- if (tmpl.qp == NULL) {
- DRV_LOG(ERR, "port %u Tx queue %u QP creation failure",
- dev->data->port_id, idx);
- rte_errno = errno;
- goto error;
- }
- attr.mod = (struct ibv_qp_attr){
- /* Move the QP to this state. */
- .qp_state = IBV_QPS_INIT,
- /* IB device port number. */
- .port_num = (uint8_t)priv->ibv_port,
- };
- ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod,
- (IBV_QP_STATE | IBV_QP_PORT));
- if (ret) {
- DRV_LOG(ERR,
- "port %u Tx queue %u QP state to IBV_QPS_INIT failed",
- dev->data->port_id, idx);
- rte_errno = errno;
- goto error;
- }
- attr.mod = (struct ibv_qp_attr){
- .qp_state = IBV_QPS_RTR
- };
- ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
- if (ret) {
- DRV_LOG(ERR,
- "port %u Tx queue %u QP state to IBV_QPS_RTR failed",
- dev->data->port_id, idx);
- rte_errno = errno;
- goto error;
- }
- attr.mod.qp_state = IBV_QPS_RTS;
- ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
- if (ret) {
- DRV_LOG(ERR,
- "port %u Tx queue %u QP state to IBV_QPS_RTS failed",
- dev->data->port_id, idx);
- rte_errno = errno;
- goto error;
- }
- txq_obj = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_obj), 0,
- txq_ctrl->socket);
- if (!txq_obj) {
- DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory",
- dev->data->port_id, idx);
- rte_errno = ENOMEM;
- goto error;
- }
- obj.cq.in = tmpl.cq;
- obj.cq.out = &cq_info;
- obj.qp.in = tmpl.qp;
- obj.qp.out = &qp;
- ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
- if (ret != 0) {
- rte_errno = errno;
- goto error;
- }
- if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
- DRV_LOG(ERR,
- "port %u wrong MLX5_CQE_SIZE environment variable"
- " value: it should be set to %u",
- dev->data->port_id, RTE_CACHE_LINE_SIZE);
- rte_errno = EINVAL;
- goto error;
- }
- txq_data->cqe_n = log2above(cq_info.cqe_cnt);
- txq_data->cqe_s = 1 << txq_data->cqe_n;
- txq_data->cqe_m = txq_data->cqe_s - 1;
- txq_data->qp_num_8s = tmpl.qp->qp_num << 8;
- txq_data->wqes = qp.sq.buf;
- txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
- txq_data->wqe_s = 1 << txq_data->wqe_n;
- txq_data->wqe_m = txq_data->wqe_s - 1;
- txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
- txq_data->qp_db = &qp.dbrec[MLX5_SND_DBR];
- txq_data->cq_db = cq_info.dbrec;
- txq_data->cqes = (volatile struct mlx5_cqe *)cq_info.buf;
- txq_data->cq_ci = 0;
- txq_data->cq_pi = 0;
- txq_data->wqe_ci = 0;
- txq_data->wqe_pi = 0;
- txq_data->wqe_comp = 0;
- txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
- txq_data->fcqs = rte_calloc_socket(__func__,
- txq_data->cqe_s,
- sizeof(*txq_data->fcqs),
- RTE_CACHE_LINE_SIZE,
- txq_ctrl->socket);
- if (!txq_data->fcqs) {
- DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory (FCQ)",
- dev->data->port_id, idx);
- rte_errno = ENOMEM;
- goto error;
- }
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- /*
- * If using DevX need to query and store TIS transport domain value.
- * This is done once per port.
- * Will use this value on Rx, when creating matching TIR.
- */
- if (priv->config.devx && !priv->sh->tdn) {
- ret = mlx5_devx_cmd_qp_query_tis_td(tmpl.qp, qp.tisn,
- &priv->sh->tdn);
- if (ret) {
- DRV_LOG(ERR, "Fail to query port %u Tx queue %u QP TIS "
- "transport domain", dev->data->port_id, idx);
- rte_errno = EINVAL;
- goto error;
- } else {
- DRV_LOG(DEBUG, "port %u Tx queue %u TIS number %d "
- "transport domain %d", dev->data->port_id,
- idx, qp.tisn, priv->sh->tdn);
- }
- }
-#endif
- txq_obj->qp = tmpl.qp;
- txq_obj->cq = tmpl.cq;
- rte_atomic32_inc(&txq_obj->refcnt);
- txq_ctrl->bf_reg = qp.bf.reg;
- if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
- txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
- DRV_LOG(DEBUG, "port %u: uar_mmap_offset 0x%"PRIx64,
- dev->data->port_id, txq_ctrl->uar_mmap_offset);
- } else {
- DRV_LOG(ERR,
- "port %u failed to retrieve UAR info, invalid"
- " libmlx5.so",
- dev->data->port_id);
- rte_errno = EINVAL;
- goto error;
- }
- txq_uar_init(txq_ctrl);
- LIST_INSERT_HEAD(&priv->txqsobj, txq_obj, next);
- txq_obj->txq_ctrl = txq_ctrl;
- priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
- return txq_obj;
-error:
- ret = rte_errno; /* Save rte_errno before cleanup. */
- if (tmpl.cq)
- claim_zero(mlx5_glue->destroy_cq(tmpl.cq));
- if (tmpl.qp)
- claim_zero(mlx5_glue->destroy_qp(tmpl.qp));
- if (txq_data && txq_data->fcqs)
- rte_free(txq_data->fcqs);
- if (txq_obj)
- rte_free(txq_obj);
- priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
- rte_errno = ret; /* Restore rte_errno. */
- return NULL;
-}
-
-/**
- * Get an Tx queue Verbs object.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param idx
- * Queue index in DPDK Tx queue array.
- *
- * @return
- * The Verbs object if it exists.
- */
-struct mlx5_txq_obj *
-mlx5_txq_obj_get(struct rte_eth_dev *dev, uint16_t idx)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_txq_ctrl *txq_ctrl;
-
- if (idx >= priv->txqs_n)
- return NULL;
- if (!(*priv->txqs)[idx])
- return NULL;
- txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- if (txq_ctrl->obj)
- rte_atomic32_inc(&txq_ctrl->obj->refcnt);
- return txq_ctrl->obj;
-}
-
-/**
- * Release an Tx verbs queue object.
- *
- * @param txq_obj
- * Verbs Tx queue object.
- *
- * @return
- * 1 while a reference on it exists, 0 when freed.
- */
-int
-mlx5_txq_obj_release(struct mlx5_txq_obj *txq_obj)
-{
- MLX5_ASSERT(txq_obj);
- if (rte_atomic32_dec_and_test(&txq_obj->refcnt)) {
- if (txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN) {
- if (txq_obj->tis)
- claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
- } else {
- claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
- claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
- if (txq_obj->txq_ctrl->txq.fcqs)
- rte_free(txq_obj->txq_ctrl->txq.fcqs);
- }
- LIST_REMOVE(txq_obj, next);
- rte_free(txq_obj);
- return 0;
- }
- return 1;
-}
-
/**
* Verify the Verbs Tx queue list is empty
*
struct mlx5_priv *priv = txq_ctrl->priv;
unsigned int wqe_size;
- wqe_size = priv->sh->device_attr.orig_attr.max_qp_wr / desc;
+ wqe_size = priv->sh->device_attr.max_qp_wr / desc;
if (!wqe_size)
return 0;
/*
bool vlan_inline;
unsigned int temp;
+ txq_ctrl->txq.fast_free =
+ !!((txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+ !(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
+ !config->mprq.enabled);
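+ /*
+ * Note: fast free requires every mbuf sent on the queue to come from a
+ * single mempool and to have a reference count of 1. Multi-segment Tx
+ * and MPRQ (whose mbufs may carry externally attached buffers) break
+ * that guarantee, hence the checks above.
+ */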
if (config->txqs_inline == MLX5_ARG_UNSET)
txqs_inline =
#if defined(RTE_ARCH_ARM64)
" Tx queue size (%d)",
txq_ctrl->txq.inlen_mode, max_inline,
priv->dev_data->port_id,
- priv->sh->device_attr.orig_attr.max_qp_wr);
+ priv->sh->device_attr.max_qp_wr);
goto error;
}
if (txq_ctrl->txq.inlen_send > max_inline &&
" Tx queue size (%d)",
txq_ctrl->txq.inlen_send, max_inline,
priv->dev_data->port_id,
- priv->sh->device_attr.orig_attr.max_qp_wr);
+ priv->sh->device_attr.max_qp_wr);
goto error;
}
if (txq_ctrl->txq.inlen_empw > max_inline &&
" Tx queue size (%d)",
txq_ctrl->txq.inlen_empw, max_inline,
priv->dev_data->port_id,
- priv->sh->device_attr.orig_attr.max_qp_wr);
+ priv->sh->device_attr.max_qp_wr);
goto error;
}
if (txq_ctrl->txq.tso_en && max_inline < MLX5_MAX_TSO_HEADER) {
" Tx queue size (%d)",
MLX5_MAX_TSO_HEADER, max_inline,
priv->dev_data->port_id,
- priv->sh->device_attr.orig_attr.max_qp_wr);
+ priv->sh->device_attr.max_qp_wr);
goto error;
}
if (txq_ctrl->txq.inlen_send > max_inline) {
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *tmpl;
- tmpl = rte_calloc_socket("TXQ", 1,
- sizeof(*tmpl) +
- desc * sizeof(struct rte_mbuf *),
- 0, socket);
+ tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
+ desc * sizeof(struct rte_mbuf *), 0, socket);
if (!tmpl) {
rte_errno = ENOMEM;
return NULL;
if (txq_adjust_params(tmpl))
goto error;
if (txq_calc_wqebb_cnt(tmpl) >
- priv->sh->device_attr.orig_attr.max_qp_wr) {
+ priv->sh->device_attr.max_qp_wr) {
DRV_LOG(ERR,
"port %u Tx WQEBB count (%d) exceeds the limit (%d),"
" try smaller queue size",
dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
- priv->sh->device_attr.orig_attr.max_qp_wr);
+ priv->sh->device_attr.max_qp_wr);
rte_errno = ENOMEM;
goto error;
}
- rte_atomic32_inc(&tmpl->refcnt);
+ __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
tmpl->type = MLX5_TXQ_TYPE_STANDARD;
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
error:
- rte_free(tmpl);
+ mlx5_mr_btree_free(&tmpl->txq.mr_ctrl.cache_bh);
+ mlx5_free(tmpl);
return NULL;
}
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *tmpl;
- tmpl = rte_calloc_socket("TXQ", 1,
- sizeof(*tmpl), 0, SOCKET_ID_ANY);
+ tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
+ SOCKET_ID_ANY);
if (!tmpl) {
rte_errno = ENOMEM;
return NULL;
tmpl->txq.idx = idx;
tmpl->hairpin_conf = *hairpin_conf;
tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
- rte_atomic32_inc(&tmpl->refcnt);
+ __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
}
mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
struct mlx5_txq_ctrl *ctrl = NULL;
- if ((*priv->txqs)[idx]) {
- ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl,
- txq);
- mlx5_txq_obj_get(dev, idx);
- rte_atomic32_inc(&ctrl->refcnt);
+ if (txq_data) {
+ ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
+ __atomic_fetch_add(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
}
return ctrl;
}
mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_txq_ctrl *txq;
+ struct mlx5_txq_ctrl *txq_ctrl;
if (!(*priv->txqs)[idx])
return 0;
- txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- if (txq->obj && !mlx5_txq_obj_release(txq->obj))
- txq->obj = NULL;
- if (rte_atomic32_dec_and_test(&txq->refcnt)) {
- txq_free_elts(txq);
- mlx5_mr_btree_free(&txq->txq.mr_ctrl.cache_bh);
- LIST_REMOVE(txq, next);
- rte_free(txq);
+ txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
+ if (__atomic_sub_fetch(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
+ return 1;
+ if (txq_ctrl->obj) {
+ priv->obj_ops.txq_obj_release(txq_ctrl->obj);
+ LIST_REMOVE(txq_ctrl->obj, next);
+ mlx5_free(txq_ctrl->obj);
+ txq_ctrl->obj = NULL;
+ }
+ if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD) {
+ if (txq_ctrl->txq.fcqs) {
+ mlx5_free(txq_ctrl->txq.fcqs);
+ txq_ctrl->txq.fcqs = NULL;
+ }
+ txq_free_elts(txq_ctrl);
+ dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ }
+ if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
+ if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD)
+ mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
+ LIST_REMOVE(txq_ctrl, next);
+ mlx5_free(txq_ctrl);
(*priv->txqs)[idx] = NULL;
- return 0;
}
- return 1;
+ return 0;
}
/**
if (!(*priv->txqs)[idx])
return -1;
txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- return (rte_atomic32_read(&txq->refcnt) == 1);
+ return (__atomic_load_n(&txq->refcnt, __ATOMIC_RELAXED) == 1);
}
/**
}
return ret;
}
+
+/**
+ * Set the Tx queue dynamic timestamp (mask and offset).
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ */
+void
+mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_txq_data *data;
+ int off, nbit;
+ unsigned int i;
+ uint64_t mask = 0;
+
+ nbit = rte_mbuf_dynflag_lookup
+ (RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
+ off = rte_mbuf_dynfield_lookup
+ (RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
+ if (nbit >= 0 && off >= 0 && sh->txpp.refcnt)
+ mask = 1ULL << nbit;
+ for (i = 0; i != priv->txqs_n; ++i) {
+ data = (*priv->txqs)[i];
+ if (!data)
+ continue;
+ data->sh = sh;
+ data->ts_mask = mask;
+ data->ts_offset = off;
+ }
+}
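
/*
 * Editor's note: a usage sketch, illustration only (not part of this patch).
 * With Tx packet pacing enabled, an application stamps an mbuf "mb" with the
 * desired transmission time "ts" (in device clock units) through the dynamic
 * field/flag registered above:
 *
 *	int off = rte_mbuf_dynfield_lookup(RTE_MBUF_DYNFIELD_TIMESTAMP_NAME,
 *			NULL);
 *	int nbit = rte_mbuf_dynflag_lookup(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME,
 *			NULL);
 *
 *	*RTE_MBUF_DYNFIELD(mb, off, uint64_t *) = ts;
 *	mb->ol_flags |= 1ULL << nbit;
 */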