1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
14 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
16 #pragma GCC diagnostic ignored "-Wpedantic"
18 #include <infiniband/verbs.h>
19 #include <infiniband/mlx5dv.h>
21 #pragma GCC diagnostic error "-Wpedantic"
25 #include <rte_malloc.h>
26 #include <rte_ethdev_driver.h>
27 #include <rte_common.h>
28 #include <rte_eal_paging.h>
30 #include <mlx5_glue.h>
31 #include <mlx5_devx_cmds.h>
32 #include <mlx5_common.h>
33 #include <mlx5_common_mr.h>
34 #include <mlx5_common_os.h>
35 #include <mlx5_malloc.h>
37 #include "mlx5_defs.h"
38 #include "mlx5_utils.h"
40 #include "mlx5_rxtx.h"
41 #include "mlx5_autoconf.h"
44 * Allocate TX queue elements.
47 * Pointer to TX queue structure.
50 txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
52 const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
55 for (i = 0; (i != elts_n); ++i)
56 txq_ctrl->txq.elts[i] = NULL;
57 DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
58 PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx, elts_n);
59 txq_ctrl->txq.elts_head = 0;
60 txq_ctrl->txq.elts_tail = 0;
61 txq_ctrl->txq.elts_comp = 0;
65 * Free TX queue elements.
68 * Pointer to TX queue structure.
71 txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
73 const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
74 const uint16_t elts_m = elts_n - 1;
75 uint16_t elts_head = txq_ctrl->txq.elts_head;
76 uint16_t elts_tail = txq_ctrl->txq.elts_tail;
77 struct rte_mbuf *(*elts)[elts_n] = &txq_ctrl->txq.elts;
79 DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
80 PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx);
81 txq_ctrl->txq.elts_head = 0;
82 txq_ctrl->txq.elts_tail = 0;
83 txq_ctrl->txq.elts_comp = 0;
85 while (elts_tail != elts_head) {
86 struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
88 MLX5_ASSERT(elt != NULL);
89 rte_pktmbuf_free_seg(elt);
90 #ifdef RTE_LIBRTE_MLX5_DEBUG
92 memset(&(*elts)[elts_tail & elts_m],
94 sizeof((*elts)[elts_tail & elts_m]));
101 * Returns the per-port supported offloads.
104 * Pointer to Ethernet device.
107 * Supported Tx offloads.
110 mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
112 struct mlx5_priv *priv = dev->data->dev_private;
113 uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
114 DEV_TX_OFFLOAD_VLAN_INSERT);
115 struct mlx5_dev_config *config = &priv->config;
118 offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
119 DEV_TX_OFFLOAD_UDP_CKSUM |
120 DEV_TX_OFFLOAD_TCP_CKSUM);
122 offloads |= DEV_TX_OFFLOAD_TCP_TSO;
124 offloads |= DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP;
127 offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
129 offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
130 DEV_TX_OFFLOAD_UDP_TNL_TSO);
132 if (config->tunnel_en) {
134 offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
136 offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
137 DEV_TX_OFFLOAD_GRE_TNL_TSO |
138 DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
144 * Tx queue presetup checks.
147 * Pointer to Ethernet device structure.
151 * Number of descriptors to configure in queue.
154 * 0 on success, a negative errno value otherwise and rte_errno is set.
157 mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
159 struct mlx5_priv *priv = dev->data->dev_private;
161 if (*desc <= MLX5_TX_COMP_THRESH) {
163 "port %u number of descriptors requested for Tx queue"
164 " %u must be higher than MLX5_TX_COMP_THRESH, using %u"
165 " instead of %u", dev->data->port_id, idx,
166 MLX5_TX_COMP_THRESH + 1, *desc);
167 *desc = MLX5_TX_COMP_THRESH + 1;
169 if (!rte_is_power_of_2(*desc)) {
170 *desc = 1 << log2above(*desc);
172 "port %u increased number of descriptors in Tx queue"
173 " %u to the next power of two (%d)",
174 dev->data->port_id, idx, *desc);
176 DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
177 dev->data->port_id, idx, *desc);
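/*
 * Illustrative example (not part of the original code): how the checks
 * above normalize the descriptor count, assuming the default
 * MLX5_TX_COMP_THRESH value of 32 (the value is an assumption here):
 *   - requested desc = 16   -> raised to 33 so that it exceeds the
 *     completion threshold, then rounded up to 64.
 *   - requested desc = 1000 -> already above the threshold but not a
 *     power of two, rounded up to 1 << log2above(1000) = 1024.
 */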
178 if (idx >= priv->txqs_n) {
179 DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
180 dev->data->port_id, idx, priv->txqs_n);
181 rte_errno = EOVERFLOW;
184 if (!mlx5_txq_releasable(dev, idx)) {
186 DRV_LOG(ERR, "port %u unable to release queue index %u",
187 dev->data->port_id, idx);
190 mlx5_txq_release(dev, idx);
194 * DPDK callback to configure a TX queue.
197 * Pointer to Ethernet device structure.
201 * Number of descriptors to configure in queue.
203 * NUMA socket on which memory must be allocated.
205 * Thresholds parameters.
208 * 0 on success, a negative errno value otherwise and rte_errno is set.
211 mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
212 unsigned int socket, const struct rte_eth_txconf *conf)
214 struct mlx5_priv *priv = dev->data->dev_private;
215 struct mlx5_txq_data *txq = (*priv->txqs)[idx];
216 struct mlx5_txq_ctrl *txq_ctrl =
217 container_of(txq, struct mlx5_txq_ctrl, txq);
220 res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
223 txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
225 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
226 dev->data->port_id, idx);
229 DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
230 dev->data->port_id, idx);
231 (*priv->txqs)[idx] = &txq_ctrl->txq;
236 * DPDK callback to configure a TX hairpin queue.
239 * Pointer to Ethernet device structure.
243 * Number of descriptors to configure in queue.
244 * @param[in] hairpin_conf
245 * The hairpin binding configuration.
248 * 0 on success, a negative errno value otherwise and rte_errno is set.
251 mlx5_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
253 const struct rte_eth_hairpin_conf *hairpin_conf)
255 struct mlx5_priv *priv = dev->data->dev_private;
256 struct mlx5_txq_data *txq = (*priv->txqs)[idx];
257 struct mlx5_txq_ctrl *txq_ctrl =
258 container_of(txq, struct mlx5_txq_ctrl, txq);
261 res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
264 if (hairpin_conf->peer_count != 1 ||
265 hairpin_conf->peers[0].port != dev->data->port_id ||
266 hairpin_conf->peers[0].queue >= priv->rxqs_n) {
267 DRV_LOG(ERR, "port %u unable to setup hairpin queue index %u "
268 " invalid hairpind configuration", dev->data->port_id,
273 txq_ctrl = mlx5_txq_hairpin_new(dev, idx, desc, hairpin_conf);
275 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
276 dev->data->port_id, idx);
279 DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
280 dev->data->port_id, idx);
281 (*priv->txqs)[idx] = &txq_ctrl->txq;
286 * DPDK callback to release a TX queue.
289 * Generic TX queue pointer.
292 mlx5_tx_queue_release(void *dpdk_txq)
294 struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
295 struct mlx5_txq_ctrl *txq_ctrl;
296 struct mlx5_priv *priv;
301 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
302 priv = txq_ctrl->priv;
303 for (i = 0; (i != priv->txqs_n); ++i)
304 if ((*priv->txqs)[i] == txq) {
305 DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
306 PORT_ID(priv), txq->idx);
307 mlx5_txq_release(ETH_DEV(priv), i);
313 * Configure the doorbell register non-cached attribute.
316 * Pointer to Tx queue control structure.
321 txq_uar_ncattr_init(struct mlx5_txq_ctrl *txq_ctrl, size_t page_size)
323 struct mlx5_priv *priv = txq_ctrl->priv;
326 txq_ctrl->txq.db_heu = priv->config.dbnc == MLX5_TXDB_HEURISTIC;
327 txq_ctrl->txq.db_nc = 0;
328 /* Check the doorbell register mapping type. */
329 cmd = txq_ctrl->uar_mmap_offset / page_size;
330 cmd >>= MLX5_UAR_MMAP_CMD_SHIFT;
331 cmd &= MLX5_UAR_MMAP_CMD_MASK;
332 if (cmd == MLX5_MMAP_GET_NC_PAGES_CMD)
333 txq_ctrl->txq.db_nc = 1;
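/*
 * Worked example of the decoding above (illustrative; the shift/mask
 * values are assumptions, not taken from this file). With a 4096-byte
 * page, MLX5_UAR_MMAP_CMD_SHIFT == 8 and MLX5_UAR_MMAP_CMD_MASK == 0xff:
 *   uar_mmap_offset = 0x301000
 *   cmd = 0x301000 / 4096 = 0x301;  cmd >>= 8  -> 0x3;  cmd &= 0xff -> 0x3
 * If that command equals MLX5_MMAP_GET_NC_PAGES_CMD, the doorbell page
 * was mapped non-cached and db_nc is set to 1.
 */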
337 * Initialize Tx UAR registers for primary process.
340 * Pointer to Tx queue control structure.
343 txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl)
345 struct mlx5_priv *priv = txq_ctrl->priv;
346 struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
348 unsigned int lock_idx;
350 const size_t page_size = rte_mem_page_size();
351 if (page_size == (size_t)-1) {
352 DRV_LOG(ERR, "Failed to get mem page size");
356 if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
358 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
360 ppriv->uar_table[txq_ctrl->txq.idx] = txq_ctrl->bf_reg;
361 txq_uar_ncattr_init(txq_ctrl, page_size);
363 /* Assign a UAR lock according to the UAR page number. */
364 lock_idx = (txq_ctrl->uar_mmap_offset / page_size) &
365 MLX5_UAR_PAGE_NUM_MASK;
366 txq_ctrl->txq.uar_lock = &priv->sh->uar_lock[lock_idx];
371 * Remap UAR register of a Tx queue for secondary process.
373 * The remapped address is stored in the table in the process private structure of
374 * the device, indexed by queue index.
377 * Pointer to Tx queue control structure.
379 * Verbs file descriptor to map UAR pages.
382 * 0 on success, a negative errno value otherwise and rte_errno is set.
385 txq_uar_init_secondary(struct mlx5_txq_ctrl *txq_ctrl, int fd)
387 struct mlx5_priv *priv = txq_ctrl->priv;
388 struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
389 struct mlx5_txq_data *txq = &txq_ctrl->txq;
393 const size_t page_size = rte_mem_page_size();
394 if (page_size == (size_t)-1) {
395 DRV_LOG(ERR, "Failed to get mem page size");
400 if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
404 * As in rdma-core, UARs are mapped at OS page size granularity.
405 * See the libmlx5 function mlx5_init_context().
407 uar_va = (uintptr_t)txq_ctrl->bf_reg;
408 offset = uar_va & (page_size - 1); /* Offset in page. */
409 addr = rte_mem_map(NULL, page_size, RTE_PROT_WRITE, RTE_MAP_SHARED,
410 fd, txq_ctrl->uar_mmap_offset);
413 "port %u mmap failed for BF reg of txq %u",
414 txq->port_id, txq->idx);
418 addr = RTE_PTR_ADD(addr, offset);
419 ppriv->uar_table[txq->idx] = addr;
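/*
 * Sketch of the remapping arithmetic above (illustrative addresses,
 * assuming a 4096-byte OS page):
 *   bf_reg in the primary process = 0x7f0000301080
 *   offset within the page        = 0x7f0000301080 & 0xfff = 0x080
 *   rte_mem_map() returns a fresh page-aligned VA in the secondary
 *   process, e.g. 0x7f2200445000, mapped through fd at uar_mmap_offset
 *   remapped doorbell address     = 0x7f2200445000 + 0x080
 * Only the in-page offset of the primary mapping is reused.
 */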
420 txq_uar_ncattr_init(txq_ctrl, page_size);
425 * Unmap UAR register of a Tx queue for secondary process.
428 * Pointer to Tx queue control structure.
431 txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl)
433 struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(txq_ctrl->priv));
435 const size_t page_size = rte_mem_page_size();
436 if (page_size == (size_t)-1) {
437 DRV_LOG(ERR, "Failed to get mem page size");
441 if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
443 addr = ppriv->uar_table[txq_ctrl->txq.idx];
444 rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
448 * Deinitialize Tx UAR registers for secondary process.
451 * Pointer to Ethernet device.
454 mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev)
456 struct mlx5_priv *priv = dev->data->dev_private;
457 struct mlx5_txq_data *txq;
458 struct mlx5_txq_ctrl *txq_ctrl;
461 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
462 for (i = 0; i != priv->txqs_n; ++i) {
463 if (!(*priv->txqs)[i])
465 txq = (*priv->txqs)[i];
466 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
467 txq_uar_uninit_secondary(txq_ctrl);
472 * Initialize Tx UAR registers for secondary process.
475 * Pointer to Ethernet device.
477 * Verbs file descriptor to map UAR pages.
480 * 0 on success, a negative errno value otherwise and rte_errno is set.
483 mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
485 struct mlx5_priv *priv = dev->data->dev_private;
486 struct mlx5_txq_data *txq;
487 struct mlx5_txq_ctrl *txq_ctrl;
491 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
492 for (i = 0; i != priv->txqs_n; ++i) {
493 if (!(*priv->txqs)[i])
495 txq = (*priv->txqs)[i];
496 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
497 if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
499 MLX5_ASSERT(txq->idx == (uint16_t)i);
500 ret = txq_uar_init_secondary(txq_ctrl, fd);
508 if (!(*priv->txqs)[i])
510 txq = (*priv->txqs)[i];
511 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
512 txq_uar_uninit_secondary(txq_ctrl);
518 * Create the Tx hairpin queue object.
521 * Pointer to Ethernet device.
523 * Queue index in DPDK Tx queue array.
526 * The hairpin DevX object initialised, NULL otherwise and rte_errno is set.
528 static struct mlx5_txq_obj *
529 mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
531 struct mlx5_priv *priv = dev->data->dev_private;
532 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
533 struct mlx5_txq_ctrl *txq_ctrl =
534 container_of(txq_data, struct mlx5_txq_ctrl, txq);
535 struct mlx5_devx_create_sq_attr attr = { 0 };
536 struct mlx5_txq_obj *tmpl = NULL;
537 uint32_t max_wq_data;
539 MLX5_ASSERT(txq_data);
540 MLX5_ASSERT(!txq_ctrl->obj);
541 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
545 "port %u Tx queue %u cannot allocate memory resources",
546 dev->data->port_id, txq_data->idx);
550 tmpl->type = MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN;
551 tmpl->txq_ctrl = txq_ctrl;
554 max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
555 /* Jumbo frames > 9 KB and multiple packets should be supported. */
556 if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
557 if (priv->config.log_hp_size > max_wq_data) {
558 DRV_LOG(ERR, "total data size 2^%u is too large "
559 "for hairpin",
560 priv->config.log_hp_size);
565 attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
567 attr.wq_attr.log_hairpin_data_sz =
568 (max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
569 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
571 /* Set the number of packets to the maximum value for performance. */
572 attr.wq_attr.log_hairpin_num_packets =
573 attr.wq_attr.log_hairpin_data_sz -
574 MLX5_HAIRPIN_QUEUE_STRIDE;
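/*
 * Example of the sizing above (illustrative; the constant values are
 * assumptions): with MLX5_HAIRPIN_JUMBO_LOG_SIZE == 15 (32 KB of data)
 * and MLX5_HAIRPIN_QUEUE_STRIDE == 6 (64-byte strides), a device
 * reporting max_wq_data >= 15 would get:
 *   log_hairpin_data_sz     = 15
 *   log_hairpin_num_packets = 15 - 6 = 9 (512 packet slots)
 * i.e. the packet count is derived from the data size assuming one
 * stride per packet slot.
 */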
575 attr.tis_num = priv->sh->tis->id;
576 tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->ctx, &attr);
579 "port %u tx hairpin queue %u can't create sq object",
580 dev->data->port_id, idx);
585 DRV_LOG(DEBUG, "port %u Tx queue %u updated with %p", dev->data->port_id,
587 rte_atomic32_inc(&tmpl->refcnt);
588 LIST_INSERT_HEAD(&priv->txqsobj, tmpl, next);
593 * Destroy the Tx queue DevX object.
596 * Txq object to destroy
599 txq_release_sq_resources(struct mlx5_txq_obj *txq_obj)
601 MLX5_ASSERT(txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_SQ);
603 if (txq_obj->sq_devx)
604 claim_zero(mlx5_devx_cmd_destroy(txq_obj->sq_devx));
605 if (txq_obj->sq_dbrec_page)
606 claim_zero(mlx5_release_dbr
607 (&txq_obj->txq_ctrl->priv->dbrpgs,
609 (txq_obj->sq_dbrec_page->umem),
610 txq_obj->sq_dbrec_offset));
611 if (txq_obj->sq_umem)
612 claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->sq_umem));
614 mlx5_free(txq_obj->sq_buf);
615 if (txq_obj->cq_devx)
616 claim_zero(mlx5_devx_cmd_destroy(txq_obj->cq_devx));
617 if (txq_obj->cq_dbrec_page)
618 claim_zero(mlx5_release_dbr
619 (&txq_obj->txq_ctrl->priv->dbrpgs,
621 (txq_obj->cq_dbrec_page->umem),
622 txq_obj->cq_dbrec_offset));
623 if (txq_obj->cq_umem)
624 claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->cq_umem));
626 mlx5_free(txq_obj->cq_buf);
630 * Create the Tx queue DevX object.
633 * Pointer to Ethernet device.
635 * Queue index in DPDK Tx queue array.
638 * The DevX object initialised, NULL otherwise and rte_errno is set.
640 static struct mlx5_txq_obj *
641 mlx5_txq_obj_devx_new(struct rte_eth_dev *dev, uint16_t idx)
643 #ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
644 DRV_LOG(ERR, "port %u Tx queue %u cannot create with DevX, no UAR",
645 dev->data->port_id, idx);
649 struct mlx5_priv *priv = dev->data->dev_private;
650 struct mlx5_dev_ctx_shared *sh = priv->sh;
651 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
652 struct mlx5_txq_ctrl *txq_ctrl =
653 container_of(txq_data, struct mlx5_txq_ctrl, txq);
654 struct mlx5_devx_create_sq_attr sq_attr = { 0 };
655 struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
656 struct mlx5_devx_cq_attr cq_attr = { 0 };
657 struct mlx5_txq_obj *txq_obj = NULL;
659 struct mlx5_cqe *cqe;
661 size_t alignment = (size_t)-1;
664 MLX5_ASSERT(txq_data);
665 MLX5_ASSERT(!txq_ctrl->obj);
666 page_size = rte_mem_page_size();
667 if (page_size == (size_t)-1) {
668 DRV_LOG(ERR, "Failed to get mem page size");
672 txq_obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
673 sizeof(struct mlx5_txq_obj), 0,
677 "port %u Tx queue %u cannot allocate memory resources",
678 dev->data->port_id, txq_data->idx);
682 txq_obj->type = MLX5_TXQ_OBJ_TYPE_DEVX_SQ;
683 txq_obj->txq_ctrl = txq_ctrl;
685 /* Create the Completion Queue. */
686 nqe = (1UL << txq_data->elts_n) / MLX5_TX_COMP_THRESH +
687 1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
688 nqe = 1UL << log2above(nqe);
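/*
 * Worked example of the CQE count above (illustrative; the threshold
 * values are assumptions): with 1024 Tx descriptors,
 * MLX5_TX_COMP_THRESH == 32 and MLX5_TX_COMP_THRESH_INLINE_DIV == 8:
 *   nqe = 1024 / 32 + 1 + 8 = 41
 *   nqe = 1 << log2above(41) = 64
 * so the CQ holds the next power of two above the number of completions
 * that may be outstanding at once.
 */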
689 if (nqe > UINT16_MAX) {
691 "port %u Tx queue %u requests to many CQEs %u",
692 dev->data->port_id, txq_data->idx, nqe);
696 /* Allocate memory buffer for CQEs. */
697 alignment = MLX5_CQE_BUF_ALIGNMENT;
698 if (alignment == (size_t)-1) {
699 DRV_LOG(ERR, "Failed to get mem page size");
703 txq_obj->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
704 nqe * sizeof(struct mlx5_cqe),
707 if (!txq_obj->cq_buf) {
709 "port %u Tx queue %u cannot allocate memory (CQ)",
710 dev->data->port_id, txq_data->idx);
714 txq_data->cqe_n = log2above(nqe);
715 txq_data->cqe_s = 1 << txq_data->cqe_n;
716 txq_data->cqe_m = txq_data->cqe_s - 1;
717 txq_data->cqes = (volatile struct mlx5_cqe *)txq_obj->cq_buf;
720 /* Register allocated buffer in user space with DevX. */
721 txq_obj->cq_umem = mlx5_glue->devx_umem_reg
723 (void *)txq_obj->cq_buf,
724 nqe * sizeof(struct mlx5_cqe),
725 IBV_ACCESS_LOCAL_WRITE);
726 if (!txq_obj->cq_umem) {
729 "port %u Tx queue %u cannot register memory (CQ)",
730 dev->data->port_id, txq_data->idx);
733 /* Allocate doorbell record for completion queue. */
734 txq_obj->cq_dbrec_offset = mlx5_get_dbr(sh->ctx,
736 &txq_obj->cq_dbrec_page);
737 if (txq_obj->cq_dbrec_offset < 0)
739 txq_data->cq_db = (volatile uint32_t *)(txq_obj->cq_dbrec_page->dbrs +
740 txq_obj->cq_dbrec_offset);
741 *txq_data->cq_db = 0;
742 /* Create completion queue object with DevX. */
743 cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
744 MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
745 cq_attr.uar_page_id = sh->tx_uar->page_id;
746 cq_attr.eqn = sh->txpp.eqn;
747 cq_attr.q_umem_valid = 1;
748 cq_attr.q_umem_offset = (uintptr_t)txq_obj->cq_buf % page_size;
749 cq_attr.q_umem_id = txq_obj->cq_umem->umem_id;
750 cq_attr.db_umem_valid = 1;
751 cq_attr.db_umem_offset = txq_obj->cq_dbrec_offset;
752 cq_attr.db_umem_id = mlx5_os_get_umem_id(txq_obj->cq_dbrec_page->umem);
753 cq_attr.log_cq_size = rte_log2_u32(nqe);
754 cq_attr.log_page_size = rte_log2_u32(page_size);
755 txq_obj->cq_devx = mlx5_devx_cmd_create_cq(sh->ctx, &cq_attr);
756 if (!txq_obj->cq_devx) {
758 DRV_LOG(ERR, "port %u Tx queue %u CQ creation failure",
759 dev->data->port_id, idx);
762 /* Initially fill the CQ buffer with the invalid CQE opcode. */
763 cqe = (struct mlx5_cqe *)txq_obj->cq_buf;
764 for (i = 0; i < txq_data->cqe_s; i++) {
765 cqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK;
768 /* Create the Work Queue. */
769 nqe = RTE_MIN(1UL << txq_data->elts_n,
770 (uint32_t)sh->device_attr.max_qp_wr);
771 txq_obj->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
772 nqe * sizeof(struct mlx5_wqe),
773 page_size, sh->numa_node);
774 if (!txq_obj->sq_buf) {
776 "port %u Tx queue %u cannot allocate memory (SQ)",
777 dev->data->port_id, txq_data->idx);
781 txq_data->wqe_n = log2above(nqe);
782 txq_data->wqe_s = 1 << txq_data->wqe_n;
783 txq_data->wqe_m = txq_data->wqe_s - 1;
784 txq_data->wqes = (struct mlx5_wqe *)txq_obj->sq_buf;
785 txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
786 txq_data->wqe_ci = 0;
787 txq_data->wqe_pi = 0;
788 txq_data->wqe_comp = 0;
789 txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
790 /* Register allocated buffer in user space with DevX. */
791 txq_obj->sq_umem = mlx5_glue->devx_umem_reg
793 (void *)txq_obj->sq_buf,
794 nqe * sizeof(struct mlx5_wqe),
795 IBV_ACCESS_LOCAL_WRITE);
796 if (!txq_obj->sq_umem) {
799 "port %u Tx queue %u cannot register memory (SQ)",
800 dev->data->port_id, txq_data->idx);
803 /* Allocate doorbell record for send queue. */
804 txq_obj->sq_dbrec_offset = mlx5_get_dbr(sh->ctx,
806 &txq_obj->sq_dbrec_page);
807 if (txq_obj->sq_dbrec_offset < 0)
809 txq_data->qp_db = (volatile uint32_t *)
810 (txq_obj->sq_dbrec_page->dbrs +
811 txq_obj->sq_dbrec_offset +
812 MLX5_SND_DBR * sizeof(uint32_t));
813 *txq_data->qp_db = 0;
814 /* Create Send Queue object with DevX. */
815 sq_attr.tis_lst_sz = 1;
816 sq_attr.tis_num = sh->tis->id;
817 sq_attr.state = MLX5_SQC_STATE_RST;
818 sq_attr.cqn = txq_obj->cq_devx->id;
819 sq_attr.flush_in_error_en = 1;
820 sq_attr.allow_multi_pkt_send_wqe = !!priv->config.mps;
821 sq_attr.allow_swp = !!priv->config.swp;
822 sq_attr.min_wqe_inline_mode = priv->config.hca_attr.vport_inline_mode;
823 sq_attr.wq_attr.uar_page = sh->tx_uar->page_id;
824 sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
825 sq_attr.wq_attr.pd = sh->pdn;
826 sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
827 sq_attr.wq_attr.log_wq_sz = txq_data->wqe_n;
828 sq_attr.wq_attr.dbr_umem_valid = 1;
829 sq_attr.wq_attr.dbr_addr = txq_obj->sq_dbrec_offset;
830 sq_attr.wq_attr.dbr_umem_id =
831 mlx5_os_get_umem_id(txq_obj->sq_dbrec_page->umem);
832 sq_attr.wq_attr.wq_umem_valid = 1;
833 sq_attr.wq_attr.wq_umem_id = txq_obj->sq_umem->umem_id;
834 sq_attr.wq_attr.wq_umem_offset = (uintptr_t)txq_obj->sq_buf % page_size;
835 txq_obj->sq_devx = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr);
836 if (!txq_obj->sq_devx) {
838 DRV_LOG(ERR, "port %u Tx queue %u SQ creation failure",
839 dev->data->port_id, idx);
842 txq_data->qp_num_8s = txq_obj->sq_devx->id << 8;
843 /* Change Send Queue state to Ready-to-Send. */
844 msq_attr.sq_state = MLX5_SQC_STATE_RST;
845 msq_attr.state = MLX5_SQC_STATE_RDY;
846 ret = mlx5_devx_cmd_modify_sq(txq_obj->sq_devx, &msq_attr);
850 "port %u Tx queue %u SP state to SQC_STATE_RDY failed",
851 dev->data->port_id, idx);
854 txq_data->fcqs = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
855 txq_data->cqe_s * sizeof(*txq_data->fcqs),
858 if (!txq_data->fcqs) {
859 DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory (FCQ)",
860 dev->data->port_id, idx);
864 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
866 * If using DevX, we need to query and store the TIS transport domain value.
867 * This is done once per port.
868 * This value is used on Rx when creating a matching TIR.
870 if (priv->config.devx && !priv->sh->tdn)
871 priv->sh->tdn = priv->sh->td->id;
873 MLX5_ASSERT(sh->tx_uar);
874 MLX5_ASSERT(sh->tx_uar->reg_addr);
875 txq_ctrl->bf_reg = sh->tx_uar->reg_addr;
876 txq_ctrl->uar_mmap_offset = sh->tx_uar->mmap_off;
877 rte_atomic32_set(&txq_obj->refcnt, 1);
878 txq_uar_init(txq_ctrl);
879 LIST_INSERT_HEAD(&priv->txqsobj, txq_obj, next);
882 ret = rte_errno; /* Save rte_errno before cleanup. */
883 txq_release_sq_resources(txq_obj);
884 if (txq_data->fcqs) {
885 mlx5_free(txq_data->fcqs);
886 txq_data->fcqs = NULL;
889 rte_errno = ret; /* Restore rte_errno. */
895 * Create the Tx queue Verbs object.
898 * Pointer to Ethernet device.
900 * Queue index in DPDK Tx queue array.
902 * Type of the Tx queue object to create.
905 * The Verbs object initialised, NULL otherwise and rte_errno is set.
907 struct mlx5_txq_obj *
908 mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
909 enum mlx5_txq_obj_type type)
911 struct mlx5_priv *priv = dev->data->dev_private;
912 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
913 struct mlx5_txq_ctrl *txq_ctrl =
914 container_of(txq_data, struct mlx5_txq_ctrl, txq);
915 struct mlx5_txq_obj tmpl;
916 struct mlx5_txq_obj *txq_obj = NULL;
918 struct ibv_qp_init_attr_ex init;
919 struct ibv_cq_init_attr_ex cq;
920 struct ibv_qp_attr mod;
923 struct mlx5dv_qp qp = { .comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET };
924 struct mlx5dv_cq cq_info;
925 struct mlx5dv_obj obj;
926 const int desc = 1 << txq_data->elts_n;
929 if (type == MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN)
930 return mlx5_txq_obj_hairpin_new(dev, idx);
931 if (type == MLX5_TXQ_OBJ_TYPE_DEVX_SQ)
932 return mlx5_txq_obj_devx_new(dev, idx);
933 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
934 /* If using DevX, need additional mask to read tisn value. */
935 if (priv->config.devx && !priv->sh->tdn)
936 qp.comp_mask |= MLX5DV_QP_MASK_RAW_QP_HANDLES;
938 MLX5_ASSERT(txq_data);
939 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
940 priv->verbs_alloc_ctx.obj = txq_ctrl;
941 if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
943 "port %u MLX5_ENABLE_CQE_COMPRESSION must never be set",
948 memset(&tmpl, 0, sizeof(struct mlx5_txq_obj));
949 attr.cq = (struct ibv_cq_init_attr_ex){
952 cqe_n = desc / MLX5_TX_COMP_THRESH +
953 1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
954 tmpl.cq = mlx5_glue->create_cq(priv->sh->ctx, cqe_n, NULL, NULL, 0);
955 if (tmpl.cq == NULL) {
956 DRV_LOG(ERR, "port %u Tx queue %u CQ creation failure",
957 dev->data->port_id, idx);
961 attr.init = (struct ibv_qp_init_attr_ex){
962 /* CQ to be associated with the send queue. */
964 /* CQ to be associated with the receive queue. */
967 /* Max number of outstanding WRs. */
969 ((priv->sh->device_attr.max_qp_wr <
971 priv->sh->device_attr.max_qp_wr :
974 * Max number of scatter/gather elements in a WR,
975 * must be 1 to prevent libmlx5 from trying to affect
976 * too much memory. TX gather is not impacted by the
977 * device_attr.max_sge limit and will still work
982 .qp_type = IBV_QPT_RAW_PACKET,
984 * Do *NOT* enable this; completion events are managed per
989 .comp_mask = IBV_QP_INIT_ATTR_PD,
991 if (txq_data->inlen_send)
992 attr.init.cap.max_inline_data = txq_ctrl->max_inline_data;
993 if (txq_data->tso_en) {
994 attr.init.max_tso_header = txq_ctrl->max_tso_header;
995 attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
997 tmpl.qp = mlx5_glue->create_qp_ex(priv->sh->ctx, &attr.init);
998 if (tmpl.qp == NULL) {
999 DRV_LOG(ERR, "port %u Tx queue %u QP creation failure",
1000 dev->data->port_id, idx);
1004 attr.mod = (struct ibv_qp_attr){
1005 /* Move the QP to this state. */
1006 .qp_state = IBV_QPS_INIT,
1007 /* IB device port number. */
1008 .port_num = (uint8_t)priv->dev_port,
1010 ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod,
1011 (IBV_QP_STATE | IBV_QP_PORT));
1014 "port %u Tx queue %u QP state to IBV_QPS_INIT failed",
1015 dev->data->port_id, idx);
1019 attr.mod = (struct ibv_qp_attr){
1020 .qp_state = IBV_QPS_RTR
1022 ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
1025 "port %u Tx queue %u QP state to IBV_QPS_RTR failed",
1026 dev->data->port_id, idx);
1030 attr.mod.qp_state = IBV_QPS_RTS;
1031 ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
1034 "port %u Tx queue %u QP state to IBV_QPS_RTS failed",
1035 dev->data->port_id, idx);
1039 txq_obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
1040 sizeof(struct mlx5_txq_obj), 0,
1043 DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory",
1044 dev->data->port_id, idx);
1048 obj.cq.in = tmpl.cq;
1049 obj.cq.out = &cq_info;
1050 obj.qp.in = tmpl.qp;
1052 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
1057 if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
1059 "port %u wrong MLX5_CQE_SIZE environment variable"
1060 " value: it should be set to %u",
1061 dev->data->port_id, RTE_CACHE_LINE_SIZE);
1065 txq_data->cqe_n = log2above(cq_info.cqe_cnt);
1066 txq_data->cqe_s = 1 << txq_data->cqe_n;
1067 txq_data->cqe_m = txq_data->cqe_s - 1;
1068 txq_data->qp_num_8s = tmpl.qp->qp_num << 8;
1069 txq_data->wqes = qp.sq.buf;
1070 txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
1071 txq_data->wqe_s = 1 << txq_data->wqe_n;
1072 txq_data->wqe_m = txq_data->wqe_s - 1;
1073 txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
1074 txq_data->qp_db = &qp.dbrec[MLX5_SND_DBR];
1075 txq_data->cq_db = cq_info.dbrec;
1076 txq_data->cqes = (volatile struct mlx5_cqe *)cq_info.buf;
1077 txq_data->cq_ci = 0;
1078 txq_data->cq_pi = 0;
1079 txq_data->wqe_ci = 0;
1080 txq_data->wqe_pi = 0;
1081 txq_data->wqe_comp = 0;
1082 txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
1083 txq_data->fcqs = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
1084 txq_data->cqe_s * sizeof(*txq_data->fcqs),
1085 RTE_CACHE_LINE_SIZE, txq_ctrl->socket);
1086 if (!txq_data->fcqs) {
1087 DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory (FCQ)",
1088 dev->data->port_id, idx);
1092 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
1094 * If using DevX, we need to query and store the TIS transport domain value.
1095 * This is done once per port.
1096 * This value is used on Rx when creating a matching TIR.
1098 if (priv->config.devx && !priv->sh->tdn) {
1099 ret = mlx5_devx_cmd_qp_query_tis_td(tmpl.qp, qp.tisn,
1102 DRV_LOG(ERR, "Failed to query port %u Tx queue %u QP TIS "
1103 "transport domain", dev->data->port_id, idx);
1107 DRV_LOG(DEBUG, "port %u Tx queue %u TIS number %d "
1108 "transport domain %d", dev->data->port_id,
1109 idx, qp.tisn, priv->sh->tdn);
1113 txq_obj->qp = tmpl.qp;
1114 txq_obj->cq = tmpl.cq;
1115 rte_atomic32_inc(&txq_obj->refcnt);
1116 txq_ctrl->bf_reg = qp.bf.reg;
1117 if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
1118 txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
1119 DRV_LOG(DEBUG, "port %u: uar_mmap_offset 0x%"PRIx64,
1120 dev->data->port_id, txq_ctrl->uar_mmap_offset);
1123 "port %u failed to retrieve UAR info, invalid"
1125 dev->data->port_id);
1129 txq_uar_init(txq_ctrl);
1130 LIST_INSERT_HEAD(&priv->txqsobj, txq_obj, next);
1131 txq_obj->txq_ctrl = txq_ctrl;
1132 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1135 ret = rte_errno; /* Save rte_errno before cleanup. */
1137 claim_zero(mlx5_glue->destroy_cq(tmpl.cq));
1139 claim_zero(mlx5_glue->destroy_qp(tmpl.qp));
1140 if (txq_data && txq_data->fcqs) {
1141 mlx5_free(txq_data->fcqs);
1142 txq_data->fcqs = NULL;
1146 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1147 rte_errno = ret; /* Restore rte_errno. */
1152 * Get a Tx queue Verbs object.
1155 * Pointer to Ethernet device.
1157 * Queue index in DPDK Tx queue array.
1160 * The Verbs object if it exists.
1162 struct mlx5_txq_obj *
1163 mlx5_txq_obj_get(struct rte_eth_dev *dev, uint16_t idx)
1165 struct mlx5_priv *priv = dev->data->dev_private;
1166 struct mlx5_txq_ctrl *txq_ctrl;
1168 if (idx >= priv->txqs_n)
1170 if (!(*priv->txqs)[idx])
1172 txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
1174 rte_atomic32_inc(&txq_ctrl->obj->refcnt);
1175 return txq_ctrl->obj;
1179 * Release a Tx Verbs queue object.
1182 * Verbs Tx queue object.
1185 * 1 while a reference on it exists, 0 when freed.
1188 mlx5_txq_obj_release(struct mlx5_txq_obj *txq_obj)
1190 MLX5_ASSERT(txq_obj);
1191 if (rte_atomic32_dec_and_test(&txq_obj->refcnt)) {
1192 if (txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN) {
1194 claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
1195 } else if (txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_SQ) {
1196 txq_release_sq_resources(txq_obj);
1198 claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
1199 claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
1201 if (txq_obj->txq_ctrl->txq.fcqs) {
1202 mlx5_free(txq_obj->txq_ctrl->txq.fcqs);
1203 txq_obj->txq_ctrl->txq.fcqs = NULL;
1205 LIST_REMOVE(txq_obj, next);
1213 * Verify that the Verbs Tx queue list is empty.
1216 * Pointer to Ethernet device.
1219 * The number of objects not released.
1222 mlx5_txq_obj_verify(struct rte_eth_dev *dev)
1224 struct mlx5_priv *priv = dev->data->dev_private;
1226 struct mlx5_txq_obj *txq_obj;
1228 LIST_FOREACH(txq_obj, &priv->txqsobj, next) {
1229 DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
1230 dev->data->port_id, txq_obj->txq_ctrl->txq.idx);
1237 * Calculate the total number of WQEBBs for the Tx queue.
1239 * Simplified version of calc_sq_size() in rdma-core.
1242 * Pointer to Tx queue control structure.
1245 * The number of WQEBBs.
1248 txq_calc_wqebb_cnt(struct mlx5_txq_ctrl *txq_ctrl)
1250 unsigned int wqe_size;
1251 const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
1253 wqe_size = MLX5_WQE_CSEG_SIZE +
1254 MLX5_WQE_ESEG_SIZE +
1256 MLX5_ESEG_MIN_INLINE_SIZE +
1257 txq_ctrl->max_inline_data;
1258 return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;
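/*
 * Worked example (illustrative numbers, not derived from the headers):
 * if the per-descriptor WQE size computed above is 256 bytes and the
 * queue has 512 descriptors, then assuming the usual 64-byte MLX5_WQE_SIZE:
 *   rte_align32pow2(256 * 512) / 64 = 131072 / 64 = 2048 WQEBBs
 * This count is later checked against device_attr.max_qp_wr in
 * mlx5_txq_new().
 */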
1262 * Calculate the maximal inline data size for Tx queue.
1265 * Pointer to Tx queue control structure.
1268 * The maximal inline data size.
1271 txq_calc_inline_max(struct mlx5_txq_ctrl *txq_ctrl)
1273 const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
1274 struct mlx5_priv *priv = txq_ctrl->priv;
1275 unsigned int wqe_size;
1277 wqe_size = priv->sh->device_attr.max_qp_wr / desc;
1281 * This calculation is derived from the source of
1282 * mlx5_calc_send_wqe() in the rdma-core library.
1284 wqe_size = wqe_size * MLX5_WQE_SIZE -
1285 MLX5_WQE_CSEG_SIZE -
1286 MLX5_WQE_ESEG_SIZE -
1289 MLX5_DSEG_MIN_INLINE_SIZE;
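/*
 * Illustrative numbers for the cap above (assumptions, not taken from
 * the headers): if the device reports max_qp_wr == 32768 WQEBBs and the
 * queue has 1024 descriptors, each descriptor may use at most
 * 32768 / 1024 = 32 WQEBBs, i.e. 32 * 64 = 2048 bytes of WQE space.
 * Subtracting the control/Ethernet segment overhead above yields the
 * maximal inline data budget that txq_adjust_params() compares against
 * the configured inline lengths.
 */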
1294 * Set Tx queue parameters from device configuration.
1297 * Pointer to Tx queue control structure.
1300 txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
1302 struct mlx5_priv *priv = txq_ctrl->priv;
1303 struct mlx5_dev_config *config = &priv->config;
1304 unsigned int inlen_send; /* Inline data for ordinary SEND.*/
1305 unsigned int inlen_empw; /* Inline data for enhanced MPW. */
1306 unsigned int inlen_mode; /* Minimal required Inline data. */
1307 unsigned int txqs_inline; /* Min Tx queues to enable inline. */
1308 uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
1309 bool tso = txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
1310 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1311 DEV_TX_OFFLOAD_GRE_TNL_TSO |
1312 DEV_TX_OFFLOAD_IP_TNL_TSO |
1313 DEV_TX_OFFLOAD_UDP_TNL_TSO);
1317 if (config->txqs_inline == MLX5_ARG_UNSET)
1319 #if defined(RTE_ARCH_ARM64)
1320 (priv->pci_dev->id.device_id ==
1321 PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) ?
1322 MLX5_INLINE_MAX_TXQS_BLUEFIELD :
1324 MLX5_INLINE_MAX_TXQS;
1326 txqs_inline = (unsigned int)config->txqs_inline;
1327 inlen_send = (config->txq_inline_max == MLX5_ARG_UNSET) ?
1328 MLX5_SEND_DEF_INLINE_LEN :
1329 (unsigned int)config->txq_inline_max;
1330 inlen_empw = (config->txq_inline_mpw == MLX5_ARG_UNSET) ?
1331 MLX5_EMPW_DEF_INLINE_LEN :
1332 (unsigned int)config->txq_inline_mpw;
1333 inlen_mode = (config->txq_inline_min == MLX5_ARG_UNSET) ?
1334 0 : (unsigned int)config->txq_inline_min;
1335 if (config->mps != MLX5_MPW_ENHANCED && config->mps != MLX5_MPW)
1338 * If a minimal amount of data to inline is requested,
1339 * we MUST enable inlining. This is the case for ConnectX-4,
1340 * which usually requires L2 headers to be inlined for correct
1341 * operation, and for ConnectX-4 Lx, which requires L2-L4 headers
1342 * to be inlined to support E-Switch flows.
1345 if (inlen_mode <= MLX5_ESEG_MIN_INLINE_SIZE) {
1347 * Optimize minimal inlining for single
1348 * segment packets to fill one WQEBB
1351 temp = MLX5_ESEG_MIN_INLINE_SIZE;
1353 temp = inlen_mode - MLX5_ESEG_MIN_INLINE_SIZE;
1354 temp = RTE_ALIGN(temp, MLX5_WSEG_SIZE) +
1355 MLX5_ESEG_MIN_INLINE_SIZE;
1356 temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
1358 if (temp != inlen_mode) {
1360 "port %u minimal required inline setting"
1361 " aligned from %u to %u",
1362 PORT_ID(priv), inlen_mode, temp);
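/*
 * Worked example of the alignment above (illustrative; the values
 * MLX5_ESEG_MIN_INLINE_SIZE == 18 and MLX5_WSEG_SIZE == 16 are
 * assumptions): a requested inlen_mode of 40 gives
 *   temp = 40 - 18 = 22
 *   temp = RTE_ALIGN(22, 16) + 18 = 32 + 18 = 50
 * so the minimal inline length is padded to end on a WQE segment
 * boundary beyond the bytes carried in the Ethernet Segment itself.
 */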
1367 * If the port is configured to support VLAN insertion and the device
1368 * does not support this feature in HW (on NICs before ConnectX-5,
1369 * or when the wqe_vlan_insert flag is not set), we must enable
1370 * data inline on all queues because it is supported by single
1373 txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
1374 vlan_inline = (dev_txoff & DEV_TX_OFFLOAD_VLAN_INSERT) &&
1375 !config->hw_vlan_insert;
1377 * If there are only a few Tx queues, saving CPU cycles is
1378 * prioritized and data inlining is disabled entirely.
1380 if (inlen_send && priv->txqs_n >= txqs_inline) {
1382 * The data sent with an ordinary MLX5_OPCODE_SEND
1383 * may be inlined in the Ethernet Segment; align the
1384 * length accordingly to fit entire WQEBBs.
1386 temp = RTE_MAX(inlen_send,
1387 MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE);
1388 temp -= MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
1389 temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
1390 temp += MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
1391 temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
1392 MLX5_ESEG_MIN_INLINE_SIZE -
1393 MLX5_WQE_CSEG_SIZE -
1394 MLX5_WQE_ESEG_SIZE -
1395 MLX5_WQE_DSEG_SIZE * 2);
1396 temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
1397 temp = RTE_MAX(temp, inlen_mode);
1398 if (temp != inlen_send) {
1400 "port %u ordinary send inline setting"
1401 " aligned from %u to %u",
1402 PORT_ID(priv), inlen_send, temp);
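/*
 * Worked example of the SEND inline alignment above (illustrative;
 * MLX5_ESEG_MIN_INLINE_SIZE == 18, MLX5_WQE_DSEG_SIZE == 16 and
 * MLX5_WQE_SIZE == 64 are assumptions): a requested inlen_send of 200,
 * with none of the upper caps hit, becomes
 *   temp = RTE_MAX(200, 18 + 16) = 200
 *   temp = 200 - 34 = 166
 *   temp = RTE_ALIGN(166, 64) + 34 = 192 + 34 = 226
 * so the inlined data ends exactly on a WQEBB boundary once the
 * Ethernet Segment bytes and one data segment are accounted for.
 */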
1406 * Not aligned to cache lines, but to WQEs.
1407 * First bytes of data (initial alignment)
1408 * are going to be copied explicitly at the
1409 * beginning of inlining buffer in Ethernet
1412 MLX5_ASSERT(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
1413 MLX5_ASSERT(inlen_send <= MLX5_WQE_SIZE_MAX +
1414 MLX5_ESEG_MIN_INLINE_SIZE -
1415 MLX5_WQE_CSEG_SIZE -
1416 MLX5_WQE_ESEG_SIZE -
1417 MLX5_WQE_DSEG_SIZE * 2);
1418 } else if (inlen_mode) {
1420 * If minimal inlining is requested we must
1421 * enable inlining in general, despite the
1422 * number of configured queues. Ignore the
1423 * txq_inline_max devarg, this is not
1424 * full-featured inline.
1426 inlen_send = inlen_mode;
1428 } else if (vlan_inline) {
1430 * Hardware does not report the offload for
1431 * VLAN insertion, so we must enable data inlining
1432 * to implement the feature in software.
1434 inlen_send = MLX5_ESEG_MIN_INLINE_SIZE;
1440 txq_ctrl->txq.inlen_send = inlen_send;
1441 txq_ctrl->txq.inlen_mode = inlen_mode;
1442 txq_ctrl->txq.inlen_empw = 0;
1443 if (inlen_send && inlen_empw && priv->txqs_n >= txqs_inline) {
1445 * The data sent with MLX5_OPCODE_ENHANCED_MPSW
1446 * may be inlined in the Data Segment; align the
1447 * length accordingly to fit entire WQEBBs.
1449 temp = RTE_MAX(inlen_empw,
1450 MLX5_WQE_SIZE + MLX5_DSEG_MIN_INLINE_SIZE);
1451 temp -= MLX5_DSEG_MIN_INLINE_SIZE;
1452 temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
1453 temp += MLX5_DSEG_MIN_INLINE_SIZE;
1454 temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
1455 MLX5_DSEG_MIN_INLINE_SIZE -
1456 MLX5_WQE_CSEG_SIZE -
1457 MLX5_WQE_ESEG_SIZE -
1458 MLX5_WQE_DSEG_SIZE);
1459 temp = RTE_MIN(temp, MLX5_EMPW_MAX_INLINE_LEN);
1460 if (temp != inlen_empw) {
1462 "port %u enhanced empw inline setting"
1463 " aligned from %u to %u",
1464 PORT_ID(priv), inlen_empw, temp);
1467 MLX5_ASSERT(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
1468 MLX5_ASSERT(inlen_empw <= MLX5_WQE_SIZE_MAX +
1469 MLX5_DSEG_MIN_INLINE_SIZE -
1470 MLX5_WQE_CSEG_SIZE -
1471 MLX5_WQE_ESEG_SIZE -
1472 MLX5_WQE_DSEG_SIZE);
1473 txq_ctrl->txq.inlen_empw = inlen_empw;
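/*
 * The enhanced MPW branch above mirrors the SEND case. For example
 * (illustrative, assuming MLX5_DSEG_MIN_INLINE_SIZE == 12 and
 * MLX5_WQE_SIZE == 64), a requested inlen_empw of 200 becomes
 *   RTE_ALIGN(200 - 12, 64) + 12 = 192 + 12 = 204
 * again so that data inlined in the Data Segment fills whole WQEBBs.
 */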
1475 txq_ctrl->max_inline_data = RTE_MAX(inlen_send, inlen_empw);
1477 txq_ctrl->max_tso_header = MLX5_MAX_TSO_HEADER;
1478 txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->max_inline_data,
1479 MLX5_MAX_TSO_HEADER);
1480 txq_ctrl->txq.tso_en = 1;
1482 txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
1483 txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
1484 DEV_TX_OFFLOAD_UDP_TNL_TSO |
1485 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
1486 txq_ctrl->txq.offloads) && config->swp;
1490 * Adjust Tx queue data inline parameters for large queue sizes.
1491 * The data inline feature requires multiple WQEs to fit the packets,
1492 * and if a large number of Tx descriptors is requested by the application,
1493 * the total WQE amount may exceed the hardware capabilities. If the
1494 * default inline settings are used, we can try to adjust them to
1495 * meet the hardware requirements without exceeding the queue size.
1498 * Pointer to Tx queue control structure.
1501 * Zero on success, otherwise the parameters cannot be adjusted.
1504 txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
1506 struct mlx5_priv *priv = txq_ctrl->priv;
1507 struct mlx5_dev_config *config = &priv->config;
1508 unsigned int max_inline;
1510 max_inline = txq_calc_inline_max(txq_ctrl);
1511 if (!txq_ctrl->txq.inlen_send) {
1513 * Inline data feature is not engaged at all.
1514 * There is nothing to adjust.
1518 if (txq_ctrl->max_inline_data <= max_inline) {
1520 * The requested inline data length does not
1521 * exceed queue capabilities.
1525 if (txq_ctrl->txq.inlen_mode > max_inline) {
1527 "minimal data inline requirements (%u) are not"
1528 " satisfied (%u) on port %u, try the smaller"
1529 " Tx queue size (%d)",
1530 txq_ctrl->txq.inlen_mode, max_inline,
1531 priv->dev_data->port_id,
1532 priv->sh->device_attr.max_qp_wr);
1535 if (txq_ctrl->txq.inlen_send > max_inline &&
1536 config->txq_inline_max != MLX5_ARG_UNSET &&
1537 config->txq_inline_max > (int)max_inline) {
1539 "txq_inline_max requirements (%u) are not"
1540 " satisfied (%u) on port %u, try the smaller"
1541 " Tx queue size (%d)",
1542 txq_ctrl->txq.inlen_send, max_inline,
1543 priv->dev_data->port_id,
1544 priv->sh->device_attr.max_qp_wr);
1547 if (txq_ctrl->txq.inlen_empw > max_inline &&
1548 config->txq_inline_mpw != MLX5_ARG_UNSET &&
1549 config->txq_inline_mpw > (int)max_inline) {
1551 "txq_inline_mpw requirements (%u) are not"
1552 " satisfied (%u) on port %u, try the smaller"
1553 " Tx queue size (%d)",
1554 txq_ctrl->txq.inlen_empw, max_inline,
1555 priv->dev_data->port_id,
1556 priv->sh->device_attr.max_qp_wr);
1559 if (txq_ctrl->txq.tso_en && max_inline < MLX5_MAX_TSO_HEADER) {
1561 "tso header inline requirements (%u) are not"
1562 " satisfied (%u) on port %u, try the smaller"
1563 " Tx queue size (%d)",
1564 MLX5_MAX_TSO_HEADER, max_inline,
1565 priv->dev_data->port_id,
1566 priv->sh->device_attr.max_qp_wr);
1569 if (txq_ctrl->txq.inlen_send > max_inline) {
1571 "adjust txq_inline_max (%u->%u)"
1572 " due to large Tx queue on port %u",
1573 txq_ctrl->txq.inlen_send, max_inline,
1574 priv->dev_data->port_id);
1575 txq_ctrl->txq.inlen_send = max_inline;
1577 if (txq_ctrl->txq.inlen_empw > max_inline) {
1579 "adjust txq_inline_mpw (%u->%u)"
1580 "due to large Tx queue on port %u",
1581 txq_ctrl->txq.inlen_empw, max_inline,
1582 priv->dev_data->port_id);
1583 txq_ctrl->txq.inlen_empw = max_inline;
1585 txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->txq.inlen_send,
1586 txq_ctrl->txq.inlen_empw);
1587 MLX5_ASSERT(txq_ctrl->max_inline_data <= max_inline);
1588 MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= max_inline);
1589 MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
1590 MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw ||
1591 !txq_ctrl->txq.inlen_empw);
1599 * Create a DPDK Tx queue.
1602 * Pointer to Ethernet device.
1606 * Number of descriptors to configure in queue.
1608 * NUMA socket on which memory must be allocated.
1610 * Thresholds parameters.
1613 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1615 struct mlx5_txq_ctrl *
1616 mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1617 unsigned int socket, const struct rte_eth_txconf *conf)
1619 struct mlx5_priv *priv = dev->data->dev_private;
1620 struct mlx5_txq_ctrl *tmpl;
1622 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
1623 desc * sizeof(struct rte_mbuf *), 0, socket);
1628 if (mlx5_mr_btree_init(&tmpl->txq.mr_ctrl.cache_bh,
1629 MLX5_MR_BTREE_CACHE_N, socket)) {
1630 /* rte_errno is already set. */
1633 /* Save the pointer to the global generation number to check memory events. */
1634 tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->sh->share_cache.dev_gen;
1635 MLX5_ASSERT(desc > MLX5_TX_COMP_THRESH);
1636 tmpl->txq.offloads = conf->offloads |
1637 dev->data->dev_conf.txmode.offloads;
1639 tmpl->socket = socket;
1640 tmpl->txq.elts_n = log2above(desc);
1641 tmpl->txq.elts_s = desc;
1642 tmpl->txq.elts_m = desc - 1;
1643 tmpl->txq.port_id = dev->data->port_id;
1644 tmpl->txq.idx = idx;
1645 txq_set_params(tmpl);
1646 if (txq_adjust_params(tmpl))
1648 if (txq_calc_wqebb_cnt(tmpl) >
1649 priv->sh->device_attr.max_qp_wr) {
1651 "port %u Tx WQEBB count (%d) exceeds the limit (%d),"
1652 " try smaller queue size",
1653 dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
1654 priv->sh->device_attr.max_qp_wr);
1658 rte_atomic32_inc(&tmpl->refcnt);
1659 tmpl->type = MLX5_TXQ_TYPE_STANDARD;
1660 LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
1668 * Create a DPDK Tx hairpin queue.
1671 * Pointer to Ethernet device.
1675 * Number of descriptors to configure in queue.
1676 * @param hairpin_conf
1677 * The hairpin configuration.
1680 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1682 struct mlx5_txq_ctrl *
1683 mlx5_txq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1684 const struct rte_eth_hairpin_conf *hairpin_conf)
1686 struct mlx5_priv *priv = dev->data->dev_private;
1687 struct mlx5_txq_ctrl *tmpl;
1689 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
1696 tmpl->socket = SOCKET_ID_ANY;
1697 tmpl->txq.elts_n = log2above(desc);
1698 tmpl->txq.port_id = dev->data->port_id;
1699 tmpl->txq.idx = idx;
1700 tmpl->hairpin_conf = *hairpin_conf;
1701 tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
1702 rte_atomic32_inc(&tmpl->refcnt);
1703 LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
1711 * Pointer to Ethernet device.
1716 * A pointer to the queue if it exists.
1718 struct mlx5_txq_ctrl *
1719 mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
1721 struct mlx5_priv *priv = dev->data->dev_private;
1722 struct mlx5_txq_ctrl *ctrl = NULL;
1724 if ((*priv->txqs)[idx]) {
1725 ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl,
1727 mlx5_txq_obj_get(dev, idx);
1728 rte_atomic32_inc(&ctrl->refcnt);
1734 * Release a Tx queue.
1737 * Pointer to Ethernet device.
1742 * 1 while a reference on it exists, 0 when freed.
1745 mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
1747 struct mlx5_priv *priv = dev->data->dev_private;
1748 struct mlx5_txq_ctrl *txq;
1750 if (!(*priv->txqs)[idx])
1752 txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
1753 if (txq->obj && !mlx5_txq_obj_release(txq->obj))
1755 if (rte_atomic32_dec_and_test(&txq->refcnt)) {
1757 mlx5_mr_btree_free(&txq->txq.mr_ctrl.cache_bh);
1758 LIST_REMOVE(txq, next);
1760 (*priv->txqs)[idx] = NULL;
1767 * Verify if the queue can be released.
1770 * Pointer to Ethernet device.
1775 * 1 if the queue can be released.
1778 mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
1780 struct mlx5_priv *priv = dev->data->dev_private;
1781 struct mlx5_txq_ctrl *txq;
1783 if (!(*priv->txqs)[idx])
1785 txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
1786 return (rte_atomic32_read(&txq->refcnt) == 1);
1790 * Verify that the Tx queue list is empty.
1793 * Pointer to Ethernet device.
1796 * The number of objects not released.
1799 mlx5_txq_verify(struct rte_eth_dev *dev)
1801 struct mlx5_priv *priv = dev->data->dev_private;
1802 struct mlx5_txq_ctrl *txq_ctrl;
1805 LIST_FOREACH(txq_ctrl, &priv->txqsctrl, next) {
1806 DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
1807 dev->data->port_id, txq_ctrl->txq.idx);
1814 * Set the Tx queue dynamic timestamp (mask and offset).
1817 * Pointer to the Ethernet device structure.
1820 mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev)
1822 struct mlx5_priv *priv = dev->data->dev_private;
1823 struct mlx5_dev_ctx_shared *sh = priv->sh;
1824 struct mlx5_txq_data *data;
1829 nbit = rte_mbuf_dynflag_lookup
1830 (RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
1831 off = rte_mbuf_dynfield_lookup
1832 (RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
1833 if (nbit > 0 && off >= 0 && sh->txpp.refcnt)
1834 mask = 1ULL << nbit;
1835 for (i = 0; i != priv->txqs_n; ++i) {
1836 data = (*priv->txqs)[i];
1840 data->ts_mask = mask;
1841 data->ts_offset = off;
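/*
 * Example (illustrative values): if the Tx timestamp dynamic flag was
 * registered at bit 40, the dynamic field at byte offset 112, and Tx
 * packet pacing is active (sh->txpp.refcnt != 0), each Tx queue gets
 *   ts_mask   = 1ULL << 40
 *   ts_offset = 112
 * so the burst routine can test mbuf->ol_flags against ts_mask and read
 * the scheduled time from the mbuf at ts_offset. Real values come from
 * rte_mbuf_dynflag_lookup() and rte_mbuf_dynfield_lookup() above.
 */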