/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd

/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#pragma GCC diagnostic ignored "-Wpedantic"
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#pragma GCC diagnostic error "-Wpedantic"

#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_common.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common.h>
#include <mlx5_common_mr.h>
#include <mlx5_common_os.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"

 * Allocate TX queue elements.
 *   Pointer to TX queue structure.
txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
	const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;

	for (i = 0; (i != elts_n); ++i)
		txq_ctrl->txq.elts[i] = NULL;
	DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
		PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx, elts_n);
	txq_ctrl->txq.elts_head = 0;
	txq_ctrl->txq.elts_tail = 0;
	txq_ctrl->txq.elts_comp = 0;
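/*
 * Illustrative sketch (not part of the driver, compiled out behind a
 * hypothetical MLX5_TXQ_EXAMPLES guard): elts_n is stored as the log2 of
 * the ring size, so the ring always holds a power-of-two number of slots
 * and the monotonically growing head/tail counters can be wrapped with a
 * simple mask, as txq_free_elts() does below.
 */
#ifdef MLX5_TXQ_EXAMPLES
static inline uint16_t
example_elts_slot(uint16_t counter, uint16_t log_elts_n)
{
	const uint16_t elts_m = (1u << log_elts_n) - 1;

	/* E.g. counter == 1027 with log_elts_n == 10 (1024 slots) -> slot 3. */
	return counter & elts_m;
}
#endif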
 * Free TX queue elements.
 *   Pointer to TX queue structure.
txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
	const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
	const uint16_t elts_m = elts_n - 1;
	uint16_t elts_head = txq_ctrl->txq.elts_head;
	uint16_t elts_tail = txq_ctrl->txq.elts_tail;
	struct rte_mbuf *(*elts)[elts_n] = &txq_ctrl->txq.elts;

	DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
		PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx);
	txq_ctrl->txq.elts_head = 0;
	txq_ctrl->txq.elts_tail = 0;
	txq_ctrl->txq.elts_comp = 0;

	while (elts_tail != elts_head) {
		struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];

		MLX5_ASSERT(elt != NULL);
		rte_pktmbuf_free_seg(elt);
#ifdef RTE_LIBRTE_MLX5_DEBUG
		memset(&(*elts)[elts_tail & elts_m],
		       sizeof((*elts)[elts_tail & elts_m]));

 * Returns the per-port supported offloads.
 *   Pointer to Ethernet device.
 *   Supported Tx offloads.
mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
	struct mlx5_priv *priv = dev->data->dev_private;
	uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
			     DEV_TX_OFFLOAD_VLAN_INSERT);
	struct mlx5_dev_config *config = &priv->config;

		offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
			     DEV_TX_OFFLOAD_UDP_CKSUM |
			     DEV_TX_OFFLOAD_TCP_CKSUM);
		offloads |= DEV_TX_OFFLOAD_TCP_TSO;
		offloads |= DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP;
			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
			offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
				     DEV_TX_OFFLOAD_UDP_TNL_TSO);
	if (config->tunnel_en) {
			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
			offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
				     DEV_TX_OFFLOAD_GRE_TNL_TSO |
				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
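/*
 * Illustrative sketch (application side, not part of the driver): the
 * offload bits accumulated above are reported through the generic ethdev
 * API, so an application would typically probe them before enabling an
 * offload. rte_eth_dev_info_get() and the DEV_TX_OFFLOAD_* flags are
 * standard DPDK API; the MLX5_TXQ_EXAMPLES guard is hypothetical.
 */
#ifdef MLX5_TXQ_EXAMPLES
#include <rte_ethdev.h>

static int
example_port_supports_tso(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	int ret = rte_eth_dev_info_get(port_id, &dev_info);

	if (ret != 0)
		return ret;
	/* Non-zero when the port advertises TCP TSO support. */
	return !!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO);
}
#endif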
 * Tx queue presetup checks.
 *   Pointer to Ethernet device structure.
 *   Number of descriptors to configure in queue.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
	struct mlx5_priv *priv = dev->data->dev_private;

	if (*desc <= MLX5_TX_COMP_THRESH) {
			"port %u number of descriptors requested for Tx queue"
			" %u must be higher than MLX5_TX_COMP_THRESH, using %u"
			" instead of %u", dev->data->port_id, idx,
			MLX5_TX_COMP_THRESH + 1, *desc);
		*desc = MLX5_TX_COMP_THRESH + 1;
	if (!rte_is_power_of_2(*desc)) {
		*desc = 1 << log2above(*desc);
			"port %u increased number of descriptors in Tx queue"
			" %u to the next power of two (%d)",
			dev->data->port_id, idx, *desc);
	DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
		dev->data->port_id, idx, *desc);
	if (idx >= priv->txqs_n) {
		DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
			dev->data->port_id, idx, priv->txqs_n);
		rte_errno = EOVERFLOW;
	if (!mlx5_txq_releasable(dev, idx)) {
		DRV_LOG(ERR, "port %u unable to release queue index %u",
			dev->data->port_id, idx);
	mlx5_txq_release(dev, idx);
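/*
 * Worked example for the adjustments above (illustrative, assuming the
 * default MLX5_TX_COMP_THRESH of 32): a request for 20 descriptors is
 * first raised to 33 (the threshold plus one), then rounded up to the
 * next power of two, 1 << log2above(33) == 64.
 */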
 * DPDK callback to configure a TX queue.
 *   Pointer to Ethernet device structure.
 *   Number of descriptors to configure in queue.
 *   NUMA socket on which memory must be allocated.
 *   Thresholds parameters.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_txconf *conf)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);

	res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
	txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
			dev->data->port_id, idx);
	DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
		dev->data->port_id, idx);
	(*priv->txqs)[idx] = &txq_ctrl->txq;
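/*
 * Illustrative sketch (application side): this callback is reached
 * through the generic rte_eth_tx_queue_setup() API. A minimal usage,
 * with an assumed port_id and descriptor count, could look as follows;
 * the MLX5_TXQ_EXAMPLES guard is hypothetical.
 */
#ifdef MLX5_TXQ_EXAMPLES
#include <rte_ethdev.h>

static int
example_setup_txq(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_dev_info dev_info;
	int ret = rte_eth_dev_info_get(port_id, &dev_info);

	if (ret != 0)
		return ret;
	/* 512 descriptors; mlx5 may round the count up as described above. */
	return rte_eth_tx_queue_setup(port_id, queue_id, 512,
				      rte_eth_dev_socket_id(port_id),
				      &dev_info.default_txconf);
}
#endif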
 * DPDK callback to configure a TX hairpin queue.
 *   Pointer to Ethernet device structure.
 *   Number of descriptors to configure in queue.
 * @param[in] hairpin_conf
 *   The hairpin binding configuration.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
			    const struct rte_eth_hairpin_conf *hairpin_conf)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);

	res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
	if (hairpin_conf->peer_count != 1 ||
	    hairpin_conf->peers[0].port != dev->data->port_id ||
	    hairpin_conf->peers[0].queue >= priv->rxqs_n) {
		DRV_LOG(ERR, "port %u unable to setup hairpin queue index %u:"
			" invalid hairpin configuration", dev->data->port_id,
	txq_ctrl = mlx5_txq_hairpin_new(dev, idx, desc, hairpin_conf);
		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
			dev->data->port_id, idx);
	DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
		dev->data->port_id, idx);
	(*priv->txqs)[idx] = &txq_ctrl->txq;
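/*
 * Illustrative sketch (application side): a hairpin Tx queue is bound to
 * exactly one peer Rx queue on the same port, matching the checks above.
 * rte_eth_tx_hairpin_queue_setup() and struct rte_eth_hairpin_conf are
 * standard DPDK API; the queue numbers and the MLX5_TXQ_EXAMPLES guard
 * are assumptions.
 */
#ifdef MLX5_TXQ_EXAMPLES
#include <rte_ethdev.h>

static int
example_setup_hairpin_txq(uint16_t port_id, uint16_t tx_queue,
			  uint16_t peer_rx_queue)
{
	struct rte_eth_hairpin_conf conf = {
		.peer_count = 1, /* Exactly one peer is accepted here. */
	};

	conf.peers[0].port = port_id;	     /* Same port, as required above. */
	conf.peers[0].queue = peer_rx_queue; /* Must be < number of Rx queues. */
	/* 0 descriptors requests the driver default. */
	return rte_eth_tx_hairpin_queue_setup(port_id, tx_queue, 0, &conf);
}
#endif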
 * DPDK callback to release a TX queue.
 *   Generic TX queue pointer.
mlx5_tx_queue_release(void *dpdk_txq)
	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
	struct mlx5_txq_ctrl *txq_ctrl;
	struct mlx5_priv *priv;

	txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
	priv = txq_ctrl->priv;
	for (i = 0; (i != priv->txqs_n); ++i)
		if ((*priv->txqs)[i] == txq) {
			DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
				PORT_ID(priv), txq->idx);
			mlx5_txq_release(ETH_DEV(priv), i);

 * Configure the doorbell register non-cached attribute.
 *   Pointer to Tx queue control structure.
txq_uar_ncattr_init(struct mlx5_txq_ctrl *txq_ctrl, size_t page_size)
	struct mlx5_priv *priv = txq_ctrl->priv;

	txq_ctrl->txq.db_heu = priv->config.dbnc == MLX5_TXDB_HEURISTIC;
	txq_ctrl->txq.db_nc = 0;
	/* Check the doorbell register mapping type. */
	cmd = txq_ctrl->uar_mmap_offset / page_size;
	cmd >>= MLX5_UAR_MMAP_CMD_SHIFT;
	cmd &= MLX5_UAR_MMAP_CMD_MASK;
	if (cmd == MLX5_MMAP_GET_NC_PAGES_CMD)
		txq_ctrl->txq.db_nc = 1;
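/*
 * Worked example for the decoding above (illustrative): the mmap offset
 * encodes a command in its page-number bits; when those bits equal
 * MLX5_MMAP_GET_NC_PAGES_CMD, the UAR page was mapped non-cached, so
 * db_nc is set and doorbell writes follow the non-cached scheme.
 */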
 * Initialize Tx UAR registers for primary process.
 *   Pointer to Tx queue control structure.
txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl)
	struct mlx5_priv *priv = txq_ctrl->priv;
	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
	const size_t page_size = sysconf(_SC_PAGESIZE);
	unsigned int lock_idx;

	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	ppriv->uar_table[txq_ctrl->txq.idx] = txq_ctrl->bf_reg;
	txq_uar_ncattr_init(txq_ctrl, page_size);
	/* Assign a UAR lock according to UAR page number. */
	lock_idx = (txq_ctrl->uar_mmap_offset / page_size) &
		   MLX5_UAR_PAGE_NUM_MASK;
	txq_ctrl->txq.uar_lock = &priv->sh->uar_lock[lock_idx];

 * Remap UAR register of a Tx queue for secondary process.
 * Remapped address is stored at the table in the process private structure of
 * the device, indexed by queue index.
 *   Pointer to Tx queue control structure.
 *   Verbs file descriptor to map UAR pages.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
txq_uar_init_secondary(struct mlx5_txq_ctrl *txq_ctrl, int fd)
	struct mlx5_priv *priv = txq_ctrl->priv;
	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
	struct mlx5_txq_data *txq = &txq_ctrl->txq;
	const size_t page_size = sysconf(_SC_PAGESIZE);

	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
	 * As in rdma-core, UARs are mapped at OS page size granularity.
	 * See the libmlx5 function mlx5_init_context().
	uar_va = (uintptr_t)txq_ctrl->bf_reg;
	offset = uar_va & (page_size - 1); /* Offset in page. */
	addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
		    txq_ctrl->uar_mmap_offset);
	if (addr == MAP_FAILED) {
			"port %u mmap failed for BF reg of txq %u",
			txq->port_id, txq->idx);
	addr = RTE_PTR_ADD(addr, offset);
	ppriv->uar_table[txq->idx] = addr;
	txq_uar_ncattr_init(txq_ctrl, page_size);
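/*
 * Worked example for the remap above (illustrative): with 4096-byte pages
 * and a primary-process bf_reg whose low bits are 0x800, offset becomes
 * 0x800; the secondary process mmap()s the whole UAR page at an arbitrary
 * address and rebuilds its doorbell pointer as addr + 0x800, preserving
 * the in-page offset of the primary mapping.
 */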
 * Unmap UAR register of a Tx queue for secondary process.
 *   Pointer to Tx queue control structure.
txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl)
	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(txq_ctrl->priv));
	const size_t page_size = sysconf(_SC_PAGESIZE);

	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
	addr = ppriv->uar_table[txq_ctrl->txq.idx];
	munmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);

 * Deinitialize Tx UAR registers for secondary process.
 *   Pointer to Ethernet device.
mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq;
	struct mlx5_txq_ctrl *txq_ctrl;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
	for (i = 0; i != priv->txqs_n; ++i) {
		if (!(*priv->txqs)[i])
		txq = (*priv->txqs)[i];
		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
		txq_uar_uninit_secondary(txq_ctrl);

 * Initialize Tx UAR registers for secondary process.
 *   Pointer to Ethernet device.
 *   Verbs file descriptor to map UAR pages.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq;
	struct mlx5_txq_ctrl *txq_ctrl;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
	for (i = 0; i != priv->txqs_n; ++i) {
		if (!(*priv->txqs)[i])
		txq = (*priv->txqs)[i];
		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
		if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
		MLX5_ASSERT(txq->idx == (uint16_t)i);
		ret = txq_uar_init_secondary(txq_ctrl, fd);
		if (!(*priv->txqs)[i])
		txq = (*priv->txqs)[i];
		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
		txq_uar_uninit_secondary(txq_ctrl);
 * Create the Tx hairpin queue object.
 *   Pointer to Ethernet device.
 *   Queue index in DPDK Tx queue array.
 *   The hairpin DevX object initialised, NULL otherwise and rte_errno is set.
static struct mlx5_txq_obj *
mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_devx_create_sq_attr attr = { 0 };
	struct mlx5_txq_obj *tmpl = NULL;
	uint32_t max_wq_data;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(!txq_ctrl->obj);
	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
			"port %u Tx queue %u cannot allocate memory resources",
			dev->data->port_id, txq_data->idx);
	tmpl->type = MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN;
	tmpl->txq_ctrl = txq_ctrl;
	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9 KB should be supported, and more packets. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "total data size %u power of 2 is "
				"too large for hairpin",
				priv->config.log_hp_size);
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
		attr.wq_attr.log_hairpin_data_sz =
			(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
			 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	/* Set the packets number to the maximum value for performance. */
	attr.wq_attr.log_hairpin_num_packets =
			attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
	attr.tis_num = priv->sh->tis->id;
	tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->ctx, &attr);
			"port %u Tx hairpin queue %u can't create SQ object",
			dev->data->port_id, idx);
	DRV_LOG(DEBUG, "port %u Tx queue %u updated with %p", dev->data->port_id,
	rte_atomic32_inc(&tmpl->refcnt);
	LIST_INSERT_HEAD(&priv->txqsobj, tmpl, next);

 * Destroy the Tx queue DevX object.
 *   Txq object to destroy.
txq_release_sq_resources(struct mlx5_txq_obj *txq_obj)
	MLX5_ASSERT(txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_SQ);

	if (txq_obj->sq_devx)
		claim_zero(mlx5_devx_cmd_destroy(txq_obj->sq_devx));
	if (txq_obj->sq_dbrec_page)
		claim_zero(mlx5_release_dbr
				(&txq_obj->txq_ctrl->priv->dbrpgs,
				(txq_obj->sq_dbrec_page->umem),
				txq_obj->sq_dbrec_offset));
	if (txq_obj->sq_umem)
		claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->sq_umem));
	mlx5_free(txq_obj->sq_buf);
	if (txq_obj->cq_devx)
		claim_zero(mlx5_devx_cmd_destroy(txq_obj->cq_devx));
	if (txq_obj->cq_dbrec_page)
		claim_zero(mlx5_release_dbr
				(&txq_obj->txq_ctrl->priv->dbrpgs,
				(txq_obj->cq_dbrec_page->umem),
				txq_obj->cq_dbrec_offset));
	if (txq_obj->cq_umem)
		claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->cq_umem));
	mlx5_free(txq_obj->cq_buf);
 * Create the Tx queue DevX object.
 *   Pointer to Ethernet device.
 *   Queue index in DPDK Tx queue array.
 *   The DevX object initialised, NULL otherwise and rte_errno is set.
static struct mlx5_txq_obj *
mlx5_txq_obj_devx_new(struct rte_eth_dev *dev, uint16_t idx)
#ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
	DRV_LOG(ERR, "port %u Tx queue %u cannot create with DevX, no UAR",
		dev->data->port_id, idx);
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_devx_create_sq_attr sq_attr = { 0 };
	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_txq_obj *txq_obj = NULL;
	size_t page_size = sysconf(_SC_PAGESIZE);
	struct mlx5_cqe *cqe;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(!txq_ctrl->obj);
	txq_obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
			      sizeof(struct mlx5_txq_obj), 0,
			"port %u Tx queue %u cannot allocate memory resources",
			dev->data->port_id, txq_data->idx);
	txq_obj->type = MLX5_TXQ_OBJ_TYPE_DEVX_SQ;
	txq_obj->txq_ctrl = txq_ctrl;
	/* Create the Completion Queue. */
	nqe = (1UL << txq_data->elts_n) / MLX5_TX_COMP_THRESH +
	      1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
	nqe = 1UL << log2above(nqe);
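	/*
	 * Worked example (illustrative, assuming the defaults
	 * MLX5_TX_COMP_THRESH == 32 and MLX5_TX_COMP_THRESH_INLINE_DIV == 8):
	 * with 1024 descriptors, nqe = 1024 / 32 + 1 + 8 = 41, rounded up
	 * to the power of two 64.
	 */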
	if (nqe > UINT16_MAX) {
			"port %u Tx queue %u requests too many CQEs %u",
			dev->data->port_id, txq_data->idx, nqe);
	/* Allocate memory buffer for CQEs. */
	txq_obj->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
				      nqe * sizeof(struct mlx5_cqe),
				      MLX5_CQE_BUF_ALIGNMENT,
	if (!txq_obj->cq_buf) {
			"port %u Tx queue %u cannot allocate memory (CQ)",
			dev->data->port_id, txq_data->idx);
	txq_data->cqe_n = log2above(nqe);
	txq_data->cqe_s = 1 << txq_data->cqe_n;
	txq_data->cqe_m = txq_data->cqe_s - 1;
	txq_data->cqes = (volatile struct mlx5_cqe *)txq_obj->cq_buf;
	/* Register allocated buffer in user space with DevX. */
	txq_obj->cq_umem = mlx5_glue->devx_umem_reg
					(void *)txq_obj->cq_buf,
					nqe * sizeof(struct mlx5_cqe),
					IBV_ACCESS_LOCAL_WRITE);
	if (!txq_obj->cq_umem) {
			"port %u Tx queue %u cannot register memory (CQ)",
			dev->data->port_id, txq_data->idx);
	/* Allocate doorbell record for completion queue. */
	txq_obj->cq_dbrec_offset = mlx5_get_dbr(sh->ctx,
						&txq_obj->cq_dbrec_page);
	if (txq_obj->cq_dbrec_offset < 0)
	txq_data->cq_db = (volatile uint32_t *)(txq_obj->cq_dbrec_page->dbrs +
						txq_obj->cq_dbrec_offset);
	*txq_data->cq_db = 0;
	/* Create completion queue object with DevX. */
	cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
			   MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
	cq_attr.uar_page_id = sh->tx_uar->page_id;
	cq_attr.eqn = sh->txpp.eqn;
	cq_attr.q_umem_valid = 1;
	cq_attr.q_umem_offset = (uintptr_t)txq_obj->cq_buf % page_size;
	cq_attr.q_umem_id = txq_obj->cq_umem->umem_id;
	cq_attr.db_umem_valid = 1;
	cq_attr.db_umem_offset = txq_obj->cq_dbrec_offset;
	cq_attr.db_umem_id = mlx5_os_get_umem_id(txq_obj->cq_dbrec_page->umem);
	cq_attr.log_cq_size = rte_log2_u32(nqe);
	cq_attr.log_page_size = rte_log2_u32(page_size);
	txq_obj->cq_devx = mlx5_devx_cmd_create_cq(sh->ctx, &cq_attr);
	if (!txq_obj->cq_devx) {
		DRV_LOG(ERR, "port %u Tx queue %u CQ creation failure",
			dev->data->port_id, idx);
	/* Initially fill the CQ buffer with the invalid CQE opcode. */
	cqe = (struct mlx5_cqe *)txq_obj->cq_buf;
	for (i = 0; i < txq_data->cqe_s; i++) {
		cqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK;
	/* Create the Work Queue. */
	nqe = RTE_MIN(1UL << txq_data->elts_n,
		      (uint32_t)sh->device_attr.max_qp_wr);
	txq_obj->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
				      nqe * sizeof(struct mlx5_wqe),
				      page_size, sh->numa_node);
	if (!txq_obj->sq_buf) {
			"port %u Tx queue %u cannot allocate memory (SQ)",
			dev->data->port_id, txq_data->idx);
	txq_data->wqe_n = log2above(nqe);
	txq_data->wqe_s = 1 << txq_data->wqe_n;
	txq_data->wqe_m = txq_data->wqe_s - 1;
	txq_data->wqes = (struct mlx5_wqe *)txq_obj->sq_buf;
	txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
	txq_data->wqe_ci = 0;
	txq_data->wqe_pi = 0;
	txq_data->wqe_comp = 0;
	txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
	/* Register allocated buffer in user space with DevX. */
	txq_obj->sq_umem = mlx5_glue->devx_umem_reg
					(void *)txq_obj->sq_buf,
					nqe * sizeof(struct mlx5_wqe),
					IBV_ACCESS_LOCAL_WRITE);
	if (!txq_obj->sq_umem) {
			"port %u Tx queue %u cannot register memory (SQ)",
			dev->data->port_id, txq_data->idx);
	/* Allocate doorbell record for send queue. */
	txq_obj->sq_dbrec_offset = mlx5_get_dbr(sh->ctx,
						&txq_obj->sq_dbrec_page);
	if (txq_obj->sq_dbrec_offset < 0)
	txq_data->qp_db = (volatile uint32_t *)
				(txq_obj->sq_dbrec_page->dbrs +
				 txq_obj->sq_dbrec_offset +
				 MLX5_SND_DBR * sizeof(uint32_t));
	*txq_data->qp_db = 0;
	/* Create Send Queue object with DevX. */
	sq_attr.tis_lst_sz = 1;
	sq_attr.tis_num = sh->tis->id;
	sq_attr.state = MLX5_SQC_STATE_RST;
	sq_attr.cqn = txq_obj->cq_devx->id;
	sq_attr.flush_in_error_en = 1;
	sq_attr.allow_multi_pkt_send_wqe = !!priv->config.mps;
	sq_attr.allow_swp = !!priv->config.swp;
	sq_attr.min_wqe_inline_mode = priv->config.hca_attr.vport_inline_mode;
	sq_attr.wq_attr.uar_page = sh->tx_uar->page_id;
	sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
	sq_attr.wq_attr.pd = sh->pdn;
	sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
	sq_attr.wq_attr.log_wq_sz = txq_data->wqe_n;
	sq_attr.wq_attr.dbr_umem_valid = 1;
	sq_attr.wq_attr.dbr_addr = txq_obj->sq_dbrec_offset;
	sq_attr.wq_attr.dbr_umem_id =
			mlx5_os_get_umem_id(txq_obj->sq_dbrec_page->umem);
	sq_attr.wq_attr.wq_umem_valid = 1;
	sq_attr.wq_attr.wq_umem_id = txq_obj->sq_umem->umem_id;
	sq_attr.wq_attr.wq_umem_offset = (uintptr_t)txq_obj->sq_buf % page_size;
	txq_obj->sq_devx = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr);
	if (!txq_obj->sq_devx) {
		DRV_LOG(ERR, "port %u Tx queue %u SQ creation failure",
			dev->data->port_id, idx);
	txq_data->qp_num_8s = txq_obj->sq_devx->id << 8;
	/* Change Send Queue state to Ready-to-Send. */
	msq_attr.sq_state = MLX5_SQC_STATE_RST;
	msq_attr.state = MLX5_SQC_STATE_RDY;
	ret = mlx5_devx_cmd_modify_sq(txq_obj->sq_devx, &msq_attr);
			"port %u Tx queue %u SQ state to SQC_STATE_RDY failed",
			dev->data->port_id, idx);
	txq_data->fcqs = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
				     txq_data->cqe_s * sizeof(*txq_data->fcqs),
	if (!txq_data->fcqs) {
		DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory (FCQ)",
			dev->data->port_id, idx);
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	 * If using DevX, we need to query and store the TIS transport domain
	 * value. This is done once per port.
	 * This value will be used on Rx, when creating the matching TIR.
	if (priv->config.devx && !priv->sh->tdn)
		priv->sh->tdn = priv->sh->td->id;
	MLX5_ASSERT(sh->tx_uar);
	MLX5_ASSERT(sh->tx_uar->reg_addr);
	txq_ctrl->bf_reg = sh->tx_uar->reg_addr;
	txq_ctrl->uar_mmap_offset = sh->tx_uar->mmap_off;
	rte_atomic32_set(&txq_obj->refcnt, 1);
	txq_uar_init(txq_ctrl);
	LIST_INSERT_HEAD(&priv->txqsobj, txq_obj, next);
	ret = rte_errno; /* Save rte_errno before cleanup. */
	txq_release_sq_resources(txq_obj);
	if (txq_data->fcqs) {
		mlx5_free(txq_data->fcqs);
		txq_data->fcqs = NULL;
	rte_errno = ret; /* Restore rte_errno. */
 * Create the Tx queue Verbs object.
 *   Pointer to Ethernet device.
 *   Queue index in DPDK Tx queue array.
 *   Type of the Tx queue object to create.
 *   The Verbs object initialised, NULL otherwise and rte_errno is set.
struct mlx5_txq_obj *
mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
		 enum mlx5_txq_obj_type type)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_txq_obj tmpl;
	struct mlx5_txq_obj *txq_obj = NULL;
		struct ibv_qp_init_attr_ex init;
		struct ibv_cq_init_attr_ex cq;
		struct ibv_qp_attr mod;
	struct mlx5dv_qp qp = { .comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET };
	struct mlx5dv_cq cq_info;
	struct mlx5dv_obj obj;
	const int desc = 1 << txq_data->elts_n;

	if (type == MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN)
		return mlx5_txq_obj_hairpin_new(dev, idx);
	if (type == MLX5_TXQ_OBJ_TYPE_DEVX_SQ)
		return mlx5_txq_obj_devx_new(dev, idx);
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	/* If using DevX, need additional mask to read tisn value. */
	if (priv->config.devx && !priv->sh->tdn)
		qp.comp_mask |= MLX5DV_QP_MASK_RAW_QP_HANDLES;
	MLX5_ASSERT(txq_data);
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
	priv->verbs_alloc_ctx.obj = txq_ctrl;
	if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
			"port %u MLX5_ENABLE_CQE_COMPRESSION must never be set",
	memset(&tmpl, 0, sizeof(struct mlx5_txq_obj));
	attr.cq = (struct ibv_cq_init_attr_ex){
	cqe_n = desc / MLX5_TX_COMP_THRESH +
		1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
	tmpl.cq = mlx5_glue->create_cq(priv->sh->ctx, cqe_n, NULL, NULL, 0);
	if (tmpl.cq == NULL) {
		DRV_LOG(ERR, "port %u Tx queue %u CQ creation failure",
			dev->data->port_id, idx);
	attr.init = (struct ibv_qp_init_attr_ex){
		/* CQ to be associated with the send queue. */
		/* CQ to be associated with the receive queue. */
			/* Max number of outstanding WRs. */
			((priv->sh->device_attr.max_qp_wr <
			 priv->sh->device_attr.max_qp_wr :
			 * Max number of scatter/gather elements in a WR,
			 * must be 1 to prevent libmlx5 from trying to affect
			 * too much memory. TX gather is not impacted by the
			 * device_attr.max_sge limit and will still work
		.qp_type = IBV_QPT_RAW_PACKET,
		 * Do *NOT* enable this, completion events are managed per
		.comp_mask = IBV_QP_INIT_ATTR_PD,
	if (txq_data->inlen_send)
		attr.init.cap.max_inline_data = txq_ctrl->max_inline_data;
	if (txq_data->tso_en) {
		attr.init.max_tso_header = txq_ctrl->max_tso_header;
		attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
	tmpl.qp = mlx5_glue->create_qp_ex(priv->sh->ctx, &attr.init);
	if (tmpl.qp == NULL) {
		DRV_LOG(ERR, "port %u Tx queue %u QP creation failure",
			dev->data->port_id, idx);
	attr.mod = (struct ibv_qp_attr){
		/* Move the QP to this state. */
		.qp_state = IBV_QPS_INIT,
		/* IB device port number. */
		.port_num = (uint8_t)priv->dev_port,
	ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod,
				   (IBV_QP_STATE | IBV_QP_PORT));
			"port %u Tx queue %u QP state to IBV_QPS_INIT failed",
			dev->data->port_id, idx);
	attr.mod = (struct ibv_qp_attr){
		.qp_state = IBV_QPS_RTR
	ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
			"port %u Tx queue %u QP state to IBV_QPS_RTR failed",
			dev->data->port_id, idx);
	attr.mod.qp_state = IBV_QPS_RTS;
	ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
			"port %u Tx queue %u QP state to IBV_QPS_RTS failed",
			dev->data->port_id, idx);
	txq_obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
			      sizeof(struct mlx5_txq_obj), 0,
		DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory",
			dev->data->port_id, idx);
	obj.cq.in = tmpl.cq;
	obj.cq.out = &cq_info;
	obj.qp.in = tmpl.qp;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
			"port %u wrong MLX5_CQE_SIZE environment variable"
			" value: it should be set to %u",
			dev->data->port_id, RTE_CACHE_LINE_SIZE);
	txq_data->cqe_n = log2above(cq_info.cqe_cnt);
	txq_data->cqe_s = 1 << txq_data->cqe_n;
	txq_data->cqe_m = txq_data->cqe_s - 1;
	txq_data->qp_num_8s = tmpl.qp->qp_num << 8;
	txq_data->wqes = qp.sq.buf;
	txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
	txq_data->wqe_s = 1 << txq_data->wqe_n;
	txq_data->wqe_m = txq_data->wqe_s - 1;
	txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
	txq_data->qp_db = &qp.dbrec[MLX5_SND_DBR];
	txq_data->cq_db = cq_info.dbrec;
	txq_data->cqes = (volatile struct mlx5_cqe *)cq_info.buf;
	txq_data->cq_ci = 0;
	txq_data->cq_pi = 0;
	txq_data->wqe_ci = 0;
	txq_data->wqe_pi = 0;
	txq_data->wqe_comp = 0;
	txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
	txq_data->fcqs = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
				     txq_data->cqe_s * sizeof(*txq_data->fcqs),
				     RTE_CACHE_LINE_SIZE, txq_ctrl->socket);
	if (!txq_data->fcqs) {
		DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory (FCQ)",
			dev->data->port_id, idx);
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	 * If using DevX, we need to query and store the TIS transport domain
	 * value. This is done once per port.
	 * This value will be used on Rx, when creating the matching TIR.
	if (priv->config.devx && !priv->sh->tdn) {
		ret = mlx5_devx_cmd_qp_query_tis_td(tmpl.qp, qp.tisn,
			DRV_LOG(ERR, "Failed to query port %u Tx queue %u QP TIS "
				"transport domain", dev->data->port_id, idx);
		DRV_LOG(DEBUG, "port %u Tx queue %u TIS number %d "
			"transport domain %d", dev->data->port_id,
			idx, qp.tisn, priv->sh->tdn);
	txq_obj->qp = tmpl.qp;
	txq_obj->cq = tmpl.cq;
	rte_atomic32_inc(&txq_obj->refcnt);
	txq_ctrl->bf_reg = qp.bf.reg;
	if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
		txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
		DRV_LOG(DEBUG, "port %u: uar_mmap_offset 0x%"PRIx64,
			dev->data->port_id, txq_ctrl->uar_mmap_offset);
			"port %u failed to retrieve UAR info, invalid"
			dev->data->port_id);
	txq_uar_init(txq_ctrl);
	LIST_INSERT_HEAD(&priv->txqsobj, txq_obj, next);
	txq_obj->txq_ctrl = txq_ctrl;
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
	ret = rte_errno; /* Save rte_errno before cleanup. */
		claim_zero(mlx5_glue->destroy_cq(tmpl.cq));
		claim_zero(mlx5_glue->destroy_qp(tmpl.qp));
	if (txq_data && txq_data->fcqs) {
		mlx5_free(txq_data->fcqs);
		txq_data->fcqs = NULL;
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
	rte_errno = ret; /* Restore rte_errno. */
 * Get a Tx queue Verbs object.
 *   Pointer to Ethernet device.
 *   Queue index in DPDK Tx queue array.
 *   The Verbs object if it exists.
struct mlx5_txq_obj *
mlx5_txq_obj_get(struct rte_eth_dev *dev, uint16_t idx)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *txq_ctrl;

	if (idx >= priv->txqs_n)
	if (!(*priv->txqs)[idx])
	txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
	rte_atomic32_inc(&txq_ctrl->obj->refcnt);
	return txq_ctrl->obj;

 * Release a Tx queue Verbs object.
 *   Verbs Tx queue object.
 *   1 while a reference on it exists, 0 when freed.
mlx5_txq_obj_release(struct mlx5_txq_obj *txq_obj)
	MLX5_ASSERT(txq_obj);
	if (rte_atomic32_dec_and_test(&txq_obj->refcnt)) {
		if (txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN) {
			claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
		} else if (txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_SQ) {
			txq_release_sq_resources(txq_obj);
			claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
			claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
		if (txq_obj->txq_ctrl->txq.fcqs) {
			mlx5_free(txq_obj->txq_ctrl->txq.fcqs);
			txq_obj->txq_ctrl->txq.fcqs = NULL;
		LIST_REMOVE(txq_obj, next);

 * Verify the Verbs Tx queue list is empty.
 *   Pointer to Ethernet device.
 *   The number of objects not released.
mlx5_txq_obj_verify(struct rte_eth_dev *dev)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_obj *txq_obj;

	LIST_FOREACH(txq_obj, &priv->txqsobj, next) {
		DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
			dev->data->port_id, txq_obj->txq_ctrl->txq.idx);
 * Calculate the total number of WQEBBs for a Tx queue.
 * Simplified version of calc_sq_size() in rdma-core.
 *   Pointer to Tx queue control structure.
 *   The number of WQEBBs.
txq_calc_wqebb_cnt(struct mlx5_txq_ctrl *txq_ctrl)
	unsigned int wqe_size;
	const unsigned int desc = 1 << txq_ctrl->txq.elts_n;

	wqe_size = MLX5_WQE_CSEG_SIZE +
		   MLX5_WQE_ESEG_SIZE +
		   MLX5_ESEG_MIN_INLINE_SIZE +
		   txq_ctrl->max_inline_data;
	return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;
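/*
 * Worked example (illustrative): if the control and Ethernet segments plus
 * the minimal inline area sum to one 64-byte WQEBB (MLX5_WQE_SIZE) and no
 * extra inline data is configured, a 512-descriptor queue needs
 * rte_align32pow2(64 * 512) / 64 == 512 WQEBBs.
 */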
 * Calculate the maximal inline data size for Tx queue.
 *   Pointer to Tx queue control structure.
 *   The maximal inline data size.
txq_calc_inline_max(struct mlx5_txq_ctrl *txq_ctrl)
	const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
	struct mlx5_priv *priv = txq_ctrl->priv;
	unsigned int wqe_size;

	wqe_size = priv->sh->device_attr.max_qp_wr / desc;
	 * This calculation is derived from the source of
	 * mlx5_calc_send_wqe() in the rdma-core library.
	wqe_size = wqe_size * MLX5_WQE_SIZE -
		   MLX5_WQE_CSEG_SIZE -
		   MLX5_WQE_ESEG_SIZE -
		   MLX5_DSEG_MIN_INLINE_SIZE;
 * Set Tx queue parameters from device configuration.
 *   Pointer to Tx queue control structure.
txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
	struct mlx5_priv *priv = txq_ctrl->priv;
	struct mlx5_dev_config *config = &priv->config;
	unsigned int inlen_send; /* Inline data for ordinary SEND. */
	unsigned int inlen_empw; /* Inline data for enhanced MPW. */
	unsigned int inlen_mode; /* Minimal required inline data. */
	unsigned int txqs_inline; /* Min Tx queues to enable inline. */
	uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
	bool tso = txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
					     DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
					     DEV_TX_OFFLOAD_GRE_TNL_TSO |
					     DEV_TX_OFFLOAD_IP_TNL_TSO |
					     DEV_TX_OFFLOAD_UDP_TNL_TSO);

	if (config->txqs_inline == MLX5_ARG_UNSET)
#if defined(RTE_ARCH_ARM64)
		(priv->pci_dev->id.device_id ==
		 PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) ?
		MLX5_INLINE_MAX_TXQS_BLUEFIELD :
		MLX5_INLINE_MAX_TXQS;
		txqs_inline = (unsigned int)config->txqs_inline;
	inlen_send = (config->txq_inline_max == MLX5_ARG_UNSET) ?
		     MLX5_SEND_DEF_INLINE_LEN :
		     (unsigned int)config->txq_inline_max;
	inlen_empw = (config->txq_inline_mpw == MLX5_ARG_UNSET) ?
		     MLX5_EMPW_DEF_INLINE_LEN :
		     (unsigned int)config->txq_inline_mpw;
	inlen_mode = (config->txq_inline_min == MLX5_ARG_UNSET) ?
		     0 : (unsigned int)config->txq_inline_min;
	if (config->mps != MLX5_MPW_ENHANCED && config->mps != MLX5_MPW)
	 * If a minimal amount of data to inline is requested we MUST
	 * enable inlining. This is the case for ConnectX-4, which usually
	 * requires L2 to be inlined for correct operation, and for
	 * ConnectX-4 Lx, which requires L2-L4 to be inlined to
	 * support E-Switch flows.
		if (inlen_mode <= MLX5_ESEG_MIN_INLINE_SIZE) {
			 * Optimize minimal inlining for single
			 * segment packets to fill one WQEBB
			temp = MLX5_ESEG_MIN_INLINE_SIZE;
			temp = inlen_mode - MLX5_ESEG_MIN_INLINE_SIZE;
			temp = RTE_ALIGN(temp, MLX5_WSEG_SIZE) +
			       MLX5_ESEG_MIN_INLINE_SIZE;
			temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
		if (temp != inlen_mode) {
				"port %u minimal required inline setting"
				" aligned from %u to %u",
				PORT_ID(priv), inlen_mode, temp);
	 * If the port is configured to support VLAN insertion and the device
	 * does not support this feature in HW (for NICs before ConnectX-5,
	 * or if the wqe_vlan_insert flag is not set), we must enable data
	 * inlining on all queues because the feature is supported by a single
	txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
	vlan_inline = (dev_txoff & DEV_TX_OFFLOAD_VLAN_INSERT) &&
		      !config->hw_vlan_insert;
	 * If there are few Tx queues, saving CPU cycles is prioritized
	 * and data inlining is disabled altogether.
	if (inlen_send && priv->txqs_n >= txqs_inline) {
		 * The data sent with an ordinary MLX5_OPCODE_SEND
		 * may be inlined in the Ethernet segment; align the
		 * length accordingly to fit entire WQEBBs.
		temp = RTE_MAX(inlen_send,
			       MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE);
		temp -= MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
		temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
		temp += MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
		temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
				     MLX5_ESEG_MIN_INLINE_SIZE -
				     MLX5_WQE_CSEG_SIZE -
				     MLX5_WQE_ESEG_SIZE -
				     MLX5_WQE_DSEG_SIZE * 2);
		temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
		temp = RTE_MAX(temp, inlen_mode);
		if (temp != inlen_send) {
				"port %u ordinary send inline setting"
				" aligned from %u to %u",
				PORT_ID(priv), inlen_send, temp);
		 * Not aligned to cache lines, but to WQEs.
		 * First bytes of data (initial alignment)
		 * are going to be copied explicitly at the
		 * beginning of the inlining buffer in the Ethernet
		MLX5_ASSERT(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
		MLX5_ASSERT(inlen_send <= MLX5_WQE_SIZE_MAX +
					  MLX5_ESEG_MIN_INLINE_SIZE -
					  MLX5_WQE_CSEG_SIZE -
					  MLX5_WQE_ESEG_SIZE -
					  MLX5_WQE_DSEG_SIZE * 2);
	} else if (inlen_mode) {
		 * If minimal inlining is requested we must
		 * enable inlining in general, regardless of the
		 * number of configured queues. Ignore the
		 * txq_inline_max devarg, this is not
		 * full-featured inline.
		inlen_send = inlen_mode;
	} else if (vlan_inline) {
		 * Hardware does not report the VLAN insertion
		 * offload, so we must enable data inlining
		 * to implement the feature in software.
		inlen_send = MLX5_ESEG_MIN_INLINE_SIZE;
	txq_ctrl->txq.inlen_send = inlen_send;
	txq_ctrl->txq.inlen_mode = inlen_mode;
	txq_ctrl->txq.inlen_empw = 0;
	if (inlen_send && inlen_empw && priv->txqs_n >= txqs_inline) {
		 * The data sent with MLX5_OPCODE_ENHANCED_MPSW
		 * may be inlined in the Data segment; align the
		 * length accordingly to fit entire WQEBBs.
		temp = RTE_MAX(inlen_empw,
			       MLX5_WQE_SIZE + MLX5_DSEG_MIN_INLINE_SIZE);
		temp -= MLX5_DSEG_MIN_INLINE_SIZE;
		temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
		temp += MLX5_DSEG_MIN_INLINE_SIZE;
		temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
				     MLX5_DSEG_MIN_INLINE_SIZE -
				     MLX5_WQE_CSEG_SIZE -
				     MLX5_WQE_ESEG_SIZE -
				     MLX5_WQE_DSEG_SIZE);
		temp = RTE_MIN(temp, MLX5_EMPW_MAX_INLINE_LEN);
		if (temp != inlen_empw) {
				"port %u enhanced empw inline setting"
				" aligned from %u to %u",
				PORT_ID(priv), inlen_empw, temp);
		MLX5_ASSERT(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
		MLX5_ASSERT(inlen_empw <= MLX5_WQE_SIZE_MAX +
					  MLX5_DSEG_MIN_INLINE_SIZE -
					  MLX5_WQE_CSEG_SIZE -
					  MLX5_WQE_ESEG_SIZE -
					  MLX5_WQE_DSEG_SIZE);
		txq_ctrl->txq.inlen_empw = inlen_empw;
	txq_ctrl->max_inline_data = RTE_MAX(inlen_send, inlen_empw);
		txq_ctrl->max_tso_header = MLX5_MAX_TSO_HEADER;
		txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->max_inline_data,
						    MLX5_MAX_TSO_HEADER);
		txq_ctrl->txq.tso_en = 1;
	txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
	txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
				 DEV_TX_OFFLOAD_UDP_TNL_TSO |
				 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
				txq_ctrl->txq.offloads) && config->swp;
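/*
 * Worked example for the SEND inline alignment above (illustrative,
 * assuming MLX5_ESEG_MIN_INLINE_SIZE == 18 and MLX5_WQE_DSEG_SIZE == 16):
 * for inlen_send == 200, temp = 200 - 34 = 166 is aligned up to 192
 * (three 64-byte WQEBBs) and restored to 192 + 34 = 226, so the inline
 * data ends exactly on a WQEBB boundary.
 */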
 * Adjust Tx queue data inline parameters for large queue sizes.
 * The data inline feature requires multiple WQEs to fit the packets,
 * and if a large number of Tx descriptors is requested by the application
 * the total WQE count may exceed the hardware capabilities. If the
 * default inline settings are used, we can try to adjust them to
 * meet the hardware requirements without exceeding the queue size.
 *   Pointer to Tx queue control structure.
 *   Zero on success, otherwise the parameters cannot be adjusted.
txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
	struct mlx5_priv *priv = txq_ctrl->priv;
	struct mlx5_dev_config *config = &priv->config;
	unsigned int max_inline;

	max_inline = txq_calc_inline_max(txq_ctrl);
	if (!txq_ctrl->txq.inlen_send) {
		 * Inline data feature is not engaged at all.
		 * There is nothing to adjust.
	if (txq_ctrl->max_inline_data <= max_inline) {
		 * The requested inline data length does not
		 * exceed queue capabilities.
	if (txq_ctrl->txq.inlen_mode > max_inline) {
			"minimal data inline requirements (%u) are not"
			" satisfied (%u) on port %u, try the smaller"
			" Tx queue size (%d)",
			txq_ctrl->txq.inlen_mode, max_inline,
			priv->dev_data->port_id,
			priv->sh->device_attr.max_qp_wr);
	if (txq_ctrl->txq.inlen_send > max_inline &&
	    config->txq_inline_max != MLX5_ARG_UNSET &&
	    config->txq_inline_max > (int)max_inline) {
			"txq_inline_max requirements (%u) are not"
			" satisfied (%u) on port %u, try the smaller"
			" Tx queue size (%d)",
			txq_ctrl->txq.inlen_send, max_inline,
			priv->dev_data->port_id,
			priv->sh->device_attr.max_qp_wr);
	if (txq_ctrl->txq.inlen_empw > max_inline &&
	    config->txq_inline_mpw != MLX5_ARG_UNSET &&
	    config->txq_inline_mpw > (int)max_inline) {
			"txq_inline_mpw requirements (%u) are not"
			" satisfied (%u) on port %u, try the smaller"
			" Tx queue size (%d)",
			txq_ctrl->txq.inlen_empw, max_inline,
			priv->dev_data->port_id,
			priv->sh->device_attr.max_qp_wr);
	if (txq_ctrl->txq.tso_en && max_inline < MLX5_MAX_TSO_HEADER) {
			"tso header inline requirements (%u) are not"
			" satisfied (%u) on port %u, try the smaller"
			" Tx queue size (%d)",
			MLX5_MAX_TSO_HEADER, max_inline,
			priv->dev_data->port_id,
			priv->sh->device_attr.max_qp_wr);
	if (txq_ctrl->txq.inlen_send > max_inline) {
			"adjust txq_inline_max (%u->%u)"
			" due to large Tx queue on port %u",
			txq_ctrl->txq.inlen_send, max_inline,
			priv->dev_data->port_id);
		txq_ctrl->txq.inlen_send = max_inline;
	if (txq_ctrl->txq.inlen_empw > max_inline) {
			"adjust txq_inline_mpw (%u->%u)"
			" due to large Tx queue on port %u",
			txq_ctrl->txq.inlen_empw, max_inline,
			priv->dev_data->port_id);
		txq_ctrl->txq.inlen_empw = max_inline;
	txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->txq.inlen_send,
					    txq_ctrl->txq.inlen_empw);
	MLX5_ASSERT(txq_ctrl->max_inline_data <= max_inline);
	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= max_inline);
	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw ||
		    !txq_ctrl->txq.inlen_empw);
 * Create a DPDK Tx queue.
 *   Pointer to Ethernet device.
 *   Number of descriptors to configure in queue.
 *   NUMA socket on which memory must be allocated.
 *   Thresholds parameters.
 *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
struct mlx5_txq_ctrl *
mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
	     unsigned int socket, const struct rte_eth_txconf *conf)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *tmpl;

	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
			   desc * sizeof(struct rte_mbuf *), 0, socket);
	if (mlx5_mr_btree_init(&tmpl->txq.mr_ctrl.cache_bh,
			       MLX5_MR_BTREE_CACHE_N, socket)) {
		/* rte_errno is already set. */
	/* Save the pointer to the global generation number for memory event checks. */
	tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->sh->share_cache.dev_gen;
	MLX5_ASSERT(desc > MLX5_TX_COMP_THRESH);
	tmpl->txq.offloads = conf->offloads |
			     dev->data->dev_conf.txmode.offloads;
	tmpl->socket = socket;
	tmpl->txq.elts_n = log2above(desc);
	tmpl->txq.elts_s = desc;
	tmpl->txq.elts_m = desc - 1;
	tmpl->txq.port_id = dev->data->port_id;
	tmpl->txq.idx = idx;
	txq_set_params(tmpl);
	if (txq_adjust_params(tmpl))
	if (txq_calc_wqebb_cnt(tmpl) >
	    priv->sh->device_attr.max_qp_wr) {
			"port %u Tx WQEBB count (%d) exceeds the limit (%d),"
			" try smaller queue size",
			dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
			priv->sh->device_attr.max_qp_wr);
	rte_atomic32_inc(&tmpl->refcnt);
	tmpl->type = MLX5_TXQ_TYPE_STANDARD;
	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);

 * Create a DPDK Tx hairpin queue.
 *   Pointer to Ethernet device.
 *   Number of descriptors to configure in queue.
 * @param hairpin_conf
 *   The hairpin configuration.
 *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
struct mlx5_txq_ctrl *
mlx5_txq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		     const struct rte_eth_hairpin_conf *hairpin_conf)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *tmpl;

	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
	tmpl->socket = SOCKET_ID_ANY;
	tmpl->txq.elts_n = log2above(desc);
	tmpl->txq.port_id = dev->data->port_id;
	tmpl->txq.idx = idx;
	tmpl->hairpin_conf = *hairpin_conf;
	tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
	rte_atomic32_inc(&tmpl->refcnt);
	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);

 *   Pointer to Ethernet device.
 *   A pointer to the queue if it exists.
struct mlx5_txq_ctrl *
mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *ctrl = NULL;

	if ((*priv->txqs)[idx]) {
		ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl,
		mlx5_txq_obj_get(dev, idx);
		rte_atomic32_inc(&ctrl->refcnt);

 * Release a Tx queue.
 *   Pointer to Ethernet device.
 *   1 while a reference on it exists, 0 when freed.
mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *txq;

	if (!(*priv->txqs)[idx])
	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
	if (txq->obj && !mlx5_txq_obj_release(txq->obj))
	if (rte_atomic32_dec_and_test(&txq->refcnt)) {
		mlx5_mr_btree_free(&txq->txq.mr_ctrl.cache_bh);
		LIST_REMOVE(txq, next);
		(*priv->txqs)[idx] = NULL;

 * Verify if the queue can be released.
 *   Pointer to Ethernet device.
 *   1 if the queue can be released.
mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *txq;

	if (!(*priv->txqs)[idx])
	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
	return (rte_atomic32_read(&txq->refcnt) == 1);

 * Verify the Tx queue list is empty.
 *   Pointer to Ethernet device.
 *   The number of objects not released.
mlx5_txq_verify(struct rte_eth_dev *dev)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *txq_ctrl;

	LIST_FOREACH(txq_ctrl, &priv->txqsctrl, next) {
		DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
			dev->data->port_id, txq_ctrl->txq.idx);

 * Set the Tx queue dynamic timestamp (mask and offset).
 *   Pointer to the Ethernet device structure.
mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_txq_data *data;

	nbit = rte_mbuf_dynflag_lookup
				(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
	off = rte_mbuf_dynfield_lookup
				(RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
	if (nbit >= 0 && off >= 0 && sh->txpp.refcnt)
		mask = 1ULL << nbit;
	for (i = 0; i != priv->txqs_n; ++i) {
		data = (*priv->txqs)[i];
		data->ts_mask = mask;
		data->ts_offset = off;
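/*
 * Illustrative sketch (application side): the dynamic flag and field
 * looked up above are set per packet by the application when scheduling
 * transmission. rte_mbuf_dynflag_lookup(), rte_mbuf_dynfield_lookup() and
 * RTE_MBUF_DYNFIELD() are standard DPDK API; the timestamp value and the
 * MLX5_TXQ_EXAMPLES guard are assumptions.
 */
#ifdef MLX5_TXQ_EXAMPLES
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

static void
example_schedule_tx(struct rte_mbuf *m, uint64_t ts)
{
	int nbit = rte_mbuf_dynflag_lookup
			(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
	int off = rte_mbuf_dynfield_lookup
			(RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);

	if (nbit < 0 || off < 0)
		return; /* Tx scheduling is not enabled. */
	*RTE_MBUF_DYNFIELD(m, off, uint64_t *) = ts;
	m->ol_flags |= 1ULL << nbit;
}
#endif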