1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
15 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
17 #pragma GCC diagnostic ignored "-Wpedantic"
19 #include <infiniband/verbs.h>
20 #include <infiniband/mlx5dv.h>
22 #pragma GCC diagnostic error "-Wpedantic"
26 #include <rte_malloc.h>
27 #include <rte_ethdev_driver.h>
28 #include <rte_common.h>
30 #include <mlx5_glue.h>
31 #include <mlx5_devx_cmds.h>
32 #include <mlx5_common.h>
34 #include "mlx5_defs.h"
35 #include "mlx5_utils.h"
37 #include "mlx5_rxtx.h"
38 #include "mlx5_autoconf.h"
41 * Allocate TX queue elements.
44 * Pointer to TX queue structure.
47 txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
49 const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
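/* Reset all SW ring entries; they are filled by the Tx burst routine as packets are sent. */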
52 for (i = 0; (i != elts_n); ++i)
53 txq_ctrl->txq.elts[i] = NULL;
54 DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
55 PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx, elts_n);
56 txq_ctrl->txq.elts_head = 0;
57 txq_ctrl->txq.elts_tail = 0;
58 txq_ctrl->txq.elts_comp = 0;
62 * Free TX queue elements.
65 * Pointer to TX queue structure.
68 txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
70 const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
71 const uint16_t elts_m = elts_n - 1;
72 uint16_t elts_head = txq_ctrl->txq.elts_head;
73 uint16_t elts_tail = txq_ctrl->txq.elts_tail;
74 struct rte_mbuf *(*elts)[elts_n] = &txq_ctrl->txq.elts;
76 DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
77 PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx);
78 txq_ctrl->txq.elts_head = 0;
79 txq_ctrl->txq.elts_tail = 0;
80 txq_ctrl->txq.elts_comp = 0;
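/* Free any mbufs still held by the queue, walking the saved tail up to the saved head. */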
82 while (elts_tail != elts_head) {
83 struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
85 MLX5_ASSERT(elt != NULL);
86 rte_pktmbuf_free_seg(elt);
87 #ifdef RTE_LIBRTE_MLX5_DEBUG
89 memset(&(*elts)[elts_tail & elts_m],
91 sizeof((*elts)[elts_tail & elts_m]));
98 * Returns the per-port supported offloads.
101 * Pointer to Ethernet device.
104 * Supported Tx offloads.
107 mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
109 struct mlx5_priv *priv = dev->data->dev_private;
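/* Multi-segment packets and VLAN insertion are always supported. */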
110 uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
111 DEV_TX_OFFLOAD_VLAN_INSERT);
112 struct mlx5_dev_config *config = &priv->config;
115 offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
116 DEV_TX_OFFLOAD_UDP_CKSUM |
117 DEV_TX_OFFLOAD_TCP_CKSUM);
119 offloads |= DEV_TX_OFFLOAD_TCP_TSO;
122 offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
124 offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
125 DEV_TX_OFFLOAD_UDP_TNL_TSO);
127 if (config->tunnel_en) {
129 offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
131 offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
132 DEV_TX_OFFLOAD_GRE_TNL_TSO |
133 DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
139 * Tx queue presetup checks.
142 * Pointer to Ethernet device structure.
146 * Number of descriptors to configure in queue.
149 * 0 on success, a negative errno value otherwise and rte_errno is set.
152 mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc)
154 struct mlx5_priv *priv = dev->data->dev_private;
156 if (desc <= MLX5_TX_COMP_THRESH) {
158 "port %u number of descriptors requested for Tx queue"
159 " %u must be higher than MLX5_TX_COMP_THRESH, using %u"
161 dev->data->port_id, idx, MLX5_TX_COMP_THRESH + 1, desc);
162 desc = MLX5_TX_COMP_THRESH + 1;
164 if (!rte_is_power_of_2(desc)) {
165 desc = 1 << log2above(desc);
167 "port %u increased number of descriptors in Tx queue"
168 " %u to the next power of two (%d)",
169 dev->data->port_id, idx, desc);
171 DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
172 dev->data->port_id, idx, desc);
173 if (idx >= priv->txqs_n) {
174 DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
175 dev->data->port_id, idx, priv->txqs_n);
176 rte_errno = EOVERFLOW;
179 if (!mlx5_txq_releasable(dev, idx)) {
181 DRV_LOG(ERR, "port %u unable to release queue index %u",
182 dev->data->port_id, idx);
185 mlx5_txq_release(dev, idx);
189 * DPDK callback to configure a TX queue.
192 * Pointer to Ethernet device structure.
196 * Number of descriptors to configure in queue.
198 * NUMA socket on which memory must be allocated.
200 * Threshold parameters.
203 * 0 on success, a negative errno value otherwise and rte_errno is set.
206 mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
207 unsigned int socket, const struct rte_eth_txconf *conf)
209 struct mlx5_priv *priv = dev->data->dev_private;
210 struct mlx5_txq_data *txq = (*priv->txqs)[idx];
211 struct mlx5_txq_ctrl *txq_ctrl =
212 container_of(txq, struct mlx5_txq_ctrl, txq);
215 res = mlx5_tx_queue_pre_setup(dev, idx, desc);
218 txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
220 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
221 dev->data->port_id, idx);
224 DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
225 dev->data->port_id, idx);
226 (*priv->txqs)[idx] = &txq_ctrl->txq;
231 * DPDK callback to configure a TX hairpin queue.
234 * Pointer to Ethernet device structure.
238 * Number of descriptors to configure in queue.
239 * @param[in] hairpin_conf
240 * The hairpin binding configuration.
243 * 0 on success, a negative errno value otherwise and rte_errno is set.
246 mlx5_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
248 const struct rte_eth_hairpin_conf *hairpin_conf)
250 struct mlx5_priv *priv = dev->data->dev_private;
251 struct mlx5_txq_data *txq = (*priv->txqs)[idx];
252 struct mlx5_txq_ctrl *txq_ctrl =
253 container_of(txq, struct mlx5_txq_ctrl, txq);
256 res = mlx5_tx_queue_pre_setup(dev, idx, desc);
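/* Only a single-peer hairpin to a valid Rx queue on the same port is supported. */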
259 if (hairpin_conf->peer_count != 1 ||
260 hairpin_conf->peers[0].port != dev->data->port_id ||
261 hairpin_conf->peers[0].queue >= priv->rxqs_n) {
262 DRV_LOG(ERR, "port %u unable to setup hairpin queue index %u "
263 " invalid hairpind configuration", dev->data->port_id,
268 txq_ctrl = mlx5_txq_hairpin_new(dev, idx, desc, hairpin_conf);
270 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
271 dev->data->port_id, idx);
274 DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
275 dev->data->port_id, idx);
276 (*priv->txqs)[idx] = &txq_ctrl->txq;
281 * DPDK callback to release a TX queue.
284 * Generic TX queue pointer.
287 mlx5_tx_queue_release(void *dpdk_txq)
289 struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
290 struct mlx5_txq_ctrl *txq_ctrl;
291 struct mlx5_priv *priv;
296 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
297 priv = txq_ctrl->priv;
298 for (i = 0; (i != priv->txqs_n); ++i)
299 if ((*priv->txqs)[i] == txq) {
300 mlx5_txq_release(ETH_DEV(priv), i);
301 DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
302 PORT_ID(priv), txq->idx);
308 * Configure the doorbell register non-cached attribute.
311 * Pointer to Tx queue control structure.
316 txq_uar_ncattr_init(struct mlx5_txq_ctrl *txq_ctrl, size_t page_size)
318 struct mlx5_priv *priv = txq_ctrl->priv;
321 txq_ctrl->txq.db_heu = priv->config.dbnc == MLX5_TXDB_HEURISTIC;
322 txq_ctrl->txq.db_nc = 0;
323 /* Check the doorbell register mapping type. */
324 cmd = txq_ctrl->uar_mmap_offset / page_size;
325 cmd >>= MLX5_UAR_MMAP_CMD_SHIFT;
326 cmd &= MLX5_UAR_MMAP_CMD_MASK;
327 if (cmd == MLX5_MMAP_GET_NC_PAGES_CMD)
328 txq_ctrl->txq.db_nc = 1;
332 * Initialize Tx UAR registers for primary process.
335 * Pointer to Tx queue control structure.
338 txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl)
340 struct mlx5_priv *priv = txq_ctrl->priv;
341 struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
342 const size_t page_size = sysconf(_SC_PAGESIZE);
344 unsigned int lock_idx;
347 if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
349 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
351 ppriv->uar_table[txq_ctrl->txq.idx] = txq_ctrl->bf_reg;
352 txq_uar_ncattr_init(txq_ctrl, page_size);
354 /* Assign a UAR lock according to the UAR page number. */
355 lock_idx = (txq_ctrl->uar_mmap_offset / page_size) &
356 MLX5_UAR_PAGE_NUM_MASK;
357 txq_ctrl->txq.uar_lock = &priv->uar_lock[lock_idx];
362 * Remap UAR register of a Tx queue for secondary process.
364 * The remapped address is stored in the table in the process private structure of
365 * the device, indexed by queue index.
368 * Pointer to Tx queue control structure.
370 * Verbs file descriptor to map UAR pages.
373 * 0 on success, a negative errno value otherwise and rte_errno is set.
376 txq_uar_init_secondary(struct mlx5_txq_ctrl *txq_ctrl, int fd)
378 struct mlx5_priv *priv = txq_ctrl->priv;
379 struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
380 struct mlx5_txq_data *txq = &txq_ctrl->txq;
384 const size_t page_size = sysconf(_SC_PAGESIZE);
386 if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
390 * UARs are mapped at OS page size granularity, as in rdma-core.
391 * See the libmlx5 function mlx5_init_context().
393 uar_va = (uintptr_t)txq_ctrl->bf_reg;
394 offset = uar_va & (page_size - 1); /* Offset in page. */
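/* Map the UAR page write-only from the Verbs device fd at the offset reported by the driver. */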
395 addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
396 txq_ctrl->uar_mmap_offset);
397 if (addr == MAP_FAILED) {
399 "port %u mmap failed for BF reg of txq %u",
400 txq->port_id, txq->idx);
404 addr = RTE_PTR_ADD(addr, offset);
405 ppriv->uar_table[txq->idx] = addr;
406 txq_uar_ncattr_init(txq_ctrl, page_size);
411 * Unmap UAR register of a Tx queue for secondary process.
414 * Pointer to Tx queue control structure.
417 txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl)
419 struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(txq_ctrl->priv));
420 const size_t page_size = sysconf(_SC_PAGESIZE);
423 if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
425 addr = ppriv->uar_table[txq_ctrl->txq.idx];
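/* The stored address points inside the mapped page; unmap the whole page. */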
426 munmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
430 * Initialize Tx UAR registers for secondary process.
433 * Pointer to Ethernet device.
435 * Verbs file descriptor to map UAR pages.
438 * 0 on success, a negative errno value otherwise and rte_errno is set.
441 mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
443 struct mlx5_priv *priv = dev->data->dev_private;
444 struct mlx5_txq_data *txq;
445 struct mlx5_txq_ctrl *txq_ctrl;
449 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
450 for (i = 0; i != priv->txqs_n; ++i) {
451 if (!(*priv->txqs)[i])
453 txq = (*priv->txqs)[i];
454 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
455 if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
457 MLX5_ASSERT(txq->idx == (uint16_t)i);
458 ret = txq_uar_init_secondary(txq_ctrl, fd);
466 if (!(*priv->txqs)[i])
468 txq = (*priv->txqs)[i];
469 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
470 txq_uar_uninit_secondary(txq_ctrl);
476 * Create the Tx hairpin queue object.
479 * Pointer to Ethernet device.
481 * Queue index in DPDK Tx queue array.
484 * The hairpin DevX object initialised, NULL otherwise and rte_errno is set.
486 static struct mlx5_txq_obj *
487 mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
489 struct mlx5_priv *priv = dev->data->dev_private;
490 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
491 struct mlx5_txq_ctrl *txq_ctrl =
492 container_of(txq_data, struct mlx5_txq_ctrl, txq);
493 struct mlx5_devx_create_sq_attr attr = { 0 };
494 struct mlx5_txq_obj *tmpl = NULL;
496 uint32_t max_wq_data;
498 MLX5_ASSERT(txq_data);
499 MLX5_ASSERT(!txq_ctrl->obj);
500 tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
504 "port %u Tx queue %u cannot allocate memory resources",
505 dev->data->port_id, txq_data->idx);
509 tmpl->type = MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN;
510 tmpl->txq_ctrl = txq_ctrl;
513 max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
514 /* Jumbo frames > 9 KB should be supported, and as many packets as possible. */
515 if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
516 if (priv->config.log_hp_size > max_wq_data) {
517 DRV_LOG(ERR, "total data size (2^%u) is too"
518 " large for hairpin",
519 priv->config.log_hp_size);
523 attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
525 attr.wq_attr.log_hairpin_data_sz =
526 (max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
527 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
529 /* Set the number of packets to the maximum value for performance. */
530 attr.wq_attr.log_hairpin_num_packets =
531 attr.wq_attr.log_hairpin_data_sz -
532 MLX5_HAIRPIN_QUEUE_STRIDE;
533 attr.tis_num = priv->sh->tis->id;
534 tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->ctx, &attr);
537 "port %u tx hairpin queue %u can't create sq object",
538 dev->data->port_id, idx);
542 DRV_LOG(DEBUG, "port %u txq %u updated with %p", dev->data->port_id,
544 rte_atomic32_inc(&tmpl->refcnt);
545 LIST_INSERT_HEAD(&priv->txqsobj, tmpl, next);
548 ret = rte_errno; /* Save rte_errno before cleanup. */
550 mlx5_devx_cmd_destroy(tmpl->tis);
552 mlx5_devx_cmd_destroy(tmpl->sq);
553 rte_errno = ret; /* Restore rte_errno. */
558 * Create the Tx queue Verbs object.
561 * Pointer to Ethernet device.
563 * Queue index in DPDK Tx queue array.
565 * Type of the Tx queue object to create.
568 * The Verbs object initialised, NULL otherwise and rte_errno is set.
570 struct mlx5_txq_obj *
571 mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
572 enum mlx5_txq_obj_type type)
574 struct mlx5_priv *priv = dev->data->dev_private;
575 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
576 struct mlx5_txq_ctrl *txq_ctrl =
577 container_of(txq_data, struct mlx5_txq_ctrl, txq);
578 struct mlx5_txq_obj tmpl;
579 struct mlx5_txq_obj *txq_obj = NULL;
581 struct ibv_qp_init_attr_ex init;
582 struct ibv_cq_init_attr_ex cq;
583 struct ibv_qp_attr mod;
586 struct mlx5dv_qp qp = { .comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET };
587 struct mlx5dv_cq cq_info;
588 struct mlx5dv_obj obj;
589 const int desc = 1 << txq_data->elts_n;
592 if (type == MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN)
593 return mlx5_txq_obj_hairpin_new(dev, idx);
594 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
595 /* If using DevX, need additional mask to read tisn value. */
596 if (priv->config.devx && !priv->sh->tdn)
597 qp.comp_mask |= MLX5DV_QP_MASK_RAW_QP_HANDLES;
599 MLX5_ASSERT(txq_data);
600 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
601 priv->verbs_alloc_ctx.obj = txq_ctrl;
602 if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
604 "port %u MLX5_ENABLE_CQE_COMPRESSION must never be set",
609 memset(&tmpl, 0, sizeof(struct mlx5_txq_obj));
610 attr.cq = (struct ibv_cq_init_attr_ex){
613 cqe_n = desc / MLX5_TX_COMP_THRESH +
614 1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
615 tmpl.cq = mlx5_glue->create_cq(priv->sh->ctx, cqe_n, NULL, NULL, 0);
616 if (tmpl.cq == NULL) {
617 DRV_LOG(ERR, "port %u Tx queue %u CQ creation failure",
618 dev->data->port_id, idx);
622 attr.init = (struct ibv_qp_init_attr_ex){
623 /* CQ to be associated with the send queue. */
625 /* CQ to be associated with the receive queue. */
628 /* Max number of outstanding WRs. */
630 ((priv->sh->device_attr.orig_attr.max_qp_wr <
632 priv->sh->device_attr.orig_attr.max_qp_wr :
635 * Max number of scatter/gather elements in a WR,
636 * must be 1 to prevent libmlx5 from trying to affect
637 * too much memory. TX gather is not impacted by the
638 * device_attr.max_sge limit and will still work
643 .qp_type = IBV_QPT_RAW_PACKET,
645 * Do *NOT* enable this, completion events are managed per
650 .comp_mask = IBV_QP_INIT_ATTR_PD,
652 if (txq_data->inlen_send)
653 attr.init.cap.max_inline_data = txq_ctrl->max_inline_data;
654 if (txq_data->tso_en) {
655 attr.init.max_tso_header = txq_ctrl->max_tso_header;
656 attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
658 tmpl.qp = mlx5_glue->create_qp_ex(priv->sh->ctx, &attr.init);
659 if (tmpl.qp == NULL) {
660 DRV_LOG(ERR, "port %u Tx queue %u QP creation failure",
661 dev->data->port_id, idx);
665 attr.mod = (struct ibv_qp_attr){
666 /* Move the QP to this state. */
667 .qp_state = IBV_QPS_INIT,
668 /* IB device port number. */
669 .port_num = (uint8_t)priv->ibv_port,
671 ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod,
672 (IBV_QP_STATE | IBV_QP_PORT));
675 "port %u Tx queue %u QP state to IBV_QPS_INIT failed",
676 dev->data->port_id, idx);
680 attr.mod = (struct ibv_qp_attr){
681 .qp_state = IBV_QPS_RTR
683 ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
686 "port %u Tx queue %u QP state to IBV_QPS_RTR failed",
687 dev->data->port_id, idx);
691 attr.mod.qp_state = IBV_QPS_RTS;
692 ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
695 "port %u Tx queue %u QP state to IBV_QPS_RTS failed",
696 dev->data->port_id, idx);
700 txq_obj = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_obj), 0,
703 DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory",
704 dev->data->port_id, idx);
709 obj.cq.out = &cq_info;
712 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
717 if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
719 "port %u wrong MLX5_CQE_SIZE environment variable"
720 " value: it should be set to %u",
721 dev->data->port_id, RTE_CACHE_LINE_SIZE);
725 txq_data->cqe_n = log2above(cq_info.cqe_cnt);
726 txq_data->cqe_s = 1 << txq_data->cqe_n;
727 txq_data->cqe_m = txq_data->cqe_s - 1;
728 txq_data->qp_num_8s = tmpl.qp->qp_num << 8;
729 txq_data->wqes = qp.sq.buf;
730 txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
731 txq_data->wqe_s = 1 << txq_data->wqe_n;
732 txq_data->wqe_m = txq_data->wqe_s - 1;
733 txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
734 txq_data->qp_db = &qp.dbrec[MLX5_SND_DBR];
735 txq_data->cq_db = cq_info.dbrec;
736 txq_data->cqes = (volatile struct mlx5_cqe *)cq_info.buf;
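/* Start with clean producer/consumer indexes and completion counters. */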
739 txq_data->wqe_ci = 0;
740 txq_data->wqe_pi = 0;
741 txq_data->wqe_comp = 0;
742 txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
743 txq_data->fcqs = rte_calloc_socket(__func__,
745 sizeof(*txq_data->fcqs),
748 if (!txq_data->fcqs) {
749 DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory (FCQ)",
750 dev->data->port_id, idx);
754 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
756 * If using DevX, we need to query and store the TIS transport domain value.
757 * This is done once per port.
758 * This value will be used on Rx, when creating the matching TIR.
760 if (priv->config.devx && !priv->sh->tdn) {
761 ret = mlx5_devx_cmd_qp_query_tis_td(tmpl.qp, qp.tisn,
764 DRV_LOG(ERR, "Failed to query port %u Tx queue %u QP TIS "
765 "transport domain", dev->data->port_id, idx);
769 DRV_LOG(DEBUG, "port %u Tx queue %u TIS number %d "
770 "transport domain %d", dev->data->port_id,
771 idx, qp.tisn, priv->sh->tdn);
775 txq_obj->qp = tmpl.qp;
776 txq_obj->cq = tmpl.cq;
777 rte_atomic32_inc(&txq_obj->refcnt);
778 txq_ctrl->bf_reg = qp.bf.reg;
779 if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
780 txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
781 DRV_LOG(DEBUG, "port %u: uar_mmap_offset 0x%"PRIx64,
782 dev->data->port_id, txq_ctrl->uar_mmap_offset);
785 "port %u failed to retrieve UAR info, invalid"
791 txq_uar_init(txq_ctrl);
792 LIST_INSERT_HEAD(&priv->txqsobj, txq_obj, next);
793 txq_obj->txq_ctrl = txq_ctrl;
794 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
797 ret = rte_errno; /* Save rte_errno before cleanup. */
799 claim_zero(mlx5_glue->destroy_cq(tmpl.cq));
801 claim_zero(mlx5_glue->destroy_qp(tmpl.qp));
802 if (txq_data && txq_data->fcqs)
803 rte_free(txq_data->fcqs);
806 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
807 rte_errno = ret; /* Restore rte_errno. */
812 * Get a Tx queue Verbs object.
815 * Pointer to Ethernet device.
817 * Queue index in DPDK Tx queue array.
820 * The Verbs object if it exists.
822 struct mlx5_txq_obj *
823 mlx5_txq_obj_get(struct rte_eth_dev *dev, uint16_t idx)
825 struct mlx5_priv *priv = dev->data->dev_private;
826 struct mlx5_txq_ctrl *txq_ctrl;
828 if (idx >= priv->txqs_n)
830 if (!(*priv->txqs)[idx])
832 txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
834 rte_atomic32_inc(&txq_ctrl->obj->refcnt);
835 return txq_ctrl->obj;
839 * Release a Tx Verbs queue object.
842 * Verbs Tx queue object.
845 * 1 while a reference on it exists, 0 when freed.
848 mlx5_txq_obj_release(struct mlx5_txq_obj *txq_obj)
850 MLX5_ASSERT(txq_obj);
851 if (rte_atomic32_dec_and_test(&txq_obj->refcnt)) {
852 if (txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN) {
854 claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
856 claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
857 claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
858 if (txq_obj->txq_ctrl->txq.fcqs)
859 rte_free(txq_obj->txq_ctrl->txq.fcqs);
861 LIST_REMOVE(txq_obj, next);
869 * Verify the Verbs Tx queue list is empty.
872 * Pointer to Ethernet device.
875 * The number of objects not released.
878 mlx5_txq_obj_verify(struct rte_eth_dev *dev)
880 struct mlx5_priv *priv = dev->data->dev_private;
882 struct mlx5_txq_obj *txq_obj;
884 LIST_FOREACH(txq_obj, &priv->txqsobj, next) {
885 DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
886 dev->data->port_id, txq_obj->txq_ctrl->txq.idx);
893 * Calculate the total number of WQEBBs for the Tx queue.
895 * Simplified version of calc_sq_size() in rdma-core.
898 * Pointer to Tx queue control structure.
901 * The number of WQEBBs.
904 txq_calc_wqebb_cnt(struct mlx5_txq_ctrl *txq_ctrl)
906 unsigned int wqe_size;
907 const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
909 wqe_size = MLX5_WQE_CSEG_SIZE +
912 MLX5_ESEG_MIN_INLINE_SIZE +
913 txq_ctrl->max_inline_data;
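/* Round the total size up to a power of two and convert bytes to WQEBB units. */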
914 return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;
918 * Calculate the maximal inline data size for Tx queue.
921 * Pointer to Tx queue control structure.
924 * The maximal inline data size.
927 txq_calc_inline_max(struct mlx5_txq_ctrl *txq_ctrl)
929 const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
930 struct mlx5_priv *priv = txq_ctrl->priv;
931 unsigned int wqe_size;
933 wqe_size = priv->sh->device_attr.orig_attr.max_qp_wr / desc;
937 * This calculation is derived from the source of
938 * mlx5_calc_send_wqe() in the rdma-core library.
940 wqe_size = wqe_size * MLX5_WQE_SIZE -
945 MLX5_DSEG_MIN_INLINE_SIZE;
950 * Set Tx queue parameters from device configuration.
953 * Pointer to Tx queue control structure.
956 txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
958 struct mlx5_priv *priv = txq_ctrl->priv;
959 struct mlx5_dev_config *config = &priv->config;
960 unsigned int inlen_send; /* Inline data for ordinary SEND.*/
961 unsigned int inlen_empw; /* Inline data for enhanced MPW. */
962 unsigned int inlen_mode; /* Minimal required Inline data. */
963 unsigned int txqs_inline; /* Min Tx queues to enable inline. */
964 uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
965 bool tso = txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
966 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
967 DEV_TX_OFFLOAD_GRE_TNL_TSO |
968 DEV_TX_OFFLOAD_IP_TNL_TSO |
969 DEV_TX_OFFLOAD_UDP_TNL_TSO);
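/* TSO is considered enabled if any plain or tunnel TSO offload is requested on the queue. */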
973 if (config->txqs_inline == MLX5_ARG_UNSET)
975 #if defined(RTE_ARCH_ARM64)
976 (priv->pci_dev->id.device_id ==
977 PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) ?
978 MLX5_INLINE_MAX_TXQS_BLUEFIELD :
980 MLX5_INLINE_MAX_TXQS;
982 txqs_inline = (unsigned int)config->txqs_inline;
983 inlen_send = (config->txq_inline_max == MLX5_ARG_UNSET) ?
984 MLX5_SEND_DEF_INLINE_LEN :
985 (unsigned int)config->txq_inline_max;
986 inlen_empw = (config->txq_inline_mpw == MLX5_ARG_UNSET) ?
987 MLX5_EMPW_DEF_INLINE_LEN :
988 (unsigned int)config->txq_inline_mpw;
989 inlen_mode = (config->txq_inline_min == MLX5_ARG_UNSET) ?
990 0 : (unsigned int)config->txq_inline_min;
991 if (config->mps != MLX5_MPW_ENHANCED && config->mps != MLX5_MPW)
994 * If a minimal amount of data to inline is requested
995 * we MUST enable inlining. This is the case for ConnectX-4,
996 * which usually requires L2 headers to be inlined for correct
997 * operation, and for ConnectX-4 Lx, which requires L2-L4 headers
998 * to be inlined to support E-Switch Flows.
1001 if (inlen_mode <= MLX5_ESEG_MIN_INLINE_SIZE) {
1003 * Optimize minimal inlining for single
1004 * segment packets to fill one WQEBB
1007 temp = MLX5_ESEG_MIN_INLINE_SIZE;
1009 temp = inlen_mode - MLX5_ESEG_MIN_INLINE_SIZE;
1010 temp = RTE_ALIGN(temp, MLX5_WSEG_SIZE) +
1011 MLX5_ESEG_MIN_INLINE_SIZE;
1012 temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
1014 if (temp != inlen_mode) {
1016 "port %u minimal required inline setting"
1017 " aligned from %u to %u",
1018 PORT_ID(priv), inlen_mode, temp);
1023 * If the port is configured to support VLAN insertion and the device
1024 * does not support this feature in HW (NICs before ConnectX-5,
1025 * or when the wqe_vlan_insert flag is not set) we must enable
1026 * data inline on all queues because it is supported by single
1029 txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
1030 vlan_inline = (dev_txoff & DEV_TX_OFFLOAD_VLAN_INSERT) &&
1031 !config->hw_vlan_insert;
1033 * If there are only a few Tx queues it is prioritized
1034 * to save CPU cycles and data inlining is disabled entirely.
1036 if (inlen_send && priv->txqs_n >= txqs_inline) {
1038 * The data sent with an ordinary MLX5_OPCODE_SEND
1039 * may be inlined in the Ethernet Segment, align the
1040 * length accordingly to fit entire WQEBBs.
1042 temp = RTE_MAX(inlen_send,
1043 MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE);
1044 temp -= MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
1045 temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
1046 temp += MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
1047 temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
1048 MLX5_ESEG_MIN_INLINE_SIZE -
1049 MLX5_WQE_CSEG_SIZE -
1050 MLX5_WQE_ESEG_SIZE -
1051 MLX5_WQE_DSEG_SIZE * 2);
1052 temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
1053 temp = RTE_MAX(temp, inlen_mode);
1054 if (temp != inlen_send) {
1056 "port %u ordinary send inline setting"
1057 " aligned from %u to %u",
1058 PORT_ID(priv), inlen_send, temp);
1062 * Not aligned to cache lines, but to WQEs.
1063 * The first bytes of data (initial alignment)
1064 * are going to be copied explicitly at the
1065 * beginning of the inlining buffer in the Ethernet
1068 MLX5_ASSERT(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
1069 MLX5_ASSERT(inlen_send <= MLX5_WQE_SIZE_MAX +
1070 MLX5_ESEG_MIN_INLINE_SIZE -
1071 MLX5_WQE_CSEG_SIZE -
1072 MLX5_WQE_ESEG_SIZE -
1073 MLX5_WQE_DSEG_SIZE * 2);
1074 } else if (inlen_mode) {
1076 * If minimal inlining is requested we must
1077 * enable inlining in general, regardless of the
1078 * number of configured queues. Ignore the
1079 * txq_inline_max devarg, this is not
1080 * full-featured inline.
1082 inlen_send = inlen_mode;
1084 } else if (vlan_inline) {
1086 * Hardware does not report the offload for
1087 * VLAN insertion, we must enable data inline
1088 * to implement the feature in software.
1090 inlen_send = MLX5_ESEG_MIN_INLINE_SIZE;
1096 txq_ctrl->txq.inlen_send = inlen_send;
1097 txq_ctrl->txq.inlen_mode = inlen_mode;
1098 txq_ctrl->txq.inlen_empw = 0;
1099 if (inlen_send && inlen_empw && priv->txqs_n >= txqs_inline) {
1101 * The data sent with MLX5_OPCODE_ENHANCED_MPSW
1102 * may be inlined in the Data Segment, align the
1103 * length accordingly to fit entire WQEBBs.
1105 temp = RTE_MAX(inlen_empw,
1106 MLX5_WQE_SIZE + MLX5_DSEG_MIN_INLINE_SIZE);
1107 temp -= MLX5_DSEG_MIN_INLINE_SIZE;
1108 temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
1109 temp += MLX5_DSEG_MIN_INLINE_SIZE;
1110 temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
1111 MLX5_DSEG_MIN_INLINE_SIZE -
1112 MLX5_WQE_CSEG_SIZE -
1113 MLX5_WQE_ESEG_SIZE -
1114 MLX5_WQE_DSEG_SIZE);
1115 temp = RTE_MIN(temp, MLX5_EMPW_MAX_INLINE_LEN);
1116 if (temp != inlen_empw) {
1118 "port %u enhanced empw inline setting"
1119 " aligned from %u to %u",
1120 PORT_ID(priv), inlen_empw, temp);
1123 MLX5_ASSERT(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
1124 MLX5_ASSERT(inlen_empw <= MLX5_WQE_SIZE_MAX +
1125 MLX5_DSEG_MIN_INLINE_SIZE -
1126 MLX5_WQE_CSEG_SIZE -
1127 MLX5_WQE_ESEG_SIZE -
1128 MLX5_WQE_DSEG_SIZE);
1129 txq_ctrl->txq.inlen_empw = inlen_empw;
1131 txq_ctrl->max_inline_data = RTE_MAX(inlen_send, inlen_empw);
1133 txq_ctrl->max_tso_header = MLX5_MAX_TSO_HEADER;
1134 txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->max_inline_data,
1135 MLX5_MAX_TSO_HEADER);
1136 txq_ctrl->txq.tso_en = 1;
1138 txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
1139 txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
1140 DEV_TX_OFFLOAD_UDP_TNL_TSO |
1141 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
1142 txq_ctrl->txq.offloads) && config->swp;
1146 * Adjust Tx queue data inline parameters for large queue sizes.
1147 * The data inline feature requires multiple WQEs to fit the packets,
1148 * and if a large number of Tx descriptors is requested by the application
1149 * the total WQE amount may exceed the hardware capabilities. If the
1150 * default inline settings are used we can try to adjust them to
1151 * meet the hardware requirements without exceeding the queue size.
1154 * Pointer to Tx queue control structure.
1157 * Zero on success, otherwise the parameters cannot be adjusted.
1160 txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
1162 struct mlx5_priv *priv = txq_ctrl->priv;
1163 struct mlx5_dev_config *config = &priv->config;
1164 unsigned int max_inline;
1166 max_inline = txq_calc_inline_max(txq_ctrl);
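/* max_inline is the largest inline data size the hardware WQE count limit allows for this queue depth. */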
1167 if (!txq_ctrl->txq.inlen_send) {
1169 * Inline data feature is not engaged at all.
1170 * There is nothing to adjust.
1174 if (txq_ctrl->max_inline_data <= max_inline) {
1176 * The requested inline data length does not
1177 * exceed queue capabilities.
1181 if (txq_ctrl->txq.inlen_mode > max_inline) {
1183 "minimal data inline requirements (%u) are not"
1184 " satisfied (%u) on port %u, try the smaller"
1185 " Tx queue size (%d)",
1186 txq_ctrl->txq.inlen_mode, max_inline,
1187 priv->dev_data->port_id,
1188 priv->sh->device_attr.orig_attr.max_qp_wr);
1191 if (txq_ctrl->txq.inlen_send > max_inline &&
1192 config->txq_inline_max != MLX5_ARG_UNSET &&
1193 config->txq_inline_max > (int)max_inline) {
1195 "txq_inline_max requirements (%u) are not"
1196 " satisfied (%u) on port %u, try the smaller"
1197 " Tx queue size (%d)",
1198 txq_ctrl->txq.inlen_send, max_inline,
1199 priv->dev_data->port_id,
1200 priv->sh->device_attr.orig_attr.max_qp_wr);
1203 if (txq_ctrl->txq.inlen_empw > max_inline &&
1204 config->txq_inline_mpw != MLX5_ARG_UNSET &&
1205 config->txq_inline_mpw > (int)max_inline) {
1207 "txq_inline_mpw requirements (%u) are not"
1208 " satisfied (%u) on port %u, try the smaller"
1209 " Tx queue size (%d)",
1210 txq_ctrl->txq.inlen_empw, max_inline,
1211 priv->dev_data->port_id,
1212 priv->sh->device_attr.orig_attr.max_qp_wr);
1215 if (txq_ctrl->txq.tso_en && max_inline < MLX5_MAX_TSO_HEADER) {
1217 "tso header inline requirements (%u) are not"
1218 " satisfied (%u) on port %u, try the smaller"
1219 " Tx queue size (%d)",
1220 MLX5_MAX_TSO_HEADER, max_inline,
1221 priv->dev_data->port_id,
1222 priv->sh->device_attr.orig_attr.max_qp_wr);
1225 if (txq_ctrl->txq.inlen_send > max_inline) {
1227 "adjust txq_inline_max (%u->%u)"
1228 " due to large Tx queue on port %u",
1229 txq_ctrl->txq.inlen_send, max_inline,
1230 priv->dev_data->port_id);
1231 txq_ctrl->txq.inlen_send = max_inline;
1233 if (txq_ctrl->txq.inlen_empw > max_inline) {
1235 "adjust txq_inline_mpw (%u->%u)"
1236 "due to large Tx queue on port %u",
1237 txq_ctrl->txq.inlen_empw, max_inline,
1238 priv->dev_data->port_id);
1239 txq_ctrl->txq.inlen_empw = max_inline;
1241 txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->txq.inlen_send,
1242 txq_ctrl->txq.inlen_empw);
1243 MLX5_ASSERT(txq_ctrl->max_inline_data <= max_inline);
1244 MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= max_inline);
1245 MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
1246 MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw ||
1247 !txq_ctrl->txq.inlen_empw);
1255 * Create a DPDK Tx queue.
1258 * Pointer to Ethernet device.
1262 * Number of descriptors to configure in queue.
1264 * NUMA socket on which memory must be allocated.
1266 * Threshold parameters.
1269 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1271 struct mlx5_txq_ctrl *
1272 mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1273 unsigned int socket, const struct rte_eth_txconf *conf)
1275 struct mlx5_priv *priv = dev->data->dev_private;
1276 struct mlx5_txq_ctrl *tmpl;
1278 tmpl = rte_calloc_socket("TXQ", 1,
1280 desc * sizeof(struct rte_mbuf *),
1286 if (mlx5_mr_btree_init(&tmpl->txq.mr_ctrl.cache_bh,
1287 MLX5_MR_BTREE_CACHE_N, socket)) {
1288 /* rte_errno is already set. */
1291 /* Save pointer of global generation number to check memory event. */
1292 tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->sh->mr.dev_gen;
1293 MLX5_ASSERT(desc > MLX5_TX_COMP_THRESH);
1294 tmpl->txq.offloads = conf->offloads |
1295 dev->data->dev_conf.txmode.offloads;
1297 tmpl->socket = socket;
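/* Store the element count in log2, plain size and index mask forms for the datapath. */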
1298 tmpl->txq.elts_n = log2above(desc);
1299 tmpl->txq.elts_s = desc;
1300 tmpl->txq.elts_m = desc - 1;
1301 tmpl->txq.port_id = dev->data->port_id;
1302 tmpl->txq.idx = idx;
1303 txq_set_params(tmpl);
1304 if (txq_adjust_params(tmpl))
1306 if (txq_calc_wqebb_cnt(tmpl) >
1307 priv->sh->device_attr.orig_attr.max_qp_wr) {
1309 "port %u Tx WQEBB count (%d) exceeds the limit (%d),"
1310 " try smaller queue size",
1311 dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
1312 priv->sh->device_attr.orig_attr.max_qp_wr);
1316 rte_atomic32_inc(&tmpl->refcnt);
1317 tmpl->type = MLX5_TXQ_TYPE_STANDARD;
1318 LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
1326 * Create a DPDK Tx hairpin queue.
1329 * Pointer to Ethernet device.
1333 * Number of descriptors to configure in queue.
1334 * @param hairpin_conf
1335 * The hairpin configuration.
1338 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1340 struct mlx5_txq_ctrl *
1341 mlx5_txq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1342 const struct rte_eth_hairpin_conf *hairpin_conf)
1344 struct mlx5_priv *priv = dev->data->dev_private;
1345 struct mlx5_txq_ctrl *tmpl;
1347 tmpl = rte_calloc_socket("TXQ", 1,
1348 sizeof(*tmpl), 0, SOCKET_ID_ANY);
1354 tmpl->socket = SOCKET_ID_ANY;
1355 tmpl->txq.elts_n = log2above(desc);
1356 tmpl->txq.port_id = dev->data->port_id;
1357 tmpl->txq.idx = idx;
1358 tmpl->hairpin_conf = *hairpin_conf;
1359 tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
1360 rte_atomic32_inc(&tmpl->refcnt);
1361 LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
1369 * Pointer to Ethernet device.
1374 * A pointer to the queue if it exists.
1376 struct mlx5_txq_ctrl *
1377 mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
1379 struct mlx5_priv *priv = dev->data->dev_private;
1380 struct mlx5_txq_ctrl *ctrl = NULL;
1382 if ((*priv->txqs)[idx]) {
1383 ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl,
1385 mlx5_txq_obj_get(dev, idx);
1386 rte_atomic32_inc(&ctrl->refcnt);
1392 * Release a Tx queue.
1395 * Pointer to Ethernet device.
1400 * 1 while a reference on it exists, 0 when freed.
1403 mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
1405 struct mlx5_priv *priv = dev->data->dev_private;
1406 struct mlx5_txq_ctrl *txq;
1408 if (!(*priv->txqs)[idx])
1410 txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
1411 if (txq->obj && !mlx5_txq_obj_release(txq->obj))
1413 if (rte_atomic32_dec_and_test(&txq->refcnt)) {
1415 mlx5_mr_btree_free(&txq->txq.mr_ctrl.cache_bh);
1416 LIST_REMOVE(txq, next);
1418 (*priv->txqs)[idx] = NULL;
1425 * Verify if the queue can be released.
1428 * Pointer to Ethernet device.
1433 * 1 if the queue can be released.
1436 mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
1438 struct mlx5_priv *priv = dev->data->dev_private;
1439 struct mlx5_txq_ctrl *txq;
1441 if (!(*priv->txqs)[idx])
1443 txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
1444 return (rte_atomic32_read(&txq->refcnt) == 1);
1448 * Verify the Tx queue list is empty.
1451 * Pointer to Ethernet device.
1454 * The number of objects not released.
1457 mlx5_txq_verify(struct rte_eth_dev *dev)
1459 struct mlx5_priv *priv = dev->data->dev_private;
1460 struct mlx5_txq_ctrl *txq_ctrl;
1463 LIST_FOREACH(txq_ctrl, &priv->txqsctrl, next) {
1464 DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
1465 dev->data->port_id, txq_ctrl->txq.idx);