1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
14 #include <rte_malloc.h>
15 #include <rte_ethdev_driver.h>
16 #include <rte_common.h>
17 #include <rte_eal_paging.h>
19 #include <mlx5_glue.h>
20 #include <mlx5_devx_cmds.h>
21 #include <mlx5_common.h>
22 #include <mlx5_common_mr.h>
23 #include <mlx5_common_os.h>
24 #include <mlx5_malloc.h>
26 #include "mlx5_defs.h"
27 #include "mlx5_utils.h"
29 #include "mlx5_rxtx.h"
30 #include "mlx5_autoconf.h"
33 * Allocate TX queue elements.
36 * Pointer to TX queue structure.
39 txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
41 const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
44 for (i = 0; (i != elts_n); ++i)
45 txq_ctrl->txq.elts[i] = NULL;
46 DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
47 PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx, elts_n);
48 txq_ctrl->txq.elts_head = 0;
49 txq_ctrl->txq.elts_tail = 0;
50 txq_ctrl->txq.elts_comp = 0;
54 * Free TX queue elements.
57 * Pointer to TX queue structure.
60 txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
62 const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
63 const uint16_t elts_m = elts_n - 1;
64 uint16_t elts_head = txq_ctrl->txq.elts_head;
65 uint16_t elts_tail = txq_ctrl->txq.elts_tail;
66 struct rte_mbuf *(*elts)[elts_n] = &txq_ctrl->txq.elts;
68 DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
69 PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx);
70 txq_ctrl->txq.elts_head = 0;
71 txq_ctrl->txq.elts_tail = 0;
72 txq_ctrl->txq.elts_comp = 0;
74 while (elts_tail != elts_head) {
75 struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
77 MLX5_ASSERT(elt != NULL);
78 rte_pktmbuf_free_seg(elt);
79 #ifdef RTE_LIBRTE_MLX5_DEBUG
81 memset(&(*elts)[elts_tail & elts_m],
83 sizeof((*elts)[elts_tail & elts_m]));
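/*
 * Editor's illustration (not driver code): elts_n is the log2 of the
 * ring size, so elts_n = 8 gives 256 slots and elts_m = 255. The
 * head/tail counters run freely as uint16_t and are reduced to a slot
 * index by masking; a minimal sketch:
 *
 *   uint16_t in_flight = (uint16_t)(elts_head - elts_tail);
 *   struct rte_mbuf *m = (*elts)[elts_tail & elts_m]; (256 wraps to 0)
 *
 * Because the ring size is a power of two, the unsigned subtraction
 * and the mask stay correct even after the 16-bit counters overflow.
 */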
90 * Returns the per-port supported offloads.
93 * Pointer to Ethernet device.
96 * Supported Tx offloads.
99 mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
101 struct mlx5_priv *priv = dev->data->dev_private;
102 uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
103 DEV_TX_OFFLOAD_VLAN_INSERT);
104 struct mlx5_dev_config *config = &priv->config;
107 offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
108 DEV_TX_OFFLOAD_UDP_CKSUM |
109 DEV_TX_OFFLOAD_TCP_CKSUM);
111 offloads |= DEV_TX_OFFLOAD_TCP_TSO;
113 offloads |= DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP;
116 offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
118 offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
119 DEV_TX_OFFLOAD_UDP_TNL_TSO);
121 if (config->tunnel_en) {
123 offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
125 offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
126 DEV_TX_OFFLOAD_GRE_TNL_TSO |
127 DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
132 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
134 txq_sync_cq(struct mlx5_txq_data *txq)
136 volatile struct mlx5_cqe *cqe;
141 cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
142 ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
143 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
144 if (likely(ret != MLX5_CQE_STATUS_ERR)) {
145 /* No new CQEs in completion queue. */
146 MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
152 /* Move all CQEs to HW ownership. */
153 for (i = 0; i < txq->cqe_s; i++) {
155 cqe->op_own = MLX5_CQE_INVALIDATE;
157 /* Resync CQE and WQE (WQ in reset state). */
159 *txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
164 * Tx queue stop. Device queue goes to the idle state,
165 * all involved mbufs are freed from elts/WQ.
168 * Pointer to Ethernet device structure.
173 * 0 on success, a negative errno value otherwise and rte_errno is set.
176 mlx5_tx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
178 struct mlx5_priv *priv = dev->data->dev_private;
179 struct mlx5_txq_data *txq = (*priv->txqs)[idx];
180 struct mlx5_txq_ctrl *txq_ctrl =
181 container_of(txq, struct mlx5_txq_ctrl, txq);
184 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
185 /* Move QP to RESET state. */
186 if (txq_ctrl->obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_SQ) {
187 struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
189 /* Change queue state to reset with DevX. */
190 msq_attr.sq_state = MLX5_SQC_STATE_RDY;
191 msq_attr.state = MLX5_SQC_STATE_RST;
192 ret = mlx5_devx_cmd_modify_sq(txq_ctrl->obj->sq_devx,
195 DRV_LOG(ERR, "Cannot change the "
196 "Tx QP state to RESET %s",
202 struct ibv_qp_attr mod = {
203 .qp_state = IBV_QPS_RESET,
204 .port_num = (uint8_t)priv->dev_port,
206 struct ibv_qp *qp = txq_ctrl->obj->qp;
208 /* Change queue state to reset with Verbs. */
209 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
211 DRV_LOG(ERR, "Cannot change the Tx QP state to RESET "
212 "%s", strerror(errno));
217 /* Handle all send completions. */
219 /* Free elts stored in the SQ. */
220 txq_free_elts(txq_ctrl);
222 /* Prevent writing new pkts to SQ by setting no free WQE. */
222 txq->wqe_ci = txq->wqe_s;
225 /* Set the actual queue state. */
226 dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
231 * Tx queue stop. Device queue goes to the idle state,
232 * all involved mbufs are freed from elts/WQ.
235 * Pointer to Ethernet device structure.
240 * 0 on success, a negative errno value otherwise and rte_errno is set.
243 mlx5_tx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
247 if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
248 DRV_LOG(ERR, "Hairpin queue can't be stopped");
252 if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
254 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
255 ret = mlx5_mp_os_req_queue_control(dev, idx,
256 MLX5_MP_REQ_QUEUE_TX_STOP);
258 ret = mlx5_tx_queue_stop_primary(dev, idx);
264 * Tx queue start. Device queue goes to the ready state,
265 * the queue indices are reset and transmission can resume.
268 * Pointer to Ethernet device structure.
273 * 0 on success, a negative errno value otherwise and rte_errno is set.
276 mlx5_tx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
278 struct mlx5_priv *priv = dev->data->dev_private;
279 struct mlx5_txq_data *txq = (*priv->txqs)[idx];
280 struct mlx5_txq_ctrl *txq_ctrl =
281 container_of(txq, struct mlx5_txq_ctrl, txq);
284 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
285 if (txq_ctrl->obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_SQ) {
286 struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
287 struct mlx5_txq_obj *obj = txq_ctrl->obj;
289 msq_attr.sq_state = MLX5_SQC_STATE_RDY;
290 msq_attr.state = MLX5_SQC_STATE_RST;
291 ret = mlx5_devx_cmd_modify_sq(obj->sq_devx, &msq_attr);
295 "Cannot change the Tx QP state to RESET "
296 "%s", strerror(errno));
299 msq_attr.sq_state = MLX5_SQC_STATE_RST;
300 msq_attr.state = MLX5_SQC_STATE_RDY;
301 ret = mlx5_devx_cmd_modify_sq(obj->sq_devx, &msq_attr);
305 "Cannot change the Tx QP state to READY "
306 "%s", strerror(errno));
310 struct ibv_qp_attr mod = {
311 .qp_state = IBV_QPS_RESET,
312 .port_num = (uint8_t)priv->dev_port,
314 struct ibv_qp *qp = txq_ctrl->obj->qp;
316 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
318 DRV_LOG(ERR, "Cannot change the Tx QP state to RESET "
319 "%s", strerror(errno));
323 mod.qp_state = IBV_QPS_INIT;
324 ret = mlx5_glue->modify_qp(qp, &mod,
325 (IBV_QP_STATE | IBV_QP_PORT));
327 DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s",
332 mod.qp_state = IBV_QPS_RTR;
333 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
335 DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s",
340 mod.qp_state = IBV_QPS_RTS;
341 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
343 DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s",
349 txq_ctrl->txq.wqe_ci = 0;
350 txq_ctrl->txq.wqe_pi = 0;
351 txq_ctrl->txq.elts_comp = 0;
352 /* Set the actual queue state. */
353 dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
358 * Tx queue start. Device queue goes to the ready state,
359 * the queue indices are reset and transmission can resume.
362 * Pointer to Ethernet device structure.
367 * 0 on success, a negative errno value otherwise and rte_errno is set.
370 mlx5_tx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
374 if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
375 DRV_LOG(ERR, "Hairpin queue can't be started");
379 if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
381 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
382 ret = mlx5_mp_os_req_queue_control(dev, idx,
383 MLX5_MP_REQ_QUEUE_TX_START);
385 ret = mlx5_tx_queue_start_primary(dev, idx);
391 * Tx queue presetup checks.
394 * Pointer to Ethernet device structure.
398 * Number of descriptors to configure in queue.
401 * 0 on success, a negative errno value otherwise and rte_errno is set.
404 mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
406 struct mlx5_priv *priv = dev->data->dev_private;
408 if (*desc <= MLX5_TX_COMP_THRESH) {
410 "port %u number of descriptors requested for Tx queue"
411 " %u must be higher than MLX5_TX_COMP_THRESH, using %u"
412 " instead of %u", dev->data->port_id, idx,
413 MLX5_TX_COMP_THRESH + 1, *desc);
414 *desc = MLX5_TX_COMP_THRESH + 1;
416 if (!rte_is_power_of_2(*desc)) {
417 *desc = 1 << log2above(*desc);
419 "port %u increased number of descriptors in Tx queue"
420 " %u to the next power of two (%d)",
421 dev->data->port_id, idx, *desc);
423 DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
424 dev->data->port_id, idx, *desc);
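/*
 * Editor's worked example (the threshold value is illustrative,
 * assuming MLX5_TX_COMP_THRESH == 32): a request for 100 descriptors
 * passes the threshold check but is not a power of two, so it becomes
 *
 *   desc = 1 << log2above(100);  (desc = 128)
 *
 * A request for 16 descriptors is first raised to 33 and then rounded
 * up to 64.
 */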
425 if (idx >= priv->txqs_n) {
426 DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
427 dev->data->port_id, idx, priv->txqs_n);
428 rte_errno = EOVERFLOW;
431 if (!mlx5_txq_releasable(dev, idx)) {
433 DRV_LOG(ERR, "port %u unable to release queue index %u",
434 dev->data->port_id, idx);
437 mlx5_txq_release(dev, idx);
442 * DPDK callback to configure a TX queue.
445 * Pointer to Ethernet device structure.
449 * Number of descriptors to configure in queue.
451 * NUMA socket on which memory must be allocated.
453 * Thresholds parameters.
456 * 0 on success, a negative errno value otherwise and rte_errno is set.
459 mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
460 unsigned int socket, const struct rte_eth_txconf *conf)
462 struct mlx5_priv *priv = dev->data->dev_private;
463 struct mlx5_txq_data *txq = (*priv->txqs)[idx];
464 struct mlx5_txq_ctrl *txq_ctrl =
465 container_of(txq, struct mlx5_txq_ctrl, txq);
468 res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
471 txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
473 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
474 dev->data->port_id, idx);
477 DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
478 dev->data->port_id, idx);
479 (*priv->txqs)[idx] = &txq_ctrl->txq;
480 dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
485 * DPDK callback to configure a TX hairpin queue.
488 * Pointer to Ethernet device structure.
492 * Number of descriptors to configure in queue.
493 * @param[in] hairpin_conf
494 * The hairpin binding configuration.
497 * 0 on success, a negative errno value otherwise and rte_errno is set.
500 mlx5_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
502 const struct rte_eth_hairpin_conf *hairpin_conf)
504 struct mlx5_priv *priv = dev->data->dev_private;
505 struct mlx5_txq_data *txq = (*priv->txqs)[idx];
506 struct mlx5_txq_ctrl *txq_ctrl =
507 container_of(txq, struct mlx5_txq_ctrl, txq);
510 res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
513 if (hairpin_conf->peer_count != 1 ||
514 hairpin_conf->peers[0].port != dev->data->port_id ||
515 hairpin_conf->peers[0].queue >= priv->rxqs_n) {
516 DRV_LOG(ERR, "port %u unable to setup hairpin queue index %u:"
517 " invalid hairpin configuration", dev->data->port_id,
522 txq_ctrl = mlx5_txq_hairpin_new(dev, idx, desc, hairpin_conf);
524 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
525 dev->data->port_id, idx);
528 DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
529 dev->data->port_id, idx);
530 (*priv->txqs)[idx] = &txq_ctrl->txq;
531 dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
536 * DPDK callback to release a TX queue.
539 * Generic TX queue pointer.
542 mlx5_tx_queue_release(void *dpdk_txq)
544 struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
545 struct mlx5_txq_ctrl *txq_ctrl;
546 struct mlx5_priv *priv;
551 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
552 priv = txq_ctrl->priv;
553 for (i = 0; (i != priv->txqs_n); ++i)
554 if ((*priv->txqs)[i] == txq) {
555 DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
556 PORT_ID(priv), txq->idx);
557 mlx5_txq_release(ETH_DEV(priv), i);
563 * Configure the doorbell register non-cached attribute.
566 * Pointer to Tx queue control structure.
571 txq_uar_ncattr_init(struct mlx5_txq_ctrl *txq_ctrl, size_t page_size)
573 struct mlx5_priv *priv = txq_ctrl->priv;
576 txq_ctrl->txq.db_heu = priv->config.dbnc == MLX5_TXDB_HEURISTIC;
577 txq_ctrl->txq.db_nc = 0;
578 /* Check the doorbell register mapping type. */
579 cmd = txq_ctrl->uar_mmap_offset / page_size;
580 cmd >>= MLX5_UAR_MMAP_CMD_SHIFT;
581 cmd &= MLX5_UAR_MMAP_CMD_MASK;
582 if (cmd == MLX5_MMAP_GET_NC_PAGES_CMD)
583 txq_ctrl->txq.db_nc = 1;
587 * Initialize Tx UAR registers for primary process.
590 * Pointer to Tx queue control structure.
593 txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl)
595 struct mlx5_priv *priv = txq_ctrl->priv;
596 struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
598 unsigned int lock_idx;
600 const size_t page_size = rte_mem_page_size();
601 if (page_size == (size_t)-1) {
602 DRV_LOG(ERR, "Failed to get mem page size");
606 if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
608 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
610 ppriv->uar_table[txq_ctrl->txq.idx] = txq_ctrl->bf_reg;
611 txq_uar_ncattr_init(txq_ctrl, page_size);
613 /* Assign a UAR lock according to the UAR page number. */
614 lock_idx = (txq_ctrl->uar_mmap_offset / page_size) &
615 MLX5_UAR_PAGE_NUM_MASK;
616 txq_ctrl->txq.uar_lock = &priv->sh->uar_lock[lock_idx];
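/*
 * Editor's note: queues whose doorbell registers share a UAR page get
 * the same lock from the shared array, serializing their doorbell
 * writes. E.g., with a 4 KB page, mmap offsets 0x1000 and 0x1800 both
 * yield lock_idx 1 (assuming MLX5_UAR_PAGE_NUM_MASK covers that range).
 */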
621 * Remap UAR register of a Tx queue for secondary process.
623 * The remapped address is stored in the table of the process-private
624 * structure of the device, indexed by queue index.
627 * Pointer to Tx queue control structure.
629 * Verbs file descriptor to map UAR pages.
632 * 0 on success, a negative errno value otherwise and rte_errno is set.
635 txq_uar_init_secondary(struct mlx5_txq_ctrl *txq_ctrl, int fd)
637 struct mlx5_priv *priv = txq_ctrl->priv;
638 struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
639 struct mlx5_txq_data *txq = &txq_ctrl->txq;
643 const size_t page_size = rte_mem_page_size();
644 if (page_size == (size_t)-1) {
645 DRV_LOG(ERR, "Failed to get mem page size");
650 if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
654 * As in rdma-core, UARs are mapped with OS page size granularity.
655 * See the libmlx5 function mlx5_init_context().
657 uar_va = (uintptr_t)txq_ctrl->bf_reg;
658 offset = uar_va & (page_size - 1); /* Offset in page. */
659 addr = rte_mem_map(NULL, page_size, RTE_PROT_WRITE, RTE_MAP_SHARED,
660 fd, txq_ctrl->uar_mmap_offset);
663 "port %u mmap failed for BF reg of txq %u",
664 txq->port_id, txq->idx);
668 addr = RTE_PTR_ADD(addr, offset);
669 ppriv->uar_table[txq->idx] = addr;
670 txq_uar_ncattr_init(txq_ctrl, page_size);
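/*
 * Editor's illustration of the remap math above: the UAR register may
 * sit anywhere inside its page, so the whole page is mapped and the
 * in-page offset is re-applied. For a 4 KB page and a primary-process
 * register ending in ...2800 (hypothetical address):
 *
 *   offset = uar_va & (page_size - 1);   (offset = 0x800)
 *   addr   = rte_mem_map(...);           (page-aligned mapping)
 *   addr   = RTE_PTR_ADD(addr, offset);  (points at the register)
 */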
675 * Unmap UAR register of a Tx queue for secondary process.
678 * Pointer to Tx queue control structure.
681 txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl)
683 struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(txq_ctrl->priv));
685 const size_t page_size = rte_mem_page_size();
686 if (page_size == (size_t)-1) {
687 DRV_LOG(ERR, "Failed to get mem page size");
691 if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
693 addr = ppriv->uar_table[txq_ctrl->txq.idx];
694 rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
698 * Deinitialize Tx UAR registers for secondary process.
701 * Pointer to Ethernet device.
704 mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev)
706 struct mlx5_priv *priv = dev->data->dev_private;
707 struct mlx5_txq_data *txq;
708 struct mlx5_txq_ctrl *txq_ctrl;
711 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
712 for (i = 0; i != priv->txqs_n; ++i) {
713 if (!(*priv->txqs)[i])
715 txq = (*priv->txqs)[i];
716 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
717 txq_uar_uninit_secondary(txq_ctrl);
722 * Initialize Tx UAR registers for secondary process.
725 * Pointer to Ethernet device.
727 * Verbs file descriptor to map UAR pages.
730 * 0 on success, a negative errno value otherwise and rte_errno is set.
733 mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
735 struct mlx5_priv *priv = dev->data->dev_private;
736 struct mlx5_txq_data *txq;
737 struct mlx5_txq_ctrl *txq_ctrl;
741 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
742 for (i = 0; i != priv->txqs_n; ++i) {
743 if (!(*priv->txqs)[i])
745 txq = (*priv->txqs)[i];
746 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
747 if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
749 MLX5_ASSERT(txq->idx == (uint16_t)i);
750 ret = txq_uar_init_secondary(txq_ctrl, fd);
758 if (!(*priv->txqs)[i])
760 txq = (*priv->txqs)[i];
761 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
762 txq_uar_uninit_secondary(txq_ctrl);
768 * Create the Tx hairpin queue object.
771 * Pointer to Ethernet device.
773 * Queue index in DPDK Tx queue array.
776 * The hairpin DevX object initialised, NULL otherwise and rte_errno is set.
778 static struct mlx5_txq_obj *
779 mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
781 struct mlx5_priv *priv = dev->data->dev_private;
782 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
783 struct mlx5_txq_ctrl *txq_ctrl =
784 container_of(txq_data, struct mlx5_txq_ctrl, txq);
785 struct mlx5_devx_create_sq_attr attr = { 0 };
786 struct mlx5_txq_obj *tmpl = NULL;
787 uint32_t max_wq_data;
789 MLX5_ASSERT(txq_data);
790 MLX5_ASSERT(!txq_ctrl->obj);
791 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
795 "port %u Tx queue %u cannot allocate memory resources",
796 dev->data->port_id, txq_data->idx);
800 tmpl->type = MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN;
801 tmpl->txq_ctrl = txq_ctrl;
804 max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
805 /* Jumbo frames > 9 KB and a large number of packets should be supported. */
806 if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
807 if (priv->config.log_hp_size > max_wq_data) {
808 DRV_LOG(ERR, "total data size %u power of 2 is "
809 "too large for hairpin",
810 priv->config.log_hp_size);
815 attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
817 attr.wq_attr.log_hairpin_data_sz =
818 (max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
819 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
821 /* Set the number of packets to the maximum value for performance. */
822 attr.wq_attr.log_hairpin_num_packets =
823 attr.wq_attr.log_hairpin_data_sz -
824 MLX5_HAIRPIN_QUEUE_STRIDE;
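/*
 * Editor's worked example (sizes are illustrative): with
 * log_hairpin_data_sz = 15 (32 KB of buffer) and a stride of
 * 2^MLX5_HAIRPIN_QUEUE_STRIDE bytes, log_hairpin_num_packets becomes
 * 15 - MLX5_HAIRPIN_QUEUE_STRIDE, i.e. the buffer is split into the
 * maximum number of stride-sized packets.
 */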
825 attr.tis_num = priv->sh->tis->id;
826 tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->ctx, &attr);
829 "port %u tx hairpin queue %u can't create sq object",
830 dev->data->port_id, idx);
835 DRV_LOG(DEBUG, "port %u sxq %u updated with %p", dev->data->port_id,
837 LIST_INSERT_HEAD(&priv->txqsobj, tmpl, next);
842 * Release DevX SQ resources.
845 * DevX Tx queue object.
848 txq_release_devx_sq_resources(struct mlx5_txq_obj *txq_obj)
850 if (txq_obj->sq_devx)
851 claim_zero(mlx5_devx_cmd_destroy(txq_obj->sq_devx));
852 if (txq_obj->sq_umem)
853 claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->sq_umem));
855 mlx5_free(txq_obj->sq_buf);
856 if (txq_obj->sq_dbrec_page)
857 claim_zero(mlx5_release_dbr(&txq_obj->txq_ctrl->priv->dbrpgs,
859 (txq_obj->sq_dbrec_page->umem),
860 txq_obj->sq_dbrec_offset));
864 * Release DevX Tx CQ resources.
867 * DevX Tx queue object.
870 txq_release_devx_cq_resources(struct mlx5_txq_obj *txq_obj)
872 if (txq_obj->cq_devx)
873 claim_zero(mlx5_devx_cmd_destroy(txq_obj->cq_devx));
874 if (txq_obj->cq_umem)
875 claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->cq_umem));
877 mlx5_free(txq_obj->cq_buf);
878 if (txq_obj->cq_dbrec_page)
879 claim_zero(mlx5_release_dbr(&txq_obj->txq_ctrl->priv->dbrpgs,
881 (txq_obj->cq_dbrec_page->umem),
882 txq_obj->cq_dbrec_offset));
886 * Destroy the Tx queue DevX object.
889 * Txq object to destroy.
892 txq_release_devx_resources(struct mlx5_txq_obj *txq_obj)
894 MLX5_ASSERT(txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_SQ);
896 txq_release_devx_sq_resources(txq_obj);
897 txq_release_devx_cq_resources(txq_obj);
900 #ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
902 * Create a DevX CQ object for a Tx queue.
905 * Pointer to Ethernet device.
907 * Number of entries in the CQ.
909 * Queue index in DPDK Tx queue array.
911 * Pointer to the DevX Tx queue object.
914 * The DevX CQ object initialized, NULL otherwise and rte_errno is set.
916 static struct mlx5_devx_obj *
917 mlx5_devx_cq_new(struct rte_eth_dev *dev, uint32_t cqe_n, uint16_t idx,
918 struct mlx5_txq_obj *txq_obj)
920 struct mlx5_priv *priv = dev->data->dev_private;
921 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
922 struct mlx5_devx_obj *cq_obj = NULL;
923 struct mlx5_devx_cq_attr cq_attr = { 0 };
924 struct mlx5_cqe *cqe;
930 MLX5_ASSERT(txq_data);
931 MLX5_ASSERT(txq_obj);
932 page_size = rte_mem_page_size();
933 if (page_size == (size_t)-1) {
934 DRV_LOG(ERR, "Failed to get mem page size");
938 /* Allocate memory buffer for CQEs. */
939 alignment = MLX5_CQE_BUF_ALIGNMENT;
940 if (alignment == (size_t)-1) {
941 DRV_LOG(ERR, "Failed to get CQE buf alignment");
945 cqe_n = 1UL << log2above(cqe_n);
946 if (cqe_n > UINT16_MAX) {
948 "port %u Tx queue %u requests to many CQEs %u",
949 dev->data->port_id, txq_data->idx, cqe_n);
953 txq_obj->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
954 cqe_n * sizeof(struct mlx5_cqe),
956 priv->sh->numa_node);
957 if (!txq_obj->cq_buf) {
959 "port %u Tx queue %u cannot allocate memory (CQ)",
960 dev->data->port_id, txq_data->idx);
964 /* Register allocated buffer in user space with DevX. */
965 txq_obj->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
966 (void *)txq_obj->cq_buf,
967 cqe_n * sizeof(struct mlx5_cqe),
968 IBV_ACCESS_LOCAL_WRITE);
969 if (!txq_obj->cq_umem) {
972 "port %u Tx queue %u cannot register memory (CQ)",
973 dev->data->port_id, txq_data->idx);
976 /* Allocate doorbell record for completion queue. */
977 txq_obj->cq_dbrec_offset = mlx5_get_dbr(priv->sh->ctx,
979 &txq_obj->cq_dbrec_page);
980 if (txq_obj->cq_dbrec_offset < 0) {
982 DRV_LOG(ERR, "Failed to allocate CQ door-bell.");
985 cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
986 MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
987 cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar);
988 cq_attr.eqn = priv->sh->txpp.eqn;
989 cq_attr.q_umem_valid = 1;
990 cq_attr.q_umem_offset = (uintptr_t)txq_obj->cq_buf % page_size;
991 cq_attr.q_umem_id = mlx5_os_get_umem_id(txq_obj->cq_umem);
992 cq_attr.db_umem_valid = 1;
993 cq_attr.db_umem_offset = txq_obj->cq_dbrec_offset;
994 cq_attr.db_umem_id = mlx5_os_get_umem_id(txq_obj->cq_dbrec_page->umem);
995 cq_attr.log_cq_size = rte_log2_u32(cqe_n);
996 cq_attr.log_page_size = rte_log2_u32(page_size);
997 /* Create completion queue object with DevX. */
998 cq_obj = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr);
1001 DRV_LOG(ERR, "port %u Tx queue %u CQ creation failure",
1002 dev->data->port_id, idx);
1005 txq_data->cqe_n = log2above(cqe_n);
1006 txq_data->cqe_s = 1 << txq_data->cqe_n;
1007 /* Initially fill the CQ buffer with the invalid CQE opcode. */
1008 cqe = (struct mlx5_cqe *)txq_obj->cq_buf;
1009 for (i = 0; i < txq_data->cqe_s; i++) {
1010 cqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK;
1016 txq_release_devx_cq_resources(txq_obj);
1022 * Create an SQ object using DevX.
1025 * Pointer to Ethernet device.
1027 * Queue index in DPDK Tx queue array.
1029 * Pointer to the DevX Tx queue object.
1032 * The DevX object initialized, NULL otherwise and rte_errno is set.
1034 static struct mlx5_devx_obj *
1035 mlx5_devx_sq_new(struct rte_eth_dev *dev, uint16_t idx,
1036 struct mlx5_txq_obj *txq_obj)
1038 struct mlx5_priv *priv = dev->data->dev_private;
1039 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
1040 struct mlx5_devx_create_sq_attr sq_attr = { 0 };
1041 struct mlx5_devx_obj *sq_obj = NULL;
1046 MLX5_ASSERT(txq_data);
1047 MLX5_ASSERT(txq_obj);
1048 page_size = rte_mem_page_size();
1049 if (page_size == (size_t)-1) {
1050 DRV_LOG(ERR, "Failed to get mem page size");
1054 wqe_n = RTE_MIN(1UL << txq_data->elts_n,
1055 (uint32_t)priv->sh->device_attr.max_qp_wr);
1056 txq_obj->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
1057 wqe_n * sizeof(struct mlx5_wqe),
1058 page_size, priv->sh->numa_node);
1059 if (!txq_obj->sq_buf) {
1061 "port %u Tx queue %u cannot allocate memory (SQ)",
1062 dev->data->port_id, txq_data->idx);
1066 /* Register allocated buffer in user space with DevX. */
1067 txq_obj->sq_umem = mlx5_glue->devx_umem_reg
1069 (void *)txq_obj->sq_buf,
1070 wqe_n * sizeof(struct mlx5_wqe),
1071 IBV_ACCESS_LOCAL_WRITE);
1072 if (!txq_obj->sq_umem) {
1075 "port %u Tx queue %u cannot register memory (SQ)",
1076 dev->data->port_id, txq_data->idx);
1079 /* Allocate doorbell record for send queue. */
1080 txq_obj->sq_dbrec_offset = mlx5_get_dbr(priv->sh->ctx,
1082 &txq_obj->sq_dbrec_page);
1083 if (txq_obj->sq_dbrec_offset < 0) {
1085 DRV_LOG(ERR, "Failed to allocate SQ door-bell.");
1088 sq_attr.tis_lst_sz = 1;
1089 sq_attr.tis_num = priv->sh->tis->id;
1090 sq_attr.state = MLX5_SQC_STATE_RST;
1091 sq_attr.cqn = txq_obj->cq_devx->id;
1092 sq_attr.flush_in_error_en = 1;
1093 sq_attr.allow_multi_pkt_send_wqe = !!priv->config.mps;
1094 sq_attr.allow_swp = !!priv->config.swp;
1095 sq_attr.min_wqe_inline_mode = priv->config.hca_attr.vport_inline_mode;
1096 sq_attr.wq_attr.uar_page =
1097 mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar);
1098 sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
1099 sq_attr.wq_attr.pd = priv->sh->pdn;
1100 sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
1101 sq_attr.wq_attr.log_wq_sz = log2above(wqe_n);
1102 sq_attr.wq_attr.dbr_umem_valid = 1;
1103 sq_attr.wq_attr.dbr_addr = txq_obj->sq_dbrec_offset;
1104 sq_attr.wq_attr.dbr_umem_id =
1105 mlx5_os_get_umem_id(txq_obj->sq_dbrec_page->umem);
1106 sq_attr.wq_attr.wq_umem_valid = 1;
1107 sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(txq_obj->sq_umem);
1108 sq_attr.wq_attr.wq_umem_offset = (uintptr_t)txq_obj->sq_buf % page_size;
1109 /* Create Send Queue object with DevX. */
1110 sq_obj = mlx5_devx_cmd_create_sq(priv->sh->ctx, &sq_attr);
1113 DRV_LOG(ERR, "port %u Tx queue %u SQ creation failure",
1114 dev->data->port_id, idx);
1117 txq_data->wqe_n = log2above(wqe_n);
1121 txq_release_devx_sq_resources(txq_obj);
1128 * Create the Tx queue DevX object.
1131 * Pointer to Ethernet device.
1133 * Queue index in DPDK Tx queue array.
1136 * The DevX object initialised, NULL otherwise and rte_errno is set.
1138 static struct mlx5_txq_obj *
1139 mlx5_txq_obj_devx_new(struct rte_eth_dev *dev, uint16_t idx)
1141 #ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
1142 DRV_LOG(ERR, "port %u Tx queue %u cannot create with DevX, no UAR",
1143 dev->data->port_id, idx);
1147 struct mlx5_priv *priv = dev->data->dev_private;
1148 struct mlx5_dev_ctx_shared *sh = priv->sh;
1149 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
1150 struct mlx5_txq_ctrl *txq_ctrl =
1151 container_of(txq_data, struct mlx5_txq_ctrl, txq);
1152 struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
1153 struct mlx5_txq_obj *txq_obj = NULL;
1158 MLX5_ASSERT(txq_data);
1159 MLX5_ASSERT(!txq_ctrl->obj);
1160 txq_obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
1161 sizeof(struct mlx5_txq_obj), 0,
1165 "port %u Tx queue %u cannot allocate memory resources",
1166 dev->data->port_id, txq_data->idx);
1170 txq_obj->type = MLX5_TXQ_OBJ_TYPE_DEVX_SQ;
1171 txq_obj->txq_ctrl = txq_ctrl;
1173 /* Create the Completion Queue. */
1174 cqe_n = (1UL << txq_data->elts_n) / MLX5_TX_COMP_THRESH +
1175 1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
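/*
 * Editor's worked example (constant values are illustrative): with
 * elts_n = 10 (1024 descriptors), MLX5_TX_COMP_THRESH = 32 and
 * MLX5_TX_COMP_THRESH_INLINE_DIV = 8 this requests
 * 1024 / 32 + 1 + 8 = 41 CQEs; mlx5_devx_cq_new() then rounds the
 * count up to the next power of two, 64.
 */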
1176 /* Create completion queue object with DevX. */
1177 txq_obj->cq_devx = mlx5_devx_cq_new(dev, cqe_n, idx, txq_obj);
1178 if (!txq_obj->cq_devx) {
1182 txq_data->cqe_m = txq_data->cqe_s - 1;
1183 txq_data->cqes = (volatile struct mlx5_cqe *)txq_obj->cq_buf;
1184 txq_data->cq_ci = 0;
1185 txq_data->cq_pi = 0;
1186 txq_data->cq_db = (volatile uint32_t *)(txq_obj->cq_dbrec_page->dbrs +
1187 txq_obj->cq_dbrec_offset);
1188 *txq_data->cq_db = 0;
1189 /* Create Send Queue object with DevX. */
1190 txq_obj->sq_devx = mlx5_devx_sq_new(dev, idx, txq_obj);
1191 if (!txq_obj->sq_devx) {
1195 /* Create the Work Queue. */
1196 txq_data->wqe_s = 1 << txq_data->wqe_n;
1197 txq_data->wqe_m = txq_data->wqe_s - 1;
1198 txq_data->wqes = (struct mlx5_wqe *)txq_obj->sq_buf;
1199 txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
1200 txq_data->wqe_ci = 0;
1201 txq_data->wqe_pi = 0;
1202 txq_data->wqe_comp = 0;
1203 txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
1204 txq_data->qp_db = (volatile uint32_t *)
1205 (txq_obj->sq_dbrec_page->dbrs +
1206 txq_obj->sq_dbrec_offset +
1207 MLX5_SND_DBR * sizeof(uint32_t));
1208 *txq_data->qp_db = 0;
1209 txq_data->qp_num_8s = txq_obj->sq_devx->id << 8;
1210 /* Change Send Queue state to Ready-to-Send. */
1211 msq_attr.sq_state = MLX5_SQC_STATE_RST;
1212 msq_attr.state = MLX5_SQC_STATE_RDY;
1213 ret = mlx5_devx_cmd_modify_sq(txq_obj->sq_devx, &msq_attr);
1217 "port %u Tx queue %u SP state to SQC_STATE_RDY failed",
1218 dev->data->port_id, idx);
1221 txq_data->fcqs = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
1222 txq_data->cqe_s * sizeof(*txq_data->fcqs),
1223 RTE_CACHE_LINE_SIZE,
1225 if (!txq_data->fcqs) {
1226 DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory (FCQ)",
1227 dev->data->port_id, idx);
1231 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
1233 * If DevX is used, the TIS transport domain value must be queried
1234 * and stored. This is done once per port.
1235 * The value is then used on Rx, when creating the matching TIR.
1237 if (priv->config.devx && !priv->sh->tdn)
1238 priv->sh->tdn = priv->sh->td->id;
1240 MLX5_ASSERT(sh->tx_uar);
1241 reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
1242 MLX5_ASSERT(reg_addr);
1243 txq_ctrl->bf_reg = reg_addr;
1244 txq_ctrl->uar_mmap_offset =
1245 mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar);
1246 txq_uar_init(txq_ctrl);
1247 LIST_INSERT_HEAD(&priv->txqsobj, txq_obj, next);
1250 ret = rte_errno; /* Save rte_errno before cleanup. */
1251 txq_release_devx_resources(txq_obj);
1252 if (txq_data->fcqs) {
1253 mlx5_free(txq_data->fcqs);
1254 txq_data->fcqs = NULL;
1257 rte_errno = ret; /* Restore rte_errno. */
1263 * Create the Tx queue Verbs object.
1266 * Pointer to Ethernet device.
1268 * Queue index in DPDK Tx queue array.
1270 * Type of the Tx queue object to create.
1273 * The Verbs object initialised, NULL otherwise and rte_errno is set.
1275 struct mlx5_txq_obj *
1276 mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
1277 enum mlx5_txq_obj_type type)
1279 struct mlx5_priv *priv = dev->data->dev_private;
1280 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
1281 struct mlx5_txq_ctrl *txq_ctrl =
1282 container_of(txq_data, struct mlx5_txq_ctrl, txq);
1283 struct mlx5_txq_obj tmpl;
1284 struct mlx5_txq_obj *txq_obj = NULL;
1286 struct ibv_qp_init_attr_ex init;
1287 struct ibv_qp_attr mod;
1290 struct mlx5dv_qp qp = { .comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET };
1291 struct mlx5dv_cq cq_info;
1292 struct mlx5dv_obj obj;
1293 const int desc = 1 << txq_data->elts_n;
1296 if (type == MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN)
1297 return mlx5_txq_obj_hairpin_new(dev, idx);
1298 if (type == MLX5_TXQ_OBJ_TYPE_DEVX_SQ)
1299 return mlx5_txq_obj_devx_new(dev, idx);
1300 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
1301 /* If DevX is used, an additional mask is needed to read the tisn value. */
1302 if (priv->config.devx && !priv->sh->tdn)
1303 qp.comp_mask |= MLX5DV_QP_MASK_RAW_QP_HANDLES;
1305 MLX5_ASSERT(txq_data);
1306 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
1307 priv->verbs_alloc_ctx.obj = txq_ctrl;
1308 if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
1310 "port %u MLX5_ENABLE_CQE_COMPRESSION must never be set",
1311 dev->data->port_id);
1315 memset(&tmpl, 0, sizeof(struct mlx5_txq_obj));
1316 cqe_n = desc / MLX5_TX_COMP_THRESH +
1317 1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
1318 tmpl.cq = mlx5_glue->create_cq(priv->sh->ctx, cqe_n, NULL, NULL, 0);
1319 if (tmpl.cq == NULL) {
1320 DRV_LOG(ERR, "port %u Tx queue %u CQ creation failure",
1321 dev->data->port_id, idx);
1325 attr.init = (struct ibv_qp_init_attr_ex){
1326 /* CQ to be associated with the send queue. */
1328 /* CQ to be associated with the receive queue. */
1331 /* Max number of outstanding WRs. */
1333 ((priv->sh->device_attr.max_qp_wr <
1335 priv->sh->device_attr.max_qp_wr :
1338 * Max number of scatter/gather elements in a WR,
1339 * must be 1 to prevent libmlx5 from trying to allocate
1340 * too much memory. TX gather is not impacted by the
1341 * device_attr.max_sge limit and will still work
1346 .qp_type = IBV_QPT_RAW_PACKET,
1348 * Do *NOT* enable this, completion events are managed per
1353 .comp_mask = IBV_QP_INIT_ATTR_PD,
1355 if (txq_data->inlen_send)
1356 attr.init.cap.max_inline_data = txq_ctrl->max_inline_data;
1357 if (txq_data->tso_en) {
1358 attr.init.max_tso_header = txq_ctrl->max_tso_header;
1359 attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
1361 tmpl.qp = mlx5_glue->create_qp_ex(priv->sh->ctx, &attr.init);
1362 if (tmpl.qp == NULL) {
1363 DRV_LOG(ERR, "port %u Tx queue %u QP creation failure",
1364 dev->data->port_id, idx);
1368 attr.mod = (struct ibv_qp_attr){
1369 /* Move the QP to this state. */
1370 .qp_state = IBV_QPS_INIT,
1371 /* IB device port number. */
1372 .port_num = (uint8_t)priv->dev_port,
1374 ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod,
1375 (IBV_QP_STATE | IBV_QP_PORT));
1378 "port %u Tx queue %u QP state to IBV_QPS_INIT failed",
1379 dev->data->port_id, idx);
1383 attr.mod = (struct ibv_qp_attr){
1384 .qp_state = IBV_QPS_RTR
1386 ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
1389 "port %u Tx queue %u QP state to IBV_QPS_RTR failed",
1390 dev->data->port_id, idx);
1394 attr.mod.qp_state = IBV_QPS_RTS;
1395 ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
1398 "port %u Tx queue %u QP state to IBV_QPS_RTS failed",
1399 dev->data->port_id, idx);
1403 txq_obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
1404 sizeof(struct mlx5_txq_obj), 0,
1407 DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory",
1408 dev->data->port_id, idx);
1412 obj.cq.in = tmpl.cq;
1413 obj.cq.out = &cq_info;
1414 obj.qp.in = tmpl.qp;
1416 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
1421 if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
1423 "port %u wrong MLX5_CQE_SIZE environment variable"
1424 " value: it should be set to %u",
1425 dev->data->port_id, RTE_CACHE_LINE_SIZE);
1429 txq_data->cqe_n = log2above(cq_info.cqe_cnt);
1430 txq_data->cqe_s = 1 << txq_data->cqe_n;
1431 txq_data->cqe_m = txq_data->cqe_s - 1;
1432 txq_data->qp_num_8s = ((struct ibv_qp *)tmpl.qp)->qp_num << 8;
1433 txq_data->wqes = qp.sq.buf;
1434 txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
1435 txq_data->wqe_s = 1 << txq_data->wqe_n;
1436 txq_data->wqe_m = txq_data->wqe_s - 1;
1437 txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
1438 txq_data->qp_db = &qp.dbrec[MLX5_SND_DBR];
1439 txq_data->cq_db = cq_info.dbrec;
1440 txq_data->cqes = (volatile struct mlx5_cqe *)cq_info.buf;
1441 txq_data->cq_ci = 0;
1442 txq_data->cq_pi = 0;
1443 txq_data->wqe_ci = 0;
1444 txq_data->wqe_pi = 0;
1445 txq_data->wqe_comp = 0;
1446 txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
1447 txq_data->fcqs = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
1448 txq_data->cqe_s * sizeof(*txq_data->fcqs),
1449 RTE_CACHE_LINE_SIZE, txq_ctrl->socket);
1450 if (!txq_data->fcqs) {
1451 DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory (FCQ)",
1452 dev->data->port_id, idx);
1456 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
1458 * If DevX is used, the TIS transport domain value must be queried
1459 * and stored. This is done once per port.
1460 * The value is then used on Rx, when creating the matching TIR.
1462 if (priv->config.devx && !priv->sh->tdn) {
1463 ret = mlx5_devx_cmd_qp_query_tis_td(tmpl.qp, qp.tisn,
1466 DRV_LOG(ERR, "Fail to query port %u Tx queue %u QP TIS "
1467 "transport domain", dev->data->port_id, idx);
1471 DRV_LOG(DEBUG, "port %u Tx queue %u TIS number %d "
1472 "transport domain %d", dev->data->port_id,
1473 idx, qp.tisn, priv->sh->tdn);
1477 txq_obj->qp = tmpl.qp;
1478 txq_obj->cq = tmpl.cq;
1479 txq_ctrl->bf_reg = qp.bf.reg;
1480 if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
1481 txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
1482 DRV_LOG(DEBUG, "port %u: uar_mmap_offset 0x%"PRIx64,
1483 dev->data->port_id, txq_ctrl->uar_mmap_offset);
1486 "port %u failed to retrieve UAR info, invalid"
1488 dev->data->port_id);
1492 txq_uar_init(txq_ctrl);
1493 LIST_INSERT_HEAD(&priv->txqsobj, txq_obj, next);
1494 txq_obj->txq_ctrl = txq_ctrl;
1495 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1498 ret = rte_errno; /* Save rte_errno before cleanup. */
1500 claim_zero(mlx5_glue->destroy_cq(tmpl.cq));
1502 claim_zero(mlx5_glue->destroy_qp(tmpl.qp));
1503 if (txq_data && txq_data->fcqs) {
1504 mlx5_free(txq_data->fcqs);
1505 txq_data->fcqs = NULL;
1509 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1510 rte_errno = ret; /* Restore rte_errno. */
1515 * Release a Tx Verbs queue object.
1518 * Verbs Tx queue object.
1521 mlx5_txq_obj_release(struct mlx5_txq_obj *txq_obj)
1523 MLX5_ASSERT(txq_obj);
1524 if (txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN) {
1526 claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
1527 } else if (txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_SQ) {
1528 txq_release_devx_resources(txq_obj);
1530 claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
1531 claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
1533 if (txq_obj->txq_ctrl->txq.fcqs) {
1534 mlx5_free(txq_obj->txq_ctrl->txq.fcqs);
1535 txq_obj->txq_ctrl->txq.fcqs = NULL;
1537 LIST_REMOVE(txq_obj, next);
1542 * Verify that the Verbs Tx queue list is empty.
1545 * Pointer to Ethernet device.
1548 * The number of objects not released.
1551 mlx5_txq_obj_verify(struct rte_eth_dev *dev)
1553 struct mlx5_priv *priv = dev->data->dev_private;
1555 struct mlx5_txq_obj *txq_obj;
1557 LIST_FOREACH(txq_obj, &priv->txqsobj, next) {
1558 DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
1559 dev->data->port_id, txq_obj->txq_ctrl->txq.idx);
1566 * Calculate the total number of WQEBBs for a Tx queue.
1568 * Simplified version of calc_sq_size() in rdma-core.
1571 * Pointer to Tx queue control structure.
1574 * The number of WQEBBs.
1577 txq_calc_wqebb_cnt(struct mlx5_txq_ctrl *txq_ctrl)
1579 unsigned int wqe_size;
1580 const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
1582 wqe_size = MLX5_WQE_CSEG_SIZE +
1583 MLX5_WQE_ESEG_SIZE +
1585 MLX5_ESEG_MIN_INLINE_SIZE +
1586 txq_ctrl->max_inline_data;
1587 return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;
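/*
 * Editor's worked example (values are illustrative): assuming 64 B
 * WQEBBs, 1024 descriptors and a worst-case per-descriptor WQE of
 * 96 B, the result is
 *
 *   rte_align32pow2(96 * 1024) / 64 = 131072 / 64 = 2048 WQEBBs.
 */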
1591 * Calculate the maximal inline data size for a Tx queue.
1594 * Pointer to Tx queue control structure.
1597 * The maximal inline data size.
1600 txq_calc_inline_max(struct mlx5_txq_ctrl *txq_ctrl)
1602 const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
1603 struct mlx5_priv *priv = txq_ctrl->priv;
1604 unsigned int wqe_size;
1606 wqe_size = priv->sh->device_attr.max_qp_wr / desc;
1610 * This calculation is derived from the source of
1611 * mlx5_calc_send_wqe() in the rdma-core library.
1613 wqe_size = wqe_size * MLX5_WQE_SIZE -
1614 MLX5_WQE_CSEG_SIZE -
1615 MLX5_WQE_ESEG_SIZE -
1618 MLX5_DSEG_MIN_INLINE_SIZE;
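/*
 * Editor's worked example (values are illustrative): with
 * max_qp_wr = 32768 and 1024 descriptors, each descriptor may span
 * 32768 / 1024 = 32 WQEBBs, i.e. 32 * 64 = 2048 bytes, from which the
 * segment overhead above is subtracted to obtain the maximal inline
 * data size.
 */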
1623 * Set Tx queue parameters from device configuration.
1626 * Pointer to Tx queue control structure.
1629 txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
1631 struct mlx5_priv *priv = txq_ctrl->priv;
1632 struct mlx5_dev_config *config = &priv->config;
1633 unsigned int inlen_send; /* Inline data for ordinary SEND.*/
1634 unsigned int inlen_empw; /* Inline data for enhanced MPW. */
1635 unsigned int inlen_mode; /* Minimal required Inline data. */
1636 unsigned int txqs_inline; /* Min Tx queues to enable inline. */
1637 uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
1638 bool tso = txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
1639 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1640 DEV_TX_OFFLOAD_GRE_TNL_TSO |
1641 DEV_TX_OFFLOAD_IP_TNL_TSO |
1642 DEV_TX_OFFLOAD_UDP_TNL_TSO);
1646 if (config->txqs_inline == MLX5_ARG_UNSET)
1648 #if defined(RTE_ARCH_ARM64)
1649 (priv->pci_dev->id.device_id ==
1650 PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) ?
1651 MLX5_INLINE_MAX_TXQS_BLUEFIELD :
1653 MLX5_INLINE_MAX_TXQS;
1655 txqs_inline = (unsigned int)config->txqs_inline;
1656 inlen_send = (config->txq_inline_max == MLX5_ARG_UNSET) ?
1657 MLX5_SEND_DEF_INLINE_LEN :
1658 (unsigned int)config->txq_inline_max;
1659 inlen_empw = (config->txq_inline_mpw == MLX5_ARG_UNSET) ?
1660 MLX5_EMPW_DEF_INLINE_LEN :
1661 (unsigned int)config->txq_inline_mpw;
1662 inlen_mode = (config->txq_inline_min == MLX5_ARG_UNSET) ?
1663 0 : (unsigned int)config->txq_inline_min;
1664 if (config->mps != MLX5_MPW_ENHANCED && config->mps != MLX5_MPW)
1667 * If a minimal amount of data to inline is requested,
1668 * we MUST enable inlining. This is the case for ConnectX-4,
1669 * which usually requires L2 inlined for correct operation,
1670 * and ConnectX-4 Lx, which requires L2-L4 inlined to
1671 * support E-Switch Flows.
1674 if (inlen_mode <= MLX5_ESEG_MIN_INLINE_SIZE) {
1676 * Optimize minimal inlining for single
1677 * segment packets to fill one WQEBB
1680 temp = MLX5_ESEG_MIN_INLINE_SIZE;
1682 temp = inlen_mode - MLX5_ESEG_MIN_INLINE_SIZE;
1683 temp = RTE_ALIGN(temp, MLX5_WSEG_SIZE) +
1684 MLX5_ESEG_MIN_INLINE_SIZE;
1685 temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
1687 if (temp != inlen_mode) {
1689 "port %u minimal required inline setting"
1690 " aligned from %u to %u",
1691 PORT_ID(priv), inlen_mode, temp);
1696 * If the port is configured to support VLAN insertion but the
1697 * device does not support it in HW (NICs before ConnectX-5, or
1698 * when the wqe_vlan_insert flag is not set), we must enable
1699 * data inline on all queues, since the feature is implemented
1700 * by the single tx_burst routine.
1702 txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
1703 vlan_inline = (dev_txoff & DEV_TX_OFFLOAD_VLAN_INSERT) &&
1704 !config->hw_vlan_insert;
1706 * If there are only a few Tx queues, saving CPU cycles is
1707 * prioritized and data inlining is disabled entirely.
1709 if (inlen_send && priv->txqs_n >= txqs_inline) {
1711 * The data sent with ordinary MLX5_OPCODE_SEND
1712 * may be inlined in the Ethernet Segment; align the
1713 * length accordingly to fit entire WQEBBs.
1715 temp = RTE_MAX(inlen_send,
1716 MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE);
1717 temp -= MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
1718 temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
1719 temp += MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
1720 temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
1721 MLX5_ESEG_MIN_INLINE_SIZE -
1722 MLX5_WQE_CSEG_SIZE -
1723 MLX5_WQE_ESEG_SIZE -
1724 MLX5_WQE_DSEG_SIZE * 2);
1725 temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
1726 temp = RTE_MAX(temp, inlen_mode);
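/*
 * Editor's worked example (segment sizes are illustrative, assuming
 * MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE = 34): for a
 * requested inlen_send = 200 the chain above gives 200 - 34 = 166,
 * aligned up to 192 (three 64 B WQEBBs), plus 34 back = 226, so the
 * inline buffer ends exactly on a WQEBB boundary.
 */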
1727 if (temp != inlen_send) {
1729 "port %u ordinary send inline setting"
1730 " aligned from %u to %u",
1731 PORT_ID(priv), inlen_send, temp);
1735 * Not aligned to cache lines, but to WQEs.
1736 * The first bytes of data (initial alignment)
1737 * are copied explicitly at the beginning of
1738 * the inlining buffer in the Ethernet Segment.
1741 MLX5_ASSERT(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
1742 MLX5_ASSERT(inlen_send <= MLX5_WQE_SIZE_MAX +
1743 MLX5_ESEG_MIN_INLINE_SIZE -
1744 MLX5_WQE_CSEG_SIZE -
1745 MLX5_WQE_ESEG_SIZE -
1746 MLX5_WQE_DSEG_SIZE * 2);
1747 } else if (inlen_mode) {
1749 * If minimal inlining is requested we must
1750 * enable inlining in general, despite the
1751 * number of configured queues. Ignore the
1752 * txq_inline_max devarg, as this is not
1753 * full-featured inlining.
1755 inlen_send = inlen_mode;
1757 } else if (vlan_inline) {
1759 * The hardware does not report the offload for
1760 * VLAN insertion, so we must enable data inline
1761 * to implement the feature in software.
1763 inlen_send = MLX5_ESEG_MIN_INLINE_SIZE;
1769 txq_ctrl->txq.inlen_send = inlen_send;
1770 txq_ctrl->txq.inlen_mode = inlen_mode;
1771 txq_ctrl->txq.inlen_empw = 0;
1772 if (inlen_send && inlen_empw && priv->txqs_n >= txqs_inline) {
1774 * The data sent with MLX5_OPCODE_ENHANCED_MPSW
1775 * may be inlined in the Data Segment; align the
1776 * length accordingly to fit entire WQEBBs.
1778 temp = RTE_MAX(inlen_empw,
1779 MLX5_WQE_SIZE + MLX5_DSEG_MIN_INLINE_SIZE);
1780 temp -= MLX5_DSEG_MIN_INLINE_SIZE;
1781 temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
1782 temp += MLX5_DSEG_MIN_INLINE_SIZE;
1783 temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
1784 MLX5_DSEG_MIN_INLINE_SIZE -
1785 MLX5_WQE_CSEG_SIZE -
1786 MLX5_WQE_ESEG_SIZE -
1787 MLX5_WQE_DSEG_SIZE);
1788 temp = RTE_MIN(temp, MLX5_EMPW_MAX_INLINE_LEN);
1789 if (temp != inlen_empw) {
1791 "port %u enhanced empw inline setting"
1792 " aligned from %u to %u",
1793 PORT_ID(priv), inlen_empw, temp);
1796 MLX5_ASSERT(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
1797 MLX5_ASSERT(inlen_empw <= MLX5_WQE_SIZE_MAX +
1798 MLX5_DSEG_MIN_INLINE_SIZE -
1799 MLX5_WQE_CSEG_SIZE -
1800 MLX5_WQE_ESEG_SIZE -
1801 MLX5_WQE_DSEG_SIZE);
1802 txq_ctrl->txq.inlen_empw = inlen_empw;
1804 txq_ctrl->max_inline_data = RTE_MAX(inlen_send, inlen_empw);
1806 txq_ctrl->max_tso_header = MLX5_MAX_TSO_HEADER;
1807 txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->max_inline_data,
1808 MLX5_MAX_TSO_HEADER);
1809 txq_ctrl->txq.tso_en = 1;
1811 txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
1812 txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
1813 DEV_TX_OFFLOAD_UDP_TNL_TSO |
1814 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
1815 txq_ctrl->txq.offloads) && config->swp;
1819 * Adjust Tx queue data inline parameters for large queue sizes.
1820 * The data inline feature requires multiple WQEs to fit the packets,
1821 * and if a large number of Tx descriptors is requested by the application,
1822 * the total WQE amount may exceed the hardware capabilities. If the
1823 * default inline settings are used, we can try to adjust them to
1824 * meet the hardware requirements without exceeding the queue size.
1827 * Pointer to Tx queue control structure.
1830 * Zero on success, otherwise the parameters cannot be adjusted.
1833 txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
1835 struct mlx5_priv *priv = txq_ctrl->priv;
1836 struct mlx5_dev_config *config = &priv->config;
1837 unsigned int max_inline;
1839 max_inline = txq_calc_inline_max(txq_ctrl);
1840 if (!txq_ctrl->txq.inlen_send) {
1842 * Inline data feature is not engaged at all.
1843 * There is nothing to adjust.
1847 if (txq_ctrl->max_inline_data <= max_inline) {
1849 * The requested inline data length does not
1850 * exceed queue capabilities.
1854 if (txq_ctrl->txq.inlen_mode > max_inline) {
1856 "minimal data inline requirements (%u) are not"
1857 " satisfied (%u) on port %u, try the smaller"
1858 " Tx queue size (%d)",
1859 txq_ctrl->txq.inlen_mode, max_inline,
1860 priv->dev_data->port_id,
1861 priv->sh->device_attr.max_qp_wr);
1864 if (txq_ctrl->txq.inlen_send > max_inline &&
1865 config->txq_inline_max != MLX5_ARG_UNSET &&
1866 config->txq_inline_max > (int)max_inline) {
1868 "txq_inline_max requirements (%u) are not"
1869 " satisfied (%u) on port %u, try the smaller"
1870 " Tx queue size (%d)",
1871 txq_ctrl->txq.inlen_send, max_inline,
1872 priv->dev_data->port_id,
1873 priv->sh->device_attr.max_qp_wr);
1876 if (txq_ctrl->txq.inlen_empw > max_inline &&
1877 config->txq_inline_mpw != MLX5_ARG_UNSET &&
1878 config->txq_inline_mpw > (int)max_inline) {
1880 "txq_inline_mpw requirements (%u) are not"
1881 " satisfied (%u) on port %u, try the smaller"
1882 " Tx queue size (%d)",
1883 txq_ctrl->txq.inlen_empw, max_inline,
1884 priv->dev_data->port_id,
1885 priv->sh->device_attr.max_qp_wr);
1888 if (txq_ctrl->txq.tso_en && max_inline < MLX5_MAX_TSO_HEADER) {
1890 "tso header inline requirements (%u) are not"
1891 " satisfied (%u) on port %u, try the smaller"
1892 " Tx queue size (%d)",
1893 MLX5_MAX_TSO_HEADER, max_inline,
1894 priv->dev_data->port_id,
1895 priv->sh->device_attr.max_qp_wr);
1898 if (txq_ctrl->txq.inlen_send > max_inline) {
1900 "adjust txq_inline_max (%u->%u)"
1901 " due to large Tx queue on port %u",
1902 txq_ctrl->txq.inlen_send, max_inline,
1903 priv->dev_data->port_id);
1904 txq_ctrl->txq.inlen_send = max_inline;
1906 if (txq_ctrl->txq.inlen_empw > max_inline) {
1908 "adjust txq_inline_mpw (%u->%u)"
1909 "due to large Tx queue on port %u",
1910 txq_ctrl->txq.inlen_empw, max_inline,
1911 priv->dev_data->port_id);
1912 txq_ctrl->txq.inlen_empw = max_inline;
1914 txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->txq.inlen_send,
1915 txq_ctrl->txq.inlen_empw);
1916 MLX5_ASSERT(txq_ctrl->max_inline_data <= max_inline);
1917 MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= max_inline);
1918 MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
1919 MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw ||
1920 !txq_ctrl->txq.inlen_empw);
1928 * Create a DPDK Tx queue.
1931 * Pointer to Ethernet device.
1935 * Number of descriptors to configure in queue.
1937 * NUMA socket on which memory must be allocated.
1939 * Thresholds parameters.
1942 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1944 struct mlx5_txq_ctrl *
1945 mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1946 unsigned int socket, const struct rte_eth_txconf *conf)
1948 struct mlx5_priv *priv = dev->data->dev_private;
1949 struct mlx5_txq_ctrl *tmpl;
1951 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
1952 desc * sizeof(struct rte_mbuf *), 0, socket);
1957 if (mlx5_mr_btree_init(&tmpl->txq.mr_ctrl.cache_bh,
1958 MLX5_MR_BTREE_CACHE_N, socket)) {
1959 /* rte_errno is already set. */
1962 /* Save the pointer to the global generation number to check memory events. */
1963 tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->sh->share_cache.dev_gen;
1964 MLX5_ASSERT(desc > MLX5_TX_COMP_THRESH);
1965 tmpl->txq.offloads = conf->offloads |
1966 dev->data->dev_conf.txmode.offloads;
1968 tmpl->socket = socket;
1969 tmpl->txq.elts_n = log2above(desc);
1970 tmpl->txq.elts_s = desc;
1971 tmpl->txq.elts_m = desc - 1;
1972 tmpl->txq.port_id = dev->data->port_id;
1973 tmpl->txq.idx = idx;
1974 txq_set_params(tmpl);
1975 if (txq_adjust_params(tmpl))
1977 if (txq_calc_wqebb_cnt(tmpl) >
1978 priv->sh->device_attr.max_qp_wr) {
1980 "port %u Tx WQEBB count (%d) exceeds the limit (%d),"
1981 " try smaller queue size",
1982 dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
1983 priv->sh->device_attr.max_qp_wr);
1987 rte_atomic32_inc(&tmpl->refcnt);
1988 tmpl->type = MLX5_TXQ_TYPE_STANDARD;
1989 LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
1997 * Create a DPDK Tx hairpin queue.
2000 * Pointer to Ethernet device.
2004 * Number of descriptors to configure in queue.
2005 * @param hairpin_conf
2006 * The hairpin configuration.
2009 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
2011 struct mlx5_txq_ctrl *
2012 mlx5_txq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
2013 const struct rte_eth_hairpin_conf *hairpin_conf)
2015 struct mlx5_priv *priv = dev->data->dev_private;
2016 struct mlx5_txq_ctrl *tmpl;
2018 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
2025 tmpl->socket = SOCKET_ID_ANY;
2026 tmpl->txq.elts_n = log2above(desc);
2027 tmpl->txq.port_id = dev->data->port_id;
2028 tmpl->txq.idx = idx;
2029 tmpl->hairpin_conf = *hairpin_conf;
2030 tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
2031 rte_atomic32_inc(&tmpl->refcnt);
2032 LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
2040 * Pointer to Ethernet device.
2045 * A pointer to the queue if it exists.
2047 struct mlx5_txq_ctrl *
2048 mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
2050 struct mlx5_priv *priv = dev->data->dev_private;
2051 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
2052 struct mlx5_txq_ctrl *ctrl = NULL;
2055 ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
2056 rte_atomic32_inc(&ctrl->refcnt);
2062 * Release a Tx queue.
2065 * Pointer to Ethernet device.
2070 * 1 while a reference on it exists, 0 when freed.
2073 mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
2075 struct mlx5_priv *priv = dev->data->dev_private;
2076 struct mlx5_txq_ctrl *txq;
2078 if (!(*priv->txqs)[idx])
2080 txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
2081 if (!rte_atomic32_dec_and_test(&txq->refcnt))
2084 mlx5_txq_obj_release(txq->obj);
2088 mlx5_mr_btree_free(&txq->txq.mr_ctrl.cache_bh);
2089 LIST_REMOVE(txq, next);
2091 (*priv->txqs)[idx] = NULL;
2092 dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
2097 * Verify if the queue can be released.
2100 * Pointer to Ethernet device.
2105 * 1 if the queue can be released.
2108 mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
2110 struct mlx5_priv *priv = dev->data->dev_private;
2111 struct mlx5_txq_ctrl *txq;
2113 if (!(*priv->txqs)[idx])
2115 txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
2116 return (rte_atomic32_read(&txq->refcnt) == 1);
2120 * Verify that the Tx queue list is empty.
2123 * Pointer to Ethernet device.
2126 * The number of objects not released.
2129 mlx5_txq_verify(struct rte_eth_dev *dev)
2131 struct mlx5_priv *priv = dev->data->dev_private;
2132 struct mlx5_txq_ctrl *txq_ctrl;
2135 LIST_FOREACH(txq_ctrl, &priv->txqsctrl, next) {
2136 DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
2137 dev->data->port_id, txq_ctrl->txq.idx);
2144 * Set the Tx queue dynamic timestamp (mask and offset).
2147 * Pointer to the Ethernet device structure.
2150 mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev)
2152 struct mlx5_priv *priv = dev->data->dev_private;
2153 struct mlx5_dev_ctx_shared *sh = priv->sh;
2154 struct mlx5_txq_data *data;
2159 nbit = rte_mbuf_dynflag_lookup
2160 (RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
2161 off = rte_mbuf_dynfield_lookup
2162 (RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
2163 if (nbit > 0 && off >= 0 && sh->txpp.refcnt)
2164 mask = 1ULL << nbit;
2165 for (i = 0; i != priv->txqs_n; ++i) {
2166 data = (*priv->txqs)[i];
2170 data->ts_mask = mask;
2171 data->ts_offset = off;
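/*
 * Editor's usage sketch (application side; hedged, error handling
 * omitted): to have packets scheduled, an application stamps mbufs
 * with the same dynamic field/flag this function looks up:
 *
 *   int off = rte_mbuf_dynfield_lookup
 *             (RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
 *   uint64_t flg = 1ULL << rte_mbuf_dynflag_lookup
 *             (RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
 *   *RTE_MBUF_DYNFIELD(mbuf, off, uint64_t *) = when;
 *   mbuf->ol_flags |= flg;
 *
 * The field/flag must have been registered beforehand (in this PMD,
 * presumably by the Tx packet pacing setup when the tx_pp devarg is
 * enabled).
 */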