/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <ethdev_driver.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_common.h>
#include <mlx5_common_mr.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5_utils.h"
#include "mlx5.h"
#include "mlx5_tx.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"

/**
 * Allocate TX queue elements.
 *
 * @param txq_ctrl
 *   Pointer to TX queue structure.
 */
void
txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
{
	const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
	unsigned int i;

	for (i = 0; (i != elts_n); ++i)
		txq_ctrl->txq.elts[i] = NULL;
	DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
		PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx, elts_n);
	txq_ctrl->txq.elts_head = 0;
	txq_ctrl->txq.elts_tail = 0;
	txq_ctrl->txq.elts_comp = 0;
}

/**
 * Free TX queue elements.
 *
 * @param txq_ctrl
 *   Pointer to TX queue structure.
 */
void
txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
{
	const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
	const uint16_t elts_m = elts_n - 1;
	uint16_t elts_head = txq_ctrl->txq.elts_head;
	uint16_t elts_tail = txq_ctrl->txq.elts_tail;
	struct rte_mbuf *(*elts)[elts_n] = &txq_ctrl->txq.elts;

	DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
		PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx);
	txq_ctrl->txq.elts_head = 0;
	txq_ctrl->txq.elts_tail = 0;
	txq_ctrl->txq.elts_comp = 0;

	while (elts_tail != elts_head) {
		struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];

		MLX5_ASSERT(elt != NULL);
		rte_pktmbuf_free_seg(elt);
#ifdef RTE_LIBRTE_MLX5_DEBUG
		/* Poison the freed slot to catch use after free. */
		memset(&(*elts)[elts_tail & elts_m],
		       0x77,
		       sizeof((*elts)[elts_tail & elts_m]));
#endif
		++elts_tail;
	}
}
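
/*
 * Illustrative note (not part of the driver): elts[] is a power-of-two
 * ring, so a free-running 16-bit index is mapped to a slot with a mask
 * instead of a modulo, and wrap-around of elts_head/elts_tail needs no
 * special casing. A minimal standalone sketch of the same technique:
 *
 *	uint16_t n = 256;		// ring size, power of two
 *	uint16_t m = n - 1;		// index mask
 *	uint16_t tail = 65535;		// about to wrap
 *	unsigned int slot = tail & m;	// 255, still in range
 *	tail++;				// index wraps to 0 ...
 *	slot = tail & m;		// ... and so does the slot
 */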

/**
 * Returns the per-port supported offloads.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   Supported Tx offloads.
 */
uint64_t
mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint64_t offloads = (RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
			     RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
	struct mlx5_dev_config *config = &priv->config;
	struct mlx5_dev_cap *dev_cap = &priv->sh->dev_cap;

	if (dev_cap->hw_csum)
		offloads |= (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
			     RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
			     RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
	if (dev_cap->tso)
		offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
	if (priv->sh->config.tx_pp)
		offloads |= RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP;
	if (dev_cap->swp & MLX5_SW_PARSING_CSUM_CAP)
		offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
	if (dev_cap->swp & MLX5_SW_PARSING_TSO_CAP)
		offloads |= (RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
			     RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
	if (dev_cap->tunnel_en) {
		if (dev_cap->hw_csum)
			offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
		if (dev_cap->tso) {
			if (dev_cap->tunnel_en &
			    MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)
				offloads |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
			if (dev_cap->tunnel_en &
			    MLX5_TUNNELED_OFFLOADS_GRE_CAP)
				offloads |= RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO;
			if (dev_cap->tunnel_en &
			    MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)
				offloads |= RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
		}
	}
	if (!config->mprq.enabled)
		offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
	return offloads;
}
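
/*
 * Illustrative sketch (not part of the driver): an application sees the
 * result of this function in dev_info.tx_offload_capa and may enable a
 * subset of it at configure time. port_id and the chosen offload are
 * assumptions of the example.
 *
 *	struct rte_eth_dev_info info;
 *	struct rte_eth_conf conf = {0};
 *
 *	rte_eth_dev_info_get(port_id, &info);
 *	if (info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 *		conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 */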

/* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
static void
txq_sync_cq(struct mlx5_txq_data *txq)
{
	volatile struct mlx5_cqe *cqe;
	int ret, i;

	i = txq->cqe_s;
	do {
		cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
		ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
			if (likely(ret != MLX5_CQE_STATUS_ERR)) {
				/* No new CQEs in completion queue. */
				MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
				break;
			}
		}
		++txq->cq_ci;
	} while (--i);
	/* Move all CQEs to HW ownership. */
	for (i = 0; i < txq->cqe_s; i++) {
		cqe = &txq->cqes[i];
		cqe->op_own = MLX5_CQE_INVALIDATE;
	}
	/* Resync CQE and WQE (WQ in reset state). */
	rte_io_wmb();
	*txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
	txq->cq_pi = txq->cq_ci;
	rte_io_wmb();
}

/**
 * Tx queue stop. Device queue goes to the idle state,
 * all involved mbufs are freed from elts/WQ.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Tx queue index.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_tx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq, struct mlx5_txq_ctrl, txq);
	int ret;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/* Move QP to RESET state. */
	ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj, MLX5_TXQ_MOD_RDY2RST,
					   (uint8_t)priv->dev_port);
	if (ret)
		return ret;
	/* Handle all send completions. */
	txq_sync_cq(txq);
	/* Free elts stored in the SQ. */
	txq_free_elts(txq_ctrl);
	/* Prevent writing new pkts to SQ by setting no free WQE.*/
	txq->wqe_ci = txq->wqe_s;
	txq->wqe_pi = 0;
	txq->elts_comp = 0;
	/* Set the actual queue state. */
	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

/**
 * Tx queue stop. Device queue goes to the idle state,
 * all involved mbufs are freed from elts/WQ.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Tx queue index.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_tx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
{
	int ret;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
		DRV_LOG(ERR, "Hairpin queue can't be stopped");
		rte_errno = EINVAL;
		return -EINVAL;
	}
	if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		ret = mlx5_mp_os_req_queue_control(dev, idx,
						   MLX5_MP_REQ_QUEUE_TX_STOP);
	} else {
		ret = mlx5_tx_queue_stop_primary(dev, idx);
	}
	return ret;
}
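
/*
 * Illustrative sketch (not part of the driver): applications reach this
 * handler through the generic ethdev API; the primary/secondary dispatch
 * above is transparent to them. port_id and queue_id are assumptions of
 * the example.
 *
 *	int rc = rte_eth_dev_tx_queue_stop(port_id, queue_id);
 *
 *	if (rc == 0)
 *		rc = rte_eth_dev_tx_queue_start(port_id, queue_id);
 */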

/**
 * Tx queue start. Device queue goes to the ready state,
 * all required mbufs are allocated and WQ is replenished.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Tx queue index.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_tx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq, struct mlx5_txq_ctrl, txq);
	int ret;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj,
					   MLX5_TXQ_MOD_RST2RDY,
					   (uint8_t)priv->dev_port);
	if (ret)
		return ret;
	txq_ctrl->txq.wqe_ci = 0;
	txq_ctrl->txq.wqe_pi = 0;
	txq_ctrl->txq.elts_comp = 0;
	/* Set the actual queue state. */
	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
}

/**
 * Tx queue start. Device queue goes to the ready state,
 * all required mbufs are allocated and WQ is replenished.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Tx queue index.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_tx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
{
	int ret;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
		DRV_LOG(ERR, "Hairpin queue can't be started");
		rte_errno = EINVAL;
		return -EINVAL;
	}
	if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
		return 0;
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		ret = mlx5_mp_os_req_queue_control(dev, idx,
						   MLX5_MP_REQ_QUEUE_TX_START);
	} else {
		ret = mlx5_tx_queue_start_primary(dev, idx);
	}
	return ret;
}

/**
 * Tx queue presetup checks.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Tx queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (*desc <= MLX5_TX_COMP_THRESH) {
		DRV_LOG(WARNING,
			"port %u number of descriptors requested for Tx queue"
			" %u must be higher than MLX5_TX_COMP_THRESH, using %u"
			" instead of %u", dev->data->port_id, idx,
			MLX5_TX_COMP_THRESH + 1, *desc);
		*desc = MLX5_TX_COMP_THRESH + 1;
	}
	if (!rte_is_power_of_2(*desc)) {
		*desc = 1 << log2above(*desc);
		DRV_LOG(WARNING,
			"port %u increased number of descriptors in Tx queue"
			" %u to the next power of two (%d)",
			dev->data->port_id, idx, *desc);
	}
	DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
		dev->data->port_id, idx, *desc);
	if (idx >= priv->txqs_n) {
		DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
			dev->data->port_id, idx, priv->txqs_n);
		rte_errno = EOVERFLOW;
		return -rte_errno;
	}
	if (!mlx5_txq_releasable(dev, idx)) {
		rte_errno = EBUSY;
		DRV_LOG(ERR, "port %u unable to release queue index %u",
			dev->data->port_id, idx);
		return -rte_errno;
	}
	mlx5_txq_release(dev, idx);
	return 0;
}
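
/*
 * Worked example (illustrative, assuming the default MLX5_TX_COMP_THRESH
 * of 32): a request for desc = 32 is first bumped to 33 and then rounded
 * up to the next power of two, so the queue is created with 64
 * descriptors; a request for desc = 1000 is already above the threshold
 * and is rounded up to 1024.
 */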

/**
 * DPDK callback to configure a TX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   TX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_txconf *conf)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	int res;

	res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
	if (res)
		return res;
	txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
	if (!txq_ctrl) {
		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
			dev->data->port_id, idx);
		return -rte_errno;
	}
	DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
		dev->data->port_id, idx);
	(*priv->txqs)[idx] = &txq_ctrl->txq;
	return 0;
}
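
/*
 * Illustrative sketch (not part of the driver): the usual application-side
 * call chain that lands in this callback. port_id, queue index and
 * descriptor count are assumptions of the example.
 *
 *	struct rte_eth_dev_info info;
 *	struct rte_eth_txconf txconf;
 *
 *	rte_eth_dev_info_get(port_id, &info);
 *	txconf = info.default_txconf;
 *	txconf.offloads = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 *	rte_eth_tx_queue_setup(port_id, 0, 1024, rte_socket_id(), &txconf);
 */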

/**
 * DPDK callback to configure a TX hairpin queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   TX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param[in] hairpin_conf
 *   The hairpin binding configuration.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
			    uint16_t desc,
			    const struct rte_eth_hairpin_conf *hairpin_conf)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	int res;

	res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
	if (res)
		return res;
	if (hairpin_conf->peer_count != 1) {
		rte_errno = EINVAL;
		DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue index %u"
			" peer count is %u", dev->data->port_id,
			idx, hairpin_conf->peer_count);
		return -rte_errno;
	}
	if (hairpin_conf->peers[0].port == dev->data->port_id) {
		if (hairpin_conf->peers[0].queue >= priv->rxqs_n) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue"
				" index %u, Rx %u is larger than %u",
				dev->data->port_id, idx,
				hairpin_conf->peers[0].queue, priv->rxqs_n);
			return -rte_errno;
		}
	} else {
		if (hairpin_conf->manual_bind == 0 ||
		    hairpin_conf->tx_explicit == 0) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue"
				" index %u peer port %u with attributes %u %u",
				dev->data->port_id, idx,
				hairpin_conf->peers[0].port,
				hairpin_conf->manual_bind,
				hairpin_conf->tx_explicit);
			return -rte_errno;
		}
	}
	txq_ctrl = mlx5_txq_hairpin_new(dev, idx, desc, hairpin_conf);
	if (!txq_ctrl) {
		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
			dev->data->port_id, idx);
		return -rte_errno;
	}
	DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
		dev->data->port_id, idx);
	(*priv->txqs)[idx] = &txq_ctrl->txq;
	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
	return 0;
}
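
/*
 * Illustrative sketch (not part of the driver): a 1:1 hairpin binding from
 * this Tx queue to a peer Rx queue, matching the checks above. Peer
 * port/queue numbers are assumptions of the example.
 *
 *	struct rte_eth_hairpin_conf hconf = {
 *		.peer_count = 1,
 *		.manual_bind = 1,
 *		.tx_explicit = 1,
 *	};
 *
 *	hconf.peers[0].port = peer_port_id;
 *	hconf.peers[0].queue = peer_rxq;
 *	rte_eth_tx_hairpin_queue_setup(port_id, txq_idx, nb_desc, &hconf);
 */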

/**
 * DPDK callback to release a TX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param qid
 *   Transmit queue index.
 */
void
mlx5_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct mlx5_txq_data *txq = dev->data->tx_queues[qid];

	if (txq == NULL)
		return;
	DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
		dev->data->port_id, qid);
	mlx5_txq_release(dev, qid);
}

/**
 * Remap UAR register of a Tx queue for secondary process.
 *
 * Remapped address is stored at the table in the process private structure of
 * the device, indexed by queue index.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 * @param fd
 *   Verbs file descriptor to map UAR pages.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
txq_uar_init_secondary(struct mlx5_txq_ctrl *txq_ctrl, int fd)
{
	struct mlx5_priv *priv = txq_ctrl->priv;
	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
	struct mlx5_proc_priv *primary_ppriv = priv->sh->pppriv;
	struct mlx5_txq_data *txq = &txq_ctrl->txq;
	void *addr;
	uintptr_t uar_va;
	uintptr_t offset;
	const size_t page_size = rte_mem_page_size();

	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
		return 0;
	MLX5_ASSERT(ppriv);
	/*
	 * As rdma-core, UARs are mapped in size of OS page
	 * size. Ref to libmlx5 function: mlx5_init_context()
	 */
	uar_va = (uintptr_t)primary_ppriv->uar_table[txq->idx].db;
	offset = uar_va & (page_size - 1); /* Offset in page. */
	addr = rte_mem_map(NULL, page_size, RTE_PROT_WRITE, RTE_MAP_SHARED,
			   fd, txq_ctrl->uar_mmap_offset);
	if (!addr) {
		DRV_LOG(ERR, "Port %u mmap failed for BF reg of txq %u.",
			txq->port_id, txq->idx);
		rte_errno = ENXIO;
		return -rte_errno;
	}
	addr = RTE_PTR_ADD(addr, offset);
	ppriv->uar_table[txq->idx].db = addr;
#ifndef RTE_ARCH_64
	ppriv->uar_table[txq->idx].sl_p =
			primary_ppriv->uar_table[txq->idx].sl_p;
#endif
	return 0;
}
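
/*
 * Illustrative note (not part of the driver): rte_mem_map() works at page
 * granularity while a UAR register may sit anywhere inside its page, so
 * the whole page is mapped first and the sub-page offset is re-applied.
 * E.g. with 4 KiB pages, a primary-process doorbell VA ending in 0x840
 * yields offset = 0x840, and the secondary's pointer becomes
 * (mapped_page + 0x840). The unmap path below reverses this with
 * RTE_PTR_ALIGN_FLOOR(addr, page_size).
 */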

/**
 * Unmap UAR register of a Tx queue for secondary process.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 */
static void
txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl)
{
	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(txq_ctrl->priv));
	void *addr;
	const size_t page_size = rte_mem_page_size();

	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		rte_errno = ENOMEM;
	}
	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
		return;
	addr = ppriv->uar_table[txq_ctrl->txq.idx].db;
	rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
}

/**
 * Deinitialize Tx UAR registers for secondary process.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev)
{
	struct mlx5_proc_priv *ppriv = (struct mlx5_proc_priv *)
					dev->process_private;
	const size_t page_size = rte_mem_page_size();
	void *addr;
	unsigned int i;

	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		return;
	}
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
	for (i = 0; i != ppriv->uar_table_sz; ++i) {
		if (!ppriv->uar_table[i].db)
			continue;
		addr = ppriv->uar_table[i].db;
		rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
	}
}

/**
 * Initialize Tx UAR registers for secondary process.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param fd
 *   Verbs file descriptor to map UAR pages.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq;
	struct mlx5_txq_ctrl *txq_ctrl;
	unsigned int i;
	int ret;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
	for (i = 0; i != priv->txqs_n; ++i) {
		if (!(*priv->txqs)[i])
			continue;
		txq = (*priv->txqs)[i];
		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
		if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
			continue;
		MLX5_ASSERT(txq->idx == (uint16_t)i);
		ret = txq_uar_init_secondary(txq_ctrl, fd);
		if (ret)
			goto error;
	}
	return 0;
error:
	/* Rollback. */
	do {
		if (!(*priv->txqs)[i])
			continue;
		txq = (*priv->txqs)[i];
		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
		txq_uar_uninit_secondary(txq_ctrl);
	} while (i--);
	return -rte_errno;
}

/**
 * Verify the Verbs Tx queue list is empty.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The number of objects not released.
 */
int
mlx5_txq_obj_verify(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret = 0;
	struct mlx5_txq_obj *txq_obj;

	LIST_FOREACH(txq_obj, &priv->txqsobj, next) {
		DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
			dev->data->port_id, txq_obj->txq_ctrl->txq.idx);
		++ret;
	}
	return ret;
}

/**
 * Calculate the total number of WQEBB for Tx queue.
 *
 * Simplified version of calc_sq_size() in rdma-core.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 *
 * @return
 *   The number of WQEBB.
 */
static int
txq_calc_wqebb_cnt(struct mlx5_txq_ctrl *txq_ctrl)
{
	unsigned int wqe_size;
	const unsigned int desc = 1 << txq_ctrl->txq.elts_n;

	wqe_size = MLX5_WQE_CSEG_SIZE +
		   MLX5_WQE_ESEG_SIZE +
		   MLX5_WSEG_SIZE -
		   MLX5_ESEG_MIN_INLINE_SIZE +
		   txq_ctrl->max_inline_data;
	return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;
}
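
/*
 * Worked example (illustrative, with assumed inputs): a WQEBB is
 * MLX5_WQE_SIZE = 64B. If the per-descriptor estimate comes out at
 * wqe_size = 96B and desc = 512, then 96 * 512 = 49152 is rounded up by
 * rte_align32pow2() to 65536, giving 65536 / 64 = 1024 WQEBBs, which is
 * the value checked against dev_cap.max_qp_wr in mlx5_txq_new().
 */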

/**
 * Calculate the maximal inline data size for Tx queue.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 *
 * @return
 *   The maximal inline data size.
 */
static unsigned int
txq_calc_inline_max(struct mlx5_txq_ctrl *txq_ctrl)
{
	const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
	struct mlx5_priv *priv = txq_ctrl->priv;
	unsigned int wqe_size;

	wqe_size = priv->sh->dev_cap.max_qp_wr / desc;
	if (!wqe_size)
		return 0;
	/*
	 * This calculation is derived from the source of
	 * mlx5_calc_send_wqe() in rdma-core library.
	 */
	wqe_size = wqe_size * MLX5_WQE_SIZE -
		   MLX5_WQE_CSEG_SIZE -
		   MLX5_WQE_ESEG_SIZE -
		   MLX5_WSEG_SIZE -
		   MLX5_WSEG_SIZE +
		   MLX5_DSEG_MIN_INLINE_SIZE;
	return wqe_size;
}

/**
 * Set Tx queue parameters from device configuration.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 */
static void
txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
{
	struct mlx5_priv *priv = txq_ctrl->priv;
	struct mlx5_dev_config *config = &priv->config;
	struct mlx5_dev_cap *dev_cap = &priv->sh->dev_cap;
	unsigned int inlen_send; /* Inline data for ordinary SEND.*/
	unsigned int inlen_empw; /* Inline data for enhanced MPW. */
	unsigned int inlen_mode; /* Minimal required Inline data. */
	unsigned int txqs_inline; /* Min Tx queues to enable inline. */
	uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
	bool tso = txq_ctrl->txq.offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
					     RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
					     RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
					     RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
					     RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
	bool vlan_inline;
	unsigned int temp;

	txq_ctrl->txq.fast_free =
		!!((txq_ctrl->txq.offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
		   !(txq_ctrl->txq.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) &&
		   !config->mprq.enabled);
	if (config->txqs_inline == MLX5_ARG_UNSET)
		txqs_inline =
#if defined(RTE_ARCH_ARM64)
		(priv->pci_dev && priv->pci_dev->id.device_id ==
			PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) ?
			MLX5_INLINE_MAX_TXQS_BLUEFIELD :
#endif
			MLX5_INLINE_MAX_TXQS;
	else
		txqs_inline = (unsigned int)config->txqs_inline;
	inlen_send = (config->txq_inline_max == MLX5_ARG_UNSET) ?
		     MLX5_SEND_DEF_INLINE_LEN :
		     (unsigned int)config->txq_inline_max;
	inlen_empw = (config->txq_inline_mpw == MLX5_ARG_UNSET) ?
		     MLX5_EMPW_DEF_INLINE_LEN :
		     (unsigned int)config->txq_inline_mpw;
	inlen_mode = (config->txq_inline_min == MLX5_ARG_UNSET) ?
		     0 : (unsigned int)config->txq_inline_min;
	if (config->mps != MLX5_MPW_ENHANCED && config->mps != MLX5_MPW)
		inlen_empw = 0;
	/*
	 * If there is a requested minimal amount of data to inline
	 * we MUST enable inlining. This is the case for ConnectX-4,
	 * which usually requires L2 headers inlined to operate
	 * correctly, and for ConnectX-4 Lx, which requires L2-L4
	 * headers inlined to support E-Switch Flows.
	 */
	if (inlen_mode) {
		if (inlen_mode <= MLX5_ESEG_MIN_INLINE_SIZE) {
			/*
			 * Optimize minimal inlining for single
			 * segment packets to fill one WQEBB
			 * without gaps.
			 */
			temp = MLX5_ESEG_MIN_INLINE_SIZE;
		} else {
			temp = inlen_mode - MLX5_ESEG_MIN_INLINE_SIZE;
			temp = RTE_ALIGN(temp, MLX5_WSEG_SIZE) +
			       MLX5_ESEG_MIN_INLINE_SIZE;
			temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
		}
		if (temp != inlen_mode) {
			DRV_LOG(INFO,
				"port %u minimal required inline setting"
				" aligned from %u to %u",
				PORT_ID(priv), inlen_mode, temp);
			inlen_mode = temp;
		}
	}
	/*
	 * If port is configured to support VLAN insertion and device
	 * does not support this feature by HW (for NICs before ConnectX-5
	 * or in case of wqe_vlan_insert flag is not set) we must enable
	 * data inline on all queues because it is supported by single
	 * tx_burst routine.
	 */
	txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
	vlan_inline = (dev_txoff & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) &&
		      !config->hw_vlan_insert;
	/*
	 * If there are few Tx queues it is prioritized
	 * to save CPU cycles and disable data inlining at all.
	 */
	if (inlen_send && priv->txqs_n >= txqs_inline) {
		/*
		 * The data sent with ordinary MLX5_OPCODE_SEND
		 * may be inlined in Ethernet Segment, align the
		 * length accordingly to fit entire WQEBBs.
		 */
		temp = RTE_MAX(inlen_send,
			       MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE);
		temp -= MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
		temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
		temp += MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
		temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
				     MLX5_ESEG_MIN_INLINE_SIZE -
				     MLX5_WQE_CSEG_SIZE -
				     MLX5_WQE_ESEG_SIZE -
				     MLX5_WQE_DSEG_SIZE * 2);
		temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
		temp = RTE_MAX(temp, inlen_mode);
		if (temp != inlen_send) {
			DRV_LOG(INFO,
				"port %u ordinary send inline setting"
				" aligned from %u to %u",
				PORT_ID(priv), inlen_send, temp);
			inlen_send = temp;
		}
		/*
		 * Not aligned to cache lines, but to WQEs.
		 * First bytes of data (initial alignment)
		 * is going to be copied explicitly at the
		 * beginning of inlining buffer in Ethernet
		 * Segment.
		 */
		MLX5_ASSERT(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
		MLX5_ASSERT(inlen_send <= MLX5_WQE_SIZE_MAX +
					  MLX5_ESEG_MIN_INLINE_SIZE -
					  MLX5_WQE_CSEG_SIZE -
					  MLX5_WQE_ESEG_SIZE -
					  MLX5_WQE_DSEG_SIZE * 2);
	} else if (inlen_mode) {
		/*
		 * If minimal inlining is requested we must
		 * enable inlining in general, despite the
		 * number of configured queues. Ignore the
		 * txq_inline_max devarg, this is not
		 * full-featured inline.
		 */
		inlen_send = inlen_mode;
		inlen_empw = 0;
	} else if (vlan_inline) {
		/*
		 * Hardware does not report offload for
		 * VLAN insertion, we must enable data inline
		 * to implement feature by software.
		 */
		inlen_send = MLX5_ESEG_MIN_INLINE_SIZE;
		inlen_empw = 0;
	} else {
		inlen_send = 0;
		inlen_empw = 0;
	}
	txq_ctrl->txq.inlen_send = inlen_send;
	txq_ctrl->txq.inlen_mode = inlen_mode;
	txq_ctrl->txq.inlen_empw = 0;
	if (inlen_send && inlen_empw && priv->txqs_n >= txqs_inline) {
		/*
		 * The data sent with MLX5_OPCODE_ENHANCED_MPSW
		 * may be inlined in Data Segment, align the
		 * length accordingly to fit entire WQEBBs.
		 */
		temp = RTE_MAX(inlen_empw,
			       MLX5_WQE_SIZE + MLX5_DSEG_MIN_INLINE_SIZE);
		temp -= MLX5_DSEG_MIN_INLINE_SIZE;
		temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
		temp += MLX5_DSEG_MIN_INLINE_SIZE;
		temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
				     MLX5_DSEG_MIN_INLINE_SIZE -
				     MLX5_WQE_CSEG_SIZE -
				     MLX5_WQE_ESEG_SIZE -
				     MLX5_WQE_DSEG_SIZE);
		temp = RTE_MIN(temp, MLX5_EMPW_MAX_INLINE_LEN);
		if (temp != inlen_empw) {
			DRV_LOG(INFO,
				"port %u enhanced empw inline setting"
				" aligned from %u to %u",
				PORT_ID(priv), inlen_empw, temp);
			inlen_empw = temp;
		}
		MLX5_ASSERT(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
		MLX5_ASSERT(inlen_empw <= MLX5_WQE_SIZE_MAX +
					  MLX5_DSEG_MIN_INLINE_SIZE -
					  MLX5_WQE_CSEG_SIZE -
					  MLX5_WQE_ESEG_SIZE -
					  MLX5_WQE_DSEG_SIZE);
		txq_ctrl->txq.inlen_empw = inlen_empw;
	}
	txq_ctrl->max_inline_data = RTE_MAX(inlen_send, inlen_empw);
	if (tso) {
		txq_ctrl->max_tso_header = MLX5_MAX_TSO_HEADER;
		txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->max_inline_data,
						    MLX5_MAX_TSO_HEADER);
		txq_ctrl->txq.tso_en = 1;
	}
	if (((RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO & txq_ctrl->txq.offloads) &&
	     (dev_cap->tunnel_en & MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)) |
	    ((RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO & txq_ctrl->txq.offloads) &&
	     (dev_cap->tunnel_en & MLX5_TUNNELED_OFFLOADS_GRE_CAP)) |
	    ((RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO & txq_ctrl->txq.offloads) &&
	     (dev_cap->tunnel_en & MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)) |
	    (dev_cap->swp & MLX5_SW_PARSING_TSO_CAP))
		txq_ctrl->txq.tunnel_en = 1;
	txq_ctrl->txq.swp_en = (((RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
				  RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO) &
				 txq_ctrl->txq.offloads) && (dev_cap->swp &
				 MLX5_SW_PARSING_TSO_CAP)) |
			       ((RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM &
				 txq_ctrl->txq.offloads) && (dev_cap->swp &
				 MLX5_SW_PARSING_CSUM_CAP));
}
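
/*
 * Illustrative sketch (not part of the driver): the knobs consumed above
 * come from device arguments, e.g. (the PCI address is an assumption of
 * the example):
 *
 *	dpdk-testpmd -a 0000:03:00.0,txq_inline_max=128,txqs_min_inline=8 ...
 *
 * txq_inline_max/txq_inline_min/txq_inline_mpw map to config->txq_inline_*
 * and txqs_min_inline maps to config->txqs_inline; a value left unset
 * (MLX5_ARG_UNSET) selects the defaults computed here.
 */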

/**
 * Adjust Tx queue data inline parameters for large queue sizes.
 * The data inline feature requires multiple WQEs to fit the packets,
 * and if a large amount of Tx descriptors is requested by the application
 * the total WQE amount may exceed the hardware capabilities. If the
 * default inline settings are used, we can try to adjust them to meet
 * the hardware requirements without exceeding the queue size.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 *
 * @return
 *   Zero on success, otherwise the parameters cannot be adjusted.
 */
static int
txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
{
	struct mlx5_priv *priv = txq_ctrl->priv;
	struct mlx5_dev_config *config = &priv->config;
	unsigned int max_inline;

	max_inline = txq_calc_inline_max(txq_ctrl);
	if (!txq_ctrl->txq.inlen_send) {
		/*
		 * Inline data feature is not engaged at all.
		 * There is nothing to adjust.
		 */
		return 0;
	}
	if (txq_ctrl->max_inline_data <= max_inline) {
		/*
		 * The requested inline data length does not
		 * exceed queue capabilities.
		 */
		return 0;
	}
	if (txq_ctrl->txq.inlen_mode > max_inline) {
		DRV_LOG(ERR,
			"minimal data inline requirements (%u) are not"
			" satisfied (%u) on port %u, try the smaller"
			" Tx queue size (%d)",
			txq_ctrl->txq.inlen_mode, max_inline,
			priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);
		goto error;
	}
	if (txq_ctrl->txq.inlen_send > max_inline &&
	    config->txq_inline_max != MLX5_ARG_UNSET &&
	    config->txq_inline_max > (int)max_inline) {
		DRV_LOG(ERR,
			"txq_inline_max requirements (%u) are not"
			" satisfied (%u) on port %u, try the smaller"
			" Tx queue size (%d)",
			txq_ctrl->txq.inlen_send, max_inline,
			priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);
		goto error;
	}
	if (txq_ctrl->txq.inlen_empw > max_inline &&
	    config->txq_inline_mpw != MLX5_ARG_UNSET &&
	    config->txq_inline_mpw > (int)max_inline) {
		DRV_LOG(ERR,
			"txq_inline_mpw requirements (%u) are not"
			" satisfied (%u) on port %u, try the smaller"
			" Tx queue size (%d)",
			txq_ctrl->txq.inlen_empw, max_inline,
			priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);
		goto error;
	}
	if (txq_ctrl->txq.tso_en && max_inline < MLX5_MAX_TSO_HEADER) {
		DRV_LOG(ERR,
			"tso header inline requirements (%u) are not"
			" satisfied (%u) on port %u, try the smaller"
			" Tx queue size (%d)",
			MLX5_MAX_TSO_HEADER, max_inline,
			priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);
		goto error;
	}
	if (txq_ctrl->txq.inlen_send > max_inline) {
		DRV_LOG(WARNING,
			"adjust txq_inline_max (%u->%u)"
			" due to large Tx queue on port %u",
			txq_ctrl->txq.inlen_send, max_inline,
			priv->dev_data->port_id);
		txq_ctrl->txq.inlen_send = max_inline;
	}
	if (txq_ctrl->txq.inlen_empw > max_inline) {
		DRV_LOG(WARNING,
			"adjust txq_inline_mpw (%u->%u)"
			" due to large Tx queue on port %u",
			txq_ctrl->txq.inlen_empw, max_inline,
			priv->dev_data->port_id);
		txq_ctrl->txq.inlen_empw = max_inline;
	}
	txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->txq.inlen_send,
					    txq_ctrl->txq.inlen_empw);
	MLX5_ASSERT(txq_ctrl->max_inline_data <= max_inline);
	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= max_inline);
	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw ||
		    !txq_ctrl->txq.inlen_empw);
	return 0;
error:
	rte_errno = ENOMEM;
	return -ENOMEM;
}

/**
 * Create a DPDK Tx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   TX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 *
 * @return
 *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
 */
struct mlx5_txq_ctrl *
mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
	     unsigned int socket, const struct rte_eth_txconf *conf)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *tmpl;

	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
			   desc * sizeof(struct rte_mbuf *), 0, socket);
	if (!tmpl) {
		rte_errno = ENOMEM;
		return NULL;
	}
	if (mlx5_mr_ctrl_init(&tmpl->txq.mr_ctrl,
			      &priv->sh->cdev->mr_scache.dev_gen, socket)) {
		/* rte_errno is already set. */
		goto error;
	}
	MLX5_ASSERT(desc > MLX5_TX_COMP_THRESH);
	tmpl->txq.offloads = conf->offloads |
			     dev->data->dev_conf.txmode.offloads;
	tmpl->priv = priv;
	tmpl->socket = socket;
	tmpl->txq.elts_n = log2above(desc);
	tmpl->txq.elts_s = desc;
	tmpl->txq.elts_m = desc - 1;
	tmpl->txq.port_id = dev->data->port_id;
	tmpl->txq.idx = idx;
	txq_set_params(tmpl);
	if (txq_adjust_params(tmpl))
		goto error;
	if (txq_calc_wqebb_cnt(tmpl) >
	    priv->sh->dev_cap.max_qp_wr) {
		DRV_LOG(ERR,
			"port %u Tx WQEBB count (%d) exceeds the limit (%d),"
			" try smaller queue size",
			dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
			priv->sh->dev_cap.max_qp_wr);
		rte_errno = ENOMEM;
		goto error;
	}
	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
	tmpl->type = MLX5_TXQ_TYPE_STANDARD;
	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
	return tmpl;
error:
	mlx5_mr_btree_free(&tmpl->txq.mr_ctrl.cache_bh);
	mlx5_free(tmpl);
	return NULL;
}

/**
 * Create a DPDK Tx hairpin queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   TX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param hairpin_conf
 *   The hairpin configuration.
 *
 * @return
 *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
 */
struct mlx5_txq_ctrl *
mlx5_txq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		     const struct rte_eth_hairpin_conf *hairpin_conf)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *tmpl;

	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
			   SOCKET_ID_ANY);
	if (!tmpl) {
		rte_errno = ENOMEM;
		return NULL;
	}
	tmpl->priv = priv;
	tmpl->socket = SOCKET_ID_ANY;
	tmpl->txq.elts_n = log2above(desc);
	tmpl->txq.port_id = dev->data->port_id;
	tmpl->txq.idx = idx;
	tmpl->hairpin_conf = *hairpin_conf;
	tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
	return tmpl;
}

/**
 * Get a Tx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Tx queue index.
 *
 * @return
 *   A pointer to the queue if it exists.
 */
struct mlx5_txq_ctrl *
mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *ctrl = NULL;

	if (txq_data) {
		ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
		__atomic_fetch_add(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
	}
	return ctrl;
}
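
/*
 * Illustrative note (not part of the driver): mlx5_txq_get() and
 * mlx5_txq_release() form a reference-counting pair; internal users
 * bracket accesses with them, e.g.:
 *
 *	struct mlx5_txq_ctrl *ctrl = mlx5_txq_get(dev, idx);
 *
 *	if (ctrl != NULL) {
 *		// ... use ctrl->txq ...
 *		mlx5_txq_release(dev, idx);
 *	}
 */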

/**
 * Release a Tx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Tx queue index.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
int
mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *txq_ctrl;

	if (priv->txqs == NULL || (*priv->txqs)[idx] == NULL)
		return 0;
	txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
	if (__atomic_sub_fetch(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
		return 1;
	if (txq_ctrl->obj) {
		priv->obj_ops.txq_obj_release(txq_ctrl->obj);
		LIST_REMOVE(txq_ctrl->obj, next);
		mlx5_free(txq_ctrl->obj);
		txq_ctrl->obj = NULL;
	}
	if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD) {
		if (txq_ctrl->txq.fcqs) {
			mlx5_free(txq_ctrl->txq.fcqs);
			txq_ctrl->txq.fcqs = NULL;
		}
		txq_free_elts(txq_ctrl);
		dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
	}
	if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
		if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD)
			mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
		LIST_REMOVE(txq_ctrl, next);
		mlx5_free(txq_ctrl);
		(*priv->txqs)[idx] = NULL;
	}
	return 0;
}

/**
 * Verify if the queue can be released.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Tx queue index.
 *
 * @return
 *   1 if the queue can be released.
 */
int
mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *txq;

	if (!(*priv->txqs)[idx])
		return -1;
	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
	return (__atomic_load_n(&txq->refcnt, __ATOMIC_RELAXED) == 1);
}

/**
 * Verify the Tx queue list is empty.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The number of objects not released.
 */
int
mlx5_txq_verify(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *txq_ctrl;
	int ret = 0;

	LIST_FOREACH(txq_ctrl, &priv->txqsctrl, next) {
		DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
			dev->data->port_id, txq_ctrl->txq.idx);
		++ret;
	}
	return ret;
}

/**
 * Set the Tx queue dynamic timestamp (mask and offset).
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 */
void
mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_txq_data *data;
	int off, nbit;
	unsigned int i;
	uint64_t mask = 0;

	nbit = rte_mbuf_dynflag_lookup
				(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
	off = rte_mbuf_dynfield_lookup
				(RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
	if (nbit >= 0 && off >= 0 && sh->txpp.refcnt)
		mask = 1ULL << nbit;
	for (i = 0; i != priv->txqs_n; ++i) {
		data = (*priv->txqs)[i];
		if (!data)
			continue;
		data->sh = sh;
		data->ts_mask = mask;
		data->ts_offset = off;
	}
}
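
/*
 * Illustrative sketch (not part of the driver): an application enables Tx
 * scheduling by registering the dynamic timestamp field and flag (which
 * makes the lookups above succeed) and stamping mbufs before transmit.
 * rte_mbuf_dyn_tx_timestamp_register() is assumed available from
 * rte_mbuf_dyn.h; "when" is the desired transmit time in device clock
 * units.
 *
 *	uint64_t ts_flag;
 *	int ts_off;
 *
 *	if (rte_mbuf_dyn_tx_timestamp_register(&ts_off, &ts_flag) == 0) {
 *		*RTE_MBUF_DYNFIELD(mbuf, ts_off, uint64_t *) = when;
 *		mbuf->ol_flags |= ts_flag;
 *	}
 */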