/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_common.h>
#include <mlx5_common_mr.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
/**
 * Allocate TX queue elements.
 *
 * @param txq_ctrl
 *   Pointer to TX queue structure.
 */
void
txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
{
	const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
	unsigned int i;

	for (i = 0; (i != elts_n); ++i)
		txq_ctrl->txq.elts[i] = NULL;
	DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
		PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx, elts_n);
	txq_ctrl->txq.elts_head = 0;
	txq_ctrl->txq.elts_tail = 0;
	txq_ctrl->txq.elts_comp = 0;
}
/**
 * Free TX queue elements.
 *
 * @param txq_ctrl
 *   Pointer to TX queue structure.
 */
void
txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
{
	const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
	const uint16_t elts_m = elts_n - 1;
	uint16_t elts_head = txq_ctrl->txq.elts_head;
	uint16_t elts_tail = txq_ctrl->txq.elts_tail;
	struct rte_mbuf *(*elts)[elts_n] = &txq_ctrl->txq.elts;

	DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
		PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx);
	txq_ctrl->txq.elts_head = 0;
	txq_ctrl->txq.elts_tail = 0;
	txq_ctrl->txq.elts_comp = 0;

	while (elts_tail != elts_head) {
		struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];

		MLX5_ASSERT(elt != NULL);
		rte_pktmbuf_free_seg(elt);
#ifdef RTE_LIBRTE_MLX5_DEBUG
		/* Poisoning. */
		memset(&(*elts)[elts_tail & elts_m],
		       0x77,
		       sizeof((*elts)[elts_tail & elts_m]));
#endif
		++elts_tail;
	}
}
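
/*
 * Illustration (not part of the driver, excluded from the build): the elts
 * ring above relies on free-running 16-bit head/tail counters masked by the
 * power-of-two ring size. A minimal standalone sketch of the same indexing
 * scheme, with hypothetical names and an assumed ring of 8 entries:
 */
#if 0
#include <stdint.h>
#include <assert.h>

static void
example_ring_indexing(void)
{
	const uint16_t n = 8;        /* Ring size, must be a power of two. */
	const uint16_t mask = n - 1; /* Index mask derived from the size. */
	uint16_t head = 0xfffe;      /* Counters are free-running ... */
	uint16_t tail = 0xfffe;      /* ... and may wrap around 0xffff. */

	/* Produce three entries; only (head & mask) selects a slot. */
	head += 3;
	/* Consume until tail catches up, exactly as txq_free_elts() does. */
	while (tail != head) {
		uint16_t slot = tail & mask;

		assert(slot < n);
		++tail;
	}
}
#endif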
/**
 * Returns the per-port supported offloads.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   Supported Tx offloads.
 */
uint64_t
mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
			     DEV_TX_OFFLOAD_VLAN_INSERT);
	struct mlx5_dev_config *config = &priv->config;

	if (config->hw_csum)
		offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
			     DEV_TX_OFFLOAD_UDP_CKSUM |
			     DEV_TX_OFFLOAD_TCP_CKSUM);
	if (config->tso)
		offloads |= DEV_TX_OFFLOAD_TCP_TSO;
	if (config->tx_pp)
		offloads |= DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP;
	if (config->swp) {
		if (config->hw_csum)
			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
		if (config->tso)
			offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
				     DEV_TX_OFFLOAD_UDP_TNL_TSO);
	}
	if (config->tunnel_en) {
		if (config->hw_csum)
			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
		if (config->tso)
			offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
				     DEV_TX_OFFLOAD_GRE_TNL_TSO |
				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
	}
	return offloads;
}
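
/*
 * Illustration (not part of the driver, excluded from the build): an
 * application sees these capabilities through the generic ethdev API rather
 * than by calling the function above directly. A minimal sketch, assuming
 * port_id refers to a valid, probed port:
 */
#if 0
#include <rte_ethdev.h>

static int
example_check_tx_checksum_offload(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	int ret = rte_eth_dev_info_get(port_id, &dev_info);

	if (ret != 0)
		return ret;
	/* Test one of the per-port capability bits reported above. */
	return (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) ? 1 : 0;
}
#endif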
/* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
static void
txq_sync_cq(struct mlx5_txq_data *txq)
{
	volatile struct mlx5_cqe *cqe;
	int ret, i;

	i = txq->cqe_s;
	do {
		cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
		ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
			if (likely(ret != MLX5_CQE_STATUS_ERR)) {
				/* No new CQEs in completion queue. */
				MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
				break;
			}
		}
		++txq->cq_ci;
	} while (--i);
	/* Move all CQEs to HW ownership. */
	for (i = 0; i < txq->cqe_s; i++) {
		cqe = &txq->cqes[i];
		cqe->op_own = MLX5_CQE_INVALIDATE;
	}
	/* Resync CQE and WQE (WQ in reset state). */
	rte_io_wmb();
	*txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
	rte_io_wmb();
}
/**
 * Tx queue stop. Device queue goes to the idle state,
 * all involved mbufs are freed from elts/WQ.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Tx queue index.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_tx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq, struct mlx5_txq_ctrl, txq);
	int ret;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/* Move QP to RESET state. */
	ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj, MLX5_TXQ_MOD_RDY2RST,
					   (uint8_t)priv->dev_port);
	if (ret)
		return ret;
	/* Handle all send completions. */
	txq_sync_cq(txq);
	/* Free elts stored in the SQ. */
	txq_free_elts(txq_ctrl);
	/* Prevent writing new packets to the SQ by leaving no free WQEs. */
	txq->wqe_ci = txq->wqe_s;
	txq->wqe_pi = txq->wqe_s;
	txq->elts_comp = 0;
	/* Set the actual queue state. */
	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}
/**
 * Tx queue stop. Device queue goes to the idle state,
 * all involved mbufs are freed from elts/WQ.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Tx queue index.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_tx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
{
	int ret;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
		DRV_LOG(ERR, "Hairpin queue can't be stopped");
		rte_errno = EINVAL;
		return -EINVAL;
	}
	if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		ret = mlx5_mp_os_req_queue_control(dev, idx,
						   MLX5_MP_REQ_QUEUE_TX_STOP);
	} else {
		ret = mlx5_tx_queue_stop_primary(dev, idx);
	}
	return ret;
}
/**
 * Tx queue start. Device queue goes to the ready state,
 * counters are reset and transmission can resume.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Tx queue index.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_tx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq, struct mlx5_txq_ctrl, txq);
	int ret;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/* Move QP from RESET to READY state. */
	ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj,
					   MLX5_TXQ_MOD_RST2RDY,
					   (uint8_t)priv->dev_port);
	if (ret)
		return ret;
	txq_ctrl->txq.wqe_ci = 0;
	txq_ctrl->txq.wqe_pi = 0;
	txq_ctrl->txq.elts_comp = 0;
	/* Set the actual queue state. */
	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
}
/**
 * Tx queue start. Device queue goes to the ready state
 * and transmission can resume.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Tx queue index.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_tx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
{
	int ret;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
		DRV_LOG(ERR, "Hairpin queue can't be started");
		rte_errno = EINVAL;
		return -EINVAL;
	}
	if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
		return 0;
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		ret = mlx5_mp_os_req_queue_control(dev, idx,
						   MLX5_MP_REQ_QUEUE_TX_START);
	} else {
		ret = mlx5_tx_queue_start_primary(dev, idx);
	}
	return ret;
}
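
/*
 * Illustration (not part of the driver, excluded from the build): deferred
 * stop/start reaches the callbacks above through the generic ethdev calls.
 * A minimal sketch, assuming the port is configured and started:
 */
#if 0
#include <rte_ethdev.h>

static int
example_restart_tx_queue(uint16_t port_id, uint16_t queue_id)
{
	int ret = rte_eth_dev_tx_queue_stop(port_id, queue_id);

	if (ret != 0)
		return ret;
	/* ... drain or reconfigure application state here ... */
	return rte_eth_dev_tx_queue_start(port_id, queue_id);
}
#endif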
/**
 * Tx queue presetup checks.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Tx queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (*desc <= MLX5_TX_COMP_THRESH) {
		DRV_LOG(WARNING,
			"port %u number of descriptors requested for Tx queue"
			" %u must be higher than MLX5_TX_COMP_THRESH, using %u"
			" instead of %u", dev->data->port_id, idx,
			MLX5_TX_COMP_THRESH + 1, *desc);
		*desc = MLX5_TX_COMP_THRESH + 1;
	}
	if (!rte_is_power_of_2(*desc)) {
		*desc = 1 << log2above(*desc);
		DRV_LOG(WARNING,
			"port %u increased number of descriptors in Tx queue"
			" %u to the next power of two (%d)",
			dev->data->port_id, idx, *desc);
	}
	DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
		dev->data->port_id, idx, *desc);
	if (idx >= priv->txqs_n) {
		DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
			dev->data->port_id, idx, priv->txqs_n);
		rte_errno = EOVERFLOW;
		return -rte_errno;
	}
	if (!mlx5_txq_releasable(dev, idx)) {
		rte_errno = EBUSY;
		DRV_LOG(ERR, "port %u unable to release queue index %u",
			dev->data->port_id, idx);
		return -rte_errno;
	}
	mlx5_txq_release(dev, idx);
	return 0;
}
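
/*
 * Illustration (not part of the driver, excluded from the build): the
 * descriptor-count fixup above rounds any request up to the next power of
 * two, as required by the masked ring indexing. A standalone sketch of the
 * same rounding:
 */
#if 0
#include <stdint.h>

/* Round v up to the next power of two (v > 0, result fits in 16 bits). */
static uint16_t
example_roundup_pow2(uint16_t v)
{
	uint32_t r = 1;

	while (r < v)
		r <<= 1;
	return (uint16_t)r;
}
/* example_roundup_pow2(600) == 1024, matching the driver's log2above(). */
#endif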
/**
 * DPDK callback to configure a TX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   TX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_txconf *conf)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	int res;

	res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
	if (res)
		return res;
	txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
	if (!txq_ctrl) {
		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
			dev->data->port_id, idx);
		return -rte_errno;
	}
	DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
		dev->data->port_id, idx);
	(*priv->txqs)[idx] = &txq_ctrl->txq;
	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
}
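
/*
 * Illustration (not part of the driver, excluded from the build): the
 * application-side counterpart of the callback above. A minimal sketch,
 * assuming the port is already configured with at least one Tx queue:
 */
#if 0
#include <rte_ethdev.h>

static int
example_setup_one_txq(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;
	int ret = rte_eth_dev_info_get(port_id, &dev_info);

	if (ret != 0)
		return ret;
	txconf = dev_info.default_txconf;
	/* 512 descriptors; rounded up internally if not a power of two. */
	return rte_eth_tx_queue_setup(port_id, 0, 512,
				      rte_eth_dev_socket_id(port_id),
				      &txconf);
}
#endif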
/**
 * DPDK callback to configure a TX hairpin queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   TX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param[in] hairpin_conf
 *   The hairpin binding configuration.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
			    uint16_t desc,
			    const struct rte_eth_hairpin_conf *hairpin_conf)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	int res;

	res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
	if (res)
		return res;
	if (hairpin_conf->peer_count != 1 ||
	    hairpin_conf->peers[0].port != dev->data->port_id ||
	    hairpin_conf->peers[0].queue >= priv->rxqs_n) {
		DRV_LOG(ERR, "port %u unable to setup hairpin queue index %u:"
			" invalid hairpin configuration", dev->data->port_id,
			idx);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	txq_ctrl = mlx5_txq_hairpin_new(dev, idx, desc, hairpin_conf);
	if (!txq_ctrl) {
		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
			dev->data->port_id, idx);
		return -rte_errno;
	}
	DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
		dev->data->port_id, idx);
	(*priv->txqs)[idx] = &txq_ctrl->txq;
	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
	return 0;
}
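
/*
 * Illustration (not part of the driver, excluded from the build): a hairpin
 * Tx queue bound to an Rx queue of the same port, which is the only peering
 * the validation above accepts. A minimal sketch with hypothetical queue
 * indices:
 */
#if 0
#include <string.h>
#include <rte_ethdev.h>

static int
example_setup_hairpin_txq(uint16_t port_id, uint16_t txq_id, uint16_t rxq_id)
{
	struct rte_eth_hairpin_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.peer_count = 1;
	/* This driver requires the peer to be an Rx queue of the same port. */
	conf.peers[0].port = port_id;
	conf.peers[0].queue = rxq_id;
	return rte_eth_tx_hairpin_queue_setup(port_id, txq_id, 256, &conf);
}
#endif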
/**
 * DPDK callback to release a TX queue.
 *
 * @param dpdk_txq
 *   Generic TX queue pointer.
 */
void
mlx5_tx_queue_release(void *dpdk_txq)
{
	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
	struct mlx5_txq_ctrl *txq_ctrl;
	struct mlx5_priv *priv;
	unsigned int i;

	if (txq == NULL)
		return;
	txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
	priv = txq_ctrl->priv;
	for (i = 0; (i != priv->txqs_n); ++i)
		if ((*priv->txqs)[i] == txq) {
			DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
				PORT_ID(priv), txq->idx);
			mlx5_txq_release(ETH_DEV(priv), i);
			break;
		}
}
/**
 * Configure the doorbell register non-cached attribute.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 * @param page_size
 *   System page size.
 */
static void
txq_uar_ncattr_init(struct mlx5_txq_ctrl *txq_ctrl, size_t page_size)
{
	struct mlx5_priv *priv = txq_ctrl->priv;
	off_t cmd;

	txq_ctrl->txq.db_heu = priv->config.dbnc == MLX5_TXDB_HEURISTIC;
	txq_ctrl->txq.db_nc = 0;
	/* Check the doorbell register mapping type. */
	cmd = txq_ctrl->uar_mmap_offset / page_size;
	cmd >>= MLX5_UAR_MMAP_CMD_SHIFT;
	cmd &= MLX5_UAR_MMAP_CMD_MASK;
	if (cmd == MLX5_MMAP_GET_NC_PAGES_CMD)
		txq_ctrl->txq.db_nc = 1;
}
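
/*
 * Illustration (not part of the driver, excluded from the build): the mmap
 * offset encodes a command in its upper bits, recovered above by dividing
 * out the page size, shifting, and masking. A standalone sketch with
 * hypothetical shift/mask values standing in for MLX5_UAR_MMAP_CMD_SHIFT
 * and MLX5_UAR_MMAP_CMD_MASK:
 */
#if 0
#include <sys/types.h>
#include <assert.h>

static void
example_decode_mmap_cmd(void)
{
	const size_t page_size = 4096;	/* Assumed for illustration. */
	const unsigned int shift = 8;	/* Hypothetical command shift. */
	const off_t mask = 0xff;	/* Hypothetical command mask. */
	off_t offset = ((off_t)2 << shift) * page_size; /* Command 2 encoded. */
	off_t cmd = ((offset / page_size) >> shift) & mask;

	assert(cmd == 2);
}
#endif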
/**
 * Initialize Tx UAR registers for primary process.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 */
void
txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl)
{
	struct mlx5_priv *priv = txq_ctrl->priv;
	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
#ifndef RTE_ARCH_64
	unsigned int lock_idx;
#endif
	const size_t page_size = rte_mem_page_size();

	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		rte_errno = ENOMEM;
	}
	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
		return;
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	MLX5_ASSERT(ppriv);
	ppriv->uar_table[txq_ctrl->txq.idx] = txq_ctrl->bf_reg;
	txq_uar_ncattr_init(txq_ctrl, page_size);
#ifndef RTE_ARCH_64
	/* Assign a UAR lock according to the UAR page number. */
	lock_idx = (txq_ctrl->uar_mmap_offset / page_size) &
		   MLX5_UAR_PAGE_NUM_MASK;
	txq_ctrl->txq.uar_lock = &priv->sh->uar_lock[lock_idx];
#endif
}
/**
 * Remap UAR register of a Tx queue for secondary process.
 *
 * Remapped address is stored at the table in the process private structure of
 * the device, indexed by queue index.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 * @param fd
 *   Verbs file descriptor to map UAR pages.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
txq_uar_init_secondary(struct mlx5_txq_ctrl *txq_ctrl, int fd)
{
	struct mlx5_priv *priv = txq_ctrl->priv;
	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
	struct mlx5_txq_data *txq = &txq_ctrl->txq;
	void *addr;
	uintptr_t uar_va;
	uintptr_t offset;
	const size_t page_size = rte_mem_page_size();

	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
		return 0;
	MLX5_ASSERT(ppriv);
	/*
	 * As in rdma-core, UARs are mapped at OS page size granularity;
	 * see the libmlx5 function mlx5_init_context().
	 */
	uar_va = (uintptr_t)txq_ctrl->bf_reg;
	offset = uar_va & (page_size - 1); /* Offset in page. */
	addr = rte_mem_map(NULL, page_size, RTE_PROT_WRITE, RTE_MAP_SHARED,
			   fd, txq_ctrl->uar_mmap_offset);
	if (!addr) {
		DRV_LOG(ERR,
			"port %u mmap failed for BF reg of txq %u",
			txq->port_id, txq->idx);
		rte_errno = ENXIO;
		return -rte_errno;
	}
	addr = RTE_PTR_ADD(addr, offset);
	ppriv->uar_table[txq->idx] = addr;
	txq_uar_ncattr_init(txq_ctrl, page_size);
	return 0;
}
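
/*
 * Illustration (not part of the driver, excluded from the build): remapping
 * preserves the offset of the register within its page; only the page base
 * changes between processes. A standalone sketch of the address arithmetic
 * used above, with hypothetical addresses:
 */
#if 0
#include <stdint.h>
#include <assert.h>

static void
example_page_offset_remap(void)
{
	const uintptr_t page_size = 4096;	   /* Assumed for illustration. */
	uintptr_t primary_va = 0x7f0000003808;	   /* Hypothetical register VA. */
	uintptr_t offset = primary_va & (page_size - 1);
	uintptr_t secondary_page = 0x7e0000001000; /* Hypothetical new mapping. */
	uintptr_t secondary_va = secondary_page + offset;

	assert(offset == 0x808);
	/* The register keeps the same in-page offset in both processes. */
	assert((secondary_va & (page_size - 1)) == offset);
}
#endif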
/**
 * Unmap UAR register of a Tx queue for secondary process.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 */
static void
txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl)
{
	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(txq_ctrl->priv));
	void *addr;
	const size_t page_size = rte_mem_page_size();

	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		rte_errno = ENOMEM;
	}
	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
		return;
	addr = ppriv->uar_table[txq_ctrl->txq.idx];
	rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
}
/**
 * Deinitialize Tx UAR registers for secondary process.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq;
	struct mlx5_txq_ctrl *txq_ctrl;
	unsigned int i;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
	for (i = 0; i != priv->txqs_n; ++i) {
		if (!(*priv->txqs)[i])
			continue;
		txq = (*priv->txqs)[i];
		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
		txq_uar_uninit_secondary(txq_ctrl);
	}
}
/**
 * Initialize Tx UAR registers for secondary process.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param fd
 *   Verbs file descriptor to map UAR pages.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq;
	struct mlx5_txq_ctrl *txq_ctrl;
	unsigned int i;
	int ret;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
	for (i = 0; i != priv->txqs_n; ++i) {
		if (!(*priv->txqs)[i])
			continue;
		txq = (*priv->txqs)[i];
		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
		if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
			continue;
		MLX5_ASSERT(txq->idx == (uint16_t)i);
		ret = txq_uar_init_secondary(txq_ctrl, fd);
		if (ret)
			goto error;
	}
	return 0;
error:
	/* Rollback. */
	do {
		if (!(*priv->txqs)[i])
			continue;
		txq = (*priv->txqs)[i];
		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
		txq_uar_uninit_secondary(txq_ctrl);
	} while (i--);
	return -rte_errno;
}
/**
 * Verify the Verbs Tx queue list is empty.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The number of objects not released.
 */
int
mlx5_txq_obj_verify(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret = 0;
	struct mlx5_txq_obj *txq_obj;

	LIST_FOREACH(txq_obj, &priv->txqsobj, next) {
		DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
			dev->data->port_id, txq_obj->txq_ctrl->txq.idx);
		++ret;
	}
	return ret;
}
/**
 * Calculate the total number of WQEBBs for a Tx queue.
 *
 * Simplified version of calc_sq_size() in rdma-core.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 *
 * @return
 *   The number of WQEBBs.
 */
static int
txq_calc_wqebb_cnt(struct mlx5_txq_ctrl *txq_ctrl)
{
	unsigned int wqe_size;
	const unsigned int desc = 1 << txq_ctrl->txq.elts_n;

	wqe_size = MLX5_WQE_CSEG_SIZE +
		   MLX5_WQE_ESEG_SIZE +
		   MLX5_WSEG_SIZE -
		   MLX5_ESEG_MIN_INLINE_SIZE +
		   txq_ctrl->max_inline_data;
	return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;
}
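
/*
 * Illustration (not part of the driver, excluded from the build): the SQ
 * size is the per-WQE byte budget times the descriptor count, rounded up to
 * a power of two and expressed in basic-block units. A standalone sketch,
 * assuming a 64-byte WQEBB for illustration:
 */
#if 0
#include <rte_common.h>

static unsigned int
example_wqebb_count(unsigned int per_wqe_bytes, unsigned int desc)
{
	const unsigned int wqebb = 64; /* Assumed basic block size. */

	/* Total SQ bytes rounded to a power of two, then in WQEBB units. */
	return rte_align32pow2(per_wqe_bytes * desc) / wqebb;
}
/* example_wqebb_count(192, 512) == 131072 / 64 == 2048. */
#endif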
/**
 * Calculate the maximal inline data size for a Tx queue.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 *
 * @return
 *   The maximal inline data size.
 */
static unsigned int
txq_calc_inline_max(struct mlx5_txq_ctrl *txq_ctrl)
{
	const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
	struct mlx5_priv *priv = txq_ctrl->priv;
	unsigned int wqe_size;

	wqe_size = priv->sh->device_attr.max_qp_wr / desc;
	if (!wqe_size)
		return 0;
	/*
	 * This calculation is derived from the source of
	 * mlx5_calc_send_wqe() in the rdma-core library.
	 */
	wqe_size = wqe_size * MLX5_WQE_SIZE -
		   MLX5_WQE_CSEG_SIZE -
		   MLX5_WQE_ESEG_SIZE -
		   MLX5_WSEG_SIZE -
		   MLX5_WSEG_SIZE +
		   MLX5_DSEG_MIN_INLINE_SIZE;
	return wqe_size;
}
/**
 * Set Tx queue parameters from device configuration.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 */
static void
txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
{
	struct mlx5_priv *priv = txq_ctrl->priv;
	struct mlx5_dev_config *config = &priv->config;
	unsigned int inlen_send; /* Inline data for ordinary SEND. */
	unsigned int inlen_empw; /* Inline data for enhanced MPW. */
	unsigned int inlen_mode; /* Minimal required Inline data. */
	unsigned int txqs_inline; /* Min Tx queues to enable inline. */
	uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
	bool tso = txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
					     DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
					     DEV_TX_OFFLOAD_GRE_TNL_TSO |
					     DEV_TX_OFFLOAD_IP_TNL_TSO |
					     DEV_TX_OFFLOAD_UDP_TNL_TSO);
	bool vlan_inline;
	unsigned int temp;

	if (config->txqs_inline == MLX5_ARG_UNSET)
		txqs_inline =
#if defined(RTE_ARCH_ARM64)
		(priv->pci_dev->id.device_id ==
			PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) ?
			MLX5_INLINE_MAX_TXQS_BLUEFIELD :
#endif
			MLX5_INLINE_MAX_TXQS;
	else
		txqs_inline = (unsigned int)config->txqs_inline;
	inlen_send = (config->txq_inline_max == MLX5_ARG_UNSET) ?
		     MLX5_SEND_DEF_INLINE_LEN :
		     (unsigned int)config->txq_inline_max;
	inlen_empw = (config->txq_inline_mpw == MLX5_ARG_UNSET) ?
		     MLX5_EMPW_DEF_INLINE_LEN :
		     (unsigned int)config->txq_inline_mpw;
	inlen_mode = (config->txq_inline_min == MLX5_ARG_UNSET) ?
		     0 : (unsigned int)config->txq_inline_min;
	if (config->mps != MLX5_MPW_ENHANCED && config->mps != MLX5_MPW)
		inlen_empw = 0;
	/*
	 * If a minimal amount of data to inline is requested we MUST
	 * enable inlining. This is the case for ConnectX-4, which usually
	 * requires the L2 headers to be inlined to operate correctly, and
	 * for ConnectX-4 Lx, which requires the L2-L4 headers to be
	 * inlined to support E-Switch flows.
	 */
	if (inlen_mode) {
		if (inlen_mode <= MLX5_ESEG_MIN_INLINE_SIZE) {
			/*
			 * Optimize minimal inlining for single
			 * segment packets to fill one WQEBB
			 * without gaps.
			 */
			temp = MLX5_ESEG_MIN_INLINE_SIZE;
		} else {
			temp = inlen_mode - MLX5_ESEG_MIN_INLINE_SIZE;
			temp = RTE_ALIGN(temp, MLX5_WSEG_SIZE) +
			       MLX5_ESEG_MIN_INLINE_SIZE;
			temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
		}
		if (temp != inlen_mode) {
			DRV_LOG(INFO,
				"port %u minimal required inline setting"
				" aligned from %u to %u",
				PORT_ID(priv), inlen_mode, temp);
			inlen_mode = temp;
		}
	}
	/*
	 * If the port is configured to support VLAN insertion and the
	 * device does not support it in hardware (NICs before ConnectX-5,
	 * or when the wqe_vlan_insert flag is not set), data inline must
	 * be enabled on all queues because the feature is implemented by
	 * a single tx_burst routine.
	 */
	txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
	vlan_inline = (dev_txoff & DEV_TX_OFFLOAD_VLAN_INSERT) &&
		      !config->hw_vlan_insert;
	/*
	 * If there are only a few Tx queues, saving CPU cycles is
	 * prioritized and data inlining is disabled altogether.
	 */
	if (inlen_send && priv->txqs_n >= txqs_inline) {
		/*
		 * The data sent with ordinary MLX5_OPCODE_SEND
		 * may be inlined in the Ethernet Segment; align
		 * the length accordingly to fit entire WQEBBs.
		 */
		temp = RTE_MAX(inlen_send,
			       MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE);
		temp -= MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
		temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
		temp += MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
		temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
				     MLX5_ESEG_MIN_INLINE_SIZE -
				     MLX5_WQE_CSEG_SIZE -
				     MLX5_WQE_ESEG_SIZE -
				     MLX5_WQE_DSEG_SIZE * 2);
		temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
		temp = RTE_MAX(temp, inlen_mode);
		if (temp != inlen_send) {
			DRV_LOG(INFO,
				"port %u ordinary send inline setting"
				" aligned from %u to %u",
				PORT_ID(priv), inlen_send, temp);
			inlen_send = temp;
		}
		/*
		 * Not aligned to cache lines, but to WQEs.
		 * The first bytes of data (initial alignment)
		 * are going to be copied explicitly at the
		 * beginning of the inlining buffer in the
		 * Ethernet Segment.
		 */
		MLX5_ASSERT(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
		MLX5_ASSERT(inlen_send <= MLX5_WQE_SIZE_MAX +
					  MLX5_ESEG_MIN_INLINE_SIZE -
					  MLX5_WQE_CSEG_SIZE -
					  MLX5_WQE_ESEG_SIZE -
					  MLX5_WQE_DSEG_SIZE * 2);
	} else if (inlen_mode) {
		/*
		 * If minimal inlining is requested we must
		 * enable inlining in general, regardless of
		 * the number of configured queues. Ignore the
		 * txq_inline_max devarg; this is not
		 * full-featured inline.
		 */
		inlen_send = inlen_mode;
		inlen_empw = 0;
	} else if (vlan_inline) {
		/*
		 * Hardware does not report the offload for
		 * VLAN insertion; data inline must be enabled
		 * to implement the feature in software.
		 */
		inlen_send = MLX5_ESEG_MIN_INLINE_SIZE;
		inlen_empw = 0;
	} else {
		inlen_send = 0;
		inlen_empw = 0;
	}
	txq_ctrl->txq.inlen_send = inlen_send;
	txq_ctrl->txq.inlen_mode = inlen_mode;
	txq_ctrl->txq.inlen_empw = 0;
	if (inlen_send && inlen_empw && priv->txqs_n >= txqs_inline) {
		/*
		 * The data sent with MLX5_OPCODE_ENHANCED_MPSW
		 * may be inlined in the Data Segment; align the
		 * length accordingly to fit entire WQEBBs.
		 */
		temp = RTE_MAX(inlen_empw,
			       MLX5_WQE_SIZE + MLX5_DSEG_MIN_INLINE_SIZE);
		temp -= MLX5_DSEG_MIN_INLINE_SIZE;
		temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
		temp += MLX5_DSEG_MIN_INLINE_SIZE;
		temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
				     MLX5_DSEG_MIN_INLINE_SIZE -
				     MLX5_WQE_CSEG_SIZE -
				     MLX5_WQE_ESEG_SIZE -
				     MLX5_WQE_DSEG_SIZE);
		temp = RTE_MIN(temp, MLX5_EMPW_MAX_INLINE_LEN);
		if (temp != inlen_empw) {
			DRV_LOG(INFO,
				"port %u enhanced empw inline setting"
				" aligned from %u to %u",
				PORT_ID(priv), inlen_empw, temp);
			inlen_empw = temp;
		}
		MLX5_ASSERT(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
		MLX5_ASSERT(inlen_empw <= MLX5_WQE_SIZE_MAX +
					  MLX5_DSEG_MIN_INLINE_SIZE -
					  MLX5_WQE_CSEG_SIZE -
					  MLX5_WQE_ESEG_SIZE -
					  MLX5_WQE_DSEG_SIZE);
		txq_ctrl->txq.inlen_empw = inlen_empw;
	}
	txq_ctrl->max_inline_data = RTE_MAX(inlen_send, inlen_empw);
	if (tso) {
		txq_ctrl->max_tso_header = MLX5_MAX_TSO_HEADER;
		txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->max_inline_data,
						    MLX5_MAX_TSO_HEADER);
		txq_ctrl->txq.tso_en = 1;
	}
	txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
	txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
				 DEV_TX_OFFLOAD_UDP_TNL_TSO |
				 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
				txq_ctrl->txq.offloads) && config->swp;
}
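
/*
 * Illustration (not part of the driver, excluded from the build): the
 * alignment idiom used twice above subtracts the bytes that share the first
 * WQEBB with headers, aligns the remainder to whole WQEBBs, then adds those
 * bytes back. A standalone sketch, assuming 64-byte WQEBBs and 18 bytes
 * placed in the first block for illustration:
 */
#if 0
#include <rte_common.h>

static unsigned int
example_align_inline(unsigned int inlen)
{
	const unsigned int wqebb = 64; /* Stand-in for MLX5_WQE_SIZE. */
	const unsigned int head = 18;  /* Bytes sharing the first WQEBB. */

	if (inlen < head)
		inlen = head;
	return RTE_ALIGN(inlen - head, wqebb) + head;
}
/* example_align_inline(200) == RTE_ALIGN(182, 64) + 18 == 192 + 18 == 210. */
#endif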
/**
 * Adjust Tx queue data inline parameters for large queue sizes.
 * The data inline feature requires multiple WQEs to fit the packets,
 * and if a large number of Tx descriptors is requested by the application
 * the total WQE count may exceed the hardware capabilities. If the
 * default inline settings are used we can try to adjust them to meet
 * the hardware requirements without exceeding the queue size.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 *
 * @return
 *   Zero on success, otherwise the parameters can not be adjusted.
 */
static int
txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
{
	struct mlx5_priv *priv = txq_ctrl->priv;
	struct mlx5_dev_config *config = &priv->config;
	unsigned int max_inline;

	max_inline = txq_calc_inline_max(txq_ctrl);
	if (!txq_ctrl->txq.inlen_send) {
		/*
		 * Inline data feature is not engaged at all.
		 * There is nothing to adjust.
		 */
		return 0;
	}
	if (txq_ctrl->max_inline_data <= max_inline) {
		/*
		 * The requested inline data length does not
		 * exceed queue capabilities.
		 */
		return 0;
	}
	if (txq_ctrl->txq.inlen_mode > max_inline) {
		DRV_LOG(ERR,
			"minimal data inline requirements (%u) are not"
			" satisfied (%u) on port %u, try the smaller"
			" Tx queue size (%d)",
			txq_ctrl->txq.inlen_mode, max_inline,
			priv->dev_data->port_id,
			priv->sh->device_attr.max_qp_wr);
		goto error;
	}
	if (txq_ctrl->txq.inlen_send > max_inline &&
	    config->txq_inline_max != MLX5_ARG_UNSET &&
	    config->txq_inline_max > (int)max_inline) {
		DRV_LOG(ERR,
			"txq_inline_max requirements (%u) are not"
			" satisfied (%u) on port %u, try the smaller"
			" Tx queue size (%d)",
			txq_ctrl->txq.inlen_send, max_inline,
			priv->dev_data->port_id,
			priv->sh->device_attr.max_qp_wr);
		goto error;
	}
	if (txq_ctrl->txq.inlen_empw > max_inline &&
	    config->txq_inline_mpw != MLX5_ARG_UNSET &&
	    config->txq_inline_mpw > (int)max_inline) {
		DRV_LOG(ERR,
			"txq_inline_mpw requirements (%u) are not"
			" satisfied (%u) on port %u, try the smaller"
			" Tx queue size (%d)",
			txq_ctrl->txq.inlen_empw, max_inline,
			priv->dev_data->port_id,
			priv->sh->device_attr.max_qp_wr);
		goto error;
	}
	if (txq_ctrl->txq.tso_en && max_inline < MLX5_MAX_TSO_HEADER) {
		DRV_LOG(ERR,
			"tso header inline requirements (%u) are not"
			" satisfied (%u) on port %u, try the smaller"
			" Tx queue size (%d)",
			MLX5_MAX_TSO_HEADER, max_inline,
			priv->dev_data->port_id,
			priv->sh->device_attr.max_qp_wr);
		goto error;
	}
	if (txq_ctrl->txq.inlen_send > max_inline) {
		DRV_LOG(WARNING,
			"adjust txq_inline_max (%u->%u)"
			" due to large Tx queue on port %u",
			txq_ctrl->txq.inlen_send, max_inline,
			priv->dev_data->port_id);
		txq_ctrl->txq.inlen_send = max_inline;
	}
	if (txq_ctrl->txq.inlen_empw > max_inline) {
		DRV_LOG(WARNING,
			"adjust txq_inline_mpw (%u->%u)"
			" due to large Tx queue on port %u",
			txq_ctrl->txq.inlen_empw, max_inline,
			priv->dev_data->port_id);
		txq_ctrl->txq.inlen_empw = max_inline;
	}
	txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->txq.inlen_send,
					    txq_ctrl->txq.inlen_empw);
	MLX5_ASSERT(txq_ctrl->max_inline_data <= max_inline);
	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= max_inline);
	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw ||
		    !txq_ctrl->txq.inlen_empw);
	return 0;
error:
	rte_errno = ENOMEM;
	return -ENOMEM;
}
/**
 * Create a DPDK Tx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   TX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 *
 * @return
 *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
 */
struct mlx5_txq_ctrl *
mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
	     unsigned int socket, const struct rte_eth_txconf *conf)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *tmpl;

	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
			   desc * sizeof(struct rte_mbuf *), 0, socket);
	if (!tmpl) {
		rte_errno = ENOMEM;
		return NULL;
	}
	if (mlx5_mr_btree_init(&tmpl->txq.mr_ctrl.cache_bh,
			       MLX5_MR_BTREE_CACHE_N, socket)) {
		/* rte_errno is already set. */
		goto error;
	}
	/* Save pointer of global generation number to check memory event. */
	tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->sh->share_cache.dev_gen;
	MLX5_ASSERT(desc > MLX5_TX_COMP_THRESH);
	tmpl->txq.offloads = conf->offloads |
			     dev->data->dev_conf.txmode.offloads;
	tmpl->priv = priv;
	tmpl->socket = socket;
	tmpl->txq.elts_n = log2above(desc);
	tmpl->txq.elts_s = desc;
	tmpl->txq.elts_m = desc - 1;
	tmpl->txq.port_id = dev->data->port_id;
	tmpl->txq.idx = idx;
	txq_set_params(tmpl);
	if (txq_adjust_params(tmpl))
		goto error;
	if (txq_calc_wqebb_cnt(tmpl) >
	    priv->sh->device_attr.max_qp_wr) {
		DRV_LOG(ERR,
			"port %u Tx WQEBB count (%d) exceeds the limit (%d),"
			" try smaller queue size",
			dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
			priv->sh->device_attr.max_qp_wr);
		rte_errno = ENOMEM;
		goto error;
	}
	__atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
	tmpl->type = MLX5_TXQ_TYPE_STANDARD;
	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
	return tmpl;
error:
	mlx5_mr_btree_free(&tmpl->txq.mr_ctrl.cache_bh);
	mlx5_free(tmpl);
	return NULL;
}
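
/*
 * Illustration (not part of the driver, excluded from the build): the
 * allocation above places the elts[] mbuf-pointer array right after the
 * control structure in a single allocation. A standalone sketch of the same
 * trailing-array pattern, with hypothetical names:
 */
#if 0
#include <stdlib.h>

struct example_ring {
	unsigned int size;
	void *slots[]; /* Flexible array member, sized at allocation time. */
};

static struct example_ring *
example_ring_new(unsigned int n)
{
	struct example_ring *r = calloc(1, sizeof(*r) + n * sizeof(void *));

	if (r != NULL)
		r->size = n;
	return r;
}
#endif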
/**
 * Create a DPDK Tx hairpin queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   TX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param hairpin_conf
 *   The hairpin configuration.
 *
 * @return
 *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
 */
struct mlx5_txq_ctrl *
mlx5_txq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		     const struct rte_eth_hairpin_conf *hairpin_conf)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *tmpl;

	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
			   SOCKET_ID_ANY);
	if (!tmpl) {
		rte_errno = ENOMEM;
		return NULL;
	}
	tmpl->priv = priv;
	tmpl->socket = SOCKET_ID_ANY;
	tmpl->txq.elts_n = log2above(desc);
	tmpl->txq.port_id = dev->data->port_id;
	tmpl->txq.idx = idx;
	tmpl->hairpin_conf = *hairpin_conf;
	tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
	__atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
	return tmpl;
}
/**
 * Get a Tx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   TX queue index.
 *
 * @return
 *   A pointer to the queue if it exists.
 */
struct mlx5_txq_ctrl *
mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *ctrl = NULL;

	if (txq_data) {
		ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
		__atomic_add_fetch(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
	}
	return ctrl;
}
/**
 * Release a Tx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   TX queue index.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
int
mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *txq_ctrl;

	if (!(*priv->txqs)[idx])
		return 0;
	txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
	if (__atomic_sub_fetch(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
		return 1;
	if (txq_ctrl->obj) {
		priv->obj_ops.txq_obj_release(txq_ctrl->obj);
		LIST_REMOVE(txq_ctrl->obj, next);
		mlx5_free(txq_ctrl->obj);
		txq_ctrl->obj = NULL;
	}
	if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD) {
		if (txq_ctrl->txq.fcqs) {
			mlx5_free(txq_ctrl->txq.fcqs);
			txq_ctrl->txq.fcqs = NULL;
		}
		txq_free_elts(txq_ctrl);
	}
	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
	if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
		if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD)
			mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
		LIST_REMOVE(txq_ctrl, next);
		mlx5_free(txq_ctrl);
		(*priv->txqs)[idx] = NULL;
	}
	return 0;
}
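
/*
 * Illustration (not part of the driver, excluded from the build):
 * mlx5_txq_get()/mlx5_txq_release() implement a plain relaxed-atomic
 * reference count. A simplified standalone sketch of the same pattern
 * (the driver additionally defers hardware teardown to the last release):
 */
#if 0
#include <stdint.h>
#include <stdlib.h>

struct example_obj {
	uint32_t refcnt;
};

static void
example_obj_get(struct example_obj *obj)
{
	__atomic_add_fetch(&obj->refcnt, 1, __ATOMIC_RELAXED);
}

/* Returns 1 while references remain, 0 once the object is freed. */
static int
example_obj_release(struct example_obj *obj)
{
	if (__atomic_sub_fetch(&obj->refcnt, 1, __ATOMIC_RELAXED) > 0)
		return 1;
	free(obj);
	return 0;
}
#endif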
/**
 * Verify if the queue can be released.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   TX queue index.
 *
 * @return
 *   1 if the queue can be released.
 */
int
mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *txq;

	if (!(*priv->txqs)[idx])
		return -1;
	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
	return (__atomic_load_n(&txq->refcnt, __ATOMIC_RELAXED) == 1);
}
/**
 * Verify the Tx queue list is empty.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The number of objects not released.
 */
int
mlx5_txq_verify(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *txq_ctrl;
	int ret = 0;

	LIST_FOREACH(txq_ctrl, &priv->txqsctrl, next) {
		DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
			dev->data->port_id, txq_ctrl->txq.idx);
		++ret;
	}
	return ret;
}
/**
 * Set the Tx queue dynamic timestamp (mask and offset).
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 */
void
mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_txq_data *data;
	int off, nbit;
	unsigned int i;
	uint64_t mask = 0;

	nbit = rte_mbuf_dynflag_lookup
				(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
	off = rte_mbuf_dynfield_lookup
				(RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
	if (nbit >= 0 && off >= 0 && sh->txpp.refcnt)
		mask = 1ULL << nbit;
	for (i = 0; i != priv->txqs_n; ++i) {
		data = (*priv->txqs)[i];
		if (!data)
			continue;
		data->sh = sh;
		data->ts_mask = mask;
		data->ts_offset = off;
	}
}
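
/*
 * Illustration (not part of the driver, excluded from the build): an
 * application using Tx packet pacing looks up the dynamic timestamp
 * field/flag resolved above and stamps each mbuf before transmission.
 * A minimal sketch, assuming send scheduling (tx_pp) is enabled on the
 * port so the field and flag are registered:
 */
#if 0
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

static int ts_off;
static uint64_t ts_flag;

static int
example_init_tx_timestamp(void)
{
	int off = rte_mbuf_dynfield_lookup(RTE_MBUF_DYNFIELD_TIMESTAMP_NAME,
					   NULL);
	int nbit = rte_mbuf_dynflag_lookup(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME,
					   NULL);

	if (off < 0 || nbit < 0)
		return -1;
	ts_off = off;
	ts_flag = 1ULL << nbit;
	return 0;
}

/* Request transmission of m at the given device clock value. */
static void
example_schedule_mbuf(struct rte_mbuf *m, uint64_t when)
{
	*RTE_MBUF_DYNFIELD(m, ts_off, uint64_t *) = when;
	m->ol_flags |= ts_flag;
}
#endif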