/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <ethdev_driver.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_common.h>
#include <mlx5_common_mr.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5_utils.h"
#include "mlx5.h"
#include "mlx5_tx.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
/**
 * Allocate TX queue elements.
 *
 * @param txq_ctrl
 *   Pointer to TX queue structure.
 */
void
txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
{
	const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
	unsigned int i;

	for (i = 0; (i != elts_n); ++i)
		txq_ctrl->txq.elts[i] = NULL;
	DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
		PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx, elts_n);
	txq_ctrl->txq.elts_head = 0;
	txq_ctrl->txq.elts_tail = 0;
	txq_ctrl->txq.elts_comp = 0;
}
/**
 * Free TX queue elements.
 *
 * @param txq_ctrl
 *   Pointer to TX queue structure.
 */
void
txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
{
	const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
	const uint16_t elts_m = elts_n - 1;
	uint16_t elts_head = txq_ctrl->txq.elts_head;
	uint16_t elts_tail = txq_ctrl->txq.elts_tail;
	struct rte_mbuf *(*elts)[elts_n] = &txq_ctrl->txq.elts;

	DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
		PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx);
	txq_ctrl->txq.elts_head = 0;
	txq_ctrl->txq.elts_tail = 0;
	txq_ctrl->txq.elts_comp = 0;

	while (elts_tail != elts_head) {
		struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];

		MLX5_ASSERT(elt != NULL);
		rte_pktmbuf_free_seg(elt);
#ifdef RTE_LIBRTE_MLX5_DEBUG
		/* Poisoning. */
		memset(&(*elts)[elts_tail & elts_m],
		       0x77,
		       sizeof((*elts)[elts_tail & elts_m]));
#endif
		++elts_tail;
	}
}
/**
 * Returns the per-port supported offloads.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   Supported Tx offloads.
 */
uint64_t
mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
			     DEV_TX_OFFLOAD_VLAN_INSERT);
	struct mlx5_dev_config *config = &priv->config;

	if (config->hw_csum)
		offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
			     DEV_TX_OFFLOAD_UDP_CKSUM |
			     DEV_TX_OFFLOAD_TCP_CKSUM);
	if (config->tso)
		offloads |= DEV_TX_OFFLOAD_TCP_TSO;
	if (config->tx_pp)
		offloads |= DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP;
	if (config->swp) {
		if (config->swp & MLX5_SW_PARSING_CSUM_CAP)
			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
		if (config->swp & MLX5_SW_PARSING_TSO_CAP)
			offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
				     DEV_TX_OFFLOAD_UDP_TNL_TSO);
	}
	if (config->tunnel_en) {
		if (config->hw_csum)
			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
		if (config->tso)
			offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
				     DEV_TX_OFFLOAD_GRE_TNL_TSO |
				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
	}
	if (!config->mprq.enabled)
		offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	return offloads;
}
/* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
static void
txq_sync_cq(struct mlx5_txq_data *txq)
{
	volatile struct mlx5_cqe *cqe;
	int ret, i;

	i = txq->cqe_s;
	do {
		cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
		ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
			if (likely(ret != MLX5_CQE_STATUS_ERR)) {
				/* No new CQEs in completion queue. */
				MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
				break;
			}
		}
		++txq->cq_ci;
	} while (--i);
	/* Move all CQEs to HW ownership. */
	for (i = 0; i < txq->cqe_s; i++) {
		cqe = &txq->cqes[i];
		cqe->op_own = MLX5_CQE_INVALIDATE;
	}
	/* Resync CQE and WQE (WQ in reset state). */
	rte_io_wmb();
	*txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
	txq->cq_pi = txq->cq_ci;
	rte_io_wmb();
}
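/*
 * Note: txq_sync_cq() runs only while the queue is being stopped and the SQ
 * has already been moved to RESET, so no new completions can arrive.
 * Consuming the remaining SW-owned and error CQEs, invalidating the owner
 * bits and re-arming the CQ doorbell leaves the CQ in a freshly created
 * state, which lets the queue be restarted later without recreating the CQ.
 */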
/**
 * Tx queue stop. Device queue goes to the idle state,
 * all involved mbufs are freed from elts/WQ.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Tx queue index.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_tx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq, struct mlx5_txq_ctrl, txq);
	int ret;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/* Move QP to RESET state. */
	ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj, MLX5_TXQ_MOD_RDY2RST,
					   (uint8_t)priv->dev_port);
	if (ret)
		return ret;
	/* Handle all send completions. */
	txq_sync_cq(txq);
	/* Free elts stored in the SQ. */
	txq_free_elts(txq_ctrl);
	/* Prevent writing new pkts to SQ by setting no free WQE. */
	txq->wqe_ci = txq->wqe_s;
	txq->wqe_pi = txq->wqe_s;
	txq->elts_comp = 0;
	/* Set the actual queue state. */
	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}
/**
 * Tx queue stop. Device queue goes to the idle state,
 * all involved mbufs are freed from elts/WQ.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Tx queue index.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_tx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
{
	int ret;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
		DRV_LOG(ERR, "Hairpin queue can't be stopped");
		rte_errno = EINVAL;
		return -EINVAL;
	}
	if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		ret = mlx5_mp_os_req_queue_control(dev, idx,
						   MLX5_MP_REQ_QUEUE_TX_STOP);
	} else {
		ret = mlx5_tx_queue_stop_primary(dev, idx);
	}
	return ret;
}
/**
 * Tx queue start. Device queue goes to the ready state,
 * and the queue can accept packets for transmission again.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Tx queue index.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_tx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq, struct mlx5_txq_ctrl, txq);
	int ret;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj,
					   MLX5_TXQ_MOD_RST2RDY,
					   (uint8_t)priv->dev_port);
	if (ret)
		return ret;
	txq_ctrl->txq.wqe_ci = 0;
	txq_ctrl->txq.wqe_pi = 0;
	txq_ctrl->txq.elts_comp = 0;
	/* Set the actual queue state. */
	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
}
/**
 * Tx queue start. Device queue goes to the ready state,
 * and the queue can accept packets for transmission again.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Tx queue index.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_tx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
{
	int ret;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
		DRV_LOG(ERR, "Hairpin queue can't be started");
		rte_errno = EINVAL;
		return -EINVAL;
	}
	if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
		return 0;
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		ret = mlx5_mp_os_req_queue_control(dev, idx,
						   MLX5_MP_REQ_QUEUE_TX_START);
	} else {
		ret = mlx5_tx_queue_start_primary(dev, idx);
	}
	return ret;
}
/**
 * Tx queue presetup checks.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Tx queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (*desc <= MLX5_TX_COMP_THRESH) {
		DRV_LOG(WARNING,
			"port %u number of descriptors requested for Tx queue"
			" %u must be higher than MLX5_TX_COMP_THRESH, using %u"
			" instead of %u", dev->data->port_id, idx,
			MLX5_TX_COMP_THRESH + 1, *desc);
		*desc = MLX5_TX_COMP_THRESH + 1;
	}
	if (!rte_is_power_of_2(*desc)) {
		*desc = 1 << log2above(*desc);
		DRV_LOG(WARNING,
			"port %u increased number of descriptors in Tx queue"
			" %u to the next power of two (%d)",
			dev->data->port_id, idx, *desc);
	}
	DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
		dev->data->port_id, idx, *desc);
	if (idx >= priv->txqs_n) {
		DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
			dev->data->port_id, idx, priv->txqs_n);
		rte_errno = EOVERFLOW;
		return -rte_errno;
	}
	if (!mlx5_txq_releasable(dev, idx)) {
		rte_errno = EBUSY;
		DRV_LOG(ERR, "port %u unable to release queue index %u",
			dev->data->port_id, idx);
		return -rte_errno;
	}
	mlx5_txq_release(dev, idx);
	return 0;
}
/**
 * DPDK callback to configure a TX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   TX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_txconf *conf)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	int res;

	res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
	if (res)
		return res;
	txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
	if (!txq_ctrl) {
		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
			dev->data->port_id, idx);
		return -rte_errno;
	}
	DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
		dev->data->port_id, idx);
	(*priv->txqs)[idx] = &txq_ctrl->txq;
	return 0;
}
/**
 * DPDK callback to configure a TX hairpin queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   TX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param[in] hairpin_conf
 *   The hairpin binding configuration.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
			    uint16_t desc,
			    const struct rte_eth_hairpin_conf *hairpin_conf)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	int res;

	res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
	if (res)
		return res;
	if (hairpin_conf->peer_count != 1) {
		rte_errno = EINVAL;
		DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue index %u"
			" peer count is %u", dev->data->port_id,
			idx, hairpin_conf->peer_count);
		return -rte_errno;
	}
	if (hairpin_conf->peers[0].port == dev->data->port_id) {
		if (hairpin_conf->peers[0].queue >= priv->rxqs_n) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue"
				" index %u, Rx %u is larger than %u",
				dev->data->port_id, idx,
				hairpin_conf->peers[0].queue, priv->rxqs_n);
			return -rte_errno;
		}
	} else {
		if (hairpin_conf->manual_bind == 0 ||
		    hairpin_conf->tx_explicit == 0) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue"
				" index %u peer port %u with attributes %u %u",
				dev->data->port_id, idx,
				hairpin_conf->peers[0].port,
				hairpin_conf->manual_bind,
				hairpin_conf->tx_explicit);
			return -rte_errno;
		}
	}
	txq_ctrl = mlx5_txq_hairpin_new(dev, idx, desc, hairpin_conf);
	if (!txq_ctrl) {
		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
			dev->data->port_id, idx);
		return -rte_errno;
	}
	DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
		dev->data->port_id, idx);
	(*priv->txqs)[idx] = &txq_ctrl->txq;
	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
	return 0;
}
/**
 * DPDK callback to release a TX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param qid
 *   Transmit queue index.
 */
void
mlx5_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct mlx5_txq_data *txq = dev->data->tx_queues[qid];

	if (txq == NULL)
		return;
	DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
		dev->data->port_id, qid);
	mlx5_txq_release(dev, qid);
}
/**
 * Configure the doorbell register non-cached attribute.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 * @param page_size
 *   System page size.
 */
static void
txq_uar_ncattr_init(struct mlx5_txq_ctrl *txq_ctrl, size_t page_size)
{
	struct mlx5_priv *priv = txq_ctrl->priv;
	off_t cmd;

	txq_ctrl->txq.db_heu = priv->config.dbnc == MLX5_TXDB_HEURISTIC;
	txq_ctrl->txq.db_nc = 0;
	/* Check the doorbell register mapping type. */
	cmd = txq_ctrl->uar_mmap_offset / page_size;
	cmd >>= MLX5_UAR_MMAP_CMD_SHIFT;
	cmd &= MLX5_UAR_MMAP_CMD_MASK;
	if (cmd == MLX5_MMAP_GET_NC_PAGES_CMD)
		txq_ctrl->txq.db_nc = 1;
}
/**
 * Initialize Tx UAR registers for primary process.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 */
void
txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl)
{
	struct mlx5_priv *priv = txq_ctrl->priv;
	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
#ifndef RTE_ARCH_64
	unsigned int lock_idx;
#endif
	const size_t page_size = rte_mem_page_size();
	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		rte_errno = ENOMEM;
	}

	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
		return;
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	MLX5_ASSERT(ppriv);
	ppriv->uar_table[txq_ctrl->txq.idx] = txq_ctrl->bf_reg;
	txq_uar_ncattr_init(txq_ctrl, page_size);
#ifndef RTE_ARCH_64
	/* Assign an UAR lock according to UAR page number. */
	lock_idx = (txq_ctrl->uar_mmap_offset / page_size) &
		   MLX5_UAR_PAGE_NUM_MASK;
	txq_ctrl->txq.uar_lock = &priv->sh->uar_lock[lock_idx];
#endif
}
/**
 * Remap UAR register of a Tx queue for secondary process.
 *
 * Remapped address is stored at the table in the process private structure of
 * the device, indexed by queue index.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 * @param fd
 *   Verbs file descriptor to map UAR pages.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
txq_uar_init_secondary(struct mlx5_txq_ctrl *txq_ctrl, int fd)
{
	struct mlx5_priv *priv = txq_ctrl->priv;
	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
	struct mlx5_txq_data *txq = &txq_ctrl->txq;
	void *addr;
	uintptr_t uar_va;
	uintptr_t offset;
	const size_t page_size = rte_mem_page_size();
	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		rte_errno = ENOMEM;
		return -rte_errno;
	}

	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
		return 0;
	MLX5_ASSERT(ppriv);
	/*
	 * As in rdma-core, UARs are mapped with OS page size granularity.
	 * See the libmlx5 function mlx5_init_context().
	 */
	uar_va = (uintptr_t)txq_ctrl->bf_reg;
	offset = uar_va & (page_size - 1); /* Offset in page. */
	addr = rte_mem_map(NULL, page_size, RTE_PROT_WRITE, RTE_MAP_SHARED,
			   fd, txq_ctrl->uar_mmap_offset);
	if (!addr) {
		DRV_LOG(ERR,
			"port %u mmap failed for BF reg of txq %u",
			txq->port_id, txq->idx);
		rte_errno = ENXIO;
		return -rte_errno;
	}
	addr = RTE_PTR_ADD(addr, offset);
	ppriv->uar_table[txq->idx] = addr;
	txq_uar_ncattr_init(txq_ctrl, page_size);
	return 0;
}
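/*
 * Note: a secondary process cannot reuse the primary's UAR virtual address,
 * so the BlueFlame register page is re-mapped above through the Verbs fd at
 * uar_mmap_offset. Only a page-aligned base can be mapped; the original
 * in-page offset of bf_reg is added back to the fresh mapping before the
 * address is stored in the per-process UAR table used by the datapath.
 */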
/**
 * Unmap UAR register of a Tx queue for secondary process.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 */
static void
txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl)
{
	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(txq_ctrl->priv));
	void *addr;
	const size_t page_size = rte_mem_page_size();
	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		rte_errno = ENOMEM;
	}

	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
		return;
	addr = ppriv->uar_table[txq_ctrl->txq.idx];
	rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
}
/**
 * Deinitialize Tx UAR registers for secondary process.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev)
{
	struct mlx5_proc_priv *ppriv = (struct mlx5_proc_priv *)
					dev->process_private;
	const size_t page_size = rte_mem_page_size();
	void *addr;
	unsigned int i;

	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		return;
	}
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
	for (i = 0; i != ppriv->uar_table_sz; ++i) {
		if (!ppriv->uar_table[i])
			continue;
		addr = ppriv->uar_table[i];
		rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
	}
}
/**
 * Initialize Tx UAR registers for secondary process.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param fd
 *   Verbs file descriptor to map UAR pages.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq;
	struct mlx5_txq_ctrl *txq_ctrl;
	unsigned int i;
	int ret;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
	for (i = 0; i != priv->txqs_n; ++i) {
		if (!(*priv->txqs)[i])
			continue;
		txq = (*priv->txqs)[i];
		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
		if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
			continue;
		MLX5_ASSERT(txq->idx == (uint16_t)i);
		ret = txq_uar_init_secondary(txq_ctrl, fd);
		if (ret)
			goto error;
	}
	return 0;
error:
	/* Rollback. */
	do {
		if (!(*priv->txqs)[i])
			continue;
		txq = (*priv->txqs)[i];
		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
		txq_uar_uninit_secondary(txq_ctrl);
	} while (i--);
	return -rte_errno;
}
/**
 * Verify that the Verbs Tx queue list is empty.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The number of objects not released.
 */
int
mlx5_txq_obj_verify(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret = 0;
	struct mlx5_txq_obj *txq_obj;

	LIST_FOREACH(txq_obj, &priv->txqsobj, next) {
		DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
			dev->data->port_id, txq_obj->txq_ctrl->txq.idx);
		++ret;
	}
	return ret;
}
/**
 * Calculate the total number of WQEBBs for Tx queue.
 *
 * Simplified version of calc_sq_size() in rdma-core.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 *
 * @return
 *   The number of WQEBBs.
 */
static int
txq_calc_wqebb_cnt(struct mlx5_txq_ctrl *txq_ctrl)
{
	unsigned int wqe_size;
	const unsigned int desc = 1 << txq_ctrl->txq.elts_n;

	wqe_size = MLX5_WQE_CSEG_SIZE +
		   MLX5_WQE_ESEG_SIZE +
		   MLX5_WSEG_SIZE -
		   MLX5_ESEG_MIN_INLINE_SIZE +
		   txq_ctrl->max_inline_data;
	return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;
}
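/*
 * In other words, every descriptor is charged a worst-case WQE footprint:
 * control segment + Ethernet segment + one pointer segment, minus the
 * minimal inline bytes already accounted inside the Ethernet segment, plus
 * the configured inline room. The product over all descriptors is rounded
 * up to a power of two and divided by the 64-byte WQEBB size to get the SQ
 * size in WQEBBs.
 */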
/**
 * Calculate the maximal inline data size for Tx queue.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 *
 * @return
 *   The maximal inline data size.
 */
static unsigned int
txq_calc_inline_max(struct mlx5_txq_ctrl *txq_ctrl)
{
	const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
	struct mlx5_priv *priv = txq_ctrl->priv;
	unsigned int wqe_size;

	wqe_size = priv->sh->device_attr.max_qp_wr / desc;
	if (!wqe_size)
		return 0;
	/*
	 * This calculation is derived from the source of
	 * mlx5_calc_send_wqe() in the rdma-core library.
	 */
	wqe_size = wqe_size * MLX5_WQE_SIZE -
		   MLX5_WQE_CSEG_SIZE -
		   MLX5_WQE_ESEG_SIZE -
		   MLX5_WSEG_SIZE -
		   MLX5_WSEG_SIZE +
		   MLX5_DSEG_MIN_INLINE_SIZE;
	return wqe_size;
}
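/*
 * In other words: the device limit max_qp_wr caps the number of WQEBBs a
 * single SQ may hold, so dividing it by the descriptor count gives the
 * WQEBB budget of one descriptor; converting that budget to bytes and
 * subtracting the fixed control/Ethernet/pointer segments yields the
 * largest inline data size a WQE can carry on this queue.
 */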
/**
 * Set Tx queue parameters from device configuration.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 */
static void
txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
{
	struct mlx5_priv *priv = txq_ctrl->priv;
	struct mlx5_dev_config *config = &priv->config;
	unsigned int inlen_send; /* Inline data for ordinary SEND. */
	unsigned int inlen_empw; /* Inline data for enhanced MPW. */
	unsigned int inlen_mode; /* Minimal required inline data. */
	unsigned int txqs_inline; /* Min Tx queues to enable inline. */
	uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
	bool tso = txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
					     DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
					     DEV_TX_OFFLOAD_GRE_TNL_TSO |
					     DEV_TX_OFFLOAD_IP_TNL_TSO |
					     DEV_TX_OFFLOAD_UDP_TNL_TSO);
	bool vlan_inline;
	unsigned int temp;

	txq_ctrl->txq.fast_free =
		!!((txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
		   !(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
		   !config->mprq.enabled);
	if (config->txqs_inline == MLX5_ARG_UNSET)
		txqs_inline =
#if defined(RTE_ARCH_ARM64)
		(priv->pci_dev && priv->pci_dev->id.device_id ==
			PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) ?
			MLX5_INLINE_MAX_TXQS_BLUEFIELD :
#endif
			MLX5_INLINE_MAX_TXQS;
	else
		txqs_inline = (unsigned int)config->txqs_inline;
	inlen_send = (config->txq_inline_max == MLX5_ARG_UNSET) ?
		     MLX5_SEND_DEF_INLINE_LEN :
		     (unsigned int)config->txq_inline_max;
	inlen_empw = (config->txq_inline_mpw == MLX5_ARG_UNSET) ?
		     MLX5_EMPW_DEF_INLINE_LEN :
		     (unsigned int)config->txq_inline_mpw;
	inlen_mode = (config->txq_inline_min == MLX5_ARG_UNSET) ?
		     0 : (unsigned int)config->txq_inline_min;
	if (config->mps != MLX5_MPW_ENHANCED && config->mps != MLX5_MPW)
		inlen_empw = 0;
	/*
	 * If there is a requested minimal amount of data to inline
	 * we MUST enable inlining. This is the case for ConnectX-4,
	 * which usually requires L2 headers inlined for correct operation,
	 * and ConnectX-4 Lx, which requires L2-L4 headers inlined to
	 * support E-Switch Flows.
	 */
	if (inlen_mode) {
		if (inlen_mode <= MLX5_ESEG_MIN_INLINE_SIZE) {
			/*
			 * Optimize minimal inlining for single
			 * segment packets to fill one WQEBB
			 * without gaps.
			 */
			temp = MLX5_ESEG_MIN_INLINE_SIZE;
		} else {
			temp = inlen_mode - MLX5_ESEG_MIN_INLINE_SIZE;
			temp = RTE_ALIGN(temp, MLX5_WSEG_SIZE) +
			       MLX5_ESEG_MIN_INLINE_SIZE;
			temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
		}
		if (temp != inlen_mode) {
			DRV_LOG(INFO,
				"port %u minimal required inline setting"
				" aligned from %u to %u",
				PORT_ID(priv), inlen_mode, temp);
			inlen_mode = temp;
		}
	}
	/*
	 * If the port is configured to support VLAN insertion and the device
	 * does not support this feature in HW (for NICs before ConnectX-5,
	 * or in case the wqe_vlan_insert flag is not set) we must enable
	 * data inline on all queues because it is supported by a single
	 * tx_burst routine.
	 */
	txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
	vlan_inline = (dev_txoff & DEV_TX_OFFLOAD_VLAN_INSERT) &&
		      !config->hw_vlan_insert;
	/*
	 * If there are few Tx queues it is prioritized
	 * to save CPU cycles and disable data inlining at all.
	 */
	if (inlen_send && priv->txqs_n >= txqs_inline) {
		/*
		 * The data sent with the ordinary MLX5_OPCODE_SEND
		 * may be inlined in the Ethernet Segment, align the
		 * length accordingly to fit entire WQEBBs.
		 */
		temp = RTE_MAX(inlen_send,
			       MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE);
		temp -= MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
		temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
		temp += MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
		temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
				     MLX5_ESEG_MIN_INLINE_SIZE -
				     MLX5_WQE_CSEG_SIZE -
				     MLX5_WQE_ESEG_SIZE -
				     MLX5_WQE_DSEG_SIZE * 2);
		temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
		temp = RTE_MAX(temp, inlen_mode);
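		/*
		 * Illustrative arithmetic (assuming MLX5_ESEG_MIN_INLINE_SIZE
		 * is 18, MLX5_WQE_DSEG_SIZE is 16 and MLX5_WQE_SIZE is 64):
		 * a requested inlen_send of 200 becomes
		 * max(200, 34) - 34 = 166 -> aligned up to 192 -> + 34 = 226,
		 * i.e. the inline room beyond the Ethernet segment minimum
		 * plus the trailing pointer segment is padded to whole
		 * WQEBBs; the RTE_MIN()/RTE_MAX() calls above only clamp
		 * this value further.
		 */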
		if (temp != inlen_send) {
			DRV_LOG(INFO,
				"port %u ordinary send inline setting"
				" aligned from %u to %u",
				PORT_ID(priv), inlen_send, temp);
			inlen_send = temp;
		}
		/*
		 * Not aligned to cache lines, but to WQEs.
		 * First bytes of data (initial alignment)
		 * are going to be copied explicitly at the
		 * beginning of the inlining buffer in the
		 * Ethernet Segment.
		 */
		MLX5_ASSERT(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
		MLX5_ASSERT(inlen_send <= MLX5_WQE_SIZE_MAX +
					  MLX5_ESEG_MIN_INLINE_SIZE -
					  MLX5_WQE_CSEG_SIZE -
					  MLX5_WQE_ESEG_SIZE -
					  MLX5_WQE_DSEG_SIZE * 2);
	} else if (inlen_mode) {
		/*
		 * If minimal inlining is requested we must
		 * enable inlining in general, despite the
		 * number of configured queues. Ignore the
		 * txq_inline_max devarg, this is not
		 * full-featured inline.
		 */
		inlen_send = inlen_mode;
		inlen_empw = 0;
	} else if (vlan_inline) {
		/*
		 * Hardware does not report offload for
		 * VLAN insertion, we must enable data inline
		 * to implement the feature by software.
		 */
		inlen_send = MLX5_ESEG_MIN_INLINE_SIZE;
		inlen_empw = 0;
	} else {
		inlen_send = 0;
		inlen_empw = 0;
	}
	txq_ctrl->txq.inlen_send = inlen_send;
	txq_ctrl->txq.inlen_mode = inlen_mode;
	txq_ctrl->txq.inlen_empw = 0;
	if (inlen_send && inlen_empw && priv->txqs_n >= txqs_inline) {
		/*
		 * The data sent with MLX5_OPCODE_ENHANCED_MPSW
		 * may be inlined in the Data Segment, align the
		 * length accordingly to fit entire WQEBBs.
		 */
		temp = RTE_MAX(inlen_empw,
			       MLX5_WQE_SIZE + MLX5_DSEG_MIN_INLINE_SIZE);
		temp -= MLX5_DSEG_MIN_INLINE_SIZE;
		temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
		temp += MLX5_DSEG_MIN_INLINE_SIZE;
		temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
				     MLX5_DSEG_MIN_INLINE_SIZE -
				     MLX5_WQE_CSEG_SIZE -
				     MLX5_WQE_ESEG_SIZE -
				     MLX5_WQE_DSEG_SIZE);
		temp = RTE_MIN(temp, MLX5_EMPW_MAX_INLINE_LEN);
		if (temp != inlen_empw) {
			DRV_LOG(INFO,
				"port %u enhanced empw inline setting"
				" aligned from %u to %u",
				PORT_ID(priv), inlen_empw, temp);
			inlen_empw = temp;
		}
		MLX5_ASSERT(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
		MLX5_ASSERT(inlen_empw <= MLX5_WQE_SIZE_MAX +
					  MLX5_DSEG_MIN_INLINE_SIZE -
					  MLX5_WQE_CSEG_SIZE -
					  MLX5_WQE_ESEG_SIZE -
					  MLX5_WQE_DSEG_SIZE);
		txq_ctrl->txq.inlen_empw = inlen_empw;
	}
	txq_ctrl->max_inline_data = RTE_MAX(inlen_send, inlen_empw);
	if (tso) {
		txq_ctrl->max_tso_header = MLX5_MAX_TSO_HEADER;
		txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->max_inline_data,
						    MLX5_MAX_TSO_HEADER);
		txq_ctrl->txq.tso_en = 1;
	}
	txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
	txq_ctrl->txq.swp_en = (((DEV_TX_OFFLOAD_IP_TNL_TSO |
				  DEV_TX_OFFLOAD_UDP_TNL_TSO) &
				 txq_ctrl->txq.offloads) && (config->swp &
				 MLX5_SW_PARSING_TSO_CAP)) |
			       ((DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM &
				 txq_ctrl->txq.offloads) && (config->swp &
				 MLX5_SW_PARSING_CSUM_CAP));
}
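/*
 * To summarize the decision order above: a non-zero txq_inline_min always
 * forces inlining (a ConnectX-4/ConnectX-4 Lx requirement), a Tx queue count
 * below txqs_inline disables data inline to save CPU cycles, and VLAN
 * insertion without hardware support falls back to the minimal software
 * inline so the common tx_burst routine can build the VLAN header itself.
 */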
/**
 * Adjust Tx queue data inline parameters for large queue sizes.
 * The data inline feature requires multiple WQEs to fit the packets,
 * and if a large number of Tx descriptors is requested by the application
 * the total number of WQEs may exceed the hardware capabilities. If the
 * default inline settings are used, we can try to adjust them to meet
 * the hardware requirements without exceeding the queue size.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 *
 * @return
 *   Zero on success, otherwise the parameters cannot be adjusted.
 */
static int
txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
{
	struct mlx5_priv *priv = txq_ctrl->priv;
	struct mlx5_dev_config *config = &priv->config;
	unsigned int max_inline;

	max_inline = txq_calc_inline_max(txq_ctrl);
	if (!txq_ctrl->txq.inlen_send) {
		/*
		 * Inline data feature is not engaged at all.
		 * There is nothing to adjust.
		 */
		return 0;
	}
	if (txq_ctrl->max_inline_data <= max_inline) {
		/*
		 * The requested inline data length does not
		 * exceed queue capabilities.
		 */
		return 0;
	}
	if (txq_ctrl->txq.inlen_mode > max_inline) {
		DRV_LOG(ERR,
			"minimal data inline requirements (%u) are not"
			" satisfied (%u) on port %u, try the smaller"
			" Tx queue size (%d)",
			txq_ctrl->txq.inlen_mode, max_inline,
			priv->dev_data->port_id,
			priv->sh->device_attr.max_qp_wr);
		goto error;
	}
	if (txq_ctrl->txq.inlen_send > max_inline &&
	    config->txq_inline_max != MLX5_ARG_UNSET &&
	    config->txq_inline_max > (int)max_inline) {
		DRV_LOG(ERR,
			"txq_inline_max requirements (%u) are not"
			" satisfied (%u) on port %u, try the smaller"
			" Tx queue size (%d)",
			txq_ctrl->txq.inlen_send, max_inline,
			priv->dev_data->port_id,
			priv->sh->device_attr.max_qp_wr);
		goto error;
	}
	if (txq_ctrl->txq.inlen_empw > max_inline &&
	    config->txq_inline_mpw != MLX5_ARG_UNSET &&
	    config->txq_inline_mpw > (int)max_inline) {
		DRV_LOG(ERR,
			"txq_inline_mpw requirements (%u) are not"
			" satisfied (%u) on port %u, try the smaller"
			" Tx queue size (%d)",
			txq_ctrl->txq.inlen_empw, max_inline,
			priv->dev_data->port_id,
			priv->sh->device_attr.max_qp_wr);
		goto error;
	}
	if (txq_ctrl->txq.tso_en && max_inline < MLX5_MAX_TSO_HEADER) {
		DRV_LOG(ERR,
			"tso header inline requirements (%u) are not"
			" satisfied (%u) on port %u, try the smaller"
			" Tx queue size (%d)",
			MLX5_MAX_TSO_HEADER, max_inline,
			priv->dev_data->port_id,
			priv->sh->device_attr.max_qp_wr);
		goto error;
	}
	if (txq_ctrl->txq.inlen_send > max_inline) {
		DRV_LOG(WARNING,
			"adjust txq_inline_max (%u->%u)"
			" due to large Tx queue on port %u",
			txq_ctrl->txq.inlen_send, max_inline,
			priv->dev_data->port_id);
		txq_ctrl->txq.inlen_send = max_inline;
	}
	if (txq_ctrl->txq.inlen_empw > max_inline) {
		DRV_LOG(WARNING,
			"adjust txq_inline_mpw (%u->%u)"
			" due to large Tx queue on port %u",
			txq_ctrl->txq.inlen_empw, max_inline,
			priv->dev_data->port_id);
		txq_ctrl->txq.inlen_empw = max_inline;
	}
	txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->txq.inlen_send,
					    txq_ctrl->txq.inlen_empw);
	MLX5_ASSERT(txq_ctrl->max_inline_data <= max_inline);
	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= max_inline);
	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw ||
		    !txq_ctrl->txq.inlen_empw);
	return 0;
error:
	rte_errno = ENOMEM;
	return -ENOMEM;
}
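/*
 * All of the "try the smaller Tx queue size" errors above stem from the same
 * trade-off: max_qp_wr caps the number of WQEBBs per SQ, so a larger
 * descriptor count leaves fewer WQEBBs, and therefore fewer inline bytes,
 * per descriptor. Reducing the queue size is what raises max_inline again.
 */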
/**
 * Create a DPDK Tx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   TX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 *
 * @return
 *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
 */
struct mlx5_txq_ctrl *
mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
	     unsigned int socket, const struct rte_eth_txconf *conf)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *tmpl;

	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
			   desc * sizeof(struct rte_mbuf *), 0, socket);
	if (!tmpl) {
		rte_errno = ENOMEM;
		return NULL;
	}
	if (mlx5_mr_btree_init(&tmpl->txq.mr_ctrl.cache_bh,
			       MLX5_MR_BTREE_CACHE_N, socket)) {
		/* rte_errno is already set. */
		goto error;
	}
	/* Save pointer of global generation number to check memory event. */
	tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->sh->share_cache.dev_gen;
	MLX5_ASSERT(desc > MLX5_TX_COMP_THRESH);
	tmpl->txq.offloads = conf->offloads |
			     dev->data->dev_conf.txmode.offloads;
	tmpl->priv = priv;
	tmpl->socket = socket;
	tmpl->txq.elts_n = log2above(desc);
	tmpl->txq.elts_s = desc;
	tmpl->txq.elts_m = desc - 1;
	tmpl->txq.port_id = dev->data->port_id;
	tmpl->txq.idx = idx;
	txq_set_params(tmpl);
	if (txq_adjust_params(tmpl))
		goto error;
	if (txq_calc_wqebb_cnt(tmpl) >
	    priv->sh->device_attr.max_qp_wr) {
		DRV_LOG(ERR,
			"port %u Tx WQEBB count (%d) exceeds the limit (%d),"
			" try smaller queue size",
			dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
			priv->sh->device_attr.max_qp_wr);
		rte_errno = ENOMEM;
		goto error;
	}
	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
	tmpl->type = MLX5_TXQ_TYPE_STANDARD;
	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
	return tmpl;
error:
	mlx5_mr_btree_free(&tmpl->txq.mr_ctrl.cache_bh);
	mlx5_free(tmpl);
	return NULL;
}
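/*
 * Note: the mlx5_malloc() above over-allocates by desc mbuf pointers so that
 * the per-descriptor elts[] array walked by txq_alloc_elts()/txq_free_elts()
 * lives in the same allocation, right after the control structure;
 * txq.elts_s and txq.elts_m hold its size and wrap-around mask for the
 * power-of-two descriptor count.
 */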
/**
 * Create a DPDK Tx hairpin queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   TX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param hairpin_conf
 *   The hairpin configuration.
 *
 * @return
 *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
 */
struct mlx5_txq_ctrl *
mlx5_txq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		     const struct rte_eth_hairpin_conf *hairpin_conf)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *tmpl;

	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
			   SOCKET_ID_ANY);
	if (!tmpl) {
		rte_errno = ENOMEM;
		return NULL;
	}
	tmpl->priv = priv;
	tmpl->socket = SOCKET_ID_ANY;
	tmpl->txq.elts_n = log2above(desc);
	tmpl->txq.port_id = dev->data->port_id;
	tmpl->txq.idx = idx;
	tmpl->hairpin_conf = *hairpin_conf;
	tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
	return tmpl;
}
/**
 * Get a Tx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Tx queue index.
 *
 * @return
 *   A pointer to the queue if it exists.
 */
struct mlx5_txq_ctrl *
mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *ctrl = NULL;

	if (txq_data) {
		ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
		__atomic_fetch_add(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
	}
	return ctrl;
}
/**
 * Release a Tx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Tx queue index.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
int
mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *txq_ctrl;

	if (priv->txqs == NULL || (*priv->txqs)[idx] == NULL)
		return 0;
	txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
	if (__atomic_sub_fetch(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
		return 1;
	if (txq_ctrl->obj) {
		priv->obj_ops.txq_obj_release(txq_ctrl->obj);
		LIST_REMOVE(txq_ctrl->obj, next);
		mlx5_free(txq_ctrl->obj);
		txq_ctrl->obj = NULL;
	}
	if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD) {
		if (txq_ctrl->txq.fcqs) {
			mlx5_free(txq_ctrl->txq.fcqs);
			txq_ctrl->txq.fcqs = NULL;
		}
		txq_free_elts(txq_ctrl);
		dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
	}
	if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
		if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD)
			mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
		LIST_REMOVE(txq_ctrl, next);
		mlx5_free(txq_ctrl);
		(*priv->txqs)[idx] = NULL;
	}
	return 0;
}
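/*
 * Reference counting note: mlx5_txq_new()/mlx5_txq_get() take references and
 * mlx5_txq_release() drops them. Dropping to a single remaining reference
 * already releases the hardware object and the stored mbufs, while the
 * control structure itself is freed only once the count reaches zero and
 * the queue slot in (*priv->txqs) is cleared.
 */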
/**
 * Verify if the queue can be released.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Tx queue index.
 *
 * @return
 *   1 if the queue can be released.
 */
int
mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *txq;

	if (!(*priv->txqs)[idx])
		return -1;
	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
	return (__atomic_load_n(&txq->refcnt, __ATOMIC_RELAXED) == 1);
}
/**
 * Verify that the Tx queue list is empty.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The number of objects not released.
 */
int
mlx5_txq_verify(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *txq_ctrl;
	int ret = 0;

	LIST_FOREACH(txq_ctrl, &priv->txqsctrl, next) {
		DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
			dev->data->port_id, txq_ctrl->txq.idx);
		++ret;
	}
	return ret;
}
/**
 * Set the Tx queue dynamic timestamp (mask and offset).
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 */
void
mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_txq_data *data;
	int off, nbit;
	unsigned int i;
	uint64_t mask = 0;

	nbit = rte_mbuf_dynflag_lookup
				(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
	off = rte_mbuf_dynfield_lookup
				(RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
	if (nbit >= 0 && off >= 0 && sh->txpp.refcnt)
		mask = 1ULL << nbit;
	for (i = 0; i != priv->txqs_n; ++i) {
		data = (*priv->txqs)[i];
		if (!data)
			continue;
		data->sh = sh;
		data->ts_mask = mask;
		data->ts_offset = off;
	}
}
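/*
 * Usage note (illustrative): the mask/offset stored here are non-zero only
 * when the mbuf dynamic Tx timestamp field and flag
 * (RTE_MBUF_DYNFIELD_TIMESTAMP_NAME / RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME)
 * have been registered and Tx packet pacing (sh->txpp) is active; the Tx
 * datapath then reads the per-packet timestamp from that dynamic field when
 * scheduling transmission.
 */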