/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
#include <rte_malloc.h>
#include <ethdev_driver.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_common.h>
#include <mlx5_common_mr.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
 * Allocate TX queue elements.
 *   Pointer to TX queue structure.
txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
        const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;

        for (i = 0; (i != elts_n); ++i)
                txq_ctrl->txq.elts[i] = NULL;
        DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
                PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx, elts_n);
        txq_ctrl->txq.elts_head = 0;
        txq_ctrl->txq.elts_tail = 0;
        txq_ctrl->txq.elts_comp = 0;
 * Free TX queue elements.
 *   Pointer to TX queue structure.
txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
        const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
        const uint16_t elts_m = elts_n - 1;
        uint16_t elts_head = txq_ctrl->txq.elts_head;
        uint16_t elts_tail = txq_ctrl->txq.elts_tail;
        struct rte_mbuf *(*elts)[elts_n] = &txq_ctrl->txq.elts;

        DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
                PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx);
        txq_ctrl->txq.elts_head = 0;
        txq_ctrl->txq.elts_tail = 0;
        txq_ctrl->txq.elts_comp = 0;
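        /*
         * Drain using the indices saved above: elts_head/elts_tail are
         * free-running counters, so each slot is addressed by masking
         * with elts_m.
         */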
        while (elts_tail != elts_head) {
                struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];

                MLX5_ASSERT(elt != NULL);
                rte_pktmbuf_free_seg(elt);
#ifdef RTE_LIBRTE_MLX5_DEBUG
                memset(&(*elts)[elts_tail & elts_m],
                       sizeof((*elts)[elts_tail & elts_m]));
 * Returns the per-port supported offloads.
 *   Pointer to Ethernet device.
 *   Supported Tx offloads.
mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
        struct mlx5_priv *priv = dev->data->dev_private;
        uint64_t offloads = (RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
                             RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
        struct mlx5_dev_config *config = &priv->config;

                offloads |= (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
                             RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
                             RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
                offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
                offloads |= RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP;
        if (config->swp & MLX5_SW_PARSING_CSUM_CAP)
                offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
        if (config->swp & MLX5_SW_PARSING_TSO_CAP)
                offloads |= (RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
                             RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
        if (config->tunnel_en) {
                        offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
                        if (config->tunnel_en &
                            MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)
                                offloads |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
                        if (config->tunnel_en &
                            MLX5_TUNNELED_OFFLOADS_GRE_CAP)
                                offloads |= RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO;
                        if (config->tunnel_en &
                            MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)
                                offloads |= RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
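        /*
         * With MPRQ enabled, Rx may hand out mbufs with attached external
         * buffers, which breaks the single-pool assumption behind fast
         * free, so the offload is only advertised when MPRQ is off.
         */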
        if (!config->mprq.enabled)
                offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
/* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
txq_sync_cq(struct mlx5_txq_data *txq)
        volatile struct mlx5_cqe *cqe;

                cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
                ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
                if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
                        if (likely(ret != MLX5_CQE_STATUS_ERR)) {
                                /* No new CQEs in completion queue. */
                                MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
        /* Move all CQEs to HW ownership. */
        for (i = 0; i < txq->cqe_s; i++) {
                cqe->op_own = MLX5_CQE_INVALIDATE;
        /* Resync CQE and WQE (WQ in reset state). */
        *txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
        txq->cq_pi = txq->cq_ci;
 * Tx queue stop. Device queue goes to the idle state,
 * all involved mbufs are freed from elts/WQ.
 *   Pointer to Ethernet device structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_tx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_txq_data *txq = (*priv->txqs)[idx];
        struct mlx5_txq_ctrl *txq_ctrl =
                        container_of(txq, struct mlx5_txq_ctrl, txq);

        MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
        /* Move QP to RESET state. */
        ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj, MLX5_TXQ_MOD_RDY2RST,
                                           (uint8_t)priv->dev_port);
        /* Handle all send completions. */
        /* Free elts stored in the SQ. */
        txq_free_elts(txq_ctrl);
        /* Prevent writing new pkts to SQ by setting no free WQE. */
        txq->wqe_ci = txq->wqe_s;
        /* Set the actual queue state. */
        dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
 * Tx queue stop. Device queue goes to the idle state,
 * all involved mbufs are freed from elts/WQ.
 *   Pointer to Ethernet device structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_tx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
        if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
                DRV_LOG(ERR, "Hairpin queue can't be stopped");
        if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
        if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
                ret = mlx5_mp_os_req_queue_control(dev, idx,
                                                   MLX5_MP_REQ_QUEUE_TX_STOP);
                ret = mlx5_tx_queue_stop_primary(dev, idx);
 * Tx queue start. Device queue goes to the ready state,
 * all internal indexes are reset and the queue is ready to transmit.
 *   Pointer to Ethernet device structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_tx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_txq_data *txq = (*priv->txqs)[idx];
        struct mlx5_txq_ctrl *txq_ctrl =
                        container_of(txq, struct mlx5_txq_ctrl, txq);

        MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
        ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj,
                                           MLX5_TXQ_MOD_RST2RDY,
                                           (uint8_t)priv->dev_port);
        txq_ctrl->txq.wqe_ci = 0;
        txq_ctrl->txq.wqe_pi = 0;
        txq_ctrl->txq.elts_comp = 0;
        /* Set the actual queue state. */
        dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
 * Tx queue start. Device queue goes to the ready state,
 * all internal indexes are reset and the queue is ready to transmit.
 *   Pointer to Ethernet device structure.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_tx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
        if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
                DRV_LOG(ERR, "Hairpin queue can't be started");
        if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
        if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
                ret = mlx5_mp_os_req_queue_control(dev, idx,
                                                   MLX5_MP_REQ_QUEUE_TX_START);
                ret = mlx5_tx_queue_start_primary(dev, idx);
 * Tx queue presetup checks.
 *   Pointer to Ethernet device structure.
 *   Number of descriptors to configure in queue.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
        struct mlx5_priv *priv = dev->data->dev_private;

        if (*desc <= MLX5_TX_COMP_THRESH) {
                        "port %u number of descriptors requested for Tx queue"
                        " %u must be higher than MLX5_TX_COMP_THRESH, using %u"
                        " instead of %u", dev->data->port_id, idx,
                        MLX5_TX_COMP_THRESH + 1, *desc);
                *desc = MLX5_TX_COMP_THRESH + 1;
        if (!rte_is_power_of_2(*desc)) {
                *desc = 1 << log2above(*desc);
                        "port %u increased number of descriptors in Tx queue"
                        " %u to the next power of two (%d)",
                        dev->data->port_id, idx, *desc);
        DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
                dev->data->port_id, idx, *desc);
        if (idx >= priv->txqs_n) {
                DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
                        dev->data->port_id, idx, priv->txqs_n);
                rte_errno = EOVERFLOW;
        if (!mlx5_txq_releasable(dev, idx)) {
                DRV_LOG(ERR, "port %u unable to release queue index %u",
                        dev->data->port_id, idx);
        mlx5_txq_release(dev, idx);
 * DPDK callback to configure a TX queue.
 *   Pointer to Ethernet device structure.
 *   Number of descriptors to configure in queue.
 *   NUMA socket on which memory must be allocated.
 *   Thresholds parameters.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                    unsigned int socket, const struct rte_eth_txconf *conf)
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_txq_data *txq = (*priv->txqs)[idx];
        struct mlx5_txq_ctrl *txq_ctrl =
                        container_of(txq, struct mlx5_txq_ctrl, txq);

        res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
        txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
                DRV_LOG(ERR, "port %u unable to allocate queue index %u",
                        dev->data->port_id, idx);
        DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
                dev->data->port_id, idx);
        (*priv->txqs)[idx] = &txq_ctrl->txq;
 * DPDK callback to configure a TX hairpin queue.
 *   Pointer to Ethernet device structure.
 *   Number of descriptors to configure in queue.
 * @param[in] hairpin_conf
 *   The hairpin binding configuration.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
                            const struct rte_eth_hairpin_conf *hairpin_conf)
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_txq_data *txq = (*priv->txqs)[idx];
        struct mlx5_txq_ctrl *txq_ctrl =
                        container_of(txq, struct mlx5_txq_ctrl, txq);

        res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
        if (hairpin_conf->peer_count != 1) {
                DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue index %u"
                        " peer count is %u", dev->data->port_id,
                        idx, hairpin_conf->peer_count);
        if (hairpin_conf->peers[0].port == dev->data->port_id) {
                if (hairpin_conf->peers[0].queue >= priv->rxqs_n) {
                        DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue"
                                " index %u, Rx %u is larger than %u",
                                dev->data->port_id, idx,
                                hairpin_conf->peers[0].queue, priv->rxqs_n);
                if (hairpin_conf->manual_bind == 0 ||
                    hairpin_conf->tx_explicit == 0) {
                        DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue"
                                " index %u peer port %u with attributes %u %u",
                                dev->data->port_id, idx,
                                hairpin_conf->peers[0].port,
                                hairpin_conf->manual_bind,
                                hairpin_conf->tx_explicit);
        txq_ctrl = mlx5_txq_hairpin_new(dev, idx, desc, hairpin_conf);
                DRV_LOG(ERR, "port %u unable to allocate queue index %u",
                        dev->data->port_id, idx);
        DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
                dev->data->port_id, idx);
        (*priv->txqs)[idx] = &txq_ctrl->txq;
        dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
 * DPDK callback to release a TX queue.
 *   Pointer to Ethernet device structure.
 *   Transmit queue index.
mlx5_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
        struct mlx5_txq_data *txq = dev->data->tx_queues[qid];

        DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
                dev->data->port_id, qid);
        mlx5_txq_release(dev, qid);
 * Remap UAR register of a Tx queue for secondary process.
 * The remapped address is stored in the table in the process private
 * structure of the device, indexed by queue index.
 *   Pointer to Tx queue control structure.
 *   Verbs file descriptor to map UAR pages.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
txq_uar_init_secondary(struct mlx5_txq_ctrl *txq_ctrl, int fd)
        struct mlx5_priv *priv = txq_ctrl->priv;
        struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
        struct mlx5_proc_priv *primary_ppriv = priv->sh->pppriv;
        struct mlx5_txq_data *txq = &txq_ctrl->txq;
        const size_t page_size = rte_mem_page_size();

        if (page_size == (size_t)-1) {
                DRV_LOG(ERR, "Failed to get mem page size");
        if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
         * As in rdma-core, UARs are mapped at OS page size granularity.
         * Refer to the libmlx5 function mlx5_init_context().
        uar_va = (uintptr_t)primary_ppriv->uar_table[txq->idx].db;
        offset = uar_va & (page_size - 1); /* Offset in page. */
        addr = rte_mem_map(NULL, page_size, RTE_PROT_WRITE, RTE_MAP_SHARED,
                           fd, txq_ctrl->uar_mmap_offset);
                DRV_LOG(ERR, "Port %u mmap failed for BF reg of txq %u.",
                        txq->port_id, txq->idx);
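        /*
         * The mapping is page aligned; add the in-page offset back so the
         * doorbell address matches the one used by the primary process.
         */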
        addr = RTE_PTR_ADD(addr, offset);
        ppriv->uar_table[txq->idx].db = addr;
        ppriv->uar_table[txq->idx].sl_p =
                        primary_ppriv->uar_table[txq->idx].sl_p;
 * Unmap UAR register of a Tx queue for secondary process.
 *   Pointer to Tx queue control structure.
txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl)
        struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(txq_ctrl->priv));
        const size_t page_size = rte_mem_page_size();

        if (page_size == (size_t)-1) {
                DRV_LOG(ERR, "Failed to get mem page size");
        if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
        addr = ppriv->uar_table[txq_ctrl->txq.idx].db;
        rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
 * Deinitialize Tx UAR registers for secondary process.
 *   Pointer to Ethernet device.
mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev)
        struct mlx5_proc_priv *ppriv = (struct mlx5_proc_priv *)
                                       dev->process_private;
        const size_t page_size = rte_mem_page_size();

        if (page_size == (size_t)-1) {
                DRV_LOG(ERR, "Failed to get mem page size");
        MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
        for (i = 0; i != ppriv->uar_table_sz; ++i) {
                if (!ppriv->uar_table[i].db)
                addr = ppriv->uar_table[i].db;
                rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
 * Initialize Tx UAR registers for secondary process.
 *   Pointer to Ethernet device.
 *   Verbs file descriptor to map UAR pages.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_txq_data *txq;
        struct mlx5_txq_ctrl *txq_ctrl;

        MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
        for (i = 0; i != priv->txqs_n; ++i) {
                if (!(*priv->txqs)[i])
                txq = (*priv->txqs)[i];
                txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
                if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
                MLX5_ASSERT(txq->idx == (uint16_t)i);
                ret = txq_uar_init_secondary(txq_ctrl, fd);
                if (!(*priv->txqs)[i])
                txq = (*priv->txqs)[i];
                txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
                txq_uar_uninit_secondary(txq_ctrl);
 * Verify the Verbs Tx queue list is empty.
 *   Pointer to Ethernet device.
 *   The number of objects not released.
mlx5_txq_obj_verify(struct rte_eth_dev *dev)
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_txq_obj *txq_obj;

        LIST_FOREACH(txq_obj, &priv->txqsobj, next) {
                DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
                        dev->data->port_id, txq_obj->txq_ctrl->txq.idx);
 * Calculate the total number of WQEBBs for a Tx queue.
 * Simplified version of calc_sq_size() in rdma-core.
 *   Pointer to Tx queue control structure.
 *   The number of WQEBBs.
txq_calc_wqebb_cnt(struct mlx5_txq_ctrl *txq_ctrl)
        unsigned int wqe_size;
        const unsigned int desc = 1 << txq_ctrl->txq.elts_n;

        wqe_size = MLX5_WQE_CSEG_SIZE +
                   MLX5_ESEG_MIN_INLINE_SIZE +
                   txq_ctrl->max_inline_data;
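        /*
         * wqe_size is the SQ space a single descriptor may consume; round
         * the total up to a power of two and convert it to 64-byte WQEBB
         * units.
         */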
        return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;
 * Calculate the maximal inline data size for a Tx queue.
 *   Pointer to Tx queue control structure.
 *   The maximal inline data size.
txq_calc_inline_max(struct mlx5_txq_ctrl *txq_ctrl)
        const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
        struct mlx5_priv *priv = txq_ctrl->priv;
        unsigned int wqe_size;
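        /*
         * max_qp_wr caps the total number of WQEBBs in the SQ; dividing it
         * by the descriptor count gives the per-packet WQEBB budget.
         */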
        wqe_size = priv->sh->device_attr.max_qp_wr / desc;
         * This calculation is derived from the source of
         * mlx5_calc_send_wqe() in the rdma-core library.
        wqe_size = wqe_size * MLX5_WQE_SIZE -
                   MLX5_DSEG_MIN_INLINE_SIZE;
 * Set Tx queue parameters from device configuration.
 *   Pointer to Tx queue control structure.
txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
        struct mlx5_priv *priv = txq_ctrl->priv;
        struct mlx5_dev_config *config = &priv->config;
        unsigned int inlen_send; /* Inline data for ordinary SEND. */
        unsigned int inlen_empw; /* Inline data for enhanced MPW. */
        unsigned int inlen_mode; /* Minimal required inline data. */
        unsigned int txqs_inline; /* Min Tx queues to enable inline. */
        uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
        bool tso = txq_ctrl->txq.offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
                                             RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
                                             RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
                                             RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
                                             RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);

        txq_ctrl->txq.fast_free =
                !!((txq_ctrl->txq.offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
                   !(txq_ctrl->txq.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) &&
                   !config->mprq.enabled);
        if (config->txqs_inline == MLX5_ARG_UNSET)
#if defined(RTE_ARCH_ARM64)
                (priv->pci_dev && priv->pci_dev->id.device_id ==
                 PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) ?
                        MLX5_INLINE_MAX_TXQS_BLUEFIELD :
                        MLX5_INLINE_MAX_TXQS;
                txqs_inline = (unsigned int)config->txqs_inline;
        inlen_send = (config->txq_inline_max == MLX5_ARG_UNSET) ?
                     MLX5_SEND_DEF_INLINE_LEN :
                     (unsigned int)config->txq_inline_max;
        inlen_empw = (config->txq_inline_mpw == MLX5_ARG_UNSET) ?
                     MLX5_EMPW_DEF_INLINE_LEN :
                     (unsigned int)config->txq_inline_mpw;
        inlen_mode = (config->txq_inline_min == MLX5_ARG_UNSET) ?
                     0 : (unsigned int)config->txq_inline_min;
        if (config->mps != MLX5_MPW_ENHANCED && config->mps != MLX5_MPW)
         * If a minimal amount of data to inline is requested
         * we MUST enable inlining. This is the case for ConnectX-4
         * which usually requires L2 inlined for correct operation
         * and ConnectX-4 Lx which requires L2-L4 inlined to
         * support E-Switch Flows.
                if (inlen_mode <= MLX5_ESEG_MIN_INLINE_SIZE) {
                         * Optimize minimal inlining for single
                         * segment packets to fill one WQEBB
                        temp = MLX5_ESEG_MIN_INLINE_SIZE;
                        temp = inlen_mode - MLX5_ESEG_MIN_INLINE_SIZE;
                        temp = RTE_ALIGN(temp, MLX5_WSEG_SIZE) +
                               MLX5_ESEG_MIN_INLINE_SIZE;
                        temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
                if (temp != inlen_mode) {
                                "port %u minimal required inline setting"
                                " aligned from %u to %u",
                                PORT_ID(priv), inlen_mode, temp);
         * If the port is configured to support VLAN insertion and the device
         * does not support this feature by HW (for NICs before ConnectX-5
         * or in case the wqe_vlan_insert flag is not set) we must enable
         * data inline on all queues because it is supported by single
        txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
        vlan_inline = (dev_txoff & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) &&
                      !config->hw_vlan_insert;
         * If there are too few Tx queues, data inlining is disabled
         * entirely to save CPU cycles.
        if (inlen_send && priv->txqs_n >= txqs_inline) {
                 * The data sent with ordinary MLX5_OPCODE_SEND
                 * may be inlined in Ethernet Segment, align the
                 * length accordingly to fit entire WQEBBs.
                temp = RTE_MAX(inlen_send,
                               MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE);
                temp -= MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
                temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
                temp += MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
                temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
                               MLX5_ESEG_MIN_INLINE_SIZE -
                               MLX5_WQE_DSEG_SIZE * 2);
                temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
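                /* The aligned value must still honor the mandatory minimum. */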
                temp = RTE_MAX(temp, inlen_mode);
                if (temp != inlen_send) {
                                "port %u ordinary send inline setting"
                                " aligned from %u to %u",
                                PORT_ID(priv), inlen_send, temp);
                 * Not aligned to cache lines, but to WQEs.
                 * The first bytes of data (initial alignment)
                 * are copied explicitly at the beginning of
                 * the inlining buffer in the Ethernet Segment.
                MLX5_ASSERT(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
                MLX5_ASSERT(inlen_send <= MLX5_WQE_SIZE_MAX +
                                          MLX5_ESEG_MIN_INLINE_SIZE -
                                          MLX5_WQE_DSEG_SIZE * 2);
        } else if (inlen_mode) {
                 * If minimal inlining is requested we must
                 * enable inlining in general, despite the
                 * number of configured queues. Ignore the
                 * txq_inline_max devarg, this is not
                 * full-featured inline.
                inlen_send = inlen_mode;
        } else if (vlan_inline) {
                 * Hardware does not report offload for
                 * VLAN insertion, so we must enable data inline
                 * to implement the feature in software.
                inlen_send = MLX5_ESEG_MIN_INLINE_SIZE;
        txq_ctrl->txq.inlen_send = inlen_send;
        txq_ctrl->txq.inlen_mode = inlen_mode;
        txq_ctrl->txq.inlen_empw = 0;
        if (inlen_send && inlen_empw && priv->txqs_n >= txqs_inline) {
                 * The data sent with MLX5_OPCODE_ENHANCED_MPSW
                 * may be inlined in Data Segment, align the
                 * length accordingly to fit entire WQEBBs.
                temp = RTE_MAX(inlen_empw,
                               MLX5_WQE_SIZE + MLX5_DSEG_MIN_INLINE_SIZE);
                temp -= MLX5_DSEG_MIN_INLINE_SIZE;
                temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
                temp += MLX5_DSEG_MIN_INLINE_SIZE;
                temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
                               MLX5_DSEG_MIN_INLINE_SIZE -
                temp = RTE_MIN(temp, MLX5_EMPW_MAX_INLINE_LEN);
                if (temp != inlen_empw) {
                                "port %u enhanced empw inline setting"
                                " aligned from %u to %u",
                                PORT_ID(priv), inlen_empw, temp);
                MLX5_ASSERT(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
                MLX5_ASSERT(inlen_empw <= MLX5_WQE_SIZE_MAX +
                                          MLX5_DSEG_MIN_INLINE_SIZE -
                txq_ctrl->txq.inlen_empw = inlen_empw;
        txq_ctrl->max_inline_data = RTE_MAX(inlen_send, inlen_empw);
                txq_ctrl->max_tso_header = MLX5_MAX_TSO_HEADER;
                txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->max_inline_data,
                                                    MLX5_MAX_TSO_HEADER);
                txq_ctrl->txq.tso_en = 1;
        if (((RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO & txq_ctrl->txq.offloads) &&
             (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)) |
            ((RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO & txq_ctrl->txq.offloads) &&
             (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_GRE_CAP)) |
            ((RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO & txq_ctrl->txq.offloads) &&
             (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)) |
            (config->swp & MLX5_SW_PARSING_TSO_CAP))
                txq_ctrl->txq.tunnel_en = 1;
        txq_ctrl->txq.swp_en = (((RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
                                  RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO) &
                                 txq_ctrl->txq.offloads) && (config->swp &
                                 MLX5_SW_PARSING_TSO_CAP)) |
                               ((RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM &
                                 txq_ctrl->txq.offloads) && (config->swp &
                                 MLX5_SW_PARSING_CSUM_CAP));
 * Adjust Tx queue data inline parameters for large queue sizes.
 * The data inline feature requires multiple WQEs to fit a packet,
 * and if a large number of Tx descriptors is requested by the application
 * the total WQE amount may exceed the hardware capabilities. If the
 * default inline settings are used we can try to adjust them to
 * meet the hardware requirements without exceeding the queue size.
 *   Pointer to Tx queue control structure.
 *   Zero on success, otherwise the parameters cannot be adjusted.
txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
        struct mlx5_priv *priv = txq_ctrl->priv;
        struct mlx5_dev_config *config = &priv->config;
        unsigned int max_inline;
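
        /*
         * max_inline is the largest inline length the queue size budget
         * allows; each configured inline parameter is checked against it.
         */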
        max_inline = txq_calc_inline_max(txq_ctrl);
        if (!txq_ctrl->txq.inlen_send) {
                 * Inline data feature is not engaged at all.
                 * There is nothing to adjust.
        if (txq_ctrl->max_inline_data <= max_inline) {
                 * The requested inline data length does not
                 * exceed queue capabilities.
        if (txq_ctrl->txq.inlen_mode > max_inline) {
                        "minimal data inline requirements (%u) are not"
                        " satisfied (%u) on port %u, try a smaller"
                        " Tx queue size (%d)",
                        txq_ctrl->txq.inlen_mode, max_inline,
                        priv->dev_data->port_id,
                        priv->sh->device_attr.max_qp_wr);
        if (txq_ctrl->txq.inlen_send > max_inline &&
            config->txq_inline_max != MLX5_ARG_UNSET &&
            config->txq_inline_max > (int)max_inline) {
                        "txq_inline_max requirements (%u) are not"
                        " satisfied (%u) on port %u, try a smaller"
                        " Tx queue size (%d)",
                        txq_ctrl->txq.inlen_send, max_inline,
                        priv->dev_data->port_id,
                        priv->sh->device_attr.max_qp_wr);
        if (txq_ctrl->txq.inlen_empw > max_inline &&
            config->txq_inline_mpw != MLX5_ARG_UNSET &&
            config->txq_inline_mpw > (int)max_inline) {
                        "txq_inline_mpw requirements (%u) are not"
                        " satisfied (%u) on port %u, try a smaller"
                        " Tx queue size (%d)",
                        txq_ctrl->txq.inlen_empw, max_inline,
                        priv->dev_data->port_id,
                        priv->sh->device_attr.max_qp_wr);
        if (txq_ctrl->txq.tso_en && max_inline < MLX5_MAX_TSO_HEADER) {
                        "tso header inline requirements (%u) are not"
                        " satisfied (%u) on port %u, try a smaller"
                        " Tx queue size (%d)",
                        MLX5_MAX_TSO_HEADER, max_inline,
                        priv->dev_data->port_id,
                        priv->sh->device_attr.max_qp_wr);
        if (txq_ctrl->txq.inlen_send > max_inline) {
                        "adjust txq_inline_max (%u->%u)"
                        " due to large Tx queue on port %u",
                        txq_ctrl->txq.inlen_send, max_inline,
                        priv->dev_data->port_id);
                txq_ctrl->txq.inlen_send = max_inline;
        if (txq_ctrl->txq.inlen_empw > max_inline) {
                        "adjust txq_inline_mpw (%u->%u)"
                        " due to large Tx queue on port %u",
                        txq_ctrl->txq.inlen_empw, max_inline,
                        priv->dev_data->port_id);
                txq_ctrl->txq.inlen_empw = max_inline;
        txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->txq.inlen_send,
                                            txq_ctrl->txq.inlen_empw);
        MLX5_ASSERT(txq_ctrl->max_inline_data <= max_inline);
        MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= max_inline);
        MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
        MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw ||
                    !txq_ctrl->txq.inlen_empw);
 * Create a DPDK Tx queue.
 *   Pointer to Ethernet device.
 *   Number of descriptors to configure in queue.
 *   NUMA socket on which memory must be allocated.
 *   Thresholds parameters.
 *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
struct mlx5_txq_ctrl *
mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
             unsigned int socket, const struct rte_eth_txconf *conf)
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_txq_ctrl *tmpl;
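
        /*
         * A single allocation holds both the control structure and the
         * elts[] mbuf ring that trails struct mlx5_txq_data.
         */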
        tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
                           desc * sizeof(struct rte_mbuf *), 0, socket);
        if (mlx5_mr_ctrl_init(&tmpl->txq.mr_ctrl,
                              &priv->sh->cdev->mr_scache.dev_gen, socket)) {
                /* rte_errno is already set. */
        MLX5_ASSERT(desc > MLX5_TX_COMP_THRESH);
        tmpl->txq.offloads = conf->offloads |
                             dev->data->dev_conf.txmode.offloads;
        tmpl->socket = socket;
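        /*
         * desc is a power of two after pre-setup, so elts_m can serve as
         * the index mask for the elts ring.
         */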
        tmpl->txq.elts_n = log2above(desc);
        tmpl->txq.elts_s = desc;
        tmpl->txq.elts_m = desc - 1;
        tmpl->txq.port_id = dev->data->port_id;
        tmpl->txq.idx = idx;
        txq_set_params(tmpl);
        if (txq_adjust_params(tmpl))
        if (txq_calc_wqebb_cnt(tmpl) >
            priv->sh->device_attr.max_qp_wr) {
                        "port %u Tx WQEBB count (%d) exceeds the limit (%d),"
                        " try smaller queue size",
                        dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
                        priv->sh->device_attr.max_qp_wr);
        __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
        tmpl->type = MLX5_TXQ_TYPE_STANDARD;
        LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
        mlx5_mr_btree_free(&tmpl->txq.mr_ctrl.cache_bh);
 * Create a DPDK Tx hairpin queue.
 *   Pointer to Ethernet device.
 *   Number of descriptors to configure in queue.
 * @param hairpin_conf
 *   The hairpin configuration.
 *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
struct mlx5_txq_ctrl *
mlx5_txq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                     const struct rte_eth_hairpin_conf *hairpin_conf)
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_txq_ctrl *tmpl;

        tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
        tmpl->socket = SOCKET_ID_ANY;
        tmpl->txq.elts_n = log2above(desc);
        tmpl->txq.port_id = dev->data->port_id;
        tmpl->txq.idx = idx;
        tmpl->hairpin_conf = *hairpin_conf;
        tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
        __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
        LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
 *   Pointer to Ethernet device.
 *   A pointer to the queue if it exists.
struct mlx5_txq_ctrl *
mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
        struct mlx5_txq_ctrl *ctrl = NULL;

                ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
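                /* Take a reference; the caller pairs it with mlx5_txq_release(). */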
                __atomic_fetch_add(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
 * Release a Tx queue.
 *   Pointer to Ethernet device.
 *   1 while a reference on it exists, 0 when freed.
mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_txq_ctrl *txq_ctrl;

        if (priv->txqs == NULL || (*priv->txqs)[idx] == NULL)
        txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
        if (__atomic_sub_fetch(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
        if (txq_ctrl->obj) {
                priv->obj_ops.txq_obj_release(txq_ctrl->obj);
                LIST_REMOVE(txq_ctrl->obj, next);
                mlx5_free(txq_ctrl->obj);
                txq_ctrl->obj = NULL;
        if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD) {
                if (txq_ctrl->txq.fcqs) {
                        mlx5_free(txq_ctrl->txq.fcqs);
                        txq_ctrl->txq.fcqs = NULL;
                txq_free_elts(txq_ctrl);
                dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
        if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
                if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD)
                        mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
                LIST_REMOVE(txq_ctrl, next);
                mlx5_free(txq_ctrl);
                (*priv->txqs)[idx] = NULL;
 * Verify if the queue can be released.
 *   Pointer to Ethernet device.
 *   1 if the queue can be released.
mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_txq_ctrl *txq;

        if (!(*priv->txqs)[idx])
        txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
        return (__atomic_load_n(&txq->refcnt, __ATOMIC_RELAXED) == 1);
 * Verify the Tx queue list is empty.
 *   Pointer to Ethernet device.
 *   The number of objects not released.
mlx5_txq_verify(struct rte_eth_dev *dev)
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_txq_ctrl *txq_ctrl;

        LIST_FOREACH(txq_ctrl, &priv->txqsctrl, next) {
                DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
                        dev->data->port_id, txq_ctrl->txq.idx);
 * Set the Tx queue dynamic timestamp (mask and offset).
 *   Pointer to the Ethernet device structure.
mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev)
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
        struct mlx5_txq_data *data;

        nbit = rte_mbuf_dynflag_lookup
                        (RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
        off = rte_mbuf_dynfield_lookup
                        (RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
        if (nbit >= 0 && off >= 0 && sh->txpp.refcnt)
                mask = 1ULL << nbit;
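        /*
         * The mask stays zero when the dynamic flag/field are not
         * registered or Tx packet pacing is off, so the queues then
         * ignore per-packet timestamps.
         */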
        for (i = 0; i != priv->txqs_n; ++i) {
                data = (*priv->txqs)[i];
                data->ts_mask = mask;
                data->ts_offset = off;