1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
14 #include <rte_malloc.h>
15 #include <ethdev_driver.h>
16 #include <rte_common.h>
17 #include <rte_eal_paging.h>
19 #include <mlx5_common.h>
20 #include <mlx5_common_mr.h>
21 #include <mlx5_malloc.h>
23 #include "mlx5_defs.h"
24 #include "mlx5_utils.h"
27 #include "mlx5_rxtx.h"
28 #include "mlx5_autoconf.h"
 * Allocate Tx queue elements.
 * Pointer to Tx queue structure.
37 txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
39 const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
42 for (i = 0; (i != elts_n); ++i)
43 txq_ctrl->txq.elts[i] = NULL;
44 DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
45 PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx, elts_n);
46 txq_ctrl->txq.elts_head = 0;
47 txq_ctrl->txq.elts_tail = 0;
48 txq_ctrl->txq.elts_comp = 0;
 * Free Tx queue elements.
 * Pointer to Tx queue structure.
58 txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
60 const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
61 const uint16_t elts_m = elts_n - 1;
62 uint16_t elts_head = txq_ctrl->txq.elts_head;
63 uint16_t elts_tail = txq_ctrl->txq.elts_tail;
64 struct rte_mbuf *(*elts)[elts_n] = &txq_ctrl->txq.elts;
66 DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
67 PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx);
68 txq_ctrl->txq.elts_head = 0;
69 txq_ctrl->txq.elts_tail = 0;
70 txq_ctrl->txq.elts_comp = 0;
72 while (elts_tail != elts_head) {
73 struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
75 MLX5_ASSERT(elt != NULL);
76 rte_pktmbuf_free_seg(elt);
77 #ifdef RTE_LIBRTE_MLX5_DEBUG
79 memset(&(*elts)[elts_tail & elts_m],
81 sizeof((*elts)[elts_tail & elts_m]));
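/*
 * Illustrative sketch (not part of the driver): the loop above walks the
 * element ring with free-running 16-bit head/tail counters and a
 * power-of-two mask, so the counters never have to be wrapped explicitly.
 * A minimal standalone version of the same technique, with hypothetical
 * names:
 */
static __rte_unused void
example_ring_drain(struct rte_mbuf **ring, uint16_t n_pow2,
		   uint16_t head, uint16_t tail)
{
	const uint16_t mask = n_pow2 - 1;

	while (tail != head) {
		/* Counter comparison stays correct across wrap-around. */
		rte_pktmbuf_free_seg(ring[tail & mask]);
		ring[tail & mask] = NULL;
		++tail;
	}
}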
88 * Returns the per-port supported offloads.
91 * Pointer to Ethernet device.
94 * Supported Tx offloads.
97 mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
99 struct mlx5_priv *priv = dev->data->dev_private;
100 uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
101 DEV_TX_OFFLOAD_VLAN_INSERT);
102 struct mlx5_dev_config *config = &priv->config;
105 offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
106 DEV_TX_OFFLOAD_UDP_CKSUM |
107 DEV_TX_OFFLOAD_TCP_CKSUM);
109 offloads |= DEV_TX_OFFLOAD_TCP_TSO;
111 offloads |= DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP;
114 offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
116 offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
117 DEV_TX_OFFLOAD_UDP_TNL_TSO);
119 if (config->tunnel_en) {
121 offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
123 offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
124 DEV_TX_OFFLOAD_GRE_TNL_TSO |
125 DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
127 if (!config->mprq.enabled)
128 offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
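/*
 * Illustrative sketch (not part of the driver): an application observes
 * the capability mask assembled above through rte_eth_dev_info_get() and
 * should request only a subset of it. Hypothetical helper:
 */
static __rte_unused int
example_validate_tx_offloads(uint16_t port_id, uint64_t requested)
{
	struct rte_eth_dev_info dev_info;
	int ret = rte_eth_dev_info_get(port_id, &dev_info);

	if (ret != 0)
		return ret;
	/* Reject any offload the port does not advertise. */
	return (requested & ~dev_info.tx_offload_capa) ? -EINVAL : 0;
}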
132 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
134 txq_sync_cq(struct mlx5_txq_data *txq)
136 volatile struct mlx5_cqe *cqe;
141 cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
142 ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
143 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
144 if (likely(ret != MLX5_CQE_STATUS_ERR)) {
145 /* No new CQEs in completion queue. */
146 MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
152 /* Move all CQEs to HW ownership. */
153 for (i = 0; i < txq->cqe_s; i++) {
155 cqe->op_own = MLX5_CQE_INVALIDATE;
157 /* Resync CQE and WQE (WQ in reset state). */
159 *txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
160 txq->cq_pi = txq->cq_ci;
165 * Tx queue stop. Device queue goes to the idle state,
166 * all involved mbufs are freed from elts/WQ.
169 * Pointer to Ethernet device structure.
174 * 0 on success, a negative errno value otherwise and rte_errno is set.
177 mlx5_tx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
179 struct mlx5_priv *priv = dev->data->dev_private;
180 struct mlx5_txq_data *txq = (*priv->txqs)[idx];
181 struct mlx5_txq_ctrl *txq_ctrl =
182 container_of(txq, struct mlx5_txq_ctrl, txq);
185 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
186 /* Move QP to RESET state. */
187 ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj, MLX5_TXQ_MOD_RDY2RST,
188 (uint8_t)priv->dev_port);
191 /* Handle all send completions. */
193 /* Free elts stored in the SQ. */
194 txq_free_elts(txq_ctrl);
	/* Prevent writing new packets to the SQ by leaving no free WQEs. */
196 txq->wqe_ci = txq->wqe_s;
199 /* Set the actual queue state. */
200 dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
205 * Tx queue stop. Device queue goes to the idle state,
206 * all involved mbufs are freed from elts/WQ.
209 * Pointer to Ethernet device structure.
214 * 0 on success, a negative errno value otherwise and rte_errno is set.
217 mlx5_tx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
221 if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
222 DRV_LOG(ERR, "Hairpin queue can't be stopped");
226 if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
228 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
229 ret = mlx5_mp_os_req_queue_control(dev, idx,
230 MLX5_MP_REQ_QUEUE_TX_STOP);
232 ret = mlx5_tx_queue_stop_primary(dev, idx);
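/*
 * Illustrative sketch (not part of the driver): applications reach the
 * stop/start handlers above through the generic ethdev API; the
 * primary/secondary dispatch is transparent to them. Hypothetical usage:
 */
static __rte_unused int
example_restart_tx_queue(uint16_t port_id, uint16_t queue_id)
{
	int ret = rte_eth_dev_tx_queue_stop(port_id, queue_id);

	if (ret != 0)
		return ret;
	/* The queue is idle here; reconfigure as needed, then restart. */
	return rte_eth_dev_tx_queue_start(port_id, queue_id);
}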
 * Tx queue start. Device queue goes to the ready state,
 * the WQ pointers are reset and transmission can be resumed.
242 * Pointer to Ethernet device structure.
247 * 0 on success, a negative errno value otherwise and rte_errno is set.
250 mlx5_tx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
252 struct mlx5_priv *priv = dev->data->dev_private;
253 struct mlx5_txq_data *txq = (*priv->txqs)[idx];
254 struct mlx5_txq_ctrl *txq_ctrl =
255 container_of(txq, struct mlx5_txq_ctrl, txq);
258 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
259 ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj,
260 MLX5_TXQ_MOD_RST2RDY,
261 (uint8_t)priv->dev_port);
264 txq_ctrl->txq.wqe_ci = 0;
265 txq_ctrl->txq.wqe_pi = 0;
266 txq_ctrl->txq.elts_comp = 0;
267 /* Set the actual queue state. */
268 dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
 * Tx queue start. Device queue goes to the ready state,
 * the WQ pointers are reset and transmission can be resumed.
277 * Pointer to Ethernet device structure.
282 * 0 on success, a negative errno value otherwise and rte_errno is set.
285 mlx5_tx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
289 if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
290 DRV_LOG(ERR, "Hairpin queue can't be started");
294 if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
296 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
297 ret = mlx5_mp_os_req_queue_control(dev, idx,
298 MLX5_MP_REQ_QUEUE_TX_START);
300 ret = mlx5_tx_queue_start_primary(dev, idx);
 * Tx queue pre-setup checks.
309 * Pointer to Ethernet device structure.
313 * Number of descriptors to configure in queue.
316 * 0 on success, a negative errno value otherwise and rte_errno is set.
319 mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
321 struct mlx5_priv *priv = dev->data->dev_private;
323 if (*desc <= MLX5_TX_COMP_THRESH) {
325 "port %u number of descriptors requested for Tx queue"
326 " %u must be higher than MLX5_TX_COMP_THRESH, using %u"
327 " instead of %u", dev->data->port_id, idx,
328 MLX5_TX_COMP_THRESH + 1, *desc);
329 *desc = MLX5_TX_COMP_THRESH + 1;
331 if (!rte_is_power_of_2(*desc)) {
332 *desc = 1 << log2above(*desc);
334 "port %u increased number of descriptors in Tx queue"
335 " %u to the next power of two (%d)",
336 dev->data->port_id, idx, *desc);
338 DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
339 dev->data->port_id, idx, *desc);
340 if (idx >= priv->txqs_n) {
341 DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
342 dev->data->port_id, idx, priv->txqs_n);
343 rte_errno = EOVERFLOW;
346 if (!mlx5_txq_releasable(dev, idx)) {
348 DRV_LOG(ERR, "port %u unable to release queue index %u",
349 dev->data->port_id, idx);
352 mlx5_txq_release(dev, idx);
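/*
 * Illustrative sketch (not part of the driver): the normalization applied
 * above, in isolation. For example, with MLX5_TX_COMP_THRESH == 32, a
 * request for 100 descriptors is kept above the threshold and rounded up
 * to 128. Hypothetical helper:
 */
static __rte_unused uint16_t
example_normalize_desc(uint16_t desc)
{
	if (desc <= MLX5_TX_COMP_THRESH)
		desc = MLX5_TX_COMP_THRESH + 1;
	if (!rte_is_power_of_2(desc))
		desc = 1 << log2above(desc);
	return desc;
}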
 * DPDK callback to configure a Tx queue.
360 * Pointer to Ethernet device structure.
364 * Number of descriptors to configure in queue.
366 * NUMA socket on which memory must be allocated.
368 * Thresholds parameters.
371 * 0 on success, a negative errno value otherwise and rte_errno is set.
374 mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
375 unsigned int socket, const struct rte_eth_txconf *conf)
377 struct mlx5_priv *priv = dev->data->dev_private;
378 struct mlx5_txq_data *txq = (*priv->txqs)[idx];
379 struct mlx5_txq_ctrl *txq_ctrl =
380 container_of(txq, struct mlx5_txq_ctrl, txq);
383 res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
386 txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
388 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
389 dev->data->port_id, idx);
392 DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
393 dev->data->port_id, idx);
394 (*priv->txqs)[idx] = &txq_ctrl->txq;
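/*
 * Illustrative sketch (not part of the driver): the callback above is
 * invoked through the generic API. Hypothetical application usage with
 * assumed queue number and descriptor count:
 */
static __rte_unused int
example_setup_txq(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	int ret = rte_eth_dev_info_get(port_id, &dev_info);

	if (ret != 0)
		return ret;
	/* Queue 0, 512 descriptors, any NUMA socket, default thresholds. */
	return rte_eth_tx_queue_setup(port_id, 0, 512, SOCKET_ID_ANY,
				      &dev_info.default_txconf);
}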
 * DPDK callback to configure a Tx hairpin queue.
402 * Pointer to Ethernet device structure.
406 * Number of descriptors to configure in queue.
407 * @param[in] hairpin_conf
408 * The hairpin binding configuration.
411 * 0 on success, a negative errno value otherwise and rte_errno is set.
414 mlx5_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
416 const struct rte_eth_hairpin_conf *hairpin_conf)
418 struct mlx5_priv *priv = dev->data->dev_private;
419 struct mlx5_txq_data *txq = (*priv->txqs)[idx];
420 struct mlx5_txq_ctrl *txq_ctrl =
421 container_of(txq, struct mlx5_txq_ctrl, txq);
424 res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
427 if (hairpin_conf->peer_count != 1) {
429 DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue index %u"
430 " peer count is %u", dev->data->port_id,
431 idx, hairpin_conf->peer_count);
434 if (hairpin_conf->peers[0].port == dev->data->port_id) {
435 if (hairpin_conf->peers[0].queue >= priv->rxqs_n) {
437 DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue"
438 " index %u, Rx %u is larger than %u",
439 dev->data->port_id, idx,
			hairpin_conf->peers[0].queue, priv->rxqs_n);
444 if (hairpin_conf->manual_bind == 0 ||
445 hairpin_conf->tx_explicit == 0) {
447 DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue"
448 " index %u peer port %u with attributes %u %u",
449 dev->data->port_id, idx,
450 hairpin_conf->peers[0].port,
451 hairpin_conf->manual_bind,
452 hairpin_conf->tx_explicit);
456 txq_ctrl = mlx5_txq_hairpin_new(dev, idx, desc, hairpin_conf);
458 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
459 dev->data->port_id, idx);
462 DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
463 dev->data->port_id, idx);
464 (*priv->txqs)[idx] = &txq_ctrl->txq;
465 dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
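/*
 * Illustrative sketch (not part of the driver): a same-port hairpin
 * binding that satisfies the checks above. Values are assumptions for the
 * example only.
 */
static __rte_unused int
example_setup_hairpin_txq(uint16_t port_id, uint16_t txq_id, uint16_t rxq_id)
{
	struct rte_eth_hairpin_conf conf = {
		.peer_count = 1, /* Exactly one peer is supported. */
	};

	conf.peers[0].port = port_id; /* Same port: no manual bind needed. */
	conf.peers[0].queue = rxq_id; /* Must be a valid Rx queue index. */
	return rte_eth_tx_hairpin_queue_setup(port_id, txq_id, 512, &conf);
}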
 * DPDK callback to release a Tx queue.
 * Generic Tx queue pointer.
476 mlx5_tx_queue_release(void *dpdk_txq)
478 struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
479 struct mlx5_txq_ctrl *txq_ctrl;
480 struct mlx5_priv *priv;
485 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
486 priv = txq_ctrl->priv;
487 for (i = 0; (i != priv->txqs_n); ++i)
488 if ((*priv->txqs)[i] == txq) {
489 DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
490 PORT_ID(priv), txq->idx);
491 mlx5_txq_release(ETH_DEV(priv), i);
497 * Configure the doorbell register non-cached attribute.
500 * Pointer to Tx queue control structure.
505 txq_uar_ncattr_init(struct mlx5_txq_ctrl *txq_ctrl, size_t page_size)
507 struct mlx5_priv *priv = txq_ctrl->priv;
510 txq_ctrl->txq.db_heu = priv->config.dbnc == MLX5_TXDB_HEURISTIC;
511 txq_ctrl->txq.db_nc = 0;
512 /* Check the doorbell register mapping type. */
513 cmd = txq_ctrl->uar_mmap_offset / page_size;
514 cmd >>= MLX5_UAR_MMAP_CMD_SHIFT;
515 cmd &= MLX5_UAR_MMAP_CMD_MASK;
516 if (cmd == MLX5_MMAP_GET_NC_PAGES_CMD)
517 txq_ctrl->txq.db_nc = 1;
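/*
 * Illustrative sketch (not part of the driver): the mmap offset encodes a
 * command in the upper bits of the page number; the decoding above is the
 * usual shift-and-mask extraction. Standalone form, hypothetical helper:
 */
static __rte_unused int
example_is_db_noncached(uintptr_t uar_mmap_offset, size_t page_size)
{
	uintptr_t cmd = uar_mmap_offset / page_size; /* Page number. */

	cmd >>= MLX5_UAR_MMAP_CMD_SHIFT; /* Command bits within it. */
	cmd &= MLX5_UAR_MMAP_CMD_MASK;
	return cmd == MLX5_MMAP_GET_NC_PAGES_CMD;
}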
521 * Initialize Tx UAR registers for primary process.
524 * Pointer to Tx queue control structure.
527 txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl)
529 struct mlx5_priv *priv = txq_ctrl->priv;
530 struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
532 unsigned int lock_idx;
534 const size_t page_size = rte_mem_page_size();
535 if (page_size == (size_t)-1) {
536 DRV_LOG(ERR, "Failed to get mem page size");
540 if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
542 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
544 ppriv->uar_table[txq_ctrl->txq.idx] = txq_ctrl->bf_reg;
545 txq_uar_ncattr_init(txq_ctrl, page_size);
	/* Assign a UAR lock according to the UAR page number. */
548 lock_idx = (txq_ctrl->uar_mmap_offset / page_size) &
549 MLX5_UAR_PAGE_NUM_MASK;
550 txq_ctrl->txq.uar_lock = &priv->sh->uar_lock[lock_idx];
555 * Remap UAR register of a Tx queue for secondary process.
 * The remapped address is stored in a table in the process-private
 * structure of the device, indexed by queue index.
561 * Pointer to Tx queue control structure.
563 * Verbs file descriptor to map UAR pages.
566 * 0 on success, a negative errno value otherwise and rte_errno is set.
569 txq_uar_init_secondary(struct mlx5_txq_ctrl *txq_ctrl, int fd)
571 struct mlx5_priv *priv = txq_ctrl->priv;
572 struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
573 struct mlx5_txq_data *txq = &txq_ctrl->txq;
577 const size_t page_size = rte_mem_page_size();
578 if (page_size == (size_t)-1) {
579 DRV_LOG(ERR, "Failed to get mem page size");
584 if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
	 * As in rdma-core, UARs are mapped at OS page size granularity.
	 * See the libmlx5 function mlx5_init_context().
591 uar_va = (uintptr_t)txq_ctrl->bf_reg;
592 offset = uar_va & (page_size - 1); /* Offset in page. */
593 addr = rte_mem_map(NULL, page_size, RTE_PROT_WRITE, RTE_MAP_SHARED,
594 fd, txq_ctrl->uar_mmap_offset);
597 "port %u mmap failed for BF reg of txq %u",
598 txq->port_id, txq->idx);
602 addr = RTE_PTR_ADD(addr, offset);
603 ppriv->uar_table[txq->idx] = addr;
604 txq_uar_ncattr_init(txq_ctrl, page_size);
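/*
 * Illustrative sketch (not part of the driver): rte_mem_map() operates on
 * whole pages, so the register's offset within its page must be re-applied
 * to the mapped base, as done above. The pattern in isolation, with
 * hypothetical arguments:
 */
static __rte_unused void *
example_map_register(int fd, uintptr_t reg_va, uint64_t mmap_offset,
		     size_t page_size)
{
	uintptr_t off_in_page = reg_va & (page_size - 1);
	void *base = rte_mem_map(NULL, page_size, RTE_PROT_WRITE,
				 RTE_MAP_SHARED, fd, mmap_offset);

	if (base == NULL)
		return NULL;
	return RTE_PTR_ADD(base, off_in_page);
}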
609 * Unmap UAR register of a Tx queue for secondary process.
612 * Pointer to Tx queue control structure.
615 txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl)
617 struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(txq_ctrl->priv));
619 const size_t page_size = rte_mem_page_size();
620 if (page_size == (size_t)-1) {
621 DRV_LOG(ERR, "Failed to get mem page size");
625 if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
627 addr = ppriv->uar_table[txq_ctrl->txq.idx];
628 rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
632 * Deinitialize Tx UAR registers for secondary process.
635 * Pointer to Ethernet device.
638 mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev)
640 struct mlx5_proc_priv *ppriv = (struct mlx5_proc_priv *)
641 dev->process_private;
642 const size_t page_size = rte_mem_page_size();
646 if (page_size == (size_t)-1) {
647 DRV_LOG(ERR, "Failed to get mem page size");
650 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
651 for (i = 0; i != ppriv->uar_table_sz; ++i) {
652 if (!ppriv->uar_table[i])
654 addr = ppriv->uar_table[i];
655 rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
661 * Initialize Tx UAR registers for secondary process.
664 * Pointer to Ethernet device.
666 * Verbs file descriptor to map UAR pages.
669 * 0 on success, a negative errno value otherwise and rte_errno is set.
672 mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
674 struct mlx5_priv *priv = dev->data->dev_private;
675 struct mlx5_txq_data *txq;
676 struct mlx5_txq_ctrl *txq_ctrl;
680 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
681 for (i = 0; i != priv->txqs_n; ++i) {
682 if (!(*priv->txqs)[i])
684 txq = (*priv->txqs)[i];
685 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
686 if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
688 MLX5_ASSERT(txq->idx == (uint16_t)i);
689 ret = txq_uar_init_secondary(txq_ctrl, fd);
697 if (!(*priv->txqs)[i])
699 txq = (*priv->txqs)[i];
700 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
701 txq_uar_uninit_secondary(txq_ctrl);
 * Verify that the Verbs Tx queue list is empty.
710 * Pointer to Ethernet device.
 * The number of objects not released.
716 mlx5_txq_obj_verify(struct rte_eth_dev *dev)
718 struct mlx5_priv *priv = dev->data->dev_private;
720 struct mlx5_txq_obj *txq_obj;
722 LIST_FOREACH(txq_obj, &priv->txqsobj, next) {
723 DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
724 dev->data->port_id, txq_obj->txq_ctrl->txq.idx);
 * Calculate the total number of WQEBBs for a Tx queue.
733 * Simplified version of calc_sq_size() in rdma-core.
736 * Pointer to Tx queue control structure.
 * The number of WQEBBs.
742 txq_calc_wqebb_cnt(struct mlx5_txq_ctrl *txq_ctrl)
744 unsigned int wqe_size;
745 const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
747 wqe_size = MLX5_WQE_CSEG_SIZE +
750 MLX5_ESEG_MIN_INLINE_SIZE +
751 txq_ctrl->max_inline_data;
752 return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;
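/*
 * Illustrative sketch (not part of the driver): with, e.g., a 96B WQE per
 * descriptor and 512 descriptors, 96 * 512 = 49152B is rounded up to the
 * 65536B power of two and divided by the 64B WQEBB, giving 1024 WQEBBs.
 * The computation in generic form (hypothetical helper):
 */
static __rte_unused unsigned int
example_wqebb_cnt(unsigned int wqe_size, unsigned int desc)
{
	return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;
}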
 * Calculate the maximal inline data size for a Tx queue.
759 * Pointer to Tx queue control structure.
762 * The maximal inline data size.
765 txq_calc_inline_max(struct mlx5_txq_ctrl *txq_ctrl)
767 const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
768 struct mlx5_priv *priv = txq_ctrl->priv;
769 unsigned int wqe_size;
771 wqe_size = priv->sh->device_attr.max_qp_wr / desc;
	 * This calculation is derived from the source of
	 * mlx5_calc_send_wqe() in the rdma-core library.
778 wqe_size = wqe_size * MLX5_WQE_SIZE -
783 MLX5_DSEG_MIN_INLINE_SIZE;
788 * Set Tx queue parameters from device configuration.
791 * Pointer to Tx queue control structure.
794 txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
796 struct mlx5_priv *priv = txq_ctrl->priv;
797 struct mlx5_dev_config *config = &priv->config;
798 unsigned int inlen_send; /* Inline data for ordinary SEND.*/
799 unsigned int inlen_empw; /* Inline data for enhanced MPW. */
800 unsigned int inlen_mode; /* Minimal required Inline data. */
801 unsigned int txqs_inline; /* Min Tx queues to enable inline. */
802 uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
803 bool tso = txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
804 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
805 DEV_TX_OFFLOAD_GRE_TNL_TSO |
806 DEV_TX_OFFLOAD_IP_TNL_TSO |
807 DEV_TX_OFFLOAD_UDP_TNL_TSO);
811 txq_ctrl->txq.fast_free =
812 !!((txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
813 !(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
814 !config->mprq.enabled);
815 if (config->txqs_inline == MLX5_ARG_UNSET)
817 #if defined(RTE_ARCH_ARM64)
818 (priv->pci_dev->id.device_id ==
819 PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) ?
820 MLX5_INLINE_MAX_TXQS_BLUEFIELD :
822 MLX5_INLINE_MAX_TXQS;
824 txqs_inline = (unsigned int)config->txqs_inline;
825 inlen_send = (config->txq_inline_max == MLX5_ARG_UNSET) ?
826 MLX5_SEND_DEF_INLINE_LEN :
827 (unsigned int)config->txq_inline_max;
828 inlen_empw = (config->txq_inline_mpw == MLX5_ARG_UNSET) ?
829 MLX5_EMPW_DEF_INLINE_LEN :
830 (unsigned int)config->txq_inline_mpw;
831 inlen_mode = (config->txq_inline_min == MLX5_ARG_UNSET) ?
832 0 : (unsigned int)config->txq_inline_min;
833 if (config->mps != MLX5_MPW_ENHANCED && config->mps != MLX5_MPW)
	 * If a minimal amount of data to inline is requested we MUST
	 * enable inlining. This is the case for ConnectX-4, which usually
	 * requires L2 headers to be inlined to operate correctly, and for
	 * ConnectX-4 Lx, which requires L2-L4 headers to be inlined to
	 * support E-Switch flows.
843 if (inlen_mode <= MLX5_ESEG_MIN_INLINE_SIZE) {
		 * Optimize minimal inlining for single-segment
		 * packets to fill one WQEBB
849 temp = MLX5_ESEG_MIN_INLINE_SIZE;
851 temp = inlen_mode - MLX5_ESEG_MIN_INLINE_SIZE;
852 temp = RTE_ALIGN(temp, MLX5_WSEG_SIZE) +
853 MLX5_ESEG_MIN_INLINE_SIZE;
854 temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
856 if (temp != inlen_mode) {
858 "port %u minimal required inline setting"
859 " aligned from %u to %u",
860 PORT_ID(priv), inlen_mode, temp);
	 * If the port is configured to support VLAN insertion and the
	 * device does not support it in HW (NICs before ConnectX-5, or
	 * when the wqe_vlan_insert flag is not set), we must enable data
	 * inlining on all queues, since the insertion is then done in
	 * software by the single tx_burst routine.
871 txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
872 vlan_inline = (dev_txoff & DEV_TX_OFFLOAD_VLAN_INSERT) &&
873 !config->hw_vlan_insert;
	 * If there are only a few Tx queues, saving CPU cycles is
	 * prioritized and data inlining is disabled entirely.
878 if (inlen_send && priv->txqs_n >= txqs_inline) {
		 * The data sent with an ordinary MLX5_OPCODE_SEND
		 * may be inlined in the Ethernet Segment; align the
		 * length to fit entire WQEBBs.
884 temp = RTE_MAX(inlen_send,
885 MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE);
886 temp -= MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
887 temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
888 temp += MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
889 temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
890 MLX5_ESEG_MIN_INLINE_SIZE -
893 MLX5_WQE_DSEG_SIZE * 2);
894 temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
895 temp = RTE_MAX(temp, inlen_mode);
896 if (temp != inlen_send) {
898 "port %u ordinary send inline setting"
899 " aligned from %u to %u",
900 PORT_ID(priv), inlen_send, temp);
		 * Not aligned to cache lines, but to WQEs.
		 * The first bytes of data (initial alignment)
		 * are copied explicitly at the beginning of
		 * the inlining buffer in the Ethernet Segment.
910 MLX5_ASSERT(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
911 MLX5_ASSERT(inlen_send <= MLX5_WQE_SIZE_MAX +
912 MLX5_ESEG_MIN_INLINE_SIZE -
915 MLX5_WQE_DSEG_SIZE * 2);
916 } else if (inlen_mode) {
		 * If minimal inlining is requested we must
		 * enable inlining in general, regardless of
		 * the number of configured queues. The
		 * txq_inline_max devarg is ignored; this is
		 * not full-featured inlining.
924 inlen_send = inlen_mode;
926 } else if (vlan_inline) {
		 * Hardware does not report the VLAN insertion
		 * offload, so we must enable data inlining to
		 * implement the feature in software.
932 inlen_send = MLX5_ESEG_MIN_INLINE_SIZE;
938 txq_ctrl->txq.inlen_send = inlen_send;
939 txq_ctrl->txq.inlen_mode = inlen_mode;
940 txq_ctrl->txq.inlen_empw = 0;
941 if (inlen_send && inlen_empw && priv->txqs_n >= txqs_inline) {
		 * The data sent with MLX5_OPCODE_ENHANCED_MPSW
		 * may be inlined in the Data Segment; align the
		 * length to fit entire WQEBBs.
947 temp = RTE_MAX(inlen_empw,
948 MLX5_WQE_SIZE + MLX5_DSEG_MIN_INLINE_SIZE);
949 temp -= MLX5_DSEG_MIN_INLINE_SIZE;
950 temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
951 temp += MLX5_DSEG_MIN_INLINE_SIZE;
952 temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
953 MLX5_DSEG_MIN_INLINE_SIZE -
957 temp = RTE_MIN(temp, MLX5_EMPW_MAX_INLINE_LEN);
958 if (temp != inlen_empw) {
960 "port %u enhanced empw inline setting"
961 " aligned from %u to %u",
962 PORT_ID(priv), inlen_empw, temp);
965 MLX5_ASSERT(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
966 MLX5_ASSERT(inlen_empw <= MLX5_WQE_SIZE_MAX +
967 MLX5_DSEG_MIN_INLINE_SIZE -
971 txq_ctrl->txq.inlen_empw = inlen_empw;
973 txq_ctrl->max_inline_data = RTE_MAX(inlen_send, inlen_empw);
975 txq_ctrl->max_tso_header = MLX5_MAX_TSO_HEADER;
976 txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->max_inline_data,
977 MLX5_MAX_TSO_HEADER);
978 txq_ctrl->txq.tso_en = 1;
980 txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
981 txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
982 DEV_TX_OFFLOAD_UDP_TNL_TSO |
983 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
984 txq_ctrl->txq.offloads) && config->swp;
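/*
 * Illustrative sketch (not part of the driver): the inline-length
 * adjustments above all follow one pattern: strip the fixed part that
 * always precedes the inline data, align the rest to whole WQEBBs, then
 * add the fixed part back. In isolation (hypothetical helper):
 */
static __rte_unused unsigned int
example_align_inline_len(unsigned int inlen, unsigned int fixed_part)
{
	unsigned int temp = RTE_MAX(inlen, fixed_part);

	temp -= fixed_part;
	temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
	return temp + fixed_part;
}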
 * Adjust Tx queue data inline parameters for large queue sizes.
 * The data inline feature requires multiple WQEs to fit the packets,
 * and if a large number of Tx descriptors is requested by the
 * application the total WQE amount may exceed the hardware
 * capabilities. If the default inline settings are used we can try
 * to adjust them to meet the hardware requirements without
 * exceeding the queue size.
996 * Pointer to Tx queue control structure.
 * Zero on success, otherwise the parameters cannot be adjusted.
1002 txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
1004 struct mlx5_priv *priv = txq_ctrl->priv;
1005 struct mlx5_dev_config *config = &priv->config;
1006 unsigned int max_inline;
1008 max_inline = txq_calc_inline_max(txq_ctrl);
1009 if (!txq_ctrl->txq.inlen_send) {
		 * The inline data feature is not engaged at all.
		 * There is nothing to adjust.
1016 if (txq_ctrl->max_inline_data <= max_inline) {
1018 * The requested inline data length does not
1019 * exceed queue capabilities.
1023 if (txq_ctrl->txq.inlen_mode > max_inline) {
1025 "minimal data inline requirements (%u) are not"
1026 " satisfied (%u) on port %u, try the smaller"
1027 " Tx queue size (%d)",
1028 txq_ctrl->txq.inlen_mode, max_inline,
1029 priv->dev_data->port_id,
1030 priv->sh->device_attr.max_qp_wr);
1033 if (txq_ctrl->txq.inlen_send > max_inline &&
1034 config->txq_inline_max != MLX5_ARG_UNSET &&
1035 config->txq_inline_max > (int)max_inline) {
1037 "txq_inline_max requirements (%u) are not"
1038 " satisfied (%u) on port %u, try the smaller"
1039 " Tx queue size (%d)",
1040 txq_ctrl->txq.inlen_send, max_inline,
1041 priv->dev_data->port_id,
1042 priv->sh->device_attr.max_qp_wr);
1045 if (txq_ctrl->txq.inlen_empw > max_inline &&
1046 config->txq_inline_mpw != MLX5_ARG_UNSET &&
1047 config->txq_inline_mpw > (int)max_inline) {
1049 "txq_inline_mpw requirements (%u) are not"
1050 " satisfied (%u) on port %u, try the smaller"
1051 " Tx queue size (%d)",
1052 txq_ctrl->txq.inlen_empw, max_inline,
1053 priv->dev_data->port_id,
1054 priv->sh->device_attr.max_qp_wr);
1057 if (txq_ctrl->txq.tso_en && max_inline < MLX5_MAX_TSO_HEADER) {
1059 "tso header inline requirements (%u) are not"
1060 " satisfied (%u) on port %u, try the smaller"
1061 " Tx queue size (%d)",
1062 MLX5_MAX_TSO_HEADER, max_inline,
1063 priv->dev_data->port_id,
1064 priv->sh->device_attr.max_qp_wr);
1067 if (txq_ctrl->txq.inlen_send > max_inline) {
1069 "adjust txq_inline_max (%u->%u)"
1070 " due to large Tx queue on port %u",
1071 txq_ctrl->txq.inlen_send, max_inline,
1072 priv->dev_data->port_id);
1073 txq_ctrl->txq.inlen_send = max_inline;
1075 if (txq_ctrl->txq.inlen_empw > max_inline) {
1077 "adjust txq_inline_mpw (%u->%u)"
1078 "due to large Tx queue on port %u",
1079 txq_ctrl->txq.inlen_empw, max_inline,
1080 priv->dev_data->port_id);
1081 txq_ctrl->txq.inlen_empw = max_inline;
1083 txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->txq.inlen_send,
1084 txq_ctrl->txq.inlen_empw);
1085 MLX5_ASSERT(txq_ctrl->max_inline_data <= max_inline);
1086 MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= max_inline);
1087 MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
1088 MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw ||
1089 !txq_ctrl->txq.inlen_empw);
1097 * Create a DPDK Tx queue.
1100 * Pointer to Ethernet device.
1104 * Number of descriptors to configure in queue.
1106 * NUMA socket on which memory must be allocated.
1108 * Thresholds parameters.
1111 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1113 struct mlx5_txq_ctrl *
1114 mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1115 unsigned int socket, const struct rte_eth_txconf *conf)
1117 struct mlx5_priv *priv = dev->data->dev_private;
1118 struct mlx5_txq_ctrl *tmpl;
1120 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
1121 desc * sizeof(struct rte_mbuf *), 0, socket);
1126 if (mlx5_mr_btree_init(&tmpl->txq.mr_ctrl.cache_bh,
1127 MLX5_MR_BTREE_CACHE_N, socket)) {
1128 /* rte_errno is already set. */
	/* Save a pointer to the global generation number to check for memory events. */
1132 tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->sh->share_cache.dev_gen;
1133 MLX5_ASSERT(desc > MLX5_TX_COMP_THRESH);
1134 tmpl->txq.offloads = conf->offloads |
1135 dev->data->dev_conf.txmode.offloads;
1137 tmpl->socket = socket;
1138 tmpl->txq.elts_n = log2above(desc);
1139 tmpl->txq.elts_s = desc;
1140 tmpl->txq.elts_m = desc - 1;
1141 tmpl->txq.port_id = dev->data->port_id;
1142 tmpl->txq.idx = idx;
1143 txq_set_params(tmpl);
1144 if (txq_adjust_params(tmpl))
1146 if (txq_calc_wqebb_cnt(tmpl) >
1147 priv->sh->device_attr.max_qp_wr) {
1149 "port %u Tx WQEBB count (%d) exceeds the limit (%d),"
1150 " try smaller queue size",
1151 dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
1152 priv->sh->device_attr.max_qp_wr);
1156 __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
1157 tmpl->type = MLX5_TXQ_TYPE_STANDARD;
1158 LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
1161 mlx5_mr_btree_free(&tmpl->txq.mr_ctrl.cache_bh);
1167 * Create a DPDK Tx hairpin queue.
1170 * Pointer to Ethernet device.
1174 * Number of descriptors to configure in queue.
1175 * @param hairpin_conf
1176 * The hairpin configuration.
1179 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1181 struct mlx5_txq_ctrl *
1182 mlx5_txq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1183 const struct rte_eth_hairpin_conf *hairpin_conf)
1185 struct mlx5_priv *priv = dev->data->dev_private;
1186 struct mlx5_txq_ctrl *tmpl;
1188 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
1195 tmpl->socket = SOCKET_ID_ANY;
1196 tmpl->txq.elts_n = log2above(desc);
1197 tmpl->txq.port_id = dev->data->port_id;
1198 tmpl->txq.idx = idx;
1199 tmpl->hairpin_conf = *hairpin_conf;
1200 tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
1201 __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
1202 LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
1210 * Pointer to Ethernet device.
1215 * A pointer to the queue if it exists.
1217 struct mlx5_txq_ctrl *
1218 mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
1220 struct mlx5_priv *priv = dev->data->dev_private;
1221 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
1222 struct mlx5_txq_ctrl *ctrl = NULL;
1225 ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
1226 __atomic_fetch_add(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
1232 * Release a Tx queue.
1235 * Pointer to Ethernet device.
1240 * 1 while a reference on it exists, 0 when freed.
1243 mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
1245 struct mlx5_priv *priv = dev->data->dev_private;
1246 struct mlx5_txq_ctrl *txq_ctrl;
1248 if (!(*priv->txqs)[idx])
1250 txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
1251 if (__atomic_sub_fetch(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
1253 if (txq_ctrl->obj) {
1254 priv->obj_ops.txq_obj_release(txq_ctrl->obj);
1255 LIST_REMOVE(txq_ctrl->obj, next);
1256 mlx5_free(txq_ctrl->obj);
1257 txq_ctrl->obj = NULL;
1259 if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD) {
1260 if (txq_ctrl->txq.fcqs) {
1261 mlx5_free(txq_ctrl->txq.fcqs);
1262 txq_ctrl->txq.fcqs = NULL;
1264 txq_free_elts(txq_ctrl);
1265 dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
1267 if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
1268 if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD)
1269 mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
1270 LIST_REMOVE(txq_ctrl, next);
1271 mlx5_free(txq_ctrl);
1272 (*priv->txqs)[idx] = NULL;
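/*
 * Illustrative sketch (not part of the driver): the get/release pair above
 * implements plain relaxed reference counting; the object is torn down
 * only when the last reference goes away. Skeleton with a hypothetical
 * type:
 */
struct example_refcounted {
	uint32_t refcnt;
};

static __rte_unused void
example_release(struct example_refcounted *obj)
{
	if (__atomic_sub_fetch(&obj->refcnt, 1, __ATOMIC_RELAXED) != 0)
		return; /* Somebody still holds a reference. */
	mlx5_free(obj); /* Last reference dropped: free the object. */
}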
1278 * Verify if the queue can be released.
1281 * Pointer to Ethernet device.
1286 * 1 if the queue can be released.
1289 mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
1291 struct mlx5_priv *priv = dev->data->dev_private;
1292 struct mlx5_txq_ctrl *txq;
1294 if (!(*priv->txqs)[idx])
1296 txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
1297 return (__atomic_load_n(&txq->refcnt, __ATOMIC_RELAXED) == 1);
 * Verify that the Tx queue list is empty.
1304 * Pointer to Ethernet device.
 * The number of objects not released.
1310 mlx5_txq_verify(struct rte_eth_dev *dev)
1312 struct mlx5_priv *priv = dev->data->dev_private;
1313 struct mlx5_txq_ctrl *txq_ctrl;
1316 LIST_FOREACH(txq_ctrl, &priv->txqsctrl, next) {
1317 DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
1318 dev->data->port_id, txq_ctrl->txq.idx);
 * Set the Tx queue dynamic timestamp (mask and offset).
1328 * Pointer to the Ethernet device structure.
1331 mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev)
1333 struct mlx5_priv *priv = dev->data->dev_private;
1334 struct mlx5_dev_ctx_shared *sh = priv->sh;
1335 struct mlx5_txq_data *data;
1340 nbit = rte_mbuf_dynflag_lookup
1341 (RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
1342 off = rte_mbuf_dynfield_lookup
1343 (RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
1344 if (nbit >= 0 && off >= 0 && sh->txpp.refcnt)
1345 mask = 1ULL << nbit;
1346 for (i = 0; i != priv->txqs_n; ++i) {
1347 data = (*priv->txqs)[i];
1351 data->ts_mask = mask;
1352 data->ts_offset = off;
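/*
 * Illustrative sketch (not part of the driver): once the dynamic flag and
 * field are registered, an application schedules a packet by writing the
 * desired transmit time into the dynamic field and setting the flag. The
 * offset and flag bit are assumed to come from the same lookups as above.
 */
static __rte_unused void
example_schedule_mbuf(struct rte_mbuf *mb, int ts_offset, uint64_t ts_flag,
		      uint64_t tx_time)
{
	*RTE_MBUF_DYNFIELD(mb, ts_offset, uint64_t *) = tx_time;
	mb->ol_flags |= ts_flag;
}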