/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common.h>
#include <mlx5_common_mr.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5_utils.h"
#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"

/**
 * Allocate TX queue elements.
 *
 * @param txq_ctrl
 *   Pointer to TX queue structure.
 */
void
txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
{
	const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
	unsigned int i;

	for (i = 0; (i != elts_n); ++i)
		txq_ctrl->txq.elts[i] = NULL;
	DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
		PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx, elts_n);
	txq_ctrl->txq.elts_head = 0;
	txq_ctrl->txq.elts_tail = 0;
	txq_ctrl->txq.elts_comp = 0;
}
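
/*
 * Illustrative sketch (not driver code): elts[] is a power-of-two ring, so
 * the free-running 16-bit head/tail counters are reduced with a mask rather
 * than a modulo, and may wrap past 65535 safely. Assuming elts_n == 10:
 *
 * @code
 *	uint16_t elts_s = 1 << 10;	// 1024 slots
 *	uint16_t elts_m = elts_s - 1;	// mask 0x3ff
 *	uint16_t head = 1030;		// counter beyond the ring size
 *	struct rte_mbuf *m = (*elts)[head & elts_m];	// picks slot 6
 * @endcode
 */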

/**
 * Free TX queue elements.
 *
 * @param txq_ctrl
 *   Pointer to TX queue structure.
 */
void
txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
{
	const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
	const uint16_t elts_m = elts_n - 1;
	uint16_t elts_head = txq_ctrl->txq.elts_head;
	uint16_t elts_tail = txq_ctrl->txq.elts_tail;
	struct rte_mbuf *(*elts)[elts_n] = &txq_ctrl->txq.elts;

	DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
		PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx);
	txq_ctrl->txq.elts_head = 0;
	txq_ctrl->txq.elts_tail = 0;
	txq_ctrl->txq.elts_comp = 0;

	while (elts_tail != elts_head) {
		struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];

		MLX5_ASSERT(elt != NULL);
		rte_pktmbuf_free_seg(elt);
#ifdef RTE_LIBRTE_MLX5_DEBUG
		/* Poisoning. */
		memset(&(*elts)[elts_tail & elts_m],
		       0x77,
		       sizeof((*elts)[elts_tail & elts_m]));
#endif
		++elts_tail;
	}
}

/**
 * Returns the per-port supported offloads.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   Supported Tx offloads.
 */
uint64_t
mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
			     DEV_TX_OFFLOAD_VLAN_INSERT);
	struct mlx5_dev_config *config = &priv->config;

	if (config->hw_csum)
		offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
			     DEV_TX_OFFLOAD_UDP_CKSUM |
			     DEV_TX_OFFLOAD_TCP_CKSUM);
	if (config->tso)
		offloads |= DEV_TX_OFFLOAD_TCP_TSO;
	if (config->tx_pp)
		offloads |= DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP;
	if (config->swp) {
		if (config->hw_csum)
			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
		if (config->tso)
			offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
				     DEV_TX_OFFLOAD_UDP_TNL_TSO);
	}
	if (config->tunnel_en) {
		if (config->hw_csum)
			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
		if (config->tso)
			offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
				     DEV_TX_OFFLOAD_GRE_TNL_TSO |
				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
	}
	return offloads;
}
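
/*
 * Application-side sketch (assuming the DPDK 20.11 ethdev API): the offload
 * set built above is reported through rte_eth_dev_info_get() and can be
 * checked before enabling an offload in the port configuration.
 *
 * @code
 *	struct rte_eth_dev_info dev_info;
 *
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO)
 *		port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_TSO;
 * @endcode
 */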

/* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
static void
txq_sync_cq(struct mlx5_txq_data *txq)
{
	volatile struct mlx5_cqe *cqe;
	int ret, i;

	i = txq->cqe_s;
	do {
		cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
		ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
			if (likely(ret != MLX5_CQE_STATUS_ERR)) {
				/* No new CQEs in completion queue. */
				MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
				break;
			}
		}
		++txq->cq_ci;
	} while (--i);
	/* Move all CQEs to HW ownership. */
	for (i = 0; i < txq->cqe_s; i++) {
		cqe = &txq->cqes[i];
		cqe->op_own = MLX5_CQE_INVALIDATE;
	}
	/* Resync CQE and WQE (WQ in reset state). */
	rte_io_wmb();
	*txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
	rte_io_wmb();
}
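
/*
 * Worked example (hypothetical numbers): with cqe_s == 64 CQEs the index
 * mask cqe_m is 0x3f, so a consumer index cq_ci of 70 selects CQE slot 6.
 * Writing MLX5_CQE_INVALIDATE into op_own hands every slot back to HW, and
 * the doorbell record update publishes the final consumer index:
 *
 * @code
 *	cqe = &txq->cqes[70 & 0x3f];		// slot 6
 *	*txq->cq_db = rte_cpu_to_be_32(70);	// doorbell record, BE32
 * @endcode
 */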

/**
 * Tx queue stop. Device queue goes to the idle state,
 * all involved mbufs are freed from elts/WQ.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Tx queue index.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_tx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq, struct mlx5_txq_ctrl, txq);
	int ret;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/* Move QP to RESET state. */
	if (txq_ctrl->obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_SQ) {
		struct mlx5_devx_modify_sq_attr msq_attr = { 0 };

		/* Change queue state to reset with DevX. */
		msq_attr.sq_state = MLX5_SQC_STATE_RDY;
		msq_attr.state = MLX5_SQC_STATE_RST;
		ret = mlx5_devx_cmd_modify_sq(txq_ctrl->obj->sq_devx,
					      &msq_attr);
		if (ret) {
			DRV_LOG(ERR, "Cannot change the "
				"Tx QP state to RESET %s",
				strerror(errno));
			rte_errno = errno;
			return ret;
		}
	} else {
		struct ibv_qp_attr mod = {
			.qp_state = IBV_QPS_RESET,
			.port_num = (uint8_t)priv->dev_port,
		};
		struct ibv_qp *qp = txq_ctrl->obj->qp;

		/* Change queue state to reset with Verbs. */
		ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
		if (ret) {
			DRV_LOG(ERR, "Cannot change the Tx QP state to RESET "
				"%s", strerror(errno));
			rte_errno = errno;
			return ret;
		}
	}
	/* Handle all send completions. */
	txq_sync_cq(txq);
	/* Free elts stored in the SQ. */
	txq_free_elts(txq_ctrl);
	/* Prevent writing new pkts to SQ by setting no free WQE.*/
	txq->wqe_ci = txq->wqe_s;
	txq->wqe_pi = txq->wqe_s;
	/* Set the actual queue state. */
	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

/**
 * Tx queue stop. Device queue goes to the idle state,
 * all involved mbufs are freed from elts/WQ.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Tx queue index.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_tx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
{
	int ret;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
		DRV_LOG(ERR, "Hairpin queue can't be stopped");
		rte_errno = EINVAL;
		return -EINVAL;
	}
	if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		ret = mlx5_mp_os_req_queue_control(dev, idx,
						   MLX5_MP_REQ_QUEUE_TX_STOP);
	} else {
		ret = mlx5_tx_queue_stop_primary(dev, idx);
	}
	return ret;
}

/**
 * Tx queue start. Device queue goes to the ready state,
 * all required mbufs are allocated and WQ is replenished.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Tx queue index.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_tx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq, struct mlx5_txq_ctrl, txq);
	int ret;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	if (txq_ctrl->obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_SQ) {
		struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
		struct mlx5_txq_obj *obj = txq_ctrl->obj;

		msq_attr.sq_state = MLX5_SQC_STATE_RDY;
		msq_attr.state = MLX5_SQC_STATE_RST;
		ret = mlx5_devx_cmd_modify_sq(obj->sq_devx, &msq_attr);
		if (ret) {
			rte_errno = errno;
			DRV_LOG(ERR,
				"Cannot change the Tx QP state to RESET "
				"%s", strerror(errno));
			return ret;
		}
		msq_attr.sq_state = MLX5_SQC_STATE_RST;
		msq_attr.state = MLX5_SQC_STATE_RDY;
		ret = mlx5_devx_cmd_modify_sq(obj->sq_devx, &msq_attr);
		if (ret) {
			rte_errno = errno;
			DRV_LOG(ERR,
				"Cannot change the Tx QP state to READY "
				"%s", strerror(errno));
			return ret;
		}
	} else {
		struct ibv_qp_attr mod = {
			.qp_state = IBV_QPS_RESET,
			.port_num = (uint8_t)priv->dev_port,
		};
		struct ibv_qp *qp = txq_ctrl->obj->qp;

		ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
		if (ret) {
			DRV_LOG(ERR, "Cannot change the Tx QP state to RESET "
				"%s", strerror(errno));
			rte_errno = errno;
			return ret;
		}
		mod.qp_state = IBV_QPS_INIT;
		ret = mlx5_glue->modify_qp(qp, &mod,
					   (IBV_QP_STATE | IBV_QP_PORT));
		if (ret) {
			DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s",
				strerror(errno));
			rte_errno = errno;
			return ret;
		}
		mod.qp_state = IBV_QPS_RTR;
		ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
		if (ret) {
			DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s",
				strerror(errno));
			rte_errno = errno;
			return ret;
		}
		mod.qp_state = IBV_QPS_RTS;
		ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
		if (ret) {
			DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s",
				strerror(errno));
			rte_errno = errno;
			return ret;
		}
	}
	txq_ctrl->txq.wqe_ci = 0;
	txq_ctrl->txq.wqe_pi = 0;
	txq_ctrl->txq.elts_comp = 0;
	/* Set the actual queue state. */
	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
}

/**
 * Tx queue start. Device queue goes to the ready state,
 * all required mbufs are allocated and WQ is replenished.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Tx queue index.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_tx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
{
	int ret;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
		DRV_LOG(ERR, "Hairpin queue can't be started");
		rte_errno = EINVAL;
		return -EINVAL;
	}
	if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
		return 0;
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		ret = mlx5_mp_os_req_queue_control(dev, idx,
						   MLX5_MP_REQ_QUEUE_TX_START);
	} else {
		ret = mlx5_tx_queue_start_primary(dev, idx);
	}
	return ret;
}
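
/*
 * Application-side sketch (assuming the standard DPDK 20.11 ethdev API):
 * the two wrappers above back the queue start/stop control path.
 *
 * @code
 *	rte_eth_dev_tx_queue_stop(port_id, queue_id);
 *	// ... queue is idle, all mbufs returned to their pools ...
 *	rte_eth_dev_tx_queue_start(port_id, queue_id);
 * @endcode
 */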

/**
 * Tx queue presetup checks.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Tx queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (*desc <= MLX5_TX_COMP_THRESH) {
		DRV_LOG(WARNING,
			"port %u number of descriptors requested for Tx queue"
			" %u must be higher than MLX5_TX_COMP_THRESH, using %u"
			" instead of %u", dev->data->port_id, idx,
			MLX5_TX_COMP_THRESH + 1, *desc);
		*desc = MLX5_TX_COMP_THRESH + 1;
	}
	if (!rte_is_power_of_2(*desc)) {
		*desc = 1 << log2above(*desc);
		DRV_LOG(WARNING,
			"port %u increased number of descriptors in Tx queue"
			" %u to the next power of two (%d)",
			dev->data->port_id, idx, *desc);
	}
	DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
		dev->data->port_id, idx, *desc);
	if (idx >= priv->txqs_n) {
		DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
			dev->data->port_id, idx, priv->txqs_n);
		rte_errno = EOVERFLOW;
		return -rte_errno;
	}
	if (!mlx5_txq_releasable(dev, idx)) {
		rte_errno = EBUSY;
		DRV_LOG(ERR, "port %u unable to release queue index %u",
			dev->data->port_id, idx);
		return -rte_errno;
	}
	mlx5_txq_release(dev, idx);
	return 0;
}
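
/*
 * Worked example (assuming the default MLX5_TX_COMP_THRESH of 32): a request
 * for 1000 descriptors is not a power of two, so it is rounded up:
 *
 * @code
 *	uint16_t desc = 1000;
 *	desc = 1 << log2above(desc);	// 1024
 * @endcode
 *
 * A request for 20 descriptors would first be raised to 33 (threshold + 1)
 * and then rounded up to 64.
 */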

/**
 * DPDK callback to configure a TX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   TX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_txconf *conf)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	int res;

	res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
	if (res)
		return res;
	txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
	if (!txq_ctrl) {
		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
			dev->data->port_id, idx);
		return -rte_errno;
	}
	DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
		dev->data->port_id, idx);
	(*priv->txqs)[idx] = &txq_ctrl->txq;
	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
}
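
/*
 * Application-side usage sketch (standard ethdev API; port_id is assumed to
 * be a configured mlx5 port):
 *
 * @code
 *	struct rte_eth_txconf txconf = {
 *		.offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
 *	};
 *
 *	ret = rte_eth_tx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *				     &txconf);
 * @endcode
 */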

/**
 * DPDK callback to configure a TX hairpin queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   TX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param[in] hairpin_conf
 *   The hairpin binding configuration.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
			    uint16_t desc,
			    const struct rte_eth_hairpin_conf *hairpin_conf)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	int res;

	res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
	if (res)
		return res;
	if (hairpin_conf->peer_count != 1 ||
	    hairpin_conf->peers[0].port != dev->data->port_id ||
	    hairpin_conf->peers[0].queue >= priv->rxqs_n) {
		DRV_LOG(ERR, "port %u unable to setup hairpin queue index %u:"
			" invalid hairpin configuration", dev->data->port_id,
			idx);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	txq_ctrl = mlx5_txq_hairpin_new(dev, idx, desc, hairpin_conf);
	if (!txq_ctrl) {
		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
			dev->data->port_id, idx);
		return -rte_errno;
	}
	DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
		dev->data->port_id, idx);
	(*priv->txqs)[idx] = &txq_ctrl->txq;
	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
	return 0;
}
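
/*
 * Application-side sketch (assuming the DPDK 19.11+ hairpin API): the checks
 * above require exactly one peer, on the same port, pointing at a valid Rx
 * queue.
 *
 * @code
 *	struct rte_eth_hairpin_conf conf = {
 *		.peer_count = 1,
 *	};
 *
 *	conf.peers[0].port = port_id;	// must be the same port
 *	conf.peers[0].queue = rx_queue;	// must be < number of Rx queues
 *	ret = rte_eth_tx_hairpin_queue_setup(port_id, tx_queue, 512, &conf);
 * @endcode
 */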

/**
 * DPDK callback to release a TX queue.
 *
 * @param dpdk_txq
 *   Generic TX queue pointer.
 */
void
mlx5_tx_queue_release(void *dpdk_txq)
{
	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
	struct mlx5_txq_ctrl *txq_ctrl;
	struct mlx5_priv *priv;
	unsigned int i;

	if (txq == NULL)
		return;
	txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
	priv = txq_ctrl->priv;
	for (i = 0; (i != priv->txqs_n); ++i)
		if ((*priv->txqs)[i] == txq) {
			DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
				PORT_ID(priv), txq->idx);
			mlx5_txq_release(ETH_DEV(priv), i);
			break;
		}
}

/**
 * Configure the doorbell register non-cached attribute.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 * @param page_size
 *   System page size.
 */
static void
txq_uar_ncattr_init(struct mlx5_txq_ctrl *txq_ctrl, size_t page_size)
{
	struct mlx5_priv *priv = txq_ctrl->priv;
	off_t cmd;

	txq_ctrl->txq.db_heu = priv->config.dbnc == MLX5_TXDB_HEURISTIC;
	txq_ctrl->txq.db_nc = 0;
	/* Check the doorbell register mapping type. */
	cmd = txq_ctrl->uar_mmap_offset / page_size;
	cmd >>= MLX5_UAR_MMAP_CMD_SHIFT;
	cmd &= MLX5_UAR_MMAP_CMD_MASK;
	if (cmd == MLX5_MMAP_GET_NC_PAGES_CMD)
		txq_ctrl->txq.db_nc = 1;
}
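
/*
 * Worked example (hypothetical values, assuming 4 KiB pages and a command
 * shift of 8 bits): uar_mmap_offset packs a mmap command into the upper bits
 * of the UAR page index. An offset of 0x300000 gives page index
 * 0x300000 / 0x1000 = 0x300, and 0x300 >> 8 = 3; if command 3 is
 * MLX5_MMAP_GET_NC_PAGES_CMD, the doorbell page was mapped non-cached and
 * db_nc is set.
 */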

/**
 * Initialize Tx UAR registers for primary process.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 */
void
txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl)
{
	struct mlx5_priv *priv = txq_ctrl->priv;
	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
#ifndef RTE_ARCH_64
	unsigned int lock_idx;
#endif
	const size_t page_size = rte_mem_page_size();

	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		rte_errno = ENOMEM;
	}
	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
		return;
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	MLX5_ASSERT(ppriv);
	ppriv->uar_table[txq_ctrl->txq.idx] = txq_ctrl->bf_reg;
	txq_uar_ncattr_init(txq_ctrl, page_size);
#ifndef RTE_ARCH_64
	/* Assign an UAR lock according to UAR page number. */
	lock_idx = (txq_ctrl->uar_mmap_offset / page_size) &
		   MLX5_UAR_PAGE_NUM_MASK;
	txq_ctrl->txq.uar_lock = &priv->sh->uar_lock[lock_idx];
#endif
}

/**
 * Remap UAR register of a Tx queue for secondary process.
 *
 * Remapped address is stored at the table in the process private structure of
 * the device, indexed by queue index.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 * @param fd
 *   Verbs file descriptor to map UAR pages.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
txq_uar_init_secondary(struct mlx5_txq_ctrl *txq_ctrl, int fd)
{
	struct mlx5_priv *priv = txq_ctrl->priv;
	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
	struct mlx5_txq_data *txq = &txq_ctrl->txq;
	void *addr;
	uintptr_t uar_va;
	uintptr_t offset;
	const size_t page_size = rte_mem_page_size();

	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
		return 0;
	MLX5_ASSERT(ppriv);
	/*
	 * As in rdma-core, UARs are mapped in size of OS page
	 * size. Ref to libmlx5 function: mlx5_init_context()
	 */
	uar_va = (uintptr_t)txq_ctrl->bf_reg;
	offset = uar_va & (page_size - 1); /* Offset in page. */
	addr = rte_mem_map(NULL, page_size, RTE_PROT_WRITE, RTE_MAP_SHARED,
			   fd, txq_ctrl->uar_mmap_offset);
	if (!addr) {
		DRV_LOG(ERR,
			"port %u mmap failed for BF reg of txq %u",
			txq->port_id, txq->idx);
		rte_errno = ENXIO;
		return -rte_errno;
	}
	addr = RTE_PTR_ADD(addr, offset);
	ppriv->uar_table[txq->idx] = addr;
	txq_uar_ncattr_init(txq_ctrl, page_size);
	return 0;
}
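
/*
 * Worked example (hypothetical addresses, 4 KiB pages): the UAR register may
 * sit anywhere inside its page, so only the page itself is remapped and the
 * in-page offset is re-applied to the new mapping.
 *
 * @code
 *	uintptr_t uar_va = 0x7f1234567890;	// primary-process VA
 *	uintptr_t off = uar_va & 0xfff;		// 0x890, offset in page
 *	// addr = rte_mem_map(...);		// e.g. returns 0x7f56789ab000
 *	// ppriv->uar_table[idx] = addr + 0x890
 * @endcode
 */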

/**
 * Unmap UAR register of a Tx queue for secondary process.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 */
static void
txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl)
{
	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(txq_ctrl->priv));
	void *addr;
	const size_t page_size = rte_mem_page_size();

	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		rte_errno = ENOMEM;
	}
	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
		return;
	addr = ppriv->uar_table[txq_ctrl->txq.idx];
	rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
}

/**
 * Deinitialize Tx UAR registers for secondary process.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq;
	struct mlx5_txq_ctrl *txq_ctrl;
	unsigned int i;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
	for (i = 0; i != priv->txqs_n; ++i) {
		if (!(*priv->txqs)[i])
			continue;
		txq = (*priv->txqs)[i];
		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
		txq_uar_uninit_secondary(txq_ctrl);
	}
}

/**
 * Initialize Tx UAR registers for secondary process.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param fd
 *   Verbs file descriptor to map UAR pages.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq;
	struct mlx5_txq_ctrl *txq_ctrl;
	unsigned int i;
	int ret;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
	for (i = 0; i != priv->txqs_n; ++i) {
		if (!(*priv->txqs)[i])
			continue;
		txq = (*priv->txqs)[i];
		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
		if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
			continue;
		MLX5_ASSERT(txq->idx == (uint16_t)i);
		ret = txq_uar_init_secondary(txq_ctrl, fd);
		if (ret)
			goto error;
	}
	return 0;
error:
	/* Rollback. */
	do {
		if (!(*priv->txqs)[i])
			continue;
		txq = (*priv->txqs)[i];
		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
		txq_uar_uninit_secondary(txq_ctrl);
	} while (i--);
	return -rte_errno;
}

/**
 * Verify the Verbs Tx queue list is empty.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The number of objects not released.
 */
int
mlx5_txq_obj_verify(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret = 0;
	struct mlx5_txq_obj *txq_obj;

	LIST_FOREACH(txq_obj, &priv->txqsobj, next) {
		DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
			dev->data->port_id, txq_obj->txq_ctrl->txq.idx);
		++ret;
	}
	return ret;
}

/**
 * Calculate the total number of WQEBB for Tx queue.
 *
 * Simplified version of calc_sq_size() in rdma-core.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 *
 * @return
 *   The number of WQEBB.
 */
static int
txq_calc_wqebb_cnt(struct mlx5_txq_ctrl *txq_ctrl)
{
	unsigned int wqe_size;
	const unsigned int desc = 1 << txq_ctrl->txq.elts_n;

	wqe_size = MLX5_WQE_CSEG_SIZE +
		   MLX5_WQE_ESEG_SIZE +
		   MLX5_WSEG_SIZE -
		   MLX5_ESEG_MIN_INLINE_SIZE +
		   txq_ctrl->max_inline_data;
	return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;
}
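
/*
 * Worked example (hypothetical sizes): if the per-descriptor WQE footprint
 * works out to, say, 78 bytes with inline data, a 512-descriptor queue needs
 * rte_align32pow2(512 * 78) = rte_align32pow2(39936) = 65536 bytes of WQ,
 * i.e. 65536 / 64 = 1024 WQEBBs at the 64-byte MLX5_WQE_SIZE.
 */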

/**
 * Calculate the maximal inline data size for Tx queue.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 *
 * @return
 *   The maximal inline data size.
 */
static unsigned int
txq_calc_inline_max(struct mlx5_txq_ctrl *txq_ctrl)
{
	const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
	struct mlx5_priv *priv = txq_ctrl->priv;
	unsigned int wqe_size;

	wqe_size = priv->sh->device_attr.max_qp_wr / desc;
	if (!wqe_size)
		return 0;
	/*
	 * This calculation is derived from the source of
	 * mlx5_calc_send_wqe() in rdma-core library.
	 */
	wqe_size = wqe_size * MLX5_WQE_SIZE -
		   MLX5_WQE_CSEG_SIZE -
		   MLX5_WQE_ESEG_SIZE -
		   MLX5_WSEG_SIZE -
		   MLX5_WSEG_SIZE +
		   MLX5_DSEG_MIN_INLINE_SIZE;
	return wqe_size;
}
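
/*
 * Worked example (hypothetical capabilities): with max_qp_wr == 32768 and
 * 1024 descriptors, each descriptor may span 32768 / 1024 = 32 WQEBBs, i.e.
 * 32 * 64 = 2048 bytes, and the returned maximum is that budget minus the
 * fixed control/Ethernet/data segment overhead.
 */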

/**
 * Set Tx queue parameters from device configuration.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 */
static void
txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
{
	struct mlx5_priv *priv = txq_ctrl->priv;
	struct mlx5_dev_config *config = &priv->config;
	unsigned int inlen_send; /* Inline data for ordinary SEND.*/
	unsigned int inlen_empw; /* Inline data for enhanced MPW. */
	unsigned int inlen_mode; /* Minimal required Inline data. */
	unsigned int txqs_inline; /* Min Tx queues to enable inline. */
	uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
	bool tso = txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
					    DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
					    DEV_TX_OFFLOAD_GRE_TNL_TSO |
					    DEV_TX_OFFLOAD_IP_TNL_TSO |
					    DEV_TX_OFFLOAD_UDP_TNL_TSO);
	bool vlan_inline;
	unsigned int temp;

	if (config->txqs_inline == MLX5_ARG_UNSET)
		txqs_inline =
#if defined(RTE_ARCH_ARM64)
		(priv->pci_dev->id.device_id ==
			PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) ?
			MLX5_INLINE_MAX_TXQS_BLUEFIELD :
#endif
			MLX5_INLINE_MAX_TXQS;
	else
		txqs_inline = (unsigned int)config->txqs_inline;
	inlen_send = (config->txq_inline_max == MLX5_ARG_UNSET) ?
		     MLX5_SEND_DEF_INLINE_LEN :
		     (unsigned int)config->txq_inline_max;
	inlen_empw = (config->txq_inline_mpw == MLX5_ARG_UNSET) ?
		     MLX5_EMPW_DEF_INLINE_LEN :
		     (unsigned int)config->txq_inline_mpw;
	inlen_mode = (config->txq_inline_min == MLX5_ARG_UNSET) ?
		     0 : (unsigned int)config->txq_inline_min;
	if (config->mps != MLX5_MPW_ENHANCED && config->mps != MLX5_MPW)
		inlen_empw = 0;
	/*
	 * If there is requested minimal amount of data to inline
	 * we MUST enable inlining. This is a case for ConnectX-4
	 * which usually requires L2 inlined for correct operating
	 * and ConnectX-4 Lx which requires L2-L4 inlined to
	 * support E-Switch Flows.
	 */
	if (inlen_mode) {
		if (inlen_mode <= MLX5_ESEG_MIN_INLINE_SIZE) {
			/*
			 * Optimize minimal inlining for single
			 * segment packets to fill one WQEBB
			 * without gaps.
			 */
			temp = MLX5_ESEG_MIN_INLINE_SIZE;
		} else {
			temp = inlen_mode - MLX5_ESEG_MIN_INLINE_SIZE;
			temp = RTE_ALIGN(temp, MLX5_WSEG_SIZE) +
			       MLX5_ESEG_MIN_INLINE_SIZE;
			temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
		}
		if (temp != inlen_mode) {
			DRV_LOG(INFO,
				"port %u minimal required inline setting"
				" aligned from %u to %u",
				PORT_ID(priv), inlen_mode, temp);
			inlen_mode = temp;
		}
	}
	/*
	 * If port is configured to support VLAN insertion and device
	 * does not support this feature by HW (for NICs before ConnectX-5
	 * or in case the wqe_vlan_insert flag is not set) we must enable
	 * data inline on all queues because it is supported by single
	 * tx_burst routine.
	 */
	txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
	vlan_inline = (dev_txoff & DEV_TX_OFFLOAD_VLAN_INSERT) &&
		      !config->hw_vlan_insert;
	/*
	 * If there are few Tx queues it is prioritized
	 * to save CPU cycles and disable data inlining at all.
	 */
	if (inlen_send && priv->txqs_n >= txqs_inline) {
		/*
		 * The data sent with ordinary MLX5_OPCODE_SEND
		 * may be inlined in Ethernet Segment, align the
		 * length accordingly to fit entire WQEBBs.
		 */
		temp = RTE_MAX(inlen_send,
			       MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE);
		temp -= MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
		temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
		temp += MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
		temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
				     MLX5_ESEG_MIN_INLINE_SIZE -
				     MLX5_WQE_CSEG_SIZE -
				     MLX5_WQE_ESEG_SIZE -
				     MLX5_WQE_DSEG_SIZE * 2);
		temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
		temp = RTE_MAX(temp, inlen_mode);
		if (temp != inlen_send) {
			DRV_LOG(INFO,
				"port %u ordinary send inline setting"
				" aligned from %u to %u",
				PORT_ID(priv), inlen_send, temp);
			inlen_send = temp;
		}
		/*
		 * Not aligned to cache lines, but to WQEs.
		 * First bytes of data (initial alignment)
		 * is going to be copied explicitly at the
		 * beginning of inlining buffer in Ethernet
		 * Segment.
		 */
		MLX5_ASSERT(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
		MLX5_ASSERT(inlen_send <= MLX5_WQE_SIZE_MAX +
					  MLX5_ESEG_MIN_INLINE_SIZE -
					  MLX5_WQE_CSEG_SIZE -
					  MLX5_WQE_ESEG_SIZE -
					  MLX5_WQE_DSEG_SIZE * 2);
	} else if (inlen_mode) {
		/*
		 * If minimal inlining is requested we must
		 * enable inlining in general, despite the
		 * number of configured queues. Ignore the
		 * txq_inline_max devarg, this is not
		 * full-featured inline.
		 */
		inlen_send = inlen_mode;
		inlen_empw = 0;
	} else if (vlan_inline) {
		/*
		 * Hardware does not report offload for
		 * VLAN insertion, we must enable data inline
		 * to implement feature by software.
		 */
		inlen_send = MLX5_ESEG_MIN_INLINE_SIZE;
		inlen_empw = 0;
	} else {
		inlen_send = 0;
		inlen_empw = 0;
	}
	txq_ctrl->txq.inlen_send = inlen_send;
	txq_ctrl->txq.inlen_mode = inlen_mode;
	txq_ctrl->txq.inlen_empw = 0;
	if (inlen_send && inlen_empw && priv->txqs_n >= txqs_inline) {
		/*
		 * The data sent with MLX5_OPCODE_ENHANCED_MPSW
		 * may be inlined in Data Segment, align the
		 * length accordingly to fit entire WQEBBs.
		 */
		temp = RTE_MAX(inlen_empw,
			       MLX5_WQE_SIZE + MLX5_DSEG_MIN_INLINE_SIZE);
		temp -= MLX5_DSEG_MIN_INLINE_SIZE;
		temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
		temp += MLX5_DSEG_MIN_INLINE_SIZE;
		temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
				     MLX5_DSEG_MIN_INLINE_SIZE -
				     MLX5_WQE_CSEG_SIZE -
				     MLX5_WQE_ESEG_SIZE -
				     MLX5_WQE_DSEG_SIZE);
		temp = RTE_MIN(temp, MLX5_EMPW_MAX_INLINE_LEN);
		if (temp != inlen_empw) {
			DRV_LOG(INFO,
				"port %u enhanced empw inline setting"
				" aligned from %u to %u",
				PORT_ID(priv), inlen_empw, temp);
			inlen_empw = temp;
		}
		MLX5_ASSERT(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
		MLX5_ASSERT(inlen_empw <= MLX5_WQE_SIZE_MAX +
					  MLX5_DSEG_MIN_INLINE_SIZE -
					  MLX5_WQE_CSEG_SIZE -
					  MLX5_WQE_ESEG_SIZE -
					  MLX5_WQE_DSEG_SIZE);
		txq_ctrl->txq.inlen_empw = inlen_empw;
	}
	txq_ctrl->max_inline_data = RTE_MAX(inlen_send, inlen_empw);
	if (tso) {
		txq_ctrl->max_tso_header = MLX5_MAX_TSO_HEADER;
		txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->max_inline_data,
						    MLX5_MAX_TSO_HEADER);
		txq_ctrl->txq.tso_en = 1;
	}
	txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
	txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
				 DEV_TX_OFFLOAD_UDP_TNL_TSO |
				 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
				txq_ctrl->txq.offloads) && config->swp;
}
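
/*
 * Configuration sketch (devarg names from the mlx5 guide; the PCI address is
 * an example): the knobs consumed above come from device arguments, e.g.
 *
 *	dpdk-testpmd -a 0000:03:00.0,txq_inline_max=256,txqs_min_inline=8 ...
 *
 * txq_inline_min/txq_inline_max/txq_inline_mpw map to inlen_mode/inlen_send/
 * inlen_empw here, and txqs_min_inline to txqs_inline.
 */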

/**
 * Adjust Tx queue data inline parameters for large queue sizes.
 * The data inline feature requires multiple WQEs to fit the packets,
 * and if a large number of Tx descriptors is requested by the application
 * the total WQE amount may exceed the hardware capabilities. If the
 * default inline settings are used we can try to adjust them to
 * meet the hardware requirements and not exceed the queue size.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 *
 * @return
 *   Zero on success, otherwise the parameters can not be adjusted.
 */
static int
txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
{
	struct mlx5_priv *priv = txq_ctrl->priv;
	struct mlx5_dev_config *config = &priv->config;
	unsigned int max_inline;

	max_inline = txq_calc_inline_max(txq_ctrl);
	if (!txq_ctrl->txq.inlen_send) {
		/*
		 * Inline data feature is not engaged at all.
		 * There is nothing to adjust.
		 */
		return 0;
	}
	if (txq_ctrl->max_inline_data <= max_inline) {
		/*
		 * The requested inline data length does not
		 * exceed queue capabilities.
		 */
		return 0;
	}
	if (txq_ctrl->txq.inlen_mode > max_inline) {
		DRV_LOG(ERR,
			"minimal data inline requirements (%u) are not"
			" satisfied (%u) on port %u, try the smaller"
			" Tx queue size (%d)",
			txq_ctrl->txq.inlen_mode, max_inline,
			priv->dev_data->port_id,
			priv->sh->device_attr.max_qp_wr);
		goto error;
	}
	if (txq_ctrl->txq.inlen_send > max_inline &&
	    config->txq_inline_max != MLX5_ARG_UNSET &&
	    config->txq_inline_max > (int)max_inline) {
		DRV_LOG(ERR,
			"txq_inline_max requirements (%u) are not"
			" satisfied (%u) on port %u, try the smaller"
			" Tx queue size (%d)",
			txq_ctrl->txq.inlen_send, max_inline,
			priv->dev_data->port_id,
			priv->sh->device_attr.max_qp_wr);
		goto error;
	}
	if (txq_ctrl->txq.inlen_empw > max_inline &&
	    config->txq_inline_mpw != MLX5_ARG_UNSET &&
	    config->txq_inline_mpw > (int)max_inline) {
		DRV_LOG(ERR,
			"txq_inline_mpw requirements (%u) are not"
			" satisfied (%u) on port %u, try the smaller"
			" Tx queue size (%d)",
			txq_ctrl->txq.inlen_empw, max_inline,
			priv->dev_data->port_id,
			priv->sh->device_attr.max_qp_wr);
		goto error;
	}
	if (txq_ctrl->txq.tso_en && max_inline < MLX5_MAX_TSO_HEADER) {
		DRV_LOG(ERR,
			"tso header inline requirements (%u) are not"
			" satisfied (%u) on port %u, try the smaller"
			" Tx queue size (%d)",
			MLX5_MAX_TSO_HEADER, max_inline,
			priv->dev_data->port_id,
			priv->sh->device_attr.max_qp_wr);
		goto error;
	}
	if (txq_ctrl->txq.inlen_send > max_inline) {
		DRV_LOG(WARNING,
			"adjust txq_inline_max (%u->%u)"
			" due to large Tx queue on port %u",
			txq_ctrl->txq.inlen_send, max_inline,
			priv->dev_data->port_id);
		txq_ctrl->txq.inlen_send = max_inline;
	}
	if (txq_ctrl->txq.inlen_empw > max_inline) {
		DRV_LOG(WARNING,
			"adjust txq_inline_mpw (%u->%u)"
			" due to large Tx queue on port %u",
			txq_ctrl->txq.inlen_empw, max_inline,
			priv->dev_data->port_id);
		txq_ctrl->txq.inlen_empw = max_inline;
	}
	txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->txq.inlen_send,
					    txq_ctrl->txq.inlen_empw);
	MLX5_ASSERT(txq_ctrl->max_inline_data <= max_inline);
	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= max_inline);
	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw ||
		    !txq_ctrl->txq.inlen_empw);
	return 0;
error:
	rte_errno = ENOMEM;
	return -ENOMEM;
}

/**
 * Create a DPDK Tx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   TX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 *
 * @return
 *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
 */
struct mlx5_txq_ctrl *
mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
	     unsigned int socket, const struct rte_eth_txconf *conf)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *tmpl;

	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
			   desc * sizeof(struct rte_mbuf *), 0, socket);
	if (!tmpl) {
		rte_errno = ENOMEM;
		return NULL;
	}
	if (mlx5_mr_btree_init(&tmpl->txq.mr_ctrl.cache_bh,
			       MLX5_MR_BTREE_CACHE_N, socket)) {
		/* rte_errno is already set. */
		goto error;
	}
	/* Save pointer of global generation number to check memory event. */
	tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->sh->share_cache.dev_gen;
	MLX5_ASSERT(desc > MLX5_TX_COMP_THRESH);
	tmpl->txq.offloads = conf->offloads |
			     dev->data->dev_conf.txmode.offloads;
	tmpl->priv = priv;
	tmpl->socket = socket;
	tmpl->txq.elts_n = log2above(desc);
	tmpl->txq.elts_s = desc;
	tmpl->txq.elts_m = desc - 1;
	tmpl->txq.port_id = dev->data->port_id;
	tmpl->txq.idx = idx;
	txq_set_params(tmpl);
	if (txq_adjust_params(tmpl))
		goto error;
	if (txq_calc_wqebb_cnt(tmpl) >
	    priv->sh->device_attr.max_qp_wr) {
		DRV_LOG(ERR,
			"port %u Tx WQEBB count (%d) exceeds the limit (%d),"
			" try smaller queue size",
			dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
			priv->sh->device_attr.max_qp_wr);
		rte_errno = ENOMEM;
		goto error;
	}
	rte_atomic32_inc(&tmpl->refcnt);
	tmpl->type = MLX5_TXQ_TYPE_STANDARD;
	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
	return tmpl;
error:
	mlx5_free(tmpl);
	return NULL;
}

/**
 * Create a DPDK Tx hairpin queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   TX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param hairpin_conf
 *   The hairpin configuration.
 *
 * @return
 *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
 */
struct mlx5_txq_ctrl *
mlx5_txq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		     const struct rte_eth_hairpin_conf *hairpin_conf)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *tmpl;

	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
			   SOCKET_ID_ANY);
	if (!tmpl) {
		rte_errno = ENOMEM;
		return NULL;
	}
	tmpl->priv = priv;
	tmpl->socket = SOCKET_ID_ANY;
	tmpl->txq.elts_n = log2above(desc);
	tmpl->txq.port_id = dev->data->port_id;
	tmpl->txq.idx = idx;
	tmpl->hairpin_conf = *hairpin_conf;
	tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
	rte_atomic32_inc(&tmpl->refcnt);
	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
	return tmpl;
}

/**
 * Get a Tx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Tx queue index.
 *
 * @return
 *   A pointer to the queue if it exists.
 */
struct mlx5_txq_ctrl *
mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *ctrl = NULL;

	if (txq_data) {
		ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
		rte_atomic32_inc(&ctrl->refcnt);
	}
	return ctrl;
}
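
/*
 * Internal usage sketch: mlx5_txq_get()/mlx5_txq_release() form a
 * reference-counted pair; the queue is actually freed only when the last
 * reference is dropped.
 *
 * @code
 *	struct mlx5_txq_ctrl *ctrl = mlx5_txq_get(dev, idx);
 *
 *	if (ctrl != NULL) {
 *		// ... use ctrl ...
 *		mlx5_txq_release(dev, idx);
 *	}
 * @endcode
 */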

/**
 * Release a Tx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Tx queue index.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
int
mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *txq_ctrl;

	if (!(*priv->txqs)[idx])
		return 0;
	txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
	if (!rte_atomic32_dec_and_test(&txq_ctrl->refcnt))
		return 1;
	if (txq_ctrl->obj) {
		priv->obj_ops.txq_obj_release(txq_ctrl->obj);
		LIST_REMOVE(txq_ctrl->obj, next);
		mlx5_free(txq_ctrl->obj);
		txq_ctrl->obj = NULL;
	}
	if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD) {
		if (txq_ctrl->txq.fcqs) {
			mlx5_free(txq_ctrl->txq.fcqs);
			txq_ctrl->txq.fcqs = NULL;
		}
		txq_free_elts(txq_ctrl);
		mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
	}
	LIST_REMOVE(txq_ctrl, next);
	mlx5_free(txq_ctrl);
	(*priv->txqs)[idx] = NULL;
	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

/**
 * Verify if the queue can be released.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Tx queue index.
 *
 * @return
 *   1 if the queue can be released.
 */
int
mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *txq;

	if (!(*priv->txqs)[idx])
		return -1;
	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
	return (rte_atomic32_read(&txq->refcnt) == 1);
}

/**
 * Verify the Tx queue list is empty.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The number of objects not released.
 */
int
mlx5_txq_verify(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *txq_ctrl;
	int ret = 0;

	LIST_FOREACH(txq_ctrl, &priv->txqsctrl, next) {
		DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
			dev->data->port_id, txq_ctrl->txq.idx);
		++ret;
	}
	return ret;
}

/**
 * Set the Tx queue dynamic timestamp (mask and offset).
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 */
void
mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_txq_data *data;
	int off, nbit;
	unsigned int i;
	uint64_t mask = 0;

	nbit = rte_mbuf_dynflag_lookup
				(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
	off = rte_mbuf_dynfield_lookup
				(RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
	if (nbit >= 0 && off >= 0 && sh->txpp.refcnt)
		mask = 1ULL << nbit;
	for (i = 0; i != priv->txqs_n; ++i) {
		data = (*priv->txqs)[i];
		if (!data)
			continue;
		data->sh = sh;
		data->ts_mask = mask;
		data->ts_offset = off;
	}
}
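
/*
 * Application-side sketch (assuming Tx packet pacing is enabled via the
 * tx_pp devarg and the DPDK 20.11 mbuf dynamic field API): stamp an mbuf so
 * tx_burst schedules its transmission.
 *
 * @code
 *	int off = rte_mbuf_dynfield_lookup
 *				(RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
 *	int bit = rte_mbuf_dynflag_lookup
 *				(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
 *
 *	if (off >= 0 && bit >= 0) {
 *		*RTE_MBUF_DYNFIELD(mbuf, off, uint64_t *) = ts_nanoseconds;
 *		mbuf->ol_flags |= 1ULL << bit;
 *	}
 * @endcode
 */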