1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
16 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
18 #pragma GCC diagnostic ignored "-Wpedantic"
20 #include <infiniband/verbs.h>
21 #include <infiniband/mlx5dv.h>
23 #pragma GCC diagnostic error "-Wpedantic"
27 #include <rte_malloc.h>
28 #include <rte_ethdev_driver.h>
29 #include <rte_common.h>
31 #include "mlx5_utils.h"
32 #include "mlx5_defs.h"
34 #include "mlx5_rxtx.h"
35 #include "mlx5_autoconf.h"
36 #include "mlx5_glue.h"
39 * Allocate TX queue elements.
42 * Pointer to TX queue structure.
45 txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
47 const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
50 for (i = 0; (i != elts_n); ++i)
51 txq_ctrl->txq.elts[i] = NULL;
52 DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
53 PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx, elts_n);
54 txq_ctrl->txq.elts_head = 0;
55 txq_ctrl->txq.elts_tail = 0;
56 txq_ctrl->txq.elts_comp = 0;
60 * Free TX queue elements.
63 * Pointer to TX queue structure.
66 txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
68 const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
69 const uint16_t elts_m = elts_n - 1;
70 uint16_t elts_head = txq_ctrl->txq.elts_head;
71 uint16_t elts_tail = txq_ctrl->txq.elts_tail;
72 struct rte_mbuf *(*elts)[elts_n] = &txq_ctrl->txq.elts;
74 DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
75 PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx);
76 txq_ctrl->txq.elts_head = 0;
77 txq_ctrl->txq.elts_tail = 0;
78 txq_ctrl->txq.elts_comp = 0;
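/*
 * Illustrative note: elts_n above is a power-of-two size, so elts_m acts
 * as a wrap-around mask. For example, with 256 elements (elts_m = 255) a
 * running 16-bit index such as elts_tail = 300 selects slot
 * 300 & 255 = 44, letting head and tail advance freely while the mask
 * keeps every access inside the ring.
 */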
80 while (elts_tail != elts_head) {
81 struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
84 rte_pktmbuf_free_seg(elt);
87 memset(&(*elts)[elts_tail & elts_m],
89 sizeof((*elts)[elts_tail & elts_m]));
96 * Returns the per-port supported offloads.
99 * Pointer to Ethernet device.
102 * Supported Tx offloads.
105 mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
107 struct mlx5_priv *priv = dev->data->dev_private;
108 uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
109 DEV_TX_OFFLOAD_VLAN_INSERT);
110 struct mlx5_dev_config *config = &priv->config;
113 offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
114 DEV_TX_OFFLOAD_UDP_CKSUM |
115 DEV_TX_OFFLOAD_TCP_CKSUM);
117 offloads |= DEV_TX_OFFLOAD_TCP_TSO;
120 offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
122 offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
123 DEV_TX_OFFLOAD_UDP_TNL_TSO);
125 if (config->tunnel_en) {
127 offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
129 offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
130 DEV_TX_OFFLOAD_GRE_TNL_TSO |
131 DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
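/*
 * Illustrative usage from the application side (a minimal sketch, not
 * part of this driver): the offload set assembled here is exposed via
 * rte_eth_dev_info_get() and can be requested at configure time, e.g.:
 *
 *   struct rte_eth_dev_info info;
 *   struct rte_eth_conf conf = { 0 };
 *
 *   rte_eth_dev_info_get(port_id, &info);
 *   if (info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO)
 *           conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_TSO;
 */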
137 * Tx queue presetup checks.
140 * Pointer to Ethernet device structure.
144 * Number of descriptors to configure in queue.
147 * 0 on success, a negative errno value otherwise and rte_errno is set.
150 mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc)
152 struct mlx5_priv *priv = dev->data->dev_private;
154 if (desc <= MLX5_TX_COMP_THRESH) {
156 "port %u number of descriptors requested for Tx queue"
157 " %u must be higher than MLX5_TX_COMP_THRESH, using %u"
159 dev->data->port_id, idx, MLX5_TX_COMP_THRESH + 1, desc);
160 desc = MLX5_TX_COMP_THRESH + 1;
162 if (!rte_is_power_of_2(desc)) {
163 desc = 1 << log2above(desc);
165 "port %u increased number of descriptors in Tx queue"
166 " %u to the next power of two (%d)",
167 dev->data->port_id, idx, desc);
169 DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
170 dev->data->port_id, idx, desc);
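/*
 * Worked example: a request for 1000 descriptors is not a power of two
 * and is rounded up to 1 << log2above(1000) = 1024, while a request at
 * or below MLX5_TX_COMP_THRESH is first bumped to
 * MLX5_TX_COMP_THRESH + 1, as required by the check above.
 */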
171 if (idx >= priv->txqs_n) {
172 DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
173 dev->data->port_id, idx, priv->txqs_n);
174 rte_errno = EOVERFLOW;
177 if (!mlx5_txq_releasable(dev, idx)) {
179 DRV_LOG(ERR, "port %u unable to release queue index %u",
180 dev->data->port_id, idx);
183 mlx5_txq_release(dev, idx);
187 * DPDK callback to configure a TX queue.
190 * Pointer to Ethernet device structure.
194 * Number of descriptors to configure in queue.
196 * NUMA socket on which memory must be allocated.
198 * Thresholds parameters.
201 * 0 on success, a negative errno value otherwise and rte_errno is set.
204 mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
205 unsigned int socket, const struct rte_eth_txconf *conf)
207 struct mlx5_priv *priv = dev->data->dev_private;
208 struct mlx5_txq_data *txq = (*priv->txqs)[idx];
209 struct mlx5_txq_ctrl *txq_ctrl =
210 container_of(txq, struct mlx5_txq_ctrl, txq);
213 res = mlx5_tx_queue_pre_setup(dev, idx, desc);
216 txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
218 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
219 dev->data->port_id, idx);
222 DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
223 dev->data->port_id, idx);
224 (*priv->txqs)[idx] = &txq_ctrl->txq;
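/*
 * Illustrative application-side call that lands in this callback (a
 * sketch, using default thresholds):
 *
 *   int ret = rte_eth_tx_queue_setup(port_id, queue_id, 1024,
 *                                    rte_socket_id(), NULL);
 */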
229 * DPDK callback to configure a TX hairpin queue.
232 * Pointer to Ethernet device structure.
236 * Number of descriptors to configure in queue.
237 * @param[in] hairpin_conf
238 * The hairpin binding configuration.
241 * 0 on success, a negative errno value otherwise and rte_errno is set.
244 mlx5_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
246 const struct rte_eth_hairpin_conf *hairpin_conf)
248 struct mlx5_priv *priv = dev->data->dev_private;
249 struct mlx5_txq_data *txq = (*priv->txqs)[idx];
250 struct mlx5_txq_ctrl *txq_ctrl =
251 container_of(txq, struct mlx5_txq_ctrl, txq);
254 res = mlx5_tx_queue_pre_setup(dev, idx, desc);
257 if (hairpin_conf->peer_count != 1 ||
258 hairpin_conf->peers[0].port != dev->data->port_id ||
259 hairpin_conf->peers[0].queue >= priv->rxqs_n) {
260 DRV_LOG(ERR, "port %u unable to setup hairpin queue index %u "
261 "invalid hairpin configuration", dev->data->port_id,
266 txq_ctrl = mlx5_txq_hairpin_new(dev, idx, desc, hairpin_conf);
268 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
269 dev->data->port_id, idx);
272 DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
273 dev->data->port_id, idx);
274 (*priv->txqs)[idx] = &txq_ctrl->txq;
275 txq_ctrl->type = MLX5_TXQ_TYPE_HAIRPIN;
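/*
 * Illustrative application-side setup of a Tx hairpin queue bound to Rx
 * queue 0 of the same port (a sketch matching the checks above):
 *
 *   struct rte_eth_hairpin_conf hconf = { .peer_count = 1 };
 *
 *   hconf.peers[0].port = port_id;
 *   hconf.peers[0].queue = 0;
 *   ret = rte_eth_tx_hairpin_queue_setup(port_id, queue_id, 512, &hconf);
 */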
280 * DPDK callback to release a TX queue.
283 * Generic TX queue pointer.
286 mlx5_tx_queue_release(void *dpdk_txq)
288 struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
289 struct mlx5_txq_ctrl *txq_ctrl;
290 struct mlx5_priv *priv;
295 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
296 priv = txq_ctrl->priv;
297 for (i = 0; (i != priv->txqs_n); ++i)
298 if ((*priv->txqs)[i] == txq) {
299 mlx5_txq_release(ETH_DEV(priv), i);
300 DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
301 PORT_ID(priv), txq->idx);
307 * Configure the doorbell register non-cached attribute.
310 * Pointer to Tx queue control structure.
315 txq_uar_ncattr_init(struct mlx5_txq_ctrl *txq_ctrl, size_t page_size)
317 struct mlx5_priv *priv = txq_ctrl->priv;
320 txq_ctrl->txq.db_heu = priv->config.dbnc == MLX5_TXDB_HEURISTIC;
321 txq_ctrl->txq.db_nc = 0;
322 /* Check the doorbell register mapping type. */
323 cmd = txq_ctrl->uar_mmap_offset / page_size;
324 cmd >>= MLX5_UAR_MMAP_CMD_SHIFT;
325 cmd &= MLX5_UAR_MMAP_CMD_MASK;
326 if (cmd == MLX5_MMAP_GET_NC_PAGES_CMD)
327 txq_ctrl->txq.db_nc = 1;
331 * Initialize Tx UAR registers for primary process.
334 * Pointer to Tx queue control structure.
337 txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl)
339 struct mlx5_priv *priv = txq_ctrl->priv;
340 struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
341 const size_t page_size = sysconf(_SC_PAGESIZE);
343 unsigned int lock_idx;
346 if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
348 assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
350 ppriv->uar_table[txq_ctrl->txq.idx] = txq_ctrl->bf_reg;
351 txq_uar_ncattr_init(txq_ctrl, page_size);
353 /* Assign a UAR lock according to the UAR page number. */
354 lock_idx = (txq_ctrl->uar_mmap_offset / page_size) &
355 MLX5_UAR_PAGE_NUM_MASK;
356 txq_ctrl->txq.uar_lock = &priv->uar_lock[lock_idx];
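/*
 * Illustrative note: queues whose UAR registers land on the same OS page
 * compute the same lock_idx (the UAR page number masked by
 * MLX5_UAR_PAGE_NUM_MASK) and therefore share one lock, while queues on
 * different pages keep independent locks.
 */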
361 * Remap UAR register of a Tx queue for secondary process.
363 * The remapped address is stored in the table in the process-private
364 * structure of the device, indexed by queue index.
367 * Pointer to Tx queue control structure.
369 * Verbs file descriptor to map UAR pages.
372 * 0 on success, a negative errno value otherwise and rte_errno is set.
375 txq_uar_init_secondary(struct mlx5_txq_ctrl *txq_ctrl, int fd)
377 struct mlx5_priv *priv = txq_ctrl->priv;
378 struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
379 struct mlx5_txq_data *txq = &txq_ctrl->txq;
383 const size_t page_size = sysconf(_SC_PAGESIZE);
385 if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
389 * As in rdma-core, UARs are mapped at the granularity of the OS page
390 * size. Refer to the libmlx5 function mlx5_init_context().
392 uar_va = (uintptr_t)txq_ctrl->bf_reg;
393 offset = uar_va & (page_size - 1); /* Offset in page. */
394 addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
395 txq_ctrl->uar_mmap_offset);
396 if (addr == MAP_FAILED) {
398 "port %u mmap failed for BF reg of txq %u",
399 txq->port_id, txq->idx);
403 addr = RTE_PTR_ADD(addr, offset);
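/*
 * Worked example (hypothetical addresses): with 4 KiB pages and a
 * primary-process register at uar_va = 0x7f00deadb234, the in-page
 * offset is 0x234; the secondary maps the whole page at a new address
 * and adds 0x234 back, ending up at the same hardware register.
 */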
404 ppriv->uar_table[txq->idx] = addr;
405 txq_uar_ncattr_init(txq_ctrl, page_size);
410 * Unmap UAR register of a Tx queue for secondary process.
413 * Pointer to Tx queue control structure.
416 txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl)
418 struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(txq_ctrl->priv));
419 const size_t page_size = sysconf(_SC_PAGESIZE);
422 if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
424 addr = ppriv->uar_table[txq_ctrl->txq.idx];
425 munmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
429 * Initialize Tx UAR registers for secondary process.
432 * Pointer to Ethernet device.
434 * Verbs file descriptor to map UAR pages.
437 * 0 on success, a negative errno value otherwise and rte_errno is set.
440 mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
442 struct mlx5_priv *priv = dev->data->dev_private;
443 struct mlx5_txq_data *txq;
444 struct mlx5_txq_ctrl *txq_ctrl;
448 assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
449 for (i = 0; i != priv->txqs_n; ++i) {
450 if (!(*priv->txqs)[i])
452 txq = (*priv->txqs)[i];
453 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
454 if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
456 assert(txq->idx == (uint16_t)i);
457 ret = txq_uar_init_secondary(txq_ctrl, fd);
465 if (!(*priv->txqs)[i])
467 txq = (*priv->txqs)[i];
468 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
469 txq_uar_uninit_secondary(txq_ctrl);
475 * Create the Tx hairpin queue object.
478 * Pointer to Ethernet device.
480 * Queue index in DPDK Tx queue array.
483 * The hairpin DevX object initialised, NULL otherwise and rte_errno is set.
485 static struct mlx5_txq_obj *
486 mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
488 struct mlx5_priv *priv = dev->data->dev_private;
489 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
490 struct mlx5_txq_ctrl *txq_ctrl =
491 container_of(txq_data, struct mlx5_txq_ctrl, txq);
492 struct mlx5_devx_create_sq_attr attr = { 0 };
493 struct mlx5_txq_obj *tmpl = NULL;
497 assert(!txq_ctrl->obj);
498 tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
502 "port %u Tx queue %u cannot allocate memory resources",
503 dev->data->port_id, txq_data->idx);
507 tmpl->type = MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN;
508 tmpl->txq_ctrl = txq_ctrl;
511 /* Workaround for hairpin startup */
512 attr.wq_attr.log_hairpin_num_packets = log2above(32);
513 /* Workaround for packets larger than 1KB */
514 attr.wq_attr.log_hairpin_data_sz =
515 priv->config.hca_attr.log_max_hairpin_wq_data_sz;
516 attr.tis_num = priv->sh->tis->id;
517 tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->ctx, &attr);
520 "port %u tx hairpin queue %u can't create sq object",
521 dev->data->port_id, idx);
525 DRV_LOG(DEBUG, "port %u txq %u updated with %p", dev->data->port_id,
527 rte_atomic32_inc(&tmpl->refcnt);
528 LIST_INSERT_HEAD(&priv->txqsobj, tmpl, next);
531 ret = rte_errno; /* Save rte_errno before cleanup. */
533 mlx5_devx_cmd_destroy(tmpl->tis);
535 mlx5_devx_cmd_destroy(tmpl->sq);
536 rte_errno = ret; /* Restore rte_errno. */
541 * Create the Tx queue Verbs object.
544 * Pointer to Ethernet device.
546 * Queue index in DPDK Tx queue array.
548 * Type of the Tx queue object to create.
551 * The Verbs object initialised, NULL otherwise and rte_errno is set.
553 struct mlx5_txq_obj *
554 mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
555 enum mlx5_txq_obj_type type)
557 struct mlx5_priv *priv = dev->data->dev_private;
558 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
559 struct mlx5_txq_ctrl *txq_ctrl =
560 container_of(txq_data, struct mlx5_txq_ctrl, txq);
561 struct mlx5_txq_obj tmpl;
562 struct mlx5_txq_obj *txq_obj = NULL;
564 struct ibv_qp_init_attr_ex init;
565 struct ibv_cq_init_attr_ex cq;
566 struct ibv_qp_attr mod;
569 struct mlx5dv_qp qp = { .comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET };
570 struct mlx5dv_cq cq_info;
571 struct mlx5dv_obj obj;
572 const int desc = 1 << txq_data->elts_n;
575 if (type == MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN)
576 return mlx5_txq_obj_hairpin_new(dev, idx);
577 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
578 /* If using DevX, an additional mask is needed to read the tisn value. */
579 if (priv->config.devx && !priv->sh->tdn)
580 qp.comp_mask |= MLX5DV_QP_MASK_RAW_QP_HANDLES;
583 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
584 priv->verbs_alloc_ctx.obj = txq_ctrl;
585 if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
587 "port %u MLX5_ENABLE_CQE_COMPRESSION must never be set",
592 memset(&tmpl, 0, sizeof(struct mlx5_txq_obj));
593 attr.cq = (struct ibv_cq_init_attr_ex){
596 cqe_n = desc / MLX5_TX_COMP_THRESH +
597 1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
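/*
 * Worked example (assuming the default MLX5_TX_COMP_THRESH of 32 and
 * MLX5_TX_COMP_THRESH_INLINE_DIV of 8): a 1024-descriptor queue requests
 * 1024 / 32 + 1 + 8 = 41 CQEs; the CQ created below may still be larger
 * because the provider rounds the size up.
 */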
598 tmpl.cq = mlx5_glue->create_cq(priv->sh->ctx, cqe_n, NULL, NULL, 0);
599 if (tmpl.cq == NULL) {
600 DRV_LOG(ERR, "port %u Tx queue %u CQ creation failure",
601 dev->data->port_id, idx);
605 attr.init = (struct ibv_qp_init_attr_ex){
606 /* CQ to be associated with the send queue. */
608 /* CQ to be associated with the receive queue. */
611 /* Max number of outstanding WRs. */
613 ((priv->sh->device_attr.orig_attr.max_qp_wr <
615 priv->sh->device_attr.orig_attr.max_qp_wr :
618 * Max number of scatter/gather elements in a WR,
619 * must be 1 to prevent libmlx5 from trying to affect
620 * too much memory. TX gather is not impacted by the
621 * device_attr.max_sge limit and will still work
626 .qp_type = IBV_QPT_RAW_PACKET,
628 * Do *NOT* enable this, completion events are managed per
633 .comp_mask = IBV_QP_INIT_ATTR_PD,
635 if (txq_data->inlen_send)
636 attr.init.cap.max_inline_data = txq_ctrl->max_inline_data;
637 if (txq_data->tso_en) {
638 attr.init.max_tso_header = txq_ctrl->max_tso_header;
639 attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
641 tmpl.qp = mlx5_glue->create_qp_ex(priv->sh->ctx, &attr.init);
642 if (tmpl.qp == NULL) {
643 DRV_LOG(ERR, "port %u Tx queue %u QP creation failure",
644 dev->data->port_id, idx);
648 attr.mod = (struct ibv_qp_attr){
649 /* Move the QP to this state. */
650 .qp_state = IBV_QPS_INIT,
651 /* IB device port number. */
652 .port_num = (uint8_t)priv->ibv_port,
654 ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod,
655 (IBV_QP_STATE | IBV_QP_PORT));
658 "port %u Tx queue %u QP state to IBV_QPS_INIT failed",
659 dev->data->port_id, idx);
663 attr.mod = (struct ibv_qp_attr){
664 .qp_state = IBV_QPS_RTR
666 ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
669 "port %u Tx queue %u QP state to IBV_QPS_RTR failed",
670 dev->data->port_id, idx);
674 attr.mod.qp_state = IBV_QPS_RTS;
675 ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
678 "port %u Tx queue %u QP state to IBV_QPS_RTS failed",
679 dev->data->port_id, idx);
683 txq_obj = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_obj), 0,
686 DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory",
687 dev->data->port_id, idx);
692 obj.cq.out = &cq_info;
695 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
700 if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
702 "port %u wrong MLX5_CQE_SIZE environment variable"
703 " value: it should be set to %u",
704 dev->data->port_id, RTE_CACHE_LINE_SIZE);
708 txq_data->cqe_n = log2above(cq_info.cqe_cnt);
709 txq_data->cqe_s = 1 << txq_data->cqe_n;
710 txq_data->cqe_m = txq_data->cqe_s - 1;
711 txq_data->qp_num_8s = tmpl.qp->qp_num << 8;
712 txq_data->wqes = qp.sq.buf;
713 txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
714 txq_data->wqe_s = 1 << txq_data->wqe_n;
715 txq_data->wqe_m = txq_data->wqe_s - 1;
716 txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
717 txq_data->qp_db = &qp.dbrec[MLX5_SND_DBR];
718 txq_data->cq_db = cq_info.dbrec;
719 txq_data->cqes = (volatile struct mlx5_cqe *)cq_info.buf;
724 txq_data->wqe_ci = 0;
725 txq_data->wqe_pi = 0;
726 txq_data->wqe_comp = 0;
727 txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
728 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
730 * If using DevX, we need to query and store the TIS transport domain value.
731 * This is done once per port.
732 * This value will be used on Rx, when creating the matching TIR.
734 if (priv->config.devx && !priv->sh->tdn) {
735 ret = mlx5_devx_cmd_qp_query_tis_td(tmpl.qp, qp.tisn,
738 DRV_LOG(ERR, "Failed to query port %u Tx queue %u QP TIS "
739 "transport domain", dev->data->port_id, idx);
743 DRV_LOG(DEBUG, "port %u Tx queue %u TIS number %d "
744 "transport domain %d", dev->data->port_id,
745 idx, qp.tisn, priv->sh->tdn);
749 txq_obj->qp = tmpl.qp;
750 txq_obj->cq = tmpl.cq;
751 rte_atomic32_inc(&txq_obj->refcnt);
752 txq_ctrl->bf_reg = qp.bf.reg;
753 if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
754 txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
755 DRV_LOG(DEBUG, "port %u: uar_mmap_offset 0x%"PRIx64,
756 dev->data->port_id, txq_ctrl->uar_mmap_offset);
759 "port %u failed to retrieve UAR info, invalid"
765 txq_uar_init(txq_ctrl);
766 LIST_INSERT_HEAD(&priv->txqsobj, txq_obj, next);
767 txq_obj->txq_ctrl = txq_ctrl;
768 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
771 ret = rte_errno; /* Save rte_errno before cleanup. */
773 claim_zero(mlx5_glue->destroy_cq(tmpl.cq));
775 claim_zero(mlx5_glue->destroy_qp(tmpl.qp));
778 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
779 rte_errno = ret; /* Restore rte_errno. */
784 * Get a Tx queue Verbs object.
787 * Pointer to Ethernet device.
789 * Queue index in DPDK Tx queue array.
792 * The Verbs object if it exists.
794 struct mlx5_txq_obj *
795 mlx5_txq_obj_get(struct rte_eth_dev *dev, uint16_t idx)
797 struct mlx5_priv *priv = dev->data->dev_private;
798 struct mlx5_txq_ctrl *txq_ctrl;
800 if (idx >= priv->txqs_n)
802 if (!(*priv->txqs)[idx])
804 txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
806 rte_atomic32_inc(&txq_ctrl->obj->refcnt);
807 return txq_ctrl->obj;
811 * Release a Tx Verbs queue object.
814 * Verbs Tx queue object.
817 * 1 while a reference on it exists, 0 when freed.
820 mlx5_txq_obj_release(struct mlx5_txq_obj *txq_obj)
823 if (rte_atomic32_dec_and_test(&txq_obj->refcnt)) {
824 if (txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN) {
826 claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
828 claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
829 claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
831 LIST_REMOVE(txq_obj, next);
839 * Verify that the Verbs Tx queue list is empty.
842 * Pointer to Ethernet device.
845 * The number of objects not released.
848 mlx5_txq_obj_verify(struct rte_eth_dev *dev)
850 struct mlx5_priv *priv = dev->data->dev_private;
852 struct mlx5_txq_obj *txq_obj;
854 LIST_FOREACH(txq_obj, &priv->txqsobj, next) {
855 DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
856 dev->data->port_id, txq_obj->txq_ctrl->txq.idx);
863 * Calculate the total number of WQEBBs for a Tx queue.
865 * Simplified version of calc_sq_size() in rdma-core.
868 * Pointer to Tx queue control structure.
871 * The number of WQEBBs.
874 txq_calc_wqebb_cnt(struct mlx5_txq_ctrl *txq_ctrl)
876 unsigned int wqe_size;
877 const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
879 wqe_size = MLX5_WQE_CSEG_SIZE +
882 MLX5_ESEG_MIN_INLINE_SIZE +
883 txq_ctrl->max_inline_data;
884 return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;
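/*
 * Worked example (illustrative, assuming the usual 64 B WQEBB for
 * MLX5_WQE_SIZE): if wqe_size above works out to 256 B for a
 * 512-descriptor queue, the function returns
 * rte_align32pow2(256 * 512) / 64 = 131072 / 64 = 2048 WQEBBs.
 */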
888 * Calculate the maximal inline data size for a Tx queue.
891 * Pointer to Tx queue control structure.
894 * The maximal inline data size.
897 txq_calc_inline_max(struct mlx5_txq_ctrl *txq_ctrl)
899 const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
900 struct mlx5_priv *priv = txq_ctrl->priv;
901 unsigned int wqe_size;
903 wqe_size = priv->sh->device_attr.orig_attr.max_qp_wr / desc;
907 * This calculation is derived from the source of
908 * mlx5_calc_send_wqe() in the rdma-core library.
910 wqe_size = wqe_size * MLX5_WQE_SIZE -
915 MLX5_DSEG_MIN_INLINE_SIZE;
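/*
 * Worked example (illustrative values): with max_qp_wr = 32768 and a
 * 1024-descriptor queue, each descriptor may own at most
 * 32768 / 1024 = 32 WQEBBs, i.e. 32 * 64 B = 2 KiB per WQE before the
 * fixed segment overhead above is subtracted.
 */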
920 * Set Tx queue parameters from device configuration.
923 * Pointer to Tx queue control structure.
926 txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
928 struct mlx5_priv *priv = txq_ctrl->priv;
929 struct mlx5_dev_config *config = &priv->config;
930 unsigned int inlen_send; /* Inline data for ordinary SEND. */
931 unsigned int inlen_empw; /* Inline data for enhanced MPW. */
932 unsigned int inlen_mode; /* Minimal required Inline data. */
933 unsigned int txqs_inline; /* Min Tx queues to enable inline. */
934 uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
935 bool tso = txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
936 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
937 DEV_TX_OFFLOAD_GRE_TNL_TSO |
938 DEV_TX_OFFLOAD_IP_TNL_TSO |
939 DEV_TX_OFFLOAD_UDP_TNL_TSO);
943 if (config->txqs_inline == MLX5_ARG_UNSET)
945 #if defined(RTE_ARCH_ARM64)
946 (priv->pci_dev->id.device_id ==
947 PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) ?
948 MLX5_INLINE_MAX_TXQS_BLUEFIELD :
950 MLX5_INLINE_MAX_TXQS;
952 txqs_inline = (unsigned int)config->txqs_inline;
953 inlen_send = (config->txq_inline_max == MLX5_ARG_UNSET) ?
954 MLX5_SEND_DEF_INLINE_LEN :
955 (unsigned int)config->txq_inline_max;
956 inlen_empw = (config->txq_inline_mpw == MLX5_ARG_UNSET) ?
957 MLX5_EMPW_DEF_INLINE_LEN :
958 (unsigned int)config->txq_inline_mpw;
959 inlen_mode = (config->txq_inline_min == MLX5_ARG_UNSET) ?
960 0 : (unsigned int)config->txq_inline_min;
961 if (config->mps != MLX5_MPW_ENHANCED && config->mps != MLX5_MPW)
964 * If a minimal amount of data to inline is requested,
965 * we MUST enable inlining. This is the case for ConnectX-4,
966 * which usually requires L2 to be inlined for correct operation,
967 * and ConnectX-4 Lx, which requires L2-L4 to be inlined to
968 * support E-Switch flows.
971 if (inlen_mode <= MLX5_ESEG_MIN_INLINE_SIZE) {
973 * Optimize minimal inlining for single
974 * segment packets to fill one WQEBB
977 temp = MLX5_ESEG_MIN_INLINE_SIZE;
979 temp = inlen_mode - MLX5_ESEG_MIN_INLINE_SIZE;
980 temp = RTE_ALIGN(temp, MLX5_WSEG_SIZE) +
981 MLX5_ESEG_MIN_INLINE_SIZE;
982 temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
984 if (temp != inlen_mode) {
986 "port %u minimal required inline setting"
987 " aligned from %u to %u",
988 PORT_ID(priv), inlen_mode, temp);
993 * If the port is configured to support VLAN insertion and the device
994 * does not support this feature in HW (for NICs before ConnectX-5,
995 * or when the wqe_vlan_insert flag is not set), we must enable
996 * data inline on all queues because it is supported by single
999 txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
1000 vlan_inline = (dev_txoff & DEV_TX_OFFLOAD_VLAN_INSERT) &&
1001 !config->hw_vlan_insert;
1003 * If there are only a few Tx queues, saving CPU cycles is
1004 * prioritized and data inlining is disabled entirely.
1006 if (inlen_send && priv->txqs_n >= txqs_inline) {
1008 * The data sent with an ordinary MLX5_OPCODE_SEND
1009 * may be inlined in the Ethernet Segment; align the
1010 * length accordingly to fit entire WQEBBs.
1012 temp = RTE_MAX(inlen_send,
1013 MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE);
1014 temp -= MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
1015 temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
1016 temp += MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
1017 temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
1018 MLX5_ESEG_MIN_INLINE_SIZE -
1019 MLX5_WQE_CSEG_SIZE -
1020 MLX5_WQE_ESEG_SIZE -
1021 MLX5_WQE_DSEG_SIZE * 2);
1022 temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
1023 temp = RTE_MAX(temp, inlen_mode);
1024 if (temp != inlen_send) {
1026 "port %u ordinary send inline setting"
1027 " aligned from %u to %u",
1028 PORT_ID(priv), inlen_send, temp);
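/*
 * Worked example of the alignment above (assuming
 * MLX5_ESEG_MIN_INLINE_SIZE = 18 and MLX5_WQE_DSEG_SIZE = 16): a
 * requested inlen_send of 200 becomes max(200, 34) = 200, minus 34 is
 * 166, aligned up to the 64 B MLX5_WQE_SIZE is 192, plus 34 back is 226,
 * which is then clamped by the maximum lengths and raised to inlen_mode.
 */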
1032 * Not aligned to cache lines, but to WQEs.
1033 * The first bytes of the data (initial alignment)
1034 * are going to be copied explicitly at the
1035 * beginning of the inlining buffer in the Ethernet
1038 assert(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
1039 assert(inlen_send <= MLX5_WQE_SIZE_MAX +
1040 MLX5_ESEG_MIN_INLINE_SIZE -
1041 MLX5_WQE_CSEG_SIZE -
1042 MLX5_WQE_ESEG_SIZE -
1043 MLX5_WQE_DSEG_SIZE * 2);
1044 } else if (inlen_mode) {
1046 * If minimal inlining is requested, we must
1047 * enable inlining in general, regardless of the
1048 * number of configured queues. Ignore the
1049 * txq_inline_max devarg; this is not
1050 * full-featured inline.
1052 inlen_send = inlen_mode;
1054 } else if (vlan_inline) {
1056 * Hardware does not report an offload for
1057 * VLAN insertion, so we must enable data inline
1058 * to implement the feature in software.
1060 inlen_send = MLX5_ESEG_MIN_INLINE_SIZE;
1066 txq_ctrl->txq.inlen_send = inlen_send;
1067 txq_ctrl->txq.inlen_mode = inlen_mode;
1068 txq_ctrl->txq.inlen_empw = 0;
1069 if (inlen_send && inlen_empw && priv->txqs_n >= txqs_inline) {
1071 * The data sent with MLX5_OPCODE_ENHANCED_MPSW
1072 * may be inlined in the Data Segment; align the
1073 * length accordingly to fit entire WQEBBs.
1075 temp = RTE_MAX(inlen_empw,
1076 MLX5_WQE_SIZE + MLX5_DSEG_MIN_INLINE_SIZE);
1077 temp -= MLX5_DSEG_MIN_INLINE_SIZE;
1078 temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
1079 temp += MLX5_DSEG_MIN_INLINE_SIZE;
1080 temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
1081 MLX5_DSEG_MIN_INLINE_SIZE -
1082 MLX5_WQE_CSEG_SIZE -
1083 MLX5_WQE_ESEG_SIZE -
1084 MLX5_WQE_DSEG_SIZE);
1085 temp = RTE_MIN(temp, MLX5_EMPW_MAX_INLINE_LEN);
1086 if (temp != inlen_empw) {
1088 "port %u enhanced empw inline setting"
1089 " aligned from %u to %u",
1090 PORT_ID(priv), inlen_empw, temp);
1093 assert(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
1094 assert(inlen_empw <= MLX5_WQE_SIZE_MAX +
1095 MLX5_DSEG_MIN_INLINE_SIZE -
1096 MLX5_WQE_CSEG_SIZE -
1097 MLX5_WQE_ESEG_SIZE -
1098 MLX5_WQE_DSEG_SIZE);
1099 txq_ctrl->txq.inlen_empw = inlen_empw;
1101 txq_ctrl->max_inline_data = RTE_MAX(inlen_send, inlen_empw);
1103 txq_ctrl->max_tso_header = MLX5_MAX_TSO_HEADER;
1104 txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->max_inline_data,
1105 MLX5_MAX_TSO_HEADER);
1106 txq_ctrl->txq.tso_en = 1;
1108 txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
1109 txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
1110 DEV_TX_OFFLOAD_UDP_TNL_TSO |
1111 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
1112 txq_ctrl->txq.offloads) && config->swp;
1116 * Adjust Tx queue data inline parameters for large queue sizes.
1117 * The data inline feature requires multiple WQEs to fit the packets,
1118 * and if a large number of Tx descriptors is requested by the application
1119 * the total WQE amount may exceed the hardware capabilities. If the
1120 * default inline settings are used we can try to adjust them to
1121 * meet the hardware requirements without exceeding the queue size.
1124 * Pointer to Tx queue control structure.
1127 * Zero on success, otherwise the parameters cannot be adjusted.
1130 txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
1132 struct mlx5_priv *priv = txq_ctrl->priv;
1133 struct mlx5_dev_config *config = &priv->config;
1134 unsigned int max_inline;
1136 max_inline = txq_calc_inline_max(txq_ctrl);
1137 if (!txq_ctrl->txq.inlen_send) {
1139 * Inline data feature is not engaged at all.
1140 * There is nothing to adjust.
1144 if (txq_ctrl->max_inline_data <= max_inline) {
1146 * The requested inline data length does not
1147 * exceed queue capabilities.
1151 if (txq_ctrl->txq.inlen_mode > max_inline) {
1153 "minimal data inline requirements (%u) are not"
1154 " satisfied (%u) on port %u, try the smaller"
1155 " Tx queue size (%d)",
1156 txq_ctrl->txq.inlen_mode, max_inline,
1157 priv->dev_data->port_id,
1158 priv->sh->device_attr.orig_attr.max_qp_wr);
1161 if (txq_ctrl->txq.inlen_send > max_inline &&
1162 config->txq_inline_max != MLX5_ARG_UNSET &&
1163 config->txq_inline_max > (int)max_inline) {
1165 "txq_inline_max requirements (%u) are not"
1166 " satisfied (%u) on port %u, try the smaller"
1167 " Tx queue size (%d)",
1168 txq_ctrl->txq.inlen_send, max_inline,
1169 priv->dev_data->port_id,
1170 priv->sh->device_attr.orig_attr.max_qp_wr);
1173 if (txq_ctrl->txq.inlen_empw > max_inline &&
1174 config->txq_inline_mpw != MLX5_ARG_UNSET &&
1175 config->txq_inline_mpw > (int)max_inline) {
1177 "txq_inline_mpw requirements (%u) are not"
1178 " satisfied (%u) on port %u, try the smaller"
1179 " Tx queue size (%d)",
1180 txq_ctrl->txq.inlen_empw, max_inline,
1181 priv->dev_data->port_id,
1182 priv->sh->device_attr.orig_attr.max_qp_wr);
1185 if (txq_ctrl->txq.tso_en && max_inline < MLX5_MAX_TSO_HEADER) {
1187 "tso header inline requirements (%u) are not"
1188 " satisfied (%u) on port %u, try the smaller"
1189 " Tx queue size (%d)",
1190 MLX5_MAX_TSO_HEADER, max_inline,
1191 priv->dev_data->port_id,
1192 priv->sh->device_attr.orig_attr.max_qp_wr);
1195 if (txq_ctrl->txq.inlen_send > max_inline) {
1197 "adjust txq_inline_max (%u->%u)"
1198 " due to large Tx queue on port %u",
1199 txq_ctrl->txq.inlen_send, max_inline,
1200 priv->dev_data->port_id);
1201 txq_ctrl->txq.inlen_send = max_inline;
1203 if (txq_ctrl->txq.inlen_empw > max_inline) {
1205 "adjust txq_inline_mpw (%u->%u)"
1206 " due to large Tx queue on port %u",
1207 txq_ctrl->txq.inlen_empw, max_inline,
1208 priv->dev_data->port_id);
1209 txq_ctrl->txq.inlen_empw = max_inline;
1211 txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->txq.inlen_send,
1212 txq_ctrl->txq.inlen_empw);
1213 assert(txq_ctrl->max_inline_data <= max_inline);
1214 assert(txq_ctrl->txq.inlen_mode <= max_inline);
1215 assert(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
1216 assert(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw ||
1217 !txq_ctrl->txq.inlen_empw);
1225 * Create a DPDK Tx queue.
1228 * Pointer to Ethernet device.
1232 * Number of descriptors to configure in queue.
1234 * NUMA socket on which memory must be allocated.
1236 * Thresholds parameters.
1239 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1241 struct mlx5_txq_ctrl *
1242 mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1243 unsigned int socket, const struct rte_eth_txconf *conf)
1245 struct mlx5_priv *priv = dev->data->dev_private;
1246 struct mlx5_txq_ctrl *tmpl;
1248 tmpl = rte_calloc_socket("TXQ", 1,
1250 desc * sizeof(struct rte_mbuf *),
1256 if (mlx5_mr_btree_init(&tmpl->txq.mr_ctrl.cache_bh,
1257 MLX5_MR_BTREE_CACHE_N, socket)) {
1258 /* rte_errno is already set. */
1261 /* Save pointer of global generation number to check memory event. */
1262 tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->sh->mr.dev_gen;
1263 assert(desc > MLX5_TX_COMP_THRESH);
1264 tmpl->txq.offloads = conf->offloads |
1265 dev->data->dev_conf.txmode.offloads;
1267 tmpl->socket = socket;
1268 tmpl->txq.elts_n = log2above(desc);
1269 tmpl->txq.elts_s = desc;
1270 tmpl->txq.elts_m = desc - 1;
1271 tmpl->txq.port_id = dev->data->port_id;
1272 tmpl->txq.idx = idx;
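/*
 * Illustrative values: for desc = 1024 the fields above become
 * elts_n = log2above(1024) = 10, elts_s = 1024 and elts_m = 1023,
 * matching the power-of-two sizing enforced in mlx5_tx_queue_pre_setup().
 */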
1273 txq_set_params(tmpl);
1274 if (txq_adjust_params(tmpl))
1276 if (txq_calc_wqebb_cnt(tmpl) >
1277 priv->sh->device_attr.orig_attr.max_qp_wr) {
1279 "port %u Tx WQEBB count (%d) exceeds the limit (%d),"
1280 " try smaller queue size",
1281 dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
1282 priv->sh->device_attr.orig_attr.max_qp_wr);
1286 rte_atomic32_inc(&tmpl->refcnt);
1287 tmpl->type = MLX5_TXQ_TYPE_STANDARD;
1288 LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
1296 * Create a DPDK Tx hairpin queue.
1299 * Pointer to Ethernet device.
1303 * Number of descriptors to configure in queue.
1304 * @param hairpin_conf
1305 * The hairpin configuration.
1308 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1310 struct mlx5_txq_ctrl *
1311 mlx5_txq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1312 const struct rte_eth_hairpin_conf *hairpin_conf)
1314 struct mlx5_priv *priv = dev->data->dev_private;
1315 struct mlx5_txq_ctrl *tmpl;
1317 tmpl = rte_calloc_socket("TXQ", 1,
1318 sizeof(*tmpl), 0, SOCKET_ID_ANY);
1324 tmpl->socket = SOCKET_ID_ANY;
1325 tmpl->txq.elts_n = log2above(desc);
1326 tmpl->txq.port_id = dev->data->port_id;
1327 tmpl->txq.idx = idx;
1328 tmpl->hairpin_conf = *hairpin_conf;
1329 tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
1330 rte_atomic32_inc(&tmpl->refcnt);
1331 LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
1339 * Pointer to Ethernet device.
1344 * A pointer to the queue if it exists.
1346 struct mlx5_txq_ctrl *
1347 mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
1349 struct mlx5_priv *priv = dev->data->dev_private;
1350 struct mlx5_txq_ctrl *ctrl = NULL;
1352 if ((*priv->txqs)[idx]) {
1353 ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl,
1355 mlx5_txq_obj_get(dev, idx);
1356 rte_atomic32_inc(&ctrl->refcnt);
1362 * Release a Tx queue.
1365 * Pointer to Ethernet device.
1370 * 1 while a reference on it exists, 0 when freed.
1373 mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
1375 struct mlx5_priv *priv = dev->data->dev_private;
1376 struct mlx5_txq_ctrl *txq;
1378 if (!(*priv->txqs)[idx])
1380 txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
1381 if (txq->obj && !mlx5_txq_obj_release(txq->obj))
1383 if (rte_atomic32_dec_and_test(&txq->refcnt)) {
1385 mlx5_mr_btree_free(&txq->txq.mr_ctrl.cache_bh);
1386 LIST_REMOVE(txq, next);
1388 (*priv->txqs)[idx] = NULL;
1395 * Verify if the queue can be released.
1398 * Pointer to Ethernet device.
1403 * 1 if the queue can be released.
1406 mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
1408 struct mlx5_priv *priv = dev->data->dev_private;
1409 struct mlx5_txq_ctrl *txq;
1411 if (!(*priv->txqs)[idx])
1413 txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
1414 return (rte_atomic32_read(&txq->refcnt) == 1);
1418 * Verify that the Tx queue list is empty.
1421 * Pointer to Ethernet device.
1424 * The number of objects not released.
1427 mlx5_txq_verify(struct rte_eth_dev *dev)
1429 struct mlx5_priv *priv = dev->data->dev_private;
1430 struct mlx5_txq_ctrl *txq_ctrl;
1433 LIST_FOREACH(txq_ctrl, &priv->txqsctrl, next) {
1434 DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
1435 dev->data->port_id, txq_ctrl->txq.idx);