1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
16 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
18 #pragma GCC diagnostic ignored "-Wpedantic"
20 #include <infiniband/verbs.h>
22 #pragma GCC diagnostic error "-Wpedantic"
26 #include <rte_malloc.h>
27 #include <rte_ethdev_driver.h>
28 #include <rte_common.h>
30 #include "mlx5_utils.h"
31 #include "mlx5_defs.h"
33 #include "mlx5_rxtx.h"
34 #include "mlx5_autoconf.h"
35 #include "mlx5_glue.h"
38 * Allocate TX queue elements.
41 * Pointer to TX queue structure.
44 txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
46 const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
49 for (i = 0; (i != elts_n); ++i)
50 txq_ctrl->txq.elts[i] = NULL;
51 DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
52 PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx, elts_n);
53 txq_ctrl->txq.elts_head = 0;
54 txq_ctrl->txq.elts_tail = 0;
55 txq_ctrl->txq.elts_comp = 0;
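	/* Reset the ring indices and the completion counter so the queue starts empty. */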
59 * Free TX queue elements.
62 * Pointer to TX queue structure.
65 txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
67 const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
68 const uint16_t elts_m = elts_n - 1;
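	/* elts_n is a power of two, so elts_m serves as the ring index mask below. */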
69 uint16_t elts_head = txq_ctrl->txq.elts_head;
70 uint16_t elts_tail = txq_ctrl->txq.elts_tail;
71 struct rte_mbuf *(*elts)[elts_n] = &txq_ctrl->txq.elts;
73 DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
74 PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx);
75 txq_ctrl->txq.elts_head = 0;
76 txq_ctrl->txq.elts_tail = 0;
77 txq_ctrl->txq.elts_comp = 0;
79 while (elts_tail != elts_head) {
80 struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
83 rte_pktmbuf_free_seg(elt);
86 memset(&(*elts)[elts_tail & elts_m],
88 sizeof((*elts)[elts_tail & elts_m]));
95 * Returns the per-port supported offloads.
98 * Pointer to Ethernet device.
101 * Supported Tx offloads.
104 mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
106 struct mlx5_priv *priv = dev->data->dev_private;
107 uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
108 DEV_TX_OFFLOAD_VLAN_INSERT);
109 struct mlx5_dev_config *config = &priv->config;
112 offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
113 DEV_TX_OFFLOAD_UDP_CKSUM |
114 DEV_TX_OFFLOAD_TCP_CKSUM);
116 offloads |= DEV_TX_OFFLOAD_TCP_TSO;
119 offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
121 offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
122 DEV_TX_OFFLOAD_UDP_TNL_TSO);
124 if (config->tunnel_en) {
126 offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
128 offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
129 DEV_TX_OFFLOAD_GRE_TNL_TSO);
135  * Tx queue pre-setup checks.
138 * Pointer to Ethernet device structure.
142 * Number of descriptors to configure in queue.
145 * 0 on success, a negative errno value otherwise and rte_errno is set.
148 mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc)
150 struct mlx5_priv *priv = dev->data->dev_private;
152 if (desc <= MLX5_TX_COMP_THRESH) {
154 "port %u number of descriptors requested for Tx queue"
155 " %u must be higher than MLX5_TX_COMP_THRESH, using %u"
157 dev->data->port_id, idx, MLX5_TX_COMP_THRESH + 1, desc);
158 desc = MLX5_TX_COMP_THRESH + 1;
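		/* The ring must hold more entries than the completion threshold,
		 * presumably so that a completion can be requested before it fills up.
		 */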
160 if (!rte_is_power_of_2(desc)) {
161 desc = 1 << log2above(desc);
163 "port %u increased number of descriptors in Tx queue"
164 " %u to the next power of two (%d)",
165 dev->data->port_id, idx, desc);
167 DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
168 dev->data->port_id, idx, desc);
169 if (idx >= priv->txqs_n) {
170 DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
171 dev->data->port_id, idx, priv->txqs_n);
172 rte_errno = EOVERFLOW;
175 if (!mlx5_txq_releasable(dev, idx)) {
177 DRV_LOG(ERR, "port %u unable to release queue index %u",
178 dev->data->port_id, idx);
181 mlx5_txq_release(dev, idx);
185 * DPDK callback to configure a TX queue.
188 * Pointer to Ethernet device structure.
192 * Number of descriptors to configure in queue.
194 * NUMA socket on which memory must be allocated.
196 * Thresholds parameters.
199 * 0 on success, a negative errno value otherwise and rte_errno is set.
202 mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
203 unsigned int socket, const struct rte_eth_txconf *conf)
205 struct mlx5_priv *priv = dev->data->dev_private;
206 struct mlx5_txq_data *txq = (*priv->txqs)[idx];
207 struct mlx5_txq_ctrl *txq_ctrl =
208 container_of(txq, struct mlx5_txq_ctrl, txq);
211 res = mlx5_tx_queue_pre_setup(dev, idx, desc);
214 txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
216 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
217 dev->data->port_id, idx);
220 DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
221 dev->data->port_id, idx);
222 (*priv->txqs)[idx] = &txq_ctrl->txq;
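	/* Publish the data-path structure; the Tx burst routines pick it up from this array. */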
227 * DPDK callback to configure a TX hairpin queue.
230 * Pointer to Ethernet device structure.
234 * Number of descriptors to configure in queue.
235 * @param[in] hairpin_conf
236 * The hairpin binding configuration.
239 * 0 on success, a negative errno value otherwise and rte_errno is set.
242 mlx5_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
244 const struct rte_eth_hairpin_conf *hairpin_conf)
246 struct mlx5_priv *priv = dev->data->dev_private;
247 struct mlx5_txq_data *txq = (*priv->txqs)[idx];
248 struct mlx5_txq_ctrl *txq_ctrl =
249 container_of(txq, struct mlx5_txq_ctrl, txq);
252 res = mlx5_tx_queue_pre_setup(dev, idx, desc);
255 if (hairpin_conf->peer_count != 1 ||
256 hairpin_conf->peers[0].port != dev->data->port_id ||
257 hairpin_conf->peers[0].queue >= priv->rxqs_n) {
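		/* Only a single hairpin peer, on this same port and bound to a
		 * valid Rx queue, is supported here.
		 */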
258 		DRV_LOG(ERR, "port %u unable to set up hairpin queue index %u:"
259 			" invalid hairpin configuration", dev->data->port_id,
264 txq_ctrl = mlx5_txq_hairpin_new(dev, idx, desc, hairpin_conf);
266 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
267 dev->data->port_id, idx);
270 DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
271 dev->data->port_id, idx);
272 (*priv->txqs)[idx] = &txq_ctrl->txq;
273 txq_ctrl->type = MLX5_TXQ_TYPE_HAIRPIN;
278 * DPDK callback to release a TX queue.
281 * Generic TX queue pointer.
284 mlx5_tx_queue_release(void *dpdk_txq)
286 struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
287 struct mlx5_txq_ctrl *txq_ctrl;
288 struct mlx5_priv *priv;
293 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
294 priv = txq_ctrl->priv;
295 for (i = 0; (i != priv->txqs_n); ++i)
296 if ((*priv->txqs)[i] == txq) {
297 mlx5_txq_release(ETH_DEV(priv), i);
298 DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
299 PORT_ID(priv), txq->idx);
305  * Initialize Tx UAR registers for the primary process.
308 * Pointer to Tx queue control structure.
311 txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl)
313 struct mlx5_priv *priv = txq_ctrl->priv;
314 struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
316 unsigned int lock_idx;
317 const size_t page_size = sysconf(_SC_PAGESIZE);
320 if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
322 assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
324 ppriv->uar_table[txq_ctrl->txq.idx] = txq_ctrl->bf_reg;
326 	/* Assign a UAR lock according to the UAR page number. */
327 lock_idx = (txq_ctrl->uar_mmap_offset / page_size) &
328 MLX5_UAR_PAGE_NUM_MASK;
329 txq_ctrl->txq.uar_lock = &priv->uar_lock[lock_idx];
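	/* Queues whose doorbell registers share a UAR page share this lock,
	 * presumably so their doorbell writes are serialized per page.
	 */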
334  * Remap the UAR register of a Tx queue for the secondary process.
336  * The remapped address is stored in a table in the process-private structure
337  * of the device, indexed by queue index.
340 * Pointer to Tx queue control structure.
342 * Verbs file descriptor to map UAR pages.
345 * 0 on success, a negative errno value otherwise and rte_errno is set.
348 txq_uar_init_secondary(struct mlx5_txq_ctrl *txq_ctrl, int fd)
350 struct mlx5_priv *priv = txq_ctrl->priv;
351 struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
352 struct mlx5_txq_data *txq = &txq_ctrl->txq;
356 const size_t page_size = sysconf(_SC_PAGESIZE);
358 if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
362 	 * As in rdma-core, UARs are mapped at OS page size granularity.
363 	 * See the libmlx5 function mlx5_init_context().
365 uar_va = (uintptr_t)txq_ctrl->bf_reg;
366 offset = uar_va & (page_size - 1); /* Offset in page. */
367 addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
368 txq_ctrl->uar_mmap_offset);
369 if (addr == MAP_FAILED) {
371 "port %u mmap failed for BF reg of txq %u",
372 txq->port_id, txq->idx);
376 addr = RTE_PTR_ADD(addr, offset);
377 ppriv->uar_table[txq->idx] = addr;
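	/* The secondary process reaches the doorbell through this per-process
	 * table; the primary's mapping is not directly usable from here.
	 */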
382  * Unmap the UAR register of a Tx queue for the secondary process.
385 * Pointer to Tx queue control structure.
388 txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl)
390 struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(txq_ctrl->priv));
391 const size_t page_size = sysconf(_SC_PAGESIZE);
394 if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
396 addr = ppriv->uar_table[txq_ctrl->txq.idx];
397 munmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
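	/* The stored address may carry an in-page offset, so align down to the
	 * page boundary before unmapping.
	 */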
401  * Initialize Tx UAR registers for the secondary process.
404 * Pointer to Ethernet device.
406 * Verbs file descriptor to map UAR pages.
409 * 0 on success, a negative errno value otherwise and rte_errno is set.
412 mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
414 struct mlx5_priv *priv = dev->data->dev_private;
415 struct mlx5_txq_data *txq;
416 struct mlx5_txq_ctrl *txq_ctrl;
420 assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
421 for (i = 0; i != priv->txqs_n; ++i) {
422 if (!(*priv->txqs)[i])
424 txq = (*priv->txqs)[i];
425 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
426 if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
428 assert(txq->idx == (uint16_t)i);
429 ret = txq_uar_init_secondary(txq_ctrl, fd);
437 if (!(*priv->txqs)[i])
439 txq = (*priv->txqs)[i];
440 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
441 txq_uar_uninit_secondary(txq_ctrl);
447 * Create the Tx hairpin queue object.
450 * Pointer to Ethernet device.
452  *   Queue index in DPDK Tx queue array.
455 * The hairpin DevX object initialised, NULL otherwise and rte_errno is set.
457 static struct mlx5_txq_obj *
458 mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
460 struct mlx5_priv *priv = dev->data->dev_private;
461 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
462 struct mlx5_txq_ctrl *txq_ctrl =
463 container_of(txq_data, struct mlx5_txq_ctrl, txq);
464 struct mlx5_devx_create_sq_attr attr = { 0 };
465 struct mlx5_txq_obj *tmpl = NULL;
469 assert(!txq_ctrl->obj);
470 tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
474 "port %u Tx queue %u cannot allocate memory resources",
475 dev->data->port_id, txq_data->idx);
479 tmpl->type = MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN;
480 tmpl->txq_ctrl = txq_ctrl;
483 /* Workaround for hairpin startup */
484 attr.wq_attr.log_hairpin_num_packets = log2above(32);
485 /* Workaround for packets larger than 1KB */
486 attr.wq_attr.log_hairpin_data_sz =
487 priv->config.hca_attr.log_max_hairpin_wq_data_sz;
488 attr.tis_num = priv->sh->tis->id;
489 tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->ctx, &attr);
492 "port %u tx hairpin queue %u can't create sq object",
493 dev->data->port_id, idx);
497 DRV_LOG(DEBUG, "port %u sxq %u updated with %p", dev->data->port_id,
499 rte_atomic32_inc(&tmpl->refcnt);
500 LIST_INSERT_HEAD(&priv->txqsobj, tmpl, next);
503 ret = rte_errno; /* Save rte_errno before cleanup. */
505 mlx5_devx_cmd_destroy(tmpl->tis);
507 mlx5_devx_cmd_destroy(tmpl->sq);
508 rte_errno = ret; /* Restore rte_errno. */
513 * Create the Tx queue Verbs object.
516 * Pointer to Ethernet device.
518 * Queue index in DPDK Tx queue array.
520 * Type of the Tx queue object to create.
523 * The Verbs object initialised, NULL otherwise and rte_errno is set.
525 struct mlx5_txq_obj *
526 mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
527 enum mlx5_txq_obj_type type)
529 struct mlx5_priv *priv = dev->data->dev_private;
530 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
531 struct mlx5_txq_ctrl *txq_ctrl =
532 container_of(txq_data, struct mlx5_txq_ctrl, txq);
533 struct mlx5_txq_obj tmpl;
534 struct mlx5_txq_obj *txq_obj = NULL;
536 struct ibv_qp_init_attr_ex init;
537 struct ibv_cq_init_attr_ex cq;
538 struct ibv_qp_attr mod;
541 struct mlx5dv_qp qp = { .comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET };
542 struct mlx5dv_cq cq_info;
543 struct mlx5dv_obj obj;
544 const int desc = 1 << txq_data->elts_n;
547 if (type == MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN)
548 return mlx5_txq_obj_hairpin_new(dev, idx);
549 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
550 /* If using DevX, need additional mask to read tisn value. */
551 if (priv->config.devx && !priv->sh->tdn)
552 qp.comp_mask |= MLX5DV_QP_MASK_RAW_QP_HANDLES;
555 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
556 priv->verbs_alloc_ctx.obj = txq_ctrl;
557 if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
559 "port %u MLX5_ENABLE_CQE_COMPRESSION must never be set",
564 memset(&tmpl, 0, sizeof(struct mlx5_txq_obj));
565 attr.cq = (struct ibv_cq_init_attr_ex){
568 cqe_n = desc / MLX5_TX_COMP_THRESH +
569 1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
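	/* Roughly one CQE per completion request (issued every
	 * MLX5_TX_COMP_THRESH descriptors), plus some headroom.
	 */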
570 tmpl.cq = mlx5_glue->create_cq(priv->sh->ctx, cqe_n, NULL, NULL, 0);
571 if (tmpl.cq == NULL) {
572 DRV_LOG(ERR, "port %u Tx queue %u CQ creation failure",
573 dev->data->port_id, idx);
577 attr.init = (struct ibv_qp_init_attr_ex){
578 /* CQ to be associated with the send queue. */
580 /* CQ to be associated with the receive queue. */
583 /* Max number of outstanding WRs. */
585 ((priv->sh->device_attr.orig_attr.max_qp_wr <
587 priv->sh->device_attr.orig_attr.max_qp_wr :
590 * Max number of scatter/gather elements in a WR,
591 * must be 1 to prevent libmlx5 from trying to affect
592 * too much memory. TX gather is not impacted by the
593 * device_attr.max_sge limit and will still work
598 .qp_type = IBV_QPT_RAW_PACKET,
600 		 * Do *NOT* enable this, completion events are managed per
605 .comp_mask = IBV_QP_INIT_ATTR_PD,
607 if (txq_data->inlen_send)
608 attr.init.cap.max_inline_data = txq_ctrl->max_inline_data;
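	/* With TSO enabled the QP must also reserve room for the largest TSO header. */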
609 if (txq_data->tso_en) {
610 attr.init.max_tso_header = txq_ctrl->max_tso_header;
611 attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
613 tmpl.qp = mlx5_glue->create_qp_ex(priv->sh->ctx, &attr.init);
614 if (tmpl.qp == NULL) {
615 DRV_LOG(ERR, "port %u Tx queue %u QP creation failure",
616 dev->data->port_id, idx);
620 attr.mod = (struct ibv_qp_attr){
621 /* Move the QP to this state. */
622 .qp_state = IBV_QPS_INIT,
623 /* IB device port number. */
624 .port_num = (uint8_t)priv->ibv_port,
626 ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod,
627 (IBV_QP_STATE | IBV_QP_PORT));
630 "port %u Tx queue %u QP state to IBV_QPS_INIT failed",
631 dev->data->port_id, idx);
635 attr.mod = (struct ibv_qp_attr){
636 .qp_state = IBV_QPS_RTR
638 ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
641 "port %u Tx queue %u QP state to IBV_QPS_RTR failed",
642 dev->data->port_id, idx);
646 attr.mod.qp_state = IBV_QPS_RTS;
647 ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
650 "port %u Tx queue %u QP state to IBV_QPS_RTS failed",
651 dev->data->port_id, idx);
655 txq_obj = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_obj), 0,
658 DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory",
659 dev->data->port_id, idx);
664 obj.cq.out = &cq_info;
667 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
672 if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
674 "port %u wrong MLX5_CQE_SIZE environment variable"
675 " value: it should be set to %u",
676 dev->data->port_id, RTE_CACHE_LINE_SIZE);
680 txq_data->cqe_n = log2above(cq_info.cqe_cnt);
681 txq_data->cqe_s = 1 << txq_data->cqe_n;
682 txq_data->cqe_m = txq_data->cqe_s - 1;
683 txq_data->qp_num_8s = tmpl.qp->qp_num << 8;
684 txq_data->wqes = qp.sq.buf;
685 txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
686 txq_data->wqe_s = 1 << txq_data->wqe_n;
687 txq_data->wqe_m = txq_data->wqe_s - 1;
688 txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
689 txq_data->qp_db = &qp.dbrec[MLX5_SND_DBR];
690 txq_data->cq_db = cq_info.dbrec;
691 txq_data->cqes = (volatile struct mlx5_cqe *)cq_info.buf;
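	/* The direct-verbs queries above expose raw pointers to the WQ/CQ
	 * buffers and their doorbell records for the datapath.
	 */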
696 txq_data->wqe_ci = 0;
697 txq_data->wqe_pi = 0;
698 txq_data->wqe_comp = 0;
699 txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
700 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
702 	 * If using DevX, we need to query and store the TIS transport domain value.
703 	 * This is done once per port.
704 	 * This value will be used on Rx, when creating the matching TIR.
706 if (priv->config.devx && !priv->sh->tdn) {
707 ret = mlx5_devx_cmd_qp_query_tis_td(tmpl.qp, qp.tisn,
710 DRV_LOG(ERR, "Fail to query port %u Tx queue %u QP TIS "
711 "transport domain", dev->data->port_id, idx);
715 DRV_LOG(DEBUG, "port %u Tx queue %u TIS number %d "
716 "transport domain %d", dev->data->port_id,
717 idx, qp.tisn, priv->sh->tdn);
721 txq_obj->qp = tmpl.qp;
722 txq_obj->cq = tmpl.cq;
723 rte_atomic32_inc(&txq_obj->refcnt);
724 txq_ctrl->bf_reg = qp.bf.reg;
725 if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
726 txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
727 DRV_LOG(DEBUG, "port %u: uar_mmap_offset 0x%"PRIx64,
728 dev->data->port_id, txq_ctrl->uar_mmap_offset);
731 "port %u failed to retrieve UAR info, invalid"
737 txq_uar_init(txq_ctrl);
738 LIST_INSERT_HEAD(&priv->txqsobj, txq_obj, next);
739 txq_obj->txq_ctrl = txq_ctrl;
740 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
743 ret = rte_errno; /* Save rte_errno before cleanup. */
745 claim_zero(mlx5_glue->destroy_cq(tmpl.cq));
747 claim_zero(mlx5_glue->destroy_qp(tmpl.qp));
750 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
751 rte_errno = ret; /* Restore rte_errno. */
756  * Get a Tx queue Verbs object.
759 * Pointer to Ethernet device.
761 * Queue index in DPDK Tx queue array.
764 * The Verbs object if it exists.
766 struct mlx5_txq_obj *
767 mlx5_txq_obj_get(struct rte_eth_dev *dev, uint16_t idx)
769 struct mlx5_priv *priv = dev->data->dev_private;
770 struct mlx5_txq_ctrl *txq_ctrl;
772 if (idx >= priv->txqs_n)
774 if (!(*priv->txqs)[idx])
776 txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
778 rte_atomic32_inc(&txq_ctrl->obj->refcnt);
779 return txq_ctrl->obj;
783  * Release a Tx Verbs queue object.
786 * Verbs Tx queue object.
789 * 1 while a reference on it exists, 0 when freed.
792 mlx5_txq_obj_release(struct mlx5_txq_obj *txq_obj)
795 if (rte_atomic32_dec_and_test(&txq_obj->refcnt)) {
796 if (txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN) {
798 claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
800 claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
801 claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
803 LIST_REMOVE(txq_obj, next);
811  * Verify the Verbs Tx queue list is empty.
814 * Pointer to Ethernet device.
817  *   The number of objects not released.
820 mlx5_txq_obj_verify(struct rte_eth_dev *dev)
822 struct mlx5_priv *priv = dev->data->dev_private;
824 struct mlx5_txq_obj *txq_obj;
826 LIST_FOREACH(txq_obj, &priv->txqsobj, next) {
827 DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
828 dev->data->port_id, txq_obj->txq_ctrl->txq.idx);
835  * Calculate the total number of WQEBBs for a Tx queue.
837 * Simplified version of calc_sq_size() in rdma-core.
840 * Pointer to Tx queue control structure.
843  *   The number of WQEBBs.
846 txq_calc_wqebb_cnt(struct mlx5_txq_ctrl *txq_ctrl)
848 unsigned int wqe_size;
849 const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
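	/* Worst-case WQE size per descriptor: control and Ethernet segments
	 * plus the maximal inline data configured for this queue.
	 */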
851 wqe_size = MLX5_WQE_CSEG_SIZE +
854 MLX5_ESEG_MIN_INLINE_SIZE +
855 txq_ctrl->max_inline_data;
856 return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;
860  * Calculate the maximal inline data size for a Tx queue.
863 * Pointer to Tx queue control structure.
866 * The maximal inline data size.
869 txq_calc_inline_max(struct mlx5_txq_ctrl *txq_ctrl)
871 const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
872 struct mlx5_priv *priv = txq_ctrl->priv;
873 unsigned int wqe_size;
875 wqe_size = priv->sh->device_attr.orig_attr.max_qp_wr / desc;
879 	 * This calculation is derived from the source of
880 	 * mlx5_calc_send_wqe() in the rdma-core library.
882 wqe_size = wqe_size * MLX5_WQE_SIZE -
887 MLX5_DSEG_MIN_INLINE_SIZE;
892 * Set Tx queue parameters from device configuration.
895 * Pointer to Tx queue control structure.
898 txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
900 struct mlx5_priv *priv = txq_ctrl->priv;
901 struct mlx5_dev_config *config = &priv->config;
902 unsigned int inlen_send; /* Inline data for ordinary SEND.*/
903 unsigned int inlen_empw; /* Inline data for enhanced MPW. */
904 	unsigned int inlen_mode; /* Minimal required inline data. */
905 unsigned int txqs_inline; /* Min Tx queues to enable inline. */
906 uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
907 bool tso = txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
908 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
909 DEV_TX_OFFLOAD_GRE_TNL_TSO |
910 DEV_TX_OFFLOAD_IP_TNL_TSO |
911 DEV_TX_OFFLOAD_UDP_TNL_TSO);
915 if (config->txqs_inline == MLX5_ARG_UNSET)
917 #if defined(RTE_ARCH_ARM64)
918 (priv->pci_dev->id.device_id ==
919 PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) ?
920 MLX5_INLINE_MAX_TXQS_BLUEFIELD :
922 MLX5_INLINE_MAX_TXQS;
924 txqs_inline = (unsigned int)config->txqs_inline;
925 inlen_send = (config->txq_inline_max == MLX5_ARG_UNSET) ?
926 MLX5_SEND_DEF_INLINE_LEN :
927 (unsigned int)config->txq_inline_max;
928 inlen_empw = (config->txq_inline_mpw == MLX5_ARG_UNSET) ?
929 MLX5_EMPW_DEF_INLINE_LEN :
930 (unsigned int)config->txq_inline_mpw;
931 inlen_mode = (config->txq_inline_min == MLX5_ARG_UNSET) ?
932 0 : (unsigned int)config->txq_inline_min;
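	/* Devargs left unset fall back to built-in defaults; explicit values
	 * are taken as requested and further aligned/limited below.
	 */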
933 if (config->mps != MLX5_MPW_ENHANCED)
936 	 * If a minimal amount of data to inline is requested
937 	 * we MUST enable inlining. This is the case for ConnectX-4,
938 	 * which usually requires L2 headers inlined for correct operation,
939 	 * and ConnectX-4 Lx, which requires L2-L4 headers inlined to
940 	 * support E-Switch Flows.
943 if (inlen_mode <= MLX5_ESEG_MIN_INLINE_SIZE) {
945 * Optimize minimal inlining for single
946 * segment packets to fill one WQEBB
949 temp = MLX5_ESEG_MIN_INLINE_SIZE;
951 temp = inlen_mode - MLX5_ESEG_MIN_INLINE_SIZE;
952 temp = RTE_ALIGN(temp, MLX5_WSEG_SIZE) +
953 MLX5_ESEG_MIN_INLINE_SIZE;
954 temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
956 if (temp != inlen_mode) {
958 "port %u minimal required inline setting"
959 " aligned from %u to %u",
960 PORT_ID(priv), inlen_mode, temp);
965 	 * If the port is configured to support VLAN insertion and the device
966 	 * does not support this feature in HW (for NICs before ConnectX-5
967 	 * or if the wqe_vlan_insert flag is not set) we must enable
968 	 * data inline on all queues because it is supported by a single
971 txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
972 vlan_inline = (dev_txoff & DEV_TX_OFFLOAD_VLAN_INSERT) &&
973 !config->hw_vlan_insert;
975 	 * If there are only a few Tx queues it is preferable
976 	 * to save CPU cycles and disable data inlining entirely.
978 if (inlen_send && priv->txqs_n >= txqs_inline) {
980 		 * The data sent with an ordinary MLX5_OPCODE_SEND
981 		 * may be inlined in the Ethernet Segment; align the
982 		 * length accordingly so it fits entire WQEBBs.
984 temp = RTE_MAX(inlen_send,
985 MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE);
986 temp -= MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
987 temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
988 temp += MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
989 temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
990 MLX5_ESEG_MIN_INLINE_SIZE -
993 MLX5_WQE_DSEG_SIZE * 2);
994 temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
995 temp = RTE_MAX(temp, inlen_mode);
996 if (temp != inlen_send) {
998 "port %u ordinary send inline setting"
999 " aligned from %u to %u",
1000 PORT_ID(priv), inlen_send, temp);
1004 * Not aligned to cache lines, but to WQEs.
1005 		 * The first bytes of data (initial alignment)
1006 		 * are going to be copied explicitly at the
1007 		 * beginning of the inlining buffer in the Ethernet
1010 assert(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
1011 assert(inlen_send <= MLX5_WQE_SIZE_MAX +
1012 MLX5_ESEG_MIN_INLINE_SIZE -
1013 MLX5_WQE_CSEG_SIZE -
1014 MLX5_WQE_ESEG_SIZE -
1015 MLX5_WQE_DSEG_SIZE * 2);
1016 } else if (inlen_mode) {
1018 		 * If minimal inlining is requested we must
1019 		 * enable inlining in general, regardless of the
1020 		 * number of configured queues. Ignore the
1021 		 * txq_inline_max devarg, as this is not the
1022 		 * full-featured inline mode.
1024 inlen_send = inlen_mode;
1026 } else if (vlan_inline) {
1028 		 * Hardware does not report the VLAN insertion
1029 		 * offload, so we must enable data inline
1030 		 * to implement the feature in software.
1032 inlen_send = MLX5_ESEG_MIN_INLINE_SIZE;
1038 txq_ctrl->txq.inlen_send = inlen_send;
1039 txq_ctrl->txq.inlen_mode = inlen_mode;
1040 txq_ctrl->txq.inlen_empw = 0;
1041 if (inlen_send && inlen_empw && priv->txqs_n >= txqs_inline) {
1043 * The data sent with MLX5_OPCODE_ENHANCED_MPSW
1044 		 * may be inlined in the Data Segment; align the
1045 		 * length accordingly so it fits entire WQEBBs.
1047 temp = RTE_MAX(inlen_empw,
1048 MLX5_WQE_SIZE + MLX5_DSEG_MIN_INLINE_SIZE);
1049 temp -= MLX5_DSEG_MIN_INLINE_SIZE;
1050 temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
1051 temp += MLX5_DSEG_MIN_INLINE_SIZE;
1052 temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
1053 MLX5_DSEG_MIN_INLINE_SIZE -
1054 MLX5_WQE_CSEG_SIZE -
1055 MLX5_WQE_ESEG_SIZE -
1056 MLX5_WQE_DSEG_SIZE);
1057 temp = RTE_MIN(temp, MLX5_EMPW_MAX_INLINE_LEN);
1058 if (temp != inlen_empw) {
1060 "port %u enhanced empw inline setting"
1061 " aligned from %u to %u",
1062 PORT_ID(priv), inlen_empw, temp);
1065 assert(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
1066 assert(inlen_empw <= MLX5_WQE_SIZE_MAX +
1067 MLX5_DSEG_MIN_INLINE_SIZE -
1068 MLX5_WQE_CSEG_SIZE -
1069 MLX5_WQE_ESEG_SIZE -
1070 MLX5_WQE_DSEG_SIZE);
1071 txq_ctrl->txq.inlen_empw = inlen_empw;
1073 txq_ctrl->max_inline_data = RTE_MAX(inlen_send, inlen_empw);
1075 txq_ctrl->max_tso_header = MLX5_MAX_TSO_HEADER;
1076 txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->max_inline_data,
1077 MLX5_MAX_TSO_HEADER);
1078 txq_ctrl->txq.tso_en = 1;
1080 txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
1081 txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
1082 DEV_TX_OFFLOAD_UDP_TNL_TSO |
1083 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
1084 txq_ctrl->txq.offloads) && config->swp;
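	/* The software parser is engaged only when tunnel offloads are
	 * requested and the device reports SWP support.
	 */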
1088 * Adjust Tx queue data inline parameters for large queue sizes.
1089 * The data inline feature requires multiple WQEs to fit the packets,
1090  * and if a large number of Tx descriptors is requested by the application
1091  * the total WQE amount may exceed the hardware capabilities. If the
1092  * default inline settings are used we can try to adjust them to
1093  * meet the hardware requirements without exceeding the queue size.
1096 * Pointer to Tx queue control structure.
1099  *   Zero on success, otherwise the parameters cannot be adjusted.
1102 txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
1104 struct mlx5_priv *priv = txq_ctrl->priv;
1105 struct mlx5_dev_config *config = &priv->config;
1106 unsigned int max_inline;
1108 max_inline = txq_calc_inline_max(txq_ctrl);
1109 if (!txq_ctrl->txq.inlen_send) {
1111 * Inline data feature is not engaged at all.
1112 * There is nothing to adjust.
1116 if (txq_ctrl->max_inline_data <= max_inline) {
1118 * The requested inline data length does not
1119 * exceed queue capabilities.
1123 if (txq_ctrl->txq.inlen_mode > max_inline) {
1125 "minimal data inline requirements (%u) are not"
1126 " satisfied (%u) on port %u, try the smaller"
1127 " Tx queue size (%d)",
1128 txq_ctrl->txq.inlen_mode, max_inline,
1129 priv->dev_data->port_id,
1130 priv->sh->device_attr.orig_attr.max_qp_wr);
1133 if (txq_ctrl->txq.inlen_send > max_inline &&
1134 config->txq_inline_max != MLX5_ARG_UNSET &&
1135 config->txq_inline_max > (int)max_inline) {
1137 "txq_inline_max requirements (%u) are not"
1138 " satisfied (%u) on port %u, try the smaller"
1139 " Tx queue size (%d)",
1140 txq_ctrl->txq.inlen_send, max_inline,
1141 priv->dev_data->port_id,
1142 priv->sh->device_attr.orig_attr.max_qp_wr);
1145 if (txq_ctrl->txq.inlen_empw > max_inline &&
1146 config->txq_inline_mpw != MLX5_ARG_UNSET &&
1147 config->txq_inline_mpw > (int)max_inline) {
1149 "txq_inline_mpw requirements (%u) are not"
1150 " satisfied (%u) on port %u, try the smaller"
1151 " Tx queue size (%d)",
1152 txq_ctrl->txq.inlen_empw, max_inline,
1153 priv->dev_data->port_id,
1154 priv->sh->device_attr.orig_attr.max_qp_wr);
1157 if (txq_ctrl->txq.tso_en && max_inline < MLX5_MAX_TSO_HEADER) {
1159 "tso header inline requirements (%u) are not"
1160 " satisfied (%u) on port %u, try the smaller"
1161 " Tx queue size (%d)",
1162 MLX5_MAX_TSO_HEADER, max_inline,
1163 priv->dev_data->port_id,
1164 priv->sh->device_attr.orig_attr.max_qp_wr);
1167 if (txq_ctrl->txq.inlen_send > max_inline) {
1169 "adjust txq_inline_max (%u->%u)"
1170 " due to large Tx queue on port %u",
1171 txq_ctrl->txq.inlen_send, max_inline,
1172 priv->dev_data->port_id);
1173 txq_ctrl->txq.inlen_send = max_inline;
1175 if (txq_ctrl->txq.inlen_empw > max_inline) {
1177 "adjust txq_inline_mpw (%u->%u)"
1178 "due to large Tx queue on port %u",
1179 txq_ctrl->txq.inlen_empw, max_inline,
1180 priv->dev_data->port_id);
1181 txq_ctrl->txq.inlen_empw = max_inline;
1183 txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->txq.inlen_send,
1184 txq_ctrl->txq.inlen_empw);
1185 assert(txq_ctrl->max_inline_data <= max_inline);
1186 assert(txq_ctrl->txq.inlen_mode <= max_inline);
1187 assert(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
1188 assert(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw);
1196 * Create a DPDK Tx queue.
1199 * Pointer to Ethernet device.
1203 * Number of descriptors to configure in queue.
1205 * NUMA socket on which memory must be allocated.
1207 * Thresholds parameters.
1210 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1212 struct mlx5_txq_ctrl *
1213 mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1214 unsigned int socket, const struct rte_eth_txconf *conf)
1216 struct mlx5_priv *priv = dev->data->dev_private;
1217 struct mlx5_txq_ctrl *tmpl;
1219 tmpl = rte_calloc_socket("TXQ", 1,
1221 desc * sizeof(struct rte_mbuf *),
1227 if (mlx5_mr_btree_init(&tmpl->txq.mr_ctrl.cache_bh,
1228 MLX5_MR_BTREE_CACHE_N, socket)) {
1229 /* rte_errno is already set. */
1232 	/* Save a pointer to the global generation number, used to check memory events. */
1233 tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->sh->mr.dev_gen;
1234 assert(desc > MLX5_TX_COMP_THRESH);
1235 tmpl->txq.offloads = conf->offloads |
1236 dev->data->dev_conf.txmode.offloads;
1238 tmpl->socket = socket;
1239 tmpl->txq.elts_n = log2above(desc);
1240 tmpl->txq.elts_s = desc;
1241 tmpl->txq.elts_m = desc - 1;
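	/* elts_s and elts_m assume desc is a power of two (mask-based ring indexing). */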
1242 tmpl->txq.port_id = dev->data->port_id;
1243 tmpl->txq.idx = idx;
1244 txq_set_params(tmpl);
1245 if (txq_adjust_params(tmpl))
1247 if (txq_calc_wqebb_cnt(tmpl) >
1248 priv->sh->device_attr.orig_attr.max_qp_wr) {
1250 "port %u Tx WQEBB count (%d) exceeds the limit (%d),"
1251 " try smaller queue size",
1252 dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
1253 priv->sh->device_attr.orig_attr.max_qp_wr);
1257 rte_atomic32_inc(&tmpl->refcnt);
1258 tmpl->type = MLX5_TXQ_TYPE_STANDARD;
1259 LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
1267 * Create a DPDK Tx hairpin queue.
1270 * Pointer to Ethernet device.
1274 * Number of descriptors to configure in queue.
1275 * @param hairpin_conf
1276 * The hairpin configuration.
1279 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1281 struct mlx5_txq_ctrl *
1282 mlx5_txq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1283 const struct rte_eth_hairpin_conf *hairpin_conf)
1285 struct mlx5_priv *priv = dev->data->dev_private;
1286 struct mlx5_txq_ctrl *tmpl;
1288 tmpl = rte_calloc_socket("TXQ", 1,
1289 sizeof(*tmpl), 0, SOCKET_ID_ANY);
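	/* Unlike mlx5_txq_new(), no mbuf ring is allocated here: hairpin
	 * traffic is expected to be forwarded in hardware.
	 */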
1295 tmpl->socket = SOCKET_ID_ANY;
1296 tmpl->txq.elts_n = log2above(desc);
1297 tmpl->txq.port_id = dev->data->port_id;
1298 tmpl->txq.idx = idx;
1299 tmpl->hairpin_conf = *hairpin_conf;
1300 tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
1301 rte_atomic32_inc(&tmpl->refcnt);
1302 LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
1310 * Pointer to Ethernet device.
1315 * A pointer to the queue if it exists.
1317 struct mlx5_txq_ctrl *
1318 mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
1320 struct mlx5_priv *priv = dev->data->dev_private;
1321 struct mlx5_txq_ctrl *ctrl = NULL;
1323 if ((*priv->txqs)[idx]) {
1324 ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl,
1326 mlx5_txq_obj_get(dev, idx);
1327 rte_atomic32_inc(&ctrl->refcnt);
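		/* A reference is taken on both the control structure and, via
		 * mlx5_txq_obj_get() above, on its Verbs/DevX object.
		 */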
1333 * Release a Tx queue.
1336 * Pointer to Ethernet device.
1341 * 1 while a reference on it exists, 0 when freed.
1344 mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
1346 struct mlx5_priv *priv = dev->data->dev_private;
1347 struct mlx5_txq_ctrl *txq;
1349 if (!(*priv->txqs)[idx])
1351 txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
1352 if (txq->obj && !mlx5_txq_obj_release(txq->obj))
1354 if (rte_atomic32_dec_and_test(&txq->refcnt)) {
1356 mlx5_mr_btree_free(&txq->txq.mr_ctrl.cache_bh);
1357 LIST_REMOVE(txq, next);
1359 (*priv->txqs)[idx] = NULL;
1366 * Verify if the queue can be released.
1369 * Pointer to Ethernet device.
1374 * 1 if the queue can be released.
1377 mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
1379 struct mlx5_priv *priv = dev->data->dev_private;
1380 struct mlx5_txq_ctrl *txq;
1382 if (!(*priv->txqs)[idx])
1384 txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
1385 return (rte_atomic32_read(&txq->refcnt) == 1);
1389  * Verify the Tx queue list is empty.
1392 * Pointer to Ethernet device.
1395  *   The number of objects not released.
1398 mlx5_txq_verify(struct rte_eth_dev *dev)
1400 struct mlx5_priv *priv = dev->data->dev_private;
1401 struct mlx5_txq_ctrl *txq_ctrl;
1404 LIST_FOREACH(txq_ctrl, &priv->txqsctrl, next) {
1405 DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
1406 dev->data->port_id, txq_ctrl->txq.idx);