1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
16 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
18 #pragma GCC diagnostic ignored "-Wpedantic"
20 #include <infiniband/verbs.h>
22 #pragma GCC diagnostic error "-Wpedantic"
26 #include <rte_malloc.h>
27 #include <rte_ethdev_driver.h>
28 #include <rte_common.h>
30 #include "mlx5_utils.h"
31 #include "mlx5_defs.h"
33 #include "mlx5_rxtx.h"
34 #include "mlx5_autoconf.h"
35 #include "mlx5_glue.h"
38 * Allocate TX queue elements.
41 * Pointer to TX queue structure.
44 txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
46 const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
49 for (i = 0; (i != elts_n); ++i)
50 txq_ctrl->txq.elts[i] = NULL;
51 DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
52 PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx, elts_n);
53 txq_ctrl->txq.elts_head = 0;
54 txq_ctrl->txq.elts_tail = 0;
55 txq_ctrl->txq.elts_comp = 0;
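/*
 * Illustrative note (added commentary, not in the original source):
 * elts_n stores the log2 of the ring size, so a queue configured with
 * 512 descriptors has elts_n == 9 and 1 << elts_n == 512 mbuf slots.
 * elts_head and elts_tail are free-running 16-bit counters; the slot
 * index is obtained by masking them with (size - 1), as txq_free_elts()
 * below does.
 */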
59 * Free TX queue elements.
62 * Pointer to TX queue structure.
65 txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
67 const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
68 const uint16_t elts_m = elts_n - 1;
69 uint16_t elts_head = txq_ctrl->txq.elts_head;
70 uint16_t elts_tail = txq_ctrl->txq.elts_tail;
71 struct rte_mbuf *(*elts)[elts_n] = &txq_ctrl->txq.elts;
73 DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
74 PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx);
75 txq_ctrl->txq.elts_head = 0;
76 txq_ctrl->txq.elts_tail = 0;
77 txq_ctrl->txq.elts_comp = 0;
79 while (elts_tail != elts_head) {
80 struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
83 rte_pktmbuf_free_seg(elt);
86 memset(&(*elts)[elts_tail & elts_m],
88 sizeof((*elts)[elts_tail & elts_m]));
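/*
 * Illustrative example (added commentary): with elts_n == 9 the ring
 * holds 512 entries and elts_m == 511. If elts_tail has wrapped around to
 * 65530, the slot accessed above is 65530 & 511 == 506, so the
 * free-running counters keep indexing the ring correctly across 16-bit
 * overflow as long as head and tail never drift apart by more than the
 * ring size.
 */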
95 * Returns the per-port supported offloads.
98 * Pointer to Ethernet device.
101 * Supported Tx offloads.
104 mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
106 struct mlx5_priv *priv = dev->data->dev_private;
107 uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
108 DEV_TX_OFFLOAD_VLAN_INSERT);
109 struct mlx5_dev_config *config = &priv->config;
112 offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
113 DEV_TX_OFFLOAD_UDP_CKSUM |
114 DEV_TX_OFFLOAD_TCP_CKSUM);
116 offloads |= DEV_TX_OFFLOAD_TCP_TSO;
119 offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
121 offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
122 DEV_TX_OFFLOAD_UDP_TNL_TSO);
124 if (config->tunnel_en) {
126 offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
128 offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
129 DEV_TX_OFFLOAD_GRE_TNL_TSO);
131 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
132 if (config->dv_flow_en)
133 offloads |= DEV_TX_OFFLOAD_MATCH_METADATA;
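/*
 * Usage sketch (illustrative addition, not part of the original driver):
 * an application observes the flags assembled above through the generic
 * ethdev API before requesting them at queue setup time, e.g.:
 *
 *	struct rte_eth_dev_info dev_info;
 *
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO)
 *		txmode.offloads |= DEV_TX_OFFLOAD_TCP_TSO;
 *
 * Here port_id and txmode are whatever the application already uses for
 * rte_eth_dev_configure().
 */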
139 * DPDK callback to configure a TX queue.
142 * Pointer to Ethernet device structure.
146 * Number of descriptors to configure in queue.
148 * NUMA socket on which memory must be allocated.
150 * Thresholds parameters.
153 * 0 on success, a negative errno value otherwise and rte_errno is set.
156 mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
157 unsigned int socket, const struct rte_eth_txconf *conf)
159 struct mlx5_priv *priv = dev->data->dev_private;
160 struct mlx5_txq_data *txq = (*priv->txqs)[idx];
161 struct mlx5_txq_ctrl *txq_ctrl =
162 container_of(txq, struct mlx5_txq_ctrl, txq);
164 if (desc <= MLX5_TX_COMP_THRESH) {
166 "port %u number of descriptors requested for Tx queue"
167 " %u must be higher than MLX5_TX_COMP_THRESH, using %u"
169 dev->data->port_id, idx, MLX5_TX_COMP_THRESH + 1, desc);
170 desc = MLX5_TX_COMP_THRESH + 1;
172 if (!rte_is_power_of_2(desc)) {
173 desc = 1 << log2above(desc);
175 "port %u increased number of descriptors in Tx queue"
176 " %u to the next power of two (%d)",
177 dev->data->port_id, idx, desc);
179 DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
180 dev->data->port_id, idx, desc);
181 if (idx >= priv->txqs_n) {
182 DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
183 dev->data->port_id, idx, priv->txqs_n);
184 rte_errno = EOVERFLOW;
187 if (!mlx5_txq_releasable(dev, idx)) {
189 DRV_LOG(ERR, "port %u unable to release queue index %u",
190 dev->data->port_id, idx);
193 mlx5_txq_release(dev, idx);
194 txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
196 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
197 dev->data->port_id, idx);
200 DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
201 dev->data->port_id, idx);
202 (*priv->txqs)[idx] = &txq_ctrl->txq;
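/*
 * Usage sketch (illustrative addition): this callback is not invoked
 * directly; it is reached through the generic ethdev API, e.g.:
 *
 *	ret = rte_eth_tx_queue_setup(port_id, queue_id, 1024,
 *				     rte_eth_dev_socket_id(port_id), NULL);
 *
 * Passing NULL for the txconf argument selects the defaults reported by
 * the driver; 1024 descriptors is only an example value and is adjusted
 * by the checks above if needed.
 */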
207 * DPDK callback to release a TX queue.
210 * Generic TX queue pointer.
213 mlx5_tx_queue_release(void *dpdk_txq)
215 struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
216 struct mlx5_txq_ctrl *txq_ctrl;
217 struct mlx5_priv *priv;
222 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
223 priv = txq_ctrl->priv;
224 for (i = 0; (i != priv->txqs_n); ++i)
225 if ((*priv->txqs)[i] == txq) {
226 mlx5_txq_release(ETH_DEV(priv), i);
227 DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
228 PORT_ID(priv), txq->idx);
234 * Initialize Tx UAR registers for primary process.
237 * Pointer to Tx queue control structure.
240 txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl)
242 struct mlx5_priv *priv = txq_ctrl->priv;
243 struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
245 unsigned int lock_idx;
246 const size_t page_size = sysconf(_SC_PAGESIZE);
249 assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
251 ppriv->uar_table[txq_ctrl->txq.idx] = txq_ctrl->bf_reg;
253 /* Assign a UAR lock according to the UAR page number. */
254 lock_idx = (txq_ctrl->uar_mmap_offset / page_size) &
255 MLX5_UAR_PAGE_NUM_MASK;
256 txq_ctrl->txq.uar_lock = &priv->uar_lock[lock_idx];
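/*
 * Worked example (added note; assumes 4096-byte pages and
 * MLX5_UAR_PAGE_NUM_MASK == 63 as defined in mlx5_defs.h): a queue with
 * uar_mmap_offset == 0x42000 lands on UAR page 0x42 (66), and
 * 66 & 63 == 2, so it shares priv->uar_lock[2] with any other queue whose
 * doorbell register lives on the same UAR page modulo 64.
 */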
261 * Remap UAR register of a Tx queue for secondary process.
263 * Remapped address is stored in the table in the process private structure of
264 * the device, indexed by queue index.
267 * Pointer to Tx queue control structure.
269 * Verbs file descriptor to map UAR pages.
272 * 0 on success, a negative errno value otherwise and rte_errno is set.
275 txq_uar_init_secondary(struct mlx5_txq_ctrl *txq_ctrl, int fd)
277 struct mlx5_priv *priv = txq_ctrl->priv;
278 struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
279 struct mlx5_txq_data *txq = &txq_ctrl->txq;
283 const size_t page_size = sysconf(_SC_PAGESIZE);
287 * As in rdma-core, UARs are mapped with OS page size granularity.
288 * See the libmlx5 function mlx5_init_context().
290 uar_va = (uintptr_t)txq_ctrl->bf_reg;
291 offset = uar_va & (page_size - 1); /* Offset in page. */
292 addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
293 txq_ctrl->uar_mmap_offset);
294 if (addr == MAP_FAILED) {
296 "port %u mmap failed for BF reg of txq %u",
297 txq->port_id, txq->idx);
301 addr = RTE_PTR_ADD(addr, offset);
302 ppriv->uar_table[txq->idx] = addr;
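/*
 * Worked example (added note, assuming a 4096-byte page): if the primary
 * process reports bf_reg == 0x7f5a13804800, then offset == 0x800, the
 * secondary process mmap()s the whole page at uar_mmap_offset, and the
 * address stored in the table is addr + 0x800, i.e. the same doorbell
 * register reached through a different virtual mapping.
 */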
307 * Unmap UAR register of a Tx queue for secondary process.
310 * Pointer to Tx queue control structure.
313 txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl)
315 struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(txq_ctrl->priv));
316 const size_t page_size = sysconf(_SC_PAGESIZE);
319 addr = ppriv->uar_table[txq_ctrl->txq.idx];
320 munmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
324 * Initialize Tx UAR registers for secondary process.
327 * Pointer to Ethernet device.
329 * Verbs file descriptor to map UAR pages.
332 * 0 on success, a negative errno value otherwise and rte_errno is set.
335 mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
337 struct mlx5_priv *priv = dev->data->dev_private;
338 struct mlx5_txq_data *txq;
339 struct mlx5_txq_ctrl *txq_ctrl;
343 assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
344 for (i = 0; i != priv->txqs_n; ++i) {
345 if (!(*priv->txqs)[i])
347 txq = (*priv->txqs)[i];
348 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
349 assert(txq->idx == (uint16_t)i);
350 ret = txq_uar_init_secondary(txq_ctrl, fd);
358 if (!(*priv->txqs)[i])
360 txq = (*priv->txqs)[i];
361 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
362 txq_uar_uninit_secondary(txq_ctrl);
368 * Create the Tx queue Verbs object.
371 * Pointer to Ethernet device.
373 * Queue index in DPDK Tx queue array.
376 * The Verbs object initialised, NULL otherwise and rte_errno is set.
378 struct mlx5_txq_ibv *
379 mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
381 struct mlx5_priv *priv = dev->data->dev_private;
382 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
383 struct mlx5_txq_ctrl *txq_ctrl =
384 container_of(txq_data, struct mlx5_txq_ctrl, txq);
385 struct mlx5_txq_ibv tmpl;
386 struct mlx5_txq_ibv *txq_ibv = NULL;
388 struct ibv_qp_init_attr_ex init;
389 struct ibv_cq_init_attr_ex cq;
390 struct ibv_qp_attr mod;
393 struct mlx5dv_qp qp = { .comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET };
394 struct mlx5dv_cq cq_info;
395 struct mlx5dv_obj obj;
396 const int desc = 1 << txq_data->elts_n;
399 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
400 /* If using DevX, need additional mask to read tisn value. */
401 if (priv->config.devx && !priv->sh->tdn)
402 qp.comp_mask |= MLX5DV_QP_MASK_RAW_QP_HANDLES;
405 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
406 priv->verbs_alloc_ctx.obj = txq_ctrl;
407 if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
409 "port %u MLX5_ENABLE_CQE_COMPRESSION must never be set",
414 memset(&tmpl, 0, sizeof(struct mlx5_txq_ibv));
415 attr.cq = (struct ibv_cq_init_attr_ex){
418 cqe_n = desc / MLX5_TX_COMP_THRESH +
419 1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
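/*
 * Illustrative arithmetic (added note; assumes MLX5_TX_COMP_THRESH == 32
 * and MLX5_TX_COMP_THRESH_INLINE_DIV == 8 from mlx5_defs.h): a queue of
 * 512 descriptors requests 512 / 32 + 1 + 8 == 25 CQEs here; the actual
 * CQE count is read back from cq_info.cqe_cnt further below and stored as
 * a log2 value in txq_data->cqe_n.
 */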
420 tmpl.cq = mlx5_glue->create_cq(priv->sh->ctx, cqe_n, NULL, NULL, 0);
421 if (tmpl.cq == NULL) {
422 DRV_LOG(ERR, "port %u Tx queue %u CQ creation failure",
423 dev->data->port_id, idx);
427 attr.init = (struct ibv_qp_init_attr_ex){
428 /* CQ to be associated with the send queue. */
430 /* CQ to be associated with the receive queue. */
433 /* Max number of outstanding WRs. */
435 ((priv->sh->device_attr.orig_attr.max_qp_wr <
437 priv->sh->device_attr.orig_attr.max_qp_wr :
440 * Max number of scatter/gather elements in a WR,
441 * must be 1 to prevent libmlx5 from trying to allocate
442 * too much memory. TX gather is not impacted by the
443 * device_attr.max_sge limit and will still work
448 .qp_type = IBV_QPT_RAW_PACKET,
450 * Do *NOT* enable this, completion events are managed per
455 .comp_mask = IBV_QP_INIT_ATTR_PD,
457 if (txq_data->inlen_send)
458 attr.init.cap.max_inline_data = txq_ctrl->max_inline_data;
459 if (txq_data->tso_en) {
460 attr.init.max_tso_header = txq_ctrl->max_tso_header;
461 attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
463 tmpl.qp = mlx5_glue->create_qp_ex(priv->sh->ctx, &attr.init);
464 if (tmpl.qp == NULL) {
465 DRV_LOG(ERR, "port %u Tx queue %u QP creation failure",
466 dev->data->port_id, idx);
470 attr.mod = (struct ibv_qp_attr){
471 /* Move the QP to this state. */
472 .qp_state = IBV_QPS_INIT,
473 /* IB device port number. */
474 .port_num = (uint8_t)priv->ibv_port,
476 ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod,
477 (IBV_QP_STATE | IBV_QP_PORT));
480 "port %u Tx queue %u QP state to IBV_QPS_INIT failed",
481 dev->data->port_id, idx);
485 attr.mod = (struct ibv_qp_attr){
486 .qp_state = IBV_QPS_RTR
488 ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
491 "port %u Tx queue %u QP state to IBV_QPS_RTR failed",
492 dev->data->port_id, idx);
496 attr.mod.qp_state = IBV_QPS_RTS;
497 ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
500 "port %u Tx queue %u QP state to IBV_QPS_RTS failed",
501 dev->data->port_id, idx);
505 txq_ibv = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_ibv), 0,
508 DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory",
509 dev->data->port_id, idx);
514 obj.cq.out = &cq_info;
517 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
522 if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
524 "port %u wrong MLX5_CQE_SIZE environment variable"
525 " value: it should be set to %u",
526 dev->data->port_id, RTE_CACHE_LINE_SIZE);
530 txq_data->cqe_n = log2above(cq_info.cqe_cnt);
531 txq_data->cqe_s = 1 << txq_data->cqe_n;
532 txq_data->cqe_m = txq_data->cqe_s - 1;
533 txq_data->qp_num_8s = tmpl.qp->qp_num << 8;
534 txq_data->wqes = qp.sq.buf;
535 txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
536 txq_data->wqe_s = 1 << txq_data->wqe_n;
537 txq_data->wqe_m = txq_data->wqe_s - 1;
538 txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
539 txq_data->qp_db = &qp.dbrec[MLX5_SND_DBR];
540 txq_data->cq_db = cq_info.dbrec;
541 txq_data->cqes = (volatile struct mlx5_cqe *)cq_info.buf;
546 txq_data->wqe_ci = 0;
547 txq_data->wqe_pi = 0;
548 txq_data->wqe_comp = 0;
549 txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
550 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
552 * If using DevX, we need to query and store the TIS transport domain value.
553 * This is done once per port.
554 * The value is used on the Rx side when creating the matching TIR.
556 if (priv->config.devx && !priv->sh->tdn) {
557 ret = mlx5_devx_cmd_qp_query_tis_td(tmpl.qp, qp.tisn,
560 DRV_LOG(ERR, "Failed to query port %u Tx queue %u QP TIS "
561 "transport domain", dev->data->port_id, idx);
565 DRV_LOG(DEBUG, "port %u Tx queue %u TIS number %d "
566 "transport domain %d", dev->data->port_id,
567 idx, qp.tisn, priv->sh->tdn);
571 txq_ibv->qp = tmpl.qp;
572 txq_ibv->cq = tmpl.cq;
573 rte_atomic32_inc(&txq_ibv->refcnt);
574 txq_ctrl->bf_reg = qp.bf.reg;
575 if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
576 txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
577 DRV_LOG(DEBUG, "port %u: uar_mmap_offset 0x%"PRIx64,
578 dev->data->port_id, txq_ctrl->uar_mmap_offset);
581 "port %u failed to retrieve UAR info, invalid"
587 txq_uar_init(txq_ctrl);
588 LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next);
589 txq_ibv->txq_ctrl = txq_ctrl;
590 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
593 ret = rte_errno; /* Save rte_errno before cleanup. */
595 claim_zero(mlx5_glue->destroy_cq(tmpl.cq));
597 claim_zero(mlx5_glue->destroy_qp(tmpl.qp));
600 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
601 rte_errno = ret; /* Restore rte_errno. */
606  * Get a Tx queue Verbs object.
609 * Pointer to Ethernet device.
611 * Queue index in DPDK Tx queue array.
614 * The Verbs object if it exists.
616 struct mlx5_txq_ibv *
617 mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
619 struct mlx5_priv *priv = dev->data->dev_private;
620 struct mlx5_txq_ctrl *txq_ctrl;
622 if (idx >= priv->txqs_n)
624 if (!(*priv->txqs)[idx])
626 txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
628 rte_atomic32_inc(&txq_ctrl->ibv->refcnt);
629 return txq_ctrl->ibv;
633  * Release a Tx Verbs queue object.
636 * Verbs Tx queue object.
639 * 1 while a reference on it exists, 0 when freed.
642 mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv)
645 if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
646 claim_zero(mlx5_glue->destroy_qp(txq_ibv->qp));
647 claim_zero(mlx5_glue->destroy_cq(txq_ibv->cq));
648 LIST_REMOVE(txq_ibv, next);
656  * Verify that the Verbs Tx queue list is empty.
659 * Pointer to Ethernet device.
662  * The number of objects not released.
665 mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
667 struct mlx5_priv *priv = dev->data->dev_private;
669 struct mlx5_txq_ibv *txq_ibv;
671 LIST_FOREACH(txq_ibv, &priv->txqsibv, next) {
672 DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
673 dev->data->port_id, txq_ibv->txq_ctrl->txq.idx);
680  * Calculate the total number of WQEBBs for a Tx queue.
682 * Simplified version of calc_sq_size() in rdma-core.
685 * Pointer to Tx queue control structure.
688  * The number of WQEBBs.
691 txq_calc_wqebb_cnt(struct mlx5_txq_ctrl *txq_ctrl)
693 unsigned int wqe_size;
694 const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
696 wqe_size = MLX5_WQE_CSEG_SIZE +
699 MLX5_ESEG_MIN_INLINE_SIZE +
700 txq_ctrl->max_inline_data;
701 return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;
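/*
 * Worked example (added commentary; the exact segment sizes are defined
 * in mlx5_defs.h and only assumed here): with no extra inline data the
 * fixed part of wqe_size is on the order of one 64-byte WQEBB, so a
 * 1024-descriptor queue needs roughly rte_align32pow2(64 * 1024) / 64 ==
 * 1024 WQEBBs, while a 256-byte inline setting grows every descriptor to
 * several WQEBBs and the total count accordingly.
 */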
705  * Calculate the maximal inline data size for a Tx queue.
708 * Pointer to Tx queue control structure.
711 * The maximal inline data size.
714 txq_calc_inline_max(struct mlx5_txq_ctrl *txq_ctrl)
716 const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
717 struct mlx5_priv *priv = txq_ctrl->priv;
718 unsigned int wqe_size;
720 wqe_size = priv->sh->device_attr.orig_attr.max_qp_wr / desc;
724 * This calculation is derived from the source of
725 * mlx5_calc_send_wqe() in the rdma-core library.
727 wqe_size = wqe_size * MLX5_WQE_SIZE -
732 MLX5_DSEG_MIN_INLINE_SIZE;
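/*
 * Worked example (added note, illustrative values): if the device reports
 * max_qp_wr == 32768 and the queue has 1024 descriptors, each descriptor
 * may span up to 32768 / 1024 == 32 WQEBBs, i.e. 32 * MLX5_WQE_SIZE ==
 * 2048 bytes, and the value returned below is that budget minus the fixed
 * control/Ethernet/data segment overhead subtracted above.
 */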
737 * Set Tx queue parameters from device configuration.
740 * Pointer to Tx queue control structure.
743 txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
745 struct mlx5_priv *priv = txq_ctrl->priv;
746 struct mlx5_dev_config *config = &priv->config;
747 unsigned int inlen_send; /* Inline data for ordinary SEND.*/
748 unsigned int inlen_empw; /* Inline data for enhanced MPW. */
749 unsigned int inlen_mode; /* Minimal required Inline data. */
750 unsigned int txqs_inline; /* Min Tx queues to enable inline. */
751 uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
752 bool tso = txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
753 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
754 DEV_TX_OFFLOAD_GRE_TNL_TSO |
755 DEV_TX_OFFLOAD_IP_TNL_TSO |
756 DEV_TX_OFFLOAD_UDP_TNL_TSO);
760 if (config->txqs_inline == MLX5_ARG_UNSET)
762 #if defined(RTE_ARCH_ARM64)
763 (priv->pci_dev->id.device_id ==
764 PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) ?
765 MLX5_INLINE_MAX_TXQS_BLUEFIELD :
767 MLX5_INLINE_MAX_TXQS;
769 txqs_inline = (unsigned int)config->txqs_inline;
770 inlen_send = (config->txq_inline_max == MLX5_ARG_UNSET) ?
771 MLX5_SEND_DEF_INLINE_LEN :
772 (unsigned int)config->txq_inline_max;
773 inlen_empw = (config->txq_inline_mpw == MLX5_ARG_UNSET) ?
774 MLX5_EMPW_DEF_INLINE_LEN :
775 (unsigned int)config->txq_inline_mpw;
776 inlen_mode = (config->txq_inline_min == MLX5_ARG_UNSET) ?
777 0 : (unsigned int)config->txq_inline_min;
778 if (config->mps != MLX5_MPW_ENHANCED)
781 * If a minimal amount of data to inline is requested
782 * we MUST enable inlining. This is the case for ConnectX-4,
783 * which usually requires L2 headers to be inlined for correct
784 * operation, and for ConnectX-4 Lx, which requires L2-L4 headers
785 * to be inlined to support E-Switch Flows.
788 if (inlen_mode <= MLX5_ESEG_MIN_INLINE_SIZE) {
790 * Optimize minimal inlining for single
791 * segment packets to fill one WQEBB
794 temp = MLX5_ESEG_MIN_INLINE_SIZE;
796 temp = inlen_mode - MLX5_ESEG_MIN_INLINE_SIZE;
797 temp = RTE_ALIGN(temp, MLX5_WSEG_SIZE) +
798 MLX5_ESEG_MIN_INLINE_SIZE;
799 temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
801 if (temp != inlen_mode) {
803 "port %u minimal required inline setting"
804 " aligned from %u to %u",
805 PORT_ID(priv), inlen_mode, temp);
810 * If the port is configured to support VLAN insertion and the device
811 * does not support this feature in HW (NICs before ConnectX-5,
812 * or the wqe_vlan_insert flag is not set) we must enable
813 * data inline on all queues because it is implemented by a single
816 txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
817 vlan_inline = (dev_txoff & DEV_TX_OFFLOAD_VLAN_INSERT) &&
818 !config->hw_vlan_insert;
820 * If there are only a few Tx queues, saving CPU cycles is
821 * prioritized and data inlining is disabled entirely.
823 if (inlen_send && priv->txqs_n >= txqs_inline) {
825 * The data sent with an ordinary MLX5_OPCODE_SEND
826 * may be inlined in the Ethernet Segment, align the
827 * length accordingly to fit entire WQEBBs.
829 temp = RTE_MAX(inlen_send,
830 MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE);
831 temp -= MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
832 temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
833 temp += MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
834 temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
835 MLX5_ESEG_MIN_INLINE_SIZE -
838 MLX5_WQE_DSEG_SIZE * 2);
839 temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
840 temp = RTE_MAX(temp, inlen_mode);
841 if (temp != inlen_send) {
843 "port %u ordinary send inline setting"
844 " aligned from %u to %u",
845 PORT_ID(priv), inlen_send, temp);
849 * Not aligned to cache lines, but to WQEs.
850 * The first bytes of data (initial alignment)
851 * are going to be copied explicitly at the
852 * beginning of the inlining buffer in the Ethernet
855 assert(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
856 assert(inlen_send <= MLX5_WQE_SIZE_MAX +
857 MLX5_ESEG_MIN_INLINE_SIZE -
860 MLX5_WQE_DSEG_SIZE * 2);
861 } else if (inlen_mode) {
863 * If minimal inlining is requested we must
864 * enable inlining in general, regardless of the
865 * number of configured queues. Ignore the
866 * txq_inline_max devarg, this is not
867 * full-featured inline.
869 inlen_send = inlen_mode;
871 } else if (vlan_inline) {
873 * Hardware does not report the offload for
874 * VLAN insertion, we must enable data inlining
875 * to implement the feature in software.
877 inlen_send = MLX5_ESEG_MIN_INLINE_SIZE;
883 txq_ctrl->txq.inlen_send = inlen_send;
884 txq_ctrl->txq.inlen_mode = inlen_mode;
885 txq_ctrl->txq.inlen_empw = 0;
886 if (inlen_send && inlen_empw && priv->txqs_n >= txqs_inline) {
888 * The data sent with MLX5_OPCODE_ENHANCED_MPSW
889 * may be inlined in the Data Segment, align the
890 * length accordingly to fit entire WQEBBs.
892 temp = RTE_MAX(inlen_empw,
893 MLX5_WQE_SIZE + MLX5_DSEG_MIN_INLINE_SIZE);
894 temp -= MLX5_DSEG_MIN_INLINE_SIZE;
895 temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
896 temp += MLX5_DSEG_MIN_INLINE_SIZE;
897 temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
898 MLX5_DSEG_MIN_INLINE_SIZE -
902 temp = RTE_MIN(temp, MLX5_EMPW_MAX_INLINE_LEN);
903 if (temp != inlen_empw) {
905 "port %u enhanced empw inline setting"
906 " aligned from %u to %u",
907 PORT_ID(priv), inlen_empw, temp);
910 assert(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
911 assert(inlen_empw <= MLX5_WQE_SIZE_MAX +
912 MLX5_DSEG_MIN_INLINE_SIZE -
916 txq_ctrl->txq.inlen_empw = inlen_empw;
918 txq_ctrl->max_inline_data = RTE_MAX(inlen_send, inlen_empw);
920 txq_ctrl->max_tso_header = MLX5_MAX_TSO_HEADER;
921 txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->max_inline_data,
922 MLX5_MAX_TSO_HEADER);
923 txq_ctrl->txq.tso_en = 1;
925 txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
926 txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
927 DEV_TX_OFFLOAD_UDP_TNL_TSO |
928 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
929 txq_ctrl->txq.offloads) && config->swp;
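/*
 * Configuration sketch (added note): the inline thresholds computed above
 * come from the mlx5 devargs txq_inline_max, txq_inline_mpw,
 * txq_inline_min and txqs_min_inline. For example, a hypothetical EAL
 * device argument such as
 *
 *	-w 0000:03:00.0,txq_inline_max=256,txqs_min_inline=8
 *
 * requests 256 bytes of SEND inline data once at least 8 Tx queues are
 * configured; unset values fall back to the MLX5_*_DEF_INLINE_LEN
 * defaults used above.
 */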
933 * Adjust Tx queue data inline parameters for large queue sizes.
934 * The data inline feature requires multiple WQEs to fit the packets,
935 * and if a large number of Tx descriptors is requested by the application
936 * the total WQE amount may exceed the hardware capabilities. If the
937 * default inline settings are used we can try to adjust them to
938 * meet the hardware requirements without exceeding the queue size.
941 * Pointer to Tx queue control structure.
944  * Zero on success, otherwise the parameters cannot be adjusted.
947 txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
949 struct mlx5_priv *priv = txq_ctrl->priv;
950 struct mlx5_dev_config *config = &priv->config;
951 unsigned int max_inline;
953 max_inline = txq_calc_inline_max(txq_ctrl);
954 if (!txq_ctrl->txq.inlen_send) {
956 * Inline data feature is not engaged at all.
957 * There is nothing to adjust.
961 if (txq_ctrl->max_inline_data <= max_inline) {
963 * The requested inline data length does not
964 * exceed queue capabilities.
968 if (txq_ctrl->txq.inlen_mode > max_inline) {
970 "minimal data inline requirements (%u) are not"
971 " satisfied (%u) on port %u, try the smaller"
972 " Tx queue size (%d)",
973 txq_ctrl->txq.inlen_mode, max_inline,
974 priv->dev_data->port_id,
975 priv->sh->device_attr.orig_attr.max_qp_wr);
978 if (txq_ctrl->txq.inlen_send > max_inline &&
979 config->txq_inline_max != MLX5_ARG_UNSET &&
980 config->txq_inline_max > (int)max_inline) {
982 "txq_inline_max requirements (%u) are not"
983 " satisfied (%u) on port %u, try the smaller"
984 " Tx queue size (%d)",
985 txq_ctrl->txq.inlen_send, max_inline,
986 priv->dev_data->port_id,
987 priv->sh->device_attr.orig_attr.max_qp_wr);
990 if (txq_ctrl->txq.inlen_empw > max_inline &&
991 config->txq_inline_mpw != MLX5_ARG_UNSET &&
992 config->txq_inline_mpw > (int)max_inline) {
994 "txq_inline_mpw requirements (%u) are not"
995 " satisfied (%u) on port %u, try the smaller"
996 " Tx queue size (%d)",
997 txq_ctrl->txq.inlen_empw, max_inline,
998 priv->dev_data->port_id,
999 priv->sh->device_attr.orig_attr.max_qp_wr);
1002 if (txq_ctrl->txq.tso_en && max_inline < MLX5_MAX_TSO_HEADER) {
1004 "tso header inline requirements (%u) are not"
1005 " satisfied (%u) on port %u, try the smaller"
1006 " Tx queue size (%d)",
1007 MLX5_MAX_TSO_HEADER, max_inline,
1008 priv->dev_data->port_id,
1009 priv->sh->device_attr.orig_attr.max_qp_wr);
1012 if (txq_ctrl->txq.inlen_send > max_inline) {
1014 "adjust txq_inline_max (%u->%u)"
1015 " due to large Tx queue on port %u",
1016 txq_ctrl->txq.inlen_send, max_inline,
1017 priv->dev_data->port_id);
1018 txq_ctrl->txq.inlen_send = max_inline;
1020 if (txq_ctrl->txq.inlen_empw > max_inline) {
1022 "adjust txq_inline_mpw (%u->%u)"
1023 "due to large Tx queue on port %u",
1024 txq_ctrl->txq.inlen_empw, max_inline,
1025 priv->dev_data->port_id);
1026 txq_ctrl->txq.inlen_empw = max_inline;
1028 txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->txq.inlen_send,
1029 txq_ctrl->txq.inlen_empw);
1030 assert(txq_ctrl->max_inline_data <= max_inline);
1031 assert(txq_ctrl->txq.inlen_mode <= max_inline);
1032 assert(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
1033 assert(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw || !txq_ctrl->txq.inlen_empw);
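/*
 * Worked example (added note, illustrative numbers): with
 * max_qp_wr == 32768 and 16384 descriptors, txq_calc_inline_max() leaves
 * a budget of only 2 WQEBBs (about 128 bytes) per descriptor, so a
 * default txq_inline_max of several hundred bytes is silently trimmed to
 * that budget by the branches above, while an explicitly requested value
 * larger than the budget makes this function fail and suggest a smaller
 * queue size.
 */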
1041 * Create a DPDK Tx queue.
1044 * Pointer to Ethernet device.
1048 * Number of descriptors to configure in queue.
1050 * NUMA socket on which memory must be allocated.
1052 * Thresholds parameters.
1055 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1057 struct mlx5_txq_ctrl *
1058 mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1059 unsigned int socket, const struct rte_eth_txconf *conf)
1061 struct mlx5_priv *priv = dev->data->dev_private;
1062 struct mlx5_txq_ctrl *tmpl;
1064 tmpl = rte_calloc_socket("TXQ", 1,
1066 desc * sizeof(struct rte_mbuf *),
1072 if (mlx5_mr_btree_init(&tmpl->txq.mr_ctrl.cache_bh,
1073 MLX5_MR_BTREE_CACHE_N, socket)) {
1074 /* rte_errno is already set. */
1077 /* Save a pointer to the global generation number for memory event checks. */
1078 tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->sh->mr.dev_gen;
1079 assert(desc > MLX5_TX_COMP_THRESH);
1080 tmpl->txq.offloads = conf->offloads |
1081 dev->data->dev_conf.txmode.offloads;
1083 tmpl->socket = socket;
1084 tmpl->txq.elts_n = log2above(desc);
1085 tmpl->txq.elts_s = desc;
1086 tmpl->txq.elts_m = desc - 1;
1087 tmpl->txq.port_id = dev->data->port_id;
1088 tmpl->txq.idx = idx;
1089 txq_set_params(tmpl);
1090 if (txq_adjust_params(tmpl))
1092 if (txq_calc_wqebb_cnt(tmpl) >
1093 priv->sh->device_attr.orig_attr.max_qp_wr) {
1095 "port %u Tx WQEBB count (%d) exceeds the limit (%d),"
1096 " try smaller queue size",
1097 dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
1098 priv->sh->device_attr.orig_attr.max_qp_wr);
1102 rte_atomic32_inc(&tmpl->refcnt);
1103 LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
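/*
 * Layout note (added commentary; assumes elts[] is the trailing member of
 * struct mlx5_txq_data in this driver version): the single
 * rte_calloc_socket() at the top of this function sizes the control
 * structure plus desc * sizeof(struct rte_mbuf *), so the mbuf pointer
 * ring lives in the same NUMA-local allocation as the queue data it
 * belongs to.
 */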
1114 * Pointer to Ethernet device.
1119 * A pointer to the queue if it exists.
1121 struct mlx5_txq_ctrl *
1122 mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
1124 struct mlx5_priv *priv = dev->data->dev_private;
1125 struct mlx5_txq_ctrl *ctrl = NULL;
1127 if ((*priv->txqs)[idx]) {
1128 ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl,
1130 mlx5_txq_ibv_get(dev, idx);
1131 rte_atomic32_inc(&ctrl->refcnt);
1137 * Release a Tx queue.
1140 * Pointer to Ethernet device.
1145 * 1 while a reference on it exists, 0 when freed.
1148 mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
1150 struct mlx5_priv *priv = dev->data->dev_private;
1151 struct mlx5_txq_ctrl *txq;
1153 if (!(*priv->txqs)[idx])
1155 txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
1156 if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv))
1158 if (rte_atomic32_dec_and_test(&txq->refcnt)) {
1160 mlx5_mr_btree_free(&txq->txq.mr_ctrl.cache_bh);
1161 LIST_REMOVE(txq, next);
1163 (*priv->txqs)[idx] = NULL;
1170 * Verify if the queue can be released.
1173 * Pointer to Ethernet device.
1178 * 1 if the queue can be released.
1181 mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
1183 struct mlx5_priv *priv = dev->data->dev_private;
1184 struct mlx5_txq_ctrl *txq;
1186 if (!(*priv->txqs)[idx])
1188 txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
1189 return (rte_atomic32_read(&txq->refcnt) == 1);
1193  * Verify that the Tx queue list is empty.
1196 * Pointer to Ethernet device.
1199  * The number of objects not released.
1202 mlx5_txq_verify(struct rte_eth_dev *dev)
1204 struct mlx5_priv *priv = dev->data->dev_private;
1205 struct mlx5_txq_ctrl *txq_ctrl;
1208 LIST_FOREACH(txq_ctrl, &priv->txqsctrl, next) {
1209 DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
1210 dev->data->port_id, txq_ctrl->txq.idx);