/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#pragma GCC diagnostic ignored "-Wpedantic"
#include <infiniband/verbs.h>
#pragma GCC diagnostic error "-Wpedantic"
#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_common.h>
#include "mlx5_utils.h"
#include "mlx5_defs.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_glue.h"
 * Allocate TX queue elements.
 *   Pointer to TX queue structure.
txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
	const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
	for (i = 0; (i != elts_n); ++i)
		(*txq_ctrl->txq.elts)[i] = NULL;
	DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
		PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx, elts_n);
	txq_ctrl->txq.elts_head = 0;
	txq_ctrl->txq.elts_tail = 0;
	txq_ctrl->txq.elts_comp = 0;
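/*
 * Illustrative sketch, not part of the driver: elts_n holds the ring size as
 * a log2 value while elts_head/elts_tail are free-running 16-bit counters,
 * so the current fill level is a plain unsigned difference. The helper below
 * is hypothetical.
 */
static inline uint16_t
txq_elts_used_sketch(const struct mlx5_txq_data *txq)
{
	/* Unsigned wrap-around keeps the difference correct; it can never
	 * exceed the ring capacity of (1 << txq->elts_n) entries.
	 */
	return (uint16_t)(txq->elts_head - txq->elts_tail);
}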
 * Free TX queue elements.
 *   Pointer to TX queue structure.
txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
	const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
	const uint16_t elts_m = elts_n - 1;
	uint16_t elts_head = txq_ctrl->txq.elts_head;
	uint16_t elts_tail = txq_ctrl->txq.elts_tail;
	struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;
	DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
		PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx);
	txq_ctrl->txq.elts_head = 0;
	txq_ctrl->txq.elts_tail = 0;
	txq_ctrl->txq.elts_comp = 0;
	while (elts_tail != elts_head) {
		struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
		rte_pktmbuf_free_seg(elt);
		memset(&(*elts)[elts_tail & elts_m],
		       sizeof((*elts)[elts_tail & elts_m]));
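/*
 * Illustrative sketch, not part of the driver: the loop above drains the
 * ring by walking the free-running tail counter and masking it with elts_m
 * (elts_n - 1), so no modulo is needed and 16-bit wrap-around is harmless.
 * A hypothetical standalone equivalent over a flat array:
 */
static void
txq_drain_elts_sketch(struct rte_mbuf **elts, uint16_t elts_n,
		      uint16_t tail, uint16_t head)
{
	const uint16_t mask = elts_n - 1; /* elts_n must be a power of two. */

	while (tail != head) {
		struct rte_mbuf *m = elts[tail & mask];

		if (m != NULL)
			rte_pktmbuf_free_seg(m);
		elts[tail & mask] = NULL;
		++tail;
	}
}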
 * Returns the per-port supported offloads.
 *   Pointer to Ethernet device.
 *   Supported Tx offloads.
mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
	struct mlx5_priv *priv = dev->data->dev_private;
	uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
			     DEV_TX_OFFLOAD_VLAN_INSERT);
	struct mlx5_dev_config *config = &priv->config;
		offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
			     DEV_TX_OFFLOAD_UDP_CKSUM |
			     DEV_TX_OFFLOAD_TCP_CKSUM);
		offloads |= DEV_TX_OFFLOAD_TCP_TSO;
			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
			offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
				     DEV_TX_OFFLOAD_UDP_TNL_TSO);
	if (config->tunnel_en) {
			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
			offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
				     DEV_TX_OFFLOAD_GRE_TNL_TSO);
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	if (config->dv_flow_en)
		offloads |= DEV_TX_OFFLOAD_MATCH_METADATA;
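/*
 * Usage sketch (application side, not part of the PMD): the offload mask
 * built above is what ethdev reports in dev_info.tx_offload_capa, so an
 * application would typically check it before enabling a Tx offload. Port
 * id 0 and the TSO flag are arbitrary example values.
 */
static int
example_enable_tso(struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(0, &dev_info);
	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO))
		return -ENOTSUP;
	conf->txmode.offloads |= DEV_TX_OFFLOAD_TCP_TSO;
	return 0;
}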
 * DPDK callback to configure a TX queue.
 *   Pointer to Ethernet device structure.
 *   Number of descriptors to configure in queue.
 *   NUMA socket on which memory must be allocated.
 *   Thresholds parameters.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_txconf *conf)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	if (desc <= MLX5_TX_COMP_THRESH) {
			"port %u number of descriptors requested for Tx queue"
			" %u must be higher than MLX5_TX_COMP_THRESH, using %u"
			dev->data->port_id, idx, MLX5_TX_COMP_THRESH + 1, desc);
		desc = MLX5_TX_COMP_THRESH + 1;
	if (!rte_is_power_of_2(desc)) {
		desc = 1 << log2above(desc);
			"port %u increased number of descriptors in Tx queue"
			" %u to the next power of two (%d)",
			dev->data->port_id, idx, desc);
	DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
		dev->data->port_id, idx, desc);
	if (idx >= priv->txqs_n) {
		DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
			dev->data->port_id, idx, priv->txqs_n);
		rte_errno = EOVERFLOW;
	if (!mlx5_txq_releasable(dev, idx)) {
		DRV_LOG(ERR, "port %u unable to release queue index %u",
			dev->data->port_id, idx);
	mlx5_txq_release(dev, idx);
	txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
			dev->data->port_id, idx);
	DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
		dev->data->port_id, idx);
	(*priv->txqs)[idx] = &txq_ctrl->txq;
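/*
 * Usage sketch (application side, not part of the PMD): this callback is
 * reached through rte_eth_tx_queue_setup(), typically with the default
 * txconf taken from device info. Port id, queue id and the descriptor count
 * below are arbitrary example values.
 */
static int
example_setup_txq(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	/* Queue 0, 512 descriptors, memory on the port's NUMA node. */
	return rte_eth_tx_queue_setup(port_id, 0, 512,
				      rte_eth_dev_socket_id(port_id),
				      &dev_info.default_txconf);
}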
 * DPDK callback to release a TX queue.
 *   Generic TX queue pointer.
mlx5_tx_queue_release(void *dpdk_txq)
	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
	struct mlx5_txq_ctrl *txq_ctrl;
	struct mlx5_priv *priv;
	txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
	priv = txq_ctrl->priv;
	for (i = 0; (i != priv->txqs_n); ++i)
		if ((*priv->txqs)[i] == txq) {
			mlx5_txq_release(ETH_DEV(priv), i);
			DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
				PORT_ID(priv), txq->idx);
 * Initialize Tx UAR registers for primary process.
 *   Pointer to Tx queue control structure.
txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl)
	struct mlx5_priv *priv = txq_ctrl->priv;
	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
	unsigned int lock_idx;
	const size_t page_size = sysconf(_SC_PAGESIZE);
	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
	ppriv->uar_table[txq_ctrl->txq.idx] = txq_ctrl->bf_reg;
	/* Assign a UAR lock according to the UAR page number. */
	lock_idx = (txq_ctrl->uar_mmap_offset / page_size) &
		   MLX5_UAR_PAGE_NUM_MASK;
	txq_ctrl->txq.uar_lock = &priv->uar_lock[lock_idx];
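/*
 * Worked example (assuming a 4096-byte page size): a queue whose
 * uar_mmap_offset is 0x3000 lives in UAR page number 3 and is therefore
 * assigned lock slot (3 & MLX5_UAR_PAGE_NUM_MASK). Queues sharing a UAR
 * page end up sharing the same lock.
 */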
 * Remap UAR register of a Tx queue for secondary process.
 * The remapped address is stored in the table of the process private
 * structure of the device, indexed by queue index.
 *   Pointer to Tx queue control structure.
 *   Verbs file descriptor to map UAR pages.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
txq_uar_init_secondary(struct mlx5_txq_ctrl *txq_ctrl, int fd)
	struct mlx5_priv *priv = txq_ctrl->priv;
	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
	struct mlx5_txq_data *txq = &txq_ctrl->txq;
	const size_t page_size = sysconf(_SC_PAGESIZE);
	 * As in rdma-core, UARs are mapped at OS page size granularity.
	 * See the libmlx5 function mlx5_init_context().
	uar_va = (uintptr_t)txq_ctrl->bf_reg;
	offset = uar_va & (page_size - 1); /* Offset in page. */
	addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
		    txq_ctrl->uar_mmap_offset);
	if (addr == MAP_FAILED) {
			"port %u mmap failed for BF reg of txq %u",
			txq->port_id, txq->idx);
	addr = RTE_PTR_ADD(addr, offset);
	ppriv->uar_table[txq->idx] = addr;
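/*
 * Worked example (assuming a 4096-byte page size and an arbitrary example
 * address): if bf_reg is 0x7f52a831e800, the in-page offset is 0x800. The
 * secondary process maps the whole page at uar_mmap_offset and then adds
 * that offset back, so the remapped doorbell ends up at the same position
 * within the page as in the primary process.
 */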
 * Unmap UAR register of a Tx queue for secondary process.
 *   Pointer to Tx queue control structure.
txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl)
	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(txq_ctrl->priv));
	const size_t page_size = sysconf(_SC_PAGESIZE);
	addr = ppriv->uar_table[txq_ctrl->txq.idx];
	munmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
 * Initialize Tx UAR registers for secondary process.
 *   Pointer to Ethernet device.
 *   Verbs file descriptor to map UAR pages.
 *   0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq;
	struct mlx5_txq_ctrl *txq_ctrl;
	assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
	for (i = 0; i != priv->txqs_n; ++i) {
		if (!(*priv->txqs)[i])
		txq = (*priv->txqs)[i];
		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
		assert(txq->idx == (uint16_t)i);
		ret = txq_uar_init_secondary(txq_ctrl, fd);
		if (!(*priv->txqs)[i])
		txq = (*priv->txqs)[i];
		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
		txq_uar_uninit_secondary(txq_ctrl);
 * Check if the burst function is using eMPW.
 * @param tx_pkt_burst
 *   Tx burst function pointer.
 *   1 if the burst function is using eMPW, 0 otherwise.
is_empw_burst_func(eth_tx_burst_t tx_pkt_burst)
	if (tx_pkt_burst == mlx5_tx_burst_raw_vec ||
	    tx_pkt_burst == mlx5_tx_burst_vec ||
	    tx_pkt_burst == mlx5_tx_burst_empw)
 * Create the Tx queue Verbs object.
 *   Pointer to Ethernet device.
 *   Queue index in DPDK Tx queue array.
 *   The Verbs object initialised, NULL otherwise and rte_errno is set.
struct mlx5_txq_ibv *
mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_txq_ibv tmpl;
	struct mlx5_txq_ibv *txq_ibv = NULL;
		struct ibv_qp_init_attr_ex init;
		struct ibv_cq_init_attr_ex cq;
		struct ibv_qp_attr mod;
		struct ibv_cq_ex cq_attr;
	struct mlx5dv_qp qp = { .comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET };
	struct mlx5dv_cq cq_info;
	struct mlx5dv_obj obj;
	const int desc = 1 << txq_data->elts_n;
	eth_tx_burst_t tx_pkt_burst = mlx5_select_tx_function(dev);
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
	priv->verbs_alloc_ctx.obj = txq_ctrl;
	if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
			"port %u MLX5_ENABLE_CQE_COMPRESSION must never be set",
	memset(&tmpl, 0, sizeof(struct mlx5_txq_ibv));
	attr.cq = (struct ibv_cq_init_attr_ex){
	cqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ?
		((desc / MLX5_TX_COMP_THRESH) - 1) : 1;
	if (is_empw_burst_func(tx_pkt_burst))
		cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
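	/*
	 * Worked example (assuming MLX5_TX_COMP_THRESH is 32): a queue of
	 * 512 descriptors gets cqe_n = 512 / 32 - 1 = 15; eMPW burst
	 * functions reserve MLX5_TX_COMP_THRESH_INLINE_DIV extra entries
	 * on top of that.
	 */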
	tmpl.cq = mlx5_glue->create_cq(priv->sh->ctx, cqe_n, NULL, NULL, 0);
	if (tmpl.cq == NULL) {
		DRV_LOG(ERR, "port %u Tx queue %u CQ creation failure",
			dev->data->port_id, idx);
	attr.init = (struct ibv_qp_init_attr_ex){
		/* CQ to be associated with the send queue. */
		/* CQ to be associated with the receive queue. */
			/* Max number of outstanding WRs. */
				((priv->sh->device_attr.orig_attr.max_qp_wr <
				 priv->sh->device_attr.orig_attr.max_qp_wr :
			 * Max number of scatter/gather elements in a WR,
			 * must be 1 to prevent libmlx5 from trying to affect
			 * too much memory. TX gather is not impacted by the
			 * device_attr.max_sge limit and will still work
		.qp_type = IBV_QPT_RAW_PACKET,
		 * Do *NOT* enable this, completion events are managed per
		.comp_mask = IBV_QP_INIT_ATTR_PD,
	if (txq_data->max_inline)
		attr.init.cap.max_inline_data = txq_ctrl->max_inline_data;
	if (txq_data->tso_en) {
		attr.init.max_tso_header = txq_ctrl->max_tso_header;
		attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
	tmpl.qp = mlx5_glue->create_qp_ex(priv->sh->ctx, &attr.init);
	if (tmpl.qp == NULL) {
		DRV_LOG(ERR, "port %u Tx queue %u QP creation failure",
			dev->data->port_id, idx);
	attr.mod = (struct ibv_qp_attr){
		/* Move the QP to this state. */
		.qp_state = IBV_QPS_INIT,
		/* IB device port number. */
		.port_num = (uint8_t)priv->ibv_port,
	ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod,
				   (IBV_QP_STATE | IBV_QP_PORT));
			"port %u Tx queue %u QP state to IBV_QPS_INIT failed",
			dev->data->port_id, idx);
	attr.mod = (struct ibv_qp_attr){
		.qp_state = IBV_QPS_RTR
	ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
			"port %u Tx queue %u QP state to IBV_QPS_RTR failed",
			dev->data->port_id, idx);
	attr.mod.qp_state = IBV_QPS_RTS;
	ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
			"port %u Tx queue %u QP state to IBV_QPS_RTS failed",
			dev->data->port_id, idx);
	txq_ibv = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_ibv), 0,
		DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory",
			dev->data->port_id, idx);
	obj.cq.out = &cq_info;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
			"port %u wrong MLX5_CQE_SIZE environment variable"
			" value: it should be set to %u",
			dev->data->port_id, RTE_CACHE_LINE_SIZE);
	txq_data->cqe_n = log2above(cq_info.cqe_cnt);
	txq_data->qp_num_8s = tmpl.qp->qp_num << 8;
	txq_data->wqes = qp.sq.buf;
	txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
	txq_data->qp_db = &qp.dbrec[MLX5_SND_DBR];
	txq_data->cq_db = cq_info.dbrec;
		(volatile struct mlx5_cqe (*)[])
		(uintptr_t)cq_info.buf;
	txq_data->wqe_ci = 0;
	txq_data->wqe_pi = 0;
	txq_ibv->qp = tmpl.qp;
	txq_ibv->cq = tmpl.cq;
	rte_atomic32_inc(&txq_ibv->refcnt);
	txq_ctrl->bf_reg = qp.bf.reg;
	txq_uar_init(txq_ctrl);
	if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
		txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
		DRV_LOG(DEBUG, "port %u: uar_mmap_offset 0x%lx",
			dev->data->port_id, txq_ctrl->uar_mmap_offset);
			"port %u failed to retrieve UAR info, invalid"
	LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next);
	txq_ibv->txq_ctrl = txq_ctrl;
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
	ret = rte_errno; /* Save rte_errno before cleanup. */
	claim_zero(mlx5_glue->destroy_cq(tmpl.cq));
	claim_zero(mlx5_glue->destroy_qp(tmpl.qp));
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
	rte_errno = ret; /* Restore rte_errno. */
 * Get a Tx queue Verbs object.
 *   Pointer to Ethernet device.
 *   Queue index in DPDK Tx queue array.
 *   The Verbs object if it exists.
struct mlx5_txq_ibv *
mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *txq_ctrl;
	if (idx >= priv->txqs_n)
	if (!(*priv->txqs)[idx])
	txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
		rte_atomic32_inc(&txq_ctrl->ibv->refcnt);
	return txq_ctrl->ibv;
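/*
 * Usage sketch (not part of the driver): each successful mlx5_txq_ibv_get()
 * increments the reference counter, so callers are expected to pair it with
 * mlx5_txq_ibv_release() once the Verbs object is no longer needed:
 *
 *	struct mlx5_txq_ibv *ibv = mlx5_txq_ibv_get(dev, idx);
 *
 *	if (ibv != NULL) {
 *		... use ibv->qp / ibv->cq ...
 *		mlx5_txq_ibv_release(ibv);
 *	}
 */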
 * Release a Tx queue Verbs object.
 *   Verbs Tx queue object.
 *   1 while a reference on it exists, 0 when freed.
mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv)
	if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
		claim_zero(mlx5_glue->destroy_qp(txq_ibv->qp));
		claim_zero(mlx5_glue->destroy_cq(txq_ibv->cq));
		LIST_REMOVE(txq_ibv, next);
 * Verify that the Verbs Tx queue list is empty.
 *   Pointer to Ethernet device.
 *   The number of objects not released.
mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ibv *txq_ibv;
	LIST_FOREACH(txq_ibv, &priv->txqsibv, next) {
		DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
			dev->data->port_id, txq_ibv->txq_ctrl->txq.idx);
 * Calculate the total number of WQEBBs for a Tx queue.
 * Simplified version of calc_sq_size() in rdma-core.
 *   Pointer to Tx queue control structure.
 *   The number of WQEBBs.
txq_calc_wqebb_cnt(struct mlx5_txq_ctrl *txq_ctrl)
	unsigned int wqe_size;
	const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
	wqe_size = MLX5_WQE_SIZE + txq_ctrl->max_inline_data;
	return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;
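/*
 * Worked example (assuming MLX5_WQE_SIZE is 64 bytes): with no inline data
 * and 512 descriptors, wqe_size * desc = 32768, which is already a power of
 * two, so the function returns 32768 / 64 = 512 WQEBBs. Requesting inline
 * data grows wqe_size and therefore the WQEBB count.
 */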
 * Set Tx queue parameters from device configuration.
 *   Pointer to Tx queue control structure.
txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
	struct mlx5_priv *priv = txq_ctrl->priv;
	struct mlx5_dev_config *config = &priv->config;
	const unsigned int max_tso_inline =
		((MLX5_MAX_TSO_HEADER + (RTE_CACHE_LINE_SIZE - 1)) /
		 RTE_CACHE_LINE_SIZE);
	unsigned int txq_inline;
	unsigned int txqs_inline;
	unsigned int inline_max_packet_sz;
	eth_tx_burst_t tx_pkt_burst =
		mlx5_select_tx_function(ETH_DEV(priv));
	int is_empw_func = is_empw_burst_func(tx_pkt_burst);
	int tso = !!(txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
					       DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
					       DEV_TX_OFFLOAD_GRE_TNL_TSO |
					       DEV_TX_OFFLOAD_IP_TNL_TSO |
					       DEV_TX_OFFLOAD_UDP_TNL_TSO));
	txq_inline = (config->txq_inline == MLX5_ARG_UNSET) ?
		0 : config->txq_inline;
	txqs_inline = (config->txqs_inline == MLX5_ARG_UNSET) ?
		0 : config->txqs_inline;
	inline_max_packet_sz =
		(config->inline_max_packet_sz == MLX5_ARG_UNSET) ?
		0 : config->inline_max_packet_sz;
		if (config->txq_inline == MLX5_ARG_UNSET)
			txq_inline = MLX5_WQE_SIZE_MAX - MLX5_WQE_SIZE;
		if (config->txqs_inline == MLX5_ARG_UNSET)
			txqs_inline = MLX5_EMPW_MIN_TXQS;
		if (config->inline_max_packet_sz == MLX5_ARG_UNSET)
			inline_max_packet_sz = MLX5_EMPW_MAX_INLINE_LEN;
		txq_ctrl->txq.mpw_hdr_dseg = config->mpw_hdr_dseg;
		txq_ctrl->txq.inline_max_packet_sz = inline_max_packet_sz;
	if (txq_inline && priv->txqs_n >= txqs_inline) {
		txq_ctrl->txq.max_inline =
			((txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
			 RTE_CACHE_LINE_SIZE);
			/* To minimize the size of data set, avoid requesting
			txq_ctrl->max_inline_data =
				((RTE_MIN(txq_inline,
					  inline_max_packet_sz) +
				  (RTE_CACHE_LINE_SIZE - 1)) /
				 RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
			txq_ctrl->max_inline_data =
				txq_ctrl->txq.max_inline * RTE_CACHE_LINE_SIZE;
		 * Check if the inline size is so large that it would make
		 * the WQE DS count overflow.
		 * Considered in the calculation:
		ds_cnt = 2 + (txq_ctrl->txq.max_inline / MLX5_WQE_DWORD_SIZE);
		if (ds_cnt > MLX5_DSEG_MAX) {
			unsigned int max_inline = (MLX5_DSEG_MAX - 2) *
			max_inline = max_inline - (max_inline %
						   RTE_CACHE_LINE_SIZE);
				"port %u txq inline is too large (%d) setting"
				" it to the maximum possible: %d\n",
				PORT_ID(priv), txq_inline, max_inline);
			txq_ctrl->txq.max_inline = max_inline /
		txq_ctrl->max_tso_header = max_tso_inline * RTE_CACHE_LINE_SIZE;
		txq_ctrl->txq.max_inline = RTE_MAX(txq_ctrl->txq.max_inline,
		txq_ctrl->txq.tso_en = 1;
	txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
	txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
				 DEV_TX_OFFLOAD_UDP_TNL_TSO |
				 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
				txq_ctrl->txq.offloads) && config->swp;
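/*
 * Worked example (assuming MLX5_WQE_DWORD_SIZE is 16, MLX5_DSEG_MAX is 63
 * and 64-byte cache lines): the WQE control and Ethernet segments consume
 * 2 DS, so the inline part may use at most 61 DS, i.e. 61 * 16 = 976 bytes,
 * rounded down to a cache-line multiple (960). A txq_inline request above
 * that is clamped by the check above.
 */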
 * Create a DPDK Tx queue.
 *   Pointer to Ethernet device.
 *   Number of descriptors to configure in queue.
 *   NUMA socket on which memory must be allocated.
 *   Thresholds parameters.
 *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
struct mlx5_txq_ctrl *
mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
	     unsigned int socket, const struct rte_eth_txconf *conf)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *tmpl;
	tmpl = rte_calloc_socket("TXQ", 1,
				 desc * sizeof(struct rte_mbuf *),
	if (mlx5_mr_btree_init(&tmpl->txq.mr_ctrl.cache_bh,
			       MLX5_MR_BTREE_CACHE_N, socket)) {
		/* rte_errno is already set. */
	/* Save pointer of global generation number to check memory event. */
	tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->sh->mr.dev_gen;
	assert(desc > MLX5_TX_COMP_THRESH);
	tmpl->txq.offloads = conf->offloads |
			     dev->data->dev_conf.txmode.offloads;
	tmpl->socket = socket;
	tmpl->txq.elts_n = log2above(desc);
	tmpl->txq.port_id = dev->data->port_id;
	txq_set_params(tmpl);
	if (txq_calc_wqebb_cnt(tmpl) >
	    priv->sh->device_attr.orig_attr.max_qp_wr) {
			"port %u Tx WQEBB count (%d) exceeds the limit (%d),"
			" try smaller queue size",
			dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
			priv->sh->device_attr.orig_attr.max_qp_wr);
		(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
	rte_atomic32_inc(&tmpl->refcnt);
	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
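/*
 * Illustrative sketch, not part of the driver: the control structure and the
 * elts[] array come from a single rte_calloc_socket() call, with the array
 * placed right behind the structure ("tmpl + 1" above), so one free releases
 * both. A hypothetical minimal equivalent of that layout:
 */
static struct mlx5_txq_ctrl *
txq_alloc_example(uint16_t desc, unsigned int socket)
{
	struct mlx5_txq_ctrl *ctrl;

	ctrl = rte_calloc_socket("TXQ", 1,
				 sizeof(*ctrl) +
				 desc * sizeof(struct rte_mbuf *),
				 0, socket);
	if (ctrl != NULL)
		/* The element array starts right after the structure. */
		ctrl->txq.elts = (struct rte_mbuf *(*)[])(ctrl + 1);
	return ctrl;
}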
 *   Pointer to Ethernet device.
 *   A pointer to the queue if it exists.
struct mlx5_txq_ctrl *
mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *ctrl = NULL;
	if ((*priv->txqs)[idx]) {
		ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl,
		mlx5_txq_ibv_get(dev, idx);
		rte_atomic32_inc(&ctrl->refcnt);
 * Release a Tx queue.
 *   Pointer to Ethernet device.
 *   1 while a reference on it exists, 0 when freed.
mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *txq;
	if (!(*priv->txqs)[idx])
	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
	if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv))
	if (rte_atomic32_dec_and_test(&txq->refcnt)) {
		mlx5_mr_btree_free(&txq->txq.mr_ctrl.cache_bh);
		LIST_REMOVE(txq, next);
		(*priv->txqs)[idx] = NULL;
 * Verify if the queue can be released.
 *   Pointer to Ethernet device.
 *   1 if the queue can be released.
mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *txq;
	if (!(*priv->txqs)[idx])
	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
	return (rte_atomic32_read(&txq->refcnt) == 1);
 * Verify that the Tx queue list is empty.
 *   Pointer to Ethernet device.
 *   The number of objects not released.
mlx5_txq_verify(struct rte_eth_dev *dev)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *txq_ctrl;
	LIST_FOREACH(txq_ctrl, &priv->txqsctrl, next) {
		DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
			dev->data->port_id, txq_ctrl->txq.idx);