/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <assert.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/mman.h>

/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_common.h>

#include "mlx5_utils.h"
#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_glue.h"
/**
 * Allocate TX queue elements.
 *
 * @param txq_ctrl
 *   Pointer to TX queue structure.
 */
void
txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
{
	const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
	unsigned int i;

	for (i = 0; (i != elts_n); ++i)
		(*txq_ctrl->txq.elts)[i] = NULL;
	DEBUG("%p: allocated and configured %u WRs", (void *)txq_ctrl, elts_n);
	txq_ctrl->txq.elts_head = 0;
	txq_ctrl->txq.elts_tail = 0;
	txq_ctrl->txq.elts_comp = 0;
}
/**
 * Free TX queue elements.
 *
 * @param txq_ctrl
 *   Pointer to TX queue structure.
 */
static void
txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
{
	const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
	const uint16_t elts_m = elts_n - 1;
	uint16_t elts_head = txq_ctrl->txq.elts_head;
	uint16_t elts_tail = txq_ctrl->txq.elts_tail;
	struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;

	DEBUG("%p: freeing WRs", (void *)txq_ctrl);
	txq_ctrl->txq.elts_head = 0;
	txq_ctrl->txq.elts_tail = 0;
	txq_ctrl->txq.elts_comp = 0;

	while (elts_tail != elts_head) {
		struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];

		assert(elt != NULL);
		rte_pktmbuf_free_seg(elt);
#ifndef NDEBUG
		/* Poisoning. */
		memset(&(*elts)[elts_tail & elts_m],
		       0x77,
		       sizeof((*elts)[elts_tail & elts_m]));
#endif
		++elts_tail;
	}
}
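/*
 * Illustrative sketch, not part of the driver (the MLX5_TXQ_DOC_EXAMPLES
 * guard is hypothetical): elts_head/elts_tail are free-running 16-bit
 * counters, so masking with "size - 1" maps them onto a power-of-two ring
 * and the wrap at 65536 stays harmless.
 */
#ifdef MLX5_TXQ_DOC_EXAMPLES
static void
txq_ring_index_example(void)
{
	const uint16_t elts_n = 1 << 8;	/* 256-entry ring. */
	const uint16_t elts_m = elts_n - 1;
	uint16_t head = 65530;	/* counter about to wrap around. */
	uint16_t tail = 65280;
	uint16_t used = (uint16_t)(head - tail);	/* 250 entries in flight. */
	uint16_t slot = tail & elts_m;	/* ring slot of the oldest element. */

	(void)used;
	(void)slot;
}
#endif /* MLX5_TXQ_DOC_EXAMPLES */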
/**
 * Returns the per-port supported offloads.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   Supported Tx offloads.
 */
uint64_t
mlx5_priv_get_tx_port_offloads(struct priv *priv)
{
	uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
			     DEV_TX_OFFLOAD_VLAN_INSERT);
	struct mlx5_dev_config *config = &priv->config;

	if (config->hw_csum)
		offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
			     DEV_TX_OFFLOAD_UDP_CKSUM |
			     DEV_TX_OFFLOAD_TCP_CKSUM);
	if (config->tso)
		offloads |= DEV_TX_OFFLOAD_TCP_TSO;
	if (config->tunnel_en) {
		if (config->hw_csum)
			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
		if (config->tso)
			offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
				     DEV_TX_OFFLOAD_GRE_TNL_TSO);
	}
	return offloads;
}
/**
 * Checks if the per-queue offload configuration is valid.
 *
 * @param priv
 *   Pointer to private structure.
 * @param offloads
 *   Per-queue offloads configuration.
 *
 * @return
 *   1 if the configuration is valid, 0 otherwise.
 */
static int
priv_is_tx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
{
	uint64_t port_offloads = priv->dev->data->dev_conf.txmode.offloads;
	uint64_t port_supp_offloads = mlx5_priv_get_tx_port_offloads(priv);

	/* There are no Tx offloads which are per queue. */
	if ((offloads & port_supp_offloads) != offloads)
		return 0;
	if ((port_offloads ^ offloads) & port_supp_offloads)
		return 0;
	return 1;
}
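/*
 * Illustrative sketch, not part of the driver (hypothetical
 * MLX5_TXQ_DOC_EXAMPLES guard, example masks only): the first test rejects
 * a queue requesting an offload the port cannot do at all, the second
 * rejects a queue whose configuration of a supported offload differs from
 * the port-level one, since mlx5 has no per-queue Tx offloads.
 */
#ifdef MLX5_TXQ_DOC_EXAMPLES
static int
txq_offloads_check_example(void)
{
	uint64_t supp = DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_TCP_CKSUM;
	uint64_t port = DEV_TX_OFFLOAD_TCP_CKSUM;
	uint64_t queue = DEV_TX_OFFLOAD_VLAN_INSERT;

	if ((queue & supp) != queue)
		return 0;	/* unsupported offload requested. */
	if ((port ^ queue) & supp)
		return 0;	/* taken here: queue and port configs disagree. */
	return 1;
}
#endif /* MLX5_TXQ_DOC_EXAMPLES */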
/**
 * DPDK callback to configure a TX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   TX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_txconf *conf)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	int ret = 0;

	priv_lock(priv);
	/*
	 * Don't verify port offloads for applications which
	 * use the old API.
	 */
	if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
	    !priv_is_tx_queue_offloads_allowed(priv, conf->offloads)) {
		ret = ENOTSUP;
		ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port "
		      "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
		      (void *)dev, conf->offloads,
		      dev->data->dev_conf.txmode.offloads,
		      mlx5_priv_get_tx_port_offloads(priv));
		goto out;
	}
	if (desc <= MLX5_TX_COMP_THRESH) {
		WARN("%p: number of descriptors requested for TX queue %u"
		     " must be higher than MLX5_TX_COMP_THRESH, using"
		     " %u instead of %u",
		     (void *)dev, idx, MLX5_TX_COMP_THRESH + 1, desc);
		desc = MLX5_TX_COMP_THRESH + 1;
	}
	if (!rte_is_power_of_2(desc)) {
		desc = 1 << log2above(desc);
		WARN("%p: increased number of descriptors in TX queue %u"
		     " to the next power of two (%d)",
		     (void *)dev, idx, desc);
	}
	DEBUG("%p: configuring queue %u for %u descriptors",
	      (void *)dev, idx, desc);
	if (idx >= priv->txqs_n) {
		ERROR("%p: queue index out of range (%u >= %u)",
		      (void *)dev, idx, priv->txqs_n);
		ret = EOVERFLOW;
		goto out;
	}
	if (!mlx5_priv_txq_releasable(priv, idx)) {
		ret = EBUSY;
		ERROR("%p: unable to release queue index %u",
		      (void *)dev, idx);
		goto out;
	}
	mlx5_priv_txq_release(priv, idx);
	txq_ctrl = mlx5_priv_txq_new(priv, idx, desc, socket, conf);
	if (!txq_ctrl) {
		ERROR("%p: unable to allocate queue index %u",
		      (void *)dev, idx);
		ret = ENOMEM;
		goto out;
	}
	DEBUG("%p: adding TX queue %p to list",
	      (void *)dev, (void *)txq_ctrl);
	(*priv->txqs)[idx] = &txq_ctrl->txq;
out:
	priv_unlock(priv);
	return -ret;
}
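/*
 * Illustrative application-side sketch, not part of the driver (hypothetical
 * MLX5_TXQ_DOC_EXAMPLES guard, port and queue numbers made up): with
 * ETH_TXQ_FLAGS_IGNORE set, the per-queue "offloads" field is what gets
 * validated against the port and supported offloads by the callback above.
 */
#ifdef MLX5_TXQ_DOC_EXAMPLES
static int
txq_setup_usage_example(uint16_t port_id, unsigned int socket)
{
	struct rte_eth_txconf txconf = {
		.txq_flags = ETH_TXQ_FLAGS_IGNORE,
		.offloads = DEV_TX_OFFLOAD_VLAN_INSERT,
	};

	/* 512 descriptors; rounded up internally if not a power of two. */
	return rte_eth_tx_queue_setup(port_id, 0, 512, socket, &txconf);
}
#endif /* MLX5_TXQ_DOC_EXAMPLES */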
/**
 * DPDK callback to release a TX queue.
 *
 * @param dpdk_txq
 *   Generic TX queue pointer.
 */
void
mlx5_tx_queue_release(void *dpdk_txq)
{
	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
	struct mlx5_txq_ctrl *txq_ctrl;
	struct priv *priv;
	unsigned int i;

	if (txq == NULL)
		return;
	txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
	priv = txq_ctrl->priv;
	priv_lock(priv);
	for (i = 0; (i != priv->txqs_n); ++i)
		if ((*priv->txqs)[i] == txq) {
			DEBUG("%p: removing TX queue %p from list",
			      (void *)priv->dev, (void *)txq_ctrl);
			mlx5_priv_txq_release(priv, i);
			break;
		}
	priv_unlock(priv);
}
/**
 * Mmap TX UAR (HW doorbell) pages into the reserved UAR address space.
 * Both the primary and secondary processes perform the mmap so the UAR
 * addresses stay identical between them.
 *
 * @param[in] priv
 *   Pointer to private structure.
 * @param fd
 *   Verbs file descriptor to map UAR pages.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
priv_tx_uar_remap(struct priv *priv, int fd)
{
	unsigned int i, j;
	uintptr_t pages[priv->txqs_n];
	unsigned int pages_n = 0;
	uintptr_t uar_va;
	uintptr_t off;
	void *addr;
	void *ret;
	struct mlx5_txq_data *txq;
	struct mlx5_txq_ctrl *txq_ctrl;
	int already_mapped;
	size_t page_size = sysconf(_SC_PAGESIZE);

	memset(pages, 0, priv->txqs_n * sizeof(uintptr_t));
	/*
	 * As in rdma-core, UARs are mapped at OS page size granularity.
	 * Use aligned addresses to avoid duplicate mmap.
	 * Ref to libmlx5 function: mlx5_init_context()
	 */
	for (i = 0; i != priv->txqs_n; ++i) {
		txq = (*priv->txqs)[i];
		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
		/* UAR address from Verbs, used to find dup and offset in page. */
		uar_va = (uintptr_t)txq_ctrl->bf_reg_orig;
		off = uar_va & (page_size - 1); /* offset in page. */
		uar_va = RTE_ALIGN_FLOOR(uar_va, page_size); /* page addr. */
		already_mapped = 0;
		for (j = 0; j != pages_n; ++j) {
			if (pages[j] == uar_va) {
				already_mapped = 1;
				break;
			}
		}
		/* new address in reserved UAR address space. */
		addr = RTE_PTR_ADD(priv->uar_base,
				   uar_va & (MLX5_UAR_SIZE - 1));
		if (!already_mapped) {
			pages[pages_n++] = uar_va;
			/* fixed mmap to specified address in reserved
			 * address space.
			 */
			ret = mmap(addr, page_size,
				   PROT_WRITE, MAP_FIXED | MAP_SHARED, fd,
				   txq_ctrl->uar_mmap_offset);
			if (ret != addr) {
				/* fixed mmap has to return the same address. */
				ERROR("call to mmap failed on UAR for txq %d\n",
				      i);
				return ENXIO;
			}
		}
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) /* save once */
			txq_ctrl->txq.bf_reg = RTE_PTR_ADD((void *)addr, off);
		else
			assert(txq_ctrl->txq.bf_reg ==
			       RTE_PTR_ADD((void *)addr, off));
	}
	return 0;
}
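/*
 * Illustrative sketch, not part of the driver (hypothetical
 * MLX5_TXQ_DOC_EXAMPLES guard, address made up): the Verbs doorbell address
 * is split into a page-aligned part, which is the unit that gets mmap()ed
 * into the reserved UAR range, and a sub-page offset, which is added back
 * to obtain the final bf_reg pointer.
 */
#ifdef MLX5_TXQ_DOC_EXAMPLES
static void
txq_uar_split_example(void)
{
	size_t page_size = sysconf(_SC_PAGESIZE);
	uintptr_t uar_va = 0x345678a0;			/* made-up address. */
	uintptr_t off = uar_va & (page_size - 1);	/* offset in page. */
	uintptr_t page = RTE_ALIGN_FLOOR(uar_va, page_size);
	void *remapped = (void *)page;			/* stands for the mmap() result. */
	void *bf_reg = RTE_PTR_ADD(remapped, off);

	(void)bf_reg;
}
#endif /* MLX5_TXQ_DOC_EXAMPLES */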
/**
 * Check if the burst function is using eMPW.
 *
 * @param tx_pkt_burst
 *   Tx burst function pointer.
 *
 * @return
 *   1 if the burst function is using eMPW, 0 otherwise.
 */
static int
is_empw_burst_func(eth_tx_burst_t tx_pkt_burst)
{
	if (tx_pkt_burst == mlx5_tx_burst_raw_vec ||
	    tx_pkt_burst == mlx5_tx_burst_vec ||
	    tx_pkt_burst == mlx5_tx_burst_empw)
		return 1;
	return 0;
}
/**
 * Create the Tx queue Verbs object.
 *
 * @param priv
 *   Pointer to private structure.
 * @param idx
 *   Queue index in the DPDK Tx queue array.
 *
 * @return
 *   The Verbs object initialised if it can be created.
 */
struct mlx5_txq_ibv*
mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
{
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_txq_ibv tmpl;
	struct mlx5_txq_ibv *txq_ibv;
	union {
		struct ibv_qp_init_attr_ex init;
		struct ibv_cq_init_attr_ex cq;
		struct ibv_qp_attr mod;
		struct ibv_cq_ex cq_attr;
	} attr;
	unsigned int cqe_n;
	struct mlx5dv_qp qp = { .comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET };
	struct mlx5dv_cq cq_info;
	struct mlx5dv_obj obj;
	const int desc = 1 << txq_data->elts_n;
	eth_tx_burst_t tx_pkt_burst = priv_select_tx_function(priv, priv->dev);
	int ret = 0;

	assert(txq_data);
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
	priv->verbs_alloc_ctx.obj = txq_ctrl;
	if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
		ERROR("MLX5_ENABLE_CQE_COMPRESSION must never be set");
		goto error;
	}
	memset(&tmpl, 0, sizeof(struct mlx5_txq_ibv));
	/* MRs will be registered in mp2mr[] later. */
	attr.cq = (struct ibv_cq_init_attr_ex){
		.comp_mask = 0,
	};
	cqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ?
		((desc / MLX5_TX_COMP_THRESH) - 1) : 1;
	if (is_empw_burst_func(tx_pkt_burst))
		cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
	tmpl.cq = mlx5_glue->create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
	if (tmpl.cq == NULL) {
		ERROR("%p: CQ creation failure", (void *)txq_ctrl);
		goto error;
	}
	attr.init = (struct ibv_qp_init_attr_ex){
		/* CQ to be associated with the send queue. */
		.send_cq = tmpl.cq,
		/* CQ to be associated with the receive queue. */
		.recv_cq = tmpl.cq,
		.cap = {
			/* Max number of outstanding WRs. */
			.max_send_wr =
				((priv->device_attr.orig_attr.max_qp_wr <
				  desc) ?
				 priv->device_attr.orig_attr.max_qp_wr :
				 desc),
			/*
			 * Max number of scatter/gather elements in a WR,
			 * must be 1 to prevent libmlx5 from trying to affect
			 * too much memory. TX gather is not impacted by the
			 * priv->device_attr.max_sge limit and will still work
			 * properly.
			 */
			.max_send_sge = 1,
		},
		.qp_type = IBV_QPT_RAW_PACKET,
		/*
		 * Do *NOT* enable this, completions events are managed per
		 * Tx burst.
		 */
		.sq_sig_all = 0,
		.pd = priv->pd,
		.comp_mask = IBV_QP_INIT_ATTR_PD,
	};
	if (txq_data->max_inline)
		attr.init.cap.max_inline_data = txq_ctrl->max_inline_data;
	if (txq_data->tso_en) {
		attr.init.max_tso_header = txq_ctrl->max_tso_header;
		attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
	}
	tmpl.qp = mlx5_glue->create_qp_ex(priv->ctx, &attr.init);
	if (tmpl.qp == NULL) {
		ERROR("%p: QP creation failure", (void *)txq_ctrl);
		goto error;
	}
	attr.mod = (struct ibv_qp_attr){
		/* Move the QP to this state. */
		.qp_state = IBV_QPS_INIT,
		/* Primary port number. */
		.port_num = priv->port
	};
	ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod,
				   (IBV_QP_STATE | IBV_QP_PORT));
	if (ret) {
		ERROR("%p: QP state to IBV_QPS_INIT failed", (void *)txq_ctrl);
		goto error;
	}
	attr.mod = (struct ibv_qp_attr){
		.qp_state = IBV_QPS_RTR
	};
	ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
	if (ret) {
		ERROR("%p: QP state to IBV_QPS_RTR failed", (void *)txq_ctrl);
		goto error;
	}
	attr.mod.qp_state = IBV_QPS_RTS;
	ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
	if (ret) {
		ERROR("%p: QP state to IBV_QPS_RTS failed", (void *)txq_ctrl);
		goto error;
	}
	txq_ibv = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_ibv), 0,
				    txq_ctrl->socket);
	if (!txq_ibv) {
		ERROR("%p: cannot allocate memory", (void *)txq_ctrl);
		goto error;
	}
	obj.cq.in = tmpl.cq;
	obj.cq.out = &cq_info;
	obj.qp.in = tmpl.qp;
	obj.qp.out = &qp;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
	if (ret != 0)
		goto error;
	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
		ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
		      "it should be set to %u", RTE_CACHE_LINE_SIZE);
		goto error;
	}
	txq_data->cqe_n = log2above(cq_info.cqe_cnt);
	txq_data->qp_num_8s = tmpl.qp->qp_num << 8;
	txq_data->wqes = qp.sq.buf;
	txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
	txq_data->qp_db = &qp.dbrec[MLX5_SND_DBR];
	txq_ctrl->bf_reg_orig = qp.bf.reg;
	txq_data->cq_db = cq_info.dbrec;
	txq_data->cqes =
		(volatile struct mlx5_cqe (*)[])
		(uintptr_t)cq_info.buf;
	txq_data->cq_ci = 0;
#ifndef NDEBUG
	txq_data->cq_pi = 0;
#endif
	txq_data->wqe_ci = 0;
	txq_data->wqe_pi = 0;
	txq_ibv->qp = tmpl.qp;
	txq_ibv->cq = tmpl.cq;
	rte_atomic32_inc(&txq_ibv->refcnt);
	if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
		txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
	} else {
		ERROR("Failed to retrieve UAR info, invalid libmlx5.so version");
		goto error;
	}
	DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
	      (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt));
	LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next);
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
	return txq_ibv;
error:
	if (tmpl.cq)
		claim_zero(mlx5_glue->destroy_cq(tmpl.cq));
	if (tmpl.qp)
		claim_zero(mlx5_glue->destroy_qp(tmpl.qp));
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
	return NULL;
}
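/*
 * Illustrative sketch, not part of the driver (hypothetical
 * MLX5_TXQ_DOC_EXAMPLES guard): roughly one CQE is needed per
 * MLX5_TX_COMP_THRESH descriptors since completions are requested that
 * often, and eMPW bursts reserve extra room because a session can complete
 * in smaller inline chunks. For example, if MLX5_TX_COMP_THRESH is 32,
 * desc = 512 yields 15 CQEs before the eMPW addition.
 */
#ifdef MLX5_TXQ_DOC_EXAMPLES
static unsigned int
txq_cqe_n_example(unsigned int desc, int empw)
{
	unsigned int cqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ?
			     ((desc / MLX5_TX_COMP_THRESH) - 1) : 1;

	if (empw)
		cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
	return cqe_n;
}
#endif /* MLX5_TXQ_DOC_EXAMPLES */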
/**
 * Get a Tx queue Verbs object.
 *
 * @param priv
 *   Pointer to private structure.
 * @param idx
 *   Queue index in the DPDK Tx queue array.
 *
 * @return
 *   The Verbs object if it exists.
 */
struct mlx5_txq_ibv*
mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx)
{
	struct mlx5_txq_ctrl *txq_ctrl;

	if (idx >= priv->txqs_n)
		return NULL;
	if (!(*priv->txqs)[idx])
		return NULL;
	txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
	if (txq_ctrl->ibv) {
		rte_atomic32_inc(&txq_ctrl->ibv->refcnt);
		DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
		      (void *)txq_ctrl->ibv,
		      rte_atomic32_read(&txq_ctrl->ibv->refcnt));
	}
	return txq_ctrl->ibv;
}
/**
 * Release a Tx queue Verbs object.
 *
 * @param priv
 *   Pointer to private structure.
 * @param txq_ibv
 *   Verbs Tx queue object.
 *
 * @return
 *   0 on success, errno on failure.
 */
int
mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv)
{
	(void)priv;
	assert(txq_ibv);
	DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
	      (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt));
	if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
		claim_zero(mlx5_glue->destroy_qp(txq_ibv->qp));
		claim_zero(mlx5_glue->destroy_cq(txq_ibv->cq));
		LIST_REMOVE(txq_ibv, next);
		rte_free(txq_ibv);
		return 0;
	}
	return EBUSY;
}
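/*
 * Illustrative sketch, not part of the driver (hypothetical
 * MLX5_TXQ_DOC_EXAMPLES guard): every successful *_get() bumps the
 * reference count and must be paired with a *_release(); the Verbs
 * resources are destroyed only when the last reference goes away.
 */
#ifdef MLX5_TXQ_DOC_EXAMPLES
static void
txq_ibv_refcnt_example(struct priv *priv, uint16_t idx)
{
	struct mlx5_txq_ibv *ibv = mlx5_priv_txq_ibv_get(priv, idx);

	if (ibv == NULL)
		return;
	/* ... use the Verbs object here ... */
	mlx5_priv_txq_ibv_release(priv, ibv);
}
#endif /* MLX5_TXQ_DOC_EXAMPLES */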
/**
 * Return true if a single reference exists on the object.
 *
 * @param priv
 *   Pointer to private structure.
 * @param txq_ibv
 *   Verbs Tx queue object.
 */
int
mlx5_priv_txq_ibv_releasable(struct priv *priv, struct mlx5_txq_ibv *txq_ibv)
{
	(void)priv;
	assert(txq_ibv);
	return (rte_atomic32_read(&txq_ibv->refcnt) == 1);
}
/**
 * Verify the Verbs Tx queue list is empty.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return the number of objects not released.
 */
int
mlx5_priv_txq_ibv_verify(struct priv *priv)
{
	int ret = 0;
	struct mlx5_txq_ibv *txq_ibv;

	LIST_FOREACH(txq_ibv, &priv->txqsibv, next) {
		DEBUG("%p: Verbs Tx queue %p still referenced", (void *)priv,
		      (void *)txq_ibv);
		++ret;
	}
	return ret;
}
/**
 * Set Tx queue parameters from device configuration.
 *
 * @param txq_ctrl
 *   Pointer to Tx queue control structure.
 */
static void
txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
{
	struct priv *priv = txq_ctrl->priv;
	struct mlx5_dev_config *config = &priv->config;
	const unsigned int max_tso_inline =
		((MLX5_MAX_TSO_HEADER + (RTE_CACHE_LINE_SIZE - 1)) /
		 RTE_CACHE_LINE_SIZE);
	unsigned int txq_inline;
	unsigned int txqs_inline;
	unsigned int inline_max_packet_sz;
	eth_tx_burst_t tx_pkt_burst = priv_select_tx_function(priv, priv->dev);
	int is_empw_func = is_empw_burst_func(tx_pkt_burst);
	int tso = !!(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_TCP_TSO);

	txq_inline = (config->txq_inline == MLX5_ARG_UNSET) ?
		     0 : config->txq_inline;
	txqs_inline = (config->txqs_inline == MLX5_ARG_UNSET) ?
		      0 : config->txqs_inline;
	inline_max_packet_sz =
		(config->inline_max_packet_sz == MLX5_ARG_UNSET) ?
		0 : config->inline_max_packet_sz;
	if (is_empw_func) {
		if (config->txq_inline == MLX5_ARG_UNSET)
			txq_inline = MLX5_WQE_SIZE_MAX - MLX5_WQE_SIZE;
		if (config->txqs_inline == MLX5_ARG_UNSET)
			txqs_inline = MLX5_EMPW_MIN_TXQS;
		if (config->inline_max_packet_sz == MLX5_ARG_UNSET)
			inline_max_packet_sz = MLX5_EMPW_MAX_INLINE_LEN;
		txq_ctrl->txq.mpw_hdr_dseg = config->mpw_hdr_dseg;
		txq_ctrl->txq.inline_max_packet_sz = inline_max_packet_sz;
	}
	if (txq_inline && priv->txqs_n >= txqs_inline) {
		unsigned int ds_cnt;

		txq_ctrl->txq.max_inline =
			((txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
			 RTE_CACHE_LINE_SIZE);
		if (is_empw_func) {
			/* To minimize the size of data set, avoid requesting
			 * too large WQ.
			 */
			txq_ctrl->max_inline_data =
				((RTE_MIN(txq_inline,
					  inline_max_packet_sz) +
				  (RTE_CACHE_LINE_SIZE - 1)) /
				 RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
		} else if (tso) {
			int inline_diff = txq_ctrl->txq.max_inline -
					  max_tso_inline;

			/*
			 * Adjust inline value as Verbs aggregates
			 * tso_inline and txq_inline fields.
			 */
			txq_ctrl->max_inline_data = inline_diff > 0 ?
						    inline_diff *
						    RTE_CACHE_LINE_SIZE :
						    0;
		} else {
			txq_ctrl->max_inline_data =
				txq_ctrl->txq.max_inline * RTE_CACHE_LINE_SIZE;
		}
		/*
		 * Check if the inline size is too large in a way which
		 * can make the WQE DS to overflow.
		 * Considering in calculation:
		 *	WQE CTRL (1 DS)
		 *	WQE ETH  (1 DS)
		 *	Inline part (N DS)
		 */
		ds_cnt = 2 + (txq_ctrl->txq.max_inline / MLX5_WQE_DWORD_SIZE);
		if (ds_cnt > MLX5_DSEG_MAX) {
			unsigned int max_inline = (MLX5_DSEG_MAX - 2) *
						  MLX5_WQE_DWORD_SIZE;

			max_inline = max_inline - (max_inline %
						   RTE_CACHE_LINE_SIZE);
			WARN("txq inline is too large (%d) setting it to "
			     "the maximum possible: %d\n",
			     txq_inline, max_inline);
			txq_ctrl->txq.max_inline = max_inline /
						   RTE_CACHE_LINE_SIZE;
		}
	}
	if (tso) {
		txq_ctrl->max_tso_header = max_tso_inline * RTE_CACHE_LINE_SIZE;
		txq_ctrl->txq.max_inline = RTE_MAX(txq_ctrl->txq.max_inline,
						   max_tso_inline);
		txq_ctrl->txq.tso_en = 1;
	}
	txq_ctrl->txq.tunnel_en = config->tunnel_en;
}
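/*
 * Illustrative sketch, not part of the driver (hypothetical
 * MLX5_TXQ_DOC_EXAMPLES guard, values in bytes): a WQE is limited to
 * MLX5_DSEG_MAX 16-byte data segments; two are consumed by the CTRL and
 * ETH segments, so the inline area may use at most
 * (MLX5_DSEG_MAX - 2) * MLX5_WQE_DWORD_SIZE bytes, rounded down to a cache
 * line, which is the clamp applied by txq_set_params() above.
 */
#ifdef MLX5_TXQ_DOC_EXAMPLES
static unsigned int
txq_max_inline_example(unsigned int requested_inline)
{
	unsigned int ds_cnt = 2 + (requested_inline / MLX5_WQE_DWORD_SIZE);
	unsigned int max_inline = requested_inline;

	if (ds_cnt > MLX5_DSEG_MAX) {
		max_inline = (MLX5_DSEG_MAX - 2) * MLX5_WQE_DWORD_SIZE;
		max_inline -= max_inline % RTE_CACHE_LINE_SIZE;
	}
	return max_inline;
}
#endif /* MLX5_TXQ_DOC_EXAMPLES */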
/**
 * Create a DPDK Tx queue.
 *
 * @param priv
 *   Pointer to private structure.
 * @param idx
 *   TX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 *
 * @return
 *   A DPDK queue object on success.
 */
struct mlx5_txq_ctrl*
mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
		  unsigned int socket,
		  const struct rte_eth_txconf *conf)
{
	struct mlx5_txq_ctrl *tmpl;

	tmpl = rte_calloc_socket("TXQ", 1,
				 sizeof(*tmpl) +
				 desc * sizeof(struct rte_mbuf *),
				 0, socket);
	if (!tmpl)
		return NULL;
	assert(desc > MLX5_TX_COMP_THRESH);
	tmpl->txq.offloads = conf->offloads;
	tmpl->priv = priv;
	tmpl->socket = socket;
	tmpl->txq.elts_n = log2above(desc);
	txq_set_params(tmpl);
	/* MRs will be registered in mp2mr[] later. */
	DEBUG("priv->device_attr.max_qp_wr is %d",
	      priv->device_attr.orig_attr.max_qp_wr);
	DEBUG("priv->device_attr.max_sge is %d",
	      priv->device_attr.orig_attr.max_sge);
	tmpl->txq.elts =
		(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
	tmpl->txq.stats.idx = idx;
	rte_atomic32_inc(&tmpl->refcnt);
	DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv,
	      (void *)tmpl, rte_atomic32_read(&tmpl->refcnt));
	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
	return tmpl;
}
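/*
 * Illustrative sketch, not part of the driver (hypothetical
 * MLX5_TXQ_DOC_EXAMPLES guard): the control structure and its elts[] array
 * live in a single rte_calloc_socket() allocation; "(tmpl + 1)" points to
 * the first byte after the structure, where the desc mbuf pointers were
 * sized in.
 */
#ifdef MLX5_TXQ_DOC_EXAMPLES
static size_t
txq_alloc_size_example(uint16_t desc)
{
	/* Control block followed immediately by desc mbuf pointers. */
	return sizeof(struct mlx5_txq_ctrl) + desc * sizeof(struct rte_mbuf *);
}
#endif /* MLX5_TXQ_DOC_EXAMPLES */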
/**
 * Get a Tx queue.
 *
 * @param priv
 *   Pointer to private structure.
 * @param idx
 *   TX queue index.
 *
 * @return
 *   A pointer to the queue if it exists.
 */
struct mlx5_txq_ctrl*
mlx5_priv_txq_get(struct priv *priv, uint16_t idx)
{
	struct mlx5_txq_ctrl *ctrl = NULL;

	if ((*priv->txqs)[idx]) {
		ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl,
				    txq);
		unsigned int i;

		mlx5_priv_txq_ibv_get(priv, idx);
		for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
			struct mlx5_mr *mr = NULL;

			(void)mr;
			if (ctrl->txq.mp2mr[i]) {
				mr = priv_mr_get(priv, ctrl->txq.mp2mr[i]->mp);
				assert(mr);
			}
		}
		rte_atomic32_inc(&ctrl->refcnt);
		DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv,
		      (void *)ctrl, rte_atomic32_read(&ctrl->refcnt));
	}
	return ctrl;
}
/**
 * Release a Tx queue.
 *
 * @param priv
 *   Pointer to private structure.
 * @param idx
 *   TX queue index.
 *
 * @return
 *   0 on success, errno on failure.
 */
int
mlx5_priv_txq_release(struct priv *priv, uint16_t idx)
{
	unsigned int i;
	struct mlx5_txq_ctrl *txq;
	size_t page_size = sysconf(_SC_PAGESIZE);

	if (!(*priv->txqs)[idx])
		return 0;
	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
	DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv,
	      (void *)txq, rte_atomic32_read(&txq->refcnt));
	if (txq->ibv) {
		int ret;

		ret = mlx5_priv_txq_ibv_release(priv, txq->ibv);
		if (!ret)
			txq->ibv = NULL;
	}
	for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
		if (txq->txq.mp2mr[i]) {
			priv_mr_release(priv, txq->txq.mp2mr[i]);
			txq->txq.mp2mr[i] = NULL;
		}
	}
	if (priv->uar_base)
		munmap((void *)RTE_ALIGN_FLOOR((uintptr_t)txq->txq.bf_reg,
		       page_size), page_size);
	if (rte_atomic32_dec_and_test(&txq->refcnt)) {
		txq_free_elts(txq);
		LIST_REMOVE(txq, next);
		rte_free(txq);
		(*priv->txqs)[idx] = NULL;
		return 0;
	}
	return EBUSY;
}
/**
 * Verify if the queue can be released.
 *
 * @param priv
 *   Pointer to private structure.
 * @param idx
 *   TX queue index.
 *
 * @return
 *   1 if the queue can be released.
 */
int
mlx5_priv_txq_releasable(struct priv *priv, uint16_t idx)
{
	struct mlx5_txq_ctrl *txq;

	if (!(*priv->txqs)[idx])
		return -1;
	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
	return (rte_atomic32_read(&txq->refcnt) == 1);
}
/**
 * Verify the Tx Queue list is empty.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return the number of objects not released.
 */
int
mlx5_priv_txq_verify(struct priv *priv)
{
	struct mlx5_txq_ctrl *txq;
	int ret = 0;

	LIST_FOREACH(txq, &priv->txqsctrl, next) {
		DEBUG("%p: Tx Queue %p still referenced", (void *)priv,
		      (void *)txq);
		++ret;
	}
	return ret;
}