1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox.
15 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
17 #pragma GCC diagnostic ignored "-Wpedantic"
19 #include <infiniband/verbs.h>
21 #pragma GCC diagnostic error "-Wpedantic"
25 #include <rte_malloc.h>
26 #include <rte_ethdev_driver.h>
27 #include <rte_common.h>
29 #include "mlx5_utils.h"
30 #include "mlx5_defs.h"
32 #include "mlx5_rxtx.h"
33 #include "mlx5_autoconf.h"
34 #include "mlx5_glue.h"
37 * Allocate TX queue elements.
40 * Pointer to TX queue structure.
43 txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
45 const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
48 for (i = 0; (i != elts_n); ++i)
49 (*txq_ctrl->txq.elts)[i] = NULL;
50 DEBUG("%p: allocated and configured %u WRs", (void *)txq_ctrl, elts_n);
51 txq_ctrl->txq.elts_head = 0;
52 txq_ctrl->txq.elts_tail = 0;
53 txq_ctrl->txq.elts_comp = 0;
57 * Free TX queue elements.
60 * Pointer to TX queue structure.
63 txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
65 const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
66 const uint16_t elts_m = elts_n - 1;
67 uint16_t elts_head = txq_ctrl->txq.elts_head;
68 uint16_t elts_tail = txq_ctrl->txq.elts_tail;
69 struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;
71 DEBUG("%p: freeing WRs", (void *)txq_ctrl);
72 txq_ctrl->txq.elts_head = 0;
73 txq_ctrl->txq.elts_tail = 0;
74 txq_ctrl->txq.elts_comp = 0;
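	/* Free any mbufs still held by the ring; elts_m masks the indexes since elts_n is a power of two. */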
76 while (elts_tail != elts_head) {
77 struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
80 rte_pktmbuf_free_seg(elt);
83 memset(&(*elts)[elts_tail & elts_m],
85 sizeof((*elts)[elts_tail & elts_m]));
92 * Returns the per-port supported offloads.
95 * Pointer to Ethernet device.
98 * Supported Tx offloads.
101 mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
103 struct priv *priv = dev->data->dev_private;
104 uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
105 DEV_TX_OFFLOAD_VLAN_INSERT);
106 struct mlx5_dev_config *config = &priv->config;
109 offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
110 DEV_TX_OFFLOAD_UDP_CKSUM |
111 DEV_TX_OFFLOAD_TCP_CKSUM);
113 offloads |= DEV_TX_OFFLOAD_TCP_TSO;
114 if (config->tunnel_en) {
116 offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
118 offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
119 DEV_TX_OFFLOAD_GRE_TNL_TSO);
125 * Checks if the per-queue offload configuration is valid.
128 * Pointer to Ethernet device.
130 * Per-queue offloads configuration.
133 * 1 if the configuration is valid, 0 otherwise.
136 mlx5_is_tx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads)
138 uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
139 uint64_t port_supp_offloads = mlx5_get_tx_port_offloads(dev);
141 /* There are no Tx offloads which are per queue. */
142 if ((offloads & port_supp_offloads) != offloads)
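	/* Queue offloads must therefore match the port configuration on every supported bit. */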
144 if ((port_offloads ^ offloads) & port_supp_offloads)
150 * DPDK callback to configure a TX queue.
153 * Pointer to Ethernet device structure.
157 * Number of descriptors to configure in queue.
159 * NUMA socket on which memory must be allocated.
161 * Thresholds parameters.
164 * 0 on success, a negative errno value otherwise and rte_errno is set.
167 mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
168 unsigned int socket, const struct rte_eth_txconf *conf)
170 struct priv *priv = dev->data->dev_private;
171 struct mlx5_txq_data *txq = (*priv->txqs)[idx];
172 struct mlx5_txq_ctrl *txq_ctrl =
173 container_of(txq, struct mlx5_txq_ctrl, txq);
176	 * Don't verify port offloads for applications which
179 if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
180 !mlx5_is_tx_queue_offloads_allowed(dev, conf->offloads)) {
182 ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port "
183 "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
184 (void *)dev, conf->offloads,
185 dev->data->dev_conf.txmode.offloads,
186 mlx5_get_tx_port_offloads(dev));
189 if (desc <= MLX5_TX_COMP_THRESH) {
190 WARN("%p: number of descriptors requested for TX queue %u"
191 " must be higher than MLX5_TX_COMP_THRESH, using"
193 (void *)dev, idx, MLX5_TX_COMP_THRESH + 1, desc);
194 desc = MLX5_TX_COMP_THRESH + 1;
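	/* The ring size must be a power of two so indexes can wrap with a simple mask (see elts_m). */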
196 if (!rte_is_power_of_2(desc)) {
197 desc = 1 << log2above(desc);
198 WARN("%p: increased number of descriptors in TX queue %u"
199 " to the next power of two (%d)",
200 (void *)dev, idx, desc);
202 DEBUG("%p: configuring queue %u for %u descriptors",
203 (void *)dev, idx, desc);
204 if (idx >= priv->txqs_n) {
205 ERROR("%p: queue index out of range (%u >= %u)",
206 (void *)dev, idx, priv->txqs_n);
207 rte_errno = EOVERFLOW;
210 if (!mlx5_txq_releasable(dev, idx)) {
212 ERROR("%p: unable to release queue index %u",
216 mlx5_txq_release(dev, idx);
217 txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
219 ERROR("%p: unable to allocate queue index %u",
223 DEBUG("%p: adding TX queue %p to list",
224 (void *)dev, (void *)txq_ctrl);
225 (*priv->txqs)[idx] = &txq_ctrl->txq;
230 * DPDK callback to release a TX queue.
233 * Generic TX queue pointer.
236 mlx5_tx_queue_release(void *dpdk_txq)
238 struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
239 struct mlx5_txq_ctrl *txq_ctrl;
245 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
246 priv = txq_ctrl->priv;
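	/* Locate the queue index so it can be released from the PMD lists. */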
247 for (i = 0; (i != priv->txqs_n); ++i)
248 if ((*priv->txqs)[i] == txq) {
249 mlx5_txq_release(priv->dev, i);
250 DEBUG("%p: removing TX queue %p from list",
251 (void *)priv->dev, (void *)txq_ctrl);
258	 * Mmap TX UAR (HW doorbell) pages into the reserved UAR address space.
259	 * Both primary and secondary processes mmap to make the UAR address
263 * Pointer to Ethernet device.
265 * Verbs file descriptor to map UAR pages.
268 * 0 on success, a negative errno value otherwise and rte_errno is set.
271 mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
273 struct priv *priv = dev->data->dev_private;
275 uintptr_t pages[priv->txqs_n];
276 unsigned int pages_n = 0;
281 struct mlx5_txq_data *txq;
282 struct mlx5_txq_ctrl *txq_ctrl;
284 size_t page_size = sysconf(_SC_PAGESIZE);
286 memset(pages, 0, priv->txqs_n * sizeof(uintptr_t));
288	 * As in rdma-core, UARs are mapped at OS page-size granularity.
289	 * Use the aligned address to avoid duplicate mmaps.
290	 * Refer to the libmlx5 function mlx5_init_context().
292 for (i = 0; i != priv->txqs_n; ++i) {
293 if (!(*priv->txqs)[i])
295 txq = (*priv->txqs)[i];
296 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
297		/* UAR address from Verbs, used to find duplicates and the offset within the page. */
298 uar_va = (uintptr_t)txq_ctrl->bf_reg_orig;
299 off = uar_va & (page_size - 1); /* offset in page. */
300 uar_va = RTE_ALIGN_FLOOR(uar_va, page_size); /* page addr. */
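		/* Skip pages already mapped for a previous queue. */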
302 for (j = 0; j != pages_n; ++j) {
303 if (pages[j] == uar_va) {
308 /* new address in reserved UAR address space. */
309 addr = RTE_PTR_ADD(priv->uar_base,
310 uar_va & (MLX5_UAR_SIZE - 1));
311 if (!already_mapped) {
312 pages[pages_n++] = uar_va;
313			/* Fixed mmap to the specified address in the reserved
316 ret = mmap(addr, page_size,
317 PROT_WRITE, MAP_FIXED | MAP_SHARED, fd,
318 txq_ctrl->uar_mmap_offset);
320				/* A fixed mmap has to return the same address. */
321 ERROR("call to mmap failed on UAR for txq %d\n",
327 if (rte_eal_process_type() == RTE_PROC_PRIMARY) /* save once */
328 txq_ctrl->txq.bf_reg = RTE_PTR_ADD((void *)addr, off);
330 assert(txq_ctrl->txq.bf_reg ==
331 RTE_PTR_ADD((void *)addr, off));
337 * Check if the burst function is using eMPW.
339 * @param tx_pkt_burst
340 * Tx burst function pointer.
343 * 1 if the burst function is using eMPW, 0 otherwise.
346 is_empw_burst_func(eth_tx_burst_t tx_pkt_burst)
348 if (tx_pkt_burst == mlx5_tx_burst_raw_vec ||
349 tx_pkt_burst == mlx5_tx_burst_vec ||
350 tx_pkt_burst == mlx5_tx_burst_empw)
356 * Create the Tx queue Verbs object.
359 * Pointer to Ethernet device.
361 *   Queue index in the DPDK Tx queue array.
364 * The Verbs object initialised, NULL otherwise and rte_errno is set.
366 struct mlx5_txq_ibv *
367 mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
369 struct priv *priv = dev->data->dev_private;
370 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
371 struct mlx5_txq_ctrl *txq_ctrl =
372 container_of(txq_data, struct mlx5_txq_ctrl, txq);
373 struct mlx5_txq_ibv tmpl;
374 struct mlx5_txq_ibv *txq_ibv;
376 struct ibv_qp_init_attr_ex init;
377 struct ibv_cq_init_attr_ex cq;
378 struct ibv_qp_attr mod;
379 struct ibv_cq_ex cq_attr;
382 struct mlx5dv_qp qp = { .comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET };
383 struct mlx5dv_cq cq_info;
384 struct mlx5dv_obj obj;
385 const int desc = 1 << txq_data->elts_n;
386 eth_tx_burst_t tx_pkt_burst = mlx5_select_tx_function(dev);
390 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
391 priv->verbs_alloc_ctx.obj = txq_ctrl;
392 if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
393 ERROR("MLX5_ENABLE_CQE_COMPRESSION must never be set");
397 memset(&tmpl, 0, sizeof(struct mlx5_txq_ibv));
398 /* MRs will be registered in mp2mr[] later. */
399 attr.cq = (struct ibv_cq_init_attr_ex){
402 cqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ?
403 ((desc / MLX5_TX_COMP_THRESH) - 1) : 1;
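	/* The enhanced MPW (eMPW) burst functions need additional CQEs. */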
404 if (is_empw_burst_func(tx_pkt_burst))
405 cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
406 tmpl.cq = mlx5_glue->create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
407 if (tmpl.cq == NULL) {
408 ERROR("%p: CQ creation failure", (void *)txq_ctrl);
412 attr.init = (struct ibv_qp_init_attr_ex){
413 /* CQ to be associated with the send queue. */
415 /* CQ to be associated with the receive queue. */
418 /* Max number of outstanding WRs. */
420 ((priv->device_attr.orig_attr.max_qp_wr <
422 priv->device_attr.orig_attr.max_qp_wr :
425 * Max number of scatter/gather elements in a WR,
426 * must be 1 to prevent libmlx5 from trying to affect
427 * too much memory. TX gather is not impacted by the
428 * priv->device_attr.max_sge limit and will still work
433 .qp_type = IBV_QPT_RAW_PACKET,
435		 * Do *NOT* enable this, completion events are managed per
440 .comp_mask = IBV_QP_INIT_ATTR_PD,
442 if (txq_data->max_inline)
443 attr.init.cap.max_inline_data = txq_ctrl->max_inline_data;
444 if (txq_data->tso_en) {
445 attr.init.max_tso_header = txq_ctrl->max_tso_header;
446 attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
448 tmpl.qp = mlx5_glue->create_qp_ex(priv->ctx, &attr.init);
449 if (tmpl.qp == NULL) {
450 ERROR("%p: QP creation failure", (void *)txq_ctrl);
454 attr.mod = (struct ibv_qp_attr){
455 /* Move the QP to this state. */
456 .qp_state = IBV_QPS_INIT,
457 /* Primary port number. */
458 .port_num = priv->port
460 ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod,
461 (IBV_QP_STATE | IBV_QP_PORT));
463 ERROR("%p: QP state to IBV_QPS_INIT failed", (void *)txq_ctrl);
467 attr.mod = (struct ibv_qp_attr){
468 .qp_state = IBV_QPS_RTR
470 ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
472 ERROR("%p: QP state to IBV_QPS_RTR failed", (void *)txq_ctrl);
476 attr.mod.qp_state = IBV_QPS_RTS;
477 ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
479 ERROR("%p: QP state to IBV_QPS_RTS failed", (void *)txq_ctrl);
483 txq_ibv = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_ibv), 0,
486 ERROR("%p: cannot allocate memory", (void *)txq_ctrl);
491 obj.cq.out = &cq_info;
494 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
499 if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
500 ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
501 "it should be set to %u", RTE_CACHE_LINE_SIZE);
505 txq_data->cqe_n = log2above(cq_info.cqe_cnt);
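	/* QP number pre-shifted by 8 bits, ready to be OR'ed into WQE control segments. */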
506 txq_data->qp_num_8s = tmpl.qp->qp_num << 8;
507 txq_data->wqes = qp.sq.buf;
508 txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
509 txq_data->qp_db = &qp.dbrec[MLX5_SND_DBR];
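	/* Keep the doorbell address returned by Verbs; mlx5_tx_uar_remap() remaps it later. */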
510 txq_ctrl->bf_reg_orig = qp.bf.reg;
511 txq_data->cq_db = cq_info.dbrec;
513 (volatile struct mlx5_cqe (*)[])
514 (uintptr_t)cq_info.buf;
519 txq_data->wqe_ci = 0;
520 txq_data->wqe_pi = 0;
521 txq_ibv->qp = tmpl.qp;
522 txq_ibv->cq = tmpl.cq;
523 rte_atomic32_inc(&txq_ibv->refcnt);
524 if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
525 txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
527 ERROR("Failed to retrieve UAR info, invalid libmlx5.so version");
531 DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)dev,
532 (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt));
533 LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next);
534 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
537 ret = rte_errno; /* Save rte_errno before cleanup. */
539 claim_zero(mlx5_glue->destroy_cq(tmpl.cq));
541 claim_zero(mlx5_glue->destroy_qp(tmpl.qp));
542 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
543 rte_errno = ret; /* Restore rte_errno. */
548 * Get a Tx queue Verbs object.
551 * Pointer to Ethernet device.
553 *   Queue index in the DPDK Tx queue array.
556 * The Verbs object if it exists.
558 struct mlx5_txq_ibv *
559 mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
561 struct priv *priv = dev->data->dev_private;
562 struct mlx5_txq_ctrl *txq_ctrl;
564 if (idx >= priv->txqs_n)
566 if (!(*priv->txqs)[idx])
568 txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
570 rte_atomic32_inc(&txq_ctrl->ibv->refcnt);
571 DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)dev,
572 (void *)txq_ctrl->ibv,
573 rte_atomic32_read(&txq_ctrl->ibv->refcnt));
575 return txq_ctrl->ibv;
579 * Release a Tx queue Verbs object.
582 * Verbs Tx queue object.
585 * 1 while a reference on it exists, 0 when freed.
588 mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv)
591 DEBUG("Verbs Tx queue %p: refcnt %d",
592 (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt));
593 if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
594 claim_zero(mlx5_glue->destroy_qp(txq_ibv->qp));
595 claim_zero(mlx5_glue->destroy_cq(txq_ibv->cq));
596 LIST_REMOVE(txq_ibv, next);
604 * Return true if a single reference exists on the object.
607 * Verbs Tx queue object.
610 mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv)
613 return (rte_atomic32_read(&txq_ibv->refcnt) == 1);
617 * Verify that the Verbs Tx queue list is empty.
620 * Pointer to Ethernet device.
623 *   The number of objects not released.
626 mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
628 struct priv *priv = dev->data->dev_private;
630 struct mlx5_txq_ibv *txq_ibv;
632 LIST_FOREACH(txq_ibv, &priv->txqsibv, next) {
633 DEBUG("%p: Verbs Tx queue %p still referenced", (void *)dev,
641 * Set Tx queue parameters from device configuration.
644 * Pointer to Tx queue control structure.
647 txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
649 struct priv *priv = txq_ctrl->priv;
650 struct mlx5_dev_config *config = &priv->config;
651 const unsigned int max_tso_inline =
652 ((MLX5_MAX_TSO_HEADER + (RTE_CACHE_LINE_SIZE - 1)) /
653 RTE_CACHE_LINE_SIZE);
654 unsigned int txq_inline;
655 unsigned int txqs_inline;
656 unsigned int inline_max_packet_sz;
657 eth_tx_burst_t tx_pkt_burst =
658 mlx5_select_tx_function(txq_ctrl->priv->dev);
659 int is_empw_func = is_empw_burst_func(tx_pkt_burst);
660 int tso = !!(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_TCP_TSO);
662 txq_inline = (config->txq_inline == MLX5_ARG_UNSET) ?
663 0 : config->txq_inline;
664 txqs_inline = (config->txqs_inline == MLX5_ARG_UNSET) ?
665 0 : config->txqs_inline;
666 inline_max_packet_sz =
667 (config->inline_max_packet_sz == MLX5_ARG_UNSET) ?
668 0 : config->inline_max_packet_sz;
670 if (config->txq_inline == MLX5_ARG_UNSET)
671 txq_inline = MLX5_WQE_SIZE_MAX - MLX5_WQE_SIZE;
672 if (config->txqs_inline == MLX5_ARG_UNSET)
673 txqs_inline = MLX5_EMPW_MIN_TXQS;
674 if (config->inline_max_packet_sz == MLX5_ARG_UNSET)
675 inline_max_packet_sz = MLX5_EMPW_MAX_INLINE_LEN;
676 txq_ctrl->txq.mpw_hdr_dseg = config->mpw_hdr_dseg;
677 txq_ctrl->txq.inline_max_packet_sz = inline_max_packet_sz;
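	/* Enable inlining only when a size is set and the Tx queue count reaches the threshold. */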
679 if (txq_inline && priv->txqs_n >= txqs_inline) {
682 txq_ctrl->txq.max_inline =
683 ((txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
684 RTE_CACHE_LINE_SIZE);
686			/* To minimize the data set size, avoid requesting
689 txq_ctrl->max_inline_data =
690 ((RTE_MIN(txq_inline,
691 inline_max_packet_sz) +
692 (RTE_CACHE_LINE_SIZE - 1)) /
693 RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
695 int inline_diff = txq_ctrl->txq.max_inline -
699 * Adjust inline value as Verbs aggregates
700 * tso_inline and txq_inline fields.
702 txq_ctrl->max_inline_data = inline_diff > 0 ?
704 RTE_CACHE_LINE_SIZE :
707 txq_ctrl->max_inline_data =
708 txq_ctrl->txq.max_inline * RTE_CACHE_LINE_SIZE;
711	 * Check whether the inline size is so large that it could
712	 * make the WQE DS count overflow.
713	 * The calculation accounts for:
718 ds_cnt = 2 + (txq_ctrl->txq.max_inline / MLX5_WQE_DWORD_SIZE);
719 if (ds_cnt > MLX5_DSEG_MAX) {
720 unsigned int max_inline = (MLX5_DSEG_MAX - 2) *
723 max_inline = max_inline - (max_inline %
724 RTE_CACHE_LINE_SIZE);
725 WARN("txq inline is too large (%d) setting it to "
726 "the maximum possible: %d\n",
727 txq_inline, max_inline);
728 txq_ctrl->txq.max_inline = max_inline /
733 txq_ctrl->max_tso_header = max_tso_inline * RTE_CACHE_LINE_SIZE;
734 txq_ctrl->txq.max_inline = RTE_MAX(txq_ctrl->txq.max_inline,
736 txq_ctrl->txq.tso_en = 1;
738 txq_ctrl->txq.tunnel_en = config->tunnel_en;
742 * Create a DPDK Tx queue.
745 * Pointer to Ethernet device.
749 * Number of descriptors to configure in queue.
751 * NUMA socket on which memory must be allocated.
753 * Thresholds parameters.
756 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
758 struct mlx5_txq_ctrl *
759 mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
760 unsigned int socket, const struct rte_eth_txconf *conf)
762 struct priv *priv = dev->data->dev_private;
763 struct mlx5_txq_ctrl *tmpl;
765 tmpl = rte_calloc_socket("TXQ", 1,
767 desc * sizeof(struct rte_mbuf *),
773 assert(desc > MLX5_TX_COMP_THRESH);
774 tmpl->txq.offloads = conf->offloads;
776 tmpl->socket = socket;
777 tmpl->txq.elts_n = log2above(desc);
778 txq_set_params(tmpl);
779 /* MRs will be registered in mp2mr[] later. */
780 DEBUG("priv->device_attr.max_qp_wr is %d",
781 priv->device_attr.orig_attr.max_qp_wr);
782 DEBUG("priv->device_attr.max_sge is %d",
783 priv->device_attr.orig_attr.max_sge);
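	/* The elts[] array is laid out immediately after the control structure (tmpl + 1). */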
785 (struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
786 tmpl->txq.stats.idx = idx;
787 rte_atomic32_inc(&tmpl->refcnt);
788 DEBUG("%p: Tx queue %p: refcnt %d", (void *)dev,
789 (void *)tmpl, rte_atomic32_read(&tmpl->refcnt));
790 LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
798 * Pointer to Ethernet device.
803 * A pointer to the queue if it exists.
805 struct mlx5_txq_ctrl *
806 mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
808 struct priv *priv = dev->data->dev_private;
809 struct mlx5_txq_ctrl *ctrl = NULL;
811 if ((*priv->txqs)[idx]) {
812 ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl,
816 mlx5_txq_ibv_get(dev, idx);
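		/* Also reference the memory regions cached in mp2mr[]. */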
817 for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
818 if (ctrl->txq.mp2mr[i])
821 ctrl->txq.mp2mr[i]->mp));
823 rte_atomic32_inc(&ctrl->refcnt);
824 DEBUG("%p: Tx queue %p: refcnt %d", (void *)dev,
825 (void *)ctrl, rte_atomic32_read(&ctrl->refcnt));
831 * Release a Tx queue.
834 * Pointer to Ethernet device.
839 * 1 while a reference on it exists, 0 when freed.
842 mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
844 struct priv *priv = dev->data->dev_private;
846 struct mlx5_txq_ctrl *txq;
847 size_t page_size = sysconf(_SC_PAGESIZE);
849 if (!(*priv->txqs)[idx])
851 txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
852 DEBUG("%p: Tx queue %p: refcnt %d", (void *)dev,
853 (void *)txq, rte_atomic32_read(&txq->refcnt));
854 if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv))
856 for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
857 if (txq->txq.mp2mr[i]) {
858 mlx5_mr_release(txq->txq.mp2mr[i]);
859 txq->txq.mp2mr[i] = NULL;
863 munmap((void *)RTE_ALIGN_FLOOR((uintptr_t)txq->txq.bf_reg,
864 page_size), page_size);
865 if (rte_atomic32_dec_and_test(&txq->refcnt)) {
867 LIST_REMOVE(txq, next);
869 (*priv->txqs)[idx] = NULL;
876 * Verify if the queue can be released.
879 * Pointer to Ethernet device.
884 * 1 if the queue can be released.
887 mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
889 struct priv *priv = dev->data->dev_private;
890 struct mlx5_txq_ctrl *txq;
892 if (!(*priv->txqs)[idx])
894 txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
895 return (rte_atomic32_read(&txq->refcnt) == 1);
899 * Verify that the Tx queue list is empty.
902 * Pointer to Ethernet device.
905 *   The number of objects not released.
908 mlx5_txq_verify(struct rte_eth_dev *dev)
910 struct priv *priv = dev->data->dev_private;
911 struct mlx5_txq_ctrl *txq;
914 LIST_FOREACH(txq, &priv->txqsctrl, next) {
915 DEBUG("%p: Tx Queue %p still referenced", (void *)dev,