1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
15 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
17 #pragma GCC diagnostic ignored "-Wpedantic"
19 #include <infiniband/verbs.h>
21 #pragma GCC diagnostic error "-Wpedantic"
25 #include <rte_malloc.h>
26 #include <rte_ethdev_driver.h>
27 #include <rte_common.h>
29 #include "mlx5_utils.h"
30 #include "mlx5_defs.h"
32 #include "mlx5_rxtx.h"
33 #include "mlx5_autoconf.h"
34 #include "mlx5_glue.h"
37 * Allocate TX queue elements.
40 * Pointer to TX queue structure.
43 txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
45 const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
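/* txq.elts_n stores the log2 of the ring size; shifting recovers the actual element count. */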
48 for (i = 0; (i != elts_n); ++i)
49 (*txq_ctrl->txq.elts)[i] = NULL;
50 DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
51 txq_ctrl->priv->dev->data->port_id, txq_ctrl->idx, elts_n);
52 txq_ctrl->txq.elts_head = 0;
53 txq_ctrl->txq.elts_tail = 0;
54 txq_ctrl->txq.elts_comp = 0;
58 * Free TX queue elements.
61 * Pointer to TX queue structure.
64 txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
66 const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
67 const uint16_t elts_m = elts_n - 1;
68 uint16_t elts_head = txq_ctrl->txq.elts_head;
69 uint16_t elts_tail = txq_ctrl->txq.elts_tail;
70 struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;
72 DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
73 txq_ctrl->priv->dev->data->port_id, txq_ctrl->idx);
74 txq_ctrl->txq.elts_head = 0;
75 txq_ctrl->txq.elts_tail = 0;
76 txq_ctrl->txq.elts_comp = 0;
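/*
 * elts_head and elts_tail are free-running 16-bit counters; masking with
 * elts_m (elts_n - 1) folds them back into the ring. For example, with
 * elts_n = 256 a tail value of 300 addresses slot 300 & 255 = 44.
 */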
78 while (elts_tail != elts_head) {
79 struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
82 rte_pktmbuf_free_seg(elt);
85 memset(&(*elts)[elts_tail & elts_m],
87 sizeof((*elts)[elts_tail & elts_m]));
94 * Returns the per-port supported offloads.
97 * Pointer to Ethernet device.
100 * Supported Tx offloads.
103 mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
105 struct priv *priv = dev->data->dev_private;
106 uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
107 DEV_TX_OFFLOAD_VLAN_INSERT);
108 struct mlx5_dev_config *config = &priv->config;
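/*
 * Beyond multi-segment and VLAN insertion, offloads are advertised only
 * when the corresponding capability was detected and enabled in the
 * device configuration (checksum, TSO and tunnel offloads below).
 */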
111 offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
112 DEV_TX_OFFLOAD_UDP_CKSUM |
113 DEV_TX_OFFLOAD_TCP_CKSUM);
115 offloads |= DEV_TX_OFFLOAD_TCP_TSO;
116 if (config->tunnel_en) {
118 offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
120 offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
121 DEV_TX_OFFLOAD_GRE_TNL_TSO);
123 offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
124 DEV_TX_OFFLOAD_UDP_TNL_TSO);
130 * Checks if the per-queue offload configuration is valid.
133 * Pointer to Ethernet device.
135 * Per-queue offloads configuration.
138 * 1 if the configuration is valid, 0 otherwise.
141 mlx5_is_tx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads)
143 uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
144 uint64_t port_supp_offloads = mlx5_get_tx_port_offloads(dev);
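/*
 * Two conditions are checked: the requested set must be a subset of what
 * the port supports and, since no Tx offload can be toggled per queue
 * only, it must also match the port-level configuration (the XOR test).
 */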
146 /* There are no Tx offloads that can be toggled per queue only. */
147 if ((offloads & port_supp_offloads) != offloads)
149 if ((port_offloads ^ offloads) & port_supp_offloads)
155 * DPDK callback to configure a TX queue.
158 * Pointer to Ethernet device structure.
162 * Number of descriptors to configure in the queue.
164 * NUMA socket on which memory must be allocated.
166 * Thresholds parameters.
169 * 0 on success, a negative errno value otherwise and rte_errno is set.
172 mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
173 unsigned int socket, const struct rte_eth_txconf *conf)
175 struct priv *priv = dev->data->dev_private;
176 struct mlx5_txq_data *txq = (*priv->txqs)[idx];
177 struct mlx5_txq_ctrl *txq_ctrl =
178 container_of(txq, struct mlx5_txq_ctrl, txq);
181 * Don't verify port offloads for applications that
184 if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
185 !mlx5_is_tx_queue_offloads_allowed(dev, conf->offloads)) {
188 "port %u Tx queue offloads 0x%" PRIx64 " don't match"
189 " port offloads 0x%" PRIx64 " or supported offloads 0x%"
191 dev->data->port_id, conf->offloads,
192 dev->data->dev_conf.txmode.offloads,
193 mlx5_get_tx_port_offloads(dev));
196 if (desc <= MLX5_TX_COMP_THRESH) {
198 "port %u number of descriptors requested for Tx queue"
199 " %u must be higher than MLX5_TX_COMP_THRESH, using %u"
201 dev->data->port_id, idx, MLX5_TX_COMP_THRESH + 1, desc);
202 desc = MLX5_TX_COMP_THRESH + 1;
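/*
 * A completion is only requested after MLX5_TX_COMP_THRESH packets, so a
 * smaller ring could fill up before any completion is raised and sent
 * mbufs would never be recycled.
 */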
204 if (!rte_is_power_of_2(desc)) {
205 desc = 1 << log2above(desc);
207 "port %u increased number of descriptors in Tx queue"
208 " %u to the next power of two (%d)",
209 dev->data->port_id, idx, desc);
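/*
 * A power-of-two ring size is required because the datapath masks queue
 * indices with (size - 1), as seen in txq_free_elts() above.
 */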
211 DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
212 dev->data->port_id, idx, desc);
213 if (idx >= priv->txqs_n) {
214 DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
215 dev->data->port_id, idx, priv->txqs_n);
216 rte_errno = EOVERFLOW;
219 if (!mlx5_txq_releasable(dev, idx)) {
221 DRV_LOG(ERR, "port %u unable to release queue index %u",
222 dev->data->port_id, idx);
225 mlx5_txq_release(dev, idx);
226 txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
228 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
229 dev->data->port_id, idx);
232 DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
233 dev->data->port_id, idx);
234 (*priv->txqs)[idx] = &txq_ctrl->txq;
239 * DPDK callback to release a TX queue.
242 * Generic TX queue pointer.
245 mlx5_tx_queue_release(void *dpdk_txq)
247 struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
248 struct mlx5_txq_ctrl *txq_ctrl;
254 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
255 priv = txq_ctrl->priv;
256 for (i = 0; (i != priv->txqs_n); ++i)
257 if ((*priv->txqs)[i] == txq) {
258 mlx5_txq_release(priv->dev, i);
259 DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
260 priv->dev->data->port_id, txq_ctrl->idx);
267 * Mmap Tx UAR (HW doorbell) pages into the reserved UAR address space.
268 * Both primary and secondary processes do the mmap to keep the UAR address
272 * Pointer to Ethernet device.
274 * Verbs file descriptor to map UAR pages.
277 * 0 on success, a negative errno value otherwise and rte_errno is set.
280 mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
282 struct priv *priv = dev->data->dev_private;
284 uintptr_t pages[priv->txqs_n];
285 unsigned int pages_n = 0;
290 struct mlx5_txq_data *txq;
291 struct mlx5_txq_ctrl *txq_ctrl;
293 size_t page_size = sysconf(_SC_PAGESIZE);
295 memset(pages, 0, priv->txqs_n * sizeof(uintptr_t));
297 * As in rdma-core, UARs are mapped at OS page-size granularity.
298 * Use the page-aligned address to avoid duplicate mmaps.
299 * See the libmlx5 function mlx5_init_context().
301 for (i = 0; i != priv->txqs_n; ++i) {
302 if (!(*priv->txqs)[i])
304 txq = (*priv->txqs)[i];
305 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
306 assert(txq_ctrl->idx == (uint16_t)i);
307 /* UAR address from Verbs, used to detect duplicates and the offset within the page. */
308 uar_va = (uintptr_t)txq_ctrl->bf_reg_orig;
309 off = uar_va & (page_size - 1); /* offset in page. */
310 uar_va = RTE_ALIGN_FLOOR(uar_va, page_size); /* page addr. */
312 for (j = 0; j != pages_n; ++j) {
313 if (pages[j] == uar_va) {
318 /* new address in reserved UAR address space. */
319 addr = RTE_PTR_ADD(priv->uar_base,
320 uar_va & (MLX5_UAR_SIZE - 1));
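/*
 * addr is the target inside the range reserved at priv->uar_base; reusing
 * the UAR address modulo MLX5_UAR_SIZE as the offset keeps primary and
 * secondary processes mapping each doorbell page at the same virtual
 * address.
 */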
321 if (!already_mapped) {
322 pages[pages_n++] = uar_va;
323 /* fixed mmap to specified address in reserved
326 ret = mmap(addr, page_size,
327 PROT_WRITE, MAP_FIXED | MAP_SHARED, fd,
328 txq_ctrl->uar_mmap_offset);
330 /* A fixed mmap has to return the same address. */
332 "port %u call to mmap failed on UAR"
334 dev->data->port_id, txq_ctrl->idx);
339 if (rte_eal_process_type() == RTE_PROC_PRIMARY) /* save once */
340 txq_ctrl->txq.bf_reg = RTE_PTR_ADD((void *)addr, off);
342 assert(txq_ctrl->txq.bf_reg ==
343 RTE_PTR_ADD((void *)addr, off));
349 * Check if the burst function is using eMPW.
351 * @param tx_pkt_burst
352 * Tx burst function pointer.
355 * 1 if the burst function is using eMPW, 0 otherwise.
358 is_empw_burst_func(eth_tx_burst_t tx_pkt_burst)
360 if (tx_pkt_burst == mlx5_tx_burst_raw_vec ||
361 tx_pkt_burst == mlx5_tx_burst_vec ||
362 tx_pkt_burst == mlx5_tx_burst_empw)
368 * Create the Tx queue Verbs object.
371 * Pointer to Ethernet device.
373 * Queue index in DPDK Tx queue array.
376 * The Verbs object initialised, NULL otherwise and rte_errno is set.
378 struct mlx5_txq_ibv *
379 mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
381 struct priv *priv = dev->data->dev_private;
382 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
383 struct mlx5_txq_ctrl *txq_ctrl =
384 container_of(txq_data, struct mlx5_txq_ctrl, txq);
385 struct mlx5_txq_ibv tmpl;
386 struct mlx5_txq_ibv *txq_ibv;
388 struct ibv_qp_init_attr_ex init;
389 struct ibv_cq_init_attr_ex cq;
390 struct ibv_qp_attr mod;
391 struct ibv_cq_ex cq_attr;
394 struct mlx5dv_qp qp = { .comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET };
395 struct mlx5dv_cq cq_info;
396 struct mlx5dv_obj obj;
397 const int desc = 1 << txq_data->elts_n;
398 eth_tx_burst_t tx_pkt_burst = mlx5_select_tx_function(dev);
402 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
403 priv->verbs_alloc_ctx.obj = txq_ctrl;
404 if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
406 "port %u MLX5_ENABLE_CQE_COMPRESSION must never be set",
411 memset(&tmpl, 0, sizeof(struct mlx5_txq_ibv));
412 /* MRs will be registered in mp2mr[] later. */
413 attr.cq = (struct ibv_cq_init_attr_ex){
416 cqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ?
417 ((desc / MLX5_TX_COMP_THRESH) - 1) : 1;
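/*
 * The CQ is sized for roughly one completion per MLX5_TX_COMP_THRESH
 * packets; the eMPW burst functions may request completions more often,
 * hence the extra headroom added below for those paths.
 */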
418 if (is_empw_burst_func(tx_pkt_burst))
419 cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
420 tmpl.cq = mlx5_glue->create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
421 if (tmpl.cq == NULL) {
422 DRV_LOG(ERR, "port %u Tx queue %u CQ creation failure",
423 dev->data->port_id, idx);
427 attr.init = (struct ibv_qp_init_attr_ex){
428 /* CQ to be associated with the send queue. */
430 /* CQ to be associated with the receive queue. */
433 /* Max number of outstanding WRs. */
435 ((priv->device_attr.orig_attr.max_qp_wr <
437 priv->device_attr.orig_attr.max_qp_wr :
440 * Max number of scatter/gather elements in a WR,
441 * must be 1 to prevent libmlx5 from trying to reserve
442 * too much memory. TX gather is not impacted by the
443 * priv->device_attr.max_sge limit and will still work
448 .qp_type = IBV_QPT_RAW_PACKET,
450 * Do *NOT* enable this; completion events are managed per
455 .comp_mask = IBV_QP_INIT_ATTR_PD,
457 if (txq_data->max_inline)
458 attr.init.cap.max_inline_data = txq_ctrl->max_inline_data;
459 if (txq_data->tso_en) {
460 attr.init.max_tso_header = txq_ctrl->max_tso_header;
461 attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
463 tmpl.qp = mlx5_glue->create_qp_ex(priv->ctx, &attr.init);
464 if (tmpl.qp == NULL) {
465 DRV_LOG(ERR, "port %u Tx queue %u QP creation failure",
466 dev->data->port_id, idx);
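/*
 * A freshly created Verbs QP is in the RESET state; it must be moved
 * through INIT, RTR and RTS (the modify_qp calls below) before the Tx
 * burst functions can post work requests on it.
 */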
470 attr.mod = (struct ibv_qp_attr){
471 /* Move the QP to this state. */
472 .qp_state = IBV_QPS_INIT,
473 /* Primary port number. */
474 .port_num = priv->port
476 ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod,
477 (IBV_QP_STATE | IBV_QP_PORT));
480 "port %u Tx queue %u QP state to IBV_QPS_INIT failed",
481 dev->data->port_id, idx);
485 attr.mod = (struct ibv_qp_attr){
486 .qp_state = IBV_QPS_RTR
488 ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
491 "port %u Tx queue %u QP state to IBV_QPS_RTR failed",
492 dev->data->port_id, idx);
496 attr.mod.qp_state = IBV_QPS_RTS;
497 ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
500 "port %u Tx queue %u QP state to IBV_QPS_RTS failed",
501 dev->data->port_id, idx);
505 txq_ibv = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_ibv), 0,
508 DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory",
509 dev->data->port_id, idx);
514 obj.cq.out = &cq_info;
517 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
522 if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
524 "port %u wrong MLX5_CQE_SIZE environment variable"
525 " value: it should be set to %u",
526 dev->data->port_id, RTE_CACHE_LINE_SIZE);
530 txq_data->cqe_n = log2above(cq_info.cqe_cnt);
531 txq_data->qp_num_8s = tmpl.qp->qp_num << 8;
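/* The QP number is kept pre-shifted by 8 so the datapath can OR it
 * directly into the WQE control segment.
 */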
532 txq_data->wqes = qp.sq.buf;
533 txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
534 txq_data->qp_db = &qp.dbrec[MLX5_SND_DBR];
535 txq_ctrl->bf_reg_orig = qp.bf.reg;
536 txq_data->cq_db = cq_info.dbrec;
538 (volatile struct mlx5_cqe (*)[])
539 (uintptr_t)cq_info.buf;
544 txq_data->wqe_ci = 0;
545 txq_data->wqe_pi = 0;
546 txq_ibv->qp = tmpl.qp;
547 txq_ibv->cq = tmpl.cq;
548 rte_atomic32_inc(&txq_ibv->refcnt);
549 if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
550 txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
553 "port %u failed to retrieve UAR info, invalid"
559 DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
560 dev->data->port_id, idx, rte_atomic32_read(&txq_ibv->refcnt));
561 LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next);
562 txq_ibv->txq_ctrl = txq_ctrl;
563 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
566 ret = rte_errno; /* Save rte_errno before cleanup. */
568 claim_zero(mlx5_glue->destroy_cq(tmpl.cq));
570 claim_zero(mlx5_glue->destroy_qp(tmpl.qp));
571 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
572 rte_errno = ret; /* Restore rte_errno. */
577 * Get a Tx queue Verbs object.
580 * Pointer to Ethernet device.
582 * Queue index in DPDK Tx queue array.
585 * The Verbs object if it exists.
587 struct mlx5_txq_ibv *
588 mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
590 struct priv *priv = dev->data->dev_private;
591 struct mlx5_txq_ctrl *txq_ctrl;
593 if (idx >= priv->txqs_n)
595 if (!(*priv->txqs)[idx])
597 txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
599 rte_atomic32_inc(&txq_ctrl->ibv->refcnt);
600 DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
601 dev->data->port_id, txq_ctrl->idx,
602 rte_atomic32_read(&txq_ctrl->ibv->refcnt));
604 return txq_ctrl->ibv;
608 * Release a Tx queue Verbs object.
611 * Verbs Tx queue object.
614 * 1 while a reference on it exists, 0 when freed.
617 mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv)
620 DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
621 txq_ibv->txq_ctrl->priv->dev->data->port_id,
622 txq_ibv->txq_ctrl->idx, rte_atomic32_read(&txq_ibv->refcnt));
623 if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
624 claim_zero(mlx5_glue->destroy_qp(txq_ibv->qp));
625 claim_zero(mlx5_glue->destroy_cq(txq_ibv->cq));
626 LIST_REMOVE(txq_ibv, next);
634 * Return true if a single reference exists on the object.
637 * Verbs Tx queue object.
640 mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv)
643 return (rte_atomic32_read(&txq_ibv->refcnt) == 1);
647 * Verify that the Verbs Tx queue list is empty.
650 * Pointer to Ethernet device.
653 * The number of objects not released.
656 mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
658 struct priv *priv = dev->data->dev_private;
660 struct mlx5_txq_ibv *txq_ibv;
662 LIST_FOREACH(txq_ibv, &priv->txqsibv, next) {
663 DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
664 dev->data->port_id, txq_ibv->txq_ctrl->idx);
671 * Set Tx queue parameters from device configuration.
674 * Pointer to Tx queue control structure.
677 txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
679 struct priv *priv = txq_ctrl->priv;
680 struct mlx5_dev_config *config = &priv->config;
681 const unsigned int max_tso_inline =
682 ((MLX5_MAX_TSO_HEADER + (RTE_CACHE_LINE_SIZE - 1)) /
683 RTE_CACHE_LINE_SIZE);
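/*
 * max_tso_inline is expressed in cache-line units: the maximum TSO header
 * size rounded up to whole cache lines (e.g. a 192-byte header with
 * 64-byte cache lines yields 3).
 */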
684 unsigned int txq_inline;
685 unsigned int txqs_inline;
686 unsigned int inline_max_packet_sz;
687 eth_tx_burst_t tx_pkt_burst =
688 mlx5_select_tx_function(txq_ctrl->priv->dev);
689 int is_empw_func = is_empw_burst_func(tx_pkt_burst);
690 int tso = !!(txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
691 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
692 DEV_TX_OFFLOAD_GRE_TNL_TSO |
693 DEV_TX_OFFLOAD_IP_TNL_TSO |
694 DEV_TX_OFFLOAD_UDP_TNL_TSO));
696 txq_inline = (config->txq_inline == MLX5_ARG_UNSET) ?
697 0 : config->txq_inline;
698 txqs_inline = (config->txqs_inline == MLX5_ARG_UNSET) ?
699 0 : config->txqs_inline;
700 inline_max_packet_sz =
701 (config->inline_max_packet_sz == MLX5_ARG_UNSET) ?
702 0 : config->inline_max_packet_sz;
704 if (config->txq_inline == MLX5_ARG_UNSET)
705 txq_inline = MLX5_WQE_SIZE_MAX - MLX5_WQE_SIZE;
706 if (config->txqs_inline == MLX5_ARG_UNSET)
707 txqs_inline = MLX5_EMPW_MIN_TXQS;
708 if (config->inline_max_packet_sz == MLX5_ARG_UNSET)
709 inline_max_packet_sz = MLX5_EMPW_MAX_INLINE_LEN;
710 txq_ctrl->txq.mpw_hdr_dseg = config->mpw_hdr_dseg;
711 txq_ctrl->txq.inline_max_packet_sz = inline_max_packet_sz;
713 if (txq_inline && priv->txqs_n >= txqs_inline) {
716 txq_ctrl->txq.max_inline =
717 ((txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
718 RTE_CACHE_LINE_SIZE);
720 /* To minimize the size of the data set, avoid requesting
723 txq_ctrl->max_inline_data =
724 ((RTE_MIN(txq_inline,
725 inline_max_packet_sz) +
726 (RTE_CACHE_LINE_SIZE - 1)) /
727 RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
729 int inline_diff = txq_ctrl->txq.max_inline -
733 * Adjust inline value as Verbs aggregates
734 * tso_inline and txq_inline fields.
736 txq_ctrl->max_inline_data = inline_diff > 0 ?
738 RTE_CACHE_LINE_SIZE :
741 txq_ctrl->max_inline_data =
742 txq_ctrl->txq.max_inline * RTE_CACHE_LINE_SIZE;
745 * Check if the inline size is so large that it could cause the
746 * WQE DS count to overflow.
747 * The calculation considers:
752 ds_cnt = 2 + (txq_ctrl->txq.max_inline / MLX5_WQE_DWORD_SIZE);
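/*
 * The leading 2 covers the WQE control and Ethernet segments; together
 * with the inline data segments the total encoded in the WQE DS field
 * must not exceed MLX5_DSEG_MAX.
 */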
753 if (ds_cnt > MLX5_DSEG_MAX) {
754 unsigned int max_inline = (MLX5_DSEG_MAX - 2) *
757 max_inline = max_inline - (max_inline %
758 RTE_CACHE_LINE_SIZE);
760 "port %u txq inline is too large (%d) setting"
761 " it to the maximum possible: %d\n",
762 priv->dev->data->port_id, txq_inline,
764 txq_ctrl->txq.max_inline = max_inline /
769 txq_ctrl->max_tso_header = max_tso_inline * RTE_CACHE_LINE_SIZE;
770 txq_ctrl->txq.max_inline = RTE_MAX(txq_ctrl->txq.max_inline,
772 txq_ctrl->txq.tso_en = 1;
774 txq_ctrl->txq.tunnel_en = config->tunnel_en;
775 txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
776 DEV_TX_OFFLOAD_UDP_TNL_TSO) &
777 txq_ctrl->txq.offloads) && config->swp;
781 * Create a DPDK Tx queue.
784 * Pointer to Ethernet device.
788 * Number of descriptors to configure in the queue.
790 * NUMA socket on which memory must be allocated.
792 * Thresholds parameters.
795 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
797 struct mlx5_txq_ctrl *
798 mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
799 unsigned int socket, const struct rte_eth_txconf *conf)
801 struct priv *priv = dev->data->dev_private;
802 struct mlx5_txq_ctrl *tmpl;
804 tmpl = rte_calloc_socket("TXQ", 1,
806 desc * sizeof(struct rte_mbuf *),
812 assert(desc > MLX5_TX_COMP_THRESH);
813 tmpl->txq.offloads = conf->offloads;
815 tmpl->socket = socket;
816 tmpl->txq.elts_n = log2above(desc);
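/*
 * The mbuf pointer array is carved out of the same allocation, right
 * after the control structure (see the (tmpl + 1) cast below); elts_n
 * keeps the log2 of the ring size as expected by txq_alloc_elts().
 */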
818 txq_set_params(tmpl);
819 /* MRs will be registered in mp2mr[] later. */
820 DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
821 dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
822 DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
823 dev->data->port_id, priv->device_attr.orig_attr.max_sge);
825 (struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
826 tmpl->txq.stats.idx = idx;
827 rte_atomic32_inc(&tmpl->refcnt);
828 DRV_LOG(DEBUG, "port %u Tx queue %u: refcnt %d", dev->data->port_id,
829 idx, rte_atomic32_read(&tmpl->refcnt));
830 LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
838 * Pointer to Ethernet device.
843 * A pointer to the queue if it exists.
845 struct mlx5_txq_ctrl *
846 mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
848 struct priv *priv = dev->data->dev_private;
849 struct mlx5_txq_ctrl *ctrl = NULL;
851 if ((*priv->txqs)[idx]) {
852 ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl,
856 mlx5_txq_ibv_get(dev, idx);
857 for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
858 if (ctrl->txq.mp2mr[i])
861 ctrl->txq.mp2mr[i]->mp));
863 rte_atomic32_inc(&ctrl->refcnt);
864 DRV_LOG(DEBUG, "port %u Tx queue %u refcnt %d",
866 ctrl->idx, rte_atomic32_read(&ctrl->refcnt));
872 * Release a Tx queue.
875 * Pointer to Ethernet device.
880 * 1 while a reference on it exists, 0 when freed.
883 mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
885 struct priv *priv = dev->data->dev_private;
887 struct mlx5_txq_ctrl *txq;
888 size_t page_size = sysconf(_SC_PAGESIZE);
890 if (!(*priv->txqs)[idx])
892 txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
893 DRV_LOG(DEBUG, "port %u Tx queue %u: refcnt %d", dev->data->port_id,
894 txq->idx, rte_atomic32_read(&txq->refcnt));
895 if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv))
897 for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
898 if (txq->txq.mp2mr[i]) {
899 mlx5_mr_release(txq->txq.mp2mr[i]);
900 txq->txq.mp2mr[i] = NULL;
904 munmap((void *)RTE_ALIGN_FLOOR((uintptr_t)txq->txq.bf_reg,
905 page_size), page_size);
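/*
 * bf_reg points inside the UAR page mapped by mlx5_tx_uar_remap();
 * aligning it back down to the page start recovers the address expected
 * by munmap().
 */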
906 if (rte_atomic32_dec_and_test(&txq->refcnt)) {
908 LIST_REMOVE(txq, next);
910 (*priv->txqs)[idx] = NULL;
917 * Verify if the queue can be released.
920 * Pointer to Ethernet device.
925 * 1 if the queue can be released.
928 mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
930 struct priv *priv = dev->data->dev_private;
931 struct mlx5_txq_ctrl *txq;
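/* A reference count of 1 means only the reference taken at creation time
 * remains, so nothing else is still using the queue.
 */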
933 if (!(*priv->txqs)[idx])
935 txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
936 return (rte_atomic32_read(&txq->refcnt) == 1);
940 * Verify that the Tx queue list is empty.
943 * Pointer to Ethernet device.
946 * The number of objects not released.
949 mlx5_txq_verify(struct rte_eth_dev *dev)
951 struct priv *priv = dev->data->dev_private;
952 struct mlx5_txq_ctrl *txq;
955 LIST_FOREACH(txq, &priv->txqsctrl, next) {
956 DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
957 dev->data->port_id, txq->idx);