/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stddef.h>
#include <assert.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/mman.h>

/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_common.h>

#include "mlx5_utils.h"
#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
/**
 * Allocate TX queue elements.
 *
 * @param txq_ctrl
 *   Pointer to TX queue structure.
 */
static void
txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
{
	const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
	unsigned int i;

	for (i = 0; (i != elts_n); ++i)
		(*txq_ctrl->txq.elts)[i] = NULL;
	DEBUG("%p: allocated and configured %u WRs", (void *)txq_ctrl, elts_n);
	txq_ctrl->txq.elts_head = 0;
	txq_ctrl->txq.elts_tail = 0;
	txq_ctrl->txq.elts_comp = 0;
}
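
/*
 * Note: elts[] behaves as a ring. elts_head is the producer index (next
 * slot filled by the Tx burst function), elts_tail the consumer index
 * (next slot reclaimed once its completion is seen) and elts_comp roughly
 * tracks the amount of work done since the last completion request.
 * Resetting all three to zero therefore marks the ring as empty.
 */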
/**
 * Free TX queue elements.
 *
 * @param txq_ctrl
 *   Pointer to TX queue structure.
 */
static void
txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
{
	const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
	const uint16_t elts_m = elts_n - 1;
	uint16_t elts_head = txq_ctrl->txq.elts_head;
	uint16_t elts_tail = txq_ctrl->txq.elts_tail;
	struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;

	DEBUG("%p: freeing WRs", (void *)txq_ctrl);
	txq_ctrl->txq.elts_head = 0;
	txq_ctrl->txq.elts_tail = 0;
	txq_ctrl->txq.elts_comp = 0;

	while (elts_tail != elts_head) {
		struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];

		assert(elt != NULL);
		rte_pktmbuf_free_seg(elt);
#ifndef NDEBUG
		/* Poisoning. */
		memset(&(*elts)[elts_tail & elts_m],
		       0x77,
		       sizeof((*elts)[elts_tail & elts_m]));
#endif
		++elts_tail;
	}
}
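
/*
 * Note: elts_head/elts_tail are free-running 16-bit counters; slots are
 * addressed modulo the power-of-two ring size through the elts_m mask, so
 * the loop above releases exactly the mbufs still in flight between tail
 * and head.
 */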
/**
 * DPDK callback to configure a TX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   TX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_txconf *conf)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	int ret = 0;

	if (mlx5_is_secondary())
		return -E_RTE_SECONDARY;

	priv_lock(priv);
	if (desc <= MLX5_TX_COMP_THRESH) {
		WARN("%p: number of descriptors requested for TX queue %u"
		     " must be higher than MLX5_TX_COMP_THRESH, using"
		     " %u instead of %u",
		     (void *)dev, idx, MLX5_TX_COMP_THRESH + 1, desc);
		desc = MLX5_TX_COMP_THRESH + 1;
	}
	if (!rte_is_power_of_2(desc)) {
		desc = 1 << log2above(desc);
		WARN("%p: increased number of descriptors in TX queue %u"
		     " to the next power of two (%d)",
		     (void *)dev, idx, desc);
	}
	DEBUG("%p: configuring queue %u for %u descriptors",
	      (void *)dev, idx, desc);
	if (idx >= priv->txqs_n) {
		ERROR("%p: queue index out of range (%u >= %u)",
		      (void *)dev, idx, priv->txqs_n);
		ret = EOVERFLOW;
		goto out;
	}
	if (!mlx5_priv_txq_releasable(priv, idx)) {
		ret = EBUSY;
		ERROR("%p: unable to release queue index %u",
		      (void *)dev, idx);
		goto out;
	}
	mlx5_priv_txq_release(priv, idx);
	txq_ctrl = mlx5_priv_txq_new(priv, idx, desc, socket, conf);
	if (!txq_ctrl) {
		ERROR("%p: unable to allocate queue index %u",
		      (void *)dev, idx);
		ret = ENOMEM;
		goto out;
	}
	DEBUG("%p: adding TX queue %p to list",
	      (void *)dev, (void *)txq_ctrl);
	(*priv->txqs)[idx] = &txq_ctrl->txq;
out:
	priv_unlock(priv);
	return -ret;
}
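
/*
 * Illustrative usage (not part of this file): applications reach the
 * callback above through rte_eth_tx_queue_setup(); the port id, queue id
 * and descriptor count below are arbitrary example values.
 *
 *	struct rte_eth_txconf txconf = dev_info.default_txconf;
 *
 *	ret = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *				     &txconf);
 *	if (ret < 0)
 *		rte_exit(EXIT_FAILURE, "Tx queue setup failed\n");
 */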
/**
 * DPDK callback to release a TX queue.
 *
 * @param dpdk_txq
 *   Generic TX queue pointer.
 */
void
mlx5_tx_queue_release(void *dpdk_txq)
{
	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
	struct mlx5_txq_ctrl *txq_ctrl;
	struct priv *priv;
	unsigned int i;

	if (mlx5_is_secondary())
		return;

	if (txq == NULL)
		return;
	txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
	priv = txq_ctrl->priv;
	priv_lock(priv);
	for (i = 0; (i != priv->txqs_n); ++i)
		if ((*priv->txqs)[i] == txq) {
			DEBUG("%p: removing TX queue %p from list",
			      (void *)priv->dev, (void *)txq_ctrl);
			mlx5_priv_txq_release(priv, i);
			break;
		}
	priv_unlock(priv);
}
/**
 * Locally map the UAR pages used by Tx queues for the BlueFlame doorbell.
 *
 * @param[in] priv
 *   Pointer to private structure.
 * @param fd
 *   Verbs file descriptor to map UAR pages.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
priv_tx_uar_remap(struct priv *priv, int fd)
{
	unsigned int i, j;
	uintptr_t pages[priv->txqs_n];
	unsigned int pages_n = 0;
	uintptr_t uar_va;
	void *addr;
	struct mlx5_txq_data *txq;
	struct mlx5_txq_ctrl *txq_ctrl;
	int already_mapped;
	size_t page_size = sysconf(_SC_PAGESIZE);

	/*
	 * Like rdma-core, UARs are mapped at OS page size granularity.
	 * Use page-aligned addresses to avoid duplicate mmap() calls.
	 * See the libmlx5 function mlx5_init_context() for reference.
	 */
	for (i = 0; i != priv->txqs_n; ++i) {
		txq = (*priv->txqs)[i];
		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
		uar_va = (uintptr_t)txq_ctrl->txq.bf_reg;
		uar_va = RTE_ALIGN_FLOOR(uar_va, page_size);
		already_mapped = 0;
		for (j = 0; j != pages_n; ++j) {
			if (pages[j] == uar_va) {
				already_mapped = 1;
				break;
			}
		}
		if (already_mapped)
			continue;
		pages[pages_n++] = uar_va;
		addr = mmap((void *)uar_va, page_size,
			    PROT_WRITE, MAP_FIXED | MAP_SHARED, fd,
			    txq_ctrl->uar_mmap_offset);
		if (addr != (void *)uar_va) {
			ERROR("call to mmap failed on UAR for txq %d\n", i);
			return -1;
		}
	}
	return 0;
}
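
/*
 * Note on priv_tx_uar_remap() above: MAP_FIXED is used so that each UAR
 * page lands exactly at the page-aligned address already stored in
 * txq->bf_reg; if mmap() returns any other address the doorbell pointer
 * would be stale, hence the hard failure.
 */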
/**
 * Create the Tx queue Verbs object.
 *
 * @param priv
 *   Pointer to private structure.
 * @param idx
 *   Queue index in the DPDK Tx queue array.
 *
 * @return
 *   The Verbs object initialised if it can be created.
 */
struct mlx5_txq_ibv*
mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
{
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_txq_ibv tmpl;
	struct mlx5_txq_ibv *txq_ibv;
	union {
		struct ibv_qp_init_attr_ex init;
		struct ibv_cq_init_attr_ex cq;
		struct ibv_qp_attr mod;
		struct ibv_cq_ex cq_attr;
	} attr;
	unsigned int cqe_n;
	struct mlx5dv_qp qp;
	struct mlx5dv_cq cq_info;
	struct mlx5dv_obj obj;
	const int desc = 1 << txq_data->elts_n;
	int ret = 0;

	assert(txq_data);
	if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
		ERROR("MLX5_ENABLE_CQE_COMPRESSION must never be set");
		goto error;
	}
	memset(&tmpl, 0, sizeof(struct mlx5_txq_ibv));
	/* MRs will be registered in mp2mr[] later. */
	attr.cq = (struct ibv_cq_init_attr_ex){
		.comp_mask = 0,
	};
	cqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ?
		((desc / MLX5_TX_COMP_THRESH) - 1) : 1;
	if (priv->mps == MLX5_MPW_ENHANCED)
		cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
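	/*
	 * Note: cqe_n computed above requests roughly one CQE per
	 * MLX5_TX_COMP_THRESH descriptors, since the data path only asks
	 * for a completion about every MLX5_TX_COMP_THRESH packets (with
	 * extra room in enhanced MPW mode).
	 */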
	tmpl.cq = ibv_create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
	if (tmpl.cq == NULL) {
		ERROR("%p: CQ creation failure", (void *)txq_ctrl);
		goto error;
	}
	attr.init = (struct ibv_qp_init_attr_ex){
		/* CQ to be associated with the send queue. */
		.send_cq = tmpl.cq,
		/* CQ to be associated with the receive queue. */
		.recv_cq = tmpl.cq,
		.cap = {
			/* Max number of outstanding WRs. */
			.max_send_wr =
				((priv->device_attr.orig_attr.max_qp_wr <
				  desc) ?
				 priv->device_attr.orig_attr.max_qp_wr :
				 desc),
			/*
			 * Max number of scatter/gather elements in a WR,
			 * must be 1 to prevent libmlx5 from trying to
			 * allocate too much memory. TX gather is not
			 * impacted by the priv->device_attr.max_sge limit
			 * and will still work properly.
			 */
			.max_send_sge = 1,
		},
		.qp_type = IBV_QPT_RAW_PACKET,
		/*
		 * Do *NOT* enable this, completion events are managed per
		 * Tx burst.
		 */
		.sq_sig_all = 0,
		.pd = priv->pd,
		.comp_mask = IBV_QP_INIT_ATTR_PD,
	};
	if (txq_data->inline_en)
		attr.init.cap.max_inline_data = txq_ctrl->max_inline_data;
	if (txq_data->tso_en) {
		attr.init.max_tso_header = txq_ctrl->max_tso_header;
		attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
	}
	tmpl.qp = ibv_create_qp_ex(priv->ctx, &attr.init);
	if (tmpl.qp == NULL) {
		ERROR("%p: QP creation failure", (void *)txq_ctrl);
		goto error;
	}
	attr.mod = (struct ibv_qp_attr){
		/* Move the QP to this state. */
		.qp_state = IBV_QPS_INIT,
		/* Primary port number. */
		.port_num = priv->port
	};
	ret = ibv_modify_qp(tmpl.qp, &attr.mod, (IBV_QP_STATE | IBV_QP_PORT));
	if (ret) {
		ERROR("%p: QP state to IBV_QPS_INIT failed", (void *)txq_ctrl);
		goto error;
	}
	attr.mod = (struct ibv_qp_attr){
		.qp_state = IBV_QPS_RTR
	};
	ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
	if (ret) {
		ERROR("%p: QP state to IBV_QPS_RTR failed", (void *)txq_ctrl);
		goto error;
	}
	attr.mod.qp_state = IBV_QPS_RTS;
	ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
	if (ret) {
		ERROR("%p: QP state to IBV_QPS_RTS failed", (void *)txq_ctrl);
		goto error;
	}
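	/*
	 * At this point the QP has gone through INIT -> RTR -> RTS, the
	 * state sequence Verbs requires before send work requests may be
	 * posted.
	 */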
	txq_ibv = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_ibv), 0,
				    txq_ctrl->socket);
	if (!txq_ibv) {
		ERROR("%p: cannot allocate memory", (void *)txq_ctrl);
		goto error;
	}
	obj.cq.in = tmpl.cq;
	obj.cq.out = &cq_info;
	obj.qp.in = tmpl.qp;
	obj.qp.out = &qp;
	ret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
	if (ret != 0)
		goto error;
	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
		ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
		      "it should be set to %u", RTE_CACHE_LINE_SIZE);
		goto error;
	}
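	/*
	 * mlx5dv_init_obj() above exposes the low-level layout of the CQ and
	 * QP (buffers, doorbell records, BlueFlame register); the fields the
	 * data path needs are cached in txq_data below.
	 */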
	txq_data->cqe_n = log2above(cq_info.cqe_cnt);
	txq_data->qp_num_8s = tmpl.qp->qp_num << 8;
	txq_data->wqes = qp.sq.buf;
	txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
	txq_data->qp_db = &qp.dbrec[MLX5_SND_DBR];
	txq_data->bf_reg = qp.bf.reg;
	txq_data->cq_db = cq_info.dbrec;
	txq_data->cqes =
		(volatile struct mlx5_cqe (*)[])
		(uintptr_t)cq_info.buf;
	txq_data->cq_ci = 0;
	txq_data->cq_pi = 0;
	txq_data->wqe_ci = 0;
	txq_data->wqe_pi = 0;
	txq_ibv->qp = tmpl.qp;
	txq_ibv->cq = tmpl.cq;
	rte_atomic32_inc(&txq_ibv->refcnt);
	DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
	      (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt));
	LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next);
	return txq_ibv;
error:
	if (tmpl.cq)
		claim_zero(ibv_destroy_cq(tmpl.cq));
	if (tmpl.qp)
		claim_zero(ibv_destroy_qp(tmpl.qp));
	return NULL;
}
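
/*
 * Note: claim_zero() (defined in mlx5_utils.h) asserts that the Verbs
 * destroy calls above return 0; at this point nothing else references the
 * temporary CQ/QP, so destruction is expected to succeed.
 */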
/**
 * Get a Tx queue Verbs object.
 *
 * @param priv
 *   Pointer to private structure.
 * @param idx
 *   Queue index in the DPDK Tx queue array.
 *
 * @return
 *   The Verbs object if it exists.
 */
struct mlx5_txq_ibv*
mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx)
{
	struct mlx5_txq_ctrl *txq_ctrl;

	if (idx >= priv->txqs_n)
		return NULL;
	if (!(*priv->txqs)[idx])
		return NULL;
	txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
	if (txq_ctrl->ibv) {
		rte_atomic32_inc(&txq_ctrl->ibv->refcnt);
		DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
		      (void *)txq_ctrl->ibv,
		      rte_atomic32_read(&txq_ctrl->ibv->refcnt));
	}
	return txq_ctrl->ibv;
}
/**
 * Release a Tx queue Verbs object.
 *
 * @param priv
 *   Pointer to private structure.
 * @param txq_ibv
 *   Verbs Tx queue object.
 *
 * @return
 *   0 on success, errno on failure.
 */
int
mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv)
{
	(void)priv;
	assert(txq_ibv);
	DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
	      (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt));
	if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
		claim_zero(ibv_destroy_qp(txq_ibv->qp));
		claim_zero(ibv_destroy_cq(txq_ibv->cq));
		LIST_REMOVE(txq_ibv, next);
		rte_free(txq_ibv);
		return 0;
	}
	return EBUSY;
}
/**
 * Return true if a single reference exists on the object.
 *
 * @param priv
 *   Pointer to private structure.
 * @param txq_ibv
 *   Verbs Tx queue object.
 */
int
mlx5_priv_txq_ibv_releasable(struct priv *priv, struct mlx5_txq_ibv *txq_ibv)
{
	(void)priv;
	assert(txq_ibv);
	return (rte_atomic32_read(&txq_ibv->refcnt) == 1);
}
/**
 * Verify that the Verbs Tx queue list is empty.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   The number of objects not released.
 */
int
mlx5_priv_txq_ibv_verify(struct priv *priv)
{
	int ret = 0;
	struct mlx5_txq_ibv *txq_ibv;

	LIST_FOREACH(txq_ibv, &priv->txqsibv, next) {
		DEBUG("%p: Verbs Tx queue %p still referenced", (void *)priv,
		      (void *)txq_ibv);
		++ret;
	}
	return ret;
}
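
/*
 * Note: the *_verify() helpers in this file only count and log objects
 * still on their lists; a non-zero return when the device is torn down
 * points at a reference leak in the control path.
 */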
/**
 * Create a DPDK Tx queue.
 *
 * @param priv
 *   Pointer to private structure.
 * @param idx
 *   TX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 *
 * @return
 *   A DPDK queue object on success.
 */
struct mlx5_txq_ctrl*
mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
		  unsigned int socket,
		  const struct rte_eth_txconf *conf)
{
	const unsigned int max_tso_inline =
		((MLX5_MAX_TSO_HEADER + (RTE_CACHE_LINE_SIZE - 1)) /
		 RTE_CACHE_LINE_SIZE);
	struct mlx5_txq_ctrl *tmpl;

	tmpl = rte_calloc_socket("TXQ", 1,
				 sizeof(*tmpl) +
				 desc * sizeof(struct rte_mbuf *),
				 0, socket);
	if (!tmpl)
		return NULL;
	assert(desc > MLX5_TX_COMP_THRESH);
	tmpl->txq.flags = conf->txq_flags;
	tmpl->priv = priv;
	tmpl->txq.elts_n = log2above(desc);
	if (priv->mps == MLX5_MPW_ENHANCED)
		tmpl->txq.mpw_hdr_dseg = priv->mpw_hdr_dseg;
	/* MRs will be registered in mp2mr[] later. */
	DEBUG("priv->device_attr.max_qp_wr is %d",
	      priv->device_attr.orig_attr.max_qp_wr);
	DEBUG("priv->device_attr.max_sge is %d",
	      priv->device_attr.orig_attr.max_sge);
	if (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) {
		unsigned int ds_cnt;

		tmpl->txq.max_inline =
			((priv->txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
			 RTE_CACHE_LINE_SIZE);
		tmpl->txq.inline_en = 1;
		/* TSO and MPS can't be enabled concurrently. */
		assert(!priv->tso || !priv->mps);
		if (priv->mps == MLX5_MPW_ENHANCED) {
			tmpl->txq.inline_max_packet_sz =
				priv->inline_max_packet_sz;
			/*
			 * To minimize the size of the data set, avoid
			 * requesting a too large WQ.
			 */
			tmpl->max_inline_data =
				((RTE_MIN(priv->txq_inline,
					  priv->inline_max_packet_sz) +
				  (RTE_CACHE_LINE_SIZE - 1)) /
				 RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
		} else if (priv->tso) {
			int inline_diff = tmpl->txq.max_inline - max_tso_inline;

			/*
			 * Adjust inline value as Verbs aggregates
			 * tso_inline and txq_inline fields.
			 */
			tmpl->max_inline_data = inline_diff > 0 ?
						inline_diff *
						RTE_CACHE_LINE_SIZE :
						0;
		} else {
			tmpl->max_inline_data =
				tmpl->txq.max_inline * RTE_CACHE_LINE_SIZE;
		}
		/*
		 * Check whether the inline size is so large that it could
		 * make the WQE DS count overflow. The calculation accounts
		 * for:
		 *	WQE CTRL (1 DS)
		 *	WQE ETH (1 DS)
		 *	Inline part (N DS)
		 */
		ds_cnt = 2 + (tmpl->txq.max_inline / MLX5_WQE_DWORD_SIZE);
		if (ds_cnt > MLX5_DSEG_MAX) {
			unsigned int max_inline = (MLX5_DSEG_MAX - 2) *
						  MLX5_WQE_DWORD_SIZE;

			max_inline = max_inline - (max_inline %
						   RTE_CACHE_LINE_SIZE);
			WARN("txq inline is too large (%d), setting it to "
			     "the maximum possible: %d\n",
			     priv->txq_inline, max_inline);
			tmpl->txq.max_inline = max_inline / RTE_CACHE_LINE_SIZE;
		}
	}
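	/*
	 * Note: each DS (data segment) entry is MLX5_WQE_DWORD_SIZE bytes
	 * wide and two of the MLX5_DSEG_MAX slots are reserved for the CTRL
	 * and ETH segments, so the largest usable inline size is
	 * (MLX5_DSEG_MAX - 2) * MLX5_WQE_DWORD_SIZE, rounded down to a
	 * multiple of RTE_CACHE_LINE_SIZE as done above.
	 */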
	if (priv->tso) {
		tmpl->max_tso_header = max_tso_inline * RTE_CACHE_LINE_SIZE;
		tmpl->txq.max_inline = RTE_MAX(tmpl->txq.max_inline,
					       max_tso_inline);
		tmpl->txq.tso_en = 1;
	}
	if (priv->tunnel_en)
		tmpl->txq.tunnel_en = 1;
	tmpl->txq.elts =
		(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
	tmpl->txq.stats.idx = idx;
	rte_atomic32_inc(&tmpl->refcnt);
	DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv,
	      (void *)tmpl, rte_atomic32_read(&tmpl->refcnt));
	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
	return tmpl;
}
/**
 * Get a Tx queue.
 *
 * @param priv
 *   Pointer to private structure.
 * @param idx
 *   TX queue index.
 *
 * @return
 *   A pointer to the queue if it exists.
 */
struct mlx5_txq_ctrl*
mlx5_priv_txq_get(struct priv *priv, uint16_t idx)
{
	struct mlx5_txq_ctrl *ctrl = NULL;

	if ((*priv->txqs)[idx]) {
		ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl,
				    txq);
		unsigned int i;

		mlx5_priv_txq_ibv_get(priv, idx);
		for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
			struct mlx5_mr *mr = NULL;

			(void)mr;
			if (ctrl->txq.mp2mr[i]) {
				mr = priv_mr_get(priv, ctrl->txq.mp2mr[i]->mp);
				assert(mr);
			}
		}
		rte_atomic32_inc(&ctrl->refcnt);
		DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv,
		      (void *)ctrl, rte_atomic32_read(&ctrl->refcnt));
	}
	return ctrl;
}
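
/*
 * Note: every successful mlx5_priv_txq_get() takes references on the queue,
 * its Verbs object and its registered MRs; each is dropped again by
 * mlx5_priv_txq_release() below, which destroys the queue only once the
 * last reference is gone.
 */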
/**
 * Release a Tx queue.
 *
 * @param priv
 *   Pointer to private structure.
 * @param idx
 *   TX queue index.
 *
 * @return
 *   0 on success, errno on failure.
 */
int
mlx5_priv_txq_release(struct priv *priv, uint16_t idx)
{
	unsigned int i;
	struct mlx5_txq_ctrl *txq;

	if (!(*priv->txqs)[idx])
		return 0;
	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
	DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv,
	      (void *)txq, rte_atomic32_read(&txq->refcnt));
	if (txq->ibv) {
		int ret;

		ret = mlx5_priv_txq_ibv_release(priv, txq->ibv);
		if (!ret)
			txq->ibv = NULL;
	}
	for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
		if (txq->txq.mp2mr[i]) {
			priv_mr_release(priv, txq->txq.mp2mr[i]);
			txq->txq.mp2mr[i] = NULL;
		}
	}
	if (rte_atomic32_dec_and_test(&txq->refcnt)) {
		txq_free_elts(txq);
		LIST_REMOVE(txq, next);
		rte_free(txq);
		(*priv->txqs)[idx] = NULL;
		return 0;
	}
	return EBUSY;
}
/**
 * Verify if the queue can be released.
 *
 * @param priv
 *   Pointer to private structure.
 * @param idx
 *   TX queue index.
 *
 * @return
 *   1 if the queue can be released.
 */
int
mlx5_priv_txq_releasable(struct priv *priv, uint16_t idx)
{
	struct mlx5_txq_ctrl *txq;

	if (!(*priv->txqs)[idx])
		return -1;
	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
	return (rte_atomic32_read(&txq->refcnt) == 1);
}
/**
 * Verify that the Tx queue list is empty.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   The number of objects not released.
 */
int
mlx5_priv_txq_verify(struct priv *priv)
{
	struct mlx5_txq_ctrl *txq;
	int ret = 0;

	LIST_FOREACH(txq, &priv->txqsctrl, next) {
		DEBUG("%p: Tx queue %p still referenced", (void *)priv,
		      (void *)txq);
		++ret;
	}
	return ret;
}