/*-
 *   BSD LICENSE
 *
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 * * Neither the name of 6WIND S.A. nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stddef.h>
#include <assert.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/mman.h>

/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#pragma GCC diagnostic ignored "-Wpedantic"
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#pragma GCC diagnostic error "-Wpedantic"

#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_common.h>

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_defs.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
/**
 * Allocate TX queue elements.
 *
 * @param txq_ctrl
 *   Pointer to TX queue structure.
 * @param elts_n
 *   Number of elements to allocate.
 */
static void
txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl, unsigned int elts_n)
{
	unsigned int i;

	/* Start with an empty array of mbuf pointers. */
	for (i = 0; (i != elts_n); ++i)
		(*txq_ctrl->txq.elts)[i] = NULL;
	/* Clear the whole WQE ring so no stale descriptors are left over. */
	for (i = 0; (i != (1u << txq_ctrl->txq.wqe_n)); ++i) {
		volatile struct mlx5_wqe64 *wqe =
			(volatile struct mlx5_wqe64 *)
			txq_ctrl->txq.wqes + i;

		memset((void *)(uintptr_t)wqe, 0x0, sizeof(*wqe));
	}
	DEBUG("%p: allocated and configured %u WRs", (void *)txq_ctrl, elts_n);
	txq_ctrl->txq.elts_head = 0;
	txq_ctrl->txq.elts_tail = 0;
	txq_ctrl->txq.elts_comp = 0;
}

/**
 * Free TX queue elements.
 *
 * @param txq_ctrl
 *   Pointer to TX queue structure.
 */
static void
txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
{
	const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
	const uint16_t elts_m = elts_n - 1;
	uint16_t elts_head = txq_ctrl->txq.elts_head;
	uint16_t elts_tail = txq_ctrl->txq.elts_tail;
	struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;

	DEBUG("%p: freeing WRs", (void *)txq_ctrl);
	txq_ctrl->txq.elts_head = 0;
	txq_ctrl->txq.elts_tail = 0;
	txq_ctrl->txq.elts_comp = 0;
	while (elts_tail != elts_head) {
		struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];

		assert(elt != NULL);
		rte_pktmbuf_free_seg(elt);
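		/*
		 * Overwrite the freed slot with a recognizable pattern so that
		 * any accidental reuse after free stands out when debugging.
		 */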
		memset(&(*elts)[elts_tail & elts_m],
		       0x77,
		       sizeof((*elts)[elts_tail & elts_m]));
		++elts_tail;
	}
}

/**
 * Clean up a TX queue.
 *
 * Destroy objects, free allocated memory and reset the structure for reuse.
 *
 * @param txq_ctrl
 *   Pointer to TX queue structure.
 */
void
mlx5_txq_cleanup(struct mlx5_txq_ctrl *txq_ctrl)
{
	size_t i;

	DEBUG("cleaning up %p", (void *)txq_ctrl);
	txq_free_elts(txq_ctrl);
	if (txq_ctrl->qp != NULL)
		claim_zero(ibv_destroy_qp(txq_ctrl->qp));
	if (txq_ctrl->cq != NULL)
		claim_zero(ibv_destroy_cq(txq_ctrl->cq));
	for (i = 0; (i != RTE_DIM(txq_ctrl->txq.mp2mr)); ++i) {
		/* mp2mr[] is filled from the start; a NULL entry ends it. */
		if (txq_ctrl->txq.mp2mr[i].mr == NULL)
			break;
		claim_zero(ibv_dereg_mr(txq_ctrl->txq.mp2mr[i].mr));
	}
	memset(txq_ctrl, 0, sizeof(*txq_ctrl));
}

/**
 * Initialize TX queue.
 *
 * @param tmpl
 *   Pointer to TX queue control template.
 * @param txq_ctrl
 *   Pointer to TX queue control.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
txq_setup(struct mlx5_txq_ctrl *tmpl, struct mlx5_txq_ctrl *txq_ctrl)
{
	struct mlx5dv_qp qp;
	struct ibv_cq *ibcq = tmpl->cq;
	struct mlx5dv_cq cq_info;
	struct mlx5dv_obj obj;
	int ret = 0;
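
	/*
	 * mlx5dv_init_obj() fills cq_info and qp with the low-level layout of
	 * the Verbs objects (ring buffers, doorbell records, BlueFlame
	 * register), which the PMD then accesses directly on the data path.
	 */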
	qp.comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET;
	obj.cq.in = ibcq;
	obj.cq.out = &cq_info;
	obj.qp.in = tmpl->qp;
	obj.qp.out = &qp;
	ret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
	if (ret != 0)
		return EINVAL;
	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
		ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
		      "it should be set to %u", RTE_CACHE_LINE_SIZE);
		return EINVAL;
	}
	tmpl->txq.cqe_n = log2above(cq_info.cqe_cnt);
	tmpl->txq.qp_num_8s = tmpl->qp->qp_num << 8;
	tmpl->txq.wqes = qp.sq.buf;
	tmpl->txq.wqe_n = log2above(qp.sq.wqe_cnt);
	tmpl->txq.qp_db = &qp.dbrec[MLX5_SND_DBR];
	tmpl->txq.bf_reg = qp.bf.reg;
	tmpl->txq.cq_db = cq_info.dbrec;
	tmpl->txq.cqes =
		(volatile struct mlx5_cqe (*)[])
		(uintptr_t)cq_info.buf;
	tmpl->txq.elts =
		(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])
		((uintptr_t)txq_ctrl + sizeof(*txq_ctrl));
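	/*
	 * From here on the data path uses these raw pointers (WQE ring, CQE
	 * ring, doorbell records and BlueFlame register) directly instead of
	 * going through ibv_post_send()/ibv_poll_cq().
	 */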
	if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
		tmpl->uar_mmap_offset = qp.uar_mmap_offset;
	} else {
		ERROR("Failed to retrieve UAR info, invalid libmlx5.so version");
		return EINVAL;
	}
	return 0;
}

/**
 * Configure a TX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param txq_ctrl
 *   Pointer to TX queue structure.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
mlx5_txq_ctrl_setup(struct rte_eth_dev *dev, struct mlx5_txq_ctrl *txq_ctrl,
		    uint16_t desc, unsigned int socket,
		    const struct rte_eth_txconf *conf)
{
	struct priv *priv = mlx5_get_priv(dev);
	struct mlx5_txq_ctrl tmpl = {
		.priv = priv,
		.socket = socket,
	};
	union {
		struct ibv_qp_init_attr_ex init;
		struct ibv_cq_init_attr_ex cq;
		struct ibv_qp_attr mod;
		struct ibv_cq_ex cq_attr;
	} attr;
	unsigned int cqe_n;
	const unsigned int max_tso_inline = ((MLX5_MAX_TSO_HEADER +
					      (RTE_CACHE_LINE_SIZE - 1)) /
					     RTE_CACHE_LINE_SIZE);
	int ret = 0;
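	/*
	 * max_tso_inline is the largest TSO header size expressed in whole
	 * cache lines; the inline sizes below (txq_inline, max_inline) use
	 * the same cache-line unit.
	 */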
	if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
		ret = ENOTSUP;
		ERROR("MLX5_ENABLE_CQE_COMPRESSION must never be set");
		goto error;
	}
	tmpl.txq.flags = conf->txq_flags;
	assert(desc > MLX5_TX_COMP_THRESH);
	tmpl.txq.elts_n = log2above(desc);
	if (priv->mps == MLX5_MPW_ENHANCED)
		tmpl.txq.mpw_hdr_dseg = priv->mpw_hdr_dseg;
	/* MRs will be registered in mp2mr[] later. */
	attr.cq = (struct ibv_cq_init_attr_ex){
		.comp_mask = 0,
	};
	cqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ?
		((desc / MLX5_TX_COMP_THRESH) - 1) : 1;
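	/*
	 * Completions are requested roughly once per MLX5_TX_COMP_THRESH
	 * packets, so the CQ only needs one entry per such batch (and at
	 * least one entry overall).
	 */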
	if (priv->mps == MLX5_MPW_ENHANCED)
		cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
	tmpl.cq = ibv_create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
	if (tmpl.cq == NULL) {
		ret = ENOMEM;
		ERROR("%p: CQ creation failure: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
	DEBUG("priv->device_attr.max_qp_wr is %d",
	      priv->device_attr.orig_attr.max_qp_wr);
	DEBUG("priv->device_attr.max_sge is %d",
	      priv->device_attr.orig_attr.max_sge);
	attr.init = (struct ibv_qp_init_attr_ex){
		/* CQ to be associated with the send queue. */
		.send_cq = tmpl.cq,
		/* CQ to be associated with the receive queue. */
		.recv_cq = tmpl.cq,
		.cap = {
			/* Max number of outstanding WRs. */
			.max_send_wr =
				((priv->device_attr.orig_attr.max_qp_wr < desc) ?
				 priv->device_attr.orig_attr.max_qp_wr :
				 desc),
			/*
			 * Max number of scatter/gather elements in a WR,
			 * must be 1 to prevent libmlx5 from trying to affect
			 * too much memory. TX gather is not impacted by the
			 * priv->device_attr.max_sge limit and will still work
			 * properly.
			 */
			.max_send_sge = 1,
		},
		.qp_type = IBV_QPT_RAW_PACKET,
		/* Do *NOT* enable this, completion events are managed per
		 * TX burst. */
		.sq_sig_all = 0,
		.pd = priv->pd,
		.comp_mask = IBV_QP_INIT_ATTR_PD,
	};
	if (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) {
		unsigned int ds_cnt;

		tmpl.txq.max_inline =
			((priv->txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
			 RTE_CACHE_LINE_SIZE);
		tmpl.txq.inline_en = 1;
		/* TSO and MPS can't be enabled concurrently. */
		assert(!priv->tso || !priv->mps);
		if (priv->mps == MLX5_MPW_ENHANCED) {
			tmpl.txq.inline_max_packet_sz =
				priv->inline_max_packet_sz;
			/* To minimize the size of data set, avoid requesting
			 * too much inline data. */
			attr.init.cap.max_inline_data =
				((RTE_MIN(priv->txq_inline,
					  priv->inline_max_packet_sz) +
				  (RTE_CACHE_LINE_SIZE - 1)) /
				 RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
		} else if (priv->tso) {
			int inline_diff = tmpl.txq.max_inline - max_tso_inline;

			/*
			 * Adjust inline value as Verbs aggregates
			 * tso_inline and txq_inline fields.
			 */
			attr.init.cap.max_inline_data = inline_diff > 0 ?
							inline_diff *
							RTE_CACHE_LINE_SIZE :
							0;
		} else {
			attr.init.cap.max_inline_data =
				tmpl.txq.max_inline * RTE_CACHE_LINE_SIZE;
		}
		/*
		 * Check whether the requested inline size is large enough to
		 * make the WQE DS count overflow. The WQE holds:
		 *      WQE CTRL    (1 DS)
		 *      WQE ETH     (1 DS)
		 *      Inline part (N DS)
		 */
		ds_cnt = 2 +
			(attr.init.cap.max_inline_data / MLX5_WQE_DWORD_SIZE);
		if (ds_cnt > MLX5_DSEG_MAX) {
			unsigned int max_inline = (MLX5_DSEG_MAX - 2) *
						  MLX5_WQE_DWORD_SIZE;

			max_inline = max_inline - (max_inline %
						   RTE_CACHE_LINE_SIZE);
			WARN("txq inline is too large (%d) setting it to "
			     "the maximum possible: %d\n",
			     priv->txq_inline, max_inline);
			tmpl.txq.max_inline = max_inline / RTE_CACHE_LINE_SIZE;
			attr.init.cap.max_inline_data = max_inline;
		}
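		/*
		 * Net effect: inline data is capped so that the CTRL, ETH and
		 * inline descriptor segments together never exceed
		 * MLX5_DSEG_MAX per WQE.
		 */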
	}
	if (priv->tso) {
		attr.init.max_tso_header =
			max_tso_inline * RTE_CACHE_LINE_SIZE;
		attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
		tmpl.txq.max_inline = RTE_MAX(tmpl.txq.max_inline,
					      max_tso_inline);
	}
	if (priv->tunnel_en)
		tmpl.txq.tunnel_en = 1;
	tmpl.qp = ibv_create_qp_ex(priv->ctx, &attr.init);
	if (tmpl.qp == NULL) {
		ret = (errno ? errno : EINVAL);
		ERROR("%p: QP creation failure: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
	DEBUG("TX queue capabilities: max_send_wr=%u, max_send_sge=%u,"
	      " max_inline_data=%u",
	      attr.init.cap.max_send_wr,
	      attr.init.cap.max_send_sge,
	      attr.init.cap.max_inline_data);
	attr.mod = (struct ibv_qp_attr){
		/* Move the QP to this state. */
		.qp_state = IBV_QPS_INIT,
		/* Primary port number. */
		.port_num = priv->port
	};
	ret = ibv_modify_qp(tmpl.qp, &attr.mod,
			    (IBV_QP_STATE | IBV_QP_PORT));
	if (ret) {
		ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
	ret = txq_setup(&tmpl, txq_ctrl);
	if (ret) {
		ERROR("%p: cannot initialize TX queue structure: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
	txq_alloc_elts(&tmpl, desc);
	attr.mod = (struct ibv_qp_attr){
		.qp_state = IBV_QPS_RTR
	};
	ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
	if (ret) {
		ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
	attr.mod.qp_state = IBV_QPS_RTS;
	ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
	if (ret) {
		ERROR("%p: QP state to IBV_QPS_RTS failed: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
	/* Clean up txq in case we're reinitializing it. */
	DEBUG("%p: cleaning-up old txq just in case", (void *)txq_ctrl);
	mlx5_txq_cleanup(txq_ctrl);
	*txq_ctrl = tmpl;
	DEBUG("%p: txq updated with %p", (void *)txq_ctrl, (void *)&tmpl);
	/* Pre-register known mempools. */
	rte_mempool_walk(mlx5_txq_mp2mr_iter, txq_ctrl);
	return 0;
error:
	mlx5_txq_cleanup(&tmpl);
	assert(ret > 0);
	return ret;
}
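
/*
 * For context (not part of the driver itself): applications reach the
 * callback below through the generic ethdev API rather than calling it
 * directly. A minimal sketch, assuming a probed mlx5 port `port_id`:
 *
 *	struct rte_eth_txconf txconf;
 *
 *	memset(&txconf, 0, sizeof(txconf));
 *	ret = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *				     &txconf);
 *
 * rte_ethdev dispatches this through dev->dev_ops->tx_queue_setup, i.e. to
 * mlx5_tx_queue_setup() below.
 */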
/**
 * DPDK callback to configure a TX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   TX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_txconf *conf)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	int ret;

	if (mlx5_is_secondary())
		return -E_RTE_SECONDARY;
	if (desc <= MLX5_TX_COMP_THRESH) {
		WARN("%p: number of descriptors requested for TX queue %u"
		     " must be higher than MLX5_TX_COMP_THRESH, using"
		     " %u instead of %u",
		     (void *)dev, idx, MLX5_TX_COMP_THRESH + 1, desc);
		desc = MLX5_TX_COMP_THRESH + 1;
	}
	if (!rte_is_power_of_2(desc)) {
		desc = 1 << log2above(desc);
		WARN("%p: increased number of descriptors in TX queue %u"
		     " to the next power of two (%d)",
		     (void *)dev, idx, desc);
	}
	DEBUG("%p: configuring queue %u for %u descriptors",
	      (void *)dev, idx, desc);
	if (idx >= priv->txqs_n) {
		ERROR("%p: queue index out of range (%u >= %u)",
		      (void *)dev, idx, priv->txqs_n);
		return -EOVERFLOW;
	}
491 DEBUG("%p: reusing already allocated queue index %u (%p)",
492 (void *)dev, idx, (void *)txq);
493 if (dev->data->dev_started) {
497 (*priv->txqs)[idx] = NULL;
498 mlx5_txq_cleanup(txq_ctrl);
		/* Resize if txq size is changed. */
		if (txq_ctrl->txq.elts_n != log2above(desc)) {
			txq_ctrl = rte_realloc(txq_ctrl,
					       sizeof(*txq_ctrl) +
					       desc * sizeof(struct rte_mbuf *),
					       RTE_CACHE_LINE_SIZE);
			if (!txq_ctrl) {
				ERROR("%p: unable to reallocate queue index %u",
				      (void *)dev, idx);
				return -ENOMEM;
			}
		}
	} else {
		txq_ctrl = rte_calloc_socket("TXQ", 1,
					     sizeof(*txq_ctrl) +
					     desc * sizeof(struct rte_mbuf *),
					     0, socket);
		if (txq_ctrl == NULL) {
			ERROR("%p: unable to allocate queue index %u",
			      (void *)dev, idx);
			return -ENOMEM;
		}
	}
	ret = mlx5_txq_ctrl_setup(dev, txq_ctrl, desc, socket, conf);
	if (ret) {
		rte_free(txq_ctrl);
		return -ret;
	}
	txq_ctrl->txq.stats.idx = idx;
	DEBUG("%p: adding TX queue %p to list",
	      (void *)dev, (void *)txq_ctrl);
	(*priv->txqs)[idx] = &txq_ctrl->txq;
	return 0;
}

/**
 * DPDK callback to release a TX queue.
 *
 * @param dpdk_txq
 *   Generic TX queue pointer.
 */
void
mlx5_tx_queue_release(void *dpdk_txq)
{
	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
	struct mlx5_txq_ctrl *txq_ctrl;
	struct priv *priv;
	unsigned int i;

	if (mlx5_is_secondary())
		return;
	if (txq == NULL)
		return;
	txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
	priv = txq_ctrl->priv;
	for (i = 0; (i != priv->txqs_n); ++i)
		if ((*priv->txqs)[i] == txq) {
			DEBUG("%p: removing TX queue %p from list",
			      (void *)priv->dev, (void *)txq_ctrl);
			(*priv->txqs)[i] = NULL;
			break;
		}
	mlx5_txq_cleanup(txq_ctrl);
	rte_free(txq_ctrl);
}

/**
 * Map locally the UAR pages used by Tx queues for the BlueFlame doorbell.
 *
 * @param priv
 *   Pointer to private structure.
 * @param fd
 *   Verbs file descriptor to map UAR pages.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
priv_tx_uar_remap(struct priv *priv, int fd)
{
	unsigned int i, j;
	uintptr_t pages[priv->txqs_n];
	unsigned int pages_n = 0;
	uintptr_t uar_va;
	void *addr;
	struct mlx5_txq_data *txq;
	struct mlx5_txq_ctrl *txq_ctrl;
	size_t page_size = sysconf(_SC_PAGESIZE);
	/*
	 * Like rdma-core, map UARs at OS page-size granularity and use the
	 * page-aligned address to avoid mapping the same page twice.
	 * See the libmlx5 function mlx5_init_context().
	 */
	for (i = 0; i != priv->txqs_n; ++i) {
		txq = (*priv->txqs)[i];
		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
		uar_va = (uintptr_t)txq_ctrl->txq.bf_reg;
		uar_va = RTE_ALIGN_FLOOR(uar_va, page_size);
		/* Check whether this page has been mapped already. */
		for (j = 0; j != pages_n; ++j) {
			if (pages[j] == uar_va)
				break;
		}
		if (j != pages_n)
			continue;
		pages[pages_n++] = uar_va;
		addr = mmap((void *)uar_va, page_size,
			    PROT_WRITE, MAP_FIXED | MAP_SHARED, fd,
			    txq_ctrl->uar_mmap_offset);
		if (addr != (void *)uar_va) {
			ERROR("call to mmap failed on UAR for txq %d\n", i);