/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

/**
 * @file
 * Tx queues configuration for mlx4 driver.
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_errno.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

#include "mlx4.h"
#include "mlx4_glue.h"
#include "mlx4_prm.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"

/**
 * Initialize Tx UAR registers for primary process.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 */
static void
txq_uar_init(struct txq *txq)
{
	struct mlx4_priv *priv = txq->priv;
	struct mlx4_proc_priv *ppriv = MLX4_PROC_PRIV(PORT_ID(priv));

	MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	MLX4_ASSERT(ppriv);
	ppriv->uar_table[txq->stats.idx] = txq->msq.db;
}
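
/*
 * Note: the UAR doorbell table lives in per-process private memory: a
 * UAR page must be mapped in each process's own address space, so the
 * primary process records the address obtained from Verbs here, while
 * secondary processes remap the page through txq_uar_init_secondary()
 * below.
 */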

#ifdef HAVE_IBV_MLX4_UAR_MMAP_OFFSET
/**
 * Remap UAR register of a Tx queue for secondary process.
 *
 * Remapped address is stored at the table in the process private structure of
 * the device, indexed by queue index.
 *
 * @param[in] txq
 *   Pointer to Tx queue structure.
 * @param fd
 *   Verbs file descriptor to map UAR pages.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
txq_uar_init_secondary(struct txq *txq, int fd)
{
	struct mlx4_priv *priv = txq->priv;
	struct mlx4_proc_priv *ppriv = MLX4_PROC_PRIV(PORT_ID(priv));
	void *addr;
	uintptr_t uar_va;
	uintptr_t offset;
	const size_t page_size = sysconf(_SC_PAGESIZE);

	MLX4_ASSERT(ppriv);
	/*
	 * As in rdma-core, UARs are mapped at OS page-size granularity.
	 * See the libmlx4 function mlx4_init_context().
	 */
	uar_va = (uintptr_t)txq->msq.db;
	offset = uar_va & (page_size - 1); /* Offset in page. */
	addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
		    txq->msq.uar_mmap_offset);
	if (addr == MAP_FAILED) {
		ERROR("port %u mmap failed for BF reg of txq %u",
		      txq->port_id, txq->stats.idx);
		rte_errno = ENXIO;
		return -rte_errno;
	}
	addr = RTE_PTR_ADD(addr, offset);
	ppriv->uar_table[txq->stats.idx] = addr;
	return 0;
}
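
/*
 * Illustrative arithmetic (addresses hypothetical): with 4096-byte
 * pages, a doorbell VA of 0x7f2a5c003800 has in-page offset
 * 0x7f2a5c003800 & 0xfff == 0x800, so the remapped doorbell becomes
 * the address returned by mmap() plus 0x800.
 */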

/**
 * Unmap UAR register of a Tx queue for secondary process.
 *
 * @param[in] txq
 *   Pointer to Tx queue structure.
 */
static void
txq_uar_uninit_secondary(struct txq *txq)
{
	struct mlx4_proc_priv *ppriv = MLX4_PROC_PRIV(PORT_ID(txq->priv));
	const size_t page_size = sysconf(_SC_PAGESIZE);
	void *addr;

	addr = ppriv->uar_table[txq->stats.idx];
	munmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
}
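
/*
 * The UAR table stores base + offset (see txq_uar_init_secondary()),
 * so aligning the stored address down to the page boundary recovers
 * the exact base returned by mmap(), which is what munmap() expects.
 */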

/**
 * Initialize Tx UAR registers for secondary process.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param fd
 *   Verbs file descriptor to map UAR pages.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx4_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
{
	const unsigned int txqs_n = dev->data->nb_tx_queues;
	struct txq *txq;
	unsigned int i;
	int ret;

	MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
	for (i = 0; i != txqs_n; ++i) {
		txq = dev->data->tx_queues[i];
		if (!txq)
			continue;
		MLX4_ASSERT(txq->stats.idx == (uint16_t)i);
		ret = txq_uar_init_secondary(txq, fd);
		if (ret)
			goto error;
	}
	return 0;
error:
	/* Rollback: unmap the queues remapped so far. */
	do {
		txq = dev->data->tx_queues[i];
		if (!txq)
			continue;
		txq_uar_uninit_secondary(txq);
	} while (i--);
	return -rte_errno;
}

/**
 * Uninitialize Tx UAR registers for secondary process.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx4_tx_uar_uninit_secondary(struct rte_eth_dev *dev)
{
	struct mlx4_proc_priv *ppriv =
		(struct mlx4_proc_priv *)dev->process_private;
	const size_t page_size = sysconf(_SC_PAGESIZE);
	void *addr;
	size_t i;

	if (page_size == (size_t)-1) {
		ERROR("Failed to get mem page size");
		return;
	}
	for (i = 0; i < ppriv->uar_table_sz; i++) {
		addr = ppriv->uar_table[i];
		if (addr)
			munmap(RTE_PTR_ALIGN_FLOOR(addr, page_size),
			       page_size);
	}
}

#else /* !HAVE_IBV_MLX4_UAR_MMAP_OFFSET */
int
mlx4_tx_uar_init_secondary(struct rte_eth_dev *dev __rte_unused,
			   int fd __rte_unused)
{
	MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
	ERROR("UAR remap is not supported");
	rte_errno = ENOTSUP;
	return -rte_errno;
}

void
mlx4_tx_uar_uninit_secondary(struct rte_eth_dev *dev __rte_unused)
{
	MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
	ERROR("UAR remap is not supported");
}
#endif /* HAVE_IBV_MLX4_UAR_MMAP_OFFSET */

/**
 * Free Tx queue elements.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 */
static void
mlx4_txq_free_elts(struct txq *txq)
{
	unsigned int elts_head = txq->elts_head;
	unsigned int elts_tail = txq->elts_tail;
	struct txq_elt (*elts)[txq->elts_n] = txq->elts;
	unsigned int elts_m = txq->elts_n - 1;

	DEBUG("%p: freeing WRs", (void *)txq);
	while (elts_tail != elts_head) {
		struct txq_elt *elt = &(*elts)[elts_tail++ & elts_m];

		MLX4_ASSERT(elt->buf != NULL);
		rte_pktmbuf_free(elt->buf);
		elt->buf = NULL;
		elt->wqe = NULL;
	}
	txq->elts_tail = txq->elts_head;
}
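
/*
 * Note: elts_n is always a power of two, so "elts_tail++ & elts_m"
 * above wraps the ring index without a modulo. Illustrative: with
 * elts_n == 256, elts_m == 0xff and index 256 wraps back to 0.
 */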

/**
 * Retrieve information needed to access the Tx queue directly.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param mlxdv
 *   Pointer to device information for this Tx queue.
 */
static void
mlx4_txq_fill_dv_obj_info(struct txq *txq, struct mlx4dv_obj *mlxdv)
{
	struct mlx4_sq *sq = &txq->msq;
	struct mlx4_cq *cq = &txq->mcq;
	struct mlx4dv_qp *dqp = mlxdv->qp.out;
	struct mlx4dv_cq *dcq = mlxdv->cq.out;

	/* Total length, including headroom and spare WQEs. */
	sq->size = (uint32_t)dqp->rq.offset - (uint32_t)dqp->sq.offset;
	sq->buf = (uint8_t *)dqp->buf.buf + dqp->sq.offset;
	sq->eob = sq->buf + sq->size;
	uint32_t headroom_size = 2048 + (1 << dqp->sq.wqe_shift);
	/* A contiguous headroom of this size must always stay free. */
	sq->remain_size = sq->size - headroom_size;
	sq->owner_opcode = MLX4_OPCODE_SEND | (0u << MLX4_SQ_OWNER_BIT);
	sq->stamp = rte_cpu_to_be_32(MLX4_SQ_STAMP_VAL |
				     (0u << MLX4_SQ_OWNER_BIT));
	sq->db = dqp->sdb;
#ifdef HAVE_IBV_MLX4_UAR_MMAP_OFFSET
	sq->uar_mmap_offset = dqp->uar_mmap_offset;
#else
	sq->uar_mmap_offset = -1; /* Make mmap() fail. */
#endif
	sq->doorbell_qpn = dqp->doorbell_qpn;
	cq->buf = dcq->buf.buf;
	cq->cqe_cnt = dcq->cqe_cnt;
	cq->set_ci_db = dcq->set_ci_db;
	cq->cqe_64 = (dcq->cqe_size & 64) ? 1 : 0;
}
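
/*
 * Illustrative arithmetic (wqe_shift value hypothetical): with
 * dqp->sq.wqe_shift == 6 (64-byte WQEs), headroom_size is
 * 2048 + (1 << 6) == 2112 bytes, so remain_size starts at
 * sq->size - 2112.
 */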

/**
 * Returns the per-port supported offloads.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   Supported Tx offloads.
 */
uint64_t
mlx4_get_tx_port_offloads(struct mlx4_priv *priv)
{
	uint64_t offloads = DEV_TX_OFFLOAD_MULTI_SEGS;

	if (priv->hw_csum) {
		offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
			     DEV_TX_OFFLOAD_UDP_CKSUM |
			     DEV_TX_OFFLOAD_TCP_CKSUM);
	}
	if (priv->tso)
		offloads |= DEV_TX_OFFLOAD_TCP_TSO;
	if (priv->hw_csum_l2tun) {
		offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
		if (priv->tso)
			offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
				     DEV_TX_OFFLOAD_GRE_TNL_TSO);
	}
	return offloads;
}
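
/*
 * Illustrative usage (not part of this driver): these bits are
 * reported to applications in rte_eth_dev_info.tx_offload_capa, e.g.:
 *
 *	struct rte_eth_dev_info info;
 *
 *	rte_eth_dev_info_get(port_id, &info);
 *	if (info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO)
 *		txconf.offloads |= DEV_TX_OFFLOAD_TCP_TSO;
 */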

/**
 * DPDK callback to configure a Tx queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Tx queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_txconf *conf)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	struct mlx4dv_obj mlxdv;
	struct mlx4dv_qp dv_qp;
	struct mlx4dv_cq dv_cq;
	struct txq_elt (*elts)[rte_align32pow2(desc)];
	struct ibv_qp_init_attr qp_init_attr;
	struct txq *txq;
	uint8_t *bounce_buf;
	struct mlx4_malloc_vec vec[] = {
		{
			.align = RTE_CACHE_LINE_SIZE,
			.size = sizeof(*txq),
			.addr = (void **)&txq,
		},
		{
			.align = RTE_CACHE_LINE_SIZE,
			.size = sizeof(*elts),
			.addr = (void **)&elts,
		},
		{
			.align = RTE_CACHE_LINE_SIZE,
			.size = MLX4_MAX_WQE_SIZE,
			.addr = (void **)&bounce_buf,
		},
	};
	int ret;
	uint64_t offloads;

	offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
	DEBUG("%p: configuring queue %u for %u descriptors",
	      (void *)dev, idx, desc);
	if (idx >= dev->data->nb_tx_queues) {
		rte_errno = EOVERFLOW;
		ERROR("%p: queue index out of range (%u >= %u)",
		      (void *)dev, idx, dev->data->nb_tx_queues);
		return -rte_errno;
	}
	txq = dev->data->tx_queues[idx];
	if (txq) {
		rte_errno = EEXIST;
		DEBUG("%p: Tx queue %u already configured, release it first",
		      (void *)dev, idx);
		return -rte_errno;
	}
	if (!desc) {
		rte_errno = EINVAL;
		ERROR("%p: invalid number of Tx descriptors", (void *)dev);
		return -rte_errno;
	}
	if (desc != RTE_DIM(*elts)) {
		desc = RTE_DIM(*elts);
		WARN("%p: increased number of descriptors in Tx queue %u"
		     " to the next power of two (%u)",
		     (void *)dev, idx, desc);
	}
	/* Allocate and initialize Tx queue. */
	mlx4_zmallocv_socket("TXQ", vec, RTE_DIM(vec), socket);
	if (!txq) {
		ERROR("%p: unable to allocate queue index %u",
		      (void *)dev, idx);
		/* rte_errno is set by mlx4_zmallocv_socket(). */
		return -rte_errno;
	}
	*txq = (struct txq){
		.priv = priv,
		.port_id = dev->data->port_id,
		.stats = {
			.idx = idx,
		},
		.socket = socket,
		.elts_n = desc,
		.elts = elts,
		.elts_head = 0,
		.elts_tail = 0,
		/*
		 * Request send completion every MLX4_PMD_TX_PER_COMP_REQ
		 * packets or at least 4 times per ring.
		 */
		.elts_comp_cd =
			RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
		.elts_comp_cd_init =
			RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
		.csum = priv->hw_csum &&
			(offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
				     DEV_TX_OFFLOAD_UDP_CKSUM |
				     DEV_TX_OFFLOAD_TCP_CKSUM)),
		.csum_l2tun = priv->hw_csum_l2tun &&
			      (offloads &
			       DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM),
		/* Enable Tx loopback for VF devices. */
		.lb = !!priv->vf,
		.bounce_buf = bounce_buf,
	};
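	/*
	 * Illustrative arithmetic (assuming MLX4_PMD_TX_PER_COMP_REQ == 64):
	 * with desc == 512, elts_comp_cd_init == RTE_MIN(64, 128) == 64, i.e.
	 * one completion request per 64 packets; with desc == 64,
	 * RTE_MIN(64, 16) == 16 still yields 4 completions per ring.
	 */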
	priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_TX_QUEUE;
	priv->verbs_alloc_ctx.obj = txq;
	txq->cq = mlx4_glue->create_cq(priv->ctx, desc, NULL, NULL, 0);
	if (!txq->cq) {
		rte_errno = ENOMEM;
		ERROR("%p: CQ creation failure: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	qp_init_attr = (struct ibv_qp_init_attr){
		.send_cq = txq->cq,
		.recv_cq = txq->cq,
		.cap = {
			.max_send_wr =
				RTE_MIN(priv->device_attr.max_qp_wr, desc),
			.max_send_sge = 1,
			.max_inline_data = MLX4_PMD_MAX_INLINE,
		},
		.qp_type = IBV_QPT_RAW_PACKET,
		/* No completion events are generated by default. */
		.sq_sig_all = 0,
	};
	txq->qp = mlx4_glue->create_qp(priv->pd, &qp_init_attr);
	if (!txq->qp) {
		rte_errno = errno ? errno : EINVAL;
		ERROR("%p: QP creation failure: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	txq->max_inline = qp_init_attr.cap.max_inline_data;
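	/*
	 * As with any Verbs QP, the raw packet QP must be walked through
	 * the INIT -> RTR -> RTS state sequence below before the hardware
	 * accepts posted sends, even though no peer connection exists.
	 */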
	ret = mlx4_glue->modify_qp
		(txq->qp,
		 &(struct ibv_qp_attr){
			.qp_state = IBV_QPS_INIT,
			.port_num = priv->port,
		 },
		 IBV_QP_STATE | IBV_QP_PORT);
	if (ret) {
		rte_errno = ret;
		ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	ret = mlx4_glue->modify_qp
		(txq->qp,
		 &(struct ibv_qp_attr){
			.qp_state = IBV_QPS_RTR,
		 },
		 IBV_QP_STATE);
	if (ret) {
		rte_errno = ret;
		ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	ret = mlx4_glue->modify_qp
		(txq->qp,
		 &(struct ibv_qp_attr){
			.qp_state = IBV_QPS_RTS,
		 },
		 IBV_QP_STATE);
	if (ret) {
		rte_errno = ret;
		ERROR("%p: QP state to IBV_QPS_RTS failed: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	/* Retrieve device queue information. */
#ifdef HAVE_IBV_MLX4_UAR_MMAP_OFFSET
	dv_qp = (struct mlx4dv_qp){
		.comp_mask = MLX4DV_QP_MASK_UAR_MMAP_OFFSET,
	};
#endif
	mlxdv.cq.in = txq->cq;
	mlxdv.cq.out = &dv_cq;
	mlxdv.qp.in = txq->qp;
	mlxdv.qp.out = &dv_qp;
	ret = mlx4_glue->dv_init_obj(&mlxdv, MLX4DV_OBJ_QP | MLX4DV_OBJ_CQ);
	if (ret) {
		rte_errno = EINVAL;
		ERROR("%p: failed to obtain information needed for"
		      " accessing the device queues", (void *)dev);
		goto error;
	}
#ifdef HAVE_IBV_MLX4_UAR_MMAP_OFFSET
	if (!(dv_qp.comp_mask & MLX4DV_QP_MASK_UAR_MMAP_OFFSET)) {
		WARN("%p: failed to obtain UAR mmap offset", (void *)dev);
		dv_qp.uar_mmap_offset = -1; /* Make mmap() fail. */
	}
#endif
	mlx4_txq_fill_dv_obj_info(txq, &mlxdv);
	txq_uar_init(txq);
	/* Save the first WQE pointer in the first element. */
	(&(*txq->elts)[0])->wqe =
		(volatile struct mlx4_wqe_ctrl_seg *)txq->msq.buf;
	if (mlx4_mr_btree_init(&txq->mr_ctrl.cache_bh,
			       MLX4_MR_BTREE_CACHE_N, socket)) {
		/* rte_errno is already set. */
		goto error;
	}
	/* Save pointer of global generation number to check memory event. */
	txq->mr_ctrl.dev_gen_ptr = &priv->mr.dev_gen;
	DEBUG("%p: adding Tx queue %p to list", (void *)dev, (void *)txq);
	dev->data->tx_queues[idx] = txq;
	priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_NONE;
	return 0;
error:
	dev->data->tx_queues[idx] = NULL;
	ret = rte_errno;
	mlx4_tx_queue_release(txq);
	rte_errno = ret;
	MLX4_ASSERT(rte_errno > 0);
	priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_NONE;
	return -rte_errno;
}
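
/*
 * Illustrative usage (not part of this driver): applications reach the
 * callback above through the generic ethdev API, e.g.:
 *
 *	struct rte_eth_txconf txconf = { .offloads = 0 };
 *
 *	if (rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *				   &txconf) < 0)
 *		rte_exit(EXIT_FAILURE, "Tx queue setup failed\n");
 */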

/**
 * DPDK callback to release a Tx queue.
 *
 * @param dpdk_txq
 *   Generic Tx queue pointer.
 */
void
mlx4_tx_queue_release(void *dpdk_txq)
{
	struct txq *txq = (struct txq *)dpdk_txq;
	struct mlx4_priv *priv;
	unsigned int i;

	if (txq == NULL)
		return;
	priv = txq->priv;
	for (i = 0; i != ETH_DEV(priv)->data->nb_tx_queues; ++i)
		if (ETH_DEV(priv)->data->tx_queues[i] == txq) {
			DEBUG("%p: removing Tx queue %p from list",
			      (void *)ETH_DEV(priv), (void *)txq);
			ETH_DEV(priv)->data->tx_queues[i] = NULL;
			break;
		}
	mlx4_txq_free_elts(txq);
	if (txq->qp)
		claim_zero(mlx4_glue->destroy_qp(txq->qp));
	if (txq->cq)
		claim_zero(mlx4_glue->destroy_cq(txq->cq));
	mlx4_mr_btree_free(&txq->mr_ctrl.cache_bh);
	rte_free(txq);
}