/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd

#include <sys/queue.h>

/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#pragma GCC diagnostic ignored "-Wpedantic"
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#pragma GCC diagnostic error "-Wpedantic"

#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_debug.h>

#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_glue.h"
/* Default RSS hash key also used for ConnectX-3. */
uint8_t rss_hash_default_key[] = {
    0x2c, 0xc6, 0x81, 0xd1,
    0x5b, 0xdb, 0xf4, 0xf7,
    0xfc, 0xa2, 0x83, 0x19,
    0xdb, 0x1a, 0x3e, 0x94,
    0x6b, 0x9e, 0x38, 0xd9,
    0x2c, 0x9c, 0x03, 0xd1,
    0xad, 0x99, 0x44, 0xa7,
    0xd9, 0x56, 0x3d, 0x59,
    0x06, 0x3c, 0x25, 0xf3,
    0xfc, 0x1f, 0xdc, 0x2a,
};

/* Length of the default RSS hash key. */
static_assert(MLX5_RSS_HASH_KEY_LEN ==
              (unsigned int)sizeof(rss_hash_default_key),
              "wrong RSS default key size.");
 * Check whether Multi-Packet RQ can be enabled for the device.
 * Pointer to Ethernet device.
 * 1 if supported, negative errno value if not.
mlx5_check_mprq_support(struct rte_eth_dev *dev)
    struct mlx5_priv *priv = dev->data->dev_private;

    if (priv->config.mprq.enabled &&
        priv->rxqs_n >= priv->config.mprq.min_rxqs_num)

 * Check whether Multi-Packet RQ is enabled for the Rx queue.
 * Pointer to receive queue structure.
 * 0 if disabled, otherwise enabled.
mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
    return rxq->strd_num_n > 0;
 * Check whether Multi-Packet RQ is enabled for the device.
 * Pointer to Ethernet device.
 * 0 if disabled, otherwise enabled.
mlx5_mprq_enabled(struct rte_eth_dev *dev)
    struct mlx5_priv *priv = dev->data->dev_private;

    if (mlx5_check_mprq_support(dev) < 0)
    /* All the configured queues should be enabled. */
    for (i = 0; i < priv->rxqs_n; ++i) {
        struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];

        if (mlx5_rxq_mprq_enabled(rxq))
    /* Multi-Packet RQ can't be partially configured. */
    assert(n == 0 || n == priv->rxqs_n);
    return n == priv->rxqs_n;
 * Check whether LRO is supported and enabled for the device.
 * Pointer to Ethernet device.
 * 0 if disabled, 1 if enabled.
mlx5_lro_on(struct rte_eth_dev *dev)
    return (MLX5_LRO_SUPPORTED(dev) && MLX5_LRO_ENABLED(dev));
 * Allocate RX queue elements for Multi-Packet RQ.
 * Pointer to RX queue structure.
 * 0 on success, a negative errno value otherwise and rte_errno is set.
rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
    struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
    unsigned int wqe_n = 1 << rxq->elts_n;

    /* Iterate on segments. */
    for (i = 0; i <= wqe_n; ++i) {
        struct mlx5_mprq_buf *buf;

        if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
            DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
            (*rxq->mprq_bufs)[i] = buf;
            rxq->mprq_repl = buf;
        "port %u Rx queue %u allocated and configured %u segments",
        rxq->port_id, rxq->idx, wqe_n);
    err = rte_errno; /* Save rte_errno before cleanup. */
    for (i = 0; (i != wqe_n); ++i) {
        if ((*rxq->mprq_bufs)[i] != NULL)
            rte_mempool_put(rxq->mprq_mp,
                            (*rxq->mprq_bufs)[i]);
        (*rxq->mprq_bufs)[i] = NULL;
    DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
        rxq->port_id, rxq->idx);
    rte_errno = err; /* Restore rte_errno. */
 * Allocate RX queue elements for Single-Packet RQ.
 * Pointer to RX queue structure.
 * 0 on success, errno value on failure.
rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
    const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
    unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;

    /* Iterate on segments. */
    for (i = 0; (i != elts_n); ++i) {
        struct rte_mbuf *buf;

        buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
            DRV_LOG(ERR, "port %u empty mbuf pool",
                PORT_ID(rxq_ctrl->priv));
        /* Headroom is reserved by rte_pktmbuf_alloc(). */
        assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
        /* Buffer is supposed to be empty. */
        assert(rte_pktmbuf_data_len(buf) == 0);
        assert(rte_pktmbuf_pkt_len(buf) == 0);
        /* Only the first segment keeps headroom. */
            SET_DATA_OFF(buf, 0);
        PORT(buf) = rxq_ctrl->rxq.port_id;
        DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
        PKT_LEN(buf) = DATA_LEN(buf);
        (*rxq_ctrl->rxq.elts)[i] = buf;
    /* If Rx vector is activated. */
    if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
        struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
        struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;

        /* Initialize default rearm_data for vPMD. */
        mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
        rte_mbuf_refcnt_set(mbuf_init, 1);
        mbuf_init->nb_segs = 1;
        mbuf_init->port = rxq->port_id;
         * prevent compiler reordering:
         * rearm_data covers previous fields.
        rte_compiler_barrier();
        rxq->mbuf_initializer =
            *(uint64_t *)&mbuf_init->rearm_data;
        /* Padding with a fake mbuf for vectorized Rx. */
        for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
            (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
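        /*
         * Note: rearm_data is a single 8-byte window covering data_off,
         * refcnt, nb_segs and port, so the vectorized Rx path can
         * re-initialize an mbuf with one 64-bit store of
         * mbuf_initializer. The trailing fake_mbuf entries let the vPMD
         * overrun the ring tail by up to MLX5_VPMD_DESCS_PER_LOOP
         * descriptors without touching application-owned memory.
         */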
257 "port %u Rx queue %u allocated and configured %u segments"
259 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
260 elts_n / (1 << rxq_ctrl->rxq.sges_n));
263 err = rte_errno; /* Save rte_errno before cleanup. */
265 for (i = 0; (i != elts_n); ++i) {
266 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
267 rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
268 (*rxq_ctrl->rxq.elts)[i] = NULL;
270 DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
271 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
272 rte_errno = err; /* Restore rte_errno. */
 * Allocate RX queue elements.
 * Pointer to RX queue structure.
 * 0 on success, errno value on failure.
rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
    return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
           rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl);
 * Free RX queue elements for Multi-Packet RQ.
 * Pointer to RX queue structure.
rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
    struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;

    DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs",
        rxq->port_id, rxq->idx);
    if (rxq->mprq_bufs == NULL)
    assert(mlx5_rxq_check_vec_support(rxq) < 0);
    for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
        if ((*rxq->mprq_bufs)[i] != NULL)
            mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
        (*rxq->mprq_bufs)[i] = NULL;
    if (rxq->mprq_repl != NULL) {
        mlx5_mprq_buf_free(rxq->mprq_repl);
        rxq->mprq_repl = NULL;
 * Free RX queue elements for Single-Packet RQ.
 * Pointer to RX queue structure.
rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
    struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
    const uint16_t q_n = (1 << rxq->elts_n);
    const uint16_t q_mask = q_n - 1;
    uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
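    /*
     * rq_ci and rq_pi are free-running unsigned counters; their
     * difference is taken with modular arithmetic, so `used` stays
     * correct even after the counters wrap around zero.
     */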
    DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
        PORT_ID(rxq_ctrl->priv), rxq->idx);
    if (rxq->elts == NULL)
     * Some mbufs in the ring belong to the application.
     * They cannot be freed.
    if (mlx5_rxq_check_vec_support(rxq) > 0) {
        for (i = 0; i < used; ++i)
            (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
        rxq->rq_pi = rxq->rq_ci;
    for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
        if ((*rxq->elts)[i] != NULL)
            rte_pktmbuf_free_seg((*rxq->elts)[i]);
        (*rxq->elts)[i] = NULL;
 * Free RX queue elements.
 * Pointer to RX queue structure.
rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
    if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
        rxq_free_elts_mprq(rxq_ctrl);
    else
        rxq_free_elts_sprq(rxq_ctrl);
 * Returns the per-queue supported offloads.
 * Pointer to Ethernet device.
 * Supported Rx offloads.
mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
    struct mlx5_priv *priv = dev->data->dev_private;
    struct mlx5_dev_config *config = &priv->config;
    uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
                         DEV_RX_OFFLOAD_TIMESTAMP |
                         DEV_RX_OFFLOAD_JUMBO_FRAME);

    if (config->hw_fcs_strip)
        offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
        offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
                     DEV_RX_OFFLOAD_UDP_CKSUM |
                     DEV_RX_OFFLOAD_TCP_CKSUM);
    if (config->hw_vlan_strip)
        offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
 * Returns the per-port supported offloads.
 * Pointer to Ethernet device.
 * Supported Rx offloads.
mlx5_get_rx_port_offloads(struct rte_eth_dev *dev)
    uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;

    if (MLX5_LRO_SUPPORTED(dev))
        offloads |= DEV_RX_OFFLOAD_TCP_LRO;
 * Verify if the queue can be released.
 * Pointer to Ethernet device.
 * 1 if the queue can be released.
 * 0 if the queue cannot be released because references to it remain.
 * Negative errno and rte_errno is set if the queue doesn't exist.
mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
    struct mlx5_priv *priv = dev->data->dev_private;
    struct mlx5_rxq_ctrl *rxq_ctrl;

    if (!(*priv->rxqs)[idx]) {
    rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
    return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
 * Pointer to Ethernet device structure.
 * Number of descriptors to configure in queue.
 * NUMA socket on which memory must be allocated.
 * Thresholds parameters.
 * Memory pool for buffer allocations.
 * 0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                    unsigned int socket, const struct rte_eth_rxconf *conf,
                    struct rte_mempool *mp)
    struct mlx5_priv *priv = dev->data->dev_private;
    struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
    struct mlx5_rxq_ctrl *rxq_ctrl =
        container_of(rxq, struct mlx5_rxq_ctrl, rxq);

    if (!rte_is_power_of_2(desc)) {
        desc = 1 << log2above(desc);
            "port %u increased number of descriptors in Rx queue %u"
            " to the next power of two (%d)",
            dev->data->port_id, idx, desc);
    DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
        dev->data->port_id, idx, desc);
    if (idx >= priv->rxqs_n) {
        DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
            dev->data->port_id, idx, priv->rxqs_n);
        rte_errno = EOVERFLOW;
    if (!mlx5_rxq_releasable(dev, idx)) {
        DRV_LOG(ERR, "port %u unable to release queue index %u",
            dev->data->port_id, idx);
    mlx5_rxq_release(dev, idx);
    rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
        DRV_LOG(ERR, "port %u unable to allocate queue index %u",
            dev->data->port_id, idx);
    DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
        dev->data->port_id, idx);
    (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
 * DPDK callback to release an Rx queue.
 * Generic Rx queue pointer.
mlx5_rx_queue_release(void *dpdk_rxq)
    struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
    struct mlx5_rxq_ctrl *rxq_ctrl;
    struct mlx5_priv *priv;

    rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
    priv = rxq_ctrl->priv;
    if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
        rte_panic("port %u Rx queue %u is still used by a flow and"
                  " cannot be removed\n",
                  PORT_ID(priv), rxq->idx);
    mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
 * Get an Rx queue Verbs/DevX object.
 * Pointer to Ethernet device.
 * Queue index in DPDK Rx queue array.
 * The Verbs/DevX object if it exists.
static struct mlx5_rxq_obj *
mlx5_rxq_obj_get(struct rte_eth_dev *dev, uint16_t idx)
    struct mlx5_priv *priv = dev->data->dev_private;
    struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
    struct mlx5_rxq_ctrl *rxq_ctrl;

    if (idx >= priv->rxqs_n)
    rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
        rte_atomic32_inc(&rxq_ctrl->obj->refcnt);
    return rxq_ctrl->obj;
 * Release the resources allocated for an RQ DevX object.
 * DevX Rx queue object.
rxq_release_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
    if (rxq_ctrl->rxq.wqes) {
        rte_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
        rxq_ctrl->rxq.wqes = NULL;
    if (rxq_ctrl->wq_umem) {
        mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
        rxq_ctrl->wq_umem = NULL;
 * Release an Rx Verbs/DevX queue object.
 * Verbs/DevX Rx queue object.
 * 1 while a reference on it exists, 0 when freed.
mlx5_rxq_obj_release(struct mlx5_rxq_obj *rxq_obj)
    if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_IBV)
    if (rte_atomic32_dec_and_test(&rxq_obj->refcnt)) {
        rxq_free_elts(rxq_obj->rxq_ctrl);
        if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
            claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
        } else if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
            claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
            rxq_release_rq_resources(rxq_obj->rxq_ctrl);
        claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
        if (rxq_obj->channel)
            claim_zero(mlx5_glue->destroy_comp_channel
        LIST_REMOVE(rxq_obj, next);
 * Allocate queue vector and fill epoll fd list for Rx interrupts.
 * Pointer to Ethernet device.
 * 0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
    struct mlx5_priv *priv = dev->data->dev_private;
    unsigned int rxqs_n = priv->rxqs_n;
    unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
    unsigned int count = 0;
    struct rte_intr_handle *intr_handle = dev->intr_handle;

    if (!dev->data->dev_conf.intr_conf.rxq)
    mlx5_rx_intr_vec_disable(dev);
    intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
    if (intr_handle->intr_vec == NULL) {
            "port %u failed to allocate memory for interrupt"
            " vector, Rx interrupts will not be supported",
    intr_handle->type = RTE_INTR_HANDLE_EXT;
    for (i = 0; i != n; ++i) {
        /* This rxq obj must not be released in this function. */
        struct mlx5_rxq_obj *rxq_obj = mlx5_rxq_obj_get(dev, i);

        /* Skip queues that cannot request interrupts. */
        if (!rxq_obj || !rxq_obj->channel) {
            /* Use invalid intr_vec[] index to disable entry. */
            intr_handle->intr_vec[i] =
                RTE_INTR_VEC_RXTX_OFFSET +
                RTE_MAX_RXTX_INTR_VEC_ID;
        if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
                "port %u too many Rx queues for interrupt"
                " vector size (%d), Rx interrupts cannot be"
                dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
            mlx5_rx_intr_vec_disable(dev);
        fd = rxq_obj->channel->fd;
        flags = fcntl(fd, F_GETFL);
        rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
                "port %u failed to make Rx interrupt file"
                " descriptor %d non-blocking for queue index"
                dev->data->port_id, fd, i);
            mlx5_rx_intr_vec_disable(dev);
        intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
        intr_handle->efds[count] = fd;
    mlx5_rx_intr_vec_disable(dev);
    intr_handle->nb_efd = count;
 * Clean up Rx interrupts handler.
 * Pointer to Ethernet device.
mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
    struct mlx5_priv *priv = dev->data->dev_private;
    struct rte_intr_handle *intr_handle = dev->intr_handle;
    unsigned int rxqs_n = priv->rxqs_n;
    unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);

    if (!dev->data->dev_conf.intr_conf.rxq)
    if (!intr_handle->intr_vec)
    for (i = 0; i != n; ++i) {
        struct mlx5_rxq_ctrl *rxq_ctrl;
        struct mlx5_rxq_data *rxq_data;

        if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
            RTE_MAX_RXTX_INTR_VEC_ID)
         * Need to access the queue directly to release the reference
         * kept in mlx5_rx_intr_vec_enable().
        rxq_data = (*priv->rxqs)[i];
        rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
        mlx5_rxq_obj_release(rxq_ctrl->obj);
    rte_intr_free_epoll_fd(intr_handle);
    if (intr_handle->intr_vec)
        free(intr_handle->intr_vec);
    intr_handle->nb_efd = 0;
    intr_handle->intr_vec = NULL;
 * MLX5 CQ notification.
 * Pointer to receive queue structure.
 * Sequence number per receive queue.
mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
    uint32_t doorbell_hi;
    void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;

    sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
    doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
    doorbell = (uint64_t)doorbell_hi << 32;
    doorbell |= rxq->cqn;
    rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
    mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
                     cq_db_reg, rxq->uar_lock_cq);
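    /*
     * Doorbell layout sketch (illustration only, no extra write is
     * performed): bits 63..32 carry doorbell_hi, i.e. the arm sequence
     * number (masked by MLX5_CQ_SQN_MASK, shifted by MLX5_CQ_SQN_OFFSET)
     * combined with the masked CQ consumer index, and bits 31..0 carry
     * the CQ number. doorbell_hi is also stored in cq_db[MLX5_CQ_ARM_DB]
     * so the HCA can match the arm request against the CQ's doorbell
     * record.
     */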
 * DPDK callback for Rx queue interrupt enable.
 * Pointer to Ethernet device structure.
 * 0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
    struct mlx5_priv *priv = dev->data->dev_private;
    struct mlx5_rxq_data *rxq_data;
    struct mlx5_rxq_ctrl *rxq_ctrl;

    rxq_data = (*priv->rxqs)[rx_queue_id];
    rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
        struct mlx5_rxq_obj *rxq_obj;

        rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
        mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
        mlx5_rxq_obj_release(rxq_obj);
 * DPDK callback for Rx queue interrupt disable.
 * Pointer to Ethernet device structure.
 * 0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
    struct mlx5_priv *priv = dev->data->dev_private;
    struct mlx5_rxq_data *rxq_data;
    struct mlx5_rxq_ctrl *rxq_ctrl;
    struct mlx5_rxq_obj *rxq_obj = NULL;
    struct ibv_cq *ev_cq;

    rxq_data = (*priv->rxqs)[rx_queue_id];
    rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
    rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
    ret = mlx5_glue->get_cq_event(rxq_obj->channel, &ev_cq, &ev_ctx);
    if (ret || ev_cq != rxq_obj->cq) {
    rxq_data->cq_arm_sn++;
    mlx5_glue->ack_cq_events(rxq_obj->cq, 1);
    mlx5_rxq_obj_release(rxq_obj);
    ret = rte_errno; /* Save rte_errno before cleanup. */
        mlx5_rxq_obj_release(rxq_obj);
    DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
        dev->data->port_id, rx_queue_id);
    rte_errno = ret; /* Restore rte_errno. */
 * Create a CQ Verbs object.
 * Pointer to Ethernet device.
 * Pointer to device private data.
 * Pointer to Rx queue data.
 * Number of CQEs in CQ.
 * Pointer to Rx queue object data.
 * The Verbs object initialised, NULL otherwise and rte_errno is set.
static struct ibv_cq *
mlx5_ibv_cq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
                struct mlx5_rxq_data *rxq_data,
                unsigned int cqe_n, struct mlx5_rxq_obj *rxq_obj)
    struct ibv_cq_init_attr_ex ibv;
    struct mlx5dv_cq_init_attr mlx5;

    cq_attr.ibv = (struct ibv_cq_init_attr_ex){
        .channel = rxq_obj->channel,
    cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
    if (priv->config.cqe_comp && !rxq_data->hw_timestamp) {
        cq_attr.mlx5.comp_mask |=
            MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
        cq_attr.mlx5.cqe_comp_res_format =
            mlx5_rxq_mprq_enabled(rxq_data) ?
                MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
                MLX5DV_CQE_RES_FORMAT_HASH;
        cq_attr.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
         * For vectorized Rx, it must not be doubled in order to
         * make cq_ci and rq_ci aligned.
        if (mlx5_rxq_check_vec_support(rxq_data) < 0)
            cq_attr.ibv.cqe *= 2;
    } else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
            "port %u Rx CQE compression is disabled for HW"
#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
    if (priv->config.cqe_pad) {
        cq_attr.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
        cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
    return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(priv->sh->ctx,
 * Create a WQ Verbs object.
 * Pointer to Ethernet device.
 * Pointer to device private data.
 * Pointer to Rx queue data.
 * Queue index in DPDK Rx queue array.
 * Number of WQEs in WQ.
 * Pointer to Rx queue object data.
 * The Verbs object initialised, NULL otherwise and rte_errno is set.
static struct ibv_wq *
mlx5_ibv_wq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
                struct mlx5_rxq_data *rxq_data, uint16_t idx,
                unsigned int wqe_n, struct mlx5_rxq_obj *rxq_obj)
    struct ibv_wq_init_attr ibv;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
    struct mlx5dv_wq_init_attr mlx5;

    wq_attr.ibv = (struct ibv_wq_init_attr){
        .wq_context = NULL, /* Could be useful in the future. */
        .wq_type = IBV_WQT_RQ,
        /* Max number of outstanding WRs. */
        .max_wr = wqe_n >> rxq_data->sges_n,
        /* Max number of scatter/gather elements in a WR. */
        .max_sge = 1 << rxq_data->sges_n,
        .comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING | 0,
        .create_flags = (rxq_data->vlan_strip ?
                         IBV_WQ_FLAGS_CVLAN_STRIPPING : 0),
    /* By default, FCS (CRC) is stripped by hardware. */
    if (rxq_data->crc_present) {
        wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
        wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
    if (priv->config.hw_padding) {
#if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
        wq_attr.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
        wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
#elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
        wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
        wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
    wq_attr.mlx5 = (struct mlx5dv_wq_init_attr){
    if (mlx5_rxq_mprq_enabled(rxq_data)) {
        struct mlx5dv_striding_rq_init_attr *mprq_attr =
            &wq_attr.mlx5.striding_rq_attrs;

        wq_attr.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
        *mprq_attr = (struct mlx5dv_striding_rq_init_attr){
            .single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
            .single_wqe_log_num_of_strides = rxq_data->strd_num_n,
            .two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
    rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &wq_attr.ibv,
    rxq_obj->wq = mlx5_glue->create_wq(priv->sh->ctx, &wq_attr.ibv);
     * Make sure the number of WRs*SGEs matches expectations since a queue
     * cannot allocate more than "desc" buffers.
    if (wq_attr.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
        wq_attr.ibv.max_sge != (1u << rxq_data->sges_n)) {
            "port %u Rx queue %u requested %u*%u but got"
            dev->data->port_id, idx,
            wqe_n >> rxq_data->sges_n,
            (1 << rxq_data->sges_n),
            wq_attr.ibv.max_wr, wq_attr.ibv.max_sge);
        claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
 * Fill common fields of create RQ attributes structure.
 * Pointer to Rx queue data.
 * CQ number to use with this RQ.
 * RQ attributes structure to fill.
mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
                              struct mlx5_devx_create_rq_attr *rq_attr)
    rq_attr->state = MLX5_RQC_STATE_RST;
    rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
    rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
 * Fill common fields of DevX WQ attributes structure.
 * Pointer to device private data.
 * Pointer to Rx queue control structure.
 * WQ attributes structure to fill.
mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
                       struct mlx5_devx_wq_attr *wq_attr)
    wq_attr->end_padding_mode = priv->config.cqe_pad ?
                                MLX5_WQ_END_PAD_MODE_ALIGN :
                                MLX5_WQ_END_PAD_MODE_NONE;
    wq_attr->pd = priv->sh->pdn;
    wq_attr->dbr_addr = rxq_ctrl->dbr_offset;
    wq_attr->dbr_umem_id = rxq_ctrl->dbr_umem_id;
    wq_attr->dbr_umem_valid = 1;
    wq_attr->wq_umem_id = rxq_ctrl->wq_umem->umem_id;
    wq_attr->wq_umem_valid = 1;
 * Create a RQ object using DevX.
 * Pointer to Ethernet device.
 * Queue index in DPDK Rx queue array.
 * CQ number to use with this RQ.
 * The DevX object initialised, NULL otherwise and rte_errno is set.
static struct mlx5_devx_obj *
mlx5_devx_rq_new(struct rte_eth_dev *dev, uint16_t idx, uint32_t cqn)
    struct mlx5_priv *priv = dev->data->dev_private;
    struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
    struct mlx5_rxq_ctrl *rxq_ctrl =
        container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
    struct mlx5_devx_create_rq_attr rq_attr;
    uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
    uint32_t wq_size = 0;
    uint32_t wqe_size = 0;
    uint32_t log_wqe_size = 0;
    struct mlx5_devx_obj *rq;

    memset(&rq_attr, 0, sizeof(rq_attr));
    /* Fill RQ attributes. */
    rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
    rq_attr.flush_in_error_en = 1;
    mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
    /* Fill WQ attributes for this RQ. */
    if (mlx5_rxq_mprq_enabled(rxq_data)) {
        rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
         * Number of strides in each WQE:
         * 512*2^single_wqe_log_num_of_strides.
        rq_attr.wq_attr.single_wqe_log_num_of_strides =
            rxq_data->strd_num_n -
            MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
        /* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
        rq_attr.wq_attr.single_stride_log_num_of_bytes =
            rxq_data->strd_sz_n -
            MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
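        /*
         * Worked example (illustrative values): with the 512-stride
         * base implied by the comment above, strd_num_n = 10 (1024
         * strides) encodes as 10 - 9 = 1, and with the 64B stride
         * base, strd_sz_n = 11 (2KB strides) encodes as 11 - 6 = 5.
         */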
        wqe_size = sizeof(struct mlx5_wqe_mprq);
        rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
        wqe_size = sizeof(struct mlx5_wqe_data_seg);
    log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
    rq_attr.wq_attr.log_wq_stride = log_wqe_size;
    rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
    /* Calculate and allocate WQ memory space. */
    wqe_size = 1 << log_wqe_size; /* Round up to the next power of two. */
    wq_size = wqe_n * wqe_size;
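    /*
     * E.g. a plain cyclic RQ uses 16B data segments; with sges_n = 1
     * (two SGEs per packet) log_wqe_size = 4 + 1 = 5, so every WQE
     * takes 32 bytes and wq_size = wqe_n * 32.
     */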
    buf = rte_calloc_socket(__func__, 1, wq_size, MLX5_WQE_BUF_ALIGNMENT,
    rxq_data->wqes = buf;
    rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
    if (!rxq_ctrl->wq_umem) {
    mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
    rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
        rxq_release_rq_resources(rxq_ctrl);
 * Create the Rx queue Verbs/DevX object.
 * Pointer to Ethernet device.
 * Queue index in DPDK Rx queue array.
 * Type of Rx queue object to create.
 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
struct mlx5_rxq_obj *
mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
                 enum mlx5_rxq_obj_type type)
    struct mlx5_priv *priv = dev->data->dev_private;
    struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
    struct mlx5_rxq_ctrl *rxq_ctrl =
        container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
    struct ibv_wq_attr mod;
    unsigned int wqe_n = 1 << rxq_data->elts_n;
    struct mlx5_rxq_obj *tmpl = NULL;
    struct mlx5dv_cq cq_info;
    struct mlx5dv_rwq rwq;
    struct mlx5dv_obj obj;

    assert(!rxq_ctrl->obj);
    priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
    priv->verbs_alloc_ctx.obj = rxq_ctrl;
    tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
            "port %u Rx queue %u cannot allocate verbs resources",
            dev->data->port_id, rxq_data->idx);
    tmpl->rxq_ctrl = rxq_ctrl;
    if (rxq_ctrl->irq) {
        tmpl->channel = mlx5_glue->create_comp_channel(priv->sh->ctx);
        if (!tmpl->channel) {
            DRV_LOG(ERR, "port %u: comp channel creation failure",
                dev->data->port_id);
    if (mlx5_rxq_mprq_enabled(rxq_data))
        cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
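    /*
     * With MPRQ every stride may complete as its own packet, so the CQ
     * must cover wqe_n * strides-per-WQE entries, e.g. 64 WQEs of 512
     * strides each give cqe_n = 64 * 512 - 1 = 32767.
     */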
    tmpl->cq = mlx5_ibv_cq_new(dev, priv, rxq_data, cqe_n, tmpl);
        DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
            dev->data->port_id, idx);
    obj.cq.in = tmpl->cq;
    obj.cq.out = &cq_info;
    ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ);
    if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
            "port %u wrong MLX5_CQE_SIZE environment variable"
            " value: it should be set to %u",
            dev->data->port_id, RTE_CACHE_LINE_SIZE);
    DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
        dev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr);
    DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
        dev->data->port_id, priv->sh->device_attr.orig_attr.max_sge);
    /* Allocate door-bell for types created with DevX. */
    if (tmpl->type != MLX5_RXQ_OBJ_TYPE_IBV) {
        struct mlx5_devx_dbr_page *dbr_page;

        dbr_offset = mlx5_get_dbr(dev, &dbr_page);
        rxq_ctrl->dbr_offset = dbr_offset;
        rxq_ctrl->dbr_umem_id = dbr_page->umem->umem_id;
        rxq_ctrl->dbr_umem_id_valid = 1;
        rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
                                       (uintptr_t)rxq_ctrl->dbr_offset);
    if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV) {
        tmpl->wq = mlx5_ibv_wq_new(dev, priv, rxq_data, idx, wqe_n,
            DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
                dev->data->port_id, idx);
        /* Change queue state to ready. */
        mod = (struct ibv_wq_attr){
            .attr_mask = IBV_WQ_ATTR_STATE,
            .wq_state = IBV_WQS_RDY,
        ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
                "port %u Rx queue %u WQ state to IBV_WQS_RDY"
                " failed", dev->data->port_id, idx);
        obj.rwq.in = tmpl->wq;
        ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_RWQ);
        rxq_data->wqes = rwq.buf;
        rxq_data->rq_db = rwq.dbrec;
    } else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
        struct mlx5_devx_modify_rq_attr rq_attr;

        memset(&rq_attr, 0, sizeof(rq_attr));
        tmpl->rq = mlx5_devx_rq_new(dev, idx, cq_info.cqn);
            DRV_LOG(ERR, "port %u Rx queue %u RQ creation failure",
                dev->data->port_id, idx);
        /* Change queue state to ready. */
        rq_attr.rq_state = MLX5_RQC_STATE_RST;
        rq_attr.state = MLX5_RQC_STATE_RDY;
        ret = mlx5_devx_cmd_modify_rq(tmpl->rq, &rq_attr);
    /* Fill the rings. */
    rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
    rxq_data->cq_db = cq_info.dbrec;
    rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
    rxq_data->cq_uar = cq_info.cq_uar;
    rxq_data->cqn = cq_info.cqn;
    rxq_data->cq_arm_sn = 0;
    mlx5_rxq_initialize(rxq_data);
    rxq_data->cq_ci = 0;
    DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
        idx, (void *)&tmpl);
    rte_atomic32_inc(&tmpl->refcnt);
    LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
    priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
    ret = rte_errno; /* Save rte_errno before cleanup. */
    if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV && tmpl->wq)
        claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
    else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ && tmpl->rq)
        claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
        claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
        claim_zero(mlx5_glue->destroy_comp_channel
    rte_errno = ret; /* Restore rte_errno. */
    if (type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
        rxq_release_rq_resources(rxq_ctrl);
    priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
 * Verify the Rx queue objects list is empty.
 * Pointer to Ethernet device.
 * The number of objects not released.
mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
    struct mlx5_priv *priv = dev->data->dev_private;
    struct mlx5_rxq_obj *rxq_obj;

    LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
        DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
            dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
 * Callback function to initialize mbufs for Multi-Packet RQ.
mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
                   void *_m, unsigned int i __rte_unused)
    struct mlx5_mprq_buf *buf = _m;
    struct rte_mbuf_ext_shared_info *shinfo;
    unsigned int strd_n = (unsigned int)(uintptr_t)opaque_arg;

    memset(_m, 0, sizeof(*buf));
    rte_atomic16_set(&buf->refcnt, 1);
    for (j = 0; j != strd_n; ++j) {
        shinfo = &buf->shinfos[j];
        shinfo->free_cb = mlx5_mprq_buf_free_cb;
        shinfo->fcb_opaque = buf;
 * Free mempool of Multi-Packet RQ.
 * Pointer to Ethernet device.
 * 0 on success, negative errno value on failure.
mlx5_mprq_free_mp(struct rte_eth_dev *dev)
    struct mlx5_priv *priv = dev->data->dev_private;
    struct rte_mempool *mp = priv->mprq_mp;

    DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
        dev->data->port_id, mp->name);
     * If a buffer in the pool has been externally attached to a mbuf and
     * is still in use by the application, destroying the Rx queue can
     * spoil the packet. It is unlikely, but it can happen if the
     * application dynamically creates and destroys queues while holding
     * Rx packets.
     * TODO: It is unavoidable for now because the mempool for Multi-Packet
     * RQ isn't provided by the application but managed by the PMD.
    if (!rte_mempool_full(mp)) {
            "port %u mempool for Multi-Packet RQ is still in use",
            dev->data->port_id);
    rte_mempool_free(mp);
    /* Unset mempool for each Rx queue. */
    for (i = 0; i != priv->rxqs_n; ++i) {
        struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];

        rxq->mprq_mp = NULL;
    priv->mprq_mp = NULL;
 * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
 * mempool. If already allocated, reuse it if there are enough elements.
 * Otherwise, resize it.
 * Pointer to Ethernet device.
 * 0 on success, negative errno value on failure.
mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
    struct mlx5_priv *priv = dev->data->dev_private;
    struct rte_mempool *mp = priv->mprq_mp;
    char name[RTE_MEMPOOL_NAMESIZE];
    unsigned int desc = 0;
    unsigned int buf_len;
    unsigned int obj_num;
    unsigned int obj_size;
    unsigned int strd_num_n = 0;
    unsigned int strd_sz_n = 0;

    if (!mlx5_mprq_enabled(dev))
    /* Count the total number of descriptors configured. */
    for (i = 0; i != priv->rxqs_n; ++i) {
        struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];

        desc += 1 << rxq->elts_n;
        /* Get the max number of strides. */
        if (strd_num_n < rxq->strd_num_n)
            strd_num_n = rxq->strd_num_n;
        /* Get the max size of a stride. */
        if (strd_sz_n < rxq->strd_sz_n)
            strd_sz_n = rxq->strd_sz_n;
    assert(strd_num_n && strd_sz_n);
    buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
    obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
               sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
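    /*
     * Sizing example (illustrative numbers): strd_num_n = 9 and
     * strd_sz_n = 11 give buf_len = 512 * 2048 = 1MB per element, plus
     * 512 rte_mbuf_ext_shared_info entries and headroom on top of the
     * mlx5_mprq_buf header.
     */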
     * Received packets can be either memcpy'd or externally referenced.
     * In case the packet is attached to an mbuf as an external buffer,
     * since it isn't possible to predict how the buffers will be queued
     * by the application, there is no way to exactly pre-allocate the
     * needed buffers in advance but to speculatively prepare enough of
     * them.
     * In the data path, if this mempool is depleted, the PMD will try to
     * memcpy received packets into buffers provided by the application
     * (rxq->mp) until this mempool becomes available again.
    obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * priv->rxqs_n;
     * rte_mempool_create_empty() has a sanity check to refuse a large
     * cache size compared to the number of elements.
     * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so using a
     * constant number 2 instead.
    obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
    /* Check whether a mempool is already allocated and if it can be reused. */
    if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
        DRV_LOG(DEBUG, "port %u mempool %s is being reused",
            dev->data->port_id, mp->name);
    } else if (mp != NULL) {
        DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
            dev->data->port_id, mp->name);
         * If it fails to free, which means it may still be in use, there
         * is no way but to keep using the existing one. On buffer
         * underrun, packets will be memcpy'd instead of externally
         * attached.
        if (mlx5_mprq_free_mp(dev)) {
            if (mp->elt_size >= obj_size)
    snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
    mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
                            0, NULL, NULL, mlx5_mprq_buf_init,
                            (void *)(uintptr_t)(1 << strd_num_n),
                            dev->device->numa_node, 0);
            "port %u failed to allocate a mempool for"
            " Multi-Packet RQ, count=%u, size=%u",
            dev->data->port_id, obj_num, obj_size);
    /* Set mempool for each Rx queue. */
    for (i = 0; i != priv->rxqs_n; ++i) {
        struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];

    DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
        dev->data->port_id);
#define MLX5_MAX_LRO_SIZE (UINT8_MAX * 256u)
#define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
                                 sizeof(struct rte_vlan_hdr) * 2 + \
                                 sizeof(struct rte_ipv6_hdr)))
#define MAX_TCP_OPTION_SIZE 40u
#define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
                                 sizeof(struct rte_tcp_hdr) + \
                                 MAX_TCP_OPTION_SIZE))
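/*
 * For reference, with the standard header sizes (14B Ethernet, two 4B
 * VLAN tags, 40B IPv6) MLX5_MAX_TCP_HDR_OFFSET is 62 bytes, and adding
 * the 20B base TCP header plus up to 40B of TCP options makes
 * MLX5_MAX_LRO_HEADER_FIX 122 bytes.
 */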
 * Adjust the maximum LRO message size.
 * Pointer to Ethernet device.
 * @param max_lro_size
 *   The maximum size for LRO packet.
mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint32_t max_lro_size)
    struct mlx5_priv *priv = dev->data->dev_private;

    if (priv->config.hca_attr.lro_max_msg_sz_mode ==
        MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
        MLX5_MAX_TCP_HDR_OFFSET)
        max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
    max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
    assert(max_lro_size >= 256u);
    max_lro_size /= 256u;
    if (priv->max_lro_msg_size)
        priv->max_lro_msg_size =
            RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size);
    else
        priv->max_lro_msg_size = max_lro_size;
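    /*
     * The value is kept in units of 256 bytes (hence the division
     * above), e.g. max_lro_size = 65280 (MLX5_MAX_LRO_SIZE) is stored
     * as 255.
     */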
 * Create a DPDK Rx queue.
 * Pointer to Ethernet device.
 * Number of descriptors to configure in queue.
 * NUMA socket on which memory must be allocated.
 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
struct mlx5_rxq_ctrl *
mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
             unsigned int socket, const struct rte_eth_rxconf *conf,
             struct rte_mempool *mp)
    struct mlx5_priv *priv = dev->data->dev_private;
    struct mlx5_rxq_ctrl *tmpl;
    unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
    unsigned int mprq_stride_size;
    struct mlx5_dev_config *config = &priv->config;
    unsigned int strd_headroom_en;
     * Always allocate extra slots, even if eventually
     * the vector Rx will not be used.
        desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
    uint64_t offloads = conf->offloads |
                        dev->data->dev_conf.rxmode.offloads;
    const int mprq_en = mlx5_check_mprq_support(dev) > 0;
    unsigned int max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
    unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
                                             RTE_PKTMBUF_HEADROOM;
    unsigned int max_lro_size = 0;
    unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;

    if (non_scatter_min_mbuf_size > mb_len && !(offloads &
                                                DEV_RX_OFFLOAD_SCATTER)) {
        DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
            " configured and there is not enough mbuf space (%u) to"
            " contain the maximum Rx packet length (%u) with"
            " head-room (%u)",
            dev->data->port_id, idx, mb_len, max_rx_pkt_len,
            RTE_PKTMBUF_HEADROOM);
    tmpl = rte_calloc_socket("RXQ", 1,
                             desc_n * sizeof(struct rte_mbuf *),
    if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
                           MLX5_MR_BTREE_CACHE_N, socket)) {
        /* rte_errno is already set. */
    tmpl->socket = socket;
    if (dev->data->dev_conf.intr_conf.rxq)
     * An LRO packet may consume all the stride memory, hence we cannot
     * guarantee head-room near the packet memory in the stride.
     * In this case scatter is, for sure, enabled and an empty mbuf may be
     * added in the start for the head-room.
    if (mlx5_lro_on(dev) && RTE_PKTMBUF_HEADROOM > 0 &&
        non_scatter_min_mbuf_size > mb_len) {
        strd_headroom_en = 0;
        mprq_stride_size = RTE_MIN(max_rx_pkt_len,
                                   1u << config->mprq.max_stride_size_n);
        strd_headroom_en = 1;
        mprq_stride_size = non_scatter_min_mbuf_size;
     * This Rx queue can be configured as a Multi-Packet RQ if all of the
     * following conditions are met:
     *  - MPRQ is enabled.
     *  - The number of descs is more than the number of strides.
     *  - max_rx_pkt_len plus overhead is less than the max size of a
     * Otherwise, enable Rx scatter if necessary.
        desc > (1U << config->mprq.stride_num_n) &&
        mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) {
        /* TODO: Rx scatter isn't supported yet. */
        tmpl->rxq.sges_n = 0;
        /* Trim the number of descs needed. */
        desc >>= config->mprq.stride_num_n;
        tmpl->rxq.strd_num_n = config->mprq.stride_num_n;
        tmpl->rxq.strd_sz_n = RTE_MAX(log2above(mprq_stride_size),
                                      config->mprq.min_stride_size_n);
        tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
        tmpl->rxq.strd_headroom_en = strd_headroom_en;
        tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
                                                config->mprq.max_memcpy_len);
        max_lro_size = RTE_MIN(max_rx_pkt_len,
                               (1u << tmpl->rxq.strd_num_n) *
                               (1u << tmpl->rxq.strd_sz_n));
            "port %u Rx queue %u: Multi-Packet RQ is enabled"
            " strd_num_n = %u, strd_sz_n = %u",
            dev->data->port_id, idx,
            tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
    } else if (max_rx_pkt_len <= first_mb_free_size) {
        tmpl->rxq.sges_n = 0;
        max_lro_size = max_rx_pkt_len;
    } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
        unsigned int size = non_scatter_min_mbuf_size;
        unsigned int sges_n;

        if (mlx5_lro_on(dev) && first_mb_free_size <
            MLX5_MAX_LRO_HEADER_FIX) {
            DRV_LOG(ERR, "Not enough space in the first segment(%u)"
                " to include the max header size(%u) for LRO",
                first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
            rte_errno = ENOTSUP;
         * Determine the number of SGEs needed for a full packet
         * and round it to the next power of two.
        sges_n = log2above((size / mb_len) + !!(size % mb_len));
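        /*
         * E.g. a 9018B frame plus the default 128B of headroom in
         * 2048B mbufs needs ceil(9146 / 2048) = 5 segments, rounded up
         * to 2^log2above(5) = 8 SGEs per packet.
         */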
        if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
                "port %u too many SGEs (%u) needed to handle"
                " requested maximum packet size %u, the maximum"
                " supported is %u", dev->data->port_id,
                1 << sges_n, max_rx_pkt_len,
                1u << MLX5_MAX_LOG_RQ_SEGS);
            rte_errno = ENOTSUP;
        tmpl->rxq.sges_n = sges_n;
        max_lro_size = max_rx_pkt_len;
    if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
            "port %u MPRQ is requested but cannot be enabled"
            " (requested: desc = %u, stride_sz = %u,"
            " supported: min_stride_num = %u, max_stride_sz = %u).",
            dev->data->port_id, desc, mprq_stride_size,
            (1 << config->mprq.stride_num_n),
            (1 << config->mprq.max_stride_size_n));
    DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
        dev->data->port_id, 1 << tmpl->rxq.sges_n);
    if (desc % (1 << tmpl->rxq.sges_n)) {
            "port %u number of Rx queue descriptors (%u) is not a"
            " multiple of SGEs per packet (%u)",
            1 << tmpl->rxq.sges_n);
    mlx5_max_lro_msg_size_adjust(dev, max_lro_size);
    /* Toggle Rx checksum offload if hardware supports it. */
    tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
    tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
    /* Configure VLAN stripping. */
    tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
    /* By default, FCS (CRC) is stripped by hardware. */
    tmpl->rxq.crc_present = 0;
    if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
        if (config->hw_fcs_strip) {
             * RQs used for LRO-enabled TIRs should not be
             * configured to scatter the FCS.
            if (mlx5_lro_on(dev))
                    "port %u CRC stripping has been "
                    "disabled but will still be performed "
                    "by hardware, because LRO is enabled",
                    dev->data->port_id);
                tmpl->rxq.crc_present = 1;
                "port %u CRC stripping has been disabled but will"
                " still be performed by hardware, make sure MLNX_OFED"
                " and firmware are up to date",
                dev->data->port_id);
        "port %u CRC stripping is %s, %u bytes will be subtracted from"
        " incoming frames to hide it",
        tmpl->rxq.crc_present ? "disabled" : "enabled",
        tmpl->rxq.crc_present << 2);
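    /* crc_present << 2 yields 4, the Ethernet FCS size in bytes. */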
    tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
        (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
    tmpl->rxq.port_id = dev->data->port_id;
    tmpl->rxq.elts_n = log2above(desc);
    tmpl->rxq.rq_repl_thresh =
        MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
        (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
    tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq;
    tmpl->rxq.idx = idx;
    rte_atomic32_inc(&tmpl->refcnt);
    LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
 * Pointer to Ethernet device.
 * A pointer to the queue if it exists, NULL otherwise.
struct mlx5_rxq_ctrl *
mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
    struct mlx5_priv *priv = dev->data->dev_private;
    struct mlx5_rxq_ctrl *rxq_ctrl = NULL;

    if ((*priv->rxqs)[idx]) {
        rxq_ctrl = container_of((*priv->rxqs)[idx],
                                struct mlx5_rxq_ctrl,
                                rxq);
        mlx5_rxq_obj_get(dev, idx);
        rte_atomic32_inc(&rxq_ctrl->refcnt);
 * Release an Rx queue.
 * Pointer to Ethernet device.
 * 1 while a reference on it exists, 0 when freed.
mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
    struct mlx5_priv *priv = dev->data->dev_private;
    struct mlx5_rxq_ctrl *rxq_ctrl;

    if (!(*priv->rxqs)[idx])
    rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
    assert(rxq_ctrl->priv);
    if (rxq_ctrl->obj && !mlx5_rxq_obj_release(rxq_ctrl->obj))
        rxq_ctrl->obj = NULL;
    if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
        if (rxq_ctrl->dbr_umem_id_valid)
            claim_zero(mlx5_release_dbr(dev, rxq_ctrl->dbr_umem_id,
                                        rxq_ctrl->dbr_offset));
        mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
        LIST_REMOVE(rxq_ctrl, next);
        (*priv->rxqs)[idx] = NULL;
 * Verify the Rx queue list is empty.
 * Pointer to Ethernet device.
 * The number of objects not released.
mlx5_rxq_verify(struct rte_eth_dev *dev)
    struct mlx5_priv *priv = dev->data->dev_private;
    struct mlx5_rxq_ctrl *rxq_ctrl;

    LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
        DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
            dev->data->port_id, rxq_ctrl->rxq.idx);
 * Create an indirection table.
 * Pointer to Ethernet device.
 * Queues entering in the indirection table.
 * Number of queues in the array.
 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
static struct mlx5_ind_table_obj *
mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
                       uint32_t queues_n, enum mlx5_ind_tbl_type type)
    struct mlx5_priv *priv = dev->data->dev_private;
    struct mlx5_ind_table_obj *ind_tbl;
    unsigned int i = 0, j = 0, k = 0;

    ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
                         queues_n * sizeof(uint16_t), 0);
    ind_tbl->type = type;
    if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
        const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
                                  log2above(queues_n) :
                                  log2above(priv->config.ind_table_max_size);
        struct ibv_wq *wq[1 << wq_n];

        for (i = 0; i != queues_n; ++i) {
            struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
            wq[i] = rxq->obj->wq;
            ind_tbl->queues[i] = queues[i];
        ind_tbl->queues_n = queues_n;
        /* Finalise indirection table. */
        k = i; /* Retain value of i for use in error case. */
        for (j = 0; k != (unsigned int)(1 << wq_n); ++k, ++j)
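        /*
         * Entries past queues_n wrap around to the first WQs
         * (effectively wq[k] = wq[j]) so that the table reaches a
         * power-of-two size, which is what the RSS hardware indexes
         * with a power-of-two modulo.
         */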
        ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
            &(struct ibv_rwq_ind_table_init_attr){
                .log_ind_tbl_size = wq_n,
        if (!ind_tbl->ind_table) {
    } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
        struct mlx5_devx_rqt_attr *rqt_attr = NULL;

        rqt_attr = rte_calloc(__func__, 1, sizeof(*rqt_attr) +
                              queues_n * sizeof(uint16_t), 0);
            DRV_LOG(ERR, "port %u cannot allocate RQT resources",
                dev->data->port_id);
        rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
        rqt_attr->rqt_actual_size = queues_n;
        for (i = 0; i != queues_n; ++i) {
            struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
            rqt_attr->rq_list[i] = rxq->obj->rq->id;
            ind_tbl->queues[i] = queues[i];
        ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx,
        if (!ind_tbl->rqt) {
            DRV_LOG(ERR, "port %u cannot create DevX RQT",
                dev->data->port_id);
        ind_tbl->queues_n = queues_n;
    rte_atomic32_inc(&ind_tbl->refcnt);
    LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
    for (j = 0; j < i; j++)
        mlx5_rxq_release(dev, ind_tbl->queues[j]);
    DEBUG("port %u cannot create indirection table", dev->data->port_id);
 * Get an indirection table.
 * Pointer to Ethernet device.
 * Queues entering in the indirection table.
 * Number of queues in the array.
 * An indirection table if found.
static struct mlx5_ind_table_obj *
mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
    struct mlx5_priv *priv = dev->data->dev_private;
    struct mlx5_ind_table_obj *ind_tbl;

    LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
        if ((ind_tbl->queues_n == queues_n) &&
            (memcmp(ind_tbl->queues, queues,
                    ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
        rte_atomic32_inc(&ind_tbl->refcnt);
        for (i = 0; i != ind_tbl->queues_n; ++i)
            mlx5_rxq_get(dev, ind_tbl->queues[i]);
 * Release an indirection table.
 * Pointer to Ethernet device.
 * Indirection table to release.
 * 1 while a reference on it exists, 0 when freed.
mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
                           struct mlx5_ind_table_obj *ind_tbl)
    if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {
        if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV)
            claim_zero(mlx5_glue->destroy_rwq_ind_table
                       (ind_tbl->ind_table));
        else if (ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX)
            claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
    for (i = 0; i != ind_tbl->queues_n; ++i)
        claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
    if (!rte_atomic32_read(&ind_tbl->refcnt)) {
        LIST_REMOVE(ind_tbl, next);
 * Verify the indirection table list is empty.
 * Pointer to Ethernet device.
 * The number of objects not released.
mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
    struct mlx5_priv *priv = dev->data->dev_private;
    struct mlx5_ind_table_obj *ind_tbl;

    LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
            "port %u indirection table obj %p still referenced",
            dev->data->port_id, (void *)ind_tbl);
 * Create an Rx Hash queue.
 * Pointer to Ethernet device.
 * RSS key for the Rx hash queue.
 * @param rss_key_len
 * @param hash_fields
 *   Verbs protocol hash field to make the RSS on.
 * Queues entering in the hash queue. In case of empty hash_fields only the
 * first queue index will be taken for the indirection table.
 * Flow rule is relevant for LRO, i.e. contains IPv4/IPv6 and TCP.
 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
mlx5_hrxq_new(struct rte_eth_dev *dev,
              const uint8_t *rss_key, uint32_t rss_key_len,
              uint64_t hash_fields,
              const uint16_t *queues, uint32_t queues_n,
              int tunnel __rte_unused, int lro)
    struct mlx5_priv *priv = dev->data->dev_private;
    struct mlx5_hrxq *hrxq;
    struct ibv_qp *qp = NULL;
    struct mlx5_ind_table_obj *ind_tbl;
    struct mlx5_devx_obj *tir = NULL;

    queues_n = hash_fields ? queues_n : 1;
    ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
        struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[0]];
        struct mlx5_rxq_ctrl *rxq_ctrl =
            container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
        enum mlx5_ind_tbl_type type;

        type = rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV ?
               MLX5_IND_TBL_TYPE_IBV : MLX5_IND_TBL_TYPE_DEVX;
        ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n, type);
    if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
        struct mlx5dv_qp_init_attr qp_init_attr;

        memset(&qp_init_attr, 0, sizeof(qp_init_attr));
            qp_init_attr.comp_mask =
                MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
            qp_init_attr.create_flags =
                MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
        if (dev->data->dev_conf.lpbk_mode) {
             * Allow packet sent from NIC loop back
             * w/o source MAC check.
            qp_init_attr.comp_mask |=
                MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
            qp_init_attr.create_flags |=
                MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
        qp = mlx5_glue->dv_create_qp
            &(struct ibv_qp_init_attr_ex){
                .qp_type = IBV_QPT_RAW_PACKET,
                    IBV_QP_INIT_ATTR_PD |
                    IBV_QP_INIT_ATTR_IND_TABLE |
                    IBV_QP_INIT_ATTR_RX_HASH,
                .rx_hash_conf = (struct ibv_rx_hash_conf){
                    IBV_RX_HASH_FUNC_TOEPLITZ,
                    .rx_hash_key_len = rss_key_len,
                    (void *)(uintptr_t)rss_key,
                    .rx_hash_fields_mask = hash_fields,
                .rwq_ind_tbl = ind_tbl->ind_table,
        qp = mlx5_glue->create_qp_ex
            &(struct ibv_qp_init_attr_ex){
                .qp_type = IBV_QPT_RAW_PACKET,
                    IBV_QP_INIT_ATTR_PD |
                    IBV_QP_INIT_ATTR_IND_TABLE |
                    IBV_QP_INIT_ATTR_RX_HASH,
                .rx_hash_conf = (struct ibv_rx_hash_conf){
                    IBV_RX_HASH_FUNC_TOEPLITZ,
                    .rx_hash_key_len = rss_key_len,
                    (void *)(uintptr_t)rss_key,
                    .rx_hash_fields_mask = hash_fields,
                .rwq_ind_tbl = ind_tbl->ind_table,
    } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
        struct mlx5_devx_tir_attr tir_attr;

        memset(&tir_attr, 0, sizeof(tir_attr));
        tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
        tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
        memcpy(&tir_attr.rx_hash_field_selector_outer, &hash_fields,
        tir_attr.transport_domain = priv->sh->tdn;
        memcpy(tir_attr.rx_hash_toeplitz_key, rss_key, rss_key_len);
        tir_attr.indirect_table = ind_tbl->rqt->id;
        if (dev->data->dev_conf.lpbk_mode)
            tir_attr.self_lb_block =
                MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
            tir_attr.lro_timeout_period_usecs =
                priv->config.lro.timeout;
            tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
            tir_attr.lro_enable_mask = lro;
        tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
            DRV_LOG(ERR, "port %u cannot create DevX TIR",
                dev->data->port_id);
    hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
    hrxq->ind_table = ind_tbl;
    if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
            mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
        if (!hrxq->action) {
    } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
        hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir
        if (!hrxq->action) {
    hrxq->rss_key_len = rss_key_len;
    hrxq->hash_fields = hash_fields;
    memcpy(hrxq->rss_key, rss_key, rss_key_len);
    rte_atomic32_inc(&hrxq->refcnt);
    LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
    err = rte_errno; /* Save rte_errno before cleanup. */
    mlx5_ind_table_obj_release(dev, ind_tbl);
        claim_zero(mlx5_glue->destroy_qp(qp));
        claim_zero(mlx5_devx_cmd_destroy(tir));
    rte_errno = err; /* Restore rte_errno. */
 * Get an Rx Hash queue.
 * Pointer to Ethernet device.
 * RSS configuration for the Rx hash queue.
 * Queues entering in the hash queue. In case of empty hash_fields only the
 * first queue index will be taken for the indirection table.
 * A hash Rx queue on success.
mlx5_hrxq_get(struct rte_eth_dev *dev,
              const uint8_t *rss_key, uint32_t rss_key_len,
              uint64_t hash_fields,
              const uint16_t *queues, uint32_t queues_n)
    struct mlx5_priv *priv = dev->data->dev_private;
    struct mlx5_hrxq *hrxq;

    queues_n = hash_fields ? queues_n : 1;
    LIST_FOREACH(hrxq, &priv->hrxqs, next) {
        struct mlx5_ind_table_obj *ind_tbl;

        if (hrxq->rss_key_len != rss_key_len)
        if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
        if (hrxq->hash_fields != hash_fields)
        ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
        if (ind_tbl != hrxq->ind_table) {
            mlx5_ind_table_obj_release(dev, ind_tbl);
        rte_atomic32_inc(&hrxq->refcnt);
 * Release the hash Rx queue.
 * Pointer to Ethernet device.
 * Pointer to Hash Rx queue to release.
 * 1 while a reference on it exists, 0 when freed.
mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
    if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
        mlx5_glue->destroy_flow_action(hrxq->action);
        if (hrxq->ind_table->type == MLX5_IND_TBL_TYPE_IBV)
            claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
        else /* hrxq->ind_table->type == MLX5_IND_TBL_TYPE_DEVX */
            claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
        mlx5_ind_table_obj_release(dev, hrxq->ind_table);
        LIST_REMOVE(hrxq, next);
    claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
 * Verify the hash Rx queue list is empty.
 * Pointer to Ethernet device.
 * The number of objects not released.
mlx5_hrxq_verify(struct rte_eth_dev *dev)
    struct mlx5_priv *priv = dev->data->dev_private;
    struct mlx5_hrxq *hrxq;

    LIST_FOREACH(hrxq, &priv->hrxqs, next) {
            "port %u hash Rx queue %p still referenced",
            dev->data->port_id, (void *)hrxq);
 * Create a drop Rx queue Verbs/DevX object.
 * Pointer to Ethernet device.
 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
static struct mlx5_rxq_obj *
mlx5_rxq_obj_drop_new(struct rte_eth_dev *dev)
    struct mlx5_priv *priv = dev->data->dev_private;
    struct ibv_context *ctx = priv->sh->ctx;
    struct ibv_wq *wq = NULL;
    struct mlx5_rxq_obj *rxq;

    if (priv->drop_queue.rxq)
        return priv->drop_queue.rxq;
    cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
        DEBUG("port %u cannot allocate CQ for drop queue",
              dev->data->port_id);
    wq = mlx5_glue->create_wq(ctx,
        &(struct ibv_wq_init_attr){
            .wq_type = IBV_WQT_RQ,
        DEBUG("port %u cannot allocate WQ for drop queue",
              dev->data->port_id);
    rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0);
        DEBUG("port %u cannot allocate drop Rx queue memory",
              dev->data->port_id);
    priv->drop_queue.rxq = rxq;
    claim_zero(mlx5_glue->destroy_wq(wq));
    claim_zero(mlx5_glue->destroy_cq(cq));
 * Release a drop Rx queue Verbs/DevX object.
 * Pointer to Ethernet device.
mlx5_rxq_obj_drop_release(struct rte_eth_dev *dev)
    struct mlx5_priv *priv = dev->data->dev_private;
    struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;

    claim_zero(mlx5_glue->destroy_wq(rxq->wq));
    claim_zero(mlx5_glue->destroy_cq(rxq->cq));
    priv->drop_queue.rxq = NULL;
 * Create a drop indirection table.
 * Pointer to Ethernet device.
 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
static struct mlx5_ind_table_obj *
mlx5_ind_table_obj_drop_new(struct rte_eth_dev *dev)
    struct mlx5_priv *priv = dev->data->dev_private;
    struct mlx5_ind_table_obj *ind_tbl;
    struct mlx5_rxq_obj *rxq;
    struct mlx5_ind_table_obj tmpl;

    rxq = mlx5_rxq_obj_drop_new(dev);
    tmpl.ind_table = mlx5_glue->create_rwq_ind_table
        &(struct ibv_rwq_ind_table_init_attr){
            .log_ind_tbl_size = 0,
            .ind_tbl = &rxq->wq,
    if (!tmpl.ind_table) {
        DEBUG("port %u cannot allocate indirection table for drop"
              dev->data->port_id);
    ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0);
    ind_tbl->ind_table = tmpl.ind_table;
    mlx5_rxq_obj_drop_release(dev);
 * Release a drop indirection table.
 * Pointer to Ethernet device.
mlx5_ind_table_obj_drop_release(struct rte_eth_dev *dev)
    struct mlx5_priv *priv = dev->data->dev_private;
    struct mlx5_ind_table_obj *ind_tbl = priv->drop_queue.hrxq->ind_table;

    claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
    mlx5_rxq_obj_drop_release(dev);
    priv->drop_queue.hrxq->ind_table = NULL;
 * Create a drop Rx Hash queue.
 * Pointer to Ethernet device.
 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
    struct mlx5_priv *priv = dev->data->dev_private;
    struct mlx5_ind_table_obj *ind_tbl;
    struct mlx5_hrxq *hrxq;

    if (priv->drop_queue.hrxq) {
        rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
        return priv->drop_queue.hrxq;
    ind_tbl = mlx5_ind_table_obj_drop_new(dev);
    qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
        &(struct ibv_qp_init_attr_ex){
            .qp_type = IBV_QPT_RAW_PACKET,
                IBV_QP_INIT_ATTR_PD |
                IBV_QP_INIT_ATTR_IND_TABLE |
                IBV_QP_INIT_ATTR_RX_HASH,
            .rx_hash_conf = (struct ibv_rx_hash_conf){
                IBV_RX_HASH_FUNC_TOEPLITZ,
                .rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
                .rx_hash_key = rss_hash_default_key,
                .rx_hash_fields_mask = 0,
            .rwq_ind_tbl = ind_tbl->ind_table,
        DEBUG("port %u cannot allocate QP for drop queue",
              dev->data->port_id);
    hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
            "port %u cannot allocate memory for drop queue",
            dev->data->port_id);
    hrxq->ind_table = ind_tbl;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
    hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
    if (!hrxq->action) {
    priv->drop_queue.hrxq = hrxq;
    rte_atomic32_set(&hrxq->refcnt, 1);
    mlx5_ind_table_obj_drop_release(dev);
 * Release a drop hash Rx queue.
 * Pointer to Ethernet device.
mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
    struct mlx5_priv *priv = dev->data->dev_private;
    struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;

    if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
        mlx5_glue->destroy_flow_action(hrxq->action);
        claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
        mlx5_ind_table_obj_drop_release(dev);
        priv->drop_queue.hrxq = NULL;