1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
12 #include <sys/queue.h>
15 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
17 #pragma GCC diagnostic ignored "-Wpedantic"
19 #include <infiniband/verbs.h>
20 #include <infiniband/mlx5dv.h>
22 #pragma GCC diagnostic error "-Wpedantic"
26 #include <rte_malloc.h>
27 #include <rte_ethdev_driver.h>
28 #include <rte_common.h>
29 #include <rte_interrupts.h>
30 #include <rte_debug.h>
34 #include "mlx5_rxtx.h"
35 #include "mlx5_utils.h"
36 #include "mlx5_autoconf.h"
37 #include "mlx5_defs.h"
38 #include "mlx5_glue.h"
40 /* Default RSS hash key also used for ConnectX-3. */
41 uint8_t rss_hash_default_key[] = {
42 0x2c, 0xc6, 0x81, 0xd1,
43 0x5b, 0xdb, 0xf4, 0xf7,
44 0xfc, 0xa2, 0x83, 0x19,
45 0xdb, 0x1a, 0x3e, 0x94,
46 0x6b, 0x9e, 0x38, 0xd9,
47 0x2c, 0x9c, 0x03, 0xd1,
48 0xad, 0x99, 0x44, 0xa7,
49 0xd9, 0x56, 0x3d, 0x59,
50 0x06, 0x3c, 0x25, 0xf3,
51 0xfc, 0x1f, 0xdc, 0x2a,
54 /* Length of the default RSS hash key. */
55 static_assert(MLX5_RSS_HASH_KEY_LEN ==
56 (unsigned int)sizeof(rss_hash_default_key),
57 "wrong RSS default key size.");
60 * Check whether Multi-Packet RQ can be enabled for the device.
63 * Pointer to Ethernet device.
66 * 1 if supported, negative errno value if not.
69 mlx5_check_mprq_support(struct rte_eth_dev *dev)
71 struct mlx5_priv *priv = dev->data->dev_private;
73 if (priv->config.mprq.enabled &&
74 priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
80 * Check whether Multi-Packet RQ is enabled for the Rx queue.
83 * Pointer to receive queue structure.
86 * 0 if disabled, otherwise enabled.
89 mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
91 return rxq->strd_num_n > 0;
95 * Check whether Multi-Packet RQ is enabled for the device.
96 * MPRQ can be enabled explicitly, or implicitly by enabling LRO.
99 * Pointer to Ethernet device.
102 * 0 if disabled, otherwise enabled.
105 mlx5_mprq_enabled(struct rte_eth_dev *dev)
107 struct mlx5_priv *priv = dev->data->dev_private;
111 if (mlx5_check_mprq_support(dev) < 0)
113 /* All the configured queues should be enabled. */
114 for (i = 0; i < priv->rxqs_n; ++i) {
115 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
119 if (mlx5_rxq_mprq_enabled(rxq))
122 /* Multi-Packet RQ can't be partially configured. */
123 assert(n == 0 || n == priv->rxqs_n);
124 return n == priv->rxqs_n;
128 * Check whether LRO is supported and enabled for the device.
131 * Pointer to Ethernet device.
134 * 0 if disabled, 1 if enabled.
137 mlx5_lro_on(struct rte_eth_dev *dev)
139 return (MLX5_LRO_SUPPORTED(dev) && MLX5_LRO_ENABLED(dev));
143 * Allocate RX queue elements for Multi-Packet RQ.
146 * Pointer to RX queue structure.
149 * 0 on success, a negative errno value otherwise and rte_errno is set.
152 rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
154 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
155 unsigned int wqe_n = 1 << rxq->elts_n;
159 /* Iterate on segments. */
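/*
 * One buffer is allocated per WQE plus one extra; the spare from the last
 * iteration is kept in mprq_repl as a replacement for replenishment.
 */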
160 for (i = 0; i <= wqe_n; ++i) {
161 struct mlx5_mprq_buf *buf;
163 if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
164 DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
169 (*rxq->mprq_bufs)[i] = buf;
171 rxq->mprq_repl = buf;
174 "port %u Rx queue %u allocated and configured %u segments",
175 rxq->port_id, rxq->idx, wqe_n);
178 err = rte_errno; /* Save rte_errno before cleanup. */
180 for (i = 0; (i != wqe_n); ++i) {
181 if ((*rxq->mprq_bufs)[i] != NULL)
182 rte_mempool_put(rxq->mprq_mp,
183 (*rxq->mprq_bufs)[i]);
184 (*rxq->mprq_bufs)[i] = NULL;
186 DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
187 rxq->port_id, rxq->idx);
188 rte_errno = err; /* Restore rte_errno. */
193 * Allocate RX queue elements for Single-Packet RQ.
196 * Pointer to RX queue structure.
199 * 0 on success, errno value on failure.
202 rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
204 const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
205 unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
209 /* Iterate on segments. */
210 for (i = 0; (i != elts_n); ++i) {
211 struct rte_mbuf *buf;
213 buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
215 DRV_LOG(ERR, "port %u empty mbuf pool",
216 PORT_ID(rxq_ctrl->priv));
220 /* Headroom is reserved by rte_pktmbuf_alloc(). */
221 assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
222 /* Buffer is supposed to be empty. */
223 assert(rte_pktmbuf_data_len(buf) == 0);
224 assert(rte_pktmbuf_pkt_len(buf) == 0);
226 /* Only the first segment keeps headroom. */
228 SET_DATA_OFF(buf, 0);
229 PORT(buf) = rxq_ctrl->rxq.port_id;
230 DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
231 PKT_LEN(buf) = DATA_LEN(buf);
233 (*rxq_ctrl->rxq.elts)[i] = buf;
235 /* If Rx vector is activated. */
236 if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
237 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
238 struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
241 /* Initialize default rearm_data for vPMD. */
242 mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
243 rte_mbuf_refcnt_set(mbuf_init, 1);
244 mbuf_init->nb_segs = 1;
245 mbuf_init->port = rxq->port_id;
247 * prevent compiler reordering:
248 * rearm_data covers previous fields.
250 rte_compiler_barrier();
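/*
 * Snapshot rearm_data (data_off, refcnt, nb_segs, port) as one 64-bit
 * value; the vectorized Rx path copies it into each received mbuf with
 * a single store.
 */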
251 rxq->mbuf_initializer =
252 *(uint64_t *)&mbuf_init->rearm_data;
253 /* Padding with a fake mbuf for vectorized Rx. */
254 for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
255 (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
258 "port %u Rx queue %u allocated and configured %u segments"
260 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
261 elts_n / (1 << rxq_ctrl->rxq.sges_n));
264 err = rte_errno; /* Save rte_errno before cleanup. */
266 for (i = 0; (i != elts_n); ++i) {
267 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
268 rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
269 (*rxq_ctrl->rxq.elts)[i] = NULL;
271 DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
272 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
273 rte_errno = err; /* Restore rte_errno. */
278 * Allocate RX queue elements.
281 * Pointer to RX queue structure.
284 * 0 on success, errno value on failure.
287 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
289 return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
290 rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl);
294 * Free RX queue elements for Multi-Packet RQ.
297 * Pointer to RX queue structure.
300 rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
302 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
305 DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs",
306 rxq->port_id, rxq->idx);
307 if (rxq->mprq_bufs == NULL)
309 assert(mlx5_rxq_check_vec_support(rxq) < 0);
310 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
311 if ((*rxq->mprq_bufs)[i] != NULL)
312 mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
313 (*rxq->mprq_bufs)[i] = NULL;
315 if (rxq->mprq_repl != NULL) {
316 mlx5_mprq_buf_free(rxq->mprq_repl);
317 rxq->mprq_repl = NULL;
322 * Free RX queue elements for Single-Packet RQ.
325 * Pointer to RX queue structure.
328 rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
330 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
331 const uint16_t q_n = (1 << rxq->elts_n);
332 const uint16_t q_mask = q_n - 1;
333 uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
336 DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
337 PORT_ID(rxq_ctrl->priv), rxq->idx);
338 if (rxq->elts == NULL)
341 * Some mbufs in the ring belong to the application; they cannot be
344 if (mlx5_rxq_check_vec_support(rxq) > 0) {
345 for (i = 0; i < used; ++i)
346 (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
347 rxq->rq_pi = rxq->rq_ci;
349 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
350 if ((*rxq->elts)[i] != NULL)
351 rte_pktmbuf_free_seg((*rxq->elts)[i]);
352 (*rxq->elts)[i] = NULL;
357 * Free RX queue elements.
360 * Pointer to RX queue structure.
363 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
365 if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
366 rxq_free_elts_mprq(rxq_ctrl);
368 rxq_free_elts_sprq(rxq_ctrl);
372 * Returns the per-queue supported offloads.
375 * Pointer to Ethernet device.
378 * Supported Rx offloads.
381 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
383 struct mlx5_priv *priv = dev->data->dev_private;
384 struct mlx5_dev_config *config = &priv->config;
385 uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
386 DEV_RX_OFFLOAD_TIMESTAMP |
387 DEV_RX_OFFLOAD_JUMBO_FRAME);
389 if (config->hw_fcs_strip)
390 offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
393 offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
394 DEV_RX_OFFLOAD_UDP_CKSUM |
395 DEV_RX_OFFLOAD_TCP_CKSUM);
396 if (config->hw_vlan_strip)
397 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
403 * Returns the per-port supported offloads.
406 * Pointer to Ethernet device.
409 * Supported Rx offloads.
412 mlx5_get_rx_port_offloads(struct rte_eth_dev *dev)
414 uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
416 if (MLX5_LRO_SUPPORTED(dev))
417 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
422 * Verify if the queue can be released.
425 * Pointer to Ethernet device.
430 * 1 if the queue can be released,
431 * 0 if the queue cannot be released because references to it still exist,
432 * a negative errno value and rte_errno is set if the queue doesn't exist.
435 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
437 struct mlx5_priv *priv = dev->data->dev_private;
438 struct mlx5_rxq_ctrl *rxq_ctrl;
440 if (!(*priv->rxqs)[idx]) {
444 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
445 return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
451 * Pointer to Ethernet device structure.
455 * Number of descriptors to configure in queue.
457 * NUMA socket on which memory must be allocated.
459 * Thresholds parameters.
461 * Memory pool for buffer allocations.
464 * 0 on success, a negative errno value otherwise and rte_errno is set.
467 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
468 unsigned int socket, const struct rte_eth_rxconf *conf,
469 struct rte_mempool *mp)
471 struct mlx5_priv *priv = dev->data->dev_private;
472 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
473 struct mlx5_rxq_ctrl *rxq_ctrl =
474 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
476 if (!rte_is_power_of_2(desc)) {
477 desc = 1 << log2above(desc);
479 "port %u increased number of descriptors in Rx queue %u"
480 " to the next power of two (%d)",
481 dev->data->port_id, idx, desc);
483 DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
484 dev->data->port_id, idx, desc);
485 if (idx >= priv->rxqs_n) {
486 DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
487 dev->data->port_id, idx, priv->rxqs_n);
488 rte_errno = EOVERFLOW;
491 if (!mlx5_rxq_releasable(dev, idx)) {
492 DRV_LOG(ERR, "port %u unable to release queue index %u",
493 dev->data->port_id, idx);
497 mlx5_rxq_release(dev, idx);
498 rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
500 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
501 dev->data->port_id, idx);
505 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
506 dev->data->port_id, idx);
507 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
512 * DPDK callback to release a RX queue.
515 * Generic RX queue pointer.
518 mlx5_rx_queue_release(void *dpdk_rxq)
520 struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
521 struct mlx5_rxq_ctrl *rxq_ctrl;
522 struct mlx5_priv *priv;
526 rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
527 priv = rxq_ctrl->priv;
528 if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
529 rte_panic("port %u Rx queue %u is still used by a flow and"
530 " cannot be removed\n",
531 PORT_ID(priv), rxq->idx);
532 mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
536 * Get an Rx queue Verbs/DevX object.
539 * Pointer to Ethernet device.
541 * Queue index in DPDK Rx queue array
544 * The Verbs/DevX object if it exists.
546 static struct mlx5_rxq_obj *
547 mlx5_rxq_obj_get(struct rte_eth_dev *dev, uint16_t idx)
549 struct mlx5_priv *priv = dev->data->dev_private;
550 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
551 struct mlx5_rxq_ctrl *rxq_ctrl;
553 if (idx >= priv->rxqs_n)
557 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
559 rte_atomic32_inc(&rxq_ctrl->obj->refcnt);
560 return rxq_ctrl->obj;
564 * Release the resources allocated for an RQ DevX object.
567 * DevX Rx queue object.
570 rxq_release_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
572 if (rxq_ctrl->rxq.wqes) {
573 rte_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
574 rxq_ctrl->rxq.wqes = NULL;
576 if (rxq_ctrl->wq_umem) {
577 mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
578 rxq_ctrl->wq_umem = NULL;
583 * Release an Rx verbs/DevX queue object.
586 * Verbs/DevX Rx queue object.
589 * 1 while a reference on it exists, 0 when freed.
592 mlx5_rxq_obj_release(struct mlx5_rxq_obj *rxq_obj)
595 if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_IBV)
598 if (rte_atomic32_dec_and_test(&rxq_obj->refcnt)) {
599 rxq_free_elts(rxq_obj->rxq_ctrl);
600 if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
601 claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
602 } else if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
603 claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
604 rxq_release_rq_resources(rxq_obj->rxq_ctrl);
606 claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
607 if (rxq_obj->channel)
608 claim_zero(mlx5_glue->destroy_comp_channel
610 LIST_REMOVE(rxq_obj, next);
618 * Allocate queue vector and fill epoll fd list for Rx interrupts.
621 * Pointer to Ethernet device.
624 * 0 on success, a negative errno value otherwise and rte_errno is set.
627 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
629 struct mlx5_priv *priv = dev->data->dev_private;
631 unsigned int rxqs_n = priv->rxqs_n;
632 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
633 unsigned int count = 0;
634 struct rte_intr_handle *intr_handle = dev->intr_handle;
636 if (!dev->data->dev_conf.intr_conf.rxq)
638 mlx5_rx_intr_vec_disable(dev);
639 intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
640 if (intr_handle->intr_vec == NULL) {
642 "port %u failed to allocate memory for interrupt"
643 " vector, Rx interrupts will not be supported",
648 intr_handle->type = RTE_INTR_HANDLE_EXT;
649 for (i = 0; i != n; ++i) {
650 /* This rxq obj must not be released in this function. */
651 struct mlx5_rxq_obj *rxq_obj = mlx5_rxq_obj_get(dev, i);
656 /* Skip queues that cannot request interrupts. */
657 if (!rxq_obj || !rxq_obj->channel) {
658 /* Use invalid intr_vec[] index to disable entry. */
659 intr_handle->intr_vec[i] =
660 RTE_INTR_VEC_RXTX_OFFSET +
661 RTE_MAX_RXTX_INTR_VEC_ID;
664 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
666 "port %u too many Rx queues for interrupt"
667 " vector size (%d), Rx interrupts cannot be"
669 dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
670 mlx5_rx_intr_vec_disable(dev);
674 fd = rxq_obj->channel->fd;
675 flags = fcntl(fd, F_GETFL);
676 rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
680 "port %u failed to make Rx interrupt file"
681 " descriptor %d non-blocking for queue index"
683 dev->data->port_id, fd, i);
684 mlx5_rx_intr_vec_disable(dev);
687 intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
688 intr_handle->efds[count] = fd;
692 mlx5_rx_intr_vec_disable(dev);
694 intr_handle->nb_efd = count;
699 * Clean up Rx interrupts handler.
702 * Pointer to Ethernet device.
705 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
707 struct mlx5_priv *priv = dev->data->dev_private;
708 struct rte_intr_handle *intr_handle = dev->intr_handle;
710 unsigned int rxqs_n = priv->rxqs_n;
711 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
713 if (!dev->data->dev_conf.intr_conf.rxq)
715 if (!intr_handle->intr_vec)
717 for (i = 0; i != n; ++i) {
718 struct mlx5_rxq_ctrl *rxq_ctrl;
719 struct mlx5_rxq_data *rxq_data;
721 if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
722 RTE_MAX_RXTX_INTR_VEC_ID)
725 * Need to access directly the queue to release the reference
726 * kept in mlx5_rx_intr_vec_enable().
728 rxq_data = (*priv->rxqs)[i];
729 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
731 mlx5_rxq_obj_release(rxq_ctrl->obj);
734 rte_intr_free_epoll_fd(intr_handle);
735 if (intr_handle->intr_vec)
736 free(intr_handle->intr_vec);
737 intr_handle->nb_efd = 0;
738 intr_handle->intr_vec = NULL;
742 * MLX5 CQ notification.
745 * Pointer to receive queue structure.
747 * Sequence number per receive queue.
750 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
753 uint32_t doorbell_hi;
755 void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
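/*
 * Compose the arm doorbell: the high 32 bits hold the sequence number and
 * the CQ consumer index, the low 32 bits the CQ number. The arm DB record
 * is updated first, then the 64-bit value is written to the UAR register.
 */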
757 sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
758 doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
759 doorbell = (uint64_t)doorbell_hi << 32;
760 doorbell |= rxq->cqn;
761 rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
762 mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
763 cq_db_reg, rxq->uar_lock_cq);
767 * DPDK callback for Rx queue interrupt enable.
770 * Pointer to Ethernet device structure.
775 * 0 on success, a negative errno value otherwise and rte_errno is set.
778 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
780 struct mlx5_priv *priv = dev->data->dev_private;
781 struct mlx5_rxq_data *rxq_data;
782 struct mlx5_rxq_ctrl *rxq_ctrl;
784 rxq_data = (*priv->rxqs)[rx_queue_id];
789 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
791 struct mlx5_rxq_obj *rxq_obj;
793 rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
798 mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
799 mlx5_rxq_obj_release(rxq_obj);
805 * DPDK callback for Rx queue interrupt disable.
808 * Pointer to Ethernet device structure.
813 * 0 on success, a negative errno value otherwise and rte_errno is set.
816 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
818 struct mlx5_priv *priv = dev->data->dev_private;
819 struct mlx5_rxq_data *rxq_data;
820 struct mlx5_rxq_ctrl *rxq_ctrl;
821 struct mlx5_rxq_obj *rxq_obj = NULL;
822 struct ibv_cq *ev_cq;
826 rxq_data = (*priv->rxqs)[rx_queue_id];
831 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
834 rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
839 ret = mlx5_glue->get_cq_event(rxq_obj->channel, &ev_cq, &ev_ctx);
840 if (ret || ev_cq != rxq_obj->cq) {
844 rxq_data->cq_arm_sn++;
845 mlx5_glue->ack_cq_events(rxq_obj->cq, 1);
846 mlx5_rxq_obj_release(rxq_obj);
849 ret = rte_errno; /* Save rte_errno before cleanup. */
851 mlx5_rxq_obj_release(rxq_obj);
852 DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
853 dev->data->port_id, rx_queue_id);
854 rte_errno = ret; /* Restore rte_errno. */
859 * Create a CQ Verbs object.
862 * Pointer to Ethernet device.
864 * Pointer to device private data.
866 * Pointer to Rx queue data.
868 * Number of CQEs in CQ.
870 * Pointer to Rx queue object data.
873 * The Verbs object initialised, NULL otherwise and rte_errno is set.
875 static struct ibv_cq *
876 mlx5_ibv_cq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
877 struct mlx5_rxq_data *rxq_data,
878 unsigned int cqe_n, struct mlx5_rxq_obj *rxq_obj)
881 struct ibv_cq_init_attr_ex ibv;
882 struct mlx5dv_cq_init_attr mlx5;
885 cq_attr.ibv = (struct ibv_cq_init_attr_ex){
887 .channel = rxq_obj->channel,
890 cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
893 if (priv->config.cqe_comp && !rxq_data->hw_timestamp) {
894 cq_attr.mlx5.comp_mask |=
895 MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
896 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
897 cq_attr.mlx5.cqe_comp_res_format =
898 mlx5_rxq_mprq_enabled(rxq_data) ?
899 MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
900 MLX5DV_CQE_RES_FORMAT_HASH;
902 cq_attr.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
905 * For vectorized Rx, it must not be doubled in order to
906 * make cq_ci and rq_ci aligned.
908 if (mlx5_rxq_check_vec_support(rxq_data) < 0)
909 cq_attr.ibv.cqe *= 2;
910 } else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
912 "port %u Rx CQE compression is disabled for HW"
916 #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
917 if (priv->config.cqe_pad) {
918 cq_attr.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
919 cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
922 return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(priv->sh->ctx,
928 * Create a WQ Verbs object.
931 * Pointer to Ethernet device.
933 * Pointer to device private data.
935 * Pointer to Rx queue data.
937 * Queue index in DPDK Rx queue array
939 * Number of WQEs in WQ.
941 * Pointer to Rx queue object data.
944 * The Verbs object initialised, NULL otherwise and rte_errno is set.
946 static struct ibv_wq *
947 mlx5_ibv_wq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
948 struct mlx5_rxq_data *rxq_data, uint16_t idx,
949 unsigned int wqe_n, struct mlx5_rxq_obj *rxq_obj)
952 struct ibv_wq_init_attr ibv;
953 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
954 struct mlx5dv_wq_init_attr mlx5;
958 wq_attr.ibv = (struct ibv_wq_init_attr){
959 .wq_context = NULL, /* Could be useful in the future. */
960 .wq_type = IBV_WQT_RQ,
961 /* Max number of outstanding WRs. */
962 .max_wr = wqe_n >> rxq_data->sges_n,
963 /* Max number of scatter/gather elements in a WR. */
964 .max_sge = 1 << rxq_data->sges_n,
967 .comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING | 0,
968 .create_flags = (rxq_data->vlan_strip ?
969 IBV_WQ_FLAGS_CVLAN_STRIPPING : 0),
971 /* By default, FCS (CRC) is stripped by hardware. */
972 if (rxq_data->crc_present) {
973 wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
974 wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
976 if (priv->config.hw_padding) {
977 #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
978 wq_attr.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
979 wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
980 #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
981 wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
982 wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
985 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
986 wq_attr.mlx5 = (struct mlx5dv_wq_init_attr){
989 if (mlx5_rxq_mprq_enabled(rxq_data)) {
990 struct mlx5dv_striding_rq_init_attr *mprq_attr =
991 &wq_attr.mlx5.striding_rq_attrs;
993 wq_attr.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
994 *mprq_attr = (struct mlx5dv_striding_rq_init_attr){
995 .single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
996 .single_wqe_log_num_of_strides = rxq_data->strd_num_n,
997 .two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
1000 rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &wq_attr.ibv,
1003 rxq_obj->wq = mlx5_glue->create_wq(priv->sh->ctx, &wq_attr.ibv);
1007 * Make sure the number of WRs*SGEs matches expectations since a queue
1008 * cannot allocate more than "desc" buffers.
1010 if (wq_attr.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
1011 wq_attr.ibv.max_sge != (1u << rxq_data->sges_n)) {
1013 "port %u Rx queue %u requested %u*%u but got"
1015 dev->data->port_id, idx,
1016 wqe_n >> rxq_data->sges_n,
1017 (1 << rxq_data->sges_n),
1018 wq_attr.ibv.max_wr, wq_attr.ibv.max_sge);
1019 claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
1028 * Fill common fields of create RQ attributes structure.
1031 * Pointer to Rx queue data.
1033 * CQ number to use with this RQ.
1035 * RQ attributes structure to fill.
1038 mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
1039 struct mlx5_devx_create_rq_attr *rq_attr)
1041 rq_attr->state = MLX5_RQC_STATE_RST;
1042 rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
1044 rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
1048 * Fill common fields of DevX WQ attributes structure.
1051 * Pointer to device private data.
1053 * Pointer to Rx queue control structure.
1055 * WQ attributes structure to fill.
1058 mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
1059 struct mlx5_devx_wq_attr *wq_attr)
1061 wq_attr->end_padding_mode = priv->config.cqe_pad ?
1062 MLX5_WQ_END_PAD_MODE_ALIGN :
1063 MLX5_WQ_END_PAD_MODE_NONE;
1064 wq_attr->pd = priv->sh->pdn;
1065 wq_attr->dbr_addr = rxq_ctrl->dbr_offset;
1066 wq_attr->dbr_umem_id = rxq_ctrl->dbr_umem_id;
1067 wq_attr->dbr_umem_valid = 1;
1068 wq_attr->wq_umem_id = rxq_ctrl->wq_umem->umem_id;
1069 wq_attr->wq_umem_valid = 1;
1073 * Create a RQ object using DevX.
1076 * Pointer to Ethernet device.
1078 * Queue index in DPDK Rx queue array
1080 * CQ number to use with this RQ.
1083 * The DevX object initialised, NULL otherwise and rte_errno is set.
1085 static struct mlx5_devx_obj *
1086 mlx5_devx_rq_new(struct rte_eth_dev *dev, uint16_t idx, uint32_t cqn)
1088 struct mlx5_priv *priv = dev->data->dev_private;
1089 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1090 struct mlx5_rxq_ctrl *rxq_ctrl =
1091 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1092 struct mlx5_devx_create_rq_attr rq_attr;
1093 uint32_t wqe_n = 1 << rxq_data->elts_n;
1094 uint32_t wq_size = 0;
1095 uint32_t wqe_size = 0;
1096 uint32_t log_wqe_size = 0;
1098 struct mlx5_devx_obj *rq;
1100 memset(&rq_attr, 0, sizeof(rq_attr));
1101 /* Fill RQ attributes. */
1102 rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
1103 rq_attr.flush_in_error_en = 1;
1104 mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
1105 /* Fill WQ attributes for this RQ. */
1106 if (mlx5_rxq_mprq_enabled(rxq_data)) {
1107 rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
1109 * Number of strides in each WQE:
1110 * 512*2^single_wqe_log_num_of_strides.
1112 rq_attr.wq_attr.single_wqe_log_num_of_strides =
1113 rxq_data->strd_num_n -
1114 MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
1115 /* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
1116 rq_attr.wq_attr.single_stride_log_num_of_bytes =
1117 rxq_data->strd_sz_n -
1118 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
1119 wqe_size = sizeof(struct mlx5_wqe_mprq);
1122 int num_scatter = 0;
1124 rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
1125 max_sge = 1 << rxq_data->sges_n;
1126 num_scatter = RTE_MAX(max_sge, 1);
1127 wqe_size = sizeof(struct mlx5_wqe_data_seg) * num_scatter;
1129 log_wqe_size = log2above(wqe_size);
1130 rq_attr.wq_attr.log_wq_stride = log_wqe_size;
1131 rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n;
1132 /* Calculate and allocate WQ memory space. */
1133 wqe_size = 1 << log_wqe_size; /* Round up to a power of two. */
1134 wq_size = wqe_n * wqe_size;
1135 buf = rte_calloc_socket(__func__, 1, wq_size, RTE_CACHE_LINE_SIZE,
1139 rxq_data->wqes = buf;
1140 rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
1142 if (!rxq_ctrl->wq_umem) {
1146 mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
1147 rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
1149 rxq_release_rq_resources(rxq_ctrl);
1154 * Create the Rx queue Verbs/DevX object.
1157 * Pointer to Ethernet device.
1159 * Queue index in DPDK Rx queue array
1161 * Type of Rx queue object to create.
1164 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
1166 struct mlx5_rxq_obj *
1167 mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
1168 enum mlx5_rxq_obj_type type)
1170 struct mlx5_priv *priv = dev->data->dev_private;
1171 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1172 struct mlx5_rxq_ctrl *rxq_ctrl =
1173 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1174 struct ibv_wq_attr mod;
1176 unsigned int wqe_n = 1 << rxq_data->elts_n;
1177 struct mlx5_rxq_obj *tmpl = NULL;
1178 struct mlx5dv_cq cq_info;
1179 struct mlx5dv_rwq rwq;
1181 struct mlx5dv_obj obj;
1184 assert(!rxq_ctrl->obj);
1185 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
1186 priv->verbs_alloc_ctx.obj = rxq_ctrl;
1187 tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
1191 "port %u Rx queue %u cannot allocate verbs resources",
1192 dev->data->port_id, rxq_data->idx);
1197 tmpl->rxq_ctrl = rxq_ctrl;
1198 if (rxq_ctrl->irq) {
1199 tmpl->channel = mlx5_glue->create_comp_channel(priv->sh->ctx);
1200 if (!tmpl->channel) {
1201 DRV_LOG(ERR, "port %u: comp channel creation failure",
1202 dev->data->port_id);
1207 if (mlx5_rxq_mprq_enabled(rxq_data))
1208 cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
1211 tmpl->cq = mlx5_ibv_cq_new(dev, priv, rxq_data, cqe_n, tmpl);
1213 DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
1214 dev->data->port_id, idx);
1218 obj.cq.in = tmpl->cq;
1219 obj.cq.out = &cq_info;
1220 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ);
1225 if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
1227 "port %u wrong MLX5_CQE_SIZE environment variable"
1228 " value: it should be set to %u",
1229 dev->data->port_id, RTE_CACHE_LINE_SIZE);
1233 DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
1234 dev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr);
1235 DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
1236 dev->data->port_id, priv->sh->device_attr.orig_attr.max_sge);
1237 /* Allocate door-bell for types created with DevX. */
1238 if (tmpl->type != MLX5_RXQ_OBJ_TYPE_IBV) {
1239 struct mlx5_devx_dbr_page *dbr_page;
1242 dbr_offset = mlx5_get_dbr(dev, &dbr_page);
1245 rxq_ctrl->dbr_offset = dbr_offset;
1246 rxq_ctrl->dbr_umem_id = dbr_page->umem->umem_id;
1247 rxq_ctrl->dbr_umem_id_valid = 1;
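/*
 * The RQ doorbell record lives in a shared doorbell umem page; point
 * rq_db at the offset allocated above.
 */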
1248 rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
1249 (uintptr_t)rxq_ctrl->dbr_offset);
1251 if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV) {
1252 tmpl->wq = mlx5_ibv_wq_new(dev, priv, rxq_data, idx, wqe_n,
1255 DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
1256 dev->data->port_id, idx);
1260 /* Change queue state to ready. */
1261 mod = (struct ibv_wq_attr){
1262 .attr_mask = IBV_WQ_ATTR_STATE,
1263 .wq_state = IBV_WQS_RDY,
1265 ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
1268 "port %u Rx queue %u WQ state to IBV_WQS_RDY"
1269 " failed", dev->data->port_id, idx);
1273 obj.rwq.in = tmpl->wq;
1275 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_RWQ);
1280 rxq_data->wqes = rwq.buf;
1281 rxq_data->rq_db = rwq.dbrec;
1282 } else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
1283 struct mlx5_devx_modify_rq_attr rq_attr;
1285 memset(&rq_attr, 0, sizeof(rq_attr));
1286 tmpl->rq = mlx5_devx_rq_new(dev, idx, cq_info.cqn);
1288 DRV_LOG(ERR, "port %u Rx queue %u RQ creation failure",
1289 dev->data->port_id, idx);
1293 /* Change queue state to ready. */
1294 rq_attr.rq_state = MLX5_RQC_STATE_RST;
1295 rq_attr.state = MLX5_RQC_STATE_RDY;
1296 ret = mlx5_devx_cmd_modify_rq(tmpl->rq, &rq_attr);
1300 /* Fill the rings. */
1301 rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
1302 rxq_data->cq_db = cq_info.dbrec;
1303 rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
1304 rxq_data->cq_uar = cq_info.cq_uar;
1305 rxq_data->cqn = cq_info.cqn;
1306 rxq_data->cq_arm_sn = 0;
1307 mlx5_rxq_initialize(rxq_data);
1308 rxq_data->cq_ci = 0;
1309 DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
1310 idx, (void *)&tmpl);
1311 rte_atomic32_inc(&tmpl->refcnt);
1312 LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
1313 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1317 ret = rte_errno; /* Save rte_errno before cleanup. */
1318 if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV && tmpl->wq)
1319 claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
1320 else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ && tmpl->rq)
1321 claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
1323 claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
1325 claim_zero(mlx5_glue->destroy_comp_channel
1328 rte_errno = ret; /* Restore rte_errno. */
1330 if (type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
1331 rxq_release_rq_resources(rxq_ctrl);
1332 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1337 * Verify the Rx queue objects list is empty.
1340 * Pointer to Ethernet device.
1343 * The number of objects not released.
1346 mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
1348 struct mlx5_priv *priv = dev->data->dev_private;
1350 struct mlx5_rxq_obj *rxq_obj;
1352 LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
1353 DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
1354 dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
1361 * Callback function to initialize mbufs for Multi-Packet RQ.
1364 mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
1365 void *_m, unsigned int i __rte_unused)
1367 struct mlx5_mprq_buf *buf = _m;
1368 struct rte_mbuf_ext_shared_info *shinfo;
1369 unsigned int strd_n = (unsigned int)(uintptr_t)opaque_arg;
1372 memset(_m, 0, sizeof(*buf));
1374 rte_atomic16_set(&buf->refcnt, 1);
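/*
 * Give each stride its own shared-info descriptor pointing back to this
 * buffer, so mbufs attached to different strides can drop their
 * references independently.
 */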
1375 for (j = 0; j != strd_n; ++j) {
1376 shinfo = &buf->shinfos[j];
1377 shinfo->free_cb = mlx5_mprq_buf_free_cb;
1378 shinfo->fcb_opaque = buf;
1383 * Free mempool of Multi-Packet RQ.
1386 * Pointer to Ethernet device.
1389 * 0 on success, negative errno value on failure.
1392 mlx5_mprq_free_mp(struct rte_eth_dev *dev)
1394 struct mlx5_priv *priv = dev->data->dev_private;
1395 struct rte_mempool *mp = priv->mprq_mp;
1400 DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
1401 dev->data->port_id, mp->name);
1403 * If a buffer in the pool has been externally attached to an mbuf and it
1404 * is still in use by the application, destroying the Rx queue can spoil
1405 * the packet. It is unlikely to happen, but if the application dynamically
1406 * creates and destroys queues while holding Rx packets, it can happen.
1408 * TODO: It is unavoidable for now because the mempool for Multi-Packet
1409 * RQ isn't provided by application but managed by PMD.
1411 if (!rte_mempool_full(mp)) {
1413 "port %u mempool for Multi-Packet RQ is still in use",
1414 dev->data->port_id);
1418 rte_mempool_free(mp);
1419 /* Unset mempool for each Rx queue. */
1420 for (i = 0; i != priv->rxqs_n; ++i) {
1421 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1425 rxq->mprq_mp = NULL;
1427 priv->mprq_mp = NULL;
1432 * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
1433 * mempool. If already allocated, reuse it if there are enough elements.
1434 * Otherwise, resize it.
1437 * Pointer to Ethernet device.
1440 * 0 on success, negative errno value on failure.
1443 mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
1445 struct mlx5_priv *priv = dev->data->dev_private;
1446 struct rte_mempool *mp = priv->mprq_mp;
1447 char name[RTE_MEMPOOL_NAMESIZE];
1448 unsigned int desc = 0;
1449 unsigned int buf_len;
1450 unsigned int obj_num;
1451 unsigned int obj_size;
1452 unsigned int strd_num_n = 0;
1453 unsigned int strd_sz_n = 0;
1456 if (!mlx5_mprq_enabled(dev))
1458 /* Count the total number of descriptors configured. */
1459 for (i = 0; i != priv->rxqs_n; ++i) {
1460 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1464 desc += 1 << rxq->elts_n;
1465 /* Get the max number of strides. */
1466 if (strd_num_n < rxq->strd_num_n)
1467 strd_num_n = rxq->strd_num_n;
1468 /* Get the max size of a stride. */
1469 if (strd_sz_n < rxq->strd_sz_n)
1470 strd_sz_n = rxq->strd_sz_n;
1472 assert(strd_num_n && strd_sz_n);
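/*
 * Each mempool object holds the mlx5_mprq_buf header, one shared-info
 * descriptor per stride, the stride memory itself and mbuf headroom.
 */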
1473 buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
1474 obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
1475 sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
1477 * Received packets can be either memcpy'd or externally referenced. When
1478 * a packet is attached to an mbuf as an external buffer, it isn't
1479 * possible to predict how the buffers will be queued by the application,
1480 * so there is no way to pre-allocate exactly the needed buffers in
1481 * advance; enough buffers have to be prepared speculatively.
1483 * In the data path, if this Mempool is depleted, PMD will try to memcpy
1484 * received packets to buffers provided by application (rxq->mp) until
1485 * this Mempool gets available again.
1488 obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * priv->rxqs_n;
1490 * rte_mempool_create_empty() has a sanity check to refuse a large cache
1491 * size compared to the number of elements.
1492 * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so using a
1493 * constant number 2 instead.
1495 obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
1496 /* Check whether a mempool is already allocated and if it can be reused. */
1497 if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
1498 DRV_LOG(DEBUG, "port %u mempool %s is being reused",
1499 dev->data->port_id, mp->name);
1502 } else if (mp != NULL) {
1503 DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
1504 dev->data->port_id, mp->name);
1506 * If freeing fails, which means it may still be in use, there is no way
1507 * but to keep using the existing one. On buffer underrun,
1508 * packets will be memcpy'd instead of external buffer
1511 if (mlx5_mprq_free_mp(dev)) {
1512 if (mp->elt_size >= obj_size)
1518 snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
1519 mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
1520 0, NULL, NULL, mlx5_mprq_buf_init,
1521 (void *)(uintptr_t)(1 << strd_num_n),
1522 dev->device->numa_node, 0);
1525 "port %u failed to allocate a mempool for"
1526 " Multi-Packet RQ, count=%u, size=%u",
1527 dev->data->port_id, obj_num, obj_size);
1533 /* Set mempool for each Rx queue. */
1534 for (i = 0; i != priv->rxqs_n; ++i) {
1535 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1541 DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
1542 dev->data->port_id);
1546 #define MLX5_MAX_LRO_SIZE (UINT8_MAX * 256u)
1547 #define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
1548 sizeof(struct rte_vlan_hdr) * 2 + \
1549 sizeof(struct rte_ipv6_hdr)))
1551 * Adjust the maximum LRO message size.
1554 * Pointer to Ethernet device.
1555 * @param max_lro_size
1556 * The maximum size for LRO packet.
1559 mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint32_t max_lro_size)
1561 struct mlx5_priv *priv = dev->data->dev_private;
1563 if (priv->config.hca_attr.lro_max_msg_sz_mode ==
1564 MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
1565 MLX5_MAX_TCP_HDR_OFFSET)
1566 max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
1567 max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
1568 assert(max_lro_size >= 256u);
1569 max_lro_size /= 256u;
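/*
 * The LRO message size is programmed in 256-byte units; keep the
 * smallest value requested across the Rx queues.
 */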
1570 if (priv->max_lro_msg_size)
1571 priv->max_lro_msg_size =
1572 RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size);
1574 priv->max_lro_msg_size = max_lro_size;
1578 * Create a DPDK Rx queue.
1581 * Pointer to Ethernet device.
1585 * Number of descriptors to configure in queue.
1587 * NUMA socket on which memory must be allocated.
1590 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1592 struct mlx5_rxq_ctrl *
1593 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1594 unsigned int socket, const struct rte_eth_rxconf *conf,
1595 struct rte_mempool *mp)
1597 struct mlx5_priv *priv = dev->data->dev_private;
1598 struct mlx5_rxq_ctrl *tmpl;
1599 unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
1600 unsigned int mprq_stride_size;
1601 struct mlx5_dev_config *config = &priv->config;
1603 * An LRO packet may consume all the stride memory, hence we cannot
1604 * guarantee head-room. A new striding RQ feature may be added in CX6 DX
1605 * to allow head-room and tail-room for the LRO packets.
1607 unsigned int strd_headroom_en = mlx5_lro_on(dev) ? 0 : 1;
1609 * Always allocate extra slots, even if eventually
1610 * the vector Rx will not be used.
1613 desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
1614 uint64_t offloads = conf->offloads |
1615 dev->data->dev_conf.rxmode.offloads;
1616 const int mprq_en = mlx5_check_mprq_support(dev) > 0;
1617 unsigned int max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
1618 unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
1619 RTE_PKTMBUF_HEADROOM;
1621 if (non_scatter_min_mbuf_size > mb_len && !(offloads &
1622 DEV_RX_OFFLOAD_SCATTER)) {
1623 DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
1624 " configured and no enough mbuf space(%u) to contain "
1625 "the maximum RX packet length(%u) with head-room(%u)",
1626 dev->data->port_id, idx, mb_len, max_rx_pkt_len,
1627 RTE_PKTMBUF_HEADROOM);
1631 tmpl = rte_calloc_socket("RXQ", 1,
1633 desc_n * sizeof(struct rte_mbuf *),
1639 if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
1640 MLX5_MR_BTREE_CACHE_N, socket)) {
1641 /* rte_errno is already set. */
1644 tmpl->socket = socket;
1645 if (dev->data->dev_conf.intr_conf.rxq)
1648 * This Rx queue can be configured as a Multi-Packet RQ if all of the
1649 * following conditions are met:
1650 * - MPRQ is enabled.
1651 * - The number of descs is more than the number of strides.
1652 * - max_rx_pkt_len plus overhead is less than the max size of a
1654 * Otherwise, enable Rx scatter if necessary.
1656 mprq_stride_size = max_rx_pkt_len + RTE_PKTMBUF_HEADROOM *
1659 desc > (1U << config->mprq.stride_num_n) &&
1660 mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) {
1661 /* TODO: Rx scatter isn't supported yet. */
1662 tmpl->rxq.sges_n = 0;
1663 /* Trim the number of descs needed. */
1664 desc >>= config->mprq.stride_num_n;
1665 tmpl->rxq.strd_num_n = config->mprq.stride_num_n;
1666 tmpl->rxq.strd_sz_n = RTE_MAX(log2above(mprq_stride_size),
1667 config->mprq.min_stride_size_n);
1668 tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
1669 tmpl->rxq.strd_headroom_en = strd_headroom_en;
1670 tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(mb_len -
1671 RTE_PKTMBUF_HEADROOM, config->mprq.max_memcpy_len);
1672 mlx5_max_lro_msg_size_adjust(dev, RTE_MIN(max_rx_pkt_len,
1673 (1u << tmpl->rxq.strd_num_n) * (1u << tmpl->rxq.strd_sz_n)));
1675 "port %u Rx queue %u: Multi-Packet RQ is enabled"
1676 " strd_num_n = %u, strd_sz_n = %u",
1677 dev->data->port_id, idx,
1678 tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
1679 } else if (max_rx_pkt_len <= (mb_len - RTE_PKTMBUF_HEADROOM)) {
1680 tmpl->rxq.sges_n = 0;
1681 } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
1682 unsigned int size = non_scatter_min_mbuf_size;
1683 unsigned int sges_n;
1686 * Determine the number of SGEs needed for a full packet
1687 * and round it to the next power of two.
1689 sges_n = log2above((size / mb_len) + !!(size % mb_len));
1690 tmpl->rxq.sges_n = sges_n;
1691 /* Make sure rxq.sges_n did not overflow. */
1692 size = mb_len * (1 << tmpl->rxq.sges_n);
1693 size -= RTE_PKTMBUF_HEADROOM;
1694 if (size < max_rx_pkt_len) {
1696 "port %u too many SGEs (%u) needed to handle"
1697 " requested maximum packet size %u",
1701 rte_errno = EOVERFLOW;
1705 if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
1707 "port %u MPRQ is requested but cannot be enabled"
1708 " (requested: desc = %u, stride_sz = %u,"
1709 " supported: min_stride_num = %u, max_stride_sz = %u).",
1710 dev->data->port_id, desc, mprq_stride_size,
1711 (1 << config->mprq.stride_num_n),
1712 (1 << config->mprq.max_stride_size_n));
1713 DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
1714 dev->data->port_id, 1 << tmpl->rxq.sges_n);
1715 if (desc % (1 << tmpl->rxq.sges_n)) {
1717 "port %u number of Rx queue descriptors (%u) is not a"
1718 " multiple of SGEs per packet (%u)",
1721 1 << tmpl->rxq.sges_n);
1725 /* Toggle RX checksum offload if hardware supports it. */
1726 tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
1727 tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
1728 /* Configure VLAN stripping. */
1729 tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1730 /* By default, FCS (CRC) is stripped by hardware. */
1731 tmpl->rxq.crc_present = 0;
1732 if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
1733 if (config->hw_fcs_strip) {
1735 * RQs used for LRO-enabled TIRs should not be
1736 * configured to scatter the FCS.
1738 if (mlx5_lro_on(dev))
1740 "port %u CRC stripping has been "
1741 "disabled but will still be performed "
1742 "by hardware, because LRO is enabled",
1743 dev->data->port_id);
1745 tmpl->rxq.crc_present = 1;
1748 "port %u CRC stripping has been disabled but will"
1749 " still be performed by hardware, make sure MLNX_OFED"
1750 " and firmware are up to date",
1751 dev->data->port_id);
1755 "port %u CRC stripping is %s, %u bytes will be subtracted from"
1756 " incoming frames to hide it",
1758 tmpl->rxq.crc_present ? "disabled" : "enabled",
1759 tmpl->rxq.crc_present << 2);
1761 tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
1762 (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
1763 tmpl->rxq.port_id = dev->data->port_id;
1766 tmpl->rxq.elts_n = log2above(desc);
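/*
 * Replenishment threshold used by the vectorized Rx burst to decide when
 * to refill the RQ with fresh mbufs.
 */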
1767 tmpl->rxq.rq_repl_thresh =
1768 MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
1770 (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
1772 tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq;
1774 tmpl->rxq.idx = idx;
1775 rte_atomic32_inc(&tmpl->refcnt);
1776 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1787 * Pointer to Ethernet device.
1792 * A pointer to the queue if it exists, NULL otherwise.
1794 struct mlx5_rxq_ctrl *
1795 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
1797 struct mlx5_priv *priv = dev->data->dev_private;
1798 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1800 if ((*priv->rxqs)[idx]) {
1801 rxq_ctrl = container_of((*priv->rxqs)[idx],
1802 struct mlx5_rxq_ctrl,
1804 mlx5_rxq_obj_get(dev, idx);
1805 rte_atomic32_inc(&rxq_ctrl->refcnt);
1811 * Release a Rx queue.
1814 * Pointer to Ethernet device.
1819 * 1 while a reference on it exists, 0 when freed.
1822 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
1824 struct mlx5_priv *priv = dev->data->dev_private;
1825 struct mlx5_rxq_ctrl *rxq_ctrl;
1827 if (!(*priv->rxqs)[idx])
1829 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1830 assert(rxq_ctrl->priv);
1831 if (rxq_ctrl->obj && !mlx5_rxq_obj_release(rxq_ctrl->obj))
1832 rxq_ctrl->obj = NULL;
1833 if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
1834 if (rxq_ctrl->dbr_umem_id_valid)
1835 claim_zero(mlx5_release_dbr(dev, rxq_ctrl->dbr_umem_id,
1836 rxq_ctrl->dbr_offset));
1837 mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
1838 LIST_REMOVE(rxq_ctrl, next);
1840 (*priv->rxqs)[idx] = NULL;
1847 * Verify the Rx queue list is empty.
1850 * Pointer to Ethernet device.
1853 * The number of objects not released.
1856 mlx5_rxq_verify(struct rte_eth_dev *dev)
1858 struct mlx5_priv *priv = dev->data->dev_private;
1859 struct mlx5_rxq_ctrl *rxq_ctrl;
1862 LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
1863 DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
1864 dev->data->port_id, rxq_ctrl->rxq.idx);
1871 * Create an indirection table.
1874 * Pointer to Ethernet device.
1876 * Queues entering the indirection table.
1878 * Number of queues in the array.
1881 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
1883 static struct mlx5_ind_table_obj *
1884 mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
1885 uint32_t queues_n, enum mlx5_ind_tbl_type type)
1887 struct mlx5_priv *priv = dev->data->dev_private;
1888 struct mlx5_ind_table_obj *ind_tbl;
1889 unsigned int i = 0, j = 0, k = 0;
1891 ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
1892 queues_n * sizeof(uint16_t), 0);
1897 ind_tbl->type = type;
1898 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
1899 const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
1900 log2above(queues_n) :
1901 log2above(priv->config.ind_table_max_size);
1902 struct ibv_wq *wq[1 << wq_n];
1904 for (i = 0; i != queues_n; ++i) {
1905 struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
1909 wq[i] = rxq->obj->wq;
1910 ind_tbl->queues[i] = queues[i];
1912 ind_tbl->queues_n = queues_n;
1913 /* Finalise indirection table. */
1914 k = i; /* Retain value of i for use in error case. */
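/* Pad the table up to its power-of-two size by repeating the existing WQs. */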
1915 for (j = 0; k != (unsigned int)(1 << wq_n); ++k, ++j)
1917 ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
1919 &(struct ibv_rwq_ind_table_init_attr){
1920 .log_ind_tbl_size = wq_n,
1924 if (!ind_tbl->ind_table) {
1928 } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
1929 struct mlx5_devx_rqt_attr *rqt_attr = NULL;
1931 rqt_attr = rte_calloc(__func__, 1, sizeof(*rqt_attr) +
1932 queues_n * sizeof(uint16_t), 0);
1934 DRV_LOG(ERR, "port %u cannot allocate RQT resources",
1935 dev->data->port_id);
1939 rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
1940 rqt_attr->rqt_actual_size = queues_n;
1941 for (i = 0; i != queues_n; ++i) {
1942 struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
1946 rqt_attr->rq_list[i] = rxq->obj->rq->id;
1947 ind_tbl->queues[i] = queues[i];
1949 ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx,
1952 if (!ind_tbl->rqt) {
1953 DRV_LOG(ERR, "port %u cannot create DevX RQT",
1954 dev->data->port_id);
1958 ind_tbl->queues_n = queues_n;
1960 rte_atomic32_inc(&ind_tbl->refcnt);
1961 LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
1964 for (j = 0; j < i; j++)
1965 mlx5_rxq_release(dev, ind_tbl->queues[j]);
1967 DEBUG("port %u cannot create indirection table", dev->data->port_id);
1972 * Get an indirection table.
1975 * Pointer to Ethernet device.
1977 * Queues entering the indirection table.
1979 * Number of queues in the array.
1982 * An indirection table if found.
1984 static struct mlx5_ind_table_obj *
1985 mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
1988 struct mlx5_priv *priv = dev->data->dev_private;
1989 struct mlx5_ind_table_obj *ind_tbl;
1991 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1992 if ((ind_tbl->queues_n == queues_n) &&
1993 (memcmp(ind_tbl->queues, queues,
1994 ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
2001 rte_atomic32_inc(&ind_tbl->refcnt);
2002 for (i = 0; i != ind_tbl->queues_n; ++i)
2003 mlx5_rxq_get(dev, ind_tbl->queues[i]);
2009 * Release an indirection table.
2012 * Pointer to Ethernet device.
2014 * Indirection table to release.
2017 * 1 while a reference on it exists, 0 when freed.
2020 mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
2021 struct mlx5_ind_table_obj *ind_tbl)
2025 if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {
2026 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV)
2027 claim_zero(mlx5_glue->destroy_rwq_ind_table
2028 (ind_tbl->ind_table));
2029 else if (ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX)
2030 claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
2032 for (i = 0; i != ind_tbl->queues_n; ++i)
2033 claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
2034 if (!rte_atomic32_read(&ind_tbl->refcnt)) {
2035 LIST_REMOVE(ind_tbl, next);
2043 * Verify the indirection table list is empty.
2046 * Pointer to Ethernet device.
2049 * The number of objects not released.
2052 mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
2054 struct mlx5_priv *priv = dev->data->dev_private;
2055 struct mlx5_ind_table_obj *ind_tbl;
2058 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
2060 "port %u indirection table obj %p still referenced",
2061 dev->data->port_id, (void *)ind_tbl);
2068 * Create an Rx Hash queue.
2071 * Pointer to Ethernet device.
2073 * RSS key for the Rx hash queue.
2074 * @param rss_key_len
2076 * @param hash_fields
2077 * Verbs protocol hash field to make the RSS on.
2079 * Queues entering the hash queue. In case of empty hash_fields only the
2080 * first queue index will be taken for the indirection table.
2086 * Flow rule is relevant for LRO, i.e. contains IPv4/IPv6 and TCP.
2089 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2092 mlx5_hrxq_new(struct rte_eth_dev *dev,
2093 const uint8_t *rss_key, uint32_t rss_key_len,
2094 uint64_t hash_fields,
2095 const uint16_t *queues, uint32_t queues_n,
2096 int tunnel __rte_unused, int lro)
2098 struct mlx5_priv *priv = dev->data->dev_private;
2099 struct mlx5_hrxq *hrxq;
2100 struct ibv_qp *qp = NULL;
2101 struct mlx5_ind_table_obj *ind_tbl;
2103 struct mlx5_devx_obj *tir = NULL;
2105 queues_n = hash_fields ? queues_n : 1;
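/*
 * Without hash fields there is nothing to spread on; only the first
 * queue is needed for the indirection table.
 */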
2106 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2108 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[0]];
2109 struct mlx5_rxq_ctrl *rxq_ctrl =
2110 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
2111 enum mlx5_ind_tbl_type type;
2113 type = rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV ?
2114 MLX5_IND_TBL_TYPE_IBV : MLX5_IND_TBL_TYPE_DEVX;
2115 ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n, type);
2121 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
2122 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
2123 struct mlx5dv_qp_init_attr qp_init_attr;
2125 memset(&qp_init_attr, 0, sizeof(qp_init_attr));
2127 qp_init_attr.comp_mask =
2128 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
2129 qp_init_attr.create_flags =
2130 MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
2132 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2133 if (dev->data->dev_conf.lpbk_mode) {
2135 * Allow packets sent from NIC loopback
2136 * without source MAC check.
2138 qp_init_attr.comp_mask |=
2139 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
2140 qp_init_attr.create_flags |=
2141 MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
2144 qp = mlx5_glue->dv_create_qp
2146 &(struct ibv_qp_init_attr_ex){
2147 .qp_type = IBV_QPT_RAW_PACKET,
2149 IBV_QP_INIT_ATTR_PD |
2150 IBV_QP_INIT_ATTR_IND_TABLE |
2151 IBV_QP_INIT_ATTR_RX_HASH,
2152 .rx_hash_conf = (struct ibv_rx_hash_conf){
2154 IBV_RX_HASH_FUNC_TOEPLITZ,
2155 .rx_hash_key_len = rss_key_len,
2157 (void *)(uintptr_t)rss_key,
2158 .rx_hash_fields_mask = hash_fields,
2160 .rwq_ind_tbl = ind_tbl->ind_table,
2165 qp = mlx5_glue->create_qp_ex
2167 &(struct ibv_qp_init_attr_ex){
2168 .qp_type = IBV_QPT_RAW_PACKET,
2170 IBV_QP_INIT_ATTR_PD |
2171 IBV_QP_INIT_ATTR_IND_TABLE |
2172 IBV_QP_INIT_ATTR_RX_HASH,
2173 .rx_hash_conf = (struct ibv_rx_hash_conf){
2175 IBV_RX_HASH_FUNC_TOEPLITZ,
2176 .rx_hash_key_len = rss_key_len,
2178 (void *)(uintptr_t)rss_key,
2179 .rx_hash_fields_mask = hash_fields,
2181 .rwq_ind_tbl = ind_tbl->ind_table,
2189 } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
2190 struct mlx5_devx_tir_attr tir_attr;
2192 memset(&tir_attr, 0, sizeof(tir_attr));
2193 tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
2194 tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
2195 memcpy(&tir_attr.rx_hash_field_selector_outer, &hash_fields,
2197 tir_attr.transport_domain = priv->sh->tdn;
2198 memcpy(tir_attr.rx_hash_toeplitz_key, rss_key, rss_key_len);
2199 tir_attr.indirect_table = ind_tbl->rqt->id;
2200 if (dev->data->dev_conf.lpbk_mode)
2201 tir_attr.self_lb_block =
2202 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
2204 tir_attr.lro_timeout_period_usecs =
2205 priv->config.lro.timeout;
2206 tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
2207 tir_attr.lro_enable_mask = lro;
2209 tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
2211 DRV_LOG(ERR, "port %u cannot create DevX TIR",
2212 dev->data->port_id);
2217 hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
2220 hrxq->ind_table = ind_tbl;
2221 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
2223 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2225 mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
2226 if (!hrxq->action) {
2231 } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
2233 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2234 hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir
2236 if (!hrxq->action) {
2242 hrxq->rss_key_len = rss_key_len;
2243 hrxq->hash_fields = hash_fields;
2244 memcpy(hrxq->rss_key, rss_key, rss_key_len);
2245 rte_atomic32_inc(&hrxq->refcnt);
2246 LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
2249 err = rte_errno; /* Save rte_errno before cleanup. */
2250 mlx5_ind_table_obj_release(dev, ind_tbl);
2252 claim_zero(mlx5_glue->destroy_qp(qp));
2254 claim_zero(mlx5_devx_cmd_destroy(tir));
2255 rte_errno = err; /* Restore rte_errno. */
2260 * Get an Rx Hash queue.
2263 * Pointer to Ethernet device.
2265 * RSS configuration for the Rx hash queue.
2267 * Queues entering the hash queue. In case of empty hash_fields only the
2268 * first queue index will be taken for the indirection table.
2273 * A hash Rx queue on success.
2276 mlx5_hrxq_get(struct rte_eth_dev *dev,
2277 const uint8_t *rss_key, uint32_t rss_key_len,
2278 uint64_t hash_fields,
2279 const uint16_t *queues, uint32_t queues_n)
2281 struct mlx5_priv *priv = dev->data->dev_private;
2282 struct mlx5_hrxq *hrxq;
2284 queues_n = hash_fields ? queues_n : 1;
2285 LIST_FOREACH(hrxq, &priv->hrxqs, next) {
2286 struct mlx5_ind_table_obj *ind_tbl;
2288 if (hrxq->rss_key_len != rss_key_len)
2290 if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
2292 if (hrxq->hash_fields != hash_fields)
2294 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2297 if (ind_tbl != hrxq->ind_table) {
2298 mlx5_ind_table_obj_release(dev, ind_tbl);
2301 rte_atomic32_inc(&hrxq->refcnt);
2308 * Release the hash Rx queue.
2311 * Pointer to Ethernet device.
2313 * Pointer to Hash Rx queue to release.
2316 * 1 while a reference on it exists, 0 when freed.
2319 mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
2321 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2322 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2323 mlx5_glue->destroy_flow_action(hrxq->action);
2325 if (hrxq->ind_table->type == MLX5_IND_TBL_TYPE_IBV)
2326 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2327 else /* hrxq->ind_table->type == MLX5_IND_TBL_TYPE_DEVX */
2328 claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
2329 mlx5_ind_table_obj_release(dev, hrxq->ind_table);
2330 LIST_REMOVE(hrxq, next);
2334 claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
2339 * Verify the hash Rx queue list is empty.
2342 * Pointer to Ethernet device.
2345 * The number of objects not released.
2348 mlx5_hrxq_verify(struct rte_eth_dev *dev)
2350 struct mlx5_priv *priv = dev->data->dev_private;
2351 struct mlx5_hrxq *hrxq;
2354 LIST_FOREACH(hrxq, &priv->hrxqs, next) {
2356 "port %u hash Rx queue %p still referenced",
2357 dev->data->port_id, (void *)hrxq);
2364 * Create a drop Rx queue Verbs/DevX object.
2367 * Pointer to Ethernet device.
2370 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2372 static struct mlx5_rxq_obj *
2373 mlx5_rxq_obj_drop_new(struct rte_eth_dev *dev)
2375 struct mlx5_priv *priv = dev->data->dev_private;
2376 struct ibv_context *ctx = priv->sh->ctx;
2378 struct ibv_wq *wq = NULL;
2379 struct mlx5_rxq_obj *rxq;
2381 if (priv->drop_queue.rxq)
2382 return priv->drop_queue.rxq;
2383 cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
2385 DEBUG("port %u cannot allocate CQ for drop queue",
2386 dev->data->port_id);
2390 wq = mlx5_glue->create_wq(ctx,
2391 &(struct ibv_wq_init_attr){
2392 .wq_type = IBV_WQT_RQ,
2399 DEBUG("port %u cannot allocate WQ for drop queue",
2400 dev->data->port_id);
2404 rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0);
2406 DEBUG("port %u cannot allocate drop Rx queue memory",
2407 dev->data->port_id);
2413 priv->drop_queue.rxq = rxq;
2417 claim_zero(mlx5_glue->destroy_wq(wq));
2419 claim_zero(mlx5_glue->destroy_cq(cq));
2424 * Release a drop Rx queue Verbs/DevX object.
2427 * Pointer to Ethernet device.
2430 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2433 mlx5_rxq_obj_drop_release(struct rte_eth_dev *dev)
2435 struct mlx5_priv *priv = dev->data->dev_private;
2436 struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
2439 claim_zero(mlx5_glue->destroy_wq(rxq->wq));
2441 claim_zero(mlx5_glue->destroy_cq(rxq->cq));
2443 priv->drop_queue.rxq = NULL;
2447 * Create a drop indirection table.
2450 * Pointer to Ethernet device.
2453 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2455 static struct mlx5_ind_table_obj *
2456 mlx5_ind_table_obj_drop_new(struct rte_eth_dev *dev)
2458 struct mlx5_priv *priv = dev->data->dev_private;
2459 struct mlx5_ind_table_obj *ind_tbl;
2460 struct mlx5_rxq_obj *rxq;
2461 struct mlx5_ind_table_obj tmpl;
2463 rxq = mlx5_rxq_obj_drop_new(dev);
2466 tmpl.ind_table = mlx5_glue->create_rwq_ind_table
2468 &(struct ibv_rwq_ind_table_init_attr){
2469 .log_ind_tbl_size = 0,
2470 .ind_tbl = &rxq->wq,
2473 if (!tmpl.ind_table) {
2474 DEBUG("port %u cannot allocate indirection table for drop"
2476 dev->data->port_id);
2480 ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0);
2485 ind_tbl->ind_table = tmpl.ind_table;
2488 mlx5_rxq_obj_drop_release(dev);
2493 * Release a drop indirection table.
2496 * Pointer to Ethernet device.
2499 mlx5_ind_table_obj_drop_release(struct rte_eth_dev *dev)
2501 struct mlx5_priv *priv = dev->data->dev_private;
2502 struct mlx5_ind_table_obj *ind_tbl = priv->drop_queue.hrxq->ind_table;
2504 claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
2505 mlx5_rxq_obj_drop_release(dev);
2507 priv->drop_queue.hrxq->ind_table = NULL;
2511 * Create a drop Rx Hash queue.
2514 * Pointer to Ethernet device.
2517 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2520 mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
2522 struct mlx5_priv *priv = dev->data->dev_private;
2523 struct mlx5_ind_table_obj *ind_tbl;
2525 struct mlx5_hrxq *hrxq;
2527 if (priv->drop_queue.hrxq) {
2528 rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
2529 return priv->drop_queue.hrxq;
2531 ind_tbl = mlx5_ind_table_obj_drop_new(dev);
2534 qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
2535 &(struct ibv_qp_init_attr_ex){
2536 .qp_type = IBV_QPT_RAW_PACKET,
2538 IBV_QP_INIT_ATTR_PD |
2539 IBV_QP_INIT_ATTR_IND_TABLE |
2540 IBV_QP_INIT_ATTR_RX_HASH,
2541 .rx_hash_conf = (struct ibv_rx_hash_conf){
2543 IBV_RX_HASH_FUNC_TOEPLITZ,
2544 .rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
2545 .rx_hash_key = rss_hash_default_key,
2546 .rx_hash_fields_mask = 0,
2548 .rwq_ind_tbl = ind_tbl->ind_table,
2552 DEBUG("port %u cannot allocate QP for drop queue",
2553 dev->data->port_id);
2557 hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
2560 "port %u cannot allocate memory for drop queue",
2561 dev->data->port_id);
2565 hrxq->ind_table = ind_tbl;
2567 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2568 hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
2569 if (!hrxq->action) {
2574 priv->drop_queue.hrxq = hrxq;
2575 rte_atomic32_set(&hrxq->refcnt, 1);
2579 mlx5_ind_table_obj_drop_release(dev);
2584 * Release a drop hash Rx queue.
2587 * Pointer to Ethernet device.
2590 mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
2592 struct mlx5_priv *priv = dev->data->dev_private;
2593 struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
2595 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2596 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2597 mlx5_glue->destroy_flow_action(hrxq->action);
2599 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2600 mlx5_ind_table_obj_drop_release(dev);
2602 priv->drop_queue.hrxq = NULL;