1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
12 #include <sys/queue.h>
15 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
17 #pragma GCC diagnostic ignored "-Wpedantic"
19 #include <infiniband/verbs.h>
20 #include <infiniband/mlx5dv.h>
22 #pragma GCC diagnostic error "-Wpedantic"
26 #include <rte_malloc.h>
27 #include <rte_ethdev_driver.h>
28 #include <rte_common.h>
29 #include <rte_interrupts.h>
30 #include <rte_debug.h>
34 #include "mlx5_rxtx.h"
35 #include "mlx5_utils.h"
36 #include "mlx5_autoconf.h"
37 #include "mlx5_defs.h"
38 #include "mlx5_glue.h"
40 /* Default RSS hash key also used for ConnectX-3. */
41 uint8_t rss_hash_default_key[] = {
42 0x2c, 0xc6, 0x81, 0xd1,
43 0x5b, 0xdb, 0xf4, 0xf7,
44 0xfc, 0xa2, 0x83, 0x19,
45 0xdb, 0x1a, 0x3e, 0x94,
46 0x6b, 0x9e, 0x38, 0xd9,
47 0x2c, 0x9c, 0x03, 0xd1,
48 0xad, 0x99, 0x44, 0xa7,
49 0xd9, 0x56, 0x3d, 0x59,
50 0x06, 0x3c, 0x25, 0xf3,
51 0xfc, 0x1f, 0xdc, 0x2a,
54 /* Length of the default RSS hash key. */
55 static_assert(MLX5_RSS_HASH_KEY_LEN ==
56 (unsigned int)sizeof(rss_hash_default_key),
57 "wrong RSS default key size.");
60 * Check whether Multi-Packet RQ can be enabled for the device.
63 * Pointer to Ethernet device.
66 * 1 if supported, negative errno value if not.
69 mlx5_check_mprq_support(struct rte_eth_dev *dev)
71 struct mlx5_priv *priv = dev->data->dev_private;
73 if (priv->config.mprq.enabled &&
74 priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
80 * Check whether Multi-Packet RQ is enabled for the Rx queue.
83 * Pointer to receive queue structure.
86 * 0 if disabled, otherwise enabled.
89 mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
91 return rxq->strd_num_n > 0;
95 * Check whether Multi-Packet RQ is enabled for the device.
98 * Pointer to Ethernet device.
101 * 0 if disabled, otherwise enabled.
104 mlx5_mprq_enabled(struct rte_eth_dev *dev)
106 struct mlx5_priv *priv = dev->data->dev_private;
111 if (mlx5_check_mprq_support(dev) < 0)
113 /* All the configured queues should be enabled. */
114 for (i = 0; i < priv->rxqs_n; ++i) {
115 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
116 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
117 (rxq, struct mlx5_rxq_ctrl, rxq);
119 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
122 if (mlx5_rxq_mprq_enabled(rxq))
125 /* Multi-Packet RQ can't be partially configured. */
126 assert(n == 0 || n == n_ibv);
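/*
 * Illustrative note (not part of the driver): Multi-Packet RQ is an opt-in
 * feature controlled by device arguments documented in the mlx5 guide, e.g.
 * on the EAL command line (PCI address and values below are only an example):
 *
 *   -w 0000:03:00.0,mprq_en=1,rxqs_min_mprq=2,mprq_max_memcpy_len=128
 *
 * mlx5_check_mprq_support() above consumes these through config.mprq.enabled
 * and config.mprq.min_rxqs_num.
 */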
131 * Allocate RX queue elements for Multi-Packet RQ.
134 * Pointer to RX queue structure.
137 * 0 on success, a negative errno value otherwise and rte_errno is set.
140 rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
142 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
143 unsigned int wqe_n = 1 << rxq->elts_n;
147 /* Iterate on segments. */
148 for (i = 0; i <= wqe_n; ++i) {
149 struct mlx5_mprq_buf *buf;
151 if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
152 DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
157 (*rxq->mprq_bufs)[i] = buf;
159 rxq->mprq_repl = buf;
162 "port %u Rx queue %u allocated and configured %u segments",
163 rxq->port_id, rxq->idx, wqe_n);
166 err = rte_errno; /* Save rte_errno before cleanup. */
168 for (i = 0; (i != wqe_n); ++i) {
169 if ((*rxq->mprq_bufs)[i] != NULL)
170 rte_mempool_put(rxq->mprq_mp,
171 (*rxq->mprq_bufs)[i]);
172 (*rxq->mprq_bufs)[i] = NULL;
174 DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
175 rxq->port_id, rxq->idx);
176 rte_errno = err; /* Restore rte_errno. */
181 * Allocate RX queue elements for Single-Packet RQ.
184 * Pointer to RX queue structure.
187 * 0 on success, errno value on failure.
190 rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
192 const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
193 unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
197 /* Iterate on segments. */
198 for (i = 0; (i != elts_n); ++i) {
199 struct rte_mbuf *buf;
201 buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
203 DRV_LOG(ERR, "port %u empty mbuf pool",
204 PORT_ID(rxq_ctrl->priv));
208 /* Headroom is reserved by rte_pktmbuf_alloc(). */
209 assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
210 /* Buffer is supposed to be empty. */
211 assert(rte_pktmbuf_data_len(buf) == 0);
212 assert(rte_pktmbuf_pkt_len(buf) == 0);
214 /* Only the first segment keeps headroom. */
216 SET_DATA_OFF(buf, 0);
217 PORT(buf) = rxq_ctrl->rxq.port_id;
218 DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
219 PKT_LEN(buf) = DATA_LEN(buf);
221 (*rxq_ctrl->rxq.elts)[i] = buf;
223 /* If Rx vector is activated. */
224 if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
225 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
226 struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
229 /* Initialize default rearm_data for vPMD. */
230 mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
231 rte_mbuf_refcnt_set(mbuf_init, 1);
232 mbuf_init->nb_segs = 1;
233 mbuf_init->port = rxq->port_id;
235 * prevent compiler reordering:
236 * rearm_data covers previous fields.
238 rte_compiler_barrier();
239 rxq->mbuf_initializer =
240 *(uint64_t *)&mbuf_init->rearm_data;
241 /* Padding with a fake mbuf for vectorized Rx. */
242 for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
243 (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
246 "port %u Rx queue %u allocated and configured %u segments"
248 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
249 elts_n / (1 << rxq_ctrl->rxq.sges_n));
252 err = rte_errno; /* Save rte_errno before cleanup. */
254 for (i = 0; (i != elts_n); ++i) {
255 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
256 rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
257 (*rxq_ctrl->rxq.elts)[i] = NULL;
259 DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
260 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
261 rte_errno = err; /* Restore rte_errno. */
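/*
 * Illustrative sketch (not part of the driver): the 64-bit mbuf_initializer
 * template built in rxq_alloc_elts_sprq() lets the vectorized Rx path reset
 * an mbuf with a single 8-byte store instead of writing data_off, refcnt,
 * nb_segs and port individually. A scalar equivalent of that store would
 * look roughly as follows ("m" is a hypothetical mbuf being rearmed):
 */
#if 0
static inline void
example_rearm_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *m)
{
	/* One store covers data_off, refcnt, nb_segs and port. */
	*(uint64_t *)&m->rearm_data = rxq->mbuf_initializer;
}
#endif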
266 * Allocate RX queue elements.
269 * Pointer to RX queue structure.
272 * 0 on success, errno value on failure.
275 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
277 return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
278 rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl);
282 * Free RX queue elements for Multi-Packet RQ.
285 * Pointer to RX queue structure.
288 rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
290 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
293 DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs",
294 rxq->port_id, rxq->idx);
295 if (rxq->mprq_bufs == NULL)
297 assert(mlx5_rxq_check_vec_support(rxq) < 0);
298 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
299 if ((*rxq->mprq_bufs)[i] != NULL)
300 mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
301 (*rxq->mprq_bufs)[i] = NULL;
303 if (rxq->mprq_repl != NULL) {
304 mlx5_mprq_buf_free(rxq->mprq_repl);
305 rxq->mprq_repl = NULL;
310 * Free RX queue elements for Single-Packet RQ.
313 * Pointer to RX queue structure.
316 rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
318 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
319 const uint16_t q_n = (1 << rxq->elts_n);
320 const uint16_t q_mask = q_n - 1;
321 uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
324 DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
325 PORT_ID(rxq_ctrl->priv), rxq->idx);
326 if (rxq->elts == NULL)
329 * Some mbufs in the ring still belong to the application; they cannot be
332 if (mlx5_rxq_check_vec_support(rxq) > 0) {
333 for (i = 0; i < used; ++i)
334 (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
335 rxq->rq_pi = rxq->rq_ci;
337 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
338 if ((*rxq->elts)[i] != NULL)
339 rte_pktmbuf_free_seg((*rxq->elts)[i]);
340 (*rxq->elts)[i] = NULL;
345 * Free RX queue elements.
348 * Pointer to RX queue structure.
351 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
353 if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
354 rxq_free_elts_mprq(rxq_ctrl);
356 rxq_free_elts_sprq(rxq_ctrl);
360 * Returns the per-queue supported offloads.
363 * Pointer to Ethernet device.
366 * Supported Rx offloads.
369 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
371 struct mlx5_priv *priv = dev->data->dev_private;
372 struct mlx5_dev_config *config = &priv->config;
373 uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
374 DEV_RX_OFFLOAD_TIMESTAMP |
375 DEV_RX_OFFLOAD_JUMBO_FRAME);
377 if (config->hw_fcs_strip)
378 offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
381 offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
382 DEV_RX_OFFLOAD_UDP_CKSUM |
383 DEV_RX_OFFLOAD_TCP_CKSUM);
384 if (config->hw_vlan_strip)
385 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
386 if (MLX5_LRO_SUPPORTED(dev))
387 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
393 * Returns the per-port supported offloads.
396 * Supported Rx offloads.
399 mlx5_get_rx_port_offloads(void)
401 uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
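/*
 * Illustrative sketch (not part of the driver): an application combines the
 * per-queue and per-port capabilities reported by the two functions above
 * when filling struct rte_eth_conf. Names and queue counts below are
 * hypothetical.
 */
#if 0
static int
example_enable_rx_offloads(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf = { 0 };

	rte_eth_dev_info_get(port_id, &dev_info);
	/* Request checksum and VLAN stripping only if the PMD reports them. */
	port_conf.rxmode.offloads = dev_info.rx_offload_capa &
				    (DEV_RX_OFFLOAD_CHECKSUM |
				     DEV_RX_OFFLOAD_VLAN_STRIP);
	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
}
#endif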
407 * Verify if the queue can be released.
410 * Pointer to Ethernet device.
415 * 1 if the queue can be released
416 * 0 if the queue cannot be released because there are references to it.
417 * A negative errno value is returned and rte_errno is set if the queue doesn't exist.
420 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
422 struct mlx5_priv *priv = dev->data->dev_private;
423 struct mlx5_rxq_ctrl *rxq_ctrl;
425 if (!(*priv->rxqs)[idx]) {
429 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
430 return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
434 * Rx queue presetup checks.
437 * Pointer to Ethernet device structure.
441 * Number of descriptors to configure in queue.
444 * 0 on success, a negative errno value otherwise and rte_errno is set.
447 mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc)
449 struct mlx5_priv *priv = dev->data->dev_private;
451 if (!rte_is_power_of_2(desc)) {
452 desc = 1 << log2above(desc);
454 "port %u increased number of descriptors in Rx queue %u"
455 " to the next power of two (%d)",
456 dev->data->port_id, idx, desc);
458 DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
459 dev->data->port_id, idx, desc);
460 if (idx >= priv->rxqs_n) {
461 DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
462 dev->data->port_id, idx, priv->rxqs_n);
463 rte_errno = EOVERFLOW;
466 if (!mlx5_rxq_releasable(dev, idx)) {
467 DRV_LOG(ERR, "port %u unable to release queue index %u",
468 dev->data->port_id, idx);
472 mlx5_rxq_release(dev, idx);
479 * Pointer to Ethernet device structure.
483 * Number of descriptors to configure in queue.
485 * NUMA socket on which memory must be allocated.
487 * Thresholds parameters.
489 * Memory pool for buffer allocations.
492 * 0 on success, a negative errno value otherwise and rte_errno is set.
495 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
496 unsigned int socket, const struct rte_eth_rxconf *conf,
497 struct rte_mempool *mp)
499 struct mlx5_priv *priv = dev->data->dev_private;
500 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
501 struct mlx5_rxq_ctrl *rxq_ctrl =
502 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
505 res = mlx5_rx_queue_pre_setup(dev, idx, desc);
508 rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
510 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
511 dev->data->port_id, idx);
515 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
516 dev->data->port_id, idx);
517 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
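/*
 * Illustrative sketch (not part of the driver): the callback above is what
 * runs when an application calls the generic setup function, for example
 * (descriptor count and names are hypothetical):
 */
#if 0
static int
example_setup_rxq(uint16_t port_id, uint16_t queue_id, struct rte_mempool *mp)
{
	/* Non-power-of-two descriptor counts are rounded up by the PMD. */
	return rte_eth_rx_queue_setup(port_id, queue_id, 512,
				      rte_eth_dev_socket_id(port_id),
				      NULL /* default rte_eth_rxconf */, mp);
}
#endif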
524 * Pointer to Ethernet device structure.
528 * Number of descriptors to configure in queue.
529 * @param hairpin_conf
530 * Hairpin configuration parameters.
533 * 0 on success, a negative errno value otherwise and rte_errno is set.
536 mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
538 const struct rte_eth_hairpin_conf *hairpin_conf)
540 struct mlx5_priv *priv = dev->data->dev_private;
541 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
542 struct mlx5_rxq_ctrl *rxq_ctrl =
543 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
546 res = mlx5_rx_queue_pre_setup(dev, idx, desc);
549 if (hairpin_conf->peer_count != 1 ||
550 hairpin_conf->peers[0].port != dev->data->port_id ||
551 hairpin_conf->peers[0].queue >= priv->txqs_n) {
552 DRV_LOG(ERR, "port %u unable to setup hairpin queue index %u "
553 " invalid hairpind configuration", dev->data->port_id,
558 rxq_ctrl = mlx5_rxq_hairpin_new(dev, idx, desc, hairpin_conf);
560 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
561 dev->data->port_id, idx);
565 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
566 dev->data->port_id, idx);
567 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
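/*
 * Illustrative sketch (not part of the driver): the checks above require a
 * single peer on the same port whose queue index is a valid Tx queue. A
 * matching application call could look like this (names and the descriptor
 * count are hypothetical):
 */
#if 0
static int
example_setup_hairpin_rxq(uint16_t port_id, uint16_t rxq_id, uint16_t peer_txq)
{
	struct rte_eth_hairpin_conf conf = {
		.peer_count = 1,
		.peers[0] = { .port = port_id, .queue = peer_txq },
	};

	return rte_eth_rx_hairpin_queue_setup(port_id, rxq_id, 256, &conf);
}
#endif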
572 * DPDK callback to release a RX queue.
575 * Generic RX queue pointer.
578 mlx5_rx_queue_release(void *dpdk_rxq)
580 struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
581 struct mlx5_rxq_ctrl *rxq_ctrl;
582 struct mlx5_priv *priv;
586 rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
587 priv = rxq_ctrl->priv;
588 if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
589 rte_panic("port %u Rx queue %u is still used by a flow and"
590 " cannot be removed\n",
591 PORT_ID(priv), rxq->idx);
592 mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
596 * Get an Rx queue Verbs/DevX object.
599 * Pointer to Ethernet device.
601 * Queue index in DPDK Rx queue array
604 * The Verbs/DevX object if it exists.
606 static struct mlx5_rxq_obj *
607 mlx5_rxq_obj_get(struct rte_eth_dev *dev, uint16_t idx)
609 struct mlx5_priv *priv = dev->data->dev_private;
610 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
611 struct mlx5_rxq_ctrl *rxq_ctrl;
613 if (idx >= priv->rxqs_n)
617 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
619 rte_atomic32_inc(&rxq_ctrl->obj->refcnt);
620 return rxq_ctrl->obj;
624 * Release the resources allocated for an RQ DevX object.
627 * DevX Rx queue object.
630 rxq_release_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
632 if (rxq_ctrl->rxq.wqes) {
633 rte_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
634 rxq_ctrl->rxq.wqes = NULL;
636 if (rxq_ctrl->wq_umem) {
637 mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
638 rxq_ctrl->wq_umem = NULL;
643 * Release an Rx hairpin related resources.
646 * Hairpin Rx queue object.
649 rxq_obj_hairpin_release(struct mlx5_rxq_obj *rxq_obj)
651 struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
654 rq_attr.state = MLX5_RQC_STATE_RST;
655 rq_attr.rq_state = MLX5_RQC_STATE_RDY;
656 mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
657 claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
661 * Release an Rx verbs/DevX queue object.
664 * Verbs/DevX Rx queue object.
667 * 1 while a reference on it exists, 0 when freed.
670 mlx5_rxq_obj_release(struct mlx5_rxq_obj *rxq_obj)
673 if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_IBV)
676 if (rte_atomic32_dec_and_test(&rxq_obj->refcnt)) {
677 switch (rxq_obj->type) {
678 case MLX5_RXQ_OBJ_TYPE_IBV:
679 rxq_free_elts(rxq_obj->rxq_ctrl);
680 claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
681 claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
683 case MLX5_RXQ_OBJ_TYPE_DEVX_RQ:
684 rxq_free_elts(rxq_obj->rxq_ctrl);
685 claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
686 rxq_release_rq_resources(rxq_obj->rxq_ctrl);
687 claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
689 case MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN:
690 rxq_obj_hairpin_release(rxq_obj);
693 if (rxq_obj->channel)
694 claim_zero(mlx5_glue->destroy_comp_channel
696 LIST_REMOVE(rxq_obj, next);
704 * Allocate queue vector and fill epoll fd list for Rx interrupts.
707 * Pointer to Ethernet device.
710 * 0 on success, a negative errno value otherwise and rte_errno is set.
713 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
715 struct mlx5_priv *priv = dev->data->dev_private;
717 unsigned int rxqs_n = priv->rxqs_n;
718 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
719 unsigned int count = 0;
720 struct rte_intr_handle *intr_handle = dev->intr_handle;
722 if (!dev->data->dev_conf.intr_conf.rxq)
724 mlx5_rx_intr_vec_disable(dev);
725 intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
726 if (intr_handle->intr_vec == NULL) {
728 "port %u failed to allocate memory for interrupt"
729 " vector, Rx interrupts will not be supported",
734 intr_handle->type = RTE_INTR_HANDLE_EXT;
735 for (i = 0; i != n; ++i) {
736 /* This rxq obj must not be released in this function. */
737 struct mlx5_rxq_obj *rxq_obj = mlx5_rxq_obj_get(dev, i);
742 /* Skip queues that cannot request interrupts. */
743 if (!rxq_obj || !rxq_obj->channel) {
744 /* Use invalid intr_vec[] index to disable entry. */
745 intr_handle->intr_vec[i] =
746 RTE_INTR_VEC_RXTX_OFFSET +
747 RTE_MAX_RXTX_INTR_VEC_ID;
750 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
752 "port %u too many Rx queues for interrupt"
753 " vector size (%d), Rx interrupts cannot be"
755 dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
756 mlx5_rx_intr_vec_disable(dev);
760 fd = rxq_obj->channel->fd;
761 flags = fcntl(fd, F_GETFL);
762 rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
766 "port %u failed to make Rx interrupt file"
767 " descriptor %d non-blocking for queue index"
769 dev->data->port_id, fd, i);
770 mlx5_rx_intr_vec_disable(dev);
773 intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
774 intr_handle->efds[count] = fd;
778 mlx5_rx_intr_vec_disable(dev);
780 intr_handle->nb_efd = count;
785 * Clean up Rx interrupts handler.
788 * Pointer to Ethernet device.
791 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
793 struct mlx5_priv *priv = dev->data->dev_private;
794 struct rte_intr_handle *intr_handle = dev->intr_handle;
796 unsigned int rxqs_n = priv->rxqs_n;
797 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
799 if (!dev->data->dev_conf.intr_conf.rxq)
801 if (!intr_handle->intr_vec)
803 for (i = 0; i != n; ++i) {
804 struct mlx5_rxq_ctrl *rxq_ctrl;
805 struct mlx5_rxq_data *rxq_data;
807 if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
808 RTE_MAX_RXTX_INTR_VEC_ID)
811 * Need to access the queue directly to release the reference
812 * kept in mlx5_rx_intr_vec_enable().
814 rxq_data = (*priv->rxqs)[i];
815 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
817 mlx5_rxq_obj_release(rxq_ctrl->obj);
820 rte_intr_free_epoll_fd(intr_handle);
821 if (intr_handle->intr_vec)
822 free(intr_handle->intr_vec);
823 intr_handle->nb_efd = 0;
824 intr_handle->intr_vec = NULL;
828 * MLX5 CQ notification.
831 * Pointer to receive queue structure.
833 * Sequence number per receive queue.
836 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
839 uint32_t doorbell_hi;
841 void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
843 sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
844 doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
845 doorbell = (uint64_t)doorbell_hi << 32;
846 doorbell |= rxq->cqn;
847 rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
848 mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
849 cq_db_reg, rxq->uar_lock_cq);
853 * DPDK callback for Rx queue interrupt enable.
856 * Pointer to Ethernet device structure.
861 * 0 on success, a negative errno value otherwise and rte_errno is set.
864 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
866 struct mlx5_priv *priv = dev->data->dev_private;
867 struct mlx5_rxq_data *rxq_data;
868 struct mlx5_rxq_ctrl *rxq_ctrl;
870 rxq_data = (*priv->rxqs)[rx_queue_id];
875 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
877 struct mlx5_rxq_obj *rxq_obj;
879 rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
884 mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
885 mlx5_rxq_obj_release(rxq_obj);
891 * DPDK callback for Rx queue interrupt disable.
894 * Pointer to Ethernet device structure.
899 * 0 on success, a negative errno value otherwise and rte_errno is set.
902 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
904 struct mlx5_priv *priv = dev->data->dev_private;
905 struct mlx5_rxq_data *rxq_data;
906 struct mlx5_rxq_ctrl *rxq_ctrl;
907 struct mlx5_rxq_obj *rxq_obj = NULL;
908 struct ibv_cq *ev_cq;
912 rxq_data = (*priv->rxqs)[rx_queue_id];
917 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
920 rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
925 ret = mlx5_glue->get_cq_event(rxq_obj->channel, &ev_cq, &ev_ctx);
926 if (ret || ev_cq != rxq_obj->cq) {
930 rxq_data->cq_arm_sn++;
931 mlx5_glue->ack_cq_events(rxq_obj->cq, 1);
932 mlx5_rxq_obj_release(rxq_obj);
935 ret = rte_errno; /* Save rte_errno before cleanup. */
937 mlx5_rxq_obj_release(rxq_obj);
938 DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
939 dev->data->port_id, rx_queue_id);
940 rte_errno = ret; /* Restore rte_errno. */
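/*
 * Illustrative sketch (not part of the driver): from the application side the
 * two callbacks above are reached through the generic Rx-interrupt API, for
 * instance in a power-saving polling loop (epoll instance, timeout and names
 * below are hypothetical):
 */
#if 0
static void
example_wait_for_rx(uint16_t port_id, uint16_t queue_id)
{
	struct rte_epoll_event event;

	/* Register the queue event on the per-thread epoll instance. */
	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
				  RTE_INTR_EVENT_ADD, NULL);
	/* Arm the CQ (mlx5_rx_intr_enable() above) and sleep until traffic. */
	rte_eth_dev_rx_intr_enable(port_id, queue_id);
	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, -1);
	rte_eth_dev_rx_intr_disable(port_id, queue_id);
}
#endif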
945 * Create a CQ Verbs object.
948 * Pointer to Ethernet device.
950 * Pointer to device private data.
952 * Pointer to Rx queue data.
954 * Number of CQEs in CQ.
956 * Pointer to Rx queue object data.
959 * The Verbs object initialised, NULL otherwise and rte_errno is set.
961 static struct ibv_cq *
962 mlx5_ibv_cq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
963 struct mlx5_rxq_data *rxq_data,
964 unsigned int cqe_n, struct mlx5_rxq_obj *rxq_obj)
967 struct ibv_cq_init_attr_ex ibv;
968 struct mlx5dv_cq_init_attr mlx5;
971 cq_attr.ibv = (struct ibv_cq_init_attr_ex){
973 .channel = rxq_obj->channel,
976 cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
979 if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
981 cq_attr.mlx5.comp_mask |=
982 MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
983 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
984 cq_attr.mlx5.cqe_comp_res_format =
985 mlx5_rxq_mprq_enabled(rxq_data) ?
986 MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
987 MLX5DV_CQE_RES_FORMAT_HASH;
989 cq_attr.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
992 * For vectorized Rx, it must not be doubled in order to
993 * make cq_ci and rq_ci aligned.
995 if (mlx5_rxq_check_vec_support(rxq_data) < 0)
996 cq_attr.ibv.cqe *= 2;
997 } else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
999 "port %u Rx CQE compression is disabled for HW"
1001 dev->data->port_id);
1002 } else if (priv->config.cqe_comp && rxq_data->lro) {
1004 "port %u Rx CQE compression is disabled for LRO",
1005 dev->data->port_id);
1007 #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
1008 if (priv->config.cqe_pad) {
1009 cq_attr.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
1010 cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
1013 return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(priv->sh->ctx,
1019 * Create a WQ Verbs object.
1022 * Pointer to Ethernet device.
1024 * Pointer to device private data.
1026 * Pointer to Rx queue data.
1028 * Queue index in DPDK Rx queue array
1030 * Number of WQEs in WQ.
1032 * Pointer to Rx queue object data.
1035 * The Verbs object initialised, NULL otherwise and rte_errno is set.
1037 static struct ibv_wq *
1038 mlx5_ibv_wq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
1039 struct mlx5_rxq_data *rxq_data, uint16_t idx,
1040 unsigned int wqe_n, struct mlx5_rxq_obj *rxq_obj)
1043 struct ibv_wq_init_attr ibv;
1044 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
1045 struct mlx5dv_wq_init_attr mlx5;
1049 wq_attr.ibv = (struct ibv_wq_init_attr){
1050 .wq_context = NULL, /* Could be useful in the future. */
1051 .wq_type = IBV_WQT_RQ,
1052 /* Max number of outstanding WRs. */
1053 .max_wr = wqe_n >> rxq_data->sges_n,
1054 /* Max number of scatter/gather elements in a WR. */
1055 .max_sge = 1 << rxq_data->sges_n,
1058 .comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING | 0,
1059 .create_flags = (rxq_data->vlan_strip ?
1060 IBV_WQ_FLAGS_CVLAN_STRIPPING : 0),
1062 /* By default, FCS (CRC) is stripped by hardware. */
1063 if (rxq_data->crc_present) {
1064 wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
1065 wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
1067 if (priv->config.hw_padding) {
1068 #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
1069 wq_attr.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
1070 wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
1071 #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
1072 wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
1073 wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
1076 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
1077 wq_attr.mlx5 = (struct mlx5dv_wq_init_attr){
1080 if (mlx5_rxq_mprq_enabled(rxq_data)) {
1081 struct mlx5dv_striding_rq_init_attr *mprq_attr =
1082 &wq_attr.mlx5.striding_rq_attrs;
1084 wq_attr.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
1085 *mprq_attr = (struct mlx5dv_striding_rq_init_attr){
1086 .single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
1087 .single_wqe_log_num_of_strides = rxq_data->strd_num_n,
1088 .two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
1091 rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &wq_attr.ibv,
1094 rxq_obj->wq = mlx5_glue->create_wq(priv->sh->ctx, &wq_attr.ibv);
1098 * Make sure the number of WRs * SGEs matches expectations, since a queue
1099 * cannot allocate more than "desc" buffers.
1101 if (wq_attr.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
1102 wq_attr.ibv.max_sge != (1u << rxq_data->sges_n)) {
1104 "port %u Rx queue %u requested %u*%u but got"
1106 dev->data->port_id, idx,
1107 wqe_n >> rxq_data->sges_n,
1108 (1 << rxq_data->sges_n),
1109 wq_attr.ibv.max_wr, wq_attr.ibv.max_sge);
1110 claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
1119 * Fill common fields of create RQ attributes structure.
1122 * Pointer to Rx queue data.
1124 * CQ number to use with this RQ.
1126 * RQ attributes structure to fill.
1129 mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
1130 struct mlx5_devx_create_rq_attr *rq_attr)
1132 rq_attr->state = MLX5_RQC_STATE_RST;
1133 rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
1135 rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
1139 * Fill common fields of DevX WQ attributes structure.
1142 * Pointer to device private data.
1144 * Pointer to Rx queue control structure.
1146 * WQ attributes structure to fill.
1149 mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
1150 struct mlx5_devx_wq_attr *wq_attr)
1152 wq_attr->end_padding_mode = priv->config.cqe_pad ?
1153 MLX5_WQ_END_PAD_MODE_ALIGN :
1154 MLX5_WQ_END_PAD_MODE_NONE;
1155 wq_attr->pd = priv->sh->pdn;
1156 wq_attr->dbr_addr = rxq_ctrl->dbr_offset;
1157 wq_attr->dbr_umem_id = rxq_ctrl->dbr_umem_id;
1158 wq_attr->dbr_umem_valid = 1;
1159 wq_attr->wq_umem_id = rxq_ctrl->wq_umem->umem_id;
1160 wq_attr->wq_umem_valid = 1;
1164 * Create a RQ object using DevX.
1167 * Pointer to Ethernet device.
1169 * Queue index in DPDK Rx queue array
1171 * CQ number to use with this RQ.
1174 * The DevX object initialised, NULL otherwise and rte_errno is set.
1176 static struct mlx5_devx_obj *
1177 mlx5_devx_rq_new(struct rte_eth_dev *dev, uint16_t idx, uint32_t cqn)
1179 struct mlx5_priv *priv = dev->data->dev_private;
1180 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1181 struct mlx5_rxq_ctrl *rxq_ctrl =
1182 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1183 struct mlx5_devx_create_rq_attr rq_attr;
1184 uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
1185 uint32_t wq_size = 0;
1186 uint32_t wqe_size = 0;
1187 uint32_t log_wqe_size = 0;
1189 struct mlx5_devx_obj *rq;
1191 memset(&rq_attr, 0, sizeof(rq_attr));
1192 /* Fill RQ attributes. */
1193 rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
1194 rq_attr.flush_in_error_en = 1;
1195 mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
1196 /* Fill WQ attributes for this RQ. */
1197 if (mlx5_rxq_mprq_enabled(rxq_data)) {
1198 rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
1200 * Number of strides in each WQE:
1201 * 512*2^single_wqe_log_num_of_strides.
1203 rq_attr.wq_attr.single_wqe_log_num_of_strides =
1204 rxq_data->strd_num_n -
1205 MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
1206 /* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
1207 rq_attr.wq_attr.single_stride_log_num_of_bytes =
1208 rxq_data->strd_sz_n -
1209 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
1210 wqe_size = sizeof(struct mlx5_wqe_mprq);
1212 rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
1213 wqe_size = sizeof(struct mlx5_wqe_data_seg);
1215 log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
1216 rq_attr.wq_attr.log_wq_stride = log_wqe_size;
1217 rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
1218 /* Calculate and allocate WQ memory space. */
1219 wqe_size = 1 << log_wqe_size; /* Round up to a power of two. */
1220 wq_size = wqe_n * wqe_size;
1221 buf = rte_calloc_socket(__func__, 1, wq_size, MLX5_WQE_BUF_ALIGNMENT,
1225 rxq_data->wqes = buf;
1226 rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
1228 if (!rxq_ctrl->wq_umem) {
1232 mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
1233 rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
1235 rxq_release_rq_resources(rxq_ctrl);
1240 * Create the Rx hairpin queue object.
1243 * Pointer to Ethernet device.
1245 * Queue index in DPDK Rx queue array
1248 * The hairpin DevX object initialised, NULL otherwise and rte_errno is set.
1250 static struct mlx5_rxq_obj *
1251 mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
1253 struct mlx5_priv *priv = dev->data->dev_private;
1254 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1255 struct mlx5_rxq_ctrl *rxq_ctrl =
1256 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1257 struct mlx5_devx_create_rq_attr attr = { 0 };
1258 struct mlx5_rxq_obj *tmpl = NULL;
1262 assert(!rxq_ctrl->obj);
1263 tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
1267 "port %u Rx queue %u cannot allocate verbs resources",
1268 dev->data->port_id, rxq_data->idx);
1272 tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN;
1273 tmpl->rxq_ctrl = rxq_ctrl;
1275 /* Workaround for hairpin startup */
1276 attr.wq_attr.log_hairpin_num_packets = log2above(32);
1277 /* Workaround for packets larger than 1KB */
1278 attr.wq_attr.log_hairpin_data_sz =
1279 priv->config.hca_attr.log_max_hairpin_wq_data_sz;
1280 tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
1284 "port %u Rx hairpin queue %u can't create rq object",
1285 dev->data->port_id, idx);
1289 DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
1290 idx, (void *)&tmpl);
1291 rte_atomic32_inc(&tmpl->refcnt);
1292 LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
1293 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1296 ret = rte_errno; /* Save rte_errno before cleanup. */
1298 mlx5_devx_cmd_destroy(tmpl->rq);
1299 rte_errno = ret; /* Restore rte_errno. */
1304 * Create the Rx queue Verbs/DevX object.
1307 * Pointer to Ethernet device.
1309 * Queue index in DPDK Rx queue array
1311 * Type of Rx queue object to create.
1314 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
1316 struct mlx5_rxq_obj *
1317 mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
1318 enum mlx5_rxq_obj_type type)
1320 struct mlx5_priv *priv = dev->data->dev_private;
1321 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1322 struct mlx5_rxq_ctrl *rxq_ctrl =
1323 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1324 struct ibv_wq_attr mod;
1326 unsigned int wqe_n = 1 << rxq_data->elts_n;
1327 struct mlx5_rxq_obj *tmpl = NULL;
1328 struct mlx5dv_cq cq_info;
1329 struct mlx5dv_rwq rwq;
1331 struct mlx5dv_obj obj;
1334 assert(!rxq_ctrl->obj);
1335 if (type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
1336 return mlx5_rxq_obj_hairpin_new(dev, idx);
1337 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
1338 priv->verbs_alloc_ctx.obj = rxq_ctrl;
1339 tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
1343 "port %u Rx queue %u cannot allocate verbs resources",
1344 dev->data->port_id, rxq_data->idx);
1349 tmpl->rxq_ctrl = rxq_ctrl;
1350 if (rxq_ctrl->irq) {
1351 tmpl->channel = mlx5_glue->create_comp_channel(priv->sh->ctx);
1352 if (!tmpl->channel) {
1353 DRV_LOG(ERR, "port %u: comp channel creation failure",
1354 dev->data->port_id);
1359 if (mlx5_rxq_mprq_enabled(rxq_data))
1360 cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
1363 tmpl->cq = mlx5_ibv_cq_new(dev, priv, rxq_data, cqe_n, tmpl);
1365 DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
1366 dev->data->port_id, idx);
1370 obj.cq.in = tmpl->cq;
1371 obj.cq.out = &cq_info;
1372 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ);
1377 if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
1379 "port %u wrong MLX5_CQE_SIZE environment variable"
1380 " value: it should be set to %u",
1381 dev->data->port_id, RTE_CACHE_LINE_SIZE);
1385 DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
1386 dev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr);
1387 DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
1388 dev->data->port_id, priv->sh->device_attr.orig_attr.max_sge);
1389 /* Allocate door-bell for types created with DevX. */
1390 if (tmpl->type != MLX5_RXQ_OBJ_TYPE_IBV) {
1391 struct mlx5_devx_dbr_page *dbr_page;
1394 dbr_offset = mlx5_get_dbr(dev, &dbr_page);
1397 rxq_ctrl->dbr_offset = dbr_offset;
1398 rxq_ctrl->dbr_umem_id = dbr_page->umem->umem_id;
1399 rxq_ctrl->dbr_umem_id_valid = 1;
1400 rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
1401 (uintptr_t)rxq_ctrl->dbr_offset);
1403 if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV) {
1404 tmpl->wq = mlx5_ibv_wq_new(dev, priv, rxq_data, idx, wqe_n,
1407 DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
1408 dev->data->port_id, idx);
1412 /* Change queue state to ready. */
1413 mod = (struct ibv_wq_attr){
1414 .attr_mask = IBV_WQ_ATTR_STATE,
1415 .wq_state = IBV_WQS_RDY,
1417 ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
1420 "port %u Rx queue %u WQ state to IBV_WQS_RDY"
1421 " failed", dev->data->port_id, idx);
1425 obj.rwq.in = tmpl->wq;
1427 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_RWQ);
1432 rxq_data->wqes = rwq.buf;
1433 rxq_data->rq_db = rwq.dbrec;
1434 } else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
1435 struct mlx5_devx_modify_rq_attr rq_attr;
1437 memset(&rq_attr, 0, sizeof(rq_attr));
1438 tmpl->rq = mlx5_devx_rq_new(dev, idx, cq_info.cqn);
1440 DRV_LOG(ERR, "port %u Rx queue %u RQ creation failure",
1441 dev->data->port_id, idx);
1445 /* Change queue state to ready. */
1446 rq_attr.rq_state = MLX5_RQC_STATE_RST;
1447 rq_attr.state = MLX5_RQC_STATE_RDY;
1448 ret = mlx5_devx_cmd_modify_rq(tmpl->rq, &rq_attr);
1452 /* Fill the rings. */
1453 rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
1454 rxq_data->cq_db = cq_info.dbrec;
1455 rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
1456 rxq_data->cq_uar = cq_info.cq_uar;
1457 rxq_data->cqn = cq_info.cqn;
1458 rxq_data->cq_arm_sn = 0;
1459 mlx5_rxq_initialize(rxq_data);
1460 rxq_data->cq_ci = 0;
1461 DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
1462 idx, (void *)&tmpl);
1463 rte_atomic32_inc(&tmpl->refcnt);
1464 LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
1465 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1469 ret = rte_errno; /* Save rte_errno before cleanup. */
1470 if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV && tmpl->wq)
1471 claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
1472 else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ && tmpl->rq)
1473 claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
1475 claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
1477 claim_zero(mlx5_glue->destroy_comp_channel
1480 rte_errno = ret; /* Restore rte_errno. */
1482 if (type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
1483 rxq_release_rq_resources(rxq_ctrl);
1484 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1489 * Verify the Rx queue objects list is empty
1492 * Pointer to Ethernet device.
1495 * The number of objects not released.
1498 mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
1500 struct mlx5_priv *priv = dev->data->dev_private;
1502 struct mlx5_rxq_obj *rxq_obj;
1504 LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
1505 DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
1506 dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
1513 * Callback function to initialize mbufs for Multi-Packet RQ.
1516 mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
1517 void *_m, unsigned int i __rte_unused)
1519 struct mlx5_mprq_buf *buf = _m;
1520 struct rte_mbuf_ext_shared_info *shinfo;
1521 unsigned int strd_n = (unsigned int)(uintptr_t)opaque_arg;
1524 memset(_m, 0, sizeof(*buf));
1526 rte_atomic16_set(&buf->refcnt, 1);
1527 for (j = 0; j != strd_n; ++j) {
1528 shinfo = &buf->shinfos[j];
1529 shinfo->free_cb = mlx5_mprq_buf_free_cb;
1530 shinfo->fcb_opaque = buf;
1535 * Free mempool of Multi-Packet RQ.
1538 * Pointer to Ethernet device.
1541 * 0 on success, negative errno value on failure.
1544 mlx5_mprq_free_mp(struct rte_eth_dev *dev)
1546 struct mlx5_priv *priv = dev->data->dev_private;
1547 struct rte_mempool *mp = priv->mprq_mp;
1552 DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
1553 dev->data->port_id, mp->name);
1555 * If a buffer in the pool has been externally attached to an mbuf and is
1556 * still in use by the application, destroying the Rx queue can spoil
1557 * the packet. It is unlikely, but it can happen if the application
1558 * dynamically creates and destroys queues while holding Rx packets.
1560 * TODO: It is unavoidable for now because the mempool for Multi-Packet
1561 * RQ isn't provided by the application but managed by the PMD.
1563 if (!rte_mempool_full(mp)) {
1565 "port %u mempool for Multi-Packet RQ is still in use",
1566 dev->data->port_id);
1570 rte_mempool_free(mp);
1571 /* Unset mempool for each Rx queue. */
1572 for (i = 0; i != priv->rxqs_n; ++i) {
1573 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1577 rxq->mprq_mp = NULL;
1579 priv->mprq_mp = NULL;
1584 * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
1585 * mempool. If already allocated, reuse it if there are enough elements.
1586 * Otherwise, resize it.
1589 * Pointer to Ethernet device.
1592 * 0 on success, negative errno value on failure.
1595 mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
1597 struct mlx5_priv *priv = dev->data->dev_private;
1598 struct rte_mempool *mp = priv->mprq_mp;
1599 char name[RTE_MEMPOOL_NAMESIZE];
1600 unsigned int desc = 0;
1601 unsigned int buf_len;
1602 unsigned int obj_num;
1603 unsigned int obj_size;
1604 unsigned int strd_num_n = 0;
1605 unsigned int strd_sz_n = 0;
1607 unsigned int n_ibv = 0;
1609 if (!mlx5_mprq_enabled(dev))
1611 /* Count the total number of descriptors configured. */
1612 for (i = 0; i != priv->rxqs_n; ++i) {
1613 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1614 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1615 (rxq, struct mlx5_rxq_ctrl, rxq);
1617 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1620 desc += 1 << rxq->elts_n;
1621 /* Get the max number of strides. */
1622 if (strd_num_n < rxq->strd_num_n)
1623 strd_num_n = rxq->strd_num_n;
1624 /* Get the max size of a stride. */
1625 if (strd_sz_n < rxq->strd_sz_n)
1626 strd_sz_n = rxq->strd_sz_n;
1628 assert(strd_num_n && strd_sz_n);
1629 buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
1630 obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
1631 sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
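/*
 * Worked example (illustrative, values are only an example): with 2^6 = 64
 * strides of 2^11 = 2048 bytes each, the computation just above gives
 *   buf_len  = 64 * 2048 = 131072 bytes and
 *   obj_size = sizeof(struct mlx5_mprq_buf) + 131072 +
 *              64 * sizeof(struct rte_mbuf_ext_shared_info) +
 *              RTE_PKTMBUF_HEADROOM,
 * i.e. one mempool object backs a whole WQE worth of strides plus one
 * shared-info slot per stride.
 */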
1633 * Received packets can be either memcpy'd or externally referenced. When
1634 * a packet is attached to an mbuf as an external buffer, it isn't
1635 * possible to predict how the buffers will be queued by the application,
1636 * so there is no way to pre-allocate exactly the number of buffers needed;
1637 * instead, enough buffers are speculatively prepared in advance.
1639 * In the data path, if this Mempool is depleted, PMD will try to memcpy
1640 * received packets to buffers provided by application (rxq->mp) until
1641 * this Mempool gets available again.
1644 obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * n_ibv;
1646 * rte_mempool_create_empty() has a sanity check that refuses a cache
1647 * size which is too large compared to the number of elements.
1648 * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so a constant
1649 * value of 2 is used here instead.
1651 obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
1652 /* Check whether a mempool is already allocated and if it can be reused. */
1653 if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
1654 DRV_LOG(DEBUG, "port %u mempool %s is being reused",
1655 dev->data->port_id, mp->name);
1658 } else if (mp != NULL) {
1659 DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
1660 dev->data->port_id, mp->name);
1662 * If freeing fails, the mempool may still be in use and there is no
1663 * choice but to keep using the existing one. On buffer underrun,
1664 * packets will be memcpy'd instead of using external buffer
1667 if (mlx5_mprq_free_mp(dev)) {
1668 if (mp->elt_size >= obj_size)
1674 snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
1675 mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
1676 0, NULL, NULL, mlx5_mprq_buf_init,
1677 (void *)(uintptr_t)(1 << strd_num_n),
1678 dev->device->numa_node, 0);
1681 "port %u failed to allocate a mempool for"
1682 " Multi-Packet RQ, count=%u, size=%u",
1683 dev->data->port_id, obj_num, obj_size);
1689 /* Set mempool for each Rx queue. */
1690 for (i = 0; i != priv->rxqs_n; ++i) {
1691 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1692 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1693 (rxq, struct mlx5_rxq_ctrl, rxq);
1695 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1699 DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
1700 dev->data->port_id);
1704 #define MLX5_MAX_LRO_SIZE (UINT8_MAX * 256u)
1705 #define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
1706 sizeof(struct rte_vlan_hdr) * 2 + \
1707 sizeof(struct rte_ipv6_hdr)))
1708 #define MAX_TCP_OPTION_SIZE 40u
1709 #define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
1710 sizeof(struct rte_tcp_hdr) + \
1711 MAX_TCP_OPTION_SIZE))
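/*
 * Worked sizes (standard header lengths, stated here for reference): the
 * offset above is 14 (Ethernet) + 2 * 4 (two VLAN tags) + 40 (IPv6) = 62
 * bytes, so MLX5_MAX_LRO_HEADER_FIX is 62 + 20 (TCP) + 40 (maximum TCP
 * options) = 122 bytes.
 */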
1714 * Adjust the maximum LRO message size.
1717 * Pointer to Ethernet device.
1718 * @param max_lro_size
1719 * The maximum size for LRO packet.
1722 mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint32_t max_lro_size)
1724 struct mlx5_priv *priv = dev->data->dev_private;
1726 if (priv->config.hca_attr.lro_max_msg_sz_mode ==
1727 MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
1728 MLX5_MAX_TCP_HDR_OFFSET)
1729 max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
1730 max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
1731 assert(max_lro_size >= 256u);
1732 max_lro_size /= 256u;
1733 if (priv->max_lro_msg_size)
1734 priv->max_lro_msg_size =
1735 RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size);
1737 priv->max_lro_msg_size = max_lro_size;
1741 * Create a DPDK Rx queue.
1744 * Pointer to Ethernet device.
1748 * Number of descriptors to configure in queue.
1750 * NUMA socket on which memory must be allocated.
1753 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1755 struct mlx5_rxq_ctrl *
1756 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1757 unsigned int socket, const struct rte_eth_rxconf *conf,
1758 struct rte_mempool *mp)
1760 struct mlx5_priv *priv = dev->data->dev_private;
1761 struct mlx5_rxq_ctrl *tmpl;
1762 unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
1763 unsigned int mprq_stride_size;
1764 struct mlx5_dev_config *config = &priv->config;
1765 unsigned int strd_headroom_en;
1767 * Always allocate extra slots, even if eventually
1768 * the vector Rx will not be used.
1771 desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
1772 uint64_t offloads = conf->offloads |
1773 dev->data->dev_conf.rxmode.offloads;
1774 unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
1775 const int mprq_en = mlx5_check_mprq_support(dev) > 0;
1776 unsigned int max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
1777 unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
1778 RTE_PKTMBUF_HEADROOM;
1779 unsigned int max_lro_size = 0;
1780 unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
1782 if (non_scatter_min_mbuf_size > mb_len && !(offloads &
1783 DEV_RX_OFFLOAD_SCATTER)) {
1784 DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
1785 " configured and no enough mbuf space(%u) to contain "
1786 "the maximum RX packet length(%u) with head-room(%u)",
1787 dev->data->port_id, idx, mb_len, max_rx_pkt_len,
1788 RTE_PKTMBUF_HEADROOM);
1792 tmpl = rte_calloc_socket("RXQ", 1,
1794 desc_n * sizeof(struct rte_mbuf *),
1800 tmpl->type = MLX5_RXQ_TYPE_STANDARD;
1801 if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
1802 MLX5_MR_BTREE_CACHE_N, socket)) {
1803 /* rte_errno is already set. */
1806 tmpl->socket = socket;
1807 if (dev->data->dev_conf.intr_conf.rxq)
1810 * An LRO packet may consume all the stride memory, hence we cannot
1811 * guarantee head-room near the packet memory in the stride.
1812 * In this case scatter is, for sure, enabled and an empty mbuf may be
1813 * added at the start for the head-room.
1815 if (lro_on_queue && RTE_PKTMBUF_HEADROOM > 0 &&
1816 non_scatter_min_mbuf_size > mb_len) {
1817 strd_headroom_en = 0;
1818 mprq_stride_size = RTE_MIN(max_rx_pkt_len,
1819 1u << config->mprq.max_stride_size_n);
1821 strd_headroom_en = 1;
1822 mprq_stride_size = non_scatter_min_mbuf_size;
1825 * This Rx queue can be configured as a Multi-Packet RQ if all of the
1826 * following conditions are met:
1827 * - MPRQ is enabled.
1828 * - The number of descs is more than the number of strides.
1829 * - max_rx_pkt_len plus overhead is less than the max size of a
1831 * Otherwise, enable Rx scatter if necessary.
1834 desc > (1U << config->mprq.stride_num_n) &&
1835 mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) {
1836 /* TODO: Rx scatter isn't supported yet. */
1837 tmpl->rxq.sges_n = 0;
1838 /* Trim the number of descs needed. */
1839 desc >>= config->mprq.stride_num_n;
1840 tmpl->rxq.strd_num_n = config->mprq.stride_num_n;
1841 tmpl->rxq.strd_sz_n = RTE_MAX(log2above(mprq_stride_size),
1842 config->mprq.min_stride_size_n);
1843 tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
1844 tmpl->rxq.strd_headroom_en = strd_headroom_en;
1845 tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
1846 config->mprq.max_memcpy_len);
1847 max_lro_size = RTE_MIN(max_rx_pkt_len,
1848 (1u << tmpl->rxq.strd_num_n) *
1849 (1u << tmpl->rxq.strd_sz_n));
1851 "port %u Rx queue %u: Multi-Packet RQ is enabled"
1852 " strd_num_n = %u, strd_sz_n = %u",
1853 dev->data->port_id, idx,
1854 tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
1855 } else if (max_rx_pkt_len <= first_mb_free_size) {
1856 tmpl->rxq.sges_n = 0;
1857 max_lro_size = max_rx_pkt_len;
1858 } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
1859 unsigned int size = non_scatter_min_mbuf_size;
1860 unsigned int sges_n;
1862 if (lro_on_queue && first_mb_free_size <
1863 MLX5_MAX_LRO_HEADER_FIX) {
1864 DRV_LOG(ERR, "Not enough space in the first segment(%u)"
1865 " to include the max header size(%u) for LRO",
1866 first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
1867 rte_errno = ENOTSUP;
1871 * Determine the number of SGEs needed for a full packet
1872 * and round it to the next power of two.
1874 sges_n = log2above((size / mb_len) + !!(size % mb_len));
1875 if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
1877 "port %u too many SGEs (%u) needed to handle"
1878 " requested maximum packet size %u, the maximum"
1879 " supported are %u", dev->data->port_id,
1880 1 << sges_n, max_rx_pkt_len,
1881 1u << MLX5_MAX_LOG_RQ_SEGS);
1882 rte_errno = ENOTSUP;
1885 tmpl->rxq.sges_n = sges_n;
1886 max_lro_size = max_rx_pkt_len;
1888 if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
1890 "port %u MPRQ is requested but cannot be enabled"
1891 " (requested: desc = %u, stride_sz = %u,"
1892 " supported: min_stride_num = %u, max_stride_sz = %u).",
1893 dev->data->port_id, desc, mprq_stride_size,
1894 (1 << config->mprq.stride_num_n),
1895 (1 << config->mprq.max_stride_size_n));
1896 DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
1897 dev->data->port_id, 1 << tmpl->rxq.sges_n);
1898 if (desc % (1 << tmpl->rxq.sges_n)) {
1900 "port %u number of Rx queue descriptors (%u) is not a"
1901 " multiple of SGEs per packet (%u)",
1904 1 << tmpl->rxq.sges_n);
1908 mlx5_max_lro_msg_size_adjust(dev, max_lro_size);
1909 /* Toggle RX checksum offload if hardware supports it. */
1910 tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
1911 tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
1912 /* Configure VLAN stripping. */
1913 tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1914 /* By default, FCS (CRC) is stripped by hardware. */
1915 tmpl->rxq.crc_present = 0;
1916 tmpl->rxq.lro = lro_on_queue;
1917 if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
1918 if (config->hw_fcs_strip) {
1920 * RQs used for LRO-enabled TIRs should not be
1921 * configured to scatter the FCS.
1925 "port %u CRC stripping has been "
1926 "disabled but will still be performed "
1927 "by hardware, because LRO is enabled",
1928 dev->data->port_id);
1930 tmpl->rxq.crc_present = 1;
1933 "port %u CRC stripping has been disabled but will"
1934 " still be performed by hardware, make sure MLNX_OFED"
1935 " and firmware are up to date",
1936 dev->data->port_id);
1940 "port %u CRC stripping is %s, %u bytes will be subtracted from"
1941 " incoming frames to hide it",
1943 tmpl->rxq.crc_present ? "disabled" : "enabled",
1944 tmpl->rxq.crc_present << 2);
1946 tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
1947 (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
1948 tmpl->rxq.port_id = dev->data->port_id;
1951 tmpl->rxq.elts_n = log2above(desc);
1952 tmpl->rxq.rq_repl_thresh =
1953 MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
1955 (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
1957 tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq;
1959 tmpl->rxq.idx = idx;
1960 rte_atomic32_inc(&tmpl->refcnt);
1961 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
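/*
 * Worked example (illustrative, values are only an example): with a 2048-byte
 * mbuf data room plus 128 bytes of head-room (mb_len = 2176) and
 * max_rx_pkt_len = 9000, the scatter branch above needs
 * ceil(9128 / 2176) = 5 segments, rounded up to sges_n = 3 (8 SGEs), and the
 * descriptor count must then be a multiple of 8.
 */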
1969 * Create a DPDK Rx hairpin queue.
1972 * Pointer to Ethernet device.
1976 * Number of descriptors to configure in queue.
1977 * @param hairpin_conf
1978 * The hairpin binding configuration.
1981 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1983 struct mlx5_rxq_ctrl *
1984 mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1985 const struct rte_eth_hairpin_conf *hairpin_conf)
1987 struct mlx5_priv *priv = dev->data->dev_private;
1988 struct mlx5_rxq_ctrl *tmpl;
1990 tmpl = rte_calloc_socket("RXQ", 1, sizeof(*tmpl), 0, SOCKET_ID_ANY);
1995 tmpl->type = MLX5_RXQ_TYPE_HAIRPIN;
1996 tmpl->socket = SOCKET_ID_ANY;
1997 tmpl->rxq.rss_hash = 0;
1998 tmpl->rxq.port_id = dev->data->port_id;
2000 tmpl->rxq.mp = NULL;
2001 tmpl->rxq.elts_n = log2above(desc);
2002 tmpl->rxq.elts = NULL;
2003 tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
2004 tmpl->hairpin_conf = *hairpin_conf;
2005 tmpl->rxq.idx = idx;
2006 rte_atomic32_inc(&tmpl->refcnt);
2007 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
2015 * Pointer to Ethernet device.
2020 * A pointer to the queue if it exists, NULL otherwise.
2022 struct mlx5_rxq_ctrl *
2023 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
2025 struct mlx5_priv *priv = dev->data->dev_private;
2026 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
2028 if ((*priv->rxqs)[idx]) {
2029 rxq_ctrl = container_of((*priv->rxqs)[idx],
2030 struct mlx5_rxq_ctrl,
2032 mlx5_rxq_obj_get(dev, idx);
2033 rte_atomic32_inc(&rxq_ctrl->refcnt);
2039 * Release a Rx queue.
2042 * Pointer to Ethernet device.
2047 * 1 while a reference on it exists, 0 when freed.
2050 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
2052 struct mlx5_priv *priv = dev->data->dev_private;
2053 struct mlx5_rxq_ctrl *rxq_ctrl;
2055 if (!(*priv->rxqs)[idx])
2057 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
2058 assert(rxq_ctrl->priv);
2059 if (rxq_ctrl->obj && !mlx5_rxq_obj_release(rxq_ctrl->obj))
2060 rxq_ctrl->obj = NULL;
2061 if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
2062 if (rxq_ctrl->dbr_umem_id_valid)
2063 claim_zero(mlx5_release_dbr(dev, rxq_ctrl->dbr_umem_id,
2064 rxq_ctrl->dbr_offset));
2065 if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
2066 mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
2067 LIST_REMOVE(rxq_ctrl, next);
2069 (*priv->rxqs)[idx] = NULL;
2076 * Verify the Rx Queue list is empty
2079 * Pointer to Ethernet device.
2082 * The number of objects not released.
2085 mlx5_rxq_verify(struct rte_eth_dev *dev)
2087 struct mlx5_priv *priv = dev->data->dev_private;
2088 struct mlx5_rxq_ctrl *rxq_ctrl;
2091 LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
2092 DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
2093 dev->data->port_id, rxq_ctrl->rxq.idx);
2100 * Get a Rx queue type.
2103 * Pointer to Ethernet device.
2108 * The Rx queue type.
2111 mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
2113 struct mlx5_priv *priv = dev->data->dev_private;
2114 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
2116 if ((*priv->rxqs)[idx]) {
2117 rxq_ctrl = container_of((*priv->rxqs)[idx],
2118 struct mlx5_rxq_ctrl,
2120 return rxq_ctrl->type;
2122 return MLX5_RXQ_TYPE_UNDEFINED;
2126 * Create an indirection table.
2129 * Pointer to Ethernet device.
2131 * Queues to include in the indirection table.
2133 * Number of queues in the array.
2136 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2138 static struct mlx5_ind_table_obj *
2139 mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
2140 uint32_t queues_n, enum mlx5_ind_tbl_type type)
2142 struct mlx5_priv *priv = dev->data->dev_private;
2143 struct mlx5_ind_table_obj *ind_tbl;
2144 unsigned int i = 0, j = 0, k = 0;
2146 ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
2147 queues_n * sizeof(uint16_t), 0);
2152 ind_tbl->type = type;
2153 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
2154 const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
2155 log2above(queues_n) :
2156 log2above(priv->config.ind_table_max_size);
2157 struct ibv_wq *wq[1 << wq_n];
2159 for (i = 0; i != queues_n; ++i) {
2160 struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
2164 wq[i] = rxq->obj->wq;
2165 ind_tbl->queues[i] = queues[i];
2167 ind_tbl->queues_n = queues_n;
2168 /* Finalise indirection table. */
2169 k = i; /* Retain value of i for use in error case. */
2170 for (j = 0; k != (unsigned int)(1 << wq_n); ++k, ++j)
2172 ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
2174 &(struct ibv_rwq_ind_table_init_attr){
2175 .log_ind_tbl_size = wq_n,
2179 if (!ind_tbl->ind_table) {
2183 } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
2184 struct mlx5_devx_rqt_attr *rqt_attr = NULL;
2185 const unsigned int rqt_n =
2186 1 << (rte_is_power_of_2(queues_n) ?
2187 log2above(queues_n) :
2188 log2above(priv->config.ind_table_max_size));
2190 rqt_attr = rte_calloc(__func__, 1, sizeof(*rqt_attr) +
2191 rqt_n * sizeof(uint32_t), 0);
2193 DRV_LOG(ERR, "port %u cannot allocate RQT resources",
2194 dev->data->port_id);
2198 rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
2199 rqt_attr->rqt_actual_size = rqt_n;
2200 for (i = 0; i != queues_n; ++i) {
2201 struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
2205 rqt_attr->rq_list[i] = rxq->obj->rq->id;
2206 ind_tbl->queues[i] = queues[i];
2208 k = i; /* Retain value of i for use in error case. */
2209 for (j = 0; k != rqt_n; ++k, ++j)
2210 rqt_attr->rq_list[k] = rqt_attr->rq_list[j];
2211 ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx,
2214 if (!ind_tbl->rqt) {
2215 DRV_LOG(ERR, "port %u cannot create DevX RQT",
2216 dev->data->port_id);
2220 ind_tbl->queues_n = queues_n;
2222 rte_atomic32_inc(&ind_tbl->refcnt);
2223 LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
2226 for (j = 0; j < i; j++)
2227 mlx5_rxq_release(dev, ind_tbl->queues[j]);
2229 DEBUG("port %u cannot create indirection table", dev->data->port_id);
2234 * Get an indirection table.
2237 * Pointer to Ethernet device.
2239 * Queues to include in the indirection table.
2241 * Number of queues in the array.
2244 * An indirection table if found.
2246 static struct mlx5_ind_table_obj *
2247 mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
2250 struct mlx5_priv *priv = dev->data->dev_private;
2251 struct mlx5_ind_table_obj *ind_tbl;
2253 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
2254 if ((ind_tbl->queues_n == queues_n) &&
2255 (memcmp(ind_tbl->queues, queues,
2256 ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
2263 rte_atomic32_inc(&ind_tbl->refcnt);
2264 for (i = 0; i != ind_tbl->queues_n; ++i)
2265 mlx5_rxq_get(dev, ind_tbl->queues[i]);
2271 * Release an indirection table.
2274 * Pointer to Ethernet device.
2276 * Indirection table to release.
2279 * 1 while a reference on it exists, 0 when freed.
2282 mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
2283 struct mlx5_ind_table_obj *ind_tbl)
2287 if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {
2288 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV)
2289 claim_zero(mlx5_glue->destroy_rwq_ind_table
2290 (ind_tbl->ind_table));
2291 else if (ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX)
2292 claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
2294 for (i = 0; i != ind_tbl->queues_n; ++i)
2295 claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
2296 if (!rte_atomic32_read(&ind_tbl->refcnt)) {
2297 LIST_REMOVE(ind_tbl, next);
2305 * Verify the indirection table object list is empty
2308 * Pointer to Ethernet device.
2311 * The number of objects not released.
2314 mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
2316 struct mlx5_priv *priv = dev->data->dev_private;
2317 struct mlx5_ind_table_obj *ind_tbl;
2320 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
2322 "port %u indirection table obj %p still referenced",
2323 dev->data->port_id, (void *)ind_tbl);
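/*
 * Usage sketch (hedged, the real call site lives in the device close path
 * outside this file): the verify helpers act as leak checks when the port is
 * torn down, e.g.:
 *
 *	ret = mlx5_ind_table_obj_verify(dev);
 *	if (ret)
 *		DRV_LOG(WARNING,
 *			"port %u some indirection tables still remain",
 *			dev->data->port_id);
 */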
2330 * Create an Rx Hash queue.
2333 * Pointer to Ethernet device.
2335 * RSS key for the Rx hash queue.
2336 * @param rss_key_len
2338 * @param hash_fields
2339 * Verbs protocol hash fields to apply RSS on.
2341 * Queues entering the hash queue. When hash_fields is empty, only the
2342 * first queue index is used for the indirection table.
2349 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2352 mlx5_hrxq_new(struct rte_eth_dev *dev,
2353 const uint8_t *rss_key, uint32_t rss_key_len,
2354 uint64_t hash_fields,
2355 const uint16_t *queues, uint32_t queues_n,
2356 int tunnel __rte_unused)
2358 struct mlx5_priv *priv = dev->data->dev_private;
2359 struct mlx5_hrxq *hrxq;
2360 struct ibv_qp *qp = NULL;
2361 struct mlx5_ind_table_obj *ind_tbl;
2363 struct mlx5_devx_obj *tir = NULL;
2364 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[0]];
2365 struct mlx5_rxq_ctrl *rxq_ctrl =
2366 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
2368 queues_n = hash_fields ? queues_n : 1;
2369 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2371 enum mlx5_ind_tbl_type type;
2373 type = rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV ?
2374 MLX5_IND_TBL_TYPE_IBV : MLX5_IND_TBL_TYPE_DEVX;
2375 ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n, type);
2381 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
2382 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
2383 struct mlx5dv_qp_init_attr qp_init_attr;
2385 memset(&qp_init_attr, 0, sizeof(qp_init_attr));
2387 qp_init_attr.comp_mask =
2388 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
2389 qp_init_attr.create_flags =
2390 MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
2392 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2393 if (dev->data->dev_conf.lpbk_mode) {
2395 * Allow packets sent from the NIC to loop
2396 * back without source MAC check.
2398 qp_init_attr.comp_mask |=
2399 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
2400 qp_init_attr.create_flags |=
2401 MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
2404 qp = mlx5_glue->dv_create_qp
2406 &(struct ibv_qp_init_attr_ex){
2407 .qp_type = IBV_QPT_RAW_PACKET,
2409 IBV_QP_INIT_ATTR_PD |
2410 IBV_QP_INIT_ATTR_IND_TABLE |
2411 IBV_QP_INIT_ATTR_RX_HASH,
2412 .rx_hash_conf = (struct ibv_rx_hash_conf){
2414 IBV_RX_HASH_FUNC_TOEPLITZ,
2415 .rx_hash_key_len = rss_key_len,
2417 (void *)(uintptr_t)rss_key,
2418 .rx_hash_fields_mask = hash_fields,
2420 .rwq_ind_tbl = ind_tbl->ind_table,
2425 qp = mlx5_glue->create_qp_ex
2427 &(struct ibv_qp_init_attr_ex){
2428 .qp_type = IBV_QPT_RAW_PACKET,
2430 IBV_QP_INIT_ATTR_PD |
2431 IBV_QP_INIT_ATTR_IND_TABLE |
2432 IBV_QP_INIT_ATTR_RX_HASH,
2433 .rx_hash_conf = (struct ibv_rx_hash_conf){
2435 IBV_RX_HASH_FUNC_TOEPLITZ,
2436 .rx_hash_key_len = rss_key_len,
2438 (void *)(uintptr_t)rss_key,
2439 .rx_hash_fields_mask = hash_fields,
2441 .rwq_ind_tbl = ind_tbl->ind_table,
2449 } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
2450 struct mlx5_devx_tir_attr tir_attr;
2454 /* Enable TIR LRO only if all the queues were configured for LRO. */
2455 for (i = 0; i < queues_n; ++i) {
2456 if (!(*priv->rxqs)[queues[i]]->lro) {
2461 memset(&tir_attr, 0, sizeof(tir_attr));
2462 tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
2463 tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
2464 memcpy(&tir_attr.rx_hash_field_selector_outer, &hash_fields,
2466 if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
2467 tir_attr.transport_domain = priv->sh->td->id;
2469 tir_attr.transport_domain = priv->sh->tdn;
2470 memcpy(tir_attr.rx_hash_toeplitz_key, rss_key, rss_key_len);
2471 tir_attr.indirect_table = ind_tbl->rqt->id;
2472 if (dev->data->dev_conf.lpbk_mode)
2473 tir_attr.self_lb_block =
2474 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
2476 tir_attr.lro_timeout_period_usecs =
2477 priv->config.lro.timeout;
2478 tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
2479 tir_attr.lro_enable_mask =
2480 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
2481 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
2483 tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
2485 DRV_LOG(ERR, "port %u cannot create DevX TIR",
2486 dev->data->port_id);
2491 hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
2494 hrxq->ind_table = ind_tbl;
2495 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
2497 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2499 mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
2500 if (!hrxq->action) {
2505 } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
2507 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2508 hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir
2510 if (!hrxq->action) {
2516 hrxq->rss_key_len = rss_key_len;
2517 hrxq->hash_fields = hash_fields;
2518 memcpy(hrxq->rss_key, rss_key, rss_key_len);
2519 rte_atomic32_inc(&hrxq->refcnt);
2520 LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
2523 err = rte_errno; /* Save rte_errno before cleanup. */
2524 mlx5_ind_table_obj_release(dev, ind_tbl);
2526 claim_zero(mlx5_glue->destroy_qp(qp));
2528 claim_zero(mlx5_devx_cmd_destroy(tir));
2529 rte_errno = err; /* Restore rte_errno. */
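/*
 * Illustrative call (hypothetical values, not taken from a real call site):
 * spreading IPv4 traffic over four Rx queues with the default Toeplitz key
 * declared at the top of this file:
 *
 *	static const uint16_t queues[] = { 0, 1, 2, 3 };
 *	struct mlx5_hrxq *hrxq;
 *
 *	hrxq = mlx5_hrxq_new(dev, rss_hash_default_key,
 *			     MLX5_RSS_HASH_KEY_LEN,
 *			     IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4,
 *			     queues, RTE_DIM(queues), 0);
 *	if (!hrxq)
 *		return -rte_errno;
 */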
2534 * Get an Rx Hash queue.
2537 * Pointer to Ethernet device.
2539 * RSS configuration for the Rx hash queue.
2541 * Queues entering the hash queue. When hash_fields is empty, only the
2542 * first queue index is used for the indirection table.
2547 * A hash Rx queue on success.
2550 mlx5_hrxq_get(struct rte_eth_dev *dev,
2551 const uint8_t *rss_key, uint32_t rss_key_len,
2552 uint64_t hash_fields,
2553 const uint16_t *queues, uint32_t queues_n)
2555 struct mlx5_priv *priv = dev->data->dev_private;
2556 struct mlx5_hrxq *hrxq;
2558 queues_n = hash_fields ? queues_n : 1;
2559 LIST_FOREACH(hrxq, &priv->hrxqs, next) {
2560 struct mlx5_ind_table_obj *ind_tbl;
2562 if (hrxq->rss_key_len != rss_key_len)
2564 if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
2566 if (hrxq->hash_fields != hash_fields)
2568 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2571 if (ind_tbl != hrxq->ind_table) {
2572 mlx5_ind_table_obj_release(dev, ind_tbl);
2575 rte_atomic32_inc(&hrxq->refcnt);
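/*
 * Sharing note (sketch): the lookup above matches on key length, key bytes,
 * hash fields and the underlying indirection table, so flows with an
 * identical RSS configuration share a single hash Rx queue, each holding its
 * own reference:
 *
 *	h1 = mlx5_hrxq_get(dev, key, len, fields, queues, n);
 *	h2 = mlx5_hrxq_get(dev, key, len, fields, queues, n);
 *	h1 == h2 when both succeed, and each call took one reference.
 */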
2582 * Release the hash Rx queue.
2585 * Pointer to Ethernet device.
2587 * Pointer to Hash Rx queue to release.
2590 * 1 while a reference on it exists, 0 when freed.
2593 mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
2595 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2596 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2597 mlx5_glue->destroy_flow_action(hrxq->action);
2599 if (hrxq->ind_table->type == MLX5_IND_TBL_TYPE_IBV)
2600 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2601 else /* hrxq->ind_table->type == MLX5_IND_TBL_TYPE_DEVX */
2602 claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
2603 mlx5_ind_table_obj_release(dev, hrxq->ind_table);
2604 LIST_REMOVE(hrxq, next);
2608 claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
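/*
 * Usage sketch: the release helpers return 1 while other references remain
 * and 0 once the object has actually been destroyed, letting callers know
 * when the QP/TIR and indirection table were really freed:
 *
 *	if (mlx5_hrxq_release(dev, hrxq) == 0)
 *		hrxq must not be used any more, the hardware objects are gone.
 */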
2613 * Verify the hash Rx queue list is empty
2616 * Pointer to Ethernet device.
2619 * The number of objects not released.
2622 mlx5_hrxq_verify(struct rte_eth_dev *dev)
2624 struct mlx5_priv *priv = dev->data->dev_private;
2625 struct mlx5_hrxq *hrxq;
2628 LIST_FOREACH(hrxq, &priv->hrxqs, next) {
2630 "port %u hash Rx queue %p still referenced",
2631 dev->data->port_id, (void *)hrxq);
2638 * Create a drop Rx queue Verbs/DevX object.
2641 * Pointer to Ethernet device.
2644 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2646 static struct mlx5_rxq_obj *
2647 mlx5_rxq_obj_drop_new(struct rte_eth_dev *dev)
2649 struct mlx5_priv *priv = dev->data->dev_private;
2650 struct ibv_context *ctx = priv->sh->ctx;
2652 struct ibv_wq *wq = NULL;
2653 struct mlx5_rxq_obj *rxq;
2655 if (priv->drop_queue.rxq)
2656 return priv->drop_queue.rxq;
2657 cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
2659 DEBUG("port %u cannot allocate CQ for drop queue",
2660 dev->data->port_id);
2664 wq = mlx5_glue->create_wq(ctx,
2665 &(struct ibv_wq_init_attr){
2666 .wq_type = IBV_WQT_RQ,
2673 DEBUG("port %u cannot allocate WQ for drop queue",
2674 dev->data->port_id);
2678 rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0);
2680 DEBUG("port %u cannot allocate drop Rx queue memory",
2681 dev->data->port_id);
2687 priv->drop_queue.rxq = rxq;
2691 claim_zero(mlx5_glue->destroy_wq(wq));
2693 claim_zero(mlx5_glue->destroy_cq(cq));
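/*
 * Note (hedged): the drop queue is a placeholder that never has buffers
 * posted to it, so the work queue only needs the bare minimum resources.
 * Beyond the wq_type shown above, such a WQ is typically created with
 * something like:
 *
 *	.max_wr = 1,
 *	.max_sge = 1,
 *	.pd = priv->sh->pd,
 *	.cq = cq,
 */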
2698 * Release a drop Rx queue Verbs/DevX object.
2701 * Pointer to Ethernet device.
2707 mlx5_rxq_obj_drop_release(struct rte_eth_dev *dev)
2709 struct mlx5_priv *priv = dev->data->dev_private;
2710 struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
2713 claim_zero(mlx5_glue->destroy_wq(rxq->wq));
2715 claim_zero(mlx5_glue->destroy_cq(rxq->cq));
2717 priv->drop_queue.rxq = NULL;
2721 * Create a drop indirection table.
2724 * Pointer to Ethernet device.
2727 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2729 static struct mlx5_ind_table_obj *
2730 mlx5_ind_table_obj_drop_new(struct rte_eth_dev *dev)
2732 struct mlx5_priv *priv = dev->data->dev_private;
2733 struct mlx5_ind_table_obj *ind_tbl;
2734 struct mlx5_rxq_obj *rxq;
2735 struct mlx5_ind_table_obj tmpl;
2737 rxq = mlx5_rxq_obj_drop_new(dev);
2740 tmpl.ind_table = mlx5_glue->create_rwq_ind_table
2742 &(struct ibv_rwq_ind_table_init_attr){
2743 .log_ind_tbl_size = 0,
2744 .ind_tbl = &rxq->wq,
2747 if (!tmpl.ind_table) {
2748 DEBUG("port %u cannot allocate indirection table for drop"
2750 dev->data->port_id);
2754 ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0);
2759 ind_tbl->ind_table = tmpl.ind_table;
2762 mlx5_rxq_obj_drop_release(dev);
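/*
 * Note: .log_ind_tbl_size = 0 above gives a table of 1 << 0 == 1 entry, so
 * every packet hashed through this table lands on the single drop WQ. The
 * regular tables earlier in this file size themselves the same way, only
 * with a larger exponent:
 *
 *	log_size = rte_is_power_of_2(queues_n) ?
 *		   log2above(queues_n) :
 *		   log2above(priv->config.ind_table_max_size);
 */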
2767 * Release a drop indirection table.
2770 * Pointer to Ethernet device.
2773 mlx5_ind_table_obj_drop_release(struct rte_eth_dev *dev)
2775 struct mlx5_priv *priv = dev->data->dev_private;
2776 struct mlx5_ind_table_obj *ind_tbl = priv->drop_queue.hrxq->ind_table;
2778 claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
2779 mlx5_rxq_obj_drop_release(dev);
2781 priv->drop_queue.hrxq->ind_table = NULL;
2785 * Create a drop Rx Hash queue.
2788 * Pointer to Ethernet device.
2791 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2794 mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
2796 struct mlx5_priv *priv = dev->data->dev_private;
2797 struct mlx5_ind_table_obj *ind_tbl = NULL;
2798 struct ibv_qp *qp = NULL;
2799 struct mlx5_hrxq *hrxq = NULL;
2801 if (priv->drop_queue.hrxq) {
2802 rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
2803 return priv->drop_queue.hrxq;
2805 hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
2808 "port %u cannot allocate memory for drop queue",
2809 dev->data->port_id);
2813 priv->drop_queue.hrxq = hrxq;
2814 ind_tbl = mlx5_ind_table_obj_drop_new(dev);
2817 hrxq->ind_table = ind_tbl;
2818 qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
2819 &(struct ibv_qp_init_attr_ex){
2820 .qp_type = IBV_QPT_RAW_PACKET,
2822 IBV_QP_INIT_ATTR_PD |
2823 IBV_QP_INIT_ATTR_IND_TABLE |
2824 IBV_QP_INIT_ATTR_RX_HASH,
2825 .rx_hash_conf = (struct ibv_rx_hash_conf){
2827 IBV_RX_HASH_FUNC_TOEPLITZ,
2828 .rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
2829 .rx_hash_key = rss_hash_default_key,
2830 .rx_hash_fields_mask = 0,
2832 .rwq_ind_tbl = ind_tbl->ind_table,
2836 DEBUG("port %u cannot allocate QP for drop queue",
2837 dev->data->port_id);
2842 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2843 hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
2844 if (!hrxq->action) {
2849 rte_atomic32_set(&hrxq->refcnt, 1);
2852 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2853 if (hrxq && hrxq->action)
2854 mlx5_glue->destroy_flow_action(hrxq->action);
2857 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2859 mlx5_ind_table_obj_drop_release(dev);
2861 priv->drop_queue.hrxq = NULL;
2868 * Release a drop hash Rx queue.
2871 * Pointer to Ethernet device.
2874 mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
2876 struct mlx5_priv *priv = dev->data->dev_private;
2877 struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
2879 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2880 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2881 mlx5_glue->destroy_flow_action(hrxq->action);
2883 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2884 mlx5_ind_table_obj_drop_release(dev);
2886 priv->drop_queue.hrxq = NULL;
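/*
 * Drop queue lifecycle sketch (hedged, the actual call sites are in the flow
 * engine outside this file): a flow rule that drops traffic grabs the shared
 * drop hash Rx queue and releases it when the rule is destroyed.
 *
 *	hrxq = mlx5_hrxq_drop_new(dev);
 *	if (!hrxq)
 *		return -rte_errno;
 *	... attach hrxq->qp (or hrxq->action) to the flow rule ...
 *	mlx5_hrxq_drop_release(dev);
 */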