1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
12 #include <sys/queue.h>
15 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
17 #pragma GCC diagnostic ignored "-Wpedantic"
19 #include <infiniband/verbs.h>
20 #include <infiniband/mlx5dv.h>
22 #pragma GCC diagnostic error "-Wpedantic"
26 #include <rte_malloc.h>
27 #include <rte_ethdev_driver.h>
28 #include <rte_common.h>
29 #include <rte_interrupts.h>
30 #include <rte_debug.h>
33 #include <mlx5_glue.h>
34 #include <mlx5_devx_cmds.h>
36 #include "mlx5_defs.h"
38 #include "mlx5_rxtx.h"
39 #include "mlx5_utils.h"
40 #include "mlx5_autoconf.h"
41 #include "mlx5_flow.h"
44 /* Default RSS hash key also used for ConnectX-3. */
45 uint8_t rss_hash_default_key[] = {
46 0x2c, 0xc6, 0x81, 0xd1,
47 0x5b, 0xdb, 0xf4, 0xf7,
48 0xfc, 0xa2, 0x83, 0x19,
49 0xdb, 0x1a, 0x3e, 0x94,
50 0x6b, 0x9e, 0x38, 0xd9,
51 0x2c, 0x9c, 0x03, 0xd1,
52 0xad, 0x99, 0x44, 0xa7,
53 0xd9, 0x56, 0x3d, 0x59,
54 0x06, 0x3c, 0x25, 0xf3,
55 0xfc, 0x1f, 0xdc, 0x2a,
58 /* Length of the default RSS hash key. */
59 static_assert(MLX5_RSS_HASH_KEY_LEN ==
60 (unsigned int)sizeof(rss_hash_default_key),
61 "wrong RSS default key size.");
64 * Check whether Multi-Packet RQ can be enabled for the device.
67 * Pointer to Ethernet device.
70 * 1 if supported, negative errno value if not.
73 mlx5_check_mprq_support(struct rte_eth_dev *dev)
75 struct mlx5_priv *priv = dev->data->dev_private;
77 if (priv->config.mprq.enabled &&
78 priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
84 * Check whether Multi-Packet RQ is enabled for the Rx queue.
87 * Pointer to receive queue structure.
90 * 0 if disabled, otherwise enabled.
93 mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
95 return rxq->strd_num_n > 0;
99 * Check whether Multi-Packet RQ is enabled for the device.
102 * Pointer to Ethernet device.
105 * 0 if disabled, otherwise enabled.
108 mlx5_mprq_enabled(struct rte_eth_dev *dev)
110 struct mlx5_priv *priv = dev->data->dev_private;
115 if (mlx5_check_mprq_support(dev) < 0)
117 /* All the configured queues should be enabled. */
118 for (i = 0; i < priv->rxqs_n; ++i) {
119 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
120 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
121 (rxq, struct mlx5_rxq_ctrl, rxq);
123 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
126 if (mlx5_rxq_mprq_enabled(rxq))
129 /* Multi-Packet RQ can't be partially configured. */
130 assert(n == 0 || n == n_ibv);
135 * Allocate RX queue elements for Multi-Packet RQ.
138 * Pointer to RX queue structure.
141 * 0 on success, a negative errno value otherwise and rte_errno is set.
144 rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
146 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
147 unsigned int wqe_n = 1 << rxq->elts_n;
151 /* Iterate on segments. */
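/*
 * One more buffer than the number of WQEs is fetched; the extra one is
 * kept aside as the replacement buffer (mprq_repl) used by the datapath.
 */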
152 for (i = 0; i <= wqe_n; ++i) {
153 struct mlx5_mprq_buf *buf;
155 if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
156 DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
161 (*rxq->mprq_bufs)[i] = buf;
163 rxq->mprq_repl = buf;
166 "port %u Rx queue %u allocated and configured %u segments",
167 rxq->port_id, rxq->idx, wqe_n);
170 err = rte_errno; /* Save rte_errno before cleanup. */
172 for (i = 0; (i != wqe_n); ++i) {
173 if ((*rxq->mprq_bufs)[i] != NULL)
174 rte_mempool_put(rxq->mprq_mp,
175 (*rxq->mprq_bufs)[i]);
176 (*rxq->mprq_bufs)[i] = NULL;
178 DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
179 rxq->port_id, rxq->idx);
180 rte_errno = err; /* Restore rte_errno. */
185 * Allocate RX queue elements for Single-Packet RQ.
188 * Pointer to RX queue structure.
191 * 0 on success, errno value on failure.
194 rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
196 const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
197 unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
201 /* Iterate on segments. */
202 for (i = 0; (i != elts_n); ++i) {
203 struct rte_mbuf *buf;
205 buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
207 DRV_LOG(ERR, "port %u empty mbuf pool",
208 PORT_ID(rxq_ctrl->priv));
212 /* Headroom is reserved by rte_pktmbuf_alloc(). */
213 assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
214 /* Buffer is supposed to be empty. */
215 assert(rte_pktmbuf_data_len(buf) == 0);
216 assert(rte_pktmbuf_pkt_len(buf) == 0);
218 /* Only the first segment keeps headroom. */
220 SET_DATA_OFF(buf, 0);
221 PORT(buf) = rxq_ctrl->rxq.port_id;
222 DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
223 PKT_LEN(buf) = DATA_LEN(buf);
225 (*rxq_ctrl->rxq.elts)[i] = buf;
227 /* If Rx vector is activated. */
228 if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
229 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
230 struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
231 struct rte_pktmbuf_pool_private *priv =
232 (struct rte_pktmbuf_pool_private *)
233 rte_mempool_get_priv(rxq_ctrl->rxq.mp);
236 /* Initialize default rearm_data for vPMD. */
237 mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
238 rte_mbuf_refcnt_set(mbuf_init, 1);
239 mbuf_init->nb_segs = 1;
240 mbuf_init->port = rxq->port_id;
241 if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
242 mbuf_init->ol_flags = EXT_ATTACHED_MBUF;
244 * prevent compiler reordering:
245 * rearm_data covers previous fields.
247 rte_compiler_barrier();
248 rxq->mbuf_initializer =
249 *(rte_xmm_t *)&mbuf_init->rearm_data;
250 /* Padding with a fake mbuf for vectorized Rx. */
251 for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
252 (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
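/*
 * The trailing fake-mbuf entries let the vectorized burst always handle
 * a full MLX5_VPMD_DESCS_PER_LOOP batch without wrap checks at ring end.
 */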
255 "port %u Rx queue %u allocated and configured %u segments"
257 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
258 elts_n / (1 << rxq_ctrl->rxq.sges_n));
261 err = rte_errno; /* Save rte_errno before cleanup. */
263 for (i = 0; (i != elts_n); ++i) {
264 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
265 rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
266 (*rxq_ctrl->rxq.elts)[i] = NULL;
268 DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
269 PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
270 rte_errno = err; /* Restore rte_errno. */
275 * Allocate RX queue elements.
278 * Pointer to RX queue structure.
281 * 0 on success, errno value on failure.
284 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
286 return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
287 rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl);
291 * Free RX queue elements for Multi-Packet RQ.
294 * Pointer to RX queue structure.
297 rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
299 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
302 DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs",
303 rxq->port_id, rxq->idx);
304 if (rxq->mprq_bufs == NULL)
306 assert(mlx5_rxq_check_vec_support(rxq) < 0);
307 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
308 if ((*rxq->mprq_bufs)[i] != NULL)
309 mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
310 (*rxq->mprq_bufs)[i] = NULL;
312 if (rxq->mprq_repl != NULL) {
313 mlx5_mprq_buf_free(rxq->mprq_repl);
314 rxq->mprq_repl = NULL;
319 * Free RX queue elements for Single-Packet RQ.
322 * Pointer to RX queue structure.
325 rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
327 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
328 const uint16_t q_n = (1 << rxq->elts_n);
329 const uint16_t q_mask = q_n - 1;
330 uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
333 DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
334 PORT_ID(rxq_ctrl->priv), rxq->idx);
335 if (rxq->elts == NULL)
338 * Some mbufs in the ring belong to the application; they cannot be freed.
341 if (mlx5_rxq_check_vec_support(rxq) > 0) {
342 for (i = 0; i < used; ++i)
343 (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
344 rxq->rq_pi = rxq->rq_ci;
346 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
347 if ((*rxq->elts)[i] != NULL)
348 rte_pktmbuf_free_seg((*rxq->elts)[i]);
349 (*rxq->elts)[i] = NULL;
354 * Free RX queue elements.
357 * Pointer to RX queue structure.
360 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
362 if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
363 rxq_free_elts_mprq(rxq_ctrl);
365 rxq_free_elts_sprq(rxq_ctrl);
369 * Returns the per-queue supported offloads.
372 * Pointer to Ethernet device.
375 * Supported Rx offloads.
378 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
380 struct mlx5_priv *priv = dev->data->dev_private;
381 struct mlx5_dev_config *config = &priv->config;
382 uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
383 DEV_RX_OFFLOAD_TIMESTAMP |
384 DEV_RX_OFFLOAD_JUMBO_FRAME |
385 DEV_RX_OFFLOAD_RSS_HASH);
387 if (config->hw_fcs_strip)
388 offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
391 offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
392 DEV_RX_OFFLOAD_UDP_CKSUM |
393 DEV_RX_OFFLOAD_TCP_CKSUM);
394 if (config->hw_vlan_strip)
395 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
396 if (MLX5_LRO_SUPPORTED(dev))
397 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
403 * Returns the per-port supported offloads.
406 * Supported Rx offloads.
409 mlx5_get_rx_port_offloads(void)
411 uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
417 * Verify if the queue can be released.
420 * Pointer to Ethernet device.
425 * 1 if the queue can be released
426 * 0 if the queue cannot be released because references to it still exist.
427 * A negative errno value is returned and rte_errno is set if the queue doesn't exist.
430 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
432 struct mlx5_priv *priv = dev->data->dev_private;
433 struct mlx5_rxq_ctrl *rxq_ctrl;
435 if (!(*priv->rxqs)[idx]) {
439 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
440 return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
444 * Rx queue presetup checks.
447 * Pointer to Ethernet device structure.
451 * Number of descriptors to configure in queue.
454 * 0 on success, a negative errno value otherwise and rte_errno is set.
457 mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc)
459 struct mlx5_priv *priv = dev->data->dev_private;
461 if (!rte_is_power_of_2(desc)) {
462 desc = 1 << log2above(desc);
464 "port %u increased number of descriptors in Rx queue %u"
465 " to the next power of two (%d)",
466 dev->data->port_id, idx, desc);
468 DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
469 dev->data->port_id, idx, desc);
470 if (idx >= priv->rxqs_n) {
471 DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
472 dev->data->port_id, idx, priv->rxqs_n);
473 rte_errno = EOVERFLOW;
476 if (!mlx5_rxq_releasable(dev, idx)) {
477 DRV_LOG(ERR, "port %u unable to release queue index %u",
478 dev->data->port_id, idx);
482 mlx5_rxq_release(dev, idx);
489 * Pointer to Ethernet device structure.
493 * Number of descriptors to configure in queue.
495 * NUMA socket on which memory must be allocated.
497 * Thresholds parameters.
499 * Memory pool for buffer allocations.
502 * 0 on success, a negative errno value otherwise and rte_errno is set.
505 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
506 unsigned int socket, const struct rte_eth_rxconf *conf,
507 struct rte_mempool *mp)
509 struct mlx5_priv *priv = dev->data->dev_private;
510 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
511 struct mlx5_rxq_ctrl *rxq_ctrl =
512 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
515 res = mlx5_rx_queue_pre_setup(dev, idx, desc);
518 rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
520 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
521 dev->data->port_id, idx);
525 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
526 dev->data->port_id, idx);
527 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
534 * Pointer to Ethernet device structure.
538 * Number of descriptors to configure in queue.
539 * @param hairpin_conf
540 * Hairpin configuration parameters.
543 * 0 on success, a negative errno value otherwise and rte_errno is set.
546 mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
548 const struct rte_eth_hairpin_conf *hairpin_conf)
550 struct mlx5_priv *priv = dev->data->dev_private;
551 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
552 struct mlx5_rxq_ctrl *rxq_ctrl =
553 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
556 res = mlx5_rx_queue_pre_setup(dev, idx, desc);
559 if (hairpin_conf->peer_count != 1 ||
560 hairpin_conf->peers[0].port != dev->data->port_id ||
561 hairpin_conf->peers[0].queue >= priv->txqs_n) {
562 DRV_LOG(ERR, "port %u unable to setup hairpin queue index %u "
563 " invalid hairpin configuration", dev->data->port_id,
568 rxq_ctrl = mlx5_rxq_hairpin_new(dev, idx, desc, hairpin_conf);
570 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
571 dev->data->port_id, idx);
575 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
576 dev->data->port_id, idx);
577 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
582 * DPDK callback to release a RX queue.
585 * Generic RX queue pointer.
588 mlx5_rx_queue_release(void *dpdk_rxq)
590 struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
591 struct mlx5_rxq_ctrl *rxq_ctrl;
592 struct mlx5_priv *priv;
596 rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
597 priv = rxq_ctrl->priv;
598 if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
599 rte_panic("port %u Rx queue %u is still used by a flow and"
600 " cannot be removed\n",
601 PORT_ID(priv), rxq->idx);
602 mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
606 * Get an Rx queue Verbs/DevX object.
609 * Pointer to Ethernet device.
611 * Queue index in DPDK Rx queue array
614 * The Verbs/DevX object if it exists.
616 static struct mlx5_rxq_obj *
617 mlx5_rxq_obj_get(struct rte_eth_dev *dev, uint16_t idx)
619 struct mlx5_priv *priv = dev->data->dev_private;
620 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
621 struct mlx5_rxq_ctrl *rxq_ctrl;
623 if (idx >= priv->rxqs_n)
627 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
629 rte_atomic32_inc(&rxq_ctrl->obj->refcnt);
630 return rxq_ctrl->obj;
634 * Release the resources allocated for an RQ DevX object.
637 * DevX Rx queue object.
640 rxq_release_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
642 if (rxq_ctrl->rxq.wqes) {
643 rte_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
644 rxq_ctrl->rxq.wqes = NULL;
646 if (rxq_ctrl->wq_umem) {
647 mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
648 rxq_ctrl->wq_umem = NULL;
653 * Release an Rx hairpin related resources.
656 * Hairpin Rx queue object.
659 rxq_obj_hairpin_release(struct mlx5_rxq_obj *rxq_obj)
661 struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
664 rq_attr.state = MLX5_RQC_STATE_RST;
665 rq_attr.rq_state = MLX5_RQC_STATE_RDY;
666 mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
667 claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
671 * Release an Rx verbs/DevX queue object.
674 * Verbs/DevX Rx queue object.
677 * 1 while a reference on it exists, 0 when freed.
680 mlx5_rxq_obj_release(struct mlx5_rxq_obj *rxq_obj)
683 if (rte_atomic32_dec_and_test(&rxq_obj->refcnt)) {
684 switch (rxq_obj->type) {
685 case MLX5_RXQ_OBJ_TYPE_IBV:
688 rxq_free_elts(rxq_obj->rxq_ctrl);
689 claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
690 claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
692 case MLX5_RXQ_OBJ_TYPE_DEVX_RQ:
695 rxq_free_elts(rxq_obj->rxq_ctrl);
696 claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
697 rxq_release_rq_resources(rxq_obj->rxq_ctrl);
698 claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
700 case MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN:
702 rxq_obj_hairpin_release(rxq_obj);
705 if (rxq_obj->channel)
706 claim_zero(mlx5_glue->destroy_comp_channel
708 LIST_REMOVE(rxq_obj, next);
716 * Allocate queue vector and fill epoll fd list for Rx interrupts.
719 * Pointer to Ethernet device.
722 * 0 on success, a negative errno value otherwise and rte_errno is set.
725 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
727 struct mlx5_priv *priv = dev->data->dev_private;
729 unsigned int rxqs_n = priv->rxqs_n;
730 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
731 unsigned int count = 0;
732 struct rte_intr_handle *intr_handle = dev->intr_handle;
734 if (!dev->data->dev_conf.intr_conf.rxq)
736 mlx5_rx_intr_vec_disable(dev);
737 intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
738 if (intr_handle->intr_vec == NULL) {
740 "port %u failed to allocate memory for interrupt"
741 " vector, Rx interrupts will not be supported",
746 intr_handle->type = RTE_INTR_HANDLE_EXT;
747 for (i = 0; i != n; ++i) {
748 /* This rxq obj must not be released in this function. */
749 struct mlx5_rxq_obj *rxq_obj = mlx5_rxq_obj_get(dev, i);
754 /* Skip queues that cannot request interrupts. */
755 if (!rxq_obj || !rxq_obj->channel) {
756 /* Use invalid intr_vec[] index to disable entry. */
757 intr_handle->intr_vec[i] =
758 RTE_INTR_VEC_RXTX_OFFSET +
759 RTE_MAX_RXTX_INTR_VEC_ID;
762 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
764 "port %u too many Rx queues for interrupt"
765 " vector size (%d), Rx interrupts cannot be"
767 dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
768 mlx5_rx_intr_vec_disable(dev);
772 fd = rxq_obj->channel->fd;
773 flags = fcntl(fd, F_GETFL);
774 rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
778 "port %u failed to make Rx interrupt file"
779 " descriptor %d non-blocking for queue index"
781 dev->data->port_id, fd, i);
782 mlx5_rx_intr_vec_disable(dev);
785 intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
786 intr_handle->efds[count] = fd;
790 mlx5_rx_intr_vec_disable(dev);
792 intr_handle->nb_efd = count;
797 * Clean up Rx interrupts handler.
800 * Pointer to Ethernet device.
803 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
805 struct mlx5_priv *priv = dev->data->dev_private;
806 struct rte_intr_handle *intr_handle = dev->intr_handle;
808 unsigned int rxqs_n = priv->rxqs_n;
809 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
811 if (!dev->data->dev_conf.intr_conf.rxq)
813 if (!intr_handle->intr_vec)
815 for (i = 0; i != n; ++i) {
816 struct mlx5_rxq_ctrl *rxq_ctrl;
817 struct mlx5_rxq_data *rxq_data;
819 if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
820 RTE_MAX_RXTX_INTR_VEC_ID)
823 * Need to access directly the queue to release the reference
824 * kept in mlx5_rx_intr_vec_enable().
826 rxq_data = (*priv->rxqs)[i];
827 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
829 mlx5_rxq_obj_release(rxq_ctrl->obj);
832 rte_intr_free_epoll_fd(intr_handle);
833 if (intr_handle->intr_vec)
834 free(intr_handle->intr_vec);
835 intr_handle->nb_efd = 0;
836 intr_handle->intr_vec = NULL;
840 * MLX5 CQ notification.
843 * Pointer to receive queue structure.
845 * Sequence number per receive queue.
848 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
851 uint32_t doorbell_hi;
853 void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
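/*
 * The high word of the arming doorbell combines the sequence number and
 * the CQ consumer index, the low word is the CQ number; the arm record in
 * the CQ doorbell area is updated before writing the 64-bit UAR register.
 */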
855 sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
856 doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
857 doorbell = (uint64_t)doorbell_hi << 32;
858 doorbell |= rxq->cqn;
859 rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
860 mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
861 cq_db_reg, rxq->uar_lock_cq);
865 * DPDK callback for Rx queue interrupt enable.
868 * Pointer to Ethernet device structure.
873 * 0 on success, a negative errno value otherwise and rte_errno is set.
876 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
878 struct mlx5_priv *priv = dev->data->dev_private;
879 struct mlx5_rxq_data *rxq_data;
880 struct mlx5_rxq_ctrl *rxq_ctrl;
882 rxq_data = (*priv->rxqs)[rx_queue_id];
887 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
889 struct mlx5_rxq_obj *rxq_obj;
891 rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
896 mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
897 mlx5_rxq_obj_release(rxq_obj);
903 * DPDK callback for Rx queue interrupt disable.
906 * Pointer to Ethernet device structure.
911 * 0 on success, a negative errno value otherwise and rte_errno is set.
914 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
916 struct mlx5_priv *priv = dev->data->dev_private;
917 struct mlx5_rxq_data *rxq_data;
918 struct mlx5_rxq_ctrl *rxq_ctrl;
919 struct mlx5_rxq_obj *rxq_obj = NULL;
920 struct ibv_cq *ev_cq;
924 rxq_data = (*priv->rxqs)[rx_queue_id];
929 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
932 rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
937 ret = mlx5_glue->get_cq_event(rxq_obj->channel, &ev_cq, &ev_ctx);
938 if (ret || ev_cq != rxq_obj->cq) {
942 rxq_data->cq_arm_sn++;
943 mlx5_glue->ack_cq_events(rxq_obj->cq, 1);
944 mlx5_rxq_obj_release(rxq_obj);
947 ret = rte_errno; /* Save rte_errno before cleanup. */
949 mlx5_rxq_obj_release(rxq_obj);
950 DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
951 dev->data->port_id, rx_queue_id);
952 rte_errno = ret; /* Restore rte_errno. */
957 * Create a CQ Verbs object.
960 * Pointer to Ethernet device.
962 * Pointer to device private data.
964 * Pointer to Rx queue data.
966 * Number of CQEs in CQ.
968 * Pointer to Rx queue object data.
971 * The Verbs object initialised, NULL otherwise and rte_errno is set.
973 static struct ibv_cq *
974 mlx5_ibv_cq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
975 struct mlx5_rxq_data *rxq_data,
976 unsigned int cqe_n, struct mlx5_rxq_obj *rxq_obj)
979 struct ibv_cq_init_attr_ex ibv;
980 struct mlx5dv_cq_init_attr mlx5;
983 cq_attr.ibv = (struct ibv_cq_init_attr_ex){
985 .channel = rxq_obj->channel,
988 cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
991 if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
993 cq_attr.mlx5.comp_mask |=
994 MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
995 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
996 cq_attr.mlx5.cqe_comp_res_format =
997 mlx5_rxq_mprq_enabled(rxq_data) ?
998 MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
999 MLX5DV_CQE_RES_FORMAT_HASH;
1001 cq_attr.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
1004 * For vectorized Rx, it must not be doubled in order to
1005 * make cq_ci and rq_ci aligned.
1007 if (mlx5_rxq_check_vec_support(rxq_data) < 0)
1008 cq_attr.ibv.cqe *= 2;
1009 } else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
1011 "port %u Rx CQE compression is disabled for HW"
1013 dev->data->port_id);
1014 } else if (priv->config.cqe_comp && rxq_data->lro) {
1016 "port %u Rx CQE compression is disabled for LRO",
1017 dev->data->port_id);
1019 #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
1020 if (priv->config.cqe_pad) {
1021 cq_attr.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
1022 cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
1025 return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(priv->sh->ctx,
1031 * Create a WQ Verbs object.
1034 * Pointer to Ethernet device.
1036 * Pointer to device private data.
1038 * Pointer to Rx queue data.
1040 * Queue index in DPDK Rx queue array
1042 * Number of WQEs in WQ.
1044 * Pointer to Rx queue object data.
1047 * The Verbs object initialised, NULL otherwise and rte_errno is set.
1049 static struct ibv_wq *
1050 mlx5_ibv_wq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
1051 struct mlx5_rxq_data *rxq_data, uint16_t idx,
1052 unsigned int wqe_n, struct mlx5_rxq_obj *rxq_obj)
1055 struct ibv_wq_init_attr ibv;
1056 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
1057 struct mlx5dv_wq_init_attr mlx5;
1061 wq_attr.ibv = (struct ibv_wq_init_attr){
1062 .wq_context = NULL, /* Could be useful in the future. */
1063 .wq_type = IBV_WQT_RQ,
1064 /* Max number of outstanding WRs. */
1065 .max_wr = wqe_n >> rxq_data->sges_n,
1066 /* Max number of scatter/gather elements in a WR. */
1067 .max_sge = 1 << rxq_data->sges_n,
1070 .comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING | 0,
1071 .create_flags = (rxq_data->vlan_strip ?
1072 IBV_WQ_FLAGS_CVLAN_STRIPPING : 0),
1074 /* By default, FCS (CRC) is stripped by hardware. */
1075 if (rxq_data->crc_present) {
1076 wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
1077 wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
1079 if (priv->config.hw_padding) {
1080 #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
1081 wq_attr.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
1082 wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
1083 #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
1084 wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
1085 wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
1088 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
1089 wq_attr.mlx5 = (struct mlx5dv_wq_init_attr){
1092 if (mlx5_rxq_mprq_enabled(rxq_data)) {
1093 struct mlx5dv_striding_rq_init_attr *mprq_attr =
1094 &wq_attr.mlx5.striding_rq_attrs;
1096 wq_attr.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
1097 *mprq_attr = (struct mlx5dv_striding_rq_init_attr){
1098 .single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
1099 .single_wqe_log_num_of_strides = rxq_data->strd_num_n,
1100 .two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
1103 rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &wq_attr.ibv,
1106 rxq_obj->wq = mlx5_glue->create_wq(priv->sh->ctx, &wq_attr.ibv);
1110 * Make sure the number of WRs*SGEs matches expectations since a queue
1111 * cannot allocate more than "desc" buffers.
1113 if (wq_attr.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
1114 wq_attr.ibv.max_sge != (1u << rxq_data->sges_n)) {
1116 "port %u Rx queue %u requested %u*%u but got"
1118 dev->data->port_id, idx,
1119 wqe_n >> rxq_data->sges_n,
1120 (1 << rxq_data->sges_n),
1121 wq_attr.ibv.max_wr, wq_attr.ibv.max_sge);
1122 claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
1131 * Fill common fields of create RQ attributes structure.
1134 * Pointer to Rx queue data.
1136 * CQ number to use with this RQ.
1138 * RQ attributes structure to fill.
1141 mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
1142 struct mlx5_devx_create_rq_attr *rq_attr)
1144 rq_attr->state = MLX5_RQC_STATE_RST;
1145 rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
1147 rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
1151 * Fill common fields of DevX WQ attributes structure.
1154 * Pointer to device private data.
1156 * Pointer to Rx queue control structure.
1158 * WQ attributes structure to fill.
1161 mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
1162 struct mlx5_devx_wq_attr *wq_attr)
1164 wq_attr->end_padding_mode = priv->config.cqe_pad ?
1165 MLX5_WQ_END_PAD_MODE_ALIGN :
1166 MLX5_WQ_END_PAD_MODE_NONE;
1167 wq_attr->pd = priv->sh->pdn;
1168 wq_attr->dbr_addr = rxq_ctrl->dbr_offset;
1169 wq_attr->dbr_umem_id = rxq_ctrl->dbr_umem_id;
1170 wq_attr->dbr_umem_valid = 1;
1171 wq_attr->wq_umem_id = rxq_ctrl->wq_umem->umem_id;
1172 wq_attr->wq_umem_valid = 1;
1176 * Create a RQ object using DevX.
1179 * Pointer to Ethernet device.
1181 * Queue index in DPDK Rx queue array
1183 * CQ number to use with this RQ.
1186 * The DevX object initialised, NULL otherwise and rte_errno is set.
1188 static struct mlx5_devx_obj *
1189 mlx5_devx_rq_new(struct rte_eth_dev *dev, uint16_t idx, uint32_t cqn)
1191 struct mlx5_priv *priv = dev->data->dev_private;
1192 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1193 struct mlx5_rxq_ctrl *rxq_ctrl =
1194 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1195 struct mlx5_devx_create_rq_attr rq_attr;
1196 uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
1197 uint32_t wq_size = 0;
1198 uint32_t wqe_size = 0;
1199 uint32_t log_wqe_size = 0;
1201 struct mlx5_devx_obj *rq;
1203 memset(&rq_attr, 0, sizeof(rq_attr));
1204 /* Fill RQ attributes. */
1205 rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
1206 rq_attr.flush_in_error_en = 1;
1207 mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
1208 /* Fill WQ attributes for this RQ. */
1209 if (mlx5_rxq_mprq_enabled(rxq_data)) {
1210 rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
1212 * Number of strides in each WQE:
1213 * 512*2^single_wqe_log_num_of_strides.
1215 rq_attr.wq_attr.single_wqe_log_num_of_strides =
1216 rxq_data->strd_num_n -
1217 MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
1218 /* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
1219 rq_attr.wq_attr.single_stride_log_num_of_bytes =
1220 rxq_data->strd_sz_n -
1221 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
1222 wqe_size = sizeof(struct mlx5_wqe_mprq);
1224 rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
1225 wqe_size = sizeof(struct mlx5_wqe_data_seg);
1227 log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
1228 rq_attr.wq_attr.log_wq_stride = log_wqe_size;
1229 rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
1230 /* Calculate and allocate WQ memory space. */
1231 wqe_size = 1 << log_wqe_size; /* Round up to the next power of two. */
1232 wq_size = wqe_n * wqe_size;
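/*
 * For instance, 16-byte data segments with sges_n = 0 give 16-byte WQEs,
 * so a ring of 512 WQEs needs an 8 KiB buffer.
 */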
1233 buf = rte_calloc_socket(__func__, 1, wq_size, MLX5_WQE_BUF_ALIGNMENT,
1237 rxq_data->wqes = buf;
1238 rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
1240 if (!rxq_ctrl->wq_umem) {
1244 mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
1245 rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
1247 rxq_release_rq_resources(rxq_ctrl);
1252 * Create the Rx hairpin queue object.
1255 * Pointer to Ethernet device.
1257 * Queue index in DPDK Rx queue array
1260 * The hairpin DevX object initialised, NULL otherwise and rte_errno is set.
1262 static struct mlx5_rxq_obj *
1263 mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
1265 struct mlx5_priv *priv = dev->data->dev_private;
1266 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1267 struct mlx5_rxq_ctrl *rxq_ctrl =
1268 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1269 struct mlx5_devx_create_rq_attr attr = { 0 };
1270 struct mlx5_rxq_obj *tmpl = NULL;
1274 assert(!rxq_ctrl->obj);
1275 tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
1279 "port %u Rx queue %u cannot allocate verbs resources",
1280 dev->data->port_id, rxq_data->idx);
1284 tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN;
1285 tmpl->rxq_ctrl = rxq_ctrl;
1287 /* Workaround for hairpin startup */
1288 attr.wq_attr.log_hairpin_num_packets = log2above(32);
1289 /* Workaround for packets larger than 1KB */
1290 attr.wq_attr.log_hairpin_data_sz =
1291 priv->config.hca_attr.log_max_hairpin_wq_data_sz;
1292 tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
1296 "port %u Rx hairpin queue %u can't create rq object",
1297 dev->data->port_id, idx);
1301 DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
1302 idx, (void *)&tmpl);
1303 rte_atomic32_inc(&tmpl->refcnt);
1304 LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
1305 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1308 ret = rte_errno; /* Save rte_errno before cleanup. */
1310 mlx5_devx_cmd_destroy(tmpl->rq);
1311 rte_errno = ret; /* Restore rte_errno. */
1316 * Create the Rx queue Verbs/DevX object.
1319 * Pointer to Ethernet device.
1321 * Queue index in DPDK Rx queue array
1323 * Type of Rx queue object to create.
1326 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
1328 struct mlx5_rxq_obj *
1329 mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
1330 enum mlx5_rxq_obj_type type)
1332 struct mlx5_priv *priv = dev->data->dev_private;
1333 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1334 struct mlx5_rxq_ctrl *rxq_ctrl =
1335 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1336 struct ibv_wq_attr mod;
1338 unsigned int wqe_n = 1 << rxq_data->elts_n;
1339 struct mlx5_rxq_obj *tmpl = NULL;
1340 struct mlx5dv_cq cq_info;
1341 struct mlx5dv_rwq rwq;
1343 struct mlx5dv_obj obj;
1346 assert(!rxq_ctrl->obj);
1347 if (type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
1348 return mlx5_rxq_obj_hairpin_new(dev, idx);
1349 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
1350 priv->verbs_alloc_ctx.obj = rxq_ctrl;
1351 tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
1355 "port %u Rx queue %u cannot allocate verbs resources",
1356 dev->data->port_id, rxq_data->idx);
1361 tmpl->rxq_ctrl = rxq_ctrl;
1362 if (rxq_ctrl->irq) {
1363 tmpl->channel = mlx5_glue->create_comp_channel(priv->sh->ctx);
1364 if (!tmpl->channel) {
1365 DRV_LOG(ERR, "port %u: comp channel creation failure",
1366 dev->data->port_id);
1371 if (mlx5_rxq_mprq_enabled(rxq_data))
1372 cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
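/* With MPRQ, every stride of every WQE may produce its own CQE. */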
1375 tmpl->cq = mlx5_ibv_cq_new(dev, priv, rxq_data, cqe_n, tmpl);
1377 DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
1378 dev->data->port_id, idx);
1382 obj.cq.in = tmpl->cq;
1383 obj.cq.out = &cq_info;
1384 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ);
1389 if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
1391 "port %u wrong MLX5_CQE_SIZE environment variable"
1392 " value: it should be set to %u",
1393 dev->data->port_id, RTE_CACHE_LINE_SIZE);
1397 DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
1398 dev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr);
1399 DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
1400 dev->data->port_id, priv->sh->device_attr.orig_attr.max_sge);
1401 /* Allocate door-bell for types created with DevX. */
1402 if (tmpl->type != MLX5_RXQ_OBJ_TYPE_IBV) {
1403 struct mlx5_devx_dbr_page *dbr_page;
1406 dbr_offset = mlx5_get_dbr(dev, &dbr_page);
1409 rxq_ctrl->dbr_offset = dbr_offset;
1410 rxq_ctrl->dbr_umem_id = dbr_page->umem->umem_id;
1411 rxq_ctrl->dbr_umem_id_valid = 1;
1412 rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
1413 (uintptr_t)rxq_ctrl->dbr_offset);
1415 if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV) {
1416 tmpl->wq = mlx5_ibv_wq_new(dev, priv, rxq_data, idx, wqe_n,
1419 DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
1420 dev->data->port_id, idx);
1424 /* Change queue state to ready. */
1425 mod = (struct ibv_wq_attr){
1426 .attr_mask = IBV_WQ_ATTR_STATE,
1427 .wq_state = IBV_WQS_RDY,
1429 ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
1432 "port %u Rx queue %u WQ state to IBV_WQS_RDY"
1433 " failed", dev->data->port_id, idx);
1437 obj.rwq.in = tmpl->wq;
1439 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_RWQ);
1444 rxq_data->wqes = rwq.buf;
1445 rxq_data->rq_db = rwq.dbrec;
1446 } else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
1447 struct mlx5_devx_modify_rq_attr rq_attr;
1449 memset(&rq_attr, 0, sizeof(rq_attr));
1450 tmpl->rq = mlx5_devx_rq_new(dev, idx, cq_info.cqn);
1452 DRV_LOG(ERR, "port %u Rx queue %u RQ creation failure",
1453 dev->data->port_id, idx);
1457 /* Change queue state to ready. */
1458 rq_attr.rq_state = MLX5_RQC_STATE_RST;
1459 rq_attr.state = MLX5_RQC_STATE_RDY;
1460 ret = mlx5_devx_cmd_modify_rq(tmpl->rq, &rq_attr);
1464 /* Fill the rings. */
1465 rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
1466 rxq_data->cq_db = cq_info.dbrec;
1467 rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
1468 rxq_data->cq_uar = cq_info.cq_uar;
1469 rxq_data->cqn = cq_info.cqn;
1470 rxq_data->cq_arm_sn = 0;
1471 mlx5_rxq_initialize(rxq_data);
1472 rxq_data->cq_ci = 0;
1473 DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
1474 idx, (void *)&tmpl);
1475 rte_atomic32_inc(&tmpl->refcnt);
1476 LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
1477 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1481 ret = rte_errno; /* Save rte_errno before cleanup. */
1482 if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV && tmpl->wq)
1483 claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
1484 else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ && tmpl->rq)
1485 claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
1487 claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
1489 claim_zero(mlx5_glue->destroy_comp_channel
1492 rte_errno = ret; /* Restore rte_errno. */
1494 if (type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
1495 rxq_release_rq_resources(rxq_ctrl);
1496 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1501 * Verify the Rx queue objects list is empty.
1504 * Pointer to Ethernet device.
1507 * The number of objects not released.
1510 mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
1512 struct mlx5_priv *priv = dev->data->dev_private;
1514 struct mlx5_rxq_obj *rxq_obj;
1516 LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
1517 DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
1518 dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
1525 * Callback function to initialize mbufs for Multi-Packet RQ.
1528 mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
1529 void *_m, unsigned int i __rte_unused)
1531 struct mlx5_mprq_buf *buf = _m;
1532 struct rte_mbuf_ext_shared_info *shinfo;
1533 unsigned int strd_n = (unsigned int)(uintptr_t)opaque_arg;
1536 memset(_m, 0, sizeof(*buf));
1538 rte_atomic16_set(&buf->refcnt, 1);
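/*
 * Each stride gets its own shared-info so it can be attached to an mbuf
 * as an external buffer and released independently through free_cb.
 */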
1539 for (j = 0; j != strd_n; ++j) {
1540 shinfo = &buf->shinfos[j];
1541 shinfo->free_cb = mlx5_mprq_buf_free_cb;
1542 shinfo->fcb_opaque = buf;
1547 * Free mempool of Multi-Packet RQ.
1550 * Pointer to Ethernet device.
1553 * 0 on success, negative errno value on failure.
1556 mlx5_mprq_free_mp(struct rte_eth_dev *dev)
1558 struct mlx5_priv *priv = dev->data->dev_private;
1559 struct rte_mempool *mp = priv->mprq_mp;
1564 DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
1565 dev->data->port_id, mp->name);
1567 * If a buffer in the pool has been externally attached to an mbuf and is
1568 * still in use by the application, destroying the Rx queue can corrupt
1569 * that packet. This is unlikely, but it can happen if the application
1570 * dynamically creates and destroys queues while holding Rx packets.
1572 * TODO: This is unavoidable for now because the mempool for Multi-Packet
1573 * RQ isn't provided by the application but managed by the PMD.
1575 if (!rte_mempool_full(mp)) {
1577 "port %u mempool for Multi-Packet RQ is still in use",
1578 dev->data->port_id);
1582 rte_mempool_free(mp);
1583 /* Unset mempool for each Rx queue. */
1584 for (i = 0; i != priv->rxqs_n; ++i) {
1585 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1589 rxq->mprq_mp = NULL;
1591 priv->mprq_mp = NULL;
1596 * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
1597 * mempool. If already allocated, reuse it if there are enough elements.
1598 * Otherwise, resize it.
1601 * Pointer to Ethernet device.
1604 * 0 on success, negative errno value on failure.
1607 mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
1609 struct mlx5_priv *priv = dev->data->dev_private;
1610 struct rte_mempool *mp = priv->mprq_mp;
1611 char name[RTE_MEMPOOL_NAMESIZE];
1612 unsigned int desc = 0;
1613 unsigned int buf_len;
1614 unsigned int obj_num;
1615 unsigned int obj_size;
1616 unsigned int strd_num_n = 0;
1617 unsigned int strd_sz_n = 0;
1619 unsigned int n_ibv = 0;
1621 if (!mlx5_mprq_enabled(dev))
1623 /* Count the total number of descriptors configured. */
1624 for (i = 0; i != priv->rxqs_n; ++i) {
1625 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1626 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1627 (rxq, struct mlx5_rxq_ctrl, rxq);
1629 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1632 desc += 1 << rxq->elts_n;
1633 /* Get the max number of strides. */
1634 if (strd_num_n < rxq->strd_num_n)
1635 strd_num_n = rxq->strd_num_n;
1636 /* Get the max size of a stride. */
1637 if (strd_sz_n < rxq->strd_sz_n)
1638 strd_sz_n = rxq->strd_sz_n;
1640 assert(strd_num_n && strd_sz_n);
1641 buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
1642 obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
1643 sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
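/*
 * For example, 2^9 strides of 2^11 bytes give a 1 MiB data area per
 * object, plus one shared-info structure per stride and the headroom.
 */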
1645 * Received packets can be either memcpy'd or externally referenced. When a
1646 * packet is attached to an mbuf as an external buffer, it isn't possible to
1647 * predict how the application will queue the buffers, so the exact number
1648 * of buffers needed can't be pre-allocated; instead, enough buffers are
1649 * prepared speculatively.
1651 * In the data path, if this mempool is depleted, the PMD will memcpy
1652 * received packets into the buffers provided by the application (rxq->mp)
1653 * until this mempool has free entries again.
1656 obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * n_ibv;
1658 * rte_mempool_create_empty() has a sanity check that refuses a cache size
1659 * that is too large compared to the number of elements.
1660 * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so the constant 2 is
1661 * used here instead.
1663 obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
1664 /* Check whether a mempool is already allocated and if it can be reused. */
1665 if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
1666 DRV_LOG(DEBUG, "port %u mempool %s is being reused",
1667 dev->data->port_id, mp->name);
1670 } else if (mp != NULL) {
1671 DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
1672 dev->data->port_id, mp->name);
1674 * If the free fails, the mempool may still be in use; there is no choice
1675 * but to keep using the existing one. On buffer underrun,
1676 * packets will be memcpy'd instead of being attached as external buffers.
1679 if (mlx5_mprq_free_mp(dev)) {
1680 if (mp->elt_size >= obj_size)
1686 snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
1687 mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
1688 0, NULL, NULL, mlx5_mprq_buf_init,
1689 (void *)(uintptr_t)(1 << strd_num_n),
1690 dev->device->numa_node, 0);
1693 "port %u failed to allocate a mempool for"
1694 " Multi-Packet RQ, count=%u, size=%u",
1695 dev->data->port_id, obj_num, obj_size);
1701 /* Set mempool for each Rx queue. */
1702 for (i = 0; i != priv->rxqs_n; ++i) {
1703 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1704 struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1705 (rxq, struct mlx5_rxq_ctrl, rxq);
1707 if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1711 DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
1712 dev->data->port_id);
1716 #define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
1717 sizeof(struct rte_vlan_hdr) * 2 + \
1718 sizeof(struct rte_ipv6_hdr)))
1719 #define MAX_TCP_OPTION_SIZE 40u
1720 #define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
1721 sizeof(struct rte_tcp_hdr) + \
1722 MAX_TCP_OPTION_SIZE))
1725 * Adjust the maximum LRO message size.
1728 * Pointer to Ethernet device.
1731 * @param max_lro_size
1732 * The maximum size for LRO packet.
1735 mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
1736 uint32_t max_lro_size)
1738 struct mlx5_priv *priv = dev->data->dev_private;
1740 if (priv->config.hca_attr.lro_max_msg_sz_mode ==
1741 MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
1742 MLX5_MAX_TCP_HDR_OFFSET)
1743 max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
1744 max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
1745 assert(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
1746 max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE;
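/*
 * The size is kept in units of MLX5_LRO_SEG_CHUNK_SIZE from here on; the
 * debug message below converts it back to bytes.
 */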
1747 if (priv->max_lro_msg_size)
1748 priv->max_lro_msg_size =
1749 RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size);
1751 priv->max_lro_msg_size = max_lro_size;
1753 "port %u Rx Queue %u max LRO message size adjusted to %u bytes",
1754 dev->data->port_id, idx,
1755 priv->max_lro_msg_size * MLX5_LRO_SEG_CHUNK_SIZE);
1759 * Create a DPDK Rx queue.
1762 * Pointer to Ethernet device.
1766 * Number of descriptors to configure in queue.
1768 * NUMA socket on which memory must be allocated.
1771 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1773 struct mlx5_rxq_ctrl *
1774 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1775 unsigned int socket, const struct rte_eth_rxconf *conf,
1776 struct rte_mempool *mp)
1778 struct mlx5_priv *priv = dev->data->dev_private;
1779 struct mlx5_rxq_ctrl *tmpl;
1780 unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
1781 unsigned int mprq_stride_size;
1782 struct mlx5_dev_config *config = &priv->config;
1783 unsigned int strd_headroom_en;
1785 * Always allocate extra slots, even if eventually
1786 * the vector Rx will not be used.
1789 desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
1790 uint64_t offloads = conf->offloads |
1791 dev->data->dev_conf.rxmode.offloads;
1792 unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
1793 const int mprq_en = mlx5_check_mprq_support(dev) > 0;
1794 unsigned int max_rx_pkt_len = lro_on_queue ?
1795 dev->data->dev_conf.rxmode.max_lro_pkt_size :
1796 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1797 unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
1798 RTE_PKTMBUF_HEADROOM;
1799 unsigned int max_lro_size = 0;
1800 unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
1802 if (non_scatter_min_mbuf_size > mb_len && !(offloads &
1803 DEV_RX_OFFLOAD_SCATTER)) {
1804 DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
1805 " configured and there is not enough mbuf space (%u) to contain "
1806 "the maximum RX packet length (%u) with head-room (%u)",
1807 dev->data->port_id, idx, mb_len, max_rx_pkt_len,
1808 RTE_PKTMBUF_HEADROOM);
1812 tmpl = rte_calloc_socket("RXQ", 1,
1814 desc_n * sizeof(struct rte_mbuf *),
1820 tmpl->type = MLX5_RXQ_TYPE_STANDARD;
1821 if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
1822 MLX5_MR_BTREE_CACHE_N, socket)) {
1823 /* rte_errno is already set. */
1826 tmpl->socket = socket;
1827 if (dev->data->dev_conf.intr_conf.rxq)
1830 * An LRO packet may consume all the stride memory, hence we cannot
1831 * guarantee head-room near the packet memory in the stride.
1832 * In this case scatter is, for sure, enabled and an empty mbuf may be
1833 * added at the start for the head-room.
1835 if (lro_on_queue && RTE_PKTMBUF_HEADROOM > 0 &&
1836 non_scatter_min_mbuf_size > mb_len) {
1837 strd_headroom_en = 0;
1838 mprq_stride_size = RTE_MIN(max_rx_pkt_len,
1839 1u << config->mprq.max_stride_size_n);
1841 strd_headroom_en = 1;
1842 mprq_stride_size = non_scatter_min_mbuf_size;
1845 * This Rx queue can be configured as a Multi-Packet RQ if all of the
1846 * following conditions are met:
1847 * - MPRQ is enabled.
1848 * - The number of descs is more than the number of strides.
1849 * - max_rx_pkt_len plus overhead is less than the max size of a
1851 * Otherwise, enable Rx scatter if necessary.
1854 desc > (1U << config->mprq.stride_num_n) &&
1855 mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) {
1856 /* TODO: Rx scatter isn't supported yet. */
1857 tmpl->rxq.sges_n = 0;
1858 /* Trim the number of descs needed. */
1859 desc >>= config->mprq.stride_num_n;
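/* E.g. 1024 descriptors with 2^9 strides per WQE collapse into 2 WQEs. */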
1860 tmpl->rxq.strd_num_n = config->mprq.stride_num_n;
1861 tmpl->rxq.strd_sz_n = RTE_MAX(log2above(mprq_stride_size),
1862 config->mprq.min_stride_size_n);
1863 tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
1864 tmpl->rxq.strd_headroom_en = strd_headroom_en;
1865 tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
1866 config->mprq.max_memcpy_len);
1867 max_lro_size = RTE_MIN(max_rx_pkt_len,
1868 (1u << tmpl->rxq.strd_num_n) *
1869 (1u << tmpl->rxq.strd_sz_n));
1871 "port %u Rx queue %u: Multi-Packet RQ is enabled"
1872 " strd_num_n = %u, strd_sz_n = %u",
1873 dev->data->port_id, idx,
1874 tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
1875 } else if (max_rx_pkt_len <= first_mb_free_size) {
1876 tmpl->rxq.sges_n = 0;
1877 max_lro_size = max_rx_pkt_len;
1878 } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
1879 unsigned int size = non_scatter_min_mbuf_size;
1880 unsigned int sges_n;
1882 if (lro_on_queue && first_mb_free_size <
1883 MLX5_MAX_LRO_HEADER_FIX) {
1884 DRV_LOG(ERR, "Not enough space in the first segment(%u)"
1885 " to include the max header size(%u) for LRO",
1886 first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
1887 rte_errno = ENOTSUP;
1891 * Determine the number of SGEs needed for a full packet
1892 * and round it to the next power of two.
1894 sges_n = log2above((size / mb_len) + !!(size % mb_len));
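/*
 * E.g. a packet spanning five mbufs is rounded up to eight SGEs
 * (sges_n = 3).
 */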
1895 if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
1897 "port %u too many SGEs (%u) needed to handle"
1898 " requested maximum packet size %u, the maximum"
1899 " supported are %u", dev->data->port_id,
1900 1 << sges_n, max_rx_pkt_len,
1901 1u << MLX5_MAX_LOG_RQ_SEGS);
1902 rte_errno = ENOTSUP;
1905 tmpl->rxq.sges_n = sges_n;
1906 max_lro_size = max_rx_pkt_len;
1908 if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
1910 "port %u MPRQ is requested but cannot be enabled"
1911 " (requested: desc = %u, stride_sz = %u,"
1912 " supported: min_stride_num = %u, max_stride_sz = %u).",
1913 dev->data->port_id, desc, mprq_stride_size,
1914 (1 << config->mprq.stride_num_n),
1915 (1 << config->mprq.max_stride_size_n));
1916 DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
1917 dev->data->port_id, 1 << tmpl->rxq.sges_n);
1918 if (desc % (1 << tmpl->rxq.sges_n)) {
1920 "port %u number of Rx queue descriptors (%u) is not a"
1921 " multiple of SGEs per packet (%u)",
1924 1 << tmpl->rxq.sges_n);
1928 mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
1929 /* Toggle RX checksum offload if hardware supports it. */
1930 tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
1931 tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
1932 /* Configure VLAN stripping. */
1933 tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1934 /* By default, FCS (CRC) is stripped by hardware. */
1935 tmpl->rxq.crc_present = 0;
1936 tmpl->rxq.lro = lro_on_queue;
1937 if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
1938 if (config->hw_fcs_strip) {
1940 * RQs used for LRO-enabled TIRs should not be
1941 * configured to scatter the FCS.
1945 "port %u CRC stripping has been "
1946 "disabled but will still be performed "
1947 "by hardware, because LRO is enabled",
1948 dev->data->port_id);
1950 tmpl->rxq.crc_present = 1;
1953 "port %u CRC stripping has been disabled but will"
1954 " still be performed by hardware, make sure MLNX_OFED"
1955 " and firmware are up to date",
1956 dev->data->port_id);
1960 "port %u CRC stripping is %s, %u bytes will be subtracted from"
1961 " incoming frames to hide it",
1963 tmpl->rxq.crc_present ? "disabled" : "enabled",
1964 tmpl->rxq.crc_present << 2);
1966 tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
1967 (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
1968 tmpl->rxq.port_id = dev->data->port_id;
1971 tmpl->rxq.elts_n = log2above(desc);
1972 tmpl->rxq.rq_repl_thresh =
1973 MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
1975 (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
1977 tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq;
1979 tmpl->rxq.idx = idx;
1980 rte_atomic32_inc(&tmpl->refcnt);
1981 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1989 * Create a DPDK Rx hairpin queue.
1992 * Pointer to Ethernet device.
1996 * Number of descriptors to configure in queue.
1997 * @param hairpin_conf
1998 * The hairpin binding configuration.
2001 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
2003 struct mlx5_rxq_ctrl *
2004 mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
2005 const struct rte_eth_hairpin_conf *hairpin_conf)
2007 struct mlx5_priv *priv = dev->data->dev_private;
2008 struct mlx5_rxq_ctrl *tmpl;
2010 tmpl = rte_calloc_socket("RXQ", 1, sizeof(*tmpl), 0, SOCKET_ID_ANY);
2015 tmpl->type = MLX5_RXQ_TYPE_HAIRPIN;
2016 tmpl->socket = SOCKET_ID_ANY;
2017 tmpl->rxq.rss_hash = 0;
2018 tmpl->rxq.port_id = dev->data->port_id;
2020 tmpl->rxq.mp = NULL;
2021 tmpl->rxq.elts_n = log2above(desc);
2022 tmpl->rxq.elts = NULL;
2023 tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
2024 tmpl->hairpin_conf = *hairpin_conf;
2025 tmpl->rxq.idx = idx;
2026 rte_atomic32_inc(&tmpl->refcnt);
2027 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
2035 * Pointer to Ethernet device.
2040 * A pointer to the queue if it exists, NULL otherwise.
2042 struct mlx5_rxq_ctrl *
2043 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
2045 struct mlx5_priv *priv = dev->data->dev_private;
2046 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
2048 if ((*priv->rxqs)[idx]) {
2049 rxq_ctrl = container_of((*priv->rxqs)[idx],
2050 struct mlx5_rxq_ctrl,
2052 mlx5_rxq_obj_get(dev, idx);
2053 rte_atomic32_inc(&rxq_ctrl->refcnt);
2059 * Release a Rx queue.
2062 * Pointer to Ethernet device.
2067 * 1 while a reference on it exists, 0 when freed.
2070 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
2072 struct mlx5_priv *priv = dev->data->dev_private;
2073 struct mlx5_rxq_ctrl *rxq_ctrl;
2075 if (!(*priv->rxqs)[idx])
2077 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
2078 assert(rxq_ctrl->priv);
2079 if (rxq_ctrl->obj && !mlx5_rxq_obj_release(rxq_ctrl->obj))
2080 rxq_ctrl->obj = NULL;
2081 if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
2082 if (rxq_ctrl->dbr_umem_id_valid)
2083 claim_zero(mlx5_release_dbr(dev, rxq_ctrl->dbr_umem_id,
2084 rxq_ctrl->dbr_offset));
2085 if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
2086 mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
2087 LIST_REMOVE(rxq_ctrl, next);
2089 (*priv->rxqs)[idx] = NULL;
2096 * Verify the Rx Queue list is empty.
2099 * Pointer to Ethernet device.
2102 * The number of objects not released.
2105 mlx5_rxq_verify(struct rte_eth_dev *dev)
2107 struct mlx5_priv *priv = dev->data->dev_private;
2108 struct mlx5_rxq_ctrl *rxq_ctrl;
2111 LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
2112 DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
2113 dev->data->port_id, rxq_ctrl->rxq.idx);
2120 * Get a Rx queue type.
2123 * Pointer to Ethernet device.
2128 * The Rx queue type.
2131 mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
2133 struct mlx5_priv *priv = dev->data->dev_private;
2134 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
2136 if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) {
2137 rxq_ctrl = container_of((*priv->rxqs)[idx],
2138 struct mlx5_rxq_ctrl,
2140 return rxq_ctrl->type;
2142 return MLX5_RXQ_TYPE_UNDEFINED;
2146 * Create an indirection table.
2149 * Pointer to Ethernet device.
2151 * Queues entering the indirection table.
2153 * Number of queues in the array.
2156 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2158 static struct mlx5_ind_table_obj *
2159 mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
2160 uint32_t queues_n, enum mlx5_ind_tbl_type type)
2162 struct mlx5_priv *priv = dev->data->dev_private;
2163 struct mlx5_ind_table_obj *ind_tbl;
2164 unsigned int i = 0, j = 0, k = 0;
2166 ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
2167 queues_n * sizeof(uint16_t), 0);
2172 ind_tbl->type = type;
2173 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
2174 const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
2175 log2above(queues_n) :
2176 log2above(priv->config.ind_table_max_size);
2177 struct ibv_wq *wq[1 << wq_n];
2179 for (i = 0; i != queues_n; ++i) {
2180 struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
2184 wq[i] = rxq->obj->wq;
2185 ind_tbl->queues[i] = queues[i];
2187 ind_tbl->queues_n = queues_n;
2188 /* Finalise indirection table. */
2189 k = i; /* Retain value of i for use in error case. */
2190 for (j = 0; k != (unsigned int)(1 << wq_n); ++k, ++j)
2192 ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
2194 &(struct ibv_rwq_ind_table_init_attr){
2195 .log_ind_tbl_size = wq_n,
2199 if (!ind_tbl->ind_table) {
2203 } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
2204 struct mlx5_devx_rqt_attr *rqt_attr = NULL;
2205 const unsigned int rqt_n =
2206 1 << (rte_is_power_of_2(queues_n) ?
2207 log2above(queues_n) :
2208 log2above(priv->config.ind_table_max_size));
2210 rqt_attr = rte_calloc(__func__, 1, sizeof(*rqt_attr) +
2211 rqt_n * sizeof(uint32_t), 0);
2213 DRV_LOG(ERR, "port %u cannot allocate RQT resources",
2214 dev->data->port_id);
2218 rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
2219 rqt_attr->rqt_actual_size = rqt_n;
2220 for (i = 0; i != queues_n; ++i) {
2221 struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
2225 rqt_attr->rq_list[i] = rxq->obj->rq->id;
2226 ind_tbl->queues[i] = queues[i];
2228 k = i; /* Retain value of i for use in error case. */
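/*
 * The table size is a power of two; when fewer queues are supplied the
 * remaining entries wrap around over the RQs already listed.
 */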
2229 for (j = 0; k != rqt_n; ++k, ++j)
2230 rqt_attr->rq_list[k] = rqt_attr->rq_list[j];
2231 ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx,
2234 if (!ind_tbl->rqt) {
2235 DRV_LOG(ERR, "port %u cannot create DevX RQT",
2236 dev->data->port_id);
2240 ind_tbl->queues_n = queues_n;
2242 rte_atomic32_inc(&ind_tbl->refcnt);
2243 LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
2246 for (j = 0; j < i; j++)
2247 mlx5_rxq_release(dev, ind_tbl->queues[j]);
2249 DEBUG("port %u cannot create indirection table", dev->data->port_id);
2254 * Get an indirection table.
2257 * Pointer to Ethernet device.
2259 * Queues entering the indirection table.
2261 * Number of queues in the array.
2264 * An indirection table if found.
2266 static struct mlx5_ind_table_obj *
2267 mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
2270 struct mlx5_priv *priv = dev->data->dev_private;
2271 struct mlx5_ind_table_obj *ind_tbl;
2273 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
2274 if ((ind_tbl->queues_n == queues_n) &&
2275 (memcmp(ind_tbl->queues, queues,
2276 ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
2283 rte_atomic32_inc(&ind_tbl->refcnt);
2284 for (i = 0; i != ind_tbl->queues_n; ++i)
2285 mlx5_rxq_get(dev, ind_tbl->queues[i]);
2291 * Release an indirection table.
2294 * Pointer to Ethernet device.
2296 * Indirection table to release.
2299 * 1 while a reference on it exists, 0 when freed.
2302 mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
2303 struct mlx5_ind_table_obj *ind_tbl)
2307 if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {
2308 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV)
2309 claim_zero(mlx5_glue->destroy_rwq_ind_table
2310 (ind_tbl->ind_table));
2311 else if (ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX)
2312 claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
2314 for (i = 0; i != ind_tbl->queues_n; ++i)
2315 claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
2316 if (!rte_atomic32_read(&ind_tbl->refcnt)) {
2317 LIST_REMOVE(ind_tbl, next);
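/*
 * claim_zero()/claim_nonzero() come from mlx5_utils.h: in debug builds they
 * assert on the value, in release builds they reduce to the bare expression,
 * so the destroy/release calls above are always evaluated. Roughly (a sketch
 * of the idea, not the exact definition):
 *
 *   #ifdef NDEBUG
 *   #define claim_zero(x) (x)
 *   #else
 *   #define claim_zero(x) assert((x) == 0)
 *   #endif
 */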
2325 * Verify the indirection table list is empty.
2328 * Pointer to Ethernet device.
2331 * The number of objects not released.
2334 mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
2336 struct mlx5_priv *priv = dev->data->dev_private;
2337 struct mlx5_ind_table_obj *ind_tbl;
2340 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
2342 "port %u indirection table obj %p still referenced",
2343 dev->data->port_id, (void *)ind_tbl);
2350 * Create an Rx Hash queue.
2353 * Pointer to Ethernet device.
2355 * RSS key for the Rx hash queue.
2356 * @param rss_key_len
2357 * RSS key length.
2358 * @param hash_fields
2359 * Verbs protocol hash field to make the RSS on.
2361 * Queues to be included in the hash Rx queue. If hash_fields is empty,
2362 * only the first queue index is used for the indirection table.
2369 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2372 mlx5_hrxq_new(struct rte_eth_dev *dev,
2373 const uint8_t *rss_key, uint32_t rss_key_len,
2374 uint64_t hash_fields,
2375 const uint16_t *queues, uint32_t queues_n,
2376 int tunnel __rte_unused)
2378 struct mlx5_priv *priv = dev->data->dev_private;
2379 struct mlx5_hrxq *hrxq;
2380 struct ibv_qp *qp = NULL;
2381 struct mlx5_ind_table_obj *ind_tbl;
2383 struct mlx5_devx_obj *tir = NULL;
2384 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[0]];
2385 struct mlx5_rxq_ctrl *rxq_ctrl =
2386 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
2388 queues_n = hash_fields ? queues_n : 1;
2389 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2391 enum mlx5_ind_tbl_type type;
2393 type = rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV ?
2394 MLX5_IND_TBL_TYPE_IBV : MLX5_IND_TBL_TYPE_DEVX;
2395 ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n, type);
2401 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
2402 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
2403 struct mlx5dv_qp_init_attr qp_init_attr;
2405 memset(&qp_init_attr, 0, sizeof(qp_init_attr));
2407 qp_init_attr.comp_mask =
2408 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
2409 qp_init_attr.create_flags =
2410 MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
2412 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2413 if (dev->data->dev_conf.lpbk_mode) {
2414 /*
2415 * Allow packets sent from the NIC to loop back
2416 * without a source MAC check.
2417 */
2418 qp_init_attr.comp_mask |=
2419 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
2420 qp_init_attr.create_flags |=
2421 MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
2424 qp = mlx5_glue->dv_create_qp
2426 &(struct ibv_qp_init_attr_ex){
2427 .qp_type = IBV_QPT_RAW_PACKET,
2429 IBV_QP_INIT_ATTR_PD |
2430 IBV_QP_INIT_ATTR_IND_TABLE |
2431 IBV_QP_INIT_ATTR_RX_HASH,
2432 .rx_hash_conf = (struct ibv_rx_hash_conf){
2434 IBV_RX_HASH_FUNC_TOEPLITZ,
2435 .rx_hash_key_len = rss_key_len,
2437 (void *)(uintptr_t)rss_key,
2438 .rx_hash_fields_mask = hash_fields,
2440 .rwq_ind_tbl = ind_tbl->ind_table,
2445 qp = mlx5_glue->create_qp_ex
2447 &(struct ibv_qp_init_attr_ex){
2448 .qp_type = IBV_QPT_RAW_PACKET,
2450 IBV_QP_INIT_ATTR_PD |
2451 IBV_QP_INIT_ATTR_IND_TABLE |
2452 IBV_QP_INIT_ATTR_RX_HASH,
2453 .rx_hash_conf = (struct ibv_rx_hash_conf){
2455 IBV_RX_HASH_FUNC_TOEPLITZ,
2456 .rx_hash_key_len = rss_key_len,
2458 (void *)(uintptr_t)rss_key,
2459 .rx_hash_fields_mask = hash_fields,
2461 .rwq_ind_tbl = ind_tbl->ind_table,
2469 } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
2470 struct mlx5_devx_tir_attr tir_attr;
2474 /* Enable TIR LRO only if all the queues were configured for LRO. */
2475 for (i = 0; i < queues_n; ++i) {
2476 if (!(*priv->rxqs)[queues[i]]->lro) {
2481 memset(&tir_attr, 0, sizeof(tir_attr));
2482 tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
2483 tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
2484 tir_attr.tunneled_offload_en = !!tunnel;
2485 /* If needed, translate hash_fields bitmap to PRM format. */
2487 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
2488 struct mlx5_rx_hash_field_select *rx_hash_field_select =
2489 hash_fields & IBV_RX_HASH_INNER ?
2490 &tir_attr.rx_hash_field_selector_inner :
2491 &tir_attr.rx_hash_field_selector_outer;
2493 struct mlx5_rx_hash_field_select *rx_hash_field_select =
2494 &tir_attr.rx_hash_field_selector_outer;
2497 /* 1 bit: 0: IPv4, 1: IPv6. */
2498 rx_hash_field_select->l3_prot_type =
2499 !!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
2500 /* 1 bit: 0: TCP, 1: UDP. */
2501 rx_hash_field_select->l4_prot_type =
2502 !!(hash_fields & MLX5_UDP_IBV_RX_HASH);
2503 /* Bitmask which sets which fields to use in RX Hash. */
2504 rx_hash_field_select->selected_fields =
2505 ((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
2506 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
2507 ((!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
2508 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP) |
2509 ((!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
2510 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT) |
2511 ((!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
2512 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT);
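/*
 * For example (illustrative values): with hash_fields covering IPv4
 * source/destination addresses plus TCP source/destination ports, the
 * translation above yields l3_prot_type == 0 (IPv4), l4_prot_type == 0 (TCP)
 * and:
 *
 *   selected_fields =
 *       (1 << MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
 *       (1 << MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP) |
 *       (1 << MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT) |
 *       (1 << MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT);
 */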
2514 if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
2515 tir_attr.transport_domain = priv->sh->td->id;
2516 else
2517 tir_attr.transport_domain = priv->sh->tdn;
2518 memcpy(tir_attr.rx_hash_toeplitz_key, rss_key, rss_key_len);
2519 tir_attr.indirect_table = ind_tbl->rqt->id;
2520 if (dev->data->dev_conf.lpbk_mode)
2521 tir_attr.self_lb_block =
2522 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
2524 tir_attr.lro_timeout_period_usecs =
2525 priv->config.lro.timeout;
2526 tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
2527 tir_attr.lro_enable_mask =
2528 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
2529 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
2531 tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
2533 DRV_LOG(ERR, "port %u cannot create DevX TIR",
2534 dev->data->port_id);
2539 hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
2542 hrxq->ind_table = ind_tbl;
2543 if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
2545 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2547 mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
2548 if (!hrxq->action) {
2553 } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
2555 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2556 hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir
2558 if (!hrxq->action) {
2564 hrxq->rss_key_len = rss_key_len;
2565 hrxq->hash_fields = hash_fields;
2566 memcpy(hrxq->rss_key, rss_key, rss_key_len);
2567 rte_atomic32_inc(&hrxq->refcnt);
2568 LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
2571 err = rte_errno; /* Save rte_errno before cleanup. */
2572 mlx5_ind_table_obj_release(dev, ind_tbl);
2574 claim_zero(mlx5_glue->destroy_qp(qp));
2576 claim_zero(mlx5_devx_cmd_destroy(tir));
2577 rte_errno = err; /* Restore rte_errno. */
2582 * Get an Rx Hash queue.
2585 * Pointer to Ethernet device.
2587 * RSS configuration for the Rx hash queue.
2589 * Queues to be included in the hash Rx queue. If hash_fields is empty,
2590 * only the first queue index is used for the indirection table.
2595 * A hash Rx queue on success.
2598 mlx5_hrxq_get(struct rte_eth_dev *dev,
2599 const uint8_t *rss_key, uint32_t rss_key_len,
2600 uint64_t hash_fields,
2601 const uint16_t *queues, uint32_t queues_n)
2603 struct mlx5_priv *priv = dev->data->dev_private;
2604 struct mlx5_hrxq *hrxq;
2606 queues_n = hash_fields ? queues_n : 1;
2607 LIST_FOREACH(hrxq, &priv->hrxqs, next) {
2608 struct mlx5_ind_table_obj *ind_tbl;
2610 if (hrxq->rss_key_len != rss_key_len)
2612 if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
2614 if (hrxq->hash_fields != hash_fields)
2616 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2619 if (ind_tbl != hrxq->ind_table) {
2620 mlx5_ind_table_obj_release(dev, ind_tbl);
2623 rte_atomic32_inc(&hrxq->refcnt);
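/*
 * Typical callers (e.g. the flow engine) pair these helpers: try to reuse an
 * existing hash Rx queue first and only create one on a miss. A minimal
 * sketch (hypothetical caller, error handling omitted):
 *
 *   struct mlx5_hrxq *hrxq;
 *
 *   hrxq = mlx5_hrxq_get(dev, rss_key, MLX5_RSS_HASH_KEY_LEN,
 *                        hash_fields, queues, queues_n);
 *   if (hrxq == NULL)
 *       hrxq = mlx5_hrxq_new(dev, rss_key, MLX5_RSS_HASH_KEY_LEN,
 *                            hash_fields, queues, queues_n, 0);
 *   ...
 *   mlx5_hrxq_release(dev, hrxq);
 */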
2630 * Release the hash Rx queue.
2633 * Pointer to Ethernet device.
2635 * Pointer to Hash Rx queue to release.
2638 * 1 while a reference on it exists, 0 when freed.
2641 mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
2643 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2644 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2645 mlx5_glue->destroy_flow_action(hrxq->action);
2647 if (hrxq->ind_table->type == MLX5_IND_TBL_TYPE_IBV)
2648 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2649 else /* hrxq->ind_table->type == MLX5_IND_TBL_TYPE_DEVX */
2650 claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
2651 mlx5_ind_table_obj_release(dev, hrxq->ind_table);
2652 LIST_REMOVE(hrxq, next);
2656 claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
2661 * Verify the hash Rx queue list is empty.
2664 * Pointer to Ethernet device.
2667 * The number of objects not released.
2670 mlx5_hrxq_verify(struct rte_eth_dev *dev)
2672 struct mlx5_priv *priv = dev->data->dev_private;
2673 struct mlx5_hrxq *hrxq;
2676 LIST_FOREACH(hrxq, &priv->hrxqs, next) {
2678 "port %u hash Rx queue %p still referenced",
2679 dev->data->port_id, (void *)hrxq);
2686 * Create a drop Rx queue Verbs/DevX object.
2689 * Pointer to Ethernet device.
2692 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2694 static struct mlx5_rxq_obj *
2695 mlx5_rxq_obj_drop_new(struct rte_eth_dev *dev)
2697 struct mlx5_priv *priv = dev->data->dev_private;
2698 struct ibv_context *ctx = priv->sh->ctx;
2700 struct ibv_wq *wq = NULL;
2701 struct mlx5_rxq_obj *rxq;
2703 if (priv->drop_queue.rxq)
2704 return priv->drop_queue.rxq;
2705 cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
2707 DEBUG("port %u cannot allocate CQ for drop queue",
2708 dev->data->port_id);
2712 wq = mlx5_glue->create_wq(ctx,
2713 &(struct ibv_wq_init_attr){
2714 .wq_type = IBV_WQT_RQ,
2721 DEBUG("port %u cannot allocate WQ for drop queue",
2722 dev->data->port_id);
2726 rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0);
2728 DEBUG("port %u cannot allocate drop Rx queue memory",
2729 dev->data->port_id);
2735 priv->drop_queue.rxq = rxq;
2739 claim_zero(mlx5_glue->destroy_wq(wq));
2741 claim_zero(mlx5_glue->destroy_cq(cq));
2746 * Release a drop Rx queue Verbs/DevX object.
2749 * Pointer to Ethernet device.
2755 mlx5_rxq_obj_drop_release(struct rte_eth_dev *dev)
2757 struct mlx5_priv *priv = dev->data->dev_private;
2758 struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
2761 claim_zero(mlx5_glue->destroy_wq(rxq->wq));
2763 claim_zero(mlx5_glue->destroy_cq(rxq->cq));
2765 priv->drop_queue.rxq = NULL;
2769 * Create a drop indirection table.
2772 * Pointer to Ethernet device.
2775 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2777 static struct mlx5_ind_table_obj *
2778 mlx5_ind_table_obj_drop_new(struct rte_eth_dev *dev)
2780 struct mlx5_priv *priv = dev->data->dev_private;
2781 struct mlx5_ind_table_obj *ind_tbl;
2782 struct mlx5_rxq_obj *rxq;
2783 struct mlx5_ind_table_obj tmpl;
2785 rxq = mlx5_rxq_obj_drop_new(dev);
2788 tmpl.ind_table = mlx5_glue->create_rwq_ind_table
2790 &(struct ibv_rwq_ind_table_init_attr){
2791 .log_ind_tbl_size = 0,
2792 .ind_tbl = &rxq->wq,
2795 if (!tmpl.ind_table) {
2796 DEBUG("port %u cannot allocate indirection table for drop"
2797 " queue",
2798 dev->data->port_id);
2802 ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0);
2807 ind_tbl->ind_table = tmpl.ind_table;
2810 mlx5_rxq_obj_drop_release(dev);
2815 * Release a drop indirection table.
2818 * Pointer to Ethernet device.
2821 mlx5_ind_table_obj_drop_release(struct rte_eth_dev *dev)
2823 struct mlx5_priv *priv = dev->data->dev_private;
2824 struct mlx5_ind_table_obj *ind_tbl = priv->drop_queue.hrxq->ind_table;
2826 claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
2827 mlx5_rxq_obj_drop_release(dev);
2829 priv->drop_queue.hrxq->ind_table = NULL;
2833 * Create a drop Rx Hash queue.
2836 * Pointer to Ethernet device.
2839 * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2842 mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
2844 struct mlx5_priv *priv = dev->data->dev_private;
2845 struct mlx5_ind_table_obj *ind_tbl = NULL;
2846 struct ibv_qp *qp = NULL;
2847 struct mlx5_hrxq *hrxq = NULL;
2849 if (priv->drop_queue.hrxq) {
2850 rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
2851 return priv->drop_queue.hrxq;
2853 hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
2856 "port %u cannot allocate memory for drop queue",
2857 dev->data->port_id);
2861 priv->drop_queue.hrxq = hrxq;
2862 ind_tbl = mlx5_ind_table_obj_drop_new(dev);
2865 hrxq->ind_table = ind_tbl;
2866 qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
2867 &(struct ibv_qp_init_attr_ex){
2868 .qp_type = IBV_QPT_RAW_PACKET,
2870 IBV_QP_INIT_ATTR_PD |
2871 IBV_QP_INIT_ATTR_IND_TABLE |
2872 IBV_QP_INIT_ATTR_RX_HASH,
2873 .rx_hash_conf = (struct ibv_rx_hash_conf){
2875 IBV_RX_HASH_FUNC_TOEPLITZ,
2876 .rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
2877 .rx_hash_key = rss_hash_default_key,
2878 .rx_hash_fields_mask = 0,
2880 .rwq_ind_tbl = ind_tbl->ind_table,
2884 DEBUG("port %u cannot allocate QP for drop queue",
2885 dev->data->port_id);
2890 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2891 hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
2892 if (!hrxq->action) {
2897 rte_atomic32_set(&hrxq->refcnt, 1);
2900 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2901 if (hrxq && hrxq->action)
2902 mlx5_glue->destroy_flow_action(hrxq->action);
2905 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2907 mlx5_ind_table_obj_drop_release(dev);
2909 priv->drop_queue.hrxq = NULL;
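/*
 * Only one drop hash Rx queue exists per device: the first call builds the
 * whole chain (drop CQ/WQ, drop indirection table, hash QP), later calls just
 * take an extra reference on priv->drop_queue.hrxq. A minimal usage sketch
 * (error handling omitted):
 *
 *   struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
 *
 *   if (drop != NULL) {
 *       ... point a flow rule at drop->qp (or drop->action with DV) ...
 *       mlx5_hrxq_drop_release(dev);
 *   }
 */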
2916 * Release a drop hash Rx queue.
2919 * Pointer to Ethernet device.
2922 mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
2924 struct mlx5_priv *priv = dev->data->dev_private;
2925 struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
2927 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2928 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2929 mlx5_glue->destroy_flow_action(hrxq->action);
2931 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2932 mlx5_ind_table_obj_drop_release(dev);
2934 priv->drop_queue.hrxq = NULL;