1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox.
12 #include <sys/queue.h>
15 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
17 #pragma GCC diagnostic ignored "-Wpedantic"
19 #include <infiniband/verbs.h>
20 #include <infiniband/mlx5dv.h>
22 #pragma GCC diagnostic error "-Wpedantic"
26 #include <rte_malloc.h>
27 #include <rte_ethdev_driver.h>
28 #include <rte_common.h>
29 #include <rte_interrupts.h>
30 #include <rte_debug.h>
34 #include "mlx5_rxtx.h"
35 #include "mlx5_utils.h"
36 #include "mlx5_autoconf.h"
37 #include "mlx5_defs.h"
38 #include "mlx5_glue.h"
40 /* Default RSS hash key also used for ConnectX-3. */
41 uint8_t rss_hash_default_key[] = {
42 0x2c, 0xc6, 0x81, 0xd1,
43 0x5b, 0xdb, 0xf4, 0xf7,
44 0xfc, 0xa2, 0x83, 0x19,
45 0xdb, 0x1a, 0x3e, 0x94,
46 0x6b, 0x9e, 0x38, 0xd9,
47 0x2c, 0x9c, 0x03, 0xd1,
48 0xad, 0x99, 0x44, 0xa7,
49 0xd9, 0x56, 0x3d, 0x59,
50 0x06, 0x3c, 0x25, 0xf3,
51 0xfc, 0x1f, 0xdc, 0x2a,
54 /* Length of the default RSS hash key. */
55 const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);
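/*
 * Note: the key above is 40 bytes, the Toeplitz key length commonly used
 * for RSS on these adapters; taking sizeof() keeps the exported length in
 * sync with the array contents.
 */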
58 * Allocate RX queue elements.
61 * Pointer to RX queue structure.
64 * 0 on success, a negative errno value otherwise and rte_errno is set.
67 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
69 const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
70 unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
74 /* Iterate on segments. */
75 for (i = 0; (i != elts_n); ++i) {
78 buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
80 DRV_LOG(ERR, "port %u empty mbuf pool",
81 rxq_ctrl->priv->dev->data->port_id);
85 /* Headroom is reserved by rte_pktmbuf_alloc(). */
86 assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
87 /* Buffer is supposed to be empty. */
88 assert(rte_pktmbuf_data_len(buf) == 0);
89 assert(rte_pktmbuf_pkt_len(buf) == 0);
91 /* Only the first segment keeps headroom. */
94 PORT(buf) = rxq_ctrl->rxq.port_id;
95 DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
96 PKT_LEN(buf) = DATA_LEN(buf);
98 (*rxq_ctrl->rxq.elts)[i] = buf;
100 /* If Rx vector is activated. */
101 if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
102 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
103 struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
106 /* Initialize default rearm_data for vPMD. */
107 mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
108 rte_mbuf_refcnt_set(mbuf_init, 1);
109 mbuf_init->nb_segs = 1;
110 mbuf_init->port = rxq->port_id;
112 * prevent compiler reordering:
113 * rearm_data covers previous fields.
115 rte_compiler_barrier();
116 rxq->mbuf_initializer =
117 *(uint64_t *)&mbuf_init->rearm_data;
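/*
 * Note: the vector Rx path presumably relies on rearm_data aliasing
 * data_off, refcnt, nb_segs and port within a single 64-bit word, so
 * one store of mbuf_initializer re-initializes all four fields at once.
 */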
118 /* Padding with a fake mbuf for vectorized Rx. */
119 for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
120 (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
123 "port %u Rx queue %u allocated and configured %u segments"
125 rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx, elts_n,
126 elts_n / (1 << rxq_ctrl->rxq.sges_n));
129 err = rte_errno; /* Save rte_errno before cleanup. */
131 for (i = 0; (i != elts_n); ++i) {
132 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
133 rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
134 (*rxq_ctrl->rxq.elts)[i] = NULL;
136 DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
137 rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
138 rte_errno = err; /* Restore rte_errno. */
143 * Free RX queue elements.
146 * Pointer to RX queue structure.
149 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
151 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
152 const uint16_t q_n = (1 << rxq->elts_n);
153 const uint16_t q_mask = q_n - 1;
154 uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
157 DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
158 rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
159 if (rxq->elts == NULL)
162 * Some mbufs in the ring belong to the application; they cannot be
165 if (mlx5_rxq_check_vec_support(rxq) > 0) {
166 for (i = 0; i < used; ++i)
167 (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
168 rxq->rq_pi = rxq->rq_ci;
170 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
171 if ((*rxq->elts)[i] != NULL)
172 rte_pktmbuf_free_seg((*rxq->elts)[i]);
173 (*rxq->elts)[i] = NULL;
178 * Clean up an Rx queue.
180 * Destroy objects, free allocated memory and reset the structure for reuse.
183 * Pointer to RX queue structure.
186 mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
188 DRV_LOG(DEBUG, "port %u cleaning up Rx queue %u",
189 rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
191 mlx5_rxq_ibv_release(rxq_ctrl->ibv);
192 memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
196 * Returns the per-queue supported offloads.
199 * Pointer to Ethernet device.
202 * Supported Rx offloads.
205 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
207 struct priv *priv = dev->data->dev_private;
208 struct mlx5_dev_config *config = &priv->config;
209 uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
210 DEV_RX_OFFLOAD_TIMESTAMP |
211 DEV_RX_OFFLOAD_JUMBO_FRAME);
213 if (config->hw_fcs_strip)
214 offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
216 offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
217 DEV_RX_OFFLOAD_UDP_CKSUM |
218 DEV_RX_OFFLOAD_TCP_CKSUM);
219 if (config->hw_vlan_strip)
220 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
226 * Returns the per-port supported offloads.
229 * Supported Rx offloads.
232 mlx5_get_rx_port_offloads(void)
234 uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
240 * Checks if the per-queue offload configuration is valid.
243 * Pointer to Ethernet device.
245 * Per-queue offloads configuration.
248 * 1 if the configuration is valid, 0 otherwise.
251 mlx5_is_rx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads)
253 uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
254 uint64_t queue_supp_offloads = mlx5_get_rx_queue_offloads(dev);
255 uint64_t port_supp_offloads = mlx5_get_rx_port_offloads();
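/*
 * Sketch of the checks below, as read from the code: first reject any
 * requested bit supported by neither the queue nor the port, then reject
 * per-port offload bits that differ from the port-wide configuration,
 * since those cannot vary per queue.
 */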
257 if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
260 if (((port_offloads ^ offloads) & port_supp_offloads))
268 * Pointer to Ethernet device structure.
272 * Number of descriptors to configure in queue.
274 * NUMA socket on which memory must be allocated.
276 * Thresholds parameters.
278 * Memory pool for buffer allocations.
281 * 0 on success, a negative errno value otherwise and rte_errno is set.
284 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
285 unsigned int socket, const struct rte_eth_rxconf *conf,
286 struct rte_mempool *mp)
288 struct priv *priv = dev->data->dev_private;
289 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
290 struct mlx5_rxq_ctrl *rxq_ctrl =
291 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
293 if (!rte_is_power_of_2(desc)) {
294 desc = 1 << log2above(desc);
296 "port %u increased number of descriptors in Rx queue %u"
297 " to the next power of two (%d)",
298 dev->data->port_id, idx, desc);
300 DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
301 dev->data->port_id, idx, desc);
302 if (idx >= priv->rxqs_n) {
303 DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
304 dev->data->port_id, idx, priv->rxqs_n);
305 rte_errno = EOVERFLOW;
308 if (!mlx5_is_rx_queue_offloads_allowed(dev, conf->offloads)) {
310 "port %u Rx queue offloads 0x%" PRIx64 " don't match"
311 " port offloads 0x%" PRIx64 " or supported offloads 0x%"
313 dev->data->port_id, conf->offloads,
314 dev->data->dev_conf.rxmode.offloads,
315 (mlx5_get_rx_port_offloads() |
316 mlx5_get_rx_queue_offloads(dev)));
320 if (!mlx5_rxq_releasable(dev, idx)) {
321 DRV_LOG(ERR, "port %u unable to release queue index %u",
322 dev->data->port_id, idx);
326 mlx5_rxq_release(dev, idx);
327 rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
329 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
330 dev->data->port_id, idx);
334 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
335 dev->data->port_id, idx);
336 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
341 * DPDK callback to release a RX queue.
344 * Generic RX queue pointer.
347 mlx5_rx_queue_release(void *dpdk_rxq)
349 struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
350 struct mlx5_rxq_ctrl *rxq_ctrl;
355 rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
356 priv = rxq_ctrl->priv;
357 if (!mlx5_rxq_releasable(priv->dev, rxq_ctrl->rxq.stats.idx))
358 rte_panic("port %u Rx queue %u is still used by a flow and"
359 " cannot be removed\n", priv->dev->data->port_id,
361 mlx5_rxq_release(priv->dev, rxq_ctrl->rxq.stats.idx);
365 * Allocate queue vector and fill epoll fd list for Rx interrupts.
368 * Pointer to Ethernet device.
371 * 0 on success, a negative errno value otherwise and rte_errno is set.
374 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
376 struct priv *priv = dev->data->dev_private;
378 unsigned int rxqs_n = priv->rxqs_n;
379 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
380 unsigned int count = 0;
381 struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
383 if (!priv->dev->data->dev_conf.intr_conf.rxq)
385 mlx5_rx_intr_vec_disable(dev);
386 intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
387 if (intr_handle->intr_vec == NULL) {
389 "port %u failed to allocate memory for interrupt"
390 " vector, Rx interrupts will not be supported",
395 intr_handle->type = RTE_INTR_HANDLE_EXT;
396 for (i = 0; i != n; ++i) {
397 /* This rxq ibv must not be released in this function. */
398 struct mlx5_rxq_ibv *rxq_ibv = mlx5_rxq_ibv_get(dev, i);
403 /* Skip queues that cannot request interrupts. */
404 if (!rxq_ibv || !rxq_ibv->channel) {
405 /* Use invalid intr_vec[] index to disable entry. */
406 intr_handle->intr_vec[i] =
407 RTE_INTR_VEC_RXTX_OFFSET +
408 RTE_MAX_RXTX_INTR_VEC_ID;
411 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
413 "port %u too many Rx queues for interrupt"
414 " vector size (%d), Rx interrupts cannot be"
416 dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
417 mlx5_rx_intr_vec_disable(dev);
421 fd = rxq_ibv->channel->fd;
422 flags = fcntl(fd, F_GETFL);
423 rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
427 "port %u failed to make Rx interrupt file"
428 " descriptor %d non-blocking for queue index"
430 dev->data->port_id, fd, i);
431 mlx5_rx_intr_vec_disable(dev);
434 intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
435 intr_handle->efds[count] = fd;
439 mlx5_rx_intr_vec_disable(dev);
441 intr_handle->nb_efd = count;
446 * Clean up the Rx interrupt handler.
449 * Pointer to Ethernet device.
452 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
454 struct priv *priv = dev->data->dev_private;
455 struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
457 unsigned int rxqs_n = priv->rxqs_n;
458 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
460 if (!priv->dev->data->dev_conf.intr_conf.rxq)
462 if (!intr_handle->intr_vec)
464 for (i = 0; i != n; ++i) {
465 struct mlx5_rxq_ctrl *rxq_ctrl;
466 struct mlx5_rxq_data *rxq_data;
468 if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
469 RTE_MAX_RXTX_INTR_VEC_ID)
472 * Need to access the queue directly to release the reference
473 * kept in mlx5_rx_intr_vec_enable().
475 rxq_data = (*priv->rxqs)[i];
476 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
477 mlx5_rxq_ibv_release(rxq_ctrl->ibv);
480 rte_intr_free_epoll_fd(intr_handle);
481 if (intr_handle->intr_vec)
482 free(intr_handle->intr_vec);
483 intr_handle->nb_efd = 0;
484 intr_handle->intr_vec = NULL;
488 * MLX5 CQ notification.
491 * Pointer to receive queue structure.
493 * Sequence number per receive queue.
496 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
499 uint32_t doorbell_hi;
501 void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
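/*
 * Doorbell layout, as read from the code below: the upper 32 bits carry
 * the arm sequence number and the CQ consumer index, the lower 32 bits
 * the CQ number; the doorbell record is updated before ringing the UAR
 * register.
 */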
503 sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
504 doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
505 doorbell = (uint64_t)doorbell_hi << 32;
506 doorbell |= rxq->cqn;
507 rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
508 rte_write64(rte_cpu_to_be_64(doorbell), cq_db_reg);
512 * DPDK callback for Rx queue interrupt enable.
515 * Pointer to Ethernet device structure.
520 * 0 on success, a negative errno value otherwise and rte_errno is set.
523 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
525 struct priv *priv = dev->data->dev_private;
526 struct mlx5_rxq_data *rxq_data;
527 struct mlx5_rxq_ctrl *rxq_ctrl;
529 rxq_data = (*priv->rxqs)[rx_queue_id];
534 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
536 struct mlx5_rxq_ibv *rxq_ibv;
538 rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
543 mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
544 mlx5_rxq_ibv_release(rxq_ibv);
550 * DPDK callback for Rx queue interrupt disable.
553 * Pointer to Ethernet device structure.
558 * 0 on success, a negative errno value otherwise and rte_errno is set.
561 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
563 struct priv *priv = dev->data->dev_private;
564 struct mlx5_rxq_data *rxq_data;
565 struct mlx5_rxq_ctrl *rxq_ctrl;
566 struct mlx5_rxq_ibv *rxq_ibv = NULL;
567 struct ibv_cq *ev_cq;
571 rxq_data = (*priv->rxqs)[rx_queue_id];
576 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
579 rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
584 ret = mlx5_glue->get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx);
585 if (ret || ev_cq != rxq_ibv->cq) {
589 rxq_data->cq_arm_sn++;
590 mlx5_glue->ack_cq_events(rxq_ibv->cq, 1);
593 ret = rte_errno; /* Save rte_errno before cleanup. */
595 mlx5_rxq_ibv_release(rxq_ibv);
596 DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
597 dev->data->port_id, rx_queue_id);
598 rte_errno = ret; /* Restore rte_errno. */
603 * Create the Rx queue Verbs object.
606 * Pointer to Ethernet device.
608 * Queue index in DPDK Rx queue array.
611 * The Verbs object initialised, NULL otherwise and rte_errno is set.
613 struct mlx5_rxq_ibv *
614 mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
616 struct priv *priv = dev->data->dev_private;
617 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
618 struct mlx5_rxq_ctrl *rxq_ctrl =
619 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
620 struct ibv_wq_attr mod;
623 struct ibv_cq_init_attr_ex ibv;
624 struct mlx5dv_cq_init_attr mlx5;
626 struct ibv_wq_init_attr wq;
627 struct ibv_cq_ex cq_attr;
629 unsigned int cqe_n = (1 << rxq_data->elts_n) - 1;
630 struct mlx5_rxq_ibv *tmpl;
631 struct mlx5dv_cq cq_info;
632 struct mlx5dv_rwq rwq;
635 struct mlx5dv_obj obj;
636 struct mlx5_dev_config *config = &priv->config;
639 assert(!rxq_ctrl->ibv);
640 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
641 priv->verbs_alloc_ctx.obj = rxq_ctrl;
642 tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
646 "port %u Rx queue %u cannot allocate verbs resources",
647 dev->data->port_id, rxq_ctrl->idx);
651 tmpl->rxq_ctrl = rxq_ctrl;
652 /* Use the entire RX mempool as the memory region. */
653 tmpl->mr = mlx5_mr_get(dev, rxq_data->mp);
655 tmpl->mr = mlx5_mr_new(dev, rxq_data->mp);
657 DRV_LOG(ERR, "port %u: memory region creation failure",
663 tmpl->channel = mlx5_glue->create_comp_channel(priv->ctx);
664 if (!tmpl->channel) {
665 DRV_LOG(ERR, "port %u: comp channel creation failure",
671 attr.cq.ibv = (struct ibv_cq_init_attr_ex){
673 .channel = tmpl->channel,
676 attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){
679 if (config->cqe_comp && !rxq_data->hw_timestamp) {
680 attr.cq.mlx5.comp_mask |=
681 MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
682 attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
684 * For vectorized Rx, it must not be doubled in order to
685 * make cq_ci and rq_ci aligned.
687 if (mlx5_rxq_check_vec_support(rxq_data) < 0)
688 attr.cq.ibv.cqe *= 2;
689 } else if (config->cqe_comp && rxq_data->hw_timestamp) {
691 "port %u Rx CQE compression is disabled for HW"
695 tmpl->cq = mlx5_glue->cq_ex_to_cq
696 (mlx5_glue->dv_create_cq(priv->ctx, &attr.cq.ibv,
698 if (tmpl->cq == NULL) {
699 DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
700 dev->data->port_id, idx);
704 DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
705 dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
706 DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
707 dev->data->port_id, priv->device_attr.orig_attr.max_sge);
708 attr.wq = (struct ibv_wq_init_attr){
709 .wq_context = NULL, /* Could be useful in the future. */
710 .wq_type = IBV_WQT_RQ,
711 /* Max number of outstanding WRs. */
712 .max_wr = (1 << rxq_data->elts_n) >> rxq_data->sges_n,
713 /* Max number of scatter/gather elements in a WR. */
714 .max_sge = 1 << rxq_data->sges_n,
718 IBV_WQ_FLAGS_CVLAN_STRIPPING |
720 .create_flags = (rxq_data->vlan_strip ?
721 IBV_WQ_FLAGS_CVLAN_STRIPPING :
724 /* By default, FCS (CRC) is stripped by hardware. */
725 if (rxq_data->crc_present) {
726 attr.wq.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
727 attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
729 #ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
730 if (config->hw_padding) {
731 attr.wq.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
732 attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
735 tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq);
736 if (tmpl->wq == NULL) {
737 DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
738 dev->data->port_id, idx);
743 * Make sure the number of WRs * SGEs matches expectations since a queue
744 * cannot allocate more than "desc" buffers.
746 if (((int)attr.wq.max_wr !=
747 ((1 << rxq_data->elts_n) >> rxq_data->sges_n)) ||
748 ((int)attr.wq.max_sge != (1 << rxq_data->sges_n))) {
750 "port %u Rx queue %u requested %u*%u but got %u*%u"
752 dev->data->port_id, idx,
753 ((1 << rxq_data->elts_n) >> rxq_data->sges_n),
754 (1 << rxq_data->sges_n),
755 attr.wq.max_wr, attr.wq.max_sge);
759 /* Change queue state to ready. */
760 mod = (struct ibv_wq_attr){
761 .attr_mask = IBV_WQ_ATTR_STATE,
762 .wq_state = IBV_WQS_RDY,
764 ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
767 "port %u Rx queue %u WQ state to IBV_WQS_RDY failed",
768 dev->data->port_id, idx);
772 obj.cq.in = tmpl->cq;
773 obj.cq.out = &cq_info;
774 obj.rwq.in = tmpl->wq;
776 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);
781 if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
783 "port %u wrong MLX5_CQE_SIZE environment variable"
784 " value: it should be set to %u",
785 dev->data->port_id, RTE_CACHE_LINE_SIZE);
789 /* Fill the rings. */
790 rxq_data->wqes = (volatile struct mlx5_wqe_data_seg (*)[])
792 for (i = 0; (i != (unsigned int)(1 << rxq_data->elts_n)); ++i) {
793 struct rte_mbuf *buf = (*rxq_data->elts)[i];
794 volatile struct mlx5_wqe_data_seg *scat = &(*rxq_data->wqes)[i];
796 /* scat->addr must be able to store a pointer. */
797 assert(sizeof(scat->addr) >= sizeof(uintptr_t));
798 *scat = (struct mlx5_wqe_data_seg){
799 .addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
801 .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
802 .lkey = tmpl->mr->lkey,
805 rxq_data->rq_db = rwq.dbrec;
806 rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
810 rxq_data->zip = (struct rxq_zip){
813 rxq_data->cq_db = cq_info.dbrec;
814 rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
815 rxq_data->cq_uar = cq_info.cq_uar;
816 rxq_data->cqn = cq_info.cqn;
817 rxq_data->cq_arm_sn = 0;
818 /* Update doorbell counter. */
819 rxq_data->rq_ci = (1 << rxq_data->elts_n) >> rxq_data->sges_n;
821 *rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci);
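/*
 * Note: writing rq_ci (the number of pre-posted WQEs) to the doorbell
 * record presumably lets the hardware treat the whole ring as posted.
 */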
822 DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
824 rte_atomic32_inc(&tmpl->refcnt);
825 DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
826 dev->data->port_id, idx, rte_atomic32_read(&tmpl->refcnt));
827 LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next);
828 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
831 ret = rte_errno; /* Save rte_errno before cleanup. */
833 claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
835 claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
837 claim_zero(mlx5_glue->destroy_comp_channel(tmpl->channel));
839 mlx5_mr_release(tmpl->mr);
840 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
841 rte_errno = ret; /* Restore rte_errno. */
846 * Get an Rx queue Verbs object.
849 * Pointer to Ethernet device.
851 * Queue index in DPDK Rx queue array.
854 * The Verbs object if it exists.
856 struct mlx5_rxq_ibv *
857 mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
859 struct priv *priv = dev->data->dev_private;
860 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
861 struct mlx5_rxq_ctrl *rxq_ctrl;
863 if (idx >= priv->rxqs_n)
867 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
869 mlx5_mr_get(dev, rxq_data->mp);
870 rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
871 DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
872 dev->data->port_id, rxq_ctrl->idx,
873 rte_atomic32_read(&rxq_ctrl->ibv->refcnt));
875 return rxq_ctrl->ibv;
879 * Release an Rx Verbs queue object.
882 * Verbs Rx queue object.
885 * 1 while a reference on it exists, 0 when freed.
888 mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
896 ret = mlx5_mr_release(rxq_ibv->mr);
899 DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
900 rxq_ibv->rxq_ctrl->priv->dev->data->port_id,
901 rxq_ibv->rxq_ctrl->idx, rte_atomic32_read(&rxq_ibv->refcnt));
902 if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
903 rxq_free_elts(rxq_ibv->rxq_ctrl);
904 claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq));
905 claim_zero(mlx5_glue->destroy_cq(rxq_ibv->cq));
906 if (rxq_ibv->channel)
907 claim_zero(mlx5_glue->destroy_comp_channel
909 LIST_REMOVE(rxq_ibv, next);
917 * Verify the Verbs Rx queue list is empty.
920 * Pointer to Ethernet device.
923 * The number of objects not released.
926 mlx5_rxq_ibv_verify(struct rte_eth_dev *dev)
928 struct priv *priv = dev->data->dev_private;
930 struct mlx5_rxq_ibv *rxq_ibv;
932 LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) {
933 DRV_LOG(DEBUG, "port %u Verbs Rx queue %u still referenced",
934 dev->data->port_id, rxq_ibv->rxq_ctrl->idx);
941 * Return true if a single reference exists on the object.
944 * Verbs Rx queue object.
947 mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv)
950 return (rte_atomic32_read(&rxq_ibv->refcnt) == 1);
954 * Create a DPDK Rx queue.
957 * Pointer to Ethernet device.
961 * Number of descriptors to configure in queue.
963 * NUMA socket on which memory must be allocated.
966 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
968 struct mlx5_rxq_ctrl *
969 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
970 unsigned int socket, const struct rte_eth_rxconf *conf,
971 struct rte_mempool *mp)
973 struct priv *priv = dev->data->dev_private;
974 struct mlx5_rxq_ctrl *tmpl;
975 unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
976 struct mlx5_dev_config *config = &priv->config;
978 * Always allocate extra slots, even if the vector Rx path
979 * ends up not being used.
981 const uint16_t desc_n =
982 desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
984 tmpl = rte_calloc_socket("RXQ", 1,
986 desc_n * sizeof(struct rte_mbuf *),
992 tmpl->socket = socket;
993 if (priv->dev->data->dev_conf.intr_conf.rxq)
995 /* Enable scattered packet support for this queue if necessary. */
996 assert(mb_len >= RTE_PKTMBUF_HEADROOM);
997 if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
998 (mb_len - RTE_PKTMBUF_HEADROOM)) {
999 tmpl->rxq.sges_n = 0;
1000 } else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
1002 RTE_PKTMBUF_HEADROOM +
1003 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1004 unsigned int sges_n;
1007 * Determine the number of SGEs needed for a full packet
1008 * and round it to the next power of two.
1010 sges_n = log2above((size / mb_len) + !!(size % mb_len));
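/*
 * Worked example (hypothetical numbers): a packet spanning 5 mbufs
 * yields log2above(5) == 3, i.e. 8 SGEs per WQE; the overflow check
 * below then verifies that mb_len * (1 << sges_n) minus headroom still
 * covers max_rx_pkt_len.
 */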
1011 tmpl->rxq.sges_n = sges_n;
1012 /* Make sure rxq.sges_n did not overflow. */
1013 size = mb_len * (1 << tmpl->rxq.sges_n);
1014 size -= RTE_PKTMBUF_HEADROOM;
1015 if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
1017 "port %u too many SGEs (%u) needed to handle"
1018 " requested maximum packet size %u",
1021 dev->data->dev_conf.rxmode.max_rx_pkt_len);
1022 rte_errno = EOVERFLOW;
1027 "port %u the requested maximum Rx packet size (%u) is"
1028 " larger than a single mbuf (%u) and scattered mode has"
1029 " not been requested",
1031 dev->data->dev_conf.rxmode.max_rx_pkt_len,
1032 mb_len - RTE_PKTMBUF_HEADROOM);
1034 DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
1035 dev->data->port_id, 1 << tmpl->rxq.sges_n);
1036 if (desc % (1 << tmpl->rxq.sges_n)) {
1038 "port %u number of Rx queue descriptors (%u) is not a"
1039 " multiple of SGEs per packet (%u)",
1042 1 << tmpl->rxq.sges_n);
1046 /* Toggle RX checksum offload if hardware supports it. */
1047 tmpl->rxq.csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM);
1048 tmpl->rxq.csum_l2tun = (!!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) &&
1049 priv->config.tunnel_en);
1050 tmpl->rxq.hw_timestamp = !!(conf->offloads & DEV_RX_OFFLOAD_TIMESTAMP);
1051 /* Configure VLAN stripping. */
1052 tmpl->rxq.vlan_strip = !!(conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1053 /* By default, FCS (CRC) is stripped by hardware. */
1054 if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
1055 tmpl->rxq.crc_present = 0;
1056 } else if (config->hw_fcs_strip) {
1057 tmpl->rxq.crc_present = 1;
1060 "port %u CRC stripping has been disabled but will"
1061 " still be performed by hardware, make sure MLNX_OFED"
1062 " and firmware are up to date",
1063 dev->data->port_id);
1064 tmpl->rxq.crc_present = 0;
1067 "port %u CRC stripping is %s, %u bytes will be subtracted from"
1068 " incoming frames to hide it",
1070 tmpl->rxq.crc_present ? "disabled" : "enabled",
1071 tmpl->rxq.crc_present << 2);
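/* crc_present << 2 evaluates to 4 when the FCS is kept (4-byte CRC), 0 otherwise. */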
1073 tmpl->rxq.rss_hash = priv->rxqs_n > 1;
1074 tmpl->rxq.port_id = dev->data->port_id;
1077 tmpl->rxq.stats.idx = idx;
1078 tmpl->rxq.elts_n = log2above(desc);
1080 (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
1082 rte_atomic32_inc(&tmpl->refcnt);
1083 DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id,
1084 idx, rte_atomic32_read(&tmpl->refcnt));
1085 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1096 * Pointer to Ethernet device.
1101 * A pointer to the queue if it exists, NULL otherwise.
1103 struct mlx5_rxq_ctrl *
1104 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
1106 struct priv *priv = dev->data->dev_private;
1107 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1109 if ((*priv->rxqs)[idx]) {
1110 rxq_ctrl = container_of((*priv->rxqs)[idx],
1111 struct mlx5_rxq_ctrl,
1113 mlx5_rxq_ibv_get(dev, idx);
1114 rte_atomic32_inc(&rxq_ctrl->refcnt);
1115 DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d",
1116 dev->data->port_id, rxq_ctrl->idx,
1117 rte_atomic32_read(&rxq_ctrl->refcnt));
1123 * Release an Rx queue.
1126 * Pointer to Ethernet device.
1131 * 1 while a reference on it exists, 0 when freed.
1134 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
1136 struct priv *priv = dev->data->dev_private;
1137 struct mlx5_rxq_ctrl *rxq_ctrl;
1139 if (!(*priv->rxqs)[idx])
1141 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1142 assert(rxq_ctrl->priv);
1143 if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv))
1144 rxq_ctrl->ibv = NULL;
1145 DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id,
1146 rxq_ctrl->idx, rte_atomic32_read(&rxq_ctrl->refcnt));
1147 if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
1148 LIST_REMOVE(rxq_ctrl, next);
1150 (*priv->rxqs)[idx] = NULL;
1157 * Verify whether the queue can be released.
1160 * Pointer to Ethernet device.
1165 * 1 if the queue can be released, negative errno otherwise and rte_errno is
1169 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
1171 struct priv *priv = dev->data->dev_private;
1172 struct mlx5_rxq_ctrl *rxq_ctrl;
1174 if (!(*priv->rxqs)[idx]) {
1178 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1179 return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
1183 * Verify the Rx queue list is empty.
1186 * Pointer to Ethernet device.
1189 * The number of objects not released.
1192 mlx5_rxq_verify(struct rte_eth_dev *dev)
1194 struct priv *priv = dev->data->dev_private;
1195 struct mlx5_rxq_ctrl *rxq_ctrl;
1198 LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
1199 DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
1200 dev->data->port_id, rxq_ctrl->idx);
1207 * Create an indirection table.
1210 * Pointer to Ethernet device.
1212 * Queues to include in the indirection table.
1214 * Number of queues in the array.
1217 * The Verbs object initialised, NULL otherwise and rte_errno is set.
1219 struct mlx5_ind_table_ibv *
1220 mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, uint16_t queues[],
1223 struct priv *priv = dev->data->dev_private;
1224 struct mlx5_ind_table_ibv *ind_tbl;
1225 const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
1226 log2above(queues_n) :
1227 log2above(priv->config.ind_table_max_size);
1228 struct ibv_wq *wq[1 << wq_n];
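/*
 * The indirection table size must be a power of two; when queues_n is
 * not, the table is sized to ind_table_max_size and, as read from the
 * loop further below, the WQ list is wrapped around to fill the extra
 * entries.
 */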
1232 ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
1233 queues_n * sizeof(uint16_t), 0);
1238 for (i = 0; i != queues_n; ++i) {
1239 struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
1243 wq[i] = rxq->ibv->wq;
1244 ind_tbl->queues[i] = queues[i];
1246 ind_tbl->queues_n = queues_n;
1247 /* Finalise indirection table. */
1248 for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j)
1250 ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
1252 &(struct ibv_rwq_ind_table_init_attr){
1253 .log_ind_tbl_size = wq_n,
1257 if (!ind_tbl->ind_table) {
1261 rte_atomic32_inc(&ind_tbl->refcnt);
1262 LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
1263 DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
1264 dev->data->port_id, (void *)ind_tbl,
1265 rte_atomic32_read(&ind_tbl->refcnt));
1269 DRV_LOG(DEBUG, "port %u cannot create indirection table",
1270 dev->data->port_id);
1275 * Get an indirection table.
1278 * Pointer to Ethernet device.
1280 * Queues to include in the indirection table.
1282 * Number of queues in the array.
1285 * An indirection table if found.
1287 struct mlx5_ind_table_ibv *
1288 mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, uint16_t queues[],
1291 struct priv *priv = dev->data->dev_private;
1292 struct mlx5_ind_table_ibv *ind_tbl;
1294 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1295 if ((ind_tbl->queues_n == queues_n) &&
1296 (memcmp(ind_tbl->queues, queues,
1297 ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
1304 rte_atomic32_inc(&ind_tbl->refcnt);
1305 DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
1306 dev->data->port_id, (void *)ind_tbl,
1307 rte_atomic32_read(&ind_tbl->refcnt));
1308 for (i = 0; i != ind_tbl->queues_n; ++i)
1309 mlx5_rxq_get(dev, ind_tbl->queues[i]);
1315 * Release an indirection table.
1318 * Pointer to Ethernet device.
1320 * Indirection table to release.
1323 * 1 while a reference on it exists, 0 when freed.
1326 mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
1327 struct mlx5_ind_table_ibv *ind_tbl)
1331 DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
1332 ((struct priv *)dev->data->dev_private)->port,
1333 (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
1334 if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
1335 claim_zero(mlx5_glue->destroy_rwq_ind_table
1336 (ind_tbl->ind_table));
1337 for (i = 0; i != ind_tbl->queues_n; ++i)
1338 claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
1339 if (!rte_atomic32_read(&ind_tbl->refcnt)) {
1340 LIST_REMOVE(ind_tbl, next);
1348 * Verify the Verbs indirection table list is empty.
1351 * Pointer to Ethernet device.
1354 * The number of objects not released.
1357 mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
1359 struct priv *priv = dev->data->dev_private;
1360 struct mlx5_ind_table_ibv *ind_tbl;
1363 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1365 "port %u Verbs indirection table %p still referenced",
1366 dev->data->port_id, (void *)ind_tbl);
1373 * Create a hash Rx queue.
1376 * Pointer to Ethernet device.
1378 * RSS key for the Rx hash queue.
1379 * @param rss_key_len
1381 * @param hash_fields
1382 * Verbs protocol hash fields to apply RSS on.
1384 * Queues to include in the hash Rx queue. If hash_fields is empty, only
1385 * the first queue index is used for the indirection table.
1390 * The Verbs object initialised, NULL otherwise and rte_errno is set.
1393 mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
1394 uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
1396 struct priv *priv = dev->data->dev_private;
1397 struct mlx5_hrxq *hrxq;
1398 struct mlx5_ind_table_ibv *ind_tbl;
1402 queues_n = hash_fields ? queues_n : 1;
1403 ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
1405 ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n);
1410 qp = mlx5_glue->create_qp_ex
1412 &(struct ibv_qp_init_attr_ex){
1413 .qp_type = IBV_QPT_RAW_PACKET,
1415 IBV_QP_INIT_ATTR_PD |
1416 IBV_QP_INIT_ATTR_IND_TABLE |
1417 IBV_QP_INIT_ATTR_RX_HASH,
1418 .rx_hash_conf = (struct ibv_rx_hash_conf){
1419 .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
1420 .rx_hash_key_len = rss_key_len,
1421 .rx_hash_key = rss_key,
1422 .rx_hash_fields_mask = hash_fields,
1424 .rwq_ind_tbl = ind_tbl->ind_table,
1431 hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
1434 hrxq->ind_table = ind_tbl;
1436 hrxq->rss_key_len = rss_key_len;
1437 hrxq->hash_fields = hash_fields;
1438 memcpy(hrxq->rss_key, rss_key, rss_key_len);
1439 rte_atomic32_inc(&hrxq->refcnt);
1440 LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
1441 DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
1442 dev->data->port_id, (void *)hrxq,
1443 rte_atomic32_read(&hrxq->refcnt));
1446 err = rte_errno; /* Save rte_errno before cleanup. */
1447 mlx5_ind_table_ibv_release(dev, ind_tbl);
1449 claim_zero(mlx5_glue->destroy_qp(qp));
1450 rte_errno = err; /* Restore rte_errno. */
1455 * Get a hash Rx queue.
1458 * Pointer to Ethernet device.
1460 * RSS configuration for the Rx hash queue.
1462 * Queues to include in the hash Rx queue. If hash_fields is empty, only
1463 * the first queue index is used for the indirection table.
1468 * A hash Rx queue on success.
1471 mlx5_hrxq_get(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
1472 uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
1474 struct priv *priv = dev->data->dev_private;
1475 struct mlx5_hrxq *hrxq;
1477 queues_n = hash_fields ? queues_n : 1;
1478 LIST_FOREACH(hrxq, &priv->hrxqs, next) {
1479 struct mlx5_ind_table_ibv *ind_tbl;
1481 if (hrxq->rss_key_len != rss_key_len)
1483 if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
1485 if (hrxq->hash_fields != hash_fields)
1487 ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
1490 if (ind_tbl != hrxq->ind_table) {
1491 mlx5_ind_table_ibv_release(dev, ind_tbl);
1494 rte_atomic32_inc(&hrxq->refcnt);
1495 DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
1496 dev->data->port_id, (void *)hrxq,
1497 rte_atomic32_read(&hrxq->refcnt));
1504 * Release the hash Rx queue.
1507 * Pointer to Ethernet device.
1509 * Pointer to Hash Rx queue to release.
1512 * 1 while a reference on it exists, 0 when freed.
1515 mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
1517 DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
1518 ((struct priv *)dev->data->dev_private)->port,
1519 (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
1520 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
1521 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
1522 mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
1523 LIST_REMOVE(hrxq, next);
1527 claim_nonzero(mlx5_ind_table_ibv_release(dev, hrxq->ind_table));
1532 * Verify the Verbs hash Rx queue list is empty.
1535 * Pointer to Ethernet device.
1538 * The number of objects not released.
1541 mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
1543 struct priv *priv = dev->data->dev_private;
1544 struct mlx5_hrxq *hrxq;
1547 LIST_FOREACH(hrxq, &priv->hrxqs, next) {
1549 "port %u Verbs hash Rx queue %p still referenced",
1550 dev->data->port_id, (void *)hrxq);