1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox.
12 #include <sys/queue.h>
15 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
17 #pragma GCC diagnostic ignored "-Wpedantic"
19 #include <infiniband/verbs.h>
20 #include <infiniband/mlx5dv.h>
22 #pragma GCC diagnostic error "-Wpedantic"
26 #include <rte_malloc.h>
27 #include <rte_ethdev_driver.h>
28 #include <rte_common.h>
29 #include <rte_interrupts.h>
30 #include <rte_debug.h>
34 #include "mlx5_rxtx.h"
35 #include "mlx5_utils.h"
36 #include "mlx5_autoconf.h"
37 #include "mlx5_defs.h"
38 #include "mlx5_glue.h"
40 /* Default RSS hash key also used for ConnectX-3. */
41 uint8_t rss_hash_default_key[] = {
42 0x2c, 0xc6, 0x81, 0xd1,
43 0x5b, 0xdb, 0xf4, 0xf7,
44 0xfc, 0xa2, 0x83, 0x19,
45 0xdb, 0x1a, 0x3e, 0x94,
46 0x6b, 0x9e, 0x38, 0xd9,
47 0x2c, 0x9c, 0x03, 0xd1,
48 0xad, 0x99, 0x44, 0xa7,
49 0xd9, 0x56, 0x3d, 0x59,
50 0x06, 0x3c, 0x25, 0xf3,
51 0xfc, 0x1f, 0xdc, 0x2a,
54 /* Length of the default RSS hash key. */
55 const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);
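/*
 * This 40-byte Toeplitz key is typically what gets passed as the rx_hash_key
 * of mlx5_hrxq_new() when the application does not supply its own RSS key.
 */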
58 * Allocate RX queue elements.
61 * Pointer to RX queue structure.
64 * 0 on success, a negative errno value otherwise and rte_errno is set.
67 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
69 const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
70 unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
74 /* Iterate on segments. */
75 for (i = 0; (i != elts_n); ++i) {
78 buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
80 ERROR("port %u empty mbuf pool",
81 rxq_ctrl->priv->dev->data->port_id);
85 /* Headroom is reserved by rte_pktmbuf_alloc(). */
86 assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
87 /* Buffer is supposed to be empty. */
88 assert(rte_pktmbuf_data_len(buf) == 0);
89 assert(rte_pktmbuf_pkt_len(buf) == 0);
91 /* Only the first segment keeps headroom. */
94 PORT(buf) = rxq_ctrl->rxq.port_id;
95 DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
96 PKT_LEN(buf) = DATA_LEN(buf);
98 (*rxq_ctrl->rxq.elts)[i] = buf;
100 /* If vectorized Rx is enabled. */
101 if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
102 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
103 struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
106 /* Initialize default rearm_data for vPMD. */
107 mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
108 rte_mbuf_refcnt_set(mbuf_init, 1);
109 mbuf_init->nb_segs = 1;
110 mbuf_init->port = rxq->port_id;
112 * prevent compiler reordering:
113 * rearm_data covers previous fields.
115 rte_compiler_barrier();
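/*
 * The vector Rx path copies this 64-bit value into the rearm_data of
 * every received mbuf, restoring data_off, refcnt, nb_segs and port in
 * a single store.
 */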
116 rxq->mbuf_initializer =
117 *(uint64_t *)&mbuf_init->rearm_data;
118 /* Padding with a fake mbuf for vectorized Rx. */
119 for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
120 (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
122 DEBUG("port %u Rx queue %u allocated and configured %u segments"
123 " (max %u packets)", rxq_ctrl->priv->dev->data->port_id,
124 rxq_ctrl->idx, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n));
127 err = rte_errno; /* Save rte_errno before cleanup. */
129 for (i = 0; (i != elts_n); ++i) {
130 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
131 rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
132 (*rxq_ctrl->rxq.elts)[i] = NULL;
134 DEBUG("port %u Rx queue %u failed, freed everything",
135 rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
136 rte_errno = err; /* Restore rte_errno. */
141 * Free RX queue elements.
144 * Pointer to RX queue structure.
147 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
149 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
150 const uint16_t q_n = (1 << rxq->elts_n);
151 const uint16_t q_mask = q_n - 1;
152 uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
155 DEBUG("port %u Rx queue %u freeing WRs",
156 rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
157 if (rxq->elts == NULL)
160 * Some mbufs in the ring still belong to the application; they cannot be freed.
163 if (mlx5_rxq_check_vec_support(rxq) > 0) {
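/*
 * These slots hold stale pointers to mbufs already handed to the
 * application; clear them so the free loop below does not free them
 * a second time.
 */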
164 for (i = 0; i < used; ++i)
165 (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
166 rxq->rq_pi = rxq->rq_ci;
168 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
169 if ((*rxq->elts)[i] != NULL)
170 rte_pktmbuf_free_seg((*rxq->elts)[i]);
171 (*rxq->elts)[i] = NULL;
176 * Clean up an Rx queue.
178 * Destroy objects, free allocated memory and reset the structure for reuse.
181 * Pointer to RX queue structure.
184 mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
186 DEBUG("port %u cleaning up Rx queue %u",
187 rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
189 mlx5_rxq_ibv_release(rxq_ctrl->ibv);
190 memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
194 * Returns the per-queue supported offloads.
197 * Pointer to Ethernet device.
200 * Supported Rx offloads.
203 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
205 struct priv *priv = dev->data->dev_private;
206 struct mlx5_dev_config *config = &priv->config;
207 uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
208 DEV_RX_OFFLOAD_TIMESTAMP |
209 DEV_RX_OFFLOAD_JUMBO_FRAME);
211 if (config->hw_fcs_strip)
212 offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
214 offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
215 DEV_RX_OFFLOAD_UDP_CKSUM |
216 DEV_RX_OFFLOAD_TCP_CKSUM);
217 if (config->hw_vlan_strip)
218 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
224 * Returns the per-port supported offloads.
227 * Supported Rx offloads.
230 mlx5_get_rx_port_offloads(void)
232 uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
238 * Checks if the per-queue offload configuration is valid.
241 * Pointer to Ethernet device.
243 * Per-queue offloads configuration.
246 * 1 if the configuration is valid, 0 otherwise.
249 mlx5_is_rx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads)
251 uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
252 uint64_t queue_supp_offloads = mlx5_get_rx_queue_offloads(dev);
253 uint64_t port_supp_offloads = mlx5_get_rx_port_offloads();
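/*
 * The request is valid only when it contains nothing but supported
 * offloads and every port-level offload it asks for matches the port
 * configuration exactly.
 */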
255 if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
258 if (((port_offloads ^ offloads) & port_supp_offloads))
266 * Pointer to Ethernet device structure.
270 * Number of descriptors to configure in queue.
272 * NUMA socket on which memory must be allocated.
274 * Thresholds parameters.
276 * Memory pool for buffer allocations.
279 * 0 on success, a negative errno value otherwise and rte_errno is set.
282 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
283 unsigned int socket, const struct rte_eth_rxconf *conf,
284 struct rte_mempool *mp)
286 struct priv *priv = dev->data->dev_private;
287 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
288 struct mlx5_rxq_ctrl *rxq_ctrl =
289 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
291 if (!rte_is_power_of_2(desc)) {
292 desc = 1 << log2above(desc);
293 WARN("port %u increased number of descriptors in Rx queue %u"
294 " to the next power of two (%d)",
295 dev->data->port_id, idx, desc);
297 DEBUG("port %u configuring Rx queue %u for %u descriptors",
298 dev->data->port_id, idx, desc);
299 if (idx >= priv->rxqs_n) {
300 ERROR("port %u Rx queue index out of range (%u >= %u)",
301 dev->data->port_id, idx, priv->rxqs_n);
302 rte_errno = EOVERFLOW;
305 if (!mlx5_is_rx_queue_offloads_allowed(dev, conf->offloads)) {
306 ERROR("port %u Rx queue offloads 0x%" PRIx64 " don't match"
307 " port offloads 0x%" PRIx64 " or supported offloads 0x%"
309 dev->data->port_id, conf->offloads,
310 dev->data->dev_conf.rxmode.offloads,
311 (mlx5_get_rx_port_offloads() |
312 mlx5_get_rx_queue_offloads(dev)));
316 if (!mlx5_rxq_releasable(dev, idx)) {
317 ERROR("port %u unable to release queue index %u",
318 dev->data->port_id, idx);
322 mlx5_rxq_release(dev, idx);
323 rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
325 ERROR("port %u unable to allocate queue index %u",
326 dev->data->port_id, idx);
330 DEBUG("port %u adding Rx queue %u to list", dev->data->port_id, idx);
331 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
336 * DPDK callback to release an Rx queue.
339 * Generic RX queue pointer.
342 mlx5_rx_queue_release(void *dpdk_rxq)
344 struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
345 struct mlx5_rxq_ctrl *rxq_ctrl;
350 rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
351 priv = rxq_ctrl->priv;
352 if (!mlx5_rxq_releasable(priv->dev, rxq_ctrl->rxq.stats.idx))
353 rte_panic("port %u Rx queue %u is still used by a flow and"
354 " cannot be removed\n", priv->dev->data->port_id,
356 mlx5_rxq_release(priv->dev, rxq_ctrl->rxq.stats.idx);
360 * Allocate queue vector and fill epoll fd list for Rx interrupts.
363 * Pointer to Ethernet device.
366 * 0 on success, a negative errno value otherwise and rte_errno is set.
369 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
371 struct priv *priv = dev->data->dev_private;
373 unsigned int rxqs_n = priv->rxqs_n;
374 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
375 unsigned int count = 0;
376 struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
378 if (!priv->dev->data->dev_conf.intr_conf.rxq)
380 mlx5_rx_intr_vec_disable(dev);
381 intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
382 if (intr_handle->intr_vec == NULL) {
383 ERROR("port %u failed to allocate memory for interrupt vector,"
384 " Rx interrupts will not be supported",
389 intr_handle->type = RTE_INTR_HANDLE_EXT;
390 for (i = 0; i != n; ++i) {
391 /* This rxq_ibv must not be released in this function. */
392 struct mlx5_rxq_ibv *rxq_ibv = mlx5_rxq_ibv_get(dev, i);
397 /* Skip queues that cannot request interrupts. */
398 if (!rxq_ibv || !rxq_ibv->channel) {
399 /* Use invalid intr_vec[] index to disable entry. */
400 intr_handle->intr_vec[i] =
401 RTE_INTR_VEC_RXTX_OFFSET +
402 RTE_MAX_RXTX_INTR_VEC_ID;
405 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
406 ERROR("port %u too many Rx queues for interrupt vector"
407 " size (%d), Rx interrupts cannot be enabled",
408 dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
409 mlx5_rx_intr_vec_disable(dev);
413 fd = rxq_ibv->channel->fd;
414 flags = fcntl(fd, F_GETFL);
415 rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
418 ERROR("port %u failed to make Rx interrupt file"
419 " descriptor %d non-blocking for queue index %d",
420 dev->data->port_id, fd, i);
421 mlx5_rx_intr_vec_disable(dev);
424 intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
425 intr_handle->efds[count] = fd;
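/*
 * intr_vec[i] now points at efds[count]; Rx vectors are offset by
 * RTE_INTR_VEC_RXTX_OFFSET as the EAL Rx interrupt API expects.
 */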
429 mlx5_rx_intr_vec_disable(dev);
431 intr_handle->nb_efd = count;
436 * Clean up the Rx interrupt handler.
439 * Pointer to Ethernet device.
442 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
444 struct priv *priv = dev->data->dev_private;
445 struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
447 unsigned int rxqs_n = priv->rxqs_n;
448 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
450 if (!priv->dev->data->dev_conf.intr_conf.rxq)
452 if (!intr_handle->intr_vec)
454 for (i = 0; i != n; ++i) {
455 struct mlx5_rxq_ctrl *rxq_ctrl;
456 struct mlx5_rxq_data *rxq_data;
458 if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
459 RTE_MAX_RXTX_INTR_VEC_ID)
462 * The queue must be accessed directly in order to release the reference
463 * kept by mlx5_rx_intr_vec_enable().
465 rxq_data = (*priv->rxqs)[i];
466 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
467 mlx5_rxq_ibv_release(rxq_ctrl->ibv);
470 rte_intr_free_epoll_fd(intr_handle);
471 if (intr_handle->intr_vec)
472 free(intr_handle->intr_vec);
473 intr_handle->nb_efd = 0;
474 intr_handle->intr_vec = NULL;
478 * MLX5 CQ notification.
481 * Pointer to receive queue structure.
483 * Sequence number per receive queue.
486 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
489 uint32_t doorbell_hi;
491 void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
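/*
 * The arm request is a 64-bit doorbell: the upper word holds the command
 * sequence number and the current CQ consumer index, the lower word the
 * CQ number. The upper word is also stored in the CQ doorbell record
 * before ringing the UAR register.
 */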
493 sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
494 doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
495 doorbell = (uint64_t)doorbell_hi << 32;
496 doorbell |= rxq->cqn;
497 rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
498 rte_write64(rte_cpu_to_be_64(doorbell), cq_db_reg);
502 * DPDK callback for Rx queue interrupt enable.
505 * Pointer to Ethernet device structure.
510 * 0 on success, a negative errno value otherwise and rte_errno is set.
513 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
515 struct priv *priv = dev->data->dev_private;
516 struct mlx5_rxq_data *rxq_data;
517 struct mlx5_rxq_ctrl *rxq_ctrl;
519 rxq_data = (*priv->rxqs)[rx_queue_id];
524 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
526 struct mlx5_rxq_ibv *rxq_ibv;
528 rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
533 mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
534 mlx5_rxq_ibv_release(rxq_ibv);
540 * DPDK callback for Rx queue interrupt disable.
543 * Pointer to Ethernet device structure.
548 * 0 on success, a negative errno value otherwise and rte_errno is set.
551 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
553 struct priv *priv = dev->data->dev_private;
554 struct mlx5_rxq_data *rxq_data;
555 struct mlx5_rxq_ctrl *rxq_ctrl;
556 struct mlx5_rxq_ibv *rxq_ibv = NULL;
557 struct ibv_cq *ev_cq;
561 rxq_data = (*priv->rxqs)[rx_queue_id];
566 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
569 rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
574 ret = mlx5_glue->get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx);
575 if (ret || ev_cq != rxq_ibv->cq) {
579 rxq_data->cq_arm_sn++;
580 mlx5_glue->ack_cq_events(rxq_ibv->cq, 1);
583 ret = rte_errno; /* Save rte_errno before cleanup. */
585 mlx5_rxq_ibv_release(rxq_ibv);
586 WARN("port %u unable to disable interrupt on Rx queue %d",
587 dev->data->port_id, rx_queue_id);
588 rte_errno = ret; /* Restore rte_errno. */
593 * Create the Rx queue Verbs object.
596 * Pointer to Ethernet device.
598 * Queue index in the DPDK Rx queue array.
601 * The Verbs object initialised, NULL otherwise and rte_errno is set.
603 struct mlx5_rxq_ibv *
604 mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
606 struct priv *priv = dev->data->dev_private;
607 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
608 struct mlx5_rxq_ctrl *rxq_ctrl =
609 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
610 struct ibv_wq_attr mod;
613 struct ibv_cq_init_attr_ex ibv;
614 struct mlx5dv_cq_init_attr mlx5;
616 struct ibv_wq_init_attr wq;
617 struct ibv_cq_ex cq_attr;
619 unsigned int cqe_n = (1 << rxq_data->elts_n) - 1;
620 struct mlx5_rxq_ibv *tmpl;
621 struct mlx5dv_cq cq_info;
622 struct mlx5dv_rwq rwq;
625 struct mlx5dv_obj obj;
626 struct mlx5_dev_config *config = &priv->config;
629 assert(!rxq_ctrl->ibv);
630 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
631 priv->verbs_alloc_ctx.obj = rxq_ctrl;
632 tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
635 ERROR("port %u Rx queue %u cannot allocate verbs resources",
636 dev->data->port_id, rxq_ctrl->idx);
640 tmpl->rxq_ctrl = rxq_ctrl;
641 /* Use the entire RX mempool as the memory region. */
642 tmpl->mr = mlx5_mr_get(dev, rxq_data->mp);
644 tmpl->mr = mlx5_mr_new(dev, rxq_data->mp);
646 ERROR("port %u: memory region creation failure",
652 tmpl->channel = mlx5_glue->create_comp_channel(priv->ctx);
653 if (!tmpl->channel) {
654 ERROR("port %u: comp channel creation failure",
660 attr.cq.ibv = (struct ibv_cq_init_attr_ex){
662 .channel = tmpl->channel,
665 attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){
668 if (config->cqe_comp && !rxq_data->hw_timestamp) {
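/*
 * With CQE compression the device coalesces several completions into one
 * CQE followed by compressed mini-CQEs; the hash format keeps the RSS
 * hash result available in each mini-CQE.
 */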
669 attr.cq.mlx5.comp_mask |=
670 MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
671 attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
673 * For vectorized Rx it must not be doubled so that cq_ci and
674 * rq_ci stay aligned.
676 if (mlx5_rxq_check_vec_support(rxq_data) < 0)
677 attr.cq.ibv.cqe *= 2;
678 } else if (config->cqe_comp && rxq_data->hw_timestamp) {
679 DEBUG("port %u Rx CQE compression is disabled for HW timestamp",
682 tmpl->cq = mlx5_glue->cq_ex_to_cq
683 (mlx5_glue->dv_create_cq(priv->ctx, &attr.cq.ibv,
685 if (tmpl->cq == NULL) {
686 ERROR("port %u Rx queue %u CQ creation failure",
687 dev->data->port_id, idx);
691 DEBUG("port %u priv->device_attr.max_qp_wr is %d",
692 dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
693 DEBUG("port %u priv->device_attr.max_sge is %d",
694 dev->data->port_id, priv->device_attr.orig_attr.max_sge);
695 attr.wq = (struct ibv_wq_init_attr){
696 .wq_context = NULL, /* Could be useful in the future. */
697 .wq_type = IBV_WQT_RQ,
698 /* Max number of outstanding WRs. */
699 .max_wr = (1 << rxq_data->elts_n) >> rxq_data->sges_n,
700 /* Max number of scatter/gather elements in a WR. */
701 .max_sge = 1 << rxq_data->sges_n,
705 IBV_WQ_FLAGS_CVLAN_STRIPPING |
707 .create_flags = (rxq_data->vlan_strip ?
708 IBV_WQ_FLAGS_CVLAN_STRIPPING :
711 /* By default, FCS (CRC) is stripped by hardware. */
712 if (rxq_data->crc_present) {
713 attr.wq.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
714 attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
716 #ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
717 if (config->hw_padding) {
718 attr.wq.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
719 attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
722 tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq);
723 if (tmpl->wq == NULL) {
724 ERROR("port %u Rx queue %u WQ creation failure",
725 dev->data->port_id, idx);
730 * Make sure the number of WRs*SGEs matches expectations since a queue
731 * cannot allocate more than "desc" buffers.
733 if (((int)attr.wq.max_wr !=
734 ((1 << rxq_data->elts_n) >> rxq_data->sges_n)) ||
735 ((int)attr.wq.max_sge != (1 << rxq_data->sges_n))) {
736 ERROR("port %u Rx queue %u requested %u*%u but got %u*%u"
738 dev->data->port_id, idx,
739 ((1 << rxq_data->elts_n) >> rxq_data->sges_n),
740 (1 << rxq_data->sges_n),
741 attr.wq.max_wr, attr.wq.max_sge);
745 /* Change queue state to ready. */
746 mod = (struct ibv_wq_attr){
747 .attr_mask = IBV_WQ_ATTR_STATE,
748 .wq_state = IBV_WQS_RDY,
750 ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
752 ERROR("port %u Rx queue %u WQ state to IBV_WQS_RDY failed",
753 dev->data->port_id, idx);
757 obj.cq.in = tmpl->cq;
758 obj.cq.out = &cq_info;
759 obj.rwq.in = tmpl->wq;
761 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);
766 if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
767 ERROR("port %u wrong MLX5_CQE_SIZE environment variable value: "
768 "it should be set to %u", dev->data->port_id,
769 RTE_CACHE_LINE_SIZE);
773 /* Fill the rings. */
774 rxq_data->wqes = (volatile struct mlx5_wqe_data_seg (*)[])
776 for (i = 0; (i != (unsigned int)(1 << rxq_data->elts_n)); ++i) {
777 struct rte_mbuf *buf = (*rxq_data->elts)[i];
778 volatile struct mlx5_wqe_data_seg *scat = &(*rxq_data->wqes)[i];
780 /* scat->addr must be able to store a pointer. */
781 assert(sizeof(scat->addr) >= sizeof(uintptr_t));
782 *scat = (struct mlx5_wqe_data_seg){
783 .addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
785 .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
786 .lkey = tmpl->mr->lkey,
789 rxq_data->rq_db = rwq.dbrec;
790 rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
794 rxq_data->zip = (struct rxq_zip){
797 rxq_data->cq_db = cq_info.dbrec;
798 rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
799 rxq_data->cq_uar = cq_info.cq_uar;
800 rxq_data->cqn = cq_info.cqn;
801 rxq_data->cq_arm_sn = 0;
802 /* Update doorbell counter. */
803 rxq_data->rq_ci = (1 << rxq_data->elts_n) >> rxq_data->sges_n;
805 *rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci);
806 DEBUG("port %u rxq %u updated with %p", dev->data->port_id, idx,
808 rte_atomic32_inc(&tmpl->refcnt);
809 DEBUG("port %u Verbs Rx queue %u: refcnt %d", dev->data->port_id, idx,
810 rte_atomic32_read(&tmpl->refcnt));
811 LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next);
812 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
815 ret = rte_errno; /* Save rte_errno before cleanup. */
817 claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
819 claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
821 claim_zero(mlx5_glue->destroy_comp_channel(tmpl->channel));
823 mlx5_mr_release(tmpl->mr);
824 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
825 rte_errno = ret; /* Restore rte_errno. */
830 * Get an Rx queue Verbs object.
833 * Pointer to Ethernet device.
835 * Queue index in the DPDK Rx queue array.
838 * The Verbs object if it exists.
840 struct mlx5_rxq_ibv *
841 mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
843 struct priv *priv = dev->data->dev_private;
844 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
845 struct mlx5_rxq_ctrl *rxq_ctrl;
847 if (idx >= priv->rxqs_n)
851 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
853 mlx5_mr_get(dev, rxq_data->mp);
854 rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
855 DEBUG("port %u Verbs Rx queue %u: refcnt %d",
856 dev->data->port_id, rxq_ctrl->idx,
857 rte_atomic32_read(&rxq_ctrl->ibv->refcnt));
859 return rxq_ctrl->ibv;
863 * Release a Verbs Rx queue object.
866 * Verbs Rx queue object.
869 * 1 while a reference on it exists, 0 when freed.
872 mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
880 ret = mlx5_mr_release(rxq_ibv->mr);
883 DEBUG("port %u Verbs Rx queue %u: refcnt %d",
884 rxq_ibv->rxq_ctrl->priv->dev->data->port_id,
885 rxq_ibv->rxq_ctrl->idx, rte_atomic32_read(&rxq_ibv->refcnt));
886 if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
887 rxq_free_elts(rxq_ibv->rxq_ctrl);
888 claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq));
889 claim_zero(mlx5_glue->destroy_cq(rxq_ibv->cq));
890 if (rxq_ibv->channel)
891 claim_zero(mlx5_glue->destroy_comp_channel
893 LIST_REMOVE(rxq_ibv, next);
901 * Verify the Verbs Rx queue list is empty.
904 * Pointer to Ethernet device.
907 * The number of objects not released.
910 mlx5_rxq_ibv_verify(struct rte_eth_dev *dev)
912 struct priv *priv = dev->data->dev_private;
914 struct mlx5_rxq_ibv *rxq_ibv;
916 LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) {
917 DEBUG("port %u Verbs Rx queue %u still referenced",
918 dev->data->port_id, rxq_ibv->rxq_ctrl->idx);
925 * Return true if a single reference exists on the object.
928 * Verbs Rx queue object.
931 mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv)
934 return (rte_atomic32_read(&rxq_ibv->refcnt) == 1);
938 * Create a DPDK Rx queue.
941 * Pointer to Ethernet device.
945 * Number of descriptors to configure in queue.
947 * NUMA socket on which memory must be allocated.
950 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
952 struct mlx5_rxq_ctrl *
953 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
954 unsigned int socket, const struct rte_eth_rxconf *conf,
955 struct rte_mempool *mp)
957 struct priv *priv = dev->data->dev_private;
958 struct mlx5_rxq_ctrl *tmpl;
959 unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
960 struct mlx5_dev_config *config = &priv->config;
962 * Always allocate extra slots, even if vectorized Rx
963 * ends up not being used.
965 const uint16_t desc_n =
966 desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
968 tmpl = rte_calloc_socket("RXQ", 1,
970 desc_n * sizeof(struct rte_mbuf *),
976 tmpl->socket = socket;
977 if (priv->dev->data->dev_conf.intr_conf.rxq)
979 /* Enable scattered packet support for this queue if necessary. */
980 assert(mb_len >= RTE_PKTMBUF_HEADROOM);
981 if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
982 (mb_len - RTE_PKTMBUF_HEADROOM)) {
983 tmpl->rxq.sges_n = 0;
984 } else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
986 RTE_PKTMBUF_HEADROOM +
987 dev->data->dev_conf.rxmode.max_rx_pkt_len;
991 * Determine the number of SGEs needed for a full packet
992 * and round it to the next power of two.
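 * For instance, assuming the default 128-byte headroom and mbufs with a
 * 2176-byte data room, a 9000-byte max_rx_pkt_len yields size = 9128,
 * i.e. 5 mbufs per packet, which is rounded up to 8 SGEs.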
994 sges_n = log2above((size / mb_len) + !!(size % mb_len));
995 tmpl->rxq.sges_n = sges_n;
996 /* Make sure rxq.sges_n did not overflow. */
997 size = mb_len * (1 << tmpl->rxq.sges_n);
998 size -= RTE_PKTMBUF_HEADROOM;
999 if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
1000 ERROR("port %u too many SGEs (%u) needed to handle"
1001 " requested maximum packet size %u",
1004 dev->data->dev_conf.rxmode.max_rx_pkt_len);
1005 rte_errno = EOVERFLOW;
1009 WARN("port %u the requested maximum Rx packet size (%u) is"
1010 " larger than a single mbuf (%u) and scattered"
1011 " mode has not been requested",
1013 dev->data->dev_conf.rxmode.max_rx_pkt_len,
1014 mb_len - RTE_PKTMBUF_HEADROOM);
1016 DEBUG("port %u maximum number of segments per packet: %u",
1017 dev->data->port_id, 1 << tmpl->rxq.sges_n);
1018 if (desc % (1 << tmpl->rxq.sges_n)) {
1019 ERROR("port %u number of Rx queue descriptors (%u) is not a"
1020 " multiple of SGEs per packet (%u)",
1023 1 << tmpl->rxq.sges_n);
1027 /* Toggle RX checksum offload if hardware supports it. */
1028 tmpl->rxq.csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM);
1029 tmpl->rxq.csum_l2tun = (!!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) &&
1030 priv->config.tunnel_en);
1031 tmpl->rxq.hw_timestamp = !!(conf->offloads & DEV_RX_OFFLOAD_TIMESTAMP);
1032 /* Configure VLAN stripping. */
1033 tmpl->rxq.vlan_strip = !!(conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1034 /* By default, FCS (CRC) is stripped by hardware. */
1035 if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
1036 tmpl->rxq.crc_present = 0;
1037 } else if (config->hw_fcs_strip) {
1038 tmpl->rxq.crc_present = 1;
1040 WARN("port %u CRC stripping has been disabled but will still"
1041 " be performed by hardware, make sure MLNX_OFED and"
1042 " firmware are up to date",
1043 dev->data->port_id);
1044 tmpl->rxq.crc_present = 0;
1046 DEBUG("port %u CRC stripping is %s, %u bytes will be subtracted from"
1047 " incoming frames to hide it",
1049 tmpl->rxq.crc_present ? "disabled" : "enabled",
1050 tmpl->rxq.crc_present << 2);
1052 tmpl->rxq.rss_hash = priv->rxqs_n > 1;
1053 tmpl->rxq.port_id = dev->data->port_id;
1056 tmpl->rxq.stats.idx = idx;
1057 tmpl->rxq.elts_n = log2above(desc);
1059 (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
1061 rte_atomic32_inc(&tmpl->refcnt);
1062 DEBUG("port %u Rx queue %u: refcnt %d", dev->data->port_id,
1063 idx, rte_atomic32_read(&tmpl->refcnt));
1064 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1075 * Pointer to Ethernet device.
1080 * A pointer to the queue if it exists, NULL otherwise.
1082 struct mlx5_rxq_ctrl *
1083 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
1085 struct priv *priv = dev->data->dev_private;
1086 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1088 if ((*priv->rxqs)[idx]) {
1089 rxq_ctrl = container_of((*priv->rxqs)[idx],
1090 struct mlx5_rxq_ctrl,
1092 mlx5_rxq_ibv_get(dev, idx);
1093 rte_atomic32_inc(&rxq_ctrl->refcnt);
1094 DEBUG("port %u Rx queue %u: refcnt %d", dev->data->port_id,
1095 rxq_ctrl->idx, rte_atomic32_read(&rxq_ctrl->refcnt));
1101 * Release an Rx queue.
1104 * Pointer to Ethernet device.
1109 * 1 while a reference on it exists, 0 when freed.
1112 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
1114 struct priv *priv = dev->data->dev_private;
1115 struct mlx5_rxq_ctrl *rxq_ctrl;
1117 if (!(*priv->rxqs)[idx])
1119 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1120 assert(rxq_ctrl->priv);
1121 if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv))
1122 rxq_ctrl->ibv = NULL;
1123 DEBUG("port %u Rx queue %u: refcnt %d", dev->data->port_id,
1124 rxq_ctrl->idx, rte_atomic32_read(&rxq_ctrl->refcnt));
1125 if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
1126 LIST_REMOVE(rxq_ctrl, next);
1128 (*priv->rxqs)[idx] = NULL;
1135 * Verify if the queue can be released.
1138 * Pointer to Ethernet device.
1143 * 1 if the queue can be released, negative errno otherwise and rte_errno is
1147 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
1149 struct priv *priv = dev->data->dev_private;
1150 struct mlx5_rxq_ctrl *rxq_ctrl;
1152 if (!(*priv->rxqs)[idx]) {
1156 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1157 return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
1161 * Verify the Rx queue list is empty.
1164 * Pointer to Ethernet device.
1167 * The number of objects not released.
1170 mlx5_rxq_verify(struct rte_eth_dev *dev)
1172 struct priv *priv = dev->data->dev_private;
1173 struct mlx5_rxq_ctrl *rxq_ctrl;
1176 LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
1177 DEBUG("port %u Rx queue %u still referenced",
1178 dev->data->port_id, rxq_ctrl->idx);
1185 * Create an indirection table.
1188 * Pointer to Ethernet device.
1190 * Queues entering the indirection table.
1192 * Number of queues in the array.
1195 * The Verbs object initialised, NULL otherwise and rte_errno is set.
1197 struct mlx5_ind_table_ibv *
1198 mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, uint16_t queues[],
1201 struct priv *priv = dev->data->dev_private;
1202 struct mlx5_ind_table_ibv *ind_tbl;
1203 const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
1204 log2above(queues_n) :
1205 log2above(priv->config.ind_table_max_size);
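/*
 * The indirection table size must be a power of two. When queues_n is
 * not, the table is sized to the configured maximum and the remaining
 * entries are filled by wrapping over the queue list further down.
 */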
1206 struct ibv_wq *wq[1 << wq_n];
1210 ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
1211 queues_n * sizeof(uint16_t), 0);
1216 for (i = 0; i != queues_n; ++i) {
1217 struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
1221 wq[i] = rxq->ibv->wq;
1222 ind_tbl->queues[i] = queues[i];
1224 ind_tbl->queues_n = queues_n;
1225 /* Finalise indirection table. */
1226 for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j)
1228 ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
1230 &(struct ibv_rwq_ind_table_init_attr){
1231 .log_ind_tbl_size = wq_n,
1235 if (!ind_tbl->ind_table) {
1239 rte_atomic32_inc(&ind_tbl->refcnt);
1240 LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
1241 DEBUG("port %u indirection table %p: refcnt %d", dev->data->port_id,
1242 (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
1246 DEBUG("port %u cannot create indirection table", dev->data->port_id);
1251 * Get an indirection table.
1254 * Pointer to Ethernet device.
1256 * Queues entering the indirection table.
1258 * Number of queues in the array.
1261 * An indirection table if found.
1263 struct mlx5_ind_table_ibv *
1264 mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, uint16_t queues[],
1267 struct priv *priv = dev->data->dev_private;
1268 struct mlx5_ind_table_ibv *ind_tbl;
1270 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1271 if ((ind_tbl->queues_n == queues_n) &&
1272 (memcmp(ind_tbl->queues, queues,
1273 ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
1280 rte_atomic32_inc(&ind_tbl->refcnt);
1281 DEBUG("port %u indirection table %p: refcnt %d",
1282 dev->data->port_id, (void *)ind_tbl,
1283 rte_atomic32_read(&ind_tbl->refcnt));
1284 for (i = 0; i != ind_tbl->queues_n; ++i)
1285 mlx5_rxq_get(dev, ind_tbl->queues[i]);
1291 * Release an indirection table.
1294 * Pointer to Ethernet device.
1296 * Indirection table to release.
1299 * 1 while a reference on it exists, 0 when freed.
1302 mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
1303 struct mlx5_ind_table_ibv *ind_tbl)
1307 DEBUG("port %u indirection table %p: refcnt %d",
1308 ((struct priv *)dev->data->dev_private)->port,
1309 (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
1310 if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
1311 claim_zero(mlx5_glue->destroy_rwq_ind_table
1312 (ind_tbl->ind_table));
1313 for (i = 0; i != ind_tbl->queues_n; ++i)
1314 claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
1315 if (!rte_atomic32_read(&ind_tbl->refcnt)) {
1316 LIST_REMOVE(ind_tbl, next);
1324 * Verify the Verbs indirection table list is empty.
1327 * Pointer to Ethernet device.
1330 * The number of objects not released.
1333 mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
1335 struct priv *priv = dev->data->dev_private;
1336 struct mlx5_ind_table_ibv *ind_tbl;
1339 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1340 DEBUG("port %u Verbs indirection table %p still referenced",
1341 dev->data->port_id, (void *)ind_tbl);
1348 * Create a hash Rx queue.
1351 * Pointer to Ethernet device.
1353 * RSS key for the Rx hash queue.
1354 * @param rss_key_len
1356 * @param hash_fields
1357 * Verbs protocol hash field to make the RSS on.
1359 * Queues entering the hash queue. When hash_fields is empty, only the
1360 * first queue index is used for the indirection table.
1365 * The Verbs object initialised, NULL otherwise and rte_errno is set.
1368 mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
1369 uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
1371 struct priv *priv = dev->data->dev_private;
1372 struct mlx5_hrxq *hrxq;
1373 struct mlx5_ind_table_ibv *ind_tbl;
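/*
 * Without hash fields RSS is not performed, so a single-entry
 * indirection table pointing at the first queue is sufficient.
 */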
1377 queues_n = hash_fields ? queues_n : 1;
1378 ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
1380 ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n);
1385 qp = mlx5_glue->create_qp_ex
1387 &(struct ibv_qp_init_attr_ex){
1388 .qp_type = IBV_QPT_RAW_PACKET,
1390 IBV_QP_INIT_ATTR_PD |
1391 IBV_QP_INIT_ATTR_IND_TABLE |
1392 IBV_QP_INIT_ATTR_RX_HASH,
1393 .rx_hash_conf = (struct ibv_rx_hash_conf){
1394 .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
1395 .rx_hash_key_len = rss_key_len,
1396 .rx_hash_key = rss_key,
1397 .rx_hash_fields_mask = hash_fields,
1399 .rwq_ind_tbl = ind_tbl->ind_table,
1406 hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
1409 hrxq->ind_table = ind_tbl;
1411 hrxq->rss_key_len = rss_key_len;
1412 hrxq->hash_fields = hash_fields;
1413 memcpy(hrxq->rss_key, rss_key, rss_key_len);
1414 rte_atomic32_inc(&hrxq->refcnt);
1415 LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
1416 DEBUG("port %u hash Rx queue %p: refcnt %d", dev->data->port_id,
1417 (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
1420 err = rte_errno; /* Save rte_errno before cleanup. */
1421 mlx5_ind_table_ibv_release(dev, ind_tbl);
1423 claim_zero(mlx5_glue->destroy_qp(qp));
1424 rte_errno = err; /* Restore rte_errno. */
1429 * Get a hash Rx queue.
1432 * Pointer to Ethernet device.
1434 * RSS configuration for the Rx hash queue.
1436 * Queues entering the hash queue. When hash_fields is empty, only the
1437 * first queue index is used for the indirection table.
1442 * A hash Rx queue on success.
1445 mlx5_hrxq_get(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
1446 uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
1448 struct priv *priv = dev->data->dev_private;
1449 struct mlx5_hrxq *hrxq;
1451 queues_n = hash_fields ? queues_n : 1;
1452 LIST_FOREACH(hrxq, &priv->hrxqs, next) {
1453 struct mlx5_ind_table_ibv *ind_tbl;
1455 if (hrxq->rss_key_len != rss_key_len)
1457 if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
1459 if (hrxq->hash_fields != hash_fields)
1461 ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
1464 if (ind_tbl != hrxq->ind_table) {
1465 mlx5_ind_table_ibv_release(dev, ind_tbl);
1468 rte_atomic32_inc(&hrxq->refcnt);
1469 DEBUG("port %u hash Rx queue %p: refcnt %d", dev->data->port_id,
1470 (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
1477 * Release the hash Rx queue.
1480 * Pointer to Ethernet device.
1482 * Pointer to Hash Rx queue to release.
1485 * 1 while a reference on it exists, 0 when freed.
1488 mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
1490 DEBUG("port %u hash Rx queue %p: refcnt %d",
1491 ((struct priv *)dev->data->dev_private)->port,
1492 (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
1493 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
1494 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
1495 mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
1496 LIST_REMOVE(hrxq, next);
1500 claim_nonzero(mlx5_ind_table_ibv_release(dev, hrxq->ind_table));
1505 * Verify the hash Rx queue list is empty.
1508 * Pointer to Ethernet device.
1511 * The number of objects not released.
1514 mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
1516 struct priv *priv = dev->data->dev_private;
1517 struct mlx5_hrxq *hrxq;
1520 LIST_FOREACH(hrxq, &priv->hrxqs, next) {
1521 DEBUG("port %u Verbs hash Rx queue %p still referenced",
1522 dev->data->port_id, (void *)hrxq);