1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
12 #include <sys/queue.h>
15 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
17 #pragma GCC diagnostic ignored "-Wpedantic"
19 #include <infiniband/verbs.h>
20 #include <infiniband/mlx5dv.h>
22 #pragma GCC diagnostic error "-Wpedantic"
26 #include <rte_malloc.h>
27 #include <rte_ethdev_driver.h>
28 #include <rte_common.h>
29 #include <rte_interrupts.h>
30 #include <rte_debug.h>
34 #include "mlx5_rxtx.h"
35 #include "mlx5_utils.h"
36 #include "mlx5_autoconf.h"
37 #include "mlx5_defs.h"
38 #include "mlx5_glue.h"
40 /* Default RSS hash key also used for ConnectX-3. */
41 uint8_t rss_hash_default_key[] = {
42 0x2c, 0xc6, 0x81, 0xd1,
43 0x5b, 0xdb, 0xf4, 0xf7,
44 0xfc, 0xa2, 0x83, 0x19,
45 0xdb, 0x1a, 0x3e, 0x94,
46 0x6b, 0x9e, 0x38, 0xd9,
47 0x2c, 0x9c, 0x03, 0xd1,
48 0xad, 0x99, 0x44, 0xa7,
49 0xd9, 0x56, 0x3d, 0x59,
50 0x06, 0x3c, 0x25, 0xf3,
51 0xfc, 0x1f, 0xdc, 0x2a,
54 /* Length of the default RSS hash key. */
55 const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);
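/*
 * Note: this 40-byte Toeplitz key is also the fallback installed by
 * mlx5_hrxq_new() below whenever the caller does not supply an RSS key.
 */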
58 * Allocate RX queue elements.
61 * Pointer to RX queue structure.
64 * 0 on success, a negative errno value otherwise and rte_errno is set.
67 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
69 const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
70 unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
74 /* Iterate on segments. */
75 for (i = 0; (i != elts_n); ++i) {
78 buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
80 DRV_LOG(ERR, "port %u empty mbuf pool",
81 PORT_ID(rxq_ctrl->priv));
85 /* Headroom is reserved by rte_pktmbuf_alloc(). */
86 assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
87 /* Buffer is supposed to be empty. */
88 assert(rte_pktmbuf_data_len(buf) == 0);
89 assert(rte_pktmbuf_pkt_len(buf) == 0);
91 /* Only the first segment keeps headroom. */
94 PORT(buf) = rxq_ctrl->rxq.port_id;
95 DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
96 PKT_LEN(buf) = DATA_LEN(buf);
98 (*rxq_ctrl->rxq.elts)[i] = buf;
100 /* If Rx vector is activated. */
101 if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
102 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
103 struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
106 /* Initialize default rearm_data for vPMD. */
107 mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
108 rte_mbuf_refcnt_set(mbuf_init, 1);
109 mbuf_init->nb_segs = 1;
110 mbuf_init->port = rxq->port_id;
112 * prevent compiler reordering:
113 * rearm_data covers previous fields.
115 rte_compiler_barrier();
116 rxq->mbuf_initializer =
117 *(uint64_t *)&mbuf_init->rearm_data;
118 /* Padding with a fake mbuf for vectorized Rx. */
119 for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
120 (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
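/*
 * Note: mbuf_initializer snapshots the 8-byte rearm_data area initialized
 * above (data_off, refcnt, nb_segs, port) so the vectorized Rx burst can
 * reset every received mbuf with a single 64-bit store; the trailing
 * fake_mbuf entries let it always consume MLX5_VPMD_DESCS_PER_LOOP
 * descriptors per iteration without reading past the ring.
 */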
123 "port %u Rx queue %u allocated and configured %u segments"
125 PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx, elts_n,
126 elts_n / (1 << rxq_ctrl->rxq.sges_n));
129 err = rte_errno; /* Save rte_errno before cleanup. */
131 for (i = 0; (i != elts_n); ++i) {
132 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
133 rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
134 (*rxq_ctrl->rxq.elts)[i] = NULL;
136 DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
137 PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
138 rte_errno = err; /* Restore rte_errno. */
143 * Free RX queue elements.
146 * Pointer to RX queue structure.
149 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
151 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
152 const uint16_t q_n = (1 << rxq->elts_n);
153 const uint16_t q_mask = q_n - 1;
154 uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
157 DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
158 PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
159 if (rxq->elts == NULL)
162 * Some mbufs in the ring belong to the application. They cannot be
165 if (mlx5_rxq_check_vec_support(rxq) > 0) {
166 for (i = 0; i < used; ++i)
167 (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
168 rxq->rq_pi = rxq->rq_ci;
170 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
171 if ((*rxq->elts)[i] != NULL)
172 rte_pktmbuf_free_seg((*rxq->elts)[i]);
173 (*rxq->elts)[i] = NULL;
178 * Clean up an RX queue.
180 * Destroy objects, free allocated memory and reset the structure for reuse.
183 * Pointer to RX queue structure.
186 mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
188 DRV_LOG(DEBUG, "port %u cleaning up Rx queue %u",
189 PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
191 mlx5_rxq_ibv_release(rxq_ctrl->ibv);
192 memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
196 * Returns the per-queue supported offloads.
199 * Pointer to Ethernet device.
202 * Supported Rx offloads.
205 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
207 struct priv *priv = dev->data->dev_private;
208 struct mlx5_dev_config *config = &priv->config;
209 uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
210 DEV_RX_OFFLOAD_TIMESTAMP |
211 DEV_RX_OFFLOAD_JUMBO_FRAME);
213 if (config->hw_fcs_strip)
214 offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
216 offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
217 DEV_RX_OFFLOAD_UDP_CKSUM |
218 DEV_RX_OFFLOAD_TCP_CKSUM);
219 if (config->hw_vlan_strip)
220 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
226 * Returns the per-port supported offloads.
229 * Supported Rx offloads.
232 mlx5_get_rx_port_offloads(void)
234 uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
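/*
 * Unlike the per-queue set above, offloads listed here cannot be toggled on
 * an individual queue: VLAN filtering acts on the whole port, hence it is
 * only reported at port level.
 */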
242 * Pointer to Ethernet device structure.
246 * Number of descriptors to configure in queue.
248 * NUMA socket on which memory must be allocated.
250 * Thresholds parameters.
252 * Memory pool for buffer allocations.
255 * 0 on success, a negative errno value otherwise and rte_errno is set.
258 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
259 unsigned int socket, const struct rte_eth_rxconf *conf,
260 struct rte_mempool *mp)
262 struct priv *priv = dev->data->dev_private;
263 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
264 struct mlx5_rxq_ctrl *rxq_ctrl =
265 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
267 if (!rte_is_power_of_2(desc)) {
268 desc = 1 << log2above(desc);
270 "port %u increased number of descriptors in Rx queue %u"
271 " to the next power of two (%d)",
272 dev->data->port_id, idx, desc);
274 DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
275 dev->data->port_id, idx, desc);
276 if (idx >= priv->rxqs_n) {
277 DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
278 dev->data->port_id, idx, priv->rxqs_n);
279 rte_errno = EOVERFLOW;
282 if (!mlx5_rxq_releasable(dev, idx)) {
283 DRV_LOG(ERR, "port %u unable to release queue index %u",
284 dev->data->port_id, idx);
288 mlx5_rxq_release(dev, idx);
289 rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
291 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
292 dev->data->port_id, idx);
296 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
297 dev->data->port_id, idx);
298 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
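/*
 * Illustrative only: this callback is reached through the generic ethdev
 * API. A typical application call (pool name and sizes are arbitrary):
 *
 *   struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256,
 *           0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *   ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL, mp);
 *
 * A NULL rte_eth_rxconf selects the defaults reported by
 * rte_eth_dev_info_get().
 */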
303 * DPDK callback to release an RX queue.
306 * Generic RX queue pointer.
309 mlx5_rx_queue_release(void *dpdk_rxq)
311 struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
312 struct mlx5_rxq_ctrl *rxq_ctrl;
317 rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
318 priv = rxq_ctrl->priv;
319 if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx))
320 rte_panic("port %u Rx queue %u is still used by a flow and"
321 " cannot be removed\n",
322 PORT_ID(priv), rxq_ctrl->idx);
323 mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx);
327 * Allocate queue vector and fill epoll fd list for Rx interrupts.
330 * Pointer to Ethernet device.
333 * 0 on success, a negative errno value otherwise and rte_errno is set.
336 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
338 struct priv *priv = dev->data->dev_private;
340 unsigned int rxqs_n = priv->rxqs_n;
341 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
342 unsigned int count = 0;
343 struct rte_intr_handle *intr_handle = dev->intr_handle;
345 if (!dev->data->dev_conf.intr_conf.rxq)
347 mlx5_rx_intr_vec_disable(dev);
348 intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
349 if (intr_handle->intr_vec == NULL) {
351 "port %u failed to allocate memory for interrupt"
352 " vector, Rx interrupts will not be supported",
357 intr_handle->type = RTE_INTR_HANDLE_EXT;
358 for (i = 0; i != n; ++i) {
359 /* This rxq ibv must not be released in this function. */
360 struct mlx5_rxq_ibv *rxq_ibv = mlx5_rxq_ibv_get(dev, i);
365 /* Skip queues that cannot request interrupts. */
366 if (!rxq_ibv || !rxq_ibv->channel) {
367 /* Use invalid intr_vec[] index to disable entry. */
368 intr_handle->intr_vec[i] =
369 RTE_INTR_VEC_RXTX_OFFSET +
370 RTE_MAX_RXTX_INTR_VEC_ID;
373 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
375 "port %u too many Rx queues for interrupt"
376 " vector size (%d), Rx interrupts cannot be"
378 dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
379 mlx5_rx_intr_vec_disable(dev);
383 fd = rxq_ibv->channel->fd;
384 flags = fcntl(fd, F_GETFL);
385 rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
389 "port %u failed to make Rx interrupt file"
390 " descriptor %d non-blocking for queue index"
392 dev->data->port_id, fd, i);
393 mlx5_rx_intr_vec_disable(dev);
396 intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
397 intr_handle->efds[count] = fd;
401 mlx5_rx_intr_vec_disable(dev);
403 intr_handle->nb_efd = count;
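/*
 * At this point every queue able to raise interrupts owns one entry in
 * intr_handle->efds[] (its completion channel fd, made non-blocking above)
 * and intr_vec[i] holds its slot offset by RTE_INTR_VEC_RXTX_OFFSET;
 * queues without a channel keep the invalid index set earlier and simply
 * cannot use Rx interrupts.
 */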
408 * Clean up the Rx interrupt handler.
411 * Pointer to Ethernet device.
414 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
416 struct priv *priv = dev->data->dev_private;
417 struct rte_intr_handle *intr_handle = dev->intr_handle;
419 unsigned int rxqs_n = priv->rxqs_n;
420 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
422 if (!dev->data->dev_conf.intr_conf.rxq)
424 if (!intr_handle->intr_vec)
426 for (i = 0; i != n; ++i) {
427 struct mlx5_rxq_ctrl *rxq_ctrl;
428 struct mlx5_rxq_data *rxq_data;
430 if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
431 RTE_MAX_RXTX_INTR_VEC_ID)
434 * Need to access the queue directly to release the reference
435 * kept in mlx5_rx_intr_vec_enable().
437 rxq_data = (*priv->rxqs)[i];
438 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
439 mlx5_rxq_ibv_release(rxq_ctrl->ibv);
442 rte_intr_free_epoll_fd(intr_handle);
443 if (intr_handle->intr_vec)
444 free(intr_handle->intr_vec);
445 intr_handle->nb_efd = 0;
446 intr_handle->intr_vec = NULL;
450 * MLX5 CQ notification.
453 * Pointer to receive queue structure.
455 * Sequence number per receive queue.
458 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
461 uint32_t doorbell_hi;
463 void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
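/*
 * Compose the arm doorbell: the upper 32 bits carry the arm sequence
 * number and the current CQ consumer index, the lower 32 bits the CQ
 * number. The doorbell record is updated before ringing the UAR register
 * so the device observes a consistent consumer index.
 */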
465 sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
466 doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
467 doorbell = (uint64_t)doorbell_hi << 32;
468 doorbell |= rxq->cqn;
469 rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
470 rte_write64(rte_cpu_to_be_64(doorbell), cq_db_reg);
474 * DPDK callback for Rx queue interrupt enable.
477 * Pointer to Ethernet device structure.
482 * 0 on success, a negative errno value otherwise and rte_errno is set.
485 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
487 struct priv *priv = dev->data->dev_private;
488 struct mlx5_rxq_data *rxq_data;
489 struct mlx5_rxq_ctrl *rxq_ctrl;
491 rxq_data = (*priv->rxqs)[rx_queue_id];
496 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
498 struct mlx5_rxq_ibv *rxq_ibv;
500 rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
505 mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
506 mlx5_rxq_ibv_release(rxq_ibv);
512 * DPDK callback for Rx queue interrupt disable.
515 * Pointer to Ethernet device structure.
520 * 0 on success, a negative errno value otherwise and rte_errno is set.
523 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
525 struct priv *priv = dev->data->dev_private;
526 struct mlx5_rxq_data *rxq_data;
527 struct mlx5_rxq_ctrl *rxq_ctrl;
528 struct mlx5_rxq_ibv *rxq_ibv = NULL;
529 struct ibv_cq *ev_cq;
533 rxq_data = (*priv->rxqs)[rx_queue_id];
538 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
541 rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
546 ret = mlx5_glue->get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx);
547 if (ret || ev_cq != rxq_ibv->cq) {
551 rxq_data->cq_arm_sn++;
552 mlx5_glue->ack_cq_events(rxq_ibv->cq, 1);
555 ret = rte_errno; /* Save rte_errno before cleanup. */
557 mlx5_rxq_ibv_release(rxq_ibv);
558 DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
559 dev->data->port_id, rx_queue_id);
560 rte_errno = ret; /* Restore rte_errno. */
565 * Create the Rx queue Verbs object.
568 * Pointer to Ethernet device.
570 * Queue index in DPDK Rx queue array.
573 * The Verbs object initialised, NULL otherwise and rte_errno is set.
575 struct mlx5_rxq_ibv *
576 mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
578 struct priv *priv = dev->data->dev_private;
579 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
580 struct mlx5_rxq_ctrl *rxq_ctrl =
581 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
582 struct ibv_wq_attr mod;
585 struct ibv_cq_init_attr_ex ibv;
586 struct mlx5dv_cq_init_attr mlx5;
588 struct ibv_wq_init_attr wq;
589 struct ibv_cq_ex cq_attr;
591 unsigned int cqe_n = (1 << rxq_data->elts_n) - 1;
592 struct mlx5_rxq_ibv *tmpl;
593 struct mlx5dv_cq cq_info;
594 struct mlx5dv_rwq rwq;
597 struct mlx5dv_obj obj;
598 struct mlx5_dev_config *config = &priv->config;
601 assert(!rxq_ctrl->ibv);
602 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
603 priv->verbs_alloc_ctx.obj = rxq_ctrl;
604 tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
608 "port %u Rx queue %u cannot allocate verbs resources",
609 dev->data->port_id, rxq_ctrl->idx);
613 tmpl->rxq_ctrl = rxq_ctrl;
615 tmpl->channel = mlx5_glue->create_comp_channel(priv->ctx);
616 if (!tmpl->channel) {
617 DRV_LOG(ERR, "port %u: comp channel creation failure",
623 attr.cq.ibv = (struct ibv_cq_init_attr_ex){
625 .channel = tmpl->channel,
628 attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){
631 if (config->cqe_comp && !rxq_data->hw_timestamp) {
632 attr.cq.mlx5.comp_mask |=
633 MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
634 attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
636 * For vectorized Rx, it must not be doubled in order to
637 * make cq_ci and rq_ci aligned.
639 if (mlx5_rxq_check_vec_support(rxq_data) < 0)
640 attr.cq.ibv.cqe *= 2;
641 } else if (config->cqe_comp && rxq_data->hw_timestamp) {
643 "port %u Rx CQE compression is disabled for HW"
647 tmpl->cq = mlx5_glue->cq_ex_to_cq
648 (mlx5_glue->dv_create_cq(priv->ctx, &attr.cq.ibv,
650 if (tmpl->cq == NULL) {
651 DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
652 dev->data->port_id, idx);
656 DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
657 dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
658 DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
659 dev->data->port_id, priv->device_attr.orig_attr.max_sge);
660 attr.wq = (struct ibv_wq_init_attr){
661 .wq_context = NULL, /* Could be useful in the future. */
662 .wq_type = IBV_WQT_RQ,
663 /* Max number of outstanding WRs. */
664 .max_wr = (1 << rxq_data->elts_n) >> rxq_data->sges_n,
665 /* Max number of scatter/gather elements in a WR. */
666 .max_sge = 1 << rxq_data->sges_n,
670 IBV_WQ_FLAGS_CVLAN_STRIPPING |
672 .create_flags = (rxq_data->vlan_strip ?
673 IBV_WQ_FLAGS_CVLAN_STRIPPING :
676 /* By default, FCS (CRC) is stripped by hardware. */
677 if (rxq_data->crc_present) {
678 attr.wq.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
679 attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
681 #ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
682 if (config->hw_padding) {
683 attr.wq.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
684 attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
687 tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq);
688 if (tmpl->wq == NULL) {
689 DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
690 dev->data->port_id, idx);
695 * Make sure the number of WRs*SGEs matches expectations since a queue
696 * cannot allocate more than "desc" buffers.
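* For example, with desc = 512 and sges_n = 2, the WQ is expected to come
* back with max_wr = 128 and max_sge = 4 (128 * 4 = 512 buffers).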
698 if (((int)attr.wq.max_wr !=
699 ((1 << rxq_data->elts_n) >> rxq_data->sges_n)) ||
700 ((int)attr.wq.max_sge != (1 << rxq_data->sges_n))) {
702 "port %u Rx queue %u requested %u*%u but got %u*%u"
704 dev->data->port_id, idx,
705 ((1 << rxq_data->elts_n) >> rxq_data->sges_n),
706 (1 << rxq_data->sges_n),
707 attr.wq.max_wr, attr.wq.max_sge);
711 /* Change queue state to ready. */
712 mod = (struct ibv_wq_attr){
713 .attr_mask = IBV_WQ_ATTR_STATE,
714 .wq_state = IBV_WQS_RDY,
716 ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
719 "port %u Rx queue %u WQ state to IBV_WQS_RDY failed",
720 dev->data->port_id, idx);
724 obj.cq.in = tmpl->cq;
725 obj.cq.out = &cq_info;
726 obj.rwq.in = tmpl->wq;
728 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);
733 if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
735 "port %u wrong MLX5_CQE_SIZE environment variable"
736 " value: it should be set to %u",
737 dev->data->port_id, RTE_CACHE_LINE_SIZE);
741 /* Fill the rings. */
742 rxq_data->wqes = (volatile struct mlx5_wqe_data_seg (*)[])
744 for (i = 0; (i != (unsigned int)(1 << rxq_data->elts_n)); ++i) {
745 struct rte_mbuf *buf = (*rxq_data->elts)[i];
746 volatile struct mlx5_wqe_data_seg *scat = &(*rxq_data->wqes)[i];
748 /* scat->addr must be able to store a pointer. */
749 assert(sizeof(scat->addr) >= sizeof(uintptr_t));
750 *scat = (struct mlx5_wqe_data_seg){
751 .addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
753 .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
757 rxq_data->rq_db = rwq.dbrec;
758 rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
762 rxq_data->zip = (struct rxq_zip){
765 rxq_data->cq_db = cq_info.dbrec;
766 rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
767 rxq_data->cq_uar = cq_info.cq_uar;
768 rxq_data->cqn = cq_info.cqn;
769 rxq_data->cq_arm_sn = 0;
770 /* Update doorbell counter. */
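/*
 * rq_ci starts at the number of WRs filled above (elts divided by SGEs per
 * WR); publishing it through the doorbell record hands the whole ring over
 * to the device.
 */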
771 rxq_data->rq_ci = (1 << rxq_data->elts_n) >> rxq_data->sges_n;
773 *rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci);
774 DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
776 rte_atomic32_inc(&tmpl->refcnt);
777 DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
778 dev->data->port_id, idx, rte_atomic32_read(&tmpl->refcnt));
779 LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next);
780 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
783 ret = rte_errno; /* Save rte_errno before cleanup. */
785 claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
787 claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
789 claim_zero(mlx5_glue->destroy_comp_channel(tmpl->channel));
790 priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
791 rte_errno = ret; /* Restore rte_errno. */
796 * Get an Rx queue Verbs object.
799 * Pointer to Ethernet device.
801 * Queue index in DPDK Rx queue array.
804 * The Verbs object if it exists.
806 struct mlx5_rxq_ibv *
807 mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
809 struct priv *priv = dev->data->dev_private;
810 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
811 struct mlx5_rxq_ctrl *rxq_ctrl;
813 if (idx >= priv->rxqs_n)
817 rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
819 rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
820 DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
821 dev->data->port_id, rxq_ctrl->idx,
822 rte_atomic32_read(&rxq_ctrl->ibv->refcnt));
824 return rxq_ctrl->ibv;
828 * Release an Rx Verbs queue object.
831 * Verbs Rx queue object.
834 * 1 while a reference on it exists, 0 when freed.
837 mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
842 DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
843 PORT_ID(rxq_ibv->rxq_ctrl->priv),
844 rxq_ibv->rxq_ctrl->idx, rte_atomic32_read(&rxq_ibv->refcnt));
845 if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
846 rxq_free_elts(rxq_ibv->rxq_ctrl);
847 claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq));
848 claim_zero(mlx5_glue->destroy_cq(rxq_ibv->cq));
849 if (rxq_ibv->channel)
850 claim_zero(mlx5_glue->destroy_comp_channel
852 LIST_REMOVE(rxq_ibv, next);
860 * Verify the Verbs Rx queue list is empty.
863 * Pointer to Ethernet device.
866 * The number of objects not released.
869 mlx5_rxq_ibv_verify(struct rte_eth_dev *dev)
871 struct priv *priv = dev->data->dev_private;
873 struct mlx5_rxq_ibv *rxq_ibv;
875 LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) {
876 DRV_LOG(DEBUG, "port %u Verbs Rx queue %u still referenced",
877 dev->data->port_id, rxq_ibv->rxq_ctrl->idx);
884 * Return true if a single reference exists on the object.
887 * Verbs Rx queue object.
890 mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv)
893 return (rte_atomic32_read(&rxq_ibv->refcnt) == 1);
897 * Create a DPDK Rx queue.
900 * Pointer to Ethernet device.
904 * Number of descriptors to configure in queue.
906 * NUMA socket on which memory must be allocated.
909 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
911 struct mlx5_rxq_ctrl *
912 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
913 unsigned int socket, const struct rte_eth_rxconf *conf,
914 struct rte_mempool *mp)
916 struct priv *priv = dev->data->dev_private;
917 struct mlx5_rxq_ctrl *tmpl;
918 unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
919 struct mlx5_dev_config *config = &priv->config;
921 * Always allocate extra slots, even if vector Rx ends up
922 * not being used.
924 const uint16_t desc_n =
925 desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
926 uint64_t offloads = conf->offloads |
927 dev->data->dev_conf.rxmode.offloads;
929 tmpl = rte_calloc_socket("RXQ", 1,
931 desc_n * sizeof(struct rte_mbuf *),
937 tmpl->socket = socket;
938 if (dev->data->dev_conf.intr_conf.rxq)
940 /* Enable scattered packet support for this queue if necessary. */
941 assert(mb_len >= RTE_PKTMBUF_HEADROOM);
942 if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
943 (mb_len - RTE_PKTMBUF_HEADROOM)) {
944 tmpl->rxq.sges_n = 0;
945 } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
947 RTE_PKTMBUF_HEADROOM +
948 dev->data->dev_conf.rxmode.max_rx_pkt_len;
952 * Determine the number of SGEs needed for a full packet
953 * and round it to the next power of two.
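* For example, with max_rx_pkt_len = 9000 and mb_len = 2176, the 9128 bytes
* (headroom included) need 5 mbufs, which rounds up to sges_n = 3, i.e.
* 8 SGEs per packet.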
955 sges_n = log2above((size / mb_len) + !!(size % mb_len));
956 tmpl->rxq.sges_n = sges_n;
957 /* Make sure rxq.sges_n did not overflow. */
958 size = mb_len * (1 << tmpl->rxq.sges_n);
959 size -= RTE_PKTMBUF_HEADROOM;
960 if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
962 "port %u too many SGEs (%u) needed to handle"
963 " requested maximum packet size %u",
966 dev->data->dev_conf.rxmode.max_rx_pkt_len);
967 rte_errno = EOVERFLOW;
972 "port %u the requested maximum Rx packet size (%u) is"
973 " larger than a single mbuf (%u) and scattered mode has"
974 " not been requested",
976 dev->data->dev_conf.rxmode.max_rx_pkt_len,
977 mb_len - RTE_PKTMBUF_HEADROOM);
979 DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
980 dev->data->port_id, 1 << tmpl->rxq.sges_n);
981 if (desc % (1 << tmpl->rxq.sges_n)) {
983 "port %u number of Rx queue descriptors (%u) is not a"
984 " multiple of SGEs per packet (%u)",
987 1 << tmpl->rxq.sges_n);
991 /* Toggle RX checksum offload if hardware supports it. */
992 tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
993 tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
994 /* Configure VLAN stripping. */
995 tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
996 /* By default, FCS (CRC) is stripped by hardware. */
997 if (offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
998 tmpl->rxq.crc_present = 0;
999 } else if (config->hw_fcs_strip) {
1000 tmpl->rxq.crc_present = 1;
1003 "port %u CRC stripping has been disabled but will"
1004 " still be performed by hardware, make sure MLNX_OFED"
1005 " and firmware are up to date",
1006 dev->data->port_id);
1007 tmpl->rxq.crc_present = 0;
1010 "port %u CRC stripping is %s, %u bytes will be subtracted from"
1011 " incoming frames to hide it",
1013 tmpl->rxq.crc_present ? "disabled" : "enabled",
1014 tmpl->rxq.crc_present << 2);
1016 tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
1017 (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
1018 tmpl->rxq.port_id = dev->data->port_id;
1021 tmpl->rxq.stats.idx = idx;
1022 tmpl->rxq.elts_n = log2above(desc);
1024 (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
1026 rte_atomic32_inc(&tmpl->refcnt);
1027 DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id,
1028 idx, rte_atomic32_read(&tmpl->refcnt));
1029 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1040 * Pointer to Ethernet device.
1045 * A pointer to the queue if it exists, NULL otherwise.
1047 struct mlx5_rxq_ctrl *
1048 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
1050 struct priv *priv = dev->data->dev_private;
1051 struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1053 if ((*priv->rxqs)[idx]) {
1054 rxq_ctrl = container_of((*priv->rxqs)[idx],
1055 struct mlx5_rxq_ctrl,
1057 mlx5_rxq_ibv_get(dev, idx);
1058 rte_atomic32_inc(&rxq_ctrl->refcnt);
1059 DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d",
1060 dev->data->port_id, rxq_ctrl->idx,
1061 rte_atomic32_read(&rxq_ctrl->refcnt));
1067 * Release an Rx queue.
1070 * Pointer to Ethernet device.
1075 * 1 while a reference on it exists, 0 when freed.
1078 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
1080 struct priv *priv = dev->data->dev_private;
1081 struct mlx5_rxq_ctrl *rxq_ctrl;
1083 if (!(*priv->rxqs)[idx])
1085 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1086 assert(rxq_ctrl->priv);
1087 if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv))
1088 rxq_ctrl->ibv = NULL;
1089 DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id,
1090 rxq_ctrl->idx, rte_atomic32_read(&rxq_ctrl->refcnt));
1091 if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
1092 LIST_REMOVE(rxq_ctrl, next);
1094 (*priv->rxqs)[idx] = NULL;
1101 * Verify if the queue can be released.
1104 * Pointer to Ethernet device.
1109 * 1 if the queue can be released, negative errno otherwise and rte_errno is
1113 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
1115 struct priv *priv = dev->data->dev_private;
1116 struct mlx5_rxq_ctrl *rxq_ctrl;
1118 if (!(*priv->rxqs)[idx]) {
1122 rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1123 return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
1127 * Verify the Rx queue list is empty.
1130 * Pointer to Ethernet device.
1133 * The number of objects not released.
1136 mlx5_rxq_verify(struct rte_eth_dev *dev)
1138 struct priv *priv = dev->data->dev_private;
1139 struct mlx5_rxq_ctrl *rxq_ctrl;
1142 LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
1143 DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
1144 dev->data->port_id, rxq_ctrl->idx);
1151 * Create an indirection table.
1154 * Pointer to Ethernet device.
1156 * Queues entering the indirection table.
1158 * Number of queues in the array.
1161 * The Verbs object initialised, NULL otherwise and rte_errno is set.
1163 struct mlx5_ind_table_ibv *
1164 mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
1167 struct priv *priv = dev->data->dev_private;
1168 struct mlx5_ind_table_ibv *ind_tbl;
1169 const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
1170 log2above(queues_n) :
1171 log2above(priv->config.ind_table_max_size);
1172 struct ibv_wq *wq[1 << wq_n];
1176 ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
1177 queues_n * sizeof(uint16_t), 0);
1182 for (i = 0; i != queues_n; ++i) {
1183 struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
1187 wq[i] = rxq->ibv->wq;
1188 ind_tbl->queues[i] = queues[i];
1190 ind_tbl->queues_n = queues_n;
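/*
 * The indirection table size must be a power of two: when queues_n is not,
 * wq_n is derived from ind_table_max_size and the loop below pads the
 * table by cycling over the WQs already collected, so traffic still
 * spreads across every requested queue.
 */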
1191 /* Finalise indirection table. */
1192 for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j)
1194 ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
1196 &(struct ibv_rwq_ind_table_init_attr){
1197 .log_ind_tbl_size = wq_n,
1201 if (!ind_tbl->ind_table) {
1205 rte_atomic32_inc(&ind_tbl->refcnt);
1206 LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
1207 DEBUG("port %u new indirection table %p: queues:%u refcnt:%d",
1208 dev->data->port_id, (void *)ind_tbl, 1 << wq_n,
1209 rte_atomic32_read(&ind_tbl->refcnt));
1213 DRV_LOG(DEBUG, "port %u cannot create indirection table",
1214 dev->data->port_id);
1219 * Get an indirection table.
1222 * Pointer to Ethernet device.
1224 * Queues entering the indirection table.
1226 * Number of queues in the array.
1229 * An indirection table if found.
1231 struct mlx5_ind_table_ibv *
1232 mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues,
1235 struct priv *priv = dev->data->dev_private;
1236 struct mlx5_ind_table_ibv *ind_tbl;
1238 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1239 if ((ind_tbl->queues_n == queues_n) &&
1240 (memcmp(ind_tbl->queues, queues,
1241 ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
1248 rte_atomic32_inc(&ind_tbl->refcnt);
1249 DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
1250 dev->data->port_id, (void *)ind_tbl,
1251 rte_atomic32_read(&ind_tbl->refcnt));
1252 for (i = 0; i != ind_tbl->queues_n; ++i)
1253 mlx5_rxq_get(dev, ind_tbl->queues[i]);
1259 * Release an indirection table.
1262 * Pointer to Ethernet device.
1264 * Indirection table to release.
1267 * 1 while a reference on it exists, 0 when freed.
1270 mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
1271 struct mlx5_ind_table_ibv *ind_tbl)
1275 DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
1276 dev->data->port_id, (void *)ind_tbl,
1277 rte_atomic32_read(&ind_tbl->refcnt));
1278 if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {
1279 claim_zero(mlx5_glue->destroy_rwq_ind_table
1280 (ind_tbl->ind_table));
1281 DEBUG("port %u delete indirection table %p: queues: %u",
1282 dev->data->port_id, (void *)ind_tbl, ind_tbl->queues_n);
1284 for (i = 0; i != ind_tbl->queues_n; ++i)
1285 claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
1286 if (!rte_atomic32_read(&ind_tbl->refcnt)) {
1287 LIST_REMOVE(ind_tbl, next);
1295 * Verify the Verbs indirection table list is empty.
1298 * Pointer to Ethernet device.
1301 * The number of objects not released.
1304 mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
1306 struct priv *priv = dev->data->dev_private;
1307 struct mlx5_ind_table_ibv *ind_tbl;
1310 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1312 "port %u Verbs indirection table %p still referenced",
1313 dev->data->port_id, (void *)ind_tbl);
1320 * Create an Rx Hash queue.
1323 * Pointer to Ethernet device.
1325 * RSS key for the Rx hash queue.
1326 * @param rss_key_len
1328 * @param hash_fields
1329 * Verbs protocol hash fields to apply RSS on.
1331 * Queues entering the hash queue. In case of empty hash_fields only the
1332 * first queue index will be taken for the indirection table.
1336 * Tunnel type, implies tunnel offloading like inner checksum if available.
1338 * RSS hash on tunnel level.
1341 * The Verbs object initialised, NULL otherwise and rte_errno is set.
1344 mlx5_hrxq_new(struct rte_eth_dev *dev,
1345 const uint8_t *rss_key, uint32_t rss_key_len,
1346 uint64_t hash_fields,
1347 const uint16_t *queues, uint32_t queues_n,
1348 uint32_t tunnel, uint32_t rss_level)
1350 struct priv *priv = dev->data->dev_private;
1351 struct mlx5_hrxq *hrxq;
1352 struct mlx5_ind_table_ibv *ind_tbl;
1355 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
1356 struct mlx5dv_qp_init_attr qp_init_attr = {0};
1359 queues_n = hash_fields ? queues_n : 1;
1360 ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
1362 ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n);
1368 rss_key_len = rss_hash_default_key_len;
1369 rss_key = rss_hash_default_key;
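/*
 * When tunnel RSS above the outer level is requested, the QP is created
 * through the mlx5dv API with MLX5DV_QP_CREATE_TUNNEL_OFFLOADS and the
 * IBV_RX_HASH_INNER bit added to the hash fields so the device hashes on
 * the inner headers.
 */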
1371 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
1373 qp_init_attr.comp_mask =
1374 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
1375 qp_init_attr.create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
1377 qp = mlx5_glue->dv_create_qp
1379 &(struct ibv_qp_init_attr_ex){
1380 .qp_type = IBV_QPT_RAW_PACKET,
1382 IBV_QP_INIT_ATTR_PD |
1383 IBV_QP_INIT_ATTR_IND_TABLE |
1384 IBV_QP_INIT_ATTR_RX_HASH,
1385 .rx_hash_conf = (struct ibv_rx_hash_conf){
1386 .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
1387 .rx_hash_key_len = rss_key_len ? rss_key_len :
1388 rss_hash_default_key_len,
1389 .rx_hash_key = rss_key ?
1390 (void *)(uintptr_t)rss_key :
1391 rss_hash_default_key,
1392 .rx_hash_fields_mask = hash_fields |
1393 (tunnel && rss_level > 1 ?
1394 (uint32_t)IBV_RX_HASH_INNER : 0),
1396 .rwq_ind_tbl = ind_tbl->ind_table,
1400 DEBUG("port %u new QP:%p ind_tbl:%p hash_fields:0x%" PRIx64
1401 " tunnel:0x%x level:%u dv_attr:comp_mask:0x%" PRIx64
1402 " create_flags:0x%x",
1403 dev->data->port_id, (void *)qp, (void *)ind_tbl,
1404 (tunnel && rss_level == 2 ? (uint32_t)IBV_RX_HASH_INNER : 0) |
1405 hash_fields, tunnel, rss_level,
1406 qp_init_attr.comp_mask, qp_init_attr.create_flags);
1408 qp = mlx5_glue->create_qp_ex
1410 &(struct ibv_qp_init_attr_ex){
1411 .qp_type = IBV_QPT_RAW_PACKET,
1413 IBV_QP_INIT_ATTR_PD |
1414 IBV_QP_INIT_ATTR_IND_TABLE |
1415 IBV_QP_INIT_ATTR_RX_HASH,
1416 .rx_hash_conf = (struct ibv_rx_hash_conf){
1417 .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
1418 .rx_hash_key_len = rss_key_len ? rss_key_len :
1419 rss_hash_default_key_len,
1420 .rx_hash_key = rss_key ?
1421 (void *)(uintptr_t)rss_key :
1422 rss_hash_default_key,
1423 .rx_hash_fields_mask = hash_fields,
1425 .rwq_ind_tbl = ind_tbl->ind_table,
1428 DEBUG("port %u new QP:%p ind_tbl:%p hash_fields:0x%" PRIx64
1429 " tunnel:0x%x level:%hhu",
1430 dev->data->port_id, (void *)qp, (void *)ind_tbl,
1431 hash_fields, tunnel, rss_level);
1437 hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
1440 hrxq->ind_table = ind_tbl;
1442 hrxq->rss_key_len = rss_key_len;
1443 hrxq->hash_fields = hash_fields;
1444 hrxq->tunnel = tunnel;
1445 hrxq->rss_level = rss_level;
1446 memcpy(hrxq->rss_key, rss_key, rss_key_len);
1447 rte_atomic32_inc(&hrxq->refcnt);
1448 LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
1449 DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
1450 dev->data->port_id, (void *)hrxq,
1451 rte_atomic32_read(&hrxq->refcnt));
1454 err = rte_errno; /* Save rte_errno before cleanup. */
1455 mlx5_ind_table_ibv_release(dev, ind_tbl);
1457 claim_zero(mlx5_glue->destroy_qp(qp));
1458 rte_errno = err; /* Restore rte_errno. */
1463 * Get an Rx Hash queue.
1466 * Pointer to Ethernet device.
1468 * RSS configuration for the Rx hash queue.
1470 * Queues entering the hash queue. In case of empty hash_fields only the
1471 * first queue index will be taken for the indirection table.
1475 * Tunnel type, implies tunnel offloading like inner checksum if available.
1477 * RSS hash on tunnel level.
1480 * A hash Rx queue on success.
1483 mlx5_hrxq_get(struct rte_eth_dev *dev,
1484 const uint8_t *rss_key, uint32_t rss_key_len,
1485 uint64_t hash_fields,
1486 const uint16_t *queues, uint32_t queues_n,
1487 uint32_t tunnel, uint32_t rss_level)
1489 struct priv *priv = dev->data->dev_private;
1490 struct mlx5_hrxq *hrxq;
1492 queues_n = hash_fields ? queues_n : 1;
1493 LIST_FOREACH(hrxq, &priv->hrxqs, next) {
1494 struct mlx5_ind_table_ibv *ind_tbl;
1496 if (hrxq->rss_key_len != rss_key_len)
1498 if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
1500 if (hrxq->hash_fields != hash_fields)
1502 if (hrxq->tunnel != tunnel)
1504 if (hrxq->rss_level != rss_level)
1506 ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
1509 if (ind_tbl != hrxq->ind_table) {
1510 mlx5_ind_table_ibv_release(dev, ind_tbl);
1513 rte_atomic32_inc(&hrxq->refcnt);
1514 DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
1515 dev->data->port_id, (void *)hrxq,
1516 rte_atomic32_read(&hrxq->refcnt));
1523 * Release the hash Rx queue.
1526 * Pointer to Ethernet device.
1528 * Pointer to Hash Rx queue to release.
1531 * 1 while a reference on it exists, 0 when freed.
1534 mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
1536 DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
1537 dev->data->port_id, (void *)hrxq,
1538 rte_atomic32_read(&hrxq->refcnt));
1539 if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
1540 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
1541 DEBUG("port %u delete QP %p: hash: 0x%" PRIx64 ", tunnel:"
1543 dev->data->port_id, (void *)hrxq, hrxq->hash_fields,
1544 hrxq->tunnel, hrxq->rss_level);
1545 mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
1546 LIST_REMOVE(hrxq, next);
1550 claim_nonzero(mlx5_ind_table_ibv_release(dev, hrxq->ind_table));
1555 * Verify the hash Rx queue list is empty.
1558 * Pointer to Ethernet device.
1561 * The number of objects not released.
1564 mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
1566 struct priv *priv = dev->data->dev_private;
1567 struct mlx5_hrxq *hrxq;
1570 LIST_FOREACH(hrxq, &priv->hrxqs, next) {
1572 "port %u Verbs hash Rx queue %p still referenced",
1573 dev->data->port_id, (void *)hrxq);