/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

/**
 * @file
 * Rx queues configuration for mlx4 driver.
 */

#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/mlx4dv.h>
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

#include "mlx4.h"
#include "mlx4_glue.h"
#include "mlx4_flow.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"

/**
 * Historical RSS hash key.
 *
 * This used to be the default for mlx4 in Linux before v3.19 switched to
 * generating random hash keys through netdev_rss_key_fill().
 *
 * It is used in this PMD for consistency with past DPDK releases but can
 * now be overridden through user configuration.
 *
 * Note: this is not const to work around API quirks.
 */
uint8_t
mlx4_rss_hash_key_default[MLX4_RSS_HASH_KEY_SIZE] = {
	0x2c, 0xc6, 0x81, 0xd1,
	0x5b, 0xdb, 0xf4, 0xf7,
	0xfc, 0xa2, 0x83, 0x19,
	0xdb, 0x1a, 0x3e, 0x94,
	0x6b, 0x9e, 0x38, 0xd9,
	0x2c, 0x9c, 0x03, 0xd1,
	0xad, 0x99, 0x44, 0xa7,
	0xd9, 0x56, 0x3d, 0x59,
	0x06, 0x3c, 0x25, 0xf3,
	0xfc, 0x1f, 0xdc, 0x2a,
};

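/*
 * Illustrative sketch (not part of the upstream file): how an application
 * might override the key above through user configuration. The helper name
 * and the ETH_RSS_IP hash type are assumptions made for this example; the
 * key reaches the PMD through rte_eth_dev_configure().
 */
static __rte_unused int
example_override_rss_key(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	/* Hypothetical 40-byte replacement key (mostly zero for brevity). */
	static uint8_t custom_key[MLX4_RSS_HASH_KEY_SIZE] = { 0x6d, 0x5a, };
	struct rte_eth_conf conf = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_RSS,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = custom_key,
				.rss_key_len = MLX4_RSS_HASH_KEY_SIZE,
				.rss_hf = ETH_RSS_IP,
			},
		},
	};

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}
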
/**
 * Obtain an RSS context with specified properties.
 *
 * Used when creating a flow rule targeting one or several Rx queues.
 *
 * If a matching RSS context already exists, it is returned with its
 * reference count incremented.
 *
 * @param priv
 *   Pointer to private structure.
 * @param fields
 *   Fields for RSS processing (Verbs format).
 * @param[in] key
 *   Hash key to use (whose size is exactly MLX4_RSS_HASH_KEY_SIZE).
 * @param queues
 *   Number of target queues.
 * @param[in] queue_id
 *   Target queues.
 *
 * @return
 *   Pointer to RSS context on success, NULL otherwise and rte_errno is set.
 */
struct mlx4_rss *
mlx4_rss_get(struct mlx4_priv *priv, uint64_t fields,
	     const uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
	     uint16_t queues, const uint16_t queue_id[])
{
	struct mlx4_rss *rss;
	size_t queue_id_size = sizeof(queue_id[0]) * queues;

	LIST_FOREACH(rss, &priv->rss, next)
		if (fields == rss->fields &&
		    queues == rss->queues &&
		    !memcmp(key, rss->key, MLX4_RSS_HASH_KEY_SIZE) &&
		    !memcmp(queue_id, rss->queue_id, queue_id_size)) {
			++rss->refcnt;
			return rss;
		}
	rss = rte_malloc(__func__, offsetof(struct mlx4_rss, queue_id) +
			 queue_id_size, 0);
	if (!rss)
		goto error;
	*rss = (struct mlx4_rss){
		.priv = priv,
		.refcnt = 1,
		.usecnt = 0,
		.qp = NULL,
		.ind = NULL,
		.fields = fields,
		.queues = queues,
	};
	memcpy(rss->key, key, MLX4_RSS_HASH_KEY_SIZE);
	memcpy(rss->queue_id, queue_id, queue_id_size);
	LIST_INSERT_HEAD(&priv->rss, rss, next);
	return rss;
error:
	rte_errno = ENOBUFS;
	return NULL;
}

/**
 * Release an RSS context instance.
 *
 * Used when destroying a flow rule targeting one or several Rx queues.
 *
 * This function decrements the reference count of the context and destroys
 * it after reaching 0. The context must have no users at this point; all
 * prior calls to mlx4_rss_attach() must have been followed by matching
 * calls to mlx4_rss_detach().
 *
 * @param rss
 *   RSS context to release.
 */
void
mlx4_rss_put(struct mlx4_rss *rss)
{
	assert(rss->refcnt);
	if (--rss->refcnt)
		return;
	assert(!rss->usecnt);
	assert(!rss->qp);
	assert(!rss->ind);
	LIST_REMOVE(rss, next);
	rte_free(rss);
}

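/*
 * Illustrative sketch (not part of the upstream file): intended pairing of
 * mlx4_rss_get()/mlx4_rss_put() with mlx4_rss_attach()/mlx4_rss_detach().
 * The hash fields and queue list are assumptions made for this example.
 */
static __rte_unused void
example_rss_lifecycle(struct mlx4_priv *priv)
{
	static const uint16_t queue_id[] = { 0, 1, 2, 3 };
	struct mlx4_rss *rss;

	/* Look up or allocate a context (reference count becomes >= 1). */
	rss = mlx4_rss_get(priv, IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4,
			   mlx4_rss_hash_key_default, RTE_DIM(queue_id),
			   queue_id);
	if (rss == NULL)
		return;
	/* Instantiate QP/indirection table when a flow rule is enabled. */
	if (mlx4_rss_attach(rss) == 0)
		mlx4_rss_detach(rss); /* Disable: drop the usage count. */
	mlx4_rss_put(rss); /* Destroy: drop the reference count. */
}
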
/**
 * Attach a user to an RSS context instance.
 *
 * Used when the RSS QP and indirection table objects must be instantiated,
 * that is, when a flow rule must be enabled.
 *
 * This function increments the usage count of the context.
 *
 * @param rss
 *   RSS context to attach to.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx4_rss_attach(struct mlx4_rss *rss)
{
	assert(rss->refcnt);
	if (rss->usecnt++) {
		assert(rss->qp);
		assert(rss->ind);
		return 0;
	}

	struct ibv_wq *ind_tbl[rss->queues];
	struct mlx4_priv *priv = rss->priv;
	struct rte_eth_dev *dev = ETH_DEV(priv);
	const char *msg;
	unsigned int i = 0;
	int ret;

	if (!rte_is_power_of_2(RTE_DIM(ind_tbl))) {
		ret = EINVAL;
		msg = "number of RSS queues must be a power of two";
		goto error;
	}
	for (i = 0; i != RTE_DIM(ind_tbl); ++i) {
		uint16_t id = rss->queue_id[i];
		struct rxq *rxq = NULL;

		if (id < dev->data->nb_rx_queues)
			rxq = dev->data->rx_queues[id];
		if (!rxq) {
			ret = EINVAL;
			msg = "RSS target queue is not configured";
			goto error;
		}
		ret = mlx4_rxq_attach(rxq);
		if (ret) {
			ret = -ret;
			msg = "unable to attach RSS target queue";
			goto error;
		}
		ind_tbl[i] = rxq->wq;
	}
	rss->ind = mlx4_glue->create_rwq_ind_table
		(priv->ctx,
		 &(struct ibv_rwq_ind_table_init_attr){
			.log_ind_tbl_size = rte_log2_u32(RTE_DIM(ind_tbl)),
			.ind_tbl = ind_tbl,
			.comp_mask = 0,
		 });
	if (!rss->ind) {
		ret = errno ? errno : EINVAL;
		msg = "RSS indirection table creation failure";
		goto error;
	}
	rss->qp = mlx4_glue->create_qp_ex
		(priv->ctx,
		 &(struct ibv_qp_init_attr_ex){
			.comp_mask = (IBV_QP_INIT_ATTR_PD |
				      IBV_QP_INIT_ATTR_RX_HASH |
				      IBV_QP_INIT_ATTR_IND_TABLE),
			.qp_type = IBV_QPT_RAW_PACKET,
			.pd = priv->pd,
			.rwq_ind_tbl = rss->ind,
			.rx_hash_conf = {
				.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
				.rx_hash_key_len = MLX4_RSS_HASH_KEY_SIZE,
				.rx_hash_key = rss->key,
				.rx_hash_fields_mask = rss->fields,
			},
		 });
	if (!rss->qp) {
		ret = errno ? errno : EINVAL;
		msg = "RSS hash QP creation failure";
		goto error;
	}
	ret = mlx4_glue->modify_qp
		(rss->qp,
		 &(struct ibv_qp_attr){
			.qp_state = IBV_QPS_INIT,
			.port_num = priv->port,
		 },
		 IBV_QP_STATE | IBV_QP_PORT);
	if (ret) {
		msg = "failed to switch RSS hash QP to INIT state";
		goto error;
	}
	ret = mlx4_glue->modify_qp
		(rss->qp,
		 &(struct ibv_qp_attr){
			.qp_state = IBV_QPS_RTR,
		 },
		 IBV_QP_STATE);
	if (ret) {
		msg = "failed to switch RSS hash QP to RTR state";
		goto error;
	}
	return 0;
error:
	if (rss->qp) {
		claim_zero(mlx4_glue->destroy_qp(rss->qp));
		rss->qp = NULL;
	}
	if (rss->ind) {
		claim_zero(mlx4_glue->destroy_rwq_ind_table(rss->ind));
		rss->ind = NULL;
	}
	while (i--)
		mlx4_rxq_detach(dev->data->rx_queues[rss->queue_id[i]]);
	ERROR("mlx4: %s", msg);
	--rss->usecnt;
	rte_errno = ret;
	return -ret;
}

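/*
 * Illustrative sketch (not part of the upstream file): with the Toeplitz
 * hash configured above and a power-of-two indirection table, the device
 * effectively selects the destination queue as below. The helper name is
 * an assumption made for this example.
 */
static __rte_unused uint16_t
example_rss_spread(uint32_t toeplitz_hash,
		   const uint16_t queue_id[], uint16_t queues)
{
	/* queues is a power of two, so masking stands in for modulo. */
	return queue_id[toeplitz_hash & (queues - 1)];
}
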
/**
 * Detach a user from an RSS context instance.
 *
 * Used when disabling (not destroying) a flow rule.
 *
 * This function decrements the usage count of the context and destroys
 * usage resources after reaching 0.
 *
 * @param rss
 *   RSS context to detach from.
 */
void
mlx4_rss_detach(struct mlx4_rss *rss)
{
	struct mlx4_priv *priv = rss->priv;
	struct rte_eth_dev *dev = ETH_DEV(priv);
	unsigned int i;

	assert(rss->refcnt);
	assert(rss->qp);
	assert(rss->ind);
	if (--rss->usecnt)
		return;
	claim_zero(mlx4_glue->destroy_qp(rss->qp));
	rss->qp = NULL;
	claim_zero(mlx4_glue->destroy_rwq_ind_table(rss->ind));
	rss->ind = NULL;
	for (i = 0; i != rss->queues; ++i)
		mlx4_rxq_detach(dev->data->rx_queues[rss->queue_id[i]]);
}

/**
 * Initialize common RSS context resources.
 *
 * Because ConnectX-3 hardware limitations require a fixed order in the
 * indirection table, WQs must be allocated sequentially to be part of a
 * common RSS context.
 *
 * Since a newly created WQ cannot be moved to a different context, this
 * function allocates them all at once, one for each configured Rx queue,
 * as well as all related resources (CQs and mbufs).
 *
 * This must therefore be done before creating any Rx flow rules relying on
 * indirection tables.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx4_rss_init(struct mlx4_priv *priv)
{
	struct rte_eth_dev *dev = ETH_DEV(priv);
	uint8_t log2_range = rte_log2_u32(dev->data->nb_rx_queues);
	uint32_t wq_num_prev = 0;
	const char *msg;
	unsigned int i;
	int ret;

	if (priv->rss_init)
		return 0;
	if (ETH_DEV(priv)->data->nb_rx_queues > priv->hw_rss_max_qps) {
		ERROR("RSS does not support more than %d queues",
		      priv->hw_rss_max_qps);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	/* Prepare range for RSS contexts before creating the first WQ. */
	ret = mlx4_glue->dv_set_context_attr
		(priv->ctx,
		 MLX4DV_SET_CTX_ATTR_LOG_WQS_RANGE_SZ,
		 &log2_range);
	if (ret) {
		ERROR("cannot set up range size for RSS context to %u"
		      " (for %u Rx queues), error: %s",
		      1 << log2_range, dev->data->nb_rx_queues, strerror(ret));
		rte_errno = ret;
		return -ret;
	}
	for (i = 0; i != ETH_DEV(priv)->data->nb_rx_queues; ++i) {
		struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];
		struct ibv_cq *cq;
		struct ibv_wq *wq;
		uint32_t wq_num;

		/* Attach the configured Rx queues. */
		if (rxq) {
			assert(!rxq->usecnt);
			ret = mlx4_rxq_attach(rxq);
			if (!ret) {
				wq_num = rxq->wq->wq_num;
				goto wq_num_check;
			}
			ret = -ret;
			msg = "unable to create Rx queue resources";
			goto error;
		}
		/*
		 * WQs are temporarily allocated for unconfigured Rx queues
		 * to maintain proper index alignment in indirection table
		 * by skipping unused WQ numbers.
		 *
		 * The reason this works at all even though these WQs are
		 * immediately destroyed is that WQNs are allocated
		 * sequentially and are guaranteed to never be reused in the
		 * same context by the underlying implementation.
		 */
		cq = mlx4_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
		if (!cq) {
			ret = ENOMEM;
			msg = "placeholder CQ creation failure";
			goto error;
		}
		wq = mlx4_glue->create_wq
			(priv->ctx,
			 &(struct ibv_wq_init_attr){
				.wq_type = IBV_WQT_RQ,
				.max_wr = 1,
				.max_sge = 1,
				.pd = priv->pd,
				.cq = cq,
			 });
		if (wq) {
			wq_num = wq->wq_num;
			claim_zero(mlx4_glue->destroy_wq(wq));
		} else {
			wq_num = 0; /* Shut up GCC 4.8 warnings. */
		}
		claim_zero(mlx4_glue->destroy_cq(cq));
		if (!wq) {
			ret = ENOMEM;
			msg = "placeholder WQ creation failure";
			goto error;
		}
wq_num_check:
		/*
		 * While guaranteed by the implementation, make sure WQ
		 * numbers are really sequential (as the saying goes,
		 * trust, but verify).
		 */
		if (i && wq_num - wq_num_prev != 1) {
			if (rxq)
				mlx4_rxq_detach(rxq);
			ret = ERANGE;
			msg = "WQ numbers are not sequential";
			goto error;
		}
		wq_num_prev = wq_num;
	}
	priv->rss_init = 1;
	return 0;
error:
	ERROR("cannot initialize common RSS resources (queue %u): %s: %s",
	      i, msg, strerror(ret));
	while (i--) {
		struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];

		if (rxq)
			mlx4_rxq_detach(rxq);
	}
	rte_errno = ret;
	return -ret;
}

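/*
 * Worked example for the range computation in mlx4_rss_init() (values
 * assumed for illustration): with 6 Rx queues, rte_log2_u32(6) rounds up
 * to 3, so a range of 1 << 3 = 8 sequential WQ numbers is reserved and
 * the 6 queues (or their placeholders) draw consecutive WQNs from it.
 */
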
/**
 * Release common RSS context resources.
 *
 * As the reverse of mlx4_rss_init(), this must be done after removing all
 * flow rules relying on indirection tables.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
mlx4_rss_deinit(struct mlx4_priv *priv)
{
	unsigned int i;

	if (!priv->rss_init)
		return;
	for (i = 0; i != ETH_DEV(priv)->data->nb_rx_queues; ++i) {
		struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];

		if (rxq) {
			assert(rxq->usecnt == 1);
			mlx4_rxq_detach(rxq);
		}
	}
	priv->rss_init = 0;
}

/**
 * Attach a user to an Rx queue.
 *
 * Used when the resources of an Rx queue must be instantiated for it to
 * become usable.
 *
 * This function increments the usage count of the Rx queue.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_rxq_attach(struct rxq *rxq)
{
	if (rxq->usecnt++) {
		assert(rxq->cq);
		assert(rxq->wq);
		assert(rxq->wqes);
		assert(rxq->rq_db);
		return 0;
	}

	struct mlx4_priv *priv = rxq->priv;
	struct rte_eth_dev *dev = ETH_DEV(priv);
	const uint32_t elts_n = 1 << rxq->elts_n;
	const uint32_t sges_n = 1 << rxq->sges_n;
	struct rte_mbuf *(*elts)[elts_n] = rxq->elts;
	struct mlx4dv_obj mlxdv;
	struct mlx4dv_rwq dv_rwq;
	struct mlx4dv_cq dv_cq = { .comp_mask = MLX4DV_CQ_MASK_UAR, };
	const char *msg;
	struct ibv_cq *cq = NULL;
	struct ibv_wq *wq = NULL;
	uint32_t create_flags = 0;
	uint32_t comp_mask = 0;
	volatile struct mlx4_wqe_data_seg (*wqes)[];
	unsigned int i;
	int ret;

	assert(rte_is_power_of_2(elts_n));
	priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_RX_QUEUE;
	priv->verbs_alloc_ctx.obj = rxq;
	cq = mlx4_glue->create_cq(priv->ctx, elts_n / sges_n, NULL,
				  rxq->channel, 0);
	if (!cq) {
		ret = ENOMEM;
		msg = "CQ creation failure";
		goto error;
	}
	/* By default, FCS (CRC) is stripped by hardware. */
	if (rxq->crc_present) {
		create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
		comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
	}
	wq = mlx4_glue->create_wq
		(priv->ctx,
		 &(struct ibv_wq_init_attr){
			.wq_type = IBV_WQT_RQ,
			.max_wr = elts_n / sges_n,
			.max_sge = sges_n,
			.pd = priv->pd,
			.cq = cq,
			.comp_mask = comp_mask,
			.create_flags = create_flags,
		 });
	if (!wq) {
		ret = errno ? errno : EINVAL;
		msg = "WQ creation failure";
		goto error;
	}
	ret = mlx4_glue->modify_wq
		(wq,
		 &(struct ibv_wq_attr){
			.attr_mask = IBV_WQ_ATTR_STATE,
			.wq_state = IBV_WQS_RDY,
		 });
	if (ret) {
		msg = "WQ state change to IBV_WQS_RDY failed";
		goto error;
	}
	/* Retrieve device queue information. */
	mlxdv.cq.in = cq;
	mlxdv.cq.out = &dv_cq;
	mlxdv.rwq.in = wq;
	mlxdv.rwq.out = &dv_rwq;
	ret = mlx4_glue->dv_init_obj(&mlxdv, MLX4DV_OBJ_RWQ | MLX4DV_OBJ_CQ);
	if (ret) {
		msg = "failed to obtain device information from WQ/CQ objects";
		goto error;
	}
	/* Pre-register Rx mempool. */
	DEBUG("port %u Rx queue %u registering mp %s having %u chunks",
	      ETH_DEV(priv)->data->port_id, rxq->stats.idx,
	      rxq->mp->name, rxq->mp->nb_mem_chunks);
	mlx4_mr_update_mp(dev, &rxq->mr_ctrl, rxq->mp);
	wqes = (volatile struct mlx4_wqe_data_seg (*)[])
		((uintptr_t)dv_rwq.buf.buf + dv_rwq.rq.offset);
	for (i = 0; i != RTE_DIM(*elts); ++i) {
		volatile struct mlx4_wqe_data_seg *scat = &(*wqes)[i];
		struct rte_mbuf *buf = rte_pktmbuf_alloc(rxq->mp);

		if (buf == NULL) {
			while (i--) {
				rte_pktmbuf_free_seg((*elts)[i]);
				(*elts)[i] = NULL;
			}
			ret = ENOMEM;
			msg = "cannot allocate mbuf";
			goto error;
		}
		/* Headroom is reserved by rte_pktmbuf_alloc(). */
		assert(buf->data_off == RTE_PKTMBUF_HEADROOM);
		/* Buffer is supposed to be empty. */
		assert(rte_pktmbuf_data_len(buf) == 0);
		assert(rte_pktmbuf_pkt_len(buf) == 0);
		/* Only the first segment keeps headroom. */
		if (i % sges_n)
			buf->data_off = 0;
		buf->port = rxq->port_id;
		buf->data_len = rte_pktmbuf_tailroom(buf);
		buf->pkt_len = rte_pktmbuf_tailroom(buf);
		buf->nb_segs = 1;
		*scat = (struct mlx4_wqe_data_seg){
			.addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
								  uintptr_t)),
			.byte_count = rte_cpu_to_be_32(buf->data_len),
			.lkey = mlx4_rx_mb2mr(rxq, buf),
		};
		(*elts)[i] = buf;
	}
	DEBUG("%p: allocated and configured %u segments (max %u packets)",
	      (void *)rxq, elts_n, elts_n / sges_n);
	rxq->cq = cq;
	rxq->wq = wq;
	rxq->wqes = wqes;
	rxq->rq_db = dv_rwq.rdb;
	rxq->mcq.buf = dv_cq.buf.buf;
	rxq->mcq.cqe_cnt = dv_cq.cqe_cnt;
	rxq->mcq.set_ci_db = dv_cq.set_ci_db;
	rxq->mcq.cqe_64 = (dv_cq.cqe_size & 64) ? 1 : 0;
	rxq->mcq.arm_db = dv_cq.arm_db;
	rxq->mcq.arm_sn = dv_cq.arm_sn;
	rxq->mcq.cqn = dv_cq.cqn;
	rxq->mcq.cq_uar = dv_cq.cq_uar;
	rxq->mcq.cq_db_reg = (uint8_t *)dv_cq.cq_uar + MLX4_CQ_DOORBELL;
	/* Update doorbell counter. */
	rxq->rq_ci = elts_n / sges_n;
	rte_wmb();
	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
	priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_NONE;
	return 0;
error:
	if (wq)
		claim_zero(mlx4_glue->destroy_wq(wq));
	if (cq)
		claim_zero(mlx4_glue->destroy_cq(cq));
	--rxq->usecnt;
	rte_errno = ret;
	ERROR("error while attaching Rx queue %p: %s: %s",
	      (void *)rxq, msg, strerror(ret));
	priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_NONE;
	return -ret;
}

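/*
 * Worked example for the ring sizing above (values assumed for
 * illustration): with rxq->elts_n = 9 and rxq->sges_n = 2, the ring holds
 * 1 << 9 = 512 mbuf segments grouped into 512 / 4 = 128 scattered packets,
 * so rq_ci starts at 128 posted receives.
 */
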
/**
 * Detach a user from an Rx queue.
 *
 * This function decrements the usage count of the Rx queue and destroys
 * usage resources after reaching 0.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 */
void
mlx4_rxq_detach(struct rxq *rxq)
{
	unsigned int i;
	struct rte_mbuf *(*elts)[1 << rxq->elts_n] = rxq->elts;

	if (--rxq->usecnt)
		return;
	rxq->rq_ci = 0;
	memset(&rxq->mcq, 0, sizeof(rxq->mcq));
	rxq->rq_db = NULL;
	rxq->wqes = NULL;
	claim_zero(mlx4_glue->destroy_wq(rxq->wq));
	rxq->wq = NULL;
	claim_zero(mlx4_glue->destroy_cq(rxq->cq));
	rxq->cq = NULL;
	DEBUG("%p: freeing Rx queue elements", (void *)rxq);
	for (i = 0; (i != RTE_DIM(*elts)); ++i) {
		if (!(*elts)[i])
			continue;
		rte_pktmbuf_free_seg((*elts)[i]);
		(*elts)[i] = NULL;
	}
}

/**
 * Returns the per-queue supported offloads.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   Supported Rx offloads.
 */
uint64_t
mlx4_get_rx_queue_offloads(struct mlx4_priv *priv)
{
	uint64_t offloads = DEV_RX_OFFLOAD_SCATTER |
			    DEV_RX_OFFLOAD_KEEP_CRC |
			    DEV_RX_OFFLOAD_JUMBO_FRAME;

	if (priv->hw_csum)
		offloads |= DEV_RX_OFFLOAD_CHECKSUM;
	return offloads;
}

/**
 * Returns the per-port supported offloads.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   Supported Rx offloads.
 */
uint64_t
mlx4_get_rx_port_offloads(struct mlx4_priv *priv)
{
	uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;

	(void)priv;
	return offloads;
}

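/*
 * Illustrative sketch (not part of the upstream file): applications see
 * the masks returned by the two helpers above merged into the capability
 * fields of struct rte_eth_dev_info by the ethdev layer. The helper name
 * is an assumption made for this example.
 */
static __rte_unused int
example_supports_scatter(uint16_t port_id)
{
	struct rte_eth_dev_info info;

	rte_eth_dev_info_get(port_id, &info);
	return (info.rx_offload_capa & DEV_RX_OFFLOAD_SCATTER) != 0;
}
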
/**
 * DPDK callback to configure an Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Rx queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_rxconf *conf,
		    struct rte_mempool *mp)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	uint32_t mb_len = rte_pktmbuf_data_room_size(mp);
	struct rte_mbuf *(*elts)[rte_align32pow2(desc)];
	struct rxq *rxq;
	struct mlx4_malloc_vec vec[] = {
		{
			.align = RTE_CACHE_LINE_SIZE,
			.size = sizeof(*rxq),
			.addr = (void **)&rxq,
		},
		{
			.align = RTE_CACHE_LINE_SIZE,
			.size = sizeof(*elts),
			.addr = (void **)&elts,
		},
	};
	int ret;
	uint32_t crc_present;
	uint64_t offloads;

	offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;

	DEBUG("%p: configuring queue %u for %u descriptors",
	      (void *)dev, idx, desc);

	if (idx >= dev->data->nb_rx_queues) {
		rte_errno = EOVERFLOW;
		ERROR("%p: queue index out of range (%u >= %u)",
		      (void *)dev, idx, dev->data->nb_rx_queues);
		return -rte_errno;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq) {
		rte_errno = EEXIST;
		ERROR("%p: Rx queue %u already configured, release it first",
		      (void *)dev, idx);
		return -rte_errno;
	}
	if (!desc) {
		rte_errno = EINVAL;
		ERROR("%p: invalid number of Rx descriptors", (void *)dev);
		return -rte_errno;
	}
	if (desc != RTE_DIM(*elts)) {
		desc = RTE_DIM(*elts);
		WARN("%p: increased number of descriptors in Rx queue %u"
		     " to the next power of two (%u)",
		     (void *)dev, idx, desc);
	}
	/* By default, FCS (CRC) is stripped by hardware. */
	crc_present = 0;
	if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
		if (priv->hw_fcs_strip) {
			crc_present = 1;
		} else {
			WARN("%p: CRC stripping has been disabled but will still"
			     " be performed by hardware, make sure MLNX_OFED and"
			     " firmware are up to date",
			     (void *)dev);
		}
	}
	DEBUG("%p: CRC stripping is %s, %u bytes will be subtracted from"
	      " incoming frames to hide it",
	      (void *)dev,
	      crc_present ? "disabled" : "enabled",
	      crc_present << 2);
	/* Allocate and initialize Rx queue. */
	mlx4_zmallocv_socket("RXQ", vec, RTE_DIM(vec), socket);
	if (!rxq) {
		ERROR("%p: unable to allocate queue index %u",
		      (void *)dev, idx);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	*rxq = (struct rxq){
		.priv = priv,
		.port_id = dev->data->port_id,
		.mp = mp,
		.elts_n = rte_log2_u32(desc),
		.elts = elts,
		/* Toggle Rx checksum offload if hardware supports it. */
		.csum = priv->hw_csum &&
			(offloads & DEV_RX_OFFLOAD_CHECKSUM),
		.csum_l2tun = priv->hw_csum_l2tun &&
			      (offloads & DEV_RX_OFFLOAD_CHECKSUM),
		.crc_present = crc_present,
		.l2tun_offload = priv->hw_csum_l2tun,
		.stats = {
			.idx = idx,
		},
		.socket = socket,
	};
	/* Enable scattered packets support for this queue if necessary. */
	assert(mb_len >= RTE_PKTMBUF_HEADROOM);
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
		;
	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
		uint32_t size =
			RTE_PKTMBUF_HEADROOM +
			dev->data->dev_conf.rxmode.max_rx_pkt_len;
		uint32_t sges_n;

		/*
		 * Determine the number of SGEs needed for a full packet
		 * and round it to the next power of two.
		 */
		sges_n = rte_log2_u32((size / mb_len) + !!(size % mb_len));
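		/*
		 * Worked example (values assumed for illustration, with the
		 * default 128-byte headroom): for max_rx_pkt_len = 9000 and
		 * mb_len = 2048, size = 128 + 9000 = 9128, 9128 / 2048
		 * rounds up to 5, and rte_log2_u32(5) = 3, i.e.
		 * 1 << 3 = 8 SGEs per packet; the overflow check below
		 * confirms 8 * 2048 - 128 >= 9000.
		 */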
		rxq->sges_n = sges_n;
		/* Make sure sges_n did not overflow. */
		size = mb_len * (1 << rxq->sges_n);
		size -= RTE_PKTMBUF_HEADROOM;
		if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
			rte_errno = EOVERFLOW;
			ERROR("%p: too many SGEs (%u) needed to handle"
			      " requested maximum packet size %u",
			      (void *)dev,
			      1 << sges_n,
			      dev->data->dev_conf.rxmode.max_rx_pkt_len);
			goto error;
		}
	} else {
		WARN("%p: the requested maximum Rx packet size (%u) is"
		     " larger than a single mbuf (%u) and scattered"
		     " mode has not been requested",
		     (void *)dev,
		     dev->data->dev_conf.rxmode.max_rx_pkt_len,
		     mb_len - RTE_PKTMBUF_HEADROOM);
	}
	DEBUG("%p: maximum number of segments per packet: %u",
	      (void *)dev, 1 << rxq->sges_n);
	if (desc % (1 << rxq->sges_n)) {
		rte_errno = EINVAL;
		ERROR("%p: number of Rx queue descriptors (%u) is not a"
		      " multiple of maximum segments per packet (%u)",
		      (void *)dev,
		      desc,
		      1 << rxq->sges_n);
		goto error;
	}
	if (mlx4_mr_btree_init(&rxq->mr_ctrl.cache_bh,
			       MLX4_MR_BTREE_CACHE_N, socket)) {
		/* rte_errno is already set. */
		goto error;
	}
	if (dev->data->dev_conf.intr_conf.rxq) {
		rxq->channel = mlx4_glue->create_comp_channel(priv->ctx);
		if (rxq->channel == NULL) {
			rte_errno = ENOMEM;
			ERROR("%p: Rx interrupt completion channel creation"
			      " failure: %s",
			      (void *)dev, strerror(rte_errno));
			goto error;
		}
		if (mlx4_fd_set_non_blocking(rxq->channel->fd) < 0) {
			ERROR("%p: unable to make Rx interrupt completion"
			      " channel non-blocking: %s",
			      (void *)dev, strerror(rte_errno));
			goto error;
		}
	}
	DEBUG("%p: adding Rx queue %p to list", (void *)dev, (void *)rxq);
	dev->data->rx_queues[idx] = rxq;
	return 0;
error:
	dev->data->rx_queues[idx] = NULL;
	ret = rte_errno;
	mlx4_rx_queue_release(rxq);
	rte_errno = ret;
	assert(rte_errno > 0);
	return -rte_errno;
}

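/*
 * Illustrative sketch (not part of the upstream file): the callback above
 * is reached through the generic ethdev API. Port, queue, descriptor and
 * pool values are assumptions made for this example.
 */
static __rte_unused int
example_setup_one_rxq(uint16_t port_id, struct rte_mempool *mp)
{
	/* 512 descriptors, default thresholds, device's NUMA socket. */
	return rte_eth_rx_queue_setup(port_id, 0, 512,
				      rte_eth_dev_socket_id(port_id),
				      NULL, mp);
}
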
/**
 * DPDK callback to release an Rx queue.
 *
 * @param dpdk_rxq
 *   Generic Rx queue pointer.
 */
void
mlx4_rx_queue_release(void *dpdk_rxq)
{
	struct rxq *rxq = (struct rxq *)dpdk_rxq;
	struct mlx4_priv *priv;
	unsigned int i;

	if (rxq == NULL)
		return;
	priv = rxq->priv;
	for (i = 0; i != ETH_DEV(priv)->data->nb_rx_queues; ++i)
		if (ETH_DEV(priv)->data->rx_queues[i] == rxq) {
			DEBUG("%p: removing Rx queue %p from list",
			      (void *)ETH_DEV(priv), (void *)rxq);
			ETH_DEV(priv)->data->rx_queues[i] = NULL;
			break;
		}
	assert(!rxq->cq);
	assert(!rxq->wq);
	assert(!rxq->wqes);
	assert(!rxq->rq_db);
	if (rxq->channel)
		claim_zero(mlx4_glue->destroy_comp_channel(rxq->channel));
	mlx4_mr_btree_free(&rxq->mr_ctrl.cache_bh);
	rte_free(rxq);
}