4 * Copyright 2015 6WIND S.A.
5 * Copyright 2015 Mellanox.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
44 #pragma GCC diagnostic ignored "-Wpedantic"
46 #include <infiniband/verbs.h>
47 #include <infiniband/arch.h>
48 #include <infiniband/mlx5_hw.h>
50 #pragma GCC diagnostic error "-Wpedantic"
53 /* DPDK headers don't like -pedantic. */
55 #pragma GCC diagnostic ignored "-Wpedantic"
58 #include <rte_malloc.h>
59 #include <rte_ethdev.h>
60 #include <rte_common.h>
61 #include <rte_interrupts.h>
62 #include <rte_debug.h>
64 #pragma GCC diagnostic error "-Wpedantic"
68 #include "mlx5_rxtx.h"
69 #include "mlx5_utils.h"
70 #include "mlx5_autoconf.h"
71 #include "mlx5_defs.h"
73 /* Initialization data for hash RX queues. */
74 const struct hash_rxq_init hash_rxq_init[] = {
76 .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |
77 IBV_EXP_RX_HASH_DST_IPV4 |
78 IBV_EXP_RX_HASH_SRC_PORT_TCP |
79 IBV_EXP_RX_HASH_DST_PORT_TCP),
80 .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_TCP,
82 .flow_spec.tcp_udp = {
83 .type = IBV_EXP_FLOW_SPEC_TCP,
84 .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
86 .underlayer = &hash_rxq_init[HASH_RXQ_IPV4],
89 .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |
90 IBV_EXP_RX_HASH_DST_IPV4 |
91 IBV_EXP_RX_HASH_SRC_PORT_UDP |
92 IBV_EXP_RX_HASH_DST_PORT_UDP),
93 .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_UDP,
95 .flow_spec.tcp_udp = {
96 .type = IBV_EXP_FLOW_SPEC_UDP,
97 .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
99 .underlayer = &hash_rxq_init[HASH_RXQ_IPV4],
102 .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |
103 IBV_EXP_RX_HASH_DST_IPV4),
104 .dpdk_rss_hf = (ETH_RSS_IPV4 |
108 .type = IBV_EXP_FLOW_SPEC_IPV4,
109 .size = sizeof(hash_rxq_init[0].flow_spec.ipv4),
111 .underlayer = &hash_rxq_init[HASH_RXQ_ETH],
114 .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
115 IBV_EXP_RX_HASH_DST_IPV6 |
116 IBV_EXP_RX_HASH_SRC_PORT_TCP |
117 IBV_EXP_RX_HASH_DST_PORT_TCP),
118 .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_TCP,
120 .flow_spec.tcp_udp = {
121 .type = IBV_EXP_FLOW_SPEC_TCP,
122 .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
124 .underlayer = &hash_rxq_init[HASH_RXQ_IPV6],
127 .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
128 IBV_EXP_RX_HASH_DST_IPV6 |
129 IBV_EXP_RX_HASH_SRC_PORT_UDP |
130 IBV_EXP_RX_HASH_DST_PORT_UDP),
131 .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_UDP,
133 .flow_spec.tcp_udp = {
134 .type = IBV_EXP_FLOW_SPEC_UDP,
135 .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
137 .underlayer = &hash_rxq_init[HASH_RXQ_IPV6],
140 .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
141 IBV_EXP_RX_HASH_DST_IPV6),
142 .dpdk_rss_hf = (ETH_RSS_IPV6 |
146 .type = IBV_EXP_FLOW_SPEC_IPV6,
147 .size = sizeof(hash_rxq_init[0].flow_spec.ipv6),
149 .underlayer = &hash_rxq_init[HASH_RXQ_ETH],
156 .type = IBV_EXP_FLOW_SPEC_ETH,
157 .size = sizeof(hash_rxq_init[0].flow_spec.eth),
163 /* Number of entries in hash_rxq_init[]. */
164 const unsigned int hash_rxq_init_n = RTE_DIM(hash_rxq_init);
166 /* Initialization data for hash RX queue indirection tables. */
167 static const struct ind_table_init ind_table_init[] = {
169 .max_size = -1u, /* Superseded by HW limitations. */
171 1 << HASH_RXQ_TCPV4 |
172 1 << HASH_RXQ_UDPV4 |
174 1 << HASH_RXQ_TCPV6 |
175 1 << HASH_RXQ_UDPV6 |
182 .hash_types = 1 << HASH_RXQ_ETH,
187 #define IND_TABLE_INIT_N RTE_DIM(ind_table_init)
189 /* Default RSS hash key also used for ConnectX-3. */
190 uint8_t rss_hash_default_key[] = {
191 0x2c, 0xc6, 0x81, 0xd1,
192 0x5b, 0xdb, 0xf4, 0xf7,
193 0xfc, 0xa2, 0x83, 0x19,
194 0xdb, 0x1a, 0x3e, 0x94,
195 0x6b, 0x9e, 0x38, 0xd9,
196 0x2c, 0x9c, 0x03, 0xd1,
197 0xad, 0x99, 0x44, 0xa7,
198 0xd9, 0x56, 0x3d, 0x59,
199 0x06, 0x3c, 0x25, 0xf3,
200 0xfc, 0x1f, 0xdc, 0x2a,
203 /* Length of the default RSS hash key. */
204 const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);
207 * Populate flow steering rule for a given hash RX queue type using
208 * information from hash_rxq_init[]. Nothing is written to flow_attr when
209 * flow_attr_size is not large enough, but the required size is still returned.
212 * Pointer to private structure.
213 * @param[out] flow_attr
214 * Pointer to flow attribute structure to fill. Note that the allocated
215 * area must be large enough to hold all flow specifications.
216 * @param flow_attr_size
217 * Entire size of flow_attr and trailing room for flow specifications.
219 * Hash RX queue type to use for flow steering rule.
222 * Total size of the flow attribute buffer. No errors are defined.
225 priv_flow_attr(struct priv *priv, struct ibv_exp_flow_attr *flow_attr,
226 size_t flow_attr_size, enum hash_rxq_type type)
228 size_t offset = sizeof(*flow_attr);
229 const struct hash_rxq_init *init = &hash_rxq_init[type];
231 assert(priv != NULL);
232 assert((size_t)type < RTE_DIM(hash_rxq_init));
234 offset += init->flow_spec.hdr.size;
235 init = init->underlayer;
236 } while (init != NULL);
237 if (offset > flow_attr_size)
239 flow_attr_size = offset;
240 init = &hash_rxq_init[type];
241 *flow_attr = (struct ibv_exp_flow_attr){
242 .type = IBV_EXP_FLOW_ATTR_NORMAL,
243 /* Priorities < 3 are reserved for flow director. */
244 .priority = init->flow_priority + 3,
250 offset -= init->flow_spec.hdr.size;
251 memcpy((void *)((uintptr_t)flow_attr + offset),
253 init->flow_spec.hdr.size);
254 ++flow_attr->num_of_specs;
255 init = init->underlayer;
256 } while (init != NULL);
257 return flow_attr_size;
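/*
 * Example (illustration only, not part of the original file): given the
 * contract above, a caller can probe for the required size with a zero
 * sized buffer, then fill the attribute in a second pass. Variable names
 * below are hypothetical.
 *
 *	size_t size = priv_flow_attr(priv, NULL, 0, HASH_RXQ_TCPV4);
 *	struct ibv_exp_flow_attr *attr = rte_calloc(__func__, 1, size, 0);
 *
 *	if (attr != NULL)
 *		priv_flow_attr(priv, attr, size, HASH_RXQ_TCPV4);
 *
 * After the second call, attr chains the ETH, IPv4 and TCP flow
 * specifications for HASH_RXQ_TCPV4.
 */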
261 * Convert hash type position in indirection table initializer to
262 * hash RX queue type.
265 * Indirection table initializer.
267 * Hash type position.
270 * Hash RX queue type.
272 static enum hash_rxq_type
273 hash_rxq_type_from_pos(const struct ind_table_init *table, unsigned int pos)
275 enum hash_rxq_type type = HASH_RXQ_TCPV4;
277 assert(pos < table->hash_types_n);
279 if ((table->hash_types & (1 << type)) && (pos-- == 0))
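/*
 * Example (not part of the original file): with hash_types equal to
 * (1 << HASH_RXQ_TCPV4) | (1 << HASH_RXQ_UDPV4) | (1 << HASH_RXQ_ETH),
 * pos 0 maps to HASH_RXQ_TCPV4, pos 1 to HASH_RXQ_UDPV4 and pos 2 to
 * HASH_RXQ_ETH, i.e. the pos-th bit set in hash_types starting from
 * HASH_RXQ_TCPV4.
 */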
287 * Filter out disabled hash RX queue types from ind_table_init[].
290 * Pointer to private structure.
295 * Number of table entries.
298 priv_make_ind_table_init(struct priv *priv,
299 struct ind_table_init (*table)[IND_TABLE_INIT_N])
304 unsigned int table_n = 0;
305 /* Mandatory to receive frames not handled by normal hash RX queues. */
306 unsigned int hash_types_sup = 1 << HASH_RXQ_ETH;
308 rss_hf = priv->rss_hf;
309 /* Process other protocols only if more than one queue. */
310 if (priv->rxqs_n > 1)
311 for (i = 0; (i != hash_rxq_init_n); ++i)
312 if (rss_hf & hash_rxq_init[i].dpdk_rss_hf)
313 hash_types_sup |= (1 << i);
315 /* Filter out entries whose protocols are not in the set. */
316 for (i = 0, j = 0; (i != IND_TABLE_INIT_N); ++i) {
320 /* j is increased only if the table has valid protocols. */
322 (*table)[j] = ind_table_init[i];
323 (*table)[j].hash_types &= hash_types_sup;
324 for (h = 0, nb = 0; (h != hash_rxq_init_n); ++h)
325 if (((*table)[j].hash_types >> h) & 0x1)
327 (*table)[j].hash_types_n = nb;
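/*
 * Example (not part of the original file): with several RX queues and
 * rss_hf = ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP,
 * hash_types_sup becomes
 * (1 << HASH_RXQ_TCPV4) | (1 << HASH_RXQ_UDPV4) | (1 << HASH_RXQ_ETH):
 * the first indirection table keeps only its TCPv4/UDPv4 types while the
 * single-type ETH table remains as the mandatory catch-all.
 */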
337 * Initialize hash RX queues and indirection table.
340 * Pointer to private structure.
343 * 0 on success, errno value on failure.
346 priv_create_hash_rxqs(struct priv *priv)
348 struct ibv_exp_wq *wqs[priv->reta_idx_n];
349 struct ind_table_init ind_table_init[IND_TABLE_INIT_N];
350 unsigned int ind_tables_n =
351 priv_make_ind_table_init(priv, &ind_table_init);
352 unsigned int hash_rxqs_n = 0;
353 struct hash_rxq (*hash_rxqs)[] = NULL;
354 struct ibv_exp_rwq_ind_table *(*ind_tables)[] = NULL;
360 assert(priv->ind_tables == NULL);
361 assert(priv->ind_tables_n == 0);
362 assert(priv->hash_rxqs == NULL);
363 assert(priv->hash_rxqs_n == 0);
364 assert(priv->pd != NULL);
365 assert(priv->ctx != NULL);
368 if (priv->rxqs_n == 0)
370 assert(priv->rxqs != NULL);
371 if (ind_tables_n == 0) {
372 ERROR("all hash RX queue types have been filtered out,"
373 " indirection table cannot be created");
376 if (priv->rxqs_n & (priv->rxqs_n - 1)) {
377 INFO("%u RX queues are configured, consider rounding this"
378 " number to the next power of two for better balancing",
380 DEBUG("indirection table extended to assume %u WQs",
383 for (i = 0; (i != priv->reta_idx_n); ++i) {
384 struct rxq_ctrl *rxq_ctrl;
386 rxq_ctrl = container_of((*priv->rxqs)[(*priv->reta_idx)[i]],
387 struct rxq_ctrl, rxq);
388 wqs[i] = rxq_ctrl->wq;
390 /* Get number of hash RX queues to configure. */
391 for (i = 0, hash_rxqs_n = 0; (i != ind_tables_n); ++i)
392 hash_rxqs_n += ind_table_init[i].hash_types_n;
393 DEBUG("allocating %u hash RX queues for %u WQs, %u indirection tables",
394 hash_rxqs_n, priv->rxqs_n, ind_tables_n);
395 /* Create indirection tables. */
396 ind_tables = rte_calloc(__func__, ind_tables_n,
397 sizeof((*ind_tables)[0]), 0);
398 if (ind_tables == NULL) {
400 ERROR("cannot allocate indirection tables container: %s",
404 for (i = 0; (i != ind_tables_n); ++i) {
405 struct ibv_exp_rwq_ind_table_init_attr ind_init_attr = {
407 .log_ind_tbl_size = 0, /* Set below. */
411 unsigned int ind_tbl_size = ind_table_init[i].max_size;
412 struct ibv_exp_rwq_ind_table *ind_table;
414 if (priv->reta_idx_n < ind_tbl_size)
415 ind_tbl_size = priv->reta_idx_n;
416 ind_init_attr.log_ind_tbl_size = log2above(ind_tbl_size);
418 ind_table = ibv_exp_create_rwq_ind_table(priv->ctx,
420 if (ind_table != NULL) {
421 (*ind_tables)[i] = ind_table;
424 /* Not clear whether errno is set. */
425 err = (errno ? errno : EINVAL);
426 ERROR("RX indirection table creation failed with error %d: %s",
430 /* Allocate array that holds hash RX queues and related data. */
431 hash_rxqs = rte_calloc(__func__, hash_rxqs_n,
432 sizeof((*hash_rxqs)[0]), 0);
433 if (hash_rxqs == NULL) {
435 ERROR("cannot allocate hash RX queues container: %s",
439 for (i = 0, j = 0, k = 0;
440 ((i != hash_rxqs_n) && (j != ind_tables_n));
442 struct hash_rxq *hash_rxq = &(*hash_rxqs)[i];
443 enum hash_rxq_type type =
444 hash_rxq_type_from_pos(&ind_table_init[j], k);
445 struct rte_eth_rss_conf *priv_rss_conf =
446 (*priv->rss_conf)[type];
447 struct ibv_exp_rx_hash_conf hash_conf = {
448 .rx_hash_function = IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
449 .rx_hash_key_len = (priv_rss_conf ?
450 priv_rss_conf->rss_key_len :
451 rss_hash_default_key_len),
452 .rx_hash_key = (priv_rss_conf ?
453 priv_rss_conf->rss_key :
454 rss_hash_default_key),
455 .rx_hash_fields_mask = hash_rxq_init[type].hash_fields,
456 .rwq_ind_tbl = (*ind_tables)[j],
458 struct ibv_exp_qp_init_attr qp_init_attr = {
459 .max_inl_recv = 0, /* Currently not supported. */
460 .qp_type = IBV_QPT_RAW_PACKET,
461 .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
462 IBV_EXP_QP_INIT_ATTR_RX_HASH),
464 .rx_hash_conf = &hash_conf,
465 .port_num = priv->port,
468 DEBUG("using indirection table %u for hash RX queue %u type %d",
470 *hash_rxq = (struct hash_rxq){
472 .qp = ibv_exp_create_qp(priv->ctx, &qp_init_attr),
475 if (hash_rxq->qp == NULL) {
476 err = (errno ? errno : EINVAL);
477 ERROR("Hash RX QP creation failure: %s",
481 if (++k < ind_table_init[j].hash_types_n)
483 /* Switch to the next indirection table and reset hash RX
484 * queue type array index. */
488 priv->ind_tables = ind_tables;
489 priv->ind_tables_n = ind_tables_n;
490 priv->hash_rxqs = hash_rxqs;
491 priv->hash_rxqs_n = hash_rxqs_n;
495 if (hash_rxqs != NULL) {
496 for (i = 0; (i != hash_rxqs_n); ++i) {
497 struct ibv_qp *qp = (*hash_rxqs)[i].qp;
501 claim_zero(ibv_destroy_qp(qp));
505 if (ind_tables != NULL) {
506 for (j = 0; (j != ind_tables_n); ++j) {
507 struct ibv_exp_rwq_ind_table *ind_table =
510 if (ind_table == NULL)
512 claim_zero(ibv_exp_destroy_rwq_ind_table(ind_table));
514 rte_free(ind_tables);
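/*
 * Example (illustration only, simplified): hash RX queues are meant to be
 * (re)created when the port is started and torn down when it is stopped,
 * along the lines of the hypothetical sketch below (error handling
 * omitted).
 *
 *	err = priv_create_hash_rxqs(priv);
 *	if (!err)
 *		err = priv_rehash_flows(priv);
 *	if (err)
 *		priv_destroy_hash_rxqs(priv);
 */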
520 * Clean up hash RX queues and indirection table.
523 * Pointer to private structure.
526 priv_destroy_hash_rxqs(struct priv *priv)
530 DEBUG("destroying %u hash RX queues", priv->hash_rxqs_n);
531 if (priv->hash_rxqs_n == 0) {
532 assert(priv->hash_rxqs == NULL);
533 assert(priv->ind_tables == NULL);
536 for (i = 0; (i != priv->hash_rxqs_n); ++i) {
537 struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i];
540 assert(hash_rxq->priv == priv);
541 assert(hash_rxq->qp != NULL);
542 /* Also check that there are no remaining flows. */
543 for (j = 0; (j != RTE_DIM(hash_rxq->special_flow)); ++j)
545 (k != RTE_DIM(hash_rxq->special_flow[j]));
547 assert(hash_rxq->special_flow[j][k] == NULL);
548 for (j = 0; (j != RTE_DIM(hash_rxq->mac_flow)); ++j)
549 for (k = 0; (k != RTE_DIM(hash_rxq->mac_flow[j])); ++k)
550 assert(hash_rxq->mac_flow[j][k] == NULL);
551 claim_zero(ibv_destroy_qp(hash_rxq->qp));
553 priv->hash_rxqs_n = 0;
554 rte_free(priv->hash_rxqs);
555 priv->hash_rxqs = NULL;
556 for (i = 0; (i != priv->ind_tables_n); ++i) {
557 struct ibv_exp_rwq_ind_table *ind_table =
558 (*priv->ind_tables)[i];
560 assert(ind_table != NULL);
561 claim_zero(ibv_exp_destroy_rwq_ind_table(ind_table));
563 priv->ind_tables_n = 0;
564 rte_free(priv->ind_tables);
565 priv->ind_tables = NULL;
569 * Check whether a given flow type is allowed.
572 * Pointer to private structure.
574 * Flow type to check.
577 * Nonzero if the given flow type is allowed.
580 priv_allow_flow_type(struct priv *priv, enum hash_rxq_flow_type type)
582 /* Only FLOW_TYPE_PROMISC is allowed when promiscuous mode
583 * has been requested. */
584 if (priv->promisc_req)
585 return type == HASH_RXQ_FLOW_TYPE_PROMISC;
587 case HASH_RXQ_FLOW_TYPE_PROMISC:
588 return !!priv->promisc_req;
589 case HASH_RXQ_FLOW_TYPE_ALLMULTI:
590 return !!priv->allmulti_req;
591 case HASH_RXQ_FLOW_TYPE_BROADCAST:
592 case HASH_RXQ_FLOW_TYPE_IPV6MULTI:
593 /* If allmulti is enabled, broadcast and ipv6multi
594 * are unnecessary. */
595 return !priv->allmulti_req;
596 case HASH_RXQ_FLOW_TYPE_MAC:
599 /* Unsupported flow type is not allowed. */
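/*
 * Example (not part of the original file): with promiscuous mode
 * requested, only HASH_RXQ_FLOW_TYPE_PROMISC is reported as allowed; with
 * allmulti requested instead, ALLMULTI and MAC are allowed while PROMISC,
 * BROADCAST and IPV6MULTI are not.
 */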
606 * Automatically enable/disable flows according to configuration.
612 * 0 on success, errno value on failure.
615 priv_rehash_flows(struct priv *priv)
617 enum hash_rxq_flow_type i;
619 for (i = HASH_RXQ_FLOW_TYPE_PROMISC;
620 i != RTE_DIM((*priv->hash_rxqs)[0].special_flow);
622 if (!priv_allow_flow_type(priv, i)) {
623 priv_special_flow_disable(priv, i);
625 int ret = priv_special_flow_enable(priv, i);
630 if (priv_allow_flow_type(priv, HASH_RXQ_FLOW_TYPE_MAC))
631 return priv_mac_addrs_enable(priv);
632 priv_mac_addrs_disable(priv);
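/*
 * Example (illustration only): callers toggling a global RX mode are
 * expected to update the corresponding request flag first and then let
 * this function reconcile the flows, e.g. (hypothetical sketch):
 *
 *	priv->promisc_req = 1;
 *	priv_rehash_flows(priv);
 */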
637 * Allocate RX queue elements.
640 * Pointer to RX queue structure.
642 * Number of elements to allocate.
644 * If not NULL, fetch buffers from this array instead of allocating them
645 * with rte_pktmbuf_alloc().
648 * 0 on success, errno value on failure.
651 rxq_alloc_elts(struct rxq_ctrl *rxq_ctrl, unsigned int elts_n,
652 struct rte_mbuf *(*pool)[])
654 const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
658 /* Iterate on segments. */
659 for (i = 0; (i != elts_n); ++i) {
660 struct rte_mbuf *buf;
661 volatile struct mlx5_wqe_data_seg *scat =
662 &(*rxq_ctrl->rxq.wqes)[i];
667 rte_pktmbuf_reset(buf);
668 rte_pktmbuf_refcnt_update(buf, 1);
670 buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
672 assert(pool == NULL);
673 ERROR("%p: empty mbuf pool", (void *)rxq_ctrl);
677 /* Headroom is reserved by rte_pktmbuf_alloc(). */
678 assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
679 /* Buffer is supposed to be empty. */
680 assert(rte_pktmbuf_data_len(buf) == 0);
681 assert(rte_pktmbuf_pkt_len(buf) == 0);
683 /* Only the first segment keeps headroom. */
685 SET_DATA_OFF(buf, 0);
686 PORT(buf) = rxq_ctrl->rxq.port_id;
687 DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
688 PKT_LEN(buf) = DATA_LEN(buf);
690 /* scat->addr must be able to store a pointer. */
691 assert(sizeof(scat->addr) >= sizeof(uintptr_t));
692 *scat = (struct mlx5_wqe_data_seg){
693 .addr = htonll(rte_pktmbuf_mtod(buf, uintptr_t)),
694 .byte_count = htonl(DATA_LEN(buf)),
695 .lkey = htonl(rxq_ctrl->mr->lkey),
697 (*rxq_ctrl->rxq.elts)[i] = buf;
699 DEBUG("%p: allocated and configured %u segments (max %u packets)",
700 (void *)rxq_ctrl, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n));
704 assert(pool == NULL);
706 for (i = 0; (i != elts_n); ++i) {
707 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
708 rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
709 (*rxq_ctrl->rxq.elts)[i] = NULL;
711 DEBUG("%p: failed, freed everything", (void *)rxq_ctrl);
717 * Free RX queue elements.
720 * Pointer to RX queue structure.
723 rxq_free_elts(struct rxq_ctrl *rxq_ctrl)
727 DEBUG("%p: freeing WRs", (void *)rxq_ctrl);
728 if (rxq_ctrl->rxq.elts == NULL)
731 for (i = 0; (i != (1u << rxq_ctrl->rxq.elts_n)); ++i) {
732 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
733 rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
734 (*rxq_ctrl->rxq.elts)[i] = NULL;
739 * Clean up an RX queue.
741 * Destroy objects, free allocated memory and reset the structure for reuse.
744 * Pointer to RX queue structure.
747 rxq_cleanup(struct rxq_ctrl *rxq_ctrl)
749 DEBUG("cleaning up %p", (void *)rxq_ctrl);
750 rxq_free_elts(rxq_ctrl);
751 if (rxq_ctrl->fdir_queue != NULL)
752 priv_fdir_queue_destroy(rxq_ctrl->priv, rxq_ctrl->fdir_queue);
753 if (rxq_ctrl->wq != NULL)
754 claim_zero(ibv_exp_destroy_wq(rxq_ctrl->wq));
755 if (rxq_ctrl->cq != NULL)
756 claim_zero(ibv_destroy_cq(rxq_ctrl->cq));
757 if (rxq_ctrl->channel != NULL)
758 claim_zero(ibv_destroy_comp_channel(rxq_ctrl->channel));
759 if (rxq_ctrl->mr != NULL)
760 claim_zero(ibv_dereg_mr(rxq_ctrl->mr));
761 memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
765 * Reconfigure RX queue buffers.
767 * rxq_rehash() does not allocate mbufs; allocating them from the wrong
768 * thread (such as a control thread) may corrupt the pool.
769 * In case of failure, the queue is left untouched.
772 * Pointer to Ethernet device structure.
777 * 0 on success, errno value on failure.
780 rxq_rehash(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl)
782 unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
784 struct ibv_exp_wq_attr mod;
787 DEBUG("%p: rehashing queue %p with %u SGE(s) per packet",
788 (void *)dev, (void *)rxq_ctrl, 1 << rxq_ctrl->rxq.sges_n);
789 assert(!(elts_n % (1 << rxq_ctrl->rxq.sges_n)));
790 /* From now on, any failure will render the queue unusable.
791 * Reinitialize WQ. */
792 mod = (struct ibv_exp_wq_attr){
793 .attr_mask = IBV_EXP_WQ_ATTR_STATE,
794 .wq_state = IBV_EXP_WQS_RESET,
796 err = ibv_exp_modify_wq(rxq_ctrl->wq, &mod);
798 ERROR("%p: cannot reset WQ: %s", (void *)dev, strerror(err));
802 /* Snatch mbufs from original queue. */
803 claim_zero(rxq_alloc_elts(rxq_ctrl, elts_n, rxq_ctrl->rxq.elts));
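/*
 * rxq_alloc_elts() bumped the reference count of every reused mbuf, so
 * the rte_pktmbuf_free_seg() calls below only drop it back to 1 and do
 * not return the buffers to the pool.
 */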
804 for (i = 0; i != elts_n; ++i) {
805 struct rte_mbuf *buf = (*rxq_ctrl->rxq.elts)[i];
807 assert(rte_mbuf_refcnt_read(buf) == 2);
808 rte_pktmbuf_free_seg(buf);
810 /* Change queue state to ready. */
811 mod = (struct ibv_exp_wq_attr){
812 .attr_mask = IBV_EXP_WQ_ATTR_STATE,
813 .wq_state = IBV_EXP_WQS_RDY,
815 err = ibv_exp_modify_wq(rxq_ctrl->wq, &mod);
817 ERROR("%p: WQ state to IBV_EXP_WQS_RDY failed: %s",
818 (void *)dev, strerror(err));
821 /* Update doorbell counter. */
822 rxq_ctrl->rxq.rq_ci = elts_n >> rxq_ctrl->rxq.sges_n;
824 *rxq_ctrl->rxq.rq_db = htonl(rxq_ctrl->rxq.rq_ci);
831 * Initialize RX queue.
834 * Pointer to RX queue control template.
837 * 0 on success, errno value on failure.
840 rxq_setup(struct rxq_ctrl *tmpl)
842 struct ibv_cq *ibcq = tmpl->cq;
843 struct ibv_mlx5_cq_info cq_info;
844 struct mlx5_rwq *rwq = container_of(tmpl->wq, struct mlx5_rwq, wq);
845 struct rte_mbuf *(*elts)[1 << tmpl->rxq.elts_n] =
846 rte_calloc_socket("RXQ", 1, sizeof(*elts), 0, tmpl->socket);
848 if (ibv_mlx5_exp_get_cq_info(ibcq, &cq_info)) {
849 ERROR("Unable to query CQ info. check your OFED.");
852 if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
853 ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
854 "it should be set to %u", RTE_CACHE_LINE_SIZE);
859 tmpl->rxq.rq_db = rwq->rq.db;
860 tmpl->rxq.cqe_n = log2above(cq_info.cqe_cnt);
863 tmpl->rxq.cq_db = cq_info.dbrec;
865 (volatile struct mlx5_wqe_data_seg (*)[])
866 (uintptr_t)rwq->rq.buff;
868 (volatile struct mlx5_cqe (*)[])
869 (uintptr_t)cq_info.buf;
870 tmpl->rxq.elts = elts;
875 * Configure an RX queue.
878 * Pointer to Ethernet device structure.
880 * Pointer to RX queue structure.
882 * Number of descriptors to configure in queue.
884 * NUMA socket on which memory must be allocated.
886 * Thresholds parameters.
888 * Memory pool for buffer allocations.
891 * 0 on success, errno value on failure.
894 rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
895 uint16_t desc, unsigned int socket,
896 const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
898 struct priv *priv = dev->data->dev_private;
899 struct rxq_ctrl tmpl = {
903 .elts_n = log2above(desc),
905 .rss_hash = priv->rxqs_n > 1,
908 struct ibv_exp_wq_attr mod;
910 struct ibv_exp_cq_init_attr cq;
911 struct ibv_exp_wq_init_attr wq;
912 struct ibv_exp_cq_attr cq_attr;
914 unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
915 unsigned int cqe_n = desc - 1;
916 struct rte_mbuf *(*elts)[desc] = NULL;
919 (void)conf; /* Thresholds configuration (ignored). */
920 /* Enable scattered packets support for this queue if necessary. */
921 assert(mb_len >= RTE_PKTMBUF_HEADROOM);
922 if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
923 (mb_len - RTE_PKTMBUF_HEADROOM)) {
925 } else if (dev->data->dev_conf.rxmode.enable_scatter) {
927 RTE_PKTMBUF_HEADROOM +
928 dev->data->dev_conf.rxmode.max_rx_pkt_len;
932 * Determine the number of SGEs needed for a full packet
933 * and round it to the next power of two.
935 sges_n = log2above((size / mb_len) + !!(size % mb_len));
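/*
 * Worked example (not part of the original code): with a hypothetical
 * 2048-byte mbuf data room, a 128-byte headroom and
 * max_rx_pkt_len = 9000, size = 9128 and 9128 / 2048 = 4 with a
 * remainder, hence 5 segments, rounded up to sges_n = log2above(5) = 3,
 * i.e. 8 SGEs per packet.
 */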
936 tmpl.rxq.sges_n = sges_n;
937 /* Make sure rxq.sges_n did not overflow. */
938 size = mb_len * (1 << tmpl.rxq.sges_n);
939 size -= RTE_PKTMBUF_HEADROOM;
940 if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
941 ERROR("%p: too many SGEs (%u) needed to handle"
942 " requested maximum packet size %u",
945 dev->data->dev_conf.rxmode.max_rx_pkt_len);
949 WARN("%p: the requested maximum Rx packet size (%u) is"
950 " larger than a single mbuf (%u) and scattered"
951 " mode has not been requested",
953 dev->data->dev_conf.rxmode.max_rx_pkt_len,
954 mb_len - RTE_PKTMBUF_HEADROOM);
956 DEBUG("%p: maximum number of segments per packet: %u",
957 (void *)dev, 1 << tmpl.rxq.sges_n);
958 if (desc % (1 << tmpl.rxq.sges_n)) {
959 ERROR("%p: number of RX queue descriptors (%u) is not a"
960 " multiple of SGEs per packet (%u)",
963 1 << tmpl.rxq.sges_n);
966 /* Toggle RX checksum offload if hardware supports it. */
968 tmpl.rxq.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
969 if (priv->hw_csum_l2tun)
970 tmpl.rxq.csum_l2tun =
971 !!dev->data->dev_conf.rxmode.hw_ip_checksum;
972 /* Use the entire RX mempool as the memory region. */
973 tmpl.mr = mlx5_mp2mr(priv->pd, mp);
974 if (tmpl.mr == NULL) {
976 ERROR("%p: MR creation failure: %s",
977 (void *)dev, strerror(ret));
980 if (dev->data->dev_conf.intr_conf.rxq) {
981 tmpl.channel = ibv_create_comp_channel(priv->ctx);
982 if (tmpl.channel == NULL) {
984 ERROR("%p: Rx interrupt completion channel creation"
986 (void *)dev, strerror(ret));
990 attr.cq = (struct ibv_exp_cq_init_attr){
993 if (priv->cqe_comp) {
994 attr.cq.comp_mask |= IBV_EXP_CQ_INIT_ATTR_FLAGS;
995 attr.cq.flags |= IBV_EXP_CQ_COMPRESSED_CQE;
996 cqe_n = (desc * 2) - 1; /* Double the number of CQEs. */
998 tmpl.cq = ibv_exp_create_cq(priv->ctx, cqe_n, NULL, tmpl.channel, 0,
1000 if (tmpl.cq == NULL) {
1002 ERROR("%p: CQ creation failure: %s",
1003 (void *)dev, strerror(ret));
1006 DEBUG("priv->device_attr.max_qp_wr is %d",
1007 priv->device_attr.max_qp_wr);
1008 DEBUG("priv->device_attr.max_sge is %d",
1009 priv->device_attr.max_sge);
1010 /* Configure VLAN stripping. */
1011 tmpl.rxq.vlan_strip = (priv->hw_vlan_strip &&
1012 !!dev->data->dev_conf.rxmode.hw_vlan_strip);
1013 attr.wq = (struct ibv_exp_wq_init_attr){
1014 .wq_context = NULL, /* Could be useful in the future. */
1015 .wq_type = IBV_EXP_WQT_RQ,
1016 /* Max number of outstanding WRs. */
1017 .max_recv_wr = desc >> tmpl.rxq.sges_n,
1018 /* Max number of scatter/gather elements in a WR. */
1019 .max_recv_sge = 1 << tmpl.rxq.sges_n,
1023 IBV_EXP_CREATE_WQ_VLAN_OFFLOADS |
1025 .vlan_offloads = (tmpl.rxq.vlan_strip ?
1026 IBV_EXP_RECEIVE_WQ_CVLAN_STRIP :
1029 /* By default, FCS (CRC) is stripped by hardware. */
1030 if (dev->data->dev_conf.rxmode.hw_strip_crc) {
1031 tmpl.rxq.crc_present = 0;
1032 } else if (priv->hw_fcs_strip) {
1033 /* Ask HW/Verbs to leave CRC in place when supported. */
1034 attr.wq.flags |= IBV_EXP_CREATE_WQ_FLAG_SCATTER_FCS;
1035 attr.wq.comp_mask |= IBV_EXP_CREATE_WQ_FLAGS;
1036 tmpl.rxq.crc_present = 1;
1038 WARN("%p: CRC stripping has been disabled but will still"
1039 " be performed by hardware, make sure MLNX_OFED and"
1040 " firmware are up to date",
1042 tmpl.rxq.crc_present = 0;
1044 DEBUG("%p: CRC stripping is %s, %u bytes will be subtracted from"
1045 " incoming frames to hide it",
1047 tmpl.rxq.crc_present ? "disabled" : "enabled",
1048 tmpl.rxq.crc_present << 2);
1049 if (!mlx5_getenv_int("MLX5_PMD_ENABLE_PADDING"))
1050 ; /* Nothing else to do. */
1051 else if (priv->hw_padding) {
1052 INFO("%p: enabling packet padding on queue %p",
1053 (void *)dev, (void *)rxq_ctrl);
1054 attr.wq.flags |= IBV_EXP_CREATE_WQ_FLAG_RX_END_PADDING;
1055 attr.wq.comp_mask |= IBV_EXP_CREATE_WQ_FLAGS;
1057 WARN("%p: packet padding has been requested but is not"
1058 " supported, make sure MLNX_OFED and firmware are"
1062 tmpl.wq = ibv_exp_create_wq(priv->ctx, &attr.wq);
1063 if (tmpl.wq == NULL) {
1064 ret = (errno ? errno : EINVAL);
1065 ERROR("%p: WQ creation failure: %s",
1066 (void *)dev, strerror(ret));
1070 * Make sure the number of WRs*SGEs matches expectations since a queue
1071 * cannot allocate more than "desc" buffers.
1073 if (((int)attr.wq.max_recv_wr != (desc >> tmpl.rxq.sges_n)) ||
1074 ((int)attr.wq.max_recv_sge != (1 << tmpl.rxq.sges_n))) {
1075 ERROR("%p: requested %u*%u but got %u*%u WRs*SGEs",
1077 (desc >> tmpl.rxq.sges_n), (1 << tmpl.rxq.sges_n),
1078 attr.wq.max_recv_wr, attr.wq.max_recv_sge);
1083 tmpl.rxq.port_id = dev->data->port_id;
1084 DEBUG("%p: RTE port ID: %u", (void *)rxq_ctrl, tmpl.rxq.port_id);
1085 /* Change queue state to ready. */
1086 mod = (struct ibv_exp_wq_attr){
1087 .attr_mask = IBV_EXP_WQ_ATTR_STATE,
1088 .wq_state = IBV_EXP_WQS_RDY,
1090 ret = ibv_exp_modify_wq(tmpl.wq, &mod);
1092 ERROR("%p: WQ state to IBV_EXP_WQS_RDY failed: %s",
1093 (void *)dev, strerror(ret));
1096 ret = rxq_setup(&tmpl);
1098 ERROR("%p: cannot initialize RX queue structure: %s",
1099 (void *)dev, strerror(ret));
1102 /* Reuse buffers from original queue if possible. */
1103 if (rxq_ctrl->rxq.elts_n) {
1104 assert(1 << rxq_ctrl->rxq.elts_n == desc);
1105 assert(rxq_ctrl->rxq.elts != tmpl.rxq.elts);
1106 ret = rxq_alloc_elts(&tmpl, desc, rxq_ctrl->rxq.elts);
1108 ret = rxq_alloc_elts(&tmpl, desc, NULL);
1110 ERROR("%p: RXQ allocation failed: %s",
1111 (void *)dev, strerror(ret));
1114 /* Clean up rxq in case we're reinitializing it. */
1115 DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq_ctrl);
1116 rxq_cleanup(rxq_ctrl);
1117 /* Move mbuf pointers to dedicated storage area in RX queue. */
1118 elts = (void *)(rxq_ctrl + 1);
1119 rte_memcpy(elts, tmpl.rxq.elts, sizeof(*elts));
1121 memset(tmpl.rxq.elts, 0x55, sizeof(*elts));
1123 rte_free(tmpl.rxq.elts);
1124 tmpl.rxq.elts = elts;
1126 /* Update doorbell counter. */
1127 rxq_ctrl->rxq.rq_ci = desc >> rxq_ctrl->rxq.sges_n;
1129 *rxq_ctrl->rxq.rq_db = htonl(rxq_ctrl->rxq.rq_ci);
1130 DEBUG("%p: rxq updated with %p", (void *)rxq_ctrl, (void *)&tmpl);
1134 elts = tmpl.rxq.elts;
1142 * DPDK callback to configure an RX queue.
1145 * Pointer to Ethernet device structure.
1149 * Number of descriptors to configure in queue.
1151 * NUMA socket on which memory must be allocated.
1153 * Thresholds parameters.
1155 * Memory pool for buffer allocations.
1158 * 0 on success, negative errno value on failure.
1161 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1162 unsigned int socket, const struct rte_eth_rxconf *conf,
1163 struct rte_mempool *mp)
1165 struct priv *priv = dev->data->dev_private;
1166 struct rxq *rxq = (*priv->rxqs)[idx];
1167 struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
1170 if (mlx5_is_secondary())
1171 return -E_RTE_SECONDARY;
1174 if (!rte_is_power_of_2(desc)) {
1175 desc = 1 << log2above(desc);
1176 WARN("%p: increased number of descriptors in RX queue %u"
1177 " to the next power of two (%d)",
1178 (void *)dev, idx, desc);
1180 DEBUG("%p: configuring queue %u for %u descriptors",
1181 (void *)dev, idx, desc);
1182 if (idx >= priv->rxqs_n) {
1183 ERROR("%p: queue index out of range (%u >= %u)",
1184 (void *)dev, idx, priv->rxqs_n);
1189 DEBUG("%p: reusing already allocated queue index %u (%p)",
1190 (void *)dev, idx, (void *)rxq);
1191 if (priv->started) {
1195 (*priv->rxqs)[idx] = NULL;
1196 rxq_cleanup(rxq_ctrl);
1197 /* Resize if rxq size is changed. */
1198 if (rxq_ctrl->rxq.elts_n != log2above(desc)) {
1199 rxq_ctrl = rte_realloc(rxq_ctrl,
1201 desc * sizeof(struct rte_mbuf *),
1202 RTE_CACHE_LINE_SIZE);
1204 ERROR("%p: unable to reallocate queue index %u",
1211 rxq_ctrl = rte_calloc_socket("RXQ", 1, sizeof(*rxq_ctrl) +
1212 desc * sizeof(struct rte_mbuf *),
1214 if (rxq_ctrl == NULL) {
1215 ERROR("%p: unable to allocate queue index %u",
1221 ret = rxq_ctrl_setup(dev, rxq_ctrl, desc, socket, conf, mp);
1225 rxq_ctrl->rxq.stats.idx = idx;
1226 DEBUG("%p: adding RX queue %p to list",
1227 (void *)dev, (void *)rxq_ctrl);
1228 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
1229 /* Update receive callback. */
1230 priv_select_rx_function(priv);
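/*
 * Example (illustration only, not part of the original file): this
 * callback is reached through the generic ethdev API, e.g. from an
 * application doing something like the following (hypothetical values).
 *
 *	struct rte_mempool *mp =
 *		rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *					RTE_MBUF_DEFAULT_BUF_SIZE,
 *					rte_socket_id());
 *	int ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *					 NULL, mp);
 */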
1237 * DPDK callback to release an RX queue.
1240 * Generic RX queue pointer.
1243 mlx5_rx_queue_release(void *dpdk_rxq)
1245 struct rxq *rxq = (struct rxq *)dpdk_rxq;
1246 struct rxq_ctrl *rxq_ctrl;
1250 if (mlx5_is_secondary())
1255 rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
1256 priv = rxq_ctrl->priv;
1258 if (priv_flow_rxq_in_use(priv, rxq))
1259 rte_panic("Rx queue %p is still used by a flow and cannot be"
1260 " removed\n", (void *)rxq_ctrl);
1261 for (i = 0; (i != priv->rxqs_n); ++i)
1262 if ((*priv->rxqs)[i] == rxq) {
1263 DEBUG("%p: removing RX queue %p from list",
1264 (void *)priv->dev, (void *)rxq_ctrl);
1265 (*priv->rxqs)[i] = NULL;
1268 rxq_cleanup(rxq_ctrl);
1274 * DPDK callback for RX in secondary processes.
1276 * This function configures all queues from primary process information
1277 * if necessary before reverting to the normal RX burst callback.
1280 * Generic pointer to RX queue structure.
1282 * Array to store received packets.
1284 * Maximum number of packets in array.
1287 * Number of packets successfully received (<= pkts_n).
1290 mlx5_rx_burst_secondary_setup(void *dpdk_rxq, struct rte_mbuf **pkts,
1293 struct rxq *rxq = dpdk_rxq;
1294 struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
1295 struct priv *priv = mlx5_secondary_data_setup(rxq_ctrl->priv);
1296 struct priv *primary_priv;
1302 mlx5_secondary_data[priv->dev->data->port_id].primary_priv;
1303 /* Look for queue index in both private structures. */
1304 for (index = 0; index != priv->rxqs_n; ++index)
1305 if (((*primary_priv->rxqs)[index] == rxq) ||
1306 ((*priv->rxqs)[index] == rxq))
1308 if (index == priv->rxqs_n)
1310 rxq = (*priv->rxqs)[index];
1311 return priv->dev->rx_pkt_burst(rxq, pkts, pkts_n);
1315 * Allocate queue vector and fill epoll fd list for Rx interrupts.
1318 * Pointer to private structure.
1321 * 0 on success, negative on failure.
1324 priv_rx_intr_vec_enable(struct priv *priv)
1327 unsigned int rxqs_n = priv->rxqs_n;
1328 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
1329 unsigned int count = 0;
1330 struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
1332 if (!priv->dev->data->dev_conf.intr_conf.rxq)
1334 priv_rx_intr_vec_disable(priv);
1335 intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
1336 if (intr_handle->intr_vec == NULL) {
1337 ERROR("failed to allocate memory for interrupt vector,"
1338 " Rx interrupts will not be supported");
1341 intr_handle->type = RTE_INTR_HANDLE_EXT;
1342 for (i = 0; i != n; ++i) {
1343 struct rxq *rxq = (*priv->rxqs)[i];
1344 struct rxq_ctrl *rxq_ctrl =
1345 container_of(rxq, struct rxq_ctrl, rxq);
1350 /* Skip queues that cannot request interrupts. */
1351 if (!rxq || !rxq_ctrl->channel) {
1352 /* Use invalid intr_vec[] index to disable entry. */
1353 intr_handle->intr_vec[i] =
1354 RTE_INTR_VEC_RXTX_OFFSET +
1355 RTE_MAX_RXTX_INTR_VEC_ID;
1358 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
1359 ERROR("too many Rx queues for interrupt vector size"
1360 " (%d), Rx interrupts cannot be enabled",
1361 RTE_MAX_RXTX_INTR_VEC_ID);
1362 priv_rx_intr_vec_disable(priv);
1365 fd = rxq_ctrl->channel->fd;
1366 flags = fcntl(fd, F_GETFL);
1367 rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
1369 ERROR("failed to make Rx interrupt file descriptor"
1370 " %d non-blocking for queue index %d", fd, i);
1371 priv_rx_intr_vec_disable(priv);
1374 intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
1375 intr_handle->efds[count] = fd;
1379 priv_rx_intr_vec_disable(priv);
1381 intr_handle->nb_efd = count;
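/*
 * Example (illustration only): once the vector is filled, an application
 * can wait for Rx interrupts through the standard ethdev/epoll API, e.g.
 * (hypothetical sketch, events and timeout_ms supplied by the caller):
 *
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, events, 1, timeout_ms);
 */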
1386 * Clean up Rx interrupts handler.
1389 * Pointer to private structure.
1392 priv_rx_intr_vec_disable(struct priv *priv)
1394 struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
1396 rte_intr_free_epoll_fd(intr_handle);
1397 free(intr_handle->intr_vec);
1398 intr_handle->nb_efd = 0;
1399 intr_handle->intr_vec = NULL;
1402 #ifdef HAVE_UPDATE_CQ_CI
1405 * DPDK callback for Rx queue interrupt enable.
1408 * Pointer to Ethernet device structure.
1409 * @param rx_queue_id
1413 * 0 on success, negative on failure.
1416 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1418 struct priv *priv = mlx5_get_priv(dev);
1419 struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
1420 struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
1423 if (!rxq || !rxq_ctrl->channel) {
1426 ibv_mlx5_exp_update_cq_ci(rxq_ctrl->cq, rxq->cq_ci);
1427 ret = ibv_req_notify_cq(rxq_ctrl->cq, 0);
1430 WARN("unable to arm interrupt on rx queue %d", rx_queue_id);
1435 * DPDK callback for Rx queue interrupt disable.
1438 * Pointer to Ethernet device structure.
1439 * @param rx_queue_id
1443 * 0 on success, negative on failure.
1446 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1448 struct priv *priv = mlx5_get_priv(dev);
1449 struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
1450 struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
1451 struct ibv_cq *ev_cq;
1455 if (!rxq || !rxq_ctrl->channel) {
1458 ret = ibv_get_cq_event(rxq_ctrl->cq->channel, &ev_cq, &ev_ctx);
1459 if (ret || ev_cq != rxq_ctrl->cq)
1463 WARN("unable to disable interrupt on rx queue %d",
1466 ibv_ack_cq_events(rxq_ctrl->cq, 1);
1470 #endif /* HAVE_UPDATE_CQ_CI */