4 * Copyright 2015 6WIND S.A.
5 * Copyright 2015 Mellanox.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
43 #pragma GCC diagnostic ignored "-pedantic"
45 #include <infiniband/verbs.h>
47 #pragma GCC diagnostic error "-pedantic"
50 /* DPDK headers don't like -pedantic. */
52 #pragma GCC diagnostic ignored "-pedantic"
55 #include <rte_malloc.h>
56 #include <rte_ethdev.h>
57 #include <rte_common.h>
59 #pragma GCC diagnostic error "-pedantic"
63 #include "mlx5_rxtx.h"
64 #include "mlx5_utils.h"
65 #include "mlx5_autoconf.h"
66 #include "mlx5_defs.h"
68 /* Initialization data for hash RX queues. */
69 const struct hash_rxq_init hash_rxq_init[] = {
/* TCPv4: hash on IPv4 src/dst addresses and TCP src/dst ports. */
71 .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |
72 IBV_EXP_RX_HASH_DST_IPV4 |
73 IBV_EXP_RX_HASH_SRC_PORT_TCP |
74 IBV_EXP_RX_HASH_DST_PORT_TCP),
75 .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_TCP,
77 .flow_spec.tcp_udp = {
78 .type = IBV_EXP_FLOW_SPEC_TCP,
79 .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
/* Flow specs chain to the IPv4 entry below (see priv_flow_attr()). */
81 .underlayer = &hash_rxq_init[HASH_RXQ_IPV4],
/* UDPv4: hash on IPv4 src/dst addresses and UDP src/dst ports. */
84 .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |
85 IBV_EXP_RX_HASH_DST_IPV4 |
86 IBV_EXP_RX_HASH_SRC_PORT_UDP |
87 IBV_EXP_RX_HASH_DST_PORT_UDP),
88 .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_UDP,
90 .flow_spec.tcp_udp = {
91 .type = IBV_EXP_FLOW_SPEC_UDP,
92 .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
94 .underlayer = &hash_rxq_init[HASH_RXQ_IPV4],
/* Other IPv4: addresses only, for non-TCP/UDP IPv4 traffic. */
97 .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |
98 IBV_EXP_RX_HASH_DST_IPV4),
99 .dpdk_rss_hf = (ETH_RSS_IPV4 |
103 .type = IBV_EXP_FLOW_SPEC_IPV4,
104 .size = sizeof(hash_rxq_init[0].flow_spec.ipv4),
106 .underlayer = &hash_rxq_init[HASH_RXQ_ETH],
/* IPv6 entries are compiled in only when Verbs supports IPv6 specs. */
108 #ifdef HAVE_FLOW_SPEC_IPV6
/* TCPv6: hash on IPv6 src/dst addresses and TCP src/dst ports. */
110 .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
111 IBV_EXP_RX_HASH_DST_IPV6 |
112 IBV_EXP_RX_HASH_SRC_PORT_TCP |
113 IBV_EXP_RX_HASH_DST_PORT_TCP),
114 .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_TCP,
116 .flow_spec.tcp_udp = {
117 .type = IBV_EXP_FLOW_SPEC_TCP,
118 .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
120 .underlayer = &hash_rxq_init[HASH_RXQ_IPV6],
/* UDPv6: hash on IPv6 src/dst addresses and UDP src/dst ports. */
123 .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
124 IBV_EXP_RX_HASH_DST_IPV6 |
125 IBV_EXP_RX_HASH_SRC_PORT_UDP |
126 IBV_EXP_RX_HASH_DST_PORT_UDP),
127 .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_UDP,
129 .flow_spec.tcp_udp = {
130 .type = IBV_EXP_FLOW_SPEC_UDP,
131 .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
133 .underlayer = &hash_rxq_init[HASH_RXQ_IPV6],
/* Other IPv6: addresses only. */
136 .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
137 IBV_EXP_RX_HASH_DST_IPV6),
138 .dpdk_rss_hf = (ETH_RSS_IPV6 |
142 .type = IBV_EXP_FLOW_SPEC_IPV6,
143 .size = sizeof(hash_rxq_init[0].flow_spec.ipv6),
145 .underlayer = &hash_rxq_init[HASH_RXQ_ETH],
147 #endif /* HAVE_FLOW_SPEC_IPV6 */
/* Ethernet: catch-all entry, no hashing; root of the underlayer chain. */
153 .type = IBV_EXP_FLOW_SPEC_ETH,
154 .size = sizeof(hash_rxq_init[0].flow_spec.eth),
160 /* Number of entries in hash_rxq_init[]. */
161 const unsigned int hash_rxq_init_n = RTE_DIM(hash_rxq_init);
163 /* Initialization data for hash RX queue indirection tables. */
164 static const struct ind_table_init ind_table_init[] = {
/* First table: RSS-capable protocols (TCP/UDP/other over IPv4/IPv6). */
166 .max_size = -1u, /* Superseded by HW limitations. */
168 1 << HASH_RXQ_TCPV4 |
169 1 << HASH_RXQ_UDPV4 |
171 #ifdef HAVE_FLOW_SPEC_IPV6
172 1 << HASH_RXQ_TCPV6 |
173 1 << HASH_RXQ_UDPV6 |
175 #endif /* HAVE_FLOW_SPEC_IPV6 */
/* hash_types_n differs depending on IPv6 support (entries above). */
177 #ifdef HAVE_FLOW_SPEC_IPV6
179 #else /* HAVE_FLOW_SPEC_IPV6 */
181 #endif /* HAVE_FLOW_SPEC_IPV6 */
/* Second table: single-entry Ethernet catch-all queue. */
185 .hash_types = 1 << HASH_RXQ_ETH,
190 #define IND_TABLE_INIT_N RTE_DIM(ind_table_init)
192 /* Default RSS hash key also used for ConnectX-3. */
/* Toeplitz hash key used when the application does not provide one
 * (see hash_conf setup in priv_create_hash_rxqs()). */
193 uint8_t rss_hash_default_key[] = {
194 0x2c, 0xc6, 0x81, 0xd1,
195 0x5b, 0xdb, 0xf4, 0xf7,
196 0xfc, 0xa2, 0x83, 0x19,
197 0xdb, 0x1a, 0x3e, 0x94,
198 0x6b, 0x9e, 0x38, 0xd9,
199 0x2c, 0x9c, 0x03, 0xd1,
200 0xad, 0x99, 0x44, 0xa7,
201 0xd9, 0x56, 0x3d, 0x59,
202 0x06, 0x3c, 0x25, 0xf3,
203 0xfc, 0x1f, 0xdc, 0x2a,
206 /* Length of the default RSS hash key. */
207 const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);
210 * Populate flow steering rule for a given hash RX queue type using
211 * information from hash_rxq_init[]. Nothing is written to flow_attr when
212 * flow_attr_size is not large enough, but the required size is still returned.
215 * Pointer to private structure.
216 * @param[out] flow_attr
217 * Pointer to flow attribute structure to fill. Note that the allocated
218 * area must be larger and large enough to hold all flow specifications.
219 * @param flow_attr_size
220 * Entire size of flow_attr and trailing room for flow specifications.
222 * Hash RX queue type to use for flow steering rule.
225 * Total size of the flow attribute buffer. No errors are defined.
/* Build a flow steering rule for hash RX queue type TYPE by walking the
 * hash_rxq_init[] underlayer chain; returns the total buffer size needed,
 * writing nothing when flow_attr_size is too small. */
228 priv_flow_attr(struct priv *priv, struct ibv_exp_flow_attr *flow_attr,
229 size_t flow_attr_size, enum hash_rxq_type type)
231 size_t offset = sizeof(*flow_attr);
232 const struct hash_rxq_init *init = &hash_rxq_init[type];
234 assert(priv != NULL);
235 assert((size_t)type < RTE_DIM(hash_rxq_init));
/* First pass: accumulate the size of every flow spec in the chain. */
237 offset += init->flow_spec.hdr.size;
238 init = init->underlayer;
239 } while (init != NULL);
240 if (offset > flow_attr_size)
242 flow_attr_size = offset;
/* Second pass: fill the attribute header, then copy specs back-to-front. */
243 init = &hash_rxq_init[type];
244 *flow_attr = (struct ibv_exp_flow_attr){
245 .type = IBV_EXP_FLOW_ATTR_NORMAL,
246 #ifdef MLX5_FDIR_SUPPORT
247 /* Priorities < 3 are reserved for flow director. */
248 .priority = init->flow_priority + 3,
249 #else /* MLX5_FDIR_SUPPORT */
250 .priority = init->flow_priority,
251 #endif /* MLX5_FDIR_SUPPORT */
/* Specs are written from the end of the buffer toward the header. */
257 offset -= init->flow_spec.hdr.size;
258 memcpy((void *)((uintptr_t)flow_attr + offset),
260 init->flow_spec.hdr.size);
261 ++flow_attr->num_of_specs;
262 init = init->underlayer;
263 } while (init != NULL);
264 return flow_attr_size;
268 * Convert hash type position in indirection table initializer to
269 * hash RX queue type.
272 * Indirection table initializer.
274 * Hash type position.
277 * Hash RX queue type.
/* Map position POS within TABLE's enabled hash-type bitmask to the
 * corresponding hash RX queue type (POS counts set bits from bit 0). */
279 static enum hash_rxq_type
280 hash_rxq_type_from_pos(const struct ind_table_init *table, unsigned int pos)
282 enum hash_rxq_type type = 0;
284 assert(pos < table->hash_types_n);
/* Scan set bits until the pos-th one is reached. */
286 if ((table->hash_types & (1 << type)) && (pos-- == 0))
294 * Filter out disabled hash RX queue types from ind_table_init[].
297 * Pointer to private structure.
302 * Number of table entries.
/* Copy ind_table_init[] into *TABLE, keeping only hash RX queue types
 * enabled by the device RSS configuration; returns the entry count. */
305 priv_make_ind_table_init(struct priv *priv,
306 struct ind_table_init (*table)[IND_TABLE_INIT_N])
311 unsigned int table_n = 0;
312 /* Mandatory to receive frames not handled by normal hash RX queues. */
313 unsigned int hash_types_sup = 1 << HASH_RXQ_ETH;
315 rss_hf = priv->rss_hf;
316 /* Process other protocols only if more than one queue. */
317 if (priv->rxqs_n > 1)
318 for (i = 0; (i != hash_rxq_init_n); ++i)
319 if (rss_hf & hash_rxq_init[i].dpdk_rss_hf)
320 hash_types_sup |= (1 << i);
322 /* Filter out entries whose protocols are not in the set. */
323 for (i = 0, j = 0; (i != IND_TABLE_INIT_N); ++i) {
327 /* j is increased only if the table has valid protocols. */
329 (*table)[j] = ind_table_init[i];
330 (*table)[j].hash_types &= hash_types_sup;
331 for (h = 0, nb = 0; (h != hash_rxq_init_n); ++h)
332 if (((*table)[j].hash_types >> h) & 0x1)
/* NOTE(review): this writes hash_types_n through index i while the
 * entry being built above uses index j; when an earlier entry was
 * filtered out (j < i) the count lands in the wrong slot — verify
 * against upstream whether this should be (*table)[j]. */
334 (*table)[i].hash_types_n = nb;
344 * Initialize hash RX queues and indirection table.
347 * Pointer to private structure.
350 * 0 on success, errno value on failure.
/* Create all indirection tables and hash RX queues for PRIV; on any
 * failure, already-created QPs/tables are torn down before returning. */
353 priv_create_hash_rxqs(struct priv *priv)
355 struct ibv_exp_wq *wqs[priv->reta_idx_n];
356 struct ind_table_init ind_table_init[IND_TABLE_INIT_N];
357 unsigned int ind_tables_n =
358 priv_make_ind_table_init(priv, &ind_table_init);
359 unsigned int hash_rxqs_n = 0;
360 struct hash_rxq (*hash_rxqs)[] = NULL;
361 struct ibv_exp_rwq_ind_table *(*ind_tables)[] = NULL;
/* Must only be called on a device with no hash RX queues yet. */
367 assert(priv->ind_tables == NULL);
368 assert(priv->ind_tables_n == 0);
369 assert(priv->hash_rxqs == NULL);
370 assert(priv->hash_rxqs_n == 0);
371 assert(priv->pd != NULL);
372 assert(priv->ctx != NULL);
373 if (priv->rxqs_n == 0)
375 assert(priv->rxqs != NULL);
376 if (ind_tables_n == 0) {
377 ERROR("all hash RX queue types have been filtered out,"
378 " indirection table cannot be created");
/* Non-power-of-two queue counts still work but balance poorly. */
381 if (priv->rxqs_n & (priv->rxqs_n - 1)) {
382 INFO("%u RX queues are configured, consider rounding this"
383 " number to the next power of two for better balancing",
385 DEBUG("indirection table extended to assume %u WQs",
/* Gather the WQ of every RX queue referenced by the RETA. */
388 for (i = 0; (i != priv->reta_idx_n); ++i)
389 wqs[i] = (*priv->rxqs)[(*priv->reta_idx)[i]]->wq;
390 /* Get number of hash RX queues to configure. */
391 for (i = 0, hash_rxqs_n = 0; (i != ind_tables_n); ++i)
392 hash_rxqs_n += ind_table_init[i].hash_types_n;
393 DEBUG("allocating %u hash RX queues for %u WQs, %u indirection tables",
394 hash_rxqs_n, priv->rxqs_n, ind_tables_n);
395 /* Create indirection tables. */
396 ind_tables = rte_calloc(__func__, ind_tables_n,
397 sizeof((*ind_tables)[0]), 0);
398 if (ind_tables == NULL) {
400 ERROR("cannot allocate indirection tables container: %s",
404 for (i = 0; (i != ind_tables_n); ++i) {
405 struct ibv_exp_rwq_ind_table_init_attr ind_init_attr = {
407 .log_ind_tbl_size = 0, /* Set below. */
411 unsigned int ind_tbl_size = ind_table_init[i].max_size;
412 struct ibv_exp_rwq_ind_table *ind_table;
/* Clamp table size to the number of RETA entries. */
414 if (priv->reta_idx_n < ind_tbl_size)
415 ind_tbl_size = priv->reta_idx_n;
416 ind_init_attr.log_ind_tbl_size = log2above(ind_tbl_size);
418 ind_table = ibv_exp_create_rwq_ind_table(priv->ctx,
420 if (ind_table != NULL) {
421 (*ind_tables)[i] = ind_table;
424 /* Not clear whether errno is set. */
425 err = (errno ? errno : EINVAL);
426 ERROR("RX indirection table creation failed with error %d: %s",
430 /* Allocate array that holds hash RX queues and related data. */
431 hash_rxqs = rte_calloc(__func__, hash_rxqs_n,
432 sizeof((*hash_rxqs)[0]), 0);
433 if (hash_rxqs == NULL) {
435 ERROR("cannot allocate hash RX queues container: %s",
/* i: hash RX queue index, j: indirection table index,
 * k: hash type position inside table j. */
439 for (i = 0, j = 0, k = 0;
440 ((i != hash_rxqs_n) && (j != ind_tables_n));
442 struct hash_rxq *hash_rxq = &(*hash_rxqs)[i];
443 enum hash_rxq_type type =
444 hash_rxq_type_from_pos(&ind_table_init[j], k);
445 struct rte_eth_rss_conf *priv_rss_conf =
446 (*priv->rss_conf)[type];
447 struct ibv_exp_rx_hash_conf hash_conf = {
448 .rx_hash_function = IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
/* Fall back to the built-in default key when the application
 * did not configure one for this hash type. */
449 .rx_hash_key_len = (priv_rss_conf ?
450 priv_rss_conf->rss_key_len :
451 rss_hash_default_key_len),
452 .rx_hash_key = (priv_rss_conf ?
453 priv_rss_conf->rss_key :
454 rss_hash_default_key),
455 .rx_hash_fields_mask = hash_rxq_init[type].hash_fields,
456 .rwq_ind_tbl = (*ind_tables)[j],
458 struct ibv_exp_qp_init_attr qp_init_attr = {
459 .max_inl_recv = 0, /* Currently not supported. */
460 .qp_type = IBV_QPT_RAW_PACKET,
461 .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
462 IBV_EXP_QP_INIT_ATTR_RX_HASH),
464 .rx_hash_conf = &hash_conf,
465 .port_num = priv->port,
468 DEBUG("using indirection table %u for hash RX queue %u type %d",
470 *hash_rxq = (struct hash_rxq){
472 .qp = ibv_exp_create_qp(priv->ctx, &qp_init_attr),
475 if (hash_rxq->qp == NULL) {
476 err = (errno ? errno : EINVAL),
478 err = (errno ? errno : EINVAL);
477 ERROR("Hash RX QP creation failure: %s",
481 if (++k < ind_table_init[j].hash_types_n)
483 /* Switch to the next indirection table and reset hash RX
484 * queue type array index. */
/* Success: publish the new arrays in the private structure. */
488 priv->ind_tables = ind_tables;
489 priv->ind_tables_n = ind_tables_n;
490 priv->hash_rxqs = hash_rxqs;
491 priv->hash_rxqs_n = hash_rxqs_n;
/* Error path: destroy whatever was created, in reverse order. */
495 if (hash_rxqs != NULL) {
496 for (i = 0; (i != hash_rxqs_n); ++i) {
497 struct ibv_qp *qp = (*hash_rxqs)[i].qp;
501 claim_zero(ibv_destroy_qp(qp));
505 if (ind_tables != NULL) {
506 for (j = 0; (j != ind_tables_n); ++j) {
507 struct ibv_exp_rwq_ind_table *ind_table =
510 if (ind_table == NULL)
512 claim_zero(ibv_exp_destroy_rwq_ind_table(ind_table));
514 rte_free(ind_tables);
520 * Clean up hash RX queues and indirection table.
523 * Pointer to private structure.
/* Destroy every hash RX queue QP and indirection table owned by PRIV and
 * reset the related counters/pointers; asserts no flows remain attached. */
526 priv_destroy_hash_rxqs(struct priv *priv)
530 DEBUG("destroying %u hash RX queues", priv->hash_rxqs_n);
531 if (priv->hash_rxqs_n == 0) {
532 assert(priv->hash_rxqs == NULL);
533 assert(priv->ind_tables == NULL);
536 for (i = 0; (i != priv->hash_rxqs_n); ++i) {
537 struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i];
540 assert(hash_rxq->priv == priv);
541 assert(hash_rxq->qp != NULL);
542 /* Also check that there are no remaining flows. */
543 for (j = 0; (j != RTE_DIM(hash_rxq->special_flow)); ++j)
545 (k != RTE_DIM(hash_rxq->special_flow[j]));
547 assert(hash_rxq->special_flow[j][k] == NULL);
548 for (j = 0; (j != RTE_DIM(hash_rxq->mac_flow)); ++j)
549 for (k = 0; (k != RTE_DIM(hash_rxq->mac_flow[j])); ++k)
550 assert(hash_rxq->mac_flow[j][k] == NULL);
551 claim_zero(ibv_destroy_qp(hash_rxq->qp));
553 priv->hash_rxqs_n = 0;
554 rte_free(priv->hash_rxqs);
555 priv->hash_rxqs = NULL;
/* Indirection tables are destroyed after the QPs that reference them. */
556 for (i = 0; (i != priv->ind_tables_n); ++i) {
557 struct ibv_exp_rwq_ind_table *ind_table =
558 (*priv->ind_tables)[i];
560 assert(ind_table != NULL);
561 claim_zero(ibv_exp_destroy_rwq_ind_table(ind_table));
563 priv->ind_tables_n = 0;
564 rte_free(priv->ind_tables);
565 priv->ind_tables = NULL;
569 * Check whether a given flow type is allowed.
572 * Pointer to private structure.
574 * Flow type to check.
577 * Nonzero if the given flow type is allowed.
/* Return nonzero when flow TYPE may be enabled given the current
 * promiscuous/allmulticast configuration of PRIV. */
580 priv_allow_flow_type(struct priv *priv, enum hash_rxq_flow_type type)
582 /* Only FLOW_TYPE_PROMISC is allowed when promiscuous mode
583 * has been requested. */
584 if (priv->promisc_req)
585 return type == HASH_RXQ_FLOW_TYPE_PROMISC;
587 case HASH_RXQ_FLOW_TYPE_PROMISC:
588 return !!priv->promisc_req;
589 case HASH_RXQ_FLOW_TYPE_ALLMULTI:
590 return !!priv->allmulti_req;
591 case HASH_RXQ_FLOW_TYPE_BROADCAST:
592 #ifdef HAVE_FLOW_SPEC_IPV6
593 case HASH_RXQ_FLOW_TYPE_IPV6MULTI:
594 #endif /* HAVE_FLOW_SPEC_IPV6 */
595 /* If allmulti is enabled, broadcast and ipv6multi
596 * are unnecessary. */
597 return !priv->allmulti_req;
598 case HASH_RXQ_FLOW_TYPE_MAC:
601 /* Unsupported flow type is not allowed. */
608 * Automatically enable/disable flows according to configuration.
614 * 0 on success, errno value on failure.
/* Enable or disable each special flow and the MAC flows according to the
 * current configuration (see priv_allow_flow_type()). */
617 priv_rehash_flows(struct priv *priv)
621 for (i = 0; (i != RTE_DIM((*priv->hash_rxqs)[0].special_flow)); ++i)
622 if (!priv_allow_flow_type(priv, i)) {
623 priv_special_flow_disable(priv, i);
625 int ret = priv_special_flow_enable(priv, i);
/* MAC flows are handled last, after all special flows. */
630 if (priv_allow_flow_type(priv, HASH_RXQ_FLOW_TYPE_MAC))
631 return priv_mac_addrs_enable(priv);
632 priv_mac_addrs_disable(priv);
637 * Allocate RX queue elements with scattered packets support.
640 * Pointer to RX queue structure.
642 * Number of elements to allocate.
644 * If not NULL, fetch buffers from this array instead of allocating them
645 * with rte_pktmbuf_alloc().
648 * 0 on success, errno value on failure.
/* Allocate ELTS_N multi-segment (scattered) RX elements for RXQ, taking
 * mbufs from POOL when non-NULL, otherwise from rxq->mp; frees everything
 * on failure. */
651 rxq_alloc_elts_sp(struct rxq *rxq, unsigned int elts_n,
652 struct rte_mbuf **pool)
655 struct rxq_elt_sp (*elts)[elts_n] =
656 rte_calloc_socket("RXQ elements", 1, sizeof(*elts), 0,
661 ERROR("%p: can't allocate packets array", (void *)rxq);
665 /* For each WR (packet). */
666 for (i = 0; (i != elts_n); ++i) {
668 struct rxq_elt_sp *elt = &(*elts)[i];
669 struct ibv_sge (*sges)[RTE_DIM(elt->sges)] = &elt->sges;
671 /* These two arrays must have the same size. */
672 assert(RTE_DIM(elt->sges) == RTE_DIM(elt->bufs));
673 /* For each SGE (segment). */
674 for (j = 0; (j != RTE_DIM(elt->bufs)); ++j) {
675 struct ibv_sge *sge = &(*sges)[j];
676 struct rte_mbuf *buf;
/* Pool-provided mbufs must be reset before reuse. */
681 rte_pktmbuf_reset(buf);
683 buf = rte_pktmbuf_alloc(rxq->mp);
685 assert(pool == NULL);
686 ERROR("%p: empty mbuf pool", (void *)rxq);
691 /* Headroom is reserved by rte_pktmbuf_alloc(). */
692 assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
693 /* Buffer is supposed to be empty. */
694 assert(rte_pktmbuf_data_len(buf) == 0);
695 assert(rte_pktmbuf_pkt_len(buf) == 0);
696 /* sge->addr must be able to store a pointer. */
697 assert(sizeof(sge->addr) >= sizeof(uintptr_t));
699 /* The first SGE keeps its headroom. */
700 sge->addr = rte_pktmbuf_mtod(buf, uintptr_t);
701 sge->length = (buf->buf_len -
702 RTE_PKTMBUF_HEADROOM);
704 /* Subsequent SGEs lose theirs. */
705 assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
706 SET_DATA_OFF(buf, 0);
707 sge->addr = (uintptr_t)buf->buf_addr;
708 sge->length = buf->buf_len;
710 sge->lkey = rxq->mr->lkey;
711 /* Redundant check for tailroom. */
712 assert(sge->length == rte_pktmbuf_tailroom(buf));
715 DEBUG("%p: allocated and configured %u WRs (%zu segments)",
716 (void *)rxq, elts_n, (elts_n * RTE_DIM((*elts)[0].sges)));
717 rxq->elts_n = elts_n;
/* Error path: release every mbuf allocated so far, then the array. */
724 assert(pool == NULL);
725 for (i = 0; (i != RTE_DIM(*elts)); ++i) {
727 struct rxq_elt_sp *elt = &(*elts)[i];
729 for (j = 0; (j != RTE_DIM(elt->bufs)); ++j) {
730 struct rte_mbuf *buf = elt->bufs[j];
733 rte_pktmbuf_free_seg(buf);
738 DEBUG("%p: failed, freed everything", (void *)rxq);
744 * Free RX queue elements with scattered packets support.
747 * Pointer to RX queue structure.
/* Free all multi-segment RX elements of RXQ and their mbufs. */
750 rxq_free_elts_sp(struct rxq *rxq)
753 unsigned int elts_n = rxq->elts_n;
754 struct rxq_elt_sp (*elts)[elts_n] = rxq->elts.sp;
756 DEBUG("%p: freeing WRs", (void *)rxq);
761 for (i = 0; (i != RTE_DIM(*elts)); ++i) {
763 struct rxq_elt_sp *elt = &(*elts)[i];
765 for (j = 0; (j != RTE_DIM(elt->bufs)); ++j) {
766 struct rte_mbuf *buf = elt->bufs[j];
/* Segments are freed individually; NULL entries are skipped. */
769 rte_pktmbuf_free_seg(buf);
776 * Allocate RX queue elements.
779 * Pointer to RX queue structure.
781 * Number of elements to allocate.
783 * If not NULL, fetch buffers from this array instead of allocating them
784 * with rte_pktmbuf_alloc().
787 * 0 on success, errno value on failure.
/* Allocate ELTS_N single-segment RX elements for RXQ, taking mbufs from
 * POOL when non-NULL, otherwise from rxq->mp; frees everything on failure. */
790 rxq_alloc_elts(struct rxq *rxq, unsigned int elts_n, struct rte_mbuf **pool)
793 struct rxq_elt (*elts)[elts_n] =
794 rte_calloc_socket("RXQ elements", 1, sizeof(*elts), 0,
799 ERROR("%p: can't allocate packets array", (void *)rxq);
803 /* For each WR (packet). */
804 for (i = 0; (i != elts_n); ++i) {
805 struct rxq_elt *elt = &(*elts)[i];
806 struct ibv_sge *sge = &(*elts)[i].sge;
807 struct rte_mbuf *buf;
/* Pool-provided mbufs must be reset before reuse. */
812 rte_pktmbuf_reset(buf);
814 buf = rte_pktmbuf_alloc(rxq->mp);
816 assert(pool == NULL);
817 ERROR("%p: empty mbuf pool", (void *)rxq);
822 /* Headroom is reserved by rte_pktmbuf_alloc(). */
823 assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
824 /* Buffer is supposed to be empty. */
825 assert(rte_pktmbuf_data_len(buf) == 0);
826 assert(rte_pktmbuf_pkt_len(buf) == 0);
827 /* sge->addr must be able to store a pointer. */
828 assert(sizeof(sge->addr) >= sizeof(uintptr_t));
829 /* SGE keeps its headroom. */
830 sge->addr = (uintptr_t)
831 ((uint8_t *)buf->buf_addr + RTE_PKTMBUF_HEADROOM);
832 sge->length = (buf->buf_len - RTE_PKTMBUF_HEADROOM);
833 sge->lkey = rxq->mr->lkey;
834 /* Redundant check for tailroom. */
835 assert(sge->length == rte_pktmbuf_tailroom(buf));
837 DEBUG("%p: allocated and configured %u single-segment WRs",
838 (void *)rxq, elts_n);
839 rxq->elts_n = elts_n;
841 rxq->elts.no_sp = elts;
/* Error path: release every mbuf allocated so far, then the array. */
846 assert(pool == NULL);
847 for (i = 0; (i != RTE_DIM(*elts)); ++i) {
848 struct rxq_elt *elt = &(*elts)[i];
849 struct rte_mbuf *buf = elt->buf;
852 rte_pktmbuf_free_seg(buf);
856 DEBUG("%p: failed, freed everything", (void *)rxq);
862 * Free RX queue elements.
865 * Pointer to RX queue structure.
/* Free all single-segment RX elements of RXQ and their mbufs. */
868 rxq_free_elts(struct rxq *rxq)
871 unsigned int elts_n = rxq->elts_n;
872 struct rxq_elt (*elts)[elts_n] = rxq->elts.no_sp;
874 DEBUG("%p: freeing WRs", (void *)rxq);
/* Detach the array from the queue before releasing its contents. */
876 rxq->elts.no_sp = NULL;
879 for (i = 0; (i != RTE_DIM(*elts)); ++i) {
880 struct rxq_elt *elt = &(*elts)[i];
881 struct rte_mbuf *buf = elt->buf;
884 rte_pktmbuf_free_seg(buf);
890 * Clean up a RX queue.
892 * Destroy objects, free allocated memory and reset the structure for reuse.
895 * Pointer to RX queue structure.
/* Release every Verbs object and mbuf owned by RXQ (interfaces, WQ, CQ,
 * resource domain, MR), then zero the structure so it can be reused. */
898 rxq_cleanup(struct rxq *rxq)
900 struct ibv_exp_release_intf_params params;
902 DEBUG("cleaning up %p", (void *)rxq);
/* Elements are freed first; both sp and no-sp layouts are handled. */
904 rxq_free_elts_sp(rxq)
909 if (rxq->if_wq != NULL) {
910 assert(rxq->priv != NULL);
911 assert(rxq->priv->ctx != NULL);
912 assert(rxq->wq != NULL);
913 params = (struct ibv_exp_release_intf_params){
916 claim_zero(ibv_exp_release_intf(rxq->priv->ctx,
920 if (rxq->if_cq != NULL) {
921 assert(rxq->priv != NULL);
922 assert(rxq->priv->ctx != NULL);
923 assert(rxq->cq != NULL);
924 params = (struct ibv_exp_release_intf_params){
927 claim_zero(ibv_exp_release_intf(rxq->priv->ctx,
/* WQ must be destroyed before the CQ it completes into. */
932 claim_zero(ibv_exp_destroy_wq(rxq->wq));
934 claim_zero(ibv_destroy_cq(rxq->cq));
935 if (rxq->rd != NULL) {
936 struct ibv_exp_destroy_res_domain_attr attr = {
940 assert(rxq->priv != NULL);
941 assert(rxq->priv->ctx != NULL);
942 claim_zero(ibv_exp_destroy_res_domain(rxq->priv->ctx,
947 claim_zero(ibv_dereg_mr(rxq->mr));
948 memset(rxq, 0, sizeof(*rxq));
952 * Reconfigure a RX queue with new parameters.
954 * rxq_rehash() does not allocate mbufs, which, if not done from the right
955 * thread (such as a control thread), may corrupt the pool.
956 * In case of failure, the queue is left untouched.
959 * Pointer to Ethernet device structure.
964 * 0 on success, errno value on failure.
/* Reconfigure RXQ in place when the scattered-packets setting changes,
 * recycling the existing mbufs; works on a local copy (tmpl) so the
 * original queue stays untouched until success. Must not allocate mbufs
 * outside the control thread (pool corruption risk, see header comment). */
967 rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
969 struct priv *priv = rxq->priv;
970 struct rxq tmpl = *rxq;
973 struct rte_mbuf **pool;
975 struct ibv_exp_wq_attr mod;
976 unsigned int mb_len = rte_pktmbuf_data_room_size(rxq->mp);
979 DEBUG("%p: rehashing queue %p", (void *)dev, (void *)rxq);
980 /* Number of descriptors and mbufs currently allocated. */
981 desc_n = (tmpl.elts_n * (tmpl.sp ? MLX5_PMD_SGE_WR_N : 1));
983 /* Toggle RX checksum offload if hardware supports it. */
985 tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
986 rxq->csum = tmpl.csum;
988 if (priv->hw_csum_l2tun) {
989 tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
990 rxq->csum_l2tun = tmpl.csum_l2tun;
992 /* Enable scattered packets support for this queue if necessary. */
993 assert(mb_len >= RTE_PKTMBUF_HEADROOM);
994 if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
995 (dev->data->dev_conf.rxmode.max_rx_pkt_len >
996 (mb_len - RTE_PKTMBUF_HEADROOM))) {
998 desc_n /= MLX5_PMD_SGE_WR_N;
1001 DEBUG("%p: %s scattered packets support (%u WRs)",
1002 (void *)dev, (tmpl.sp ? "enabling" : "disabling"), desc_n);
1003 /* If scatter mode is the same as before, nothing to do. */
1004 if (tmpl.sp == rxq->sp) {
1005 DEBUG("%p: nothing to do", (void *)dev);
1008 /* From now on, any failure will render the queue unusable.
1009 * Reinitialize WQ. */
1010 mod = (struct ibv_exp_wq_attr){
1011 .attr_mask = IBV_EXP_WQ_ATTR_STATE,
1012 .wq_state = IBV_EXP_WQS_RESET,
1014 err = ibv_exp_modify_wq(tmpl.wq, &mod);
1016 ERROR("%p: cannot reset WQ: %s", (void *)dev, strerror(err));
1020 /* Allocate pool. */
1021 pool = rte_malloc(__func__, (mbuf_n * sizeof(*pool)), 0);
1023 ERROR("%p: cannot allocate memory", (void *)dev);
1026 /* Snatch mbufs from original queue. */
1029 struct rxq_elt_sp (*elts)[rxq->elts_n] = rxq->elts.sp;
1031 for (i = 0; (i != RTE_DIM(*elts)); ++i) {
1032 struct rxq_elt_sp *elt = &(*elts)[i];
1035 for (j = 0; (j != RTE_DIM(elt->bufs)); ++j) {
1036 assert(elt->bufs[j] != NULL);
1037 pool[k++] = elt->bufs[j];
1041 struct rxq_elt (*elts)[rxq->elts_n] = rxq->elts.no_sp;
1043 for (i = 0; (i != RTE_DIM(*elts)); ++i) {
1044 struct rxq_elt *elt = &(*elts)[i];
1045 struct rte_mbuf *buf = elt->buf;
1050 assert(k == mbuf_n);
1052 tmpl.elts.sp = NULL;
1053 assert((void *)&tmpl.elts.sp == (void *)&tmpl.elts.no_sp);
/* Re-allocate elements in the new layout, reusing the snatched mbufs. */
1055 rxq_alloc_elts_sp(&tmpl, desc_n, pool) :
1056 rxq_alloc_elts(&tmpl, desc_n, pool));
1058 ERROR("%p: cannot reallocate WRs, aborting", (void *)dev);
1063 assert(tmpl.elts_n == desc_n);
1064 assert(tmpl.elts.sp != NULL);
1066 /* Clean up original data. */
1068 rte_free(rxq->elts.sp);
1069 rxq->elts.sp = NULL;
1070 /* Change queue state to ready. */
1071 mod = (struct ibv_exp_wq_attr){
1072 .attr_mask = IBV_EXP_WQ_ATTR_STATE,
1073 .wq_state = IBV_EXP_WQS_RDY,
1075 err = ibv_exp_modify_wq(tmpl.wq, &mod);
1077 ERROR("%p: WQ state to IBV_EXP_WQS_RDY failed: %s",
1078 (void *)dev, strerror(err));
/* Post the receive buffers through the WQ interface. */
1082 assert(tmpl.if_wq != NULL);
1084 struct rxq_elt_sp (*elts)[tmpl.elts_n] = tmpl.elts.sp;
1086 for (i = 0; (i != RTE_DIM(*elts)); ++i) {
1087 err = tmpl.if_wq->recv_sg_list
1090 RTE_DIM((*elts)[i].sges));
1095 struct rxq_elt (*elts)[tmpl.elts_n] = tmpl.elts.no_sp;
1097 for (i = 0; (i != RTE_DIM(*elts)); ++i) {
1098 err = tmpl.if_wq->recv_burst(
1107 ERROR("%p: failed to post SGEs with error %d",
1109 /* Set err because it does not contain a valid errno value. */
/* Select the receive callback matching the element layout. */
1114 tmpl.recv = tmpl.if_wq->recv_sg_list;
1116 tmpl.recv = tmpl.if_wq->recv_burst;
1124 * Configure a RX queue.
1127 * Pointer to Ethernet device structure.
1129 * Pointer to RX queue structure.
1131 * Number of descriptors to configure in queue.
1133 * NUMA socket on which memory must be allocated.
1135 * Thresholds parameters.
1137 * Memory pool for buffer allocations.
1140 * 0 on success, errno value on failure.
/* Fully configure RXQ: register the mempool as an MR, create resource
 * domain, CQ and WQ, allocate/post receive elements and query the fast
 * verbs interfaces; builds everything in a template and commits at the
 * end so a failure leaves RXQ usable for cleanup. */
1143 rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
1144 unsigned int socket, const struct rte_eth_rxconf *conf,
1145 struct rte_mempool *mp)
1147 struct priv *priv = dev->data->dev_private;
1153 struct ibv_exp_wq_attr mod;
1155 struct ibv_exp_query_intf_params params;
1156 struct ibv_exp_cq_init_attr cq;
1157 struct ibv_exp_res_domain_init_attr rd;
1158 struct ibv_exp_wq_init_attr wq;
1160 enum ibv_exp_query_intf_status status;
1161 unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
1164 unsigned int cq_size = desc;
1166 (void)conf; /* Thresholds configuration (ignored). */
1167 if ((desc == 0) || (desc % MLX5_PMD_SGE_WR_N)) {
1168 ERROR("%p: invalid number of RX descriptors (must be a"
1169 " multiple of %d)", (void *)dev, MLX5_PMD_SGE_WR_N);
1172 /* Toggle RX checksum offload if hardware supports it. */
1174 tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
1175 if (priv->hw_csum_l2tun)
1176 tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
1177 /* Enable scattered packets support for this queue if necessary. */
1178 assert(mb_len >= RTE_PKTMBUF_HEADROOM);
1179 if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
1180 (dev->data->dev_conf.rxmode.max_rx_pkt_len >
1181 (mb_len - RTE_PKTMBUF_HEADROOM))) {
/* In scattered mode each packet consumes MLX5_PMD_SGE_WR_N SGEs. */
1183 desc /= MLX5_PMD_SGE_WR_N;
1185 DEBUG("%p: %s scattered packets support (%u WRs)",
1186 (void *)dev, (tmpl.sp ? "enabling" : "disabling"), desc);
1187 /* Use the entire RX mempool as the memory region. */
1188 tmpl.mr = mlx5_mp2mr(priv->pd, mp);
1189 if (tmpl.mr == NULL) {
1191 ERROR("%p: MR creation failure: %s",
1192 (void *)dev, strerror(ret));
/* Single-threaded, high-bandwidth resource domain for this queue. */
1195 attr.rd = (struct ibv_exp_res_domain_init_attr){
1196 .comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL |
1197 IBV_EXP_RES_DOMAIN_MSG_MODEL),
1198 .thread_model = IBV_EXP_THREAD_SINGLE,
1199 .msg_model = IBV_EXP_MSG_HIGH_BW,
1201 tmpl.rd = ibv_exp_create_res_domain(priv->ctx, &attr.rd);
1202 if (tmpl.rd == NULL) {
1204 ERROR("%p: RD creation failure: %s",
1205 (void *)dev, strerror(ret));
1208 attr.cq = (struct ibv_exp_cq_init_attr){
1209 .comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
1210 .res_domain = tmpl.rd,
1212 tmpl.cq = ibv_exp_create_cq(priv->ctx, cq_size, NULL, NULL, 0,
1214 if (tmpl.cq == NULL) {
1216 ERROR("%p: CQ creation failure: %s",
1217 (void *)dev, strerror(ret));
1220 DEBUG("priv->device_attr.max_qp_wr is %d",
1221 priv->device_attr.max_qp_wr);
1222 DEBUG("priv->device_attr.max_sge is %d",
1223 priv->device_attr.max_sge);
1224 /* Configure VLAN stripping. */
1225 tmpl.vlan_strip = dev->data->dev_conf.rxmode.hw_vlan_strip;
1226 attr.wq = (struct ibv_exp_wq_init_attr){
1227 .wq_context = NULL, /* Could be useful in the future. */
1228 .wq_type = IBV_EXP_WQT_RQ,
1229 /* Max number of outstanding WRs. */
1230 .max_recv_wr = ((priv->device_attr.max_qp_wr < (int)cq_size) ?
1231 priv->device_attr.max_qp_wr :
1233 /* Max number of scatter/gather elements in a WR. */
1234 .max_recv_sge = ((priv->device_attr.max_sge <
1235 MLX5_PMD_SGE_WR_N) ?
1236 priv->device_attr.max_sge :
1241 IBV_EXP_CREATE_WQ_RES_DOMAIN |
1242 #ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
1243 IBV_EXP_CREATE_WQ_VLAN_OFFLOADS |
1244 #endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
1246 .res_domain = tmpl.rd,
1247 #ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
1248 .vlan_offloads = (tmpl.vlan_strip ?
1249 IBV_EXP_RECEIVE_WQ_CVLAN_STRIP :
1251 #endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
1254 #ifdef HAVE_VERBS_FCS
1255 /* By default, FCS (CRC) is stripped by hardware. */
1256 if (dev->data->dev_conf.rxmode.hw_strip_crc) {
1257 tmpl.crc_present = 0;
1258 } else if (priv->hw_fcs_strip) {
1259 /* Ask HW/Verbs to leave CRC in place when supported. */
1260 attr.wq.flags |= IBV_EXP_CREATE_WQ_FLAG_SCATTER_FCS;
1261 attr.wq.comp_mask |= IBV_EXP_CREATE_WQ_FLAGS;
1262 tmpl.crc_present = 1;
1264 WARN("%p: CRC stripping has been disabled but will still"
1265 " be performed by hardware, make sure MLNX_OFED and"
1266 " firmware are up to date",
1268 tmpl.crc_present = 0;
1270 DEBUG("%p: CRC stripping is %s, %u bytes will be subtracted from"
1271 " incoming frames to hide it",
1273 tmpl.crc_present ? "disabled" : "enabled",
1274 tmpl.crc_present << 2);
1275 #endif /* HAVE_VERBS_FCS */
1277 #ifdef HAVE_VERBS_RX_END_PADDING
/* Packet padding is opt-in via the MLX5_PMD_ENABLE_PADDING env var. */
1278 if (!mlx5_getenv_int("MLX5_PMD_ENABLE_PADDING"))
1279 ; /* Nothing else to do. */
1280 else if (priv->hw_padding) {
1281 INFO("%p: enabling packet padding on queue %p",
1282 (void *)dev, (void *)rxq);
1283 attr.wq.flags |= IBV_EXP_CREATE_WQ_FLAG_RX_END_PADDING;
1284 attr.wq.comp_mask |= IBV_EXP_CREATE_WQ_FLAGS;
1286 WARN("%p: packet padding has been requested but is not"
1287 " supported, make sure MLNX_OFED and firmware are"
1290 #endif /* HAVE_VERBS_RX_END_PADDING */
1292 tmpl.wq = ibv_exp_create_wq(priv->ctx, &attr.wq);
1293 if (tmpl.wq == NULL) {
1294 ret = (errno ? errno : EINVAL);
1295 ERROR("%p: WQ creation failure: %s",
1296 (void *)dev, strerror(ret));
/* Allocate elements matching the selected (sp/no-sp) layout. */
1300 ret = rxq_alloc_elts_sp(&tmpl, desc, NULL);
1302 ret = rxq_alloc_elts(&tmpl, desc, NULL);
1304 ERROR("%p: RXQ allocation failed: %s",
1305 (void *)dev, strerror(ret));
1309 tmpl.port_id = dev->data->port_id;
1310 DEBUG("%p: RTE port ID: %u", (void *)rxq, tmpl.port_id);
/* Query fast-path interface families for the CQ and the WQ. */
1311 attr.params = (struct ibv_exp_query_intf_params){
1312 .intf_scope = IBV_EXP_INTF_GLOBAL,
1313 #ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
1315 #endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
1316 .intf = IBV_EXP_INTF_CQ,
1319 tmpl.if_cq = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
1320 if (tmpl.if_cq == NULL) {
1321 ERROR("%p: CQ interface family query failed with status %d",
1322 (void *)dev, status);
1325 attr.params = (struct ibv_exp_query_intf_params){
1326 .intf_scope = IBV_EXP_INTF_GLOBAL,
1327 .intf = IBV_EXP_INTF_WQ,
1330 tmpl.if_wq = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
1331 if (tmpl.if_wq == NULL) {
1332 ERROR("%p: WQ interface family query failed with status %d",
1333 (void *)dev, status);
1336 /* Change queue state to ready. */
1337 mod = (struct ibv_exp_wq_attr){
1338 .attr_mask = IBV_EXP_WQ_ATTR_STATE,
1339 .wq_state = IBV_EXP_WQS_RDY,
1341 ret = ibv_exp_modify_wq(tmpl.wq, &mod);
1343 ERROR("%p: WQ state to IBV_EXP_WQS_RDY failed: %s",
1344 (void *)dev, strerror(ret));
/* Post all receive buffers. */
1349 struct rxq_elt_sp (*elts)[tmpl.elts_n] = tmpl.elts.sp;
1351 for (i = 0; (i != RTE_DIM(*elts)); ++i) {
1352 ret = tmpl.if_wq->recv_sg_list
1355 RTE_DIM((*elts)[i].sges));
1360 struct rxq_elt (*elts)[tmpl.elts_n] = tmpl.elts.no_sp;
1362 for (i = 0; (i != RTE_DIM(*elts)); ++i) {
1363 ret = tmpl.if_wq->recv_burst(
1372 ERROR("%p: failed to post SGEs with error %d",
1374 /* Set ret because it does not contain a valid errno value. */
1378 /* Clean up rxq in case we're reinitializing it. */
1379 DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq);
1382 DEBUG("%p: rxq updated with %p", (void *)rxq, (void *)&tmpl);
1384 /* Assign function in queue. */
1385 #ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
1386 rxq->poll = rxq->if_cq->poll_length_flags_cvlan;
1387 #else /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
1388 rxq->poll = rxq->if_cq->poll_length_flags;
1389 #endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
1391 rxq->recv = rxq->if_wq->recv_sg_list;
1393 rxq->recv = rxq->if_wq->recv_burst;
/**
 * DPDK callback to configure a RX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   RX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
/*
 * rx_queue_setup callback: validate the queue index, reuse or allocate the
 * rxq control structure, initialize it through rxq_setup() and install the
 * matching RX burst callback on success.
 * NOTE(review): this extract is missing interior lines (the embedded file
 * numbering jumps, e.g. 1437 -> 1442), so error-path braces, locking and the
 * final return are not visible here; comments describe only visible code.
 */
1421 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1422 unsigned int socket, const struct rte_eth_rxconf *conf,
1423 struct rte_mempool *mp)
1425 struct priv *priv = dev->data->dev_private;
1426 struct rxq *rxq = (*priv->rxqs)[idx];
/* Queue setup must run in the primary process only. */
1429 if (mlx5_is_secondary())
1430 return -E_RTE_SECONDARY;
1433 DEBUG("%p: configuring queue %u for %u descriptors",
1434 (void *)dev, idx, desc);
/* Reject out-of-range queue indexes. */
1435 if (idx >= priv->rxqs_n) {
1436 ERROR("%p: queue index out of range (%u >= %u)",
1437 (void *)dev, idx, priv->rxqs_n);
/* Slot already populated: reuse it; presumably refused while the port is
 * started — TODO confirm against the missing branch body. */
1442 DEBUG("%p: reusing already allocated queue index %u (%p)",
1443 (void *)dev, idx, (void *)rxq);
1444 if (priv->started) {
/* Detach the old rxq from the array before reconfiguring. */
1448 (*priv->rxqs)[idx] = NULL;
/* Fresh allocation on the requested NUMA socket. */
1451 rxq = rte_calloc_socket("RXQ", 1, sizeof(*rxq), 0, socket);
1453 ERROR("%p: unable to allocate queue index %u",
/* Delegate the actual Verbs/queue initialization. */
1459 ret = rxq_setup(dev, rxq, desc, socket, conf, mp);
1463 rxq->stats.idx = idx;
1464 DEBUG("%p: adding RX queue %p to list",
1465 (void *)dev, (void *)rxq);
1466 (*priv->rxqs)[idx] = rxq;
1467 /* Update receive callback. */
/* Scattered (multi-segment) vs. simple burst handler; the selecting
 * condition is among the missing lines — TODO confirm (likely rxq->sp). */
1469 dev->rx_pkt_burst = mlx5_rx_burst_sp;
1471 dev->rx_pkt_burst = mlx5_rx_burst;
/**
 * DPDK callback to release a RX queue.
 *
 * @param dpdk_rxq
 *   Generic RX queue pointer.
 */
/*
 * rx_queue_release callback: locate the rxq in the device's queue array and
 * clear its slot before (presumably, in the missing tail) cleaning it up and
 * freeing it — TODO confirm against full source.
 * NOTE(review): head (return type, opening brace, priv lookup) and tail
 * (cleanup/free) lines are missing from this extract.
 */
1484 mlx5_rx_queue_release(void *dpdk_rxq)
1486 struct rxq *rxq = (struct rxq *)dpdk_rxq;
/* Secondary processes do not own the queues; the guarded statement
 * (presumably an early return) is among the missing lines. */
1490 if (mlx5_is_secondary())
/* Find which slot of the queue array points at this rxq. */
1497 for (i = 0; (i != priv->rxqs_n); ++i)
1498 if ((*priv->rxqs)[i] == rxq) {
1499 DEBUG("%p: removing RX queue %p from list",
1500 (void *)priv->dev, (void *)rxq);
/* Unhook the queue so the device no longer references it. */
1501 (*priv->rxqs)[i] = NULL;
/**
 * DPDK callback for RX in secondary processes.
 *
 * This function configures all queues from primary process information
 * if necessary before reverting to the normal RX burst callback.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
/*
 * One-shot RX burst handler for secondary processes: set up the secondary
 * process data, translate the primary-process rxq pointer into this
 * process's own queue at the same index, then forward the burst to the real
 * handler installed in dev->rx_pkt_burst.
 * NOTE(review): return-type line, local declarations (index, pkts_n) and the
 * error returns are missing from this extract.
 */
1526 mlx5_rx_burst_secondary_setup(void *dpdk_rxq, struct rte_mbuf **pkts,
1529 struct rxq *rxq = dpdk_rxq;
/* Ensure this process's private data/queues are initialized. */
1530 struct priv *priv = mlx5_secondary_data_setup(rxq->priv);
1531 struct priv *primary_priv;
1537 mlx5_secondary_data[priv->dev->data->port_id].primary_priv;
1538 /* Look for queue index in both private structures. */
1539 for (index = 0; index != priv->rxqs_n; ++index)
1540 if (((*primary_priv->rxqs)[index] == rxq) ||
1541 ((*priv->rxqs)[index] == rxq))
/* Not found: the (missing) statement presumably returns 0 — TODO confirm. */
1543 if (index == priv->rxqs_n)
/* Substitute the local queue object and tail-call the real burst handler. */
1545 rxq = (*priv->rxqs)[index];
1546 return priv->dev->rx_pkt_burst(rxq, pkts, pkts_n);