4 * Copyright 2015 6WIND S.A.
5 * Copyright 2015 Mellanox.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
44 #pragma GCC diagnostic ignored "-Wpedantic"
46 #include <infiniband/verbs.h>
47 #include <infiniband/mlx5dv.h>
49 #pragma GCC diagnostic error "-Wpedantic"
53 #include <rte_malloc.h>
54 #include <rte_ethdev.h>
55 #include <rte_common.h>
56 #include <rte_interrupts.h>
57 #include <rte_debug.h>
61 #include "mlx5_rxtx.h"
62 #include "mlx5_utils.h"
63 #include "mlx5_autoconf.h"
64 #include "mlx5_defs.h"
66 /* Initialization data for hash RX queues. */
67 const struct hash_rxq_init hash_rxq_init[] = {
69 .hash_fields = (IBV_RX_HASH_SRC_IPV4 |
70 IBV_RX_HASH_DST_IPV4 |
71 IBV_RX_HASH_SRC_PORT_TCP |
72 IBV_RX_HASH_DST_PORT_TCP),
73 .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_TCP,
75 .flow_spec.tcp_udp = {
76 .type = IBV_FLOW_SPEC_TCP,
77 .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
79 .underlayer = &hash_rxq_init[HASH_RXQ_IPV4],
82 .hash_fields = (IBV_RX_HASH_SRC_IPV4 |
83 IBV_RX_HASH_DST_IPV4 |
84 IBV_RX_HASH_SRC_PORT_UDP |
85 IBV_RX_HASH_DST_PORT_UDP),
86 .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_UDP,
88 .flow_spec.tcp_udp = {
89 .type = IBV_FLOW_SPEC_UDP,
90 .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
92 .underlayer = &hash_rxq_init[HASH_RXQ_IPV4],
95 .hash_fields = (IBV_RX_HASH_SRC_IPV4 |
96 IBV_RX_HASH_DST_IPV4),
97 .dpdk_rss_hf = (ETH_RSS_IPV4 |
101 .type = IBV_FLOW_SPEC_IPV4,
102 .size = sizeof(hash_rxq_init[0].flow_spec.ipv4),
104 .underlayer = &hash_rxq_init[HASH_RXQ_ETH],
107 .hash_fields = (IBV_RX_HASH_SRC_IPV6 |
108 IBV_RX_HASH_DST_IPV6 |
109 IBV_RX_HASH_SRC_PORT_TCP |
110 IBV_RX_HASH_DST_PORT_TCP),
111 .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_TCP,
113 .flow_spec.tcp_udp = {
114 .type = IBV_FLOW_SPEC_TCP,
115 .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
117 .underlayer = &hash_rxq_init[HASH_RXQ_IPV6],
120 .hash_fields = (IBV_RX_HASH_SRC_IPV6 |
121 IBV_RX_HASH_DST_IPV6 |
122 IBV_RX_HASH_SRC_PORT_UDP |
123 IBV_RX_HASH_DST_PORT_UDP),
124 .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_UDP,
126 .flow_spec.tcp_udp = {
127 .type = IBV_FLOW_SPEC_UDP,
128 .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
130 .underlayer = &hash_rxq_init[HASH_RXQ_IPV6],
133 .hash_fields = (IBV_RX_HASH_SRC_IPV6 |
134 IBV_RX_HASH_DST_IPV6),
135 .dpdk_rss_hf = (ETH_RSS_IPV6 |
139 .type = IBV_FLOW_SPEC_IPV6,
140 .size = sizeof(hash_rxq_init[0].flow_spec.ipv6),
142 .underlayer = &hash_rxq_init[HASH_RXQ_ETH],
149 .type = IBV_FLOW_SPEC_ETH,
150 .size = sizeof(hash_rxq_init[0].flow_spec.eth),
156 /* Number of entries in hash_rxq_init[]. */
157 const unsigned int hash_rxq_init_n = RTE_DIM(hash_rxq_init);
159 /* Initialization data for hash RX queue indirection tables. */
160 static const struct ind_table_init ind_table_init[] = {
162 .max_size = -1u, /* Superseded by HW limitations. */
164 1 << HASH_RXQ_TCPV4 |
165 1 << HASH_RXQ_UDPV4 |
167 1 << HASH_RXQ_TCPV6 |
168 1 << HASH_RXQ_UDPV6 |
175 .hash_types = 1 << HASH_RXQ_ETH,
180 #define IND_TABLE_INIT_N RTE_DIM(ind_table_init)
182 /* Default RSS hash key also used for ConnectX-3. */
183 uint8_t rss_hash_default_key[] = {
184 0x2c, 0xc6, 0x81, 0xd1,
185 0x5b, 0xdb, 0xf4, 0xf7,
186 0xfc, 0xa2, 0x83, 0x19,
187 0xdb, 0x1a, 0x3e, 0x94,
188 0x6b, 0x9e, 0x38, 0xd9,
189 0x2c, 0x9c, 0x03, 0xd1,
190 0xad, 0x99, 0x44, 0xa7,
191 0xd9, 0x56, 0x3d, 0x59,
192 0x06, 0x3c, 0x25, 0xf3,
193 0xfc, 0x1f, 0xdc, 0x2a,
196 /* Length of the default RSS hash key. */
197 const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);
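/*
 * Note: this key and its length are the fallback passed to the Verbs
 * Toeplitz RX hash configuration in priv_create_hash_rxqs() whenever a
 * port has no per-protocol RSS configuration of its own.
 */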
200 * Populate flow steering rule for a given hash RX queue type using
201 * information from hash_rxq_init[]. Nothing is written to flow_attr when
202 * flow_attr_size is not large enough, but the required size is still returned.
205 * Pointer to private structure.
206 * @param[out] flow_attr
207 * Pointer to flow attribute structure to fill. Note that the allocated
208 *   area must be large enough to hold all flow specifications.
209 * @param flow_attr_size
210 * Entire size of flow_attr and trailing room for flow specifications.
212 * Hash RX queue type to use for flow steering rule.
215 * Total size of the flow attribute buffer. No errors are defined.
218 priv_flow_attr(struct priv *priv, struct ibv_flow_attr *flow_attr,
219 size_t flow_attr_size, enum hash_rxq_type type)
221 size_t offset = sizeof(*flow_attr);
222 const struct hash_rxq_init *init = &hash_rxq_init[type];
224 assert(priv != NULL);
225 assert((size_t)type < RTE_DIM(hash_rxq_init));
227 offset += init->flow_spec.hdr.size;
228 init = init->underlayer;
229 } while (init != NULL);
230 if (offset > flow_attr_size)
232 flow_attr_size = offset;
233 init = &hash_rxq_init[type];
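/*
 * The specifications below are written back to front: starting from the
 * end of the buffer, each underlayer spec is copied in front of the
 * previous one (e.g. for HASH_RXQ_TCPV4: TCP/UDP, then IPv4, then
 * Ethernet), so the final layout after the attribute header is
 * Ethernet, IPv4, TCP/UDP.
 */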
234 *flow_attr = (struct ibv_flow_attr){
235 .type = IBV_FLOW_ATTR_NORMAL,
236 /* Priorities < 3 are reserved for flow director. */
237 .priority = init->flow_priority + 3,
243 offset -= init->flow_spec.hdr.size;
244 memcpy((void *)((uintptr_t)flow_attr + offset),
246 init->flow_spec.hdr.size);
247 ++flow_attr->num_of_specs;
248 init = init->underlayer;
249 } while (init != NULL);
250 return flow_attr_size;
254 * Convert hash type position in indirection table initializer to
255 * hash RX queue type.
258 * Indirection table initializer.
260 * Hash type position.
263 * Hash RX queue type.
265 static enum hash_rxq_type
266 hash_rxq_type_from_pos(const struct ind_table_init *table, unsigned int pos)
268 enum hash_rxq_type type = HASH_RXQ_TCPV4;
270 assert(pos < table->hash_types_n);
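/*
 * Example, assuming hash_rxq_type values follow the order of
 * hash_rxq_init[]: with hash_types = TCPv4 | UDPv4 | IPv4, pos 0 maps to
 * HASH_RXQ_TCPV4, pos 1 to HASH_RXQ_UDPV4 and pos 2 to HASH_RXQ_IPV4;
 * cleared bits are simply skipped.
 */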
272 if ((table->hash_types & (1 << type)) && (pos-- == 0))
280 * Filter out disabled hash RX queue types from ind_table_init[].
283 * Pointer to private structure.
288 * Number of table entries.
291 priv_make_ind_table_init(struct priv *priv,
292 struct ind_table_init (*table)[IND_TABLE_INIT_N])
297 unsigned int table_n = 0;
298 /* Mandatory to receive frames not handled by normal hash RX queues. */
299 unsigned int hash_types_sup = 1 << HASH_RXQ_ETH;
301 rss_hf = priv->rss_hf;
302 /* Process other protocols only if more than one queue. */
303 if (priv->rxqs_n > 1)
304 for (i = 0; (i != hash_rxq_init_n); ++i)
305 if (rss_hf & hash_rxq_init[i].dpdk_rss_hf)
306 hash_types_sup |= (1 << i);
308 /* Filter out entries whose protocols are not in the set. */
309 for (i = 0, j = 0; (i != IND_TABLE_INIT_N); ++i) {
313 /* j is increased only if the table has valid protocols. */
315 (*table)[j] = ind_table_init[i];
316 (*table)[j].hash_types &= hash_types_sup;
317 for (h = 0, nb = 0; (h != hash_rxq_init_n); ++h)
318 if (((*table)[j].hash_types >> h) & 0x1)
320 (*table)[j].hash_types_n = nb;
330 * Initialize hash RX queues and indirection tables.
333 * Pointer to private structure.
336 * 0 on success, errno value on failure.
339 priv_create_hash_rxqs(struct priv *priv)
341 struct ibv_wq *wqs[priv->reta_idx_n];
342 struct ind_table_init ind_table_init[IND_TABLE_INIT_N];
343 unsigned int ind_tables_n =
344 priv_make_ind_table_init(priv, &ind_table_init);
345 unsigned int hash_rxqs_n = 0;
346 struct hash_rxq (*hash_rxqs)[] = NULL;
347 struct ibv_rwq_ind_table *(*ind_tables)[] = NULL;
353 assert(priv->ind_tables == NULL);
354 assert(priv->ind_tables_n == 0);
355 assert(priv->hash_rxqs == NULL);
356 assert(priv->hash_rxqs_n == 0);
357 assert(priv->pd != NULL);
358 assert(priv->ctx != NULL);
361 if (priv->rxqs_n == 0)
363 assert(priv->rxqs != NULL);
364 if (ind_tables_n == 0) {
365 ERROR("all hash RX queue types have been filtered out,"
366 " indirection table cannot be created");
369 if (priv->rxqs_n & (priv->rxqs_n - 1)) {
370 INFO("%u RX queues are configured, consider rounding this"
371 " number to the next power of two for better balancing",
373 DEBUG("indirection table extended to assume %u WQs",
376 for (i = 0; (i != priv->reta_idx_n); ++i) {
377 struct mlx5_rxq_ctrl *rxq_ctrl;
379 rxq_ctrl = container_of((*priv->rxqs)[(*priv->reta_idx)[i]],
380 struct mlx5_rxq_ctrl, rxq);
381 wqs[i] = rxq_ctrl->wq;
383 /* Get number of hash RX queues to configure. */
384 for (i = 0, hash_rxqs_n = 0; (i != ind_tables_n); ++i)
385 hash_rxqs_n += ind_table_init[i].hash_types_n;
386 DEBUG("allocating %u hash RX queues for %u WQs, %u indirection tables",
387 hash_rxqs_n, priv->rxqs_n, ind_tables_n);
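/*
 * The total is the sum of hash_types_n over the filtered indirection
 * tables; since HASH_RXQ_ETH is always part of hash_types_sup, at least
 * one hash RX queue is created even when only a single RX queue is
 * configured.
 */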
388 /* Create indirection tables. */
389 ind_tables = rte_calloc(__func__, ind_tables_n,
390 sizeof((*ind_tables)[0]), 0);
391 if (ind_tables == NULL) {
393 ERROR("cannot allocate indirection tables container: %s",
397 for (i = 0; (i != ind_tables_n); ++i) {
398 struct ibv_rwq_ind_table_init_attr ind_init_attr = {
399 .log_ind_tbl_size = 0, /* Set below. */
403 unsigned int ind_tbl_size = ind_table_init[i].max_size;
404 struct ibv_rwq_ind_table *ind_table;
406 if (priv->reta_idx_n < ind_tbl_size)
407 ind_tbl_size = priv->reta_idx_n;
408 ind_init_attr.log_ind_tbl_size = log2above(ind_tbl_size);
410 ind_table = ibv_create_rwq_ind_table(priv->ctx,
412 if (ind_table != NULL) {
413 (*ind_tables)[i] = ind_table;
416 /* Not clear whether errno is set. */
417 err = (errno ? errno : EINVAL);
418 ERROR("RX indirection table creation failed with error %d: %s",
422 /* Allocate array that holds hash RX queues and related data. */
423 hash_rxqs = rte_calloc(__func__, hash_rxqs_n,
424 sizeof((*hash_rxqs)[0]), 0);
425 if (hash_rxqs == NULL) {
427 ERROR("cannot allocate hash RX queues container: %s",
431 for (i = 0, j = 0, k = 0;
432 ((i != hash_rxqs_n) && (j != ind_tables_n));
434 struct hash_rxq *hash_rxq = &(*hash_rxqs)[i];
435 enum hash_rxq_type type =
436 hash_rxq_type_from_pos(&ind_table_init[j], k);
437 struct rte_eth_rss_conf *priv_rss_conf =
438 (*priv->rss_conf)[type];
439 struct ibv_rx_hash_conf hash_conf = {
440 .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
441 .rx_hash_key_len = (priv_rss_conf ?
442 priv_rss_conf->rss_key_len :
443 rss_hash_default_key_len),
444 .rx_hash_key = (priv_rss_conf ?
445 priv_rss_conf->rss_key :
446 rss_hash_default_key),
447 .rx_hash_fields_mask = hash_rxq_init[type].hash_fields,
449 struct ibv_qp_init_attr_ex qp_init_attr = {
450 .qp_type = IBV_QPT_RAW_PACKET,
451 .comp_mask = (IBV_QP_INIT_ATTR_PD |
452 IBV_QP_INIT_ATTR_IND_TABLE |
453 IBV_QP_INIT_ATTR_RX_HASH),
454 .rx_hash_conf = hash_conf,
455 .rwq_ind_tbl = (*ind_tables)[j],
459 DEBUG("using indirection table %u for hash RX queue %u type %d",
461 *hash_rxq = (struct hash_rxq){
463 .qp = ibv_create_qp_ex(priv->ctx, &qp_init_attr),
466 if (hash_rxq->qp == NULL) {
467 err = (errno ? errno : EINVAL);
468 ERROR("Hash RX QP creation failure: %s",
472 if (++k < ind_table_init[j].hash_types_n)
474 /* Switch to the next indirection table and reset hash RX
475 * queue type array index. */
479 priv->ind_tables = ind_tables;
480 priv->ind_tables_n = ind_tables_n;
481 priv->hash_rxqs = hash_rxqs;
482 priv->hash_rxqs_n = hash_rxqs_n;
486 if (hash_rxqs != NULL) {
487 for (i = 0; (i != hash_rxqs_n); ++i) {
488 struct ibv_qp *qp = (*hash_rxqs)[i].qp;
492 claim_zero(ibv_destroy_qp(qp));
496 if (ind_tables != NULL) {
497 for (j = 0; (j != ind_tables_n); ++j) {
498 struct ibv_rwq_ind_table *ind_table =
501 if (ind_table == NULL)
503 claim_zero(ibv_destroy_rwq_ind_table(ind_table));
505 rte_free(ind_tables);
511 * Clean up hash RX queues and indirection tables.
514 * Pointer to private structure.
517 priv_destroy_hash_rxqs(struct priv *priv)
521 DEBUG("destroying %u hash RX queues", priv->hash_rxqs_n);
522 if (priv->hash_rxqs_n == 0) {
523 assert(priv->hash_rxqs == NULL);
524 assert(priv->ind_tables == NULL);
527 for (i = 0; (i != priv->hash_rxqs_n); ++i) {
528 struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i];
531 assert(hash_rxq->priv == priv);
532 assert(hash_rxq->qp != NULL);
533 /* Also check that there are no remaining flows. */
534 for (j = 0; (j != RTE_DIM(hash_rxq->special_flow)); ++j)
536 (k != RTE_DIM(hash_rxq->special_flow[j]));
538 assert(hash_rxq->special_flow[j][k] == NULL);
539 for (j = 0; (j != RTE_DIM(hash_rxq->mac_flow)); ++j)
540 for (k = 0; (k != RTE_DIM(hash_rxq->mac_flow[j])); ++k)
541 assert(hash_rxq->mac_flow[j][k] == NULL);
542 claim_zero(ibv_destroy_qp(hash_rxq->qp));
544 priv->hash_rxqs_n = 0;
545 rte_free(priv->hash_rxqs);
546 priv->hash_rxqs = NULL;
547 for (i = 0; (i != priv->ind_tables_n); ++i) {
548 struct ibv_rwq_ind_table *ind_table =
549 (*priv->ind_tables)[i];
551 assert(ind_table != NULL);
552 claim_zero(ibv_destroy_rwq_ind_table(ind_table));
554 priv->ind_tables_n = 0;
555 rte_free(priv->ind_tables);
556 priv->ind_tables = NULL;
560 * Check whether a given flow type is allowed.
563 * Pointer to private structure.
565 * Flow type to check.
568 * Nonzero if the given flow type is allowed.
571 priv_allow_flow_type(struct priv *priv, enum hash_rxq_flow_type type)
573 /* Only FLOW_TYPE_PROMISC is allowed when promiscuous mode
574 * has been requested. */
575 if (priv->promisc_req)
576 return type == HASH_RXQ_FLOW_TYPE_PROMISC;
578 case HASH_RXQ_FLOW_TYPE_PROMISC:
579 return !!priv->promisc_req;
580 case HASH_RXQ_FLOW_TYPE_ALLMULTI:
581 return !!priv->allmulti_req;
582 case HASH_RXQ_FLOW_TYPE_BROADCAST:
583 case HASH_RXQ_FLOW_TYPE_IPV6MULTI:
584 /* If allmulti is enabled, broadcast and ipv6multi
585 * are unnecessary. */
586 return !priv->allmulti_req;
587 case HASH_RXQ_FLOW_TYPE_MAC:
590 /* Unsupported flow type is not allowed. */
597 * Automatically enable/disable flows according to configuration.
603 * 0 on success, errno value on failure.
606 priv_rehash_flows(struct priv *priv)
608 enum hash_rxq_flow_type i;
610 for (i = HASH_RXQ_FLOW_TYPE_PROMISC;
611 i != RTE_DIM((*priv->hash_rxqs)[0].special_flow);
613 if (!priv_allow_flow_type(priv, i)) {
614 priv_special_flow_disable(priv, i);
616 int ret = priv_special_flow_enable(priv, i);
621 if (priv_allow_flow_type(priv, HASH_RXQ_FLOW_TYPE_MAC))
622 return priv_mac_addrs_enable(priv);
623 priv_mac_addrs_disable(priv);
628 * Allocate RX queue elements.
631 * Pointer to RX queue structure.
633 * Number of elements to allocate.
636 * 0 on success, errno value on failure.
639 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl, unsigned int elts_n)
641 const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
645 /* Iterate on segments. */
646 for (i = 0; (i != elts_n); ++i) {
647 struct rte_mbuf *buf;
648 volatile struct mlx5_wqe_data_seg *scat =
649 &(*rxq_ctrl->rxq.wqes)[i];
651 buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
653 ERROR("%p: empty mbuf pool", (void *)rxq_ctrl);
657 /* Headroom is reserved by rte_pktmbuf_alloc(). */
658 assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
659 /* Buffer is supposed to be empty. */
660 assert(rte_pktmbuf_data_len(buf) == 0);
661 assert(rte_pktmbuf_pkt_len(buf) == 0);
663 /* Only the first segment keeps headroom. */
665 SET_DATA_OFF(buf, 0);
666 PORT(buf) = rxq_ctrl->rxq.port_id;
667 DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
668 PKT_LEN(buf) = DATA_LEN(buf);
670 /* scat->addr must be able to store a pointer. */
671 assert(sizeof(scat->addr) >= sizeof(uintptr_t));
672 *scat = (struct mlx5_wqe_data_seg){
674 rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t)),
675 .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
676 .lkey = rte_cpu_to_be_32(rxq_ctrl->mr->lkey),
678 (*rxq_ctrl->rxq.elts)[i] = buf;
680 if (rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
681 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
682 struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
684 assert(rxq->elts_n == rxq->cqe_n);
685 /* Initialize default rearm_data for vPMD. */
686 mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
687 rte_mbuf_refcnt_set(mbuf_init, 1);
688 mbuf_init->nb_segs = 1;
689 mbuf_init->port = rxq->port_id;
691 * prevent compiler reordering:
692 * rearm_data covers previous fields.
694 rte_compiler_barrier();
695 rxq->mbuf_initializer = *(uint64_t *)&mbuf_init->rearm_data;
696 /* Padding with a fake mbuf for vectorized Rx. */
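/*
 * These trailing entries are never handed to the application; they only
 * ensure that the vectorized burst routine, which handles
 * MLX5_VPMD_DESCS_PER_LOOP descriptors at a time, always loads valid
 * mbuf pointers when it overshoots the ring end.
 */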
697 for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
698 (*rxq->elts)[elts_n + i] = &rxq->fake_mbuf;
700 DEBUG("%p: allocated and configured %u segments (max %u packets)",
701 (void *)rxq_ctrl, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n));
706 for (i = 0; (i != elts_n); ++i) {
707 if ((*rxq_ctrl->rxq.elts)[i] != NULL)
708 rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
709 (*rxq_ctrl->rxq.elts)[i] = NULL;
711 DEBUG("%p: failed, freed everything", (void *)rxq_ctrl);
717 * Free RX queue elements.
720 * Pointer to RX queue structure.
723 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
725 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
726 const uint16_t q_n = (1 << rxq->elts_n);
727 const uint16_t q_mask = q_n - 1;
728 uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
731 DEBUG("%p: freeing WRs", (void *)rxq_ctrl);
732 if (rxq->elts == NULL)
735 * Some mbufs in the ring belong to the application. They cannot be
738 if (rxq_check_vec_support(rxq) > 0) {
739 for (i = 0; i < used; ++i)
740 (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
741 rxq->rq_pi = rxq->rq_ci;
743 for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
744 if ((*rxq->elts)[i] != NULL)
745 rte_pktmbuf_free_seg((*rxq->elts)[i]);
746 (*rxq->elts)[i] = NULL;
751 * Clean up an RX queue.
753 * Destroy objects, free allocated memory and reset the structure for reuse.
756 * Pointer to RX queue structure.
759 mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
761 DEBUG("cleaning up %p", (void *)rxq_ctrl);
762 rxq_free_elts(rxq_ctrl);
763 if (rxq_ctrl->wq != NULL)
764 claim_zero(ibv_destroy_wq(rxq_ctrl->wq));
765 if (rxq_ctrl->cq != NULL)
766 claim_zero(ibv_destroy_cq(rxq_ctrl->cq));
767 if (rxq_ctrl->channel != NULL)
768 claim_zero(ibv_destroy_comp_channel(rxq_ctrl->channel));
769 if (rxq_ctrl->mr != NULL)
770 claim_zero(ibv_dereg_mr(rxq_ctrl->mr));
771 memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
775 * Initialize RX queue.
778 * Pointer to RX queue control template.
781 * 0 on success, errno value on failure.
784 rxq_setup(struct mlx5_rxq_ctrl *tmpl)
786 struct ibv_cq *ibcq = tmpl->cq;
787 struct mlx5dv_cq cq_info;
788 struct mlx5dv_rwq rwq;
789 const uint16_t desc_n =
790 (1 << tmpl->rxq.elts_n) + tmpl->priv->rx_vec_en *
791 MLX5_VPMD_DESCS_PER_LOOP;
792 struct rte_mbuf *(*elts)[desc_n] =
793 rte_calloc_socket("RXQ", 1, sizeof(*elts), 0, tmpl->socket);
794 struct mlx5dv_obj obj;
798 obj.cq.out = &cq_info;
799 obj.rwq.in = tmpl->wq;
801 ret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);
805 if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
806 ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
807 "it should be set to %u", RTE_CACHE_LINE_SIZE);
812 tmpl->rxq.rq_db = rwq.dbrec;
813 tmpl->rxq.cqe_n = log2above(cq_info.cqe_cnt);
817 tmpl->rxq.cq_db = cq_info.dbrec;
819 (volatile struct mlx5_wqe_data_seg (*)[])
822 (volatile struct mlx5_cqe (*)[])
823 (uintptr_t)cq_info.buf;
824 tmpl->rxq.elts = elts;
825 tmpl->rxq.cq_uar = cq_info.cq_uar;
826 tmpl->rxq.cqn = cq_info.cqn;
827 tmpl->rxq.cq_arm_sn = 0;
832 * Configure an RX queue.
835 * Pointer to Ethernet device structure.
837 * Pointer to RX queue structure.
839 * Number of descriptors to configure in queue.
841 * NUMA socket on which memory must be allocated.
843 * Thresholds parameters.
845 * Memory pool for buffer allocations.
848 * 0 on success, errno value on failure.
851 rxq_ctrl_setup(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl,
852 uint16_t desc, unsigned int socket,
853 const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
855 struct priv *priv = dev->data->dev_private;
856 struct mlx5_rxq_ctrl tmpl = {
860 .elts_n = log2above(desc),
862 .rss_hash = priv->rxqs_n > 1,
865 struct ibv_wq_attr mod;
867 struct ibv_cq_init_attr_ex cq;
868 struct ibv_wq_init_attr wq;
869 struct ibv_cq_ex cq_attr;
871 unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
872 unsigned int cqe_n = desc - 1;
873 const uint16_t desc_n =
874 desc + priv->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
875 struct rte_mbuf *(*elts)[desc_n] = NULL;
878 (void)conf; /* Thresholds configuration (ignored). */
879 /* Enable scattered packet support for this queue if necessary. */
880 assert(mb_len >= RTE_PKTMBUF_HEADROOM);
881 if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
882 (mb_len - RTE_PKTMBUF_HEADROOM)) {
884 } else if (dev->data->dev_conf.rxmode.enable_scatter) {
886 RTE_PKTMBUF_HEADROOM +
887 dev->data->dev_conf.rxmode.max_rx_pkt_len;
891 * Determine the number of SGEs needed for a full packet
892 * and round it to the next power of two.
894 sges_n = log2above((size / mb_len) + !!(size % mb_len));
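/*
 * Illustration with hypothetical values: with a 2176-byte mbuf data
 * room, max_rx_pkt_len = 9000 and the usual 128-byte
 * RTE_PKTMBUF_HEADROOM, size = 9128, which needs 5 segments;
 * log2above(5) = 3, so 8 SGEs are used per packet.
 */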
895 tmpl.rxq.sges_n = sges_n;
896 /* Make sure rxq.sges_n did not overflow. */
897 size = mb_len * (1 << tmpl.rxq.sges_n);
898 size -= RTE_PKTMBUF_HEADROOM;
899 if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
900 ERROR("%p: too many SGEs (%u) needed to handle"
901 " requested maximum packet size %u",
904 dev->data->dev_conf.rxmode.max_rx_pkt_len);
908 WARN("%p: the requested maximum Rx packet size (%u) is"
909 " larger than a single mbuf (%u) and scattered"
910 " mode has not been requested",
912 dev->data->dev_conf.rxmode.max_rx_pkt_len,
913 mb_len - RTE_PKTMBUF_HEADROOM);
915 DEBUG("%p: maximum number of segments per packet: %u",
916 (void *)dev, 1 << tmpl.rxq.sges_n);
917 if (desc % (1 << tmpl.rxq.sges_n)) {
918 ERROR("%p: number of RX queue descriptors (%u) is not a"
919 " multiple of SGEs per packet (%u)",
922 1 << tmpl.rxq.sges_n);
925 /* Toggle RX checksum offload if hardware supports it. */
927 tmpl.rxq.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
928 if (priv->hw_csum_l2tun)
929 tmpl.rxq.csum_l2tun =
930 !!dev->data->dev_conf.rxmode.hw_ip_checksum;
931 /* Use the entire RX mempool as the memory region. */
932 tmpl.mr = mlx5_mp2mr(priv->pd, mp);
933 if (tmpl.mr == NULL) {
935 ERROR("%p: MR creation failure: %s",
936 (void *)dev, strerror(ret));
939 if (dev->data->dev_conf.intr_conf.rxq) {
940 tmpl.channel = ibv_create_comp_channel(priv->ctx);
941 if (tmpl.channel == NULL) {
943 ERROR("%p: Rx interrupt completion channel creation"
945 (void *)dev, strerror(ret));
949 attr.cq = (struct ibv_cq_init_attr_ex){
952 if (priv->cqe_comp) {
953 attr.cq.comp_mask |= IBV_CQ_INIT_ATTR_MASK_FLAGS;
954 attr.cq.flags |= MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
956 * For vectorized Rx, the number of CQEs must not be doubled so that
957 * cq_ci and rq_ci stay aligned.
959 if (rxq_check_vec_support(&tmpl.rxq) < 0)
960 cqe_n = (desc * 2) - 1; /* Double the number of CQEs. */
962 tmpl.cq = ibv_create_cq(priv->ctx, cqe_n, NULL, tmpl.channel, 0);
963 if (tmpl.cq == NULL) {
965 ERROR("%p: CQ creation failure: %s",
966 (void *)dev, strerror(ret));
969 DEBUG("priv->device_attr.max_qp_wr is %d",
970 priv->device_attr.orig_attr.max_qp_wr);
971 DEBUG("priv->device_attr.max_sge is %d",
972 priv->device_attr.orig_attr.max_sge);
973 /* Configure VLAN stripping. */
974 tmpl.rxq.vlan_strip = (priv->hw_vlan_strip &&
975 !!dev->data->dev_conf.rxmode.hw_vlan_strip);
976 attr.wq = (struct ibv_wq_init_attr){
977 .wq_context = NULL, /* Could be useful in the future. */
978 .wq_type = IBV_WQT_RQ,
979 /* Max number of outstanding WRs. */
980 .max_wr = desc >> tmpl.rxq.sges_n,
981 /* Max number of scatter/gather elements in a WR. */
982 .max_sge = 1 << tmpl.rxq.sges_n,
986 IBV_WQ_FLAGS_CVLAN_STRIPPING |
988 .create_flags = (tmpl.rxq.vlan_strip ?
989 IBV_WQ_FLAGS_CVLAN_STRIPPING :
992 /* By default, FCS (CRC) is stripped by hardware. */
993 if (dev->data->dev_conf.rxmode.hw_strip_crc) {
994 tmpl.rxq.crc_present = 0;
995 } else if (priv->hw_fcs_strip) {
996 /* Ask HW/Verbs to leave CRC in place when supported. */
997 attr.wq.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
998 attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
999 tmpl.rxq.crc_present = 1;
1001 WARN("%p: CRC stripping has been disabled but will still"
1002 " be performed by hardware, make sure MLNX_OFED and"
1003 " firmware are up to date",
1005 tmpl.rxq.crc_present = 0;
1007 DEBUG("%p: CRC stripping is %s, %u bytes will be subtracted from"
1008 " incoming frames to hide it",
1010 tmpl.rxq.crc_present ? "disabled" : "enabled",
1011 tmpl.rxq.crc_present << 2);
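/* crc_present << 2 is the 4-byte Ethernet FCS left at the end of each
 * received frame when CRC stripping is disabled. */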
1012 #ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
1013 if (!mlx5_getenv_int("MLX5_PMD_ENABLE_PADDING"))
1014 ; /* Nothing else to do. */
1015 else if (priv->hw_padding) {
1016 INFO("%p: enabling packet padding on queue %p",
1017 (void *)dev, (void *)rxq_ctrl);
1018 attr.wq.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
1019 attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
1021 WARN("%p: packet padding has been requested but is not"
1022 " supported, make sure MLNX_OFED and firmware are"
1027 tmpl.wq = ibv_create_wq(priv->ctx, &attr.wq);
1028 if (tmpl.wq == NULL) {
1029 ret = (errno ? errno : EINVAL);
1030 ERROR("%p: WQ creation failure: %s",
1031 (void *)dev, strerror(ret));
1035 * Make sure the number of WRs*SGEs matches expectations since a queue
1036 * cannot allocate more than "desc" buffers.
1038 if (((int)attr.wq.max_wr != (desc >> tmpl.rxq.sges_n)) ||
1039 ((int)attr.wq.max_sge != (1 << tmpl.rxq.sges_n))) {
1040 ERROR("%p: requested %u*%u but got %u*%u WRs*SGEs",
1042 (desc >> tmpl.rxq.sges_n), (1 << tmpl.rxq.sges_n),
1043 attr.wq.max_wr, attr.wq.max_sge);
1048 tmpl.rxq.port_id = dev->data->port_id;
1049 DEBUG("%p: RTE port ID: %u", (void *)rxq_ctrl, tmpl.rxq.port_id);
1050 /* Change queue state to ready. */
1051 mod = (struct ibv_wq_attr){
1052 .attr_mask = IBV_WQ_ATTR_STATE,
1053 .wq_state = IBV_WQS_RDY,
1055 ret = ibv_modify_wq(tmpl.wq, &mod);
1057 ERROR("%p: WQ state to IBV_WQS_RDY failed: %s",
1058 (void *)dev, strerror(ret));
1061 ret = rxq_setup(&tmpl);
1063 ERROR("%p: cannot initialize RX queue structure: %s",
1064 (void *)dev, strerror(ret));
1067 ret = rxq_alloc_elts(&tmpl, desc);
1069 ERROR("%p: RXQ allocation failed: %s",
1070 (void *)dev, strerror(ret));
1073 /* Clean up rxq in case we're reinitializing it. */
1074 DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq_ctrl);
1075 mlx5_rxq_cleanup(rxq_ctrl);
1076 /* Move mbuf pointers to dedicated storage area in RX queue. */
1077 elts = (void *)(rxq_ctrl + 1);
1078 rte_memcpy(elts, tmpl.rxq.elts, sizeof(*elts));
1080 memset(tmpl.rxq.elts, 0x55, sizeof(*elts));
1082 rte_free(tmpl.rxq.elts);
1083 tmpl.rxq.elts = elts;
1085 /* Update doorbell counter. */
1086 rxq_ctrl->rxq.rq_ci = desc >> rxq_ctrl->rxq.sges_n;
1088 *rxq_ctrl->rxq.rq_db = rte_cpu_to_be_32(rxq_ctrl->rxq.rq_ci);
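/*
 * Writing the doorbell record publishes rq_ci to the device: each WQE
 * holds 1 << sges_n data segments, so desc >> sges_n receive WQEs are
 * reported as posted.
 */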
1089 DEBUG("%p: rxq updated with %p", (void *)rxq_ctrl, (void *)&tmpl);
1093 elts = tmpl.rxq.elts;
1094 mlx5_rxq_cleanup(&tmpl);
1101 * DPDK callback to configure an RX queue.
1104 * Pointer to Ethernet device structure.
1108 * Number of descriptors to configure in queue.
1110 * NUMA socket on which memory must be allocated.
1112 * Thresholds parameters.
1114 * Memory pool for buffer allocations.
1117 * 0 on success, negative errno value on failure.
1120 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1121 unsigned int socket, const struct rte_eth_rxconf *conf,
1122 struct rte_mempool *mp)
1124 struct priv *priv = dev->data->dev_private;
1125 struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
1126 struct mlx5_rxq_ctrl *rxq_ctrl =
1127 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
1128 const uint16_t desc_n =
1129 desc + priv->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
1132 if (mlx5_is_secondary())
1133 return -E_RTE_SECONDARY;
1136 if (!rte_is_power_of_2(desc)) {
1137 desc = 1 << log2above(desc);
1138 WARN("%p: increased number of descriptors in RX queue %u"
1139 " to the next power of two (%d)",
1140 (void *)dev, idx, desc);
1142 DEBUG("%p: configuring queue %u for %u descriptors",
1143 (void *)dev, idx, desc);
1144 if (idx >= priv->rxqs_n) {
1145 ERROR("%p: queue index out of range (%u >= %u)",
1146 (void *)dev, idx, priv->rxqs_n);
1151 DEBUG("%p: reusing already allocated queue index %u (%p)",
1152 (void *)dev, idx, (void *)rxq);
1153 if (dev->data->dev_started) {
1157 (*priv->rxqs)[idx] = NULL;
1158 mlx5_rxq_cleanup(rxq_ctrl);
1159 /* Resize if rxq size is changed. */
1160 if (rxq_ctrl->rxq.elts_n != log2above(desc)) {
1161 rxq_ctrl = rte_realloc(rxq_ctrl,
1162 sizeof(*rxq_ctrl) + desc_n *
1163 sizeof(struct rte_mbuf *),
1164 RTE_CACHE_LINE_SIZE);
1166 ERROR("%p: unable to reallocate queue index %u",
1173 rxq_ctrl = rte_calloc_socket("RXQ", 1, sizeof(*rxq_ctrl) +
1175 sizeof(struct rte_mbuf *),
1177 if (rxq_ctrl == NULL) {
1178 ERROR("%p: unable to allocate queue index %u",
1184 ret = rxq_ctrl_setup(dev, rxq_ctrl, desc, socket, conf, mp);
1188 rxq_ctrl->rxq.stats.idx = idx;
1189 DEBUG("%p: adding RX queue %p to list",
1190 (void *)dev, (void *)rxq_ctrl);
1191 (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
1198 * DPDK callback to release an RX queue.
1201 * Generic RX queue pointer.
1204 mlx5_rx_queue_release(void *dpdk_rxq)
1206 struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
1207 struct mlx5_rxq_ctrl *rxq_ctrl;
1211 if (mlx5_is_secondary())
1216 rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
1217 priv = rxq_ctrl->priv;
1219 if (priv_flow_rxq_in_use(priv, rxq))
1220 rte_panic("Rx queue %p is still used by a flow and cannot be"
1221 " removed\n", (void *)rxq_ctrl);
1222 for (i = 0; (i != priv->rxqs_n); ++i)
1223 if ((*priv->rxqs)[i] == rxq) {
1224 DEBUG("%p: removing RX queue %p from list",
1225 (void *)priv->dev, (void *)rxq_ctrl);
1226 (*priv->rxqs)[i] = NULL;
1229 mlx5_rxq_cleanup(rxq_ctrl);
1235 * Allocate queue vector and fill epoll fd list for Rx interrupts.
1238 * Pointer to private structure.
1241 * 0 on success, negative on failure.
1244 priv_rx_intr_vec_enable(struct priv *priv)
1247 unsigned int rxqs_n = priv->rxqs_n;
1248 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
1249 unsigned int count = 0;
1250 struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
1252 assert(!mlx5_is_secondary());
1253 if (!priv->dev->data->dev_conf.intr_conf.rxq)
1255 priv_rx_intr_vec_disable(priv);
1256 intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
1257 if (intr_handle->intr_vec == NULL) {
1258 ERROR("failed to allocate memory for interrupt vector,"
1259 " Rx interrupts will not be supported");
1262 intr_handle->type = RTE_INTR_HANDLE_EXT;
1263 for (i = 0; i != n; ++i) {
1264 struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1265 struct mlx5_rxq_ctrl *rxq_ctrl =
1266 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
1271 /* Skip queues that cannot request interrupts. */
1272 if (!rxq || !rxq_ctrl->channel) {
1273 /* Use invalid intr_vec[] index to disable entry. */
1274 intr_handle->intr_vec[i] =
1275 RTE_INTR_VEC_RXTX_OFFSET +
1276 RTE_MAX_RXTX_INTR_VEC_ID;
1279 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
1280 ERROR("too many Rx queues for interrupt vector size"
1281 " (%d), Rx interrupts cannot be enabled",
1282 RTE_MAX_RXTX_INTR_VEC_ID);
1283 priv_rx_intr_vec_disable(priv);
1286 fd = rxq_ctrl->channel->fd;
1287 flags = fcntl(fd, F_GETFL);
1288 rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
1290 ERROR("failed to make Rx interrupt file descriptor"
1291 " %d non-blocking for queue index %d", fd, i);
1292 priv_rx_intr_vec_disable(priv);
1295 intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
1296 intr_handle->efds[count] = fd;
1300 priv_rx_intr_vec_disable(priv);
1302 intr_handle->nb_efd = count;
1307 * Clean up the Rx interrupt handler.
1310 * Pointer to private structure.
1313 priv_rx_intr_vec_disable(struct priv *priv)
1315 struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
1317 rte_intr_free_epoll_fd(intr_handle);
1318 free(intr_handle->intr_vec);
1319 intr_handle->nb_efd = 0;
1320 intr_handle->intr_vec = NULL;
1324 * MLX5 CQ notification.
1327 * Pointer to receive queue structure.
1329 * Sequence number per receive queue.
1332 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
1335 uint32_t doorbell_hi;
1337 void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
1339 sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
1340 doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
1341 doorbell = (uint64_t)doorbell_hi << 32;
1342 doorbell |= rxq->cqn;
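/*
 * Arming sequence: the CQ doorbell record is updated with the sequence
 * number and consumer index first, then the 64-bit value, which also
 * carries the CQ number, is written to the UAR register to request an
 * event on the next completion.
 */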
1343 rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
1345 rte_write64(rte_cpu_to_be_64(doorbell), cq_db_reg);
1349 * DPDK callback for Rx queue interrupt enable.
1352 * Pointer to Ethernet device structure.
1353 * @param rx_queue_id
1357 * 0 on success, negative on failure.
1360 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1362 struct priv *priv = mlx5_get_priv(dev);
1363 struct mlx5_rxq_data *rxq = (*priv->rxqs)[rx_queue_id];
1364 struct mlx5_rxq_ctrl *rxq_ctrl =
1365 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
1368 if (!rxq || !rxq_ctrl->channel) {
1371 mlx5_arm_cq(rxq, rxq->cq_arm_sn);
1374 WARN("unable to arm interrupt on rx queue %d", rx_queue_id);
1379 * DPDK callback for Rx queue interrupt disable.
1382 * Pointer to Ethernet device structure.
1383 * @param rx_queue_id
1387 * 0 on success, negative on failure.
1390 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1392 struct priv *priv = mlx5_get_priv(dev);
1393 struct mlx5_rxq_data *rxq = (*priv->rxqs)[rx_queue_id];
1394 struct mlx5_rxq_ctrl *rxq_ctrl =
1395 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
1396 struct ibv_cq *ev_cq;
1400 if (!rxq || !rxq_ctrl->channel) {
1403 ret = ibv_get_cq_event(rxq_ctrl->cq->channel, &ev_cq, &ev_ctx);
1405 if (ret || ev_cq != rxq_ctrl->cq)
1409 WARN("unable to disable interrupt on rx queue %d",
1412 ibv_ack_cq_events(rxq_ctrl->cq, 1);