/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#pragma GCC diagnostic ignored "-Wpedantic"
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#pragma GCC diagnostic error "-Wpedantic"

#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_debug.h>

#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
/* Initialization data for hash RX queues. */
const struct hash_rxq_init hash_rxq_init[] = {
	.hash_fields = (IBV_RX_HASH_SRC_IPV4 |
			IBV_RX_HASH_DST_IPV4 |
			IBV_RX_HASH_SRC_PORT_TCP |
			IBV_RX_HASH_DST_PORT_TCP),
	.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_TCP,
	.flow_spec.tcp_udp = {
		.type = IBV_FLOW_SPEC_TCP,
		.size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
	.underlayer = &hash_rxq_init[HASH_RXQ_IPV4],
	.hash_fields = (IBV_RX_HASH_SRC_IPV4 |
			IBV_RX_HASH_DST_IPV4 |
			IBV_RX_HASH_SRC_PORT_UDP |
			IBV_RX_HASH_DST_PORT_UDP),
	.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_UDP,
	.flow_spec.tcp_udp = {
		.type = IBV_FLOW_SPEC_UDP,
		.size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
	.underlayer = &hash_rxq_init[HASH_RXQ_IPV4],
	.hash_fields = (IBV_RX_HASH_SRC_IPV4 |
			IBV_RX_HASH_DST_IPV4),
	.dpdk_rss_hf = (ETH_RSS_IPV4 |
		.type = IBV_FLOW_SPEC_IPV4,
		.size = sizeof(hash_rxq_init[0].flow_spec.ipv4),
	.underlayer = &hash_rxq_init[HASH_RXQ_ETH],
	.hash_fields = (IBV_RX_HASH_SRC_IPV6 |
			IBV_RX_HASH_DST_IPV6 |
			IBV_RX_HASH_SRC_PORT_TCP |
			IBV_RX_HASH_DST_PORT_TCP),
	.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_TCP,
	.flow_spec.tcp_udp = {
		.type = IBV_FLOW_SPEC_TCP,
		.size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
	.underlayer = &hash_rxq_init[HASH_RXQ_IPV6],
	.hash_fields = (IBV_RX_HASH_SRC_IPV6 |
			IBV_RX_HASH_DST_IPV6 |
			IBV_RX_HASH_SRC_PORT_UDP |
			IBV_RX_HASH_DST_PORT_UDP),
	.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_UDP,
	.flow_spec.tcp_udp = {
		.type = IBV_FLOW_SPEC_UDP,
		.size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
	.underlayer = &hash_rxq_init[HASH_RXQ_IPV6],
	.hash_fields = (IBV_RX_HASH_SRC_IPV6 |
			IBV_RX_HASH_DST_IPV6),
	.dpdk_rss_hf = (ETH_RSS_IPV6 |
		.type = IBV_FLOW_SPEC_IPV6,
		.size = sizeof(hash_rxq_init[0].flow_spec.ipv6),
	.underlayer = &hash_rxq_init[HASH_RXQ_ETH],
		.type = IBV_FLOW_SPEC_ETH,
		.size = sizeof(hash_rxq_init[0].flow_spec.eth),

/* Number of entries in hash_rxq_init[]. */
const unsigned int hash_rxq_init_n = RTE_DIM(hash_rxq_init);
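/*
 * Editorial note: each entry's underlayer pointer links it to the entry for
 * its encapsulating layer (e.g. HASH_RXQ_TCPV4 -> HASH_RXQ_IPV4 ->
 * HASH_RXQ_ETH). priv_flow_attr() below walks this chain to stack the flow
 * specification of every layer into a single ibv_flow_attr buffer.
 */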
/* Initialization data for hash RX queue indirection tables. */
static const struct ind_table_init ind_table_init[] = {
	.max_size = -1u, /* Superseded by HW limitations. */
		1 << HASH_RXQ_TCPV4 |
		1 << HASH_RXQ_UDPV4 |
		1 << HASH_RXQ_TCPV6 |
		1 << HASH_RXQ_UDPV6 |
	.hash_types = 1 << HASH_RXQ_ETH,

#define IND_TABLE_INIT_N RTE_DIM(ind_table_init)

/* Default RSS hash key also used for ConnectX-3. */
uint8_t rss_hash_default_key[] = {
	0x2c, 0xc6, 0x81, 0xd1,
	0x5b, 0xdb, 0xf4, 0xf7,
	0xfc, 0xa2, 0x83, 0x19,
	0xdb, 0x1a, 0x3e, 0x94,
	0x6b, 0x9e, 0x38, 0xd9,
	0x2c, 0x9c, 0x03, 0xd1,
	0xad, 0x99, 0x44, 0xa7,
	0xd9, 0x56, 0x3d, 0x59,
	0x06, 0x3c, 0x25, 0xf3,
	0xfc, 0x1f, 0xdc, 0x2a,

/* Length of the default RSS hash key. */
const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);

 * Populate flow steering rule for a given hash RX queue type using
 * information from hash_rxq_init[]. Nothing is written to flow_attr when
 * flow_attr_size is not large enough, but the required size is still returned.
 *   Pointer to private structure.
 * @param[out] flow_attr
 *   Pointer to flow attribute structure to fill. Note that the allocated
 *   area must be large enough to hold all flow specifications.
 * @param flow_attr_size
 *   Entire size of flow_attr and trailing room for flow specifications.
 *   Hash RX queue type to use for flow steering rule.
 *   Total size of the flow attribute buffer. No errors are defined.
priv_flow_attr(struct priv *priv, struct ibv_flow_attr *flow_attr,
		size_t flow_attr_size, enum hash_rxq_type type)
	size_t offset = sizeof(*flow_attr);
	const struct hash_rxq_init *init = &hash_rxq_init[type];

	assert(priv != NULL);
	assert((size_t)type < RTE_DIM(hash_rxq_init));
		offset += init->flow_spec.hdr.size;
		init = init->underlayer;
	} while (init != NULL);
	if (offset > flow_attr_size)
	flow_attr_size = offset;
	init = &hash_rxq_init[type];
	*flow_attr = (struct ibv_flow_attr){
		.type = IBV_FLOW_ATTR_NORMAL,
		/* Priorities < 3 are reserved for flow director. */
		.priority = init->flow_priority + 3,
		offset -= init->flow_spec.hdr.size;
		memcpy((void *)((uintptr_t)flow_attr + offset),
			init->flow_spec.hdr.size);
		++flow_attr->num_of_specs;
		init = init->underlayer;
	} while (init != NULL);
	return flow_attr_size;
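/*
 * Illustrative two-pass usage of priv_flow_attr() (a sketch, not taken from
 * the actual callers): the required size can be queried with a zero-sized
 * buffer first, then the call repeated with a large enough allocation.
 *
 *	size_t size = priv_flow_attr(priv, NULL, 0, HASH_RXQ_TCPV4);
 *	struct ibv_flow_attr *attr = rte_malloc(__func__, size, 0);
 *
 *	if (attr != NULL) {
 *		priv_flow_attr(priv, attr, size, HASH_RXQ_TCPV4);
 *		...
 *	}
 */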
 * Convert hash type position in indirection table initializer to
 * hash RX queue type.
 *   Indirection table initializer.
 *   Hash type position.
 *   Hash RX queue type.
static enum hash_rxq_type
hash_rxq_type_from_pos(const struct ind_table_init *table, unsigned int pos)
	enum hash_rxq_type type = HASH_RXQ_TCPV4;

	assert(pos < table->hash_types_n);
		if ((table->hash_types & (1 << type)) && (pos-- == 0))

 * Filter out disabled hash RX queue types from ind_table_init[].
 *   Pointer to private structure.
 *   Number of table entries.
priv_make_ind_table_init(struct priv *priv,
			 struct ind_table_init (*table)[IND_TABLE_INIT_N])
	unsigned int table_n = 0;
	/* Mandatory to receive frames not handled by normal hash RX queues. */
	unsigned int hash_types_sup = 1 << HASH_RXQ_ETH;

	rss_hf = priv->rss_hf;
	/* Process other protocols only if more than one queue. */
	if (priv->rxqs_n > 1)
		for (i = 0; (i != hash_rxq_init_n); ++i)
			if (rss_hf & hash_rxq_init[i].dpdk_rss_hf)
				hash_types_sup |= (1 << i);
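	/*
	 * Example (illustrative only): with rss_hf set to
	 * ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP,
	 * hash_types_sup now holds the HASH_RXQ_TCPV4 and HASH_RXQ_UDPV4
	 * bits in addition to the always-present HASH_RXQ_ETH bit.
	 */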
	/* Filter out entries whose protocols are not in the set. */
	for (i = 0, j = 0; (i != IND_TABLE_INIT_N); ++i) {
		/* j is increased only if the table has valid protocols. */
		(*table)[j] = ind_table_init[i];
		(*table)[j].hash_types &= hash_types_sup;
		for (h = 0, nb = 0; (h != hash_rxq_init_n); ++h)
			if (((*table)[j].hash_types >> h) & 0x1)
				++nb;
		/* Store the bit count in the compacted entry at index j. */
		(*table)[j].hash_types_n = nb;
 * Initialize hash RX queues and indirection table.
 *   Pointer to private structure.
 *   0 on success, errno value on failure.
priv_create_hash_rxqs(struct priv *priv)
	struct ibv_wq *wqs[priv->reta_idx_n];
	struct ind_table_init ind_table_init[IND_TABLE_INIT_N];
	unsigned int ind_tables_n =
		priv_make_ind_table_init(priv, &ind_table_init);
	unsigned int hash_rxqs_n = 0;
	struct hash_rxq (*hash_rxqs)[] = NULL;
	struct ibv_rwq_ind_table *(*ind_tables)[] = NULL;

	assert(priv->ind_tables == NULL);
	assert(priv->ind_tables_n == 0);
	assert(priv->hash_rxqs == NULL);
	assert(priv->hash_rxqs_n == 0);
	assert(priv->pd != NULL);
	assert(priv->ctx != NULL);
	if (priv->rxqs_n == 0)
	assert(priv->rxqs != NULL);
	if (ind_tables_n == 0) {
		ERROR("all hash RX queue types have been filtered out,"
			" indirection table cannot be created");
	if (priv->rxqs_n & (priv->rxqs_n - 1)) {
		INFO("%u RX queues are configured, consider rounding this"
			" number to the next power of two for better balancing",
		DEBUG("indirection table extended to assume %u WQs",
	for (i = 0; (i != priv->reta_idx_n); ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl;

		rxq_ctrl = container_of((*priv->rxqs)[(*priv->reta_idx)[i]],
					struct mlx5_rxq_ctrl, rxq);
		wqs[i] = rxq_ctrl->wq;
	/* Get number of hash RX queues to configure. */
	for (i = 0, hash_rxqs_n = 0; (i != ind_tables_n); ++i)
		hash_rxqs_n += ind_table_init[i].hash_types_n;
	DEBUG("allocating %u hash RX queues for %u WQs, %u indirection tables",
		hash_rxqs_n, priv->rxqs_n, ind_tables_n);
	/* Create indirection tables. */
	ind_tables = rte_calloc(__func__, ind_tables_n,
				sizeof((*ind_tables)[0]), 0);
	if (ind_tables == NULL) {
		ERROR("cannot allocate indirection tables container: %s",
	for (i = 0; (i != ind_tables_n); ++i) {
		struct ibv_rwq_ind_table_init_attr ind_init_attr = {
			.log_ind_tbl_size = 0, /* Set below. */
		unsigned int ind_tbl_size = ind_table_init[i].max_size;
		struct ibv_rwq_ind_table *ind_table;

		if (priv->reta_idx_n < ind_tbl_size)
			ind_tbl_size = priv->reta_idx_n;
		ind_init_attr.log_ind_tbl_size = log2above(ind_tbl_size);
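		/*
		 * Verbs takes the table size as a log2, so log2above()
		 * rounds a size that is not a power of two up to the
		 * next one.
		 */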
		ind_table = ibv_create_rwq_ind_table(priv->ctx,
		if (ind_table != NULL) {
			(*ind_tables)[i] = ind_table;
		/* Not clear whether errno is set. */
		err = (errno ? errno : EINVAL);
		ERROR("RX indirection table creation failed with error %d: %s",
	/* Allocate array that holds hash RX queues and related data. */
	hash_rxqs = rte_calloc(__func__, hash_rxqs_n,
				sizeof((*hash_rxqs)[0]), 0);
	if (hash_rxqs == NULL) {
		ERROR("cannot allocate hash RX queues container: %s",
	for (i = 0, j = 0, k = 0;
	     ((i != hash_rxqs_n) && (j != ind_tables_n));
		struct hash_rxq *hash_rxq = &(*hash_rxqs)[i];
		enum hash_rxq_type type =
			hash_rxq_type_from_pos(&ind_table_init[j], k);
		struct rte_eth_rss_conf *priv_rss_conf =
			(*priv->rss_conf)[type];
		struct ibv_rx_hash_conf hash_conf = {
			.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
			.rx_hash_key_len = (priv_rss_conf ?
					priv_rss_conf->rss_key_len :
					rss_hash_default_key_len),
			.rx_hash_key = (priv_rss_conf ?
					priv_rss_conf->rss_key :
					rss_hash_default_key),
			.rx_hash_fields_mask = hash_rxq_init[type].hash_fields,
		struct ibv_qp_init_attr_ex qp_init_attr = {
			.qp_type = IBV_QPT_RAW_PACKET,
			.comp_mask = (IBV_QP_INIT_ATTR_PD |
					IBV_QP_INIT_ATTR_IND_TABLE |
					IBV_QP_INIT_ATTR_RX_HASH),
			.rx_hash_conf = hash_conf,
			.rwq_ind_tbl = (*ind_tables)[j],

		DEBUG("using indirection table %u for hash RX queue %u type %d",
		*hash_rxq = (struct hash_rxq){
			.qp = ibv_create_qp_ex(priv->ctx, &qp_init_attr),
		if (hash_rxq->qp == NULL) {
			err = (errno ? errno : EINVAL);
			ERROR("Hash RX QP creation failure: %s",
		if (++k < ind_table_init[j].hash_types_n)
		/* Switch to the next indirection table and reset hash RX
		 * queue type array index. */
	priv->ind_tables = ind_tables;
	priv->ind_tables_n = ind_tables_n;
	priv->hash_rxqs = hash_rxqs;
	priv->hash_rxqs_n = hash_rxqs_n;

	if (hash_rxqs != NULL) {
		for (i = 0; (i != hash_rxqs_n); ++i) {
			struct ibv_qp *qp = (*hash_rxqs)[i].qp;

			claim_zero(ibv_destroy_qp(qp));
	if (ind_tables != NULL) {
		for (j = 0; (j != ind_tables_n); ++j) {
			struct ibv_rwq_ind_table *ind_table =

			if (ind_table == NULL)
			claim_zero(ibv_destroy_rwq_ind_table(ind_table));
	rte_free(ind_tables);

 * Clean up hash RX queues and indirection table.
 *   Pointer to private structure.
priv_destroy_hash_rxqs(struct priv *priv)
	DEBUG("destroying %u hash RX queues", priv->hash_rxqs_n);
	if (priv->hash_rxqs_n == 0) {
		assert(priv->hash_rxqs == NULL);
		assert(priv->ind_tables == NULL);
	for (i = 0; (i != priv->hash_rxqs_n); ++i) {
		struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i];

		assert(hash_rxq->priv == priv);
		assert(hash_rxq->qp != NULL);
		/* Also check that there are no remaining flows. */
		for (j = 0; (j != RTE_DIM(hash_rxq->special_flow)); ++j)
			     (k != RTE_DIM(hash_rxq->special_flow[j]));
				assert(hash_rxq->special_flow[j][k] == NULL);
		for (j = 0; (j != RTE_DIM(hash_rxq->mac_flow)); ++j)
			for (k = 0; (k != RTE_DIM(hash_rxq->mac_flow[j])); ++k)
				assert(hash_rxq->mac_flow[j][k] == NULL);
		claim_zero(ibv_destroy_qp(hash_rxq->qp));
	priv->hash_rxqs_n = 0;
	rte_free(priv->hash_rxqs);
	priv->hash_rxqs = NULL;
	for (i = 0; (i != priv->ind_tables_n); ++i) {
		struct ibv_rwq_ind_table *ind_table =
			(*priv->ind_tables)[i];

		assert(ind_table != NULL);
		claim_zero(ibv_destroy_rwq_ind_table(ind_table));
	priv->ind_tables_n = 0;
	rte_free(priv->ind_tables);
	priv->ind_tables = NULL;

 * Check whether a given flow type is allowed.
 *   Pointer to private structure.
 *   Flow type to check.
 *   Nonzero if the given flow type is allowed.
priv_allow_flow_type(struct priv *priv, enum hash_rxq_flow_type type)
	/* Only FLOW_TYPE_PROMISC is allowed when promiscuous mode
	 * has been requested. */
	if (priv->promisc_req)
		return type == HASH_RXQ_FLOW_TYPE_PROMISC;
	case HASH_RXQ_FLOW_TYPE_PROMISC:
		return !!priv->promisc_req;
	case HASH_RXQ_FLOW_TYPE_ALLMULTI:
		return !!priv->allmulti_req;
	case HASH_RXQ_FLOW_TYPE_BROADCAST:
	case HASH_RXQ_FLOW_TYPE_IPV6MULTI:
		/* If allmulti is enabled, broadcast and ipv6multi
		 * are unnecessary. */
		return !priv->allmulti_req;
	case HASH_RXQ_FLOW_TYPE_MAC:
		/* Unsupported flow type is not allowed. */

 * Automatically enable/disable flows according to configuration.
 *   0 on success, errno value on failure.
priv_rehash_flows(struct priv *priv)
	enum hash_rxq_flow_type i;

	for (i = HASH_RXQ_FLOW_TYPE_PROMISC;
	     i != RTE_DIM((*priv->hash_rxqs)[0].special_flow);
		if (!priv_allow_flow_type(priv, i)) {
			priv_special_flow_disable(priv, i);
			int ret = priv_special_flow_enable(priv, i);

	if (priv_allow_flow_type(priv, HASH_RXQ_FLOW_TYPE_MAC))
		return priv_mac_addrs_enable(priv);
	priv_mac_addrs_disable(priv);

 * Allocate RX queue elements.
 *   Pointer to RX queue structure.
 *   Number of elements to allocate.
 *   0 on success, errno value on failure.
rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl, unsigned int elts_n)
	const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;

	/* Iterate on segments. */
	for (i = 0; (i != elts_n); ++i) {
		struct rte_mbuf *buf;
		volatile struct mlx5_wqe_data_seg *scat =
			&(*rxq_ctrl->rxq.wqes)[i];

		buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
			ERROR("%p: empty mbuf pool", (void *)rxq_ctrl);
		/* Headroom is reserved by rte_pktmbuf_alloc(). */
		assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
		/* Buffer is supposed to be empty. */
		assert(rte_pktmbuf_data_len(buf) == 0);
		assert(rte_pktmbuf_pkt_len(buf) == 0);
		/* Only the first segment keeps headroom. */
			SET_DATA_OFF(buf, 0);
		PORT(buf) = rxq_ctrl->rxq.port_id;
		DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
		PKT_LEN(buf) = DATA_LEN(buf);
		/* scat->addr must be able to store a pointer. */
		assert(sizeof(scat->addr) >= sizeof(uintptr_t));
		*scat = (struct mlx5_wqe_data_seg){
			rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t)),
			.byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
			.lkey = rxq_ctrl->mr->lkey,
		(*rxq_ctrl->rxq.elts)[i] = buf;
	if (rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
		struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
		struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;

		assert(rxq->elts_n == rxq->cqe_n);
		/* Initialize default rearm_data for vPMD. */
		mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
		rte_mbuf_refcnt_set(mbuf_init, 1);
		mbuf_init->nb_segs = 1;
		mbuf_init->port = rxq->port_id;
		 * prevent compiler reordering:
		 * rearm_data covers previous fields.
		rte_compiler_barrier();
		rxq->mbuf_initializer = *(uint64_t *)&mbuf_init->rearm_data;
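		/*
		 * The vectorized Rx path reuses this 64-bit template to
		 * reset rearm_data (data_off, refcnt, nb_segs, port) of
		 * every received mbuf with a single store instead of
		 * writing each field separately.
		 */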
		/* Padding with a fake mbuf for vectorized Rx. */
		for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
			(*rxq->elts)[elts_n + i] = &rxq->fake_mbuf;
	DEBUG("%p: allocated and configured %u segments (max %u packets)",
		(void *)rxq_ctrl, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n));
	for (i = 0; (i != elts_n); ++i) {
		if ((*rxq_ctrl->rxq.elts)[i] != NULL)
			rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
		(*rxq_ctrl->rxq.elts)[i] = NULL;
	DEBUG("%p: failed, freed everything", (void *)rxq_ctrl);

 * Free RX queue elements.
 *   Pointer to RX queue structure.
rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
	const uint16_t q_n = (1 << rxq->elts_n);
	const uint16_t q_mask = q_n - 1;
	uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);

	DEBUG("%p: freeing WRs", (void *)rxq_ctrl);
	if (rxq->elts == NULL)
	 * Some mbufs in the ring still belong to the application; they cannot be
	if (rxq_check_vec_support(rxq) > 0) {
		for (i = 0; i < used; ++i)
			(*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
		rxq->rq_pi = rxq->rq_ci;
	for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
		if ((*rxq->elts)[i] != NULL)
			rte_pktmbuf_free_seg((*rxq->elts)[i]);
		(*rxq->elts)[i] = NULL;

 * Clean up a RX queue.
 * Destroy objects, free allocated memory and reset the structure for reuse.
 *   Pointer to RX queue structure.
mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
	DEBUG("cleaning up %p", (void *)rxq_ctrl);
	rxq_free_elts(rxq_ctrl);
	if (rxq_ctrl->wq != NULL)
		claim_zero(ibv_destroy_wq(rxq_ctrl->wq));
	if (rxq_ctrl->cq != NULL)
		claim_zero(ibv_destroy_cq(rxq_ctrl->cq));
	if (rxq_ctrl->channel != NULL)
		claim_zero(ibv_destroy_comp_channel(rxq_ctrl->channel));
	if (rxq_ctrl->mr != NULL)
		priv_mr_release(rxq_ctrl->priv, rxq_ctrl->mr);
	memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));

 * Initialize RX queue.
 *   Pointer to RX queue control template.
 *   0 on success, errno value on failure.
rxq_setup(struct mlx5_rxq_ctrl *tmpl)
	struct ibv_cq *ibcq = tmpl->cq;
	struct mlx5dv_cq cq_info;
	struct mlx5dv_rwq rwq;
	const uint16_t desc_n =
		(1 << tmpl->rxq.elts_n) + tmpl->priv->rx_vec_en *
		MLX5_VPMD_DESCS_PER_LOOP;
	struct rte_mbuf *(*elts)[desc_n] =
		rte_calloc_socket("RXQ", 1, sizeof(*elts), 0, tmpl->socket);
	struct mlx5dv_obj obj;

	obj.cq.out = &cq_info;
	obj.rwq.in = tmpl->wq;
	ret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);
	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
		ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
			"it should be set to %u", RTE_CACHE_LINE_SIZE);
	tmpl->rxq.rq_db = rwq.dbrec;
	tmpl->rxq.cqe_n = log2above(cq_info.cqe_cnt);
	tmpl->rxq.cq_db = cq_info.dbrec;
		(volatile struct mlx5_wqe_data_seg (*)[])
		(volatile struct mlx5_cqe (*)[])
		(uintptr_t)cq_info.buf;
	tmpl->rxq.elts = elts;
	tmpl->rxq.cq_uar = cq_info.cq_uar;
	tmpl->rxq.cqn = cq_info.cqn;
	tmpl->rxq.cq_arm_sn = 0;

 * Configure a RX queue.
 *   Pointer to Ethernet device structure.
 *   Pointer to RX queue structure.
 *   Number of descriptors to configure in queue.
 *   NUMA socket on which memory must be allocated.
 *   Thresholds parameters.
 *   Memory pool for buffer allocations.
 *   0 on success, errno value on failure.
rxq_ctrl_setup(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl,
		uint16_t desc, unsigned int socket,
		const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
	struct priv *priv = dev->data->dev_private;
	struct mlx5_rxq_ctrl tmpl = {
			.elts_n = log2above(desc),
			.rss_hash = priv->rxqs_n > 1,
	struct ibv_wq_attr mod;
		struct ibv_cq_init_attr_ex cq;
		struct ibv_wq_init_attr wq;
		struct ibv_cq_ex cq_attr;
	unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
	unsigned int cqe_n = desc - 1;
	const uint16_t desc_n =
		desc + priv->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
	struct rte_mbuf *(*elts)[desc_n] = NULL;

	(void)conf; /* Thresholds configuration (ignored). */
	/* Enable scattered packets support for this queue if necessary. */
	assert(mb_len >= RTE_PKTMBUF_HEADROOM);
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
	} else if (dev->data->dev_conf.rxmode.enable_scatter) {
			RTE_PKTMBUF_HEADROOM +
			dev->data->dev_conf.rxmode.max_rx_pkt_len;
		 * Determine the number of SGEs needed for a full packet
		 * and round it to the next power of two.
		sges_n = log2above((size / mb_len) + !!(size % mb_len));
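		/*
		 * Worked example (illustrative, assuming the default 128-byte
		 * RTE_PKTMBUF_HEADROOM): with a 2176-byte mbuf data room and
		 * max_rx_pkt_len = 9000, size = 9128 and 9128 / 2176 = 4 full
		 * mbufs plus a remainder, i.e. 5 SGEs, rounded up by
		 * log2above() to 8 (sges_n = 3).
		 */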
		tmpl.rxq.sges_n = sges_n;
		/* Make sure rxq.sges_n did not overflow. */
		size = mb_len * (1 << tmpl.rxq.sges_n);
		size -= RTE_PKTMBUF_HEADROOM;
		if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
			ERROR("%p: too many SGEs (%u) needed to handle"
				" requested maximum packet size %u",
				dev->data->dev_conf.rxmode.max_rx_pkt_len);
		WARN("%p: the requested maximum Rx packet size (%u) is"
			" larger than a single mbuf (%u) and scattered"
			" mode has not been requested",
			dev->data->dev_conf.rxmode.max_rx_pkt_len,
			mb_len - RTE_PKTMBUF_HEADROOM);
	DEBUG("%p: maximum number of segments per packet: %u",
		(void *)dev, 1 << tmpl.rxq.sges_n);
	if (desc % (1 << tmpl.rxq.sges_n)) {
		ERROR("%p: number of RX queue descriptors (%u) is not a"
			" multiple of SGEs per packet (%u)",
			1 << tmpl.rxq.sges_n);
	/* Toggle RX checksum offload if hardware supports it. */
		tmpl.rxq.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
	if (priv->hw_csum_l2tun)
		tmpl.rxq.csum_l2tun =
			!!dev->data->dev_conf.rxmode.hw_ip_checksum;
	/* Use the entire RX mempool as the memory region. */
	tmpl.mr = priv_mr_get(priv, mp);
	if (tmpl.mr == NULL) {
		tmpl.mr = priv_mr_new(priv, mp);
		if (tmpl.mr == NULL) {
			ERROR("%p: MR creation failure: %s",
				(void *)dev, strerror(ret));
	if (dev->data->dev_conf.intr_conf.rxq) {
		tmpl.channel = ibv_create_comp_channel(priv->ctx);
		if (tmpl.channel == NULL) {
			ERROR("%p: Rx interrupt completion channel creation"
				(void *)dev, strerror(ret));
	attr.cq = (struct ibv_cq_init_attr_ex){
	if (priv->cqe_comp) {
		attr.cq.comp_mask |= IBV_CQ_INIT_ATTR_MASK_FLAGS;
		attr.cq.flags |= MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
		 * For vectorized Rx, it must not be doubled in order to
		 * make cq_ci and rq_ci aligned.
		if (rxq_check_vec_support(&tmpl.rxq) < 0)
			cqe_n = (desc * 2) - 1; /* Double the number of CQEs. */
	tmpl.cq = ibv_create_cq(priv->ctx, cqe_n, NULL, tmpl.channel, 0);
	if (tmpl.cq == NULL) {
		ERROR("%p: CQ creation failure: %s",
			(void *)dev, strerror(ret));
	DEBUG("priv->device_attr.max_qp_wr is %d",
		priv->device_attr.orig_attr.max_qp_wr);
	DEBUG("priv->device_attr.max_sge is %d",
		priv->device_attr.orig_attr.max_sge);
	/* Configure VLAN stripping. */
	tmpl.rxq.vlan_strip = (priv->hw_vlan_strip &&
			!!dev->data->dev_conf.rxmode.hw_vlan_strip);
	attr.wq = (struct ibv_wq_init_attr){
		.wq_context = NULL, /* Could be useful in the future. */
		.wq_type = IBV_WQT_RQ,
		/* Max number of outstanding WRs. */
		.max_wr = desc >> tmpl.rxq.sges_n,
		/* Max number of scatter/gather elements in a WR. */
		.max_sge = 1 << tmpl.rxq.sges_n,
			IBV_WQ_FLAGS_CVLAN_STRIPPING |
		.create_flags = (tmpl.rxq.vlan_strip ?
				IBV_WQ_FLAGS_CVLAN_STRIPPING :
	/* By default, FCS (CRC) is stripped by hardware. */
	if (dev->data->dev_conf.rxmode.hw_strip_crc) {
		tmpl.rxq.crc_present = 0;
	} else if (priv->hw_fcs_strip) {
		/* Ask HW/Verbs to leave CRC in place when supported. */
		attr.wq.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
		attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
		tmpl.rxq.crc_present = 1;
		WARN("%p: CRC stripping has been disabled but will still"
			" be performed by hardware, make sure MLNX_OFED and"
			" firmware are up to date",
		tmpl.rxq.crc_present = 0;
	DEBUG("%p: CRC stripping is %s, %u bytes will be subtracted from"
		" incoming frames to hide it",
		tmpl.rxq.crc_present ? "disabled" : "enabled",
		tmpl.rxq.crc_present << 2);
#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
	if (!mlx5_getenv_int("MLX5_PMD_ENABLE_PADDING"))
		; /* Nothing else to do. */
	else if (priv->hw_padding) {
		INFO("%p: enabling packet padding on queue %p",
			(void *)dev, (void *)rxq_ctrl);
		attr.wq.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
		attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
		WARN("%p: packet padding has been requested but is not"
			" supported, make sure MLNX_OFED and firmware are"
	tmpl.wq = ibv_create_wq(priv->ctx, &attr.wq);
	if (tmpl.wq == NULL) {
		ret = (errno ? errno : EINVAL);
		ERROR("%p: WQ creation failure: %s",
			(void *)dev, strerror(ret));
	 * Make sure number of WRs*SGEs match expectations since a queue
	 * cannot allocate more than "desc" buffers.
	if (((int)attr.wq.max_wr != (desc >> tmpl.rxq.sges_n)) ||
	    ((int)attr.wq.max_sge != (1 << tmpl.rxq.sges_n))) {
		ERROR("%p: requested %u*%u but got %u*%u WRs*SGEs",
			(desc >> tmpl.rxq.sges_n), (1 << tmpl.rxq.sges_n),
			attr.wq.max_wr, attr.wq.max_sge);
	tmpl.rxq.port_id = dev->data->port_id;
	DEBUG("%p: RTE port ID: %u", (void *)rxq_ctrl, tmpl.rxq.port_id);
	/* Change queue state to ready. */
	mod = (struct ibv_wq_attr){
		.attr_mask = IBV_WQ_ATTR_STATE,
		.wq_state = IBV_WQS_RDY,
	ret = ibv_modify_wq(tmpl.wq, &mod);
		ERROR("%p: WQ state to IBV_WQS_RDY failed: %s",
			(void *)dev, strerror(ret));
	ret = rxq_setup(&tmpl);
		ERROR("%p: cannot initialize RX queue structure: %s",
			(void *)dev, strerror(ret));
	ret = rxq_alloc_elts(&tmpl, desc);
		ERROR("%p: RXQ allocation failed: %s",
			(void *)dev, strerror(ret));
	/* Clean up rxq in case we're reinitializing it. */
	DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq_ctrl);
	mlx5_rxq_cleanup(rxq_ctrl);
	/* Move mbuf pointers to dedicated storage area in RX queue. */
	elts = (void *)(rxq_ctrl + 1);
	rte_memcpy(elts, tmpl.rxq.elts, sizeof(*elts));
	memset(tmpl.rxq.elts, 0x55, sizeof(*elts));
	rte_free(tmpl.rxq.elts);
	tmpl.rxq.elts = elts;
	/* Update doorbell counter. */
	rxq_ctrl->rxq.rq_ci = desc >> rxq_ctrl->rxq.sges_n;
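	/* rq_ci counts WRs; each WR spans 1 << sges_n segments, hence the shift. */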
	*rxq_ctrl->rxq.rq_db = rte_cpu_to_be_32(rxq_ctrl->rxq.rq_ci);
	DEBUG("%p: rxq updated with %p", (void *)rxq_ctrl, (void *)&tmpl);
	elts = tmpl.rxq.elts;
	mlx5_rxq_cleanup(&tmpl);

 * DPDK callback to configure a RX queue.
 *   Pointer to Ethernet device structure.
 *   Number of descriptors to configure in queue.
 *   NUMA socket on which memory must be allocated.
 *   Thresholds parameters.
 *   Memory pool for buffer allocations.
 *   0 on success, negative errno value on failure.
mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		unsigned int socket, const struct rte_eth_rxconf *conf,
		struct rte_mempool *mp)
	struct priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	const uint16_t desc_n =
		desc + priv->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;

	if (mlx5_is_secondary())
		return -E_RTE_SECONDARY;
	if (!rte_is_power_of_2(desc)) {
		desc = 1 << log2above(desc);
		WARN("%p: increased number of descriptors in RX queue %u"
			" to the next power of two (%d)",
			(void *)dev, idx, desc);
	DEBUG("%p: configuring queue %u for %u descriptors",
		(void *)dev, idx, desc);
	if (idx >= priv->rxqs_n) {
		ERROR("%p: queue index out of range (%u >= %u)",
			(void *)dev, idx, priv->rxqs_n);
		DEBUG("%p: reusing already allocated queue index %u (%p)",
			(void *)dev, idx, (void *)rxq);
		if (dev->data->dev_started) {
		(*priv->rxqs)[idx] = NULL;
		mlx5_rxq_cleanup(rxq_ctrl);
		/* Resize if rxq size is changed. */
		if (rxq_ctrl->rxq.elts_n != log2above(desc)) {
			rxq_ctrl = rte_realloc(rxq_ctrl,
					sizeof(*rxq_ctrl) + desc_n *
					sizeof(struct rte_mbuf *),
					RTE_CACHE_LINE_SIZE);
				ERROR("%p: unable to reallocate queue index %u",
		rxq_ctrl = rte_calloc_socket("RXQ", 1, sizeof(*rxq_ctrl) +
					sizeof(struct rte_mbuf *),
		if (rxq_ctrl == NULL) {
			ERROR("%p: unable to allocate queue index %u",
	ret = rxq_ctrl_setup(dev, rxq_ctrl, desc, socket, conf, mp);
	rxq_ctrl->rxq.stats.idx = idx;
	DEBUG("%p: adding RX queue %p to list",
		(void *)dev, (void *)rxq_ctrl);
	(*priv->rxqs)[idx] = &rxq_ctrl->rxq;

 * DPDK callback to release a RX queue.
 *   Generic RX queue pointer.
mlx5_rx_queue_release(void *dpdk_rxq)
	struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
	struct mlx5_rxq_ctrl *rxq_ctrl;

	if (mlx5_is_secondary())
	rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	priv = rxq_ctrl->priv;
	if (priv_flow_rxq_in_use(priv, rxq))
		rte_panic("Rx queue %p is still used by a flow and cannot be"
			" removed\n", (void *)rxq_ctrl);
	for (i = 0; (i != priv->rxqs_n); ++i)
		if ((*priv->rxqs)[i] == rxq) {
			DEBUG("%p: removing RX queue %p from list",
				(void *)priv->dev, (void *)rxq_ctrl);
			(*priv->rxqs)[i] = NULL;
	mlx5_rxq_cleanup(rxq_ctrl);

 * Allocate queue vector and fill epoll fd list for Rx interrupts.
 *   Pointer to private structure.
 *   0 on success, negative on failure.
priv_rx_intr_vec_enable(struct priv *priv)
	unsigned int rxqs_n = priv->rxqs_n;
	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
	unsigned int count = 0;
	struct rte_intr_handle *intr_handle = priv->dev->intr_handle;

	assert(!mlx5_is_secondary());
	if (!priv->dev->data->dev_conf.intr_conf.rxq)
	priv_rx_intr_vec_disable(priv);
	intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
	if (intr_handle->intr_vec == NULL) {
		ERROR("failed to allocate memory for interrupt vector,"
			" Rx interrupts will not be supported");
	intr_handle->type = RTE_INTR_HANDLE_EXT;
	for (i = 0; i != n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of(rxq, struct mlx5_rxq_ctrl, rxq);

		/* Skip queues that cannot request interrupts. */
		if (!rxq || !rxq_ctrl->channel) {
			/* Use invalid intr_vec[] index to disable entry. */
			intr_handle->intr_vec[i] =
				RTE_INTR_VEC_RXTX_OFFSET +
				RTE_MAX_RXTX_INTR_VEC_ID;
		if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
			ERROR("too many Rx queues for interrupt vector size"
				" (%d), Rx interrupts cannot be enabled",
				RTE_MAX_RXTX_INTR_VEC_ID);
			priv_rx_intr_vec_disable(priv);
		fd = rxq_ctrl->channel->fd;
		flags = fcntl(fd, F_GETFL);
		rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
			ERROR("failed to make Rx interrupt file descriptor"
				" %d non-blocking for queue index %d", fd, i);
			priv_rx_intr_vec_disable(priv);
		intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
		intr_handle->efds[count] = fd;
		priv_rx_intr_vec_disable(priv);
		intr_handle->nb_efd = count;

 * Clean up Rx interrupts handler.
 *   Pointer to private structure.
priv_rx_intr_vec_disable(struct priv *priv)
	struct rte_intr_handle *intr_handle = priv->dev->intr_handle;

	rte_intr_free_epoll_fd(intr_handle);
	free(intr_handle->intr_vec);
	intr_handle->nb_efd = 0;
	intr_handle->intr_vec = NULL;
 * MLX5 CQ notification.
 *   Pointer to receive queue structure.
 *   Sequence number per receive queue.
mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
	uint32_t doorbell_hi;
	void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
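	/*
	 * Doorbell layout: the upper 32 bits combine the arm sequence number
	 * with the current CQ consumer index, the lower 32 bits carry the CQ
	 * number. The high word is also stored in the CQ doorbell record
	 * before the 64-bit value is written to the UAR register below.
	 */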
	sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
	doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
	doorbell = (uint64_t)doorbell_hi << 32;
	doorbell |= rxq->cqn;
	rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
	rte_write64(rte_cpu_to_be_64(doorbell), cq_db_reg);

 * DPDK callback for Rx queue interrupt enable.
 *   Pointer to Ethernet device structure.
 * @param rx_queue_id
 *   0 on success, negative on failure.
mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
	struct priv *priv = mlx5_get_priv(dev);
	struct mlx5_rxq_data *rxq = (*priv->rxqs)[rx_queue_id];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);

	if (!rxq || !rxq_ctrl->channel) {
	mlx5_arm_cq(rxq, rxq->cq_arm_sn);
		WARN("unable to arm interrupt on rx queue %d", rx_queue_id);

 * DPDK callback for Rx queue interrupt disable.
 *   Pointer to Ethernet device structure.
 * @param rx_queue_id
 *   0 on success, negative on failure.
mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
	struct priv *priv = mlx5_get_priv(dev);
	struct mlx5_rxq_data *rxq = (*priv->rxqs)[rx_queue_id];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	struct ibv_cq *ev_cq;

	if (!rxq || !rxq_ctrl->channel) {
	ret = ibv_get_cq_event(rxq_ctrl->cq->channel, &ev_cq, &ev_ctx);
	if (ret || ev_cq != rxq_ctrl->cq)
		WARN("unable to disable interrupt on rx queue %d",
	ibv_ack_cq_events(rxq_ctrl->cq, 1);