/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of 6WIND S.A. nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file
 * Rx queues configuration for mlx4 driver.
 */

#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/mlx4dv.h>
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_flow.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>
65 #include "mlx4_flow.h"
66 #include "mlx4_rxtx.h"
67 #include "mlx4_utils.h"

/**
 * Historical RSS hash key.
 *
 * This used to be the default for mlx4 in Linux before v3.19 switched to
 * generating random hash keys through netdev_rss_key_fill().
 *
 * It is used in this PMD for consistency with past DPDK releases but can
 * now be overridden through user configuration.
 *
 * Note: this is not const to work around API quirks.
 */
uint8_t
mlx4_rss_hash_key_default[MLX4_RSS_HASH_KEY_SIZE] = {
	0x2c, 0xc6, 0x81, 0xd1,
	0x5b, 0xdb, 0xf4, 0xf7,
	0xfc, 0xa2, 0x83, 0x19,
	0xdb, 0x1a, 0x3e, 0x94,
	0x6b, 0x9e, 0x38, 0xd9,
	0x2c, 0x9c, 0x03, 0xd1,
	0xad, 0x99, 0x44, 0xa7,
	0xd9, 0x56, 0x3d, 0x59,
	0x06, 0x3c, 0x25, 0xf3,
	0xfc, 0x1f, 0xdc, 0x2a,
};
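
/*
 * Example (sketch, not from this file): an application can provide its own
 * 40-byte Toeplitz key through the standard rte_eth_rss_conf structure,
 * e.g.:
 *
 *	static uint8_t rss_key[MLX4_RSS_HASH_KEY_SIZE] = { ... };
 *	struct rte_eth_rss_conf rss_conf = {
 *		.rss_key = rss_key,
 *		.rss_key_len = MLX4_RSS_HASH_KEY_SIZE,
 *	};
 *
 * Exact plumbing from this structure to mlx4_rss_get() depends on the
 * release; the struct and field names are from the public ethdev API.
 */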

/**
 * Obtain a RSS context with specified properties.
 *
 * Used when creating a flow rule targeting one or several Rx queues.
 *
 * If a matching RSS context already exists, it is returned with its
 * reference count incremented.
 *
 * @param priv
 *   Pointer to private structure.
 * @param fields
 *   Fields for RSS processing (Verbs format).
 * @param[in] key
 *   Hash key to use (whose size is exactly MLX4_RSS_HASH_KEY_SIZE).
 * @param queues
 *   Number of target queues.
 * @param[in] queue_id
 *   Target queues.
 *
 * @return
 *   Pointer to RSS context on success, NULL otherwise and rte_errno is set.
 */
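/*
 * Typical life cycle (sketch): a flow rule obtains a context once with
 * mlx4_rss_get(), brackets each enable/disable with mlx4_rss_attach()
 * and mlx4_rss_detach(), and drops its reference with mlx4_rss_put():
 *
 *	struct mlx4_rss *rss =
 *		mlx4_rss_get(priv, fields, key, queues, queue_id);
 *
 *	if (rss && !mlx4_rss_attach(rss)) {
 *		// rule enabled, traffic flows
 *		mlx4_rss_detach(rss);
 *	}
 *	if (rss)
 *		mlx4_rss_put(rss);
 */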
struct mlx4_rss *
mlx4_rss_get(struct priv *priv, uint64_t fields,
	     uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
	     uint16_t queues, const uint16_t queue_id[])
{
	struct mlx4_rss *rss;
	size_t queue_id_size = sizeof(queue_id[0]) * queues;

	LIST_FOREACH(rss, &priv->rss, next)
		if (fields == rss->fields &&
		    queues == rss->queues &&
		    !memcmp(key, rss->key, MLX4_RSS_HASH_KEY_SIZE) &&
		    !memcmp(queue_id, rss->queue_id, queue_id_size)) {
			++rss->refcnt;
			return rss;
		}
	rss = rte_malloc(__func__, offsetof(struct mlx4_rss, queue_id) +
			 queue_id_size, 0);
	if (!rss) {
		rte_errno = ENOMEM;
		return NULL;
	}
	*rss = (struct mlx4_rss){
		.priv = priv,
		.refcnt = 1,
		.usecnt = 0,
		.fields = fields,
		.queues = queues,
	};
	memcpy(rss->key, key, MLX4_RSS_HASH_KEY_SIZE);
	memcpy(rss->queue_id, queue_id, queue_id_size);
	LIST_INSERT_HEAD(&priv->rss, rss, next);
	return rss;
}

/**
 * Release a RSS context instance.
 *
 * Used when destroying a flow rule targeting one or several Rx queues.
 *
 * This function decrements the reference count of the context and destroys
 * it after reaching 0. The context must have no users at this point; all
 * prior calls to mlx4_rss_attach() must have been followed by matching
 * calls to mlx4_rss_detach().
 *
 * @param rss
 *   RSS context to release.
 */
void
mlx4_rss_put(struct mlx4_rss *rss)
{
	assert(rss->refcnt);
	if (--rss->refcnt)
		return;
	assert(!rss->usecnt);
	assert(!rss->qp);
	assert(!rss->ind);
	LIST_REMOVE(rss, next);
	rte_free(rss);
}

/**
 * Attach a user to a RSS context instance.
 *
 * Used when the RSS QP and indirection table objects must be instantiated,
 * that is, when a flow rule must be enabled.
 *
 * This function increments the usage count of the context.
 *
 * @param rss
 *   RSS context to attach to.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx4_rss_attach(struct mlx4_rss *rss)
{
	assert(rss->refcnt);
	if (rss->usecnt++) {
		assert(rss->qp);
		assert(rss->ind);
		return 0;
	}

	struct ibv_wq *ind_tbl[rss->queues];
	struct priv *priv = rss->priv;
	const char *msg;
	unsigned int i = 0;
	int ret;

	if (!rte_is_power_of_2(RTE_DIM(ind_tbl))) {
		ret = EINVAL;
		msg = "number of RSS queues must be a power of two";
		goto error;
	}
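	/*
	 * The indirection table size is later expressed as a log2 value
	 * (see log_ind_tbl_size below), so hardware indexes it with the
	 * low bits of the packet hash; a queue count that is not a power
	 * of two cannot be expressed that way.
	 */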
	for (i = 0; i != RTE_DIM(ind_tbl); ++i) {
		uint16_t id = rss->queue_id[i];
		struct rxq *rxq = NULL;

		if (id < priv->dev->data->nb_rx_queues)
			rxq = priv->dev->data->rx_queues[id];
		if (!rxq) {
			ret = EINVAL;
			msg = "RSS target queue is not configured";
			goto error;
		}
		ret = mlx4_rxq_attach(rxq);
		if (ret) {
			ret = -ret;
			msg = "unable to attach RSS target queue";
			goto error;
		}
		ind_tbl[i] = rxq->wq;
	}
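	/*
	 * All target WQs are now instantiated; wrap them in a single
	 * indirection table for the hash QP created below.
	 */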
	rss->ind = ibv_create_rwq_ind_table
		(priv->ctx,
		 &(struct ibv_rwq_ind_table_init_attr){
			.log_ind_tbl_size = rte_log2_u32(RTE_DIM(ind_tbl)),
			.ind_tbl = ind_tbl,
			.comp_mask = 0,
		 });
	if (!rss->ind) {
		ret = errno ? errno : EINVAL;
		msg = "RSS indirection table creation failure";
		goto error;
	}
	rss->qp = ibv_create_qp_ex
		(priv->ctx,
		 &(struct ibv_qp_init_attr_ex){
			.comp_mask = (IBV_QP_INIT_ATTR_PD |
				      IBV_QP_INIT_ATTR_RX_HASH |
				      IBV_QP_INIT_ATTR_IND_TABLE),
			.qp_type = IBV_QPT_RAW_PACKET,
			.pd = priv->pd,
			.rwq_ind_tbl = rss->ind,
			.rx_hash_conf = {
				.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
				.rx_hash_key_len = MLX4_RSS_HASH_KEY_SIZE,
				.rx_hash_key = rss->key,
				.rx_hash_fields_mask = rss->fields,
			},
		 });
	if (!rss->qp) {
		ret = errno ? errno : EINVAL;
		msg = "RSS hash QP creation failure";
		goto error;
	}
267 &(struct ibv_qp_attr){
268 .qp_state = IBV_QPS_INIT,
269 .port_num = priv->port,
271 IBV_QP_STATE | IBV_QP_PORT);
273 msg = "failed to switch RSS hash QP to INIT state";
278 &(struct ibv_qp_attr){
279 .qp_state = IBV_QPS_RTR,
283 msg = "failed to switch RSS hash QP to RTR state";
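	/*
	 * RTR is as far as this QP needs to go: it only ever receives
	 * traffic, so the RTS transition required for send queues does
	 * not apply here.
	 */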
	return 0;
error:
	if (rss->qp) {
		claim_zero(ibv_destroy_qp(rss->qp));
		rss->qp = NULL;
	}
	if (rss->ind) {
		claim_zero(ibv_destroy_rwq_ind_table(rss->ind));
		rss->ind = NULL;
	}
	while (i--)
		mlx4_rxq_detach(priv->dev->data->rx_queues[rss->queue_id[i]]);
	ERROR("mlx4: %s", msg);
	--rss->usecnt;
	rte_errno = ret;
	return -ret;
}

/**
 * Detach a user from a RSS context instance.
 *
 * Used when disabling (not destroying) a flow rule.
 *
 * This function decrements the usage count of the context and destroys
 * usage resources after reaching 0.
 *
 * @param rss
 *   RSS context to detach from.
 */
void
mlx4_rss_detach(struct mlx4_rss *rss)
{
	struct priv *priv = rss->priv;
	unsigned int i;

	assert(rss->refcnt);
	assert(rss->qp);
	assert(rss->ind);
	if (--rss->usecnt)
		return;
	claim_zero(ibv_destroy_qp(rss->qp));
	rss->qp = NULL;
	claim_zero(ibv_destroy_rwq_ind_table(rss->ind));
	rss->ind = NULL;
	for (i = 0; i != rss->queues; ++i)
		mlx4_rxq_detach(priv->dev->data->rx_queues[rss->queue_id[i]]);
}

/**
 * Initialize common RSS context resources.
 *
 * Because ConnectX-3 hardware limitations require a fixed order in the
 * indirection table, WQs must be allocated sequentially to be part of a
 * common RSS context.
 *
 * Since a newly created WQ cannot be moved to a different context, this
 * function allocates them all at once, one for each configured Rx queue,
 * as well as all related resources (CQs and mbufs).
 *
 * This must therefore be done before creating any Rx flow rules relying on
 * indirection tables.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
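/*
 * Illustration (sketch): with four configured Rx queues, the WQ numbers
 * must come out as N, N+1, N+2, N+3 so they can be addressed as one
 * contiguous range; a hole would shift every subsequent indirection
 * table entry, which is why unconfigured queues get placeholder WQs
 * below.
 */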
int
mlx4_rss_init(struct priv *priv)
{
	struct rte_eth_dev *dev = priv->dev;
	uint8_t log2_range = rte_log2_u32(dev->data->nb_rx_queues);
	uint32_t wq_num_prev = 0;
	const char *msg;
	unsigned int i;
	int ret;

	/* Prepare range for RSS contexts before creating the first WQ. */
	ret = mlx4dv_set_context_attr(priv->ctx,
				      MLX4DV_SET_CTX_ATTR_LOG_WQS_RANGE_SZ,
				      &log2_range);
	if (ret) {
		ERROR("cannot set up range size for RSS context to %u"
		      " (for %u Rx queues), error: %s",
		      1 << log2_range, dev->data->nb_rx_queues, strerror(ret));
		rte_errno = ret;
		return -ret;
	}
	for (i = 0; i != priv->dev->data->nb_rx_queues; ++i) {
		struct rxq *rxq = priv->dev->data->rx_queues[i];
		struct ibv_cq *cq;
		struct ibv_wq *wq;
		uint32_t wq_num;

		if (rxq) {
			/* Attach the configured Rx queues. */
			assert(!rxq->usecnt);
			ret = mlx4_rxq_attach(rxq);
			if (!ret) {
				wq_num = rxq->wq->wq_num;
				goto wq_num_check;
			}
			ret = -ret;
			msg = "unable to create Rx queue resources";
			goto error;
		} else {
			/*
			 * WQs are temporarily allocated for unconfigured
			 * Rx queues to maintain proper index alignment in
			 * indirection table by skipping unused WQ numbers.
			 *
			 * The reason this works at all even though these
			 * WQs are immediately destroyed is that WQNs are
			 * allocated sequentially and are guaranteed to
			 * never be reused in the same context by the
			 * underlying implementation.
			 */
			cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
			if (!cq) {
				ret = ENOMEM;
				msg = "placeholder CQ creation failure";
				goto error;
			}
			wq = ibv_create_wq
				(priv->ctx,
				 &(struct ibv_wq_init_attr){
					.wq_type = IBV_WQT_RQ,
					.max_wr = 1,
					.max_sge = 1,
					.pd = priv->pd,
					.cq = cq,
				 });
			if (wq) {
				wq_num = wq->wq_num;
				claim_zero(ibv_destroy_wq(wq));
			} else {
				wq_num = 0; /* Shut up GCC 4.8 warnings. */
			}
			claim_zero(ibv_destroy_cq(cq));
			if (!wq) {
				ret = ENOMEM;
				msg = "placeholder WQ creation failure";
				goto error;
			}
		}
wq_num_check:
		/*
		 * While guaranteed by the implementation, make sure WQ
		 * numbers are really sequential (as the saying goes,
		 * trust, but verify).
		 */
		if (i && wq_num - wq_num_prev != 1) {
			if (rxq)
				mlx4_rxq_detach(rxq);
			ret = ERANGE;
			msg = "WQ numbers are not sequential";
			goto error;
		}
		wq_num_prev = wq_num;
	}
	return 0;
error:
	ERROR("cannot initialize common RSS resources (queue %u): %s: %s",
	      i, msg, strerror(ret));
	while (i--) {
		struct rxq *rxq = priv->dev->data->rx_queues[i];

		if (rxq)
			mlx4_rxq_detach(rxq);
	}
	rte_errno = ret;
	return -ret;
}

/**
 * Release common RSS context resources.
 *
 * As the reverse of mlx4_rss_init(), this must be done after removing all
 * flow rules relying on indirection tables.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
mlx4_rss_deinit(struct priv *priv)
{
	unsigned int i;

	for (i = 0; i != priv->dev->data->nb_rx_queues; ++i) {
		struct rxq *rxq = priv->dev->data->rx_queues[i];

		if (rxq) {
			assert(rxq->usecnt == 1);
			mlx4_rxq_detach(rxq);
		}
	}
}

/**
 * Attach a user to a Rx queue.
 *
 * Used when the resources of an Rx queue must be instantiated for it to
 * become in a usable state.
 *
 * This function increments the usage count of the Rx queue.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
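/*
 * Called both directly by mlx4_rss_init() and indirectly through
 * mlx4_rss_attach(); the usage count keeps the two paths from tearing
 * down each other's resources.
 */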
int
mlx4_rxq_attach(struct rxq *rxq)
{
	if (rxq->usecnt++) {
		assert(rxq->cq);
		assert(rxq->wq);
		assert(rxq->wqes);
		assert(rxq->rq_db);
		return 0;
	}

	struct priv *priv = rxq->priv;
	const uint32_t elts_n = 1 << rxq->elts_n;
	const uint32_t sges_n = 1 << rxq->sges_n;
	struct rte_mbuf *(*elts)[elts_n] = rxq->elts;
	struct mlx4dv_obj mlxdv;
	struct mlx4dv_rwq dv_rwq;
	struct mlx4dv_cq dv_cq;
	const char *msg;
	struct ibv_cq *cq = NULL;
	struct ibv_wq *wq = NULL;
	volatile struct mlx4_wqe_data_seg (*wqes)[];
	unsigned int i;
	int ret;

	assert(rte_is_power_of_2(elts_n));
	/* One CQ entry per posted WQE, i.e. one per packet. */
	cq = ibv_create_cq(priv->ctx, elts_n / sges_n, NULL, rxq->channel, 0);
	if (!cq) {
		ret = ENOMEM;
		msg = "CQ creation failure";
		goto error;
	}
530 &(struct ibv_wq_init_attr){
531 .wq_type = IBV_WQT_RQ,
532 .max_wr = elts_n / sges_n,
538 ret = errno ? errno : EINVAL;
539 msg = "WQ creation failure";
544 &(struct ibv_wq_attr){
545 .attr_mask = IBV_WQ_ATTR_STATE,
546 .wq_state = IBV_WQS_RDY,
549 msg = "WQ state change to IBV_WQS_RDY failed";
	/* Retrieve device queue information. */
	mlxdv.cq.in = cq;
	mlxdv.cq.out = &dv_cq;
	mlxdv.rwq.in = wq;
	mlxdv.rwq.out = &dv_rwq;
	ret = mlx4dv_init_obj(&mlxdv, MLX4DV_OBJ_RWQ | MLX4DV_OBJ_CQ);
	if (ret) {
		msg = "failed to obtain device information from WQ/CQ objects";
		goto error;
	}
	wqes = (volatile struct mlx4_wqe_data_seg (*)[])
		((uintptr_t)dv_rwq.buf.buf + dv_rwq.rq.offset);
	for (i = 0; i != RTE_DIM(*elts); ++i) {
		volatile struct mlx4_wqe_data_seg *scat = &(*wqes)[i];
		struct rte_mbuf *buf = rte_pktmbuf_alloc(rxq->mp);

		if (!buf) {
			while (i--) {
				rte_pktmbuf_free_seg((*elts)[i]);
				(*elts)[i] = NULL;
			}
			ret = ENOMEM;
			msg = "cannot allocate mbuf";
			goto error;
		}
		/* Headroom is reserved by rte_pktmbuf_alloc(). */
		assert(buf->data_off == RTE_PKTMBUF_HEADROOM);
		/* Buffer is supposed to be empty. */
		assert(rte_pktmbuf_data_len(buf) == 0);
		assert(rte_pktmbuf_pkt_len(buf) == 0);
		/* Only the first segment keeps headroom. */
		if (i % sges_n)
			buf->data_off = 0;
		buf->port = rxq->port_id;
		buf->data_len = rte_pktmbuf_tailroom(buf);
		buf->pkt_len = rte_pktmbuf_tailroom(buf);
		buf->nb_segs = 1;
		*scat = (struct mlx4_wqe_data_seg){
			.addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
								  uintptr_t)),
			.byte_count = rte_cpu_to_be_32(buf->data_len),
			.lkey = rte_cpu_to_be_32(rxq->mr->lkey),
		};
		(*elts)[i] = buf;
	}
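	/*
	 * Each mlx4_wqe_data_seg above is one scatter entry of the receive
	 * ring: a buffer address, its length, and the lkey of the memory
	 * region covering the mempool. Hardware reads these fields
	 * directly, hence the rte_cpu_to_be_*() conversions.
	 */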
	DEBUG("%p: allocated and configured %u segments (max %u packets)",
	      (void *)rxq, elts_n, elts_n / sges_n);
	rxq->cq = cq;
	rxq->wq = wq;
	rxq->wqes = wqes;
	rxq->rq_db = dv_rwq.rdb;
	rxq->mcq.buf = dv_cq.buf.buf;
	rxq->mcq.cqe_cnt = dv_cq.cqe_cnt;
	rxq->mcq.set_ci_db = dv_cq.set_ci_db;
	rxq->mcq.cqe_64 = (dv_cq.cqe_size & 64) ? 1 : 0;
	/* Update doorbell counter. */
	rxq->rq_ci = elts_n / sges_n;
	/* Make WQE writes visible to the device before ringing. */
	rte_wmb();
	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
	return 0;
error:
	if (wq)
		claim_zero(ibv_destroy_wq(wq));
	if (cq)
		claim_zero(ibv_destroy_cq(cq));
	rte_errno = ret;
	ERROR("error while attaching Rx queue %p: %s: %s",
	      (void *)rxq, msg, strerror(ret));
	return -ret;
}

/**
 * Detach a user from a Rx queue.
 *
 * This function decrements the usage count of the Rx queue and destroys
 * usage resources after reaching 0.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 */
void
mlx4_rxq_detach(struct rxq *rxq)
{
	unsigned int i;
	struct rte_mbuf *(*elts)[1 << rxq->elts_n] = rxq->elts;

	if (--rxq->usecnt)
		return;
	rxq->rq_ci = 0;
	memset(&rxq->mcq, 0, sizeof(rxq->mcq));
	rxq->rq_db = NULL;
	rxq->wqes = NULL;
	claim_zero(ibv_destroy_wq(rxq->wq));
	rxq->wq = NULL;
	claim_zero(ibv_destroy_cq(rxq->cq));
	rxq->cq = NULL;
	DEBUG("%p: freeing Rx queue elements", (void *)rxq);
	for (i = 0; (i != RTE_DIM(*elts)); ++i) {
		if (!(*elts)[i])
			continue;
		rte_pktmbuf_free_seg((*elts)[i]);
		(*elts)[i] = NULL;
	}
}

/**
 * DPDK callback to configure a Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Rx queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_rxconf *conf,
		    struct rte_mempool *mp)
{
	struct priv *priv = dev->data->dev_private;
	uint32_t mb_len = rte_pktmbuf_data_room_size(mp);
	struct rte_mbuf *(*elts)[rte_align32pow2(desc)];
	struct rxq *rxq;
	struct mlx4_malloc_vec vec[] = {
		{
			.align = RTE_CACHE_LINE_SIZE,
			.size = sizeof(*rxq),
			.addr = (void **)&rxq,
		},
		{
			.align = RTE_CACHE_LINE_SIZE,
			.size = sizeof(*elts),
			.addr = (void **)&elts,
		},
	};
	int ret;

	(void)conf; /* Thresholds configuration (ignored). */
	DEBUG("%p: configuring queue %u for %u descriptors",
	      (void *)dev, idx, desc);
	if (idx >= dev->data->nb_rx_queues) {
		rte_errno = EOVERFLOW;
		ERROR("%p: queue index out of range (%u >= %u)",
		      (void *)dev, idx, dev->data->nb_rx_queues);
		return -rte_errno;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq) {
		rte_errno = EEXIST;
		ERROR("%p: Rx queue %u already configured, release it first",
		      (void *)dev, idx);
		return -rte_errno;
	}
	if (!desc) {
		rte_errno = EINVAL;
		ERROR("%p: invalid number of Rx descriptors", (void *)dev);
		return -rte_errno;
	}
	if (desc != RTE_DIM(*elts)) {
		desc = RTE_DIM(*elts);
		WARN("%p: increased number of descriptors in Rx queue %u"
		     " to the next power of two (%u)",
		     (void *)dev, idx, desc);
	}
	/* Allocate and initialize Rx queue. */
	mlx4_zmallocv_socket("RXQ", vec, RTE_DIM(vec), socket);
	if (!rxq) {
		ERROR("%p: unable to allocate queue index %u",
		      (void *)dev, idx);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	*rxq = (struct rxq){
		.priv = priv,
		.mp = mp,
		.port_id = dev->data->port_id,
		.sges_n = 0,
		.elts_n = rte_log2_u32(desc),
		.elts = elts,
		/* Toggle Rx checksum offload if hardware supports it. */
		.csum = (priv->hw_csum &&
			 dev->data->dev_conf.rxmode.hw_ip_checksum),
		.csum_l2tun = (priv->hw_csum_l2tun &&
			       dev->data->dev_conf.rxmode.hw_ip_checksum),
		.stats = {
			.idx = idx,
		},
		.socket = socket,
	};
	/* Enable scattered packets support for this queue if necessary. */
	assert(mb_len >= RTE_PKTMBUF_HEADROOM);
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
		;
	} else if (dev->data->dev_conf.rxmode.enable_scatter) {
		uint32_t size =
			RTE_PKTMBUF_HEADROOM +
			dev->data->dev_conf.rxmode.max_rx_pkt_len;
		uint32_t sges_n;

		/*
		 * Determine the number of SGEs needed for a full packet
		 * and round it to the next power of two.
		 */
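		/*
		 * Worked example (sketch, default 128-byte headroom):
		 * max_rx_pkt_len = 9000 and mb_len = 2048 give
		 * size = 9128, i.e. ceil(9128 / 2048) = 5 segments,
		 * rounded up to 8 (sges_n = 3) by the log2 below.
		 */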
		sges_n = rte_log2_u32((size / mb_len) + !!(size % mb_len));
		rxq->sges_n = sges_n;
		/* Make sure sges_n did not overflow. */
		size = mb_len * (1 << rxq->sges_n);
		size -= RTE_PKTMBUF_HEADROOM;
		if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
			rte_errno = EOVERFLOW;
			ERROR("%p: too many SGEs (%u) needed to handle"
			      " requested maximum packet size %u",
			      (void *)dev,
			      1 << sges_n,
			      dev->data->dev_conf.rxmode.max_rx_pkt_len);
			goto error;
		}
	} else {
		WARN("%p: the requested maximum Rx packet size (%u) is"
		     " larger than a single mbuf (%u) and scattered"
		     " mode has not been requested",
		     (void *)dev,
		     dev->data->dev_conf.rxmode.max_rx_pkt_len,
		     mb_len - RTE_PKTMBUF_HEADROOM);
	}
	DEBUG("%p: maximum number of segments per packet: %u",
	      (void *)dev, 1 << rxq->sges_n);
	if (desc % (1 << rxq->sges_n)) {
		rte_errno = EINVAL;
		ERROR("%p: number of Rx queue descriptors (%u) is not a"
		      " multiple of maximum segments per packet (%u)",
		      (void *)dev,
		      desc,
		      1 << rxq->sges_n);
		goto error;
	}
	/* Use the entire Rx mempool as the memory region. */
	rxq->mr = mlx4_mp2mr(priv->pd, mp);
	if (!rxq->mr) {
		ERROR("%p: MR creation failure: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	if (dev->data->dev_conf.intr_conf.rxq) {
		rxq->channel = ibv_create_comp_channel(priv->ctx);
		if (rxq->channel == NULL) {
			rte_errno = ENOMEM;
			ERROR("%p: Rx interrupt completion channel creation"
			      " failure: %s",
			      (void *)dev, strerror(rte_errno));
			goto error;
		}
		if (mlx4_fd_set_non_blocking(rxq->channel->fd) < 0) {
			ERROR("%p: unable to make Rx interrupt completion"
			      " channel non-blocking: %s",
			      (void *)dev, strerror(rte_errno));
			goto error;
		}
	}
	DEBUG("%p: adding Rx queue %p to list", (void *)dev, (void *)rxq);
	dev->data->rx_queues[idx] = rxq;
	return 0;
error:
	dev->data->rx_queues[idx] = NULL;
	ret = rte_errno;
	mlx4_rx_queue_release(rxq);
	rte_errno = ret;
	assert(rte_errno > 0);
	return -rte_errno;
}

/**
 * DPDK callback to release a Rx queue.
 *
 * @param dpdk_rxq
 *   Generic Rx queue pointer.
 */
void
mlx4_rx_queue_release(void *dpdk_rxq)
{
	struct rxq *rxq = (struct rxq *)dpdk_rxq;
	struct priv *priv;
	unsigned int i;

	if (rxq == NULL)
		return;
	priv = rxq->priv;
	for (i = 0; i != priv->dev->data->nb_rx_queues; ++i)
		if (priv->dev->data->rx_queues[i] == rxq) {
			DEBUG("%p: removing Rx queue %p from list",
			      (void *)priv->dev, (void *)rxq);
			priv->dev->data->rx_queues[i] = NULL;
			break;
		}
	assert(!rxq->cq);
	assert(!rxq->wq);
	assert(!rxq->wqes);
	assert(!rxq->rq_db);
	if (rxq->channel)
		claim_zero(ibv_destroy_comp_channel(rxq->channel));
	if (rxq->mr)
		claim_zero(ibv_dereg_mr(rxq->mr));
	rte_free(rxq);
}