/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

/**
 * @file
 * Rx queues configuration for mlx4 driver.
 */
#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/mlx4dv.h>
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

#include "mlx4.h"
#include "mlx4_glue.h"
#include "mlx4_flow.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"
/**
 * Historical RSS hash key.
 *
 * This used to be the default for mlx4 in Linux before v3.19 switched to
 * generating random hash keys through netdev_rss_key_fill().
 *
 * It is used in this PMD for consistency with past DPDK releases but can
 * now be overridden through user configuration.
 *
 * Note: this is not const to work around API quirks.
 */
uint8_t
mlx4_rss_hash_key_default[MLX4_RSS_HASH_KEY_SIZE] = {
	0x2c, 0xc6, 0x81, 0xd1,
	0x5b, 0xdb, 0xf4, 0xf7,
	0xfc, 0xa2, 0x83, 0x19,
	0xdb, 0x1a, 0x3e, 0x94,
	0x6b, 0x9e, 0x38, 0xd9,
	0x2c, 0x9c, 0x03, 0xd1,
	0xad, 0x99, 0x44, 0xa7,
	0xd9, 0x56, 0x3d, 0x59,
	0x06, 0x3c, 0x25, 0xf3,
	0xfc, 0x1f, 0xdc, 0x2a,
};
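/*
 * Illustrative sketch (not part of the driver): an application overriding
 * the key above does so through the regular ethdev configuration path. The
 * names below ("user_rss_key", "example_configure_rss", queue counts) are
 * hypothetical; the cast away from const mirrors the API quirk noted above.
 */
#if 0
static const uint8_t user_rss_key[MLX4_RSS_HASH_KEY_SIZE] = {
	/* 40 application-chosen bytes. */
};

static int
example_configure_rss(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_RSS,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = (uint8_t *)user_rss_key,
				.rss_key_len = MLX4_RSS_HASH_KEY_SIZE,
				.rss_hf = ETH_RSS_IP,
			},
		},
	};

	/* 4 Rx queues, 1 Tx queue; adjust as needed. */
	return rte_eth_dev_configure(port_id, 4, 1, &conf);
}
#endif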
/**
 * Obtain an RSS context with specified properties.
 *
 * Used when creating a flow rule targeting one or several Rx queues.
 *
 * If a matching RSS context already exists, it is returned with its
 * reference count incremented.
 *
 * @param priv
 *   Pointer to private structure.
 * @param fields
 *   Fields for RSS processing (Verbs format).
 * @param[in] key
 *   Hash key to use (whose size is exactly MLX4_RSS_HASH_KEY_SIZE).
 * @param queues
 *   Number of target queues.
 * @param[in] queue_id
 *   Target queues.
 *
 * @return
 *   Pointer to RSS context on success, NULL otherwise and rte_errno is set.
 */
struct mlx4_rss *
mlx4_rss_get(struct priv *priv, uint64_t fields,
	     const uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
	     uint16_t queues, const uint16_t queue_id[])
{
	struct mlx4_rss *rss;
	size_t queue_id_size = sizeof(queue_id[0]) * queues;

	LIST_FOREACH(rss, &priv->rss, next)
		if (fields == rss->fields &&
		    queues == rss->queues &&
		    !memcmp(key, rss->key, MLX4_RSS_HASH_KEY_SIZE) &&
		    !memcmp(queue_id, rss->queue_id, queue_id_size)) {
			++rss->refcnt;
			return rss;
		}
	rss = rte_malloc(__func__, offsetof(struct mlx4_rss, queue_id) +
			 queue_id_size, 0);
	if (!rss)
		goto error;
	*rss = (struct mlx4_rss){
		.priv = priv,
		.refcnt = 1,
		.usecnt = 0,
		.qp = NULL,
		.ind = NULL,
		.fields = fields,
		.queues = queues,
	};
	memcpy(rss->key, key, MLX4_RSS_HASH_KEY_SIZE);
	memcpy(rss->queue_id, queue_id, queue_id_size);
	LIST_INSERT_HEAD(&priv->rss, rss, next);
	return rss;
error:
	rte_errno = ENOBUFS;
	return NULL;
}
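/*
 * Illustrative usage sketch (not part of the driver): every successful
 * mlx4_rss_get() must eventually be balanced by mlx4_rss_put(), with any
 * mlx4_rss_attach()/mlx4_rss_detach() pairs in between. The function name
 * is hypothetical.
 */
#if 0
static int
example_toggle_rss_flow(struct priv *priv)
{
	static const uint16_t queues[4] = { 0, 1, 2, 3 };
	struct mlx4_rss *rss;

	rss = mlx4_rss_get(priv, IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4,
			   mlx4_rss_hash_key_default, RTE_DIM(queues), queues);
	if (!rss)
		return -rte_errno;
	if (mlx4_rss_attach(rss)) {	/* Enable: instantiates QP + table. */
		mlx4_rss_put(rss);
		return -rte_errno;
	}
	/* ... flow rule is active; later, disable and release: ... */
	mlx4_rss_detach(rss);
	mlx4_rss_put(rss);		/* Drop the reference from _get(). */
	return 0;
}
#endif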
/**
 * Release an RSS context instance.
 *
 * Used when destroying a flow rule targeting one or several Rx queues.
 *
 * This function decrements the reference count of the context and destroys
 * it after reaching 0. The context must have no users at this point; all
 * prior calls to mlx4_rss_attach() must have been followed by matching
 * calls to mlx4_rss_detach().
 *
 * @param rss
 *   RSS context to release.
 */
void
mlx4_rss_put(struct mlx4_rss *rss)
{
	assert(rss->refcnt);
	if (--rss->refcnt)
		return;
	assert(!rss->usecnt);
	assert(!rss->qp);
	assert(!rss->ind);
	LIST_REMOVE(rss, next);
	rte_free(rss);
}
/**
 * Attach a user to an RSS context instance.
 *
 * Used when the RSS QP and indirection table objects must be instantiated,
 * that is, when a flow rule must be enabled.
 *
 * This function increments the usage count of the context.
 *
 * @param rss
 *   RSS context to attach to.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx4_rss_attach(struct mlx4_rss *rss)
{
	assert(rss->refcnt);
	if (rss->usecnt++) {
		assert(rss->qp);
		assert(rss->ind);
		return 0;
	}

	struct ibv_wq *ind_tbl[rss->queues];
	struct priv *priv = rss->priv;
	const char *msg;
	unsigned int i = 0;
	int ret;

	if (!rte_is_power_of_2(RTE_DIM(ind_tbl))) {
		ret = EINVAL;
		msg = "number of RSS queues must be a power of two";
		goto error;
	}
	for (i = 0; i != RTE_DIM(ind_tbl); ++i) {
		uint16_t id = rss->queue_id[i];
		struct rxq *rxq = NULL;

		if (id < priv->dev->data->nb_rx_queues)
			rxq = priv->dev->data->rx_queues[id];
		if (!rxq) {
			ret = EINVAL;
			msg = "RSS target queue is not configured";
			goto error;
		}
		ret = mlx4_rxq_attach(rxq);
		if (ret) {
			ret = -ret;
			msg = "unable to attach RSS target queue";
			goto error;
		}
		ind_tbl[i] = rxq->wq;
	}
	rss->ind = mlx4_glue->create_rwq_ind_table
		(priv->ctx,
		 &(struct ibv_rwq_ind_table_init_attr){
			.log_ind_tbl_size = rte_log2_u32(RTE_DIM(ind_tbl)),
			.ind_tbl = ind_tbl,
			.comp_mask = 0,
		 });
	if (!rss->ind) {
		ret = errno ? errno : EINVAL;
		msg = "RSS indirection table creation failure";
		goto error;
	}
	rss->qp = mlx4_glue->create_qp_ex
		(priv->ctx,
		 &(struct ibv_qp_init_attr_ex){
			.comp_mask = (IBV_QP_INIT_ATTR_PD |
				      IBV_QP_INIT_ATTR_RX_HASH |
				      IBV_QP_INIT_ATTR_IND_TABLE),
			.qp_type = IBV_QPT_RAW_PACKET,
			.pd = priv->pd,
			.rwq_ind_tbl = rss->ind,
			.rx_hash_conf = {
				.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
				.rx_hash_key_len = MLX4_RSS_HASH_KEY_SIZE,
				.rx_hash_key = rss->key,
				.rx_hash_fields_mask = rss->fields,
			},
		 });
	if (!rss->qp) {
		ret = errno ? errno : EINVAL;
		msg = "RSS hash QP creation failure";
		goto error;
	}
	ret = mlx4_glue->modify_qp
		(rss->qp,
		 &(struct ibv_qp_attr){
			.qp_state = IBV_QPS_INIT,
			.port_num = priv->port,
		 },
		 IBV_QP_STATE | IBV_QP_PORT);
	if (ret) {
		msg = "failed to switch RSS hash QP to INIT state";
		goto error;
	}
	ret = mlx4_glue->modify_qp
		(rss->qp,
		 &(struct ibv_qp_attr){
			.qp_state = IBV_QPS_RTR,
		 },
		 IBV_QP_STATE);
	if (ret) {
		msg = "failed to switch RSS hash QP to RTR state";
		goto error;
	}
	return 0;
error:
	if (rss->qp) {
		claim_zero(mlx4_glue->destroy_qp(rss->qp));
		rss->qp = NULL;
	}
	if (rss->ind) {
		claim_zero(mlx4_glue->destroy_rwq_ind_table(rss->ind));
		rss->ind = NULL;
	}
	while (i--)
		mlx4_rxq_detach(priv->dev->data->rx_queues[rss->queue_id[i]]);
	ERROR("mlx4: %s", msg);
	--rss->usecnt;
	rte_errno = ret;
	return -ret;
}
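/*
 * Illustrative sketch (not part of the driver): the power-of-two constraint
 * enforced above exists because an indirection table of 2^n entries is
 * indexed by the low bits of the Toeplitz hash; only then does masking
 * spread traffic evenly across queues. A software equivalent, with a
 * hypothetical function name:
 */
#if 0
static uint16_t
example_ind_tbl_lookup(uint32_t toeplitz_hash,
		       const uint16_t queue_id[], uint16_t queues)
{
	/* Assumes "queues" is a power of two, as mlx4_rss_attach() checks. */
	return queue_id[toeplitz_hash & (queues - 1)];
}
#endif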
/**
 * Detach a user from an RSS context instance.
 *
 * Used when disabling (not destroying) a flow rule.
 *
 * This function decrements the usage count of the context and destroys
 * usage resources after reaching 0.
 *
 * @param rss
 *   RSS context to detach from.
 */
void
mlx4_rss_detach(struct mlx4_rss *rss)
{
	struct priv *priv = rss->priv;
	unsigned int i;

	assert(rss->refcnt);
	assert(rss->qp);
	assert(rss->ind);
	if (--rss->usecnt)
		return;
	claim_zero(mlx4_glue->destroy_qp(rss->qp));
	rss->qp = NULL;
	claim_zero(mlx4_glue->destroy_rwq_ind_table(rss->ind));
	rss->ind = NULL;
	for (i = 0; i != rss->queues; ++i)
		mlx4_rxq_detach(priv->dev->data->rx_queues[rss->queue_id[i]]);
}
/**
 * Initialize common RSS context resources.
 *
 * Because ConnectX-3 hardware limitations require a fixed order in the
 * indirection table, WQs must be allocated sequentially to be part of a
 * common RSS context.
 *
 * Since a newly created WQ cannot be moved to a different context, this
 * function allocates them all at once, one for each configured Rx queue,
 * as well as all related resources (CQs and mbufs).
 *
 * This must therefore be done before creating any Rx flow rules relying on
 * indirection tables.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx4_rss_init(struct priv *priv)
{
	struct rte_eth_dev *dev = priv->dev;
	uint8_t log2_range = rte_log2_u32(dev->data->nb_rx_queues);
	uint32_t wq_num_prev = 0;
	const char *msg;
	unsigned int i;
	int ret;

	if (priv->rss_init)
		return 0;
	/* Prepare range for RSS contexts before creating the first WQ. */
	ret = mlx4_glue->dv_set_context_attr
		(priv->ctx,
		 MLX4DV_SET_CTX_ATTR_LOG_WQS_RANGE_SZ,
		 &log2_range);
	if (ret) {
		ERROR("cannot set up range size for RSS context to %u"
		      " (for %u Rx queues), error: %s",
		      1 << log2_range, dev->data->nb_rx_queues, strerror(ret));
		rte_errno = ret;
		return -ret;
	}
	for (i = 0; i != priv->dev->data->nb_rx_queues; ++i) {
		struct rxq *rxq = priv->dev->data->rx_queues[i];
		struct ibv_cq *cq;
		struct ibv_wq *wq;
		uint32_t wq_num;

		/* Attach the configured Rx queues. */
		if (rxq) {
			assert(!rxq->usecnt);
			ret = mlx4_rxq_attach(rxq);
			if (!ret) {
				wq_num = rxq->wq->wq_num;
				goto wq_num_check;
			}
			ret = -ret;
			msg = "unable to create Rx queue resources";
			goto error;
		}
		/*
		 * WQs are temporarily allocated for unconfigured Rx queues
		 * to maintain proper index alignment in indirection table
		 * by skipping unused WQ numbers.
		 *
		 * The reason this works at all even though these WQs are
		 * immediately destroyed is that WQNs are allocated
		 * sequentially and are guaranteed to never be reused in the
		 * same context by the underlying implementation.
		 */
		cq = mlx4_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
		if (!cq) {
			ret = ENOMEM;
			msg = "placeholder CQ creation failure";
			goto error;
		}
		wq = mlx4_glue->create_wq
			(priv->ctx,
			 &(struct ibv_wq_init_attr){
				.wq_type = IBV_WQT_RQ,
				.max_wr = 1,
				.max_sge = 1,
				.pd = priv->pd,
				.cq = cq,
			 });
		if (wq) {
			wq_num = wq->wq_num;
			claim_zero(mlx4_glue->destroy_wq(wq));
		} else {
			wq_num = 0; /* Shut up GCC 4.8 warnings. */
		}
		claim_zero(mlx4_glue->destroy_cq(cq));
		if (!wq) {
			ret = ENOMEM;
			msg = "placeholder WQ creation failure";
			goto error;
		}
wq_num_check:
		/*
		 * While guaranteed by the implementation, make sure WQ
		 * numbers are really sequential (as the saying goes,
		 * trust, but verify).
		 */
		if (i && wq_num - wq_num_prev != 1) {
			if (rxq)
				mlx4_rxq_detach(rxq);
			ret = ERANGE;
			msg = "WQ numbers are not sequential";
			goto error;
		}
		wq_num_prev = wq_num;
	}
	priv->rss_init = 1;
	return 0;
error:
	ERROR("cannot initialize common RSS resources (queue %u): %s: %s",
	      i, msg, strerror(ret));
	while (i--) {
		struct rxq *rxq = priv->dev->data->rx_queues[i];

		if (rxq)
			mlx4_rxq_detach(rxq);
	}
	rte_errno = ret;
	return -ret;
}
/**
 * Release common RSS context resources.
 *
 * As the reverse of mlx4_rss_init(), this must be done after removing all
 * flow rules relying on indirection tables.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
mlx4_rss_deinit(struct priv *priv)
{
	unsigned int i;

	if (!priv->rss_init)
		return;
	for (i = 0; i != priv->dev->data->nb_rx_queues; ++i) {
		struct rxq *rxq = priv->dev->data->rx_queues[i];

		if (rxq) {
			assert(rxq->usecnt == 1);
			mlx4_rxq_detach(rxq);
		}
	}
	priv->rss_init = 0;
}
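/*
 * Illustrative call order (simplified, not part of the driver): RSS
 * resources bracket the lifetime of any flow rules that use indirection
 * tables. The function name is hypothetical; the actual call sites live in
 * the flow/start paths.
 */
#if 0
static int
example_rss_bracket(struct priv *priv)
{
	int ret = mlx4_rss_init(priv);	/* Reserve sequential WQ range. */

	if (ret)
		return ret;
	/* ... create RSS flow rules, receive traffic, destroy rules ... */
	mlx4_rss_deinit(priv);		/* Reverse of mlx4_rss_init(). */
	return 0;
}
#endif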
/**
 * Attach a user to an Rx queue.
 *
 * Used when the resources of an Rx queue must be instantiated for it to
 * become usable.
 *
 * This function increments the usage count of the Rx queue.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_rxq_attach(struct rxq *rxq)
{
	if (rxq->usecnt++) {
		assert(rxq->cq);
		assert(rxq->wq);
		assert(rxq->wqes);
		assert(rxq->rq_db);
		return 0;
	}

	struct priv *priv = rxq->priv;
	const uint32_t elts_n = 1 << rxq->elts_n;
	const uint32_t sges_n = 1 << rxq->sges_n;
	struct rte_mbuf *(*elts)[elts_n] = rxq->elts;
	struct mlx4dv_obj mlxdv;
	struct mlx4dv_rwq dv_rwq;
	struct mlx4dv_cq dv_cq = { .comp_mask = MLX4DV_CQ_MASK_UAR, };
	const char *msg;
	struct ibv_cq *cq = NULL;
	struct ibv_wq *wq = NULL;
	uint32_t create_flags = 0;
	uint32_t comp_mask = 0;
	volatile struct mlx4_wqe_data_seg (*wqes)[];
	unsigned int i;
	int ret;

	assert(rte_is_power_of_2(elts_n));
	cq = mlx4_glue->create_cq(priv->ctx, elts_n / sges_n, NULL,
				  rxq->channel, 0);
	if (!cq) {
		ret = ENOMEM;
		msg = "CQ creation failure";
		goto error;
	}
	/* By default, FCS (CRC) is stripped by hardware. */
	if (rxq->crc_present) {
		create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
		comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
	}
	wq = mlx4_glue->create_wq
		(priv->ctx,
		 &(struct ibv_wq_init_attr){
			.wq_type = IBV_WQT_RQ,
			.max_wr = elts_n / sges_n,
			.max_sge = sges_n,
			.pd = priv->pd,
			.cq = cq,
			.comp_mask = comp_mask,
			.create_flags = create_flags,
		 });
	if (!wq) {
		ret = errno ? errno : EINVAL;
		msg = "WQ creation failure";
		goto error;
	}
	ret = mlx4_glue->modify_wq
		(wq,
		 &(struct ibv_wq_attr){
			.attr_mask = IBV_WQ_ATTR_STATE,
			.wq_state = IBV_WQS_RDY,
		 });
	if (ret) {
		msg = "WQ state change to IBV_WQS_RDY failed";
		goto error;
	}
	/* Retrieve device queue information. */
	mlxdv.cq.in = cq;
	mlxdv.cq.out = &dv_cq;
	mlxdv.rwq.in = wq;
	mlxdv.rwq.out = &dv_rwq;
	ret = mlx4_glue->dv_init_obj(&mlxdv, MLX4DV_OBJ_RWQ | MLX4DV_OBJ_CQ);
	if (ret) {
		msg = "failed to obtain device information from WQ/CQ objects";
		goto error;
	}
	wqes = (volatile struct mlx4_wqe_data_seg (*)[])
		((uintptr_t)dv_rwq.buf.buf + dv_rwq.rq.offset);
	for (i = 0; i != RTE_DIM(*elts); ++i) {
		volatile struct mlx4_wqe_data_seg *scat = &(*wqes)[i];
		struct rte_mbuf *buf = rte_pktmbuf_alloc(rxq->mp);

		if (!buf) {
			while (i--) {
				rte_pktmbuf_free_seg((*elts)[i]);
				(*elts)[i] = NULL;
			}
			ret = ENOMEM;
			msg = "cannot allocate mbuf";
			goto error;
		}
		/* Headroom is reserved by rte_pktmbuf_alloc(). */
		assert(buf->data_off == RTE_PKTMBUF_HEADROOM);
		/* Buffer is supposed to be empty. */
		assert(rte_pktmbuf_data_len(buf) == 0);
		assert(rte_pktmbuf_pkt_len(buf) == 0);
		/* Only the first segment keeps headroom. */
		if (i % sges_n)
			buf->data_off = 0;
		buf->port = rxq->port_id;
		buf->data_len = rte_pktmbuf_tailroom(buf);
		buf->pkt_len = rte_pktmbuf_tailroom(buf);
		buf->nb_segs = 1;
		*scat = (struct mlx4_wqe_data_seg){
			.addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
								  uintptr_t)),
			.byte_count = rte_cpu_to_be_32(buf->data_len),
			.lkey = mlx4_rx_mb2mr(rxq, buf),
		};
		(*elts)[i] = buf;
	}
	DEBUG("%p: allocated and configured %u segments (max %u packets)",
	      (void *)rxq, elts_n, elts_n / sges_n);
	rxq->cq = cq;
	rxq->wq = wq;
	rxq->wqes = wqes;
	rxq->rq_db = dv_rwq.rdb;
	rxq->mcq.buf = dv_cq.buf.buf;
	rxq->mcq.cqe_cnt = dv_cq.cqe_cnt;
	rxq->mcq.set_ci_db = dv_cq.set_ci_db;
	rxq->mcq.cqe_64 = (dv_cq.cqe_size & 64) ? 1 : 0;
	rxq->mcq.arm_db = dv_cq.arm_db;
	rxq->mcq.arm_sn = dv_cq.arm_sn;
	rxq->mcq.cqn = dv_cq.cqn;
	rxq->mcq.cq_uar = dv_cq.cq_uar;
	rxq->mcq.cq_db_reg = (uint8_t *)dv_cq.cq_uar + MLX4_CQ_DOORBELL;
	/* Update doorbell counter. */
	rxq->rq_ci = elts_n / sges_n;
	rte_wmb();
	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
	return 0;
error:
	if (wq)
		claim_zero(mlx4_glue->destroy_wq(wq));
	if (cq)
		claim_zero(mlx4_glue->destroy_cq(cq));
	--rxq->usecnt;
	rte_errno = ret;
	ERROR("error while attaching Rx queue %p: %s: %s",
	      (void *)rxq, msg, strerror(ret));
	return -ret;
}
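/*
 * Illustrative sketch (not part of the driver): ring-size arithmetic used
 * by mlx4_rxq_attach(). The RQ is sized in WQEs, each scattering into
 * sges_n segments, so the doorbell counts WQEs rather than mbufs. The
 * function name is hypothetical.
 */
#if 0
static uint32_t
example_initial_rq_ci(uint32_t elts_n, uint32_t sges_n)
{
	/* E.g. 256 mbufs with 4 segments per packet -> 64 WQEs. */
	return elts_n / sges_n;
}
#endif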
/**
 * Detach a user from an Rx queue.
 *
 * This function decrements the usage count of the Rx queue and destroys
 * usage resources after reaching 0.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 */
void
mlx4_rxq_detach(struct rxq *rxq)
{
	unsigned int i;
	struct rte_mbuf *(*elts)[1 << rxq->elts_n] = rxq->elts;

	if (--rxq->usecnt)
		return;
	rxq->rq_ci = 0;
	memset(&rxq->mcq, 0, sizeof(rxq->mcq));
	rxq->rq_db = NULL;
	rxq->wqes = NULL;
	claim_zero(mlx4_glue->destroy_wq(rxq->wq));
	rxq->wq = NULL;
	claim_zero(mlx4_glue->destroy_cq(rxq->cq));
	rxq->cq = NULL;
	DEBUG("%p: freeing Rx queue elements", (void *)rxq);
	for (i = 0; (i != RTE_DIM(*elts)); ++i) {
		if (!(*elts)[i])
			continue;
		rte_pktmbuf_free_seg((*elts)[i]);
		(*elts)[i] = NULL;
	}
}
/**
 * Returns the per-queue supported offloads.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   Supported Rx offloads.
 */
uint64_t
mlx4_get_rx_queue_offloads(struct priv *priv)
{
	uint64_t offloads = DEV_RX_OFFLOAD_SCATTER |
			    DEV_RX_OFFLOAD_CRC_STRIP;

	if (priv->hw_csum)
		offloads |= DEV_RX_OFFLOAD_CHECKSUM;
	return offloads;
}
/**
 * Returns the per-port supported offloads.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   Supported Rx offloads.
 */
uint64_t
mlx4_get_rx_port_offloads(struct priv *priv)
{
	uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;

	(void)priv;
	return offloads;
}
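/*
 * Illustrative sketch (not part of the driver): how the two sets above can
 * be combined when validating a configuration request; "requested" would
 * come from the application and the function name is hypothetical.
 */
#if 0
static int
example_rx_offloads_ok(struct priv *priv, uint64_t requested)
{
	uint64_t supported = mlx4_get_rx_queue_offloads(priv) |
			     mlx4_get_rx_port_offloads(priv);

	return (requested & ~supported) == 0;
}
#endif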
/**
 * DPDK callback to configure an Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Rx queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_rxconf *conf,
		    struct rte_mempool *mp)
{
	struct priv *priv = dev->data->dev_private;
	uint32_t mb_len = rte_pktmbuf_data_room_size(mp);
	struct rte_mbuf *(*elts)[rte_align32pow2(desc)];
	struct rxq *rxq;
	struct mlx4_malloc_vec vec[] = {
		{
			.align = RTE_CACHE_LINE_SIZE,
			.size = sizeof(*rxq),
			.addr = (void **)&rxq,
		},
		{
			.align = RTE_CACHE_LINE_SIZE,
			.size = sizeof(*elts),
			.addr = (void **)&elts,
		},
	};
	int ret;
	uint32_t crc_present;
	uint64_t offloads;

	offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;

	DEBUG("%p: configuring queue %u for %u descriptors",
	      (void *)dev, idx, desc);

	if (idx >= dev->data->nb_rx_queues) {
		rte_errno = EOVERFLOW;
		ERROR("%p: queue index out of range (%u >= %u)",
		      (void *)dev, idx, dev->data->nb_rx_queues);
		return -rte_errno;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq) {
		rte_errno = EEXIST;
		ERROR("%p: Rx queue %u already configured, release it first",
		      (void *)dev, idx);
		return -rte_errno;
	}
	if (!desc) {
		rte_errno = EINVAL;
		ERROR("%p: invalid number of Rx descriptors", (void *)dev);
		return -rte_errno;
	}
	if (desc != RTE_DIM(*elts)) {
		desc = RTE_DIM(*elts);
		WARN("%p: increased number of descriptors in Rx queue %u"
		     " to the next power of two (%u)",
		     (void *)dev, idx, desc);
	}
	/* By default, FCS (CRC) is stripped by hardware. */
	if (offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
		crc_present = 0;
	} else if (priv->hw_fcs_strip) {
		crc_present = 1;
	} else {
		WARN("%p: CRC stripping has been disabled but will still"
		     " be performed by hardware, make sure MLNX_OFED and"
		     " firmware are up to date",
		     (void *)dev);
		crc_present = 0;
	}
	DEBUG("%p: CRC stripping is %s, %u bytes will be subtracted from"
	      " incoming frames to hide it",
	      (void *)dev,
	      crc_present ? "disabled" : "enabled",
	      crc_present << 2);
	/* Allocate and initialize Rx queue. */
	mlx4_zmallocv_socket("RXQ", vec, RTE_DIM(vec), socket);
	if (!rxq) {
		ERROR("%p: unable to allocate queue index %u",
		      (void *)dev, idx);
		return -rte_errno;
	}
	*rxq = (struct rxq){
		.priv = priv,
		.mp = mp,
		.port_id = dev->data->port_id,
		.sges_n = 0,
		.elts_n = rte_log2_u32(desc),
		.elts = elts,
		/* Toggle Rx checksum offload if hardware supports it. */
		.csum = priv->hw_csum &&
			(offloads & DEV_RX_OFFLOAD_CHECKSUM),
		.csum_l2tun = priv->hw_csum_l2tun &&
			      (offloads & DEV_RX_OFFLOAD_CHECKSUM),
		.crc_present = crc_present,
		.l2tun_offload = priv->hw_csum_l2tun,
		.stats = {
			.idx = idx,
		},
		.socket = socket,
	};
	/* Enable scattered packets support for this queue if necessary. */
	assert(mb_len >= RTE_PKTMBUF_HEADROOM);
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
		;
	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
		uint32_t size =
			RTE_PKTMBUF_HEADROOM +
			dev->data->dev_conf.rxmode.max_rx_pkt_len;
		uint32_t sges_n;

		/*
		 * Determine the number of SGEs needed for a full packet
		 * and round it to the next power of two.
		 */
		sges_n = rte_log2_u32((size / mb_len) + !!(size % mb_len));
		rxq->sges_n = sges_n;
		/* Make sure sges_n did not overflow. */
		size = mb_len * (1 << rxq->sges_n);
		size -= RTE_PKTMBUF_HEADROOM;
		if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
			rte_errno = EOVERFLOW;
			ERROR("%p: too many SGEs (%u) needed to handle"
			      " requested maximum packet size %u",
			      (void *)dev,
			      1 << sges_n,
			      dev->data->dev_conf.rxmode.max_rx_pkt_len);
			goto error;
		}
	} else {
		WARN("%p: the requested maximum Rx packet size (%u) is"
		     " larger than a single mbuf (%u) and scattered"
		     " mode has not been requested",
		     (void *)dev,
		     dev->data->dev_conf.rxmode.max_rx_pkt_len,
		     mb_len - RTE_PKTMBUF_HEADROOM);
	}
	DEBUG("%p: maximum number of segments per packet: %u",
	      (void *)dev, 1 << rxq->sges_n);
	if (desc % (1 << rxq->sges_n)) {
		rte_errno = EINVAL;
		ERROR("%p: number of Rx queue descriptors (%u) is not a"
		      " multiple of maximum segments per packet (%u)",
		      (void *)dev,
		      desc,
		      1 << rxq->sges_n);
		goto error;
	}
	if (dev->data->dev_conf.intr_conf.rxq) {
		rxq->channel = mlx4_glue->create_comp_channel(priv->ctx);
		if (rxq->channel == NULL) {
			rte_errno = ENOMEM;
			ERROR("%p: Rx interrupt completion channel creation"
			      " failure: %s",
			      (void *)dev, strerror(rte_errno));
			goto error;
		}
		if (mlx4_fd_set_non_blocking(rxq->channel->fd) < 0) {
			ERROR("%p: unable to make Rx interrupt completion"
			      " channel non-blocking: %s",
			      (void *)dev, strerror(rte_errno));
			goto error;
		}
	}
	DEBUG("%p: adding Rx queue %p to list", (void *)dev, (void *)rxq);
	dev->data->rx_queues[idx] = rxq;
	return 0;
error:
	dev->data->rx_queues[idx] = NULL;
	ret = rte_errno;
	mlx4_rx_queue_release(rxq);
	rte_errno = ret;
	assert(rte_errno > 0);
	return -rte_errno;
}
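/*
 * Illustrative worked example (not part of the driver): the SGE computation
 * from mlx4_rx_queue_setup(). With max_rx_pkt_len = 9000 and 2048-byte
 * mbufs: size = 128 + 9000 = 9128, ceil(9128 / 2048) = 5 segments, which
 * rte_log2_u32() rounds up to the next power of two, 8 (sges_n = 3). The
 * function name is hypothetical.
 */
#if 0
static uint32_t
example_sges_log2(uint32_t max_rx_pkt_len, uint32_t mb_len)
{
	uint32_t size = RTE_PKTMBUF_HEADROOM + max_rx_pkt_len;

	return rte_log2_u32((size / mb_len) + !!(size % mb_len));
}
#endif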
/**
 * DPDK callback to release an Rx queue.
 *
 * @param dpdk_rxq
 *   Generic Rx queue pointer.
 */
void
mlx4_rx_queue_release(void *dpdk_rxq)
{
	struct rxq *rxq = (struct rxq *)dpdk_rxq;
	struct priv *priv;
	unsigned int i;

	if (rxq == NULL)
		return;
	priv = rxq->priv;
	for (i = 0; i != priv->dev->data->nb_rx_queues; ++i)
		if (priv->dev->data->rx_queues[i] == rxq) {
			DEBUG("%p: removing Rx queue %p from list",
			      (void *)priv->dev, (void *)rxq);
			priv->dev->data->rx_queues[i] = NULL;
			break;
		}
	assert(!rxq->cq);
	assert(!rxq->wq);
	assert(!rxq->wqes);
	assert(!rxq->rq_db);
	if (rxq->channel)
		claim_zero(mlx4_glue->destroy_comp_channel(rxq->channel));
	rte_free(rxq);
}