/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/**
 * @file
 * Rx queues configuration for mlx4 driver.
 */
#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/mlx4dv.h>
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_flow.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include "mlx4.h"
#include "mlx4_flow.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"
/**
 * Historical RSS hash key.
 *
 * This used to be the default for mlx4 in Linux before v3.19 switched to
 * generating random hash keys through netdev_rss_key_fill().
 *
 * It is used in this PMD for consistency with past DPDK releases but can
 * now be overridden through user configuration.
 *
 * Note: this is not const to work around API quirks.
 */
uint8_t
mlx4_rss_hash_key_default[MLX4_RSS_HASH_KEY_SIZE] = {
	0x2c, 0xc6, 0x81, 0xd1,
	0x5b, 0xdb, 0xf4, 0xf7,
	0xfc, 0xa2, 0x83, 0x19,
	0xdb, 0x1a, 0x3e, 0x94,
	0x6b, 0x9e, 0x38, 0xd9,
	0x2c, 0x9c, 0x03, 0xd1,
	0xad, 0x99, 0x44, 0xa7,
	0xd9, 0x56, 0x3d, 0x59,
	0x06, 0x3c, 0x25, 0xf3,
	0xfc, 0x1f, 0xdc, 0x2a,
};
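
/*
 * Illustrative sketch (not part of the driver): applications may override
 * the default key above through the regular ethdev configuration path.
 * The port_id, queue counts and app_rss_key storage below are hypothetical
 * placeholders; only the requirement that the key be exactly
 * MLX4_RSS_HASH_KEY_SIZE (40) bytes comes from this PMD.
 *
 *	static uint8_t app_rss_key[MLX4_RSS_HASH_KEY_SIZE] = { ... };
 *	struct rte_eth_conf port_conf = {
 *		.rxmode = { .mq_mode = ETH_MQ_RX_RSS },
 *		.rx_adv_conf.rss_conf = {
 *			.rss_key = app_rss_key,
 *			.rss_key_len = MLX4_RSS_HASH_KEY_SIZE,
 *			.rss_hf = ETH_RSS_IP,
 *		},
 *	};
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 */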
/**
 * Obtain an RSS context with specified properties.
 *
 * Used when creating a flow rule targeting one or several Rx queues.
 *
 * If a matching RSS context already exists, it is returned with its
 * reference count incremented.
 *
 * @param priv
 *   Pointer to private structure.
 * @param fields
 *   Fields for RSS processing (Verbs format).
 * @param[in] key
 *   Hash key to use (whose size is exactly MLX4_RSS_HASH_KEY_SIZE).
 * @param queues
 *   Number of target queues.
 * @param[in] queue_id
 *   Target queues.
 *
 * @return
 *   Pointer to RSS context on success, NULL otherwise and rte_errno is set.
 */
struct mlx4_rss *
mlx4_rss_get(struct priv *priv, uint64_t fields,
	     uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
	     uint16_t queues, const uint16_t queue_id[])
{
	struct mlx4_rss *rss;
	size_t queue_id_size = sizeof(queue_id[0]) * queues;

	LIST_FOREACH(rss, &priv->rss, next)
		if (fields == rss->fields &&
		    queues == rss->queues &&
		    !memcmp(key, rss->key, MLX4_RSS_HASH_KEY_SIZE) &&
		    !memcmp(queue_id, rss->queue_id, queue_id_size)) {
			++rss->refcnt;
			return rss;
		}
	rss = rte_malloc(__func__, offsetof(struct mlx4_rss, queue_id) +
			 queue_id_size, 0);
	if (!rss) {
		rte_errno = ENOMEM;
		return NULL;
	}
	*rss = (struct mlx4_rss){
		.priv = priv,
		.refcnt = 1,
		.usecnt = 0,
		.qp = NULL,
		.ind = NULL,
		.fields = fields,
		.queues = queues,
	};
	memcpy(rss->key, key, MLX4_RSS_HASH_KEY_SIZE);
	memcpy(rss->queue_id, queue_id, queue_id_size);
	LIST_INSERT_HEAD(&priv->rss, rss, next);
	return rss;
}
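
/*
 * Usage sketch (illustrative only, not part of the driver): how a caller
 * such as the flow engine is expected to pair mlx4_rss_get() with
 * mlx4_rss_put(). The fields, queues and queue_id variables below are
 * hypothetical placeholders.
 *
 *	struct mlx4_rss *rss;
 *
 *	rss = mlx4_rss_get(priv, fields, mlx4_rss_hash_key_default,
 *			   queues, queue_id);
 *	if (rss == NULL)
 *		return -rte_errno;
 *	... use the context ...
 *	mlx4_rss_put(rss);  <- drops the reference taken above
 */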
/**
 * Release an RSS context instance.
 *
 * Used when destroying a flow rule targeting one or several Rx queues.
 *
 * This function decrements the reference count of the context and destroys
 * it after reaching 0. The context must have no users at this point; all
 * prior calls to mlx4_rss_attach() must have been followed by matching
 * calls to mlx4_rss_detach().
 *
 * @param rss
 *   RSS context to release.
 */
void
mlx4_rss_put(struct mlx4_rss *rss)
{
	assert(rss->refcnt);
	if (--rss->refcnt)
		return;
	assert(!rss->usecnt);
	assert(!rss->qp);
	assert(!rss->ind);
	LIST_REMOVE(rss, next);
	rte_free(rss);
}
/**
 * Attach a user to an RSS context instance.
 *
 * Used when the RSS QP and indirection table objects must be instantiated,
 * that is, when a flow rule must be enabled.
 *
 * This function increments the usage count of the context.
 *
 * @param rss
 *   RSS context to attach to.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx4_rss_attach(struct mlx4_rss *rss)
{
	assert(rss->refcnt);
	if (rss->usecnt++) {
		assert(rss->qp);
		assert(rss->ind);
		return 0;
	}

	struct ibv_wq *ind_tbl[rss->queues];
	struct priv *priv = rss->priv;
	const char *msg;
	unsigned int i;
	int ret;

	if (!rte_is_power_of_2(RTE_DIM(ind_tbl))) {
		msg = "number of RSS queues must be a power of two";
		goto error;
	}
	for (i = 0; i != RTE_DIM(ind_tbl); ++i) {
		uint16_t id = rss->queue_id[i];
		struct rxq *rxq = NULL;

		if (id < priv->dev->data->nb_rx_queues)
			rxq = priv->dev->data->rx_queues[id];
		if (!rxq) {
			msg = "RSS target queue is not configured";
			goto error;
		}
		ind_tbl[i] = rxq->wq;
	}
	rss->ind = ibv_create_rwq_ind_table
		(priv->ctx,
		 &(struct ibv_rwq_ind_table_init_attr){
			.log_ind_tbl_size = rte_log2_u32(RTE_DIM(ind_tbl)),
			.ind_tbl = ind_tbl,
			.comp_mask = 0,
		 });
	if (!rss->ind) {
		msg = "RSS indirection table creation failure";
		goto error;
	}
	rss->qp = ibv_create_qp_ex
		(priv->ctx,
		 &(struct ibv_qp_init_attr_ex){
			.comp_mask = (IBV_QP_INIT_ATTR_PD |
				      IBV_QP_INIT_ATTR_RX_HASH |
				      IBV_QP_INIT_ATTR_IND_TABLE),
			.qp_type = IBV_QPT_RAW_PACKET,
			.pd = priv->pd,
			.rwq_ind_tbl = rss->ind,
			.rx_hash_conf = {
				.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
				.rx_hash_key_len = MLX4_RSS_HASH_KEY_SIZE,
				.rx_hash_key = rss->key,
				.rx_hash_fields_mask = rss->fields,
			},
		 });
	if (!rss->qp) {
		msg = "RSS hash QP creation failure";
		goto error;
	}
	ret = ibv_modify_qp
		(rss->qp,
		 &(struct ibv_qp_attr){
			.qp_state = IBV_QPS_INIT,
			.port_num = priv->port,
		 },
		 IBV_QP_STATE | IBV_QP_PORT);
	if (ret) {
		msg = "failed to switch RSS hash QP to INIT state";
		goto error;
	}
	ret = ibv_modify_qp
		(rss->qp,
		 &(struct ibv_qp_attr){
			.qp_state = IBV_QPS_RTR,
		 },
		 IBV_QP_STATE);
	if (ret) {
		msg = "failed to switch RSS hash QP to RTR state";
		goto error;
	}
	return 0;
error:
	ERROR("mlx4: %s", msg);
	--rss->usecnt;
	rte_errno = EINVAL;
	return -rte_errno;
}
/**
 * Detach a user from an RSS context instance.
 *
 * Used when disabling (not destroying) a flow rule.
 *
 * This function decrements the usage count of the context and destroys
 * usage resources after reaching 0.
 *
 * @param rss
 *   RSS context to detach from.
 */
void
mlx4_rss_detach(struct mlx4_rss *rss)
{
	assert(rss->refcnt);
	assert(rss->qp);
	assert(rss->ind);
	if (--rss->usecnt)
		return;
	claim_zero(ibv_destroy_qp(rss->qp));
	rss->qp = NULL;
	claim_zero(ibv_destroy_rwq_ind_table(rss->ind));
	rss->ind = NULL;
}
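
/*
 * Lifecycle sketch (illustrative only): every mlx4_rss_attach() call must
 * be balanced by a mlx4_rss_detach() call before the final mlx4_rss_put(),
 * e.g. when a flow rule is enabled and later disabled:
 *
 *	if (mlx4_rss_attach(rss) < 0)   <- instantiates QP + ind. table
 *		return -rte_errno;
 *	... traffic flows ...
 *	mlx4_rss_detach(rss);           <- destroys them once unused
 */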
/**
 * Allocate Rx queue elements.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_rxq_alloc_elts(struct rxq *rxq)
{
	const uint32_t elts_n = 1 << rxq->elts_n;
	const uint32_t sges_n = 1 << rxq->sges_n;
	struct rte_mbuf *(*elts)[elts_n] = rxq->elts;
	unsigned int i;

	assert(rte_is_power_of_2(elts_n));
	for (i = 0; i != RTE_DIM(*elts); ++i) {
		volatile struct mlx4_wqe_data_seg *scat = &(*rxq->wqes)[i];
		struct rte_mbuf *buf = rte_pktmbuf_alloc(rxq->mp);

		if (buf == NULL) {
			while (i--) {
				rte_pktmbuf_free_seg((*elts)[i]);
				(*elts)[i] = NULL;
			}
			rte_errno = ENOMEM;
			return -rte_errno;
		}
		/* Headroom is reserved by rte_pktmbuf_alloc(). */
		assert(buf->data_off == RTE_PKTMBUF_HEADROOM);
		/* Buffer is supposed to be empty. */
		assert(rte_pktmbuf_data_len(buf) == 0);
		assert(rte_pktmbuf_pkt_len(buf) == 0);
		/* Only the first segment keeps headroom. */
		if (i % sges_n)
			buf->data_off = 0;
		buf->port = rxq->port_id;
		buf->data_len = rte_pktmbuf_tailroom(buf);
		buf->pkt_len = rte_pktmbuf_tailroom(buf);
		buf->nb_segs = 1;
		*scat = (struct mlx4_wqe_data_seg){
			.addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
								  uintptr_t)),
			.byte_count = rte_cpu_to_be_32(buf->data_len),
			.lkey = rte_cpu_to_be_32(rxq->mr->lkey),
		};
		(*elts)[i] = buf;
	}
	DEBUG("%p: allocated and configured %u segments (max %u packets)",
	      (void *)rxq, elts_n, elts_n / sges_n);
	return 0;
}
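
/*
 * Worked example (illustrative figures): with rxq->elts_n = 8 and
 * rxq->sges_n = 2, the ring holds 1 << 8 = 256 segments and each packet
 * may span 1 << 2 = 4 segments, so the DEBUG message above would report
 * 256 segments for at most 256 / 4 = 64 packets.
 */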
/**
 * Free Rx queue elements.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 */
static void
mlx4_rxq_free_elts(struct rxq *rxq)
{
	unsigned int i;
	struct rte_mbuf *(*elts)[1 << rxq->elts_n] = rxq->elts;

	DEBUG("%p: freeing Rx queue elements", (void *)rxq);
	for (i = 0; (i != RTE_DIM(*elts)); ++i) {
		if (!(*elts)[i])
			continue;
		rte_pktmbuf_free_seg((*elts)[i]);
		(*elts)[i] = NULL;
	}
}
/**
 * DPDK callback to configure an Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Rx queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_rxconf *conf,
		    struct rte_mempool *mp)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx4dv_obj mlxdv;
	struct mlx4dv_rwq dv_rwq;
	struct mlx4dv_cq dv_cq;
	uint32_t mb_len = rte_pktmbuf_data_room_size(mp);
	struct rte_mbuf *(*elts)[rte_align32pow2(desc)];
	struct rte_flow_error error;
	struct rxq *rxq;
	struct mlx4_malloc_vec vec[] = {
		{
			.align = RTE_CACHE_LINE_SIZE,
			.size = sizeof(*rxq),
			.addr = (void **)&rxq,
		},
		{
			.align = RTE_CACHE_LINE_SIZE,
			.size = sizeof(*elts),
			.addr = (void **)&elts,
		},
	};
	int ret;

	(void)conf; /* Thresholds configuration (ignored). */
	DEBUG("%p: configuring queue %u for %u descriptors",
	      (void *)dev, idx, desc);
	if (idx >= dev->data->nb_rx_queues) {
		rte_errno = EOVERFLOW;
		ERROR("%p: queue index out of range (%u >= %u)",
		      (void *)dev, idx, dev->data->nb_rx_queues);
		return -rte_errno;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq) {
		rte_errno = EEXIST;
		ERROR("%p: Rx queue %u already configured, release it first",
		      (void *)dev, idx);
		return -rte_errno;
	}
	if (!desc) {
		rte_errno = EINVAL;
		ERROR("%p: invalid number of Rx descriptors", (void *)dev);
		return -rte_errno;
	}
	if (desc != RTE_DIM(*elts)) {
		desc = RTE_DIM(*elts);
		WARN("%p: increased number of descriptors in Rx queue %u"
		     " to the next power of two (%u)",
		     (void *)dev, idx, desc);
	}
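	/*
	 * Illustrative numbers: a request for desc = 500 descriptors is
	 * rounded up by rte_align32pow2() to 512, and the WARN above
	 * reports the adjustment.
	 */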
	/* Allocate and initialize Rx queue. */
	mlx4_zmallocv_socket("RXQ", vec, RTE_DIM(vec), socket);
	if (!rxq) {
		rte_errno = ENOMEM;
		ERROR("%p: unable to allocate queue index %u",
		      (void *)dev, idx);
		return -rte_errno;
	}
	*rxq = (struct rxq){
		.priv = priv,
		.mp = mp,
		.port_id = dev->data->port_id,
		.sges_n = 0,
		.elts_n = rte_log2_u32(desc),
		.elts = elts,
		.stats = { .idx = idx },
		.socket = socket,
	};
	/* Enable scattered packets support for this queue if necessary. */
	assert(mb_len >= RTE_PKTMBUF_HEADROOM);
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
		;
	} else if (dev->data->dev_conf.rxmode.enable_scatter) {
		uint32_t size =
			RTE_PKTMBUF_HEADROOM +
			dev->data->dev_conf.rxmode.max_rx_pkt_len;
		uint32_t sges_n;

		/*
		 * Determine the number of SGEs needed for a full packet
		 * and round it to the next power of two.
		 */
		sges_n = rte_log2_u32((size / mb_len) + !!(size % mb_len));
		rxq->sges_n = sges_n;
		/* Make sure sges_n did not overflow. */
		size = mb_len * (1 << rxq->sges_n);
		size -= RTE_PKTMBUF_HEADROOM;
		if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
			rte_errno = EOVERFLOW;
			ERROR("%p: too many SGEs (%u) needed to handle"
			      " requested maximum packet size %u",
			      (void *)dev, 1 << sges_n,
			      dev->data->dev_conf.rxmode.max_rx_pkt_len);
			goto error;
		}
	} else {
		WARN("%p: the requested maximum Rx packet size (%u) is"
		     " larger than a single mbuf (%u) and scattered"
		     " mode has not been requested",
		     (void *)dev,
		     dev->data->dev_conf.rxmode.max_rx_pkt_len,
		     mb_len - RTE_PKTMBUF_HEADROOM);
	}
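	/*
	 * Worked example (illustrative figures): with mb_len = 2176 bytes
	 * per mbuf, RTE_PKTMBUF_HEADROOM = 128 and max_rx_pkt_len = 9000,
	 * size = 128 + 9000 = 9128 and 9128 / 2176 = 4 with a remainder,
	 * so sges_n = rte_log2_u32(5) = 3, i.e. 8 SGEs per packet; the
	 * overflow check then verifies 2176 * 8 - 128 = 17280 >= 9000.
	 */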
	DEBUG("%p: maximum number of segments per packet: %u",
	      (void *)dev, 1 << rxq->sges_n);
	if (desc % (1 << rxq->sges_n)) {
		rte_errno = EINVAL;
		ERROR("%p: number of Rx queue descriptors (%u) is not a"
		      " multiple of maximum segments per packet (%u)",
		      (void *)dev, desc, 1 << rxq->sges_n);
		goto error;
	}
	/* Use the entire Rx mempool as the memory region. */
	rxq->mr = mlx4_mp2mr(priv->pd, mp);
	if (!rxq->mr) {
		rte_errno = EINVAL;
		ERROR("%p: MR creation failure: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	if (dev->data->dev_conf.intr_conf.rxq) {
		rxq->channel = ibv_create_comp_channel(priv->ctx);
		if (rxq->channel == NULL) {
			rte_errno = ENOMEM;
			ERROR("%p: Rx interrupt completion channel creation"
			      " failure: %s",
			      (void *)dev, strerror(rte_errno));
			goto error;
		}
		if (mlx4_fd_set_non_blocking(rxq->channel->fd) < 0) {
			ERROR("%p: unable to make Rx interrupt completion"
			      " channel non-blocking: %s",
			      (void *)dev, strerror(rte_errno));
			goto error;
		}
	}
	rxq->cq = ibv_create_cq(priv->ctx, desc >> rxq->sges_n, NULL,
				rxq->channel, 0);
	if (!rxq->cq) {
		rte_errno = errno ? errno : EINVAL;
		ERROR("%p: CQ creation failure: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	rxq->wq = ibv_create_wq
		(priv->ctx,
		 &(struct ibv_wq_init_attr){
			.wq_type = IBV_WQT_RQ,
			.max_wr = desc >> rxq->sges_n,
			.max_sge = 1 << rxq->sges_n,
			.pd = priv->pd,
			.cq = rxq->cq,
		 });
	if (!rxq->wq) {
		rte_errno = errno ? errno : EINVAL;
		ERROR("%p: WQ creation failure: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	ret = ibv_modify_wq
		(rxq->wq,
		 &(struct ibv_wq_attr){
			.attr_mask = IBV_WQ_ATTR_STATE,
			.wq_state = IBV_WQS_RDY,
		 });
	if (ret) {
		rte_errno = ret;
		ERROR("%p: WQ state to IBV_WQS_RDY failed: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	/* Retrieve device queue information. */
	mlxdv.cq.in = rxq->cq;
	mlxdv.cq.out = &dv_cq;
	mlxdv.rwq.in = rxq->wq;
	mlxdv.rwq.out = &dv_rwq;
	ret = mlx4dv_init_obj(&mlxdv, MLX4DV_OBJ_RWQ | MLX4DV_OBJ_CQ);
	if (ret) {
		rte_errno = EINVAL;
		ERROR("%p: failed to obtain device information", (void *)dev);
		goto error;
	}
	rxq->wqes =
		(volatile struct mlx4_wqe_data_seg (*)[])
		((uintptr_t)dv_rwq.buf.buf + dv_rwq.rq.offset);
	rxq->rq_db = dv_rwq.rdb;
	rxq->rq_ci = 0;
	rxq->mcq.buf = dv_cq.buf.buf;
	rxq->mcq.cqe_cnt = dv_cq.cqe_cnt;
	rxq->mcq.set_ci_db = dv_cq.set_ci_db;
	rxq->mcq.cqe_64 = (dv_cq.cqe_size & 64) ? 1 : 0;
	ret = mlx4_rxq_alloc_elts(rxq);
	if (ret) {
		ERROR("%p: RXQ allocation failed: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	DEBUG("%p: adding Rx queue %p to list", (void *)dev, (void *)rxq);
	dev->data->rx_queues[idx] = rxq;
	/* Enable associated flows. */
	ret = mlx4_flow_sync(priv, &error);
	if (!ret) {
		/* Update doorbell counter. */
		rxq->rq_ci = desc >> rxq->sges_n;
		rte_wmb();
		*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
		return 0;
	}
	ERROR("cannot re-attach flow rules to queue %u"
	      " (code %d, \"%s\"), flow error type %d, cause %p, message: %s",
	      idx, -ret, strerror(-ret), error.type, error.cause,
	      error.message ? error.message : "(unspecified)");
error:
	dev->data->rx_queues[idx] = NULL;
	ret = rte_errno;
	mlx4_rx_queue_release(rxq);
	rte_errno = ret;
	assert(rte_errno > 0);
	return -rte_errno;
}
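
/*
 * Usage sketch (illustrative only): an application reaches this callback
 * through the generic ethdev API; the port_id, ring size and mbuf_pool
 * below are hypothetical placeholders.
 *
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *				     NULL, mbuf_pool);
 *	if (ret < 0)
 *		rte_exit(EXIT_FAILURE, "Rx queue setup failed: %s\n",
 *			 strerror(-ret));
 */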
/**
 * DPDK callback to release an Rx queue.
 *
 * @param dpdk_rxq
 *   Generic Rx queue pointer.
 */
void
mlx4_rx_queue_release(void *dpdk_rxq)
{
	struct rxq *rxq = (struct rxq *)dpdk_rxq;
	struct priv *priv;
	unsigned int i;

	if (rxq == NULL)
		return;
	priv = rxq->priv;
	for (i = 0; i != priv->dev->data->nb_rx_queues; ++i)
		if (priv->dev->data->rx_queues[i] == rxq) {
			DEBUG("%p: removing Rx queue %p from list",
			      (void *)priv->dev, (void *)rxq);
			priv->dev->data->rx_queues[i] = NULL;
			break;
		}
	mlx4_flow_sync(priv, NULL);
	mlx4_rxq_free_elts(rxq);
	if (rxq->wq)
		claim_zero(ibv_destroy_wq(rxq->wq));
	if (rxq->cq)
		claim_zero(ibv_destroy_cq(rxq->cq));
	if (rxq->channel)
		claim_zero(ibv_destroy_comp_channel(rxq->channel));
	if (rxq->mr)
		claim_zero(ibv_dereg_mr(rxq->mr));
	rte_free(rxq);
}