/*
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of 6WIND S.A. nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * Rx queues configuration for mlx4 driver.
 */
#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Verbs headers do not support -pedantic. */
#pragma GCC diagnostic ignored "-Wpedantic"
#include <infiniband/verbs.h>
#pragma GCC diagnostic error "-Wpedantic"

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_flow.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

#include "mlx4.h"
#include "mlx4_flow.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"

/**
 * Historical RSS hash key.
 *
 * This used to be the default for mlx4 in Linux before v3.19 switched to
 * generating random hash keys through netdev_rss_key_fill().
 *
 * It is used in this PMD for consistency with past DPDK releases but can
 * now be overridden through user configuration.
 *
 * Note: this is not const to work around API quirks.
 */
uint8_t
mlx4_rss_hash_key_default[MLX4_RSS_HASH_KEY_SIZE] = {
	0x2c, 0xc6, 0x81, 0xd1,
	0x5b, 0xdb, 0xf4, 0xf7,
	0xfc, 0xa2, 0x83, 0x19,
	0xdb, 0x1a, 0x3e, 0x94,
	0x6b, 0x9e, 0x38, 0xd9,
	0x2c, 0x9c, 0x03, 0xd1,
	0xad, 0x99, 0x44, 0xa7,
	0xd9, 0x56, 0x3d, 0x59,
	0x06, 0x3c, 0x25, 0xf3,
	0xfc, 0x1f, 0xdc, 0x2a,
};
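
/*
 * Illustrative sketch only (not part of the driver): one way an application
 * could keep the historical hashing behavior is to pass this key explicitly
 * when configuring the port, instead of relying on the PMD default. The port
 * ID and queue counts below are hypothetical, and the driver's own array is
 * referenced for brevity; a real application would provide its own copy of
 * the key bytes.
 */
#if 0 /* example, not compiled */
static int
example_configure_with_default_key(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = { .mq_mode = ETH_MQ_RX_RSS },
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = mlx4_rss_hash_key_default,
				.rss_key_len = MLX4_RSS_HASH_KEY_SIZE,
				.rss_hf = ETH_RSS_IP,
			},
		},
	};

	/* 4 Rx and 4 Tx queues chosen arbitrarily for the example. */
	return rte_eth_dev_configure(port_id, 4, 4, &conf);
}
#endif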

/**
 * Obtain a RSS context with specified properties.
 *
 * Used when creating a flow rule targeting one or several Rx queues.
 *
 * If a matching RSS context already exists, it is returned with its
 * reference count incremented.
 *
 * @param priv
 *   Pointer to private structure.
 * @param fields
 *   Fields for RSS processing (Verbs format).
 * @param[in] key
 *   Hash key to use (whose size is exactly MLX4_RSS_HASH_KEY_SIZE).
 * @param queues
 *   Number of target queues.
 * @param[in] queue_id
 *   Target queues.
 *
 * @return
 *   Pointer to RSS context on success, NULL otherwise and rte_errno is set.
 */
struct mlx4_rss *
mlx4_rss_get(struct priv *priv, uint64_t fields,
	     uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
	     uint16_t queues, const uint16_t queue_id[])
{
	struct mlx4_rss *rss;
	size_t queue_id_size = sizeof(queue_id[0]) * queues;

	LIST_FOREACH(rss, &priv->rss, next)
		if (fields == rss->fields &&
		    queues == rss->queues &&
		    !memcmp(key, rss->key, MLX4_RSS_HASH_KEY_SIZE) &&
		    !memcmp(queue_id, rss->queue_id, queue_id_size)) {
	rss = rte_malloc(__func__, offsetof(struct mlx4_rss, queue_id) +
			 queue_id_size, 0);
	*rss = (struct mlx4_rss){
	memcpy(rss->key, key, MLX4_RSS_HASH_KEY_SIZE);
	memcpy(rss->queue_id, queue_id, queue_id_size);
	LIST_INSERT_HEAD(&priv->rss, rss, next);

/**
 * Release a RSS context instance.
 *
 * Used when destroying a flow rule targeting one or several Rx queues.
 *
 * This function decrements the reference count of the context and destroys
 * it after reaching 0. The context must have no users at this point; all
 * prior calls to mlx4_rss_attach() must have been followed by matching
 * calls to mlx4_rss_detach().
 *
 * @param rss
 *   RSS context to release.
 */
void
mlx4_rss_put(struct mlx4_rss *rss)
{
	assert(!rss->usecnt);
	LIST_REMOVE(rss, next);

/**
 * Attach a user to a RSS context instance.
 *
 * Used when the RSS QP and indirection table objects must be instantiated,
 * that is, when a flow rule must be enabled.
 *
 * This function increments the usage count of the context.
 *
 * @param rss
 *   RSS context to attach to.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx4_rss_attach(struct mlx4_rss *rss)
{
	struct ibv_wq *ind_tbl[rss->queues];
	struct priv *priv = rss->priv;
	const char *msg;
	unsigned int i;

	if (!rte_is_power_of_2(RTE_DIM(ind_tbl))) {
		msg = "number of RSS queues must be a power of two";
	for (i = 0; i != RTE_DIM(ind_tbl); ++i) {
		uint16_t id = rss->queue_id[i];
		struct rxq *rxq = NULL;

		if (id < priv->dev->data->nb_rx_queues)
			rxq = priv->dev->data->rx_queues[id];
			msg = "RSS target queue is not configured";
		ind_tbl[i] = rxq->wq;
	rss->ind = ibv_create_rwq_ind_table
		(priv->ctx,
		 &(struct ibv_rwq_ind_table_init_attr){
			.log_ind_tbl_size = rte_log2_u32(RTE_DIM(ind_tbl)),
		msg = "RSS indirection table creation failure";
	rss->qp = ibv_create_qp_ex
		(priv->ctx,
		 &(struct ibv_qp_init_attr_ex){
			.comp_mask = (IBV_QP_INIT_ATTR_PD |
				      IBV_QP_INIT_ATTR_RX_HASH |
				      IBV_QP_INIT_ATTR_IND_TABLE),
			.qp_type = IBV_QPT_RAW_PACKET,
			.rwq_ind_tbl = rss->ind,
			.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
			.rx_hash_key_len = MLX4_RSS_HASH_KEY_SIZE,
			.rx_hash_key = rss->key,
			.rx_hash_fields_mask = rss->fields,
		msg = "RSS hash QP creation failure";
		 &(struct ibv_qp_attr){
			.qp_state = IBV_QPS_INIT,
			.port_num = priv->port,
		 },
		 IBV_QP_STATE | IBV_QP_PORT);
		msg = "failed to switch RSS hash QP to INIT state";
		 &(struct ibv_qp_attr){
			.qp_state = IBV_QPS_RTR,
		msg = "failed to switch RSS hash QP to RTR state";
	ERROR("mlx4: %s", msg);

/**
 * Detach a user from a RSS context instance.
 *
 * Used when disabling (not destroying) a flow rule.
 *
 * This function decrements the usage count of the context and destroys
 * usage resources after reaching 0.
 *
 * @param rss
 *   RSS context to detach from.
 */
void
mlx4_rss_detach(struct mlx4_rss *rss)
{
	claim_zero(ibv_destroy_qp(rss->qp));
	claim_zero(ibv_destroy_rwq_ind_table(rss->ind));
}
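
/*
 * Illustrative sketch only (not part of the driver): expected pairing of the
 * RSS context calls above when a flow rule spreading traffic over two Rx
 * queues is created, enabled, disabled and destroyed. The queue IDs and the
 * fields mask are hypothetical and error handling is reduced to a minimum.
 */
#if 0 /* example, not compiled */
static int
example_rss_lifecycle(struct priv *priv, uint64_t fields)
{
	uint16_t queue_id[2] = { 0, 1 };
	struct mlx4_rss *rss;
	int ret;

	/* Creation time: look up or register a matching context. */
	rss = mlx4_rss_get(priv, fields, mlx4_rss_hash_key_default,
			   RTE_DIM(queue_id), queue_id);
	if (!rss)
		return -rte_errno;
	/* Enable time: instantiate the indirection table and hash QP. */
	ret = mlx4_rss_attach(rss);
	if (ret) {
		mlx4_rss_put(rss);
		return ret;
	}
	/* ... traffic is received on queues 0 and 1 ... */
	/* Disable time: release the QP and indirection table usage. */
	mlx4_rss_detach(rss);
	/* Destruction time: drop the reference. */
	mlx4_rss_put(rss);
	return 0;
}
#endif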

/**
 * Allocate Rx queue elements.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_rxq_alloc_elts(struct rxq *rxq)
{
	struct rxq_elt (*elts)[rxq->elts_n] = rxq->elts;
	unsigned int i;

	/* For each WR (packet). */
	for (i = 0; i != RTE_DIM(*elts); ++i) {
		struct rxq_elt *elt = &(*elts)[i];
		struct ibv_recv_wr *wr = &elt->wr;
		struct ibv_sge *sge = &(*elts)[i].sge;
		struct rte_mbuf *buf = rte_pktmbuf_alloc(rxq->mp);

			rte_pktmbuf_free_seg((*elts)[i].buf);
			(*elts)[i].buf = NULL;
		wr->next = &(*elts)[(i + 1)].wr;
		/* Headroom is reserved by rte_pktmbuf_alloc(). */
		assert(buf->data_off == RTE_PKTMBUF_HEADROOM);
		/* Buffer is supposed to be empty. */
		assert(rte_pktmbuf_data_len(buf) == 0);
		assert(rte_pktmbuf_pkt_len(buf) == 0);
		/* sge->addr must be able to store a pointer. */
		assert(sizeof(sge->addr) >= sizeof(uintptr_t));
		/* SGE keeps its headroom. */
		sge->addr = (uintptr_t)
			((uint8_t *)buf->buf_addr + RTE_PKTMBUF_HEADROOM);
		sge->length = (buf->buf_len - RTE_PKTMBUF_HEADROOM);
		sge->lkey = rxq->mr->lkey;
		/* Redundant check for tailroom. */
		assert(sge->length == rte_pktmbuf_tailroom(buf));
	}
	/* The last WR pointer must be NULL. */
	(*elts)[(i - 1)].wr.next = NULL;
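	/*
	 * Note: keeping the chain NULL-terminated lets mlx4_rx_queue_setup()
	 * hand all descriptors to the hardware with a single
	 * ibv_post_wq_recv() call instead of posting them one by one.
	 */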

/**
 * Free Rx queue elements.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 */
static void
mlx4_rxq_free_elts(struct rxq *rxq)
{
	unsigned int i;
	struct rxq_elt (*elts)[rxq->elts_n] = rxq->elts;

	DEBUG("%p: freeing WRs", (void *)rxq);
	for (i = 0; (i != RTE_DIM(*elts)); ++i) {
		if (!(*elts)[i].buf)
			continue;
		rte_pktmbuf_free_seg((*elts)[i].buf);
		(*elts)[i].buf = NULL;

/**
 * DPDK callback to configure a Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Rx queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_rxconf *conf,
		    struct rte_mempool *mp)
{
	struct priv *priv = dev->data->dev_private;
	uint32_t mb_len = rte_pktmbuf_data_room_size(mp);
	struct rxq_elt (*elts)[desc];
	struct rte_flow_error error;
	struct rxq *rxq;
	struct mlx4_malloc_vec vec[] = {
		{
			.align = RTE_CACHE_LINE_SIZE,
			.size = sizeof(*rxq),
			.addr = (void **)&rxq,
		},
		{
			.align = RTE_CACHE_LINE_SIZE,
			.size = sizeof(*elts),
			.addr = (void **)&elts,
		},
	};
	int ret;

	(void)conf; /* Thresholds configuration (ignored). */
	DEBUG("%p: configuring queue %u for %u descriptors",
	      (void *)dev, idx, desc);
	if (idx >= dev->data->nb_rx_queues) {
		rte_errno = EOVERFLOW;
		ERROR("%p: queue index out of range (%u >= %u)",
		      (void *)dev, idx, dev->data->nb_rx_queues);
	rxq = dev->data->rx_queues[idx];
		ERROR("%p: Rx queue %u already configured, release it first",
		      (void *)dev, idx);
		ERROR("%p: invalid number of Rx descriptors", (void *)dev);
	/* Allocate and initialize Rx queue. */
	mlx4_zmallocv_socket("RXQ", vec, RTE_DIM(vec), socket);
		ERROR("%p: unable to allocate queue index %u",
		      (void *)dev, idx);
		.port_id = dev->data->port_id,
	/* Enable scattered packets support for this queue if necessary. */
	assert(mb_len >= RTE_PKTMBUF_HEADROOM);
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
	} else if (dev->data->dev_conf.rxmode.enable_scatter) {
		WARN("%p: scattered mode has been requested but is"
		     " not supported, this may lead to packet loss",
		     (void *)dev);
		WARN("%p: the requested maximum Rx packet size (%u) is"
		     " larger than a single mbuf (%u) and scattered"
		     " mode has not been requested",
		     (void *)dev,
		     dev->data->dev_conf.rxmode.max_rx_pkt_len,
		     mb_len - RTE_PKTMBUF_HEADROOM);
	/* Use the entire Rx mempool as the memory region. */
	rxq->mr = mlx4_mp2mr(priv->pd, mp);
		ERROR("%p: MR creation failure: %s",
		      (void *)dev, strerror(rte_errno));
	if (dev->data->dev_conf.intr_conf.rxq) {
		rxq->channel = ibv_create_comp_channel(priv->ctx);
		if (rxq->channel == NULL) {
			ERROR("%p: Rx interrupt completion channel creation"
			      " failure: %s",
			      (void *)dev, strerror(rte_errno));
		if (mlx4_fd_set_non_blocking(rxq->channel->fd) < 0) {
			ERROR("%p: unable to make Rx interrupt completion"
			      " channel non-blocking: %s",
			      (void *)dev, strerror(rte_errno));
	rxq->cq = ibv_create_cq(priv->ctx, desc, NULL, rxq->channel, 0);
		ERROR("%p: CQ creation failure: %s",
		      (void *)dev, strerror(rte_errno));
	rxq->wq = ibv_create_wq
		(priv->ctx,
		 &(struct ibv_wq_init_attr){
			.wq_type = IBV_WQT_RQ,
			.max_wr = RTE_MIN(priv->device_attr.max_qp_wr, desc),
		rte_errno = errno ? errno : EINVAL;
		ERROR("%p: WQ creation failure: %s",
		      (void *)dev, strerror(rte_errno));
		 &(struct ibv_wq_attr){
			.attr_mask = IBV_WQ_ATTR_STATE,
			.wq_state = IBV_WQS_RDY,
		ERROR("%p: WQ state to IBV_WQS_RDY failed: %s",
		      (void *)dev, strerror(rte_errno));
	ret = mlx4_rxq_alloc_elts(rxq);
		ERROR("%p: RXQ allocation failed: %s",
		      (void *)dev, strerror(rte_errno));
	ret = ibv_post_wq_recv(rxq->wq, &(*rxq->elts)[0].wr,
			       &(struct ibv_recv_wr *){ NULL });
		ERROR("%p: ibv_post_wq_recv() failed: %s",
		      (void *)dev,
		      strerror(rte_errno));
	DEBUG("%p: adding Rx queue %p to list", (void *)dev, (void *)rxq);
	dev->data->rx_queues[idx] = rxq;
	/* Enable associated flows. */
	ret = mlx4_flow_sync(priv, &error);
	ERROR("cannot re-attach flow rules to queue %u"
	      " (code %d, \"%s\"), flow error type %d, cause %p, message: %s",
	      idx, -ret, strerror(-ret), error.type, error.cause,
	      error.message ? error.message : "(unspecified)");
	dev->data->rx_queues[idx] = NULL;
	mlx4_rx_queue_release(rxq);
	assert(rte_errno > 0);
	return -rte_errno;
}

/**
 * DPDK callback to release a Rx queue.
 *
 * @param dpdk_rxq
 *   Generic Rx queue pointer.
 */
void
mlx4_rx_queue_release(void *dpdk_rxq)
{
	struct rxq *rxq = (struct rxq *)dpdk_rxq;
	struct priv *priv = rxq->priv;
	unsigned int i;

	for (i = 0; i != priv->dev->data->nb_rx_queues; ++i)
		if (priv->dev->data->rx_queues[i] == rxq) {
			DEBUG("%p: removing Rx queue %p from list",
			      (void *)priv->dev, (void *)rxq);
			priv->dev->data->rx_queues[i] = NULL;
			break;
		}
	mlx4_flow_sync(priv, NULL);
	mlx4_rxq_free_elts(rxq);
	if (rxq->wq)
		claim_zero(ibv_destroy_wq(rxq->wq));
	if (rxq->cq)
		claim_zero(ibv_destroy_cq(rxq->cq));
	if (rxq->channel)
		claim_zero(ibv_destroy_comp_channel(rxq->channel));
	if (rxq->mr)
		claim_zero(ibv_dereg_mr(rxq->mr));
	rte_free(rxq);
}
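
/*
 * Illustrative sketch only (not part of the driver): how the two callbacks
 * above are typically exercised from an application through the ethdev API.
 * Port ID, pool sizing and descriptor count below are hypothetical.
 */
#if 0 /* example, not compiled */
static int
example_setup_rx_queue(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mempool *mp;

	/* Mempool backing the queue; it also becomes its memory region. */
	mp = rte_pktmbuf_pool_create("example_pool", 4096, 256, 0,
				     RTE_MBUF_DEFAULT_BUF_SIZE,
				     SOCKET_ID_ANY);
	if (!mp)
		return -rte_errno;
	/* Reaches mlx4_rx_queue_setup() through rte_eth_rx_queue_setup(). */
	return rte_eth_rx_queue_setup(port_id, queue_id, 512,
				      SOCKET_ID_ANY, NULL, mp);
}
#endif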