/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of 6WIND S.A. nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/**
 * @file
 * Rx queues configuration for mlx4 driver.
 */
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

#include "mlx4.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"
/**
 * Allocate Rx queue elements.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param elts_n
 *   Number of elements to allocate.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_rxq_alloc_elts(struct rxq *rxq, unsigned int elts_n)
{
        unsigned int i;
        struct rxq_elt (*elts)[elts_n] =
                rte_calloc_socket("RXQ elements", 1, sizeof(*elts), 0,
                                  rxq->socket);

        if (elts == NULL) {
                rte_errno = ENOMEM;
                ERROR("%p: can't allocate packets array", (void *)rxq);
                goto error;
        }
        /* For each WR (packet). */
        for (i = 0; (i != elts_n); ++i) {
                struct rxq_elt *elt = &(*elts)[i];
                struct ibv_recv_wr *wr = &elt->wr;
                struct ibv_sge *sge = &(*elts)[i].sge;
                struct rte_mbuf *buf = rte_pktmbuf_alloc(rxq->mp);

                if (buf == NULL) {
                        rte_errno = ENOMEM;
                        ERROR("%p: empty mbuf pool", (void *)rxq);
                        goto error;
                }
                elt->buf = buf;
                /* Chain WRs together; the last link is fixed up below. */
                wr->next = &(*elts)[(i + 1)].wr;
                wr->sg_list = sge;
                wr->num_sge = 1;
                /* Headroom is reserved by rte_pktmbuf_alloc(). */
                assert(buf->data_off == RTE_PKTMBUF_HEADROOM);
                /* Buffer is supposed to be empty. */
                assert(rte_pktmbuf_data_len(buf) == 0);
                assert(rte_pktmbuf_pkt_len(buf) == 0);
                /* sge->addr must be able to store a pointer. */
                assert(sizeof(sge->addr) >= sizeof(uintptr_t));
                /* SGE keeps its headroom. */
                sge->addr = (uintptr_t)
                        ((uint8_t *)buf->buf_addr + RTE_PKTMBUF_HEADROOM);
                sge->length = (buf->buf_len - RTE_PKTMBUF_HEADROOM);
                sge->lkey = rxq->mr->lkey;
                /* Redundant check for tailroom. */
                assert(sge->length == rte_pktmbuf_tailroom(buf));
        }
        /* The last WR pointer must be NULL. */
        (*elts)[(i - 1)].wr.next = NULL;
        DEBUG("%p: allocated and configured %u single-segment WRs",
              (void *)rxq, elts_n);
        rxq->elts_n = elts_n;
        rxq->elts = elts;
        return 0;
error:
        if (elts != NULL) {
                for (i = 0; (i != RTE_DIM(*elts)); ++i)
                        /* Elements past the failure point have no mbuf. */
                        if ((*elts)[i].buf != NULL)
                                rte_pktmbuf_free_seg((*elts)[i].buf);
                rte_free(elts);
        }
        DEBUG("%p: failed, freed everything", (void *)rxq);
        assert(rte_errno > 0);
        return -rte_errno;
}
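
/*
 * The loop above leaves (*elts)[0].wr -> (*elts)[1].wr -> ... -> NULL as a
 * singly-linked chain of single-SGE receive WRs, one mbuf each. Since
 * ibv_post_recv() follows wr->next until it reaches NULL, this layout lets
 * mlx4_rxq_setup() post the entire ring to the device in a single call,
 * sketched here:
 *
 *      struct ibv_recv_wr *bad_wr;
 *      int ret = ibv_post_recv(qp, &(*elts)[0].wr, &bad_wr);
 *
 * On failure, bad_wr is updated by libibverbs to point at the first WR that
 * could not be posted.
 */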
/**
 * Free Rx queue elements.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 */
static void
mlx4_rxq_free_elts(struct rxq *rxq)
{
        unsigned int i;
        unsigned int elts_n = rxq->elts_n;
        struct rxq_elt (*elts)[elts_n] = rxq->elts;

        DEBUG("%p: freeing WRs", (void *)rxq);
        rxq->elts_n = 0;
        rxq->elts = NULL;
        if (elts == NULL)
                return;
        for (i = 0; (i != RTE_DIM(*elts)); ++i)
                rte_pktmbuf_free_seg((*elts)[i].buf);
        rte_free(elts);
}
/**
 * Clean up a Rx queue.
 *
 * Destroy objects, free allocated memory and reset the structure for reuse.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 */
void
mlx4_rxq_cleanup(struct rxq *rxq)
{
        DEBUG("cleaning up %p", (void *)rxq);
        mlx4_rxq_free_elts(rxq);
        /* Destroy in reverse order of creation: the QP before the CQ it is
         * attached to, the CQ before its completion channel. */
        if (rxq->qp != NULL)
                claim_zero(ibv_destroy_qp(rxq->qp));
        if (rxq->cq != NULL)
                claim_zero(ibv_destroy_cq(rxq->cq));
        if (rxq->channel != NULL)
                claim_zero(ibv_destroy_comp_channel(rxq->channel));
        if (rxq->mr != NULL)
                claim_zero(ibv_dereg_mr(rxq->mr));
        memset(rxq, 0, sizeof(*rxq));
}
/**
 * Allocate a Queue Pair.
 * Optionally setup inline receive if supported.
 *
 * @param priv
 *   Pointer to private structure.
 * @param cq
 *   Completion queue to associate with QP.
 * @param desc
 *   Number of descriptors in QP (hint only).
 *
 * @return
 *   QP pointer or NULL in case of error and rte_errno is set.
 */
static struct ibv_qp *
mlx4_rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc)
{
        struct ibv_qp *qp;
        struct ibv_qp_init_attr attr = {
                /* CQ to be associated with the send queue. */
                .send_cq = cq,
                /* CQ to be associated with the receive queue. */
                .recv_cq = cq,
                .cap = {
                        /* Max number of outstanding WRs. */
                        .max_recv_wr = ((priv->device_attr.max_qp_wr < desc) ?
                                        priv->device_attr.max_qp_wr :
                                        desc),
                        /* Max number of scatter/gather elements in a WR. */
                        .max_recv_sge = 1,
                },
                .qp_type = IBV_QPT_RAW_PACKET,
        };

        qp = ibv_create_qp(priv->pd, &attr);
        if (qp == NULL)
                rte_errno = errno ? errno : EINVAL;
        return qp;
}
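
/*
 * Note that desc really is a hint: .max_recv_wr is clamped to
 * device_attr.max_qp_wr above, so e.g. requesting desc == 4096 from a device
 * reporting max_qp_wr == 1024 silently yields a 1024-deep receive queue.
 * The figures in this example are illustrative, not actual hardware limits.
 */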
/**
 * Configure a Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
               unsigned int socket, const struct rte_eth_rxconf *conf,
               struct rte_mempool *mp)
{
        struct priv *priv = dev->data->dev_private;
        struct rxq tmpl = {
                .priv = priv,
                .mp = mp,
                .socket = socket
        };
        struct ibv_qp_attr mod;
        struct ibv_recv_wr *bad_wr;
        unsigned int mb_len;
        int ret;

        (void)conf; /* Thresholds configuration (ignored). */
        mb_len = rte_pktmbuf_data_room_size(mp);
        if (desc == 0) {
                rte_errno = EINVAL;
                ERROR("%p: invalid number of Rx descriptors", (void *)dev);
                goto error;
        }
        /* Scattered Rx is unsupported; check packets fit in a single mbuf. */
        assert(mb_len >= RTE_PKTMBUF_HEADROOM);
        if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
            (mb_len - RTE_PKTMBUF_HEADROOM)) {
                ;
        } else if (dev->data->dev_conf.rxmode.enable_scatter) {
                WARN("%p: scattered mode has been requested but is"
                     " not supported, this may lead to packet loss",
                     (void *)dev);
        } else {
                WARN("%p: the requested maximum Rx packet size (%u) is"
                     " larger than a single mbuf (%u) and scattered"
                     " mode has not been requested",
                     (void *)dev,
                     dev->data->dev_conf.rxmode.max_rx_pkt_len,
                     mb_len - RTE_PKTMBUF_HEADROOM);
        }
        /* Use the entire Rx mempool as the memory region. */
        tmpl.mr = mlx4_mp2mr(priv->pd, mp);
        if (tmpl.mr == NULL) {
                rte_errno = EINVAL;
                ERROR("%p: MR creation failure: %s",
                      (void *)dev, strerror(rte_errno));
                goto error;
        }
        if (dev->data->dev_conf.intr_conf.rxq) {
                tmpl.channel = ibv_create_comp_channel(priv->ctx);
                if (tmpl.channel == NULL) {
                        rte_errno = ENOMEM;
                        ERROR("%p: Rx interrupt completion channel creation"
                              " failure: %s",
                              (void *)dev, strerror(rte_errno));
                        goto error;
                }
                if (mlx4_fd_set_non_blocking(tmpl.channel->fd) < 0) {
                        ERROR("%p: unable to make Rx interrupt completion"
                              " channel non-blocking: %s",
                              (void *)dev, strerror(rte_errno));
                        goto error;
                }
        }
        tmpl.cq = ibv_create_cq(priv->ctx, desc, NULL, tmpl.channel, 0);
        if (tmpl.cq == NULL) {
                rte_errno = ENOMEM;
                ERROR("%p: CQ creation failure: %s",
                      (void *)dev, strerror(rte_errno));
                goto error;
        }
316 DEBUG("priv->device_attr.max_qp_wr is %d",
317 priv->device_attr.max_qp_wr);
318 DEBUG("priv->device_attr.max_sge is %d",
319 priv->device_attr.max_sge);
320 tmpl.qp = mlx4_rxq_setup_qp(priv, tmpl.cq, desc);
321 if (tmpl.qp == NULL) {
322 ERROR("%p: QP creation failure: %s",
323 (void *)dev, strerror(rte_errno));
        mod = (struct ibv_qp_attr){
                /* Move the QP to this state. */
                .qp_state = IBV_QPS_INIT,
                /* Primary port number. */
                .port_num = priv->port
        };
        ret = ibv_modify_qp(tmpl.qp, &mod, IBV_QP_STATE | IBV_QP_PORT);
        if (ret) {
                rte_errno = ret;
                ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
                      (void *)dev, strerror(rte_errno));
                goto error;
        }
        ret = mlx4_rxq_alloc_elts(&tmpl, desc);
        if (ret) {
                ERROR("%p: RXQ allocation failed: %s",
                      (void *)dev, strerror(rte_errno));
                goto error;
        }
        ret = ibv_post_recv(tmpl.qp, &(*tmpl.elts)[0].wr, &bad_wr);
        if (ret) {
                rte_errno = ret;
                ERROR("%p: ibv_post_recv() failed for WR %p: %s",
                      (void *)dev,
                      (void *)bad_wr,
                      strerror(rte_errno));
                goto error;
        }
        mod = (struct ibv_qp_attr){
                .qp_state = IBV_QPS_RTR
        };
        ret = ibv_modify_qp(tmpl.qp, &mod, IBV_QP_STATE);
        if (ret) {
                rte_errno = ret;
                ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
                      (void *)dev, strerror(rte_errno));
                goto error;
        }
        /* Save port ID. */
        tmpl.port_id = dev->data->port_id;
        DEBUG("%p: RTE port ID: %u", (void *)rxq, tmpl.port_id);
        /* Clean up rxq in case we're reinitializing it. */
        DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq);
        mlx4_rxq_cleanup(rxq);
        *rxq = tmpl;
        DEBUG("%p: rxq updated with %p", (void *)rxq, (void *)&tmpl);
        return 0;
error:
        ret = rte_errno;
        mlx4_rxq_cleanup(&tmpl);
        rte_errno = ret;
        assert(rte_errno > 0);
        return -rte_errno;
}
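
/*
 * The sequence above follows the usual Verbs dependency order: register the
 * MR, optionally create a completion channel, create the CQ on that channel,
 * create the QP on the CQ, transition the QP RESET -> INIT, post the initial
 * receive WRs, then transition INIT -> RTR (ready to receive). No RTS
 * transition is needed since this QP only receives; mlx4_rxq_cleanup()
 * tears everything down in the reverse order.
 */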
/**
 * DPDK callback to configure a Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Rx queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                    unsigned int socket, const struct rte_eth_rxconf *conf,
                    struct rte_mempool *mp)
{
        struct priv *priv = dev->data->dev_private;
        struct rxq *rxq = dev->data->rx_queues[idx];
        int ret;

        DEBUG("%p: configuring queue %u for %u descriptors",
              (void *)dev, idx, desc);
        if (idx >= dev->data->nb_rx_queues) {
                rte_errno = EOVERFLOW;
                ERROR("%p: queue index out of range (%u >= %u)",
                      (void *)dev, idx, dev->data->nb_rx_queues);
                return -rte_errno;
        }
        if (rxq != NULL) {
                DEBUG("%p: reusing already allocated queue index %u (%p)",
                      (void *)dev, idx, (void *)rxq);
                if (priv->started) {
                        rte_errno = EEXIST;
                        return -rte_errno;
                }
                dev->data->rx_queues[idx] = NULL;
                /* Queue 0 carries the MAC flow rule; drop it first. */
                if (idx == 0)
                        mlx4_mac_addr_del(priv);
                mlx4_rxq_cleanup(rxq);
        } else {
                rxq = rte_calloc_socket("RXQ", 1, sizeof(*rxq), 0, socket);
                if (rxq == NULL) {
                        rte_errno = ENOMEM;
                        ERROR("%p: unable to allocate queue index %u",
                              (void *)dev, idx);
                        return -rte_errno;
                }
        }
        ret = mlx4_rxq_setup(dev, rxq, desc, socket, conf, mp);
        if (ret) {
                rte_free(rxq);
        } else {
                rxq->stats.idx = idx;
                DEBUG("%p: adding Rx queue %p to list",
                      (void *)dev, (void *)rxq);
                dev->data->rx_queues[idx] = rxq;
                /* Update receive callback. */
                dev->rx_pkt_burst = mlx4_rx_burst;
        }
        return ret;
}
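
/*
 * This callback is not meant to be invoked directly; it is installed in the
 * mlx4 dev_ops table and reached through rte_eth_rx_queue_setup(). A minimal
 * application-side sketch (the "port" and "pool" names are illustrative):
 *
 *      struct rte_mempool *pool = rte_pktmbuf_pool_create("rx_pool", 8192,
 *              256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *      int ret = rte_eth_rx_queue_setup(port, 0, 512, rte_socket_id(),
 *                                       NULL, pool);
 *
 * rte_ethdev validates the arguments, substitutes the device defaults for
 * the NULL rxconf and dispatches here.
 */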
/**
 * DPDK callback to release a Rx queue.
 *
 * @param dpdk_rxq
 *   Generic Rx queue pointer.
 */
void
mlx4_rx_queue_release(void *dpdk_rxq)
{
        struct rxq *rxq = (struct rxq *)dpdk_rxq;
        struct priv *priv;
        unsigned int i;

        if (rxq == NULL)
                return;
        priv = rxq->priv;
        for (i = 0; i != priv->dev->data->nb_rx_queues; ++i)
                if (priv->dev->data->rx_queues[i] == rxq) {
                        DEBUG("%p: removing Rx queue %p from list",
                              (void *)priv->dev, (void *)rxq);
                        priv->dev->data->rx_queues[i] = NULL;
                        if (i == 0)
                                mlx4_mac_addr_del(priv);
                        break;
                }
        mlx4_rxq_cleanup(rxq);
        rte_free(rxq);
}
/**
 * Unregister a MAC address.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
mlx4_mac_addr_del(struct priv *priv)
{
#ifndef NDEBUG
        uint8_t (*mac)[ETHER_ADDR_LEN] = &priv->mac.addr_bytes;
#endif

        if (!priv->mac_flow)
                return;
        DEBUG("%p: removing MAC address %02x:%02x:%02x:%02x:%02x:%02x",
              (void *)priv,
              (*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5]);
        claim_zero(ibv_destroy_flow(priv->mac_flow));
        priv->mac_flow = NULL;
}
/**
 * Register a MAC address.
 *
 * The MAC address is registered in queue 0.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_mac_addr_add(struct priv *priv)
{
        uint8_t (*mac)[ETHER_ADDR_LEN] = &priv->mac.addr_bytes;
        struct rxq *rxq;
        struct ibv_flow *flow;

        /* If device isn't started, this is all we need to do. */
        if (!priv->started)
                return 0;
        if (priv->dev->data->rx_queues && priv->dev->data->rx_queues[0])
                rxq = priv->dev->data->rx_queues[0];
        else
                return 0;
        /* Allocate flow specification on the stack. */
        struct __attribute__((packed)) {
                struct ibv_flow_attr attr;
                struct ibv_flow_spec_eth spec;
        } data;
        struct ibv_flow_attr *attr = &data.attr;
        struct ibv_flow_spec_eth *spec = &data.spec;

        if (priv->mac_flow)
                mlx4_mac_addr_del(priv);
        /*
         * No padding must be inserted by the compiler between attr and spec.
         * This layout is expected by libibverbs.
         */
        assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec);
        *attr = (struct ibv_flow_attr){
                .type = IBV_FLOW_ATTR_NORMAL,
                .priority = 3,
                .num_of_specs = 1,
                .port = priv->port,
                .flags = 0
        };
        *spec = (struct ibv_flow_spec_eth){
                .type = IBV_FLOW_SPEC_ETH,
                .size = sizeof(*spec),
                .val = {
                        .dst_mac = {
                                (*mac)[0], (*mac)[1], (*mac)[2],
                                (*mac)[3], (*mac)[4], (*mac)[5]
                        },
                },
                .mask = {
                        .dst_mac = "\xff\xff\xff\xff\xff\xff",
                }
        };
565 DEBUG("%p: adding MAC address %02x:%02x:%02x:%02x:%02x:%02x",
567 (*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5]);
        /* Create related flow. */
        flow = ibv_create_flow(rxq->qp, attr);
        if (flow == NULL) {
                rte_errno = errno ? errno : EINVAL;
                ERROR("%p: flow configuration failed, errno=%d: %s",
                      (void *)rxq, rte_errno, strerror(errno));
                return -rte_errno;
        }
        assert(priv->mac_flow == NULL);
        priv->mac_flow = flow;
        return 0;
}
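
/*
 * ibv_create_flow() expects struct ibv_flow_attr to be immediately followed
 * in memory by attr->num_of_specs flow specifications, which is why attr and
 * spec share a packed struct above and why the assert checks that the
 * compiler inserted no padding between them:
 *
 *      +----------------------+--------------------------+
 *      | struct ibv_flow_attr | struct ibv_flow_spec_eth |
 *      +----------------------+--------------------------+
 *        .num_of_specs = 1      starts at attr + sizeof(*attr)
 */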