/*-
 *   BSD LICENSE
 *
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of 6WIND S.A. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file
 * Rx queues configuration for mlx4 driver.
 */

#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

#include "mlx4.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"

/**
 * Allocate Rx queue elements.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param elts_n
 *   Number of elements to allocate.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_rxq_alloc_elts(struct rxq *rxq, unsigned int elts_n)
{
	unsigned int i;
	struct rxq_elt (*elts)[elts_n] =
		rte_calloc_socket("RXQ elements", 1, sizeof(*elts), 0,
				  rxq->socket);
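
	/*
	 * Note: elts points to an array type whose length is elts_n; keeping
	 * the element count in the pointed-to type lets sizeof(*elts) and
	 * RTE_DIM(*elts) work on this dynamically-sized allocation.
	 */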
	if (elts == NULL) {
		rte_errno = ENOMEM;
		ERROR("%p: can't allocate packets array", (void *)rxq);
		goto error;
	}
	/* For each WR (packet). */
	for (i = 0; (i != elts_n); ++i) {
		struct rxq_elt *elt = &(*elts)[i];
		struct ibv_recv_wr *wr = &elt->wr;
		struct ibv_sge *sge = &(*elts)[i].sge;
		struct rte_mbuf *buf = rte_pktmbuf_alloc(rxq->mp);

		if (buf == NULL) {
			rte_errno = ENOMEM;
			ERROR("%p: empty mbuf pool", (void *)rxq);
			goto error;
		}
		elt->buf = buf;
		wr->next = &(*elts)[(i + 1)].wr;
		wr->sg_list = sge;
		wr->num_sge = 1;
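		/*
		 * WRs are chained together so the whole batch can later be
		 * posted with a single ibv_post_recv() call; the dangling
		 * pointer left by the last iteration is reset to NULL once
		 * the loop completes.
		 */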
		/* Headroom is reserved by rte_pktmbuf_alloc(). */
		assert(buf->data_off == RTE_PKTMBUF_HEADROOM);
		/* Buffer is supposed to be empty. */
		assert(rte_pktmbuf_data_len(buf) == 0);
		assert(rte_pktmbuf_pkt_len(buf) == 0);
		/* sge->addr must be able to store a pointer. */
		assert(sizeof(sge->addr) >= sizeof(uintptr_t));
		/* SGE keeps its headroom. */
		sge->addr = (uintptr_t)
			((uint8_t *)buf->buf_addr + RTE_PKTMBUF_HEADROOM);
		sge->length = (buf->buf_len - RTE_PKTMBUF_HEADROOM);
		sge->lkey = rxq->mr->lkey;
		/* Redundant check for tailroom. */
		assert(sge->length == rte_pktmbuf_tailroom(buf));
	}
	/* The last WR pointer must be NULL. */
	(*elts)[(i - 1)].wr.next = NULL;
	DEBUG("%p: allocated and configured %u single-segment WRs",
	      (void *)rxq, elts_n);
	rxq->elts_n = elts_n;
	rxq->elts_head = 0;
	rxq->elts = elts;
	return 0;
error:
	if (elts != NULL) {
		for (i = 0; (i != RTE_DIM(*elts)); ++i)
			rte_pktmbuf_free_seg((*elts)[i].buf);
		rte_free(elts);
	}
	DEBUG("%p: failed, freed everything", (void *)rxq);
	assert(rte_errno > 0);
	return -rte_errno;
}

/**
 * Free Rx queue elements.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 */
static void
mlx4_rxq_free_elts(struct rxq *rxq)
{
	unsigned int i;
	unsigned int elts_n = rxq->elts_n;
	struct rxq_elt (*elts)[elts_n] = rxq->elts;

	DEBUG("%p: freeing WRs", (void *)rxq);
	rxq->elts_n = 0;
	rxq->elts = NULL;
	if (elts == NULL)
		return;
	for (i = 0; (i != RTE_DIM(*elts)); ++i)
		rte_pktmbuf_free_seg((*elts)[i].buf);
	rte_free(elts);
}

/**
 * Clean up a Rx queue.
 *
 * Destroy objects, free allocated memory and reset the structure for reuse.
 *
 * @param rxq
 *   Pointer to Rx queue structure.
 */
void
mlx4_rxq_cleanup(struct rxq *rxq)
{
	DEBUG("cleaning up %p", (void *)rxq);
	mlx4_rxq_free_elts(rxq);
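	/*
	 * Verbs objects must be destroyed in reverse creation order: the QP
	 * references the CQ and the CQ references the completion channel,
	 * while the memory region is independent of all three.
	 */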
	if (rxq->qp != NULL)
		claim_zero(ibv_destroy_qp(rxq->qp));
	if (rxq->cq != NULL)
		claim_zero(ibv_destroy_cq(rxq->cq));
	if (rxq->channel != NULL)
		claim_zero(ibv_destroy_comp_channel(rxq->channel));
	if (rxq->mr != NULL)
		claim_zero(ibv_dereg_mr(rxq->mr));
	memset(rxq, 0, sizeof(*rxq));
}

/**
 * Configure a Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rxq
 *   Pointer to Rx queue structure.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
	       unsigned int socket, const struct rte_eth_rxconf *conf,
	       struct rte_mempool *mp)
{
	struct priv *priv = dev->data->dev_private;
	struct rxq tmpl = {
		.priv = priv,
		.mp = mp,
		.socket = socket
	};
	struct ibv_qp_attr mod;
	struct ibv_qp_init_attr qp_init;
	struct ibv_recv_wr *bad_wr;
	unsigned int mb_len;
	int ret;

	(void)conf; /* Thresholds configuration (ignored). */
	mb_len = rte_pktmbuf_data_room_size(mp);
	if (desc == 0) {
		rte_errno = EINVAL;
		ERROR("%p: invalid number of Rx descriptors", (void *)dev);
		goto error;
	}
	/* Enable scattered packets support for this queue if necessary. */
	assert(mb_len >= RTE_PKTMBUF_HEADROOM);
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
		;
	} else if (dev->data->dev_conf.rxmode.enable_scatter) {
		WARN("%p: scattered mode has been requested but is"
		     " not supported, this may lead to packet loss",
		     (void *)dev);
	} else {
		WARN("%p: the requested maximum Rx packet size (%u) is"
		     " larger than a single mbuf (%u) and scattered"
		     " mode has not been requested",
		     (void *)dev,
		     dev->data->dev_conf.rxmode.max_rx_pkt_len,
		     mb_len - RTE_PKTMBUF_HEADROOM);
	}
	/* Use the entire Rx mempool as the memory region. */
	tmpl.mr = mlx4_mp2mr(priv->pd, mp);
	if (tmpl.mr == NULL) {
		rte_errno = EINVAL;
		ERROR("%p: MR creation failure: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	if (dev->data->dev_conf.intr_conf.rxq) {
		tmpl.channel = ibv_create_comp_channel(priv->ctx);
		if (tmpl.channel == NULL) {
			rte_errno = ENOMEM;
			ERROR("%p: Rx interrupt completion channel creation"
			      " failure: %s",
			      (void *)dev, strerror(rte_errno));
			goto error;
		}
		if (mlx4_fd_set_non_blocking(tmpl.channel->fd) < 0) {
			ERROR("%p: unable to make Rx interrupt completion"
			      " channel non-blocking: %s",
			      (void *)dev, strerror(rte_errno));
			goto error;
		}
	}
	tmpl.cq = ibv_create_cq(priv->ctx, desc, NULL, tmpl.channel, 0);
	if (tmpl.cq == NULL) {
		rte_errno = ENOMEM;
		ERROR("%p: CQ creation failure: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
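	/*
	 * The CQ above is sized for one completion per descriptor and,
	 * when Rx interrupts are enabled, bound to tmpl.channel so that
	 * completion events can be delivered through its file descriptor.
	 */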
	DEBUG("priv->device_attr.max_qp_wr is %d",
	      priv->device_attr.max_qp_wr);
	DEBUG("priv->device_attr.max_sge is %d",
	      priv->device_attr.max_sge);
	qp_init = (struct ibv_qp_init_attr){
		/* CQ to be associated with the send queue. */
		.send_cq = tmpl.cq,
		/* CQ to be associated with the receive queue. */
		.recv_cq = tmpl.cq,
		.cap = {
			/* Max number of outstanding WRs. */
			.max_recv_wr = ((priv->device_attr.max_qp_wr < desc) ?
					priv->device_attr.max_qp_wr :
					desc),
			/* Max number of scatter/gather elements in a WR. */
			.max_recv_sge = 1,
		},
		.qp_type = IBV_QPT_RAW_PACKET,
	};
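	/*
	 * IBV_QPT_RAW_PACKET requests an Ethernet-level QP: frames are
	 * delivered as-is, without InfiniBand transport headers, which is
	 * what a DPDK PMD needs.
	 */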
	tmpl.qp = ibv_create_qp(priv->pd, &qp_init);
	if (tmpl.qp == NULL) {
		rte_errno = errno ? errno : EINVAL;
		ERROR("%p: QP creation failure: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
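	/*
	 * A newly created QP starts in the RESET state; it must be moved
	 * to INIT (below), have receive buffers posted, then moved to RTR
	 * (ready to receive) before it can accept incoming packets.
	 */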
	mod = (struct ibv_qp_attr){
		/* Move the QP to this state. */
		.qp_state = IBV_QPS_INIT,
		/* Primary port number. */
		.port_num = priv->port
	};
	ret = ibv_modify_qp(tmpl.qp, &mod, IBV_QP_STATE | IBV_QP_PORT);
	if (ret) {
		rte_errno = ret;
		ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	ret = mlx4_rxq_alloc_elts(&tmpl, desc);
	if (ret) {
		ERROR("%p: RXQ allocation failed: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
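	/*
	 * Since all WRs were chained together by mlx4_rxq_alloc_elts(),
	 * posting the first one hands the entire chain to the QP in a
	 * single verbs call.
	 */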
	ret = ibv_post_recv(tmpl.qp, &(*tmpl.elts)[0].wr, &bad_wr);
	if (ret) {
		rte_errno = ret;
		ERROR("%p: ibv_post_recv() failed for WR %p: %s",
		      (void *)dev,
		      (void *)bad_wr,
		      strerror(rte_errno));
		goto error;
	}
	mod = (struct ibv_qp_attr){
		.qp_state = IBV_QPS_RTR
	};
	ret = ibv_modify_qp(tmpl.qp, &mod, IBV_QP_STATE);
	if (ret) {
		rte_errno = ret;
		ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
		      (void *)dev, strerror(rte_errno));
		goto error;
	}
	/* Save port ID. */
	tmpl.port_id = dev->data->port_id;
	DEBUG("%p: RTE port ID: %u", (void *)rxq, tmpl.port_id);
	/* Clean up rxq in case we're reinitializing it. */
	DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq);
	mlx4_rxq_cleanup(rxq);
	*rxq = tmpl;
	DEBUG("%p: rxq updated with %p", (void *)rxq, (void *)&tmpl);
	return 0;
error:
	ret = rte_errno;
	mlx4_rxq_cleanup(&tmpl);
	rte_errno = ret;
	assert(rte_errno > 0);
	return -rte_errno;
}

/**
 * DPDK callback to configure a Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Rx queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_rxconf *conf,
		    struct rte_mempool *mp)
{
	struct priv *priv = dev->data->dev_private;
	struct rxq *rxq;
	int ret;

	DEBUG("%p: configuring queue %u for %u descriptors",
	      (void *)dev, idx, desc);
	if (idx >= dev->data->nb_rx_queues) {
		rte_errno = EOVERFLOW;
		ERROR("%p: queue index out of range (%u >= %u)",
		      (void *)dev, idx, dev->data->nb_rx_queues);
		return -rte_errno;
	}
	/* Look up the queue only after idx has been validated. */
	rxq = dev->data->rx_queues[idx];
	if (rxq != NULL) {
		DEBUG("%p: reusing already allocated queue index %u (%p)",
		      (void *)dev, idx, (void *)rxq);
		if (priv->started) {
			rte_errno = EEXIST;
			return -rte_errno;
		}
		dev->data->rx_queues[idx] = NULL;
		if (idx == 0)
			mlx4_mac_addr_del(priv);
		mlx4_rxq_cleanup(rxq);
	} else {
		rxq = rte_calloc_socket("RXQ", 1, sizeof(*rxq), 0, socket);
		if (rxq == NULL) {
			rte_errno = ENOMEM;
			ERROR("%p: unable to allocate queue index %u",
			      (void *)dev, idx);
			return -rte_errno;
		}
	}
	ret = mlx4_rxq_setup(dev, rxq, desc, socket, conf, mp);
	if (ret) {
		rte_free(rxq);
	} else {
		rxq->stats.idx = idx;
		DEBUG("%p: adding Rx queue %p to list",
		      (void *)dev, (void *)rxq);
		dev->data->rx_queues[idx] = rxq;
		/* Update receive callback. */
		dev->rx_pkt_burst = mlx4_rx_burst;
	}
	return ret;
}
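
/*
 * A minimal usage sketch (hypothetical application code, not part of this
 * driver): the callback above is never called directly, it is reached
 * through the generic ethdev API:
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_pktmbuf_pool_create("rx_mbufs", 4096, 256, 0,
 *				     RTE_MBUF_DEFAULT_BUF_SIZE,
 *				     rte_socket_id());
 *	if (mp == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create mbuf pool\n");
 *	if (rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *				   NULL, mp) < 0)
 *		rte_exit(EXIT_FAILURE, "Rx queue setup failed\n");
 */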

/**
 * DPDK callback to release a Rx queue.
 *
 * @param dpdk_rxq
 *   Generic Rx queue pointer.
 */
void
mlx4_rx_queue_release(void *dpdk_rxq)
{
	struct rxq *rxq = (struct rxq *)dpdk_rxq;
	struct priv *priv;
	unsigned int i;

	if (rxq == NULL)
		return;
	priv = rxq->priv;
	for (i = 0; i != priv->dev->data->nb_rx_queues; ++i)
		if (priv->dev->data->rx_queues[i] == rxq) {
			DEBUG("%p: removing Rx queue %p from list",
			      (void *)priv->dev, (void *)rxq);
			priv->dev->data->rx_queues[i] = NULL;
			if (i == 0)
				mlx4_mac_addr_del(priv);
			break;
		}
	mlx4_rxq_cleanup(rxq);
	rte_free(rxq);
}

/**
 * Unregister a MAC address.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
mlx4_mac_addr_del(struct priv *priv)
{
#ifndef NDEBUG
	uint8_t (*mac)[ETHER_ADDR_LEN] = &priv->mac.addr_bytes;
#endif

	if (priv->mac_flow == NULL)
		return;
	DEBUG("%p: removing MAC address %02x:%02x:%02x:%02x:%02x:%02x",
	      (void *)priv,
	      (*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5]);
	claim_zero(ibv_destroy_flow(priv->mac_flow));
	priv->mac_flow = NULL;
}

/**
 * Register a MAC address.
 *
 * The MAC address is registered in queue 0.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_mac_addr_add(struct priv *priv)
{
	uint8_t (*mac)[ETHER_ADDR_LEN] = &priv->mac.addr_bytes;
	struct rxq *rxq;
	struct ibv_flow *flow;

	/* If device isn't started, this is all we need to do. */
	if (!priv->started)
		return 0;
	if (priv->dev->data->rx_queues && priv->dev->data->rx_queues[0])
		rxq = priv->dev->data->rx_queues[0];
	else
		return 0;
	/* Allocate flow specification on the stack. */
	struct __attribute__((packed)) {
		struct ibv_flow_attr attr;
		struct ibv_flow_spec_eth spec;
	} data;
	struct ibv_flow_attr *attr = &data.attr;
	struct ibv_flow_spec_eth *spec = &data.spec;

	mlx4_mac_addr_del(priv);
	/*
	 * No padding must be inserted by the compiler between attr and spec.
	 * This layout is expected by libibverbs.
	 */
	assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec);
	*attr = (struct ibv_flow_attr){
		.type = IBV_FLOW_ATTR_NORMAL,
		.priority = 3,
		.num_of_specs = 1,
		.port = priv->port,
		.flags = 0
	};
	*spec = (struct ibv_flow_spec_eth){
		.type = IBV_FLOW_SPEC_ETH,
		.size = sizeof(*spec),
		.val = {
			.dst_mac = {
				(*mac)[0], (*mac)[1], (*mac)[2],
				(*mac)[3], (*mac)[4], (*mac)[5]
			},
		},
		.mask = {
			.dst_mac = "\xff\xff\xff\xff\xff\xff",
		}
	};
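	/*
	 * The all-ones mask makes this an exact match: only frames whose
	 * destination MAC equals the port's address are steered to queue 0
	 * by this rule.
	 */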
	DEBUG("%p: adding MAC address %02x:%02x:%02x:%02x:%02x:%02x",
	      (void *)priv,
	      (*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5]);
	/* Create related flow. */
	flow = ibv_create_flow(rxq->qp, attr);
	if (flow == NULL) {
		rte_errno = errno ? errno : EINVAL;
		ERROR("%p: flow configuration failed, errno=%d: %s",
		      (void *)rxq, rte_errno, strerror(errno));
		return -rte_errno;
	}
	assert(priv->mac_flow == NULL);
	priv->mac_flow = flow;
	return 0;
}