/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <assert.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdlib.h>
#include <errno.h>
#include <net/if.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif

/* DPDK headers don't like -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_pci.h>
#include <rte_common.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"

/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_dev_close(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	void *tmp;
	unsigned int i;

	priv_lock(priv);
	DEBUG("%p: closing device \"%s\"",
	      (void *)dev,
	      ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	if (priv->rxqs != NULL) {
		/* XXX race condition if mlx5_rx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->rxqs_n); ++i) {
			tmp = (*priv->rxqs)[i];
			if (tmp == NULL)
				continue;
			(*priv->rxqs)[i] = NULL;
			rxq_cleanup(tmp);
			rte_free(tmp);
		}
		priv->rxqs_n = 0;
		priv->rxqs = NULL;
	}
	if (priv->txqs != NULL) {
		/* XXX race condition if mlx5_tx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->txqs_n); ++i) {
			tmp = (*priv->txqs)[i];
			if (tmp == NULL)
				continue;
			(*priv->txqs)[i] = NULL;
			txq_cleanup(tmp);
			rte_free(tmp);
		}
		priv->txqs_n = 0;
		priv->txqs = NULL;
	}
	if (priv->rss)
		rxq_cleanup(&priv->rxq_parent);
	if (priv->pd != NULL) {
		assert(priv->ctx != NULL);
		claim_zero(ibv_dealloc_pd(priv->pd));
		claim_zero(ibv_close_device(priv->ctx));
	} else
		assert(priv->ctx == NULL);
	priv_unlock(priv);
	memset(priv, 0, sizeof(*priv));
}

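/*
 * Device operations exported to DPDK's ethdev layer; these callbacks are
 * wired into each port's rte_eth_dev in mlx5_pci_devinit() below.
 */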
static const struct eth_dev_ops mlx5_dev_ops = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_close = mlx5_dev_close,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.dev_infos_get = mlx5_dev_infos_get,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mtu_set = mlx5_dev_set_mtu,
};

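/*
 * Per-adapter bookkeeping: the PCI address identifies the physical
 * adapter, while "ports" tracks which of its ports have already been
 * brought up as ethdevs.
 */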
static struct {
	struct rte_pci_addr pci_addr; /* associated PCI address */
	uint32_t ports; /* physical ports bitfield. */
} mlx5_dev[32];

/**
 * Get device index in mlx5_dev[] from PCI bus address.
 *
 * @param[in] pci_addr
 *   PCI bus address to look for.
 *
 * @return
 *   mlx5_dev[] index on success, -1 on failure.
 */
static int
mlx5_dev_idx(struct rte_pci_addr *pci_addr)
{
	unsigned int i;
	int ret = -1;

	assert(pci_addr != NULL);
	for (i = 0; (i != RTE_DIM(mlx5_dev)); ++i) {
		if ((mlx5_dev[i].pci_addr.domain == pci_addr->domain) &&
		    (mlx5_dev[i].pci_addr.bus == pci_addr->bus) &&
		    (mlx5_dev[i].pci_addr.devid == pci_addr->devid) &&
		    (mlx5_dev[i].pci_addr.function == pci_addr->function))
			return i;
		if ((mlx5_dev[i].ports == 0) && (ret == -1))
			ret = i;
	}
	return ret;
}

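/*
 * Forward declaration of mlx5_driver, referenced by mlx5_pci_devinit()
 * before its definition near the end of this file.
 */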
static struct eth_driver mlx5_driver;

/**
 * DPDK callback to register a PCI device.
 *
 * This function creates an Ethernet device for each port of a given
 * PCI device.
 *
 * @param[in] pci_drv
 *   PCI driver structure (mlx5_driver).
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
static int
mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	struct ibv_device **list;
	struct ibv_device *ibv_dev;
	int err = 0;
	struct ibv_context *attr_ctx = NULL;
	struct ibv_device_attr device_attr;
	unsigned int vf;
	int idx;
	int i;
	(void)pci_drv;
	assert(pci_drv == &mlx5_driver.pci_drv);
	/* Get mlx5_dev[] index. */
	idx = mlx5_dev_idx(&pci_dev->addr);
	if (idx == -1) {
		ERROR("this driver cannot support any more adapters");
		return -ENOMEM;
	}
	DEBUG("using driver device index %d", idx);

	/* Save PCI address. */
	mlx5_dev[idx].pci_addr = pci_dev->addr;
	list = ibv_get_device_list(&i);
	if (list == NULL) {
		assert(errno);
		if (errno == ENOSYS) {
			WARN("cannot list devices, is ib_uverbs loaded?");
			return 0;
		}
		return -errno;
	}
	assert(i >= 0);
	/*
	 * For each listed device, check related sysfs entry against
	 * the provided PCI ID.
	 */
	while (i != 0) {
		struct rte_pci_addr pci_addr;

		--i;
		DEBUG("checking device \"%s\"", list[i]->name);
		if (mlx5_ibv_device_to_pci_addr(list[i], &pci_addr))
			continue;
		if ((pci_dev->addr.domain != pci_addr.domain) ||
		    (pci_dev->addr.bus != pci_addr.bus) ||
		    (pci_dev->addr.devid != pci_addr.devid) ||
		    (pci_dev->addr.function != pci_addr.function))
			continue;
		vf = ((pci_dev->id.device_id ==
		       PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) ||
		      (pci_dev->id.device_id ==
		       PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF));
		INFO("PCI information matches, using device \"%s\" (VF: %s)",
		     list[i]->name, (vf ? "true" : "false"));
		attr_ctx = ibv_open_device(list[i]);
		err = errno;
		break;
	}
	if (attr_ctx == NULL) {
		ibv_free_device_list(list);
		switch (err) {
		case 0:
			WARN("cannot access device, is mlx5_ib loaded?");
			return 0;
		case EINVAL:
			WARN("cannot use device, are drivers up to date?");
			return 0;
		}
		assert(err > 0);
		return -err;
	}
	ibv_dev = list[i];

	DEBUG("device opened");
	if (ibv_query_device(attr_ctx, &device_attr))
		goto error;
	INFO("%u port(s) detected", device_attr.phys_port_cnt);

	for (i = 0; i < device_attr.phys_port_cnt; i++) {
		uint32_t port = i + 1; /* ports are indexed from one */
		uint32_t test = (1 << i);
		struct ibv_context *ctx = NULL;
		struct ibv_port_attr port_attr;
		struct ibv_pd *pd = NULL;
		struct priv *priv = NULL;
		struct rte_eth_dev *eth_dev;
#ifdef HAVE_EXP_QUERY_DEVICE
		struct ibv_exp_device_attr exp_device_attr;
#endif /* HAVE_EXP_QUERY_DEVICE */
		struct ether_addr mac;

#ifdef HAVE_EXP_QUERY_DEVICE
		exp_device_attr.comp_mask = IBV_EXP_DEVICE_ATTR_EXP_CAP_FLAGS;
#ifdef RSS_SUPPORT
		exp_device_attr.comp_mask |= IBV_EXP_DEVICE_ATTR_RSS_TBL_SZ;
#endif /* RSS_SUPPORT */
#endif /* HAVE_EXP_QUERY_DEVICE */

		DEBUG("using port %u (%08" PRIx32 ")", port, test);

		ctx = ibv_open_device(ibv_dev);
		if (ctx == NULL)
			goto port_error;

		/* Check port status. */
		err = ibv_query_port(ctx, port, &port_attr);
		if (err) {
			ERROR("port query failed: %s", strerror(err));
			goto port_error;
		}
		if (port_attr.state != IBV_PORT_ACTIVE)
			DEBUG("port %d is not active: \"%s\" (%d)",
			      port, ibv_port_state_str(port_attr.state),
			      port_attr.state);

		/* Allocate protection domain. */
		pd = ibv_alloc_pd(ctx);
		if (pd == NULL) {
			ERROR("PD allocation failure");
			err = ENOMEM;
			goto port_error;
		}

		mlx5_dev[idx].ports |= test;

		/* from rte_ethdev.c */
		priv = rte_zmalloc("ethdev private structure",
				   sizeof(*priv),
				   RTE_CACHE_LINE_SIZE);
		if (priv == NULL) {
			ERROR("priv allocation failure");
			err = ENOMEM;
			goto port_error;
		}

		priv->ctx = ctx;
		priv->device_attr = device_attr;
		priv->port = port;
		priv->pd = pd;
		priv->mtu = ETHER_MTU;
#ifdef HAVE_EXP_QUERY_DEVICE
		if (ibv_exp_query_device(ctx, &exp_device_attr)) {
			ERROR("ibv_exp_query_device() failed");
			goto port_error;
		}
#ifdef RSS_SUPPORT
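		/*
		 * RSS is only reported as available when the device
		 * advertises QP groups, UD RSS and a non-zero indirection
		 * table size all at once.
		 */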
		if ((exp_device_attr.exp_device_cap_flags &
		     IBV_EXP_DEVICE_QPG) &&
		    (exp_device_attr.exp_device_cap_flags &
		     IBV_EXP_DEVICE_UD_RSS) &&
		    (exp_device_attr.comp_mask &
		     IBV_EXP_DEVICE_ATTR_RSS_TBL_SZ) &&
		    (exp_device_attr.max_rss_tbl_sz > 0)) {
			priv->hw_qpg = 1;
			priv->hw_rss = 1;
			priv->max_rss_tbl_sz = exp_device_attr.max_rss_tbl_sz;
		} else {
			priv->hw_qpg = 0;
			priv->hw_rss = 0;
			priv->max_rss_tbl_sz = 0;
		}
		priv->hw_tss = !!(exp_device_attr.exp_device_cap_flags &
				  IBV_EXP_DEVICE_UD_TSS);
		DEBUG("device flags: %s%s%s",
		      (priv->hw_qpg ? "IBV_DEVICE_QPG " : ""),
		      (priv->hw_tss ? "IBV_DEVICE_TSS " : ""),
		      (priv->hw_rss ? "IBV_DEVICE_RSS " : ""));
		if (priv->hw_rss)
			DEBUG("maximum RSS indirection table size: %u",
			      exp_device_attr.max_rss_tbl_sz);
#endif /* RSS_SUPPORT */

		priv->hw_csum =
			((exp_device_attr.exp_device_cap_flags &
			  IBV_EXP_DEVICE_RX_CSUM_TCP_UDP_PKT) &&
			 (exp_device_attr.exp_device_cap_flags &
			  IBV_EXP_DEVICE_RX_CSUM_IP_PKT));
		DEBUG("checksum offloading is %ssupported",
		      (priv->hw_csum ? "" : "not "));

		priv->hw_csum_l2tun = !!(exp_device_attr.exp_device_cap_flags &
					 IBV_EXP_DEVICE_VXLAN_SUPPORT);
		DEBUG("L2 tunnel checksum offloads are %ssupported",
		      (priv->hw_csum_l2tun ? "" : "not "));

#endif /* HAVE_EXP_QUERY_DEVICE */

		priv->vf = vf;
		/* Configure the first MAC address by default. */
		if (priv_get_mac(priv, &mac.addr_bytes)) {
			ERROR("cannot get MAC address, is mlx5_en loaded?"
			      " (errno: %s)", strerror(errno));
			goto port_error;
		}
		INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
		     priv->port,
		     mac.addr_bytes[0], mac.addr_bytes[1],
		     mac.addr_bytes[2], mac.addr_bytes[3],
		     mac.addr_bytes[4], mac.addr_bytes[5]);
		/* Register MAC and broadcast addresses. */
		claim_zero(priv_mac_addr_add(priv, 0,
					     (const uint8_t (*)[ETHER_ADDR_LEN])
					     mac.addr_bytes));
		claim_zero(priv_mac_addr_add(priv, (RTE_DIM(priv->mac) - 1),
					     &(const uint8_t [ETHER_ADDR_LEN])
					     { "\xff\xff\xff\xff\xff\xff" }));
#ifndef NDEBUG
		{
			char ifname[IF_NAMESIZE];

			if (priv_get_ifname(priv, &ifname) == 0)
				DEBUG("port %u ifname is \"%s\"",
				      priv->port, ifname);
			else
				DEBUG("port %u ifname is unknown", priv->port);
		}
#endif
		/* Get actual MTU if possible. */
		priv_get_mtu(priv, &priv->mtu);
		DEBUG("port %u MTU is %u", priv->port, priv->mtu);

		/* from rte_ethdev.c */
		{
			char name[RTE_ETH_NAME_MAX_LEN];

			snprintf(name, sizeof(name), "%s port %u",
				 ibv_get_device_name(ibv_dev), port);
			eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_PCI);
		}
		if (eth_dev == NULL) {
			ERROR("cannot allocate rte ethdev");
			err = ENOMEM;
			goto port_error;
		}

		eth_dev->data->dev_private = priv;
		eth_dev->pci_dev = pci_dev;
		eth_dev->driver = &mlx5_driver;
		eth_dev->data->rx_mbuf_alloc_failed = 0;
		eth_dev->data->mtu = ETHER_MTU;

		priv->dev = eth_dev;
		eth_dev->dev_ops = &mlx5_dev_ops;
		eth_dev->data->mac_addrs = priv->mac;

		/* Bring Ethernet device up. */
		DEBUG("forcing Ethernet interface up");
		priv_set_flags(priv, ~IFF_UP, IFF_UP);
		continue;

port_error:
		rte_free(priv);
		if (pd)
			claim_zero(ibv_dealloc_pd(pd));
		if (ctx)
			claim_zero(ibv_close_device(ctx));
		break;
	}

	/*
	 * XXX if something went wrong in the loop above, there is a resource
	 * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as
	 * long as DPDK does not provide a way to deallocate an ethdev and a
	 * way to enumerate the registered ethdevs to free the previous ones.
	 */

	/* No port found, complain. */
	if (!mlx5_dev[idx].ports) {
		err = ENODEV;
		goto error;
	}

error:
	if (attr_ctx)
		claim_zero(ibv_close_device(attr_ctx));
	if (list)
		ibv_free_device_list(list);
	assert(err >= 0);
	return -err;
}

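/*
 * Supported device IDs: ConnectX-4 and ConnectX-4 Lx, in both PF and VF
 * variants. The zeroed entry terminates the table.
 */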
static const struct rte_pci_id mlx5_pci_id_map[] = {
	{
		.vendor_id = PCI_VENDOR_ID_MELLANOX,
		.device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX4,
		.subsystem_vendor_id = PCI_ANY_ID,
		.subsystem_device_id = PCI_ANY_ID
	},
	{
		.vendor_id = PCI_VENDOR_ID_MELLANOX,
		.device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX4VF,
		.subsystem_vendor_id = PCI_ANY_ID,
		.subsystem_device_id = PCI_ANY_ID
	},
	{
		.vendor_id = PCI_VENDOR_ID_MELLANOX,
		.device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX4LX,
		.subsystem_vendor_id = PCI_ANY_ID,
		.subsystem_device_id = PCI_ANY_ID
	},
	{
		.vendor_id = PCI_VENDOR_ID_MELLANOX,
		.device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF,
		.subsystem_vendor_id = PCI_ANY_ID,
		.subsystem_device_id = PCI_ANY_ID
	},
	{
		.vendor_id = 0
	}
};

static struct eth_driver mlx5_driver = {
	.pci_drv = {
		.name = MLX5_DRIVER_NAME,
		.id_table = mlx5_pci_id_map,
		.devinit = mlx5_pci_devinit,
	},
	.dev_private_size = sizeof(struct priv)
};

/**
 * Driver initialization routine.
 */
static int
rte_mlx5_pmd_init(const char *name, const char *args)
{
	(void)name;
	(void)args;
	/*
	 * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
	 * huge pages. Calling ibv_fork_init() during init allows
	 * applications to use fork() safely for purposes other than
	 * using this PMD, which is not supported in forked processes.
	 */
	setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
	ibv_fork_init();
	rte_eal_pci_register(&mlx5_driver.pci_drv);
	return 0;
}

static struct rte_driver rte_mlx5_driver = {
	.type = PMD_PDEV,
	.name = MLX5_DRIVER_NAME,
	.init = rte_mlx5_pmd_init,
};

PMD_REGISTER_DRIVER(rte_mlx5_driver)
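
/*
 * Note: PMD_REGISTER_DRIVER() hooks rte_mlx5_driver into the list of PMDs
 * initialized during rte_eal_init(), so applications never call this
 * driver directly; probing then happens through mlx5_pci_devinit() for
 * every matching PCI device found on the bus.
 */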