/*-
 *   BSD LICENSE
 *
 * Copyright 2012 6WIND S.A.
 * Copyright 2012 Mellanox
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of 6WIND S.A. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file
 * mlx4 driver initialization.
 */

/* Standard headers needed by the code below. */
#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <net/if.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_ether.h>
#include <rte_interrupts.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>

#include "mlx4.h"
#include "mlx4_flow.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"

/** Configuration structure for device arguments. */
struct mlx4_conf {
	struct {
		uint32_t present; /**< Bit-field for existing ports. */
		uint32_t enabled; /**< Bit-field for user-enabled ports. */
	} ports;
};

/* Available parameters list. */
const char *pmd_mlx4_init_params[] = {
	MLX4_PMD_PORT_KVARG,
	NULL,
};
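
/*
 * The "port" parameter (MLX4_PMD_PORT_KVARG) is normally supplied through
 * EAL device arguments, e.g. something like "-w 0000:83:00.0,port=0"
 * (illustrative PCI address) to restrict a dual-port board to its first
 * port only.
 */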

/**
 * DPDK callback for Ethernet device configuration.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_dev_configure(struct rte_eth_dev *dev)
{
	(void)dev;
	return 0;
}

/**
 * DPDK callback to start the device.
 *
 * Simulate device start by attaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_dev_start(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	int ret;

	DEBUG("%p: attaching configured flows to all RX queues", (void *)dev);
	ret = mlx4_mac_addr_add(priv);
	if (ret)
		goto err;
	ret = mlx4_intr_install(priv);
	if (ret) {
		ERROR("%p: interrupt handler installation failed",
		      (void *)dev);
		goto err;
	}
	ret = mlx4_flow_start(priv);
	if (ret) {
		ERROR("%p: flow start failed: %s",
		      (void *)dev, strerror(ret));
		goto err;
	}
	return 0;
err:
	/* Roll back the MAC flow registered above. */
	mlx4_mac_addr_del(priv);
	return ret;
}

/**
 * DPDK callback to stop the device.
 *
 * Simulate device stop by detaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx4_dev_stop(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;

	DEBUG("%p: detaching flows from all RX queues", (void *)dev);
	mlx4_flow_stop(priv);
	mlx4_intr_uninstall(priv);
	mlx4_mac_addr_del(priv);
}

/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx4_dev_close(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	unsigned int i;

	DEBUG("%p: closing device \"%s\"",
	      (void *)dev,
	      ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
	mlx4_mac_addr_del(priv);
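	/*
	 * Swap in the dummy "removed" burst functions so that any Rx/Tx
	 * call racing with close fails gracefully instead of touching
	 * queues that are about to be released.
	 */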
	dev->rx_pkt_burst = mlx4_rx_burst_removed;
	dev->tx_pkt_burst = mlx4_tx_burst_removed;
	for (i = 0; i != dev->data->nb_rx_queues; ++i)
		mlx4_rx_queue_release(dev->data->rx_queues[i]);
	for (i = 0; i != dev->data->nb_tx_queues; ++i)
		mlx4_tx_queue_release(dev->data->tx_queues[i]);
	if (priv->pd != NULL) {
		assert(priv->ctx != NULL);
		claim_zero(ibv_dealloc_pd(priv->pd));
		claim_zero(ibv_close_device(priv->ctx));
	} else
		assert(priv->ctx == NULL);
	mlx4_intr_uninstall(priv);
	memset(priv, 0, sizeof(*priv));
}

static const struct eth_dev_ops mlx4_dev_ops = {
	.dev_configure = mlx4_dev_configure,
	.dev_start = mlx4_dev_start,
	.dev_stop = mlx4_dev_stop,
	.dev_set_link_down = mlx4_dev_set_link_down,
	.dev_set_link_up = mlx4_dev_set_link_up,
	.dev_close = mlx4_dev_close,
	.link_update = mlx4_link_update,
	.stats_get = mlx4_stats_get,
	.stats_reset = mlx4_stats_reset,
	.dev_infos_get = mlx4_dev_infos_get,
	.rx_queue_setup = mlx4_rx_queue_setup,
	.tx_queue_setup = mlx4_tx_queue_setup,
	.rx_queue_release = mlx4_rx_queue_release,
	.tx_queue_release = mlx4_tx_queue_release,
	.flow_ctrl_get = mlx4_flow_ctrl_get,
	.flow_ctrl_set = mlx4_flow_ctrl_set,
	.mtu_set = mlx4_mtu_set,
	.filter_ctrl = mlx4_filter_ctrl,
	.rx_queue_intr_enable = mlx4_rx_intr_enable,
	.rx_queue_intr_disable = mlx4_rx_intr_disable,
};
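
/*
 * These callbacks are never called directly by applications; they are
 * dispatched through the generic rte_ethdev API, typically in a sequence
 * such as rte_eth_dev_configure(), rte_eth_rx_queue_setup(),
 * rte_eth_tx_queue_setup() and finally rte_eth_dev_start().
 */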

/**
 * Get PCI information from struct ibv_device.
 *
 * @param device
 *   Pointer to Verbs device structure.
 * @param[out] pci_addr
 *   PCI bus address output buffer.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_ibv_device_to_pci_addr(const struct ibv_device *device,
			    struct rte_pci_addr *pci_addr)
{
	FILE *file;
	char line[32];
	MKSTR(path, "%s/device/uevent", device->ibdev_path);

	file = fopen(path, "rb");
	if (file == NULL) {
		rte_errno = errno;
		return -rte_errno;
	}
	while (fgets(line, sizeof(line), file) == line) {
		size_t len = strlen(line);
		int ret;

		/* Truncate long lines. */
		if (len == (sizeof(line) - 1))
			while (line[(len - 1)] != '\n') {
				ret = fgetc(file);
				if (ret == EOF)
					break;
				line[(len - 1)] = ret;
			}
		/* Extract information. */
		if (sscanf(line,
			   "PCI_SLOT_NAME="
			   "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
			   &pci_addr->domain,
			   &pci_addr->bus,
			   &pci_addr->devid,
			   &pci_addr->function) == 4) {
			break;
		}
	}
	fclose(file);
	return 0;
}
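
/*
 * The uevent file parsed above contains one key=value pair per line; the
 * sscanf() matches the PCI bus address entry, which looks like
 * PCI_SLOT_NAME=0000:83:00.0 (illustrative address).
 */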

/**
 * Verify and store value for device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param[in, out] conf
 *   Shared configuration data.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_arg_parse(const char *key, const char *val, struct mlx4_conf *conf)
{
	unsigned long tmp;

	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		rte_errno = errno;
		WARN("%s: \"%s\" is not a valid integer", key, val);
		return -rte_errno;
	}
	if (strcmp(MLX4_PMD_PORT_KVARG, key) == 0) {
		uint32_t ports = rte_log2_u32(conf->ports.present);

		if (tmp >= ports) {
			rte_errno = EINVAL;
			ERROR("port index %lu outside range [0,%" PRIu32 ")",
			      tmp, ports);
			return -rte_errno;
		}
		if (!(conf->ports.present & (1 << tmp))) {
			rte_errno = EINVAL;
			ERROR("invalid port index %lu", tmp);
			return -rte_errno;
		}
		conf->ports.enabled |= 1 << tmp;
	} else {
		rte_errno = EINVAL;
		WARN("%s: unknown parameter", key);
		return -rte_errno;
	}
	return 0;
}
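
/*
 * Note: only bits matching a port reported by the device are accepted
 * above; mlx4_pci_probe() later skips any port whose bit is not set in
 * conf.ports.enabled.
 */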

/**
 * Parse device parameters.
 *
 * @param devargs
 *   Device arguments structure.
 * @param[in, out] conf
 *   Shared configuration data.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_args(struct rte_devargs *devargs, struct mlx4_conf *conf)
{
	struct rte_kvargs *kvlist;
	unsigned int arg_count;
	int ret = 0;
	int i;

	if (devargs == NULL)
		return 0;
	kvlist = rte_kvargs_parse(devargs->args, pmd_mlx4_init_params);
	if (kvlist == NULL) {
		rte_errno = EINVAL;
		ERROR("failed to parse kvargs");
		return -rte_errno;
	}
	/* Process parameters. */
	for (i = 0; pmd_mlx4_init_params[i]; ++i) {
		arg_count = rte_kvargs_count(kvlist, MLX4_PMD_PORT_KVARG);
		while (arg_count-- > 0) {
			ret = rte_kvargs_process(kvlist,
						 MLX4_PMD_PORT_KVARG,
						 (int (*)(const char *,
							  const char *,
							  void *))
						 mlx4_arg_parse,
						 conf);
			if (ret != 0)
				goto free_kvlist;
		}
	}
free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}

static struct rte_pci_driver mlx4_driver;

/**
 * DPDK callback to register a PCI device.
 *
 * This function creates an Ethernet device for each port of a given
 * Verbs device.
 *
 * @param[in] pci_drv
 *   PCI driver structure (mlx4_driver).
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	struct ibv_device **list;
	struct ibv_device *ibv_dev;
	int err;
	struct ibv_context *attr_ctx = NULL;
	struct ibv_device_attr device_attr;
	struct mlx4_conf conf = {
		.ports.present = 0,
	};
	unsigned int vf;
	int i;

	assert(pci_drv == &mlx4_driver);
	list = ibv_get_device_list(&i);
	if (list == NULL) {
		rte_errno = errno;
		if (rte_errno == ENOSYS)
			ERROR("cannot list devices, is ib_uverbs loaded?");
		return -rte_errno;
	}
	/*
	 * For each listed device, check related sysfs entry against
	 * the provided PCI ID.
	 */
	while (i != 0) {
		struct rte_pci_addr pci_addr;

		--i;
		DEBUG("checking device \"%s\"", list[i]->name);
		if (mlx4_ibv_device_to_pci_addr(list[i], &pci_addr))
			continue;
		if ((pci_dev->addr.domain != pci_addr.domain) ||
		    (pci_dev->addr.bus != pci_addr.bus) ||
		    (pci_dev->addr.devid != pci_addr.devid) ||
		    (pci_dev->addr.function != pci_addr.function))
			continue;
		vf = (pci_dev->id.device_id ==
		      PCI_DEVICE_ID_MELLANOX_CONNECTX3VF);
		INFO("PCI information matches, using device \"%s\" (VF: %s)",
		     list[i]->name, (vf ? "true" : "false"));
		attr_ctx = ibv_open_device(list[i]);
		break;
	}
	if (attr_ctx == NULL) {
		rte_errno = errno;
		ibv_free_device_list(list);
		ERROR("cannot access device, is mlx4_ib loaded?");
		ERROR("cannot use device, are drivers up to date?");
		return -rte_errno;
	}
	ibv_dev = list[i];
	DEBUG("device opened");
	if (ibv_query_device(attr_ctx, &device_attr)) {
		rte_errno = ENODEV;
		goto error;
	}
	INFO("%u port(s) detected", device_attr.phys_port_cnt);
	conf.ports.present |= (UINT64_C(1) << device_attr.phys_port_cnt) - 1;
	if (mlx4_args(pci_dev->device.devargs, &conf)) {
		ERROR("failed to process device arguments");
		goto error;
	}
	/* Use all ports when none are defined. */
	if (!conf.ports.enabled)
		conf.ports.enabled = conf.ports.present;
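	/*
	 * Example: a dual-port ConnectX-3 reports phys_port_cnt == 2, so
	 * ports.present == 0x3; passing "port=0" leaves only bit 0 set in
	 * ports.enabled and the second port is skipped by the loop below.
	 */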
	for (i = 0; i < device_attr.phys_port_cnt; i++) {
		uint32_t port = i + 1; /* ports are indexed from one */
		struct ibv_context *ctx = NULL;
		struct ibv_port_attr port_attr;
		struct ibv_pd *pd = NULL;
		struct priv *priv = NULL;
		struct rte_eth_dev *eth_dev = NULL;
		struct ether_addr mac;

		/* If port is not enabled, skip. */
		if (!(conf.ports.enabled & (1 << i)))
			continue;
		DEBUG("using port %u", port);
		ctx = ibv_open_device(ibv_dev);
		if (ctx == NULL) {
			rte_errno = ENODEV;
			goto port_error;
		}
		/* Check port status. */
		err = ibv_query_port(ctx, port, &port_attr);
		if (err) {
			rte_errno = err;
			ERROR("port query failed: %s", strerror(rte_errno));
			goto port_error;
		}
		if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
			rte_errno = EINVAL;
			ERROR("port %d is not configured in Ethernet mode",
			      port);
			goto port_error;
		}
		if (port_attr.state != IBV_PORT_ACTIVE)
			DEBUG("port %d is not active: \"%s\" (%d)",
			      port, ibv_port_state_str(port_attr.state),
			      port_attr.state);
		/* Make asynchronous FD non-blocking to handle interrupts. */
		if (mlx4_fd_set_non_blocking(ctx->async_fd) < 0) {
			ERROR("cannot make asynchronous FD non-blocking: %s",
			      strerror(rte_errno));
			goto port_error;
		}
		/* Allocate protection domain. */
		pd = ibv_alloc_pd(ctx);
		if (pd == NULL) {
			rte_errno = ENOMEM;
			ERROR("PD allocation failure");
			goto port_error;
		}
		/* from rte_ethdev.c */
		priv = rte_zmalloc("ethdev private structure",
				   sizeof(*priv),
				   RTE_CACHE_LINE_SIZE);
		if (priv == NULL) {
			rte_errno = ENOMEM;
			ERROR("priv allocation failure");
			goto port_error;
		}
		priv->ctx = ctx;
		priv->device_attr = device_attr;
		priv->port = port;
		priv->pd = pd;
		priv->mtu = ETHER_MTU;
		priv->vf = vf;
		/* Configure the first MAC address by default. */
		if (mlx4_get_mac(priv, &mac.addr_bytes)) {
			ERROR("cannot get MAC address, is mlx4_en loaded?"
			      " (rte_errno: %s)", strerror(rte_errno));
			goto port_error;
		}
		INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
		     priv->port,
		     mac.addr_bytes[0], mac.addr_bytes[1],
		     mac.addr_bytes[2], mac.addr_bytes[3],
		     mac.addr_bytes[4], mac.addr_bytes[5]);
		/* Register MAC address. */
		priv->mac = mac;
		if (mlx4_mac_addr_add(priv))
			goto port_error;
		{
			char ifname[IF_NAMESIZE];

			if (mlx4_get_ifname(priv, &ifname) == 0)
				DEBUG("port %u ifname is \"%s\"",
				      priv->port, ifname);
			else
				DEBUG("port %u ifname is unknown", priv->port);
		}
		/* Get actual MTU if possible. */
		mlx4_mtu_get(priv, &priv->mtu);
		DEBUG("port %u MTU is %u", priv->port, priv->mtu);
		/* from rte_ethdev.c */
		{
			char name[RTE_ETH_NAME_MAX_LEN];

			snprintf(name, sizeof(name), "%s port %u",
				 ibv_get_device_name(ibv_dev), port);
			eth_dev = rte_eth_dev_allocate(name);
		}
		if (eth_dev == NULL) {
			rte_errno = ENOMEM;
			ERROR("can not allocate rte ethdev");
			goto port_error;
		}
		eth_dev->data->dev_private = priv;
		eth_dev->data->mac_addrs = &priv->mac;
		eth_dev->device = &pci_dev->device;
		rte_eth_copy_pci_info(eth_dev, pci_dev);
		eth_dev->device->driver = &mlx4_driver.driver;
		/* Initialize local interrupt handle for current port. */
		priv->intr_handle = (struct rte_intr_handle){
			.fd = -1,
			.type = RTE_INTR_HANDLE_EXT,
		};
		/*
		 * Override ethdev interrupt handle pointer with private
		 * handle instead of that of the parent PCI device used by
		 * default. This prevents it from being shared between all
		 * ports of the same PCI device since each of them is
		 * associated with its own Verbs context.
		 *
		 * Rx interrupts in particular require this as the PMD has
		 * no control over the registration of queue interrupts
		 * besides setting up eth_dev->intr_handle; the rest is
		 * handled by rte_intr_rx_ctl().
		 */
		eth_dev->intr_handle = &priv->intr_handle;
		priv->dev = eth_dev;
		eth_dev->dev_ops = &mlx4_dev_ops;
		eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
		/* Bring Ethernet device up. */
		DEBUG("forcing Ethernet interface up");
		mlx4_dev_set_link_up(priv->dev);
		/* Update link status once if waiting for LSC. */
		if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
			mlx4_link_update(eth_dev, 0);
		continue;
port_error:
		rte_free(priv);
		if (pd)
			claim_zero(ibv_dealloc_pd(pd));
		if (ctx)
			claim_zero(ibv_close_device(ctx));
		if (eth_dev != NULL)
			rte_eth_dev_release_port(eth_dev);
		break;
	}
	if (i == device_attr.phys_port_cnt)
		return 0;
	/*
	 * XXX if something went wrong in the loop above, there is a resource
	 * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as
	 * long as DPDK does not provide a way to deallocate an ethdev and a
	 * way to enumerate the registered ethdevs to free the previous ones.
	 */
error:
	if (attr_ctx)
		claim_zero(ibv_close_device(attr_ctx));
	if (list)
		ibv_free_device_list(list);
	assert(rte_errno >= 0);
	return -rte_errno;
}

static const struct rte_pci_id mlx4_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX3)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX3VF)
	},
	{
		.vendor_id = 0
	}
};

static struct rte_pci_driver mlx4_driver = {
	.driver = {
		.name = MLX4_DRIVER_NAME
	},
	.id_table = mlx4_pci_id_map,
	.probe = mlx4_pci_probe,
	.drv_flags = RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_INTR_RMV,
};

/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx4_pmd_init);
static void
rte_mlx4_pmd_init(void)
{
	/*
	 * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
	 * huge pages. Calling ibv_fork_init() during init allows
	 * applications to use fork() safely for purposes other than
	 * using this PMD, which is not supported in forked processes.
	 */
	setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
	ibv_fork_init();
	rte_pci_register(&mlx4_driver);
}

RTE_PMD_EXPORT_NAME(net_mlx4, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx4, mlx4_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mlx4,
			  "* ib_uverbs & mlx4_en & mlx4_core & mlx4_ib");