/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2012 6WIND S.A.
 * Copyright 2012 Mellanox Technologies, Ltd
 */

/**
 * @file
 * mlx4 driver initialization.
 */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_config.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_interrupts.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>

#include "mlx4.h"
#include "mlx4_glue.h"
#include "mlx4_flow.h"
#include "mlx4_mr.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"

struct mlx4_dev_list mlx4_mem_event_cb_list =
	LIST_HEAD_INITIALIZER(mlx4_mem_event_cb_list);

rte_rwlock_t mlx4_mem_event_rwlock = RTE_RWLOCK_INITIALIZER;

/** Configuration structure for device arguments. */
struct mlx4_conf {
	struct {
		uint32_t present; /**< Bit-field for existing ports. */
		uint32_t enabled; /**< Bit-field for user-enabled ports. */
	} ports;
};

/* Available parameters list. */
const char *pmd_mlx4_init_params[] = {
	MLX4_PMD_PORT_KVARG,
	NULL,
};

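/*
 * Example usage (hypothetical PCI address): passing
 * "-w 0000:83:00.0,port=0" on the EAL command line restricts probing to
 * the first physical port of that adapter; see mlx4_arg_parse() below.
 */
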
static void mlx4_dev_stop(struct rte_eth_dev *dev);

/**
 * DPDK callback for Ethernet device configuration.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_dev_configure(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct rte_flow_error error;
	int ret;

	/* Prepare internal flow rules. */
	ret = mlx4_flow_sync(priv, &error);
	if (ret) {
		ERROR("cannot set up internal flow rules (code %d, \"%s\"),"
		      " flow error type %d, cause %p, message: %s",
		      -ret, strerror(-ret), error.type, error.cause,
		      error.message ? error.message : "(unspecified)");
		goto exit;
	}
	ret = mlx4_intr_install(priv);
	if (ret)
		ERROR("%p: interrupt handler installation failed",
		      (void *)dev);
	/*
	 * Once the device is added to the list of memory event callbacks,
	 * its global MR cache table cannot be expanded on the fly because
	 * of a possible deadlock. If it overflows, lookup should be done
	 * by searching the MR list linearly, which is slow.
	 */
	if (mlx4_mr_btree_init(&priv->mr.cache, MLX4_MR_BTREE_CACHE_N * 2,
			       dev->device->numa_node)) {
		/* rte_errno is already set. */
		return -rte_errno;
	}
	rte_rwlock_write_lock(&mlx4_mem_event_rwlock);
	LIST_INSERT_HEAD(&mlx4_mem_event_cb_list, priv, mem_event_cb);
	rte_rwlock_write_unlock(&mlx4_mem_event_rwlock);
exit:
	return ret;
}

/**
 * DPDK callback to start the device.
 *
 * Simulate device start by initializing common RSS resources and attaching
 * all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_dev_start(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct rte_flow_error error;
	int ret;

	if (priv->started)
		return 0;
	DEBUG("%p: attaching configured flows to all RX queues", (void *)dev);
	priv->started = 1;
	ret = mlx4_rss_init(priv);
	if (ret) {
		ERROR("%p: cannot initialize RSS resources: %s",
		      (void *)dev, strerror(-ret));
		goto err;
	}
#ifndef NDEBUG
	mlx4_mr_dump_dev(dev);
#endif
	ret = mlx4_rxq_intr_enable(priv);
	if (ret) {
		ERROR("%p: interrupt handler installation failed",
		      (void *)dev);
		goto err;
	}
	ret = mlx4_flow_sync(priv, &error);
	if (ret) {
		ERROR("%p: cannot attach flow rules (code %d, \"%s\"),"
		      " flow error type %d, cause %p, message: %s",
		      (void *)dev,
		      -ret, strerror(-ret), error.type, error.cause,
		      error.message ? error.message : "(unspecified)");
		goto err;
	}
	rte_wmb();
	dev->tx_pkt_burst = mlx4_tx_burst;
	dev->rx_pkt_burst = mlx4_rx_burst;
	return 0;
err:
	mlx4_dev_stop(dev);
	return ret;
}

/**
 * DPDK callback to stop the device.
 *
 * Simulate device stop by detaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx4_dev_stop(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;

	if (!priv->started)
		return;
	DEBUG("%p: detaching flows from all RX queues", (void *)dev);
	priv->started = 0;
	dev->tx_pkt_burst = mlx4_tx_burst_removed;
	dev->rx_pkt_burst = mlx4_rx_burst_removed;
	rte_wmb();
	mlx4_flow_sync(priv, NULL);
	mlx4_rxq_intr_disable(priv);
	mlx4_rss_deinit(priv);
}

/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx4_dev_close(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	unsigned int i;

	DEBUG("%p: closing device \"%s\"",
	      (void *)dev,
	      ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
	dev->rx_pkt_burst = mlx4_rx_burst_removed;
	dev->tx_pkt_burst = mlx4_tx_burst_removed;
	rte_wmb();
	mlx4_flow_clean(priv);
	mlx4_rss_deinit(priv);
	for (i = 0; i != dev->data->nb_rx_queues; ++i)
		mlx4_rx_queue_release(dev->data->rx_queues[i]);
	for (i = 0; i != dev->data->nb_tx_queues; ++i)
		mlx4_tx_queue_release(dev->data->tx_queues[i]);
	mlx4_mr_release(dev);
	if (priv->pd != NULL) {
		assert(priv->ctx != NULL);
		claim_zero(mlx4_glue->dealloc_pd(priv->pd));
		claim_zero(mlx4_glue->close_device(priv->ctx));
	} else {
		assert(priv->ctx == NULL);
	}
	mlx4_intr_uninstall(priv);
	memset(priv, 0, sizeof(*priv));
}

static const struct eth_dev_ops mlx4_dev_ops = {
	.dev_configure = mlx4_dev_configure,
	.dev_start = mlx4_dev_start,
	.dev_stop = mlx4_dev_stop,
	.dev_set_link_down = mlx4_dev_set_link_down,
	.dev_set_link_up = mlx4_dev_set_link_up,
	.dev_close = mlx4_dev_close,
	.link_update = mlx4_link_update,
	.promiscuous_enable = mlx4_promiscuous_enable,
	.promiscuous_disable = mlx4_promiscuous_disable,
	.allmulticast_enable = mlx4_allmulticast_enable,
	.allmulticast_disable = mlx4_allmulticast_disable,
	.mac_addr_remove = mlx4_mac_addr_remove,
	.mac_addr_add = mlx4_mac_addr_add,
	.mac_addr_set = mlx4_mac_addr_set,
	.stats_get = mlx4_stats_get,
	.stats_reset = mlx4_stats_reset,
	.dev_infos_get = mlx4_dev_infos_get,
	.dev_supported_ptypes_get = mlx4_dev_supported_ptypes_get,
	.vlan_filter_set = mlx4_vlan_filter_set,
	.rx_queue_setup = mlx4_rx_queue_setup,
	.tx_queue_setup = mlx4_tx_queue_setup,
	.rx_queue_release = mlx4_rx_queue_release,
	.tx_queue_release = mlx4_tx_queue_release,
	.flow_ctrl_get = mlx4_flow_ctrl_get,
	.flow_ctrl_set = mlx4_flow_ctrl_set,
	.mtu_set = mlx4_mtu_set,
	.filter_ctrl = mlx4_filter_ctrl,
	.rx_queue_intr_enable = mlx4_rx_intr_enable,
	.rx_queue_intr_disable = mlx4_rx_intr_disable,
	.is_removed = mlx4_is_removed,
};

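/*
 * Note that only control path callbacks appear in the table above; the
 * data path entry points (dev->rx_pkt_burst, dev->tx_pkt_burst) are
 * assigned directly to the rte_eth_dev structure by mlx4_dev_start() and
 * mlx4_dev_stop(), as DPDK dispatches bursts through those pointers
 * rather than through this table.
 */
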
/**
 * Get PCI information from struct ibv_device.
 *
 * @param device
 *   Pointer to Verbs device structure.
 * @param[out] pci_addr
 *   PCI bus address output buffer.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_ibv_device_to_pci_addr(const struct ibv_device *device,
			    struct rte_pci_addr *pci_addr)
{
	FILE *file;
	char line[32];
	MKSTR(path, "%s/device/uevent", device->ibdev_path);

	file = fopen(path, "rb");
	if (file == NULL) {
		rte_errno = errno;
		return -rte_errno;
	}
	while (fgets(line, sizeof(line), file) == line) {
		size_t len = strlen(line);
		int ret;

		/* Truncate long lines. */
		if (len == (sizeof(line) - 1))
			while (line[(len - 1)] != '\n') {
				ret = fgetc(file);
				if (ret == EOF)
					break;
				line[(len - 1)] = ret;
			}
		/* Extract information. */
		if (sscanf(line,
			   "PCI_SLOT_NAME="
			   "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
			   &pci_addr->domain,
			   &pci_addr->bus,
			   &pci_addr->devid,
			   &pci_addr->function) == 4) {
			fclose(file);
			return 0;
		}
	}
	fclose(file);
	rte_errno = ENODEV;
	return -rte_errno;
}

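/*
 * For reference, the uevent line matched above looks like
 * "PCI_SLOT_NAME=0000:83:00.0" (the address value here is only an
 * example).
 */
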
/**
 * Verify and store value for device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param[in, out] conf
 *   Shared configuration data.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_arg_parse(const char *key, const char *val, struct mlx4_conf *conf)
{
	unsigned long tmp;

	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		rte_errno = errno;
		WARN("%s: \"%s\" is not a valid integer", key, val);
		return -rte_errno;
	}
	if (strcmp(MLX4_PMD_PORT_KVARG, key) == 0) {
		uint32_t ports = rte_log2_u32(conf->ports.present + 1);

		if (tmp >= ports) {
			rte_errno = EINVAL;
			ERROR("port index %lu outside range [0,%" PRIu32 ")",
			      tmp, ports);
			return -rte_errno;
		}
		if (!(conf->ports.present & (1 << tmp))) {
			rte_errno = EINVAL;
			ERROR("invalid port index %lu", tmp);
			return -rte_errno;
		}
		conf->ports.enabled |= 1 << tmp;
	} else {
		rte_errno = EINVAL;
		WARN("%s: unknown parameter", key);
		return -rte_errno;
	}
	return 0;
}

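/*
 * Worked example (hypothetical values): on a dual-port adapter,
 * conf->ports.present is 0x3, so ports = rte_log2_u32(0x3 + 1) = 2;
 * "port=1" passes both checks and leaves conf->ports.enabled == 0x2,
 * i.e. only the second port is probed.
 */
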
/**
 * Parse device parameters.
 *
 * @param devargs
 *   Device arguments structure.
 * @param conf
 *   Shared configuration data.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_args(struct rte_devargs *devargs, struct mlx4_conf *conf)
{
	struct rte_kvargs *kvlist;
	unsigned int arg_count;
	int ret = 0;
	int i;

	if (devargs == NULL)
		return 0;
	kvlist = rte_kvargs_parse(devargs->args, pmd_mlx4_init_params);
	if (kvlist == NULL) {
		rte_errno = EINVAL;
		ERROR("failed to parse kvargs");
		return -rte_errno;
	}
	/* Process parameters. */
	for (i = 0; pmd_mlx4_init_params[i]; ++i) {
		arg_count = rte_kvargs_count(kvlist, MLX4_PMD_PORT_KVARG);
		while (arg_count-- > 0) {
			ret = rte_kvargs_process(kvlist,
						 MLX4_PMD_PORT_KVARG,
						 (int (*)(const char *,
							  const char *,
							  void *))
						 mlx4_arg_parse,
						 conf);
			if (ret != 0)
				goto free_kvlist;
		}
	}
free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}

/**
 * Interpret RSS capabilities reported by device.
 *
 * This function returns the set of usable Verbs RSS hash fields, kernel
 * quirks taken into account.
 *
 * @param ctx
 *   Verbs context.
 * @param pd
 *   Verbs protection domain.
 * @param device_attr_ex
 *   Extended device attributes to interpret.
 *
 * @return
 *   Usable RSS hash fields mask in Verbs format.
 */
static uint64_t
mlx4_hw_rss_sup(struct ibv_context *ctx, struct ibv_pd *pd,
		struct ibv_device_attr_ex *device_attr_ex)
{
	uint64_t hw_rss_sup = device_attr_ex->rss_caps.rx_hash_fields_mask;
	struct ibv_cq *cq = NULL;
	struct ibv_wq *wq = NULL;
	struct ibv_rwq_ind_table *ind = NULL;
	struct ibv_qp *qp = NULL;

	if (!hw_rss_sup) {
		WARN("no RSS capabilities reported; disabling support for UDP"
		     " RSS and inner VXLAN RSS");
		return IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 |
			IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6 |
			IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP;
	}
	if (!(hw_rss_sup & IBV_RX_HASH_INNER))
		return hw_rss_sup;
	/*
	 * Although reported as supported, missing code in some Linux
	 * versions (v4.15, v4.16) prevents the creation of hash QPs with
	 * inner capability.
	 *
	 * There is no choice but to attempt to instantiate a temporary RSS
	 * context in order to confirm its support.
	 */
	cq = mlx4_glue->create_cq(ctx, 1, NULL, NULL, 0);
	wq = cq ? mlx4_glue->create_wq
		(ctx,
		 &(struct ibv_wq_init_attr){
			.wq_type = IBV_WQT_RQ,
			.max_wr = 1,
			.max_sge = 1,
			.pd = pd,
			.cq = cq,
		 }) : NULL;
	ind = wq ? mlx4_glue->create_rwq_ind_table
		(ctx,
		 &(struct ibv_rwq_ind_table_init_attr){
			.log_ind_tbl_size = 0,
			.ind_tbl = &wq,
			.comp_mask = 0,
		 }) : NULL;
	qp = ind ? mlx4_glue->create_qp_ex
		(ctx,
		 &(struct ibv_qp_init_attr_ex){
			.comp_mask =
				(IBV_QP_INIT_ATTR_PD |
				 IBV_QP_INIT_ATTR_RX_HASH |
				 IBV_QP_INIT_ATTR_IND_TABLE),
			.qp_type = IBV_QPT_RAW_PACKET,
			.pd = pd,
			.rwq_ind_tbl = ind,
			.rx_hash_conf = {
				.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
				.rx_hash_key_len = MLX4_RSS_HASH_KEY_SIZE,
				.rx_hash_key = mlx4_rss_hash_key_default,
				.rx_hash_fields_mask = hw_rss_sup,
			},
		 }) : NULL;
	if (!qp) {
		WARN("disabling unusable inner RSS capability due to kernel"
		     " quirk");
		hw_rss_sup &= ~IBV_RX_HASH_INNER;
	} else {
		claim_zero(mlx4_glue->destroy_qp(qp));
	}
	if (ind)
		claim_zero(mlx4_glue->destroy_rwq_ind_table(ind));
	if (wq)
		claim_zero(mlx4_glue->destroy_wq(wq));
	if (cq)
		claim_zero(mlx4_glue->destroy_cq(cq));
	return hw_rss_sup;
}

static struct rte_pci_driver mlx4_driver;

/**
 * DPDK callback to register a PCI device.
 *
 * This function creates an Ethernet device for each port of a given
 * PCI device.
 *
 * @param[in] pci_drv
 *   PCI driver structure (mlx4_driver).
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	struct ibv_device **list;
	struct ibv_device *ibv_dev;
	int err = 0;
	struct ibv_context *attr_ctx = NULL;
	struct ibv_device_attr device_attr;
	struct ibv_device_attr_ex device_attr_ex;
	struct mlx4_conf conf = {
		.ports.present = 0,
	};
	unsigned int vf;
	int i;

	(void)pci_drv;
	assert(pci_drv == &mlx4_driver);
	list = mlx4_glue->get_device_list(&i);
	if (list == NULL) {
		rte_errno = errno;
		assert(rte_errno);
		if (rte_errno == ENOSYS)
			ERROR("cannot list devices, is ib_uverbs loaded?");
		return -rte_errno;
	}
	/*
	 * For each listed device, check related sysfs entry against
	 * the provided PCI ID.
	 */
	while (i != 0) {
		struct rte_pci_addr pci_addr;

		--i;
		DEBUG("checking device \"%s\"", list[i]->name);
		if (mlx4_ibv_device_to_pci_addr(list[i], &pci_addr))
			continue;
		if ((pci_dev->addr.domain != pci_addr.domain) ||
		    (pci_dev->addr.bus != pci_addr.bus) ||
		    (pci_dev->addr.devid != pci_addr.devid) ||
		    (pci_dev->addr.function != pci_addr.function))
			continue;
		vf = (pci_dev->id.device_id ==
		      PCI_DEVICE_ID_MELLANOX_CONNECTX3VF);
		INFO("PCI information matches, using device \"%s\" (VF: %s)",
		     list[i]->name, (vf ? "true" : "false"));
		attr_ctx = mlx4_glue->open_device(list[i]);
		err = errno;
		break;
	}
	if (attr_ctx == NULL) {
		mlx4_glue->free_device_list(list);
		switch (err) {
		case 0:
			rte_errno = ENODEV;
			ERROR("cannot access device, is mlx4_ib loaded?");
			return -rte_errno;
		case EINVAL:
			rte_errno = EINVAL;
			ERROR("cannot use device, are drivers up to date?");
			return -rte_errno;
		}
		assert(err > 0);
		rte_errno = err;
		return -rte_errno;
	}
	ibv_dev = list[i];
	DEBUG("device opened");
	if (mlx4_glue->query_device(attr_ctx, &device_attr)) {
		err = ENODEV;
		goto error;
	}
	INFO("%u port(s) detected", device_attr.phys_port_cnt);
	conf.ports.present |= (UINT64_C(1) << device_attr.phys_port_cnt) - 1;
	if (mlx4_args(pci_dev->device.devargs, &conf)) {
		ERROR("failed to process device arguments");
		err = EINVAL;
		goto error;
	}
	/* Use all ports when none are defined. */
	if (!conf.ports.enabled)
		conf.ports.enabled = conf.ports.present;
	/* Retrieve extended device attributes. */
	if (mlx4_glue->query_device_ex(attr_ctx, NULL, &device_attr_ex)) {
		err = ENODEV;
		goto error;
	}
	assert(device_attr.max_sge >= MLX4_MAX_SGE);
	for (i = 0; i < device_attr.phys_port_cnt; i++) {
		uint32_t port = i + 1; /* ports are indexed from one */
		struct ibv_context *ctx = NULL;
		struct ibv_port_attr port_attr;
		struct ibv_pd *pd = NULL;
		struct priv *priv = NULL;
		struct rte_eth_dev *eth_dev = NULL;
		struct ether_addr mac;

		/* If port is not enabled, skip. */
		if (!(conf.ports.enabled & (1 << i)))
			continue;
		DEBUG("using port %u", port);
		ctx = mlx4_glue->open_device(ibv_dev);
		if (ctx == NULL) {
			err = ENODEV;
			goto port_error;
		}
		/* Check port status. */
		err = mlx4_glue->query_port(ctx, port, &port_attr);
		if (err) {
			rte_errno = err;
			ERROR("port query failed: %s", strerror(rte_errno));
			goto port_error;
		}
		if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
			err = ENOTSUP;
			ERROR("port %d is not configured in Ethernet mode",
			      port);
			goto port_error;
		}
		if (port_attr.state != IBV_PORT_ACTIVE)
			DEBUG("port %d is not active: \"%s\" (%d)",
			      port, mlx4_glue->port_state_str(port_attr.state),
			      port_attr.state);
		/* Make asynchronous FD non-blocking to handle interrupts. */
		if (mlx4_fd_set_non_blocking(ctx->async_fd) < 0) {
			ERROR("cannot make asynchronous FD non-blocking: %s",
			      strerror(rte_errno));
			err = rte_errno;
			goto port_error;
		}
		/* Allocate protection domain. */
		pd = mlx4_glue->alloc_pd(ctx);
		if (pd == NULL) {
			err = ENOMEM;
			ERROR("PD allocation failure");
			goto port_error;
		}
		/* from rte_ethdev.c */
		priv = rte_zmalloc("ethdev private structure",
				   sizeof(*priv),
				   RTE_CACHE_LINE_SIZE);
		if (priv == NULL) {
			err = ENOMEM;
			ERROR("priv allocation failure");
			goto port_error;
		}
		priv->ctx = ctx;
		priv->device_attr = device_attr;
		priv->port = port;
		priv->pd = pd;
		priv->mtu = ETHER_MTU;
		priv->vf = vf;
		priv->hw_csum = !!(device_attr.device_cap_flags &
				   IBV_DEVICE_RAW_IP_CSUM);
		DEBUG("checksum offloading is %ssupported",
		      (priv->hw_csum ? "" : "not "));
		/* Only ConnectX-3 Pro supports tunneling. */
		priv->hw_csum_l2tun =
			priv->hw_csum &&
			(device_attr.vendor_part_id ==
			 PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO);
		DEBUG("L2 tunnel checksum offloads are %ssupported",
		      priv->hw_csum_l2tun ? "" : "not ");
		priv->hw_rss_sup = mlx4_hw_rss_sup(priv->ctx, priv->pd,
						   &device_attr_ex);
		DEBUG("supported RSS hash fields mask: %016" PRIx64,
		      priv->hw_rss_sup);
		priv->hw_fcs_strip = !!(device_attr_ex.raw_packet_caps &
					IBV_RAW_PACKET_CAP_SCATTER_FCS);
		DEBUG("FCS stripping toggling is %ssupported",
		      priv->hw_fcs_strip ? "" : "not ");
		/* Configure the first MAC address by default. */
		if (mlx4_get_mac(priv, &mac.addr_bytes)) {
			ERROR("cannot get MAC address, is mlx4_en loaded?"
			      " (rte_errno: %s)", strerror(rte_errno));
			err = rte_errno;
			goto port_error;
		}
		INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
		     priv->port,
		     mac.addr_bytes[0], mac.addr_bytes[1],
		     mac.addr_bytes[2], mac.addr_bytes[3],
		     mac.addr_bytes[4], mac.addr_bytes[5]);
		/* Register MAC address. */
		priv->mac[0] = mac;
#ifndef NDEBUG
		{
			char ifname[IF_NAMESIZE];

			if (mlx4_get_ifname(priv, &ifname) == 0)
				DEBUG("port %u ifname is \"%s\"",
				      priv->port, ifname);
			else
				DEBUG("port %u ifname is unknown", priv->port);
		}
#endif
		/* Get actual MTU if possible. */
		mlx4_mtu_get(priv, &priv->mtu);
		DEBUG("port %u MTU is %u", priv->port, priv->mtu);
		/* from rte_ethdev.c */
		{
			char name[RTE_ETH_NAME_MAX_LEN];

			snprintf(name, sizeof(name), "%s port %u",
				 mlx4_glue->get_device_name(ibv_dev), port);
			eth_dev = rte_eth_dev_allocate(name);
		}
		if (eth_dev == NULL) {
			err = ENOMEM;
			ERROR("cannot allocate rte ethdev");
			goto port_error;
		}
		eth_dev->data->dev_private = priv;
		eth_dev->data->mac_addrs = priv->mac;
		eth_dev->device = &pci_dev->device;
		rte_eth_copy_pci_info(eth_dev, pci_dev);
		eth_dev->device->driver = &mlx4_driver.driver;
		/* Initialize local interrupt handle for current port. */
		priv->intr_handle = (struct rte_intr_handle){
			.fd = -1,
			.type = RTE_INTR_HANDLE_EXT,
		};
		/*
		 * Override ethdev interrupt handle pointer with private
		 * handle instead of that of the parent PCI device used by
		 * default. This prevents it from being shared between all
		 * ports of the same PCI device since each of them is
		 * associated with its own Verbs context.
		 *
		 * Rx interrupts in particular require this as the PMD has
		 * no control over the registration of queue interrupts
		 * besides setting up eth_dev->intr_handle; the rest is
		 * handled by rte_intr_rx_ctl().
		 */
		eth_dev->intr_handle = &priv->intr_handle;
		priv->dev = eth_dev;
		eth_dev->dev_ops = &mlx4_dev_ops;
		/* Bring Ethernet device up. */
		DEBUG("forcing Ethernet interface up");
		mlx4_dev_set_link_up(priv->dev);
		/* Update link status once if waiting for LSC. */
		if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
			mlx4_link_update(eth_dev, 0);
		rte_eth_dev_probing_finish(eth_dev);
		continue;
port_error:
		rte_free(priv);
		if (pd)
			claim_zero(mlx4_glue->dealloc_pd(pd));
		if (ctx)
			claim_zero(mlx4_glue->close_device(ctx));
		if (eth_dev)
			rte_eth_dev_release_port(eth_dev);
		break;
	}
	if (i == device_attr.phys_port_cnt)
		goto error;
	/*
	 * XXX if something went wrong in the loop above, there is a resource
	 * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as
	 * long as the DPDK does not provide a way to deallocate an ethdev and
	 * a way to enumerate the registered ethdevs to free the previous ones.
	 */
error:
	if (attr_ctx)
		claim_zero(mlx4_glue->close_device(attr_ctx));
	if (list)
		mlx4_glue->free_device_list(list);
	if (err)
		rte_errno = err;
	assert(rte_errno >= 0);
	return -err;
}

static const struct rte_pci_id mlx4_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX3)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX3VF)
	},
	{
		.vendor_id = 0
	}
};

static struct rte_pci_driver mlx4_driver = {
	.driver = {
		.name = MLX4_DRIVER_NAME
	},
	.id_table = mlx4_pci_id_map,
	.probe = mlx4_pci_probe,
	.drv_flags = RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_INTR_RMV,
};

#ifdef RTE_LIBRTE_MLX4_DLOPEN_DEPS

/**
 * Suffix RTE_EAL_PMD_PATH with "-glue".
 *
 * This function performs a sanity check on RTE_EAL_PMD_PATH before
 * suffixing its last component.
 *
 * @param buf
 *   Output buffer; it should be large enough, otherwise NULL is returned.
 * @param size
 *   Size of @p buf.
 *
 * @return
 *   Pointer to @p buf or @p NULL in case suffix cannot be appended.
 */
static char *
mlx4_glue_path(char *buf, size_t size)
{
	static const char *const bad[] = { "/", ".", "..", NULL };
	const char *path = RTE_EAL_PMD_PATH;
	size_t len = strlen(path);
	size_t off;
	int i;

	while (len && path[len - 1] == '/')
		--len;
	for (off = len; off && path[off - 1] != '/'; --off)
		;
	for (i = 0; bad[i]; ++i)
		if (!strncmp(path + off, bad[i], (int)(len - off)))
			goto error;
	i = snprintf(buf, size, "%.*s-glue", (int)len, path);
	if (i == -1 || (size_t)i >= size)
		goto error;
	return buf;
error:
	ERROR("unable to append \"-glue\" to last component of"
	      " RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\"),"
	      " please re-configure DPDK");
	return NULL;
}

/**
 * Initialization routine for run-time dependency on rdma-core.
 */
static int
mlx4_glue_init(void)
{
	char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")];
	const char *path[] = {
		/*
		 * A basic security check is necessary before trusting
		 * MLX4_GLUE_PATH, which may override RTE_EAL_PMD_PATH.
		 */
		(geteuid() == getuid() && getegid() == getgid() ?
		 getenv("MLX4_GLUE_PATH") : NULL),
		/*
		 * When RTE_EAL_PMD_PATH is set, use its glue-suffixed
		 * variant, otherwise let dlopen() look up libraries on its
		 * own.
		 */
		(*RTE_EAL_PMD_PATH ?
		 mlx4_glue_path(glue_path, sizeof(glue_path)) : ""),
	};
	unsigned int i = 0;
	void *handle = NULL;
	void **sym;
	const char *dlmsg;

	while (!handle && i != RTE_DIM(path)) {
		const char *end;
		size_t len;
		int ret;

		if (!path[i]) {
			++i;
			continue;
		}
		end = strpbrk(path[i], ":;");
		if (!end)
			end = path[i] + strlen(path[i]);
		len = end - path[i];
		ret = 0;
		do {
			char name[ret + 1];

			ret = snprintf(name, sizeof(name), "%.*s%s" MLX4_GLUE,
				       (int)len, path[i],
				       (!len || *(end - 1) == '/') ? "" : "/");
			if (ret == -1)
				break;
			if (sizeof(name) != (size_t)ret + 1)
				continue;
			DEBUG("looking for rdma-core glue as \"%s\"", name);
			handle = dlopen(name, RTLD_LAZY);
			break;
		} while (1);
		path[i] = end + 1;
		if (!*end)
			++i;
	}
	if (!handle) {
		rte_errno = EINVAL;
		dlmsg = dlerror();
		if (dlmsg)
			WARN("cannot load glue library: %s", dlmsg);
		goto glue_error;
	}
	sym = dlsym(handle, "mlx4_glue");
	if (!sym || !*sym) {
		rte_errno = EINVAL;
		dlmsg = dlerror();
		if (dlmsg)
			ERROR("cannot resolve glue symbol: %s", dlmsg);
		goto glue_error;
	}
	mlx4_glue = *sym;
	return 0;
glue_error:
	if (handle)
		dlclose(handle);
	WARN("cannot initialize PMD due to missing run-time"
	     " dependency on rdma-core libraries (libibverbs,"
	     " libmlx4)");
	return -rte_errno;
}

#endif

/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx4_pmd_init);
static void
rte_mlx4_pmd_init(void)
{
	/*
	 * MLX4_DEVICE_FATAL_CLEANUP tells ibv_destroy functions to
	 * return a success errno value even when called after the
	 * device has been removed.
	 */
	setenv("MLX4_DEVICE_FATAL_CLEANUP", "1", 1);
	/*
	 * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
	 * huge pages. Calling ibv_fork_init() during init allows
	 * applications to use fork() safely for purposes other than
	 * using this PMD, which is not supported in forked processes.
	 */
	setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
#ifdef RTE_LIBRTE_MLX4_DLOPEN_DEPS
	if (mlx4_glue_init())
		return;
	assert(mlx4_glue);
#endif
#ifndef NDEBUG
	/* Glue structure must not contain any NULL pointers. */
	{
		unsigned int i;

		for (i = 0; i != sizeof(*mlx4_glue) / sizeof(void *); ++i)
			assert(((const void *const *)mlx4_glue)[i]);
	}
#endif
	if (strcmp(mlx4_glue->version, MLX4_GLUE_VERSION)) {
		ERROR("rdma-core glue \"%s\" mismatch: \"%s\" is required",
		      mlx4_glue->version, MLX4_GLUE_VERSION);
		return;
	}
	mlx4_glue->fork_init();
	rte_pci_register(&mlx4_driver);
	rte_mem_event_callback_register("MLX4_MEM_EVENT_CB",
					mlx4_mr_mem_event_cb, NULL);
}

RTE_PMD_EXPORT_NAME(net_mlx4, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx4, mlx4_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mlx4,
	"* ib_uverbs & mlx4_en & mlx4_core & mlx4_ib");