/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2012 6WIND S.A.
 * Copyright 2012 Mellanox Technologies, Ltd
 */

/**
 * @file
 * mlx4 driver initialization.
 */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_config.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_interrupts.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>

#include "mlx4.h"
#include "mlx4_glue.h"
#include "mlx4_flow.h"
#include "mlx4_mr.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"
struct mlx4_dev_list mlx4_mem_event_cb_list =
	LIST_HEAD_INITIALIZER(mlx4_mem_event_cb_list);

rte_rwlock_t mlx4_mem_event_rwlock = RTE_RWLOCK_INITIALIZER;
/** Configuration structure for device arguments. */
struct mlx4_conf {
	struct {
		uint32_t present; /**< Bit-field for existing ports. */
		uint32_t enabled; /**< Bit-field for user-enabled ports. */
	} ports;
};

/* Available parameters list. */
const char *pmd_mlx4_init_params[] = {
	MLX4_PMD_PORT_KVARG,
	NULL,
};

static void mlx4_dev_stop(struct rte_eth_dev *dev);
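
/*
 * Illustrative devargs usage for the "port" parameter above (the PCI
 * address below is hypothetical): restricting a dual-port board to its
 * first port could look like "-w 0000:83:00.0,port=0" on the
 * application command line, which reaches mlx4_arg_parse() below with
 * key "port" and value "0".
 */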
#ifdef HAVE_IBV_MLX4_BUF_ALLOCATORS
/**
 * Verbs callback to allocate memory. This function should allocate the
 * space according to the size provided, residing inside a huge page.
 * Please note that all allocation must respect the alignment from libmlx4
 * (i.e. currently sysconf(_SC_PAGESIZE)).
 *
 * @param[in] size
 *   The size in bytes of the memory to allocate.
 * @param[in] data
 *   A pointer to the callback data.
 *
 * @return
 *   Allocated buffer, NULL otherwise and rte_errno is set.
 */
static void *
mlx4_alloc_verbs_buf(size_t size, void *data)
{
	struct mlx4_priv *priv = data;
	void *ret;
	size_t alignment = sysconf(_SC_PAGESIZE);
	unsigned int socket = SOCKET_ID_ANY;

	if (priv->verbs_alloc_ctx.type == MLX4_VERBS_ALLOC_TYPE_TX_QUEUE) {
		const struct txq *txq = priv->verbs_alloc_ctx.obj;

		socket = txq->socket;
	} else if (priv->verbs_alloc_ctx.type ==
		   MLX4_VERBS_ALLOC_TYPE_RX_QUEUE) {
		const struct rxq *rxq = priv->verbs_alloc_ctx.obj;

		socket = rxq->socket;
	}
	assert(data != NULL);
	ret = rte_malloc_socket(__func__, size, alignment, socket);
	if (!ret && size)
		rte_errno = ENOMEM;
	return ret;
}

/**
 * Verbs callback to free memory.
 *
 * @param[in] ptr
 *   A pointer to the memory to free.
 * @param[in] data
 *   A pointer to the callback data.
 */
static void
mlx4_free_verbs_buf(void *ptr, void *data __rte_unused)
{
	assert(data != NULL);
	rte_free(ptr);
}
#endif
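
/*
 * Both callbacks above are handed to libmlx4 during probe (see the
 * mlx4dv_ctx_allocators registration through
 * mlx4_glue->dv_set_context_attr() in mlx4_pci_probe() below) so that
 * Verbs data plane resources are carved out of DPDK hugepage memory on
 * the right NUMA socket.
 */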
/**
 * DPDK callback for Ethernet device configuration.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_dev_configure(struct rte_eth_dev *dev)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	struct rte_flow_error error;
	int ret;

	/* Prepare internal flow rules. */
	ret = mlx4_flow_sync(priv, &error);
	if (ret) {
		ERROR("cannot set up internal flow rules (code %d, \"%s\"),"
		      " flow error type %d, cause %p, message: %s",
		      -ret, strerror(-ret), error.type, error.cause,
		      error.message ? error.message : "(unspecified)");
		goto exit;
	}
	ret = mlx4_intr_install(priv);
	if (ret)
		ERROR("%p: interrupt handler installation failed",
		      (void *)dev);
exit:
	return ret;
}
/**
 * DPDK callback to start the device.
 *
 * Simulate device start by initializing common RSS resources and attaching
 * all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_dev_start(struct rte_eth_dev *dev)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	struct rte_flow_error error;
	int ret;

	DEBUG("%p: attaching configured flows to all RX queues", (void *)dev);
	ret = mlx4_rss_init(priv);
	if (ret) {
		ERROR("%p: cannot initialize RSS resources: %s",
		      (void *)dev, strerror(-ret));
		goto err;
	}
#ifndef NDEBUG
	mlx4_mr_dump_dev(dev);
#endif
	ret = mlx4_rxq_intr_enable(priv);
	if (ret) {
		ERROR("%p: interrupt handler installation failed",
		      (void *)dev);
		goto err;
	}
	ret = mlx4_flow_sync(priv, &error);
	if (ret) {
		ERROR("%p: cannot attach flow rules (code %d, \"%s\"),"
		      " flow error type %d, cause %p, message: %s",
		      (void *)dev,
		      -ret, strerror(-ret), error.type, error.cause,
		      error.message ? error.message : "(unspecified)");
		goto err;
	}
	rte_wmb();
	dev->tx_pkt_burst = mlx4_tx_burst;
	dev->rx_pkt_burst = mlx4_rx_burst;
	return 0;
err:
	mlx4_dev_stop(dev);
	return ret;
}
/**
 * DPDK callback to stop the device.
 *
 * Simulate device stop by detaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx4_dev_stop(struct rte_eth_dev *dev)
{
	struct mlx4_priv *priv = dev->data->dev_private;

	DEBUG("%p: detaching flows from all RX queues", (void *)dev);
	dev->tx_pkt_burst = mlx4_tx_burst_removed;
	dev->rx_pkt_burst = mlx4_rx_burst_removed;
	rte_wmb();
	mlx4_flow_sync(priv, NULL);
	mlx4_rxq_intr_disable(priv);
	mlx4_rss_deinit(priv);
}
/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx4_dev_close(struct rte_eth_dev *dev)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	unsigned int i;

	DEBUG("%p: closing device \"%s\"",
	      (void *)dev,
	      ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
	dev->rx_pkt_burst = mlx4_rx_burst_removed;
	dev->tx_pkt_burst = mlx4_tx_burst_removed;
	rte_wmb();
	mlx4_flow_clean(priv);
	mlx4_rss_deinit(priv);
	for (i = 0; i != dev->data->nb_rx_queues; ++i)
		mlx4_rx_queue_release(dev->data->rx_queues[i]);
	for (i = 0; i != dev->data->nb_tx_queues; ++i)
		mlx4_tx_queue_release(dev->data->tx_queues[i]);
	mlx4_mr_release(dev);
	if (priv->pd != NULL) {
		assert(priv->ctx != NULL);
		claim_zero(mlx4_glue->dealloc_pd(priv->pd));
		claim_zero(mlx4_glue->close_device(priv->ctx));
	} else {
		assert(priv->ctx == NULL);
	}
	mlx4_intr_uninstall(priv);
	memset(priv, 0, sizeof(*priv));
}
static const struct eth_dev_ops mlx4_dev_ops = {
	.dev_configure = mlx4_dev_configure,
	.dev_start = mlx4_dev_start,
	.dev_stop = mlx4_dev_stop,
	.dev_set_link_down = mlx4_dev_set_link_down,
	.dev_set_link_up = mlx4_dev_set_link_up,
	.dev_close = mlx4_dev_close,
	.link_update = mlx4_link_update,
	.promiscuous_enable = mlx4_promiscuous_enable,
	.promiscuous_disable = mlx4_promiscuous_disable,
	.allmulticast_enable = mlx4_allmulticast_enable,
	.allmulticast_disable = mlx4_allmulticast_disable,
	.mac_addr_remove = mlx4_mac_addr_remove,
	.mac_addr_add = mlx4_mac_addr_add,
	.mac_addr_set = mlx4_mac_addr_set,
	.stats_get = mlx4_stats_get,
	.stats_reset = mlx4_stats_reset,
	.fw_version_get = mlx4_fw_version_get,
	.dev_infos_get = mlx4_dev_infos_get,
	.dev_supported_ptypes_get = mlx4_dev_supported_ptypes_get,
	.vlan_filter_set = mlx4_vlan_filter_set,
	.rx_queue_setup = mlx4_rx_queue_setup,
	.tx_queue_setup = mlx4_tx_queue_setup,
	.rx_queue_release = mlx4_rx_queue_release,
	.tx_queue_release = mlx4_tx_queue_release,
	.flow_ctrl_get = mlx4_flow_ctrl_get,
	.flow_ctrl_set = mlx4_flow_ctrl_set,
	.mtu_set = mlx4_mtu_set,
	.filter_ctrl = mlx4_filter_ctrl,
	.rx_queue_intr_enable = mlx4_rx_intr_enable,
	.rx_queue_intr_disable = mlx4_rx_intr_disable,
	.is_removed = mlx4_is_removed,
};
/**
 * Get PCI information from struct ibv_device.
 *
 * @param device
 *   Pointer to the IB device to inspect.
 * @param[out] pci_addr
 *   PCI bus address output buffer.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_ibv_device_to_pci_addr(const struct ibv_device *device,
			    struct rte_pci_addr *pci_addr)
{
	FILE *file;
	char line[32];
	MKSTR(path, "%s/device/uevent", device->ibdev_path);

	file = fopen(path, "rb");
	if (file == NULL) {
		rte_errno = errno;
		return -rte_errno;
	}
	while (fgets(line, sizeof(line), file) == line) {
		size_t len = strlen(line);
		int ret;

		/* Truncate long lines. */
		if (len == (sizeof(line) - 1))
			while (line[(len - 1)] != '\n') {
				ret = fgetc(file);
				if (ret == EOF)
					break;
				line[(len - 1)] = ret;
			}
		/* Extract information. */
		if (sscanf(line,
			   "PCI_SLOT_NAME="
			   "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
			   &pci_addr->domain,
			   &pci_addr->bus,
			   &pci_addr->devid,
			   &pci_addr->function) == 4)
			break;
	}
	fclose(file);
	return 0;
}
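
/*
 * For reference, the parser above matches a sysfs uevent line such as
 * the following (address hypothetical):
 *
 *   PCI_SLOT_NAME=0000:83:00.0
 *
 * yielding domain 0x0000, bus 0x83, devid 0x00 and function 0x0.
 */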
/**
 * Verify and store value for device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param[in, out] conf
 *   Shared configuration data.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_arg_parse(const char *key, const char *val, struct mlx4_conf *conf)
{
	unsigned long tmp;

	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		rte_errno = errno;
		WARN("%s: \"%s\" is not a valid integer", key, val);
		return -rte_errno;
	}
	if (strcmp(MLX4_PMD_PORT_KVARG, key) == 0) {
		uint32_t ports = rte_log2_u32(conf->ports.present + 1);

		if (tmp >= ports) {
			rte_errno = EINVAL;
			ERROR("port index %lu outside range [0,%" PRIu32 ")",
			      tmp, ports);
			return -rte_errno;
		}
		if (!(conf->ports.present & (1 << tmp))) {
			rte_errno = EINVAL;
			ERROR("invalid port index %lu", tmp);
			return -rte_errno;
		}
		conf->ports.enabled |= 1 << tmp;
	} else {
		rte_errno = EINVAL;
		WARN("%s: unknown parameter", key);
		return -rte_errno;
	}
	return 0;
}
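
/*
 * Worked example for the checks above: on a dual-port board,
 * conf->ports.present is 0x3, so rte_log2_u32(0x3 + 1) yields 2 valid
 * indexes (0 and 1); "port=2" is rejected as out of range while
 * "port=1" sets bit 1 in conf->ports.enabled.
 */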
/**
 * Parse device parameters.
 *
 * @param devargs
 *   Device arguments structure.
 * @param conf
 *   Shared configuration data.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_args(struct rte_devargs *devargs, struct mlx4_conf *conf)
{
	struct rte_kvargs *kvlist;
	unsigned int arg_count;
	int ret = 0;
	int i;

	if (devargs == NULL)
		return 0;
	kvlist = rte_kvargs_parse(devargs->args, pmd_mlx4_init_params);
	if (kvlist == NULL) {
		rte_errno = EINVAL;
		ERROR("failed to parse kvargs");
		return -rte_errno;
	}
	/* Process parameters. */
	for (i = 0; pmd_mlx4_init_params[i]; ++i) {
		arg_count = rte_kvargs_count(kvlist, MLX4_PMD_PORT_KVARG);
		while (arg_count-- > 0) {
			ret = rte_kvargs_process(kvlist,
						 MLX4_PMD_PORT_KVARG,
						 (int (*)(const char *,
							  const char *,
							  void *))
						 mlx4_arg_parse,
						 conf);
			if (ret != 0)
				goto free_kvlist;
		}
	}
free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}
/**
 * Interpret RSS capabilities reported by device.
 *
 * This function returns the set of usable Verbs RSS hash fields, kernel
 * quirks taken into account.
 *
 * @param ctx
 *   Verbs context.
 * @param pd
 *   Verbs protection domain.
 * @param device_attr_ex
 *   Extended device attributes to interpret.
 *
 * @return
 *   Usable RSS hash fields mask in Verbs format.
 */
static uint64_t
mlx4_hw_rss_sup(struct ibv_context *ctx, struct ibv_pd *pd,
		struct ibv_device_attr_ex *device_attr_ex)
{
	uint64_t hw_rss_sup = device_attr_ex->rss_caps.rx_hash_fields_mask;
	struct ibv_cq *cq = NULL;
	struct ibv_wq *wq = NULL;
	struct ibv_rwq_ind_table *ind = NULL;
	struct ibv_qp *qp = NULL;

	if (!hw_rss_sup) {
		WARN("no RSS capabilities reported; disabling support for UDP"
		     " RSS and inner VXLAN RSS");
		return IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 |
			IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6 |
			IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP;
	}
	if (!(hw_rss_sup & IBV_RX_HASH_INNER))
		return hw_rss_sup;
	/*
	 * Although reported as supported, missing code in some Linux
	 * versions (v4.15, v4.16) prevents the creation of hash QPs with
	 * inner capability.
	 *
	 * There is no choice but to attempt to instantiate a temporary RSS
	 * context in order to confirm its support.
	 */
	cq = mlx4_glue->create_cq(ctx, 1, NULL, NULL, 0);
	wq = cq ? mlx4_glue->create_wq
		(ctx,
		 &(struct ibv_wq_init_attr){
			.wq_type = IBV_WQT_RQ,
			.max_wr = 1,
			.max_sge = 1,
			.pd = pd,
			.cq = cq,
		 }) : NULL;
	ind = wq ? mlx4_glue->create_rwq_ind_table
		(ctx,
		 &(struct ibv_rwq_ind_table_init_attr){
			.log_ind_tbl_size = 0,
			.ind_tbl = &wq,
			.comp_mask = 0,
		 }) : NULL;
	qp = ind ? mlx4_glue->create_qp_ex
		(ctx,
		 &(struct ibv_qp_init_attr_ex){
			.comp_mask =
				(IBV_QP_INIT_ATTR_PD |
				 IBV_QP_INIT_ATTR_RX_HASH |
				 IBV_QP_INIT_ATTR_IND_TABLE),
			.qp_type = IBV_QPT_RAW_PACKET,
			.pd = pd,
			.rwq_ind_tbl = ind,
			.rx_hash_conf = {
				.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
				.rx_hash_key_len = MLX4_RSS_HASH_KEY_SIZE,
				.rx_hash_key = mlx4_rss_hash_key_default,
				.rx_hash_fields_mask = hw_rss_sup,
			},
		 }) : NULL;
	if (!qp) {
		WARN("disabling unusable inner RSS capability due to kernel"
		     " quirk");
		hw_rss_sup &= ~IBV_RX_HASH_INNER;
	} else {
		claim_zero(mlx4_glue->destroy_qp(qp));
	}
	if (ind)
		claim_zero(mlx4_glue->destroy_rwq_ind_table(ind));
	if (wq)
		claim_zero(mlx4_glue->destroy_wq(wq));
	if (cq)
		claim_zero(mlx4_glue->destroy_cq(cq));
	return hw_rss_sup;
}
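
/*
 * The temporary CQ/WQ/indirection-table/QP chain above exists only to
 * confirm that a hash QP with IBV_RX_HASH_INNER can actually be
 * created: on affected kernels creation fails and the inner RSS bit is
 * cleared, while on healthy kernels the reported mask is returned
 * unmodified.
 */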
static struct rte_pci_driver mlx4_driver;

/**
 * DPDK callback to register a PCI device.
 *
 * This function creates an Ethernet device for each port of a given
 * Verbs device.
 *
 * @param[in] pci_drv
 *   PCI driver structure (mlx4_driver).
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	struct ibv_device **list;
	struct ibv_device *ibv_dev;
	int err = 0;
	struct ibv_context *attr_ctx = NULL;
	struct ibv_device_attr device_attr;
	struct ibv_device_attr_ex device_attr_ex;
	struct mlx4_conf conf = {
		.ports.present = 0,
	};
	unsigned int vf;
	int i;

	(void)pci_drv;
	assert(pci_drv == &mlx4_driver);
	list = mlx4_glue->get_device_list(&i);
	if (list == NULL) {
		rte_errno = errno;
		assert(rte_errno);
		if (rte_errno == ENOSYS)
			ERROR("cannot list devices, is ib_uverbs loaded?");
		return -rte_errno;
	}
	assert(i >= 0);
	/*
	 * For each listed device, check related sysfs entry against
	 * the provided PCI ID.
	 */
	while (i != 0) {
		struct rte_pci_addr pci_addr;

		--i;
		DEBUG("checking device \"%s\"", list[i]->name);
		if (mlx4_ibv_device_to_pci_addr(list[i], &pci_addr))
			continue;
		if ((pci_dev->addr.domain != pci_addr.domain) ||
		    (pci_dev->addr.bus != pci_addr.bus) ||
		    (pci_dev->addr.devid != pci_addr.devid) ||
		    (pci_dev->addr.function != pci_addr.function))
			continue;
		vf = (pci_dev->id.device_id ==
		      PCI_DEVICE_ID_MELLANOX_CONNECTX3VF);
		INFO("PCI information matches, using device \"%s\" (VF: %s)",
		     list[i]->name, (vf ? "true" : "false"));
		attr_ctx = mlx4_glue->open_device(list[i]);
		err = errno;
		break;
	}
	if (attr_ctx == NULL) {
		mlx4_glue->free_device_list(list);
		switch (err) {
		case 0:
			rte_errno = ENODEV;
			ERROR("cannot access device, is mlx4_ib loaded?");
			return -rte_errno;
		case EINVAL:
			rte_errno = EINVAL;
			ERROR("cannot use device, are drivers up to date?");
			return -rte_errno;
		}
		assert(err > 0);
		rte_errno = err;
		return -rte_errno;
	}
	ibv_dev = list[i];
	DEBUG("device opened");
	if (mlx4_glue->query_device(attr_ctx, &device_attr)) {
		err = ENODEV;
		goto error;
	}
	INFO("%u port(s) detected", device_attr.phys_port_cnt);
	conf.ports.present |= (UINT64_C(1) << device_attr.phys_port_cnt) - 1;
	if (mlx4_args(pci_dev->device.devargs, &conf)) {
		ERROR("failed to process device arguments");
		err = EINVAL;
		goto error;
	}
	/* Use all ports when none are defined */
	if (!conf.ports.enabled)
		conf.ports.enabled = conf.ports.present;
	/* Retrieve extended device attributes. */
	if (mlx4_glue->query_device_ex(attr_ctx, NULL, &device_attr_ex)) {
		err = ENODEV;
		goto error;
	}
	assert(device_attr.max_sge >= MLX4_MAX_SGE);
	for (i = 0; i < device_attr.phys_port_cnt; i++) {
		uint32_t port = i + 1; /* ports are indexed from one */
		struct ibv_context *ctx = NULL;
		struct ibv_port_attr port_attr;
		struct ibv_pd *pd = NULL;
		struct mlx4_priv *priv = NULL;
		struct rte_eth_dev *eth_dev = NULL;
		struct ether_addr mac;

		/* If port is not enabled, skip. */
		if (!(conf.ports.enabled & (1 << i)))
			continue;
		DEBUG("using port %u", port);
		ctx = mlx4_glue->open_device(ibv_dev);
		if (ctx == NULL) {
			err = ENODEV;
			goto port_error;
		}
		/* Check port status. */
		err = mlx4_glue->query_port(ctx, port, &port_attr);
		if (err) {
			ERROR("port query failed: %s", strerror(err));
			err = ENODEV;
			goto port_error;
		}
		if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
			err = ENOTSUP;
			ERROR("port %d is not configured in Ethernet mode",
			      port);
			goto port_error;
		}
		if (port_attr.state != IBV_PORT_ACTIVE)
			DEBUG("port %d is not active: \"%s\" (%d)",
			      port, mlx4_glue->port_state_str(port_attr.state),
			      port_attr.state);
		/* Make asynchronous FD non-blocking to handle interrupts. */
		err = mlx4_fd_set_non_blocking(ctx->async_fd);
		if (err) {
			ERROR("cannot make asynchronous FD non-blocking: %s",
			      strerror(err));
			goto port_error;
		}
		/* Allocate protection domain. */
		pd = mlx4_glue->alloc_pd(ctx);
		if (pd == NULL) {
			err = ENOMEM;
			ERROR("PD allocation failure");
			goto port_error;
		}
		/* from rte_ethdev.c */
		priv = rte_zmalloc("ethdev private structure",
				   sizeof(*priv),
				   RTE_CACHE_LINE_SIZE);
		if (priv == NULL) {
			err = ENOMEM;
			ERROR("priv allocation failure");
			goto port_error;
		}
		priv->ctx = ctx;
		priv->device_attr = device_attr;
		priv->port = port;
		priv->pd = pd;
		priv->mtu = ETHER_MTU;
		priv->vf = vf;
		priv->hw_csum = !!(device_attr.device_cap_flags &
				   IBV_DEVICE_RAW_IP_CSUM);
		DEBUG("checksum offloading is %ssupported",
		      (priv->hw_csum ? "" : "not "));
		/* Only ConnectX-3 Pro supports tunneling. */
		priv->hw_csum_l2tun =
			priv->hw_csum &&
			(device_attr.vendor_part_id ==
			 PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO);
		DEBUG("L2 tunnel checksum offloads are %ssupported",
		      priv->hw_csum_l2tun ? "" : "not ");
		priv->hw_rss_sup = mlx4_hw_rss_sup(priv->ctx, priv->pd,
						   &device_attr_ex);
		DEBUG("supported RSS hash fields mask: %016" PRIx64,
		      priv->hw_rss_sup);
		priv->hw_rss_max_qps =
			device_attr_ex.rss_caps.max_rwq_indirection_table_size;
		DEBUG("MAX RSS queues %d", priv->hw_rss_max_qps);
		priv->hw_fcs_strip = !!(device_attr_ex.raw_packet_caps &
					IBV_RAW_PACKET_CAP_SCATTER_FCS);
		DEBUG("FCS stripping toggling is %ssupported",
		      priv->hw_fcs_strip ? "" : "not ");
		priv->tso =
			((device_attr_ex.tso_caps.max_tso > 0) &&
			 (device_attr_ex.tso_caps.supported_qpts &
			  (1 << IBV_QPT_RAW_PACKET)));
		if (priv->tso)
			priv->tso_max_payload_sz =
					device_attr_ex.tso_caps.max_tso;
		DEBUG("TSO is %ssupported",
		      priv->tso ? "" : "not ");
		/* Configure the first MAC address by default. */
		err = mlx4_get_mac(priv, &mac.addr_bytes);
		if (err) {
			ERROR("cannot get MAC address, is mlx4_en loaded?"
			      " (error: %s)", strerror(err));
			goto port_error;
		}
		INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
		     priv->port,
		     mac.addr_bytes[0], mac.addr_bytes[1],
		     mac.addr_bytes[2], mac.addr_bytes[3],
		     mac.addr_bytes[4], mac.addr_bytes[5]);
		/* Register MAC address. */
		priv->mac[0] = mac;
#ifndef NDEBUG
		{
			char ifname[IF_NAMESIZE];

			if (mlx4_get_ifname(priv, &ifname) == 0)
				DEBUG("port %u ifname is \"%s\"",
				      priv->port, ifname);
			else
				DEBUG("port %u ifname is unknown", priv->port);
		}
#endif
		/* Get actual MTU if possible. */
		mlx4_mtu_get(priv, &priv->mtu);
		DEBUG("port %u MTU is %u", priv->port, priv->mtu);
		/* from rte_ethdev.c */
		{
			char name[RTE_ETH_NAME_MAX_LEN];

			snprintf(name, sizeof(name), "%s port %u",
				 mlx4_glue->get_device_name(ibv_dev), port);
			eth_dev = rte_eth_dev_allocate(name);
		}
		if (eth_dev == NULL) {
			err = ENOMEM;
			ERROR("can not allocate rte ethdev");
			goto port_error;
		}
		eth_dev->data->dev_private = priv;
		eth_dev->data->mac_addrs = priv->mac;
		eth_dev->device = &pci_dev->device;
		rte_eth_copy_pci_info(eth_dev, pci_dev);
		/* Initialize local interrupt handle for current port. */
		priv->intr_handle = (struct rte_intr_handle){
			.fd = -1,
			.type = RTE_INTR_HANDLE_EXT,
		};
		/*
		 * Override ethdev interrupt handle pointer with private
		 * handle instead of that of the parent PCI device used by
		 * default. This prevents it from being shared between all
		 * ports of the same PCI device since each of them is
		 * associated with its own Verbs context.
		 *
		 * Rx interrupts in particular require this as the PMD has
		 * no control over the registration of queue interrupts
		 * besides setting up eth_dev->intr_handle, the rest is
		 * handled by rte_intr_rx_ctl().
		 */
		eth_dev->intr_handle = &priv->intr_handle;
		priv->dev_data = eth_dev->data;
		eth_dev->dev_ops = &mlx4_dev_ops;
#ifdef HAVE_IBV_MLX4_BUF_ALLOCATORS
		/* Hint libmlx4 to use PMD allocator for data plane resources */
		struct mlx4dv_ctx_allocators alctr = {
			.alloc = &mlx4_alloc_verbs_buf,
			.free = &mlx4_free_verbs_buf,
			.data = priv,
		};
		mlx4_glue->dv_set_context_attr
			(ctx, MLX4DV_SET_CTX_ATTR_BUF_ALLOCATORS,
			 (void *)((uintptr_t)&alctr));
#endif
		/* Bring Ethernet device up. */
		DEBUG("forcing Ethernet interface up");
		mlx4_dev_set_link_up(eth_dev);
		/* Update link status once if waiting for LSC. */
		if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
			mlx4_link_update(eth_dev, 0);
		/*
		 * Once the device is added to the list of memory event
		 * callback, its global MR cache table cannot be expanded
		 * on the fly because of deadlock. If it overflows, lookup
		 * should be done by searching MR list linearly, which is slow.
		 */
		err = mlx4_mr_btree_init(&priv->mr.cache,
					 MLX4_MR_BTREE_CACHE_N * 2,
					 eth_dev->device->numa_node);
		if (err) {
			/* rte_errno is already set. */
			goto port_error;
		}
		/* Add device to memory callback list. */
		rte_rwlock_write_lock(&mlx4_mem_event_rwlock);
		LIST_INSERT_HEAD(&mlx4_mem_event_cb_list, priv, mem_event_cb);
		rte_rwlock_write_unlock(&mlx4_mem_event_rwlock);
		rte_eth_dev_probing_finish(eth_dev);
		continue;
port_error:
		rte_free(priv);
		if (eth_dev != NULL)
			eth_dev->data->dev_private = NULL;
		if (pd)
			claim_zero(mlx4_glue->dealloc_pd(pd));
		if (ctx)
			claim_zero(mlx4_glue->close_device(ctx));
		if (eth_dev != NULL) {
			/* mac_addrs must not be freed because part of dev_private */
			eth_dev->data->mac_addrs = NULL;
			rte_eth_dev_release_port(eth_dev);
		}
		break;
	}
	/*
	 * XXX if something went wrong in the loop above, there is a resource
	 * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as
	 * long as the dpdk does not provide a way to deallocate an ethdev and
	 * a way to enumerate the registered ethdevs to free the previous ones.
	 */
error:
	if (attr_ctx)
		claim_zero(mlx4_glue->close_device(attr_ctx));
	if (list)
		mlx4_glue->free_device_list(list);
	if (err)
		rte_errno = err;
	return -err;
}
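
/*
 * Note: the EAL PCI bus invokes mlx4_pci_probe() once per matching
 * device; on a dual-port ConnectX-3 with both ports enabled this
 * registers two ethdevs named after the Verbs device, e.g.
 * "mlx4_0 port 1" and "mlx4_0 port 2" (device name illustrative).
 */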
static const struct rte_pci_id mlx4_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX3)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX3VF)
	},
	{
		.vendor_id = 0
	}
};
static struct rte_pci_driver mlx4_driver = {
	.driver = {
		.name = MLX4_DRIVER_NAME
	},
	.id_table = mlx4_pci_id_map,
	.probe = mlx4_pci_probe,
	.drv_flags = RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_INTR_RMV,
};
#ifdef RTE_IBVERBS_LINK_DLOPEN

/**
 * Suffix RTE_EAL_PMD_PATH with "-glue".
 *
 * This function performs a sanity check on RTE_EAL_PMD_PATH before
 * suffixing its last component.
 *
 * @param[out] buf
 *   Output buffer, should be large enough otherwise NULL is returned.
 * @param size
 *   Size of @p buf.
 *
 * @return
 *   Pointer to @p buf or @p NULL in case suffix cannot be appended.
 */
static char *
mlx4_glue_path(char *buf, size_t size)
{
	static const char *const bad[] = { "/", ".", "..", NULL };
	const char *path = RTE_EAL_PMD_PATH;
	size_t len = strlen(path);
	size_t off;
	int i;

	while (len && path[len - 1] == '/')
		--len;
	for (off = len; off && path[off - 1] != '/'; --off)
		;
	for (i = 0; bad[i]; ++i)
		if (!strncmp(path + off, bad[i], (int)(len - off)))
			goto error;
	i = snprintf(buf, size, "%.*s-glue", (int)len, path);
	if (i == -1 || (size_t)i >= size)
		goto error;
	return buf;
error:
	ERROR("unable to append \"-glue\" to last component of"
	      " RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\"),"
	      " please re-configure DPDK");
	return NULL;
}
/**
 * Initialization routine for run-time dependency on rdma-core.
 */
static int
mlx4_glue_init(void)
{
	char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")];
	const char *path[] = {
		/*
		 * A basic security check is necessary before trusting
		 * MLX4_GLUE_PATH, which may override RTE_EAL_PMD_PATH.
		 */
		(geteuid() == getuid() && getegid() == getgid() ?
		 getenv("MLX4_GLUE_PATH") : NULL),
		/*
		 * When RTE_EAL_PMD_PATH is set, use its glue-suffixed
		 * variant, otherwise let dlopen() look up libraries on its
		 * own.
		 */
		(*RTE_EAL_PMD_PATH ?
		 mlx4_glue_path(glue_path, sizeof(glue_path)) : ""),
	};
	unsigned int i = 0;
	void *handle = NULL;
	void **sym;
	const char *dlmsg;

	while (!handle && i != RTE_DIM(path)) {
		const char *end;
		size_t len;
		int ret;

		if (!path[i]) {
			++i;
			continue;
		}
		end = strpbrk(path[i], ":;");
		if (!end)
			end = path[i] + strlen(path[i]);
		len = end - path[i];
		ret = 0;
		do {
			char name[ret + 1];

			ret = snprintf(name, sizeof(name), "%.*s%s" MLX4_GLUE,
				       (int)len, path[i],
				       (!len || *(end - 1) == '/') ? "" : "/");
			if (ret == -1)
				break;
			if (sizeof(name) != (size_t)ret + 1)
				continue;
			DEBUG("looking for rdma-core glue as \"%s\"", name);
			handle = dlopen(name, RTLD_LAZY);
			break;
		} while (1);
		path[i] = end + 1;
		if (!*end)
			++i;
	}
	if (!handle) {
		rte_errno = EINVAL;
		dlmsg = dlerror();
		if (dlmsg)
			WARN("cannot load glue library: %s", dlmsg);
		goto glue_error;
	}
	sym = dlsym(handle, "mlx4_glue");
	if (!sym || !*sym) {
		rte_errno = EINVAL;
		dlmsg = dlerror();
		if (dlmsg)
			ERROR("cannot resolve glue symbol: %s", dlmsg);
		goto glue_error;
	}
	mlx4_glue = *sym;
	return 0;
glue_error:
	if (handle)
		dlclose(handle);
	WARN("cannot initialize PMD due to missing run-time"
	     " dependency on rdma-core libraries (libibverbs,"
	     " libmlx4)");
	return -rte_errno;
}

#endif
/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx4_pmd_init)
{
	/*
	 * MLX4_DEVICE_FATAL_CLEANUP tells ibv_destroy functions we
	 * want to get success errno value in case of calling them
	 * when the device was removed.
	 */
	setenv("MLX4_DEVICE_FATAL_CLEANUP", "1", 1);
	/*
	 * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
	 * huge pages. Calling ibv_fork_init() during init allows
	 * applications to use fork() safely for purposes other than
	 * using this PMD, which is not supported in forked processes.
	 */
	setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
#ifdef RTE_IBVERBS_LINK_DLOPEN
	if (mlx4_glue_init())
		return;
	assert(mlx4_glue);
#endif
#ifndef NDEBUG
	/* Glue structure must not contain any NULL pointers. */
	{
		unsigned int i;

		for (i = 0; i != sizeof(*mlx4_glue) / sizeof(void *); ++i)
			assert(((const void *const *)mlx4_glue)[i]);
	}
#endif
	if (strcmp(mlx4_glue->version, MLX4_GLUE_VERSION)) {
		ERROR("rdma-core glue \"%s\" mismatch: \"%s\" is required",
		      mlx4_glue->version, MLX4_GLUE_VERSION);
		return;
	}
	mlx4_glue->fork_init();
	rte_pci_register(&mlx4_driver);
	rte_mem_event_callback_register("MLX4_MEM_EVENT_CB",
					mlx4_mr_mem_event_cb, NULL);
}
RTE_PMD_EXPORT_NAME(net_mlx4, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx4, mlx4_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mlx4,
	"* ib_uverbs & mlx4_en & mlx4_core & mlx4_ib");