1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2012 6WIND S.A.
3 * Copyright 2012 Mellanox Technologies, Ltd
8 * mlx4 driver initialization.
23 /* Verbs headers do not support -pedantic. */
25 #pragma GCC diagnostic ignored "-Wpedantic"
27 #include <infiniband/verbs.h>
29 #pragma GCC diagnostic error "-Wpedantic"
32 #include <rte_common.h>
33 #include <rte_config.h>
35 #include <rte_errno.h>
36 #include <rte_ethdev_driver.h>
37 #include <rte_ethdev_pci.h>
38 #include <rte_ether.h>
40 #include <rte_interrupts.h>
41 #include <rte_kvargs.h>
42 #include <rte_malloc.h>
46 #include "mlx4_glue.h"
47 #include "mlx4_flow.h"
49 #include "mlx4_rxtx.h"
50 #include "mlx4_utils.h"
52 static const char *MZ_MLX4_PMD_SHARED_DATA = "mlx4_pmd_shared_data";
54 /* Shared memory between primary and secondary processes. */
55 struct mlx4_shared_data *mlx4_shared_data;
57 /* Spinlock for mlx4_shared_data allocation. */
58 static rte_spinlock_t mlx4_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
60 /* Process local data for secondary processes. */
61 static struct mlx4_local_data mlx4_local_data;
63 /** Configuration structure for device arguments. */
66 uint32_t present; /**< Bit-field for existing ports. */
67 uint32_t enabled; /**< Bit-field for user-enabled ports. */
70 /** Whether memseg should be extended for MR creation. */
73 /* Available parameters list. */
74 const char *pmd_mlx4_init_params[] = {
76 MLX4_MR_EXT_MEMSEG_EN_KVARG,
80 static void mlx4_dev_stop(struct rte_eth_dev *dev);
83 * Initialize shared data between the primary and secondary processes.
85 * A memzone is reserved by the primary process and secondary processes attach to
89 * 0 on success, a negative errno value otherwise and rte_errno is set.
92 mlx4_init_shared_data(void)
94 const struct rte_memzone *mz;
97 rte_spinlock_lock(&mlx4_shared_data_lock);
98 if (mlx4_shared_data == NULL) {
99 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
100 /* Allocate shared memory. */
101 mz = rte_memzone_reserve(MZ_MLX4_PMD_SHARED_DATA,
102 sizeof(*mlx4_shared_data),
105 ERROR("Cannot allocate mlx4 shared data\n");
109 mlx4_shared_data = mz->addr;
110 memset(mlx4_shared_data, 0, sizeof(*mlx4_shared_data));
111 rte_spinlock_init(&mlx4_shared_data->lock);
113 /* Lookup allocated shared memory. */
114 mz = rte_memzone_lookup(MZ_MLX4_PMD_SHARED_DATA);
116 ERROR("Cannot attach mlx4 shared data\n");
120 mlx4_shared_data = mz->addr;
121 memset(&mlx4_local_data, 0, sizeof(mlx4_local_data));
125 rte_spinlock_unlock(&mlx4_shared_data_lock);
130 * Uninitialize shared data between the primary and secondary processes.
132 * The secondary process drops its pointer and the primary process frees
136 mlx4_uninit_shared_data(void)
138 const struct rte_memzone *mz;
140 rte_spinlock_lock(&mlx4_shared_data_lock);
141 if (mlx4_shared_data) {
142 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
143 mz = rte_memzone_lookup(MZ_MLX4_PMD_SHARED_DATA);
144 rte_memzone_free(mz);
146 memset(&mlx4_local_data, 0, sizeof(mlx4_local_data));
148 mlx4_shared_data = NULL;
150 rte_spinlock_unlock(&mlx4_shared_data_lock);
153 #ifdef HAVE_IBV_MLX4_BUF_ALLOCATORS
155 * Verbs callback to allocate memory. This function should allocate the space
156 * according to the size provided, residing inside a huge page.
157 * Please note that all allocations must respect the alignment from libmlx4
158 * (i.e. currently sysconf(_SC_PAGESIZE)).
161 * The size in bytes of the memory to allocate.
163 * A pointer to the callback data.
166 * Allocated buffer, NULL otherwise and rte_errno is set.
169 mlx4_alloc_verbs_buf(size_t size, void *data)
171 struct mlx4_priv *priv = data;
173 size_t alignment = sysconf(_SC_PAGESIZE);
174 unsigned int socket = SOCKET_ID_ANY;
176 if (priv->verbs_alloc_ctx.type == MLX4_VERBS_ALLOC_TYPE_TX_QUEUE) {
177 const struct txq *txq = priv->verbs_alloc_ctx.obj;
179 socket = txq->socket;
180 } else if (priv->verbs_alloc_ctx.type ==
181 MLX4_VERBS_ALLOC_TYPE_RX_QUEUE) {
182 const struct rxq *rxq = priv->verbs_alloc_ctx.obj;
184 socket = rxq->socket;
186 assert(data != NULL);
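	/* rte_malloc_socket() allocates from the DPDK heap of the selected
	 * socket (hugepage-backed when hugepages are in use) with the
	 * page-size alignment libmlx4 expects. */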
187 ret = rte_malloc_socket(__func__, size, alignment, socket);
194 * Verbs callback to free memory.
197 * A pointer to the memory to free.
199 * A pointer to the callback data.
202 mlx4_free_verbs_buf(void *ptr, void *data __rte_unused)
204 assert(data != NULL);
210 * DPDK callback for Ethernet device configuration.
213 * Pointer to Ethernet device structure.
216 * 0 on success, negative errno value otherwise and rte_errno is set.
219 mlx4_dev_configure(struct rte_eth_dev *dev)
221 struct mlx4_priv *priv = dev->data->dev_private;
222 struct rte_flow_error error;
225 /* Prepare internal flow rules. */
226 ret = mlx4_flow_sync(priv, &error);
228 ERROR("cannot set up internal flow rules (code %d, \"%s\"),"
229 " flow error type %d, cause %p, message: %s",
230 -ret, strerror(-ret), error.type, error.cause,
231 error.message ? error.message : "(unspecified)");
234 ret = mlx4_intr_install(priv);
236 ERROR("%p: interrupt handler installation failed",
243 * DPDK callback to start the device.
245 * Simulate device start by initializing common RSS resources and attaching
246 * all configured flows.
249 * Pointer to Ethernet device structure.
252 * 0 on success, negative errno value otherwise and rte_errno is set.
255 mlx4_dev_start(struct rte_eth_dev *dev)
257 struct mlx4_priv *priv = dev->data->dev_private;
258 struct rte_flow_error error;
263 DEBUG("%p: attaching configured flows to all RX queues", (void *)dev);
265 ret = mlx4_tx_uar_remap(dev, priv->ctx->cmd_fd);
267 ERROR("%p: cannot remap UAR", (void *)dev);
270 ret = mlx4_rss_init(priv);
272 ERROR("%p: cannot initialize RSS resources: %s",
273 (void *)dev, strerror(-ret));
277 mlx4_mr_dump_dev(dev);
279 ret = mlx4_rxq_intr_enable(priv);
281 ERROR("%p: interrupt handler installation failed",
285 ret = mlx4_flow_sync(priv, &error);
287 ERROR("%p: cannot attach flow rules (code %d, \"%s\"),"
288 " flow error type %d, cause %p, message: %s",
290 -ret, strerror(-ret), error.type, error.cause,
291 error.message ? error.message : "(unspecified)");
295 dev->tx_pkt_burst = mlx4_tx_burst;
296 dev->rx_pkt_burst = mlx4_rx_burst;
297 /* Enable datapath on secondary process. */
298 mlx4_mp_req_start_rxtx(dev);
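	/*
	 * At this point UAR pages are remapped, RSS resources and Rx
	 * interrupts are initialized, flow rules are attached and the real
	 * burst functions are in place; secondary processes have also been
	 * asked to enable their datapath.
	 */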
306 * DPDK callback to stop the device.
308 * Simulate device stop by detaching all configured flows.
311 * Pointer to Ethernet device structure.
314 mlx4_dev_stop(struct rte_eth_dev *dev)
316 struct mlx4_priv *priv = dev->data->dev_private;
317 #ifdef HAVE_IBV_MLX4_UAR_MMAP_OFFSET
318 const size_t page_size = sysconf(_SC_PAGESIZE);
324 DEBUG("%p: detaching flows from all RX queues", (void *)dev);
326 dev->tx_pkt_burst = mlx4_tx_burst_removed;
327 dev->rx_pkt_burst = mlx4_rx_burst_removed;
329 /* Disable datapath on secondary process. */
330 mlx4_mp_req_stop_rxtx(dev);
331 mlx4_flow_sync(priv, NULL);
332 mlx4_rxq_intr_disable(priv);
333 mlx4_rss_deinit(priv);
334 #ifdef HAVE_IBV_MLX4_UAR_MMAP_OFFSET
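	/* Unmap the Tx UAR doorbell pages previously remapped by
	 * mlx4_tx_uar_remap(), one page-aligned mapping per Tx queue. */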
335 for (i = 0; i != dev->data->nb_tx_queues; ++i) {
338 txq = dev->data->tx_queues[i];
341 munmap((void *)RTE_ALIGN_FLOOR((uintptr_t)txq->msq.db,
342 page_size), page_size);
348 * DPDK callback to close the device.
350 * Destroy all queues and objects, free memory.
353 * Pointer to Ethernet device structure.
356 mlx4_dev_close(struct rte_eth_dev *dev)
358 struct mlx4_priv *priv = dev->data->dev_private;
361 DEBUG("%p: closing device \"%s\"",
363 ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
364 dev->rx_pkt_burst = mlx4_rx_burst_removed;
365 dev->tx_pkt_burst = mlx4_tx_burst_removed;
367 /* Disable datapath on secondary process. */
368 mlx4_mp_req_stop_rxtx(dev);
369 mlx4_flow_clean(priv);
370 mlx4_rss_deinit(priv);
371 for (i = 0; i != dev->data->nb_rx_queues; ++i)
372 mlx4_rx_queue_release(dev->data->rx_queues[i]);
373 for (i = 0; i != dev->data->nb_tx_queues; ++i)
374 mlx4_tx_queue_release(dev->data->tx_queues[i]);
375 mlx4_mr_release(dev);
376 if (priv->pd != NULL) {
377 assert(priv->ctx != NULL);
378 claim_zero(mlx4_glue->dealloc_pd(priv->pd));
379 claim_zero(mlx4_glue->close_device(priv->ctx));
381 assert(priv->ctx == NULL);
382 mlx4_intr_uninstall(priv);
383 memset(priv, 0, sizeof(*priv));
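	/* By now the datapath has been disabled, secondary processes stopped
	 * and all queues, MRs, the PD and the Verbs context released. */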
386 static const struct eth_dev_ops mlx4_dev_ops = {
387 .dev_configure = mlx4_dev_configure,
388 .dev_start = mlx4_dev_start,
389 .dev_stop = mlx4_dev_stop,
390 .dev_set_link_down = mlx4_dev_set_link_down,
391 .dev_set_link_up = mlx4_dev_set_link_up,
392 .dev_close = mlx4_dev_close,
393 .link_update = mlx4_link_update,
394 .promiscuous_enable = mlx4_promiscuous_enable,
395 .promiscuous_disable = mlx4_promiscuous_disable,
396 .allmulticast_enable = mlx4_allmulticast_enable,
397 .allmulticast_disable = mlx4_allmulticast_disable,
398 .mac_addr_remove = mlx4_mac_addr_remove,
399 .mac_addr_add = mlx4_mac_addr_add,
400 .mac_addr_set = mlx4_mac_addr_set,
401 .stats_get = mlx4_stats_get,
402 .stats_reset = mlx4_stats_reset,
403 .fw_version_get = mlx4_fw_version_get,
404 .dev_infos_get = mlx4_dev_infos_get,
405 .dev_supported_ptypes_get = mlx4_dev_supported_ptypes_get,
406 .vlan_filter_set = mlx4_vlan_filter_set,
407 .rx_queue_setup = mlx4_rx_queue_setup,
408 .tx_queue_setup = mlx4_tx_queue_setup,
409 .rx_queue_release = mlx4_rx_queue_release,
410 .tx_queue_release = mlx4_tx_queue_release,
411 .flow_ctrl_get = mlx4_flow_ctrl_get,
412 .flow_ctrl_set = mlx4_flow_ctrl_set,
413 .mtu_set = mlx4_mtu_set,
414 .filter_ctrl = mlx4_filter_ctrl,
415 .rx_queue_intr_enable = mlx4_rx_intr_enable,
416 .rx_queue_intr_disable = mlx4_rx_intr_disable,
417 .is_removed = mlx4_is_removed,
420 /* Operations available to a secondary process. */
421 static const struct eth_dev_ops mlx4_dev_sec_ops = {
422 .stats_get = mlx4_stats_get,
423 .stats_reset = mlx4_stats_reset,
424 .fw_version_get = mlx4_fw_version_get,
425 .dev_infos_get = mlx4_dev_infos_get,
429 * Get PCI information from struct ibv_device.
432 * Pointer to the Verbs device (struct ibv_device).
433 * @param[out] pci_addr
434 * PCI bus address output buffer.
437 * 0 on success, negative errno value otherwise and rte_errno is set.
440 mlx4_ibv_device_to_pci_addr(const struct ibv_device *device,
441 struct rte_pci_addr *pci_addr)
445 MKSTR(path, "%s/device/uevent", device->ibdev_path);
447 file = fopen(path, "rb");
452 while (fgets(line, sizeof(line), file) == line) {
453 size_t len = strlen(line);
456 /* Truncate long lines. */
457 if (len == (sizeof(line) - 1))
458 while (line[(len - 1)] != '\n') {
462 line[(len - 1)] = ret;
464 /* Extract information. */
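		/* Look for a PCI address in "domain:bus:devid.function" form,
		 * e.g. "0000:83:00.0" (illustrative value). */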
467 "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
471 &pci_addr->function) == 4) {
481 * Verify and store value for device argument.
484 * Key argument to verify.
486 * Value associated with key.
487 * @param[in, out] conf
488 * Shared configuration data.
491 * 0 on success, negative errno value otherwise and rte_errno is set.
494 mlx4_arg_parse(const char *key, const char *val, struct mlx4_conf *conf)
499 tmp = strtoul(val, NULL, 0);
502 WARN("%s: \"%s\" is not a valid integer", key, val);
505 if (strcmp(MLX4_PMD_PORT_KVARG, key) == 0) {
506 uint32_t ports = rte_log2_u32(conf->ports.present + 1);
509 ERROR("port index %lu outside range [0,%" PRIu32 ")",
513 if (!(conf->ports.present & (1 << tmp))) {
515 ERROR("invalid port index %lu", tmp);
518 conf->ports.enabled |= 1 << tmp;
519 } else if (strcmp(MLX4_MR_EXT_MEMSEG_EN_KVARG, key) == 0) {
520 conf->mr_ext_memseg_en = !!tmp;
523 WARN("%s: unknown parameter", key);
530 * Parse device parameters.
533 * Device arguments structure.
536 * 0 on success, negative errno value otherwise and rte_errno is set.
539 mlx4_args(struct rte_devargs *devargs, struct mlx4_conf *conf)
541 struct rte_kvargs *kvlist;
542 unsigned int arg_count;
548 kvlist = rte_kvargs_parse(devargs->args, pmd_mlx4_init_params);
549 if (kvlist == NULL) {
551 ERROR("failed to parse kvargs");
554 /* Process parameters. */
555 for (i = 0; pmd_mlx4_init_params[i]; ++i) {
556 arg_count = rte_kvargs_count(kvlist, pmd_mlx4_init_params[i]);
557 while (arg_count-- > 0) {
558 ret = rte_kvargs_process(kvlist,
559 pmd_mlx4_init_params[i],
560 (int (*)(const char *,
570 rte_kvargs_free(kvlist);
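/*
 * Illustrative usage, assuming the default kvarg names "port" and
 * "mr_ext_memseg_en": a device argument string such as
 * "port=0,mr_ext_memseg_en=1" restricts the PMD to physical port 0 and
 * keeps memseg extension enabled for MR creation.
 */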
575 * Interpret RSS capabilities reported by device.
577 * This function returns the set of usable Verbs RSS hash fields, with kernel
578 * quirks taken into account.
583 * Verbs protection domain.
584 * @param device_attr_ex
585 * Extended device attributes to interpret.
588 * Usable RSS hash fields mask in Verbs format.
591 mlx4_hw_rss_sup(struct ibv_context *ctx, struct ibv_pd *pd,
592 struct ibv_device_attr_ex *device_attr_ex)
594 uint64_t hw_rss_sup = device_attr_ex->rss_caps.rx_hash_fields_mask;
595 struct ibv_cq *cq = NULL;
596 struct ibv_wq *wq = NULL;
597 struct ibv_rwq_ind_table *ind = NULL;
598 struct ibv_qp *qp = NULL;
601 WARN("no RSS capabilities reported; disabling support for UDP"
602 " RSS and inner VXLAN RSS");
603 return IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 |
604 IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6 |
605 IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP;
607 if (!(hw_rss_sup & IBV_RX_HASH_INNER))
610 * Although reported as supported, missing code in some Linux
611 * versions (v4.15, v4.16) prevents the creation of hash QPs with
614 * There is no choice but to attempt to instantiate a temporary RSS
615 * context in order to confirm its support.
617 cq = mlx4_glue->create_cq(ctx, 1, NULL, NULL, 0);
618 wq = cq ? mlx4_glue->create_wq
620 &(struct ibv_wq_init_attr){
621 .wq_type = IBV_WQT_RQ,
627 ind = wq ? mlx4_glue->create_rwq_ind_table
629 &(struct ibv_rwq_ind_table_init_attr){
630 .log_ind_tbl_size = 0,
634 qp = ind ? mlx4_glue->create_qp_ex
636 &(struct ibv_qp_init_attr_ex){
638 (IBV_QP_INIT_ATTR_PD |
639 IBV_QP_INIT_ATTR_RX_HASH |
640 IBV_QP_INIT_ATTR_IND_TABLE),
641 .qp_type = IBV_QPT_RAW_PACKET,
645 .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
646 .rx_hash_key_len = MLX4_RSS_HASH_KEY_SIZE,
647 .rx_hash_key = mlx4_rss_hash_key_default,
648 .rx_hash_fields_mask = hw_rss_sup,
652 WARN("disabling unusable inner RSS capability due to kernel"
654 hw_rss_sup &= ~IBV_RX_HASH_INNER;
656 claim_zero(mlx4_glue->destroy_qp(qp));
659 claim_zero(mlx4_glue->destroy_rwq_ind_table(ind));
661 claim_zero(mlx4_glue->destroy_wq(wq));
663 claim_zero(mlx4_glue->destroy_cq(cq));
667 static struct rte_pci_driver mlx4_driver;
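/*
 * rte_memseg_walk() callback: record the lowest virtual address used by any
 * memory segment so that UAR address space can be reserved below it.
 */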
670 find_lower_va_bound(const struct rte_memseg_list *msl,
671 const struct rte_memseg *ms, void *arg)
680 *addr = RTE_MIN(*addr, ms->addr);
686 * Reserve UAR address space for primary process.
688 * A process-local resource is used by both primary and secondary processes to avoid
689 * duplicate reservation. The space has to be available in both the primary and
690 * secondary processes; TXQ UAR maps to this area using a fixed mmap w/o double
694 * 0 on success, a negative errno value otherwise and rte_errno is set.
697 mlx4_uar_init_primary(void)
699 struct mlx4_shared_data *sd = mlx4_shared_data;
700 void *addr = (void *)0;
704 /* find out lower bound of hugepage segments */
705 rte_memseg_walk(find_lower_va_bound, &addr);
707 /* Keep some distance from hugepages to minimize potential conflicts. */
707 addr = RTE_PTR_SUB(addr, (uintptr_t)(MLX4_UAR_OFFSET + MLX4_UAR_SIZE));
708 /* anonymous mmap, no real memory consumption. */
709 addr = mmap(addr, MLX4_UAR_SIZE,
710 PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
711 if (addr == MAP_FAILED) {
712 ERROR("failed to reserve UAR address space, please"
713 " adjust MLX4_UAR_SIZE or try --base-virtaddr");
717 /* Accept either same addr or a new addr returned from mmap if target
720 INFO("reserved UAR address space: %p", addr);
721 sd->uar_base = addr; /* for primary and secondary UAR re-mmap. */
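	/* Secondary processes later mmap() the same range at this address,
	 * see mlx4_uar_init_secondary(). */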
726 * Unmap UAR address space reserved for primary process.
729 mlx4_uar_uninit_primary(void)
731 struct mlx4_shared_data *sd = mlx4_shared_data;
735 munmap(sd->uar_base, MLX4_UAR_SIZE);
740 * Reserve UAR address space for the secondary process, aligned with the primary process.
743 * 0 on success, a negative errno value otherwise and rte_errno is set.
746 mlx4_uar_init_secondary(void)
748 struct mlx4_shared_data *sd = mlx4_shared_data;
749 struct mlx4_local_data *ld = &mlx4_local_data;
752 if (ld->uar_base) { /* Already reserved. */
753 assert(sd->uar_base == ld->uar_base);
756 assert(sd->uar_base);
757 /* anonymous mmap, no real memory consumption. */
758 addr = mmap(sd->uar_base, MLX4_UAR_SIZE,
759 PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
760 if (addr == MAP_FAILED) {
761 ERROR("UAR mmap failed: %p size: %llu",
762 sd->uar_base, MLX4_UAR_SIZE);
766 if (sd->uar_base != addr) {
767 ERROR("UAR address %p size %llu occupied, please"
768 " adjust MLX4_UAR_OFFSET or try EAL parameter"
770 sd->uar_base, MLX4_UAR_SIZE);
775 INFO("reserved UAR address space: %p", addr);
780 * Unmap UAR address space reserved for secondary process.
783 mlx4_uar_uninit_secondary(void)
785 struct mlx4_local_data *ld = &mlx4_local_data;
789 munmap(ld->uar_base, MLX4_UAR_SIZE);
794 * PMD global initialization.
796 * Independent of any individual device, this function initializes global
797 * per-PMD data structures, distinguishing between primary and secondary processes.
798 * Hence, each initialization is performed once per process.
801 * 0 on success, a negative errno value otherwise and rte_errno is set.
806 struct mlx4_shared_data *sd;
807 struct mlx4_local_data *ld = &mlx4_local_data;
810 if (mlx4_init_shared_data())
812 sd = mlx4_shared_data;
814 rte_spinlock_lock(&sd->lock);
815 switch (rte_eal_process_type()) {
816 case RTE_PROC_PRIMARY:
819 LIST_INIT(&sd->mem_event_cb_list);
820 rte_rwlock_init(&sd->mem_event_rwlock);
821 rte_mem_event_callback_register("MLX4_MEM_EVENT_CB",
822 mlx4_mr_mem_event_cb, NULL);
823 mlx4_mp_init_primary();
824 ret = mlx4_uar_init_primary();
827 sd->init_done = true;
829 case RTE_PROC_SECONDARY:
832 mlx4_mp_init_secondary();
833 ret = mlx4_uar_init_secondary();
837 ld->init_done = true;
842 rte_spinlock_unlock(&sd->lock);
845 switch (rte_eal_process_type()) {
846 case RTE_PROC_PRIMARY:
847 mlx4_uar_uninit_primary();
848 mlx4_mp_uninit_primary();
849 rte_mem_event_callback_unregister("MLX4_MEM_EVENT_CB", NULL);
851 case RTE_PROC_SECONDARY:
852 mlx4_uar_uninit_secondary();
853 mlx4_mp_uninit_secondary();
858 rte_spinlock_unlock(&sd->lock);
859 mlx4_uninit_shared_data();
864 * DPDK callback to register a PCI device.
866 * This function creates an Ethernet device for each port of a given
870 * PCI driver structure (mlx4_driver).
872 * PCI device information.
875 * 0 on success, negative errno value otherwise and rte_errno is set.
878 mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
880 struct ibv_device **list;
881 struct ibv_device *ibv_dev;
883 struct ibv_context *attr_ctx = NULL;
884 struct ibv_device_attr device_attr;
885 struct ibv_device_attr_ex device_attr_ex;
886 struct mlx4_conf conf = {
888 .mr_ext_memseg_en = 1,
894 err = mlx4_init_once();
896 ERROR("unable to init PMD global data: %s",
897 strerror(rte_errno));
900 assert(pci_drv == &mlx4_driver);
901 list = mlx4_glue->get_device_list(&i);
905 if (rte_errno == ENOSYS)
906 ERROR("cannot list devices, is ib_uverbs loaded?");
911 * For each listed device, check related sysfs entry against
912 * the provided PCI ID.
915 struct rte_pci_addr pci_addr;
918 DEBUG("checking device \"%s\"", list[i]->name);
919 if (mlx4_ibv_device_to_pci_addr(list[i], &pci_addr))
921 if ((pci_dev->addr.domain != pci_addr.domain) ||
922 (pci_dev->addr.bus != pci_addr.bus) ||
923 (pci_dev->addr.devid != pci_addr.devid) ||
924 (pci_dev->addr.function != pci_addr.function))
926 vf = (pci_dev->id.device_id ==
927 PCI_DEVICE_ID_MELLANOX_CONNECTX3VF);
928 INFO("PCI information matches, using device \"%s\" (VF: %s)",
929 list[i]->name, (vf ? "true" : "false"));
930 attr_ctx = mlx4_glue->open_device(list[i]);
934 if (attr_ctx == NULL) {
935 mlx4_glue->free_device_list(list);
939 ERROR("cannot access device, is mlx4_ib loaded?");
943 ERROR("cannot use device, are drivers up to date?");
951 DEBUG("device opened");
952 if (mlx4_glue->query_device(attr_ctx, &device_attr)) {
956 INFO("%u port(s) detected", device_attr.phys_port_cnt);
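	/* Mark every detected physical port as present (one bit per port). */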
957 conf.ports.present |= (UINT64_C(1) << device_attr.phys_port_cnt) - 1;
958 if (mlx4_args(pci_dev->device.devargs, &conf)) {
959 ERROR("failed to process device arguments");
963 /* Use all ports when none are defined */
964 if (!conf.ports.enabled)
965 conf.ports.enabled = conf.ports.present;
966 /* Retrieve extended device attributes. */
967 if (mlx4_glue->query_device_ex(attr_ctx, NULL, &device_attr_ex)) {
971 assert(device_attr.max_sge >= MLX4_MAX_SGE);
972 for (i = 0; i < device_attr.phys_port_cnt; i++) {
973 uint32_t port = i + 1; /* ports are indexed from one */
974 struct ibv_context *ctx = NULL;
975 struct ibv_port_attr port_attr;
976 struct ibv_pd *pd = NULL;
977 struct mlx4_priv *priv = NULL;
978 struct rte_eth_dev *eth_dev = NULL;
979 struct ether_addr mac;
980 char name[RTE_ETH_NAME_MAX_LEN];
982 /* If port is not enabled, skip. */
983 if (!(conf.ports.enabled & (1 << i)))
985 DEBUG("using port %u", port);
986 ctx = mlx4_glue->open_device(ibv_dev);
991 snprintf(name, sizeof(name), "%s port %u",
992 mlx4_glue->get_device_name(ibv_dev), port);
993 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
994 eth_dev = rte_eth_dev_attach_secondary(name);
995 if (eth_dev == NULL) {
996 ERROR("cannot attach rte ethdev");
1001 priv = eth_dev->data->dev_private;
1002 if (!priv->verbs_alloc_ctx.enabled) {
1003 ERROR("secondary process is not supported"
1004 " due to lack of external allocator"
1006 rte_errno = ENOTSUP;
1010 eth_dev->device = &pci_dev->device;
1011 eth_dev->dev_ops = &mlx4_dev_sec_ops;
1012 /* Receive command fd from primary process. */
1013 err = mlx4_mp_req_verbs_cmd_fd(eth_dev);
1018 /* Remap UAR for Tx queues. */
1019 err = mlx4_tx_uar_remap(eth_dev, err);
1025 * Ethdev pointer is still required as input since
1026 * the primary device is not accessible from the
1027 * secondary process.
1029 eth_dev->tx_pkt_burst = mlx4_tx_burst;
1030 eth_dev->rx_pkt_burst = mlx4_rx_burst;
1031 claim_zero(mlx4_glue->close_device(ctx));
1032 rte_eth_copy_pci_info(eth_dev, pci_dev);
1033 rte_eth_dev_probing_finish(eth_dev);
1036 /* Check port status. */
1037 err = mlx4_glue->query_port(ctx, port, &port_attr);
1040 ERROR("port query failed: %s", strerror(err));
1043 if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
1045 ERROR("port %d is not configured in Ethernet mode",
1049 if (port_attr.state != IBV_PORT_ACTIVE)
1050 DEBUG("port %d is not active: \"%s\" (%d)",
1051 port, mlx4_glue->port_state_str(port_attr.state),
1053 /* Make asynchronous FD non-blocking to handle interrupts. */
1054 err = mlx4_fd_set_non_blocking(ctx->async_fd);
1056 ERROR("cannot make asynchronous FD non-blocking: %s",
1060 /* Allocate protection domain. */
1061 pd = mlx4_glue->alloc_pd(ctx);
1064 ERROR("PD allocation failure");
1067 /* from rte_ethdev.c */
1068 priv = rte_zmalloc("ethdev private structure",
1070 RTE_CACHE_LINE_SIZE);
1073 ERROR("priv allocation failure");
1077 priv->device_attr = device_attr;
1080 priv->mtu = ETHER_MTU;
1082 priv->hw_csum = !!(device_attr.device_cap_flags &
1083 IBV_DEVICE_RAW_IP_CSUM);
1084 DEBUG("checksum offloading is %ssupported",
1085 (priv->hw_csum ? "" : "not "));
1086 /* Only ConnectX-3 Pro supports tunneling. */
1087 priv->hw_csum_l2tun =
1089 (device_attr.vendor_part_id ==
1090 PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO);
1091 DEBUG("L2 tunnel checksum offloads are %ssupported",
1092 priv->hw_csum_l2tun ? "" : "not ");
1093 priv->hw_rss_sup = mlx4_hw_rss_sup(priv->ctx, priv->pd,
1095 DEBUG("supported RSS hash fields mask: %016" PRIx64,
1097 priv->hw_rss_max_qps =
1098 device_attr_ex.rss_caps.max_rwq_indirection_table_size;
1099 DEBUG("MAX RSS queues %d", priv->hw_rss_max_qps);
1100 priv->hw_fcs_strip = !!(device_attr_ex.raw_packet_caps &
1101 IBV_RAW_PACKET_CAP_SCATTER_FCS);
1102 DEBUG("FCS stripping toggling is %ssupported",
1103 priv->hw_fcs_strip ? "" : "not ");
1105 ((device_attr_ex.tso_caps.max_tso > 0) &&
1106 (device_attr_ex.tso_caps.supported_qpts &
1107 (1 << IBV_QPT_RAW_PACKET)));
1109 priv->tso_max_payload_sz =
1110 device_attr_ex.tso_caps.max_tso;
1111 DEBUG("TSO is %ssupported",
1112 priv->tso ? "" : "not ");
1113 priv->mr_ext_memseg_en = conf.mr_ext_memseg_en;
1114 /* Configure the first MAC address by default. */
1115 err = mlx4_get_mac(priv, &mac.addr_bytes);
1117 ERROR("cannot get MAC address, is mlx4_en loaded?"
1118 " (error: %s)", strerror(err));
1121 INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
1123 mac.addr_bytes[0], mac.addr_bytes[1],
1124 mac.addr_bytes[2], mac.addr_bytes[3],
1125 mac.addr_bytes[4], mac.addr_bytes[5]);
1126 /* Register MAC address. */
1130 char ifname[IF_NAMESIZE];
1132 if (mlx4_get_ifname(priv, &ifname) == 0)
1133 DEBUG("port %u ifname is \"%s\"",
1134 priv->port, ifname);
1136 DEBUG("port %u ifname is unknown", priv->port);
1139 /* Get actual MTU if possible. */
1140 mlx4_mtu_get(priv, &priv->mtu);
1141 DEBUG("port %u MTU is %u", priv->port, priv->mtu);
1142 eth_dev = rte_eth_dev_allocate(name);
1143 if (eth_dev == NULL) {
1145 ERROR("cannot allocate rte ethdev");
1148 eth_dev->data->dev_private = priv;
1149 eth_dev->data->mac_addrs = priv->mac;
1150 eth_dev->device = &pci_dev->device;
1151 rte_eth_copy_pci_info(eth_dev, pci_dev);
1152 /* Initialize local interrupt handle for current port. */
1153 priv->intr_handle = (struct rte_intr_handle){
1155 .type = RTE_INTR_HANDLE_EXT,
1158 * Override ethdev interrupt handle pointer with private
1159 * handle instead of that of the parent PCI device used by
1160 * default. This prevents it from being shared between all
1161 * ports of the same PCI device since each of them is
1162 * associated with its own Verbs context.
1164 * Rx interrupts in particular require this as the PMD has
1165 * no control over the registration of queue interrupts
1166 * besides setting up eth_dev->intr_handle; the rest is
1167 * handled by rte_intr_rx_ctl().
1169 eth_dev->intr_handle = &priv->intr_handle;
1170 priv->dev_data = eth_dev->data;
1171 eth_dev->dev_ops = &mlx4_dev_ops;
1172 #ifdef HAVE_IBV_MLX4_BUF_ALLOCATORS
1173 /* Hint libmlx4 to use PMD allocator for data plane resources */
1174 struct mlx4dv_ctx_allocators alctr = {
1175 .alloc = &mlx4_alloc_verbs_buf,
1176 .free = &mlx4_free_verbs_buf,
1179 err = mlx4_glue->dv_set_context_attr
1180 (ctx, MLX4DV_SET_CTX_ATTR_BUF_ALLOCATORS,
1181 (void *)((uintptr_t)&alctr));
1183 WARN("Verbs external allocator is not supported");
1185 priv->verbs_alloc_ctx.enabled = 1;
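		/* This flag is what the secondary process attach path checks;
		 * without the external Verbs allocator, secondary processes
		 * are rejected. */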
1187 /* Bring Ethernet device up. */
1188 DEBUG("forcing Ethernet interface up");
1189 mlx4_dev_set_link_up(eth_dev);
1190 /* Update link status once if waiting for LSC. */
1191 if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1192 mlx4_link_update(eth_dev, 0);
1194 * Once the device is added to the list of memory event
1195 * callbacks, its global MR cache table cannot be expanded
1196 * on the fly because of deadlock. If it overflows, lookup
1197 * should be done by searching MR list linearly, which is slow.
1199 err = mlx4_mr_btree_init(&priv->mr.cache,
1200 MLX4_MR_BTREE_CACHE_N * 2,
1201 eth_dev->device->numa_node);
1203 /* rte_errno is already set. */
1206 /* Add device to memory callback list. */
1207 rte_rwlock_write_lock(&mlx4_shared_data->mem_event_rwlock);
1208 LIST_INSERT_HEAD(&mlx4_shared_data->mem_event_cb_list,
1209 priv, mem_event_cb);
1210 rte_rwlock_write_unlock(&mlx4_shared_data->mem_event_rwlock);
1211 rte_eth_dev_probing_finish(eth_dev);
1215 if (eth_dev != NULL)
1216 eth_dev->data->dev_private = NULL;
1218 claim_zero(mlx4_glue->dealloc_pd(pd));
1220 claim_zero(mlx4_glue->close_device(ctx));
1221 if (eth_dev != NULL) {
1222 /* mac_addrs must not be freed because it is part of dev_private */
1223 eth_dev->data->mac_addrs = NULL;
1224 rte_eth_dev_release_port(eth_dev);
1229 * XXX if something went wrong in the loop above, there is a resource
1230 * leak (ctx, pd, priv, DPDK ethdev) but we can do nothing about it as
1231 * long as DPDK does not provide a way to deallocate an ethdev and a
1232 * way to enumerate the registered ethdevs to free the previous ones.
1236 claim_zero(mlx4_glue->close_device(attr_ctx));
1238 mlx4_glue->free_device_list(list);
1244 static const struct rte_pci_id mlx4_pci_id_map[] = {
1246 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
1247 PCI_DEVICE_ID_MELLANOX_CONNECTX3)
1250 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
1251 PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO)
1254 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
1255 PCI_DEVICE_ID_MELLANOX_CONNECTX3VF)
1262 static struct rte_pci_driver mlx4_driver = {
1264 .name = MLX4_DRIVER_NAME
1266 .id_table = mlx4_pci_id_map,
1267 .probe = mlx4_pci_probe,
1268 .drv_flags = RTE_PCI_DRV_INTR_LSC |
1269 RTE_PCI_DRV_INTR_RMV,
1272 #ifdef RTE_IBVERBS_LINK_DLOPEN
1275 * Suffix RTE_EAL_PMD_PATH with "-glue".
1277 * This function performs a sanity check on RTE_EAL_PMD_PATH before
1278 * suffixing its last component.
1281 * Output buffer, should be large enough otherwise NULL is returned.
1286 * Pointer to @p buf or @p NULL in case suffix cannot be appended.
1289 mlx4_glue_path(char *buf, size_t size)
1291 static const char *const bad[] = { "/", ".", "..", NULL };
1292 const char *path = RTE_EAL_PMD_PATH;
1293 size_t len = strlen(path);
1297 while (len && path[len - 1] == '/')
1299 for (off = len; off && path[off - 1] != '/'; --off)
1301 for (i = 0; bad[i]; ++i)
1302 if (!strncmp(path + off, bad[i], (int)(len - off)))
1304 i = snprintf(buf, size, "%.*s-glue", (int)len, path);
1305 if (i == -1 || (size_t)i >= size)
1309 ERROR("unable to append \"-glue\" to last component of"
1310 " RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\"),"
1311 " please re-configure DPDK");
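/*
 * For example (hypothetical path), an RTE_EAL_PMD_PATH of
 * "/usr/local/lib/dpdk/pmds" would yield "/usr/local/lib/dpdk/pmds-glue",
 * which mlx4_glue_init() then searches for the rdma-core glue library.
 */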
1316 * Initialization routine for run-time dependency on rdma-core.
1319 mlx4_glue_init(void)
1321 char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")];
1322 const char *path[] = {
1324 * A basic security check is necessary before trusting
1325 * MLX4_GLUE_PATH, which may override RTE_EAL_PMD_PATH.
1327 (geteuid() == getuid() && getegid() == getgid() ?
1328 getenv("MLX4_GLUE_PATH") : NULL),
1330 * When RTE_EAL_PMD_PATH is set, use its glue-suffixed
1331 * variant, otherwise let dlopen() look up libraries on its
1334 (*RTE_EAL_PMD_PATH ?
1335 mlx4_glue_path(glue_path, sizeof(glue_path)) : ""),
1338 void *handle = NULL;
1342 while (!handle && i != RTE_DIM(path)) {
1351 end = strpbrk(path[i], ":;");
1353 end = path[i] + strlen(path[i]);
1354 len = end - path[i];
1359 ret = snprintf(name, sizeof(name), "%.*s%s" MLX4_GLUE,
1361 (!len || *(end - 1) == '/') ? "" : "/");
1364 if (sizeof(name) != (size_t)ret + 1)
1366 DEBUG("looking for rdma-core glue as \"%s\"", name);
1367 handle = dlopen(name, RTLD_LAZY);
1378 WARN("cannot load glue library: %s", dlmsg);
1381 sym = dlsym(handle, "mlx4_glue");
1382 if (!sym || !*sym) {
1386 ERROR("cannot resolve glue symbol: %s", dlmsg);
1394 WARN("cannot initialize PMD due to missing run-time"
1395 " dependency on rdma-core libraries (libibverbs,"
1403 * Driver initialization routine.
1405 RTE_INIT(rte_mlx4_pmd_init)
1408 * MLX4_DEVICE_FATAL_CLEANUP tells ibv_destroy functions we
1409 * want to get a success errno value in case of calling them
1410 * when the device was removed.
1412 setenv("MLX4_DEVICE_FATAL_CLEANUP", "1", 1);
1414 * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
1415 * huge pages. Calling ibv_fork_init() during init allows
1416 * applications to use fork() safely for purposes other than
1417 * using this PMD, which is not supported in forked processes.
1419 setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
1420 #ifdef RTE_IBVERBS_LINK_DLOPEN
1421 if (mlx4_glue_init())
1426 /* Glue structure must not contain any NULL pointers. */
1430 for (i = 0; i != sizeof(*mlx4_glue) / sizeof(void *); ++i)
1431 assert(((const void *const *)mlx4_glue)[i]);
1434 if (strcmp(mlx4_glue->version, MLX4_GLUE_VERSION)) {
1435 ERROR("rdma-core glue \"%s\" mismatch: \"%s\" is required",
1436 mlx4_glue->version, MLX4_GLUE_VERSION);
1439 mlx4_glue->fork_init();
1440 rte_pci_register(&mlx4_driver);
1443 RTE_PMD_EXPORT_NAME(net_mlx4, __COUNTER__);
1444 RTE_PMD_REGISTER_PCI_TABLE(net_mlx4, mlx4_pci_id_map);
1445 RTE_PMD_REGISTER_KMOD_DEP(net_mlx4,
1446 "* ib_uverbs & mlx4_en & mlx4_core & mlx4_ib");