/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <linux/rtnetlink.h>
/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_eal_memconfig.h>
#include <rte_kvargs.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_glue.h"
#include "mlx5_flow.h"
/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"

/* Device parameter to enable RX completion entry padding to 128B. */
#define MLX5_RXQ_CQE_PAD_EN "rxq_cqe_pad_en"

/* Device parameter to enable padding Rx packet to cacheline size. */
#define MLX5_RXQ_PKT_PAD_EN "rxq_pkt_pad_en"

/* Device parameter to enable Multi-Packet Rx queue. */
#define MLX5_RX_MPRQ_EN "mprq_en"

/* Device parameter to configure log 2 of the number of strides for MPRQ. */
#define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num"

/* Device parameter to limit the size of memcpy'd packet for MPRQ. */
#define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len"

/* Device parameter to set the minimum number of Rx queues to enable MPRQ. */
#define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq"

/* Device parameter to configure inline send. */
#define MLX5_TXQ_INLINE "txq_inline"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling inline send.
 */
#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling vectorized Tx.
 */
#define MLX5_TXQS_MAX_VEC "txqs_max_vec"

/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"

/* Device parameter to include 2 dsegs in the title WQEBB. */
#define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en"

/* Device parameter to limit the size of inlining packet. */
#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"

/* Device parameter to enable hardware Tx vector. */
#define MLX5_TX_VEC_EN "tx_vec_en"

/* Device parameter to enable hardware Rx vector. */
#define MLX5_RX_VEC_EN "rx_vec_en"

/* Allow L3 VXLAN flow creation. */
#define MLX5_L3_VXLAN_EN "l3_vxlan_en"

/* Activate DV flow steering. */
#define MLX5_DV_FLOW_EN "dv_flow_en"

/* Activate Netlink support in VF mode. */
#define MLX5_VF_NL_EN "vf_nl_en"

/* Enable extending memsegs when creating a MR. */
#define MLX5_MR_EXT_MEMSEG_EN "mr_ext_memseg_en"

/* Select port representors to instantiate. */
#define MLX5_REPRESENTOR "representor"
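/*
 * Illustrative note (not from the original sources): the keys above are
 * consumed from the PCI device argument string. A hypothetical example,
 * with a placeholder PCI address and values, could look like:
 *
 *   -w 0000:03:00.0,rxq_cqe_comp_en=1,mprq_en=1,txqs_min_inline=8
 *
 * See mlx5_args()/mlx5_args_check() below for how each key is validated.
 */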
#ifndef HAVE_IBV_MLX5_MOD_MPW
#define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
#endif

#ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
#endif
static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";

/* Shared memory between primary and secondary processes. */
struct mlx5_shared_data *mlx5_shared_data;

/* Spinlock for mlx5_shared_data allocation. */
static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* Process local data for secondary processes. */
static struct mlx5_local_data mlx5_local_data;

/** Driver-specific log messages type. */
int mlx5_logtype;

/** Data associated with devices to spawn. */
struct mlx5_dev_spawn_data {
	uint32_t ifindex; /**< Network interface index. */
	uint32_t max_port; /**< IB device maximal port index. */
	uint32_t ibv_port; /**< IB device physical port index. */
	struct mlx5_switch_info info; /**< Switch information. */
	struct ibv_device *ibv_dev; /**< Associated IB device. */
	struct rte_eth_dev *eth_dev; /**< Associated Ethernet device. */
};

static LIST_HEAD(, mlx5_ibv_shared) mlx5_ibv_list = LIST_HEAD_INITIALIZER();
static pthread_mutex_t mlx5_ibv_list_mutex = PTHREAD_MUTEX_INITIALIZER;
/**
 * Allocate shared IB device context. If there is a multiport device, the
 * master and representors will share this context; if there is a single
 * port dedicated IB device, the context will be used only by the given
 * port due to unification.
 *
 * Routine first searches the context list for the specified IB device name;
 * if found, the shared context is assumed and its reference counter is
 * incremented. If no context is found, a new one is created and initialized
 * with the specified IB device context and parameters.
 *
 * @param[in] spawn
 *   Pointer to the IB device attributes (name, port, etc).
 *
 * @return
 *   Pointer to mlx5_ibv_shared object on success,
 *   otherwise NULL and rte_errno is set.
 */
static struct mlx5_ibv_shared *
mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn)
{
	struct mlx5_ibv_shared *sh;
	int err = 0;
	uint32_t i;

	assert(spawn);
	/* Secondary process should not create the shared context. */
	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
	pthread_mutex_lock(&mlx5_ibv_list_mutex);
	/* Search for IB context by device name. */
	LIST_FOREACH(sh, &mlx5_ibv_list, next) {
		if (!strcmp(sh->ibdev_name, spawn->ibv_dev->name)) {
			sh->refcnt++;
			goto exit;
		}
	}
	/* No device found, we have to create new shared context. */
	assert(spawn->max_port);
	sh = rte_zmalloc("ethdev shared ib context",
			 sizeof(struct mlx5_ibv_shared) +
			 spawn->max_port *
			 sizeof(struct mlx5_ibv_shared_port),
			 RTE_CACHE_LINE_SIZE);
	if (!sh) {
		DRV_LOG(ERR, "shared context allocation failure");
		rte_errno = ENOMEM;
		goto exit;
	}
	/* Try to open IB device with DV first, then usual Verbs. */
	errno = 0;
	sh->ctx = mlx5_glue->dv_open_device(spawn->ibv_dev);
	if (sh->ctx) {
		sh->devx = 1;
		DRV_LOG(DEBUG, "DevX is supported");
	} else {
		sh->ctx = mlx5_glue->open_device(spawn->ibv_dev);
		if (!sh->ctx) {
			err = errno ? errno : ENODEV;
			goto error;
		}
		DRV_LOG(DEBUG, "DevX is NOT supported");
	}
	err = mlx5_glue->query_device_ex(sh->ctx, NULL, &sh->device_attr);
	if (err) {
		DRV_LOG(DEBUG, "ibv_query_device_ex() failed");
		goto error;
	}
	sh->refcnt = 1;
	sh->max_port = spawn->max_port;
	strncpy(sh->ibdev_name, sh->ctx->device->name,
		sizeof(sh->ibdev_name));
	strncpy(sh->ibdev_path, sh->ctx->device->ibdev_path,
		sizeof(sh->ibdev_path));
	pthread_mutex_init(&sh->intr_mutex, NULL);
	/*
	 * Setting port_id to max unallowed value means
	 * there is no interrupt subhandler installed for
	 * the given port index i.
	 */
	for (i = 0; i < sh->max_port; i++)
		sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
	sh->pd = mlx5_glue->alloc_pd(sh->ctx);
	if (sh->pd == NULL) {
		DRV_LOG(ERR, "PD allocation failure");
		err = ENOMEM;
		goto error;
	}
	LIST_INSERT_HEAD(&mlx5_ibv_list, sh, next);
exit:
	pthread_mutex_unlock(&mlx5_ibv_list_mutex);
	return sh;
error:
	pthread_mutex_unlock(&mlx5_ibv_list_mutex);
	assert(sh);
	if (sh->pd)
		claim_zero(mlx5_glue->dealloc_pd(sh->pd));
	if (sh->ctx)
		claim_zero(mlx5_glue->close_device(sh->ctx));
	rte_free(sh);
	assert(err > 0);
	rte_errno = err;
	return NULL;
}
/**
 * Free shared IB device context. Decrement counter and if zero free
 * all allocated resources and close handles.
 *
 * @param[in] sh
 *   Pointer to mlx5_ibv_shared object to free.
 */
static void
mlx5_free_shared_ibctx(struct mlx5_ibv_shared *sh)
{
	pthread_mutex_lock(&mlx5_ibv_list_mutex);
#ifndef NDEBUG
	/* Check the object presence in the list. */
	struct mlx5_ibv_shared *lctx;

	LIST_FOREACH(lctx, &mlx5_ibv_list, next)
		if (lctx == sh)
			break;
	assert(lctx);
	if (!lctx) {
		DRV_LOG(ERR, "Freeing non-existing shared IB context");
		goto exit;
	}
#endif /* NDEBUG */
	assert(sh->refcnt);
	/* Secondary process should not free the shared context. */
	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
	if (--sh->refcnt)
		goto exit;
	LIST_REMOVE(sh, next);
	/*
	 * Ensure there is no async event handler installed.
	 * Only primary process handles async device events.
	 */
	assert(!sh->intr_cnt);
	if (sh->intr_cnt)
		rte_intr_callback_unregister
			(&sh->intr_handle, mlx5_dev_interrupt_handler, sh);
	pthread_mutex_destroy(&sh->intr_mutex);
	if (sh->pd)
		claim_zero(mlx5_glue->dealloc_pd(sh->pd));
	if (sh->ctx)
		claim_zero(mlx5_glue->close_device(sh->ctx));
	rte_free(sh);
exit:
	pthread_mutex_unlock(&mlx5_ibv_list_mutex);
}
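/*
 * Illustrative pairing of the two routines above (hypothetical caller,
 * mirroring what mlx5_dev_spawn()/mlx5_dev_close() do later in this file):
 *
 *   struct mlx5_ibv_shared *sh = mlx5_alloc_shared_ibctx(spawn);
 *
 *   if (!sh)
 *           return NULL; // rte_errno is set by the allocator
 *   ...
 *   mlx5_free_shared_ibctx(sh); // drops the reference taken above
 */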
/**
 * Initialize DR related data within private structure.
 * Routine checks the reference counter and does actual
 * resources creation/initialization only if counter is zero.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 *
 * @return
 *   Zero on success, positive error code otherwise.
 */
static int
mlx5_alloc_shared_dr(struct mlx5_priv *priv)
{
#ifdef HAVE_MLX5DV_DR
	struct mlx5_ibv_shared *sh = priv->sh;
	int err = 0;
	void *ns;

	if (sh->dv_refcnt) {
		/* Shared DV/DR structures are already initialized. */
		sh->dv_refcnt++;
		priv->dr_shared = 1;
		return 0;
	}
	/* Reference counter is zero, we should initialize structures. */
	ns = mlx5dv_dr_create_ns(sh->ctx, MLX5DV_DR_NS_DOMAIN_INGRESS_BYPASS);
	if (!ns) {
		DRV_LOG(ERR, "ingress mlx5dv_dr_create_ns failed");
		err = errno;
		goto error;
	}
	sh->rx_ns = ns;
	ns = mlx5dv_dr_create_ns(sh->ctx, MLX5DV_DR_NS_DOMAIN_EGRESS_BYPASS);
	if (!ns) {
		DRV_LOG(ERR, "egress mlx5dv_dr_create_ns failed");
		err = errno;
		goto error;
	}
	sh->tx_ns = ns;
	pthread_mutex_init(&sh->dv_mutex, NULL);
	sh->dv_refcnt++;
	priv->dr_shared = 1;
	return 0;
error:
	/* Rollback the created objects. */
	if (sh->rx_ns) {
		mlx5dv_dr_destroy_ns(sh->rx_ns);
		sh->rx_ns = NULL;
	}
	if (sh->tx_ns) {
		mlx5dv_dr_destroy_ns(sh->tx_ns);
		sh->tx_ns = NULL;
	}
	return err;
#else
	(void)priv;
	return 0;
#endif
}
/**
 * Destroy DR related data within private structure.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 */
static void
mlx5_free_shared_dr(struct mlx5_priv *priv)
{
#ifdef HAVE_MLX5DV_DR
	struct mlx5_ibv_shared *sh;

	if (!priv->dr_shared)
		return;
	priv->dr_shared = 0;
	sh = priv->sh;
	assert(sh->dv_refcnt);
	if (sh->dv_refcnt && --sh->dv_refcnt)
		return;
	if (sh->rx_ns) {
		mlx5dv_dr_destroy_ns(sh->rx_ns);
		sh->rx_ns = NULL;
	}
	if (sh->tx_ns) {
		mlx5dv_dr_destroy_ns(sh->tx_ns);
		sh->tx_ns = NULL;
	}
	pthread_mutex_destroy(&sh->dv_mutex);
#else
	(void)priv;
#endif
}
/**
 * Initialize shared data between primary and secondary process.
 *
 * A memzone is reserved by primary process and secondary processes attach to
 * the memzone.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_init_shared_data(void)
{
	const struct rte_memzone *mz;
	int ret = 0;

	rte_spinlock_lock(&mlx5_shared_data_lock);
	if (mlx5_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate shared memory. */
			mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
						 sizeof(*mlx5_shared_data),
						 SOCKET_ID_ANY, 0);
			if (mz == NULL) {
				DRV_LOG(ERR,
					"Cannot allocate mlx5 shared data");
				ret = -rte_errno;
				goto error;
			}
			mlx5_shared_data = mz->addr;
			memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
			rte_spinlock_init(&mlx5_shared_data->lock);
		} else {
			/* Lookup allocated shared memory. */
			mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
			if (mz == NULL) {
				DRV_LOG(ERR,
					"Cannot attach mlx5 shared data");
				ret = -rte_errno;
				goto error;
			}
			mlx5_shared_data = mz->addr;
			memset(&mlx5_local_data, 0, sizeof(mlx5_local_data));
		}
	}
error:
	rte_spinlock_unlock(&mlx5_shared_data_lock);
	return ret;
}
/**
 * Uninitialize shared data between primary and secondary process.
 *
 * The pointer of secondary process is dereferenced and primary process frees
 * the memzone.
 */
static void
mlx5_uninit_shared_data(void)
{
	const struct rte_memzone *mz;

	rte_spinlock_lock(&mlx5_shared_data_lock);
	if (mlx5_shared_data) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
			rte_memzone_free(mz);
		} else {
			memset(&mlx5_local_data, 0, sizeof(mlx5_local_data));
		}
		mlx5_shared_data = NULL;
	}
	rte_spinlock_unlock(&mlx5_shared_data_lock);
}
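/*
 * Sketch of the intended call order (hypothetical caller): any process
 * type may call mlx5_init_shared_data() before touching mlx5_shared_data;
 * the primary reserves the memzone, secondaries merely look it up:
 *
 *   if (mlx5_init_shared_data())
 *           return -rte_errno;
 *   ... use mlx5_shared_data ...
 *   mlx5_uninit_shared_data();
 */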
/**
 * Retrieve integer value from environment variable.
 *
 * @param[in] name
 *   Environment variable name.
 *
 * @return
 *   Integer value, 0 if the variable is not set.
 */
int
mlx5_getenv_int(const char *name)
{
	const char *val = getenv(name);

	if (val == NULL)
		return 0;
	return atoi(val);
}
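/*
 * Illustrative use (the variable name below is hypothetical, not one the
 * driver actually reads):
 *
 *   if (mlx5_getenv_int("MLX5_EXAMPLE_KNOB") > 0)
 *           enable_example_feature();
 */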
/**
 * Verbs callback to allocate a memory. This function should allocate the space
 * according to the size provided residing inside a huge page.
 * Please note that all allocation must respect the alignment from libmlx5
 * (i.e. currently sysconf(_SC_PAGESIZE)).
 *
 * @param[in] size
 *   The size in bytes of the memory to allocate.
 * @param[in] data
 *   A pointer to the callback data.
 *
 * @return
 *   Allocated buffer, NULL otherwise and rte_errno is set.
 */
static void *
mlx5_alloc_verbs_buf(size_t size, void *data)
{
	struct mlx5_priv *priv = data;
	void *ret;
	size_t alignment = sysconf(_SC_PAGESIZE);
	unsigned int socket = SOCKET_ID_ANY;

	if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) {
		const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;

		socket = ctrl->socket;
	} else if (priv->verbs_alloc_ctx.type ==
		   MLX5_VERBS_ALLOC_TYPE_RX_QUEUE) {
		const struct mlx5_rxq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;

		socket = ctrl->socket;
	}
	assert(data != NULL);
	ret = rte_malloc_socket(__func__, size, alignment, socket);
	if (!ret && size)
		rte_errno = ENOMEM;
	return ret;
}
/**
 * Verbs callback to free a memory.
 *
 * @param[in] ptr
 *   A pointer to the memory to free.
 * @param[in] data
 *   A pointer to the callback data.
 */
static void
mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
{
	assert(data != NULL);
	rte_free(ptr);
}
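/*
 * The two callbacks above are meant to be registered together through
 * mlx5dv_set_context_attr(MLX5DV_CTX_ATTR_BUF_ALLOCATORS); see the
 * struct mlx5dv_ctx_allocators setup in mlx5_dev_spawn() below.
 */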
/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_dev_close(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret;

	DRV_LOG(DEBUG, "port %u closing device \"%s\"",
		dev->data->port_id,
		((priv->sh->ctx != NULL) ? priv->sh->ctx->device->name : ""));
	/* In case mlx5_dev_stop() has not been called. */
	mlx5_dev_interrupt_handler_uninstall(dev);
	mlx5_traffic_disable(dev);
	mlx5_flow_flush(dev, NULL);
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	rte_wmb();
	/* Disable datapath on secondary process. */
	mlx5_mp_req_stop_rxtx(dev);
	if (priv->rxqs != NULL) {
		/* XXX race condition if mlx5_rx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->rxqs_n); ++i)
			mlx5_rxq_release(dev, i);
		priv->rxqs_n = 0;
		priv->rxqs = NULL;
	}
	if (priv->txqs != NULL) {
		/* XXX race condition if mlx5_tx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->txqs_n); ++i)
			mlx5_txq_release(dev, i);
		priv->txqs_n = 0;
		priv->txqs = NULL;
	}
	mlx5_mprq_free_mp(dev);
	mlx5_mr_release(dev);
	mlx5_free_shared_dr(priv);
	if (priv->rss_conf.rss_key != NULL)
		rte_free(priv->rss_conf.rss_key);
	if (priv->reta_idx != NULL)
		rte_free(priv->reta_idx);
	if (priv->config.vf)
		mlx5_nl_mac_addr_flush(dev);
	if (priv->nl_socket_route >= 0)
		close(priv->nl_socket_route);
	if (priv->nl_socket_rdma >= 0)
		close(priv->nl_socket_rdma);
	if (priv->tcf_context)
		mlx5_flow_tcf_context_destroy(priv->tcf_context);
	/*
	 * Free the shared context in last turn, because the cleanup
	 * routines above may use some shared fields, like
	 * mlx5_nl_mac_addr_flush() uses ibdev_path for retrieving
	 * ifindex if Netlink fails.
	 */
	mlx5_free_shared_ibctx(priv->sh);
	priv->sh = NULL;
	ret = mlx5_hrxq_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
			dev->data->port_id);
	ret = mlx5_ind_table_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some indirection table still remain",
			dev->data->port_id);
	ret = mlx5_rxq_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Verbs Rx queue still remain",
			dev->data->port_id);
	ret = mlx5_rxq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Rx queues still remain",
			dev->data->port_id);
	ret = mlx5_txq_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain",
			dev->data->port_id);
	ret = mlx5_txq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Tx queues still remain",
			dev->data->port_id);
	ret = mlx5_flow_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some flows still remain",
			dev->data->port_id);
	if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
		unsigned int c = 0;
		uint16_t port_id;

		RTE_ETH_FOREACH_DEV_OF(port_id, dev->device) {
			struct mlx5_priv *opriv =
				rte_eth_devices[port_id].data->dev_private;

			if (!opriv ||
			    opriv->domain_id != priv->domain_id ||
			    &rte_eth_devices[port_id] == dev)
				continue;
			++c;
		}
		/* Switch domain is shared with other devices. */
		if (!c)
			claim_zero(rte_eth_switch_domain_free(priv->domain_id));
	}
	memset(priv, 0, sizeof(*priv));
	priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
	/*
	 * Reset mac_addrs to NULL such that it is not freed as part of
	 * rte_eth_dev_release_port(). mac_addrs is part of dev_private so
	 * it is freed when dev_private is freed.
	 */
	dev->data->mac_addrs = NULL;
}
const struct eth_dev_ops mlx5_dev_ops = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.promiscuous_enable = mlx5_promiscuous_enable,
	.promiscuous_disable = mlx5_promiscuous_disable,
	.allmulticast_enable = mlx5_allmulticast_enable,
	.allmulticast_disable = mlx5_allmulticast_disable,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.fw_version_get = mlx5_fw_version_get,
	.dev_infos_get = mlx5_dev_infos_get,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.set_mc_addr_list = mlx5_set_mc_addr_list,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.reta_update = mlx5_dev_rss_reta_update,
	.reta_query = mlx5_dev_rss_reta_query,
	.rss_hash_update = mlx5_rss_hash_update,
	.rss_hash_conf_get = mlx5_rss_hash_conf_get,
	.filter_ctrl = mlx5_dev_filter_ctrl,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
	.rx_queue_count = mlx5_rx_queue_count,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
	.is_removed = mlx5_is_removed,
};
/* Available operations from secondary process. */
static const struct eth_dev_ops mlx5_dev_sec_ops = {
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.fw_version_get = mlx5_fw_version_get,
	.dev_infos_get = mlx5_dev_infos_get,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
};
/* Available operations in flow isolated mode. */
const struct eth_dev_ops mlx5_dev_ops_isolate = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.promiscuous_enable = mlx5_promiscuous_enable,
	.promiscuous_disable = mlx5_promiscuous_disable,
	.allmulticast_enable = mlx5_allmulticast_enable,
	.allmulticast_disable = mlx5_allmulticast_disable,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.fw_version_get = mlx5_fw_version_get,
	.dev_infos_get = mlx5_dev_infos_get,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.set_mc_addr_list = mlx5_set_mc_addr_list,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.filter_ctrl = mlx5_dev_filter_ctrl,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
	.is_removed = mlx5_is_removed,
};
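/*
 * Sketch of how the tables above are swapped at run time (this mirrors
 * what rte_flow_isolate() triggers for this PMD; shown for illustration):
 *
 *   dev->dev_ops = enable ? &mlx5_dev_ops_isolate : &mlx5_dev_ops;
 */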
/**
 * Verify and store value for device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param opaque
 *   User data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_args_check(const char *key, const char *val, void *opaque)
{
	struct mlx5_dev_config *config = opaque;
	unsigned long tmp;

	/* No-op, port representors are processed in mlx5_dev_spawn(). */
	if (!strcmp(MLX5_REPRESENTOR, key))
		return 0;
	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		rte_errno = errno;
		DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
		return -rte_errno;
	}
	if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
		config->cqe_comp = !!tmp;
	} else if (strcmp(MLX5_RXQ_CQE_PAD_EN, key) == 0) {
		config->cqe_pad = !!tmp;
	} else if (strcmp(MLX5_RXQ_PKT_PAD_EN, key) == 0) {
		config->hw_padding = !!tmp;
	} else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) {
		config->mprq.enabled = !!tmp;
	} else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) {
		config->mprq.stride_num_n = tmp;
	} else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) {
		config->mprq.max_memcpy_len = tmp;
	} else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) {
		config->mprq.min_rxqs_num = tmp;
	} else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
		config->txq_inline = tmp;
	} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
		config->txqs_inline = tmp;
	} else if (strcmp(MLX5_TXQS_MAX_VEC, key) == 0) {
		config->txqs_vec = tmp;
	} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
		config->mps = !!tmp ? MLX5_MPW : MLX5_MPW_DISABLED;
	} else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
		config->mpw_hdr_dseg = !!tmp;
	} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
		config->inline_max_packet_sz = tmp;
	} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
		config->tx_vec_en = !!tmp;
	} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
		config->rx_vec_en = !!tmp;
	} else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) {
		config->l3_vxlan_en = !!tmp;
	} else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
		config->vf_nl_en = !!tmp;
	} else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) {
		config->dv_flow_en = !!tmp;
	} else if (strcmp(MLX5_MR_EXT_MEMSEG_EN, key) == 0) {
		config->mr_ext_memseg_en = !!tmp;
	} else {
		DRV_LOG(WARNING, "%s: unknown parameter", key);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
}
/**
 * Parse device parameters.
 *
 * @param config
 *   Pointer to device configuration structure.
 * @param devargs
 *   Device arguments structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
{
	const char **params = (const char *[]){
		MLX5_RXQ_CQE_COMP_EN,
		MLX5_RXQ_CQE_PAD_EN,
		MLX5_RXQ_PKT_PAD_EN,
		MLX5_RX_MPRQ_EN,
		MLX5_RX_MPRQ_LOG_STRIDE_NUM,
		MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
		MLX5_RXQS_MIN_MPRQ,
		MLX5_TXQ_INLINE,
		MLX5_TXQS_MIN_INLINE,
		MLX5_TXQS_MAX_VEC,
		MLX5_TXQ_MPW_EN,
		MLX5_TXQ_MPW_HDR_DSEG_EN,
		MLX5_TXQ_MAX_INLINE_LEN,
		MLX5_TX_VEC_EN,
		MLX5_RX_VEC_EN,
		MLX5_L3_VXLAN_EN,
		MLX5_VF_NL_EN,
		MLX5_DV_FLOW_EN,
		MLX5_MR_EXT_MEMSEG_EN,
		MLX5_REPRESENTOR,
		NULL,
	};
	struct rte_kvargs *kvlist;
	int ret = 0;
	int i;

	if (devargs == NULL)
		return 0;
	/* Following UGLY cast is done to pass checkpatch. */
	kvlist = rte_kvargs_parse(devargs->args, params);
	if (kvlist == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	/* Process parameters. */
	for (i = 0; (params[i] != NULL); ++i) {
		if (rte_kvargs_count(kvlist, params[i])) {
			ret = rte_kvargs_process(kvlist, params[i],
						 mlx5_args_check, config);
			if (ret) {
				rte_errno = EINVAL;
				rte_kvargs_free(kvlist);
				return -rte_errno;
			}
		}
	}
	rte_kvargs_free(kvlist);
	return 0;
}
static struct rte_pci_driver mlx5_driver;

static int
find_lower_va_bound(const struct rte_memseg_list *msl,
		    const struct rte_memseg *ms, void *arg)
{
	void **addr = arg;

	if (msl->external)
		return 0;
	if (*addr == NULL)
		*addr = ms->addr;
	else
		*addr = RTE_MIN(*addr, ms->addr);
	return 0;
}
/**
 * Reserve UAR address space for primary process.
 *
 * Process local resource is used by both primary and secondary to avoid
 * duplicate reservation. The space has to be available on both primary and
 * secondary process, TXQ UAR maps to this area using fixed mmap w/o double
 * check.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_uar_init_primary(void)
{
	struct mlx5_shared_data *sd = mlx5_shared_data;
	void *addr = (void *)0;

	if (sd->uar_base)
		return 0;
	/* find out lower bound of hugepage segments */
	rte_memseg_walk(find_lower_va_bound, &addr);
	/* keep distance to hugepages to minimize potential conflicts. */
	addr = RTE_PTR_SUB(addr, (uintptr_t)(MLX5_UAR_OFFSET + MLX5_UAR_SIZE));
	/* anonymous mmap, no real memory consumption. */
	addr = mmap(addr, MLX5_UAR_SIZE,
		    PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) {
		DRV_LOG(ERR,
			"Failed to reserve UAR address space, please"
			" adjust MLX5_UAR_SIZE or try --base-virtaddr");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Accept either same addr or a new addr returned from mmap if target
	 * range occupied.
	 */
	DRV_LOG(INFO, "Reserved UAR address space: %p", addr);
	sd->uar_base = addr; /* for primary and secondary UAR re-mmap. */
	return 0;
}
/**
 * Unmap UAR address space reserved for primary process.
 */
static void
mlx5_uar_uninit_primary(void)
{
	struct mlx5_shared_data *sd = mlx5_shared_data;

	if (!sd->uar_base)
		return;
	munmap(sd->uar_base, MLX5_UAR_SIZE);
	sd->uar_base = NULL;
}
/**
 * Reserve UAR address space for secondary process, align with primary process.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_uar_init_secondary(void)
{
	struct mlx5_shared_data *sd = mlx5_shared_data;
	struct mlx5_local_data *ld = &mlx5_local_data;
	void *addr;

	if (ld->uar_base) { /* Already reserved. */
		assert(sd->uar_base == ld->uar_base);
		return 0;
	}
	assert(sd->uar_base);
	/* anonymous mmap, no real memory consumption. */
	addr = mmap(sd->uar_base, MLX5_UAR_SIZE,
		    PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) {
		DRV_LOG(ERR, "UAR mmap failed: %p size: %llu",
			sd->uar_base, MLX5_UAR_SIZE);
		rte_errno = ENXIO;
		return -rte_errno;
	}
	if (sd->uar_base != addr) {
		DRV_LOG(ERR,
			"UAR address %p size %llu occupied, please"
			" adjust MLX5_UAR_OFFSET or try EAL parameter"
			" --base-virtaddr",
			sd->uar_base, MLX5_UAR_SIZE);
		rte_errno = ENXIO;
		return -rte_errno;
	}
	ld->uar_base = addr;
	DRV_LOG(INFO, "Reserved UAR address space: %p", addr);
	return 0;
}
/**
 * Unmap UAR address space reserved for secondary process.
 */
static void
mlx5_uar_uninit_secondary(void)
{
	struct mlx5_local_data *ld = &mlx5_local_data;

	if (!ld->uar_base)
		return;
	munmap(ld->uar_base, MLX5_UAR_SIZE);
	ld->uar_base = NULL;
}
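/*
 * Illustrative sketch (assumed parameters, not part of the driver): once
 * the range is reserved with PROT_NONE above, a Tx queue can later map its
 * UAR page at a fixed offset inside it without clobbering other mappings:
 *
 *   void *page = mmap(RTE_PTR_ADD(uar_base, offset), page_size,
 *                     PROT_READ | PROT_WRITE,
 *                     MAP_SHARED | MAP_FIXED, fd, 0);
 *
 * `offset`, `page_size` and `fd` are placeholders; the real remapping is
 * done by mlx5_tx_uar_remap() referenced from mlx5_dev_spawn().
 */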
/**
 * PMD global initialization.
 *
 * Independent from individual device, this function initializes global
 * per-PMD data structures distinguishing primary and secondary processes.
 * Hence, each initialization is called once per process.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_init_once(void)
{
	struct mlx5_shared_data *sd;
	struct mlx5_local_data *ld = &mlx5_local_data;
	int ret;

	if (mlx5_init_shared_data())
		return -rte_errno;
	sd = mlx5_shared_data;
	assert(sd);
	rte_spinlock_lock(&sd->lock);
	switch (rte_eal_process_type()) {
	case RTE_PROC_PRIMARY:
		if (sd->init_done)
			break;
		LIST_INIT(&sd->mem_event_cb_list);
		rte_rwlock_init(&sd->mem_event_rwlock);
		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
						mlx5_mr_mem_event_cb, NULL);
		mlx5_mp_init_primary();
		ret = mlx5_uar_init_primary();
		if (ret)
			goto error;
		sd->init_done = true;
		break;
	case RTE_PROC_SECONDARY:
		if (ld->init_done)
			break;
		mlx5_mp_init_secondary();
		ret = mlx5_uar_init_secondary();
		if (ret)
			goto error;
		++sd->secondary_cnt;
		ld->init_done = true;
		break;
	default:
		break;
	}
	rte_spinlock_unlock(&sd->lock);
	return 0;
error:
	switch (rte_eal_process_type()) {
	case RTE_PROC_PRIMARY:
		mlx5_uar_uninit_primary();
		mlx5_mp_uninit_primary();
		rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB", NULL);
		break;
	case RTE_PROC_SECONDARY:
		mlx5_uar_uninit_secondary();
		mlx5_mp_uninit_secondary();
		break;
	default:
		break;
	}
	rte_spinlock_unlock(&sd->lock);
	mlx5_uninit_shared_data();
	return -rte_errno;
}
/**
 * Spawn an Ethernet device from Verbs information.
 *
 * @param dpdk_dev
 *   Backing DPDK device.
 * @param spawn
 *   Verbs device parameters (name, port, switch_info) to spawn.
 * @param config
 *   Device configuration parameters.
 *
 * @return
 *   A valid Ethernet device object on success, NULL otherwise and rte_errno
 *   is set. The following errors are defined:
 *
 *   EBUSY: device is not supposed to be spawned.
 *   EEXIST: device is already spawned.
 */
static struct rte_eth_dev *
mlx5_dev_spawn(struct rte_device *dpdk_dev,
	       struct mlx5_dev_spawn_data *spawn,
	       struct mlx5_dev_config config)
{
	const struct mlx5_switch_info *switch_info = &spawn->info;
	struct mlx5_ibv_shared *sh = NULL;
	struct ibv_port_attr port_attr;
	struct mlx5dv_context dv_attr = { .comp_mask = 0 };
	struct rte_eth_dev *eth_dev = NULL;
	struct mlx5_priv *priv = NULL;
	int err = 0;
	unsigned int hw_padding = 0;
	unsigned int mps;
	unsigned int cqe_comp;
	unsigned int cqe_pad = 0;
	unsigned int tunnel_en = 0;
	unsigned int mpls_en = 0;
	unsigned int swp = 0;
	unsigned int mprq = 0;
	unsigned int mprq_min_stride_size_n = 0;
	unsigned int mprq_max_stride_size_n = 0;
	unsigned int mprq_min_stride_num_n = 0;
	unsigned int mprq_max_stride_num_n = 0;
	struct ether_addr mac;
	char name[RTE_ETH_NAME_MAX_LEN];
	int own_domain_id = 0;
	uint16_t port_id;
	unsigned int i;

	/* Determine if this port representor is supposed to be spawned. */
	if (switch_info->representor && dpdk_dev->devargs) {
		struct rte_eth_devargs eth_da;

		err = rte_eth_devargs_parse(dpdk_dev->devargs->args, &eth_da);
		if (err) {
			rte_errno = -err;
			DRV_LOG(ERR, "failed to process device arguments: %s",
				strerror(rte_errno));
			return NULL;
		}
		for (i = 0; i < eth_da.nb_representor_ports; ++i)
			if (eth_da.representor_ports[i] ==
			    (uint16_t)switch_info->port_name)
				break;
		if (i == eth_da.nb_representor_ports) {
			rte_errno = EBUSY;
			return NULL;
		}
	}
	/* Build device name. */
	if (!switch_info->representor)
		strlcpy(name, dpdk_dev->name, sizeof(name));
	else
		snprintf(name, sizeof(name), "%s_representor_%u",
			 dpdk_dev->name, switch_info->port_name);
	/* check if the device is already spawned */
	if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) {
		rte_errno = EEXIST;
		return NULL;
	}
	DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (eth_dev == NULL) {
			DRV_LOG(ERR, "can not attach rte ethdev");
			rte_errno = ENOMEM;
			return NULL;
		}
		eth_dev->device = dpdk_dev;
		eth_dev->dev_ops = &mlx5_dev_sec_ops;
		/* Receive command fd from primary process */
		err = mlx5_mp_req_verbs_cmd_fd(eth_dev);
		if (err < 0)
			return NULL;
		/* Remap UAR for Tx queues. */
		err = mlx5_tx_uar_remap(eth_dev, err);
		if (err)
			return NULL;
		/*
		 * Ethdev pointer is still required as input since
		 * the primary device is not accessible from the
		 * secondary process.
		 */
		eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev);
		eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev);
		return eth_dev;
	}
	sh = mlx5_alloc_shared_ibctx(spawn);
	if (!sh)
		return NULL;
	config.devx = sh->devx;
#ifdef HAVE_IBV_MLX5_MOD_SWP
	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
#endif
	/*
	 * Multi-packet send is supported by ConnectX-4 Lx PF as well
	 * as all ConnectX-5 devices.
	 */
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
#endif
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
#endif
	mlx5_glue->dv_query_device(sh->ctx, &dv_attr);
	if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
		if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
			DRV_LOG(DEBUG, "enhanced MPW is supported");
			mps = MLX5_MPW_ENHANCED;
		} else {
			DRV_LOG(DEBUG, "MPW is supported");
			mps = MLX5_MPW;
		}
	} else {
		DRV_LOG(DEBUG, "MPW isn't supported");
		mps = MLX5_MPW_DISABLED;
	}
#ifdef HAVE_IBV_MLX5_MOD_SWP
	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
		swp = dv_attr.sw_parsing_caps.sw_parsing_offloads;
	DRV_LOG(DEBUG, "SWP support: %u", swp);
#endif
	config.swp = !!swp;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
		struct mlx5dv_striding_rq_caps mprq_caps =
			dv_attr.striding_rq_caps;

		DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d",
			mprq_caps.min_single_stride_log_num_of_bytes);
		DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d",
			mprq_caps.max_single_stride_log_num_of_bytes);
		DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d",
			mprq_caps.min_single_wqe_log_num_of_strides);
		DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d",
			mprq_caps.max_single_wqe_log_num_of_strides);
		DRV_LOG(DEBUG, "\tsupported_qpts: %d",
			mprq_caps.supported_qpts);
		DRV_LOG(DEBUG, "device supports Multi-Packet RQ");
		mprq = 1;
		mprq_min_stride_size_n =
			mprq_caps.min_single_stride_log_num_of_bytes;
		mprq_max_stride_size_n =
			mprq_caps.max_single_stride_log_num_of_bytes;
		mprq_min_stride_num_n =
			mprq_caps.min_single_wqe_log_num_of_strides;
		mprq_max_stride_num_n =
			mprq_caps.max_single_wqe_log_num_of_strides;
		config.mprq.stride_num_n = RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
						   mprq_min_stride_num_n);
	}
#endif
	if (RTE_CACHE_LINE_SIZE == 128 &&
	    !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
		cqe_comp = 0;
	else
		cqe_comp = 1;
	config.cqe_comp = cqe_comp;
#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
	/* Whether device supports 128B Rx CQE padding. */
	cqe_pad = RTE_CACHE_LINE_SIZE == 128 &&
		  (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD);
#endif
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
		tunnel_en = ((dv_attr.tunnel_offloads_caps &
			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) &&
			     (dv_attr.tunnel_offloads_caps &
			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE));
	}
	DRV_LOG(DEBUG, "tunnel offloading is %ssupported",
		tunnel_en ? "" : "not ");
#else
	DRV_LOG(WARNING,
		"tunnel offloading disabled due to old OFED/rdma-core version");
#endif
	config.tunnel_en = tunnel_en;
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
	mpls_en = ((dv_attr.tunnel_offloads_caps &
		    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
		   (dv_attr.tunnel_offloads_caps &
		    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
	DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported",
		mpls_en ? "" : "not ");
#else
	DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to"
		" old OFED/rdma-core version or firmware configuration");
#endif
	config.mpls_en = mpls_en;
	/* Check port status. */
	err = mlx5_glue->query_port(sh->ctx, spawn->ibv_port, &port_attr);
	if (err) {
		DRV_LOG(ERR, "port query failed: %s", strerror(err));
		goto error;
	}
	if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
		DRV_LOG(ERR, "port is not configured in Ethernet mode");
		err = EINVAL;
		goto error;
	}
	if (port_attr.state != IBV_PORT_ACTIVE)
		DRV_LOG(DEBUG, "port is not active: \"%s\" (%d)",
			mlx5_glue->port_state_str(port_attr.state),
			port_attr.state);
	/* Allocate private eth device data. */
	priv = rte_zmalloc("ethdev private structure",
			   sizeof(*priv),
			   RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DRV_LOG(ERR, "priv allocation failure");
		err = ENOMEM;
		goto error;
	}
	priv->sh = sh;
	priv->ibv_dev = spawn->ibv_dev;
	priv->ibv_port = spawn->ibv_port;
	priv->mtu = ETHER_MTU;
#ifndef RTE_ARCH_64
	/* Initialize UAR access locks for 32bit implementations. */
	rte_spinlock_init(&priv->uar_lock_cq);
	for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
		rte_spinlock_init(&priv->uar_lock[i]);
#endif
	/* Some internal functions rely on Netlink sockets, open them now. */
	priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA);
	priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE);
	priv->representor = !!switch_info->representor;
	priv->master = !!switch_info->master;
	priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
	/*
	 * Currently we support single E-Switch per PF configurations
	 * only and vport_id field contains the vport index for
	 * associated VF, which is deduced from representor port name.
	 * For example, let's have the IB device port 10, it has
	 * attached network device eth0, which has port name attribute
	 * pf0vf2, we can deduce the VF number as 2, and set vport index
	 * as 3 (2+1). This assigning schema should be changed if the
	 * multiple E-Switch instances per PF configurations or/and PCI
	 * subfunctions are added.
	 */
	priv->vport_id = switch_info->representor ?
			 switch_info->port_name + 1 : -1;
	/* representor_id field keeps the unmodified port/VF index. */
	priv->representor_id = switch_info->representor ?
			       switch_info->port_name : -1;
	/*
	 * Look for sibling devices in order to reuse their switch domain
	 * if any, otherwise allocate one.
	 */
	RTE_ETH_FOREACH_DEV_OF(port_id, dpdk_dev) {
		const struct mlx5_priv *opriv =
			rte_eth_devices[port_id].data->dev_private;

		if (!opriv ||
		    opriv->domain_id ==
		    RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
			continue;
		priv->domain_id = opriv->domain_id;
		break;
	}
	if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
		err = rte_eth_switch_domain_alloc(&priv->domain_id);
		if (err) {
			err = rte_errno;
			DRV_LOG(ERR, "unable to allocate switch domain: %s",
				strerror(rte_errno));
			goto error;
		}
		own_domain_id = 1;
	}
	err = mlx5_args(&config, dpdk_dev->devargs);
	if (err) {
		err = rte_errno;
		DRV_LOG(ERR, "failed to process device arguments: %s",
			strerror(rte_errno));
		goto error;
	}
	config.hw_csum = !!(sh->device_attr.device_cap_flags_ex &
			    IBV_DEVICE_RAW_IP_CSUM);
	DRV_LOG(DEBUG, "checksum offloading is %ssupported",
		(config.hw_csum ? "" : "not "));
#if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \
	!defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	DRV_LOG(DEBUG, "counters are not supported");
#endif
#ifndef HAVE_IBV_FLOW_DV_SUPPORT
	if (config.dv_flow_en) {
		DRV_LOG(WARNING, "DV flow is not supported");
		config.dv_flow_en = 0;
	}
#endif
	config.ind_table_max_size =
		sh->device_attr.rss_caps.max_rwq_indirection_table_size;
	/*
	 * Remove this check once DPDK supports larger/variable
	 * indirection tables.
	 */
	if (config.ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
		config.ind_table_max_size = ETH_RSS_RETA_SIZE_512;
	DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
		config.ind_table_max_size);
	config.hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
				  IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
	DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
		(config.hw_vlan_strip ? "" : "not "));
	config.hw_fcs_strip = !!(sh->device_attr.raw_packet_caps &
				 IBV_RAW_PACKET_CAP_SCATTER_FCS);
	DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
		(config.hw_fcs_strip ? "" : "not "));
#if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
	hw_padding = !!sh->device_attr.rx_pad_end_addr_align;
#elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
	hw_padding = !!(sh->device_attr.device_cap_flags_ex &
			IBV_DEVICE_PCI_WRITE_END_PADDING);
#endif
	if (config.hw_padding && !hw_padding) {
		DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
		config.hw_padding = 0;
	} else if (config.hw_padding) {
		DRV_LOG(DEBUG, "Rx end alignment padding is enabled");
	}
	config.tso = (sh->device_attr.tso_caps.max_tso > 0 &&
		      (sh->device_attr.tso_caps.supported_qpts &
		       (1 << IBV_QPT_RAW_PACKET)));
	if (config.tso)
		config.tso_max_payload_sz = sh->device_attr.tso_caps.max_tso;
	/*
	 * MPW is disabled by default, while the Enhanced MPW is enabled
	 * by default.
	 */
	if (config.mps == MLX5_ARG_UNSET)
		config.mps = (mps == MLX5_MPW_ENHANCED) ? MLX5_MPW_ENHANCED :
							  MLX5_MPW_DISABLED;
	else
		config.mps = config.mps ? mps : MLX5_MPW_DISABLED;
	DRV_LOG(INFO, "%sMPS is %s",
		config.mps == MLX5_MPW_ENHANCED ? "enhanced " : "",
		config.mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
	if (config.cqe_comp && !cqe_comp) {
		DRV_LOG(WARNING, "Rx CQE compression isn't supported");
		config.cqe_comp = 0;
	}
	if (config.cqe_pad && !cqe_pad) {
		DRV_LOG(WARNING, "Rx CQE padding isn't supported");
		config.cqe_pad = 0;
	} else if (config.cqe_pad) {
		DRV_LOG(INFO, "Rx CQE padding is enabled");
	}
	if (config.mprq.enabled && mprq) {
		if (config.mprq.stride_num_n > mprq_max_stride_num_n ||
		    config.mprq.stride_num_n < mprq_min_stride_num_n) {
			config.mprq.stride_num_n =
				RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
					mprq_min_stride_num_n);
			DRV_LOG(WARNING,
				"the number of strides"
				" for Multi-Packet RQ is out of range,"
				" setting default value (%u)",
				1 << config.mprq.stride_num_n);
		}
		config.mprq.min_stride_size_n = mprq_min_stride_size_n;
		config.mprq.max_stride_size_n = mprq_max_stride_size_n;
	} else if (config.mprq.enabled && !mprq) {
		DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
		config.mprq.enabled = 0;
	}
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL) {
		DRV_LOG(ERR, "can not allocate rte ethdev");
		err = ENOMEM;
		goto error;
	}
	/* Flag to call rte_eth_dev_release_port() in rte_eth_dev_close(). */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
	if (priv->representor) {
		eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
		eth_dev->data->representor_id = priv->representor_id;
	}
	eth_dev->data->dev_private = priv;
	priv->dev_data = eth_dev->data;
	eth_dev->data->mac_addrs = priv->mac;
	eth_dev->device = dpdk_dev;
	/* Configure the first MAC address by default. */
	if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
		DRV_LOG(ERR,
			"port %u cannot get MAC address, is mlx5_en"
			" loaded? (errno: %s)",
			eth_dev->data->port_id, strerror(rte_errno));
		err = ENODEV;
		goto error;
	}
	DRV_LOG(INFO,
		"port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
		eth_dev->data->port_id,
		mac.addr_bytes[0], mac.addr_bytes[1],
		mac.addr_bytes[2], mac.addr_bytes[3],
		mac.addr_bytes[4], mac.addr_bytes[5]);
#ifndef NDEBUG
	{
		char ifname[IF_NAMESIZE];

		if (mlx5_get_ifname(eth_dev, &ifname) == 0)
			DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
				eth_dev->data->port_id, ifname);
		else
			DRV_LOG(DEBUG, "port %u ifname is unknown",
				eth_dev->data->port_id);
	}
#endif
	/* Get actual MTU if possible. */
	err = mlx5_get_mtu(eth_dev, &priv->mtu);
	if (err) {
		err = rte_errno;
		goto error;
	}
	DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id,
		priv->mtu);
	/* Initialize burst functions to prevent crashes before link-up. */
	eth_dev->rx_pkt_burst = removed_rx_burst;
	eth_dev->tx_pkt_burst = removed_tx_burst;
	eth_dev->dev_ops = &mlx5_dev_ops;
	/* Register MAC address. */
	claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
	if (config.vf && config.vf_nl_en)
		mlx5_nl_mac_addr_sync(eth_dev);
	priv->tcf_context = mlx5_flow_tcf_context_create();
	if (!priv->tcf_context) {
		err = -rte_errno;
		DRV_LOG(WARNING,
			"flow rules relying on switch offloads will not be"
			" supported: cannot open libmnl socket: %s",
			strerror(rte_errno));
	} else {
		struct rte_flow_error error;
		unsigned int ifindex = mlx5_ifindex(eth_dev);

		if (!ifindex) {
			err = -rte_errno;
			error.message =
				"cannot retrieve network interface index";
		} else {
			err = mlx5_flow_tcf_init(priv->tcf_context,
						 ifindex, &error);
		}
		if (err) {
			DRV_LOG(WARNING,
				"flow rules relying on switch offloads will"
				" not be supported: %s: %s",
				error.message, strerror(rte_errno));
			mlx5_flow_tcf_context_destroy(priv->tcf_context);
			priv->tcf_context = NULL;
		}
	}
	if (config.dv_flow_en) {
		err = mlx5_alloc_shared_dr(priv);
		if (err)
			goto error;
	}
	TAILQ_INIT(&priv->flows);
	TAILQ_INIT(&priv->ctrl_flows);
	/* Hint libmlx5 to use PMD allocator for data plane resources */
	struct mlx5dv_ctx_allocators alctr = {
		.alloc = &mlx5_alloc_verbs_buf,
		.free = &mlx5_free_verbs_buf,
		.data = priv,
	};
	mlx5_glue->dv_set_context_attr(sh->ctx,
				       MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
				       (void *)((uintptr_t)&alctr));
	/* Bring Ethernet device up. */
	DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
		eth_dev->data->port_id);
	mlx5_set_link_up(eth_dev);
	/*
	 * Even though the interrupt handler is not installed yet,
	 * interrupts will still trigger on the async_fd from
	 * Verbs context returned by ibv_open_device().
	 */
	mlx5_link_update(eth_dev, 0);
	/* Store device configuration on private structure. */
	priv->config = config;
	/* Supported Verbs flow priority number detection. */
	err = mlx5_flow_discover_priorities(eth_dev);
	if (err < 0) {
		err = -err;
		goto error;
	}
	priv->config.flow_prio = err;
	/*
	 * Once the device is added to the list of memory event
	 * callback, its global MR cache table cannot be expanded
	 * on the fly because of deadlock. If it overflows, lookup
	 * should be done by searching MR list linearly, which is slow.
	 */
	err = mlx5_mr_btree_init(&priv->mr.cache,
				 MLX5_MR_BTREE_CACHE_N * 2,
				 eth_dev->device->numa_node);
	if (err) {
		err = rte_errno;
		goto error;
	}
	/* Add device to memory callback list. */
	rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
	LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
			 priv, mem_event_cb);
	rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
	return eth_dev;
error:
	if (priv) {
		if (priv->sh)
			mlx5_free_shared_dr(priv);
		if (priv->nl_socket_route >= 0)
			close(priv->nl_socket_route);
		if (priv->nl_socket_rdma >= 0)
			close(priv->nl_socket_rdma);
		if (priv->tcf_context)
			mlx5_flow_tcf_context_destroy(priv->tcf_context);
		if (own_domain_id)
			claim_zero(rte_eth_switch_domain_free(priv->domain_id));
		rte_free(priv);
		if (eth_dev != NULL)
			eth_dev->data->dev_private = NULL;
	}
	if (eth_dev != NULL) {
		/* mac_addrs must not be freed alone because part of dev_private */
		eth_dev->data->mac_addrs = NULL;
		rte_eth_dev_release_port(eth_dev);
	}
	if (sh)
		mlx5_free_shared_ibctx(sh);
	assert(err > 0);
	rte_errno = err;
	return NULL;
}
/**
 * Comparison callback to sort device data.
 *
 * This is meant to be used with qsort().
 *
 * @param a[in]
 *   Pointer to pointer to first data object.
 * @param b[in]
 *   Pointer to pointer to second data object.
 *
 * @return
 *   0 if both objects are equal, less than 0 if the first argument is less
 *   than the second, greater than 0 otherwise.
 */
static int
mlx5_dev_spawn_data_cmp(const void *a, const void *b)
{
	const struct mlx5_switch_info *si_a =
		&((const struct mlx5_dev_spawn_data *)a)->info;
	const struct mlx5_switch_info *si_b =
		&((const struct mlx5_dev_spawn_data *)b)->info;
	int ret;

	/* Master device first. */
	ret = si_b->master - si_a->master;
	if (ret)
		return ret;
	/* Then representor devices. */
	ret = si_b->representor - si_a->representor;
	if (ret)
		return ret;
	/* Unidentified devices come last in no specific order. */
	if (!si_a->representor)
		return 0;
	/* Order representors by name. */
	return si_a->port_name - si_b->port_name;
}
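/*
 * Example of the resulting probe order for a hypothetical device list:
 * master first, then representors sorted by port name, with unidentified
 * entries last: [ master, representor#0, representor#2, unknown ].
 */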
/**
 * DPDK callback to register a PCI device.
 *
 * This function spawns Ethernet devices out of a given PCI device.
 *
 * @param[in] pci_drv
 *   PCI driver structure (mlx5_driver).
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	       struct rte_pci_device *pci_dev)
{
	struct ibv_device **ibv_list;
	/*
	 * Number of found IB Devices matching with requested PCI BDF.
	 * nd != 1 means there are multiple IB devices over the same
	 * PCI device and we have representors and master.
	 */
	unsigned int nd = 0;
	/*
	 * Number of found IB device Ports. nd = 1 and np = 1..n means
	 * we have the single multiport IB device, and there may be
	 * representors attached to some of found ports.
	 */
	unsigned int np = 0;
	/*
	 * Number of DPDK ethernet devices to Spawn - either over
	 * multiple IB devices or multiple ports of single IB device.
	 * Actually this is the number of iterations to spawn.
	 */
	unsigned int ns = 0;
	struct mlx5_dev_config dev_config;
	int ret;

	ret = mlx5_init_once();
	if (ret) {
		DRV_LOG(ERR, "unable to init PMD global data: %s",
			strerror(rte_errno));
		return -rte_errno;
	}
	assert(pci_drv == &mlx5_driver);
	errno = 0;
	ibv_list = mlx5_glue->get_device_list(&ret);
	if (!ibv_list) {
		rte_errno = errno ? errno : ENOSYS;
		DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?");
		return -rte_errno;
	}
	/*
	 * First scan the list of all Infiniband devices to find
	 * matching ones, gathering into the list.
	 */
	struct ibv_device *ibv_match[ret + 1];
	int nl_route = -1;
	int nl_rdma = -1;
	unsigned int i;

	while (ret-- > 0) {
		struct rte_pci_addr pci_addr;

		DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name);
		if (mlx5_ibv_device_to_pci_addr(ibv_list[ret], &pci_addr))
			continue;
		if (pci_dev->addr.domain != pci_addr.domain ||
		    pci_dev->addr.bus != pci_addr.bus ||
		    pci_dev->addr.devid != pci_addr.devid ||
		    pci_dev->addr.function != pci_addr.function)
			continue;
		DRV_LOG(INFO, "PCI information matches for device \"%s\"",
			ibv_list[ret]->name);
		ibv_match[nd++] = ibv_list[ret];
	}
	ibv_match[nd] = NULL;
	if (!nd) {
		/* No device matches, just complain and bail out. */
		mlx5_glue->free_device_list(ibv_list);
		DRV_LOG(WARNING,
			"no Verbs device matches PCI device " PCI_PRI_FMT ","
			" are kernel drivers loaded?",
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function);
		rte_errno = ENOENT;
		ret = -rte_errno;
		return ret;
	}
	nl_route = mlx5_nl_init(NETLINK_ROUTE);
	nl_rdma = mlx5_nl_init(NETLINK_RDMA);
	if (nd == 1) {
		/*
		 * Found single matching device may have multiple ports.
		 * Each port may be representor, we have to check the port
		 * number and check the representors existence.
		 */
		if (nl_rdma >= 0)
			np = mlx5_nl_portnum(nl_rdma, ibv_match[0]->name);
		if (!np)
			DRV_LOG(WARNING, "can not get IB device \"%s\""
				" ports number", ibv_match[0]->name);
	}
	/*
	 * Now we can determine the maximal
	 * amount of devices to be spawned.
	 */
	struct mlx5_dev_spawn_data list[np ? np : nd];
	if (np > 1) {
		/*
		 * Single IB device with multiple ports found,
		 * it may be E-Switch master device and representors.
		 * We have to perform identification through the ports.
		 */
		assert(nl_rdma >= 0);
		assert(ns == 0);
		assert(nd == 1);
		for (i = 1; i <= np; ++i) {
			list[ns].max_port = np;
			list[ns].ibv_port = i;
			list[ns].ibv_dev = ibv_match[0];
			list[ns].eth_dev = NULL;
			list[ns].ifindex = mlx5_nl_ifindex
					(nl_rdma, list[ns].ibv_dev->name, i);
			if (!list[ns].ifindex) {
				/*
				 * No network interface index found for the
				 * specified port, it means there is no
				 * representor on this port. It's OK,
				 * there can be disabled ports, for example
				 * if sriov_numvfs < sriov_totalvfs.
				 */
				continue;
			}
			ret = mlx5_nl_switch_info
				       (nl_route,
					list[ns].ifindex,
					&list[ns].info);
			if (ret || (!list[ns].info.representor &&
				    !list[ns].info.master)) {
				/*
				 * We failed to recognize representors with
				 * Netlink, let's try to perform the task
				 * with sysfs.
				 */
				ret = mlx5_sysfs_switch_info
						(list[ns].ifindex,
						 &list[ns].info);
			}
			if (!ret && (list[ns].info.representor ^
				     list[ns].info.master))
				ns++;
		}
		if (!ns) {
			DRV_LOG(ERR,
				"unable to recognize master/representors"
				" on the IB device with multiple ports");
			rte_errno = ENOENT;
			ret = -rte_errno;
			goto exit;
		}
	} else {
		/*
		 * The existence of several matching entries (nd > 1) means
		 * port representors have been instantiated. No existing Verbs
		 * call nor sysfs entries can tell them apart, this can only
		 * be done through Netlink calls assuming kernel drivers are
		 * recent enough to support them.
		 *
		 * In the event of identification failure through Netlink,
		 * try again through sysfs, then:
		 *
		 * 1. A single IB device matches (nd == 1) with single
		 *    port (np=0/1) and is not a representor, assume
		 *    no switch support.
		 *
		 * 2. Otherwise no safe assumptions can be made;
		 *    complain louder and bail out.
		 */
		for (i = 0; i != nd; ++i) {
			memset(&list[ns].info, 0, sizeof(list[ns].info));
			list[ns].max_port = 1;
			list[ns].ibv_port = 1;
			list[ns].ibv_dev = ibv_match[i];
			list[ns].eth_dev = NULL;
			list[ns].ifindex = 0;
			if (nl_rdma >= 0)
				list[ns].ifindex = mlx5_nl_ifindex
					(nl_rdma, list[ns].ibv_dev->name, 1);
			if (!list[ns].ifindex) {
				char ifname[IF_NAMESIZE];

				/*
				 * Netlink failed, it may happen with old
				 * ib_core kernel driver (before 4.16).
				 * We can assume there is old driver because
				 * here we are processing single ports IB
				 * devices. Let's try sysfs to retrieve
				 * the ifindex. The method works for
				 * master device only.
				 */
				if (nd > 1) {
					/*
					 * Multiple devices found, assume
					 * representors, can not distinguish
					 * master/representor and retrieve
					 * ifindex via sysfs.
					 */
					continue;
				}
				ret = mlx5_get_master_ifname
					(ibv_match[i]->ibdev_path, &ifname);
				if (!ret)
					list[ns].ifindex =
						if_nametoindex(ifname);
				if (!list[ns].ifindex) {
					/*
					 * No network interface index found
					 * for the specified device, it means
					 * it is neither representor
					 * nor master.
					 */
					continue;
				}
			}
			ret = -1;
			if (nl_route >= 0)
				ret = mlx5_nl_switch_info
				       (nl_route,
					list[ns].ifindex,
					&list[ns].info);
			if (ret || (!list[ns].info.representor &&
				    !list[ns].info.master)) {
				/*
				 * We failed to recognize representors with
				 * Netlink, let's try to perform the task
				 * with sysfs.
				 */
				ret = mlx5_sysfs_switch_info
						(list[ns].ifindex,
						 &list[ns].info);
			}
			if (!ret && (list[ns].info.representor ^
				     list[ns].info.master)) {
				ns++;
			} else if ((nd == 1) &&
				   !list[ns].info.representor &&
				   !list[ns].info.master) {
				/*
				 * Single IB device with
				 * one physical port and
				 * attached network device.
				 * Maybe SR-IOV is not enabled
				 * or there are no representors.
				 */
				DRV_LOG(INFO, "no E-Switch support detected");
				ns++;
				break;
			}
		}
		if (!ns) {
			DRV_LOG(ERR,
				"unable to recognize master/representors"
				" on the multiple IB devices");
			rte_errno = ENOENT;
			ret = -rte_errno;
			goto exit;
		}
	}
	assert(ns);
	/*
	 * Sort list to probe devices in natural order for users convenience
	 * (i.e. master first, then representors from lowest to highest ID).
	 */
	qsort(list, ns, sizeof(*list), mlx5_dev_spawn_data_cmp);
	/* Default configuration. */
	dev_config = (struct mlx5_dev_config){
		.mps = MLX5_ARG_UNSET,
		.tx_vec_en = 1,
		.rx_vec_en = 1,
		.txq_inline = MLX5_ARG_UNSET,
		.txqs_inline = MLX5_ARG_UNSET,
		.txqs_vec = MLX5_ARG_UNSET,
		.inline_max_packet_sz = MLX5_ARG_UNSET,
		.vf_nl_en = 1,
		.mr_ext_memseg_en = 1,
		.mprq = {
			.enabled = 0, /* Disabled by default. */
			.stride_num_n = MLX5_MPRQ_STRIDE_NUM_N,
			.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
			.min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
		},
	};
	/* Device specific configuration. */
	switch (pci_dev->id.device_id) {
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5BF:
		dev_config.txqs_vec = MLX5_VPMD_MAX_TXQS_BLUEFIELD;
		break;
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
		dev_config.vf = 1;
		break;
	default:
		break;
	}
	/* Set architecture-dependent default value if unset. */
	if (dev_config.txqs_vec == MLX5_ARG_UNSET)
		dev_config.txqs_vec = MLX5_VPMD_MAX_TXQS;
	for (i = 0; i != ns; ++i) {
		uint32_t restore;

		list[i].eth_dev = mlx5_dev_spawn(&pci_dev->device,
						 &list[i],
						 dev_config);
		if (!list[i].eth_dev) {
			if (rte_errno != EBUSY && rte_errno != EEXIST)
				break;
			/* Device is disabled or already spawned. Ignore it. */
			continue;
		}
		restore = list[i].eth_dev->data->dev_flags;
		rte_eth_copy_pci_info(list[i].eth_dev, pci_dev);
		/* Restore non-PCI flags cleared by the above call. */
		list[i].eth_dev->data->dev_flags |= restore;
		rte_eth_dev_probing_finish(list[i].eth_dev);
	}
	if (i != ns) {
		DRV_LOG(ERR,
			"probe of PCI device " PCI_PRI_FMT " aborted after"
			" encountering an error: %s",
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function,
			strerror(rte_errno));
		ret = -rte_errno;
		/* Roll back. */
		while (i--) {
			if (!list[i].eth_dev)
				continue;
			mlx5_dev_close(list[i].eth_dev);
			/* mac_addrs must not be freed because in dev_private */
			list[i].eth_dev->data->mac_addrs = NULL;
			claim_zero(rte_eth_dev_release_port(list[i].eth_dev));
		}
		/* Restore original error. */
		rte_errno = -ret;
	} else {
		ret = 0;
	}
exit:
	/*
	 * Do the routine cleanup:
	 * - close opened Netlink sockets
	 * - free the Infiniband device list
	 */
	if (nl_rdma >= 0)
		close(nl_rdma);
	if (nl_route >= 0)
		close(nl_route);
	assert(ibv_list);
	mlx5_glue->free_device_list(ibv_list);
	return ret;
}
/**
 * DPDK callback to remove a PCI device.
 *
 * This function removes all Ethernet devices belonging to a given PCI device.
 *
 * @param[in] pci_dev
 *   Pointer to the PCI device.
 *
 * @return
 *   0 on success, the function cannot fail.
 */
static int
mlx5_pci_remove(struct rte_pci_device *pci_dev)
{
	uint16_t port_id;
	struct rte_eth_dev *port;

	for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
		port = &rte_eth_devices[port_id];
		if (port->state != RTE_ETH_DEV_UNUSED &&
		    port->device == &pci_dev->device)
			rte_eth_dev_close(port_id);
	}
	return 0;
}
static const struct rte_pci_id mlx5_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6VF)
	},
	{
		.vendor_id = 0
	}
};
static struct rte_pci_driver mlx5_driver = {
	.driver = {
		.name = MLX5_DRIVER_NAME
	},
	.id_table = mlx5_pci_id_map,
	.probe = mlx5_pci_probe,
	.remove = mlx5_pci_remove,
	.dma_map = mlx5_dma_map,
	.dma_unmap = mlx5_dma_unmap,
	.drv_flags = (RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV |
		      RTE_PCI_DRV_PROBE_AGAIN),
};
#ifdef RTE_IBVERBS_LINK_DLOPEN

/**
 * Suffix RTE_EAL_PMD_PATH with "-glue".
 *
 * This function performs a sanity check on RTE_EAL_PMD_PATH before
 * suffixing its last component.
 *
 * @param buf[out]
 *   Output buffer, should be large enough otherwise NULL is returned.
 * @param size
 *   Size of @p buf.
 *
 * @return
 *   Pointer to @p buf or @p NULL in case suffix cannot be appended.
 */
static char *
mlx5_glue_path(char *buf, size_t size)
{
	static const char *const bad[] = { "/", ".", "..", NULL };
	const char *path = RTE_EAL_PMD_PATH;
	size_t len = strlen(path);
	size_t off;
	int i;

	while (len && path[len - 1] == '/')
		--len;
	for (off = len; off && path[off - 1] != '/'; --off)
		;
	for (i = 0; bad[i]; ++i)
		if (!strncmp(path + off, bad[i], (int)(len - off)))
			goto error;
	i = snprintf(buf, size, "%.*s-glue", (int)len, path);
	if (i == -1 || (size_t)i >= size)
		goto error;
	return buf;
error:
	DRV_LOG(ERR,
		"unable to append \"-glue\" to last component of"
		" RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\"),"
		" please re-configure DPDK");
	return NULL;
}
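/*
 * Example of the transformation performed above (paths are illustrative):
 * with RTE_EAL_PMD_PATH set to "/usr/lib/dpdk/pmds/", the trailing slash
 * is dropped and the buffer receives "/usr/lib/dpdk/pmds-glue".
 */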
/**
 * Initialization routine for run-time dependency on rdma-core.
 */
static int
mlx5_glue_init(void)
{
	char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")];
	const char *path[] = {
		/*
		 * A basic security check is necessary before trusting
		 * MLX5_GLUE_PATH, which may override RTE_EAL_PMD_PATH.
		 */
		(geteuid() == getuid() && getegid() == getgid() ?
		 getenv("MLX5_GLUE_PATH") : NULL),
		/*
		 * When RTE_EAL_PMD_PATH is set, use its glue-suffixed
		 * variant, otherwise let dlopen() look up libraries on its
		 * own.
		 */
		(*RTE_EAL_PMD_PATH ?
		 mlx5_glue_path(glue_path, sizeof(glue_path)) : ""),
	};
	unsigned int i = 0;
	void *handle = NULL;
	void **sym;
	const char *dlmsg;

	while (!handle && i != RTE_DIM(path)) {
		const char *end;
		size_t len;
		int ret;

		if (!path[i]) {
			++i;
			continue;
		}
		end = strpbrk(path[i], ":;");
		if (!end)
			end = path[i] + strlen(path[i]);
		len = end - path[i];
		if (!len) {
			++i;
			continue;
		}
		ret = 0;
		do {
			char name[ret + 1];

			ret = snprintf(name, sizeof(name), "%.*s%s" MLX5_GLUE,
				       (int)len, path[i],
				       (!len || *(end - 1) == '/') ? "" : "/");
			if (ret == -1)
				break;
			if (sizeof(name) != (size_t)ret + 1)
				continue;
			DRV_LOG(DEBUG, "looking for rdma-core glue as \"%s\"",
				name);
			handle = dlopen(name, RTLD_LAZY);
			break;
		} while (1);
		path[i] = end + 1;
		if (!*end)
			++i;
	}
	if (!handle) {
		rte_errno = EINVAL;
		dlmsg = dlerror();
		if (dlmsg)
			DRV_LOG(WARNING, "cannot load glue library: %s", dlmsg);
		goto glue_error;
	}
	sym = dlsym(handle, "mlx5_glue");
	if (!sym || !*sym) {
		rte_errno = EINVAL;
		dlmsg = dlerror();
		if (dlmsg)
			DRV_LOG(ERR, "cannot resolve glue symbol: %s", dlmsg);
		goto glue_error;
	}
	mlx5_glue = *sym;
	return 0;
glue_error:
	if (handle)
		dlclose(handle);
	DRV_LOG(WARNING,
		"cannot initialize PMD due to missing run-time dependency on"
		" rdma-core libraries (libibverbs, libmlx5)");
	return -rte_errno;
}

#endif /* RTE_IBVERBS_LINK_DLOPEN */
/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx5_pmd_init)
{
	/* Initialize driver log type. */
	mlx5_logtype = rte_log_register("pmd.net.mlx5");
	if (mlx5_logtype >= 0)
		rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE);

	/* Build the static tables for Verbs conversion. */
	mlx5_set_ptype_table();
	mlx5_set_cksum_table();
	mlx5_set_swp_types_table();
	/*
	 * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
	 * huge pages. Calling ibv_fork_init() during init allows
	 * applications to use fork() safely for purposes other than
	 * using this PMD, which is not supported in forked processes.
	 */
	setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
	/* Match the size of Rx completion entry to the size of a cacheline. */
	if (RTE_CACHE_LINE_SIZE == 128)
		setenv("MLX5_CQE_SIZE", "128", 0);
	/*
	 * MLX5_DEVICE_FATAL_CLEANUP tells ibv_destroy functions to
	 * cleanup all the Verbs resources even when the device was removed.
	 */
	setenv("MLX5_DEVICE_FATAL_CLEANUP", "1", 1);
#ifdef RTE_IBVERBS_LINK_DLOPEN
	if (mlx5_glue_init())
		return;
	assert(mlx5_glue);
#endif
#ifndef NDEBUG
	/* Glue structure must not contain any NULL pointers. */
	{
		unsigned int i;

		for (i = 0; i != sizeof(*mlx5_glue) / sizeof(void *); ++i)
			assert(((const void *const *)mlx5_glue)[i]);
	}
#endif
	if (strcmp(mlx5_glue->version, MLX5_GLUE_VERSION)) {
		DRV_LOG(ERR,
			"rdma-core glue \"%s\" mismatch: \"%s\" is required",
			mlx5_glue->version, MLX5_GLUE_VERSION);
		return;
	}
	mlx5_glue->fork_init();
	rte_pci_register(&mlx5_driver);
}
RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib");
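/*
 * Note: the RTE_PMD_REGISTER_KMOD_DEP() line above declares that the
 * ib_uverbs, mlx5_core and mlx5_ib kernel modules must all be loaded for
 * this PMD to bind to a device.
 */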