/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <unistd.h>
#include <pthread.h>
#include <net/if.h>
#include <linux/rtnetlink.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_eal_memconfig.h>
#include <rte_kvargs.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_alarm.h>

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_glue.h"
#include "mlx5_mr.h"
#include "mlx5_flow.h"

/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"

/* Device parameter to enable RX completion entry padding to 128B. */
#define MLX5_RXQ_CQE_PAD_EN "rxq_cqe_pad_en"

/* Device parameter to enable padding Rx packet to cacheline size. */
#define MLX5_RXQ_PKT_PAD_EN "rxq_pkt_pad_en"

/* Device parameter to enable Multi-Packet Rx queue. */
#define MLX5_RX_MPRQ_EN "mprq_en"

/* Device parameter to configure log 2 of the number of strides for MPRQ. */
#define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num"

/* Device parameter to limit the size of memcpy'd packet for MPRQ. */
#define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len"

/* Device parameter to set the minimum number of Rx queues to enable MPRQ. */
#define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq"

/* Device parameter to configure inline send. Deprecated, ignored. */
#define MLX5_TXQ_INLINE "txq_inline"

/* Device parameter to limit packet size to inline with ordinary SEND. */
#define MLX5_TXQ_INLINE_MAX "txq_inline_max"

/* Device parameter to configure minimal data size to inline. */
#define MLX5_TXQ_INLINE_MIN "txq_inline_min"

/* Device parameter to limit packet size to inline with Enhanced MPW. */
#define MLX5_TXQ_INLINE_MPW "txq_inline_mpw"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling inline send.
 */
#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling vectorized Tx, deprecated, ignored (no vectorized Tx routines).
 */
#define MLX5_TXQS_MAX_VEC "txqs_max_vec"

/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"

/*
 * Device parameter to include 2 dsegs in the title WQEBB.
 * Deprecated, ignored.
 */
#define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en"

/*
 * Device parameter to limit the size of inlining packet.
 * Deprecated, ignored.
 */
#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"

/*
 * Device parameter to enable hardware Tx vector.
 * Deprecated, ignored (no vectorized Tx routines anymore).
 */
#define MLX5_TX_VEC_EN "tx_vec_en"

/* Device parameter to enable hardware Rx vector. */
#define MLX5_RX_VEC_EN "rx_vec_en"

/* Allow L3 VXLAN flow creation. */
#define MLX5_L3_VXLAN_EN "l3_vxlan_en"

/* Activate DV E-Switch flow steering. */
#define MLX5_DV_ESW_EN "dv_esw_en"

/* Activate DV flow steering. */
#define MLX5_DV_FLOW_EN "dv_flow_en"

/* Activate Netlink support in VF mode. */
#define MLX5_VF_NL_EN "vf_nl_en"

/* Enable extending memsegs when creating a MR. */
#define MLX5_MR_EXT_MEMSEG_EN "mr_ext_memseg_en"

/* Select port representors to instantiate. */
#define MLX5_REPRESENTOR "representor"

/* Device parameter to configure the maximum number of dump files per queue. */
#define MLX5_MAX_DUMP_FILES_NUM "max_dump_files_num"
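
/*
 * Usage sketch (not part of the driver): the keys above are passed through
 * EAL devargs appended to the PCI address; the address and values below are
 * hypothetical:
 *
 *   -w 0000:03:00.0,rxq_cqe_comp_en=1,mprq_en=1,txq_inline_max=256
 *
 * Each key=value pair is parsed by mlx5_args()/mlx5_args_check() below.
 */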

#ifndef HAVE_IBV_MLX5_MOD_MPW
#define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
#endif

#ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
#endif

static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";

/* Shared memory between primary and secondary processes. */
struct mlx5_shared_data *mlx5_shared_data;

/* Spinlock for mlx5_shared_data allocation. */
static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* Process local data for secondary processes. */
static struct mlx5_local_data mlx5_local_data;

/** Driver-specific log messages type. */
int mlx5_logtype;

/** Data associated with devices to spawn. */
struct mlx5_dev_spawn_data {
	uint32_t ifindex; /**< Network interface index. */
	uint32_t max_port; /**< IB device maximal port index. */
	uint32_t ibv_port; /**< IB device physical port index. */
	struct mlx5_switch_info info; /**< Switch information. */
	struct ibv_device *ibv_dev; /**< Associated IB device. */
	struct rte_eth_dev *eth_dev; /**< Associated Ethernet device. */
	struct rte_pci_device *pci_dev; /**< Backend PCI device. */
};

static LIST_HEAD(, mlx5_ibv_shared) mlx5_ibv_list = LIST_HEAD_INITIALIZER();
static pthread_mutex_t mlx5_ibv_list_mutex = PTHREAD_MUTEX_INITIALIZER;

/**
 * Initialize the counters management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_ibv_shared object to initialize.
 */
static void
mlx5_flow_counters_mng_init(struct mlx5_ibv_shared *sh)
{
	uint8_t i;

	TAILQ_INIT(&sh->cmng.flow_counters);
	for (i = 0; i < RTE_DIM(sh->cmng.ccont); ++i)
		TAILQ_INIT(&sh->cmng.ccont[i].pool_list);
}

/**
 * Destroy all the resources allocated for a counter memory management.
 *
 * @param[in] mng
 *   Pointer to the memory management structure.
 */
static void
mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng)
{
	uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data;

	LIST_REMOVE(mng, next);
	claim_zero(mlx5_devx_cmd_destroy(mng->dm));
	claim_zero(mlx5_glue->devx_umem_dereg(mng->umem));
	rte_free(mem);
}

/**
 * Close and release all the resources of the counters management.
 *
 * @param[in] sh
 *   Pointer to mlx5_ibv_shared object to free.
 */
static void
mlx5_flow_counters_mng_close(struct mlx5_ibv_shared *sh)
{
	struct mlx5_counter_stats_mem_mng *mng;
	uint8_t i;
	int j;
	int retries = 1024;

	rte_errno = 0;
	do {
		rte_eal_alarm_cancel(mlx5_flow_query_alarm, sh);
		if (rte_errno != EINPROGRESS)
			break;
		rte_pause();
	} while (retries-- > 0);
	for (i = 0; i < RTE_DIM(sh->cmng.ccont); ++i) {
		struct mlx5_flow_counter_pool *pool;
		uint32_t batch = !!(i % 2);

		if (!sh->cmng.ccont[i].pools)
			continue;
		pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list);
		while (pool) {
			if (batch) {
				if (pool->min_dcs)
					claim_zero
					(mlx5_devx_cmd_destroy(pool->min_dcs));
			}
			for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) {
				if (pool->counters_raw[j].action)
					claim_zero
					(mlx5_glue->destroy_flow_action
					 (pool->counters_raw[j].action));
				if (!batch && pool->counters_raw[j].dcs)
					claim_zero(mlx5_devx_cmd_destroy
						  (pool->counters_raw[j].dcs));
			}
			TAILQ_REMOVE(&sh->cmng.ccont[i].pool_list, pool,
				     next);
			rte_free(pool);
			pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list);
		}
		rte_free(sh->cmng.ccont[i].pools);
	}
	mng = LIST_FIRST(&sh->cmng.mem_mngs);
	while (mng) {
		mlx5_flow_destroy_counter_stat_mem_mng(mng);
		mng = LIST_FIRST(&sh->cmng.mem_mngs);
	}
	memset(&sh->cmng, 0, sizeof(sh->cmng));
}

/**
 * Allocate shared IB device context. If there is a multiport device, the
 * master and representors will share this context; if there is a single-port
 * dedicated IB device, the context will be used only by the given port.
 *
 * The routine first searches for an existing context with the specified IB
 * device name; if found, the shared context is reused and its reference
 * counter is incremented. If no context is found, a new one is created and
 * initialized with the specified IB device context and parameters.
 *
 * @param[in] spawn
 *   Pointer to the IB device attributes (name, port, etc).
 *
 * @return
 *   Pointer to mlx5_ibv_shared object on success,
 *   otherwise NULL and rte_errno is set.
 */
static struct mlx5_ibv_shared *
mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn)
{
	struct mlx5_ibv_shared *sh;
	int err = 0;
	uint32_t i;

	assert(spawn);
	/* Secondary process should not create the shared context. */
	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
	pthread_mutex_lock(&mlx5_ibv_list_mutex);
	/* Search for IB context by device name. */
	LIST_FOREACH(sh, &mlx5_ibv_list, next) {
		if (!strcmp(sh->ibdev_name, spawn->ibv_dev->name)) {
			sh->refcnt++;
			goto exit;
		}
	}
	/* No device found, we have to create new shared context. */
	assert(spawn->max_port);
	sh = rte_zmalloc("ethdev shared ib context",
			 sizeof(struct mlx5_ibv_shared) +
			 spawn->max_port *
			 sizeof(struct mlx5_ibv_shared_port),
			 RTE_CACHE_LINE_SIZE);
	if (!sh) {
		DRV_LOG(ERR, "shared context allocation failure");
		rte_errno = ENOMEM;
		goto exit;
	}
	/* Try to open IB device with DV first, then usual Verbs. */
	errno = 0;
	sh->ctx = mlx5_glue->dv_open_device(spawn->ibv_dev);
	if (sh->ctx) {
		sh->devx = 1;
		DRV_LOG(DEBUG, "DevX is supported");
	} else {
		sh->ctx = mlx5_glue->open_device(spawn->ibv_dev);
		if (!sh->ctx) {
			err = errno ? errno : ENODEV;
			goto error;
		}
		DRV_LOG(DEBUG, "DevX is NOT supported");
	}
	err = mlx5_glue->query_device_ex(sh->ctx, NULL, &sh->device_attr);
	if (err) {
		DRV_LOG(DEBUG, "ibv_query_device_ex() failed");
		goto error;
	}
	sh->refcnt = 1;
	sh->max_port = spawn->max_port;
	strncpy(sh->ibdev_name, sh->ctx->device->name,
		sizeof(sh->ibdev_name));
	strncpy(sh->ibdev_path, sh->ctx->device->ibdev_path,
		sizeof(sh->ibdev_path));
	sh->pci_dev = spawn->pci_dev;
	pthread_mutex_init(&sh->intr_mutex, NULL);
	/*
	 * Setting port_id to the maximum unallowed value means
	 * there is no interrupt subhandler installed for
	 * the given port index i.
	 */
	for (i = 0; i < sh->max_port; i++)
		sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
	sh->pd = mlx5_glue->alloc_pd(sh->ctx);
	if (sh->pd == NULL) {
		DRV_LOG(ERR, "PD allocation failure");
		err = ENOMEM;
		goto error;
	}
	/*
	 * Once the device is added to the list of memory event
	 * callback, its global MR cache table cannot be expanded
	 * on the fly because of deadlock. If it overflows, lookup
	 * should be done by searching MR list linearly, which is slow.
	 *
	 * At this point the device is not added to the memory
	 * event list yet, context is just being created.
	 */
	err = mlx5_mr_btree_init(&sh->mr.cache,
				 MLX5_MR_BTREE_CACHE_N * 2,
				 sh->pci_dev->device.numa_node);
	if (err) {
		err = rte_errno;
		goto error;
	}
	mlx5_flow_counters_mng_init(sh);
	LIST_INSERT_HEAD(&mlx5_ibv_list, sh, next);
exit:
	pthread_mutex_unlock(&mlx5_ibv_list_mutex);
	return sh;
error:
	pthread_mutex_unlock(&mlx5_ibv_list_mutex);
	assert(sh);
	if (sh->pd)
		claim_zero(mlx5_glue->dealloc_pd(sh->pd));
	if (sh->ctx)
		claim_zero(mlx5_glue->close_device(sh->ctx));
	rte_free(sh);
	assert(err > 0);
	rte_errno = err;
	return NULL;
}

/**
 * Free shared IB device context. Decrement counter and if zero free
 * all allocated resources and close handles.
 *
 * @param[in] sh
 *   Pointer to mlx5_ibv_shared object to free.
 */
static void
mlx5_free_shared_ibctx(struct mlx5_ibv_shared *sh)
{
	pthread_mutex_lock(&mlx5_ibv_list_mutex);
#ifndef NDEBUG
	/* Check the object presence in the list. */
	struct mlx5_ibv_shared *lctx;

	LIST_FOREACH(lctx, &mlx5_ibv_list, next)
		if (lctx == sh)
			break;
	assert(lctx);
	if (lctx != sh) {
		DRV_LOG(ERR, "Freeing non-existing shared IB context");
		goto exit;
	}
#endif /* NDEBUG */
	assert(sh);
	assert(sh->refcnt);
	/* Secondary process should not free the shared context. */
	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
	if (--sh->refcnt)
		goto exit;
	/* Release created Memory Regions. */
	mlx5_mr_release(sh);
	LIST_REMOVE(sh, next);
	/*
	 * Ensure there is no async event handler installed.
	 * Only primary process handles async device events.
	 */
	mlx5_flow_counters_mng_close(sh);
	assert(!sh->intr_cnt);
	if (sh->intr_cnt)
		mlx5_intr_callback_unregister
			(&sh->intr_handle, mlx5_dev_interrupt_handler, sh);
	pthread_mutex_destroy(&sh->intr_mutex);
	if (sh->pd)
		claim_zero(mlx5_glue->dealloc_pd(sh->pd));
	if (sh->ctx)
		claim_zero(mlx5_glue->close_device(sh->ctx));
	rte_free(sh);
exit:
	pthread_mutex_unlock(&mlx5_ibv_list_mutex);
}

/**
 * Initialize DR related data within private structure.
 * Routine checks the reference counter and does actual
 * resources creation/initialization only if counter is zero.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 *
 * @return
 *   Zero on success, positive error code otherwise.
 */
static int
mlx5_alloc_shared_dr(struct mlx5_priv *priv)
{
#ifdef HAVE_MLX5DV_DR
	struct mlx5_ibv_shared *sh = priv->sh;
	int err = 0;
	void *domain;

	assert(sh);
	if (sh->dv_refcnt) {
		/* Shared DV/DR structures are already initialized. */
		sh->dv_refcnt++;
		priv->dr_shared = 1;
		return 0;
	}
	/* Reference counter is zero, we should initialize structures. */
	domain = mlx5_glue->dr_create_domain(sh->ctx,
					     MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
	if (!domain) {
		DRV_LOG(ERR, "ingress mlx5dv_dr_create_domain failed");
		err = errno;
		goto error;
	}
	sh->rx_domain = domain;
	domain = mlx5_glue->dr_create_domain(sh->ctx,
					     MLX5DV_DR_DOMAIN_TYPE_NIC_TX);
	if (!domain) {
		DRV_LOG(ERR, "egress mlx5dv_dr_create_domain failed");
		err = errno;
		goto error;
	}
	pthread_mutex_init(&sh->dv_mutex, NULL);
	sh->tx_domain = domain;
#ifdef HAVE_MLX5DV_DR_ESWITCH
	if (priv->config.dv_esw_en) {
		domain = mlx5_glue->dr_create_domain
			(sh->ctx, MLX5DV_DR_DOMAIN_TYPE_FDB);
		if (!domain) {
			DRV_LOG(ERR, "FDB mlx5dv_dr_create_domain failed");
			err = errno;
			goto error;
		}
		sh->fdb_domain = domain;
		sh->esw_drop_action = mlx5_glue->dr_create_flow_action_drop();
	}
#endif
	sh->dv_refcnt++;
	priv->dr_shared = 1;
	return 0;
error:
	/* Rollback the created objects. */
	if (sh->rx_domain) {
		mlx5_glue->dr_destroy_domain(sh->rx_domain);
		sh->rx_domain = NULL;
	}
	if (sh->tx_domain) {
		mlx5_glue->dr_destroy_domain(sh->tx_domain);
		sh->tx_domain = NULL;
	}
	if (sh->fdb_domain) {
		mlx5_glue->dr_destroy_domain(sh->fdb_domain);
		sh->fdb_domain = NULL;
	}
	if (sh->esw_drop_action) {
		mlx5_glue->destroy_flow_action(sh->esw_drop_action);
		sh->esw_drop_action = NULL;
	}
	return err;
#else
	(void)priv;
	return 0;
#endif
}

/**
 * Destroy DR related data within private structure.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 */
static void
mlx5_free_shared_dr(struct mlx5_priv *priv)
{
#ifdef HAVE_MLX5DV_DR
	struct mlx5_ibv_shared *sh;

	if (!priv->dr_shared)
		return;
	priv->dr_shared = 0;
	sh = priv->sh;
	assert(sh);
	assert(sh->dv_refcnt);
	if (sh->dv_refcnt && --sh->dv_refcnt)
		return;
	if (sh->rx_domain) {
		mlx5_glue->dr_destroy_domain(sh->rx_domain);
		sh->rx_domain = NULL;
	}
	if (sh->tx_domain) {
		mlx5_glue->dr_destroy_domain(sh->tx_domain);
		sh->tx_domain = NULL;
	}
#ifdef HAVE_MLX5DV_DR_ESWITCH
	if (sh->fdb_domain) {
		mlx5_glue->dr_destroy_domain(sh->fdb_domain);
		sh->fdb_domain = NULL;
	}
	if (sh->esw_drop_action) {
		mlx5_glue->destroy_flow_action(sh->esw_drop_action);
		sh->esw_drop_action = NULL;
	}
#endif
	pthread_mutex_destroy(&sh->dv_mutex);
#else
	(void)priv;
#endif
}

/**
 * Initialize shared data between primary and secondary process.
 *
 * A memzone is reserved by primary process and secondary processes attach to
 * the memzone.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_init_shared_data(void)
{
	const struct rte_memzone *mz;
	int ret = 0;

	rte_spinlock_lock(&mlx5_shared_data_lock);
	if (mlx5_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate shared memory. */
			mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
						 sizeof(*mlx5_shared_data),
						 SOCKET_ID_ANY, 0);
			if (mz == NULL) {
				DRV_LOG(ERR,
					"Cannot allocate mlx5 shared data\n");
				ret = -rte_errno;
				goto error;
			}
			mlx5_shared_data = mz->addr;
			memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
			rte_spinlock_init(&mlx5_shared_data->lock);
		} else {
			/* Lookup allocated shared memory. */
			mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
			if (mz == NULL) {
				DRV_LOG(ERR,
					"Cannot attach mlx5 shared data\n");
				ret = -rte_errno;
				goto error;
			}
			mlx5_shared_data = mz->addr;
			memset(&mlx5_local_data, 0, sizeof(mlx5_local_data));
		}
	}
error:
	rte_spinlock_unlock(&mlx5_shared_data_lock);
	return ret;
}

/**
 * Retrieve integer value from environment variable.
 *
 * @param[in] name
 *   Environment variable name.
 *
 * @return
 *   Integer value, 0 if the variable is not set.
 */
int
mlx5_getenv_int(const char *name)
{
	const char *val = getenv(name);

	if (val == NULL)
		return 0;
	return atoi(val);
}

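/*
 * Example (illustrative, variable name hypothetical): with MLX5_FOO=42 set
 * in the environment, mlx5_getenv_int("MLX5_FOO") returns 42; it returns 0
 * when the variable is unset or does not start with a number.
 */
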
/**
 * Verbs callback to allocate memory. This function should allocate the space
 * according to the size provided residing inside a huge page.
 * Please note that all allocation must respect the alignment from libmlx5
 * (i.e. currently sysconf(_SC_PAGESIZE)).
 *
 * @param[in] size
 *   The size in bytes of the memory to allocate.
 * @param[in] data
 *   A pointer to the callback data.
 *
 * @return
 *   Allocated buffer, NULL otherwise and rte_errno is set.
 */
static void *
mlx5_alloc_verbs_buf(size_t size, void *data)
{
	struct mlx5_priv *priv = data;
	void *ret;
	size_t alignment = sysconf(_SC_PAGESIZE);
	unsigned int socket = SOCKET_ID_ANY;

	if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) {
		const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;

		socket = ctrl->socket;
	} else if (priv->verbs_alloc_ctx.type ==
		   MLX5_VERBS_ALLOC_TYPE_RX_QUEUE) {
		const struct mlx5_rxq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;

		socket = ctrl->socket;
	}
	assert(data != NULL);
	ret = rte_malloc_socket(__func__, size, alignment, socket);
	if (!ret && size)
		rte_errno = ENOMEM;
	return ret;
}

/**
 * Verbs callback to free memory.
 *
 * @param[in] ptr
 *   A pointer to the memory to free.
 * @param[in] data
 *   A pointer to the callback data.
 */
static void
mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
{
	assert(data != NULL);
	rte_free(ptr);
}

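/*
 * The callback pair above is handed to libmlx5 via
 * MLX5DV_CTX_ATTR_BUF_ALLOCATORS, as done in mlx5_dev_spawn() below;
 * a minimal sketch of that registration:
 *
 *	struct mlx5dv_ctx_allocators alctr = {
 *		.alloc = &mlx5_alloc_verbs_buf,
 *		.free = &mlx5_free_verbs_buf,
 *		.data = priv,
 *	};
 *	mlx5_glue->dv_set_context_attr(sh->ctx,
 *				       MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
 *				       (void *)((uintptr_t)&alctr));
 */
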
/**
 * Initialize process private data structure.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_proc_priv_init(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_proc_priv *ppriv;
	size_t ppriv_size;

	/*
	 * UAR register table follows the process private structure. BlueFlame
	 * registers for Tx queues are stored in the table.
	 */
	ppriv_size =
		sizeof(struct mlx5_proc_priv) + priv->txqs_n * sizeof(void *);
	ppriv = rte_malloc_socket("mlx5_proc_priv", ppriv_size,
				  RTE_CACHE_LINE_SIZE, dev->device->numa_node);
	if (!ppriv) {
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	ppriv->uar_table_sz = ppriv_size;
	dev->process_private = ppriv;
	return 0;
}

/**
 * Un-initialize process private data structure.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_proc_priv_uninit(struct rte_eth_dev *dev)
{
	if (!dev->process_private)
		return;
	rte_free(dev->process_private);
	dev->process_private = NULL;
}

/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_dev_close(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret;

	DRV_LOG(DEBUG, "port %u closing device \"%s\"",
		dev->data->port_id,
		((priv->sh->ctx != NULL) ? priv->sh->ctx->device->name : ""));
	/* In case mlx5_dev_stop() has not been called. */
	mlx5_dev_interrupt_handler_uninstall(dev);
	mlx5_traffic_disable(dev);
	mlx5_flow_flush(dev, NULL);
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	rte_wmb();
	/* Disable datapath on secondary process. */
	mlx5_mp_req_stop_rxtx(dev);
	if (priv->rxqs != NULL) {
		/* XXX race condition if mlx5_rx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->rxqs_n); ++i)
			mlx5_rxq_release(dev, i);
		priv->rxqs_n = 0;
		priv->rxqs = NULL;
	}
	if (priv->txqs != NULL) {
		/* XXX race condition if mlx5_tx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->txqs_n); ++i)
			mlx5_txq_release(dev, i);
		priv->txqs_n = 0;
		priv->txqs = NULL;
	}
	mlx5_proc_priv_uninit(dev);
	mlx5_mprq_free_mp(dev);
	/* Remove from memory callback device list. */
	rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
	assert(priv->sh);
	LIST_REMOVE(priv->sh, mem_event_cb);
	rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
	mlx5_free_shared_dr(priv);
	if (priv->rss_conf.rss_key != NULL)
		rte_free(priv->rss_conf.rss_key);
	if (priv->reta_idx != NULL)
		rte_free(priv->reta_idx);
	if (priv->config.vf)
		mlx5_nl_mac_addr_flush(dev);
	if (priv->nl_socket_route >= 0)
		close(priv->nl_socket_route);
	if (priv->nl_socket_rdma >= 0)
		close(priv->nl_socket_rdma);
	if (priv->sh) {
		/*
		 * Free the shared context in last turn, because the cleanup
		 * routines above may use some shared fields, like
		 * mlx5_nl_mac_addr_flush() uses ibdev_path for retrieving
		 * ifindex if Netlink fails.
		 */
		mlx5_free_shared_ibctx(priv->sh);
		priv->sh = NULL;
	}
	ret = mlx5_hrxq_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
			dev->data->port_id);
	ret = mlx5_ind_table_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some indirection table still remain",
			dev->data->port_id);
	ret = mlx5_rxq_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Verbs Rx queue still remain",
			dev->data->port_id);
	ret = mlx5_rxq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Rx queues still remain",
			dev->data->port_id);
	ret = mlx5_txq_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain",
			dev->data->port_id);
	ret = mlx5_txq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Tx queues still remain",
			dev->data->port_id);
	ret = mlx5_flow_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some flows still remain",
			dev->data->port_id);
	if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
		unsigned int c = 0;
		uint16_t port_id;

		RTE_ETH_FOREACH_DEV_OF(port_id, dev->device) {
			struct mlx5_priv *opriv =
				rte_eth_devices[port_id].data->dev_private;

			if (!opriv ||
			    opriv->domain_id != priv->domain_id ||
			    &rte_eth_devices[port_id] == dev)
				continue;
			++c;
		}
		/* Release the switch domain if no sibling port uses it. */
		if (!c)
			claim_zero(rte_eth_switch_domain_free
				   (priv->domain_id));
	}
	memset(priv, 0, sizeof(*priv));
	priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
	/*
	 * Reset mac_addrs to NULL such that it is not freed as part of
	 * rte_eth_dev_release_port(). mac_addrs is part of dev_private so
	 * it is freed when dev_private is freed.
	 */
	dev->data->mac_addrs = NULL;
}

const struct eth_dev_ops mlx5_dev_ops = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.promiscuous_enable = mlx5_promiscuous_enable,
	.promiscuous_disable = mlx5_promiscuous_disable,
	.allmulticast_enable = mlx5_allmulticast_enable,
	.allmulticast_disable = mlx5_allmulticast_disable,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.fw_version_get = mlx5_fw_version_get,
	.dev_infos_get = mlx5_dev_infos_get,
	.read_clock = mlx5_read_clock,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.set_mc_addr_list = mlx5_set_mc_addr_list,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.reta_update = mlx5_dev_rss_reta_update,
	.reta_query = mlx5_dev_rss_reta_query,
	.rss_hash_update = mlx5_rss_hash_update,
	.rss_hash_conf_get = mlx5_rss_hash_conf_get,
	.filter_ctrl = mlx5_dev_filter_ctrl,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
	.rx_queue_count = mlx5_rx_queue_count,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
	.is_removed = mlx5_is_removed,
};

/* Available operations from secondary process. */
static const struct eth_dev_ops mlx5_dev_sec_ops = {
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.fw_version_get = mlx5_fw_version_get,
	.dev_infos_get = mlx5_dev_infos_get,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
};

/* Available operations in flow isolated mode. */
const struct eth_dev_ops mlx5_dev_ops_isolate = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.promiscuous_enable = mlx5_promiscuous_enable,
	.promiscuous_disable = mlx5_promiscuous_disable,
	.allmulticast_enable = mlx5_allmulticast_enable,
	.allmulticast_disable = mlx5_allmulticast_disable,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.fw_version_get = mlx5_fw_version_get,
	.dev_infos_get = mlx5_dev_infos_get,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.set_mc_addr_list = mlx5_set_mc_addr_list,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.filter_ctrl = mlx5_dev_filter_ctrl,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
	.is_removed = mlx5_is_removed,
};

/**
 * Verify and store value for device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param opaque
 *   User data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_args_check(const char *key, const char *val, void *opaque)
{
	struct mlx5_dev_config *config = opaque;
	unsigned long tmp;

	/* No-op, port representors are processed in mlx5_dev_spawn(). */
	if (!strcmp(MLX5_REPRESENTOR, key))
		return 0;
	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		rte_errno = errno;
		DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
		return -rte_errno;
	}
	if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
		config->cqe_comp = !!tmp;
	} else if (strcmp(MLX5_RXQ_CQE_PAD_EN, key) == 0) {
		config->cqe_pad = !!tmp;
	} else if (strcmp(MLX5_RXQ_PKT_PAD_EN, key) == 0) {
		config->hw_padding = !!tmp;
	} else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) {
		config->mprq.enabled = !!tmp;
	} else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) {
		config->mprq.stride_num_n = tmp;
	} else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) {
		config->mprq.max_memcpy_len = tmp;
	} else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) {
		config->mprq.min_rxqs_num = tmp;
	} else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
		DRV_LOG(WARNING, "%s: deprecated parameter,"
				 " converted to txq_inline_max", key);
		config->txq_inline_max = tmp;
	} else if (strcmp(MLX5_TXQ_INLINE_MAX, key) == 0) {
		config->txq_inline_max = tmp;
	} else if (strcmp(MLX5_TXQ_INLINE_MIN, key) == 0) {
		config->txq_inline_min = tmp;
	} else if (strcmp(MLX5_TXQ_INLINE_MPW, key) == 0) {
		config->txq_inline_mpw = tmp;
	} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
		config->txqs_inline = tmp;
	} else if (strcmp(MLX5_TXQS_MAX_VEC, key) == 0) {
		DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
	} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
		config->mps = !!tmp;
	} else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
		DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
	} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
		DRV_LOG(WARNING, "%s: deprecated parameter,"
				 " converted to txq_inline_mpw", key);
		config->txq_inline_mpw = tmp;
	} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
		DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
	} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
		config->rx_vec_en = !!tmp;
	} else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) {
		config->l3_vxlan_en = !!tmp;
	} else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
		config->vf_nl_en = !!tmp;
	} else if (strcmp(MLX5_DV_ESW_EN, key) == 0) {
		config->dv_esw_en = !!tmp;
	} else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) {
		config->dv_flow_en = !!tmp;
	} else if (strcmp(MLX5_MR_EXT_MEMSEG_EN, key) == 0) {
		config->mr_ext_memseg_en = !!tmp;
	} else if (strcmp(MLX5_MAX_DUMP_FILES_NUM, key) == 0) {
		config->max_dump_files_num = tmp;
	} else {
		DRV_LOG(WARNING, "%s: unknown parameter", key);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
}

/**
 * Parse device parameters.
 *
 * @param config
 *   Pointer to device configuration structure.
 * @param devargs
 *   Device arguments structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
{
	const char **params = (const char *[]){
		MLX5_RXQ_CQE_COMP_EN,
		MLX5_RXQ_CQE_PAD_EN,
		MLX5_RXQ_PKT_PAD_EN,
		MLX5_RX_MPRQ_EN,
		MLX5_RX_MPRQ_LOG_STRIDE_NUM,
		MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
		MLX5_RXQS_MIN_MPRQ,
		MLX5_TXQ_INLINE,
		MLX5_TXQ_INLINE_MIN,
		MLX5_TXQ_INLINE_MAX,
		MLX5_TXQ_INLINE_MPW,
		MLX5_TXQS_MIN_INLINE,
		MLX5_TXQS_MAX_VEC,
		MLX5_TXQ_MPW_EN,
		MLX5_TXQ_MPW_HDR_DSEG_EN,
		MLX5_TXQ_MAX_INLINE_LEN,
		MLX5_TX_VEC_EN,
		MLX5_RX_VEC_EN,
		MLX5_L3_VXLAN_EN,
		MLX5_VF_NL_EN,
		MLX5_DV_ESW_EN,
		MLX5_DV_FLOW_EN,
		MLX5_MR_EXT_MEMSEG_EN,
		MLX5_REPRESENTOR,
		MLX5_MAX_DUMP_FILES_NUM,
		NULL,
	};
	struct rte_kvargs *kvlist;
	int ret = 0;
	int i;

	if (devargs == NULL)
		return 0;
	/* Following UGLY cast is done to pass checkpatch. */
	kvlist = rte_kvargs_parse(devargs->args, params);
	if (kvlist == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	/* Process parameters. */
	for (i = 0; (params[i] != NULL); ++i) {
		if (rte_kvargs_count(kvlist, params[i])) {
			ret = rte_kvargs_process(kvlist, params[i],
						 mlx5_args_check, config);
			if (ret) {
				rte_errno = EINVAL;
				rte_kvargs_free(kvlist);
				return -rte_errno;
			}
		}
	}
	rte_kvargs_free(kvlist);
	return 0;
}

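/*
 * Usage sketch (illustrative only, never compiled into the driver): how the
 * rte_kvargs API used by mlx5_args() processes a devargs string. The guard
 * macro and the input string are hypothetical.
 */
#ifdef MLX5_KVARGS_USAGE_EXAMPLE
static int
mlx5_kvargs_usage_example(struct mlx5_dev_config *config)
{
	const char *params[] = { MLX5_RXQ_CQE_COMP_EN, NULL };
	/* Same "key=value,..." format as the devargs after the PCI BDF. */
	struct rte_kvargs *kvlist = rte_kvargs_parse("rxq_cqe_comp_en=1",
						     params);
	int ret;

	if (kvlist == NULL)
		return -EINVAL;
	/* Invokes mlx5_args_check("rxq_cqe_comp_en", "1", config). */
	ret = rte_kvargs_process(kvlist, MLX5_RXQ_CQE_COMP_EN,
				 mlx5_args_check, config);
	rte_kvargs_free(kvlist);
	return ret;
}
#endif /* MLX5_KVARGS_USAGE_EXAMPLE */
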
static struct rte_pci_driver mlx5_driver;

/**
 * PMD global initialization.
 *
 * Independent from individual device, this function initializes global
 * per-PMD data structures distinguishing primary and secondary processes.
 * Hence, each initialization is called once per process.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_init_once(void)
{
	struct mlx5_shared_data *sd;
	struct mlx5_local_data *ld = &mlx5_local_data;
	int ret = 0;

	if (mlx5_init_shared_data())
		return -rte_errno;
	sd = mlx5_shared_data;
	assert(sd);
	rte_spinlock_lock(&sd->lock);
	switch (rte_eal_process_type()) {
	case RTE_PROC_PRIMARY:
		if (sd->init_done)
			break;
		LIST_INIT(&sd->mem_event_cb_list);
		rte_rwlock_init(&sd->mem_event_rwlock);
		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
						mlx5_mr_mem_event_cb, NULL);
		ret = mlx5_mp_init_primary();
		if (ret)
			goto out;
		sd->init_done = true;
		break;
	case RTE_PROC_SECONDARY:
		if (ld->init_done)
			break;
		ret = mlx5_mp_init_secondary();
		if (ret)
			goto out;
		++sd->secondary_cnt;
		ld->init_done = true;
		break;
	default:
		break;
	}
out:
	rte_spinlock_unlock(&sd->lock);
	return ret;
}

/**
 * Configures the minimal amount of data to inline into WQE
 * while sending packets:
 *
 * - txq_inline_min has the maximal priority, if this
 *   key is specified in devargs
 * - if DevX is enabled the inline mode is queried from the
 *   device (HCA attributes and NIC vport context if needed).
 * - otherwise L2 mode (18 bytes) is assumed for ConnectX-4/4LX
 *   and none (0 bytes) for other NICs
 *
 * @param spawn
 *   Verbs device parameters (name, port, switch_info) to spawn.
 * @param config
 *   Device configuration parameters.
 */
static void
mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
		    struct mlx5_dev_config *config)
{
	if (config->txq_inline_min != MLX5_ARG_UNSET) {
		/* Application defines size of inlined data explicitly. */
		switch (spawn->pci_dev->id.device_id) {
		case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
		case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
		case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
		case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
			if (config->txq_inline_min <
				       (int)MLX5_INLINE_HSIZE_L2) {
				DRV_LOG(DEBUG,
					"txq_inline_min aligned to minimal"
					" ConnectX-4 required value %d",
					(int)MLX5_INLINE_HSIZE_L2);
				config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
			}
			break;
		}
		goto exit;
	}
	if (config->hca_attr.eth_net_offloads) {
		/* We have DevX enabled, inline mode queried successfully. */
		switch (config->hca_attr.wqe_inline_mode) {
		case MLX5_CAP_INLINE_MODE_L2:
			/* outer L2 header must be inlined. */
			config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
			goto exit;
		case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
			/* No inline data are required by NIC. */
			config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
			config->hw_vlan_insert =
				config->hca_attr.wqe_vlan_insert;
			DRV_LOG(DEBUG, "Tx VLAN insertion is supported");
			goto exit;
		case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
			/* inline mode is defined by NIC vport context. */
			if (!config->hca_attr.eth_virt)
				break;
			switch (config->hca_attr.vport_inline_mode) {
			case MLX5_INLINE_MODE_NONE:
				config->txq_inline_min =
					MLX5_INLINE_HSIZE_NONE;
				goto exit;
			case MLX5_INLINE_MODE_L2:
				config->txq_inline_min =
					MLX5_INLINE_HSIZE_L2;
				goto exit;
			case MLX5_INLINE_MODE_IP:
				config->txq_inline_min =
					MLX5_INLINE_HSIZE_L3;
				goto exit;
			case MLX5_INLINE_MODE_TCP_UDP:
				config->txq_inline_min =
					MLX5_INLINE_HSIZE_L4;
				goto exit;
			case MLX5_INLINE_MODE_INNER_L2:
				config->txq_inline_min =
					MLX5_INLINE_HSIZE_INNER_L2;
				goto exit;
			case MLX5_INLINE_MODE_INNER_IP:
				config->txq_inline_min =
					MLX5_INLINE_HSIZE_INNER_L3;
				goto exit;
			case MLX5_INLINE_MODE_INNER_TCP_UDP:
				config->txq_inline_min =
					MLX5_INLINE_HSIZE_INNER_L4;
				goto exit;
			}
		}
	}
	/*
	 * We get here if we are unable to deduce
	 * inline data size with DevX. Try PCI ID
	 * to determine old NICs.
	 */
	switch (spawn->pci_dev->id.device_id) {
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
		config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
		config->hw_vlan_insert = 0;
		break;
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
		/*
		 * These NICs support VLAN insertion from WQE and
		 * report the wqe_vlan_insert flag. But due to a hardware
		 * bug PFC control may be broken, so disable the feature.
		 */
		config->hw_vlan_insert = 0;
		break;
	default:
		config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
		break;
	}
exit:
	DRV_LOG(DEBUG, "min tx inline configured: %d", config->txq_inline_min);
}

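/*
 * Worked example (values from the description above): on a ConnectX-4 Lx
 * without DevX the fallback selects MLX5_INLINE_HSIZE_L2, i.e. 18 bytes -
 * a 14-byte Ethernet header plus a 4-byte VLAN tag - so every SEND WQE
 * carries at least the outer L2 header inline.
 */
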
/**
 * Spawn an Ethernet device from Verbs information.
 *
 * @param dpdk_dev
 *   Backing DPDK device.
 * @param spawn
 *   Verbs device parameters (name, port, switch_info) to spawn.
 * @param config
 *   Device configuration parameters.
 *
 * @return
 *   A valid Ethernet device object on success, NULL otherwise and rte_errno
 *   is set. The following errors are defined:
 *
 *   EBUSY: device is not supposed to be spawned.
 *   EEXIST: device is already spawned.
 */
static struct rte_eth_dev *
mlx5_dev_spawn(struct rte_device *dpdk_dev,
	       struct mlx5_dev_spawn_data *spawn,
	       struct mlx5_dev_config config)
{
	const struct mlx5_switch_info *switch_info = &spawn->info;
	struct mlx5_ibv_shared *sh = NULL;
	struct ibv_port_attr port_attr;
	struct mlx5dv_context dv_attr = { .comp_mask = 0 };
	struct rte_eth_dev *eth_dev = NULL;
	struct mlx5_priv *priv = NULL;
	int err = 0;
	unsigned int hw_padding = 0;
	unsigned int mps;
	unsigned int cqe_comp;
	unsigned int cqe_pad = 0;
	unsigned int tunnel_en = 0;
	unsigned int mpls_en = 0;
	unsigned int swp = 0;
	unsigned int mprq = 0;
	unsigned int mprq_min_stride_size_n = 0;
	unsigned int mprq_max_stride_size_n = 0;
	unsigned int mprq_min_stride_num_n = 0;
	unsigned int mprq_max_stride_num_n = 0;
	struct rte_ether_addr mac;
	char name[RTE_ETH_NAME_MAX_LEN];
	int own_domain_id = 0;
	uint16_t port_id;
	unsigned int i;

	/* Determine if this port representor is supposed to be spawned. */
	if (switch_info->representor && dpdk_dev->devargs) {
		struct rte_eth_devargs eth_da;

		err = rte_eth_devargs_parse(dpdk_dev->devargs->args, &eth_da);
		if (err) {
			rte_errno = -err;
			DRV_LOG(ERR, "failed to process device arguments: %s",
				strerror(rte_errno));
			return NULL;
		}
		for (i = 0; i < eth_da.nb_representor_ports; ++i)
			if (eth_da.representor_ports[i] ==
			    (uint16_t)switch_info->port_name)
				break;
		if (i == eth_da.nb_representor_ports) {
			rte_errno = EBUSY;
			return NULL;
		}
	}
	/* Build device name. */
	if (!switch_info->representor)
		strlcpy(name, dpdk_dev->name, sizeof(name));
	else
		snprintf(name, sizeof(name), "%s_representor_%u",
			 dpdk_dev->name, switch_info->port_name);
	/* Check if the device is already spawned. */
	if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) {
		rte_errno = EEXIST;
		return NULL;
	}
	DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (eth_dev == NULL) {
			DRV_LOG(ERR, "can not attach rte ethdev");
			rte_errno = ENOMEM;
			return NULL;
		}
		eth_dev->device = dpdk_dev;
		eth_dev->dev_ops = &mlx5_dev_sec_ops;
		err = mlx5_proc_priv_init(eth_dev);
		if (err)
			return NULL;
		/* Receive command fd from primary process. */
		err = mlx5_mp_req_verbs_cmd_fd(eth_dev);
		if (err < 0)
			return NULL;
		/* Remap UAR for Tx queues. */
		err = mlx5_tx_uar_init_secondary(eth_dev, err);
		if (err)
			return NULL;
		/*
		 * Ethdev pointer is still required as input since
		 * the primary device is not accessible from the
		 * secondary process.
		 */
		eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev);
		eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev);
		return eth_dev;
	}
	sh = mlx5_alloc_shared_ibctx(spawn);
	if (!sh)
		return NULL;
	config.devx = sh->devx;
#ifdef HAVE_IBV_MLX5_MOD_SWP
	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
#endif
	/*
	 * Multi-packet send is supported by ConnectX-4 Lx PF as well
	 * as all ConnectX-5 devices.
	 */
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
#endif
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
#endif
	mlx5_glue->dv_query_device(sh->ctx, &dv_attr);
	if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
		if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
			DRV_LOG(DEBUG, "enhanced MPW is supported");
			mps = MLX5_MPW_ENHANCED;
		} else {
			DRV_LOG(DEBUG, "MPW is supported");
			mps = MLX5_MPW;
		}
	} else {
		DRV_LOG(DEBUG, "MPW isn't supported");
		mps = MLX5_MPW_DISABLED;
	}
#ifdef HAVE_IBV_MLX5_MOD_SWP
	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
		swp = dv_attr.sw_parsing_caps.sw_parsing_offloads;
	DRV_LOG(DEBUG, "SWP support: %u", swp);
#endif
	config.swp = !!swp;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
		struct mlx5dv_striding_rq_caps mprq_caps =
			dv_attr.striding_rq_caps;

		DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d",
			mprq_caps.min_single_stride_log_num_of_bytes);
		DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d",
			mprq_caps.max_single_stride_log_num_of_bytes);
		DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d",
			mprq_caps.min_single_wqe_log_num_of_strides);
		DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d",
			mprq_caps.max_single_wqe_log_num_of_strides);
		DRV_LOG(DEBUG, "\tsupported_qpts: %d",
			mprq_caps.supported_qpts);
		DRV_LOG(DEBUG, "device supports Multi-Packet RQ");
		mprq = 1;
		mprq_min_stride_size_n =
			mprq_caps.min_single_stride_log_num_of_bytes;
		mprq_max_stride_size_n =
			mprq_caps.max_single_stride_log_num_of_bytes;
		mprq_min_stride_num_n =
			mprq_caps.min_single_wqe_log_num_of_strides;
		mprq_max_stride_num_n =
			mprq_caps.max_single_wqe_log_num_of_strides;
		config.mprq.stride_num_n = RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
						   mprq_min_stride_num_n);
	}
#endif
	if (RTE_CACHE_LINE_SIZE == 128 &&
	    !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
		cqe_comp = 0;
	else
		cqe_comp = 1;
	config.cqe_comp = cqe_comp;
#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
	/* Whether device supports 128B Rx CQE padding. */
	cqe_pad = RTE_CACHE_LINE_SIZE == 128 &&
		  (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD);
#endif
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
		tunnel_en = ((dv_attr.tunnel_offloads_caps &
			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) &&
			     (dv_attr.tunnel_offloads_caps &
			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE));
	}
	DRV_LOG(DEBUG, "tunnel offloading is %ssupported",
		tunnel_en ? "" : "not ");
#else
	DRV_LOG(WARNING,
		"tunnel offloading disabled due to old OFED/rdma-core version");
#endif
	config.tunnel_en = tunnel_en;
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
	mpls_en = ((dv_attr.tunnel_offloads_caps &
		    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
		   (dv_attr.tunnel_offloads_caps &
		    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
	DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported",
		mpls_en ? "" : "not ");
#else
	DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to"
		" old OFED/rdma-core version or firmware configuration");
#endif
	config.mpls_en = mpls_en;
	/* Check port status. */
	err = mlx5_glue->query_port(sh->ctx, spawn->ibv_port, &port_attr);
	if (err) {
		DRV_LOG(ERR, "port query failed: %s", strerror(err));
		goto error;
	}
	if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
		DRV_LOG(ERR, "port is not configured in Ethernet mode");
		err = EINVAL;
		goto error;
	}
	if (port_attr.state != IBV_PORT_ACTIVE)
		DRV_LOG(DEBUG, "port is not active: \"%s\" (%d)",
			mlx5_glue->port_state_str(port_attr.state),
			port_attr.state);
	/* Allocate private eth device data. */
	priv = rte_zmalloc("ethdev private structure",
			   sizeof(*priv),
			   RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DRV_LOG(ERR, "priv allocation failure");
		err = ENOMEM;
		goto error;
	}
	priv->sh = sh;
	priv->ibv_port = spawn->ibv_port;
	priv->mtu = RTE_ETHER_MTU;
#ifndef RTE_ARCH_64
	/* Initialize UAR access locks for 32bit implementations. */
	rte_spinlock_init(&priv->uar_lock_cq);
	for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
		rte_spinlock_init(&priv->uar_lock[i]);
#endif
	/* Some internal functions rely on Netlink sockets, open them now. */
	priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA);
	priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE);
	priv->nl_sn = 0;
	priv->representor = !!switch_info->representor;
	priv->master = !!switch_info->master;
	priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
	/*
	 * Currently we support single E-Switch per PF configurations
	 * only and vport_id field contains the vport index for
	 * associated VF, which is deduced from representor port name.
	 * For example, let's have the IB device port 10, it has
	 * attached network device eth0, which has port name attribute
	 * pf0vf2, we can deduce the VF number as 2, and set vport index
	 * as 3 (2+1). This assigning schema should be changed if the
	 * multiple E-Switch instances per PF configurations or/and PCI
	 * subfunctions are added.
	 */
	priv->vport_id = switch_info->representor ?
			 switch_info->port_name + 1 : -1;
	/* representor_id field keeps the unmodified port/VF index. */
	priv->representor_id = switch_info->representor ?
			       switch_info->port_name : -1;
	/*
	 * Look for sibling devices in order to reuse their switch domain
	 * if any, otherwise allocate one.
	 */
	RTE_ETH_FOREACH_DEV_OF(port_id, dpdk_dev) {
		const struct mlx5_priv *opriv =
			rte_eth_devices[port_id].data->dev_private;

		if (!opriv ||
		    opriv->domain_id ==
		    RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
			continue;
		priv->domain_id = opriv->domain_id;
		break;
	}
	if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
		err = rte_eth_switch_domain_alloc(&priv->domain_id);
		if (err) {
			err = rte_errno;
			DRV_LOG(ERR, "unable to allocate switch domain: %s",
				strerror(rte_errno));
			goto error;
		}
		own_domain_id = 1;
	}
	err = mlx5_args(&config, dpdk_dev->devargs);
	if (err) {
		err = rte_errno;
		DRV_LOG(ERR, "failed to process device arguments: %s",
			strerror(rte_errno));
		goto error;
	}
	config.hw_csum = !!(sh->device_attr.device_cap_flags_ex &
			    IBV_DEVICE_RAW_IP_CSUM);
	DRV_LOG(DEBUG, "checksum offloading is %ssupported",
		(config.hw_csum ? "" : "not "));
#if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \
	!defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	DRV_LOG(DEBUG, "counters are not supported");
#endif
#ifndef HAVE_IBV_FLOW_DV_SUPPORT
	if (config.dv_flow_en) {
		DRV_LOG(WARNING, "DV flow is not supported");
		config.dv_flow_en = 0;
	}
#endif
	config.ind_table_max_size =
		sh->device_attr.rss_caps.max_rwq_indirection_table_size;
	/*
	 * Remove this check once DPDK supports larger/variable
	 * indirection tables.
	 */
	if (config.ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
		config.ind_table_max_size = ETH_RSS_RETA_SIZE_512;
	DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
		config.ind_table_max_size);
	config.hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
				  IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
	DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
		(config.hw_vlan_strip ? "" : "not "));
	config.hw_fcs_strip = !!(sh->device_attr.raw_packet_caps &
				 IBV_RAW_PACKET_CAP_SCATTER_FCS);
	DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
		(config.hw_fcs_strip ? "" : "not "));
#if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
	hw_padding = !!sh->device_attr.rx_pad_end_addr_align;
#elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
	hw_padding = !!(sh->device_attr.device_cap_flags_ex &
			IBV_DEVICE_PCI_WRITE_END_PADDING);
#endif
	if (config.hw_padding && !hw_padding) {
		DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
		config.hw_padding = 0;
	} else if (config.hw_padding) {
		DRV_LOG(DEBUG, "Rx end alignment padding is enabled");
	}
	config.tso = (sh->device_attr.tso_caps.max_tso > 0 &&
		      (sh->device_attr.tso_caps.supported_qpts &
		       (1 << IBV_QPT_RAW_PACKET)));
	if (config.tso)
		config.tso_max_payload_sz = sh->device_attr.tso_caps.max_tso;
	/*
	 * MPW is disabled by default, while the Enhanced MPW is enabled
	 * by default.
	 */
	if (config.mps == MLX5_ARG_UNSET)
		config.mps = (mps == MLX5_MPW_ENHANCED) ? MLX5_MPW_ENHANCED :
							  MLX5_MPW_DISABLED;
	else
		config.mps = config.mps ? mps : MLX5_MPW_DISABLED;
	DRV_LOG(INFO, "%sMPS is %s",
		config.mps == MLX5_MPW_ENHANCED ? "enhanced " : "",
		config.mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
	if (config.cqe_comp && !cqe_comp) {
		DRV_LOG(WARNING, "Rx CQE compression isn't supported");
		config.cqe_comp = 0;
	}
	if (config.cqe_pad && !cqe_pad) {
		DRV_LOG(WARNING, "Rx CQE padding isn't supported");
		config.cqe_pad = 0;
	} else if (config.cqe_pad) {
		DRV_LOG(INFO, "Rx CQE padding is enabled");
	}
	if (config.mprq.enabled && mprq) {
		if (config.mprq.stride_num_n > mprq_max_stride_num_n ||
		    config.mprq.stride_num_n < mprq_min_stride_num_n) {
			config.mprq.stride_num_n =
				RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
					mprq_min_stride_num_n);
			DRV_LOG(WARNING,
				"the number of strides"
				" for Multi-Packet RQ is out of range,"
				" setting default value (%u)",
				1 << config.mprq.stride_num_n);
		}
		config.mprq.min_stride_size_n = mprq_min_stride_size_n;
		config.mprq.max_stride_size_n = mprq_max_stride_size_n;
	} else if (config.mprq.enabled && !mprq) {
		DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
		config.mprq.enabled = 0;
	}
	if (config.max_dump_files_num == 0)
		config.max_dump_files_num = 128;
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL) {
		DRV_LOG(ERR, "can not allocate rte ethdev");
		err = ENOMEM;
		goto error;
	}
	/* Flag to call rte_eth_dev_release_port() in rte_eth_dev_close(). */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
	if (priv->representor) {
		eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
		eth_dev->data->representor_id = priv->representor_id;
	}
	eth_dev->data->dev_private = priv;
	priv->dev_data = eth_dev->data;
	eth_dev->data->mac_addrs = priv->mac;
	eth_dev->device = dpdk_dev;
	/* Configure the first MAC address by default. */
	if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
		DRV_LOG(ERR,
			"port %u cannot get MAC address, is mlx5_en"
			" loaded? (errno: %s)",
			eth_dev->data->port_id, strerror(rte_errno));
		err = ENODEV;
		goto error;
	}
	DRV_LOG(INFO,
		"port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
		eth_dev->data->port_id,
		mac.addr_bytes[0], mac.addr_bytes[1],
		mac.addr_bytes[2], mac.addr_bytes[3],
		mac.addr_bytes[4], mac.addr_bytes[5]);
#ifndef NDEBUG
	{
		char ifname[IF_NAMESIZE];

		if (mlx5_get_ifname(eth_dev, &ifname) == 0)
			DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
				eth_dev->data->port_id, ifname);
		else
			DRV_LOG(DEBUG, "port %u ifname is unknown",
				eth_dev->data->port_id);
	}
#endif
	/* Get actual MTU if possible. */
	err = mlx5_get_mtu(eth_dev, &priv->mtu);
	if (err) {
		err = rte_errno;
		goto error;
	}
	DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id,
		priv->mtu);
	/* Initialize burst functions to prevent crashes before link-up. */
	eth_dev->rx_pkt_burst = removed_rx_burst;
	eth_dev->tx_pkt_burst = removed_tx_burst;
	eth_dev->dev_ops = &mlx5_dev_ops;
	/* Register MAC address. */
	claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
	if (config.vf && config.vf_nl_en)
		mlx5_nl_mac_addr_sync(eth_dev);
	TAILQ_INIT(&priv->flows);
	TAILQ_INIT(&priv->ctrl_flows);
	/* Hint libmlx5 to use PMD allocator for data plane resources. */
	struct mlx5dv_ctx_allocators alctr = {
		.alloc = &mlx5_alloc_verbs_buf,
		.free = &mlx5_free_verbs_buf,
		.data = priv,
	};
	mlx5_glue->dv_set_context_attr(sh->ctx,
				       MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
				       (void *)((uintptr_t)&alctr));
	/* Bring Ethernet device up. */
	DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
		eth_dev->data->port_id);
	mlx5_set_link_up(eth_dev);
	/*
	 * Even though the interrupt handler is not installed yet,
	 * interrupts will still trigger on the async_fd from
	 * Verbs context returned by ibv_open_device().
	 */
	mlx5_link_update(eth_dev, 0);
#ifdef HAVE_IBV_DEVX_OBJ
	if (config.devx) {
		priv->counter_fallback = 0;
		err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config.hca_attr);
		if (err) {
			err = -err;
			goto error;
		}
		if (!config.hca_attr.flow_counters_dump)
			priv->counter_fallback = 1;
#ifndef HAVE_IBV_DEVX_ASYNC
		priv->counter_fallback = 1;
#endif
		if (priv->counter_fallback)
			DRV_LOG(INFO, "Use fall-back DV counter management\n");
	}
#endif
#ifdef HAVE_MLX5DV_DR_ESWITCH
	if (!(config.hca_attr.eswitch_manager && config.dv_flow_en &&
	      (switch_info->representor || switch_info->master)))
		config.dv_esw_en = 0;
#else
	config.dv_esw_en = 0;
#endif
	/* Detect minimal data bytes to inline. */
	mlx5_set_min_inline(spawn, &config);
	/* Store device configuration on private structure. */
	priv->config = config;
	if (config.dv_flow_en) {
		err = mlx5_alloc_shared_dr(priv);
		if (err)
			goto error;
	}
	/* Supported Verbs flow priority number detection. */
	err = mlx5_flow_discover_priorities(eth_dev);
	if (err < 0) {
		err = -err;
		goto error;
	}
	priv->config.flow_prio = err;
	/* Add device to memory callback list. */
	rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
	LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
			 sh, mem_event_cb);
	rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
	return eth_dev;
error:
	if (priv) {
		if (priv->sh)
			mlx5_free_shared_dr(priv);
		if (priv->nl_socket_route >= 0)
			close(priv->nl_socket_route);
		if (priv->nl_socket_rdma >= 0)
			close(priv->nl_socket_rdma);
		if (own_domain_id)
			claim_zero(rte_eth_switch_domain_free
				   (priv->domain_id));
		rte_free(priv);
		if (eth_dev != NULL)
			eth_dev->data->dev_private = NULL;
	}
	if (eth_dev != NULL) {
		/* mac_addrs must not be freed alone, part of dev_private. */
		eth_dev->data->mac_addrs = NULL;
		rte_eth_dev_release_port(eth_dev);
	}
	if (sh)
		mlx5_free_shared_ibctx(sh);
	assert(err > 0);
	rte_errno = err;
	return NULL;
}

/**
 * Comparison callback to sort device data.
 *
 * This is meant to be used with qsort().
 *
 * @param a[in]
 *   Pointer to pointer to first data object.
 * @param b[in]
 *   Pointer to pointer to second data object.
 *
 * @return
 *   0 if both objects are equal, less than 0 if the first argument is less
 *   than the second, greater than 0 otherwise.
 */
static int
mlx5_dev_spawn_data_cmp(const void *a, const void *b)
{
	const struct mlx5_switch_info *si_a =
		&((const struct mlx5_dev_spawn_data *)a)->info;
	const struct mlx5_switch_info *si_b =
		&((const struct mlx5_dev_spawn_data *)b)->info;
	int ret;

	/* Master device first. */
	ret = si_b->master - si_a->master;
	if (ret)
		return ret;
	/* Then representor devices. */
	ret = si_b->representor - si_a->representor;
	if (ret)
		return ret;
	/* Unidentified devices come last in no specific order. */
	if (!si_a->representor)
		return 0;
	/* Order representors by name. */
	return si_a->port_name - si_b->port_name;
}

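/*
 * Example ordering (hypothetical port names): qsort() with this callback
 * yields the master first, then representors by ascending port_name
 * (e.g. master, rep 0, rep 1, ...), with unidentified devices at the end.
 */
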
1888 * DPDK callback to register a PCI device.
1890 * This function spawns Ethernet devices out of a given PCI device.
1892 * @param[in] pci_drv
1893 * PCI driver structure (mlx5_driver).
1894 * @param[in] pci_dev
1895 * PCI device information.
1898 * 0 on success, a negative errno value otherwise and rte_errno is set.
1901 mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1902 struct rte_pci_device *pci_dev)
1904 struct ibv_device **ibv_list;
1906 * Number of found IB Devices matching with requested PCI BDF.
1907 * nd != 1 means there are multiple IB devices over the same
1908 * PCI device and we have representors and master.
1910 unsigned int nd = 0;
1912 * Number of found IB device Ports. nd = 1 and np = 1..n means
1913 * we have the single multiport IB device, and there may be
1914 * representors attached to some of found ports.
1916 unsigned int np = 0;
1918 * Number of DPDK ethernet devices to Spawn - either over
1919 * multiple IB devices or multiple ports of single IB device.
1920 * Actually this is the number of iterations to spawn.
1922 unsigned int ns = 0;
1923 struct mlx5_dev_config dev_config;
1926 ret = mlx5_init_once();
1928 DRV_LOG(ERR, "unable to init PMD global data: %s",
1929 strerror(rte_errno));
1932 assert(pci_drv == &mlx5_driver);
1934 ibv_list = mlx5_glue->get_device_list(&ret);
1936 rte_errno = errno ? errno : ENOSYS;
1937 DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?");
1941 * First scan the list of all Infiniband devices to find
1942 * matching ones, gathering into the list.
1944 struct ibv_device *ibv_match[ret + 1];
1950 struct rte_pci_addr pci_addr;
1952 DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name);
1953 if (mlx5_ibv_device_to_pci_addr(ibv_list[ret], &pci_addr))
1955 if (pci_dev->addr.domain != pci_addr.domain ||
1956 pci_dev->addr.bus != pci_addr.bus ||
1957 pci_dev->addr.devid != pci_addr.devid ||
1958 pci_dev->addr.function != pci_addr.function)
1960 DRV_LOG(INFO, "PCI information matches for device \"%s\"",
1961 ibv_list[ret]->name);
1962 ibv_match[nd++] = ibv_list[ret];
1964 ibv_match[nd] = NULL;
1966 /* No device matches, just complain and bail out. */
1967 mlx5_glue->free_device_list(ibv_list);
1969 "no Verbs device matches PCI device " PCI_PRI_FMT ","
1970 " are kernel drivers loaded?",
1971 pci_dev->addr.domain, pci_dev->addr.bus,
1972 pci_dev->addr.devid, pci_dev->addr.function);
1977 nl_route = mlx5_nl_init(NETLINK_ROUTE);
1978 nl_rdma = mlx5_nl_init(NETLINK_RDMA);
1981 * Found single matching device may have multiple ports.
1982 * Each port may be representor, we have to check the port
1983 * number and check the representors existence.
1986 np = mlx5_nl_portnum(nl_rdma, ibv_match[0]->name);
1988 DRV_LOG(WARNING, "can not get IB device \"%s\""
1989 " ports number", ibv_match[0]->name);
1992 * Now we can determine the maximal
1993 * amount of devices to be spawned.
1995 struct mlx5_dev_spawn_data list[np ? np : nd];
1999 * Single IB device with multiple ports found,
2000 * it may be E-Switch master device and representors.
2001 * We have to perform identification trough the ports.
2003 assert(nl_rdma >= 0);
2006 for (i = 1; i <= np; ++i) {
2007 list[ns].max_port = np;
2008 list[ns].ibv_port = i;
2009 list[ns].ibv_dev = ibv_match[0];
2010 list[ns].eth_dev = NULL;
2011 list[ns].pci_dev = pci_dev;
2012 list[ns].ifindex = mlx5_nl_ifindex
2013 (nl_rdma, list[ns].ibv_dev->name, i);
2014 if (!list[ns].ifindex) {
2016 * No network interface index found for the
2017 * specified port, it means there is no
2018 * representor on this port. It's OK,
2019 * there can be disabled ports, for example
2020 * if sriov_numvfs < sriov_totalvfs.
2026 ret = mlx5_nl_switch_info
2030 if (ret || (!list[ns].info.representor &&
2031 !list[ns].info.master)) {
2033 * We failed to recognize representors with
2034 * Netlink, let's try to perform the task
2037 ret = mlx5_sysfs_switch_info
2041 if (!ret && (list[ns].info.representor ^
2042 list[ns].info.master))
2047 "unable to recognize master/representors"
2048 " on the IB device with multiple ports");
2055 * The existence of several matching entries (nd > 1) means
2056 * port representors have been instantiated. No existing Verbs
2057 * call nor sysfs entries can tell them apart, this can only
2058 * be done through Netlink calls assuming kernel drivers are
2059 * recent enough to support them.
2061 * In the event of identification failure through Netlink,
2062 * try again through sysfs, then:
2064 * 1. A single IB device matches (nd == 1) with single
2065 * port (np=0/1) and is not a representor, assume
2066 * no switch support.
2068 * 2. Otherwise no safe assumptions can be made;
2069 * complain louder and bail out.
2072 for (i = 0; i != nd; ++i) {
2073 memset(&list[ns].info, 0, sizeof(list[ns].info));
2074 list[ns].max_port = 1;
2075 list[ns].ibv_port = 1;
2076 list[ns].ibv_dev = ibv_match[i];
2077 list[ns].eth_dev = NULL;
2078 list[ns].pci_dev = pci_dev;
2079 list[ns].ifindex = 0;
2081 list[ns].ifindex = mlx5_nl_ifindex
2082 (nl_rdma, list[ns].ibv_dev->name, 1);
2083 if (!list[ns].ifindex) {
2084 char ifname[IF_NAMESIZE];
				/*
				 * Netlink failed, which may happen with an
				 * old ib_core kernel driver (before 4.16).
				 * We can assume the driver is old because
				 * we are processing single-port IB devices
				 * here. Let's try sysfs to retrieve the
				 * ifindex. This method works for the
				 * master device only.
				 */
				if (nd > 1) {
					/*
					 * Multiple devices were found, assume
					 * representors: we cannot distinguish
					 * master from representor nor get the
					 * ifindex via sysfs.
					 */
					continue;
				}
				ret = mlx5_get_master_ifname
					(ibv_match[i]->ibdev_path, &ifname);
				if (!ret)
					list[ns].ifindex =
						if_nametoindex(ifname);
				if (!list[ns].ifindex) {
					/*
					 * No network interface index found
					 * for the specified device; it is
					 * neither a representor nor a
					 * master.
					 */
					continue;
				}
			}
			ret = mlx5_nl_switch_info
				(nl_route, list[ns].ifindex, &list[ns].info);
			if (ret || (!list[ns].info.representor &&
				    !list[ns].info.master)) {
				/*
				 * We failed to recognize representors with
				 * Netlink, let's try to perform the task
				 * with sysfs.
				 */
				ret = mlx5_sysfs_switch_info
					(list[ns].ifindex, &list[ns].info);
			}
			if (!ret && (list[ns].info.representor ^
				     list[ns].info.master)) {
				ns++;
			} else if ((nd == 1) &&
				   !list[ns].info.representor &&
				   !list[ns].info.master) {
				/*
				 * Single IB device with
				 * one physical port and
				 * attached network device.
				 * Maybe SR-IOV is not enabled
				 * or there are no representors.
				 */
				DRV_LOG(INFO, "no E-Switch support detected");
				ns++;
				break;
			}
		}
		if (!ns) {
			DRV_LOG(ERR,
				"unable to recognize master/representors"
				" on the multiple IB devices");
			rte_errno = ENOENT;
			ret = -rte_errno;
			goto exit;
		}
	}
	/*
	 * Sort the list to probe devices in natural order for the user's
	 * convenience (i.e. master first, then representors from lowest
	 * to highest ID).
	 */
	qsort(list, ns, sizeof(*list), mlx5_dev_spawn_data_cmp);
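	/*
	 * Illustrative ordering (assuming one master and two representors):
	 *   before qsort: { rep#1, master, rep#0 }
	 *   after qsort:  { master, rep#0, rep#1 }
	 */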
	/* Default configuration. */
	dev_config = (struct mlx5_dev_config){
		.mps = MLX5_ARG_UNSET,
		.txq_inline_max = MLX5_ARG_UNSET,
		.txq_inline_min = MLX5_ARG_UNSET,
		.txq_inline_mpw = MLX5_ARG_UNSET,
		.txqs_inline = MLX5_ARG_UNSET,
		.mr_ext_memseg_en = 1,
		.mprq = {
			.enabled = 0, /* Disabled by default. */
			.stride_num_n = MLX5_MPRQ_STRIDE_NUM_N,
			.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
			.min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
		},
	};
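	/*
	 * Fields initialized to MLX5_ARG_UNSET above are not forced here;
	 * they are resolved later from devargs and device capabilities.
	 */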
	/* Device specific configuration. */
	switch (pci_dev->id.device_id) {
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
		dev_config.vf = 1;
		break;
	default:
		break;
	}
	for (i = 0; i != ns; ++i) {
		uint32_t restore;

		list[i].eth_dev = mlx5_dev_spawn(&pci_dev->device,
						 &list[i],
						 dev_config);
		if (!list[i].eth_dev) {
			if (rte_errno != EBUSY && rte_errno != EEXIST)
				break;
			/* Device is disabled or already spawned. Ignore it. */
			continue;
		}
		restore = list[i].eth_dev->data->dev_flags;
		rte_eth_copy_pci_info(list[i].eth_dev, pci_dev);
		/* Restore non-PCI flags cleared by the above call. */
		list[i].eth_dev->data->dev_flags |= restore;
		rte_eth_dev_probing_finish(list[i].eth_dev);
	}
	if (i != ns) {
		DRV_LOG(ERR,
			"probe of PCI device " PCI_PRI_FMT " aborted after"
			" encountering an error: %s",
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function,
			strerror(rte_errno));
		ret = -rte_errno;
		/* Roll back. */
		while (i--) {
			if (!list[i].eth_dev)
				continue;
			mlx5_dev_close(list[i].eth_dev);
			/* mac_addrs must not be freed, it is part of
			 * dev_private. */
			list[i].eth_dev->data->mac_addrs = NULL;
			claim_zero(rte_eth_dev_release_port(list[i].eth_dev));
		}
		/* Restore original error. */
		rte_errno = -ret;
	} else {
		ret = 0;
	}
exit:
	/*
	 * Do the routine cleanup:
	 * - close opened Netlink sockets
	 * - free the Infiniband device list
	 */
	if (nl_rdma >= 0)
		close(nl_rdma);
	if (nl_route >= 0)
		close(nl_route);
	mlx5_glue->free_device_list(ibv_list);
	return ret;
}
/**
 * DPDK callback to remove a PCI device.
 *
 * This function removes all Ethernet devices belonging to a given PCI
 * device.
 *
 * @param[in] pci_dev
 *   Pointer to the PCI device.
 *
 * @return
 *   0 on success, the function cannot fail.
 */
static int
mlx5_pci_remove(struct rte_pci_device *pci_dev)
{
	uint16_t port_id;

	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
		rte_eth_dev_close(port_id);
	return 0;
}
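/*
 * Note: RTE_ETH_FOREACH_DEV_OF() walks every ethdev port created on top of
 * the given rte_device, so the master port and any representor ports spawned
 * by mlx5_pci_probe() are all closed in one pass.
 */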
static const struct rte_pci_id mlx5_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6VF)
	},
	{
		.vendor_id = 0
	}
};
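/*
 * Note: RTE_PCI_DRV_PROBE_AGAIN below allows mlx5_pci_probe() to be invoked
 * again on an already probed device, e.g. to spawn representors created
 * after the initial probe.
 */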
static struct rte_pci_driver mlx5_driver = {
	.driver = {
		.name = MLX5_DRIVER_NAME
	},
	.id_table = mlx5_pci_id_map,
	.probe = mlx5_pci_probe,
	.remove = mlx5_pci_remove,
	.dma_map = mlx5_dma_map,
	.dma_unmap = mlx5_dma_unmap,
	.drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV |
		     RTE_PCI_DRV_PROBE_AGAIN,
};
#ifdef RTE_IBVERBS_LINK_DLOPEN

/**
 * Suffix RTE_EAL_PMD_PATH with "-glue".
 *
 * This function performs a sanity check on RTE_EAL_PMD_PATH before
 * suffixing its last component.
 *
 * @param buf
 *   Output buffer, should be large enough otherwise NULL is returned.
 * @param size
 *   Size of @p buf.
 *
 * @return
 *   Pointer to @p buf or @p NULL in case suffix cannot be appended.
 */
static char *
mlx5_glue_path(char *buf, size_t size)
{
	static const char *const bad[] = { "/", ".", "..", NULL };
	const char *path = RTE_EAL_PMD_PATH;
	size_t len = strlen(path);
	size_t off;
	int i;

	while (len && path[len - 1] == '/')
		--len;
	for (off = len; off && path[off - 1] != '/'; --off)
		;
	for (i = 0; bad[i]; ++i)
		if (!strncmp(path + off, bad[i], (int)(len - off)))
			goto error;
	i = snprintf(buf, size, "%.*s-glue", (int)len, path);
	if (i == -1 || (size_t)i >= size)
		goto error;
	return buf;
error:
	DRV_LOG(ERR,
		"unable to append \"-glue\" to last component of"
		" RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\"),"
		" please re-configure DPDK");
	return NULL;
}
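/*
 * Example (illustrative): with RTE_EAL_PMD_PATH = "/usr/local/lib/dpdk-pmds"
 * the function yields "/usr/local/lib/dpdk-pmds-glue", while a last
 * component of "/", "." or ".." makes it return NULL instead.
 */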
/**
 * Initialization routine for run-time dependency on rdma-core.
 */
static int
mlx5_glue_init(void)
{
	char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")];
	const char *path[] = {
		/*
		 * A basic security check is necessary before trusting
		 * MLX5_GLUE_PATH, which may override RTE_EAL_PMD_PATH.
		 */
		(geteuid() == getuid() && getegid() == getgid() ?
		 getenv("MLX5_GLUE_PATH") : NULL),
		/*
		 * When RTE_EAL_PMD_PATH is set, use its glue-suffixed
		 * variant, otherwise let dlopen() look up libraries on
		 * its own.
		 */
		(*RTE_EAL_PMD_PATH ?
		 mlx5_glue_path(glue_path, sizeof(glue_path)) : ""),
	};
	void *handle = NULL;
	void **sym;
	const char *dlmsg;
	unsigned int i = 0;

	while (!handle && i != RTE_DIM(path)) {
		const char *end;
		size_t len;
		int ret;

		if (!path[i]) {
			++i;
			continue;
		}
		end = strpbrk(path[i], ":;");
		if (!end)
			end = path[i] + strlen(path[i]);
		len = end - path[i];
		if (!len) {
			++i;
			continue;
		}
		ret = 0;
		do {
			char name[ret + 1];

			ret = snprintf(name, sizeof(name), "%.*s%s" MLX5_GLUE,
				       (int)len, path[i],
				       (!len || *(end - 1) == '/') ? "" : "/");
			if (ret == -1)
				break;
			if (sizeof(name) != (size_t)ret + 1)
				continue;
			DRV_LOG(DEBUG, "looking for rdma-core glue as \"%s\"",
				name);
			handle = dlopen(name, RTLD_LAZY);
			break;
		} while (1);
		path[i] = end + 1;
		if (!*end)
			++i;
	}
	if (!handle) {
		rte_errno = EINVAL;
		dlmsg = dlerror();
		if (dlmsg)
			DRV_LOG(WARNING, "cannot load glue library: %s",
				dlmsg);
		goto glue_error;
	}
	sym = dlsym(handle, "mlx5_glue");
	if (!sym || !*sym) {
		rte_errno = EINVAL;
		dlmsg = dlerror();
		if (dlmsg)
			DRV_LOG(ERR, "cannot resolve glue symbol: %s",
				dlmsg);
		goto glue_error;
	}
	mlx5_glue = *sym;
	return 0;
glue_error:
	if (handle)
		dlclose(handle);
	DRV_LOG(WARNING,
		"cannot initialize PMD due to missing run-time dependency on"
		" rdma-core libraries (libibverbs, libmlx5)");
	return -rte_errno;
}

#endif
/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx5_pmd_init)
{
	/* Initialize driver log type. */
	mlx5_logtype = rte_log_register("pmd.net.mlx5");
	if (mlx5_logtype >= 0)
		rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE);

	/* Build the static tables for Verbs conversion. */
	mlx5_set_ptype_table();
	mlx5_set_cksum_table();
	mlx5_set_swp_types_table();
	/*
	 * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
	 * huge pages. Calling ibv_fork_init() during init allows
	 * applications to use fork() safely for purposes other than
	 * using this PMD, which is not supported in forked processes.
	 */
	setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
	/* Match the size of Rx completion entry to the size of a cacheline. */
	if (RTE_CACHE_LINE_SIZE == 128)
		setenv("MLX5_CQE_SIZE", "128", 0);
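	/*
	 * Note: the overwrite flag 0 above keeps a user-provided
	 * MLX5_CQE_SIZE untouched, while the setenv() calls with flag 1
	 * force the PMD defaults.
	 */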
	/*
	 * MLX5_DEVICE_FATAL_CLEANUP tells ibv_destroy functions to
	 * clean up all the Verbs resources even when the device was removed.
	 */
	setenv("MLX5_DEVICE_FATAL_CLEANUP", "1", 1);
#ifdef RTE_IBVERBS_LINK_DLOPEN
	if (mlx5_glue_init())
		return;
	assert(mlx5_glue);
#endif
#ifndef NDEBUG
	/* Glue structure must not contain any NULL pointers. */
	{
		unsigned int i;

		for (i = 0; i != sizeof(*mlx5_glue) / sizeof(void *); ++i)
			assert(((const void *const *)mlx5_glue)[i]);
	}
#endif
	if (strcmp(mlx5_glue->version, MLX5_GLUE_VERSION)) {
		DRV_LOG(ERR,
			"rdma-core glue \"%s\" mismatch: \"%s\" is required",
			mlx5_glue->version, MLX5_GLUE_VERSION);
		return;
	}
	mlx5_glue->fork_init();
	rte_pci_register(&mlx5_driver);
}
RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib");