/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <assert.h>
#include <dlfcn.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <net/if.h>
#include <sys/mman.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_eal_memconfig.h>
#include <rte_kvargs.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_glue.h"
#include "mlx5_mr.h"

/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"

/* Device parameter to enable Multi-Packet Rx queue. */
#define MLX5_RX_MPRQ_EN "mprq_en"

/* Device parameter to configure log 2 of the number of strides for MPRQ. */
#define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num"

/* Device parameter to limit the size of memcpy'd packet for MPRQ. */
#define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len"

/* Device parameter to set the minimum number of Rx queues to enable MPRQ. */
#define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq"

/* Device parameter to configure inline send. */
#define MLX5_TXQ_INLINE "txq_inline"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling inline send.
 */
#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"

/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"

/* Device parameter to include 2 dsegs in the title WQEBB. */
#define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en"

/* Device parameter to limit the size of inlining packet. */
#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"

/* Device parameter to enable hardware Tx vector. */
#define MLX5_TX_VEC_EN "tx_vec_en"

/* Device parameter to enable hardware Rx vector. */
#define MLX5_RX_VEC_EN "rx_vec_en"

/* Allow L3 VXLAN flow creation. */
#define MLX5_L3_VXLAN_EN "l3_vxlan_en"

/* Activate Netlink support in VF mode. */
#define MLX5_VF_NL_EN "vf_nl_en"
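
/*
 * Illustrative example (not part of the driver): the keys above are consumed
 * from the device argument string attached to the PCI address on the EAL
 * command line, e.g. with a hypothetical port at 0000:03:00.0:
 *
 *   testpmd -w 0000:03:00.0,rxq_cqe_comp_en=0,mprq_en=1,txq_inline=200 ...
 *
 * Each key=value pair is parsed by mlx5_args() and validated by
 * mlx5_args_check() below.
 */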

#ifndef HAVE_IBV_MLX5_MOD_MPW
#define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
#endif

#ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
#endif

static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";

/* Shared memory between primary and secondary processes. */
struct mlx5_shared_data *mlx5_shared_data;

/* Spinlock for mlx5_shared_data allocation. */
static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/** Driver-specific log messages type. */
int mlx5_logtype;

/**
 * Prepare shared data between primary and secondary process.
 */
static void
mlx5_prepare_shared_data(void)
{
	const struct rte_memzone *mz;

	rte_spinlock_lock(&mlx5_shared_data_lock);
	if (mlx5_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate shared memory. */
			mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
						 sizeof(*mlx5_shared_data),
						 SOCKET_ID_ANY, 0);
		} else {
			/* Lookup allocated shared memory. */
			mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
		}
		if (mz == NULL)
			rte_panic("Cannot allocate mlx5 shared data\n");
		mlx5_shared_data = mz->addr;
		/* Initialize shared data. */
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			LIST_INIT(&mlx5_shared_data->mem_event_cb_list);
			rte_rwlock_init(&mlx5_shared_data->mem_event_rwlock);
		}
		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
						mlx5_mr_mem_event_cb, NULL);
	}
	rte_spinlock_unlock(&mlx5_shared_data_lock);
}
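
/*
 * Note: the memzone name acts as the rendezvous point between processes;
 * the primary reserves it while secondaries merely look it up, so both end
 * up with the same mlx5_shared_data mapping.
 */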

/**
 * Retrieve integer value from environment variable.
 *
 * @param[in] name
 *   Environment variable name.
 *
 * @return
 *   Integer value, 0 if the variable is not set.
 */
int
mlx5_getenv_int(const char *name)
{
	const char *val = getenv(name);

	if (val == NULL)
		return 0;
	return atoi(val);
}

/**
 * Verbs callback to allocate memory. This function should allocate the space
 * according to the size provided residing inside a huge page.
 * Please note that all allocation must respect the alignment from libmlx5
 * (i.e. currently sysconf(_SC_PAGESIZE)).
 *
 * @param[in] size
 *   The size in bytes of the memory to allocate.
 * @param[in] data
 *   A pointer to the callback data.
 *
 * @return
 *   Allocated buffer, NULL otherwise and rte_errno is set.
 */
static void *
mlx5_alloc_verbs_buf(size_t size, void *data)
{
	struct priv *priv = data;
	void *ret;
	size_t alignment = sysconf(_SC_PAGESIZE);
	unsigned int socket = SOCKET_ID_ANY;

	if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) {
		const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;

		socket = ctrl->socket;
	} else if (priv->verbs_alloc_ctx.type ==
		   MLX5_VERBS_ALLOC_TYPE_RX_QUEUE) {
		const struct mlx5_rxq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;

		socket = ctrl->socket;
	}
	assert(data != NULL);
	ret = rte_malloc_socket(__func__, size, alignment, socket);
	if (!ret && size)
		rte_errno = ENOMEM;
	return ret;
}
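
/*
 * The allocator above, together with mlx5_free_verbs_buf() below, is handed
 * to libmlx5 through MLX5DV_CTX_ATTR_BUF_ALLOCATORS (see mlx5_dev_spawn())
 * so that queue buffers come from DPDK hugepage memory on the right NUMA
 * socket instead of plain malloc().
 */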

/**
 * Verbs callback to free memory.
 *
 * @param[in] ptr
 *   A pointer to the memory to free.
 * @param[in] data
 *   A pointer to the callback data.
 */
static void
mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
{
	assert(data != NULL);
	rte_free(ptr);
}

/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_dev_close(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret;

	DRV_LOG(DEBUG, "port %u closing device \"%s\"",
		dev->data->port_id,
		((priv->ctx != NULL) ? priv->ctx->device->name : ""));
	/* In case mlx5_dev_stop() has not been called. */
	mlx5_dev_interrupt_handler_uninstall(dev);
	mlx5_traffic_disable(dev);
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	if (priv->rxqs != NULL) {
		/* XXX race condition if mlx5_rx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->rxqs_n); ++i)
			mlx5_rxq_release(dev, i);
		priv->rxqs_n = 0;
		priv->rxqs = NULL;
	}
	if (priv->txqs != NULL) {
		/* XXX race condition if mlx5_tx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->txqs_n); ++i)
			mlx5_txq_release(dev, i);
		priv->txqs_n = 0;
		priv->txqs = NULL;
	}
	mlx5_flow_delete_drop_queue(dev);
	mlx5_mprq_free_mp(dev);
	mlx5_mr_release(dev);
	if (priv->pd != NULL) {
		assert(priv->ctx != NULL);
		claim_zero(mlx5_glue->dealloc_pd(priv->pd));
		claim_zero(mlx5_glue->close_device(priv->ctx));
	} else
		assert(priv->ctx == NULL);
	if (priv->rss_conf.rss_key != NULL)
		rte_free(priv->rss_conf.rss_key);
	if (priv->reta_idx != NULL)
		rte_free(priv->reta_idx);
	if (priv->primary_socket)
		mlx5_socket_uninit(dev);
	if (priv->config.vf)
		mlx5_nl_mac_addr_flush(dev);
	if (priv->nl_socket_route >= 0)
		close(priv->nl_socket_route);
	if (priv->nl_socket_rdma >= 0)
		close(priv->nl_socket_rdma);
	ret = mlx5_hrxq_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some hash Rx queues still remain",
			dev->data->port_id);
	ret = mlx5_ind_table_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some indirection tables still remain",
			dev->data->port_id);
	ret = mlx5_rxq_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Verbs Rx queues still remain",
			dev->data->port_id);
	ret = mlx5_rxq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Rx queues still remain",
			dev->data->port_id);
	ret = mlx5_txq_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Verbs Tx queues still remain",
			dev->data->port_id);
	ret = mlx5_txq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Tx queues still remain",
			dev->data->port_id);
	ret = mlx5_flow_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some flows still remain",
			dev->data->port_id);
	if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
		unsigned int c = 0;
		unsigned int i = mlx5_dev_to_port_id(dev->device, NULL, 0);
		uint16_t port_id[i];

		i = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, i), i);
		while (i--) {
			struct priv *opriv =
				rte_eth_devices[port_id[i]].data->dev_private;

			if (!opriv ||
			    opriv->domain_id != priv->domain_id ||
			    &rte_eth_devices[port_id[i]] == dev)
				continue;
			++c;
		}
		if (!c)
			claim_zero(rte_eth_switch_domain_free(priv->domain_id));
	}
	memset(priv, 0, sizeof(*priv));
	priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
}
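
/*
 * Note on the switch domain logic above: the domain is freed only when no
 * other port (master or representor) still references it, hence the sibling
 * scan before rte_eth_switch_domain_free().
 */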

const struct eth_dev_ops mlx5_dev_ops = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.promiscuous_enable = mlx5_promiscuous_enable,
	.promiscuous_disable = mlx5_promiscuous_disable,
	.allmulticast_enable = mlx5_allmulticast_enable,
	.allmulticast_disable = mlx5_allmulticast_disable,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.dev_infos_get = mlx5_dev_infos_get,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.set_mc_addr_list = mlx5_set_mc_addr_list,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.reta_update = mlx5_dev_rss_reta_update,
	.reta_query = mlx5_dev_rss_reta_query,
	.rss_hash_update = mlx5_rss_hash_update,
	.rss_hash_conf_get = mlx5_rss_hash_conf_get,
	.filter_ctrl = mlx5_dev_filter_ctrl,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
	.is_removed = mlx5_is_removed,
};
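
/*
 * Operations available in secondary processes. Only status/query callbacks
 * are provided because device control (configuration, queue setup, flow
 * rules) is owned by the primary process.
 */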
static const struct eth_dev_ops mlx5_dev_sec_ops = {
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.dev_infos_get = mlx5_dev_infos_get,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
};

/* Available operations in flow isolated mode. */
const struct eth_dev_ops mlx5_dev_ops_isolate = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.dev_infos_get = mlx5_dev_infos_get,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.set_mc_addr_list = mlx5_set_mc_addr_list,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.filter_ctrl = mlx5_dev_filter_ctrl,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
	.is_removed = mlx5_is_removed,
};

/**
 * Verify and store value for device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param opaque
 *   User data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_args_check(const char *key, const char *val, void *opaque)
{
	struct mlx5_dev_config *config = opaque;
	unsigned long tmp;

	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		rte_errno = errno;
		DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
		return -rte_errno;
	}
	if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
		config->cqe_comp = !!tmp;
	} else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) {
		config->mprq.enabled = !!tmp;
	} else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) {
		config->mprq.stride_num_n = tmp;
	} else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) {
		config->mprq.max_memcpy_len = tmp;
	} else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) {
		config->mprq.min_rxqs_num = tmp;
	} else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
		config->txq_inline = tmp;
	} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
		config->txqs_inline = tmp;
	} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
		config->mps = !!tmp ? config->mps : 0;
	} else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
		config->mpw_hdr_dseg = !!tmp;
	} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
		config->inline_max_packet_sz = tmp;
	} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
		config->tx_vec_en = !!tmp;
	} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
		config->rx_vec_en = !!tmp;
	} else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) {
		config->l3_vxlan_en = !!tmp;
	} else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
		config->vf_nl_en = !!tmp;
	} else {
		DRV_LOG(WARNING, "%s: unknown parameter", key);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
}
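
/*
 * Note: MLX5_TXQ_MPW_EN can only downgrade the MPW mode detected from the
 * device ("config->mps = !!tmp ? config->mps : 0" above); it cannot force
 * MPW on hardware that does not report support for it.
 */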

/**
 * Parse device parameters.
 *
 * @param config
 *   Pointer to device configuration structure.
 * @param devargs
 *   Device arguments structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
{
	const char **params = (const char *[]){
		MLX5_RXQ_CQE_COMP_EN,
		MLX5_RX_MPRQ_EN,
		MLX5_RX_MPRQ_LOG_STRIDE_NUM,
		MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
		MLX5_RXQS_MIN_MPRQ,
		MLX5_TXQ_INLINE,
		MLX5_TXQS_MIN_INLINE,
		MLX5_TXQ_MPW_EN,
		MLX5_TXQ_MPW_HDR_DSEG_EN,
		MLX5_TXQ_MAX_INLINE_LEN,
		MLX5_TX_VEC_EN,
		MLX5_RX_VEC_EN,
		MLX5_L3_VXLAN_EN,
		MLX5_VF_NL_EN,
		NULL,
	};
	struct rte_kvargs *kvlist;
	int ret = 0;
	int i;

	if (devargs == NULL)
		return 0;
	/* Following UGLY cast is done to pass checkpatch. */
	kvlist = rte_kvargs_parse(devargs->args, params);
	if (kvlist == NULL)
		return 0;
	/* Process parameters. */
	for (i = 0; (params[i] != NULL); ++i) {
		if (rte_kvargs_count(kvlist, params[i])) {
			ret = rte_kvargs_process(kvlist, params[i],
						 mlx5_args_check, config);
			if (ret) {
				rte_errno = -ret;
				rte_kvargs_free(kvlist);
				return -rte_errno;
			}
		}
	}
	rte_kvargs_free(kvlist);
	return 0;
}

static struct rte_pci_driver mlx5_driver;

/*
 * Reserved UAR address space for TXQ UAR (hw doorbell) mapping. This is a
 * process-local resource shared by the primary and secondary processes to
 * avoid duplicate reservations.
 * The space has to be available in both primary and secondary processes;
 * the TXQ UAR maps to this area using a fixed mmap w/o double check.
 */
static void *uar_base;

static int
find_lower_va_bound(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms, void *arg)
{
	void **addr = arg;

	if (*addr == NULL)
		*addr = ms->addr;
	else
		*addr = RTE_MIN(*addr, ms->addr);

	return 0;
}
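
/*
 * Example of the resulting layout (illustrative addresses): if hugepage
 * segments start at 0x7f8000000000, the callback above yields that address
 * and mlx5_uar_init_primary() reserves MLX5_UAR_SIZE bytes of virtual space
 * MLX5_UAR_OFFSET below it, keeping UAR mappings clear of hugepages.
 */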

/**
 * Reserve UAR address space for primary process.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_uar_init_primary(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	void *addr = (void *)0;

	if (uar_base) { /* UAR address space mapped. */
		priv->uar_base = uar_base;
		return 0;
	}
	/* find out lower bound of hugepage segments */
	rte_memseg_walk(find_lower_va_bound, &addr);
	/* keep distance to hugepages to minimize potential conflicts. */
	addr = RTE_PTR_SUB(addr, MLX5_UAR_OFFSET + MLX5_UAR_SIZE);
	/* anonymous mmap, no real memory consumption. */
	addr = mmap(addr, MLX5_UAR_SIZE,
		    PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) {
		DRV_LOG(ERR,
			"port %u failed to reserve UAR address space, please"
			" adjust MLX5_UAR_SIZE or try --base-virtaddr",
			dev->data->port_id);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Accept either same addr or a new addr returned from mmap if target
	 * range occupied.
	 */
	DRV_LOG(INFO, "port %u reserved UAR address space: %p",
		dev->data->port_id, addr);
	priv->uar_base = addr; /* for primary and secondary UAR re-mmap. */
	uar_base = addr; /* process local, don't reserve again. */
	return 0;
}

/**
 * Reserve UAR address space for secondary process, align with
 * primary process.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_uar_init_secondary(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	void *addr;

	assert(priv->uar_base);
	if (uar_base) { /* already reserved. */
		assert(uar_base == priv->uar_base);
		return 0;
	}
	/* anonymous mmap, no real memory consumption. */
	addr = mmap(priv->uar_base, MLX5_UAR_SIZE,
		    PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) {
		DRV_LOG(ERR, "port %u UAR mmap failed: %p size: %llu",
			dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
		rte_errno = ENXIO;
		return -rte_errno;
	}
	if (priv->uar_base != addr) {
		DRV_LOG(ERR,
			"port %u UAR address %p size %llu occupied, please"
			" adjust MLX5_UAR_OFFSET or try EAL parameter"
			" --base-virtaddr",
			dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
		rte_errno = ENXIO;
		return -rte_errno;
	}
	uar_base = addr; /* process local, don't reserve again */
	DRV_LOG(INFO, "port %u reserved UAR address space: %p",
		dev->data->port_id, addr);
	return 0;
}
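
/*
 * The secondary process must map the UAR region at the exact address used
 * by the primary (fixed-address mmap), otherwise Tx doorbell writes would
 * land in unrelated memory; this is why a mismatch above is fatal.
 */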

/**
 * Spawn an Ethernet device from Verbs information.
 *
 * @param dpdk_dev
 *   Backing DPDK device.
 * @param ibv_dev
 *   Verbs device.
 * @param vf
 *   If nonzero, enable VF-specific features.
 * @param[in] switch_info
 *   Switch properties of Ethernet device.
 *
 * @return
 *   A valid Ethernet device object on success, NULL otherwise and rte_errno
 *   is set.
 */
static struct rte_eth_dev *
mlx5_dev_spawn(struct rte_device *dpdk_dev,
	       struct ibv_device *ibv_dev,
	       unsigned int vf,
	       const struct mlx5_switch_info *switch_info)
{
	struct ibv_context *ctx;
	struct ibv_device_attr_ex attr;
	struct ibv_port_attr port_attr;
	struct ibv_pd *pd = NULL;
	struct mlx5dv_context dv_attr = { .comp_mask = 0 };
	struct mlx5_dev_config config = {
		.vf = !!vf,
		.tx_vec_en = 1,
		.rx_vec_en = 1,
		.mpw_hdr_dseg = 0,
		.txq_inline = MLX5_ARG_UNSET,
		.txqs_inline = MLX5_ARG_UNSET,
		.inline_max_packet_sz = MLX5_ARG_UNSET,
		.vf_nl_en = 1,
		.mprq = {
			.enabled = 0, /* Disabled by default. */
			.stride_num_n = MLX5_MPRQ_STRIDE_NUM_N,
			.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
			.min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
		},
	};
	struct rte_eth_dev *eth_dev = NULL;
	struct priv *priv = NULL;
	int err = 0;
	unsigned int mps;
	unsigned int cqe_comp;
	unsigned int tunnel_en = 0;
	unsigned int mpls_en = 0;
	unsigned int swp = 0;
	unsigned int verb_priorities = 0;
	unsigned int mprq = 0;
	unsigned int mprq_min_stride_size_n = 0;
	unsigned int mprq_max_stride_size_n = 0;
	unsigned int mprq_min_stride_num_n = 0;
	unsigned int mprq_max_stride_num_n = 0;
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
	struct ibv_counter_set_description cs_desc = { .counter_type = 0 };
#endif
	struct ether_addr mac;
	char name[RTE_ETH_NAME_MAX_LEN];
	int own_domain_id = 0;
	unsigned int i;

	/* Prepare shared data between primary and secondary process. */
	mlx5_prepare_shared_data();
	errno = 0;
	ctx = mlx5_glue->open_device(ibv_dev);
	if (!ctx) {
		rte_errno = errno ? errno : ENODEV;
		return NULL;
	}
#ifdef HAVE_IBV_MLX5_MOD_SWP
	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
#endif
	/*
	 * Multi-packet send is supported by ConnectX-4 Lx PF as well
	 * as all ConnectX-5 devices.
	 */
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
#endif
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
#endif
	mlx5_glue->dv_query_device(ctx, &dv_attr);
	if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
		if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
			DRV_LOG(DEBUG, "enhanced MPW is supported");
			mps = MLX5_MPW_ENHANCED;
		} else {
			DRV_LOG(DEBUG, "MPW is supported");
			mps = MLX5_MPW;
		}
	} else {
		DRV_LOG(DEBUG, "MPW isn't supported");
		mps = MLX5_MPW_DISABLED;
	}
	config.mps = mps;
#ifdef HAVE_IBV_MLX5_MOD_SWP
	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
		swp = dv_attr.sw_parsing_caps.sw_parsing_offloads;
	DRV_LOG(DEBUG, "SWP support: %u", swp);
#endif
	config.swp = !!swp;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
		struct mlx5dv_striding_rq_caps mprq_caps =
			dv_attr.striding_rq_caps;

		DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d",
			mprq_caps.min_single_stride_log_num_of_bytes);
		DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d",
			mprq_caps.max_single_stride_log_num_of_bytes);
		DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d",
			mprq_caps.min_single_wqe_log_num_of_strides);
		DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d",
			mprq_caps.max_single_wqe_log_num_of_strides);
		DRV_LOG(DEBUG, "\tsupported_qpts: %d",
			mprq_caps.supported_qpts);
		DRV_LOG(DEBUG, "device supports Multi-Packet RQ");
		mprq = 1;
		mprq_min_stride_size_n =
			mprq_caps.min_single_stride_log_num_of_bytes;
		mprq_max_stride_size_n =
			mprq_caps.max_single_stride_log_num_of_bytes;
		mprq_min_stride_num_n =
			mprq_caps.min_single_wqe_log_num_of_strides;
		mprq_max_stride_num_n =
			mprq_caps.max_single_wqe_log_num_of_strides;
		config.mprq.stride_num_n = RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
						   mprq_min_stride_num_n);
	}
#endif
	if (RTE_CACHE_LINE_SIZE == 128 &&
	    !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
		cqe_comp = 0;
	else
		cqe_comp = 1;
	config.cqe_comp = cqe_comp;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
		tunnel_en = ((dv_attr.tunnel_offloads_caps &
			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) &&
			     (dv_attr.tunnel_offloads_caps &
			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE));
	}
	DRV_LOG(DEBUG, "tunnel offloading is %ssupported",
		tunnel_en ? "" : "not ");
#else
	DRV_LOG(WARNING,
		"tunnel offloading disabled due to old OFED/rdma-core version");
#endif
	config.tunnel_en = tunnel_en;
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
	mpls_en = ((dv_attr.tunnel_offloads_caps &
		    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
		   (dv_attr.tunnel_offloads_caps &
		    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
	DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported",
		mpls_en ? "" : "not ");
#else
	DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to"
		" old OFED/rdma-core version or firmware configuration");
#endif
	config.mpls_en = mpls_en;
	err = mlx5_glue->query_device_ex(ctx, NULL, &attr);
	if (err) {
		DEBUG("ibv_query_device_ex() failed");
		goto error;
	}
	if (!switch_info->representor)
		rte_strlcpy(name, dpdk_dev->name, sizeof(name));
	else
		snprintf(name, sizeof(name), "%s_representor_%u",
			 dpdk_dev->name, switch_info->port_name);
	DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (eth_dev == NULL) {
			DRV_LOG(ERR, "can not attach rte ethdev");
			rte_errno = ENOMEM;
			err = rte_errno;
			goto error;
		}
		eth_dev->device = dpdk_dev;
		eth_dev->dev_ops = &mlx5_dev_sec_ops;
		err = mlx5_uar_init_secondary(eth_dev);
		if (err) {
			err = rte_errno;
			goto error;
		}
		/* Receive command fd from primary process */
		err = mlx5_socket_connect(eth_dev);
		if (err < 0) {
			err = rte_errno;
			goto error;
		}
		/* Remap UAR for Tx queues. */
		err = mlx5_tx_uar_remap(eth_dev, err);
		if (err) {
			err = rte_errno;
			goto error;
		}
		/*
		 * Ethdev pointer is still required as input since
		 * the primary device is not accessible from the
		 * secondary process.
		 */
		eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev);
		eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev);
		claim_zero(mlx5_glue->close_device(ctx));
		return eth_dev;
	}
	/* Check port status. */
	err = mlx5_glue->query_port(ctx, 1, &port_attr);
	if (err) {
		DRV_LOG(ERR, "port query failed: %s", strerror(err));
		goto error;
	}
	if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
		DRV_LOG(ERR, "port is not configured in Ethernet mode");
		err = EINVAL;
		goto error;
	}
	if (port_attr.state != IBV_PORT_ACTIVE)
		DRV_LOG(DEBUG, "port is not active: \"%s\" (%d)",
			mlx5_glue->port_state_str(port_attr.state),
			port_attr.state);
	/* Allocate protection domain. */
	pd = mlx5_glue->alloc_pd(ctx);
	if (pd == NULL) {
		DRV_LOG(ERR, "PD allocation failure");
		err = ENOMEM;
		goto error;
	}
	priv = rte_zmalloc("ethdev private structure",
			   sizeof(*priv),
			   RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DRV_LOG(ERR, "priv allocation failure");
		err = ENOMEM;
		goto error;
	}
	priv->ctx = ctx;
	strncpy(priv->ibdev_name, priv->ctx->device->name,
		sizeof(priv->ibdev_name));
	strncpy(priv->ibdev_path, priv->ctx->device->ibdev_path,
		sizeof(priv->ibdev_path));
	priv->device_attr = attr;
	priv->pd = pd;
	priv->mtu = ETHER_MTU;
	/* Some internal functions rely on Netlink sockets, open them now. */
	priv->nl_socket_rdma = mlx5_nl_init(0, NETLINK_RDMA);
	priv->nl_socket_route = mlx5_nl_init(RTMGRP_LINK, NETLINK_ROUTE);
	priv->nl_sn = 0;
	priv->representor = !!switch_info->representor;
	priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
	priv->representor_id =
		switch_info->representor ? switch_info->port_name : -1;
	/*
	 * Look for sibling devices in order to reuse their switch domain
	 * if any, otherwise allocate one.
	 */
	i = mlx5_dev_to_port_id(dpdk_dev, NULL, 0);
	if (i > 0) {
		uint16_t port_id[i];

		i = RTE_MIN(mlx5_dev_to_port_id(dpdk_dev, port_id, i), i);
		while (i--) {
			const struct priv *opriv =
				rte_eth_devices[port_id[i]].data->dev_private;

			if (!opriv ||
			    opriv->domain_id ==
			    RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
				continue;
			priv->domain_id = opriv->domain_id;
			break;
		}
	}
	if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
		err = rte_eth_switch_domain_alloc(&priv->domain_id);
		if (err) {
			err = rte_errno;
			DRV_LOG(ERR, "unable to allocate switch domain: %s",
				strerror(rte_errno));
			goto error;
		}
		own_domain_id = 1;
	}
	err = mlx5_args(&config, dpdk_dev->devargs);
	if (err) {
		err = rte_errno;
		DRV_LOG(ERR, "failed to process device arguments: %s",
			strerror(rte_errno));
		goto error;
	}
	config.hw_csum = !!(attr.device_cap_flags_ex & IBV_DEVICE_RAW_IP_CSUM);
	DRV_LOG(DEBUG, "checksum offloading is %ssupported",
		(config.hw_csum ? "" : "not "));
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
	config.flow_counter_en = !!attr.max_counter_sets;
	mlx5_glue->describe_counter_set(ctx, 0, &cs_desc);
	DRV_LOG(DEBUG, "counter type = %d, num of cs = %ld, attributes = %d",
		cs_desc.counter_type, cs_desc.num_of_cs,
		cs_desc.attributes);
#endif
	config.ind_table_max_size =
		attr.rss_caps.max_rwq_indirection_table_size;
	/*
	 * Remove this check once DPDK supports larger/variable
	 * indirection tables.
	 */
	if (config.ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
		config.ind_table_max_size = ETH_RSS_RETA_SIZE_512;
	DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
		config.ind_table_max_size);
	config.hw_vlan_strip = !!(attr.raw_packet_caps &
				  IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
	DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
		(config.hw_vlan_strip ? "" : "not "));
	config.hw_fcs_strip = !!(attr.raw_packet_caps &
				 IBV_RAW_PACKET_CAP_SCATTER_FCS);
	DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
		(config.hw_fcs_strip ? "" : "not "));
#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
	config.hw_padding = !!attr.rx_pad_end_addr_align;
#endif
	DRV_LOG(DEBUG, "hardware Rx end alignment padding is %ssupported",
		(config.hw_padding ? "" : "not "));
	config.tso = (attr.tso_caps.max_tso > 0 &&
		      (attr.tso_caps.supported_qpts &
		       (1 << IBV_QPT_RAW_PACKET)));
	if (config.tso)
		config.tso_max_payload_sz = attr.tso_caps.max_tso;
	if (config.mps && !mps) {
		DRV_LOG(ERR,
			"multi-packet send not supported on this device"
			" (" MLX5_TXQ_MPW_EN ")");
		err = ENOTSUP;
		goto error;
	}
	DRV_LOG(INFO, "%sMPS is %s",
		config.mps == MLX5_MPW_ENHANCED ? "enhanced " : "",
		config.mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
	if (config.cqe_comp && !cqe_comp) {
		DRV_LOG(WARNING, "Rx CQE compression isn't supported");
		config.cqe_comp = 0;
	}
	if (config.mprq.enabled && mprq) {
		if (config.mprq.stride_num_n > mprq_max_stride_num_n ||
		    config.mprq.stride_num_n < mprq_min_stride_num_n) {
			config.mprq.stride_num_n =
				RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
					mprq_min_stride_num_n);
			DRV_LOG(WARNING,
				"the number of strides"
				" for Multi-Packet RQ is out of range,"
				" setting default value (%u)",
				1 << config.mprq.stride_num_n);
		}
		config.mprq.min_stride_size_n = mprq_min_stride_size_n;
		config.mprq.max_stride_size_n = mprq_max_stride_size_n;
	} else if (config.mprq.enabled && !mprq) {
		DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
		config.mprq.enabled = 0;
	}
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL) {
		DRV_LOG(ERR, "can not allocate rte ethdev");
		err = ENOMEM;
		goto error;
	}
	if (priv->representor)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
	eth_dev->data->dev_private = priv;
	priv->dev_data = eth_dev->data;
	eth_dev->data->mac_addrs = priv->mac;
	eth_dev->device = dpdk_dev;
	eth_dev->device->driver = &mlx5_driver.driver;
	err = mlx5_uar_init_primary(eth_dev);
	if (err) {
		err = rte_errno;
		goto error;
	}
	/* Configure the first MAC address by default. */
	if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
		DRV_LOG(ERR,
			"port %u cannot get MAC address, is mlx5_en"
			" loaded? (errno: %s)",
			eth_dev->data->port_id, strerror(rte_errno));
		err = ENODEV;
		goto error;
	}
	DRV_LOG(INFO,
		"port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
		eth_dev->data->port_id,
		mac.addr_bytes[0], mac.addr_bytes[1],
		mac.addr_bytes[2], mac.addr_bytes[3],
		mac.addr_bytes[4], mac.addr_bytes[5]);
#ifndef NDEBUG
	{
		char ifname[IF_NAMESIZE];

		if (mlx5_get_ifname(eth_dev, &ifname) == 0)
			DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
				eth_dev->data->port_id, ifname);
		else
			DRV_LOG(DEBUG, "port %u ifname is unknown",
				eth_dev->data->port_id);
	}
#endif
	/* Get actual MTU if possible. */
	err = mlx5_get_mtu(eth_dev, &priv->mtu);
	if (err) {
		err = rte_errno;
		goto error;
	}
	DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id,
		priv->mtu);
	/* Initialize burst functions to prevent crashes before link-up. */
	eth_dev->rx_pkt_burst = removed_rx_burst;
	eth_dev->tx_pkt_burst = removed_tx_burst;
	eth_dev->dev_ops = &mlx5_dev_ops;
	/* Register MAC address. */
	claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
	if (vf && config.vf_nl_en)
		mlx5_nl_mac_addr_sync(eth_dev);
	TAILQ_INIT(&priv->flows);
	TAILQ_INIT(&priv->ctrl_flows);
	/* Hint libmlx5 to use PMD allocator for data plane resources */
	struct mlx5dv_ctx_allocators alctr = {
		.alloc = &mlx5_alloc_verbs_buf,
		.free = &mlx5_free_verbs_buf,
		.data = priv,
	};
	mlx5_glue->dv_set_context_attr(ctx, MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
				       (void *)((uintptr_t)&alctr));
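	/*
	 * From here on, Verbs allocations for queue objects on this context
	 * go through mlx5_alloc_verbs_buf()/mlx5_free_verbs_buf() with
	 * "priv" as callback data.
	 */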
	/* Bring Ethernet device up. */
	DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
		eth_dev->data->port_id);
	mlx5_set_link_up(eth_dev);
	/*
	 * Even though the interrupt handler is not installed yet,
	 * interrupts will still trigger on the async_fd from
	 * Verbs context returned by ibv_open_device().
	 */
	mlx5_link_update(eth_dev, 0);
	/* Store device configuration on private structure. */
	priv->config = config;
	/* Create drop queue. */
	err = mlx5_flow_create_drop_queue(eth_dev);
	if (err) {
		DRV_LOG(ERR, "port %u drop queue allocation failed: %s",
			eth_dev->data->port_id, strerror(rte_errno));
		err = rte_errno;
		goto error;
	}
	/* Supported Verbs flow priority number detection. */
	if (verb_priorities == 0)
		verb_priorities = mlx5_get_max_verbs_prio(eth_dev);
	if (verb_priorities < MLX5_VERBS_FLOW_PRIO_8) {
		DRV_LOG(ERR, "port %u wrong Verbs flow priorities: %u",
			eth_dev->data->port_id, verb_priorities);
		err = ENOTSUP;
		goto error;
	}
	priv->config.max_verbs_prio = verb_priorities;
	/*
	 * Once the device is added to the list of memory event
	 * callback, its global MR cache table cannot be expanded
	 * on the fly because of deadlock. If it overflows, lookup
	 * should be done by searching MR list linearly, which is slow.
	 */
	err = mlx5_mr_btree_init(&priv->mr.cache,
				 MLX5_MR_BTREE_CACHE_N * 2,
				 eth_dev->device->numa_node);
	if (err) {
		err = rte_errno;
		goto error;
	}
	/* Add device to memory callback list. */
	rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
	LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
			 priv, mem_event_cb);
	rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
	return eth_dev;
error:
	if (priv) {
		if (priv->nl_socket_route >= 0)
			close(priv->nl_socket_route);
		if (priv->nl_socket_rdma >= 0)
			close(priv->nl_socket_rdma);
		if (own_domain_id)
			claim_zero(rte_eth_switch_domain_free(priv->domain_id));
		rte_free(priv);
	}
	if (pd)
		claim_zero(mlx5_glue->dealloc_pd(pd));
	if (eth_dev)
		rte_eth_dev_release_port(eth_dev);
	if (ctx)
		claim_zero(mlx5_glue->close_device(ctx));
	assert(err > 0);
	rte_errno = err;
	return NULL;
}

/**
 * DPDK callback to register a PCI device.
 *
 * This function spawns Ethernet devices out of a given PCI device.
 *
 * @param[in] pci_drv
 *   PCI driver structure (mlx5_driver).
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	       struct rte_pci_device *pci_dev)
{
	struct ibv_device **ibv_list;
	unsigned int n = 0;
	int vf;
	int ret;

	assert(pci_drv == &mlx5_driver);
	errno = 0;
	ibv_list = mlx5_glue->get_device_list(&ret);
	if (!ibv_list) {
		rte_errno = errno ? errno : ENOSYS;
		DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?");
		return -rte_errno;
	}

	struct ibv_device *ibv_match[ret + 1];

	while (ret-- > 0) {
		struct rte_pci_addr pci_addr;

		DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name);
		if (mlx5_ibv_device_to_pci_addr(ibv_list[ret], &pci_addr))
			continue;
		if (pci_dev->addr.domain != pci_addr.domain ||
		    pci_dev->addr.bus != pci_addr.bus ||
		    pci_dev->addr.devid != pci_addr.devid ||
		    pci_dev->addr.function != pci_addr.function)
			continue;
		DRV_LOG(INFO, "PCI information matches for device \"%s\"",
			ibv_list[ret]->name);
		ibv_match[n++] = ibv_list[ret];
	}
	ibv_match[n] = NULL;

	unsigned int ifindex[n];
	struct mlx5_switch_info info[n];
	struct rte_eth_dev *eth_list[n];
	int nl_route = n ? mlx5_nl_init(0, NETLINK_ROUTE) : -1;
	int nl_rdma = n ? mlx5_nl_init(0, NETLINK_RDMA) : -1;
	unsigned int i;
	unsigned int u;

	/*
	 * The existence of several matching entries (n > 1) means port
	 * representors have been instantiated. No existing Verbs call nor
	 * /sys entries can tell them apart, this can only be done through
	 * Netlink calls assuming kernel drivers are recent enough to
	 * support them.
	 *
	 * In the event of identification failure through Netlink, either:
	 *
	 * 1. No device matches (n == 0), complain and bail out.
	 * 2. A single IB device matches (n == 1) and is not a representor,
	 *    assume no switch support.
	 * 3. Otherwise no safe assumptions can be made; complain louder and
	 *    bail out.
	 */
	for (i = 0; i != n; ++i) {
		if (nl_rdma < 0)
			ifindex[i] = 0;
		else
			ifindex[i] = mlx5_nl_ifindex(nl_rdma,
						     ibv_match[i]->name);
		if (nl_route < 0 ||
		    !ifindex[i] ||
		    mlx5_nl_switch_info(nl_route, ifindex[i], &info[i])) {
			ifindex[i] = 0;
			memset(&info[i], 0, sizeof(info[i]));
			continue;
		}
	}
	if (nl_rdma >= 0)
		close(nl_rdma);
	if (nl_route >= 0)
		close(nl_route);
	/* Count unidentified devices. */
	for (u = 0, i = 0; i != n; ++i)
		if (!info[i].master && !info[i].representor)
			++u;
	if (u) {
		if (n == 1 && u == 1) {
			/* Case #2. */
			DRV_LOG(INFO, "no switch support detected");
		} else {
			/* Case #3. */
			DRV_LOG(ERR,
				"unable to tell which of the matching devices"
				" is the master (lack of kernel support?)");
			n = 0;
		}
	}
	switch (pci_dev->id.device_id) {
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
		vf = 1;
		break;
	default:
		vf = 0;
	}
	for (i = 0; i != n; ++i) {
		uint32_t restore;

		eth_list[i] = mlx5_dev_spawn(&pci_dev->device, ibv_match[i],
					     vf, &info[i]);
		if (!eth_list[i])
			break;
		restore = eth_list[i]->data->dev_flags;
		rte_eth_copy_pci_info(eth_list[i], pci_dev);
		/* Restore non-PCI flags cleared by the above call. */
		eth_list[i]->data->dev_flags |= restore;
		rte_eth_dev_probing_finish(eth_list[i]);
	}
	mlx5_glue->free_device_list(ibv_list);
	if (!n) {
		DRV_LOG(WARNING,
			"no Verbs device matches PCI device " PCI_PRI_FMT ","
			" are kernel drivers loaded?",
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function);
		rte_errno = ENOENT;
		ret = -rte_errno;
	} else if (i != n) {
		DRV_LOG(ERR,
			"probe of PCI device " PCI_PRI_FMT " aborted after"
			" encountering an error: %s",
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function,
			strerror(rte_errno));
		ret = -rte_errno;
		/* Roll back. */
		while (i--) {
			mlx5_dev_close(eth_list[i]);
			if (rte_eal_process_type() == RTE_PROC_PRIMARY)
				rte_free(eth_list[i]->data->dev_private);
			claim_zero(rte_eth_dev_release_port(eth_list[i]));
		}
		/* Restore original error. */
		rte_errno = -ret;
	} else {
		ret = 0;
	}
	return ret;
}

static const struct rte_pci_id mlx5_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
	},
	{
		.vendor_id = 0
	}
};

static struct rte_pci_driver mlx5_driver = {
	.driver = {
		.name = MLX5_DRIVER_NAME
	},
	.id_table = mlx5_pci_id_map,
	.probe = mlx5_pci_probe,
	.drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV,
};

#ifdef RTE_LIBRTE_MLX5_DLOPEN_DEPS

/**
 * Suffix RTE_EAL_PMD_PATH with "-glue".
 *
 * This function performs a sanity check on RTE_EAL_PMD_PATH before
 * suffixing its last component.
 *
 * @param[out] buf
 *   Output buffer, should be large enough otherwise NULL is returned.
 * @param size
 *   Size of @p buf.
 *
 * @return
 *   Pointer to @p buf or @p NULL in case suffix cannot be appended.
 */
static char *
mlx5_glue_path(char *buf, size_t size)
{
	static const char *const bad[] = { "/", ".", "..", NULL };
	const char *path = RTE_EAL_PMD_PATH;
	size_t len = strlen(path);
	size_t off;
	int i;

	while (len && path[len - 1] == '/')
		--len;
	for (off = len; off && path[off - 1] != '/'; --off)
		;
	for (i = 0; bad[i]; ++i)
		if (!strncmp(path + off, bad[i], (int)(len - off)))
			goto error;
	i = snprintf(buf, size, "%.*s-glue", (int)len, path);
	if (i == -1 || (size_t)i >= size)
		goto error;
	return buf;
error:
	DRV_LOG(ERR,
		"unable to append \"-glue\" to last component of"
		" RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\"),"
		" please re-configure DPDK");
	return NULL;
}

/**
 * Initialization routine for run-time dependency on rdma-core.
 */
static int
mlx5_glue_init(void)
{
	char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")];
	const char *path[] = {
		/*
		 * A basic security check is necessary before trusting
		 * MLX5_GLUE_PATH, which may override RTE_EAL_PMD_PATH.
		 */
		(geteuid() == getuid() && getegid() == getgid() ?
		 getenv("MLX5_GLUE_PATH") : NULL),
		/*
		 * When RTE_EAL_PMD_PATH is set, use its glue-suffixed
		 * variant, otherwise let dlopen() look up libraries on its
		 * own.
		 */
		(*RTE_EAL_PMD_PATH ?
		 mlx5_glue_path(glue_path, sizeof(glue_path)) : ""),
	};
	unsigned int i = 0;
	void *handle = NULL;
	void **sym;
	const char *dlmsg;

	while (!handle && i != RTE_DIM(path)) {
		const char *end;
		size_t len;
		int ret;

		if (!path[i]) {
			++i;
			continue;
		}
		end = strpbrk(path[i], ":;");
		if (!end)
			end = path[i] + strlen(path[i]);
		len = end - path[i];
		ret = 0;
		do {
			char name[ret + 1];

			ret = snprintf(name, sizeof(name), "%.*s%s" MLX5_GLUE,
				       (int)len, path[i],
				       (!len || *(end - 1) == '/') ? "" : "/");
			if (ret == -1)
				break;
			if (sizeof(name) != (size_t)ret + 1)
				continue;
			DRV_LOG(DEBUG, "looking for rdma-core glue as \"%s\"",
				name);
			handle = dlopen(name, RTLD_LAZY);
			break;
		} while (1);
		path[i] = end + 1;
		if (!*end)
			++i;
	}
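	/*
	 * Note on the loop above: the VLA "name" is first sized to hold a
	 * single byte, so the initial snprintf() only measures the required
	 * length (returned in "ret"); the "continue" then re-enters the
	 * do/while with a correctly sized buffer and the second pass writes
	 * the full path before dlopen() is attempted.
	 */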
	if (!handle) {
		rte_errno = EINVAL;
		dlmsg = dlerror();
		if (dlmsg)
			DRV_LOG(WARNING, "cannot load glue library: %s",
				dlmsg);
		goto glue_error;
	}
	sym = dlsym(handle, "mlx5_glue");
	if (!sym || !*sym) {
		rte_errno = EINVAL;
		dlmsg = dlerror();
		if (dlmsg)
			DRV_LOG(ERR, "cannot resolve glue symbol: %s", dlmsg);
		goto glue_error;
	}
	mlx5_glue = *sym;
	return 0;
glue_error:
	if (handle)
		dlclose(handle);
	DRV_LOG(WARNING,
		"cannot initialize PMD due to missing run-time dependency on"
		" rdma-core libraries (libibverbs, libmlx5)");
	return -rte_errno;
}

#endif /* RTE_LIBRTE_MLX5_DLOPEN_DEPS */

/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx5_pmd_init)
{
	/* Initialize driver log type. */
	mlx5_logtype = rte_log_register("pmd.net.mlx5");
	if (mlx5_logtype >= 0)
		rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE);

	/* Build the static tables for Verbs conversion. */
	mlx5_set_ptype_table();
	mlx5_set_cksum_table();
	mlx5_set_swp_types_table();
	/*
	 * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
	 * huge pages. Calling ibv_fork_init() during init allows
	 * applications to use fork() safely for purposes other than
	 * using this PMD, which is not supported in forked processes.
	 */
	setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
	/* Match the size of Rx completion entry to the size of a cacheline. */
	if (RTE_CACHE_LINE_SIZE == 128)
		setenv("MLX5_CQE_SIZE", "128", 0);
	/*
	 * MLX5_DEVICE_FATAL_CLEANUP tells ibv_destroy functions to
	 * cleanup all the Verbs resources even when the device was removed.
	 */
	setenv("MLX5_DEVICE_FATAL_CLEANUP", "1", 1);
#ifdef RTE_LIBRTE_MLX5_DLOPEN_DEPS
	if (mlx5_glue_init())
		return;
	assert(mlx5_glue);
#endif
#ifndef NDEBUG
	/* Glue structure must not contain any NULL pointers. */
	{
		unsigned int i;

		for (i = 0; i != sizeof(*mlx5_glue) / sizeof(void *); ++i)
			assert(((const void *const *)mlx5_glue)[i]);
	}
#endif
	if (strcmp(mlx5_glue->version, MLX5_GLUE_VERSION)) {
		DRV_LOG(ERR,
			"rdma-core glue \"%s\" mismatch: \"%s\" is required",
			mlx5_glue->version, MLX5_GLUE_VERSION);
		return;
	}
	mlx5_glue->fork_init();
	rte_pci_register(&mlx5_driver);
}

RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib");