/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <assert.h>
#include <dlfcn.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <inttypes.h>
#include <net/if.h>
#include <sys/mman.h>
#include <linux/rtnetlink.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_eal_memconfig.h>
#include <rte_kvargs.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_glue.h"

/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"

/* Device parameter to enable Multi-Packet Rx queue. */
#define MLX5_RX_MPRQ_EN "mprq_en"

/* Device parameter to configure log 2 of the number of strides for MPRQ. */
#define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num"

/* Device parameter to limit the size of memcpy'd packet for MPRQ. */
#define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len"

/* Device parameter to set the minimum number of Rx queues to enable MPRQ. */
#define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq"

/* Device parameter to configure inline send. */
#define MLX5_TXQ_INLINE "txq_inline"

/*
 * Device parameter to configure the number of Tx queues threshold for
 * enabling inline send.
 */
#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"

/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"

/* Device parameter to include 2 dsegs in the title WQEBB. */
#define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en"

/* Device parameter to limit the size of inlined packets. */
#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"

/* Device parameter to enable hardware Tx vector. */
#define MLX5_TX_VEC_EN "tx_vec_en"

/* Device parameter to enable hardware Rx vector. */
#define MLX5_RX_VEC_EN "rx_vec_en"

/* Allow L3 VXLAN flow creation. */
#define MLX5_L3_VXLAN_EN "l3_vxlan_en"

/* Activate Netlink support in VF mode. */
#define MLX5_VF_NL_EN "vf_nl_en"
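
/*
 * Usage sketch (not driver code): the keys above are consumed from the EAL
 * device arguments. Assuming a hypothetical device at PCI address
 * 0000:03:00.0, Rx CQE compression can be disabled and Multi-Packet RQ
 * enabled with:
 *
 *   testpmd -w 0000:03:00.0,rxq_cqe_comp_en=0,mprq_en=1 -- -i
 *
 * Each key/value pair is validated by mlx5_args_check() below.
 */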

#ifndef HAVE_IBV_MLX5_MOD_MPW
#define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
#endif

#ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
#endif

static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";

/* Shared memory between primary and secondary processes. */
struct mlx5_shared_data *mlx5_shared_data;

/* Spinlock for mlx5_shared_data allocation. */
static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/** Driver-specific log messages type. */
int mlx5_logtype;

/**
 * Prepare shared data between primary and secondary processes.
 */
static void
mlx5_prepare_shared_data(void)
{
	const struct rte_memzone *mz;

	rte_spinlock_lock(&mlx5_shared_data_lock);
	if (mlx5_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate shared memory. */
			mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
						 sizeof(*mlx5_shared_data),
						 SOCKET_ID_ANY, 0);
		} else {
			/* Lookup allocated shared memory. */
			mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
		}
		if (mz == NULL)
			rte_panic("Cannot allocate mlx5 shared data\n");
		mlx5_shared_data = mz->addr;
		/* Initialize shared data. */
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			LIST_INIT(&mlx5_shared_data->mem_event_cb_list);
			rte_rwlock_init(&mlx5_shared_data->mem_event_rwlock);
		}
	}
	rte_spinlock_unlock(&mlx5_shared_data_lock);
}

/**
 * Retrieve integer value from environment variable.
 *
 * @param[in] name
 *   Environment variable name.
 *
 * @return
 *   Integer value, 0 if the variable is not set.
 */
int
mlx5_getenv_int(const char *name)
{
	const char *val = getenv(name);

	if (val == NULL)
		return 0;
	return atoi(val);
}
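
/*
 * Usage sketch (hypothetical variable name, not one the PMD defines):
 *
 *   if (mlx5_getenv_int("MLX5_EXAMPLE_KNOB"))
 *           enable_example_behavior();
 *
 * An unset or non-numeric variable reads as 0, so integer environment
 * knobs default to "off".
 */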

/**
 * Verbs callback to allocate memory. The space provided must reside inside
 * a huge page. Note that all allocations must respect the alignment imposed
 * by libmlx5 (i.e. currently sysconf(_SC_PAGESIZE)).
 *
 * @param[in] size
 *   The size in bytes of the memory to allocate.
 * @param[in] data
 *   A pointer to the callback data.
 *
 * @return
 *   Allocated buffer, NULL otherwise and rte_errno is set.
 */
static void *
mlx5_alloc_verbs_buf(size_t size, void *data)
{
	struct priv *priv = data;
	void *ret;
	size_t alignment = sysconf(_SC_PAGESIZE);
	unsigned int socket = SOCKET_ID_ANY;

	if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) {
		const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;

		socket = ctrl->socket;
	} else if (priv->verbs_alloc_ctx.type ==
		   MLX5_VERBS_ALLOC_TYPE_RX_QUEUE) {
		const struct mlx5_rxq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;

		socket = ctrl->socket;
	}
	assert(data != NULL);
	ret = rte_malloc_socket(__func__, size, alignment, socket);
	if (!ret && size)
		rte_errno = ENOMEM;
	return ret;
}

/**
 * Verbs callback to free memory.
 *
 * @param[in] ptr
 *   A pointer to the memory to free.
 * @param[in] data
 *   A pointer to the callback data.
 */
static void
mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
{
	assert(data != NULL);
	rte_free(ptr);
}

/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_dev_close(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret;

	DRV_LOG(DEBUG, "port %u closing device \"%s\"",
		dev->data->port_id,
		((priv->ctx != NULL) ? priv->ctx->device->name : ""));
	/* In case mlx5_dev_stop() has not been called. */
	mlx5_dev_interrupt_handler_uninstall(dev);
	mlx5_traffic_disable(dev);
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	if (priv->rxqs != NULL) {
		/* XXX race condition if mlx5_rx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->rxqs_n); ++i)
			mlx5_rxq_release(dev, i);
		priv->rxqs_n = 0;
		priv->rxqs = NULL;
	}
	if (priv->txqs != NULL) {
		/* XXX race condition if mlx5_tx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->txqs_n); ++i)
			mlx5_txq_release(dev, i);
		priv->txqs_n = 0;
		priv->txqs = NULL;
	}
	mlx5_flow_delete_drop_queue(dev);
	mlx5_mprq_free_mp(dev);
	mlx5_mr_release(dev);
	if (priv->pd != NULL) {
		assert(priv->ctx != NULL);
		claim_zero(mlx5_glue->dealloc_pd(priv->pd));
		claim_zero(mlx5_glue->close_device(priv->ctx));
	} else
		assert(priv->ctx == NULL);
	if (priv->rss_conf.rss_key != NULL)
		rte_free(priv->rss_conf.rss_key);
	if (priv->reta_idx != NULL)
		rte_free(priv->reta_idx);
	if (priv->primary_socket)
		mlx5_socket_uninit(dev);
	if (priv->config.vf)
		mlx5_nl_mac_addr_flush(dev);
	if (priv->nl_socket >= 0)
		close(priv->nl_socket);
	ret = mlx5_hrxq_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some hash Rx queues still remain",
			dev->data->port_id);
	ret = mlx5_ind_table_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING,
			"port %u some indirection tables still remain",
			dev->data->port_id);
	ret = mlx5_rxq_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Verbs Rx queues still remain",
			dev->data->port_id);
	ret = mlx5_rxq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Rx queues still remain",
			dev->data->port_id);
	ret = mlx5_txq_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Verbs Tx queues still remain",
			dev->data->port_id);
	ret = mlx5_txq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Tx queues still remain",
			dev->data->port_id);
	ret = mlx5_flow_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some flows still remain",
			dev->data->port_id);
	memset(priv, 0, sizeof(*priv));
}

const struct eth_dev_ops mlx5_dev_ops = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.promiscuous_enable = mlx5_promiscuous_enable,
	.promiscuous_disable = mlx5_promiscuous_disable,
	.allmulticast_enable = mlx5_allmulticast_enable,
	.allmulticast_disable = mlx5_allmulticast_disable,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.dev_infos_get = mlx5_dev_infos_get,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.set_mc_addr_list = mlx5_set_mc_addr_list,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.reta_update = mlx5_dev_rss_reta_update,
	.reta_query = mlx5_dev_rss_reta_query,
	.rss_hash_update = mlx5_rss_hash_update,
	.rss_hash_conf_get = mlx5_rss_hash_conf_get,
	.filter_ctrl = mlx5_dev_filter_ctrl,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
	.is_removed = mlx5_is_removed,
};

static const struct eth_dev_ops mlx5_dev_sec_ops = {
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.dev_infos_get = mlx5_dev_infos_get,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
};

/* Available operations in flow isolated mode. */
const struct eth_dev_ops mlx5_dev_ops_isolate = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.dev_infos_get = mlx5_dev_infos_get,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.set_mc_addr_list = mlx5_set_mc_addr_list,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.filter_ctrl = mlx5_dev_filter_ctrl,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
	.is_removed = mlx5_is_removed,
};
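
/*
 * Example (application side, sketch): the reduced table above takes effect
 * once flow isolation is requested through the generic rte_flow API:
 *
 *   struct rte_flow_error error;
 *
 *   if (rte_flow_isolate(port_id, 1, &error) == 0)
 *           ...only traffic matching explicit flow rules is received...
 *
 * Promiscuous and allmulticast callbacks are deliberately absent from
 * mlx5_dev_ops_isolate since they conflict with isolated mode.
 */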

static struct {
	struct rte_pci_addr pci_addr; /* associated PCI address */
	uint32_t ports; /* physical ports bitfield. */
} mlx5_dev[32];

/**
 * Get device index in mlx5_dev[] from PCI bus address.
 *
 * @param[in] pci_addr
 *   PCI bus address to look for.
 *
 * @return
 *   mlx5_dev[] index on success, -1 on failure.
 */
static int
mlx5_dev_idx(struct rte_pci_addr *pci_addr)
{
	unsigned int i;
	int ret = -1;

	assert(pci_addr != NULL);
	for (i = 0; (i != RTE_DIM(mlx5_dev)); ++i) {
		if ((mlx5_dev[i].pci_addr.domain == pci_addr->domain) &&
		    (mlx5_dev[i].pci_addr.bus == pci_addr->bus) &&
		    (mlx5_dev[i].pci_addr.devid == pci_addr->devid) &&
		    (mlx5_dev[i].pci_addr.function == pci_addr->function))
			return i;
		if ((mlx5_dev[i].ports == 0) && (ret == -1))
			ret = i;
	}
	return ret;
}

/**
 * Verify and store value for device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param opaque
 *   User data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_args_check(const char *key, const char *val, void *opaque)
{
	struct mlx5_dev_config *config = opaque;
	unsigned long tmp;

	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		rte_errno = errno;
		DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
		return -rte_errno;
	}
	if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
		config->cqe_comp = !!tmp;
	} else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) {
		config->mprq.enabled = !!tmp;
	} else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) {
		config->mprq.stride_num_n = tmp;
	} else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) {
		config->mprq.max_memcpy_len = tmp;
	} else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) {
		config->mprq.min_rxqs_num = tmp;
	} else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
		config->txq_inline = tmp;
	} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
		config->txqs_inline = tmp;
	} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
		config->mps = !!tmp ? config->mps : 0;
	} else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
		config->mpw_hdr_dseg = !!tmp;
	} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
		config->inline_max_packet_sz = tmp;
	} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
		config->tx_vec_en = !!tmp;
	} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
		config->rx_vec_en = !!tmp;
	} else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) {
		config->l3_vxlan_en = !!tmp;
	} else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
		config->vf_nl_en = !!tmp;
	} else {
		DRV_LOG(WARNING, "%s: unknown parameter", key);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
}

/**
 * Parse device parameters.
 *
 * @param config
 *   Pointer to device configuration structure.
 * @param devargs
 *   Device arguments structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
{
	const char **params = (const char *[]){
		MLX5_RXQ_CQE_COMP_EN,
		MLX5_RX_MPRQ_EN,
		MLX5_RX_MPRQ_LOG_STRIDE_NUM,
		MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
		MLX5_RXQS_MIN_MPRQ,
		MLX5_TXQ_INLINE,
		MLX5_TXQS_MIN_INLINE,
		MLX5_TXQ_MPW_EN,
		MLX5_TXQ_MPW_HDR_DSEG_EN,
		MLX5_TXQ_MAX_INLINE_LEN,
		MLX5_TX_VEC_EN,
		MLX5_RX_VEC_EN,
		MLX5_L3_VXLAN_EN,
		MLX5_VF_NL_EN,
		NULL,
	};
	struct rte_kvargs *kvlist;
	int ret = 0;
	int i;

	if (devargs == NULL)
		return 0;
	/* Following UGLY cast is done to pass checkpatch. */
	kvlist = rte_kvargs_parse(devargs->args, params);
	if (kvlist == NULL)
		return 0;
	/* Process parameters. */
	for (i = 0; (params[i] != NULL); ++i) {
		if (rte_kvargs_count(kvlist, params[i])) {
			ret = rte_kvargs_process(kvlist, params[i],
						 mlx5_args_check, config);
			if (ret) {
				rte_errno = EINVAL;
				rte_kvargs_free(kvlist);
				return -rte_errno;
			}
		}
	}
	rte_kvargs_free(kvlist);
	return 0;
}

static struct rte_pci_driver mlx5_driver;

/*
 * Reserved UAR address space for TXQ UAR(hw doorbell) mapping, process
 * local resource. Used by both primary and secondary to avoid duplicate
 * reservation.
 * The space has to be available on both primary and secondary process,
 * TXQ UAR maps to this area using fixed mmap w/o double check.
 */
static void *uar_base;
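
/*
 * Sketch of the scheme (not actual driver code): once this region is
 * reserved with PROT_NONE, a process can map a Tx queue doorbell page at a
 * fixed offset inside it, e.g.
 *
 *   void *db = mmap(RTE_PTR_ADD(uar_base, off), page_size,
 *                   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED,
 *                   fd, 0);
 *
 * so that primary and secondary processes see identical UAR addresses.
 * See mlx5_tx_uar_remap() for the actual implementation.
 */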

static int
find_lower_va_bound(const struct rte_memseg_list *msl __rte_unused,
		    const struct rte_memseg *ms, void *arg)
{
	void **addr = arg;

	if (*addr == NULL)
		*addr = ms->addr;
	else
		*addr = RTE_MIN(*addr, ms->addr);
	return 0;
}

/**
 * Reserve UAR address space for primary process.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_uar_init_primary(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	void *addr = (void *)0;

	if (uar_base) { /* UAR address space mapped. */
		priv->uar_base = uar_base;
		return 0;
	}
	/* find out lower bound of hugepage segments */
	rte_memseg_walk(find_lower_va_bound, &addr);
	/* keep distance to hugepages to minimize potential conflicts. */
	addr = RTE_PTR_SUB(addr, MLX5_UAR_OFFSET + MLX5_UAR_SIZE);
	/* anonymous mmap, no real memory consumption. */
	addr = mmap(addr, MLX5_UAR_SIZE,
		    PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) {
		DRV_LOG(ERR,
			"port %u failed to reserve UAR address space, please"
			" adjust MLX5_UAR_SIZE or try --base-virtaddr",
			dev->data->port_id);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Accept either same addr or a new addr returned from mmap if target
	 * range occupied.
	 */
	DRV_LOG(INFO, "port %u reserved UAR address space: %p",
		dev->data->port_id, addr);
	priv->uar_base = addr; /* for primary and secondary UAR re-mmap. */
	uar_base = addr; /* process local, don't reserve again. */
	return 0;
}

/**
 * Reserve UAR address space for secondary process, align with primary
 * process.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_uar_init_secondary(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	void *addr;

	assert(priv->uar_base);
	if (uar_base) { /* already reserved. */
		assert(uar_base == priv->uar_base);
		return 0;
	}
	/* anonymous mmap, no real memory consumption. */
	addr = mmap(priv->uar_base, MLX5_UAR_SIZE,
		    PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) {
		DRV_LOG(ERR, "port %u UAR mmap failed: %p size: %llu",
			dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
		rte_errno = ENXIO;
		return -rte_errno;
	}
	if (priv->uar_base != addr) {
		DRV_LOG(ERR,
			"port %u UAR address %p size %llu occupied, please"
			" adjust MLX5_UAR_OFFSET or try EAL parameter"
			" --base-virtaddr",
			dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
		rte_errno = ENXIO;
		return -rte_errno;
	}
	uar_base = addr; /* process local, don't reserve again */
	DRV_LOG(INFO, "port %u reserved UAR address space: %p",
		dev->data->port_id, addr);
	return 0;
}

/**
 * DPDK callback to register a PCI device.
 *
 * This function creates an Ethernet device for each port of a given
 * PCI device.
 *
 * @param[in] pci_drv
 *   PCI driver structure (mlx5_driver).
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	       struct rte_pci_device *pci_dev)
{
	struct ibv_device **list = NULL;
	struct ibv_device *ibv_dev;
	int err = 0;
	struct ibv_context *attr_ctx = NULL;
	struct ibv_device_attr_ex device_attr;
	unsigned int vf = 0;
	unsigned int mps;
	unsigned int cqe_comp;
	unsigned int tunnel_en = 0;
	unsigned int mpls_en = 0;
	unsigned int swp = 0;
	unsigned int verb_priorities = 0;
	unsigned int mprq = 0;
	unsigned int mprq_min_stride_size_n = 0;
	unsigned int mprq_max_stride_size_n = 0;
	unsigned int mprq_min_stride_num_n = 0;
	unsigned int mprq_max_stride_num_n = 0;
	int idx;
	int i;
	struct mlx5dv_context attrs_out = {0};
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
	struct ibv_counter_set_description cs_desc;
#endif
	/* Prepare shared data between primary and secondary process. */
	mlx5_prepare_shared_data();
	assert(pci_drv == &mlx5_driver);
	/* Get mlx5_dev[] index. */
	idx = mlx5_dev_idx(&pci_dev->addr);
	if (idx == -1) {
		DRV_LOG(ERR, "this driver cannot support any more adapters");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	DRV_LOG(DEBUG, "using driver device index %d", idx);
	/* Save PCI address. */
	mlx5_dev[idx].pci_addr = pci_dev->addr;
	list = mlx5_glue->get_device_list(&i);
	if (list == NULL) {
		assert(errno);
		err = errno;
		if (errno == ENOSYS)
			DRV_LOG(ERR,
				"cannot list devices, is ib_uverbs loaded?");
		goto error;
	}
	/*
	 * For each listed device, check related sysfs entry against
	 * the provided PCI ID.
	 */
	while (i != 0) {
		struct rte_pci_addr pci_addr;

		--i;
		DRV_LOG(DEBUG, "checking device \"%s\"", list[i]->name);
		if (mlx5_ibv_device_to_pci_addr(list[i], &pci_addr))
			continue;
		if ((pci_dev->addr.domain != pci_addr.domain) ||
		    (pci_dev->addr.bus != pci_addr.bus) ||
		    (pci_dev->addr.devid != pci_addr.devid) ||
		    (pci_dev->addr.function != pci_addr.function))
			continue;
		DRV_LOG(INFO, "PCI information matches, using device \"%s\"",
			list[i]->name);
		vf = ((pci_dev->id.device_id ==
		       PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) ||
		      (pci_dev->id.device_id ==
		       PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF) ||
		      (pci_dev->id.device_id ==
		       PCI_DEVICE_ID_MELLANOX_CONNECTX5VF) ||
		      (pci_dev->id.device_id ==
		       PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF));
		attr_ctx = mlx5_glue->open_device(list[i]);
		rte_errno = errno;
		err = rte_errno;
		break;
	}
	if (attr_ctx == NULL) {
		switch (err) {
		case 0:
			DRV_LOG(ERR,
				"cannot access device, is mlx5_ib loaded?");
			err = ENODEV;
			break;
		case EINVAL:
			DRV_LOG(ERR,
				"cannot use device, are drivers up to date?");
			break;
		}
		goto error;
	}
	ibv_dev = list[i];
	DRV_LOG(DEBUG, "device opened");
#ifdef HAVE_IBV_MLX5_MOD_SWP
	attrs_out.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
#endif
	/*
	 * Multi-packet send is supported by ConnectX-4 Lx PF as well
	 * as all ConnectX-5 devices.
	 */
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	attrs_out.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
#endif
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
	attrs_out.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
#endif
	mlx5_glue->dv_query_device(attr_ctx, &attrs_out);
	if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
		if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
			DRV_LOG(DEBUG, "enhanced MPW is supported");
			mps = MLX5_MPW_ENHANCED;
		} else {
			DRV_LOG(DEBUG, "MPW is supported");
			mps = MLX5_MPW;
		}
	} else {
		DRV_LOG(DEBUG, "MPW isn't supported");
		mps = MLX5_MPW_DISABLED;
	}
#ifdef HAVE_IBV_MLX5_MOD_SWP
	if (attrs_out.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
		swp = attrs_out.sw_parsing_caps.sw_parsing_offloads;
	DRV_LOG(DEBUG, "SWP support: %u", swp);
#endif
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
	if (attrs_out.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
		struct mlx5dv_striding_rq_caps mprq_caps =
			attrs_out.striding_rq_caps;

		DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d",
			mprq_caps.min_single_stride_log_num_of_bytes);
		DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d",
			mprq_caps.max_single_stride_log_num_of_bytes);
		DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d",
			mprq_caps.min_single_wqe_log_num_of_strides);
		DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d",
			mprq_caps.max_single_wqe_log_num_of_strides);
		DRV_LOG(DEBUG, "\tsupported_qpts: %d",
			mprq_caps.supported_qpts);
		DRV_LOG(DEBUG, "device supports Multi-Packet RQ");
		mprq = 1;
		mprq_min_stride_size_n =
			mprq_caps.min_single_stride_log_num_of_bytes;
		mprq_max_stride_size_n =
			mprq_caps.max_single_stride_log_num_of_bytes;
		mprq_min_stride_num_n =
			mprq_caps.min_single_wqe_log_num_of_strides;
		mprq_max_stride_num_n =
			mprq_caps.max_single_wqe_log_num_of_strides;
	}
#endif
	if (RTE_CACHE_LINE_SIZE == 128 &&
	    !(attrs_out.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
		cqe_comp = 0;
	else
		cqe_comp = 1;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	if (attrs_out.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
		tunnel_en = ((attrs_out.tunnel_offloads_caps &
			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) &&
			     (attrs_out.tunnel_offloads_caps &
			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE));
	}
	DRV_LOG(DEBUG, "tunnel offloading is %ssupported",
		tunnel_en ? "" : "not ");
#else
	DRV_LOG(WARNING,
		"tunnel offloading disabled due to old OFED/rdma-core version");
#endif
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
	mpls_en = ((attrs_out.tunnel_offloads_caps &
		    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
		   (attrs_out.tunnel_offloads_caps &
		    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
	DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported",
		mpls_en ? "" : "not ");
#else
	DRV_LOG(WARNING,
		"MPLS over GRE/UDP tunnel offloading disabled due to old"
		" OFED/rdma-core version or firmware configuration");
#endif
	err = mlx5_glue->query_device_ex(attr_ctx, NULL, &device_attr);
	if (err) {
		DEBUG("ibv_query_device_ex() failed");
		goto error;
	}
	DRV_LOG(INFO, "%u port(s) detected",
		device_attr.orig_attr.phys_port_cnt);
	for (i = 0; i < device_attr.orig_attr.phys_port_cnt; i++) {
		char name[RTE_ETH_NAME_MAX_LEN];
		int len;
		uint32_t port = i + 1; /* ports are indexed from one */
		uint32_t test = (1 << i);
		struct ibv_context *ctx = NULL;
		struct ibv_port_attr port_attr;
		struct ibv_pd *pd = NULL;
		struct priv *priv = NULL;
		struct rte_eth_dev *eth_dev = NULL;
		struct ibv_device_attr_ex device_attr_ex;
		struct ether_addr mac;
		struct mlx5_dev_config config = {
			.cqe_comp = cqe_comp,
			.vf = vf,
			.mps = mps,
			.tunnel_en = tunnel_en,
			.mpls_en = mpls_en,
			.tx_vec_en = 1,
			.rx_vec_en = 1,
			.mpw_hdr_dseg = 0,
			.txq_inline = MLX5_ARG_UNSET,
			.txqs_inline = MLX5_ARG_UNSET,
			.inline_max_packet_sz = MLX5_ARG_UNSET,
			.vf_nl_en = 1,
			.swp = !!swp,
			.mprq = {
				.enabled = 0, /* Disabled by default. */
				.stride_num_n = RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
							mprq_min_stride_num_n),
				.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
				.min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
			},
		};

		len = snprintf(name, sizeof(name), PCI_PRI_FMT,
			       pci_dev->addr.domain, pci_dev->addr.bus,
			       pci_dev->addr.devid, pci_dev->addr.function);
		if (device_attr.orig_attr.phys_port_cnt > 1)
			snprintf(name + len, sizeof(name) - len,
				 " port %u", i);
		mlx5_dev[idx].ports |= test;
		if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
			eth_dev = rte_eth_dev_attach_secondary(name);
			if (eth_dev == NULL) {
				DRV_LOG(ERR, "cannot attach rte ethdev");
				rte_errno = ENOMEM;
				err = rte_errno;
				goto error;
			}
			eth_dev->device = &pci_dev->device;
			eth_dev->dev_ops = &mlx5_dev_sec_ops;
			err = mlx5_uar_init_secondary(eth_dev);
			if (err) {
				err = rte_errno;
				goto error;
			}
			/* Receive command fd from primary process. */
			err = mlx5_socket_connect(eth_dev);
			if (err < 0) {
				err = rte_errno;
				goto error;
			}
			/* Remap UAR for Tx queues. */
			err = mlx5_tx_uar_remap(eth_dev, err);
			if (err) {
				err = rte_errno;
				goto error;
			}
			/*
			 * Ethdev pointer is still required as input since
			 * the primary device is not accessible from the
			 * secondary process.
			 */
			eth_dev->rx_pkt_burst =
				mlx5_select_rx_function(eth_dev);
			eth_dev->tx_pkt_burst =
				mlx5_select_tx_function(eth_dev);
			rte_eth_dev_probing_finish(eth_dev);
			continue;
		}
954 ctx = mlx5_glue->open_device(ibv_dev);
959 /* Check port status. */
960 err = mlx5_glue->query_port(ctx, port, &port_attr);
962 DRV_LOG(ERR, "port query failed: %s", strerror(err));
965 if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
967 "port %d is not configured in Ethernet mode",
972 if (port_attr.state != IBV_PORT_ACTIVE)
973 DRV_LOG(DEBUG, "port %d is not active: \"%s\" (%d)",
975 mlx5_glue->port_state_str(port_attr.state),
977 /* Allocate protection domain. */
978 pd = mlx5_glue->alloc_pd(ctx);
980 DRV_LOG(ERR, "PD allocation failure");
		mlx5_dev[idx].ports |= test;
		/* from rte_ethdev.c */
		priv = rte_zmalloc("ethdev private structure",
				   sizeof(*priv),
				   RTE_CACHE_LINE_SIZE);
		if (priv == NULL) {
			DRV_LOG(ERR, "priv allocation failure");
			err = ENOMEM;
			goto port_error;
		}
		priv->ctx = ctx;
		strncpy(priv->ibdev_path, priv->ctx->device->ibdev_path,
			sizeof(priv->ibdev_path));
		priv->device_attr = device_attr;
		priv->port = port;
		priv->pd = pd;
		priv->mtu = ETHER_MTU;
		err = mlx5_args(&config, pci_dev->device.devargs);
		if (err) {
			err = rte_errno;
			DRV_LOG(ERR, "failed to process device arguments: %s",
				strerror(rte_errno));
			goto port_error;
		}
		err = mlx5_glue->query_device_ex(ctx, NULL, &device_attr_ex);
		if (err) {
			DRV_LOG(ERR, "ibv_query_device_ex() failed");
			goto port_error;
		}
		config.hw_csum = !!(device_attr_ex.device_cap_flags_ex &
				    IBV_DEVICE_RAW_IP_CSUM);
		DRV_LOG(DEBUG, "checksum offloading is %ssupported",
			(config.hw_csum ? "" : "not "));
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
		config.flow_counter_en = !!(device_attr.max_counter_sets);
		mlx5_glue->describe_counter_set(ctx, 0, &cs_desc);
		DRV_LOG(DEBUG,
			"counter type = %d, num of cs = %ld, attributes = %d",
			cs_desc.counter_type, cs_desc.num_of_cs,
			cs_desc.attributes);
#endif
		config.ind_table_max_size =
			device_attr_ex.rss_caps.max_rwq_indirection_table_size;
		/*
		 * Remove this check once DPDK supports larger/variable
		 * indirection tables.
		 */
		if (config.ind_table_max_size >
		    (unsigned int)ETH_RSS_RETA_SIZE_512)
			config.ind_table_max_size = ETH_RSS_RETA_SIZE_512;
		DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
			config.ind_table_max_size);
		config.hw_vlan_strip = !!(device_attr_ex.raw_packet_caps &
					  IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
		DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
			(config.hw_vlan_strip ? "" : "not "));
		config.hw_fcs_strip = !!(device_attr_ex.raw_packet_caps &
					 IBV_RAW_PACKET_CAP_SCATTER_FCS);
		DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
			(config.hw_fcs_strip ? "" : "not "));
#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
		config.hw_padding = !!device_attr_ex.rx_pad_end_addr_align;
#endif
		DRV_LOG(DEBUG,
			"hardware Rx end alignment padding is %ssupported",
			(config.hw_padding ? "" : "not "));
		config.tso = ((device_attr_ex.tso_caps.max_tso > 0) &&
			      (device_attr_ex.tso_caps.supported_qpts &
			       (1 << IBV_QPT_RAW_PACKET)));
		if (config.tso)
			config.tso_max_payload_sz =
				device_attr_ex.tso_caps.max_tso;
		if (config.mps && !mps) {
			DRV_LOG(ERR,
				"multi-packet send not supported on this device"
				" (" MLX5_TXQ_MPW_EN ")");
			err = ENOTSUP;
			goto port_error;
		}
		DRV_LOG(INFO, "%s MPS is %s",
			config.mps == MLX5_MPW_ENHANCED ? "enhanced " : "",
			config.mps != MLX5_MPW_DISABLED ? "enabled" :
			"disabled");
		if (config.cqe_comp && !cqe_comp) {
			DRV_LOG(WARNING, "Rx CQE compression isn't supported");
			config.cqe_comp = 0;
		}
		config.mprq.enabled = config.mprq.enabled && mprq;
		if (config.mprq.enabled) {
			if (config.mprq.stride_num_n > mprq_max_stride_num_n ||
			    config.mprq.stride_num_n < mprq_min_stride_num_n) {
				config.mprq.stride_num_n =
					RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
						mprq_min_stride_num_n);
				DRV_LOG(WARNING,
					"the number of strides"
					" for Multi-Packet RQ is out of range,"
					" setting default value (%u)",
					1 << config.mprq.stride_num_n);
			}
			config.mprq.min_stride_size_n = mprq_min_stride_size_n;
			config.mprq.max_stride_size_n = mprq_max_stride_size_n;
		}
		eth_dev = rte_eth_dev_allocate(name);
		if (eth_dev == NULL) {
			DRV_LOG(ERR, "cannot allocate rte ethdev");
			err = ENOMEM;
			goto port_error;
		}
		eth_dev->data->dev_private = priv;
		priv->dev_data = eth_dev->data;
		eth_dev->data->mac_addrs = priv->mac;
		eth_dev->device = &pci_dev->device;
		rte_eth_copy_pci_info(eth_dev, pci_dev);
		eth_dev->device->driver = &mlx5_driver.driver;
		err = mlx5_uar_init_primary(eth_dev);
		if (err) {
			err = rte_errno;
			goto port_error;
		}
		/* Configure the first MAC address by default. */
		if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
			DRV_LOG(ERR,
				"port %u cannot get MAC address, is mlx5_en"
				" loaded? (errno: %s)",
				eth_dev->data->port_id, strerror(errno));
			err = ENODEV;
			goto port_error;
		}
		DRV_LOG(INFO,
			"port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
			eth_dev->data->port_id,
			mac.addr_bytes[0], mac.addr_bytes[1],
			mac.addr_bytes[2], mac.addr_bytes[3],
			mac.addr_bytes[4], mac.addr_bytes[5]);
#ifndef NDEBUG
		{
			char ifname[IF_NAMESIZE];

			if (mlx5_get_ifname(eth_dev, &ifname) == 0)
				DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
					eth_dev->data->port_id, ifname);
			else
				DRV_LOG(DEBUG, "port %u ifname is unknown",
					eth_dev->data->port_id);
		}
#endif
		/* Get actual MTU if possible. */
		err = mlx5_get_mtu(eth_dev, &priv->mtu);
		if (err) {
			err = rte_errno;
			goto port_error;
		}
		DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id,
			priv->mtu);
		/*
		 * Initialize burst functions to prevent crashes before
		 * link-up.
		 */
		eth_dev->rx_pkt_burst = removed_rx_burst;
		eth_dev->tx_pkt_burst = removed_tx_burst;
		eth_dev->dev_ops = &mlx5_dev_ops;
		/* Register MAC address. */
		claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
		priv->nl_socket = -1;
		if (vf && config.vf_nl_en) {
			priv->nl_socket = mlx5_nl_init(RTMGRP_LINK);
			if (priv->nl_socket < 0)
				priv->nl_socket = -1;
			mlx5_nl_mac_addr_sync(eth_dev);
		}
		TAILQ_INIT(&priv->flows);
		TAILQ_INIT(&priv->ctrl_flows);
		/* Hint libmlx5 to use PMD allocator for data plane resources */
		struct mlx5dv_ctx_allocators alctr = {
			.alloc = &mlx5_alloc_verbs_buf,
			.free = &mlx5_free_verbs_buf,
			.data = priv,
		};
		mlx5_glue->dv_set_context_attr(ctx,
					       MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
					       (void *)((uintptr_t)&alctr));
		/* Bring Ethernet device up. */
		DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
			eth_dev->data->port_id);
		mlx5_set_link_up(eth_dev);
		/*
		 * Even though the interrupt handler is not installed yet,
		 * interrupts will still trigger on the async_fd from
		 * Verbs context returned by ibv_open_device().
		 */
		mlx5_link_update(eth_dev, 0);
		/* Store device configuration on private structure. */
		priv->config = config;
		/* Create drop queue. */
		err = mlx5_flow_create_drop_queue(eth_dev);
		if (err) {
			DRV_LOG(ERR, "port %u drop queue allocation failed: %s",
				eth_dev->data->port_id, strerror(rte_errno));
			err = rte_errno;
			goto port_error;
		}
		/* Supported Verbs flow priority number detection. */
		if (verb_priorities == 0)
			verb_priorities = mlx5_get_max_verbs_prio(eth_dev);
		if (verb_priorities < MLX5_VERBS_FLOW_PRIO_8) {
			DRV_LOG(ERR, "port %u wrong Verbs flow priorities: %u",
				eth_dev->data->port_id, verb_priorities);
			err = ENOTSUP;
			goto port_error;
		}
		priv->config.max_verbs_prio = verb_priorities;
		/* Add device to memory callback list. */
		rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
		LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
				 priv, mem_event_cb);
		rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
		rte_eth_dev_probing_finish(eth_dev);
		continue;
port_error:
		if (priv)
			rte_free(priv);
		if (pd)
			claim_zero(mlx5_glue->dealloc_pd(pd));
		if (ctx)
			claim_zero(mlx5_glue->close_device(ctx));
		if (eth_dev && rte_eal_process_type() == RTE_PROC_PRIMARY)
			rte_eth_dev_release_port(eth_dev);
		break;
	}
	/*
	 * XXX if something went wrong in the loop above, there is a resource
	 * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as
	 * long as the dpdk does not provide a way to deallocate a ethdev and a
	 * way to enumerate the registered ethdevs to free the previous ones.
	 */
	/* no port found, complain */
	if (!mlx5_dev[idx].ports) {
		rte_errno = ENODEV;
		err = rte_errno;
	}
error:
	if (attr_ctx)
		claim_zero(mlx5_glue->close_device(attr_ctx));
	if (list)
		mlx5_glue->free_device_list(list);
	if (err) {
		rte_errno = err;
		return -rte_errno;
	}
	return 0;
}

static const struct rte_pci_id mlx5_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
	},
	{
		.vendor_id = 0
	}
};

static struct rte_pci_driver mlx5_driver = {
	.driver = {
		.name = MLX5_DRIVER_NAME
	},
	.id_table = mlx5_pci_id_map,
	.probe = mlx5_pci_probe,
	.drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV,
};

#ifdef RTE_LIBRTE_MLX5_DLOPEN_DEPS

/**
 * Suffix RTE_EAL_PMD_PATH with "-glue".
 *
 * This function performs a sanity check on RTE_EAL_PMD_PATH before
 * suffixing its last component.
 *
 * @param buf[out]
 *   Output buffer, should be large enough otherwise NULL is returned.
 * @param size
 *   Size of @p buf.
 *
 * @return
 *   Pointer to @p buf or @p NULL in case suffix cannot be appended.
 */
static char *
mlx5_glue_path(char *buf, size_t size)
{
	static const char *const bad[] = { "/", ".", "..", NULL };
	const char *path = RTE_EAL_PMD_PATH;
	size_t len = strlen(path);
	size_t off;
	int i;

	while (len && path[len - 1] == '/')
		--len;
	for (off = len; off && path[off - 1] != '/'; --off)
		;
	for (i = 0; bad[i]; ++i)
		if (!strncmp(path + off, bad[i], (int)(len - off)))
			goto error;
	i = snprintf(buf, size, "%.*s-glue", (int)len, path);
	if (i == -1 || (size_t)i >= size)
		goto error;
	return buf;
error:
	DRV_LOG(ERR,
		"unable to append \"-glue\" to last component of"
		" RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\"),"
		" please re-configure DPDK");
	return NULL;
}

/**
 * Initialization routine for run-time dependency on rdma-core.
 */
static int
mlx5_glue_init(void)
{
	char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")];
	const char *path[] = {
		/*
		 * A basic security check is necessary before trusting
		 * MLX5_GLUE_PATH, which may override RTE_EAL_PMD_PATH.
		 */
		(geteuid() == getuid() && getegid() == getgid() ?
		 getenv("MLX5_GLUE_PATH") : NULL),
		/*
		 * When RTE_EAL_PMD_PATH is set, use its glue-suffixed
		 * variant, otherwise let dlopen() look up libraries on its
		 * own.
		 */
		(*RTE_EAL_PMD_PATH ?
		 mlx5_glue_path(glue_path, sizeof(glue_path)) : ""),
	};
	unsigned int i = 0;
	void *handle = NULL;
	void **sym;
	const char *dlmsg;

	while (!handle && i != RTE_DIM(path)) {
		const char *end;
		size_t len;
		int ret;

		if (!path[i]) {
			++i;
			continue;
		}
		end = strpbrk(path[i], ":;");
		if (!end)
			end = path[i] + strlen(path[i]);
		len = end - path[i];
		ret = 0;
		do {
			char name[len + strlen(MLX5_GLUE) + 2];

			ret = snprintf(name, sizeof(name), "%.*s%s" MLX5_GLUE,
				       (int)len, path[i],
				       (!len || *(end - 1) == '/') ? "" : "/");
			if (ret == -1)
				break;
			if (sizeof(name) != (size_t)ret + 1)
				continue;
			DRV_LOG(DEBUG, "looking for rdma-core glue as \"%s\"",
				name);
			handle = dlopen(name, RTLD_LAZY);
			break;
		} while (1);
		path[i] = end + 1;
		if (!*end)
			++i;
	}
	if (!handle) {
		rte_errno = EINVAL;
		dlmsg = dlerror();
		if (dlmsg)
			DRV_LOG(WARNING, "cannot load glue library: %s", dlmsg);
		goto glue_error;
	}
	sym = dlsym(handle, "mlx5_glue");
	if (!sym || !*sym) {
		rte_errno = EINVAL;
		dlmsg = dlerror();
		if (dlmsg)
			DRV_LOG(ERR, "cannot resolve glue symbol: %s", dlmsg);
		goto glue_error;
	}
	mlx5_glue = *sym;
	return 0;
glue_error:
	if (handle)
		dlclose(handle);
	DRV_LOG(WARNING,
		"cannot initialize PMD due to missing run-time dependency on"
		" rdma-core libraries (libibverbs, libmlx5)");
	return -rte_errno;
}

#endif

/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx5_pmd_init);
static void
rte_mlx5_pmd_init(void)
{
	/* Build the static tables for Verbs conversion. */
	mlx5_set_ptype_table();
	mlx5_set_cksum_table();
	mlx5_set_swp_types_table();
	/*
	 * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
	 * huge pages. Calling ibv_fork_init() during init allows
	 * applications to use fork() safely for purposes other than
	 * using this PMD, which is not supported in forked processes.
	 */
	setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
	/* Match the size of Rx completion entry to the size of a cacheline. */
	if (RTE_CACHE_LINE_SIZE == 128)
		setenv("MLX5_CQE_SIZE", "128", 0);
#ifdef RTE_LIBRTE_MLX5_DLOPEN_DEPS
	if (mlx5_glue_init())
		return;
	assert(mlx5_glue);
#endif
#ifndef NDEBUG
	/* Glue structure must not contain any NULL pointers. */
	{
		unsigned int i;

		for (i = 0; i != sizeof(*mlx5_glue) / sizeof(void *); ++i)
			assert(((const void *const *)mlx5_glue)[i]);
	}
#endif
	if (strcmp(mlx5_glue->version, MLX5_GLUE_VERSION)) {
		DRV_LOG(ERR,
			"rdma-core glue \"%s\" mismatch: \"%s\" is required",
			mlx5_glue->version, MLX5_GLUE_VERSION);
		return;
	}
	mlx5_glue->fork_init();
	rte_pci_register(&mlx5_driver);
	rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
					mlx5_mr_mem_event_cb, NULL);
}

RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib");

/** Initialize driver log type. */
RTE_INIT(mlx5_init_log)
{
	mlx5_logtype = rte_log_register("pmd.net.mlx5");
	if (mlx5_logtype >= 0)
		rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE);
}