1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
16 #include <linux/rtnetlink.h>
19 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
21 #pragma GCC diagnostic ignored "-Wpedantic"
23 #include <infiniband/verbs.h>
25 #pragma GCC diagnostic error "-Wpedantic"
28 #include <rte_malloc.h>
29 #include <rte_ethdev_driver.h>
30 #include <rte_ethdev_pci.h>
32 #include <rte_bus_pci.h>
33 #include <rte_common.h>
34 #include <rte_config.h>
35 #include <rte_eal_memconfig.h>
36 #include <rte_kvargs.h>
39 #include "mlx5_utils.h"
40 #include "mlx5_rxtx.h"
41 #include "mlx5_autoconf.h"
42 #include "mlx5_defs.h"
43 #include "mlx5_glue.h"
46 /* Device parameter to enable RX completion queue compression. */
47 #define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"
49 /* Device parameter to configure inline send. */
50 #define MLX5_TXQ_INLINE "txq_inline"
53  * Device parameter to configure the minimum number of Tx queues at which
54  * inline send is enabled.
56 #define MLX5_TXQS_MIN_INLINE "txqs_min_inline"
58 /* Device parameter to enable multi-packet send WQEs. */
59 #define MLX5_TXQ_MPW_EN "txq_mpw_en"
61 /* Device parameter to include 2 dsegs in the title WQEBB. */
62 #define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en"
64 /* Device parameter to limit the maximum size of an inlined packet. */
65 #define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"
67 /* Device parameter to enable hardware Tx vector. */
68 #define MLX5_TX_VEC_EN "tx_vec_en"
70 /* Device parameter to enable hardware Rx vector. */
71 #define MLX5_RX_VEC_EN "rx_vec_en"
73 /* Allow L3 VXLAN flow creation. */
74 #define MLX5_L3_VXLAN_EN "l3_vxlan_en"
76 /* Activate Netlink support in VF mode. */
77 #define MLX5_VF_NL_EN "vf_nl_en"
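/*
 * Illustrative only: these keys are consumed from the EAL device arguments,
 * e.g. a whitelist entry such as "-w 0000:05:00.0,rxq_cqe_comp_en=0,txq_inline=200"
 * (PCI address and values are hypothetical) ends up being parsed by mlx5_args()
 * and mlx5_args_check() below.
 */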
79 #ifndef HAVE_IBV_MLX5_MOD_MPW
80 #define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
81 #define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
84 #ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
85 #define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
88 static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";
90 /* Shared memory between primary and secondary processes. */
91 struct mlx5_shared_data *mlx5_shared_data;
93 /* Spinlock for mlx5_shared_data allocation. */
94 static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
96 /** Driver-specific log messages type. */
100 * Prepare shared data between primary and secondary process.
103 mlx5_prepare_shared_data(void)
105 const struct rte_memzone *mz;
107 rte_spinlock_lock(&mlx5_shared_data_lock);
108 if (mlx5_shared_data == NULL) {
109 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
110 /* Allocate shared memory. */
111 mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
112 sizeof(*mlx5_shared_data),
115 /* Lookup allocated shared memory. */
116 mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
119 rte_panic("Cannot allocate mlx5 shared data\n");
120 mlx5_shared_data = mz->addr;
121 /* Initialize shared data. */
122 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
123 LIST_INIT(&mlx5_shared_data->mem_event_cb_list);
124 rte_rwlock_init(&mlx5_shared_data->mem_event_rwlock);
127 rte_spinlock_unlock(&mlx5_shared_data_lock);
131 * Retrieve integer value from environment variable.
134 * Environment variable name.
137 * Integer value, 0 if the variable is not set.
140 mlx5_getenv_int(const char *name)
142 const char *val = getenv(name);
150  * Verbs callback to allocate memory. This function should allocate a buffer
151  * of the requested size, residing inside a huge page.
152  * Please note that all allocations must respect the alignment required by
153  * libmlx5 (i.e. currently sysconf(_SC_PAGESIZE)).
156 * The size in bytes of the memory to allocate.
158 * A pointer to the callback data.
161 * Allocated buffer, NULL otherwise and rte_errno is set.
164 mlx5_alloc_verbs_buf(size_t size, void *data)
166 struct priv *priv = data;
168 size_t alignment = sysconf(_SC_PAGESIZE);
169 unsigned int socket = SOCKET_ID_ANY;
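	/* Allocate from the NUMA socket of the queue being created when known. */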
171 if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) {
172 const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;
174 socket = ctrl->socket;
175 } else if (priv->verbs_alloc_ctx.type ==
176 MLX5_VERBS_ALLOC_TYPE_RX_QUEUE) {
177 const struct mlx5_rxq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;
179 socket = ctrl->socket;
181 assert(data != NULL);
182 ret = rte_malloc_socket(__func__, size, alignment, socket);
189  * Verbs callback to free memory.
192 * A pointer to the memory to free.
194 * A pointer to the callback data.
197 mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
199 assert(data != NULL);
204 * DPDK callback to close the device.
206 * Destroy all queues and objects, free memory.
209 * Pointer to Ethernet device structure.
212 mlx5_dev_close(struct rte_eth_dev *dev)
214 struct priv *priv = dev->data->dev_private;
218 DRV_LOG(DEBUG, "port %u closing device \"%s\"",
220 ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
221 /* In case mlx5_dev_stop() has not been called. */
222 mlx5_dev_interrupt_handler_uninstall(dev);
223 mlx5_traffic_disable(dev);
224 /* Prevent crashes when queues are still in use. */
225 dev->rx_pkt_burst = removed_rx_burst;
226 dev->tx_pkt_burst = removed_tx_burst;
227 if (priv->rxqs != NULL) {
228 /* XXX race condition if mlx5_rx_burst() is still running. */
230 for (i = 0; (i != priv->rxqs_n); ++i)
231 mlx5_rxq_release(dev, i);
235 if (priv->txqs != NULL) {
236 /* XXX race condition if mlx5_tx_burst() is still running. */
238 for (i = 0; (i != priv->txqs_n); ++i)
239 mlx5_txq_release(dev, i);
243 mlx5_flow_delete_drop_queue(dev);
244 mlx5_mr_release(dev);
245 if (priv->pd != NULL) {
246 assert(priv->ctx != NULL);
247 claim_zero(mlx5_glue->dealloc_pd(priv->pd));
248 claim_zero(mlx5_glue->close_device(priv->ctx));
250 assert(priv->ctx == NULL);
251 if (priv->rss_conf.rss_key != NULL)
252 rte_free(priv->rss_conf.rss_key);
253 if (priv->reta_idx != NULL)
254 rte_free(priv->reta_idx);
255 if (priv->primary_socket)
256 mlx5_socket_uninit(dev);
258 mlx5_nl_mac_addr_flush(dev);
259 if (priv->nl_socket >= 0)
260 close(priv->nl_socket);
261 ret = mlx5_hrxq_ibv_verify(dev);
263 DRV_LOG(WARNING, "port %u some hash Rx queues still remain",
265 ret = mlx5_ind_table_ibv_verify(dev);
267 DRV_LOG(WARNING, "port %u some indirection tables still remain",
269 ret = mlx5_rxq_ibv_verify(dev);
271 DRV_LOG(WARNING, "port %u some Verbs Rx queues still remain",
273 ret = mlx5_rxq_verify(dev);
275 DRV_LOG(WARNING, "port %u some Rx queues still remain",
277 ret = mlx5_txq_ibv_verify(dev);
279 DRV_LOG(WARNING, "port %u some Verbs Tx queues still remain",
281 ret = mlx5_txq_verify(dev);
283 DRV_LOG(WARNING, "port %u some Tx queues still remain",
285 ret = mlx5_flow_verify(dev);
287 DRV_LOG(WARNING, "port %u some flows still remain",
289 memset(priv, 0, sizeof(*priv));
292 const struct eth_dev_ops mlx5_dev_ops = {
293 .dev_configure = mlx5_dev_configure,
294 .dev_start = mlx5_dev_start,
295 .dev_stop = mlx5_dev_stop,
296 .dev_set_link_down = mlx5_set_link_down,
297 .dev_set_link_up = mlx5_set_link_up,
298 .dev_close = mlx5_dev_close,
299 .promiscuous_enable = mlx5_promiscuous_enable,
300 .promiscuous_disable = mlx5_promiscuous_disable,
301 .allmulticast_enable = mlx5_allmulticast_enable,
302 .allmulticast_disable = mlx5_allmulticast_disable,
303 .link_update = mlx5_link_update,
304 .stats_get = mlx5_stats_get,
305 .stats_reset = mlx5_stats_reset,
306 .xstats_get = mlx5_xstats_get,
307 .xstats_reset = mlx5_xstats_reset,
308 .xstats_get_names = mlx5_xstats_get_names,
309 .dev_infos_get = mlx5_dev_infos_get,
310 .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
311 .vlan_filter_set = mlx5_vlan_filter_set,
312 .rx_queue_setup = mlx5_rx_queue_setup,
313 .tx_queue_setup = mlx5_tx_queue_setup,
314 .rx_queue_release = mlx5_rx_queue_release,
315 .tx_queue_release = mlx5_tx_queue_release,
316 .flow_ctrl_get = mlx5_dev_get_flow_ctrl,
317 .flow_ctrl_set = mlx5_dev_set_flow_ctrl,
318 .mac_addr_remove = mlx5_mac_addr_remove,
319 .mac_addr_add = mlx5_mac_addr_add,
320 .mac_addr_set = mlx5_mac_addr_set,
321 .set_mc_addr_list = mlx5_set_mc_addr_list,
322 .mtu_set = mlx5_dev_set_mtu,
323 .vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
324 .vlan_offload_set = mlx5_vlan_offload_set,
325 .reta_update = mlx5_dev_rss_reta_update,
326 .reta_query = mlx5_dev_rss_reta_query,
327 .rss_hash_update = mlx5_rss_hash_update,
328 .rss_hash_conf_get = mlx5_rss_hash_conf_get,
329 .filter_ctrl = mlx5_dev_filter_ctrl,
330 .rx_descriptor_status = mlx5_rx_descriptor_status,
331 .tx_descriptor_status = mlx5_tx_descriptor_status,
332 .rx_queue_intr_enable = mlx5_rx_intr_enable,
333 .rx_queue_intr_disable = mlx5_rx_intr_disable,
334 .is_removed = mlx5_is_removed,
337 static const struct eth_dev_ops mlx5_dev_sec_ops = {
338 .stats_get = mlx5_stats_get,
339 .stats_reset = mlx5_stats_reset,
340 .xstats_get = mlx5_xstats_get,
341 .xstats_reset = mlx5_xstats_reset,
342 .xstats_get_names = mlx5_xstats_get_names,
343 .dev_infos_get = mlx5_dev_infos_get,
344 .rx_descriptor_status = mlx5_rx_descriptor_status,
345 .tx_descriptor_status = mlx5_tx_descriptor_status,
348 /* Available operations in flow isolated mode. */
349 const struct eth_dev_ops mlx5_dev_ops_isolate = {
350 .dev_configure = mlx5_dev_configure,
351 .dev_start = mlx5_dev_start,
352 .dev_stop = mlx5_dev_stop,
353 .dev_set_link_down = mlx5_set_link_down,
354 .dev_set_link_up = mlx5_set_link_up,
355 .dev_close = mlx5_dev_close,
356 .link_update = mlx5_link_update,
357 .stats_get = mlx5_stats_get,
358 .stats_reset = mlx5_stats_reset,
359 .xstats_get = mlx5_xstats_get,
360 .xstats_reset = mlx5_xstats_reset,
361 .xstats_get_names = mlx5_xstats_get_names,
362 .dev_infos_get = mlx5_dev_infos_get,
363 .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
364 .vlan_filter_set = mlx5_vlan_filter_set,
365 .rx_queue_setup = mlx5_rx_queue_setup,
366 .tx_queue_setup = mlx5_tx_queue_setup,
367 .rx_queue_release = mlx5_rx_queue_release,
368 .tx_queue_release = mlx5_tx_queue_release,
369 .flow_ctrl_get = mlx5_dev_get_flow_ctrl,
370 .flow_ctrl_set = mlx5_dev_set_flow_ctrl,
371 .mac_addr_remove = mlx5_mac_addr_remove,
372 .mac_addr_add = mlx5_mac_addr_add,
373 .mac_addr_set = mlx5_mac_addr_set,
374 .set_mc_addr_list = mlx5_set_mc_addr_list,
375 .mtu_set = mlx5_dev_set_mtu,
376 .vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
377 .vlan_offload_set = mlx5_vlan_offload_set,
378 .filter_ctrl = mlx5_dev_filter_ctrl,
379 .rx_descriptor_status = mlx5_rx_descriptor_status,
380 .tx_descriptor_status = mlx5_tx_descriptor_status,
381 .rx_queue_intr_enable = mlx5_rx_intr_enable,
382 .rx_queue_intr_disable = mlx5_rx_intr_disable,
383 .is_removed = mlx5_is_removed,
387 struct rte_pci_addr pci_addr; /* Associated PCI address. */
388 uint32_t ports; /* physical ports bitfield. */
392 * Get device index in mlx5_dev[] from PCI bus address.
394 * @param[in] pci_addr
395 * PCI bus address to look for.
398 * mlx5_dev[] index on success, -1 on failure.
401 mlx5_dev_idx(struct rte_pci_addr *pci_addr)
406 assert(pci_addr != NULL);
407 for (i = 0; (i != RTE_DIM(mlx5_dev)); ++i) {
408 if ((mlx5_dev[i].pci_addr.domain == pci_addr->domain) &&
409 (mlx5_dev[i].pci_addr.bus == pci_addr->bus) &&
410 (mlx5_dev[i].pci_addr.devid == pci_addr->devid) &&
411 (mlx5_dev[i].pci_addr.function == pci_addr->function))
413 if ((mlx5_dev[i].ports == 0) && (ret == -1))
420 * Verify and store value for device argument.
423 * Key argument to verify.
425 * Value associated with key.
430 * 0 on success, a negative errno value otherwise and rte_errno is set.
433 mlx5_args_check(const char *key, const char *val, void *opaque)
435 struct mlx5_dev_config *config = opaque;
439 tmp = strtoul(val, NULL, 0);
442 DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
445 if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
446 config->cqe_comp = !!tmp;
447 } else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
448 config->txq_inline = tmp;
449 } else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
450 config->txqs_inline = tmp;
451 } else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
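		/* txq_mpw_en can only disable MPW; enabling it still depends on HW support. */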
452 config->mps = !!tmp ? config->mps : 0;
453 } else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
454 config->mpw_hdr_dseg = !!tmp;
455 } else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
456 config->inline_max_packet_sz = tmp;
457 } else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
458 config->tx_vec_en = !!tmp;
459 } else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
460 config->rx_vec_en = !!tmp;
461 } else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) {
462 config->l3_vxlan_en = !!tmp;
463 } else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
464 config->vf_nl_en = !!tmp;
466 DRV_LOG(WARNING, "%s: unknown parameter", key);
474 * Parse device parameters.
477 * Pointer to device configuration structure.
479 * Device arguments structure.
482 * 0 on success, a negative errno value otherwise and rte_errno is set.
485 mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
487 const char **params = (const char *[]){
488 MLX5_RXQ_CQE_COMP_EN,
490 MLX5_TXQS_MIN_INLINE,
492 MLX5_TXQ_MPW_HDR_DSEG_EN,
493 MLX5_TXQ_MAX_INLINE_LEN,
500 struct rte_kvargs *kvlist;
506 /* Following UGLY cast is done to pass checkpatch. */
507 kvlist = rte_kvargs_parse(devargs->args, params);
510 /* Process parameters. */
511 for (i = 0; (params[i] != NULL); ++i) {
512 if (rte_kvargs_count(kvlist, params[i])) {
513 ret = rte_kvargs_process(kvlist, params[i],
514 mlx5_args_check, config);
517 rte_kvargs_free(kvlist);
522 rte_kvargs_free(kvlist);
526 static struct rte_pci_driver mlx5_driver;
529  * Reserved UAR address space for TXQ UAR (HW doorbell) mapping; a process-
530  * local resource used by both primary and secondary to avoid duplicate reservation.
532  * The space has to be available in both primary and secondary processes, since
533  * TXQ UAR maps to this area using a fixed mmap without double checking.
535 static void *uar_base;
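/* rte_memseg_walk() callback: record the lowest hugepage segment address. */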
538 find_lower_va_bound(const struct rte_memseg_list *msl __rte_unused,
539 const struct rte_memseg *ms, void *arg)
546 *addr = RTE_MIN(*addr, ms->addr);
552 * Reserve UAR address space for primary process.
555 * Pointer to Ethernet device.
558 * 0 on success, a negative errno value otherwise and rte_errno is set.
561 mlx5_uar_init_primary(struct rte_eth_dev *dev)
563 struct priv *priv = dev->data->dev_private;
564 void *addr = (void *)0;
566 if (uar_base) { /* UAR address space mapped. */
567 priv->uar_base = uar_base;
570 /* find out lower bound of hugepage segments */
571 rte_memseg_walk(find_lower_va_bound, &addr);
573 /* keep distance to hugepages to minimize potential conflicts. */
574 addr = RTE_PTR_SUB(addr, MLX5_UAR_OFFSET + MLX5_UAR_SIZE);
575 /* anonymous mmap, no real memory consumption. */
576 addr = mmap(addr, MLX5_UAR_SIZE,
577 PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
578 if (addr == MAP_FAILED) {
580 "port %u failed to reserve UAR address space, please"
581 " adjust MLX5_UAR_SIZE or try --base-virtaddr",
586 /* Accept either the same addr or a new addr returned by mmap if the target range is busy. */
589 DRV_LOG(INFO, "port %u reserved UAR address space: %p",
590 dev->data->port_id, addr);
591 priv->uar_base = addr; /* for primary and secondary UAR re-mmap. */
592 uar_base = addr; /* process local, don't reserve again. */
597  * Reserve UAR address space for secondary process, aligned with the primary process.
601 * Pointer to Ethernet device.
604 * 0 on success, a negative errno value otherwise and rte_errno is set.
607 mlx5_uar_init_secondary(struct rte_eth_dev *dev)
609 struct priv *priv = dev->data->dev_private;
612 assert(priv->uar_base);
613 if (uar_base) { /* already reserved. */
614 assert(uar_base == priv->uar_base);
617 /* anonymous mmap, no real memory consumption. */
618 addr = mmap(priv->uar_base, MLX5_UAR_SIZE,
619 PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
620 if (addr == MAP_FAILED) {
621 DRV_LOG(ERR, "port %u UAR mmap failed: %p size: %llu",
622 dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
626 if (priv->uar_base != addr) {
628 "port %u UAR address %p size %llu occupied, please"
629 " adjust MLX5_UAR_OFFSET or try EAL parameter"
631 dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
635 uar_base = addr; /* process local, don't reserve again */
636 DRV_LOG(INFO, "port %u reserved UAR address space: %p",
637 dev->data->port_id, addr);
642 * DPDK callback to register a PCI device.
644  * This function creates an Ethernet device for each port of a given PCI device.
648 * PCI driver structure (mlx5_driver).
650 * PCI device information.
653 * 0 on success, a negative errno value otherwise and rte_errno is set.
656 mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
657 struct rte_pci_device *pci_dev)
659 struct ibv_device **list = NULL;
660 struct ibv_device *ibv_dev;
662 struct ibv_context *attr_ctx = NULL;
663 struct ibv_device_attr_ex device_attr;
666 unsigned int cqe_comp;
667 unsigned int tunnel_en = 0;
668 unsigned int swp = 0;
669 unsigned int verb_priorities = 0;
672 struct mlx5dv_context attrs_out = {0};
673 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
674 struct ibv_counter_set_description cs_desc;
677 /* Prepare shared data between primary and secondary process. */
678 mlx5_prepare_shared_data();
679 assert(pci_drv == &mlx5_driver);
680 /* Get mlx5_dev[] index. */
681 idx = mlx5_dev_idx(&pci_dev->addr);
683 DRV_LOG(ERR, "this driver cannot support any more adapters");
687 DRV_LOG(DEBUG, "using driver device index %d", idx);
688 /* Save PCI address. */
689 mlx5_dev[idx].pci_addr = pci_dev->addr;
690 list = mlx5_glue->get_device_list(&i);
696 "cannot list devices, is ib_uverbs loaded?");
701 * For each listed device, check related sysfs entry against
702 * the provided PCI ID.
705 struct rte_pci_addr pci_addr;
708 DRV_LOG(DEBUG, "checking device \"%s\"", list[i]->name);
709 if (mlx5_ibv_device_to_pci_addr(list[i], &pci_addr))
711 if ((pci_dev->addr.domain != pci_addr.domain) ||
712 (pci_dev->addr.bus != pci_addr.bus) ||
713 (pci_dev->addr.devid != pci_addr.devid) ||
714 (pci_dev->addr.function != pci_addr.function))
716 DRV_LOG(INFO, "PCI information matches, using device \"%s\"",
718 vf = ((pci_dev->id.device_id ==
719 PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) ||
720 (pci_dev->id.device_id ==
721 PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF) ||
722 (pci_dev->id.device_id ==
723 PCI_DEVICE_ID_MELLANOX_CONNECTX5VF) ||
724 (pci_dev->id.device_id ==
725 PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF));
726 attr_ctx = mlx5_glue->open_device(list[i]);
731 if (attr_ctx == NULL) {
735 "cannot access device, is mlx5_ib loaded?");
740 "cannot use device, are drivers up to date?");
746 DRV_LOG(DEBUG, "device opened");
747 #ifdef HAVE_IBV_MLX5_MOD_SWP
748 attrs_out.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
751 * Multi-packet send is supported by ConnectX-4 Lx PF as well
752 * as all ConnectX-5 devices.
754 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
755 attrs_out.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
757 mlx5_glue->dv_query_device(attr_ctx, &attrs_out);
758 if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
759 if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
760 DRV_LOG(DEBUG, "enhanced MPW is supported");
761 mps = MLX5_MPW_ENHANCED;
763 DRV_LOG(DEBUG, "MPW is supported");
767 DRV_LOG(DEBUG, "MPW isn't supported");
768 mps = MLX5_MPW_DISABLED;
770 #ifdef HAVE_IBV_MLX5_MOD_SWP
771 if (attrs_out.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
772 swp = attrs_out.sw_parsing_caps.sw_parsing_offloads;
773 DRV_LOG(DEBUG, "SWP support: %u", swp);
775 if (RTE_CACHE_LINE_SIZE == 128 &&
776 !(attrs_out.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
780 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
781 if (attrs_out.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
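		/* Report tunnel offloading only when both VXLAN and GRE are supported. */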
782 tunnel_en = ((attrs_out.tunnel_offloads_caps &
783 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) &&
784 (attrs_out.tunnel_offloads_caps &
785 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE));
787 DRV_LOG(DEBUG, "tunnel offloading is %ssupported",
788 tunnel_en ? "" : "not ");
791 "tunnel offloading disabled due to old OFED/rdma-core version");
793 err = mlx5_glue->query_device_ex(attr_ctx, NULL, &device_attr);
795 DEBUG("ibv_query_device_ex() failed");
798 DRV_LOG(INFO, "%u port(s) detected",
799 device_attr.orig_attr.phys_port_cnt);
800 for (i = 0; i < device_attr.orig_attr.phys_port_cnt; i++) {
801 char name[RTE_ETH_NAME_MAX_LEN];
803 uint32_t port = i + 1; /* ports are indexed from one */
804 uint32_t test = (1 << i);
805 struct ibv_context *ctx = NULL;
806 struct ibv_port_attr port_attr;
807 struct ibv_pd *pd = NULL;
808 struct priv *priv = NULL;
809 struct rte_eth_dev *eth_dev = NULL;
810 struct ibv_device_attr_ex device_attr_ex;
811 struct ether_addr mac;
812 struct mlx5_dev_config config = {
813 .cqe_comp = cqe_comp,
815 .tunnel_en = tunnel_en,
819 .txq_inline = MLX5_ARG_UNSET,
820 .txqs_inline = MLX5_ARG_UNSET,
821 .inline_max_packet_sz = MLX5_ARG_UNSET,
826 len = snprintf(name, sizeof(name), PCI_PRI_FMT,
827 pci_dev->addr.domain, pci_dev->addr.bus,
828 pci_dev->addr.devid, pci_dev->addr.function);
829 if (device_attr.orig_attr.phys_port_cnt > 1)
830 snprintf(name + len, sizeof(name) - len, " port %u", i);
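		/* Resulting name is the PCI address, e.g. "0000:05:00.0", with
		 * " port N" appended on multi-port devices (address is illustrative). */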
831 mlx5_dev[idx].ports |= test;
832 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
833 eth_dev = rte_eth_dev_attach_secondary(name);
834 if (eth_dev == NULL) {
835 DRV_LOG(ERR, "can not attach rte ethdev");
840 eth_dev->device = &pci_dev->device;
841 eth_dev->dev_ops = &mlx5_dev_sec_ops;
842 err = mlx5_uar_init_secondary(eth_dev);
847 /* Receive command fd from primary process */
848 err = mlx5_socket_connect(eth_dev);
853 /* Remap UAR for Tx queues. */
854 err = mlx5_tx_uar_remap(eth_dev, err);
860  * Ethdev pointer is still required as input since the primary device
861  * is not accessible from the secondary process.
864 eth_dev->rx_pkt_burst =
865 mlx5_select_rx_function(eth_dev);
866 eth_dev->tx_pkt_burst =
867 mlx5_select_tx_function(eth_dev);
870 DRV_LOG(DEBUG, "using port %u (%08" PRIx32 ")", port, test);
871 ctx = mlx5_glue->open_device(ibv_dev);
876 /* Check port status. */
877 err = mlx5_glue->query_port(ctx, port, &port_attr);
879 DRV_LOG(ERR, "port query failed: %s", strerror(err));
882 if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
884 "port %d is not configured in Ethernet mode",
889 if (port_attr.state != IBV_PORT_ACTIVE)
890 DRV_LOG(DEBUG, "port %d is not active: \"%s\" (%d)",
892 mlx5_glue->port_state_str(port_attr.state),
894 /* Allocate protection domain. */
895 pd = mlx5_glue->alloc_pd(ctx);
897 DRV_LOG(ERR, "PD allocation failure");
901 mlx5_dev[idx].ports |= test;
902 /* from rte_ethdev.c */
903 priv = rte_zmalloc("ethdev private structure",
905 RTE_CACHE_LINE_SIZE);
907 DRV_LOG(ERR, "priv allocation failure");
912 strncpy(priv->ibdev_path, priv->ctx->device->ibdev_path,
913 sizeof(priv->ibdev_path));
914 priv->device_attr = device_attr;
917 priv->mtu = ETHER_MTU;
918 err = mlx5_args(&config, pci_dev->device.devargs);
920 DRV_LOG(ERR, "failed to process device arguments: %s",
925 err = mlx5_glue->query_device_ex(ctx, NULL, &device_attr_ex);
927 DRV_LOG(ERR, "ibv_query_device_ex() failed");
930 config.hw_csum = !!(device_attr_ex.device_cap_flags_ex &
931 IBV_DEVICE_RAW_IP_CSUM);
932 DRV_LOG(DEBUG, "checksum offloading is %ssupported",
933 (config.hw_csum ? "" : "not "));
934 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
935 config.flow_counter_en = !!(device_attr.max_counter_sets);
936 mlx5_glue->describe_counter_set(ctx, 0, &cs_desc);
938 "counter type = %d, num of cs = %ld, attributes = %d",
939 cs_desc.counter_type, cs_desc.num_of_cs,
942 config.ind_table_max_size =
943 device_attr_ex.rss_caps.max_rwq_indirection_table_size;
944 /* Remove this check once DPDK supports larger/variable
945 * indirection tables. */
946 if (config.ind_table_max_size >
947 (unsigned int)ETH_RSS_RETA_SIZE_512)
948 config.ind_table_max_size = ETH_RSS_RETA_SIZE_512;
949 DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
950 config.ind_table_max_size);
951 config.hw_vlan_strip = !!(device_attr_ex.raw_packet_caps &
952 IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
953 DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
954 (config.hw_vlan_strip ? "" : "not "));
956 config.hw_fcs_strip = !!(device_attr_ex.raw_packet_caps &
957 IBV_RAW_PACKET_CAP_SCATTER_FCS);
958 DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
959 (config.hw_fcs_strip ? "" : "not "));
961 #ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
962 config.hw_padding = !!device_attr_ex.rx_pad_end_addr_align;
965 "hardware Rx end alignment padding is %ssupported",
966 (config.hw_padding ? "" : "not "));
968 config.tso = ((device_attr_ex.tso_caps.max_tso > 0) &&
969 (device_attr_ex.tso_caps.supported_qpts &
970 (1 << IBV_QPT_RAW_PACKET)));
972 config.tso_max_payload_sz =
973 device_attr_ex.tso_caps.max_tso;
974 if (config.mps && !mps) {
976 "multi-packet send not supported on this device"
977 " (" MLX5_TXQ_MPW_EN ")");
981 DRV_LOG(INFO, "%s MPS is %s",
982 config.mps == MLX5_MPW_ENHANCED ? "enhanced " : "",
983 config.mps != MLX5_MPW_DISABLED ? "enabled" :
985 if (config.cqe_comp && !cqe_comp) {
986 DRV_LOG(WARNING, "Rx CQE compression isn't supported");
989 eth_dev = rte_eth_dev_allocate(name);
990 if (eth_dev == NULL) {
991 DRV_LOG(ERR, "can not allocate rte ethdev");
995 eth_dev->data->dev_private = priv;
996 priv->dev_data = eth_dev->data;
997 eth_dev->data->mac_addrs = priv->mac;
998 eth_dev->device = &pci_dev->device;
999 rte_eth_copy_pci_info(eth_dev, pci_dev);
1000 eth_dev->device->driver = &mlx5_driver.driver;
1001 err = mlx5_uar_init_primary(eth_dev);
1006 /* Configure the first MAC address by default. */
1007 if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
1009 "port %u cannot get MAC address, is mlx5_en"
1010 " loaded? (errno: %s)",
1011 eth_dev->data->port_id, strerror(errno));
1016 "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
1017 eth_dev->data->port_id,
1018 mac.addr_bytes[0], mac.addr_bytes[1],
1019 mac.addr_bytes[2], mac.addr_bytes[3],
1020 mac.addr_bytes[4], mac.addr_bytes[5]);
1023 char ifname[IF_NAMESIZE];
1025 if (mlx5_get_ifname(eth_dev, &ifname) == 0)
1026 DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
1027 eth_dev->data->port_id, ifname);
1029 DRV_LOG(DEBUG, "port %u ifname is unknown",
1030 eth_dev->data->port_id);
1033 /* Get actual MTU if possible. */
1034 err = mlx5_get_mtu(eth_dev, &priv->mtu);
1039 DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id,
1042 * Initialize burst functions to prevent crashes before link-up.
1044 eth_dev->rx_pkt_burst = removed_rx_burst;
1045 eth_dev->tx_pkt_burst = removed_tx_burst;
1046 eth_dev->dev_ops = &mlx5_dev_ops;
1047 /* Register MAC address. */
1048 claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
1049 priv->nl_socket = -1;
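		/* With vf_nl_en set, VF MAC addresses are managed through a Netlink socket. */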
1051 if (vf && config.vf_nl_en) {
1052 priv->nl_socket = mlx5_nl_init(RTMGRP_LINK);
1053 if (priv->nl_socket < 0)
1054 priv->nl_socket = -1;
1055 mlx5_nl_mac_addr_sync(eth_dev);
1057 TAILQ_INIT(&priv->flows);
1058 TAILQ_INIT(&priv->ctrl_flows);
1059 /* Hint libmlx5 to use PMD allocator for data plane resources */
1060 struct mlx5dv_ctx_allocators alctr = {
1061 .alloc = &mlx5_alloc_verbs_buf,
1062 .free = &mlx5_free_verbs_buf,
1065 mlx5_glue->dv_set_context_attr(ctx,
1066 MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
1067 (void *)((uintptr_t)&alctr));
1068 /* Bring Ethernet device up. */
1069 DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
1070 eth_dev->data->port_id);
1071 mlx5_set_link_up(eth_dev);
1073 * Even though the interrupt handler is not installed yet,
1074  * interrupts will still trigger on the async_fd from
1075 * Verbs context returned by ibv_open_device().
1077 mlx5_link_update(eth_dev, 0);
1078 /* Store device configuration on private structure. */
1079 priv->config = config;
1080 /* Create drop queue. */
1081 err = mlx5_flow_create_drop_queue(eth_dev);
1083 DRV_LOG(ERR, "port %u drop queue allocation failed: %s",
1084 eth_dev->data->port_id, strerror(rte_errno));
1088 /* Supported Verbs flow priority number detection. */
1089 if (verb_priorities == 0)
1090 verb_priorities = mlx5_get_max_verbs_prio(eth_dev);
1091 if (verb_priorities < MLX5_VERBS_FLOW_PRIO_8) {
1092 DRV_LOG(ERR, "port %u wrong Verbs flow priorities: %u",
1093 eth_dev->data->port_id, verb_priorities);
1096 priv->config.max_verbs_prio = verb_priorities;
1102 claim_zero(mlx5_glue->dealloc_pd(pd));
1104 claim_zero(mlx5_glue->close_device(ctx));
1105 if (eth_dev && rte_eal_process_type() == RTE_PROC_PRIMARY)
1106 rte_eth_dev_release_port(eth_dev);
1110 * XXX if something went wrong in the loop above, there is a resource
1111 * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as
1112  * long as DPDK does not provide a way to deallocate an ethdev and a
1113 * way to enumerate the registered ethdevs to free the previous ones.
1115 /* no port found, complain */
1116 if (!mlx5_dev[idx].ports) {
1122 claim_zero(mlx5_glue->close_device(attr_ctx));
1124 mlx5_glue->free_device_list(list);
1132 static const struct rte_pci_id mlx5_pci_id_map[] = {
1134 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
1135 PCI_DEVICE_ID_MELLANOX_CONNECTX4)
1138 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
1139 PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
1142 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
1143 PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
1146 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
1147 PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
1150 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
1151 PCI_DEVICE_ID_MELLANOX_CONNECTX5)
1154 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
1155 PCI_DEVICE_ID_MELLANOX_CONNECTX5VF)
1158 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
1159 PCI_DEVICE_ID_MELLANOX_CONNECTX5EX)
1162 RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
1163 PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
1170 static struct rte_pci_driver mlx5_driver = {
1172 .name = MLX5_DRIVER_NAME
1174 .id_table = mlx5_pci_id_map,
1175 .probe = mlx5_pci_probe,
1176 .drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV,
1179 #ifdef RTE_LIBRTE_MLX5_DLOPEN_DEPS
1182 * Suffix RTE_EAL_PMD_PATH with "-glue".
1184 * This function performs a sanity check on RTE_EAL_PMD_PATH before
1185 * suffixing its last component.
1188  * Output buffer; it should be large enough, otherwise NULL is returned.
1193 * Pointer to @p buf or @p NULL in case suffix cannot be appended.
1196 mlx5_glue_path(char *buf, size_t size)
1198 static const char *const bad[] = { "/", ".", "..", NULL };
1199 const char *path = RTE_EAL_PMD_PATH;
1200 size_t len = strlen(path);
1204 while (len && path[len - 1] == '/')
1206 for (off = len; off && path[off - 1] != '/'; --off)
1208 for (i = 0; bad[i]; ++i)
1209 if (!strncmp(path + off, bad[i], (int)(len - off)))
1211 i = snprintf(buf, size, "%.*s-glue", (int)len, path);
1212 if (i == -1 || (size_t)i >= size)
1217 "unable to append \"-glue\" to last component of"
1218 " RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\"),"
1219 " please re-configure DPDK");
1224 * Initialization routine for run-time dependency on rdma-core.
1227 mlx5_glue_init(void)
1229 char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")];
1230 const char *path[] = {
1232 * A basic security check is necessary before trusting
1233 * MLX5_GLUE_PATH, which may override RTE_EAL_PMD_PATH.
1235 (geteuid() == getuid() && getegid() == getgid() ?
1236 getenv("MLX5_GLUE_PATH") : NULL),
1238 * When RTE_EAL_PMD_PATH is set, use its glue-suffixed
1239 * variant, otherwise let dlopen() look up libraries on its
1242 (*RTE_EAL_PMD_PATH ?
1243 mlx5_glue_path(glue_path, sizeof(glue_path)) : ""),
1246 void *handle = NULL;
1250 while (!handle && i != RTE_DIM(path)) {
1259 end = strpbrk(path[i], ":;");
1261 end = path[i] + strlen(path[i]);
1262 len = end - path[i];
1267 ret = snprintf(name, sizeof(name), "%.*s%s" MLX5_GLUE,
1269 (!len || *(end - 1) == '/') ? "" : "/");
1272 if (sizeof(name) != (size_t)ret + 1)
1274 DRV_LOG(DEBUG, "looking for rdma-core glue as \"%s\"",
1276 handle = dlopen(name, RTLD_LAZY);
1287 DRV_LOG(WARNING, "cannot load glue library: %s", dlmsg);
1290 sym = dlsym(handle, "mlx5_glue");
1291 if (!sym || !*sym) {
1295 DRV_LOG(ERR, "cannot resolve glue symbol: %s", dlmsg);
1304 "cannot initialize PMD due to missing run-time dependency on"
1305 " rdma-core libraries (libibverbs, libmlx5)");
1312 * Driver initialization routine.
1314 RTE_INIT(rte_mlx5_pmd_init);
1316 rte_mlx5_pmd_init(void)
1318 /* Build the static tables for Verbs conversion. */
1319 mlx5_set_ptype_table();
1320 mlx5_set_cksum_table();
1321 mlx5_set_swp_types_table();
1323 * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
1324 * huge pages. Calling ibv_fork_init() during init allows
1325 * applications to use fork() safely for purposes other than
1326 * using this PMD, which is not supported in forked processes.
1328 setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
1329 /* Match the size of Rx completion entry to the size of a cacheline. */
1330 if (RTE_CACHE_LINE_SIZE == 128)
1331 setenv("MLX5_CQE_SIZE", "128", 0);
1332 #ifdef RTE_LIBRTE_MLX5_DLOPEN_DEPS
1333 if (mlx5_glue_init())
1338 /* Glue structure must not contain any NULL pointers. */
1342 for (i = 0; i != sizeof(*mlx5_glue) / sizeof(void *); ++i)
1343 assert(((const void *const *)mlx5_glue)[i]);
1346 if (strcmp(mlx5_glue->version, MLX5_GLUE_VERSION)) {
1348 "rdma-core glue \"%s\" mismatch: \"%s\" is required",
1349 mlx5_glue->version, MLX5_GLUE_VERSION);
1352 mlx5_glue->fork_init();
1353 rte_pci_register(&mlx5_driver);
1354 rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
1355 mlx5_mr_mem_event_cb, NULL);
1358 RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
1359 RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
1360 RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib");
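/*
 * mlx5 follows the bifurcated driver model: mlx5_core/mlx5_ib stay bound to
 * the device and the PMD reaches the hardware through ib_uverbs, so no
 * uio/vfio binding is required, unlike most other DPDK PCI PMDs.
 */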
1362 /** Initialize driver log type. */
1363 RTE_INIT(mlx5_init_log)
1365 mlx5_logtype = rte_log_register("pmd.net.mlx5");
1366 if (mlx5_logtype >= 0)
1367 rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE);