/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox.
 */
#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <assert.h>
#include <dlfcn.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <net/if.h>
#include <sys/mman.h>

/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_eal_memconfig.h>
#include <rte_kvargs.h>

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_glue.h"

/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"

/* Device parameter to configure inline send. */
#define MLX5_TXQ_INLINE "txq_inline"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling inline send.
 */
#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"

/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"

/* Device parameter to include 2 dsegs in the title WQEBB. */
#define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en"

/* Device parameter to limit the size of packets eligible for inlining. */
#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"

/* Device parameter to enable hardware Tx vector. */
#define MLX5_TX_VEC_EN "tx_vec_en"

/* Device parameter to enable hardware Rx vector. */
#define MLX5_RX_VEC_EN "rx_vec_en"
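
/*
 * Illustrative usage (not part of the original sources): these keys are
 * consumed from PCI device arguments by mlx5_args() below, e.g. on a DPDK
 * application command line (device address and values are hypothetical):
 *
 *   testpmd -w 0000:05:00.0,rxq_cqe_comp_en=1,txq_inline=200,txqs_min_inline=4
 *
 * Unknown keys are rejected by mlx5_args_check() with EINVAL.
 */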

#ifndef HAVE_IBV_MLX5_MOD_MPW
#define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
#endif

#ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
#endif

/**
 * Retrieve integer value from environment variable.
 *
 * @param[in] name
 *   Environment variable name.
 *
 * @return
 *   Integer value, 0 if the variable is not set.
 */
static int
mlx5_getenv_int(const char *name)
{
	const char *val = getenv(name);

	if (val == NULL)
		return 0;
	return atoi(val);
}
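
/*
 * Illustrative example (assumption, not from this file): the PMD uses this
 * helper for debug/tuning knobs, e.g. mlx5_getenv_int("MLX5_SHUT_UP_BF")
 * returns 1 when that variable is set to "1" and 0 when it is unset.
 */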

/**
 * Verbs callback to allocate memory. This function should allocate the space
 * according to the size provided residing inside a huge page.
 * Please note that all allocation must respect the alignment from libmlx5
 * (i.e. currently sysconf(_SC_PAGESIZE)).
 *
 * @param[in] size
 *   The size in bytes of the memory to allocate.
 * @param[in] data
 *   A pointer to the callback data.
 *
 * @return
 *   Allocated buffer, NULL otherwise and rte_errno is set.
 */
static void *
mlx5_alloc_verbs_buf(size_t size, void *data)
{
	struct priv *priv = data;
	void *ret;
	size_t alignment = sysconf(_SC_PAGESIZE);
	unsigned int socket = SOCKET_ID_ANY;

	if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) {
		const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;

		socket = ctrl->socket;
	} else if (priv->verbs_alloc_ctx.type ==
		   MLX5_VERBS_ALLOC_TYPE_RX_QUEUE) {
		const struct mlx5_rxq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;

		socket = ctrl->socket;
	}
	assert(data != NULL);
	ret = rte_malloc_socket(__func__, size, alignment, socket);
	if (!ret && size)
		rte_errno = ENOMEM;
	return ret;
}

/**
 * Verbs callback to free memory.
 *
 * @param[in] ptr
 *   A pointer to the memory to free.
 * @param[in] data
 *   A pointer to the callback data.
 */
static void
mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
{
	assert(data != NULL);
	rte_free(ptr);
}

/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_dev_close(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret;

	DEBUG("port %u closing device \"%s\"",
	      dev->data->port_id,
	      ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
	/* In case mlx5_dev_stop() has not been called. */
	mlx5_dev_interrupt_handler_uninstall(dev);
	mlx5_traffic_disable(dev);
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	if (priv->rxqs != NULL) {
		/* XXX race condition if mlx5_rx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->rxqs_n); ++i)
			mlx5_rxq_release(dev, i);
		priv->rxqs_n = 0;
		priv->rxqs = NULL;
	}
	if (priv->txqs != NULL) {
		/* XXX race condition if mlx5_tx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->txqs_n); ++i)
			mlx5_txq_release(dev, i);
		priv->txqs_n = 0;
		priv->txqs = NULL;
	}
	if (priv->pd != NULL) {
		assert(priv->ctx != NULL);
		claim_zero(mlx5_glue->dealloc_pd(priv->pd));
		claim_zero(mlx5_glue->close_device(priv->ctx));
	} else
		assert(priv->ctx == NULL);
	if (priv->rss_conf.rss_key != NULL)
		rte_free(priv->rss_conf.rss_key);
	if (priv->reta_idx != NULL)
		rte_free(priv->reta_idx);
	if (priv->primary_socket)
		mlx5_socket_uninit(dev);
	ret = mlx5_hrxq_ibv_verify(dev);
	if (ret)
		WARN("port %u some hash Rx queues still remain",
		     dev->data->port_id);
	ret = mlx5_ind_table_ibv_verify(dev);
	if (ret)
		WARN("port %u some indirection tables still remain",
		     dev->data->port_id);
	ret = mlx5_rxq_ibv_verify(dev);
	if (ret)
		WARN("port %u some Verbs Rx queues still remain",
		     dev->data->port_id);
	ret = mlx5_rxq_verify(dev);
	if (ret)
		WARN("port %u some Rx queues still remain",
		     dev->data->port_id);
	ret = mlx5_txq_ibv_verify(dev);
	if (ret)
		WARN("port %u some Verbs Tx queues still remain",
		     dev->data->port_id);
	ret = mlx5_txq_verify(dev);
	if (ret)
		WARN("port %u some Tx queues still remain",
		     dev->data->port_id);
	ret = mlx5_flow_verify(dev);
	if (ret)
		WARN("port %u some flows still remain", dev->data->port_id);
	ret = mlx5_mr_verify(dev);
	if (ret)
		WARN("port %u some memory regions still remain",
		     dev->data->port_id);
	memset(priv, 0, sizeof(*priv));
}

const struct eth_dev_ops mlx5_dev_ops = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.promiscuous_enable = mlx5_promiscuous_enable,
	.promiscuous_disable = mlx5_promiscuous_disable,
	.allmulticast_enable = mlx5_allmulticast_enable,
	.allmulticast_disable = mlx5_allmulticast_disable,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.dev_infos_get = mlx5_dev_infos_get,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.reta_update = mlx5_dev_rss_reta_update,
	.reta_query = mlx5_dev_rss_reta_query,
	.rss_hash_update = mlx5_rss_hash_update,
	.rss_hash_conf_get = mlx5_rss_hash_conf_get,
	.filter_ctrl = mlx5_dev_filter_ctrl,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
	.is_removed = mlx5_is_removed,
};

static const struct eth_dev_ops mlx5_dev_sec_ops = {
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.dev_infos_get = mlx5_dev_infos_get,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
};

/* Available operations in flow isolated mode. */
const struct eth_dev_ops mlx5_dev_ops_isolate = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.dev_infos_get = mlx5_dev_infos_get,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.filter_ctrl = mlx5_dev_filter_ctrl,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
	.is_removed = mlx5_is_removed,
};

static struct {
	struct rte_pci_addr pci_addr; /* associated PCI address */
	uint32_t ports; /* physical ports bitfield. */
} mlx5_dev[32];

/**
 * Get device index in mlx5_dev[] from PCI bus address.
 *
 * @param[in] pci_addr
 *   PCI bus address to look for.
 *
 * @return
 *   mlx5_dev[] index on success, -1 on failure.
 */
static int
mlx5_dev_idx(struct rte_pci_addr *pci_addr)
{
	unsigned int i;
	int ret = -1;

	assert(pci_addr != NULL);
	for (i = 0; (i != RTE_DIM(mlx5_dev)); ++i) {
		if ((mlx5_dev[i].pci_addr.domain == pci_addr->domain) &&
		    (mlx5_dev[i].pci_addr.bus == pci_addr->bus) &&
		    (mlx5_dev[i].pci_addr.devid == pci_addr->devid) &&
		    (mlx5_dev[i].pci_addr.function == pci_addr->function))
			return i;
		if ((mlx5_dev[i].ports == 0) && (ret == -1))
			ret = i;
	}
	return ret;
}

/**
 * Verify and store value for device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param opaque
 *   User data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_args_check(const char *key, const char *val, void *opaque)
{
	struct mlx5_dev_config *config = opaque;
	unsigned long tmp;

	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		rte_errno = errno;
		WARN("%s: \"%s\" is not a valid integer", key, val);
		return -rte_errno;
	}
	if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
		config->cqe_comp = !!tmp;
	} else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
		config->txq_inline = tmp;
	} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
		config->txqs_inline = tmp;
	} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
		config->mps = !!tmp ? config->mps : 0;
	} else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
		config->mpw_hdr_dseg = !!tmp;
	} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
		config->inline_max_packet_sz = tmp;
	} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
		config->tx_vec_en = !!tmp;
	} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
		config->rx_vec_en = !!tmp;
	} else {
		WARN("%s: unknown parameter", key);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
}

/**
 * Parse device parameters.
 *
 * @param config
 *   Pointer to device configuration structure.
 * @param devargs
 *   Device arguments structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
{
	const char **params = (const char *[]){
		MLX5_RXQ_CQE_COMP_EN,
		MLX5_TXQ_INLINE,
		MLX5_TXQS_MIN_INLINE,
		MLX5_TXQ_MPW_EN,
		MLX5_TXQ_MPW_HDR_DSEG_EN,
		MLX5_TXQ_MAX_INLINE_LEN,
		MLX5_TX_VEC_EN,
		MLX5_RX_VEC_EN,
		NULL,
	};
	struct rte_kvargs *kvlist;
	int ret = 0;
	int i;

	if (devargs == NULL)
		return 0;
	/* Following UGLY cast is done to pass checkpatch. */
	kvlist = rte_kvargs_parse(devargs->args, params);
	if (kvlist == NULL)
		return 0;
	/* Process parameters. */
	for (i = 0; (params[i] != NULL); ++i) {
		if (rte_kvargs_count(kvlist, params[i])) {
			ret = rte_kvargs_process(kvlist, params[i],
						 mlx5_args_check, config);
			if (ret) {
				rte_errno = EINVAL;
				rte_kvargs_free(kvlist);
				return -rte_errno;
			}
		}
	}
	rte_kvargs_free(kvlist);
	return 0;
}

static struct rte_pci_driver mlx5_driver;

/*
 * Reserved UAR address space for TXQ UAR (hw doorbell) mapping, a
 * process-local resource used by both primary and secondary processes to
 * avoid duplicate reservations.
 * The space has to be available in both primary and secondary processes;
 * TXQ UAR maps to this area using fixed mmap without double checking.
 */
static void *uar_base;
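
/*
 * Illustrative layout (addresses are hypothetical): with the lowest
 * hugepage segment at 0x7f0000000000, mlx5_uar_init_primary() below
 * reserves [0x7f0000000000 - MLX5_UAR_OFFSET - MLX5_UAR_SIZE,
 * 0x7f0000000000 - MLX5_UAR_OFFSET) with an anonymous PROT_NONE mmap,
 * keeping a safety gap below the hugepage area; secondary processes then
 * mmap the same range at the same fixed address.
 */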

/**
 * Reserve UAR address space for primary process.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_uar_init_primary(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	void *addr = (void *)0;
	int i;
	const struct rte_mem_config *mcfg;

	if (uar_base) { /* UAR address space mapped. */
		priv->uar_base = uar_base;
		return 0;
	}
	/* find out lower bound of hugepage segments */
	mcfg = rte_eal_get_configuration()->mem_config;
	for (i = 0; i < RTE_MAX_MEMSEG && mcfg->memseg[i].addr; i++) {
		if (addr)
			addr = RTE_MIN(addr, mcfg->memseg[i].addr);
		else
			addr = mcfg->memseg[i].addr;
	}
	/* keep distance to hugepages to minimize potential conflicts. */
	addr = RTE_PTR_SUB(addr, MLX5_UAR_OFFSET + MLX5_UAR_SIZE);
	/* anonymous mmap, no real memory consumption. */
	addr = mmap(addr, MLX5_UAR_SIZE,
		    PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) {
		ERROR("port %u failed to reserve UAR address space, please"
		      " adjust MLX5_UAR_SIZE or try --base-virtaddr",
		      dev->data->port_id);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Accept either same addr or a new addr returned from mmap if target
	 * address is not available.
	 */
	INFO("port %u reserved UAR address space: %p", dev->data->port_id,
	     addr);
	priv->uar_base = addr; /* for primary and secondary UAR re-mmap. */
	uar_base = addr; /* process local, don't reserve again. */
	return 0;
}

/**
 * Reserve UAR address space for secondary process, align with
 * primary process.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_uar_init_secondary(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	void *addr;

	assert(priv->uar_base);
	if (uar_base) { /* already reserved. */
		assert(uar_base == priv->uar_base);
		return 0;
	}
	/* anonymous mmap, no real memory consumption. */
	addr = mmap(priv->uar_base, MLX5_UAR_SIZE,
		    PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) {
		ERROR("port %u UAR mmap failed: %p size: %llu",
		      dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
		rte_errno = ENXIO;
		return -rte_errno;
	}
	if (priv->uar_base != addr) {
		ERROR("port %u UAR address %p size %llu occupied, please"
		      " adjust MLX5_UAR_OFFSET or try EAL parameter"
		      " --base-virtaddr",
		      dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
		rte_errno = ENXIO;
		return -rte_errno;
	}
	uar_base = addr; /* process local, don't reserve again */
	INFO("port %u reserved UAR address space: %p", dev->data->port_id,
	     addr);
	return 0;
}

/**
 * DPDK callback to register a PCI device.
 *
 * This function creates an Ethernet device for each port of a given
 * PCI device.
 *
 * @param[in] pci_drv
 *   PCI driver structure (mlx5_driver).
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	       struct rte_pci_device *pci_dev)
{
	struct ibv_device **list = NULL;
	struct ibv_device *ibv_dev;
	int err = 0;
	struct ibv_context *attr_ctx = NULL;
	struct ibv_device_attr_ex device_attr;
	unsigned int mps;
	unsigned int cqe_comp;
	unsigned int tunnel_en = 0;
	int idx;
	int i;
	struct mlx5dv_context attrs_out = {0};
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
	struct ibv_counter_set_description cs_desc;
#endif

	assert(pci_drv == &mlx5_driver);
	/* Get mlx5_dev[] index. */
	idx = mlx5_dev_idx(&pci_dev->addr);
	if (idx == -1) {
		ERROR("this driver cannot support any more adapters");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	DEBUG("using driver device index %d", idx);
	/* Save PCI address. */
	mlx5_dev[idx].pci_addr = pci_dev->addr;
	list = mlx5_glue->get_device_list(&i);
	if (list == NULL) {
		assert(errno);
		rte_errno = errno;
		if (rte_errno == ENOSYS)
			ERROR("cannot list devices, is ib_uverbs loaded?");
		return -rte_errno;
	}
	/*
	 * For each listed device, check related sysfs entry against
	 * the provided PCI ID.
	 */
	while (i != 0) {
		struct rte_pci_addr pci_addr;

		--i;
		DEBUG("checking device \"%s\"", list[i]->name);
		if (mlx5_ibv_device_to_pci_addr(list[i], &pci_addr))
			continue;
		if ((pci_dev->addr.domain != pci_addr.domain) ||
		    (pci_dev->addr.bus != pci_addr.bus) ||
		    (pci_dev->addr.devid != pci_addr.devid) ||
		    (pci_dev->addr.function != pci_addr.function))
			continue;
		INFO("PCI information matches, using device \"%s\"",
		     list[i]->name);
		attr_ctx = mlx5_glue->open_device(list[i]);
		rte_errno = errno;
		err = rte_errno;
		break;
	}
	if (attr_ctx == NULL) {
		mlx5_glue->free_device_list(list);
		switch (err) {
		case 0:
			ERROR("cannot access device, is mlx5_ib loaded?");
			err = ENODEV;
			break;
		case EINVAL:
			ERROR("cannot use device, are drivers up to date?");
			break;
		}
		rte_errno = err;
		return -rte_errno;
	}
	ibv_dev = list[i];
	DEBUG("device opened");
	/*
	 * Multi-packet send is supported by ConnectX-4 Lx PF as well
	 * as all ConnectX-5 devices.
	 */
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	attrs_out.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
#endif
	mlx5_glue->dv_query_device(attr_ctx, &attrs_out);
	if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
		if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
			DEBUG("enhanced MPW is supported");
			mps = MLX5_MPW_ENHANCED;
		} else {
			DEBUG("MPW is supported");
			mps = MLX5_MPW;
		}
	} else {
		DEBUG("MPW isn't supported");
		mps = MLX5_MPW_DISABLED;
	}
	if (RTE_CACHE_LINE_SIZE == 128 &&
	    !(attrs_out.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
		cqe_comp = 0;
	else
		cqe_comp = 1;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	if (attrs_out.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
		tunnel_en = ((attrs_out.tunnel_offloads_caps &
			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) &&
			     (attrs_out.tunnel_offloads_caps &
			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE));
	}
	DEBUG("tunnel offloading is %ssupported", tunnel_en ? "" : "not ");
#else
	WARN("tunnel offloading disabled due to old OFED/rdma-core version");
#endif
	if (mlx5_glue->query_device_ex(attr_ctx, NULL, &device_attr)) {
		err = errno;
		goto error;
	}
	INFO("%u port(s) detected", device_attr.orig_attr.phys_port_cnt);
	for (i = 0; i < device_attr.orig_attr.phys_port_cnt; i++) {
		char name[RTE_ETH_NAME_MAX_LEN];
		int len;
		uint32_t port = i + 1; /* ports are indexed from one */
		uint32_t test = (1 << i);
		struct ibv_context *ctx = NULL;
		struct ibv_port_attr port_attr;
		struct ibv_pd *pd = NULL;
		struct priv *priv = NULL;
		struct rte_eth_dev *eth_dev = NULL;
		struct ibv_device_attr_ex device_attr_ex;
		struct ether_addr mac;
		struct mlx5_dev_config config = {
			.cqe_comp = cqe_comp,
			.mps = mps,
			.tunnel_en = tunnel_en,
			.tx_vec_en = 1,
			.rx_vec_en = 1,
			.mpw_hdr_dseg = 0,
			.txq_inline = MLX5_ARG_UNSET,
			.txqs_inline = MLX5_ARG_UNSET,
			.inline_max_packet_sz = MLX5_ARG_UNSET,
		};

		len = snprintf(name, sizeof(name), PCI_PRI_FMT,
			       pci_dev->addr.domain, pci_dev->addr.bus,
			       pci_dev->addr.devid, pci_dev->addr.function);
		if (device_attr.orig_attr.phys_port_cnt > 1)
			snprintf(name + len, sizeof(name) - len,
				 " port %u", i);
		mlx5_dev[idx].ports |= test;
		if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
			eth_dev = rte_eth_dev_attach_secondary(name);
			if (eth_dev == NULL) {
				ERROR("can not attach rte ethdev");
				rte_errno = ENOMEM;
				err = rte_errno;
				goto error;
			}
			eth_dev->device = &pci_dev->device;
			eth_dev->dev_ops = &mlx5_dev_sec_ops;
			err = mlx5_uar_init_secondary(eth_dev);
			if (err) {
				err = rte_errno;
				goto error;
			}
			/* Receive command fd from primary process */
			err = mlx5_socket_connect(eth_dev);
			if (err < 0) {
				err = rte_errno;
				goto error;
			}
			/* Remap UAR for Tx queues. */
			err = mlx5_tx_uar_remap(eth_dev, err);
			if (err) {
				err = rte_errno;
				goto error;
			}
			/*
			 * Ethdev pointer is still required as input since
			 * the primary device is not accessible from the
			 * secondary process.
			 */
			eth_dev->rx_pkt_burst =
				mlx5_select_rx_function(eth_dev);
			eth_dev->tx_pkt_burst =
				mlx5_select_tx_function(eth_dev);
			continue;
		}
		DEBUG("using port %u (%08" PRIx32 ")", port, test);
		ctx = mlx5_glue->open_device(ibv_dev);
		if (ctx == NULL) {
			err = ENODEV;
			goto port_error;
		}
		mlx5_glue->query_device_ex(ctx, NULL, &device_attr);
		/* Check port status. */
		err = mlx5_glue->query_port(ctx, port, &port_attr);
		if (err) {
			ERROR("port query failed: %s", strerror(err));
			goto port_error;
		}
		if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
			ERROR("port %d is not configured in Ethernet mode",
			      port);
			err = EINVAL;
			goto port_error;
		}
		if (port_attr.state != IBV_PORT_ACTIVE)
			DEBUG("port %d is not active: \"%s\" (%d)",
			      port, mlx5_glue->port_state_str(port_attr.state),
			      port_attr.state);
		/* Allocate protection domain. */
		pd = mlx5_glue->alloc_pd(ctx);
		if (pd == NULL) {
			ERROR("PD allocation failure");
			err = ENOMEM;
			goto port_error;
		}
		mlx5_dev[idx].ports |= test;
		/* from rte_ethdev.c */
		priv = rte_zmalloc("ethdev private structure",
				   sizeof(*priv),
				   RTE_CACHE_LINE_SIZE);
		if (priv == NULL) {
			ERROR("priv allocation failure");
			err = ENOMEM;
			goto port_error;
		}
		priv->ctx = ctx;
		strncpy(priv->ibdev_path, priv->ctx->device->ibdev_path,
			sizeof(priv->ibdev_path));
		priv->device_attr = device_attr;
		priv->pd = pd;
		priv->mtu = ETHER_MTU;
		err = mlx5_args(&config, pci_dev->device.devargs);
		if (err) {
			ERROR("failed to process device arguments: %s",
			      strerror(err));
			goto port_error;
		}
		if (mlx5_glue->query_device_ex(ctx, NULL, &device_attr_ex)) {
			ERROR("ibv_query_device_ex() failed");
			err = errno;
			goto port_error;
		}
		config.hw_csum = !!(device_attr_ex.device_cap_flags_ex &
				    IBV_DEVICE_RAW_IP_CSUM);
		DEBUG("checksum offloading is %ssupported",
		      (config.hw_csum ? "" : "not "));
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
		config.flow_counter_en = !!(device_attr.max_counter_sets);
		mlx5_glue->describe_counter_set(ctx, 0, &cs_desc);
		DEBUG("counter type = %d, num of cs = %ld, attributes = %d",
		      cs_desc.counter_type, cs_desc.num_of_cs,
		      cs_desc.attributes);
#endif
		config.ind_table_max_size =
			device_attr_ex.rss_caps.max_rwq_indirection_table_size;
		/* Remove this check once DPDK supports larger/variable
		 * indirection tables. */
		if (config.ind_table_max_size >
		    (unsigned int)ETH_RSS_RETA_SIZE_512)
			config.ind_table_max_size = ETH_RSS_RETA_SIZE_512;
		DEBUG("maximum Rx indirection table size is %u",
		      config.ind_table_max_size);
		config.hw_vlan_strip = !!(device_attr_ex.raw_packet_caps &
					  IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
		DEBUG("VLAN stripping is %ssupported",
		      (config.hw_vlan_strip ? "" : "not "));

		config.hw_fcs_strip = !!(device_attr_ex.raw_packet_caps &
					 IBV_RAW_PACKET_CAP_SCATTER_FCS);
		DEBUG("FCS stripping configuration is %ssupported",
		      (config.hw_fcs_strip ? "" : "not "));
#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
		config.hw_padding = !!device_attr_ex.rx_pad_end_addr_align;
#endif
		DEBUG("hardware Rx end alignment padding is %ssupported",
		      (config.hw_padding ? "" : "not "));
		config.tso = ((device_attr_ex.tso_caps.max_tso > 0) &&
			      (device_attr_ex.tso_caps.supported_qpts &
			       (1 << IBV_QPT_RAW_PACKET)));
		if (config.tso)
			config.tso_max_payload_sz =
				device_attr_ex.tso_caps.max_tso;
		if (config.mps && !mps) {
			ERROR("multi-packet send not supported on this device"
			      " (" MLX5_TXQ_MPW_EN ")");
			err = ENOTSUP;
			goto port_error;
		}
		INFO("%sMPS is %s",
		     config.mps == MLX5_MPW_ENHANCED ? "enhanced " : "",
		     config.mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
		if (config.cqe_comp && !cqe_comp) {
			WARN("Rx CQE compression isn't supported");
			config.cqe_comp = 0;
		}
		eth_dev = rte_eth_dev_allocate(name);
		if (eth_dev == NULL) {
			ERROR("can not allocate rte ethdev");
			err = ENOMEM;
			goto port_error;
		}
		eth_dev->data->dev_private = priv;
		priv->dev = eth_dev;
		eth_dev->data->mac_addrs = priv->mac;
		eth_dev->device = &pci_dev->device;
		rte_eth_copy_pci_info(eth_dev, pci_dev);
		eth_dev->device->driver = &mlx5_driver.driver;
		err = mlx5_uar_init_primary(eth_dev);
		if (err) {
			err = rte_errno;
			goto port_error;
		}
		/* Configure the first MAC address by default. */
		if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
			ERROR("port %u cannot get MAC address, is mlx5_en"
			      " loaded? (errno: %s)", eth_dev->data->port_id,
			      strerror(rte_errno));
			err = ENODEV;
			goto port_error;
		}
		INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
		     eth_dev->data->port_id,
		     mac.addr_bytes[0], mac.addr_bytes[1],
		     mac.addr_bytes[2], mac.addr_bytes[3],
		     mac.addr_bytes[4], mac.addr_bytes[5]);
#ifndef NDEBUG
		{
			char ifname[IF_NAMESIZE];

			if (mlx5_get_ifname(eth_dev, &ifname) == 0)
				DEBUG("port %u ifname is \"%s\"",
				      eth_dev->data->port_id, ifname);
			else
				DEBUG("port %u ifname is unknown",
				      eth_dev->data->port_id);
		}
#endif
		/* Get actual MTU if possible. */
		err = mlx5_get_mtu(eth_dev, &priv->mtu);
		if (err) {
			err = rte_errno;
			goto port_error;
		}
		DEBUG("port %u MTU is %u", eth_dev->data->port_id, priv->mtu);
		/*
		 * Initialize burst functions to prevent crashes before
		 * link-up.
		 */
		eth_dev->rx_pkt_burst = removed_rx_burst;
		eth_dev->tx_pkt_burst = removed_tx_burst;
		eth_dev->dev_ops = &mlx5_dev_ops;
		/* Register MAC address. */
		claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
		TAILQ_INIT(&priv->flows);
		TAILQ_INIT(&priv->ctrl_flows);
		/* Hint libmlx5 to use PMD allocator for data plane resources */
		struct mlx5dv_ctx_allocators alctr = {
			.alloc = &mlx5_alloc_verbs_buf,
			.free = &mlx5_free_verbs_buf,
			.data = priv,
		};
		mlx5_glue->dv_set_context_attr(ctx,
					       MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
					       (void *)((uintptr_t)&alctr));
		/* Bring Ethernet device up. */
		DEBUG("port %u forcing Ethernet interface up",
		      eth_dev->data->port_id);
		mlx5_set_flags(eth_dev, ~IFF_UP, IFF_UP);
		/* Store device configuration on private structure. */
		priv->config = config;
		continue;
port_error:
		if (priv)
			rte_free(priv);
		if (pd)
			claim_zero(mlx5_glue->dealloc_pd(pd));
		if (ctx)
			claim_zero(mlx5_glue->close_device(ctx));
		break;
	}
	/*
	 * XXX if something went wrong in the loop above, there is a resource
	 * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as
	 * long as the dpdk does not provide a way to deallocate a ethdev and a
	 * way to enumerate the registered ethdevs to free the previous ones.
	 */
	/* no port found, complain */
	if (!mlx5_dev[idx].ports) {
		rte_errno = ENODEV;
		err = rte_errno;
	}
error:
	if (attr_ctx)
		claim_zero(mlx5_glue->close_device(attr_ctx));
	if (list)
		mlx5_glue->free_device_list(list);
	if (err) {
		rte_errno = err;
		return -rte_errno;
	}
	return 0;
}

static const struct rte_pci_id mlx5_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
	},
	{
		.vendor_id = 0
	}
};

static struct rte_pci_driver mlx5_driver = {
	.driver = {
		.name = MLX5_DRIVER_NAME
	},
	.id_table = mlx5_pci_id_map,
	.probe = mlx5_pci_probe,
	.drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV,
};

#ifdef RTE_LIBRTE_MLX5_DLOPEN_DEPS

/**
 * Suffix RTE_EAL_PMD_PATH with "-glue".
 *
 * This function performs a sanity check on RTE_EAL_PMD_PATH before
 * suffixing its last component.
 *
 * @param buf
 *   Output buffer, should be large enough otherwise NULL is returned.
 * @param size
 *   Size of @p buf.
 *
 * @return
 *   Pointer to @p buf or @p NULL in case suffix cannot be appended.
 */
static char *
mlx5_glue_path(char *buf, size_t size)
{
	static const char *const bad[] = { "/", ".", "..", NULL };
	const char *path = RTE_EAL_PMD_PATH;
	size_t len = strlen(path);
	size_t off;
	int i;

	while (len && path[len - 1] == '/')
		--len;
	for (off = len; off && path[off - 1] != '/'; --off)
		;
	for (i = 0; bad[i]; ++i)
		if (!strncmp(path + off, bad[i], (int)(len - off)))
			goto error;
	i = snprintf(buf, size, "%.*s-glue", (int)len, path);
	if (i == -1 || (size_t)i >= size)
		goto error;
	return buf;
error:
	ERROR("unable to append \"-glue\" to last component of"
	      " RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\"),"
	      " please re-configure DPDK");
	return NULL;
}
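
/*
 * Illustrative example (path is hypothetical): with RTE_EAL_PMD_PATH set
 * to "/usr/local/lib/dpdk-pmds", mlx5_glue_path() yields
 * "/usr/local/lib/dpdk-pmds-glue", which mlx5_glue_init() below searches
 * for the MLX5_GLUE library before falling back to dlopen()'s default
 * lookup.
 */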

/**
 * Initialization routine for run-time dependency on rdma-core.
 */
static int
mlx5_glue_init(void)
{
	char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")];
	const char *path[] = {
		/*
		 * A basic security check is necessary before trusting
		 * MLX5_GLUE_PATH, which may override RTE_EAL_PMD_PATH.
		 */
		(geteuid() == getuid() && getegid() == getgid() ?
		 getenv("MLX5_GLUE_PATH") : NULL),
		/*
		 * When RTE_EAL_PMD_PATH is set, use its glue-suffixed
		 * variant, otherwise let dlopen() look up libraries on its
		 * own.
		 */
		(*RTE_EAL_PMD_PATH ?
		 mlx5_glue_path(glue_path, sizeof(glue_path)) : ""),
	};
	unsigned int i = 0;
	void *handle = NULL;
	void **sym;
	const char *dlmsg;

	while (!handle && i != RTE_DIM(path)) {
		const char *end;
		size_t len;
		int ret;

		if (!path[i]) {
			++i;
			continue;
		}
		end = strpbrk(path[i], ":;");
		if (!end)
			end = path[i] + strlen(path[i]);
		len = end - path[i];
		ret = 0;
		do {
			char name[ret + 1];

			ret = snprintf(name, sizeof(name), "%.*s%s" MLX5_GLUE,
				       (int)len, path[i],
				       (!len || *(end - 1) == '/') ? "" : "/");
			if (ret == -1)
				break;
			if (sizeof(name) != (size_t)ret + 1)
				continue;
			DEBUG("looking for rdma-core glue as \"%s\"", name);
			handle = dlopen(name, RTLD_LAZY);
			break;
		} while (1);
		path[i] = end + 1;
		if (!*end)
			++i;
	}
	if (!handle) {
		rte_errno = EINVAL;
		dlmsg = dlerror();
		if (dlmsg)
			WARN("cannot load glue library: %s", dlmsg);
		goto glue_error;
	}
	sym = dlsym(handle, "mlx5_glue");
	if (!sym || !*sym) {
		rte_errno = EINVAL;
		dlmsg = dlerror();
		if (dlmsg)
			ERROR("cannot resolve glue symbol: %s", dlmsg);
		goto glue_error;
	}
	mlx5_glue = *sym;
	return 0;
glue_error:
	if (handle)
		dlclose(handle);
	WARN("cannot initialize PMD due to missing run-time"
	     " dependency on rdma-core libraries (libibverbs,"
	     " libmlx5)");
	return -rte_errno;
}

#endif

/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx5_pmd_init);
static void
rte_mlx5_pmd_init(void)
{
	/* Build the static table for ptype conversion. */
	mlx5_set_ptype_table();
	/*
	 * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
	 * huge pages. Calling ibv_fork_init() during init allows
	 * applications to use fork() safely for purposes other than
	 * using this PMD, which is not supported in forked processes.
	 */
	setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
	/* Match the size of Rx completion entry to the size of a cacheline. */
	if (RTE_CACHE_LINE_SIZE == 128)
		setenv("MLX5_CQE_SIZE", "128", 0);
#ifdef RTE_LIBRTE_MLX5_DLOPEN_DEPS
	if (mlx5_glue_init())
		return;
	assert(mlx5_glue);
#endif
#ifndef NDEBUG
	/* Glue structure must not contain any NULL pointers. */
	{
		unsigned int i;

		for (i = 0; i != sizeof(*mlx5_glue) / sizeof(void *); ++i)
			assert(((const void *const *)mlx5_glue)[i]);
	}
#endif
	if (strcmp(mlx5_glue->version, MLX5_GLUE_VERSION)) {
		ERROR("rdma-core glue \"%s\" mismatch: \"%s\" is required",
		      mlx5_glue->version, MLX5_GLUE_VERSION);
		return;
	}
	mlx5_glue->fork_init();
	rte_pci_register(&mlx5_driver);
}

RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib");