1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2020 Mellanox Technologies, Ltd
14 #include <linux/rtnetlink.h>
15 #include <linux/sockios.h>
16 #include <linux/ethtool.h>
20 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
22 #pragma GCC diagnostic ignored "-Wpedantic"
24 #include <infiniband/verbs.h>
26 #pragma GCC diagnostic error "-Wpedantic"
29 #include <rte_malloc.h>
30 #include <rte_ethdev_driver.h>
31 #include <rte_ethdev_pci.h>
33 #include <rte_bus_pci.h>
34 #include <rte_common.h>
35 #include <rte_kvargs.h>
36 #include <rte_rwlock.h>
37 #include <rte_spinlock.h>
38 #include <rte_string_fns.h>
39 #include <rte_alarm.h>
41 #include <mlx5_glue.h>
42 #include <mlx5_devx_cmds.h>
43 #include <mlx5_common.h>
44 #include <mlx5_common_mp.h>
45 #include <mlx5_common_mr.h>
47 #include "mlx5_defs.h"
49 #include "mlx5_common_os.h"
50 #include "mlx5_utils.h"
51 #include "mlx5_rxtx.h"
52 #include "mlx5_autoconf.h"
54 #include "mlx5_flow.h"
55 #include "rte_pmd_mlx5.h"
56 #include "mlx5_verbs.h"
58 #define MLX5_TAGS_HLIST_ARRAY_SIZE 8192
60 #ifndef HAVE_IBV_MLX5_MOD_MPW
61 #define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
62 #define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
65 #ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
66 #define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
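/*
 * Note: the definitions above are fallbacks that mirror the rdma-core flag
 * values for the case where the installed libibverbs/mlx5dv headers are too
 * old to provide them.
 */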
70 * Get mlx5 device attributes. The glue function query_device_ex() is called
71 * with an out parameter of type 'struct ibv_device_attr_ex *'. Then fill in the
72 * mlx5 device attributes from the glue out parameter.
75 * Pointer to ibv context.
78 * Pointer to mlx5 device attributes.
81 * 0 on success, a non-zero error number otherwise.
84 mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *device_attr)
87 struct ibv_device_attr_ex attr_ex;
88 memset(device_attr, 0, sizeof(*device_attr));
89 err = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
93 device_attr->device_cap_flags_ex = attr_ex.device_cap_flags_ex;
94 device_attr->max_qp_wr = attr_ex.orig_attr.max_qp_wr;
95 device_attr->max_sge = attr_ex.orig_attr.max_sge;
96 device_attr->max_cq = attr_ex.orig_attr.max_cq;
97 device_attr->max_qp = attr_ex.orig_attr.max_qp;
98 device_attr->raw_packet_caps = attr_ex.raw_packet_caps;
99 device_attr->max_rwq_indirection_table_size =
100 attr_ex.rss_caps.max_rwq_indirection_table_size;
101 device_attr->max_tso = attr_ex.tso_caps.max_tso;
102 device_attr->tso_supported_qpts = attr_ex.tso_caps.supported_qpts;
104 struct mlx5dv_context dv_attr = { .comp_mask = 0 };
105 err = mlx5_glue->dv_query_device(ctx, &dv_attr);
109 device_attr->flags = dv_attr.flags;
110 device_attr->comp_mask = dv_attr.comp_mask;
111 #ifdef HAVE_IBV_MLX5_MOD_SWP
112 device_attr->sw_parsing_offloads =
113 dv_attr.sw_parsing_caps.sw_parsing_offloads;
115 device_attr->min_single_stride_log_num_of_bytes =
116 dv_attr.striding_rq_caps.min_single_stride_log_num_of_bytes;
117 device_attr->max_single_stride_log_num_of_bytes =
118 dv_attr.striding_rq_caps.max_single_stride_log_num_of_bytes;
119 device_attr->min_single_wqe_log_num_of_strides =
120 dv_attr.striding_rq_caps.min_single_wqe_log_num_of_strides;
121 device_attr->max_single_wqe_log_num_of_strides =
122 dv_attr.striding_rq_caps.max_single_wqe_log_num_of_strides;
123 device_attr->stride_supported_qpts =
124 dv_attr.striding_rq_caps.supported_qpts;
125 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
126 device_attr->tunnel_offloads_caps = dv_attr.tunnel_offloads_caps;
133 * Verbs callback to allocate memory. This function should allocate the space
134 * according to the size provided, residing inside a huge page.
135 * Please note that all allocations must respect the alignment from libmlx5
136 * (i.e. currently sysconf(_SC_PAGESIZE)).
139 * The size in bytes of the memory to allocate.
141 * A pointer to the callback data.
144 * Allocated buffer, NULL otherwise and rte_errno is set.
147 mlx5_alloc_verbs_buf(size_t size, void *data)
149 struct mlx5_priv *priv = data;
151 size_t alignment = sysconf(_SC_PAGESIZE);
152 unsigned int socket = SOCKET_ID_ANY;
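/*
 * Prefer the NUMA socket of the queue being created (when known) so that
 * the buffers allocated for the Verbs objects stay local to the datapath.
 */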
154 if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) {
155 const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;
157 socket = ctrl->socket;
158 } else if (priv->verbs_alloc_ctx.type ==
159 MLX5_VERBS_ALLOC_TYPE_RX_QUEUE) {
160 const struct mlx5_rxq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;
162 socket = ctrl->socket;
164 MLX5_ASSERT(data != NULL);
165 ret = rte_malloc_socket(__func__, size, alignment, socket);
172 * Verbs callback to free memory.
175 * A pointer to the memory to free.
177 * A pointer to the callback data.
180 mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
182 MLX5_ASSERT(data != NULL);
187 * Initialize DR-related data within the private structure.
188 * The routine checks the reference counter and performs actual
189 * resource creation/initialization only if the counter is zero.
192 * Pointer to the private device data structure.
195 * Zero on success, positive error code otherwise.
198 mlx5_alloc_shared_dr(struct mlx5_priv *priv)
200 struct mlx5_dev_ctx_shared *sh = priv->sh;
201 char s[MLX5_HLIST_NAMESIZE];
205 err = mlx5_alloc_table_hash_list(priv);
207 DRV_LOG(DEBUG, "sh->flow_tbls[%p] already created, reuse",
208 (void *)sh->flow_tbls);
211 /* Create tags hash list table. */
212 snprintf(s, sizeof(s), "%s_tags", sh->ibdev_name);
213 sh->tag_table = mlx5_hlist_create(s, MLX5_TAGS_HLIST_ARRAY_SIZE);
214 if (!sh->tag_table) {
215 DRV_LOG(ERR, "tags with hash creation failed.");
219 #ifdef HAVE_MLX5DV_DR
223 /* Shared DV/DR structures are already initialized. */
228 /* Reference counter is zero, we should initialize structures. */
229 domain = mlx5_glue->dr_create_domain(sh->ctx,
230 MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
232 DRV_LOG(ERR, "ingress mlx5dv_dr_create_domain failed");
236 sh->rx_domain = domain;
237 domain = mlx5_glue->dr_create_domain(sh->ctx,
238 MLX5DV_DR_DOMAIN_TYPE_NIC_TX);
240 DRV_LOG(ERR, "egress mlx5dv_dr_create_domain failed");
244 pthread_mutex_init(&sh->dv_mutex, NULL);
245 sh->tx_domain = domain;
246 #ifdef HAVE_MLX5DV_DR_ESWITCH
247 if (priv->config.dv_esw_en) {
248 domain = mlx5_glue->dr_create_domain
249 (sh->ctx, MLX5DV_DR_DOMAIN_TYPE_FDB);
251 DRV_LOG(ERR, "FDB mlx5dv_dr_create_domain failed");
255 sh->fdb_domain = domain;
256 sh->esw_drop_action = mlx5_glue->dr_create_flow_action_drop();
259 if (priv->config.reclaim_mode == MLX5_RCM_AGGR) {
260 mlx5_glue->dr_reclaim_domain_memory(sh->rx_domain, 1);
261 mlx5_glue->dr_reclaim_domain_memory(sh->tx_domain, 1);
263 mlx5_glue->dr_reclaim_domain_memory(sh->fdb_domain, 1);
265 sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan();
266 #endif /* HAVE_MLX5DV_DR */
271 /* Rollback the created objects. */
273 mlx5_glue->dr_destroy_domain(sh->rx_domain);
274 sh->rx_domain = NULL;
277 mlx5_glue->dr_destroy_domain(sh->tx_domain);
278 sh->tx_domain = NULL;
280 if (sh->fdb_domain) {
281 mlx5_glue->dr_destroy_domain(sh->fdb_domain);
282 sh->fdb_domain = NULL;
284 if (sh->esw_drop_action) {
285 mlx5_glue->destroy_flow_action(sh->esw_drop_action);
286 sh->esw_drop_action = NULL;
288 if (sh->pop_vlan_action) {
289 mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
290 sh->pop_vlan_action = NULL;
293 /* Tags should be destroyed together with the flows beforehand. */
294 mlx5_hlist_destroy(sh->tag_table, NULL, NULL);
295 sh->tag_table = NULL;
297 mlx5_free_table_hash_list(priv);
302 * Destroy DR related data within private structure.
305 * Pointer to the private device data structure.
308 mlx5_os_free_shared_dr(struct mlx5_priv *priv)
310 struct mlx5_dev_ctx_shared *sh;
312 if (!priv->dr_shared)
317 #ifdef HAVE_MLX5DV_DR
318 MLX5_ASSERT(sh->dv_refcnt);
319 if (sh->dv_refcnt && --sh->dv_refcnt)
322 mlx5_glue->dr_destroy_domain(sh->rx_domain);
323 sh->rx_domain = NULL;
326 mlx5_glue->dr_destroy_domain(sh->tx_domain);
327 sh->tx_domain = NULL;
329 #ifdef HAVE_MLX5DV_DR_ESWITCH
330 if (sh->fdb_domain) {
331 mlx5_glue->dr_destroy_domain(sh->fdb_domain);
332 sh->fdb_domain = NULL;
334 if (sh->esw_drop_action) {
335 mlx5_glue->destroy_flow_action(sh->esw_drop_action);
336 sh->esw_drop_action = NULL;
339 if (sh->pop_vlan_action) {
340 mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
341 sh->pop_vlan_action = NULL;
343 pthread_mutex_destroy(&sh->dv_mutex);
344 #endif /* HAVE_MLX5DV_DR */
346 /* Tags should be destroyed together with the flows beforehand. */
347 mlx5_hlist_destroy(sh->tag_table, NULL, NULL);
348 sh->tag_table = NULL;
350 mlx5_free_table_hash_list(priv);
354 * Spawn an Ethernet device from Verbs information.
357 * Backing DPDK device.
359 * Verbs device parameters (name, port, switch_info) to spawn.
361 * Device configuration parameters.
364 * A valid Ethernet device object on success, NULL otherwise and rte_errno
365 * is set. The following errors are defined:
367 * EBUSY: device is not supposed to be spawned.
368 * EEXIST: device is already spawned
370 static struct rte_eth_dev *
371 mlx5_dev_spawn(struct rte_device *dpdk_dev,
372 struct mlx5_dev_spawn_data *spawn,
373 struct mlx5_dev_config config)
375 const struct mlx5_switch_info *switch_info = &spawn->info;
376 struct mlx5_dev_ctx_shared *sh = NULL;
377 struct ibv_port_attr port_attr;
378 struct mlx5dv_context dv_attr = { .comp_mask = 0 };
379 struct rte_eth_dev *eth_dev = NULL;
380 struct mlx5_priv *priv = NULL;
382 unsigned int hw_padding = 0;
384 unsigned int cqe_comp;
385 unsigned int cqe_pad = 0;
386 unsigned int tunnel_en = 0;
387 unsigned int mpls_en = 0;
388 unsigned int swp = 0;
389 unsigned int mprq = 0;
390 unsigned int mprq_min_stride_size_n = 0;
391 unsigned int mprq_max_stride_size_n = 0;
392 unsigned int mprq_min_stride_num_n = 0;
393 unsigned int mprq_max_stride_num_n = 0;
394 struct rte_ether_addr mac;
395 char name[RTE_ETH_NAME_MAX_LEN];
396 int own_domain_id = 0;
399 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
400 struct mlx5dv_devx_port devx_port = { .comp_mask = 0 };
403 /* Determine if this port representor is supposed to be spawned. */
404 if (switch_info->representor && dpdk_dev->devargs) {
405 struct rte_eth_devargs eth_da;
407 err = rte_eth_devargs_parse(dpdk_dev->devargs->args, ð_da);
410 DRV_LOG(ERR, "failed to process device arguments: %s",
411 strerror(rte_errno));
414 for (i = 0; i < eth_da.nb_representor_ports; ++i)
415 if (eth_da.representor_ports[i] ==
416 (uint16_t)switch_info->port_name)
418 if (i == eth_da.nb_representor_ports) {
423 /* Build device name. */
424 if (spawn->pf_bond < 0) {
426 if (!switch_info->representor)
427 strlcpy(name, dpdk_dev->name, sizeof(name));
429 snprintf(name, sizeof(name), "%s_representor_%u",
430 dpdk_dev->name, switch_info->port_name);
432 /* Bonding device. */
433 if (!switch_info->representor)
434 snprintf(name, sizeof(name), "%s_%s",
436 mlx5_os_get_dev_device_name(spawn->phys_dev));
438 snprintf(name, sizeof(name), "%s_%s_representor_%u",
440 mlx5_os_get_dev_device_name(spawn->phys_dev),
441 switch_info->port_name);
443 /* check if the device is already spawned */
444 if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) {
448 DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
449 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
450 struct mlx5_mp_id mp_id;
452 eth_dev = rte_eth_dev_attach_secondary(name);
453 if (eth_dev == NULL) {
454 DRV_LOG(ERR, "can not attach rte ethdev");
458 eth_dev->device = dpdk_dev;
459 eth_dev->dev_ops = &mlx5_os_dev_sec_ops;
460 err = mlx5_proc_priv_init(eth_dev);
463 mp_id.port_id = eth_dev->data->port_id;
464 strlcpy(mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
465 /* Receive command fd from primary process */
466 err = mlx5_mp_req_verbs_cmd_fd(&mp_id);
469 /* Remap UAR for Tx queues. */
470 err = mlx5_tx_uar_init_secondary(eth_dev, err);
474 * Ethdev pointer is still required as input since
475 * the primary device is not accessible from the secondary process.
478 eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev);
479 eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev);
482 mlx5_dev_close(eth_dev);
486 * Some parameters ("tx_db_nc" in particular) are needed in
487 * advance to create the dv/verbs device context. We process the
488 * devargs here to get them, and later process the devargs again
489 * to override some hardware settings.
491 err = mlx5_args(&config, dpdk_dev->devargs);
494 DRV_LOG(ERR, "failed to process device arguments: %s",
495 strerror(rte_errno));
498 sh = mlx5_alloc_shared_dev_ctx(spawn, &config);
501 config.devx = sh->devx;
502 #ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR
505 #ifdef HAVE_IBV_MLX5_MOD_SWP
506 dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
509 * Multi-packet send is supported by ConnectX-4 Lx PF as well
510 * as all ConnectX-5 devices.
512 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
513 dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
515 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
516 dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
518 mlx5_glue->dv_query_device(sh->ctx, &dv_attr);
519 if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
520 if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
521 DRV_LOG(DEBUG, "enhanced MPW is supported");
522 mps = MLX5_MPW_ENHANCED;
524 DRV_LOG(DEBUG, "MPW is supported");
528 DRV_LOG(DEBUG, "MPW isn't supported");
529 mps = MLX5_MPW_DISABLED;
531 #ifdef HAVE_IBV_MLX5_MOD_SWP
532 if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
533 swp = dv_attr.sw_parsing_caps.sw_parsing_offloads;
534 DRV_LOG(DEBUG, "SWP support: %u", swp);
537 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
538 if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
539 struct mlx5dv_striding_rq_caps mprq_caps =
540 dv_attr.striding_rq_caps;
542 DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d",
543 mprq_caps.min_single_stride_log_num_of_bytes);
544 DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d",
545 mprq_caps.max_single_stride_log_num_of_bytes);
546 DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d",
547 mprq_caps.min_single_wqe_log_num_of_strides);
548 DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d",
549 mprq_caps.max_single_wqe_log_num_of_strides);
550 DRV_LOG(DEBUG, "\tsupported_qpts: %d",
551 mprq_caps.supported_qpts);
552 DRV_LOG(DEBUG, "device supports Multi-Packet RQ");
554 mprq_min_stride_size_n =
555 mprq_caps.min_single_stride_log_num_of_bytes;
556 mprq_max_stride_size_n =
557 mprq_caps.max_single_stride_log_num_of_bytes;
558 mprq_min_stride_num_n =
559 mprq_caps.min_single_wqe_log_num_of_strides;
560 mprq_max_stride_num_n =
561 mprq_caps.max_single_wqe_log_num_of_strides;
564 if (RTE_CACHE_LINE_SIZE == 128 &&
565 !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
569 config.cqe_comp = cqe_comp;
570 #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
571 /* Whether device supports 128B Rx CQE padding. */
572 cqe_pad = RTE_CACHE_LINE_SIZE == 128 &&
573 (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD);
575 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
576 if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
577 tunnel_en = ((dv_attr.tunnel_offloads_caps &
578 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) &&
579 (dv_attr.tunnel_offloads_caps &
580 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE) &&
581 (dv_attr.tunnel_offloads_caps &
582 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE));
584 DRV_LOG(DEBUG, "tunnel offloading is %ssupported",
585 tunnel_en ? "" : "not ");
588 "tunnel offloading disabled due to old OFED/rdma-core version");
590 config.tunnel_en = tunnel_en;
591 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
592 mpls_en = ((dv_attr.tunnel_offloads_caps &
593 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
594 (dv_attr.tunnel_offloads_caps &
595 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
596 DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported",
597 mpls_en ? "" : "not ");
599 DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to"
600 " old OFED/rdma-core version or firmware configuration");
602 config.mpls_en = mpls_en;
603 /* Check port status. */
604 err = mlx5_glue->query_port(sh->ctx, spawn->phys_port, &port_attr);
606 DRV_LOG(ERR, "port query failed: %s", strerror(err));
609 if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
610 DRV_LOG(ERR, "port is not configured in Ethernet mode");
614 if (port_attr.state != IBV_PORT_ACTIVE)
615 DRV_LOG(DEBUG, "port is not active: \"%s\" (%d)",
616 mlx5_glue->port_state_str(port_attr.state),
618 /* Allocate private eth device data. */
619 priv = rte_zmalloc("ethdev private structure",
621 RTE_CACHE_LINE_SIZE);
623 DRV_LOG(ERR, "priv allocation failure");
628 priv->dev_port = spawn->phys_port;
629 priv->pci_dev = spawn->pci_dev;
630 priv->mtu = RTE_ETHER_MTU;
631 priv->mp_id.port_id = port_id;
632 strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
634 /* Initialize UAR access locks for 32bit implementations. */
635 rte_spinlock_init(&priv->uar_lock_cq);
636 for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
637 rte_spinlock_init(&priv->uar_lock[i]);
639 /* Some internal functions rely on Netlink sockets, open them now. */
640 priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA);
641 priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE);
642 priv->representor = !!switch_info->representor;
643 priv->master = !!switch_info->master;
644 priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
645 priv->vport_meta_tag = 0;
646 priv->vport_meta_mask = 0;
647 priv->pf_bond = spawn->pf_bond;
648 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
650 * The DevX port query API is implemented. E-Switch may use
651 * either vport or reg_c[0] metadata register to match on
652 * vport index. The engaged part of the metadata register is defined by the mask.
655 if (switch_info->representor || switch_info->master) {
656 devx_port.comp_mask = MLX5DV_DEVX_PORT_VPORT |
657 MLX5DV_DEVX_PORT_MATCH_REG_C_0;
658 err = mlx5_glue->devx_port_query(sh->ctx, spawn->phys_port,
662 "can't query devx port %d on device %s",
664 mlx5_os_get_dev_device_name(spawn->phys_dev));
665 devx_port.comp_mask = 0;
668 if (devx_port.comp_mask & MLX5DV_DEVX_PORT_MATCH_REG_C_0) {
669 priv->vport_meta_tag = devx_port.reg_c_0.value;
670 priv->vport_meta_mask = devx_port.reg_c_0.mask;
671 if (!priv->vport_meta_mask) {
672 DRV_LOG(ERR, "vport zero mask for port %d"
673 " on bonding device %s",
675 mlx5_os_get_dev_device_name
680 if (priv->vport_meta_tag & ~priv->vport_meta_mask) {
681 DRV_LOG(ERR, "invalid vport tag for port %d"
682 " on bonding device %s",
684 mlx5_os_get_dev_device_name
690 if (devx_port.comp_mask & MLX5DV_DEVX_PORT_VPORT) {
691 priv->vport_id = devx_port.vport_num;
692 } else if (spawn->pf_bond >= 0) {
693 DRV_LOG(ERR, "can't deduce vport index for port %d"
694 " on bonding device %s",
696 mlx5_os_get_dev_device_name(spawn->phys_dev));
700 /* Deduce the vport index in a backward-compatible way. */
701 priv->vport_id = switch_info->representor ?
702 switch_info->port_name + 1 : -1;
706 * Kernel/rdma_core supports single E-Switch per PF configurations
707 * only and the vport_id field contains the vport index for the
708 * associated VF, which is deduced from the representor port name.
709 * For example, suppose the IB device port 10 has an attached
710 * network device eth0 with the port name attribute pf0vf2;
711 * we can deduce the VF number as 2 and set the vport index
712 * to 3 (2+1). This assignment schema should be changed if
713 * multiple E-Switch instances per PF configurations and/or PCI
714 * subfunctions are added.
716 priv->vport_id = switch_info->representor ?
717 switch_info->port_name + 1 : -1;
719 /* representor_id field keeps the unmodified VF index. */
720 priv->representor_id = switch_info->representor ?
721 switch_info->port_name : -1;
723 * Look for sibling devices in order to reuse their switch domain
724 * if any, otherwise allocate one.
726 MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
727 const struct mlx5_priv *opriv =
728 rte_eth_devices[port_id].data->dev_private;
731 opriv->sh != priv->sh ||
733 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
735 priv->domain_id = opriv->domain_id;
738 if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
739 err = rte_eth_switch_domain_alloc(&priv->domain_id);
742 DRV_LOG(ERR, "unable to allocate switch domain: %s",
743 strerror(rte_errno));
748 /* Override some values set by hardware configuration. */
749 mlx5_args(&config, dpdk_dev->devargs);
750 err = mlx5_dev_check_sibling_config(priv, &config);
753 config.hw_csum = !!(sh->device_attr.device_cap_flags_ex &
754 IBV_DEVICE_RAW_IP_CSUM);
755 DRV_LOG(DEBUG, "checksum offloading is %ssupported",
756 (config.hw_csum ? "" : "not "));
757 #if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \
758 !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
759 DRV_LOG(DEBUG, "counters are not supported");
761 #if !defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_MLX5DV_DR)
762 if (config.dv_flow_en) {
763 DRV_LOG(WARNING, "DV flow is not supported");
764 config.dv_flow_en = 0;
767 config.ind_table_max_size =
768 sh->device_attr.max_rwq_indirection_table_size;
770 * Remove this check once DPDK supports larger/variable
771 * indirection tables.
773 if (config.ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
774 config.ind_table_max_size = ETH_RSS_RETA_SIZE_512;
775 DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
776 config.ind_table_max_size);
777 config.hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
778 IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
779 DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
780 (config.hw_vlan_strip ? "" : "not "));
781 config.hw_fcs_strip = !!(sh->device_attr.raw_packet_caps &
782 IBV_RAW_PACKET_CAP_SCATTER_FCS);
783 DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
784 (config.hw_fcs_strip ? "" : "not "));
785 #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
786 hw_padding = !!sh->device_attr.rx_pad_end_addr_align;
787 #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
788 hw_padding = !!(sh->device_attr.device_cap_flags_ex &
789 IBV_DEVICE_PCI_WRITE_END_PADDING);
791 if (config.hw_padding && !hw_padding) {
792 DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
793 config.hw_padding = 0;
794 } else if (config.hw_padding) {
795 DRV_LOG(DEBUG, "Rx end alignment padding is enabled");
797 config.tso = (sh->device_attr.max_tso > 0 &&
798 (sh->device_attr.tso_supported_qpts &
799 (1 << IBV_QPT_RAW_PACKET)));
801 config.tso_max_payload_sz = sh->device_attr.max_tso;
803 * MPW is disabled by default, while the Enhanced MPW is enabled
806 if (config.mps == MLX5_ARG_UNSET)
807 config.mps = (mps == MLX5_MPW_ENHANCED) ? MLX5_MPW_ENHANCED :
810 config.mps = config.mps ? mps : MLX5_MPW_DISABLED;
811 DRV_LOG(INFO, "%sMPS is %s",
812 config.mps == MLX5_MPW_ENHANCED ? "enhanced " :
813 config.mps == MLX5_MPW ? "legacy " : "",
814 config.mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
815 if (config.cqe_comp && !cqe_comp) {
816 DRV_LOG(WARNING, "Rx CQE compression isn't supported");
819 if (config.cqe_pad && !cqe_pad) {
820 DRV_LOG(WARNING, "Rx CQE padding isn't supported");
822 } else if (config.cqe_pad) {
823 DRV_LOG(INFO, "Rx CQE padding is enabled");
826 priv->counter_fallback = 0;
827 err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config.hca_attr);
832 if (!config.hca_attr.flow_counters_dump)
833 priv->counter_fallback = 1;
834 #ifndef HAVE_IBV_DEVX_ASYNC
835 priv->counter_fallback = 1;
837 if (priv->counter_fallback)
838 DRV_LOG(INFO, "Use fall-back DV counter management");
839 /* Check for LRO support. */
840 if (config.dest_tir && config.hca_attr.lro_cap &&
842 /* TBD: check tunnel LRO caps. */
843 config.lro.supported = config.hca_attr.lro_cap;
844 DRV_LOG(DEBUG, "Device supports LRO");
846 * If LRO timeout is not configured by application,
847 * use the minimal supported value.
849 if (!config.lro.timeout)
851 config.hca_attr.lro_timer_supported_periods[0];
852 DRV_LOG(DEBUG, "LRO session timeout set to %d usec",
855 #if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER)
856 if (config.hca_attr.qos.sup && config.hca_attr.qos.srtcm_sup &&
859 config.hca_attr.qos.flow_meter_reg_c_ids;
861 * The meter needs two REG_C's for color match and pre-sfx
862 * flow match. Here get the REG_C for color match.
863 * REG_C_0 and REG_C_1 are reserved for the metadata feature.
866 if (__builtin_popcount(reg_c_mask) < 1) {
868 DRV_LOG(WARNING, "No available register for"
871 priv->mtr_color_reg = ffs(reg_c_mask) - 1 +
874 priv->mtr_reg_share =
875 config.hca_attr.qos.flow_meter_reg_share;
876 DRV_LOG(DEBUG, "The REG_C meter uses is %d",
877 priv->mtr_color_reg);
883 DRV_LOG(DEBUG, "Timestamp counter frequency %u kHz",
884 config.hca_attr.dev_freq_khz);
885 DRV_LOG(DEBUG, "Packet pacing is %ssupported",
886 config.hca_attr.qos.packet_pacing ? "" : "not ");
887 DRV_LOG(DEBUG, "Cross channel ops are %ssupported",
888 config.hca_attr.cross_channel ? "" : "not ");
889 DRV_LOG(DEBUG, "WQE index ignore is %ssupported",
890 config.hca_attr.wqe_index_ignore ? "" : "not ");
891 DRV_LOG(DEBUG, "Non-wire SQ feature is %ssupported",
892 config.hca_attr.non_wire_sq ? "" : "not ");
893 DRV_LOG(DEBUG, "Static WQE SQ feature is %ssupported (%d)",
894 config.hca_attr.log_max_static_sq_wq ? "" : "not ",
895 config.hca_attr.log_max_static_sq_wq);
896 DRV_LOG(DEBUG, "WQE rate PP mode is %ssupported",
897 config.hca_attr.qos.wqe_rate_pp ? "" : "not ");
899 DRV_LOG(ERR, "DevX is required for packet pacing");
903 if (!config.hca_attr.qos.packet_pacing) {
904 DRV_LOG(ERR, "Packet pacing is not supported");
908 if (!config.hca_attr.cross_channel) {
909 DRV_LOG(ERR, "Cross channel operations are"
910 " required for packet pacing");
914 if (!config.hca_attr.wqe_index_ignore) {
915 DRV_LOG(ERR, "WQE index ignore feature is"
916 " required for packet pacing");
920 if (!config.hca_attr.non_wire_sq) {
921 DRV_LOG(ERR, "Non-wire SQ feature is"
922 " required for packet pacing");
926 if (!config.hca_attr.log_max_static_sq_wq) {
927 DRV_LOG(ERR, "Static WQE SQ feature is"
928 " required for packet pacing");
932 if (!config.hca_attr.qos.wqe_rate_pp) {
933 DRV_LOG(ERR, "WQE rate mode is required"
934 " for packet pacing");
938 #ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
939 DRV_LOG(ERR, "DevX does not provide UAR offset,"
940 " can't create queues for packet pacing");
945 if (config.mprq.enabled && mprq) {
946 if (config.mprq.stride_num_n &&
947 (config.mprq.stride_num_n > mprq_max_stride_num_n ||
948 config.mprq.stride_num_n < mprq_min_stride_num_n)) {
949 config.mprq.stride_num_n =
950 RTE_MIN(RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
951 mprq_min_stride_num_n),
952 mprq_max_stride_num_n);
954 "the number of strides"
955 " for Multi-Packet RQ is out of range,"
956 " setting default value (%u)",
957 1 << config.mprq.stride_num_n);
959 if (config.mprq.stride_size_n &&
960 (config.mprq.stride_size_n > mprq_max_stride_size_n ||
961 config.mprq.stride_size_n < mprq_min_stride_size_n)) {
962 config.mprq.stride_size_n =
963 RTE_MIN(RTE_MAX(MLX5_MPRQ_STRIDE_SIZE_N,
964 mprq_min_stride_size_n),
965 mprq_max_stride_size_n);
967 "the size of a stride"
968 " for Multi-Packet RQ is out of range,"
969 " setting default value (%u)",
970 1 << config.mprq.stride_size_n);
972 config.mprq.min_stride_size_n = mprq_min_stride_size_n;
973 config.mprq.max_stride_size_n = mprq_max_stride_size_n;
974 } else if (config.mprq.enabled && !mprq) {
975 DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
976 config.mprq.enabled = 0;
978 if (config.max_dump_files_num == 0)
979 config.max_dump_files_num = 128;
980 eth_dev = rte_eth_dev_allocate(name);
981 if (eth_dev == NULL) {
982 DRV_LOG(ERR, "can not allocate rte ethdev");
986 /* Flag to call rte_eth_dev_release_port() in rte_eth_dev_close(). */
987 eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
988 if (priv->representor) {
989 eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
990 eth_dev->data->representor_id = priv->representor_id;
993 * Store the associated network device interface index. This index
994 * is permanent throughout the lifetime of the device, so we may store
995 * the ifindex here and use the cached value later.
997 MLX5_ASSERT(spawn->ifindex);
998 priv->if_index = spawn->ifindex;
999 eth_dev->data->dev_private = priv;
1000 priv->dev_data = eth_dev->data;
1001 eth_dev->data->mac_addrs = priv->mac;
1002 eth_dev->device = dpdk_dev;
1003 /* Configure the first MAC address by default. */
1004 if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
1006 "port %u cannot get MAC address, is mlx5_en"
1007 " loaded? (errno: %s)",
1008 eth_dev->data->port_id, strerror(rte_errno));
1013 "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
1014 eth_dev->data->port_id,
1015 mac.addr_bytes[0], mac.addr_bytes[1],
1016 mac.addr_bytes[2], mac.addr_bytes[3],
1017 mac.addr_bytes[4], mac.addr_bytes[5]);
1018 #ifdef RTE_LIBRTE_MLX5_DEBUG
1020 char ifname[IF_NAMESIZE];
1022 if (mlx5_get_ifname(eth_dev, &ifname) == 0)
1023 DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
1024 eth_dev->data->port_id, ifname);
1026 DRV_LOG(DEBUG, "port %u ifname is unknown",
1027 eth_dev->data->port_id);
1030 /* Get actual MTU if possible. */
1031 err = mlx5_get_mtu(eth_dev, &priv->mtu);
1036 DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id,
1038 /* Initialize burst functions to prevent crashes before link-up. */
1039 eth_dev->rx_pkt_burst = removed_rx_burst;
1040 eth_dev->tx_pkt_burst = removed_tx_burst;
1041 eth_dev->dev_ops = &mlx5_os_dev_ops;
1042 /* Register MAC address. */
1043 claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
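/*
 * For VF devices (when vf_nl_en is set), the MAC address table is
 * synchronized with the kernel netdev over Netlink below.
 */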
1044 if (config.vf && config.vf_nl_en)
1045 mlx5_nl_mac_addr_sync(priv->nl_socket_route,
1046 mlx5_ifindex(eth_dev),
1047 eth_dev->data->mac_addrs,
1048 MLX5_MAX_MAC_ADDRESSES);
1050 priv->ctrl_flows = 0;
1051 TAILQ_INIT(&priv->flow_meters);
1052 TAILQ_INIT(&priv->flow_meter_profiles);
1053 /* Hint libmlx5 to use the PMD allocator for data plane resources. */
1054 mlx5_glue->dv_set_context_attr(sh->ctx,
1055 MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
1056 (void *)((uintptr_t)&(struct mlx5dv_ctx_allocators){
1057 .alloc = &mlx5_alloc_verbs_buf,
1058 .free = &mlx5_free_verbs_buf,
1061 /* Bring Ethernet device up. */
1062 DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
1063 eth_dev->data->port_id);
1064 mlx5_set_link_up(eth_dev);
1066 * Even though the interrupt handler is not installed yet,
1067 * interrupts will still trigger on the async_fd from
1068 * Verbs context returned by ibv_open_device().
1070 mlx5_link_update(eth_dev, 0);
1071 #ifdef HAVE_MLX5DV_DR_ESWITCH
1072 if (!(config.hca_attr.eswitch_manager && config.dv_flow_en &&
1073 (switch_info->representor || switch_info->master)))
1074 config.dv_esw_en = 0;
1076 config.dv_esw_en = 0;
1078 /* Detect minimal data bytes to inline. */
1079 mlx5_set_min_inline(spawn, &config);
1080 /* Store device configuration on private structure. */
1081 priv->config = config;
1082 /* Create context for virtual machine VLAN workaround. */
1083 priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex);
1084 if (config.dv_flow_en) {
1085 err = mlx5_alloc_shared_dr(priv);
1089 * RSS id is shared with meter flow id. Meter flow id can only
1090 * use the 24 MSB of the register.
1092 priv->qrss_id_pool = mlx5_flow_id_pool_alloc(UINT32_MAX >>
1093 MLX5_MTR_COLOR_BITS);
1094 if (!priv->qrss_id_pool) {
1095 DRV_LOG(ERR, "can't create flow id pool");
1100 /* Supported Verbs flow priority number detection. */
1101 err = mlx5_flow_discover_priorities(eth_dev);
1106 priv->config.flow_prio = err;
1107 if (!priv->config.dv_esw_en &&
1108 priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1109 DRV_LOG(WARNING, "metadata mode %u is not supported "
1110 "(no E-Switch)", priv->config.dv_xmeta_en);
1111 priv->config.dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
1113 mlx5_set_metadata_mask(eth_dev);
1114 if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
1115 !priv->sh->dv_regc0_mask) {
1116 DRV_LOG(ERR, "metadata mode %u is not supported "
1117 "(no metadata reg_c[0] is available)",
1118 priv->config.dv_xmeta_en);
1123 * Allocate the buffer for flow creation, just once.
1124 * The allocation must be done before any flow is created.
1126 mlx5_flow_alloc_intermediate(eth_dev);
1127 /* Query availability of metadata reg_c's. */
1128 err = mlx5_flow_discover_mreg_c(eth_dev);
1133 if (!mlx5_flow_ext_mreg_supported(eth_dev)) {
1135 "port %u extensive metadata register is not supported",
1136 eth_dev->data->port_id);
1137 if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1138 DRV_LOG(ERR, "metadata mode %u is not supported "
1139 "(no metadata registers available)",
1140 priv->config.dv_xmeta_en);
1145 if (priv->config.dv_flow_en &&
1146 priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
1147 mlx5_flow_ext_mreg_supported(eth_dev) &&
1148 priv->sh->dv_regc0_mask) {
1149 priv->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME,
1150 MLX5_FLOW_MREG_HTABLE_SZ);
1151 if (!priv->mreg_cp_tbl) {
1159 if (priv->mreg_cp_tbl)
1160 mlx5_hlist_destroy(priv->mreg_cp_tbl, NULL, NULL);
1162 mlx5_os_free_shared_dr(priv);
1163 if (priv->nl_socket_route >= 0)
1164 close(priv->nl_socket_route);
1165 if (priv->nl_socket_rdma >= 0)
1166 close(priv->nl_socket_rdma);
1167 if (priv->vmwa_context)
1168 mlx5_vlan_vmwa_exit(priv->vmwa_context);
1169 if (priv->qrss_id_pool)
1170 mlx5_flow_id_pool_release(priv->qrss_id_pool);
1172 claim_zero(rte_eth_switch_domain_free(priv->domain_id));
1174 if (eth_dev != NULL)
1175 eth_dev->data->dev_private = NULL;
1177 if (eth_dev != NULL) {
1178 /* mac_addrs must not be freed alone because it is part of dev_private. */
1181 eth_dev->data->mac_addrs = NULL;
1182 rte_eth_dev_release_port(eth_dev);
1185 mlx5_free_shared_dev_ctx(sh);
1186 MLX5_ASSERT(err > 0);
1192 * Comparison callback to sort device data.
1194 * This is meant to be used with qsort().
1197 * Pointer to pointer to first data object.
1199 * Pointer to pointer to second data object.
1202 * 0 if both objects are equal, less than 0 if the first argument is less
1203 * than the second, greater than 0 otherwise.
1206 mlx5_dev_spawn_data_cmp(const void *a, const void *b)
1208 const struct mlx5_switch_info *si_a =
1209 &((const struct mlx5_dev_spawn_data *)a)->info;
1210 const struct mlx5_switch_info *si_b =
1211 &((const struct mlx5_dev_spawn_data *)b)->info;
1214 /* Master device first. */
1215 ret = si_b->master - si_a->master;
1218 /* Then representor devices. */
1219 ret = si_b->representor - si_a->representor;
1222 /* Unidentified devices come last in no specific order. */
1223 if (!si_a->representor)
1225 /* Order representors by name. */
1226 return si_a->port_name - si_b->port_name;
1230 * Match PCI information for possible slaves of bonding device.
1232 * @param[in] ibv_dev
1233 * Pointer to Infiniband device structure.
1234 * @param[in] pci_dev
1235 * Pointer to PCI device structure to match PCI address.
1236 * @param[in] nl_rdma
1237 * Netlink RDMA group socket handle.
1240 * negative value if no bonding device found, otherwise
1241 * positive index of slave PF in bonding.
1244 mlx5_device_bond_pci_match(const struct ibv_device *ibv_dev,
1245 const struct rte_pci_device *pci_dev,
1248 char ifname[IF_NAMESIZE + 1];
1249 unsigned int ifindex;
1255 * Try to get the master device name. If something goes
1256 * wrong, suppose the lack of kernel support and no bonding devices.
1261 if (!strstr(ibv_dev->name, "bond"))
1263 np = mlx5_nl_portnum(nl_rdma, ibv_dev->name);
1267 * The master device might not be on the predefined
1268 * port (not on port index 1, it is not guaranteed),
1269 * we have to scan all Infiniband device ports and find the master.
1272 for (i = 1; i <= np; ++i) {
1273 /* Check whether Infiniband port is populated. */
1274 ifindex = mlx5_nl_ifindex(nl_rdma, ibv_dev->name, i);
1277 if (!if_indextoname(ifindex, ifname))
1279 /* Try to read bonding slave names from sysfs. */
1281 "/sys/class/net/%s/master/bonding/slaves", ifname);
1282 file = fopen(slaves, "r");
1288 /* Use safe format to check maximal buffer length. */
1289 MLX5_ASSERT(atol(RTE_STR(IF_NAMESIZE)) == IF_NAMESIZE);
1290 while (fscanf(file, "%" RTE_STR(IF_NAMESIZE) "s", ifname) == 1) {
1291 char tmp_str[IF_NAMESIZE + 32];
1292 struct rte_pci_addr pci_addr;
1293 struct mlx5_switch_info info;
1295 /* Process slave interface names in the loop. */
1296 snprintf(tmp_str, sizeof(tmp_str),
1297 "/sys/class/net/%s", ifname);
1298 if (mlx5_dev_to_pci_addr(tmp_str, &pci_addr)) {
1299 DRV_LOG(WARNING, "can not get PCI address"
1300 " for netdev \"%s\"", ifname);
1303 if (pci_dev->addr.domain != pci_addr.domain ||
1304 pci_dev->addr.bus != pci_addr.bus ||
1305 pci_dev->addr.devid != pci_addr.devid ||
1306 pci_dev->addr.function != pci_addr.function)
1308 /* Slave interface PCI address match found. */
1310 snprintf(tmp_str, sizeof(tmp_str),
1311 "/sys/class/net/%s/phys_port_name", ifname);
1312 file = fopen(tmp_str, "rb");
1315 info.name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET;
1316 if (fscanf(file, "%32s", tmp_str) == 1)
1317 mlx5_translate_port_name(tmp_str, &info);
1318 if (info.name_type == MLX5_PHYS_PORT_NAME_TYPE_LEGACY ||
1319 info.name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK)
1320 pf = info.port_name;
1329 * DPDK callback to register a PCI device.
1331 * This function spawns Ethernet devices out of a given PCI device.
1333 * @param[in] pci_drv
1334 * PCI driver structure (mlx5_driver).
1335 * @param[in] pci_dev
1336 * PCI device information.
1339 * 0 on success, a negative errno value otherwise and rte_errno is set.
1342 mlx5_os_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1343 struct rte_pci_device *pci_dev)
1345 struct ibv_device **ibv_list;
1347 * Number of found IB devices matching the requested PCI BDF.
1348 * nd != 1 means there are multiple IB devices over the same
1349 * PCI device and we have representors and a master.
1351 unsigned int nd = 0;
1353 * Number of found IB device ports. nd = 1 and np = 1..n means
1354 * we have a single multi-port IB device, and there may be
1355 * representors attached to some of the found ports.
1357 unsigned int np = 0;
1359 * Number of DPDK Ethernet devices to spawn - either over
1360 * multiple IB devices or multiple ports of a single IB device.
1361 * Actually this is the number of iterations to spawn.
1363 unsigned int ns = 0;
1366 * < 0 - no bonding device (single one)
1367 * >= 0 - bonding device (value is slave PF index)
1370 struct mlx5_dev_spawn_data *list = NULL;
1371 struct mlx5_dev_config dev_config;
1374 if (mlx5_class_get(pci_dev->device.devargs) != MLX5_CLASS_NET) {
1375 DRV_LOG(DEBUG, "Skip probing - should be probed by other mlx5"
1379 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1380 mlx5_pmd_socket_init();
1381 ret = mlx5_init_once();
1383 DRV_LOG(ERR, "unable to init PMD global data: %s",
1384 strerror(rte_errno));
1387 MLX5_ASSERT(pci_drv == &mlx5_driver);
1389 ibv_list = mlx5_glue->get_device_list(&ret);
1391 rte_errno = errno ? errno : ENOSYS;
1392 DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?");
1396 * First scan the list of all Infiniband devices to find
1397 * matching ones, gathering them into the list.
1399 struct ibv_device *ibv_match[ret + 1];
1400 int nl_route = mlx5_nl_init(NETLINK_ROUTE);
1401 int nl_rdma = mlx5_nl_init(NETLINK_RDMA);
1405 struct rte_pci_addr pci_addr;
1407 DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name);
1408 bd = mlx5_device_bond_pci_match
1409 (ibv_list[ret], pci_dev, nl_rdma);
1412 * Bonding device detected. Only one match is allowed,
1413 * the bonding is supported over a multi-port IB device,
1414 * there should be no matches on representor PCI
1415 * functions or non-VF LAG bonding devices with the
1416 * specified address.
1420 "multiple PCI match on bonding device"
1421 "\"%s\" found", ibv_list[ret]->name);
1426 DRV_LOG(INFO, "PCI information matches for"
1427 " slave %d bonding device \"%s\"",
1428 bd, ibv_list[ret]->name);
1429 ibv_match[nd++] = ibv_list[ret];
1432 if (mlx5_dev_to_pci_addr
1433 (ibv_list[ret]->ibdev_path, &pci_addr))
1435 if (pci_dev->addr.domain != pci_addr.domain ||
1436 pci_dev->addr.bus != pci_addr.bus ||
1437 pci_dev->addr.devid != pci_addr.devid ||
1438 pci_dev->addr.function != pci_addr.function)
1440 DRV_LOG(INFO, "PCI information matches for device \"%s\"",
1441 ibv_list[ret]->name);
1442 ibv_match[nd++] = ibv_list[ret];
1444 ibv_match[nd] = NULL;
1446 /* No device matches, just complain and bail out. */
1448 "no Verbs device matches PCI device " PCI_PRI_FMT ","
1449 " are kernel drivers loaded?",
1450 pci_dev->addr.domain, pci_dev->addr.bus,
1451 pci_dev->addr.devid, pci_dev->addr.function);
1458 * The found single matching device may have multiple ports.
1459 * Each port may be a representor, we have to check the port
1460 * number and check for the representors' existence.
1463 np = mlx5_nl_portnum(nl_rdma, ibv_match[0]->name);
1465 DRV_LOG(WARNING, "can not get IB device \"%s\""
1466 " ports number", ibv_match[0]->name);
1467 if (bd >= 0 && !np) {
1468 DRV_LOG(ERR, "can not get ports"
1469 " for bonding device");
1475 #ifndef HAVE_MLX5DV_DR_DEVX_PORT
1478 * This may happen if there is VF LAG kernel support and
1479 * application is compiled with older rdma_core library.
1482 "No kernel/verbs support for VF LAG bonding found.");
1483 rte_errno = ENOTSUP;
1489 * Now we can determine the maximal
1490 * number of devices to be spawned.
1492 list = rte_zmalloc("device spawn data",
1493 sizeof(struct mlx5_dev_spawn_data) *
1495 RTE_CACHE_LINE_SIZE);
1497 DRV_LOG(ERR, "spawn data array allocation failure");
1502 if (bd >= 0 || np > 1) {
1504 * Single IB device with multiple ports found,
1505 * it may be E-Switch master device and representors.
1506 * We have to perform identification through the ports.
1508 MLX5_ASSERT(nl_rdma >= 0);
1509 MLX5_ASSERT(ns == 0);
1510 MLX5_ASSERT(nd == 1);
1512 for (i = 1; i <= np; ++i) {
1513 list[ns].max_port = np;
1514 list[ns].phys_port = i;
1515 list[ns].phys_dev = ibv_match[0];
1516 list[ns].eth_dev = NULL;
1517 list[ns].pci_dev = pci_dev;
1518 list[ns].pf_bond = bd;
1519 list[ns].ifindex = mlx5_nl_ifindex
1521 mlx5_os_get_dev_device_name
1522 (list[ns].phys_dev), i);
1523 if (!list[ns].ifindex) {
1525 * No network interface index found for the
1526 * specified port, it means there is no
1527 * representor on this port. It's OK,
1528 * there can be disabled ports, for example
1529 * if sriov_numvfs < sriov_totalvfs.
1535 ret = mlx5_nl_switch_info
1539 if (ret || (!list[ns].info.representor &&
1540 !list[ns].info.master)) {
1542 * We failed to recognize representors with
1543 * Netlink, let's try to perform the task with sysfs.
1546 ret = mlx5_sysfs_switch_info
1550 if (!ret && bd >= 0) {
1551 switch (list[ns].info.name_type) {
1552 case MLX5_PHYS_PORT_NAME_TYPE_UPLINK:
1553 if (list[ns].info.port_name == bd)
1556 case MLX5_PHYS_PORT_NAME_TYPE_PFHPF:
1558 case MLX5_PHYS_PORT_NAME_TYPE_PFVF:
1559 if (list[ns].info.pf_num == bd)
1567 if (!ret && (list[ns].info.representor ^
1568 list[ns].info.master))
1573 "unable to recognize master/representors"
1574 " on the IB device with multiple ports");
1581 * The existence of several matching entries (nd > 1) means
1582 * port representors have been instantiated. No existing Verbs
1583 * call nor sysfs entries can tell them apart, this can only
1584 * be done through Netlink calls assuming kernel drivers are
1585 * recent enough to support them.
1587 * In the event of identification failure through Netlink,
1588 * try again through sysfs, then:
1590 * 1. A single IB device matches (nd == 1) with single
1591 * port (np=0/1) and is not a representor, assume
1592 * no switch support.
1594 * 2. Otherwise no safe assumptions can be made;
1595 * complain louder and bail out.
1597 for (i = 0; i != nd; ++i) {
1598 memset(&list[ns].info, 0, sizeof(list[ns].info));
1599 list[ns].max_port = 1;
1600 list[ns].phys_port = 1;
1601 list[ns].phys_dev = ibv_match[i];
1602 list[ns].eth_dev = NULL;
1603 list[ns].pci_dev = pci_dev;
1604 list[ns].pf_bond = -1;
1605 list[ns].ifindex = 0;
1607 list[ns].ifindex = mlx5_nl_ifindex
1609 mlx5_os_get_dev_device_name
1610 (list[ns].phys_dev), 1);
1611 if (!list[ns].ifindex) {
1612 char ifname[IF_NAMESIZE];
1615 * Netlink failed, it may happen with an old
1616 * ib_core kernel driver (before 4.16).
1617 * We can assume there is an old driver because
1618 * here we are processing single-port IB
1619 * devices. Let's try sysfs to retrieve
1620 * the ifindex. The method works for
1621 * the master device only.
1625 * Multiple devices found, assume
1626 * representors, can not distinguish
1627 * master/representor and retrieve
1628 * ifindex via sysfs.
1632 ret = mlx5_get_ifname_sysfs
1633 (ibv_match[i]->ibdev_path, ifname);
1636 if_nametoindex(ifname);
1637 if (!list[ns].ifindex) {
1639 * No network interface index found
1640 * for the specified device, it means
1641 * it is neither a representor nor a master device.
1649 ret = mlx5_nl_switch_info
1653 if (ret || (!list[ns].info.representor &&
1654 !list[ns].info.master)) {
1656 * We failed to recognize representors with
1657 * Netlink, let's try to perform the task with sysfs.
1660 ret = mlx5_sysfs_switch_info
1664 if (!ret && (list[ns].info.representor ^
1665 list[ns].info.master)) {
1667 } else if ((nd == 1) &&
1668 !list[ns].info.representor &&
1669 !list[ns].info.master) {
1671 * Single IB device with
1672 * one physical port and an
1673 * attached network device.
1674 * Maybe SR-IOV is not enabled
1675 * or there are no representors.
1677 DRV_LOG(INFO, "no E-Switch support detected");
1684 "unable to recognize master/representors"
1685 " on the multiple IB devices");
1693 * Sort the list to probe devices in natural order for users' convenience
1694 * (i.e. master first, then representors from lowest to highest ID).
1696 qsort(list, ns, sizeof(*list), mlx5_dev_spawn_data_cmp);
1697 /* Default configuration. */
1698 dev_config = (struct mlx5_dev_config){
1700 .mps = MLX5_ARG_UNSET,
1701 .dbnc = MLX5_ARG_UNSET,
1703 .txq_inline_max = MLX5_ARG_UNSET,
1704 .txq_inline_min = MLX5_ARG_UNSET,
1705 .txq_inline_mpw = MLX5_ARG_UNSET,
1706 .txqs_inline = MLX5_ARG_UNSET,
1708 .mr_ext_memseg_en = 1,
1710 .enabled = 0, /* Disabled by default. */
1713 .max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
1714 .min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
1718 .log_hp_size = MLX5_ARG_UNSET,
1720 /* Device specific configuration. */
1721 switch (pci_dev->id.device_id) {
1722 case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
1723 case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
1724 case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
1725 case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
1726 case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF:
1727 case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF:
1728 case PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF:
1734 for (i = 0; i != ns; ++i) {
1737 list[i].eth_dev = mlx5_dev_spawn(&pci_dev->device,
1740 if (!list[i].eth_dev) {
1741 if (rte_errno != EBUSY && rte_errno != EEXIST)
1743 /* Device is disabled or already spawned. Ignore it. */
1746 restore = list[i].eth_dev->data->dev_flags;
1747 rte_eth_copy_pci_info(list[i].eth_dev, pci_dev);
1748 /* Restore non-PCI flags cleared by the above call. */
1749 list[i].eth_dev->data->dev_flags |= restore;
1750 rte_eth_dev_probing_finish(list[i].eth_dev);
1754 "probe of PCI device " PCI_PRI_FMT " aborted after"
1755 " encountering an error: %s",
1756 pci_dev->addr.domain, pci_dev->addr.bus,
1757 pci_dev->addr.devid, pci_dev->addr.function,
1758 strerror(rte_errno));
1762 if (!list[i].eth_dev)
1764 mlx5_dev_close(list[i].eth_dev);
1765 /* mac_addrs must not be freed because it is in dev_private. */
1766 list[i].eth_dev->data->mac_addrs = NULL;
1767 claim_zero(rte_eth_dev_release_port(list[i].eth_dev));
1769 /* Restore original error. */
1776 * Do the routine cleanup:
1777 * - close opened Netlink sockets
1778 * - free allocated spawn data array
1779 * - free the Infiniband device list
1787 MLX5_ASSERT(ibv_list);
1788 mlx5_glue->free_device_list(ibv_list);
1793 mlx5_config_doorbell_mapping_env(const struct mlx5_dev_config *config)
1798 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
1799 /* Get environment variable to store. */
1800 env = getenv(MLX5_SHUT_UP_BF);
1801 value = env ? !!strcmp(env, "0") : MLX5_ARG_UNSET;
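/*
 * Remember the user-provided state so it can be restored by
 * mlx5_restore_doorbell_mapping_env() once the device has been opened.
 */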
1802 if (config->dbnc == MLX5_ARG_UNSET)
1803 setenv(MLX5_SHUT_UP_BF, MLX5_SHUT_UP_BF_DEFAULT, 1);
1805 setenv(MLX5_SHUT_UP_BF,
1806 config->dbnc == MLX5_TXDB_NCACHED ? "1" : "0", 1);
1811 mlx5_restore_doorbell_mapping_env(int value)
1813 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
1814 /* Restore the original environment variable state. */
1815 if (value == MLX5_ARG_UNSET)
1816 unsetenv(MLX5_SHUT_UP_BF);
1818 setenv(MLX5_SHUT_UP_BF, value ? "1" : "0", 1);
1822 * Extract pdn of PD object using DV API.
1825 * Pointer to the verbs PD object.
1827 * Pointer to the PD object number variable.
1830 * 0 on success, error value otherwise.
1833 mlx5_os_get_pdn(void *pd, uint32_t *pdn)
1835 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
1836 struct mlx5dv_obj obj;
1837 struct mlx5dv_pd pd_info;
1841 obj.pd.out = &pd_info;
1842 ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
1844 DRV_LOG(DEBUG, "Fail to get PD object info");
1853 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
1857 * Function API to open IB device.
1859 * This function calls the Linux glue APIs to open a device.
1862 * Pointer to the IB device attributes (name, port, etc).
1863 * @param[out] config
1864 * Pointer to device configuration structure.
1866 * Pointer to shared context structure.
1869 * 0 on success, a positive error value otherwise.
1872 mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn,
1873 const struct mlx5_dev_config *config,
1874 struct mlx5_dev_ctx_shared *sh)
1879 * Configure the environment variable "MLX5_SHUT_UP_BF"
1880 * before the device creation. The rdma_core library
1881 * checks the variable at device creation and
1882 * stores the result internally.
1884 dbmap_env = mlx5_config_doorbell_mapping_env(config);
1885 /* Try to open IB device with DV first, then usual Verbs. */
1887 sh->ctx = mlx5_glue->dv_open_device(spawn->phys_dev);
1890 DRV_LOG(DEBUG, "DevX is supported");
1891 /* The device is created, no need for environment. */
1892 mlx5_restore_doorbell_mapping_env(dbmap_env);
1894 /* The environment variable is still configured. */
1895 sh->ctx = mlx5_glue->open_device(spawn->phys_dev);
1896 err = errno ? errno : ENODEV;
1898 * The environment variable is not needed anymore,
1899 * all device creation attempts are completed.
1901 mlx5_restore_doorbell_mapping_env(dbmap_env);
1904 DRV_LOG(DEBUG, "DevX is NOT supported");
1911 * Install shared asynchronous device events handler.
1912 * This function is implemented to support event sharing
1913 * between multiple ports of a single IB device.
1916 * Pointer to mlx5_dev_ctx_shared object.
1919 mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
1924 sh->intr_handle.fd = -1;
1925 flags = fcntl(((struct ibv_context *)sh->ctx)->async_fd, F_GETFL);
1926 ret = fcntl(((struct ibv_context *)sh->ctx)->async_fd,
1927 F_SETFL, flags | O_NONBLOCK);
1929 DRV_LOG(INFO, "failed to change file descriptor async event"
1932 sh->intr_handle.fd = ((struct ibv_context *)sh->ctx)->async_fd;
1933 sh->intr_handle.type = RTE_INTR_HANDLE_EXT;
1934 if (rte_intr_callback_register(&sh->intr_handle,
1935 mlx5_dev_interrupt_handler, sh)) {
1936 DRV_LOG(INFO, "Fail to install the shared interrupt.");
1937 sh->intr_handle.fd = -1;
1941 #ifdef HAVE_IBV_DEVX_ASYNC
1942 sh->intr_handle_devx.fd = -1;
1944 (void *)mlx5_glue->devx_create_cmd_comp(sh->ctx);
1945 struct mlx5dv_devx_cmd_comp *devx_comp = sh->devx_comp;
1947 DRV_LOG(INFO, "failed to allocate devx_comp.");
1950 flags = fcntl(devx_comp->fd, F_GETFL);
1951 ret = fcntl(devx_comp->fd, F_SETFL, flags | O_NONBLOCK);
1953 DRV_LOG(INFO, "failed to change file descriptor"
1957 sh->intr_handle_devx.fd = devx_comp->fd;
1958 sh->intr_handle_devx.type = RTE_INTR_HANDLE_EXT;
1959 if (rte_intr_callback_register(&sh->intr_handle_devx,
1960 mlx5_dev_interrupt_handler_devx, sh)) {
1961 DRV_LOG(INFO, "Failed to install the devx shared"
1963 sh->intr_handle_devx.fd = -1;
1965 #endif /* HAVE_IBV_DEVX_ASYNC */
1970 * Uninstall shared asynchronous device events handler.
1971 * This function is implemented to support event sharing
1972 * between multiple ports of a single IB device.
1975 * Pointer to mlx5_dev_ctx_shared object.
1978 mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh)
1980 if (sh->intr_handle.fd >= 0)
1981 mlx5_intr_callback_unregister(&sh->intr_handle,
1982 mlx5_dev_interrupt_handler, sh);
1983 #ifdef HAVE_IBV_DEVX_ASYNC
1984 if (sh->intr_handle_devx.fd >= 0)
1985 rte_intr_callback_unregister(&sh->intr_handle_devx,
1986 mlx5_dev_interrupt_handler_devx, sh);
1988 mlx5_glue->devx_destroy_cmd_comp(sh->devx_comp);
1993 * Read statistics by a named counter.
1996 * Pointer to the private device data structure.
1997 * @param[in] ctr_name
1998 * Pointer to the name of the statistic counter to read.
2000 * Pointer to the read statistic value.
2002 * 0 on success and stat is valid, 1 if failed to read the value
2007 mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name,
2013 MKSTR(path, "%s/ports/%d/hw_counters/%s",
2014 priv->sh->ibdev_path,
2017 fd = open(path, O_RDONLY);
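/*
 * Each file under "hw_counters" exposes a single decimal value as text;
 * read it and convert it to a 64-bit integer.
 */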
2019 char buf[21] = {'\0'};
2020 ssize_t n = read(fd, buf, sizeof(buf));
2024 *stat = strtoull(buf, NULL, 10);
2034 * Read device counters table.
2037 * Pointer to Ethernet device.
2039 * Counters table output buffer.
2042 * 0 on success and stats is filled, negative errno value otherwise and
2046 mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats)
2048 struct mlx5_priv *priv = dev->data->dev_private;
2049 struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
2052 unsigned int stats_sz = xstats_ctrl->stats_n * sizeof(uint64_t);
2053 unsigned char et_stat_buf[sizeof(struct ethtool_stats) + stats_sz];
2054 struct ethtool_stats *et_stats = (struct ethtool_stats *)et_stat_buf;
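/*
 * Fetch the whole kernel statistics table in a single ETHTOOL_GSTATS
 * ioctl request, then translate the entries of interest below.
 */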
2057 et_stats->cmd = ETHTOOL_GSTATS;
2058 et_stats->n_stats = xstats_ctrl->stats_n;
2059 ifr.ifr_data = (caddr_t)et_stats;
2060 ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
2063 "port %u unable to read statistic values from device",
2064 dev->data->port_id);
2067 for (i = 0; i != xstats_ctrl->mlx5_stats_n; ++i) {
2068 if (xstats_ctrl->info[i].dev) {
2069 ret = mlx5_os_read_dev_stat(priv,
2070 xstats_ctrl->info[i].ctr_name,
2072 /* Return the last xstats counter value if the read fails. */
2074 xstats_ctrl->xstats[i] = stats[i];
2076 stats[i] = xstats_ctrl->xstats[i];
2078 stats[i] = (uint64_t)
2079 et_stats->data[xstats_ctrl->dev_table_idx[i]];
2086 * Query the number of statistics provided by ETHTOOL.
2089 * Pointer to Ethernet device.
2092 * Number of statistics on success, negative errno value otherwise and
2096 mlx5_os_get_stats_n(struct rte_eth_dev *dev)
2098 struct ethtool_drvinfo drvinfo;
2102 drvinfo.cmd = ETHTOOL_GDRVINFO;
2103 ifr.ifr_data = (caddr_t)&drvinfo;
2104 ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
2106 DRV_LOG(WARNING, "port %u unable to query number of statistics",
2107 dev->data->port_id);
2110 return drvinfo.n_stats;
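/*
 * Translation table between the xstats names exposed by the PMD and the
 * counter names reported by the kernel mlx5 driver through ethtool/sysfs.
 */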
static const struct mlx5_counter_ctrl mlx5_counters_init[] = {
	{
		.dpdk_name = "rx_port_unicast_bytes",
		.ctr_name = "rx_vport_unicast_bytes",
	},
	{
		.dpdk_name = "rx_port_multicast_bytes",
		.ctr_name = "rx_vport_multicast_bytes",
	},
	{
		.dpdk_name = "rx_port_broadcast_bytes",
		.ctr_name = "rx_vport_broadcast_bytes",
	},
	{
		.dpdk_name = "rx_port_unicast_packets",
		.ctr_name = "rx_vport_unicast_packets",
	},
	{
		.dpdk_name = "rx_port_multicast_packets",
		.ctr_name = "rx_vport_multicast_packets",
	},
	{
		.dpdk_name = "rx_port_broadcast_packets",
		.ctr_name = "rx_vport_broadcast_packets",
	},
	{
		.dpdk_name = "tx_port_unicast_bytes",
		.ctr_name = "tx_vport_unicast_bytes",
	},
	{
		.dpdk_name = "tx_port_multicast_bytes",
		.ctr_name = "tx_vport_multicast_bytes",
	},
	{
		.dpdk_name = "tx_port_broadcast_bytes",
		.ctr_name = "tx_vport_broadcast_bytes",
	},
	{
		.dpdk_name = "tx_port_unicast_packets",
		.ctr_name = "tx_vport_unicast_packets",
	},
	{
		.dpdk_name = "tx_port_multicast_packets",
		.ctr_name = "tx_vport_multicast_packets",
	},
	{
		.dpdk_name = "tx_port_broadcast_packets",
		.ctr_name = "tx_vport_broadcast_packets",
	},
	{
		.dpdk_name = "rx_wqe_err",
		.ctr_name = "rx_wqe_err",
	},
	{
		.dpdk_name = "rx_crc_errors_phy",
		.ctr_name = "rx_crc_errors_phy",
	},
	{
		.dpdk_name = "rx_in_range_len_errors_phy",
		.ctr_name = "rx_in_range_len_errors_phy",
	},
	{
		.dpdk_name = "rx_symbol_err_phy",
		.ctr_name = "rx_symbol_err_phy",
	},
	{
		.dpdk_name = "tx_errors_phy",
		.ctr_name = "tx_errors_phy",
	},
	{
		.dpdk_name = "rx_out_of_buffer",
		.ctr_name = "out_of_buffer",
		.dev = 1,
	},
	{
		.dpdk_name = "tx_packets_phy",
		.ctr_name = "tx_packets_phy",
	},
	{
		.dpdk_name = "rx_packets_phy",
		.ctr_name = "rx_packets_phy",
	},
	{
		.dpdk_name = "tx_discards_phy",
		.ctr_name = "tx_discards_phy",
	},
	{
		.dpdk_name = "rx_discards_phy",
		.ctr_name = "rx_discards_phy",
	},
	{
		.dpdk_name = "tx_bytes_phy",
		.ctr_name = "tx_bytes_phy",
	},
	{
		.dpdk_name = "rx_bytes_phy",
		.ctr_name = "rx_bytes_phy",
	},
	/* Representor only */
	{
		.dpdk_name = "rx_packets",
		.ctr_name = "vport_rx_packets",
	},
	{
		.dpdk_name = "rx_bytes",
		.ctr_name = "vport_rx_bytes",
	},
	{
		.dpdk_name = "tx_packets",
		.ctr_name = "vport_tx_packets",
	},
	{
		.dpdk_name = "tx_bytes",
		.ctr_name = "vport_tx_bytes",
	},
};

static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init);
/**
 * Init the structures to read device counters.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_os_stats_init(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
	struct mlx5_stats_ctrl *stats_ctrl = &priv->stats_ctrl;
	unsigned int i;
	unsigned int j;
	struct ifreq ifr;
	struct ethtool_gstrings *strings = NULL;
	unsigned int dev_stats_n;
	unsigned int str_sz;
	int ret;

	/* So that it won't aggregate for each init. */
	xstats_ctrl->mlx5_stats_n = 0;
	ret = mlx5_os_get_stats_n(dev);
	if (ret < 0) {
		DRV_LOG(WARNING, "port %u no extended statistics available",
			dev->data->port_id);
		return;
	}
	dev_stats_n = ret;
	/* Allocate memory to grab stat names and values. */
	str_sz = dev_stats_n * ETH_GSTRING_LEN;
	strings = (struct ethtool_gstrings *)
		  rte_malloc("xstats_strings",
			     str_sz + sizeof(struct ethtool_gstrings), 0);
	if (!strings) {
		DRV_LOG(WARNING, "port %u unable to allocate memory for xstats",
			dev->data->port_id);
		return;
	}
	strings->cmd = ETHTOOL_GSTRINGS;
	strings->string_set = ETH_SS_STATS;
	strings->len = dev_stats_n;
	ifr.ifr_data = (caddr_t)strings;
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(WARNING, "port %u unable to get statistic names",
			dev->data->port_id);
		goto free;
	}
	for (i = 0; i != dev_stats_n; ++i) {
		const char *curr_string = (const char *)
			&strings->data[i * ETH_GSTRING_LEN];

		for (j = 0; j != xstats_n; ++j) {
			if (!strcmp(mlx5_counters_init[j].ctr_name,
				    curr_string)) {
				unsigned int idx = xstats_ctrl->mlx5_stats_n++;

				xstats_ctrl->dev_table_idx[idx] = i;
				xstats_ctrl->info[idx] = mlx5_counters_init[j];
				break;
			}
		}
	}
	/* Add dev counters. */
	for (i = 0; i != xstats_n; ++i) {
		if (mlx5_counters_init[i].dev) {
			unsigned int idx = xstats_ctrl->mlx5_stats_n++;

			xstats_ctrl->info[idx] = mlx5_counters_init[i];
			xstats_ctrl->hw_stats[idx] = 0;
		}
	}
	MLX5_ASSERT(xstats_ctrl->mlx5_stats_n <= MLX5_MAX_XSTATS);
	xstats_ctrl->stats_n = dev_stats_n;
	/* Copy to base at first time. */
	ret = mlx5_os_read_dev_counters(dev, xstats_ctrl->base);
	if (ret)
		DRV_LOG(ERR, "port %u cannot read device counters: %s",
			dev->data->port_id, strerror(rte_errno));
	mlx5_os_read_dev_stat(priv, "out_of_buffer", &stats_ctrl->imissed_base);
	stats_ctrl->imissed = 0;
free:
	rte_free(strings);
}
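/*
 * Usage sketch (illustrative only, not part of the PMD): once the port is
 * started, an application reads the counters initialized above through the
 * generic ethdev xstats API. The port id (0) and array sizes are assumptions
 * made for this example.
 *
 *	struct rte_eth_xstat xstats[MLX5_MAX_XSTATS];
 *	struct rte_eth_xstat_name names[MLX5_MAX_XSTATS];
 *	int n_names = rte_eth_xstats_get_names(0, names, RTE_DIM(names));
 *	int n_vals = rte_eth_xstats_get(0, xstats, RTE_DIM(xstats));
 *
 *	for (int i = 0; i < n_vals && i < n_names; ++i)
 *		printf("%s: %" PRIu64 "\n",
 *		       names[xstats[i].id].name, xstats[i].value);
 */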
/**
 * Set the reg_mr and dereg_mr callbacks.
 *
 * @param[out] reg_mr_cb
 *   Pointer to the reg_mr function.
 * @param[out] dereg_mr_cb
 *   Pointer to the dereg_mr function.
 */
void
mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb,
		      mlx5_dereg_mr_t *dereg_mr_cb)
{
	*reg_mr_cb = mlx5_verbs_ops.reg_mr;
	*dereg_mr_cb = mlx5_verbs_ops.dereg_mr;
}
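/*
 * The callbacks installed above are consumed by the shared MR management
 * code (mlx5_common_mr) whenever a memory region has to be registered or
 * de-registered for DMA; on Linux they resolve to the Verbs-based
 * implementations exported through mlx5_verbs_ops.
 */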
const struct eth_dev_ops mlx5_os_dev_ops = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.promiscuous_enable = mlx5_promiscuous_enable,
	.promiscuous_disable = mlx5_promiscuous_disable,
	.allmulticast_enable = mlx5_allmulticast_enable,
	.allmulticast_disable = mlx5_allmulticast_disable,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.fw_version_get = mlx5_fw_version_get,
	.dev_infos_get = mlx5_dev_infos_get,
	.read_clock = mlx5_read_clock,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.set_mc_addr_list = mlx5_set_mc_addr_list,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.reta_update = mlx5_dev_rss_reta_update,
	.reta_query = mlx5_dev_rss_reta_query,
	.rss_hash_update = mlx5_rss_hash_update,
	.rss_hash_conf_get = mlx5_rss_hash_conf_get,
	.filter_ctrl = mlx5_dev_filter_ctrl,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
	.rxq_info_get = mlx5_rxq_info_get,
	.txq_info_get = mlx5_txq_info_get,
	.rx_burst_mode_get = mlx5_rx_burst_mode_get,
	.tx_burst_mode_get = mlx5_tx_burst_mode_get,
	.rx_queue_count = mlx5_rx_queue_count,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
	.is_removed = mlx5_is_removed,
	.udp_tunnel_port_add = mlx5_udp_tunnel_port_add,
	.get_module_info = mlx5_get_module_info,
	.get_module_eeprom = mlx5_get_module_eeprom,
	.hairpin_cap_get = mlx5_hairpin_cap_get,
	.mtr_ops_get = mlx5_flow_meter_ops_get,
};
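/*
 * Note: the PMD installs one of the three eth_dev_ops tables defined in this
 * file depending on the runtime context: the full table above for primary
 * processes, the reduced table below for secondary processes, and the
 * isolated-mode table when rte_flow isolation is enabled on the port.
 */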
/* Available operations from secondary process. */
const struct eth_dev_ops mlx5_os_dev_sec_ops = {
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.fw_version_get = mlx5_fw_version_get,
	.dev_infos_get = mlx5_dev_infos_get,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
	.rxq_info_get = mlx5_rxq_info_get,
	.txq_info_get = mlx5_txq_info_get,
	.rx_burst_mode_get = mlx5_rx_burst_mode_get,
	.tx_burst_mode_get = mlx5_tx_burst_mode_get,
	.get_module_info = mlx5_get_module_info,
	.get_module_eeprom = mlx5_get_module_eeprom,
};
/* Available operations in flow isolated mode. */
const struct eth_dev_ops mlx5_os_dev_ops_isolate = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.promiscuous_enable = mlx5_promiscuous_enable,
	.promiscuous_disable = mlx5_promiscuous_disable,
	.allmulticast_enable = mlx5_allmulticast_enable,
	.allmulticast_disable = mlx5_allmulticast_disable,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.fw_version_get = mlx5_fw_version_get,
	.dev_infos_get = mlx5_dev_infos_get,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.set_mc_addr_list = mlx5_set_mc_addr_list,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.filter_ctrl = mlx5_dev_filter_ctrl,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
	.rxq_info_get = mlx5_rxq_info_get,
	.txq_info_get = mlx5_txq_info_get,
	.rx_burst_mode_get = mlx5_rx_burst_mode_get,
	.tx_burst_mode_get = mlx5_tx_burst_mode_get,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
	.is_removed = mlx5_is_removed,
	.get_module_info = mlx5_get_module_info,
	.get_module_eeprom = mlx5_get_module_eeprom,
	.hairpin_cap_get = mlx5_hairpin_cap_get,
	.mtr_ops_get = mlx5_flow_meter_ops_get,
};